libguestfs/SOURCES/0028-v2v-o-rhv-upload-collect-disks-UUIDs-right-after-cop.patch


From 9ae4cc6feaf66cf34b9cdf0cf2c251ed7ef61259 Mon Sep 17 00:00:00 2001
From: Pino Toscano <ptoscano@redhat.com>
Date: Mon, 16 Sep 2019 14:07:22 +0200
Subject: [PATCH] v2v: -o rhv-upload: collect disks UUIDs right after copy

Instead of waiting for the completion of the nbdkit transfers to get the
UUIDs of the disks, use the new #disk_copied hook to do that after each
disk is copied.

This has almost no behaviour change on rhv-upload, except for the
--no-copy mode:
- previously it used to hit the 5 minute timeout while waiting for the
  finalization of the first disk
- now it asserts on the different number of collected UUIDs vs the
  actual targets; at the moment there is nothing else that can be done,
  as this assumption is needed e.g. when creating the OVF file

(cherry picked from commit 7b93ad6a32f09043bf870202b59bea83d47e0c3a)
---
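As a minimal standalone illustration of the pattern described above, the
sketch below shows an output object that accumulates one UUID per disk in
a per-disk "disk_copied" hook and then only asserts the counts match in
the metadata step. The sketch_output class, the target record and the
fake UUID strings are simplified placeholders for this sketch, not the
real v2v types or APIs.

(* Minimal, self-contained sketch (not the real v2v classes): an output
 * object collects one UUID per disk in a disk_copied hook and the
 * final metadata step only asserts the counts match.
 *)
type target = { disk_id : int }

class sketch_output =
  object
    (* List of disk UUIDs, appended in copy order. *)
    val mutable disks_uuids : string list = []

    (* Called once per disk, right after its copy finishes. *)
    method disk_copied (t : target) (i : int) (nr_disks : int) : unit =
      ignore i; ignore nr_disks;
      let uuid = Printf.sprintf "uuid-for-disk-%d" t.disk_id in
      disks_uuids <- disks_uuids @ [uuid]

    (* Called once at the end.  If no disk was copied (cf. --no-copy),
     * this assertion fails instead of waiting on a timeout. *)
    method create_metadata (targets : target list) : string list =
      assert (List.length disks_uuids = List.length targets);
      disks_uuids
  end

let () =
  let targets = [ { disk_id = 1 }; { disk_id = 2 } ] in
  let o = new sketch_output in
  List.iteri (fun i t -> o#disk_copied t i (List.length targets)) targets;
  List.iter print_endline (o#create_metadata targets)
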
 v2v/output_rhv_upload.ml | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/v2v/output_rhv_upload.ml b/v2v/output_rhv_upload.ml
index 19bdfcf05..382ad0d93 100644
--- a/v2v/output_rhv_upload.ml
+++ b/v2v/output_rhv_upload.ml
@@ -231,6 +231,8 @@ object
   val mutable rhv_storagedomain_uuid = None
   (* The cluster UUID. *)
   val mutable rhv_cluster_uuid = None
+  (* List of disk UUIDs. *)
+  val mutable disks_uuids = []
 
   method precheck () =
     Python_script.error_unless_python_interpreter_found ();
@@ -379,23 +381,21 @@ If the messages above are not sufficient to diagnose the problem then add the
         TargetURI ("json:" ^ JSON.string_of_doc json_params)
     ) overlays
 
-  method create_metadata source targets _ guestcaps inspect target_firmware =
-    (* Get the UUIDs of each disk image. These files are written
-     * out by the nbdkit plugins on successful finalization of the
+  method disk_copied t i nr_disks =
+    (* Get the UUID of the disk image. This file is written
+     * out by the nbdkit plugin on successful finalization of the
      * transfer.
      *)
-    let nr_disks = List.length targets in
-    let image_uuids =
-      List.mapi (
-        fun i t ->
-          let id = t.target_overlay.ov_source.s_disk_id in
-          let diskid_file = diskid_file_of_id id in
-          if not (wait_for_file diskid_file finalization_timeout) then
-            error (f_"transfer of disk %d/%d failed, see earlier error messages")
-                  (i+1) nr_disks;
-          let diskid = read_whole_file diskid_file in
-          diskid
-      ) targets in
+    let id = t.target_overlay.ov_source.s_disk_id in
+    let diskid_file = diskid_file_of_id id in
+    if not (wait_for_file diskid_file finalization_timeout) then
+      error (f_"transfer of disk %d/%d failed, see earlier error messages")
+            (i+1) nr_disks;
+    let diskid = read_whole_file diskid_file in
+    disks_uuids <- disks_uuids @ [diskid]
+
+  method create_metadata source targets _ guestcaps inspect target_firmware =
+    assert (List.length disks_uuids = List.length targets);
 
     (* The storage domain UUID. *)
     let sd_uuid =
@@ -411,7 +411,7 @@ If the messages above are not sufficient to diagnose the problem then add the
     let ovf =
       Create_ovf.create_ovf source targets guestcaps inspect
                             target_firmware output_alloc
-                            sd_uuid image_uuids vol_uuids vm_uuid
+                            sd_uuid disks_uuids vol_uuids vm_uuid
                             OVirt in
     let ovf = DOM.doc_to_string ovf in
 
--
2.25.4