libvirt/libvirt-qemu-migration-Always-offer-block-dirty-bitmaps-during-migration.patch
Jiri Denemark df6500fae7 libvirt-11.10.0-5.el10
- qemublocktest: Iterate all nodenames in 'testQemuDetectBitmaps' (RHEL-145769)
- qemu: monitor: Detect list of bitmaps from 'qcow2' format specific data (RHEL-145769)
- qemuMigrationDstPrepareAnyBlockDirtyBitmaps: Fix check for existing bitmaps (RHEL-145769)
- qemu: migration: Always offer block dirty bitmaps during migration (RHEL-145769)
- qemuMigrationDstPrepareAnyBlockDirtyBitmaps: Always consider offered bitmaps (RHEL-145769)

Resolves: RHEL-145769
2026-02-04 12:10:22 +01:00

From 64ae2c71b95cd25fb1e18fbc68ddbc814f3de8ca Mon Sep 17 00:00:00 2001
Message-ID: <64ae2c71b95cd25fb1e18fbc68ddbc814f3de8ca.1770203422.git.jdenemar@redhat.com>
From: Peter Krempa <pkrempa@redhat.com>
Date: Tue, 27 Jan 2026 17:00:10 +0100
Subject: [PATCH] qemu: migration: Always offer block dirty bitmaps during
migration
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Until now block dirty bitmaps were offered to the destination only if
non-shared storage migration was enabled.

Upcoming patches will want to support it also in cases where storage is
shared but the destination has a qcow2 overlay using the 'data_file'
feature, where the qcow2 overlay is not actually shared.

To support that we'll now always offer bitmaps for migration. The
destination can then decide (using existing logic) to pick only the
ones that are not present in the image on the destination, which is how
it was supposed to work even now.

The patch removes all the flag checks and simply offers bitmaps in any
case. The overhead incurred by this is one 'query-named-block-nodes'
call to qemu.

Signed-off-by: Peter Krempa <pkrempa@redhat.com>
Reviewed-by: Ján Tomko <jtomko@redhat.com>
(cherry picked from commit a4f610ff3fe190058f18baea18b095d0bc69441b)
https://issues.redhat.com/browse/RHEL-145769 [rhel-10.2]
https://issues.redhat.com/browse/RHEL-145770 [rhel-9.8]
---
src/qemu/qemu_migration.c | 28 +++++++---------------------
1 file changed, 7 insertions(+), 21 deletions(-)
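
Review note: a condensed view of the resulting cookie-flag selection in
qemuMigrationSrcRun(), simplified from the hunks below with the surrounding
code elided. All identifiers are taken from the diff; nothing new is
introduced here.

    /* After this patch the BLOCK_DIRTY_BITMAPS cookie flag is requested
     * unconditionally, while NBD stays tied to non-shared storage migration.
     * Previously both flags were set only inside the storageMigration branch. */
    unsigned int cookieFlags = QEMU_MIGRATION_COOKIE_BLOCK_DIRTY_BITMAPS;

    if (storageMigration)
        cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;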
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 723e131c98..755b9a5e1a 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -2580,16 +2580,13 @@ qemuMigrationAnyConnectionClosed(virDomainObj *vm,
* qemuMigrationSrcBeginPhaseBlockDirtyBitmaps:
* @mig: migration cookie struct
* @vm: domain object
- * @migrate_disks: disks which are being migrated
- * @nmigrage_disks: number of @migrate_disks
*
* Enumerates block dirty bitmaps on disks which will undergo storage migration
* and fills them into @mig to be offered to the destination.
*/
static int
qemuMigrationSrcBeginPhaseBlockDirtyBitmaps(qemuMigrationCookie *mig,
- virDomainObj *vm,
- const char **migrate_disks)
+ virDomainObj *vm)
{
GSList *disks = NULL;
@@ -2611,9 +2608,6 @@ qemuMigrationSrcBeginPhaseBlockDirtyBitmaps(qemuMigrationCookie *mig,
if (!nodedata)
continue;
- if (!qemuMigrationAnyCopyDisk(diskdef, migrate_disks))
- continue;
-
for (j = 0; j < nodedata->nbitmaps; j++) {
qemuMigrationBlockDirtyBitmapsDiskBitmap *bitmap;
@@ -2680,7 +2674,6 @@ qemuMigrationSrcBeginXML(virDomainObj *vm,
char **cookieout,
int *cookieoutlen,
unsigned int cookieFlags,
- const char **migrate_disks,
unsigned int flags)
{
qemuDomainObjPrivate *priv = vm->privateData;
@@ -2696,8 +2689,7 @@ qemuMigrationSrcBeginXML(virDomainObj *vm,
if (!(mig = qemuMigrationCookieNew(vm->def, priv->origname)))
return NULL;
- if (cookieFlags & QEMU_MIGRATION_COOKIE_NBD &&
- qemuMigrationSrcBeginPhaseBlockDirtyBitmaps(mig, vm, migrate_disks) < 0)
+ if (qemuMigrationSrcBeginPhaseBlockDirtyBitmaps(mig, vm) < 0)
return NULL;
if (qemuMigrationCookieFormat(mig, driver, vm,
@@ -2879,8 +2871,7 @@ qemuMigrationSrcBeginPhase(virQEMUDriver *driver,
return NULL;
return qemuMigrationSrcBeginXML(vm, xmlin,
- cookieout, cookieoutlen, cookieFlags,
- migrate_disks, flags);
+ cookieout, cookieoutlen, cookieFlags, flags);
}
@@ -2969,8 +2960,7 @@ qemuMigrationSrcBeginResume(virDomainObj *vm,
return NULL;
}
- return qemuMigrationSrcBeginXML(vm, xmlin,
- cookieout, cookieoutlen, 0, NULL, flags);
+ return qemuMigrationSrcBeginXML(vm, xmlin, cookieout, cookieoutlen, 0, flags);
}
@@ -4752,7 +4742,6 @@ qemuMigrationSrcRunPrepareBlockDirtyBitmaps(virDomainObj *vm,
/* For VIR_MIGRATE_NON_SHARED_INC we can migrate the bitmaps directly,
* otherwise we must create merged bitmaps from the whole chain */
-
if (!(flags & VIR_MIGRATE_NON_SHARED_INC) &&
qemuMigrationSrcRunPrepareBlockDirtyBitmapsMerge(vm, mig) < 0)
return -1;
@@ -4943,7 +4932,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
VIR_AUTOCLOSE fd = -1;
unsigned long restore_max_bandwidth = priv->migMaxBandwidth;
virErrorPtr orig_err = NULL;
- unsigned int cookieFlags = 0;
+ unsigned int cookieFlags = QEMU_MIGRATION_COOKIE_BLOCK_DIRTY_BITMAPS;
bool abort_on_error = !!(flags & VIR_MIGRATE_ABORT_ON_ERROR);
bool storageMigration = flags & (VIR_MIGRATE_NON_SHARED_DISK | VIR_MIGRATE_NON_SHARED_INC);
bool cancel = false;
@@ -4967,10 +4956,8 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
storageMigration = qemuMigrationHasAnyStorageMigrationDisks(vm->def,
migrate_disks);
- if (storageMigration) {
+ if (storageMigration)
cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
- cookieFlags |= QEMU_MIGRATION_COOKIE_BLOCK_DIRTY_BITMAPS;
- }
if (virLockManagerPluginUsesState(driver->lockManager) &&
!cookieout) {
@@ -5004,8 +4991,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
cookiein, cookieinlen,
cookieFlags |
QEMU_MIGRATION_COOKIE_GRAPHICS |
- QEMU_MIGRATION_COOKIE_CAPS |
- QEMU_MIGRATION_COOKIE_BLOCK_DIRTY_BITMAPS);
+ QEMU_MIGRATION_COOKIE_CAPS);
if (!mig)
goto error;
--
2.52.0