- kvm-migration-Add-helper-to-get-target-runstate.patch [RHEL-54670]
- kvm-qmp-cont-Only-activate-disks-if-migration-completed.patch [RHEL-54670]
- kvm-migration-block-Make-late-block-active-the-default.patch [RHEL-54670]
- kvm-migration-block-Apply-late-block-active-behavior-to-.patch [RHEL-54670]
- kvm-migration-block-Fix-possible-race-with-block_inactiv.patch [RHEL-54670]
- kvm-migration-block-Rewrite-disk-activation.patch [RHEL-54670]
- kvm-block-Add-active-field-to-BlockDeviceInfo.patch [RHEL-54670]
- kvm-block-Allow-inactivating-already-inactive-nodes.patch [RHEL-54670]
- kvm-block-Inactivate-external-snapshot-overlays-when-nec.patch [RHEL-54670]
- kvm-migration-block-active-Remove-global-active-flag.patch [RHEL-54670]
- kvm-block-Don-t-attach-inactive-child-to-active-node.patch [RHEL-54670]
- kvm-block-Fix-crash-on-block_resize-on-inactive-node.patch [RHEL-54670]
- kvm-block-Add-option-to-create-inactive-nodes.patch [RHEL-54670]
- kvm-block-Add-blockdev-set-active-QMP-command.patch [RHEL-54670]
- kvm-block-Support-inactive-nodes-in-blk_insert_bs.patch [RHEL-54670]
- kvm-block-export-Don-t-ignore-image-activation-error-in-.patch [RHEL-54670]
- kvm-block-Drain-nodes-before-inactivating-them.patch [RHEL-54670]
- kvm-block-export-Add-option-to-allow-export-of-inactive-.patch [RHEL-54670]
- kvm-nbd-server-Support-inactive-nodes.patch [RHEL-54670]
- kvm-iotests-Add-filter_qtest.patch [RHEL-54670]
- kvm-iotests-Add-qsd-migrate-case.patch [RHEL-54670]
- kvm-iotests-Add-NBD-based-tests-for-inactive-nodes.patch [RHEL-54670]
- Resolves: RHEL-54670 (Provide QMP command for block device reactivation after migration [rhel-10.0])
From b6ed71f7b16e09a29ab479f437805d83ee0c85e0 Mon Sep 17 00:00:00 2001
From: Peter Xu <peterx@redhat.com>
Date: Fri, 6 Dec 2024 18:08:33 -0500
Subject: [PATCH 01/22] migration: Add helper to get target runstate

RH-Author: Kevin Wolf <kwolf@redhat.com>
RH-MergeRequest: 340: QMP command for block device reactivation after migration
RH-Jira: RHEL-54670
RH-Acked-by: Eric Blake <eblake@redhat.com>
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
RH-Commit: [1/22] a64178a0575a1b921e8a868a8007d0a50eb7ae29 (kmwolf/centos-qemu-kvm)

In 99% of cases, after QEMU migrates to the dest host, it tries to
detect the target VM runstate using global_state_get_runstate().

There's one outlier so far, which is Xen, that won't send the global
state. That's the major reason why the global_state_received() check
was always there together with global_state_get_runstate().

However, it's utterly confusing why global_state_received() has
anything to do with "let's start the VM or not".

Provide a helper to explain it; then we have a unified entry for
getting the target dest QEMU runstate after migration.

Suggested-by: Fabiano Rosas <farosas@suse.de>
Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20241206230838.1111496-2-peterx@redhat.com>
Signed-off-by: Fabiano Rosas <farosas@suse.de>
(cherry picked from commit 7815f69867da92335055d4b5248430b0f122ce4e)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 migration/migration.c | 21 +++++++++++++++++----
 1 file changed, 17 insertions(+), 4 deletions(-)

diff --git a/migration/migration.c b/migration/migration.c
index 3dea06d577..c7a9e2e026 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -135,6 +135,21 @@ static bool migration_needs_multiple_sockets(void)
     return migrate_multifd() || migrate_postcopy_preempt();
 }
 
+static RunState migration_get_target_runstate(void)
+{
+    /*
+     * When the global state is not migrated, it means we don't know the
+     * runstate of the src QEMU. We don't have much choice but assuming
+     * the VM is running. NOTE: this is pretty rare case, so far only Xen
+     * uses it.
+     */
+    if (!global_state_received()) {
+        return RUN_STATE_RUNNING;
+    }
+
+    return global_state_get_runstate();
+}
+
 static bool transport_supports_multi_channels(MigrationAddress *addr)
 {
     if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) {
@@ -727,8 +742,7 @@ static void process_incoming_migration_bh(void *opaque)
      * unless we really are starting the VM.
      */
     if (!migrate_late_block_activate() ||
-        (autostart && (!global_state_received() ||
-            runstate_is_live(global_state_get_runstate())))) {
+        (autostart && runstate_is_live(migration_get_target_runstate()))) {
         /* Make sure all file formats throw away their mutable metadata.
          * If we get an error here, just don't restart the VM yet. */
         bdrv_activate_all(&local_err);
@@ -751,8 +765,7 @@ static void process_incoming_migration_bh(void *opaque)
 
     dirty_bitmap_mig_before_vm_start();
 
-    if (!global_state_received() ||
-        runstate_is_live(global_state_get_runstate())) {
+    if (runstate_is_live(migration_get_target_runstate())) {
         if (autostart) {
             vm_start();
         } else {
--
2.39.3
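
For reference, below is a minimal standalone C sketch (not part of the patch)
of the fallback behavior this helper centralizes: if the source never sent its
global state, as with Xen, the destination assumes the VM was live and may
auto-start it. QEMU's global-state accessors, runstate_is_live(), and the
have_global_state/src_runstate variables are stubs invented here so the
example builds on its own; only the decision logic mirrors the diff above.

#include <stdbool.h>
#include <stdio.h>

typedef enum { RUN_STATE_RUNNING, RUN_STATE_PAUSED } RunState;

/* Stubs standing in for QEMU's global-state tracking. */
static bool have_global_state;                   /* false for a Xen-style source */
static RunState src_runstate = RUN_STATE_PAUSED;

static bool global_state_received(void) { return have_global_state; }
static RunState global_state_get_runstate(void) { return src_runstate; }
static bool runstate_is_live(RunState s) { return s == RUN_STATE_RUNNING; }

/* Same shape as the helper added by this patch. */
static RunState migration_get_target_runstate(void)
{
    /* No global state on the wire: assume the source VM was running. */
    if (!global_state_received()) {
        return RUN_STATE_RUNNING;
    }
    return global_state_get_runstate();
}

int main(void)
{
    bool autostart = true;

    /* The destination only auto-starts the VM if the source was live. */
    if (autostart && runstate_is_live(migration_get_target_runstate())) {
        printf("would call vm_start()\n");
    } else {
        printf("stay paused until an explicit 'cont'\n");
    }
    return 0;
}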