b7fbd568b2
- kvm-migration-Move-yank-outside-qemu_start_incoming_migr.patch [bz#1974683]
- kvm-migration-Allow-reset-of-postcopy_recover_triggered-.patch [bz#1974683]
- kvm-Remove-RHEL-7.0.0-machine-type.patch [bz#1968519]
- kvm-Remove-RHEL-7.1.0-machine-type.patch [bz#1968519]
- kvm-Remove-RHEL-7.2.0-machine-type.patch [bz#1968519]
- kvm-Remove-RHEL-7.3.0-machine-types.patch [bz#1968519]
- kvm-Remove-RHEL-7.4.0-machine-types.patch [bz#1968519]
- kvm-Remove-RHEL-7.5.0-machine-types.patch [bz#1968519]
- kvm-acpi-pc-revert-back-to-v5.2-PCI-slot-enumeration.patch [bz#1957194]
- kvm-migration-failover-reset-partially_hotplugged.patch [bz#1957194]
- kvm-hmp-Fix-loadvm-to-resume-the-VM-on-success-instead-o.patch [bz#1957194]
- kvm-migration-Move-bitmap_mutex-out-of-migration_bitmap_.patch [bz#1957194]
- kvm-i386-cpu-Expose-AVX_VNNI-instruction-to-guest.patch [bz#1957194]
- kvm-ratelimit-protect-with-a-mutex.patch [bz#1957194]
- kvm-Update-Linux-headers-to-5.13-rc4.patch [bz#1957194]
- kvm-i386-Add-ratelimit-for-bus-locks-acquired-in-guest.patch [bz#1957194]
- kvm-iothread-generalize-iothread_set_param-iothread_get_.patch [bz#1957194]
- kvm-iothread-add-aio-max-batch-parameter.patch [bz#1957194]
- kvm-linux-aio-limit-the-batch-size-using-aio-max-batch-p.patch [bz#1957194]
- kvm-block-nvme-Fix-VFIO_MAP_DMA-failed-No-space-left-on-.patch [bz#1957194]
- kvm-migration-move-wait-unplug-loop-to-its-own-function.patch [bz#1957194]
- kvm-migration-failover-continue-to-wait-card-unplug-on-e.patch [bz#1957194]
- kvm-aarch64-Add-USB-storage-devices.patch [bz#1957194]
- kvm-iotests-Improve-and-rename-test-291-to-qemu-img-bitm.patch [bz#1957194]
- kvm-qemu-img-Fail-fast-on-convert-bitmaps-with-inconsist.patch [bz#1957194]
- kvm-qemu-img-Add-skip-broken-bitmaps-for-convert-bitmaps.patch [bz#1957194]
- kvm-audio-Never-send-migration-section.patch [bz#1957194]
- kvm-pc-bios-s390-ccw-bootmap-Silence-compiler-warning-fr.patch [bz#1939509 bz#1940132]
- kvm-pc-bios-s390-ccw-Use-reset_psw-pointer-instead-of-ha.patch [bz#1939509 bz#1940132]
- kvm-pc-bios-s390-ccw-netboot-Use-Wl-prefix-to-pass-param.patch [bz#1939509 bz#1940132]
- kvm-pc-bios-s390-ccw-Silence-warning-from-Clang-by-marki.patch [bz#1939509 bz#1940132]
- kvm-pc-bios-s390-ccw-Fix-the-cc-option-macro-in-the-Make.patch [bz#1939509 bz#1940132]
- kvm-pc-bios-s390-ccw-Silence-GCC-11-stringop-overflow-wa.patch [bz#1939509 bz#1940132]
- kvm-pc-bios-s390-ccw-Allow-building-with-Clang-too.patch [bz#1939509 bz#1940132]
- kvm-pc-bios-s390-ccw-Fix-inline-assembly-for-older-versi.patch [bz#1939509 bz#1940132]
- kvm-configure-Fix-endianess-test-with-LTO.patch [bz#1939509 bz#1940132]
- kvm-spec-Switch-toolchain-to-Clang-LLVM.patch [bz#1939509 bz#1940132]
- kvm-spec-Use-safe-stack-for-x86_64.patch [bz#1939509 bz#1940132]
- kvm-spec-Reenable-write-support-for-VMDK-etc.-in-tools.patch [bz#1989841]
- Resolves: bz#1974683 (Fail to set migrate incoming for 2nd time after the first time failed)
- Resolves: bz#1968519 (Remove all the old 7.0-7.5 machine types)
- Resolves: bz#1957194 (Synchronize RHEL-AV 8.5.0 changes to RHEL 9.0.0 Beta)
- Resolves: bz#1939509 (QEMU: enable SafeStack)
- Resolves: bz#1940132 (QEMU: switch build toolchain to Clang/LLVM)
- Resolves: bz#1989841 (RFE: qemu-img cannot convert images into vmdk and vpc formats)
From 7726f6461eebf2c4a4b129f1c98add25c0b1bee2 Mon Sep 17 00:00:00 2001
From: Peter Xu <peterx@redhat.com>
Date: Thu, 29 Jul 2021 07:42:16 -0400
Subject: [PATCH 12/39] migration: Move bitmap_mutex out of
 migration_bitmap_clear_dirty()

RH-Author: Miroslav Rezanina <mrezanin@redhat.com>
RH-MergeRequest: 32: Synchronize with RHEL-AV 8.5 release 27 to RHEL 9
RH-Commit: [4/15] cc207372dab253a4db3b6d351fa2fb2f442437ad (mrezanin/centos-src-qemu-kvm)
RH-Bugzilla: 1957194
RH-Acked-by: Stefano Garzarella <sgarzare@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
RH-Acked-by: Igor Mammedov <imammedo@redhat.com>
RH-Acked-by: Andrew Jones <drjones@redhat.com>

Taking the mutex for every single dirty bit we clear is too slow, especially
since we take and release it even when the bit is already clear. So far the
lock only synchronizes the special case of qemu_guest_free_page_hint() against
the migration thread, nothing really serious yet. Let's move the lock up to
the callers.

There are two callers of migration_bitmap_clear_dirty().

For migration, move it into ram_save_iterate(). With the help of the MAX_WAIT
logic, ram_save_iterate() runs for no more than roughly 50ms at a time, so we
take the lock once at its entry. It also means calls to
qemu_guest_free_page_hint() can be delayed; but that should be very rare, only
happens during migration, and I don't see a problem with it.

For COLO, move it up to colo_flush_ram_cache(). I think COLO forgot to take
that lock even when calling ramblock_sync_dirty_bitmap(), whereas
migration_bitmap_sync() takes it correctly. So let the mutex cover both the
ramblock_sync_dirty_bitmap() and migration_bitmap_clear_dirty() calls.

It would even be possible to drop the lock and use atomic operations on
rb->bmap and the migration_dirty_pages variable. I didn't do that, partly to
stay on the safe side and partly because it is not predictable whether the
frequent atomic ops would themselves add overhead, e.g. on huge VMs where this
happens very often. If that ever becomes necessary, we can keep a local
counter and only call the atomic ops periodically. Keep it simple for now.
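
In a nutshell, the change hoists a per-page lock out of the hot loop into its
callers. The standalone sketch below illustrates the pattern only; it is not
QEMU code, and names such as save_iteration(), free_page_hint() and
bitmap_mutex (a plain pthread mutex standing in for rs->bitmap_mutex) are made
up for illustration:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NPAGES 4096

static pthread_mutex_t bitmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static uint8_t dirty_bitmap[NPAGES];
static unsigned long dirty_pages;

/*
 * Before the change, this helper locked/unlocked bitmap_mutex itself,
 * once per page.  After the change, the caller must hold bitmap_mutex.
 */
static bool clear_dirty(unsigned long page)
{
    bool was_dirty = dirty_bitmap[page];

    if (was_dirty) {
        dirty_bitmap[page] = 0;
        dirty_pages--;
    }
    return was_dirty;
}

/* Mirrors the ram_save_iterate() side: one lock/unlock per whole pass. */
static void save_iteration(void)
{
    pthread_mutex_lock(&bitmap_mutex);
    for (unsigned long p = 0; p < NPAGES; p++) {
        if (clear_dirty(p)) {
            /* ... the real code would send page p here ... */
        }
    }
    pthread_mutex_unlock(&bitmap_mutex);
}

/*
 * Mirrors the free-page-hint side: another thread touching the bitmap
 * still takes the same mutex, so the two stay synchronized.
 */
static void free_page_hint(unsigned long page)
{
    pthread_mutex_lock(&bitmap_mutex);
    if (dirty_bitmap[page]) {
        dirty_bitmap[page] = 0;
        dirty_pages--;
    }
    pthread_mutex_unlock(&bitmap_mutex);
}

int main(void)
{
    dirty_bitmap[10] = 1;
    dirty_pages = 1;
    free_page_hint(10);     /* clears the page before the save pass */
    save_iteration();       /* finds nothing left to send */
    printf("remaining dirty pages: %lu\n", dirty_pages);
    return 0;
}

The point of the sketch: the helper now relies on its caller holding the lock,
so a full pass over the bitmap costs one lock/unlock instead of one per page.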

Cc: Wei Wang <wei.w.wang@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hailiang Zhang <zhang.zhanghailiang@huawei.com>
Cc: Dr. David Alan Gilbert <dgilbert@redhat.com>
Cc: Juan Quintela <quintela@redhat.com>
Cc: Leonardo Bras Soares Passos <lsoaresp@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20210630200805.280905-1-peterx@redhat.com>
Reviewed-by: Wei Wang <wei.w.wang@intel.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
(cherry picked from commit 63268c4970a5f126cc9af75f3ccb8057abef5ec0)
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
 migration/ram.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index 4682f3625c..5d64917dce 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -819,8 +819,6 @@ static inline bool migration_bitmap_clear_dirty(RAMState *rs,
 {
     bool ret;
 
-    QEMU_LOCK_GUARD(&rs->bitmap_mutex);
-
     /*
      * Clear dirty bitmap if needed. This _must_ be called before we
      * send any of the page in the chunk because we need to make sure
@@ -2869,6 +2867,14 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
         goto out;
     }
 
+    /*
+     * We'll take this lock a little bit long, but it's okay for two reasons.
+     * Firstly, the only possible other thread to take it is who calls
+     * qemu_guest_free_page_hint(), which should be rare; secondly, see
+     * MAX_WAIT (if curious, further see commit 4508bd9ed8053ce) below, which
+     * guarantees that we'll at least released it in a regular basis.
+     */
+    qemu_mutex_lock(&rs->bitmap_mutex);
     WITH_RCU_READ_LOCK_GUARD() {
         if (ram_list.version != rs->last_version) {
             ram_state_reset(rs);
@@ -2928,6 +2934,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
             i++;
         }
     }
+    qemu_mutex_unlock(&rs->bitmap_mutex);
 
     /*
      * Must occur before EOS (or any QEMUFile operation)
@@ -3710,6 +3717,7 @@ void colo_flush_ram_cache(void)
     unsigned long offset = 0;
 
     memory_global_dirty_log_sync();
+    qemu_mutex_lock(&ram_state->bitmap_mutex);
     WITH_RCU_READ_LOCK_GUARD() {
         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
             ramblock_sync_dirty_bitmap(ram_state, block);
@@ -3738,6 +3746,7 @@ void colo_flush_ram_cache(void)
         }
     }
     trace_colo_flush_ram_cache_end();
+    qemu_mutex_unlock(&ram_state->bitmap_mutex);
 }
 
 /**
--
2.27.0