23d471091d
- kvm-vhost-Track-descriptor-chain-in-private-at-SVQ.patch [bz#1939363]
- kvm-vhost-Fix-device-s-used-descriptor-dequeue.patch [bz#1939363]
- kvm-hw-virtio-Replace-g_memdup-by-g_memdup2.patch [bz#1939363]
- kvm-vhost-Fix-element-in-vhost_svq_add-failure.patch [bz#1939363]
- kvm-meson-create-have_vhost_-variables.patch [bz#1939363]
- kvm-meson-use-have_vhost_-variables-to-pick-sources.patch [bz#1939363]
- kvm-vhost-move-descriptor-translation-to-vhost_svq_vring.patch [bz#1939363]
- kvm-virtio-net-Expose-MAC_TABLE_ENTRIES.patch [bz#1939363]
- kvm-virtio-net-Expose-ctrl-virtqueue-logic.patch [bz#1939363]
- kvm-vdpa-Avoid-compiler-to-squash-reads-to-used-idx.patch [bz#1939363]
- kvm-vhost-Reorder-vhost_svq_kick.patch [bz#1939363]
- kvm-vhost-Move-vhost_svq_kick-call-to-vhost_svq_add.patch [bz#1939363]
- kvm-vhost-Check-for-queue-full-at-vhost_svq_add.patch [bz#1939363]
- kvm-vhost-Decouple-vhost_svq_add-from-VirtQueueElement.patch [bz#1939363]
- kvm-vhost-Add-SVQDescState.patch [bz#1939363]
- kvm-vhost-Track-number-of-descs-in-SVQDescState.patch [bz#1939363]
- kvm-vhost-add-vhost_svq_push_elem.patch [bz#1939363]
- kvm-vhost-Expose-vhost_svq_add.patch [bz#1939363]
- kvm-vhost-add-vhost_svq_poll.patch [bz#1939363]
- kvm-vhost-Add-svq-avail_handler-callback.patch [bz#1939363]
- kvm-vdpa-Export-vhost_vdpa_dma_map-and-unmap-calls.patch [bz#1939363]
- kvm-vhost-net-vdpa-add-stubs-for-when-no-virtio-net-devi.patch [bz#1939363]
- kvm-vdpa-manual-forward-CVQ-buffers.patch [bz#1939363]
- kvm-vdpa-Buffer-CVQ-support-on-shadow-virtqueue.patch [bz#1939363]
- kvm-vdpa-Extract-get-features-part-from-vhost_vdpa_get_m.patch [bz#1939363]
- kvm-vdpa-Add-device-migration-blocker.patch [bz#1939363]
- kvm-vdpa-Add-x-svq-to-NetdevVhostVDPAOptions.patch [bz#1939363]
- kvm-redhat-Update-linux-headers-linux-kvm.h-to-v5.18-rc6.patch [bz#2111994]
- kvm-target-s390x-kvm-Honor-storage-keys-during-emulation.patch [bz#2111994]
- kvm-kvm-don-t-use-perror-without-useful-errno.patch [bz#2095608]
- kvm-multifd-Copy-pages-before-compressing-them-with-zlib.patch [bz#2099934]
- kvm-Revert-migration-Simplify-unqueue_page.patch [bz#2099934]
- Resolves: bz#1939363 (vDPA control virtqueue support in Qemu)
- Resolves: bz#2111994 (RHEL9: skey test in kvm_unit_test got failed)
- Resolves: bz#2095608 (Please correct the error message when try to start qemu with "-M kernel-irqchip=split")
- Resolves: bz#2099934 (Guest reboot on destination host after postcopy migration completed)
From 5ea59b17866add54e5ae8c76d3cb472c67e1fa91 Mon Sep 17 00:00:00 2001
From: Thomas Huth <thuth@redhat.com>
Date: Tue, 2 Aug 2022 08:19:49 +0200
Subject: [PATCH 32/32] Revert "migration: Simplify unqueue_page()"

RH-Author: Thomas Huth <thuth@redhat.com>
RH-MergeRequest: 112: Fix postcopy migration on s390x
RH-Commit: [2/2] 3913c9ed3f27f4b66245913da29d0c46db0c6567 (thuth/qemu-kvm-cs9)
RH-Bugzilla: 2099934
RH-Acked-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
RH-Acked-by: Cornelia Huck <cohuck@redhat.com>
RH-Acked-by: David Hildenbrand <david@redhat.com>
RH-Acked-by: Peter Xu <peterx@redhat.com>

This reverts commit cfd66f30fb0f735df06ff4220e5000290a43dad3.

The simplification of unqueue_page() introduced a bug that sometimes
breaks migration on s390x hosts.

The problem is not fully understood yet, but since we are already in
the freeze for QEMU 7.1 and we need something working there, let's
revert this patch for the upcoming release. The optimization can be
redone later again in a proper way if necessary.

Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2099934
Signed-off-by: Thomas Huth <thuth@redhat.com>
Message-Id: <20220802061949.331576-1-thuth@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
(cherry picked from commit 777f53c75983dd10756f5dbfc8af50fe11da81c1)
Conflicts:
	migration/trace-events
	(trivial contextual conflict)
Signed-off-by: Thomas Huth <thuth@redhat.com>
---
 migration/ram.c        | 37 ++++++++++++++++++++++++++-----------
 migration/trace-events |  3 ++-
 2 files changed, 28 insertions(+), 12 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index fb6db54642..ee40e4a718 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1548,7 +1548,6 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
 {
     struct RAMSrcPageRequest *entry;
     RAMBlock *block = NULL;
-    size_t page_size;
 
     if (!postcopy_has_request(rs)) {
         return NULL;
@@ -1565,13 +1564,10 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
     entry = QSIMPLEQ_FIRST(&rs->src_page_requests);
     block = entry->rb;
     *offset = entry->offset;
-    page_size = qemu_ram_pagesize(block);
-    /* Each page request should only be multiple page size of the ramblock */
-    assert((entry->len % page_size) == 0);
 
-    if (entry->len > page_size) {
-        entry->len -= page_size;
-        entry->offset += page_size;
+    if (entry->len > TARGET_PAGE_SIZE) {
+        entry->len -= TARGET_PAGE_SIZE;
+        entry->offset += TARGET_PAGE_SIZE;
     } else {
         memory_region_unref(block->mr);
         QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
@@ -1579,9 +1575,6 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
         migration_consume_urgent_request();
     }
 
-    trace_unqueue_page(block->idstr, *offset,
-                       test_bit((*offset >> TARGET_PAGE_BITS), block->bmap));
-
     return block;
 }
 
@@ -1956,8 +1949,30 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
 {
     RAMBlock *block;
     ram_addr_t offset;
+    bool dirty;
+
+    do {
+        block = unqueue_page(rs, &offset);
+        /*
+         * We're sending this page, and since it's postcopy nothing else
+         * will dirty it, and we must make sure it doesn't get sent again
+         * even if this queue request was received after the background
+         * search already sent it.
+         */
+        if (block) {
+            unsigned long page;
+
+            page = offset >> TARGET_PAGE_BITS;
+            dirty = test_bit(page, block->bmap);
+            if (!dirty) {
+                trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
+                                                page);
+            } else {
+                trace_get_queued_page(block->idstr, (uint64_t)offset, page);
+            }
+        }
 
-    block = unqueue_page(rs, &offset);
+    } while (block && !dirty);
 
     if (!block) {
         /*
diff --git a/migration/trace-events b/migration/trace-events
index 1aec580e92..09d61ed1f4 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -85,6 +85,8 @@ put_qlist_end(const char *field_name, const char *vmsd_name) "%s(%s)"
 qemu_file_fclose(void) ""
 
 # ram.c
+get_queued_page(const char *block_name, uint64_t tmp_offset, unsigned long page_abs) "%s/0x%" PRIx64 " page_abs=0x%lx"
+get_queued_page_not_dirty(const char *block_name, uint64_t tmp_offset, unsigned long page_abs) "%s/0x%" PRIx64 " page_abs=0x%lx"
 migration_bitmap_sync_start(void) ""
 migration_bitmap_sync_end(uint64_t dirty_pages) "dirty_pages %" PRIu64
 migration_bitmap_clear_dirty(char *str, uint64_t start, uint64_t size, unsigned long page) "rb %s start 0x%"PRIx64" size 0x%"PRIx64" page 0x%lx"
@@ -110,7 +112,6 @@ ram_save_iterate_big_wait(uint64_t milliconds, int iterations) "big wait: %" PRI
 ram_load_complete(int ret, uint64_t seq_iter) "exit_code %d seq iteration %" PRIu64
 ram_write_tracking_ramblock_start(const char *block_id, size_t page_size, void *addr, size_t length) "%s: page_size: %zu addr: %p length: %zu"
 ram_write_tracking_ramblock_stop(const char *block_id, size_t page_size, void *addr, size_t length) "%s: page_size: %zu addr: %p length: %zu"
-unqueue_page(char *block, uint64_t offset, bool dirty) "ramblock '%s' offset 0x%"PRIx64" dirty %d"
 
 # multifd.c
 multifd_new_send_channel_async(uint8_t id) "channel %u"
-- 
2.31.1
