From 23d471091d195de28bc247989696d18ac6397ce5 Mon Sep 17 00:00:00 2001
From: Miroslav Rezanina
Date: Mon, 8 Aug 2022 04:22:43 -0400
Subject: [PATCH] * Mon Aug 08 2022 Miroslav Rezanina - 7.0.0-10

- kvm-vhost-Track-descriptor-chain-in-private-at-SVQ.patch [bz#1939363]
- kvm-vhost-Fix-device-s-used-descriptor-dequeue.patch [bz#1939363]
- kvm-hw-virtio-Replace-g_memdup-by-g_memdup2.patch [bz#1939363]
- kvm-vhost-Fix-element-in-vhost_svq_add-failure.patch [bz#1939363]
- kvm-meson-create-have_vhost_-variables.patch [bz#1939363]
- kvm-meson-use-have_vhost_-variables-to-pick-sources.patch [bz#1939363]
- kvm-vhost-move-descriptor-translation-to-vhost_svq_vring.patch [bz#1939363]
- kvm-virtio-net-Expose-MAC_TABLE_ENTRIES.patch [bz#1939363]
- kvm-virtio-net-Expose-ctrl-virtqueue-logic.patch [bz#1939363]
- kvm-vdpa-Avoid-compiler-to-squash-reads-to-used-idx.patch [bz#1939363]
- kvm-vhost-Reorder-vhost_svq_kick.patch [bz#1939363]
- kvm-vhost-Move-vhost_svq_kick-call-to-vhost_svq_add.patch [bz#1939363]
- kvm-vhost-Check-for-queue-full-at-vhost_svq_add.patch [bz#1939363]
- kvm-vhost-Decouple-vhost_svq_add-from-VirtQueueElement.patch [bz#1939363]
- kvm-vhost-Add-SVQDescState.patch [bz#1939363]
- kvm-vhost-Track-number-of-descs-in-SVQDescState.patch [bz#1939363]
- kvm-vhost-add-vhost_svq_push_elem.patch [bz#1939363]
- kvm-vhost-Expose-vhost_svq_add.patch [bz#1939363]
- kvm-vhost-add-vhost_svq_poll.patch [bz#1939363]
- kvm-vhost-Add-svq-avail_handler-callback.patch [bz#1939363]
- kvm-vdpa-Export-vhost_vdpa_dma_map-and-unmap-calls.patch [bz#1939363]
- kvm-vhost-net-vdpa-add-stubs-for-when-no-virtio-net-devi.patch [bz#1939363]
- kvm-vdpa-manual-forward-CVQ-buffers.patch [bz#1939363]
- kvm-vdpa-Buffer-CVQ-support-on-shadow-virtqueue.patch [bz#1939363]
- kvm-vdpa-Extract-get-features-part-from-vhost_vdpa_get_m.patch [bz#1939363]
- kvm-vdpa-Add-device-migration-blocker.patch [bz#1939363]
- kvm-vdpa-Add-x-svq-to-NetdevVhostVDPAOptions.patch [bz#1939363]
- kvm-redhat-Update-linux-headers-linux-kvm.h-to-v5.18-rc6.patch [bz#2111994]
- kvm-target-s390x-kvm-Honor-storage-keys-during-emulation.patch [bz#2111994]
- kvm-kvm-don-t-use-perror-without-useful-errno.patch [bz#2095608]
- kvm-multifd-Copy-pages-before-compressing-them-with-zlib.patch [bz#2099934]
- kvm-Revert-migration-Simplify-unqueue_page.patch [bz#2099934]
- Resolves: bz#1939363 (vDPA control virtqueue support in Qemu)
- Resolves: bz#2111994 (RHEL9: skey test in kvm_unit_test got failed)
- Resolves: bz#2095608 (Please correct the error message when try to start qemu with "-M kernel-irqchip=split")
- Resolves: bz#2099934 (Guest reboot on destination host after postcopy migration completed)
---
 ...vert-migration-Simplify-unqueue_page.patch | 134 ++++++++
 ...virtio-Replace-g_memdup-by-g_memdup2.patch | 95 ++++++
 ...on-t-use-perror-without-useful-errno.patch | 62 ++++
 kvm-meson-create-have_vhost_-variables.patch | 154 +++++++++
 ...ave_vhost_-variables-to-pick-sources.patch | 213 ++++++++++++
 ...es-before-compressing-them-with-zlib.patch | 142 ++++++++
 ...nux-headers-linux-kvm.h-to-v5.18-rc6.patch | 106 ++++++
 ...-Honor-storage-keys-during-emulation.patch | 103 ++++++
 kvm-vdpa-Add-device-migration-blocker.patch | 106 ++++++
 ...-Add-x-svq-to-NetdevVhostVDPAOptions.patch | 223 ++++++++++++
 ...compiler-to-squash-reads-to-used-idx.patch | 65 ++++
 ...ffer-CVQ-support-on-shadow-virtqueue.patch | 323 ++++++++++++++++++
 ...t-vhost_vdpa_dma_map-and-unmap-calls.patch | 84 +++++
 ...-features-part-from-vhost_vdpa_get_m.patch | 108 ++++++
 kvm-vdpa-manual-forward-CVQ-buffers.patch | 166 +++++++++
 kvm-vhost-Add-SVQDescState.patch | 135 ++++++++
 ...vhost-Add-svq-avail_handler-callback.patch | 164 +++++++++
 ...heck-for-queue-full-at-vhost_svq_add.patch | 134 ++++++++
 ...-vhost_svq_add-from-VirtQueueElement.patch | 138 ++++++++
 kvm-vhost-Expose-vhost_svq_add.patch | 73 ++++
 ...Fix-device-s-used-descriptor-dequeue.patch | 83 +++++
 ...Fix-element-in-vhost_svq_add-failure.patch | 68 ++++
 ...vhost_svq_kick-call-to-vhost_svq_add.patch | 61 ++++
 kvm-vhost-Reorder-vhost_svq_kick.patch | 88 +++++
 ...k-descriptor-chain-in-private-at-SVQ.patch | 123 +++++++
 ...rack-number-of-descs-in-SVQDescState.patch | 81 +++++
 kvm-vhost-add-vhost_svq_poll.patch | 92 +++++
 kvm-vhost-add-vhost_svq_push_elem.patch | 83 +++++
 ...iptor-translation-to-vhost_svq_vring.patch | 120 +++++++
 ...dd-stubs-for-when-no-virtio-net-devi.patch | 87 +++++
 kvm-virtio-net-Expose-MAC_TABLE_ENTRIES.patch | 69 ++++
 ...rtio-net-Expose-ctrl-virtqueue-logic.patch | 169 +++++++++
 qemu-kvm.spec | 108 +++++-
 33 files changed, 3959 insertions(+), 1 deletion(-)
 create mode 100644 kvm-Revert-migration-Simplify-unqueue_page.patch
 create mode 100644 kvm-hw-virtio-Replace-g_memdup-by-g_memdup2.patch
 create mode 100644 kvm-kvm-don-t-use-perror-without-useful-errno.patch
 create mode 100644 kvm-meson-create-have_vhost_-variables.patch
 create mode 100644 kvm-meson-use-have_vhost_-variables-to-pick-sources.patch
 create mode 100644 kvm-multifd-Copy-pages-before-compressing-them-with-zlib.patch
 create mode 100644 kvm-redhat-Update-linux-headers-linux-kvm.h-to-v5.18-rc6.patch
 create mode 100644 kvm-target-s390x-kvm-Honor-storage-keys-during-emulation.patch
 create mode 100644 kvm-vdpa-Add-device-migration-blocker.patch
 create mode 100644 kvm-vdpa-Add-x-svq-to-NetdevVhostVDPAOptions.patch
 create mode 100644 kvm-vdpa-Avoid-compiler-to-squash-reads-to-used-idx.patch
 create mode 100644 kvm-vdpa-Buffer-CVQ-support-on-shadow-virtqueue.patch
 create mode 100644 kvm-vdpa-Export-vhost_vdpa_dma_map-and-unmap-calls.patch
 create mode 100644 kvm-vdpa-Extract-get-features-part-from-vhost_vdpa_get_m.patch
 create mode 100644 kvm-vdpa-manual-forward-CVQ-buffers.patch
 create mode 100644 kvm-vhost-Add-SVQDescState.patch
 create mode 100644 kvm-vhost-Add-svq-avail_handler-callback.patch
 create mode 100644 kvm-vhost-Check-for-queue-full-at-vhost_svq_add.patch
 create mode 100644 kvm-vhost-Decouple-vhost_svq_add-from-VirtQueueElement.patch
 create mode 100644 kvm-vhost-Expose-vhost_svq_add.patch
 create mode 100644 kvm-vhost-Fix-device-s-used-descriptor-dequeue.patch
 create mode 100644 kvm-vhost-Fix-element-in-vhost_svq_add-failure.patch
 create mode 100644 kvm-vhost-Move-vhost_svq_kick-call-to-vhost_svq_add.patch
 create mode 100644 kvm-vhost-Reorder-vhost_svq_kick.patch
 create mode 100644 kvm-vhost-Track-descriptor-chain-in-private-at-SVQ.patch
 create mode 100644 kvm-vhost-Track-number-of-descs-in-SVQDescState.patch
 create mode 100644 kvm-vhost-add-vhost_svq_poll.patch
 create mode 100644 kvm-vhost-add-vhost_svq_push_elem.patch
 create mode 100644 kvm-vhost-move-descriptor-translation-to-vhost_svq_vring.patch
 create mode 100644 kvm-vhost-net-vdpa-add-stubs-for-when-no-virtio-net-devi.patch
 create mode 100644 kvm-virtio-net-Expose-MAC_TABLE_ENTRIES.patch
 create mode 100644 kvm-virtio-net-Expose-ctrl-virtqueue-logic.patch

diff --git a/kvm-Revert-migration-Simplify-unqueue_page.patch b/kvm-Revert-migration-Simplify-unqueue_page.patch
new file mode 100644
index 0000000..f5c97f6
--- /dev/null
+++ b/kvm-Revert-migration-Simplify-unqueue_page.patch
@@ -0,0 +1,134 @@
+From 5ea59b17866add54e5ae8c76d3cb472c67e1fa91 Mon Sep 17 00:00:00 2001
+From: Thomas Huth
+Date: Tue, 2 Aug 2022 08:19:49 +0200
+Subject: [PATCH 32/32] Revert "migration: Simplify unqueue_page()"
+
+RH-Author: Thomas Huth
+RH-MergeRequest: 112: Fix postcopy migration on s390x
+RH-Commit: [2/2] 3913c9ed3f27f4b66245913da29d0c46db0c6567 (thuth/qemu-kvm-cs9)
+RH-Bugzilla: 2099934
+RH-Acked-by: Dr. David Alan Gilbert
+RH-Acked-by: Cornelia Huck
+RH-Acked-by: David Hildenbrand
+RH-Acked-by: Peter Xu
+
+This reverts commit cfd66f30fb0f735df06ff4220e5000290a43dad3.
+
+The simplification of unqueue_page() introduced a bug that sometimes
+breaks migration on s390x hosts.
+
+The problem is not fully understood yet, but since we are already in
+the freeze for QEMU 7.1 and we need something working there, let's
+revert this patch for the upcoming release. The optimization can be
+redone later again in a proper way if necessary.
+
+Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2099934
+Signed-off-by: Thomas Huth
+Message-Id: <20220802061949.331576-1-thuth@redhat.com>
+Reviewed-by: Dr. David Alan Gilbert
+Signed-off-by: Dr. David Alan Gilbert
+(cherry picked from commit 777f53c75983dd10756f5dbfc8af50fe11da81c1)
+Conflicts:
+	migration/trace-events
+	(trivial contextual conflict)
+Signed-off-by: Thomas Huth
+---
+ migration/ram.c | 37 ++++++++++++++++++++++++++-----------
+ migration/trace-events | 3 ++-
+ 2 files changed, 28 insertions(+), 12 deletions(-)
+
+diff --git a/migration/ram.c b/migration/ram.c
+index fb6db54642..ee40e4a718 100644
+--- a/migration/ram.c
++++ b/migration/ram.c
+@@ -1548,7 +1548,6 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
+ {
+     struct RAMSrcPageRequest *entry;
+     RAMBlock *block = NULL;
+-    size_t page_size;
+ 
+     if (!postcopy_has_request(rs)) {
+         return NULL;
+@@ -1565,13 +1564,10 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
+     entry = QSIMPLEQ_FIRST(&rs->src_page_requests);
+     block = entry->rb;
+     *offset = entry->offset;
+-    page_size = qemu_ram_pagesize(block);
+-    /* Each page request should only be multiple page size of the ramblock */
+-    assert((entry->len % page_size) == 0);
+ 
+-    if (entry->len > page_size) {
+-        entry->len -= page_size;
+-        entry->offset += page_size;
++    if (entry->len > TARGET_PAGE_SIZE) {
++        entry->len -= TARGET_PAGE_SIZE;
++        entry->offset += TARGET_PAGE_SIZE;
+     } else {
+         memory_region_unref(block->mr);
+         QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
+@@ -1579,9 +1575,6 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
+         migration_consume_urgent_request();
+     }
+ 
+-    trace_unqueue_page(block->idstr, *offset,
+-                       test_bit((*offset >> TARGET_PAGE_BITS), block->bmap));
+-
+     return block;
+ }
+ 
+@@ -1956,8 +1949,30 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
+ {
+     RAMBlock *block;
+     ram_addr_t offset;
++    bool dirty;
++
++    do {
++        block = unqueue_page(rs, &offset);
++        /*
++         * We're sending this page, and since it's postcopy nothing else
++         * will dirty it, and we must make sure it doesn't get sent again
++         * even if this queue request was received after the background
++         * search already sent it.
++         */
++        if (block) {
++            unsigned long page;
++
++            page = offset >> TARGET_PAGE_BITS;
++            dirty = test_bit(page, block->bmap);
++            if (!dirty) {
++                trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
++                                                page);
++            } else {
++                trace_get_queued_page(block->idstr, (uint64_t)offset, page);
++            }
++        }
+ 
+-    block = unqueue_page(rs, &offset);
++    } while (block && !dirty);
+ 
+     if (!block) {
+         /*
+diff --git a/migration/trace-events b/migration/trace-events
+index 1aec580e92..09d61ed1f4 100644
+--- a/migration/trace-events
++++ b/migration/trace-events
+@@ -85,6 +85,8 @@ put_qlist_end(const char *field_name, const char *vmsd_name) "%s(%s)"
+ qemu_file_fclose(void) ""
+ 
+ # ram.c
++get_queued_page(const char *block_name, uint64_t tmp_offset, unsigned long page_abs) "%s/0x%" PRIx64 " page_abs=0x%lx"
++get_queued_page_not_dirty(const char *block_name, uint64_t tmp_offset, unsigned long page_abs) "%s/0x%" PRIx64 " page_abs=0x%lx"
+ migration_bitmap_sync_start(void) ""
+ migration_bitmap_sync_end(uint64_t dirty_pages) "dirty_pages %" PRIu64
+ migration_bitmap_clear_dirty(char *str, uint64_t start, uint64_t size, unsigned long page) "rb %s start 0x%"PRIx64" size 0x%"PRIx64" page 0x%lx"
+@@ -110,7 +112,6 @@ ram_save_iterate_big_wait(uint64_t milliconds, int iterations) "big wait: %" PRI
+ ram_load_complete(int ret, uint64_t seq_iter) "exit_code %d seq iteration %" PRIu64
+ ram_write_tracking_ramblock_start(const char *block_id, size_t page_size, void *addr, size_t length) "%s: page_size: %zu addr: %p length: %zu"
+ ram_write_tracking_ramblock_stop(const char *block_id, size_t page_size, void *addr, size_t length) "%s: page_size: %zu addr: %p length: %zu"
+-unqueue_page(char *block, uint64_t offset, bool dirty) "ramblock '%s' offset 0x%"PRIx64" dirty %d"
+ 
+ # multifd.c
+ multifd_new_send_channel_async(uint8_t id) "channel %u"
+-- 
+2.31.1
+
diff --git a/kvm-hw-virtio-Replace-g_memdup-by-g_memdup2.patch b/kvm-hw-virtio-Replace-g_memdup-by-g_memdup2.patch
new file mode 100644
index 0000000..44897ac
--- /dev/null
+++ b/kvm-hw-virtio-Replace-g_memdup-by-g_memdup2.patch
@@ -0,0 +1,95 @@
+From 4dad0e9abbc843fba4e5fee6e7aa1b0db13f5898 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Eugenio=20P=C3=A9rez?=
+Date: Thu, 21 Jul 2022 15:27:35 +0200
+Subject: [PATCH 03/32] hw/virtio: Replace g_memdup() by g_memdup2()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+RH-Author: Eugenio Pérez
+RH-MergeRequest: 108: Net Control Virtqueue shadow Support
+RH-Commit: [3/27] ae196903eb1a7aebbf999100e997cf82e5024cb6 (eperezmartin/qemu-kvm)
+RH-Bugzilla: 1939363
+RH-Acked-by: Stefano Garzarella
+RH-Acked-by: Cindy Lu
+RH-Acked-by: Laurent Vivier
+
+Bugzilla: https://bugzilla.redhat.com/1939363
+
+Upstream Status: git://git.qemu.org/qemu.git
+
+commit d792199de55ca5cb5334016884039c740290b5c7
+Author: Philippe Mathieu-Daudé
+Date:   Thu May 12 19:57:46 2022 +0200
+
+    hw/virtio: Replace g_memdup() by g_memdup2()
+
+    Per https://discourse.gnome.org/t/port-your-module-from-g-memdup-to-g-memdup2-now/5538
+
+    The old API took the size of the memory to duplicate as a guint,
+    whereas most memory functions take memory sizes as a gsize. This
+    made it easy to accidentally pass a gsize to g_memdup(). For large
+    values, that would lead to a silent truncation of the size from 64
+    to 32 bits, and result in a heap area being returned which is
+    significantly smaller than what the caller expects. This can likely
+    be exploited in various modules to cause a heap buffer overflow.
+
+    Replace g_memdup() by the safer g_memdup2() wrapper.
+
+    Acked-by: Jason Wang
+    Acked-by: Eugenio Pérez
+    Signed-off-by: Philippe Mathieu-Daudé
+    Message-Id: <20220512175747.142058-6-eperezma@redhat.com>
+    Reviewed-by: Michael S. Tsirkin
+    Signed-off-by: Michael S. Tsirkin
+
+Signed-off-by: Eugenio Pérez
+---
+ hw/net/virtio-net.c | 3 ++-
+ hw/virtio/virtio-crypto.c | 6 +++---
+ 2 files changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
+index 099e65036d..633de61513 100644
+--- a/hw/net/virtio-net.c
++++ b/hw/net/virtio-net.c
+@@ -1458,7 +1458,8 @@ static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
+     }
+ 
+     iov_cnt = elem->out_num;
+-    iov2 = iov = g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num);
++    iov2 = iov = g_memdup2(elem->out_sg,
++                           sizeof(struct iovec) * elem->out_num);
+     s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
+     iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
+     if (s != sizeof(ctrl)) {
+diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
+index dcd80b904d..0e31e3cc04 100644
+--- a/hw/virtio/virtio-crypto.c
++++ b/hw/virtio/virtio-crypto.c
+@@ -242,7 +242,7 @@ static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
+     }
+ 
+     out_num = elem->out_num;
+-    out_iov_copy = g_memdup(elem->out_sg, sizeof(out_iov[0]) * out_num);
++    out_iov_copy = g_memdup2(elem->out_sg, sizeof(out_iov[0]) * out_num);
+     out_iov = out_iov_copy;
+ 
+     in_num = elem->in_num;
+@@ -605,11 +605,11 @@ virtio_crypto_handle_request(VirtIOCryptoReq *request)
+     }
+ 
+     out_num = elem->out_num;
+-    out_iov_copy = g_memdup(elem->out_sg, sizeof(out_iov[0]) * out_num);
++    out_iov_copy = g_memdup2(elem->out_sg, sizeof(out_iov[0]) * out_num);
+     out_iov = out_iov_copy;
+ 
+     in_num = elem->in_num;
+-    in_iov_copy = g_memdup(elem->in_sg, sizeof(in_iov[0]) * in_num);
++    in_iov_copy = g_memdup2(elem->in_sg, sizeof(in_iov[0]) * in_num);
+     in_iov = in_iov_copy;
+ 
+     if (unlikely(iov_to_buf(out_iov, out_num, 0, &req, sizeof(req))
+-- 
+2.31.1
+
diff --git a/kvm-kvm-don-t-use-perror-without-useful-errno.patch b/kvm-kvm-don-t-use-perror-without-useful-errno.patch
new file mode 100644
index 0000000..a78c089
--- /dev/null
+++ b/kvm-kvm-don-t-use-perror-without-useful-errno.patch
@@ -0,0 +1,62 @@
+From 9ddefaedf423ec03eadaf17496c14e0d7b2381c8 Mon Sep 17 00:00:00 2001
+From: Cornelia Huck
+Date: Thu, 28 Jul 2022 16:24:46 +0200
+Subject: [PATCH 30/32] kvm: don't use perror() without useful errno
+
+RH-Author: Cornelia Huck
+RH-MergeRequest: 110: kvm: don't use perror() without useful errno
+RH-Commit: [1/1] 20e51aac6767c1f89f74c7d692d1fb7689eff5f0 (cohuck/qemu-kvm-c9s)
+RH-Bugzilla: 2095608
+RH-Acked-by: Eric Auger
+RH-Acked-by: Thomas Huth
+RH-Acked-by: Gavin Shan
+
+perror() is designed to append the decoded errno value to a
+string. This, however, only makes sense if we called something that
+actually sets errno prior to that.
+
+For the callers that check for split irqchip support that is not the
+case, and we end up with confusing error messages that end in
+"success". Use error_report() instead.
+ +Signed-off-by: Cornelia Huck +Message-Id: <20220728142446.438177-1-cohuck@redhat.com> +Signed-off-by: Paolo Bonzini + +https://bugzilla.redhat.com/show_bug.cgi?id=2095608 +(cherry picked from commit 47c182fe8b03c0c40059fb95840923e65c9bdb4f) +Signed-off-by: Cornelia Huck +--- + accel/kvm/kvm-all.c | 2 +- + target/arm/kvm.c | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c +index 5f1377ca04..e9c7947640 100644 +--- a/accel/kvm/kvm-all.c ++++ b/accel/kvm/kvm-all.c +@@ -2254,7 +2254,7 @@ static void kvm_irqchip_create(KVMState *s) + ret = kvm_arch_irqchip_create(s); + if (ret == 0) { + if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) { +- perror("Split IRQ chip mode not supported."); ++ error_report("Split IRQ chip mode not supported."); + exit(1); + } else { + ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP); +diff --git a/target/arm/kvm.c b/target/arm/kvm.c +index bbf1ce7ba3..0a2ba1f8e3 100644 +--- a/target/arm/kvm.c ++++ b/target/arm/kvm.c +@@ -960,7 +960,7 @@ void kvm_arch_init_irq_routing(KVMState *s) + int kvm_arch_irqchip_create(KVMState *s) + { + if (kvm_kernel_irqchip_split()) { +- perror("-machine kernel_irqchip=split is not supported on ARM."); ++ error_report("-machine kernel_irqchip=split is not supported on ARM."); + exit(1); + } + +-- +2.31.1 + diff --git a/kvm-meson-create-have_vhost_-variables.patch b/kvm-meson-create-have_vhost_-variables.patch new file mode 100644 index 0000000..fcae620 --- /dev/null +++ b/kvm-meson-create-have_vhost_-variables.patch @@ -0,0 +1,154 @@ +From 51c310097832724bafac26aed81399da40128400 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= +Date: Thu, 21 Jul 2022 15:50:43 +0200 +Subject: [PATCH 05/32] meson: create have_vhost_* variables +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eugenio Pérez +RH-MergeRequest: 108: Net Control Virtqueue shadow Support +RH-Commit: [5/27] 3b30f89e6d639923dc9d9a92a4261bb4509e5c83 (eperezmartin/qemu-kvm) +RH-Bugzilla: 1939363 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Cindy Lu +RH-Acked-by: Laurent Vivier + +Bugzilla: https://bugzilla.redhat.com/1939363 + +Upstream Status: git://git.qemu.org/qemu.git + +commit 2a3129a37652e5e81d12f6e16dd3c447f09831f9 +Author: Paolo Bonzini +Date: Wed Apr 20 17:34:05 2022 +0200 + + meson: create have_vhost_* variables + + When using Meson options rather than config-host.h, the "when" clauses + have to be changed to if statements (which is not necessarily great, + though at least it highlights which parts of the build are per-target + and which are not). + + Do that before moving vhost logic to meson.build, though for now + the variables are just based on config-host.mak data. 
+ + Reviewed-by: Marc-André Lureau + Signed-off-by: Paolo Bonzini + +Signed-off-by: Eugenio Pérez +--- + meson.build | 30 ++++++++++++++++++++---------- + tests/meson.build | 2 +- + tools/meson.build | 2 +- + 3 files changed, 22 insertions(+), 12 deletions(-) + +diff --git a/meson.build b/meson.build +index 13e3323380..735f538497 100644 +--- a/meson.build ++++ b/meson.build +@@ -298,6 +298,15 @@ have_tpm = get_option('tpm') \ + .require(targetos != 'windows', error_message: 'TPM emulation only available on POSIX systems') \ + .allowed() + ++# vhost ++have_vhost_user = 'CONFIG_VHOST_USER' in config_host ++have_vhost_vdpa = 'CONFIG_VHOST_VDPA' in config_host ++have_vhost_kernel = 'CONFIG_VHOST_KERNEL' in config_host ++have_vhost_net_user = 'CONFIG_VHOST_NET_USER' in config_host ++have_vhost_net_vdpa = 'CONFIG_VHOST_NET_VDPA' in config_host ++have_vhost_net = 'CONFIG_VHOST_NET' in config_host ++have_vhost_user_crypto = 'CONFIG_VHOST_CRYPTO' in config_host ++ + # Target-specific libraries and flags + libm = cc.find_library('m', required: false) + threads = dependency('threads') +@@ -1335,7 +1344,7 @@ has_statx_mnt_id = cc.links(statx_mnt_id_test) + have_vhost_user_blk_server = get_option('vhost_user_blk_server') \ + .require(targetos == 'linux', + error_message: 'vhost_user_blk_server requires linux') \ +- .require('CONFIG_VHOST_USER' in config_host, ++ .require(have_vhost_user, + error_message: 'vhost_user_blk_server requires vhost-user support') \ + .disable_auto_if(not have_system) \ + .allowed() +@@ -2116,9 +2125,9 @@ host_kconfig = \ + (have_ivshmem ? ['CONFIG_IVSHMEM=y'] : []) + \ + ('CONFIG_OPENGL' in config_host ? ['CONFIG_OPENGL=y'] : []) + \ + (x11.found() ? ['CONFIG_X11=y'] : []) + \ +- ('CONFIG_VHOST_USER' in config_host ? ['CONFIG_VHOST_USER=y'] : []) + \ +- ('CONFIG_VHOST_VDPA' in config_host ? ['CONFIG_VHOST_VDPA=y'] : []) + \ +- ('CONFIG_VHOST_KERNEL' in config_host ? ['CONFIG_VHOST_KERNEL=y'] : []) + \ ++ (have_vhost_user ? ['CONFIG_VHOST_USER=y'] : []) + \ ++ (have_vhost_vdpa ? ['CONFIG_VHOST_VDPA=y'] : []) + \ ++ (have_vhost_kernel ? ['CONFIG_VHOST_KERNEL=y'] : []) + \ + (have_virtfs ? ['CONFIG_VIRTFS=y'] : []) + \ + ('CONFIG_LINUX' in config_host ? ['CONFIG_LINUX=y'] : []) + \ + ('CONFIG_PVRDMA' in config_host ? 
['CONFIG_PVRDMA=y'] : []) + \ +@@ -2799,7 +2808,7 @@ if have_system or have_user + endif + + vhost_user = not_found +-if targetos == 'linux' and 'CONFIG_VHOST_USER' in config_host ++if targetos == 'linux' and have_vhost_user + libvhost_user = subproject('libvhost-user') + vhost_user = libvhost_user.get_variable('vhost_user_dep') + endif +@@ -3386,7 +3395,7 @@ if have_tools + dependencies: qemuutil, + install: true) + +- if 'CONFIG_VHOST_USER' in config_host ++ if have_vhost_user + subdir('contrib/vhost-user-blk') + subdir('contrib/vhost-user-gpu') + subdir('contrib/vhost-user-input') +@@ -3516,15 +3525,16 @@ if 'simple' in get_option('trace_backends') + endif + summary_info += {'D-Bus display': dbus_display} + summary_info += {'QOM debugging': get_option('qom_cast_debug')} +-summary_info += {'vhost-kernel support': config_host.has_key('CONFIG_VHOST_KERNEL')} +-summary_info += {'vhost-net support': config_host.has_key('CONFIG_VHOST_NET')} +-summary_info += {'vhost-crypto support': config_host.has_key('CONFIG_VHOST_CRYPTO')} ++summary_info += {'vhost-kernel support': have_vhost_kernel} ++summary_info += {'vhost-net support': have_vhost_net} ++summary_info += {'vhost-user support': have_vhost_user} ++summary_info += {'vhost-user-crypto support': have_vhost_user_crypto} + summary_info += {'vhost-scsi support': config_host.has_key('CONFIG_VHOST_SCSI')} + summary_info += {'vhost-vsock support': config_host.has_key('CONFIG_VHOST_VSOCK')} +-summary_info += {'vhost-user support': config_host.has_key('CONFIG_VHOST_USER')} + summary_info += {'vhost-user-blk server support': have_vhost_user_blk_server} + summary_info += {'vhost-user-fs support': config_host.has_key('CONFIG_VHOST_USER_FS')} + summary_info += {'vhost-vdpa support': config_host.has_key('CONFIG_VHOST_VDPA')} ++summary_info += {'vhost-vdpa support': have_vhost_vdpa} + summary_info += {'build guest agent': have_ga} + summary(summary_info, bool_yn: true, section: 'Configurable features') + +diff --git a/tests/meson.build b/tests/meson.build +index 1d05109eb4..bbe41c8559 100644 +--- a/tests/meson.build ++++ b/tests/meson.build +@@ -70,7 +70,7 @@ test_deps = { + 'test-qht-par': qht_bench, + } + +-if have_tools and 'CONFIG_VHOST_USER' in config_host and 'CONFIG_LINUX' in config_host ++if have_tools and have_vhost_user and 'CONFIG_LINUX' in config_host + executable('vhost-user-bridge', + sources: files('vhost-user-bridge.c'), + dependencies: [qemuutil, vhost_user]) +diff --git a/tools/meson.build b/tools/meson.build +index 46977af84f..10eb3a043f 100644 +--- a/tools/meson.build ++++ b/tools/meson.build +@@ -3,7 +3,7 @@ have_virtiofsd = get_option('virtiofsd') \ + error_message: 'virtiofsd requires Linux') \ + .require(seccomp.found() and libcap_ng.found(), + error_message: 'virtiofsd requires libcap-ng-devel and seccomp-devel') \ +- .require('CONFIG_VHOST_USER' in config_host, ++ .require(have_vhost_user, + error_message: 'virtiofsd needs vhost-user-support') \ + .disable_auto_if(not have_tools and not have_system) \ + .allowed() +-- +2.31.1 + diff --git a/kvm-meson-use-have_vhost_-variables-to-pick-sources.patch b/kvm-meson-use-have_vhost_-variables-to-pick-sources.patch new file mode 100644 index 0000000..99d86c1 --- /dev/null +++ b/kvm-meson-use-have_vhost_-variables-to-pick-sources.patch @@ -0,0 +1,213 @@ +From a7d57a09e33275d5e6649273b5c9da1bc3c92491 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= +Date: Thu, 21 Jul 2022 15:51:53 +0200 +Subject: [PATCH 06/32] meson: use have_vhost_* variables to pick sources +MIME-Version: 
1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eugenio Pérez +RH-MergeRequest: 108: Net Control Virtqueue shadow Support +RH-Commit: [6/27] bc3db1efb759c0bc97fde2f4fbb3d6dc404c8d3d (eperezmartin/qemu-kvm) +RH-Bugzilla: 1939363 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Cindy Lu +RH-Acked-by: Laurent Vivier + +Bugzilla: https://bugzilla.redhat.com/1939363 + +Upstream Status: git://git.qemu.org/qemu.git + +commit 43b6d7ee1fbc5b5fb7c85d8131fdac1863214ad6 +Author: Paolo Bonzini +Date: Wed Apr 20 17:34:06 2022 +0200 + + meson: use have_vhost_* variables to pick sources + + Reviewed-by: Marc-André Lureau + Signed-off-by: Paolo Bonzini + +Signed-off-by: Eugenio Pérez +--- + Kconfig.host | 3 --- + backends/meson.build | 8 ++++++-- + hw/net/meson.build | 8 ++++++-- + hw/virtio/Kconfig | 3 --- + hw/virtio/meson.build | 25 ++++++++++++++++--------- + meson.build | 1 + + net/meson.build | 12 +++++++----- + tests/qtest/meson.build | 4 +++- + 8 files changed, 39 insertions(+), 25 deletions(-) + +diff --git a/Kconfig.host b/Kconfig.host +index 60b9c07b5e..1165c4eacd 100644 +--- a/Kconfig.host ++++ b/Kconfig.host +@@ -22,15 +22,12 @@ config TPM + + config VHOST_USER + bool +- select VHOST + + config VHOST_VDPA + bool +- select VHOST + + config VHOST_KERNEL + bool +- select VHOST + + config VIRTFS + bool +diff --git a/backends/meson.build b/backends/meson.build +index 6e68945528..cb92f639ca 100644 +--- a/backends/meson.build ++++ b/backends/meson.build +@@ -12,9 +12,13 @@ softmmu_ss.add([files( + softmmu_ss.add(when: 'CONFIG_POSIX', if_true: files('rng-random.c')) + softmmu_ss.add(when: 'CONFIG_POSIX', if_true: files('hostmem-file.c')) + softmmu_ss.add(when: 'CONFIG_LINUX', if_true: files('hostmem-memfd.c')) +-softmmu_ss.add(when: ['CONFIG_VHOST_USER', 'CONFIG_VIRTIO'], if_true: files('vhost-user.c')) ++if have_vhost_user ++ softmmu_ss.add(when: 'CONFIG_VIRTIO', if_true: files('vhost-user.c')) ++endif + softmmu_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('cryptodev-vhost.c')) +-softmmu_ss.add(when: ['CONFIG_VIRTIO_CRYPTO', 'CONFIG_VHOST_CRYPTO'], if_true: files('cryptodev-vhost-user.c')) ++if have_vhost_user_crypto ++ softmmu_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('cryptodev-vhost-user.c')) ++endif + softmmu_ss.add(when: 'CONFIG_GIO', if_true: [files('dbus-vmstate.c'), gio]) + softmmu_ss.add(when: 'CONFIG_SGX', if_true: files('hostmem-epc.c')) + +diff --git a/hw/net/meson.build b/hw/net/meson.build +index 685b75badb..ebac261542 100644 +--- a/hw/net/meson.build ++++ b/hw/net/meson.build +@@ -46,8 +46,12 @@ specific_ss.add(when: 'CONFIG_XILINX_ETHLITE', if_true: files('xilinx_ethlite.c' + softmmu_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('net_rx_pkt.c')) + specific_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('virtio-net.c')) + +-softmmu_ss.add(when: ['CONFIG_VIRTIO_NET', 'CONFIG_VHOST_NET'], if_true: files('vhost_net.c'), if_false: files('vhost_net-stub.c')) +-softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('vhost_net-stub.c')) ++if have_vhost_net ++ softmmu_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('vhost_net.c'), if_false: files('vhost_net-stub.c')) ++ softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('vhost_net-stub.c')) ++else ++ softmmu_ss.add(files('vhost_net-stub.c')) ++endif + + softmmu_ss.add(when: 'CONFIG_ETSEC', if_true: files( + 'fsl_etsec/etsec.c', +diff --git a/hw/virtio/Kconfig b/hw/virtio/Kconfig +index c144d42f9b..8ca7b3d9d6 100644 +--- a/hw/virtio/Kconfig ++++ b/hw/virtio/Kconfig +@@ -1,6 +1,3 @@ 
+-config VHOST +- bool +- + config VIRTIO + bool + +diff --git a/hw/virtio/meson.build b/hw/virtio/meson.build +index 67dc77e00f..30a832eb4a 100644 +--- a/hw/virtio/meson.build ++++ b/hw/virtio/meson.build +@@ -2,18 +2,22 @@ softmmu_virtio_ss = ss.source_set() + softmmu_virtio_ss.add(files('virtio-bus.c')) + softmmu_virtio_ss.add(when: 'CONFIG_VIRTIO_PCI', if_true: files('virtio-pci.c')) + softmmu_virtio_ss.add(when: 'CONFIG_VIRTIO_MMIO', if_true: files('virtio-mmio.c')) +-softmmu_virtio_ss.add(when: 'CONFIG_VHOST', if_false: files('vhost-stub.c')) +- +-softmmu_ss.add_all(when: 'CONFIG_VIRTIO', if_true: softmmu_virtio_ss) +-softmmu_ss.add(when: 'CONFIG_VIRTIO', if_false: files('vhost-stub.c')) +- +-softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('vhost-stub.c')) + + virtio_ss = ss.source_set() + virtio_ss.add(files('virtio.c')) +-virtio_ss.add(when: 'CONFIG_VHOST', if_true: files('vhost.c', 'vhost-backend.c', 'vhost-iova-tree.c')) +-virtio_ss.add(when: 'CONFIG_VHOST_USER', if_true: files('vhost-user.c')) +-virtio_ss.add(when: 'CONFIG_VHOST_VDPA', if_true: files('vhost-shadow-virtqueue.c', 'vhost-vdpa.c')) ++ ++if have_vhost ++ virtio_ss.add(files('vhost.c', 'vhost-backend.c', 'vhost-iova-tree.c')) ++ if have_vhost_user ++ virtio_ss.add(files('vhost-user.c')) ++ endif ++ if have_vhost_vdpa ++ virtio_ss.add(files('vhost-vdpa.c', 'vhost-shadow-virtqueue.c')) ++ endif ++else ++ softmmu_virtio_ss.add(files('vhost-stub.c')) ++endif ++ + virtio_ss.add(when: 'CONFIG_VIRTIO_BALLOON', if_true: files('virtio-balloon.c')) + virtio_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('virtio-crypto.c')) + virtio_ss.add(when: ['CONFIG_VIRTIO_CRYPTO', 'CONFIG_VIRTIO_PCI'], if_true: files('virtio-crypto-pci.c')) +@@ -53,3 +57,6 @@ virtio_pci_ss.add(when: 'CONFIG_VIRTIO_MEM', if_true: files('virtio-mem-pci.c')) + virtio_ss.add_all(when: 'CONFIG_VIRTIO_PCI', if_true: virtio_pci_ss) + + specific_ss.add_all(when: 'CONFIG_VIRTIO', if_true: virtio_ss) ++softmmu_ss.add_all(when: 'CONFIG_VIRTIO', if_true: softmmu_virtio_ss) ++softmmu_ss.add(when: 'CONFIG_VIRTIO', if_false: files('vhost-stub.c')) ++softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('vhost-stub.c')) +diff --git a/meson.build b/meson.build +index 735f538497..9ba675f098 100644 +--- a/meson.build ++++ b/meson.build +@@ -305,6 +305,7 @@ have_vhost_kernel = 'CONFIG_VHOST_KERNEL' in config_host + have_vhost_net_user = 'CONFIG_VHOST_NET_USER' in config_host + have_vhost_net_vdpa = 'CONFIG_VHOST_NET_VDPA' in config_host + have_vhost_net = 'CONFIG_VHOST_NET' in config_host ++have_vhost = have_vhost_user or have_vhost_vdpa or have_vhost_kernel + have_vhost_user_crypto = 'CONFIG_VHOST_CRYPTO' in config_host + + # Target-specific libraries and flags +diff --git a/net/meson.build b/net/meson.build +index 847bc2ac85..c965e83b26 100644 +--- a/net/meson.build ++++ b/net/meson.build +@@ -26,10 +26,10 @@ softmmu_ss.add(when: vde, if_true: files('vde.c')) + if have_netmap + softmmu_ss.add(files('netmap.c')) + endif +-vhost_user_ss = ss.source_set() +-vhost_user_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('vhost-user.c'), if_false: files('vhost-user-stub.c')) +-softmmu_ss.add_all(when: 'CONFIG_VHOST_NET_USER', if_true: vhost_user_ss) +-softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('vhost-user-stub.c')) ++if have_vhost_net_user ++ softmmu_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('vhost-user.c'), if_false: files('vhost-user-stub.c')) ++ softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('vhost-user-stub.c')) ++endif + + softmmu_ss.add(when: 
'CONFIG_LINUX', if_true: files('tap-linux.c')) + softmmu_ss.add(when: 'CONFIG_BSD', if_true: files('tap-bsd.c')) +@@ -40,6 +40,8 @@ if not config_host.has_key('CONFIG_LINUX') and not config_host.has_key('CONFIG_B + endif + softmmu_ss.add(when: 'CONFIG_POSIX', if_true: files(tap_posix)) + softmmu_ss.add(when: 'CONFIG_WIN32', if_true: files('tap-win32.c')) +-softmmu_ss.add(when: 'CONFIG_VHOST_NET_VDPA', if_true: files('vhost-vdpa.c')) ++if have_vhost_net_vdpa ++ softmmu_ss.add(files('vhost-vdpa.c')) ++endif + + subdir('can') +diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build +index 67cd32def1..9f550df900 100644 +--- a/tests/qtest/meson.build ++++ b/tests/qtest/meson.build +@@ -269,7 +269,9 @@ qos_test_ss.add( + if have_virtfs + qos_test_ss.add(files('virtio-9p-test.c')) + endif +-qos_test_ss.add(when: 'CONFIG_VHOST_USER', if_true: files('vhost-user-test.c')) ++if have_vhost_user ++ qos_test_ss.add(files('vhost-user-test.c')) ++endif + if have_tools and have_vhost_user_blk_server + qos_test_ss.add(files('vhost-user-blk-test.c')) + endif +-- +2.31.1 + diff --git a/kvm-multifd-Copy-pages-before-compressing-them-with-zlib.patch b/kvm-multifd-Copy-pages-before-compressing-them-with-zlib.patch new file mode 100644 index 0000000..ea89a9f --- /dev/null +++ b/kvm-multifd-Copy-pages-before-compressing-them-with-zlib.patch @@ -0,0 +1,142 @@ +From 1d280070748b604c60a7be4d4c3c3a28e3964f37 Mon Sep 17 00:00:00 2001 +From: Thomas Huth +Date: Tue, 2 Aug 2022 10:11:21 +0200 +Subject: [PATCH 31/32] multifd: Copy pages before compressing them with zlib + +RH-Author: Thomas Huth +RH-MergeRequest: 112: Fix postcopy migration on s390x +RH-Commit: [1/2] fd5a0221e22b4563bd1cb7f8a8b95f0bfe8f5fc9 (thuth/qemu-kvm-cs9) +RH-Bugzilla: 2099934 +RH-Acked-by: Dr. David Alan Gilbert +RH-Acked-by: Cornelia Huck +RH-Acked-by: David Hildenbrand +RH-Acked-by: Peter Xu + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2099934 + +zlib_send_prepare() compresses pages of a running VM. zlib does not +make any thread-safety guarantees with respect to changing deflate() +input concurrently with deflate() [1]. + +One can observe problems due to this with the IBM zEnterprise Data +Compression accelerator capable zlib [2]. When the hardware +acceleration is enabled, migration/multifd/tcp/plain/zlib test fails +intermittently [3] due to sliding window corruption. The accelerator's +architecture explicitly discourages concurrent accesses [4]: + + Page 26-57, "Other Conditions": + + As observed by this CPU, other CPUs, and channel + programs, references to the parameter block, first, + second, and third operands may be multiple-access + references, accesses to these storage locations are + not necessarily block-concurrent, and the sequence + of these accesses or references is undefined. + +Mark Adler pointed out that vanilla zlib performs double fetches under +certain circumstances as well [5], therefore we need to copy data +before passing it to deflate(). + +[1] https://zlib.net/manual.html +[2] https://github.com/madler/zlib/pull/410 +[3] https://lists.nongnu.org/archive/html/qemu-devel/2022-03/msg03988.html +[4] http://publibfp.dhe.ibm.com/epubs/pdf/a227832c.pdf +[5] https://lists.gnu.org/archive/html/qemu-devel/2022-07/msg00889.html + +Signed-off-by: Ilya Leoshkevich +Message-Id: <20220705203559.2960949-1-iii@linux.ibm.com> +Reviewed-by: Dr. David Alan Gilbert +Signed-off-by: Dr. 
David Alan Gilbert +(cherry picked from commit 007e179ef0e97eafda4c9ff2a9d665a1947c7c6d) +Signed-off-by: Thomas Huth +--- + migration/multifd-zlib.c | 38 ++++++++++++++++++++++++++++++-------- + 1 file changed, 30 insertions(+), 8 deletions(-) + +diff --git a/migration/multifd-zlib.c b/migration/multifd-zlib.c +index 3a7ae44485..18213a9513 100644 +--- a/migration/multifd-zlib.c ++++ b/migration/multifd-zlib.c +@@ -27,6 +27,8 @@ struct zlib_data { + uint8_t *zbuff; + /* size of compressed buffer */ + uint32_t zbuff_len; ++ /* uncompressed buffer of size qemu_target_page_size() */ ++ uint8_t *buf; + }; + + /* Multifd zlib compression */ +@@ -45,26 +47,38 @@ static int zlib_send_setup(MultiFDSendParams *p, Error **errp) + { + struct zlib_data *z = g_new0(struct zlib_data, 1); + z_stream *zs = &z->zs; ++ const char *err_msg; + + zs->zalloc = Z_NULL; + zs->zfree = Z_NULL; + zs->opaque = Z_NULL; + if (deflateInit(zs, migrate_multifd_zlib_level()) != Z_OK) { +- g_free(z); +- error_setg(errp, "multifd %u: deflate init failed", p->id); +- return -1; ++ err_msg = "deflate init failed"; ++ goto err_free_z; + } + /* This is the maxium size of the compressed buffer */ + z->zbuff_len = compressBound(MULTIFD_PACKET_SIZE); + z->zbuff = g_try_malloc(z->zbuff_len); + if (!z->zbuff) { +- deflateEnd(&z->zs); +- g_free(z); +- error_setg(errp, "multifd %u: out of memory for zbuff", p->id); +- return -1; ++ err_msg = "out of memory for zbuff"; ++ goto err_deflate_end; ++ } ++ z->buf = g_try_malloc(qemu_target_page_size()); ++ if (!z->buf) { ++ err_msg = "out of memory for buf"; ++ goto err_free_zbuff; + } + p->data = z; + return 0; ++ ++err_free_zbuff: ++ g_free(z->zbuff); ++err_deflate_end: ++ deflateEnd(&z->zs); ++err_free_z: ++ g_free(z); ++ error_setg(errp, "multifd %u: %s", p->id, err_msg); ++ return -1; + } + + /** +@@ -82,6 +96,8 @@ static void zlib_send_cleanup(MultiFDSendParams *p, Error **errp) + deflateEnd(&z->zs); + g_free(z->zbuff); + z->zbuff = NULL; ++ g_free(z->buf); ++ z->buf = NULL; + g_free(p->data); + p->data = NULL; + } +@@ -114,8 +130,14 @@ static int zlib_send_prepare(MultiFDSendParams *p, Error **errp) + flush = Z_SYNC_FLUSH; + } + ++ /* ++ * Since the VM might be running, the page may be changing concurrently ++ * with compression. zlib does not guarantee that this is safe, ++ * therefore copy the page before calling deflate(). 
++ */ ++ memcpy(z->buf, p->pages->block->host + p->normal[i], page_size); + zs->avail_in = page_size; +- zs->next_in = p->pages->block->host + p->normal[i]; ++ zs->next_in = z->buf; + + zs->avail_out = available; + zs->next_out = z->zbuff + out_size; +-- +2.31.1 + diff --git a/kvm-redhat-Update-linux-headers-linux-kvm.h-to-v5.18-rc6.patch b/kvm-redhat-Update-linux-headers-linux-kvm.h-to-v5.18-rc6.patch new file mode 100644 index 0000000..f027c45 --- /dev/null +++ b/kvm-redhat-Update-linux-headers-linux-kvm.h-to-v5.18-rc6.patch @@ -0,0 +1,106 @@ +From 236f216309261bc924e49014267998fdc2ef7f46 Mon Sep 17 00:00:00 2001 +From: Thomas Huth +Date: Fri, 29 Jul 2022 16:55:34 +0200 +Subject: [PATCH 28/32] redhat: Update linux-headers/linux/kvm.h to v5.18-rc6 + +RH-Author: Thomas Huth +RH-MergeRequest: 109: Honor storage keys during emulation of I/O instructions +RH-Commit: [1/2] f306d7ff8efa64b14158388b95815ac556a25d8a (thuth/qemu-kvm-cs9) +RH-Bugzilla: 2111994 +RH-Acked-by: Cornelia Huck +RH-Acked-by: David Hildenbrand +RH-Acked-by: Claudio Imbrenda + +Upstream Status: RHEL-only +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2111994 + +Based on upstream commit e4082063e47e9731dbeb1c26174c17f6038f577f +("linux-headers: Update to v5.18-rc6"), but this is focusing on +the file linux-headers/linux/kvm.h only (since the other changes +related to the VFIO renaming might break some stuff). + +Signed-off-by: Thomas Huth +--- + linux-headers/linux/kvm.h | 27 +++++++++++++++++++++------ + 1 file changed, 21 insertions(+), 6 deletions(-) + +diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h +index d232feaae9..0d05d02ee4 100644 +--- a/linux-headers/linux/kvm.h ++++ b/linux-headers/linux/kvm.h +@@ -445,7 +445,11 @@ struct kvm_run { + #define KVM_SYSTEM_EVENT_RESET 2 + #define KVM_SYSTEM_EVENT_CRASH 3 + __u32 type; +- __u64 flags; ++ __u32 ndata; ++ union { ++ __u64 flags; ++ __u64 data[16]; ++ }; + } system_event; + /* KVM_EXIT_S390_STSI */ + struct { +@@ -562,9 +566,12 @@ struct kvm_s390_mem_op { + __u32 op; /* type of operation */ + __u64 buf; /* buffer in userspace */ + union { +- __u8 ar; /* the access register number */ ++ struct { ++ __u8 ar; /* the access register number */ ++ __u8 key; /* access key, ignored if flag unset */ ++ }; + __u32 sida_offset; /* offset into the sida */ +- __u8 reserved[32]; /* should be set to 0 */ ++ __u8 reserved[32]; /* ignored */ + }; + }; + /* types for kvm_s390_mem_op->op */ +@@ -572,9 +579,12 @@ struct kvm_s390_mem_op { + #define KVM_S390_MEMOP_LOGICAL_WRITE 1 + #define KVM_S390_MEMOP_SIDA_READ 2 + #define KVM_S390_MEMOP_SIDA_WRITE 3 ++#define KVM_S390_MEMOP_ABSOLUTE_READ 4 ++#define KVM_S390_MEMOP_ABSOLUTE_WRITE 5 + /* flags for kvm_s390_mem_op->flags */ + #define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0) + #define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1) ++#define KVM_S390_MEMOP_F_SKEY_PROTECTION (1ULL << 2) + + /* for KVM_INTERRUPT */ + struct kvm_interrupt { +@@ -1134,6 +1144,12 @@ struct kvm_ppc_resize_hpt { + #define KVM_CAP_VM_GPA_BITS 207 + #define KVM_CAP_XSAVE2 208 + #define KVM_CAP_SYS_ATTRIBUTES 209 ++#define KVM_CAP_PPC_AIL_MODE_3 210 ++#define KVM_CAP_S390_MEM_OP_EXTENSION 211 ++#define KVM_CAP_PMU_CAPABILITY 212 ++#define KVM_CAP_DISABLE_QUIRKS2 213 ++/* #define KVM_CAP_VM_TSC_CONTROL 214 */ ++#define KVM_CAP_SYSTEM_EVENT_DATA 215 + + #ifdef KVM_CAP_IRQ_ROUTING + +@@ -1624,9 +1640,6 @@ struct kvm_enc_region { + #define KVM_S390_NORMAL_RESET _IO(KVMIO, 0xc3) + #define KVM_S390_CLEAR_RESET _IO(KVMIO, 0xc4) + +-/* Available with 
KVM_CAP_XSAVE2 */ +-#define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave) +- + struct kvm_s390_pv_sec_parm { + __u64 origin; + __u64 length; +@@ -1973,6 +1986,8 @@ struct kvm_dirty_gfn { + #define KVM_BUS_LOCK_DETECTION_OFF (1 << 0) + #define KVM_BUS_LOCK_DETECTION_EXIT (1 << 1) + ++#define KVM_PMU_CAP_DISABLE (1 << 0) ++ + /** + * struct kvm_stats_header - Header of per vm/vcpu binary statistics data. + * @flags: Some extra information for header, always 0 for now. +-- +2.31.1 + diff --git a/kvm-target-s390x-kvm-Honor-storage-keys-during-emulation.patch b/kvm-target-s390x-kvm-Honor-storage-keys-during-emulation.patch new file mode 100644 index 0000000..61752c7 --- /dev/null +++ b/kvm-target-s390x-kvm-Honor-storage-keys-during-emulation.patch @@ -0,0 +1,103 @@ +From 27c1d979a994f5afc59c3520af58d15aa5aae723 Mon Sep 17 00:00:00 2001 +From: Janis Schoetterl-Glausch +Date: Fri, 6 May 2022 17:39:56 +0200 +Subject: [PATCH 29/32] target/s390x: kvm: Honor storage keys during emulation + +RH-Author: Thomas Huth +RH-MergeRequest: 109: Honor storage keys during emulation of I/O instructions +RH-Commit: [2/2] 346dee1e13bfe1c074e4c6a4417091711d852f9c (thuth/qemu-kvm-cs9) +RH-Bugzilla: 2111994 +RH-Acked-by: Cornelia Huck +RH-Acked-by: David Hildenbrand +RH-Acked-by: Claudio Imbrenda + +Storage key controlled protection is currently not honored when +emulating instructions. +If available, enable key protection for the MEM_OP ioctl, thereby +enabling it for the s390_cpu_virt_mem_* functions, when using kvm. +As a result, the emulation of the following instructions honors storage +keys: + +* CLP + The Synch I/O CLP command would need special handling in order + to support storage keys, but is currently not supported. +* CHSC + Performing commands asynchronously would require special + handling, but commands are currently always synchronous. +* STSI +* TSCH + Must (and does) not change channel if terminated due to + protection. +* MSCH + Suppressed on protection, works because fetching instruction. +* SSCH + Suppressed on protection, works because fetching instruction. +* STSCH +* STCRW + Suppressed on protection, this works because no partial store is + possible, because the operand cannot span multiple pages. 
+* PCISTB +* MPCIFC +* STPCIFC + +Signed-off-by: Janis Schoetterl-Glausch +Message-Id: <20220506153956.2217601-3-scgl@linux.ibm.com> +Signed-off-by: Thomas Huth + +(cherry picked from commit 54354861d21b69ec0781f43e67b8d4f6edad7e3f) +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2111994 +Signed-off-by: Thomas Huth +--- + target/s390x/kvm/kvm.c | 9 +++++++++ + 1 file changed, 9 insertions(+) + +diff --git a/target/s390x/kvm/kvm.c b/target/s390x/kvm/kvm.c +index 74f089d87f..1f1d1a33b8 100644 +--- a/target/s390x/kvm/kvm.c ++++ b/target/s390x/kvm/kvm.c +@@ -152,12 +152,15 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = { + static int cap_sync_regs; + static int cap_async_pf; + static int cap_mem_op; ++static int cap_mem_op_extension; + static int cap_s390_irq; + static int cap_ri; + static int cap_hpage_1m; + static int cap_vcpu_resets; + static int cap_protected; + ++static bool mem_op_storage_key_support; ++ + static int active_cmma; + + static int kvm_s390_query_mem_limit(uint64_t *memory_limit) +@@ -355,6 +358,8 @@ int kvm_arch_init(MachineState *ms, KVMState *s) + cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS); + cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF); + cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP); ++ cap_mem_op_extension = kvm_check_extension(s, KVM_CAP_S390_MEM_OP_EXTENSION); ++ mem_op_storage_key_support = cap_mem_op_extension > 0; + cap_s390_irq = kvm_check_extension(s, KVM_CAP_S390_INJECT_IRQ); + cap_vcpu_resets = kvm_check_extension(s, KVM_CAP_S390_VCPU_RESETS); + cap_protected = kvm_check_extension(s, KVM_CAP_S390_PROTECTED); +@@ -843,6 +848,7 @@ int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf, + : KVM_S390_MEMOP_LOGICAL_READ, + .buf = (uint64_t)hostbuf, + .ar = ar, ++ .key = (cpu->env.psw.mask & PSW_MASK_KEY) >> PSW_SHIFT_KEY, + }; + int ret; + +@@ -852,6 +858,9 @@ int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf, + if (!hostbuf) { + mem_op.flags |= KVM_S390_MEMOP_F_CHECK_ONLY; + } ++ if (mem_op_storage_key_support) { ++ mem_op.flags |= KVM_S390_MEMOP_F_SKEY_PROTECTION; ++ } + + ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op); + if (ret < 0) { +-- +2.31.1 + diff --git a/kvm-vdpa-Add-device-migration-blocker.patch b/kvm-vdpa-Add-device-migration-blocker.patch new file mode 100644 index 0000000..1b83c98 --- /dev/null +++ b/kvm-vdpa-Add-device-migration-blocker.patch @@ -0,0 +1,106 @@ +From 8e0fdce814af4cfc84dce5e5920da989b1f1a86d Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= +Date: Thu, 21 Jul 2022 16:06:05 +0200 +Subject: [PATCH 26/32] vdpa: Add device migration blocker +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eugenio Pérez +RH-MergeRequest: 108: Net Control Virtqueue shadow Support +RH-Commit: [26/27] 53d94d45b5e5e88f12b95f9b0f243696cfcbd7ce (eperezmartin/qemu-kvm) +RH-Bugzilla: 1939363 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Cindy Lu +RH-Acked-by: Laurent Vivier + +Bugzilla: https://bugzilla.redhat.com/1939363 + +Upstream Status: git://git.qemu.org/qemu.git + +commit c156d5bf2b142dcc06808ccee06882144f230aec +Author: Eugenio Pérez +Date: Wed Jul 20 08:59:45 2022 +0200 + + vdpa: Add device migration blocker + + Since the vhost-vdpa device is exposing _F_LOG, adding a migration blocker if + it uses CVQ. + + However, qemu is able to migrate simple devices with no CVQ as long as + they use SVQ. 
To allow it, add a placeholder error to vhost_vdpa, and + only add to vhost_dev when used. vhost_dev machinery place the migration + blocker if needed. + + Signed-off-by: Eugenio Pérez + Reviewed-by: Michael S. Tsirkin + Signed-off-by: Jason Wang + +Signed-off-by: Eugenio Pérez +--- + hw/virtio/vhost-vdpa.c | 15 +++++++++++++++ + include/hw/virtio/vhost-vdpa.h | 1 + + 2 files changed, 16 insertions(+) + +diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c +index 49effe5462..e3e5bce4bb 100644 +--- a/hw/virtio/vhost-vdpa.c ++++ b/hw/virtio/vhost-vdpa.c +@@ -20,6 +20,7 @@ + #include "hw/virtio/vhost-shadow-virtqueue.h" + #include "hw/virtio/vhost-vdpa.h" + #include "exec/address-spaces.h" ++#include "migration/blocker.h" + #include "qemu/main-loop.h" + #include "cpu.h" + #include "trace.h" +@@ -1020,6 +1021,13 @@ static bool vhost_vdpa_svqs_start(struct vhost_dev *dev) + return true; + } + ++ if (v->migration_blocker) { ++ int r = migrate_add_blocker(v->migration_blocker, &err); ++ if (unlikely(r < 0)) { ++ return false; ++ } ++ } ++ + for (i = 0; i < v->shadow_vqs->len; ++i) { + VirtQueue *vq = virtio_get_queue(dev->vdev, dev->vq_index + i); + VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i); +@@ -1062,6 +1070,10 @@ err: + vhost_svq_stop(svq); + } + ++ if (v->migration_blocker) { ++ migrate_del_blocker(v->migration_blocker); ++ } ++ + return false; + } + +@@ -1081,6 +1093,9 @@ static bool vhost_vdpa_svqs_stop(struct vhost_dev *dev) + } + } + ++ if (v->migration_blocker) { ++ migrate_del_blocker(v->migration_blocker); ++ } + return true; + } + +diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h +index 1111d85643..d10a89303e 100644 +--- a/include/hw/virtio/vhost-vdpa.h ++++ b/include/hw/virtio/vhost-vdpa.h +@@ -35,6 +35,7 @@ typedef struct vhost_vdpa { + bool shadow_vqs_enabled; + /* IOVA mapping used by the Shadow Virtqueue */ + VhostIOVATree *iova_tree; ++ Error *migration_blocker; + GPtrArray *shadow_vqs; + const VhostShadowVirtqueueOps *shadow_vq_ops; + void *shadow_vq_ops_opaque; +-- +2.31.1 + diff --git a/kvm-vdpa-Add-x-svq-to-NetdevVhostVDPAOptions.patch b/kvm-vdpa-Add-x-svq-to-NetdevVhostVDPAOptions.patch new file mode 100644 index 0000000..8a7b600 --- /dev/null +++ b/kvm-vdpa-Add-x-svq-to-NetdevVhostVDPAOptions.patch @@ -0,0 +1,223 @@ +From 0b27781f9984c67625c49a516c3e38fbf5fa1b1b Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= +Date: Thu, 21 Jul 2022 16:06:16 +0200 +Subject: [PATCH 27/32] vdpa: Add x-svq to NetdevVhostVDPAOptions +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eugenio Pérez +RH-MergeRequest: 108: Net Control Virtqueue shadow Support +RH-Commit: [27/27] bd85496c2a8c1ebf34f908fca2be2ab9852fd0e9 (eperezmartin/qemu-kvm) +RH-Bugzilla: 1939363 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Cindy Lu +RH-Acked-by: Laurent Vivier + +Bugzilla: https://bugzilla.redhat.com/1939363 + +Upstream Status: git://git.qemu.org/qemu.git + +commit 1576dbb5bbc49344c606e969ec749be70c0fd94e +Author: Eugenio Pérez +Date: Wed Jul 20 08:59:46 2022 +0200 + + vdpa: Add x-svq to NetdevVhostVDPAOptions + + Finally offering the possibility to enable SVQ from the command line. + + Signed-off-by: Eugenio Pérez + Acked-by: Markus Armbruster + Reviewed-by: Michael S. 
Tsirkin + Signed-off-by: Jason Wang + +Signed-off-by: Eugenio Pérez +--- + net/vhost-vdpa.c | 72 ++++++++++++++++++++++++++++++++++++++++++++++-- + qapi/net.json | 9 +++++- + 2 files changed, 77 insertions(+), 4 deletions(-) + +diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c +index 8b76dac966..50672bcd66 100644 +--- a/net/vhost-vdpa.c ++++ b/net/vhost-vdpa.c +@@ -75,6 +75,28 @@ const int vdpa_feature_bits[] = { + VHOST_INVALID_FEATURE_BIT + }; + ++/** Supported device specific feature bits with SVQ */ ++static const uint64_t vdpa_svq_device_features = ++ BIT_ULL(VIRTIO_NET_F_CSUM) | ++ BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) | ++ BIT_ULL(VIRTIO_NET_F_MTU) | ++ BIT_ULL(VIRTIO_NET_F_MAC) | ++ BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) | ++ BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) | ++ BIT_ULL(VIRTIO_NET_F_GUEST_ECN) | ++ BIT_ULL(VIRTIO_NET_F_GUEST_UFO) | ++ BIT_ULL(VIRTIO_NET_F_HOST_TSO4) | ++ BIT_ULL(VIRTIO_NET_F_HOST_TSO6) | ++ BIT_ULL(VIRTIO_NET_F_HOST_ECN) | ++ BIT_ULL(VIRTIO_NET_F_HOST_UFO) | ++ BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) | ++ BIT_ULL(VIRTIO_NET_F_STATUS) | ++ BIT_ULL(VIRTIO_NET_F_CTRL_VQ) | ++ BIT_ULL(VIRTIO_F_ANY_LAYOUT) | ++ BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) | ++ BIT_ULL(VIRTIO_NET_F_RSC_EXT) | ++ BIT_ULL(VIRTIO_NET_F_STANDBY); ++ + VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc) + { + VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); +@@ -133,9 +155,13 @@ err_init: + static void vhost_vdpa_cleanup(NetClientState *nc) + { + VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); ++ struct vhost_dev *dev = &s->vhost_net->dev; + + qemu_vfree(s->cvq_cmd_out_buffer); + qemu_vfree(s->cvq_cmd_in_buffer); ++ if (dev->vq_index + dev->nvqs == dev->vq_index_end) { ++ g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete); ++ } + if (s->vhost_net) { + vhost_net_cleanup(s->vhost_net); + g_free(s->vhost_net); +@@ -437,7 +463,9 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer, + int vdpa_device_fd, + int queue_pair_index, + int nvqs, +- bool is_datapath) ++ bool is_datapath, ++ bool svq, ++ VhostIOVATree *iova_tree) + { + NetClientState *nc = NULL; + VhostVDPAState *s; +@@ -455,6 +483,8 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer, + + s->vhost_vdpa.device_fd = vdpa_device_fd; + s->vhost_vdpa.index = queue_pair_index; ++ s->vhost_vdpa.shadow_vqs_enabled = svq; ++ s->vhost_vdpa.iova_tree = iova_tree; + if (!is_datapath) { + s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size, + vhost_vdpa_net_cvq_cmd_page_len()); +@@ -465,6 +495,8 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer, + + s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops; + s->vhost_vdpa.shadow_vq_ops_opaque = s; ++ error_setg(&s->vhost_vdpa.migration_blocker, ++ "Migration disabled: vhost-vdpa uses CVQ."); + } + ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs); + if (ret) { +@@ -474,6 +506,14 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer, + return nc; + } + ++static int vhost_vdpa_get_iova_range(int fd, ++ struct vhost_vdpa_iova_range *iova_range) ++{ ++ int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range); ++ ++ return ret < 0 ? 
-errno : 0; ++} ++ + static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp) + { + int ret = ioctl(fd, VHOST_GET_FEATURES, features); +@@ -524,6 +564,7 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name, + uint64_t features; + int vdpa_device_fd; + g_autofree NetClientState **ncs = NULL; ++ g_autoptr(VhostIOVATree) iova_tree = NULL; + NetClientState *nc; + int queue_pairs, r, i, has_cvq = 0; + +@@ -551,22 +592,45 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name, + return queue_pairs; + } + ++ if (opts->x_svq) { ++ struct vhost_vdpa_iova_range iova_range; ++ ++ uint64_t invalid_dev_features = ++ features & ~vdpa_svq_device_features & ++ /* Transport are all accepted at this point */ ++ ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START, ++ VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START); ++ ++ if (invalid_dev_features) { ++ error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64, ++ invalid_dev_features); ++ goto err_svq; ++ } ++ ++ vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range); ++ iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last); ++ } ++ + ncs = g_malloc0(sizeof(*ncs) * queue_pairs); + + for (i = 0; i < queue_pairs; i++) { + ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name, +- vdpa_device_fd, i, 2, true); ++ vdpa_device_fd, i, 2, true, opts->x_svq, ++ iova_tree); + if (!ncs[i]) + goto err; + } + + if (has_cvq) { + nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name, +- vdpa_device_fd, i, 1, false); ++ vdpa_device_fd, i, 1, false, ++ opts->x_svq, iova_tree); + if (!nc) + goto err; + } + ++ /* iova_tree ownership belongs to last NetClientState */ ++ g_steal_pointer(&iova_tree); + return 0; + + err: +@@ -575,6 +639,8 @@ err: + qemu_del_net_client(ncs[i]); + } + } ++ ++err_svq: + qemu_close(vdpa_device_fd); + + return -1; +diff --git a/qapi/net.json b/qapi/net.json +index b92f3f5fb4..92848e4362 100644 +--- a/qapi/net.json ++++ b/qapi/net.json +@@ -445,12 +445,19 @@ + # @queues: number of queues to be created for multiqueue vhost-vdpa + # (default: 1) + # ++# @x-svq: Start device with (experimental) shadow virtqueue. (Since 7.1) ++# (default: false) ++# ++# Features: ++# @unstable: Member @x-svq is experimental. 
++# + # Since: 5.1 + ## + { 'struct': 'NetdevVhostVDPAOptions', + 'data': { + '*vhostdev': 'str', +- '*queues': 'int' } } ++ '*queues': 'int', ++ '*x-svq': {'type': 'bool', 'features' : [ 'unstable'] } } } + + ## + # @NetClientDriver: +-- +2.31.1 + diff --git a/kvm-vdpa-Avoid-compiler-to-squash-reads-to-used-idx.patch b/kvm-vdpa-Avoid-compiler-to-squash-reads-to-used-idx.patch new file mode 100644 index 0000000..acd45e0 --- /dev/null +++ b/kvm-vdpa-Avoid-compiler-to-squash-reads-to-used-idx.patch @@ -0,0 +1,65 @@ +From df06ce560ddfefde98bef822ec2020382059921f Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= +Date: Thu, 21 Jul 2022 15:38:55 +0200 +Subject: [PATCH 10/32] vdpa: Avoid compiler to squash reads to used idx +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eugenio Pérez +RH-MergeRequest: 108: Net Control Virtqueue shadow Support +RH-Commit: [10/27] b28789302d4f64749da26f413763f918161d9b70 (eperezmartin/qemu-kvm) +RH-Bugzilla: 1939363 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Cindy Lu +RH-Acked-by: Laurent Vivier + +Bugzilla: https://bugzilla.redhat.com/1939363 + +Upstream Status: git://git.qemu.org/qemu.git + +commit c381abc37f0aba42ed2e3b41cdace8f8438829e4 +Author: Eugenio Pérez +Date: Wed Jul 20 08:59:29 2022 +0200 + + vdpa: Avoid compiler to squash reads to used idx + + In the next patch we will allow busypolling of this value. The compiler + have a running path where shadow_used_idx, last_used_idx, and vring used + idx are not modified within the same thread busypolling. + + This was not an issue before since we always cleared device event + notifier before checking it, and that could act as memory barrier. + However, the busypoll needs something similar to kernel READ_ONCE. + + Let's add it here, sepparated from the polling. 
+ + Signed-off-by: Eugenio Pérez + Signed-off-by: Jason Wang + +Signed-off-by: Eugenio Pérez +--- + hw/virtio/vhost-shadow-virtqueue.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c +index 3fbda1e3d4..9c46c3a8fa 100644 +--- a/hw/virtio/vhost-shadow-virtqueue.c ++++ b/hw/virtio/vhost-shadow-virtqueue.c +@@ -327,11 +327,12 @@ static void vhost_handle_guest_kick_notifier(EventNotifier *n) + + static bool vhost_svq_more_used(VhostShadowVirtqueue *svq) + { ++ uint16_t *used_idx = &svq->vring.used->idx; + if (svq->last_used_idx != svq->shadow_used_idx) { + return true; + } + +- svq->shadow_used_idx = cpu_to_le16(svq->vring.used->idx); ++ svq->shadow_used_idx = cpu_to_le16(*(volatile uint16_t *)used_idx); + + return svq->last_used_idx != svq->shadow_used_idx; + } +-- +2.31.1 + diff --git a/kvm-vdpa-Buffer-CVQ-support-on-shadow-virtqueue.patch b/kvm-vdpa-Buffer-CVQ-support-on-shadow-virtqueue.patch new file mode 100644 index 0000000..243aec8 --- /dev/null +++ b/kvm-vdpa-Buffer-CVQ-support-on-shadow-virtqueue.patch @@ -0,0 +1,323 @@ +From 881945094c0e4d33614d40959bfc20e395f5a478 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= +Date: Thu, 21 Jul 2022 16:05:40 +0200 +Subject: [PATCH 24/32] vdpa: Buffer CVQ support on shadow virtqueue +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eugenio Pérez +RH-MergeRequest: 108: Net Control Virtqueue shadow Support +RH-Commit: [24/27] 5486f80141a3ad968a32e782bdcdead32f417352 (eperezmartin/qemu-kvm) +RH-Bugzilla: 1939363 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Cindy Lu +RH-Acked-by: Laurent Vivier + +Bugzilla: https://bugzilla.redhat.com/1939363 + +Upstream Status: git://git.qemu.org/qemu.git + +commit 2df4dd31e194c94da7d28c02e92449f4a989fca9 +Author: Eugenio Pérez +Date: Wed Jul 20 08:59:43 2022 +0200 + + vdpa: Buffer CVQ support on shadow virtqueue + + Introduce the control virtqueue support for vDPA shadow virtqueue. This + is needed for advanced networking features like rx filtering. + + Virtio-net control VQ copies the descriptors to qemu's VA, so we avoid + TOCTOU with the guest's or device's memory every time there is a device + model change. Otherwise, the guest could change the memory content in + the time between qemu and the device read it. + + To demonstrate command handling, VIRTIO_NET_F_CTRL_MACADDR is + implemented. If the virtio-net driver changes MAC the virtio-net device + model will be updated with the new one, and a rx filtering change event + will be raised. + + More cvq commands could be added here straightforwardly but they have + not been tested. + + Signed-off-by: Eugenio Pérez + Reviewed-by: Michael S. 
Tsirkin + Signed-off-by: Jason Wang + +Signed-off-by: Eugenio Pérez +--- + net/vhost-vdpa.c | 213 +++++++++++++++++++++++++++++++++++++++++++++-- + 1 file changed, 205 insertions(+), 8 deletions(-) + +diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c +index 2e3b6b10d8..df42822463 100644 +--- a/net/vhost-vdpa.c ++++ b/net/vhost-vdpa.c +@@ -33,6 +33,9 @@ typedef struct VhostVDPAState { + NetClientState nc; + struct vhost_vdpa vhost_vdpa; + VHostNetState *vhost_net; ++ ++ /* Control commands shadow buffers */ ++ void *cvq_cmd_out_buffer, *cvq_cmd_in_buffer; + bool started; + } VhostVDPAState; + +@@ -131,6 +134,8 @@ static void vhost_vdpa_cleanup(NetClientState *nc) + { + VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); + ++ qemu_vfree(s->cvq_cmd_out_buffer); ++ qemu_vfree(s->cvq_cmd_in_buffer); + if (s->vhost_net) { + vhost_net_cleanup(s->vhost_net); + g_free(s->vhost_net); +@@ -190,24 +195,191 @@ static NetClientInfo net_vhost_vdpa_info = { + .check_peer_type = vhost_vdpa_check_peer_type, + }; + ++static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr) ++{ ++ VhostIOVATree *tree = v->iova_tree; ++ DMAMap needle = { ++ /* ++ * No need to specify size or to look for more translations since ++ * this contiguous chunk was allocated by us. ++ */ ++ .translated_addr = (hwaddr)(uintptr_t)addr, ++ }; ++ const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle); ++ int r; ++ ++ if (unlikely(!map)) { ++ error_report("Cannot locate expected map"); ++ return; ++ } ++ ++ r = vhost_vdpa_dma_unmap(v, map->iova, map->size + 1); ++ if (unlikely(r != 0)) { ++ error_report("Device cannot unmap: %s(%d)", g_strerror(r), r); ++ } ++ ++ vhost_iova_tree_remove(tree, map); ++} ++ ++static size_t vhost_vdpa_net_cvq_cmd_len(void) ++{ ++ /* ++ * MAC_TABLE_SET is the ctrl command that produces the longer out buffer. ++ * In buffer is always 1 byte, so it should fit here ++ */ ++ return sizeof(struct virtio_net_ctrl_hdr) + ++ 2 * sizeof(struct virtio_net_ctrl_mac) + ++ MAC_TABLE_ENTRIES * ETH_ALEN; ++} ++ ++static size_t vhost_vdpa_net_cvq_cmd_page_len(void) ++{ ++ return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size); ++} ++ ++/** Copy and map a guest buffer. */ ++static bool vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, ++ const struct iovec *out_data, ++ size_t out_num, size_t data_len, void *buf, ++ size_t *written, bool write) ++{ ++ DMAMap map = {}; ++ int r; ++ ++ if (unlikely(!data_len)) { ++ qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid legnth of %s buffer\n", ++ __func__, write ? "in" : "out"); ++ return false; ++ } ++ ++ *written = iov_to_buf(out_data, out_num, 0, buf, data_len); ++ map.translated_addr = (hwaddr)(uintptr_t)buf; ++ map.size = vhost_vdpa_net_cvq_cmd_page_len() - 1; ++ map.perm = write ? IOMMU_RW : IOMMU_RO, ++ r = vhost_iova_tree_map_alloc(v->iova_tree, &map); ++ if (unlikely(r != IOVA_OK)) { ++ error_report("Cannot map injected element"); ++ return false; ++ } ++ ++ r = vhost_vdpa_dma_map(v, map.iova, vhost_vdpa_net_cvq_cmd_page_len(), buf, ++ !write); ++ if (unlikely(r < 0)) { ++ goto dma_map_err; ++ } ++ ++ return true; ++ ++dma_map_err: ++ vhost_iova_tree_remove(v->iova_tree, &map); ++ return false; ++} ++ + /** +- * Forward buffer for the moment. 
++ * Copy the guest element into a dedicated buffer suitable to be sent to NIC ++ * ++ * @iov: [0] is the out buffer, [1] is the in one ++ */ ++static bool vhost_vdpa_net_cvq_map_elem(VhostVDPAState *s, ++ VirtQueueElement *elem, ++ struct iovec *iov) ++{ ++ size_t in_copied; ++ bool ok; ++ ++ iov[0].iov_base = s->cvq_cmd_out_buffer; ++ ok = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, elem->out_sg, elem->out_num, ++ vhost_vdpa_net_cvq_cmd_len(), iov[0].iov_base, ++ &iov[0].iov_len, false); ++ if (unlikely(!ok)) { ++ return false; ++ } ++ ++ iov[1].iov_base = s->cvq_cmd_in_buffer; ++ ok = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, NULL, 0, ++ sizeof(virtio_net_ctrl_ack), iov[1].iov_base, ++ &in_copied, true); ++ if (unlikely(!ok)) { ++ vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer); ++ return false; ++ } ++ ++ iov[1].iov_len = sizeof(virtio_net_ctrl_ack); ++ return true; ++} ++ ++/** ++ * Do not forward commands not supported by SVQ. Otherwise, the device could ++ * accept it and qemu would not know how to update the device model. ++ */ ++static bool vhost_vdpa_net_cvq_validate_cmd(const struct iovec *out, ++ size_t out_num) ++{ ++ struct virtio_net_ctrl_hdr ctrl; ++ size_t n; ++ ++ n = iov_to_buf(out, out_num, 0, &ctrl, sizeof(ctrl)); ++ if (unlikely(n < sizeof(ctrl))) { ++ qemu_log_mask(LOG_GUEST_ERROR, ++ "%s: invalid legnth of out buffer %zu\n", __func__, n); ++ return false; ++ } ++ ++ switch (ctrl.class) { ++ case VIRTIO_NET_CTRL_MAC: ++ switch (ctrl.cmd) { ++ case VIRTIO_NET_CTRL_MAC_ADDR_SET: ++ return true; ++ default: ++ qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid mac cmd %u\n", ++ __func__, ctrl.cmd); ++ }; ++ break; ++ default: ++ qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid control class %u\n", ++ __func__, ctrl.class); ++ }; ++ ++ return false; ++} ++ ++/** ++ * Validate and copy control virtqueue commands. ++ * ++ * Following QEMU guidelines, we offer a copy of the buffers to the device to ++ * prevent TOCTOU bugs. 
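Reduced to a self-contained C sketch (the struct layout and the accepted class value are simplified stand-ins for the virtio definitions, not the code added below), the copy-then-validate pattern is: copy the guest-visible bytes into private memory exactly once, then validate and act only on that private copy:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct ctrl_hdr {       /* simplified stand-in for virtio_net_ctrl_hdr */
        uint8_t class;
        uint8_t cmd;
    };

    /* 'guest_mem' may change under our feet at any time; 'priv' is ours alone.
     * Returns 0 if the command is acceptable, -1 otherwise. */
    static int copy_then_validate(const void *guest_mem, size_t len,
                                  struct ctrl_hdr *priv)
    {
        if (len < sizeof(*priv)) {
            return -1;                          /* runt command */
        }
        memcpy(priv, guest_mem, sizeof(*priv)); /* the single copy */
        /* Every later check sees exactly the bytes the device will see. */
        return priv->class == 0 /* e.g. VIRTIO_NET_CTRL_MAC */ ? 0 : -1;
    }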
+ */ + static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq, + VirtQueueElement *elem, + void *opaque) + { +- unsigned int n = elem->out_num + elem->in_num; +- g_autofree struct iovec *dev_buffers = g_new(struct iovec, n); ++ VhostVDPAState *s = opaque; + size_t in_len, dev_written; + virtio_net_ctrl_ack status = VIRTIO_NET_ERR; +- int r; ++ /* out and in buffers sent to the device */ ++ struct iovec dev_buffers[2] = { ++ { .iov_base = s->cvq_cmd_out_buffer }, ++ { .iov_base = s->cvq_cmd_in_buffer }, ++ }; ++ /* in buffer used for device model */ ++ const struct iovec in = { ++ .iov_base = &status, ++ .iov_len = sizeof(status), ++ }; ++ int r = -EINVAL; ++ bool ok; ++ ++ ok = vhost_vdpa_net_cvq_map_elem(s, elem, dev_buffers); ++ if (unlikely(!ok)) { ++ goto out; ++ } + +- memcpy(dev_buffers, elem->out_sg, elem->out_num); +- memcpy(dev_buffers + elem->out_num, elem->in_sg, elem->in_num); ++ ok = vhost_vdpa_net_cvq_validate_cmd(&dev_buffers[0], 1); ++ if (unlikely(!ok)) { ++ goto out; ++ } + +- r = vhost_svq_add(svq, &dev_buffers[0], elem->out_num, &dev_buffers[1], +- elem->in_num, elem); ++ r = vhost_svq_add(svq, &dev_buffers[0], 1, &dev_buffers[1], 1, elem); + if (unlikely(r != 0)) { + if (unlikely(r == -ENOSPC)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n", +@@ -224,6 +396,18 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq, + dev_written = vhost_svq_poll(svq); + if (unlikely(dev_written < sizeof(status))) { + error_report("Insufficient written data (%zu)", dev_written); ++ goto out; ++ } ++ ++ memcpy(&status, dev_buffers[1].iov_base, sizeof(status)); ++ if (status != VIRTIO_NET_OK) { ++ goto out; ++ } ++ ++ status = VIRTIO_NET_ERR; ++ virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, dev_buffers, 1); ++ if (status != VIRTIO_NET_OK) { ++ error_report("Bad CVQ processing in model"); + } + + out: +@@ -234,6 +418,12 @@ out: + } + vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status))); + g_free(elem); ++ if (dev_buffers[0].iov_base) { ++ vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, dev_buffers[0].iov_base); ++ } ++ if (dev_buffers[1].iov_base) { ++ vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, dev_buffers[1].iov_base); ++ } + return r; + } + +@@ -266,6 +456,13 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer, + s->vhost_vdpa.device_fd = vdpa_device_fd; + s->vhost_vdpa.index = queue_pair_index; + if (!is_datapath) { ++ s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size, ++ vhost_vdpa_net_cvq_cmd_page_len()); ++ memset(s->cvq_cmd_out_buffer, 0, vhost_vdpa_net_cvq_cmd_page_len()); ++ s->cvq_cmd_in_buffer = qemu_memalign(qemu_real_host_page_size, ++ vhost_vdpa_net_cvq_cmd_page_len()); ++ memset(s->cvq_cmd_in_buffer, 0, vhost_vdpa_net_cvq_cmd_page_len()); ++ + s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops; + s->vhost_vdpa.shadow_vq_ops_opaque = s; + } +-- +2.31.1 + diff --git a/kvm-vdpa-Export-vhost_vdpa_dma_map-and-unmap-calls.patch b/kvm-vdpa-Export-vhost_vdpa_dma_map-and-unmap-calls.patch new file mode 100644 index 0000000..d6e72ac --- /dev/null +++ b/kvm-vdpa-Export-vhost_vdpa_dma_map-and-unmap-calls.patch @@ -0,0 +1,84 @@ +From 3a5d325fcb2958318262efac31d5fd25fb062523 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= +Date: Thu, 21 Jul 2022 15:38:55 +0200 +Subject: [PATCH 21/32] vdpa: Export vhost_vdpa_dma_map and unmap calls +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eugenio Pérez +RH-MergeRequest: 108: Net Control 
Virtqueue shadow Support +RH-Commit: [21/27] 97e7a583bbd3c12a0786d53132812ec41702c190 (eperezmartin/qemu-kvm) +RH-Bugzilla: 1939363 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Cindy Lu +RH-Acked-by: Laurent Vivier + +Bugzilla: https://bugzilla.redhat.com/1939363 + +Upstream Status: git://git.qemu.org/qemu.git + +commit 463ba1e3b8cf080812895c5f26d95d8d7db2e692 +Author: Eugenio Pérez +Date: Wed Jul 20 08:59:40 2022 +0200 + + vdpa: Export vhost_vdpa_dma_map and unmap calls + + Shadow CVQ will copy buffers on qemu VA, so we avoid TOCTOU attacks from + the guest that could set a different state in qemu device model and vdpa + device. + + To do so, it needs to be able to map these new buffers to the device. + + Signed-off-by: Eugenio Pérez + Acked-by: Jason Wang + Reviewed-by: Michael S. Tsirkin + Signed-off-by: Jason Wang + +Signed-off-by: Eugenio Pérez +--- + hw/virtio/vhost-vdpa.c | 7 +++---- + include/hw/virtio/vhost-vdpa.h | 4 ++++ + 2 files changed, 7 insertions(+), 4 deletions(-) + +diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c +index 28df57b12e..14b02fe079 100644 +--- a/hw/virtio/vhost-vdpa.c ++++ b/hw/virtio/vhost-vdpa.c +@@ -71,8 +71,8 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section, + return false; + } + +-static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size, +- void *vaddr, bool readonly) ++int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size, ++ void *vaddr, bool readonly) + { + struct vhost_msg_v2 msg = {}; + int fd = v->device_fd; +@@ -97,8 +97,7 @@ static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size, + return ret; + } + +-static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, +- hwaddr size) ++int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, hwaddr size) + { + struct vhost_msg_v2 msg = {}; + int fd = v->device_fd; +diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h +index a29dbb3f53..7214eb47dc 100644 +--- a/include/hw/virtio/vhost-vdpa.h ++++ b/include/hw/virtio/vhost-vdpa.h +@@ -39,4 +39,8 @@ typedef struct vhost_vdpa { + VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX]; + } VhostVDPA; + ++int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size, ++ void *vaddr, bool readonly); ++int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, hwaddr size); ++ + #endif +-- +2.31.1 + diff --git a/kvm-vdpa-Extract-get-features-part-from-vhost_vdpa_get_m.patch b/kvm-vdpa-Extract-get-features-part-from-vhost_vdpa_get_m.patch new file mode 100644 index 0000000..44e97af --- /dev/null +++ b/kvm-vdpa-Extract-get-features-part-from-vhost_vdpa_get_m.patch @@ -0,0 +1,108 @@ +From 9a290bd74f983f3a65aa9ec5df2da9aa94bfdecd Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= +Date: Thu, 21 Jul 2022 16:05:42 +0200 +Subject: [PATCH 25/32] vdpa: Extract get features part from + vhost_vdpa_get_max_queue_pairs +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eugenio Pérez +RH-MergeRequest: 108: Net Control Virtqueue shadow Support +RH-Commit: [25/27] 654ad68e10a4df84cced923c64e72d500721ad67 (eperezmartin/qemu-kvm) +RH-Bugzilla: 1939363 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Cindy Lu +RH-Acked-by: Laurent Vivier + +Bugzilla: https://bugzilla.redhat.com/1939363 + +Upstream Status: git://git.qemu.org/qemu.git + +commit 8170ab3f43989680491d00f1017f60b25d346114 +Author: Eugenio Pérez +Date: Wed Jul 20 08:59:44 2022 +0200 + + vdpa: Extract get features 
part from vhost_vdpa_get_max_queue_pairs + + To know the device features is needed for CVQ SVQ, so SVQ knows if it + can handle all commands or not. Extract from + vhost_vdpa_get_max_queue_pairs so we can reuse it. + + Signed-off-by: Eugenio Pérez + Acked-by: Jason Wang + Reviewed-by: Michael S. Tsirkin + Signed-off-by: Jason Wang + +Signed-off-by: Eugenio Pérez +--- + net/vhost-vdpa.c | 30 ++++++++++++++++++++---------- + 1 file changed, 20 insertions(+), 10 deletions(-) + +diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c +index df42822463..8b76dac966 100644 +--- a/net/vhost-vdpa.c ++++ b/net/vhost-vdpa.c +@@ -474,20 +474,24 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer, + return nc; + } + +-static int vhost_vdpa_get_max_queue_pairs(int fd, int *has_cvq, Error **errp) ++static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp) ++{ ++ int ret = ioctl(fd, VHOST_GET_FEATURES, features); ++ if (unlikely(ret < 0)) { ++ error_setg_errno(errp, errno, ++ "Fail to query features from vhost-vDPA device"); ++ } ++ return ret; ++} ++ ++static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features, ++ int *has_cvq, Error **errp) + { + unsigned long config_size = offsetof(struct vhost_vdpa_config, buf); + g_autofree struct vhost_vdpa_config *config = NULL; + __virtio16 *max_queue_pairs; +- uint64_t features; + int ret; + +- ret = ioctl(fd, VHOST_GET_FEATURES, &features); +- if (ret) { +- error_setg(errp, "Fail to query features from vhost-vDPA device"); +- return ret; +- } +- + if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) { + *has_cvq = 1; + } else { +@@ -517,10 +521,11 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name, + NetClientState *peer, Error **errp) + { + const NetdevVhostVDPAOptions *opts; ++ uint64_t features; + int vdpa_device_fd; + g_autofree NetClientState **ncs = NULL; + NetClientState *nc; +- int queue_pairs, i, has_cvq = 0; ++ int queue_pairs, r, i, has_cvq = 0; + + assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA); + opts = &netdev->u.vhost_vdpa; +@@ -534,7 +539,12 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name, + return -errno; + } + +- queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, ++ r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp); ++ if (unlikely(r < 0)) { ++ return r; ++ } ++ ++ queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features, + &has_cvq, errp); + if (queue_pairs < 0) { + qemu_close(vdpa_device_fd); +-- +2.31.1 + diff --git a/kvm-vdpa-manual-forward-CVQ-buffers.patch b/kvm-vdpa-manual-forward-CVQ-buffers.patch new file mode 100644 index 0000000..61909ff --- /dev/null +++ b/kvm-vdpa-manual-forward-CVQ-buffers.patch @@ -0,0 +1,166 @@ +From c33bc0b7f2b5cfa330a6d89d60ee94de129c65c1 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= +Date: Thu, 21 Jul 2022 16:05:38 +0200 +Subject: [PATCH 23/32] vdpa: manual forward CVQ buffers +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eugenio Pérez +RH-MergeRequest: 108: Net Control Virtqueue shadow Support +RH-Commit: [23/27] ce128d5152be7eebf87e186eb8b58c2ed95aff6d (eperezmartin/qemu-kvm) +RH-Bugzilla: 1939363 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Cindy Lu +RH-Acked-by: Laurent Vivier + +Bugzilla: https://bugzilla.redhat.com/1939363 + +Upstream Status: git://git.qemu.org/qemu.git + +commit bd907ae4b00ebedad5e586af05ea3d6490318d45 +Author: Eugenio Pérez +Date: Wed Jul 20 08:59:42 2022 +0200 + + vdpa: manual forward CVQ buffers + + Do a 
simple forwarding of CVQ buffers, the same work SVQ could do but + through callbacks. No functional change intended. + + Signed-off-by: Eugenio Pérez + Reviewed-by: Michael S. Tsirkin + Signed-off-by: Jason Wang + +Signed-off-by: Eugenio Pérez +--- + hw/virtio/vhost-vdpa.c | 3 +- + include/hw/virtio/vhost-vdpa.h | 3 ++ + net/vhost-vdpa.c | 58 ++++++++++++++++++++++++++++++++++ + 3 files changed, 63 insertions(+), 1 deletion(-) + +diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c +index 14b02fe079..49effe5462 100644 +--- a/hw/virtio/vhost-vdpa.c ++++ b/hw/virtio/vhost-vdpa.c +@@ -417,7 +417,8 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v, + for (unsigned n = 0; n < hdev->nvqs; ++n) { + g_autoptr(VhostShadowVirtqueue) svq; + +- svq = vhost_svq_new(v->iova_tree, NULL, NULL); ++ svq = vhost_svq_new(v->iova_tree, v->shadow_vq_ops, ++ v->shadow_vq_ops_opaque); + if (unlikely(!svq)) { + error_setg(errp, "Cannot create svq %u", n); + return -1; +diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h +index 7214eb47dc..1111d85643 100644 +--- a/include/hw/virtio/vhost-vdpa.h ++++ b/include/hw/virtio/vhost-vdpa.h +@@ -15,6 +15,7 @@ + #include + + #include "hw/virtio/vhost-iova-tree.h" ++#include "hw/virtio/vhost-shadow-virtqueue.h" + #include "hw/virtio/virtio.h" + #include "standard-headers/linux/vhost_types.h" + +@@ -35,6 +36,8 @@ typedef struct vhost_vdpa { + /* IOVA mapping used by the Shadow Virtqueue */ + VhostIOVATree *iova_tree; + GPtrArray *shadow_vqs; ++ const VhostShadowVirtqueueOps *shadow_vq_ops; ++ void *shadow_vq_ops_opaque; + struct vhost_dev *dev; + VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX]; + } VhostVDPA; +diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c +index df1e69ee72..2e3b6b10d8 100644 +--- a/net/vhost-vdpa.c ++++ b/net/vhost-vdpa.c +@@ -11,11 +11,14 @@ + + #include "qemu/osdep.h" + #include "clients.h" ++#include "hw/virtio/virtio-net.h" + #include "net/vhost_net.h" + #include "net/vhost-vdpa.h" + #include "hw/virtio/vhost-vdpa.h" + #include "qemu/config-file.h" + #include "qemu/error-report.h" ++#include "qemu/log.h" ++#include "qemu/memalign.h" + #include "qemu/option.h" + #include "qapi/error.h" + #include +@@ -187,6 +190,57 @@ static NetClientInfo net_vhost_vdpa_info = { + .check_peer_type = vhost_vdpa_check_peer_type, + }; + ++/** ++ * Forward buffer for the moment. ++ */ ++static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq, ++ VirtQueueElement *elem, ++ void *opaque) ++{ ++ unsigned int n = elem->out_num + elem->in_num; ++ g_autofree struct iovec *dev_buffers = g_new(struct iovec, n); ++ size_t in_len, dev_written; ++ virtio_net_ctrl_ack status = VIRTIO_NET_ERR; ++ int r; ++ ++ memcpy(dev_buffers, elem->out_sg, elem->out_num); ++ memcpy(dev_buffers + elem->out_num, elem->in_sg, elem->in_num); ++ ++ r = vhost_svq_add(svq, &dev_buffers[0], elem->out_num, &dev_buffers[1], ++ elem->in_num, elem); ++ if (unlikely(r != 0)) { ++ if (unlikely(r == -ENOSPC)) { ++ qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n", ++ __func__); ++ } ++ goto out; ++ } ++ ++ /* ++ * We can poll here since we've had BQL from the time we sent the ++ * descriptor. 
Also, we need to take the answer before SVQ pulls by itself, ++ * when BQL is released ++ */ ++ dev_written = vhost_svq_poll(svq); ++ if (unlikely(dev_written < sizeof(status))) { ++ error_report("Insufficient written data (%zu)", dev_written); ++ } ++ ++out: ++ in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, ++ sizeof(status)); ++ if (unlikely(in_len < sizeof(status))) { ++ error_report("Bad device CVQ written length"); ++ } ++ vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status))); ++ g_free(elem); ++ return r; ++} ++ ++static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = { ++ .avail_handler = vhost_vdpa_net_handle_ctrl_avail, ++}; ++ + static NetClientState *net_vhost_vdpa_init(NetClientState *peer, + const char *device, + const char *name, +@@ -211,6 +265,10 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer, + + s->vhost_vdpa.device_fd = vdpa_device_fd; + s->vhost_vdpa.index = queue_pair_index; ++ if (!is_datapath) { ++ s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops; ++ s->vhost_vdpa.shadow_vq_ops_opaque = s; ++ } + ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs); + if (ret) { + qemu_del_net_client(nc); +-- +2.31.1 + diff --git a/kvm-vhost-Add-SVQDescState.patch b/kvm-vhost-Add-SVQDescState.patch new file mode 100644 index 0000000..b1ea4bb --- /dev/null +++ b/kvm-vhost-Add-SVQDescState.patch @@ -0,0 +1,135 @@ +From 14200f493243f73152ea4a4b97274f0ec4fb36fa Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= +Date: Thu, 21 Jul 2022 15:38:55 +0200 +Subject: [PATCH 15/32] vhost: Add SVQDescState +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eugenio Pérez +RH-MergeRequest: 108: Net Control Virtqueue shadow Support +RH-Commit: [15/27] 2e2866f22e37cace8598ff44dfcdc07fcc915d6d (eperezmartin/qemu-kvm) +RH-Bugzilla: 1939363 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Cindy Lu +RH-Acked-by: Laurent Vivier + +Bugzilla: https://bugzilla.redhat.com/1939363 + +Upstream Status: git://git.qemu.org/qemu.git + +commit 9e87868fcaf5785c8e1490c290505fa32305ff91 +Author: Eugenio Pérez +Date: Wed Jul 20 08:59:34 2022 +0200 + + vhost: Add SVQDescState + + This will allow SVQ to add context to the different queue elements. + + This patch only store the actual element, no functional change intended. + + Signed-off-by: Eugenio Pérez + Reviewed-by: Michael S. 
Tsirkin + Signed-off-by: Jason Wang + +Signed-off-by: Eugenio Pérez +--- + hw/virtio/vhost-shadow-virtqueue.c | 16 ++++++++-------- + hw/virtio/vhost-shadow-virtqueue.h | 8 ++++++-- + 2 files changed, 14 insertions(+), 10 deletions(-) + +diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c +index 3cec03d709..a08e3d4025 100644 +--- a/hw/virtio/vhost-shadow-virtqueue.c ++++ b/hw/virtio/vhost-shadow-virtqueue.c +@@ -256,7 +256,7 @@ static int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg, + return -EINVAL; + } + +- svq->ring_id_maps[qemu_head] = elem; ++ svq->desc_state[qemu_head].elem = elem; + vhost_svq_kick(svq); + return 0; + } +@@ -411,21 +411,21 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq, + return NULL; + } + +- if (unlikely(!svq->ring_id_maps[used_elem.id])) { ++ if (unlikely(!svq->desc_state[used_elem.id].elem)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Device %s says index %u is used, but it was not available", + svq->vdev->name, used_elem.id); + return NULL; + } + +- num = svq->ring_id_maps[used_elem.id]->in_num + +- svq->ring_id_maps[used_elem.id]->out_num; ++ num = svq->desc_state[used_elem.id].elem->in_num + ++ svq->desc_state[used_elem.id].elem->out_num; + last_used_chain = vhost_svq_last_desc_of_chain(svq, num, used_elem.id); + svq->desc_next[last_used_chain] = svq->free_head; + svq->free_head = used_elem.id; + + *len = used_elem.len; +- return g_steal_pointer(&svq->ring_id_maps[used_elem.id]); ++ return g_steal_pointer(&svq->desc_state[used_elem.id].elem); + } + + static void vhost_svq_flush(VhostShadowVirtqueue *svq, +@@ -595,7 +595,7 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev, + memset(svq->vring.desc, 0, driver_size); + svq->vring.used = qemu_memalign(qemu_real_host_page_size, device_size); + memset(svq->vring.used, 0, device_size); +- svq->ring_id_maps = g_new0(VirtQueueElement *, svq->vring.num); ++ svq->desc_state = g_new0(SVQDescState, svq->vring.num); + svq->desc_next = g_new0(uint16_t, svq->vring.num); + for (unsigned i = 0; i < svq->vring.num - 1; i++) { + svq->desc_next[i] = cpu_to_le16(i + 1); +@@ -620,7 +620,7 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq) + + for (unsigned i = 0; i < svq->vring.num; ++i) { + g_autofree VirtQueueElement *elem = NULL; +- elem = g_steal_pointer(&svq->ring_id_maps[i]); ++ elem = g_steal_pointer(&svq->desc_state[i].elem); + if (elem) { + virtqueue_detach_element(svq->vq, elem, 0); + } +@@ -632,7 +632,7 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq) + } + svq->vq = NULL; + g_free(svq->desc_next); +- g_free(svq->ring_id_maps); ++ g_free(svq->desc_state); + qemu_vfree(svq->vring.desc); + qemu_vfree(svq->vring.used); + } +diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h +index c132c994e9..d646c35054 100644 +--- a/hw/virtio/vhost-shadow-virtqueue.h ++++ b/hw/virtio/vhost-shadow-virtqueue.h +@@ -15,6 +15,10 @@ + #include "standard-headers/linux/vhost_types.h" + #include "hw/virtio/vhost-iova-tree.h" + ++typedef struct SVQDescState { ++ VirtQueueElement *elem; ++} SVQDescState; ++ + /* Shadow virtqueue to relay notifications */ + typedef struct VhostShadowVirtqueue { + /* Shadow vring */ +@@ -47,8 +51,8 @@ typedef struct VhostShadowVirtqueue { + /* IOVA mapping */ + VhostIOVATree *iova_tree; + +- /* Map for use the guest's descriptors */ +- VirtQueueElement **ring_id_maps; ++ /* SVQ vring descriptors state */ ++ SVQDescState *desc_state; + + /* Next VirtQueue element that guest made available */ 
+ VirtQueueElement *next_guest_avail_elem; +-- +2.31.1 + diff --git a/kvm-vhost-Add-svq-avail_handler-callback.patch b/kvm-vhost-Add-svq-avail_handler-callback.patch new file mode 100644 index 0000000..a8b585d --- /dev/null +++ b/kvm-vhost-Add-svq-avail_handler-callback.patch @@ -0,0 +1,164 @@ +From 433106c286a1961737300ebaece6f10b2747e7d8 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= +Date: Thu, 21 Jul 2022 15:38:55 +0200 +Subject: [PATCH 20/32] vhost: Add svq avail_handler callback +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eugenio Pérez +RH-MergeRequest: 108: Net Control Virtqueue shadow Support +RH-Commit: [20/27] d228eb89d204f8be623bc870503bbf0078dfc9ae (eperezmartin/qemu-kvm) +RH-Bugzilla: 1939363 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Cindy Lu +RH-Acked-by: Laurent Vivier + +Bugzilla: https://bugzilla.redhat.com/1939363 + +Upstream Status: git://git.qemu.org/qemu.git + +commit e966c0b781aebabd2c0f5eef91678f08ce1d068c +Author: Eugenio Pérez +Date: Wed Jul 20 08:59:39 2022 +0200 + + vhost: Add svq avail_handler callback + + This allows external handlers to be aware of new buffers that the guest + places in the virtqueue. + + When this callback is defined the ownership of the guest's virtqueue + element is transferred to the callback. This means that if the user + wants to forward the descriptor it needs to manually inject it. The + callback is also free to process the command by itself and use the + element with svq_push. + + Signed-off-by: Eugenio Pérez + Reviewed-by: Michael S. Tsirkin + Signed-off-by: Jason Wang + +Signed-off-by: Eugenio Pérez +--- + hw/virtio/vhost-shadow-virtqueue.c | 14 ++++++++++++-- + hw/virtio/vhost-shadow-virtqueue.h | 31 +++++++++++++++++++++++++++++- + hw/virtio/vhost-vdpa.c | 3 ++- + 3 files changed, 44 insertions(+), 4 deletions(-) + +diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c +index 95d0d7a7ee..e53aac45f6 100644 +--- a/hw/virtio/vhost-shadow-virtqueue.c ++++ b/hw/virtio/vhost-shadow-virtqueue.c +@@ -306,7 +306,11 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq) + break; + } + +- r = vhost_svq_add_element(svq, elem); ++ if (svq->ops) { ++ r = svq->ops->avail_handler(svq, elem, svq->ops_opaque); ++ } else { ++ r = vhost_svq_add_element(svq, elem); ++ } + if (unlikely(r != 0)) { + if (r == -ENOSPC) { + /* +@@ -685,12 +689,16 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq) + * shadow methods and file descriptors. + * + * @iova_tree: Tree to perform descriptors translations ++ * @ops: SVQ owner callbacks ++ * @ops_opaque: ops opaque pointer + * + * Returns the new virtqueue or NULL. + * + * In case of error, reason is reported through error_report. 
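To make the wiring concrete, a schematic caller, using only names introduced by this series and assuming an existing iova_tree and opaque context ctx, could look like:

    /* Sketch: forward the guest element unchanged. Ownership of elem is
     * transferred to the handler, which hands it on to vhost_svq_add(). */
    static int my_avail_handler(VhostShadowVirtqueue *svq,
                                VirtQueueElement *elem, void *opaque)
    {
        return vhost_svq_add(svq, elem->out_sg, elem->out_num,
                             elem->in_sg, elem->in_num, elem);
    }

    static const VhostShadowVirtqueueOps my_ops = {
        .avail_handler = my_avail_handler,
    };

    /* ... */
    VhostShadowVirtqueue *svq = vhost_svq_new(iova_tree, &my_ops, ctx);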
+ */ +-VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree) ++VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree, ++ const VhostShadowVirtqueueOps *ops, ++ void *ops_opaque) + { + g_autofree VhostShadowVirtqueue *svq = g_new0(VhostShadowVirtqueue, 1); + int r; +@@ -712,6 +720,8 @@ VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree) + event_notifier_init_fd(&svq->svq_kick, VHOST_FILE_UNBIND); + event_notifier_set_handler(&svq->hdev_call, vhost_svq_handle_call); + svq->iova_tree = iova_tree; ++ svq->ops = ops; ++ svq->ops_opaque = ops_opaque; + return g_steal_pointer(&svq); + + err_init_hdev_call: +diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h +index cf442f7dea..d04c34a589 100644 +--- a/hw/virtio/vhost-shadow-virtqueue.h ++++ b/hw/virtio/vhost-shadow-virtqueue.h +@@ -25,6 +25,27 @@ typedef struct SVQDescState { + unsigned int ndescs; + } SVQDescState; + ++typedef struct VhostShadowVirtqueue VhostShadowVirtqueue; ++ ++/** ++ * Callback to handle an avail buffer. ++ * ++ * @svq: Shadow virtqueue ++ * @elem: Element placed in the queue by the guest ++ * @vq_callback_opaque: Opaque ++ * ++ * Returns 0 if the vq is running as expected. ++ * ++ * Note that ownership of elem is transferred to the callback. ++ */ ++typedef int (*VirtQueueAvailCallback)(VhostShadowVirtqueue *svq, ++ VirtQueueElement *elem, ++ void *vq_callback_opaque); ++ ++typedef struct VhostShadowVirtqueueOps { ++ VirtQueueAvailCallback avail_handler; ++} VhostShadowVirtqueueOps; ++ + /* Shadow virtqueue to relay notifications */ + typedef struct VhostShadowVirtqueue { + /* Shadow vring */ +@@ -69,6 +90,12 @@ typedef struct VhostShadowVirtqueue { + */ + uint16_t *desc_next; + ++ /* Caller callbacks */ ++ const VhostShadowVirtqueueOps *ops; ++ ++ /* Caller callbacks opaque */ ++ void *ops_opaque; ++ + /* Next head to expose to the device */ + uint16_t shadow_avail_idx; + +@@ -102,7 +129,9 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev, + VirtQueue *vq); + void vhost_svq_stop(VhostShadowVirtqueue *svq); + +-VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree); ++VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree, ++ const VhostShadowVirtqueueOps *ops, ++ void *ops_opaque); + + void vhost_svq_free(gpointer vq); + G_DEFINE_AUTOPTR_CLEANUP_FUNC(VhostShadowVirtqueue, vhost_svq_free); +diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c +index 33dcaa135e..28df57b12e 100644 +--- a/hw/virtio/vhost-vdpa.c ++++ b/hw/virtio/vhost-vdpa.c +@@ -416,8 +416,9 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v, + + shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free); + for (unsigned n = 0; n < hdev->nvqs; ++n) { +- g_autoptr(VhostShadowVirtqueue) svq = vhost_svq_new(v->iova_tree); ++ g_autoptr(VhostShadowVirtqueue) svq; + ++ svq = vhost_svq_new(v->iova_tree, NULL, NULL); + if (unlikely(!svq)) { + error_setg(errp, "Cannot create svq %u", n); + return -1; +-- +2.31.1 + diff --git a/kvm-vhost-Check-for-queue-full-at-vhost_svq_add.patch b/kvm-vhost-Check-for-queue-full-at-vhost_svq_add.patch new file mode 100644 index 0000000..9b09d42 --- /dev/null +++ b/kvm-vhost-Check-for-queue-full-at-vhost_svq_add.patch @@ -0,0 +1,134 @@ +From 893dffb820973361bcef33612a6b924554a856c1 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= +Date: Thu, 21 Jul 2022 15:38:55 +0200 +Subject: [PATCH 13/32] vhost: Check for queue full at vhost_svq_add +MIME-Version: 1.0 +Content-Type: text/plain; 
charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eugenio Pérez +RH-MergeRequest: 108: Net Control Virtqueue shadow Support +RH-Commit: [13/27] d4bd8299fb7733a1e190618dfc92b4b53b7bbeb3 (eperezmartin/qemu-kvm) +RH-Bugzilla: 1939363 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Cindy Lu +RH-Acked-by: Laurent Vivier + +Bugzilla: https://bugzilla.redhat.com/1939363 + +Upstream Status: git://git.qemu.org/qemu.git + +commit f20b70eb5a68cfd8fef74a13ccdd494ef1cb0221 +Author: Eugenio Pérez +Date: Wed Jul 20 08:59:32 2022 +0200 + + vhost: Check for queue full at vhost_svq_add + + The series need to expose vhost_svq_add with full functionality, + including checking for full queue. + + Signed-off-by: Eugenio Pérez + Reviewed-by: Michael S. Tsirkin + Signed-off-by: Jason Wang + +Signed-off-by: Eugenio Pérez +--- + hw/virtio/vhost-shadow-virtqueue.c | 59 +++++++++++++++++------------- + 1 file changed, 33 insertions(+), 26 deletions(-) + +diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c +index e3fc3c2658..1d2bab287b 100644 +--- a/hw/virtio/vhost-shadow-virtqueue.c ++++ b/hw/virtio/vhost-shadow-virtqueue.c +@@ -233,21 +233,29 @@ static void vhost_svq_kick(VhostShadowVirtqueue *svq) + * Add an element to a SVQ. + * + * The caller must check that there is enough slots for the new element. It +- * takes ownership of the element: In case of failure, it is free and the SVQ +- * is considered broken. ++ * takes ownership of the element: In case of failure not ENOSPC, it is free. ++ * ++ * Return -EINVAL if element is invalid, -ENOSPC if dev queue is full + */ +-static bool vhost_svq_add(VhostShadowVirtqueue *svq, VirtQueueElement *elem) ++static int vhost_svq_add(VhostShadowVirtqueue *svq, VirtQueueElement *elem) + { + unsigned qemu_head; +- bool ok = vhost_svq_add_split(svq, elem, &qemu_head); ++ unsigned ndescs = elem->in_num + elem->out_num; ++ bool ok; ++ ++ if (unlikely(ndescs > vhost_svq_available_slots(svq))) { ++ return -ENOSPC; ++ } ++ ++ ok = vhost_svq_add_split(svq, elem, &qemu_head); + if (unlikely(!ok)) { + g_free(elem); +- return false; ++ return -EINVAL; + } + + svq->ring_id_maps[qemu_head] = elem; + vhost_svq_kick(svq); +- return true; ++ return 0; + } + + /** +@@ -274,7 +282,7 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq) + + while (true) { + VirtQueueElement *elem; +- bool ok; ++ int r; + + if (svq->next_guest_avail_elem) { + elem = g_steal_pointer(&svq->next_guest_avail_elem); +@@ -286,25 +294,24 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq) + break; + } + +- if (elem->out_num + elem->in_num > vhost_svq_available_slots(svq)) { +- /* +- * This condition is possible since a contiguous buffer in GPA +- * does not imply a contiguous buffer in qemu's VA +- * scatter-gather segments. If that happens, the buffer exposed +- * to the device needs to be a chain of descriptors at this +- * moment. +- * +- * SVQ cannot hold more available buffers if we are here: +- * queue the current guest descriptor and ignore further kicks +- * until some elements are used. +- */ +- svq->next_guest_avail_elem = elem; +- return; +- } +- +- ok = vhost_svq_add(svq, elem); +- if (unlikely(!ok)) { +- /* VQ is broken, just return and ignore any other kicks */ ++ r = vhost_svq_add(svq, elem); ++ if (unlikely(r != 0)) { ++ if (r == -ENOSPC) { ++ /* ++ * This condition is possible since a contiguous buffer in ++ * GPA does not imply a contiguous buffer in qemu's VA ++ * scatter-gather segments. 
If that happens, the buffer ++ * exposed to the device needs to be a chain of descriptors ++ * at this moment. ++ * ++ * SVQ cannot hold more available buffers if we are here: ++ * queue the current guest descriptor and ignore kicks ++ * until some elements are used. ++ */ ++ svq->next_guest_avail_elem = elem; ++ } ++ ++ /* VQ is full or broken, just return and ignore kicks */ + return; + } + } +-- +2.31.1 + diff --git a/kvm-vhost-Decouple-vhost_svq_add-from-VirtQueueElement.patch b/kvm-vhost-Decouple-vhost_svq_add-from-VirtQueueElement.patch new file mode 100644 index 0000000..6755aad --- /dev/null +++ b/kvm-vhost-Decouple-vhost_svq_add-from-VirtQueueElement.patch @@ -0,0 +1,138 @@ +From 5c8de23e185a1a1f0b19eac3c9fa03411c9f545c Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= +Date: Thu, 21 Jul 2022 15:38:55 +0200 +Subject: [PATCH 14/32] vhost: Decouple vhost_svq_add from VirtQueueElement +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eugenio Pérez +RH-MergeRequest: 108: Net Control Virtqueue shadow Support +RH-Commit: [14/27] 463087dd316adc91b9c7a4e6634c6fc1745c1849 (eperezmartin/qemu-kvm) +RH-Bugzilla: 1939363 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Cindy Lu +RH-Acked-by: Laurent Vivier + +Bugzilla: https://bugzilla.redhat.com/1939363 + +Upstream Status: git://git.qemu.org/qemu.git + +commit 1f46ae65d85f677b660bda46685dd3e94885a7cb +Author: Eugenio Pérez +Date: Wed Jul 20 08:59:33 2022 +0200 + + vhost: Decouple vhost_svq_add from VirtQueueElement + + VirtQueueElement comes from the guest, but we're heading SVQ to be able + to modify the element presented to the device without the guest's + knowledge. + + To do so, make SVQ accept sg buffers directly, instead of using + VirtQueueElement. + + Add vhost_svq_add_element to maintain element convenience. + + Signed-off-by: Eugenio Pérez + Acked-by: Jason Wang + Reviewed-by: Michael S. 
Tsirkin + Signed-off-by: Jason Wang + +Signed-off-by: Eugenio Pérez +--- + hw/virtio/vhost-shadow-virtqueue.c | 33 ++++++++++++++++++++---------- + 1 file changed, 22 insertions(+), 11 deletions(-) + +diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c +index 1d2bab287b..3cec03d709 100644 +--- a/hw/virtio/vhost-shadow-virtqueue.c ++++ b/hw/virtio/vhost-shadow-virtqueue.c +@@ -172,30 +172,31 @@ static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg, + } + + static bool vhost_svq_add_split(VhostShadowVirtqueue *svq, +- VirtQueueElement *elem, unsigned *head) ++ const struct iovec *out_sg, size_t out_num, ++ const struct iovec *in_sg, size_t in_num, ++ unsigned *head) + { + unsigned avail_idx; + vring_avail_t *avail = svq->vring.avail; + bool ok; +- g_autofree hwaddr *sgs = g_new(hwaddr, MAX(elem->out_num, elem->in_num)); ++ g_autofree hwaddr *sgs = g_new(hwaddr, MAX(out_num, in_num)); + + *head = svq->free_head; + + /* We need some descriptors here */ +- if (unlikely(!elem->out_num && !elem->in_num)) { ++ if (unlikely(!out_num && !in_num)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Guest provided element with no descriptors"); + return false; + } + +- ok = vhost_svq_vring_write_descs(svq, sgs, elem->out_sg, elem->out_num, +- elem->in_num > 0, false); ++ ok = vhost_svq_vring_write_descs(svq, sgs, out_sg, out_num, in_num > 0, ++ false); + if (unlikely(!ok)) { + return false; + } + +- ok = vhost_svq_vring_write_descs(svq, sgs, elem->in_sg, elem->in_num, false, +- true); ++ ok = vhost_svq_vring_write_descs(svq, sgs, in_sg, in_num, false, true); + if (unlikely(!ok)) { + return false; + } +@@ -237,17 +238,19 @@ static void vhost_svq_kick(VhostShadowVirtqueue *svq) + * + * Return -EINVAL if element is invalid, -ENOSPC if dev queue is full + */ +-static int vhost_svq_add(VhostShadowVirtqueue *svq, VirtQueueElement *elem) ++static int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg, ++ size_t out_num, const struct iovec *in_sg, ++ size_t in_num, VirtQueueElement *elem) + { + unsigned qemu_head; +- unsigned ndescs = elem->in_num + elem->out_num; ++ unsigned ndescs = in_num + out_num; + bool ok; + + if (unlikely(ndescs > vhost_svq_available_slots(svq))) { + return -ENOSPC; + } + +- ok = vhost_svq_add_split(svq, elem, &qemu_head); ++ ok = vhost_svq_add_split(svq, out_sg, out_num, in_sg, in_num, &qemu_head); + if (unlikely(!ok)) { + g_free(elem); + return -EINVAL; +@@ -258,6 +261,14 @@ static int vhost_svq_add(VhostShadowVirtqueue *svq, VirtQueueElement *elem) + return 0; + } + ++/* Convenience wrapper to add a guest's element to SVQ */ ++static int vhost_svq_add_element(VhostShadowVirtqueue *svq, ++ VirtQueueElement *elem) ++{ ++ return vhost_svq_add(svq, elem->out_sg, elem->out_num, elem->in_sg, ++ elem->in_num, elem); ++} ++ + /** + * Forward available buffers. 
+ * +@@ -294,7 +305,7 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq) + break; + } + +- r = vhost_svq_add(svq, elem); ++ r = vhost_svq_add_element(svq, elem); + if (unlikely(r != 0)) { + if (r == -ENOSPC) { + /* +-- +2.31.1 + diff --git a/kvm-vhost-Expose-vhost_svq_add.patch b/kvm-vhost-Expose-vhost_svq_add.patch new file mode 100644 index 0000000..70dc774 --- /dev/null +++ b/kvm-vhost-Expose-vhost_svq_add.patch @@ -0,0 +1,73 @@ +From cefd6583a8483c7a80f9cde8f7ad4705983af9e7 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= +Date: Thu, 21 Jul 2022 15:38:55 +0200 +Subject: [PATCH 18/32] vhost: Expose vhost_svq_add +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eugenio Pérez +RH-MergeRequest: 108: Net Control Virtqueue shadow Support +RH-Commit: [18/27] bfb44f597d350336113783bcc9b3c9d9d32ff8c0 (eperezmartin/qemu-kvm) +RH-Bugzilla: 1939363 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Cindy Lu +RH-Acked-by: Laurent Vivier + +Bugzilla: https://bugzilla.redhat.com/1939363 + +Upstream Status: git://git.qemu.org/qemu.git + +commit d0291f3f284d3bc220cdb13b0d8ac8a44eb5fd4c +Author: Eugenio Pérez +Date: Wed Jul 20 08:59:37 2022 +0200 + + vhost: Expose vhost_svq_add + + This allows external parts of SVQ to forward custom buffers to the + device. + + Signed-off-by: Eugenio Pérez + Reviewed-by: Michael S. Tsirkin + Signed-off-by: Jason Wang + +Signed-off-by: Eugenio Pérez +--- + hw/virtio/vhost-shadow-virtqueue.c | 6 +++--- + hw/virtio/vhost-shadow-virtqueue.h | 3 +++ + 2 files changed, 6 insertions(+), 3 deletions(-) + +diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c +index 1ce52d5b4a..cb879e7b88 100644 +--- a/hw/virtio/vhost-shadow-virtqueue.c ++++ b/hw/virtio/vhost-shadow-virtqueue.c +@@ -238,9 +238,9 @@ static void vhost_svq_kick(VhostShadowVirtqueue *svq) + * + * Return -EINVAL if element is invalid, -ENOSPC if dev queue is full + */ +-static int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg, +- size_t out_num, const struct iovec *in_sg, +- size_t in_num, VirtQueueElement *elem) ++int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg, ++ size_t out_num, const struct iovec *in_sg, size_t in_num, ++ VirtQueueElement *elem) + { + unsigned qemu_head; + unsigned ndescs = in_num + out_num; +diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h +index d9fc1f1799..dd78f4bec2 100644 +--- a/hw/virtio/vhost-shadow-virtqueue.h ++++ b/hw/virtio/vhost-shadow-virtqueue.h +@@ -86,6 +86,9 @@ bool vhost_svq_valid_features(uint64_t features, Error **errp); + + void vhost_svq_push_elem(VhostShadowVirtqueue *svq, + const VirtQueueElement *elem, uint32_t len); ++int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg, ++ size_t out_num, const struct iovec *in_sg, size_t in_num, ++ VirtQueueElement *elem); + + void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd); + void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd); +-- +2.31.1 + diff --git a/kvm-vhost-Fix-device-s-used-descriptor-dequeue.patch b/kvm-vhost-Fix-device-s-used-descriptor-dequeue.patch new file mode 100644 index 0000000..f149c05 --- /dev/null +++ b/kvm-vhost-Fix-device-s-used-descriptor-dequeue.patch @@ -0,0 +1,83 @@ +From 793d6d56190397624efdcaf6e0112bd12e39c05d Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= +Date: Thu, 21 Jul 2022 15:25:01 +0200 +Subject: [PATCH 02/32] vhost: Fix 
device's used descriptor dequeue +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eugenio Pérez +RH-MergeRequest: 108: Net Control Virtqueue shadow Support +RH-Commit: [2/27] b92803a0681c94c65d243dd07424522387594760 (eperezmartin/qemu-kvm) +RH-Bugzilla: 1939363 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Cindy Lu +RH-Acked-by: Laurent Vivier + +Bugzilla: https://bugzilla.redhat.com/1939363 + +Upstream Status: git://git.qemu.org/qemu.git + +commit 81abfa5724c9a6502d7a1d3a67c55f2a303a1170 +Author: Eugenio Pérez +Date: Thu May 12 19:57:43 2022 +0200 + + vhost: Fix device's used descriptor dequeue + + Only the first descriptor of each used chain was properly enqueued back. + + Fixes: 100890f7ca ("vhost: Shadow virtqueue buffers forwarding") + + Signed-off-by: Eugenio Pérez + Message-Id: <20220512175747.142058-3-eperezma@redhat.com> + Reviewed-by: Michael S. Tsirkin + Signed-off-by: Michael S. Tsirkin + +Signed-off-by: Eugenio Pérez +--- + hw/virtio/vhost-shadow-virtqueue.c | 17 +++++++++++++++-- + 1 file changed, 15 insertions(+), 2 deletions(-) + +diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c +index 3155801f50..31fc50907d 100644 +--- a/hw/virtio/vhost-shadow-virtqueue.c ++++ b/hw/virtio/vhost-shadow-virtqueue.c +@@ -334,12 +334,22 @@ static void vhost_svq_disable_notification(VhostShadowVirtqueue *svq) + svq->vring.avail->flags |= cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT); + } + ++static uint16_t vhost_svq_last_desc_of_chain(const VhostShadowVirtqueue *svq, ++ uint16_t num, uint16_t i) ++{ ++ for (uint16_t j = 0; j < (num - 1); ++j) { ++ i = le16_to_cpu(svq->desc_next[i]); ++ } ++ ++ return i; ++} ++ + static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq, + uint32_t *len) + { + const vring_used_t *used = svq->vring.used; + vring_used_elem_t used_elem; +- uint16_t last_used; ++ uint16_t last_used, last_used_chain, num; + + if (!vhost_svq_more_used(svq)) { + return NULL; +@@ -365,7 +375,10 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq, + return NULL; + } + +- svq->desc_next[used_elem.id] = svq->free_head; ++ num = svq->ring_id_maps[used_elem.id]->in_num + ++ svq->ring_id_maps[used_elem.id]->out_num; ++ last_used_chain = vhost_svq_last_desc_of_chain(svq, num, used_elem.id); ++ svq->desc_next[last_used_chain] = svq->free_head; + svq->free_head = used_elem.id; + + *len = used_elem.len; +-- +2.31.1 +
diff --git a/kvm-vhost-Fix-element-in-vhost_svq_add-failure.patch b/kvm-vhost-Fix-element-in-vhost_svq_add-failure.patch new file mode 100644 index 0000000..51eb700 --- /dev/null +++ b/kvm-vhost-Fix-element-in-vhost_svq_add-failure.patch @@ -0,0 +1,68 @@ +From aa99cf129923e0203c0caeb3b4e94a0eb973746f Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= +Date: Thu, 21 Jul 2022 15:36:38 +0200 +Subject: [PATCH 04/32] vhost: Fix element in vhost_svq_add
failure +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eugenio Pérez +RH-MergeRequest: 108: Net Control Virtqueue shadow Support +RH-Commit: [4/27] 96689c99a47dd49591c0d126cb1fbb975b2f79b4 (eperezmartin/qemu-kvm) +RH-Bugzilla: 1939363 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Cindy Lu +RH-Acked-by: Laurent Vivier + +Bugzilla: https://bugzilla.redhat.com/1939363 + +Upstream Status: git://git.qemu.org/qemu.git + +commit 5181db132b587754dda3a520eec923b87a65bbb7 +Author: Eugenio Pérez +Date: Thu May 12 19:57:47 2022 +0200 + + vhost: Fix element in vhost_svq_add failure + + Coverity rightly reports that the element is not freed in that case. + + Fixes: Coverity CID 1487559 + Fixes: 100890f7ca ("vhost: Shadow virtqueue buffers forwarding") + + Signed-off-by: Eugenio Pérez + Message-Id: <20220512175747.142058-7-eperezma@redhat.com> + Reviewed-by: Michael S. Tsirkin + Signed-off-by: Michael S. Tsirkin + +Signed-off-by: Eugenio Pérez +--- + hw/virtio/vhost-shadow-virtqueue.c | 8 ++++++++ + 1 file changed, 8 insertions(+) + +diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c +index 31fc50907d..06d0bb39d9 100644 +--- a/hw/virtio/vhost-shadow-virtqueue.c ++++ b/hw/virtio/vhost-shadow-virtqueue.c +@@ -199,11 +199,19 @@ static bool vhost_svq_add_split(VhostShadowVirtqueue *svq, + return true; + } + ++/** ++ * Add an element to a SVQ. ++ * ++ * The caller must check that there is enough slots for the new element. It ++ * takes ownership of the element: In case of failure, it is free and the SVQ ++ * is considered broken. ++ */ + static bool vhost_svq_add(VhostShadowVirtqueue *svq, VirtQueueElement *elem) + { + unsigned qemu_head; + bool ok = vhost_svq_add_split(svq, elem, &qemu_head); + if (unlikely(!ok)) { ++ g_free(elem); + return false; + } + +-- +2.31.1 +
diff --git a/kvm-vhost-Move-vhost_svq_kick-call-to-vhost_svq_add.patch b/kvm-vhost-Move-vhost_svq_kick-call-to-vhost_svq_add.patch new file mode 100644 index 0000000..513d7b4 --- /dev/null +++ b/kvm-vhost-Move-vhost_svq_kick-call-to-vhost_svq_add.patch @@ -0,0 +1,61 @@ +From 3a944d8cd3d35b2398ff68d9ed8ea51d27dfab3c Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= +Date: Thu, 21 Jul 2022 15:38:55 +0200 +Subject: [PATCH 12/32] vhost: Move vhost_svq_kick call to vhost_svq_add +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eugenio Pérez +RH-MergeRequest: 108: Net Control Virtqueue shadow Support +RH-Commit: [12/27] 29a7e1fb4992c4beca1e9a3379bb4c8a0f567459 (eperezmartin/qemu-kvm) +RH-Bugzilla: 1939363 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Cindy Lu +RH-Acked-by: Laurent Vivier + +Bugzilla: https://bugzilla.redhat.com/1939363 + +Upstream Status: git://git.qemu.org/qemu.git + +commit 98b5adef8493a2bfad6655cfee84299e88bedbf7 +Author: Eugenio Pérez +Date: Wed Jul 20 08:59:31 2022 +0200 + + vhost: Move vhost_svq_kick call to vhost_svq_add + + The series needs to expose vhost_svq_add with full functionality, + including kick + + Signed-off-by: Eugenio Pérez + Reviewed-by: Michael S.
Tsirkin + Signed-off-by: Jason Wang + +Signed-off-by: Eugenio Pérez +--- + hw/virtio/vhost-shadow-virtqueue.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c +index 05cd39d1eb..e3fc3c2658 100644 +--- a/hw/virtio/vhost-shadow-virtqueue.c ++++ b/hw/virtio/vhost-shadow-virtqueue.c +@@ -246,6 +246,7 @@ static bool vhost_svq_add(VhostShadowVirtqueue *svq, VirtQueueElement *elem) + } + + svq->ring_id_maps[qemu_head] = elem; ++ vhost_svq_kick(svq); + return true; + } + +@@ -306,7 +307,6 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq) + /* VQ is broken, just return and ignore any other kicks */ + return; + } +- vhost_svq_kick(svq); + } + + virtio_queue_set_notification(svq->vq, true); +-- +2.31.1 + diff --git a/kvm-vhost-Reorder-vhost_svq_kick.patch b/kvm-vhost-Reorder-vhost_svq_kick.patch new file mode 100644 index 0000000..f61f3c3 --- /dev/null +++ b/kvm-vhost-Reorder-vhost_svq_kick.patch @@ -0,0 +1,88 @@ +From fdbf66e4c70de16ab36d70ea591322b1b24df591 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= +Date: Thu, 21 Jul 2022 15:38:55 +0200 +Subject: [PATCH 11/32] vhost: Reorder vhost_svq_kick +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eugenio Pérez +RH-MergeRequest: 108: Net Control Virtqueue shadow Support +RH-Commit: [11/27] 1d08b97eb3960a0f85f2dd48c3331b803f7ea205 (eperezmartin/qemu-kvm) +RH-Bugzilla: 1939363 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Cindy Lu +RH-Acked-by: Laurent Vivier + +Bugzilla: https://bugzilla.redhat.com/1939363 + +Upstream Status: git://git.qemu.org/qemu.git + +commit d93a2405ca6efa9dc1c420cee5a34bd8242818d0 +Author: Eugenio Pérez +Date: Wed Jul 20 08:59:30 2022 +0200 + + vhost: Reorder vhost_svq_kick + + Future code needs to call it from vhost_svq_add. + + No functional change intended. + + Signed-off-by: Eugenio Pérez + Reviewed-by: Michael S. Tsirkin + Signed-off-by: Jason Wang + +Signed-off-by: Eugenio Pérez +--- + hw/virtio/vhost-shadow-virtqueue.c | 28 ++++++++++++++-------------- + 1 file changed, 14 insertions(+), 14 deletions(-) + +diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c +index 9c46c3a8fa..05cd39d1eb 100644 +--- a/hw/virtio/vhost-shadow-virtqueue.c ++++ b/hw/virtio/vhost-shadow-virtqueue.c +@@ -215,6 +215,20 @@ static bool vhost_svq_add_split(VhostShadowVirtqueue *svq, + return true; + } + ++static void vhost_svq_kick(VhostShadowVirtqueue *svq) ++{ ++ /* ++ * We need to expose the available array entries before checking the used ++ * flags ++ */ ++ smp_mb(); ++ if (svq->vring.used->flags & VRING_USED_F_NO_NOTIFY) { ++ return; ++ } ++ ++ event_notifier_set(&svq->hdev_kick); ++} ++ + /** + * Add an element to a SVQ. + * +@@ -235,20 +249,6 @@ static bool vhost_svq_add(VhostShadowVirtqueue *svq, VirtQueueElement *elem) + return true; + } + +-static void vhost_svq_kick(VhostShadowVirtqueue *svq) +-{ +- /* +- * We need to expose the available array entries before checking the used +- * flags +- */ +- smp_mb(); +- if (svq->vring.used->flags & VRING_USED_F_NO_NOTIFY) { +- return; +- } +- +- event_notifier_set(&svq->hdev_kick); +-} +- + /** + * Forward available buffers. 
+ * +-- +2.31.1 +
diff --git a/kvm-vhost-Track-descriptor-chain-in-private-at-SVQ.patch b/kvm-vhost-Track-descriptor-chain-in-private-at-SVQ.patch new file mode 100644 index 0000000..31bfccc --- /dev/null +++ b/kvm-vhost-Track-descriptor-chain-in-private-at-SVQ.patch @@ -0,0 +1,123 @@ +From 486647551223cc01f4dba87197030bbf4e674f0f Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= +Date: Thu, 21 Jul 2022 15:24:48 +0200 +Subject: [PATCH 01/32] vhost: Track descriptor chain in private at SVQ +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eugenio Pérez +RH-MergeRequest: 108: Net Control Virtqueue shadow Support +RH-Commit: [1/27] 26d16dc383e3064ac6e4288d5c52b39fee0ad204 (eperezmartin/qemu-kvm) +RH-Bugzilla: 1939363 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Cindy Lu +RH-Acked-by: Laurent Vivier + +Bugzilla: https://bugzilla.redhat.com/1939363 + +Upstream Status: git://git.qemu.org/qemu.git + +commit 495fe3a78749c39c0e772c4e1a55d6cb8a7e5292 +Author: Eugenio Pérez +Date: Thu May 12 19:57:42 2022 +0200 + + vhost: Track descriptor chain in private at SVQ + + The device could have access to modify them, and it definitely has + access when we implement packed vq. Harden SVQ by maintaining a private + copy of the descriptor chain. Other fields, like buffer addresses, are + already maintained separately. + + Signed-off-by: Eugenio Pérez + Message-Id: <20220512175747.142058-2-eperezma@redhat.com> + Reviewed-by: Michael S. Tsirkin + Signed-off-by: Michael S. Tsirkin + +Signed-off-by: Eugenio Pérez +--- + hw/virtio/vhost-shadow-virtqueue.c | 12 +++++++----- + hw/virtio/vhost-shadow-virtqueue.h | 6 ++++++ + 2 files changed, 13 insertions(+), 5 deletions(-) + +diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c +index b232803d1b..3155801f50 100644 +--- a/hw/virtio/vhost-shadow-virtqueue.c ++++ b/hw/virtio/vhost-shadow-virtqueue.c +@@ -138,6 +138,7 @@ static void vhost_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg, + for (n = 0; n < num; n++) { + if (more_descs || (n + 1 < num)) { + descs[i].flags = flags | cpu_to_le16(VRING_DESC_F_NEXT); ++ descs[i].next = cpu_to_le16(svq->desc_next[i]); + } else { + descs[i].flags = flags; + } +@@ -145,10 +146,10 @@ static void vhost_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg, + descs[i].len = cpu_to_le32(iovec[n].iov_len); + + last = i; +- i = cpu_to_le16(descs[i].next); ++ i = cpu_to_le16(svq->desc_next[i]); + } + +- svq->free_head = le16_to_cpu(descs[last].next); ++ svq->free_head = le16_to_cpu(svq->desc_next[last]); + } + + static bool vhost_svq_add_split(VhostShadowVirtqueue *svq, +@@ -336,7 +337,6 @@ static void vhost_svq_disable_notification(VhostShadowVirtqueue *svq) + static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq, + uint32_t *len) + { +- vring_desc_t *descs = svq->vring.desc; + const vring_used_t *used = svq->vring.used; + vring_used_elem_t used_elem; + uint16_t last_used; +@@ -365,7 +365,7 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq, + return NULL; + } + +- descs[used_elem.id].next = svq->free_head; ++ svq->desc_next[used_elem.id] = svq->free_head; + svq->free_head = used_elem.id; + + *len = used_elem.len; +@@ -540,8 +540,9 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev, + svq->vring.used = qemu_memalign(qemu_real_host_page_size, device_size); + memset(svq->vring.used, 0, device_size); + svq->ring_id_maps = g_new0(VirtQueueElement *, svq->vring.num); ++
svq->desc_next = g_new0(uint16_t, svq->vring.num); + for (unsigned i = 0; i < svq->vring.num - 1; i++) { +- svq->vring.desc[i].next = cpu_to_le16(i + 1); ++ svq->desc_next[i] = cpu_to_le16(i + 1); + } + } + +@@ -574,6 +575,7 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq) + virtqueue_detach_element(svq->vq, next_avail_elem, 0); + } + svq->vq = NULL; ++ g_free(svq->desc_next); + g_free(svq->ring_id_maps); + qemu_vfree(svq->vring.desc); + qemu_vfree(svq->vring.used); +diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h +index e5e24c536d..c132c994e9 100644 +--- a/hw/virtio/vhost-shadow-virtqueue.h ++++ b/hw/virtio/vhost-shadow-virtqueue.h +@@ -53,6 +53,12 @@ typedef struct VhostShadowVirtqueue { + /* Next VirtQueue element that guest made available */ + VirtQueueElement *next_guest_avail_elem; + ++ /* ++ * Backup next field for each descriptor so we can recover securely, not ++ * needing to trust the device access. ++ */ ++ uint16_t *desc_next; ++ + /* Next head to expose to the device */ + uint16_t shadow_avail_idx; + +-- +2.31.1 + diff --git a/kvm-vhost-Track-number-of-descs-in-SVQDescState.patch b/kvm-vhost-Track-number-of-descs-in-SVQDescState.patch new file mode 100644 index 0000000..6a2e147 --- /dev/null +++ b/kvm-vhost-Track-number-of-descs-in-SVQDescState.patch @@ -0,0 +1,81 @@ +From 24b8cf88f53f9fc7cb393c9cad908f759980bfee Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= +Date: Thu, 21 Jul 2022 15:38:55 +0200 +Subject: [PATCH 16/32] vhost: Track number of descs in SVQDescState +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eugenio Pérez +RH-MergeRequest: 108: Net Control Virtqueue shadow Support +RH-Commit: [16/27] 26f30cb6dd35c1eb1ddabe25113431bed3d744aa (eperezmartin/qemu-kvm) +RH-Bugzilla: 1939363 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Cindy Lu +RH-Acked-by: Laurent Vivier + +Bugzilla: https://bugzilla.redhat.com/1939363 + +Upstream Status: git://git.qemu.org/qemu.git + +commit ac4cfdc6f39c06732d27554523f9d5f8a53b4ffa +Author: Eugenio Pérez +Date: Wed Jul 20 08:59:35 2022 +0200 + + vhost: Track number of descs in SVQDescState + + A guest's buffer continuos on GPA may need multiple descriptors on + qemu's VA, so SVQ should track its length sepparatedly. + + Signed-off-by: Eugenio Pérez + Reviewed-by: Michael S. 
Tsirkin + Signed-off-by: Jason Wang + +Signed-off-by: Eugenio Pérez +--- + hw/virtio/vhost-shadow-virtqueue.c | 4 ++-- + hw/virtio/vhost-shadow-virtqueue.h | 6 ++++++ + 2 files changed, 8 insertions(+), 2 deletions(-) + +diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c +index a08e3d4025..4d99075e73 100644 +--- a/hw/virtio/vhost-shadow-virtqueue.c ++++ b/hw/virtio/vhost-shadow-virtqueue.c +@@ -257,6 +257,7 @@ static int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg, + } + + svq->desc_state[qemu_head].elem = elem; ++ svq->desc_state[qemu_head].ndescs = ndescs; + vhost_svq_kick(svq); + return 0; + } +@@ -418,8 +419,7 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq, + return NULL; + } + +- num = svq->desc_state[used_elem.id].elem->in_num + +- svq->desc_state[used_elem.id].elem->out_num; ++ num = svq->desc_state[used_elem.id].ndescs; + last_used_chain = vhost_svq_last_desc_of_chain(svq, num, used_elem.id); + svq->desc_next[last_used_chain] = svq->free_head; + svq->free_head = used_elem.id; +diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h +index d646c35054..5c7e7cbab6 100644 +--- a/hw/virtio/vhost-shadow-virtqueue.h ++++ b/hw/virtio/vhost-shadow-virtqueue.h +@@ -17,6 +17,12 @@ + + typedef struct SVQDescState { + VirtQueueElement *elem; ++ ++ /* ++ * Number of descriptors exposed to the device. May or may not match ++ * guest's ++ */ ++ unsigned int ndescs; + } SVQDescState; + + /* Shadow virtqueue to relay notifications */ +-- +2.31.1 + diff --git a/kvm-vhost-add-vhost_svq_poll.patch b/kvm-vhost-add-vhost_svq_poll.patch new file mode 100644 index 0000000..fa27e5e --- /dev/null +++ b/kvm-vhost-add-vhost_svq_poll.patch @@ -0,0 +1,92 @@ +From 0ab3da1092362470d256b433c546bd365d34f930 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= +Date: Thu, 21 Jul 2022 15:38:55 +0200 +Subject: [PATCH 19/32] vhost: add vhost_svq_poll +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eugenio Pérez +RH-MergeRequest: 108: Net Control Virtqueue shadow Support +RH-Commit: [19/27] 6807bb0bb6e5183b46a03b12b4027c7d767e8555 (eperezmartin/qemu-kvm) +RH-Bugzilla: 1939363 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Cindy Lu +RH-Acked-by: Laurent Vivier + +Bugzilla: https://bugzilla.redhat.com/1939363 + +Upstream Status: git://git.qemu.org/qemu.git + +commit 3f44d13dda83d390cc9563e56e7d337e4f6223f4 +Author: Eugenio Pérez +Date: Wed Jul 20 08:59:38 2022 +0200 + + vhost: add vhost_svq_poll + + It allows the Shadow Control VirtQueue to wait for the device to use the + available buffers. + + Signed-off-by: Eugenio Pérez + Reviewed-by: Michael S. Tsirkin + Signed-off-by: Jason Wang + +Signed-off-by: Eugenio Pérez +--- + hw/virtio/vhost-shadow-virtqueue.c | 27 +++++++++++++++++++++++++++ + hw/virtio/vhost-shadow-virtqueue.h | 1 + + 2 files changed, 28 insertions(+) + +diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c +index cb879e7b88..95d0d7a7ee 100644 +--- a/hw/virtio/vhost-shadow-virtqueue.c ++++ b/hw/virtio/vhost-shadow-virtqueue.c +@@ -485,6 +485,33 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq, + } while (!vhost_svq_enable_notification(svq)); + } + ++/** ++ * Poll the SVQ for one device used buffer. ++ * ++ * This function race with main event loop SVQ polling, so extra ++ * synchronization is needed. ++ * ++ * Return the length written by the device. 
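++ *
++ * Note: this busy-waits, and gives up after 10 seconds of monotonic time
++ * (10e6 us) without a used buffer, returning 0 in that case.
++ *
++ * Hypothetical caller sketch (names as introduced in this series):
++ *   if (vhost_svq_add(svq, out_sg, out_num, in_sg, in_num, elem) == 0) {
++ *       size_t len = vhost_svq_poll(svq);
++ *   }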
++ */ ++size_t vhost_svq_poll(VhostShadowVirtqueue *svq) ++{ ++ int64_t start_us = g_get_monotonic_time(); ++ do { ++ uint32_t len; ++ VirtQueueElement *elem = vhost_svq_get_buf(svq, &len); ++ if (elem) { ++ return len; ++ } ++ ++ if (unlikely(g_get_monotonic_time() - start_us > 10e6)) { ++ return 0; ++ } ++ ++ /* Make sure we read new used_idx */ ++ smp_rmb(); ++ } while (true); ++} ++ + /** + * Forward used buffers. + * +diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h +index dd78f4bec2..cf442f7dea 100644 +--- a/hw/virtio/vhost-shadow-virtqueue.h ++++ b/hw/virtio/vhost-shadow-virtqueue.h +@@ -89,6 +89,7 @@ void vhost_svq_push_elem(VhostShadowVirtqueue *svq, + int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg, + size_t out_num, const struct iovec *in_sg, size_t in_num, + VirtQueueElement *elem); ++size_t vhost_svq_poll(VhostShadowVirtqueue *svq); + + void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd); + void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd); +-- +2.31.1 + diff --git a/kvm-vhost-add-vhost_svq_push_elem.patch b/kvm-vhost-add-vhost_svq_push_elem.patch new file mode 100644 index 0000000..2a9ec40 --- /dev/null +++ b/kvm-vhost-add-vhost_svq_push_elem.patch @@ -0,0 +1,83 @@ +From a26eb02b3a49c5d1163685ba5b83b67138c09047 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= +Date: Thu, 21 Jul 2022 15:38:55 +0200 +Subject: [PATCH 17/32] vhost: add vhost_svq_push_elem +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eugenio Pérez +RH-MergeRequest: 108: Net Control Virtqueue shadow Support +RH-Commit: [17/27] d064b40a262f2dfdc9f648d250aa8c8020c40385 (eperezmartin/qemu-kvm) +RH-Bugzilla: 1939363 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Cindy Lu +RH-Acked-by: Laurent Vivier + +Bugzilla: https://bugzilla.redhat.com/1939363 + +Upstream Status: git://git.qemu.org/qemu.git + +commit 432efd144e990b6e040862de25f8f0b6a6eeb03d +Author: Eugenio Pérez +Date: Wed Jul 20 08:59:36 2022 +0200 + + vhost: add vhost_svq_push_elem + + This function allows external SVQ users to return guest's available + buffers. + + Signed-off-by: Eugenio Pérez + Reviewed-by: Michael S. Tsirkin + Signed-off-by: Jason Wang + +Signed-off-by: Eugenio Pérez +--- + hw/virtio/vhost-shadow-virtqueue.c | 16 ++++++++++++++++ + hw/virtio/vhost-shadow-virtqueue.h | 3 +++ + 2 files changed, 19 insertions(+) + +diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c +index 4d99075e73..1ce52d5b4a 100644 +--- a/hw/virtio/vhost-shadow-virtqueue.c ++++ b/hw/virtio/vhost-shadow-virtqueue.c +@@ -428,6 +428,22 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq, + return g_steal_pointer(&svq->desc_state[used_elem.id].elem); + } + ++/** ++ * Push an element to SVQ, returning it to the guest. ++ */ ++void vhost_svq_push_elem(VhostShadowVirtqueue *svq, ++ const VirtQueueElement *elem, uint32_t len) ++{ ++ virtqueue_push(svq->vq, elem, len); ++ if (svq->next_guest_avail_elem) { ++ /* ++ * Avail ring was full when vhost_svq_flush was called, so it's a ++ * good moment to make more descriptors available if possible. 
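++ * (next_guest_avail_elem is the guest's element that was stashed when
++ * the avail ring filled up; see vhost_handle_guest_kick.)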
++ */ ++ vhost_handle_guest_kick(svq); ++ } ++} ++ + static void vhost_svq_flush(VhostShadowVirtqueue *svq, + bool check_for_avail_queue) + { +diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h +index 5c7e7cbab6..d9fc1f1799 100644 +--- a/hw/virtio/vhost-shadow-virtqueue.h ++++ b/hw/virtio/vhost-shadow-virtqueue.h +@@ -84,6 +84,9 @@ typedef struct VhostShadowVirtqueue { + + bool vhost_svq_valid_features(uint64_t features, Error **errp); + ++void vhost_svq_push_elem(VhostShadowVirtqueue *svq, ++ const VirtQueueElement *elem, uint32_t len); ++ + void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd); + void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd); + void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq, +-- +2.31.1 + diff --git a/kvm-vhost-move-descriptor-translation-to-vhost_svq_vring.patch b/kvm-vhost-move-descriptor-translation-to-vhost_svq_vring.patch new file mode 100644 index 0000000..08bcaf2 --- /dev/null +++ b/kvm-vhost-move-descriptor-translation-to-vhost_svq_vring.patch @@ -0,0 +1,120 @@ +From 2bdea90bfbce3b8d5bfa86178a942a470b85b835 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= +Date: Thu, 21 Jul 2022 15:38:55 +0200 +Subject: [PATCH 07/32] vhost: move descriptor translation to + vhost_svq_vring_write_descs +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eugenio Pérez +RH-MergeRequest: 108: Net Control Virtqueue shadow Support +RH-Commit: [7/27] 5533c72065e4ebf8ea7db966c976a3b29bdafb82 (eperezmartin/qemu-kvm) +RH-Bugzilla: 1939363 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Cindy Lu +RH-Acked-by: Laurent Vivier + +Bugzilla: https://bugzilla.redhat.com/1939363 + +Upstream Status: git://git.qemu.org/qemu.git + +commit 009c2549bb9dc7f7061009eb87f2a53d4b364983 +Author: Eugenio Pérez +Date: Wed Jul 20 08:59:26 2022 +0200 + + vhost: move descriptor translation to vhost_svq_vring_write_descs + + It's done for both in and out descriptors so it's better placed here. + + Acked-by: Jason Wang + Signed-off-by: Eugenio Pérez + Reviewed-by: Michael S. Tsirkin + Signed-off-by: Jason Wang + +Signed-off-by: Eugenio Pérez +--- + hw/virtio/vhost-shadow-virtqueue.c | 38 +++++++++++++++++++++--------- + 1 file changed, 27 insertions(+), 11 deletions(-) + +diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c +index 06d0bb39d9..3fbda1e3d4 100644 +--- a/hw/virtio/vhost-shadow-virtqueue.c ++++ b/hw/virtio/vhost-shadow-virtqueue.c +@@ -122,17 +122,35 @@ static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq, + return true; + } + +-static void vhost_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg, +- const struct iovec *iovec, size_t num, +- bool more_descs, bool write) ++/** ++ * Write descriptors to SVQ vring ++ * ++ * @svq: The shadow virtqueue ++ * @sg: Cache for hwaddr ++ * @iovec: The iovec from the guest ++ * @num: iovec length ++ * @more_descs: True if more descriptors come in the chain ++ * @write: True if they are writeable descriptors ++ * ++ * Return true if success, false otherwise and print error. ++ */ ++static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg, ++ const struct iovec *iovec, size_t num, ++ bool more_descs, bool write) + { + uint16_t i = svq->free_head, last = svq->free_head; + unsigned n; + uint16_t flags = write ? 
cpu_to_le16(VRING_DESC_F_WRITE) : 0; + vring_desc_t *descs = svq->vring.desc; ++ bool ok; + + if (num == 0) { +- return; ++ return true; ++ } ++ ++ ok = vhost_svq_translate_addr(svq, sg, iovec, num); ++ if (unlikely(!ok)) { ++ return false; + } + + for (n = 0; n < num; n++) { +@@ -150,6 +168,7 @@ static void vhost_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg, + } + + svq->free_head = le16_to_cpu(svq->desc_next[last]); ++ return true; + } + + static bool vhost_svq_add_split(VhostShadowVirtqueue *svq, +@@ -169,21 +188,18 @@ static bool vhost_svq_add_split(VhostShadowVirtqueue *svq, + return false; + } + +- ok = vhost_svq_translate_addr(svq, sgs, elem->out_sg, elem->out_num); ++ ok = vhost_svq_vring_write_descs(svq, sgs, elem->out_sg, elem->out_num, ++ elem->in_num > 0, false); + if (unlikely(!ok)) { + return false; + } +- vhost_vring_write_descs(svq, sgs, elem->out_sg, elem->out_num, +- elem->in_num > 0, false); +- + +- ok = vhost_svq_translate_addr(svq, sgs, elem->in_sg, elem->in_num); ++ ok = vhost_svq_vring_write_descs(svq, sgs, elem->in_sg, elem->in_num, false, ++ true); + if (unlikely(!ok)) { + return false; + } + +- vhost_vring_write_descs(svq, sgs, elem->in_sg, elem->in_num, false, true); +- + /* + * Put the entry in the available array (but don't update avail->idx until + * they do sync). +-- +2.31.1 + diff --git a/kvm-vhost-net-vdpa-add-stubs-for-when-no-virtio-net-devi.patch b/kvm-vhost-net-vdpa-add-stubs-for-when-no-virtio-net-devi.patch new file mode 100644 index 0000000..31677fd --- /dev/null +++ b/kvm-vhost-net-vdpa-add-stubs-for-when-no-virtio-net-devi.patch @@ -0,0 +1,87 @@ +From a9095850da8dd4ea3fdb725cb7f79118144e22fa Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= +Date: Thu, 21 Jul 2022 15:39:27 +0200 +Subject: [PATCH 22/32] vhost-net-vdpa: add stubs for when no virtio-net device + is present +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eugenio Pérez +RH-MergeRequest: 108: Net Control Virtqueue shadow Support +RH-Commit: [22/27] a2b25a805bb06094a5fab27ce8f82bee12a9fcb5 (eperezmartin/qemu-kvm) +RH-Bugzilla: 1939363 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Cindy Lu +RH-Acked-by: Laurent Vivier + +Bugzilla: https://bugzilla.redhat.com/1939363 + +Upstream Status: git://git.qemu.org/qemu.git + +commit 94c643732dc110d04bbdf0eb43c41bce23b3593e +Author: Eugenio Pérez +Date: Wed Jul 20 08:59:41 2022 +0200 + + vhost-net-vdpa: add stubs for when no virtio-net device is present + + net/vhost-vdpa.c will need functions that are declared in + vhost-shadow-virtqueue.c, that needs functions of virtio-net.c. + + Copy the vhost-vdpa-stub.c code so + only the constructor net_init_vhost_vdpa needs to be defined. 
+ + Signed-off-by: Eugenio Pérez + Signed-off-by: Jason Wang + +Signed-off-by: Eugenio Pérez +--- + net/meson.build | 3 ++- + net/vhost-vdpa-stub.c | 21 +++++++++++++++++++++ + 2 files changed, 23 insertions(+), 1 deletion(-) + create mode 100644 net/vhost-vdpa-stub.c + +diff --git a/net/meson.build b/net/meson.build +index c965e83b26..116a9e7cbb 100644 +--- a/net/meson.build ++++ b/net/meson.build +@@ -41,7 +41,8 @@ endif + softmmu_ss.add(when: 'CONFIG_POSIX', if_true: files(tap_posix)) + softmmu_ss.add(when: 'CONFIG_WIN32', if_true: files('tap-win32.c')) + if have_vhost_net_vdpa +- softmmu_ss.add(files('vhost-vdpa.c')) ++ softmmu_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('vhost-vdpa.c'), if_false: files('vhost-vdpa-stub.c')) ++ softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('vhost-vdpa-stub.c')) + endif + + subdir('can') +diff --git a/net/vhost-vdpa-stub.c b/net/vhost-vdpa-stub.c +new file mode 100644 +index 0000000000..1732ed2443 +--- /dev/null ++++ b/net/vhost-vdpa-stub.c +@@ -0,0 +1,21 @@ ++/* ++ * vhost-vdpa-stub.c ++ * ++ * Copyright (c) 2022 Red Hat, Inc. ++ * ++ * This work is licensed under the terms of the GNU GPL, version 2 or later. ++ * See the COPYING file in the top-level directory. ++ * ++ */ ++ ++#include "qemu/osdep.h" ++#include "clients.h" ++#include "net/vhost-vdpa.h" ++#include "qapi/error.h" ++ ++int net_init_vhost_vdpa(const Netdev *netdev, const char *name, ++ NetClientState *peer, Error **errp) ++{ ++ error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*"); ++ return -1; ++} +-- +2.31.1 + diff --git a/kvm-virtio-net-Expose-MAC_TABLE_ENTRIES.patch b/kvm-virtio-net-Expose-MAC_TABLE_ENTRIES.patch new file mode 100644 index 0000000..4ae4cc4 --- /dev/null +++ b/kvm-virtio-net-Expose-MAC_TABLE_ENTRIES.patch @@ -0,0 +1,69 @@ +From dffe24d5c1f5a4676e9d2a5bc032effd420b008f Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= +Date: Thu, 21 Jul 2022 15:38:55 +0200 +Subject: [PATCH 08/32] virtio-net: Expose MAC_TABLE_ENTRIES +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eugenio Pérez +RH-MergeRequest: 108: Net Control Virtqueue shadow Support +RH-Commit: [8/27] 5c3b96215ddf853cafc594da47f57d7e157db4ee (eperezmartin/qemu-kvm) +RH-Bugzilla: 1939363 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Cindy Lu +RH-Acked-by: Laurent Vivier + +Bugzilla: https://bugzilla.redhat.com/1939363 + +Upstream Status: git://git.qemu.org/qemu.git + +commit 6758c01f054c2a842d41d927d628b09f649d3254 +Author: Eugenio Pérez +Date: Wed Jul 20 08:59:27 2022 +0200 + + virtio-net: Expose MAC_TABLE_ENTRIES + + vhost-vdpa control virtqueue needs to know the maximum entries supported + by the virtio-net device, so we know if it is possible to apply the + filter. + + Signed-off-by: Eugenio Pérez + Reviewed-by: Michael S. 
Tsirkin + Signed-off-by: Jason Wang + +Signed-off-by: Eugenio Pérez +--- + hw/net/virtio-net.c | 1 - + include/hw/virtio/virtio-net.h | 3 +++ + 2 files changed, 3 insertions(+), 1 deletion(-) + +diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c +index 633de61513..2a127f0a3b 100644 +--- a/hw/net/virtio-net.c ++++ b/hw/net/virtio-net.c +@@ -49,7 +49,6 @@ + + #define VIRTIO_NET_VM_VERSION 11 + +-#define MAC_TABLE_ENTRIES 64 + #define MAX_VLAN (1 << 12) /* Per 802.1Q definition */ + + /* previously fixed value */ +diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h +index eb87032627..cce1c554f7 100644 +--- a/include/hw/virtio/virtio-net.h ++++ b/include/hw/virtio/virtio-net.h +@@ -35,6 +35,9 @@ OBJECT_DECLARE_SIMPLE_TYPE(VirtIONet, VIRTIO_NET) + * and latency. */ + #define TX_BURST 256 + ++/* Maximum VIRTIO_NET_CTRL_MAC_TABLE_SET unicast + multicast entries. */ ++#define MAC_TABLE_ENTRIES 64 ++ + typedef struct virtio_net_conf + { + uint32_t txtimer; +-- +2.31.1 + diff --git a/kvm-virtio-net-Expose-ctrl-virtqueue-logic.patch b/kvm-virtio-net-Expose-ctrl-virtqueue-logic.patch new file mode 100644 index 0000000..b4b9012 --- /dev/null +++ b/kvm-virtio-net-Expose-ctrl-virtqueue-logic.patch @@ -0,0 +1,169 @@ +From 49e91b34b62f5da147fa2fb80d203dd675c48f64 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= +Date: Thu, 21 Jul 2022 15:38:55 +0200 +Subject: [PATCH 09/32] virtio-net: Expose ctrl virtqueue logic +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +RH-Author: Eugenio Pérez +RH-MergeRequest: 108: Net Control Virtqueue shadow Support +RH-Commit: [9/27] c4ab1e35f4ca728df82a687763c662369282c513 (eperezmartin/qemu-kvm) +RH-Bugzilla: 1939363 +RH-Acked-by: Stefano Garzarella +RH-Acked-by: Cindy Lu +RH-Acked-by: Laurent Vivier + +Bugzilla: https://bugzilla.redhat.com/1939363 + +Upstream Status: git://git.qemu.org/qemu.git + +commit 640b8a1c588b56349b3307d88459ea1cd86181fb +Author: Eugenio Pérez +Date: Wed Jul 20 08:59:28 2022 +0200 + + virtio-net: Expose ctrl virtqueue logic + + This allows external vhost-net devices to modify the state of the + VirtIO device model once the vhost-vdpa device has acknowledged the + control commands. + + Signed-off-by: Eugenio Pérez + Reviewed-by: Michael S. 
Tsirkin + Signed-off-by: Jason Wang + +Signed-off-by: Eugenio Pérez +--- + hw/net/virtio-net.c | 84 ++++++++++++++++++++-------------- + include/hw/virtio/virtio-net.h | 4 ++ + 2 files changed, 53 insertions(+), 35 deletions(-) + +diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c +index 2a127f0a3b..59bedba681 100644 +--- a/hw/net/virtio-net.c ++++ b/hw/net/virtio-net.c +@@ -1433,57 +1433,71 @@ static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd, + return VIRTIO_NET_OK; + } + +-static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) ++size_t virtio_net_handle_ctrl_iov(VirtIODevice *vdev, ++ const struct iovec *in_sg, unsigned in_num, ++ const struct iovec *out_sg, ++ unsigned out_num) + { + VirtIONet *n = VIRTIO_NET(vdev); + struct virtio_net_ctrl_hdr ctrl; + virtio_net_ctrl_ack status = VIRTIO_NET_ERR; +- VirtQueueElement *elem; + size_t s; + struct iovec *iov, *iov2; +- unsigned int iov_cnt; ++ ++ if (iov_size(in_sg, in_num) < sizeof(status) || ++ iov_size(out_sg, out_num) < sizeof(ctrl)) { ++ virtio_error(vdev, "virtio-net ctrl missing headers"); ++ return 0; ++ } ++ ++ iov2 = iov = g_memdup2(out_sg, sizeof(struct iovec) * out_num); ++ s = iov_to_buf(iov, out_num, 0, &ctrl, sizeof(ctrl)); ++ iov_discard_front(&iov, &out_num, sizeof(ctrl)); ++ if (s != sizeof(ctrl)) { ++ status = VIRTIO_NET_ERR; ++ } else if (ctrl.class == VIRTIO_NET_CTRL_RX) { ++ status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, out_num); ++ } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) { ++ status = virtio_net_handle_mac(n, ctrl.cmd, iov, out_num); ++ } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) { ++ status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, out_num); ++ } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) { ++ status = virtio_net_handle_announce(n, ctrl.cmd, iov, out_num); ++ } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) { ++ status = virtio_net_handle_mq(n, ctrl.cmd, iov, out_num); ++ } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) { ++ status = virtio_net_handle_offloads(n, ctrl.cmd, iov, out_num); ++ } ++ ++ s = iov_from_buf(in_sg, in_num, 0, &status, sizeof(status)); ++ assert(s == sizeof(status)); ++ ++ g_free(iov2); ++ return sizeof(status); ++} ++ ++static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) ++{ ++ VirtQueueElement *elem; + + for (;;) { ++ size_t written; + elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); + if (!elem) { + break; + } +- if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) || +- iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) { +- virtio_error(vdev, "virtio-net ctrl missing headers"); ++ ++ written = virtio_net_handle_ctrl_iov(vdev, elem->in_sg, elem->in_num, ++ elem->out_sg, elem->out_num); ++ if (written > 0) { ++ virtqueue_push(vq, elem, written); ++ virtio_notify(vdev, vq); ++ g_free(elem); ++ } else { + virtqueue_detach_element(vq, elem, 0); + g_free(elem); + break; + } +- +- iov_cnt = elem->out_num; +- iov2 = iov = g_memdup2(elem->out_sg, +- sizeof(struct iovec) * elem->out_num); +- s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl)); +- iov_discard_front(&iov, &iov_cnt, sizeof(ctrl)); +- if (s != sizeof(ctrl)) { +- status = VIRTIO_NET_ERR; +- } else if (ctrl.class == VIRTIO_NET_CTRL_RX) { +- status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt); +- } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) { +- status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt); +- } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) { +- status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt); +- } else if 
(ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) { +- status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt); +- } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) { +- status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt); +- } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) { +- status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt); +- } +- +- s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status)); +- assert(s == sizeof(status)); +- +- virtqueue_push(vq, elem, sizeof(status)); +- virtio_notify(vdev, vq); +- g_free(iov2); +- g_free(elem); + } + } + +diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h +index cce1c554f7..ef234ffe7e 100644 +--- a/include/hw/virtio/virtio-net.h ++++ b/include/hw/virtio/virtio-net.h +@@ -221,6 +221,10 @@ struct VirtIONet { + struct EBPFRSSContext ebpf_rss; + }; + ++size_t virtio_net_handle_ctrl_iov(VirtIODevice *vdev, ++ const struct iovec *in_sg, unsigned in_num, ++ const struct iovec *out_sg, ++ unsigned out_num); + void virtio_net_set_netclient_name(VirtIONet *n, const char *name, + const char *type); + +-- +2.31.1 + diff --git a/qemu-kvm.spec b/qemu-kvm.spec index 207d6e7..9c43f37 100644 --- a/qemu-kvm.spec +++ b/qemu-kvm.spec @@ -151,7 +151,7 @@ Obsoletes: %{name}-block-ssh <= %{epoch}:%{version} \ Summary: QEMU is a machine emulator and virtualizer Name: qemu-kvm Version: 7.0.0 -Release: 9%{?rcrel}%{?dist}%{?cc_suffix} +Release: 10%{?rcrel}%{?dist}%{?cc_suffix} # Epoch because we pushed a qemu-1.0 package. AIUI this can't ever be dropped # Epoch 15 used for RHEL 8 # Epoch 17 used for RHEL 9 (due to release versioning offset in RHEL 8.5) @@ -358,6 +358,70 @@ Patch101: kvm-pc-bios-s390-ccw-netboot.mak-Ignore-Clang-s-warnings.patch Patch102: kvm-hw-block-fdc-Prevent-end-of-track-overrun-CVE-2021-3.patch # For bz#1951522 - CVE-2021-3507 qemu-kvm: QEMU: fdc: heap buffer overflow in DMA read data transfers [rhel-9.0] Patch103: kvm-tests-qtest-fdc-test-Add-a-regression-test-for-CVE-2.patch +# For bz#1939363 - vDPA control virtqueue support in Qemu +Patch104: kvm-vhost-Track-descriptor-chain-in-private-at-SVQ.patch +# For bz#1939363 - vDPA control virtqueue support in Qemu +Patch105: kvm-vhost-Fix-device-s-used-descriptor-dequeue.patch +# For bz#1939363 - vDPA control virtqueue support in Qemu +Patch106: kvm-hw-virtio-Replace-g_memdup-by-g_memdup2.patch +# For bz#1939363 - vDPA control virtqueue support in Qemu +Patch107: kvm-vhost-Fix-element-in-vhost_svq_add-failure.patch +# For bz#1939363 - vDPA control virtqueue support in Qemu +Patch108: kvm-meson-create-have_vhost_-variables.patch +# For bz#1939363 - vDPA control virtqueue support in Qemu +Patch109: kvm-meson-use-have_vhost_-variables-to-pick-sources.patch +# For bz#1939363 - vDPA control virtqueue support in Qemu +Patch110: kvm-vhost-move-descriptor-translation-to-vhost_svq_vring.patch +# For bz#1939363 - vDPA control virtqueue support in Qemu +Patch111: kvm-virtio-net-Expose-MAC_TABLE_ENTRIES.patch +# For bz#1939363 - vDPA control virtqueue support in Qemu +Patch112: kvm-virtio-net-Expose-ctrl-virtqueue-logic.patch +# For bz#1939363 - vDPA control virtqueue support in Qemu +Patch113: kvm-vdpa-Avoid-compiler-to-squash-reads-to-used-idx.patch +# For bz#1939363 - vDPA control virtqueue support in Qemu +Patch114: kvm-vhost-Reorder-vhost_svq_kick.patch +# For bz#1939363 - vDPA control virtqueue support in Qemu +Patch115: kvm-vhost-Move-vhost_svq_kick-call-to-vhost_svq_add.patch +# For bz#1939363 - vDPA control virtqueue support in Qemu +Patch116: 
kvm-vhost-Check-for-queue-full-at-vhost_svq_add.patch +# For bz#1939363 - vDPA control virtqueue support in Qemu +Patch117: kvm-vhost-Decouple-vhost_svq_add-from-VirtQueueElement.patch +# For bz#1939363 - vDPA control virtqueue support in Qemu +Patch118: kvm-vhost-Add-SVQDescState.patch +# For bz#1939363 - vDPA control virtqueue support in Qemu +Patch119: kvm-vhost-Track-number-of-descs-in-SVQDescState.patch +# For bz#1939363 - vDPA control virtqueue support in Qemu +Patch120: kvm-vhost-add-vhost_svq_push_elem.patch +# For bz#1939363 - vDPA control virtqueue support in Qemu +Patch121: kvm-vhost-Expose-vhost_svq_add.patch +# For bz#1939363 - vDPA control virtqueue support in Qemu +Patch122: kvm-vhost-add-vhost_svq_poll.patch +# For bz#1939363 - vDPA control virtqueue support in Qemu +Patch123: kvm-vhost-Add-svq-avail_handler-callback.patch +# For bz#1939363 - vDPA control virtqueue support in Qemu +Patch124: kvm-vdpa-Export-vhost_vdpa_dma_map-and-unmap-calls.patch +# For bz#1939363 - vDPA control virtqueue support in Qemu +Patch125: kvm-vhost-net-vdpa-add-stubs-for-when-no-virtio-net-devi.patch +# For bz#1939363 - vDPA control virtqueue support in Qemu +Patch126: kvm-vdpa-manual-forward-CVQ-buffers.patch +# For bz#1939363 - vDPA control virtqueue support in Qemu +Patch127: kvm-vdpa-Buffer-CVQ-support-on-shadow-virtqueue.patch +# For bz#1939363 - vDPA control virtqueue support in Qemu +Patch128: kvm-vdpa-Extract-get-features-part-from-vhost_vdpa_get_m.patch +# For bz#1939363 - vDPA control virtqueue support in Qemu +Patch129: kvm-vdpa-Add-device-migration-blocker.patch +# For bz#1939363 - vDPA control virtqueue support in Qemu +Patch130: kvm-vdpa-Add-x-svq-to-NetdevVhostVDPAOptions.patch +# For bz#2111994 - RHEL9: skey test in kvm_unit_test got failed +Patch131: kvm-redhat-Update-linux-headers-linux-kvm.h-to-v5.18-rc6.patch +# For bz#2111994 - RHEL9: skey test in kvm_unit_test got failed +Patch132: kvm-target-s390x-kvm-Honor-storage-keys-during-emulation.patch +# For bz#2095608 - Please correct the error message when try to start qemu with "-M kernel-irqchip=split" +Patch133: kvm-kvm-don-t-use-perror-without-useful-errno.patch +# For bz#2099934 - Guest reboot on destination host after postcopy migration completed +Patch134: kvm-multifd-Copy-pages-before-compressing-them-with-zlib.patch +# For bz#2099934 - Guest reboot on destination host after postcopy migration completed +Patch135: kvm-Revert-migration-Simplify-unqueue_page.patch # Source-git patches @@ -1393,6 +1457,48 @@ useradd -r -u 107 -g qemu -G kvm -d / -s /sbin/nologin \ %endif %changelog +* Mon Aug 08 2022 Miroslav Rezanina - 7.0.0-10 +- kvm-vhost-Track-descriptor-chain-in-private-at-SVQ.patch [bz#1939363] +- kvm-vhost-Fix-device-s-used-descriptor-dequeue.patch [bz#1939363] +- kvm-hw-virtio-Replace-g_memdup-by-g_memdup2.patch [bz#1939363] +- kvm-vhost-Fix-element-in-vhost_svq_add-failure.patch [bz#1939363] +- kvm-meson-create-have_vhost_-variables.patch [bz#1939363] +- kvm-meson-use-have_vhost_-variables-to-pick-sources.patch [bz#1939363] +- kvm-vhost-move-descriptor-translation-to-vhost_svq_vring.patch [bz#1939363] +- kvm-virtio-net-Expose-MAC_TABLE_ENTRIES.patch [bz#1939363] +- kvm-virtio-net-Expose-ctrl-virtqueue-logic.patch [bz#1939363] +- kvm-vdpa-Avoid-compiler-to-squash-reads-to-used-idx.patch [bz#1939363] +- kvm-vhost-Reorder-vhost_svq_kick.patch [bz#1939363] +- kvm-vhost-Move-vhost_svq_kick-call-to-vhost_svq_add.patch [bz#1939363] +- kvm-vhost-Check-for-queue-full-at-vhost_svq_add.patch [bz#1939363] +- 
kvm-vhost-Decouple-vhost_svq_add-from-VirtQueueElement.patch [bz#1939363] +- kvm-vhost-Add-SVQDescState.patch [bz#1939363] +- kvm-vhost-Track-number-of-descs-in-SVQDescState.patch [bz#1939363] +- kvm-vhost-add-vhost_svq_push_elem.patch [bz#1939363] +- kvm-vhost-Expose-vhost_svq_add.patch [bz#1939363] +- kvm-vhost-add-vhost_svq_poll.patch [bz#1939363] +- kvm-vhost-Add-svq-avail_handler-callback.patch [bz#1939363] +- kvm-vdpa-Export-vhost_vdpa_dma_map-and-unmap-calls.patch [bz#1939363] +- kvm-vhost-net-vdpa-add-stubs-for-when-no-virtio-net-devi.patch [bz#1939363] +- kvm-vdpa-manual-forward-CVQ-buffers.patch [bz#1939363] +- kvm-vdpa-Buffer-CVQ-support-on-shadow-virtqueue.patch [bz#1939363] +- kvm-vdpa-Extract-get-features-part-from-vhost_vdpa_get_m.patch [bz#1939363] +- kvm-vdpa-Add-device-migration-blocker.patch [bz#1939363] +- kvm-vdpa-Add-x-svq-to-NetdevVhostVDPAOptions.patch [bz#1939363] +- kvm-redhat-Update-linux-headers-linux-kvm.h-to-v5.18-rc6.patch [bz#2111994] +- kvm-target-s390x-kvm-Honor-storage-keys-during-emulation.patch [bz#2111994] +- kvm-kvm-don-t-use-perror-without-useful-errno.patch [bz#2095608] +- kvm-multifd-Copy-pages-before-compressing-them-with-zlib.patch [bz#2099934] +- kvm-Revert-migration-Simplify-unqueue_page.patch [bz#2099934] +- Resolves: bz#1939363 + (vDPA control virtqueue support in Qemu) +- Resolves: bz#2111994 + (RHEL9: skey test in kvm_unit_test got failed) +- Resolves: bz#2095608 + (Please correct the error message when try to start qemu with "-M kernel-irqchip=split") +- Resolves: bz#2099934 + (Guest reboot on destination host after postcopy migration completed) + * Mon Jul 18 2022 Miroslav Rezanina - 7.0.0-9 - kvm-virtio-iommu-Add-bypass-mode-support-to-assigned-dev.patch [bz#2100106] - kvm-virtio-iommu-Use-recursive-lock-to-avoid-deadlock.patch [bz#2100106]