qemu-kvm/kvm-vhost-fix-vq-dirty-bitmap-syncing-when-vIOMMU-is-ena.patch

* Tue Jan 17 2023 Miroslav Rezanina <mrezanin@redhat.com> - 7.2.0-5
- kvm-virtio-introduce-macro-VIRTIO_CONFIG_IRQ_IDX.patch [bz#1905805]
- kvm-virtio-pci-decouple-notifier-from-interrupt-process.patch [bz#1905805]
- kvm-virtio-pci-decouple-the-single-vector-from-the-inter.patch [bz#1905805]
- kvm-vhost-introduce-new-VhostOps-vhost_set_config_call.patch [bz#1905805]
- kvm-vhost-vdpa-add-support-for-config-interrupt.patch [bz#1905805]
- kvm-virtio-add-support-for-configure-interrupt.patch [bz#1905805]
- kvm-vhost-add-support-for-configure-interrupt.patch [bz#1905805]
- kvm-virtio-net-add-support-for-configure-interrupt.patch [bz#1905805]
- kvm-virtio-mmio-add-support-for-configure-interrupt.patch [bz#1905805]
- kvm-virtio-pci-add-support-for-configure-interrupt.patch [bz#1905805]
- kvm-s390x-s390-virtio-ccw-Activate-zPCI-features-on-s390.patch [bz#2159408]
- kvm-vhost-fix-vq-dirty-bitmap-syncing-when-vIOMMU-is-ena.patch [bz#2124856]
- kvm-block-drop-bdrv_remove_filter_or_cow_child.patch [bz#2155112]
- kvm-qed-Don-t-yield-in-bdrv_qed_co_drain_begin.patch [bz#2155112]
- kvm-test-bdrv-drain-Don-t-yield-in-.bdrv_co_drained_begi.patch [bz#2155112]
- kvm-block-Revert-.bdrv_drained_begin-end-to-non-coroutin.patch [bz#2155112]
- kvm-block-Remove-drained_end_counter.patch [bz#2155112]
- kvm-block-Inline-bdrv_drain_invoke.patch [bz#2155112]
- kvm-block-Fix-locking-for-bdrv_reopen_queue_child.patch [bz#2155112]
- kvm-block-Drain-individual-nodes-during-reopen.patch [bz#2155112]
- kvm-block-Don-t-use-subtree-drains-in-bdrv_drop_intermed.patch [bz#2155112]
- kvm-stream-Replace-subtree-drain-with-a-single-node-drai.patch [bz#2155112]
- kvm-block-Remove-subtree-drains.patch [bz#2155112]
- kvm-block-Call-drain-callbacks-only-once.patch [bz#2155112]
- kvm-block-Remove-ignore_bds_parents-parameter-from-drain.patch [bz#2155112]
- kvm-block-Drop-out-of-coroutine-in-bdrv_do_drained_begin.patch [bz#2155112]
- kvm-block-Don-t-poll-in-bdrv_replace_child_noperm.patch [bz#2155112]
- kvm-block-Remove-poll-parameter-from-bdrv_parent_drained.patch [bz#2155112]
- kvm-accel-introduce-accelerator-blocker-API.patch [bz#1979276]
- kvm-KVM-keep-track-of-running-ioctls.patch [bz#1979276]
- kvm-kvm-Atomic-memslot-updates.patch [bz#1979276]
- Resolves: bz#1905805 (support config interrupt in vhost-vdpa qemu)
- Resolves: bz#2159408 ([s390x] VMs with ISM passthrough don't autostart after leapp upgrade from RHEL 8)
- Resolves: bz#2124856 (VM with virtio interface and iommu=on will crash when try to migrate)
- Resolves: bz#2155112 (Qemu coredump after do snapshot of mirrored top image and its converted base image(iothread enabled))
- Resolves: bz#1979276 (SVM: non atomic memslot updates cause boot failure with seabios and cpu-pm=on)

From 55aad90e347599e88747888ddbefcba33427f386 Mon Sep 17 00:00:00 2001
From: Jason Wang <jasowang@redhat.com>
Date: Fri, 16 Dec 2022 11:35:52 +0800
Subject: [PATCH 12/31] vhost: fix vq dirty bitmap syncing when vIOMMU is
enabled
RH-Author: Eric Auger <eric.auger@redhat.com>
RH-MergeRequest: 134: vhost: fix vq dirty bitmap syncing when vIOMMU is enabled
RH-Bugzilla: 2124856
RH-Acked-by: Peter Xu <peterx@redhat.com>
RH-Acked-by: Jason Wang <jasowang@redhat.com>
RH-Acked-by: Laurent Vivier <lvivier@redhat.com>
RH-Commit: [1/1] 57ef499b63dc2cca6e64ee84d1dc127635868ca2 (eauger1/centos-qemu-kvm)
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2124856
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=49989924
Upstream: yes
When vIOMMU is enabled, vq->used_phys is actually an IOVA, not a GPA, so
we need to translate it to a GPA before syncing. Otherwise we may crash,
since the IOVA can fall outside the range covered by the GPA-sized dirty
log. This can be observed when using virtio-iommu with vhost and 1G of
guest memory.
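For illustration only (not part of the applied patch), a minimal
standalone sketch of the per-IOMMU-page chunking the fix performs on the
used ring before syncing. The translate() helper and the fixed 4 KiB
ADDR_MASK are hypothetical stand-ins for address_space_get_iotlb_entry()
and iotlb.addr_mask:

/* Illustration only: walk the used ring's IOVA range one IOMMU page at a
 * time, translating each chunk to a GPA before it would be handed to the
 * dirty bitmap sync. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define ADDR_MASK 0xfffULL                    /* assumed 4 KiB IOMMU page */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Hypothetical flat IOVA -> GPA mapping standing in for the IOTLB lookup. */
static uint64_t translate(uint64_t iova)
{
    return iova - 0x80000000ULL + 0x40000000ULL;
}

static void sync_used_ring(uint64_t used_iova, uint64_t used_size)
{
    while (used_size) {
        uint64_t offset = used_iova & ADDR_MASK;
        uint64_t gpa = translate(used_iova & ~ADDR_MASK) + offset;
        /* Bytes from here to the end of this IOMMU page ... */
        uint64_t s = ADDR_MASK - offset;
        /* ... capped at what is left of the ring; the +1 stays outside
         * MIN() so the chunk size can never collapse to zero. */
        s = MIN(s, used_size - 1) + 1;

        printf("sync GPA 0x%" PRIx64 "..0x%" PRIx64 "\n", gpa, gpa + s - 1);
        used_iova += s;
        used_size -= s;
    }
}

int main(void)
{
    /* A used ring starting near the end of an IOMMU page, spanning three. */
    sync_used_ring(0x80000f80ULL, 0x2000ULL);
    return 0;
}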
Fixes: c471ad0e9bd46 ("vhost_net: device IOTLB support")
Cc: qemu-stable@nongnu.org
Tested-by: Lei Yang <leiyang@redhat.com>
Reported-by: Yalan Zhang <yalzhang@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Message-Id: <20221216033552.77087-1-jasowang@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit 345cc1cbcbce2bab00abc2b88338d7d89c702d6b)
Signed-off-by: Eric Auger <eric.auger@redhat.com>
---
hw/virtio/vhost.c | 84 ++++++++++++++++++++++++++++++++++++-----------
1 file changed, 64 insertions(+), 20 deletions(-)
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 84dbb39e07..2c566dc539 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -20,6 +20,7 @@
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
+#include "qemu/log.h"
#include "standard-headers/linux/vhost_types.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
@@ -106,6 +107,24 @@ static void vhost_dev_sync_region(struct vhost_dev *dev,
}
}
+static bool vhost_dev_has_iommu(struct vhost_dev *dev)
+{
+ VirtIODevice *vdev = dev->vdev;
+
+ /*
+ * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend support
+ * incremental memory mapping API via IOTLB API. For platform that
+ * does not have IOMMU, there's no need to enable this feature
+ * which may cause unnecessary IOTLB miss/update transactions.
+ */
+ if (vdev) {
+ return virtio_bus_device_iommu_enabled(vdev) &&
+ virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
+ } else {
+ return false;
+ }
+}
+
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
MemoryRegionSection *section,
hwaddr first,
@@ -137,8 +156,51 @@ static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
continue;
}
- vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
- range_get_last(vq->used_phys, vq->used_size));
+ if (vhost_dev_has_iommu(dev)) {
+ IOMMUTLBEntry iotlb;
+ hwaddr used_phys = vq->used_phys, used_size = vq->used_size;
+ hwaddr phys, s, offset;
+
+ while (used_size) {
+ rcu_read_lock();
+ iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
+ used_phys,
+ true,
+ MEMTXATTRS_UNSPECIFIED);
+ rcu_read_unlock();
+
+ if (!iotlb.target_as) {
+ qemu_log_mask(LOG_GUEST_ERROR, "translation "
+ "failure for used_iova %"PRIx64"\n",
+ used_phys);
+ return -EINVAL;
+ }
+
+ offset = used_phys & iotlb.addr_mask;
+ phys = iotlb.translated_addr + offset;
+
+ /*
+ * Distance from start of used ring until last byte of
+ * IOMMU page.
+ */
+ s = iotlb.addr_mask - offset;
+ /*
+ * Size of used ring, or of the part of it until end
+ * of IOMMU page. To avoid zero result, do the adding
+ * outside of MIN().
+ */
+ s = MIN(s, used_size - 1) + 1;
+
+ vhost_dev_sync_region(dev, section, start_addr, end_addr, phys,
+ range_get_last(phys, s));
+ used_size -= s;
+ used_phys += s;
+ }
+ } else {
+ vhost_dev_sync_region(dev, section, start_addr,
+ end_addr, vq->used_phys,
+ range_get_last(vq->used_phys, vq->used_size));
+ }
}
return 0;
}
@@ -306,24 +368,6 @@ static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
dev->log_size = size;
}
-static bool vhost_dev_has_iommu(struct vhost_dev *dev)
-{
- VirtIODevice *vdev = dev->vdev;
-
- /*
- * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend support
- * incremental memory mapping API via IOTLB API. For platform that
- * does not have IOMMU, there's no need to enable this feature
- * which may cause unnecessary IOTLB miss/update transactions.
- */
- if (vdev) {
- return virtio_bus_device_iommu_enabled(vdev) &&
- virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
- } else {
- return false;
- }
-}
-
static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
hwaddr *plen, bool is_write)
{
--
2.31.1