2fe1fc7b2d
- kvm-virtio-introduce-macro-VIRTIO_CONFIG_IRQ_IDX.patch [bz#1905805]
- kvm-virtio-pci-decouple-notifier-from-interrupt-process.patch [bz#1905805]
- kvm-virtio-pci-decouple-the-single-vector-from-the-inter.patch [bz#1905805]
- kvm-vhost-introduce-new-VhostOps-vhost_set_config_call.patch [bz#1905805]
- kvm-vhost-vdpa-add-support-for-config-interrupt.patch [bz#1905805]
- kvm-virtio-add-support-for-configure-interrupt.patch [bz#1905805]
- kvm-vhost-add-support-for-configure-interrupt.patch [bz#1905805]
- kvm-virtio-net-add-support-for-configure-interrupt.patch [bz#1905805]
- kvm-virtio-mmio-add-support-for-configure-interrupt.patch [bz#1905805]
- kvm-virtio-pci-add-support-for-configure-interrupt.patch [bz#1905805]
- kvm-s390x-s390-virtio-ccw-Activate-zPCI-features-on-s390.patch [bz#2159408]
- kvm-vhost-fix-vq-dirty-bitmap-syncing-when-vIOMMU-is-ena.patch [bz#2124856]
- kvm-block-drop-bdrv_remove_filter_or_cow_child.patch [bz#2155112]
- kvm-qed-Don-t-yield-in-bdrv_qed_co_drain_begin.patch [bz#2155112]
- kvm-test-bdrv-drain-Don-t-yield-in-.bdrv_co_drained_begi.patch [bz#2155112]
- kvm-block-Revert-.bdrv_drained_begin-end-to-non-coroutin.patch [bz#2155112]
- kvm-block-Remove-drained_end_counter.patch [bz#2155112]
- kvm-block-Inline-bdrv_drain_invoke.patch [bz#2155112]
- kvm-block-Fix-locking-for-bdrv_reopen_queue_child.patch [bz#2155112]
- kvm-block-Drain-individual-nodes-during-reopen.patch [bz#2155112]
- kvm-block-Don-t-use-subtree-drains-in-bdrv_drop_intermed.patch [bz#2155112]
- kvm-stream-Replace-subtree-drain-with-a-single-node-drai.patch [bz#2155112]
- kvm-block-Remove-subtree-drains.patch [bz#2155112]
- kvm-block-Call-drain-callbacks-only-once.patch [bz#2155112]
- kvm-block-Remove-ignore_bds_parents-parameter-from-drain.patch [bz#2155112]
- kvm-block-Drop-out-of-coroutine-in-bdrv_do_drained_begin.patch [bz#2155112]
- kvm-block-Don-t-poll-in-bdrv_replace_child_noperm.patch [bz#2155112]
- kvm-block-Remove-poll-parameter-from-bdrv_parent_drained.patch [bz#2155112]
- kvm-accel-introduce-accelerator-blocker-API.patch [bz#1979276]
- kvm-KVM-keep-track-of-running-ioctls.patch [bz#1979276]
- kvm-kvm-Atomic-memslot-updates.patch [bz#1979276]
- Resolves: bz#1905805 (support config interrupt in vhost-vdpa qemu)
- Resolves: bz#2159408 ([s390x] VMs with ISM passthrough don't autostart after leapp upgrade from RHEL 8)
- Resolves: bz#2124856 (VM with virtio interface and iommu=on will crash when try to migrate)
- Resolves: bz#2155112 (Qemu coredump after do snapshot of mirrored top image and its converted base image(iothread enabled))
- Resolves: bz#1979276 (SVM: non atomic memslot updates cause boot failure with seabios and cpu-pm=on)
From 58cd577ff157cfaf7506bba135db58e75c330ff0 Mon Sep 17 00:00:00 2001
From: Cindy Lu <lulu@redhat.com>
Date: Thu, 22 Dec 2022 15:04:44 +0800
Subject: [PATCH 03/31] virtio-pci: decouple the single vector from the
 interrupt process
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

RH-Author: Cindy Lu <lulu@redhat.com>
RH-MergeRequest: 132: vhost-vdpa: support config interrupt in vhost-vdpa
RH-Bugzilla: 1905805
RH-Acked-by: Laurent Vivier <lvivier@redhat.com>
RH-Acked-by: Eugenio Pérez <eperezma@redhat.com>
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
RH-Commit: [3/10] 2c79cb678f005fb2f53b2db0f237347634ab3422 (lulu6/qemu-kvm3)

https://bugzilla.redhat.com/show_bug.cgi?id=1905805

To reuse the interrupt setup process for the configure interrupt, we
need to decouple the handling of a single vector from the rest of the
interrupt process. Add the new functions kvm_virtio_pci_vector_use_one()
and kvm_virtio_pci_vector_release_one(), which set up and release one
vector each; the whole process then completes in a loop over the vq
numbers.

Signed-off-by: Cindy Lu <lulu@redhat.com>
Message-Id: <20221222070451.936503-4-lulu@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit ee3b8dc6cc496ba7f4e27aed4493275c706a7942)
Signed-off-by: Cindy Lu <lulu@redhat.com>
---
 hw/virtio/virtio-pci.c | 131 +++++++++++++++++++++++------------------
 1 file changed, 73 insertions(+), 58 deletions(-)

diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 52c7692fff..ec816ea367 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -699,7 +699,6 @@ static uint32_t virtio_read_config(PCIDevice *pci_dev,
 }
 
 static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
-                                        unsigned int queue_no,
                                         unsigned int vector)
 {
     VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
@@ -764,87 +763,103 @@ static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, int queue_no,
     return 0;
 }
 
-static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
+static int kvm_virtio_pci_vector_use_one(VirtIOPCIProxy *proxy, int queue_no)
 {
+    unsigned int vector;
+    int ret;
+    EventNotifier *n;
     PCIDevice *dev = &proxy->pci_dev;
     VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
-    unsigned int vector;
-    int ret, queue_no;
-    EventNotifier *n;
-    for (queue_no = 0; queue_no < nvqs; queue_no++) {
-        if (!virtio_queue_get_num(vdev, queue_no)) {
-            break;
-        }
-        ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
-        if (ret < 0) {
-            break;
-        }
-        if (vector >= msix_nr_vectors_allocated(dev)) {
-            continue;
-        }
-        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
+
+    ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
+    if (ret < 0) {
+        return ret;
+    }
+    if (vector >= msix_nr_vectors_allocated(dev)) {
+        return 0;
+    }
+    ret = kvm_virtio_pci_vq_vector_use(proxy, vector);
+    if (ret < 0) {
+        goto undo;
+    }
+    /*
+     * If guest supports masking, set up irqfd now.
+     * Otherwise, delay until unmasked in the frontend.
+     */
+    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
+        ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
         if (ret < 0) {
+            kvm_virtio_pci_vq_vector_release(proxy, vector);
             goto undo;
         }
-        /* If guest supports masking, set up irqfd now.
-         * Otherwise, delay until unmasked in the frontend.
-         */
-        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
-            ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
-            if (ret < 0) {
-                kvm_virtio_pci_vq_vector_release(proxy, vector);
-                goto undo;
-            }
-        }
     }
-    return 0;
 
+    return 0;
 undo:
-    while (--queue_no >= 0) {
-        vector = virtio_queue_vector(vdev, queue_no);
-        if (vector >= msix_nr_vectors_allocated(dev)) {
-            continue;
+
+    vector = virtio_queue_vector(vdev, queue_no);
+    if (vector >= msix_nr_vectors_allocated(dev)) {
+        return ret;
+    }
+    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
+        ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
+        if (ret < 0) {
+            return ret;
         }
-        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
-            ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
-            if (ret < 0) {
-                break;
-            }
-            kvm_virtio_pci_irqfd_release(proxy, n, vector);
+        kvm_virtio_pci_irqfd_release(proxy, n, vector);
+    }
+    return ret;
+}
+static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
+{
+    int queue_no;
+    int ret = 0;
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
+    for (queue_no = 0; queue_no < nvqs; queue_no++) {
+        if (!virtio_queue_get_num(vdev, queue_no)) {
+            return -1;
         }
-        kvm_virtio_pci_vq_vector_release(proxy, vector);
+        ret = kvm_virtio_pci_vector_use_one(proxy, queue_no);
     }
     return ret;
 }
 
-static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
+
+static void kvm_virtio_pci_vector_release_one(VirtIOPCIProxy *proxy,
+                                              int queue_no)
 {
-    PCIDevice *dev = &proxy->pci_dev;
     VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     unsigned int vector;
-    int queue_no;
-    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
     EventNotifier *n;
-    int ret ;
+    int ret;
+    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+    PCIDevice *dev = &proxy->pci_dev;
+
+    ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
+    if (ret < 0) {
+        return;
+    }
+    if (vector >= msix_nr_vectors_allocated(dev)) {
+        return;
+    }
+    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
+        kvm_virtio_pci_irqfd_release(proxy, n, vector);
+    }
+    kvm_virtio_pci_vq_vector_release(proxy, vector);
+}
+
+static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
+{
+    int queue_no;
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
     for (queue_no = 0; queue_no < nvqs; queue_no++) {
         if (!virtio_queue_get_num(vdev, queue_no)) {
             break;
         }
-        ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
-        if (ret < 0) {
-            break;
-        }
-        if (vector >= msix_nr_vectors_allocated(dev)) {
-            continue;
-        }
-        /* If guest supports masking, clean up irqfd now.
-         * Otherwise, it was cleaned when masked in the frontend.
-         */
-        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
-            kvm_virtio_pci_irqfd_release(proxy, n, vector);
-        }
-        kvm_virtio_pci_vq_vector_release(proxy, vector);
+        kvm_virtio_pci_vector_release_one(proxy, queue_no);
     }
 }

--
2.31.1
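For readers following the refactoring, here is a minimal, self-contained
sketch of the pattern the patch applies: per-queue use/release helpers that
the nvqs loops merely compose, so the same helpers can later be reused for
one extra vector (the config interrupt) outside any loop. All names and the
printf bodies below are illustrative stand-ins, not QEMU code.

#include <stdio.h>

#define NVQS 4  /* illustrative queue count */

/* Hypothetical stand-in for kvm_virtio_pci_vector_use_one():
 * sets up the MSI-X vector/irqfd for a single queue. */
static int vector_use_one(int queue_no)
{
    printf("set up vector for queue %d\n", queue_no);
    return 0;  /* 0 on success, negative on failure */
}

/* Hypothetical stand-in for kvm_virtio_pci_vector_release_one():
 * tears down the vector for a single queue. */
static void vector_release_one(int queue_no)
{
    printf("release vector for queue %d\n", queue_no);
}

/* The loop variants keep their old signatures and simply iterate the
 * per-queue helpers, mirroring kvm_virtio_pci_vector_use() and
 * kvm_virtio_pci_vector_release() after this patch. */
static int vector_use(int nvqs)
{
    int ret = 0;

    for (int queue_no = 0; queue_no < nvqs; queue_no++) {
        ret = vector_use_one(queue_no);
        if (ret < 0) {
            break;
        }
    }
    return ret;
}

static void vector_release(int nvqs)
{
    for (int queue_no = 0; queue_no < nvqs; queue_no++) {
        vector_release_one(queue_no);
    }
}

int main(void)
{
    if (vector_use(NVQS) == 0) {
        vector_release(NVQS);
    }
    return 0;
}

With the single-vector path factored out this way, a later patch in the
series can invoke the _use_one/_release_one helpers directly for the config
interrupt's vector without touching the per-queue loops.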