qemu-kvm/kvm-vhost-stop-transfer-elem-ownership-in-vhost_handle_g.patch
* Thu Sep 29 2022 Miroslav Rezanina <mrezanin@redhat.com> - 7.1.0-2
- kvm-vdpa-Skip-the-maps-not-in-the-iova-tree.patch [RHELX-57]
- kvm-vdpa-do-not-save-failed-dma-maps-in-SVQ-iova-tree.patch [RHELX-57]
- kvm-util-accept-iova_tree_remove_parameter-by-value.patch [RHELX-57]
- kvm-vdpa-Remove-SVQ-vring-from-iova_tree-at-shutdown.patch [RHELX-57]
- kvm-vdpa-Make-SVQ-vring-unmapping-return-void.patch [RHELX-57]
- kvm-vhost-Always-store-new-kick-fd-on-vhost_svq_set_svq_.patch [RHELX-57]
- kvm-vdpa-Use-ring-hwaddr-at-vhost_vdpa_svq_unmap_ring.patch [RHELX-57]
- kvm-vhost-stop-transfer-elem-ownership-in-vhost_handle_g.patch [RHELX-57]
- kvm-vhost-use-SVQ-element-ndescs-instead-of-opaque-data-.patch [RHELX-57]
- kvm-vhost-Delete-useless-read-memory-barrier.patch [RHELX-57]
- kvm-vhost-Do-not-depend-on-NULL-VirtQueueElement-on-vhos.patch [RHELX-57]
- kvm-vhost_net-Add-NetClientInfo-start-callback.patch [RHELX-57]
- kvm-vhost_net-Add-NetClientInfo-stop-callback.patch [RHELX-57]
- kvm-vdpa-add-net_vhost_vdpa_cvq_info-NetClientInfo.patch [RHELX-57]
- kvm-vdpa-Move-command-buffers-map-to-start-of-net-device.patch [RHELX-57]
- kvm-vdpa-extract-vhost_vdpa_net_cvq_add-from-vhost_vdpa_.patch [RHELX-57]
- kvm-vhost_net-add-NetClientState-load-callback.patch [RHELX-57]
- kvm-vdpa-Add-virtio-net-mac-address-via-CVQ-at-start.patch [RHELX-57]
- kvm-vdpa-Delete-CVQ-migration-blocker.patch [RHELX-57]
- kvm-vdpa-Make-VhostVDPAState-cvq_cmd_in_buffer-control-a.patch [RHELX-57]
- kvm-vdpa-extract-vhost_vdpa_net_load_mac-from-vhost_vdpa.patch [RHELX-57]
- kvm-vdpa-Add-vhost_vdpa_net_load_mq.patch [RHELX-57]
- kvm-vdpa-validate-MQ-CVQ-commands.patch [RHELX-57]
- kvm-virtio-net-Update-virtio-net-curr_queue_pairs-in-vdp.patch [RHELX-57]
- kvm-vdpa-Allow-MQ-feature-in-SVQ.patch [RHELX-57]
- kvm-i386-reset-KVM-nested-state-upon-CPU-reset.patch [bz#2125281]
- kvm-i386-do-kvm_put_msr_feature_control-first-thing-when.patch [bz#2125281]
- kvm-Revert-Re-enable-capstone-internal-build.patch [bz#2127825]
- kvm-spec-Use-capstone-package.patch [bz#2127825]
- Resolves: RHELX-57
  (vDPA SVQ Multiqueue support)
- Resolves: bz#2125281
  ([RHEL9.1] Guests in VMX root operation fail to reboot with QEMU's 'system_reset' command [rhel-9.2.0])
- Resolves: bz#2127825
  (Use capstone for qemu-kvm build)

From 33c22dd3353f79a037f2473a69176932ac1a1c05 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= <eperezma@redhat.com>
Date: Tue, 23 Aug 2022 20:30:26 +0200
Subject: [PATCH 08/29] vhost: stop transfer elem ownership in
vhost_handle_guest_kick
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

RH-Author: Laurent Vivier <lvivier@redhat.com>
RH-MergeRequest: 117: vDPA SVQ Multiqueue support
RH-Jira: RHELX-57
RH-Acked-by: Jason Wang <jasowang@redhat.com>
RH-Acked-by: Cindy Lu <lulu@redhat.com>
RH-Acked-by: Eugenio Pérez <eperezma@redhat.com>
RH-Commit: [8/25] e9c6314fddeb1f7bc738efea90f2788cae27bab7 (redhat/centos-stream/src/qemu-kvm)

Until now it was simpler to let vhost_svq_add own the element and free
it on failure. Now that qemu will add elements to an SVQ without the
guest's knowledge, it is better to keep ownership in the caller.

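To make the resulting contract concrete, here is a minimal, compilable
sketch of the idiom this patch adopts. Element, svq_add, ring,
next_avail and handle_kick are hypothetical stand-ins for
VirtQueueElement, vhost_svq_add, the SVQ vring and
svq->next_guest_avail_elem; only the glib calls (g_autofree,
g_steal_pointer) are the real API the patch relies on.

    #include <errno.h>
    #include <glib.h>
    #include <stdio.h>

    /* Hypothetical stand-in for VirtQueueElement. */
    typedef struct { int index; } Element;

    static Element *ring[4];     /* owns its elements once they are added */
    static Element *next_avail;  /* parked element, like next_guest_avail_elem */

    /* As with vhost_svq_add after this patch: never frees the element,
     * so on failure the caller still owns it. */
    static int svq_add(Element *elem)
    {
        for (size_t i = 0; i < G_N_ELEMENTS(ring); i++) {
            if (!ring[i]) {
                ring[i] = elem;  /* the ring takes ownership on success */
                return 0;
            }
        }
        return -ENOSPC;          /* caller keeps ownership */
    }

    static void handle_kick(int i)
    {
        /* g_autofree releases elem when it goes out of scope, unless
         * ownership has been moved away first. */
        g_autofree Element *elem = g_new0(Element, 1);
        elem->index = i;

        if (svq_add(elem) < 0) {
            /* Queue full: park the element. g_steal_pointer() nulls elem,
             * turning the automatic cleanup into a no-op. */
            next_avail = g_steal_pointer(&elem);
            return;
        }
        /* Success: the ring owns it now; drop our reference so the
         * automatic cleanup cannot double-free it. */
        elem = NULL;
    }

    int main(void)
    {
        for (int i = 0; i < 5; i++) {
            handle_kick(i);      /* the fifth element gets parked */
        }
        printf("parked element: %d\n", next_avail ? next_avail->index : -1);
        for (size_t i = 0; i < G_N_ELEMENTS(ring); i++) {
            g_free(ring[i]);
        }
        g_free(next_avail);
        return 0;
    }
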
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
(cherry picked from commit 9c2ab2f1ec333be8614cc12272d4b91960704dbe)
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
---
 hw/virtio/vhost-shadow-virtqueue.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index 82a784d250..a1261d4a0f 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -233,9 +233,6 @@ static void vhost_svq_kick(VhostShadowVirtqueue *svq)
 /**
  * Add an element to a SVQ.
  *
- * The caller must check that there is enough slots for the new element. It
- * takes ownership of the element: In case of failure not ENOSPC, it is free.
- *
  * Return -EINVAL if element is invalid, -ENOSPC if dev queue is full
  */
 int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
@@ -252,7 +249,6 @@ int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
 
     ok = vhost_svq_add_split(svq, out_sg, out_num, in_sg, in_num, &qemu_head);
     if (unlikely(!ok)) {
-        g_free(elem);
         return -EINVAL;
     }
 
@@ -293,7 +289,7 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
         virtio_queue_set_notification(svq->vq, false);
 
         while (true) {
-            VirtQueueElement *elem;
+            g_autofree VirtQueueElement *elem;
             int r;
 
             if (svq->next_guest_avail_elem) {
@@ -324,12 +320,14 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
                      * queue the current guest descriptor and ignore kicks
                      * until some elements are used.
                      */
-                    svq->next_guest_avail_elem = elem;
+                    svq->next_guest_avail_elem = g_steal_pointer(&elem);
                 }
 
                 /* VQ is full or broken, just return and ignore kicks */
                 return;
             }
+            /* elem belongs to SVQ or external caller now */
+            elem = NULL;
         }
 
         virtio_queue_set_notification(svq->vq, true);
--
2.31.1
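
Taken together, the hunks above leave each element with one of three
fates: handed to the SVQ on success, parked in next_guest_avail_elem on
-ENOSPC, or freed by the g_autofree cleanup on any other error. A
schematic sketch of that control flow, reusing the hypothetical
Element/svq_add/next_avail stand-ins from the earlier example (not QEMU
code):

    /* One element, three outcomes; mirrors the shape of the patched
     * vhost_handle_guest_kick loop body. */
    static void handle_one(Element *incoming)
    {
        g_autofree Element *elem = incoming;  /* take ownership */
        int r = svq_add(elem);

        if (r != 0) {
            if (r == -ENOSPC) {
                /* Ring full: keep the element for a later retry. */
                next_avail = g_steal_pointer(&elem);
            }
            /* Any other error: return and let g_autofree release the
             * element, since svq_add no longer frees it for us. */
            return;
        }
        /* Success: the ring owns it; make the cleanup a no-op. */
        elem = NULL;
    }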