From 486647551223cc01f4dba87197030bbf4e674f0f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= <eperezma@redhat.com>
Date: Thu, 21 Jul 2022 15:24:48 +0200
Subject: [PATCH 01/32] vhost: Track descriptor chain in private at SVQ
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

RH-Author: Eugenio Pérez <eperezma@redhat.com>
RH-MergeRequest: 108: Net Control Virtqueue shadow Support
RH-Commit: [1/27] 26d16dc383e3064ac6e4288d5c52b39fee0ad204 (eperezmartin/qemu-kvm)
RH-Bugzilla: 1939363
RH-Acked-by: Stefano Garzarella <sgarzare@redhat.com>
RH-Acked-by: Cindy Lu <lulu@redhat.com>
RH-Acked-by: Laurent Vivier <lvivier@redhat.com>

Bugzilla: https://bugzilla.redhat.com/1939363

Upstream Status: git://git.qemu.org/qemu.git

commit 495fe3a78749c39c0e772c4e1a55d6cb8a7e5292
Author: Eugenio Pérez <eperezma@redhat.com>
Date: Thu May 12 19:57:42 2022 +0200

    vhost: Track descriptor chain in private at SVQ

    The device could have access to modify them, and it definitely has
    access when we implement packed vq. Harden SVQ by maintaining a private
    copy of the descriptor chain. Other fields like buffer addresses are
    already maintained separately.

    Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
    Message-Id: <20220512175747.142058-2-eperezma@redhat.com>
    Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
    Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 hw/virtio/vhost-shadow-virtqueue.c | 12 +++++++-----
 hw/virtio/vhost-shadow-virtqueue.h |  6 ++++++
 2 files changed, 13 insertions(+), 5 deletions(-)
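
Note: the idea of the change is easier to see outside the diff context. Below
is a minimal, self-contained C sketch of the same pattern; it is not QEMU
code, and every name in it (toy_svq, toy_svq_start, toy_svq_alloc,
toy_svq_free) is invented for illustration. The free list of descriptor slots
is threaded through a host-private desc_next[] array, so the bookkeeping never
reads a "next" field back from memory the device can write.

/*
 * Toy model of the SVQ hardening idea: the free list of descriptor slots is
 * chained through a private desc_next[] array owned by the host, instead of
 * through the "next" fields of the shared descriptor ring, which a device
 * (or a future packed-vq layout) could overwrite.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_svq {
    uint16_t *desc_next;   /* private copy of each slot's "next" index */
    uint16_t free_head;    /* first free descriptor slot */
    uint16_t num;          /* ring size */
};

static void toy_svq_start(struct toy_svq *svq, uint16_t num)
{
    svq->num = num;
    svq->free_head = 0;
    svq->desc_next = calloc(num, sizeof(*svq->desc_next));
    for (uint16_t i = 0; i + 1 < num; i++) {
        svq->desc_next[i] = i + 1;     /* initially all slots are chained */
    }
}

/* Take one slot from the free list; only private state is read. */
static uint16_t toy_svq_alloc(struct toy_svq *svq)
{
    uint16_t head = svq->free_head;
    svq->free_head = svq->desc_next[head];
    return head;
}

/* Return a used slot to the free list without touching the shared ring. */
static void toy_svq_free(struct toy_svq *svq, uint16_t id)
{
    svq->desc_next[id] = svq->free_head;
    svq->free_head = id;
}

int main(void)
{
    struct toy_svq svq;

    toy_svq_start(&svq, 8);
    uint16_t a = toy_svq_alloc(&svq);  /* slot 0 */
    uint16_t b = toy_svq_alloc(&svq);  /* slot 1 */
    toy_svq_free(&svq, a);             /* free list is now 0 -> 2 -> 3 -> ... */
    printf("allocated %u and %u, next free is %u\n", a, b, svq.free_head);
    free(svq.desc_next);
    return 0;
}
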
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index b232803d1b..3155801f50 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -138,6 +138,7 @@ static void vhost_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg,
     for (n = 0; n < num; n++) {
         if (more_descs || (n + 1 < num)) {
             descs[i].flags = flags | cpu_to_le16(VRING_DESC_F_NEXT);
+            descs[i].next = cpu_to_le16(svq->desc_next[i]);
         } else {
             descs[i].flags = flags;
         }
@@ -145,10 +146,10 @@ static void vhost_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg,
         descs[i].len = cpu_to_le32(iovec[n].iov_len);
 
         last = i;
-        i = cpu_to_le16(descs[i].next);
+        i = cpu_to_le16(svq->desc_next[i]);
     }
 
-    svq->free_head = le16_to_cpu(descs[last].next);
+    svq->free_head = le16_to_cpu(svq->desc_next[last]);
 }
 
 static bool vhost_svq_add_split(VhostShadowVirtqueue *svq,
@@ -336,7 +337,6 @@ static void vhost_svq_disable_notification(VhostShadowVirtqueue *svq)
 static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
                                            uint32_t *len)
 {
-    vring_desc_t *descs = svq->vring.desc;
     const vring_used_t *used = svq->vring.used;
     vring_used_elem_t used_elem;
     uint16_t last_used;
@@ -365,7 +365,7 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
         return NULL;
     }
 
-    descs[used_elem.id].next = svq->free_head;
+    svq->desc_next[used_elem.id] = svq->free_head;
     svq->free_head = used_elem.id;
 
     *len = used_elem.len;
@@ -540,8 +540,9 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
     svq->vring.used = qemu_memalign(qemu_real_host_page_size, device_size);
     memset(svq->vring.used, 0, device_size);
     svq->ring_id_maps = g_new0(VirtQueueElement *, svq->vring.num);
+    svq->desc_next = g_new0(uint16_t, svq->vring.num);
     for (unsigned i = 0; i < svq->vring.num - 1; i++) {
-        svq->vring.desc[i].next = cpu_to_le16(i + 1);
+        svq->desc_next[i] = cpu_to_le16(i + 1);
     }
 }
 
@@ -574,6 +575,7 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq)
         virtqueue_detach_element(svq->vq, next_avail_elem, 0);
     }
     svq->vq = NULL;
+    g_free(svq->desc_next);
     g_free(svq->ring_id_maps);
     qemu_vfree(svq->vring.desc);
     qemu_vfree(svq->vring.used);
diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
index e5e24c536d..c132c994e9 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -53,6 +53,12 @@ typedef struct VhostShadowVirtqueue {
     /* Next VirtQueue element that guest made available */
     VirtQueueElement *next_guest_avail_elem;
 
+    /*
+     * Backup next field for each descriptor so we can recover securely, not
+     * needing to trust the device access.
+     */
+    uint16_t *desc_next;
+
     /* Next head to expose to the device */
     uint16_t shadow_avail_idx;
 
--
2.31.1