qemu-kvm/kvm-vdpa-Move-command-buffers-map-to-start-of-net-device.patch

From 0c03e18c49b62241d046ecb15c0ee3e7f9c2e547 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= <eperezma@redhat.com>
Date: Tue, 23 Aug 2022 20:30:33 +0200
Subject: [PATCH 15/29] vdpa: Move command buffers map to start of net device
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

RH-Author: Laurent Vivier <lvivier@redhat.com>
RH-MergeRequest: 117: vDPA SVQ Multiqueue support
RH-Jira: RHELX-57
RH-Acked-by: Jason Wang <jasowang@redhat.com>
RH-Acked-by: Cindy Lu <lulu@redhat.com>
RH-Acked-by: Eugenio Pérez <eperezma@redhat.com>
RH-Commit: [15/25] 216c18aa307f7bdef1575f581b767b6f023a73bd (redhat/centos-stream/src/qemu-kvm)

As this series will reuse the CVQ command buffers to restore the device
state at the end of a migration (or a device start), let's map them only
once, at device start, so we don't duplicate their map and unmap on
every command.
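A stand-alone sketch of the resulting lifecycle (illustrative names
only, not the QEMU API): the out and in command buffers are mapped once
at device start, every CVQ command then reuses those mappings, and both
are unmapped once at stop, with rollback if the second mapping fails:

/* Stand-alone sketch, not QEMU code: map-once-at-start lifecycle.
 * All names are stand-ins. */
#include <stdio.h>

static int map_buf(const char *name)
{
    printf("map   %s buffer\n", name);
    return 0;   /* 0 on success, negative errno on failure */
}

static void unmap_buf(const char *name)
{
    printf("unmap %s buffer\n", name);
}

/* Done once at device start (cf. vhost_vdpa_net_cvq_start). */
static int cvq_start(void)
{
    int r = map_buf("out");
    if (r < 0) {
        return r;
    }
    r = map_buf("in");
    if (r < 0) {
        unmap_buf("out");   /* roll back the first mapping */
    }
    return r;
}

/* The per-command path no longer maps or unmaps anything. */
static void handle_cvq_command(int n)
{
    printf("command %d: reuse the existing mappings\n", n);
}

/* Done once at device stop (cf. vhost_vdpa_net_cvq_stop). */
static void cvq_stop(void)
{
    unmap_buf("out");
    unmap_buf("in");
}

int main(void)
{
    if (cvq_start() == 0) {
        handle_cvq_command(1);
        handle_cvq_command(2);
        cvq_stop();
    }
    return 0;
}
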
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
(cherry picked from commit 7a7f87e94c4e75ca177564491595dd17b7e41a62)
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
---
 net/vhost-vdpa.c | 123 ++++++++++++++++++++++-------------------------
 1 file changed, 58 insertions(+), 65 deletions(-)

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 1a597c2e92..452d10ed93 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -263,29 +263,20 @@ static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
     return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
 }
 
-/** Copy and map a guest buffer. */
-static bool vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v,
-                                   const struct iovec *out_data,
-                                   size_t out_num, size_t data_len, void *buf,
-                                   size_t *written, bool write)
+/** Map CVQ buffer. */
+static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
+                                  bool write)
 {
     DMAMap map = {};
     int r;
 
-    if (unlikely(!data_len)) {
-        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid legnth of %s buffer\n",
-                      __func__, write ? "in" : "out");
-        return false;
-    }
-
-    *written = iov_to_buf(out_data, out_num, 0, buf, data_len);
     map.translated_addr = (hwaddr)(uintptr_t)buf;
-    map.size = vhost_vdpa_net_cvq_cmd_page_len() - 1;
+    map.size = size - 1;
     map.perm = write ? IOMMU_RW : IOMMU_RO,
     r = vhost_iova_tree_map_alloc(v->iova_tree, &map);
     if (unlikely(r != IOVA_OK)) {
         error_report("Cannot map injected element");
-        return false;
+        return r;
     }
 
     r = vhost_vdpa_dma_map(v, map.iova, vhost_vdpa_net_cvq_cmd_page_len(), buf,
@@ -294,50 +285,58 @@ static bool vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v,
         goto dma_map_err;
     }
 
-    return true;
+    return 0;
 
 dma_map_err:
     vhost_iova_tree_remove(v->iova_tree, map);
-    return false;
+    return r;
 }
 
-/**
- * Copy the guest element into a dedicated buffer suitable to be sent to NIC
- *
- * @iov: [0] is the out buffer, [1] is the in one
- */
-static bool vhost_vdpa_net_cvq_map_elem(VhostVDPAState *s,
-                                        VirtQueueElement *elem,
-                                        struct iovec *iov)
+static int vhost_vdpa_net_cvq_start(NetClientState *nc)
 {
-    size_t in_copied;
-    bool ok;
+    VhostVDPAState *s;
+    int r;
 
-    iov[0].iov_base = s->cvq_cmd_out_buffer;
-    ok = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, elem->out_sg, elem->out_num,
-                                vhost_vdpa_net_cvq_cmd_len(), iov[0].iov_base,
-                                &iov[0].iov_len, false);
-    if (unlikely(!ok)) {
-        return false;
+    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
+
+    s = DO_UPCAST(VhostVDPAState, nc, nc);
+    if (!s->vhost_vdpa.shadow_vqs_enabled) {
+        return 0;
     }
 
-    iov[1].iov_base = s->cvq_cmd_in_buffer;
-    ok = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, NULL, 0,
-                                sizeof(virtio_net_ctrl_ack), iov[1].iov_base,
-                                &in_copied, true);
-    if (unlikely(!ok)) {
+    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
+                               vhost_vdpa_net_cvq_cmd_page_len(), false);
+    if (unlikely(r < 0)) {
+        return r;
+    }
+
+    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_in_buffer,
+                               vhost_vdpa_net_cvq_cmd_page_len(), true);
+    if (unlikely(r < 0)) {
         vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
-        return false;
     }
 
-    iov[1].iov_len = sizeof(virtio_net_ctrl_ack);
-    return true;
+    return r;
+}
+
+static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
+{
+    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+
+    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
+
+    if (s->vhost_vdpa.shadow_vqs_enabled) {
+        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
+        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_in_buffer);
+    }
 }
 
 static NetClientInfo net_vhost_vdpa_cvq_info = {
     .type = NET_CLIENT_DRIVER_VHOST_VDPA,
     .size = sizeof(VhostVDPAState),
     .receive = vhost_vdpa_receive,
+    .start = vhost_vdpa_net_cvq_start,
+    .stop = vhost_vdpa_net_cvq_stop,
     .cleanup = vhost_vdpa_cleanup,
     .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
     .has_ufo = vhost_vdpa_has_ufo,
@@ -348,19 +347,17 @@ static NetClientInfo net_vhost_vdpa_cvq_info = {
  * Do not forward commands not supported by SVQ. Otherwise, the device could
  * accept it and qemu would not know how to update the device model.
  */
-static bool vhost_vdpa_net_cvq_validate_cmd(const struct iovec *out,
-                                            size_t out_num)
+static bool vhost_vdpa_net_cvq_validate_cmd(const void *out_buf, size_t len)
 {
     struct virtio_net_ctrl_hdr ctrl;
-    size_t n;
 
-    n = iov_to_buf(out, out_num, 0, &ctrl, sizeof(ctrl));
-    if (unlikely(n < sizeof(ctrl))) {
+    if (unlikely(len < sizeof(ctrl))) {
         qemu_log_mask(LOG_GUEST_ERROR,
-                      "%s: invalid legnth of out buffer %zu\n", __func__, n);
+                      "%s: invalid legnth of out buffer %zu\n", __func__, len);
         return false;
     }
 
+    memcpy(&ctrl, out_buf, sizeof(ctrl));
     switch (ctrl.class) {
     case VIRTIO_NET_CTRL_MAC:
         switch (ctrl.cmd) {
@@ -392,10 +389,14 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
     VhostVDPAState *s = opaque;
     size_t in_len, dev_written;
     virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
-    /* out and in buffers sent to the device */
-    struct iovec dev_buffers[2] = {
-        { .iov_base = s->cvq_cmd_out_buffer },
-        { .iov_base = s->cvq_cmd_in_buffer },
+    /* Out buffer sent to both the vdpa device and the device model */
+    struct iovec out = {
+        .iov_base = s->cvq_cmd_out_buffer,
+    };
+    /* In buffer sent to the device */
+    const struct iovec dev_in = {
+        .iov_base = s->cvq_cmd_in_buffer,
+        .iov_len = sizeof(virtio_net_ctrl_ack),
     };
     /* in buffer used for device model */
     const struct iovec in = {
@@ -405,17 +406,15 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
     int r = -EINVAL;
     bool ok;
 
-    ok = vhost_vdpa_net_cvq_map_elem(s, elem, dev_buffers);
-    if (unlikely(!ok)) {
-        goto out;
-    }
-
-    ok = vhost_vdpa_net_cvq_validate_cmd(&dev_buffers[0], 1);
+    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
+                             s->cvq_cmd_out_buffer,
+                             vhost_vdpa_net_cvq_cmd_len());
+    ok = vhost_vdpa_net_cvq_validate_cmd(s->cvq_cmd_out_buffer, out.iov_len);
     if (unlikely(!ok)) {
         goto out;
     }
 
-    r = vhost_svq_add(svq, &dev_buffers[0], 1, &dev_buffers[1], 1, elem);
+    r = vhost_svq_add(svq, &out, 1, &dev_in, 1, elem);
     if (unlikely(r != 0)) {
         if (unlikely(r == -ENOSPC)) {
             qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
@@ -435,13 +434,13 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
         goto out;
     }
 
-    memcpy(&status, dev_buffers[1].iov_base, sizeof(status));
+    memcpy(&status, s->cvq_cmd_in_buffer, sizeof(status));
     if (status != VIRTIO_NET_OK) {
         goto out;
     }
 
     status = VIRTIO_NET_ERR;
-    virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, dev_buffers, 1);
+    virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
     if (status != VIRTIO_NET_OK) {
         error_report("Bad CVQ processing in model");
     }
@@ -454,12 +453,6 @@ out:
         error_report("Bad device CVQ written length");
     }
     vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
     g_free(elem);
-    if (dev_buffers[0].iov_base) {
-        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, dev_buffers[0].iov_base);
-    }
-    if (dev_buffers[1].iov_base) {
-        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, dev_buffers[1].iov_base);
-    }
     return r;
 }
--
2.31.1
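The .start/.stop members added to net_vhost_vdpa_cvq_info above are
callbacks that vhost_net invokes around device start and stop; the
NetClientInfo fields themselves come from earlier patches in this series
("vhost_net: Add NetClientInfo start callback" and the matching stop
patch). Below is a minimal stand-alone model of that wiring, assuming
only that the core checks each hook for NULL before calling it; all
types and names are stand-ins, not QEMU's:

/* Toy model, not QEMU code: a backend registers optional start/stop
 * hooks and the core calls them around device start/stop. */
#include <stdio.h>

typedef struct NetClientOpsModel {
    int  (*start)(void);   /* plays the role of vhost_vdpa_net_cvq_start */
    void (*stop)(void);    /* plays the role of vhost_vdpa_net_cvq_stop */
} NetClientOpsModel;

static int cvq_start_model(void)
{
    printf("start: map in/out command buffers once\n");
    return 0;
}

static void cvq_stop_model(void)
{
    printf("stop: unmap in/out command buffers\n");
}

static int device_start_model(const NetClientOpsModel *ops)
{
    /* Hooks are optional, so check before calling. */
    if (ops->start && ops->start() < 0) {
        return -1;
    }
    printf("device running; CVQ commands reuse the mapped buffers\n");
    return 0;
}

static void device_stop_model(const NetClientOpsModel *ops)
{
    if (ops->stop) {
        ops->stop();
    }
}

int main(void)
{
    const NetClientOpsModel ops = {
        .start = cvq_start_model,
        .stop = cvq_stop_model,
    };

    if (device_start_model(&ops) == 0) {
        device_stop_model(&ops);
    }
    return 0;
}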