qemu-kvm/kvm-vdpa-Add-virtio-net-mac-address-via-CVQ-at-start.patch
* Thu Sep 29 2022 Miroslav Rezanina <mrezanin@redhat.com> - 7.1.0-2
- kvm-vdpa-Skip-the-maps-not-in-the-iova-tree.patch [RHELX-57]
- kvm-vdpa-do-not-save-failed-dma-maps-in-SVQ-iova-tree.patch [RHELX-57]
- kvm-util-accept-iova_tree_remove_parameter-by-value.patch [RHELX-57]
- kvm-vdpa-Remove-SVQ-vring-from-iova_tree-at-shutdown.patch [RHELX-57]
- kvm-vdpa-Make-SVQ-vring-unmapping-return-void.patch [RHELX-57]
- kvm-vhost-Always-store-new-kick-fd-on-vhost_svq_set_svq_.patch [RHELX-57]
- kvm-vdpa-Use-ring-hwaddr-at-vhost_vdpa_svq_unmap_ring.patch [RHELX-57]
- kvm-vhost-stop-transfer-elem-ownership-in-vhost_handle_g.patch [RHELX-57]
- kvm-vhost-use-SVQ-element-ndescs-instead-of-opaque-data-.patch [RHELX-57]
- kvm-vhost-Delete-useless-read-memory-barrier.patch [RHELX-57]
- kvm-vhost-Do-not-depend-on-NULL-VirtQueueElement-on-vhos.patch [RHELX-57]
- kvm-vhost_net-Add-NetClientInfo-start-callback.patch [RHELX-57]
- kvm-vhost_net-Add-NetClientInfo-stop-callback.patch [RHELX-57]
- kvm-vdpa-add-net_vhost_vdpa_cvq_info-NetClientInfo.patch [RHELX-57]
- kvm-vdpa-Move-command-buffers-map-to-start-of-net-device.patch [RHELX-57]
- kvm-vdpa-extract-vhost_vdpa_net_cvq_add-from-vhost_vdpa_.patch [RHELX-57]
- kvm-vhost_net-add-NetClientState-load-callback.patch [RHELX-57]
- kvm-vdpa-Add-virtio-net-mac-address-via-CVQ-at-start.patch [RHELX-57]
- kvm-vdpa-Delete-CVQ-migration-blocker.patch [RHELX-57]
- kvm-vdpa-Make-VhostVDPAState-cvq_cmd_in_buffer-control-a.patch [RHELX-57]
- kvm-vdpa-extract-vhost_vdpa_net_load_mac-from-vhost_vdpa.patch [RHELX-57]
- kvm-vdpa-Add-vhost_vdpa_net_load_mq.patch [RHELX-57]
- kvm-vdpa-validate-MQ-CVQ-commands.patch [RHELX-57]
- kvm-virtio-net-Update-virtio-net-curr_queue_pairs-in-vdp.patch [RHELX-57]
- kvm-vdpa-Allow-MQ-feature-in-SVQ.patch [RHELX-57]
- kvm-i386-reset-KVM-nested-state-upon-CPU-reset.patch [bz#2125281]
- kvm-i386-do-kvm_put_msr_feature_control-first-thing-when.patch [bz#2125281]
- kvm-Revert-Re-enable-capstone-internal-build.patch [bz#2127825]
- kvm-spec-Use-capstone-package.patch [bz#2127825]
- Resolves: RHELX-57
  (vDPA SVQ Multiqueue support)
- Resolves: bz#2125281
  ([RHEL9.1] Guests in VMX root operation fail to reboot with QEMU's 'system_reset' command [rhel-9.2.0])
- Resolves: bz#2127825
  (Use capstone for qemu-kvm build)

From 10157c62f06e86f2ccf1fd4130ef55f7f9beac2f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= <eperezma@redhat.com>
Date: Tue, 23 Aug 2022 20:30:36 +0200
Subject: [PATCH 18/29] vdpa: Add virtio-net mac address via CVQ at start
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

RH-Author: Laurent Vivier <lvivier@redhat.com>
RH-MergeRequest: 117: vDPA SVQ Multiqueue support
RH-Jira: RHELX-57
RH-Acked-by: Jason Wang <jasowang@redhat.com>
RH-Acked-by: Cindy Lu <lulu@redhat.com>
RH-Acked-by: Eugenio Pérez <eperezma@redhat.com>
RH-Commit: [18/25] f5b7a59a70e51450df8c58b48e4eb30ef2a44189 (redhat/centos-stream/src/qemu-kvm)

This is needed so the destination vdpa device sees the same state that the
guest set in the source.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
(cherry picked from commit dd036d8d278e6882803bccaa8c51b8527ea33f45)
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
---
 net/vhost-vdpa.c | 40 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 3575bf64ee..640434d1ea 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -363,11 +363,51 @@ static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
     return vhost_svq_poll(svq);
 }
 
+static int vhost_vdpa_net_load(NetClientState *nc)
+{
+    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+    const struct vhost_vdpa *v = &s->vhost_vdpa;
+    const VirtIONet *n;
+    uint64_t features;
+
+    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
+
+    if (!v->shadow_vqs_enabled) {
+        return 0;
+    }
+
+    n = VIRTIO_NET(v->dev->vdev);
+    features = n->parent_obj.guest_features;
+    if (features & BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR)) {
+        const struct virtio_net_ctrl_hdr ctrl = {
+            .class = VIRTIO_NET_CTRL_MAC,
+            .cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET,
+        };
+        char *cursor = s->cvq_cmd_out_buffer;
+        ssize_t dev_written;
+
+        memcpy(cursor, &ctrl, sizeof(ctrl));
+        cursor += sizeof(ctrl);
+        memcpy(cursor, n->mac, sizeof(n->mac));
+
+        dev_written = vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + sizeof(n->mac),
+                                             sizeof(virtio_net_ctrl_ack));
+        if (unlikely(dev_written < 0)) {
+            return dev_written;
+        }
+
+        return *((virtio_net_ctrl_ack *)s->cvq_cmd_in_buffer) != VIRTIO_NET_OK;
+    }
+
+    return 0;
+}
+
 static NetClientInfo net_vhost_vdpa_cvq_info = {
     .type = NET_CLIENT_DRIVER_VHOST_VDPA,
     .size = sizeof(VhostVDPAState),
     .receive = vhost_vdpa_receive,
     .start = vhost_vdpa_net_cvq_start,
+    .load = vhost_vdpa_net_load,
     .stop = vhost_vdpa_net_cvq_stop,
     .cleanup = vhost_vdpa_cleanup,
     .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
--
2.31.1
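
Note: for readers unfamiliar with the control virtqueue command format, below is a
minimal standalone sketch (plain C, using locally defined stand-ins for the virtio-net
control structures rather than QEMU's headers) of the buffer layout that
vhost_vdpa_net_load() assembles: a 2-byte virtio_net_ctrl_hdr with class
VIRTIO_NET_CTRL_MAC and command VIRTIO_NET_CTRL_MAC_ADDR_SET, followed by the guest's
6-byte MAC in the out buffer, with a 1-byte virtio_net_ctrl_ack expected back in the in
buffer. Buffer names and the example MAC are illustrative only.

/*
 * Standalone sketch, not QEMU code: layout of the CVQ "set MAC" command
 * that vhost_vdpa_net_load() copies into s->cvq_cmd_out_buffer.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VIRTIO_NET_CTRL_MAC          1
#define VIRTIO_NET_CTRL_MAC_ADDR_SET 1
#define VIRTIO_NET_OK                0

struct virtio_net_ctrl_hdr {
    uint8_t class;   /* command class, here VIRTIO_NET_CTRL_MAC */
    uint8_t cmd;     /* command within the class */
} __attribute__((packed));

typedef uint8_t virtio_net_ctrl_ack;

int main(void)
{
    const uint8_t mac[6] = {0x52, 0x54, 0x00, 0x12, 0x34, 0x56}; /* example MAC */
    uint8_t out_buf[sizeof(struct virtio_net_ctrl_hdr) + sizeof(mac)];
    uint8_t in_buf[sizeof(virtio_net_ctrl_ack)];

    /* Header first, then the 6-byte MAC, in the same order the patch
     * memcpy()s them into the out buffer. */
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = VIRTIO_NET_CTRL_MAC,
        .cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET,
    };
    memcpy(out_buf, &ctrl, sizeof(ctrl));
    memcpy(out_buf + sizeof(ctrl), mac, sizeof(mac));

    /* The device writes a one-byte status into the in buffer; pretend it
     * answered VIRTIO_NET_OK, which is what the patch checks for. */
    in_buf[0] = VIRTIO_NET_OK;
    assert(*(virtio_net_ctrl_ack *)in_buf == VIRTIO_NET_OK);

    printf("out buffer: %zu bytes (2-byte header + 6-byte MAC)\n",
           sizeof(out_buf));
    return 0;
}

Replaying this command through the shadow CVQ at device start is what lets the
destination device pick up the MAC the guest programmed on the source, and it is
what the next patch in the series (kvm-vdpa-Delete-CVQ-migration-blocker.patch)
relies on to drop the CVQ migration blocker.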