- kvm-nbd-server-Silence-server-warnings-on-port-probes.patch [RHEL-67863]
- kvm-vhost-Add-stubs-for-the-migration-state-transfer-int.patch [RHEL-78372]
- kvm-virtio-net-vhost-user-Implement-internal-migration.patch [RHEL-78372]
- Resolves: RHEL-67863
  (Ensure qemu as NBD server does not flood logs [rhel 9.6])
- Resolves: RHEL-78372
  (Add vhost-user internal migration for passt [rhel-9])
From f73b4e686c289ef6409c945d16582af16d2c28fc Mon Sep 17 00:00:00 2001
From: Laurent Vivier <lvivier@redhat.com>
Date: Wed, 15 Jan 2025 14:50:44 +0100
Subject: [PATCH 3/3] virtio-net: vhost-user: Implement internal migration

RH-Author: Laurent Vivier <lvivier@redhat.com>
RH-MergeRequest: 337: virtio-net: vhost-user: Implement internal migration
RH-Jira: RHEL-78372
RH-Acked-by: Hanna Czenczek <hreitz@redhat.com>
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
RH-Commit: [2/2] 5832e25e230407a36022318a2a1e0d4f6f54bb92 (lvivier/qemu-kvm-centos)

JIRA: https://issues.redhat.com/browse/RHEL-78372

Add support of VHOST_USER_PROTOCOL_F_DEVICE_STATE in virtio-net
with vhost-user backend.

Cc: Hanna Czenczek <hreitz@redhat.com>
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
Message-Id: <20250115135044.799698-3-lvivier@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit 60f543ad917fad731e39ff8ce2ca83b9a9cc9d90)
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
---
 hw/net/virtio-net.c | 105 ++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 97 insertions(+), 8 deletions(-)

diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 90d05f94d4..3d2b2460ad 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -3305,6 +3305,102 @@ static const VMStateDescription vmstate_virtio_net_rss = {
     },
 };
 
+static struct vhost_dev *virtio_net_get_vhost(VirtIODevice *vdev)
+{
+    VirtIONet *n = VIRTIO_NET(vdev);
+    NetClientState *nc = qemu_get_queue(n->nic);
+    struct vhost_net *net = get_vhost_net(nc->peer);
+    return &net->dev;
+}
+
+static int vhost_user_net_save_state(QEMUFile *f, void *pv, size_t size,
+                                     const VMStateField *field,
+                                     JSONWriter *vmdesc)
+{
+    VirtIONet *n = pv;
+    VirtIODevice *vdev = VIRTIO_DEVICE(n);
+    struct vhost_dev *vhdev;
+    Error *local_error = NULL;
+    int ret;
+
+    vhdev = virtio_net_get_vhost(vdev);
+    if (vhdev == NULL) {
+        error_reportf_err(local_error,
+                          "Error getting vhost back-end of %s device %s: ",
+                          vdev->name, vdev->parent_obj.canonical_path);
+        return -1;
+    }
+
+    ret = vhost_save_backend_state(vhdev, f, &local_error);
+    if (ret < 0) {
+        error_reportf_err(local_error,
+                          "Error saving back-end state of %s device %s: ",
+                          vdev->name, vdev->parent_obj.canonical_path);
+        return ret;
+    }
+
+    return 0;
+}
+
+static int vhost_user_net_load_state(QEMUFile *f, void *pv, size_t size,
+                                     const VMStateField *field)
+{
+    VirtIONet *n = pv;
+    VirtIODevice *vdev = VIRTIO_DEVICE(n);
+    struct vhost_dev *vhdev;
+    Error *local_error = NULL;
+    int ret;
+
+    vhdev = virtio_net_get_vhost(vdev);
+    if (vhdev == NULL) {
+        error_reportf_err(local_error,
+                          "Error getting vhost back-end of %s device %s: ",
+                          vdev->name, vdev->parent_obj.canonical_path);
+        return -1;
+    }
+
+    ret = vhost_load_backend_state(vhdev, f, &local_error);
+    if (ret < 0) {
+        error_reportf_err(local_error,
+                          "Error loading back-end state of %s device %s: ",
+                          vdev->name, vdev->parent_obj.canonical_path);
+        return ret;
+    }
+
+    return 0;
+}
+
+static bool vhost_user_net_is_internal_migration(void *opaque)
+{
+    VirtIONet *n = opaque;
+    VirtIODevice *vdev = VIRTIO_DEVICE(n);
+    struct vhost_dev *vhdev;
+
+    vhdev = virtio_net_get_vhost(vdev);
+    if (vhdev == NULL) {
+        return false;
+    }
+
+    return vhost_supports_device_state(vhdev);
+}
+
+static const VMStateDescription vhost_user_net_backend_state = {
+    .name = "virtio-net-device/backend",
+    .version_id = 0,
+    .needed = vhost_user_net_is_internal_migration,
+    .fields = (const VMStateField[]) {
+        {
+            .name = "backend",
+            .info = &(const VMStateInfo) {
+                .name = "virtio-net vhost-user backend state",
+                .get = vhost_user_net_load_state,
+                .put = vhost_user_net_save_state,
+            },
+        },
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 static const VMStateDescription vmstate_virtio_net_device = {
     .name = "virtio-net-device",
     .version_id = VIRTIO_NET_VM_VERSION,
@@ -3357,6 +3453,7 @@ static const VMStateDescription vmstate_virtio_net_device = {
     },
     .subsections = (const VMStateDescription * const []) {
         &vmstate_virtio_net_rss,
+        &vhost_user_net_backend_state,
         NULL
     }
 };
@@ -3902,14 +3999,6 @@ static bool dev_unplug_pending(void *opaque)
     return vdc->primary_unplug_pending(dev);
 }
 
-static struct vhost_dev *virtio_net_get_vhost(VirtIODevice *vdev)
-{
-    VirtIONet *n = VIRTIO_NET(vdev);
-    NetClientState *nc = qemu_get_queue(n->nic);
-    struct vhost_net *net = get_vhost_net(nc->peer);
-    return &net->dev;
-}
-
 static const VMStateDescription vmstate_virtio_net = {
     .name = "virtio-net",
     .minimum_version_id = VIRTIO_NET_VM_VERSION,
-- 
2.48.1