6b2ce2692f
- kvm-hv-balloon-use-get_min_alignment-to-express-32-GiB-a.patch [RHEL-20341]
- kvm-memory-device-reintroduce-memory-region-size-check.patch [RHEL-20341]
- kvm-block-backend-Allow-concurrent-context-changes.patch [RHEL-24593]
- kvm-scsi-Await-request-purging.patch [RHEL-24593]
- kvm-string-output-visitor-show-structs-as-omitted.patch [RHEL-17369 RHEL-20764 RHEL-7356]
- kvm-string-output-visitor-Fix-pseudo-struct-handling.patch [RHEL-17369 RHEL-20764 RHEL-7356]
- kvm-qdev-properties-alias-all-object-class-properties.patch [RHEL-17369 RHEL-20764 RHEL-7356]
- kvm-qdev-add-IOThreadVirtQueueMappingList-property-type.patch [RHEL-17369 RHEL-20764 RHEL-7356]
- kvm-virtio-blk-add-iothread-vq-mapping-parameter.patch [RHEL-17369 RHEL-20764 RHEL-7356]
- kvm-virtio-blk-Fix-potential-nullpointer-read-access-in-.patch [RHEL-17369 RHEL-20764 RHEL-7356]
- kvm-iotests-add-filter_qmp_generated_node_ids.patch [RHEL-17369 RHEL-20764 RHEL-7356]
- kvm-iotests-port-141-to-Python-for-reliable-QMP-testing.patch [RHEL-17369 RHEL-20764 RHEL-7356]
- kvm-monitor-only-run-coroutine-commands-in-qemu_aio_cont.patch [RHEL-17369 RHEL-20764 RHEL-7356]
- kvm-virtio-blk-move-dataplane-code-into-virtio-blk.c.patch [RHEL-17369 RHEL-20764 RHEL-7356]
- kvm-virtio-blk-rename-dataplane-create-destroy-functions.patch [RHEL-17369 RHEL-20764 RHEL-7356]
- kvm-virtio-blk-rename-dataplane-to-ioeventfd.patch [RHEL-17369 RHEL-20764 RHEL-7356]
- kvm-virtio-blk-restart-s-rq-reqs-in-vq-AioContexts.patch [RHEL-17369 RHEL-20764 RHEL-7356]
- kvm-virtio-blk-tolerate-failure-to-set-BlockBackend-AioC.patch [RHEL-17369 RHEL-20764 RHEL-7356]
- kvm-virtio-blk-always-set-ioeventfd-during-startup.patch [RHEL-17369 RHEL-20764 RHEL-7356]
- kvm-tests-unit-Bump-test-replication-timeout-to-60-secon.patch [RHEL-17369 RHEL-20764 RHEL-7356]
- kvm-iotests-iothreads-stream-Use-the-right-TimeoutError.patch [RHEL-17369 RHEL-20764 RHEL-7356]
- kvm-virtio-mem-default-enable-dynamic-memslots.patch [RHEL-24045]
- Resolves: RHEL-20341 (memory-device size alignment check invalid in QEMU 8.2)
- Resolves: RHEL-24593 (qemu crash blk_get_aio_context(BlockBackend *): Assertion `ctx == blk->ctx' when repeatedly hotplug/unplug disk)
- Resolves: RHEL-17369 ([nfv virt][rt][post-copy migration] qemu-kvm: ../block/qcow2.c:5263: ImageInfoSpecific *qcow2_get_specific_info(BlockDriverState *, Error **): Assertion `false' failed.)
- Resolves: RHEL-20764 ([qemu-kvm] Enable qemu multiqueue block layer support)
- Resolves: RHEL-7356 ([qemu-kvm] no response with QMP command device_add when repeatedly hotplug/unplug virtio disks [RHEL-9])
- Resolves: RHEL-24045 (QEMU: default-enable dynamically using multiple memslots for virtio-mem)
From 733fc13f65286c849ad6618be89df450f8bc5f7e Mon Sep 17 00:00:00 2001
From: Stefan Hajnoczi <stefanha@redhat.com>
Date: Wed, 20 Dec 2023 08:47:55 -0500
Subject: [PATCH 09/22] virtio-blk: add iothread-vq-mapping parameter

RH-Author: Stefan Hajnoczi <stefanha@redhat.com>
RH-MergeRequest: 219: virtio-blk: add iothread-vq-mapping parameter
RH-Jira: RHEL-17369 RHEL-20764 RHEL-7356
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
RH-Acked-by: Hanna Czenczek <hreitz@redhat.com>
RH-Commit: [5/17] c371fe62376c4eb54da88272a5966cec28404224 (stefanha/centos-stream-qemu-kvm)

Add the iothread-vq-mapping parameter to assign virtqueues to IOThreads.
Store the vq:AioContext mapping in the new struct
VirtIOBlockDataPlane->vq_aio_context[] field and refactor the code to
use the per-vq AioContext instead of the BlockDriverState's AioContext.
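
A hypothetical invocation might look like the following (the object ids,
drive name, and queue count here are illustrative, not taken from this
patch):

    qemu-system-x86_64 ... \
        -object iothread,id=iothread0 \
        -object iothread,id=iothread1 \
        -device '{"driver":"virtio-blk-pci","drive":"drive0","num-queues":4,
                  "iothread-vq-mapping":[{"iothread":"iothread0","vqs":[0,1]},
                                         {"iothread":"iothread1","vqs":[2,3]}]}'

A list element may also omit "vqs", in which case virtqueues are assigned
round-robin across the listed IOThreads; the validation code below rejects
mixing elements with and without "vqs".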

Reimplement --device virtio-blk-pci,iothread= and non-IOThread mode by
assigning all virtqueues to the IOThread and main loop's AioContext in
vq_aio_context[], respectively.
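
For instance (again illustrative, not from this patch), the legacy syntax

    -device virtio-blk-pci,drive=drive0,iothread=iothread0

now behaves as if every virtqueue had been mapped to iothread0's
AioContext, and omitting both parameters maps every virtqueue to the main
loop's AioContext.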

The comment in struct VirtIOBlockDataPlane about EventNotifiers is
stale. Remove it.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-ID: <20231220134755.814917-5-stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
(cherry picked from commit b6948ab01df068bef591868c22d1f873d2d05cde)
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 hw/block/dataplane/virtio-blk.c | 155 ++++++++++++++++++++++++--------
 hw/block/dataplane/virtio-blk.h |   3 +
 hw/block/virtio-blk.c           |  92 ++++++++++++++++---
 include/hw/virtio/virtio-blk.h  |   2 +
 4 files changed, 202 insertions(+), 50 deletions(-)

diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index 7bbbd981ad..6debd4401e 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -32,13 +32,11 @@ struct VirtIOBlockDataPlane {
     VirtIOBlkConf *conf;
     VirtIODevice *vdev;
 
-    /* Note that these EventNotifiers are assigned by value. This is
-     * fine as long as you do not call event_notifier_cleanup on them
-     * (because you don't own the file descriptor or handle; you just
-     * use it).
+    /*
+     * The AioContext for each virtqueue. The BlockDriverState will use the
+     * first element as its AioContext.
      */
-    IOThread *iothread;
-    AioContext *ctx;
+    AioContext **vq_aio_context;
 };
 
 /* Raise an interrupt to signal guest, if necessary */
@@ -47,6 +45,45 @@ void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq)
     virtio_notify_irqfd(s->vdev, vq);
 }
 
+/* Generate vq:AioContext mappings from a validated iothread-vq-mapping list */
+static void
+apply_vq_mapping(IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
+                 AioContext **vq_aio_context, uint16_t num_queues)
+{
+    IOThreadVirtQueueMappingList *node;
+    size_t num_iothreads = 0;
+    size_t cur_iothread = 0;
+
+    for (node = iothread_vq_mapping_list; node; node = node->next) {
+        num_iothreads++;
+    }
+
+    for (node = iothread_vq_mapping_list; node; node = node->next) {
+        IOThread *iothread = iothread_by_id(node->value->iothread);
+        AioContext *ctx = iothread_get_aio_context(iothread);
+
+        /* Released in virtio_blk_data_plane_destroy() */
+        object_ref(OBJECT(iothread));
+
+        if (node->value->vqs) {
+            uint16List *vq;
+
+            /* Explicit vq:IOThread assignment */
+            for (vq = node->value->vqs; vq; vq = vq->next) {
+                vq_aio_context[vq->value] = ctx;
+            }
+        } else {
+            /* Round-robin vq:IOThread assignment */
+            for (unsigned i = cur_iothread; i < num_queues;
+                 i += num_iothreads) {
+                vq_aio_context[i] = ctx;
+            }
+        }
+
+        cur_iothread++;
+    }
+}
+
 /* Context: QEMU global mutex held */
 bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
                                   VirtIOBlockDataPlane **dataplane,
@@ -58,7 +95,7 @@ bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
 
     *dataplane = NULL;
 
-    if (conf->iothread) {
+    if (conf->iothread || conf->iothread_vq_mapping_list) {
         if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
             error_setg(errp,
                        "device is incompatible with iothread "
@@ -86,13 +123,24 @@ bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
     s = g_new0(VirtIOBlockDataPlane, 1);
     s->vdev = vdev;
     s->conf = conf;
+    s->vq_aio_context = g_new(AioContext *, conf->num_queues);
+
+    if (conf->iothread_vq_mapping_list) {
+        apply_vq_mapping(conf->iothread_vq_mapping_list, s->vq_aio_context,
+                         conf->num_queues);
+    } else if (conf->iothread) {
+        AioContext *ctx = iothread_get_aio_context(conf->iothread);
+        for (unsigned i = 0; i < conf->num_queues; i++) {
+            s->vq_aio_context[i] = ctx;
+        }
 
-    if (conf->iothread) {
-        s->iothread = conf->iothread;
-        object_ref(OBJECT(s->iothread));
-        s->ctx = iothread_get_aio_context(s->iothread);
+        /* Released in virtio_blk_data_plane_destroy() */
+        object_ref(OBJECT(conf->iothread));
     } else {
-        s->ctx = qemu_get_aio_context();
+        AioContext *ctx = qemu_get_aio_context();
+        for (unsigned i = 0; i < conf->num_queues; i++) {
+            s->vq_aio_context[i] = ctx;
+        }
     }
 
     *dataplane = s;
@@ -104,6 +152,7 @@ bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
 void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
 {
     VirtIOBlock *vblk;
+    VirtIOBlkConf *conf = s->conf;
 
     if (!s) {
         return;
@@ -111,9 +160,21 @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
 
     vblk = VIRTIO_BLK(s->vdev);
     assert(!vblk->dataplane_started);
-    if (s->iothread) {
-        object_unref(OBJECT(s->iothread));
+
+    if (conf->iothread_vq_mapping_list) {
+        IOThreadVirtQueueMappingList *node;
+
+        for (node = conf->iothread_vq_mapping_list; node; node = node->next) {
+            IOThread *iothread = iothread_by_id(node->value->iothread);
+            object_unref(OBJECT(iothread));
+        }
+    }
+
+    if (conf->iothread) {
+        object_unref(OBJECT(conf->iothread));
     }
+
+    g_free(s->vq_aio_context);
     g_free(s);
 }
 
@@ -177,19 +238,13 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
 
     trace_virtio_blk_data_plane_start(s);
 
-    r = blk_set_aio_context(s->conf->conf.blk, s->ctx, &local_err);
+    r = blk_set_aio_context(s->conf->conf.blk, s->vq_aio_context[0],
+                            &local_err);
     if (r < 0) {
         error_report_err(local_err);
         goto fail_aio_context;
     }
 
-    /* Kick right away to begin processing requests already in vring */
-    for (i = 0; i < nvqs; i++) {
-        VirtQueue *vq = virtio_get_queue(s->vdev, i);
-
-        event_notifier_set(virtio_queue_get_host_notifier(vq));
-    }
-
     /*
      * These fields must be visible to the IOThread when it processes the
      * virtqueue, otherwise it will think dataplane has not started yet.
@@ -206,8 +261,12 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
     if (!blk_in_drain(s->conf->conf.blk)) {
         for (i = 0; i < nvqs; i++) {
             VirtQueue *vq = virtio_get_queue(s->vdev, i);
+            AioContext *ctx = s->vq_aio_context[i];
 
-            virtio_queue_aio_attach_host_notifier(vq, s->ctx);
+            /* Kick right away to begin processing requests already in vring */
+            event_notifier_set(virtio_queue_get_host_notifier(vq));
+
+            virtio_queue_aio_attach_host_notifier(vq, ctx);
         }
     }
     return 0;
@@ -236,23 +295,18 @@
  *
  * Context: BH in IOThread
  */
-static void virtio_blk_data_plane_stop_bh(void *opaque)
+static void virtio_blk_data_plane_stop_vq_bh(void *opaque)
 {
-    VirtIOBlockDataPlane *s = opaque;
-    unsigned i;
-
-    for (i = 0; i < s->conf->num_queues; i++) {
-        VirtQueue *vq = virtio_get_queue(s->vdev, i);
-        EventNotifier *host_notifier = virtio_queue_get_host_notifier(vq);
+    VirtQueue *vq = opaque;
+    EventNotifier *host_notifier = virtio_queue_get_host_notifier(vq);
 
-        virtio_queue_aio_detach_host_notifier(vq, s->ctx);
+    virtio_queue_aio_detach_host_notifier(vq, qemu_get_current_aio_context());
 
-        /*
-         * Test and clear notifier after disabling event, in case poll callback
-         * didn't have time to run.
-         */
-        virtio_queue_host_notifier_read(host_notifier);
-    }
+    /*
+     * Test and clear notifier after disabling event, in case poll callback
+     * didn't have time to run.
+     */
+    virtio_queue_host_notifier_read(host_notifier);
 }
 
 /* Context: QEMU global mutex held */
@@ -279,7 +333,12 @@
     trace_virtio_blk_data_plane_stop(s);
 
     if (!blk_in_drain(s->conf->conf.blk)) {
-        aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s);
+        for (i = 0; i < nvqs; i++) {
+            VirtQueue *vq = virtio_get_queue(s->vdev, i);
+            AioContext *ctx = s->vq_aio_context[i];
+
+            aio_wait_bh_oneshot(ctx, virtio_blk_data_plane_stop_vq_bh, vq);
+        }
     }
 
     /*
@@ -322,3 +381,23 @@
 
     s->stopping = false;
 }
+
+void virtio_blk_data_plane_detach(VirtIOBlockDataPlane *s)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(s->vdev);
+
+    for (uint16_t i = 0; i < s->conf->num_queues; i++) {
+        VirtQueue *vq = virtio_get_queue(vdev, i);
+        virtio_queue_aio_detach_host_notifier(vq, s->vq_aio_context[i]);
+    }
+}
+
+void virtio_blk_data_plane_attach(VirtIOBlockDataPlane *s)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(s->vdev);
+
+    for (uint16_t i = 0; i < s->conf->num_queues; i++) {
+        VirtQueue *vq = virtio_get_queue(vdev, i);
+        virtio_queue_aio_attach_host_notifier(vq, s->vq_aio_context[i]);
+    }
+}
diff --git a/hw/block/dataplane/virtio-blk.h b/hw/block/dataplane/virtio-blk.h
index 5e18bb99ae..1a806fe447 100644
--- a/hw/block/dataplane/virtio-blk.h
+++ b/hw/block/dataplane/virtio-blk.h
@@ -28,4 +28,7 @@ void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq);
 int virtio_blk_data_plane_start(VirtIODevice *vdev);
 void virtio_blk_data_plane_stop(VirtIODevice *vdev);
 
+void virtio_blk_data_plane_detach(VirtIOBlockDataPlane *s);
+void virtio_blk_data_plane_attach(VirtIOBlockDataPlane *s);
+
 #endif /* HW_DATAPLANE_VIRTIO_BLK_H */
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index ec9ed09a6a..46e73b2c96 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -1151,6 +1151,7 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
             return;
         }
     }
+
     virtio_blk_handle_vq(s, vq);
 }
 
@@ -1463,6 +1464,68 @@ static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
     return 0;
 }
 
+static bool
+validate_iothread_vq_mapping_list(IOThreadVirtQueueMappingList *list,
+                                  uint16_t num_queues, Error **errp)
+{
+    g_autofree unsigned long *vqs = bitmap_new(num_queues);
+    g_autoptr(GHashTable) iothreads =
+        g_hash_table_new(g_str_hash, g_str_equal);
+
+    for (IOThreadVirtQueueMappingList *node = list; node; node = node->next) {
+        const char *name = node->value->iothread;
+        uint16List *vq;
+
+        if (!iothread_by_id(name)) {
+            error_setg(errp, "IOThread \"%s\" object does not exist", name);
+            return false;
+        }
+
+        if (!g_hash_table_add(iothreads, (gpointer)name)) {
+            error_setg(errp,
+                    "duplicate IOThread name \"%s\" in iothread-vq-mapping",
+                    name);
+            return false;
+        }
+
+        if (node != list) {
+            if (!!node->value->vqs != !!list->value->vqs) {
+                error_setg(errp, "either all items in iothread-vq-mapping "
+                                 "must have vqs or none of them must have it");
+                return false;
+            }
+        }
+
+        for (vq = node->value->vqs; vq; vq = vq->next) {
+            if (vq->value >= num_queues) {
+                error_setg(errp, "vq index %u for IOThread \"%s\" must be "
+                        "less than num_queues %u in iothread-vq-mapping",
+                        vq->value, name, num_queues);
+                return false;
+            }
+
+            if (test_and_set_bit(vq->value, vqs)) {
+                error_setg(errp, "cannot assign vq %u to IOThread \"%s\" "
+                        "because it is already assigned", vq->value, name);
+                return false;
+            }
+        }
+    }
+
+    if (list->value->vqs) {
+        for (uint16_t i = 0; i < num_queues; i++) {
+            if (!test_bit(i, vqs)) {
+                error_setg(errp,
+                        "missing vq %u IOThread assignment in iothread-vq-mapping",
+                        i);
+                return false;
+            }
+        }
+    }
+
+    return true;
+}
+
 static void virtio_resize_cb(void *opaque)
 {
     VirtIODevice *vdev = opaque;
@@ -1487,34 +1550,24 @@
 static void virtio_blk_drained_begin(void *opaque)
 {
     VirtIOBlock *s = opaque;
-    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
-    AioContext *ctx = blk_get_aio_context(s->conf.conf.blk);
 
     if (!s->dataplane || !s->dataplane_started) {
         return;
     }
 
-    for (uint16_t i = 0; i < s->conf.num_queues; i++) {
-        VirtQueue *vq = virtio_get_queue(vdev, i);
-        virtio_queue_aio_detach_host_notifier(vq, ctx);
-    }
+    virtio_blk_data_plane_detach(s->dataplane);
 }
 
 /* Resume virtqueue ioeventfd processing after drain */
 static void virtio_blk_drained_end(void *opaque)
 {
     VirtIOBlock *s = opaque;
-    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
-    AioContext *ctx = blk_get_aio_context(s->conf.conf.blk);
 
     if (!s->dataplane || !s->dataplane_started) {
        return;
     }
 
-    for (uint16_t i = 0; i < s->conf.num_queues; i++) {
-        VirtQueue *vq = virtio_get_queue(vdev, i);
-        virtio_queue_aio_attach_host_notifier(vq, ctx);
-    }
+    virtio_blk_data_plane_attach(s->dataplane);
 }
 
 static const BlockDevOps virtio_block_ops = {
@@ -1600,6 +1653,19 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
         return;
     }
 
+    if (conf->iothread_vq_mapping_list) {
+        if (conf->iothread) {
+            error_setg(errp, "iothread and iothread-vq-mapping properties "
+                             "cannot be set at the same time");
+            return;
+        }
+
+        if (!validate_iothread_vq_mapping_list(conf->iothread_vq_mapping_list,
+                                               conf->num_queues, errp)) {
+            return;
+        }
+    }
+
     s->config_size = virtio_get_config_size(&virtio_blk_cfg_size_params,
                                             s->host_features);
     virtio_init(vdev, VIRTIO_ID_BLOCK, s->config_size);
@@ -1702,6 +1768,8 @@ static Property virtio_blk_properties[] = {
     DEFINE_PROP_BOOL("seg-max-adjust", VirtIOBlock, conf.seg_max_adjust, true),
     DEFINE_PROP_LINK("iothread", VirtIOBlock, conf.iothread, TYPE_IOTHREAD,
                      IOThread *),
+    DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST("iothread-vq-mapping", VirtIOBlock,
+                                         conf.iothread_vq_mapping_list),
     DEFINE_PROP_BIT64("discard", VirtIOBlock, host_features,
                       VIRTIO_BLK_F_DISCARD, true),
     DEFINE_PROP_BOOL("report-discard-granularity", VirtIOBlock,
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
index 9881009c22..5e4091e4da 100644
--- a/include/hw/virtio/virtio-blk.h
+++ b/include/hw/virtio/virtio-blk.h
@@ -21,6 +21,7 @@
 #include "sysemu/block-backend.h"
 #include "sysemu/block-ram-registrar.h"
 #include "qom/object.h"
+#include "qapi/qapi-types-virtio.h"
 
 #define TYPE_VIRTIO_BLK "virtio-blk-device"
 OBJECT_DECLARE_SIMPLE_TYPE(VirtIOBlock, VIRTIO_BLK)
@@ -37,6 +38,7 @@ struct VirtIOBlkConf
 {
     BlockConf conf;
     IOThread *iothread;
+    IOThreadVirtQueueMappingList *iothread_vq_mapping_list;
     char *serial;
     uint32_t request_merging;
     uint16_t num_queues;
-- 
2.39.3