From 9311035821b3fea3f78c7f06ddb8a3861584f907 Mon Sep 17 00:00:00 2001
From: Stefan Hajnoczi <stefanha@redhat.com>
Date: Fri, 19 Jan 2024 08:57:46 -0500
Subject: [PATCH 17/22] virtio-blk: restart s->rq reqs in vq AioContexts

RH-Author: Stefan Hajnoczi <stefanha@redhat.com>
RH-MergeRequest: 219: virtio-blk: add iothread-vq-mapping parameter
RH-Jira: RHEL-17369 RHEL-20764 RHEL-7356
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
RH-Acked-by: Hanna Czenczek <hreitz@redhat.com>
RH-Commit: [13/17] cf5ad0352a78458ffc7588f967963f62b267fd64 (stefanha/centos-stream-qemu-kvm)

A virtio-blk device with the iothread-vq-mapping parameter has
per-virtqueue AioContexts. It is not thread-safe to process s->rq
requests in the BlockBackend AioContext, since that may differ from the
AioContext of the virtqueue to which a request belongs. The current
code therefore races and can crash.
Adapt virtio_blk_dma_restart_cb() to first split s->rq into per-vq
lists and then schedule a BH in each vq's AioContext as necessary. This
way requests are safely processed in their vq's AioContext.
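
In outline, the new flow looks like this (condensed from the diff
below; the s->rq_lock critical section is shown in the hunk itself):

    /* Bucket the device-wide s->rq list by virtqueue index */
    g_autofree VirtIOBlockReq **vq_rq = g_new0(VirtIOBlockReq *, num_queues);
    VirtIOBlockReq *rq = ...; /* s->rq, detached under s->rq_lock */

    while (rq) {
        VirtIOBlockReq *next = rq->next;
        uint16_t idx = virtio_get_queue_index(rq->vq);

        rq->next = vq_rq[idx];
        vq_rq[idx] = rq;
        rq = next;
    }

    /* One BH per vq with pending requests, in that vq's AioContext */
    for (uint16_t i = 0; i < num_queues; i++) {
        if (vq_rq[i]) {
            blk_inc_in_flight(s->conf.conf.blk);
            aio_bh_schedule_oneshot(s->vq_aio_context[i],
                                    virtio_blk_dma_restart_bh, vq_rq[i]);
        }
    }

Each BH then handles only the requests belonging to its own virtqueue,
and every blk_inc_in_flight() call is paired with the dec in
virtio_blk_dma_restart_bh().
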
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-ID: <20240119135748.270944-5-stefanha@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
(cherry picked from commit 71ee0cdd14cc01a8b51aa4e9577dd0a1bb2f8e19)
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 hw/block/virtio-blk.c | 44 ++++++++++++++++++++++++++++++++-----------
 1 file changed, 33 insertions(+), 11 deletions(-)

diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 08c566946a..f48ce5cbb8 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -1156,16 +1156,11 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 
 static void virtio_blk_dma_restart_bh(void *opaque)
 {
-    VirtIOBlock *s = opaque;
+    VirtIOBlockReq *req = opaque;
+    VirtIOBlock *s = req->dev; /* we're called with at least one request */
 
-    VirtIOBlockReq *req;
     MultiReqBuffer mrb = {};
 
-    WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
-        req = s->rq;
-        s->rq = NULL;
-    }
-
     while (req) {
         VirtIOBlockReq *next = req->next;
         if (virtio_blk_handle_request(req, &mrb)) {
@@ -1195,16 +1190,43 @@ static void virtio_blk_dma_restart_cb(void *opaque, bool running,
                                       RunState state)
 {
     VirtIOBlock *s = opaque;
+    uint16_t num_queues = s->conf.num_queues;
 
     if (!running) {
         return;
     }
 
-    /* Paired with dec in virtio_blk_dma_restart_bh() */
-    blk_inc_in_flight(s->conf.conf.blk);
+    /* Split the device-wide s->rq request list into per-vq request lists */
+    g_autofree VirtIOBlockReq **vq_rq = g_new0(VirtIOBlockReq *, num_queues);
+    VirtIOBlockReq *rq;
+
+    WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
+        rq = s->rq;
+        s->rq = NULL;
+    }
+
+    while (rq) {
+        VirtIOBlockReq *next = rq->next;
+        uint16_t idx = virtio_get_queue_index(rq->vq);
+
+        rq->next = vq_rq[idx];
+        vq_rq[idx] = rq;
+        rq = next;
+    }
 
-    aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.conf.blk),
-                            virtio_blk_dma_restart_bh, s);
+    /* Schedule a BH to submit the requests in each vq's AioContext */
+    for (uint16_t i = 0; i < num_queues; i++) {
+        if (!vq_rq[i]) {
+            continue;
+        }
+
+        /* Paired with dec in virtio_blk_dma_restart_bh() */
+        blk_inc_in_flight(s->conf.conf.blk);
+
+        aio_bh_schedule_oneshot(s->vq_aio_context[i],
+                                virtio_blk_dma_restart_bh,
+                                vq_rq[i]);
+    }
 }
 
 static void virtio_blk_reset(VirtIODevice *vdev)
--
2.39.3