a6628605f7
- kvm-qcow2-Fix-theoretical-corruption-in-store_bitmap-err.patch [bz#2150180]
- kvm-qemu-img-commit-Report-errors-while-closing-the-imag.patch [bz#2150180]
- kvm-qemu-img-bitmap-Report-errors-while-closing-the-imag.patch [bz#2150180]
- kvm-qemu-iotests-Test-qemu-img-bitmap-commit-exit-code-o.patch [bz#2150180]
- kvm-accel-tcg-Test-CPUJumpCache-in-tb_jmp_cache_clear_pa.patch [bz#2165280]
- kvm-block-Improve-empty-format-specific-info-dump.patch [bz#1860292]
- kvm-block-file-Add-file-specific-image-info.patch [bz#1860292]
- kvm-block-vmdk-Change-extent-info-type.patch [bz#1860292]
- kvm-block-Split-BlockNodeInfo-off-of-ImageInfo.patch [bz#1860292]
- kvm-qemu-img-Use-BlockNodeInfo.patch [bz#1860292]
- kvm-block-qapi-Let-bdrv_query_image_info-recurse.patch [bz#1860292]
- kvm-block-qapi-Introduce-BlockGraphInfo.patch [bz#1860292]
- kvm-block-qapi-Add-indentation-to-bdrv_node_info_dump.patch [bz#1860292]
- kvm-iotests-Filter-child-node-information.patch [bz#1860292]
- kvm-iotests-106-214-308-Read-only-one-size-line.patch [bz#1860292]
- kvm-qemu-img-Let-info-print-block-graph.patch [bz#1860292]
- kvm-qemu-img-Change-info-key-names-for-protocol-nodes.patch [bz#1860292]
- kvm-Revert-vhost-user-Monitor-slave-channel-in-vhost_use.patch [bz#2155173]
- kvm-Revert-vhost-user-Introduce-nested-event-loop-in-vho.patch [bz#2155173]
- kvm-virtio-rng-pci-fix-transitional-migration-compat-for.patch [bz#2162569]
- Resolves: bz#2150180 (qemu-img finishes successfully while having errors in commit or bitmaps operations)
- Resolves: bz#2165280 ([kvm-unit-tests] debug-wp-migration fails)
- Resolves: bz#1860292 (RFE: add extent_size_hint information to qemu-img info)
- Resolves: bz#2155173 ([vhost-user] unable to start vhost net: 71: falling back on userspace)
- Resolves: bz#2162569 ([transitional device][virtio-rng-pci-transitional]Stable Guest ABI failed between RHEL 8.6 to RHEL 9.2)
From 0c19fb7c4a22a30830152b224b2e66963f829a7a Mon Sep 17 00:00:00 2001
From: Greg Kurz <groug@kaod.org>
Date: Thu, 19 Jan 2023 18:24:24 +0100
Subject: [PATCH 19/20] Revert "vhost-user: Introduce nested event loop in
 vhost_user_read()"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

RH-Author: Laurent Vivier <lvivier@redhat.com>
RH-MergeRequest: 146: Fix vhost-user with dpdk
RH-Bugzilla: 2155173
RH-Acked-by: Cindy Lu <lulu@redhat.com>
RH-Acked-by: Greg Kurz (RH) <gkurz@redhat.com>
RH-Acked-by: Eugenio Pérez <eperezma@redhat.com>
RH-Commit: [2/2] 9b67041f92f29f70b7ccb41d8087801e4e4e38af (lvivier/qemu-kvm-centos)

This reverts commit a7f523c7d114d445c5d83aecdba3efc038e5a692.

The nested event loop is broken by design. Its only user was removed.
Drop the code as well so that nobody ever tries to use it again.

I had to fix a couple of trivial conflicts around return values because
of 025faa872bcf ("vhost-user: stick to -errno error return convention").

Signed-off-by: Greg Kurz <groug@kaod.org>
Message-Id: <20230119172424.478268-3-groug@kaod.org>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit 4382138f642f69fdbc79ebf4e93d84be8061191f)
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
---
 hw/virtio/vhost-user.c | 65 ++++--------------------------------------
 1 file changed, 5 insertions(+), 60 deletions(-)

diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 0ac00eb901..7cb49c50f9 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -305,19 +305,8 @@ static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg)
     return 0;
 }
 
-struct vhost_user_read_cb_data {
-    struct vhost_dev *dev;
-    VhostUserMsg *msg;
-    GMainLoop *loop;
-    int ret;
-};
-
-static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition,
-                                   gpointer opaque)
+static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
 {
-    struct vhost_user_read_cb_data *data = opaque;
-    struct vhost_dev *dev = data->dev;
-    VhostUserMsg *msg = data->msg;
     struct vhost_user *u = dev->opaque;
     CharBackend *chr = u->user->chr;
     uint8_t *p = (uint8_t *) msg;
@@ -325,8 +314,7 @@ static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition,
 
     r = vhost_user_read_header(dev, msg);
     if (r < 0) {
-        data->ret = r;
-        goto end;
+        return r;
     }
 
     /* validate message size is sane */
@@ -334,8 +322,7 @@ static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition,
         error_report("Failed to read msg header."
                      " Size %d exceeds the maximum %zu.", msg->hdr.size,
                      VHOST_USER_PAYLOAD_SIZE);
-        data->ret = -EPROTO;
-        goto end;
+        return -EPROTO;
     }
 
     if (msg->hdr.size) {
@@ -346,53 +333,11 @@ static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition,
             int saved_errno = errno;
             error_report("Failed to read msg payload."
                          " Read %d instead of %d.", r, msg->hdr.size);
-            data->ret = r < 0 ? -saved_errno : -EIO;
-            goto end;
+            return r < 0 ? -saved_errno : -EIO;
         }
     }
 
-end:
-    g_main_loop_quit(data->loop);
-    return G_SOURCE_REMOVE;
-}
-
-static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
-{
-    struct vhost_user *u = dev->opaque;
-    CharBackend *chr = u->user->chr;
-    GMainContext *prev_ctxt = chr->chr->gcontext;
-    GMainContext *ctxt = g_main_context_new();
-    GMainLoop *loop = g_main_loop_new(ctxt, FALSE);
-    struct vhost_user_read_cb_data data = {
-        .dev = dev,
-        .loop = loop,
-        .msg = msg,
-        .ret = 0
-    };
-
-    /*
-     * We want to be able to monitor the slave channel fd while waiting
-     * for chr I/O. This requires an event loop, but we can't nest the
-     * one to which chr is currently attached : its fd handlers might not
-     * be prepared for re-entrancy. So we create a new one and switch chr
-     * to use it.
-     */
-    qemu_chr_be_update_read_handlers(chr->chr, ctxt);
-    qemu_chr_fe_add_watch(chr, G_IO_IN | G_IO_HUP, vhost_user_read_cb, &data);
-
-    g_main_loop_run(loop);
-
-    /*
-     * Restore the previous event loop context. This also destroys/recreates
-     * event sources : this guarantees that all pending events in the original
-     * context that have been processed by the nested loop are purged.
-     */
-    qemu_chr_be_update_read_handlers(chr->chr, prev_ctxt);
-
-    g_main_loop_unref(loop);
-    g_main_context_unref(ctxt);
-
-    return data.ret;
+    return 0;
 }
 
 static int process_message_reply(struct vhost_dev *dev,
-- 
2.31.1
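
For context, the pattern being reverted above boils down to spinning a freshly created GMainContext/GMainLoop to wait for a single fd event while the caller blocks. The following standalone GLib sketch shows the same mechanics; it is not QEMU code, and the names wait_for_fd, fd_ready_cb and WaitData are illustrative only (the real code routed this through QEMU's chardev layer rather than a raw fd source):

/* build: gcc nested_loop.c $(pkg-config --cflags --libs glib-2.0) -o nested_loop */
#include <glib.h>
#include <glib-unix.h>
#include <unistd.h>

typedef struct {
    GMainLoop *loop;
    int ret;
} WaitData;

/* One-shot fd watch: record the outcome and stop the private loop. */
static gboolean fd_ready_cb(gint fd, GIOCondition condition, gpointer opaque)
{
    WaitData *data = opaque;

    (void) fd;
    data->ret = (condition & G_IO_IN) ? 0 : -1; /* readable vs. hangup */
    g_main_loop_quit(data->loop);
    return G_SOURCE_REMOVE;
}

/*
 * Block until fd is readable by running a private context, the way the
 * reverted vhost_user_read() ran one for the vhost-user chardev.
 */
static int wait_for_fd(int fd)
{
    GMainContext *ctxt = g_main_context_new();
    GMainLoop *loop = g_main_loop_new(ctxt, FALSE);
    WaitData data = { .loop = loop, .ret = 0 };
    GSource *src = g_unix_fd_source_new(fd, G_IO_IN | G_IO_HUP);

    g_source_set_callback(src, (GSourceFunc) fd_ready_cb, &data, NULL);
    g_source_attach(src, ctxt);
    g_source_unref(src);

    g_main_loop_run(loop); /* dispatches only sources attached to ctxt */

    g_main_loop_unref(loop);
    g_main_context_unref(ctxt);
    return data.ret;
}

int main(void)
{
    int fds[2];

    g_assert(pipe(fds) == 0);
    g_assert(write(fds[1], "x", 1) == 1); /* make the read end readable */
    g_print("wait_for_fd() returned %d\n", wait_for_fd(fds[0]));
    return 0;
}

The design flaw the commit message calls out is visible here: while g_main_loop_run() spins on the private context, every source attached to the caller's original context is starved, which is why the reverted code had to detach the chardev from its context and re-attach it afterwards. As the deleted comment itself notes, the original context's fd handlers are not prepared for that kind of re-entrancy, so once the last user was removed upstream dropped the pattern entirely.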