a799a516c5
- kvm-block-compile-out-assert_bdrv_graph_readable-by-defa.patch [bz#2186725]
- kvm-graph-lock-Disable-locking-for-now.patch [bz#2186725]
- kvm-nbd-server-Fix-drained_poll-to-wake-coroutine-in-rig.patch [bz#2186725]
- kvm-iotests-Test-commit-with-iothreads-and-ongoing-I-O.patch [bz#2186725]
- kvm-memory-prevent-dma-reentracy-issues.patch [RHEL-516]
- kvm-async-Add-an-optional-reentrancy-guard-to-the-BH-API.patch [RHEL-516]
- kvm-checkpatch-add-qemu_bh_new-aio_bh_new-checks.patch [RHEL-516]
- kvm-hw-replace-most-qemu_bh_new-calls-with-qemu_bh_new_g.patch [RHEL-516]
- kvm-lsi53c895a-disable-reentrancy-detection-for-script-R.patch [RHEL-516]
- kvm-bcm2835_property-disable-reentrancy-detection-for-io.patch [RHEL-516]
- kvm-raven-disable-reentrancy-detection-for-iomem.patch [RHEL-516]
- kvm-apic-disable-reentrancy-detection-for-apic-msi.patch [RHEL-516]
- kvm-async-avoid-use-after-free-on-re-entrancy-guard.patch [RHEL-516]
- kvm-loongarch-mark-loongarch_ipi_iocsr-re-entrnacy-safe.patch [RHEL-516]
- kvm-memory-stricter-checks-prior-to-unsetting-engaged_in.patch [RHEL-516]
- kvm-lsi53c895a-disable-reentrancy-detection-for-MMIO-reg.patch [RHEL-516]
- kvm-hw-scsi-lsi53c895a-Fix-reentrancy-issues-in-the-LSI-.patch [RHEL-516]
- kvm-hw-pci-Disable-PCI_ERR_UNCOR_MASK-register-for-machi.patch [bz#2189423]
- kvm-multifd-Fix-the-number-of-channels-ready.patch [bz#2196289]
- kvm-util-async-teardown-wire-up-query-command-line-optio.patch [bz#2168500]
- kvm-s390x-pv-Fix-spurious-warning-with-asynchronous-tear.patch [bz#2168500]
- Resolves: bz#2186725
  (Qemu hang when commit during fio running(iothread enable))
- Resolves: RHEL-516
  (CVE-2023-2680 qemu-kvm: QEMU: hcd-ehci: DMA reentrancy issue (incomplete fix for CVE-2021-3750) [rhel-9])
- Resolves: bz#2189423
  (Failed to migrate VM from rhel 9.3 to rhel 9.2)
- Resolves: bz#2196289
  (Fix number of ready channels on multifd)
- Resolves: bz#2168500
  ([IBM 9.3 FEAT] KVM: Improve memory reclaiming for z15 Secure Execution guests - qemu part)
From 516bf44de08a13d97c08e210137078e642ce8e88 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 17 May 2023 17:28:32 +0200
Subject: [PATCH 02/21] graph-lock: Disable locking for now

RH-Author: Kevin Wolf <kwolf@redhat.com>
RH-MergeRequest: 166: block/graph-lock: Disable locking for now
RH-Bugzilla: 2186725
RH-Acked-by: Eric Blake <eblake@redhat.com>
RH-Acked-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
RH-Commit: [2/4] 39d42fb527aad0491a018743289de7b762108317 (kmwolf/centos-qemu-kvm)

In QEMU 8.0, we've been seeing deadlocks in bdrv_graph_wrlock(). They
come from callers that hold an AioContext lock, which is not allowed
during polling. In theory, we could temporarily release the lock, but
callers are inconsistent about whether they hold a lock, and if they do,
some are also confused about which one they hold. While all of this is
fixable, it's not trivial, and the best course of action for 8.0.1 is
probably just disabling the graph locking code temporarily.
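
A minimal sketch of the problematic pattern (not taken from this patch;
the caller example_detach_child and its body are hypothetical, only the
bdrv_graph_*, bdrv_get_aio_context() and aio_context_acquire()/release()
calls are real QEMU APIs, and the headers are indicative of in-tree
block-layer code):

    #include "qemu/osdep.h"
    #include "block/block_int.h"
    #include "block/graph-lock.h"

    /* Hypothetical caller: it still holds the AioContext lock, as many
     * block-layer callers do, and then takes the graph write lock.
     * bdrv_graph_wrlock() drains and polls until all readers are gone,
     * and polling while an AioContext lock is held is what deadlocks. */
    static void example_detach_child(BlockDriverState *bs)
    {
        AioContext *ctx = bdrv_get_aio_context(bs);

        aio_context_acquire(ctx);   /* caller-held AioContext lock */
        bdrv_graph_wrlock();        /* polls for readers -> can deadlock */
        /* ... modify the block graph ... */
        bdrv_graph_wrunlock();
        aio_context_release(ctx);
    }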

We don't currently rely on graph locking yet. It is supposed to replace
the AioContext lock eventually to enable multiqueue support, but as long
as we still have the AioContext lock, it is sufficient without the graph
lock. Once the AioContext lock goes away, the deadlock doesn't exist any
more either and this commit can be reverted. (Of course, it can also be
reverted while the AioContext lock still exists if the callers have been
fixed.)
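
For completeness, the "temporarily release the lock" option mentioned
above would look roughly like the sketch below (same headers and
hypothetical caller as the earlier sketch). It is only correct when the
caller knows exactly which AioContext lock it holds, which is the
per-caller cleanup this commit defers:

    /* Hypothetical variant: the lock was taken further up the call
     * chain; drop it around the polling section and reacquire it
     * afterwards. */
    static void example_detach_child_unlocked(BlockDriverState *bs)
    {
        AioContext *ctx = bdrv_get_aio_context(bs);

        aio_context_release(ctx);   /* give up the lock before polling */
        bdrv_graph_wrlock();
        /* ... modify the block graph ... */
        bdrv_graph_wrunlock();
        aio_context_acquire(ctx);   /* reacquire for the rest of the caller */
    }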

Cc: qemu-stable@nongnu.org
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-Id: <20230517152834.277483-2-kwolf@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
(cherry picked from commit 80fc5d260002432628710f8b0c7cfc7d9b97bb9d)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/graph-lock.c | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/block/graph-lock.c b/block/graph-lock.c
index 259a7a0bde..2490926c90 100644
--- a/block/graph-lock.c
+++ b/block/graph-lock.c
@@ -30,8 +30,10 @@ BdrvGraphLock graph_lock;
 /* Protects the list of aiocontext and orphaned_reader_count */
 static QemuMutex aio_context_list_lock;
 
+#if 0
 /* Written and read with atomic operations. */
 static int has_writer;
+#endif
 
 /*
  * A reader coroutine could move from an AioContext to another.
@@ -88,6 +90,7 @@ void unregister_aiocontext(AioContext *ctx)
     g_free(ctx->bdrv_graph);
 }
 
+#if 0
 static uint32_t reader_count(void)
 {
     BdrvGraphRWlock *brdv_graph;
@@ -105,10 +108,17 @@ static uint32_t reader_count(void)
     assert((int32_t)rd >= 0);
     return rd;
 }
+#endif
 
 void bdrv_graph_wrlock(void)
 {
     GLOBAL_STATE_CODE();
+    /*
+     * TODO Some callers hold an AioContext lock when this is called, which
+     * causes deadlocks. Reenable once the AioContext locking is cleaned up (or
+     * AioContext locks are gone).
+     */
+#if 0
     assert(!qatomic_read(&has_writer));
 
     /* Make sure that constantly arriving new I/O doesn't cause starvation */
@@ -139,11 +149,13 @@ void bdrv_graph_wrlock(void)
     } while (reader_count() >= 1);
 
     bdrv_drain_all_end();
+#endif
 }
 
 void bdrv_graph_wrunlock(void)
 {
     GLOBAL_STATE_CODE();
+#if 0
     QEMU_LOCK_GUARD(&aio_context_list_lock);
     assert(qatomic_read(&has_writer));
 
@@ -155,10 +167,13 @@ void bdrv_graph_wrunlock(void)
 
     /* Wake up all coroutine that are waiting to read the graph */
     qemu_co_enter_all(&reader_queue, &aio_context_list_lock);
+#endif
 }
 
 void coroutine_fn bdrv_graph_co_rdlock(void)
 {
+    /* TODO Reenable when wrlock is reenabled */
+#if 0
     BdrvGraphRWlock *bdrv_graph;
     bdrv_graph = qemu_get_current_aio_context()->bdrv_graph;
 
@@ -223,10 +238,12 @@ void coroutine_fn bdrv_graph_co_rdlock(void)
             qemu_co_queue_wait(&reader_queue, &aio_context_list_lock);
         }
     }
+#endif
 }
 
 void coroutine_fn bdrv_graph_co_rdunlock(void)
 {
+#if 0
     BdrvGraphRWlock *bdrv_graph;
     bdrv_graph = qemu_get_current_aio_context()->bdrv_graph;
 
@@ -249,6 +266,7 @@ void coroutine_fn bdrv_graph_co_rdunlock(void)
     if (qatomic_read(&has_writer)) {
         aio_wait_kick();
     }
+#endif
 }
 
 void bdrv_graph_rdlock_main_loop(void)
@@ -266,13 +284,19 @@ void bdrv_graph_rdunlock_main_loop(void)
 void assert_bdrv_graph_readable(void)
 {
     /* reader_count() is slow due to aio_context_list_lock lock contention */
+    /* TODO Reenable when wrlock is reenabled */
+#if 0
 #ifdef CONFIG_DEBUG_GRAPH_LOCK
     assert(qemu_in_main_thread() || reader_count());
 #endif
+#endif
 }
 
 void assert_bdrv_graph_writable(void)
 {
     assert(qemu_in_main_thread());
+    /* TODO Reenable when wrlock is reenabled */
+#if 0
     assert(qatomic_read(&has_writer));
+#endif
 }
-- 
2.39.3