fc2ddd6b1c
- kvm-memory-prevent-dma-reentracy-issues.patch [bz#1999236] - kvm-async-Add-an-optional-reentrancy-guard-to-the-BH-API.patch [bz#1999236] - kvm-checkpatch-add-qemu_bh_new-aio_bh_new-checks.patch [bz#1999236] - kvm-hw-replace-most-qemu_bh_new-calls-with-qemu_bh_new_g.patch [bz#1999236] - kvm-lsi53c895a-disable-reentrancy-detection-for-script-R.patch [bz#1999236] - kvm-bcm2835_property-disable-reentrancy-detection-for-io.patch [bz#1999236] - kvm-raven-disable-reentrancy-detection-for-iomem.patch [bz#1999236] - kvm-apic-disable-reentrancy-detection-for-apic-msi.patch [bz#1999236] - kvm-async-avoid-use-after-free-on-re-entrancy-guard.patch [bz#1999236] - kvm-memory-stricter-checks-prior-to-unsetting-engaged_in.patch [bz#1999236] - kvm-lsi53c895a-disable-reentrancy-detection-for-MMIO-reg.patch [bz#1999236] - kvm-hw-scsi-lsi53c895a-Fix-reentrancy-issues-in-the-LSI-.patch [bz#1999236] - kvm-target-i386-add-support-for-FLUSH_L1D-feature.patch [bz#2216203] - kvm-target-i386-add-support-for-FB_CLEAR-feature.patch [bz#2216203] - kvm-migration-Disable-postcopy-multifd-migration.patch [bz#2169733] - Resolves: bz#1999236 (CVE-2021-3750 virt:rhel/qemu-kvm: QEMU: hcd-ehci: DMA reentrancy issue leads to use-after-free [rhel-8]) - Resolves: bz#2216203 ([qemu-kvm]VM reports vulnerabilty to mmio_stale_data on patched host with microcode) - Resolves: bz#2169733 (Qemu on destination host crashed if migrate with postcopy and multifd enabled)
236 lines
8.9 KiB
Diff
236 lines
8.9 KiB
Diff
From 8996ac4369de7e0cb6f911db6f47c3e4ae88c8aa Mon Sep 17 00:00:00 2001
|
|
From: Jon Maloy <jmaloy@redhat.com>
|
|
Date: Tue, 9 May 2023 10:29:03 -0400
|
|
Subject: [PATCH 02/15] async: Add an optional reentrancy guard to the BH API
|
|
|
|
RH-Author: Jon Maloy <jmaloy@redhat.com>
|
|
RH-MergeRequest: 277: memory: prevent dma-reentracy issues
|
|
RH-Bugzilla: 1999236
|
|
RH-Acked-by: Thomas Huth <thuth@redhat.com>
|
|
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
|
|
RH-Commit: [2/12] b03f247e242a6cdb3eebec36477234ac77dcd20c (redhat/rhel/src/qemu-kvm/jons-qemu-kvm-2)
|
|
|
|
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1999236
|
|
Upstream: Merged
|
|
CVE: CVE-2021-3750
|
|
Conflict: The file block/graph-lock.h, included from include/block/aio.h,
|
|
doesn't exist in this code version. The code compiles without
|
|
issues if this include is just omitted, so we do that.
|
|
|
|
commit 9c86c97f12c060bf7484dd931f38634e166a81f0
|
|
Author: Alexander Bulekov <alxndr@bu.edu>
|
|
Date: Thu Apr 27 17:10:07 2023 -0400
|
|
|
|
async: Add an optional reentrancy guard to the BH API
|
|
|
|
Devices can pass their MemoryReentrancyGuard (from their DeviceState),
|
|
when creating new BHes. Then, the async API will toggle the guard
|
|
before/after calling the BH call-back. This prevents bh->mmio reentrancy
|
|
issues.
|
|
|
|
Signed-off-by: Alexander Bulekov <alxndr@bu.edu>
|
|
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
|
|
Message-Id: <20230427211013.2994127-3-alxndr@bu.edu>
|
|
[thuth: Fix "line over 90 characters" checkpatch.pl error]
|
|
Signed-off-by: Thomas Huth <thuth@redhat.com>
|
|
|
|
Signed-off-by: Jon Maloy <jmaloy@redhat.com>
|
|
---
|
|
docs/devel/multiple-iothreads.txt | 7 +++++++
|
|
include/block/aio.h | 18 ++++++++++++++++--
|
|
include/qemu/main-loop.h | 7 +++++--
|
|
tests/unit/ptimer-test-stubs.c | 3 ++-
|
|
util/async.c | 18 +++++++++++++++++-
|
|
util/main-loop.c | 6 ++++--
|
|
util/trace-events | 1 +
|
|
7 files changed, 52 insertions(+), 8 deletions(-)
|
|
|
|
diff --git a/docs/devel/multiple-iothreads.txt b/docs/devel/multiple-iothreads.txt
|
|
index aeb997bed5..a11576bc74 100644
|
|
--- a/docs/devel/multiple-iothreads.txt
|
|
+++ b/docs/devel/multiple-iothreads.txt
|
|
@@ -61,6 +61,7 @@ There are several old APIs that use the main loop AioContext:
|
|
* LEGACY qemu_aio_set_event_notifier() - monitor an event notifier
|
|
* LEGACY timer_new_ms() - create a timer
|
|
* LEGACY qemu_bh_new() - create a BH
|
|
+ * LEGACY qemu_bh_new_guarded() - create a BH with a device re-entrancy guard
|
|
* LEGACY qemu_aio_wait() - run an event loop iteration
|
|
|
|
Since they implicitly work on the main loop they cannot be used in code that
|
|
@@ -72,8 +73,14 @@ Instead, use the AioContext functions directly (see include/block/aio.h):
|
|
* aio_set_event_notifier() - monitor an event notifier
|
|
* aio_timer_new() - create a timer
|
|
* aio_bh_new() - create a BH
|
|
+ * aio_bh_new_guarded() - create a BH with a device re-entrancy guard
|
|
* aio_poll() - run an event loop iteration
|
|
|
|
+The qemu_bh_new_guarded/aio_bh_new_guarded APIs accept a "MemReentrancyGuard"
|
|
+argument, which is used to check for and prevent re-entrancy problems. For
|
|
+BHs associated with devices, the reentrancy-guard is contained in the
|
|
+corresponding DeviceState and named "mem_reentrancy_guard".
|
|
+
|
|
The AioContext can be obtained from the IOThread using
|
|
iothread_get_aio_context() or for the main loop using qemu_get_aio_context().
|
|
Code that takes an AioContext argument works both in IOThreads or the main
|
|
diff --git a/include/block/aio.h b/include/block/aio.h
|
|
index 47fbe9d81f..c7da152985 100644
|
|
--- a/include/block/aio.h
|
|
+++ b/include/block/aio.h
|
|
@@ -22,6 +22,8 @@
|
|
#include "qemu/event_notifier.h"
|
|
#include "qemu/thread.h"
|
|
#include "qemu/timer.h"
|
|
+#include "hw/qdev-core.h"
|
|
+
|
|
|
|
typedef struct BlockAIOCB BlockAIOCB;
|
|
typedef void BlockCompletionFunc(void *opaque, int ret);
|
|
@@ -321,9 +323,11 @@ void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
|
|
* is opaque and must be allocated prior to its use.
|
|
*
|
|
* @name: A human-readable identifier for debugging purposes.
|
|
+ * @reentrancy_guard: A guard set when entering a cb to prevent
|
|
+ * device-reentrancy issues
|
|
*/
|
|
QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
|
|
- const char *name);
|
|
+ const char *name, MemReentrancyGuard *reentrancy_guard);
|
|
|
|
/**
|
|
* aio_bh_new: Allocate a new bottom half structure
|
|
@@ -332,7 +336,17 @@ QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
|
|
* string.
|
|
*/
|
|
#define aio_bh_new(ctx, cb, opaque) \
|
|
- aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)))
|
|
+ aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), NULL)
|
|
+
|
|
+/**
|
|
+ * aio_bh_new_guarded: Allocate a new bottom half structure with a
|
|
+ * reentrancy_guard
|
|
+ *
|
|
+ * A convenience wrapper for aio_bh_new_full() that uses the cb as the name
|
|
+ * string.
|
|
+ */
|
|
+#define aio_bh_new_guarded(ctx, cb, opaque, guard) \
|
|
+ aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), guard)
|
|
|
|
/**
|
|
* aio_notify: Force processing of pending events.
|
|
diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h
|
|
index 8dbc6fcb89..85dd5ada9e 100644
|
|
--- a/include/qemu/main-loop.h
|
|
+++ b/include/qemu/main-loop.h
|
|
@@ -294,9 +294,12 @@ void qemu_cond_timedwait_iothread(QemuCond *cond, int ms);
|
|
|
|
void qemu_fd_register(int fd);
|
|
|
|
+#define qemu_bh_new_guarded(cb, opaque, guard) \
|
|
+ qemu_bh_new_full((cb), (opaque), (stringify(cb)), guard)
|
|
#define qemu_bh_new(cb, opaque) \
|
|
- qemu_bh_new_full((cb), (opaque), (stringify(cb)))
|
|
-QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name);
|
|
+ qemu_bh_new_full((cb), (opaque), (stringify(cb)), NULL)
|
|
+QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name,
|
|
+ MemReentrancyGuard *reentrancy_guard);
|
|
void qemu_bh_schedule_idle(QEMUBH *bh);
|
|
|
|
enum {
|
|
diff --git a/tests/unit/ptimer-test-stubs.c b/tests/unit/ptimer-test-stubs.c
|
|
index 2a3ef58799..a7a2d08e7e 100644
|
|
--- a/tests/unit/ptimer-test-stubs.c
|
|
+++ b/tests/unit/ptimer-test-stubs.c
|
|
@@ -108,7 +108,8 @@ int64_t qemu_clock_deadline_ns_all(QEMUClockType type, int attr_mask)
|
|
return deadline;
|
|
}
|
|
|
|
-QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name)
|
|
+QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name,
|
|
+ MemReentrancyGuard *reentrancy_guard)
|
|
{
|
|
QEMUBH *bh = g_new(QEMUBH, 1);
|
|
|
|
diff --git a/util/async.c b/util/async.c
|
|
index 2a63bf90f2..1fff02e7fc 100644
|
|
--- a/util/async.c
|
|
+++ b/util/async.c
|
|
@@ -62,6 +62,7 @@ struct QEMUBH {
|
|
void *opaque;
|
|
QSLIST_ENTRY(QEMUBH) next;
|
|
unsigned flags;
|
|
+ MemReentrancyGuard *reentrancy_guard;
|
|
};
|
|
|
|
/* Called concurrently from any thread */
|
|
@@ -127,7 +128,7 @@ void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,
|
|
}
|
|
|
|
QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
|
|
- const char *name)
|
|
+ const char *name, MemReentrancyGuard *reentrancy_guard)
|
|
{
|
|
QEMUBH *bh;
|
|
bh = g_new(QEMUBH, 1);
|
|
@@ -136,13 +137,28 @@ QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
|
|
.cb = cb,
|
|
.opaque = opaque,
|
|
.name = name,
|
|
+ .reentrancy_guard = reentrancy_guard,
|
|
};
|
|
return bh;
|
|
}
|
|
|
|
void aio_bh_call(QEMUBH *bh)
|
|
{
|
|
+ bool last_engaged_in_io = false;
|
|
+
|
|
+ if (bh->reentrancy_guard) {
|
|
+ last_engaged_in_io = bh->reentrancy_guard->engaged_in_io;
|
|
+ if (bh->reentrancy_guard->engaged_in_io) {
|
|
+ trace_reentrant_aio(bh->ctx, bh->name);
|
|
+ }
|
|
+ bh->reentrancy_guard->engaged_in_io = true;
|
|
+ }
|
|
+
|
|
bh->cb(bh->opaque);
|
|
+
|
|
+ if (bh->reentrancy_guard) {
|
|
+ bh->reentrancy_guard->engaged_in_io = last_engaged_in_io;
|
|
+ }
|
|
}
|
|
|
|
/* Multiple occurrences of aio_bh_poll cannot be called concurrently. */
|
|
diff --git a/util/main-loop.c b/util/main-loop.c
|
|
index 06b18b195c..1eacf04691 100644
|
|
--- a/util/main-loop.c
|
|
+++ b/util/main-loop.c
|
|
@@ -544,9 +544,11 @@ void main_loop_wait(int nonblocking)
|
|
|
|
/* Functions to operate on the main QEMU AioContext. */
|
|
|
|
-QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name)
|
|
+QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name,
|
|
+ MemReentrancyGuard *reentrancy_guard)
|
|
{
|
|
- return aio_bh_new_full(qemu_aio_context, cb, opaque, name);
|
|
+ return aio_bh_new_full(qemu_aio_context, cb, opaque, name,
|
|
+ reentrancy_guard);
|
|
}
|
|
|
|
/*
|
|
diff --git a/util/trace-events b/util/trace-events
|
|
index c8f53d7d9f..dc3b1eb3bf 100644
|
|
--- a/util/trace-events
|
|
+++ b/util/trace-events
|
|
@@ -11,6 +11,7 @@ poll_remove(void *ctx, void *node, int fd) "ctx %p node %p fd %d"
|
|
# async.c
|
|
aio_co_schedule(void *ctx, void *co) "ctx %p co %p"
|
|
aio_co_schedule_bh_cb(void *ctx, void *co) "ctx %p co %p"
|
|
+reentrant_aio(void *ctx, const char *name) "ctx %p name %s"
|
|
|
|
# thread-pool.c
|
|
thread_pool_submit(void *pool, void *req, void *opaque) "pool %p req %p opaque %p"
|
|
--
|
|
2.37.3
|
|
|