From 02347869410fe53d814487501fb586f7dc614375 Mon Sep 17 00:00:00 2001
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Date: Thu, 9 Mar 2023 08:24:36 -0500
Subject: [PATCH 06/12] qemu-thread-win32: cleanup, fix, document QemuEvent

RH-Author: Emanuele Giuseppe Esposito <eesposit@redhat.com>
RH-MergeRequest: 158: qatomic: add smp_mb__before/after_rmw()
RH-Bugzilla: 2175660
RH-Acked-by: Paolo Bonzini <pbonzini@redhat.com>
RH-Acked-by: Cornelia Huck <cohuck@redhat.com>
RH-Acked-by: David Hildenbrand <david@redhat.com>
RH-Acked-by: Eric Auger <eric.auger@redhat.com>
RH-Commit: [3/9] d228e9d6a4a75dd1f0a23a6dceaf4fea23d69192 (eesposit/qemu-kvm)

Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2175660
commit 6c5df4b48f0c52a61342ecb307a43f4c2a3565c4
Author: Paolo Bonzini <pbonzini@redhat.com>
Date:   Thu Mar 2 11:22:50 2023 +0100

    qemu-thread-win32: cleanup, fix, document QemuEvent

    QemuEvent is currently broken on ARM due to missing memory barriers
    after qatomic_*(). Apart from adding the memory barrier, a closer look
    reveals some unpaired memory barriers that are not really needed and
    complicate the functions unnecessarily. Also, the code relies on a
    memory barrier in ResetEvent(); the barrier _ought_ to be there, but
    there is really no documentation about it, so make it explicit.
    Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
    Reviewed-by: David Hildenbrand <david@redhat.com>
    Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
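
To illustrate why the pairing matters, here is a minimal usage sketch
(hypothetical: only the qemu_event_*() and qatomic_*() APIs come from
QEMU; the "done" flag and the function names are made up for
illustration). The waiter's reset must be ordered before its re-check
of the condition, and the setter's store to the condition must be
ordered before its load of ev->value; otherwise the waiter can sleep
forever:

    #include "qemu/osdep.h"
    #include "qemu/thread.h"

    static QemuEvent ev;  /* qemu_event_init(&ev, false) called elsewhere */
    static bool done;     /* hypothetical condition written by the setter */

    static void waiter(void)
    {
        /*
         * Relies on the smp_mb__after_rmw() added to qemu_event_reset():
         * the EV_SET->EV_FREE transition is ordered before the load of
         * "done" below.
         */
        qemu_event_reset(&ev);
        if (!qatomic_read(&done)) {
            qemu_event_wait(&ev);
        }
    }

    static void setter(void)
    {
        qatomic_set(&done, true);
        /*
         * Relies on the full barrier at the start of qemu_event_set():
         * the store to "done" above is ordered before the load of
         * ev->value inside.
         */
        qemu_event_set(&ev);
    }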
---
 util/qemu-thread-win32.c | 82 +++++++++++++++++++++++++++-------------
 1 file changed, 56 insertions(+), 26 deletions(-)
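
For reference while reading the hunks below, the ev->value state machine
that these barriers protect can be summarized as follows (a sketch
derived from the code being patched, not part of the patch itself):

    /*
     * ev->value states and transitions:
     *
     *   EV_SET  - signaled; qemu_event_wait() returns without blocking.
     *   EV_FREE - not signaled, no waiters; qemu_event_set() may skip
     *             SetEvent().
     *   EV_BUSY - not signaled, at least one waiter; qemu_event_set()
     *             must call SetEvent().
     *
     *   qemu_event_set():   FREE/BUSY -> SET  (xchg; SetEvent if old == BUSY)
     *   qemu_event_reset(): SET -> FREE       (or; no-op if already FREE/BUSY)
     *   qemu_event_wait():  FREE -> BUSY      (cmpxchg, then
     *                                          WaitForSingleObject)
     */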

diff --git a/util/qemu-thread-win32.c b/util/qemu-thread-win32.c
index 69db254ac7..a7fe3cc345 100644
--- a/util/qemu-thread-win32.c
+++ b/util/qemu-thread-win32.c
@@ -272,12 +272,20 @@ void qemu_event_destroy(QemuEvent *ev)
 void qemu_event_set(QemuEvent *ev)
 {
     assert(ev->initialized);
-    /* qemu_event_set has release semantics, but because it *loads*
+
+    /*
+     * Pairs with both qemu_event_reset() and qemu_event_wait().
+     *
+     * qemu_event_set has release semantics, but because it *loads*
      * ev->value we need a full memory barrier here.
      */
     smp_mb();
     if (qatomic_read(&ev->value) != EV_SET) {
-        if (qatomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
+        int old = qatomic_xchg(&ev->value, EV_SET);
+
+        /* Pairs with memory barrier after ResetEvent. */
+        smp_mb__after_rmw();
+        if (old == EV_BUSY) {
             /* There were waiters, wake them up. */
             SetEvent(ev->event);
         }
@@ -286,17 +294,19 @@ void qemu_event_set(QemuEvent *ev)
 
 void qemu_event_reset(QemuEvent *ev)
 {
-    unsigned value;
-
     assert(ev->initialized);
-    value = qatomic_read(&ev->value);
-    smp_mb_acquire();
-    if (value == EV_SET) {
-        /* If there was a concurrent reset (or even reset+wait),
-         * do nothing. Otherwise change EV_SET->EV_FREE.
-         */
-        qatomic_or(&ev->value, EV_FREE);
-    }
+
+    /*
+     * If there was a concurrent reset (or even reset+wait),
+     * do nothing. Otherwise change EV_SET->EV_FREE.
+     */
+    qatomic_or(&ev->value, EV_FREE);
+
+    /*
+     * Order reset before checking the condition in the caller.
+     * Pairs with the first memory barrier in qemu_event_set().
+     */
+    smp_mb__after_rmw();
 }
 
 void qemu_event_wait(QemuEvent *ev)
@@ -304,29 +314,49 @@ void qemu_event_wait(QemuEvent *ev)
     unsigned value;
 
     assert(ev->initialized);
-    value = qatomic_read(&ev->value);
-    smp_mb_acquire();
+
+    /*
+     * qemu_event_wait must synchronize with qemu_event_set even if it does
+     * not go down the slow path, so this load-acquire is needed that
+     * synchronizes with the first memory barrier in qemu_event_set().
+     *
+     * If we do go down the slow path, there is no requirement at all: we
+     * might miss a qemu_event_set() here but ultimately the memory barrier in
+     * qemu_futex_wait() will ensure the check is done correctly.
+     */
+    value = qatomic_load_acquire(&ev->value);
     if (value != EV_SET) {
         if (value == EV_FREE) {
-            /* qemu_event_set is not yet going to call SetEvent, but we are
-             * going to do another check for EV_SET below when setting EV_BUSY.
-             * At that point it is safe to call WaitForSingleObject.
+            /*
+             * Here the underlying kernel event is reset, but qemu_event_set is
+             * not yet going to call SetEvent. However, there will be another
+             * check for EV_SET below when setting EV_BUSY. At that point it
+             * is safe to call WaitForSingleObject.
              */
             ResetEvent(ev->event);
 
-            /* Tell qemu_event_set that there are waiters. No need to retry
-             * because there cannot be a concurrent busy->free transition.
-             * After the CAS, the event will be either set or busy.
+            /*
+             * It is not clear whether ResetEvent provides this barrier; kernel
+             * APIs (KeResetEvent/KeClearEvent) do not. Better safe than sorry!
+             */
+            smp_mb();
+
+            /*
+             * Leave the event reset and tell qemu_event_set that there are
+             * waiters. No need to retry, because there cannot be a concurrent
+             * busy->free transition. After the CAS, the event will be either
+             * set or busy.
              */
             if (qatomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
-                value = EV_SET;
-            } else {
-                value = EV_BUSY;
+                return;
             }
         }
-        if (value == EV_BUSY) {
-            WaitForSingleObject(ev->event, INFINITE);
-        }
+
+        /*
+         * ev->value is now EV_BUSY. Since we didn't observe EV_SET,
+         * qemu_event_set() must observe EV_BUSY and call SetEvent().
+         */
+        WaitForSingleObject(ev->event, INFINITE);
     }
 }
 
-- 
2.39.1