From d46ca52c3f42add549bd3790a41d06594821334e Mon Sep 17 00:00:00 2001
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Date: Thu, 9 Mar 2023 08:10:57 -0500
Subject: [PATCH 03/13] qemu-thread-posix: cleanup, fix, document QemuEvent

RH-Author: Emanuele Giuseppe Esposito <eesposit@redhat.com>
RH-MergeRequest: 263: qatomic: add smp_mb__before/after_rmw()
RH-Bugzilla: 2168472
RH-Acked-by: Cornelia Huck <cohuck@redhat.com>
RH-Acked-by: Eric Auger <eric.auger@redhat.com>
RH-Acked-by: Paolo Bonzini <pbonzini@redhat.com>
RH-Acked-by: David Hildenbrand <david@redhat.com>
RH-Commit: [3/10] 746070c4d78c7f0a9ac4456d9aee69475acb8964

Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2168472

commit 9586a1329f5dce6c1d7f4de53cf0536644d7e593
Author: Paolo Bonzini <pbonzini@redhat.com>
Date:   Thu Mar  2 11:19:52 2023 +0100

    qemu-thread-posix: cleanup, fix, document QemuEvent

    QemuEvent is currently broken on ARM due to missing memory barriers
    after qatomic_*(). Apart from adding the memory barrier, a closer look
    reveals some unpaired memory barriers too. Document more clearly what
    is going on.

    Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
    Reviewed-by: David Hildenbrand <david@redhat.com>
    Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
---
 util/qemu-thread-posix.c | 69 ++++++++++++++++++++++++++++------------
 1 file changed, 49 insertions(+), 20 deletions(-)

diff --git a/util/qemu-thread-posix.c b/util/qemu-thread-posix.c
index e1225b63bd..dd3b6d4670 100644
--- a/util/qemu-thread-posix.c
+++ b/util/qemu-thread-posix.c
@@ -430,13 +430,21 @@ void qemu_event_destroy(QemuEvent *ev)
 
 void qemu_event_set(QemuEvent *ev)
 {
-    /* qemu_event_set has release semantics, but because it *loads*
+    assert(ev->initialized);
+
+    /*
+     * Pairs with both qemu_event_reset() and qemu_event_wait().
+     *
+     * qemu_event_set has release semantics, but because it *loads*
      * ev->value we need a full memory barrier here.
      */
-    assert(ev->initialized);
     smp_mb();
     if (qatomic_read(&ev->value) != EV_SET) {
-        if (qatomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
+        int old = qatomic_xchg(&ev->value, EV_SET);
+
+        /* Pairs with memory barrier in kernel futex_wait system call. */
+        smp_mb__after_rmw();
+        if (old == EV_BUSY) {
             /* There were waiters, wake them up. */
             qemu_futex_wake(ev, INT_MAX);
         }
@@ -445,18 +453,19 @@ void qemu_event_set(QemuEvent *ev)
 
 void qemu_event_reset(QemuEvent *ev)
 {
-    unsigned value;
-
     assert(ev->initialized);
-    value = qatomic_read(&ev->value);
-    smp_mb_acquire();
-    if (value == EV_SET) {
-        /*
-         * If there was a concurrent reset (or even reset+wait),
-         * do nothing. Otherwise change EV_SET->EV_FREE.
-         */
-        qatomic_or(&ev->value, EV_FREE);
-    }
+
+    /*
+     * If there was a concurrent reset (or even reset+wait),
+     * do nothing. Otherwise change EV_SET->EV_FREE.
+     */
+    qatomic_or(&ev->value, EV_FREE);
+
+    /*
+     * Order reset before checking the condition in the caller.
+     * Pairs with the first memory barrier in qemu_event_set().
+     */
+    smp_mb__after_rmw();
 }
 
 void qemu_event_wait(QemuEvent *ev)
@@ -464,20 +473,40 @@ void qemu_event_wait(QemuEvent *ev)
     unsigned value;
 
     assert(ev->initialized);
-    value = qatomic_read(&ev->value);
-    smp_mb_acquire();
+
+    /*
+     * qemu_event_wait must synchronize with qemu_event_set even if it does
+     * not go down the slow path, so this load-acquire is needed that
+     * synchronizes with the first memory barrier in qemu_event_set().
+     *
+     * If we do go down the slow path, there is no requirement at all: we
+     * might miss a qemu_event_set() here but ultimately the memory barrier in
+     * qemu_futex_wait() will ensure the check is done correctly.
+     */
+    value = qatomic_load_acquire(&ev->value);
     if (value != EV_SET) {
         if (value == EV_FREE) {
             /*
-             * Leave the event reset and tell qemu_event_set that there
-             * are waiters. No need to retry, because there cannot be
-             * a concurrent busy->free transition. After the CAS, the
-             * event will be either set or busy.
+             * Leave the event reset and tell qemu_event_set that there are
+             * waiters. No need to retry, because there cannot be a concurrent
+             * busy->free transition. After the CAS, the event will be either
+             * set or busy.
+             *
+             * This cmpxchg doesn't have particular ordering requirements if it
+             * succeeds (moving the store earlier can only cause qemu_event_set()
+             * to issue _more_ wakeups), the failing case needs acquire semantics
+             * like the load above.
              */
             if (qatomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                 return;
             }
         }
+
+        /*
+         * This is the final check for a concurrent set, so it does need
+         * a smp_mb() pairing with the second barrier of qemu_event_set().
+         * The barrier is inside the FUTEX_WAIT system call.
+         */
         qemu_futex_wait(ev, EV_BUSY);
     }
 }
-- 
2.37.3
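
For reference, the barrier pairing that the patch establishes can be sketched
with plain C11 atomics. This is a minimal illustration under stated
assumptions, not QEMU code: the names Event, ev_set(), ev_reset() and
ev_wait(), and the spin loop standing in for qemu_futex_wait()/
qemu_futex_wake(), are inventions of this sketch; the real implementation is
the patched util/qemu-thread-posix.c above, built on qatomic_*(), smp_mb()
and smp_mb__after_rmw().

/* Illustrative sketch only; mirrors the patched QemuEvent protocol. */
#include <stdatomic.h>

enum { EV_SET = 0, EV_FREE = 1, EV_BUSY = 2 };

typedef struct { atomic_uint value; } Event;   /* stands in for QemuEvent */

static void ev_set(Event *ev)
{
    /* Full barrier: pairs with ev_reset() and the fast path of ev_wait(). */
    atomic_thread_fence(memory_order_seq_cst);
    if (atomic_load_explicit(&ev->value, memory_order_relaxed) != EV_SET) {
        unsigned old = atomic_exchange(&ev->value, EV_SET);

        /* Mirrors smp_mb__after_rmw(): pairs with the waiter's final check. */
        atomic_thread_fence(memory_order_seq_cst);
        if (old == EV_BUSY) {
            /* Real code wakes blocked waiters: qemu_futex_wake(ev, INT_MAX).
             * The spin-waiters below simply observe EV_SET. */
        }
    }
}

static void ev_reset(Event *ev)
{
    /* EV_SET -> EV_FREE; a concurrent reset (or reset+wait) is left alone. */
    atomic_fetch_or(&ev->value, EV_FREE);

    /* Mirrors smp_mb__after_rmw(): order the reset before the caller's
     * subsequent check of its own condition. */
    atomic_thread_fence(memory_order_seq_cst);
}

static void ev_wait(Event *ev)
{
    /* Load-acquire: pairs with the first barrier in ev_set() on the fast path. */
    unsigned value = atomic_load_explicit(&ev->value, memory_order_acquire);
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /* Announce a waiter: EV_FREE -> EV_BUSY.  The default seq_cst
             * cmpxchg gives at least the acquire ordering the failing case
             * needs. */
            unsigned expected = EV_FREE;
            if (!atomic_compare_exchange_strong(&ev->value, &expected, EV_BUSY) &&
                expected == EV_SET) {
                return;             /* a concurrent ev_set() already fired */
            }
        }
        /* Real code blocks in qemu_futex_wait(ev, EV_BUSY); spin instead. */
        while (atomic_load_explicit(&ev->value, memory_order_acquire) != EV_SET) {
            /* busy wait */
        }
    }
}

Typical usage follows the QemuEvent pattern: the waiting thread calls
ev_reset(), re-checks its condition, then ev_wait(); producers make the
condition true and call ev_set(). The full barriers above are what keep that
reset/check/wait sequence from racing with a concurrent set.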