From 59776df6fa3156a467e889ae00751da4bf6857eb Mon Sep 17 00:00:00 2001
From: Fam Zheng <famz@redhat.com>
Date: Tue, 9 Oct 2018 08:16:50 +0100
Subject: [PATCH 06/17] aio-posix: compute timeout before polling

RH-Author: Fam Zheng <famz@redhat.com>
Message-id: <20181009081651.15463-5-famz@redhat.com>
Patchwork-id: 82453
O-Subject: [RHEL8/rhel qemu-kvm PATCH 4/5] aio-posix: compute timeout before polling
Bugzilla: 1632622
RH-Acked-by: Thomas Huth <thuth@redhat.com>
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
RH-Acked-by: Danilo de Paula <ddepaula@redhat.com>

From: Paolo Bonzini <pbonzini@redhat.com>

BZ: 1632622

This is a preparation for the next patch, and also a very small
optimization. Compute the timeout only once, before invoking
try_poll_mode, and adjust it in run_poll_handlers. The adjustment
is the polling time when polling fails, or zero (non-blocking) if
polling succeeds.
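
To make the bookkeeping concrete, a minimal self-contained C sketch of
the same pattern follows. The names (clock_ns, poll_once,
poll_with_budget) are hypothetical stand-ins for this illustration, not
the QEMU functions the patch modifies; as in ppoll(2), a timeout of -1
means "block forever":

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static int64_t clock_ns(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    /* One poll pass; on success, zero *timeout so the caller won't block. */
    static bool poll_once(int64_t *timeout)
    {
        bool progress = false;    /* stub: a real handler would check a fd */
        if (progress) {
            *timeout = 0;
        }
        return progress;
    }

    /* Busy-poll for at most max_ns, then charge the elapsed time to
     * *timeout so a later blocking wait keeps the same overall deadline.
     */
    static bool poll_with_budget(int64_t max_ns, int64_t *timeout)
    {
        int64_t start = clock_ns(), elapsed;
        bool progress;

        do {
            progress = poll_once(timeout);
            elapsed = clock_ns() - start;
        } while (!progress && elapsed < max_ns);

        if (*timeout != -1) {         /* -1: block forever, leave as-is */
            *timeout -= MIN(*timeout, elapsed);
        }
        return progress;
    }

    int main(void)
    {
        int64_t timeout = 5000000;    /* computed once, before polling */
        bool progress = poll_with_budget(1000000, &timeout);

        printf("progress=%d remaining=%" PRId64 " ns\n", progress, timeout);
        return 0;
    }

Computing the timeout once and threading it through by pointer is what
lets aio_poll below skip the ppoll(2)/epoll_wait(2) system call when
polling already made progress (the timeout is forced to zero).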

Fixes: 70232b5253a3c4e03ed1ac47ef9246a8ac66c6fa
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20180912171040.1732-3-pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
(cherry picked from commit e30cffa04d52e35996569f1cfac111be19576bde)
Signed-off-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
 util/aio-posix.c  | 59 ++++++++++++++++++++++++++++++++-----------------------
 util/trace-events |  4 ++--
 2 files changed, 36 insertions(+), 27 deletions(-)

diff --git a/util/aio-posix.c b/util/aio-posix.c
index 1d7cc53..1b17597 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -490,7 +490,7 @@ static void add_pollfd(AioHandler *node)
     npfd++;
 }
 
-static bool run_poll_handlers_once(AioContext *ctx)
+static bool run_poll_handlers_once(AioContext *ctx, int64_t *timeout)
 {
     bool progress = false;
     AioHandler *node;
@@ -500,6 +500,7 @@ static bool run_poll_handlers_once(AioContext *ctx)
             aio_node_check(ctx, node->is_external) &&
             node->io_poll(node->opaque) &&
             node->opaque != &ctx->notifier) {
+            *timeout = 0;
             progress = true;
         }
 
@@ -522,31 +523,38 @@ static bool run_poll_handlers_once(AioContext *ctx)
  *
  * Returns: true if progress was made, false otherwise
  */
-static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
+static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout)
 {
     bool progress;
-    int64_t end_time;
+    int64_t start_time, elapsed_time;
 
     assert(ctx->notify_me);
     assert(qemu_lockcnt_count(&ctx->list_lock) > 0);
 
-    trace_run_poll_handlers_begin(ctx, max_ns);
-
-    end_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + max_ns;
+    trace_run_poll_handlers_begin(ctx, max_ns, *timeout);
 
+    start_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
     do {
-        progress = run_poll_handlers_once(ctx);
-    } while (!progress && qemu_clock_get_ns(QEMU_CLOCK_REALTIME) < end_time
+        progress = run_poll_handlers_once(ctx, timeout);
+        elapsed_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start_time;
+    } while (!progress && elapsed_time < max_ns
              && !atomic_read(&ctx->poll_disable_cnt));
 
-    trace_run_poll_handlers_end(ctx, progress);
+    /* If time has passed with no successful polling, adjust *timeout to
+     * keep the same ending time.
+     */
+    if (*timeout != -1) {
+        *timeout -= MIN(*timeout, elapsed_time);
+    }
 
+    trace_run_poll_handlers_end(ctx, progress, *timeout);
     return progress;
 }
 
 /* try_poll_mode:
  * @ctx: the AioContext
- * @blocking: busy polling is only attempted when blocking is true
+ * @timeout: timeout for blocking wait, computed by the caller and updated if
+ *           polling succeeds.
  *
  * ctx->notify_me must be non-zero so this function can detect aio_notify().
  *
@@ -554,19 +562,16 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
  *
  * Returns: true if progress was made, false otherwise
  */
-static bool try_poll_mode(AioContext *ctx, bool blocking)
+static bool try_poll_mode(AioContext *ctx, int64_t *timeout)
 {
-    if (blocking && ctx->poll_max_ns && !atomic_read(&ctx->poll_disable_cnt)) {
-        /* See qemu_soonest_timeout() uint64_t hack */
-        int64_t max_ns = MIN((uint64_t)aio_compute_timeout(ctx),
-                             (uint64_t)ctx->poll_ns);
+    /* See qemu_soonest_timeout() uint64_t hack */
+    int64_t max_ns = MIN((uint64_t)*timeout, (uint64_t)ctx->poll_ns);
 
-        if (max_ns) {
-            poll_set_started(ctx, true);
+    if (max_ns && !atomic_read(&ctx->poll_disable_cnt)) {
+        poll_set_started(ctx, true);
 
-            if (run_poll_handlers(ctx, max_ns)) {
-                return true;
-            }
+        if (run_poll_handlers(ctx, max_ns, timeout)) {
+            return true;
         }
     }
 
@@ -575,7 +580,7 @@ static bool try_poll_mode(AioContext *ctx, bool blocking)
     /* Even if we don't run busy polling, try polling once in case it can make
      * progress and the caller will be able to avoid ppoll(2)/epoll_wait(2).
      */
-    return run_poll_handlers_once(ctx);
+    return run_poll_handlers_once(ctx, timeout);
 }
 
 bool aio_poll(AioContext *ctx, bool blocking)
@@ -605,8 +610,14 @@ bool aio_poll(AioContext *ctx, bool blocking)
         start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
     }
 
-    progress = try_poll_mode(ctx, blocking);
-    if (!progress) {
+    timeout = blocking ? aio_compute_timeout(ctx) : 0;
+    progress = try_poll_mode(ctx, &timeout);
+    assert(!(timeout && progress));
+
+    /* If polling is allowed, non-blocking aio_poll does not need the
+     * system call---a single round of run_poll_handlers_once suffices.
+     */
+    if (timeout || atomic_read(&ctx->poll_disable_cnt)) {
         assert(npfd == 0);
 
         /* fill pollfds */
@@ -620,8 +631,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
             }
         }
 
-        timeout = blocking ? aio_compute_timeout(ctx) : 0;
-
         /* wait until next event */
         if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
             AioHandler epoll_handler;
diff --git a/util/trace-events b/util/trace-events
index 4822434..79569b7 100644
--- a/util/trace-events
+++ b/util/trace-events
@@ -1,8 +1,8 @@
 # See docs/devel/tracing.txt for syntax documentation.
 
 # util/aio-posix.c
-run_poll_handlers_begin(void *ctx, int64_t max_ns) "ctx %p max_ns %"PRId64
-run_poll_handlers_end(void *ctx, bool progress) "ctx %p progress %d"
+run_poll_handlers_begin(void *ctx, int64_t max_ns, int64_t timeout) "ctx %p max_ns %"PRId64 " timeout %"PRId64
+run_poll_handlers_end(void *ctx, bool progress, int64_t timeout) "ctx %p progress %d new timeout %"PRId64
 poll_shrink(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
 poll_grow(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
-- 
1.8.3.1