From 30bdfc5373eab96cb1f3d62ab90b07becd885272 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 10 Oct 2018 20:22:07 +0100
Subject: [PATCH 41/49] block: Remove aio_poll() in bdrv_drain_poll variants

RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20181010202213.7372-29-kwolf@redhat.com>
Patchwork-id: 82619
O-Subject: [RHEL-8 qemu-kvm PATCH 38/44] block: Remove aio_poll() in bdrv_drain_poll variants
Bugzilla: 1637976
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Thomas Huth <thuth@redhat.com>

bdrv_drain_poll_top_level() was buggy because it didn't release the
AioContext lock of the node to be drained before calling aio_poll().
This way, callbacks called by aio_poll() would possibly take the lock a
second time and run into a deadlock with a nested AIO_WAIT_WHILE() call.

However, it turns out that the aio_poll() call isn't actually needed any
more. It was introduced in commit 91af091f923, which is effectively
reverted by this patch. The cases it was supposed to fix are now covered
by bdrv_drain_poll(), which waits for block jobs to reach a quiescent
state.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit 4cf077b59fc73eec29f8b7d082919dbb278bdc86)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
 block/io.c | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/block/io.c b/block/io.c
index 19db35e..3313958 100644
--- a/block/io.c
+++ b/block/io.c
@@ -266,10 +266,6 @@ bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
 static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
                                       BdrvChild *ignore_parent)
 {
-    /* Execute pending BHs first and check everything else only after the BHs
-     * have executed. */
-    while (aio_poll(bs->aio_context, false));
-
     return bdrv_drain_poll(bs, recursive, ignore_parent, false);
 }
 
@@ -509,10 +505,6 @@ static bool bdrv_drain_all_poll(void)
     BlockDriverState *bs = NULL;
     bool result = false;
 
-    /* Execute pending BHs first (may modify the graph) and check everything
-     * else only after the BHs have executed. */
-    while (aio_poll(qemu_get_aio_context(), false));
-
     /* bdrv_drain_poll() can't make changes to the graph and we are holding the
      * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
     while ((bs = bdrv_next_all_states(bs))) {
-- 
1.8.3.1