fda7fbcd8d
- kvm-i386-Resolve-CPU-models-to-v1-by-default.patch [bz#1779078 bz#1787291]
- kvm-iotests-Support-job-complete-in-run_job.patch [bz#1781637]
- kvm-iotests-Create-VM.blockdev_create.patch [bz#1781637]
- kvm-block-Activate-recursively-even-for-already-active-n.patch [bz#1781637]
- kvm-hmp-Allow-using-qdev-ID-for-qemu-io-command.patch [bz#1781637]
- kvm-iotests-Test-external-snapshot-with-VM-state.patch [bz#1781637]
- kvm-iotests.py-Let-wait_migration-wait-even-more.patch [bz#1781637]
- kvm-blockdev-fix-coding-style-issues-in-drive_backup_pre.patch [bz#1745606 bz#1746217 bz#1773517 bz#1779036 bz#1782111 bz#1782175 bz#1783965]
- kvm-blockdev-unify-qmp_drive_backup-and-drive-backup-tra.patch [bz#1745606 bz#1746217 bz#1773517 bz#1779036 bz#1782111 bz#1782175 bz#1783965]
- kvm-blockdev-unify-qmp_blockdev_backup-and-blockdev-back.patch [bz#1745606 bz#1746217 bz#1773517 bz#1779036 bz#1782111 bz#1782175 bz#1783965]
- kvm-blockdev-honor-bdrv_try_set_aio_context-context-requ.patch [bz#1745606 bz#1746217 bz#1773517 bz#1779036 bz#1782111 bz#1782175 bz#1783965]
- kvm-backup-top-Begin-drain-earlier.patch [bz#1745606 bz#1746217 bz#1773517 bz#1779036 bz#1782111 bz#1782175 bz#1783965]
- kvm-block-backup-top-Don-t-acquire-context-while-droppin.patch [bz#1745606 bz#1746217 bz#1773517 bz#1779036 bz#1782111 bz#1782175 bz#1783965]
- kvm-blockdev-Acquire-AioContext-on-dirty-bitmap-function.patch [bz#1745606 bz#1746217 bz#1773517 bz#1779036 bz#1782111 bz#1782175 bz#1783965]
- kvm-blockdev-Return-bs-to-the-proper-context-on-snapshot.patch [bz#1745606 bz#1746217 bz#1773517 bz#1779036 bz#1782111 bz#1782175 bz#1783965]
- kvm-iotests-Test-handling-of-AioContexts-with-some-block.patch [bz#1745606 bz#1746217 bz#1773517 bz#1779036 bz#1782111 bz#1782175 bz#1783965]
- kvm-target-arm-monitor-query-cpu-model-expansion-crashed.patch [bz#1801320]
- kvm-docs-arm-cpu-features-Make-kvm-no-adjvtime-comment-c.patch [bz#1801320]
- Resolves: bz#1745606 (Qemu hang when do incremental live backup in transaction mode without bitmap)
- Resolves: bz#1746217 (Src qemu hang when do storage vm migration during guest installation)
- Resolves: bz#1773517 (Src qemu hang when do storage vm migration with dataplane enable)
- Resolves: bz#1779036 (Qemu coredump when do snapshot in transaction mode with one snapshot path not exist)
- Resolves: bz#1779078 (RHVH 4.4: Failed to run VM on 4.3/4.4 engine (Exit message: the CPU is incompatible with host CPU: Host CPU does not provide required features: hle, rtm))
- Resolves: bz#1781637 (qemu crashed when do mem and disk snapshot)
- Resolves: bz#1782111 (Qemu hang when do full backup on multi-disks with one job's 'job-id' missed in transaction mode(data plane enable))
- Resolves: bz#1782175 (Qemu core dump when add persistent bitmap(data plane enable))
- Resolves: bz#1783965 (Qemu core dump when do backup with sync: bitmap and no bitmap provided)
- Resolves: bz#1787291 (RHVH 4.4: Failed to run VM on 4.3/4.4 engine (Exit message: the CPU is incompatible with host CPU: Host CPU does not provide required features: hle, rtm) [rhel-8.1.0.z])
- Resolves: bz#1801320 (aarch64: backport query-cpu-model-expansion and adjvtime document fixes)
From 0ef6691ce8964bb2bbd677756c4e594793ca3ad8 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Fri, 7 Feb 2020 11:24:01 +0000
Subject: [PATCH 04/18] block: Activate recursively even for already active
 nodes

RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20200207112404.25198-4-kwolf@redhat.com>
Patchwork-id: 93749
O-Subject: [RHEL-AV-8.2.0 qemu-kvm PATCH v2 3/6] block: Activate recursively even for already active nodes
Bugzilla: 1781637
RH-Acked-by: Sergio Lopez Pascual <slp@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>

bdrv_invalidate_cache_all() assumes that all nodes in a given subtree
are either active or inactive when it starts. Therefore, as soon as it
arrives at an already active node, it stops.

However, this assumption is wrong. For example, it's possible to take a
snapshot of an inactive node, which results in an active overlay over an
inactive backing file. The active overlay is probably also the root node
of an inactive BlockBackend (blk->disable_perm == true).

In this case, bdrv_invalidate_cache_all() does not need to do anything
to activate the overlay node, but it still needs to recurse into the
children and the parents to make sure that after returning success,
really everything is activated.

Cc: qemu-stable@nongnu.org
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit 7bb4941ace471fc7dd6ded4749b95b9622baa6ed)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
 block.c | 50 ++++++++++++++++++++++++--------------------------
 1 file changed, 24 insertions(+), 26 deletions(-)

diff --git a/block.c b/block.c
index 473eb6e..2e5e8b6 100644
--- a/block.c
+++ b/block.c
@@ -5335,10 +5335,6 @@ static void coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs,
         return;
     }
 
-    if (!(bs->open_flags & BDRV_O_INACTIVE)) {
-        return;
-    }
-
     QLIST_FOREACH(child, &bs->children, next) {
         bdrv_co_invalidate_cache(child->bs, &local_err);
         if (local_err) {
@@ -5360,34 +5356,36 @@ static void coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs,
      * just keep the extended permissions for the next time that an activation
      * of the image is tried.
      */
-    bs->open_flags &= ~BDRV_O_INACTIVE;
-    bdrv_get_cumulative_perm(bs, &perm, &shared_perm);
-    ret = bdrv_check_perm(bs, NULL, perm, shared_perm, NULL, NULL, &local_err);
-    if (ret < 0) {
-        bs->open_flags |= BDRV_O_INACTIVE;
-        error_propagate(errp, local_err);
-        return;
-    }
-    bdrv_set_perm(bs, perm, shared_perm);
-
-    if (bs->drv->bdrv_co_invalidate_cache) {
-        bs->drv->bdrv_co_invalidate_cache(bs, &local_err);
-        if (local_err) {
+    if (bs->open_flags & BDRV_O_INACTIVE) {
+        bs->open_flags &= ~BDRV_O_INACTIVE;
+        bdrv_get_cumulative_perm(bs, &perm, &shared_perm);
+        ret = bdrv_check_perm(bs, NULL, perm, shared_perm, NULL, NULL, &local_err);
+        if (ret < 0) {
             bs->open_flags |= BDRV_O_INACTIVE;
             error_propagate(errp, local_err);
             return;
         }
-    }
+        bdrv_set_perm(bs, perm, shared_perm);
 
-    FOR_EACH_DIRTY_BITMAP(bs, bm) {
-        bdrv_dirty_bitmap_skip_store(bm, false);
-    }
+        if (bs->drv->bdrv_co_invalidate_cache) {
+            bs->drv->bdrv_co_invalidate_cache(bs, &local_err);
+            if (local_err) {
+                bs->open_flags |= BDRV_O_INACTIVE;
+                error_propagate(errp, local_err);
+                return;
+            }
+        }
 
-    ret = refresh_total_sectors(bs, bs->total_sectors);
-    if (ret < 0) {
-        bs->open_flags |= BDRV_O_INACTIVE;
-        error_setg_errno(errp, -ret, "Could not refresh total sector count");
-        return;
+        FOR_EACH_DIRTY_BITMAP(bs, bm) {
+            bdrv_dirty_bitmap_skip_store(bm, false);
+        }
+
+        ret = refresh_total_sectors(bs, bs->total_sectors);
+        if (ret < 0) {
+            bs->open_flags |= BDRV_O_INACTIVE;
+            error_setg_errno(errp, -ret, "Could not refresh total sector count");
+            return;
+        }
     }
 
     QLIST_FOREACH(parent, &bs->parents, next_parent) {
-- 
1.8.3.1
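Editor's note, for illustration only (not part of the patch): the change above turns bdrv_co_invalidate_cache() from "stop at the first already-active node" into "always recurse, but perform the per-node activation work only on inactive nodes". The following is a minimal standalone C sketch of that traversal pattern. The Node type and function names are hypothetical stand-ins for BlockDriverState and the QEMU code; the real function also walks parents and propagates errors, which the sketch omits.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical node type standing in for BlockDriverState. */
typedef struct Node {
    const char *name;
    bool active;
    struct Node *children[4];
    int num_children;
} Node;

/* Activate a node and its whole subtree.  The key point of the patch:
 * an already-active node is not a reason to stop -- we skip only the
 * per-node activation work and still recurse into the children. */
static void activate_recursively(Node *n)
{
    if (n->active) {
        printf("%s: already active, still recursing\n", n->name);
    } else {
        n->active = true;   /* per-node work (permissions etc.) happens only here */
        printf("%s: activated\n", n->name);
    }
    for (int i = 0; i < n->num_children; i++) {
        activate_recursively(n->children[i]);
    }
}

int main(void)
{
    /* An active overlay over an inactive backing file, as in the
     * snapshot example from the commit message. */
    Node backing = { .name = "backing", .active = false };
    Node overlay = { .name = "overlay", .active = true,
                     .children = { &backing }, .num_children = 1 };

    activate_recursively(&overlay);
    return 0;
}

With the old early return, the walk would have stopped at the active overlay and left the backing node inactive even though the function reported success; the recursive version reaches and activates it.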