214 lines
7.0 KiB
Diff
From 3f16b8a33bd7503cbe857fbeb45fff7301b6bb5f Mon Sep 17 00:00:00 2001
|
|
From: Kevin Wolf <kwolf@redhat.com>
|
|
Date: Wed, 8 Apr 2020 17:29:12 +0100
|
|
Subject: [PATCH 1/6] job: take each job's lock individually in job_txn_apply
|
|
|
|
RH-Author: Kevin Wolf <kwolf@redhat.com>
|
|
Message-id: <20200408172917.18712-2-kwolf@redhat.com>
|
|
Patchwork-id: 94597
|
|
O-Subject: [RHEL-AV-8.2.0 qemu-kvm PATCH 1/6] job: take each job's lock individually in job_txn_apply
|
|
Bugzilla: 1817621
|
|
RH-Acked-by: Eric Blake <eblake@redhat.com>
|
|
RH-Acked-by: Danilo de Paula <ddepaula@redhat.com>
|
|
RH-Acked-by: Max Reitz <mreitz@redhat.com>
|
|
|
|
From: Stefan Reiter <s.reiter@proxmox.com>
|
|
|
|
All callers of job_txn_apply hold a single job's lock, but different
|
|
jobs within a transaction can have different contexts, thus we need to
|
|
lock each one individually before applying the callback function.
|
|
|
|
Similar to job_completed_txn_abort this also requires releasing the
|
|
caller's context before and reacquiring it after to avoid recursive
|
|
locks which might break AIO_WAIT_WHILE in the callback. This is safe, since
|
|
existing code would already have to take this into account, lest
|
|
job_completed_txn_abort might have broken.
|
|
|
|
This also brings to light a different issue: When a callback function in
|
|
job_txn_apply moves its job to a different AIO context, callers will
|
|
try to release the wrong lock (now that we re-acquire the lock
|
|
correctly, previously it would just continue with the old lock, leaving
|
|
the job unlocked for the rest of the return path). Fix this by not caching
|
|
the job's context.
|
|
|
|
This is only necessary for qmp_block_job_finalize, qmp_job_finalize and
|
|
job_exit, since everyone else calls through job_exit.
|
|
|
|
One test needed adapting, since it calls job_finalize directly, so it
|
|
manually needs to acquire the correct context.
|
|
|
|
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
|
|
Message-Id: <20200407115651.69472-2-s.reiter@proxmox.com>
|
|
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
|
|
(cherry picked from commit b660a84bbb0eb1a76b505648d31d5e82594fb75e)
|
|
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
|
|
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
|
|
---
|
|
blockdev.c | 9 +++++++++
|
|
job-qmp.c | 9 +++++++++
|
|
job.c | 50 ++++++++++++++++++++++++++++++++++++++++----------
|
|
tests/test-blockjob.c | 2 ++
|
|
4 files changed, 60 insertions(+), 10 deletions(-)
|
|
|
|
diff --git a/blockdev.c b/blockdev.c
|
|
index c8d4b51..86eb115 100644
|
|
--- a/blockdev.c
|
|
+++ b/blockdev.c
|
|
@@ -4215,7 +4215,16 @@ void qmp_block_job_finalize(const char *id, Error **errp)
|
|
}
|
|
|
|
trace_qmp_block_job_finalize(job);
|
|
+ job_ref(&job->job);
|
|
job_finalize(&job->job, errp);
|
|
+
|
|
+ /*
|
|
+ * Job's context might have changed via job_finalize (and job_txn_apply
|
|
+ * automatically acquires the new one), so make sure we release the correct
|
|
+ * one.
|
|
+ */
|
|
+ aio_context = blk_get_aio_context(job->blk);
|
|
+ job_unref(&job->job);
|
|
aio_context_release(aio_context);
|
|
}
|
|
|
|
diff --git a/job-qmp.c b/job-qmp.c
|
|
index fbfed25..a201220 100644
|
|
--- a/job-qmp.c
|
|
+++ b/job-qmp.c
|
|
@@ -114,7 +114,16 @@ void qmp_job_finalize(const char *id, Error **errp)
|
|
}
|
|
|
|
trace_qmp_job_finalize(job);
|
|
+ job_ref(job);
|
|
job_finalize(job, errp);
|
|
+
|
|
+ /*
|
|
+ * Job's context might have changed via job_finalize (and job_txn_apply
|
|
+ * automatically acquires the new one), so make sure we release the correct
|
|
+ * one.
|
|
+ */
|
|
+ aio_context = job->aio_context;
|
|
+ job_unref(job);
|
|
aio_context_release(aio_context);
|
|
}
|
|
|
|
diff --git a/job.c b/job.c
|
|
index 04409b4..48fc4ad 100644
|
|
--- a/job.c
|
|
+++ b/job.c
|
|
@@ -136,17 +136,38 @@ static void job_txn_del_job(Job *job)
|
|
}
|
|
}
|
|
|
|
-static int job_txn_apply(JobTxn *txn, int fn(Job *))
|
|
+static int job_txn_apply(Job *job, int fn(Job *))
|
|
{
|
|
- Job *job, *next;
|
|
+ AioContext *inner_ctx;
|
|
+ Job *other_job, *next;
|
|
+ JobTxn *txn = job->txn;
|
|
int rc = 0;
|
|
|
|
- QLIST_FOREACH_SAFE(job, &txn->jobs, txn_list, next) {
|
|
- rc = fn(job);
|
|
+ /*
|
|
+ * Similar to job_completed_txn_abort, we take each job's lock before
|
|
+ * applying fn, but since we assume that outer_ctx is held by the caller,
|
|
+ * we need to release it here to avoid holding the lock twice - which would
|
|
+ * break AIO_WAIT_WHILE from within fn.
|
|
+ */
|
|
+ job_ref(job);
|
|
+ aio_context_release(job->aio_context);
|
|
+
|
|
+ QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) {
|
|
+ inner_ctx = other_job->aio_context;
|
|
+ aio_context_acquire(inner_ctx);
|
|
+ rc = fn(other_job);
|
|
+ aio_context_release(inner_ctx);
|
|
if (rc) {
|
|
break;
|
|
}
|
|
}
|
|
+
|
|
+ /*
|
|
+ * Note that job->aio_context might have been changed by calling fn, so we
|
|
+ * can't use a local variable to cache it.
|
|
+ */
|
|
+ aio_context_acquire(job->aio_context);
|
|
+ job_unref(job);
|
|
return rc;
|
|
}
|
|
|
|
@@ -774,11 +795,11 @@ static void job_do_finalize(Job *job)
|
|
assert(job && job->txn);
|
|
|
|
/* prepare the transaction to complete */
|
|
- rc = job_txn_apply(job->txn, job_prepare);
|
|
+ rc = job_txn_apply(job, job_prepare);
|
|
if (rc) {
|
|
job_completed_txn_abort(job);
|
|
} else {
|
|
- job_txn_apply(job->txn, job_finalize_single);
|
|
+ job_txn_apply(job, job_finalize_single);
|
|
}
|
|
}
|
|
|
|
@@ -824,10 +845,10 @@ static void job_completed_txn_success(Job *job)
|
|
assert(other_job->ret == 0);
|
|
}
|
|
|
|
- job_txn_apply(txn, job_transition_to_pending);
|
|
+ job_txn_apply(job, job_transition_to_pending);
|
|
|
|
/* If no jobs need manual finalization, automatically do so */
|
|
- if (job_txn_apply(txn, job_needs_finalize) == 0) {
|
|
+ if (job_txn_apply(job, job_needs_finalize) == 0) {
|
|
job_do_finalize(job);
|
|
}
|
|
}
|
|
@@ -849,9 +870,10 @@ static void job_completed(Job *job)
|
|
static void job_exit(void *opaque)
|
|
{
|
|
Job *job = (Job *)opaque;
|
|
- AioContext *ctx = job->aio_context;
|
|
+ AioContext *ctx;
|
|
|
|
- aio_context_acquire(ctx);
|
|
+ job_ref(job);
|
|
+ aio_context_acquire(job->aio_context);
|
|
|
|
/* This is a lie, we're not quiescent, but still doing the completion
|
|
* callbacks. However, completion callbacks tend to involve operations that
|
|
@@ -862,6 +884,14 @@ static void job_exit(void *opaque)
|
|
|
|
job_completed(job);
|
|
|
|
+ /*
|
|
+ * Note that calling job_completed can move the job to a different
|
|
+ * aio_context, so we cannot cache from above. job_txn_apply takes care of
|
|
+ * acquiring the new lock, and we ref/unref to avoid job_completed freeing
|
|
+ * the job underneath us.
|
|
+ */
|
|
+ ctx = job->aio_context;
|
|
+ job_unref(job);
|
|
aio_context_release(ctx);
|
|
}
|
|
|
|
diff --git a/tests/test-blockjob.c b/tests/test-blockjob.c
|
|
index 7844c9f..6d857fd 100644
|
|
--- a/tests/test-blockjob.c
|
|
+++ b/tests/test-blockjob.c
|
|
@@ -368,7 +368,9 @@ static void test_cancel_concluded(void)
|
|
aio_poll(qemu_get_aio_context(), true);
|
|
assert(job->status == JOB_STATUS_PENDING);
|
|
|
|
+ aio_context_acquire(job->aio_context);
|
|
job_finalize(job, &error_abort);
|
|
+ aio_context_release(job->aio_context);
|
|
assert(job->status == JOB_STATUS_CONCLUDED);
|
|
|
|
cancel_common(s);
|
|
--
|
|
1.8.3.1
|
|
|