qemu-kvm/kvm-nbd-server-Use-drained-block-ops-to-quiesce-the-serv.patch
* Mon Jun 28 2021 Miroslav Rezanina <mrezanin@redhat.com> - 6.0.0-7
- kvm-aarch64-rh-devices-add-CONFIG_PXB.patch [bz#1967502]
- kvm-virtio-gpu-handle-partial-maps-properly.patch [bz#1974795]
- kvm-x86-Add-x86-rhel8.5-machine-types.patch [bz#1957194]
- kvm-redhat-x86-Enable-kvm-asyncpf-int-by-default.patch [bz#1957194]
- kvm-block-backend-add-drained_poll.patch [bz#1957194]
- kvm-nbd-server-Use-drained-block-ops-to-quiesce-the-serv.patch [bz#1957194]
- kvm-disable-CONFIG_USB_STORAGE_BOT.patch [bz#1957194]
- kvm-doc-Fix-some-mistakes-in-the-SEV-documentation.patch [bz#1957194]
- kvm-docs-Add-SEV-ES-documentation-to-amd-memory-encrypti.patch [bz#1957194]
- kvm-docs-interop-firmware.json-Add-SEV-ES-support.patch [bz#1957194]
- kvm-qga-drop-StandardError-syslog.patch [bz#1947977]
- kvm-Remove-iscsi-support.patch [bz#1967133]
- Resolves: bz#1967502
  ([aarch64] [qemu] Compile the PCIe expander bridge)
- Resolves: bz#1974795
  ([RHEL9-beta] [aarch64] Launch guest with virtio-gpu-pci and virtual smmu causes "virtio_gpu_dequeue_ctrl_func" ERROR)
- Resolves: bz#1957194
  (Synchronize RHEL-AV 8.5.0 changes to RHEL 9.0.0 Beta)
- Resolves: bz#1947977
  (remove StandardError=syslog from qemu-guest-agent.service)
- Resolves: bz#1967133
  (QEMU: disable libiscsi in RHEL-9)

From 9182af6a819e60a079349fd6d8b28a28adea90b1 Mon Sep 17 00:00:00 2001
From: Sergio Lopez Pascual <slp@redhat.com>
Date: Thu, 17 Jun 2021 09:13:21 -0400
Subject: [PATCH 06/12] nbd/server: Use drained block ops to quiesce the server
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

RH-Author: Miroslav Rezanina <mrezanin@redhat.com>
RH-MergeRequest: 16: Synchronize with RHEL-AV 8.5 release 21 to RHEL 9
RH-Commit: [4/8] ca32c99563254a8a31104948e41fa691453d0399 (mrezanin/centos-src-qemu-kvm)
RH-Bugzilla: 1957194
RH-Acked-by: Vitaly Kuznetsov <vkuznets@redhat.com>
RH-Acked-by: Daniel P. Berrangé <berrange@redhat.com>

Before switching between AioContexts we need to make sure that we're
fully quiesced ("nb_requests == 0" for every client) when entering the
drained section.

To do this, we set "quiescing = true" for every client on
".drained_begin" to prevent new coroutines from being created, and
check if "nb_requests == 0" on ".drained_poll". Finally, once we're
exiting the drained section, on ".drained_end" we set "quiescing =
false" and call "nbd_client_receive_next_request()" to resume the
processing of new requests.

With these changes, "blk_aio_attached()" and "blk_aio_detach()" can be
reverted to be as simple as they were before f148ae7d36.
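
The handshake these three callbacks implement can be modeled in
isolation as follows (an illustrative standalone sketch with a
simplified client struct; it is not code from this change):

    /* Minimal model of the quiesce handshake used by the drained ops. */
    #include <assert.h>
    #include <stdbool.h>

    typedef struct Client {
        bool quiescing;
        int nb_requests;
    } Client;

    static void drained_begin(Client *c)
    {
        c->quiescing = true;        /* stop spawning request coroutines */
    }

    static bool drained_poll(Client *c)
    {
        return c->nb_requests != 0; /* true: still draining, keep waiting */
    }

    static void drained_end(Client *c)
    {
        c->quiescing = false;       /* resume processing of new requests */
    }

    int main(void)
    {
        Client c = { .quiescing = false, .nb_requests = 2 };

        drained_begin(&c);
        while (drained_poll(&c)) {
            c.nb_requests--;        /* pending requests finish one by one */
        }
        assert(c.nb_requests == 0); /* fully quiesced; safe to switch ctx */
        drained_end(&c);
        return 0;
    }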

RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=1960137

Suggested-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Sergio Lopez <slp@redhat.com>
Message-Id: <20210602060552.17433-3-slp@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
(cherry picked from commit fd6afc501a019682d1b8468b562355a2887087bd)
Signed-off-by: Sergio Lopez <slp@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
 nbd/server.c | 82 ++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 61 insertions(+), 21 deletions(-)

diff --git a/nbd/server.c b/nbd/server.c
index 86a44a9b41..b60ebc3ab6 100644
--- a/nbd/server.c
+++ b/nbd/server.c
@@ -1513,6 +1513,11 @@ static void nbd_request_put(NBDRequestData *req)
     g_free(req);
 
     client->nb_requests--;
+
+    if (client->quiescing && client->nb_requests == 0) {
+        aio_wait_kick();
+    }
+
     nbd_client_receive_next_request(client);
 
     nbd_client_put(client);
@@ -1530,49 +1535,68 @@ static void blk_aio_attached(AioContext *ctx, void *opaque)
     QTAILQ_FOREACH(client, &exp->clients, next) {
         qio_channel_attach_aio_context(client->ioc, ctx);
 
+        assert(client->nb_requests == 0);
         assert(client->recv_coroutine == NULL);
         assert(client->send_coroutine == NULL);
-
-        if (client->quiescing) {
-            client->quiescing = false;
-            nbd_client_receive_next_request(client);
-        }
     }
 }
 
-static void nbd_aio_detach_bh(void *opaque)
+static void blk_aio_detach(void *opaque)
 {
     NBDExport *exp = opaque;
     NBDClient *client;
 
+    trace_nbd_blk_aio_detach(exp->name, exp->common.ctx);
+
     QTAILQ_FOREACH(client, &exp->clients, next) {
         qio_channel_detach_aio_context(client->ioc);
+    }
+
+    exp->common.ctx = NULL;
+}
+
+static void nbd_drained_begin(void *opaque)
+{
+    NBDExport *exp = opaque;
+    NBDClient *client;
+
+    QTAILQ_FOREACH(client, &exp->clients, next) {
         client->quiescing = true;
+    }
+}
 
-        if (client->recv_coroutine) {
-            if (client->read_yielding) {
-                qemu_aio_coroutine_enter(exp->common.ctx,
-                                         client->recv_coroutine);
-            } else {
-                AIO_WAIT_WHILE(exp->common.ctx, client->recv_coroutine != NULL);
-            }
-        }
+static void nbd_drained_end(void *opaque)
+{
+    NBDExport *exp = opaque;
+    NBDClient *client;
 
-        if (client->send_coroutine) {
-            AIO_WAIT_WHILE(exp->common.ctx, client->send_coroutine != NULL);
-        }
+    QTAILQ_FOREACH(client, &exp->clients, next) {
+        client->quiescing = false;
+        nbd_client_receive_next_request(client);
     }
 }
 
-static void blk_aio_detach(void *opaque)
+static bool nbd_drained_poll(void *opaque)
 {
     NBDExport *exp = opaque;
+    NBDClient *client;
 
-    trace_nbd_blk_aio_detach(exp->name, exp->common.ctx);
+    QTAILQ_FOREACH(client, &exp->clients, next) {
+        if (client->nb_requests != 0) {
+            /*
+             * If there's a coroutine waiting for a request on nbd_read_eof()
+             * enter it here so we don't depend on the client to wake it up.
+             */
+            if (client->recv_coroutine != NULL && client->read_yielding) {
+                qemu_aio_coroutine_enter(exp->common.ctx,
+                                         client->recv_coroutine);
+            }
 
-    aio_wait_bh_oneshot(exp->common.ctx, nbd_aio_detach_bh, exp);
+            return true;
+        }
+    }
 
-    exp->common.ctx = NULL;
+    return false;
 }
 
 static void nbd_eject_notifier(Notifier *n, void *data)
@@ -1594,6 +1618,12 @@ void nbd_export_set_on_eject_blk(BlockExport *exp, BlockBackend *blk)
     blk_add_remove_bs_notifier(blk, &nbd_exp->eject_notifier);
 }
 
+static const BlockDevOps nbd_block_ops = {
+    .drained_begin = nbd_drained_begin,
+    .drained_end = nbd_drained_end,
+    .drained_poll = nbd_drained_poll,
+};
+
 static int nbd_export_create(BlockExport *blk_exp, BlockExportOptions *exp_args,
                              Error **errp)
 {
@@ -1715,8 +1745,17 @@ static int nbd_export_create(BlockExport *blk_exp, BlockExportOptions *exp_args,
 
     exp->allocation_depth = arg->allocation_depth;
 
+    /*
+     * We need to inhibit request queuing in the block layer to ensure we can
+     * be properly quiesced when entering a drained section, as our coroutines
+     * servicing pending requests might enter blk_pread().
+     */
+    blk_set_disable_request_queuing(blk, true);
+
     blk_add_aio_context_notifier(blk, blk_aio_attached, blk_aio_detach, exp);
 
+    blk_set_dev_ops(blk, &nbd_block_ops, exp);
+
     QTAILQ_INSERT_TAIL(&exports, exp, next);
 
     return 0;
@@ -1788,6 +1827,7 @@ static void nbd_export_delete(BlockExport *blk_exp)
         }
         blk_remove_aio_context_notifier(exp->common.blk, blk_aio_attached,
                                         blk_aio_detach, exp);
+        blk_set_disable_request_queuing(exp->common.blk, false);
     }
 
     for (i = 0; i < exp->nr_export_bitmaps; i++) {
--
2.27.0