9ef7cdf7ca
- kvm-hw-arm-virt-Add-properties-to-disable-high-memory-re.patch [RHEL-19738]
- kvm-vfio-Introduce-base-object-for-VFIOContainer-and-tar.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-container-Introduce-a-empty-VFIOIOMMUOps.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-container-Switch-to-dma_map-unmap-API.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-common-Introduce-vfio_container_init-destroy-he.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-common-Move-giommu_list-in-base-container.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-container-Move-space-field-to-base-container.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-container-Switch-to-IOMMU-BE-set_dirty_page_tra.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-container-Move-per-container-device-list-in-bas.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-container-Convert-functions-to-base-container.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-container-Move-pgsizes-and-dma_max_mappings-to-.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-container-Move-vrdl_list-to-base-container.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-container-Move-listener-to-base-container.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-container-Move-dirty_pgsizes-and-max_dirty_bitm.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-container-Move-iova_ranges-to-base-container.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-container-Implement-attach-detach_device.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-spapr-Introduce-spapr-backend-and-target-interf.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-spapr-switch-to-spapr-IOMMU-BE-add-del_section_.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-spapr-Move-prereg_listener-into-spapr-container.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-spapr-Move-hostwin_list-into-spapr-container.patch [RHEL-19302 RHEL-21057]
- kvm-backends-iommufd-Introduce-the-iommufd-object.patch [RHEL-19302 RHEL-21057]
- kvm-util-char_dev-Add-open_cdev.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-common-return-early-if-space-isn-t-empty.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-iommufd-Implement-the-iommufd-backend.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-iommufd-Relax-assert-check-for-iommufd-backend.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-iommufd-Add-support-for-iova_ranges-and-pgsizes.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-pci-Extract-out-a-helper-vfio_pci_get_pci_hot_r.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-pci-Introduce-a-vfio-pci-hot-reset-interface.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-iommufd-Enable-pci-hot-reset-through-iommufd-cd.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-pci-Allow-the-selection-of-a-given-iommu-backen.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-pci-Make-vfio-cdev-pre-openable-by-passing-a-fi.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-platform-Allow-the-selection-of-a-given-iommu-b.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-platform-Make-vfio-cdev-pre-openable-by-passing.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-ap-Allow-the-selection-of-a-given-iommu-backend.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-ap-Make-vfio-cdev-pre-openable-by-passing-a-fil.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-ccw-Allow-the-selection-of-a-given-iommu-backen.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-ccw-Make-vfio-cdev-pre-openable-by-passing-a-fi.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-Make-VFIOContainerBase-poiner-parameter-const-i.patch [RHEL-19302 RHEL-21057]
- kvm-hw-arm-Activate-IOMMUFD-for-virt-machines.patch [RHEL-19302 RHEL-21057]
- kvm-kconfig-Activate-IOMMUFD-for-s390x-machines.patch [RHEL-19302 RHEL-21057]
- kvm-hw-i386-Activate-IOMMUFD-for-q35-machines.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-pci-Move-VFIODevice-initializations-in-vfio_ins.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-platform-Move-VFIODevice-initializations-in-vfi.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-ap-Move-VFIODevice-initializations-in-vfio_ap_i.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-ccw-Move-VFIODevice-initializations-in-vfio_ccw.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-Introduce-a-helper-function-to-initialize-VFIOD.patch [RHEL-19302 RHEL-21057]
- kvm-docs-devel-Add-VFIO-iommufd-backend-documentation.patch [RHEL-19302 RHEL-21057]
- kvm-hw-ppc-Kconfig-Imply-VFIO_PCI.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-spapr-Extend-VFIOIOMMUOps-with-a-release-handle.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-container-Introduce-vfio_legacy_setup-for-furth.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-container-Initialize-VFIOIOMMUOps-under-vfio_in.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-container-Introduce-a-VFIOIOMMU-QOM-interface.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-container-Introduce-a-VFIOIOMMU-legacy-QOM-inte.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-container-Intoduce-a-new-VFIOIOMMUClass-setup-h.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-spapr-Introduce-a-sPAPR-VFIOIOMMU-QOM-interface.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-iommufd-Introduce-a-VFIOIOMMU-iommufd-QOM-inter.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-spapr-Only-compile-sPAPR-IOMMU-support-when-nee.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-iommufd-Remove-CONFIG_IOMMUFD-usage.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-container-Replace-basename-with-g_path_get_base.patch [RHEL-19302 RHEL-21057]
- kvm-hw-vfio-fix-iteration-over-global-VFIODevice-list.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-iommufd-Remove-the-use-of-stat-to-check-file-ex.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-container-Rename-vfio_init_container-to-vfio_se.patch [RHEL-19302 RHEL-21057]
- kvm-vfio-migration-Add-helper-function-to-set-state-or-r.patch [RHEL-19302 RHEL-21057]
- kvm-backends-iommufd-Remove-check-on-number-of-backend-u.patch [RHEL-19302 RHEL-21057]
- kvm-backends-iommufd-Remove-mutex.patch [RHEL-19302 RHEL-21057]
- kvm-Compile-IOMMUFD-object-on-aarch64.patch [RHEL-19302 RHEL-21057]
- kvm-Compile-IOMMUFD-on-s390x.patch [RHEL-19302 RHEL-21057]
- kvm-Compile-IOMMUFD-on-x86_64.patch [RHEL-19302 RHEL-21057]
- kvm-target-s390x-kvm-pv-Provide-some-more-useful-informa.patch [RHEL-18212]
- kvm-nbd-server-avoid-per-NBDRequest-nbd_client_get-put.patch [RHEL-15965]
- kvm-nbd-server-only-traverse-NBDExport-clients-from-main.patch [RHEL-15965]
- kvm-nbd-server-introduce-NBDClient-lock-to-protect-field.patch [RHEL-15965]
- kvm-block-file-posix-set-up-Linux-AIO-and-io_uring-in-th.patch [RHEL-15965]
- kvm-virtio-blk-add-lock-to-protect-s-rq.patch [RHEL-15965]
- kvm-virtio-blk-don-t-lock-AioContext-in-the-completion-c.patch [RHEL-15965]
- kvm-virtio-blk-don-t-lock-AioContext-in-the-submission-c.patch [RHEL-15965]
- kvm-scsi-only-access-SCSIDevice-requests-from-one-thread.patch [RHEL-15965]
- kvm-virtio-scsi-don-t-lock-AioContext-around-virtio_queu.patch [RHEL-15965]
- kvm-scsi-don-t-lock-AioContext-in-I-O-code-path.patch [RHEL-15965]
- kvm-dma-helpers-don-t-lock-AioContext-in-dma_blk_cb.patch [RHEL-15965]
- kvm-virtio-scsi-replace-AioContext-lock-with-tmf_bh_lock.patch [RHEL-15965]
- kvm-scsi-assert-that-callbacks-run-in-the-correct-AioCon.patch [RHEL-15965]
- kvm-tests-remove-aio_context_acquire-tests.patch [RHEL-15965]
- kvm-aio-make-aio_context_acquire-aio_context_release-a-n.patch [RHEL-15965]
- kvm-graph-lock-remove-AioContext-locking.patch [RHEL-15965]
- kvm-block-remove-AioContext-locking.patch [RHEL-15965]
- kvm-block-remove-bdrv_co_lock.patch [RHEL-15965]
- kvm-scsi-remove-AioContext-locking.patch [RHEL-15965]
- kvm-aio-wait-draw-equivalence-between-AIO_WAIT_WHILE-and.patch [RHEL-15965]
- kvm-aio-remove-aio_context_acquire-aio_context_release-A.patch [RHEL-15965]
- kvm-docs-remove-AioContext-lock-from-IOThread-docs.patch [RHEL-15965]
- kvm-scsi-remove-outdated-AioContext-lock-comment.patch [RHEL-15965]
- kvm-job-remove-outdated-AioContext-locking-comments.patch [RHEL-15965]
- kvm-block-remove-outdated-AioContext-locking-comments.patch [RHEL-15965]
- kvm-block-coroutine-wrapper-use-qemu_get_current_aio_con.patch [RHEL-15965]
- kvm-s390x-pci-avoid-double-enable-disable-of-aif.patch [RHEL-21169]
- kvm-s390x-pci-refresh-fh-before-disabling-aif.patch [RHEL-21169]
- kvm-s390x-pci-drive-ISM-reset-from-subsystem-reset.patch [RHEL-21169]
- kvm-include-ui-rect.h-fix-qemu_rect_init-mis-assignment.patch [RHEL-21570]
- kvm-virtio-gpu-block-migration-of-VMs-with-blob-true.patch [RHEL-7565]
- kvm-spec-Enable-zstd.patch [RHEL-7361]
- Resolves: RHEL-19738 (Enable properties allowing to disable high memory regions)
- Resolves: RHEL-19302 (NVIDIA:Grace-Hopper Backport QEMU IOMMUFD Backend)
- Resolves: RHEL-21057 (Request backport of 9353b6da430f90e47f352dbf6dc31120c8914da6)
- Resolves: RHEL-18212 ([RHEL9][Secure-execution][s390x] The error message is not clear when boot up a SE guest with wrong encryption)
- Resolves: RHEL-15965 ([qemu-kvm] Remove AioContext lock (no response with QMP command block_resize))
- Resolves: RHEL-21169 ([s390x] VM fails to start with ISM passed through QEMU 8.2)
- Resolves: RHEL-21570 (Critical performance degradation for input devices in virtio vnc session)
- Resolves: RHEL-7565 (qemu crashed when migrate guest with blob resources enabled)
- Resolves: RHEL-7361 ([qemu-kvm] Enable zstd support for qcow2 files)
From bb0a6afff7f23a3ddb460dc1b2e70c06565f8a3f Mon Sep 17 00:00:00 2001
From: Stefan Hajnoczi <stefanha@redhat.com>
Date: Thu, 21 Dec 2023 14:24:52 -0500
Subject: [PATCH 072/101] nbd/server: introduce NBDClient->lock to protect
 fields

RH-Author: Kevin Wolf <kwolf@redhat.com>
RH-MergeRequest: 214: Remove AioContext lock
RH-Jira: RHEL-15965
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
RH-Commit: [3/26] 49b64adaaf8b1c30f339d1ecc8ea89fb9db63f1c (kmwolf/centos-qemu-kvm)

NBDClient has a number of fields that are accessed by both the export
AioContext and the main loop thread. When the AioContext lock is removed
these fields will need another form of protection.

Add NBDClient->lock and protect fields that are accessed by both
threads. Also add assertions where possible and otherwise add doc
comments stating assumptions about which thread and lock holding.

Note this patch moves the client->recv_coroutine assertion from
nbd_co_receive_request() to nbd_trip() where client->lock is held.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-ID: <20231221192452.1785567-7-stefanha@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 nbd/server.c | 144 +++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 111 insertions(+), 33 deletions(-)

diff --git a/nbd/server.c b/nbd/server.c
index e91e2e0903..941832f178 100644
--- a/nbd/server.c
+++ b/nbd/server.c
@@ -125,23 +125,25 @@ struct NBDClient {
     int refcount; /* atomic */
     void (*close_fn)(NBDClient *client, bool negotiated);
 
+    QemuMutex lock;
+
     NBDExport *exp;
     QCryptoTLSCreds *tlscreds;
     char *tlsauthz;
     QIOChannelSocket *sioc; /* The underlying data channel */
     QIOChannel *ioc; /* The current I/O channel which may differ (eg TLS) */
 
-    Coroutine *recv_coroutine;
+    Coroutine *recv_coroutine; /* protected by lock */
 
     CoMutex send_lock;
     Coroutine *send_coroutine;
 
-    bool read_yielding;
-    bool quiescing;
+    bool read_yielding; /* protected by lock */
+    bool quiescing; /* protected by lock */
 
     QTAILQ_ENTRY(NBDClient) next;
-    int nb_requests;
-    bool closing;
+    int nb_requests; /* protected by lock */
+    bool closing; /* protected by lock */
 
     uint32_t check_align; /* If non-zero, check for aligned client requests */
 
@@ -1415,11 +1417,18 @@ nbd_read_eof(NBDClient *client, void *buffer, size_t size, Error **errp)
 
         len = qio_channel_readv(client->ioc, &iov, 1, errp);
         if (len == QIO_CHANNEL_ERR_BLOCK) {
-            client->read_yielding = true;
+            WITH_QEMU_LOCK_GUARD(&client->lock) {
+                client->read_yielding = true;
+
+                /* Prompt main loop thread to re-run nbd_drained_poll() */
+                aio_wait_kick();
+            }
             qio_channel_yield(client->ioc, G_IO_IN);
-            client->read_yielding = false;
-            if (client->quiescing) {
-                return -EAGAIN;
+            WITH_QEMU_LOCK_GUARD(&client->lock) {
+                client->read_yielding = false;
+                if (client->quiescing) {
+                    return -EAGAIN;
+                }
             }
             continue;
         } else if (len < 0) {
@@ -1528,6 +1537,7 @@ void nbd_client_put(NBDClient *client)
             blk_exp_unref(&client->exp->common);
         }
         g_free(client->contexts.bitmaps);
+        qemu_mutex_destroy(&client->lock);
         g_free(client);
     }
 }
@@ -1561,11 +1571,13 @@ static void client_close(NBDClient *client, bool negotiated)
 {
     assert(qemu_in_main_thread());
 
-    if (client->closing) {
-        return;
-    }
+    WITH_QEMU_LOCK_GUARD(&client->lock) {
+        if (client->closing) {
+            return;
+        }
 
-    client->closing = true;
+        client->closing = true;
+    }
 
     /* Force requests to finish. They will drop their own references,
      * then we'll close the socket and free the NBDClient.
@@ -1579,6 +1591,7 @@ static void client_close(NBDClient *client, bool negotiated)
     }
 }
 
+/* Runs in export AioContext with client->lock held */
 static NBDRequestData *nbd_request_get(NBDClient *client)
 {
     NBDRequestData *req;
@@ -1591,6 +1604,7 @@ static NBDRequestData *nbd_request_get(NBDClient *client)
     return req;
 }
 
+/* Runs in export AioContext with client->lock held */
 static void nbd_request_put(NBDRequestData *req)
 {
     NBDClient *client = req->client;
@@ -1614,14 +1628,18 @@ static void blk_aio_attached(AioContext *ctx, void *opaque)
     NBDExport *exp = opaque;
     NBDClient *client;
 
+    assert(qemu_in_main_thread());
+
     trace_nbd_blk_aio_attached(exp->name, ctx);
 
     exp->common.ctx = ctx;
 
     QTAILQ_FOREACH(client, &exp->clients, next) {
-        assert(client->nb_requests == 0);
-        assert(client->recv_coroutine == NULL);
-        assert(client->send_coroutine == NULL);
+        WITH_QEMU_LOCK_GUARD(&client->lock) {
+            assert(client->nb_requests == 0);
+            assert(client->recv_coroutine == NULL);
+            assert(client->send_coroutine == NULL);
+        }
     }
 }
 
@@ -1629,6 +1647,8 @@ static void blk_aio_detach(void *opaque)
 {
     NBDExport *exp = opaque;
 
+    assert(qemu_in_main_thread());
+
     trace_nbd_blk_aio_detach(exp->name, exp->common.ctx);
 
     exp->common.ctx = NULL;
@@ -1639,8 +1659,12 @@ static void nbd_drained_begin(void *opaque)
     NBDExport *exp = opaque;
     NBDClient *client;
 
+    assert(qemu_in_main_thread());
+
     QTAILQ_FOREACH(client, &exp->clients, next) {
-        client->quiescing = true;
+        WITH_QEMU_LOCK_GUARD(&client->lock) {
+            client->quiescing = true;
+        }
     }
 }
 
@@ -1649,28 +1673,48 @@ static void nbd_drained_end(void *opaque)
     NBDExport *exp = opaque;
     NBDClient *client;
 
+    assert(qemu_in_main_thread());
+
     QTAILQ_FOREACH(client, &exp->clients, next) {
-        client->quiescing = false;
-        nbd_client_receive_next_request(client);
+        WITH_QEMU_LOCK_GUARD(&client->lock) {
+            client->quiescing = false;
+            nbd_client_receive_next_request(client);
+        }
     }
 }
 
+/* Runs in export AioContext */
+static void nbd_wake_read_bh(void *opaque)
+{
+    NBDClient *client = opaque;
+    qio_channel_wake_read(client->ioc);
+}
+
 static bool nbd_drained_poll(void *opaque)
 {
     NBDExport *exp = opaque;
     NBDClient *client;
 
+    assert(qemu_in_main_thread());
+
     QTAILQ_FOREACH(client, &exp->clients, next) {
-        if (client->nb_requests != 0) {
-            /*
-             * If there's a coroutine waiting for a request on nbd_read_eof()
-             * enter it here so we don't depend on the client to wake it up.
-             */
-            if (client->recv_coroutine != NULL && client->read_yielding) {
-                qio_channel_wake_read(client->ioc);
-            }
+        WITH_QEMU_LOCK_GUARD(&client->lock) {
+            if (client->nb_requests != 0) {
+                /*
+                 * If there's a coroutine waiting for a request on nbd_read_eof()
+                 * enter it here so we don't depend on the client to wake it up.
+                 *
+                 * Schedule a BH in the export AioContext to avoid missing the
+                 * wake up due to the race between qio_channel_wake_read() and
+                 * qio_channel_yield().
+                 */
+                if (client->recv_coroutine != NULL && client->read_yielding) {
+                    aio_bh_schedule_oneshot(nbd_export_aio_context(client->exp),
+                                            nbd_wake_read_bh, client);
+                }
 
-            return true;
+                return true;
+            }
         }
     }
 
@@ -1681,6 +1725,8 @@ static void nbd_eject_notifier(Notifier *n, void *data)
 {
     NBDExport *exp = container_of(n, NBDExport, eject_notifier);
 
+    assert(qemu_in_main_thread());
+
     blk_exp_request_shutdown(&exp->common);
 }
 
@@ -2566,7 +2612,6 @@ static int coroutine_fn nbd_co_receive_request(NBDRequestData *req,
     int ret;
 
     g_assert(qemu_in_coroutine());
-    assert(client->recv_coroutine == qemu_coroutine_self());
     ret = nbd_receive_request(client, request, errp);
     if (ret < 0) {
         return ret;
@@ -2975,6 +3020,9 @@ static coroutine_fn void nbd_trip(void *opaque)
      */
 
     trace_nbd_trip();
+
+    qemu_mutex_lock(&client->lock);
+
     if (client->closing) {
         goto done;
     }
@@ -2990,7 +3038,21 @@ static coroutine_fn void nbd_trip(void *opaque)
     }
 
     req = nbd_request_get(client);
-    ret = nbd_co_receive_request(req, &request, &local_err);
+
+    /*
+     * nbd_co_receive_request() returns -EAGAIN when nbd_drained_begin() has
+     * set client->quiescing but by the time we get back nbd_drained_end() may
+     * have already cleared client->quiescing. In that case we try again
+     * because nothing else will spawn an nbd_trip() coroutine until we set
+     * client->recv_coroutine = NULL further down.
+     */
+    do {
+        assert(client->recv_coroutine == qemu_coroutine_self());
+        qemu_mutex_unlock(&client->lock);
+        ret = nbd_co_receive_request(req, &request, &local_err);
+        qemu_mutex_lock(&client->lock);
+    } while (ret == -EAGAIN && !client->quiescing);
+
     client->recv_coroutine = NULL;
 
     if (client->closing) {
@@ -3002,15 +3064,16 @@ static coroutine_fn void nbd_trip(void *opaque)
     }
 
     if (ret == -EAGAIN) {
-        assert(client->quiescing);
         goto done;
     }
 
     nbd_client_receive_next_request(client);
+
     if (ret == -EIO) {
         goto disconnect;
     }
 
+    qemu_mutex_unlock(&client->lock);
     qio_channel_set_cork(client->ioc, true);
 
     if (ret < 0) {
@@ -3030,6 +3093,10 @@ static coroutine_fn void nbd_trip(void *opaque)
         g_free(request.contexts->bitmaps);
         g_free(request.contexts);
     }
+
+    qio_channel_set_cork(client->ioc, false);
+    qemu_mutex_lock(&client->lock);
+
     if (ret < 0) {
         error_prepend(&local_err, "Failed to send reply: ");
         goto disconnect;
@@ -3044,11 +3111,13 @@ static coroutine_fn void nbd_trip(void *opaque)
         goto disconnect;
     }
 
-    qio_channel_set_cork(client->ioc, false);
 done:
     if (req) {
         nbd_request_put(req);
     }
+
+    qemu_mutex_unlock(&client->lock);
+
     if (!nbd_client_put_nonzero(client)) {
         aio_co_reschedule_self(qemu_get_aio_context());
         nbd_client_put(client);
@@ -3059,13 +3128,19 @@ disconnect:
     if (local_err) {
         error_reportf_err(local_err, "Disconnect client, due to: ");
    }
+
     nbd_request_put(req);
+    qemu_mutex_unlock(&client->lock);
 
     aio_co_reschedule_self(qemu_get_aio_context());
     client_close(client, true);
     nbd_client_put(client);
 }
 
+/*
+ * Runs in export AioContext and main loop thread. Caller must hold
+ * client->lock.
+ */
 static void nbd_client_receive_next_request(NBDClient *client)
 {
     if (!client->recv_coroutine && client->nb_requests < MAX_NBD_REQUESTS &&
@@ -3091,7 +3166,9 @@ static coroutine_fn void nbd_co_client_start(void *opaque)
         return;
     }
 
-    nbd_client_receive_next_request(client);
+    WITH_QEMU_LOCK_GUARD(&client->lock) {
+        nbd_client_receive_next_request(client);
+    }
 }
 
 /*
@@ -3108,6 +3185,7 @@ void nbd_client_new(QIOChannelSocket *sioc,
     Coroutine *co;
 
     client = g_new0(NBDClient, 1);
+    qemu_mutex_init(&client->lock);
     client->refcount = 1;
     client->tlscreds = tlscreds;
     if (tlscreds) {
-- 
2.39.3
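
The key primitive in this patch is WITH_QEMU_LOCK_GUARD() from QEMU's
include/qemu/lockable.h: it acquires a QemuMutex for the duration of the
attached block and releases it on every exit path, including an early
return, which is why the -EAGAIN path in nbd_read_eof() above can return
from inside the guarded block without leaking the lock. A minimal sketch
of the pattern follows; the Counter struct and counter_try_bump() helper
are hypothetical illustrations, not code from the patch, and the snippet
assumes it is built inside the QEMU source tree:

/* Hypothetical example of the WITH_QEMU_LOCK_GUARD() scoped-lock pattern. */
#include "qemu/osdep.h"
#include "qemu/lockable.h"

typedef struct Counter {
    QemuMutex lock;   /* initialized elsewhere with qemu_mutex_init() */
    int value;        /* protected by lock */
} Counter;

static int counter_try_bump(Counter *c, int limit)
{
    WITH_QEMU_LOCK_GUARD(&c->lock) {
        if (c->value >= limit) {
            return -1; /* lock released automatically on early return */
        }
        c->value++;
    }
    /* lock already released here */
    return 0;
}

This is the same design choice the patch makes in client_close() and
nbd_drained_poll(): scoped guards for short critical sections, and explicit
qemu_mutex_lock()/qemu_mutex_unlock() pairs in nbd_trip(), where the lock
must be dropped and reacquired around the blocking receive.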