From 1b4bf69b064815a41ac18ef7276ceab0b9e0eb5b Mon Sep 17 00:00:00 2001
From: Eric Blake <eblake@redhat.com>
Date: Wed, 7 Aug 2024 12:23:13 -0500
Subject: [PATCH 5/5] nbd/server: CVE-2024-7409: Close stray clients at
 server-stop
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

RH-Author: Eric Blake <eblake@redhat.com>
RH-MergeRequest: 263: nbd/server: fix CVE-2024-7409 (qemu crash on nbd-server-stop) [RHEL 10.0]
RH-Jira: RHEL-52599
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>
RH-Commit: [4/4] 6c5c7b5daa2b450122e98eb08ade1e1db56d20ae (redhat/centos-stream/src/qemu-kvm)

A malicious client can attempt to connect to an NBD server, and then
intentionally delay progress in the handshake, including if it does
not know the TLS secrets. Although the previous two patches reduce
this behavior by capping the default max-connections parameter and
killing slow clients, they did not eliminate the possibility of a
client waiting to close the socket until after the QMP nbd-server-stop
command is executed, at which point qemu would SEGV when trying to
dereference the NULL nbd_server global which is no longer present.
This amounts to a denial of service attack. Worse, if another NBD
server is started before the malicious client disconnects, I cannot
rule out additional adverse effects when the old client interferes
with the connection count of the new server (although the most likely
outcome is a crash due to an assertion failure when checking
nbd_server->connections > 0).

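The stall is trivial to provoke with nothing more than a bare TCP
client. The following stand-alone sketch is illustrative only and not
part of this patch; the 127.0.0.1:10809 address is an assumption (the
IANA-registered NBD port), so substitute whatever address
nbd-server-start was actually given:

  /* Hypothetical reproducer sketch: connect to an NBD server and then
   * never progress the handshake. */
  #include <arpa/inet.h>
  #include <netinet/in.h>
  #include <stdio.h>
  #include <sys/socket.h>
  #include <unistd.h>

  int main(void)
  {
      struct sockaddr_in addr = {
          .sin_family = AF_INET,
          .sin_port = htons(10809),      /* assumed NBD port */
      };
      int fd = socket(AF_INET, SOCK_STREAM, 0);

      inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);
      if (fd < 0 || connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
          perror("connect");
          return 1;
      }
      pause();                           /* hold the socket open, send nothing */
      close(fd);
      return 0;
  }

Running something like this against a freshly started NBD server and
then issuing nbd-server-stop from QMP while the program is still
sleeping is the sequence described above.
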
For environments without this patch, the CVE can be mitigated by
ensuring (such as via a firewall) that only trusted clients can
connect to an NBD server. Note that using frameworks like libvirt
that ensure that TLS is used and that nbd-server-stop is not executed
while any trusted clients are still connected will only help if there
is also no possibility for an untrusted client to open a connection
but then stall on the NBD handshake.

Given the previous patches, it would be possible to guarantee that no
clients remain connected by having nbd-server-stop sleep for longer
than the default handshake deadline before finally freeing the global
nbd_server object, but that could make QMP non-responsive for a long
time. So instead, this patch fixes the problem by tracking all client
sockets opened while the server is running, and forcefully closing any
such sockets remaining without a completed handshake at the time of
nbd-server-stop, then waiting until the coroutines servicing those
sockets notice the state change. nbd-server-stop now has a second
AIO_WAIT_WHILE_UNLOCKED (the first is indirectly through the
blk_exp_close_all_type() that disconnects all clients that completed
handshakes), but forced socket shutdown is enough to progress the
coroutines and quickly tear down all clients before the server is
freed, thus finally fixing the CVE.

This patch relies heavily on the fact that nbd/server.c guarantees
that it only calls nbd_blockdev_client_closed() from the main loop
(see the assertion in nbd_client_put() and the hoops used in
nbd_client_put_nonzero() to achieve that); if we did not have that
guarantee, we would also need a mutex protecting our accesses of the
list of connections to survive re-entrancy from independent iothreads.

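Purely for illustration, a sketch of roughly what that locking might
look like; WITH_QEMU_LOCK_GUARD and the functions used below already
exist in qemu, but the conns_lock field is hypothetical and this
variant is deliberately not what the patch does:

  /* Hypothetical sketch, NOT part of this patch: the locking the
   * connection list would need if nbd_blockdev_client_closed() could
   * run outside the main loop.  Assumes an extra NBDServerData field:
   *     QemuMutex conns_lock;   (initialized at server start)
   */
  static void nbd_blockdev_client_closed(NBDClient *client, bool ignored)
  {
      NBDConn *conn = nbd_client_owner(client);

      WITH_QEMU_LOCK_GUARD(&nbd_server->conns_lock) {
          object_unref(OBJECT(conn->cioc));
          QLIST_REMOVE(conn, next);
          g_free(conn);
          assert(nbd_server->connections > 0);
          nbd_server->connections--;
      }
      nbd_client_put(client);
      nbd_update_server_watch(nbd_server);
  }

Keeping everything on the main loop avoids both the extra field and
any question of how long such a lock could safely be held.
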
Although I did not actually try to test old builds, it looks like this
problem has existed since at least commit 862172f45c (v2.12.0, 2017) -
even back when that patch started using a QIONetListener to handle
listening on multiple sockets, nbd_server_free() was already unaware
that the nbd_blockdev_client_closed callback can be reached later by a
client thread that has not completed handshakes (and therefore the
client's socket never got added to the list closed in
nbd_export_close_all), despite that patch intentionally tearing down
the QIONetListener to prevent new clients.

Reported-by: Alexander Ivanov <alexander.ivanov@virtuozzo.com>
Fixes: CVE-2024-7409
CC: qemu-stable@nongnu.org
Signed-off-by: Eric Blake <eblake@redhat.com>
Message-ID: <20240807174943.771624-14-eblake@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>

(cherry picked from commit 3e7ef738c8462c45043a1d39f702a0990406a3b3)
Jira: https://issues.redhat.com/browse/RHEL-52599
Signed-off-by: Eric Blake <eblake@redhat.com>
---
 blockdev-nbd.c | 35 ++++++++++++++++++++++++++++++++++-
 1 file changed, 34 insertions(+), 1 deletion(-)

diff --git a/blockdev-nbd.c b/blockdev-nbd.c
index 24ba5382db..f73409ae49 100644
--- a/blockdev-nbd.c
+++ b/blockdev-nbd.c
@@ -21,12 +21,18 @@
 #include "io/channel-socket.h"
 #include "io/net-listener.h"
 
+typedef struct NBDConn {
+    QIOChannelSocket *cioc;
+    QLIST_ENTRY(NBDConn) next;
+} NBDConn;
+
 typedef struct NBDServerData {
     QIONetListener *listener;
     QCryptoTLSCreds *tlscreds;
     char *tlsauthz;
     uint32_t max_connections;
     uint32_t connections;
+    QLIST_HEAD(, NBDConn) conns;
 } NBDServerData;
 
 static NBDServerData *nbd_server;
@@ -51,6 +57,14 @@ int nbd_server_max_connections(void)
 
 static void nbd_blockdev_client_closed(NBDClient *client, bool ignored)
 {
+    NBDConn *conn = nbd_client_owner(client);
+
+    assert(qemu_in_main_thread() && nbd_server);
+
+    object_unref(OBJECT(conn->cioc));
+    QLIST_REMOVE(conn, next);
+    g_free(conn);
+
     nbd_client_put(client);
     assert(nbd_server->connections > 0);
     nbd_server->connections--;
@@ -60,14 +74,20 @@ static void nbd_blockdev_client_closed(NBDClient *client, bool ignored)
 static void nbd_accept(QIONetListener *listener, QIOChannelSocket *cioc,
                        gpointer opaque)
 {
+    NBDConn *conn = g_new0(NBDConn, 1);
+
+    assert(qemu_in_main_thread() && nbd_server);
     nbd_server->connections++;
+    object_ref(OBJECT(cioc));
+    conn->cioc = cioc;
+    QLIST_INSERT_HEAD(&nbd_server->conns, conn, next);
     nbd_update_server_watch(nbd_server);
 
     qio_channel_set_name(QIO_CHANNEL(cioc), "nbd-server");
     /* TODO - expose handshake timeout as QMP option */
     nbd_client_new(cioc, NBD_DEFAULT_HANDSHAKE_MAX_SECS,
                    nbd_server->tlscreds, nbd_server->tlsauthz,
-                   nbd_blockdev_client_closed, NULL);
+                   nbd_blockdev_client_closed, conn);
 }
 
 static void nbd_update_server_watch(NBDServerData *s)
@@ -81,12 +101,25 @@ static void nbd_update_server_watch(NBDServerData *s)
 
 static void nbd_server_free(NBDServerData *server)
 {
+    NBDConn *conn, *tmp;
+
     if (!server) {
         return;
     }
 
+    /*
+     * Forcefully close the listener socket, and any clients that have
+     * not yet disconnected on their own.
+     */
     qio_net_listener_disconnect(server->listener);
     object_unref(OBJECT(server->listener));
+    QLIST_FOREACH_SAFE(conn, &server->conns, next, tmp) {
+        qio_channel_shutdown(QIO_CHANNEL(conn->cioc), QIO_CHANNEL_SHUTDOWN_BOTH,
+                             NULL);
+    }
+
+    AIO_WAIT_WHILE_UNLOCKED(NULL, server->connections > 0);
+
     if (server->tlscreds) {
         object_unref(OBJECT(server->tlscreds));
     }
-- 
2.39.3