import nbdkit-1.32.5-4.el9

Authored by CentOS Sources on 2023-05-09 05:21:47 +00:00; committed by Stepan Oksanichenko
parent 2b00186655
commit 5520e421fe
34 changed files with 320 additions and 7498 deletions

.gitignore
@@ -1,2 +1,2 @@
SOURCES/libguestfs.keyring
SOURCES/nbdkit-1.30.8.tar.gz
SOURCES/nbdkit-1.32.5.tar.gz

@@ -1,2 +1,2 @@
cc1b37b9cfafa515aab3eefd345ecc59aac2ce7b SOURCES/libguestfs.keyring
6c4607ff13e13460cfdf67f47b9fea9ac0a8ebc3 SOURCES/nbdkit-1.30.8.tar.gz
c8260e2f6fb16a16cefe0cf670fc5a0f41dd7110 SOURCES/nbdkit-1.32.5.tar.gz

@@ -1,293 +0,0 @@
From 6a2b0aac8be655524ea223e32cac0395fcc9f975 Mon Sep 17 00:00:00 2001
From: "Richard W.M. Jones" <rjones@redhat.com>
Date: Fri, 15 Apr 2022 12:08:37 +0100
Subject: [PATCH] ssh: Allow the remote file to be created
This adds new parameters, create=(true|false), create-size=SIZE and
create-mode=MODE to create and truncate the remote file.
Reviewed-by: Laszlo Ersek <lersek@redhat.com>
(cherry picked from commit 0793f30b1071753532362b2ebf9cb8156a88c3c3)
---
plugins/ssh/nbdkit-ssh-plugin.pod | 34 ++++++++-
plugins/ssh/ssh.c | 112 +++++++++++++++++++++++++++---
tests/test-ssh.sh | 13 +++-
3 files changed, 146 insertions(+), 13 deletions(-)
diff --git a/plugins/ssh/nbdkit-ssh-plugin.pod b/plugins/ssh/nbdkit-ssh-plugin.pod
index 3f401c15..2bc2c4a7 100644
--- a/plugins/ssh/nbdkit-ssh-plugin.pod
+++ b/plugins/ssh/nbdkit-ssh-plugin.pod
@@ -5,8 +5,10 @@ nbdkit-ssh-plugin - access disk images over the SSH protocol
=head1 SYNOPSIS
nbdkit ssh host=HOST [path=]PATH
- [compression=true] [config=CONFIG_FILE] [identity=FILENAME]
- [known-hosts=FILENAME] [password=PASSWORD|-|+FILENAME]
+ [compression=true] [config=CONFIG_FILE]
+ [create=true] [create-mode=MODE] [create-size=SIZE]
+ [identity=FILENAME] [known-hosts=FILENAME]
+ [password=PASSWORD|-|+FILENAME]
[port=PORT] [timeout=SECS] [user=USER]
[verify-remote-host=false]
@@ -62,6 +64,34 @@ The C<config> parameter is optional. If it is I<not> specified at all
then F<~/.ssh/config> and F</etc/ssh/ssh_config> are both read.
Missing or unreadable files are ignored.
+=item B<create=true>
+
+(nbdkit E<ge> 1.32)
+
+If set, the remote file will be created. The remote file is created
+on the first NBD connection to nbdkit, not when nbdkit starts up. If
+the file already exists, it will be replaced and any existing content
+lost.
+
+If using this option, you must use C<create-size>. C<create-mode> can
+be used to control the permissions of the new file.
+
+=item B<create-mode=>MODE
+
+(nbdkit E<ge> 1.32)
+
+If using C<create=true> specify the default permissions of the new
+remote file. You can use octal modes like C<create-mode=0777> or
+C<create-mode=0644>. The default is C<0600>, ie. only readable and
+writable by the remote user.
+
+=item B<create-size=>SIZE
+
+(nbdkit E<ge> 1.32)
+
+If using C<create=true>, specify the virtual size of the new disk.
+C<SIZE> can use modifiers like C<100M> etc.
+
=item B<host=>HOST
Specify the name or IP address of the remote host.
diff --git a/plugins/ssh/ssh.c b/plugins/ssh/ssh.c
index 39d77e44..5e314cd7 100644
--- a/plugins/ssh/ssh.c
+++ b/plugins/ssh/ssh.c
@@ -44,6 +44,8 @@
#include <fcntl.h>
#include <sys/stat.h>
+#include <pthread.h>
+
#include <libssh/libssh.h>
#include <libssh/sftp.h>
#include <libssh/callbacks.h>
@@ -51,6 +53,7 @@
#include <nbdkit-plugin.h>
#include "array-size.h"
+#include "cleanup.h"
#include "const-string-vector.h"
#include "minmax.h"
@@ -64,6 +67,9 @@ static const char *known_hosts = NULL;
static const_string_vector identities = empty_vector;
static uint32_t timeout = 0;
static bool compression = false;
+static bool create = false;
+static int64_t create_size = -1;
+static unsigned create_mode = S_IRUSR | S_IWUSR /* 0600 */;
/* config can be:
* NULL => parse options from default file
@@ -167,6 +173,27 @@ ssh_config (const char *key, const char *value)
return -1;
compression = r;
}
+ else if (strcmp (key, "create") == 0) {
+ r = nbdkit_parse_bool (value);
+ if (r == -1)
+ return -1;
+ create = r;
+ }
+ else if (strcmp (key, "create-size") == 0) {
+ create_size = nbdkit_parse_size (value);
+ if (create_size == -1)
+ return -1;
+ }
+ else if (strcmp (key, "create-mode") == 0) {
+ r = nbdkit_parse_unsigned (key, value, &create_mode);
+ if (r == -1)
+ return -1;
+ /* OpenSSH checks this too. */
+ if (create_mode > 0777) {
+ nbdkit_error ("create-mode must be <= 0777");
+ return -1;
+ }
+ }
else {
nbdkit_error ("unknown parameter '%s'", key);
@@ -186,6 +213,13 @@ ssh_config_complete (void)
return -1;
}
+ /* If create=true, create-size must be supplied. */
+ if (create && create_size == -1) {
+ nbdkit_error ("if using create=true, you must specify the size "
+ "of the new remote file using create-size=SIZE");
+ return -1;
+ }
+
return 0;
}
@@ -200,7 +234,10 @@ ssh_config_complete (void)
"identity=<FILENAME> Prepend private key (identity) file.\n" \
"timeout=SECS Set SSH connection timeout.\n" \
"verify-remote-host=false Ignore known_hosts.\n" \
- "compression=true Enable compression."
+ "compression=true Enable compression.\n" \
+ "create=true Create the remote file.\n" \
+ "create-mode=MODE Set the permissions of the remote file.\n" \
+ "create-size=SIZE Set the size of the remote file."
/* Since we must simulate atomic pread and pwrite using seek +
* read/write, calls on each handle must be serialized.
@@ -329,6 +366,65 @@ authenticate (struct ssh_handle *h)
return -1;
}
+/* This function opens or creates the remote file (depending on
+ * create=false|true). Parallel connections might call this function
+ * at the same time, and so we must hold a lock to ensure that the
+ * file is created at most once.
+ */
+static pthread_mutex_t create_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static sftp_file
+open_or_create_path (ssh_session session, sftp_session sftp, int readonly)
+{
+ ACQUIRE_LOCK_FOR_CURRENT_SCOPE (&create_lock);
+ int access_type;
+ int r;
+ sftp_file file;
+
+ access_type = readonly ? O_RDONLY : O_RDWR;
+ if (create) access_type |= O_CREAT | O_TRUNC;
+
+ file = sftp_open (sftp, path, access_type, S_IRWXU);
+ if (!file) {
+ nbdkit_error ("cannot %s file for %s: %s",
+ create ? "create" : "open",
+ readonly ? "reading" : "writing",
+ ssh_get_error (session));
+ return NULL;
+ }
+
+ if (create) {
+ /* There's no sftp_truncate call. However OpenSSH lets you call
+ * SSH_FXP_SETSTAT + SSH_FILEXFER_ATTR_SIZE which invokes
+ * truncate(2) on the server. Libssh doesn't provide a binding
+ * for SSH_FXP_FSETSTAT so we have to pass the session + path.
+ */
+ struct sftp_attributes_struct attrs = {
+ .flags = SSH_FILEXFER_ATTR_SIZE |
+ SSH_FILEXFER_ATTR_PERMISSIONS,
+ .size = create_size,
+ .permissions = create_mode,
+ };
+
+ r = sftp_setstat (sftp, path, &attrs);
+ if (r != SSH_OK) {
+ nbdkit_error ("setstat failed: %s", ssh_get_error (session));
+
+ /* Best-effort attempt to delete the remote file on failure. */
+ r = sftp_unlink (sftp, path);
+ if (r != SSH_OK)
+ nbdkit_debug ("unlink failed: %s", ssh_get_error (session));
+
+ return NULL;
+ }
+ }
+
+ /* On the next connection, don't create or truncate the file. */
+ create = false;
+
+ return file;
+}
+
/* Create the per-connection handle. */
static void *
ssh_open (int readonly)
@@ -337,7 +433,6 @@ ssh_open (int readonly)
const int set = 1;
size_t i;
int r;
- int access_type;
h = calloc (1, sizeof *h);
if (h == NULL) {
@@ -471,7 +566,7 @@ ssh_open (int readonly)
if (authenticate (h) == -1)
goto err;
- /* Open the SFTP connection and file. */
+ /* Open the SFTP connection. */
h->sftp = sftp_new (h->session);
if (!h->sftp) {
nbdkit_error ("failed to allocate sftp session: %s",
@@ -484,14 +579,11 @@ ssh_open (int readonly)
ssh_get_error (h->session));
goto err;
}
- access_type = readonly ? O_RDONLY : O_RDWR;
- h->file = sftp_open (h->sftp, path, access_type, S_IRWXU);
- if (!h->file) {
- nbdkit_error ("cannot open file for %s: %s",
- readonly ? "reading" : "writing",
- ssh_get_error (h->session));
+
+ /* Open or create the remote file. */
+ h->file = open_or_create_path (h->session, h->sftp, readonly);
+ if (!h->file)
goto err;
- }
nbdkit_debug ("opened libssh handle");
diff --git a/tests/test-ssh.sh b/tests/test-ssh.sh
index 6c0ce410..f04b4488 100755
--- a/tests/test-ssh.sh
+++ b/tests/test-ssh.sh
@@ -36,6 +36,7 @@ set -x
requires test -f disk
requires nbdcopy --version
+requires stat --version
# Check that ssh to localhost will work without any passwords or phrases.
#
@@ -48,7 +49,7 @@ then
exit 77
fi
-files="ssh.img"
+files="ssh.img ssh2.img"
rm -f $files
cleanup_fn rm -f $files
@@ -59,3 +60,13 @@ nbdkit -v -D ssh.log=2 -U - \
# The output should be identical.
cmp disk ssh.img
+
+# Copy local file 'ssh.img' to newly created "remote" 'ssh2.img'
+size="$(stat -c %s disk)"
+nbdkit -v -D ssh.log=2 -U - \
+ ssh host=localhost $PWD/ssh2.img \
+ create=true create-size=$size \
+ --run 'nbdcopy ssh.img "$uri"'
+
+# The output should be identical.
+cmp disk ssh2.img
--
2.31.1
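
As an illustrative sketch (not part of the patch; the host name, file names and
paths are invented), the new options added above could be used to create and
populate a fresh remote file in one step, mirroring the test case in the patch:

$ size="$(stat -c %s local-disk.img)"
$ nbdkit -U - ssh host=remote.example.com /var/tmp/new-disk.img \
      create=true create-size=$size create-mode=0600 \
      --run 'nbdcopy local-disk.img "$uri"'

create-mode=0600 is the default and is shown here only for clarity.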

@@ -0,0 +1,31 @@
From e0e592775911ebe2178b04b4b20f95fea2f2fe9c Mon Sep 17 00:00:00 2001
From: "Richard W.M. Jones" <rjones@redhat.com>
Date: Thu, 5 Jan 2023 16:05:33 +0000
Subject: [PATCH] ssh: Remove left over comment
This comment was left over from when I copied the libssh example code.
It adds no value so remove it.
(cherry picked from commit c93a8957efcc26652b31f5bc359dfd3c4019b4f8)
---
plugins/ssh/ssh.c | 4 ----
1 file changed, 4 deletions(-)
diff --git a/plugins/ssh/ssh.c b/plugins/ssh/ssh.c
index 6cf40c26..aaa7c2b9 100644
--- a/plugins/ssh/ssh.c
+++ b/plugins/ssh/ssh.c
@@ -356,10 +356,6 @@ authenticate (struct ssh_handle *h)
if (rc == SSH_AUTH_SUCCESS) return 0;
}
- /* Example code tries keyboard-interactive here, but we cannot use
- * that method from a server.
- */
-
if (password != NULL && (method & SSH_AUTH_METHOD_PASSWORD)) {
rc = authenticate_password (h->session, password);
if (rc == SSH_AUTH_SUCCESS) return 0;
--
2.31.1

@@ -1,794 +0,0 @@
From ac40ae11bc9983e11185749b23e793568cb366cc Mon Sep 17 00:00:00 2001
From: "Richard W.M. Jones" <rjones@redhat.com>
Date: Sat, 16 Apr 2022 18:39:13 +0100
Subject: [PATCH] readahead: Rewrite this filter so it prefetches using .cache
The previous readahead filter did not work well and we have stopped
using it in virt-v2v. However the concept is a good one if we can
make it work. This commit completely rethinks the filter.
Now, in parallel with the ordinary pread command, we issue a prefetch
(ie. .cache) to the underlying plugin for the data immediately
following the pread. For example a simple sequence of operations:
  t=1  pread (offset=0, count=65536)
  t=2  pread (offset=65536, count=65536)
  t=3  pread (offset=131072, count=65536)

would become:

  t=1  pread (offset=0, count=65536)       <--\  issued
       cache (offset=65536, count=65536)   <--/  in parallel
  t=2  pread (offset=65536, count=65536)
       cache (offset=131072, count=65536)
  t=3  pread (offset=131072, count=65536)
       cache (offset=196608, count=65536)
This requires that the underlying filter(s) and plugin chain can
actually do something with the .cache request. If this is not the
case then the filter does nothing (but it will print a warning). For
plugins which don't have native support for prefetching, it is
sufficient to insert nbdkit-cache-filter after this filter.
(nbdkit-cow-filter can also be used with cow-on-cache=true, but that
is more useful for advanced users who are already using the cow
filter).
The implementation creates a background thread per connection to issue
the parallel .cache requests. This is safer than the alternative (one
background thread in total) since we don't have to deal with the
problem of cache requests being issued with the wrong export name or
stale next pointer. The background thread is controlled by a queue of
commands, with the only possible commands being "cache" or "quit".
Because the background thread issues parallel requests on the same
connection, the underlying plugin must support the parallel thread
model, otherwise we would be violating the plugin's thread model. It
may be possible in future to open a new connection from the background
thread (but with the same exportname), which would lift this
restriction to at least serialize_requests. Because of the current
limitation, nbdkit-curl-plugin cannot use prefetch.
(cherry picked from commit 2ff548d66ad3eae87868402ec5b3319edd12090f)
---
TODO | 22 +-
filters/readahead/Makefile.am | 2 +
filters/readahead/bgthread.c | 76 ++++
filters/readahead/nbdkit-readahead-filter.pod | 70 +++-
filters/readahead/readahead.c | 338 +++++++++---------
filters/readahead/readahead.h | 60 ++++
tests/test-readahead.sh | 2 +-
7 files changed, 367 insertions(+), 203 deletions(-)
create mode 100644 filters/readahead/bgthread.c
create mode 100644 filters/readahead/readahead.h
diff --git a/TODO b/TODO
index 5ae21db5..4d2a9796 100644
--- a/TODO
+++ b/TODO
@@ -62,16 +62,6 @@ General ideas for improvements
continue to keep their non-standard handshake while utilizing nbdkit
to prototype new behaviors in serving the kernel.
-* Background thread for filters. Some filters (readahead, cache and
- proposed scan filter - see below) could be more effective if they
- were able to defer work to a background thread. We finally have
- nbdkit_next_context_open and friends for allowing a background
- thread to have access into the plugin, but still need to worry about
- thread-safety (how much must the filter do vs. nbdkit, to avoid
- calling into the plugin too many times at once) and cleanup
- (spawning the thread during .after_fork is viable, but cleaning it
- up during .unload is too late).
-
* "nbdkit.so": nbdkit as a loadable shared library. The aim of nbdkit
is to make it reusable from other programs (see nbdkit-captive(1)).
If it was a loadable shared library it would be even more reusable.
@@ -228,6 +218,8 @@ Suggestions for filters
* nbdkit-cache-filter should handle ENOSPC errors automatically by
reclaiming blocks from the cache
+* nbdkit-cache-filter could use a background thread for reclaiming.
+
* zstd filter was requested as a way to do what we currently do with
xz but saving many hours on compression (at the cost of hundreds of
MBs of extra data)
@@ -240,6 +232,16 @@ Suggestions for filters
could inject a flush after pausing. However this requires that
filter background threads have access to the plugin (see above).
+nbdkit-readahead-filter:
+
+* The filter should open a new connection to the plugin per background
+ thread so it is able to work with plugins that use the
+ serialize_requests thread model (like curl). At the moment it makes
+ requests on the same connection, so it requires plugins to use the
+ parallel thread model.
+
+* It should combine (or avoid) overlapping cache requests.
+
nbdkit-rate-filter:
* allow other kinds of traffic shaping such as VBR
diff --git a/filters/readahead/Makefile.am b/filters/readahead/Makefile.am
index ee5bb3fb..187993ae 100644
--- a/filters/readahead/Makefile.am
+++ b/filters/readahead/Makefile.am
@@ -37,6 +37,8 @@ filter_LTLIBRARIES = nbdkit-readahead-filter.la
nbdkit_readahead_filter_la_SOURCES = \
readahead.c \
+ readahead.h \
+ bgthread.c \
$(top_srcdir)/include/nbdkit-filter.h \
$(NULL)
diff --git a/filters/readahead/bgthread.c b/filters/readahead/bgthread.c
new file mode 100644
index 00000000..5894bb5f
--- /dev/null
+++ b/filters/readahead/bgthread.c
@@ -0,0 +1,76 @@
+/* nbdkit
+ * Copyright (C) 2019-2022 Red Hat Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Red Hat nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY RED HAT AND CONTRIBUTORS ''AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL RED HAT OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <config.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <pthread.h>
+
+#include <nbdkit-filter.h>
+
+#include "readahead.h"
+
+#include "cleanup.h"
+
+void *
+readahead_thread (void *vp)
+{
+ struct bgthread_ctrl *ctrl = vp;
+
+ for (;;) {
+ struct command cmd;
+
+ /* Wait until we are sent at least one command. */
+ {
+ ACQUIRE_LOCK_FOR_CURRENT_SCOPE (&ctrl->lock);
+ while (ctrl->cmds.len == 0)
+ pthread_cond_wait (&ctrl->cond, &ctrl->lock);
+ cmd = ctrl->cmds.ptr[0];
+ command_queue_remove (&ctrl->cmds, 0);
+ }
+
+ switch (cmd.type) {
+ case CMD_QUIT:
+ /* Finish processing and exit the thread. */
+ return NULL;
+
+ case CMD_CACHE:
+ /* Issue .cache (readahead) to underlying plugin. We ignore any
+ * errors because there's no way to communicate that back to the
+ * client, and readahead is only advisory.
+ */
+ cmd.next->cache (cmd.next, cmd.count, cmd.offset, 0, NULL);
+ }
+ }
+}
diff --git a/filters/readahead/nbdkit-readahead-filter.pod b/filters/readahead/nbdkit-readahead-filter.pod
index c220d379..630e5924 100644
--- a/filters/readahead/nbdkit-readahead-filter.pod
+++ b/filters/readahead/nbdkit-readahead-filter.pod
@@ -1,28 +1,66 @@
=head1 NAME
-nbdkit-readahead-filter - prefetch data when reading sequentially
+nbdkit-readahead-filter - prefetch data ahead of sequential reads
=head1 SYNOPSIS
- nbdkit --filter=readahead plugin
+ nbdkit --filter=readahead PLUGIN
+
+ nbdkit --filter=readahead --filter=cache PLUGIN
+
+ nbdkit --filter=readahead --filter=cow PLUGIN cow-on-cache=true
=head1 DESCRIPTION
C<nbdkit-readahead-filter> is a filter that prefetches data when the
-client is reading sequentially.
+client is reading.
-A common use for this filter is to accelerate sequential copy
-operations (like S<C<qemu-img convert>>) when plugin requests have a
-high overhead (like L<nbdkit-curl-plugin(1)>). For example:
-
- nbdkit -U - --filter=readahead curl https://example.com/disk.img \
- --run 'qemu-img convert $nbd disk.img'
+When the client issues a read, this filter issues a parallel prefetch
+(C<.cache>) for subsequent data. Plugins which support this command
+will prefetch the data, making sequential reads faster. For plugins
+which do not support this command, you can inject
+L<nbdkit-cache-filter(1)> below (after) this filter, giving
+approximately the same effect. L<nbdkit-cow-filter(1)> can be used
+instead of nbdkit-cache-filter, if you add the C<cow-on-cache=true>
+option.
The filter uses a simple adaptive algorithm which accelerates
-sequential reads, but has a small penalty if the client does random
-reads. If the client mixes reads with writes or write-like operations
-(trimming, zeroing) then it will work but there can be a large
-performance penalty.
+sequential reads and requires no further configuration.
+
+=head2 Limitations
+
+In a number of significant cases this filter will do nothing. The
+filter will print a warning message if this happens.
+
+=over 4
+
+=item Thread model must be parallel
+
+For example L<nbdkit-curl-plugin(1)> only supports
+C<serialize_requests>, and so this filter cannot perform prefetches in
+parallel with the read requests.
+
+We may be able to lift this restriction in future.
+
+=item Underlying filters or plugin must support C<.cache> (prefetch)
+
+Very many plugins do not have the concept of prefetching and/or
+do not implement the C<.cache> callback, and so there is no
+way for this filter to issue prefetches.
+
+You can usually get around this by adding I<--filter=cache> after this
+filter as explained above. It may be necessary to limit the total
+size of the cache (see L<nbdkit-cache-filter(1)/CACHE MAXIMUM SIZE>).
+
+=item Clients and kernels may do readahead already
+
+It may be the case that NBD clients are already issuing
+C<NBD_CMD_CACHE> (NBD prefetch) commands. It may also be the case
+that your plugin is using local file functions where the kernel is
+doing readahead. In such cases this filter is not necessary and may
+be pessimal.
+
+=back
=head1 PARAMETERS
@@ -50,9 +88,9 @@ C<nbdkit-readahead-filter> first appeared in nbdkit 1.12.
L<nbdkit(1)>,
L<nbdkit-cache-filter(1)>,
-L<nbdkit-curl-plugin(1)>,
+L<nbdkit-cow-filter(1)>,
+L<nbdkit-file-plugin(1)>,
L<nbdkit-retry-filter(1)>,
-L<nbdkit-ssh-plugin(1)>,
L<nbdkit-torrent-plugin(1)>,
L<nbdkit-vddk-plugin(1)>,
L<nbdkit-filter(3)>,
@@ -64,4 +102,4 @@ Richard W.M. Jones
=head1 COPYRIGHT
-Copyright (C) 2019 Red Hat Inc.
+Copyright (C) 2019-2022 Red Hat Inc.
diff --git a/filters/readahead/readahead.c b/filters/readahead/readahead.c
index f5552d4c..1d7ae111 100644
--- a/filters/readahead/readahead.c
+++ b/filters/readahead/readahead.c
@@ -1,5 +1,5 @@
/* nbdkit
- * Copyright (C) 2019-2021 Red Hat Inc.
+ * Copyright (C) 2019-2022 Red Hat Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
@@ -34,232 +34,218 @@
#include <stdio.h>
#include <stdlib.h>
+#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
-
#include <pthread.h>
#include <nbdkit-filter.h>
+#include "readahead.h"
+
#include "cleanup.h"
#include "minmax.h"
-
-/* Copied from server/plugins.c. */
-#define MAX_REQUEST_SIZE (64 * 1024 * 1024)
+#include "vector.h"
/* These could be made configurable in future. */
-#define READAHEAD_MIN 65536
-#define READAHEAD_MAX MAX_REQUEST_SIZE
-
-/* This lock protects the global state. */
-static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
-
-/* The real size of the underlying plugin. */
-static uint64_t size;
+#define READAHEAD_MIN 32768
+#define READAHEAD_MAX (4*1024*1024)
/* Size of the readahead window. */
+static pthread_mutex_t window_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t window = READAHEAD_MIN;
+static uint64_t last_offset = 0, last_readahead = 0;
-/* The single prefetch buffer shared by all threads, and its virtual
- * location in the virtual disk. The prefetch buffer grows
- * dynamically as required, but never shrinks.
+static int thread_model = -1; /* Thread model of the underlying plugin. */
+
+/* Per-connection data. */
+struct readahead_handle {
+ int can_cache; /* Can the underlying plugin cache? */
+ pthread_t thread; /* The background thread, one per connection. */
+ struct bgthread_ctrl ctrl;
+};
+
+/* We have various requirements of the underlying filter(s) + plugin:
+ * - They must support NBDKIT_CACHE_NATIVE (otherwise our requests
+ * would not do anything useful).
+ * - They must use the PARALLEL thread model (otherwise we could
+ * violate their thread model).
+ */
+static bool
+filter_working (struct readahead_handle *h)
+{
+ return
+ h->can_cache == NBDKIT_CACHE_NATIVE &&
+ thread_model == NBDKIT_THREAD_MODEL_PARALLEL;
+}
+
+static bool
+suggest_cache_filter (struct readahead_handle *h)
+{
+ return
+ h->can_cache != NBDKIT_CACHE_NATIVE &&
+ thread_model == NBDKIT_THREAD_MODEL_PARALLEL;
+}
+
+/* We need to hook into .get_ready() so we can read the final thread
+ * model (of the whole server).
*/
-static char *buffer = NULL;
-static size_t bufsize = 0;
-static uint64_t position;
-static uint32_t length = 0;
+static int
+readahead_get_ready (int final_thread_model)
+{
+ thread_model = final_thread_model;
+ return 0;
+}
+
+static int
+send_command_to_background_thread (struct bgthread_ctrl *ctrl,
+ const struct command cmd)
+{
+ ACQUIRE_LOCK_FOR_CURRENT_SCOPE (&ctrl->lock);
+ if (command_queue_append (&ctrl->cmds, cmd) == -1)
+ return -1;
+ /* Signal the thread if it could be sleeping on an empty queue. */
+ if (ctrl->cmds.len == 1)
+ pthread_cond_signal (&ctrl->cond);
+ return 0;
+}
+
+static void *
+readahead_open (nbdkit_next_open *next, nbdkit_context *nxdata,
+ int readonly, const char *exportname, int is_tls)
+{
+ struct readahead_handle *h;
+ int err;
+
+ if (next (nxdata, readonly, exportname) == -1)
+ return NULL;
+
+ h = malloc (sizeof *h);
+ if (h == NULL) {
+ nbdkit_error ("malloc: %m");
+ return NULL;
+ }
+
+ h->ctrl.cmds = (command_queue) empty_vector;
+ pthread_mutex_init (&h->ctrl.lock, NULL);
+ pthread_cond_init (&h->ctrl.cond, NULL);
+
+ /* Create the background thread. */
+ err = pthread_create (&h->thread, NULL, readahead_thread, &h->ctrl);
+ if (err != 0) {
+ errno = err;
+ nbdkit_error ("pthread_create: %m");
+ pthread_cond_destroy (&h->ctrl.cond);
+ pthread_mutex_destroy (&h->ctrl.lock);
+ free (h);
+ return NULL;
+ }
+
+ return h;
+}
static void
-readahead_unload (void)
+readahead_close (void *handle)
{
- free (buffer);
+ struct readahead_handle *h = handle;
+ const struct command quit_cmd = { .type = CMD_QUIT };
+
+ send_command_to_background_thread (&h->ctrl, quit_cmd);
+ pthread_join (h->thread, NULL);
+ pthread_cond_destroy (&h->ctrl.cond);
+ pthread_mutex_destroy (&h->ctrl.lock);
+ command_queue_reset (&h->ctrl.cmds);
+ free (h);
}
-static int64_t readahead_get_size (nbdkit_next *next, void *handle);
-
-/* In prepare, force a call to get_size which sets the size global. */
static int
-readahead_prepare (nbdkit_next *next, void *handle, int readonly)
+readahead_can_cache (nbdkit_next *next, void *handle)
{
- int64_t r;
+ struct readahead_handle *h = handle;
+ int r;
- r = readahead_get_size (next, handle);
- return r >= 0 ? 0 : -1;
-}
-
-/* Get the size. */
-static int64_t
-readahead_get_size (nbdkit_next *next, void *handle)
-{
- int64_t r;
-
- r = next->get_size (next);
+ /* Call next->can_cache to read the underlying 'can_cache'. */
+ r = next->can_cache (next);
if (r == -1)
return -1;
+ h->can_cache = r;
- ACQUIRE_LOCK_FOR_CURRENT_SCOPE (&lock);
- size = r;
+ if (!filter_working (h)) {
+ nbdkit_error ("readahead: warning: underlying plugin does not support "
+ "NBD_CMD_CACHE or PARALLEL thread model, so the filter "
+ "won't do anything");
+ if (suggest_cache_filter (h))
+ nbdkit_error ("readahead: try adding --filter=cache "
+ "after this filter");
+ /* This is an error, but that's just to ensure that the warning
+ * above is seen. We don't need to return -1 here.
+ */
+ }
return r;
}
-/* Cache */
-static int
-readahead_can_cache (nbdkit_next *next, void *handle)
-{
- /* We are already operating as a cache regardless of the plugin's
- * underlying .can_cache, but it's easiest to just rely on nbdkit's
- * behavior of calling .pread for caching.
- */
- return NBDKIT_CACHE_EMULATE;
-}
-
/* Read data. */
-
-static int
-fill_readahead (nbdkit_next *next,
- uint32_t count, uint64_t offset, uint32_t flags, int *err)
-{
- position = offset;
-
- /* Read at least window bytes, but if count is larger read that.
- * Note that the count cannot be bigger than the buffer size.
- */
- length = MAX (count, window);
-
- /* Don't go beyond the end of the underlying file. */
- length = MIN (length, size - position);
-
- /* Grow the buffer if necessary. */
- if (bufsize < length) {
- char *new_buffer = realloc (buffer, length);
- if (new_buffer == NULL) {
- *err = errno;
- nbdkit_error ("realloc: %m");
- return -1;
- }
- buffer = new_buffer;
- bufsize = length;
- }
-
- if (next->pread (next, buffer, length, offset, flags, err) == -1) {
- length = 0; /* failed to fill the prefetch buffer */
- return -1;
- }
-
- return 0;
-}
-
static int
readahead_pread (nbdkit_next *next,
void *handle, void *buf, uint32_t count, uint64_t offset,
uint32_t flags, int *err)
{
- ACQUIRE_LOCK_FOR_CURRENT_SCOPE (&lock);
+ struct readahead_handle *h = handle;
- while (count > 0) {
- if (length == 0) {
- /* We don't have a prefetch buffer at all. This could be the
- * first request or reset after a miss.
- */
- window = READAHEAD_MIN;
- if (fill_readahead (next, count, offset, flags, err) == -1)
- return -1;
- }
+ /* If the underlying plugin doesn't support caching then skip that
+ * step completely. The filter will do nothing.
+ */
+ if (filter_working (h)) {
+ struct command ra_cmd = { .type = CMD_CACHE, .next = NULL };
+ int64_t size;
- /* Can we satisfy this request partly or entirely from the prefetch
- * buffer?
- */
- else if (position <= offset && offset < position + length) {
- uint32_t n = MIN (position - offset + length, count);
- memcpy (buf, &buffer[offset-position], n);
- buf += n;
- offset += n;
- count -= n;
- }
+ size = next->get_size (next);
+ if (size >= 0) {
+ ACQUIRE_LOCK_FOR_CURRENT_SCOPE (&window_lock);
- /* Does the request start immediately after the prefetch buffer?
- * This is a “hit” allowing us to double the window size.
- */
- else if (offset == position + length) {
- window = MIN (window * 2, READAHEAD_MAX);
- if (fill_readahead (next, count, offset, flags, err) == -1)
- return -1;
+ /* Generate the asynchronous (background) cache command for
+ * the readahead window.
+ */
+ ra_cmd.offset = offset + count;
+ if (ra_cmd.offset < size) {
+ ra_cmd.count = MIN (window, size - ra_cmd.offset);
+ ra_cmd.next = next; /* If .next is non-NULL, we'll send it below. */
+ }
+
+ /* Should we change the window size?
+ * If the last readahead < current offset, double the window.
+ * If not, but we're still making forward progress, keep the window.
+ * If we're not making forward progress, reduce the window to minimum.
+ */
+ if (last_readahead < offset)
+ window = MIN (window * 2, READAHEAD_MAX);
+ else if (last_offset < offset)
+ /* leave window unchanged */ ;
+ else
+ window = READAHEAD_MIN;
+ last_offset = offset;
+ last_readahead = ra_cmd.offset + ra_cmd.count;
}
- /* Else it's a “miss”. Reset everything and start again. */
- else
- length = 0;
+ if (ra_cmd.next &&
+ send_command_to_background_thread (&h->ctrl, ra_cmd) == -1)
+ return -1;
}
- return 0;
-}
-
-/* Any writes or write-like operations kill the prefetch buffer.
- *
- * We could do better here, but for the current use case of this
- * filter it doesn't matter. XXX
- */
-
-static void
-kill_readahead (void)
-{
- ACQUIRE_LOCK_FOR_CURRENT_SCOPE (&lock);
- window = READAHEAD_MIN;
- length = 0;
-}
-
-static int
-readahead_pwrite (nbdkit_next *next,
- void *handle,
- const void *buf, uint32_t count, uint64_t offset,
- uint32_t flags, int *err)
-{
- kill_readahead ();
- return next->pwrite (next, buf, count, offset, flags, err);
-}
-
-static int
-readahead_trim (nbdkit_next *next,
- void *handle,
- uint32_t count, uint64_t offset, uint32_t flags,
- int *err)
-{
- kill_readahead ();
- return next->trim (next, count, offset, flags, err);
-}
-
-static int
-readahead_zero (nbdkit_next *next,
- void *handle,
- uint32_t count, uint64_t offset, uint32_t flags,
- int *err)
-{
- kill_readahead ();
- return next->zero (next, count, offset, flags, err);
-}
-
-static int
-readahead_flush (nbdkit_next *next,
- void *handle, uint32_t flags, int *err)
-{
- kill_readahead ();
- return next->flush (next, flags, err);
+ /* Issue the synchronous read. */
+ return next->pread (next, buf, count, offset, flags, err);
}
static struct nbdkit_filter filter = {
.name = "readahead",
.longname = "nbdkit readahead filter",
- .unload = readahead_unload,
- .prepare = readahead_prepare,
- .get_size = readahead_get_size,
+ .get_ready = readahead_get_ready,
+ .open = readahead_open,
+ .close = readahead_close,
.can_cache = readahead_can_cache,
.pread = readahead_pread,
- .pwrite = readahead_pwrite,
- .trim = readahead_trim,
- .zero = readahead_zero,
- .flush = readahead_flush,
};
NBDKIT_REGISTER_FILTER(filter)
diff --git a/filters/readahead/readahead.h b/filters/readahead/readahead.h
new file mode 100644
index 00000000..a68204d5
--- /dev/null
+++ b/filters/readahead/readahead.h
@@ -0,0 +1,60 @@
+/* nbdkit
+ * Copyright (C) 2019-2022 Red Hat Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Red Hat nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY RED HAT AND CONTRIBUTORS ''AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL RED HAT OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef NBDKIT_READAHEAD_H
+#define NBDKIT_READAHEAD_H
+
+#include <pthread.h>
+
+#include <nbdkit-filter.h>
+
+#include "vector.h"
+
+/* List of commands issued to the background thread. */
+struct command {
+ enum { CMD_QUIT, CMD_CACHE } type;
+ nbdkit_next *next;
+ uint64_t offset;
+ uint32_t count;
+};
+DEFINE_VECTOR_TYPE(command_queue, struct command);
+
+struct bgthread_ctrl {
+ command_queue cmds; /* Command queue. */
+ pthread_mutex_t lock; /* Lock for queue. */
+ pthread_cond_t cond; /* Condition queue size 0 -> 1. */
+};
+
+/* Start background thread (one per connection). */
+extern void *readahead_thread (void *vp);
+
+#endif /* NBDKIT_READAHEAD_H */
diff --git a/tests/test-readahead.sh b/tests/test-readahead.sh
index 7ec7f8e9..17126e5a 100755
--- a/tests/test-readahead.sh
+++ b/tests/test-readahead.sh
@@ -59,7 +59,7 @@ for i in range(0, 512*10, 512):
echo $((end_t - start_t))
}
-t1=$(test --filter=readahead)
+t1=$(test --filter=readahead --filter=cache)
t2=$(test)
# In the t1 case we should make only 1 request into the plugin,
--
2.31.1
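
As an illustrative sketch (not part of the patch; the paths are invented), the
rewritten filter can be tried against a plugin that already uses the parallel
thread model and implements .cache natively, which the file plugin is expected
to do; for plugins without .cache support, the documentation above suggests
stacking --filter=cache after --filter=readahead instead:

$ nbdkit -U - --filter=readahead file /var/tmp/disk.img \
      --run 'qemu-img convert -p "$nbd" /var/tmp/out.img'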

@@ -0,0 +1,68 @@
From 916f90972af60576591dea4a4f1d07e4dae6d9cf Mon Sep 17 00:00:00 2001
From: "Richard W.M. Jones" <rjones@redhat.com>
Date: Thu, 5 Jan 2023 11:29:32 +0000
Subject: [PATCH] ssh: Improve the error message when all authentication
methods fail
The current error message:
nbdkit: ssh[1]: error: all possible authentication methods failed
is confusing and non-actionable. It's hard even for experts to
understand the relationship between the authentication methods offered
by a server and what we require.
Try to improve the error message in some common situations, especially
where password authentication on the server side is disabled but the
client supplied a password=... parameter. After this change, you will
see an actionable error:
nbdkit: ssh[1]: error: the server does not offer password
authentication but you tried to use a password; if you have root
access to the server, try editing 'sshd_config' and setting
'PasswordAuthentication yes'; otherwise try setting up public key
authentication
Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=2158300
Reviewed-by: Laszlo Ersek <lersek@redhat.com>
(cherry picked from commit bea88cff5ac9c42f1a068ad24d43d5ed0506edaa)
---
plugins/ssh/ssh.c | 22 ++++++++++++++++++++++
1 file changed, 22 insertions(+)
diff --git a/plugins/ssh/ssh.c b/plugins/ssh/ssh.c
index aaa7c2b9..5a132d8f 100644
--- a/plugins/ssh/ssh.c
+++ b/plugins/ssh/ssh.c
@@ -361,6 +361,28 @@ authenticate (struct ssh_handle *h)
if (rc == SSH_AUTH_SUCCESS) return 0;
}
+ /* All compatible methods were tried and none worked. Come up with
+ * an actionable diagnostic message if we recognise the problem.
+ */
+ if (!(method & SSH_AUTH_METHOD_PUBLICKEY) && password == NULL) {
+ nbdkit_error ("the server does not offer public key authentication; "
+ "try using the password=... parameter");
+ return -1;
+ }
+ if ((method & SSH_AUTH_METHOD_PASSWORD) && password != NULL) {
+ nbdkit_error ("password authentication failed, "
+ "is the username and password correct?");
+ return -1;
+ }
+ if (!(method & SSH_AUTH_METHOD_PASSWORD) && password != NULL) {
+ nbdkit_error ("the server does not offer password authentication "
+ "but you tried to use a password; if you have root access "
+ "to the server, try editing 'sshd_config' and setting "
+ "'PasswordAuthentication yes'; otherwise try setting up "
+ "public key authentication");
+ return -1;
+ }
+
nbdkit_error ("all possible authentication methods failed");
return -1;
}
--
2.31.1
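
As an illustrative sketch (not part of the patch; the user, host and key paths
are invented), the last error message above points at the usual remedy: switch
to public key authentication and drop the password=... parameter entirely:

$ ssh-keygen -t ed25519 -f ~/.ssh/nbdkit_ed25519 -N ''
$ ssh-copy-id -i ~/.ssh/nbdkit_ed25519.pub user@remote.example.com
$ nbdkit ssh host=remote.example.com user=user \
      identity=$HOME/.ssh/nbdkit_ed25519 /var/tmp/disk.img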

@@ -0,0 +1,44 @@
From dc86950fff020688a17b6ff0dbfea7bdb0d8f1b9 Mon Sep 17 00:00:00 2001
From: "Richard W.M. Jones" <rjones@redhat.com>
Date: Tue, 10 Jan 2023 08:39:11 +0000
Subject: [PATCH] luks: Avoid crash when image does not contain a LUKS header
We attempt to load the LUKS header in the prepare() callback. If this
fails, h->h will be NULL and we'll crash in close() when we attempt to
access and free h->h->masterkey.
This crash could have been triggered another way: if open() followed
by close() was called, without prepare() or other callbacks.
Reported-by: Ming Xie
Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=2159581
(cherry picked from commit cad4b96b17ed4ad7882100efa0d9073ac9d8b11c)
---
filters/luks/luks-encryption.c | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/filters/luks/luks-encryption.c b/filters/luks/luks-encryption.c
index 26f81e7b..6f33e76e 100644
--- a/filters/luks/luks-encryption.c
+++ b/filters/luks/luks-encryption.c
@@ -856,11 +856,13 @@ load_header (nbdkit_next *next, const char *passphrase)
void
free_luks_data (struct luks_data *h)
{
- if (h->masterkey) {
- memset (h->masterkey, 0, h->phdr.master_key_len);
- free (h->masterkey);
+ if (h) {
+ if (h->masterkey) {
+ memset (h->masterkey, 0, h->phdr.master_key_len);
+ free (h->masterkey);
+ }
+ free (h);
}
- free (h);
}
uint64_t
--
2.31.1

@@ -1,120 +0,0 @@
From b41b7d7ddf6d3fba23ac7978c8b272f2ff84265d Mon Sep 17 00:00:00 2001
From: "Richard W.M. Jones" <rjones@redhat.com>
Date: Thu, 21 Apr 2022 16:14:46 +0100
Subject: [PATCH] readahead: Fix test
The previous test turned out to be pretty bad at testing the new
filter. A specific problem is that the filter starts a background
thread which issues .cache requests, while on the main connection
.pread requests are being passed through. The test used
--filter=readahead --filter=cache with the cache filter only caching
on .cache requests (since cache-on-read defaults to false), so only
caching requests made by the background thread.
              main thread
  client ---- .pread ----- delay-filter -------> plugin
         \
          \  background thread
           .cache --- cache-filter
Under very high load, the background thread could be starved. This
means no requests were being cached at all, and all requests were
passing through the delay filter. It would appear that readahead was
failing (which it was, in a way).
It's not very easy to fix this since readahead is best-effort, but we
can go back to using a simpler plugin that logs reads and caches and
check that they look valid.
Update: commit 2ff548d66ad3eae87868402ec5b3319edd12090f
(cherry picked from commit db1e3311727c6ecab3264a1811d33db1aa45a4d0)
---
tests/test-readahead.sh | 61 +++++++++++++++++++++++------------------
1 file changed, 35 insertions(+), 26 deletions(-)
diff --git a/tests/test-readahead.sh b/tests/test-readahead.sh
index 17126e5a..37f4a06f 100755
--- a/tests/test-readahead.sh
+++ b/tests/test-readahead.sh
@@ -30,43 +30,52 @@
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
-# Is the readahead filter faster? Copy a blank disk with a custom
-# plugin that sleeps on every request. Because the readahead filter
-# should result in fewer requests it should run faster.
-
source ./functions.sh
set -e
set -x
-requires_filter delay
+requires_plugin sh
requires nbdsh --version
requires dd iflag=count_bytes </dev/null
-files="readahead.img"
+files="readahead.out"
rm -f $files
cleanup_fn rm -f $files
-test ()
-{
- start_t=$SECONDS
- nbdkit -fv -U - "$@" null size=1M --filter=delay rdelay=5 \
- --run 'nbdsh --uri "$uri" -c "
+nbdkit -fv -U - "$@" sh - \
+ --filter=readahead \
+ --run 'nbdsh --uri "$uri" -c "
for i in range(0, 512*10, 512):
h.pread(512, i)
-"'
+"' <<'EOF'
+case "$1" in
+ thread_model)
+ echo parallel
+ ;;
+ can_cache)
+ echo native
+ ;;
+ get_size)
+ echo 1M
+ ;;
+ cache)
+ echo "$@" >> readahead.out
+ ;;
+ pread)
+ echo "$@" >> readahead.out
+ dd if=/dev/zero count=$3 iflag=count_bytes
+ ;;
+ *)
+ exit 2
+ ;;
+esac
+EOF
- end_t=$SECONDS
- echo $((end_t - start_t))
-}
+cat readahead.out
-t1=$(test --filter=readahead --filter=cache)
-t2=$(test)
-
-# In the t1 case we should make only 1 request into the plugin,
-# resulting in around 1 sleep period (5 seconds). In the t2 case we
-# make 10 requests so sleep for around 50 seconds. t1 should be < t2
-# is every reasonable scenario.
-if [ $t1 -ge $t2 ]; then
- echo "$0: readahead filter took longer, should be shorter"
- exit 1
-fi
+# We should see the pread requests, and additional cache requests for
+# the 32K region following each pread request.
+for i in `seq 0 512 $((512*10 - 512))` ; do
+ grep "pread 512 $i" readahead.out
+ grep "cache 32768 $((i+512))" readahead.out
+done
--
2.31.1

File diff suppressed because it is too large.

@@ -0,0 +1,95 @@
From 3f74004478d3590840d7eba97a590b7ec954957f Mon Sep 17 00:00:00 2001
From: "Richard W.M. Jones" <rjones@redhat.com>
Date: Thu, 2 Feb 2023 13:59:32 +0000
Subject: [PATCH] curl: Enable multi-conn for read-only connections
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Comparing before and after this commit shows approximately double the
performance. In other tests this allowed us to download files from
web servers at line speed.
  Benchmark 1: nbdkit -r curl file:/var/tmp/jammy-server-cloudimg-amd64.raw --run "nbdcopy -p \$uri null:"
    Time (mean ± σ):     943.8 ms ±  18.8 ms    [User: 316.2 ms, System: 1029.7 ms]
    Range (min … max):   923.7 ms … 989.2 ms    10 runs

  Benchmark 2: ~/d/nbdkit/nbdkit -r curl file:/var/tmp/jammy-server-cloudimg-amd64.raw --run "nbdcopy -p \$uri null:"
    Time (mean ± σ):     455.0 ms ±   6.2 ms    [User: 542.2 ms, System: 1824.7 ms]
    Range (min … max):   449.1 ms … 471.6 ms    10 runs

  Summary
    ' ~/d/nbdkit/nbdkit -r curl file:/var/tmp/jammy-server-cloudimg-amd64.raw --run "nbdcopy -p \$uri null:" ' ran
      2.07 ± 0.05 times faster than ' nbdkit -r curl file:/var/tmp/jammy-server-cloudimg-amd64.raw --run "nbdcopy -p \$uri null:" '
Multi-conn is enabled only when we know the connection is read-only:
$ ./nbdkit -r curl file:/var/tmp/jammy-server-cloudimg-amd64.raw --run ' nbdinfo $uri ' | grep can_multi_conn
can_multi_conn: true
$ ./nbdkit curl file:/var/tmp/jammy-server-cloudimg-amd64.raw --run ' nbdinfo $uri ' | grep can_multi_conn
can_multi_conn: false
See also:
https://listman.redhat.com/archives/libguestfs/2023-February/030581.html
Reviewed-by: Eric Blake <eblake@redhat.com>
(cherry picked from commit bb0f93ad7b9de451874d0c54188bf69cd37c5409)
---
plugins/curl/curl.c | 14 ++++++++++++++
plugins/curl/curldefs.h | 1 +
2 files changed, 15 insertions(+)
diff --git a/plugins/curl/curl.c b/plugins/curl/curl.c
index e89bea99..eeba5aa4 100644
--- a/plugins/curl/curl.c
+++ b/plugins/curl/curl.c
@@ -455,6 +455,7 @@ curl_open (int readonly)
nbdkit_error ("calloc: %m");
return NULL;
}
+ h->readonly = readonly;
h->c = curl_easy_init ();
if (h->c == NULL) {
@@ -764,6 +765,18 @@ curl_get_size (void *handle)
return h->exportsize;
}
+/* Multi-conn is safe for read-only connections, but HTTP does not
+ * have any concept of flushing so we cannot use it for read-write
+ * connections.
+ */
+static int
+curl_can_multi_conn (void *handle)
+{
+ struct curl_handle *h = handle;
+
+ return !! h->readonly;
+}
+
/* NB: The terminology used by libcurl is confusing!
*
* WRITEFUNCTION / write_cb is used when reading from the remote server
@@ -907,6 +920,7 @@ static struct nbdkit_plugin plugin = {
.open = curl_open,
.close = curl_close,
.get_size = curl_get_size,
+ .can_multi_conn = curl_can_multi_conn,
.pread = curl_pread,
.pwrite = curl_pwrite,
};
diff --git a/plugins/curl/curldefs.h b/plugins/curl/curldefs.h
index f3095f92..9d4949f3 100644
--- a/plugins/curl/curldefs.h
+++ b/plugins/curl/curldefs.h
@@ -64,6 +64,7 @@ extern const char *user_agent;
/* The per-connection handle. */
struct curl_handle {
CURL *c;
+ int readonly;
bool accept_range;
int64_t exportsize;
char errbuf[CURL_ERROR_SIZE];
--
2.31.1
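
As an illustrative sketch (not part of the patch; the URL is invented), a
read-only curl instance now advertises multi-conn, so a client such as nbdcopy
can spread the copy across several parallel connections:

$ nbdkit -r -U - curl https://example.com/disk.img \
      --run 'nbdcopy -p -C 4 "$uri" null:'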

@@ -1,95 +0,0 @@
From 66daae1a7daf680e06f884e9af6a14830263c932 Mon Sep 17 00:00:00 2001
From: "Richard W.M. Jones" <rjones@redhat.com>
Date: Sun, 8 May 2022 12:13:39 +0100
Subject: [PATCH] luks: Disable filter with old GnuTLS in Debian 10
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
On Debian 10:
luks.c: In function parse_cipher_strings:
luks.c:574:26: error: GNUTLS_CIPHER_AES_128_XTS undeclared (first use in this function); did you mean GNUTLS_CIPHER_AES_128_CCM?
h->gnutls_cipher = GNUTLS_CIPHER_AES_128_XTS;
^~~~~~~~~~~~~~~~~~~~~~~~~
GNUTLS_CIPHER_AES_128_CCM
luks.c:574:26: note: each undeclared identifier is reported only once for each function it appears in
luks.c:577:26: error: GNUTLS_CIPHER_AES_256_XTS undeclared (first use in this function); did you mean GNUTLS_CIPHER_AES_256_CCM?
h->gnutls_cipher = GNUTLS_CIPHER_AES_256_XTS;
^~~~~~~~~~~~~~~~~~~~~~~~~
GNUTLS_CIPHER_AES_256_CCM
luks.c: In function try_passphrase_in_keyslot:
luks.c:728:7: error: implicit declaration of function gnutls_pbkdf2; did you mean gnutls_prf? [-Werror=implicit-function-declaration]
r = gnutls_pbkdf2 (h->hash_alg, &key, &salt, ks->password_iterations,
^~~~~~~~~~~~~
gnutls_prf
Because gnutls_pbkdf2 is missing there's no chance of making this
filter work on this platform so it's best to compile it out.
Fixes: commit 468919dce6c5eb57503eacac0f67e5dd87c58e6c
(cherry picked from commit f9f67e483f4aad19ad6101163d32562f13504ca7)
---
configure.ac | 5 ++++-
filters/luks/Makefile.am | 2 +-
tests/Makefile.am | 2 +-
3 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/configure.ac b/configure.ac
index de85b4da..1d209f67 100644
--- a/configure.ac
+++ b/configure.ac
@@ -636,12 +636,15 @@ AS_IF([test "x$GNUTLS_LIBS" != "x"],[
gnutls_certificate_set_known_dh_params \
gnutls_group_get \
gnutls_group_get_name \
+ gnutls_pbkdf2 \
gnutls_session_set_verify_cert \
gnutls_srp_server_get_username \
gnutls_transport_is_ktls_enabled \
])
LIBS="$old_LIBS"
])
+AM_CONDITIONAL([HAVE_GNUTLS_PBKDF2],
+ [test "x$GNUTLS_LIBS" != "x" && test "x$ac_cv_func_gnutls_pbkdf2" = xyes])
AC_ARG_ENABLE([linuxdisk],
[AS_HELP_STRING([--disable-linuxdisk],
@@ -1484,7 +1487,7 @@ echo "Optional filters:"
echo
feature "ext2" test "x$HAVE_EXT2_TRUE" = "x"
feature "gzip" test "x$HAVE_ZLIB_TRUE" = "x"
-feature "LUKS" test "x$HAVE_GNUTLS_TRUE" != "x"
+feature "luks" test "x$HAVE_GNUTLS_PBKDF2_TRUE" = "x"
feature "xz" test "x$HAVE_LIBLZMA_TRUE" = "x"
echo
diff --git a/filters/luks/Makefile.am b/filters/luks/Makefile.am
index 30089621..622e5c3d 100644
--- a/filters/luks/Makefile.am
+++ b/filters/luks/Makefile.am
@@ -33,7 +33,7 @@ include $(top_srcdir)/common-rules.mk
EXTRA_DIST = nbdkit-luks-filter.pod
-if HAVE_GNUTLS
+if HAVE_GNUTLS_PBKDF2
filter_LTLIBRARIES = nbdkit-luks-filter.la
diff --git a/tests/Makefile.am b/tests/Makefile.am
index c29453ba..5585b3b7 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -1597,7 +1597,7 @@ EXTRA_DIST += \
$(NULL)
# luks filter test.
-if HAVE_GNUTLS
+if HAVE_GNUTLS_PBKDF2
TESTS += \
test-luks-info.sh \
test-luks-copy.sh \
--
2.31.1

@@ -1,71 +0,0 @@
From b3c05065801c723966a3e8d93c9b84e808ff38b9 Mon Sep 17 00:00:00 2001
From: "Richard W.M. Jones" <rjones@redhat.com>
Date: Sun, 8 May 2022 12:30:09 +0100
Subject: [PATCH] luks: Various fixes for Clang
With Clang:
luks.c:728:25: error: implicit conversion from enumeration type 'gnutls_digest_algorithm_t' to different enumeration type 'gnutls_mac_algorithm_t' [-Werror,-Wenum-conversion]
r = gnutls_pbkdf2 (h->hash_alg, &key, &salt, ks->password_iterations,
~~~~~~~~~~~~~ ~~~^~~~~~~~
luks.c:764:25: error: implicit conversion from enumeration type 'gnutls_digest_algorithm_t' to different enumeration type 'gnutls_mac_algorithm_t' [-Werror,-Wenum-conversion]
r = gnutls_pbkdf2 (h->hash_alg, &mkey, &msalt,
~~~~~~~~~~~~~ ~~~^~~~~~~~
luks.c:886:35: error: result of comparison of constant 18446744073709551615 with expression of type 'uint32_t' (aka 'unsigned int') is always false [-Werror,-Wtautological-constant-out-of-range-compare]
if (ks->password_iterations > ULONG_MAX) {
~~~~~~~~~~~~~~~~~~~~~~~ ^ ~~~~~~~~~
Fixes: commit 468919dce6c5eb57503eacac0f67e5dd87c58e6c
(cherry picked from commit 87d488ede9101a2effc71cd1851bf4a4caa521d2)
---
filters/luks/luks.c | 13 ++++++-------
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/filters/luks/luks.c b/filters/luks/luks.c
index 706a9bd2..cc619698 100644
--- a/filters/luks/luks.c
+++ b/filters/luks/luks.c
@@ -693,6 +693,10 @@ key_material_length_in_sectors (struct handle *h, size_t i)
static int
try_passphrase_in_keyslot (nbdkit_next *next, struct handle *h, size_t i)
{
+ /* I believe this is supposed to be safe, looking at the GnuTLS
+ * header file.
+ */
+ const gnutls_mac_algorithm_t mac = (gnutls_mac_algorithm_t) h->hash_alg;
struct luks_keyslot *ks = &h->phdr.keyslot[i];
size_t split_key_len;
CLEANUP_FREE uint8_t *split_key = NULL;
@@ -725,7 +729,7 @@ try_passphrase_in_keyslot (nbdkit_next *next, struct handle *h, size_t i)
}
/* Hash the passphrase to make a possible masterkey. */
- r = gnutls_pbkdf2 (h->hash_alg, &key, &salt, ks->password_iterations,
+ r = gnutls_pbkdf2 (mac, &key, &salt, ks->password_iterations,
masterkey, h->phdr.master_key_len);
if (r != 0) {
nbdkit_error ("gnutls_pbkdf2: %s", gnutls_strerror (r));
@@ -761,7 +765,7 @@ try_passphrase_in_keyslot (nbdkit_next *next, struct handle *h, size_t i)
/* Check if the masterkey is correct by comparing hash of the
* masterkey with LUKS header.
*/
- r = gnutls_pbkdf2 (h->hash_alg, &mkey, &msalt,
+ r = gnutls_pbkdf2 (mac, &mkey, &msalt,
h->phdr.master_key_digest_iterations,
key_digest, LUKS_DIGESTSIZE);
if (r != 0) {
@@ -883,11 +887,6 @@ luks_prepare (nbdkit_next *next, void *handle, int readonly)
"points beyond the end of the disk", i);
return -1;
}
- if (ks->password_iterations > ULONG_MAX) {
- nbdkit_error ("bad LUKSv1 header: key slot %zu "
- "iterations too large", i);
- return -1;
- }
/*FALLTHROUGH*/
case LUKS_KEY_DISABLED:
break;
--
2.31.1

@@ -1,43 +0,0 @@
From 9416effd73a5cb2e1c929449fca88fd7152aa1be Mon Sep 17 00:00:00 2001
From: "Richard W.M. Jones" <rjones@redhat.com>
Date: Sun, 8 May 2022 12:38:00 +0100
Subject: [PATCH] luks: Link with libcompat on Windows
/usr/lib/gcc/x86_64-w64-mingw32/11.2.1/../../../../x86_64-w64-mingw32/bin/ld: ../../common/utils/.libs/libutils.a(libutils_la-full-rw.o): in function `full_pread':
/builds/nbdkit/nbdkit/common/utils/full-rw.c:53: undefined reference to `pread'
/usr/lib/gcc/x86_64-w64-mingw32/11.2.1/../../../../x86_64-w64-mingw32/bin/ld: ../../common/utils/.libs/libutils.a(libutils_la-full-rw.o): in function `full_pwrite':
/builds/nbdkit/nbdkit/common/utils/full-rw.c:76: undefined reference to `pwrite'
/usr/lib/gcc/x86_64-w64-mingw32/11.2.1/../../../../x86_64-w64-mingw32/bin/ld: ../../common/utils/.libs/libutils.a(libutils_la-vector.o): in function `generic_vector_reserve_page_aligned':
/builds/nbdkit/nbdkit/common/utils/vector.c:112: undefined reference to `sysconf'
/usr/lib/gcc/x86_64-w64-mingw32/11.2.1/../../../../x86_64-w64-mingw32/bin/ld: /builds/nbdkit/nbdkit/common/utils/vector.c:134: undefined reference to `posix_memalign'
collect2: error: ld returned 1 exit status
Fixes: commit 468919dce6c5eb57503eacac0f67e5dd87c58e6c
(cherry picked from commit 4a28c4c46aedf270929a62a1c5ecf2c1129cd456)
---
filters/luks/Makefile.am | 2 ++
1 file changed, 2 insertions(+)
diff --git a/filters/luks/Makefile.am b/filters/luks/Makefile.am
index 622e5c3d..2688f696 100644
--- a/filters/luks/Makefile.am
+++ b/filters/luks/Makefile.am
@@ -45,6 +45,7 @@ nbdkit_luks_filter_la_SOURCES = \
nbdkit_luks_filter_la_CPPFLAGS = \
-I$(top_srcdir)/include \
-I$(top_srcdir)/common/include \
+ -I$(top_srcdir)/common/replacements \
-I$(top_srcdir)/common/utils \
$(NULL)
nbdkit_luks_filter_la_CFLAGS = \
@@ -53,6 +54,7 @@ nbdkit_luks_filter_la_CFLAGS = \
$(NULL)
nbdkit_luks_filter_la_LIBADD = \
$(top_builddir)/common/utils/libutils.la \
+ $(top_builddir)/common/replacements/libcompat.la \
$(IMPORT_LIBRARY_ON_WINDOWS) \
$(GNUTLS_LIBS) \
$(NULL)
--
2.31.1

File diff suppressed because it is too large.

@@ -1,101 +0,0 @@
From 387bd4c6fee8ab339fd04e0b841b0c67e6020c8a Mon Sep 17 00:00:00 2001
From: "Richard W.M. Jones" <rjones@redhat.com>
Date: Sun, 8 May 2022 18:05:45 +0100
Subject: [PATCH] tests: luks: Reduce time taken to run these tests
Under valgrind they ran very slowly. Turns out valgrinding over
GnuTLS hashing code is not pretty. About half the time seems to be
taken opening the keyslot, and the rest copying the data.
This change reduces the time (under valgrind) from 15 minutes 45 seconds
to about 6 mins 30 seconds.
(cherry picked from commit 7320ae5dba476171a024ca44b889b3474302dc40)
---
tests/test-luks-copy.sh | 18 +++++++++---------
tests/test-luks-info.sh | 6 +++---
2 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/tests/test-luks-copy.sh b/tests/test-luks-copy.sh
index 99f300d0..01801811 100755
--- a/tests/test-luks-copy.sh
+++ b/tests/test-luks-copy.sh
@@ -60,8 +60,8 @@ rm -f $encrypt_disk $plain_disk $pid $sock
qemu-img create -f luks \
--object secret,data=123456,id=sec0 \
-o key-secret=sec0 \
- $encrypt_disk 10M
-truncate -s 10M $plain_disk
+ $encrypt_disk 1M
+truncate -s 1M $plain_disk
qemu-img convert --target-image-opts -n \
--object secret,data=123456,id=sec0 \
$plain_disk \
@@ -74,11 +74,11 @@ start_nbdkit -P $pid -U $sock \
uri="nbd+unix:///?socket=$sock"
# Copy the whole disk out. It should be empty.
-nbdcopy "$uri" $plain_disk
+nbdcopy -C 1 "$uri" $plain_disk
if [ "$(hexdump -C $plain_disk)" != '00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
*
-00a00000' ]; then
+00100000' ]; then
echo "$0: expected plaintext disk to be empty"
exit 1
fi
@@ -88,14 +88,14 @@ fi
nbdsh -u "$uri" \
-c 'h.pwrite(b"1"*65536, 0)' \
-c 'h.pwrite(b"2"*65536, 128*1024)' \
- -c 'h.pwrite(b"3"*65536, 9*1024*1024)' \
+ -c 'h.pwrite(b"3"*65536, 900*1024)' \
-c 'buf = h.pread(65536, 0)' \
-c 'assert buf == b"1"*65536' \
-c 'buf = h.pread(65536, 65536)' \
-c 'assert buf == bytearray(65536)' \
-c 'buf = h.pread(65536, 128*1024)' \
-c 'assert buf == b"2"*65536' \
- -c 'buf = h.pread(65536, 9*1024*1024)' \
+ -c 'buf = h.pread(65536, 900*1024)' \
-c 'assert buf == b"3"*65536' \
-c 'h.flush()'
@@ -115,11 +115,11 @@ if [ "$(hexdump -C $plain_disk)" != '00000000 31 31 31 31 31 31 31 31 31 31 31
*
00030000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
*
-00900000 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 |3333333333333333|
+000e1000 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 |3333333333333333|
*
-00910000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
+000f1000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
*
-00a00000' ]; then
+00100000' ]; then
echo "$0: unexpected content"
exit 1
fi
diff --git a/tests/test-luks-info.sh b/tests/test-luks-info.sh
index 3eff657b..ef141ecd 100755
--- a/tests/test-luks-info.sh
+++ b/tests/test-luks-info.sh
@@ -46,11 +46,11 @@ rm -f $disk $info
qemu-img create -f luks \
--object secret,data=123456,id=sec0 \
-o key-secret=sec0 \
- $disk 10M
+ $disk 1M
nbdkit -U - file $disk --filter=luks passphrase=123456 \
--run 'nbdinfo $uri' > $info
cat $info
-# Check the size is 10M (so it doesn't include the LUKS header).
-grep "10485760" $info
+# Check the size is 1M (so it doesn't include the LUKS header).
+grep "1048576" $info
--
2.31.1
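
The timing quoted above can be reproduced by re-running just the LUKS tests from a built nbdkit source tree; a rough sketch, assuming the automake TESTS variable and nbdkit's check-valgrind target behave as usual:

# Re-run only the LUKS tests (fast path).
make -C tests check TESTS="test-luks-info.sh test-luks-copy.sh"

# Approximate the slow case described above by running the same tests
# under the valgrind harness.
make -C tests check-valgrind TESTS="test-luks-info.sh test-luks-copy.sh"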


@@ -1,112 +0,0 @@
From 52ee1dab95436128b44c37cc495022ff90108b2e Mon Sep 17 00:00:00 2001
From: Nikolaus Rath <Nikolaus@rath.org>
Date: Mon, 9 May 2022 10:04:30 +0100
Subject: [PATCH] Add nbdkit.parse_size() Python function.
This enables Python plugins to parse sizes the same way as C plugins.
I'm not sure about the best way to test this - input is appreciated.
I'm not too happy with the way this code is tested. It works, but putting the tests into
test-python-plugin.py feels misplaced: this file is intended to support the unit tests in
test_python.py, not run its own unit tests.
(cherry picked from commit 1b7d72542be68e254c1ef86ecb1a82b05c78ff63)
---
plugins/python/modfunctions.c | 21 +++++++++++++++++++++
plugins/python/nbdkit-python-plugin.pod | 5 +++++
tests/test-python-plugin.py | 19 +++++++++++++++++++
3 files changed, 45 insertions(+)
diff --git a/plugins/python/modfunctions.c b/plugins/python/modfunctions.c
index fffbaab2..46b0c904 100644
--- a/plugins/python/modfunctions.c
+++ b/plugins/python/modfunctions.c
@@ -93,11 +93,32 @@ do_shutdown (PyObject *self, PyObject *args)
Py_RETURN_NONE;
}
+/* nbdkit.parse_size */
+static PyObject *
+parse_size (PyObject *self, PyObject *args)
+{
+ const char *s;
+ if (!PyArg_ParseTuple (args, "s", &s)) {
+ PyErr_SetString (PyExc_TypeError, "Expected string, got something else");
+ return NULL;
+ }
+
+ int64_t size = nbdkit_parse_size(s);
+ if (size == -1) {
+ PyErr_SetString (PyExc_ValueError, "Unable to parse string as size");
+ return NULL;
+ }
+
+ return PyLong_FromSize_t((size_t)size);
+}
+
static PyMethodDef NbdkitMethods[] = {
{ "debug", debug, METH_VARARGS,
"Print a debug message" },
{ "export_name", export_name, METH_VARARGS,
"Return the optional export name negotiated with the client" },
+ { "parse_size", parse_size, METH_VARARGS,
+ "Parse human-readable size strings into bytes" },
{ "set_error", set_error, METH_VARARGS,
"Store an errno value prior to throwing an exception" },
{ "shutdown", do_shutdown, METH_VARARGS,
diff --git a/plugins/python/nbdkit-python-plugin.pod b/plugins/python/nbdkit-python-plugin.pod
index 051b0237..ccc9406f 100644
--- a/plugins/python/nbdkit-python-plugin.pod
+++ b/plugins/python/nbdkit-python-plugin.pod
@@ -131,6 +131,11 @@ Record C<err> as the reason you are about to throw an exception. C<err>
should correspond to usual errno values, where it may help to
C<import errno>.
+=head3 C<nbdkit.parse_size(str)>
+
+Parse a string (such as "100M") into a size in bytes. Wraps the
+C<nbdkit_parse_size()> C function.
+
=head3 C<nbdkit.shutdown()>
Request asynchronous server shutdown.
diff --git a/tests/test-python-plugin.py b/tests/test-python-plugin.py
index 0b34d532..d4f379fc 100644
--- a/tests/test-python-plugin.py
+++ b/tests/test-python-plugin.py
@@ -34,12 +34,31 @@
import nbdkit
import pickle
import base64
+import unittest
API_VERSION = 2
cfg = {}
+# Not nice, but there doesn't seem to be a better way of putting this
+class TestAPI(unittest.TestCase):
+
+ def test_parse_size(self):
+ self.assertEqual(nbdkit.parse_size('511'), 511)
+ self.assertEqual(nbdkit.parse_size('7k'), 7*1024)
+ self.assertEqual(nbdkit.parse_size('17M'), 17*1024*1024)
+
+ with self.assertRaises(TypeError):
+ nbdkit.parse_size(17)
+
+ with self.assertRaises(ValueError):
+ nbdkit.parse_size('foo')
+
+
+TestAPI().test_parse_size()
+
+
def config(k, v):
global cfg
if k == "cfg":
--
2.31.1
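
For illustration, a minimal sketch of how a Python plugin might use the new function from its config() callback; the temporary file name and the "size" parameter are made up for this example and are not part of the patch:

# Write a throwaway plugin that accepts a human-readable size=... parameter.
cat > /tmp/parse-size-plugin.py <<'PYEOF'
import nbdkit

size = 0

def config(key, value):
    global size
    if key == "size":
        # "100M" -> 104857600; raises ValueError on unparseable input.
        size = nbdkit.parse_size(value)
    else:
        raise RuntimeError("unknown parameter: %s" % key)

def open(readonly):
    return 1

def get_size(h):
    return size

def pread(h, count, offset):
    return bytearray(count)
PYEOF

# Serve it and print the negotiated export size (expected: 104857600).
nbdkit -U - python /tmp/parse-size-plugin.py size=100M \
       --run 'nbdinfo --size "$uri"'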


@@ -1,34 +0,0 @@
From 644e0ed6333cf5fe2c1e39da157e8f1ce97267b9 Mon Sep 17 00:00:00 2001
From: "Richard W.M. Jones" <rjones@redhat.com>
Date: Sat, 14 May 2022 13:47:19 +0100
Subject: [PATCH] cache: Fix cross-reference nbdkit-readahead-filter
After the readahead filter was reimplemented so that it only issues
cache requests, the two filters should be used together, not as
alternatives. Update the documentation of the cache filter to make
this clear.
Fixes: commit 2ff548d66ad3eae87868402ec5b3319edd12090f
(cherry picked from commit 894771f39a8fd2632caad00e497146d69cac4bac)
---
filters/cache/nbdkit-cache-filter.pod | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/filters/cache/nbdkit-cache-filter.pod b/filters/cache/nbdkit-cache-filter.pod
index d85fef09..f4234e1a 100644
--- a/filters/cache/nbdkit-cache-filter.pod
+++ b/filters/cache/nbdkit-cache-filter.pod
@@ -28,8 +28,8 @@ loss, as the name suggests).
This filter only caches image contents. To cache image metadata, use
L<nbdkit-cacheextents-filter(1)> between this filter and the plugin.
-To accelerate sequential reads, use L<nbdkit-readahead-filter(1)>
-instead.
+To accelerate sequential reads, use L<nbdkit-readahead-filter(1)> on
+top of this filter.
=head1 PARAMETERS
--
2.31.1
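
In command-line terms the recommended stacking puts readahead above the cache filter, which in turn sits above a plugin that supports the parallel thread model; a sketch, with disk.img and out.img as placeholders:

  nbdkit -U - --filter=readahead --filter=cache file disk.img \
         --run 'qemu-img convert "$nbd" out.img'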


@@ -1,48 +0,0 @@
From 4a7e5169935c8850fddcea8da79639ded907c549 Mon Sep 17 00:00:00 2001
From: "Richard W.M. Jones" <rjones@redhat.com>
Date: Sat, 14 May 2022 14:00:16 +0100
Subject: [PATCH] curl: Don't document curl plugin + readahead filter
nbdkit readahead filter does not support plugins which do not use the
parallel thread model.
Fixes: commit 2ff548d66ad3eae87868402ec5b3319edd12090f
(cherry picked from commit 92fbb76d11b9f17c527debd803aa2505f3642783)
---
docs/nbdkit-captive.pod | 7 -------
plugins/curl/nbdkit-curl-plugin.pod | 1 -
2 files changed, 8 deletions(-)
diff --git a/docs/nbdkit-captive.pod b/docs/nbdkit-captive.pod
index eafe36d8..d41a824d 100644
--- a/docs/nbdkit-captive.pod
+++ b/docs/nbdkit-captive.pod
@@ -110,13 +110,6 @@ an embedded disk image. To copy it out:
nbdkit -U - example1 --run 'qemu-img convert $nbd disk.img'
-If plugin requests have a high overhead (for example making HTTP
-requests to a remote server), adding L<nbdkit-readahead-filter(1)> may
-help performance:
-
- nbdkit -U - --filter=readahead curl https://example.com/disk.img \
- --run 'qemu-img convert $nbd disk.img'
-
If the source suffers from temporary network failures
L<nbdkit-retry-filter(1)> or L<nbdkit-retry-request-filter(1)> may
help.
diff --git a/plugins/curl/nbdkit-curl-plugin.pod b/plugins/curl/nbdkit-curl-plugin.pod
index 54fce66c..fc422ca2 100644
--- a/plugins/curl/nbdkit-curl-plugin.pod
+++ b/plugins/curl/nbdkit-curl-plugin.pod
@@ -509,7 +509,6 @@ L<CURLOPT_VERBOSE(3)>,
L<nbdkit(1)>,
L<nbdkit-extentlist-filter(1)>,
L<nbdkit-file-plugin(1)>,
-L<nbdkit-readahead-filter(1)>,
L<nbdkit-retry-filter(1)>,
L<nbdkit-retry-request-filter(1)>,
L<nbdkit-ssh-plugin(1)>,
--
2.31.1
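
The underlying constraint can be checked directly: the readahead filter needs a plugin with the parallel thread model, and the thread model a plugin offers is reported in --dump-plugin output; a sketch (exact field names may vary slightly between nbdkit versions):

  nbdkit curl --dump-plugin | grep thread_model   # serialize_requests for curl
  nbdkit file --dump-plugin | grep thread_model   # parallel for the file plugin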

File diff suppressed because it is too large


@@ -1,67 +0,0 @@
From 91677241184ab1aa77adadd612fa069d084863ec Mon Sep 17 00:00:00 2001
From: "Richard W.M. Jones" <rjones@redhat.com>
Date: Sat, 14 May 2022 18:54:32 +0100
Subject: [PATCH] scan: Remove condition variable
This was copied in from the readahead filter code, but is not actually
needed in this filter because it never has to sleep waiting for a
command.
Fixes: commit 65c20a09ceacb4431986a2982f2c2e746df63fcb
(cherry picked from commit 43ad586698347997cdfa1bd56bfed0292f89f134)
---
filters/scan/scan.c | 6 ------
filters/scan/scan.h | 1 -
2 files changed, 7 deletions(-)
diff --git a/filters/scan/scan.c b/filters/scan/scan.c
index ac5b18d2..8a966577 100644
--- a/filters/scan/scan.c
+++ b/filters/scan/scan.c
@@ -136,9 +136,6 @@ send_command_to_background_thread (struct bgthread_ctrl *ctrl,
ACQUIRE_LOCK_FOR_CURRENT_SCOPE (&ctrl->lock);
if (command_queue_append (&ctrl->cmds, cmd) == -1)
return -1;
- /* Signal the thread if it could be sleeping on an empty queue. */
- if (ctrl->cmds.len == 1)
- pthread_cond_signal (&ctrl->cond);
return 0;
}
@@ -199,13 +196,11 @@ scan_prepare (nbdkit_next *next, void *handle, int readonly)
/* Create the background thread. */
h->ctrl.cmds = (command_queue) empty_vector;
pthread_mutex_init (&h->ctrl.lock, NULL);
- pthread_cond_init (&h->ctrl.cond, NULL);
err = pthread_create (&h->thread, NULL, scan_thread, &h->ctrl);
if (err != 0) {
errno = err;
nbdkit_error ("pthread_create: %m");
- pthread_cond_destroy (&h->ctrl.cond);
pthread_mutex_destroy (&h->ctrl.lock);
return -1;
}
@@ -227,7 +222,6 @@ scan_finalize (nbdkit_next *next, void *handle)
send_command_to_background_thread (&h->ctrl, quit_cmd);
pthread_join (h->thread, NULL);
- pthread_cond_destroy (&h->ctrl.cond);
pthread_mutex_destroy (&h->ctrl.lock);
command_queue_reset (&h->ctrl.cmds);
h->running = false;
diff --git a/filters/scan/scan.h b/filters/scan/scan.h
index 7ff39310..98c0228b 100644
--- a/filters/scan/scan.h
+++ b/filters/scan/scan.h
@@ -54,7 +54,6 @@ DEFINE_VECTOR_TYPE(command_queue, struct command);
struct bgthread_ctrl {
command_queue cmds; /* Command queue. */
pthread_mutex_t lock; /* Lock for queue. */
- pthread_cond_t cond; /* Condition queue size 0 -> 1. */
nbdkit_next *next; /* For sending cache operations. */
};
--
2.31.1


@@ -1,57 +0,0 @@
From c191f45530d4dd7f978803c0bfa402ca0fc950df Mon Sep 17 00:00:00 2001
From: "Richard W.M. Jones" <rjones@redhat.com>
Date: Sat, 14 May 2022 19:02:48 +0100
Subject: [PATCH] scan: Small typographical fix in manual
Fixes: commit 65c20a09ceacb4431986a2982f2c2e746df63fcb
(cherry picked from commit 67d4e3437d2e28fa3ce1c4b3818d2b1e7939c5ec)
---
filters/scan/nbdkit-scan-filter.pod | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/filters/scan/nbdkit-scan-filter.pod b/filters/scan/nbdkit-scan-filter.pod
index 4a8d0ef9..2fe9bb80 100644
--- a/filters/scan/nbdkit-scan-filter.pod
+++ b/filters/scan/nbdkit-scan-filter.pod
@@ -26,8 +26,8 @@ below (after) this filter, giving approximately the same effect.
L<nbdkit-cow-filter(1)> can be used instead of nbdkit-cache-filter, if
you add the C<cow-on-cache=true> option.
-Various C<scan-*> parameters can be used to tune scanning, although
-the defaults should be suitable in most cases.
+Various parameters can be used to tune scanning, although the defaults
+should be suitable in most cases.
A similar filter is L<nbdkit-readahead-filter(1)>.
@@ -38,23 +38,23 @@ filter will print a warning message if this happens.
=over 4
-=item Thread model must be parallel *
+=item Thread model must be parallel*
For example L<nbdkit-curl-plugin(1)> only supports
C<serialize_requests>, and so this filter cannot perform prefetches in
parallel with the read requests.
-=item Only scans while clients are connected *
+=item Only scans while clients are connected*
The current filter only scans while there is at least one client
connected.
-=item Only scans the default export *
+=item Only scans the default export*
The current filter only scans the default export and ignores all
clients connecting to the non-default export name.
-* We may be able to lift these restrictions in future.
+*We may be able to lift these restrictions in future.
=item Underlying filters or plugin must support C<.cache> (prefetch)
--
2.31.1
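
As the manual text above suggests, the scan filter is normally layered over the cache filter (or over the cow filter with cow-on-cache=true) so the prefetched data is retained somewhere; a sketch using placeholder image names and default scan parameters:

  nbdkit -U - --filter=scan --filter=cache file disk.img \
         --run 'qemu-img convert "$nbd" out.img'

  nbdkit -U - --filter=scan --filter=cow file disk.img cow-on-cache=true \
         --run 'qemu-img convert "$nbd" out.img'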


@@ -1,34 +0,0 @@
From 651045d703804d7dafab04a0387ca92573f52467 Mon Sep 17 00:00:00 2001
From: "Richard W.M. Jones" <rjones@redhat.com>
Date: Sat, 14 May 2022 20:57:38 +0100
Subject: [PATCH] ssh: Don't reference readahead or scan filters from this
plugin
These filters do not support plugins which do not use the parallel
thread model.
Fixes: commit 2ff548d66ad3eae87868402ec5b3319edd12090f
Fixes: commit 65c20a09ceacb4431986a2982f2c2e746df63fcb
See-also: commit 92fbb76d11b9f17c527debd803aa2505f3642783
(cherry picked from commit 7eb356719376c4d0b2379cea5d39c81602d2d304)
---
plugins/ssh/nbdkit-ssh-plugin.pod | 2 --
1 file changed, 2 deletions(-)
diff --git a/plugins/ssh/nbdkit-ssh-plugin.pod b/plugins/ssh/nbdkit-ssh-plugin.pod
index 214957d6..bb922d37 100644
--- a/plugins/ssh/nbdkit-ssh-plugin.pod
+++ b/plugins/ssh/nbdkit-ssh-plugin.pod
@@ -347,9 +347,7 @@ C<nbdkit-ssh-plugin> first appeared in nbdkit 1.12.
L<nbdkit(1)>,
L<nbdkit-curl-plugin(1)>,
L<nbdkit-extentlist-filter(1)>,
-L<nbdkit-readahead-filter(1)>,
L<nbdkit-retry-filter(1)>,
-L<nbdkit-scan-filter(1)>,
L<nbdkit-plugin(3)>,
L<ssh(1)>,
L<ssh-agent(1)>,
--
2.31.1


@@ -1,56 +0,0 @@
From f58d2a04338edc647e2334ff58b49508424e3f3b Mon Sep 17 00:00:00 2001
From: "Richard W.M. Jones" <rjones@redhat.com>
Date: Tue, 17 May 2022 13:20:17 +0100
Subject: [PATCH] scan: Fix bound so we don't try to prefetch beyond end of
disk
An off-by-one error in the bound could cause the filter to try to
prefetch beyond the end of the underlying plugin. This would cause
nbdkit to crash with this assertion failure:
nbdkit: backend.c:782: backend_cache: Assertion `backend_valid_range (c, offset, count)' failed.
The sequence of events was:
- scan filter background thread started
- client reads to the end of the disk
- background thread skips ahead to end of disk (offset == size)
- background thread tries to prefetch from this point
In the final step the calculations caused the background thread to
prefetch a scan-size block beyond the end of the plugin.
Fixes: commit 65c20a09ceacb4431986a2982f2c2e746df63fcb
(cherry picked from commit 953643429b8c57b4dd20a6c0e5b83704ae9a0e88)
---
filters/scan/bgthread.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/filters/scan/bgthread.c b/filters/scan/bgthread.c
index 384e79b6..5fa5f27f 100644
--- a/filters/scan/bgthread.c
+++ b/filters/scan/bgthread.c
@@ -113,12 +113,12 @@ scan_thread (void *vp)
}
adjust_clock (offset);
- if (offset > size)
- continue;
- /* Issue the next prefetch. */
- n = MIN (scan_size, size - offset);
- ctrl->next->cache (ctrl->next, n, offset, 0, NULL);
+ if (offset < size) {
+ /* Issue the next prefetch. */
+ n = MIN (scan_size, size - offset);
+ ctrl->next->cache (ctrl->next, n, offset, 0, NULL);
+ }
}
if (scan_forever) {
--
2.31.1


@@ -1,110 +0,0 @@
From d1d2f43223bcda062d10c8e68776590956892f71 Mon Sep 17 00:00:00 2001
From: "Richard W.M. Jones" <rjones@redhat.com>
Date: Fri, 10 Jun 2022 22:11:44 +0100
Subject: [PATCH] tests: Add a regression test for LUKS zeroing crash
https://listman.redhat.com/archives/libguestfs/2022-June/029188.html
(cherry picked from commit 7ab2ef96803bfc385f786be82ebfdd4cc977d504)
---
tests/Makefile.am | 2 ++
tests/test-luks-copy-zero.sh | 70 ++++++++++++++++++++++++++++++++++++
2 files changed, 72 insertions(+)
create mode 100755 tests/test-luks-copy-zero.sh
diff --git a/tests/Makefile.am b/tests/Makefile.am
index 799aa6c2..0f4b0746 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -1601,11 +1601,13 @@ if HAVE_GNUTLS_PBKDF2
TESTS += \
test-luks-info.sh \
test-luks-copy.sh \
+ test-luks-copy-zero.sh \
$(NULL)
endif
EXTRA_DIST += \
test-luks-info.sh \
test-luks-copy.sh \
+ test-luks-copy-zero.sh \
$(NULL)
# multi-conn filter test.
diff --git a/tests/test-luks-copy-zero.sh b/tests/test-luks-copy-zero.sh
new file mode 100755
index 00000000..6ff560e3
--- /dev/null
+++ b/tests/test-luks-copy-zero.sh
@@ -0,0 +1,70 @@
+#!/usr/bin/env bash
+# nbdkit
+# Copyright (C) 2018-2022 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# * Neither the name of Red Hat nor the names of its contributors may be
+# used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY RED HAT AND CONTRIBUTORS ''AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL RED HAT OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+
+# Regression test for:
+# https://listman.redhat.com/archives/libguestfs/2022-June/029188.html
+
+source ./functions.sh
+set -e
+set -x
+
+requires qemu-img --version
+requires nbdcopy --version
+requires truncate --version
+requires file --version
+requires_filter luks
+
+encrypt_disk=luks-copy-zero1.img
+zero_disk=luks-copy-zero2.img
+cleanup_fn rm -f $encrypt_disk $zero_disk
+rm -f $encrypt_disk $zero_disk
+
+# Create an empty encrypted disk container.
+qemu-img create -f luks \
+ --object secret,data=123456,id=sec0 \
+ -o key-secret=sec0 \
+ $encrypt_disk 100M
+
+# Create an all zeroes disk of the same size.
+truncate -s 100M $zero_disk
+
+# Using nbdkit-luks-filter, write the zero disk into the encrypted
+# disk. nbdcopy will do this using NBD_CMD_ZERO operations.
+nbdkit -U - -fv \
+ file $encrypt_disk --filter=luks passphrase=123456 \
+ --run "nbdcopy -C 1 $zero_disk \$nbd"
+
+# Check that the encrypted disk is still a LUKS disk. If zeroing is
+# wrong in the filter it's possible that it writes through to the
+# underlying disk, erasing the container.
+file $encrypt_disk
+file $encrypt_disk | grep "LUKS encrypted file"
--
2.31.1


@@ -1,121 +0,0 @@
From c1a7c87fb9710fb29d699d1f39d0da19caf98da0 Mon Sep 17 00:00:00 2001
From: "Richard W.M. Jones" <rjones@redhat.com>
Date: Sat, 11 Jun 2022 12:34:02 +0100
Subject: [PATCH] rate: Allow burstiness to be controlled
Previously it was fixed at 2.0 seconds. Allowing it to be adjusted
upwards could help with large, lumpy requests.
(cherry picked from commit f79e951c20510381d5cd83c203c670874a4978f4)
---
filters/rate/nbdkit-rate-filter.pod | 12 ++++++++++--
filters/rate/rate.c | 20 +++++++++++++-------
tests/test-rate.sh | 2 +-
3 files changed, 24 insertions(+), 10 deletions(-)
diff --git a/filters/rate/nbdkit-rate-filter.pod b/filters/rate/nbdkit-rate-filter.pod
index 8956e641..09ce7dbc 100644
--- a/filters/rate/nbdkit-rate-filter.pod
+++ b/filters/rate/nbdkit-rate-filter.pod
@@ -9,6 +9,7 @@ nbdkit-rate-filter - limit bandwidth by connection or server
[connection-rate=BITSPERSEC]
[rate-file=FILENAME]
[connection-rate-file=FILENAME]
+ [burstiness=SECS]
=head1 DESCRIPTION
@@ -63,6 +64,13 @@ Limit total bandwidth across all connections to C<BITSPERSEC>.
Adjust the per-connection or total bandwidth dynamically by writing
C<BITSPERSEC> into C<FILENAME>. See L</DYNAMIC ADJUSTMENT> below.
+=item B<burstiness=>SECS
+
+Control the bucket capacity, expressed as a length of time in
+"rate-equivalent seconds" that the client is allowed to burst for
+after a period of inactivity. The default is 2.0 seconds. It's not
+recommended to set this smaller than the default.
+
=back
C<BITSPERSEC> can be specified as a simple number, or you can use a
@@ -105,8 +113,8 @@ If the size of requests made by your client is much larger than the
rate limit then you can see long, lumpy sleeps in this filter. In the
future we may modify the filter to break up large requests
automatically in order to limit the length of sleeps. Placing the
-L<nbdkit-blocksize-filter(1)> in front of this filter may help in the
-meantime.
+L<nbdkit-blocksize-filter(1)> in front of this filter, or adjusting
+C<burstiness> upwards may help.
=head1 FILES
diff --git a/filters/rate/rate.c b/filters/rate/rate.c
index 1a70d212..26082f8c 100644
--- a/filters/rate/rate.c
+++ b/filters/rate/rate.c
@@ -68,10 +68,9 @@ static char *rate_file = NULL;
/* Bucket capacity controls the burst rate. It is expressed as the
* length of time in "rate-equivalent seconds" that the client can
- * burst for after a period of inactivity. This could be adjustable
- * in future.
+ * burst for after a period of inactivity.
*/
-#define BUCKET_CAPACITY 2.0
+static double bucket_capacity = 2.0 /* seconds */;
/* Global read and write buckets. */
static struct bucket read_bucket;
@@ -142,6 +141,13 @@ rate_config (nbdkit_next_config *next, nbdkit_backend *nxdata,
return -1;
return 0;
}
+ else if (strcmp (key, "burstiness") == 0) {
+ if (sscanf (value, "%lg", &bucket_capacity) != 1) {
+ nbdkit_error ("burstiness must be a floating point number (seconds)");
+ return -1;
+ }
+ return 0;
+ }
else
return next (nxdata, key, value);
}
@@ -150,8 +156,8 @@ static int
rate_get_ready (int thread_model)
{
/* Initialize the global buckets. */
- bucket_init (&read_bucket, rate, BUCKET_CAPACITY);
- bucket_init (&write_bucket, rate, BUCKET_CAPACITY);
+ bucket_init (&read_bucket, rate, bucket_capacity);
+ bucket_init (&write_bucket, rate, bucket_capacity);
return 0;
}
@@ -178,8 +184,8 @@ rate_open (nbdkit_next_open *next, nbdkit_context *nxdata,
return NULL;
}
- bucket_init (&h->read_bucket, connection_rate, BUCKET_CAPACITY);
- bucket_init (&h->write_bucket, connection_rate, BUCKET_CAPACITY);
+ bucket_init (&h->read_bucket, connection_rate, bucket_capacity);
+ bucket_init (&h->write_bucket, connection_rate, bucket_capacity);
pthread_mutex_init (&h->read_bucket_lock, NULL);
pthread_mutex_init (&h->write_bucket_lock, NULL);
diff --git a/tests/test-rate.sh b/tests/test-rate.sh
index 7305c928..ff781c21 100755
--- a/tests/test-rate.sh
+++ b/tests/test-rate.sh
@@ -56,7 +56,7 @@ nbdkit -U - \
--filter=blocksize --filter=rate \
pattern 25M \
maxdata=65536 \
- rate=10M \
+ rate=10M burstiness=2.0 \
--run 'nbdcopy "$uri" rate.img'
end_t=$SECONDS
--
2.31.1
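
For example, the test above can be adapted to give a client a longer burst window against the same 10M limit simply by raising the new parameter; a sketch based on that test, with rate.img as a placeholder output file:

  nbdkit -U - --filter=blocksize --filter=rate pattern 25M \
         maxdata=65536 rate=10M burstiness=4.0 \
         --run 'nbdcopy "$uri" rate.img'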


@@ -1,104 +0,0 @@
From 4e8599886ba4802fef1683811a725e7c4bc4fe72 Mon Sep 17 00:00:00 2001
From: "Richard W.M. Jones" <rjones@redhat.com>
Date: Tue, 12 Jul 2022 18:00:38 +0100
Subject: [PATCH] luks: Check return values from malloc more carefully
Found by Coverity:
Error: GCC_ANALYZER_WARNING (CWE-688): [#def53]
nbdkit-1.30.7/filters/luks/luks-encryption.c: scope_hint: In function 'calculate_iv'
nbdkit-1.30.7/filters/luks/luks-encryption.c:175:5: warning[-Wanalyzer-possible-null-argument]: use of possibly-NULL 'iv' where non-null expected
nbdkit-1.30.7/filters/luks/luks-encryption.c:39: included_from: Included from here.
/usr/include/string.h:43:14: note: argument 1 of 'memcpy' must be non-null
# 173| sector32 = (uint32_t) sector; /* truncate to only lower bits */
# 174| sector32 = htole32 (sector32);
# 175|-> memcpy (iv, &sector32, prefixlen);
# 176| memset (iv + prefixlen, 0, ivlen - prefixlen);
# 177| break;
Error: GCC_ANALYZER_WARNING (CWE-688): [#def54]
nbdkit-1.30.7/filters/luks/luks-encryption.c:184:5: warning[-Wanalyzer-possible-null-argument]: use of possibly-NULL 'iv' where non-null expected
nbdkit-1.30.7/filters/luks/luks-encryption.c:39: included_from: Included from here.
/usr/include/string.h:43:14: note: argument 1 of 'memcpy' must be non-null
# 182| prefixlen = ivlen;
# 183| sector = htole64 (sector);
# 184|-> memcpy (iv, &sector, prefixlen);
# 185| memset (iv + prefixlen, 0, ivlen - prefixlen);
# 186| break;
Error: NULL_RETURNS (CWE-476): [#def55]
nbdkit-1.30.7/filters/luks/luks-encryption.c:498: returned_null: "malloc" returns "NULL" (checked 86 out of 94 times).
nbdkit-1.30.7/filters/luks/luks-encryption.c:498: var_assigned: Assigning: "temp" = "NULL" return value from "malloc".
nbdkit-1.30.7/filters/luks/luks-encryption.c:523: dereference: Dereferencing a pointer that might be "NULL" "temp" when calling "memcpy". [Note: The source code implementation of the function has been overridden by a builtin model.]
# 521| gnutls_hash_deinit (hash, temp);
# 522|
# 523|-> memcpy (&block[i*digest_bytes], temp, blen);
# 524| }
# 525|
Fixes: commit 468919dce6c5eb57503eacac0f67e5dd87c58e6c
(cherry picked from commit 00c8bbd9e321681843140f697985505de7177f34)
---
filters/luks/luks-encryption.c | 28 +++++++++++++++++++++++-----
1 file changed, 23 insertions(+), 5 deletions(-)
diff --git a/filters/luks/luks-encryption.c b/filters/luks/luks-encryption.c
index 8ee0eb35..19aaf06a 100644
--- a/filters/luks/luks-encryption.c
+++ b/filters/luks/luks-encryption.c
@@ -495,9 +495,15 @@ af_hash (gnutls_digest_algorithm_t hash_alg, uint8_t *block, size_t len)
size_t digest_bytes = gnutls_hash_get_len (hash_alg);
size_t nr_blocks, last_block_len;
size_t i;
- CLEANUP_FREE uint8_t *temp = malloc (digest_bytes);
int r;
gnutls_hash_hd_t hash;
+ CLEANUP_FREE uint8_t *temp;
+
+ temp = malloc (digest_bytes);
+ if (!temp) {
+ nbdkit_error ("malloc: %m");
+ return -1;
+ }
nr_blocks = len / digest_bytes;
last_block_len = len % digest_bytes;
@@ -874,9 +880,15 @@ int
do_decrypt (struct luks_data *h, gnutls_cipher_hd_t cipher,
uint64_t sector, uint8_t *buf, size_t nr_sectors)
{
- const size_t ivlen = cipher_alg_iv_len (h->cipher_alg, h->cipher_mode);
- CLEANUP_FREE uint8_t *iv = malloc (ivlen);
int r;
+ const size_t ivlen = cipher_alg_iv_len (h->cipher_alg, h->cipher_mode);
+ CLEANUP_FREE uint8_t *iv;
+
+ iv = malloc (ivlen);
+ if (!iv) {
+ nbdkit_error ("malloc: %m");
+ return -1;
+ }
while (nr_sectors) {
calculate_iv (h->ivgen_alg, iv, ivlen, sector);
@@ -902,9 +914,15 @@ int
do_encrypt (struct luks_data *h, gnutls_cipher_hd_t cipher,
uint64_t sector, uint8_t *buf, size_t nr_sectors)
{
- const size_t ivlen = cipher_alg_iv_len (h->cipher_alg, h->cipher_mode);
- CLEANUP_FREE uint8_t *iv = malloc (ivlen);
int r;
+ const size_t ivlen = cipher_alg_iv_len (h->cipher_alg, h->cipher_mode);
+ CLEANUP_FREE uint8_t *iv;
+
+ iv = malloc (ivlen);
+ if (!iv) {
+ nbdkit_error ("malloc: %m");
+ return -1;
+ }
while (nr_sectors) {
calculate_iv (h->ivgen_alg, iv, ivlen, sector);
--
2.31.1


@@ -1,57 +0,0 @@
From 1d593a76796574845d7e32aaadd9f7d1ed4e7987 Mon Sep 17 00:00:00 2001
From: "Richard W.M. Jones" <rjones@redhat.com>
Date: Tue, 12 Jul 2022 18:07:25 +0100
Subject: [PATCH] luks: Avoid potential overflow when computing key material
offset and length
Found by Coverity:
Error: OVERFLOW_BEFORE_WIDEN (CWE-190): [#def58]
nbdkit-1.30.7/filters/luks/luks-encryption.c:558: overflow_before_widen: Potentially overflowing expression "h->phdr.master_key_len * h->phdr.keyslot[i].stripes" with type "unsigned int" (32 bits, unsigned) is evaluated using 32-bit arithmetic, and then used in a context that expects an expression of type "uint64_t" (64 bits, unsigned).
nbdkit-1.30.7/filters/luks/luks-encryption.c:558: remediation: To avoid overflow, cast either "h->phdr.master_key_len" or "h->phdr.keyslot[i].stripes" to type "uint64_t".
# 556| uint64_t len, r;
# 557|
# 558|-> len = h->phdr.master_key_len * h->phdr.keyslot[i].stripes;
# 559| r = DIV_ROUND_UP (len, LUKS_SECTOR_SIZE);
# 560| r = ROUND_UP (r, LUKS_ALIGN_KEYSLOTS / LUKS_SECTOR_SIZE);
Error: OVERFLOW_BEFORE_WIDEN (CWE-190): [#def62]
nbdkit-1.30.7/filters/luks/luks-encryption.c:616: overflow_before_widen: Potentially overflowing expression "ks->key_material_offset * 512U" with type "unsigned int" (32 bits, unsigned) is evaluated using 32-bit arithmetic, and then used in a context that expects an expression of type "uint64_t" (64 bits, unsigned).
nbdkit-1.30.7/filters/luks/luks-encryption.c:616: remediation: To avoid overflow, cast either "ks->key_material_offset" or "512U" to type "uint64_t".
# 614|
# 615| /* Read master key material from plugin. */
# 616|-> start = ks->key_material_offset * LUKS_SECTOR_SIZE;
# 617| if (next->pread (next, split_key, split_key_len, start, 0, &err) == -1) {
# 618| errno = err;
Fixes: commit 468919dce6c5eb57503eacac0f67e5dd87c58e6c
(cherry picked from commit 808d88fbc7b58b7c95e05f41fec729cba92ef518)
---
filters/luks/luks-encryption.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/filters/luks/luks-encryption.c b/filters/luks/luks-encryption.c
index 19aaf06a..06435b27 100644
--- a/filters/luks/luks-encryption.c
+++ b/filters/luks/luks-encryption.c
@@ -561,7 +561,7 @@ key_material_length_in_sectors (struct luks_data *h, size_t i)
{
uint64_t len, r;
- len = h->phdr.master_key_len * h->phdr.keyslot[i].stripes;
+ len = (uint64_t) h->phdr.master_key_len * h->phdr.keyslot[i].stripes;
r = DIV_ROUND_UP (len, LUKS_SECTOR_SIZE);
r = ROUND_UP (r, LUKS_ALIGN_KEYSLOTS / LUKS_SECTOR_SIZE);
return r;
@@ -619,7 +619,7 @@ try_passphrase_in_keyslot (nbdkit_next *next, struct luks_data *h,
}
/* Read master key material from plugin. */
- start = ks->key_material_offset * LUKS_SECTOR_SIZE;
+ start = (uint64_t) ks->key_material_offset * LUKS_SECTOR_SIZE;
if (next->pread (next, split_key, split_key_len, start, 0, &err) == -1) {
errno = err;
return -1;
--
2.31.1


@@ -1,36 +0,0 @@
From ee25c1be953bf385caf23f96384a9834c1f1c250 Mon Sep 17 00:00:00 2001
From: "Richard W.M. Jones" <rjones@redhat.com>
Date: Tue, 12 Jul 2022 18:10:30 +0100
Subject: [PATCH] luks: Avoid memory leak on error path
Found by Coverity:
Error: CPPCHECK_WARNING (CWE-401): [#def65] [important]
nbdkit-1.30.7/filters/luks/luks-encryption.c:707: error[memleak]: Memory leak: h
# 705| if (memcmp (h->phdr.magic, expected_magic, LUKS_MAGIC_LEN) != 0) {
# 706| nbdkit_error ("this disk does not contain a LUKS header");
# 707|-> return NULL;
# 708| }
# 709| h->phdr.version = be16toh (h->phdr.version);
Fixes: commit 468919dce6c5eb57503eacac0f67e5dd87c58e6c
(cherry picked from commit a345cff137763f105f07bb8942c1bbefd0959cff)
---
filters/luks/luks-encryption.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/filters/luks/luks-encryption.c b/filters/luks/luks-encryption.c
index 06435b27..207a4e46 100644
--- a/filters/luks/luks-encryption.c
+++ b/filters/luks/luks-encryption.c
@@ -710,6 +710,7 @@ load_header (nbdkit_next *next, const char *passphrase)
if (memcmp (h->phdr.magic, expected_magic, LUKS_MAGIC_LEN) != 0) {
nbdkit_error ("this disk does not contain a LUKS header");
+ free (h);
return NULL;
}
h->phdr.version = be16toh (h->phdr.version);
--
2.31.1


@@ -1,48 +0,0 @@
From 5ccf1068703d300c8b5579b3a6ef0e409b5a713e Mon Sep 17 00:00:00 2001
From: "Richard W.M. Jones" <rjones@redhat.com>
Date: Tue, 19 Jul 2022 11:56:47 +0100
Subject: [PATCH] tests: Hoist some EXTRA_DIST out of automake conditionals
We can fail to add some test files (test.tcl, test.lua) to the tarball
if compiling with those languages disabled, which would cause knock-on
failures when the tarball was used with the languages enabled. We
already fixed this for Ruby etc.; this commit fixes it for Tcl and Lua.
(cherry picked from commit 3b6763c82909c95431ff57c2fe9be1b98316b057)
---
tests/Makefile.am | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/tests/Makefile.am b/tests/Makefile.am
index 0f4b0746..2667be32 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -1203,10 +1203,11 @@ EXTRA_DIST += \
$(NULL)
# Tcl plugin test.
+EXTRA_DIST += test.tcl
+
if HAVE_TCL
LIBGUESTFS_TESTS += test-tcl
-EXTRA_DIST += test.tcl
test_tcl_SOURCES = test-lang-plugins.c test.h
test_tcl_CFLAGS = \
@@ -1219,10 +1220,11 @@ test_tcl_LDADD = libtest.la $(LIBGUESTFS_LIBS)
endif HAVE_TCL
# Lua plugin test.
+EXTRA_DIST += test.lua
+
if HAVE_LUA
LIBGUESTFS_TESTS += test-lua
-EXTRA_DIST += test.lua
test_lua_SOURCES = test-lang-plugins.c test.h
test_lua_CFLAGS = \
--
2.31.1


@@ -1,53 +0,0 @@
From b1023cdc159ed852baf1b43e58e95b011df09182 Mon Sep 17 00:00:00 2001
From: "Richard W.M. Jones" <rjones@redhat.com>
Date: Fri, 18 Nov 2022 09:43:19 +0000
Subject: [PATCH] vddk: Add support for VDDK 8.0.0
There are no changes in any of the structures or enums that we rely on.
Reported-by: Ming Xie
Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=2143889
(cherry picked from commit dbe12ed499baeea94d603db55cad9e971e0ebcf0)
---
plugins/vddk/nbdkit-vddk-plugin.pod | 2 +-
plugins/vddk/vddk.c | 4 +++-
2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/plugins/vddk/nbdkit-vddk-plugin.pod b/plugins/vddk/nbdkit-vddk-plugin.pod
index 3991e86b..4d6040be 100644
--- a/plugins/vddk/nbdkit-vddk-plugin.pod
+++ b/plugins/vddk/nbdkit-vddk-plugin.pod
@@ -526,7 +526,7 @@ by this build.
=item C<vddk_library_version=...>
-The VDDK major library version: 6, 7, ...
+The VDDK major library version: 6, 7, 8, ...
If this is omitted it means the library could not be loaded.
=item C<vddk_dll=...>
diff --git a/plugins/vddk/vddk.c b/plugins/vddk/vddk.c
index 35697bc1..9e29075f 100644
--- a/plugins/vddk/vddk.c
+++ b/plugins/vddk/vddk.c
@@ -77,7 +77,7 @@ NBDKIT_DLL_PUBLIC int vddk_debug_datapath = 1;
void *dl; /* dlopen handle */
bool init_called; /* was InitEx called */
__thread int error_suppression; /* threadlocal error suppression */
-int library_version; /* VDDK major: 6, 7, ... */
+int library_version; /* VDDK major: 6, 7, 8, ... */
bool is_remote; /* true if remote connection */
enum compression_type compression; /* compression */
@@ -403,6 +403,8 @@ load_library (bool load_error_is_fatal)
* our testsuite is easier to write if we point libdir directly to
* a stub .so.
*/
+ { "lib64/libvixDiskLib.so.8", 8 },
+ { "libvixDiskLib.so.8", 8 },
{ "lib64/libvixDiskLib.so.7", 7 },
{ "libvixDiskLib.so.7", 7 },
{ "lib64/libvixDiskLib.so.6", 6 },
--
2.31.1
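
Whether the new .so.8 names are picked up can be checked without contacting a VMware server, since the plugin reports the major version it loaded in its --dump-plugin output; a sketch, with the libdir path standing in for an unpacked VDDK 8.0.0 distribution:

  nbdkit vddk libdir=/path/to/vmware-vix-disklib-distrib --dump-plugin \
      | grep ^vddk_
  # Expect vddk_library_version=8 and a vddk_dll path ending in
  # libvixDiskLib.so.8 when a VDDK 8.0.0 install is found under libdir.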


@@ -1,30 +0,0 @@
From 750e4c9e4ba438c2092dc66046d7a87df3886457 Mon Sep 17 00:00:00 2001
From: "Richard W.M. Jones" <rjones@redhat.com>
Date: Tue, 29 Nov 2022 08:26:46 +0000
Subject: [PATCH] vddk: Document that 8.0.0 has now been tested
I forgot to update the man page.
Thanks: Alice Frosi
Fixes: commit dbe12ed499baeea94d603db55cad9e971e0ebcf0
(cherry picked from commit f0ed40307a2a0b873e4271e90f6e9c2e50c75017)
---
plugins/vddk/nbdkit-vddk-plugin.pod | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/plugins/vddk/nbdkit-vddk-plugin.pod b/plugins/vddk/nbdkit-vddk-plugin.pod
index 4d6040be..988413cf 100644
--- a/plugins/vddk/nbdkit-vddk-plugin.pod
+++ b/plugins/vddk/nbdkit-vddk-plugin.pod
@@ -672,7 +672,7 @@ server, which can also be very slow.
This plugin requires VDDK E<ge> 6.5 (released Nov 2016). It is only
supported on the x64-64 archtecture.
-It has been tested with all versions up to 7.0.3 (but should work with
+It has been tested with all versions up to 8.0.0 (but should work with
future versions).
VDDK 6.7 was the first version that supported the
--
2.31.1


@@ -1,17 +0,0 @@
-----BEGIN PGP SIGNATURE-----
iQJFBAABCAAvFiEE93dPsa0HSn6Mh2fqkXOPc+G3aKAFAmLWguQRHHJpY2hAYW5u
ZXhpYS5vcmcACgkQkXOPc+G3aKBpTQ//SBnebP9miDMyAnMS0zgK1LevsGMWwnLm
cZcXgvx+hvdomTlFElcxBjSPN51rhywI0vyN8fM1h/eckBkGH/fC8Ta+pQHBxFec
KZZfLZ+EhsBv1JAid3t284xxFkahtZ0UWpPmVlZG2EelGiYHpL+nckf0hTS6R2a2
cWDgpSwxIlPNEvDmDqZyUPCTY4vdxtdlI4JwRcqYn7nH6TbBLWK0/pjQH6NtZ/bu
cBQJQpi+ne3mIU6P8GocFJhNEMO1//aZRJsAX5uURuIaAToH+QDtcXCVPTZDogqh
5BPL/nGQu+LbXFlRTGFIwNRyfSLDGFPWnZ/356NssaPpX4cYB9zsEmdnf9enz/Ez
cYrwoxQbilzkvRH0oovb+2UL4V/dEoUgdND6amHWF+3uuT+c/j9CsIk5IrL5y9Ly
VhkttvYL96rOGQoIeZfg0+Icjx6aSau6L7qRvjZBP6egZN3VUVhMFn7knynENXlv
DxDg4XwqWP6OKsBiBteED48Hn2nF0jniq/6Mx/fVrblGlYGxOSLIupdTXl5n10OK
9vwsp7qSYJP1160Mt5r3MFNH0DygmAHbH5jqzDFn5H6TqSV31PuF4XZg/DnBedaJ
+PI9qOnXjkM9gYeVO3FOosf70kh7zfjP6/kOOJ4BGiaZ5T9Y12oJAwxqkjZhUHHF
QDvUE7KDOM8=
=I81A
-----END PGP SIGNATURE-----


@@ -0,0 +1,17 @@
-----BEGIN PGP SIGNATURE-----
iQJFBAABCAAvFiEE93dPsa0HSn6Mh2fqkXOPc+G3aKAFAmO0P9URHHJpY2hAYW5u
ZXhpYS5vcmcACgkQkXOPc+G3aKBj/w/+MahWvHpk6oOoif9pvshQ5ZXWWH8+4DCZ
fMPQPRuU3j64tj2kUrp87zChVPkQv27v+RuQcs5OuhfB/nvCIJiK6dSMq6KQmIv2
b3LieGAuIlhr89YIGQRi7j+R8iWiQgm+dT6BNeu3n7kbpEbJPPUHhz2YNlw1x/LJ
mfSEh+0HXKKz7HsCDwUCenq/pCPyD4p9x0UB0xqDT7PLg3qGwpHCMTuslrX3alOu
EYl+NDr9q266IQYGUh1zpSkobLNLvHI+TFyYEvytDnU4MylyslOdDIsA89E/y29r
rSMl9edDjhQ/h51In1Q8rKmlXFrcwDeRUywybn09m1gu++bxls5W1LFAZvp/YBa+
nWYv3o58epJSbhEL6NO5fl88Ea5JJYqhB+I1ezud/nJ3Uu/t9C7m69Mt96U5NhQ5
9irjO1Przz/3ft9+t7hW2u3MFNrEA/u1+e/Jnyr4+g8ZYmM1V7hQWqGwjO09zUZT
5xR41WHxG3ZbuUOv5r5Xt7Xvp1tiiWxiyEWOBydQwsnV9yZR/G2m3eWFE5H315r+
qGQXbr41mnsUAG6G8VXsNwK8cu3YQkZBHm4et4wFmvI6C1n2I7jhQBoXkdi9BKj9
Rh/h3DDmYB2Ud6G/ApWwfRFhSPM/apuUYfuYlXPKteFhtPjfbNSlHkm4hr1lFbK/
+X9eYoD9410=
=qSZX
-----END PGP SIGNATURE-----


@@ -46,14 +46,14 @@ ExclusiveArch: x86_64
%global verify_tarball_signature 1
# If there are patches which touch autotools files, set this to 1.
%global patches_touch_autotools 1
%global patches_touch_autotools %{nil}
# The source directory.
%global source_directory 1.30-stable
%global source_directory 1.32-stable
Name: nbdkit
Version: 1.30.8
Release: 2%{?dist}
Version: 1.32.5
Release: 4%{?dist}
Summary: NBD server
License: BSD
@@ -78,31 +78,10 @@ Source3: copy-patches.sh
# https://gitlab.com/nbdkit/nbdkit/-/commits/rhel-9.2/
# Patches.
Patch0001: 0001-ssh-Allow-the-remote-file-to-be-created.patch
Patch0002: 0002-readahead-Rewrite-this-filter-so-it-prefetches-using.patch
Patch0003: 0003-readahead-Fix-test.patch
Patch0004: 0004-New-filter-luks.patch
Patch0005: 0005-luks-Disable-filter-with-old-GnuTLS-in-Debian-10.patch
Patch0006: 0006-luks-Various-fixes-for-Clang.patch
Patch0007: 0007-luks-Link-with-libcompat-on-Windows.patch
Patch0008: 0008-luks-Refactor-the-filter.patch
Patch0009: 0009-tests-luks-Reduce-time-taken-to-run-these-tests.patch
Patch0010: 0010-Add-nbdkit.parse_size-Python-function.patch
Patch0011: 0011-cache-Fix-cross-reference-nbdkit-readahead-filter.patch
Patch0012: 0012-curl-Don-t-document-curl-plugin-readahead-filter.patch
Patch0013: 0013-New-filter-scan.patch
Patch0014: 0014-scan-Remove-condition-variable.patch
Patch0015: 0015-scan-Small-typographical-fix-in-manual.patch
Patch0016: 0016-ssh-Don-t-reference-readahead-or-scan-filters-from-t.patch
Patch0017: 0017-scan-Fix-bound-so-we-don-t-try-to-prefetch-beyond-en.patch
Patch0018: 0018-tests-Add-a-regression-test-for-LUKS-zeroing-crash.patch
Patch0019: 0019-rate-Allow-burstiness-to-be-controlled.patch
Patch0020: 0020-luks-Check-return-values-from-malloc-more-carefully.patch
Patch0021: 0021-luks-Avoid-potential-overflow-when-computing-key-mat.patch
Patch0022: 0022-luks-Avoid-memory-leak-on-error-path.patch
Patch0023: 0023-tests-Hoist-some-EXTRA_DIST-out-of-automake-conditio.patch
Patch0024: 0024-vddk-Add-support-for-VDDK-8.0.0.patch
Patch0025: 0025-vddk-Document-that-8.0.0-has-now-been-tested.patch
Patch0001: 0001-ssh-Remove-left-over-comment.patch
Patch0002: 0002-ssh-Improve-the-error-message-when-all-authenticatio.patch
Patch0003: 0003-luks-Avoid-crash-when-image-does-not-contain-a-LUKS-.patch
Patch0004: 0004-curl-Enable-multi-conn-for-read-only-connections.patch
# For automatic RPM Provides generation.
# See: https://rpm-software-management.github.io/rpm/manual/dependency_generators.html
@@ -177,7 +156,6 @@ BuildRequires: %{_bindir}/socat
%endif
BuildRequires: %{_sbindir}/ss
BuildRequires: %{_bindir}/stat
BuildRequires: %{_bindir}/ssh-keygen
# This package has RPM rules that create the automatic Provides: for
# nbdkit plugins and filters. This means nbdkit build depends on
@@ -604,8 +582,6 @@ nbdkit-retry-request-filter Retry single requests on error.
nbdkit-scan-filter Prefetch data ahead of sequential reads.
nbdkit-stats-filter Display statistics about operations.
nbdkit-swab-filter Filter for swapping byte order.
nbdkit-tls-fallback-filter TLS protection filter.
@@ -634,6 +610,15 @@ Requires: %{name}-server%{?_isa} = %{version}-%{release}
This package is a gzip filter for %{name}.
%package stats-filter
Summary: Statistics filter for %{name}
License: BSD
Requires: %{name}-server%{?_isa} = %{version}-%{release}
%description stats-filter
Display statistics about operations.
%package tar-filter
Summary: Tar archive filter for %{name}
License: BSD
@@ -826,7 +811,7 @@ export LIBGUESTFS_TRACE=1
%files server
%doc README
%doc README.md
%license LICENSE
%{_sbindir}/nbdkit
%dir %{_libdir}/%{name}
@@ -846,7 +831,7 @@ export LIBGUESTFS_TRACE=1
%files basic-plugins
%doc README
%doc README.md
%license LICENSE
%{_libdir}/%{name}/plugins/nbdkit-data-plugin.so
%{_libdir}/%{name}/plugins/nbdkit-eval-plugin.so
@@ -881,7 +866,7 @@ export LIBGUESTFS_TRACE=1
%files example-plugins
%doc README
%doc README.md
%license LICENSE
%{_libdir}/%{name}/plugins/nbdkit-example*-plugin.so
%if !0%{?rhel}
@@ -892,7 +877,7 @@ export LIBGUESTFS_TRACE=1
%if !0%{?rhel}
%files cc-plugin
%doc README
%doc README.md
%license LICENSE
%{_libdir}/%{name}/plugins/nbdkit-cc-plugin.so
%{_mandir}/man3/nbdkit-cc-plugin.3*
@@ -901,7 +886,7 @@ export LIBGUESTFS_TRACE=1
%if !0%{?rhel}
%files cdi-plugin
%doc README
%doc README.md
%license LICENSE
%{_libdir}/%{name}/plugins/nbdkit-cdi-plugin.so
%{_mandir}/man1/nbdkit-cdi-plugin.1*
@@ -909,7 +894,7 @@ export LIBGUESTFS_TRACE=1
%files curl-plugin
%doc README
%doc README.md
%license LICENSE
%{_libdir}/%{name}/plugins/nbdkit-curl-plugin.so
%{_mandir}/man1/nbdkit-curl-plugin.1*
@@ -917,7 +902,7 @@ export LIBGUESTFS_TRACE=1
%if !0%{?rhel} && 0%{?have_libguestfs}
%files guestfs-plugin
%doc README
%doc README.md
%license LICENSE
%{_libdir}/%{name}/plugins/nbdkit-guestfs-plugin.so
%{_mandir}/man1/nbdkit-guestfs-plugin.1*
@@ -926,7 +911,7 @@ export LIBGUESTFS_TRACE=1
%if !0%{?rhel}
%files iso-plugin
%doc README
%doc README.md
%license LICENSE
%{_libdir}/%{name}/plugins/nbdkit-iso-plugin.so
%{_mandir}/man1/nbdkit-iso-plugin.1*
@@ -935,7 +920,7 @@ export LIBGUESTFS_TRACE=1
%if !0%{?rhel}
%files libvirt-plugin
%doc README
%doc README.md
%license LICENSE
%{_libdir}/%{name}/plugins/nbdkit-libvirt-plugin.so
%{_mandir}/man1/nbdkit-libvirt-plugin.1*
@@ -943,7 +928,7 @@ export LIBGUESTFS_TRACE=1
%files linuxdisk-plugin
%doc README
%doc README.md
%license LICENSE
%{_libdir}/%{name}/plugins/nbdkit-linuxdisk-plugin.so
%{_mandir}/man1/nbdkit-linuxdisk-plugin.1*
@@ -951,7 +936,7 @@ export LIBGUESTFS_TRACE=1
%if !0%{?rhel}
%files lua-plugin
%doc README
%doc README.md
%license LICENSE
%{_libdir}/%{name}/plugins/nbdkit-lua-plugin.so
%{_mandir}/man3/nbdkit-lua-plugin.3*
@@ -959,7 +944,7 @@ export LIBGUESTFS_TRACE=1
%files nbd-plugin
%doc README
%doc README.md
%license LICENSE
%{_libdir}/%{name}/plugins/nbdkit-nbd-plugin.so
%{_mandir}/man1/nbdkit-nbd-plugin.1*
@@ -967,7 +952,7 @@ export LIBGUESTFS_TRACE=1
%if !0%{?rhel} && 0%{?have_ocaml}
%files ocaml-plugin
%doc README
%doc README.md
%license LICENSE
%{_libdir}/libnbdkitocaml.so.*
@@ -981,7 +966,7 @@ export LIBGUESTFS_TRACE=1
%if !0%{?rhel}
%files perl-plugin
%doc README
%doc README.md
%license LICENSE
%{_libdir}/%{name}/plugins/nbdkit-perl-plugin.so
%{_mandir}/man3/nbdkit-perl-plugin.3*
@@ -989,7 +974,7 @@ export LIBGUESTFS_TRACE=1
%files python-plugin
%doc README
%doc README.md
%license LICENSE
%{_libdir}/%{name}/plugins/nbdkit-python-plugin.so
%{_mandir}/man3/nbdkit-python-plugin.3*
@@ -997,7 +982,7 @@ export LIBGUESTFS_TRACE=1
%if !0%{?rhel}
%files ruby-plugin
%doc README
%doc README.md
%license LICENSE
%{_libdir}/%{name}/plugins/nbdkit-ruby-plugin.so
%{_mandir}/man3/nbdkit-ruby-plugin.3*
@@ -1006,7 +991,7 @@ export LIBGUESTFS_TRACE=1
%if !0%{?rhel}
%files S3-plugin
%doc README
%doc README.md
%license LICENSE
%{_libdir}/%{name}/plugins/nbdkit-S3-plugin
%{_mandir}/man1/nbdkit-S3-plugin.1*
@@ -1014,7 +999,7 @@ export LIBGUESTFS_TRACE=1
%files ssh-plugin
%doc README
%doc README.md
%license LICENSE
%{_libdir}/%{name}/plugins/nbdkit-ssh-plugin.so
%{_mandir}/man1/nbdkit-ssh-plugin.1*
@@ -1022,7 +1007,7 @@ export LIBGUESTFS_TRACE=1
%if !0%{?rhel}
%files tcl-plugin
%doc README
%doc README.md
%license LICENSE
%{_libdir}/%{name}/plugins/nbdkit-tcl-plugin.so
%{_mandir}/man3/nbdkit-tcl-plugin.3*
@@ -1030,7 +1015,7 @@ export LIBGUESTFS_TRACE=1
%files tmpdisk-plugin
%doc README
%doc README.md
%license LICENSE
%{_libdir}/%{name}/plugins/nbdkit-tmpdisk-plugin.so
%{_mandir}/man1/nbdkit-tmpdisk-plugin.1*
@@ -1038,7 +1023,7 @@ export LIBGUESTFS_TRACE=1
%if !0%{?rhel}
%files torrent-plugin
%doc README
%doc README.md
%license LICENSE
%{_libdir}/%{name}/plugins/nbdkit-torrent-plugin.so
%{_mandir}/man1/nbdkit-torrent-plugin.1*
@@ -1047,7 +1032,7 @@ export LIBGUESTFS_TRACE=1
%ifarch x86_64
%files vddk-plugin
%doc README plugins/vddk/README.VDDK
%doc README.md plugins/vddk/README.VDDK
%license LICENSE
%{_libdir}/%{name}/plugins/nbdkit-vddk-plugin.so
%{_mandir}/man1/nbdkit-vddk-plugin.1*
@@ -1055,7 +1040,7 @@ export LIBGUESTFS_TRACE=1
%files basic-filters
%doc README
%doc README.md
%license LICENSE
%{_libdir}/%{name}/filters/nbdkit-blocksize-filter.so
%{_libdir}/%{name}/filters/nbdkit-blocksize-policy-filter.so
@@ -1090,7 +1075,6 @@ export LIBGUESTFS_TRACE=1
%{_libdir}/%{name}/filters/nbdkit-retry-filter.so
%{_libdir}/%{name}/filters/nbdkit-retry-request-filter.so
%{_libdir}/%{name}/filters/nbdkit-scan-filter.so
%{_libdir}/%{name}/filters/nbdkit-stats-filter.so
%{_libdir}/%{name}/filters/nbdkit-swab-filter.so
%{_libdir}/%{name}/filters/nbdkit-tls-fallback-filter.so
%{_libdir}/%{name}/filters/nbdkit-truncate-filter.so
@@ -1127,7 +1111,6 @@ export LIBGUESTFS_TRACE=1
%{_mandir}/man1/nbdkit-retry-filter.1*
%{_mandir}/man1/nbdkit-retry-request-filter.1*
%{_mandir}/man1/nbdkit-scan-filter.1*
%{_mandir}/man1/nbdkit-stats-filter.1*
%{_mandir}/man1/nbdkit-swab-filter.1*
%{_mandir}/man1/nbdkit-tls-fallback-filter.1*
%{_mandir}/man1/nbdkit-truncate-filter.1*
@@ -1135,7 +1118,7 @@ export LIBGUESTFS_TRACE=1
%if !0%{?rhel}
%files ext2-filter
%doc README
%doc README.md
%license LICENSE
%{_libdir}/%{name}/filters/nbdkit-ext2-filter.so
%{_mandir}/man1/nbdkit-ext2-filter.1*
@@ -1143,28 +1126,35 @@ export LIBGUESTFS_TRACE=1
%files gzip-filter
%doc README
%doc README.md
%license LICENSE
%{_libdir}/%{name}/filters/nbdkit-gzip-filter.so
%{_mandir}/man1/nbdkit-gzip-filter.1*
%files stats-filter
%doc README.md
%license LICENSE
%{_libdir}/%{name}/filters/nbdkit-stats-filter.so
%{_mandir}/man1/nbdkit-stats-filter.1*
%files tar-filter
%doc README
%doc README.md
%license LICENSE
%{_libdir}/%{name}/filters/nbdkit-tar-filter.so
%{_mandir}/man1/nbdkit-tar-filter.1*
%files xz-filter
%doc README
%doc README.md
%license LICENSE
%{_libdir}/%{name}/filters/nbdkit-xz-filter.so
%{_mandir}/man1/nbdkit-xz-filter.1*
%files devel
%doc BENCHMARKING OTHER_PLUGINS README SECURITY TODO
%doc BENCHMARKING OTHER_PLUGINS README.md SECURITY TODO
%license LICENSE
# Include the source of the example plugins in the documentation.
%doc plugins/example*/*.c
@@ -1210,6 +1200,17 @@ export LIBGUESTFS_TRACE=1
%changelog
* Fri Feb 03 2023 Richard W.M. Jones <rjones@redhat.com> - 1.32.5-4
- Rebase to new stable branch version 1.32.5
resolves: rhbz#2135765
- Move stats filter to new subpackage.
- Improve error message when PasswordAuthentication is set to 'no'
resolves: rhbz#2158300
- luks: Avoid crash when image does not contain a LUKS header
resolves: rhbz#2159581
- curl: Enable multi-conn for read-only connections
resolves: rhbz#2166686
* Tue Nov 29 2022 Richard W.M. Jones <rjones@redhat.com> - 1.30.8-2
- Add support for VDDK 8.0.0
resolves: rhbz#2143889