Compare commits

...

No commits in common. "imports/c9/virtiofsd-1.1.0-4.el9_0" and "c9-beta" have entirely different histories.

6 changed files with 28 additions and 274 deletions

.gitignore

@@ -1,2 +1,2 @@
-SOURCES/virtiofsd-1.1.0-vendor.tar.gz
-SOURCES/virtiofsd-1.1.0.crate
+SOURCES/virtiofsd-1.10.1-vendor.tar.gz
+SOURCES/virtiofsd-1.10.1.crate

.virtiofsd.metadata

@@ -1,2 +1,2 @@
-1c55f5d419b01ec49c1cd2a5b2dfd915fd9481a2 SOURCES/virtiofsd-1.1.0-vendor.tar.gz
-ea25daee7b35d007c3786cae60ac178a276a54bd SOURCES/virtiofsd-1.1.0.crate
+10f7329aee1fb72f01043b1e5421d95f0ac62343 SOURCES/virtiofsd-1.10.1-vendor.tar.gz
+0529e43b543aa9fb945d0a9e9661464844f7b04e SOURCES/virtiofsd-1.10.1.crate

SOURCES/Clean-up-flags-in-opendir-downstream.patch

@@ -1,33 +0,0 @@
From 1bb43a5cdcb48dc9a8add0d1c94e627cd76f80f6 Mon Sep 17 00:00:00 2001
From: Sergio Lopez <slp@redhat.com>
Date: Tue, 22 Mar 2022 10:22:01 +0100
Subject: [PATCH] Clean up flags in opendir (downstream)
Clean up O_RDWR and O_WRONLY flags in opendir to work around a bug in
the Windows virtio-fs guest driver.
Resolves: rhbz#2057252
Signed-off-by: Sergio Lopez <slp@redhat.com>
---
src/passthrough/mod.rs | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/src/passthrough/mod.rs b/src/passthrough/mod.rs
index b2b265c..6e4b236 100644
--- a/src/passthrough/mod.rs
+++ b/src/passthrough/mod.rs
@@ -1133,7 +1133,10 @@ impl FileSystem for PassthroughFs {
inode: Inode,
flags: u32,
) -> io::Result<(Option<Handle>, OpenOptions)> {
- self.do_open(inode, false, flags | (libc::O_DIRECTORY as u32))
+ // Clean up O_RDWR and O_WRONLY from flags to work around a bug in the Windows
+ // virtio-fs guest driver. BZ#2057252
+ let clean_flags: u32 = flags & !((libc::O_RDWR | libc::O_WRONLY) as u32);
+ self.do_open(inode, false, clean_flags | (libc::O_DIRECTORY as u32))
}
fn releasedir(
--
2.35.1
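
The dropped patch above amounts to masking two access-mode bits before delegating to do_open. A minimal standalone sketch of that masking, assuming only the libc crate already used in the diff; the helper name and the main harness are illustrative and not part of virtiofsd:

// Illustrative helper, not part of virtiofsd: reproduces the flag clean-up
// from the patch above using the libc constants.
fn clean_opendir_flags(flags: u32) -> u32 {
    // Drop O_RDWR/O_WRONLY so stray access modes from the Windows virtio-fs
    // guest driver never reach opendir (rhbz#2057252), then force O_DIRECTORY.
    let cleaned = flags & !((libc::O_RDWR | libc::O_WRONLY) as u32);
    cleaned | (libc::O_DIRECTORY as u32)
}

fn main() {
    let flags = (libc::O_RDWR | libc::O_NONBLOCK) as u32;
    let cleaned = clean_opendir_flags(flags);
    // The write access bit is gone, the other flags survive.
    assert_eq!(cleaned & (libc::O_RDWR as u32), 0);
    println!("cleaned flags: {:#o}", cleaned);
}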

SOURCES/Set-the-number-of-written-bytes-for-used-descs.patch

@@ -1,88 +0,0 @@
From 80034cacde8f9c4d7cd4ae73316eeec6cc2fd67a Mon Sep 17 00:00:00 2001
From: Sergio Lopez <slp@redhat.com>
Date: Thu, 10 Mar 2022 12:03:13 +0100
Subject: [PATCH 2/3] Set the number of written bytes for used descs
As the Linux driver ignores the "len" field of used descriptors, we
didn't bother to set it properly when returning those descriptors to the
queue.
The problem is that other implementations (at least, the Windows one),
do care about it, so let's do the right thing and set it properly.
Resolves: rhbz#2057252
Signed-off-by: Sergio Lopez <slp@redhat.com>
(cherry picked from commit 2bc7c2102d18f47b309fd5f767b5349d9e08d2b8)
Signed-off-by: Sergio Lopez <slp@redhat.com>
---
src/main.rs | 25 ++++++++++++++++++-------
1 file changed, 18 insertions(+), 7 deletions(-)
diff --git a/src/main.rs b/src/main.rs
index 5a9914f..ca183e8 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -6,7 +6,7 @@ use futures::executor::{ThreadPool, ThreadPoolBuilder};
use libc::EFD_NONBLOCK;
use log::*;
use passthrough::xattrmap::XattrMap;
-use std::convert::{self, TryFrom};
+use std::convert::{self, TryFrom, TryInto};
use std::ffi::CString;
use std::os::unix::io::{FromRawFd, RawFd};
use std::sync::{Arc, Mutex, RwLock};
@@ -128,8 +128,18 @@ impl<F: FileSystem + Send + Sync + 'static> VhostUserFsThread<F> {
})
}
- fn return_descriptor(vring_state: &mut VringState, head_index: u16, event_idx: bool) {
- if vring_state.add_used(head_index, 0).is_err() {
+ fn return_descriptor(
+ vring_state: &mut VringState,
+ head_index: u16,
+ event_idx: bool,
+ len: usize,
+ ) {
+ let used_len: u32 = match len.try_into() {
+ Ok(l) => l,
+ Err(_) => panic!("Invalid used length, can't return used descritors to the ring"),
+ };
+
+ if vring_state.add_used(head_index, used_len).is_err() {
warn!("Couldn't return used descriptors to the ring");
}
@@ -185,12 +195,12 @@ impl<F: FileSystem + Send + Sync + 'static> VhostUserFsThread<F> {
.map_err(Error::QueueWriter)
.unwrap();
- server
+ let len = server
.handle_message(reader, writer, vu_req.as_mut())
.map_err(Error::ProcessQueue)
.unwrap();
- Self::return_descriptor(&mut worker_vring.get_mut(), head_index, event_idx);
+ Self::return_descriptor(&mut worker_vring.get_mut(), head_index, event_idx, len);
});
}
@@ -222,12 +232,13 @@ impl<F: FileSystem + Send + Sync + 'static> VhostUserFsThread<F> {
.map_err(Error::QueueWriter)
.unwrap();
- self.server
+ let len = self
+ .server
.handle_message(reader, writer, self.vu_req.as_mut())
.map_err(Error::ProcessQueue)
.unwrap();
- Self::return_descriptor(vring_state, head_index, self.event_idx);
+ Self::return_descriptor(vring_state, head_index, self.event_idx, len);
}
Ok(used_any)
--
2.35.1
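
The core of the patch above is converting the byte count returned by handle_message into the u32 "len" field that add_used expects. A small sketch of just that conversion, using only the standard library; the helper name is illustrative, and the VringState/add_used API from virtiofsd's vhost-user backend is not reproduced here:

use std::convert::TryInto;

// Illustrative helper: turn the number of bytes written for a request into
// the u32 value stored in the used ring, failing loudly on overflow as the
// patch does.
fn used_len(len: usize) -> u32 {
    len.try_into()
        .expect("invalid used length, can't return used descriptors to the ring")
}

fn main() {
    assert_eq!(used_len(4096), 4096u32);
}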

SOURCES/process_queue_pool-Only-acquire-the-VringMutex-lock-.patch

@@ -1,130 +0,0 @@
From 9e55bb375937f8cc93666d9998f09d23a31185f1 Mon Sep 17 00:00:00 2001
From: Sebastian Hasler <sebastian.hasler@stuvus.uni-stuttgart.de>
Date: Wed, 2 Feb 2022 17:50:34 +0100
Subject: [PATCH 1/3] process_queue_pool: Only acquire the VringMutex lock once
Previously, the worker task in `process_queue_pool()` called
up to 3 functions on `worker_vring` where each function is a
wrapper that first locks the mutex. This is unnecessary
congestion. We fix this by locking the mutex once.
Signed-off-by: Sebastian Hasler <sebastian.hasler@stuvus.uni-stuttgart.de>
(cherry picked from commit c904bd8dbd9557d1a59fa0934e092443c7264d43)
Signed-off-by: Sergio Lopez <slp@redhat.com>
---
src/main.rs | 72 +++++++++++++++++++----------------------------------
1 file changed, 26 insertions(+), 46 deletions(-)
diff --git a/src/main.rs b/src/main.rs
index 2049eda..5a9914f 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -9,7 +9,7 @@ use passthrough::xattrmap::XattrMap;
use std::convert::{self, TryFrom};
use std::ffi::CString;
use std::os::unix::io::{FromRawFd, RawFd};
-use std::sync::{Arc, Mutex, MutexGuard, RwLock};
+use std::sync::{Arc, Mutex, RwLock};
use std::{env, error, fmt, io, process};
use structopt::StructOpt;
@@ -128,6 +128,28 @@ impl<F: FileSystem + Send + Sync + 'static> VhostUserFsThread<F> {
})
}
+ fn return_descriptor(vring_state: &mut VringState, head_index: u16, event_idx: bool) {
+ if vring_state.add_used(head_index, 0).is_err() {
+ warn!("Couldn't return used descriptors to the ring");
+ }
+
+ if event_idx {
+ match vring_state.needs_notification() {
+ Err(_) => {
+ warn!("Couldn't check if queue needs to be notified");
+ vring_state.signal_used_queue().unwrap();
+ }
+ Ok(needs_notification) => {
+ if needs_notification {
+ vring_state.signal_used_queue().unwrap();
+ }
+ }
+ }
+ } else {
+ vring_state.signal_used_queue().unwrap();
+ }
+ }
+
fn process_queue_pool(&mut self, vring: VringMutex) -> Result<bool> {
let mut used_any = false;
let atomic_mem = match &self.mem {
@@ -168,35 +190,14 @@ impl<F: FileSystem + Send + Sync + 'static> VhostUserFsThread<F> {
.map_err(Error::ProcessQueue)
.unwrap();
- if event_idx {
- if worker_vring.add_used(head_index, 0).is_err() {
- warn!("Couldn't return used descriptors to the ring");
- }
-
- match worker_vring.needs_notification() {
- Err(_) => {
- warn!("Couldn't check if queue needs to be notified");
- worker_vring.signal_used_queue().unwrap();
- }
- Ok(needs_notification) => {
- if needs_notification {
- worker_vring.signal_used_queue().unwrap();
- }
- }
- }
- } else {
- if worker_vring.add_used(head_index, 0).is_err() {
- warn!("Couldn't return used descriptors to the ring");
- }
- worker_vring.signal_used_queue().unwrap();
- }
+ Self::return_descriptor(&mut worker_vring.get_mut(), head_index, event_idx);
});
}
Ok(used_any)
}
- fn process_queue_serial(&mut self, vring_state: &mut MutexGuard<VringState>) -> Result<bool> {
+ fn process_queue_serial(&mut self, vring_state: &mut VringState) -> Result<bool> {
let mut used_any = false;
let mem = match &self.mem {
Some(m) => m.memory(),
@@ -226,28 +227,7 @@ impl<F: FileSystem + Send + Sync + 'static> VhostUserFsThread<F> {
.map_err(Error::ProcessQueue)
.unwrap();
- if self.event_idx {
- if vring_state.add_used(head_index, 0).is_err() {
- warn!("Couldn't return used descriptors to the ring");
- }
-
- match vring_state.needs_notification() {
- Err(_) => {
- warn!("Couldn't check if queue needs to be notified");
- vring_state.signal_used_queue().unwrap();
- }
- Ok(needs_notification) => {
- if needs_notification {
- vring_state.signal_used_queue().unwrap();
- }
- }
- }
- } else {
- if vring_state.add_used(head_index, 0).is_err() {
- warn!("Couldn't return used descriptors to the ring");
- }
- vring_state.signal_used_queue().unwrap();
- }
+ Self::return_descriptor(vring_state, head_index, self.event_idx);
}
Ok(used_any)
--
2.35.1
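
The refactor above replaces several calls on the VringMutex wrapper, each of which took the lock internally, with a single get_mut() followed by calls on the guard. A self-contained sketch of that pattern using a plain std::sync::Mutex; the Ring type and its methods are stand-ins, not the real vhost-user-backend API:

use std::sync::Mutex;

// Stand-in for the vring state protected by VringMutex in the patch above.
struct Ring {
    used: Vec<u16>,
}

impl Ring {
    fn add_used(&mut self, head_index: u16) {
        self.used.push(head_index);
    }
    fn signal_used_queue(&self) {
        println!("notify guest ({} used entries)", self.used.len());
    }
}

fn main() {
    let vring = Mutex::new(Ring { used: Vec::new() });

    // Before the patch: every wrapper call re-acquired the lock.
    vring.lock().unwrap().add_used(1);
    vring.lock().unwrap().signal_used_queue();

    // After the patch: lock once and drive every step through the guard,
    // roughly what return_descriptor(&mut worker_vring.get_mut(), ...) does.
    let mut state = vring.lock().unwrap();
    state.add_used(2);
    state.signal_used_queue();
}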

SPECS/virtiofsd.spec

@@ -1,21 +1,14 @@
 Name: virtiofsd
-Version: 1.1.0
-Release: 4%{?dist}
+Version: 1.10.1
+Release: 1%{?dist}
 Summary: Virtio-fs vhost-user device daemon (Rust version)
 # Upstream license specification: Apache-2.0 AND BSD-3-Clause
-License: ASL 2.0 and BSD
+License: Apache-2.0 AND BSD-3-Clause
 URL: https://gitlab.com/virtio-fs/virtiofsd
 Source0: %{crates_source}
 Source1: %{name}-%{version}-vendor.tar.gz
-# For bz#2057252 - process_queue_pool: Only acquire the VringMutex lock once
-Patch1: process_queue_pool-Only-acquire-the-VringMutex-lock-.patch
-# For bz#2057252 - Set the number of written bytes for used descs
-Patch2: Set-the-number-of-written-bytes-for-used-descs.patch
-# For bz#2057252 - Clean up flags in opendir (downstream)
-Patch3: Clean-up-flags-in-opendir-downstream.patch
 ExclusiveArch: x86_64 aarch64 s390x
 BuildRequires: rust-toolset
 BuildRequires: libcap-ng-devel
@@ -30,7 +23,6 @@ Conflicts: qemu-virtiofsd = 17:6.2.0
 %prep
 %setup -q -n %{name}-%{version}
-%autopatch -p1
 %cargo_prep -V 1
@@ -40,21 +32,34 @@ Conflicts: qemu-virtiofsd = 17:6.2.0
 %install
 mkdir -p %{buildroot}%{_libexecdir}
 install -D -p -m 0755 target/release/virtiofsd %{buildroot}%{_libexecdir}/virtiofsd
-install -D -p -m 0644 50-qemu-virtiofsd.json %{buildroot}%{_datadir}/qemu/vhost-user/50-qemu-virtiofsd.json
+install -D -p -m 0644 50-virtiofsd.json %{buildroot}%{_datadir}/qemu/vhost-user/50-virtiofsd.json
 %files
 %license LICENSE-APACHE LICENSE-BSD-3-Clause
 %doc README.md
 %{_libexecdir}/virtiofsd
-%{_datadir}/qemu/vhost-user/50-qemu-virtiofsd.json
+%{_datadir}/qemu/vhost-user/50-virtiofsd.json
 %changelog
-* Tue Mar 22 2022 Sergio Lopez <slp@redhat.com> - 1.1.0-4
-- process_queue_pool-Only-acquire-the-VringMutex-lock-.patch [bz#2057252]
-- Set-the-number-of-written-bytes-for-used-descs.patch [bz#2057252]
-- Clean-up-flags-in-opendir-downstream.patch [bz#2057252]
-- Resolves: bz#2057252
-([virtiofsd]Can't access to the shared directory on windows guest with the new virtiofsd(rust))
+* Thu Jan 25 2024 Miroslav Rezanina <mrezanin@redhat.com> - 1.10.1-1
+- Update to upstream version 1.10.1 [RHEL-22739]
+- Resolves: RHEL-22739
+(Rebase virtiofsd to latest version for RHEL 9.4)
+* Tue Jul 18 2023 German Maglione <gmaglione@redhat.com> - 1.7.2-1
+- Update to upstream version 1.7.2 [bz#2233498]
+* Tue Jul 18 2023 German Maglione <gmaglione@redhat.com> - 1.7.0-1
+- Update to upstream version 1.7.0 [bz#2222221]
+* Thu Dec 22 2022 German Maglione <gmaglione@redhat.com> - 1.5.0-1
+- Update to upstream version 1.5.0 [bz#2123070]
+* Wed Jul 27 2022 Sergio Lopez <slp@redhat.com> - 1.4.0-1
+- Update to upstream version 1.4.0 [bz#2111356]
+* Mon Jun 27 2022 Sergio Lopez <slp@redhat.com> - 1.3.0-1
+- Update to upstream version 1.3.0 [bz#2077854]
 * Fri Feb 18 2022 Sergio Lopez <slp@redhat.com> - 1.1.0-3
 - Restore "Provides: virtiofsd", despite rpmdeplint complains, to