AlmaLinux changes: Enable Btrfs support

This commit is contained in:
Neal Gompa 2025-10-24 03:42:00 +00:00 committed by root
commit 861b4fd80b
25 changed files with 33 additions and 2999 deletions

2
.gitignore vendored
View File

@ -163,3 +163,5 @@
/blivet-3.9.0.tar.gz
/blivet-3.10.0.tar.gz
/blivet-3.10.0-tests.tar.gz
/blivet-3.13.0.tar.gz
/blivet-3.13.0-tests.tar.gz

View File

@ -1,6 +1,6 @@
From 8b527ee85b6594d506d445ff4c30579cccef8ae6 Mon Sep 17 00:00:00 2001
From 6af9f6b4a7f16b24e26abbdda3d1ec9117549863 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 11 Nov 2020 13:24:55 +0100
Date: Thu, 26 Sep 2024 09:36:45 +0200
Subject: [PATCH] Remove btrfs from requested libblockdev plugins
---
@ -8,21 +8,21 @@ Subject: [PATCH] Remove btrfs from requested libblockdev plugins
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/blivet/__init__.py b/blivet/__init__.py
index 14bd5c61..1410d78e 100644
index f4b8f317..f23118d7 100644
--- a/blivet/__init__.py
+++ b/blivet/__init__.py
@@ -63,9 +63,9 @@ gi.require_version("BlockDev", "3.0")
from gi.repository import GLib
from gi.repository import BlockDev as blockdev
if arch.is_s390():
- _REQUESTED_PLUGIN_NAMES = set(("lvm", "btrfs", "swap", "crypto", "loop", "mdraid", "mpath", "dm", "s390", "nvme", "fs"))
+ _REQUESTED_PLUGIN_NAMES = set(("lvm", "swap", "crypto", "loop", "mdraid", "mpath", "dm", "s390", "nvme", "fs"))
- _REQUESTED_PLUGIN_NAMES = set(("lvm", "btrfs", "swap", "crypto", "loop", "mdraid", "mpath", "dm", "s390", "nvme", "fs", "part"))
+ _REQUESTED_PLUGIN_NAMES = set(("lvm", "swap", "crypto", "loop", "mdraid", "mpath", "dm", "s390", "nvme", "fs", "part"))
else:
- _REQUESTED_PLUGIN_NAMES = set(("lvm", "btrfs", "swap", "crypto", "loop", "mdraid", "mpath", "dm", "nvme", "fs"))
+ _REQUESTED_PLUGIN_NAMES = set(("lvm", "swap", "crypto", "loop", "mdraid", "mpath", "dm", "nvme", "fs"))
_requested_plugins = blockdev.plugin_specs_from_names(_REQUESTED_PLUGIN_NAMES)
try:
--
2.26.2
- _REQUESTED_PLUGIN_NAMES = set(("lvm", "btrfs", "swap", "crypto", "loop", "mdraid", "mpath", "dm", "nvme", "fs", "part"))
+ _REQUESTED_PLUGIN_NAMES = set(("lvm", "swap", "crypto", "loop", "mdraid", "mpath", "dm", "nvme", "fs", "part"))
blockdev.utils_set_log_level(syslog.LOG_INFO)
--
2.46.1

View File

@ -1,49 +0,0 @@
From 95f565d56d21dd7e0d9033236a20be735665e0af Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 14 May 2024 12:35:12 +0200
Subject: [PATCH] Fix skipping btrfs calls when libblockdev btrfs plugin is
missing
We need to check for the btrfs plugin in the set of available
plugins, not in the missing plugins, because on RHEL the plugin is
not missing, it's not even requested.
---
blivet/devices/btrfs.py | 4 ++--
tests/unit_tests/devices_test/btrfs_test.py | 2 +-
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/blivet/devices/btrfs.py b/blivet/devices/btrfs.py
index c446e7e59..0cbaa44d9 100644
--- a/blivet/devices/btrfs.py
+++ b/blivet/devices/btrfs.py
@@ -40,7 +40,7 @@
from ..formats import get_format, DeviceFormat
from ..size import Size
from ..mounts import mounts_cache
-from .. import missing_plugs
+from .. import avail_plugs
import logging
log = logging.getLogger("blivet")
@@ -382,7 +382,7 @@ def _list_subvolumes(self, mountpoint, snapshots_only=False):
def list_subvolumes(self, snapshots_only=False):
subvols = []
- if "btrfs" in missing_plugs:
+ if "btrfs" not in avail_plugs:
log.debug("not listing btrfs subvolumes, libblockdev btrfs plugin is missing")
return subvols
diff --git a/tests/unit_tests/devices_test/btrfs_test.py b/tests/unit_tests/devices_test/btrfs_test.py
index 785afd209..41731e91e 100644
--- a/tests/unit_tests/devices_test/btrfs_test.py
+++ b/tests/unit_tests/devices_test/btrfs_test.py
@@ -83,7 +83,7 @@ def test_btrfs_list_subvolumes(self):
# mounted but libblockdev btrfs plugin not available
blockdev.reset_mock()
- with patch("blivet.devices.btrfs.missing_plugs", new={"btrfs"}):
+ with patch("blivet.devices.btrfs.avail_plugs", new={"lvm"}):
vol.list_subvolumes()
blockdev.list_subvolumes.assert_not_called()
blockdev.get_default_subvolume_id.assert_not_called()

View File

@ -1,32 +0,0 @@
From b7940496b4f8efdccb9b4097b496b0d9b2af1eea Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 18 Jun 2024 14:47:39 +0200
Subject: [PATCH] tests: Try waiting after partition creation for XFS resize
test
The test randomly fails to find the newly created partition so
let's try waiting a bit with udev settle.
---
tests/storage_tests/formats_test/fs_test.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/tests/storage_tests/formats_test/fs_test.py b/tests/storage_tests/formats_test/fs_test.py
index f3c9fef5a..5da4a9339 100644
--- a/tests/storage_tests/formats_test/fs_test.py
+++ b/tests/storage_tests/formats_test/fs_test.py
@@ -11,6 +11,7 @@
from blivet.devices import PartitionDevice, DiskDevice
from blivet.flags import flags
from blivet.util import capture_output
+from blivet import udev
from .loopbackedtestcase import LoopBackedTestCase
@@ -149,6 +150,7 @@ def _create_partition(self, disk, size):
pend = pstart + int(Size(size) / disk.format.parted_device.sectorSize)
disk.format.add_partition(pstart, pend, parted.PARTITION_NORMAL)
disk.format.parted_disk.commit()
+ udev.settle()
part = disk.format.parted_disk.getPartitionBySector(pstart)
device = PartitionDevice(os.path.basename(part.path))

View File

@ -1,43 +0,0 @@
From 52c9699ecad592e35e0cd3841744f8cb8e2b2364 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 12 Jun 2024 16:51:43 +0200
Subject: [PATCH] Run mkfs.xfs with the force (-f) option by default
We stopped adding the force option when switching to libblockdev
in fa3add214ba8edf1965bc851b85f2f2a6a3ea107. This was not
intentional and the missing force option is already causing issues
when running mkfs.xfs on misaligned devices.
---
blivet/tasks/fsmkfs.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/blivet/tasks/fsmkfs.py b/blivet/tasks/fsmkfs.py
index 096b02295..45314ea89 100644
--- a/blivet/tasks/fsmkfs.py
+++ b/blivet/tasks/fsmkfs.py
@@ -241,6 +241,7 @@ class FSBlockDevMkfs(task.BasicApplication, FSMkfsTask, metaclass=abc.ABCMeta):
can_set_uuid = False
can_label = False
fstype = None
+ force = False
def do_task(self, options=None, label=False, set_uuid=False, nodiscard=False):
"""Create the format on the device and label if possible and desired.
@@ -277,7 +278,8 @@ def do_task(self, options=None, label=False, set_uuid=False, nodiscard=False):
try:
bd_options = BlockDev.FSMkfsOptions(label=self.fs.label if label else None,
uuid=self.fs.uuid if set_uuid else None,
- no_discard=self.fs._mkfs_nodiscard if nodiscard else False)
+ no_discard=self.fs._mkfs_nodiscard if nodiscard else False,
+ force=self.force)
BlockDev.fs.mkfs(self.fs.device, self.fstype, bd_options, extra={k: '' for k in create_options})
except BlockDev.FSError as e:
raise FSError(str(e))
@@ -331,6 +333,7 @@ class XFSMkfs(FSBlockDevMkfs):
can_nodiscard = True
can_set_uuid = True
can_label = True
+ force = True
class F2FSMkfs(FSBlockDevMkfs):

View File

@ -1,742 +0,0 @@
From 492122f34fe0ee5d0c7bce7f3dd2ce0ca6e3e9f2 Mon Sep 17 00:00:00 2001
From: Steffen Maier <maier@linux.ibm.com>
Date: Fri, 27 Jan 2023 22:01:23 +0100
Subject: [PATCH 1/7] blivet/zfcp: drop modprobe alias, which is superfluous
since udev in RHEL6
Signed-off-by: Steffen Maier <maier@linux.ibm.com>
---
blivet/zfcp.py | 3 ---
1 file changed, 3 deletions(-)
diff --git a/blivet/zfcp.py b/blivet/zfcp.py
index a2b7facb..cd765d82 100644
--- a/blivet/zfcp.py
+++ b/blivet/zfcp.py
@@ -555,9 +555,6 @@ class zFCP:
f.write("%s\n" % (d,))
f.close()
- f = open(root + "/etc/modprobe.conf", "a")
- f.write("alias scsi_hostadapter zfcp\n")
- f.close()
# Create ZFCP singleton
--
2.45.2
From a49fdf291acad957675472f5c27be9e5269c199a Mon Sep 17 00:00:00 2001
From: Steffen Maier <maier@linux.ibm.com>
Date: Tue, 28 Feb 2023 17:23:32 +0100
Subject: [PATCH 2/7] blivet/zfcp: remove code broken since zfcp automatic LUN
scan
The old existing test preceding the removed code was only designed for the
old zfcp before it got automatic LUN scan. Hence, the test is incomplete.
With zfcp auto LUN scan, zfcp can just have SCSI devices without any
zfcp unit representation in sysfs.
Do not bother cleaning up an unused FCP device and just remove the code.
Note: Do not confuse zfcp auto port scan with zfcp auto LUN scan.
Signed-off-by: Steffen Maier <maier@linux.ibm.com>
---
blivet/zfcp.py | 3 ---
1 file changed, 3 deletions(-)
diff --git a/blivet/zfcp.py b/blivet/zfcp.py
index cd765d82..e2c0dc2d 100644
--- a/blivet/zfcp.py
+++ b/blivet/zfcp.py
@@ -384,9 +384,6 @@ class ZFCPDeviceFullPath(ZFCPDeviceBase):
self.devnum, luns[0])
return True
- # no other WWPNs/LUNs exists for this device number, it's safe to bring it offline
- self._set_zfcp_device_offline()
-
return True
--
2.45.2
From 19285bb785ccbfcd72fd1f3242c56e9d06ba74d8 Mon Sep 17 00:00:00 2001
From: Steffen Maier <maier@linux.ibm.com>
Date: Fri, 27 Jan 2023 22:17:45 +0100
Subject: [PATCH 3/7] blivet/zfcp: drop old zfcp port handling gone from the
kernel long ago
Gone since 2008 Linux kernel v2.6.27 commit 235f7f25f492 ("[SCSI] zfcp:
Remove sysfs attribute port_add").
Signed-off-by: Steffen Maier <maier@linux.ibm.com>
---
blivet/zfcp.py | 65 --------------------------------------------------
1 file changed, 65 deletions(-)
diff --git a/blivet/zfcp.py b/blivet/zfcp.py
index e2c0dc2d..82751382 100644
--- a/blivet/zfcp.py
+++ b/blivet/zfcp.py
@@ -240,7 +240,6 @@ class ZFCPDeviceFullPath(ZFCPDeviceBase):
super().online_device()
- portadd = "%s/%s/port_add" % (zfcpsysfs, self.devnum)
portdir = "%s/%s/%s" % (zfcpsysfs, self.devnum, self.wwpn)
unitadd = "%s/unit_add" % (portdir)
unitdir = "%s/%s" % (portdir, self.fcplun)
@@ -253,31 +252,6 @@ class ZFCPDeviceFullPath(ZFCPDeviceBase):
log.warning("zFCP device %s in NPIV mode brought online. All LUNs will be activated "
"automatically although WWPN and LUN have been provided.", self.devnum)
- # create the sysfs directory for the WWPN/port
- if not os.path.exists(portdir):
- if os.path.exists(portadd):
- # older zfcp sysfs interface
- try:
- logged_write_line_to_file(portadd, self.wwpn)
- udev.settle()
- except OSError as e:
- raise ValueError(_("Could not add WWPN %(wwpn)s to zFCP "
- "device %(devnum)s (%(e)s).")
- % {'wwpn': self.wwpn,
- 'devnum': self.devnum,
- 'e': e})
- else:
- # newer zfcp sysfs interface with auto port scan
- raise ValueError(_("WWPN %(wwpn)s not found at zFCP device "
- "%(devnum)s.") % {'wwpn': self.wwpn,
- 'devnum': self.devnum})
- else:
- if os.path.exists(portadd):
- # older zfcp sysfs interface
- log.info("WWPN %(wwpn)s at zFCP device %(devnum)s already "
- "there.", {'wwpn': self.wwpn,
- 'devnum': self.devnum})
-
# create the sysfs directory for the LUN/unit
if not os.path.exists(unitdir):
try:
@@ -323,10 +297,7 @@ class ZFCPDeviceFullPath(ZFCPDeviceBase):
def offline_device(self):
"""Remove the zFCP device from the system."""
- portadd = "%s/%s/port_add" % (zfcpsysfs, self.devnum)
- portremove = "%s/%s/port_remove" % (zfcpsysfs, self.devnum)
unitremove = "%s/%s/%s/unit_remove" % (zfcpsysfs, self.devnum, self.wwpn)
- portdir = "%s/%s/%s" % (zfcpsysfs, self.devnum, self.wwpn)
devdir = "%s/%s" % (zfcpsysfs, self.devnum)
try:
@@ -348,42 +319,6 @@ class ZFCPDeviceFullPath(ZFCPDeviceBase):
% {'fcplun': self.fcplun, 'wwpn': self.wwpn,
'devnum': self.devnum, 'e': e})
- # remove the WWPN only if there are no other LUNs attached
- if os.path.exists(portadd):
- # only try to remove ports with older zfcp sysfs interface
- for lun in os.listdir(portdir):
- if lun.startswith("0x") and \
- os.path.isdir(os.path.join(portdir, lun)):
- log.info("Not removing WWPN %s at zFCP device %s since port still has other LUNs, e.g. %s.",
- self.wwpn, self.devnum, lun)
- return True
-
- try:
- logged_write_line_to_file(portremove, self.wwpn)
- except OSError as e:
- raise ValueError(_("Could not remove WWPN %(wwpn)s on zFCP "
- "device %(devnum)s (%(e)s).")
- % {'wwpn': self.wwpn,
- 'devnum': self.devnum, 'e': e})
-
- # check if there are other WWPNs existing for the zFCP device number
- if os.path.exists(portadd):
- # older zfcp sysfs interface
- for port in os.listdir(devdir):
- if port.startswith("0x") and \
- os.path.isdir(os.path.join(devdir, port)):
- log.info("Not setting zFCP device %s offline since it still has other ports, e.g. %s.",
- self.devnum, port)
- return True
- else:
- # newer zfcp sysfs interface with auto port scan
- luns = glob.glob("%s/0x????????????????/0x????????????????"
- % (devdir,))
- if len(luns) != 0:
- log.info("Not setting zFCP device %s offline since it still has other LUNs, e.g. %s.",
- self.devnum, luns[0])
- return True
-
return True
--
2.45.2
From cc67470805d871ff6ec09d554fb4b65a375e5b59 Mon Sep 17 00:00:00 2001
From: Steffen Maier <maier@linux.ibm.com>
Date: Tue, 16 Jul 2024 10:21:00 +0200
Subject: [PATCH 4/7] blivet/zfcp: change to consolidated persistent device
config by zdev (#1802482,#1937049)
Implements the zfcp part of referenced bugs.
https://github.com/ibm-s390-linux/s390-tools/tree/master/zdev/
handles everything as of
ibm-s390-linux/s390-tools@06a30ae
("zdev/dracut: add rd.zfcp cmdline option handling").
It is no longer necessary to perform individual pre-req steps, such as
setting an FCP device online, when we want to attach a LUN. Just call
chzdev to configure zfcp LUNs and let it do what is necessary, including
cio_ignore handling and udev settle.
The spec file update reflects the new dependency on `chzdev` from the
s390 architecture specific sub-package s390utils-core. Actually, this
commit here only depends on `chzdev` in older versions already packaged
and shipped, so no version comparison necessary here.
Since chzdev now implicitly sets the FCP device online
and there is no more preceding explicit FCP device online,
move the path over-specification warning after the call to chzdev.
Otherwise, the FCP device could still be offline and its
port_type unknown, so has_auto_lun_scan() would get wrong information
regarding the port_type being NPIV.
Anaconda handles the persistent config of all s390 device types as of
commit ("write persistent config of any (dasd,zfcp,znet) s390 devices to
sysroot"), so drop the special handling in zfcp.write().
Signed-off-by: Steffen Maier <maier@linux.ibm.com>
---
blivet/zfcp.py | 99 +++++++++-------------------------------------
python-blivet.spec | 1 +
2 files changed, 20 insertions(+), 80 deletions(-)
diff --git a/blivet/zfcp.py b/blivet/zfcp.py
index 82751382..38ab5668 100644
--- a/blivet/zfcp.py
+++ b/blivet/zfcp.py
@@ -104,8 +104,6 @@ class ZFCPDeviceBase(ABC):
if not self.devnum:
raise ValueError(_("You have not specified a device number or the number is invalid"))
- self._device_online_path = os.path.join(zfcpsysfs, self.devnum, "online")
-
# Force str and unicode types in case any of the properties are unicode
def _to_string(self):
return str(self.devnum)
@@ -113,20 +111,6 @@ class ZFCPDeviceBase(ABC):
def __str__(self):
return self._to_string()
- def _free_device(self):
- """Remove the device from the I/O ignore list to make it visible to the system.
-
- :raises: ValueError if the device cannot be removed from the I/O ignore list
- """
-
- if not os.path.exists(self._device_online_path):
- log.info("Freeing zFCP device %s", self.devnum)
- util.run_program(["zfcp_cio_free", "-d", self.devnum])
-
- if not os.path.exists(self._device_online_path):
- raise ValueError(_("zFCP device %s not found, not even in device ignore list.") %
- (self.devnum,))
-
def _set_zfcp_device_online(self):
"""Set the zFCP device online.
@@ -134,10 +118,8 @@ class ZFCPDeviceBase(ABC):
"""
try:
- with open(self._device_online_path) as f:
- devonline = f.readline().strip()
- if devonline != "1":
- logged_write_line_to_file(self._device_online_path, "1")
+ util.run_program(["chzdev", "--enable", "zfcp-host", self.devnum,
+ "--yes", "--no-root-update", "--force"])
except OSError as e:
raise ValueError(_("Could not set zFCP device %(devnum)s "
"online (%(e)s).")
@@ -150,7 +132,8 @@ class ZFCPDeviceBase(ABC):
"""
try:
- logged_write_line_to_file(self._device_online_path, "0")
+ util.run_program(["chzdev", "--disable", "zfcp-host", self.devnum,
+ "--yes", "--no-root-update", "--force"])
except OSError as e:
raise ValueError(_("Could not set zFCP device %(devnum)s "
"offline (%(e)s).")
@@ -163,6 +146,7 @@ class ZFCPDeviceBase(ABC):
:returns: True or False
"""
+ @abstractmethod
def online_device(self):
"""Initialize the device and make its storage block device(s) ready to use.
@@ -170,10 +154,6 @@ class ZFCPDeviceBase(ABC):
:raises: ValueError if the device cannot be initialized
"""
- self._free_device()
- self._set_zfcp_device_online()
- return True
-
def offline_scsi_device(self):
"""Find SCSI devices associated to the zFCP device and remove them from the system."""
@@ -238,25 +218,15 @@ class ZFCPDeviceFullPath(ZFCPDeviceBase):
:raises: ValueError if the device cannot be initialized
"""
- super().online_device()
-
portdir = "%s/%s/%s" % (zfcpsysfs, self.devnum, self.wwpn)
- unitadd = "%s/unit_add" % (portdir)
unitdir = "%s/%s" % (portdir, self.fcplun)
- failed = "%s/failed" % (unitdir)
-
- # Activating using devnum, WWPN, and LUN despite available zFCP auto LUN scan should still
- # be possible as this method was used as a workaround until the support for zFCP auto LUN
- # scan devices has been implemented. Just log a warning message and continue.
- if has_auto_lun_scan(self.devnum):
- log.warning("zFCP device %s in NPIV mode brought online. All LUNs will be activated "
- "automatically although WWPN and LUN have been provided.", self.devnum)
# create the sysfs directory for the LUN/unit
if not os.path.exists(unitdir):
try:
- logged_write_line_to_file(unitadd, self.fcplun)
- udev.settle()
+ util.run_program(["chzdev", "--enable", "zfcp-lun",
+ "%s:%s:%s" % (self.devnum, self.wwpn, self.fcplun),
+ "--yes", "--no-root-update", "--force"])
except OSError as e:
raise ValueError(_("Could not add LUN %(fcplun)s to WWPN "
"%(wwpn)s on zFCP device %(devnum)s "
@@ -270,48 +240,23 @@ class ZFCPDeviceFullPath(ZFCPDeviceBase):
'wwpn': self.wwpn,
'devnum': self.devnum})
- # check the state of the LUN
- fail = "0"
- try:
- f = open(failed, "r")
- fail = f.readline().strip()
- f.close()
- except OSError as e:
- raise ValueError(_("Could not read failed attribute of LUN "
- "%(fcplun)s at WWPN %(wwpn)s on zFCP device "
- "%(devnum)s (%(e)s).")
- % {'fcplun': self.fcplun,
- 'wwpn': self.wwpn,
- 'devnum': self.devnum,
- 'e': e})
- if fail != "0":
- self.offline_device()
- raise ValueError(_("Failed LUN %(fcplun)s at WWPN %(wwpn)s on "
- "zFCP device %(devnum)s removed again.")
- % {'fcplun': self.fcplun,
- 'wwpn': self.wwpn,
- 'devnum': self.devnum})
+ # Activating using devnum, WWPN, and LUN despite available zFCP auto LUN scan should still
+ # be possible as this method was used as a workaround until the support for zFCP auto LUN
+ # scan devices has been implemented. Just log a warning message and continue.
+ if has_auto_lun_scan(self.devnum):
+ log.warning("zFCP device %s in NPIV mode brought online. All LUNs will be activated "
+ "automatically although WWPN and LUN have been provided.", self.devnum)
return True
def offline_device(self):
"""Remove the zFCP device from the system."""
- unitremove = "%s/%s/%s/unit_remove" % (zfcpsysfs, self.devnum, self.wwpn)
- devdir = "%s/%s" % (zfcpsysfs, self.devnum)
-
- try:
- self.offline_scsi_device()
- except OSError as e:
- raise ValueError(_("Could not correctly delete SCSI device of "
- "zFCP %(devnum)s %(wwpn)s %(fcplun)s "
- "(%(e)s).")
- % {'devnum': self.devnum, 'wwpn': self.wwpn,
- 'fcplun': self.fcplun, 'e': e})
-
# remove the LUN
try:
- logged_write_line_to_file(unitremove, self.fcplun)
+ util.run_program(["chzdev", "--disable", "zfcp-lun",
+ "%s:%s:%s" % (self.devnum, self.wwpn, self.fcplun),
+ "--yes", "--no-root-update", "--force"])
except OSError as e:
raise ValueError(_("Could not remove LUN %(fcplun)s at WWPN "
"%(wwpn)s on zFCP device %(devnum)s "
@@ -340,7 +285,7 @@ class ZFCPDeviceAutoLunScan(ZFCPDeviceBase):
:raises: ValueError if the device cannot be initialized
"""
- super().online_device()
+ self._set_zfcp_device_online()
if not has_auto_lun_scan(self.devnum):
raise ValueError(_("zFCP device %s cannot use auto LUN scan.") % self)
@@ -480,13 +425,7 @@ class zFCP:
log.warning("%s", str(e))
def write(self, root):
- if len(self.fcpdevs) == 0:
- return
- f = open(root + zfcpconf, "w")
- for d in self.fcpdevs:
- f.write("%s\n" % (d,))
- f.close()
-
+ pass
# Create ZFCP singleton
diff --git a/python-blivet.spec b/python-blivet.spec
index 38a389ae..ac8d2841 100644
--- a/python-blivet.spec
+++ b/python-blivet.spec
@@ -70,6 +70,7 @@ Recommends: libblockdev-swap >= %{libblockdevver}
%ifarch s390 s390x
Recommends: libblockdev-s390 >= %{libblockdevver}
+Requires: s390utils-core
%endif
Requires: python3-bytesize >= %{libbytesizever}
--
2.45.2
From 6c4e57d78562962f014970c32381891c71f05e3b Mon Sep 17 00:00:00 2001
From: Steffen Maier <maier@linux.ibm.com>
Date: Tue, 31 Jan 2023 12:01:31 +0100
Subject: [PATCH 5/7] blivet/zfcp: remove no longer used read_config
functionality (#1802482,#1937049)
Implements the zfcp part of referenced bugs.
Since
https://github.com/rhinstaller/anaconda/commit/87ab1ab2a3aa8b95cd75b2f37e0881e5f57656a5
("Support cio_ignore functionality for zFCP devices (#533492)"),
/etc/zfcp.conf replaced /tmp/fcpconfig.
Since
https://github.com/rhinstaller/anaconda/commit/011ea0a1779459ed20990ddf52166aa75a9c1382
("Remove linuxrc.s390"), /etc/zfcp.conf only exists if the user specified
dracut cmdline parameter rd.zfcp=.
https://github.com/ibm-s390-linux/s390-tools/tree/master/zdev/
handles parsing of rd.zfcp= without /etc/zfcp.conf as of
https://github.com/ibm-s390-linux/s390-tools/commit/06a30ae529a5d6ad2369ed81da056bf3a6147bb6
("zdev/dracut: add rd.zfcp cmdline option handling").
https://src.fedoraproject.org/rpms/s390utils.git
no longer writes /etc/zfcp.conf during deprecated parsing of rd.zfcp=
as of commit
("zfcp: migrate to consolidated persistent device config with zdev")
Hence, nothing populates /etc/zfcp.conf during installer boot anymore.
Anaconda imports configuration for all s390 device types as of
commit ("write persistent config of any (dasd,zfcp,znet) s390 devices to
sysroot"). The only remaining import source is from dracut boot parameters.
Signed-off-by: Steffen Maier <maier@linux.ibm.com>
---
blivet/zfcp.py | 60 ++++++++------------------------------------------
1 file changed, 9 insertions(+), 51 deletions(-)
diff --git a/blivet/zfcp.py b/blivet/zfcp.py
index 38ab5668..a33eb48b 100644
--- a/blivet/zfcp.py
+++ b/blivet/zfcp.py
@@ -45,7 +45,6 @@ def logged_write_line_to_file(fn, value):
zfcpsysfs = "/sys/bus/ccw/drivers/zfcp"
scsidevsysfs = "/sys/bus/scsi/devices"
-zfcpconf = "/etc/zfcp.conf"
def _is_lun_scan_allowed():
@@ -323,18 +322,22 @@ class zFCP:
""" ZFCP utility class.
- This class will automatically online to ZFCP drives configured in
- /tmp/fcpconfig when the startup() method gets called. It can also be
- used to manually configure ZFCP devices through the add_fcp() method.
+ This class is used to manually configure ZFCP devices through the
+ add_fcp() method, which is used by the anaconda GUI or by kickstart.
- As this class needs to make sure that /tmp/fcpconfig configured
+ As this class needs to make sure that configured
drives are only onlined once and as it keeps a global list of all ZFCP
devices it is implemented as a Singleton.
+
+ In particular, this class does not create objects for any other method
+ that enables ZFCP devices such as rd.zfcp= or any device auto
+ configuration. These methods make zfcp-attached SCSI disk block devices
+ available, which ZFCPDiskDevice [devices/disk.py] can directly
+ discover.
"""
def __init__(self):
self.fcpdevs = set()
- self.has_read_config = False
self.down = True
# So that users can write zfcp() to get the singleton instance
@@ -345,46 +348,6 @@ class zFCP:
# pylint: disable=unused-argument
return self
- def read_config(self):
- try:
- f = open(zfcpconf, "r")
- except OSError:
- log.info("no %s; not configuring zfcp", zfcpconf)
- return
-
- lines = [x.strip().lower() for x in f.readlines()]
- f.close()
-
- for line in lines:
- if line.startswith("#") or line == '':
- continue
-
- fields = line.split()
-
- # zFCP auto LUN scan available
- if len(fields) == 1:
- devnum = fields[0]
- wwpn = None
- fcplun = None
- elif len(fields) == 3:
- devnum = fields[0]
- wwpn = fields[1]
- fcplun = fields[2]
- elif len(fields) == 5:
- # support old syntax of:
- # devno scsiid wwpn scsilun fcplun
- devnum = fields[0]
- wwpn = fields[2]
- fcplun = fields[4]
- else:
- log.warning("Invalid line found in %s: %s", zfcpconf, line)
- continue
-
- try:
- self.add_fcp(devnum, wwpn, fcplun)
- except ValueError as e:
- log.warning("%s", str(e))
-
def add_fcp(self, devnum, wwpn=None, fcplun=None):
if wwpn and fcplun:
d = ZFCPDeviceFullPath(devnum, wwpn, fcplun)
@@ -410,11 +373,6 @@ class zFCP:
if not self.down:
return
self.down = False
- if not self.has_read_config:
- self.read_config()
- self.has_read_config = True
- # read_config calls add_fcp which calls online_device already
- return
if len(self.fcpdevs) == 0:
return
--
2.45.2
From e119e1e48a8a8bc83ec42d3c6ab31fac7c4a98eb Mon Sep 17 00:00:00 2001
From: Steffen Maier <maier@linux.ibm.com>
Date: Tue, 28 Feb 2023 17:48:04 +0100
Subject: [PATCH 6/7] respect explicit user choice for full path in zfcp
dracut_setup_args
Complements RHBZ#1937030.
Signed-off-by: Steffen Maier <maier@linux.ibm.com>
---
blivet/devices/disk.py | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index 4ae4a845..edbf41c4 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -498,7 +498,12 @@ class ZFCPDiskDevice(DiskDevice):
from ..zfcp import has_auto_lun_scan
# zFCP auto LUN scan needs only the device ID
- if has_auto_lun_scan(self.hba_id):
+ # If the user explicitly over-specified with a full path configuration
+ # respect this choice and emit a full path specification nonetheless.
+ errorlevel = util.run_program(["lszdev", "zfcp-lun", "--configured",
+ "%s:%s:%s" % (self.hba_id, self.wwpn,
+ self.fcp_lun)])
+ if has_auto_lun_scan(self.hba_id) and errorlevel != 0:
dracut_args = set(["rd.zfcp=%s" % self.hba_id])
else:
dracut_args = set(["rd.zfcp=%s,%s,%s" % (self.hba_id, self.wwpn, self.fcp_lun,)])
--
2.45.2
From 4c2d39c4fcea9361b60d99327a9eb8b9d89078fb Mon Sep 17 00:00:00 2001
From: Steffen Maier <maier@linux.ibm.com>
Date: Tue, 16 Jul 2024 10:22:55 +0200
Subject: [PATCH 7/7] DASDDevice: dracut_setup_args() without deprecated
dasd.conf (#1802482,#1937049)
Implements the dasd part of referenced bugs.
Depends on
ibm-s390-linux/s390-tools@689b894
("zdev: add helper to convert from zdev config to dasd_mod.dasd").
The spec file update reflects the new dependency on `zdev-to-dasd_mod.dasd`
in the new v2.31.0 of the s390 architecture specific sub-package
s390utils-core.
Delegate the generation of rd.dasd statements to a helper tool from
s390-tools, which gets its low-level config information from the
consolidated persistent configuration mechanism using chzdev.
Signed-off-by: Steffen Maier <maier@linux.ibm.com>
---
blivet/devices/disk.py | 56 +++-----------------------------
blivet/populator/helpers/disk.py | 3 --
python-blivet.spec | 3 +-
3 files changed, 6 insertions(+), 56 deletions(-)
diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index edbf41c4..a849e7ac 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -530,67 +530,19 @@ class DASDDevice(DiskDevice):
:type format: :class:`~.formats.DeviceFormat` or a subclass of it
:keyword str wwn: the disk's WWN
:keyword busid: bus ID
- :keyword opts: options
- :type opts: dict with option name keys and option value values
"""
self.busid = kwargs.pop('busid')
- self.opts = kwargs.pop('opts')
DiskDevice.__init__(self, device, **kwargs)
@property
def description(self):
return "DASD device %s" % self.busid
- def get_opts(self):
- return ["%s=%s" % (k, v) for k, v in self.opts.items() if v == '1']
-
def dracut_setup_args(self):
- conf = "/etc/dasd.conf"
- line = None
- if os.path.isfile(conf):
- f = open(conf)
- # grab the first line that starts with our bus_id
- for l in f.readlines():
- if l.startswith(self.busid):
- line = l.rstrip()
- break
-
- f.close()
-
- # See if we got a line. If not, grab our get_opts
- if not line:
- line = self.busid
- for devopt in self.get_opts():
- line += " %s" % devopt
-
- # Create a translation mapping from dasd.conf format to module format
- translate = {'use_diag': 'diag',
- 'readonly': 'ro',
- 'erplog': 'erplog',
- 'failfast': 'failfast'}
-
- # this is a really awkward way of determining if the
- # feature found is actually desired (1, not 0), plus
- # translating that feature into the actual kernel module
- # value
- opts = []
- parts = line.split()
- for chunk in parts[1:]:
- try:
- feat, val = chunk.split('=')
- if int(val):
- opts.append(translate[feat])
- except (ValueError, KeyError):
- # If we don't know what the feature is (feat not in translate
- # or if we get a val that doesn't cleanly convert to an int
- # we can't do anything with it.
- log.warning("failed to parse dasd feature %s", chunk)
-
- if opts:
- return set(["rd.dasd=%s(%s)" % (self.busid,
- ":".join(opts))])
- else:
- return set(["rd.dasd=%s" % self.busid])
+ devspec = util.capture_output(["/lib/s390-tools/zdev-to-dasd_mod.dasd",
+ "persistent", self.busid]).strip()
+ # strip to remove trailing newline, which must not appear in zipl BLS
+ return set(["rd.dasd=%s" % devspec])
NVMeController = namedtuple("NVMeController", ["name", "serial", "nvme_ver", "id", "subsysnqn",
diff --git a/blivet/populator/helpers/disk.py b/blivet/populator/helpers/disk.py
index 3ac3f408..fc47f62a 100644
--- a/blivet/populator/helpers/disk.py
+++ b/blivet/populator/helpers/disk.py
@@ -204,9 +204,6 @@ class DASDDevicePopulator(DiskDevicePopulator):
def _get_kwargs(self):
kwargs = super(DASDDevicePopulator, self)._get_kwargs()
kwargs["busid"] = udev.device_get_dasd_bus_id(self.data)
- kwargs["opts"] = {}
- for attr in ['readonly', 'use_diag', 'erplog', 'failfast']:
- kwargs["opts"][attr] = udev.device_get_dasd_flag(self.data, attr)
log.info("%s is a dasd device", udev.device_get_name(self.data))
return kwargs
diff --git a/python-blivet.spec b/python-blivet.spec
index ac8d2841..81177020 100644
--- a/python-blivet.spec
+++ b/python-blivet.spec
@@ -21,6 +21,7 @@ Source1: http://github.com/storaged-project/blivet/archive/%{realname}-%{realver
%global libblockdevver 3.0
%global libbytesizever 0.3
%global pyudevver 0.18
+%global s390utilscorever 2.31.0
BuildArch: noarch
@@ -70,7 +71,7 @@ Recommends: libblockdev-swap >= %{libblockdevver}
%ifarch s390 s390x
Recommends: libblockdev-s390 >= %{libblockdevver}
-Requires: s390utils-core
+Requires: s390utils-core >= %{s390utilscorever}
%endif
Requires: python3-bytesize >= %{libbytesizever}
--
2.45.2

View File

@ -1,27 +0,0 @@
From 7677fc312b821a9c67750220f2494d06f2357780 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 18 Sep 2024 15:30:05 +0200
Subject: [PATCH] Fix checking for NVMe plugin availability
---
blivet/nvme.py | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/blivet/nvme.py b/blivet/nvme.py
index 4309dea3..72a47070 100644
--- a/blivet/nvme.py
+++ b/blivet/nvme.py
@@ -76,6 +76,10 @@ class NVMe(object):
return False
if not hasattr(blockdev.NVMETech, "FABRICS"):
return False
+ try:
+ blockdev.nvme.is_tech_avail(blockdev.NVMETech.FABRICS, 0) # pylint: disable=no-member
+ except (blockdev.BlockDevNotImplementedError, blockdev.NVMEError):
+ return False
return True
def startup(self):
--
2.46.1

View File

@ -1,30 +0,0 @@
From ad7966a456224f22729c55616f2c8c73321654c7 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 24 Oct 2024 12:18:58 +0200
Subject: [PATCH] Align sizes up for growable LVs
Growable LVs usually start at minimum size, so adjusting the size down
can change it below the allowed minimum.
Resolves: RHEL-45180
Resolves: RHEL-45181
---
blivet/devices/lvm.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 661881ea..661dc6e0 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -2673,7 +2673,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
if not isinstance(newsize, Size):
raise AttributeError("new size must be of type Size")
- newsize = self.vg.align(newsize)
+ newsize = self.vg.align(newsize, roundup=self.growable)
log.debug("trying to set lv %s size to %s", self.name, newsize)
# Don't refuse to set size if we think there's not enough space in the
# VG for an existing LV, since it's existence proves there is enough
--
2.47.0

View File

@ -1,32 +0,0 @@
From c2177aa362d20278a0ebd5c25a776f952d83e5b1 Mon Sep 17 00:00:00 2001
From: Jan Pokorny <japokorn@redhat.com>
Date: Fri, 11 Oct 2024 17:17:41 +0200
Subject: [PATCH] Modified passphrase in stratis test
FIPS requires at least 8 chars long passphrase. Dummy passphrase used
in stratis test was too short causing encryption
tests with FIPS enabled to fail.
Changed passphrase.
fixes RHEL-45173, RHEL-8029
---
tests/storage_tests/devices_test/stratis_test.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/storage_tests/devices_test/stratis_test.py b/tests/storage_tests/devices_test/stratis_test.py
index 5aaa12d4..21c4d0f5 100644
--- a/tests/storage_tests/devices_test/stratis_test.py
+++ b/tests/storage_tests/devices_test/stratis_test.py
@@ -230,7 +230,7 @@ class StratisTestCaseClevis(StratisTestCaseBase):
blivet.partitioning.do_partitioning(self.storage)
pool = self.storage.new_stratis_pool(name="blivetTestPool", parents=[bd],
- encrypted=True, passphrase="abcde",
+ encrypted=True, passphrase="fipsneeds8chars",
clevis=StratisClevisConfig(pin="tang",
tang_url=self._tang_server,
tang_thumbprint=None))
--
2.45.0

View File

@ -1,58 +0,0 @@
From cd9e137a2e33165a8af3a7e4a3d2615adcabf659 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 8 Nov 2024 09:19:45 +0100
Subject: [PATCH 1/2] Fix "Modified passphrase in stratis test"
Follow up for 68708e347ef7b2f98312c76aa80366091dd4aade, two more
places where the passphrase is too short for FIPS mode.
Resolves: RHEL-45173
---
tests/storage_tests/devices_test/stratis_test.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tests/storage_tests/devices_test/stratis_test.py b/tests/storage_tests/devices_test/stratis_test.py
index 21c4d0f50..9792e0618 100644
--- a/tests/storage_tests/devices_test/stratis_test.py
+++ b/tests/storage_tests/devices_test/stratis_test.py
@@ -105,7 +105,7 @@ def test_stratis_encrypted(self):
blivet.partitioning.do_partitioning(self.storage)
pool = self.storage.new_stratis_pool(name="blivetTestPool", parents=[bd],
- encrypted=True, passphrase="abcde")
+ encrypted=True, passphrase="fipsneeds8chars")
self.storage.create_device(pool)
self.storage.do_it()
@@ -260,7 +260,7 @@ def test_stratis_encrypted_clevis_tpm(self):
blivet.partitioning.do_partitioning(self.storage)
pool = self.storage.new_stratis_pool(name="blivetTestPool", parents=[bd],
- encrypted=True, passphrase="abcde",
+ encrypted=True, passphrase="fipsneeds8chars",
clevis=StratisClevisConfig(pin="tpm2"))
self.storage.create_device(pool)
From ed10d97a5257c0f4fe8a2f53b0b2f787de91c355 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 8 Nov 2024 10:02:47 +0100
Subject: [PATCH 2/2] tests: Fix writing key file for LUKS tests
Related: RHEL-45173
---
tests/storage_tests/formats_test/luks_test.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/tests/storage_tests/formats_test/luks_test.py b/tests/storage_tests/formats_test/luks_test.py
index 93c8d7524..b8ec229ba 100644
--- a/tests/storage_tests/formats_test/luks_test.py
+++ b/tests/storage_tests/formats_test/luks_test.py
@@ -99,6 +99,7 @@ def test_setup_keyfile(self):
with tempfile.NamedTemporaryFile(prefix="blivet_test") as temp:
temp.write(b"password2")
+ temp.flush()
# create the luks format with both passphrase and keyfile
self.fmt._key_file = temp.name

View File

@ -1,122 +0,0 @@
From c8eff25e4c25183a76e97108d4607455cfc96ae2 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 14 Nov 2024 14:53:28 +0100
Subject: [PATCH] Make GPT default label type on all architectures
Exceptions are DASD drives on s390 and 32bit ARM. Everywhere else
GPT will be default.
Resolves: RHEL-52200
---
blivet/formats/disklabel.py | 11 +++++-----
.../formats_tests/disklabel_test.py | 20 +++++++++----------
2 files changed, 16 insertions(+), 15 deletions(-)
diff --git a/blivet/formats/disklabel.py b/blivet/formats/disklabel.py
index f2857f07..8b39dc79 100644
--- a/blivet/formats/disklabel.py
+++ b/blivet/formats/disklabel.py
@@ -220,12 +220,13 @@ class DiskLabel(DeviceFormat):
@classmethod
def get_platform_label_types(cls):
- label_types = ["msdos", "gpt"]
+ # always prefer gpt except for configurations below
+ label_types = ["gpt", "msdos"]
if arch.is_pmac():
label_types = ["mac"]
- # always prefer gpt on aarch64, x86_64, and EFI plats except 32-bit ARM
- elif arch.is_aarch64() or arch.is_x86(bits=64) or (arch.is_efi() and not arch.is_arm()):
- label_types = ["gpt", "msdos"]
+    # prefer msdos on 32-bit ARM
+ elif arch.is_arm():
+ label_types = ["msdos", "gpt"]
elif arch.is_s390():
label_types += ["dasd"]
@@ -254,7 +255,7 @@ class DiskLabel(DeviceFormat):
if arch.is_s390():
if blockdev.s390.dasd_is_fba(self.device):
# the device is FBA DASD
- return "msdos"
+ return "gpt"
elif self.parted_device.type == parted.DEVICE_DASD:
# the device is DASD
return "dasd"
diff --git a/tests/unit_tests/formats_tests/disklabel_test.py b/tests/unit_tests/formats_tests/disklabel_test.py
index 9f6e4542..823a3663 100644
--- a/tests/unit_tests/formats_tests/disklabel_test.py
+++ b/tests/unit_tests/formats_tests/disklabel_test.py
@@ -71,7 +71,7 @@ class DiskLabelTestCase(unittest.TestCase):
arch.is_pmac.return_value = False
arch.is_x86.return_value = False
- self.assertEqual(disklabel_class.get_platform_label_types(), ["msdos", "gpt"])
+ self.assertEqual(disklabel_class.get_platform_label_types(), ["gpt", "msdos"])
arch.is_pmac.return_value = True
self.assertEqual(disklabel_class.get_platform_label_types(), ["mac"])
@@ -100,7 +100,7 @@ class DiskLabelTestCase(unittest.TestCase):
arch.is_efi.return_value = False
arch.is_s390.return_value = True
- self.assertEqual(disklabel_class.get_platform_label_types(), ["msdos", "gpt", "dasd"])
+ self.assertEqual(disklabel_class.get_platform_label_types(), ["gpt", "msdos", "dasd"])
arch.is_s390.return_value = False
def test_label_type_size_check(self):
@@ -121,14 +121,14 @@ class DiskLabelTestCase(unittest.TestCase):
with patch.object(blivet.formats.disklabel.DiskLabel, "parted_device", new=PropertyMock(return_value=None)):
# no parted device -> no passing size check
- self.assertEqual(dl._label_type_size_check("msdos"), False)
+ self.assertEqual(dl._label_type_size_check("gpt"), False)
@patch("blivet.formats.disklabel.arch")
def test_best_label_type(self, arch):
"""
1. is always in _disklabel_types
2. is the default unless the device is too long for the default
- 3. is msdos for fba dasd on S390
+ 3. is gpt for fba dasd on S390
4. is dasd for non-fba dasd on S390
"""
dl = blivet.formats.disklabel.DiskLabel()
@@ -144,17 +144,17 @@ class DiskLabelTestCase(unittest.TestCase):
arch.is_x86.return_value = False
with patch.object(dl, '_label_type_size_check') as size_check:
- # size check passes for first type ("msdos")
+ # size check passes for first type ("gpt")
size_check.return_value = True
- self.assertEqual(dl._get_best_label_type(), "msdos")
+ self.assertEqual(dl._get_best_label_type(), "gpt")
# size checks all fail -> label type is None
size_check.return_value = False
self.assertEqual(dl._get_best_label_type(), None)
- # size check passes on second call -> label type is "gpt" (second in platform list)
+ # size check passes on second call -> label type is "msdos" (second in platform list)
size_check.side_effect = [False, True]
- self.assertEqual(dl._get_best_label_type(), "gpt")
+ self.assertEqual(dl._get_best_label_type(), "msdos")
arch.is_pmac.return_value = True
with patch.object(dl, '_label_type_size_check') as size_check:
@@ -175,10 +175,10 @@ class DiskLabelTestCase(unittest.TestCase):
size_check.return_value = True
with patch("blivet.formats.disklabel.blockdev.s390") as _s390:
_s390.dasd_is_fba.return_value = False
- self.assertEqual(dl._get_best_label_type(), "msdos")
+ self.assertEqual(dl._get_best_label_type(), "gpt")
_s390.dasd_is_fba.return_value = True
- self.assertEqual(dl._get_best_label_type(), "msdos")
+ self.assertEqual(dl._get_best_label_type(), "gpt")
_s390.dasd_is_fba.return_value = False
dl._parted_device.type = parted.DEVICE_DASD
--
2.47.0

View File

@ -1,108 +0,0 @@
From 041b320003687fb6c740f429a079dd7b7c8f7f6f Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 5 Dec 2024 14:28:21 +0100
Subject: [PATCH 1/2] Fix ppc64le name in devicelibs/gpt.py
Resolves: RHEL-70153
---
blivet/devicelibs/gpt.py | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/blivet/devicelibs/gpt.py b/blivet/devicelibs/gpt.py
index 4a6d364d7..c6dbf7b23 100644
--- a/blivet/devicelibs/gpt.py
+++ b/blivet/devicelibs/gpt.py
@@ -66,7 +66,7 @@
"parisc": uuid.UUID("1aacdb3b-5444-4138-bd9e-e5c2239b2346"),
"ppc": uuid.UUID("1de3f1ef-fa98-47b5-8dcd-4a860a654d78"),
"ppc64": uuid.UUID("912ade1d-a839-4913-8964-a10eee08fbd2"),
- "ppc64el": uuid.UUID("c31c45e6-3f39-412e-80fb-4809c4980599"),
+ "ppc64le": uuid.UUID("c31c45e6-3f39-412e-80fb-4809c4980599"),
"riscv32": uuid.UUID("60d5a7fe-8e7d-435c-b714-3dd8162144e1"),
"riscv64": uuid.UUID("72ec70a6-cf74-40e6-bd49-4bda08e8f224"),
"s390": uuid.UUID("08a7acea-624c-4a20-91e8-6e0fa67d23f9"),
@@ -87,7 +87,7 @@
"parisc": uuid.UUID("d212a430-fbc5-49f9-a983-a7feef2b8d0e"),
"ppc": uuid.UUID("98cfe649-1588-46dc-b2f0-add147424925"),
"ppc64": uuid.UUID("9225a9a3-3c19-4d89-b4f6-eeff88f17631"),
- "ppc64el": uuid.UUID("906bd944-4589-4aae-a4e4-dd983917446a"),
+ "ppc64le": uuid.UUID("906bd944-4589-4aae-a4e4-dd983917446a"),
"riscv32": uuid.UUID("ae0253be-1167-4007-ac68-43926c14c5de"),
"riscv64": uuid.UUID("b6ed5582-440b-4209-b8da-5ff7c419ea3d"),
"s390": uuid.UUID("7ac63b47-b25c-463b-8df8-b4a94e6c90e1"),
@@ -108,7 +108,7 @@
"parisc": uuid.UUID("15de6170-65d3-431c-916e-b0dcd8393f25"),
"ppc": uuid.UUID("1b31b5aa-add9-463a-b2ed-bd467fc857e7"),
"ppc64": uuid.UUID("f5e2c20c-45b2-4ffa-bce9-2a60737e1aaf"),
- "ppc64el": uuid.UUID("d4a236e7-e873-4c07-bf1d-bf6cf7f1c3c6"),
+ "ppc64le": uuid.UUID("d4a236e7-e873-4c07-bf1d-bf6cf7f1c3c6"),
"riscv32": uuid.UUID("3a112a75-8729-4380-b4cf-764d79934448"),
"riscv64": uuid.UUID("efe0f087-ea8d-4469-821a-4c2a96a8386a"),
"s390": uuid.UUID("3482388e-4254-435a-a241-766a065f9960"),
@@ -129,7 +129,7 @@
"parisc": uuid.UUID("dc4a4480-6917-4262-a4ec-db9384949f25"),
"ppc": uuid.UUID("7d14fec5-cc71-415d-9d6c-06bf0b3c3eaf"),
"ppc64": uuid.UUID("2c9739e2-f068-46b3-9fd0-01c5a9afbcca"),
- "ppc64el": uuid.UUID("15bb03af-77e7-4d4a-b12b-c0d084f7491c"),
+ "ppc64le": uuid.UUID("15bb03af-77e7-4d4a-b12b-c0d084f7491c"),
"riscv32": uuid.UUID("b933fb22-5c3f-4f91-af90-e2bb0fa50702"),
"riscv64": uuid.UUID("beaec34b-8442-439b-a40b-984381ed097d"),
"s390": uuid.UUID("cd0f869b-d0fb-4ca0-b141-9ea87cc78d66"),
@@ -150,7 +150,7 @@
"parisc": uuid.UUID("5843d618-ec37-48d7-9f12-cea8e08768b2"),
"ppc": uuid.UUID("df765d00-270e-49e5-bc75-f47bb2118b09"),
"ppc64": uuid.UUID("bdb528a5-a259-475f-a87d-da53fa736a07"),
- "ppc64el": uuid.UUID("ee2b9983-21e8-4153-86d9-b6901a54d1ce"),
+ "ppc64le": uuid.UUID("ee2b9983-21e8-4153-86d9-b6901a54d1ce"),
"riscv32": uuid.UUID("cb1ee4e3-8cd0-4136-a0a4-aa61a32e8730"),
"riscv64": uuid.UUID("8f1056be-9b05-47c4-81d6-be53128e5b54"),
"s390": uuid.UUID("b663c618-e7bc-4d6d-90aa-11b756bb1797"),
@@ -171,7 +171,7 @@
"parisc": uuid.UUID("450dd7d1-3224-45ec-9cf2-a43a346d71ee"),
"ppc": uuid.UUID("7007891d-d371-4a80-86a4-5cb875b9302e"),
"ppc64": uuid.UUID("0b888863-d7f8-4d9e-9766-239fce4d58af"),
- "ppc64el": uuid.UUID("c8bfbd1e-268e-4521-8bba-bf314c399557"),
+ "ppc64le": uuid.UUID("c8bfbd1e-268e-4521-8bba-bf314c399557"),
"riscv32": uuid.UUID("c3836a13-3137-45ba-b583-b16c50fe5eb4"),
"riscv64": uuid.UUID("d2f9000a-7a18-453f-b5cd-4d32f77a7b32"),
"s390": uuid.UUID("17440e4f-a8d0-467f-a46e-3912ae6ef2c5"),
From 22740da280258990d557eb45ac90d86c4f821c05 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 5 Dec 2024 14:31:15 +0100
Subject: [PATCH 2/2] Do not crash when we fail to get discoverable GPT type
UUID
No need to raise an exception if we fail to get the type UUID for
whatever reason.
Related: RHEL-70153
---
blivet/devices/partition.py | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/blivet/devices/partition.py b/blivet/devices/partition.py
index 2d67be81f..89470d9fb 100644
--- a/blivet/devices/partition.py
+++ b/blivet/devices/partition.py
@@ -365,10 +365,16 @@ def part_type_uuid_req(self):
hasattr(parted.Partition, "type_uuid"))
if discoverable:
- parttype = gpt_part_uuid_for_mountpoint(self._mountpoint)
- log.debug("Discovered partition type UUID %s for mount '%s'",
- parttype, self._mountpoint)
- return parttype
+ try:
+ parttype = gpt_part_uuid_for_mountpoint(self._mountpoint)
+ except errors.GPTVolUUIDError as e:
+ log.error("Failed to get partition type UUID for mount '%s': %s",
+ self._mountpoint, str(e))
+ return None
+ else:
+ log.debug("Discovered partition type UUID %s for mount '%s'",
+ parttype, self._mountpoint)
+ return parttype
return None
@property

View File

@ -1,96 +0,0 @@
From 5fc2cfb675580cecc7e583c7c6a7fb767b4507de Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 10 Mar 2025 09:52:27 +0100
Subject: [PATCH 1/2] Set persistent allow-discards flag for newly created LUKS
devices
We are currently using the "allow-discards" in /etc/crypttab to
set the discards/fstrim feature for LUKS, but that doesn't work
for Fedora Silverblue so we need to set the persistent flag in the
LUKS header instead.
Resolves: RHEL-82884
---
blivet/formats/luks.py | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/blivet/formats/luks.py b/blivet/formats/luks.py
index 92c2f0bd7..151ca985a 100644
--- a/blivet/formats/luks.py
+++ b/blivet/formats/luks.py
@@ -364,6 +364,15 @@ def _create(self, **kwargs):
def _post_create(self, **kwargs):
super(LUKS, self)._post_create(**kwargs)
+ if self.luks_version == "luks2" and flags.discard_new:
+ try:
+ blockdev.crypto.luks_set_persistent_flags(self.device,
+ blockdev.CryptoLUKSPersistentFlags.ALLOW_DISCARDS)
+ except blockdev.CryptoError as e:
+ raise LUKSError("Failed to set allow discards flag for newly created LUKS format: %s" % str(e))
+ except AttributeError:
+ log.warning("Cannot set allow discards flag: not supported")
+
try:
info = blockdev.crypto.luks_info(self.device)
except blockdev.CryptoError as e:
From 8312a8cb8a4f78529174031214d3cc137c503fbc Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 12 Mar 2025 11:08:00 +0100
Subject: [PATCH 2/2] Add a simple test for setting the allow-discards flag on
LUKS
---
tests/unit_tests/formats_tests/luks_test.py | 30 ++++++++++++++++++++-
1 file changed, 29 insertions(+), 1 deletion(-)
diff --git a/tests/unit_tests/formats_tests/luks_test.py b/tests/unit_tests/formats_tests/luks_test.py
index d4322f118..70baf8f7b 100644
--- a/tests/unit_tests/formats_tests/luks_test.py
+++ b/tests/unit_tests/formats_tests/luks_test.py
@@ -18,8 +18,17 @@ def test_create_discard_option(self):
fmt = LUKS(exists=True)
self.assertEqual(fmt.options, None)
+ fmt = LUKS(passphrase="passphrase")
+ with patch("blivet.devicelibs.crypto.calculate_luks2_max_memory", return_value=None):
+ with patch("blivet.devicelibs.crypto.get_optimal_luks_sector_size", return_value=0):
+ with patch("blivet.formats.luks.blockdev") as bd:
+ fmt._create()
+ bd.crypto.luks_format.assert_called()
+ fmt._post_create()
+ bd.crypto.luks_set_persistent_flags.assert_not_called()
+
# flags.discard_new=True --> discard if creating new
- with patch("blivet.flags.flags.discard_new", True):
+ with patch("blivet.formats.luks.flags.discard_new", True):
fmt = LUKS(exists=True)
self.assertEqual(fmt.options, None)
@@ -34,6 +43,25 @@ def test_create_discard_option(self):
fmt = LUKS(exists=False, options="blah")
self.assertEqual(fmt.options, "blah,discard")
+ fmt = LUKS(passphrase="passphrase")
+ with patch("blivet.devicelibs.crypto.calculate_luks2_max_memory", return_value=None):
+ with patch("blivet.devicelibs.crypto.get_optimal_luks_sector_size", return_value=0):
+ with patch("blivet.formats.luks.blockdev") as bd:
+ fmt._create()
+ bd.crypto.luks_format.assert_called()
+ fmt._post_create()
+ bd.crypto.luks_set_persistent_flags.assert_called()
+
+ # LUKS 1 doesn't support the persistent flags
+ fmt = LUKS(passphrase="passphrase", luks_version="luks1")
+ with patch("blivet.devicelibs.crypto.calculate_luks2_max_memory", return_value=None):
+ with patch("blivet.devicelibs.crypto.get_optimal_luks_sector_size", return_value=0):
+ with patch("blivet.formats.luks.blockdev") as bd:
+ fmt._create()
+ bd.crypto.luks_format.assert_called()
+ fmt._post_create()
+ bd.crypto.luks_set_persistent_flags.assert_not_called()
+
def test_key_size(self):
# default cipher is AES-XTS with 512b key
fmt = LUKS()

View File

@ -1,103 +0,0 @@
From 30782ea4482e8118996ffa69f967531515761179 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 20 Jan 2025 13:02:50 +0100
Subject: [PATCH] Do not remove PVs from devices file if disabled or doesn't
exist
When the file doesn't exist, the 'lvmdevices --deldev' call will
fail but it will still create the devices file. This means we now
have an empty devices file and all subsequent LVM calls will fail.
Resolves: RHEL-65846
---
blivet/formats/lvmpv.py | 10 +++++++
tests/unit_tests/formats_tests/lvmpv_test.py | 28 ++++++++++++++++++++
2 files changed, 38 insertions(+)
diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py
index 982233878..aa5cc0a5a 100644
--- a/blivet/formats/lvmpv.py
+++ b/blivet/formats/lvmpv.py
@@ -171,6 +171,16 @@ def lvmdevices_remove(self):
if not lvm.HAVE_LVMDEVICES:
raise PhysicalVolumeError("LVM devices file feature is not supported")
+ if not os.path.exists(lvm.LVM_DEVICES_FILE):
+ log.debug("Not removing %s from devices file: %s doesn't exist",
+ self.device, lvm.LVM_DEVICES_FILE)
+ return
+
+ if not flags.lvm_devices_file:
+ log.debug("Not removing %s from devices file: 'lvm_devices_file' flag is set to False",
+ self.device)
+ return
+
try:
blockdev.lvm.devices_delete(self.device)
except blockdev.LVMError as e:
diff --git a/tests/unit_tests/formats_tests/lvmpv_test.py b/tests/unit_tests/formats_tests/lvmpv_test.py
index 8d410f4fd..890e3cb19 100644
--- a/tests/unit_tests/formats_tests/lvmpv_test.py
+++ b/tests/unit_tests/formats_tests/lvmpv_test.py
@@ -38,6 +38,11 @@ def test_lvm_devices(self):
mock["blockdev"].lvm.devices_add.assert_not_called()
+ # LVM devices file not enabled/supported -> devices_delete should not be called
+ fmt._destroy()
+
+ mock["blockdev"].lvm.devices_delete.assert_not_called()
+
with self.patches() as mock:
# LVM devices file enabled and devices file exists -> devices_add should be called
mock["lvm"].HAVE_LVMDEVICES = True
@@ -47,6 +52,11 @@ def test_lvm_devices(self):
mock["blockdev"].lvm.devices_add.assert_called_with("/dev/test")
+ # LVM devices file enabled and devices file exists -> devices_delete should be called
+ fmt._destroy()
+
+ mock["blockdev"].lvm.devices_delete.assert_called_with("/dev/test")
+
with self.patches() as mock:
# LVM devices file enabled and devices file doesn't exist
# and no existing VGs present -> devices_add should be called
@@ -58,6 +68,12 @@ def test_lvm_devices(self):
mock["blockdev"].lvm.devices_add.assert_called_with("/dev/test")
+ # LVM devices file enabled but devices file doesn't exist
+ # -> devices_delete should not be called
+ fmt._destroy()
+
+ mock["blockdev"].lvm.devices_delete.assert_not_called()
+
with self.patches() as mock:
# LVM devices file enabled and devices file doesn't exist
# and existing VGs present -> devices_add should not be called
@@ -69,6 +85,12 @@ def test_lvm_devices(self):
mock["blockdev"].lvm.devices_add.assert_not_called()
+ # LVM devices file enabled but devices file doesn't exist
+ # -> devices_delete should not be called
+ fmt._destroy()
+
+ mock["blockdev"].lvm.devices_delete.assert_not_called()
+
with self.patches() as mock:
# LVM devices file enabled and devices file exists
# but flag set to false -> devices_add should not be called
@@ -81,5 +103,11 @@ def test_lvm_devices(self):
mock["blockdev"].lvm.devices_add.assert_not_called()
+ # LVM devices file enabled and devices file exists
+ # but flag set to false -> devices_delete should not be called
+ fmt._destroy()
+
+ mock["blockdev"].lvm.devices_delete.assert_not_called()
+
# reset the flag back
flags.lvm_devices_file = True

View File

@ -1,23 +0,0 @@
From c16a44b6627a6b4c1cb178f4c2127f21a53344ec Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 3 Mar 2025 12:33:34 +0100
Subject: [PATCH] iscsi: Use node.startup=onboot option for Login
Resolves: RHEL-53719
---
blivet/iscsi.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/blivet/iscsi.py b/blivet/iscsi.py
index 95674665b..f66c38934 100644
--- a/blivet/iscsi.py
+++ b/blivet/iscsi.py
@@ -278,7 +278,7 @@ def _login(self, node_info, extra=None):
if extra is None:
extra = dict()
- extra["node.startup"] = GLib.Variant("s", "automatic")
+ extra["node.startup"] = GLib.Variant("s", "onboot")
extra["node.session.auth.chap_algs"] = GLib.Variant("s", "SHA1,MD5")
args = GLib.Variant("(sisisa{sv})", node_info.conn_info + (extra,))

View File

@ -1,26 +0,0 @@
From 8195fb13faa587737780f174651964c4f074f482 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 3 Jul 2024 15:49:34 +0200
Subject: [PATCH] tests: Make sure selinux_test doesn't try to create
mountpoints
This is a unit test so it shouldn't try to create directories
anywhere.
Resolves: RHEL-78988
---
tests/unit_tests/formats_tests/selinux_test.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/tests/unit_tests/formats_tests/selinux_test.py b/tests/unit_tests/formats_tests/selinux_test.py
index 484e745a4..ea2f516be 100644
--- a/tests/unit_tests/formats_tests/selinux_test.py
+++ b/tests/unit_tests/formats_tests/selinux_test.py
@@ -23,6 +23,7 @@ def setUp(self):
@patch("blivet.tasks.fsmount.BlockDev.fs.mount", return_value=True)
@patch.object(fs.FS, "_pre_setup", return_value=True)
@patch("os.access", return_value=True)
+ @patch("os.path.isdir", return_value=True)
# pylint: disable=unused-argument
def exec_mount_selinux_format(self, formt, *args):
""" Test of correct selinux context parameter value when mounting """

View File

@ -1,516 +0,0 @@
From 6373572308111c154c323a099103fabaaeace792 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 21 Jan 2025 10:03:17 +0100
Subject: [PATCH 1/6] Use pvs info from static data to get PV size in PVSize
No need for a special code for this, we can reuse the existing
code from LVM static data.
---
blivet/tasks/pvtask.py | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/blivet/tasks/pvtask.py b/blivet/tasks/pvtask.py
index b6f1896a3..3bbab7cbc 100644
--- a/blivet/tasks/pvtask.py
+++ b/blivet/tasks/pvtask.py
@@ -27,6 +27,7 @@
from ..errors import PhysicalVolumeError
from ..size import Size, B
+from ..static_data import pvs_info
from . import availability
from . import task
@@ -55,13 +56,12 @@ def do_task(self): # pylint: disable=arguments-differ
:raises :class:`~.errors.PhysicalVolumeError`: if size cannot be obtained
"""
- try:
- pv_info = blockdev.lvm.pvinfo(self.pv.device)
- pv_size = pv_info.pv_size
- except blockdev.LVMError as e:
- raise PhysicalVolumeError(e)
+ pvs_info.drop_cache()
+ pv_info = pvs_info.cache.get(self.pv.device)
+ if pv_info is None:
+ raise PhysicalVolumeError("Failed to get PV info for %s" % self.pv.device)
- return Size(pv_size)
+ return Size(pv_info.pv_size)
class PVResize(task.BasicApplication, dfresize.DFResizeTask):
From cc0ad43477e201c8da8f7bffd04c845ea9e57f1c Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 21 Jan 2025 10:05:13 +0100
Subject: [PATCH 2/6] Get the actual PV format size for LVMPV format
---
blivet/formats/lvmpv.py | 2 ++
blivet/populator/helpers/lvm.py | 2 ++
tests/unit_tests/populator_test.py | 2 ++
3 files changed, 6 insertions(+)
diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py
index aa5cc0a5a..70f4697fc 100644
--- a/blivet/formats/lvmpv.py
+++ b/blivet/formats/lvmpv.py
@@ -102,6 +102,8 @@ def __init__(self, **kwargs):
# when set to True, blivet will try to resize the PV to fill all available space
self._grow_to_fill = False
+ self._target_size = self._size
+
def __repr__(self):
s = DeviceFormat.__repr__(self)
s += (" vg_name = %(vg_name)s vg_uuid = %(vg_uuid)s"
diff --git a/blivet/populator/helpers/lvm.py b/blivet/populator/helpers/lvm.py
index 0cf47ba43..e22c52088 100644
--- a/blivet/populator/helpers/lvm.py
+++ b/blivet/populator/helpers/lvm.py
@@ -114,6 +114,8 @@ def _get_kwargs(self):
log.warning("PV %s has no pe_start", name)
if pv_info.pv_free:
kwargs["free"] = Size(pv_info.pv_free)
+ if pv_info.pv_size:
+ kwargs["size"] = Size(pv_info.pv_size)
return kwargs
diff --git a/tests/unit_tests/populator_test.py b/tests/unit_tests/populator_test.py
index 2d8175f2a..0429e8d44 100644
--- a/tests/unit_tests/populator_test.py
+++ b/tests/unit_tests/populator_test.py
@@ -981,6 +981,7 @@ def test_run(self, *args):
pv_info.vg_uuid = sentinel.vg_uuid
pv_info.pe_start = 0
pv_info.pv_free = 0
+ pv_info.pv_size = "10g"
vg_device = Mock()
vg_device.id = 0
@@ -1012,6 +1013,7 @@ def test_run(self, *args):
pv_info.vg_extent_count = 2500
pv_info.vg_free_count = 0
pv_info.vg_pv_count = 1
+ pv_info.pv_size = "10g"
with patch("blivet.static_data.lvm_info.PVsInfo.cache", new_callable=PropertyMock) as mock_pvs_cache:
mock_pvs_cache.return_value = {device.path: pv_info}
From 99fc0b2e9c8c42a894eee7bc6c850364ed85d313 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 21 Jan 2025 13:35:38 +0100
Subject: [PATCH 3/6] Update PV format size after adding/removing the PV
to/from the VG
Unfortunately LVM subtracts VG metadata from the reported PV size
so we need to make sure to update the size after the vgextend and
vgreduce operation.
---
blivet/devices/lvm.py | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 661dc6e06..93f3ccbe7 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -343,12 +343,24 @@ def _remove(self, member):
if lv.status and not status:
lv.teardown()
+ # update LVMPV format size --> PV format has different size when in VG
+ try:
+ fmt._size = fmt._target_size = fmt._size_info.do_task()
+ except errors.PhysicalVolumeError as e:
+ log.warning("Failed to obtain current size for device %s: %s", fmt.device, e)
+
def _add(self, member):
try:
blockdev.lvm.vgextend(self.name, member.path)
except blockdev.LVMError as err:
raise errors.LVMError(err)
+ # update LVMPV format size --> PV format has different size when in VG
+ try:
+ member.format._size = member.format._target_size = member.format._size_info.do_task()
+ except errors.PhysicalVolumeError as e:
+ log.warning("Failed to obtain current size for device %s: %s", member.path, e)
+
def _add_log_vol(self, lv):
""" Add an LV to this VG. """
if lv in self._lvs:
From b6a9d661cd99e6973d8555a1ac587da49fd6d3df Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 21 Jan 2025 14:22:07 +0100
Subject: [PATCH 4/6] Use LVMPV format size when calculating VG size and free
space
For existing PVs we need to check the format size instead of
simply expecting the format is fully resized to match the size of
the underlying block device.
---
blivet/devices/lvm.py | 63 ++++++++++++++++++++++++++-----------------
1 file changed, 39 insertions(+), 24 deletions(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 93f3ccbe7..d0b0b2b9c 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -534,40 +534,55 @@ def reserved_percent(self, value):
self._reserved_percent = value
- def _get_pv_usable_space(self, pv):
+ def _get_pv_metadata_space(self, pv):
+ """ Returns how much space will be used by VG metadata in given PV
+ This depends on type of the PV, PE size and PE start.
+ """
if isinstance(pv, MDRaidArrayDevice):
- return self.align(pv.size - 2 * pv.format.pe_start)
+ return 2 * pv.format.pe_start
+ else:
+ return pv.format.pe_start
+
+ def _get_pv_usable_space(self, pv):
+ """ Return how much space can be actually used on given PV.
+ This takes into account:
+ - VG metadata that is/will be stored in this PV
+ - the actual PV format size (which might differ from
+ the underlying block device size)
+ """
+
+ if pv.format.exists and pv.format.size and self.exists:
+ # PV format exists, we got its size and VG also exists
+ # -> all metadata is already accounted in the PV format size
+ return pv.format.size
+ elif pv.format.exists and pv.format.size and not self.exists:
+ # PV format exists, we got its size, but the VG doesn't exist
+ # -> metadata size is not accounted in the PV format size
+ return self.align(pv.format.size - self._get_pv_metadata_space(pv))
else:
- return self.align(pv.size - pv.format.pe_start)
+ # something else -> either the PV format is not yet created or
+ # we for some reason failed to get size of the format, either way
+ # lets use the underlying block device size and calculate the
+ # metadata size ourselves
+ return self.align(pv.size - self._get_pv_metadata_space(pv))
@property
def lvm_metadata_space(self):
- """ The amount of the space LVM metadata cost us in this VG's PVs """
- # NOTE: we either specify data alignment in a PV or the default is used
- # which is both handled by pv.format.pe_start, but LVM takes into
- # account also the underlying block device which means that e.g.
- # for an MD RAID device, it tries to align everything also to chunk
- # size and alignment offset of such device which may result in up
- # to a twice as big non-data area
- # TODO: move this to either LVMPhysicalVolume's pe_start property once
- # formats know about their devices or to a new LVMPhysicalVolumeDevice
- # class once it exists
- diff = Size(0)
- for pv in self.pvs:
- diff += pv.size - self._get_pv_usable_space(pv)
-
- return diff
+ """ The amount of the space LVM metadata cost us in this VG's PVs
+ Note: we either specify data alignment in a PV or the default is used
+ which is both handled by pv.format.pe_start, but LVM takes into
+ account also the underlying block device which means that e.g.
+ for an MD RAID device, it tries to align everything also to chunk
+ size and alignment offset of such device which may result in up
+ to a twice as big non-data area
+ """
+ return sum(self._get_pv_metadata_space(pv) for pv in self.pvs)
@property
def size(self):
""" The size of this VG """
# TODO: just ask lvm if isModified returns False
-
- # sum up the sizes of the PVs, subtract the unusable (meta data) space
- size = sum(pv.size for pv in self.pvs)
- size -= self.lvm_metadata_space
-
- return size
+ return sum(self._get_pv_usable_space(pv) for pv in self.pvs)
@property
def extents(self):
From cd4ce45b78aae26424294c3e4dd8d082eb985af6 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 21 Jan 2025 14:28:56 +0100
Subject: [PATCH 5/6] Add more tests for PV and VG size and free space
---
tests/storage_tests/devices_test/lvm_test.py | 101 +++++++++++++++++++
1 file changed, 101 insertions(+)
diff --git a/tests/storage_tests/devices_test/lvm_test.py b/tests/storage_tests/devices_test/lvm_test.py
index f64af8943..2217eeb63 100644
--- a/tests/storage_tests/devices_test/lvm_test.py
+++ b/tests/storage_tests/devices_test/lvm_test.py
@@ -25,6 +25,18 @@ def setUp(self):
self.assertIsNone(disk.format.type)
self.assertFalse(disk.children)
+ def _get_pv_size(self, pv):
+ out = subprocess.check_output(["pvs", "-o", "pv_size", "--noheadings", "--nosuffix", "--units=b", pv])
+ return blivet.size.Size(out.decode().strip())
+
+ def _get_vg_size(self, vg):
+ out = subprocess.check_output(["vgs", "-o", "vg_size", "--noheadings", "--nosuffix", "--units=b", vg])
+ return blivet.size.Size(out.decode().strip())
+
+ def _get_vg_free(self, vg):
+ out = subprocess.check_output(["vgs", "-o", "vg_free", "--noheadings", "--nosuffix", "--units=b", vg])
+ return blivet.size.Size(out.decode().strip())
+
def _clean_up(self):
self.storage.reset()
for disk in self.storage.disks:
@@ -74,6 +86,8 @@ def test_lvm_basic(self):
self.assertIsInstance(pv, blivet.devices.PartitionDevice)
self.assertIsNotNone(pv.format)
self.assertEqual(pv.format.type, "lvmpv")
+ pv_size = self._get_pv_size(pv.path)
+ self.assertEqual(pv.format.size, pv_size)
vg = self.storage.devicetree.get_device_by_name(self.vgname)
self.assertIsNotNone(vg)
@@ -84,6 +98,10 @@ def test_lvm_basic(self):
self.assertEqual(pv.format.vg_uuid, vg.uuid)
self.assertEqual(len(vg.parents), 1)
self.assertEqual(vg.parents[0], pv)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
lv = self.storage.devicetree.get_device_by_name("%s-blivetTestLV" % self.vgname)
self.assertIsNotNone(lv)
@@ -131,6 +149,13 @@ def test_lvm_thin(self):
self.storage.do_it()
self.storage.reset()
+ vg = self.storage.devicetree.get_device_by_name(self.vgname)
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
+
pool = self.storage.devicetree.get_device_by_name("%s-blivetTestPool" % self.vgname)
self.assertIsNotNone(pool)
self.assertTrue(pool.is_thin_pool)
@@ -177,6 +202,14 @@ def _test_lvm_raid(self, seg_type, raid_level, stripe_size=0):
self.storage.do_it()
self.storage.reset()
+ vg = self.storage.devicetree.get_device_by_name(self.vgname)
+ self.assertIsNotNone(vg)
+
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space + vg.reserved_space)
+
raidlv = self.storage.devicetree.get_device_by_name("%s-blivetTestRAIDLV" % self.vgname)
self.assertIsNotNone(raidlv)
self.assertTrue(raidlv.is_raid_lv)
@@ -233,6 +266,13 @@ def test_lvm_cache(self):
self.storage.do_it()
self.storage.reset()
+ vg = self.storage.devicetree.get_device_by_name(self.vgname)
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
+
cachedlv = self.storage.devicetree.get_device_by_name("%s-blivetTestCachedLV" % self.vgname)
self.assertIsNotNone(cachedlv)
self.assertTrue(cachedlv.cached)
@@ -272,6 +312,13 @@ def test_lvm_cache_attach(self):
self.storage.do_it()
self.storage.reset()
+ vg = self.storage.devicetree.get_device_by_name(self.vgname)
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
+
cachedlv = self.storage.devicetree.get_device_by_name("%s-blivetTestCachedLV" % self.vgname)
self.assertIsNotNone(cachedlv)
cachepool = self.storage.devicetree.get_device_by_name("%s-blivetTestFastLV" % self.vgname)
@@ -327,6 +374,13 @@ def test_lvm_cache_create_and_attach(self):
self.storage.do_it()
self.storage.reset()
+ vg = self.storage.devicetree.get_device_by_name(self.vgname)
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
+
cachedlv = self.storage.devicetree.get_device_by_name("%s-blivetTestCachedLV" % self.vgname)
self.assertIsNotNone(cachedlv)
@@ -342,6 +396,13 @@ def test_lvm_cache_create_and_attach(self):
self.storage.do_it()
self.storage.reset()
+ vg = self.storage.devicetree.get_device_by_name(self.vgname)
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
+
cachedlv = self.storage.devicetree.get_device_by_name("%s-blivetTestCachedLV" % self.vgname)
self.assertIsNotNone(cachedlv)
self.assertTrue(cachedlv.cached)
@@ -371,6 +432,13 @@ def test_lvm_pvs_add_remove(self):
self.storage.do_it()
+ vg = self.storage.devicetree.get_device_by_name(self.vgname)
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
+
# create a second PV
disk2 = self.storage.devicetree.get_device_by_path(self.vdevs[1])
self.assertIsNotNone(disk2)
@@ -385,6 +453,17 @@ def test_lvm_pvs_add_remove(self):
self.storage.do_it()
self.storage.reset()
+ pv1 = self.storage.devicetree.get_device_by_name(pv1.name)
+ pv1_size = self._get_pv_size(pv1.path)
+ self.assertEqual(pv1.format.size, pv1_size)
+
+ vg = self.storage.devicetree.get_device_by_name(self.vgname)
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
+
# add the PV to the existing VG
vg = self.storage.devicetree.get_device_by_name(self.vgname)
pv2 = self.storage.devicetree.get_device_by_name(pv2.name)
@@ -393,6 +472,17 @@ def test_lvm_pvs_add_remove(self):
self.storage.devicetree.actions.add(ac)
self.storage.do_it()
+ pv2 = self.storage.devicetree.get_device_by_name(pv2.name)
+ pv2_size = self._get_pv_size(pv2.path)
+ self.assertEqual(pv2.format.size, pv2_size)
+
+ vg = self.storage.devicetree.get_device_by_name(self.vgname)
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
+
self.assertEqual(pv2.format.vg_name, vg.name)
self.storage.reset()
@@ -414,6 +504,17 @@ def test_lvm_pvs_add_remove(self):
self.storage.do_it()
+ pv2 = self.storage.devicetree.get_device_by_name(pv2.name)
+ pv2_size = self._get_pv_size(pv2.path)
+ self.assertEqual(pv2.format.size, pv2_size)
+
+ vg = self.storage.devicetree.get_device_by_name(self.vgname)
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
+
self.assertIsNone(pv1.format.type)
self.storage.reset()
From a4a7791a150e190089c8f935c7a5aae7fa9bc5a5 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 21 Jan 2025 15:16:29 +0100
Subject: [PATCH 6/6] Add a separate test case for LVMPV smaller than the block
device
---
tests/storage_tests/devices_test/lvm_test.py | 50 ++++++++++++++++++++
1 file changed, 50 insertions(+)
diff --git a/tests/storage_tests/devices_test/lvm_test.py b/tests/storage_tests/devices_test/lvm_test.py
index 2217eeb63..25d9d71bb 100644
--- a/tests/storage_tests/devices_test/lvm_test.py
+++ b/tests/storage_tests/devices_test/lvm_test.py
@@ -524,3 +524,53 @@ def test_lvm_pvs_add_remove(self):
self.assertIsNotNone(vg)
self.assertEqual(len(vg.pvs), 1)
self.assertEqual(vg.pvs[0].name, pv2.name)
+
+ def test_lvm_pv_size(self):
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+ self.storage.initialize_disk(disk)
+
+ pv = self.storage.new_partition(size=blivet.size.Size("100 MiB"), fmt_type="lvmpv",
+ parents=[disk])
+ self.storage.create_device(pv)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ pv = self.storage.devicetree.get_device_by_name(pv.name)
+ self.assertIsNotNone(pv)
+
+ pv.format.update_size_info()
+ self.assertTrue(pv.format.resizable)
+
+ ac = blivet.deviceaction.ActionResizeFormat(pv, blivet.size.Size("50 MiB"))
+ self.storage.devicetree.actions.add(ac)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ pv = self.storage.devicetree.get_device_by_name(pv.name)
+ self.assertIsNotNone(pv)
+ self.assertEqual(pv.format.size, blivet.size.Size("50 MiB"))
+ pv_size = self._get_pv_size(pv.path)
+ self.assertEqual(pv_size, pv.format.size)
+
+ vg = self.storage.new_vg(name=self.vgname, parents=[pv])
+ self.storage.create_device(vg)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ pv = self.storage.devicetree.get_device_by_name(pv.name)
+ self.assertIsNotNone(pv)
+ pv_size = self._get_pv_size(pv.path)
+ self.assertEqual(pv_size, pv.format.size)
+
+ vg = self.storage.devicetree.get_device_by_name(self.vgname)
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)

View File

@ -1,85 +0,0 @@
From 964ad0ab491678ad73adb4c894d38619bdcfd1b2 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 22 Jan 2025 13:16:43 +0100
Subject: [PATCH] Include additional information in PartitioningError
The generic 'Unable to allocate requested partition scheme' is not
very helpful; we should try to include additional information if
possible.
Resolves: RHEL-84686
---
blivet/partitioning.py | 25 ++++++++++++++++++++++---
1 file changed, 22 insertions(+), 3 deletions(-)
diff --git a/blivet/partitioning.py b/blivet/partitioning.py
index ec9918d41..86841152b 100644
--- a/blivet/partitioning.py
+++ b/blivet/partitioning.py
@@ -34,7 +34,7 @@
from .flags import flags
from .devices import Device, PartitionDevice, device_path_to_name
from .size import Size
-from .i18n import _
+from .i18n import _, N_
from .util import compare
import logging
@@ -681,6 +681,11 @@ def resolve_disk_tags(disks, tags):
return [disk for disk in disks if any(tag in disk.tags for tag in tags)]
+class PartitioningErrors:
+ NO_PRIMARY = N_("no primary partition slots available")
+ NO_SLOTS = N_("no free partition slots")
+
+
def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None):
""" Allocate partitions based on requested features.
@@ -763,6 +768,7 @@ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None):
part_type = None
growth = 0 # in sectors
# loop through disks
+ errors = {}
for _disk in req_disks:
try:
disklabel = disklabels[_disk.path]
@@ -798,6 +804,10 @@ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None):
if new_part_type is None:
# can't allocate any more partitions on this disk
log.debug("no free partition slots on %s", _disk.name)
+ if PartitioningErrors.NO_SLOTS in errors.keys():
+ errors[PartitioningErrors.NO_SLOTS].append(_disk.name)
+ else:
+ errors[PartitioningErrors.NO_SLOTS] = [_disk.name]
continue
if _part.req_primary and new_part_type != parted.PARTITION_NORMAL:
@@ -808,7 +818,11 @@ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None):
new_part_type = parted.PARTITION_NORMAL
else:
# we need a primary slot and none are free on this disk
- log.debug("no primary slots available on %s", _disk.name)
+ log.debug("no primary partition slots available on %s", _disk.name)
+ if PartitioningErrors.NO_PRIMARY in errors.keys():
+ errors[PartitioningErrors.NO_PRIMARY].append(_disk.name)
+ else:
+ errors[PartitioningErrors.NO_PRIMARY] = [_disk.name]
continue
elif _part.req_part_type is not None and \
new_part_type != _part.req_part_type:
@@ -968,7 +982,12 @@ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None):
break
if free is None:
- raise PartitioningError(_("Unable to allocate requested partition scheme."))
+ if not errors:
+ msg = _("Unable to allocate requested partition scheme.")
+ else:
+ errors_by_disk = (", ".join(disks) + ": " + _(error) for error, disks in errors.items())
+ msg = _("Unable to allocate requested partition scheme on requested disks:\n%s") % "\n".join(errors_by_disk)
+ raise PartitioningError(msg)
_disk = use_disk
disklabel = _disk.format

View File

@ -1,310 +0,0 @@
From 8368cab41a1f34452b4c624768245517391ce400 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 16 May 2025 17:15:17 +0200
Subject: [PATCH 1/5] Allow ActionDestroyFormat to be marked as optional
When we are also planning to remove the device, failing to remove
the format is not critical so we can ignore it in these cases.
Resolves: RHEL-84685
Resolves: RHEL-84663
---
blivet/deviceaction.py | 37 +++++++++++++++++++++++--------------
1 file changed, 23 insertions(+), 14 deletions(-)
diff --git a/blivet/deviceaction.py b/blivet/deviceaction.py
index b22e00c36..2e6a8489f 100644
--- a/blivet/deviceaction.py
+++ b/blivet/deviceaction.py
@@ -734,12 +734,13 @@ class ActionDestroyFormat(DeviceAction):
obj = ACTION_OBJECT_FORMAT
type_desc_str = N_("destroy format")
- def __init__(self, device):
+ def __init__(self, device, optional=False):
if device.format_immutable:
raise ValueError("this device's formatting cannot be modified")
DeviceAction.__init__(self, device)
self.orig_format = self.device.format
+ self.optional = optional
if not device.format.destroyable:
raise ValueError("resource to destroy this format type %s is unavailable" % device.format.type)
@@ -758,21 +759,29 @@ def execute(self, callbacks=None):
""" wipe the filesystem signature from the device """
# remove any flag if set
super(ActionDestroyFormat, self).execute(callbacks=callbacks)
- status = self.device.status
- self.device.setup(orig=True)
- if hasattr(self.device, 'set_rw'):
- self.device.set_rw()
- self.format.destroy()
- udev.settle()
- if isinstance(self.device, PartitionDevice) and self.device.disklabel_supported:
- if self.format.parted_flag:
- self.device.unset_flag(self.format.parted_flag)
- self.device.disk.original_format.commit_to_disk()
- udev.settle()
+ try:
+ status = self.device.status
+ self.device.setup(orig=True)
+ if hasattr(self.device, 'set_rw'):
+ self.device.set_rw()
- if not status:
- self.device.teardown()
+ self.format.destroy()
+ udev.settle()
+ if isinstance(self.device, PartitionDevice) and self.device.disklabel_supported:
+ if self.format.parted_flag:
+ self.device.unset_flag(self.format.parted_flag)
+ self.device.disk.original_format.commit_to_disk()
+ udev.settle()
+
+ if not status:
+ self.device.teardown()
+ except Exception as e: # pylint: disable=broad-except
+ if self.optional:
+ log.error("Ignoring error when executing optional action: Failed to destroy format on %s: %s.",
+ self.device.name, str(e))
+ else:
+ raise
def cancel(self):
if not self._applied:
From 94e0ec7f24129159ac5f4fe455f37b85ceb9a004 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 16 May 2025 17:28:40 +0200
Subject: [PATCH 2/5] Make ActionDestroyFormat optional when device is also
removed
In both destroy_device and recursive_remove we try to remove both
the device and its format. In these cases the format destroy can
be considered to be optional and we don't need to fail just
because we failed to remove the format.
Resolves: RHEL-84685
Resolves: RHEL-84663
---
blivet/blivet.py | 2 +-
blivet/devicetree.py | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/blivet/blivet.py b/blivet/blivet.py
index 399992a41..53206d973 100644
--- a/blivet/blivet.py
+++ b/blivet/blivet.py
@@ -915,7 +915,7 @@ def destroy_device(self, device):
if device.format.exists and device.format.type and \
not device.format_immutable:
# schedule destruction of any formatting while we're at it
- self.devicetree.actions.add(ActionDestroyFormat(device))
+ self.devicetree.actions.add(ActionDestroyFormat(device, optional=True))
action = ActionDestroyDevice(device)
self.devicetree.actions.add(action)
diff --git a/blivet/devicetree.py b/blivet/devicetree.py
index 6a27b1e71..4ec955002 100644
--- a/blivet/devicetree.py
+++ b/blivet/devicetree.py
@@ -261,7 +261,7 @@ def recursive_remove(self, device, actions=True, remove_device=True, modparent=T
if actions:
if leaf.format.exists and not leaf.protected and \
not leaf.format_immutable:
- self.actions.add(ActionDestroyFormat(leaf))
+ self.actions.add(ActionDestroyFormat(leaf, optional=True))
self.actions.add(ActionDestroyDevice(leaf))
else:
@@ -273,7 +273,7 @@ def recursive_remove(self, device, actions=True, remove_device=True, modparent=T
if not device.format_immutable:
if actions:
- self.actions.add(ActionDestroyFormat(device))
+ self.actions.add(ActionDestroyFormat(device, optional=True))
else:
device.format = None
From 610b65450fa00a9b8b129ef733536ca080edc6fe Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 19 May 2025 14:24:06 +0200
Subject: [PATCH 3/5] tests: Add a simple test case for optional format destroy
action
Related: RHEL-84685
Related: RHEL-84663
---
tests/unit_tests/devices_test/lvm_test.py | 28 +++++++++++++++++++++++
1 file changed, 28 insertions(+)
diff --git a/tests/unit_tests/devices_test/lvm_test.py b/tests/unit_tests/devices_test/lvm_test.py
index ed30772fd..7ec3ed0ae 100644
--- a/tests/unit_tests/devices_test/lvm_test.py
+++ b/tests/unit_tests/devices_test/lvm_test.py
@@ -1172,3 +1172,31 @@ def test_vdo_compression_deduplication_change(self):
with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
self.b.do_it()
lvm.vdo_enable_deduplication.assert_called_with(vg.name, vdopool.lvname)
+
+
+@patch("blivet.devices.lvm.LVMLogicalVolumeDevice._external_dependencies", new=[])
+@patch("blivet.devices.lvm.LVMLogicalVolumeBase._external_dependencies", new=[])
+@patch("blivet.devices.dm.DMDevice._external_dependencies", new=[])
+class BlivetLVMOptionalDestroyTest(BlivetLVMUnitTest):
+
+ def test_optional_format_destroy(self, *args): # pylint: disable=unused-argument
+ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("10 GiB"), exists=True)
+ vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
+ lv = LVMLogicalVolumeDevice("testlv", parents=[vg], exists=True, size=Size("5 GiB"),
+ fmt=blivet.formats.get_format("xfs", exists=True))
+
+ for dev in (pv, vg, lv):
+ self.b.devicetree._add_device(dev)
+
+ self.b.destroy_device(lv)
+ fmt_ac = self.b.devicetree.actions.find(action_type="destroy", object_type="format")
+ self.assertTrue(fmt_ac)
+ self.assertTrue(fmt_ac[0].optional)
+
+ with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
+ lvm.lvactivate.side_effect = RuntimeError()
+ try:
+ self.b.do_it()
+ except RuntimeError:
+ self.fail("Optional format destroy action is not optional")
From d5c9b690f702d38a9db5bed5d728a1a25fe31077 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 20 May 2025 13:02:00 +0200
Subject: [PATCH 4/5] tests: Add test case for removing broken thin pool
Related: RHEL-84685
Related: RHEL-84663
---
tests/storage_tests/devices_test/lvm_test.py | 51 ++++++++++++++++++++
1 file changed, 51 insertions(+)
diff --git a/tests/storage_tests/devices_test/lvm_test.py b/tests/storage_tests/devices_test/lvm_test.py
index 25d9d71bb..aae9da8b5 100644
--- a/tests/storage_tests/devices_test/lvm_test.py
+++ b/tests/storage_tests/devices_test/lvm_test.py
@@ -1,6 +1,7 @@
import os
import shutil
import subprocess
+import tempfile
from ..storagetestcase import StorageTestCase
@@ -574,3 +575,53 @@ def test_lvm_pv_size(self):
self.assertEqual(vg_size, vg.size)
vg_free = self._get_vg_free(vg.name)
self.assertEqual(vg_free, vg.free_space)
+
+ def _break_thin_pool(self):
+ os.system("vgchange -an %s >/dev/null 2>&1" % self.vgname)
+
+ # changing transaction_id for the pool prevents it from being activated
+ with tempfile.NamedTemporaryFile(prefix="blivet_test") as temp:
+ os.system("vgcfgbackup -f %s %s >/dev/null 2>&1" % (temp.name, self.vgname))
+ os.system("sed -i 's/transaction_id =.*/transaction_id = 123456/' %s >/dev/null 2>&1" % temp.name)
+ os.system("vgcfgrestore -f %s %s --force >/dev/null 2>&1" % (temp.name, self.vgname))
+
+ def test_lvm_broken_thin(self):
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+
+ self.storage.initialize_disk(disk)
+
+ pv = self.storage.new_partition(size=blivet.size.Size("100 MiB"), fmt_type="lvmpv",
+ parents=[disk])
+ self.storage.create_device(pv)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ vg = self.storage.new_vg(name=self.vgname, parents=[pv])
+ self.storage.create_device(vg)
+
+ pool = self.storage.new_lv(thin_pool=True, size=blivet.size.Size("50 MiB"),
+ parents=[vg], name="blivetTestPool")
+ self.storage.create_device(pool)
+
+ self.storage.do_it()
+
+ # intentionally break the thin pool created above
+ self._break_thin_pool()
+
+ self.storage.reset()
+
+ pool = self.storage.devicetree.get_device_by_name("%s-blivetTestPool" % self.vgname)
+ self.assertIsNotNone(pool)
+
+ # check that the pool cannot be activated
+ try:
+ pool.setup()
+ except Exception: # pylint: disable=broad-except
+ pass
+ else:
+ self.fail("Failed to break thinpool for tests")
+
+ # verify that the pool can be destroyed even if it cannot be activated
+ self.storage.recursive_remove(pool)
+ self.storage.do_it()
From 6f0625e06a2ea69be8042cf5e76048b97a1025e1 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 29 Apr 2025 08:09:06 +0200
Subject: [PATCH 5/5] Fix expected exception type when activating devices in
populator
We are no longer raising libblockdev exceptions in our public API
calls (see #1014) so when calling setup() ourselves we need to
catch our exceptions instead of libblockdev ones as well.
Related: RHEL-84685
Related: RHEL-84663
---
blivet/populator/helpers/luks.py | 2 +-
blivet/populator/helpers/lvm.py | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/blivet/populator/helpers/luks.py b/blivet/populator/helpers/luks.py
index 72da248ed..0b72920e3 100644
--- a/blivet/populator/helpers/luks.py
+++ b/blivet/populator/helpers/luks.py
@@ -161,7 +161,7 @@ def run(self):
self.device.format.passphrase = passphrase
try:
self.device.format.setup()
- except blockdev.BlockDevError:
+ except LUKSError:
self.device.format.passphrase = None
else:
break
diff --git a/blivet/populator/helpers/lvm.py b/blivet/populator/helpers/lvm.py
index e22c52088..cdf97e405 100644
--- a/blivet/populator/helpers/lvm.py
+++ b/blivet/populator/helpers/lvm.py
@@ -29,7 +29,7 @@
from ... import udev
from ...devicelibs import lvm
from ...devices.lvm import LVMVolumeGroupDevice, LVMLogicalVolumeDevice, LVMInternalLVtype
-from ...errors import DeviceTreeError, DuplicateVGError
+from ...errors import DeviceTreeError, DuplicateVGError, LVMError
from ...flags import flags
from ...size import Size
from ...storage_log import log_method_call
@@ -289,7 +289,7 @@ def add_lv(lv):
if flags.auto_dev_updates:
try:
lv_device.setup()
- except blockdev.LVMError:
+ except LVMError:
log.warning("failed to activate lv %s", lv_device.name)
lv_device.controllable = False

View File

@ -1,385 +0,0 @@
From 025cc54c04132a056ba863ecdb1d05f3465632a5 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 27 May 2025 15:21:23 +0200
Subject: [PATCH 1/3] Add some basic partitioning storage tests
This supplements the existing tests which use sparse files. These
new test cases actually run do_it() and check the result after
reset. More test cases will follow.
Related: RHEL-93967
---
.../devices_test/partition_test.py | 148 ++++++++++++++++++
1 file changed, 148 insertions(+)
diff --git a/tests/storage_tests/devices_test/partition_test.py b/tests/storage_tests/devices_test/partition_test.py
index 73da87b43..30e6a0151 100644
--- a/tests/storage_tests/devices_test/partition_test.py
+++ b/tests/storage_tests/devices_test/partition_test.py
@@ -5,6 +5,7 @@
from unittest.mock import patch
+import blivet
from blivet.devices import DiskFile
from blivet.devices import PartitionDevice
from blivet.devicelibs.gpt import gpt_part_uuid_for_mountpoint
@@ -13,6 +14,8 @@
from blivet.size import Size
from blivet.util import sparsetmpfile
+from ..storagetestcase import StorageTestCase
+
class PartitionDeviceTestCase(unittest.TestCase):
@@ -266,3 +269,148 @@ def test_dev_part_type_gpt_autodiscover(self):
flags.gpt_discoverable_partitions = True
self.assertEqual(device.part_type_uuid,
gpt_part_uuid_for_mountpoint("/home"))
+
+
+class PartitionTestCase(StorageTestCase):
+
+ def setUp(self):
+ super().setUp()
+
+ disks = [os.path.basename(vdev) for vdev in self.vdevs]
+ self.storage = blivet.Blivet()
+ self.storage.exclusive_disks = disks
+ self.storage.reset()
+
+ # make sure only the targetcli disks are in the devicetree
+ for disk in self.storage.disks:
+ self.assertTrue(disk.path in self.vdevs)
+ self.assertIsNone(disk.format.type)
+ self.assertFalse(disk.children)
+
+ def _clean_up(self):
+ self.storage.reset()
+ for disk in self.storage.disks:
+ if disk.path not in self.vdevs:
+ raise RuntimeError("Disk %s found in devicetree but not in disks created for tests" % disk.name)
+ self.storage.recursive_remove(disk)
+
+ self.storage.do_it()
+
+ def test_msdos_basic(self):
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+
+ self.storage.format_device(disk, blivet.formats.get_format("disklabel", label_type="msdos"))
+
+ for i in range(4):
+ part = self.storage.new_partition(size=Size("100 MiB"), parents=[disk],
+ primary=True)
+ self.storage.create_device(part)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+ self.assertEqual(disk.format.type, "disklabel")
+ self.assertEqual(disk.format.label_type, "msdos")
+ self.assertIsNotNone(disk.format.parted_disk)
+ self.assertIsNotNone(disk.format.parted_device)
+ self.assertEqual(len(disk.format.partitions), 4)
+ self.assertEqual(len(disk.format.primary_partitions), 4)
+ self.assertEqual(len(disk.children), 4)
+
+ for i in range(4):
+ part = self.storage.devicetree.get_device_by_path(self.vdevs[0] + str(i + 1))
+ self.assertIsNotNone(part)
+ self.assertEqual(part.type, "partition")
+ self.assertEqual(part.disk, disk)
+ self.assertEqual(part.size, Size("100 MiB"))
+ self.assertTrue(part.is_primary)
+ self.assertFalse(part.is_extended)
+ self.assertFalse(part.is_logical)
+ self.assertIsNotNone(part.parted_partition)
+
+ def test_msdos_extended(self):
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+
+ self.storage.format_device(disk, blivet.formats.get_format("disklabel", label_type="msdos"))
+
+ part = self.storage.new_partition(size=Size("100 MiB"), parents=[disk])
+ self.storage.create_device(part)
+
+ part = self.storage.new_partition(size=Size("1 GiB"), parents=[disk],
+ part_type=parted.PARTITION_EXTENDED)
+ self.storage.create_device(part)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ for i in range(4):
+ part = self.storage.new_partition(size=Size("100 MiB"), parents=[disk],
+ part_type=parted.PARTITION_LOGICAL)
+ self.storage.create_device(part)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+ self.assertEqual(disk.format.type, "disklabel")
+ self.assertEqual(disk.format.label_type, "msdos")
+ self.assertIsNotNone(disk.format.parted_disk)
+ self.assertIsNotNone(disk.format.parted_device)
+ self.assertEqual(len(disk.format.partitions), 6)
+ self.assertEqual(len(disk.format.primary_partitions), 1)
+ self.assertEqual(len(disk.children), 6)
+
+ for i in range(4, 8):
+ part = self.storage.devicetree.get_device_by_path(self.vdevs[0] + str(i + 1))
+ self.assertIsNotNone(part)
+ self.assertEqual(part.type, "partition")
+ self.assertEqual(part.disk, disk)
+ self.assertEqual(part.size, Size("100 MiB"))
+ self.assertFalse(part.is_primary)
+ self.assertFalse(part.is_extended)
+ self.assertTrue(part.is_logical)
+ self.assertIsNotNone(part.parted_partition)
+
+ def test_gpt_basic(self):
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+
+ self.storage.format_device(disk, blivet.formats.get_format("disklabel", label_type="gpt"))
+
+ for i in range(4):
+ part = self.storage.new_partition(size=Size("100 MiB"), parents=[disk],)
+ self.storage.create_device(part)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+ self.assertEqual(disk.format.type, "disklabel")
+ self.assertEqual(disk.format.label_type, "gpt")
+ self.assertIsNotNone(disk.format.parted_disk)
+ self.assertIsNotNone(disk.format.parted_device)
+ self.assertEqual(len(disk.format.partitions), 4)
+ self.assertEqual(len(disk.format.primary_partitions), 4)
+ self.assertEqual(len(disk.children), 4)
+
+ for i in range(4):
+ part = self.storage.devicetree.get_device_by_path(self.vdevs[0] + str(i + 1))
+ self.assertIsNotNone(part)
+ self.assertEqual(part.type, "partition")
+ self.assertEqual(part.disk, disk)
+ self.assertEqual(part.size, Size("100 MiB"))
+ self.assertTrue(part.is_primary)
+ self.assertFalse(part.is_extended)
+ self.assertFalse(part.is_logical)
+ self.assertIsNotNone(part.parted_partition)
From ab6261adbdfedc26c6b0712a42a3dd9169cabb38 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 27 May 2025 14:10:49 +0200
Subject: [PATCH 2/3] Wipe end partition before creating it as well as the
start
We are currently overwriting the start of the newly created partition
with zeroes to remove any filesystem metadata that might occupy
the space. This extends this functionality to end of the partition
to remove 1.0 MD metadata that might be there.
Resolves: RHEL-93967
---
blivet/devices/partition.py | 20 +++++++++++++++++++-
1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/blivet/devices/partition.py b/blivet/devices/partition.py
index 89470d9fb..fc9a97be7 100644
--- a/blivet/devices/partition.py
+++ b/blivet/devices/partition.py
@@ -659,7 +659,7 @@ def _wipe(self):
""" Wipe the partition metadata.
Assumes that the partition metadata is located at the start
- of the partition and occupies no more than 1 MiB.
+ and end of the partition and occupies no more than 1 MiB.
Erases in block increments. Erases the smallest number of blocks
such that at least 1 MiB is erased or the whole partition is
@@ -692,6 +692,24 @@ def _wipe(self):
# things to settle.
udev.settle()
+ if count >= part_len:
+ # very small partition, we wiped it completely already
+ return
+
+ # now do the end of the partition as well (RAID 1.0 metadata)
+ end = self.parted_partition.geometry.end
+ cmd = ["dd", "if=/dev/zero", "of=%s" % device, "bs=%d" % bs,
+ "seek=%d" % (end - count), "count=%d" % count]
+ try:
+ util.run_program(cmd)
+ except OSError as e:
+ log.error(str(e))
+ finally:
+ # If a udev device is created with the watch option, then
+ # a change uevent is synthesized and we need to wait for
+ # things to settle.
+ udev.settle()
+
def _create(self):
""" Create the device. """
log_method_call(self, self.name, status=self.status)
From 936cccdf67e3ee612399bd3f0f8b383ca118ce9b Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 28 May 2025 11:01:14 +0200
Subject: [PATCH 3/3] tests: Add tests for wiping stale metadata from new
partitions
Related: RHEL-93967
---
.../devices_test/partition_test.py | 119 ++++++++++++++++++
1 file changed, 119 insertions(+)
diff --git a/tests/storage_tests/devices_test/partition_test.py b/tests/storage_tests/devices_test/partition_test.py
index 30e6a0151..87e4b1155 100644
--- a/tests/storage_tests/devices_test/partition_test.py
+++ b/tests/storage_tests/devices_test/partition_test.py
@@ -1,6 +1,7 @@
import os
import unittest
from uuid import UUID
+import blivet.deviceaction
import parted
from unittest.mock import patch
@@ -414,3 +415,121 @@ def test_gpt_basic(self):
self.assertFalse(part.is_extended)
self.assertFalse(part.is_logical)
self.assertIsNotNone(part.parted_partition)
+
+ def _partition_wipe_check(self):
+ part1 = self.storage.devicetree.get_device_by_path(self.vdevs[0] + "1")
+ self.assertIsNotNone(part1)
+ self.assertIsNone(part1.format.type)
+
+ out = blivet.util.capture_output(["blkid", "-p", "-sTYPE", "-ovalue", self.vdevs[0] + "1"])
+ self.assertEqual(out.strip(), "")
+
+ part2 = self.storage.devicetree.get_device_by_path(self.vdevs[0] + "2")
+ self.assertIsNotNone(part2)
+ self.assertEqual(part2.format.type, "ext4")
+
+ try:
+ part2.format.do_check()
+ except blivet.errors.FSError as e:
+ self.fail("Partition wipe corrupted filesystem on an adjacent partition: %s" % str(e))
+
+ out = blivet.util.capture_output(["blkid", "-p", "-sTYPE", "-ovalue", self.vdevs[0] + "2"])
+ self.assertEqual(out.strip(), "ext4")
+
+ def test_partition_wipe_ext(self):
+ """ Check that any stray filesystem metadata are removed before creating a partition """
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+
+ self.storage.format_device(disk, blivet.formats.get_format("disklabel", label_type="gpt"))
+
+ # create two partitions with ext4
+ part1 = self.storage.new_partition(size=Size("100 MiB"), parents=[disk],
+ fmt=blivet.formats.get_format("ext4"))
+ self.storage.create_device(part1)
+
+ part2 = self.storage.new_partition(size=Size("1 MiB"), parents=[disk], grow=True,
+ fmt=blivet.formats.get_format("ext4"))
+ self.storage.create_device(part2)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ # remove the first partition (only the partition without removing the format)
+ part1 = self.storage.devicetree.get_device_by_path(self.vdevs[0] + "1")
+ ac = blivet.deviceaction.ActionDestroyDevice(part1)
+ self.storage.devicetree.actions.add(ac)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ # create the first partition again (without ext4)
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ part1 = self.storage.new_partition(size=Size("100 MiB"), parents=[disk])
+ self.storage.create_device(part1)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ # XXX PartitionDevice._post_create calls wipefs on the partition, we want to check that
+ # the _pre_create dd wipe works so we need to skip the _post_create wipefs call
+ part1._post_create = lambda: None
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ # make sure the ext4 signature is not present on part1 (and untouched on part2)
+ self._partition_wipe_check()
+
+ def test_partition_wipe_mdraid(self):
+ """ Check that any stray RAID metadata are removed before creating a partition """
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+
+ self.storage.format_device(disk, blivet.formats.get_format("disklabel", label_type="gpt"))
+
+ # create two partitions, one empty, one with ext4
+ part1 = self.storage.new_partition(size=Size("100 MiB"), parents=[disk])
+ self.storage.create_device(part1)
+
+ part2 = self.storage.new_partition(size=Size("1 MiB"), parents=[disk], grow=True,
+ fmt=blivet.formats.get_format("ext4"))
+ self.storage.create_device(part2)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ # create MD RAID with metadata 1.0 on the first partition
+ ret = blivet.util.run_program(["mdadm", "--create", "blivetMDTest", "--level=linear",
+ "--metadata=1.0", "--raid-devices=1", "--force", part1.path])
+ self.assertEqual(ret, 0, "Failed to create RAID array for partition wipe test")
+ ret = blivet.util.run_program(["mdadm", "--stop", "/dev/md/blivetMDTest"])
+ self.assertEqual(ret, 0, "Failed to stop RAID array for partition wipe test")
+
+ # now remove the partition without removing the array first
+ part1 = self.storage.devicetree.get_device_by_path(self.vdevs[0] + "1")
+ ac = blivet.deviceaction.ActionDestroyDevice(part1)
+ self.storage.devicetree.actions.add(ac)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ # create the first partition again (without format)
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ part1 = self.storage.new_partition(size=Size("100 MiB"), parents=[disk])
+ self.storage.create_device(part1)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ # XXX PartitionDevice._post_create calls wipefs on the partition, we want to check that
+ # the _pre_create dd wipe works so we need to skip the _post_create wipefs call
+ part1._post_create = lambda: None
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ # make sure the mdmember signature is not present on part1 (and ext4 is untouched on part2)
+ self._partition_wipe_check()

View File

@ -1,65 +0,0 @@
From ee19e665276fd7cd6477da9bee59641b1de1a916 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 27 Jun 2025 11:28:09 +0200
Subject: [PATCH] Tell LVM DBus to refresh its internal status during reset
Unfortunately some users run wipefs <disk> thinking it's enough
to remove all devices on top of the disk cleanly.
In cases where the PV is not directly on the disk, LVM DBus
doesn't get a udev event and doesn't remove the VG and LVs from
DBus so we think these still exist.
Resolves: RHEL-93967
---
blivet/devicelibs/lvm.py | 19 +++++++++++++++++++
blivet/populator/populator.py | 3 +++
2 files changed, 22 insertions(+)
diff --git a/blivet/devicelibs/lvm.py b/blivet/devicelibs/lvm.py
index 38e1bc1bc..47cc3e5d7 100644
--- a/blivet/devicelibs/lvm.py
+++ b/blivet/devicelibs/lvm.py
@@ -36,6 +36,7 @@
log = logging.getLogger("blivet")
from . import raid
+from .. import safe_dbus
from ..size import Size
from ..i18n import N_
from ..flags import flags
@@ -284,3 +285,21 @@ def recommend_thpool_chunk_size(thpool_size):
def is_valid_cache_md_size(md_size):
return md_size >= LVM_CACHE_MIN_METADATA_SIZE and md_size <= LVM_CACHE_MAX_METADATA_SIZE
+
+
+def lvm_dbusd_refresh():
+ lvm_soname = blockdev.get_plugin_soname(blockdev.Plugin.LVM)
+ if 'dbus' not in lvm_soname:
+ return
+
+ try:
+ rc = safe_dbus.call_sync("com.redhat.lvmdbus1",
+ "/com/redhat/lvmdbus1/Manager",
+ "com.redhat.lvmdbus1.Manager",
+ "Refresh",
+ None)
+ except safe_dbus.DBusCallError as e:
+ log.error("Exception occurred when calling LVM DBusD refresh: %s", str(e))
+ else:
+ if rc[0] != 0:
+ log.error("Failed to call LVM DBusD refresh: %s", rc)
diff --git a/blivet/populator/populator.py b/blivet/populator/populator.py
index 2ddea6618..da3b33cac 100644
--- a/blivet/populator/populator.py
+++ b/blivet/populator/populator.py
@@ -453,6 +453,9 @@ def _populate(self):
disklib.update_volume_info()
self.drop_device_info_cache()
+ # force LVM DBusD to refresh its internal state
+ lvm.lvm_dbusd_refresh()
+
if flags.auto_dev_updates and availability.BLOCKDEV_MPATH_PLUGIN.available:
blockdev.mpath.set_friendly_names(flags.multipath_friendly_names)

View File

@ -1,36 +0,0 @@
From e5ad3beb216d25a601d8f35c2b2a97d15cbb0d39 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 3 Sep 2024 10:40:27 +0200
Subject: [PATCH] tests: Change expected Stratis metadata size for stratisd
3.7.0
Stratis changes its metadata and the way "stratis-predict-usage"
predicts its size so we need to change our expectations too.
Resolves: RHEL-102299
---
tests/unit_tests/devicefactory_test.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tests/unit_tests/devicefactory_test.py b/tests/unit_tests/devicefactory_test.py
index 0a5e6a839..89ca34a33 100644
--- a/tests/unit_tests/devicefactory_test.py
+++ b/tests/unit_tests/devicefactory_test.py
@@ -942,7 +942,7 @@ def _get_size_delta(self, devices=None):
:keyword devices: list of factory-managed devices or None
:type devices: list(:class:`blivet.devices.StorageDevice`) or NoneType
"""
- return Size("550 MiB") # huge stratis pool metadata
+ return Size("1.3 GiB") # huge stratis pool metadata
def _validate_factory_device(self, *args, **kwargs):
device = args[0]
@@ -968,7 +968,7 @@ def _validate_factory_device(self, *args, **kwargs):
else:
self.assertAlmostEqual(device.pool.size,
device.size,
- delta=Size("600 MiB"))
+ delta=Size("1.3 GiB"))
self.assertEqual(device.pool.encrypted, kwargs.get("container_encrypted", False))

View File

@ -1,65 +0,0 @@
From 598902388a09e2dd60b0b0f1e556c4661899be68 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 1 Aug 2025 15:03:09 +0200
Subject: [PATCH] Add a pre-wipe fixup function for LVM logical volumes
LVs scheduled to be removed are always activated to remove the
format during installation. If there is a read-only LV with the
skip activation flag with MD metadata this means after activating
the LV to remove the format the MD array is auto-assembled by udev
preventing us from removing it. For this special case, we simply
stop the array before removing the format.
Resolves: RHEL-93966
---
blivet/deviceaction.py | 3 +++
blivet/devices/lvm.py | 19 +++++++++++++++++++
2 files changed, 22 insertions(+)
diff --git a/blivet/deviceaction.py b/blivet/deviceaction.py
index 2e6a8489..6590898f 100644
--- a/blivet/deviceaction.py
+++ b/blivet/deviceaction.py
@@ -766,6 +766,9 @@ class ActionDestroyFormat(DeviceAction):
if hasattr(self.device, 'set_rw'):
self.device.set_rw()
+ if hasattr(self.device, 'pre_format_destroy'):
+ self.device.pre_format_destroy()
+
self.format.destroy()
udev.settle()
if isinstance(self.device, PartitionDevice) and self.device.disklabel_supported:
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index d0b0b2b9..10ed2c94 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -2791,6 +2791,25 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
except blockdev.LVMError as err:
raise errors.LVMError(err)
+ def pre_format_destroy(self):
+ """ Fixup needed to run before wiping this device """
+ if self.ignore_skip_activation > 0:
+ # the LV was not activated during the initial scan so if there is an MD array on it
+ # it will now also get activated and we need to stop it to be able to remove the LV
+ try:
+ info = blockdev.md.examine(self.path)
+ except blockdev.MDRaidError:
+ pass
+ else:
+ # give udev a bit time to activate the array so we can deactivate it again
+ time.sleep(5)
+ log.info("MD metadata found on LV with skip activation, stopping the array %s",
+ info.device)
+ try:
+ blockdev.md.deactivate(info.device)
+ except blockdev.MDRaidError as err:
+ log.info("failed to deactivate %s: %s", info.device, str(err))
+
@type_specific
def _pre_create(self):
LVMLogicalVolumeBase._pre_create(self)
--
2.50.1

View File

@ -1,52 +1,31 @@
Summary: A python module for system storage configuration
Name: python-blivet
Url: https://storageapis.wordpress.com/projects/blivet
Version: 3.10.0
Version: 3.13.0
#%%global prerelease .b2
# prerelease, if defined, should be something like .a1, .b1, .b2.dev1, or .c2
Release: 23%{?prerelease}%{?dist}.alma.1
Release: 1%{?prerelease}%{?dist}.alma.1
Epoch: 1
License: LGPL-2.1-or-later
%global realname blivet
%global realversion %{version}%{?prerelease}
Source0: http://github.com/storaged-project/blivet/archive/%{realname}-%{realversion}.tar.gz
Source1: http://github.com/storaged-project/blivet/archive/%{realname}-%{realversion}-tests.tar.gz
Source0: http://github.com/storaged-project/blivet/releases/download/%{realname}-%{realversion}/%{realname}-%{realversion}.tar.gz
Source1: http://github.com/storaged-project/blivet/releases/download/%{realname}-%{realversion}/%{realname}-%{realversion}-tests.tar.gz
%if 0%{?rhel} >= 9 && ! 0%{?almalinux}
Patch0: 0001-remove-btrfs-plugin.patch
%endif
Patch1: 0002-Fix-skipping-btrfs-calls-when-libblockdev-btrfs-plugin-is-missing.patch
Patch2: 0003-XFS-resize-test-fix.patch
Patch3: 0004-Run-mkfs-xfs-with-force-option-by-default.patch
Patch4: 0005-consolidated-s390-device-configuration.patch
Patch5: 0007-Fix-checking-for-NVMe-plugin-availability.patch
Patch6: 0008-Align-sizes-up-for-growable-LVs.patch
Patch7: 0009-mod_pass_in_stratis_test.patch
Patch8: 0010-Fix_running_tests_in_FIPS_mode.patch
Patch9: 0011-Make-GPT-default-label-type-on-all-architectures.patch
Patch10: 0012-Fix-crash-on-ppc64le-with-GPT.patch
Patch11: 0013-Set-persistent-allow-discards-flag-for-new-LUKS-devices.patch
Patch12: 0014-Do-not-remove-PVs-from-devices-file-if-disabled-or-doesnt-exist.patch
Patch13: 0015-iscsi-Use-node-startup-onboot-option-for-Login.patch
Patch14: 0016-Make-sure-selinux_test-doesnt-try-to-create-mountpoints.patch
Patch15: 0017-LVMPV-format-size-fix.patch
Patch16: 0018-Include-additional-information-in-PartitioningError.patch
Patch17: 0019-Make-ActionDestroyFormat-optional.patch
Patch18: 0020-Wipe-end-partition-before-creating-it-as-well-as-the-start.patch
Patch19: 0021-Tell-LVM-DBus-to-refresh-its-internal-status-during-reset.patch
Patch20: 0022-Change-expected-Stratis-metadata-size.patch
Patch21: 0023-Add-a-pre-wipe-fixup-function-for-LVM-logical-volume.patch
# Versions of required components (done so we make sure the buildrequires
# match the requires versions of things).
%global partedver 1.8.1
%global pypartedver 3.10.4
%global utillinuxver 2.15.1
%global libblockdevver 3.0
%global libblockdevver 3.4.0
%global libbytesizever 0.3
%global pyudevver 0.18
%global s390utilscorever 2.31.0
BuildArch: noarch
@ -70,19 +49,16 @@ python module.
%package -n python3-%{realname}
Summary: A python3 package for examining and modifying storage configuration.
%{?python_provide:%python_provide python3-%{realname}}
BuildRequires: gettext
BuildRequires: python3-devel
BuildRequires: python3-setuptools
Requires: python3
Requires: python3-pyudev >= %{pyudevver}
Requires: parted >= %{partedver}
Requires: python3-pyparted >= %{pypartedver}
Requires: libselinux-python3
Requires: python3-libmount
Requires: python3-blockdev >= %{libblockdevver}
Requires: python3-dasbus
Recommends: libblockdev-btrfs >= %{libblockdevver}
Recommends: libblockdev-crypto >= %{libblockdevver}
Recommends: libblockdev-dm >= %{libblockdevver}
@ -92,8 +68,10 @@ Recommends: libblockdev-lvm >= %{libblockdevver}
Recommends: libblockdev-mdraid >= %{libblockdevver}
Recommends: libblockdev-mpath >= %{libblockdevver}
Recommends: libblockdev-nvme >= %{libblockdevver}
Recommends: libblockdev-part >= %{libblockdevver}
Recommends: libblockdev-swap >= %{libblockdevver}
Recommends: libblockdev-s390 >= %{libblockdevver}
Recommends: s390utils-core >= %{s390utilscorever}
Requires: python3-bytesize >= %{libbytesizever}
Requires: util-linux >= %{utillinuxver}
@ -112,6 +90,9 @@ configuration.
%autosetup -n %{realname}-%{realversion} -N
%autosetup -n %{realname}-%{realversion} -b1 -p1
%generate_buildrequires
%pyproject_buildrequires
%build
make
@ -132,9 +113,13 @@ make DESTDIR=%{buildroot} install
%{python3_sitelib}/*
%changelog
* Wed Aug 06 2025 Neal Gompa <ngompa@almalinux.org> - 1:3.10.0-23.alma.1
* Fri Oct 24 2025 Neal Gompa <ngompa@almalinux.org> - 1:3.13.0-1.alma.1
- AlmaLinux changes: Enable Btrfs support
* Wed Oct 08 2025 Vojtech Trefny <vtrefny@redhat.com> - 3.13.0-1
- Update to blivet 3.13.0 upstream release
Resolves: RHEL-115005
* Mon Aug 04 2025 Vojtech Trefny <vtrefny@redhat.com> - 3.10.0-23
- Add a pre-wipe fixup function for LVM logical volumes
Resolves: RHEL-93966

View File

@ -1,2 +1,2 @@
SHA512 (blivet-3.10.0.tar.gz) = 74172dec98a1b4f71cee6d64d46cdafa5116cfbbaddfdaed3dd118fef019dce54ff9d9206faada5991efac6b61bc558bb3050165f1d68bde1c1ec228c01916c8
SHA512 (blivet-3.10.0-tests.tar.gz) = 53d2f37ff9822141caf669889835069da6e3f728761c0d006afcf80a2628f8816cbf558d30a085082e1bfec2e4b575f9ea78859a17f4e0bf7458e3faba158ef7
SHA512 (blivet-3.13.0.tar.gz) = afbb626886491b8853d35e5304e7d2eead70023f5403279709039f721c84004ba4ea2a391ed62e8093d966e562622706ecc34370dc33d34084e2d59d7b7d20ef
SHA512 (blivet-3.13.0-tests.tar.gz) = fa424a8d84312f1df851fb6b1a17c7c81c5055e926220aa4079cd46d64844b4c7ed9394bcb8497b5a25f2bf7528e9990d55645d918e4936be28ad7848f604a66