diff --git a/SOURCES/0024-Added-support-for-PV-grow.patch b/SOURCES/0024-Added-support-for-PV-grow.patch
new file mode 100644
index 0000000..2406728
--- /dev/null
+++ b/SOURCES/0024-Added-support-for-PV-grow.patch
@@ -0,0 +1,129 @@
+From 0777b9d519421f3c46f6dcd51e39ecdc2956e2e0 Mon Sep 17 00:00:00 2001
+From: Jan Pokorny
+Date: Thu, 25 Apr 2024 14:06:13 +0200
+Subject: [PATCH] Added support for PV grow
+
+The storage role requires support for the case when a PV has to be
+resized to fill all available space after its device's size changes
+(usually on a VM).
+
+A new flag 'grow_to_fill' was added, which marks the device for size
+expansion (all available space is taken).
+The proper size is determined by LVM, avoiding inaccurate size
+calculations in blivet.
+---
+ blivet/formats/__init__.py                    |  4 +++-
+ blivet/formats/lvmpv.py                       | 23 ++++++++++++++++++-
+ blivet/tasks/pvtask.py                        |  7 +++++-
+ .../storage_tests/formats_test/lvmpv_test.py  | 10 ++++++++
+ 4 files changed, 41 insertions(+), 3 deletions(-)
+
+diff --git a/blivet/formats/__init__.py b/blivet/formats/__init__.py
+index b1ad740e..eb8b6ab3 100644
+--- a/blivet/formats/__init__.py
++++ b/blivet/formats/__init__.py
+@@ -424,7 +424,9 @@ class DeviceFormat(ObjectID):
+         if not self.resizable:
+             raise FormatResizeError("format not resizable", self.device)
+
+-        if self.target_size == self.current_size:
++        # skip if sizes are equal unless grow to fill on lvmpv is requested
++        if (self.target_size == self.current_size and
++                (self.type != "lvmpv" or not self.grow_to_fill)):  # pylint: disable=no-member
+             return
+
+         if not self._resize.available:
+diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py
+index 65acedbe..51fa4a3c 100644
+--- a/blivet/formats/lvmpv.py
++++ b/blivet/formats/lvmpv.py
+@@ -33,7 +33,7 @@ from ..devicelibs import lvm
+ from ..tasks import availability, pvtask
+ from ..i18n import N_
+ from ..size import Size
+-from ..errors import PhysicalVolumeError
++from ..errors import DeviceFormatError, PhysicalVolumeError
+ from . import DeviceFormat, register_device_format
+ from .. import udev
+ from ..static_data.lvm_info import pvs_info, vgs_info
+@@ -98,6 +98,9 @@ class LVMPhysicalVolume(DeviceFormat):
+
+         self.inconsistent_vg = False
+
++        # when set to True, blivet will try to resize the PV to fill all available space
++        self._grow_to_fill = False
++
+     def __repr__(self):
+         s = DeviceFormat.__repr__(self)
+         s += ("  vg_name = %(vg_name)s  vg_uuid = %(vg_uuid)s"
+@@ -106,6 +109,24 @@ def __repr__(self):
+                "pe_start": self.pe_start, "data_alignment": self.data_alignment})
+         return s
+
++    @property
++    def grow_to_fill(self):
++        """
++        Can be set to True to mark the format for resize so it matches the size of its device.
++        (The main use case is a disk size increase on a VM.)
++        Uses blockdev/lvm for the exact new size calculation.
++        ActionResizeFormat has to be executed to apply the change.
++        The format has to be resizable (i.e. run format.update_size_info() first) to allow this.
++ """ ++ return self._grow_to_fill ++ ++ @grow_to_fill.setter ++ def grow_to_fill(self, fill: bool): ++ if fill is True: ++ if not self.resizable: ++ raise DeviceFormatError("format is not resizable") ++ self._grow_to_fill = fill ++ + @property + def dict(self): + d = super(LVMPhysicalVolume, self).dict +diff --git a/blivet/tasks/pvtask.py b/blivet/tasks/pvtask.py +index 04c8a4d1..b5bd72e0 100644 +--- a/blivet/tasks/pvtask.py ++++ b/blivet/tasks/pvtask.py +@@ -82,6 +82,11 @@ class PVResize(task.BasicApplication, dfresize.DFResizeTask): + def do_task(self): # pylint: disable=arguments-differ + """ Resizes the LVMPV format. """ + try: +- blockdev.lvm.pvresize(self.pv.device, self.pv.target_size.convert_to(self.unit)) ++ if self.pv.grow_to_fill: ++ # resize PV to fill all available space on device by omitting ++ # the size parameter ++ blockdev.lvm.pvresize(self.pv.device, 0) ++ else: ++ blockdev.lvm.pvresize(self.pv.device, self.pv.target_size.convert_to(self.unit)) + except blockdev.LVMError as e: + raise PhysicalVolumeError(e) +diff --git a/tests/storage_tests/formats_test/lvmpv_test.py b/tests/storage_tests/formats_test/lvmpv_test.py +index cdc33ec4..d2811f3e 100644 +--- a/tests/storage_tests/formats_test/lvmpv_test.py ++++ b/tests/storage_tests/formats_test/lvmpv_test.py +@@ -37,6 +37,9 @@ class LVMPVTestCase(loopbackedtestcase.LoopBackedTestCase): + self.fmt.update_size_info() + self.assertTrue(self.fmt.resizable) + ++ # save the pv maximum size ++ maxpvsize = self.fmt.current_size ++ + # resize the format + new_size = Size("50 MiB") + self.fmt.target_size = new_size +@@ -46,5 +49,12 @@ class LVMPVTestCase(loopbackedtestcase.LoopBackedTestCase): + self.fmt.update_size_info() + self.assertEqual(self.fmt.current_size, new_size) + ++ # Test growing PV to fill all available space on the device ++ self.fmt.grow_to_fill = True ++ self.fmt.do_resize() ++ ++ self.fmt.update_size_info() ++ self.assertEqual(self.fmt.current_size, maxpvsize) ++ + def _pvremove(self): + self.fmt._destroy() +-- +2.45.0 + diff --git a/SOURCES/0025-Stratis-fixes-backport.patch b/SOURCES/0025-Stratis-fixes-backport.patch new file mode 100644 index 0000000..5293543 --- /dev/null +++ b/SOURCES/0025-Stratis-fixes-backport.patch @@ -0,0 +1,1328 @@ +From 44cea0b768633c1a8c6f472d9bfc9a9118713e58 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Mon, 23 Jan 2023 13:47:24 +0100 +Subject: [PATCH 01/16] tests: Patch checking stratis pool metadata size + +In unit tests we don't want to actually use the stratis tools to +check for metadata size, we just need to be sure our internal +calculation works. 
+--- + tests/unit_tests/devices_test/stratis_test.py | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/tests/unit_tests/devices_test/stratis_test.py b/tests/unit_tests/devices_test/stratis_test.py +index 1d19db7d4..2ce873843 100644 +--- a/tests/unit_tests/devices_test/stratis_test.py ++++ b/tests/unit_tests/devices_test/stratis_test.py +@@ -35,8 +35,9 @@ def test_new_stratis(self): + pool = b.new_stratis_pool(name="testpool", parents=[bd]) + self.assertEqual(pool.name, "testpool") + self.assertEqual(pool.size, bd.size) +- # for 2 GiB pool, metadata should take around 0.5 GiB +- self.assertAlmostEqual(pool.free_space, Size("1.5 GiB"), delta=Size("10 MiB")) ++ ++ with patch("blivet.devicelibs.stratis.pool_used", lambda _d, _e: Size("512 MiB")): ++ self.assertAlmostEqual(pool.free_space, Size("1.5 GiB")) + + with patch("blivet.devicetree.DeviceTree.names", []): + fs = b.new_stratis_filesystem(name="testfs", parents=[pool], size=Size("1 GiB")) + +From f85e35218db42e5f563387866287ea93993bf9c9 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Mon, 18 Mar 2024 14:35:04 +0100 +Subject: [PATCH 02/16] Fix creating Stratis filesystem without size specified + +We do allow creating Stratis FS without specifying its size which +simply means creating the default 1 TiB filesystem. +--- + blivet/devices/stratis.py | 2 + + tests/unit_tests/devices_test/stratis_test.py | 51 +++++++++++++++++++ + 2 files changed, 53 insertions(+) + +diff --git a/blivet/devices/stratis.py b/blivet/devices/stratis.py +index 27644e669..2b8d75f95 100644 +--- a/blivet/devices/stratis.py ++++ b/blivet/devices/stratis.py +@@ -198,6 +198,8 @@ class StratisFilesystemDevice(StorageDevice): + _min_size = Size("512 MiB") + + def __init__(self, name, parents=None, size=None, uuid=None, exists=False): ++ if size is None: ++ size = devicelibs.stratis.STRATIS_FS_SIZE + if not exists and parents[0].free_space <= devicelibs.stratis.filesystem_md_size(size): + raise StratisError("cannot create new stratis filesystem, not enough free space in the pool") + +diff --git a/tests/unit_tests/devices_test/stratis_test.py b/tests/unit_tests/devices_test/stratis_test.py +index 2ce873843..f7fae9e08 100644 +--- a/tests/unit_tests/devices_test/stratis_test.py ++++ b/tests/unit_tests/devices_test/stratis_test.py +@@ -104,3 +104,54 @@ def test_new_encrypted_stratis(self): + encrypted=True, + passphrase="secret", + key_file=None) ++ ++ def test_new_stratis_no_size(self): ++ b = blivet.Blivet() ++ bd = StorageDevice("bd1", fmt=blivet.formats.get_format("stratis"), ++ size=Size("2 GiB"), exists=False) ++ ++ b.devicetree._add_device(bd) ++ ++ with patch("blivet.devicetree.DeviceTree.names", []): ++ pool = b.new_stratis_pool(name="testpool", parents=[bd]) ++ self.assertEqual(pool.name, "testpool") ++ self.assertEqual(pool.size, bd.size) ++ ++ with patch("blivet.devicelibs.stratis.pool_used", lambda _d, _e: Size("512 MiB")): ++ self.assertAlmostEqual(pool.free_space, Size("1.5 GiB")) ++ ++ with patch("blivet.devicetree.DeviceTree.names", []): ++ fs = b.new_stratis_filesystem(name="testfs", parents=[pool]) ++ ++ self.assertEqual(fs.name, "testpool/testfs") ++ self.assertEqual(fs.path, "/dev/stratis/%s" % fs.name) ++ self.assertEqual(fs.size, Size("1 TiB")) ++ self.assertEqual(fs.pool, pool) ++ self.assertEqual(fs.format.type, "stratis xfs") ++ # for 1 TiB filesystem, metadata should take around 1 GiB ++ self.assertAlmostEqual(fs.used_size, Size("1 GiB"), delta=Size("50 MiB")) ++ ++ b.create_device(pool) ++ b.create_device(fs) ++ ++ with 
patch("blivet.devicelibs.stratis") as stratis_dbus: ++ with patch.object(pool, "_pre_create"): ++ with patch.object(pool, "_post_create"): ++ pool.create() ++ stratis_dbus.create_pool.assert_called_with(name='testpool', ++ devices=['/dev/bd1'], ++ encrypted=False, ++ passphrase=None, ++ key_file=None) ++ ++ # we would get this from pool._post_create ++ pool.uuid = "c4fc9ebe-e173-4cab-8d81-cc6abddbe02d" ++ ++ with patch("blivet.devicelibs.stratis") as stratis_dbus: ++ with patch.object(fs, "_pre_create"): ++ with patch.object(fs, "_post_create"): ++ fs.create() ++ stratis_dbus.create_filesystem.assert_called_with(name="testfs", ++ pool_uuid="c4fc9ebe-e173-4cab-8d81-cc6abddbe02d", ++ fs_size=Size("1 TiB")) ++ + +From 3f6f9b2f93a572a055cffa83c9bfbccfe163adec Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Wed, 10 Apr 2024 10:54:46 +0200 +Subject: [PATCH 03/16] Do not allow creating stratis pools with different + sector sizes + +Similarly to LVM Stratis alos doesn't allow mixing different +sector sizes in one pool. +--- + blivet/devices/stratis.py | 20 ++++++++++++++++++- + blivet/errors.py | 4 ++++ + tests/unit_tests/devices_test/stratis_test.py | 16 ++++++++++++--- + 3 files changed, 36 insertions(+), 4 deletions(-) + +diff --git a/blivet/devices/stratis.py b/blivet/devices/stratis.py +index 2b8d75f95..61eed991f 100644 +--- a/blivet/devices/stratis.py ++++ b/blivet/devices/stratis.py +@@ -24,10 +24,12 @@ + import logging + log = logging.getLogger("blivet") + ++from collections import defaultdict ++ + from .storage import StorageDevice + from ..static_data import stratis_info + from ..storage_log import log_method_call +-from ..errors import DeviceError, StratisError ++from ..errors import DeviceError, StratisError, InconsistentParentSectorSize + from ..size import Size + from ..tasks import availability + from .. import devicelibs +@@ -161,6 +163,22 @@ def _post_create(self): + parent.format.pool_name = self.name + parent.format.pool_uuid = self.uuid + ++ def _add_parent(self, parent): ++ super(StratisPoolDevice, self)._add_parent(parent) ++ ++ # we are creating new pool ++ if not self.exists: ++ sector_sizes = defaultdict(list) ++ for ss, name in [(p.sector_size, p.name) for p in self.blockdevs + [parent]]: # pylint: disable=no-member ++ sector_sizes[ss].append(name) ++ if len(sector_sizes.keys()) != 1: ++ msg = "Cannot create pool '%s'. "\ ++ "The following disks have inconsistent sector size:\n" % self.name ++ for sector_size in sector_sizes.keys(): ++ msg += "%s: %d\n" % (", ".join(sector_sizes[sector_size]), sector_size) ++ ++ raise InconsistentParentSectorSize(msg) ++ + def _destroy(self): + """ Destroy the device. 
""" + log_method_call(self, self.name, status=self.status) +diff --git a/blivet/errors.py b/blivet/errors.py +index b886ffec5..ec7d06efb 100644 +--- a/blivet/errors.py ++++ b/blivet/errors.py +@@ -67,6 +67,10 @@ class DeviceUserDeniedFormatError(DeviceError): + class InconsistentPVSectorSize(DeviceError, ValueError): + pass + ++ ++class InconsistentParentSectorSize(DeviceError, ValueError): ++ pass ++ + # DeviceFormat + + +diff --git a/tests/unit_tests/devices_test/stratis_test.py b/tests/unit_tests/devices_test/stratis_test.py +index f7fae9e08..cbdd225ed 100644 +--- a/tests/unit_tests/devices_test/stratis_test.py ++++ b/tests/unit_tests/devices_test/stratis_test.py +@@ -2,16 +2,16 @@ + import unittest + + try: +- from unittest.mock import patch ++ from unittest.mock import patch, PropertyMock + except ImportError: +- from mock import patch ++ from mock import patch, PropertyMock + + import blivet + + from blivet.devices import StorageDevice + from blivet.devices import StratisPoolDevice + from blivet.devices import StratisFilesystemDevice +-from blivet.errors import StratisError ++from blivet.errors import StratisError, InconsistentParentSectorSize + from blivet.size import Size + + +@@ -155,3 +155,13 @@ def test_new_stratis_no_size(self): + pool_uuid="c4fc9ebe-e173-4cab-8d81-cc6abddbe02d", + fs_size=Size("1 TiB")) + ++ def test_pool_inconsistent_sector_size(self): ++ bd = StorageDevice("bd1", fmt=blivet.formats.get_format("stratis"), ++ size=Size("2 GiB"), exists=False) ++ bd2 = StorageDevice("bd2", fmt=blivet.formats.get_format("stratis"), ++ size=Size("2 GiB"), exists=False) ++ ++ with patch("blivet.devices.StorageDevice.sector_size", new_callable=PropertyMock) as mock_property: ++ mock_property.__get__ = lambda _mock, bd, _class: 512 if bd.name == "bd1" else 4096 ++ with self.assertRaisesRegex(InconsistentParentSectorSize, "Cannot create pool"): ++ StratisPoolDevice("testpool", parents=[bd, bd2]) + +From 6f9df7756ca081e59d8c5b0201c11600782bb2fa Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Fri, 12 Apr 2024 15:53:15 +0200 +Subject: [PATCH 04/16] Add support for adding new members to existing Stratis + pool + +Currently only new devices can be added to a pool, removing devices +is not supported by Stratis. 
+--- + blivet/devicelibs/stratis.py | 25 +++++++++++ + blivet/devices/stratis.py | 9 ++++ + .../devices_test/stratis_test.py | 43 +++++++++++++++++++ + 3 files changed, 77 insertions(+) + +diff --git a/blivet/devicelibs/stratis.py b/blivet/devicelibs/stratis.py +index e813924e9..1f7432344 100644 +--- a/blivet/devicelibs/stratis.py ++++ b/blivet/devicelibs/stratis.py +@@ -255,3 +255,28 @@ def create_filesystem(name, pool_uuid, fs_size=None): + + # repopulate the stratis info cache so the new filesystem will be added + stratis_info.drop_cache() ++ ++ ++def add_device(pool_uuid, device): ++ if not availability.STRATIS_DBUS.available: ++ raise StratisError("Stratis DBus service not available") ++ ++ # repopulate the stratis info cache just to be sure all values are still valid ++ stratis_info.drop_cache() ++ ++ pool_info = stratis_info.pools[pool_uuid] ++ ++ try: ++ ((succ, _paths), rc, err) = safe_dbus.call_sync(STRATIS_SERVICE, ++ pool_info.object_path, ++ STRATIS_POOL_INTF, ++ "AddDataDevs", ++ GLib.Variant("(as)", ([device],))) ++ except safe_dbus.DBusCallError as e: ++ raise StratisError("Failed to create stratis filesystem on '%s': %s" % (pool_info.name, str(e))) ++ else: ++ if not succ: ++ raise StratisError("Failed to create stratis filesystem on '%s': %s (%d)" % (pool_info.name, err, rc)) ++ ++ # repopulate the stratis info cache so the new filesystem will be added ++ stratis_info.drop_cache() +diff --git a/blivet/devices/stratis.py b/blivet/devices/stratis.py +index 61eed991f..317b80b36 100644 +--- a/blivet/devices/stratis.py ++++ b/blivet/devices/stratis.py +@@ -179,6 +179,15 @@ def _add_parent(self, parent): + + raise InconsistentParentSectorSize(msg) + ++ parent.format.pool_name = self.name ++ parent.format.pool_uuid = self.uuid ++ ++ def _add(self, member): ++ devicelibs.stratis.add_device(self.uuid, member.path) ++ ++ def _remove(self, member): ++ raise DeviceError("Removing members from a Stratis pool is not supported") ++ + def _destroy(self): + """ Destroy the device. 
""" + log_method_call(self, self.name, status=self.status) +diff --git a/tests/storage_tests/devices_test/stratis_test.py b/tests/storage_tests/devices_test/stratis_test.py +index 3aba7685f..cfb645ef5 100644 +--- a/tests/storage_tests/devices_test/stratis_test.py ++++ b/tests/storage_tests/devices_test/stratis_test.py +@@ -162,3 +162,46 @@ def test_stratis_overprovision(self): + self.assertIsNotNone(fs) + self.assertIsInstance(fs, blivet.devices.StratisFilesystemDevice) + self.assertAlmostEqual(fs.size, blivet.size.Size("2 GiB"), delta=blivet.size.Size("10 MiB")) ++ ++ def test_stratis_add_device(self): ++ disk1 = self.storage.devicetree.get_device_by_path(self.vdevs[0]) ++ self.assertIsNotNone(disk1) ++ self.storage.initialize_disk(disk1) ++ ++ bd1 = self.storage.new_partition(size=blivet.size.Size("1 GiB"), fmt_type="stratis", ++ parents=[disk1]) ++ self.storage.create_device(bd1) ++ ++ blivet.partitioning.do_partitioning(self.storage) ++ ++ pool = self.storage.new_stratis_pool(name="blivetTestPool", parents=[bd1]) ++ self.storage.create_device(pool) ++ ++ self.storage.do_it() ++ self.storage.reset() ++ ++ disk2 = self.storage.devicetree.get_device_by_path(self.vdevs[1]) ++ self.assertIsNotNone(disk2) ++ self.storage.initialize_disk(disk2) ++ ++ bd2 = self.storage.new_partition(size=blivet.size.Size("1 GiB"), fmt_type="stratis", ++ parents=[disk2]) ++ self.storage.create_device(bd2) ++ ++ blivet.partitioning.do_partitioning(self.storage) ++ ++ pool = self.storage.devicetree.get_device_by_name("blivetTestPool") ++ ++ ac = blivet.deviceaction.ActionAddMember(pool, bd2) ++ self.storage.devicetree.actions.add(ac) ++ self.storage.do_it() ++ self.storage.reset() ++ ++ pool = self.storage.devicetree.get_device_by_name("blivetTestPool") ++ self.assertIsNotNone(pool) ++ self.assertEqual(len(pool.parents), 2) ++ self.assertCountEqual([p.path for p in pool.parents], [self.vdevs[0] + "1", self.vdevs[1] + "1"]) ++ ++ bd2 = self.storage.devicetree.get_device_by_path(self.vdevs[1] + "1") ++ self.assertEqual(bd2.format.pool_name, pool.name) ++ self.assertEqual(bd2.format.pool_uuid, pool.uuid) + +From c62d54dcccd2bee5dfa631c7aa4cbdb4eefa0b8a Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Fri, 12 Apr 2024 15:52:02 +0200 +Subject: [PATCH 05/16] Base StratisPoolDevice on ContainerDevice instead of + StorageDevice + +Stratis pool is a container and we need to base it on +ContainerDevice to support some features like member management. +--- + blivet/devices/stratis.py | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/blivet/devices/stratis.py b/blivet/devices/stratis.py +index 317b80b36..db89cfaf8 100644 +--- a/blivet/devices/stratis.py ++++ b/blivet/devices/stratis.py +@@ -26,6 +26,7 @@ + + from collections import defaultdict + ++from .container import ContainerDevice + from .storage import StorageDevice + from ..static_data import stratis_info + from ..storage_log import log_method_call +@@ -35,7 +36,7 @@ + from .. import devicelibs + + +-class StratisPoolDevice(StorageDevice): ++class StratisPoolDevice(ContainerDevice): + """ A stratis pool device """ + + _type = "stratis pool" + +From 22724192cc2160771de211e90a1fa3347e6dc1d2 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Thu, 18 Apr 2024 16:14:07 +0200 +Subject: [PATCH 06/16] Make sure to include stderr when gathering output of + stratis tools + +Without this we won't get any error message when the call fails. 
+--- + blivet/devicelibs/stratis.py | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/blivet/devicelibs/stratis.py b/blivet/devicelibs/stratis.py +index 1f7432344..d76339b51 100644 +--- a/blivet/devicelibs/stratis.py ++++ b/blivet/devicelibs/stratis.py +@@ -60,7 +60,7 @@ def pool_used(dev_sizes, encrypted=False): + if encrypted: + cmd.append("--encrypted") + +- rc, out = util.run_program_and_capture_output(cmd) ++ rc, out = util.run_program_and_capture_output(cmd, stderr_to_stdout=True) + if rc: + raise StratisError("Failed to predict usage for stratis pool") + +@@ -78,7 +78,8 @@ def filesystem_md_size(fs_size): + + rc, out = util.run_program_and_capture_output([availability.STRATISPREDICTUSAGE_APP.name, "filesystem", + "--filesystem-size", +- str(fs_size.get_bytes())]) ++ str(fs_size.get_bytes())], ++ stderr_to_stdout=True) + if rc: + raise StratisError("Failed to predict usage for stratis filesystem: %s" % out) + + +From db1e411b5bd8935a29348697ba4e5c1857173e01 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Fri, 19 Apr 2024 12:44:19 +0200 +Subject: [PATCH 07/16] Round Stratis Filesystem size down to the nearest + sector + +Stratis requires sizes to be rounded for the filesystems. +--- + blivet/devices/stratis.py | 9 ++++++++- + tests/unit_tests/devices_test/stratis_test.py | 9 +++++++++ + 2 files changed, 17 insertions(+), 1 deletion(-) + +diff --git a/blivet/devices/stratis.py b/blivet/devices/stratis.py +index db89cfaf8..573ab8bd5 100644 +--- a/blivet/devices/stratis.py ++++ b/blivet/devices/stratis.py +@@ -27,11 +27,12 @@ + from collections import defaultdict + + from .container import ContainerDevice ++from .lib import LINUX_SECTOR_SIZE + from .storage import StorageDevice + from ..static_data import stratis_info + from ..storage_log import log_method_call + from ..errors import DeviceError, StratisError, InconsistentParentSectorSize +-from ..size import Size ++from ..size import Size, ROUND_DOWN + from ..tasks import availability + from .. 
import devicelibs
+
+@@ -228,6 +229,12 @@ class StratisFilesystemDevice(StorageDevice):
+     def __init__(self, name, parents=None, size=None, uuid=None, exists=False):
+         if size is None:
+             size = devicelibs.stratis.STRATIS_FS_SIZE
++
++        # round size down to the nearest sector
++        if not exists and size % LINUX_SECTOR_SIZE:
++            log.info("%s: rounding size %s down to the nearest sector", name, size)
++            size = size.round_to_nearest(LINUX_SECTOR_SIZE, ROUND_DOWN)
++
+         if not exists and parents[0].free_space <= devicelibs.stratis.filesystem_md_size(size):
+             raise StratisError("cannot create new stratis filesystem, not enough free space in the pool")
+
+diff --git a/tests/unit_tests/devices_test/stratis_test.py b/tests/unit_tests/devices_test/stratis_test.py
+index cbdd225ed..539995030 100644
+--- a/tests/unit_tests/devices_test/stratis_test.py
++++ b/tests/unit_tests/devices_test/stratis_test.py
+@@ -165,3 +165,12 @@ def test_pool_inconsistent_sector_size(self):
+             mock_property.__get__ = lambda _mock, bd, _class: 512 if bd.name == "bd1" else 4096
+             with self.assertRaisesRegex(InconsistentParentSectorSize, "Cannot create pool"):
+                 StratisPoolDevice("testpool", parents=[bd, bd2])
++
++    def test_filesystem_round_size(self):
++        bd = StorageDevice("bd1", fmt=blivet.formats.get_format("stratis"),
++                           size=Size("2 GiB"), exists=False)
++        pool = StratisPoolDevice("testpool", parents=[bd])
++
++        fs = StratisFilesystemDevice("testfs", parents=[pool], size=Size("1 GiB") + Size(1))
++        # size should be rounded down to 1 GiB
++        self.assertEqual(fs.size, Size("1 GiB"))
+
+From dfedd1932e7473a92d3e49e6fb0d9cd4f0a6ddec Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Fri, 19 Apr 2024 13:33:00 +0200
+Subject: [PATCH 08/16] Add support for creating encrypted Stratis pool with
+ Clevis
+
+Both Clevis/Tang and TPM2 are supported by this implementation.
+---
+ blivet/devicelibs/stratis.py                  | 14 +++-
+ blivet/devices/stratis.py                     | 12 +++-
+ blivet/populator/helpers/stratis.py           | 16 ++++-
+ blivet/static_data/stratis_info.py            | 11 ++-
+ .../devices_test/stratis_test.py              | 69 ++++++++++++++++++-
+ tests/unit_tests/devices_test/stratis_test.py | 38 +++++++++-
+ 6 files changed, 148 insertions(+), 12 deletions(-)
+
+diff --git a/blivet/devicelibs/stratis.py b/blivet/devicelibs/stratis.py
+index d76339b51..3f0aaf29c 100644
+--- a/blivet/devicelibs/stratis.py
++++ b/blivet/devicelibs/stratis.py
+@@ -190,7 +190,7 @@ def unlock_pool(pool_uuid):
+             raise StratisError("Failed to unlock pool: %s" % err)
+
+
+-def create_pool(name, devices, encrypted, passphrase, key_file):
++def create_pool(name, devices, encrypted, passphrase, key_file, clevis):
+     if not availability.STRATIS_DBUS.available:
+         raise StratisError("Stratis DBus service not available")
+
+@@ -203,10 +203,18 @@ def create_pool(name, devices, encrypted, passphrase, key_file):
+         key_desc = "blivet-%s" % name  # XXX what would be a good key description?
+ set_key(key_desc, passphrase, key_file) + key_opt = GLib.Variant("(bs)", (True, key_desc)) ++ if clevis: ++ clevis_config = {"url": clevis.tang_url} ++ if clevis.tang_thumbprint: ++ clevis_config["thp"] = clevis.tang_thumbprint ++ else: ++ clevis_config["stratis:tang:trust_url"] = True ++ clevis_opt = GLib.Variant("(b(ss))", (True, (clevis.pin, json.dumps(clevis_config)))) ++ else: ++ clevis_opt = GLib.Variant("(b(ss))", (False, ("", ""))) + else: + key_opt = GLib.Variant("(bs)", (False, "")) +- +- clevis_opt = GLib.Variant("(b(ss))", (False, ("", ""))) ++ clevis_opt = GLib.Variant("(b(ss))", (False, ("", ""))) + + try: + ((succ, _paths), rc, err) = safe_dbus.call_sync(STRATIS_SERVICE, +diff --git a/blivet/devices/stratis.py b/blivet/devices/stratis.py +index 573ab8bd5..1f46e43e0 100644 +--- a/blivet/devices/stratis.py ++++ b/blivet/devices/stratis.py +@@ -34,9 +34,15 @@ + from ..errors import DeviceError, StratisError, InconsistentParentSectorSize + from ..size import Size, ROUND_DOWN + from ..tasks import availability ++from ..util import default_namedtuple + from .. import devicelibs + + ++StratisClevisConfig = default_namedtuple("StratisClevisConfig", ["pin", ++ ("tang_url", None), ++ ("tang_thumbprint", None)]) ++ ++ + class StratisPoolDevice(ContainerDevice): + """ A stratis pool device """ + +@@ -55,10 +61,13 @@ def __init__(self, *args, **kwargs): + :type passphrase: str + :keyword key_file: path to a file containing a key + :type key_file: str ++ :keyword clevis: clevis configuration ++ :type: StratisClevisConfig + """ + self._encrypted = kwargs.pop("encrypted", False) + self.__passphrase = kwargs.pop("passphrase", None) + self._key_file = kwargs.pop("key_file", None) ++ self._clevis = kwargs.pop("clevis", None) + + super(StratisPoolDevice, self).__init__(*args, **kwargs) + +@@ -150,7 +159,8 @@ def _create(self): + devices=bd_list, + encrypted=self.encrypted, + passphrase=self.__passphrase, +- key_file=self._key_file) ++ key_file=self._key_file, ++ clevis=self._clevis) + + def _post_create(self): + super(StratisPoolDevice, self)._post_create() +diff --git a/blivet/populator/helpers/stratis.py b/blivet/populator/helpers/stratis.py +index ddcd8ec53..0a3da927d 100644 +--- a/blivet/populator/helpers/stratis.py ++++ b/blivet/populator/helpers/stratis.py +@@ -21,11 +21,12 @@ + # + + import copy ++import json + + from ...callbacks import callbacks + from ... 
import udev + from ...formats import get_format +-from ...devices.stratis import StratisPoolDevice, StratisFilesystemDevice ++from ...devices.stratis import StratisPoolDevice, StratisFilesystemDevice, StratisClevisConfig + from ...devicelibs.stratis import STRATIS_FS_SIZE + from ...storage_log import log_method_call + from .formatpopulator import FormatPopulator +@@ -120,12 +121,23 @@ def _add_pool_device(self): + elif pool_device is None: + # TODO: stratis duplicate pool name + ++ if pool_info.clevis: ++ if pool_info.clevis[0] == "tang": ++ data = json.loads(pool_info.clevis[1]) ++ clevis_info = StratisClevisConfig(pin=pool_info.clevis[0], tang_url=data["url"], ++ tang_thumbprint=data["thp"]) ++ else: ++ clevis_info = StratisClevisConfig(pin=pool_info.clevis[0]) ++ else: ++ clevis_info = None ++ + pool_device = StratisPoolDevice(pool_info.name, + parents=[self.device], + uuid=pool_info.uuid, + size=pool_info.physical_size, + exists=True, +- encrypted=pool_info.encrypted) ++ encrypted=pool_info.encrypted, ++ clevis=clevis_info) + self._devicetree._add_device(pool_device) + + # now add filesystems on this pool +diff --git a/blivet/static_data/stratis_info.py b/blivet/static_data/stratis_info.py +index 42f230ee5..774814500 100644 +--- a/blivet/static_data/stratis_info.py ++++ b/blivet/static_data/stratis_info.py +@@ -41,7 +41,7 @@ + STRATIS_MANAGER_INTF = STRATIS_SERVICE + ".Manager.r0" + + +-StratisPoolInfo = namedtuple("StratisPoolInfo", ["name", "uuid", "physical_size", "physical_used", "object_path", "encrypted"]) ++StratisPoolInfo = namedtuple("StratisPoolInfo", ["name", "uuid", "physical_size", "physical_used", "object_path", "encrypted", "clevis"]) + StratisFilesystemInfo = namedtuple("StratisFilesystemInfo", ["name", "uuid", "used_size", "pool_name", + "pool_uuid", "object_path"]) + StratisBlockdevInfo = namedtuple("StratisBlockdevInfo", ["path", "uuid", "pool_name", "pool_uuid", "object_path"]) +@@ -78,9 +78,16 @@ def _get_pool_info(self, pool_path): + properties["Name"], pool_used) + pool_used = 0 + ++ clevis_info = properties.get("ClevisInfo", None) ++ if not clevis_info or not clevis_info[0] or not clevis_info[1][0]: ++ clevis = None ++ else: ++ clevis = clevis_info[1][1] ++ + return StratisPoolInfo(name=properties["Name"], uuid=properties["Uuid"], + physical_size=Size(pool_size), physical_used=Size(pool_used), +- object_path=pool_path, encrypted=properties["Encrypted"]) ++ object_path=pool_path, encrypted=properties["Encrypted"], ++ clevis=clevis) + + def _get_filesystem_info(self, filesystem_path): + try: +diff --git a/tests/storage_tests/devices_test/stratis_test.py b/tests/storage_tests/devices_test/stratis_test.py +index cfb645ef5..f6fc73650 100644 +--- a/tests/storage_tests/devices_test/stratis_test.py ++++ b/tests/storage_tests/devices_test/stratis_test.py +@@ -5,7 +5,7 @@ + + import blivet + +-from blivet.devices.stratis import StratisFilesystemDevice ++from blivet.devices.stratis import StratisFilesystemDevice, StratisClevisConfig + + + class StratisTestCase(StorageTestCase): +@@ -126,6 +126,7 @@ def test_stratis_encrypted(self): + self.assertEqual(len(pool.parents), 1) + self.assertEqual(pool.parents[0], bd) + self.assertTrue(pool.encrypted) ++ self.assertIsNone(pool._clevis) + + def test_stratis_overprovision(self): + disk = self.storage.devicetree.get_device_by_path(self.vdevs[0]) +@@ -205,3 +206,69 @@ def test_stratis_add_device(self): + bd2 = self.storage.devicetree.get_device_by_path(self.vdevs[1] + "1") + self.assertEqual(bd2.format.pool_name, pool.name) + 
self.assertEqual(bd2.format.pool_uuid, pool.uuid) ++ ++ ++@unittest.skip("Requires TPM or Tang configuration") ++class StratisTestCaseClevis(StorageTestCase): ++ ++ # XXX: we don't have Tang server, this test will be always skipped ++ # the test cases are kept here for manual testing ++ _tang_server = None ++ ++ def test_stratis_encrypted_clevis_tang(self): ++ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0]) ++ self.assertIsNotNone(disk) ++ self.storage.initialize_disk(disk) ++ ++ bd = self.storage.new_partition(size=blivet.size.Size("1 GiB"), fmt_type="stratis", ++ parents=[disk]) ++ self.storage.create_device(bd) ++ ++ blivet.partitioning.do_partitioning(self.storage) ++ ++ pool = self.storage.new_stratis_pool(name="blivetTestPool", parents=[bd], ++ encrypted=True, passphrase="abcde", ++ clevis=StratisClevisConfig(pin="tang", ++ tang_url=self._tang_server, ++ tang_thumbprint=None)) ++ self.storage.create_device(pool) ++ ++ self.storage.do_it() ++ self.storage.reset() ++ ++ pool = self.storage.devicetree.get_device_by_name("blivetTestPool") ++ self.assertIsNotNone(pool) ++ self.assertEqual(pool.type, "stratis pool") ++ self.assertTrue(pool.encrypted) ++ self.assertIsNotNone(pool._clevis) ++ self.assertEqual(pool._clevis.pin, "tang") ++ self.assertEqual(pool._clevis.tang_url, self._tang_server) ++ self.assertIsNotNone(pool._clevis.tang_thumbprint) ++ ++ def test_stratis_encrypted_clevis_tpm(self): ++ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0]) ++ self.assertIsNotNone(disk) ++ self.storage.initialize_disk(disk) ++ ++ bd = self.storage.new_partition(size=blivet.size.Size("1 GiB"), fmt_type="stratis", ++ parents=[disk]) ++ self.storage.create_device(bd) ++ ++ blivet.partitioning.do_partitioning(self.storage) ++ ++ pool = self.storage.new_stratis_pool(name="blivetTestPool", parents=[bd], ++ encrypted=True, passphrase="abcde", ++ clevis=StratisClevisConfig(pin="tpm2")) ++ self.storage.create_device(pool) ++ ++ self.storage.do_it() ++ self.storage.reset() ++ ++ pool = self.storage.devicetree.get_device_by_name("blivetTestPool") ++ self.assertIsNotNone(pool) ++ self.assertEqual(pool.type, "stratis pool") ++ self.assertTrue(pool.encrypted) ++ self.assertIsNotNone(pool._clevis) ++ self.assertEqual(pool._clevis.pin, "tpm2") ++ self.assertIsNone(pool._clevis.tang_url) ++ self.assertIsNone(pool._clevis.tang_thumbprint) +diff --git a/tests/unit_tests/devices_test/stratis_test.py b/tests/unit_tests/devices_test/stratis_test.py +index 539995030..e98949310 100644 +--- a/tests/unit_tests/devices_test/stratis_test.py ++++ b/tests/unit_tests/devices_test/stratis_test.py +@@ -11,6 +11,7 @@ + from blivet.devices import StorageDevice + from blivet.devices import StratisPoolDevice + from blivet.devices import StratisFilesystemDevice ++from blivet.devices.stratis import StratisClevisConfig + from blivet.errors import StratisError, InconsistentParentSectorSize + from blivet.size import Size + +@@ -66,7 +67,8 @@ def test_new_stratis(self): + devices=['/dev/bd1'], + encrypted=False, + passphrase=None, +- key_file=None) ++ key_file=None, ++ clevis=None) + + # we would get this from pool._post_create + pool.uuid = "c4fc9ebe-e173-4cab-8d81-cc6abddbe02d" +@@ -103,7 +105,36 @@ def test_new_encrypted_stratis(self): + devices=['/dev/bd1'], + encrypted=True, + passphrase="secret", +- key_file=None) ++ key_file=None, ++ clevis=None) ++ ++ def test_new_encrypted_stratis_clevis(self): ++ b = blivet.Blivet() ++ bd = StorageDevice("bd1", fmt=blivet.formats.get_format("stratis"), ++ 
size=Size("1 GiB"), exists=True) ++ ++ b.devicetree._add_device(bd) ++ ++ clevis = StratisClevisConfig(pin="tang", tang_url="xxx", tang_thumbprint="xxx") ++ with patch("blivet.devicetree.DeviceTree.names", []): ++ pool = b.new_stratis_pool(name="testpool", parents=[bd], passphrase="secret", encrypted=True, clevis=clevis) ++ self.assertEqual(pool.name, "testpool") ++ self.assertEqual(pool.size, bd.size) ++ self.assertTrue(pool.encrypted) ++ self.assertTrue(pool.has_key) ++ ++ b.create_device(pool) ++ ++ with patch("blivet.devicelibs.stratis") as stratis_dbus: ++ with patch.object(pool, "_pre_create"): ++ with patch.object(pool, "_post_create"): ++ pool.create() ++ stratis_dbus.create_pool.assert_called_with(name='testpool', ++ devices=['/dev/bd1'], ++ encrypted=True, ++ passphrase="secret", ++ key_file=None, ++ clevis=clevis) + + def test_new_stratis_no_size(self): + b = blivet.Blivet() +@@ -142,7 +173,8 @@ def test_new_stratis_no_size(self): + devices=['/dev/bd1'], + encrypted=False, + passphrase=None, +- key_file=None) ++ key_file=None, ++ clevis=None) + + # we would get this from pool._post_create + pool.uuid = "c4fc9ebe-e173-4cab-8d81-cc6abddbe02d" + +From a5c59da655a739854ed00881fa985c1b898894f1 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 23 Apr 2024 14:57:01 +0200 +Subject: [PATCH 09/16] Add support for unlocking locked Stratis pools with + Clevis + +--- + blivet/devicelibs/stratis.py | 4 ++-- + blivet/formats/stratis.py | 14 ++++++++++---- + blivet/populator/helpers/stratis.py | 1 + + blivet/static_data/stratis_info.py | 11 ++++++++++- + tests/storage_tests/devices_test/stratis_test.py | 2 +- + 5 files changed, 24 insertions(+), 8 deletions(-) + +diff --git a/blivet/devicelibs/stratis.py b/blivet/devicelibs/stratis.py +index 3f0aaf29c..462fb8cc5 100644 +--- a/blivet/devicelibs/stratis.py ++++ b/blivet/devicelibs/stratis.py +@@ -173,7 +173,7 @@ def set_key(key_desc, passphrase, key_file): + os.close(write) + + +-def unlock_pool(pool_uuid): ++def unlock_pool(pool_uuid, method): + if not availability.STRATIS_DBUS.available: + raise StratisError("Stratis DBus service not available") + +@@ -182,7 +182,7 @@ def unlock_pool(pool_uuid): + STRATIS_PATH, + STRATIS_MANAGER_INTF, + "UnlockPool", +- GLib.Variant("(ss)", (pool_uuid, "keyring"))) ++ GLib.Variant("(ss)", (pool_uuid, method))) + except safe_dbus.DBusCallError as e: + raise StratisError("Failed to unlock pool: %s" % str(e)) + else: +diff --git a/blivet/formats/stratis.py b/blivet/formats/stratis.py +index dbb0528d8..ac5ea63c5 100644 +--- a/blivet/formats/stratis.py ++++ b/blivet/formats/stratis.py +@@ -62,6 +62,8 @@ def __init__(self, **kwargs): + :type passphrase: str + :keyword key_file: path to a file containing a key + :type key_file: str ++ :keyword locked_pool_clevis_pin: clevis PIN for locked pool (either 'tang' or 'tpm') ++ :type locked_pool_clevis_pin: str + + .. 
note:: + +@@ -78,6 +80,7 @@ def __init__(self, **kwargs): + self.pool_uuid = kwargs.get("pool_uuid") + self.locked_pool = kwargs.get("locked_pool") + self.locked_pool_key_desc = kwargs.get("locked_pool_key_desc") ++ self.locked_pool_clevis_pin = kwargs.get("locked_pool_clevis_pin") + + self.__passphrase = kwargs.get("passphrase") + self._key_file = kwargs.get("key_file") +@@ -119,14 +122,17 @@ def unlock_pool(self): + if not self.locked_pool: + raise StratisError("This device doesn't contain a locked Stratis pool") + +- if not self.has_key: ++ if not self.has_key and not self.locked_pool_clevis_pin: + raise StratisError("No passphrase/key file for the locked Stratis pool") + +- if not self.locked_pool_key_desc: ++ if not self.locked_pool_key_desc and not self.locked_pool_clevis_pin: + raise StratisError("No key description for the locked Stratis pool") + +- stratis.set_key(self.locked_pool_key_desc, self.__passphrase, self.key_file) +- stratis.unlock_pool(self.pool_uuid) ++ if self.has_key: ++ stratis.set_key(self.locked_pool_key_desc, self.__passphrase, self.key_file) ++ stratis.unlock_pool(self.pool_uuid, method="keyring") ++ else: ++ stratis.unlock_pool(self.pool_uuid, method="clevis") + + + register_device_format(StratisBlockdev) +diff --git a/blivet/populator/helpers/stratis.py b/blivet/populator/helpers/stratis.py +index 0a3da927d..e3cdfdb5f 100644 +--- a/blivet/populator/helpers/stratis.py ++++ b/blivet/populator/helpers/stratis.py +@@ -83,6 +83,7 @@ def _get_kwargs(self): + kwargs["locked_pool"] = True + kwargs["pool_uuid"] = pool.uuid + kwargs["locked_pool_key_desc"] = pool.key_desc ++ kwargs["locked_pool_clevis_pin"] = pool.clevis + return kwargs + + bd_info = stratis_info.blockdevs.get(uuid) +diff --git a/blivet/static_data/stratis_info.py b/blivet/static_data/stratis_info.py +index 774814500..6a4c41f20 100644 +--- a/blivet/static_data/stratis_info.py ++++ b/blivet/static_data/stratis_info.py +@@ -45,7 +45,7 @@ + StratisFilesystemInfo = namedtuple("StratisFilesystemInfo", ["name", "uuid", "used_size", "pool_name", + "pool_uuid", "object_path"]) + StratisBlockdevInfo = namedtuple("StratisBlockdevInfo", ["path", "uuid", "pool_name", "pool_uuid", "object_path"]) +-StratisLockedPoolInfo = namedtuple("StratisLockedPoolInfo", ["uuid", "key_desc", "devices"]) ++StratisLockedPoolInfo = namedtuple("StratisLockedPoolInfo", ["uuid", "key_desc", "clevis", "devices"]) + + + class StratisInfo(object): +@@ -165,8 +165,17 @@ def _get_locked_pools_info(self): + if not valid: + log.info("Locked Stratis pool %s doesn't have a valid key description: %s", pool_uuid, description) + description = None ++ valid, (clevis_set, (pin, _options)) = pools_info[pool_uuid]["clevis_info"] ++ if not valid: ++ log.info("Locked Stratis pool %s doesn't have a valid clevis info", pool_uuid) ++ clevis = None ++ elif not clevis_set: ++ clevis = None ++ else: ++ clevis = pin + info = StratisLockedPoolInfo(uuid=pool_uuid, + key_desc=description, ++ clevis=clevis, + devices=[d["devnode"] for d in pools_info[pool_uuid]["devs"]]) + locked_pools.append(info) + +diff --git a/tests/storage_tests/devices_test/stratis_test.py b/tests/storage_tests/devices_test/stratis_test.py +index f6fc73650..147078e7f 100644 +--- a/tests/storage_tests/devices_test/stratis_test.py ++++ b/tests/storage_tests/devices_test/stratis_test.py +@@ -209,7 +209,7 @@ def test_stratis_add_device(self): + + + @unittest.skip("Requires TPM or Tang configuration") +-class StratisTestCaseClevis(StorageTestCase): ++class StratisTestCaseClevis(StratisTestCase): 
+
+    # XXX: we don't have Tang server, this test will be always skipped
+    # the test cases are kept here for manual testing
+
+From 696f8a3bf42e07620b9285146ddcd0769b8e92a2 Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Wed, 24 Apr 2024 09:38:02 +0200
+Subject: [PATCH 10/16] Catch JSONDecodeError when parsing Stratis Clevis info
+
+---
+ blivet/populator/helpers/stratis.py | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/blivet/populator/helpers/stratis.py b/blivet/populator/helpers/stratis.py
+index e3cdfdb5f..c813eed2d 100644
+--- a/blivet/populator/helpers/stratis.py
++++ b/blivet/populator/helpers/stratis.py
+@@ -124,9 +124,16 @@ def _add_pool_device(self):
+
+         if pool_info.clevis:
+             if pool_info.clevis[0] == "tang":
+-                data = json.loads(pool_info.clevis[1])
+-                clevis_info = StratisClevisConfig(pin=pool_info.clevis[0], tang_url=data["url"],
+-                                                  tang_thumbprint=data["thp"])
++                try:
++                    data = json.loads(pool_info.clevis[1])
++                except json.JSONDecodeError:
++                    log.warning("failed to decode tang configuration for stratis pool %s",
++                                self.device.name)
++                    clevis_info = StratisClevisConfig(pin=pool_info.clevis[0])
++                else:
++                    clevis_info = StratisClevisConfig(pin=pool_info.clevis[0],
++                                                      tang_url=data["url"],
++                                                      tang_thumbprint=data["thp"])
+             else:
+                 clevis_info = StratisClevisConfig(pin=pool_info.clevis[0])
+         else:
+
+From 056b543425504cf22707b9887c65bbe652fb6676 Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Fri, 26 Apr 2024 13:45:36 +0200
+Subject: [PATCH 11/16] safe-dbus: Allow using custom timeouts for the DBus
+ calls
+
+---
+ blivet/safe_dbus.py | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/blivet/safe_dbus.py b/blivet/safe_dbus.py
+index 76416f901..7151ad45a 100644
+--- a/blivet/safe_dbus.py
++++ b/blivet/safe_dbus.py
+@@ -100,7 +100,7 @@ def get_new_session_connection():
+
+
+ def call_sync(service, obj_path, iface, method, args,
+-              connection=None, fds=None):
++              connection=None, fds=None, timeout=DEFAULT_DBUS_TIMEOUT):
+     """
+     Safely call a given method on a given object of a given service over DBus
+     passing given arguments. If a connection is given, it is used, otherwise a
+@@ -122,6 +122,8 @@ def call_sync(service, obj_path, iface, method, args,
+     :type connection: Gio.DBusConnection
+     :param fds: list of file descriptors for the call
+     :type: Gio.UnixFDList
++    :param timeout: timeout in milliseconds for the call (-1 for default timeout)
++    :type timeout: int
+     :return: unpacked value returned by the method
+     :rtype: tuple with elements that depend on the method
+     :raise DBusCallError: if some DBus related error appears
+@@ -140,7 +142,7 @@ def call_sync(service, obj_path, iface, method, args,
+     try:
+         ret = connection.call_with_unix_fd_list_sync(service, obj_path, iface, method, args,
+                                                      None, Gio.DBusCallFlags.NONE,
+-                                                     DEFAULT_DBUS_TIMEOUT, fds, None)
++                                                     timeout, fds, None)
+     except GLib.GError as gerr:
+         msg = "Failed to call %s method on %s with %s arguments: %s" % \
+               (method, obj_path, args, gerr.message)  # pylint: disable=no-member
+
+From 203f582becca2d5643ad1ea3dce6e6538181c133 Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Fri, 26 Apr 2024 13:46:56 +0200
+Subject: [PATCH 12/16] Use longer timeout for Stratis DBus calls
+
+We've seen timeouts for some Stratis calls (especially with
+larger disks and encryption), so we need longer timeouts for
+these calls. This uses 120 seconds, which is the same value the
+stratis CLI uses.
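+
+A usage sketch combining the two patches above (hedged: `pool_object_path`
+and `device` are placeholders, while STRATIS_SERVICE and STRATIS_POOL_INTF
+are the existing constants from blivet.devicelibs.stratis):
+
+    from gi.repository import GLib
+    from blivet import safe_dbus
+
+    STRATIS_CALL_TIMEOUT = 120 * 1000  # 120 s in ms, matching the stratis CLI
+
+    # long-running pool operations pass the extended timeout explicitly,
+    # while other callers keep the DEFAULT_DBUS_TIMEOUT fallback
+    safe_dbus.call_sync(STRATIS_SERVICE, pool_object_path, STRATIS_POOL_INTF,
+                        "AddDataDevs", GLib.Variant("(as)", ([device],)),
+                        timeout=STRATIS_CALL_TIMEOUT)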
+--- + blivet/devicelibs/stratis.py | 17 ++++++++++++----- + 1 file changed, 12 insertions(+), 5 deletions(-) + +diff --git a/blivet/devicelibs/stratis.py b/blivet/devicelibs/stratis.py +index 462fb8cc5..fd5c5b268 100644 +--- a/blivet/devicelibs/stratis.py ++++ b/blivet/devicelibs/stratis.py +@@ -46,6 +46,8 @@ + + STRATIS_FS_SIZE = Size("1 TiB") + ++STRATIS_CALL_TIMEOUT = 120 * 1000 # 120 s (used by stratis-cli by default) in ms ++ + + safe_name_characters = "0-9a-zA-Z._-" + +@@ -108,7 +110,8 @@ def remove_pool(pool_uuid): + STRATIS_PATH, + STRATIS_MANAGER_INTF, + "DestroyPool", +- GLib.Variant("(o)", (pool_info.object_path,))) ++ GLib.Variant("(o)", (pool_info.object_path,)), ++ timeout=STRATIS_CALL_TIMEOUT) + except safe_dbus.DBusCallError as e: + raise StratisError("Failed to remove stratis pool: %s" % str(e)) + else: +@@ -136,7 +139,8 @@ def remove_filesystem(pool_uuid, fs_uuid): + pool_info.object_path, + STRATIS_POOL_INTF, + "DestroyFilesystems", +- GLib.Variant("(ao)", ([fs_info.object_path],))) ++ GLib.Variant("(ao)", ([fs_info.object_path],)), ++ timeout=STRATIS_CALL_TIMEOUT) + except safe_dbus.DBusCallError as e: + raise StratisError("Failed to remove stratis filesystem: %s" % str(e)) + else: +@@ -223,7 +227,8 @@ def create_pool(name, devices, encrypted, passphrase, key_file, clevis): + "CreatePool", + GLib.Variant("(s(bq)as(bs)(b(ss)))", (name, raid_opt, + devices, key_opt, +- clevis_opt))) ++ clevis_opt)), ++ timeout=STRATIS_CALL_TIMEOUT) + except safe_dbus.DBusCallError as e: + raise StratisError("Failed to create stratis pool: %s" % str(e)) + else: +@@ -255,7 +260,8 @@ def create_filesystem(name, pool_uuid, fs_size=None): + pool_info.object_path, + STRATIS_POOL_INTF, + "CreateFilesystems", +- GLib.Variant("(a(s(bs)))", ([GLib.Variant("(s(bs))", (name, size_opt))],))) ++ GLib.Variant("(a(s(bs)))", ([GLib.Variant("(s(bs))", (name, size_opt))],)), ++ timeout=STRATIS_CALL_TIMEOUT) + except safe_dbus.DBusCallError as e: + raise StratisError("Failed to create stratis filesystem on '%s': %s" % (pool_info.name, str(e))) + else: +@@ -280,7 +286,8 @@ def add_device(pool_uuid, device): + pool_info.object_path, + STRATIS_POOL_INTF, + "AddDataDevs", +- GLib.Variant("(as)", ([device],))) ++ GLib.Variant("(as)", ([device],)), ++ timeout=STRATIS_CALL_TIMEOUT) + except safe_dbus.DBusCallError as e: + raise StratisError("Failed to create stratis filesystem on '%s': %s" % (pool_info.name, str(e))) + else: + +From 6c13b6867ddccdfdb9853fc177160630d985e000 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 19 Mar 2024 13:20:31 +0100 +Subject: [PATCH 13/16] Try to start stratisd before checking its availability + +stratisd is neither autostarted after installing the package nor +DBus activated so we need to try to start it first. +--- + blivet/tasks/availability.py | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py +index 85945c776..50642cc18 100644 +--- a/blivet/tasks/availability.py ++++ b/blivet/tasks/availability.py +@@ -26,6 +26,7 @@ + + from .. import safe_dbus + from ..devicelibs.stratis import STRATIS_SERVICE, STRATIS_PATH ++from .. 
import util + + import gi + gi.require_version("BlockDev", "2.0") +@@ -273,6 +274,11 @@ def availability_errors(self, resource): + :returns: [] if the name of the plugin is loaded + :rtype: list of str + """ ++ # try to start the service first ++ ret = util.run_program(["systemctl", "start", resource.name]) ++ if ret != 0: ++ return ["DBus service %s not available" % resource.name] ++ + try: + avail = blockdev.utils.dbus_service_available(None, Gio.BusType.SYSTEM, self.dbus_name, self.dbus_path) + avail = safe_dbus.check_object_available(self.dbus_name, self.dbus_path) +@@ -551,4 +557,4 @@ def available_resource(name): + STRATISPREDICTUSAGE_APP = application("stratis-predict-usage") + + STRATIS_SERVICE_METHOD = DBusMethod(STRATIS_SERVICE, STRATIS_PATH) +-STRATIS_DBUS = dbus_service("stratis", STRATIS_SERVICE_METHOD) ++STRATIS_DBUS = dbus_service("stratisd", STRATIS_SERVICE_METHOD) + +From ce80215802caf5e494dfe2ec3de4851f88c782f0 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Fri, 5 Apr 2024 13:07:29 +0200 +Subject: [PATCH 14/16] availability: Fix starting DBus services + +We don't want to try to start services that are already running. +--- + blivet/tasks/availability.py | 32 +++++++++++++++++--------------- + 1 file changed, 17 insertions(+), 15 deletions(-) + +diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py +index 50642cc18..0c48ce887 100644 +--- a/blivet/tasks/availability.py ++++ b/blivet/tasks/availability.py +@@ -24,7 +24,6 @@ + + from six import add_metaclass + +-from .. import safe_dbus + from ..devicelibs.stratis import STRATIS_SERVICE, STRATIS_PATH + from .. import util + +@@ -265,6 +264,14 @@ def __init__(self, dbus_name, dbus_path): + self.dbus_path = dbus_path + self._availability_errors = None + ++ def _service_available(self): ++ try: ++ avail = blockdev.utils.dbus_service_available(None, Gio.BusType.SYSTEM, self.dbus_name, self.dbus_path) ++ except blockdev.UtilsError: ++ return False ++ else: ++ return avail ++ + def availability_errors(self, resource): + """ Returns [] if the service is available. 
+ +@@ -274,21 +281,16 @@ def availability_errors(self, resource): + :returns: [] if the name of the plugin is loaded + :rtype: list of str + """ +- # try to start the service first +- ret = util.run_program(["systemctl", "start", resource.name]) +- if ret != 0: +- return ["DBus service %s not available" % resource.name] +- +- try: +- avail = blockdev.utils.dbus_service_available(None, Gio.BusType.SYSTEM, self.dbus_name, self.dbus_path) +- avail = safe_dbus.check_object_available(self.dbus_name, self.dbus_path) +- except safe_dbus.DBusCallError: +- return ["DBus service %s not available" % resource.name] +- else: +- if avail: +- return [] +- else: ++ if not self._service_available(): ++ # try to start the service first ++ ret = util.run_program(["systemctl", "start", resource.name]) ++ if ret != 0: + return ["DBus service %s not available" % resource.name] ++ # try again now when the service should be started ++ else: ++ if not self._service_available(): ++ return ["DBus service %s not available" % resource.name] ++ return [] + + + class _UnavailableMethod(Method): + +From 35bdbd64b670493397cd796be247cfe5fbfb4196 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Mon, 6 May 2024 13:44:39 +0200 +Subject: [PATCH 15/16] Add a Stratis example with pool encryption using + Clevis/Tang + +--- + examples/stratis_clevis_tang.py | 65 +++++++++++++++++++++++++++++++++ + 1 file changed, 65 insertions(+) + create mode 100644 examples/stratis_clevis_tang.py + +diff --git a/examples/stratis_clevis_tang.py b/examples/stratis_clevis_tang.py +new file mode 100644 +index 000000000..44bb9d378 +--- /dev/null ++++ b/examples/stratis_clevis_tang.py +@@ -0,0 +1,65 @@ ++import os ++import sys ++ ++import blivet ++from blivet.devices.stratis import StratisClevisConfig ++from blivet.size import Size ++from blivet.util import set_up_logging, create_sparse_tempfile ++ ++ ++TANG_URL = None # URL/IP and port of the Tang server ++TANG_THUMBPRINT = None # thumbprint for verifying the server or None to configure without verification ++ ++ ++set_up_logging() ++b = blivet.Blivet() # create an instance of Blivet (don't add system devices) ++ ++if TANG_URL is None: ++ print("Please set Tang server URL before running this example") ++ sys.exit(1) ++ ++# create a disk image file on which to create new devices ++disk1_file = create_sparse_tempfile("disk1", Size("100GiB")) ++b.disk_images["disk1"] = disk1_file ++disk2_file = create_sparse_tempfile("disk2", Size("100GiB")) ++b.disk_images["disk2"] = disk2_file ++ ++b.reset() ++ ++try: ++ disk1 = b.devicetree.get_device_by_name("disk1") ++ disk2 = b.devicetree.get_device_by_name("disk2") ++ ++ b.initialize_disk(disk1) ++ b.initialize_disk(disk2) ++ ++ bd = b.new_partition(size=Size("50GiB"), fmt_type="stratis", parents=[disk1]) ++ b.create_device(bd) ++ bd2 = b.new_partition(size=Size("50GiB"), fmt_type="stratis", parents=[disk2]) ++ b.create_device(bd2) ++ ++ # allocate the partitions (decide where and on which disks they'll reside) ++ blivet.partitioning.do_partitioning(b) ++ ++ # clevis configuration specification, TPM can be used by setting "pin" to "tpm2" ++ clevis_info = StratisClevisConfig(pin="tang", ++ tang_url=TANG_URL, ++ tang_thumbprint=TANG_THUMBPRINT) ++ pool = b.new_stratis_pool(name="stratis_pool", ++ parents=[bd, bd2], ++ encrypted=True, passphrase="secret", ++ clevis=clevis_info) ++ b.create_device(pool) ++ ++ print(b.devicetree) ++ ++ # write the new partitions to disk and format them as specified ++ b.do_it() ++ print(b.devicetree) ++ input("Check the state and 
hit ENTER to trigger cleanup")
++finally:
++    b.devicetree.recursive_remove(pool)
++    b.do_it()
++    b.devicetree.teardown_disk_images()
++    os.unlink(disk1_file)
++    os.unlink(disk2_file)
+
+From d32b2f854d08ef089e1c67185900226fc69ce6e6 Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Tue, 7 May 2024 09:55:43 +0200
+Subject: [PATCH 16/16] tests: Add a base class for stratis tests
+
+To avoid re-running all the test cases from StratisTestCase in
+StratisTestCaseClevis.
+---
+ tests/storage_tests/devices_test/stratis_test.py | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/tests/storage_tests/devices_test/stratis_test.py b/tests/storage_tests/devices_test/stratis_test.py
+index 147078e7f..5aaa12d4f 100644
+--- a/tests/storage_tests/devices_test/stratis_test.py
++++ b/tests/storage_tests/devices_test/stratis_test.py
+@@ -8,7 +8,7 @@
+ from blivet.devices.stratis import StratisFilesystemDevice, StratisClevisConfig
+
+
+-class StratisTestCase(StorageTestCase):
++class StratisTestCaseBase(StorageTestCase):
+
+     @classmethod
+     def setUpClass(cls):
+@@ -42,6 +42,9 @@ def _clean_up(self):
+
+         return super()._clean_up()
+
++
++class StratisTestCase(StratisTestCaseBase):
++
+     def test_stratis_basic(self):
+         disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+         self.assertIsNotNone(disk)
+@@ -209,7 +212,7 @@ def test_stratis_add_device(self):
+
+
+ @unittest.skip("Requires TPM or Tang configuration")
+-class StratisTestCaseClevis(StratisTestCase):
++class StratisTestCaseClevis(StratisTestCaseBase):
+
+     # XXX: we don't have Tang server, this test will be always skipped
+     # the test cases are kept here for manual testing
diff --git a/SOURCES/0026-XFS-resize-test-fix.patch b/SOURCES/0026-XFS-resize-test-fix.patch
new file mode 100644
index 0000000..0599c03
--- /dev/null
+++ b/SOURCES/0026-XFS-resize-test-fix.patch
@@ -0,0 +1,76 @@
+From c2e247fe953568a65c73f5408a6da7af12c4d6a1 Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Tue, 18 Jun 2024 14:47:39 +0200
+Subject: [PATCH 1/2] tests: Try waiting after partition creation for XFS
+ resize test
+
+The test randomly fails to find the newly created partition, so
+let's try waiting a bit with udev settle.
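+
+The fix boils down to this pattern (sketch only; `disk`, `pstart` and `pend`
+are stand-ins for the values computed in the test below):
+
+    import parted
+    from blivet import udev
+
+    disk.format.add_partition(pstart, pend, parted.PARTITION_NORMAL)
+    disk.format.parted_disk.commit()
+    # give udev a chance to create the device node for the new partition
+    # before the test looks it up
+    udev.settle()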
+--- + tests/skip.yml | 6 ------ + tests/storage_tests/formats_test/fs_test.py | 2 ++ + 2 files changed, 2 insertions(+), 6 deletions(-) + +diff --git a/tests/skip.yml b/tests/skip.yml +index c0ca0eaf..8d353b1b 100644 +--- a/tests/skip.yml ++++ b/tests/skip.yml +@@ -23,9 +23,3 @@ + # - all "skips" can specified as a list, for example 'version: [10, 11]' + + --- +- +-- test: storage_tests.formats_test.fs_test.XFSTestCase.test_resize +- skip_on: +- - distro: ["centos", "enterprise_linux"] +- version: "9" +- reason: "Creating partitions on loop devices is broken on CentOS/RHEL 9 latest kernel" +diff --git a/tests/storage_tests/formats_test/fs_test.py b/tests/storage_tests/formats_test/fs_test.py +index 1d42dc21..59c0f998 100644 +--- a/tests/storage_tests/formats_test/fs_test.py ++++ b/tests/storage_tests/formats_test/fs_test.py +@@ -10,6 +10,7 @@ from blivet.errors import DeviceFormatError, FSError + from blivet.formats import get_format + from blivet.devices import PartitionDevice, DiskDevice + from blivet.flags import flags ++from blivet import udev + + from .loopbackedtestcase import LoopBackedTestCase + +@@ -107,6 +108,7 @@ class XFSTestCase(fstesting.FSAsRoot): + pend = pstart + int(Size(size) / disk.format.parted_device.sectorSize) + disk.format.add_partition(pstart, pend, parted.PARTITION_NORMAL) + disk.format.parted_disk.commit() ++ udev.settle() + part = disk.format.parted_disk.getPartitionBySector(pstart) + + device = PartitionDevice(os.path.basename(part.path)) +-- +2.45.2 + + +From 511d64c69618de0e7bb567353e5e0c92b61da10e Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Thu, 7 Mar 2024 09:45:28 +0100 +Subject: [PATCH 2/2] Fix util.detect_virt on Amazon + +--- + blivet/util.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/blivet/util.py b/blivet/util.py +index 3040ee5a..15d41b4f 100644 +--- a/blivet/util.py ++++ b/blivet/util.py +@@ -1137,7 +1137,7 @@ def detect_virt(): + except (safe_dbus.DBusCallError, safe_dbus.DBusPropertyError): + return False + else: +- return vm[0] in ('qemu', 'kvm', 'xen') ++ return vm[0] in ('qemu', 'kvm', 'xen', 'microsoft', 'amazon') + + + def natural_sort_key(device): +-- +2.45.2 + diff --git a/SOURCES/0027-RHEL96-bugfixes-1.patch b/SOURCES/0027-RHEL96-bugfixes-1.patch new file mode 100644 index 0000000..9d67306 --- /dev/null +++ b/SOURCES/0027-RHEL96-bugfixes-1.patch @@ -0,0 +1,165 @@ +From 39382d82c35494d0b359b32a48de723d9f3a0908 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Mon, 21 Nov 2022 11:04:40 +0100 +Subject: [PATCH 1/2] Add a basic read-only support for UDF filesystem + +Resolves: RHEL-13329 +--- + blivet/formats/fs.py | 12 ++++++++++++ + blivet/populator/helpers/disklabel.py | 2 +- + blivet/populator/helpers/partition.py | 2 +- + blivet/tasks/fsmount.py | 4 ++++ + tests/storage_tests/formats_test/fs_test.py | 4 ++++ + tests/unit_tests/populator_test.py | 5 +++++ + 6 files changed, 27 insertions(+), 2 deletions(-) + +diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py +index 3f553eb0..5b60bd6f 100644 +--- a/blivet/formats/fs.py ++++ b/blivet/formats/fs.py +@@ -1359,6 +1359,18 @@ class Iso9660FS(FS): + register_device_format(Iso9660FS) + + ++class UDFFS(FS): ++ ++ """ UDF filesystem. 
""" ++ _type = "udf" ++ _modules = ["udf"] ++ _supported = True ++ _mount_class = fsmount.UDFFSMount ++ ++ ++register_device_format(UDFFS) ++ ++ + class NoDevFS(FS): + + """ nodev filesystem base class """ +diff --git a/blivet/populator/helpers/disklabel.py b/blivet/populator/helpers/disklabel.py +index db10638e..842cd308 100644 +--- a/blivet/populator/helpers/disklabel.py ++++ b/blivet/populator/helpers/disklabel.py +@@ -42,7 +42,7 @@ class DiskLabelFormatPopulator(FormatPopulator): + # XXX ignore disklabels on multipath or biosraid member disks + return (bool(udev.device_get_disklabel_type(data)) and + not udev.device_is_biosraid_member(data) and +- udev.device_get_format(data) != "iso9660" and ++ udev.device_get_format(data) not in ("iso9660", "udf") and + not (device.is_disk and udev.device_get_format(data) == "mpath_member")) + + def _get_kwargs(self): +diff --git a/blivet/populator/helpers/partition.py b/blivet/populator/helpers/partition.py +index 8659bd48..9257407e 100644 +--- a/blivet/populator/helpers/partition.py ++++ b/blivet/populator/helpers/partition.py +@@ -75,7 +75,7 @@ class PartitionDevicePopulator(DevicePopulator): + # For partitions on disklabels parted cannot make sense of, go ahead + # and instantiate a PartitionDevice so our view of the layout is + # complete. +- if not disk.partitionable or disk.format.type == "iso9660" or disk.format.hidden: ++ if not disk.partitionable or disk.format.type in ("iso9660", "udf") or disk.format.hidden: + log.debug("ignoring partition %s on %s", name, disk.format.type) + return + +diff --git a/blivet/tasks/fsmount.py b/blivet/tasks/fsmount.py +index 65b2470a..a7f493dd 100644 +--- a/blivet/tasks/fsmount.py ++++ b/blivet/tasks/fsmount.py +@@ -163,6 +163,10 @@ class Iso9660FSMount(FSMount): + options = ["ro"] + + ++class UDFFSMount(FSMount): ++ options = ["ro"] ++ ++ + class NoDevFSMount(FSMount): + + @property +--- a/tests/storage_tests/formats_test/fs_test.py ++++ b/tests/storage_tests/formats_test/fs_test.py +@@ -223,6 +223,10 @@ class Iso9660FS(fstesting.FSAsRoot): + _fs_class = fs.Iso9660FS + + ++class UDFFS(fstesting.FSAsRoot): ++ _fs_class = fs.UDFFS ++ ++ + @unittest.skip("Too strange to test using this framework.") + class NoDevFSTestCase(fstesting.FSAsRoot): + _fs_class = fs.NoDevFS +diff --git a/tests/unit_tests/populator_test.py b/tests/unit_tests/populator_test.py +index 1ee29b57..df56e1f5 100644 +--- a/tests/unit_tests/populator_test.py ++++ b/tests/unit_tests/populator_test.py +@@ -979,6 +979,11 @@ class DiskLabelPopulatorTestCase(PopulatorHelperTestCase): + self.assertFalse(self.helper_class.match(data, device)) + device_get_format.return_value = None + ++ # no match for whole-disk udf filesystem ++ device_get_format.return_value = "udf" ++ self.assertFalse(self.helper_class.match(data, device)) ++ device_get_format.return_value = None ++ + # no match for biosraid members + device_is_biosraid_member.return_value = True + self.assertFalse(self.helper_class.match(data, device)) +-- +2.46.0 + + +From 54e6cc7a7e01bfe8a627b2c2f4ba352c9e6e5564 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Thu, 14 Mar 2024 15:10:27 +0100 +Subject: [PATCH 2/2] nvme: Skip startup/write when NVMe plugin isn't available + +This is similar to other modules like iSCSI where these methods +are silently skipped if the technology isn't supported or +available. 
+
+Resolves: RHEL-28124
+---
+ blivet/nvme.py | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+diff --git a/blivet/nvme.py b/blivet/nvme.py
+index b1513c19..4309dea3 100644
+--- a/blivet/nvme.py
++++ b/blivet/nvme.py
+@@ -71,10 +71,21 @@ class NVMe(object):
+ except Exception: # pylint: disable=broad-except
+ pass
+
++ def available(self):
++ if not hasattr(blockdev.Plugin, "NVME"):
++ return False
++ if not hasattr(blockdev.NVMETech, "FABRICS"):
++ return False
++ return True
++
+ def startup(self):
+ if self.started:
+ return
+
++ if not self.available():
++ log.info("NVMe support not available, not starting")
++ return
++
+ self._hostnqn = blockdev.nvme_get_host_nqn()
+ self._hostid = blockdev.nvme_get_host_id()
+ if not self._hostnqn:
+@@ -97,6 +108,9 @@ class NVMe(object):
+ self.started = True
+
+ def write(self, root, overwrite=True): # pylint: disable=unused-argument
++ if not self.available():
++ return
++
+ # write down the hostnqn and hostid files
+ p = root + ETC_NVME_PATH
+ if not os.path.isdir(p):
+--
+2.46.0
+
diff --git a/SOURCES/0028-Fix-checking-for-NVMe-plugin-availability.patch b/SOURCES/0028-Fix-checking-for-NVMe-plugin-availability.patch
new file mode 100644
index 0000000..55fb740
--- /dev/null
+++ b/SOURCES/0028-Fix-checking-for-NVMe-plugin-availability.patch
@@ -0,0 +1,27 @@
+From 7677fc312b821a9c67750220f2494d06f2357780 Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Wed, 18 Sep 2024 15:30:05 +0200
+Subject: [PATCH] Fix checking for NVMe plugin availability
+
+---
+ blivet/nvme.py | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/blivet/nvme.py b/blivet/nvme.py
+index 4309dea3..72a47070 100644
+--- a/blivet/nvme.py
++++ b/blivet/nvme.py
+@@ -76,6 +76,10 @@ class NVMe(object):
+ return False
+ if not hasattr(blockdev.NVMETech, "FABRICS"):
+ return False
++ try:
++ blockdev.nvme.is_tech_avail(blockdev.NVMETech.FABRICS, 0) # pylint: disable=no-member
++ except (blockdev.BlockDevNotImplementedError, blockdev.NVMEError):
++ return False
+ return True
+
+ def startup(self):
+--
+2.46.1
+
diff --git a/SOURCES/0029-Align-sizes-up-for-growable-LVs.patch b/SOURCES/0029-Align-sizes-up-for-growable-LVs.patch
new file mode 100644
index 0000000..8301a18
--- /dev/null
+++ b/SOURCES/0029-Align-sizes-up-for-growable-LVs.patch
@@ -0,0 +1,30 @@
+From 6a6eca0c9604a9bd508d98b75c5608f20a3a7bf6 Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Thu, 24 Oct 2024 12:18:58 +0200
+Subject: [PATCH] Align sizes up for growable LVs
+
+Growable LVs usually start at their minimum size, so rounding the
+size down can take it below the allowed minimum.
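+
+A small worked example of why the rounding direction matters, using
+plain byte arithmetic with an assumed 4 MiB extent size:
+
+    PE = 4 * 1024 ** 2             # physical extent size in bytes
+    requested = 5 * 1024 ** 2      # requested LV size in bytes
+
+    down = (requested // PE) * PE  # 4 MiB -- can drop below the minimum
+    up = -(-requested // PE) * PE  # 8 MiB -- never shrinks a growable LV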
+
+Resolves: RHEL-8036
+Resolves: RHEL-19725
+---
+ blivet/devices/lvm.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
+index 62974443..1293cae2 100644
+--- a/blivet/devices/lvm.py
++++ b/blivet/devices/lvm.py
+@@ -2574,7 +2574,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
+ if not isinstance(newsize, Size):
+ raise ValueError("new size must be of type Size")
+
+- newsize = self.vg.align(newsize)
++ newsize = self.vg.align(newsize, roundup=self.growable)
+ log.debug("trying to set lv %s size to %s", self.name, newsize)
+ # Don't refuse to set size if we think there's not enough space in the
+ # VG for an existing LV, since it's existence proves there is enough
+--
+2.47.0
+
diff --git a/SOURCES/0030-mod_pass_in_stratis_test.patch b/SOURCES/0030-mod_pass_in_stratis_test.patch
new file mode 100644
index 0000000..67bb1c0
--- /dev/null
+++ b/SOURCES/0030-mod_pass_in_stratis_test.patch
@@ -0,0 +1,32 @@
+From c2177aa362d20278a0ebd5c25a776f952d83e5b1 Mon Sep 17 00:00:00 2001
+From: Jan Pokorny
+Date: Fri, 11 Oct 2024 17:17:41 +0200
+Subject: [PATCH] Modified passphrase in stratis test
+
+FIPS requires a passphrase at least 8 characters long. The dummy
+passphrase used in the stratis test was too short, causing encryption
+tests with FIPS enabled to fail.
+
+Changed the passphrase.
+
+fixes RHEL-45173, RHEL-8029
+---
+ tests/storage_tests/devices_test/stratis_test.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tests/storage_tests/devices_test/stratis_test.py b/tests/storage_tests/devices_test/stratis_test.py
+index 5aaa12d4..21c4d0f5 100644
+--- a/tests/storage_tests/devices_test/stratis_test.py
++++ b/tests/storage_tests/devices_test/stratis_test.py
+@@ -230,7 +230,7 @@ class StratisTestCaseClevis(StratisTestCaseBase):
+ blivet.partitioning.do_partitioning(self.storage)
+
+ pool = self.storage.new_stratis_pool(name="blivetTestPool", parents=[bd],
+- encrypted=True, passphrase="abcde",
++ encrypted=True, passphrase="fipsneeds8chars",
+ clevis=StratisClevisConfig(pin="tang",
+ tang_url=self._tang_server,
+ tang_thumbprint=None))
+--
+2.45.0
+
diff --git a/SOURCES/0031-Fix_running_tests_in_FIPS_mode.patch b/SOURCES/0031-Fix_running_tests_in_FIPS_mode.patch
new file mode 100644
index 0000000..528addf
--- /dev/null
+++ b/SOURCES/0031-Fix_running_tests_in_FIPS_mode.patch
@@ -0,0 +1,35 @@
+From b7f03738543a4bb416fb19c7138f0b9d3049af61 Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Fri, 8 Nov 2024 09:19:45 +0100
+Subject: [PATCH] Fix "Modified passphrase in stratis test"
+
+Follow-up for 68708e347ef7b2f98312c76aa80366091dd4aade, two more
+places where the passphrase is too short for FIPS mode.
+
+Resolves: RHEL-8029
+---
+ tests/storage_tests/devices_test/stratis_test.py | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/tests/storage_tests/devices_test/stratis_test.py b/tests/storage_tests/devices_test/stratis_test.py
+index 21c4d0f50..9792e0618 100644
+--- a/tests/storage_tests/devices_test/stratis_test.py
++++ b/tests/storage_tests/devices_test/stratis_test.py
+@@ -105,7 +105,7 @@ def test_stratis_encrypted(self):
+ blivet.partitioning.do_partitioning(self.storage)
+
+ pool = self.storage.new_stratis_pool(name="blivetTestPool", parents=[bd],
+- encrypted=True, passphrase="abcde")
++ encrypted=True, passphrase="fipsneeds8chars")
+ self.storage.create_device(pool)
+
+ self.storage.do_it()
+@@ -260,7 +260,7 @@ def test_stratis_encrypted_clevis_tpm(self):
+ blivet.partitioning.do_partitioning(self.storage)
+
+ pool = self.storage.new_stratis_pool(name="blivetTestPool", parents=[bd],
+- encrypted=True, passphrase="abcde",
++ encrypted=True, passphrase="fipsneeds8chars",
+ clevis=StratisClevisConfig(pin="tpm2"))
+ self.storage.create_device(pool)
+
diff --git a/SOURCES/0032-Set-persistent-allow-discards-flag-for-new-LUKS-devices.patch b/SOURCES/0032-Set-persistent-allow-discards-flag-for-new-LUKS-devices.patch
new file mode 100644
index 0000000..a0b1530
--- /dev/null
+++ b/SOURCES/0032-Set-persistent-allow-discards-flag-for-new-LUKS-devices.patch
@@ -0,0 +1,39 @@
+From dd0ac302f2afcbbf4fd455416850a97a11d2d1b5 Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Mon, 10 Mar 2025 09:52:27 +0100
+Subject: [PATCH] Set persistent allow-discards flag for newly created LUKS
+ devices
+
+We are currently using the "allow-discards" option in /etc/crypttab to
+set the discards/fstrim feature for LUKS, but that doesn't work
+for Fedora Silverblue so we need to set the persistent flag in the
+LUKS header instead.
+
+Resolves: RHEL-82430
+---
+ blivet/formats/luks.py | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/blivet/formats/luks.py b/blivet/formats/luks.py
+index adf3c7112..4fc6b1342 100644
+--- a/blivet/formats/luks.py
++++ b/blivet/formats/luks.py
+@@ -336,7 +336,18 @@ def _create(self, **kwargs):
+
+ def _post_create(self, **kwargs):
+ super(LUKS, self)._post_create(**kwargs)
++
++ if self.luks_version == "luks2" and flags.discard_new:
++ try:
++ blockdev.crypto.luks_set_persistent_flags(self.device,
++ blockdev.CryptoLUKSPersistentFlags.ALLOW_DISCARDS)
++ except blockdev.CryptoError as e:
++ raise LUKSError("Failed to set allow discards flag for newly created LUKS format: %s" % str(e))
++ except AttributeError:
++ log.warning("Cannot set allow discards flag: not supported")
++
+ self.uuid = blockdev.crypto.luks_uuid(self.device)
++
+ if not self.map_name:
+ self.map_name = "luks-%s" % self.uuid
+
diff --git a/SOURCES/0033-Do-not-remove-PVs-from-devices-file-if-disabled-or-doesnt-exist.patch b/SOURCES/0033-Do-not-remove-PVs-from-devices-file-if-disabled-or-doesnt-exist.patch
new file mode 100644
index 0000000..39d5a12
--- /dev/null
+++ b/SOURCES/0033-Do-not-remove-PVs-from-devices-file-if-disabled-or-doesnt-exist.patch
@@ -0,0 +1,83 @@
+From 3e3b8d415ca50c4feaaf8d3688f0ebda2522d866 Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Mon, 20 Jan 2025 13:02:50 +0100
+Subject: [PATCH] Do not remove PVs from devices file if disabled or doesn't
+ exist
+
+When the file doesn't exist, the 'lvmdevices --deldev' call will
+fail but it will still create the devices file. This means we now
+have an empty devices file and all subsequent LVM calls will fail.
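+
+A rough sketch of the guard using the command line tools (the devices
+file path here is the usual default, not taken from this change):
+
+    import os
+    import subprocess
+
+    DEVICES_FILE = "/etc/lvm/devices/system.devices"
+
+    def deldev(pv_path):
+        if not os.path.exists(DEVICES_FILE):
+            # 'lvmdevices --deldev' would fail here and still create an
+            # empty devices file, breaking all later LVM calls
+            return
+        subprocess.run(["lvmdevices", "--deldev", pv_path], check=True)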
+ +Resolves: RHEL-84662 +--- + blivet/formats/lvmpv.py | 5 +++++ + tests/unit_tests/formats_tests/lvmpv_test.py | 22 ++++++++++++++++++++ + 2 files changed, 27 insertions(+) + +diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py +index 51fa4a3c8..f5d71dbd1 100644 +--- a/blivet/formats/lvmpv.py ++++ b/blivet/formats/lvmpv.py +@@ -166,6 +166,11 @@ def lvmdevices_remove(self): + if not lvm.HAVE_LVMDEVICES: + raise PhysicalVolumeError("LVM devices file feature is not supported") + ++ if not os.path.exists(lvm.LVM_DEVICES_FILE): ++ log.debug("Not removing %s from devices file: %s doesn't exist", ++ self.device, lvm.LVM_DEVICES_FILE) ++ return ++ + try: + blockdev.lvm.devices_delete(self.device) + except blockdev.LVMError as e: +diff --git a/tests/unit_tests/formats_tests/lvmpv_test.py b/tests/unit_tests/formats_tests/lvmpv_test.py +index 6490c7d48..54a59026d 100644 +--- a/tests/unit_tests/formats_tests/lvmpv_test.py ++++ b/tests/unit_tests/formats_tests/lvmpv_test.py +@@ -41,6 +41,11 @@ def test_lvm_devices(self): + + mock["blockdev"].lvm.devices_add.assert_not_called() + ++ # LVM devices file not enabled/supported -> devices_delete should not be called ++ fmt._destroy() ++ ++ mock["blockdev"].lvm.devices_delete.assert_not_called() ++ + with self.patches() as mock: + # LVM devices file enabled and devices file exists -> devices_add should be called + mock["lvm"].HAVE_LVMDEVICES = True +@@ -50,6 +55,11 @@ def test_lvm_devices(self): + + mock["blockdev"].lvm.devices_add.assert_called_with("/dev/test") + ++ # LVM devices file enabled and devices file exists -> devices_delete should be called ++ fmt._destroy() ++ ++ mock["blockdev"].lvm.devices_delete.assert_called_with("/dev/test") ++ + with self.patches() as mock: + # LVM devices file enabled and devices file doesn't exist + # and no existing VGs present -> devices_add should be called +@@ -61,6 +71,12 @@ def test_lvm_devices(self): + + mock["blockdev"].lvm.devices_add.assert_called_with("/dev/test") + ++ # LVM devices file enabled but devices file doesn't exist ++ # -> devices_delete should not be called ++ fmt._destroy() ++ ++ mock["blockdev"].lvm.devices_delete.assert_not_called() ++ + with self.patches() as mock: + # LVM devices file enabled and devices file doesn't exist + # and existing VGs present -> devices_add should not be called +@@ -71,3 +87,9 @@ def test_lvm_devices(self): + fmt._create() + + mock["blockdev"].lvm.devices_add.assert_not_called() ++ ++ # LVM devices file enabled but devices file doesn't exist ++ # -> devices_delete should not be called ++ fmt._destroy() ++ ++ mock["blockdev"].lvm.devices_delete.assert_not_called() diff --git a/SOURCES/0034-Include-additional-information-in-PartitioningError.patch b/SOURCES/0034-Include-additional-information-in-PartitioningError.patch new file mode 100644 index 0000000..33e6fa9 --- /dev/null +++ b/SOURCES/0034-Include-additional-information-in-PartitioningError.patch @@ -0,0 +1,85 @@ +From 6d2e5c70fecc68e0d62255d4e2a65e9d264578dd Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Wed, 22 Jan 2025 13:16:43 +0100 +Subject: [PATCH] Include additional information in PartitioningError + +The generic 'Unable to allocate requested partition scheme' is not +very helpful, we should try to include additional information if +possible. 
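+
+The aggregation amounts to collecting per-disk failure reasons and
+joining them into the final message, roughly:
+
+    errors = {}  # failure reason -> list of disk names
+    for disk, reason in [("vda", "no free partition slots"),
+                         ("vdb", "no free partition slots")]:
+        errors.setdefault(reason, []).append(disk)
+
+    detail = "\n".join(", ".join(disks) + ": " + reason
+                       for reason, disks in errors.items())
+    # -> "vda, vdb: no free partition slots"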
+
+Resolves: RHEL-8005
+---
+ blivet/partitioning.py | 25 ++++++++++++++++++++++---
+ 1 file changed, 22 insertions(+), 3 deletions(-)
+
+diff --git a/blivet/partitioning.py b/blivet/partitioning.py
+index ce77e4eb7..0a35c764d 100644
+--- a/blivet/partitioning.py
++++ b/blivet/partitioning.py
+@@ -34,7 +34,7 @@
+ from .flags import flags
+ from .devices import Device, PartitionDevice, device_path_to_name
+ from .size import Size
+-from .i18n import _
++from .i18n import _, N_
+ from .util import stringize, unicodeize, compare
+
+ import logging
+@@ -681,6 +681,11 @@ def resolve_disk_tags(disks, tags):
+ return [disk for disk in disks if any(tag in disk.tags for tag in tags)]
+
+
++class PartitioningErrors:
++ NO_PRIMARY = N_("no primary partition slots available")
++ NO_SLOTS = N_("no free partition slots")
++
++
+ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None):
+ """ Allocate partitions based on requested features.
+
+@@ -763,6 +768,7 @@ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None):
+ part_type = None
+ growth = 0 # in sectors
+ # loop through disks
++ errors = {}
+ for _disk in req_disks:
+ try:
+ disklabel = disklabels[_disk.path]
+@@ -798,6 +804,10 @@ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None):
+ if new_part_type is None:
+ # can't allocate any more partitions on this disk
+ log.debug("no free partition slots on %s", _disk.name)
++ if PartitioningErrors.NO_SLOTS in errors.keys():
++ errors[PartitioningErrors.NO_SLOTS].append(_disk.name)
++ else:
++ errors[PartitioningErrors.NO_SLOTS] = [_disk.name]
+ continue
+
+ if _part.req_primary and new_part_type != parted.PARTITION_NORMAL:
+@@ -808,7 +818,11 @@ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None):
+ new_part_type = parted.PARTITION_NORMAL
+ else:
+ # we need a primary slot and none are free on this disk
+- log.debug("no primary slots available on %s", _disk.name)
++ log.debug("no primary partition slots available on %s", _disk.name)
++ if PartitioningErrors.NO_PRIMARY in errors.keys():
++ errors[PartitioningErrors.NO_PRIMARY].append(_disk.name)
++ else:
++ errors[PartitioningErrors.NO_PRIMARY] = [_disk.name]
+ continue
+ elif _part.req_part_type is not None and \
+ new_part_type != _part.req_part_type:
+@@ -968,7 +982,12 @@ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None):
+ break
+
+ if free is None:
+- raise PartitioningError(_("Unable to allocate requested partition scheme."))
++ if not errors:
++ msg = _("Unable to allocate requested partition scheme.")
++ else:
++ errors_by_disk = (", ".join(disks) + ": " + _(error) for error, disks in errors.items())
++ msg = _("Unable to allocate requested partition scheme on requested disks:\n%s") % "\n".join(errors_by_disk)
++ raise PartitioningError(msg)
+
+ _disk = use_disk
+ disklabel = _disk.format
diff --git a/SOURCES/0035-LVMPV-format-size-fix.patch b/SOURCES/0035-LVMPV-format-size-fix.patch
new file mode 100644
index 0000000..d3e0c69
--- /dev/null
+++ b/SOURCES/0035-LVMPV-format-size-fix.patch
@@ -0,0 +1,572 @@
+From 6a54de2780aa3fd52b4a25dc8db7ab8c5b1b8d4d Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Tue, 21 Jan 2025 10:03:17 +0100
+Subject: [PATCH 1/7] Use pvs info from static data to get PV size in PVSize
+
+No need for special code for this; we can reuse the existing
+code from LVM static data.
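+
+Usage sketch of the cached data (names as in the change below; the
+device path is an example):
+
+    from blivet.static_data import pvs_info
+
+    pvs_info.drop_cache()                   # force a fresh scan
+    info = pvs_info.cache.get("/dev/vda1")  # keyed by PV device path
+    if info is not None:
+        print(info.pv_size)                 # PV size in bytes, as reported by LVM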
+---
+ blivet/tasks/pvtask.py | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/blivet/tasks/pvtask.py b/blivet/tasks/pvtask.py
+index b5bd72e0d..e93a61bc7 100644
+--- a/blivet/tasks/pvtask.py
++++ b/blivet/tasks/pvtask.py
+@@ -27,6 +27,7 @@
+
+ from ..errors import PhysicalVolumeError
+ from ..size import Size, B
++from ..static_data import pvs_info
+
+ from . import availability
+ from . import task
+@@ -55,13 +56,12 @@ def do_task(self): # pylint: disable=arguments-differ
+ :raises :class:`~.errors.PhysicalVolumeError`: if size cannot be obtained
+ """
+
+- try:
+- pv_info = blockdev.lvm.pvinfo(self.pv.device)
+- pv_size = pv_info.pv_size
+- except blockdev.LVMError as e:
+- raise PhysicalVolumeError(e)
++ pvs_info.drop_cache()
++ pv_info = pvs_info.cache.get(self.pv.device)
++ if pv_info is None:
++ raise PhysicalVolumeError("Failed to get PV info for %s" % self.pv.device)
+
+- return Size(pv_size)
++ return Size(pv_info.pv_size)
+
+
+ class PVResize(task.BasicApplication, dfresize.DFResizeTask):
+
+From 0b8239470762cc3b3732d2f40910be7e84102fa0 Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Tue, 21 Jan 2025 10:05:13 +0100
+Subject: [PATCH 2/7] Get the actual PV format size for LVMPV format
+
+---
+ blivet/formats/lvmpv.py | 2 ++
+ blivet/populator/helpers/lvm.py | 2 ++
+ tests/unit_tests/populator_test.py | 2 ++
+ 3 files changed, 6 insertions(+)
+
+diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py
+index f5d71dbd1..769c96e1d 100644
+--- a/blivet/formats/lvmpv.py
++++ b/blivet/formats/lvmpv.py
+@@ -101,6 +101,8 @@ def __init__(self, **kwargs):
+ # when set to True, blivet will try to resize the PV to fill all available space
+ self._grow_to_fill = False
+
++ self._target_size = self._size
++
+ def __repr__(self):
+ s = DeviceFormat.__repr__(self)
+ s += (" vg_name = %(vg_name)s vg_uuid = %(vg_uuid)s"
+diff --git a/blivet/populator/helpers/lvm.py b/blivet/populator/helpers/lvm.py
+index 6ef2f4174..74641bcf8 100644
+--- a/blivet/populator/helpers/lvm.py
++++ b/blivet/populator/helpers/lvm.py
+@@ -112,6 +112,8 @@ def _get_kwargs(self):
+ log.warning("PV %s has no pe_start", name)
+ if pv_info.pv_free:
+ kwargs["free"] = Size(pv_info.pv_free)
++ if pv_info.pv_size:
++ kwargs["size"] = Size(pv_info.pv_size)
+
+ return kwargs
+
+diff --git a/tests/unit_tests/populator_test.py b/tests/unit_tests/populator_test.py
+index 1ee29b57f..55b6be8d8 100644
+--- a/tests/unit_tests/populator_test.py
++++ b/tests/unit_tests/populator_test.py
+@@ -1064,6 +1064,7 @@ def test_run(self, *args):
+ pv_info.vg_uuid = sentinel.vg_uuid
+ pv_info.pe_start = 0
+ pv_info.pv_free = 0
++ pv_info.pv_size = "10g"
+
+ vg_device = Mock()
+ vg_device.id = 0
+@@ -1095,6 +1096,7 @@ def test_run(self, *args):
+ pv_info.vg_extent_count = 2500
+ pv_info.vg_free_count = 0
+ pv_info.vg_pv_count = 1
++ pv_info.pv_size = "10g"
+
+ with patch("blivet.static_data.lvm_info.PVsInfo.cache", new_callable=PropertyMock) as mock_pvs_cache:
+ mock_pvs_cache.return_value = {device.path: pv_info}
+
+From 14b9538a8fd9f5bfc7d744902517739b6fae7a22 Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Tue, 21 Jan 2025 13:35:38 +0100
+Subject: [PATCH 3/7] Update PV format size after adding/removing the PV
+ to/from the VG
+
+Unfortunately LVM subtracts VG metadata from the reported PV size
+so we need to make sure to update the size after the vgextend and
+vgreduce operation.
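+
+A back-of-the-envelope illustration (example numbers, not taken from
+the patch):
+
+    device_size = 100 * 1024 ** 2  # a 100 MiB PV-formatted partition
+    pe_start = 1 * 1024 ** 2       # typical 1 MiB metadata/data offset
+
+    # standalone, 'pvs -o pv_size' reports roughly the device size;
+    # once the PV joins a VG the reported size drops by the metadata
+    # area, so the cached format size must be refreshed after
+    # vgextend/vgreduce
+    pv_size_in_vg = device_size - pe_start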
+--- + blivet/devices/lvm.py | 12 ++++++++++++ + 1 file changed, 12 insertions(+) + +diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py +index 62974443e..85850d8e8 100644 +--- a/blivet/devices/lvm.py ++++ b/blivet/devices/lvm.py +@@ -315,9 +315,21 @@ def _remove(self, member): + if lv.status and not status: + lv.teardown() + ++ # update LVMPV format size --> PV format has different size when in VG ++ try: ++ member.format._size = member.format._target_size = member.format._size_info.do_task() ++ except errors.PhysicalVolumeError as e: ++ log.warning("Failed to obtain current size for device %s: %s", member.format.device, e) ++ + def _add(self, member): + blockdev.lvm.vgextend(self.name, member.path) + ++ # update LVMPV format size --> PV format has different size when in VG ++ try: ++ member.format._size = member.format._target_size = member.format._size_info.do_task() ++ except errors.PhysicalVolumeError as e: ++ log.warning("Failed to obtain current size for device %s: %s", member.path, e) ++ + def _add_log_vol(self, lv): + """ Add an LV to this VG. """ + if lv in self._lvs: + +From d6b0c283eb3236f3578dc28d40182f48d05a5c24 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 21 Jan 2025 14:22:07 +0100 +Subject: [PATCH 4/7] Use LVMPV format size when calculating VG size and free + space + +For existing PVs we need to check the format size instead of +simply expecting the format is fully resized to match the size of +the underlying block device. +--- + blivet/devices/lvm.py | 63 ++++++++++++++++++++++++++----------------- + 1 file changed, 39 insertions(+), 24 deletions(-) + +diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py +index 85850d8e8..e3d08dbce 100644 +--- a/blivet/devices/lvm.py ++++ b/blivet/devices/lvm.py +@@ -500,40 +500,55 @@ def reserved_percent(self, value): + + self._reserved_percent = value + +- def _get_pv_usable_space(self, pv): ++ def _get_pv_metadata_space(self, pv): ++ """ Returns how much space will be used by VG metadata in given PV ++ This depends on type of the PV, PE size and PE start. ++ """ + if isinstance(pv, MDRaidArrayDevice): +- return self.align(pv.size - 2 * pv.format.pe_start) ++ return 2 * pv.format.pe_start ++ else: ++ return pv.format.pe_start ++ ++ def _get_pv_usable_space(self, pv): ++ """ Return how much space can be actually used on given PV. 
++ This takes into account: ++ - VG metadata that is/will be stored in this PV ++ - the actual PV format size (which might differ from ++ the underlying block device size) ++ """ ++ ++ if pv.format.exists and pv.format.size and self.exists: ++ # PV format exists, we got its size and VG also exists ++ # -> all metadata is already accounted in the PV format size ++ return pv.format.size ++ elif pv.format.exists and pv.format.size and not self.exists: ++ # PV format exists, we got its size, but the VG doesn't exist ++ # -> metadata size is not accounted in the PV format size ++ return self.align(pv.format.size - self._get_pv_metadata_space(pv)) + else: +- return self.align(pv.size - pv.format.pe_start) ++ # something else -> either the PV format is not yet created or ++ # we for some reason failed to get size of the format, either way ++ # lets use the underlying block device size and calculate the ++ # metadata size ourselves ++ return self.align(pv.size - self._get_pv_metadata_space(pv)) + + @property + def lvm_metadata_space(self): +- """ The amount of the space LVM metadata cost us in this VG's PVs """ +- # NOTE: we either specify data alignment in a PV or the default is used +- # which is both handled by pv.format.pe_start, but LVM takes into +- # account also the underlying block device which means that e.g. +- # for an MD RAID device, it tries to align everything also to chunk +- # size and alignment offset of such device which may result in up +- # to a twice as big non-data area +- # TODO: move this to either LVMPhysicalVolume's pe_start property once +- # formats know about their devices or to a new LVMPhysicalVolumeDevice +- # class once it exists +- diff = Size(0) +- for pv in self.pvs: +- diff += pv.size - self._get_pv_usable_space(pv) +- +- return diff ++ """ The amount of the space LVM metadata cost us in this VG's PVs ++ Note: we either specify data alignment in a PV or the default is used ++ which is both handled by pv.format.pe_start, but LVM takes into ++ account also the underlying block device which means that e.g. 
++ for an MD RAID device, it tries to align everything also to chunk ++ size and alignment offset of such device which may result in up ++ to a twice as big non-data area ++ """ ++ return sum(self._get_pv_metadata_space(pv) for pv in self.pvs) + + @property + def size(self): + """ The size of this VG """ + # TODO: just ask lvm if isModified returns False +- +- # sum up the sizes of the PVs, subtract the unusable (meta data) space +- size = sum(pv.size for pv in self.pvs) +- size -= self.lvm_metadata_space +- +- return size ++ return sum(self._get_pv_usable_space(pv) for pv in self.pvs) + + @property + def extents(self): + +From 4d033869de8c22f627cc23e70023e82d9c6e90ed Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 21 Jan 2025 14:28:56 +0100 +Subject: [PATCH 5/7] Add more tests for PV and VG size and free space + +--- + tests/storage_tests/devices_test/lvm_test.py | 104 ++++++++++++++++++- + 1 file changed, 103 insertions(+), 1 deletion(-) + +diff --git a/tests/storage_tests/devices_test/lvm_test.py b/tests/storage_tests/devices_test/lvm_test.py +index 97ef1c4b9..988201839 100644 +--- a/tests/storage_tests/devices_test/lvm_test.py ++++ b/tests/storage_tests/devices_test/lvm_test.py +@@ -22,6 +22,18 @@ def setUp(self): + self.assertIsNone(disk.format.type) + self.assertFalse(disk.children) + ++ def _get_pv_size(self, pv): ++ out = subprocess.check_output(["pvs", "-o", "pv_size", "--noheadings", "--nosuffix", "--units=b", pv]) ++ return blivet.size.Size(out.decode().strip()) ++ ++ def _get_vg_size(self, vg): ++ out = subprocess.check_output(["vgs", "-o", "vg_size", "--noheadings", "--nosuffix", "--units=b", vg]) ++ return blivet.size.Size(out.decode().strip()) ++ ++ def _get_vg_free(self, vg): ++ out = subprocess.check_output(["vgs", "-o", "vg_free", "--noheadings", "--nosuffix", "--units=b", vg]) ++ return blivet.size.Size(out.decode().strip()) ++ + def _clean_up(self): + self.storage.reset() + for disk in self.storage.disks: +@@ -63,6 +75,8 @@ def test_lvm_basic(self): + self.assertIsInstance(pv, blivet.devices.PartitionDevice) + self.assertIsNotNone(pv.format) + self.assertEqual(pv.format.type, "lvmpv") ++ pv_size = self._get_pv_size(pv.path) ++ self.assertEqual(pv.format.size, pv_size) + + vg = self.storage.devicetree.get_device_by_name("blivetTestVG") + self.assertIsNotNone(vg) +@@ -72,6 +86,10 @@ def test_lvm_basic(self): + self.assertEqual(pv.format.vg_name, vg.name) + self.assertEqual(len(vg.parents), 1) + self.assertEqual(vg.parents[0], pv) ++ vg_size = self._get_vg_size(vg.name) ++ self.assertEqual(vg_size, vg.size) ++ vg_free = self._get_vg_free(vg.name) ++ self.assertEqual(vg_free, vg.free_space) + + lv = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestLV") + self.assertIsNotNone(lv) +@@ -112,6 +130,13 @@ def test_lvm_thin(self): + self.storage.do_it() + self.storage.reset() + ++ vg = self.storage.devicetree.get_device_by_name("blivetTestVG") ++ self.assertIsNotNone(vg) ++ vg_size = self._get_vg_size(vg.name) ++ self.assertEqual(vg_size, vg.size) ++ vg_free = self._get_vg_free(vg.name) ++ self.assertEqual(vg_free, vg.free_space) ++ + pool = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestPool") + self.assertIsNotNone(pool) + self.assertTrue(pool.is_thin_pool) +@@ -158,6 +183,14 @@ def _test_lvm_raid(self, seg_type, raid_level, stripe_size=0): + self.storage.do_it() + self.storage.reset() + ++ vg = self.storage.devicetree.get_device_by_name("blivetTestVG") ++ self.assertIsNotNone(vg) ++ ++ vg_size = self._get_vg_size(vg.name) ++ 
self.assertEqual(vg_size, vg.size) ++ vg_free = self._get_vg_free(vg.name) ++ self.assertEqual(vg_free, vg.free_space + vg.reserved_space) ++ + raidlv = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestRAIDLV") + self.assertIsNotNone(raidlv) + self.assertTrue(raidlv.is_raid_lv) +@@ -214,6 +247,13 @@ def test_lvm_cache(self): + self.storage.do_it() + self.storage.reset() + ++ vg = self.storage.devicetree.get_device_by_name("blivetTestVG") ++ self.assertIsNotNone(vg) ++ vg_size = self._get_vg_size(vg.name) ++ self.assertEqual(vg_size, vg.size) ++ vg_free = self._get_vg_free(vg.name) ++ self.assertEqual(vg_free, vg.free_space) ++ + cachedlv = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestCachedLV") + self.assertIsNotNone(cachedlv) + self.assertTrue(cachedlv.cached) +@@ -253,6 +293,13 @@ def test_lvm_cache_attach(self): + self.storage.do_it() + self.storage.reset() + ++ vg = self.storage.devicetree.get_device_by_name("blivetTestVG") ++ self.assertIsNotNone(vg) ++ vg_size = self._get_vg_size(vg.name) ++ self.assertEqual(vg_size, vg.size) ++ vg_free = self._get_vg_free(vg.name) ++ self.assertEqual(vg_free, vg.free_space) ++ + cachedlv = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestCachedLV") + self.assertIsNotNone(cachedlv) + cachepool = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestFastLV") +@@ -308,6 +355,13 @@ def test_lvm_cache_create_and_attach(self): + self.storage.do_it() + self.storage.reset() + ++ vg = self.storage.devicetree.get_device_by_name("blivetTestVG") ++ self.assertIsNotNone(vg) ++ vg_size = self._get_vg_size(vg.name) ++ self.assertEqual(vg_size, vg.size) ++ vg_free = self._get_vg_free(vg.name) ++ self.assertEqual(vg_free, vg.free_space) ++ + cachedlv = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestCachedLV") + self.assertIsNotNone(cachedlv) + +@@ -323,6 +377,13 @@ def test_lvm_cache_create_and_attach(self): + self.storage.do_it() + self.storage.reset() + ++ vg = self.storage.devicetree.get_device_by_name("blivetTestVG") ++ self.assertIsNotNone(vg) ++ vg_size = self._get_vg_size(vg.name) ++ self.assertEqual(vg_size, vg.size) ++ vg_free = self._get_vg_free(vg.name) ++ self.assertEqual(vg_free, vg.free_space) ++ + cachedlv = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestCachedLV") + self.assertIsNotNone(cachedlv) + self.assertTrue(cachedlv.cached) +@@ -352,6 +413,13 @@ def test_lvm_pvs_add_remove(self): + + self.storage.do_it() + ++ vg = self.storage.devicetree.get_device_by_name("blivetTestVG") ++ self.assertIsNotNone(vg) ++ vg_size = self._get_vg_size(vg.name) ++ self.assertEqual(vg_size, vg.size) ++ vg_free = self._get_vg_free(vg.name) ++ self.assertEqual(vg_free, vg.free_space) ++ + # create a second PV + disk2 = self.storage.devicetree.get_device_by_path(self.vdevs[1]) + self.assertIsNotNone(disk2) +@@ -366,6 +434,17 @@ def test_lvm_pvs_add_remove(self): + self.storage.do_it() + self.storage.reset() + ++ pv1 = self.storage.devicetree.get_device_by_name(pv1.name) ++ pv1_size = self._get_pv_size(pv1.path) ++ self.assertEqual(pv1.format.size, pv1_size) ++ ++ vg = self.storage.devicetree.get_device_by_name("blivetTestVG") ++ self.assertIsNotNone(vg) ++ vg_size = self._get_vg_size(vg.name) ++ self.assertEqual(vg_size, vg.size) ++ vg_free = self._get_vg_free(vg.name) ++ self.assertEqual(vg_free, vg.free_space) ++ + # add the PV to the existing VG + vg = self.storage.devicetree.get_device_by_name("blivetTestVG") + pv2 = 
self.storage.devicetree.get_device_by_name(pv2.name) +@@ -374,6 +453,17 @@ def test_lvm_pvs_add_remove(self): + self.storage.devicetree.actions.add(ac) + self.storage.do_it() + ++ pv2 = self.storage.devicetree.get_device_by_name(pv2.name) ++ pv2_size = self._get_pv_size(pv2.path) ++ self.assertEqual(pv2.format.size, pv2_size) ++ ++ vg = self.storage.devicetree.get_device_by_name("blivetTestVG") ++ self.assertIsNotNone(vg) ++ vg_size = self._get_vg_size(vg.name) ++ self.assertEqual(vg_size, vg.size) ++ vg_free = self._get_vg_free(vg.name) ++ self.assertEqual(vg_free, vg.free_space) ++ + self.assertEqual(pv2.format.vg_name, vg.name) + + self.storage.reset() +@@ -387,7 +477,19 @@ def test_lvm_pvs_add_remove(self): + self.storage.devicetree.actions.add(ac) + self.storage.do_it() + +- self.assertIsNone(pv1.format.vg_name) ++ pv2 = self.storage.devicetree.get_device_by_name(pv2.name) ++ pv2_size = self._get_pv_size(pv2.path) ++ self.assertEqual(pv2.format.size, pv2_size) ++ ++ vg = self.storage.devicetree.get_device_by_name("blivetTestVG") ++ self.assertIsNotNone(vg) ++ vg_size = self._get_vg_size(vg.name) ++ self.assertEqual(vg_size, vg.size) ++ vg_free = self._get_vg_free(vg.name) ++ self.assertEqual(vg_free, vg.free_space) ++ ++ self.assertIsNone(pv1.format.type) ++ + self.storage.reset() + + self.storage.reset() + +From 4dfa8d699ed1216c18d0c7effa33580a3aa56606 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 21 Jan 2025 15:16:29 +0100 +Subject: [PATCH 6/7] Add a separate test case for LVMPV smaller than the block + device + +--- + tests/storage_tests/devices_test/lvm_test.py | 55 ++++++++++++++++++++ + 1 file changed, 55 insertions(+) + +diff --git a/tests/storage_tests/devices_test/lvm_test.py b/tests/storage_tests/devices_test/lvm_test.py +index 988201839..a1064c9c4 100644 +--- a/tests/storage_tests/devices_test/lvm_test.py ++++ b/tests/storage_tests/devices_test/lvm_test.py +@@ -475,6 +475,11 @@ def test_lvm_pvs_add_remove(self): + pv1 = self.storage.devicetree.get_device_by_name(pv1.name) + ac = blivet.deviceaction.ActionRemoveMember(vg, pv1) + self.storage.devicetree.actions.add(ac) ++ ++ # schedule also removing the lvmpv format from the PV ++ ac = blivet.deviceaction.ActionDestroyFormat(pv1) ++ self.storage.devicetree.actions.add(ac) ++ + self.storage.do_it() + + pv2 = self.storage.devicetree.get_device_by_name(pv2.name) +@@ -497,3 +502,53 @@ def test_lvm_pvs_add_remove(self): + self.assertIsNotNone(vg) + self.assertEqual(len(vg.pvs), 1) + self.assertEqual(vg.pvs[0].name, pv2.name) ++ ++ def test_lvm_pv_size(self): ++ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0]) ++ self.assertIsNotNone(disk) ++ self.storage.initialize_disk(disk) ++ ++ pv = self.storage.new_partition(size=blivet.size.Size("100 MiB"), fmt_type="lvmpv", ++ parents=[disk]) ++ self.storage.create_device(pv) ++ ++ blivet.partitioning.do_partitioning(self.storage) ++ ++ self.storage.do_it() ++ self.storage.reset() ++ ++ pv = self.storage.devicetree.get_device_by_name(pv.name) ++ self.assertIsNotNone(pv) ++ ++ pv.format.update_size_info() ++ self.assertTrue(pv.format.resizable) ++ ++ ac = blivet.deviceaction.ActionResizeFormat(pv, blivet.size.Size("50 MiB")) ++ self.storage.devicetree.actions.add(ac) ++ ++ self.storage.do_it() ++ self.storage.reset() ++ ++ pv = self.storage.devicetree.get_device_by_name(pv.name) ++ self.assertIsNotNone(pv) ++ self.assertEqual(pv.format.size, blivet.size.Size("50 MiB")) ++ pv_size = self._get_pv_size(pv.path) ++ self.assertEqual(pv_size, pv.format.size) ++ ++ vg = 
self.storage.new_vg(name="blivetTestVG", parents=[pv]) ++ self.storage.create_device(vg) ++ ++ self.storage.do_it() ++ self.storage.reset() ++ ++ pv = self.storage.devicetree.get_device_by_name(pv.name) ++ self.assertIsNotNone(pv) ++ pv_size = self._get_pv_size(pv.path) ++ self.assertEqual(pv_size, pv.format.size) ++ ++ vg = self.storage.devicetree.get_device_by_name("blivetTestVG") ++ self.assertIsNotNone(vg) ++ vg_size = self._get_vg_size(vg.name) ++ self.assertEqual(vg_size, vg.size) ++ vg_free = self._get_vg_free(vg.name) ++ self.assertEqual(vg_free, vg.free_space) + +From 6cfa9d0df6faa79b8ab471ba34aa0b3d6f0dc338 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Mon, 14 Apr 2025 14:54:00 +0200 +Subject: [PATCH 7/7] Fix checking PV free space when removing it from a VG + +--- + blivet/devices/lvm.py | 14 ++++++++++---- + 1 file changed, 10 insertions(+), 4 deletions(-) + +diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py +index e3d08dbce..a03d57f97 100644 +--- a/blivet/devices/lvm.py ++++ b/blivet/devices/lvm.py +@@ -305,9 +305,15 @@ def _remove(self, member): + if lv.exists: + lv.setup() + ++ # if format was already scheduled for removal, use original_format ++ if member.format != "lvmpv": ++ fmt = member.original_format ++ else: ++ fmt = member.format ++ + # do not run pvmove on empty PVs +- member.format.update_size_info() +- if member.format.free < member.format.current_size: ++ fmt.update_size_info() ++ if fmt.free < fmt.current_size: + blockdev.lvm.pvmove(member.path) + blockdev.lvm.vgreduce(self.name, member.path) + +@@ -317,9 +323,9 @@ def _remove(self, member): + + # update LVMPV format size --> PV format has different size when in VG + try: +- member.format._size = member.format._target_size = member.format._size_info.do_task() ++ fmt._size = fmt._target_size = fmt._size_info.do_task() + except errors.PhysicalVolumeError as e: +- log.warning("Failed to obtain current size for device %s: %s", member.format.device, e) ++ log.warning("Failed to obtain current size for device %s: %s", fmt.device, e) + + def _add(self, member): + blockdev.lvm.vgextend(self.name, member.path) diff --git a/SOURCES/0036-Make-ActionDestroyFormat-optional.patch b/SOURCES/0036-Make-ActionDestroyFormat-optional.patch new file mode 100644 index 0000000..f5077f6 --- /dev/null +++ b/SOURCES/0036-Make-ActionDestroyFormat-optional.patch @@ -0,0 +1,258 @@ +From 68db0569b3508bbedf33d9ee3b69e8fc6a309b65 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Fri, 16 May 2025 17:15:17 +0200 +Subject: [PATCH 1/4] Allow ActionDestroyFormat to be marked as optional + +When we are also planning to remove the device, failing to remove +the format is not critical so we can ignore it in these cases. 
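+
+The pattern, stripped to its core (hypothetical names, not the real
+blivet classes):
+
+    class DestroyFormatAction:
+        def __init__(self, device, optional=False):
+            self.device = device
+            self.optional = optional
+
+        def execute(self):
+            try:
+                self.device.wipe()  # stand-in for the real teardown steps
+            except Exception as exc:
+                if not self.optional:
+                    raise
+                # the device is going away anyway; log and move on
+                print("ignoring optional action failure: %s" % exc)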
+ +Resolves: RHEL-8008 +Resolves: RHEL-8012 +--- + blivet/deviceaction.py | 37 +++++++++++++++++++++++-------------- + 1 file changed, 23 insertions(+), 14 deletions(-) + +diff --git a/blivet/deviceaction.py b/blivet/deviceaction.py +index fc1ca4b65..a6fc211ea 100644 +--- a/blivet/deviceaction.py ++++ b/blivet/deviceaction.py +@@ -728,12 +728,13 @@ class ActionDestroyFormat(DeviceAction): + obj = ACTION_OBJECT_FORMAT + type_desc_str = N_("destroy format") + +- def __init__(self, device): ++ def __init__(self, device, optional=False): + if device.format_immutable: + raise ValueError("this device's formatting cannot be modified") + + DeviceAction.__init__(self, device) + self.orig_format = self.device.format ++ self.optional = optional + + if not device.format.destroyable: + raise ValueError("resource to destroy this format type %s is unavailable" % device.format.type) +@@ -752,21 +753,29 @@ def execute(self, callbacks=None): + """ wipe the filesystem signature from the device """ + # remove any flag if set + super(ActionDestroyFormat, self).execute(callbacks=callbacks) +- status = self.device.status +- self.device.setup(orig=True) +- if hasattr(self.device, 'set_rw'): +- self.device.set_rw() + +- self.format.destroy() +- udev.settle() +- if isinstance(self.device, PartitionDevice) and self.device.disklabel_supported: +- if self.format.parted_flag: +- self.device.unset_flag(self.format.parted_flag) +- self.device.disk.original_format.commit_to_disk() +- udev.settle() ++ try: ++ status = self.device.status ++ self.device.setup(orig=True) ++ if hasattr(self.device, 'set_rw'): ++ self.device.set_rw() + +- if not status: +- self.device.teardown() ++ self.format.destroy() ++ udev.settle() ++ if isinstance(self.device, PartitionDevice) and self.device.disklabel_supported: ++ if self.format.parted_flag: ++ self.device.unset_flag(self.format.parted_flag) ++ self.device.disk.original_format.commit_to_disk() ++ udev.settle() ++ ++ if not status: ++ self.device.teardown() ++ except Exception as e: # pylint: disable=broad-except ++ if self.optional: ++ log.error("Ignoring error when executing optional action: Failed to destroy format on %s: %s.", ++ self.device.name, str(e)) ++ else: ++ raise + + def cancel(self): + if not self._applied: + +From fca71515094840ab1ca8821641284cfb0b687d82 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Fri, 16 May 2025 17:28:40 +0200 +Subject: [PATCH 2/4] Make ActionDestroyFormat optional when device is also + removed + +In both destroy_device and recursive_remove we try to remove both +the device and its format. In these cases the format destroy can +be considered to be optional and we don't need to fail just +because we failed to remove the format. 
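+
+Scheduling sketch (simplified from destroy_device below): the format
+action is optional precisely because a device destroy follows it:
+
+    from blivet.deviceaction import ActionDestroyDevice, ActionDestroyFormat
+
+    def schedule_destroy(storage, device):
+        # a failed format wipe is tolerable here -- the device itself
+        # is removed by the very next action
+        storage.devicetree.actions.add(ActionDestroyFormat(device, optional=True))
+        storage.devicetree.actions.add(ActionDestroyDevice(device))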
+ +Resolves: RHEL-8008 +Resolves: RHEL-8012 +--- + blivet/blivet.py | 2 +- + blivet/devicetree.py | 4 ++-- + 2 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/blivet/blivet.py b/blivet/blivet.py +index dc066b036..2e86f5bf6 100644 +--- a/blivet/blivet.py ++++ b/blivet/blivet.py +@@ -897,7 +897,7 @@ def destroy_device(self, device): + if device.format.exists and device.format.type and \ + not device.format_immutable: + # schedule destruction of any formatting while we're at it +- self.devicetree.actions.add(ActionDestroyFormat(device)) ++ self.devicetree.actions.add(ActionDestroyFormat(device, optional=True)) + + action = ActionDestroyDevice(device) + self.devicetree.actions.add(action) +diff --git a/blivet/devicetree.py b/blivet/devicetree.py +index c6c1b4400..f94e3ca30 100644 +--- a/blivet/devicetree.py ++++ b/blivet/devicetree.py +@@ -264,7 +264,7 @@ def recursive_remove(self, device, actions=True, remove_device=True, modparent=T + if actions: + if leaf.format.exists and not leaf.protected and \ + not leaf.format_immutable: +- self.actions.add(ActionDestroyFormat(leaf)) ++ self.actions.add(ActionDestroyFormat(leaf, optional=True)) + + self.actions.add(ActionDestroyDevice(leaf)) + else: +@@ -276,7 +276,7 @@ def recursive_remove(self, device, actions=True, remove_device=True, modparent=T + + if not device.format_immutable: + if actions: +- self.actions.add(ActionDestroyFormat(device)) ++ self.actions.add(ActionDestroyFormat(device, optional=True)) + else: + device.format = None + + +From 50efc63fa3053f863d03439a507b3e0a6d7b8168 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Mon, 19 May 2025 14:24:06 +0200 +Subject: [PATCH 3/4] tests: Add a simple test case for optional format destroy + action + +Related: RHEL-8008 +Related: RHEL-8012 +--- + tests/unit_tests/devices_test/lvm_test.py | 29 +++++++++++++++++++++++ + 1 file changed, 29 insertions(+) + +diff --git a/tests/unit_tests/devices_test/lvm_test.py b/tests/unit_tests/devices_test/lvm_test.py +index e645309fc..34c2084a8 100644 +--- a/tests/unit_tests/devices_test/lvm_test.py ++++ b/tests/unit_tests/devices_test/lvm_test.py +@@ -1160,3 +1160,32 @@ def test_vdo_compression_deduplication_change(self): + with patch("blivet.devices.lvm.blockdev.lvm") as lvm: + b.do_it() + lvm.vdo_enable_deduplication.assert_called_with(vg.name, vdopool.lvname) ++ ++ ++@patch("blivet.devices.lvm.LVMLogicalVolumeDevice._external_dependencies", new=[]) ++@patch("blivet.devices.lvm.LVMLogicalVolumeBase._external_dependencies", new=[]) ++@patch("blivet.devices.dm.DMDevice._external_dependencies", new=[]) ++class BlivetLVMOptionalDestroyTest(unittest.TestCase): ++ ++ def test_optional_format_destroy(self, *args): # pylint: disable=unused-argument ++ b = blivet.Blivet() ++ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"), ++ size=Size("10 GiB"), exists=True) ++ vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True) ++ lv = LVMLogicalVolumeDevice("testlv", parents=[vg], exists=True, size=Size("5 GiB"), ++ fmt=blivet.formats.get_format("xfs", exists=True)) ++ ++ for dev in (pv, vg, lv): ++ b.devicetree._add_device(dev) ++ ++ b.destroy_device(lv) ++ fmt_ac = b.devicetree.actions.find(action_type="destroy", object_type="format") ++ self.assertTrue(fmt_ac) ++ self.assertTrue(fmt_ac[0].optional) ++ ++ with patch("blivet.devices.lvm.blockdev.lvm") as lvm: ++ lvm.lvactivate.side_effect = RuntimeError() ++ try: ++ b.do_it() ++ except RuntimeError: ++ self.fail("Optional format destroy action is not optional") + +From 
ea913c5fa8e60cd5c2fdd8196be51c067a2a73d8 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 20 May 2025 13:02:00 +0200 +Subject: [PATCH 4/4] tests: Add test case for removing broken thin pool + +Related: RHEL-8008 +Related: RHEL-8012 +--- + tests/storage_tests/devices_test/lvm_test.py | 52 ++++++++++++++++++++ + 1 file changed, 52 insertions(+) + +diff --git a/tests/storage_tests/devices_test/lvm_test.py b/tests/storage_tests/devices_test/lvm_test.py +index a1064c9c4..10e7354ff 100644 +--- a/tests/storage_tests/devices_test/lvm_test.py ++++ b/tests/storage_tests/devices_test/lvm_test.py +@@ -1,5 +1,7 @@ + import os + import subprocess ++import tempfile ++from unittest.mock import patch + + from ..storagetestcase import StorageTestCase + +@@ -552,3 +554,53 @@ def test_lvm_pv_size(self): + self.assertEqual(vg_size, vg.size) + vg_free = self._get_vg_free(vg.name) + self.assertEqual(vg_free, vg.free_space) ++ ++ def _break_thin_pool(self, vgname): ++ os.system("vgchange -an %s >/dev/null 2>&1" % vgname) ++ ++ # changing transaction_id for the pool prevents it from being activated ++ with tempfile.NamedTemporaryFile(prefix="blivet_test") as temp: ++ os.system("vgcfgbackup -f %s %s >/dev/null 2>&1" % (temp.name, vgname)) ++ os.system("sed -i 's/transaction_id =.*/transaction_id = 123456/' %s >/dev/null 2>&1" % temp.name) ++ os.system("vgcfgrestore -f %s %s --force >/dev/null 2>&1" % (temp.name, vgname)) ++ ++ def test_lvm_broken_thin(self): ++ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0]) ++ self.assertIsNotNone(disk) ++ ++ self.storage.initialize_disk(disk) ++ ++ pv = self.storage.new_partition(size=blivet.size.Size("100 MiB"), fmt_type="lvmpv", ++ parents=[disk]) ++ self.storage.create_device(pv) ++ ++ blivet.partitioning.do_partitioning(self.storage) ++ ++ vg = self.storage.new_vg(name="blivetTestVG", parents=[pv]) ++ self.storage.create_device(vg) ++ ++ pool = self.storage.new_lv(thin_pool=True, size=blivet.size.Size("50 MiB"), ++ parents=[vg], name="blivetTestPool") ++ self.storage.create_device(pool) ++ ++ self.storage.do_it() ++ ++ # intentionally break the thin pool created above ++ self._break_thin_pool("blivetTestVG") ++ ++ self.storage.reset() ++ ++ pool = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestPool") ++ self.assertIsNotNone(pool) ++ ++ # check that the pool cannot be activated ++ try: ++ pool.setup() ++ except Exception: # pylint: disable=broad-except ++ pass ++ else: ++ self.fail("Failed to break thinpool for tests") ++ ++ # verify that the pool can be destroyed even if it cannot be activated ++ self.storage.recursive_remove(pool) ++ self.storage.do_it() diff --git a/SOURCES/0037-Wipe-end-partition-before-creating-it-as-well-as-the-start.patch b/SOURCES/0037-Wipe-end-partition-before-creating-it-as-well-as-the-start.patch new file mode 100644 index 0000000..fb476c5 --- /dev/null +++ b/SOURCES/0037-Wipe-end-partition-before-creating-it-as-well-as-the-start.patch @@ -0,0 +1,384 @@ +From c07938143a9906bc0e06e78c818227b4c06f64ad Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Tue, 27 May 2025 15:21:23 +0200 +Subject: [PATCH 1/3] Add some basic partitioning storage tests + +This supplements the existing tests which use sparse files. These +new test cases actually run do_it() and check the result after +reset. More test cases will follow. 
+ +Related: RHEL-76917 +--- + .../devices_test/partition_test.py | 148 ++++++++++++++++++ + 1 file changed, 148 insertions(+) + +diff --git a/tests/storage_tests/devices_test/partition_test.py b/tests/storage_tests/devices_test/partition_test.py +index 679fded6e..6ad8a8f1a 100644 +--- a/tests/storage_tests/devices_test/partition_test.py ++++ b/tests/storage_tests/devices_test/partition_test.py +@@ -11,12 +11,15 @@ + except ImportError: + from mock import patch + ++import blivet + from blivet.devices import DiskFile + from blivet.devices import PartitionDevice + from blivet.formats import get_format + from blivet.size import Size + from blivet.util import sparsetmpfile + ++from ..storagetestcase import StorageTestCase ++ + + Weighted = namedtuple("Weighted", ["fstype", "mountpoint", "true_funcs", "weight"]) + +@@ -218,3 +221,148 @@ def test_extended_min_size(self): + end_free = (extended_end - logical_end) * sector_size + self.assertEqual(extended_device.min_size, + extended_device.align_target_size(extended_device.current_size - end_free)) ++ ++ ++class PartitionTestCase(StorageTestCase): ++ ++ def setUp(self): ++ super().setUp() ++ ++ disks = [os.path.basename(vdev) for vdev in self.vdevs] ++ self.storage = blivet.Blivet() ++ self.storage.exclusive_disks = disks ++ self.storage.reset() ++ ++ # make sure only the targetcli disks are in the devicetree ++ for disk in self.storage.disks: ++ self.assertTrue(disk.path in self.vdevs) ++ self.assertIsNone(disk.format.type) ++ self.assertFalse(disk.children) ++ ++ def _clean_up(self): ++ self.storage.reset() ++ for disk in self.storage.disks: ++ if disk.path not in self.vdevs: ++ raise RuntimeError("Disk %s found in devicetree but not in disks created for tests" % disk.name) ++ self.storage.recursive_remove(disk) ++ ++ self.storage.do_it() ++ ++ def test_msdos_basic(self): ++ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0]) ++ self.assertIsNotNone(disk) ++ ++ self.storage.format_device(disk, blivet.formats.get_format("disklabel", label_type="msdos")) ++ ++ for i in range(4): ++ part = self.storage.new_partition(size=Size("100 MiB"), parents=[disk], ++ primary=True) ++ self.storage.create_device(part) ++ ++ blivet.partitioning.do_partitioning(self.storage) ++ ++ self.storage.do_it() ++ self.storage.reset() ++ ++ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0]) ++ self.assertIsNotNone(disk) ++ self.assertEqual(disk.format.type, "disklabel") ++ self.assertEqual(disk.format.label_type, "msdos") ++ self.assertIsNotNone(disk.format.parted_disk) ++ self.assertIsNotNone(disk.format.parted_device) ++ self.assertEqual(len(disk.format.partitions), 4) ++ self.assertEqual(len(disk.format.primary_partitions), 4) ++ self.assertEqual(len(disk.children), 4) ++ ++ for i in range(4): ++ part = self.storage.devicetree.get_device_by_path(self.vdevs[0] + str(i + 1)) ++ self.assertIsNotNone(part) ++ self.assertEqual(part.type, "partition") ++ self.assertEqual(part.disk, disk) ++ self.assertEqual(part.size, Size("100 MiB")) ++ self.assertTrue(part.is_primary) ++ self.assertFalse(part.is_extended) ++ self.assertFalse(part.is_logical) ++ self.assertIsNotNone(part.parted_partition) ++ ++ def test_msdos_extended(self): ++ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0]) ++ self.assertIsNotNone(disk) ++ ++ self.storage.format_device(disk, blivet.formats.get_format("disklabel", label_type="msdos")) ++ ++ part = self.storage.new_partition(size=Size("100 MiB"), parents=[disk]) ++ self.storage.create_device(part) ++ ++ part = 
self.storage.new_partition(size=Size("1 GiB"), parents=[disk],
++ part_type=parted.PARTITION_EXTENDED)
++ self.storage.create_device(part)
++
++ blivet.partitioning.do_partitioning(self.storage)
++
++ for i in range(4):
++ part = self.storage.new_partition(size=Size("100 MiB"), parents=[disk],
++ part_type=parted.PARTITION_LOGICAL)
++ self.storage.create_device(part)
++
++ blivet.partitioning.do_partitioning(self.storage)
++
++ self.storage.do_it()
++ self.storage.reset()
++
++ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
++ self.assertIsNotNone(disk)
++ self.assertEqual(disk.format.type, "disklabel")
++ self.assertEqual(disk.format.label_type, "msdos")
++ self.assertIsNotNone(disk.format.parted_disk)
++ self.assertIsNotNone(disk.format.parted_device)
++ self.assertEqual(len(disk.format.partitions), 6)
++ self.assertEqual(len(disk.format.primary_partitions), 1)
++ self.assertEqual(len(disk.children), 6)
++
++ for i in range(4, 8):
++ part = self.storage.devicetree.get_device_by_path(self.vdevs[0] + str(i + 1))
++ self.assertIsNotNone(part)
++ self.assertEqual(part.type, "partition")
++ self.assertEqual(part.disk, disk)
++ self.assertEqual(part.size, Size("100 MiB"))
++ self.assertFalse(part.is_primary)
++ self.assertFalse(part.is_extended)
++ self.assertTrue(part.is_logical)
++ self.assertIsNotNone(part.parted_partition)
++
++ def test_gpt_basic(self):
++ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
++ self.assertIsNotNone(disk)
++
++ self.storage.format_device(disk, blivet.formats.get_format("disklabel", label_type="gpt"))
++
++ for i in range(4):
++ part = self.storage.new_partition(size=Size("100 MiB"), parents=[disk],)
++ self.storage.create_device(part)
++
++ blivet.partitioning.do_partitioning(self.storage)
++
++ self.storage.do_it()
++ self.storage.reset()
++
++ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
++ self.assertIsNotNone(disk)
++ self.assertEqual(disk.format.type, "disklabel")
++ self.assertEqual(disk.format.label_type, "gpt")
++ self.assertIsNotNone(disk.format.parted_disk)
++ self.assertIsNotNone(disk.format.parted_device)
++ self.assertEqual(len(disk.format.partitions), 4)
++ self.assertEqual(len(disk.format.primary_partitions), 4)
++ self.assertEqual(len(disk.children), 4)
++
++ for i in range(4):
++ part = self.storage.devicetree.get_device_by_path(self.vdevs[0] + str(i + 1))
++ self.assertIsNotNone(part)
++ self.assertEqual(part.type, "partition")
++ self.assertEqual(part.disk, disk)
++ self.assertEqual(part.size, Size("100 MiB"))
++ self.assertTrue(part.is_primary)
++ self.assertFalse(part.is_extended)
++ self.assertFalse(part.is_logical)
++ self.assertIsNotNone(part.parted_partition)
+
+From 1486d2d47d9b757694a3da88ccc13d29d8bb12fd Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Tue, 27 May 2025 14:10:49 +0200
+Subject: [PATCH 2/3] Wipe end of partition before creating it as well as the
+ start
+
+We are currently overwriting the start of the newly created partition
+with zeroes to remove any filesystem metadata that might occupy
+the space. This extends this functionality to the end of the partition
+to remove 1.0 MD metadata that might be there.
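+
+The end-of-partition wipe mirrors the existing start wipe; a sketch
+with example values (variable names follow the hunk below):
+
+    bs = 512              # logical sector size in bytes
+    count = 2048          # 2048 * 512 B == 1 MiB to erase
+    end = 204800          # example: last sector of the partition
+    device = "/dev/vda1"  # example partition node
+
+    # 'seek' skips that many blocks of 'bs' bytes, so this zeroes the
+    # trailing ~1 MiB where v1.0 MD RAID metadata is stored
+    cmd = ["dd", "if=/dev/zero", "of=%s" % device,
+           "bs=%d" % bs, "seek=%d" % (end - count), "count=%d" % count]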
+ +Resolves: RHEL-76917 +--- + blivet/devices/partition.py | 20 +++++++++++++++++++- + 1 file changed, 19 insertions(+), 1 deletion(-) + +diff --git a/blivet/devices/partition.py b/blivet/devices/partition.py +index 6ae4b8d36..1dac75a5a 100644 +--- a/blivet/devices/partition.py ++++ b/blivet/devices/partition.py +@@ -599,7 +599,7 @@ def _wipe(self): + """ Wipe the partition metadata. + + Assumes that the partition metadata is located at the start +- of the partition and occupies no more than 1 MiB. ++ and end of the partition and occupies no more than 1 MiB. + + Erases in block increments. Erases the smallest number of blocks + such that at least 1 MiB is erased or the whole partition is +@@ -632,6 +632,24 @@ def _wipe(self): + # things to settle. + udev.settle() + ++ if count >= part_len: ++ # very small partition, we wiped it completely already ++ return ++ ++ # now do the end of the partition as well (RAID 1.0 metadata) ++ end = self.parted_partition.geometry.end ++ cmd = ["dd", "if=/dev/zero", "of=%s" % device, "bs=%d" % bs, ++ "seek=%d" % (end - count), "count=%d" % count] ++ try: ++ util.run_program(cmd) ++ except OSError as e: ++ log.error(str(e)) ++ finally: ++ # If a udev device is created with the watch option, then ++ # a change uevent is synthesized and we need to wait for ++ # things to settle. ++ udev.settle() ++ + def _create(self): + """ Create the device. """ + log_method_call(self, self.name, status=self.status) + +From f0f78b801fb52425c13d0384f6867bf55839d98f Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Wed, 28 May 2025 11:01:14 +0200 +Subject: [PATCH 3/3] tests: Add tests for wiping stale metadata from new + partitions + +Related: RHEL-76917 +--- + .../devices_test/partition_test.py | 119 ++++++++++++++++++ + 1 file changed, 119 insertions(+) + +diff --git a/tests/storage_tests/devices_test/partition_test.py b/tests/storage_tests/devices_test/partition_test.py +index 6ad8a8f1a..f4be3aa4c 100644 +--- a/tests/storage_tests/devices_test/partition_test.py ++++ b/tests/storage_tests/devices_test/partition_test.py +@@ -4,6 +4,7 @@ + import os + import six + import unittest ++import blivet.deviceaction + import parted + + try: +@@ -366,3 +367,121 @@ def test_gpt_basic(self): + self.assertFalse(part.is_extended) + self.assertFalse(part.is_logical) + self.assertIsNotNone(part.parted_partition) ++ ++ def _partition_wipe_check(self): ++ part1 = self.storage.devicetree.get_device_by_path(self.vdevs[0] + "1") ++ self.assertIsNotNone(part1) ++ self.assertIsNone(part1.format.type) ++ ++ out = blivet.util.capture_output(["blkid", "-p", "-sTYPE", "-ovalue", self.vdevs[0] + "1"]) ++ self.assertEqual(out.strip(), "") ++ ++ part2 = self.storage.devicetree.get_device_by_path(self.vdevs[0] + "2") ++ self.assertIsNotNone(part2) ++ self.assertEqual(part2.format.type, "ext4") ++ ++ try: ++ part2.format.do_check() ++ except blivet.errors.FSError as e: ++ self.fail("Partition wipe corrupted filesystem on an adjacent partition: %s" % str(e)) ++ ++ out = blivet.util.capture_output(["blkid", "-p", "-sTYPE", "-ovalue", self.vdevs[0] + "2"]) ++ self.assertEqual(out.strip(), "ext4") ++ ++ def test_partition_wipe_ext(self): ++ """ Check that any stray filesystem metadata are removed before creating a partition """ ++ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0]) ++ self.assertIsNotNone(disk) ++ ++ self.storage.format_device(disk, blivet.formats.get_format("disklabel", label_type="gpt")) ++ ++ # create two partitions with ext4 ++ part1 = 
self.storage.new_partition(size=Size("100 MiB"), parents=[disk],
++                                            fmt=blivet.formats.get_format("ext4"))
++        self.storage.create_device(part1)
++
++        part2 = self.storage.new_partition(size=Size("1 MiB"), parents=[disk], grow=True,
++                                           fmt=blivet.formats.get_format("ext4"))
++        self.storage.create_device(part2)
++
++        blivet.partitioning.do_partitioning(self.storage)
++
++        self.storage.do_it()
++        self.storage.reset()
++
++        # remove the first partition (only the partition, without removing the format)
++        part1 = self.storage.devicetree.get_device_by_path(self.vdevs[0] + "1")
++        ac = blivet.deviceaction.ActionDestroyDevice(part1)
++        self.storage.devicetree.actions.add(ac)
++
++        self.storage.do_it()
++        self.storage.reset()
++
++        # create the first partition again (without ext4)
++        disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
++        part1 = self.storage.new_partition(size=Size("100 MiB"), parents=[disk])
++        self.storage.create_device(part1)
++
++        blivet.partitioning.do_partitioning(self.storage)
++
++        # XXX PartitionDevice._post_create calls wipefs on the partition, we want to check that
++        # the _pre_create dd wipe works, so we need to skip the _post_create wipefs call
++        part1._post_create = lambda: None
++
++        self.storage.do_it()
++        self.storage.reset()
++
++        # make sure the ext4 signature is not present on part1 (and untouched on part2)
++        self._partition_wipe_check()
++
++    def test_partition_wipe_mdraid(self):
++        """ Check that any stray RAID metadata are removed before creating a partition """
++        disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
++        self.assertIsNotNone(disk)
++
++        self.storage.format_device(disk, blivet.formats.get_format("disklabel", label_type="gpt"))
++
++        # create two partitions, one empty, one with ext4
++        part1 = self.storage.new_partition(size=Size("100 MiB"), parents=[disk])
++        self.storage.create_device(part1)
++
++        part2 = self.storage.new_partition(size=Size("1 MiB"), parents=[disk], grow=True,
++                                           fmt=blivet.formats.get_format("ext4"))
++        self.storage.create_device(part2)
++
++        blivet.partitioning.do_partitioning(self.storage)
++
++        self.storage.do_it()
++        self.storage.reset()
++
++        # create an MD RAID array with metadata 1.0 on the first partition
++        ret = blivet.util.run_program(["mdadm", "--create", "blivetMDTest", "--level=linear",
++                                       "--metadata=1.0", "--raid-devices=1", "--force", part1.path])
++        self.assertEqual(ret, 0, "Failed to create RAID array for partition wipe test")
++        ret = blivet.util.run_program(["mdadm", "--stop", "/dev/md/blivetMDTest"])
++        self.assertEqual(ret, 0, "Failed to stop RAID array for partition wipe test")
++
++        # now remove the partition without removing the array first
++        part1 = self.storage.devicetree.get_device_by_path(self.vdevs[0] + "1")
++        ac = blivet.deviceaction.ActionDestroyDevice(part1)
++        self.storage.devicetree.actions.add(ac)
++
++        self.storage.do_it()
++        self.storage.reset()
++
++        # create the first partition again (without format)
++        disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
++        part1 = self.storage.new_partition(size=Size("100 MiB"), parents=[disk])
++        self.storage.create_device(part1)
++
++        blivet.partitioning.do_partitioning(self.storage)
++
++        # XXX PartitionDevice._post_create calls wipefs on the partition, we want to check that
++        # the _pre_create dd wipe works, so we need to skip the _post_create wipefs call
++        part1._post_create = lambda: None
++
++        self.storage.do_it()
++        self.storage.reset()
++
++        # make sure the mdmember signature is not present on part1 (and 
ext4 is untouched on part2)
++        self._partition_wipe_check()
diff --git a/SOURCES/0038-Add-a-pre-wipe-fixup-function-for-LVM-logical-volume.patch b/SOURCES/0038-Add-a-pre-wipe-fixup-function-for-LVM-logical-volume.patch
new file mode 100644
index 0000000..613c6aa
--- /dev/null
+++ b/SOURCES/0038-Add-a-pre-wipe-fixup-function-for-LVM-logical-volume.patch
@@ -0,0 +1,65 @@
+From f70ee1ef08c20485f49b30fe1072a7ccafaaa2fe Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Fri, 1 Aug 2025 15:03:09 +0200
+Subject: [PATCH] Add a pre-wipe fixup function for LVM logical volumes
+
+LVs scheduled to be removed are always activated so that the format
+can be removed during installation. If a read-only LV with the skip
+activation flag contains MD metadata, activating the LV to remove the
+format means the MD array is auto-assembled by udev, preventing us
+from removing it. For this special case, we simply stop the array
+before removing the format.
+
+Resolves: RHEL-68368
+---
+ blivet/deviceaction.py |  3 +++
+ blivet/devices/lvm.py  | 19 +++++++++++++++++++
+ 2 files changed, 22 insertions(+)
+
+diff --git a/blivet/deviceaction.py b/blivet/deviceaction.py
+index a6fc211e..169c3a10 100644
+--- a/blivet/deviceaction.py
++++ b/blivet/deviceaction.py
+@@ -760,6 +760,9 @@ class ActionDestroyFormat(DeviceAction):
+         if hasattr(self.device, 'set_rw'):
+             self.device.set_rw()
+
++        if hasattr(self.device, 'pre_format_destroy'):
++            self.device.pre_format_destroy()
++
+         self.format.destroy()
+         udev.settle()
+         if isinstance(self.device, PartitionDevice) and self.device.disklabel_supported:
+diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
+index a03d57f9..6ea35212 100644
+--- a/blivet/devices/lvm.py
++++ b/blivet/devices/lvm.py
+@@ -2695,6 +2695,25 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
+         else:
+             blockdev.lvm.lvactivate(self.vg.name, self._name, ignore_skip=ignore_skip_activation)
+
++    def pre_format_destroy(self):
++        """ Fixup needed to run before wiping this device """
++        if self.ignore_skip_activation > 0:
++            # the LV was not activated during the initial scan, so if there is an MD array on it
++            # it will now also get activated and we need to stop it to be able to remove the LV
++            try:
++                info = blockdev.md.examine(self.path)
++            except blockdev.MDRaidError:
++                pass
++            else:
++                # give udev a bit of time to activate the array so we can deactivate it again
++                time.sleep(5)
++                log.info("MD metadata found on LV with skip activation, stopping the array %s",
++                         info.device)
++                try:
++                    blockdev.md.deactivate(info.device)
++                except blockdev.MDRaidError as err:
++                    log.info("failed to deactivate %s: %s", info.device, str(err))
++
+     @type_specific
+     def _pre_create(self):
+         LVMLogicalVolumeBase._pre_create(self)
+-- 
+2.50.1
+
diff --git a/SPECS/python-blivet.spec b/SPECS/python-blivet.spec
index 487907f..2af0814 100644
--- a/SPECS/python-blivet.spec
+++ b/SPECS/python-blivet.spec
@@ -23,7 +23,7 @@ Version: 3.6.0
 #%%global prerelease .b2
 # prerelease, if defined, should be something like .a1, .b1, .b2.dev1, or .c2
-Release: 14%{?prerelease}%{?dist}
+Release: 28%{?prerelease}%{?dist}
 Epoch: 1
 License: LGPLv2+
 %global realname blivet
@@ -53,6 +53,21 @@ Patch19: 0020-nvme-add_unit_tests.patch
 Patch20: 0021-Add-support-for-creating-shared-LVM-setups.patch
 Patch21: 0022-add-udev-builtin-path_id-property-to-zfcp-attached-S.patch
 Patch22: 0023-Do-not-add-new-PVs-to-the-LVM-devices-file-if-it-doe.patch
+Patch23: 0024-Added-support-for-PV-grow.patch
+Patch24: 
0025-Stratis-fixes-backport.patch +Patch25: 0026-XFS-resize-test-fix.patch +Patch26: 0027-RHEL96-bugfixes-1.patch +Patch27: 0028-Fix-checking-for-NVMe-plugin-availability.patch +Patch28: 0029-Align-sizes-up-for-growable-LVs.patch +Patch29: 0030-mod_pass_in_stratis_test.patch +Patch30: 0031-Fix_running_tests_in_FIPS_mode.patch +Patch31: 0032-Set-persistent-allow-discards-flag-for-new-LUKS-devices.patch +Patch32: 0033-Do-not-remove-PVs-from-devices-file-if-disabled-or-doesnt-exist.patch +Patch33: 0034-Include-additional-information-in-PartitioningError.patch +Patch34: 0035-LVMPV-format-size-fix.patch +Patch35: 0036-Make-ActionDestroyFormat-optional.patch +Patch36: 0037-Wipe-end-partition-before-creating-it-as-well-as-the-start.patch +Patch37: 0038-Add-a-pre-wipe-fixup-function-for-LVM-logical-volume.patch # Versions of required components (done so we make sure the buildrequires # match the requires versions of things). @@ -216,6 +231,70 @@ configuration. %endif %changelog +* Mon Aug 04 2025 Vojtech Trefny - 3.6.0-28 +- Add a pre-wipe fixup function for LVM logical volumes + Resolves: RHEL-68368 + +* Fri May 30 2025 Vojtech Trefny - 3.6.0-27 +- Wipe end partition before creating it as well as the start + Resolves: RHEL-76917 + +* Tue May 20 2025 Vojtech Trefny - 3.6.0-26 +- Make ActionDestroyFormat optional when the device is also scheduled to be removed + Resolves: RHEL-8008 + Resolves: RHEL-8012 + +* Mon Apr 14 2025 Vojtech Trefny - 3.6.0-25 +- Get the actual PV format size for LVMPV format + Resolves: RHEL-74078 +- Include additional information in PartitioningError + Resolves: RHEL-8005 + +* Thu Mar 27 2025 Vojtech Trefny - 3.6.0-24 +- Do not remove PVs from devices file if disabled or doesn't exist + Resolves: RHEL-84662 + +* Tue Mar 11 2025 Vojtech Trefny - 3.6.0-23 +- Set persistent allow-discards flag for newly created LUKS devices + Resolves: RHEL-82430 + +* Tue Nov 12 2024 Vojtech Trefny - 3.6.0-22 +- Fix running tests in FIPS mode + Resolves: RHEL-8029 + +* Fri Nov 1 2024 Jan Pokorny - 3.6.0-21 +- Modified passphrase in stratis test + Resolves: RHEL-8029 + +* Thu Oct 24 2024 Vojtech Trefny - 3.6.0-20 +- Align sizes up for growable LVs + Resolves: RHEL-8036 + Resolves: RHEL-19725 + +* Mon Sep 23 2024 Vojtech Trefny - 3.6.0-19 +- Fix checking for NVMe plugin availability + Resolves: RHEL-28124 + +* Mon Sep 09 2024 Vojtech Trefny - 3.6.0-18 +- Add a basic read-only support for UDF filesystem + Resolves: RHEL-13329 +- nvme: Skip startup/write when NVMe plugin isn't available + Resolves: RHEL-28124 + +* Mon Jul 22 2024 Vojtech Trefny - 3.6.0-17 +- Fix 'Try waiting after partition creation for XFS resize test' + Resolves: RHEL-8009 + +* Thu Jun 27 2024 Vojtech Trefny - 3.6.0-16 +- tests: Try waiting after partition creation for XFS resize test + Resolves: RHEL-8009 + +* Thu May 16 2024 Vojtech Trefny - 3.6.0-15 +- Backport fixes for Stratis support needed for storage role + Resolves: RHEL-35382 +- Add support for resizing PVs to the size of the underlying block device + Resolves: RHEL-35386 + * Fri Feb 09 2024 Vojtech Trefny - 3.6.0-14 - Do not add new PVs to the LVM devices file if it doesn't exist and VGs are present Resolves: RHEL-473