From fcc2d8421c0af8b3da714a5196ad8dc618c92667 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny
Date: Mon, 14 Apr 2025 14:31:12 +0200
Subject: [PATCH] C9S bugfix update

- Get the actual PV format size for LVMPV format
  Resolves: RHEL-74078
- Include additional information in PartitioningError
  Resolves: RHEL-8005
---
 ...nal-information-in-PartitioningError.patch |  85 +++
 0035-LVMPV-format-size-fix.patch              | 572 ++++++++++++++++++
 python-blivet.spec                            |  10 +-
 3 files changed, 666 insertions(+), 1 deletion(-)
 create mode 100644 0034-Include-additional-information-in-PartitioningError.patch
 create mode 100644 0035-LVMPV-format-size-fix.patch

diff --git a/0034-Include-additional-information-in-PartitioningError.patch b/0034-Include-additional-information-in-PartitioningError.patch
new file mode 100644
index 0000000..33e6fa9
--- /dev/null
+++ b/0034-Include-additional-information-in-PartitioningError.patch
@@ -0,0 +1,85 @@
+From 6d2e5c70fecc68e0d62255d4e2a65e9d264578dd Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Wed, 22 Jan 2025 13:16:43 +0100
+Subject: [PATCH] Include additional information in PartitioningError
+
+The generic 'Unable to allocate requested partition scheme' is not
+very helpful, so we should try to include additional information if
+possible.
+
+Resolves: RHEL-8005
+---
+ blivet/partitioning.py | 25 ++++++++++++++++++++++---
+ 1 file changed, 22 insertions(+), 3 deletions(-)
+
+diff --git a/blivet/partitioning.py b/blivet/partitioning.py
+index ce77e4eb7..0a35c764d 100644
+--- a/blivet/partitioning.py
++++ b/blivet/partitioning.py
+@@ -34,7 +34,7 @@
+ from .flags import flags
+ from .devices import Device, PartitionDevice, device_path_to_name
+ from .size import Size
+-from .i18n import _
++from .i18n import _, N_
+ from .util import stringize, unicodeize, compare
+ 
+ import logging
+@@ -681,6 +681,11 @@ def resolve_disk_tags(disks, tags):
+     return [disk for disk in disks if any(tag in disk.tags for tag in tags)]
+ 
+ 
++class PartitioningErrors:
++    NO_PRIMARY = N_("no primary partition slots available")
++    NO_SLOTS = N_("no free partition slots")
++
++
+ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None):
+     """ Allocate partitions based on requested features.
+ 
+@@ -763,6 +768,7 @@ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None):
+         part_type = None
+         growth = 0  # in sectors
+         # loop through disks
++        errors = {}
+         for _disk in req_disks:
+             try:
+                 disklabel = disklabels[_disk.path]
+@@ -798,6 +804,10 @@ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None):
+             if new_part_type is None:
+                 # can't allocate any more partitions on this disk
+                 log.debug("no free partition slots on %s", _disk.name)
++                if PartitioningErrors.NO_SLOTS in errors.keys():
++                    errors[PartitioningErrors.NO_SLOTS].append(_disk.name)
++                else:
++                    errors[PartitioningErrors.NO_SLOTS] = [_disk.name]
+                 continue
+ 
+             if _part.req_primary and new_part_type != parted.PARTITION_NORMAL:
+@@ -808,7 +818,11 @@ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None):
+                     new_part_type = parted.PARTITION_NORMAL
+                 else:
+                     # we need a primary slot and none are free on this disk
+-                    log.debug("no primary slots available on %s", _disk.name)
++                    log.debug("no primary partition slots available on %s", _disk.name)
++                    if PartitioningErrors.NO_PRIMARY in errors.keys():
++                        errors[PartitioningErrors.NO_PRIMARY].append(_disk.name)
++                    else:
++                        errors[PartitioningErrors.NO_PRIMARY] = [_disk.name]
+                 continue
+             elif _part.req_part_type is not None and \
+                     new_part_type != _part.req_part_type:
+@@ -968,7 +982,12 @@ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None):
+                 break
+ 
+         if free is None:
+-            raise PartitioningError(_("Unable to allocate requested partition scheme."))
++            if not errors:
++                msg = _("Unable to allocate requested partition scheme.")
++            else:
++                errors_by_disk = (", ".join(disks) + ": " + _(error) for error, disks in errors.items())
++                msg = _("Unable to allocate requested partition scheme on requested disks:\n%s") % "\n".join(errors_by_disk)
++            raise PartitioningError(msg)
+ 
+         _disk = use_disk
+         disklabel = _disk.format
diff --git a/0035-LVMPV-format-size-fix.patch b/0035-LVMPV-format-size-fix.patch
new file mode 100644
index 0000000..d3e0c69
--- /dev/null
+++ b/0035-LVMPV-format-size-fix.patch
@@ -0,0 +1,572 @@
+From 6a54de2780aa3fd52b4a25dc8db7ab8c5b1b8d4d Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Tue, 21 Jan 2025 10:03:17 +0100
+Subject: [PATCH 1/7] Use pvs info from static data to get PV size in PVSize
+
+No need for special code for this; we can reuse the existing
+code from LVM static data.
+---
+ blivet/tasks/pvtask.py | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/blivet/tasks/pvtask.py b/blivet/tasks/pvtask.py
+index b5bd72e0d..e93a61bc7 100644
+--- a/blivet/tasks/pvtask.py
++++ b/blivet/tasks/pvtask.py
+@@ -27,6 +27,7 @@
+ 
+ from ..errors import PhysicalVolumeError
+ from ..size import Size, B
++from ..static_data import pvs_info
+ 
+ from . import availability
+ from . import task
+@@ -55,13 +56,12 @@ def do_task(self):  # pylint: disable=arguments-differ
+         :raises :class:`~.errors.PhysicalVolumeError`: if size cannot be obtained
+         """
+ 
+-        try:
+-            pv_info = blockdev.lvm.pvinfo(self.pv.device)
+-            pv_size = pv_info.pv_size
+-        except blockdev.LVMError as e:
+-            raise PhysicalVolumeError(e)
++        pvs_info.drop_cache()
++        pv_info = pvs_info.cache.get(self.pv.device)
++        if pv_info is None:
++            raise PhysicalVolumeError("Failed to get PV info for %s" % self.pv.device)
+ 
+-        return Size(pv_size)
++        return Size(pv_info.pv_size)
+ 
+ 
+ class PVResize(task.BasicApplication, dfresize.DFResizeTask):
+
+From 0b8239470762cc3b3732d2f40910be7e84102fa0 Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Tue, 21 Jan 2025 10:05:13 +0100
+Subject: [PATCH 2/7] Get the actual PV format size for LVMPV format
+
+---
+ blivet/formats/lvmpv.py            | 2 ++
+ blivet/populator/helpers/lvm.py    | 2 ++
+ tests/unit_tests/populator_test.py | 2 ++
+ 3 files changed, 6 insertions(+)
+
+diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py
+index f5d71dbd1..769c96e1d 100644
+--- a/blivet/formats/lvmpv.py
++++ b/blivet/formats/lvmpv.py
+@@ -101,6 +101,8 @@ def __init__(self, **kwargs):
+         # when set to True, blivet will try to resize the PV to fill all available space
+         self._grow_to_fill = False
+ 
++        self._target_size = self._size
++
+     def __repr__(self):
+         s = DeviceFormat.__repr__(self)
+         s += ("  vg_name = %(vg_name)s  vg_uuid = %(vg_uuid)s"
+diff --git a/blivet/populator/helpers/lvm.py b/blivet/populator/helpers/lvm.py
+index 6ef2f4174..74641bcf8 100644
+--- a/blivet/populator/helpers/lvm.py
++++ b/blivet/populator/helpers/lvm.py
+@@ -112,6 +112,8 @@ def _get_kwargs(self):
+             log.warning("PV %s has no pe_start", name)
+         if pv_info.pv_free:
+             kwargs["free"] = Size(pv_info.pv_free)
++        if pv_info.pv_size:
++            kwargs["size"] = Size(pv_info.pv_size)
+ 
+         return kwargs
+ 
+diff --git a/tests/unit_tests/populator_test.py b/tests/unit_tests/populator_test.py
+index 1ee29b57f..55b6be8d8 100644
+--- a/tests/unit_tests/populator_test.py
++++ b/tests/unit_tests/populator_test.py
+@@ -1064,6 +1064,7 @@ def test_run(self, *args):
+         pv_info.vg_uuid = sentinel.vg_uuid
+         pv_info.pe_start = 0
+         pv_info.pv_free = 0
++        pv_info.pv_size = "10g"
+ 
+         vg_device = Mock()
+         vg_device.id = 0
+@@ -1095,6 +1096,7 @@ def test_run(self, *args):
+         pv_info.vg_extent_count = 2500
+         pv_info.vg_free_count = 0
+         pv_info.vg_pv_count = 1
++        pv_info.pv_size = "10g"
+ 
+         with patch("blivet.static_data.lvm_info.PVsInfo.cache", new_callable=PropertyMock) as mock_pvs_cache:
+             mock_pvs_cache.return_value = {device.path: pv_info}
+
+From 14b9538a8fd9f5bfc7d744902517739b6fae7a22 Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Tue, 21 Jan 2025 13:35:38 +0100
+Subject: [PATCH 3/7] Update PV format size after adding/removing the PV
+ to/from the VG
+
+Unfortunately LVM subtracts VG metadata from the reported PV size,
+so we need to make sure to update the size after the vgextend and
+vgreduce operations.
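+
+As an illustration of the refresh this requires, a minimal sketch
+(current_pv_size is a hypothetical helper, not part of this change; it
+only combines the pvs_info cache API and Size shown in patch 1/7):
+
+    from blivet.static_data import pvs_info
+    from blivet.size import Size
+
+    def current_pv_size(device_path):
+        # drop the cached `pvs` output so LVM is queried again
+        pvs_info.drop_cache()
+        pv_info = pvs_info.cache.get(device_path)  # None if not a PV
+        return Size(pv_info.pv_size) if pv_info else None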
+---
+ blivet/devices/lvm.py | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
+index 62974443e..85850d8e8 100644
+--- a/blivet/devices/lvm.py
++++ b/blivet/devices/lvm.py
+@@ -315,9 +315,21 @@ def _remove(self, member):
+             if lv.status and not status:
+                 lv.teardown()
+ 
++        # update LVMPV format size --> PV format has different size when in VG
++        try:
++            member.format._size = member.format._target_size = member.format._size_info.do_task()
++        except errors.PhysicalVolumeError as e:
++            log.warning("Failed to obtain current size for device %s: %s", member.format.device, e)
++
+     def _add(self, member):
+         blockdev.lvm.vgextend(self.name, member.path)
+ 
++        # update LVMPV format size --> PV format has different size when in VG
++        try:
++            member.format._size = member.format._target_size = member.format._size_info.do_task()
++        except errors.PhysicalVolumeError as e:
++            log.warning("Failed to obtain current size for device %s: %s", member.path, e)
++
+     def _add_log_vol(self, lv):
+         """ Add an LV to this VG. """
+         if lv in self._lvs:
+
+From d6b0c283eb3236f3578dc28d40182f48d05a5c24 Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Tue, 21 Jan 2025 14:22:07 +0100
+Subject: [PATCH 4/7] Use LVMPV format size when calculating VG size and free
+ space
+
+For existing PVs we need to check the format size instead of
+simply expecting the format to be fully resized to match the size of
+the underlying block device.
+---
+ blivet/devices/lvm.py | 63 ++++++++++++++++++++++++++-----------------
+ 1 file changed, 39 insertions(+), 24 deletions(-)
+
+diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
+index 85850d8e8..e3d08dbce 100644
+--- a/blivet/devices/lvm.py
++++ b/blivet/devices/lvm.py
+@@ -500,40 +500,55 @@ def reserved_percent(self, value):
+ 
+         self._reserved_percent = value
+ 
+-    def _get_pv_usable_space(self, pv):
++    def _get_pv_metadata_space(self, pv):
++        """ Returns how much space will be used by VG metadata in given PV
++            This depends on type of the PV, PE size and PE start.
++        """
+         if isinstance(pv, MDRaidArrayDevice):
+-            return self.align(pv.size - 2 * pv.format.pe_start)
++            return 2 * pv.format.pe_start
++        else:
++            return pv.format.pe_start
++
++    def _get_pv_usable_space(self, pv):
++        """ Return how much space can be actually used on given PV.
++            This takes into account:
++            - VG metadata that is/will be stored in this PV
++            - the actual PV format size (which might differ from
++              the underlying block device size)
++        """
++
++        if pv.format.exists and pv.format.size and self.exists:
++            # PV format exists, we got its size and VG also exists
++            # -> all metadata is already accounted in the PV format size
++            return pv.format.size
++        elif pv.format.exists and pv.format.size and not self.exists:
++            # PV format exists, we got its size, but the VG doesn't exist
++            # -> metadata size is not accounted in the PV format size
++            return self.align(pv.format.size - self._get_pv_metadata_space(pv))
+         else:
+-            return self.align(pv.size - pv.format.pe_start)
++            # something else -> either the PV format is not yet created or
++            # we for some reason failed to get size of the format, either way
++            # lets use the underlying block device size and calculate the
++            # metadata size ourselves
++            return self.align(pv.size - self._get_pv_metadata_space(pv))
+ 
+     @property
+     def lvm_metadata_space(self):
+-        """ The amount of the space LVM metadata cost us in this VG's PVs """
+-        # NOTE: we either specify data alignment in a PV or the default is used
+-        #       which is both handled by pv.format.pe_start, but LVM takes into
+-        #       account also the underlying block device which means that e.g.
+-        #       for an MD RAID device, it tries to align everything also to chunk
+-        #       size and alignment offset of such device which may result in up
+-        #       to a twice as big non-data area
+-        # TODO: move this to either LVMPhysicalVolume's pe_start property once
+-        #       formats know about their devices or to a new LVMPhysicalVolumeDevice
+-        #       class once it exists
+-        diff = Size(0)
+-        for pv in self.pvs:
+-            diff += pv.size - self._get_pv_usable_space(pv)
+-
+-        return diff
++        """ The amount of the space LVM metadata cost us in this VG's PVs
++            Note: we either specify data alignment in a PV or the default is used
++                  which is both handled by pv.format.pe_start, but LVM takes into
++                  account also the underlying block device which means that e.g.
++                  for an MD RAID device, it tries to align everything also to chunk
++                  size and alignment offset of such device which may result in up
++                  to a twice as big non-data area
++        """
++        return sum(self._get_pv_metadata_space(pv) for pv in self.pvs)
+ 
+     @property
+     def size(self):
+         """ The size of this VG """
+         # TODO: just ask lvm if isModified returns False
+-
+-        # sum up the sizes of the PVs, subtract the unusable (meta data) space
+-        size = sum(pv.size for pv in self.pvs)
+-        size -= self.lvm_metadata_space
+-
+-        return size
++        return sum(self._get_pv_usable_space(pv) for pv in self.pvs)
+ 
+     @property
+     def extents(self):
+
+From 4d033869de8c22f627cc23e70023e82d9c6e90ed Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Tue, 21 Jan 2025 14:28:56 +0100
+Subject: [PATCH 5/7] Add more tests for PV and VG size and free space
+
+---
+ tests/storage_tests/devices_test/lvm_test.py | 104 ++++++++++++++++++-
+ 1 file changed, 103 insertions(+), 1 deletion(-)
+
+diff --git a/tests/storage_tests/devices_test/lvm_test.py b/tests/storage_tests/devices_test/lvm_test.py
+index 97ef1c4b9..988201839 100644
+--- a/tests/storage_tests/devices_test/lvm_test.py
++++ b/tests/storage_tests/devices_test/lvm_test.py
+@@ -22,6 +22,18 @@ def setUp(self):
+         self.assertIsNone(disk.format.type)
+         self.assertFalse(disk.children)
+ 
++    def _get_pv_size(self, pv):
++        out = subprocess.check_output(["pvs", "-o", "pv_size", "--noheadings", "--nosuffix", "--units=b", pv])
++        return blivet.size.Size(out.decode().strip())
++
++    def _get_vg_size(self, vg):
++        out = subprocess.check_output(["vgs", "-o", "vg_size", "--noheadings", "--nosuffix", "--units=b", vg])
++        return blivet.size.Size(out.decode().strip())
++
++    def _get_vg_free(self, vg):
++        out = subprocess.check_output(["vgs", "-o", "vg_free", "--noheadings", "--nosuffix", "--units=b", vg])
++        return blivet.size.Size(out.decode().strip())
++
+     def _clean_up(self):
+         self.storage.reset()
+         for disk in self.storage.disks:
+@@ -63,6 +75,8 @@ def test_lvm_basic(self):
+         self.assertIsInstance(pv, blivet.devices.PartitionDevice)
+         self.assertIsNotNone(pv.format)
+         self.assertEqual(pv.format.type, "lvmpv")
++        pv_size = self._get_pv_size(pv.path)
++        self.assertEqual(pv.format.size, pv_size)
+ 
+         vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
+         self.assertIsNotNone(vg)
+@@ -72,6 +86,10 @@ def test_lvm_basic(self):
+         self.assertEqual(pv.format.vg_name, vg.name)
+         self.assertEqual(len(vg.parents), 1)
+         self.assertEqual(vg.parents[0], pv)
++        vg_size = self._get_vg_size(vg.name)
++        self.assertEqual(vg_size, vg.size)
++        vg_free = self._get_vg_free(vg.name)
++        self.assertEqual(vg_free, vg.free_space)
+ 
+         lv = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestLV")
+         self.assertIsNotNone(lv)
+@@ -112,6 +130,13 @@ def test_lvm_thin(self):
+         self.storage.do_it()
+         self.storage.reset()
+ 
++        vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
++        self.assertIsNotNone(vg)
++        vg_size = self._get_vg_size(vg.name)
++        self.assertEqual(vg_size, vg.size)
++        vg_free = self._get_vg_free(vg.name)
++        self.assertEqual(vg_free, vg.free_space)
++
+         pool = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestPool")
+         self.assertIsNotNone(pool)
+         self.assertTrue(pool.is_thin_pool)
+@@ -158,6 +183,14 @@ def _test_lvm_raid(self, seg_type, raid_level, stripe_size=0):
+         self.storage.do_it()
+         self.storage.reset()
+ 
++        vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
++        self.assertIsNotNone(vg)
++
++        vg_size = self._get_vg_size(vg.name)
++        self.assertEqual(vg_size, vg.size)
++        vg_free = self._get_vg_free(vg.name)
++        self.assertEqual(vg_free, vg.free_space + vg.reserved_space)
++
+         raidlv = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestRAIDLV")
+         self.assertIsNotNone(raidlv)
+         self.assertTrue(raidlv.is_raid_lv)
+@@ -214,6 +247,13 @@ def test_lvm_cache(self):
+         self.storage.do_it()
+         self.storage.reset()
+ 
++        vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
++        self.assertIsNotNone(vg)
++        vg_size = self._get_vg_size(vg.name)
++        self.assertEqual(vg_size, vg.size)
++        vg_free = self._get_vg_free(vg.name)
++        self.assertEqual(vg_free, vg.free_space)
++
+         cachedlv = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestCachedLV")
+         self.assertIsNotNone(cachedlv)
+         self.assertTrue(cachedlv.cached)
+@@ -253,6 +293,13 @@ def test_lvm_cache_attach(self):
+         self.storage.do_it()
+         self.storage.reset()
+ 
++        vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
++        self.assertIsNotNone(vg)
++        vg_size = self._get_vg_size(vg.name)
++        self.assertEqual(vg_size, vg.size)
++        vg_free = self._get_vg_free(vg.name)
++        self.assertEqual(vg_free, vg.free_space)
++
+         cachedlv = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestCachedLV")
+         self.assertIsNotNone(cachedlv)
+         cachepool = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestFastLV")
+@@ -308,6 +355,13 @@ def test_lvm_cache_create_and_attach(self):
+         self.storage.do_it()
+         self.storage.reset()
+ 
++        vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
++        self.assertIsNotNone(vg)
++        vg_size = self._get_vg_size(vg.name)
++        self.assertEqual(vg_size, vg.size)
++        vg_free = self._get_vg_free(vg.name)
++        self.assertEqual(vg_free, vg.free_space)
++
+         cachedlv = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestCachedLV")
+         self.assertIsNotNone(cachedlv)
+ 
+@@ -323,6 +377,13 @@ def test_lvm_cache_create_and_attach(self):
+         self.storage.do_it()
+         self.storage.reset()
+ 
++        vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
++        self.assertIsNotNone(vg)
++        vg_size = self._get_vg_size(vg.name)
++        self.assertEqual(vg_size, vg.size)
++        vg_free = self._get_vg_free(vg.name)
++        self.assertEqual(vg_free, vg.free_space)
++
+         cachedlv = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestCachedLV")
+         self.assertIsNotNone(cachedlv)
+         self.assertTrue(cachedlv.cached)
+@@ -352,6 +413,13 @@ def test_lvm_pvs_add_remove(self):
+ 
+         self.storage.do_it()
+ 
++        vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
++        self.assertIsNotNone(vg)
++        vg_size = self._get_vg_size(vg.name)
++        self.assertEqual(vg_size, vg.size)
++        vg_free = self._get_vg_free(vg.name)
++        self.assertEqual(vg_free, vg.free_space)
++
+         # create a second PV
+         disk2 = self.storage.devicetree.get_device_by_path(self.vdevs[1])
+         self.assertIsNotNone(disk2)
+@@ -366,6 +434,17 @@ def test_lvm_pvs_add_remove(self):
+         self.storage.do_it()
+         self.storage.reset()
+ 
++        pv1 = self.storage.devicetree.get_device_by_name(pv1.name)
++        pv1_size = self._get_pv_size(pv1.path)
++        self.assertEqual(pv1.format.size, pv1_size)
++
++        vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
++        self.assertIsNotNone(vg)
++        vg_size = self._get_vg_size(vg.name)
++        self.assertEqual(vg_size, vg.size)
++        vg_free = self._get_vg_free(vg.name)
++        self.assertEqual(vg_free, vg.free_space)
++
+         # add the PV to the existing VG
+         vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
+         pv2 = self.storage.devicetree.get_device_by_name(pv2.name)
+@@ -374,6 +453,17 @@ def test_lvm_pvs_add_remove(self):
+         self.storage.devicetree.actions.add(ac)
+         self.storage.do_it()
+ 
++        pv2 = self.storage.devicetree.get_device_by_name(pv2.name)
++        pv2_size = self._get_pv_size(pv2.path)
++        self.assertEqual(pv2.format.size, pv2_size)
++
++        vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
++        self.assertIsNotNone(vg)
++        vg_size = self._get_vg_size(vg.name)
++        self.assertEqual(vg_size, vg.size)
++        vg_free = self._get_vg_free(vg.name)
++        self.assertEqual(vg_free, vg.free_space)
++
+         self.assertEqual(pv2.format.vg_name, vg.name)
+ 
+         self.storage.reset()
+@@ -387,7 +477,19 @@ def test_lvm_pvs_add_remove(self):
+         self.storage.devicetree.actions.add(ac)
+         self.storage.do_it()
+ 
+-        self.assertIsNone(pv1.format.vg_name)
++        pv2 = self.storage.devicetree.get_device_by_name(pv2.name)
++        pv2_size = self._get_pv_size(pv2.path)
++        self.assertEqual(pv2.format.size, pv2_size)
++
++        vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
++        self.assertIsNotNone(vg)
++        vg_size = self._get_vg_size(vg.name)
++        self.assertEqual(vg_size, vg.size)
++        vg_free = self._get_vg_free(vg.name)
++        self.assertEqual(vg_free, vg.free_space)
++
++        self.assertIsNone(pv1.format.type)
++
+         self.storage.reset()
+ 
+         self.storage.reset()
+
+From 4dfa8d699ed1216c18d0c7effa33580a3aa56606 Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Tue, 21 Jan 2025 15:16:29 +0100
+Subject: [PATCH 6/7] Add a separate test case for LVMPV smaller than the block
+ device
+
+---
+ tests/storage_tests/devices_test/lvm_test.py | 55 ++++++++++++++++++++
+ 1 file changed, 55 insertions(+)
+
+diff --git a/tests/storage_tests/devices_test/lvm_test.py b/tests/storage_tests/devices_test/lvm_test.py
+index 988201839..a1064c9c4 100644
+--- a/tests/storage_tests/devices_test/lvm_test.py
++++ b/tests/storage_tests/devices_test/lvm_test.py
+@@ -475,6 +475,11 @@ def test_lvm_pvs_add_remove(self):
+         pv1 = self.storage.devicetree.get_device_by_name(pv1.name)
+         ac = blivet.deviceaction.ActionRemoveMember(vg, pv1)
+         self.storage.devicetree.actions.add(ac)
++
++        # schedule also removing the lvmpv format from the PV
++        ac = blivet.deviceaction.ActionDestroyFormat(pv1)
++        self.storage.devicetree.actions.add(ac)
++
+         self.storage.do_it()
+ 
+         pv2 = self.storage.devicetree.get_device_by_name(pv2.name)
+@@ -497,3 +502,53 @@ def test_lvm_pvs_add_remove(self):
+         self.assertIsNotNone(vg)
+         self.assertEqual(len(vg.pvs), 1)
+         self.assertEqual(vg.pvs[0].name, pv2.name)
++
++    def test_lvm_pv_size(self):
++        disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
++        self.assertIsNotNone(disk)
++        self.storage.initialize_disk(disk)
++
++        pv = self.storage.new_partition(size=blivet.size.Size("100 MiB"), fmt_type="lvmpv",
++                                        parents=[disk])
++        self.storage.create_device(pv)
++
++        blivet.partitioning.do_partitioning(self.storage)
++
++        self.storage.do_it()
++        self.storage.reset()
++
++        pv = self.storage.devicetree.get_device_by_name(pv.name)
++        self.assertIsNotNone(pv)
++
++        pv.format.update_size_info()
++        self.assertTrue(pv.format.resizable)
++
++        ac = blivet.deviceaction.ActionResizeFormat(pv, blivet.size.Size("50 MiB"))
++        self.storage.devicetree.actions.add(ac)
++
++        self.storage.do_it()
++        self.storage.reset()
++
++        pv = self.storage.devicetree.get_device_by_name(pv.name)
++        self.assertIsNotNone(pv)
++        self.assertEqual(pv.format.size, blivet.size.Size("50 MiB"))
++        pv_size = self._get_pv_size(pv.path)
++        self.assertEqual(pv_size, pv.format.size)
++
++        vg = self.storage.new_vg(name="blivetTestVG", parents=[pv])
++        self.storage.create_device(vg)
++
++        self.storage.do_it()
++        self.storage.reset()
++
++        pv = self.storage.devicetree.get_device_by_name(pv.name)
++        self.assertIsNotNone(pv)
++        pv_size = self._get_pv_size(pv.path)
++        self.assertEqual(pv_size, pv.format.size)
++
++        vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
++        self.assertIsNotNone(vg)
++        vg_size = self._get_vg_size(vg.name)
++        self.assertEqual(vg_size, vg.size)
++        vg_free = self._get_vg_free(vg.name)
++        self.assertEqual(vg_free, vg.free_space)
+
+From 6cfa9d0df6faa79b8ab471ba34aa0b3d6f0dc338 Mon Sep 17 00:00:00 2001
+From: Vojtech Trefny
+Date: Mon, 14 Apr 2025 14:54:00 +0200
+Subject: [PATCH 7/7] Fix checking PV free space when removing it from a VG
+
+---
+ blivet/devices/lvm.py | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
+index e3d08dbce..a03d57f97 100644
+--- a/blivet/devices/lvm.py
++++ b/blivet/devices/lvm.py
+@@ -305,9 +305,15 @@ def _remove(self, member):
+             if lv.exists:
+                 lv.setup()
+ 
++        # if format was already scheduled for removal, use original_format
++        if member.format != "lvmpv":
++            fmt = member.original_format
++        else:
++            fmt = member.format
++
+         # do not run pvmove on empty PVs
+-        member.format.update_size_info()
+-        if member.format.free < member.format.current_size:
++        fmt.update_size_info()
++        if fmt.free < fmt.current_size:
+             blockdev.lvm.pvmove(member.path)
+         blockdev.lvm.vgreduce(self.name, member.path)
+ 
+@@ -317,9 +323,9 @@ def _remove(self, member):
+ 
+         # update LVMPV format size --> PV format has different size when in VG
+         try:
+-            member.format._size = member.format._target_size = member.format._size_info.do_task()
++            fmt._size = fmt._target_size = fmt._size_info.do_task()
+         except errors.PhysicalVolumeError as e:
+-            log.warning("Failed to obtain current size for device %s: %s", member.format.device, e)
++            log.warning("Failed to obtain current size for device %s: %s", fmt.device, e)
+ 
+     def _add(self, member):
+         blockdev.lvm.vgextend(self.name, member.path)
diff --git a/python-blivet.spec b/python-blivet.spec
index 04f78e6..0219a4b 100644
--- a/python-blivet.spec
+++ b/python-blivet.spec
@@ -23,7 +23,7 @@ Version: 3.6.0
 #%%global prerelease .b2
 # prerelease, if defined, should be something like .a1, .b1, .b2.dev1, or .c2
 
-Release: 24%{?prerelease}%{?dist}
+Release: 25%{?prerelease}%{?dist}
 Epoch: 1
 License: LGPLv2+
 %global realname blivet
@@ -63,6 +63,8 @@ Patch29: 0030-mod_pass_in_stratis_test.patch
 Patch30: 0031-Fix_running_tests_in_FIPS_mode.patch
 Patch31: 0032-Set-persistent-allow-discards-flag-for-new-LUKS-devices.patch
 Patch32: 0033-Do-not-remove-PVs-from-devices-file-if-disabled-or-doesnt-exist.patch
+Patch33: 0034-Include-additional-information-in-PartitioningError.patch
+Patch34: 0035-LVMPV-format-size-fix.patch
 
 # Versions of required components (done so we make sure the buildrequires
 # match the requires versions of things).
@@ -226,6 +228,12 @@ configuration.
 %endif
 
 %changelog
+* Mon Apr 14 2025 Vojtech Trefny - 3.6.0-25
+- Get the actual PV format size for LVMPV format
+  Resolves: RHEL-74078
+- Include additional information in PartitioningError
+  Resolves: RHEL-8005
+
 * Thu Mar 27 2025 Vojtech Trefny - 3.6.0-24
 - Do not remove PVs from devices file if disabled or doesn't exist
   Resolves: RHEL-84662
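
As an illustration of the message format introduced in 0034 (the disk names
and the `errors` mapping below are made-up examples; in blivet the dict is
built per disk inside allocate_partitions()):

    errors = {"no free partition slots": ["sda", "sdb"],
              "no primary partition slots available": ["sdc"]}
    errors_by_disk = (", ".join(disks) + ": " + error
                      for error, disks in errors.items())
    msg = ("Unable to allocate requested partition scheme on requested "
           "disks:\n%s" % "\n".join(errors_by_disk))
    # msg now reads:
    # Unable to allocate requested partition scheme on requested disks:
    # sda, sdb: no free partition slots
    # sdc: no primary partition slots available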