import CS python-blivet-3.6.0-28.el9

eabdullin 2025-09-15 12:34:46 +00:00
parent ad654463db
commit d01e701fb7
7 changed files with 1477 additions and 1 deletions

0033-Do-not-remove-PVs-from-devices-file-if-disabled-or-doesnt-exist.patch

@ -0,0 +1,83 @@
From 3e3b8d415ca50c4feaaf8d3688f0ebda2522d866 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 20 Jan 2025 13:02:50 +0100
Subject: [PATCH] Do not remove PVs from devices file if disabled or doesn't
exist
When the file doesn't exist, the 'lvmdevices --deldev' call will
fail, but it will still create the devices file. This means we now
have an empty devices file and all subsequent LVM calls will fail.
Resolves: RHEL-84662
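A minimal sketch of the guard this patch introduces, assuming the default LVM devices file location (blivet reads the real path and the libblockdev call from its lvm helpers, as shown in the diff below):

import os

LVM_DEVICES_FILE = "/etc/lvm/devices/system.devices"   # assumed default path

def remove_pv_from_devices_file(pv_path, delete_entry):
    # 'lvmdevices --deldev' on a missing file fails *and* creates an empty
    # devices file, so skip the call entirely when the file is not there
    if not os.path.exists(LVM_DEVICES_FILE):
        return
    delete_entry(pv_path)   # e.g. blockdev.lvm.devices_delete(pv_path)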
---
blivet/formats/lvmpv.py | 5 +++++
tests/unit_tests/formats_tests/lvmpv_test.py | 22 ++++++++++++++++++++
2 files changed, 27 insertions(+)
diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py
index 51fa4a3c8..f5d71dbd1 100644
--- a/blivet/formats/lvmpv.py
+++ b/blivet/formats/lvmpv.py
@@ -166,6 +166,11 @@ def lvmdevices_remove(self):
if not lvm.HAVE_LVMDEVICES:
raise PhysicalVolumeError("LVM devices file feature is not supported")
+ if not os.path.exists(lvm.LVM_DEVICES_FILE):
+ log.debug("Not removing %s from devices file: %s doesn't exist",
+ self.device, lvm.LVM_DEVICES_FILE)
+ return
+
try:
blockdev.lvm.devices_delete(self.device)
except blockdev.LVMError as e:
diff --git a/tests/unit_tests/formats_tests/lvmpv_test.py b/tests/unit_tests/formats_tests/lvmpv_test.py
index 6490c7d48..54a59026d 100644
--- a/tests/unit_tests/formats_tests/lvmpv_test.py
+++ b/tests/unit_tests/formats_tests/lvmpv_test.py
@@ -41,6 +41,11 @@ def test_lvm_devices(self):
mock["blockdev"].lvm.devices_add.assert_not_called()
+ # LVM devices file not enabled/supported -> devices_delete should not be called
+ fmt._destroy()
+
+ mock["blockdev"].lvm.devices_delete.assert_not_called()
+
with self.patches() as mock:
# LVM devices file enabled and devices file exists -> devices_add should be called
mock["lvm"].HAVE_LVMDEVICES = True
@@ -50,6 +55,11 @@ def test_lvm_devices(self):
mock["blockdev"].lvm.devices_add.assert_called_with("/dev/test")
+ # LVM devices file enabled and devices file exists -> devices_delete should be called
+ fmt._destroy()
+
+ mock["blockdev"].lvm.devices_delete.assert_called_with("/dev/test")
+
with self.patches() as mock:
# LVM devices file enabled and devices file doesn't exist
# and no existing VGs present -> devices_add should be called
@@ -61,6 +71,12 @@ def test_lvm_devices(self):
mock["blockdev"].lvm.devices_add.assert_called_with("/dev/test")
+ # LVM devices file enabled but devices file doesn't exist
+ # -> devices_delete should not be called
+ fmt._destroy()
+
+ mock["blockdev"].lvm.devices_delete.assert_not_called()
+
with self.patches() as mock:
# LVM devices file enabled and devices file doesn't exist
# and existing VGs present -> devices_add should not be called
@@ -71,3 +87,9 @@ def test_lvm_devices(self):
fmt._create()
mock["blockdev"].lvm.devices_add.assert_not_called()
+
+ # LVM devices file enabled but devices file doesn't exist
+ # -> devices_delete should not be called
+ fmt._destroy()
+
+ mock["blockdev"].lvm.devices_delete.assert_not_called()

0034-Include-additional-information-in-PartitioningError.patch

@ -0,0 +1,85 @@
From 6d2e5c70fecc68e0d62255d4e2a65e9d264578dd Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 22 Jan 2025 13:16:43 +0100
Subject: [PATCH] Include additional information in PartitioningError
The generic 'Unable to allocate requested partition scheme' message is
not very helpful; we should try to include additional information
where possible.
Resolves: RHEL-8005
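A small, self-contained sketch of how the collected per-disk errors become the new message (names follow the patch below; the disk names are made-up placeholders):

errors = {"no free partition slots": ["sda", "sdb"],
          "no primary partition slots available": ["sdc"]}

errors_by_disk = (", ".join(disks) + ": " + error for error, disks in errors.items())
msg = ("Unable to allocate requested partition scheme on requested disks:\n%s"
       % "\n".join(errors_by_disk))
# msg now reads:
# Unable to allocate requested partition scheme on requested disks:
# sda, sdb: no free partition slots
# sdc: no primary partition slots available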
---
blivet/partitioning.py | 25 ++++++++++++++++++++++---
1 file changed, 22 insertions(+), 3 deletions(-)
diff --git a/blivet/partitioning.py b/blivet/partitioning.py
index ce77e4eb7..0a35c764d 100644
--- a/blivet/partitioning.py
+++ b/blivet/partitioning.py
@@ -34,7 +34,7 @@
from .flags import flags
from .devices import Device, PartitionDevice, device_path_to_name
from .size import Size
-from .i18n import _
+from .i18n import _, N_
from .util import stringize, unicodeize, compare
import logging
@@ -681,6 +681,11 @@ def resolve_disk_tags(disks, tags):
return [disk for disk in disks if any(tag in disk.tags for tag in tags)]
+class PartitioningErrors:
+ NO_PRIMARY = N_("no primary partition slots available")
+ NO_SLOTS = N_("no free partition slots")
+
+
def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None):
""" Allocate partitions based on requested features.
@@ -763,6 +768,7 @@ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None):
part_type = None
growth = 0 # in sectors
# loop through disks
+ errors = {}
for _disk in req_disks:
try:
disklabel = disklabels[_disk.path]
@@ -798,6 +804,10 @@ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None):
if new_part_type is None:
# can't allocate any more partitions on this disk
log.debug("no free partition slots on %s", _disk.name)
+ if PartitioningErrors.NO_SLOTS in errors.keys():
+ errors[PartitioningErrors.NO_SLOTS].append(_disk.name)
+ else:
+ errors[PartitioningErrors.NO_SLOTS] = [_disk.name]
continue
if _part.req_primary and new_part_type != parted.PARTITION_NORMAL:
@@ -808,7 +818,11 @@ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None):
new_part_type = parted.PARTITION_NORMAL
else:
# we need a primary slot and none are free on this disk
- log.debug("no primary slots available on %s", _disk.name)
+ log.debug("no primary partition slots available on %s", _disk.name)
+ if PartitioningErrors.NO_PRIMARY in errors.keys():
+ errors[PartitioningErrors.NO_PRIMARY].append(_disk.name)
+ else:
+ errors[PartitioningErrors.NO_PRIMARY] = [_disk.name]
continue
elif _part.req_part_type is not None and \
new_part_type != _part.req_part_type:
@@ -968,7 +982,12 @@ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None):
break
if free is None:
- raise PartitioningError(_("Unable to allocate requested partition scheme."))
+ if not errors:
+ msg = _("Unable to allocate requested partition scheme.")
+ else:
+ errors_by_disk = (", ".join(disks) + ": " + _(error) for error, disks in errors.items())
+ msg = _("Unable to allocate requested partition scheme on requested disks:\n%s") % "\n".join(errors_by_disk)
+ raise PartitioningError(msg)
_disk = use_disk
disklabel = _disk.format

0035-LVMPV-format-size-fix.patch

@ -0,0 +1,572 @@
From 6a54de2780aa3fd52b4a25dc8db7ab8c5b1b8d4d Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 21 Jan 2025 10:03:17 +0100
Subject: [PATCH 1/7] Use pvs info from static data to get PV size in PVSize
No need for special code for this; we can reuse the existing
code from the LVM static data.
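A condensed sketch of the lookup pattern the patch switches to, assuming blivet's static data cache as imported below (error handling simplified to a plain RuntimeError):

from blivet.size import Size
from blivet.static_data import pvs_info

def get_pv_size(device_path):
    pvs_info.drop_cache()                      # make sure we do not use stale data
    pv_info = pvs_info.cache.get(device_path)  # keyed by device path, e.g. "/dev/sda1"
    if pv_info is None:
        raise RuntimeError("Failed to get PV info for %s" % device_path)
    return Size(pv_info.pv_size)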
---
blivet/tasks/pvtask.py | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/blivet/tasks/pvtask.py b/blivet/tasks/pvtask.py
index b5bd72e0d..e93a61bc7 100644
--- a/blivet/tasks/pvtask.py
+++ b/blivet/tasks/pvtask.py
@@ -27,6 +27,7 @@
from ..errors import PhysicalVolumeError
from ..size import Size, B
+from ..static_data import pvs_info
from . import availability
from . import task
@@ -55,13 +56,12 @@ def do_task(self): # pylint: disable=arguments-differ
:raises :class:`~.errors.PhysicalVolumeError`: if size cannot be obtained
"""
- try:
- pv_info = blockdev.lvm.pvinfo(self.pv.device)
- pv_size = pv_info.pv_size
- except blockdev.LVMError as e:
- raise PhysicalVolumeError(e)
+ pvs_info.drop_cache()
+ pv_info = pvs_info.cache.get(self.pv.device)
+ if pv_info is None:
+ raise PhysicalVolumeError("Failed to get PV info for %s" % self.pv.device)
- return Size(pv_size)
+ return Size(pv_info.pv_size)
class PVResize(task.BasicApplication, dfresize.DFResizeTask):
From 0b8239470762cc3b3732d2f40910be7e84102fa0 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 21 Jan 2025 10:05:13 +0100
Subject: [PATCH 2/7] Get the actual PV format size for LVMPV format
---
blivet/formats/lvmpv.py | 2 ++
blivet/populator/helpers/lvm.py | 2 ++
tests/unit_tests/populator_test.py | 2 ++
3 files changed, 6 insertions(+)
diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py
index f5d71dbd1..769c96e1d 100644
--- a/blivet/formats/lvmpv.py
+++ b/blivet/formats/lvmpv.py
@@ -101,6 +101,8 @@ def __init__(self, **kwargs):
# when set to True, blivet will try to resize the PV to fill all available space
self._grow_to_fill = False
+ self._target_size = self._size
+
def __repr__(self):
s = DeviceFormat.__repr__(self)
s += (" vg_name = %(vg_name)s vg_uuid = %(vg_uuid)s"
diff --git a/blivet/populator/helpers/lvm.py b/blivet/populator/helpers/lvm.py
index 6ef2f4174..74641bcf8 100644
--- a/blivet/populator/helpers/lvm.py
+++ b/blivet/populator/helpers/lvm.py
@@ -112,6 +112,8 @@ def _get_kwargs(self):
log.warning("PV %s has no pe_start", name)
if pv_info.pv_free:
kwargs["free"] = Size(pv_info.pv_free)
+ if pv_info.pv_size:
+ kwargs["size"] = Size(pv_info.pv_size)
return kwargs
diff --git a/tests/unit_tests/populator_test.py b/tests/unit_tests/populator_test.py
index 1ee29b57f..55b6be8d8 100644
--- a/tests/unit_tests/populator_test.py
+++ b/tests/unit_tests/populator_test.py
@@ -1064,6 +1064,7 @@ def test_run(self, *args):
pv_info.vg_uuid = sentinel.vg_uuid
pv_info.pe_start = 0
pv_info.pv_free = 0
+ pv_info.pv_size = "10g"
vg_device = Mock()
vg_device.id = 0
@@ -1095,6 +1096,7 @@ def test_run(self, *args):
pv_info.vg_extent_count = 2500
pv_info.vg_free_count = 0
pv_info.vg_pv_count = 1
+ pv_info.pv_size = "10g"
with patch("blivet.static_data.lvm_info.PVsInfo.cache", new_callable=PropertyMock) as mock_pvs_cache:
mock_pvs_cache.return_value = {device.path: pv_info}
From 14b9538a8fd9f5bfc7d744902517739b6fae7a22 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 21 Jan 2025 13:35:38 +0100
Subject: [PATCH 3/7] Update PV format size after adding/removing the PV
to/from the VG
Unfortunately LVM subtracts VG metadata from the reported PV size,
so we need to make sure to update the size after the vgextend and
vgreduce operations.
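A reduced sketch of the refresh both _add() and _remove() now perform after calling vgextend/vgreduce (member stands for a hypothetical PV device object; _size_info is the PVSize task from the previous patch):

def refresh_pv_format_size(member, log):
    # the size LVM reports for a PV changes once it joins or leaves a VG
    # (VG metadata is subtracted), so re-read it and update the format
    try:
        member.format._size = member.format._target_size = member.format._size_info.do_task()
    except Exception as e:   # blivet catches errors.PhysicalVolumeError here
        log.warning("Failed to obtain current size for device %s: %s",
                    member.format.device, e)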
---
blivet/devices/lvm.py | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 62974443e..85850d8e8 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -315,9 +315,21 @@ def _remove(self, member):
if lv.status and not status:
lv.teardown()
+ # update LVMPV format size --> PV format has different size when in VG
+ try:
+ member.format._size = member.format._target_size = member.format._size_info.do_task()
+ except errors.PhysicalVolumeError as e:
+ log.warning("Failed to obtain current size for device %s: %s", member.format.device, e)
+
def _add(self, member):
blockdev.lvm.vgextend(self.name, member.path)
+ # update LVMPV format size --> PV format has different size when in VG
+ try:
+ member.format._size = member.format._target_size = member.format._size_info.do_task()
+ except errors.PhysicalVolumeError as e:
+ log.warning("Failed to obtain current size for device %s: %s", member.path, e)
+
def _add_log_vol(self, lv):
""" Add an LV to this VG. """
if lv in self._lvs:
From d6b0c283eb3236f3578dc28d40182f48d05a5c24 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 21 Jan 2025 14:22:07 +0100
Subject: [PATCH 4/7] Use LVMPV format size when calculating VG size and free
space
For existing PVs we need to check the format size instead of
simply expecting the format to be fully resized to match the size
of the underlying block device.
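A compact restatement of the sizing rules introduced below (pv is a hypothetical PV device object, align() stands for the VG's extent alignment, and metadata_space is the per-PV VG metadata size; the real code lives in LVMVolumeGroupDevice):

def pv_usable_space(pv, vg_exists, align, metadata_space):
    if pv.format.exists and pv.format.size and vg_exists:
        # existing PV in an existing VG: metadata already reflected in the format size
        return pv.format.size
    elif pv.format.exists and pv.format.size:
        # existing PV, but the VG does not exist yet: metadata still needs subtracting
        return align(pv.format.size - metadata_space)
    else:
        # format not created yet (or size unknown): fall back to the block device size
        return align(pv.size - metadata_space)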
---
blivet/devices/lvm.py | 63 ++++++++++++++++++++++++++-----------------
1 file changed, 39 insertions(+), 24 deletions(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 85850d8e8..e3d08dbce 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -500,40 +500,55 @@ def reserved_percent(self, value):
self._reserved_percent = value
- def _get_pv_usable_space(self, pv):
+ def _get_pv_metadata_space(self, pv):
+ """ Returns how much space will be used by VG metadata in given PV
+ This depends on type of the PV, PE size and PE start.
+ """
if isinstance(pv, MDRaidArrayDevice):
- return self.align(pv.size - 2 * pv.format.pe_start)
+ return 2 * pv.format.pe_start
+ else:
+ return pv.format.pe_start
+
+ def _get_pv_usable_space(self, pv):
+ """ Return how much space can be actually used on given PV.
+ This takes into account:
+ - VG metadata that is/will be stored in this PV
+ - the actual PV format size (which might differ from
+ the underlying block device size)
+ """
+
+ if pv.format.exists and pv.format.size and self.exists:
+ # PV format exists, we got its size and VG also exists
+ # -> all metadata is already accounted in the PV format size
+ return pv.format.size
+ elif pv.format.exists and pv.format.size and not self.exists:
+ # PV format exists, we got its size, but the VG doesn't exist
+ # -> metadata size is not accounted in the PV format size
+ return self.align(pv.format.size - self._get_pv_metadata_space(pv))
else:
- return self.align(pv.size - pv.format.pe_start)
+ # something else -> either the PV format is not yet created or
+ # we for some reason failed to get size of the format, either way
+ # lets use the underlying block device size and calculate the
+ # metadata size ourselves
+ return self.align(pv.size - self._get_pv_metadata_space(pv))
@property
def lvm_metadata_space(self):
- """ The amount of the space LVM metadata cost us in this VG's PVs """
- # NOTE: we either specify data alignment in a PV or the default is used
- # which is both handled by pv.format.pe_start, but LVM takes into
- # account also the underlying block device which means that e.g.
- # for an MD RAID device, it tries to align everything also to chunk
- # size and alignment offset of such device which may result in up
- # to a twice as big non-data area
- # TODO: move this to either LVMPhysicalVolume's pe_start property once
- # formats know about their devices or to a new LVMPhysicalVolumeDevice
- # class once it exists
- diff = Size(0)
- for pv in self.pvs:
- diff += pv.size - self._get_pv_usable_space(pv)
-
- return diff
+ """ The amount of the space LVM metadata cost us in this VG's PVs
+ Note: we either specify data alignment in a PV or the default is used
+ which is both handled by pv.format.pe_start, but LVM takes into
+ account also the underlying block device which means that e.g.
+ for an MD RAID device, it tries to align everything also to chunk
+ size and alignment offset of such device which may result in up
+ to a twice as big non-data area
+ """
+ return sum(self._get_pv_metadata_space(pv) for pv in self.pvs)
@property
def size(self):
""" The size of this VG """
# TODO: just ask lvm if isModified returns False
-
- # sum up the sizes of the PVs, subtract the unusable (meta data) space
- size = sum(pv.size for pv in self.pvs)
- size -= self.lvm_metadata_space
-
- return size
+ return sum(self._get_pv_usable_space(pv) for pv in self.pvs)
@property
def extents(self):
From 4d033869de8c22f627cc23e70023e82d9c6e90ed Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 21 Jan 2025 14:28:56 +0100
Subject: [PATCH 5/7] Add more tests for PV and VG size and free space
---
tests/storage_tests/devices_test/lvm_test.py | 104 ++++++++++++++++++-
1 file changed, 103 insertions(+), 1 deletion(-)
diff --git a/tests/storage_tests/devices_test/lvm_test.py b/tests/storage_tests/devices_test/lvm_test.py
index 97ef1c4b9..988201839 100644
--- a/tests/storage_tests/devices_test/lvm_test.py
+++ b/tests/storage_tests/devices_test/lvm_test.py
@@ -22,6 +22,18 @@ def setUp(self):
self.assertIsNone(disk.format.type)
self.assertFalse(disk.children)
+ def _get_pv_size(self, pv):
+ out = subprocess.check_output(["pvs", "-o", "pv_size", "--noheadings", "--nosuffix", "--units=b", pv])
+ return blivet.size.Size(out.decode().strip())
+
+ def _get_vg_size(self, vg):
+ out = subprocess.check_output(["vgs", "-o", "vg_size", "--noheadings", "--nosuffix", "--units=b", vg])
+ return blivet.size.Size(out.decode().strip())
+
+ def _get_vg_free(self, vg):
+ out = subprocess.check_output(["vgs", "-o", "vg_free", "--noheadings", "--nosuffix", "--units=b", vg])
+ return blivet.size.Size(out.decode().strip())
+
def _clean_up(self):
self.storage.reset()
for disk in self.storage.disks:
@@ -63,6 +75,8 @@ def test_lvm_basic(self):
self.assertIsInstance(pv, blivet.devices.PartitionDevice)
self.assertIsNotNone(pv.format)
self.assertEqual(pv.format.type, "lvmpv")
+ pv_size = self._get_pv_size(pv.path)
+ self.assertEqual(pv.format.size, pv_size)
vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
self.assertIsNotNone(vg)
@@ -72,6 +86,10 @@ def test_lvm_basic(self):
self.assertEqual(pv.format.vg_name, vg.name)
self.assertEqual(len(vg.parents), 1)
self.assertEqual(vg.parents[0], pv)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
lv = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestLV")
self.assertIsNotNone(lv)
@@ -112,6 +130,13 @@ def test_lvm_thin(self):
self.storage.do_it()
self.storage.reset()
+ vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
+
pool = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestPool")
self.assertIsNotNone(pool)
self.assertTrue(pool.is_thin_pool)
@@ -158,6 +183,14 @@ def _test_lvm_raid(self, seg_type, raid_level, stripe_size=0):
self.storage.do_it()
self.storage.reset()
+ vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
+ self.assertIsNotNone(vg)
+
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space + vg.reserved_space)
+
raidlv = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestRAIDLV")
self.assertIsNotNone(raidlv)
self.assertTrue(raidlv.is_raid_lv)
@@ -214,6 +247,13 @@ def test_lvm_cache(self):
self.storage.do_it()
self.storage.reset()
+ vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
+
cachedlv = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestCachedLV")
self.assertIsNotNone(cachedlv)
self.assertTrue(cachedlv.cached)
@@ -253,6 +293,13 @@ def test_lvm_cache_attach(self):
self.storage.do_it()
self.storage.reset()
+ vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
+
cachedlv = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestCachedLV")
self.assertIsNotNone(cachedlv)
cachepool = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestFastLV")
@@ -308,6 +355,13 @@ def test_lvm_cache_create_and_attach(self):
self.storage.do_it()
self.storage.reset()
+ vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
+
cachedlv = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestCachedLV")
self.assertIsNotNone(cachedlv)
@@ -323,6 +377,13 @@ def test_lvm_cache_create_and_attach(self):
self.storage.do_it()
self.storage.reset()
+ vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
+
cachedlv = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestCachedLV")
self.assertIsNotNone(cachedlv)
self.assertTrue(cachedlv.cached)
@@ -352,6 +413,13 @@ def test_lvm_pvs_add_remove(self):
self.storage.do_it()
+ vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
+
# create a second PV
disk2 = self.storage.devicetree.get_device_by_path(self.vdevs[1])
self.assertIsNotNone(disk2)
@@ -366,6 +434,17 @@ def test_lvm_pvs_add_remove(self):
self.storage.do_it()
self.storage.reset()
+ pv1 = self.storage.devicetree.get_device_by_name(pv1.name)
+ pv1_size = self._get_pv_size(pv1.path)
+ self.assertEqual(pv1.format.size, pv1_size)
+
+ vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
+
# add the PV to the existing VG
vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
pv2 = self.storage.devicetree.get_device_by_name(pv2.name)
@@ -374,6 +453,17 @@ def test_lvm_pvs_add_remove(self):
self.storage.devicetree.actions.add(ac)
self.storage.do_it()
+ pv2 = self.storage.devicetree.get_device_by_name(pv2.name)
+ pv2_size = self._get_pv_size(pv2.path)
+ self.assertEqual(pv2.format.size, pv2_size)
+
+ vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
+
self.assertEqual(pv2.format.vg_name, vg.name)
self.storage.reset()
@@ -387,7 +477,19 @@ def test_lvm_pvs_add_remove(self):
self.storage.devicetree.actions.add(ac)
self.storage.do_it()
- self.assertIsNone(pv1.format.vg_name)
+ pv2 = self.storage.devicetree.get_device_by_name(pv2.name)
+ pv2_size = self._get_pv_size(pv2.path)
+ self.assertEqual(pv2.format.size, pv2_size)
+
+ vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
+
+ self.assertIsNone(pv1.format.type)
+
self.storage.reset()
self.storage.reset()
From 4dfa8d699ed1216c18d0c7effa33580a3aa56606 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 21 Jan 2025 15:16:29 +0100
Subject: [PATCH 6/7] Add a separate test case for LVMPV smaller than the block
device
---
tests/storage_tests/devices_test/lvm_test.py | 55 ++++++++++++++++++++
1 file changed, 55 insertions(+)
diff --git a/tests/storage_tests/devices_test/lvm_test.py b/tests/storage_tests/devices_test/lvm_test.py
index 988201839..a1064c9c4 100644
--- a/tests/storage_tests/devices_test/lvm_test.py
+++ b/tests/storage_tests/devices_test/lvm_test.py
@@ -475,6 +475,11 @@ def test_lvm_pvs_add_remove(self):
pv1 = self.storage.devicetree.get_device_by_name(pv1.name)
ac = blivet.deviceaction.ActionRemoveMember(vg, pv1)
self.storage.devicetree.actions.add(ac)
+
+ # schedule also removing the lvmpv format from the PV
+ ac = blivet.deviceaction.ActionDestroyFormat(pv1)
+ self.storage.devicetree.actions.add(ac)
+
self.storage.do_it()
pv2 = self.storage.devicetree.get_device_by_name(pv2.name)
@@ -497,3 +502,53 @@ def test_lvm_pvs_add_remove(self):
self.assertIsNotNone(vg)
self.assertEqual(len(vg.pvs), 1)
self.assertEqual(vg.pvs[0].name, pv2.name)
+
+ def test_lvm_pv_size(self):
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+ self.storage.initialize_disk(disk)
+
+ pv = self.storage.new_partition(size=blivet.size.Size("100 MiB"), fmt_type="lvmpv",
+ parents=[disk])
+ self.storage.create_device(pv)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ pv = self.storage.devicetree.get_device_by_name(pv.name)
+ self.assertIsNotNone(pv)
+
+ pv.format.update_size_info()
+ self.assertTrue(pv.format.resizable)
+
+ ac = blivet.deviceaction.ActionResizeFormat(pv, blivet.size.Size("50 MiB"))
+ self.storage.devicetree.actions.add(ac)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ pv = self.storage.devicetree.get_device_by_name(pv.name)
+ self.assertIsNotNone(pv)
+ self.assertEqual(pv.format.size, blivet.size.Size("50 MiB"))
+ pv_size = self._get_pv_size(pv.path)
+ self.assertEqual(pv_size, pv.format.size)
+
+ vg = self.storage.new_vg(name="blivetTestVG", parents=[pv])
+ self.storage.create_device(vg)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ pv = self.storage.devicetree.get_device_by_name(pv.name)
+ self.assertIsNotNone(pv)
+ pv_size = self._get_pv_size(pv.path)
+ self.assertEqual(pv_size, pv.format.size)
+
+ vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
From 6cfa9d0df6faa79b8ab471ba34aa0b3d6f0dc338 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 14 Apr 2025 14:54:00 +0200
Subject: [PATCH 7/7] Fix checking PV free space when removing it from a VG
---
blivet/devices/lvm.py | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index e3d08dbce..a03d57f97 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -305,9 +305,15 @@ def _remove(self, member):
if lv.exists:
lv.setup()
+ # if format was already scheduled for removal, use original_format
+ if member.format != "lvmpv":
+ fmt = member.original_format
+ else:
+ fmt = member.format
+
# do not run pvmove on empty PVs
- member.format.update_size_info()
- if member.format.free < member.format.current_size:
+ fmt.update_size_info()
+ if fmt.free < fmt.current_size:
blockdev.lvm.pvmove(member.path)
blockdev.lvm.vgreduce(self.name, member.path)
@@ -317,9 +323,9 @@ def _remove(self, member):
# update LVMPV format size --> PV format has different size when in VG
try:
- member.format._size = member.format._target_size = member.format._size_info.do_task()
+ fmt._size = fmt._target_size = fmt._size_info.do_task()
except errors.PhysicalVolumeError as e:
- log.warning("Failed to obtain current size for device %s: %s", member.format.device, e)
+ log.warning("Failed to obtain current size for device %s: %s", fmt.device, e)
def _add(self, member):
blockdev.lvm.vgextend(self.name, member.path)

0036-Make-ActionDestroyFormat-optional.patch

@ -0,0 +1,258 @@
From 68db0569b3508bbedf33d9ee3b69e8fc6a309b65 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 16 May 2025 17:15:17 +0200
Subject: [PATCH 1/4] Allow ActionDestroyFormat to be marked as optional
When we are also planning to remove the device, failing to remove
the format is not critical, so we can ignore the failure in these cases.
Resolves: RHEL-8008
Resolves: RHEL-8012
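A minimal usage sketch of the new flag (storage stands for a hypothetical blivet.Blivet instance; the following patch wires this up in destroy_device and recursive_remove):

from blivet.deviceaction import ActionDestroyDevice, ActionDestroyFormat

def schedule_removal(storage, device):
    # the device is going away anyway, so a failure to wipe its format
    # is logged and ignored instead of aborting the whole transaction
    storage.devicetree.actions.add(ActionDestroyFormat(device, optional=True))
    storage.devicetree.actions.add(ActionDestroyDevice(device))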
---
blivet/deviceaction.py | 37 +++++++++++++++++++++++--------------
1 file changed, 23 insertions(+), 14 deletions(-)
diff --git a/blivet/deviceaction.py b/blivet/deviceaction.py
index fc1ca4b65..a6fc211ea 100644
--- a/blivet/deviceaction.py
+++ b/blivet/deviceaction.py
@@ -728,12 +728,13 @@ class ActionDestroyFormat(DeviceAction):
obj = ACTION_OBJECT_FORMAT
type_desc_str = N_("destroy format")
- def __init__(self, device):
+ def __init__(self, device, optional=False):
if device.format_immutable:
raise ValueError("this device's formatting cannot be modified")
DeviceAction.__init__(self, device)
self.orig_format = self.device.format
+ self.optional = optional
if not device.format.destroyable:
raise ValueError("resource to destroy this format type %s is unavailable" % device.format.type)
@@ -752,21 +753,29 @@ def execute(self, callbacks=None):
""" wipe the filesystem signature from the device """
# remove any flag if set
super(ActionDestroyFormat, self).execute(callbacks=callbacks)
- status = self.device.status
- self.device.setup(orig=True)
- if hasattr(self.device, 'set_rw'):
- self.device.set_rw()
- self.format.destroy()
- udev.settle()
- if isinstance(self.device, PartitionDevice) and self.device.disklabel_supported:
- if self.format.parted_flag:
- self.device.unset_flag(self.format.parted_flag)
- self.device.disk.original_format.commit_to_disk()
- udev.settle()
+ try:
+ status = self.device.status
+ self.device.setup(orig=True)
+ if hasattr(self.device, 'set_rw'):
+ self.device.set_rw()
- if not status:
- self.device.teardown()
+ self.format.destroy()
+ udev.settle()
+ if isinstance(self.device, PartitionDevice) and self.device.disklabel_supported:
+ if self.format.parted_flag:
+ self.device.unset_flag(self.format.parted_flag)
+ self.device.disk.original_format.commit_to_disk()
+ udev.settle()
+
+ if not status:
+ self.device.teardown()
+ except Exception as e: # pylint: disable=broad-except
+ if self.optional:
+ log.error("Ignoring error when executing optional action: Failed to destroy format on %s: %s.",
+ self.device.name, str(e))
+ else:
+ raise
def cancel(self):
if not self._applied:
From fca71515094840ab1ca8821641284cfb0b687d82 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 16 May 2025 17:28:40 +0200
Subject: [PATCH 2/4] Make ActionDestroyFormat optional when device is also
removed
In both destroy_device and recursive_remove we try to remove both
the device and its format. In these cases the format destroy can
be considered optional, and we don't need to fail just because we
failed to remove the format.
Resolves: RHEL-8008
Resolves: RHEL-8012
---
blivet/blivet.py | 2 +-
blivet/devicetree.py | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/blivet/blivet.py b/blivet/blivet.py
index dc066b036..2e86f5bf6 100644
--- a/blivet/blivet.py
+++ b/blivet/blivet.py
@@ -897,7 +897,7 @@ def destroy_device(self, device):
if device.format.exists and device.format.type and \
not device.format_immutable:
# schedule destruction of any formatting while we're at it
- self.devicetree.actions.add(ActionDestroyFormat(device))
+ self.devicetree.actions.add(ActionDestroyFormat(device, optional=True))
action = ActionDestroyDevice(device)
self.devicetree.actions.add(action)
diff --git a/blivet/devicetree.py b/blivet/devicetree.py
index c6c1b4400..f94e3ca30 100644
--- a/blivet/devicetree.py
+++ b/blivet/devicetree.py
@@ -264,7 +264,7 @@ def recursive_remove(self, device, actions=True, remove_device=True, modparent=T
if actions:
if leaf.format.exists and not leaf.protected and \
not leaf.format_immutable:
- self.actions.add(ActionDestroyFormat(leaf))
+ self.actions.add(ActionDestroyFormat(leaf, optional=True))
self.actions.add(ActionDestroyDevice(leaf))
else:
@@ -276,7 +276,7 @@ def recursive_remove(self, device, actions=True, remove_device=True, modparent=T
if not device.format_immutable:
if actions:
- self.actions.add(ActionDestroyFormat(device))
+ self.actions.add(ActionDestroyFormat(device, optional=True))
else:
device.format = None
From 50efc63fa3053f863d03439a507b3e0a6d7b8168 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 19 May 2025 14:24:06 +0200
Subject: [PATCH 3/4] tests: Add a simple test case for optional format destroy
action
Related: RHEL-8008
Related: RHEL-8012
---
tests/unit_tests/devices_test/lvm_test.py | 29 +++++++++++++++++++++++
1 file changed, 29 insertions(+)
diff --git a/tests/unit_tests/devices_test/lvm_test.py b/tests/unit_tests/devices_test/lvm_test.py
index e645309fc..34c2084a8 100644
--- a/tests/unit_tests/devices_test/lvm_test.py
+++ b/tests/unit_tests/devices_test/lvm_test.py
@@ -1160,3 +1160,32 @@ def test_vdo_compression_deduplication_change(self):
with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
b.do_it()
lvm.vdo_enable_deduplication.assert_called_with(vg.name, vdopool.lvname)
+
+
+@patch("blivet.devices.lvm.LVMLogicalVolumeDevice._external_dependencies", new=[])
+@patch("blivet.devices.lvm.LVMLogicalVolumeBase._external_dependencies", new=[])
+@patch("blivet.devices.dm.DMDevice._external_dependencies", new=[])
+class BlivetLVMOptionalDestroyTest(unittest.TestCase):
+
+ def test_optional_format_destroy(self, *args): # pylint: disable=unused-argument
+ b = blivet.Blivet()
+ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("10 GiB"), exists=True)
+ vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
+ lv = LVMLogicalVolumeDevice("testlv", parents=[vg], exists=True, size=Size("5 GiB"),
+ fmt=blivet.formats.get_format("xfs", exists=True))
+
+ for dev in (pv, vg, lv):
+ b.devicetree._add_device(dev)
+
+ b.destroy_device(lv)
+ fmt_ac = b.devicetree.actions.find(action_type="destroy", object_type="format")
+ self.assertTrue(fmt_ac)
+ self.assertTrue(fmt_ac[0].optional)
+
+ with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
+ lvm.lvactivate.side_effect = RuntimeError()
+ try:
+ b.do_it()
+ except RuntimeError:
+ self.fail("Optional format destroy action is not optional")
From ea913c5fa8e60cd5c2fdd8196be51c067a2a73d8 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 20 May 2025 13:02:00 +0200
Subject: [PATCH 4/4] tests: Add test case for removing broken thin pool
Related: RHEL-8008
Related: RHEL-8012
---
tests/storage_tests/devices_test/lvm_test.py | 52 ++++++++++++++++++++
1 file changed, 52 insertions(+)
diff --git a/tests/storage_tests/devices_test/lvm_test.py b/tests/storage_tests/devices_test/lvm_test.py
index a1064c9c4..10e7354ff 100644
--- a/tests/storage_tests/devices_test/lvm_test.py
+++ b/tests/storage_tests/devices_test/lvm_test.py
@@ -1,5 +1,7 @@
import os
import subprocess
+import tempfile
+from unittest.mock import patch
from ..storagetestcase import StorageTestCase
@@ -552,3 +554,53 @@ def test_lvm_pv_size(self):
self.assertEqual(vg_size, vg.size)
vg_free = self._get_vg_free(vg.name)
self.assertEqual(vg_free, vg.free_space)
+
+ def _break_thin_pool(self, vgname):
+ os.system("vgchange -an %s >/dev/null 2>&1" % vgname)
+
+ # changing transaction_id for the pool prevents it from being activated
+ with tempfile.NamedTemporaryFile(prefix="blivet_test") as temp:
+ os.system("vgcfgbackup -f %s %s >/dev/null 2>&1" % (temp.name, vgname))
+ os.system("sed -i 's/transaction_id =.*/transaction_id = 123456/' %s >/dev/null 2>&1" % temp.name)
+ os.system("vgcfgrestore -f %s %s --force >/dev/null 2>&1" % (temp.name, vgname))
+
+ def test_lvm_broken_thin(self):
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+
+ self.storage.initialize_disk(disk)
+
+ pv = self.storage.new_partition(size=blivet.size.Size("100 MiB"), fmt_type="lvmpv",
+ parents=[disk])
+ self.storage.create_device(pv)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ vg = self.storage.new_vg(name="blivetTestVG", parents=[pv])
+ self.storage.create_device(vg)
+
+ pool = self.storage.new_lv(thin_pool=True, size=blivet.size.Size("50 MiB"),
+ parents=[vg], name="blivetTestPool")
+ self.storage.create_device(pool)
+
+ self.storage.do_it()
+
+ # intentionally break the thin pool created above
+ self._break_thin_pool("blivetTestVG")
+
+ self.storage.reset()
+
+ pool = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestPool")
+ self.assertIsNotNone(pool)
+
+ # check that the pool cannot be activated
+ try:
+ pool.setup()
+ except Exception: # pylint: disable=broad-except
+ pass
+ else:
+ self.fail("Failed to break thinpool for tests")
+
+ # verify that the pool can be destroyed even if it cannot be activated
+ self.storage.recursive_remove(pool)
+ self.storage.do_it()

0037-Wipe-end-partition-before-creating-it-as-well-as-the-start.patch

@ -0,0 +1,384 @@
From c07938143a9906bc0e06e78c818227b4c06f64ad Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 27 May 2025 15:21:23 +0200
Subject: [PATCH 1/3] Add some basic partitioning storage tests
This supplements the existing tests, which use sparse files. These
new test cases actually run do_it() and check the result after
reset. More test cases will follow.
Related: RHEL-76917
---
.../devices_test/partition_test.py | 148 ++++++++++++++++++
1 file changed, 148 insertions(+)
diff --git a/tests/storage_tests/devices_test/partition_test.py b/tests/storage_tests/devices_test/partition_test.py
index 679fded6e..6ad8a8f1a 100644
--- a/tests/storage_tests/devices_test/partition_test.py
+++ b/tests/storage_tests/devices_test/partition_test.py
@@ -11,12 +11,15 @@
except ImportError:
from mock import patch
+import blivet
from blivet.devices import DiskFile
from blivet.devices import PartitionDevice
from blivet.formats import get_format
from blivet.size import Size
from blivet.util import sparsetmpfile
+from ..storagetestcase import StorageTestCase
+
Weighted = namedtuple("Weighted", ["fstype", "mountpoint", "true_funcs", "weight"])
@@ -218,3 +221,148 @@ def test_extended_min_size(self):
end_free = (extended_end - logical_end) * sector_size
self.assertEqual(extended_device.min_size,
extended_device.align_target_size(extended_device.current_size - end_free))
+
+
+class PartitionTestCase(StorageTestCase):
+
+ def setUp(self):
+ super().setUp()
+
+ disks = [os.path.basename(vdev) for vdev in self.vdevs]
+ self.storage = blivet.Blivet()
+ self.storage.exclusive_disks = disks
+ self.storage.reset()
+
+ # make sure only the targetcli disks are in the devicetree
+ for disk in self.storage.disks:
+ self.assertTrue(disk.path in self.vdevs)
+ self.assertIsNone(disk.format.type)
+ self.assertFalse(disk.children)
+
+ def _clean_up(self):
+ self.storage.reset()
+ for disk in self.storage.disks:
+ if disk.path not in self.vdevs:
+ raise RuntimeError("Disk %s found in devicetree but not in disks created for tests" % disk.name)
+ self.storage.recursive_remove(disk)
+
+ self.storage.do_it()
+
+ def test_msdos_basic(self):
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+
+ self.storage.format_device(disk, blivet.formats.get_format("disklabel", label_type="msdos"))
+
+ for i in range(4):
+ part = self.storage.new_partition(size=Size("100 MiB"), parents=[disk],
+ primary=True)
+ self.storage.create_device(part)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+ self.assertEqual(disk.format.type, "disklabel")
+ self.assertEqual(disk.format.label_type, "msdos")
+ self.assertIsNotNone(disk.format.parted_disk)
+ self.assertIsNotNone(disk.format.parted_device)
+ self.assertEqual(len(disk.format.partitions), 4)
+ self.assertEqual(len(disk.format.primary_partitions), 4)
+ self.assertEqual(len(disk.children), 4)
+
+ for i in range(4):
+ part = self.storage.devicetree.get_device_by_path(self.vdevs[0] + str(i + 1))
+ self.assertIsNotNone(part)
+ self.assertEqual(part.type, "partition")
+ self.assertEqual(part.disk, disk)
+ self.assertEqual(part.size, Size("100 MiB"))
+ self.assertTrue(part.is_primary)
+ self.assertFalse(part.is_extended)
+ self.assertFalse(part.is_logical)
+ self.assertIsNotNone(part.parted_partition)
+
+ def test_msdos_extended(self):
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+
+ self.storage.format_device(disk, blivet.formats.get_format("disklabel", label_type="msdos"))
+
+ part = self.storage.new_partition(size=Size("100 MiB"), parents=[disk])
+ self.storage.create_device(part)
+
+ part = self.storage.new_partition(size=Size("1 GiB"), parents=[disk],
+ part_type=parted.PARTITION_EXTENDED)
+ self.storage.create_device(part)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ for i in range(4):
+ part = self.storage.new_partition(size=Size("100 MiB"), parents=[disk],
+ part_type=parted.PARTITION_LOGICAL)
+ self.storage.create_device(part)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+ self.assertEqual(disk.format.type, "disklabel")
+ self.assertEqual(disk.format.label_type, "msdos")
+ self.assertIsNotNone(disk.format.parted_disk)
+ self.assertIsNotNone(disk.format.parted_device)
+ self.assertEqual(len(disk.format.partitions), 6)
+ self.assertEqual(len(disk.format.primary_partitions), 1)
+ self.assertEqual(len(disk.children), 6)
+
+ for i in range(4, 8):
+ part = self.storage.devicetree.get_device_by_path(self.vdevs[0] + str(i + 1))
+ self.assertIsNotNone(part)
+ self.assertEqual(part.type, "partition")
+ self.assertEqual(part.disk, disk)
+ self.assertEqual(part.size, Size("100 MiB"))
+ self.assertFalse(part.is_primary)
+ self.assertFalse(part.is_extended)
+ self.assertTrue(part.is_logical)
+ self.assertIsNotNone(part.parted_partition)
+
+ def test_gpt_basic(self):
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+
+ self.storage.format_device(disk, blivet.formats.get_format("disklabel", label_type="gpt"))
+
+ for i in range(4):
+ part = self.storage.new_partition(size=Size("100 MiB"), parents=[disk],)
+ self.storage.create_device(part)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+ self.assertEqual(disk.format.type, "disklabel")
+ self.assertEqual(disk.format.label_type, "gpt")
+ self.assertIsNotNone(disk.format.parted_disk)
+ self.assertIsNotNone(disk.format.parted_device)
+ self.assertEqual(len(disk.format.partitions), 4)
+ self.assertEqual(len(disk.format.primary_partitions), 4)
+ self.assertEqual(len(disk.children), 4)
+
+ for i in range(4):
+ part = self.storage.devicetree.get_device_by_path(self.vdevs[0] + str(i + 1))
+ self.assertIsNotNone(part)
+ self.assertEqual(part.type, "partition")
+ self.assertEqual(part.disk, disk)
+ self.assertEqual(part.size, Size("100 MiB"))
+ self.assertTrue(part.is_primary)
+ self.assertFalse(part.is_extended)
+ self.assertFalse(part.is_logical)
+ self.assertIsNotNone(part.parted_partition)
From 1486d2d47d9b757694a3da88ccc13d29d8bb12fd Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 27 May 2025 14:10:49 +0200
Subject: [PATCH 2/3] Wipe end partition before creating it as well as the
start
We currently overwrite the start of a newly created partition
with zeroes to remove any filesystem metadata that might occupy
the space. This extends that functionality to the end of the
partition to remove MD 1.0 metadata that might be there.
Resolves: RHEL-76917
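A condensed sketch of the added end-of-partition wipe (device, bs, count and end mirror the variables used in _wipe() below; the real code uses blivet's util.run_program rather than subprocess):

import subprocess

def wipe_partition_end(device, end, bs, count):
    # zero the last `count` blocks of the partition so stale 1.0 MD metadata
    # (stored at the end of a member device) is not picked up after creation
    cmd = ["dd", "if=/dev/zero", "of=%s" % device,
           "bs=%d" % bs, "seek=%d" % (end - count), "count=%d" % count]
    subprocess.run(cmd, check=False)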
---
blivet/devices/partition.py | 20 +++++++++++++++++++-
1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/blivet/devices/partition.py b/blivet/devices/partition.py
index 6ae4b8d36..1dac75a5a 100644
--- a/blivet/devices/partition.py
+++ b/blivet/devices/partition.py
@@ -599,7 +599,7 @@ def _wipe(self):
""" Wipe the partition metadata.
Assumes that the partition metadata is located at the start
- of the partition and occupies no more than 1 MiB.
+ and end of the partition and occupies no more than 1 MiB.
Erases in block increments. Erases the smallest number of blocks
such that at least 1 MiB is erased or the whole partition is
@@ -632,6 +632,24 @@ def _wipe(self):
# things to settle.
udev.settle()
+ if count >= part_len:
+ # very small partition, we wiped it completely already
+ return
+
+ # now do the end of the partition as well (RAID 1.0 metadata)
+ end = self.parted_partition.geometry.end
+ cmd = ["dd", "if=/dev/zero", "of=%s" % device, "bs=%d" % bs,
+ "seek=%d" % (end - count), "count=%d" % count]
+ try:
+ util.run_program(cmd)
+ except OSError as e:
+ log.error(str(e))
+ finally:
+ # If a udev device is created with the watch option, then
+ # a change uevent is synthesized and we need to wait for
+ # things to settle.
+ udev.settle()
+
def _create(self):
""" Create the device. """
log_method_call(self, self.name, status=self.status)
From f0f78b801fb52425c13d0384f6867bf55839d98f Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 28 May 2025 11:01:14 +0200
Subject: [PATCH 3/3] tests: Add tests for wiping stale metadata from new
partitions
Related: RHEL-76917
---
.../devices_test/partition_test.py | 119 ++++++++++++++++++
1 file changed, 119 insertions(+)
diff --git a/tests/storage_tests/devices_test/partition_test.py b/tests/storage_tests/devices_test/partition_test.py
index 6ad8a8f1a..f4be3aa4c 100644
--- a/tests/storage_tests/devices_test/partition_test.py
+++ b/tests/storage_tests/devices_test/partition_test.py
@@ -4,6 +4,7 @@
import os
import six
import unittest
+import blivet.deviceaction
import parted
try:
@@ -366,3 +367,121 @@ def test_gpt_basic(self):
self.assertFalse(part.is_extended)
self.assertFalse(part.is_logical)
self.assertIsNotNone(part.parted_partition)
+
+ def _partition_wipe_check(self):
+ part1 = self.storage.devicetree.get_device_by_path(self.vdevs[0] + "1")
+ self.assertIsNotNone(part1)
+ self.assertIsNone(part1.format.type)
+
+ out = blivet.util.capture_output(["blkid", "-p", "-sTYPE", "-ovalue", self.vdevs[0] + "1"])
+ self.assertEqual(out.strip(), "")
+
+ part2 = self.storage.devicetree.get_device_by_path(self.vdevs[0] + "2")
+ self.assertIsNotNone(part2)
+ self.assertEqual(part2.format.type, "ext4")
+
+ try:
+ part2.format.do_check()
+ except blivet.errors.FSError as e:
+ self.fail("Partition wipe corrupted filesystem on an adjacent partition: %s" % str(e))
+
+ out = blivet.util.capture_output(["blkid", "-p", "-sTYPE", "-ovalue", self.vdevs[0] + "2"])
+ self.assertEqual(out.strip(), "ext4")
+
+ def test_partition_wipe_ext(self):
+ """ Check that any stray filesystem metadata are removed before creating a partition """
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+
+ self.storage.format_device(disk, blivet.formats.get_format("disklabel", label_type="gpt"))
+
+ # create two partitions with ext4
+ part1 = self.storage.new_partition(size=Size("100 MiB"), parents=[disk],
+ fmt=blivet.formats.get_format("ext4"))
+ self.storage.create_device(part1)
+
+ part2 = self.storage.new_partition(size=Size("1 MiB"), parents=[disk], grow=True,
+ fmt=blivet.formats.get_format("ext4"))
+ self.storage.create_device(part2)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ # remove the first partition (only the partition without removing the format)
+ part1 = self.storage.devicetree.get_device_by_path(self.vdevs[0] + "1")
+ ac = blivet.deviceaction.ActionDestroyDevice(part1)
+ self.storage.devicetree.actions.add(ac)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ # create the first partition again (without ext4)
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ part1 = self.storage.new_partition(size=Size("100 MiB"), parents=[disk])
+ self.storage.create_device(part1)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ # XXX PartitionDevice._post_create calls wipefs on the partition, we want to check that
+ # the _pre_create dd wipe works so we need to skip the _post_create wipefs call
+ part1._post_create = lambda: None
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ # make sure the ext4 signature is not present on part1 (and untouched on part2)
+ self._partition_wipe_check()
+
+ def test_partition_wipe_mdraid(self):
+ """ Check that any stray RAID metadata are removed before creating a partition """
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+
+ self.storage.format_device(disk, blivet.formats.get_format("disklabel", label_type="gpt"))
+
+ # create two partitions, one empty, one with ext4
+ part1 = self.storage.new_partition(size=Size("100 MiB"), parents=[disk])
+ self.storage.create_device(part1)
+
+ part2 = self.storage.new_partition(size=Size("1 MiB"), parents=[disk], grow=True,
+ fmt=blivet.formats.get_format("ext4"))
+ self.storage.create_device(part2)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ # create MD RAID with metadata 1.0 on the first partition
+ ret = blivet.util.run_program(["mdadm", "--create", "blivetMDTest", "--level=linear",
+ "--metadata=1.0", "--raid-devices=1", "--force", part1.path])
+ self.assertEqual(ret, 0, "Failed to create RAID array for partition wipe test")
+ ret = blivet.util.run_program(["mdadm", "--stop", "/dev/md/blivetMDTest"])
+ self.assertEqual(ret, 0, "Failed to create RAID array for partition wipe test")
+
+ # now remove the partition without removing the array first
+ part1 = self.storage.devicetree.get_device_by_path(self.vdevs[0] + "1")
+ ac = blivet.deviceaction.ActionDestroyDevice(part1)
+ self.storage.devicetree.actions.add(ac)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ # create the first partition again (without format)
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ part1 = self.storage.new_partition(size=Size("100 MiB"), parents=[disk])
+ self.storage.create_device(part1)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ # XXX PartitionDevice._post_create calls wipefs on the partition, we want to check that
+ # the _pre_create dd wipe works so we need to skip the _post_create wipefs call
+ part1._post_create = lambda: None
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ # make sure the mdmember signature is not present on part1 (and ext4 is untouched on part2)
+ self._partition_wipe_check()

0038-Add-a-pre-wipe-fixup-function-for-LVM-logical-volume.patch

@ -0,0 +1,65 @@
From f70ee1ef08c20485f49b30fe1072a7ccafaaa2fe Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 1 Aug 2025 15:03:09 +0200
Subject: [PATCH] Add a pre-wipe fixup function for LVM logical volumes
LVs scheduled to be removed are always activated so that their
format can be removed during installation. If a read-only LV with
the skip-activation flag carries MD metadata, this means that after
activating the LV to remove the format, the MD array is
auto-assembled by udev, preventing us from removing it. For this
special case, we simply stop the array before removing the format.
Resolves: RHEL-68368
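A reduced sketch of the fixup, using the libblockdev MD plugin the same way the patch does (the five-second sleep gives udev time to auto-assemble the array before we stop it):

import time

import gi
gi.require_version("BlockDev", "2.0")
from gi.repository import BlockDev as blockdev

def stop_md_array_on_lv(lv_path, log):
    try:
        info = blockdev.md.examine(lv_path)          # MD metadata present on the LV?
    except blockdev.MDRaidError:
        return                                       # nothing to do
    time.sleep(5)                                    # let udev assemble the array first
    log.info("MD metadata found on LV with skip activation, stopping the array %s",
             info.device)
    try:
        blockdev.md.deactivate(info.device)
    except blockdev.MDRaidError as err:
        log.info("failed to deactivate %s: %s", info.device, str(err))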
---
blivet/deviceaction.py | 3 +++
blivet/devices/lvm.py | 19 +++++++++++++++++++
2 files changed, 22 insertions(+)
diff --git a/blivet/deviceaction.py b/blivet/deviceaction.py
index a6fc211e..169c3a10 100644
--- a/blivet/deviceaction.py
+++ b/blivet/deviceaction.py
@@ -760,6 +760,9 @@ class ActionDestroyFormat(DeviceAction):
if hasattr(self.device, 'set_rw'):
self.device.set_rw()
+ if hasattr(self.device, 'pre_format_destroy'):
+ self.device.pre_format_destroy()
+
self.format.destroy()
udev.settle()
if isinstance(self.device, PartitionDevice) and self.device.disklabel_supported:
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index a03d57f9..6ea35212 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -2695,6 +2695,25 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
else:
blockdev.lvm.lvactivate(self.vg.name, self._name, ignore_skip=ignore_skip_activation)
+ def pre_format_destroy(self):
+ """ Fixup needed to run before wiping this device """
+ if self.ignore_skip_activation > 0:
+ # the LV was not activated during the initial scan so if there is an MD array on it
+ # it will now also get activated and we need to stop it to be able to remove the LV
+ try:
+ info = blockdev.md.examine(self.path)
+ except blockdev.MDRaidError:
+ pass
+ else:
+ # give udev a bit time to activate the array so we can deactivate it again
+ time.sleep(5)
+ log.info("MD metadata found on LV with skip activation, stopping the array %s",
+ info.device)
+ try:
+ blockdev.md.deactivate(info.device)
+ except blockdev.MDRaidError as err:
+ log.info("failed to deactivate %s: %s", info.device, str(err))
+
@type_specific
def _pre_create(self):
LVMLogicalVolumeBase._pre_create(self)
--
2.50.1

python-blivet.spec

@ -23,7 +23,7 @@ Version: 3.6.0
#%%global prerelease .b2
# prerelease, if defined, should be something like .a1, .b1, .b2.dev1, or .c2
-Release: 23%{?prerelease}%{?dist}
+Release: 28%{?prerelease}%{?dist}
Epoch: 1
License: LGPLv2+
%global realname blivet
@ -62,6 +62,12 @@ Patch28: 0029-Align-sizes-up-for-growable-LVs.patch
Patch29: 0030-mod_pass_in_stratis_test.patch
Patch30: 0031-Fix_running_tests_in_FIPS_mode.patch
Patch31: 0032-Set-persistent-allow-discards-flag-for-new-LUKS-devices.patch
Patch32: 0033-Do-not-remove-PVs-from-devices-file-if-disabled-or-doesnt-exist.patch
Patch33: 0034-Include-additional-information-in-PartitioningError.patch
Patch34: 0035-LVMPV-format-size-fix.patch
Patch35: 0036-Make-ActionDestroyFormat-optional.patch
Patch36: 0037-Wipe-end-partition-before-creating-it-as-well-as-the-start.patch
Patch37: 0038-Add-a-pre-wipe-fixup-function-for-LVM-logical-volume.patch
# Versions of required components (done so we make sure the buildrequires
# match the requires versions of things).
@ -225,6 +231,29 @@ configuration.
%endif
%changelog
* Mon Aug 04 2025 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-28
- Add a pre-wipe fixup function for LVM logical volumes
Resolves: RHEL-68368
* Fri May 30 2025 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-27
- Wipe end partition before creating it as well as the start
Resolves: RHEL-76917
* Tue May 20 2025 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-26
- Make ActionDestroyFormat optional when the device is also scheduled to be removed
Resolves: RHEL-8008
Resolves: RHEL-8012
* Mon Apr 14 2025 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-25
- Get the actual PV format size for LVMPV format
Resolves: RHEL-74078
- Include additional information in PartitioningError
Resolves: RHEL-8005
* Thu Mar 27 2025 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-24
- Do not remove PVs from devices file if disabled or doesn't exist
Resolves: RHEL-84662
* Tue Mar 11 2025 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-23
- Set persistent allow-discards flag for newly created LUKS devices
Resolves: RHEL-82430