Compare commits


No commits in common. "c8" and "c9-beta" have entirely different histories.
c8 ... c9-beta

39 changed files with 4991 additions and 1172 deletions

.python-blivet.metadata Normal file

@@ -0,0 +1,2 @@
8393baa22cb433d1012e3923ad0bc232401116c6 SOURCES/blivet-3.6.0-tests.tar.gz
e9d95c1165703fed3da1f35a9199197bfff68f98 SOURCES/blivet-3.6.0.tar.gz
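
These two lines follow the dist-git lookaside-cache format: one "<sha1>  SOURCES/<tarball>" pair per source archive. A minimal sketch of checking downloaded tarballs against this file — everything here beyond the two recorded pairs (the helper name, the chunked read) is an assumption for illustration:

# Verify each "<sha1> <path>" pair from the metadata file against the
# actual tarball contents; prints OK or MISMATCH per source archive.
import hashlib

def verify_sources(metadata_path=".python-blivet.metadata"):
    with open(metadata_path) as metadata:
        for line in metadata:
            if not line.strip():
                continue
            expected, path = line.split()
            digest = hashlib.sha1()
            with open(path, "rb") as source:
                for chunk in iter(lambda: source.read(1 << 20), b""):
                    digest.update(chunk)
            print(path, "OK" if digest.hexdigest() == expected else "MISMATCH")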


@@ -1,4 +1,4 @@
From 83ccc9f9f14845fcce7a5ba5fa21fbb97b1dbbb7 Mon Sep 17 00:00:00 2001
From 2759aaa9cbee38f80819bc136bb893184429380c Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 11 Jul 2018 15:36:24 +0200
Subject: [PATCH] Force command line based libblockdev LVM plugin
@@ -31,5 +31,5 @@ index dd8d0f54..62cc539a 100644
# do not check for dependencies during libblockdev initializtion, do runtime
# checks instead
--
2.38.1
2.37.3
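
The hunk above only touches this patch's header and trailer, but the mechanism the patch carries is visible in its context lines: blivet pins libblockdev's LVM plugin to the command-line (non-dbus) backend by naming its shared object explicitly. A sketch of that mechanism, mirroring the blivet/__init__.py context quoted in full in the NVMe patch further down (the plugin-name set is abbreviated here):

# Force the command-line libblockdev LVM plugin by spelling out the .so
# to load, then register it alongside the other requested plugin specs.
import gi
gi.require_version("BlockDev", "2.0")
from gi.repository import BlockDev as blockdev

_requested_plugins = blockdev.plugin_specs_from_names({"swap", "crypto", "loop"})
# XXX force non-dbus LVM plugin
lvm_plugin = blockdev.PluginSpec()
lvm_plugin.name = blockdev.Plugin.LVM
lvm_plugin.so_name = "libbd_lvm.so.2"
_requested_plugins.append(lvm_plugin)
# do not check for dependencies during libblockdev initialization,
# do runtime checks instead
blockdev.switch_init_checks(False)
succ, avail_plugs = blockdev.try_reinit(require_plugins=_requested_plugins,
                                        reload=False, log_func=None)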


@@ -1,4 +1,4 @@
From c098d4112635b3ea55d5bd7e1817edbd519735fc Mon Sep 17 00:00:00 2001
From f27bdff18e98548f4c094b8cce23ca2d6270e30d Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 16 Jul 2018 14:26:11 +0200
Subject: [PATCH] Remove btrfs from requested libblockdev plugins
@@ -24,5 +24,5 @@ index 62cc539a..bbc7ea3a 100644
_requested_plugins = blockdev.plugin_specs_from_names(_REQUESTED_PLUGIN_NAMES)
# XXX force non-dbus LVM plugin
--
2.38.1
2.37.3


@@ -1,4 +1,4 @@
From f6f90805020d7c6ac46f17a13a00f319fc4351f6 Mon Sep 17 00:00:00 2001
From b9021fde8ccdd14cbe192b6597f7ca350b4bb585 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 26 May 2021 12:15:54 +0200
Subject: [PATCH] Revert "More consistent lvm errors (API break)"
@@ -326,5 +326,5 @@ index 47613fdc..995c2da4 100644
pv_spec2 = LVPVSpec(pv2, Size("250 MiB"))
lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
--
2.38.1
2.37.3


@@ -1,4 +1,4 @@
From f6490c469904f4808c63a170210e53acc908b018 Mon Sep 17 00:00:00 2001
From 4ad6f485a1e569feb5fd23ffcf78e08a7756e084 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 17 Aug 2022 14:24:21 +0200
Subject: [PATCH 1/2] Use MD populator instead of DM to handle DDF RAID format
@@ -35,10 +35,10 @@ index 41ddef81..4aa3f3b0 100644
_formattable = True # can be formatted
_supported = True # is supported
--
2.38.1
2.37.3
From 5fadd850aae217d7692a6c8a50b2dcd5e61a63cd Mon Sep 17 00:00:00 2001
From abc7e018f43976cdab286d67207d515a74693d16 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 17 Aug 2022 14:24:58 +0200
Subject: [PATCH 2/2] Do not read DDF RAID UUID from udev
@@ -82,5 +82,5 @@ index 3479e3f7..a7602d20 100644
def run(self):
--
2.38.1
2.37.3


@@ -1,899 +0,0 @@
From d8a8d96450bf0d3458671b9b7d23d972aa540396 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 26 May 2021 12:27:34 +0200
Subject: [PATCH] Revert "Terminology cleanups"
This reverts following commits:
- 3d46339fe9cf12e9082fcbe4dc5acc9f92617e8d
- 63c9c7165e5cdfa4a47dcf0ed9d717b71e7921f2
- 8956b9af8a785ae25e0e7153d2ef0702ce2f567c
---
blivet/devicefactory.py | 24 +++----
blivet/devices/dm.py | 9 ++-
blivet/devices/loop.py | 20 +++---
blivet/devices/luks.py | 26 ++++---
blivet/errors.py | 2 +-
blivet/partitioning.py | 22 +++++-
blivet/populator/helpers/dm.py | 4 +-
blivet/populator/helpers/luks.py | 4 +-
blivet/populator/helpers/lvm.py | 2 +-
blivet/populator/helpers/mdraid.py | 14 ++--
blivet/populator/helpers/multipath.py | 8 +--
blivet/populator/populator.py | 67 ++++++++++---------
blivet/threads.py | 3 +-
blivet/udev.py | 34 +++++-----
tests/unit_tests/devicefactory_test.py | 10 +--
.../devices_test/device_size_test.py | 6 +-
tests/unit_tests/populator_test.py | 34 +++++-----
tests/unit_tests/udev_test.py | 12 ++--
tests/vmtests/vmbackedtestcase.py | 2 +-
19 files changed, 167 insertions(+), 136 deletions(-)
diff --git a/blivet/devicefactory.py b/blivet/devicefactory.py
index 6f460f6d..90082c28 100644
--- a/blivet/devicefactory.py
+++ b/blivet/devicefactory.py
@@ -859,12 +859,12 @@ class DeviceFactory(object):
parent_container.parents.remove(orig_device)
if self.encrypted and isinstance(self.device, LUKSDevice) and \
- self.raw_device.format.luks_version != self.luks_version:
- self.raw_device.format.luks_version = self.luks_version
+ self.device.slave.format.luks_version != self.luks_version:
+ self.device.slave.format.luks_version = self.luks_version
if self.encrypted and isinstance(self.device, LUKSDevice) and \
- self.raw_device.format.luks_sector_size != self.luks_sector_size:
- self.raw_device.format.luks_sector_size = self.luks_sector_size
+ self.device.slave.format.luks_sector_size != self.luks_sector_size:
+ self.device.slave.format.luks_sector_size = self.luks_sector_size
def _set_name(self):
if not self.device_name:
@@ -1201,11 +1201,11 @@ class PartitionSetFactory(PartitionFactory):
container.parents.remove(member)
self.storage.destroy_device(member)
members.remove(member)
- self.storage.format_device(member.raw_device,
+ self.storage.format_device(member.slave,
get_format(self.fstype))
- members.append(member.raw_device)
+ members.append(member.slave)
if container:
- container.parents.append(member.raw_device)
+ container.parents.append(member.slave)
continue
@@ -1227,10 +1227,10 @@ class PartitionSetFactory(PartitionFactory):
continue
- if member_encrypted and self.encrypted and self.luks_version != member.raw_device.format.luks_version:
- member.raw_device.format.luks_version = self.luks_version
- if member_encrypted and self.encrypted and self.luks_sector_size != member.raw_device.format.luks_sector_size:
- member.raw_device.format.luks_sector_size = self.luks_sector_size
+ if member_encrypted and self.encrypted and self.luks_version != member.slave.format.luks_version:
+ member.slave.format.luks_version = self.luks_version
+ if member_encrypted and self.encrypted and self.luks_sector_size != member.slave.format.luks_sector_size:
+ member.slave.format.luks_sector_size = self.luks_sector_size
##
# Prepare previously allocated member partitions for reallocation.
@@ -1290,7 +1290,7 @@ class PartitionSetFactory(PartitionFactory):
if isinstance(member, LUKSDevice):
self.storage.destroy_device(member)
- member = member.raw_device
+ member = member.slave
self.storage.destroy_device(member)
diff --git a/blivet/devices/dm.py b/blivet/devices/dm.py
index 2f936170..ae25e8e6 100644
--- a/blivet/devices/dm.py
+++ b/blivet/devices/dm.py
@@ -154,6 +154,11 @@ class DMDevice(StorageDevice):
log_method_call(self, self.name, status=self.status)
super(DMDevice, self)._set_name(value)
+ @property
+ def slave(self):
+ """ This device's backing device. """
+ return self.parents[0]
+
class DMLinearDevice(DMDevice):
_type = "dm-linear"
@@ -189,8 +194,8 @@ class DMLinearDevice(DMDevice):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status,
controllable=self.controllable)
- parent_length = self.parents[0].current_size / LINUX_SECTOR_SIZE
- blockdev.dm.create_linear(self.name, self.parents[0].path, parent_length,
+ slave_length = self.slave.current_size / LINUX_SECTOR_SIZE
+ blockdev.dm.create_linear(self.name, self.slave.path, slave_length,
self.dm_uuid)
def _post_setup(self):
diff --git a/blivet/devices/loop.py b/blivet/devices/loop.py
index 0f4d7775..78f88d7d 100644
--- a/blivet/devices/loop.py
+++ b/blivet/devices/loop.py
@@ -73,7 +73,7 @@ class LoopDevice(StorageDevice):
def update_name(self):
""" Update this device's name. """
- if not self.parents[0].status:
+ if not self.slave.status:
# if the backing device is inactive, so are we
return self.name
@@ -81,7 +81,7 @@ class LoopDevice(StorageDevice):
# if our name is loopN we must already be active
return self.name
- name = blockdev.loop.get_loop_name(self.parents[0].path)
+ name = blockdev.loop.get_loop_name(self.slave.path)
if name.startswith("loop"):
self.name = name
@@ -89,24 +89,24 @@ class LoopDevice(StorageDevice):
@property
def status(self):
- return (self.parents[0].status and
+ return (self.slave.status and
self.name.startswith("loop") and
- blockdev.loop.get_loop_name(self.parents[0].path) == self.name)
+ blockdev.loop.get_loop_name(self.slave.path) == self.name)
@property
def size(self):
- return self.parents[0].size
+ return self.slave.size
def _pre_setup(self, orig=False):
- if not os.path.exists(self.parents[0].path):
- raise errors.DeviceError("specified file (%s) does not exist" % self.parents[0].path)
+ if not os.path.exists(self.slave.path):
+ raise errors.DeviceError("specified file (%s) does not exist" % self.slave.path)
return StorageDevice._pre_setup(self, orig=orig)
def _setup(self, orig=False):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status,
controllable=self.controllable)
- blockdev.loop.setup(self.parents[0].path)
+ blockdev.loop.setup(self.slave.path)
def _post_setup(self):
StorageDevice._post_setup(self)
@@ -123,3 +123,7 @@ class LoopDevice(StorageDevice):
StorageDevice._post_teardown(self, recursive=recursive)
self.name = "tmploop%d" % self.id
self.sysfs_path = ''
+
+ @property
+ def slave(self):
+ return self.parents[0]
diff --git a/blivet/devices/luks.py b/blivet/devices/luks.py
index 2eb1f130..5ab840ea 100644
--- a/blivet/devices/luks.py
+++ b/blivet/devices/luks.py
@@ -66,13 +66,17 @@ class LUKSDevice(DMCryptDevice):
@property
def raw_device(self):
+ return self.slave
+
+ @property
+ def slave(self):
if self._has_integrity:
return self.parents[0].parents[0]
return self.parents[0]
def _get_size(self):
if not self.exists:
- size = self.raw_device.size - crypto.LUKS_METADATA_SIZE
+ size = self.slave.size - crypto.LUKS_METADATA_SIZE
elif self.resizable and self.target_size != Size(0):
size = self.target_size
else:
@@ -80,8 +84,8 @@ class LUKSDevice(DMCryptDevice):
return size
def _set_size(self, newsize):
- if not self.exists and not self.raw_device.exists:
- self.raw_device.size = newsize + crypto.LUKS_METADATA_SIZE
+ if not self.exists and not self.slave.exists:
+ self.slave.size = newsize + crypto.LUKS_METADATA_SIZE
# just run the StorageDevice._set_size to make sure we are in the format limits
super(LUKSDevice, self)._set_size(newsize - crypto.LUKS_METADATA_SIZE)
@@ -108,22 +112,22 @@ class LUKSDevice(DMCryptDevice):
raise ValueError("size is smaller than the minimum for this device")
# don't allow larger luks than size (or target size) of backing device
- if newsize > (self.raw_device.size - crypto.LUKS_METADATA_SIZE):
+ if newsize > (self.slave.size - crypto.LUKS_METADATA_SIZE):
log.error("requested size %s is larger than size of the backing device %s",
- newsize, self.raw_device.size)
+ newsize, self.slave.size)
raise ValueError("size is larger than the size of the backing device")
if self.align_target_size(newsize) != newsize:
raise ValueError("new size would violate alignment requirements")
def _get_target_size(self):
- return self.raw_device.format.target_size
+ return self.slave.format.target_size
@property
def max_size(self):
""" The maximum size this luks device can be. Maximum is based on the
maximum size of the backing device. """
- max_luks = self.raw_device.max_size - crypto.LUKS_METADATA_SIZE
+ max_luks = self.slave.max_size - crypto.LUKS_METADATA_SIZE
max_format = self.format.max_size
return min(max_luks, max_format) if max_format else max_luks
@@ -131,7 +135,7 @@ class LUKSDevice(DMCryptDevice):
def resizable(self):
""" Can this device be resized? """
return (self._resizable and self.exists and self.format.resizable and
- self.raw_device.resizable and not self._has_integrity)
+ self.slave.resizable and not self._has_integrity)
def resize(self):
# size of LUKSDevice depends on size of the LUKS format on backing
@@ -139,7 +143,7 @@ class LUKSDevice(DMCryptDevice):
log_method_call(self, self.name, status=self.status)
def _post_create(self):
- self.name = self.raw_device.format.map_name
+ self.name = self.slave.format.map_name
StorageDevice._post_create(self)
def _post_teardown(self, recursive=False):
@@ -162,10 +166,10 @@ class LUKSDevice(DMCryptDevice):
self.name = new_name
def dracut_setup_args(self):
- return set(["rd.luks.uuid=luks-%s" % self.raw_device.format.uuid])
+ return set(["rd.luks.uuid=luks-%s" % self.slave.format.uuid])
def populate_ksdata(self, data):
- self.raw_device.populate_ksdata(data)
+ self.slave.populate_ksdata(data)
data.encrypted = True
super(LUKSDevice, self).populate_ksdata(data)
diff --git a/blivet/errors.py b/blivet/errors.py
index b886ffec..30c9921a 100644
--- a/blivet/errors.py
+++ b/blivet/errors.py
@@ -201,7 +201,7 @@ class DeviceTreeError(StorageError):
pass
-class NoParentsError(DeviceTreeError):
+class NoSlavesError(DeviceTreeError):
pass
diff --git a/blivet/partitioning.py b/blivet/partitioning.py
index ce77e4eb..2cd6554c 100644
--- a/blivet/partitioning.py
+++ b/blivet/partitioning.py
@@ -32,7 +32,7 @@ import _ped
from .errors import DeviceError, PartitioningError, AlignmentError
from .flags import flags
-from .devices import Device, PartitionDevice, device_path_to_name
+from .devices import Device, PartitionDevice, LUKSDevice, device_path_to_name
from .size import Size
from .i18n import _
from .util import stringize, unicodeize, compare
@@ -1635,7 +1635,15 @@ class TotalSizeSet(object):
:param size: the target combined size
:type size: :class:`~.size.Size`
"""
- self.devices = [d.raw_device for d in devices]
+ self.devices = []
+ for device in devices:
+ if isinstance(device, LUKSDevice):
+ partition = device.slave
+ else:
+ partition = device
+
+ self.devices.append(partition)
+
self.size = size
self.requests = []
@@ -1673,7 +1681,15 @@ class SameSizeSet(object):
:keyword max_size: the maximum size for growable devices
:type max_size: :class:`~.size.Size`
"""
- self.devices = [d.raw_device for d in devices]
+ self.devices = []
+ for device in devices:
+ if isinstance(device, LUKSDevice):
+ partition = device.slave
+ else:
+ partition = device
+
+ self.devices.append(partition)
+
self.size = size / len(devices)
self.grow = grow
self.max_size = max_size
diff --git a/blivet/populator/helpers/dm.py b/blivet/populator/helpers/dm.py
index 4721390e..0ad065e2 100644
--- a/blivet/populator/helpers/dm.py
+++ b/blivet/populator/helpers/dm.py
@@ -47,13 +47,13 @@ class DMDevicePopulator(DevicePopulator):
name = udev.device_get_name(self.data)
log_method_call(self, name=name)
sysfs_path = udev.device_get_sysfs_path(self.data)
- parent_devices = self._devicetree._add_parent_devices(self.data)
+ slave_devices = self._devicetree._add_slave_devices(self.data)
device = self._devicetree.get_device_by_name(name)
if device is None:
device = DMDevice(name, dm_uuid=self.data.get('DM_UUID'),
sysfs_path=sysfs_path, exists=True,
- parents=[parent_devices[0]])
+ parents=[slave_devices[0]])
device.protected = True
device.controllable = False
self._devicetree._add_device(device)
diff --git a/blivet/populator/helpers/luks.py b/blivet/populator/helpers/luks.py
index 3221122a..9b5023f8 100644
--- a/blivet/populator/helpers/luks.py
+++ b/blivet/populator/helpers/luks.py
@@ -43,7 +43,7 @@ class LUKSDevicePopulator(DevicePopulator):
return udev.device_is_dm_luks(data)
def run(self):
- parents = self._devicetree._add_parent_devices(self.data)
+ parents = self._devicetree._add_slave_devices(self.data)
device = LUKSDevice(udev.device_get_name(self.data),
sysfs_path=udev.device_get_sysfs_path(self.data),
parents=parents,
@@ -58,7 +58,7 @@ class IntegrityDevicePopulator(DevicePopulator):
return udev.device_is_dm_integrity(data)
def run(self):
- parents = self._devicetree._add_parent_devices(self.data)
+ parents = self._devicetree._add_slave_devices(self.data)
name = udev.device_get_name(self.data)
try:
diff --git a/blivet/populator/helpers/lvm.py b/blivet/populator/helpers/lvm.py
index 6ef2f417..b549e8d3 100644
--- a/blivet/populator/helpers/lvm.py
+++ b/blivet/populator/helpers/lvm.py
@@ -58,7 +58,7 @@ class LVMDevicePopulator(DevicePopulator):
log.warning("found non-vg device with name %s", vg_name)
device = None
- self._devicetree._add_parent_devices(self.data)
+ self._devicetree._add_slave_devices(self.data)
# LVM provides no means to resolve conflicts caused by duplicated VG
# names, so we're just being optimistic here. Woo!
diff --git a/blivet/populator/helpers/mdraid.py b/blivet/populator/helpers/mdraid.py
index a7602d20..9bec11ef 100644
--- a/blivet/populator/helpers/mdraid.py
+++ b/blivet/populator/helpers/mdraid.py
@@ -31,7 +31,7 @@ from ... import udev
from ...devicelibs import raid
from ...devices import MDRaidArrayDevice, MDContainerDevice
from ...devices import device_path_to_name
-from ...errors import DeviceError, NoParentsError
+from ...errors import DeviceError, NoSlavesError
from ...flags import flags
from ...storage_log import log_method_call
from .devicepopulator import DevicePopulator
@@ -52,12 +52,12 @@ class MDDevicePopulator(DevicePopulator):
log_method_call(self, name=name)
try:
- self._devicetree._add_parent_devices(self.data)
- except NoParentsError:
- log.error("no parents found for mdarray %s, skipping", name)
+ self._devicetree._add_slave_devices(self.data)
+ except NoSlavesError:
+ log.error("no slaves found for mdarray %s, skipping", name)
return None
- # try to get the device again now that we've got all the parents
+ # try to get the device again now that we've got all the slaves
device = self._devicetree.get_device_by_name(name, incomplete=flags.allow_imperfect_devices)
if device is None:
@@ -74,8 +74,8 @@ class MDDevicePopulator(DevicePopulator):
device.name = name
if device is None:
- # if we get here, we found all of the parent devices and
- # something must be wrong -- if all of the parents are in
+ # if we get here, we found all of the slave devices and
+ # something must be wrong -- if all of the slaves are in
# the tree, this device should be as well
if name is None:
name = udev.device_get_name(self.data)
diff --git a/blivet/populator/helpers/multipath.py b/blivet/populator/helpers/multipath.py
index 96c0a9ad..10c745bf 100644
--- a/blivet/populator/helpers/multipath.py
+++ b/blivet/populator/helpers/multipath.py
@@ -40,13 +40,13 @@ class MultipathDevicePopulator(DevicePopulator):
name = udev.device_get_name(self.data)
log_method_call(self, name=name)
- parent_devices = self._devicetree._add_parent_devices(self.data)
+ slave_devices = self._devicetree._add_slave_devices(self.data)
device = None
- if parent_devices:
- device = MultipathDevice(name, parents=parent_devices,
+ if slave_devices:
+ device = MultipathDevice(name, parents=slave_devices,
sysfs_path=udev.device_get_sysfs_path(self.data),
- wwn=parent_devices[0].wwn)
+ wwn=slave_devices[0].wwn)
self._devicetree._add_device(device)
return device
diff --git a/blivet/populator/populator.py b/blivet/populator/populator.py
index 3a419418..068270b2 100644
--- a/blivet/populator/populator.py
+++ b/blivet/populator/populator.py
@@ -31,7 +31,7 @@ gi.require_version("BlockDev", "2.0")
from gi.repository import BlockDev as blockdev
-from ..errors import DeviceError, DeviceTreeError, NoParentsError
+from ..errors import DeviceError, DeviceTreeError, NoSlavesError
from ..devices import DMLinearDevice, DMRaidArrayDevice
from ..devices import FileDevice, LoopDevice
from ..devices import MDRaidArrayDevice
@@ -92,55 +92,56 @@ class PopulatorMixin(object):
self._cleanup = False
- def _add_parent_devices(self, info):
- """ Add all parents of a device, raising DeviceTreeError on failure.
+ def _add_slave_devices(self, info):
+ """ Add all slaves of a device, raising DeviceTreeError on failure.
:param :class:`pyudev.Device` info: the device's udev info
- :raises: :class:`~.errors.DeviceTreeError if no parents are found or
- if we fail to add any parent
- :returns: a list of parent devices
+ :raises: :class:`~.errors.DeviceTreeError if no slaves are found or
+ if we fail to add any slave
+ :returns: a list of slave devices
:rtype: list of :class:`~.StorageDevice`
"""
name = udev.device_get_name(info)
sysfs_path = udev.device_get_sysfs_path(info)
- parent_dir = os.path.normpath("%s/slaves" % sysfs_path)
- parent_names = os.listdir(parent_dir)
- parent_devices = []
- if not parent_names:
- log.error("no parents found for %s", name)
- raise NoParentsError("no parents found for device %s" % name)
-
- for parent_name in parent_names:
- path = os.path.normpath("%s/%s" % (parent_dir, parent_name))
- parent_info = udev.get_device(os.path.realpath(path))
-
- if not parent_info:
- msg = "unable to get udev info for %s" % parent_name
+ slave_dir = os.path.normpath("%s/slaves" % sysfs_path)
+ slave_names = os.listdir(slave_dir)
+ slave_devices = []
+ if not slave_names:
+ log.error("no slaves found for %s", name)
+ raise NoSlavesError("no slaves found for device %s" % name)
+
+ for slave_name in slave_names:
+ path = os.path.normpath("%s/%s" % (slave_dir, slave_name))
+ slave_info = udev.get_device(os.path.realpath(path))
+
+ if not slave_info:
+ msg = "unable to get udev info for %s" % slave_name
raise DeviceTreeError(msg)
# cciss in sysfs is "cciss!cXdYpZ" but we need "cciss/cXdYpZ"
- parent_name = udev.device_get_name(parent_info).replace("!", "/")
-
- parent_dev = self.get_device_by_name(parent_name)
- if not parent_dev and parent_info:
- # we haven't scanned the parent yet, so do it now
- self.handle_device(parent_info)
- parent_dev = self.get_device_by_name(parent_name)
- if parent_dev is None:
+ slave_name = udev.device_get_name(slave_info).replace("!", "/")
+
+ slave_dev = self.get_device_by_name(slave_name)
+ if not slave_dev and slave_info:
+ # we haven't scanned the slave yet, so do it now
+ self.handle_device(slave_info)
+ slave_dev = self.get_device_by_name(slave_name)
+ if slave_dev is None:
if udev.device_is_dm_lvm(info):
- if parent_name not in lvs_info.cache:
+ if slave_name not in lvs_info.cache:
# we do not expect hidden lvs to be in the tree
continue
- # if the current parent is still not in
+ # if the current slave is still not in
# the tree, something has gone wrong
- log.error("failure scanning device %s: could not add parent %s", name, parent_name)
- msg = "failed to add parent %s of device %s" % (parent_name, name)
+ log.error("failure scanning device %s: could not add slave %s", name, slave_name)
+ msg = "failed to add slave %s of device %s" % (slave_name,
+ name)
raise DeviceTreeError(msg)
- parent_devices.append(parent_dev)
+ slave_devices.append(slave_dev)
- return parent_devices
+ return slave_devices
def _add_name(self, name):
if name not in self.names:
diff --git a/blivet/threads.py b/blivet/threads.py
index 5e2dff3f..1a5cc6db 100644
--- a/blivet/threads.py
+++ b/blivet/threads.py
@@ -63,11 +63,12 @@ class SynchronizedMeta(type):
"""
def __new__(cls, name, bases, dct):
new_dct = {}
+ blacklist = dct.get('_unsynchronized_methods', [])
for n in dct:
obj = dct[n]
# Do not decorate class or static methods.
- if n in dct.get('_unsynchronized_methods', []):
+ if n in blacklist:
pass
elif isinstance(obj, FunctionType):
obj = exclusive(obj)
diff --git a/blivet/udev.py b/blivet/udev.py
index efbc53d6..ddc49a37 100644
--- a/blivet/udev.py
+++ b/blivet/udev.py
@@ -39,7 +39,7 @@ from gi.repository import BlockDev as blockdev
global_udev = pyudev.Context()
log = logging.getLogger("blivet")
-ignored_device_names = []
+device_name_blacklist = []
""" device name regexes to ignore; this should be empty by default """
@@ -77,7 +77,7 @@ def get_devices(subsystem="block"):
result = []
for device in global_udev.list_devices(subsystem=subsystem):
- if not __is_ignored_blockdev(device.sys_name):
+ if not __is_blacklisted_blockdev(device.sys_name):
dev = device_to_dict(device)
result.append(dev)
@@ -176,13 +176,13 @@ def resolve_glob(glob):
return ret
-def __is_ignored_blockdev(dev_name):
+def __is_blacklisted_blockdev(dev_name):
"""Is this a blockdev we never want for an install?"""
if dev_name.startswith("ram") or dev_name.startswith("fd"):
return True
- if ignored_device_names:
- if any(re.search(expr, dev_name) for expr in ignored_device_names):
+ if device_name_blacklist:
+ if any(re.search(expr, dev_name) for expr in device_name_blacklist):
return True
dev_path = "/sys/class/block/%s" % dev_name
@@ -375,7 +375,7 @@ def device_is_disk(info):
device_is_dm_crypt(info) or
device_is_dm_stratis(info) or
(device_is_md(info) and
- (not device_get_md_container(info) and not all(device_is_disk(d) for d in device_get_parents(info))))))
+ (not device_get_md_container(info) and not all(device_is_disk(d) for d in device_get_slaves(info))))))
def device_is_partition(info):
@@ -454,18 +454,18 @@ def device_get_devname(info):
return info.get('DEVNAME')
-def device_get_parents(info):
- """ Return a list of udev device objects representing this device's parents. """
- parents_dir = device_get_sysfs_path(info) + "/slaves/"
+def device_get_slaves(info):
+ """ Return a list of udev device objects representing this device's slaves. """
+ slaves_dir = device_get_sysfs_path(info) + "/slaves/"
names = list()
- if os.path.isdir(parents_dir):
- names = os.listdir(parents_dir)
+ if os.path.isdir(slaves_dir):
+ names = os.listdir(slaves_dir)
- parents = list()
+ slaves = list()
for name in names:
- parents.append(get_device(device_node="/dev/" + name))
+ slaves.append(get_device(device_node="/dev/" + name))
- return parents
+ return slaves
def device_get_holders(info):
@@ -742,7 +742,7 @@ def device_get_partition_disk(info):
disk = None
majorminor = info.get("ID_PART_ENTRY_DISK")
sysfs_path = device_get_sysfs_path(info)
- parents_dir = "%s/slaves" % sysfs_path
+ slaves_dir = "%s/slaves" % sysfs_path
if majorminor:
major, minor = majorminor.split(":")
for device in get_devices():
@@ -750,8 +750,8 @@ def device_get_partition_disk(info):
disk = device_get_name(device)
break
elif device_is_dm_partition(info):
- if os.path.isdir(parents_dir):
- parents = os.listdir(parents_dir)
+ if os.path.isdir(slaves_dir):
+ parents = os.listdir(slaves_dir)
if len(parents) == 1:
disk = resolve_devspec(parents[0].replace('!', '/'))
else:
diff --git a/tests/unit_tests/devicefactory_test.py b/tests/unit_tests/devicefactory_test.py
index ff6bcb9e..552aadc1 100644
--- a/tests/unit_tests/devicefactory_test.py
+++ b/tests/unit_tests/devicefactory_test.py
@@ -115,9 +115,9 @@ class DeviceFactoryTestCase(unittest.TestCase):
kwargs.get("encrypted", False) or
kwargs.get("container_encrypted", False))
if kwargs.get("encrypted", False):
- self.assertEqual(device.parents[0].format.luks_version,
+ self.assertEqual(device.slave.format.luks_version,
kwargs.get("luks_version", crypto.DEFAULT_LUKS_VERSION))
- self.assertEqual(device.raw_device.format.luks_sector_size,
+ self.assertEqual(device.slave.format.luks_sector_size,
kwargs.get("luks_sector_size", 0))
self.assertTrue(set(device.disks).issubset(kwargs["disks"]))
@@ -357,7 +357,7 @@ class LVMFactoryTestCase(DeviceFactoryTestCase):
device = args[0]
if kwargs.get("encrypted"):
- container = device.parents[0].container
+ container = device.slave.container
else:
container = device.container
@@ -376,7 +376,7 @@ class LVMFactoryTestCase(DeviceFactoryTestCase):
self.assertIsInstance(pv, member_class)
if pv.encrypted:
- self.assertEqual(pv.parents[0].format.luks_version,
+ self.assertEqual(pv.slave.format.luks_version,
kwargs.get("luks_version", crypto.DEFAULT_LUKS_VERSION))
@patch("blivet.formats.lvmpv.LVMPhysicalVolume.formattable", return_value=True)
@@ -592,7 +592,7 @@ class LVMThinPFactoryTestCase(LVMFactoryTestCase):
device = args[0]
if kwargs.get("encrypted", False):
- thinlv = device.parents[0]
+ thinlv = device.slave
else:
thinlv = device
diff --git a/tests/unit_tests/devices_test/device_size_test.py b/tests/unit_tests/devices_test/device_size_test.py
index d0c0a3f4..a1efa86d 100644
--- a/tests/unit_tests/devices_test/device_size_test.py
+++ b/tests/unit_tests/devices_test/device_size_test.py
@@ -107,8 +107,8 @@ class LUKSDeviceSizeTest(StorageDeviceSizeTest):
def _get_device(self, *args, **kwargs):
exists = kwargs.get("exists", False)
- parent = StorageDevice(*args, size=kwargs["size"] + crypto.LUKS_METADATA_SIZE, exists=exists)
- return LUKSDevice(*args, **kwargs, parents=[parent])
+ slave = StorageDevice(*args, size=kwargs["size"] + crypto.LUKS_METADATA_SIZE, exists=exists)
+ return LUKSDevice(*args, **kwargs, parents=[slave])
def test_size_getter(self):
initial_size = Size("10 GiB")
@@ -116,4 +116,4 @@ class LUKSDeviceSizeTest(StorageDeviceSizeTest):
# for LUKS size depends on the backing device size
self.assertEqual(dev.size, initial_size)
- self.assertEqual(dev.raw_device.size, initial_size + crypto.LUKS_METADATA_SIZE)
+ self.assertEqual(dev.slave.size, initial_size + crypto.LUKS_METADATA_SIZE)
diff --git a/tests/unit_tests/populator_test.py b/tests/unit_tests/populator_test.py
index 369fe878..7ba04bac 100644
--- a/tests/unit_tests/populator_test.py
+++ b/tests/unit_tests/populator_test.py
@@ -86,7 +86,7 @@ class DMDevicePopulatorTestCase(PopulatorHelperTestCase):
@patch.object(DeviceTree, "get_device_by_name")
@patch.object(DMDevice, "status", return_value=True)
@patch.object(DMDevice, "update_sysfs_path")
- @patch.object(DeviceTree, "_add_parent_devices")
+ @patch.object(DeviceTree, "_add_slave_devices")
@patch("blivet.udev.device_get_name")
@patch("blivet.udev.device_get_sysfs_path", return_value=sentinel.sysfs_path)
def test_run(self, *args):
@@ -95,7 +95,7 @@ class DMDevicePopulatorTestCase(PopulatorHelperTestCase):
devicetree = DeviceTree()
- # The general case for dm devices is that adding the parent devices
+ # The general case for dm devices is that adding the slave/parent devices
# will result in the dm device itself being in the tree.
device = Mock()
device.id = 0
@@ -106,7 +106,7 @@ class DMDevicePopulatorTestCase(PopulatorHelperTestCase):
parent = Mock()
parent.id = 0
parent.parents = []
- devicetree._add_parent_devices.return_value = [parent]
+ devicetree._add_slave_devices.return_value = [parent]
devicetree._add_device(parent)
devicetree.get_device_by_name.return_value = None
device_name = "dmdevice"
@@ -235,7 +235,7 @@ class LVMDevicePopulatorTestCase(PopulatorHelperTestCase):
# could be the first helper class checked.
@patch.object(DeviceTree, "get_device_by_name")
- @patch.object(DeviceTree, "_add_parent_devices")
+ @patch.object(DeviceTree, "_add_slave_devices")
@patch("blivet.udev.device_get_name")
@patch("blivet.udev.device_get_lv_vg_name")
def test_run(self, *args):
@@ -247,7 +247,7 @@ class LVMDevicePopulatorTestCase(PopulatorHelperTestCase):
devicetree = DeviceTree()
data = Mock()
- # Add parent devices and then look up the device.
+ # Add slave/parent devices and then look up the device.
device_get_name.return_value = sentinel.lv_name
devicetree.get_device_by_name.return_value = None
@@ -267,7 +267,7 @@ class LVMDevicePopulatorTestCase(PopulatorHelperTestCase):
call(sentinel.vg_name),
call(sentinel.lv_name)])
- # Add parent devices, but the device is still not in the tree
+ # Add slave/parent devices, but the device is still not in the tree
get_device_by_name.side_effect = None
get_device_by_name.return_value = None
self.assertEqual(helper.run(), None)
@@ -639,7 +639,7 @@ class MDDevicePopulatorTestCase(PopulatorHelperTestCase):
# could be the first helper class checked.
@patch.object(DeviceTree, "get_device_by_name")
- @patch.object(DeviceTree, "_add_parent_devices")
+ @patch.object(DeviceTree, "_add_slave_devices")
@patch("blivet.udev.device_get_name")
@patch("blivet.udev.device_get_md_uuid")
@patch("blivet.udev.device_get_md_name")
@@ -650,7 +650,7 @@ class MDDevicePopulatorTestCase(PopulatorHelperTestCase):
devicetree = DeviceTree()
- # base case: _add_parent_devices gets the array into the tree
+ # base case: _add_slave_devices gets the array into the tree
data = Mock()
device = Mock()
device.parents = []
@@ -713,12 +713,12 @@ class MultipathDevicePopulatorTestCase(PopulatorHelperTestCase):
# could be the first helper class checked.
@patch("blivet.udev.device_get_sysfs_path")
- @patch.object(DeviceTree, "_add_parent_devices")
+ @patch.object(DeviceTree, "_add_slave_devices")
@patch("blivet.udev.device_get_name")
def test_run(self, *args):
"""Test multipath device populator."""
device_get_name = args[0]
- add_parent_devices = args[1]
+ add_slave_devices = args[1]
devicetree = DeviceTree()
# set up some fake udev data to verify handling of specific entries
@@ -733,13 +733,13 @@ class MultipathDevicePopulatorTestCase(PopulatorHelperTestCase):
device_name = "mpathtest"
device_get_name.return_value = device_name
- parent_1 = Mock(tags=set(), wwn=wwn[2:], id=0)
- parent_1.parents = []
- parent_2 = Mock(tags=set(), wwn=wwn[2:], id=0)
- parent_2.parents = []
- devicetree._add_device(parent_1)
- devicetree._add_device(parent_2)
- add_parent_devices.return_value = [parent_1, parent_2]
+ slave_1 = Mock(tags=set(), wwn=wwn[2:], id=0)
+ slave_1.parents = []
+ slave_2 = Mock(tags=set(), wwn=wwn[2:], id=0)
+ slave_2.parents = []
+ devicetree._add_device(slave_1)
+ devicetree._add_device(slave_2)
+ add_slave_devices.return_value = [slave_1, slave_2]
helper = self.helper_class(devicetree, data)
diff --git a/tests/unit_tests/udev_test.py b/tests/unit_tests/udev_test.py
index b208efa8..ebcd59e2 100644
--- a/tests/unit_tests/udev_test.py
+++ b/tests/unit_tests/udev_test.py
@@ -49,11 +49,11 @@ class UdevTest(unittest.TestCase):
@mock.patch('blivet.udev.device_is_dm_crypt', return_value=False)
@mock.patch('blivet.udev.device_is_md')
@mock.patch('blivet.udev.device_get_md_container')
- @mock.patch('blivet.udev.device_get_parents')
+ @mock.patch('blivet.udev.device_get_slaves')
def test_udev_device_is_disk_md(self, *args):
import blivet.udev
info = dict(DEVTYPE='disk', SYS_PATH=mock.sentinel.md_path)
- (device_get_parents, device_get_md_container, device_is_md) = args[:3] # pylint: disable=unbalanced-tuple-unpacking
+ (device_get_slaves, device_get_md_container, device_is_md) = args[:3] # pylint: disable=unbalanced-tuple-unpacking
disk_parents = [dict(DEVTYPE="disk", SYS_PATH='/fake/path/2'),
dict(DEVTYPE="disk", SYS_PATH='/fake/path/3')]
@@ -68,20 +68,20 @@ class UdevTest(unittest.TestCase):
# Intel FW RAID (MD RAID w/ container layer)
# device_get_container will return some mock value which will evaluate to True
device_get_md_container.return_value = mock.sentinel.md_container
- device_get_parents.side_effect = lambda info: list()
+ device_get_slaves.side_effect = lambda info: list()
self.assertTrue(blivet.udev.device_is_disk(info))
# Normal MD RAID
- device_get_parents.side_effect = lambda info: partition_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
+ device_get_slaves.side_effect = lambda info: partition_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
device_get_md_container.return_value = None
self.assertFalse(blivet.udev.device_is_disk(info))
# Dell FW RAID (MD RAID whose members are all whole disks)
- device_get_parents.side_effect = lambda info: disk_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
+ device_get_slaves.side_effect = lambda info: disk_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
self.assertTrue(blivet.udev.device_is_disk(info))
# Normal MD RAID (w/ at least one non-disk member)
- device_get_parents.side_effect = lambda info: mixed_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
+ device_get_slaves.side_effect = lambda info: mixed_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
self.assertFalse(blivet.udev.device_is_disk(info))
diff --git a/tests/vmtests/vmbackedtestcase.py b/tests/vmtests/vmbackedtestcase.py
index 797bac85..6255104f 100644
--- a/tests/vmtests/vmbackedtestcase.py
+++ b/tests/vmtests/vmbackedtestcase.py
@@ -50,7 +50,7 @@ class VMBackedTestCase(unittest.TestCase):
defined in set_up_disks.
"""
- udev.ignored_device_names = [r'^zram']
+ udev.device_name_blacklist = [r'^zram']
#
# create disk images
--
2.38.1
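
One of the renames this deleted patch used to revert is the udev ignore list; the restored names are visible in the blivet/udev.py and tests/vmtests/vmbackedtestcase.py hunks above. A condensed, standalone sketch of the device-name filtering those hunks quote (condensed for illustration, not the full module):

# Condensed from the blivet/udev.py hunks above: block devices whose
# names match any regex in the (restored) device_name_blacklist are
# skipped during the udev scan, as are ramdisks and floppies.
import re

device_name_blacklist = [r'^zram']  # what vmbackedtestcase.py sets for VM tests

def __is_blacklisted_blockdev(dev_name):
    """Is this a blockdev we never want for an install?"""
    if dev_name.startswith("ram") or dev_name.startswith("fd"):
        return True
    return any(re.search(expr, dev_name) for expr in device_name_blacklist)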


@@ -1,4 +1,4 @@
From 62af1d7f96b8ed8eb8f2732787576161ae5da79f Mon Sep 17 00:00:00 2001
From 789dd296988aa9da17d97ece1efc33f9e232648e Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 13 Oct 2022 10:47:52 +0200
Subject: [PATCH] Revert "Remove the Blivet.roots attribute"
@@ -73,5 +73,5 @@ index 8105bfc7..6f460f6d 100644
class PartitionFactory(DeviceFactory):
--
2.38.1
2.37.3


@@ -1,4 +1,4 @@
From 1561bfe8820118178bbb07021adc1cacd875c4c7 Mon Sep 17 00:00:00 2001
From 7931a74e691979dd23a16e7a017b4ef5bc296b79 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 18 Oct 2022 12:28:37 +0200
Subject: [PATCH] Fix potential AttributeError when getting stratis blockdev
@@ -41,5 +41,5 @@ index bd1c5a18..42f230ee 100644
def _get_locked_pools_info(self):
locked_pools = []
--
2.38.1
2.37.3


@@ -1,9 +1,9 @@
From b747c4ed07937f54a546ffb2f2c8c95e0797dd6c Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 20 Oct 2022 15:19:29 +0200
Subject: [PATCH] tests: Skip XFS resize test on CentOS/RHEL 8
Subject: [PATCH] tests: Skip XFS resize test on CentOS/RHEL 9
Partitions on loop devices are broken on CentOS/RHEL 8.
Partitions on loop devices are broken on CentOS/RHEL 9.
---
tests/skip.yml | 6 ++++++
1 file changed, 6 insertions(+)
@@ -20,8 +20,8 @@ index 568c3fff..66b34493 100644
+- test: storage_tests.formats_test.fs_test.XFSTestCase.test_resize
+ skip_on:
+ - distro: ["centos", "enterprise_linux"]
+ version: "8"
+ reason: "Creating partitions on loop devices is broken on CentOS/RHEL 8 latest kernel"
--
+ version: "9"
+ reason: "Creating partitions on loop devices is broken on CentOS/RHEL 9 latest kernel"
--
2.37.3
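
For reference, tests/skip.yml entries like the one above pair a dotted test id with a list of skip_on constraints (distro, version, reason). A hypothetical sketch of how a runner could evaluate such an entry — the helper below and its exact matching rules are assumptions, not blivet's actual test harness:

# Hypothetical evaluator for tests/skip.yml entries of the shape shown
# above; returns the skip reason for a matching entry, or None.
import yaml  # PyYAML

def skip_reason(test_id, distro, version, skip_file="tests/skip.yml"):
    with open(skip_file) as f:
        entries = yaml.safe_load(f) or []
    for entry in entries:
        if entry.get("test") != test_id:
            continue
        for cond in entry.get("skip_on", []):
            distros = cond.get("distro")
            if isinstance(distros, str):
                distros = [distros]
            if distros and distro not in distros:
                continue
            if cond.get("version") and str(cond["version"]) != str(version):
                continue
            return cond.get("reason", "skipped by skip.yml")
    return None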


@@ -0,0 +1,590 @@
From 9383855c8a15e6d7c4033cd8d7ae8310b462d166 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 18 Oct 2022 10:38:00 +0200
Subject: [PATCH 1/3] Add a basic support for NVMe and NVMe Fabrics devices
This adds two new device types: NVMeNamespaceDevice and
NVMeFabricsNamespaceDevice mostly to allow to differentiate
between "local" and "remote" NVMe devices. The new libblockdev
NVMe plugin is required for full functionality.
---
blivet/__init__.py | 6 +-
blivet/devices/__init__.py | 2 +-
blivet/devices/disk.py | 101 ++++++++++++++++++++++
blivet/devices/lib.py | 1 +
blivet/populator/helpers/__init__.py | 2 +-
blivet/populator/helpers/disk.py | 64 ++++++++++++++
blivet/udev.py | 33 +++++++
blivet/util.py | 9 ++
tests/unit_tests/populator_test.py | 124 +++++++++++++++++++++++++++
9 files changed, 339 insertions(+), 3 deletions(-)
diff --git a/blivet/__init__.py b/blivet/__init__.py
index bbc7ea3a..3b9e659e 100644
--- a/blivet/__init__.py
+++ b/blivet/__init__.py
@@ -67,6 +67,10 @@ if arch.is_s390():
else:
_REQUESTED_PLUGIN_NAMES = set(("swap", "crypto", "loop", "mdraid", "mpath", "dm", "nvdimm"))
+# nvme plugin is not generally available
+if hasattr(blockdev.Plugin, "NVME"):
+ _REQUESTED_PLUGIN_NAMES.add("nvme")
+
_requested_plugins = blockdev.plugin_specs_from_names(_REQUESTED_PLUGIN_NAMES)
# XXX force non-dbus LVM plugin
lvm_plugin = blockdev.PluginSpec()
@@ -74,7 +78,7 @@ lvm_plugin.name = blockdev.Plugin.LVM
lvm_plugin.so_name = "libbd_lvm.so.2"
_requested_plugins.append(lvm_plugin)
try:
- # do not check for dependencies during libblockdev initializtion, do runtime
+ # do not check for dependencies during libblockdev initialization, do runtime
# checks instead
blockdev.switch_init_checks(False)
succ_, avail_plugs = blockdev.try_reinit(require_plugins=_requested_plugins, reload=False, log_func=log_bd_message)
diff --git a/blivet/devices/__init__.py b/blivet/devices/__init__.py
index 8bb0a979..4d16466e 100644
--- a/blivet/devices/__init__.py
+++ b/blivet/devices/__init__.py
@@ -22,7 +22,7 @@
from .lib import device_path_to_name, device_name_to_disk_by_path, ParentList
from .device import Device
from .storage import StorageDevice
-from .disk import DiskDevice, DiskFile, DMRaidArrayDevice, MultipathDevice, iScsiDiskDevice, FcoeDiskDevice, DASDDevice, ZFCPDiskDevice, NVDIMMNamespaceDevice
+from .disk import DiskDevice, DiskFile, DMRaidArrayDevice, MultipathDevice, iScsiDiskDevice, FcoeDiskDevice, DASDDevice, ZFCPDiskDevice, NVDIMMNamespaceDevice, NVMeNamespaceDevice, NVMeFabricsNamespaceDevice
from .partition import PartitionDevice
from .dm import DMDevice, DMLinearDevice, DMCryptDevice, DMIntegrityDevice, DM_MAJORS
from .luks import LUKSDevice, IntegrityDevice
diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index bc4a1b5e..b5e25939 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -22,10 +22,13 @@
import gi
gi.require_version("BlockDev", "2.0")
+gi.require_version("GLib", "2.0")
from gi.repository import BlockDev as blockdev
+from gi.repository import GLib
import os
+from collections import namedtuple
from .. import errors
from .. import util
@@ -725,3 +728,101 @@ class NVDIMMNamespaceDevice(DiskDevice):
@property
def sector_size(self):
return self._sector_size
+
+
+NVMeController = namedtuple("NVMeController", ["name", "serial", "nvme_ver", "id", "subsysnqn"])
+
+
+class NVMeNamespaceDevice(DiskDevice):
+
+ """ NVMe namespace """
+ _type = "nvme"
+ _packages = ["nvme-cli"]
+
+ def __init__(self, device, **kwargs):
+ """
+ :param name: the device name (generally a device node's basename)
+ :type name: str
+ :keyword exists: does this device exist?
+ :type exists: bool
+ :keyword size: the device's size
+ :type size: :class:`~.size.Size`
+ :keyword parents: a list of parent devices
+ :type parents: list of :class:`StorageDevice`
+ :keyword format: this device's formatting
+ :type format: :class:`~.formats.DeviceFormat` or a subclass of it
+ :keyword nsid: namespace ID
+ :type nsid: int
+ """
+ self.nsid = kwargs.pop("nsid", 0)
+
+ DiskDevice.__init__(self, device, **kwargs)
+
+ self._clear_local_tags()
+ self.tags.add(Tags.local)
+ self.tags.add(Tags.nvme)
+
+ self._controllers = None
+
+ @property
+ def controllers(self):
+ if self._controllers is not None:
+ return self._controllers
+
+ self._controllers = []
+ if not hasattr(blockdev.Plugin, "NVME"):
+ # the nvme plugin is not generally available
+ log.debug("Failed to get controllers for %s: libblockdev NVME plugin is not available", self.name)
+ return self._controllers
+
+ try:
+ controllers = blockdev.nvme_find_ctrls_for_ns(self.sysfs_path)
+ except GLib.GError as err:
+ log.debug("Failed to get controllers for %s: %s", self.name, str(err))
+ return self._controllers
+
+ for controller in controllers:
+ try:
+ cpath = util.get_path_by_sysfs_path(controller, "char")
+ except RuntimeError as err:
+ log.debug("Failed to find controller %s: %s", controller, str(err))
+ continue
+ try:
+ cinfo = blockdev.nvme_get_controller_info(cpath)
+ except GLib.GError as err:
+ log.debug("Failed to get controller info for %s: %s", cpath, str(err))
+ continue
+ self._controllers.append(NVMeController(name=os.path.basename(cpath),
+ serial=cinfo.serial_number,
+ nvme_ver=cinfo.nvme_ver,
+ id=cinfo.ctrl_id,
+ subsysnqn=cinfo.subsysnqn))
+
+ return self._controllers
+
+
+class NVMeFabricsNamespaceDevice(NVMeNamespaceDevice, NetworkStorageDevice):
+
+ """ NVMe fabrics namespace """
+ _type = "nvme-fabrics"
+ _packages = ["nvme-cli"]
+
+ def __init__(self, device, **kwargs):
+ """
+ :param name: the device name (generally a device node's basename)
+ :type name: str
+ :keyword exists: does this device exist?
+ :type exists: bool
+ :keyword size: the device's size
+ :type size: :class:`~.size.Size`
+ :keyword parents: a list of parent devices
+ :type parents: list of :class:`StorageDevice`
+ :keyword format: this device's formatting
+ :type format: :class:`~.formats.DeviceFormat` or a subclass of it
+ """
+ NVMeNamespaceDevice.__init__(self, device, **kwargs)
+ NetworkStorageDevice.__init__(self)
+
+ self._clear_local_tags()
+ self.tags.add(Tags.remote)
+ self.tags.add(Tags.nvme)
diff --git a/blivet/devices/lib.py b/blivet/devices/lib.py
index 1bda0bab..b3c4c5b0 100644
--- a/blivet/devices/lib.py
+++ b/blivet/devices/lib.py
@@ -32,6 +32,7 @@ class Tags(str, Enum):
"""Tags that describe various classes of disk."""
local = 'local'
nvdimm = 'nvdimm'
+ nvme = 'nvme'
remote = 'remote'
removable = 'removable'
ssd = 'ssd'
diff --git a/blivet/populator/helpers/__init__.py b/blivet/populator/helpers/__init__.py
index c5ac412f..50ab4de8 100644
--- a/blivet/populator/helpers/__init__.py
+++ b/blivet/populator/helpers/__init__.py
@@ -6,7 +6,7 @@ from .formatpopulator import FormatPopulator
from .btrfs import BTRFSFormatPopulator
from .boot import AppleBootFormatPopulator, EFIFormatPopulator, MacEFIFormatPopulator
-from .disk import DiskDevicePopulator, iScsiDevicePopulator, FCoEDevicePopulator, MDBiosRaidDevicePopulator, DASDDevicePopulator, ZFCPDevicePopulator, NVDIMMNamespaceDevicePopulator
+from .disk import DiskDevicePopulator, iScsiDevicePopulator, FCoEDevicePopulator, MDBiosRaidDevicePopulator, DASDDevicePopulator, ZFCPDevicePopulator, NVDIMMNamespaceDevicePopulator, NVMeNamespaceDevicePopulator, NVMeFabricsNamespaceDevicePopulator
from .disklabel import DiskLabelFormatPopulator
from .dm import DMDevicePopulator
from .dmraid import DMRaidFormatPopulator
diff --git a/blivet/populator/helpers/disk.py b/blivet/populator/helpers/disk.py
index 9db7b810..9ed1eebe 100644
--- a/blivet/populator/helpers/disk.py
+++ b/blivet/populator/helpers/disk.py
@@ -22,13 +22,16 @@
import gi
gi.require_version("BlockDev", "2.0")
+gi.require_version("GLib", "2.0")
from gi.repository import BlockDev as blockdev
+from gi.repository import GLib
from ... import udev
from ... import util
from ...devices import DASDDevice, DiskDevice, FcoeDiskDevice, iScsiDiskDevice
from ...devices import MDBiosRaidArrayDevice, ZFCPDiskDevice, NVDIMMNamespaceDevice
+from ...devices import NVMeNamespaceDevice, NVMeFabricsNamespaceDevice
from ...devices import device_path_to_name
from ...storage_log import log_method_call
from .devicepopulator import DevicePopulator
@@ -251,3 +254,64 @@ class NVDIMMNamespaceDevicePopulator(DiskDevicePopulator):
log.info("%s is an NVDIMM namespace device", udev.device_get_name(self.data))
return kwargs
+
+
+class NVMeNamespaceDevicePopulator(DiskDevicePopulator):
+ priority = 20
+
+ _device_class = NVMeNamespaceDevice
+
+ @classmethod
+ def match(cls, data):
+ return (super(NVMeNamespaceDevicePopulator, NVMeNamespaceDevicePopulator).match(data) and
+ udev.device_is_nvme_namespace(data) and not udev.device_is_nvme_fabrics(data))
+
+ def _get_kwargs(self):
+ kwargs = super(NVMeNamespaceDevicePopulator, self)._get_kwargs()
+
+ log.info("%s is an NVMe local namespace device", udev.device_get_name(self.data))
+
+ if not hasattr(blockdev.Plugin, "NVME"):
+ # the nvme plugin is not generally available
+ return kwargs
+
+ path = udev.device_get_devname(self.data)
+ try:
+ ninfo = blockdev.nvme_get_namespace_info(path)
+ except GLib.GError as err:
+ log.debug("Failed to get namespace info for %s: %s", path, str(err))
+ else:
+ kwargs["nsid"] = ninfo.nsid
+
+ log.info("%s is an NVMe local namespace device", udev.device_get_name(self.data))
+ return kwargs
+
+
+class NVMeFabricsNamespaceDevicePopulator(DiskDevicePopulator):
+ priority = 20
+
+ _device_class = NVMeFabricsNamespaceDevice
+
+ @classmethod
+ def match(cls, data):
+ return (super(NVMeFabricsNamespaceDevicePopulator, NVMeFabricsNamespaceDevicePopulator).match(data) and
+ udev.device_is_nvme_namespace(data) and udev.device_is_nvme_fabrics(data))
+
+ def _get_kwargs(self):
+ kwargs = super(NVMeFabricsNamespaceDevicePopulator, self)._get_kwargs()
+
+ log.info("%s is an NVMe fabrics namespace device", udev.device_get_name(self.data))
+
+ if not hasattr(blockdev.Plugin, "NVME"):
+ # the nvme plugin is not generally available
+ return kwargs
+
+ path = udev.device_get_devname(self.data)
+ try:
+ ninfo = blockdev.nvme_get_namespace_info(path)
+ except GLib.GError as err:
+ log.debug("Failed to get namespace info for %s: %s", path, str(err))
+ else:
+ kwargs["nsid"] = ninfo.nsid
+
+ return kwargs
diff --git a/blivet/udev.py b/blivet/udev.py
index efbc53d6..533a1edc 100644
--- a/blivet/udev.py
+++ b/blivet/udev.py
@@ -1023,6 +1023,39 @@ def device_is_nvdimm_namespace(info):
return ninfo is not None
+def device_is_nvme_namespace(info):
+ if info.get("DEVTYPE") != "disk":
+ return False
+
+ if not info.get("SYS_PATH"):
+ return False
+
+ device = pyudev.Devices.from_sys_path(global_udev, info.get("SYS_PATH"))
+ while device:
+ if device.subsystem and device.subsystem.startswith("nvme"):
+ return True
+ device = device.parent
+
+ return False
+
+
+def device_is_nvme_fabrics(info):
+ if not device_is_nvme_namespace(info):
+ return False
+
+ if not hasattr(blockdev.Plugin, "NVME") or not blockdev.is_plugin_available(blockdev.Plugin.NVME): # pylint: disable=no-member
+ # nvme plugin is not available -- even if this is an nvme fabrics device we
+ # don't have tools to work with it, so we should pretend it's just a normal nvme
+ return False
+
+ controllers = blockdev.nvme_find_ctrls_for_ns(info.get("SYS_PATH", ""))
+ if not controllers:
+ return False
+
+ transport = util.get_sysfs_attr(controllers[0], "transport")
+ return transport in ("rdma", "fc", "tcp", "loop")
+
+
def device_is_hidden(info):
sysfs_path = device_get_sysfs_path(info)
hidden = util.get_sysfs_attr(sysfs_path, "hidden")
diff --git a/blivet/util.py b/blivet/util.py
index 0e578aea..3040ee5a 100644
--- a/blivet/util.py
+++ b/blivet/util.py
@@ -432,6 +432,15 @@ def get_sysfs_path_by_name(dev_node, class_name="block"):
"for '%s' (it is not at '%s')" % (dev_node, dev_path))
+def get_path_by_sysfs_path(sysfs_path, dev_type="block"):
+ """ Return device path for a given device sysfs path. """
+
+ dev = get_sysfs_attr(sysfs_path, "dev")
+ if not dev or not os.path.exists("/dev/%s/%s" % (dev_type, dev)):
+ raise RuntimeError("get_path_by_sysfs_path: Could not find device for %s" % sysfs_path)
+ return os.path.realpath("/dev/%s/%s" % (dev_type, dev))
+
+
def get_cow_sysfs_path(dev_path, dev_sysfsPath):
""" Return sysfs path of cow device for a given device.
"""
diff --git a/tests/unit_tests/populator_test.py b/tests/unit_tests/populator_test.py
index 369fe878..1ee29b57 100644
--- a/tests/unit_tests/populator_test.py
+++ b/tests/unit_tests/populator_test.py
@@ -13,6 +13,7 @@ from gi.repository import BlockDev as blockdev
from blivet.devices import DiskDevice, DMDevice, FileDevice, LoopDevice
from blivet.devices import MDRaidArrayDevice, MultipathDevice, OpticalDevice
from blivet.devices import PartitionDevice, StorageDevice, NVDIMMNamespaceDevice
+from blivet.devices import NVMeNamespaceDevice, NVMeFabricsNamespaceDevice
from blivet.devicelibs import lvm
from blivet.devicetree import DeviceTree
from blivet.formats import get_device_format_class, get_format, DeviceFormat
@@ -21,6 +22,7 @@ from blivet.populator.helpers import DiskDevicePopulator, DMDevicePopulator, Loo
from blivet.populator.helpers import LVMDevicePopulator, MDDevicePopulator, MultipathDevicePopulator
from blivet.populator.helpers import OpticalDevicePopulator, PartitionDevicePopulator
from blivet.populator.helpers import LVMFormatPopulator, MDFormatPopulator, NVDIMMNamespaceDevicePopulator
+from blivet.populator.helpers import NVMeNamespaceDevicePopulator, NVMeFabricsNamespaceDevicePopulator
from blivet.populator.helpers import get_format_helper, get_device_helper
from blivet.populator.helpers.boot import AppleBootFormatPopulator, EFIFormatPopulator, MacEFIFormatPopulator
from blivet.populator.helpers.formatpopulator import FormatPopulator
@@ -591,6 +593,128 @@ class NVDIMMNamespaceDevicePopulatorTestCase(PopulatorHelperTestCase):
self.assertTrue(device in devicetree.devices)
+class NVMeNamespaceDevicePopulatorTestCase(PopulatorHelperTestCase):
+ helper_class = NVMeNamespaceDevicePopulator
+
+ @patch("os.path.join")
+ @patch("blivet.udev.device_is_cdrom", return_value=False)
+ @patch("blivet.udev.device_is_dm", return_value=False)
+ @patch("blivet.udev.device_is_loop", return_value=False)
+ @patch("blivet.udev.device_is_md", return_value=False)
+ @patch("blivet.udev.device_is_partition", return_value=False)
+ @patch("blivet.udev.device_is_disk", return_value=True)
+ @patch("blivet.udev.device_is_nvme_fabrics", return_value=False)
+ @patch("blivet.udev.device_is_nvme_namespace", return_value=True)
+ def test_match(self, *args):
+ """Test matching of NVMe namespace device populator."""
+ device_is_nvme_namespace = args[0]
+ self.assertTrue(self.helper_class.match(None))
+ device_is_nvme_namespace.return_value = False
+ self.assertFalse(self.helper_class.match(None))
+
+ @patch("os.path.join")
+ @patch("blivet.udev.device_is_cdrom", return_value=False)
+ @patch("blivet.udev.device_is_dm", return_value=False)
+ @patch("blivet.udev.device_is_loop", return_value=False)
+ @patch("blivet.udev.device_is_md", return_value=False)
+ @patch("blivet.udev.device_is_partition", return_value=False)
+ @patch("blivet.udev.device_is_disk", return_value=True)
+ @patch("blivet.udev.device_is_nvme_fabrics", return_value=False)
+ @patch("blivet.udev.device_is_nvme_namespace", return_value=True)
+ def test_get_helper(self, *args):
+ """Test get_device_helper for NVMe namespaces."""
+ device_is_nvme_namespace = args[0]
+ data = {}
+ self.assertEqual(get_device_helper(data), self.helper_class)
+
+ # verify that setting one of the required True return values to False prevents success
+ device_is_nvme_namespace.return_value = False
+ self.assertNotEqual(get_device_helper(data), self.helper_class)
+ device_is_nvme_namespace.return_value = True
+
+ @patch("blivet.udev.device_get_name")
+ def test_run(self, *args):
+ """Test disk device populator."""
+ device_get_name = args[0]
+
+ devicetree = DeviceTree()
+
+ # set up some fake udev data to verify handling of specific entries
+ data = {'SYS_PATH': 'dummy', 'DEVNAME': 'dummy', 'ID_PATH': 'dummy'}
+
+ device_name = "nop"
+ device_get_name.return_value = device_name
+ helper = self.helper_class(devicetree, data)
+
+ device = helper.run()
+
+ self.assertIsInstance(device, NVMeNamespaceDevice)
+ self.assertTrue(device.exists)
+ self.assertTrue(device.is_disk)
+ self.assertTrue(device in devicetree.devices)
+
+
+class NVMeFabricsNamespaceDevicePopulatorTestCase(PopulatorHelperTestCase):
+ helper_class = NVMeFabricsNamespaceDevicePopulator
+
+ @patch("os.path.join")
+ @patch("blivet.udev.device_is_cdrom", return_value=False)
+ @patch("blivet.udev.device_is_dm", return_value=False)
+ @patch("blivet.udev.device_is_loop", return_value=False)
+ @patch("blivet.udev.device_is_md", return_value=False)
+ @patch("blivet.udev.device_is_partition", return_value=False)
+ @patch("blivet.udev.device_is_disk", return_value=True)
+ @patch("blivet.udev.device_is_nvme_namespace", return_value=True)
+ @patch("blivet.udev.device_is_nvme_fabrics", return_value=True)
+ def test_match(self, *args):
+ """Test matching of NVMe namespace device populator."""
+ device_is_nvme_fabrics = args[0]
+ self.assertTrue(self.helper_class.match(None))
+ device_is_nvme_fabrics.return_value = False
+ self.assertFalse(self.helper_class.match(None))
+
+ @patch("os.path.join")
+ @patch("blivet.udev.device_is_cdrom", return_value=False)
+ @patch("blivet.udev.device_is_dm", return_value=False)
+ @patch("blivet.udev.device_is_loop", return_value=False)
+ @patch("blivet.udev.device_is_md", return_value=False)
+ @patch("blivet.udev.device_is_partition", return_value=False)
+ @patch("blivet.udev.device_is_disk", return_value=True)
+ @patch("blivet.udev.device_is_nvme_namespace", return_value=True)
+ @patch("blivet.udev.device_is_nvme_fabrics", return_value=True)
+ def test_get_helper(self, *args):
+ """Test get_device_helper for NVMe namespaces."""
+ device_is_nvme_fabrics = args[0]
+ data = {}
+ self.assertEqual(get_device_helper(data), self.helper_class)
+
+ # verify that setting one of the required True return values to False prevents success
+ device_is_nvme_fabrics.return_value = False
+ self.assertNotEqual(get_device_helper(data), self.helper_class)
+ device_is_nvme_fabrics.return_value = True
+
+ @patch("blivet.udev.device_get_name")
+ def test_run(self, *args):
+ """Test disk device populator."""
+ device_get_name = args[0]
+
+ devicetree = DeviceTree()
+
+ # set up some fake udev data to verify handling of specific entries
+ data = {'SYS_PATH': 'dummy', 'DEVNAME': 'dummy', 'ID_PATH': 'dummy'}
+
+ device_name = "nop"
+ device_get_name.return_value = device_name
+ helper = self.helper_class(devicetree, data)
+
+ device = helper.run()
+
+ self.assertIsInstance(device, NVMeFabricsNamespaceDevice)
+ self.assertTrue(device.exists)
+ self.assertTrue(device.is_disk)
+ self.assertTrue(device in devicetree.devices)
+
+
class MDDevicePopulatorTestCase(PopulatorHelperTestCase):
helper_class = MDDevicePopulator
--
2.38.1
From af6ad7ff2f08180672690910d453158bcd463936 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 2 Dec 2022 12:20:47 +0100
Subject: [PATCH 2/3] Add transport and address to NVMeController info
---
blivet/devices/disk.py | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index b5e25939..796b5b03 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -730,7 +730,8 @@ class NVDIMMNamespaceDevice(DiskDevice):
return self._sector_size
-NVMeController = namedtuple("NVMeController", ["name", "serial", "nvme_ver", "id", "subsysnqn"])
+NVMeController = namedtuple("NVMeController", ["name", "serial", "nvme_ver", "id", "subsysnqn",
+ "transport", "transport_address"])
class NVMeNamespaceDevice(DiskDevice):
@@ -792,11 +793,15 @@ class NVMeNamespaceDevice(DiskDevice):
except GLib.GError as err:
log.debug("Failed to get controller info for %s: %s", cpath, str(err))
continue
+ ctrans = util.get_sysfs_attr(controller, "transport")
+ ctaddr = util.get_sysfs_attr(controller, "address")
self._controllers.append(NVMeController(name=os.path.basename(cpath),
serial=cinfo.serial_number,
nvme_ver=cinfo.nvme_ver,
id=cinfo.ctrl_id,
- subsysnqn=cinfo.subsysnqn))
+ subsysnqn=cinfo.subsysnqn,
+ transport=ctrans,
+ transport_address=ctaddr))
return self._controllers
--
2.38.1
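For orientation, a minimal sketch of how the two new fields might be consumed (not part of the patch; the "nvme" type check and the controllers property are assumptions based on the hunks above):

    import blivet

    b = blivet.Blivet()
    b.reset()  # populate the device tree from the running system

    for disk in (d for d in b.disks if d.type == "nvme"):
        for ctrl in disk.controllers:
            # transport is e.g. "pcie" or "tcp"; transport_address is the
            # bus or network address read from the controller's sysfs node
            print(disk.name, ctrl.name, ctrl.transport, ctrl.transport_address)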
From a04538936ff62958c272b5e2b2657d177df1ef13 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 8 Dec 2022 13:15:33 +0100
Subject: [PATCH 3/3] Add additional identifiers to NVMeNamespaceDevice
---
blivet/devices/disk.py | 2 ++
blivet/populator/helpers/disk.py | 3 +++
2 files changed, 5 insertions(+)
diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index 796b5b03..8842b4dc 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -756,6 +756,8 @@ class NVMeNamespaceDevice(DiskDevice):
:type nsid: int
"""
self.nsid = kwargs.pop("nsid", 0)
+ self.eui64 = kwargs.pop("eui64", "")
+ self.nguid = kwargs.pop("nguid", "")
DiskDevice.__init__(self, device, **kwargs)
diff --git a/blivet/populator/helpers/disk.py b/blivet/populator/helpers/disk.py
index 9ed1eebe..cf20d302 100644
--- a/blivet/populator/helpers/disk.py
+++ b/blivet/populator/helpers/disk.py
@@ -282,6 +282,9 @@ class NVMeNamespaceDevicePopulator(DiskDevicePopulator):
log.debug("Failed to get namespace info for %s: %s", path, str(err))
else:
kwargs["nsid"] = ninfo.nsid
+ kwargs["uuid"] = ninfo.uuid
+ kwargs["eui64"] = ninfo.eui64
+ kwargs["nguid"] = ninfo.nguid
log.info("%s is an NVMe local namespace device", udev.device_get_name(self.data))
return kwargs
--
2.38.1
View File
@ -1,7 +1,7 @@
From 11c3e695d9a2130f325bb5459a9881ff70338f71 Mon Sep 17 00:00:00 2001
From 7a86d4306e3022b73035e21f66d515174264700e Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 9 Mar 2023 13:18:42 +0100
Subject: [PATCH] Add support for specifying stripe size for RAID LVs
Subject: [PATCH 1/2] Add support for specifying stripe size for RAID LVs
---
blivet/devices/lvm.py | 28 +++++++++++++++++---
@ -170,3 +170,36 @@ index 995c2da4..d7b55224 100644
--
2.40.1
From bbfd1a70abe8271f5fe3d29fe2be3bb8a1c6ecbc Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 3 May 2023 08:55:31 +0200
Subject: [PATCH 2/2] Revert "tests: Skip test_lvcreate_type on CentOS/RHEL 9"
This reverts commit 16b90071145d2d0f19a38f3003561a0cc9d6e281.
The kernel issue was resolved; we no longer need to skip the test.
---
tests/skip.yml | 6 ------
1 file changed, 6 deletions(-)
diff --git a/tests/skip.yml b/tests/skip.yml
index 66b34493..c0ca0eaf 100644
--- a/tests/skip.yml
+++ b/tests/skip.yml
@@ -24,12 +24,6 @@
---
-- test: storage_tests.devices_test.lvm_test.LVMTestCase.test_lvm_raid
- skip_on:
- - distro: "centos"
- version: "9"
- reason: "Creating RAID 1 LV on CentOS/RHEL 9 causes a system deadlock"
-
- test: storage_tests.formats_test.fs_test.XFSTestCase.test_resize
skip_on:
- distro: ["centos", "enterprise_linux"]
--
2.40.1
View File
@ -6,7 +6,7 @@ Subject: [PATCH] Fix setting kickstart data
When changing our code to be PEP8 compliant we also accidentally
changed some pykickstart properties like onPart. This PR fixes this.
Resolves: rhbz#2175166
Resolves: rhbz#2174296
---
blivet/devices/btrfs.py | 4 ++--
blivet/devices/lvm.py | 2 +-
@ -18,7 +18,7 @@ index 1ae6a04d..3f56624e 100644
--- a/blivet/devices/btrfs.py
+++ b/blivet/devices/btrfs.py
@@ -498,8 +498,8 @@ class BTRFSVolumeDevice(BTRFSDevice, ContainerDevice, RaidDevice):
def populate_ksdata(self, data):
super(BTRFSVolumeDevice, self).populate_ksdata(data)
- data.data_level = self.data_level.name if self.data_level else None
@ -27,20 +27,20 @@ index 1ae6a04d..3f56624e 100644
+ data.metaDataLevel = self.metadata_level.name if self.metadata_level else None
data.devices = ["btrfs.%d" % p.id for p in self.parents]
data.preexist = self.exists
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 41358e9b..c3132457 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -1161,7 +1161,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
if self.req_grow:
# base size could be literal or percentage
- data.max_size_mb = self.req_max_size.convert_to(MiB)
+ data.maxSizeMB = self.req_max_size.convert_to(MiB)
elif data.resize:
data.size = self.target_size.convert_to(MiB)
diff --git a/blivet/devices/partition.py b/blivet/devices/partition.py
index 89d907c2..0e9250ce 100644
--- a/blivet/devices/partition.py
@ -51,7 +51,7 @@ index 89d907c2..0e9250ce 100644
if self.req_grow:
- data.max_size_mb = self.req_max_size.convert_to(MiB)
+ data.maxSizeMB = self.req_max_size.convert_to(MiB)
# data.disk = self.disk.name # by-id
if self.req_disks and len(self.req_disks) == 1:
data.disk = self.disk.name
@ -60,9 +60,9 @@ index 89d907c2..0e9250ce 100644
else:
- data.on_part = self.name # by-id
+ data.onPart = self.name # by-id
if data.resize:
# on s390x in particular, fractional sizes are reported, which
--
2.40.1
View File
@ -6,7 +6,7 @@ Subject: [PATCH] Do not set memory limit for LUKS2 when running in FIPS mode
With FIPS enabled, LUKS uses PBKDF2 and not Argon2, so the memory
limit is not a valid parameter.
Resolves: rhbz#2183437
Resolves: rhbz#2193096
---
blivet/devicelibs/crypto.py | 11 +++++++
blivet/formats/luks.py | 12 ++++----
@ -20,15 +20,15 @@ index f0caf0f7..68e68db1 100644
+++ b/blivet/devicelibs/crypto.py
@@ -21,6 +21,7 @@
#
import hashlib
+import os
import gi
gi.require_version("BlockDev", "2.0")
@@ -100,3 +101,13 @@ def calculate_integrity_metadata_size(device_size, algorithm=DEFAULT_INTEGRITY_A
jsize = (jsize / SECTOR_SIZE + 1) * SECTOR_SIZE # round up to sector
return msize + jsize
+
+
@ -60,7 +60,7 @@ index 2637e0c5..adf3c711 100644
+ self.pbkdf_args = LUKS2PBKDFArgs(max_memory_kb=int(mem_limit.convert_to(KiB)))
+ luks_data.pbkdf_args = self.pbkdf_args
+ log.info("PBKDF arguments for LUKS2 not specified, using defaults with memory limit %s", mem_limit)
if self.pbkdf_args:
pbkdf = blockdev.CryptoLUKSPBKDF(type=self.pbkdf_args.type,
diff --git a/tests/unit_tests/formats_tests/luks_test.py b/tests/unit_tests/formats_tests/luks_test.py
@ -69,12 +69,12 @@ index ec7b7592..1127e968 100644
+++ b/tests/unit_tests/formats_tests/luks_test.py
@@ -6,9 +6,14 @@ except ImportError:
import unittest
from blivet.formats.luks import LUKS
+from blivet.size import Size
+from blivet.static_data import luks_data
class LUKSNodevTestCase(unittest.TestCase):
+ def setUp(self):
+ luks_data.pbkdf_args = None
@ -85,7 +85,7 @@ index ec7b7592..1127e968 100644
@@ -51,6 +56,31 @@ class LUKSNodevTestCase(unittest.TestCase):
fmt = LUKS(cipher="aes-cbc-plain64")
self.assertEqual(fmt.key_size, 0)
+ def test_luks2_pbkdf_memory_fips(self):
+ fmt = LUKS()
+ with patch("blivet.formats.luks.blockdev.crypto") as bd:
@ -119,15 +119,15 @@ index 2743b7db..5d30c260 100644
--- a/tests/unit_tests/formats_tests/methods_test.py
+++ b/tests/unit_tests/formats_tests/methods_test.py
@@ -366,7 +366,8 @@ class LUKSMethodsTestCase(FormatMethodsTestCase):
def _test_create_backend(self):
self.format.exists = False
- self.format.create()
+ with patch("blivet.devicelibs.crypto.is_fips_enabled", return_value=False):
+ self.format.create()
self.assertTrue(self.patches["blockdev"].crypto.luks_format.called) # pylint: disable=no-member
def _test_setup_backend(self):
--
2.40.1
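The new default is gated on a FIPS check; a rough standalone sketch of such a check (an assumption mirroring the standard kernel interface, not a quote of blivet's helper):

    def is_fips_enabled():
        # the kernel exposes FIPS mode via this sysctl; "1" means enabled
        try:
            with open("/proc/sys/crypto/fips_enabled") as f:
                return f.read().strip() == "1"
        except OSError:
            return False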
View File
@ -1,9 +1,9 @@
From d06c45db59d0e917dbab4c283f2f04c8f9206a6e Mon Sep 17 00:00:00 2001
From 2d26f69abc2d793e753c0cddb3086264ca2b4e71 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 6 Mar 2023 10:51:42 +0100
Subject: [PATCH 1/5] Allow changing iSCSI initiator name after setting it
Resolves: rhbz#2083139
Resolves: rhbz#2221935
---
blivet/iscsi.py | 13 +++++++++++--
1 file changed, 11 insertions(+), 2 deletions(-)
@ -42,13 +42,14 @@ index 86451db3..0d063f2a 100644
2.40.1
From b71991d65c270c023364b03c499b4bf3e245fbd0 Mon Sep 17 00:00:00 2001
From 7c226ed0e14efcdd6e562e357d8f3465ad43ef33 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 6 Mar 2023 15:10:28 +0100
Subject: [PATCH 2/5] Add a basic test case for the iscsi module
Related: rhbz#2083139
Related: rhbz#2221935
---
misc/install-test-dependencies.yml | 3 +
tests/storage_tests/__init__.py | 2 +
tests/storage_tests/iscsi_test.py | 157 +++++++++++++++++++++++++++++
3 files changed, 162 insertions(+)
@ -231,7 +232,7 @@ index 00000000..00cc7c36
2.40.1
From 65e8150a7404e37dd2740841a88e7f2565836406 Mon Sep 17 00:00:00 2001
From dfd0c59a901f54ecfd8f538a2bb004a2e5ab6eec Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 6 Mar 2023 15:14:40 +0100
Subject: [PATCH 3/5] tests: Use blivet-specific prefix for targetcli backing
@ -241,7 +242,7 @@ The code is originally from libblockdev hence the "bd" prefix, we
should use a different prefix for blivet to be able to identify
which test suite failed to clean the files.
Related: rhbz#2083139
Related: rhbz#2221935
---
tests/storage_tests/storagetestcase.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
@ -263,13 +264,13 @@ index 35d57ce9..9f859977 100644
2.40.1
From 41278ef1b3f949303fd30fff2ccdde75f713c9f8 Mon Sep 17 00:00:00 2001
From 492fc9b8dfc2d4aa7cb44baa4408570babcb5e96 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 19 Jul 2023 13:57:39 +0200
Subject: [PATCH 4/5] iscsi: Save firmware initiator name to
/etc/iscsi/initiatorname.iscsi
Resolves: rhbz#2084043
Resolves: rhbz#2221932
---
blivet/iscsi.py | 5 +++++
1 file changed, 5 insertions(+)
@ -294,7 +295,7 @@ index 0d063f2a..8080a671 100644
2.40.1
From fce8b73965d968aab546bc7e0ecb65d1995da46f Mon Sep 17 00:00:00 2001
From 768d90815b7f95d0d6d278397fd6fd12a0490b5d Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 19 Jul 2023 10:38:45 +0200
Subject: [PATCH 5/5] tests: Improve iscsi_test.ISCSITestCase
@ -302,7 +303,7 @@ Subject: [PATCH 5/5] tests: Improve iscsi_test.ISCSITestCase
Changed how we create the initiator name ACLs based on the RTT test
case for rhbz#2084043, and also improved the test case itself.
Related: rhbz#2083139
Related: rhbz#2221935
---
tests/storage_tests/iscsi_test.py | 36 +++++++++++++++++++++----------
1 file changed, 25 insertions(+), 11 deletions(-)
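A minimal sketch of the behaviour patch 1/5 enables (IQNs illustrative; assumes the iscsi singleton shown in the hunks above):

    from blivet.iscsi import iscsi

    iscsi.initiator = "iqn.1994-05.com.redhat:example-one"
    # re-setting the initiator name is now allowed instead of being rejected
    iscsi.initiator = "iqn.1994-05.com.redhat:example-two"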
View File
@ -0,0 +1,26 @@
From 9dcd32dd85f7f45c3fe6c8d7b1de3b4c322c6807 Mon Sep 17 00:00:00 2001
From: Tomas Bzatek <tbzatek@redhat.com>
Date: Mon, 11 Sep 2023 13:50:24 +0200
Subject: [PATCH] nvme: Require additional rpms for dracut
The '95nvmf' dracut module needs a couple more packages, such as
networking support, for the NBFT (NVMe over TCP boot) to work.
Local PCIe NVMe devices have no special needs.
---
blivet/devices/disk.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index 2b49ef685..5053f7bb8 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -725,7 +725,8 @@ class NVMeFabricsNamespaceDevice(NVMeNamespaceDevice, NetworkStorageDevice):
""" NVMe fabrics namespace """
_type = "nvme-fabrics"
- _packages = ["nvme-cli"]
+ # dracut '95nvmf' module dependencies
+ _packages = ["nvme-cli", "dracut-network"]
def __init__(self, device, **kwargs):
"""
View File
@ -0,0 +1,107 @@
From 06597099906be55b106c234b3bf0c87ec7d90a07 Mon Sep 17 00:00:00 2001
From: Tomas Bzatek <tbzatek@redhat.com>
Date: Thu, 17 Aug 2023 14:45:18 +0200
Subject: [PATCH] nvme: Align HostNQN and HostID format to TP4126
Also don't overwrite existing files during startup() since they
might have been supplied by early boot stages.
---
blivet/nvme.py | 62 +++++++++++++++++++++++++++++++-------------------
1 file changed, 39 insertions(+), 23 deletions(-)
diff --git a/blivet/nvme.py b/blivet/nvme.py
index 17bead15e..5ac41cffa 100644
--- a/blivet/nvme.py
+++ b/blivet/nvme.py
@@ -18,16 +18,20 @@
#
import os
-import shutil
from . import errors
-from . import util
+
+import gi
+gi.require_version("BlockDev", "2.0")
+
+from gi.repository import BlockDev as blockdev
import logging
log = logging.getLogger("blivet")
-HOSTNQN_FILE = "/etc/nvme/hostnqn"
-HOSTID_FILE = "/etc/nvme/hostid"
+ETC_NVME_PATH = "/etc/nvme/"
+HOSTNQN_FILE = ETC_NVME_PATH + "hostnqn"
+HOSTID_FILE = ETC_NVME_PATH + "hostid"
class NVMe(object):
@@ -40,6 +44,8 @@ class NVMe(object):
def __init__(self):
self.started = False
+ self._hostnqn = None
+ self._hostid = None
# So that users can write nvme() to get the singleton instance
def __call__(self):
@@ -52,28 +58,38 @@ def startup(self):
if self.started:
return
- rc, nqn = util.run_program_and_capture_output(["nvme", "gen-hostnqn"])
- if rc != 0:
- raise errors.NVMeError("Failed to generate hostnqn")
-
- with open(HOSTNQN_FILE, "w") as f:
- f.write(nqn)
-
- rc, hid = util.run_program_and_capture_output(["dmidecode", "-s", "system-uuid"])
- if rc != 0:
- raise errors.NVMeError("Failed to generate host ID")
-
- with open(HOSTID_FILE, "w") as f:
- f.write(hid)
+ self._hostnqn = blockdev.nvme_get_host_nqn()
+ self._hostid = blockdev.nvme_get_host_id()
+ if not self._hostnqn:
+ self._hostnqn = blockdev.nvme_generate_host_nqn()
+ if not self._hostnqn:
+ raise errors.NVMeError("Failed to generate HostNQN")
+ if not self._hostid:
+ if 'uuid:' not in self._hostnqn:
+ raise errors.NVMeError("Missing UUID part in the HostNQN string '%s'" % self._hostnqn)
+ # derive HostID from HostNQN's UUID part
+ self._hostid = self._hostnqn.split('uuid:')[1]
+
+ # do not overwrite existing files, taken e.g. from initramfs
+ self.write("/", overwrite=False)
self.started = True
- def write(self, root): # pylint: disable=unused-argument
- # copy the hostnqn and hostid files
- if not os.path.isdir(root + "/etc/nvme"):
- os.makedirs(root + "/etc/nvme", 0o755)
- shutil.copyfile(HOSTNQN_FILE, root + HOSTNQN_FILE)
- shutil.copyfile(HOSTID_FILE, root + HOSTID_FILE)
+ def write(self, root, overwrite=True): # pylint: disable=unused-argument
+ # write down the hostnqn and hostid files
+ p = root + ETC_NVME_PATH
+ if not os.path.isdir(p):
+ os.makedirs(p, 0o755)
+ p = root + HOSTNQN_FILE
+ if overwrite or not os.path.isfile(p):
+ with open(p, "w") as f:
+ f.write(self._hostnqn)
+ f.write("\n")
+ p = root + HOSTID_FILE
+ if overwrite or not os.path.isfile(p):
+ with open(p, "w") as f:
+ f.write(self._hostid)
+ f.write("\n")
# Create nvme singleton
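The HostID derivation above can be shown in isolation (value illustrative, in the TP4126 HostNQN format the patch expects):

    hostnqn = "nqn.2014-08.org.nvmexpress:uuid:01234567-8900-abcd-efff-abcdabcdabcd"
    if "uuid:" not in hostnqn:
        raise ValueError("missing UUID part in HostNQN")
    hostid = hostnqn.split("uuid:")[1]  # "01234567-8900-abcd-efff-abcdabcdabcd"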
View File
@ -0,0 +1,58 @@
From 63da3cb8a40500c889c8faa4326f81d16997a3c8 Mon Sep 17 00:00:00 2001
From: Tomas Bzatek <tbzatek@redhat.com>
Date: Mon, 27 Nov 2023 18:55:55 +0100
Subject: [PATCH] nvme: Retrieve HostNQN from the first active fabrics connection
When no /etc/nvme/hostnqn exists, look for any active NVMe over Fabrics
connections and take the values from the first one, rather than
generating new ones.
---
blivet/nvme.py | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)
diff --git a/blivet/nvme.py b/blivet/nvme.py
index 5ac41cffa..2e4686e68 100644
--- a/blivet/nvme.py
+++ b/blivet/nvme.py
@@ -18,6 +18,7 @@
#
import os
+import glob
from . import errors
@@ -54,6 +55,22 @@ def __call__(self):
def __deepcopy__(self, memo_dict): # pylint: disable=unused-argument
return self
+ def _retrieve_fabrics_hostnqn(self):
+ for d in glob.glob('/sys/class/nvme-fabrics/ctl/nvme*/'):
+ try:
+ # invalidate old values
+ self._hostnqn = None
+ self._hostid = None
+ # read from sysfs
+ with open(os.path.join(d, 'hostnqn')) as f:
+ self._hostnqn = f.readline().strip()
+ with open(os.path.join(d, 'hostid')) as f:
+ self._hostid = f.readline().strip()
+ if self._hostnqn:
+ break
+ except Exception: # pylint: disable=broad-except
+ pass
+
def startup(self):
if self.started:
return
@@ -61,6 +78,10 @@ def startup(self):
self._hostnqn = blockdev.nvme_get_host_nqn()
self._hostid = blockdev.nvme_get_host_id()
if not self._hostnqn:
+ # see if there are any active fabrics connections and take their values over
+ self._retrieve_fabrics_hostnqn()
+ if not self._hostnqn:
+ # generate new values
self._hostnqn = blockdev.nvme_generate_host_nqn()
if not self._hostnqn:
raise errors.NVMeError("Failed to generate HostNQN")
View File
@ -0,0 +1,67 @@
From c807e234dfd07f3d0005c71501f0300284cd580b Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 6 Dec 2023 11:47:31 +0100
Subject: [PATCH] tests: Add a simple unit test for the NVMe module
---
tests/unit_tests/__init__.py | 1 +
tests/unit_tests/nvme_test.py | 38 +++++++++++++++++++++++++++++++++++
2 files changed, 39 insertions(+)
create mode 100644 tests/unit_tests/nvme_test.py
diff --git a/tests/unit_tests/__init__.py b/tests/unit_tests/__init__.py
index 589366e0f..62bef67f5 100644
--- a/tests/unit_tests/__init__.py
+++ b/tests/unit_tests/__init__.py
@@ -9,6 +9,7 @@
from .devicetree_test import *
from .events_test import *
from .misc_test import *
+from .nvme_test import *
from .parentlist_test import *
from .populator_test import *
from .size_test import *
diff --git a/tests/unit_tests/nvme_test.py b/tests/unit_tests/nvme_test.py
new file mode 100644
index 000000000..cb948687f
--- /dev/null
+++ b/tests/unit_tests/nvme_test.py
@@ -0,0 +1,38 @@
+import unittest
+
+try:
+ from unittest.mock import patch
+except ImportError:
+ from mock import patch
+
+from blivet.nvme import nvme
+
+
+class NVMeModuleTestCase(unittest.TestCase):
+
+ host_nqn = "nqn.2014-08.org.nvmexpress:uuid:01234567-8900-abcd-efff-abcdabcdabcd"
+
+ @patch("blivet.nvme.os")
+ @patch("blivet.nvme.blockdev")
+ def test_nvme_module(self, bd, os):
+ self.assertIsNotNone(nvme)
+ bd.nvme_get_host_nqn.return_value = self.host_nqn
+ bd.nvme_get_host_id.return_value = None # None = generate from host_nqn
+ os.path.isdir.return_value = False
+
+ # startup
+ with patch.object(nvme, "write") as write:
+ nvme.startup()
+ write.assert_called_once_with("/", overwrite=False)
+
+ self.assertTrue(nvme.started)
+ self.assertEqual(nvme._hostnqn, self.host_nqn)
+ self.assertEqual(nvme._hostid, "01234567-8900-abcd-efff-abcdabcdabcd")
+
+ # write
+ with patch("blivet.nvme.open") as op:
+ nvme.write("/test")
+
+ os.makedirs.assert_called_with("/test/etc/nvme/", 0o755)
+ op.assert_any_call("/test/etc/nvme/hostnqn", "w")
+ op.assert_any_call("/test/etc/nvme/hostid", "w")
View File
@ -1,4 +1,4 @@
From faef0408d2f7c61aade6d187389c61e64f9f373b Mon Sep 17 00:00:00 2001
From c20296b2df89a9edc4ea9cc41f94df89a8fbfd26 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 20 Apr 2023 12:35:30 +0200
Subject: [PATCH] Add support for creating shared LVM setups
@ -6,7 +6,7 @@ Subject: [PATCH] Add support for creating shared LVM setups
This feature is requested by GFS2 for the storage role. This adds
support for creating shared VGs and activating LVs in shared mode.
Resolves: RHEL-14021
Resolves: RHEL-324
---
blivet/devices/lvm.py | 44 +++++++++++++++++++----
blivet/tasks/availability.py | 9 +++++
View File
@ -0,0 +1,60 @@
From d7708bca72f4a7d0bfa732912e2087bd6aa8f379 Mon Sep 17 00:00:00 2001
From: Steffen Maier <maier@linux.ibm.com>
Date: Thu, 23 Feb 2023 13:28:50 +0100
Subject: [PATCH] add udev-builtin-path_id property to zfcp-attached SCSI disks
so anaconda can use it to display path_id information for multipath
members
Signed-off-by: Steffen Maier <maier@linux.ibm.com>
---
blivet/devices/disk.py | 2 ++
blivet/populator/helpers/disk.py | 1 +
tests/unit_tests/tags_test.py | 2 +-
3 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index 8842b4dc..746f6d58 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -556,10 +556,12 @@ class ZFCPDiskDevice(DiskDevice):
:keyword hba_id: ???
:keyword wwpn: ???
:keyword fcp_lun: ???
+ :keyword id_path: string from udev-builtin-path_id
"""
self.hba_id = kwargs.pop("hba_id")
self.wwpn = kwargs.pop("wwpn")
self.fcp_lun = kwargs.pop("fcp_lun")
+ self.id_path = kwargs.pop("id_path")
DiskDevice.__init__(self, device, **kwargs)
self._clear_local_tags()
self.tags.add(Tags.remote)
diff --git a/blivet/populator/helpers/disk.py b/blivet/populator/helpers/disk.py
index cf20d302..92e85688 100644
--- a/blivet/populator/helpers/disk.py
+++ b/blivet/populator/helpers/disk.py
@@ -223,6 +223,7 @@ class ZFCPDevicePopulator(DiskDevicePopulator):
def _get_kwargs(self):
kwargs = super(ZFCPDevicePopulator, self)._get_kwargs()
+ kwargs["id_path"] = udev.device_get_path(self.data)
for attr in ['hba_id', 'wwpn', 'fcp_lun']:
kwargs[attr] = udev.device_get_zfcp_attribute(self.data, attr=attr)
diff --git a/tests/unit_tests/tags_test.py b/tests/unit_tests/tags_test.py
index 49a2d72e..15fa2a40 100644
--- a/tests/unit_tests/tags_test.py
+++ b/tests/unit_tests/tags_test.py
@@ -72,7 +72,7 @@ class DeviceTagsTest(unittest.TestCase):
fcoe_device = FcoeDiskDevice('test6', nic=None, identifier=None, id_path=None)
self.assertIn(Tags.remote, fcoe_device.tags)
self.assertNotIn(Tags.local, fcoe_device.tags)
- zfcp_device = ZFCPDiskDevice('test7', hba_id=None, wwpn=None, fcp_lun=None)
+ zfcp_device = ZFCPDiskDevice('test7', hba_id=None, wwpn=None, fcp_lun=None, id_path=None)
self.assertIn(Tags.remote, zfcp_device.tags)
self.assertNotIn(Tags.local, zfcp_device.tags)
--
2.43.0
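As the updated unit test shows, id_path is now a required keyword; a hypothetical construction (all values placeholders; a real id_path comes from udev's builtin path_id):

    from blivet.devices import ZFCPDiskDevice

    dev = ZFCPDiskDevice("test7", hba_id=None, wwpn=None, fcp_lun=None,
                         id_path=None)
    print(dev.id_path)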
View File
@ -0,0 +1,172 @@
From 517f17481685afbabea6750b57d71a736f9a157e Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 25 May 2023 17:02:39 +0200
Subject: [PATCH] Do not add new PVs to the LVM devices file if it doesn't
exist and VGs are present
If there is a preexisting VG on the system when we create a new PV
and the LVM devices file doesn't exist, we would create the file and
add only the new PV to it, which means the preexisting VG would now be
ignored by LVM tools. This change skips adding newly created PVs
to the devices file in the same way 'pvcreate' and 'vgcreate' do.
---
blivet/devicelibs/lvm.py | 3 +
blivet/formats/lvmpv.py | 17 ++++-
tests/unit_tests/formats_tests/__init__.py | 1 +
tests/unit_tests/formats_tests/lvmpv_test.py | 73 ++++++++++++++++++++
4 files changed, 91 insertions(+), 3 deletions(-)
create mode 100644 tests/unit_tests/formats_tests/lvmpv_test.py
diff --git a/blivet/devicelibs/lvm.py b/blivet/devicelibs/lvm.py
index 16a8e8f8..dc7d0cbe 100644
--- a/blivet/devicelibs/lvm.py
+++ b/blivet/devicelibs/lvm.py
@@ -84,6 +84,9 @@ if hasattr(blockdev.LVMTech, "DEVICES"):
else:
HAVE_LVMDEVICES = False
+
+LVM_DEVICES_FILE = "/etc/lvm/devices/system.devices"
+
# list of devices that LVM is allowed to use
# with LVM >= 2.0.13 we'll use this for the --devices option and when creating
# the /etc/lvm/devices/system.devices file
diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py
index cb01b2f3..65acedbe 100644
--- a/blivet/formats/lvmpv.py
+++ b/blivet/formats/lvmpv.py
@@ -36,7 +36,7 @@ from ..size import Size
from ..errors import PhysicalVolumeError
from . import DeviceFormat, register_device_format
from .. import udev
-from ..static_data.lvm_info import pvs_info
+from ..static_data.lvm_info import pvs_info, vgs_info
import logging
log = logging.getLogger("blivet")
@@ -121,10 +121,21 @@ class LVMPhysicalVolume(DeviceFormat):
def supported(self):
return super(LVMPhysicalVolume, self).supported and self._plugin.available
- def lvmdevices_add(self):
+ def lvmdevices_add(self, force=True):
+ """ Add this PV to the LVM system devices file
+ :keyword force: whether to add the PV even if the system devices file doesn't exist and
+ VGs are present in the system
+ :type force: bool
+ """
+
if not lvm.HAVE_LVMDEVICES:
raise PhysicalVolumeError("LVM devices file feature is not supported")
+ if not os.path.exists(lvm.LVM_DEVICES_FILE) and vgs_info.cache and not force:
+ log.debug("Not adding %s to devices file: %s doesn't exist and there are VGs present in the system",
+ self.device, lvm.LVM_DEVICES_FILE)
+ return
+
try:
blockdev.lvm.devices_add(self.device)
except blockdev.LVMError as e:
@@ -151,7 +162,7 @@ class LVMPhysicalVolume(DeviceFormat):
# with lvmdbusd we need to call the pvcreate without --devices otherwise lvmdbusd
# wouldn't be able to find the newly created pv and the call would fail
blockdev.lvm.pvcreate(self.device, data_alignment=self.data_alignment, extra=[ea_yes])
- self.lvmdevices_add()
+ self.lvmdevices_add(force=False)
else:
blockdev.lvm.pvcreate(self.device, data_alignment=self.data_alignment, extra=[ea_yes])
diff --git a/tests/unit_tests/formats_tests/__init__.py b/tests/unit_tests/formats_tests/__init__.py
index d678900b..95c7a25b 100644
--- a/tests/unit_tests/formats_tests/__init__.py
+++ b/tests/unit_tests/formats_tests/__init__.py
@@ -2,6 +2,7 @@ from .device_test import *
from .disklabel_test import *
from .init_test import *
from .luks_test import *
+from .lvmpv_test import *
from .methods_test import *
from .misc_test import *
from .selinux_test import *
diff --git a/tests/unit_tests/formats_tests/lvmpv_test.py b/tests/unit_tests/formats_tests/lvmpv_test.py
new file mode 100644
index 00000000..6490c7d4
--- /dev/null
+++ b/tests/unit_tests/formats_tests/lvmpv_test.py
@@ -0,0 +1,73 @@
+try:
+ from unittest.mock import patch
+except ImportError:
+ from mock import patch
+
+from contextlib import contextmanager
+
+import unittest
+
+from blivet.formats.lvmpv import LVMPhysicalVolume
+
+
+class LVMPVNodevTestCase(unittest.TestCase):
+
+ @contextmanager
+ def patches(self):
+ patchers = dict()
+ mocks = dict()
+
+ patchers["blockdev"] = patch("blivet.formats.lvmpv.blockdev")
+ patchers["lvm"] = patch("blivet.formats.lvmpv.lvm")
+ patchers["vgs_info"] = patch("blivet.formats.lvmpv.vgs_info")
+ patchers["os"] = patch("blivet.formats.lvmpv.os")
+
+ for name, patcher in patchers.items():
+ mocks[name] = patcher.start()
+
+ yield mocks
+
+ for patcher in patchers.values():
+ patcher.stop()
+
+ def test_lvm_devices(self):
+ fmt = LVMPhysicalVolume(device="/dev/test")
+
+ with self.patches() as mock:
+ # LVM devices file not enabled/supported -> devices_add should not be called
+ mock["lvm"].HAVE_LVMDEVICES = False
+
+ fmt._create()
+
+ mock["blockdev"].lvm.devices_add.assert_not_called()
+
+ with self.patches() as mock:
+ # LVM devices file enabled and devices file exists -> devices_add should be called
+ mock["lvm"].HAVE_LVMDEVICES = True
+ mock["os"].path.exists.return_value = True
+
+ fmt._create()
+
+ mock["blockdev"].lvm.devices_add.assert_called_with("/dev/test")
+
+ with self.patches() as mock:
+ # LVM devices file enabled and devices file doesn't exist
+ # and no existing VGs present -> devices_add should be called
+ mock["lvm"].HAVE_LVMDEVICES = True
+ mock["os"].path.exists.return_value = False
+ mock["vgs_info"].cache = {}
+
+ fmt._create()
+
+ mock["blockdev"].lvm.devices_add.assert_called_with("/dev/test")
+
+ with self.patches() as mock:
+ # LVM devices file enabled and devices file doesn't exist
+ # and existing VGs present -> devices_add should not be called
+ mock["lvm"].HAVE_LVMDEVICES = True
+ mock["os"].path.exists.return_value = False
+ mock["vgs_info"].cache = {"fake_vg_uuid": "fake_vg_data"}
+
+ fmt._create()
+
+ mock["blockdev"].lvm.devices_add.assert_not_called()
--
2.43.0
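A usage sketch of the adjusted API (device path illustrative; assumes the lvmdevices feature is available and the PV was just created):

    from blivet.formats.lvmpv import LVMPhysicalVolume

    fmt = LVMPhysicalVolume(device="/dev/sdx1")
    # force=False mirrors pvcreate: skip the devices file when
    # /etc/lvm/devices/system.devices is absent and VGs already exist
    fmt.lvmdevices_add(force=False)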
View File
@ -0,0 +1,129 @@
From 0777b9d519421f3c46f6dcd51e39ecdc2956e2e0 Mon Sep 17 00:00:00 2001
From: Jan Pokorny <japokorn@redhat.com>
Date: Thu, 25 Apr 2024 14:06:13 +0200
Subject: [PATCH] Added support for PV grow
The storage role requires support for the case when a PV has to be resized
to fill all available space after its device's size changes (usually on a VM).
A new flag 'grow_to_fill' was added, which marks the device for size
expansion (all available space is taken).
The proper size is determined by LVM, avoiding inaccurate size
calculations in blivet.
---
blivet/formats/__init__.py | 4 +++-
blivet/formats/lvmpv.py | 23 ++++++++++++++++++-
blivet/tasks/pvtask.py | 7 +++++-
.../storage_tests/formats_test/lvmpv_test.py | 10 ++++++++
4 files changed, 41 insertions(+), 3 deletions(-)
diff --git a/blivet/formats/__init__.py b/blivet/formats/__init__.py
index b1ad740e..eb8b6ab3 100644
--- a/blivet/formats/__init__.py
+++ b/blivet/formats/__init__.py
@@ -424,7 +424,9 @@ class DeviceFormat(ObjectID):
if not self.resizable:
raise FormatResizeError("format not resizable", self.device)
- if self.target_size == self.current_size:
+ # skip if sizes are equal unless grow to fill on lvmpv is requested
+ if (self.target_size == self.current_size and
+ (self.type != "lvmpv" or not self.grow_to_fill)): # pylint: disable=no-member
return
if not self._resize.available:
diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py
index 65acedbe..51fa4a3c 100644
--- a/blivet/formats/lvmpv.py
+++ b/blivet/formats/lvmpv.py
@@ -33,7 +33,7 @@ from ..devicelibs import lvm
from ..tasks import availability, pvtask
from ..i18n import N_
from ..size import Size
-from ..errors import PhysicalVolumeError
+from ..errors import DeviceFormatError, PhysicalVolumeError
from . import DeviceFormat, register_device_format
from .. import udev
from ..static_data.lvm_info import pvs_info, vgs_info
@@ -98,6 +98,9 @@ class LVMPhysicalVolume(DeviceFormat):
self.inconsistent_vg = False
+ # when set to True, blivet will try to resize the PV to fill all available space
+ self._grow_to_fill = False
+
def __repr__(self):
s = DeviceFormat.__repr__(self)
s += (" vg_name = %(vg_name)s vg_uuid = %(vg_uuid)s"
@@ -106,6 +109,24 @@ class LVMPhysicalVolume(DeviceFormat):
"pe_start": self.pe_start, "data_alignment": self.data_alignment})
return s
+ @property
+ def grow_to_fill(self):
+ """
+ Can be set to True to mark the format for resize so that it matches the size of its device.
+ (The main use case is a disk size increase on a VM.)
+ Uses blockdev/lvm for the exact new size calculation.
+ ActionResizeFormat has to be executed to apply the change.
+ The format has to be resizable (i.e. run format.update_size_info() first) to allow this.
+ """
+ return self._grow_to_fill
+
+ @grow_to_fill.setter
+ def grow_to_fill(self, fill: bool):
+ if fill is True:
+ if not self.resizable:
+ raise DeviceFormatError("format is not resizable")
+ self._grow_to_fill = fill
+
@property
def dict(self):
d = super(LVMPhysicalVolume, self).dict
diff --git a/blivet/tasks/pvtask.py b/blivet/tasks/pvtask.py
index 04c8a4d1..b5bd72e0 100644
--- a/blivet/tasks/pvtask.py
+++ b/blivet/tasks/pvtask.py
@@ -82,6 +82,11 @@ class PVResize(task.BasicApplication, dfresize.DFResizeTask):
def do_task(self): # pylint: disable=arguments-differ
""" Resizes the LVMPV format. """
try:
- blockdev.lvm.pvresize(self.pv.device, self.pv.target_size.convert_to(self.unit))
+ if self.pv.grow_to_fill:
+ # resize PV to fill all available space on device by omitting
+ # the size parameter
+ blockdev.lvm.pvresize(self.pv.device, 0)
+ else:
+ blockdev.lvm.pvresize(self.pv.device, self.pv.target_size.convert_to(self.unit))
except blockdev.LVMError as e:
raise PhysicalVolumeError(e)
diff --git a/tests/storage_tests/formats_test/lvmpv_test.py b/tests/storage_tests/formats_test/lvmpv_test.py
index cdc33ec4..d2811f3e 100644
--- a/tests/storage_tests/formats_test/lvmpv_test.py
+++ b/tests/storage_tests/formats_test/lvmpv_test.py
@@ -37,6 +37,9 @@ class LVMPVTestCase(loopbackedtestcase.LoopBackedTestCase):
self.fmt.update_size_info()
self.assertTrue(self.fmt.resizable)
+ # save the pv maximum size
+ maxpvsize = self.fmt.current_size
+
# resize the format
new_size = Size("50 MiB")
self.fmt.target_size = new_size
@@ -46,5 +49,12 @@ class LVMPVTestCase(loopbackedtestcase.LoopBackedTestCase):
self.fmt.update_size_info()
self.assertEqual(self.fmt.current_size, new_size)
+ # Test growing PV to fill all available space on the device
+ self.fmt.grow_to_fill = True
+ self.fmt.do_resize()
+
+ self.fmt.update_size_info()
+ self.assertEqual(self.fmt.current_size, maxpvsize)
+
def _pvremove(self):
self.fmt._destroy()
--
2.45.0
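The intended flow, condensed from the storage test above (fmt stands for an existing, just-probed LVMPV format):

    fmt.update_size_info()   # the format must be detected as resizable first
    fmt.grow_to_fill = True  # raises DeviceFormatError if it is not resizable
    fmt.do_resize()          # pvresize is called with size 0 -> LVM fills the device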
File diff suppressed because it is too large

View File
@ -0,0 +1,76 @@
From c2e247fe953568a65c73f5408a6da7af12c4d6a1 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 18 Jun 2024 14:47:39 +0200
Subject: [PATCH 1/2] tests: Try waiting after partition creation for XFS
resize test
The test randomly fails to find the newly created partition, so
let's try waiting a bit with udev settle.
---
tests/skip.yml | 6 ------
tests/storage_tests/formats_test/fs_test.py | 2 ++
2 files changed, 2 insertions(+), 6 deletions(-)
diff --git a/tests/skip.yml b/tests/skip.yml
index c0ca0eaf..8d353b1b 100644
--- a/tests/skip.yml
+++ b/tests/skip.yml
@@ -23,9 +23,3 @@
# - all "skips" can specified as a list, for example 'version: [10, 11]'
---
-
-- test: storage_tests.formats_test.fs_test.XFSTestCase.test_resize
- skip_on:
- - distro: ["centos", "enterprise_linux"]
- version: "9"
- reason: "Creating partitions on loop devices is broken on CentOS/RHEL 9 latest kernel"
diff --git a/tests/storage_tests/formats_test/fs_test.py b/tests/storage_tests/formats_test/fs_test.py
index 1d42dc21..59c0f998 100644
--- a/tests/storage_tests/formats_test/fs_test.py
+++ b/tests/storage_tests/formats_test/fs_test.py
@@ -10,6 +10,7 @@ from blivet.errors import DeviceFormatError, FSError
from blivet.formats import get_format
from blivet.devices import PartitionDevice, DiskDevice
from blivet.flags import flags
+from blivet import udev
from .loopbackedtestcase import LoopBackedTestCase
@@ -107,6 +108,7 @@ class XFSTestCase(fstesting.FSAsRoot):
pend = pstart + int(Size(size) / disk.format.parted_device.sectorSize)
disk.format.add_partition(pstart, pend, parted.PARTITION_NORMAL)
disk.format.parted_disk.commit()
+ udev.settle()
part = disk.format.parted_disk.getPartitionBySector(pstart)
device = PartitionDevice(os.path.basename(part.path))
--
2.45.2
From 511d64c69618de0e7bb567353e5e0c92b61da10e Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 7 Mar 2024 09:45:28 +0100
Subject: [PATCH 2/2] Fix util.detect_virt on Amazon
---
blivet/util.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/blivet/util.py b/blivet/util.py
index 3040ee5a..15d41b4f 100644
--- a/blivet/util.py
+++ b/blivet/util.py
@@ -1137,7 +1137,7 @@ def detect_virt():
except (safe_dbus.DBusCallError, safe_dbus.DBusPropertyError):
return False
else:
- return vm[0] in ('qemu', 'kvm', 'xen')
+ return vm[0] in ('qemu', 'kvm', 'xen', 'microsoft', 'amazon')
def natural_sort_key(device):
--
2.45.2
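The virtualization string checked above comes from systemd; a rough standalone equivalent over D-Bus (assumes the dbus Python bindings; blivet itself goes through its safe_dbus wrapper):

    import dbus

    bus = dbus.SystemBus()
    systemd = bus.get_object("org.freedesktop.systemd1", "/org/freedesktop/systemd1")
    props = dbus.Interface(systemd, "org.freedesktop.DBus.Properties")
    virt = str(props.Get("org.freedesktop.systemd1.Manager", "Virtualization"))
    # empty on bare metal; "qemu", "kvm", "xen", "microsoft", "amazon", ... otherwise
    print(virt or "none")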
View File
@ -0,0 +1,165 @@
From 39382d82c35494d0b359b32a48de723d9f3a0908 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 21 Nov 2022 11:04:40 +0100
Subject: [PATCH 1/2] Add a basic read-only support for UDF filesystem
Resolves: RHEL-13329
---
blivet/formats/fs.py | 12 ++++++++++++
blivet/populator/helpers/disklabel.py | 2 +-
blivet/populator/helpers/partition.py | 2 +-
blivet/tasks/fsmount.py | 4 ++++
tests/storage_tests/formats_test/fs_test.py | 4 ++++
tests/unit_tests/populator_test.py | 5 +++++
6 files changed, 27 insertions(+), 2 deletions(-)
diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py
index 3f553eb0..5b60bd6f 100644
--- a/blivet/formats/fs.py
+++ b/blivet/formats/fs.py
@@ -1359,6 +1359,18 @@ class Iso9660FS(FS):
register_device_format(Iso9660FS)
+class UDFFS(FS):
+
+ """ UDF filesystem. """
+ _type = "udf"
+ _modules = ["udf"]
+ _supported = True
+ _mount_class = fsmount.UDFFSMount
+
+
+register_device_format(UDFFS)
+
+
class NoDevFS(FS):
""" nodev filesystem base class """
diff --git a/blivet/populator/helpers/disklabel.py b/blivet/populator/helpers/disklabel.py
index db10638e..842cd308 100644
--- a/blivet/populator/helpers/disklabel.py
+++ b/blivet/populator/helpers/disklabel.py
@@ -42,7 +42,7 @@ class DiskLabelFormatPopulator(FormatPopulator):
# XXX ignore disklabels on multipath or biosraid member disks
return (bool(udev.device_get_disklabel_type(data)) and
not udev.device_is_biosraid_member(data) and
- udev.device_get_format(data) != "iso9660" and
+ udev.device_get_format(data) not in ("iso9660", "udf") and
not (device.is_disk and udev.device_get_format(data) == "mpath_member"))
def _get_kwargs(self):
diff --git a/blivet/populator/helpers/partition.py b/blivet/populator/helpers/partition.py
index 8659bd48..9257407e 100644
--- a/blivet/populator/helpers/partition.py
+++ b/blivet/populator/helpers/partition.py
@@ -75,7 +75,7 @@ class PartitionDevicePopulator(DevicePopulator):
# For partitions on disklabels parted cannot make sense of, go ahead
# and instantiate a PartitionDevice so our view of the layout is
# complete.
- if not disk.partitionable or disk.format.type == "iso9660" or disk.format.hidden:
+ if not disk.partitionable or disk.format.type in ("iso9660", "udf") or disk.format.hidden:
log.debug("ignoring partition %s on %s", name, disk.format.type)
return
diff --git a/blivet/tasks/fsmount.py b/blivet/tasks/fsmount.py
index 65b2470a..a7f493dd 100644
--- a/blivet/tasks/fsmount.py
+++ b/blivet/tasks/fsmount.py
@@ -163,6 +163,10 @@ class Iso9660FSMount(FSMount):
options = ["ro"]
+class UDFFSMount(FSMount):
+ options = ["ro"]
+
+
class NoDevFSMount(FSMount):
@property
--- a/tests/storage_tests/formats_test/fs_test.py
+++ b/tests/storage_tests/formats_test/fs_test.py
@@ -223,6 +223,10 @@ class Iso9660FS(fstesting.FSAsRoot):
_fs_class = fs.Iso9660FS
+class UDFFS(fstesting.FSAsRoot):
+ _fs_class = fs.UDFFS
+
+
@unittest.skip("Too strange to test using this framework.")
class NoDevFSTestCase(fstesting.FSAsRoot):
_fs_class = fs.NoDevFS
diff --git a/tests/unit_tests/populator_test.py b/tests/unit_tests/populator_test.py
index 1ee29b57..df56e1f5 100644
--- a/tests/unit_tests/populator_test.py
+++ b/tests/unit_tests/populator_test.py
@@ -979,6 +979,11 @@ class DiskLabelPopulatorTestCase(PopulatorHelperTestCase):
self.assertFalse(self.helper_class.match(data, device))
device_get_format.return_value = None
+ # no match for whole-disk udf filesystem
+ device_get_format.return_value = "udf"
+ self.assertFalse(self.helper_class.match(data, device))
+ device_get_format.return_value = None
+
# no match for biosraid members
device_is_biosraid_member.return_value = True
self.assertFalse(self.helper_class.match(data, device))
--
2.46.0
From 54e6cc7a7e01bfe8a627b2c2f4ba352c9e6e5564 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 14 Mar 2024 15:10:27 +0100
Subject: [PATCH 2/2] nvme: Skip startup/write when NVMe plugin isn't available
This is similar to other modules like iSCSI where these methods
are silently skipped if the technology isn't supported or
available.
Resolves: RHEL-28124
---
blivet/nvme.py | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/blivet/nvme.py b/blivet/nvme.py
index b1513c19..4309dea3 100644
--- a/blivet/nvme.py
+++ b/blivet/nvme.py
@@ -71,10 +71,21 @@ class NVMe(object):
except Exception: # pylint: disable=broad-except
pass
+ def available(self):
+ if not hasattr(blockdev.Plugin, "NVME"):
+ return False
+ if not hasattr(blockdev.NVMETech, "FABRICS"):
+ return False
+ return True
+
def startup(self):
if self.started:
return
+ if not self.available():
+ log.info("NVMe support not available, not starting")
+ return
+
self._hostnqn = blockdev.nvme_get_host_nqn()
self._hostid = blockdev.nvme_get_host_id()
if not self._hostnqn:
@@ -97,6 +108,9 @@ class NVMe(object):
self.started = True
def write(self, root, overwrite=True): # pylint: disable=unused-argument
+ if not self.available():
+ return
+
# write down the hostnqn and hostid files
p = root + ETC_NVME_PATH
if not os.path.isdir(p):
--
2.46.0
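A sketch of how the new format is reached through the normal lookup path (device path illustrative; the mount is read-only per UDFFSMount.options):

    from blivet.formats import get_format

    fmt = get_format("udf", device="/dev/sr0", exists=True)
    fmt.mount(mountpoint="/mnt/udf")  # mounted with the "ro" option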
View File
@ -0,0 +1,27 @@
From 7677fc312b821a9c67750220f2494d06f2357780 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 18 Sep 2024 15:30:05 +0200
Subject: [PATCH] Fix checking for NVMe plugin availability
---
blivet/nvme.py | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/blivet/nvme.py b/blivet/nvme.py
index 4309dea3..72a47070 100644
--- a/blivet/nvme.py
+++ b/blivet/nvme.py
@@ -76,6 +76,10 @@ class NVMe(object):
return False
if not hasattr(blockdev.NVMETech, "FABRICS"):
return False
+ try:
+ blockdev.nvme.is_tech_avail(blockdev.NVMETech.FABRICS, 0) # pylint: disable=no-member
+ except (blockdev.BlockDevNotImplementedError, blockdev.NVMEError):
+ return False
return True
def startup(self):
--
2.46.1
View File
@ -0,0 +1,30 @@
From 6a6eca0c9604a9bd508d98b75c5608f20a3a7bf6 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 24 Oct 2024 12:18:58 +0200
Subject: [PATCH] Align sizes up for growable LVs
Growable LVs usually start at their minimum size, so rounding the size
down can push it below the allowed minimum.
Resolves: RHEL-8036
Resolves: RHEL-19725
---
blivet/devices/lvm.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 62974443..1293cae2 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -2574,7 +2574,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
if not isinstance(newsize, Size):
raise ValueError("new size must be of type Size")
- newsize = self.vg.align(newsize)
+ newsize = self.vg.align(newsize, roundup=self.growable)
log.debug("trying to set lv %s size to %s", self.name, newsize)
# Don't refuse to set size if we think there's not enough space in the
# VG for an existing LV, since its existence proves there is enough
--
2.47.0
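Why the rounding direction matters, in plain arithmetic (a 4 MiB extent size assumed for illustration):

    MiB = 1024 * 1024
    pe_size = 4 * MiB
    requested = 513 * MiB

    down = (requested // pe_size) * pe_size                # 512 MiB
    up = ((requested + pe_size - 1) // pe_size) * pe_size  # 516 MiB
    # a growable LV created at its minimum size must never be aligned below
    # that minimum, hence roundup=self.growable in the patch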
View File
@ -0,0 +1,32 @@
From c2177aa362d20278a0ebd5c25a776f952d83e5b1 Mon Sep 17 00:00:00 2001
From: Jan Pokorny <japokorn@redhat.com>
Date: Fri, 11 Oct 2024 17:17:41 +0200
Subject: [PATCH] Modified passphrase in stratis test
FIPS requires a passphrase at least 8 characters long. The dummy
passphrase used in the stratis test was too short, causing encryption
tests with FIPS enabled to fail.
Changed the passphrase.
fixes RHEL-45173, RHEL-8029
---
tests/storage_tests/devices_test/stratis_test.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/storage_tests/devices_test/stratis_test.py b/tests/storage_tests/devices_test/stratis_test.py
index 5aaa12d4..21c4d0f5 100644
--- a/tests/storage_tests/devices_test/stratis_test.py
+++ b/tests/storage_tests/devices_test/stratis_test.py
@@ -230,7 +230,7 @@ class StratisTestCaseClevis(StratisTestCaseBase):
blivet.partitioning.do_partitioning(self.storage)
pool = self.storage.new_stratis_pool(name="blivetTestPool", parents=[bd],
- encrypted=True, passphrase="abcde",
+ encrypted=True, passphrase="fipsneeds8chars",
clevis=StratisClevisConfig(pin="tang",
tang_url=self._tang_server,
tang_thumbprint=None))
--
2.45.0
View File
@ -0,0 +1,35 @@
From b7f03738543a4bb416fb19c7138f0b9d3049af61 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 8 Nov 2024 09:19:45 +0100
Subject: [PATCH] Fix "Modified passphrase in stratis test"
Follow-up for 68708e347ef7b2f98312c76aa80366091dd4aade: two more
places where the passphrase is too short for FIPS mode.
Resolves: RHEL-8029
---
tests/storage_tests/devices_test/stratis_test.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tests/storage_tests/devices_test/stratis_test.py b/tests/storage_tests/devices_test/stratis_test.py
index 21c4d0f50..9792e0618 100644
--- a/tests/storage_tests/devices_test/stratis_test.py
+++ b/tests/storage_tests/devices_test/stratis_test.py
@@ -105,7 +105,7 @@ def test_stratis_encrypted(self):
blivet.partitioning.do_partitioning(self.storage)
pool = self.storage.new_stratis_pool(name="blivetTestPool", parents=[bd],
- encrypted=True, passphrase="abcde")
+ encrypted=True, passphrase="fipsneeds8chars")
self.storage.create_device(pool)
self.storage.do_it()
@@ -260,7 +260,7 @@ def test_stratis_encrypted_clevis_tpm(self):
blivet.partitioning.do_partitioning(self.storage)
pool = self.storage.new_stratis_pool(name="blivetTestPool", parents=[bd],
- encrypted=True, passphrase="abcde",
+ encrypted=True, passphrase="fipsneeds8chars",
clevis=StratisClevisConfig(pin="tpm2"))
self.storage.create_device(pool)
View File
@ -0,0 +1,39 @@
From dd0ac302f2afcbbf4fd455416850a97a11d2d1b5 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 10 Mar 2025 09:52:27 +0100
Subject: [PATCH] Set persistent allow-discards flag for newly created LUKS
devices
We are currently using the "allow-discards" option in /etc/crypttab to
set the discards/fstrim feature for LUKS, but that doesn't work
on Fedora Silverblue, so we need to set the persistent flag in the
LUKS header instead.
Resolves: RHEL-82430
---
blivet/formats/luks.py | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/blivet/formats/luks.py b/blivet/formats/luks.py
index adf3c7112..4fc6b1342 100644
--- a/blivet/formats/luks.py
+++ b/blivet/formats/luks.py
@@ -336,7 +336,18 @@ def _create(self, **kwargs):
def _post_create(self, **kwargs):
super(LUKS, self)._post_create(**kwargs)
+
+ if self.luks_version == "luks2" and flags.discard_new:
+ try:
+ blockdev.crypto.luks_set_persistent_flags(self.device,
+ blockdev.CryptoLUKSPersistentFlags.ALLOW_DISCARDS)
+ except blockdev.CryptoError as e:
+ raise LUKSError("Failed to set allow discards flag for newly created LUKS format: %s" % str(e))
+ except AttributeError:
+ log.warning("Cannot set allow discards flag: not supported")
+
self.uuid = blockdev.crypto.luks_uuid(self.device)
+
if not self.map_name:
self.map_name = "luks-%s" % self.uuid
View File
@ -0,0 +1,83 @@
From 3e3b8d415ca50c4feaaf8d3688f0ebda2522d866 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 20 Jan 2025 13:02:50 +0100
Subject: [PATCH] Do not remove PVs from devices file if disabled or doesn't
exist
When the file doesn't exist, the 'lvmdevices --deldev' call will
fail, but it will still create the devices file. This means we would
now have an empty devices file and all subsequent LVM calls would fail.
Resolves: RHEL-84662
---
blivet/formats/lvmpv.py | 5 +++++
tests/unit_tests/formats_tests/lvmpv_test.py | 22 ++++++++++++++++++++
2 files changed, 27 insertions(+)
diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py
index 51fa4a3c8..f5d71dbd1 100644
--- a/blivet/formats/lvmpv.py
+++ b/blivet/formats/lvmpv.py
@@ -166,6 +166,11 @@ def lvmdevices_remove(self):
if not lvm.HAVE_LVMDEVICES:
raise PhysicalVolumeError("LVM devices file feature is not supported")
+ if not os.path.exists(lvm.LVM_DEVICES_FILE):
+ log.debug("Not removing %s from devices file: %s doesn't exist",
+ self.device, lvm.LVM_DEVICES_FILE)
+ return
+
try:
blockdev.lvm.devices_delete(self.device)
except blockdev.LVMError as e:
diff --git a/tests/unit_tests/formats_tests/lvmpv_test.py b/tests/unit_tests/formats_tests/lvmpv_test.py
index 6490c7d48..54a59026d 100644
--- a/tests/unit_tests/formats_tests/lvmpv_test.py
+++ b/tests/unit_tests/formats_tests/lvmpv_test.py
@@ -41,6 +41,11 @@ def test_lvm_devices(self):
mock["blockdev"].lvm.devices_add.assert_not_called()
+ # LVM devices file not enabled/supported -> devices_delete should not be called
+ fmt._destroy()
+
+ mock["blockdev"].lvm.devices_delete.assert_not_called()
+
with self.patches() as mock:
# LVM devices file enabled and devices file exists -> devices_add should be called
mock["lvm"].HAVE_LVMDEVICES = True
@@ -50,6 +55,11 @@ def test_lvm_devices(self):
mock["blockdev"].lvm.devices_add.assert_called_with("/dev/test")
+ # LVM devices file enabled and devices file exists -> devices_delete should be called
+ fmt._destroy()
+
+ mock["blockdev"].lvm.devices_delete.assert_called_with("/dev/test")
+
with self.patches() as mock:
# LVM devices file enabled and devices file doesn't exist
# and no existing VGs present -> devices_add should be called
@@ -61,6 +71,12 @@ def test_lvm_devices(self):
mock["blockdev"].lvm.devices_add.assert_called_with("/dev/test")
+ # LVM devices file enabled but devices file doesn't exist
+ # -> devices_delete should not be called
+ fmt._destroy()
+
+ mock["blockdev"].lvm.devices_delete.assert_not_called()
+
with self.patches() as mock:
# LVM devices file enabled and devices file doesn't exist
# and existing VGs present -> devices_add should not be called
@@ -71,3 +87,9 @@ def test_lvm_devices(self):
fmt._create()
mock["blockdev"].lvm.devices_add.assert_not_called()
+
+ # LVM devices file enabled but devices file doesn't exist
+ # -> devices_delete should not be called
+ fmt._destroy()
+
+ mock["blockdev"].lvm.devices_delete.assert_not_called()
View File
@ -0,0 +1,85 @@
From 6d2e5c70fecc68e0d62255d4e2a65e9d264578dd Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 22 Jan 2025 13:16:43 +0100
Subject: [PATCH] Include additional information in PartitioningError
The generic 'Unable to allocate requested partition scheme' is not
very helpful; we should try to include additional information when
possible.
Resolves: RHEL-8005
---
blivet/partitioning.py | 25 ++++++++++++++++++++++---
1 file changed, 22 insertions(+), 3 deletions(-)
diff --git a/blivet/partitioning.py b/blivet/partitioning.py
index ce77e4eb7..0a35c764d 100644
--- a/blivet/partitioning.py
+++ b/blivet/partitioning.py
@@ -34,7 +34,7 @@
from .flags import flags
from .devices import Device, PartitionDevice, device_path_to_name
from .size import Size
-from .i18n import _
+from .i18n import _, N_
from .util import stringize, unicodeize, compare
import logging
@@ -681,6 +681,11 @@ def resolve_disk_tags(disks, tags):
return [disk for disk in disks if any(tag in disk.tags for tag in tags)]
+class PartitioningErrors:
+ NO_PRIMARY = N_("no primary partition slots available")
+ NO_SLOTS = N_("no free partition slots")
+
+
def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None):
""" Allocate partitions based on requested features.
@@ -763,6 +768,7 @@ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None):
part_type = None
growth = 0 # in sectors
# loop through disks
+ errors = {}
for _disk in req_disks:
try:
disklabel = disklabels[_disk.path]
@@ -798,6 +804,10 @@ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None):
if new_part_type is None:
# can't allocate any more partitions on this disk
log.debug("no free partition slots on %s", _disk.name)
+ if PartitioningErrors.NO_SLOTS in errors.keys():
+ errors[PartitioningErrors.NO_SLOTS].append(_disk.name)
+ else:
+ errors[PartitioningErrors.NO_SLOTS] = [_disk.name]
continue
if _part.req_primary and new_part_type != parted.PARTITION_NORMAL:
@@ -808,7 +818,11 @@ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None):
new_part_type = parted.PARTITION_NORMAL
else:
# we need a primary slot and none are free on this disk
- log.debug("no primary slots available on %s", _disk.name)
+ log.debug("no primary partition slots available on %s", _disk.name)
+ if PartitioningErrors.NO_PRIMARY in errors.keys():
+ errors[PartitioningErrors.NO_PRIMARY].append(_disk.name)
+ else:
+ errors[PartitioningErrors.NO_PRIMARY] = [_disk.name]
continue
elif _part.req_part_type is not None and \
new_part_type != _part.req_part_type:
@@ -968,7 +982,12 @@ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None):
break
if free is None:
- raise PartitioningError(_("Unable to allocate requested partition scheme."))
+ if not errors:
+ msg = _("Unable to allocate requested partition scheme.")
+ else:
+ errors_by_disk = (", ".join(disks) + ": " + _(error) for error, disks in errors.items())
+ msg = _("Unable to allocate requested partition scheme on requested disks:\n%s") % "\n".join(errors_by_disk)
+ raise PartitioningError(msg)
_disk = use_disk
disklabel = _disk.format
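The resulting message can be rendered standalone from the snippet above (disk names hypothetical; gettext wrapping omitted):

    errors = {"no free partition slots": ["sda", "sdb"],
              "no primary partition slots available": ["sdc"]}
    errors_by_disk = (", ".join(disks) + ": " + error
                      for error, disks in errors.items())
    print("Unable to allocate requested partition scheme on requested disks:\n"
          + "\n".join(errors_by_disk))
    # Unable to allocate requested partition scheme on requested disks:
    # sda, sdb: no free partition slots
    # sdc: no primary partition slots available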
View File
@ -0,0 +1,572 @@
From 6a54de2780aa3fd52b4a25dc8db7ab8c5b1b8d4d Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 21 Jan 2025 10:03:17 +0100
Subject: [PATCH 1/7] Use pvs info from static data to get PV size in PVSize
No need for special code for this; we can reuse the existing
code from LVM static data.
---
blivet/tasks/pvtask.py | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/blivet/tasks/pvtask.py b/blivet/tasks/pvtask.py
index b5bd72e0d..e93a61bc7 100644
--- a/blivet/tasks/pvtask.py
+++ b/blivet/tasks/pvtask.py
@@ -27,6 +27,7 @@
from ..errors import PhysicalVolumeError
from ..size import Size, B
+from ..static_data import pvs_info
from . import availability
from . import task
@@ -55,13 +56,12 @@ def do_task(self): # pylint: disable=arguments-differ
:raises :class:`~.errors.PhysicalVolumeError`: if size cannot be obtained
"""
- try:
- pv_info = blockdev.lvm.pvinfo(self.pv.device)
- pv_size = pv_info.pv_size
- except blockdev.LVMError as e:
- raise PhysicalVolumeError(e)
+ pvs_info.drop_cache()
+ pv_info = pvs_info.cache.get(self.pv.device)
+ if pv_info is None:
+ raise PhysicalVolumeError("Failed to get PV info for %s" % self.pv.device)
- return Size(pv_size)
+ return Size(pv_info.pv_size)
class PVResize(task.BasicApplication, dfresize.DFResizeTask):
From 0b8239470762cc3b3732d2f40910be7e84102fa0 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 21 Jan 2025 10:05:13 +0100
Subject: [PATCH 2/7] Get the actual PV format size for LVMPV format
---
blivet/formats/lvmpv.py | 2 ++
blivet/populator/helpers/lvm.py | 2 ++
tests/unit_tests/populator_test.py | 2 ++
3 files changed, 6 insertions(+)
diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py
index f5d71dbd1..769c96e1d 100644
--- a/blivet/formats/lvmpv.py
+++ b/blivet/formats/lvmpv.py
@@ -101,6 +101,8 @@ def __init__(self, **kwargs):
# when set to True, blivet will try to resize the PV to fill all available space
self._grow_to_fill = False
+ self._target_size = self._size
+
def __repr__(self):
s = DeviceFormat.__repr__(self)
s += (" vg_name = %(vg_name)s vg_uuid = %(vg_uuid)s"
diff --git a/blivet/populator/helpers/lvm.py b/blivet/populator/helpers/lvm.py
index 6ef2f4174..74641bcf8 100644
--- a/blivet/populator/helpers/lvm.py
+++ b/blivet/populator/helpers/lvm.py
@@ -112,6 +112,8 @@ def _get_kwargs(self):
log.warning("PV %s has no pe_start", name)
if pv_info.pv_free:
kwargs["free"] = Size(pv_info.pv_free)
+ if pv_info.pv_size:
+ kwargs["size"] = Size(pv_info.pv_size)
return kwargs
diff --git a/tests/unit_tests/populator_test.py b/tests/unit_tests/populator_test.py
index 1ee29b57f..55b6be8d8 100644
--- a/tests/unit_tests/populator_test.py
+++ b/tests/unit_tests/populator_test.py
@@ -1064,6 +1064,7 @@ def test_run(self, *args):
pv_info.vg_uuid = sentinel.vg_uuid
pv_info.pe_start = 0
pv_info.pv_free = 0
+ pv_info.pv_size = "10g"
vg_device = Mock()
vg_device.id = 0
@@ -1095,6 +1096,7 @@ def test_run(self, *args):
pv_info.vg_extent_count = 2500
pv_info.vg_free_count = 0
pv_info.vg_pv_count = 1
+ pv_info.pv_size = "10g"
with patch("blivet.static_data.lvm_info.PVsInfo.cache", new_callable=PropertyMock) as mock_pvs_cache:
mock_pvs_cache.return_value = {device.path: pv_info}
From 14b9538a8fd9f5bfc7d744902517739b6fae7a22 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 21 Jan 2025 13:35:38 +0100
Subject: [PATCH 3/7] Update PV format size after adding/removing the PV
to/from the VG
Unfortunately LVM subtracts VG metadata from the reported PV size,
so we need to make sure to update the size after the vgextend and
vgreduce operations.
---
blivet/devices/lvm.py | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 62974443e..85850d8e8 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -315,9 +315,21 @@ def _remove(self, member):
if lv.status and not status:
lv.teardown()
+ # update LVMPV format size --> PV format has different size when in VG
+ try:
+ member.format._size = member.format._target_size = member.format._size_info.do_task()
+ except errors.PhysicalVolumeError as e:
+ log.warning("Failed to obtain current size for device %s: %s", member.format.device, e)
+
def _add(self, member):
blockdev.lvm.vgextend(self.name, member.path)
+ # update LVMPV format size --> PV format has different size when in VG
+ try:
+ member.format._size = member.format._target_size = member.format._size_info.do_task()
+ except errors.PhysicalVolumeError as e:
+ log.warning("Failed to obtain current size for device %s: %s", member.path, e)
+
def _add_log_vol(self, lv):
""" Add an LV to this VG. """
if lv in self._lvs:
From d6b0c283eb3236f3578dc28d40182f48d05a5c24 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 21 Jan 2025 14:22:07 +0100
Subject: [PATCH 4/7] Use LVMPV format size when calculating VG size and free
space
For existing PVs we need to check the format size instead of
simply expecting that the format is fully resized to match the size of
the underlying block device.
---
blivet/devices/lvm.py | 63 ++++++++++++++++++++++++++-----------------
1 file changed, 39 insertions(+), 24 deletions(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 85850d8e8..e3d08dbce 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -500,40 +500,55 @@ def reserved_percent(self, value):
self._reserved_percent = value
- def _get_pv_usable_space(self, pv):
+ def _get_pv_metadata_space(self, pv):
+ """ Returns how much space will be used by VG metadata in given PV
+ This depends on type of the PV, PE size and PE start.
+ """
if isinstance(pv, MDRaidArrayDevice):
- return self.align(pv.size - 2 * pv.format.pe_start)
+ return 2 * pv.format.pe_start
+ else:
+ return pv.format.pe_start
+
+ def _get_pv_usable_space(self, pv):
+ """ Return how much space can be actually used on given PV.
+ This takes into account:
+ - VG metadata that is/will be stored in this PV
+ - the actual PV format size (which might differ from
+ the underlying block device size)
+ """
+
+ if pv.format.exists and pv.format.size and self.exists:
+ # PV format exists, we got its size and VG also exists
+ # -> all metadata is already accounted in the PV format size
+ return pv.format.size
+ elif pv.format.exists and pv.format.size and not self.exists:
+ # PV format exists, we got its size, but the VG doesn't exist
+ # -> metadata size is not accounted in the PV format size
+ return self.align(pv.format.size - self._get_pv_metadata_space(pv))
else:
- return self.align(pv.size - pv.format.pe_start)
+ # something else -> either the PV format is not yet created or
+ # we for some reason failed to get size of the format, either way
+ # lets use the underlying block device size and calculate the
+ # metadata size ourselves
+ return self.align(pv.size - self._get_pv_metadata_space(pv))
@property
def lvm_metadata_space(self):
- """ The amount of the space LVM metadata cost us in this VG's PVs """
- # NOTE: we either specify data alignment in a PV or the default is used
- # which is both handled by pv.format.pe_start, but LVM takes into
- # account also the underlying block device which means that e.g.
- # for an MD RAID device, it tries to align everything also to chunk
- # size and alignment offset of such device which may result in up
- # to a twice as big non-data area
- # TODO: move this to either LVMPhysicalVolume's pe_start property once
- # formats know about their devices or to a new LVMPhysicalVolumeDevice
- # class once it exists
- diff = Size(0)
- for pv in self.pvs:
- diff += pv.size - self._get_pv_usable_space(pv)
-
- return diff
+ """ The amount of the space LVM metadata cost us in this VG's PVs
+ Note: we either specify data alignment in a PV or the default is used
+ which is both handled by pv.format.pe_start, but LVM takes into
+ account also the underlying block device which means that e.g.
+ for an MD RAID device, it tries to align everything also to chunk
+ size and alignment offset of such device which may result in up
+ to a twice as big non-data area
+ """
+ return sum(self._get_pv_metadata_space(pv) for pv in self.pvs)
@property
def size(self):
""" The size of this VG """
# TODO: just ask lvm if isModified returns False
-
- # sum up the sizes of the PVs, subtract the unusable (meta data) space
- size = sum(pv.size for pv in self.pvs)
- size -= self.lvm_metadata_space
-
- return size
+ return sum(self._get_pv_usable_space(pv) for pv in self.pvs)
@property
def extents(self):
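A worked example with assumed numbers: with the default 1 MiB pe_start, a plain 10 GiB PV whose lvmpv format is not yet created contributes align(10 GiB - 1 MiB) to the VG size, while the same PV sitting on an MD RAID array loses twice the pe_start:

    from blivet.size import Size

    pe_start = Size("1 MiB")             # default LVM data alignment
    pv_size = Size("10 GiB")             # hypothetical underlying block device size

    plain_pv = pv_size - pe_start        # usable space on a plain PV
    md_raid_pv = pv_size - pe_start * 2  # an MD RAID PV pays 2 * pe_start

    print(plain_pv, md_raid_pv)          # 10239 MiB and 10238 MiB, before align()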
From 4d033869de8c22f627cc23e70023e82d9c6e90ed Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 21 Jan 2025 14:28:56 +0100
Subject: [PATCH 5/7] Add more tests for PV and VG size and free space
---
tests/storage_tests/devices_test/lvm_test.py | 104 ++++++++++++++++++-
1 file changed, 103 insertions(+), 1 deletion(-)
diff --git a/tests/storage_tests/devices_test/lvm_test.py b/tests/storage_tests/devices_test/lvm_test.py
index 97ef1c4b9..988201839 100644
--- a/tests/storage_tests/devices_test/lvm_test.py
+++ b/tests/storage_tests/devices_test/lvm_test.py
@@ -22,6 +22,18 @@ def setUp(self):
self.assertIsNone(disk.format.type)
self.assertFalse(disk.children)
+ def _get_pv_size(self, pv):
+ out = subprocess.check_output(["pvs", "-o", "pv_size", "--noheadings", "--nosuffix", "--units=b", pv])
+ return blivet.size.Size(out.decode().strip())
+
+ def _get_vg_size(self, vg):
+ out = subprocess.check_output(["vgs", "-o", "vg_size", "--noheadings", "--nosuffix", "--units=b", vg])
+ return blivet.size.Size(out.decode().strip())
+
+ def _get_vg_free(self, vg):
+ out = subprocess.check_output(["vgs", "-o", "vg_free", "--noheadings", "--nosuffix", "--units=b", vg])
+ return blivet.size.Size(out.decode().strip())
+
def _clean_up(self):
self.storage.reset()
for disk in self.storage.disks:
@@ -63,6 +75,8 @@ def test_lvm_basic(self):
self.assertIsInstance(pv, blivet.devices.PartitionDevice)
self.assertIsNotNone(pv.format)
self.assertEqual(pv.format.type, "lvmpv")
+ pv_size = self._get_pv_size(pv.path)
+ self.assertEqual(pv.format.size, pv_size)
vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
self.assertIsNotNone(vg)
@@ -72,6 +86,10 @@ def test_lvm_basic(self):
self.assertEqual(pv.format.vg_name, vg.name)
self.assertEqual(len(vg.parents), 1)
self.assertEqual(vg.parents[0], pv)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
lv = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestLV")
self.assertIsNotNone(lv)
@@ -112,6 +130,13 @@ def test_lvm_thin(self):
self.storage.do_it()
self.storage.reset()
+ vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
+
pool = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestPool")
self.assertIsNotNone(pool)
self.assertTrue(pool.is_thin_pool)
@@ -158,6 +183,14 @@ def _test_lvm_raid(self, seg_type, raid_level, stripe_size=0):
self.storage.do_it()
self.storage.reset()
+ vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
+ self.assertIsNotNone(vg)
+
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space + vg.reserved_space)
+
raidlv = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestRAIDLV")
self.assertIsNotNone(raidlv)
self.assertTrue(raidlv.is_raid_lv)
@@ -214,6 +247,13 @@ def test_lvm_cache(self):
self.storage.do_it()
self.storage.reset()
+ vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
+
cachedlv = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestCachedLV")
self.assertIsNotNone(cachedlv)
self.assertTrue(cachedlv.cached)
@@ -253,6 +293,13 @@ def test_lvm_cache_attach(self):
self.storage.do_it()
self.storage.reset()
+ vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
+
cachedlv = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestCachedLV")
self.assertIsNotNone(cachedlv)
cachepool = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestFastLV")
@@ -308,6 +355,13 @@ def test_lvm_cache_create_and_attach(self):
self.storage.do_it()
self.storage.reset()
+ vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
+
cachedlv = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestCachedLV")
self.assertIsNotNone(cachedlv)
@@ -323,6 +377,13 @@ def test_lvm_cache_create_and_attach(self):
self.storage.do_it()
self.storage.reset()
+ vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
+
cachedlv = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestCachedLV")
self.assertIsNotNone(cachedlv)
self.assertTrue(cachedlv.cached)
@@ -352,6 +413,13 @@ def test_lvm_pvs_add_remove(self):
self.storage.do_it()
+ vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
+
# create a second PV
disk2 = self.storage.devicetree.get_device_by_path(self.vdevs[1])
self.assertIsNotNone(disk2)
@@ -366,6 +434,17 @@ def test_lvm_pvs_add_remove(self):
self.storage.do_it()
self.storage.reset()
+ pv1 = self.storage.devicetree.get_device_by_name(pv1.name)
+ pv1_size = self._get_pv_size(pv1.path)
+ self.assertEqual(pv1.format.size, pv1_size)
+
+ vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
+
# add the PV to the existing VG
vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
pv2 = self.storage.devicetree.get_device_by_name(pv2.name)
@@ -374,6 +453,17 @@ def test_lvm_pvs_add_remove(self):
self.storage.devicetree.actions.add(ac)
self.storage.do_it()
+ pv2 = self.storage.devicetree.get_device_by_name(pv2.name)
+ pv2_size = self._get_pv_size(pv2.path)
+ self.assertEqual(pv2.format.size, pv2_size)
+
+ vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
+
self.assertEqual(pv2.format.vg_name, vg.name)
self.storage.reset()
@@ -387,7 +477,19 @@ def test_lvm_pvs_add_remove(self):
self.storage.devicetree.actions.add(ac)
self.storage.do_it()
- self.assertIsNone(pv1.format.vg_name)
+ pv2 = self.storage.devicetree.get_device_by_name(pv2.name)
+ pv2_size = self._get_pv_size(pv2.path)
+ self.assertEqual(pv2.format.size, pv2_size)
+
+ vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
+
+ self.assertIsNone(pv1.format.type)
+
self.storage.reset()
self.storage.reset()
From 4dfa8d699ed1216c18d0c7effa33580a3aa56606 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 21 Jan 2025 15:16:29 +0100
Subject: [PATCH 6/7] Add a separate test case for LVMPV smaller than the block
device
---
tests/storage_tests/devices_test/lvm_test.py | 55 ++++++++++++++++++++
1 file changed, 55 insertions(+)
diff --git a/tests/storage_tests/devices_test/lvm_test.py b/tests/storage_tests/devices_test/lvm_test.py
index 988201839..a1064c9c4 100644
--- a/tests/storage_tests/devices_test/lvm_test.py
+++ b/tests/storage_tests/devices_test/lvm_test.py
@@ -475,6 +475,11 @@ def test_lvm_pvs_add_remove(self):
pv1 = self.storage.devicetree.get_device_by_name(pv1.name)
ac = blivet.deviceaction.ActionRemoveMember(vg, pv1)
self.storage.devicetree.actions.add(ac)
+
+ # schedule also removing the lvmpv format from the PV
+ ac = blivet.deviceaction.ActionDestroyFormat(pv1)
+ self.storage.devicetree.actions.add(ac)
+
self.storage.do_it()
pv2 = self.storage.devicetree.get_device_by_name(pv2.name)
@@ -497,3 +502,53 @@ def test_lvm_pvs_add_remove(self):
self.assertIsNotNone(vg)
self.assertEqual(len(vg.pvs), 1)
self.assertEqual(vg.pvs[0].name, pv2.name)
+
+ def test_lvm_pv_size(self):
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+ self.storage.initialize_disk(disk)
+
+ pv = self.storage.new_partition(size=blivet.size.Size("100 MiB"), fmt_type="lvmpv",
+ parents=[disk])
+ self.storage.create_device(pv)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ pv = self.storage.devicetree.get_device_by_name(pv.name)
+ self.assertIsNotNone(pv)
+
+ pv.format.update_size_info()
+ self.assertTrue(pv.format.resizable)
+
+ ac = blivet.deviceaction.ActionResizeFormat(pv, blivet.size.Size("50 MiB"))
+ self.storage.devicetree.actions.add(ac)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ pv = self.storage.devicetree.get_device_by_name(pv.name)
+ self.assertIsNotNone(pv)
+ self.assertEqual(pv.format.size, blivet.size.Size("50 MiB"))
+ pv_size = self._get_pv_size(pv.path)
+ self.assertEqual(pv_size, pv.format.size)
+
+ vg = self.storage.new_vg(name="blivetTestVG", parents=[pv])
+ self.storage.create_device(vg)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ pv = self.storage.devicetree.get_device_by_name(pv.name)
+ self.assertIsNotNone(pv)
+ pv_size = self._get_pv_size(pv.path)
+ self.assertEqual(pv_size, pv.format.size)
+
+ vg = self.storage.devicetree.get_device_by_name("blivetTestVG")
+ self.assertIsNotNone(vg)
+ vg_size = self._get_vg_size(vg.name)
+ self.assertEqual(vg_size, vg.size)
+ vg_free = self._get_vg_free(vg.name)
+ self.assertEqual(vg_free, vg.free_space)
From 6cfa9d0df6faa79b8ab471ba34aa0b3d6f0dc338 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 14 Apr 2025 14:54:00 +0200
Subject: [PATCH 7/7] Fix checking PV free space when removing it from a VG
---
blivet/devices/lvm.py | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index e3d08dbce..a03d57f97 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -305,9 +305,15 @@ def _remove(self, member):
if lv.exists:
lv.setup()
+ # if format was already scheduled for removal, use original_format
+ if member.format.type != "lvmpv":
+ fmt = member.original_format
+ else:
+ fmt = member.format
+
# do not run pvmove on empty PVs
- member.format.update_size_info()
- if member.format.free < member.format.current_size:
+ fmt.update_size_info()
+ if fmt.free < fmt.current_size:
blockdev.lvm.pvmove(member.path)
blockdev.lvm.vgreduce(self.name, member.path)
@@ -317,9 +323,9 @@ def _remove(self, member):
# update LVMPV format size --> PV format has different size when in VG
try:
- member.format._size = member.format._target_size = member.format._size_info.do_task()
+ fmt._size = fmt._target_size = fmt._size_info.do_task()
except errors.PhysicalVolumeError as e:
- log.warning("Failed to obtain current size for device %s: %s", member.format.device, e)
+ log.warning("Failed to obtain current size for device %s: %s", fmt.device, e)
def _add(self, member):
blockdev.lvm.vgextend(self.name, member.path)
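For clarity, the check above in condensed form: once an ActionDestroyFormat has been applied to the PV, member.format is a blank format whose type is no longer "lvmpv", so the free-space check has to look at member.original_format instead (a sketch, assuming 'member' is the PV device):

    # pick the format object that still describes the on-disk lvmpv metadata
    fmt = member.format if member.format.type == "lvmpv" else member.original_format
    fmt.update_size_info()               # refresh fmt.free and fmt.current_size
    needs_pvmove = fmt.free < fmt.current_size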

View File

@ -0,0 +1,258 @@
From 68db0569b3508bbedf33d9ee3b69e8fc6a309b65 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 16 May 2025 17:15:17 +0200
Subject: [PATCH 1/4] Allow ActionDestroyFormat to be marked as optional
When we are also planning to remove the device, failing to remove
the format is not critical, so we can ignore it in these cases.
Resolves: RHEL-8008
Resolves: RHEL-8012
---
blivet/deviceaction.py | 37 +++++++++++++++++++++++--------------
1 file changed, 23 insertions(+), 14 deletions(-)
diff --git a/blivet/deviceaction.py b/blivet/deviceaction.py
index fc1ca4b65..a6fc211ea 100644
--- a/blivet/deviceaction.py
+++ b/blivet/deviceaction.py
@@ -728,12 +728,13 @@ class ActionDestroyFormat(DeviceAction):
obj = ACTION_OBJECT_FORMAT
type_desc_str = N_("destroy format")
- def __init__(self, device):
+ def __init__(self, device, optional=False):
if device.format_immutable:
raise ValueError("this device's formatting cannot be modified")
DeviceAction.__init__(self, device)
self.orig_format = self.device.format
+ self.optional = optional
if not device.format.destroyable:
raise ValueError("resource to destroy this format type %s is unavailable" % device.format.type)
@@ -752,21 +753,29 @@ def execute(self, callbacks=None):
""" wipe the filesystem signature from the device """
# remove any flag if set
super(ActionDestroyFormat, self).execute(callbacks=callbacks)
- status = self.device.status
- self.device.setup(orig=True)
- if hasattr(self.device, 'set_rw'):
- self.device.set_rw()
- self.format.destroy()
- udev.settle()
- if isinstance(self.device, PartitionDevice) and self.device.disklabel_supported:
- if self.format.parted_flag:
- self.device.unset_flag(self.format.parted_flag)
- self.device.disk.original_format.commit_to_disk()
- udev.settle()
+ try:
+ status = self.device.status
+ self.device.setup(orig=True)
+ if hasattr(self.device, 'set_rw'):
+ self.device.set_rw()
- if not status:
- self.device.teardown()
+ self.format.destroy()
+ udev.settle()
+ if isinstance(self.device, PartitionDevice) and self.device.disklabel_supported:
+ if self.format.parted_flag:
+ self.device.unset_flag(self.format.parted_flag)
+ self.device.disk.original_format.commit_to_disk()
+ udev.settle()
+
+ if not status:
+ self.device.teardown()
+ except Exception as e: # pylint: disable=broad-except
+ if self.optional:
+ log.error("Ignoring error when executing optional action: Failed to destroy format on %s: %s.",
+ self.device.name, str(e))
+ else:
+ raise
def cancel(self):
if not self._applied:
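A minimal usage sketch, mirroring what the follow-up patch below does in destroy_device: when the device itself is also being destroyed, the format destroy is scheduled as optional, so a failure there is logged instead of aborting the transaction ('storage' and 'device' are assumed to already exist):

    from blivet.deviceaction import ActionDestroyFormat, ActionDestroyDevice

    storage.devicetree.actions.add(ActionDestroyFormat(device, optional=True))
    storage.devicetree.actions.add(ActionDestroyDevice(device))
    storage.do_it()  # errors from the optional format destroy are logged, not raised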
From fca71515094840ab1ca8821641284cfb0b687d82 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 16 May 2025 17:28:40 +0200
Subject: [PATCH 2/4] Make ActionDestroyFormat optional when device is also
removed
In both destroy_device and recursive_remove we try to remove both
the device and its format. In these cases the format destroy can
be considered optional and we don't need to abort just because
removing the format failed.
Resolves: RHEL-8008
Resolves: RHEL-8012
---
blivet/blivet.py | 2 +-
blivet/devicetree.py | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/blivet/blivet.py b/blivet/blivet.py
index dc066b036..2e86f5bf6 100644
--- a/blivet/blivet.py
+++ b/blivet/blivet.py
@@ -897,7 +897,7 @@ def destroy_device(self, device):
if device.format.exists and device.format.type and \
not device.format_immutable:
# schedule destruction of any formatting while we're at it
- self.devicetree.actions.add(ActionDestroyFormat(device))
+ self.devicetree.actions.add(ActionDestroyFormat(device, optional=True))
action = ActionDestroyDevice(device)
self.devicetree.actions.add(action)
diff --git a/blivet/devicetree.py b/blivet/devicetree.py
index c6c1b4400..f94e3ca30 100644
--- a/blivet/devicetree.py
+++ b/blivet/devicetree.py
@@ -264,7 +264,7 @@ def recursive_remove(self, device, actions=True, remove_device=True, modparent=T
if actions:
if leaf.format.exists and not leaf.protected and \
not leaf.format_immutable:
- self.actions.add(ActionDestroyFormat(leaf))
+ self.actions.add(ActionDestroyFormat(leaf, optional=True))
self.actions.add(ActionDestroyDevice(leaf))
else:
@@ -276,7 +276,7 @@ def recursive_remove(self, device, actions=True, remove_device=True, modparent=T
if not device.format_immutable:
if actions:
- self.actions.add(ActionDestroyFormat(device))
+ self.actions.add(ActionDestroyFormat(device, optional=True))
else:
device.format = None
From 50efc63fa3053f863d03439a507b3e0a6d7b8168 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 19 May 2025 14:24:06 +0200
Subject: [PATCH 3/4] tests: Add a simple test case for optional format destroy
action
Related: RHEL-8008
Related: RHEL-8012
---
tests/unit_tests/devices_test/lvm_test.py | 29 +++++++++++++++++++++++
1 file changed, 29 insertions(+)
diff --git a/tests/unit_tests/devices_test/lvm_test.py b/tests/unit_tests/devices_test/lvm_test.py
index e645309fc..34c2084a8 100644
--- a/tests/unit_tests/devices_test/lvm_test.py
+++ b/tests/unit_tests/devices_test/lvm_test.py
@@ -1160,3 +1160,32 @@ def test_vdo_compression_deduplication_change(self):
with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
b.do_it()
lvm.vdo_enable_deduplication.assert_called_with(vg.name, vdopool.lvname)
+
+
+@patch("blivet.devices.lvm.LVMLogicalVolumeDevice._external_dependencies", new=[])
+@patch("blivet.devices.lvm.LVMLogicalVolumeBase._external_dependencies", new=[])
+@patch("blivet.devices.dm.DMDevice._external_dependencies", new=[])
+class BlivetLVMOptionalDestroyTest(unittest.TestCase):
+
+ def test_optional_format_destroy(self, *args): # pylint: disable=unused-argument
+ b = blivet.Blivet()
+ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("10 GiB"), exists=True)
+ vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
+ lv = LVMLogicalVolumeDevice("testlv", parents=[vg], exists=True, size=Size("5 GiB"),
+ fmt=blivet.formats.get_format("xfs", exists=True))
+
+ for dev in (pv, vg, lv):
+ b.devicetree._add_device(dev)
+
+ b.destroy_device(lv)
+ fmt_ac = b.devicetree.actions.find(action_type="destroy", object_type="format")
+ self.assertTrue(fmt_ac)
+ self.assertTrue(fmt_ac[0].optional)
+
+ with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
+ lvm.lvactivate.side_effect = RuntimeError()
+ try:
+ b.do_it()
+ except RuntimeError:
+ self.fail("Optional format destroy action is not optional")
From ea913c5fa8e60cd5c2fdd8196be51c067a2a73d8 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 20 May 2025 13:02:00 +0200
Subject: [PATCH 4/4] tests: Add test case for removing broken thin pool
Related: RHEL-8008
Related: RHEL-8012
---
tests/storage_tests/devices_test/lvm_test.py | 52 ++++++++++++++++++++
1 file changed, 52 insertions(+)
diff --git a/tests/storage_tests/devices_test/lvm_test.py b/tests/storage_tests/devices_test/lvm_test.py
index a1064c9c4..10e7354ff 100644
--- a/tests/storage_tests/devices_test/lvm_test.py
+++ b/tests/storage_tests/devices_test/lvm_test.py
@@ -1,5 +1,7 @@
import os
import subprocess
+import tempfile
+from unittest.mock import patch
from ..storagetestcase import StorageTestCase
@@ -552,3 +554,53 @@ def test_lvm_pv_size(self):
self.assertEqual(vg_size, vg.size)
vg_free = self._get_vg_free(vg.name)
self.assertEqual(vg_free, vg.free_space)
+
+ def _break_thin_pool(self, vgname):
+ os.system("vgchange -an %s >/dev/null 2>&1" % vgname)
+
+ # changing transaction_id for the pool prevents it from being activated
+ with tempfile.NamedTemporaryFile(prefix="blivet_test") as temp:
+ os.system("vgcfgbackup -f %s %s >/dev/null 2>&1" % (temp.name, vgname))
+ os.system("sed -i 's/transaction_id =.*/transaction_id = 123456/' %s >/dev/null 2>&1" % temp.name)
+ os.system("vgcfgrestore -f %s %s --force >/dev/null 2>&1" % (temp.name, vgname))
+
+ def test_lvm_broken_thin(self):
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+
+ self.storage.initialize_disk(disk)
+
+ pv = self.storage.new_partition(size=blivet.size.Size("100 MiB"), fmt_type="lvmpv",
+ parents=[disk])
+ self.storage.create_device(pv)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ vg = self.storage.new_vg(name="blivetTestVG", parents=[pv])
+ self.storage.create_device(vg)
+
+ pool = self.storage.new_lv(thin_pool=True, size=blivet.size.Size("50 MiB"),
+ parents=[vg], name="blivetTestPool")
+ self.storage.create_device(pool)
+
+ self.storage.do_it()
+
+ # intentionally break the thin pool created above
+ self._break_thin_pool("blivetTestVG")
+
+ self.storage.reset()
+
+ pool = self.storage.devicetree.get_device_by_name("blivetTestVG-blivetTestPool")
+ self.assertIsNotNone(pool)
+
+ # check that the pool cannot be activated
+ try:
+ pool.setup()
+ except Exception: # pylint: disable=broad-except
+ pass
+ else:
+ self.fail("Failed to break thinpool for tests")
+
+ # verify that the pool can be destroyed even if it cannot be activated
+ self.storage.recursive_remove(pool)
+ self.storage.do_it()

View File

@ -0,0 +1,384 @@
From c07938143a9906bc0e06e78c818227b4c06f64ad Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 27 May 2025 15:21:23 +0200
Subject: [PATCH 1/3] Add some basic partitioning storage tests
This supplements the existing tests which use sparse files. These
new test cases actually run do_it() and check the result after
reset. More test cases will follow.
Related: RHEL-76917
---
.../devices_test/partition_test.py | 148 ++++++++++++++++++
1 file changed, 148 insertions(+)
diff --git a/tests/storage_tests/devices_test/partition_test.py b/tests/storage_tests/devices_test/partition_test.py
index 679fded6e..6ad8a8f1a 100644
--- a/tests/storage_tests/devices_test/partition_test.py
+++ b/tests/storage_tests/devices_test/partition_test.py
@@ -11,12 +11,15 @@
except ImportError:
from mock import patch
+import blivet
from blivet.devices import DiskFile
from blivet.devices import PartitionDevice
from blivet.formats import get_format
from blivet.size import Size
from blivet.util import sparsetmpfile
+from ..storagetestcase import StorageTestCase
+
Weighted = namedtuple("Weighted", ["fstype", "mountpoint", "true_funcs", "weight"])
@@ -218,3 +221,148 @@ def test_extended_min_size(self):
end_free = (extended_end - logical_end) * sector_size
self.assertEqual(extended_device.min_size,
extended_device.align_target_size(extended_device.current_size - end_free))
+
+
+class PartitionTestCase(StorageTestCase):
+
+ def setUp(self):
+ super().setUp()
+
+ disks = [os.path.basename(vdev) for vdev in self.vdevs]
+ self.storage = blivet.Blivet()
+ self.storage.exclusive_disks = disks
+ self.storage.reset()
+
+ # make sure only the targetcli disks are in the devicetree
+ for disk in self.storage.disks:
+ self.assertTrue(disk.path in self.vdevs)
+ self.assertIsNone(disk.format.type)
+ self.assertFalse(disk.children)
+
+ def _clean_up(self):
+ self.storage.reset()
+ for disk in self.storage.disks:
+ if disk.path not in self.vdevs:
+ raise RuntimeError("Disk %s found in devicetree but not in disks created for tests" % disk.name)
+ self.storage.recursive_remove(disk)
+
+ self.storage.do_it()
+
+ def test_msdos_basic(self):
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+
+ self.storage.format_device(disk, blivet.formats.get_format("disklabel", label_type="msdos"))
+
+ for i in range(4):
+ part = self.storage.new_partition(size=Size("100 MiB"), parents=[disk],
+ primary=True)
+ self.storage.create_device(part)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+ self.assertEqual(disk.format.type, "disklabel")
+ self.assertEqual(disk.format.label_type, "msdos")
+ self.assertIsNotNone(disk.format.parted_disk)
+ self.assertIsNotNone(disk.format.parted_device)
+ self.assertEqual(len(disk.format.partitions), 4)
+ self.assertEqual(len(disk.format.primary_partitions), 4)
+ self.assertEqual(len(disk.children), 4)
+
+ for i in range(4):
+ part = self.storage.devicetree.get_device_by_path(self.vdevs[0] + str(i + 1))
+ self.assertIsNotNone(part)
+ self.assertEqual(part.type, "partition")
+ self.assertEqual(part.disk, disk)
+ self.assertEqual(part.size, Size("100 MiB"))
+ self.assertTrue(part.is_primary)
+ self.assertFalse(part.is_extended)
+ self.assertFalse(part.is_logical)
+ self.assertIsNotNone(part.parted_partition)
+
+ def test_msdos_extended(self):
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+
+ self.storage.format_device(disk, blivet.formats.get_format("disklabel", label_type="msdos"))
+
+ part = self.storage.new_partition(size=Size("100 MiB"), parents=[disk])
+ self.storage.create_device(part)
+
+ part = self.storage.new_partition(size=Size("1 GiB"), parents=[disk],
+ part_type=parted.PARTITION_EXTENDED)
+ self.storage.create_device(part)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ for i in range(4):
+ part = self.storage.new_partition(size=Size("100 MiB"), parents=[disk],
+ part_type=parted.PARTITION_LOGICAL)
+ self.storage.create_device(part)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+ self.assertEqual(disk.format.type, "disklabel")
+ self.assertEqual(disk.format.label_type, "msdos")
+ self.assertIsNotNone(disk.format.parted_disk)
+ self.assertIsNotNone(disk.format.parted_device)
+ self.assertEqual(len(disk.format.partitions), 6)
+ self.assertEqual(len(disk.format.primary_partitions), 1)
+ self.assertEqual(len(disk.children), 6)
+
+ for i in range(4, 8):
+ part = self.storage.devicetree.get_device_by_path(self.vdevs[0] + str(i + 1))
+ self.assertIsNotNone(part)
+ self.assertEqual(part.type, "partition")
+ self.assertEqual(part.disk, disk)
+ self.assertEqual(part.size, Size("100 MiB"))
+ self.assertFalse(part.is_primary)
+ self.assertFalse(part.is_extended)
+ self.assertTrue(part.is_logical)
+ self.assertIsNotNone(part.parted_partition)
+
+ def test_gpt_basic(self):
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+
+ self.storage.format_device(disk, blivet.formats.get_format("disklabel", label_type="gpt"))
+
+ for i in range(4):
+ part = self.storage.new_partition(size=Size("100 MiB"), parents=[disk],)
+ self.storage.create_device(part)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+ self.assertEqual(disk.format.type, "disklabel")
+ self.assertEqual(disk.format.label_type, "gpt")
+ self.assertIsNotNone(disk.format.parted_disk)
+ self.assertIsNotNone(disk.format.parted_device)
+ self.assertEqual(len(disk.format.partitions), 4)
+ self.assertEqual(len(disk.format.primary_partitions), 4)
+ self.assertEqual(len(disk.children), 4)
+
+ for i in range(4):
+ part = self.storage.devicetree.get_device_by_path(self.vdevs[0] + str(i + 1))
+ self.assertIsNotNone(part)
+ self.assertEqual(part.type, "partition")
+ self.assertEqual(part.disk, disk)
+ self.assertEqual(part.size, Size("100 MiB"))
+ self.assertTrue(part.is_primary)
+ self.assertFalse(part.is_extended)
+ self.assertFalse(part.is_logical)
+ self.assertIsNotNone(part.parted_partition)
From 1486d2d47d9b757694a3da88ccc13d29d8bb12fd Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 27 May 2025 14:10:49 +0200
Subject: [PATCH 2/3] Wipe end partition before creating it as well as the
start
We are currently overwriting the start of the newly created
partition with zeroes to remove any filesystem metadata that might
occupy the space. This extends the functionality to the end of the
partition to remove MD RAID 1.0 metadata that might be there.
Resolves: RHEL-76917
---
blivet/devices/partition.py | 20 +++++++++++++++++++-
1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/blivet/devices/partition.py b/blivet/devices/partition.py
index 6ae4b8d36..1dac75a5a 100644
--- a/blivet/devices/partition.py
+++ b/blivet/devices/partition.py
@@ -599,7 +599,7 @@ def _wipe(self):
""" Wipe the partition metadata.
Assumes that the partition metadata is located at the start
- of the partition and occupies no more than 1 MiB.
+ and end of the partition and occupies no more than 1 MiB.
Erases in block increments. Erases the smallest number of blocks
such that at least 1 MiB is erased or the whole partition is
@@ -632,6 +632,24 @@ def _wipe(self):
# things to settle.
udev.settle()
+ if count >= part_len:
+ # very small partition, we wiped it completely already
+ return
+
+ # now do the end of the partition as well (RAID 1.0 metadata)
+ end = self.parted_partition.geometry.end
+ cmd = ["dd", "if=/dev/zero", "of=%s" % device, "bs=%d" % bs,
+ "seek=%d" % (end - count), "count=%d" % count]
+ try:
+ util.run_program(cmd)
+ except OSError as e:
+ log.error(str(e))
+ finally:
+ # If a udev device is created with the watch option, then
+ # a change uevent is synthesized and we need to wait for
+ # things to settle.
+ udev.settle()
+
def _create(self):
""" Create the device. """
log_method_call(self, self.name, status=self.status)
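To illustrate the arithmetic with assumed numbers: the tail wipe writes count blocks of bs bytes starting count blocks before the partition's end sector, so for a partition ending at sector 2099199 of /dev/vdb, with 512-byte sectors and 1 MiB to erase, the constructed command is roughly:

    end = 2099199          # parted geometry end of the partition (in sectors)
    bs = 512               # sector size, used as the dd block size
    count = 2048           # 1 MiB / 512 B
    cmd = ["dd", "if=/dev/zero", "of=/dev/vdb", "bs=%d" % bs,
           "seek=%d" % (end - count), "count=%d" % count]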
From f0f78b801fb52425c13d0384f6867bf55839d98f Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 28 May 2025 11:01:14 +0200
Subject: [PATCH 3/3] tests: Add tests for wiping stale metadata from new
partitions
Related: RHEL-76917
---
.../devices_test/partition_test.py | 119 ++++++++++++++++++
1 file changed, 119 insertions(+)
diff --git a/tests/storage_tests/devices_test/partition_test.py b/tests/storage_tests/devices_test/partition_test.py
index 6ad8a8f1a..f4be3aa4c 100644
--- a/tests/storage_tests/devices_test/partition_test.py
+++ b/tests/storage_tests/devices_test/partition_test.py
@@ -4,6 +4,7 @@
import os
import six
import unittest
+import blivet.deviceaction
import parted
try:
@@ -366,3 +367,121 @@ def test_gpt_basic(self):
self.assertFalse(part.is_extended)
self.assertFalse(part.is_logical)
self.assertIsNotNone(part.parted_partition)
+
+ def _partition_wipe_check(self):
+ part1 = self.storage.devicetree.get_device_by_path(self.vdevs[0] + "1")
+ self.assertIsNotNone(part1)
+ self.assertIsNone(part1.format.type)
+
+ out = blivet.util.capture_output(["blkid", "-p", "-sTYPE", "-ovalue", self.vdevs[0] + "1"])
+ self.assertEqual(out.strip(), "")
+
+ part2 = self.storage.devicetree.get_device_by_path(self.vdevs[0] + "2")
+ self.assertIsNotNone(part2)
+ self.assertEqual(part2.format.type, "ext4")
+
+ try:
+ part2.format.do_check()
+ except blivet.errors.FSError as e:
+ self.fail("Partition wipe corrupted filesystem on an adjacent partition: %s" % str(e))
+
+ out = blivet.util.capture_output(["blkid", "-p", "-sTYPE", "-ovalue", self.vdevs[0] + "2"])
+ self.assertEqual(out.strip(), "ext4")
+
+ def test_partition_wipe_ext(self):
+ """ Check that any stray filesystem metadata are removed before creating a partition """
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+
+ self.storage.format_device(disk, blivet.formats.get_format("disklabel", label_type="gpt"))
+
+ # create two partitions with ext4
+ part1 = self.storage.new_partition(size=Size("100 MiB"), parents=[disk],
+ fmt=blivet.formats.get_format("ext4"))
+ self.storage.create_device(part1)
+
+ part2 = self.storage.new_partition(size=Size("1 MiB"), parents=[disk], grow=True,
+ fmt=blivet.formats.get_format("ext4"))
+ self.storage.create_device(part2)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ # remove the first partition (only the partition without removing the format)
+ part1 = self.storage.devicetree.get_device_by_path(self.vdevs[0] + "1")
+ ac = blivet.deviceaction.ActionDestroyDevice(part1)
+ self.storage.devicetree.actions.add(ac)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ # create the first partition again (without ext4)
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ part1 = self.storage.new_partition(size=Size("100 MiB"), parents=[disk])
+ self.storage.create_device(part1)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ # XXX PartitionDevice._post_create calls wipefs on the partition, we want to check that
+ # the _pre_create dd wipe works so we need to skip the _post_create wipefs call
+ part1._post_create = lambda: None
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ # make sure the ext4 signature is not present on part1 (and untouched on part2)
+ self._partition_wipe_check()
+
+ def test_partition_wipe_mdraid(self):
+ """ Check that any stray RAID metadata are removed before creating a partition """
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ self.assertIsNotNone(disk)
+
+ self.storage.format_device(disk, blivet.formats.get_format("disklabel", label_type="gpt"))
+
+ # create two partitions, one empty, one with ext4
+ part1 = self.storage.new_partition(size=Size("100 MiB"), parents=[disk])
+ self.storage.create_device(part1)
+
+ part2 = self.storage.new_partition(size=Size("1 MiB"), parents=[disk], grow=True,
+ fmt=blivet.formats.get_format("ext4"))
+ self.storage.create_device(part2)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ # create MD RAID with metadata 1.0 on the first partition
+ ret = blivet.util.run_program(["mdadm", "--create", "blivetMDTest", "--level=linear",
+ "--metadata=1.0", "--raid-devices=1", "--force", part1.path])
+ self.assertEqual(ret, 0, "Failed to create RAID array for partition wipe test")
+ ret = blivet.util.run_program(["mdadm", "--stop", "/dev/md/blivetMDTest"])
+ self.assertEqual(ret, 0, "Failed to stop RAID array for partition wipe test")
+
+ # now remove the partition without removing the array first
+ part1 = self.storage.devicetree.get_device_by_path(self.vdevs[0] + "1")
+ ac = blivet.deviceaction.ActionDestroyDevice(part1)
+ self.storage.devicetree.actions.add(ac)
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ # create the first partition again (without format)
+ disk = self.storage.devicetree.get_device_by_path(self.vdevs[0])
+ part1 = self.storage.new_partition(size=Size("100 MiB"), parents=[disk])
+ self.storage.create_device(part1)
+
+ blivet.partitioning.do_partitioning(self.storage)
+
+ # XXX PartitionDevice._post_create calls wipefs on the partition, we want to check that
+ # the _pre_create dd wipe works so we need to skip the _post_create wipefs call
+ part1._post_create = lambda: None
+
+ self.storage.do_it()
+ self.storage.reset()
+
+ # make sure the mdmember signature is not present on part1 (and ext4 is untouched on part2)
+ self._partition_wipe_check()

View File

@ -0,0 +1,65 @@
From f70ee1ef08c20485f49b30fe1072a7ccafaaa2fe Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 1 Aug 2025 15:03:09 +0200
Subject: [PATCH] Add a pre-wipe fixup function for LVM logical volumes
LVs scheduled to be removed are always activated to remove the
format during installation. If a read-only LV with the skip
activation flag contains MD metadata, activating the LV to remove
the format means the MD array gets auto-assembled by udev,
preventing us from removing it. For this special case, we simply
stop the array before removing the format.
Resolves: RHEL-68368
---
blivet/deviceaction.py | 3 +++
blivet/devices/lvm.py | 19 +++++++++++++++++++
2 files changed, 22 insertions(+)
diff --git a/blivet/deviceaction.py b/blivet/deviceaction.py
index a6fc211e..169c3a10 100644
--- a/blivet/deviceaction.py
+++ b/blivet/deviceaction.py
@@ -760,6 +760,9 @@ class ActionDestroyFormat(DeviceAction):
if hasattr(self.device, 'set_rw'):
self.device.set_rw()
+ if hasattr(self.device, 'pre_format_destroy'):
+ self.device.pre_format_destroy()
+
self.format.destroy()
udev.settle()
if isinstance(self.device, PartitionDevice) and self.device.disklabel_supported:
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index a03d57f9..6ea35212 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -2695,6 +2695,25 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
else:
blockdev.lvm.lvactivate(self.vg.name, self._name, ignore_skip=ignore_skip_activation)
+ def pre_format_destroy(self):
+ """ Fixup needed to run before wiping this device """
+ if self.ignore_skip_activation > 0:
+ # the LV was not activated during the initial scan so if there is an MD array on it
+ # it will now also get activated and we need to stop it to be able to remove the LV
+ try:
+ info = blockdev.md.examine(self.path)
+ except blockdev.MDRaidError:
+ pass
+ else:
+ # give udev a bit of time to activate the array so we can deactivate it again
+ time.sleep(5)
+ log.info("MD metadata found on LV with skip activation, stopping the array %s",
+ info.device)
+ try:
+ blockdev.md.deactivate(info.device)
+ except blockdev.MDRaidError as err:
+ log.info("failed to deactivate %s: %s", info.device, str(err))
+
@type_specific
def _pre_create(self):
LVMLogicalVolumeBase._pre_create(self)
--
2.50.1
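A minimal sketch of the fixup sequence with a hypothetical LV path: probe the LV for MD metadata and, if an array could be auto-assembled from it, stop it before the format wipe:

    from gi.repository import BlockDev as blockdev

    blockdev.ensure_init(blockdev.plugin_specs_from_names(["mdraid"]))

    try:
        info = blockdev.md.examine("/dev/testvg/testlv")  # hypothetical LV path
    except blockdev.MDRaidError:
        pass                                  # no MD metadata on the LV
    else:
        blockdev.md.deactivate(info.device)   # stop the auto-assembled array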

View File

@ -23,30 +23,51 @@ Version: 3.6.0
#%%global prerelease .b2
# prerelease, if defined, should be something like .a1, .b1, .b2.dev1, or .c2
Release: 8%{?prerelease}%{?dist}
Release: 28%{?prerelease}%{?dist}
Epoch: 1
License: LGPLv2+
%global realname blivet
%global realversion %{version}%{?prerelease}
Source0: http://github.com/storaged-project/blivet/archive/%{realname}-%{realversion}.tar.gz
Source1: http://github.com/storaged-project/blivet/archive/%{realname}-%{realversion}-tests.tar.gz
Patch0: 0001-force-lvm-cli.plugin
Patch0: 0001-force-lvm-cli-plugin.patch
Patch1: 0002-remove-btrfs-plugin.patch
Patch2: 0003-Revert-More-consistent-lvm-errors.patch
Patch3: 0004-Revert-Terminology-cleanups.patch
Patch4: 0005-DDF-RAID-support-using-mdadm.patch
Patch5: 0006-Revert-Remove-the-Blivet.roots-attribute.patch
Patch6: 0007-Fix-potential-AttributeError-when-getting-stratis-bl.patch
Patch7: 0008-tests-Skip-XFS-resize-test-on-CentOS-RHEL-8.patch
Patch8: 0009-Revert-Adjust-to-new-XFS-min-size.patch
Patch9: 0010-Catch-BlockDevNotImplementedError-for-btrfs-plugin-c.patch
Patch3: 0004-DDF-RAID-support-using-mdadm.patch
Patch4: 0005-Revert-Remove-the-Blivet.roots-attribute.patch
Patch5: 0006-Fix-potential-AttributeError-when-getting-stratis-bl.patch
Patch6: 0007-tests-Skip-XFS-resize-test-on-CentOS-RHEL-9.patch
Patch7: 0008-Revert-Adjust-to-new-XFS-min-size.patch
Patch8: 0009-Catch-BlockDevNotImplementedError-for-btrfs-plugin-c.patch
Patch9: 0010-Add-basic-support-for-NVMe-and-NVMe-Fabrics-devices.patch
Patch10: 0011-Default-to-encryption-sector-size-512-for-LUKS-devic.patch
Patch11: 0012-Add-support-for-specifying-stripe-size-for-RAID-LVs.patch
Patch12: 0013-Fix-setting-kickstart-data.patch
Patch13: 0014-Do-not-set-memory-limit-for-LUKS2-when-running-in-FI.patch
Patch14: 0015-Add-support-for-filesystem-online-resize.patch
Patch15: 0016-Backport-iSCSI-initiator-name-related-fixes.patch
Patch16: 0017-Add-support-for-creating-shared-LVM-setups.patch
Patch16: 0017-nvme-additional-rpms-for-dracut.patch
Patch17: 0018-nvme-TP4126-fixes-1.patch
Patch18: 0019-nvme-hostnqn_from_active_fabrics_connection.patch
Patch19: 0020-nvme-add_unit_tests.patch
Patch20: 0021-Add-support-for-creating-shared-LVM-setups.patch
Patch21: 0022-add-udev-builtin-path_id-property-to-zfcp-attached-S.patch
Patch22: 0023-Do-not-add-new-PVs-to-the-LVM-devices-file-if-it-doe.patch
Patch23: 0024-Added-support-for-PV-grow.patch
Patch24: 0025-Stratis-fixes-backport.patch
Patch25: 0026-XFS-resize-test-fix.patch
Patch26: 0027-RHEL96-bugfixes-1.patch
Patch27: 0028-Fix-checking-for-NVMe-plugin-availability.patch
Patch28: 0029-Align-sizes-up-for-growable-LVs.patch
Patch29: 0030-mod_pass_in_stratis_test.patch
Patch30: 0031-Fix_running_tests_in_FIPS_mode.patch
Patch31: 0032-Set-persistent-allow-discards-flag-for-new-LUKS-devices.patch
Patch32: 0033-Do-not-remove-PVs-from-devices-file-if-disabled-or-doesnt-exist.patch
Patch33: 0034-Include-additional-information-in-PartitioningError.patch
Patch34: 0035-LVMPV-format-size-fix.patch
Patch35: 0036-Make-ActionDestroyFormat-optional.patch
Patch36: 0037-Wipe-end-partition-before-creating-it-as-well-as-the-start.patch
Patch37: 0038-Add-a-pre-wipe-fixup-function-for-LVM-logical-volume.patch
# Versions of required components (done so we make sure the buildrequires
# match the requires versions of things).
@ -154,6 +175,7 @@ Recommends: libblockdev-lvm >= %{libblockdevver}
Recommends: libblockdev-mdraid >= %{libblockdevver}
Recommends: libblockdev-mpath >= %{libblockdevver}
Recommends: libblockdev-nvdimm >= %{libblockdevver}
Recommends: libblockdev-nvme >= %{libblockdevver}
Recommends: libblockdev-part >= %{libblockdevver}
Recommends: libblockdev-swap >= %{libblockdevver}
Recommends: libblockdev-s390 >= %{libblockdevver}
@ -209,306 +231,575 @@ configuration.
%endif
%changelog
* Mon Oct 30 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-8
* Mon Aug 04 2025 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-28
- Add a pre-wipe fixup function for LVM logical volumes
Resolves: RHEL-68368
* Fri May 30 2025 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-27
- Wipe end partition before creating it as well as the start
Resolves: RHEL-76917
* Tue May 20 2025 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-26
- Make ActionDestroyFormat optional when the device is also scheduled to be removed
Resolves: RHEL-8008
Resolves: RHEL-8012
* Mon Apr 14 2025 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-25
- Get the actual PV format size for LVMPV format
Resolves: RHEL-74078
- Include additional information in PartitioningError
Resolves: RHEL-8005
* Thu Mar 27 2025 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-24
- Do not remove PVs from devices file if disabled or doesn't exist
Resolves: RHEL-84662
* Tue Mar 11 2025 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-23
- Set persistent allow-discards flag for newly created LUKS devices
Resolves: RHEL-82430
* Tue Nov 12 2024 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-22
- Fix running tests in FIPS mode
Resolves: RHEL-8029
* Fri Nov 1 2024 Jan Pokorny <japokorn@redhat.com> - 3.6.0-21
- Modified passphrase in stratis test
Resolves: RHEL-8029
* Thu Oct 24 2024 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-20
- Align sizes up for growable LVs
Resolves: RHEL-8036
Resolves: RHEL-19725
* Mon Sep 23 2024 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-19
- Fix checking for NVMe plugin availability
Resolves: RHEL-28124
* Mon Sep 09 2024 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-18
- Add a basic read-only support for UDF filesystem
Resolves: RHEL-13329
- nvme: Skip startup/write when NVMe plugin isn't available
Resolves: RHEL-28124
* Mon Jul 22 2024 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-17
- Fix 'Try waiting after partition creation for XFS resize test'
Resolves: RHEL-8009
* Thu Jun 27 2024 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-16
- tests: Try waiting after partition creation for XFS resize test
Resolves: RHEL-8009
* Thu May 16 2024 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-15
- Backport fixes for Stratis support needed for storage role
Resolves: RHEL-35382
- Add support for resizing PVs to the size of the underlying block device
Resolves: RHEL-35386
* Fri Feb 09 2024 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-14
- Do not add new PVs to the LVM devices file if it doesn't exist and VGs are present
Resolves: RHEL-473
* Thu Jan 18 2024 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-13
- add udev-builtin-path_id property to zfcp-attached SCSI disks
Resolves: RHEL-22007
* Wed Dec 13 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-12
- Add support for creating shared LVM setups
Resolves: RHEL-14021
Resolves: RHEL-324
* Mon Jul 24 2023 Jan Pokorny <japokorn@redhat.com> - 3.6.0-7
Backport iSCSI initiator name related fixes:
* Mon Dec 11 2023 Tomas Bzatek <tbzatek@redhat.com> - 3.6.0-11
- nvme: Retrieve HostNQN from a first active fabrics connection
- tests: Add a simple unit test for the NVMe module
Resolves: RHEL-11541
* Tue Sep 26 2023 Tomas Bzatek <tbzatek@redhat.com> - 3.6.0-10
- nvme: Require additional rpms for dracut
Resolves: RHEL-2855
- nvme: Align HostNQN and HostID format to TP-4126
Resolves: RHEL-1254
* Mon Jul 24 2023 Jan Pokorny <japokorn@redhat.com> - 3.6.0-9
Backport iSCSI initiator name related fixes:
- Allow changing iSCSI initiator name after setting it
Resolves: rhbz#2083139
Resolves: rhbz#2221935
- Add a basic test case for the iscsi module
Related: rhbz#2083139
Resolves: rhbz#2221935
- tests: Use blivet-specific prefix for targetcli backing files
Related: rhbz#2083139
Resolves: rhbz#2221935
- iscsi: Save firmware initiator name to /etc/iscsi/initiatorname.iscsi
Resolves: rhbz#2084043
Resolves: rhbz#2221932
- tests: Improve iscsi_test.ISCSITestCase
Related: rhbz#2083139
Resolves: rhbz#2221935
* Thu May 18 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-6
- Fix setting kickstart data
Resolves: rhbz#2175166
- Do not set memory limit for LUKS2 when running in FIPS mode
Resolves: rhbz#2183437
* Wed May 24 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-8
- Add support for filesystem online resize
Resolves: rhbz#2168680
Resolves: RHEL-326
* Tue May 02 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-5
* Thu May 18 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-7
- Fix setting kickstart data
Resolves: rhbz#2174296
- Do not set memory limit for LUKS2 when running in FIPS mode
Resolves: rhbz#2193096
* Tue May 02 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-6
- Add support for specifying stripe size for RAID LVs
Resolves: rhbz#2142550
Resolves: RHEL-327
* Thu Jan 19 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-4
* Thu Jan 19 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-5
- Default to encryption sector size 512 for LUKS devices
Resolves: rhbz#2160465
Resolves: rhbz#2103800
* Tue Dec 13 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-4
- Add basic support for NVMe and NVMe Fabrics devices
Resolves: rhbz#2123337
* Thu Nov 03 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-3
- Catch BlockDevNotImplementedError for btrfs plugin calls
Resolves: rhbz#2139169
Resolves: rhbz#2139166
- Revert "Adjust to new XFS min size"
Resolves: rhbz#2139187
Resolves: rhbz#2139189
* Fri Oct 21 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-2
- Skip XFS resize test on CentOS/RHEL 8
Related: rhbz#2123712
* Thu Oct 20 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-2
- Fix potential AttributeError when getting stratis blockdev info
Related: rhbz#2123711
- tests: Skip XFS resize test on CentOS/RHEL 9
Related: rhbz#2123711
* Fri Oct 21 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-1
* Thu Oct 13 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-1
- Rebase to the latest upstream release 3.6.0
Resolves: rhbz#2123712
Resolves: rhbz#2123711
* Thu Aug 18 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-13
* Thu Aug 18 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-16
- DDF RAID support using mdadm
Resolves: rhbz#2063791
Resolves: rhbz#2109030
* Mon Jun 20 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-12
* Mon Jun 20 2022 Jan Pokorny <japokorn@redhat.com> - 3.4.0-15
- Add a very simple NVMe module
Resolves: rhbz#2073008
- Add support for NPIV-enabled zFCP devices
Resolves: rhbz#1497087
Resolves: rhbz#1937030
- Fix removing zFCP SCSI devices
Related: rhbz#1937030
- tests: Mark "fake" disks in test_get_related_disks as non-existing
Resolves: rhbz#2062690
* Thu Jun 02 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-11
- Fix running gating tests on AWS/Xen machines
Resolves: rhbz#2093207
* Thu Jun 02 2022 Jan Pokorny <japokorn@redhat.com> - 3.4.0-14
- Release version increase to fix RHEL upgrade path
Related: rhbz#2081278
* Thu Jun 02 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-10
* Thu Jun 02 2022 Jan Pokorny <japokorn@redhat.com> - 3.4.0-13
- Fix getting PV info in LVMPhysicalVolume from the cache
Resolves: rhbz#2079220
Resolves: rhbz#2079221
- Do not crash when changing disklabel on disks with active devices
Resolves: rhbz#2078801
Resolves: rhbz#2078803
- ActionDestroyDevice should not obsolete ActionRemoveMember
Resolves: rhbz#2076958
Resolves: rhbz#2076956
- Correctly set vg_name after adding/removing a PV from a VG
Resolves: rhbz#2081276
- Use LVM PV format current_size in LVMVolumeGroupDevice._remove
Related: rhbz#2081276
Resolves: rhbz#2081278
- Add support for creating LVM cache pools
Resolves: rhbz#2055198
Resolves: rhbz#2055200
- Use LVM PV format current_size in LVMVolumeGroupDevice._remove
Related: rhbz#2081278
* Mon Jan 10 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-9
- Translation update
Resolves: rhbz#2003050
* Tue Feb 01 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-12
- Fix log message for the LVM devices filter
Resolves: rhbz#2034277
- Exclude unusable disks from PartitionFactory
Resolves: rhbz#2017432
* Tue Dec 14 2021 ojtech Trefny <vtrefny@redhat.com> - 3.4.0-8
* Tue Dec 14 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-11
- Replace all log_exception_info calls with log.info
Resolves: rhbz#2028134
Resolves: rhbz#2028391
* Fri Nov 26 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-7
- Release number bump
Related: rhbz#1988276
* Thu Dec 09 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-10
- Use LVM devices file instead of filter regex
Resolves: rhbz#1967212
* Fri Nov 26 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-6
* Tue Nov 30 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-9
- Rebuild with higher release number to fix errata
Related: rhbz#2012121
* Fri Nov 26 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-8
- Improve error message printed for missing dependencies
Resolves: rhbz#1988276
Resolves: rhbz#2012121
- Use bigger chunk size for thinpools bigger than ~15.88 TiB
Resolves: rhbz#1949953
Resolves: rhbz#1971516
* Wed Aug 4 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-5
- Fix running upstream test suite in gating
Resolves: rhbz#1990232
* Tue Aug 17 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-7
- Fix script for running tests in gating
Resolves: rhbz#1990237
* Wed Aug 11 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-6
- Remove "Revert Terminology cleanups" patch
Resolves: rhbz#1990982
- Fix running tests in gating
Resolves: rhbz#1990237
- Opt out from using LVM devices file in 9 Beta
Resolves: rhbz#1984851
* Tue Aug 10 2021 Mohan Boddu <mboddu@redhat.com> - 1:3.4.0-5
- Rebuilt for IMA sigs, glibc 2.34, aarch64 flags
Related: rhbz#1991688
* Mon Aug 2 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-4
- Do not set chunk size for RAID 1
Resolves: rhbz#1987170
Resolves: rhbz#1987176
* Wed Jul 21 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-3
* Sat Jul 17 2021 Neal Gompa <ngompa@centosproject.org> - 3.4.0-3
- Fix resolving devices with names that look like BIOS drive number
Resolves: rhbz#1983309
Resolves: rhbz#1983310
* Wed Jul 7 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-2
- Fix activating old style LVM snapshots
Resolves: rhbz#1961739
Resolves: rhbz#1961944
* Wed May 5 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-1
* Wed Jun 9 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-1
- Rebase to latest upstream release 3.4.0
Resolves: rhbz#1918357
Resolves: rhbz#1964341
* Tue Feb 9 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-9
- LVM VDO support
Resolves: rhbz#1509337
* Fri Apr 16 2021 Mohan Boddu <mboddu@redhat.com> - 1:3.3.3-2
- Rebuilt for RHEL 9 BETA on Apr 15th 2021. Related: rhbz#1947937
* Mon Jan 11 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-8
- Let parted fix fixable issues with partition table
Resolves: rhbz#1846869
- Fix possible UnicodeDecodeError when reading sysfs attributes
Resolves: rhbz#1849326
* Thu Feb 18 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.3.3-1
- apply compression settings from blivet.flags.btrfs_compression (#1926892) (michel)
* Wed Nov 18 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-7
- Add support for XFS format grow
Resolves: rhbz#1862349
- Do not limit swap to 128 GiB
Resolves: rhbz#1656485
- Use UnusableConfigurationError for partially hidden multipath devices
Resolves: rhbz#1877052
- Fix possible UnicodeDecodeError when reading model from sysfs
Resolves: rhbz#1849326
- Add basic support for LVM VDO devices
Resolves: rhbz#1828745
* Wed Jan 27 2021 Fedora Release Engineering <releng@fedoraproject.org> - 1:3.3.2-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_34_Mass_Rebuild
* Thu Aug 20 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-6
- Fix name resolution for MD devices and partitions on them
Resolves: rhbz#1862904
- Fix ignoring disk devices with parents or children
Resolves: rhbz#1866243
* Thu Jan 14 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.3.2-1
- Fix "suggest_container_name" for Anaconda (vtrefny)
- Add test for util.get_sysfs_attr (vtrefny)
- Use util.get_sysfs_attr in __is_ignored_blockdev to read device mode (vtrefny)
- Fix possible UnicodeDecodeError when reading sysfs attributes (vtrefny)
- Update LUKS device name after parent partition name change (vtrefny)
- TFT is still broken so let's avoid failures by just doing a build (jkonecny)
- Fix logging information about ignoring hidden devices (vtrefny)
- Add __repr__ and __str__ methods to ParentList (vtrefny)
- Make sure LV name is unique when adding it in device factory (vtrefny)
- In name checks add name which is already in use to error message (vtrefny)
- Refactor suggest device/container name functions (vtrefny)
- Remove an unused attribute from the Blivet class (vponcova)
- Add PyPI build artifacts to .gitignore (vtrefny)
- Sync spec with downstream (vtrefny)
* Thu Jul 16 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-5
* Wed Nov 11 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.3.1-2
- Remove btrfs from requested libblockdev plugins on RHEL 9
* Tue Oct 20 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.3.1-1
- Make sure the product name is safe when using it for device name (vtrefny)
- Run packit RPM builds on Fedora ELN (vtrefny)
- Allow specifying 'mode' for the sdist command (vtrefny)
- Enable packit RPM builds on pull requests (vtrefny)
- Start the iscsi-init service (#1880673) (vponcova)
- Let parted fix fixable issues with partition table (vtrefny)
- edd: Fix UnboundLocalError when trying to close fd in collect_mbrs (vtrefny)
- Use UnusableConfigurationError for partially hidden multipath devices (vtrefny)
- Close fd if it fails to read the device (nashok)
- Do not run udev.settle in StorageDevice._pre_teardown (vtrefny)
- Try to not use udev.resolve_devspec when querying MountsCache (vtrefny)
- Remove Zanata config file (vtrefny)
- Ignore new pylint warning W0707 "raise-missing-from" (vtrefny)
- Use SSH "link" for l10n repository in Makefile (vtrefny)
- Fix source tarball cleanup in srpm and rpm Makefile targets (vtrefny)
* Wed Sep 16 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.3.0-2
- Avoid using unnecessary udev.settle calls (#1876162)
* Thu Aug 20 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.3.0-1
- Account for pmspare grow when adjusting thinpool metadata size (vtrefny)
- Fix ignoring disk devices with parents or children (vtrefny)
- Terminology cleanup, part 3 (vtrefny)
- Terminology cleanups, part 2. (dlehman)
- Clean up some terminology. (dlehman)
- Add tests for udev.device_get_name for RAID devices (vtrefny)
- Fix name resolution for MD devices and partitions on them (vtrefny)
- Fix reading hidden sysfs attribute (vtrefny)
- Add support for specifying sector size for LUKS 2 devices (vtrefny)
- Do not ignore unknown/unsupported device mapper devices (vtrefny)
- Allow specifying custom hash function for LUKS 2 format (vtrefny)
- Ignore devices marked as hidden in sysfs (#1856974) (vtrefny)
- Add basic F2FS support (#1794950) (vtrefny)
- Make safe_device_name device type specific (vtrefny)
- Add exFAT to filesystems we recognize (vtrefny)
- Use xfs_db in read-only mode when getting XFS information (vtrefny)
- Add support for checking and fixing XFS using xfs_repair (vtrefny)
- Ignore zRAM devices in VMBackedTestCase (vtrefny)
- Add tests for XFS resize (vtrefny)
- Add support for XFS format grow (vtrefny)
- Typo fix (vtrefny)
- tests: Skip test_reset when running as non-root (vtrefny)
- tests: Patch LVM availability functions for some tests (vtrefny)
- tests: Patch LVM lvs call for some non-LVM tests (vtrefny)
- Do not propagate ped exception from add_partition (vtrefny)
- Do not use BlockDev.utils_have_kernel_module to check for modules (vtrefny)
- set allowed disk labels for s390x as standard ones (msdos + gpt) plus dasd (dan)
- Do not use FSAVAIL and FSUSE%% options when running lsblk (vtrefny)
- Rewrite README and add it as a long_description in setup.py (vtrefny)
- Round down to nearest MiB value when writing ks partition info. (sbueno+anaconda)
- Add _teardown method to IntegrityDevice (vtrefny)
- Fix status for DM Integrity format (#1814005) (vtrefny)
- udev: Add function to get list of device's holders (vtrefny)
- Add basic support for LVM writecache devices (vtrefny)
- Add test for SwapSpace max size (vtrefny)
- Do not limit swap to 128 GiB (vtrefny)
- Fix possible UnicodeDecodeError when reading model from sysfs (vtrefny)
- Add install_requires and classifiers to setup.py (vtrefny)
- Import setuptools in setup.py to make bdist_wheel work (vtrefny)
- Set device.original_format to the new format in ActionCreateFormat (vtrefny)
- Fix resizable property for partitions (vtrefny)
- Update TODO. (dlehman)
- Ignore pycodestyle warning E741 (vtrefny)
- Skip test_mounting for filesystems that are not mountable (vtrefny)
- Sync specfile with downstream (japokorn)
- Make extended partitions resizable (vtrefny)
- Fix LV min size for resize in test_action_dependencies (vtrefny)
- Fix checking for filesystem support in action_test (vtrefny)
- Add basic support for LVM VDO devices (vtrefny)
- Update POT file in the Weblate repo during "make potfile" (vtrefny)
- Skip translation canary check if POT file is not available (vtrefny)
- Add blivet-weblate repository as a submodule (vtrefny)
- Remove Zanata from our build process (vtrefny)
- Remove po folder (vtrefny)
- More consistent lvm errors (API break) (japokorn)
- Added support for device tags (japokorn)
* Wed Jul 29 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-4
- set allowed disk labels for s390x as standard ones (msdos + gpt) plus dasd
Resolves: rhbz#1855200
- Do not use BlockDev.utils_have_kernel_module to check for modules
Resolves: rhbz#1855344
* Thu Jul 09 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-4
- Blivet RHEL 8.3 localization update
Resolves: rhbz#182056
- Do not use FSAVAIL and FSUSE% options when running lsblk
Resolves: rhbz#1853624
* Tue Jul 28 2020 Fedora Release Engineering <releng@fedoraproject.org> - 1:3.2.2-3
- Rebuilt for https://fedoraproject.org/wiki/Fedora_33_Mass_Rebuild
* Tue Jun 30 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-3
- Round down to nearest MiB value when writing ks partition info
Resolves: rhbz#1850670
* Sat May 23 2020 Miro Hrončok <mhroncok@redhat.com> - 1:3.2.2-2
- Rebuilt for Python 3.9
* Wed Jun 24 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-2
- Add extra sleep after pvremove call
Resolves: rhbz#1640601
* Thu May 21 2020 Jan Pokorny <japokorn@redhat.com> - 3.2.2-1
- Allow setting size for non-existing LUKS devices (vtrefny)
- Fix toggling container encryption in devicefactory (#1827254) (vtrefny)
- Do not include destroyed devices in list of names (#1830515) (vtrefny)
- Fix changing LUKS version in devicefactory (#1834373) (vtrefny)
- Add "is_empty" property to StorageDevice (vtrefny)
- Mark warning "'GError' has no 'message' member" as false positive (vtrefny)
- Use the specified LUKS version for container encryption (vponcova)
- Log current storage state before reset using lsblk (vtrefny)
- Do not remove _netdev mount option specified manually by users (vtrefny)
- Fix renaming encrypted devices in the DeviceFactory (vtrefny)
- Fix typo in string formatter in EddEntry (vtrefny)
* Fri May 22 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-1
- Rebase to the latest upstream release 3.2.2
Resolves: rhbz#1714970
* Tue Apr 21 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.1-2
- Invalidate LVM caches in blivet device discovery loop (#1824418)
* Mon Mar 02 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-20
- add `-y' to lvm.pvcreate
Resolves: rhbz#1768494
* Mon Apr 06 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.1-1
- Correctly recognize EFI format on an MD RAID device (#1695913) (vtrefny)
- Do not set empty name instead of invalid one in devicefactory (#1813710) (vtrefny)
- Fix crash for devices without ID_PATH udev property (#1814920) (vtrefny)
- Allow for reserved vg space and a growable thin pool. (#1783946) (dlehman)
- Fix name resolution for md member partitions. (#1798792) (dlehman)
* Wed Jan 29 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-19
- Override LVM skip-activation to allow for thorough removal
Resolves: rhbz#1766498
- Make sure LVs are writable before wiping
Related: rhbz#1766498
- Fix udev test names so they actually get run.
Related: rhbz#1758102
- Add recognition of Dell FW RAID to udev.device_is_disk.
Resolves: rhbz#1758102
- Align base sizes up if smaller than min I/O size.
Resolves: rhbz#1781106
- Make minimal and optimal alignment getters public.
Related: rhbz#1781106
* Wed Mar 11 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.0-3
- Fix name resolution for md member partitions. (#1798792)
* Tue Nov 19 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-18
- Check for PV sector size when creating new VG
Resolves: rhbz#1754446
* Thu Jan 30 2020 Fedora Release Engineering <releng@fedoraproject.org> - 1:3.2.0-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_32_Mass_Rebuild
* Wed Oct 02 2019 David Lehman <dlehman@redhat.com> - 3.1.0-17
- Fix util.detect_virt function
Resolves: rhbz#1676935
* Wed Jan 29 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.0-1
- Align base sizes up if smaller than min I/O size. (dlehman)
- Make minimal and optimal alignment getters public. (dlehman)
- Add support for relabeling of the swap format (vtrefny)
- Define the 'relabels' method for all formats (vtrefny)
- Add support for LVMPV format resize (vtrefny)
- Add a new "id_path" attribute for iSCSI and FCoE disks (vtrefny)
- Do not load module when creating an FS instance (vtrefny)
- Add a simple script for running tests manually (vtrefny)
- Remove unused API code (jkonecny)
- devicetree.names is now a property (japokorn)
- initial PowerNV class support (dan)
- Use LUKS2 by default (vponcova)
* Mon Aug 05 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-16
- Minor cleanups to reduce log noise
Related: rhbz#1579375
* Wed Jan 29 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.1.7-1
- Use SHA256 instead of MD5 for /proc/mounts hash calculation (vtrefny)
- Fix udev test names so they actually get run. (dlehman)
- Add recognition of Dell FW RAID to udev.device_is_disk. (dlehman)
- Fix Blivet DBus service version in service and config files (vtrefny)
- Make sure _chrooted_mountpoint attribute is defined before using it (vtrefny)
- Allow running 'write_label' in dry run mode on non-existing devices (vtrefny)
- Make 'makeupdates' and 'makebumpver' scripts Python 3 compatible (vtrefny)
- Do not hardcode coverage executable name (vtrefny)
- Make sure LVs are writable before wiping. (dlehman)
- Override LVM skip-activation to allow for thorough removal. (dlehman)
- Add setters for requested_size/percent form LVMVolumeGroupDevice (vtrefny)
- Set min size for XFS to 16 MiB (vtrefny)
- Revert "Ignore invalid-overridden-method warning for abstract properties" (vtrefny)
- Fix invalid-overridden-method in events_test (vtrefny)
* Mon Jul 15 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-15
- Do not crash if 'dm.get_member_raid_sets' fails
Resolves: rhbz#1704289
* Fri Oct 25 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.6-1
- Do not allow creating VGs with PVs with different sector size (vtrefny)
- Add a new "sector_size" property to storage devices. (vtrefny)
- Ignore invalid-overridden-method warning for abstract properties (vtrefny)
- Change NFSMount._availability_errors to a property (vtrefny)
- Fix util.detect_virt function (vtrefny)
- Do not try to normalize size for zero size device factories (vtrefny)
- Always set default key size to 512 bits for ciphers with XTS mode (vtrefny)
* Tue Jul 02 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-14
- Correctly handle non-unicode iSCSI initiator names
Resolves: rhbz#1632117
* Thu Oct 03 2019 Miro Hrončok <mhroncok@redhat.com> - 1:3.1.5-4
- Rebuilt for Python 3.8.0rc1 (#1748018)
* Tue Jun 18 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-13
- Fix reading LV attributes in LVMVolumeGroupDevice.status
Resolves: rhbz#1721381
* Tue Aug 27 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.5-3
- Do not try to normalize size for zero size device factories (#1743753)
* Fri Jun 14 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-12
- Deactivate incomplete VGs along with everything else
Resolves: rhbz#1635125
- Automatically adjust size of growable devices for new format
Resolves: rhbz#1680013
- Add flag for protecting cdrom devices during populate
Resolves: rhbz#1719648
- Clean up some errors evident in installer logs
Resolves: rhbz#1579375
- Use dasd disklabel for vm disks backed by dasds
Resolves: rhbz#1676935
* Fri Aug 16 2019 Miro Hrončok <mhroncok@redhat.com> - 1:3.1.5-2
- Rebuilt for Python 3.8
* Thu May 16 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-11
- Various test fixes for RHEL 8
Related: rhbz#1682561
- Add upstream test suite to the SRPM
Related: rhbz#1682561
* Thu Aug 15 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.5-1
- Move dependencies code from StorageDevice to Device (vtrefny)
- Always use luks_data.min_entropy as a default minimum entropy (vponcova)
- Add 'protected' property setter to LVMVolumeGroupDevice (#1729363) (vtrefny)
- fix of LV max size calculation (japokorn)
- Added min size for partitions (japokorn)
- Improved non-unique UUID handling (japokorn)
- Check if disklabel supports partition names (#1723228) (vtrefny)
- format_device: Revert destroy action if create fails (#1727589) (vtrefny)
- Do not allow resizing of LUKS devices with integrity (vtrefny)
- Return underlying block device as 'slave' for LUKS with integrity (vtrefny)
- Fix removing LUKS devices with integrity (vtrefny)
- Check status before activating dmraid set in populate. (#1723979) (dlehman)
- Use DBus call to see if we're in a vm. (dlehman)
- Use dasd disklabel for vm disks backed by dasds. (dlehman)
- Add a function to detect if running in a vm. (dlehman)
- Remove teardown_all from the populate method (vponcova)
- Correctly handle non-unicode iSCSI initiator names (vtrefny)
- Add, test and use a new method to get size with reserve (vpodzime)
- Beware non-positive sizes in thpool metadata size calculations (vpodzime)
- Log sizes in MiB in thpool auto metadata size calculations (vpodzime)
- Recalculate thpool's metadata size on resize in LVMThinPFactory (vpodzime)
- Move the thpool reserve calculations to LVMFactory (vpodzime)
* Wed Apr 03 2019 David Lehman <dlehman@redhat.com> - 3.1.0-10
- Ensure correct type of mpath cache member list.
Related: rhbz#1672971
* Fri Jul 26 2019 Fedora Release Engineering <releng@fedoraproject.org> - 1:3.1.4-3
- Rebuilt for https://fedoraproject.org/wiki/Fedora_31_Mass_Rebuild
* Mon Feb 25 2019 David Lehman <dlehman@redhat.com> - 3.1.0-9
- Update to latest translations.
Resolves: rhbz#1608337
- Require libfc instead of fcoe for offloaded FCoE.
Resolves: rhbz#1575953
- Use udev to determine if disk is a multipath member.
Related: rhbz#1575953
- Don't crash if blockdev mpath plugin isn't available.
Resolves: rhbz#1672971
* Thu Jul 11 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.4-2
- Remove teardown_all from the populate method (vponcova)
- initial PowerNV class support (dan)
* Tue Jan 15 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-8
* Tue Jun 11 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.4-1
- Don't call fnmatch with None (#1698937) (vponcova)
- Do not crash on non-int lun argument when creating iscsi disk object. (rvykydal)
- Make iscsi device attribute modifications backward compatible. (rvykydal)
- Do not store iscsi module nodeinfo in device object. (rvykydal)
- Only call mpath plugin when it is available. (#1697378) (dlehman)
- Include tests archive where appropriate in make targets. (dlehman)
- Add spec file logic to include unit tests in SRPM. (dlehman)
- Add a target to create an archive of the unit tests. (dlehman)
- Remove profanity from an old comment. (dlehman)
- Fix mounting of the filesystem iso9660 (vponcova)
- Remove unnecessary pass statements (vtrefny)
- Check for format tools availability in action_test (vtrefny)
- Skip weak dependencies test if we don't have all libblockdev plugins (vtrefny)
- Properly clean after availability test case (vtrefny)
- Ensure correct type of mpath cache member list. (dlehman)
- Do not crash if 'dm.get_member_raid_sets' fails (#1684851) (vtrefny)
- Fix supported disklabels in 'test_platform_label_types' on EFI (vtrefny)
- Support legacy MBR (msdos) as part of UEFI to enable hybrid builds (pbrobinson)
- Automatically adjust size of growable devices for new format (vtrefny)
- spec: Remove obsolete Group tag and bump min libblockdev version (vtrefny)
* Thu Mar 21 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.3-3
- Ensure correct type of mpath cache member list
* Mon Mar 11 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.3-2
- Support legacy MBR (msdos) as part of UEFI to enable hybrid builds (pbrobinson)
* Wed Feb 27 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.3-1
- Don't crash if blockdev mpath plugin isn't available. (#1672971) (dlehman)
- iscsi: Add default value to unused 'storage' argument in 'write' (vtrefny)
- Add exported property to LVMVolumeGroupDevice (vtrefny)
- Add VG data to static_data (vtrefny)
- Do not try to get format free space for non-existing formats (vtrefny)
- Do not raise exception if can't get PV free space (vtrefny)
- Fix undefined attribute in LVM info cache (vtrefny)
- Use raw_device to get thinpool device in LVMThinPFactory (#1490174) (vtrefny)
- Do not crash if DM RAID activation fails (#1661712) (vtrefny)
- Remove the unused sysroot property (vponcova)
- Remove unused attributes from the Blivet class (vponcova)
- Remove the unused gpt flag (vponcova)
- Copy the iSCSI initiator name file to the installed system (vtrefny)
Resolves: rhbz#1664587
- Use udev to determine if disk is a multipath member. (dlehman)
- Require libfc instead of fcoe for offloaded FCoE. (#1575953) (dlehman)
* Mon Dec 17 2018 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-7
- Wipe all stale metadata after creating md array (dlehman)
Resolves: rhbz#1639682
* Sat Feb 02 2019 Fedora Release Engineering <releng@fedoraproject.org> - 1:3.1.2-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_30_Mass_Rebuild
* Tue Oct 16 2018 David Lehman <dlehman@redhat.com> - 3.1.0-6
- Fix options for ISCSI functions (vtrefny)
Resolves: rhbz#1635569
* Wed Dec 12 2018 Vojtech Trefny <vtrefny@redhat.com> - 3.1.2-1
- Fix reading LV attributes in LVMVolumeGroupDevice.status (vtrefny)
- Do not try to login to iBFTs with active session (vtrefny)
- Fix xfs sync of chrooted mountpoint. (dlehman)
- Only update sysfs path in ctor for active devices. (dlehman)
- Fix new pep8/pycodestyle warnings (vtrefny)
- Ignore PEP8 W504 warning ("line break after binary operator") (vtrefny)
- pylint: Allow loading all C extensions (vtrefny)
- Use 'pycodestyle' instead of 'pep8' (vtrefny)
- Fix failing populator test without nvdimm plugin (vtrefny)
- Add 'srpm' and 'rpm' targets to Makefile for building (S)RPMs (vtrefny)
- Fix crash on reset on systems without nvdimm plugin (vtrefny)
- Use the size info of internal LVs when getting space usage for existing LVs (v.podzimek)
- Calculate the number of RAID PVs from the origin for cached LVs (v.podzimek)
- Make raid_level a property of an LV object (v.podzimek)
- Add a test for DeviceTree.get_related_disks. (dlehman)
- Fix ixgbe/bnx2fc fcoe disk detection (#1651506) (rvykydal)
- Use RAID name for partitions on an MD array (vtrefny)
- Move btrfs name validation to devicelibs (vtrefny)
- Don't try to set selinux context for nodev or vfat file systems. (dlehman)
- Only try to set selinux context for lost+found on ext file systems. (dlehman)
- Wipe all stale metadata after creating md array. (#1639682) (dlehman)
- Don't try to update sysfs path for non-block devices. (#1579375) (dlehman)
- Don't raise errors without messages (vponcova)
- Install ndctl when NVDIMMs are used. (dlehman)
- Deactivate incomplete VGs along with everything else. (dlehman)
- Work around udev timing issues. (dlehman)
- Fix options for ISCSI functions (#1632656) (vtrefny)
- Use format.status when checking for PV status (vtrefny)
- Remove Anaconda flags (vponcova)
- Remove square brackets when matching internal LVs (v.podzimek)
* Thu Sep 27 2018 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-5
* Mon Oct 08 2018 Vojtech Trefny <vtrefny@redhat.com> - 3.1.1-2
- Fix options for ISCSI functions (#1632656) (vtrefny)
* Wed Sep 26 2018 Vojtech Trefny <vtrefny@redhat.com> - 3.1.1-1
- Check device dependencies only for device actions (vtrefny)
- Allow removing btrfs volumes without btrfs support (vtrefny)
- Adjust LVMPhysicalVolumeMethodsTestCase to new pvcreate option (vtrefny)
- add `-y' to lvm.pvcreate (hongxu.jia)
- Drop omap partition table tests on ARM platforms (pbrobinson)
- Update disk label tests for ARM platforms (pbrobinson)
- Ignore pylint 'no-value-for-parameter' warning (vtrefny)
- arm: add support for EFI on ARMv7 (pbrobinson)
- Aarch64 platforms: Fix gpt defaults for 64 bit arm platforms (pbrobinson)
- arch: arm: drop get_arm_machine function (pbrobinson)
- arch: arm: drop omap specifics for partitioning (pbrobinson)
- Create a separate availability check for dmraid support (vtrefny)
* Thu Aug 30 2018 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-2
- arm: add support for EFI on ARMv7 (probinson)
Related: rhbz#1623882
- Aarch64 platforms: Fix gpt defaults for 64 bit arm platforms (probinson)
Resolves: rhbz#1623882
- arch: arm: drop get_arm_machine function (probinson)
Related: rhbz#1623882
- arch: arm: drop omap specifics for partitioning (probinson)
Related: rhbz#1623882
* Thu Sep 20 2018 Tomas Orsava <torsava@redhat.com> - 3.1.0-4
- Require the Python interpreter directly instead of using the package name
- Related: rhbz#1619153
* Wed Sep 19 2018 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-3
- Check device dependencies only for device actions
Related: rhbz#1605213
- Allow removing btrfs volumes without btrfs support
Resolves: rhbz#1605213
* Tue Aug 21 2018 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-2
- Create a separate availability check for dmraid support
Resolves: rhbz#1617958
* Fri Aug 10 2018 David Lehman <dlehman@redhat.com> - 3.1.0-1
* Mon Aug 13 2018 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-1
- Allow configuring default LUKS2 PBKDF arguments using luks_data (vtrefny)
Related: rhbz#1561352
- Fix the populate_kickstart method in LUKS (vtrefny)
Related: rhbz#1561352
- Allow specifying extra arguments for PBKDF when creating LUKS2 (vtrefny)
Related: rhbz#1561352
- Add support for LUKS2 to DeviceFactory (vtrefny)
Resolves: rhbz#1561352
- DeviceFactory: use min_luks_entropy from kwargs (vtrefny)
Related: rhbz#1561352
- Fix passing 'min_luks_entropy' when creating LUKS format (vtrefny)
Related: rhbz#1561352
- Use passphrase/key file when resizing LUKS2 format (vtrefny)
Related: rhbz#1561352
- Require libblockdev 2.17 (vtrefny)
Related: rhbz#1561352
- Add support for LUKS2 format (vtrefny)
Related: rhbz#1561352
- Add initial support for DM Integrity "format" (vtrefny)
Related: rhbz#1561352
- Do not try to add LUKSDevice in LUKSFormatPopulator (vtrefny)
Related: rhbz#1561352
- Add support for dm-integrity devices (vtrefny)
Related: rhbz#1561352
- Fixed various issues preventing successful build (japokorn)
Related: rhbz#1561352
* Thu Aug 2 2018 Peter Robinson <pbrobinson@fedoraproject.org> - 3.1.0-0.5.b2
- Bump release to fix upgrade path
* Mon Jul 30 2018 David Lehman <dlehman@redhat.com> - 3.1.0-0.1.b2
- Do not ignore "Image out-of-sync" internal LVs (vtrefny)
@ -534,12 +825,15 @@ Backport iSCSI initiator name related fixes:
- Adapt mock imports for compatibility w/ python2 & python3. (dlehman)
- Use py2-compatible syntax to get system architecture. (dlehman)
* Mon Jul 16 2018 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-0.3.b1
- Remove btrfs from requested libblockdev plugins (vtrefny)
* Wed Jul 11 2018 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-0.2.b1
* Tue Jul 17 2018 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-0.4.b1
- Force command line based libblockdev LVM plugin (vtrefny)
* Fri Jul 13 2018 Fedora Release Engineering <releng@fedoraproject.org> - 1:3.1.0-0.3.b1
- Rebuilt for https://fedoraproject.org/wiki/Fedora_29_Mass_Rebuild
* Fri Jun 15 2018 Miro Hrončok <mhroncok@redhat.com> - 1:3.1.0-0.2.b1
- Rebuilt for Python 3.7
* Wed May 02 2018 David Lehman <dlehman@redhat.com> - 3.1.0-0.1.b1
- Add 'nvdimm' tag for NVDIMM namespaces (vtrefny)
- Add test for NVDIMMNamespaceDevicePopulator (vtrefny)