Compare commits

...

No commits in common. "c8" and "c9-beta" have entirely different histories.
c8 ... c9-beta

24 changed files with 2811 additions and 1120 deletions

View File

@@ -1,4 +1,4 @@
From 83ccc9f9f14845fcce7a5ba5fa21fbb97b1dbbb7 Mon Sep 17 00:00:00 2001
From 2759aaa9cbee38f80819bc136bb893184429380c Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 11 Jul 2018 15:36:24 +0200
Subject: [PATCH] Force command line based libblockdev LVM plugin
@@ -31,5 +31,5 @@ index dd8d0f54..62cc539a 100644
# do not check for dependencies during libblockdev initializtion, do runtime
# checks instead
--
2.38.1
2.37.3

View File

@@ -1,4 +1,4 @@
From c098d4112635b3ea55d5bd7e1817edbd519735fc Mon Sep 17 00:00:00 2001
From f27bdff18e98548f4c094b8cce23ca2d6270e30d Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 16 Jul 2018 14:26:11 +0200
Subject: [PATCH] Remove btrfs from requested libblockdev plugins
@@ -24,5 +24,5 @@ index 62cc539a..bbc7ea3a 100644
_requested_plugins = blockdev.plugin_specs_from_names(_REQUESTED_PLUGIN_NAMES)
# XXX force non-dbus LVM plugin
--
2.38.1
2.37.3

View File

@@ -1,4 +1,4 @@
From f6f90805020d7c6ac46f17a13a00f319fc4351f6 Mon Sep 17 00:00:00 2001
From b9021fde8ccdd14cbe192b6597f7ca350b4bb585 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 26 May 2021 12:15:54 +0200
Subject: [PATCH] Revert "More consistent lvm errors (API break)"
@@ -326,5 +326,5 @@ index 47613fdc..995c2da4 100644
pv_spec2 = LVPVSpec(pv2, Size("250 MiB"))
lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
--
2.38.1
2.37.3

View File

@@ -1,4 +1,4 @@
From f6490c469904f4808c63a170210e53acc908b018 Mon Sep 17 00:00:00 2001
From 4ad6f485a1e569feb5fd23ffcf78e08a7756e084 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 17 Aug 2022 14:24:21 +0200
Subject: [PATCH 1/2] Use MD populator instead of DM to handle DDF RAID format
@@ -35,10 +35,10 @@ index 41ddef81..4aa3f3b0 100644
_formattable = True # can be formatted
_supported = True # is supported
--
2.38.1
2.37.3
From 5fadd850aae217d7692a6c8a50b2dcd5e61a63cd Mon Sep 17 00:00:00 2001
From abc7e018f43976cdab286d67207d515a74693d16 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 17 Aug 2022 14:24:58 +0200
Subject: [PATCH 2/2] Do not read DDF RAID UUID from udev
@@ -82,5 +82,5 @@ index 3479e3f7..a7602d20 100644
def run(self):
--
2.38.1
2.37.3

View File

@@ -1,899 +0,0 @@
From d8a8d96450bf0d3458671b9b7d23d972aa540396 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 26 May 2021 12:27:34 +0200
Subject: [PATCH] Revert "Terminology cleanups"
This reverts following commits:
- 3d46339fe9cf12e9082fcbe4dc5acc9f92617e8d
- 63c9c7165e5cdfa4a47dcf0ed9d717b71e7921f2
- 8956b9af8a785ae25e0e7153d2ef0702ce2f567c
---
blivet/devicefactory.py | 24 +++----
blivet/devices/dm.py | 9 ++-
blivet/devices/loop.py | 20 +++---
blivet/devices/luks.py | 26 ++++---
blivet/errors.py | 2 +-
blivet/partitioning.py | 22 +++++-
blivet/populator/helpers/dm.py | 4 +-
blivet/populator/helpers/luks.py | 4 +-
blivet/populator/helpers/lvm.py | 2 +-
blivet/populator/helpers/mdraid.py | 14 ++--
blivet/populator/helpers/multipath.py | 8 +--
blivet/populator/populator.py | 67 ++++++++++---------
blivet/threads.py | 3 +-
blivet/udev.py | 34 +++++-----
tests/unit_tests/devicefactory_test.py | 10 +--
.../devices_test/device_size_test.py | 6 +-
tests/unit_tests/populator_test.py | 34 +++++-----
tests/unit_tests/udev_test.py | 12 ++--
tests/vmtests/vmbackedtestcase.py | 2 +-
19 files changed, 167 insertions(+), 136 deletions(-)
diff --git a/blivet/devicefactory.py b/blivet/devicefactory.py
index 6f460f6d..90082c28 100644
--- a/blivet/devicefactory.py
+++ b/blivet/devicefactory.py
@@ -859,12 +859,12 @@ class DeviceFactory(object):
parent_container.parents.remove(orig_device)
if self.encrypted and isinstance(self.device, LUKSDevice) and \
- self.raw_device.format.luks_version != self.luks_version:
- self.raw_device.format.luks_version = self.luks_version
+ self.device.slave.format.luks_version != self.luks_version:
+ self.device.slave.format.luks_version = self.luks_version
if self.encrypted and isinstance(self.device, LUKSDevice) and \
- self.raw_device.format.luks_sector_size != self.luks_sector_size:
- self.raw_device.format.luks_sector_size = self.luks_sector_size
+ self.device.slave.format.luks_sector_size != self.luks_sector_size:
+ self.device.slave.format.luks_sector_size = self.luks_sector_size
def _set_name(self):
if not self.device_name:
@@ -1201,11 +1201,11 @@ class PartitionSetFactory(PartitionFactory):
container.parents.remove(member)
self.storage.destroy_device(member)
members.remove(member)
- self.storage.format_device(member.raw_device,
+ self.storage.format_device(member.slave,
get_format(self.fstype))
- members.append(member.raw_device)
+ members.append(member.slave)
if container:
- container.parents.append(member.raw_device)
+ container.parents.append(member.slave)
continue
@@ -1227,10 +1227,10 @@ class PartitionSetFactory(PartitionFactory):
continue
- if member_encrypted and self.encrypted and self.luks_version != member.raw_device.format.luks_version:
- member.raw_device.format.luks_version = self.luks_version
- if member_encrypted and self.encrypted and self.luks_sector_size != member.raw_device.format.luks_sector_size:
- member.raw_device.format.luks_sector_size = self.luks_sector_size
+ if member_encrypted and self.encrypted and self.luks_version != member.slave.format.luks_version:
+ member.slave.format.luks_version = self.luks_version
+ if member_encrypted and self.encrypted and self.luks_sector_size != member.slave.format.luks_sector_size:
+ member.slave.format.luks_sector_size = self.luks_sector_size
##
# Prepare previously allocated member partitions for reallocation.
@@ -1290,7 +1290,7 @@ class PartitionSetFactory(PartitionFactory):
if isinstance(member, LUKSDevice):
self.storage.destroy_device(member)
- member = member.raw_device
+ member = member.slave
self.storage.destroy_device(member)
diff --git a/blivet/devices/dm.py b/blivet/devices/dm.py
index 2f936170..ae25e8e6 100644
--- a/blivet/devices/dm.py
+++ b/blivet/devices/dm.py
@@ -154,6 +154,11 @@ class DMDevice(StorageDevice):
log_method_call(self, self.name, status=self.status)
super(DMDevice, self)._set_name(value)
+ @property
+ def slave(self):
+ """ This device's backing device. """
+ return self.parents[0]
+
class DMLinearDevice(DMDevice):
_type = "dm-linear"
@@ -189,8 +194,8 @@ class DMLinearDevice(DMDevice):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status,
controllable=self.controllable)
- parent_length = self.parents[0].current_size / LINUX_SECTOR_SIZE
- blockdev.dm.create_linear(self.name, self.parents[0].path, parent_length,
+ slave_length = self.slave.current_size / LINUX_SECTOR_SIZE
+ blockdev.dm.create_linear(self.name, self.slave.path, slave_length,
self.dm_uuid)
def _post_setup(self):
diff --git a/blivet/devices/loop.py b/blivet/devices/loop.py
index 0f4d7775..78f88d7d 100644
--- a/blivet/devices/loop.py
+++ b/blivet/devices/loop.py
@@ -73,7 +73,7 @@ class LoopDevice(StorageDevice):
def update_name(self):
""" Update this device's name. """
- if not self.parents[0].status:
+ if not self.slave.status:
# if the backing device is inactive, so are we
return self.name
@@ -81,7 +81,7 @@ class LoopDevice(StorageDevice):
# if our name is loopN we must already be active
return self.name
- name = blockdev.loop.get_loop_name(self.parents[0].path)
+ name = blockdev.loop.get_loop_name(self.slave.path)
if name.startswith("loop"):
self.name = name
@@ -89,24 +89,24 @@ class LoopDevice(StorageDevice):
@property
def status(self):
- return (self.parents[0].status and
+ return (self.slave.status and
self.name.startswith("loop") and
- blockdev.loop.get_loop_name(self.parents[0].path) == self.name)
+ blockdev.loop.get_loop_name(self.slave.path) == self.name)
@property
def size(self):
- return self.parents[0].size
+ return self.slave.size
def _pre_setup(self, orig=False):
- if not os.path.exists(self.parents[0].path):
- raise errors.DeviceError("specified file (%s) does not exist" % self.parents[0].path)
+ if not os.path.exists(self.slave.path):
+ raise errors.DeviceError("specified file (%s) does not exist" % self.slave.path)
return StorageDevice._pre_setup(self, orig=orig)
def _setup(self, orig=False):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status,
controllable=self.controllable)
- blockdev.loop.setup(self.parents[0].path)
+ blockdev.loop.setup(self.slave.path)
def _post_setup(self):
StorageDevice._post_setup(self)
@@ -123,3 +123,7 @@ class LoopDevice(StorageDevice):
StorageDevice._post_teardown(self, recursive=recursive)
self.name = "tmploop%d" % self.id
self.sysfs_path = ''
+
+ @property
+ def slave(self):
+ return self.parents[0]
diff --git a/blivet/devices/luks.py b/blivet/devices/luks.py
index 2eb1f130..5ab840ea 100644
--- a/blivet/devices/luks.py
+++ b/blivet/devices/luks.py
@@ -66,13 +66,17 @@ class LUKSDevice(DMCryptDevice):
@property
def raw_device(self):
+ return self.slave
+
+ @property
+ def slave(self):
if self._has_integrity:
return self.parents[0].parents[0]
return self.parents[0]
def _get_size(self):
if not self.exists:
- size = self.raw_device.size - crypto.LUKS_METADATA_SIZE
+ size = self.slave.size - crypto.LUKS_METADATA_SIZE
elif self.resizable and self.target_size != Size(0):
size = self.target_size
else:
@@ -80,8 +84,8 @@ class LUKSDevice(DMCryptDevice):
return size
def _set_size(self, newsize):
- if not self.exists and not self.raw_device.exists:
- self.raw_device.size = newsize + crypto.LUKS_METADATA_SIZE
+ if not self.exists and not self.slave.exists:
+ self.slave.size = newsize + crypto.LUKS_METADATA_SIZE
# just run the StorageDevice._set_size to make sure we are in the format limits
super(LUKSDevice, self)._set_size(newsize - crypto.LUKS_METADATA_SIZE)
@@ -108,22 +112,22 @@ class LUKSDevice(DMCryptDevice):
raise ValueError("size is smaller than the minimum for this device")
# don't allow larger luks than size (or target size) of backing device
- if newsize > (self.raw_device.size - crypto.LUKS_METADATA_SIZE):
+ if newsize > (self.slave.size - crypto.LUKS_METADATA_SIZE):
log.error("requested size %s is larger than size of the backing device %s",
- newsize, self.raw_device.size)
+ newsize, self.slave.size)
raise ValueError("size is larger than the size of the backing device")
if self.align_target_size(newsize) != newsize:
raise ValueError("new size would violate alignment requirements")
def _get_target_size(self):
- return self.raw_device.format.target_size
+ return self.slave.format.target_size
@property
def max_size(self):
""" The maximum size this luks device can be. Maximum is based on the
maximum size of the backing device. """
- max_luks = self.raw_device.max_size - crypto.LUKS_METADATA_SIZE
+ max_luks = self.slave.max_size - crypto.LUKS_METADATA_SIZE
max_format = self.format.max_size
return min(max_luks, max_format) if max_format else max_luks
@@ -131,7 +135,7 @@ class LUKSDevice(DMCryptDevice):
def resizable(self):
""" Can this device be resized? """
return (self._resizable and self.exists and self.format.resizable and
- self.raw_device.resizable and not self._has_integrity)
+ self.slave.resizable and not self._has_integrity)
def resize(self):
# size of LUKSDevice depends on size of the LUKS format on backing
@@ -139,7 +143,7 @@ class LUKSDevice(DMCryptDevice):
log_method_call(self, self.name, status=self.status)
def _post_create(self):
- self.name = self.raw_device.format.map_name
+ self.name = self.slave.format.map_name
StorageDevice._post_create(self)
def _post_teardown(self, recursive=False):
@@ -162,10 +166,10 @@ class LUKSDevice(DMCryptDevice):
self.name = new_name
def dracut_setup_args(self):
- return set(["rd.luks.uuid=luks-%s" % self.raw_device.format.uuid])
+ return set(["rd.luks.uuid=luks-%s" % self.slave.format.uuid])
def populate_ksdata(self, data):
- self.raw_device.populate_ksdata(data)
+ self.slave.populate_ksdata(data)
data.encrypted = True
super(LUKSDevice, self).populate_ksdata(data)
diff --git a/blivet/errors.py b/blivet/errors.py
index b886ffec..30c9921a 100644
--- a/blivet/errors.py
+++ b/blivet/errors.py
@@ -201,7 +201,7 @@ class DeviceTreeError(StorageError):
pass
-class NoParentsError(DeviceTreeError):
+class NoSlavesError(DeviceTreeError):
pass
diff --git a/blivet/partitioning.py b/blivet/partitioning.py
index ce77e4eb..2cd6554c 100644
--- a/blivet/partitioning.py
+++ b/blivet/partitioning.py
@@ -32,7 +32,7 @@ import _ped
from .errors import DeviceError, PartitioningError, AlignmentError
from .flags import flags
-from .devices import Device, PartitionDevice, device_path_to_name
+from .devices import Device, PartitionDevice, LUKSDevice, device_path_to_name
from .size import Size
from .i18n import _
from .util import stringize, unicodeize, compare
@@ -1635,7 +1635,15 @@ class TotalSizeSet(object):
:param size: the target combined size
:type size: :class:`~.size.Size`
"""
- self.devices = [d.raw_device for d in devices]
+ self.devices = []
+ for device in devices:
+ if isinstance(device, LUKSDevice):
+ partition = device.slave
+ else:
+ partition = device
+
+ self.devices.append(partition)
+
self.size = size
self.requests = []
@@ -1673,7 +1681,15 @@ class SameSizeSet(object):
:keyword max_size: the maximum size for growable devices
:type max_size: :class:`~.size.Size`
"""
- self.devices = [d.raw_device for d in devices]
+ self.devices = []
+ for device in devices:
+ if isinstance(device, LUKSDevice):
+ partition = device.slave
+ else:
+ partition = device
+
+ self.devices.append(partition)
+
self.size = size / len(devices)
self.grow = grow
self.max_size = max_size
diff --git a/blivet/populator/helpers/dm.py b/blivet/populator/helpers/dm.py
index 4721390e..0ad065e2 100644
--- a/blivet/populator/helpers/dm.py
+++ b/blivet/populator/helpers/dm.py
@@ -47,13 +47,13 @@ class DMDevicePopulator(DevicePopulator):
name = udev.device_get_name(self.data)
log_method_call(self, name=name)
sysfs_path = udev.device_get_sysfs_path(self.data)
- parent_devices = self._devicetree._add_parent_devices(self.data)
+ slave_devices = self._devicetree._add_slave_devices(self.data)
device = self._devicetree.get_device_by_name(name)
if device is None:
device = DMDevice(name, dm_uuid=self.data.get('DM_UUID'),
sysfs_path=sysfs_path, exists=True,
- parents=[parent_devices[0]])
+ parents=[slave_devices[0]])
device.protected = True
device.controllable = False
self._devicetree._add_device(device)
diff --git a/blivet/populator/helpers/luks.py b/blivet/populator/helpers/luks.py
index 3221122a..9b5023f8 100644
--- a/blivet/populator/helpers/luks.py
+++ b/blivet/populator/helpers/luks.py
@@ -43,7 +43,7 @@ class LUKSDevicePopulator(DevicePopulator):
return udev.device_is_dm_luks(data)
def run(self):
- parents = self._devicetree._add_parent_devices(self.data)
+ parents = self._devicetree._add_slave_devices(self.data)
device = LUKSDevice(udev.device_get_name(self.data),
sysfs_path=udev.device_get_sysfs_path(self.data),
parents=parents,
@@ -58,7 +58,7 @@ class IntegrityDevicePopulator(DevicePopulator):
return udev.device_is_dm_integrity(data)
def run(self):
- parents = self._devicetree._add_parent_devices(self.data)
+ parents = self._devicetree._add_slave_devices(self.data)
name = udev.device_get_name(self.data)
try:
diff --git a/blivet/populator/helpers/lvm.py b/blivet/populator/helpers/lvm.py
index 6ef2f417..b549e8d3 100644
--- a/blivet/populator/helpers/lvm.py
+++ b/blivet/populator/helpers/lvm.py
@@ -58,7 +58,7 @@ class LVMDevicePopulator(DevicePopulator):
log.warning("found non-vg device with name %s", vg_name)
device = None
- self._devicetree._add_parent_devices(self.data)
+ self._devicetree._add_slave_devices(self.data)
# LVM provides no means to resolve conflicts caused by duplicated VG
# names, so we're just being optimistic here. Woo!
diff --git a/blivet/populator/helpers/mdraid.py b/blivet/populator/helpers/mdraid.py
index a7602d20..9bec11ef 100644
--- a/blivet/populator/helpers/mdraid.py
+++ b/blivet/populator/helpers/mdraid.py
@@ -31,7 +31,7 @@ from ... import udev
from ...devicelibs import raid
from ...devices import MDRaidArrayDevice, MDContainerDevice
from ...devices import device_path_to_name
-from ...errors import DeviceError, NoParentsError
+from ...errors import DeviceError, NoSlavesError
from ...flags import flags
from ...storage_log import log_method_call
from .devicepopulator import DevicePopulator
@@ -52,12 +52,12 @@ class MDDevicePopulator(DevicePopulator):
log_method_call(self, name=name)
try:
- self._devicetree._add_parent_devices(self.data)
- except NoParentsError:
- log.error("no parents found for mdarray %s, skipping", name)
+ self._devicetree._add_slave_devices(self.data)
+ except NoSlavesError:
+ log.error("no slaves found for mdarray %s, skipping", name)
return None
- # try to get the device again now that we've got all the parents
+ # try to get the device again now that we've got all the slaves
device = self._devicetree.get_device_by_name(name, incomplete=flags.allow_imperfect_devices)
if device is None:
@@ -74,8 +74,8 @@ class MDDevicePopulator(DevicePopulator):
device.name = name
if device is None:
- # if we get here, we found all of the parent devices and
- # something must be wrong -- if all of the parents are in
+ # if we get here, we found all of the slave devices and
+ # something must be wrong -- if all of the slaves are in
# the tree, this device should be as well
if name is None:
name = udev.device_get_name(self.data)
diff --git a/blivet/populator/helpers/multipath.py b/blivet/populator/helpers/multipath.py
index 96c0a9ad..10c745bf 100644
--- a/blivet/populator/helpers/multipath.py
+++ b/blivet/populator/helpers/multipath.py
@@ -40,13 +40,13 @@ class MultipathDevicePopulator(DevicePopulator):
name = udev.device_get_name(self.data)
log_method_call(self, name=name)
- parent_devices = self._devicetree._add_parent_devices(self.data)
+ slave_devices = self._devicetree._add_slave_devices(self.data)
device = None
- if parent_devices:
- device = MultipathDevice(name, parents=parent_devices,
+ if slave_devices:
+ device = MultipathDevice(name, parents=slave_devices,
sysfs_path=udev.device_get_sysfs_path(self.data),
- wwn=parent_devices[0].wwn)
+ wwn=slave_devices[0].wwn)
self._devicetree._add_device(device)
return device
diff --git a/blivet/populator/populator.py b/blivet/populator/populator.py
index 3a419418..068270b2 100644
--- a/blivet/populator/populator.py
+++ b/blivet/populator/populator.py
@@ -31,7 +31,7 @@ gi.require_version("BlockDev", "2.0")
from gi.repository import BlockDev as blockdev
-from ..errors import DeviceError, DeviceTreeError, NoParentsError
+from ..errors import DeviceError, DeviceTreeError, NoSlavesError
from ..devices import DMLinearDevice, DMRaidArrayDevice
from ..devices import FileDevice, LoopDevice
from ..devices import MDRaidArrayDevice
@@ -92,55 +92,56 @@ class PopulatorMixin(object):
self._cleanup = False
- def _add_parent_devices(self, info):
- """ Add all parents of a device, raising DeviceTreeError on failure.
+ def _add_slave_devices(self, info):
+ """ Add all slaves of a device, raising DeviceTreeError on failure.
:param :class:`pyudev.Device` info: the device's udev info
- :raises: :class:`~.errors.DeviceTreeError if no parents are found or
- if we fail to add any parent
- :returns: a list of parent devices
+ :raises: :class:`~.errors.DeviceTreeError if no slaves are found or
+ if we fail to add any slave
+ :returns: a list of slave devices
:rtype: list of :class:`~.StorageDevice`
"""
name = udev.device_get_name(info)
sysfs_path = udev.device_get_sysfs_path(info)
- parent_dir = os.path.normpath("%s/slaves" % sysfs_path)
- parent_names = os.listdir(parent_dir)
- parent_devices = []
- if not parent_names:
- log.error("no parents found for %s", name)
- raise NoParentsError("no parents found for device %s" % name)
-
- for parent_name in parent_names:
- path = os.path.normpath("%s/%s" % (parent_dir, parent_name))
- parent_info = udev.get_device(os.path.realpath(path))
-
- if not parent_info:
- msg = "unable to get udev info for %s" % parent_name
+ slave_dir = os.path.normpath("%s/slaves" % sysfs_path)
+ slave_names = os.listdir(slave_dir)
+ slave_devices = []
+ if not slave_names:
+ log.error("no slaves found for %s", name)
+ raise NoSlavesError("no slaves found for device %s" % name)
+
+ for slave_name in slave_names:
+ path = os.path.normpath("%s/%s" % (slave_dir, slave_name))
+ slave_info = udev.get_device(os.path.realpath(path))
+
+ if not slave_info:
+ msg = "unable to get udev info for %s" % slave_name
raise DeviceTreeError(msg)
# cciss in sysfs is "cciss!cXdYpZ" but we need "cciss/cXdYpZ"
- parent_name = udev.device_get_name(parent_info).replace("!", "/")
-
- parent_dev = self.get_device_by_name(parent_name)
- if not parent_dev and parent_info:
- # we haven't scanned the parent yet, so do it now
- self.handle_device(parent_info)
- parent_dev = self.get_device_by_name(parent_name)
- if parent_dev is None:
+ slave_name = udev.device_get_name(slave_info).replace("!", "/")
+
+ slave_dev = self.get_device_by_name(slave_name)
+ if not slave_dev and slave_info:
+ # we haven't scanned the slave yet, so do it now
+ self.handle_device(slave_info)
+ slave_dev = self.get_device_by_name(slave_name)
+ if slave_dev is None:
if udev.device_is_dm_lvm(info):
- if parent_name not in lvs_info.cache:
+ if slave_name not in lvs_info.cache:
# we do not expect hidden lvs to be in the tree
continue
- # if the current parent is still not in
+ # if the current slave is still not in
# the tree, something has gone wrong
- log.error("failure scanning device %s: could not add parent %s", name, parent_name)
- msg = "failed to add parent %s of device %s" % (parent_name, name)
+ log.error("failure scanning device %s: could not add slave %s", name, slave_name)
+ msg = "failed to add slave %s of device %s" % (slave_name,
+ name)
raise DeviceTreeError(msg)
- parent_devices.append(parent_dev)
+ slave_devices.append(slave_dev)
- return parent_devices
+ return slave_devices
def _add_name(self, name):
if name not in self.names:
diff --git a/blivet/threads.py b/blivet/threads.py
index 5e2dff3f..1a5cc6db 100644
--- a/blivet/threads.py
+++ b/blivet/threads.py
@@ -63,11 +63,12 @@ class SynchronizedMeta(type):
"""
def __new__(cls, name, bases, dct):
new_dct = {}
+ blacklist = dct.get('_unsynchronized_methods', [])
for n in dct:
obj = dct[n]
# Do not decorate class or static methods.
- if n in dct.get('_unsynchronized_methods', []):
+ if n in blacklist:
pass
elif isinstance(obj, FunctionType):
obj = exclusive(obj)
diff --git a/blivet/udev.py b/blivet/udev.py
index efbc53d6..ddc49a37 100644
--- a/blivet/udev.py
+++ b/blivet/udev.py
@@ -39,7 +39,7 @@ from gi.repository import BlockDev as blockdev
global_udev = pyudev.Context()
log = logging.getLogger("blivet")
-ignored_device_names = []
+device_name_blacklist = []
""" device name regexes to ignore; this should be empty by default """
@@ -77,7 +77,7 @@ def get_devices(subsystem="block"):
result = []
for device in global_udev.list_devices(subsystem=subsystem):
- if not __is_ignored_blockdev(device.sys_name):
+ if not __is_blacklisted_blockdev(device.sys_name):
dev = device_to_dict(device)
result.append(dev)
@@ -176,13 +176,13 @@ def resolve_glob(glob):
return ret
-def __is_ignored_blockdev(dev_name):
+def __is_blacklisted_blockdev(dev_name):
"""Is this a blockdev we never want for an install?"""
if dev_name.startswith("ram") or dev_name.startswith("fd"):
return True
- if ignored_device_names:
- if any(re.search(expr, dev_name) for expr in ignored_device_names):
+ if device_name_blacklist:
+ if any(re.search(expr, dev_name) for expr in device_name_blacklist):
return True
dev_path = "/sys/class/block/%s" % dev_name
@@ -375,7 +375,7 @@ def device_is_disk(info):
device_is_dm_crypt(info) or
device_is_dm_stratis(info) or
(device_is_md(info) and
- (not device_get_md_container(info) and not all(device_is_disk(d) for d in device_get_parents(info))))))
+ (not device_get_md_container(info) and not all(device_is_disk(d) for d in device_get_slaves(info))))))
def device_is_partition(info):
@@ -454,18 +454,18 @@ def device_get_devname(info):
return info.get('DEVNAME')
-def device_get_parents(info):
- """ Return a list of udev device objects representing this device's parents. """
- parents_dir = device_get_sysfs_path(info) + "/slaves/"
+def device_get_slaves(info):
+ """ Return a list of udev device objects representing this device's slaves. """
+ slaves_dir = device_get_sysfs_path(info) + "/slaves/"
names = list()
- if os.path.isdir(parents_dir):
- names = os.listdir(parents_dir)
+ if os.path.isdir(slaves_dir):
+ names = os.listdir(slaves_dir)
- parents = list()
+ slaves = list()
for name in names:
- parents.append(get_device(device_node="/dev/" + name))
+ slaves.append(get_device(device_node="/dev/" + name))
- return parents
+ return slaves
def device_get_holders(info):
@@ -742,7 +742,7 @@ def device_get_partition_disk(info):
disk = None
majorminor = info.get("ID_PART_ENTRY_DISK")
sysfs_path = device_get_sysfs_path(info)
- parents_dir = "%s/slaves" % sysfs_path
+ slaves_dir = "%s/slaves" % sysfs_path
if majorminor:
major, minor = majorminor.split(":")
for device in get_devices():
@@ -750,8 +750,8 @@ def device_get_partition_disk(info):
disk = device_get_name(device)
break
elif device_is_dm_partition(info):
- if os.path.isdir(parents_dir):
- parents = os.listdir(parents_dir)
+ if os.path.isdir(slaves_dir):
+ parents = os.listdir(slaves_dir)
if len(parents) == 1:
disk = resolve_devspec(parents[0].replace('!', '/'))
else:
diff --git a/tests/unit_tests/devicefactory_test.py b/tests/unit_tests/devicefactory_test.py
index ff6bcb9e..552aadc1 100644
--- a/tests/unit_tests/devicefactory_test.py
+++ b/tests/unit_tests/devicefactory_test.py
@@ -115,9 +115,9 @@ class DeviceFactoryTestCase(unittest.TestCase):
kwargs.get("encrypted", False) or
kwargs.get("container_encrypted", False))
if kwargs.get("encrypted", False):
- self.assertEqual(device.parents[0].format.luks_version,
+ self.assertEqual(device.slave.format.luks_version,
kwargs.get("luks_version", crypto.DEFAULT_LUKS_VERSION))
- self.assertEqual(device.raw_device.format.luks_sector_size,
+ self.assertEqual(device.slave.format.luks_sector_size,
kwargs.get("luks_sector_size", 0))
self.assertTrue(set(device.disks).issubset(kwargs["disks"]))
@@ -357,7 +357,7 @@ class LVMFactoryTestCase(DeviceFactoryTestCase):
device = args[0]
if kwargs.get("encrypted"):
- container = device.parents[0].container
+ container = device.slave.container
else:
container = device.container
@@ -376,7 +376,7 @@ class LVMFactoryTestCase(DeviceFactoryTestCase):
self.assertIsInstance(pv, member_class)
if pv.encrypted:
- self.assertEqual(pv.parents[0].format.luks_version,
+ self.assertEqual(pv.slave.format.luks_version,
kwargs.get("luks_version", crypto.DEFAULT_LUKS_VERSION))
@patch("blivet.formats.lvmpv.LVMPhysicalVolume.formattable", return_value=True)
@@ -592,7 +592,7 @@ class LVMThinPFactoryTestCase(LVMFactoryTestCase):
device = args[0]
if kwargs.get("encrypted", False):
- thinlv = device.parents[0]
+ thinlv = device.slave
else:
thinlv = device
diff --git a/tests/unit_tests/devices_test/device_size_test.py b/tests/unit_tests/devices_test/device_size_test.py
index d0c0a3f4..a1efa86d 100644
--- a/tests/unit_tests/devices_test/device_size_test.py
+++ b/tests/unit_tests/devices_test/device_size_test.py
@@ -107,8 +107,8 @@ class LUKSDeviceSizeTest(StorageDeviceSizeTest):
def _get_device(self, *args, **kwargs):
exists = kwargs.get("exists", False)
- parent = StorageDevice(*args, size=kwargs["size"] + crypto.LUKS_METADATA_SIZE, exists=exists)
- return LUKSDevice(*args, **kwargs, parents=[parent])
+ slave = StorageDevice(*args, size=kwargs["size"] + crypto.LUKS_METADATA_SIZE, exists=exists)
+ return LUKSDevice(*args, **kwargs, parents=[slave])
def test_size_getter(self):
initial_size = Size("10 GiB")
@@ -116,4 +116,4 @@ class LUKSDeviceSizeTest(StorageDeviceSizeTest):
# for LUKS size depends on the backing device size
self.assertEqual(dev.size, initial_size)
- self.assertEqual(dev.raw_device.size, initial_size + crypto.LUKS_METADATA_SIZE)
+ self.assertEqual(dev.slave.size, initial_size + crypto.LUKS_METADATA_SIZE)
diff --git a/tests/unit_tests/populator_test.py b/tests/unit_tests/populator_test.py
index 369fe878..7ba04bac 100644
--- a/tests/unit_tests/populator_test.py
+++ b/tests/unit_tests/populator_test.py
@@ -86,7 +86,7 @@ class DMDevicePopulatorTestCase(PopulatorHelperTestCase):
@patch.object(DeviceTree, "get_device_by_name")
@patch.object(DMDevice, "status", return_value=True)
@patch.object(DMDevice, "update_sysfs_path")
- @patch.object(DeviceTree, "_add_parent_devices")
+ @patch.object(DeviceTree, "_add_slave_devices")
@patch("blivet.udev.device_get_name")
@patch("blivet.udev.device_get_sysfs_path", return_value=sentinel.sysfs_path)
def test_run(self, *args):
@@ -95,7 +95,7 @@ class DMDevicePopulatorTestCase(PopulatorHelperTestCase):
devicetree = DeviceTree()
- # The general case for dm devices is that adding the parent devices
+ # The general case for dm devices is that adding the slave/parent devices
# will result in the dm device itself being in the tree.
device = Mock()
device.id = 0
@@ -106,7 +106,7 @@ class DMDevicePopulatorTestCase(PopulatorHelperTestCase):
parent = Mock()
parent.id = 0
parent.parents = []
- devicetree._add_parent_devices.return_value = [parent]
+ devicetree._add_slave_devices.return_value = [parent]
devicetree._add_device(parent)
devicetree.get_device_by_name.return_value = None
device_name = "dmdevice"
@@ -235,7 +235,7 @@ class LVMDevicePopulatorTestCase(PopulatorHelperTestCase):
# could be the first helper class checked.
@patch.object(DeviceTree, "get_device_by_name")
- @patch.object(DeviceTree, "_add_parent_devices")
+ @patch.object(DeviceTree, "_add_slave_devices")
@patch("blivet.udev.device_get_name")
@patch("blivet.udev.device_get_lv_vg_name")
def test_run(self, *args):
@@ -247,7 +247,7 @@ class LVMDevicePopulatorTestCase(PopulatorHelperTestCase):
devicetree = DeviceTree()
data = Mock()
- # Add parent devices and then look up the device.
+ # Add slave/parent devices and then look up the device.
device_get_name.return_value = sentinel.lv_name
devicetree.get_device_by_name.return_value = None
@@ -267,7 +267,7 @@ class LVMDevicePopulatorTestCase(PopulatorHelperTestCase):
call(sentinel.vg_name),
call(sentinel.lv_name)])
- # Add parent devices, but the device is still not in the tree
+ # Add slave/parent devices, but the device is still not in the tree
get_device_by_name.side_effect = None
get_device_by_name.return_value = None
self.assertEqual(helper.run(), None)
@@ -639,7 +639,7 @@ class MDDevicePopulatorTestCase(PopulatorHelperTestCase):
# could be the first helper class checked.
@patch.object(DeviceTree, "get_device_by_name")
- @patch.object(DeviceTree, "_add_parent_devices")
+ @patch.object(DeviceTree, "_add_slave_devices")
@patch("blivet.udev.device_get_name")
@patch("blivet.udev.device_get_md_uuid")
@patch("blivet.udev.device_get_md_name")
@@ -650,7 +650,7 @@ class MDDevicePopulatorTestCase(PopulatorHelperTestCase):
devicetree = DeviceTree()
- # base case: _add_parent_devices gets the array into the tree
+ # base case: _add_slave_devices gets the array into the tree
data = Mock()
device = Mock()
device.parents = []
@@ -713,12 +713,12 @@ class MultipathDevicePopulatorTestCase(PopulatorHelperTestCase):
# could be the first helper class checked.
@patch("blivet.udev.device_get_sysfs_path")
- @patch.object(DeviceTree, "_add_parent_devices")
+ @patch.object(DeviceTree, "_add_slave_devices")
@patch("blivet.udev.device_get_name")
def test_run(self, *args):
"""Test multipath device populator."""
device_get_name = args[0]
- add_parent_devices = args[1]
+ add_slave_devices = args[1]
devicetree = DeviceTree()
# set up some fake udev data to verify handling of specific entries
@@ -733,13 +733,13 @@ class MultipathDevicePopulatorTestCase(PopulatorHelperTestCase):
device_name = "mpathtest"
device_get_name.return_value = device_name
- parent_1 = Mock(tags=set(), wwn=wwn[2:], id=0)
- parent_1.parents = []
- parent_2 = Mock(tags=set(), wwn=wwn[2:], id=0)
- parent_2.parents = []
- devicetree._add_device(parent_1)
- devicetree._add_device(parent_2)
- add_parent_devices.return_value = [parent_1, parent_2]
+ slave_1 = Mock(tags=set(), wwn=wwn[2:], id=0)
+ slave_1.parents = []
+ slave_2 = Mock(tags=set(), wwn=wwn[2:], id=0)
+ slave_2.parents = []
+ devicetree._add_device(slave_1)
+ devicetree._add_device(slave_2)
+ add_slave_devices.return_value = [slave_1, slave_2]
helper = self.helper_class(devicetree, data)
diff --git a/tests/unit_tests/udev_test.py b/tests/unit_tests/udev_test.py
index b208efa8..ebcd59e2 100644
--- a/tests/unit_tests/udev_test.py
+++ b/tests/unit_tests/udev_test.py
@@ -49,11 +49,11 @@ class UdevTest(unittest.TestCase):
@mock.patch('blivet.udev.device_is_dm_crypt', return_value=False)
@mock.patch('blivet.udev.device_is_md')
@mock.patch('blivet.udev.device_get_md_container')
- @mock.patch('blivet.udev.device_get_parents')
+ @mock.patch('blivet.udev.device_get_slaves')
def test_udev_device_is_disk_md(self, *args):
import blivet.udev
info = dict(DEVTYPE='disk', SYS_PATH=mock.sentinel.md_path)
- (device_get_parents, device_get_md_container, device_is_md) = args[:3] # pylint: disable=unbalanced-tuple-unpacking
+ (device_get_slaves, device_get_md_container, device_is_md) = args[:3] # pylint: disable=unbalanced-tuple-unpacking
disk_parents = [dict(DEVTYPE="disk", SYS_PATH='/fake/path/2'),
dict(DEVTYPE="disk", SYS_PATH='/fake/path/3')]
@@ -68,20 +68,20 @@ class UdevTest(unittest.TestCase):
# Intel FW RAID (MD RAID w/ container layer)
# device_get_container will return some mock value which will evaluate to True
device_get_md_container.return_value = mock.sentinel.md_container
- device_get_parents.side_effect = lambda info: list()
+ device_get_slaves.side_effect = lambda info: list()
self.assertTrue(blivet.udev.device_is_disk(info))
# Normal MD RAID
- device_get_parents.side_effect = lambda info: partition_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
+ device_get_slaves.side_effect = lambda info: partition_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
device_get_md_container.return_value = None
self.assertFalse(blivet.udev.device_is_disk(info))
# Dell FW RAID (MD RAID whose members are all whole disks)
- device_get_parents.side_effect = lambda info: disk_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
+ device_get_slaves.side_effect = lambda info: disk_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
self.assertTrue(blivet.udev.device_is_disk(info))
# Normal MD RAID (w/ at least one non-disk member)
- device_get_parents.side_effect = lambda info: mixed_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
+ device_get_slaves.side_effect = lambda info: mixed_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
self.assertFalse(blivet.udev.device_is_disk(info))
diff --git a/tests/vmtests/vmbackedtestcase.py b/tests/vmtests/vmbackedtestcase.py
index 797bac85..6255104f 100644
--- a/tests/vmtests/vmbackedtestcase.py
+++ b/tests/vmtests/vmbackedtestcase.py
@@ -50,7 +50,7 @@ class VMBackedTestCase(unittest.TestCase):
defined in set_up_disks.
"""
- udev.ignored_device_names = [r'^zram']
+ udev.device_name_blacklist = [r'^zram']
#
# create disk images
--
2.38.1
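
For reference, a minimal sketch of the sysfs lookup that both namings in the reverted patch above boil down to, assuming a standard /sys layout; list_backing_devices is a hypothetical helper name, not a blivet API:

    import os

    def list_backing_devices(dev_name):
        # Illustrative only: blivet's _add_parent_devices / _add_slave_devices
        # walk this same /sys/class/block/<name>/slaves directory.
        slaves_dir = "/sys/class/block/%s/slaves" % dev_name
        if not os.path.isdir(slaves_dir):
            return []
        # cciss appears as "cciss!cXdYpZ" in sysfs but is addressed as "cciss/cXdYpZ"
        return [name.replace("!", "/") for name in os.listdir(slaves_dir)]

    # e.g. list_backing_devices("dm-0") might return ["sda2"] for a mapping backed by /dev/sda2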

View File

@@ -1,4 +1,4 @@
From 62af1d7f96b8ed8eb8f2732787576161ae5da79f Mon Sep 17 00:00:00 2001
From 789dd296988aa9da17d97ece1efc33f9e232648e Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 13 Oct 2022 10:47:52 +0200
Subject: [PATCH] Revert "Remove the Blivet.roots attribute"
@@ -73,5 +73,5 @@ index 8105bfc7..6f460f6d 100644
class PartitionFactory(DeviceFactory):
--
2.38.1
2.37.3

View File

@@ -1,4 +1,4 @@
From 1561bfe8820118178bbb07021adc1cacd875c4c7 Mon Sep 17 00:00:00 2001
From 7931a74e691979dd23a16e7a017b4ef5bc296b79 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 18 Oct 2022 12:28:37 +0200
Subject: [PATCH] Fix potential AttributeError when getting stratis blockdev
@@ -41,5 +41,5 @@ index bd1c5a18..42f230ee 100644
def _get_locked_pools_info(self):
locked_pools = []
--
2.38.1
2.37.3

View File

@@ -1,9 +1,9 @@
From b747c4ed07937f54a546ffb2f2c8c95e0797dd6c Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 20 Oct 2022 15:19:29 +0200
Subject: [PATCH] tests: Skip XFS resize test on CentOS/RHEL 8
Subject: [PATCH] tests: Skip XFS resize test on CentOS/RHEL 9
Partitions on loop devices are broken on CentOS/RHEL 8.
Partitions on loop devices are broken on CentOS/RHEL 9.
---
tests/skip.yml | 6 ++++++
1 file changed, 6 insertions(+)
@@ -20,8 +20,8 @@ index 568c3fff..66b34493 100644
+- test: storage_tests.formats_test.fs_test.XFSTestCase.test_resize
+ skip_on:
+ - distro: ["centos", "enterprise_linux"]
+ version: "8"
+ reason: "Creating partitions on loop devices is broken on CentOS/RHEL 8 latest kernel"
--
+ version: "9"
+ reason: "Creating partitions on loop devices is broken on CentOS/RHEL 9 latest kernel"
--
2.37.3

View File

@@ -0,0 +1,590 @@
From 9383855c8a15e6d7c4033cd8d7ae8310b462d166 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 18 Oct 2022 10:38:00 +0200
Subject: [PATCH 1/3] Add a basic support for NVMe and NVMe Fabrics devices
This adds two new device types: NVMeNamespaceDevice and
NVMeFabricsNamespaceDevice mostly to allow to differentiate
between "local" and "remote" NVMe devices. The new libblockdev
NVMe plugin is required for full functionality.
---
blivet/__init__.py | 6 +-
blivet/devices/__init__.py | 2 +-
blivet/devices/disk.py | 101 ++++++++++++++++++++++
blivet/devices/lib.py | 1 +
blivet/populator/helpers/__init__.py | 2 +-
blivet/populator/helpers/disk.py | 64 ++++++++++++++
blivet/udev.py | 33 +++++++
blivet/util.py | 9 ++
tests/unit_tests/populator_test.py | 124 +++++++++++++++++++++++++++
9 files changed, 339 insertions(+), 3 deletions(-)
diff --git a/blivet/__init__.py b/blivet/__init__.py
index bbc7ea3a..3b9e659e 100644
--- a/blivet/__init__.py
+++ b/blivet/__init__.py
@@ -67,6 +67,10 @@ if arch.is_s390():
else:
_REQUESTED_PLUGIN_NAMES = set(("swap", "crypto", "loop", "mdraid", "mpath", "dm", "nvdimm"))
+# nvme plugin is not generally available
+if hasattr(blockdev.Plugin, "NVME"):
+ _REQUESTED_PLUGIN_NAMES.add("nvme")
+
_requested_plugins = blockdev.plugin_specs_from_names(_REQUESTED_PLUGIN_NAMES)
# XXX force non-dbus LVM plugin
lvm_plugin = blockdev.PluginSpec()
@@ -74,7 +78,7 @@ lvm_plugin.name = blockdev.Plugin.LVM
lvm_plugin.so_name = "libbd_lvm.so.2"
_requested_plugins.append(lvm_plugin)
try:
- # do not check for dependencies during libblockdev initializtion, do runtime
+ # do not check for dependencies during libblockdev initialization, do runtime
# checks instead
blockdev.switch_init_checks(False)
succ_, avail_plugs = blockdev.try_reinit(require_plugins=_requested_plugins, reload=False, log_func=log_bd_message)
diff --git a/blivet/devices/__init__.py b/blivet/devices/__init__.py
index 8bb0a979..4d16466e 100644
--- a/blivet/devices/__init__.py
+++ b/blivet/devices/__init__.py
@@ -22,7 +22,7 @@
from .lib import device_path_to_name, device_name_to_disk_by_path, ParentList
from .device import Device
from .storage import StorageDevice
-from .disk import DiskDevice, DiskFile, DMRaidArrayDevice, MultipathDevice, iScsiDiskDevice, FcoeDiskDevice, DASDDevice, ZFCPDiskDevice, NVDIMMNamespaceDevice
+from .disk import DiskDevice, DiskFile, DMRaidArrayDevice, MultipathDevice, iScsiDiskDevice, FcoeDiskDevice, DASDDevice, ZFCPDiskDevice, NVDIMMNamespaceDevice, NVMeNamespaceDevice, NVMeFabricsNamespaceDevice
from .partition import PartitionDevice
from .dm import DMDevice, DMLinearDevice, DMCryptDevice, DMIntegrityDevice, DM_MAJORS
from .luks import LUKSDevice, IntegrityDevice
diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index bc4a1b5e..b5e25939 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -22,10 +22,13 @@
import gi
gi.require_version("BlockDev", "2.0")
+gi.require_version("GLib", "2.0")
from gi.repository import BlockDev as blockdev
+from gi.repository import GLib
import os
+from collections import namedtuple
from .. import errors
from .. import util
@@ -725,3 +728,101 @@ class NVDIMMNamespaceDevice(DiskDevice):
@property
def sector_size(self):
return self._sector_size
+
+
+NVMeController = namedtuple("NVMeController", ["name", "serial", "nvme_ver", "id", "subsysnqn"])
+
+
+class NVMeNamespaceDevice(DiskDevice):
+
+ """ NVMe namespace """
+ _type = "nvme"
+ _packages = ["nvme-cli"]
+
+ def __init__(self, device, **kwargs):
+ """
+ :param name: the device name (generally a device node's basename)
+ :type name: str
+ :keyword exists: does this device exist?
+ :type exists: bool
+ :keyword size: the device's size
+ :type size: :class:`~.size.Size`
+ :keyword parents: a list of parent devices
+ :type parents: list of :class:`StorageDevice`
+ :keyword format: this device's formatting
+ :type format: :class:`~.formats.DeviceFormat` or a subclass of it
+ :keyword nsid: namespace ID
+ :type nsid: int
+ """
+ self.nsid = kwargs.pop("nsid", 0)
+
+ DiskDevice.__init__(self, device, **kwargs)
+
+ self._clear_local_tags()
+ self.tags.add(Tags.local)
+ self.tags.add(Tags.nvme)
+
+ self._controllers = None
+
+ @property
+ def controllers(self):
+ if self._controllers is not None:
+ return self._controllers
+
+ self._controllers = []
+ if not hasattr(blockdev.Plugin, "NVME"):
+ # the nvme plugin is not generally available
+ log.debug("Failed to get controllers for %s: libblockdev NVME plugin is not available", self.name)
+ return self._controllers
+
+ try:
+ controllers = blockdev.nvme_find_ctrls_for_ns(self.sysfs_path)
+ except GLib.GError as err:
+ log.debug("Failed to get controllers for %s: %s", self.name, str(err))
+ return self._controllers
+
+ for controller in controllers:
+ try:
+ cpath = util.get_path_by_sysfs_path(controller, "char")
+ except RuntimeError as err:
+ log.debug("Failed to find controller %s: %s", controller, str(err))
+ continue
+ try:
+ cinfo = blockdev.nvme_get_controller_info(cpath)
+ except GLib.GError as err:
+ log.debug("Failed to get controller info for %s: %s", cpath, str(err))
+ continue
+ self._controllers.append(NVMeController(name=os.path.basename(cpath),
+ serial=cinfo.serial_number,
+ nvme_ver=cinfo.nvme_ver,
+ id=cinfo.ctrl_id,
+ subsysnqn=cinfo.subsysnqn))
+
+ return self._controllers
+
+
+class NVMeFabricsNamespaceDevice(NVMeNamespaceDevice, NetworkStorageDevice):
+
+ """ NVMe fabrics namespace """
+ _type = "nvme-fabrics"
+ _packages = ["nvme-cli"]
+
+ def __init__(self, device, **kwargs):
+ """
+ :param name: the device name (generally a device node's basename)
+ :type name: str
+ :keyword exists: does this device exist?
+ :type exists: bool
+ :keyword size: the device's size
+ :type size: :class:`~.size.Size`
+ :keyword parents: a list of parent devices
+ :type parents: list of :class:`StorageDevice`
+ :keyword format: this device's formatting
+ :type format: :class:`~.formats.DeviceFormat` or a subclass of it
+ """
+ NVMeNamespaceDevice.__init__(self, device, **kwargs)
+ NetworkStorageDevice.__init__(self)
+
+ self._clear_local_tags()
+ self.tags.add(Tags.remote)
+ self.tags.add(Tags.nvme)
diff --git a/blivet/devices/lib.py b/blivet/devices/lib.py
index 1bda0bab..b3c4c5b0 100644
--- a/blivet/devices/lib.py
+++ b/blivet/devices/lib.py
@@ -32,6 +32,7 @@ class Tags(str, Enum):
"""Tags that describe various classes of disk."""
local = 'local'
nvdimm = 'nvdimm'
+ nvme = 'nvme'
remote = 'remote'
removable = 'removable'
ssd = 'ssd'
diff --git a/blivet/populator/helpers/__init__.py b/blivet/populator/helpers/__init__.py
index c5ac412f..50ab4de8 100644
--- a/blivet/populator/helpers/__init__.py
+++ b/blivet/populator/helpers/__init__.py
@@ -6,7 +6,7 @@ from .formatpopulator import FormatPopulator
from .btrfs import BTRFSFormatPopulator
from .boot import AppleBootFormatPopulator, EFIFormatPopulator, MacEFIFormatPopulator
-from .disk import DiskDevicePopulator, iScsiDevicePopulator, FCoEDevicePopulator, MDBiosRaidDevicePopulator, DASDDevicePopulator, ZFCPDevicePopulator, NVDIMMNamespaceDevicePopulator
+from .disk import DiskDevicePopulator, iScsiDevicePopulator, FCoEDevicePopulator, MDBiosRaidDevicePopulator, DASDDevicePopulator, ZFCPDevicePopulator, NVDIMMNamespaceDevicePopulator, NVMeNamespaceDevicePopulator, NVMeFabricsNamespaceDevicePopulator
from .disklabel import DiskLabelFormatPopulator
from .dm import DMDevicePopulator
from .dmraid import DMRaidFormatPopulator
diff --git a/blivet/populator/helpers/disk.py b/blivet/populator/helpers/disk.py
index 9db7b810..9ed1eebe 100644
--- a/blivet/populator/helpers/disk.py
+++ b/blivet/populator/helpers/disk.py
@@ -22,13 +22,16 @@
import gi
gi.require_version("BlockDev", "2.0")
+gi.require_version("GLib", "2.0")
from gi.repository import BlockDev as blockdev
+from gi.repository import GLib
from ... import udev
from ... import util
from ...devices import DASDDevice, DiskDevice, FcoeDiskDevice, iScsiDiskDevice
from ...devices import MDBiosRaidArrayDevice, ZFCPDiskDevice, NVDIMMNamespaceDevice
+from ...devices import NVMeNamespaceDevice, NVMeFabricsNamespaceDevice
from ...devices import device_path_to_name
from ...storage_log import log_method_call
from .devicepopulator import DevicePopulator
@@ -251,3 +254,64 @@ class NVDIMMNamespaceDevicePopulator(DiskDevicePopulator):
log.info("%s is an NVDIMM namespace device", udev.device_get_name(self.data))
return kwargs
+
+
+class NVMeNamespaceDevicePopulator(DiskDevicePopulator):
+ priority = 20
+
+ _device_class = NVMeNamespaceDevice
+
+ @classmethod
+ def match(cls, data):
+ return (super(NVMeNamespaceDevicePopulator, NVMeNamespaceDevicePopulator).match(data) and
+ udev.device_is_nvme_namespace(data) and not udev.device_is_nvme_fabrics(data))
+
+ def _get_kwargs(self):
+ kwargs = super(NVMeNamespaceDevicePopulator, self)._get_kwargs()
+
+ log.info("%s is an NVMe local namespace device", udev.device_get_name(self.data))
+
+ if not hasattr(blockdev.Plugin, "NVME"):
+ # the nvme plugin is not generally available
+ return kwargs
+
+ path = udev.device_get_devname(self.data)
+ try:
+ ninfo = blockdev.nvme_get_namespace_info(path)
+ except GLib.GError as err:
+ log.debug("Failed to get namespace info for %s: %s", path, str(err))
+ else:
+ kwargs["nsid"] = ninfo.nsid
+
+ log.info("%s is an NVMe local namespace device", udev.device_get_name(self.data))
+ return kwargs
+
+
+class NVMeFabricsNamespaceDevicePopulator(DiskDevicePopulator):
+ priority = 20
+
+ _device_class = NVMeFabricsNamespaceDevice
+
+ @classmethod
+ def match(cls, data):
+ return (super(NVMeFabricsNamespaceDevicePopulator, NVMeFabricsNamespaceDevicePopulator).match(data) and
+ udev.device_is_nvme_namespace(data) and udev.device_is_nvme_fabrics(data))
+
+ def _get_kwargs(self):
+ kwargs = super(NVMeFabricsNamespaceDevicePopulator, self)._get_kwargs()
+
+ log.info("%s is an NVMe fabrics namespace device", udev.device_get_name(self.data))
+
+ if not hasattr(blockdev.Plugin, "NVME"):
+ # the nvme plugin is not generally available
+ return kwargs
+
+ path = udev.device_get_devname(self.data)
+ try:
+ ninfo = blockdev.nvme_get_namespace_info(path)
+ except GLib.GError as err:
+ log.debug("Failed to get namespace info for %s: %s", path, str(err))
+ else:
+ kwargs["nsid"] = ninfo.nsid
+
+ return kwargs
diff --git a/blivet/udev.py b/blivet/udev.py
index efbc53d6..533a1edc 100644
--- a/blivet/udev.py
+++ b/blivet/udev.py
@@ -1023,6 +1023,39 @@ def device_is_nvdimm_namespace(info):
return ninfo is not None
+def device_is_nvme_namespace(info):
+ if info.get("DEVTYPE") != "disk":
+ return False
+
+ if not info.get("SYS_PATH"):
+ return False
+
+ device = pyudev.Devices.from_sys_path(global_udev, info.get("SYS_PATH"))
+ while device:
+ if device.subsystem and device.subsystem.startswith("nvme"):
+ return True
+ device = device.parent
+
+ return False
+
+
+def device_is_nvme_fabrics(info):
+ if not device_is_nvme_namespace(info):
+ return False
+
+ if not hasattr(blockdev.Plugin, "NVME") or not blockdev.is_plugin_available(blockdev.Plugin.NVME): # pylint: disable=no-member
+ # nvme plugin is not available -- even if this is an nvme fabrics device we
+ # don't have tools to work with it, so we should pretend it's just a normal nvme
+ return False
+
+ controllers = blockdev.nvme_find_ctrls_for_ns(info.get("SYS_PATH", ""))
+ if not controllers:
+ return False
+
+ transport = util.get_sysfs_attr(controllers[0], "transport")
+ return transport in ("rdma", "fc", "tcp", "loop")
+
+
def device_is_hidden(info):
sysfs_path = device_get_sysfs_path(info)
hidden = util.get_sysfs_attr(sysfs_path, "hidden")
diff --git a/blivet/util.py b/blivet/util.py
index 0e578aea..3040ee5a 100644
--- a/blivet/util.py
+++ b/blivet/util.py
@@ -432,6 +432,15 @@ def get_sysfs_path_by_name(dev_node, class_name="block"):
"for '%s' (it is not at '%s')" % (dev_node, dev_path))
+def get_path_by_sysfs_path(sysfs_path, dev_type="block"):
+ """ Return device path for a given device sysfs path. """
+
+ dev = get_sysfs_attr(sysfs_path, "dev")
+ if not dev or not os.path.exists("/dev/%s/%s" % (dev_type, dev)):
+ raise RuntimeError("get_path_by_sysfs_path: Could not find device for %s" % sysfs_path)
+ return os.path.realpath("/dev/%s/%s" % (dev_type, dev))
+
+
def get_cow_sysfs_path(dev_path, dev_sysfsPath):
""" Return sysfs path of cow device for a given device.
"""
diff --git a/tests/unit_tests/populator_test.py b/tests/unit_tests/populator_test.py
index 369fe878..1ee29b57 100644
--- a/tests/unit_tests/populator_test.py
+++ b/tests/unit_tests/populator_test.py
@@ -13,6 +13,7 @@ from gi.repository import BlockDev as blockdev
from blivet.devices import DiskDevice, DMDevice, FileDevice, LoopDevice
from blivet.devices import MDRaidArrayDevice, MultipathDevice, OpticalDevice
from blivet.devices import PartitionDevice, StorageDevice, NVDIMMNamespaceDevice
+from blivet.devices import NVMeNamespaceDevice, NVMeFabricsNamespaceDevice
from blivet.devicelibs import lvm
from blivet.devicetree import DeviceTree
from blivet.formats import get_device_format_class, get_format, DeviceFormat
@@ -21,6 +22,7 @@ from blivet.populator.helpers import DiskDevicePopulator, DMDevicePopulator, Loo
from blivet.populator.helpers import LVMDevicePopulator, MDDevicePopulator, MultipathDevicePopulator
from blivet.populator.helpers import OpticalDevicePopulator, PartitionDevicePopulator
from blivet.populator.helpers import LVMFormatPopulator, MDFormatPopulator, NVDIMMNamespaceDevicePopulator
+from blivet.populator.helpers import NVMeNamespaceDevicePopulator, NVMeFabricsNamespaceDevicePopulator
from blivet.populator.helpers import get_format_helper, get_device_helper
from blivet.populator.helpers.boot import AppleBootFormatPopulator, EFIFormatPopulator, MacEFIFormatPopulator
from blivet.populator.helpers.formatpopulator import FormatPopulator
@@ -591,6 +593,128 @@ class NVDIMMNamespaceDevicePopulatorTestCase(PopulatorHelperTestCase):
self.assertTrue(device in devicetree.devices)
+class NVMeNamespaceDevicePopulatorTestCase(PopulatorHelperTestCase):
+ helper_class = NVMeNamespaceDevicePopulator
+
+ @patch("os.path.join")
+ @patch("blivet.udev.device_is_cdrom", return_value=False)
+ @patch("blivet.udev.device_is_dm", return_value=False)
+ @patch("blivet.udev.device_is_loop", return_value=False)
+ @patch("blivet.udev.device_is_md", return_value=False)
+ @patch("blivet.udev.device_is_partition", return_value=False)
+ @patch("blivet.udev.device_is_disk", return_value=True)
+ @patch("blivet.udev.device_is_nvme_fabrics", return_value=False)
+ @patch("blivet.udev.device_is_nvme_namespace", return_value=True)
+ def test_match(self, *args):
+ """Test matching of NVMe namespace device populator."""
+ device_is_nvme_namespace = args[0]
+ self.assertTrue(self.helper_class.match(None))
+ device_is_nvme_namespace.return_value = False
+ self.assertFalse(self.helper_class.match(None))
+
+ @patch("os.path.join")
+ @patch("blivet.udev.device_is_cdrom", return_value=False)
+ @patch("blivet.udev.device_is_dm", return_value=False)
+ @patch("blivet.udev.device_is_loop", return_value=False)
+ @patch("blivet.udev.device_is_md", return_value=False)
+ @patch("blivet.udev.device_is_partition", return_value=False)
+ @patch("blivet.udev.device_is_disk", return_value=True)
+ @patch("blivet.udev.device_is_nvme_fabrics", return_value=False)
+ @patch("blivet.udev.device_is_nvme_namespace", return_value=True)
+ def test_get_helper(self, *args):
+ """Test get_device_helper for NVMe namespaces."""
+ device_is_nvme_namespace = args[0]
+ data = {}
+ self.assertEqual(get_device_helper(data), self.helper_class)
+
+ # verify that setting one of the required True return values to False prevents success
+ device_is_nvme_namespace.return_value = False
+ self.assertNotEqual(get_device_helper(data), self.helper_class)
+ device_is_nvme_namespace.return_value = True
+
+ @patch("blivet.udev.device_get_name")
+ def test_run(self, *args):
+ """Test disk device populator."""
+ device_get_name = args[0]
+
+ devicetree = DeviceTree()
+
+ # set up some fake udev data to verify handling of specific entries
+ data = {'SYS_PATH': 'dummy', 'DEVNAME': 'dummy', 'ID_PATH': 'dummy'}
+
+ device_name = "nop"
+ device_get_name.return_value = device_name
+ helper = self.helper_class(devicetree, data)
+
+ device = helper.run()
+
+ self.assertIsInstance(device, NVMeNamespaceDevice)
+ self.assertTrue(device.exists)
+ self.assertTrue(device.is_disk)
+ self.assertTrue(device in devicetree.devices)
+
+
+class NVMeFabricsNamespaceDevicePopulatorTestCase(PopulatorHelperTestCase):
+ helper_class = NVMeFabricsNamespaceDevicePopulator
+
+ @patch("os.path.join")
+ @patch("blivet.udev.device_is_cdrom", return_value=False)
+ @patch("blivet.udev.device_is_dm", return_value=False)
+ @patch("blivet.udev.device_is_loop", return_value=False)
+ @patch("blivet.udev.device_is_md", return_value=False)
+ @patch("blivet.udev.device_is_partition", return_value=False)
+ @patch("blivet.udev.device_is_disk", return_value=True)
+ @patch("blivet.udev.device_is_nvme_namespace", return_value=True)
+ @patch("blivet.udev.device_is_nvme_fabrics", return_value=True)
+ def test_match(self, *args):
+ """Test matching of NVMe namespace device populator."""
+ device_is_nvme_fabrics = args[0]
+ self.assertTrue(self.helper_class.match(None))
+ device_is_nvme_fabrics.return_value = False
+ self.assertFalse(self.helper_class.match(None))
+
+ @patch("os.path.join")
+ @patch("blivet.udev.device_is_cdrom", return_value=False)
+ @patch("blivet.udev.device_is_dm", return_value=False)
+ @patch("blivet.udev.device_is_loop", return_value=False)
+ @patch("blivet.udev.device_is_md", return_value=False)
+ @patch("blivet.udev.device_is_partition", return_value=False)
+ @patch("blivet.udev.device_is_disk", return_value=True)
+ @patch("blivet.udev.device_is_nvme_namespace", return_value=True)
+ @patch("blivet.udev.device_is_nvme_fabrics", return_value=True)
+ def test_get_helper(self, *args):
+ """Test get_device_helper for NVMe namespaces."""
+ device_is_nvme_fabrics = args[0]
+ data = {}
+ self.assertEqual(get_device_helper(data), self.helper_class)
+
+ # verify that setting one of the required True return values to False prevents success
+ device_is_nvme_fabrics.return_value = False
+ self.assertNotEqual(get_device_helper(data), self.helper_class)
+ device_is_nvme_fabrics.return_value = True
+
+ @patch("blivet.udev.device_get_name")
+ def test_run(self, *args):
+ """Test disk device populator."""
+ device_get_name = args[0]
+
+ devicetree = DeviceTree()
+
+ # set up some fake udev data to verify handling of specific entries
+ data = {'SYS_PATH': 'dummy', 'DEVNAME': 'dummy', 'ID_PATH': 'dummy'}
+
+ device_name = "nop"
+ device_get_name.return_value = device_name
+ helper = self.helper_class(devicetree, data)
+
+ device = helper.run()
+
+ self.assertIsInstance(device, NVMeFabricsNamespaceDevice)
+ self.assertTrue(device.exists)
+ self.assertTrue(device.is_disk)
+ self.assertTrue(device in devicetree.devices)
+
+
class MDDevicePopulatorTestCase(PopulatorHelperTestCase):
helper_class = MDDevicePopulator
--
2.38.1
From af6ad7ff2f08180672690910d453158bcd463936 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 2 Dec 2022 12:20:47 +0100
Subject: [PATCH 2/3] Add transport and address to NVMeController info
---
blivet/devices/disk.py | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index b5e25939..796b5b03 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -730,7 +730,8 @@ class NVDIMMNamespaceDevice(DiskDevice):
return self._sector_size
-NVMeController = namedtuple("NVMeController", ["name", "serial", "nvme_ver", "id", "subsysnqn"])
+NVMeController = namedtuple("NVMeController", ["name", "serial", "nvme_ver", "id", "subsysnqn",
+ "transport", "transport_address"])
class NVMeNamespaceDevice(DiskDevice):
@@ -792,11 +793,15 @@ class NVMeNamespaceDevice(DiskDevice):
except GLib.GError as err:
log.debug("Failed to get controller info for %s: %s", cpath, str(err))
continue
+ ctrans = util.get_sysfs_attr(controller, "transport")
+ ctaddr = util.get_sysfs_attr(controller, "address")
self._controllers.append(NVMeController(name=os.path.basename(cpath),
serial=cinfo.serial_number,
nvme_ver=cinfo.nvme_ver,
id=cinfo.ctrl_id,
- subsysnqn=cinfo.subsysnqn))
+ subsysnqn=cinfo.subsysnqn,
+ transport=ctrans,
+ transport_address=ctaddr))
return self._controllers
--
2.38.1
From a04538936ff62958c272b5e2b2657d177df1ef13 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 8 Dec 2022 13:15:33 +0100
Subject: [PATCH 3/3] Add additional identifiers to NVMeNamespaceDevice
---
blivet/devices/disk.py | 2 ++
blivet/populator/helpers/disk.py | 3 +++
2 files changed, 5 insertions(+)
diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index 796b5b03..8842b4dc 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -756,6 +756,8 @@ class NVMeNamespaceDevice(DiskDevice):
:type nsid: int
"""
self.nsid = kwargs.pop("nsid", 0)
+ self.eui64 = kwargs.pop("eui64", "")
+ self.nguid = kwargs.pop("nguid", "")
DiskDevice.__init__(self, device, **kwargs)
diff --git a/blivet/populator/helpers/disk.py b/blivet/populator/helpers/disk.py
index 9ed1eebe..cf20d302 100644
--- a/blivet/populator/helpers/disk.py
+++ b/blivet/populator/helpers/disk.py
@@ -282,6 +282,9 @@ class NVMeNamespaceDevicePopulator(DiskDevicePopulator):
log.debug("Failed to get namespace info for %s: %s", path, str(err))
else:
kwargs["nsid"] = ninfo.nsid
+ kwargs["uuid"] = ninfo.uuid
+ kwargs["eui64"] = ninfo.eui64
+ kwargs["nguid"] = ninfo.nguid
log.info("%s is an NVMe local namespace device", udev.device_get_name(self.data))
return kwargs
--
2.38.1

View File

@ -0,0 +1,205 @@
From 7a86d4306e3022b73035e21f66d515174264700e Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 9 Mar 2023 13:18:42 +0100
Subject: [PATCH 1/2] Add support for specifying stripe size for RAID LVs
---
blivet/devices/lvm.py | 28 +++++++++++++++++---
tests/storage_tests/devices_test/lvm_test.py | 12 +++++++--
tests/unit_tests/devices_test/lvm_test.py | 27 +++++++++++++++++++
3 files changed, 61 insertions(+), 6 deletions(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index b8595d63..41358e9b 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -659,7 +659,8 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None,
fmt=None, exists=False, sysfs_path='', grow=None, maxsize=None,
- percent=None, cache_request=None, pvs=None, from_lvs=None):
+ percent=None, cache_request=None, pvs=None, from_lvs=None,
+ stripe_size=0):
if not exists:
if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo", "cache-pool"] + lvm.raid_seg_types:
@@ -756,6 +757,15 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
if self._pv_specs:
self._assign_pv_space()
+ self._stripe_size = stripe_size
+ if not self.exists and self._stripe_size:
+ if self.seg_type not in lvm.raid_seg_types:
+ raise errors.DeviceError("Stripe size can be specified only for RAID volumes")
+ if self.seg_type in ("raid1", "RAID1", "1", 1, "mirror"):
+ raise errors.DeviceError("Specifying stripe size is not allowed for RAID1 or mirror")
+ if self.cache:
+ raise errors.DeviceError("Creating cached LVs with custom stripe size is not supported")
+
def _assign_pv_space(self):
if not self.is_raid_lv:
# nothing to do for non-RAID (and thus non-striped) LVs here
@@ -2295,7 +2305,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
parent_lv=None, int_type=None, origin=None, vorigin=False,
metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
compression=False, deduplication=False, index_memory=0,
- write_policy=None, cache_mode=None, attach_to=None):
+ write_policy=None, cache_mode=None, attach_to=None, stripe_size=0):
"""
:param name: the device name (generally a device node's basename)
:type name: str
@@ -2375,6 +2385,11 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
be attached to when created
:type attach_to: :class:`LVMLogicalVolumeDevice`
+ For RAID LVs only:
+
+ :keyword stripe_size: size of the RAID stripe
+ :type stripe_size: :class:`~.size.Size`
+
"""
if isinstance(parents, (list, ParentList)):
@@ -2395,7 +2410,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
LVMCachePoolMixin.__init__(self, metadata_size, cache_mode, attach_to)
LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
fmt, exists, sysfs_path, grow, maxsize,
- percent, cache_request, pvs, from_lvs)
+ percent, cache_request, pvs, from_lvs,
+ stripe_size)
LVMVDOPoolMixin.__init__(self, compression, deduplication, index_memory,
write_policy)
LVMVDOLogicalVolumeMixin.__init__(self)
@@ -2651,8 +2667,12 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
pvs = [spec.pv.path for spec in self._pv_specs]
pvs = pvs or None
+ extra = dict()
+ if self._stripe_size:
+ extra["stripesize"] = str(int(self._stripe_size.convert_to("KiB")))
+
blockdev.lvm.lvcreate(self.vg.name, self._name, self.size,
- type=self.seg_type, pv_list=pvs)
+ type=self.seg_type, pv_list=pvs, **extra)
else:
fast_pvs = [pv.path for pv in self.cache.fast_pvs]
diff --git a/tests/storage_tests/devices_test/lvm_test.py b/tests/storage_tests/devices_test/lvm_test.py
index a055fc27..97ef1c4b 100644
--- a/tests/storage_tests/devices_test/lvm_test.py
+++ b/tests/storage_tests/devices_test/lvm_test.py
@@ -1,4 +1,5 @@
import os
+import subprocess
from ..storagetestcase import StorageTestCase
@@ -127,7 +128,7 @@ class LVMTestCase(StorageTestCase):
self.assertTrue(snap.is_snapshot_lv)
self.assertEqual(snap.origin, thinlv)
- def _test_lvm_raid(self, seg_type, raid_level):
+ def _test_lvm_raid(self, seg_type, raid_level, stripe_size=0):
disk1 = self.storage.devicetree.get_device_by_path(self.vdevs[0])
self.assertIsNotNone(disk1)
self.storage.initialize_disk(disk1)
@@ -151,7 +152,7 @@ class LVMTestCase(StorageTestCase):
raidlv = self.storage.new_lv(fmt_type="ext4", size=blivet.size.Size("50 MiB"),
parents=[vg], name="blivetTestRAIDLV",
- seg_type=seg_type, pvs=[pv1, pv2])
+ seg_type=seg_type, pvs=[pv1, pv2], stripe_size=stripe_size)
self.storage.create_device(raidlv)
self.storage.do_it()
@@ -163,9 +164,16 @@ class LVMTestCase(StorageTestCase):
self.assertEqual(raidlv.raid_level, raid_level)
self.assertEqual(raidlv.seg_type, seg_type)
+ if stripe_size:
+ out = subprocess.check_output(["lvs", "-o", "stripe_size", "--noheadings", "--nosuffix", "--units=b", raidlv.vg.name + "/" + raidlv.lvname])
+ self.assertEqual(out.decode().strip(), str(int(stripe_size.convert_to())))
+
def test_lvm_raid_raid0(self):
self._test_lvm_raid("raid0", blivet.devicelibs.raid.RAID0)
+ def test_lvm_raid_raid0_stripe_size(self):
+ self._test_lvm_raid("raid0", blivet.devicelibs.raid.RAID0, stripe_size=blivet.size.Size("1 MiB"))
+
def test_lvm_raid_striped(self):
self._test_lvm_raid("striped", blivet.devicelibs.raid.Striped)
diff --git a/tests/unit_tests/devices_test/lvm_test.py b/tests/unit_tests/devices_test/lvm_test.py
index 995c2da4..d7b55224 100644
--- a/tests/unit_tests/devices_test/lvm_test.py
+++ b/tests/unit_tests/devices_test/lvm_test.py
@@ -363,6 +363,33 @@ class LVMDeviceTest(unittest.TestCase):
self.assertEqual(pv.format.free, Size("264 MiB"))
self.assertEqual(pv2.format.free, Size("256 MiB"))
+ def test_lvm_logical_volume_raid_stripe_size(self):
+ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("1025 MiB"))
+ pv2 = StorageDevice("pv2", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("513 MiB"))
+ vg = LVMVolumeGroupDevice("testvg", parents=[pv, pv2])
+
+ with self.assertRaises(blivet.errors.DeviceError):
+ # non-raid LV
+ lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("1 GiB"),
+ fmt=blivet.formats.get_format("xfs"),
+ exists=False, stripe_size=Size("1 MiB"))
+
+ with self.assertRaises(blivet.errors.DeviceError):
+ # raid1 LV
+ lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("1 GiB"),
+ fmt=blivet.formats.get_format("xfs"),
+ exists=False, seg_type="raid1", pvs=[pv, pv2],
+ stripe_size=Size("1 MiB"))
+
+ lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("1 GiB"),
+ fmt=blivet.formats.get_format("xfs"),
+ exists=False, seg_type="raid0", pvs=[pv, pv2],
+ stripe_size=Size("1 MiB"))
+
+ self.assertEqual(lv._stripe_size, Size("1 MiB"))
+
def test_target_size(self):
pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
size=Size("1 GiB"))
--
2.40.1
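
For illustration, a minimal sketch of how the new stripe_size keyword from the patch above is meant to be used, mirroring the unit test it adds; the PVs are in-memory stand-ins and all names and sizes are made up.

from blivet.devices import StorageDevice, LVMVolumeGroupDevice, LVMLogicalVolumeDevice
from blivet.formats import get_format
from blivet.size import Size

# in-memory, non-existing PVs and VG, as in the unit test above
pv1 = StorageDevice("pv1", fmt=get_format("lvmpv"), size=Size("1025 MiB"))
pv2 = StorageDevice("pv2", fmt=get_format("lvmpv"), size=Size("513 MiB"))
vg = LVMVolumeGroupDevice("testvg", parents=[pv1, pv2])

# stripe_size is only accepted for striped RAID segment types; raid1/mirror
# and linear LVs raise DeviceError when it is given
lv = LVMLogicalVolumeDevice("stripedlv", parents=[vg], size=Size("1 GiB"),
                            fmt=get_format("xfs"),
                            exists=False, seg_type="raid0", pvs=[pv1, pv2],
                            stripe_size=Size("1 MiB"))
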
From bbfd1a70abe8271f5fe3d29fe2be3bb8a1c6ecbc Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 3 May 2023 08:55:31 +0200
Subject: [PATCH 2/2] Revert "tests: Skip test_lvcreate_type on CentOS/RHEL 9"
This reverts commit 16b90071145d2d0f19a38f3003561a0cc9d6e281.
The kernel issue was resolved, so we no longer need to skip the test.
---
tests/skip.yml | 6 ------
1 file changed, 6 deletions(-)
diff --git a/tests/skip.yml b/tests/skip.yml
index 66b34493..c0ca0eaf 100644
--- a/tests/skip.yml
+++ b/tests/skip.yml
@@ -24,12 +24,6 @@
---
-- test: storage_tests.devices_test.lvm_test.LVMTestCase.test_lvm_raid
- skip_on:
- - distro: "centos"
- version: "9"
- reason: "Creating RAID 1 LV on CentOS/RHEL 9 causes a system deadlock"
-
- test: storage_tests.formats_test.fs_test.XFSTestCase.test_resize
skip_on:
- distro: ["centos", "enterprise_linux"]
--
2.40.1

View File

@ -0,0 +1,68 @@
From 1af0d3c37a93e431790e641a329a7f34dabf291a Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 2 Mar 2023 12:34:42 +0100
Subject: [PATCH] Fix setting kickstart data
When making our code PEP8 compliant we also accidentally changed some
pykickstart properties like onPart. This PR fixes that.
Resolves: rhbz#2174296
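
As a rough illustration of the attribute names the fix restores (the data object below is a plain stand-in for a pykickstart partition entry, not the real class):

from types import SimpleNamespace

data = SimpleNamespace()   # stand-in for a pykickstart partition data entry

# pykickstart expects camelCase names; the PEP8 conversion had switched
# blivet to snake_case attributes that pykickstart simply ignores
data.maxSizeMB = 2048      # was incorrectly data.max_size_mb
data.primOnly = True       # was incorrectly data.prim_only
data.onPart = "vda2"       # was incorrectly data.on_part
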
---
blivet/devices/btrfs.py | 4 ++--
blivet/devices/lvm.py | 2 +-
blivet/devices/partition.py | 6 +++---
3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/blivet/devices/btrfs.py b/blivet/devices/btrfs.py
index 1ae6a04d..3f56624e 100644
--- a/blivet/devices/btrfs.py
+++ b/blivet/devices/btrfs.py
@@ -498,8 +498,8 @@ class BTRFSVolumeDevice(BTRFSDevice, ContainerDevice, RaidDevice):
def populate_ksdata(self, data):
super(BTRFSVolumeDevice, self).populate_ksdata(data)
- data.data_level = self.data_level.name if self.data_level else None
- data.metadata_level = self.metadata_level.name if self.metadata_level else None
+ data.dataLevel = self.data_level.name if self.data_level else None
+ data.metaDataLevel = self.metadata_level.name if self.metadata_level else None
data.devices = ["btrfs.%d" % p.id for p in self.parents]
data.preexist = self.exists
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 41358e9b..c3132457 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -1161,7 +1161,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
if self.req_grow:
# base size could be literal or percentage
- data.max_size_mb = self.req_max_size.convert_to(MiB)
+ data.maxSizeMB = self.req_max_size.convert_to(MiB)
elif data.resize:
data.size = self.target_size.convert_to(MiB)
diff --git a/blivet/devices/partition.py b/blivet/devices/partition.py
index 89d907c2..0e9250ce 100644
--- a/blivet/devices/partition.py
+++ b/blivet/devices/partition.py
@@ -982,14 +982,14 @@ class PartitionDevice(StorageDevice):
data.size = self.req_base_size.round_to_nearest(MiB, rounding=ROUND_DOWN).convert_to(spec=MiB)
data.grow = self.req_grow
if self.req_grow:
- data.max_size_mb = self.req_max_size.convert_to(MiB)
+ data.maxSizeMB = self.req_max_size.convert_to(MiB)
# data.disk = self.disk.name # by-id
if self.req_disks and len(self.req_disks) == 1:
data.disk = self.disk.name
- data.prim_only = self.req_primary
+ data.primOnly = self.req_primary
else:
- data.on_part = self.name # by-id
+ data.onPart = self.name # by-id
if data.resize:
# on s390x in particular, fractional sizes are reported, which
--
2.40.1

View File

@ -0,0 +1,133 @@
From c2b06150df0b876c7d442097b6c9ca90c9ca2ecc Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 4 May 2023 11:35:44 +0200
Subject: [PATCH] Do not set memory limit for LUKS2 when running in FIPS mode
With FIPS enabled, LUKS2 uses PBKDF2 rather than Argon2, so the memory
limit is not a valid parameter.
Resolves: rhbz#2193096
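
A standalone sketch of the FIPS detection this patch introduces; it mirrors the new crypto.is_fips_enabled() helper and only assumes the standard /proc interface.

import os

def is_fips_enabled():
    # same logic as blivet.devicelibs.crypto.is_fips_enabled() added below
    if not os.path.exists("/proc/sys/crypto/fips_enabled"):
        # a missing file means the kernel has no FIPS support at all
        return False
    with open("/proc/sys/crypto/fips_enabled") as f:
        return f.read().strip() == "1"

# Argon2 memory limits are only relevant outside FIPS mode (FIPS uses PBKDF2)
print("skip LUKS2 memory limit:", is_fips_enabled())
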
---
blivet/devicelibs/crypto.py | 11 +++++++
blivet/formats/luks.py | 12 ++++----
tests/unit_tests/formats_tests/luks_test.py | 30 +++++++++++++++++++
.../unit_tests/formats_tests/methods_test.py | 3 +-
4 files changed, 50 insertions(+), 6 deletions(-)
diff --git a/blivet/devicelibs/crypto.py b/blivet/devicelibs/crypto.py
index f0caf0f7..68e68db1 100644
--- a/blivet/devicelibs/crypto.py
+++ b/blivet/devicelibs/crypto.py
@@ -21,6 +21,7 @@
#
import hashlib
+import os
import gi
gi.require_version("BlockDev", "2.0")
@@ -100,3 +101,13 @@ def calculate_integrity_metadata_size(device_size, algorithm=DEFAULT_INTEGRITY_A
jsize = (jsize / SECTOR_SIZE + 1) * SECTOR_SIZE # round up to sector
return msize + jsize
+
+
+def is_fips_enabled():
+ if not os.path.exists("/proc/sys/crypto/fips_enabled"):
+ # if the file doesn't exist, we are definitely not in FIPS mode
+ return False
+
+ with open("/proc/sys/crypto/fips_enabled", "r") as f:
+ enabled = f.read()
+ return enabled.strip() == "1"
diff --git a/blivet/formats/luks.py b/blivet/formats/luks.py
index 2637e0c5..adf3c711 100644
--- a/blivet/formats/luks.py
+++ b/blivet/formats/luks.py
@@ -303,11 +303,13 @@ class LUKS(DeviceFormat):
if luks_data.pbkdf_args:
self.pbkdf_args = luks_data.pbkdf_args
else:
- mem_limit = crypto.calculate_luks2_max_memory()
- if mem_limit:
- self.pbkdf_args = LUKS2PBKDFArgs(max_memory_kb=int(mem_limit.convert_to(KiB)))
- luks_data.pbkdf_args = self.pbkdf_args
- log.info("PBKDF arguments for LUKS2 not specified, using defaults with memory limit %s", mem_limit)
+ # argon is not used with FIPS so we don't need to adjust the memory when in FIPS mode
+ if not crypto.is_fips_enabled():
+ mem_limit = crypto.calculate_luks2_max_memory()
+ if mem_limit:
+ self.pbkdf_args = LUKS2PBKDFArgs(max_memory_kb=int(mem_limit.convert_to(KiB)))
+ luks_data.pbkdf_args = self.pbkdf_args
+ log.info("PBKDF arguments for LUKS2 not specified, using defaults with memory limit %s", mem_limit)
if self.pbkdf_args:
pbkdf = blockdev.CryptoLUKSPBKDF(type=self.pbkdf_args.type,
diff --git a/tests/unit_tests/formats_tests/luks_test.py b/tests/unit_tests/formats_tests/luks_test.py
index ec7b7592..1127e968 100644
--- a/tests/unit_tests/formats_tests/luks_test.py
+++ b/tests/unit_tests/formats_tests/luks_test.py
@@ -6,9 +6,14 @@ except ImportError:
import unittest
from blivet.formats.luks import LUKS
+from blivet.size import Size
+from blivet.static_data import luks_data
class LUKSNodevTestCase(unittest.TestCase):
+ def setUp(self):
+ luks_data.pbkdf_args = None
+
def test_create_discard_option(self):
# flags.discard_new=False --> no discard
fmt = LUKS(exists=False)
@@ -51,6 +56,31 @@ class LUKSNodevTestCase(unittest.TestCase):
fmt = LUKS(cipher="aes-cbc-plain64")
self.assertEqual(fmt.key_size, 0)
+ def test_luks2_pbkdf_memory_fips(self):
+ fmt = LUKS()
+ with patch("blivet.formats.luks.blockdev.crypto") as bd:
+ # fips enabled, pbkdf memory should not be set
+ with patch("blivet.formats.luks.crypto") as crypto:
+ attrs = {"is_fips_enabled.return_value": True,
+ "get_optimal_luks_sector_size.return_value": 0,
+ "calculate_luks2_max_memory.return_value": Size("256 MiB")}
+ crypto.configure_mock(**attrs)
+
+ fmt._create()
+ crypto.calculate_luks2_max_memory.assert_not_called()
+ self.assertEqual(bd.luks_format.call_args[1]["extra"].pbkdf.max_memory_kb, 0)
+
+ # fips disabled, pbkdf memory should be set
+ with patch("blivet.formats.luks.crypto") as crypto:
+ attrs = {"is_fips_enabled.return_value": False,
+ "get_optimal_luks_sector_size.return_value": 0,
+ "calculate_luks2_max_memory.return_value": Size("256 MiB")}
+ crypto.configure_mock(**attrs)
+
+ fmt._create()
+ crypto.calculate_luks2_max_memory.assert_called()
+ self.assertEqual(bd.luks_format.call_args[1]["extra"].pbkdf.max_memory_kb, 256 * 1024)
+
def test_sector_size(self):
fmt = LUKS()
self.assertEqual(fmt.luks_sector_size, 512)
diff --git a/tests/unit_tests/formats_tests/methods_test.py b/tests/unit_tests/formats_tests/methods_test.py
index 2743b7db..5d30c260 100644
--- a/tests/unit_tests/formats_tests/methods_test.py
+++ b/tests/unit_tests/formats_tests/methods_test.py
@@ -366,7 +366,8 @@ class LUKSMethodsTestCase(FormatMethodsTestCase):
def _test_create_backend(self):
self.format.exists = False
- self.format.create()
+ with patch("blivet.devicelibs.crypto.is_fips_enabled", return_value=False):
+ self.format.create()
self.assertTrue(self.patches["blockdev"].crypto.luks_format.called) # pylint: disable=no-member
def _test_setup_backend(self):
--
2.40.1

View File

@ -0,0 +1,265 @@
From eb16230427fc1081f8515e6ad69ccf99ca521e5d Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 4 Apr 2023 13:31:40 +0200
Subject: [PATCH 1/2] Add support for filesystem online resize
Resolves: rhbz#2168680
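
A small sketch of how the new FSResize capability bits are meant to combine; the constants mirror blivet/formats/fslib.py from this patch, while the helper function is illustrative only.

class FSResize():
    OFFLINE_SHRINK = 1 << 1
    OFFLINE_GROW = 1 << 2
    ONLINE_SHRINK = 1 << 3
    ONLINE_GROW = 1 << 4

# e.g. ext4: grows online or offline, shrinks only offline
EXT4_SUPPORT = FSResize.ONLINE_GROW | FSResize.OFFLINE_GROW | FSResize.OFFLINE_SHRINK

def resize_allowed(support, mounted, growing):
    """Illustrative check corresponding to the new FS._pre_resize() logic."""
    if mounted:
        needed = FSResize.ONLINE_GROW if growing else FSResize.ONLINE_SHRINK
    else:
        needed = FSResize.OFFLINE_GROW if growing else FSResize.OFFLINE_SHRINK
    return bool(support & needed)

print(resize_allowed(EXT4_SUPPORT, mounted=True, growing=True))    # True
print(resize_allowed(EXT4_SUPPORT, mounted=True, growing=False))   # False
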
---
blivet/devices/lvm.py | 13 ++++++++-----
blivet/devices/partition.py | 11 ++++++-----
blivet/flags.py | 3 +++
blivet/formats/fs.py | 32 ++++++++++++++++++++++++++++----
blivet/formats/fslib.py | 7 +++++++
5 files changed, 52 insertions(+), 14 deletions(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index c3132457..ca45c4b5 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -42,6 +42,7 @@ from .. import errors
from .. import util
from ..storage_log import log_method_call
from .. import udev
+from ..flags import flags
from ..size import Size, KiB, MiB, ROUND_UP, ROUND_DOWN
from ..static_data.lvm_info import lvs_info
from ..tasks import availability
@@ -2729,12 +2730,14 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
# Setup VG parents (in case they are dmraid partitions for example)
self.vg.setup_parents(orig=True)
- if self.original_format.exists:
- self.original_format.teardown()
- if self.format.exists:
- self.format.teardown()
+ if not flags.allow_online_fs_resize:
+ if self.original_format.exists:
+ self.original_format.teardown()
+ if self.format.exists:
+ self.format.teardown()
+
+ udev.settle()
- udev.settle()
blockdev.lvm.lvresize(self.vg.name, self._name, self.size)
@type_specific
diff --git a/blivet/devices/partition.py b/blivet/devices/partition.py
index 0e9250ce..6ae4b8d3 100644
--- a/blivet/devices/partition.py
+++ b/blivet/devices/partition.py
@@ -745,11 +745,12 @@ class PartitionDevice(StorageDevice):
if not self.exists:
raise errors.DeviceError("device has not been created")
- # don't teardown when resizing luks
- if self.format.type == "luks" and self.children:
- self.children[0].format.teardown()
- else:
- self.teardown()
+ if not flags.allow_online_fs_resize:
+ # don't teardown when resizing luks
+ if self.format.type == "luks" and self.children:
+ self.children[0].format.teardown()
+ else:
+ self.teardown()
if not self.sysfs_path:
return
diff --git a/blivet/flags.py b/blivet/flags.py
index 6364164d..ecfa7ad7 100644
--- a/blivet/flags.py
+++ b/blivet/flags.py
@@ -91,6 +91,9 @@ class Flags(object):
self.debug_threads = False
+ # Allow online filesystem resizes
+ self.allow_online_fs_resize = False
+
def get_boot_cmdline(self):
with open("/proc/cmdline") as f:
buf = f.read().strip()
diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py
index 33922f3a..3f553eb0 100644
--- a/blivet/formats/fs.py
+++ b/blivet/formats/fs.py
@@ -56,7 +56,7 @@ from ..i18n import N_
from .. import udev
from ..mounts import mounts_cache
-from .fslib import kernel_filesystems
+from .fslib import kernel_filesystems, FSResize
import logging
log = logging.getLogger("blivet")
@@ -88,6 +88,9 @@ class FS(DeviceFormat):
# value is already unpredictable and can change in the future...
_metadata_size_factor = 1.0
+ # support for resize: grow/shrink, online/offline
+ _resize_support = 0
+
config_actions_map = {"label": "write_label"}
def __init__(self, **kwargs):
@@ -436,12 +439,27 @@ class FS(DeviceFormat):
self.write_uuid()
def _pre_resize(self):
- # file systems need a check before being resized
- self.do_check()
+ if self.status:
+ if flags.allow_online_fs_resize:
+ if self.target_size > self.size and not self._resize_support & FSResize.ONLINE_GROW:
+ raise FSError("This filesystem doesn't support online growing")
+ if self.target_size < self.size and not self._resize_support & FSResize.ONLINE_SHRINK:
+ raise FSError("This filesystem doesn't support online shrinking")
+ else:
+ raise FSError("Resizing of mounted filesystems is disabled")
+
+ if self.status:
+ # fsck tools in general don't allow checks on mounted filesystems
+ log.debug("Filesystem on %s is mounted, not checking", self.device)
+ else:
+ # file systems need a check before being resized
+ self.do_check()
+
super(FS, self)._pre_resize()
def _post_resize(self):
- self.do_check()
+ if not self.status:
+ self.do_check()
super(FS, self)._post_resize()
def do_check(self):
@@ -838,6 +856,7 @@ class Ext2FS(FS):
_formattable = True
_supported = True
_resizable = True
+ _resize_support = FSResize.ONLINE_GROW | FSResize.OFFLINE_GROW | FSResize.OFFLINE_SHRINK
_linux_native = True
_max_size = Size("8 TiB")
_dump = True
@@ -1097,6 +1116,7 @@ class XFS(FS):
_linux_native = True
_supported = True
_resizable = True
+ _resize_support = FSResize.ONLINE_GROW | FSResize.OFFLINE_GROW
_packages = ["xfsprogs"]
_fsck_class = fsck.XFSCK
_info_class = fsinfo.XFSInfo
@@ -1247,6 +1267,7 @@ class NTFS(FS):
_labelfs = fslabeling.NTFSLabeling()
_uuidfs = fsuuid.NTFSUUID()
_resizable = True
+ _resize_support = FSResize.OFFLINE_GROW | FSResize.OFFLINE_SHRINK
_formattable = True
_supported = True
_min_size = Size("1 MiB")
@@ -1490,6 +1511,9 @@ class TmpFS(NoDevFS):
# same, nothing actually needs to be set
pass
+ def _pre_resize(self):
+ self.do_check()
+
def do_resize(self):
# Override superclass method to record whether mount options
# should include an explicit size specification.
diff --git a/blivet/formats/fslib.py b/blivet/formats/fslib.py
index ea93b1fd..8722e942 100644
--- a/blivet/formats/fslib.py
+++ b/blivet/formats/fslib.py
@@ -36,3 +36,10 @@ def update_kernel_filesystems():
update_kernel_filesystems()
+
+
+class FSResize():
+ OFFLINE_SHRINK = 1 << 1
+ OFFLINE_GROW = 1 << 2
+ ONLINE_SHRINK = 1 << 3
+ ONLINE_GROW = 1 << 4
--
2.40.1
From 3fce5d0bfd7b09a976ff49feed15077477c6a425 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 6 Apr 2023 14:02:11 +0200
Subject: [PATCH 2/2] Add a test case for filesystem online resize
Related: rhbz#2168680
---
tests/storage_tests/formats_test/fs_test.py | 43 ++++++++++++++++++++-
1 file changed, 42 insertions(+), 1 deletion(-)
diff --git a/tests/storage_tests/formats_test/fs_test.py b/tests/storage_tests/formats_test/fs_test.py
index 97f4cbbe..1d42dc21 100644
--- a/tests/storage_tests/formats_test/fs_test.py
+++ b/tests/storage_tests/formats_test/fs_test.py
@@ -6,9 +6,10 @@ import parted
import blivet.formats.fs as fs
from blivet.size import Size, ROUND_DOWN
-from blivet.errors import DeviceFormatError
+from blivet.errors import DeviceFormatError, FSError
from blivet.formats import get_format
from blivet.devices import PartitionDevice, DiskDevice
+from blivet.flags import flags
from .loopbackedtestcase import LoopBackedTestCase
@@ -26,6 +27,46 @@ class Ext3FSTestCase(Ext2FSTestCase):
class Ext4FSTestCase(Ext3FSTestCase):
_fs_class = fs.Ext4FS
+ def test_online_resize(self):
+ an_fs = self._fs_class()
+ if not an_fs.formattable:
+ self.skipTest("can not create filesystem %s" % an_fs.name)
+ an_fs.device = self.loop_devices[0]
+ self.assertIsNone(an_fs.create())
+ an_fs.update_size_info()
+
+ if not self.can_resize(an_fs):
+ self.skipTest("filesystem is not resizable")
+
+ # shrink offline first (ext doesn't support online shrinking)
+ TARGET_SIZE = Size("64 MiB")
+ an_fs.target_size = TARGET_SIZE
+ self.assertEqual(an_fs.target_size, TARGET_SIZE)
+ self.assertNotEqual(an_fs._size, TARGET_SIZE)
+ self.assertIsNone(an_fs.do_resize())
+
+ with tempfile.TemporaryDirectory() as mountpoint:
+ an_fs.mount(mountpoint=mountpoint)
+
+ # grow back when mounted
+ TARGET_SIZE = Size("100 MiB")
+ an_fs.target_size = TARGET_SIZE
+ self.assertEqual(an_fs.target_size, TARGET_SIZE)
+ self.assertNotEqual(an_fs._size, TARGET_SIZE)
+
+ # should fail, online resize disabled by default
+ with self.assertRaisesRegex(FSError, "Resizing of mounted filesystems is disabled"):
+ an_fs.do_resize()
+
+ # enable online resize
+ flags.allow_online_fs_resize = True
+ an_fs.do_resize()
+ flags.allow_online_fs_resize = False
+ self._test_sizes(an_fs)
+ self.assertEqual(an_fs.system_mountpoint, mountpoint)
+
+ an_fs.unmount()
+
class FATFSTestCase(fstesting.FSAsRoot):
_fs_class = fs.FATFS
--
2.40.1

View File

@ -0,0 +1,383 @@
From 2d26f69abc2d793e753c0cddb3086264ca2b4e71 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 6 Mar 2023 10:51:42 +0100
Subject: [PATCH 1/5] Allow changing iSCSI initiator name after setting it
Resolves: rhbz#2221935
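
A hedged usage sketch of the behaviour this series enables: re-assigning the initiator name after it was set no longer raises, provided there is no active session. It needs root and a working iscsid/storaged setup, so treat it as illustrative only.

from blivet.iscsi import iscsi, has_iscsi

if has_iscsi():
    iscsi.initiator = "iqn.1994-05.com.redhat:example-host"
    # with this patch the second assignment rewrites the name and restarts
    # iscsid instead of raising ValueError (it still fails with active sessions)
    iscsi.initiator = "iqn.1994-05.com.redhat:example-host-renamed"
    print(iscsi.initiator)
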
---
blivet/iscsi.py | 13 +++++++++++--
1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/blivet/iscsi.py b/blivet/iscsi.py
index 86451db3..0d063f2a 100644
--- a/blivet/iscsi.py
+++ b/blivet/iscsi.py
@@ -212,14 +212,23 @@ class iSCSI(object):
@initiator.setter
@storaged_iscsi_required(critical=True, eval_mode=util.EvalMode.onetime)
def initiator(self, val):
- if self.initiator_set and val != self._initiator:
- raise ValueError(_("Unable to change iSCSI initiator name once set"))
if len(val) == 0:
raise ValueError(_("Must provide an iSCSI initiator name"))
+ active = self._get_active_sessions()
+ if active:
+ raise errors.ISCSIError(_("Cannot change initiator name with an active session"))
+
log.info("Setting up iSCSI initiator name %s", self.initiator)
args = GLib.Variant("(sa{sv})", (val, None))
self._call_initiator_method("SetInitiatorName", args)
+
+ if self.initiator_set and val != self._initiator:
+ log.info("Restarting iscsid after initiator name change")
+ rc = util.run_program(["systemctl", "restart", "iscsid"])
+ if rc != 0:
+ raise errors.ISCSIError(_("Failed to restart iscsid after initiator name change"))
+
self._initiator = val
def active_nodes(self, target=None):
--
2.40.1
From 7c226ed0e14efcdd6e562e357d8f3465ad43ef33 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 6 Mar 2023 15:10:28 +0100
Subject: [PATCH 2/5] Add a basic test case for the iscsi module
Related: rhbz#2221935
---
misc/install-test-dependencies.yml | 3 +
tests/storage_tests/__init__.py | 2 +
tests/storage_tests/iscsi_test.py | 157 +++++++++++++++++++++++++++++
3 files changed, 162 insertions(+)
create mode 100644 tests/storage_tests/iscsi_test.py
diff --git a/tests/storage_tests/__init__.py b/tests/storage_tests/__init__.py
index 3b2a6cc4..e69fcc34 100644
--- a/tests/storage_tests/__init__.py
+++ b/tests/storage_tests/__init__.py
@@ -3,3 +3,5 @@ from .formats_test import *
from .partitioning_test import *
from .unsupported_disklabel_test import *
+
+from .iscsi_test import *
diff --git a/tests/storage_tests/iscsi_test.py b/tests/storage_tests/iscsi_test.py
new file mode 100644
index 00000000..00cc7c36
--- /dev/null
+++ b/tests/storage_tests/iscsi_test.py
@@ -0,0 +1,157 @@
+import glob
+import os
+import re
+import shutil
+import subprocess
+import unittest
+
+from contextlib import contextmanager
+
+from .storagetestcase import create_sparse_tempfile
+
+
+def read_file(filename, mode="r"):
+ with open(filename, mode) as f:
+ content = f.read()
+ return content
+
+
+@contextmanager
+def udev_settle():
+ try:
+ yield
+ finally:
+ os.system("udevadm settle")
+
+
+def _delete_backstore(name):
+ status = subprocess.call(["targetcli", "/backstores/fileio/ delete %s" % name],
+ stdout=subprocess.DEVNULL)
+ if status != 0:
+ raise RuntimeError("Failed to delete the '%s' fileio backstore" % name)
+
+
+def delete_iscsi_target(iqn, backstore=None):
+ status = subprocess.call(["targetcli", "/iscsi delete %s" % iqn],
+ stdout=subprocess.DEVNULL)
+ if status != 0:
+ raise RuntimeError("Failed to delete the '%s' iscsi device" % iqn)
+
+ if backstore is not None:
+ _delete_backstore(backstore)
+
+
+def create_iscsi_target(fpath, initiator_name=None):
+ """
+ Creates a new iSCSI target (using targetcli) on top of the
+ :param:`fpath` backing file.
+
+ :param str fpath: path of the backing file
+ :returns: iSCSI IQN, backstore name
+ :rtype: tuple of str
+
+ """
+
+ # "register" the backing file as a fileio backstore
+ store_name = os.path.basename(fpath)
+ status = subprocess.call(["targetcli", "/backstores/fileio/ create %s %s" % (store_name, fpath)], stdout=subprocess.DEVNULL)
+ if status != 0:
+ raise RuntimeError("Failed to register '%s' as a fileio backstore" % fpath)
+
+ out = subprocess.check_output(["targetcli", "/backstores/fileio/%s info" % store_name])
+ out = out.decode("utf-8")
+ store_wwn = None
+ for line in out.splitlines():
+ if line.startswith("wwn: "):
+ store_wwn = line[5:]
+ if store_wwn is None:
+ raise RuntimeError("Failed to determine '%s' backstore's wwn" % store_name)
+
+ # create a new iscsi device
+ out = subprocess.check_output(["targetcli", "/iscsi create"])
+ out = out.decode("utf-8")
+ match = re.match(r'Created target (.*).', out)
+ if match:
+ iqn = match.groups()[0]
+ else:
+ _delete_backstore(store_name)
+ raise RuntimeError("Failed to create a new iscsi target")
+
+ if initiator_name:
+ status = subprocess.call(["targetcli", "/iscsi/%s/tpg1/acls create %s" % (iqn, initiator_name)], stdout=subprocess.DEVNULL)
+ if status != 0:
+ delete_iscsi_target(iqn, store_name)
+ raise RuntimeError("Failed to set ACLs for '%s'" % iqn)
+
+ with udev_settle():
+ status = subprocess.call(["targetcli", "/iscsi/%s/tpg1/luns create /backstores/fileio/%s" % (iqn, store_name)], stdout=subprocess.DEVNULL)
+ if status != 0:
+ delete_iscsi_target(iqn, store_name)
+ raise RuntimeError("Failed to create a new LUN for '%s' using '%s'" % (iqn, store_name))
+
+ status = subprocess.call(["targetcli", "/iscsi/%s/tpg1 set attribute generate_node_acls=1" % iqn], stdout=subprocess.DEVNULL)
+ if status != 0:
+ raise RuntimeError("Failed to set ACLs for '%s'" % iqn)
+
+ return iqn, store_name
+
+
+@unittest.skipUnless(os.geteuid() == 0, "requires root privileges")
+@unittest.skipUnless(os.environ.get("JENKINS_HOME"), "jenkins only test")
+@unittest.skipUnless(shutil.which("iscsiadm"), "iscsiadm not available")
+class ISCSITestCase(unittest.TestCase):
+
+ _disk_size = 512 * 1024**2
+ initiator = 'iqn.1994-05.com.redhat:iscsi-test'
+
+ def setUp(self):
+ self.addCleanup(self._clean_up)
+
+ self._dev_file = None
+ self.dev = None
+
+ self._dev_file = create_sparse_tempfile("blivet_test", self._disk_size)
+ try:
+ self.dev, self.backstore = create_iscsi_target(self._dev_file, self.initiator)
+ except RuntimeError as e:
+ raise RuntimeError("Failed to setup targetcli device for testing: %s" % e)
+
+ def _force_logout(self):
+ subprocess.call(["iscsiadm", "--mode", "node", "--logout", "--name", self.dev], stdout=subprocess.DEVNULL)
+
+ def _clean_up(self):
+ self._force_logout()
+ delete_iscsi_target(self.dev, self.backstore)
+ os.unlink(self._dev_file)
+
+ def test_discover_login(self):
+ from blivet.iscsi import iscsi, has_iscsi
+
+ if not has_iscsi():
+ self.skipTest("iSCSI not available, skipping")
+
+ iscsi.initiator = self.initiator
+ nodes = iscsi.discover("127.0.0.1")
+ self.assertTrue(nodes)
+
+ if len(nodes) > 1:
+ self.skipTest("Discovered more than one iSCSI target on localhost, skipping")
+
+ self.assertEqual(nodes[0].address, "127.0.0.1")
+ self.assertEqual(nodes[0].port, 3260)
+ self.assertEqual(nodes[0].name, self.dev)
+
+ # change the initiator name
+ iscsi.initiator = self.initiator + "_1"
+ self.assertEqual(iscsi.initiator, self.initiator + "_1")
+
+ # try to login
+ ret, err = iscsi.log_into_node(nodes[0])
+ self.assertTrue(ret, "Login failed: %s" % err)
+
+ # check the session for initiator name
+ sessions = glob.glob("/sys/class/iscsi_session/*/")
+ self.assertTrue(sessions)
+ self.assertEqual(len(sessions), 1)
+ initiator = read_file(sessions[0] + "initiatorname").strip()
+ self.assertEqual(initiator, iscsi.initiator)
--
2.40.1
From dfd0c59a901f54ecfd8f538a2bb004a2e5ab6eec Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 6 Mar 2023 15:14:40 +0100
Subject: [PATCH 3/5] tests: Use blivet-specific prefix for targetcli backing
files
The code is originally from libblockdev hence the "bd" prefix, we
should use a different prefix for blivet to be able to identify
which test suite failed to clean the files.
Related: rhbz#2221935
---
tests/storage_tests/storagetestcase.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/storage_tests/storagetestcase.py b/tests/storage_tests/storagetestcase.py
index 35d57ce9..9f859977 100644
--- a/tests/storage_tests/storagetestcase.py
+++ b/tests/storage_tests/storagetestcase.py
@@ -39,7 +39,7 @@ def create_sparse_tempfile(name, size):
:param size: the file size (in bytes)
:returns: the path to the newly created file
"""
- (fd, path) = tempfile.mkstemp(prefix="bd.", suffix="-%s" % name)
+ (fd, path) = tempfile.mkstemp(prefix="blivet.", suffix="-%s" % name)
os.close(fd)
create_sparse_file(path, size)
return path
--
2.40.1
From 492fc9b8dfc2d4aa7cb44baa4408570babcb5e96 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 19 Jul 2023 13:57:39 +0200
Subject: [PATCH 4/5] iscsi: Save firmware initiator name to
/etc/iscsi/initiatorname.iscsi
Resolves: rhbz#2221932
---
blivet/iscsi.py | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/blivet/iscsi.py b/blivet/iscsi.py
index 0d063f2a..8080a671 100644
--- a/blivet/iscsi.py
+++ b/blivet/iscsi.py
@@ -160,6 +160,11 @@ class iSCSI(object):
self._initiator = initiatorname
except Exception as e: # pylint: disable=broad-except
log.info("failed to get initiator name from iscsi firmware: %s", str(e))
+ else:
+ # write the firmware initiator to /etc/iscsi/initiatorname.iscsi
+ log.info("Setting up firmware iSCSI initiator name %s", self.initiator)
+ args = GLib.Variant("(sa{sv})", (initiatorname, None))
+ self._call_initiator_method("SetInitiatorName", args)
# So that users can write iscsi() to get the singleton instance
def __call__(self):
--
2.40.1
From 768d90815b7f95d0d6d278397fd6fd12a0490b5d Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 19 Jul 2023 10:38:45 +0200
Subject: [PATCH 5/5] tests: Improve iscsi_test.ISCSITestCase
Changed how we create the initiator name ACLs based on RTT test
case for rhbz#2084043 and also improved the test case itself.
Related: rhbz#2221935
---
tests/storage_tests/iscsi_test.py | 36 +++++++++++++++++++++----------
1 file changed, 25 insertions(+), 11 deletions(-)
diff --git a/tests/storage_tests/iscsi_test.py b/tests/storage_tests/iscsi_test.py
index 00cc7c36..6cc83a59 100644
--- a/tests/storage_tests/iscsi_test.py
+++ b/tests/storage_tests/iscsi_test.py
@@ -77,21 +77,17 @@ def create_iscsi_target(fpath, initiator_name=None):
_delete_backstore(store_name)
raise RuntimeError("Failed to create a new iscsi target")
- if initiator_name:
- status = subprocess.call(["targetcli", "/iscsi/%s/tpg1/acls create %s" % (iqn, initiator_name)], stdout=subprocess.DEVNULL)
- if status != 0:
- delete_iscsi_target(iqn, store_name)
- raise RuntimeError("Failed to set ACLs for '%s'" % iqn)
-
with udev_settle():
status = subprocess.call(["targetcli", "/iscsi/%s/tpg1/luns create /backstores/fileio/%s" % (iqn, store_name)], stdout=subprocess.DEVNULL)
if status != 0:
delete_iscsi_target(iqn, store_name)
raise RuntimeError("Failed to create a new LUN for '%s' using '%s'" % (iqn, store_name))
- status = subprocess.call(["targetcli", "/iscsi/%s/tpg1 set attribute generate_node_acls=1" % iqn], stdout=subprocess.DEVNULL)
- if status != 0:
- raise RuntimeError("Failed to set ACLs for '%s'" % iqn)
+ if initiator_name:
+ status = subprocess.call(["targetcli", "/iscsi/%s/tpg1/acls create %s" % (iqn, initiator_name)], stdout=subprocess.DEVNULL)
+ if status != 0:
+ delete_iscsi_target(iqn, store_name)
+ raise RuntimeError("Failed to set ACLs for '%s'" % iqn)
return iqn, store_name
@@ -130,6 +126,7 @@ class ISCSITestCase(unittest.TestCase):
if not has_iscsi():
self.skipTest("iSCSI not available, skipping")
+ # initially set the initiator to the correct/allowed one
iscsi.initiator = self.initiator
nodes = iscsi.discover("127.0.0.1")
self.assertTrue(nodes)
@@ -141,11 +138,28 @@ class ISCSITestCase(unittest.TestCase):
self.assertEqual(nodes[0].port, 3260)
self.assertEqual(nodes[0].name, self.dev)
- # change the initiator name
+ # change the initiator name to a wrong one
iscsi.initiator = self.initiator + "_1"
self.assertEqual(iscsi.initiator, self.initiator + "_1")
- # try to login
+ # check the change made it to /etc/iscsi/initiatorname.iscsi
+ initiator_file = read_file("/etc/iscsi/initiatorname.iscsi").strip()
+ self.assertEqual(initiator_file, "InitiatorName=%s" % self.initiator + "_1")
+
+ # try to login (should fail)
+ ret, err = iscsi.log_into_node(nodes[0])
+ self.assertFalse(ret)
+ self.assertIn("authorization failure", err)
+
+ # change the initiator name back to the correct one
+ iscsi.initiator = self.initiator
+ self.assertEqual(iscsi.initiator, self.initiator)
+
+ # check the change made it to /etc/iscsi/initiatorname.iscsi
+ initiator_file = read_file("/etc/iscsi/initiatorname.iscsi").strip()
+ self.assertEqual(initiator_file, "InitiatorName=%s" % self.initiator)
+
+ # try to login (should work now)
ret, err = iscsi.log_into_node(nodes[0])
self.assertTrue(ret, "Login failed: %s" % err)
--
2.40.1

View File

@ -0,0 +1,26 @@
From 9dcd32dd85f7f45c3fe6c8d7b1de3b4c322c6807 Mon Sep 17 00:00:00 2001
From: Tomas Bzatek <tbzatek@redhat.com>
Date: Mon, 11 Sep 2023 13:50:24 +0200
Subject: [PATCH] nvme: Require additional rpms for dracut
The '95nvmf' dracut module needs a couple more packages
for the NBFT (NVMe over TCP) to work, such as networking.
Local PCIe NVMe devices have no special needs.
---
blivet/devices/disk.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index 2b49ef685..5053f7bb8 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -725,7 +725,8 @@ class NVMeFabricsNamespaceDevice(NVMeNamespaceDevice, NetworkStorageDevice):
""" NVMe fabrics namespace """
_type = "nvme-fabrics"
- _packages = ["nvme-cli"]
+ # dracut '95nvmf' module dependencies
+ _packages = ["nvme-cli", "dracut-network"]
def __init__(self, device, **kwargs):
"""

View File

@ -0,0 +1,107 @@
From 06597099906be55b106c234b3bf0c87ec7d90a07 Mon Sep 17 00:00:00 2001
From: Tomas Bzatek <tbzatek@redhat.com>
Date: Thu, 17 Aug 2023 14:45:18 +0200
Subject: [PATCH] nvme: Align HostNQN and HostID format to TP4126
Also don't overwrite existing files during startup() since they
might have been supplied by early boot stages.
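
For reference, a minimal sketch of the HostID derivation the new startup() performs when no HostID is stored: it reuses the UUID part of a TP4126-style HostNQN. The NQN value below is made up.

hostnqn = "nqn.2014-08.org.nvmexpress:uuid:01234567-8900-abcd-efff-abcdabcdabcd"

if "uuid:" not in hostnqn:
    # non-TP4126 HostNQNs carry no UUID to derive a HostID from
    raise ValueError("Missing UUID part in the HostNQN string")

# HostID is the UUID part of the HostNQN, as in the new startup() logic
hostid = hostnqn.split("uuid:")[1]
print(hostid)   # 01234567-8900-abcd-efff-abcdabcdabcd
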
---
blivet/nvme.py | 62 +++++++++++++++++++++++++++++++-------------------
1 file changed, 39 insertions(+), 23 deletions(-)
diff --git a/blivet/nvme.py b/blivet/nvme.py
index 17bead15e..5ac41cffa 100644
--- a/blivet/nvme.py
+++ b/blivet/nvme.py
@@ -18,16 +18,20 @@
#
import os
-import shutil
from . import errors
-from . import util
+
+import gi
+gi.require_version("BlockDev", "2.0")
+
+from gi.repository import BlockDev as blockdev
import logging
log = logging.getLogger("blivet")
-HOSTNQN_FILE = "/etc/nvme/hostnqn"
-HOSTID_FILE = "/etc/nvme/hostid"
+ETC_NVME_PATH = "/etc/nvme/"
+HOSTNQN_FILE = ETC_NVME_PATH + "hostnqn"
+HOSTID_FILE = ETC_NVME_PATH + "hostid"
class NVMe(object):
@@ -40,6 +44,8 @@ class NVMe(object):
def __init__(self):
self.started = False
+ self._hostnqn = None
+ self._hostid = None
# So that users can write nvme() to get the singleton instance
def __call__(self):
@@ -52,28 +58,38 @@ def startup(self):
if self.started:
return
- rc, nqn = util.run_program_and_capture_output(["nvme", "gen-hostnqn"])
- if rc != 0:
- raise errors.NVMeError("Failed to generate hostnqn")
-
- with open(HOSTNQN_FILE, "w") as f:
- f.write(nqn)
-
- rc, hid = util.run_program_and_capture_output(["dmidecode", "-s", "system-uuid"])
- if rc != 0:
- raise errors.NVMeError("Failed to generate host ID")
-
- with open(HOSTID_FILE, "w") as f:
- f.write(hid)
+ self._hostnqn = blockdev.nvme_get_host_nqn()
+ self._hostid = blockdev.nvme_get_host_id()
+ if not self._hostnqn:
+ self._hostnqn = blockdev.nvme_generate_host_nqn()
+ if not self._hostnqn:
+ raise errors.NVMeError("Failed to generate HostNQN")
+ if not self._hostid:
+ if 'uuid:' not in self._hostnqn:
+ raise errors.NVMeError("Missing UUID part in the HostNQN string '%s'" % self._hostnqn)
+ # derive HostID from HostNQN's UUID part
+ self._hostid = self._hostnqn.split('uuid:')[1]
+
+ # do not overwrite existing files, taken e.g. from initramfs
+ self.write("/", overwrite=False)
self.started = True
- def write(self, root): # pylint: disable=unused-argument
- # copy the hostnqn and hostid files
- if not os.path.isdir(root + "/etc/nvme"):
- os.makedirs(root + "/etc/nvme", 0o755)
- shutil.copyfile(HOSTNQN_FILE, root + HOSTNQN_FILE)
- shutil.copyfile(HOSTID_FILE, root + HOSTID_FILE)
+ def write(self, root, overwrite=True): # pylint: disable=unused-argument
+ # write down the hostnqn and hostid files
+ p = root + ETC_NVME_PATH
+ if not os.path.isdir(p):
+ os.makedirs(p, 0o755)
+ p = root + HOSTNQN_FILE
+ if overwrite or not os.path.isfile(p):
+ with open(p, "w") as f:
+ f.write(self._hostnqn)
+ f.write("\n")
+ p = root + HOSTID_FILE
+ if overwrite or not os.path.isfile(p):
+ with open(p, "w") as f:
+ f.write(self._hostid)
+ f.write("\n")
# Create nvme singleton

View File

@ -0,0 +1,58 @@
From 63da3cb8a40500c889c8faa4326f81d16997a3c8 Mon Sep 17 00:00:00 2001
From: Tomas Bzatek <tbzatek@redhat.com>
Date: Mon, 27 Nov 2023 18:55:55 +0100
Subject: [PATCH] nvme: Retrieve HostNQN from a first active fabrics connection
When no /etc/hostnqn exists, look for any active NVMe over Fabrics
connections and take the values from the first one, rather than
generating new ones.
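
An illustrative standalone version of the sysfs lookup this patch adds: it reads hostnqn and hostid from the first active fabrics controller, if any, using the kernel's nvme-fabrics sysfs layout.

import glob
import os

hostnqn = hostid = None
for d in glob.glob("/sys/class/nvme-fabrics/ctl/nvme*/"):
    try:
        with open(os.path.join(d, "hostnqn")) as f:
            hostnqn = f.readline().strip()
        with open(os.path.join(d, "hostid")) as f:
            hostid = f.readline().strip()
        if hostnqn:
            break
    except OSError:
        continue

print(hostnqn, hostid)   # both None when there is no active fabrics connection
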
---
blivet/nvme.py | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)
diff --git a/blivet/nvme.py b/blivet/nvme.py
index 5ac41cffa..2e4686e68 100644
--- a/blivet/nvme.py
+++ b/blivet/nvme.py
@@ -18,6 +18,7 @@
#
import os
+import glob
from . import errors
@@ -54,6 +55,22 @@ def __call__(self):
def __deepcopy__(self, memo_dict): # pylint: disable=unused-argument
return self
+ def _retrieve_fabrics_hostnqn(self):
+ for d in glob.glob('/sys/class/nvme-fabrics/ctl/nvme*/'):
+ try:
+ # invalidate old values
+ self._hostnqn = None
+ self._hostid = None
+ # read from sysfs
+ with open(os.path.join(d, 'hostnqn')) as f:
+ self._hostnqn = f.readline().strip()
+ with open(os.path.join(d, 'hostid')) as f:
+ self._hostid = f.readline().strip()
+ if self._hostnqn:
+ break
+ except Exception: # pylint: disable=broad-except
+ pass
+
def startup(self):
if self.started:
return
@@ -61,6 +78,10 @@ def startup(self):
self._hostnqn = blockdev.nvme_get_host_nqn()
self._hostid = blockdev.nvme_get_host_id()
if not self._hostnqn:
+ # see if there are any active fabrics connections and take their values over
+ self._retrieve_fabrics_hostnqn()
+ if not self._hostnqn:
+ # generate new values
self._hostnqn = blockdev.nvme_generate_host_nqn()
if not self._hostnqn:
raise errors.NVMeError("Failed to generate HostNQN")

View File

@ -0,0 +1,67 @@
From c807e234dfd07f3d0005c71501f0300284cd580b Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 6 Dec 2023 11:47:31 +0100
Subject: [PATCH] tests: Add a simple unit test for the NVMe module
---
tests/unit_tests/__init__.py | 1 +
tests/unit_tests/nvme_test.py | 38 +++++++++++++++++++++++++++++++++++
2 files changed, 39 insertions(+)
create mode 100644 tests/unit_tests/nvme_test.py
diff --git a/tests/unit_tests/__init__.py b/tests/unit_tests/__init__.py
index 589366e0f..62bef67f5 100644
--- a/tests/unit_tests/__init__.py
+++ b/tests/unit_tests/__init__.py
@@ -9,6 +9,7 @@
from .devicetree_test import *
from .events_test import *
from .misc_test import *
+from .nvme_test import *
from .parentlist_test import *
from .populator_test import *
from .size_test import *
diff --git a/tests/unit_tests/nvme_test.py b/tests/unit_tests/nvme_test.py
new file mode 100644
index 000000000..cb948687f
--- /dev/null
+++ b/tests/unit_tests/nvme_test.py
@@ -0,0 +1,38 @@
+import unittest
+
+try:
+ from unittest.mock import patch
+except ImportError:
+ from mock import patch
+
+from blivet.nvme import nvme
+
+
+class NVMeModuleTestCase(unittest.TestCase):
+
+ host_nqn = "nqn.2014-08.org.nvmexpress:uuid:01234567-8900-abcd-efff-abcdabcdabcd"
+
+ @patch("blivet.nvme.os")
+ @patch("blivet.nvme.blockdev")
+ def test_nvme_module(self, bd, os):
+ self.assertIsNotNone(nvme)
+ bd.nvme_get_host_nqn.return_value = self.host_nqn
+ bd.nvme_get_host_id.return_value = None # None = generate from host_nqn
+ os.path.isdir.return_value = False
+
+ # startup
+ with patch.object(nvme, "write") as write:
+ nvme.startup()
+ write.assert_called_once_with("/", overwrite=False)
+
+ self.assertTrue(nvme.started)
+ self.assertEqual(nvme._hostnqn, self.host_nqn)
+ self.assertEqual(nvme._hostid, "01234567-8900-abcd-efff-abcdabcdabcd")
+
+ # write
+ with patch("blivet.nvme.open") as op:
+ nvme.write("/test")
+
+ os.makedirs.assert_called_with("/test/etc/nvme/", 0o755)
+ op.assert_any_call("/test/etc/nvme/hostnqn", "w")
+ op.assert_any_call("/test/etc/nvme/hostid", "w")

View File

@ -0,0 +1,206 @@
From c20296b2df89a9edc4ea9cc41f94df89a8fbfd26 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 20 Apr 2023 12:35:30 +0200
Subject: [PATCH] Add support for creating shared LVM setups
This feature is requested by GFS2 for the storage role. This adds
support for creating shared VGs and activating LVs in shared mode.
Resolves: RHEL-324
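
A hedged sketch of how the new shared keyword is intended to be used, mirroring the unit tests below; the PV is an in-memory stand-in, and actually creating or activating such devices additionally requires the libblockdev shared-LVM technology and a running lvmlockd.

from blivet.devices import StorageDevice, LVMVolumeGroupDevice, LVMLogicalVolumeDevice
from blivet.formats import get_format
from blivet.size import Size

pv = StorageDevice("pv1", fmt=get_format("lvmpv"), size=Size("1 GiB"))

# shared=True makes _create() call vgcreate with shared="" and then start the VG lock
vg = LVMVolumeGroupDevice("sharedvg", parents=[pv], shared=True)

# shared=True on the LV activates it with lvactivate(..., shared=True)
lv = LVMLogicalVolumeDevice("sharedlv", parents=[vg], size=Size("500 MiB"),
                            fmt=get_format("xfs"), shared=True)
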
---
blivet/devices/lvm.py | 44 +++++++++++++++++++----
blivet/tasks/availability.py | 9 +++++
tests/unit_tests/devices_test/lvm_test.py | 25 +++++++++++++
3 files changed, 72 insertions(+), 6 deletions(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index ca45c4b5..068c5368 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -97,7 +97,8 @@ class LVMVolumeGroupDevice(ContainerDevice):
def __init__(self, name, parents=None, size=None, free=None,
pe_size=None, pe_count=None, pe_free=None, pv_count=None,
- uuid=None, exists=False, sysfs_path='', exported=False):
+ uuid=None, exists=False, sysfs_path='', exported=False,
+ shared=False):
"""
:param name: the device name (generally a device node's basename)
:type name: str
@@ -124,6 +125,11 @@ class LVMVolumeGroupDevice(ContainerDevice):
:type pv_count: int
:keyword uuid: the VG UUID
:type uuid: str
+
+ For non-existing VGs only:
+
+ :keyword shared: whether to create this VG as shared
+ :type shared: bool
"""
# These attributes are used by _add_parent, so they must be initialized
# prior to instantiating the superclass.
@@ -137,6 +143,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
self.pe_count = util.numeric_type(pe_count)
self.pe_free = util.numeric_type(pe_free)
self.exported = exported
+ self._shared = shared
# TODO: validate pe_size if given
if not self.pe_size:
@@ -254,7 +261,19 @@ class LVMVolumeGroupDevice(ContainerDevice):
""" Create the device. """
log_method_call(self, self.name, status=self.status)
pv_list = [pv.path for pv in self.parents]
- blockdev.lvm.vgcreate(self.name, pv_list, self.pe_size)
+ extra = dict()
+ if self._shared:
+ extra["shared"] = ""
+ blockdev.lvm.vgcreate(self.name, pv_list, self.pe_size, **extra)
+
+ if self._shared:
+ if availability.BLOCKDEV_LVM_PLUGIN_SHARED.available:
+ try:
+ blockdev.lvm.vglock_start(self.name)
+ except blockdev.LVMError as err:
+ raise errors.LVMError(err)
+ else:
+ raise errors.LVMError("Shared LVM is not fully supported: %s" % ",".join(availability.BLOCKDEV_LVM_PLUGIN_SHARED.availability_errors))
def _post_create(self):
self._complete = True
@@ -661,7 +680,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None,
fmt=None, exists=False, sysfs_path='', grow=None, maxsize=None,
percent=None, cache_request=None, pvs=None, from_lvs=None,
- stripe_size=0):
+ stripe_size=0, shared=False):
if not exists:
if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo", "cache-pool"] + lvm.raid_seg_types:
@@ -690,6 +709,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
self.seg_type = seg_type or "linear"
self._raid_level = None
self.ignore_skip_activation = 0
+ self._shared = shared
self.req_grow = None
self.req_max_size = Size(0)
@@ -2306,7 +2326,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
parent_lv=None, int_type=None, origin=None, vorigin=False,
metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
compression=False, deduplication=False, index_memory=0,
- write_policy=None, cache_mode=None, attach_to=None, stripe_size=0):
+ write_policy=None, cache_mode=None, attach_to=None, stripe_size=0,
+ shared=False):
"""
:param name: the device name (generally a device node's basename)
:type name: str
@@ -2337,6 +2358,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
:type cache_request: :class:`~.devices.lvm.LVMCacheRequest`
:keyword pvs: list of PVs to allocate extents from (size could be specified for each PV)
:type pvs: list of :class:`~.devices.StorageDevice` or :class:`LVPVSpec` objects (tuples)
+ :keyword shared: whether to activate the newly create LV in shared mode
+ :type shared: bool
For internal LVs only:
@@ -2412,7 +2435,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
fmt, exists, sysfs_path, grow, maxsize,
percent, cache_request, pvs, from_lvs,
- stripe_size)
+ stripe_size, shared)
LVMVDOPoolMixin.__init__(self, compression, deduplication, index_memory,
write_policy)
LVMVDOLogicalVolumeMixin.__init__(self)
@@ -2634,7 +2657,13 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
log_method_call(self, self.name, orig=orig, status=self.status,
controllable=self.controllable)
ignore_skip_activation = self.is_snapshot_lv or self.ignore_skip_activation > 0
- blockdev.lvm.lvactivate(self.vg.name, self._name, ignore_skip=ignore_skip_activation)
+ if self._shared:
+ if availability.BLOCKDEV_LVM_PLUGIN_SHARED.available:
+ blockdev.lvm.lvactivate(self.vg.name, self._name, ignore_skip=ignore_skip_activation, shared=True)
+ else:
+ raise errors.LVMError("Shared LVM is not fully supported: %s" % ",".join(availability.BLOCKDEV_LVM_PLUGIN_SHARED.availability_errors))
+ else:
+ blockdev.lvm.lvactivate(self.vg.name, self._name, ignore_skip=ignore_skip_activation)
@type_specific
def _pre_create(self):
@@ -2672,6 +2701,9 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
if self._stripe_size:
extra["stripesize"] = str(int(self._stripe_size.convert_to("KiB")))
+ if self._shared:
+ extra["activate"] = "sy"
+
blockdev.lvm.lvcreate(self.vg.name, self._name, self.size,
type=self.seg_type, pv_list=pvs, **extra)
else:
diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py
index bba1ba84..85945c77 100644
--- a/blivet/tasks/availability.py
+++ b/blivet/tasks/availability.py
@@ -435,6 +435,14 @@ if hasattr(blockdev.LVMTech, "VDO"):
else:
BLOCKDEV_LVM_TECH_VDO = _UnavailableMethod(error_msg="Installed version of libblockdev doesn't support LVM VDO technology")
+if hasattr(blockdev.LVMTech, "SHARED"):
+ BLOCKDEV_LVM_SHARED = BlockDevTechInfo(plugin_name="lvm",
+ check_fn=blockdev.lvm_is_tech_avail,
+ technologies={blockdev.LVMTech.SHARED: blockdev.LVMTechMode.MODIFY}) # pylint: disable=no-member
+ BLOCKDEV_LVM_TECH_SHARED = BlockDevMethod(BLOCKDEV_LVM_SHARED)
+else:
+ BLOCKDEV_LVM_TECH_SHARED = _UnavailableMethod(error_msg="Installed version of libblockdev doesn't support shared LVM technology")
+
# libblockdev mdraid plugin required technologies and modes
BLOCKDEV_MD_ALL_MODES = (blockdev.MDTechMode.CREATE |
blockdev.MDTechMode.DELETE |
@@ -476,6 +484,7 @@ BLOCKDEV_DM_PLUGIN_RAID = blockdev_plugin("libblockdev dm plugin (raid technolog
BLOCKDEV_LOOP_PLUGIN = blockdev_plugin("libblockdev loop plugin", BLOCKDEV_LOOP_TECH)
BLOCKDEV_LVM_PLUGIN = blockdev_plugin("libblockdev lvm plugin", BLOCKDEV_LVM_TECH)
BLOCKDEV_LVM_PLUGIN_VDO = blockdev_plugin("libblockdev lvm plugin (vdo technology)", BLOCKDEV_LVM_TECH_VDO)
+BLOCKDEV_LVM_PLUGIN_SHARED = blockdev_plugin("libblockdev lvm plugin (shared LVM technology)", BLOCKDEV_LVM_TECH_SHARED)
BLOCKDEV_MDRAID_PLUGIN = blockdev_plugin("libblockdev mdraid plugin", BLOCKDEV_MD_TECH)
BLOCKDEV_MPATH_PLUGIN = blockdev_plugin("libblockdev mpath plugin", BLOCKDEV_MPATH_TECH)
BLOCKDEV_SWAP_PLUGIN = blockdev_plugin("libblockdev swap plugin", BLOCKDEV_SWAP_TECH)
diff --git a/tests/unit_tests/devices_test/lvm_test.py b/tests/unit_tests/devices_test/lvm_test.py
index d7b55224..e645309f 100644
--- a/tests/unit_tests/devices_test/lvm_test.py
+++ b/tests/unit_tests/devices_test/lvm_test.py
@@ -476,6 +476,31 @@ class LVMDeviceTest(unittest.TestCase):
lv.setup()
lvm.lvactivate.assert_called_with(vg.name, lv.lvname, ignore_skip=False)
+ @patch("blivet.tasks.availability.BLOCKDEV_LVM_PLUGIN_SHARED",
+ new=blivet.tasks.availability.ExternalResource(blivet.tasks.availability.AvailableMethod, ""))
+ def test_lv_activate_shared(self):
+ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("1 GiB"), exists=True)
+ vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
+ lv = LVMLogicalVolumeDevice("data_lv", parents=[vg], size=Size("500 MiB"), exists=True, shared=True)
+
+ with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
+ with patch.object(lv, "_pre_setup"):
+ lv.setup()
+ lvm.lvactivate.assert_called_with(vg.name, lv.lvname, ignore_skip=False, shared=True)
+
+ @patch("blivet.tasks.availability.BLOCKDEV_LVM_PLUGIN_SHARED",
+ new=blivet.tasks.availability.ExternalResource(blivet.tasks.availability.AvailableMethod, ""))
+ def test_vg_create_shared(self):
+ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("1 GiB"), exists=True)
+ vg = LVMVolumeGroupDevice("testvg", parents=[pv], shared=True)
+
+ with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
+ vg._create()
+ lvm.vgcreate.assert_called_with(vg.name, [pv.path], Size("4 MiB"), shared="")
+ lvm.vglock_start.assert_called_with(vg.name)
+
def test_vg_is_empty(self):
pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
size=Size("1024 MiB"))
--
2.41.0

View File

@ -0,0 +1,60 @@
From d7708bca72f4a7d0bfa732912e2087bd6aa8f379 Mon Sep 17 00:00:00 2001
From: Steffen Maier <maier@linux.ibm.com>
Date: Thu, 23 Feb 2023 13:28:50 +0100
Subject: [PATCH] add udev-builtin-path_id property to zfcp-attached SCSI disks
so anaconda can use it to display path_id information for multipath
members
Signed-off-by: Steffen Maier <maier@linux.ibm.com>
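
A brief sketch of what the populator change amounts to: udev's ID_PATH (the udev-builtin-path_id value) is forwarded to ZFCPDiskDevice as the new id_path keyword. Every attribute value below is made up for illustration.

from blivet.devices import ZFCPDiskDevice

disk = ZFCPDiskDevice("test_zfcp",
                      hba_id="0.0.1900",
                      wwpn="0x500507630708d3b3",
                      fcp_lun="0x4001402100000000",
                      id_path="ccw-0.0.1900-fc-0x500507630708d3b3-lun-0x4001402100000000")
print(disk.id_path)   # the value anaconda can show for multipath members
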
---
blivet/devices/disk.py | 2 ++
blivet/populator/helpers/disk.py | 1 +
tests/unit_tests/tags_test.py | 2 +-
3 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index 8842b4dc..746f6d58 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -556,10 +556,12 @@ class ZFCPDiskDevice(DiskDevice):
:keyword hba_id: ???
:keyword wwpn: ???
:keyword fcp_lun: ???
+ :keyword id_path: string from udev-builtin-path_id
"""
self.hba_id = kwargs.pop("hba_id")
self.wwpn = kwargs.pop("wwpn")
self.fcp_lun = kwargs.pop("fcp_lun")
+ self.id_path = kwargs.pop("id_path")
DiskDevice.__init__(self, device, **kwargs)
self._clear_local_tags()
self.tags.add(Tags.remote)
diff --git a/blivet/populator/helpers/disk.py b/blivet/populator/helpers/disk.py
index cf20d302..92e85688 100644
--- a/blivet/populator/helpers/disk.py
+++ b/blivet/populator/helpers/disk.py
@@ -223,6 +223,7 @@ class ZFCPDevicePopulator(DiskDevicePopulator):
def _get_kwargs(self):
kwargs = super(ZFCPDevicePopulator, self)._get_kwargs()
+ kwargs["id_path"] = udev.device_get_path(self.data)
for attr in ['hba_id', 'wwpn', 'fcp_lun']:
kwargs[attr] = udev.device_get_zfcp_attribute(self.data, attr=attr)
diff --git a/tests/unit_tests/tags_test.py b/tests/unit_tests/tags_test.py
index 49a2d72e..15fa2a40 100644
--- a/tests/unit_tests/tags_test.py
+++ b/tests/unit_tests/tags_test.py
@@ -72,7 +72,7 @@ class DeviceTagsTest(unittest.TestCase):
fcoe_device = FcoeDiskDevice('test6', nic=None, identifier=None, id_path=None)
self.assertIn(Tags.remote, fcoe_device.tags)
self.assertNotIn(Tags.local, fcoe_device.tags)
- zfcp_device = ZFCPDiskDevice('test7', hba_id=None, wwpn=None, fcp_lun=None)
+ zfcp_device = ZFCPDiskDevice('test7', hba_id=None, wwpn=None, fcp_lun=None, id_path=None)
self.assertIn(Tags.remote, zfcp_device.tags)
self.assertNotIn(Tags.local, zfcp_device.tags)
--
2.43.0
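The practical effect of this patch, sketched below: ZFCPDiskDevice now accepts (and the populator supplies) an `id_path` keyword taken from udev's builtin path_id. Only the keyword itself comes from the diff; the device name and attribute values here are made-up placeholders.

```python
from blivet.devices import ZFCPDiskDevice

# hba_id/wwpn/fcp_lun/id_path values are hypothetical examples.
zfcp_disk = ZFCPDiskDevice(
    "sda",
    hba_id="0.0.1900",
    wwpn="0x50050763070bc5e3",
    fcp_lun="0x4006404600000000",
    id_path="ccw-0.0.1900-fc-0x50050763070bc5e3-lun-0x4006404600000000",
)

# Consumers such as anaconda can now read the path_id string directly:
print(zfcp_disk.id_path)
```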

View File

@ -0,0 +1,172 @@
From 517f17481685afbabea6750b57d71a736f9a157e Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 25 May 2023 17:02:39 +0200
Subject: [PATCH] Do not add new PVs to the LVM devices file if it doesn't
exist and VGs are present
If there is a preexisting VG on the system when we create a new PV
and the LVM devices file doesn't exist, we will create it and add
only the new PV to it, which means the preexisting VG will now be
ignored by LVM tools. This change skips adding newly created PVs
to the devices file, in the same way 'pvcreate' and 'vgcreate' do.
---
blivet/devicelibs/lvm.py | 3 +
blivet/formats/lvmpv.py | 17 ++++-
tests/unit_tests/formats_tests/__init__.py | 1 +
tests/unit_tests/formats_tests/lvmpv_test.py | 73 ++++++++++++++++++++
4 files changed, 91 insertions(+), 3 deletions(-)
create mode 100644 tests/unit_tests/formats_tests/lvmpv_test.py
diff --git a/blivet/devicelibs/lvm.py b/blivet/devicelibs/lvm.py
index 16a8e8f8..dc7d0cbe 100644
--- a/blivet/devicelibs/lvm.py
+++ b/blivet/devicelibs/lvm.py
@@ -84,6 +84,9 @@ if hasattr(blockdev.LVMTech, "DEVICES"):
else:
HAVE_LVMDEVICES = False
+
+LVM_DEVICES_FILE = "/etc/lvm/devices/system.devices"
+
# list of devices that LVM is allowed to use
# with LVM >= 2.0.13 we'll use this for the --devices option and when creating
# the /etc/lvm/devices/system.devices file
diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py
index cb01b2f3..65acedbe 100644
--- a/blivet/formats/lvmpv.py
+++ b/blivet/formats/lvmpv.py
@@ -36,7 +36,7 @@ from ..size import Size
from ..errors import PhysicalVolumeError
from . import DeviceFormat, register_device_format
from .. import udev
-from ..static_data.lvm_info import pvs_info
+from ..static_data.lvm_info import pvs_info, vgs_info
import logging
log = logging.getLogger("blivet")
@@ -121,10 +121,21 @@ class LVMPhysicalVolume(DeviceFormat):
def supported(self):
return super(LVMPhysicalVolume, self).supported and self._plugin.available
- def lvmdevices_add(self):
+ def lvmdevices_add(self, force=True):
+ """ Add this PV to the LVM system devices file
+ :keyword force: whether to add the PV even if the system devices file doesn't exist and
+ VGs are present in the system
+ :type force: bool
+ """
+
if not lvm.HAVE_LVMDEVICES:
raise PhysicalVolumeError("LVM devices file feature is not supported")
+ if not os.path.exists(lvm.LVM_DEVICES_FILE) and vgs_info.cache and not force:
+ log.debug("Not adding %s to devices file: %s doesn't exist and there are VGs present in the system",
+ self.device, lvm.LVM_DEVICES_FILE)
+ return
+
try:
blockdev.lvm.devices_add(self.device)
except blockdev.LVMError as e:
@@ -151,7 +162,7 @@ class LVMPhysicalVolume(DeviceFormat):
# with lvmdbusd we need to call the pvcreate without --devices otherwise lvmdbusd
# wouldn't be able to find the newly created pv and the call would fail
blockdev.lvm.pvcreate(self.device, data_alignment=self.data_alignment, extra=[ea_yes])
- self.lvmdevices_add()
+ self.lvmdevices_add(force=False)
else:
blockdev.lvm.pvcreate(self.device, data_alignment=self.data_alignment, extra=[ea_yes])
diff --git a/tests/unit_tests/formats_tests/__init__.py b/tests/unit_tests/formats_tests/__init__.py
index d678900b..95c7a25b 100644
--- a/tests/unit_tests/formats_tests/__init__.py
+++ b/tests/unit_tests/formats_tests/__init__.py
@@ -2,6 +2,7 @@ from .device_test import *
from .disklabel_test import *
from .init_test import *
from .luks_test import *
+from .lvmpv_test import *
from .methods_test import *
from .misc_test import *
from .selinux_test import *
diff --git a/tests/unit_tests/formats_tests/lvmpv_test.py b/tests/unit_tests/formats_tests/lvmpv_test.py
new file mode 100644
index 00000000..6490c7d4
--- /dev/null
+++ b/tests/unit_tests/formats_tests/lvmpv_test.py
@@ -0,0 +1,73 @@
+try:
+ from unittest.mock import patch
+except ImportError:
+ from mock import patch
+
+from contextlib import contextmanager
+
+import unittest
+
+from blivet.formats.lvmpv import LVMPhysicalVolume
+
+
+class LVMPVNodevTestCase(unittest.TestCase):
+
+ @contextmanager
+ def patches(self):
+ patchers = dict()
+ mocks = dict()
+
+ patchers["blockdev"] = patch("blivet.formats.lvmpv.blockdev")
+ patchers["lvm"] = patch("blivet.formats.lvmpv.lvm")
+ patchers["vgs_info"] = patch("blivet.formats.lvmpv.vgs_info")
+ patchers["os"] = patch("blivet.formats.lvmpv.os")
+
+ for name, patcher in patchers.items():
+ mocks[name] = patcher.start()
+
+ yield mocks
+
+ for patcher in patchers.values():
+ patcher.stop()
+
+ def test_lvm_devices(self):
+ fmt = LVMPhysicalVolume(device="/dev/test")
+
+ with self.patches() as mock:
+ # LVM devices file not enabled/supported -> devices_add should not be called
+ mock["lvm"].HAVE_LVMDEVICES = False
+
+ fmt._create()
+
+ mock["blockdev"].lvm.devices_add.assert_not_called()
+
+ with self.patches() as mock:
+ # LVM devices file enabled and devices file exists -> devices_add should be called
+ mock["lvm"].HAVE_LVMDEVICES = True
+ mock["os"].path.exists.return_value = True
+
+ fmt._create()
+
+ mock["blockdev"].lvm.devices_add.assert_called_with("/dev/test")
+
+ with self.patches() as mock:
+ # LVM devices file enabled and devices file doesn't exist
+ # and no existing VGs present -> devices_add should be called
+ mock["lvm"].HAVE_LVMDEVICES = True
+ mock["os"].path.exists.return_value = False
+ mock["vgs_info"].cache = {}
+
+ fmt._create()
+
+ mock["blockdev"].lvm.devices_add.assert_called_with("/dev/test")
+
+ with self.patches() as mock:
+ # LVM devices file enabled and devices file doesn't exist
+ # and existing VGs present -> devices_add should not be called
+ mock["lvm"].HAVE_LVMDEVICES = True
+ mock["os"].path.exists.return_value = False
+ mock["vgs_info"].cache = {"fake_vg_uuid": "fake_vg_data"}
+
+ fmt._create()
+
+ mock["blockdev"].lvm.devices_add.assert_not_called()
--
2.43.0
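The guard this patch adds boils down to a small decision, sketched here in isolation (the standalone function and its argument names are illustrative; the path constant and the "VGs present" check mirror the patched lvmpv.py):

```python
import os

# Same constant the patch introduces in blivet/devicelibs/lvm.py.
LVM_DEVICES_FILE = "/etc/lvm/devices/system.devices"

def should_add_pv_to_devices_file(existing_vgs, force=False):
    """Decide whether a newly created PV gets recorded in system.devices."""
    if force:
        return True
    # Match pvcreate/vgcreate behaviour: if the devices file does not exist
    # yet and VGs are already present on the system, leave the file alone so
    # those preexisting VGs do not become invisible to the LVM tools.
    if not os.path.exists(LVM_DEVICES_FILE) and existing_vgs:
        return False
    return True
```

Since `_create()` now calls `lvmdevices_add(force=False)`, only the "devices file missing and VGs present" combination is skipped; every other case still records the new PV, as the unit tests above verify.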

View File

@ -23,24 +23,36 @@ Version: 3.6.0
#%%global prerelease .b2
# prerelease, if defined, should be something like .a1, .b1, .b2.dev1, or .c2
Release: 4%{?prerelease}%{?dist}
Release: 14%{?prerelease}%{?dist}
Epoch: 1
License: LGPLv2+
%global realname blivet
%global realversion %{version}%{?prerelease}
Source0: http://github.com/storaged-project/blivet/archive/%{realname}-%{realversion}.tar.gz
Source1: http://github.com/storaged-project/blivet/archive/%{realname}-%{realversion}-tests.tar.gz
Patch0: 0001-force-lvm-cli.plugin
Patch0: 0001-force-lvm-cli-plugin.patch
Patch1: 0002-remove-btrfs-plugin.patch
Patch2: 0003-Revert-More-consistent-lvm-errors.patch
Patch3: 0004-Revert-Terminology-cleanups.patch
Patch4: 0005-DDF-RAID-support-using-mdadm.patch
Patch5: 0006-Revert-Remove-the-Blivet.roots-attribute.patch
Patch6: 0007-Fix-potential-AttributeError-when-getting-stratis-bl.patch
Patch7: 0008-tests-Skip-XFS-resize-test-on-CentOS-RHEL-8.patch
Patch8: 0009-Revert-Adjust-to-new-XFS-min-size.patch
Patch9: 0010-Catch-BlockDevNotImplementedError-for-btrfs-plugin-c.patch
Patch3: 0004-DDF-RAID-support-using-mdadm.patch
Patch4: 0005-Revert-Remove-the-Blivet.roots-attribute.patch
Patch5: 0006-Fix-potential-AttributeError-when-getting-stratis-bl.patch
Patch6: 0007-tests-Skip-XFS-resize-test-on-CentOS-RHEL-9.patch
Patch7: 0008-Revert-Adjust-to-new-XFS-min-size.patch
Patch8: 0009-Catch-BlockDevNotImplementedError-for-btrfs-plugin-c.patch
Patch9: 0010-Add-basic-support-for-NVMe-and-NVMe-Fabrics-devices.patch
Patch10: 0011-Default-to-encryption-sector-size-512-for-LUKS-devic.patch
Patch11: 0012-Add-support-for-specifying-stripe-size-for-RAID-LVs.patch
Patch12: 0013-Fix-setting-kickstart-data.patch
Patch13: 0014-Do-not-set-memory-limit-for-LUKS2-when-running-in-FI.patch
Patch14: 0015-Add-support-for-filesystem-online-resize.patch
Patch15: 0016-Backport-iSCSI-initiator-name-related-fixes.patch
Patch16: 0017-nvme-additional-rpms-for-dracut.patch
Patch17: 0018-nvme-TP4126-fixes-1.patch
Patch18: 0019-nvme-hostnqn_from_active_fabrics_connection.patch
Patch19: 0020-nvme-add_unit_tests.patch
Patch20: 0021-Add-support-for-creating-shared-LVM-setups.patch
Patch21: 0022-add-udev-builtin-path_id-property-to-zfcp-attached-S.patch
Patch22: 0023-Do-not-add-new-PVs-to-the-LVM-devices-file-if-it-doe.patch
# Versions of required components (done so we make sure the buildrequires
# match the requires versions of things).
@ -148,6 +160,7 @@ Recommends: libblockdev-lvm >= %{libblockdevver}
Recommends: libblockdev-mdraid >= %{libblockdevver}
Recommends: libblockdev-mpath >= %{libblockdevver}
Recommends: libblockdev-nvdimm >= %{libblockdevver}
Recommends: libblockdev-nvme >= %{libblockdevver}
Recommends: libblockdev-part >= %{libblockdevver}
Recommends: libblockdev-swap >= %{libblockdevver}
Recommends: libblockdev-s390 >= %{libblockdevver}
@ -203,277 +216,511 @@ configuration.
%endif
%changelog
* Thu Jan 19 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-4
* Fri Feb 09 2024 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-14
- Do not add new PVs to the LVM devices file if it doesn't exist and VGs are present
Resolves: RHEL-473
* Thu Jan 18 2024 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-13
- add udev-builtin-path_id property to zfcp-attached SCSI disks
Resolves: RHEL-22007
* Wed Dec 13 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-12
- Add support for creating shared LVM setups
Resolves: RHEL-324
* Mon Dec 11 2023 Tomas Bzatek <tbzatek@redhat.com> - 3.6.0-11
- nvme: Retrieve HostNQN from a first active fabrics connection
- tests: Add a simple unit test for the NVMe module
Resolves: RHEL-11541
* Tue Sep 26 2023 Tomas Bzatek <tbzatek@redhat.com> - 3.6.0-10
- nvme: Require additional rpms for dracut
Resolves: RHEL-2855
- nvme: Align HostNQN and HostID format to TP-4126
Resolves: RHEL-1254
* Mon Jul 24 2023 Jan Pokorny <japokorn@redhat.com> - 3.6.0-9
Backport iSCSI initiator name related fixes:
- Allow changing iSCSI initiator name after setting it
Resolves: rhbz#2221935
- Add a basic test case for the iscsi module
Resolves: rhbz#2221935
- tests: Use blivet-specific prefix for targetcli backing files
Resolves: rhbz#2221935
- iscsi: Save firmware initiator name to /etc/iscsi/initiatorname.iscsi
Resolves: rhbz#2221932
- tests: Improve iscsi_test.ISCSITestCase
Resolves: rhbz#2221935
* Wed May 24 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-8
- Add support for filesystem online resize
Resolves: RHEL-326
* Thu May 18 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-7
- Fix setting kickstart data
Resolves: rhbz#2174296
- Do not set memory limit for LUKS2 when running in FIPS mode
Resolves: rhbz#2193096
* Tue May 02 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-6
- Add support for specifying stripe size for RAID LVs
Resolves: RHEL-327
* Thu Jan 19 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-5
- Default to encryption sector size 512 for LUKS devices
Resolves: rhbz#2160465
Resolves: rhbz#2103800
* Tue Dec 13 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-4
- Add basic support for NVMe and NVMe Fabrics devices
Resolves: rhbz#2123337
* Thu Nov 03 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-3
- Catch BlockDevNotImplementedError for btrfs plugin calls
Resolves: rhbz#2139169
Resolves: rhbz#2139166
- Revert "Adjust to new XFS min size"
Resolves: rhbz#2139187
Resolves: rhbz#2139189
* Fri Oct 21 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-2
- Skip XFS resize test on CentOS/RHEL 8
Related: rhbz#2123712
* Thu Oct 20 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-2
- Fix potential AttributeError when getting stratis blockdev info
Related: rhbz#2123711
- tests: Skip XFS resize test on CentOS/RHEL 9
Related: rhbz#2123711
* Fri Oct 21 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-1
* Thu Oct 13 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-1
- Rebase to the latest upstream release 3.6.0
Resolves: rhbz#2123712
Resolves: rhbz#2123711
* Thu Aug 18 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-13
* Thu Aug 18 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-16
- DDF RAID support using mdadm
Resolves: rhbz#2063791
Resolves: rhbz#2109030
* Mon Jun 20 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-12
* Mon Jun 20 2022 Jan Pokorny <japokorn@redhat.com> - 3.4.0-15
- Add a very simple NVMe module
Resolves: rhbz#2073008
- Add support for NPIV-enabled zFCP devices
Resolves: rhbz#1497087
Resolves: rhbz#1937030
- Fix removing zFCP SCSI devices
Related: rhbz#1937030
- tests: Mark "fake" disks in test_get_related_disks as non-existing
Resolves: rhbz#2062690
* Thu Jun 02 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-11
- Fix running gating tests on AWS/Xen machines
Resolves: rhbz#2093207
* Thu Jun 02 2022 Jan Pokorny <japokorn@redhat.com> - 3.4.0-14
- Release version increase to fix RHEL upgrade path
Related: rhbz#2081278
* Thu Jun 02 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-10
* Thu Jun 02 2022 Jan Pokorny <japokorn@redhat.com> - 3.4.0-13
- Fix getting PV info in LVMPhysicalVolume from the cache
Resolves: rhbz#2079220
Resolves: rhbz#2079221
- Do not crash when changing disklabel on disks with active devices
Resolves: rhbz#2078801
Resolves: rhbz#2078803
- ActionDestroyDevice should not obsolete ActionRemoveMember
Resolves: rhbz#2076958
Resolves: rhbz#2076956
- Correctly set vg_name after adding/removing a PV from a VG
Resolves: rhbz#2081276
- Use LVM PV format current_size in LVMVolumeGroupDevice._remove
Related: rhbz#2081276
Resolves: rhbz#2081278
- Add support for creating LVM cache pools
Resolves: rhbz#2055198
Resolves: rhbz#2055200
- Use LVM PV format current_size in LVMVolumeGroupDevice._remove
Related: rhbz#2081278
* Mon Jan 10 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-9
- Translation update
Resolves: rhbz#2003050
* Tue Feb 01 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-12
- Fix log message for the LVM devices filter
Resolves: rhbz#2034277
- Exclude unusable disks from PartitionFactory
Resolves: rhbz#2017432
* Tue Dec 14 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-8
* Tue Dec 14 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-11
- Replace all log_exception_info calls with log.info
Resolves: rhbz#2028134
Resolves: rhbz#2028391
* Fri Nov 26 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-7
- Release number bump
Related: rhbz#1988276
* Thu Dec 09 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-10
- Use LVM devices file instead of filter regex
Resolves: rhbz#1967212
* Fri Nov 26 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-6
* Tue Nov 30 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-9
- Rebuild with higher release number to fix errata
Related: rhbz#2012121
* Fri Nov 26 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-8
- Improve error message printed for missing dependencies
Resolves: rhbz#1988276
Resolves: rhbz#2012121
- Use bigger chunk size for thinpools bigger than ~15.88 TiB
Resolves: rhbz#1949953
Resolves: rhbz#1971516
* Wed Aug 4 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-5
- Fix running upstream test suite in gating
Resolves: rhbz#1990232
* Tue Aug 17 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-7
- Fix script for running tests in gating
Resolves: rhbz#1990237
* Wed Aug 11 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-6
- Remove "Revert Terminology cleanups" patch
Resolves: rhbz#1990982
- Fix running tests in gating
Resolves: rhbz#1990237
- Opt out from using LVM devices file in 9 Beta
Resolves: rhbz#1984851
* Tue Aug 10 2021 Mohan Boddu <mboddu@redhat.com> - 1:3.4.0-5
- Rebuilt for IMA sigs, glibc 2.34, aarch64 flags
Related: rhbz#1991688
* Mon Aug 2 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-4
- Do not set chunk size for RAID 1
Resolves: rhbz#1987170
Resolves: rhbz#1987176
* Wed Jul 21 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-3
* Sat Jul 17 2021 Neal Gompa <ngompa@centosproject.org> - 3.4.0-3
- Fix resolving devices with names that look like BIOS drive number
Resolves: rhbz#1983309
Resolves: rhbz#1983310
* Wed Jul 7 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-2
- Fix activating old style LVM snapshots
Resolves: rhbz#1961739
Resolves: rhbz#1961944
* Wed May 5 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-1
* Wed Jun 9 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-1
- Rebase to latest upstream release 3.4.0
Resolves: rhbz#1918357
Resolves: rhbz#1964341
* Tue Feb 9 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-9
- LVM VDO support
Resolves: rhbz#1509337
* Fri Apr 16 2021 Mohan Boddu <mboddu@redhat.com> - 1:3.3.3-2
- Rebuilt for RHEL 9 BETA on Apr 15th 2021. Related: rhbz#1947937
* Mon Jan 11 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-8
- Let parted fix fixable issues with partition table
Resolves: rhbz#1846869
- Fix possible UnicodeDecodeError when reading sysfs attributes
Resolves: rhbz#1849326
* Thu Feb 18 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.3.3-1
- apply compression settings from blivet.flags.btrfs_compression (#1926892) (michel)
* Wed Nov 18 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-7
- Add support for XFS format grow
Resolves: rhbz#1862349
- Do not limit swap to 128 GiB
Resolves: rhbz#1656485
- Use UnusableConfigurationError for partially hidden multipath devices
Resolves: rhbz#1877052
- Fix possible UnicodeDecodeError when reading model from sysfs
Resolves: rhbz#1849326
- Add basic support for LVM VDO devices
Resolves: rhbz#1828745
* Wed Jan 27 2021 Fedora Release Engineering <releng@fedoraproject.org> - 1:3.3.2-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_34_Mass_Rebuild
* Thu Aug 20 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-6
- Fix name resolution for MD devices and partitions on them
Resolves: rhbz#1862904
- Fix ignoring disk devices with parents or children
Resolves: rhbz#1866243
* Thu Jan 14 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.3.2-1
- Fix "suggest_container_name" for Anaconda (vtrefny)
- Add test for util.get_sysfs_attr (vtrefny)
- Use util.get_sysfs_attr in __is_ignored_blockdev to read device mode (vtrefny)
- Fix possible UnicodeDecodeError when reading sysfs attributes (vtrefny)
- Update LUKS device name after parent partition name change (vtrefny)
- TFT is still broken so let's avoid failures by just doing a build (jkonecny)
- Fix logging information about ignoring hidden devices (vtrefny)
- Add __repr__ and __str__ methods to ParentList (vtrefny)
- Make sure LV name is unique when adding it in device factory (vtrefny)
- In name checks add name which is already in use to error message (vtrefny)
- Refactor suggest device/container name functions (vtrefny)
- Remove an unused attribute from the Blivet class (vponcova)
- Add PyPI build artifacts to .gitignore (vtrefny)
- Sync spec with downstream (vtrefny)
* Thu Jul 16 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-5
* Wed Nov 11 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.3.1-2
- Remove btrfs from requested libblockdev plugins on RHEL 9
* Tue Oct 20 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.3.1-1
- Make sure the product name is safe when using it for device name (vtrefny)
- Run packit RPM builds on Fedora ELN (vtrefny)
- Allow specifying 'mode' for the sdist command (vtrefny)
- Enable packit RPM builds on pull requests (vtrefny)
- Start the iscsi-init service (#1880673) (vponcova)
- Let parted fix fixable issues with partition table (vtrefny)
- edd: Fix UnboundLocalError when trying to close fd in collect_mbrs (vtrefny)
- Use UnusableConfigurationError for partially hidden multipath devices (vtrefny)
- Close fd if it fails to read the device (nashok)
- Do not run udev.settle in StorageDevice._pre_teardown (vtrefny)
- Try to not use udev.resolve_devspec when querying MountsCache (vtrefny)
- Remove Zanata config file (vtrefny)
- Ignore new pylint warning W0707 "raise-missing-from" (vtrefny)
- Use SSH "link" for l10n repository in Makefile (vtrefny)
- Fix source tarball cleanup in srpm and rpm Makefile targets (vtrefny)
* Wed Sep 16 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.3.0-2
- Avoid using unnecessary udev.settle calls (#1876162)
* Thu Aug 20 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.3.0-1
- Account for pmspare grow when adjusting thinpool metadata size (vtrefny)
- Fix ignoring disk devices with parents or children (vtrefny)
- Terminology cleanup, part 3 (vtrefny)
- Terminology cleanups, part 2. (dlehman)
- Clean up some terminology. (dlehman)
- Add tests for udev.device_get_name for RAID devices (vtrefny)
- Fix name resolution for MD devices and partitions on them (vtrefny)
- Fix reading hidden sysfs attribute (vtrefny)
- Add support for specifying sector size for LUKS 2 devices (vtrefny)
- Do not ignore unknown/unsupported device mapper devices (vtrefny)
- Allow specifying custom hash function for LUKS 2 format (vtrefny)
- Ignore devices marked as hidden in sysfs (#1856974) (vtrefny)
- Add basic F2FS support (#1794950) (vtrefny)
- Make safe_device_name device type specific (vtrefny)
- Add exFAT to filesystems we recognize (vtrefny)
- Use xfs_db in read-only mode when getting XFS information (vtrefny)
- Add support for checking and fixing XFS using xfs_repair (vtrefny)
- Ignore zRAM devices in VMBackedTestCase (vtrefny)
- Add tests for XFS resize (vtrefny)
- Add support for XFS format grow (vtrefny)
- Typo fix (vtrefny)
- tests: Skip test_reset when running as non-root (vtrefny)
- tests: Patch LVM availability functions for some tests (vtrefny)
- tests: Patch LVM lvs call for some non-LVM tests (vtrefny)
- Do not propagate ped exception from add_partition (vtrefny)
- Do not use BlockDev.utils_have_kernel_module to check for modules (vtrefny)
- set allowed disk labels for s390x as standard ones (msdos + gpt) plus dasd (dan)
- Do not use FSAVAIL and FSUSE%% options when running lsblk (vtrefny)
- Rewrite README and add it as a long_description in setup.py (vtrefny)
- Round down to nearest MiB value when writing ks partition info. (sbueno+anaconda)
- Add _teardown method to IntegrityDevice (vtrefny)
- Fix status for DM Integrity format (#1814005) (vtrefny)
- udev: Add function to get list of device's holders (vtrefny)
- Add basic support for LVM writecache devices (vtrefny)
- Add test for SwapSpace max size (vtrefny)
- Do not limit swap to 128 GiB (vtrefny)
- Fix possible UnicodeDecodeError when reading model from sysfs (vtrefny)
- Add install_requires and classifiers to setup.py (vtrefny)
- Import setuptools in setup.py to make bdist_wheel work (vtrefny)
- Set device.original_format to the new format in ActionCreateFormat (vtrefny)
- Fix resizable property for partitions (vtrefny)
- Update TODO. (dlehman)
- Ignore pycodestyle warning E741 (vtrefny)
- Skip test_mounting for filesystems that are not mountable (vtrefny)
- Sync specfile with downstream (japokorn)
- Make extended partitions resizable (vtrefny)
- Fix LV min size for resize in test_action_dependencies (vtrefny)
- Fix checking for filesystem support in action_test (vtrefny)
- Add basic support for LVM VDO devices (vtrefny)
- Update POT file in the Weblate repo during "make potfile" (vtrefny)
- Skip translation canary check if POT file is not available (vtrefny)
- Add blivet-weblate repository as a submodule (vtrefny)
- Remove Zanata from our build process (vtrefny)
- Remove po folder (vtrefny)
- More consistent lvm errors (API break) (japokorn)
- Added support for device tags (japokorn)
* Wed Jul 29 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-4
- set allowed disk labels for s390x as standard ones (msdos + gpt) plus dasd
Resolves: rhbz#1855200
- Do not use BlockDev.utils_have_kernel_module to check for modules
Resolves: rhbz#1855344
* Thu Jul 09 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-4
- Blivet RHEL 8.3 localization update
Resolves: rhbz#182056
- Do not use FSAVAIL and FSUSE% options when running lsblk
Resolves: rhbz#1853624
* Tue Jul 28 2020 Fedora Release Engineering <releng@fedoraproject.org> - 1:3.2.2-3
- Rebuilt for https://fedoraproject.org/wiki/Fedora_33_Mass_Rebuild
* Tue Jun 30 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-3
- Round down to nearest MiB value when writing ks partition info
Resolves: rhbz#1850670
* Sat May 23 2020 Miro Hrončok <mhroncok@redhat.com> - 1:3.2.2-2
- Rebuilt for Python 3.9
* Wed Jun 24 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-2
- Add extra sleep after pvremove call
Resolves: rhbz#1640601
* Thu May 21 2020 Jan Pokorny <japokorn@redhat.com> - 3.2.2-1
- Allow setting size for non-existing LUKS devices (vtrefny)
- Fix toggling container encryption in devicefactory (#1827254) (vtrefny)
- Do no include destroyed devices in list of names (#1830515) (vtrefny)
- Fix changing LUKS version in devicefactory (#1834373) (vtrefny)
- Add "is_empty" property to StorageDevice (vtrefny)
- Mark warning "'GError' has no 'message' member" as false positive (vtrefny)
- Use the specified LUKS version for container encryption (vponcova)
- Log current storage state before reset using lsblk (vtrefny)
- Do not remove _netdev mount option specified manually by users (vtrefny)
- Fix renaming encrypted devices in the DeviceFactory (vtrefny)
- Fix typo in string formatter in EddEntry (vtrefny)
* Fri May 22 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-1
- Rebase to the latest upstream release 3.2.2
Resolves: rhbz#1714970
* Tue Apr 21 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.1-2
- Invalidate LVM caches in blivet device discovery loop (#1824418)
* Mon Mar 02 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-20
- add `-y' to lvm.pvcreate
Resolves: rhbz#1768494
* Mon Apr 06 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.1-1
- Correctly recognize EFI format on an MD RAID device (#1695913) (vtrefny)
- Do not set empty name instead of invalid one in devicefactory (#1813710) (vtrefny)
- Fix crash for devices without ID_PATH udev property (#1814920) (vtrefny)
- Allow for reserved vg space and a growable thin pool. (#1783946) (dlehman)
- Fix name resolution for md member partitions. (#1798792) (dlehman)
* Wed Jan 29 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-19
- Override LVM skip-activation to allow for thorough removal
Resolves: rhbz#1766498
- Make sure LVs are writable before wiping
Related: rhbz#1766498
- Fix udev test names so they actually get run.
Related: rhbz#1758102
- Add recognition of Dell FW RAID to udev.device_is_disk.
Resolves: rhbz#1758102
- Align base sizes up if smaller than min I/O size.
Resolves: rhbz#1781106
- Make minimal and optimal alignment getters public.
Related: rhbz#1781106
* Wed Mar 11 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.0-3
- Fix name resolution for md member partitions. (#1798792)
* Tue Nov 19 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-18
- Check for PV sector size when creating new VG
Resolves: rhbz#1754446
* Thu Jan 30 2020 Fedora Release Engineering <releng@fedoraproject.org> - 1:3.2.0-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_32_Mass_Rebuild
* Wed Oct 02 2019 David Lehman <dlehman@redhat.com> - 3.1.0-17
- Fix util.detect_virt function
Resolves: rhbz#1676935
* Wed Jan 29 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.0-1
- Align base sizes up if smaller than min I/O size. (dlehman)
- Make minimal and optimal alignment getters public. (dlehman)
- Add support for relabeling of the swap format (vtrefny)
- Define the 'relabels' method for all formats (vtrefny)
- Add support for LVMPV format resize (vtrefny)
- Add a new "id_path" attribute for iSCSI and FCoE disks (vtrefny)
- Do not load module when creating an FS instance (vtrefny)
- Add a simple script for running tests manually (vtrefny)
- Remove unused API code (jkonecny)
- devicetree.names is now a property (japokorn)
- initial PowerNV class support (dan)
- Use LUKS2 by default (vponcova)
* Mon Aug 05 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-16
- Minor cleanups to reduce log noise
Related: rhbz#1579375
* Wed Jan 29 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.1.7-1
- Use SHA256 instead of MD5 for /proc/mounts hash calculation (vtrefny)
- Fix udev test names so they actually get run. (dlehman)
- Add recognition of Dell FW RAID to udev.device_is_disk. (dlehman)
- Fix Blivet DBus service version in service and config files (vtrefny)
- Make sure _chrooted_mountpoint attribute is defined before using it (vtrefny)
- Allow running 'write_label' in dry run mode on non-existing devices (vtrefny)
- Make 'makeupdates' and 'makebumpver' scripts Python 3 compatible (vtrefny)
- Do not hardcode coverage executable name (vtrefny)
- Make sure LVs are writable before wiping. (dlehman)
- Override LVM skip-activation to allow for thorough removal. (dlehman)
- Add setters for requested_size/percent form LVMVolumeGroupDevice (vtrefny)
- Set min size for XFS to 16 MiB (vtrefny)
- Revert "Ignore invalid-overridden-method warning for abstract properties" (vtrefny)
- Fix invalid-overridden-method in events_test (vtrefny)
* Mon Jul 15 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-15
- Do not crash if 'dm.get_member_raid_sets' fails
Resolves: rhbz#1704289
* Fri Oct 25 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.6-1
- Do not allow creating VGs with PVs with different sector size (vtrefny)
- Add a new "sector_size" property to storage devices. (vtrefny)
- Ignore invalid-overridden-method warning for abstract properties (vtrefny)
- Change NFSMount._availability_errors to a property (vtrefny)
- Fix util.detect_virt function (vtrefny)
- Do not try to normalize size for zero size device factories (vtrefny)
- Always set default key size to 512 bits for ciphers with XTS mode (vtrefny)
* Tue Jul 02 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-14
- Correctly handle non-unicode iSCSI initiator names
Resolves: rhbz#1632117
* Thu Oct 03 2019 Miro Hrončok <mhroncok@redhat.com> - 1:3.1.5-4
- Rebuilt for Python 3.8.0rc1 (#1748018)
* Tue Jun 18 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-13
- Fix reading LV attributes in LVMVolumeGroupDevice.status
Resolves: rhbz#1721381
* Tue Aug 27 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.5-3
- Do not try to normalize size for zero size device factories (#1743753)
* Fri Jun 14 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-12
- Deactivate incomplete VGs along with everything else
Resolves: rhbz#1635125
- Automatically adjust size of growable devices for new format
Resolves: rhbz#1680013
- Add flag for protecting cdrom devices during populate
Resolves: rhbz#1719648
- Clean up some errors evident in installer logs
Resolves: rhbz#1579375
- Use dasd disklabel for vm disks backed by dasds
Resolves: rhbz#1676935
* Fri Aug 16 2019 Miro Hrončok <mhroncok@redhat.com> - 1:3.1.5-2
- Rebuilt for Python 3.8
* Thu May 16 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-11
- Various test fixes for RHEL 8
Related: rhbz#1682561
- Add upstream test suite to the SRPM
Related: rhbz#1682561
* Thu Aug 15 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.5-1
- Move dependencies code from StorageDevice to Device (vtrefny)
- Always use luks_data.min_entropy as a default minimum entropy (vponcova)
- Add 'protected' property setter to LVMVolumeGroupDevice (#1729363) (vtrefny)
- fix of LV max size calculation (japokorn)
- Added min size for partitions (japokorn)
- Improved non-unique UUID handling (japokorn)
- Check if disklabel supports partition names (#1723228) (vtrefny)
- format_device: Revert destroy action if create fails (#1727589) (vtrefny)
- Do not allow resizing of LUKS devices with integrity (vtrefny)
- Return underlying block device as 'slave' for LUKS with integrity (vtrefny)
- Fix removing LUKS devices with integrity (vtrefny)
- Check status before activating dmraid set in populate. (#1723979) (dlehman)
- Use DBus call to see if we're in a vm. (dlehman)
- Use dasd disklabel for vm disks backed by dasds. (dlehman)
- Add a function to detect if running in a vm. (dlehman)
- Remove teardown_all from the populate method (vponcova)
- Correctly handle non-unicode iSCSI initiator names (vtrefny)
- Add, test and use a new method to get size with reserve (vpodzime)
- Beware non-positive sizes in thpool metadata size calculations (vpodzime)
- Log sizes in MiB in thpool auto metadata size calculations (vpodzime)
- Recalculate thpool's metadata size on resize in LVMThinPFactory (vpodzime)
- Move the thpool reserve calculations to LVMFactory (vpodzime)
* Wed Apr 03 2019 David Lehman <dlehman@redhat.com> - 3.1.0-10
- Ensure correct type of mpath cache member list.
Related: rhbz#1672971
* Fri Jul 26 2019 Fedora Release Engineering <releng@fedoraproject.org> - 1:3.1.4-3
- Rebuilt for https://fedoraproject.org/wiki/Fedora_31_Mass_Rebuild
* Mon Feb 25 2019 David Lehman <dlehman@redhat.com> - 3.1.0-9
- Update to latest translations.
Resolves: rhbz#1608337
- Require libfc instead of fcoe for offloaded FCoE.
Resolves: rhbz#1575953
- Use udev to determine if disk is a multipath member.
Related: rhbz#1575953
- Don't crash if blockdev mpath plugin isn't available.
Resolves: rhbz#1672971
* Thu Jul 11 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.4-2
- Remove teardown_all from the populate method (vponcova)
- initial PowerNV class support (dan)
* Tue Jan 15 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-8
* Tue Jun 11 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.4-1
- Don't call fnmatch with None (#1698937) (vponcova)
- Do not crash on non-int lun argument when creating iscsi disk object. (rvykydal)
- Make iscsi device attribute modifications backward compatible. (rvykydal)
- Do not store iscsi module nodeinfo in device object. (rvykydal)
- Only call mpath plugin when it is available. (#1697378) (dlehman)
- Include tests archive where appropriate in make targets. (dlehman)
- Add spec file logic to include unit tests in SRPM. (dlehman)
- Add a target to create an archive of the unit tests. (dlehman)
- Remove profanity from an old comment. (dlehman)
- Fix mounting of the filesystem iso9660 (vponcova)
- Remove unnecessary pass statements (vtrefny)
- Check for format tools availability in action_test (vtrefny)
- Skip weak dependencies test if we don't have all libblockdev plugins (vtrefny)
- Properly clean after availability test case (vtrefny)
- Ensure correct type of mpath cache member list. (dlehman)
- Do not crash if 'dm.get_member_raid_sets' fails (#1684851) (vtrefny)
- Fix supported disklabels in 'test_platform_label_types' on EFI (vtrefny)
- Support legacy MBR (msdos) as part of UEFI to enable hybrid builds (pbrobinson)
- Automatically adjust size of growable devices for new format (vtrefny)
- spec: Remove obsolete Group tag and bump min libblockdev version (vtrefny)
* Thu Mar 21 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.3-3
- Ensure correct type of mpath cache member list
* Mon Mar 11 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.3-2
- Support legacy MBR (msdos) as part of UEFI to enable hybrid builds (pbrobinson)
* Wed Feb 27 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.3-1
- Don't crash if blockdev mpath plugin isn't available. (#1672971) (dlehman)
- iscsi: Add default value to unused 'storage' argument in 'write' (vtrefny)
- Add exported property to LVMVolumeGroupDevice (vtrefny)
- Add VG data to static_data (vtrefny)
- Do not try to get format free space for non-existing formats (vtrefny)
- Do not raise exception if can't get PV free space (vtrefny)
- Fix undefined attribute in LVM info cache (vtrefny)
- Use raw_device to get thinpool device in LVMThinPFactory (#1490174) (vtrefny)
- Do not crash if DM RAID activation fails (#1661712) (vtrefny)
- Remove the unused sysroot property (vponcova)
- Remove unused attributes from the Blivet class (vponcova)
- Remove the unused gpt flag (vponcova)
- Copy the iSCSI initiator name file to the installed system (vtrefny)
Resolves: rhbz#1664587
- Use udev to determine if disk is a multipath member. (dlehman)
- Require libfc instead of fcoe for offloaded FCoE. (#1575953) (dlehman)
* Mon Dec 17 2018 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-7
- Wipe all stale metadata after creating md array (dlehman)
Resolves: rhbz#1639682
* Sat Feb 02 2019 Fedora Release Engineering <releng@fedoraproject.org> - 1:3.1.2-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_30_Mass_Rebuild
* Tue Oct 16 2018 David Lehman <dlehman@redhat.com> - 3.1.0-6
- Fix options for ISCSI functions (vtrefny)
Resolves: rhbz#1635569
* Wed Dec 12 2018 Vojtech Trefny <vtrefny@redhat.com> - 3.1.2-1
- Fix reading LV attributes in LVMVolumeGroupDevice.status (vtrefny)
- Do not try to login to iBFTs with active session (vtrefny)
- Fix xfs sync of chrooted mountpoint. (dlehman)
- Only update sysfs path in ctor for active devices. (dlehman)
- Fix new pep8/pycodestyle warnings (vtrefny)
- Ignore PEP8 W504 warning ("line break after binary operator") (vtrefny)
- pylint: Allow loading all C extensions (vtrefny)
- Use 'pycodestyle' instead of 'pep8' (vtrefny)
- Fix failing populator test without nvdimm plugin (vtrefny)
- Add 'srpm' and 'rpm' targets to Makefile for building (S)RPMs (vtrefny)
- Fix crash on reset on systems without nvdimm plugin (vtrefny)
- Use the size info of internal LVs when getting space usage for existing LVs (v.podzimek)
- Calculate the number of RAID PVs from the origin for cached LVs (v.podzimek)
- Make raid_level a property of an LV object (v.podzimek)
- Add a test for DeviceTree.get_related_disks. (dlehman)
- Fix ixgbe/bnx2fc fcoe disk detection (#1651506) (rvykydal)
- Use RAID name for partitions on an MD array (vtrefny)
- Move btrfs name validation to devicelibs (vtrefny)
- Don't try to set selinux context for nodev or vfat file systems. (dlehman)
- Only try to set selinux context for lost+found on ext file systems. (dlehman)
- Wipe all stale metadata after creating md array. (#1639682) (dlehman)
- Don't try to update sysfs path for non-block devices. (#1579375) (dlehman)
- Don't raise errors without messages (vponcova)
- Install ndctl when NVDIMMs are used. (dlehman)
- Deactivate incomplete VGs along with everything else. (dlehman)
- Work around udev timing issues. (dlehman)
- Fix options for ISCSI functions (#1632656) (vtrefny)
- Use format.status when checking for PV status (vtrefny)
- Remove Anaconda flags (vponcova)
- Remove square brackets when matching internal LVs (v.podzimek)
* Thu Sep 27 2018 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-5
* Mon Oct 08 2018 Vojtech Trefny <vtrefny@redhat.com> - 3.1.1-2
- Fix options for ISCSI functions (#1632656) (vtrefny)
* Wed Sep 26 2018 Vojtech Trefny <vtrefny@redhat.com> - 3.1.1-1
- Check device dependencies only for device actions (vtrefny)
- Allow removing btrfs volumes without btrfs support (vtrefny)
- Adjust LVMPhysicalVolumeMethodsTestCase to new pvcreate option (vtrefny)
- add `-y' to lvm.pvcreate (hongxu.jia)
- Drop omap partition table tests on ARM platforms (pbrobinson)
- Update disk label tests for ARM platforms (pbrobinson)
- Ignore pylint 'no-value-for-parameter' warning (vtrefny)
- arm: add support for EFI on ARMv7 (pbrobinson)
- Aarch64 platforms: Fix gpt defaults for 64 bit arm platforms (pbrobinson)
- arch: arm: drop get_arm_machine function (pbrobinson)
- arch: arm: drop omap specifics for partitioning (pbrobinson)
- Create a separate availability check for dmraid support (vtrefny)
* Thu Aug 30 2018 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-2
- arm: add support for EFI on ARMv7 (probinson)
Related: rhbz#1623882
- Aarch64 platforms: Fix gpt defaults for 64 bit arm platforms (probinson)
Resolves: rhbz#1623882
- arch: arm: drop get_arm_machine function (probinson)
Related: rhbz#1623882
- arch: arm: drop omap specifics for partitioning (probinson)
Related: rhbz#1623882
* Thu Sep 20 2018 Tomas Orsava <torsava@redhat.com> - 3.1.0-4
- Require the Python interpreter directly instead of using the package name
- Related: rhbz#1619153
* Wed Sep 19 2018 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-3
- Check device dependencies only for device actions
Related: rhbz#1605213
- Allow removing btrfs volumes without btrfs support
Resolves: rhbz#1605213
* Tue Aug 21 2018 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-2
- Create a separate availability check for dmraid support
Resolves: rhbz#1617958
* Fri Aug 10 2018 David Lehman <dlehman@redhat.com> - 3.1.0-1
* Mon Aug 13 2018 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-1
- Allow configuring default LUKS2 PBKDF arguments using luks_data (vtrefny)
Related: rhbz#1561352
- Fix the populate_kickstart method in LUKS (vtrefny)
Related: rhbz#1561352
- Allow specifying extra arguments for PBKDF when creating LUKS2 (vtrefny)
Related: rhbz#1561352
- Add support for LUKS2 to DeviceFactory (vtrefny)
Resolves: rhbz#1561352
- DeviceFactory: use min_luks_entropy from kwargs (vtrefny)
Related: rhbz#1561352
- Fix passing 'min_luks_entropy' when creating LUKS format (vtrefny)
Related: rhbz#1561352
- Use passphrase/key file when resizing LUKS2 format (vtrefny)
Related: rhbz#1561352
- Require libblockdev 2.17 (vtrefny)
Related: rhbz#1561352
- Add support for LUKS2 format (vtrefny)
Related: rhbz#1561352
- Add initial support for DM Integrity "format" (vtrefny)
Related: rhbz#1561352
- Do not try to add LUKSDevice in LUKSFormatPopulator (vtrefny)
Related: rhbz#1561352
- Add support for dm-integrity devices (vtrefny)
Related: rhbz#1561352
- Fixed various issues preventing successful build (japokorn)
Related: rhbz#1561352
* Thu Aug 2 2018 Peter Robinson <pbrobinson@fedoraproject.org> 3.1.0-0.5.b2
- Bump release to fix upgrade path
* Mon Jul 30 2018 David Lehman <dlehman@redhat.com> - 3.1.0-0.1.b2
- Do not ignore "Image out-of-sync" internal LVs (vtrefny)
@ -499,12 +746,15 @@ configuration.
- Adapt mock imports for compatibility w/ python2 & python3. (dlehman)
- Use py2-compatible syntax to get system architecture. (dlehman)
* Mon Jul 16 2018 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-0.3.b1
- Remove btrfs from requested libblockdev plugins (vtrefny)
* Wed Jul 11 2018 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-0.2.b1
* Tue Jul 17 2018 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-0.4.b1
- Force command line based libblockdev LVM plugin (vtrefny)
* Fri Jul 13 2018 Fedora Release Engineering <releng@fedoraproject.org> - 1:3.1.0-0.3.b1
- Rebuilt for https://fedoraproject.org/wiki/Fedora_29_Mass_Rebuild
* Fri Jun 15 2018 Miro Hrončok <mhroncok@redhat.com> - 1:3.1.0-0.2.b1
- Rebuilt for Python 3.7
* Wed May 02 2018 David Lehman <dlehman@redhat.com> - 3.1.0-0.1.b1
- Add 'nvdimm' tag for NVDIMM namespaces (vtrefny)
- Add test for NVDIMMNamespaceDevicePopulator (vtrefny)