Auto sync2gitlab import of python-blivet-3.4.0-12.el8.src.rpm

CentOS Sources 2022-06-24 14:12:09 +00:00
parent 901aa6a13d
commit 885bfbd000
24 changed files with 9180 additions and 1 deletion

.gitignore (new file, 2 lines)

@@ -0,0 +1,2 @@
/blivet-3.4.0-tests.tar.gz
/blivet-3.4.0.tar.gz

0001-force-lvm-cli.plugin (new file, 35 lines)

@@ -0,0 +1,35 @@
From 2f90040ff66eacc9715e370cd49ffb72d8d1f36f Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 11 Jul 2018 15:36:24 +0200
Subject: [PATCH] Force command line based libblockdev LVM plugin
---
blivet/__init__.py | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/blivet/__init__.py b/blivet/__init__.py
index c5a75bb..cb75917 100644
--- a/blivet/__init__.py
+++ b/blivet/__init__.py
@@ -63,11 +63,16 @@ gi.require_version("BlockDev", "2.0")
from gi.repository import GLib
from gi.repository import BlockDev as blockdev
if arch.is_s390():
- _REQUESTED_PLUGIN_NAMES = set(("lvm", "btrfs", "swap", "crypto", "loop", "mdraid", "mpath", "dm", "s390", "nvdimm"))
+ _REQUESTED_PLUGIN_NAMES = set(("btrfs", "swap", "crypto", "loop", "mdraid", "mpath", "dm", "s390", "nvdimm"))
else:
- _REQUESTED_PLUGIN_NAMES = set(("lvm", "btrfs", "swap", "crypto", "loop", "mdraid", "mpath", "dm", "nvdimm"))
+ _REQUESTED_PLUGIN_NAMES = set(("btrfs", "swap", "crypto", "loop", "mdraid", "mpath", "dm", "nvdimm"))
_requested_plugins = blockdev.plugin_specs_from_names(_REQUESTED_PLUGIN_NAMES)
+# XXX force non-dbus LVM plugin
+lvm_plugin = blockdev.PluginSpec()
+lvm_plugin.name = blockdev.Plugin.LVM
+lvm_plugin.so_name = "libbd_lvm.so.2"
+_requested_plugins.append(lvm_plugin)
try:
# do not check for dependencies during libblockdev initializtion, do runtime
# checks instead
--
1.8.3.1
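
As a concrete illustration of the patch above, here is a minimal sketch (assuming libblockdev 2.x with its GObject introspection data installed; ensure_init() and get_plugin_soname() are standard libblockdev calls, not part of the patch) that registers the same forced plugin spec and reports which LVM provider was actually loaded:

import gi
gi.require_version("BlockDev", "2.0")
from gi.repository import BlockDev as blockdev

# build the same spec the patch appends to blivet's requested plugins
lvm_plugin = blockdev.PluginSpec()
lvm_plugin.name = blockdev.Plugin.LVM
lvm_plugin.so_name = "libbd_lvm.so.2"

blockdev.ensure_init([lvm_plugin])
# expect "libbd_lvm.so.2" (command line based), not "libbd_lvm-dbus.so.2"
print(blockdev.get_plugin_soname(blockdev.Plugin.LVM))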


@@ -0,0 +1,28 @@
From 6bf3378d3d2a1b6a4338df0c4dd36a783a641633 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 16 Jul 2018 14:26:11 +0200
Subject: [PATCH] Remove btrfs from requested libblockdev plugins
---
blivet/__init__.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/blivet/__init__.py b/blivet/__init__.py
index cb75917..09f8b1c 100644
--- a/blivet/__init__.py
+++ b/blivet/__init__.py
@@ -63,9 +63,9 @@ gi.require_version("BlockDev", "2.0")
from gi.repository import GLib
from gi.repository import BlockDev as blockdev
if arch.is_s390():
- _REQUESTED_PLUGIN_NAMES = set(("btrfs", "swap", "crypto", "loop", "mdraid", "mpath", "dm", "s390", "nvdimm"))
+ _REQUESTED_PLUGIN_NAMES = set(("swap", "crypto", "loop", "mdraid", "mpath", "dm", "s390", "nvdimm"))
else:
- _REQUESTED_PLUGIN_NAMES = set(("btrfs", "swap", "crypto", "loop", "mdraid", "mpath", "dm", "nvdimm"))
+ _REQUESTED_PLUGIN_NAMES = set(("swap", "crypto", "loop", "mdraid", "mpath", "dm", "nvdimm"))
_requested_plugins = blockdev.plugin_specs_from_names(_REQUESTED_PLUGIN_NAMES)
# XXX force non-dbus LVM plugin
--
1.8.3.1
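
With btrfs dropped from the requested plugins, callers can no longer assume it is loaded. A minimal sketch (same libblockdev 2.x assumptions as above) of checking for it at runtime:

import gi
gi.require_version("BlockDev", "2.0")
from gi.repository import BlockDev as blockdev

blockdev.ensure_init(blockdev.plugin_specs_from_names({"swap", "crypto", "loop"}))

# btrfs is no longer requested by blivet, so test before relying on it
if "btrfs" not in blockdev.get_available_plugin_names():
    print("btrfs plugin not loaded; btrfs operations are unavailable")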


@@ -0,0 +1,330 @@
From 3a42d9e2afdf04dbbfd2c507f5b2392193fda25b Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 26 May 2021 12:15:54 +0200
Subject: [PATCH] Revert "More consistent lvm errors (API break)"
This reverts commit 49ec071c6d0673224a0774d613904387c52c7381.
---
blivet/devices/lvm.py | 72 +++++++++++++++++-----------------
tests/devices_test/lvm_test.py | 14 +++----
2 files changed, 43 insertions(+), 43 deletions(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index a55515fc..6d23bfba 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -307,7 +307,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
def _add_log_vol(self, lv):
""" Add an LV to this VG. """
if lv in self._lvs:
- raise errors.DeviceError("lv is already part of this vg")
+ raise ValueError("lv is already part of this vg")
# verify we have the space, then add it
# do not verify for growing vg (because of ks)
@@ -340,7 +340,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
def _remove_log_vol(self, lv):
""" Remove an LV from this VG. """
if lv not in self.lvs:
- raise errors.DeviceError("specified lv is not part of this vg")
+ raise ValueError("specified lv is not part of this vg")
self._lvs.remove(lv)
@@ -415,7 +415,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
@thpool_reserve.setter
def thpool_reserve(self, value):
if value is not None and not isinstance(value, ThPoolReserveSpec):
- raise AttributeError("Invalid thpool_reserve given, must be of type ThPoolReserveSpec")
+ raise ValueError("Invalid thpool_reserve given, must be of type ThPoolReserveSpec")
self._thpool_reserve = value
@property
@@ -646,14 +646,14 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo"] + lvm.raid_seg_types:
raise ValueError("Invalid or unsupported segment type: %s" % seg_type)
if seg_type and seg_type in lvm.raid_seg_types and not pvs:
- raise errors.DeviceError("List of PVs has to be given for every non-linear LV")
+ raise ValueError("List of PVs has to be given for every non-linear LV")
elif (not seg_type or seg_type == "linear") and pvs:
if not all(isinstance(pv, LVPVSpec) for pv in pvs):
- raise errors.DeviceError("Invalid specification of PVs for a linear LV: either no or complete "
- "specification (with all space split into PVs has to be given")
+ raise ValueError("Invalid specification of PVs for a linear LV: either no or complete "
+ "specification (with all space split into PVs has to be given")
elif sum(spec.size for spec in pvs) != size:
- raise errors.DeviceError("Invalid specification of PVs for a linear LV: the sum of space "
- "assigned to PVs is not equal to the size of the LV")
+ raise ValueError("Invalid specification of PVs for a linear LV: the sum of space "
+ "assigned to PVs is not equal to the size of the LV")
# When this device's format is set in the superclass constructor it will
# try to access self.snapshots.
@@ -702,13 +702,13 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
self._from_lvs = from_lvs
if self._from_lvs:
if exists:
- raise errors.DeviceError("Only new LVs can be created from other LVs")
+ raise ValueError("Only new LVs can be created from other LVs")
if size or maxsize or percent:
- raise errors.DeviceError("Cannot specify size for a converted LV")
+ raise ValueError("Cannot specify size for a converted LV")
if fmt:
- raise errors.DeviceError("Cannot specify format for a converted LV")
+ raise ValueError("Cannot specify format for a converted LV")
if any(lv.vg != self.vg for lv in self._from_lvs):
- raise errors.DeviceError("Conversion of LVs only possible inside a VG")
+ raise ValueError("Conversion of LVs only possible inside a VG")
self._cache = None
if cache_request and not self.exists:
@@ -723,13 +723,13 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
elif isinstance(pv_spec, StorageDevice):
self._pv_specs.append(LVPVSpec(pv_spec, Size(0)))
else:
- raise AttributeError("Invalid PV spec '%s' for the '%s' LV" % (pv_spec, self.name))
+ raise ValueError("Invalid PV spec '%s' for the '%s' LV" % (pv_spec, self.name))
# Make sure any destination PVs are actually PVs in this VG
if not set(spec.pv for spec in self._pv_specs).issubset(set(self.vg.parents)):
missing = [r.name for r in
set(spec.pv for spec in self._pv_specs).difference(set(self.vg.parents))]
msg = "invalid destination PV(s) %s for LV %s" % (missing, self.name)
- raise errors.DeviceError(msg)
+ raise ValueError(msg)
if self._pv_specs:
self._assign_pv_space()
@@ -1072,7 +1072,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
else:
msg = "the specified internal LV '%s' doesn't belong to this LV ('%s')" % (int_lv.lv_name,
self.name)
- raise errors.DeviceError(msg)
+ raise ValueError(msg)
def populate_ksdata(self, data):
super(LVMLogicalVolumeBase, self).populate_ksdata(data)
@@ -1171,7 +1171,7 @@ class LVMInternalLogicalVolumeMixin(object):
def _init_check(self):
# an internal LV should have no parents
if self._parent_lv and self._parents:
- raise errors.DeviceError("an internal LV should have no parents")
+ raise ValueError("an internal LV should have no parents")
@property
def is_internal_lv(self):
@@ -1231,7 +1231,7 @@ class LVMInternalLogicalVolumeMixin(object):
@readonly.setter
def readonly(self, value): # pylint: disable=unused-argument
- raise errors.DeviceError("Cannot make an internal LV read-write")
+ raise ValueError("Cannot make an internal LV read-write")
@property
def type(self):
@@ -1267,7 +1267,7 @@ class LVMInternalLogicalVolumeMixin(object):
def _check_parents(self):
# an internal LV should have no parents
if self._parents:
- raise errors.DeviceError("an internal LV should have no parents")
+ raise ValueError("an internal LV should have no parents")
def _add_to_parents(self):
# nothing to do here, an internal LV has no parents (in the DeviceTree's
@@ -1277,13 +1277,13 @@ class LVMInternalLogicalVolumeMixin(object):
# internal LVs follow different rules limitting size
def _set_size(self, newsize):
if not isinstance(newsize, Size):
- raise AttributeError("new size must of type Size")
+ raise ValueError("new size must of type Size")
if not self.takes_extra_space:
if newsize <= self.parent_lv.size: # pylint: disable=no-member
self._size = newsize # pylint: disable=attribute-defined-outside-init
else:
- raise errors.DeviceError("Internal LV cannot be bigger than its parent LV")
+ raise ValueError("Internal LV cannot be bigger than its parent LV")
else:
# same rules apply as for any other LV
raise NotTypeSpecific()
@@ -1361,18 +1361,18 @@ class LVMSnapshotMixin(object):
return
if self.origin and not isinstance(self.origin, LVMLogicalVolumeDevice):
- raise errors.DeviceError("lvm snapshot origin must be a logical volume")
+ raise ValueError("lvm snapshot origin must be a logical volume")
if self.vorigin and not self.exists:
- raise errors.DeviceError("only existing vorigin snapshots are supported")
+ raise ValueError("only existing vorigin snapshots are supported")
if isinstance(self.origin, LVMLogicalVolumeDevice) and \
isinstance(self.parents[0], LVMVolumeGroupDevice) and \
self.origin.vg != self.parents[0]:
- raise errors.DeviceError("lvm snapshot and origin must be in the same vg")
+ raise ValueError("lvm snapshot and origin must be in the same vg")
if self.is_thin_lv:
if self.origin and self.size and not self.exists:
- raise errors.DeviceError("thin snapshot size is determined automatically")
+ raise ValueError("thin snapshot size is determined automatically")
@property
def is_snapshot_lv(self):
@@ -1544,7 +1544,7 @@ class LVMThinPoolMixin(object):
def _check_from_lvs(self):
if self._from_lvs:
if len(self._from_lvs) != 2:
- raise errors.DeviceError("two LVs required to create a thin pool")
+ raise ValueError("two LVs required to create a thin pool")
def _convert_from_lvs(self):
data_lv, metadata_lv = self._from_lvs
@@ -1590,7 +1590,7 @@ class LVMThinPoolMixin(object):
def _add_log_vol(self, lv):
""" Add an LV to this pool. """
if lv in self._lvs:
- raise errors.DeviceError("lv is already part of this vg")
+ raise ValueError("lv is already part of this vg")
# TODO: add some checking to prevent overcommit for preexisting
self.vg._add_log_vol(lv)
@@ -1601,7 +1601,7 @@ class LVMThinPoolMixin(object):
def _remove_log_vol(self, lv):
""" Remove an LV from this pool. """
if lv not in self._lvs:
- raise errors.DeviceError("specified lv is not part of this vg")
+ raise ValueError("specified lv is not part of this vg")
self._lvs.remove(lv)
self.vg._remove_log_vol(lv)
@@ -1711,14 +1711,14 @@ class LVMThinLogicalVolumeMixin(object):
"""Check that this device has parents as expected"""
if isinstance(self.parents, (list, ParentList)):
if len(self.parents) != 1:
- raise errors.DeviceError("constructor requires a single thin-pool LV")
+ raise ValueError("constructor requires a single thin-pool LV")
container = self.parents[0]
else:
container = self.parents
if not container or not isinstance(container, LVMLogicalVolumeDevice) or not container.is_thin_pool:
- raise errors.DeviceError("constructor requires a thin-pool LV")
+ raise ValueError("constructor requires a thin-pool LV")
@property
def is_thin_lv(self):
@@ -1755,7 +1755,7 @@ class LVMThinLogicalVolumeMixin(object):
def _set_size(self, newsize):
if not isinstance(newsize, Size):
- raise AttributeError("new size must of type Size")
+ raise ValueError("new size must of type Size")
newsize = self.vg.align(newsize)
newsize = self.vg.align(util.numeric_type(newsize))
@@ -2229,7 +2229,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
container = self.parents
if not isinstance(container, LVMVolumeGroupDevice):
- raise AttributeError("constructor requires a LVMVolumeGroupDevice")
+ raise ValueError("constructor requires a LVMVolumeGroupDevice")
@type_specific
def _add_to_parents(self):
@@ -2240,12 +2240,12 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
@type_specific
def _check_from_lvs(self):
"""Check the LVs to create this LV from"""
- raise errors.DeviceError("Cannot create a new LV of type '%s' from other LVs" % self.seg_type)
+ raise ValueError("Cannot create a new LV of type '%s' from other LVs" % self.seg_type)
@type_specific
def _convert_from_lvs(self):
"""Convert the LVs to create this LV from into its internal LVs"""
- raise errors.DeviceError("Cannot create a new LV of type '%s' from other LVs" % self.seg_type)
+ raise ValueError("Cannot create a new LV of type '%s' from other LVs" % self.seg_type)
@property
def external_dependencies(self):
@@ -2265,7 +2265,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
@type_specific
def _set_size(self, newsize):
if not isinstance(newsize, Size):
- raise AttributeError("new size must be of type Size")
+ raise ValueError("new size must be of type Size")
newsize = self.vg.align(newsize)
log.debug("trying to set lv %s size to %s", self.name, newsize)
@@ -2274,7 +2274,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
# space for it. A similar reasoning applies to shrinking the LV.
if not self.exists and newsize > self.size and newsize > self.vg.free_space + self.vg_space_used:
log.error("failed to set size: %s short", newsize - (self.vg.free_space + self.vg_space_used))
- raise errors.DeviceError("not enough free space in volume group")
+ raise ValueError("not enough free space in volume group")
LVMLogicalVolumeBase._set_size(self, newsize)
@@ -2622,7 +2622,7 @@ class LVMCache(Cache):
spec.size = spec.pv.format.free
space_to_assign -= spec.pv.format.free
if space_to_assign > 0:
- raise errors.DeviceError("Not enough free space in the PVs for this cache: %s short" % space_to_assign)
+ raise ValueError("Not enough free space in the PVs for this cache: %s short" % space_to_assign)
@property
def size(self):
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
index 670c91c9..4156d0bf 100644
--- a/tests/devices_test/lvm_test.py
+++ b/tests/devices_test/lvm_test.py
@@ -36,10 +36,10 @@ class LVMDeviceTest(unittest.TestCase):
lv = LVMLogicalVolumeDevice("testlv", parents=[vg],
fmt=blivet.formats.get_format("xfs"))
- with six.assertRaisesRegex(self, errors.DeviceError, "lvm snapshot origin must be a logical volume"):
+ with six.assertRaisesRegex(self, ValueError, "lvm snapshot origin must be a logical volume"):
LVMLogicalVolumeDevice("snap1", parents=[vg], origin=pv)
- with six.assertRaisesRegex(self, errors.DeviceError, "only existing vorigin snapshots are supported"):
+ with six.assertRaisesRegex(self, ValueError, "only existing vorigin snapshots are supported"):
LVMLogicalVolumeDevice("snap1", parents=[vg], vorigin=True)
lv.exists = True
@@ -64,7 +64,7 @@ class LVMDeviceTest(unittest.TestCase):
pool = LVMLogicalVolumeDevice("pool1", parents=[vg], size=Size("500 MiB"), seg_type="thin-pool")
thinlv = LVMLogicalVolumeDevice("thinlv", parents=[pool], size=Size("200 MiB"), seg_type="thin")
- with six.assertRaisesRegex(self, errors.DeviceError, "lvm snapshot origin must be a logical volume"):
+ with six.assertRaisesRegex(self, ValueError, "lvm snapshot origin must be a logical volume"):
LVMLogicalVolumeDevice("snap1", parents=[pool], origin=pv, seg_type="thin")
# now make the constructor succeed so we can test some properties
@@ -258,21 +258,21 @@ class LVMDeviceTest(unittest.TestCase):
vg = LVMVolumeGroupDevice("testvg", parents=[pv, pv2])
# pvs have to be specified for non-linear LVs
- with self.assertRaises(errors.DeviceError):
+ with self.assertRaises(ValueError):
lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
fmt=blivet.formats.get_format("xfs"),
exists=False, seg_type="raid1")
- with self.assertRaises(errors.DeviceError):
+ with self.assertRaises(ValueError):
lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
fmt=blivet.formats.get_format("xfs"),
exists=False, seg_type="striped")
# no or complete specification has to be given for linear LVs
- with self.assertRaises(errors.DeviceError):
+ with self.assertRaises(ValueError):
lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
fmt=blivet.formats.get_format("xfs"),
exists=False, pvs=[pv])
- with self.assertRaises(errors.DeviceError):
+ with self.assertRaises(ValueError):
pv_spec = LVPVSpec(pv, Size("256 MiB"))
pv_spec2 = LVPVSpec(pv2, Size("250 MiB"))
lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
--
2.31.1
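
For callers of the constructors touched above, the practical effect of this revert is the exception type: invalid arguments raise ValueError again rather than errors.DeviceError. A short sketch, modeled on the lvm_test.py hunk above, of code that stays portable across both behaviours:

import blivet
from blivet import errors
from blivet.devices import StorageDevice, LVMVolumeGroupDevice, LVMLogicalVolumeDevice
from blivet.size import Size

pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                   size=Size("1 GiB"))
vg = LVMVolumeGroupDevice("testvg", parents=[pv])

try:
    # a raid1 LV without pvs= is rejected in the constructor
    LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
                           fmt=blivet.formats.get_format("xfs"),
                           exists=False, seg_type="raid1")
except (ValueError, errors.DeviceError) as exc:
    # ValueError with this revert, errors.DeviceError on unpatched 3.4
    print("rejected:", exc)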

View File

@@ -0,0 +1,908 @@
From 42042e7fb6177d3cfe5568e358a38278925a2624 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 26 May 2021 12:27:34 +0200
Subject: [PATCH] Revert "Terminology cleanups"
This reverts the following commits:
- 3d46339fe9cf12e9082fcbe4dc5acc9f92617e8d
- 63c9c7165e5cdfa4a47dcf0ed9d717b71e7921f2
- 8956b9af8a785ae25e0e7153d2ef0702ce2f567c
---
blivet/devicefactory.py | 24 +++++-----
blivet/devices/dm.py | 9 +++-
blivet/devices/loop.py | 20 ++++----
blivet/devices/luks.py | 26 +++++-----
blivet/errors.py | 2 +-
blivet/partitioning.py | 22 +++++++--
blivet/populator/helpers/dm.py | 4 +-
blivet/populator/helpers/luks.py | 4 +-
blivet/populator/helpers/lvm.py | 2 +-
blivet/populator/helpers/mdraid.py | 14 +++---
blivet/populator/helpers/multipath.py | 8 ++--
blivet/populator/populator.py | 69 ++++++++++++++-------------
blivet/threads.py | 3 +-
blivet/udev.py | 34 ++++++-------
tests/devicefactory_test.py | 10 ++--
tests/devices_test/size_test.py | 6 +--
tests/populator_test.py | 34 ++++++-------
tests/udev_test.py | 12 ++---
tests/vmtests/vmbackedtestcase.py | 2 +-
19 files changed, 168 insertions(+), 137 deletions(-)
diff --git a/blivet/devicefactory.py b/blivet/devicefactory.py
index 0f7fdfa1..f56bd9a3 100644
--- a/blivet/devicefactory.py
+++ b/blivet/devicefactory.py
@@ -849,12 +849,12 @@ class DeviceFactory(object):
parent_container.parents.remove(orig_device)
if self.encrypted and isinstance(self.device, LUKSDevice) and \
- self.raw_device.format.luks_version != self.luks_version:
- self.raw_device.format.luks_version = self.luks_version
+ self.device.slave.format.luks_version != self.luks_version:
+ self.device.slave.format.luks_version = self.luks_version
if self.encrypted and isinstance(self.device, LUKSDevice) and \
- self.raw_device.format.luks_sector_size != self.luks_sector_size:
- self.raw_device.format.luks_sector_size = self.luks_sector_size
+ self.device.slave.format.luks_sector_size != self.luks_sector_size:
+ self.device.slave.format.luks_sector_size = self.luks_sector_size
def _set_name(self):
if not self.device_name:
@@ -1173,11 +1173,11 @@ class PartitionSetFactory(PartitionFactory):
container.parents.remove(member)
self.storage.destroy_device(member)
members.remove(member)
- self.storage.format_device(member.raw_device,
+ self.storage.format_device(member.slave,
get_format(self.fstype))
- members.append(member.raw_device)
+ members.append(member.slave)
if container:
- container.parents.append(member.raw_device)
+ container.parents.append(member.slave)
continue
@@ -1199,10 +1199,10 @@ class PartitionSetFactory(PartitionFactory):
continue
- if member_encrypted and self.encrypted and self.luks_version != member.raw_device.format.luks_version:
- member.raw_device.format.luks_version = self.luks_version
- if member_encrypted and self.encrypted and self.luks_sector_size != member.raw_device.format.luks_sector_size:
- member.raw_device.format.luks_sector_size = self.luks_sector_size
+ if member_encrypted and self.encrypted and self.luks_version != member.slave.format.luks_version:
+ member.slave.format.luks_version = self.luks_version
+ if member_encrypted and self.encrypted and self.luks_sector_size != member.slave.format.luks_sector_size:
+ member.slave.format.luks_sector_size = self.luks_sector_size
##
# Prepare previously allocated member partitions for reallocation.
@@ -1262,7 +1262,7 @@ class PartitionSetFactory(PartitionFactory):
if isinstance(member, LUKSDevice):
self.storage.destroy_device(member)
- member = member.raw_device
+ member = member.slave
self.storage.destroy_device(member)
diff --git a/blivet/devices/dm.py b/blivet/devices/dm.py
index 3529f61c..508a6f89 100644
--- a/blivet/devices/dm.py
+++ b/blivet/devices/dm.py
@@ -154,6 +154,11 @@ class DMDevice(StorageDevice):
log_method_call(self, self.name, status=self.status)
super(DMDevice, self)._set_name(value)
+ @property
+ def slave(self):
+ """ This device's backing device. """
+ return self.parents[0]
+
class DMLinearDevice(DMDevice):
_type = "dm-linear"
@@ -189,8 +194,8 @@ class DMLinearDevice(DMDevice):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status,
controllable=self.controllable)
- parent_length = self.parents[0].current_size / LINUX_SECTOR_SIZE
- blockdev.dm.create_linear(self.name, self.parents[0].path, parent_length,
+ slave_length = self.slave.current_size / LINUX_SECTOR_SIZE
+ blockdev.dm.create_linear(self.name, self.slave.path, slave_length,
self.dm_uuid)
def _post_setup(self):
diff --git a/blivet/devices/loop.py b/blivet/devices/loop.py
index 0f4d7775..78f88d7d 100644
--- a/blivet/devices/loop.py
+++ b/blivet/devices/loop.py
@@ -73,7 +73,7 @@ class LoopDevice(StorageDevice):
def update_name(self):
""" Update this device's name. """
- if not self.parents[0].status:
+ if not self.slave.status:
# if the backing device is inactive, so are we
return self.name
@@ -81,7 +81,7 @@ class LoopDevice(StorageDevice):
# if our name is loopN we must already be active
return self.name
- name = blockdev.loop.get_loop_name(self.parents[0].path)
+ name = blockdev.loop.get_loop_name(self.slave.path)
if name.startswith("loop"):
self.name = name
@@ -89,24 +89,24 @@ class LoopDevice(StorageDevice):
@property
def status(self):
- return (self.parents[0].status and
+ return (self.slave.status and
self.name.startswith("loop") and
- blockdev.loop.get_loop_name(self.parents[0].path) == self.name)
+ blockdev.loop.get_loop_name(self.slave.path) == self.name)
@property
def size(self):
- return self.parents[0].size
+ return self.slave.size
def _pre_setup(self, orig=False):
- if not os.path.exists(self.parents[0].path):
- raise errors.DeviceError("specified file (%s) does not exist" % self.parents[0].path)
+ if not os.path.exists(self.slave.path):
+ raise errors.DeviceError("specified file (%s) does not exist" % self.slave.path)
return StorageDevice._pre_setup(self, orig=orig)
def _setup(self, orig=False):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status,
controllable=self.controllable)
- blockdev.loop.setup(self.parents[0].path)
+ blockdev.loop.setup(self.slave.path)
def _post_setup(self):
StorageDevice._post_setup(self)
@@ -123,3 +123,7 @@ class LoopDevice(StorageDevice):
StorageDevice._post_teardown(self, recursive=recursive)
self.name = "tmploop%d" % self.id
self.sysfs_path = ''
+
+ @property
+ def slave(self):
+ return self.parents[0]
diff --git a/blivet/devices/luks.py b/blivet/devices/luks.py
index 5d6d6c65..555f1acd 100644
--- a/blivet/devices/luks.py
+++ b/blivet/devices/luks.py
@@ -66,13 +66,17 @@ class LUKSDevice(DMCryptDevice):
@property
def raw_device(self):
+ return self.slave
+
+ @property
+ def slave(self):
if self._has_integrity:
return self.parents[0].parents[0]
return self.parents[0]
def _get_size(self):
if not self.exists:
- size = self.raw_device.size - crypto.LUKS_METADATA_SIZE
+ size = self.slave.size - crypto.LUKS_METADATA_SIZE
elif self.resizable and self.target_size != Size(0):
size = self.target_size
else:
@@ -80,8 +84,8 @@ class LUKSDevice(DMCryptDevice):
return size
def _set_size(self, newsize):
- if not self.exists and not self.raw_device.exists:
- self.raw_device.size = newsize + crypto.LUKS_METADATA_SIZE
+ if not self.exists and not self.slave.exists:
+ self.slave.size = newsize + crypto.LUKS_METADATA_SIZE
# just run the StorageDevice._set_size to make sure we are in the format limits
super(LUKSDevice, self)._set_size(newsize - crypto.LUKS_METADATA_SIZE)
@@ -108,22 +112,22 @@ class LUKSDevice(DMCryptDevice):
raise ValueError("size is smaller than the minimum for this device")
# don't allow larger luks than size (or target size) of backing device
- if newsize > (self.raw_device.size - crypto.LUKS_METADATA_SIZE):
+ if newsize > (self.slave.size - crypto.LUKS_METADATA_SIZE):
log.error("requested size %s is larger than size of the backing device %s",
- newsize, self.raw_device.size)
+ newsize, self.slave.size)
raise ValueError("size is larger than the size of the backing device")
if self.align_target_size(newsize) != newsize:
raise ValueError("new size would violate alignment requirements")
def _get_target_size(self):
- return self.raw_device.format.target_size
+ return self.slave.format.target_size
@property
def max_size(self):
""" The maximum size this luks device can be. Maximum is based on the
maximum size of the backing device. """
- max_luks = self.raw_device.max_size - crypto.LUKS_METADATA_SIZE
+ max_luks = self.slave.max_size - crypto.LUKS_METADATA_SIZE
max_format = self.format.max_size
return min(max_luks, max_format) if max_format else max_luks
@@ -131,7 +135,7 @@ class LUKSDevice(DMCryptDevice):
def resizable(self):
""" Can this device be resized? """
return (self._resizable and self.exists and self.format.resizable and
- self.raw_device.resizable and not self._has_integrity)
+ self.slave.resizable and not self._has_integrity)
def resize(self):
# size of LUKSDevice depends on size of the LUKS format on backing
@@ -139,7 +143,7 @@ class LUKSDevice(DMCryptDevice):
log_method_call(self, self.name, status=self.status)
def _post_create(self):
- self.name = self.raw_device.format.map_name
+ self.name = self.slave.format.map_name
StorageDevice._post_create(self)
def _post_teardown(self, recursive=False):
@@ -162,10 +166,10 @@ class LUKSDevice(DMCryptDevice):
self.name = new_name
def dracut_setup_args(self):
- return set(["rd.luks.uuid=luks-%s" % self.raw_device.format.uuid])
+ return set(["rd.luks.uuid=luks-%s" % self.slave.format.uuid])
def populate_ksdata(self, data):
- self.raw_device.populate_ksdata(data)
+ self.slave.populate_ksdata(data)
data.encrypted = True
super(LUKSDevice, self).populate_ksdata(data)
diff --git a/blivet/errors.py b/blivet/errors.py
index fd51283f..f6bf853a 100644
--- a/blivet/errors.py
+++ b/blivet/errors.py
@@ -192,7 +192,7 @@ class DeviceTreeError(StorageError):
pass
-class NoParentsError(DeviceTreeError):
+class NoSlavesError(DeviceTreeError):
pass
diff --git a/blivet/partitioning.py b/blivet/partitioning.py
index 53f9cc3f..ca0a55d1 100644
--- a/blivet/partitioning.py
+++ b/blivet/partitioning.py
@@ -32,7 +32,7 @@ import _ped
from .errors import DeviceError, PartitioningError, AlignmentError
from .flags import flags
-from .devices import Device, PartitionDevice, device_path_to_name
+from .devices import Device, PartitionDevice, LUKSDevice, device_path_to_name
from .size import Size
from .i18n import _
from .util import stringize, unicodeize, compare
@@ -1632,7 +1632,15 @@ class TotalSizeSet(object):
:param size: the target combined size
:type size: :class:`~.size.Size`
"""
- self.devices = [d.raw_device for d in devices]
+ self.devices = []
+ for device in devices:
+ if isinstance(device, LUKSDevice):
+ partition = device.slave
+ else:
+ partition = device
+
+ self.devices.append(partition)
+
self.size = size
self.requests = []
@@ -1670,7 +1678,15 @@ class SameSizeSet(object):
:keyword max_size: the maximum size for growable devices
:type max_size: :class:`~.size.Size`
"""
- self.devices = [d.raw_device for d in devices]
+ self.devices = []
+ for device in devices:
+ if isinstance(device, LUKSDevice):
+ partition = device.slave
+ else:
+ partition = device
+
+ self.devices.append(partition)
+
self.size = size / len(devices)
self.grow = grow
self.max_size = max_size
diff --git a/blivet/populator/helpers/dm.py b/blivet/populator/helpers/dm.py
index 30e99aa1..770736b0 100644
--- a/blivet/populator/helpers/dm.py
+++ b/blivet/populator/helpers/dm.py
@@ -46,13 +46,13 @@ class DMDevicePopulator(DevicePopulator):
name = udev.device_get_name(self.data)
log_method_call(self, name=name)
sysfs_path = udev.device_get_sysfs_path(self.data)
- parent_devices = self._devicetree._add_parent_devices(self.data)
+ slave_devices = self._devicetree._add_slave_devices(self.data)
device = self._devicetree.get_device_by_name(name)
if device is None:
device = DMDevice(name, dm_uuid=self.data.get('DM_UUID'),
sysfs_path=sysfs_path, exists=True,
- parents=[parent_devices[0]])
+ parents=[slave_devices[0]])
device.protected = True
device.controllable = False
self._devicetree._add_device(device)
diff --git a/blivet/populator/helpers/luks.py b/blivet/populator/helpers/luks.py
index 52795a98..51488691 100644
--- a/blivet/populator/helpers/luks.py
+++ b/blivet/populator/helpers/luks.py
@@ -43,7 +43,7 @@ class LUKSDevicePopulator(DevicePopulator):
return udev.device_is_dm_luks(data)
def run(self):
- parents = self._devicetree._add_parent_devices(self.data)
+ parents = self._devicetree._add_slave_devices(self.data)
device = LUKSDevice(udev.device_get_name(self.data),
sysfs_path=udev.device_get_sysfs_path(self.data),
parents=parents,
@@ -58,7 +58,7 @@ class IntegrityDevicePopulator(DevicePopulator):
return udev.device_is_dm_integrity(data)
def run(self):
- parents = self._devicetree._add_parent_devices(self.data)
+ parents = self._devicetree._add_slave_devices(self.data)
device = IntegrityDevice(udev.device_get_name(self.data),
sysfs_path=udev.device_get_sysfs_path(self.data),
parents=parents,
diff --git a/blivet/populator/helpers/lvm.py b/blivet/populator/helpers/lvm.py
index c7adfa4e..b1626306 100644
--- a/blivet/populator/helpers/lvm.py
+++ b/blivet/populator/helpers/lvm.py
@@ -57,7 +57,7 @@ class LVMDevicePopulator(DevicePopulator):
log.warning("found non-vg device with name %s", vg_name)
device = None
- self._devicetree._add_parent_devices(self.data)
+ self._devicetree._add_slave_devices(self.data)
# LVM provides no means to resolve conflicts caused by duplicated VG
# names, so we're just being optimistic here. Woo!
diff --git a/blivet/populator/helpers/mdraid.py b/blivet/populator/helpers/mdraid.py
index 3479e3f7..76aebf25 100644
--- a/blivet/populator/helpers/mdraid.py
+++ b/blivet/populator/helpers/mdraid.py
@@ -31,7 +31,7 @@ from ... import udev
from ...devicelibs import raid
from ...devices import MDRaidArrayDevice, MDContainerDevice
from ...devices import device_path_to_name
-from ...errors import DeviceError, NoParentsError
+from ...errors import DeviceError, NoSlavesError
from ...flags import flags
from ...storage_log import log_method_call
from .devicepopulator import DevicePopulator
@@ -52,12 +52,12 @@ class MDDevicePopulator(DevicePopulator):
log_method_call(self, name=name)
try:
- self._devicetree._add_parent_devices(self.data)
- except NoParentsError:
- log.error("no parents found for mdarray %s, skipping", name)
+ self._devicetree._add_slave_devices(self.data)
+ except NoSlavesError:
+ log.error("no slaves found for mdarray %s, skipping", name)
return None
- # try to get the device again now that we've got all the parents
+ # try to get the device again now that we've got all the slaves
device = self._devicetree.get_device_by_name(name, incomplete=flags.allow_imperfect_devices)
if device is None:
@@ -74,8 +74,8 @@ class MDDevicePopulator(DevicePopulator):
device.name = name
if device is None:
- # if we get here, we found all of the parent devices and
- # something must be wrong -- if all of the parents are in
+ # if we get here, we found all of the slave devices and
+ # something must be wrong -- if all of the slaves are in
# the tree, this device should be as well
if name is None:
name = udev.device_get_name(self.data)
diff --git a/blivet/populator/helpers/multipath.py b/blivet/populator/helpers/multipath.py
index 96c0a9ad..10c745bf 100644
--- a/blivet/populator/helpers/multipath.py
+++ b/blivet/populator/helpers/multipath.py
@@ -40,13 +40,13 @@ class MultipathDevicePopulator(DevicePopulator):
name = udev.device_get_name(self.data)
log_method_call(self, name=name)
- parent_devices = self._devicetree._add_parent_devices(self.data)
+ slave_devices = self._devicetree._add_slave_devices(self.data)
device = None
- if parent_devices:
- device = MultipathDevice(name, parents=parent_devices,
+ if slave_devices:
+ device = MultipathDevice(name, parents=slave_devices,
sysfs_path=udev.device_get_sysfs_path(self.data),
- wwn=parent_devices[0].wwn)
+ wwn=slave_devices[0].wwn)
self._devicetree._add_device(device)
return device
diff --git a/blivet/populator/populator.py b/blivet/populator/populator.py
index 75bb1741..d252281d 100644
--- a/blivet/populator/populator.py
+++ b/blivet/populator/populator.py
@@ -31,7 +31,7 @@ gi.require_version("BlockDev", "2.0")
from gi.repository import BlockDev as blockdev
-from ..errors import DeviceError, DeviceTreeError, NoParentsError
+from ..errors import DeviceError, DeviceTreeError, NoSlavesError
from ..devices import DMLinearDevice, DMRaidArrayDevice
from ..devices import FileDevice, LoopDevice
from ..devices import MDRaidArrayDevice
@@ -92,55 +92,56 @@ class PopulatorMixin(object):
self._cleanup = False
- def _add_parent_devices(self, info):
- """ Add all parents of a device, raising DeviceTreeError on failure.
+ def _add_slave_devices(self, info):
+ """ Add all slaves of a device, raising DeviceTreeError on failure.
:param :class:`pyudev.Device` info: the device's udev info
- :raises: :class:`~.errors.DeviceTreeError if no parents are found or
- if we fail to add any parent
- :returns: a list of parent devices
+ :raises: :class:`~.errors.DeviceTreeError if no slaves are found or
+ if we fail to add any slave
+ :returns: a list of slave devices
:rtype: list of :class:`~.StorageDevice`
"""
name = udev.device_get_name(info)
sysfs_path = udev.device_get_sysfs_path(info)
- parent_dir = os.path.normpath("%s/slaves" % sysfs_path)
- parent_names = os.listdir(parent_dir)
- parent_devices = []
- if not parent_names:
- log.error("no parents found for %s", name)
- raise NoParentsError("no parents found for device %s" % name)
-
- for parent_name in parent_names:
- path = os.path.normpath("%s/%s" % (parent_dir, parent_name))
- parent_info = udev.get_device(os.path.realpath(path))
-
- if not parent_info:
- msg = "unable to get udev info for %s" % parent_name
+ slave_dir = os.path.normpath("%s/slaves" % sysfs_path)
+ slave_names = os.listdir(slave_dir)
+ slave_devices = []
+ if not slave_names:
+ log.error("no slaves found for %s", name)
+ raise NoSlavesError("no slaves found for device %s" % name)
+
+ for slave_name in slave_names:
+ path = os.path.normpath("%s/%s" % (slave_dir, slave_name))
+ slave_info = udev.get_device(os.path.realpath(path))
+
+ if not slave_info:
+ msg = "unable to get udev info for %s" % slave_name
raise DeviceTreeError(msg)
# cciss in sysfs is "cciss!cXdYpZ" but we need "cciss/cXdYpZ"
- parent_name = udev.device_get_name(parent_info).replace("!", "/")
-
- parent_dev = self.get_device_by_name(parent_name)
- if not parent_dev and parent_info:
- # we haven't scanned the parent yet, so do it now
- self.handle_device(parent_info)
- parent_dev = self.get_device_by_name(parent_name)
- if parent_dev is None:
+ slave_name = udev.device_get_name(slave_info).replace("!", "/")
+
+ slave_dev = self.get_device_by_name(slave_name)
+ if not slave_dev and slave_info:
+ # we haven't scanned the slave yet, so do it now
+ self.handle_device(slave_info)
+ slave_dev = self.get_device_by_name(slave_name)
+ if slave_dev is None:
if udev.device_is_dm_lvm(info):
- if parent_name not in lvs_info.cache:
+ if slave_name not in lvs_info.cache:
# we do not expect hidden lvs to be in the tree
continue
- # if the current parent is still not in
+ # if the current slave is still not in
# the tree, something has gone wrong
- log.error("failure scanning device %s: could not add parent %s", name, parent_name)
- msg = "failed to add parent %s of device %s" % (parent_name, name)
+ log.error("failure scanning device %s: could not add slave %s", name, slave_name)
+ msg = "failed to add slave %s of device %s" % (slave_name,
+ name)
raise DeviceTreeError(msg)
- parent_devices.append(parent_dev)
+ slave_devices.append(slave_dev)
- return parent_devices
+ return slave_devices
def _add_name(self, name):
if name not in self.names:
@@ -317,7 +318,7 @@ class PopulatorMixin(object):
continue
# Make sure lvm doesn't get confused by PVs that belong to
- # incomplete VGs. We will remove the PVs from the reject list when/if
+ # incomplete VGs. We will remove the PVs from the blacklist when/if
# the time comes to remove the incomplete VG and its PVs.
for pv in vg.pvs:
lvm.lvm_cc_addFilterRejectRegexp(pv.name)
diff --git a/blivet/threads.py b/blivet/threads.py
index 7e6d3105..a70deb69 100644
--- a/blivet/threads.py
+++ b/blivet/threads.py
@@ -63,11 +63,12 @@ class SynchronizedMeta(type):
"""
def __new__(cls, name, bases, dct):
new_dct = {}
+ blacklist = dct.get('_unsynchronized_methods', [])
for n in dct:
obj = dct[n]
# Do not decorate class or static methods.
- if n in dct.get('_unsynchronized_methods', []):
+ if n in blacklist:
pass
elif isinstance(obj, FunctionType):
obj = exclusive(obj)
diff --git a/blivet/udev.py b/blivet/udev.py
index a8297f3f..e1b67845 100644
--- a/blivet/udev.py
+++ b/blivet/udev.py
@@ -39,7 +39,7 @@ from gi.repository import BlockDev as blockdev
global_udev = pyudev.Context()
log = logging.getLogger("blivet")
-ignored_device_names = []
+device_name_blacklist = []
""" device name regexes to ignore; this should be empty by default """
@@ -77,7 +77,7 @@ def get_devices(subsystem="block"):
result = []
for device in global_udev.list_devices(subsystem=subsystem):
- if not __is_ignored_blockdev(device.sys_name):
+ if not __is_blacklisted_blockdev(device.sys_name):
dev = device_to_dict(device)
result.append(dev)
@@ -176,13 +176,13 @@ def resolve_glob(glob):
return ret
-def __is_ignored_blockdev(dev_name):
+def __is_blacklisted_blockdev(dev_name):
"""Is this a blockdev we never want for an install?"""
if dev_name.startswith("ram") or dev_name.startswith("fd"):
return True
- if ignored_device_names:
- if any(re.search(expr, dev_name) for expr in ignored_device_names):
+ if device_name_blacklist:
+ if any(re.search(expr, dev_name) for expr in device_name_blacklist):
return True
dev_path = "/sys/class/block/%s" % dev_name
@@ -374,7 +374,7 @@ def device_is_disk(info):
device_is_dm_lvm(info) or
device_is_dm_crypt(info) or
(device_is_md(info) and
- (not device_get_md_container(info) and not all(device_is_disk(d) for d in device_get_parents(info))))))
+ (not device_get_md_container(info) and not all(device_is_disk(d) for d in device_get_slaves(info))))))
def device_is_partition(info):
@@ -453,18 +453,18 @@ def device_get_devname(info):
return info.get('DEVNAME')
-def device_get_parents(info):
- """ Return a list of udev device objects representing this device's parents. """
- parents_dir = device_get_sysfs_path(info) + "/slaves/"
+def device_get_slaves(info):
+ """ Return a list of udev device objects representing this device's slaves. """
+ slaves_dir = device_get_sysfs_path(info) + "/slaves/"
names = list()
- if os.path.isdir(parents_dir):
- names = os.listdir(parents_dir)
+ if os.path.isdir(slaves_dir):
+ names = os.listdir(slaves_dir)
- parents = list()
+ slaves = list()
for name in names:
- parents.append(get_device(device_node="/dev/" + name))
+ slaves.append(get_device(device_node="/dev/" + name))
- return parents
+ return slaves
def device_get_holders(info):
@@ -736,7 +736,7 @@ def device_get_partition_disk(info):
disk = None
majorminor = info.get("ID_PART_ENTRY_DISK")
sysfs_path = device_get_sysfs_path(info)
- parents_dir = "%s/slaves" % sysfs_path
+ slaves_dir = "%s/slaves" % sysfs_path
if majorminor:
major, minor = majorminor.split(":")
for device in get_devices():
@@ -744,8 +744,8 @@ def device_get_partition_disk(info):
disk = device_get_name(device)
break
elif device_is_dm_partition(info):
- if os.path.isdir(parents_dir):
- parents = os.listdir(parents_dir)
+ if os.path.isdir(slaves_dir):
+ parents = os.listdir(slaves_dir)
if len(parents) == 1:
disk = resolve_devspec(parents[0].replace('!', '/'))
else:
diff --git a/tests/devicefactory_test.py b/tests/devicefactory_test.py
index dc0d6408..dfd78a7a 100644
--- a/tests/devicefactory_test.py
+++ b/tests/devicefactory_test.py
@@ -112,9 +112,9 @@ class DeviceFactoryTestCase(unittest.TestCase):
kwargs.get("encrypted", False) or
kwargs.get("container_encrypted", False))
if kwargs.get("encrypted", False):
- self.assertEqual(device.parents[0].format.luks_version,
+ self.assertEqual(device.slave.format.luks_version,
kwargs.get("luks_version", crypto.DEFAULT_LUKS_VERSION))
- self.assertEqual(device.raw_device.format.luks_sector_size,
+ self.assertEqual(device.slave.format.luks_sector_size,
kwargs.get("luks_sector_size", 0))
self.assertTrue(set(device.disks).issubset(kwargs["disks"]))
@@ -354,7 +354,7 @@ class LVMFactoryTestCase(DeviceFactoryTestCase):
device = args[0]
if kwargs.get("encrypted"):
- container = device.parents[0].container
+ container = device.slave.container
else:
container = device.container
@@ -373,7 +373,7 @@ class LVMFactoryTestCase(DeviceFactoryTestCase):
self.assertIsInstance(pv, member_class)
if pv.encrypted:
- self.assertEqual(pv.parents[0].format.luks_version,
+ self.assertEqual(pv.slave.format.luks_version,
kwargs.get("luks_version", crypto.DEFAULT_LUKS_VERSION))
@patch("blivet.formats.lvmpv.LVMPhysicalVolume.formattable", return_value=True)
@@ -589,7 +589,7 @@ class LVMThinPFactoryTestCase(LVMFactoryTestCase):
device = args[0]
if kwargs.get("encrypted", False):
- thinlv = device.parents[0]
+ thinlv = device.slave
else:
thinlv = device
diff --git a/tests/devices_test/size_test.py b/tests/devices_test/size_test.py
index d0c0a3f4..a1efa86d 100644
--- a/tests/devices_test/size_test.py
+++ b/tests/devices_test/size_test.py
@@ -107,8 +107,8 @@ class LUKSDeviceSizeTest(StorageDeviceSizeTest):
def _get_device(self, *args, **kwargs):
exists = kwargs.get("exists", False)
- parent = StorageDevice(*args, size=kwargs["size"] + crypto.LUKS_METADATA_SIZE, exists=exists)
- return LUKSDevice(*args, **kwargs, parents=[parent])
+ slave = StorageDevice(*args, size=kwargs["size"] + crypto.LUKS_METADATA_SIZE, exists=exists)
+ return LUKSDevice(*args, **kwargs, parents=[slave])
def test_size_getter(self):
initial_size = Size("10 GiB")
@@ -116,4 +116,4 @@ class LUKSDeviceSizeTest(StorageDeviceSizeTest):
# for LUKS size depends on the backing device size
self.assertEqual(dev.size, initial_size)
- self.assertEqual(dev.raw_device.size, initial_size + crypto.LUKS_METADATA_SIZE)
+ self.assertEqual(dev.slave.size, initial_size + crypto.LUKS_METADATA_SIZE)
diff --git a/tests/populator_test.py b/tests/populator_test.py
index a7748a9d..531ec74b 100644
--- a/tests/populator_test.py
+++ b/tests/populator_test.py
@@ -81,7 +81,7 @@ class DMDevicePopulatorTestCase(PopulatorHelperTestCase):
@patch.object(DeviceTree, "get_device_by_name")
@patch.object(DMDevice, "status", return_value=True)
@patch.object(DMDevice, "update_sysfs_path")
- @patch.object(DeviceTree, "_add_parent_devices")
+ @patch.object(DeviceTree, "_add_slave_devices")
@patch("blivet.udev.device_get_name")
@patch("blivet.udev.device_get_sysfs_path", return_value=sentinel.sysfs_path)
def test_run(self, *args):
@@ -90,7 +90,7 @@ class DMDevicePopulatorTestCase(PopulatorHelperTestCase):
devicetree = DeviceTree()
- # The general case for dm devices is that adding the parent devices
+ # The general case for dm devices is that adding the slave/parent devices
# will result in the dm device itself being in the tree.
device = Mock()
devicetree.get_device_by_name.return_value = device
@@ -99,7 +99,7 @@ class DMDevicePopulatorTestCase(PopulatorHelperTestCase):
parent = Mock()
parent.parents = []
- devicetree._add_parent_devices.return_value = [parent]
+ devicetree._add_slave_devices.return_value = [parent]
devicetree._add_device(parent)
devicetree.get_device_by_name.return_value = None
device_name = "dmdevice"
@@ -228,7 +228,7 @@ class LVMDevicePopulatorTestCase(PopulatorHelperTestCase):
# could be the first helper class checked.
@patch.object(DeviceTree, "get_device_by_name")
- @patch.object(DeviceTree, "_add_parent_devices")
+ @patch.object(DeviceTree, "_add_slave_devices")
@patch("blivet.udev.device_get_name")
@patch("blivet.udev.device_get_lv_vg_name")
def test_run(self, *args):
@@ -240,7 +240,7 @@ class LVMDevicePopulatorTestCase(PopulatorHelperTestCase):
devicetree = DeviceTree()
data = Mock()
- # Add parent devices and then look up the device.
+ # Add slave/parent devices and then look up the device.
device_get_name.return_value = sentinel.lv_name
devicetree.get_device_by_name.return_value = None
@@ -260,7 +260,7 @@ class LVMDevicePopulatorTestCase(PopulatorHelperTestCase):
call(sentinel.vg_name),
call(sentinel.lv_name)])
- # Add parent devices, but the device is still not in the tree
+ # Add slave/parent devices, but the device is still not in the tree
get_device_by_name.side_effect = None
get_device_by_name.return_value = None
self.assertEqual(helper.run(), None)
@@ -625,7 +625,7 @@ class MDDevicePopulatorTestCase(PopulatorHelperTestCase):
# could be the first helper class checked.
@patch.object(DeviceTree, "get_device_by_name")
- @patch.object(DeviceTree, "_add_parent_devices")
+ @patch.object(DeviceTree, "_add_slave_devices")
@patch("blivet.udev.device_get_name")
@patch("blivet.udev.device_get_md_uuid")
@patch("blivet.udev.device_get_md_name")
@@ -636,7 +636,7 @@ class MDDevicePopulatorTestCase(PopulatorHelperTestCase):
devicetree = DeviceTree()
- # base case: _add_parent_devices gets the array into the tree
+ # base case: _add_slave_devices gets the array into the tree
data = Mock()
device = Mock()
device.parents = []
@@ -699,12 +699,12 @@ class MultipathDevicePopulatorTestCase(PopulatorHelperTestCase):
# could be the first helper class checked.
@patch("blivet.udev.device_get_sysfs_path")
- @patch.object(DeviceTree, "_add_parent_devices")
+ @patch.object(DeviceTree, "_add_slave_devices")
@patch("blivet.udev.device_get_name")
def test_run(self, *args):
"""Test multipath device populator."""
device_get_name = args[0]
- add_parent_devices = args[1]
+ add_slave_devices = args[1]
devicetree = DeviceTree()
# set up some fake udev data to verify handling of specific entries
@@ -719,13 +719,13 @@ class MultipathDevicePopulatorTestCase(PopulatorHelperTestCase):
device_name = "mpathtest"
device_get_name.return_value = device_name
- parent_1 = Mock(tags=set(), wwn=wwn[2:])
- parent_1.parents = []
- parent_2 = Mock(tags=set(), wwn=wwn[2:])
- parent_2.parents = []
- devicetree._add_device(parent_1)
- devicetree._add_device(parent_2)
- add_parent_devices.return_value = [parent_1, parent_2]
+ slave_1 = Mock(tags=set(), wwn=wwn[2:])
+ slave_1.parents = []
+ slave_2 = Mock(tags=set(), wwn=wwn[2:])
+ slave_2.parents = []
+ devicetree._add_device(slave_1)
+ devicetree._add_device(slave_2)
+ add_slave_devices.return_value = [slave_1, slave_2]
helper = self.helper_class(devicetree, data)
diff --git a/tests/udev_test.py b/tests/udev_test.py
index f9b10620..d30a647b 100644
--- a/tests/udev_test.py
+++ b/tests/udev_test.py
@@ -45,11 +45,11 @@ class UdevTest(unittest.TestCase):
@mock.patch('blivet.udev.device_is_dm_crypt', return_value=False)
@mock.patch('blivet.udev.device_is_md')
@mock.patch('blivet.udev.device_get_md_container')
- @mock.patch('blivet.udev.device_get_parents')
+ @mock.patch('blivet.udev.device_get_slaves')
def test_udev_device_is_disk_md(self, *args):
import blivet.udev
info = dict(DEVTYPE='disk', SYS_PATH=mock.sentinel.md_path)
- (device_get_parents, device_get_md_container, device_is_md) = args[:3] # pylint: disable=unbalanced-tuple-unpacking
+ (device_get_slaves, device_get_md_container, device_is_md) = args[:3] # pylint: disable=unbalanced-tuple-unpacking
disk_parents = [dict(DEVTYPE="disk", SYS_PATH='/fake/path/2'),
dict(DEVTYPE="disk", SYS_PATH='/fake/path/3')]
@@ -64,20 +64,20 @@ class UdevTest(unittest.TestCase):
# Intel FW RAID (MD RAID w/ container layer)
# device_get_container will return some mock value which will evaluate to True
device_get_md_container.return_value = mock.sentinel.md_container
- device_get_parents.side_effect = lambda info: list()
+ device_get_slaves.side_effect = lambda info: list()
self.assertTrue(blivet.udev.device_is_disk(info))
# Normal MD RAID
- device_get_parents.side_effect = lambda info: partition_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
+ device_get_slaves.side_effect = lambda info: partition_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
device_get_md_container.return_value = None
self.assertFalse(blivet.udev.device_is_disk(info))
# Dell FW RAID (MD RAID whose members are all whole disks)
- device_get_parents.side_effect = lambda info: disk_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
+ device_get_slaves.side_effect = lambda info: disk_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
self.assertTrue(blivet.udev.device_is_disk(info))
# Normal MD RAID (w/ at least one non-disk member)
- device_get_parents.side_effect = lambda info: mixed_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
+ device_get_slaves.side_effect = lambda info: mixed_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
self.assertFalse(blivet.udev.device_is_disk(info))
diff --git a/tests/vmtests/vmbackedtestcase.py b/tests/vmtests/vmbackedtestcase.py
index 797bac85..6255104f 100644
--- a/tests/vmtests/vmbackedtestcase.py
+++ b/tests/vmtests/vmbackedtestcase.py
@@ -50,7 +50,7 @@ class VMBackedTestCase(unittest.TestCase):
defined in set_up_disks.
"""
- udev.ignored_device_names = [r'^zram']
+ udev.device_name_blacklist = [r'^zram']
#
# create disk images
--
2.31.1
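
The test hunks above exercise the two renames this revert restores; a brief sketch of the restored names in use (the inner check guards against device nodes that cannot be resolved):

from blivet import udev

# restored module-level name for device name regexes to ignore
udev.device_name_blacklist = [r'^zram']

for info in udev.get_devices():
    # restored accessor built on the /sys/.../slaves directory
    for slave_info in udev.device_get_slaves(info):
        if slave_info:
            print(udev.device_get_name(info), "<-", udev.device_get_name(slave_info))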


@@ -0,0 +1,35 @@
From 8ece3da18b1abb89320d02f4475002e6a3ed7875 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 20 May 2021 13:40:26 +0200
Subject: [PATCH] Fix activating old style LVM snapshots
The old style snapshots are activated together with the origin LV,
so we need to make sure the origin is activated to be able to
remove the snapshot or its format.
Resolves: rhbz#1961739
---
blivet/devices/lvm.py | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index a55515fcb..fb57804d9 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -1461,9 +1461,13 @@ def _set_format(self, fmt): # pylint: disable=unused-argument
self._update_format_from_origin()
@old_snapshot_specific
- def setup(self, orig=False):
- # the old snapshot cannot be setup and torn down
- pass
+ def setup(self, orig=False): # pylint: disable=unused-argument
+ # the old snapshot is activated together with the origin
+ if self.origin and not self.origin.status:
+ try:
+ self.origin.setup()
+ except blockdev.LVMError as lvmerr:
+ log.error("failed to activate origin LV: %s", lvmerr)
@old_snapshot_specific
def teardown(self, recursive=False):
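
A short usage sketch of the behaviour this patch fixes; the device name is hypothetical, and the point is that setup() on an old-style snapshot now activates its origin LV instead of silently doing nothing:

import blivet

b = blivet.Blivet()
b.reset()

# hypothetical name of an existing old-style (non-thin) LVM snapshot
snap = b.devicetree.get_device_by_name("testvg-snap1")

# before the patch this was a no-op, so removing the snapshot or wiping
# its format could fail while the origin LV was inactive
snap.setup()

b.destroy_device(snap)
b.do_it()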


@@ -0,0 +1,75 @@
From 344e624f91010b6041c22ee8a24c9305b82af969 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 18 May 2021 12:54:02 +0200
Subject: [PATCH] Fix resolving devices with names that look like BIOS drive
number
A RAID array named "10" will not be resolved because we try to
resolve it using EDD data and after this lookup fails, we don't
try the name.
Resolves: rhbz#1960798
---
blivet/devicetree.py | 18 +++++++++---------
tests/devicetree_test.py | 4 ++++
2 files changed, 13 insertions(+), 9 deletions(-)
diff --git a/blivet/devicetree.py b/blivet/devicetree.py
index 88e9f0e5..f4ae1968 100644
--- a/blivet/devicetree.py
+++ b/blivet/devicetree.py
@@ -634,20 +634,20 @@ class DeviceTreeBase(object):
(label.startswith("'") and label.endswith("'"))):
label = label[1:-1]
device = self.labels.get(label)
- elif re.match(r'(0x)?[A-Fa-f0-9]{2}(p\d+)?$', devspec):
- # BIOS drive number
- (drive, _p, partnum) = devspec.partition("p")
- spec = int(drive, 16)
- for (edd_name, edd_number) in self.edd_dict.items():
- if edd_number == spec:
- device = self.get_device_by_name(edd_name + partnum)
- break
elif options and "nodev" in options.split(","):
device = self.get_device_by_name(devspec)
if not device:
device = self.get_device_by_path(devspec)
else:
- if not devspec.startswith("/dev/"):
+ if re.match(r'(0x)?[A-Fa-f0-9]{2}(p\d+)?$', devspec):
+ # BIOS drive number
+ (drive, _p, partnum) = devspec.partition("p")
+ spec = int(drive, 16)
+ for (edd_name, edd_number) in self.edd_dict.items():
+ if edd_number == spec:
+ device = self.get_device_by_name(edd_name + partnum)
+ break
+ if not device and not devspec.startswith("/dev/"):
device = self.get_device_by_name(devspec)
if not device:
devspec = "/dev/" + devspec
diff --git a/tests/devicetree_test.py b/tests/devicetree_test.py
index 11f8469d..b033343d 100644
--- a/tests/devicetree_test.py
+++ b/tests/devicetree_test.py
@@ -49,6 +49,9 @@ class DeviceTreeTestCase(unittest.TestCase):
dev3 = StorageDevice("sdp2", exists=True)
dt._add_device(dev3)
+ dev4 = StorageDevice("10", exists=True)
+ dt._add_device(dev4)
+
dt.edd_dict.update({"dev1": 0x81,
"dev2": 0x82})
@@ -62,6 +65,7 @@ class DeviceTreeTestCase(unittest.TestCase):
self.assertEqual(dt.resolve_device("0x82"), dev2)
self.assertEqual(dt.resolve_device(dev3.name), dev3)
+ self.assertEqual(dt.resolve_device(dev4.name), dev4)
def test_device_name(self):
# check that devicetree.names property contains all device's names
--
2.31.1
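
A minimal sketch of the fixed lookup, mirroring the new devicetree_test.py case above: with a populated tree that contains an MD array literally named "10", resolve_device() now falls back to the plain device name once the EDD lookup comes up empty:

import blivet

b = blivet.Blivet()
b.reset()

# "10" matches the BIOS drive number pattern (0x)?[A-Fa-f0-9]{2}, so the
# EDD map is consulted first; the fallback to the name is what the patch adds
device = b.devicetree.resolve_device("10")
print(device)  # the array named "10", not None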


@@ -0,0 +1,151 @@
From dc1e2fe7783748528cac2f7aa516c89d1959b052 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 29 Jul 2021 14:44:22 +0200
Subject: [PATCH] Do not set chunk size for RAID 1
Setting a chunk size for RAID 1 doesn't make sense, and the latest
mdadm started returning an error instead of ignoring the --chunk
option when creating an array.
Resolves: rhbz#1987170
---
blivet/devicelibs/raid.py | 12 ++++++++++
blivet/devices/md.py | 15 ++++++++++---
tests/devices_test/md_test.py | 41 +++++++++++++++++++++++++++++++++--
3 files changed, 63 insertions(+), 5 deletions(-)
diff --git a/blivet/devicelibs/raid.py b/blivet/devicelibs/raid.py
index 19c3fae98..a9e241c7a 100644
--- a/blivet/devicelibs/raid.py
+++ b/blivet/devicelibs/raid.py
@@ -462,6 +462,18 @@ def _pad(self, size, chunk_size):
def _get_recommended_stride(self, member_count):
return None
+ def get_size(self, member_sizes, num_members=None, chunk_size=None, superblock_size_func=None):
+ if not member_sizes:
+ return Size(0)
+
+ if num_members is None:
+ num_members = len(member_sizes)
+
+ min_size = min(member_sizes)
+ superblock_size = superblock_size_func(min_size)
+ min_data_size = self._trim(min_size - superblock_size, chunk_size)
+ return self.get_net_array_size(num_members, min_data_size)
+
RAID1 = RAID1()
ALL_LEVELS.add_raid_level(RAID1)
diff --git a/blivet/devices/md.py b/blivet/devices/md.py
index 69eee93a5..d1a2faf1f 100644
--- a/blivet/devices/md.py
+++ b/blivet/devices/md.py
@@ -138,7 +138,7 @@ def __init__(self, name, level=None, major=None, minor=None, size=None,
if self.exists:
self._chunk_size = self.read_chunk_size()
else:
- self._chunk_size = chunk_size or mdraid.MD_CHUNK_SIZE
+ self.chunk_size = chunk_size or Size(0)
if not self.exists and not isinstance(metadata_version, str):
self.metadata_version = "default"
@@ -208,8 +208,14 @@ def sector_size(self):
@property
def chunk_size(self):
- if self.exists and self._chunk_size == Size(0):
- self._chunk_size = self.read_chunk_size()
+ if self._chunk_size == Size(0):
+ if self.exists:
+ return self.read_chunk_size()
+ else:
+ if self.level == raid.RAID1:
+ return self._chunk_size
+ else:
+ return mdraid.MD_CHUNK_SIZE
return self._chunk_size
@chunk_size.setter
@@ -223,6 +229,9 @@ def chunk_size(self, newsize):
if self.exists:
raise ValueError("cannot set chunk size for an existing device")
+ if self.level == raid.RAID1 and newsize != Size(0):
+ raise ValueError("specifying chunk size is not allowed for raid1")
+
self._chunk_size = newsize
def read_chunk_size(self):
diff --git a/tests/devices_test/md_test.py b/tests/devices_test/md_test.py
index 46df76d3d..47a0fa0cc 100644
--- a/tests/devices_test/md_test.py
+++ b/tests/devices_test/md_test.py
@@ -1,6 +1,11 @@
import six
import unittest
+try:
+ from unittest.mock import patch
+except ImportError:
+ from mock import patch
+
import blivet
from blivet.devices import StorageDevice
@@ -27,9 +32,27 @@ def test_chunk_size1(self):
raid_array = MDRaidArrayDevice(name="raid", level="raid0", member_devices=2,
total_devices=2, parents=[member1, member2])
- # no chunk_size specified -- default value
+ # no chunk_size specified and RAID0 -- default value
self.assertEqual(raid_array.chunk_size, mdraid.MD_CHUNK_SIZE)
+ with patch("blivet.devices.md.blockdev.md.create") as md_create:
+ raid_array._create()
+ md_create.assert_called_with("/dev/md/raid", "raid0", ["/dev/member1", "/dev/member2"],
+ 0, version="default", bitmap=False,
+ chunk_size=mdraid.MD_CHUNK_SIZE)
+
+ raid_array = MDRaidArrayDevice(name="raid", level="raid1", member_devices=2,
+ total_devices=2, parents=[member1, member2])
+
+ # no chunk_size specified and RAID1 -- no chunk size set (0)
+ self.assertEqual(raid_array.chunk_size, Size(0))
+
+ with patch("blivet.devices.md.blockdev.md.create") as md_create:
+ raid_array._create()
+ md_create.assert_called_with("/dev/md/raid", "raid1", ["/dev/member1", "/dev/member2"],
+ 0, version="default", bitmap=True,
+ chunk_size=0)
+
def test_chunk_size2(self):
member1 = StorageDevice("member1", fmt=blivet.formats.get_format("mdmember"),
@@ -40,11 +63,25 @@ def test_chunk_size2(self):
raid_array = MDRaidArrayDevice(name="raid", level="raid0", member_devices=2,
total_devices=2, parents=[member1, member2],
chunk_size=Size("1024 KiB"))
-
self.assertEqual(raid_array.chunk_size, Size("1024 KiB"))
+ # for raid0 setting chunk_size = 0 means "default"
+ raid_array.chunk_size = Size(0)
+ self.assertEqual(raid_array.chunk_size, mdraid.MD_CHUNK_SIZE)
+
with six.assertRaisesRegex(self, ValueError, "new chunk size must be of type Size"):
raid_array.chunk_size = 1
with six.assertRaisesRegex(self, ValueError, "new chunk size must be multiple of 4 KiB"):
raid_array.chunk_size = Size("5 KiB")
+
+ with six.assertRaisesRegex(self, ValueError, "specifying chunk size is not allowed for raid1"):
+ MDRaidArrayDevice(name="raid", level="raid1", member_devices=2,
+ total_devices=2, parents=[member1, member2],
+ chunk_size=Size("1024 KiB"))
+
+ raid_array = MDRaidArrayDevice(name="raid", level="raid1", member_devices=2,
+ total_devices=2, parents=[member1, member2])
+
+ with six.assertRaisesRegex(self, ValueError, "specifying chunk size is not allowed for raid1"):
+ raid_array.chunk_size = Size("512 KiB")
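
The behaviour these tests pin down, condensed into a usage sketch (built from the test setup above, not additional API):

    import blivet
    from blivet.devices import StorageDevice, MDRaidArrayDevice
    from blivet.size import Size

    member1 = StorageDevice("member1", fmt=blivet.formats.get_format("mdmember"),
                            size=Size("1 GiB"))
    member2 = StorageDevice("member2", fmt=blivet.formats.get_format("mdmember"),
                            size=Size("1 GiB"))

    raid1 = MDRaidArrayDevice(name="raid", level="raid1", member_devices=2,
                              total_devices=2, parents=[member1, member2])
    print(raid1.chunk_size)             # Size(0) -- no --chunk is passed to mdadm
    raid1.chunk_size = Size("512 KiB")  # raises ValueError on this branch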


@ -0,0 +1,240 @@
From 3b9a781e138830a190d16c8dd970b800a086de46 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 4 Aug 2021 13:00:53 +0200
Subject: [PATCH 1/3] edd_test: Locate the edd_data based on the test file
location
We can't use the blivet.edd module location when running tests
against an installed version of blivet.
---
tests/devicelibs_test/edd_test.py | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/tests/devicelibs_test/edd_test.py b/tests/devicelibs_test/edd_test.py
index 23d736f4..7ec8d1e6 100644
--- a/tests/devicelibs_test/edd_test.py
+++ b/tests/devicelibs_test/edd_test.py
@@ -1,7 +1,6 @@
import unittest
import mock
import os
-import inspect
import logging
import copy
@@ -110,9 +109,9 @@ class EddTestCase(unittest.TestCase):
name = name[:-1]
if name.startswith("/"):
name = name[1:]
- dirname = os.path.dirname(inspect.getfile(edd))
+ dirname = os.path.abspath(os.path.dirname(__file__))
return os.path.join(dirname,
- "../../tests/devicelibs_test/edd_data/",
+ "edd_data/",
name)
def edd_debug(self, *args):
--
2.31.1
From 7ad3824fceb98e2741820b76a9cfea5add338343 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 4 Aug 2021 13:02:08 +0200
Subject: [PATCH 2/3] tests: Allow running tests without the tests directory in
PYTHONPATH
When running the tests against an installed version of blivet, the
"tests" directory is not in PYTHONPATH, so we need to import all
helper modules using relative paths.
---
tests/action_test.py | 2 +-
tests/devicelibs_test/edd_test.py | 2 +-
tests/{ => devicelibs_test}/lib.py | 0
tests/formats_test/fs_test.py | 2 +-
tests/formats_test/fslabeling.py | 2 +-
tests/formats_test/fstesting.py | 2 +-
tests/formats_test/fsuuid.py | 2 +-
tests/formats_test/labeling_test.py | 2 +-
tests/{ => formats_test}/loopbackedtestcase.py | 0
tests/formats_test/luks_test.py | 2 +-
tests/formats_test/lvmpv_test.py | 2 +-
tests/partitioning_test.py | 2 +-
12 files changed, 10 insertions(+), 10 deletions(-)
rename tests/{ => devicelibs_test}/lib.py (100%)
rename tests/{ => formats_test}/loopbackedtestcase.py (100%)
diff --git a/tests/action_test.py b/tests/action_test.py
index 38a2e872..1e84c20b 100644
--- a/tests/action_test.py
+++ b/tests/action_test.py
@@ -6,7 +6,7 @@ if PY3:
else:
from mock import Mock
-from tests.storagetestcase import StorageTestCase
+from storagetestcase import StorageTestCase
import blivet
from blivet.formats import get_format
from blivet.size import Size
diff --git a/tests/devicelibs_test/edd_test.py b/tests/devicelibs_test/edd_test.py
index 7ec8d1e6..379c7aeb 100644
--- a/tests/devicelibs_test/edd_test.py
+++ b/tests/devicelibs_test/edd_test.py
@@ -6,7 +6,7 @@ import copy
from blivet import arch
from blivet.devicelibs import edd
-from tests import lib
+from . import lib
class FakeDevice(object):
diff --git a/tests/lib.py b/tests/devicelibs_test/lib.py
similarity index 100%
rename from tests/lib.py
rename to tests/devicelibs_test/lib.py
diff --git a/tests/formats_test/fs_test.py b/tests/formats_test/fs_test.py
index ab3499a7..bd643370 100644
--- a/tests/formats_test/fs_test.py
+++ b/tests/formats_test/fs_test.py
@@ -10,7 +10,7 @@ from blivet.errors import DeviceFormatError
from blivet.formats import get_format
from blivet.devices import PartitionDevice, DiskDevice
-from tests import loopbackedtestcase
+from . import loopbackedtestcase
from . import fstesting
diff --git a/tests/formats_test/fslabeling.py b/tests/formats_test/fslabeling.py
index fbb28eee..0e0dc261 100644
--- a/tests/formats_test/fslabeling.py
+++ b/tests/formats_test/fslabeling.py
@@ -2,7 +2,7 @@
import abc
import six
-from tests import loopbackedtestcase
+from . import loopbackedtestcase
from blivet.errors import FSError, FSReadLabelError
from blivet.size import Size
diff --git a/tests/formats_test/fstesting.py b/tests/formats_test/fstesting.py
index 86b2a116..e34584d8 100644
--- a/tests/formats_test/fstesting.py
+++ b/tests/formats_test/fstesting.py
@@ -5,7 +5,7 @@ from six import add_metaclass
import os
import tempfile
-from tests import loopbackedtestcase
+from . import loopbackedtestcase
from blivet.errors import FSError, FSResizeError, DeviceFormatError
from blivet.size import Size, ROUND_DOWN
from blivet.formats import fs
diff --git a/tests/formats_test/fsuuid.py b/tests/formats_test/fsuuid.py
index c8003945..16aa19a6 100644
--- a/tests/formats_test/fsuuid.py
+++ b/tests/formats_test/fsuuid.py
@@ -3,7 +3,7 @@ import abc
import six
from unittest import skipIf
-from tests import loopbackedtestcase
+from . import loopbackedtestcase
from blivet.errors import FSError, FSWriteUUIDError
from blivet.size import Size
from blivet.util import capture_output
diff --git a/tests/formats_test/labeling_test.py b/tests/formats_test/labeling_test.py
index e26cb7df..d24e6619 100644
--- a/tests/formats_test/labeling_test.py
+++ b/tests/formats_test/labeling_test.py
@@ -1,10 +1,10 @@
import unittest
-from tests import loopbackedtestcase
from blivet.formats import device_formats
import blivet.formats.fs as fs
import blivet.formats.swap as swap
+from . import loopbackedtestcase
from . import fslabeling
diff --git a/tests/loopbackedtestcase.py b/tests/formats_test/loopbackedtestcase.py
similarity index 100%
rename from tests/loopbackedtestcase.py
rename to tests/formats_test/loopbackedtestcase.py
diff --git a/tests/formats_test/luks_test.py b/tests/formats_test/luks_test.py
index be0d50b0..5423ebdf 100644
--- a/tests/formats_test/luks_test.py
+++ b/tests/formats_test/luks_test.py
@@ -7,7 +7,7 @@ from blivet.formats.luks import LUKS
from blivet.size import Size
-from tests import loopbackedtestcase
+from . import loopbackedtestcase
class LUKSTestCase(loopbackedtestcase.LoopBackedTestCase):
diff --git a/tests/formats_test/lvmpv_test.py b/tests/formats_test/lvmpv_test.py
index 792a2f1d..da7270d9 100644
--- a/tests/formats_test/lvmpv_test.py
+++ b/tests/formats_test/lvmpv_test.py
@@ -4,7 +4,7 @@ from blivet.formats.lvmpv import LVMPhysicalVolume
from blivet.size import Size
-from tests import loopbackedtestcase
+from . import loopbackedtestcase
class LVMPVTestCase(loopbackedtestcase.LoopBackedTestCase):
diff --git a/tests/partitioning_test.py b/tests/partitioning_test.py
index b7aa5045..a713aaa1 100644
--- a/tests/partitioning_test.py
+++ b/tests/partitioning_test.py
@@ -29,7 +29,7 @@ from blivet.devices.lvm import LVMCacheRequest
from blivet.errors import PartitioningError
-from tests.imagebackedtestcase import ImageBackedTestCase
+from imagebackedtestcase import ImageBackedTestCase
from blivet.blivet import Blivet
from blivet.util import sparsetmpfile
from blivet.formats import get_format
--
2.31.1
From 9ee41c8b60c56ce752e305be73001c7089f43011 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 6 Aug 2021 14:51:01 +0200
Subject: [PATCH 3/3] tests: Print version and blivet location when running
tests
---
tests/run_tests.py | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index 32e3f2d3..8ad8b61a 100644
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -32,6 +32,11 @@ if __name__ == '__main__':
testdir = os.path.abspath(os.path.dirname(__file__))
+ import blivet
+ print("Running tests with Blivet %s from %s" % (blivet.__version__,
+ os.path.abspath(os.path.dirname(blivet.__file__))),
+ file=sys.stderr)
+
if args.testname:
for n in args.testname:
suite.addTests(unittest.TestLoader().loadTestsFromName(n))
--
2.31.1


@ -0,0 +1,65 @@
From 46335861073882b7162221fc0995dc1df3c67749 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 6 Aug 2021 16:37:51 +0200
Subject: [PATCH] Improve error message printed for missing dependencies
The existing error message can be confusing for people who don't
know the internals of blivet and libblockdev, and the information about
what is actually broken or not installed on the system is missing
completely. Example for LVM VDO with a missing kvdo module:
Before:
device type lvmvdopool requires unavailable_dependencies:
libblockdev lvm plugin (vdo technology)
After:
device type lvmvdopool requires unavailable_dependencies:
libblockdev lvm plugin (vdo technology):
libblockdev plugin lvm is loaded but some required technologies
are not available (BD_LVM_TECH_VDO: Kernel module 'kvdo' not
available)
---
blivet/deviceaction.py | 2 +-
blivet/tasks/availability.py | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/blivet/deviceaction.py b/blivet/deviceaction.py
index 56e29215..0458e4be 100644
--- a/blivet/deviceaction.py
+++ b/blivet/deviceaction.py
@@ -173,7 +173,7 @@ class DeviceAction(util.ObjectID):
def _check_device_dependencies(self):
unavailable_dependencies = self.device.unavailable_dependencies
if unavailable_dependencies:
- dependencies_str = ", ".join(str(d) for d in unavailable_dependencies)
+ dependencies_str = ", ".join("%s:\n%s" % (str(d), ", ".join(d.availability_errors)) for d in unavailable_dependencies)
raise DependencyError("device type %s requires unavailable_dependencies: %s" % (self.device.type, dependencies_str))
def apply(self):
diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py
index 1fd80590..1537f3f5 100644
--- a/blivet/tasks/availability.py
+++ b/blivet/tasks/availability.py
@@ -224,7 +224,7 @@ class BlockDevMethod(Method):
try:
self._tech_info.check_fn(tech, mode)
except GLib.GError as e:
- errors.append(str(e))
+ errors.append("%s: %s" % (tech.value_name, e.message))
return errors
def availability_errors(self, resource):
@@ -242,7 +242,7 @@ class BlockDevMethod(Method):
tech_missing = self._check_technologies()
if tech_missing:
return ["libblockdev plugin %s is loaded but some required "
- "technologies are not available:\n%s" % (self._tech_info.plugin_name, tech_missing)]
+ "technologies are not available (%s)" % (self._tech_info.plugin_name, "; ".join(tech_missing))]
else:
return []
--
2.31.1


@ -0,0 +1,90 @@
From 06cafbbbbff0aae3634eb2908d25d0dc46c2048b Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 9 Nov 2021 15:52:48 +0100
Subject: [PATCH] Use bigger chunk size for thinpools bigger than ~15.88 TiB
With our default chunk size of 64 KiB we cannot create thin
pools bigger than 15.88 TiB. Unfortunately we need to specify the
chunk size to be able to calculate thin metadata properly, so we
can't simply leave it to LVM to determine the correct chunk size.
---
blivet/devicelibs/lvm.py | 11 +++++++++++
blivet/devices/lvm.py | 6 +++---
tests/devices_test/lvm_test.py | 11 +++++++++++
3 files changed, 25 insertions(+), 3 deletions(-)
diff --git a/blivet/devicelibs/lvm.py b/blivet/devicelibs/lvm.py
index d56a76ed..cb6f655e 100644
--- a/blivet/devicelibs/lvm.py
+++ b/blivet/devicelibs/lvm.py
@@ -20,6 +20,7 @@
# Author(s): Dave Lehman <dlehman@redhat.com>
#
+import math
import os
import re
@@ -51,6 +52,7 @@ LVM_THINP_MIN_METADATA_SIZE = Size("2 MiB")
LVM_THINP_MAX_METADATA_SIZE = Size("16 GiB")
LVM_THINP_MIN_CHUNK_SIZE = Size("64 KiB")
LVM_THINP_MAX_CHUNK_SIZE = Size("1 GiB")
+LVM_THINP_ADDRESSABLE_CHUNK_SIZE = Size("17455015526400 B") # 15.88 TiB
raid_levels = raid.RAIDLevels(["linear", "striped", "raid1", "raid4", "raid5", "raid6", "raid10"])
raid_seg_types = list(itertools.chain.from_iterable([level.names for level in raid_levels if level.name != "linear"]))
@@ -225,3 +227,12 @@ def is_lvm_name_valid(name):
return False
return True
+
+
+def recommend_thpool_chunk_size(thpool_size):
+ # calculation of the recommended chunk size by LVM is so complicated that we
+ # can't really replicate it, but we know that 64 KiB chunk size gives us
+ # upper limit of ~15.88 TiB so we will just add 64 KiB to the chunk size
+ # for every ~15.88 TiB of thinpool data size
+ return min(math.ceil(thpool_size / LVM_THINP_ADDRESSABLE_CHUNK_SIZE) * LVM_THINP_MIN_CHUNK_SIZE,
+ LVM_THINP_MAX_CHUNK_SIZE)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 51d785d9..c61eeb4b 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -1634,9 +1634,9 @@ class LVMThinPoolMixin(object):
return
# we need to know chunk size to calculate recommended metadata size
- if self._chunk_size == 0:
- self._chunk_size = Size(blockdev.LVM_DEFAULT_CHUNK_SIZE)
- log.debug("Using default chunk size: %s", self._chunk_size)
+ if self._chunk_size == 0 or enforced:
+ self._chunk_size = lvm.recommend_thpool_chunk_size(self._size)
+ log.debug("Using recommended chunk size: %s", self._chunk_size)
old_md_size = self._metadata_size
old_pmspare_size = self.vg.pmspare_size
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
index 4156d0bf..336c5b99 100644
--- a/tests/devices_test/lvm_test.py
+++ b/tests/devices_test/lvm_test.py
@@ -442,6 +442,17 @@ class LVMDeviceTest(unittest.TestCase):
self.assertFalse(pool.exists)
self.assertTrue(lvm.lvremove.called)
+ def test_lvmthinpool_chunk_size(self):
+ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("100 TiB"))
+ vg = LVMVolumeGroupDevice("testvg", parents=[pv])
+ pool = LVMLogicalVolumeDevice("pool1", parents=[vg], size=Size("500 MiB"), seg_type="thin-pool")
+ self.assertEqual(pool.chunk_size, Size("64 KiB"))
+
+ pool.size = Size("16 TiB")
+ pool.autoset_md_size(enforced=True)
+ self.assertEqual(pool.chunk_size, Size("128 KiB"))
+
class TypeSpecificCallsTest(unittest.TestCase):
def test_type_specific_calls(self):
--
2.31.1
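
The arithmetic is easy to check by hand; this sketch just restates the new helper from the patch with its constants inlined:

    import math
    from blivet.size import Size

    LVM_THINP_MIN_CHUNK_SIZE = Size("64 KiB")
    LVM_THINP_MAX_CHUNK_SIZE = Size("1 GiB")
    LVM_THINP_ADDRESSABLE_CHUNK_SIZE = Size("17455015526400 B")  # ~15.88 TiB

    def recommend_thpool_chunk_size(thpool_size):
        # one 64 KiB step for every started ~15.88 TiB of data, capped at 1 GiB
        return min(math.ceil(thpool_size / LVM_THINP_ADDRESSABLE_CHUNK_SIZE) * LVM_THINP_MIN_CHUNK_SIZE,
                   LVM_THINP_MAX_CHUNK_SIZE)

    print(recommend_thpool_chunk_size(Size("10 TiB")))  # 64 KiB
    print(recommend_thpool_chunk_size(Size("16 TiB")))  # 128 KiB, as in the test above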


@ -0,0 +1,53 @@
From b938e224c41021c19775d8675dc4337f1e10d4e3 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 1 Dec 2021 16:28:15 +0100
Subject: [PATCH] iscsi: Replace all log_exception_info calls with log.info
We don't get any useful information from the exception; it's
always the same traceback from a failed DBus call, and we only hit
these paths when a call fails because firmware iSCSI is not supported.
The resulting log message therefore looks like a failure, with the
traceback logged, instead of just debug information.
Resolves: rhbz#2028134
---
blivet/iscsi.py | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/blivet/iscsi.py b/blivet/iscsi.py
index 5ee2082b..bc77ca62 100644
--- a/blivet/iscsi.py
+++ b/blivet/iscsi.py
@@ -22,7 +22,6 @@ from . import udev
from . import util
from .flags import flags
from .i18n import _
-from .storage_log import log_exception_info
from . import safe_dbus
import os
import re
@@ -277,8 +276,8 @@ class iSCSI(object):
'org.freedesktop.DBus.ObjectManager',
'GetManagedObjects',
None)[0]
- except safe_dbus.DBusCallError:
- log_exception_info(log.info, "iscsi: Failed to get active sessions.")
+ except safe_dbus.DBusCallError as e:
+ log.info("iscsi: Failed to get active sessions: %s", str(e))
return []
sessions = (obj for obj in objects.keys() if re.match(r'.*/iscsi/session[0-9]+$', obj))
@@ -302,8 +301,8 @@ class iSCSI(object):
args = GLib.Variant("(a{sv})", ([], ))
try:
found_nodes, _n_nodes = self._call_initiator_method("DiscoverFirmware", args)
- except safe_dbus.DBusCallError:
- log_exception_info(log.info, "iscsi: No IBFT info found.")
+ except safe_dbus.DBusCallError as e:
+ log.info("iscsi: No IBFT info found: %s", str(e))
# an exception here means there is no ibft firmware, just return
return
--
2.31.1

0012-PO-update.patch (2288 lines): diff suppressed because it is too large.


@ -0,0 +1,29 @@
From a15c65a5e71f6fd53624bd657ab95b38d37c6f1b Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 16 Aug 2021 09:50:34 +0200
Subject: [PATCH] Fix getting PV info in LVMPhysicalVolume from the cache
"self.device" is string for formats so accessing "self.device.path"
results in an AttributeError.
Resolves: rhbz#2079220
---
blivet/formats/lvmpv.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py
index e4182adb..8cfade9f 100644
--- a/blivet/formats/lvmpv.py
+++ b/blivet/formats/lvmpv.py
@@ -170,7 +170,7 @@ class LVMPhysicalVolume(DeviceFormat):
if self.exists:
# we don't have any actual value, but the PV exists and is
# active, we should try to determine it
- pv_info = pvs_info.cache.get(self.device.path)
+ pv_info = pvs_info.cache.get(self.device)
if pv_info is None:
log.error("Failed to get free space information for the PV '%s'", self.device)
self._free = Size(0)
--
2.35.3
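
The mix-up is easy to reproduce in isolation: for formats, "device" is the path string itself, while device objects expose the path via a .path attribute. A minimal sketch (the PV path is hypothetical):

    import blivet

    fmt = blivet.formats.get_format("lvmpv", device="/dev/sda1", exists=True)
    print(fmt.device)  # '/dev/sda1' -- already the key used by pvs_info.cache
    fmt.device.path    # AttributeError: 'str' object has no attribute 'path'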


@ -0,0 +1,41 @@
From 78eda3d74110dbf9669c3271f7d2fddf962d0381 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 28 Apr 2022 14:13:04 +0200
Subject: [PATCH] Do not crash when changing disklabel on disks with active
devices
The _find_active_devices_on_action_disks function originally
prevented making any changes on disks with active devices
(active LVs, mounted partitions etc.). This was changed in
b72e957d2b23444824316331ae21d1c594371e9c, and the check currently
prevents only reformatting the disklabel on such disks, which
should already be impossible on disks with an existing partition.
This change for the 3.4 stable branch keeps the current behaviour,
where the active devices are torn down when running in installer
mode, to avoid potential issues with the installer.
Resolves: rhbz#2078801
---
blivet/actionlist.py | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/blivet/actionlist.py b/blivet/actionlist.py
index d03e32b9..4ec2dbf8 100644
--- a/blivet/actionlist.py
+++ b/blivet/actionlist.py
@@ -211,9 +211,8 @@ class ActionList(object):
except StorageError as e:
log.info("teardown of %s failed: %s", device.name, e)
else:
- raise RuntimeError("partitions in use on disks with changes "
- "pending: %s" %
- ",".join(problematic))
+ log.debug("ignoring devices in use on disks with changes: %s",
+ ",".join(problematic))
log.info("resetting parted disks...")
for device in devices:
--
2.35.3


@ -0,0 +1,65 @@
From 950f51a4cc041fe1b8a98b17e4828857b7423e55 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 13 Apr 2022 15:43:45 +0200
Subject: [PATCH] ActionDestroyDevice should not obsolete ActionRemoveMember
If we want to remove a PV from a VG and then remove the PV device,
the ActionDestroyDevice must not obsolete the ActionRemoveMember
action. Eventhough we are going to remove the device, we still
need to call "vgreduce" first.
Resolves: rhbz#2076958
---
blivet/deviceaction.py | 10 +++++-----
tests/action_test.py | 7 +++++++
2 files changed, 12 insertions(+), 5 deletions(-)
diff --git a/blivet/deviceaction.py b/blivet/deviceaction.py
index 0458e4be..78e113bf 100644
--- a/blivet/deviceaction.py
+++ b/blivet/deviceaction.py
@@ -463,8 +463,8 @@ class ActionDestroyDevice(DeviceAction):
- obsoletes all actions w/ lower id that act on the same device,
including self, if device does not exist
- - obsoletes all but ActionDestroyFormat actions w/ lower id on the
- same device if device exists
+ - obsoletes all but ActionDestroyFormat and ActionRemoveMember actions
+ w/ lower id on the same device if device exists
- obsoletes all actions that add a member to this action's
(container) device
@@ -474,9 +474,9 @@ class ActionDestroyDevice(DeviceAction):
if action.device.id == self.device.id:
if self.id >= action.id and not self.device.exists:
rc = True
- elif self.id > action.id and \
- self.device.exists and \
- not (action.is_destroy and action.is_format):
+ elif self.id > action.id and self.device.exists and \
+ not ((action.is_destroy and action.is_format) or
+ action.is_remove):
rc = True
elif action.is_add and (action.device == self.device):
rc = True
diff --git a/tests/action_test.py b/tests/action_test.py
index 1e84c20b..b3608047 100644
--- a/tests/action_test.py
+++ b/tests/action_test.py
@@ -1198,6 +1198,13 @@ class DeviceActionTestCase(StorageTestCase):
self.assertEqual(create_sdc2.requires(remove_sdc1), False)
self.assertEqual(remove_sdc1.requires(create_sdc2), False)
+ # destroy sdc1, the ActionRemoveMember should not be obsoleted
+ sdc1.exists = True
+ destroy_sdc1 = ActionDestroyDevice(sdc1)
+ destroy_sdc1.apply()
+ self.assertFalse(destroy_sdc1.obsoletes(remove_sdc1))
+ self.assertTrue(destroy_sdc1.requires(remove_sdc1))
+
def test_action_sorting(self, *args, **kwargs):
""" Verify correct functioning of action sorting. """
--
2.35.3
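
The test above boils down to one scheduling rule (a sketch reusing the test's names; the vg/sdc1 setup is omitted):

    from blivet.deviceaction import ActionDestroyDevice, ActionRemoveMember

    # sdc1 is an existing PV: first remove it from the VG, then destroy it
    remove_sdc1 = ActionRemoveMember(vg, sdc1)
    remove_sdc1.apply()
    destroy_sdc1 = ActionDestroyDevice(sdc1)
    destroy_sdc1.apply()

    destroy_sdc1.obsoletes(remove_sdc1)  # False -- "vgreduce" still has to run
    destroy_sdc1.requires(remove_sdc1)   # True  -- and it has to run first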


@ -0,0 +1,63 @@
From a9cb01f948fa5371b3e6f9282e7af81aec5cb1a8 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 2 May 2022 15:30:16 +0200
Subject: [PATCH] Correctly set vg_name after adding/removing a PV from a VG
Without setting the LVMPhysicalVolume.vg_name attribute to None
after removing the PV from its VG, the PV is still considered
active and cannot be removed.
Resolves: rhbz#2081276
---
blivet/devices/lvm.py | 3 +++
tests/devices_test/lvm_test.py | 13 +++++++++++++
2 files changed, 16 insertions(+)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index c61eeb4b..7c78c813 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -385,6 +385,8 @@ class LVMVolumeGroupDevice(ContainerDevice):
if not parent.format.exists:
parent.format.free = self._get_pv_usable_space(parent)
+ parent.format.vg_name = self.name
+
def _remove_parent(self, parent):
# XXX It would be nice to raise an exception if removing this member
# would not leave enough space, but the devicefactory relies on it
@@ -395,6 +397,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
super(LVMVolumeGroupDevice, self)._remove_parent(parent)
parent.format.free = None
parent.format.container_uuid = None
+ parent.format.vg_name = None
# We can't rely on lvm to tell us about our size, free space, &c
# since we could have modifications queued, unless the VG and all of
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
index 336c5b99..c349f003 100644
--- a/tests/devices_test/lvm_test.py
+++ b/tests/devices_test/lvm_test.py
@@ -453,6 +453,19 @@ class LVMDeviceTest(unittest.TestCase):
pool.autoset_md_size(enforced=True)
self.assertEqual(pool.chunk_size, Size("128 KiB"))
+ def test_add_remove_pv(self):
+ pv1 = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("1024 MiB"))
+ pv2 = StorageDevice("pv2", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("1024 MiB"))
+ vg = LVMVolumeGroupDevice("testvg", parents=[pv1])
+
+ vg._add_parent(pv2)
+ self.assertEqual(pv2.format.vg_name, vg.name)
+
+ vg._remove_parent(pv2)
+ self.assertEqual(pv2.format.vg_name, None)
+
class TypeSpecificCallsTest(unittest.TestCase):
def test_type_specific_calls(self):
--
2.35.3


@ -0,0 +1,29 @@
From 6d1bc8ae0cee4ee837d5dc8ad7f1a525208f3eec Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 5 May 2022 16:35:37 +0200
Subject: [PATCH] Use LVM PV format current_size in
LVMVolumeGroupDevice._remove
The member format size is 0 when target size is not set.
Related: rhbz#2081276
---
blivet/devices/lvm.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 7c78c813..4700d141 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -293,7 +293,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
# do not run pvmove on empty PVs
member.format.update_size_info()
- if member.format.free < member.format.size:
+ if member.format.free < member.format.current_size:
blockdev.lvm.pvmove(member.path)
blockdev.lvm.vgreduce(self.name, member.path)
--
2.35.3
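
In other words, a sketch of the distinction (assuming an existing PV at a hypothetical path):

    import blivet

    fmt = blivet.formats.get_format("lvmpv", device="/dev/sda1", exists=True)
    fmt.update_size_info()  # refresh current_size from the system
    fmt.size                # target size -- Size(0) unless one was set explicitly
    fmt.current_size        # actual on-disk size, the right value to compare free against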


@ -0,0 +1,588 @@
From 91e443af7b9f6b8d7f845f353a3897e3c91015b3 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 30 Dec 2021 16:08:43 +0100
Subject: [PATCH 1/4] Add support for creating LVM cache pools
Resolves: rhbz#2055198
---
blivet/blivet.py | 9 +-
blivet/devicelibs/lvm.py | 9 ++
blivet/devices/lvm.py | 160 +++++++++++++++++++++++++++++++--
tests/devices_test/lvm_test.py | 26 ++++++
4 files changed, 196 insertions(+), 8 deletions(-)
diff --git a/blivet/blivet.py b/blivet/blivet.py
index c6908eb0..d29fadd0 100644
--- a/blivet/blivet.py
+++ b/blivet/blivet.py
@@ -576,6 +576,8 @@ class Blivet(object):
:type vdo_pool: bool
:keyword vdo_lv: whether to create a vdo lv
:type vdo_lv: bool
+ :keyword cache_pool: whether to create a cache pool
+ :type cache_pool: bool
:returns: the new device
:rtype: :class:`~.devices.LVMLogicalVolumeDevice`
@@ -594,6 +596,7 @@ class Blivet(object):
thin_pool = kwargs.pop("thin_pool", False)
vdo_pool = kwargs.pop("vdo_pool", False)
vdo_lv = kwargs.pop("vdo_lv", False)
+ cache_pool = kwargs.pop("cache_pool", False)
parent = kwargs.get("parents", [None])[0]
if (thin_volume or vdo_lv) and parent:
# kwargs["parents"] will contain the pool device, so...
@@ -609,6 +612,8 @@ class Blivet(object):
kwargs["seg_type"] = "vdo-pool"
if vdo_lv:
kwargs["seg_type"] = "vdo"
+ if cache_pool:
+ kwargs["seg_type"] = "cache-pool"
mountpoint = kwargs.pop("mountpoint", None)
if 'fmt_type' in kwargs:
@@ -640,7 +645,7 @@ class Blivet(object):
swap = False
prefix = ""
- if thin_pool or vdo_pool:
+ if thin_pool or vdo_pool or cache_pool:
prefix = "pool"
name = self.suggest_device_name(parent=vg,
@@ -651,7 +656,7 @@ class Blivet(object):
if "%s-%s" % (vg.name, name) in self.names:
raise ValueError("name '%s' is already in use" % name)
- if thin_pool or thin_volume or vdo_pool or vdo_lv:
+ if thin_pool or thin_volume or vdo_pool or vdo_lv or cache_pool:
cache_req = kwargs.pop("cache_request", None)
if cache_req:
raise ValueError("Creating cached thin and VDO volumes and pools is not supported")
diff --git a/blivet/devicelibs/lvm.py b/blivet/devicelibs/lvm.py
index cb6f655e..724aaff4 100644
--- a/blivet/devicelibs/lvm.py
+++ b/blivet/devicelibs/lvm.py
@@ -54,6 +54,11 @@ LVM_THINP_MIN_CHUNK_SIZE = Size("64 KiB")
LVM_THINP_MAX_CHUNK_SIZE = Size("1 GiB")
LVM_THINP_ADDRESSABLE_CHUNK_SIZE = Size("17455015526400 B") # 15.88 TiB
+# cache constants
+LVM_CACHE_MIN_METADATA_SIZE = Size("8 MiB")
+LVM_CACHE_MAX_METADATA_SIZE = Size("16 GiB")
+LVM_CACHE_DEFAULT_MODE = blockdev.LVMCacheMode.WRITETHROUGH
+
raid_levels = raid.RAIDLevels(["linear", "striped", "raid1", "raid4", "raid5", "raid6", "raid10"])
raid_seg_types = list(itertools.chain.from_iterable([level.names for level in raid_levels if level.name != "linear"]))
@@ -236,3 +241,7 @@ def recommend_thpool_chunk_size(thpool_size):
# for every ~15.88 TiB of thinpool data size
return min(math.ceil(thpool_size / LVM_THINP_ADDRESSABLE_CHUNK_SIZE) * LVM_THINP_MIN_CHUNK_SIZE,
LVM_THINP_MAX_CHUNK_SIZE)
+
+
+def is_valid_cache_md_size(md_size):
+ return md_size >= LVM_CACHE_MIN_METADATA_SIZE and md_size <= LVM_CACHE_MAX_METADATA_SIZE
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 4700d141..7d374c3b 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -43,6 +43,7 @@ from .. import util
from ..storage_log import log_method_call
from .. import udev
from ..size import Size, KiB, MiB, ROUND_UP, ROUND_DOWN
+from ..static_data.lvm_info import lvs_info
from ..tasks import availability
import logging
@@ -646,7 +647,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
percent=None, cache_request=None, pvs=None, from_lvs=None):
if not exists:
- if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo"] + lvm.raid_seg_types:
+ if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo", "cache-pool"] + lvm.raid_seg_types:
raise ValueError("Invalid or unsupported segment type: %s" % seg_type)
if seg_type and seg_type in lvm.raid_seg_types and not pvs:
raise ValueError("List of PVs has to be given for every non-linear LV")
@@ -690,8 +691,8 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
# we reserve space for it
self._metadata_size = self.vg.pe_size
self._size -= self._metadata_size
- elif self.seg_type == "thin-pool":
- # LVMThinPoolMixin sets self._metadata_size on its own
+ elif self.seg_type in ("thin-pool", "cache_pool"):
+ # LVMThinPoolMixin and LVMCachePoolMixin set self._metadata_size on their own
if not self.exists and not from_lvs and not grow:
# a thin pool we are not going to grow -> lets calculate metadata
# size now if not given explicitly
@@ -1619,7 +1620,6 @@ class LVMThinPoolMixin(object):
""" A list of this pool's LVs """
return self._lvs[:] # we don't want folks changing our list
- @util.requires_property("is_thin_pool")
def autoset_md_size(self, enforced=False):
""" If self._metadata_size not set already, it calculates the recommended value
and sets it while subtracting the size from self.size.
@@ -2032,9 +2032,142 @@ class LVMVDOLogicalVolumeMixin(object):
self.pool._add_log_vol(self)
+class LVMCachePoolMixin(object):
+ def __init__(self, metadata_size, cache_mode=None):
+ self._metadata_size = metadata_size or Size(0)
+ self._cache_mode = cache_mode
+
+ def _init_check(self):
+ if not self.is_cache_pool:
+ return
+
+ if self._metadata_size and not lvm.is_valid_cache_md_size(self._metadata_size):
+ raise ValueError("invalid metadatasize value")
+
+ if not self.exists and not self._pv_specs:
+ raise ValueError("at least one fast PV must be specified to create a cache pool")
+
+ def _check_from_lvs(self):
+ if self._from_lvs:
+ if len(self._from_lvs) != 2:
+ raise errors.DeviceError("two LVs required to create a cache pool")
+
+ def _convert_from_lvs(self):
+ data_lv, metadata_lv = self._from_lvs
+
+ data_lv.parent_lv = self # also adds the LV to self._internal_lvs
+ data_lv.int_lv_type = LVMInternalLVtype.data
+ metadata_lv.parent_lv = self
+ metadata_lv.int_lv_type = LVMInternalLVtype.meta
+
+ self.size = data_lv.size
+
+ @property
+ def is_cache_pool(self):
+ return self.seg_type == "cache-pool"
+
+ @property
+ def profile(self):
+ return self._profile
+
+ @property
+ def type(self):
+ return "lvmcachepool"
+
+ @property
+ def resizable(self):
+ return False
+
+ def read_current_size(self):
+ log_method_call(self, exists=self.exists, path=self.path,
+ sysfs_path=self.sysfs_path)
+ if self.size != Size(0):
+ return self.size
+
+ if self.exists:
+ # cache pools are not active and don't have the device mapper mapping
+ # so we can't get this from sysfs
+ lv_info = lvs_info.cache.get(self.name)
+ if lv_info is None:
+ log.error("Failed to get size for existing cache pool '%s'", self.name)
+ return Size(0)
+ else:
+ return Size(lv_info.size)
+
+ return Size(0)
+
+ def autoset_md_size(self, enforced=False):
+ """ If self._metadata_size not set already, it calculates the recommended value
+ and sets it while subtracting the size from self.size.
+
+ """
+
+ log.debug("Auto-setting cache pool metadata size")
+
+ if self._size <= Size(0):
+ log.debug("Cache pool size not bigger than 0, just setting metadata size to 0")
+ self._metadata_size = 0
+ return
+
+ old_md_size = self._metadata_size
+ if self._metadata_size == 0 or enforced:
+ self._metadata_size = blockdev.lvm.cache_get_default_md_size(self._size)
+ log.debug("Using recommended metadata size: %s", self._metadata_size)
+
+ self._metadata_size = self.vg.align(self._metadata_size, roundup=True)
+ log.debug("Rounded metadata size to extents: %s MiB", self._metadata_size.convert_to("MiB"))
+
+ if self._metadata_size == old_md_size:
+ log.debug("Rounded metadata size unchanged")
+ else:
+ new_size = self.size - (self._metadata_size - old_md_size)
+ log.debug("Adjusting size from %s MiB to %s MiB",
+ self.size.convert_to("MiB"), new_size.convert_to("MiB"))
+ self.size = new_size
+
+ def _pre_create(self):
+ # make sure all the LVs this LV should be created from exist (if any)
+ if self._from_lvs and any(not lv.exists for lv in self._from_lvs):
+ raise errors.DeviceError("Component LVs need to be created first")
+
+ def _create(self):
+ """ Create the device. """
+ log_method_call(self, self.name, status=self.status)
+ if self._cache_mode:
+ try:
+ cache_mode = blockdev.lvm.cache_get_mode_from_str(self._cache_mode)
+ except blockdev.LVMError as e:
+ raise errors.DeviceError from e
+ else:
+ cache_mode = lvm.LVM_CACHE_DEFAULT_MODE
+
+ if self._from_lvs:
+ extra = dict()
+ if self.mode:
+ # we need the string here, it will be passed directly to the lvm command
+ extra["cachemode"] = self._cache_mode
+ data_lv = six.next(lv for lv in self._internal_lvs if lv.int_lv_type == LVMInternalLVtype.data)
+ meta_lv = six.next(lv for lv in self._internal_lvs if lv.int_lv_type == LVMInternalLVtype.meta)
+ blockdev.lvm.cache_pool_convert(self.vg.name, data_lv.lvname, meta_lv.lvname, self.lvname, **extra)
+ else:
+ blockdev.lvm.cache_create_pool(self.vg.name, self.lvname, self.size,
+ self.metadata_size,
+ cache_mode,
+ 0,
+ [spec.pv.path for spec in self._pv_specs])
+
+ def dracut_setup_args(self):
+ return set()
+
+ @property
+ def direct(self):
+ """ Is this device directly accessible? """
+ return False
+
+
class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin, LVMSnapshotMixin,
LVMThinPoolMixin, LVMThinLogicalVolumeMixin, LVMVDOPoolMixin,
- LVMVDOLogicalVolumeMixin):
+ LVMVDOLogicalVolumeMixin, LVMCachePoolMixin):
""" An LVM Logical Volume """
# generally resizable, see :property:`resizable` for details
@@ -2046,7 +2179,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
parent_lv=None, int_type=None, origin=None, vorigin=False,
metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
compression=False, deduplication=False, index_memory=0,
- write_policy=None):
+ write_policy=None, cache_mode=None):
"""
:param name: the device name (generally a device node's basename)
:type name: str
@@ -2116,6 +2249,13 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
:keyword write_policy: write policy for the volume or None for default
:type write_policy: str
+ For cache pools only:
+
+ :keyword metadata_size: the size of the metadata LV
+ :type metadata_size: :class:`~.size.Size`
+ :keyword cache_mode: mode for the cache or None for default (writethrough)
+ :type cache_mode: str
+
"""
if isinstance(parents, (list, ParentList)):
@@ -2133,6 +2273,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
LVMSnapshotMixin.__init__(self, origin, vorigin)
LVMThinPoolMixin.__init__(self, metadata_size, chunk_size, profile)
LVMThinLogicalVolumeMixin.__init__(self)
+ LVMCachePoolMixin.__init__(self, metadata_size, cache_mode)
LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
fmt, exists, sysfs_path, grow, maxsize,
percent, cache_request, pvs, from_lvs)
@@ -2144,6 +2285,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
LVMSnapshotMixin._init_check(self)
LVMThinPoolMixin._init_check(self)
LVMThinLogicalVolumeMixin._init_check(self)
+ LVMCachePoolMixin._init_check(self)
if self._from_lvs:
self._check_from_lvs()
@@ -2169,6 +2311,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
ret.append(LVMVDOPoolMixin)
if self.is_vdo_lv:
ret.append(LVMVDOLogicalVolumeMixin)
+ if self.is_cache_pool:
+ ret.append(LVMCachePoolMixin)
return ret
def _try_specific_call(self, name, *args, **kwargs):
@@ -2552,6 +2696,10 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
return True
+ @type_specific
+ def autoset_md_size(self, enforced=False):
+ pass
+
def attach_cache(self, cache_pool_lv):
if self.is_thin_lv or self.is_snapshot_lv or self.is_internal_lv:
raise errors.DeviceError("Cannot attach a cache pool to the '%s' LV" % self.name)
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
index c349f003..a1ddaf2d 100644
--- a/tests/devices_test/lvm_test.py
+++ b/tests/devices_test/lvm_test.py
@@ -867,3 +867,29 @@ class BlivetLVMVDODependenciesTest(unittest.TestCase):
vdo_supported = devicefactory.is_supported_device_type(devicefactory.DEVICE_TYPE_LVM_VDO)
self.assertFalse(vdo_supported)
+
+
+@unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES), "some unsupported device classes required for this test")
+class BlivetNewLVMCachePoolDeviceTest(unittest.TestCase):
+
+ def test_new_cache_pool(self):
+ b = blivet.Blivet()
+ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("10 GiB"), exists=True)
+ vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
+
+ for dev in (pv, vg):
+ b.devicetree._add_device(dev)
+
+ # check that all the above devices are in the expected places
+ self.assertEqual(set(b.devices), {pv, vg})
+ self.assertEqual(set(b.vgs), {vg})
+
+ self.assertEqual(vg.size, Size("10236 MiB"))
+
+ cachepool = b.new_lv(name="cachepool", cache_pool=True,
+ parents=[vg], pvs=[pv])
+
+ b.create_device(cachepool)
+
+ self.assertEqual(cachepool.type, "lvmcachepool")
--
2.35.3
From d25d52e146559d226369afdb4b102e516bd9e332 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 30 Dec 2021 16:09:04 +0100
Subject: [PATCH 2/4] examples: Add LVM cache pool example
Related: rhbz#2055198
---
examples/lvm_cachepool.py | 59 +++++++++++++++++++++++++++++++++++++++
1 file changed, 59 insertions(+)
create mode 100644 examples/lvm_cachepool.py
diff --git a/examples/lvm_cachepool.py b/examples/lvm_cachepool.py
new file mode 100644
index 00000000..ab2e8a72
--- /dev/null
+++ b/examples/lvm_cachepool.py
@@ -0,0 +1,59 @@
+import os
+
+import blivet
+from blivet.size import Size
+from blivet.util import set_up_logging, create_sparse_tempfile
+
+
+set_up_logging()
+b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
+
+# create a disk image file on which to create new devices
+disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
+b.disk_images["disk1"] = disk1_file
+disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
+b.disk_images["disk2"] = disk2_file
+
+b.reset()
+
+try:
+ disk1 = b.devicetree.get_device_by_name("disk1")
+ disk2 = b.devicetree.get_device_by_name("disk2")
+
+ b.initialize_disk(disk1)
+ b.initialize_disk(disk2)
+
+ pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
+ b.create_device(pv)
+ pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
+ b.create_device(pv2)
+
+ # allocate the partitions (decide where and on which disks they'll reside)
+ blivet.partitioning.do_partitioning(b)
+
+ vg = b.new_vg(parents=[pv, pv2])
+ b.create_device(vg)
+
+ # new lv with base size 5GiB and growth up to 15GiB and an ext4 filesystem
+ lv = b.new_lv(fmt_type="ext4", size=Size("5GiB"), parents=[vg], name="cached")
+ b.create_device(lv)
+
+ # new cache pool
+ cpool = b.new_lv(size=Size("1 GiB"), parents=[vg], pvs=[pv2], cache_pool=True, name="fastlv")
+ b.create_device(cpool)
+
+ # write the new partitions to disk and format them as specified
+ b.do_it()
+ print(b.devicetree)
+
+ # attach the newly created cache pool to the "slow" LV
+ lv.attach_cache(cpool)
+
+ b.reset()
+ print(b.devicetree)
+
+ input("Check the state and hit ENTER to trigger cleanup")
+finally:
+ b.devicetree.teardown_disk_images()
+ os.unlink(disk1_file)
+ os.unlink(disk2_file)
--
2.35.3
From 2411d8aa082f6baf46f25d5f97455da983c0ee5f Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 30 Dec 2021 16:13:33 +0100
Subject: [PATCH 3/4] lvm: Use blivet static data when checking if the VG is
active
Instead of calling 'lvs' again in LVMVolumeGroupDevice.status
Related: rhbz#2055198
---
blivet/devices/lvm.py | 9 ++-------
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 7d374c3b..9f875e4e 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -220,13 +220,8 @@ class LVMVolumeGroupDevice(ContainerDevice):
# special handling for incomplete VGs
if not self.complete:
- try:
- lvs_info = blockdev.lvm.lvs(vg_name=self.name)
- except blockdev.LVMError:
- lvs_info = []
-
- for lv_info in lvs_info:
- if lv_info.attr and lv_info.attr[4] == 'a':
+ for lv_info in lvs_info.cache.values():
+ if lv_info.vg_name == self.name and lv_info.attr and lv_info.attr[4] == 'a':
return True
return False
--
2.35.3
From c8fda78915f31f3d5011ada3c7463f85e181983b Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 30 May 2022 17:02:43 +0200
Subject: [PATCH 4/4] Add option to attach a newly created cache pool to
existing LV
Because we do not have an action for attaching the cache pool, we
cannot schedule both adding the fast PV to the VG and attaching
the cache pool to an existing LV. This hack allows scheduling the
attach to happen after the cache pool is created.
Related: rhbz#2055198
---
blivet/devices/lvm.py | 38 +++++++++++++++++++++++++++++++++++---
1 file changed, 35 insertions(+), 3 deletions(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 9f875e4e..7e4fcf53 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -2028,9 +2028,10 @@ class LVMVDOLogicalVolumeMixin(object):
class LVMCachePoolMixin(object):
- def __init__(self, metadata_size, cache_mode=None):
+ def __init__(self, metadata_size, cache_mode=None, attach_to=None):
self._metadata_size = metadata_size or Size(0)
self._cache_mode = cache_mode
+ self._attach_to = attach_to
def _init_check(self):
if not self.is_cache_pool:
@@ -2042,6 +2043,9 @@ class LVMCachePoolMixin(object):
if not self.exists and not self._pv_specs:
raise ValueError("at least one fast PV must be specified to create a cache pool")
+ if self._attach_to and not self._attach_to.exists:
+ raise ValueError("cache pool can be attached only to an existing LV")
+
def _check_from_lvs(self):
if self._from_lvs:
if len(self._from_lvs) != 2:
@@ -2150,6 +2154,31 @@ class LVMCachePoolMixin(object):
cache_mode,
0,
[spec.pv.path for spec in self._pv_specs])
+ if self._attach_to:
+ self._attach_to.attach_cache(self)
+
+ def _post_create(self):
+ if self._attach_to:
+ # post_create tries to activate the LV and after attaching it no longer exists
+ return
+
+ # pylint: disable=bad-super-call
+ super(LVMLogicalVolumeBase, self)._post_create()
+
+ def add_hook(self, new=True):
+ if self._attach_to:
+ self._attach_to._cache = LVMCache(self._attach_to, size=self.size, exists=False,
+ pvs=self._pv_specs, mode=self._cache_mode)
+
+ # pylint: disable=bad-super-call
+ super(LVMLogicalVolumeBase, self).add_hook(new=new)
+
+ def remove_hook(self, modparent=True):
+ if self._attach_to:
+ self._attach_to._cache = None
+
+ # pylint: disable=bad-super-call
+ super(LVMLogicalVolumeBase, self).remove_hook(modparent=modparent)
def dracut_setup_args(self):
return set()
@@ -2174,7 +2203,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
parent_lv=None, int_type=None, origin=None, vorigin=False,
metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
compression=False, deduplication=False, index_memory=0,
- write_policy=None, cache_mode=None):
+ write_policy=None, cache_mode=None, attach_to=None):
"""
:param name: the device name (generally a device node's basename)
:type name: str
@@ -2250,6 +2279,9 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
:type metadata_size: :class:`~.size.Size`
:keyword cache_mode: mode for the cache or None for default (writethrough)
:type cache_mode: str
+ :keyword attach_to: for non-existing cache pools a logical volume the pool should
+ be attached to when created
+ :type attach_to: :class:`LVMLogicalVolumeDevice`
"""
@@ -2268,7 +2300,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
LVMSnapshotMixin.__init__(self, origin, vorigin)
LVMThinPoolMixin.__init__(self, metadata_size, chunk_size, profile)
LVMThinLogicalVolumeMixin.__init__(self)
- LVMCachePoolMixin.__init__(self, metadata_size, cache_mode)
+ LVMCachePoolMixin.__init__(self, metadata_size, cache_mode, attach_to)
LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
fmt, exists, sysfs_path, grow, maxsize,
percent, cache_request, pvs, from_lvs)
--
2.35.3
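
Put together, both steps can now ride in a single transaction (a sketch reusing names from the cache pool example above; existing_lv stands for an LV that already exists on disk):

    # schedule: create the cache pool on the fast PV, then attach it to existing_lv
    cpool = b.new_lv(size=Size("1 GiB"), parents=[vg], pvs=[fast_pv],
                     cache_pool=True, name="fastlv", attach_to=existing_lv)
    b.create_device(cpool)
    b.do_it()  # right after the pool is created, it is attached to existing_lv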


@ -0,0 +1,23 @@
From d609cebba48744c97ac7e0461f8827ab63198026 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 10 Jun 2021 16:58:42 +0200
Subject: [PATCH] Fix util.detect_virt on Xen
Xen is apparently still alive, so we should return True for it too.
---
blivet/util.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/blivet/util.py b/blivet/util.py
index 3bebb003..af60210b 100644
--- a/blivet/util.py
+++ b/blivet/util.py
@@ -1130,4 +1130,4 @@ def detect_virt():
except (safe_dbus.DBusCallError, safe_dbus.DBusPropertyError):
return False
else:
- return vm[0] in ('qemu', 'kvm')
+ return vm[0] in ('qemu', 'kvm', 'xen')
--
2.31.1


@ -0,0 +1,776 @@
From a03be3924318788e42bcdb3ed6a5334aed771c43 Mon Sep 17 00:00:00 2001
From: Jan Stodola <jstodola@redhat.com>
Date: Thu, 28 Oct 2021 21:17:25 +0200
Subject: [PATCH 1/8] Fix removing zFCP SCSI devices
Values parsed from /proc/scsi/scsi were not correctly used to assemble
paths to SCSI devices.
For example:
/sys/bus/scsi/devices/0:0:00:00/
was incorrectly accessed instead of:
/sys/bus/scsi/devices/0:0:0:0/
Switch to a more reliable way of listing the available SCSI devices.
---
blivet/zfcp.py | 17 ++++-------------
1 file changed, 4 insertions(+), 13 deletions(-)
diff --git a/blivet/zfcp.py b/blivet/zfcp.py
index 93af5419..3747290e 100644
--- a/blivet/zfcp.py
+++ b/blivet/zfcp.py
@@ -20,6 +20,7 @@
#
import os
+import re
from . import udev
from . import util
from .i18n import _
@@ -167,20 +168,10 @@ class ZFCPDevice:
return True
def offline_scsi_device(self):
- f = open("/proc/scsi/scsi", "r")
- lines = f.readlines()
- f.close()
- # alternatively iterate over /sys/bus/scsi/devices/*:0:*:*/
+ # A list of existing SCSI devices in format Host:Bus:Target:Lun
+ scsi_devices = [f for f in os.listdir(scsidevsysfs) if re.search(r'^[0-9]+:[0-9]+:[0-9]+:[0-9]+$', f)]
- for line in lines:
- if not line.startswith("Host"):
- continue
- scsihost = line.split()
- host = scsihost[1]
- channel = "0"
- devid = scsihost[5]
- lun = scsihost[7]
- scsidev = "%s:%s:%s:%s" % (host[4:], channel, devid, lun)
+ for scsidev in scsi_devices:
fcpsysfs = "%s/%s" % (scsidevsysfs, scsidev)
scsidel = "%s/%s/delete" % (scsidevsysfs, scsidev)
--
2.36.1
From 82bd018fdc47c64f30d8422eb90bc76564072a26 Mon Sep 17 00:00:00 2001
From: Jan Stodola <jstodola@redhat.com>
Date: Sun, 21 Nov 2021 02:47:45 +0100
Subject: [PATCH 2/8] Refactor the ZFCPDevice class
Add a new base class for zFCP devices.
Move code to the new base class.
Improve documentation.
---
blivet/zfcp.py | 131 +++++++++++++++++++++++++++++++++++--------------
1 file changed, 95 insertions(+), 36 deletions(-)
diff --git a/blivet/zfcp.py b/blivet/zfcp.py
index 3747290e..4a50f65f 100644
--- a/blivet/zfcp.py
+++ b/blivet/zfcp.py
@@ -21,6 +21,7 @@
import os
import re
+from abc import ABC
from . import udev
from . import util
from .i18n import _
@@ -46,29 +47,19 @@ zfcpsysfs = "/sys/bus/ccw/drivers/zfcp"
scsidevsysfs = "/sys/bus/scsi/devices"
zfcpconf = "/etc/zfcp.conf"
+class ZFCPDeviceBase(ABC):
+ """An abstract base class for zFCP storage devices."""
-class ZFCPDevice:
- """
- .. warning::
- Since this is a singleton class, calling deepcopy() on the instance
- just returns ``self`` with no copy being created.
- """
-
- def __init__(self, devnum, wwpn, fcplun):
+ def __init__(self, devnum):
self.devnum = blockdev.s390.sanitize_dev_input(devnum)
- self.wwpn = blockdev.s390.zfcp_sanitize_wwpn_input(wwpn)
- self.fcplun = blockdev.s390.zfcp_sanitize_lun_input(fcplun)
-
if not self.devnum:
raise ValueError(_("You have not specified a device number or the number is invalid"))
- if not self.wwpn:
- raise ValueError(_("You have not specified a worldwide port name or the name is invalid."))
- if not self.fcplun:
- raise ValueError(_("You have not specified a FCP LUN or the number is invalid."))
+
+ self._device_online_path = os.path.join(zfcpsysfs, self.devnum, "online")
# Force str and unicode types in case any of the properties are unicode
def _to_string(self):
- return "%s %s %s" % (self.devnum, self.wwpn, self.fcplun)
+ return str(self.devnum)
def __str__(self):
return stringize(self._to_string())
@@ -76,33 +67,97 @@ class ZFCPDevice:
def __unicode__(self):
return unicodeize(self._to_string())
- def online_device(self):
- online = "%s/%s/online" % (zfcpsysfs, self.devnum)
- portadd = "%s/%s/port_add" % (zfcpsysfs, self.devnum)
- portdir = "%s/%s/%s" % (zfcpsysfs, self.devnum, self.wwpn)
- unitadd = "%s/unit_add" % (portdir)
- unitdir = "%s/%s" % (portdir, self.fcplun)
- failed = "%s/failed" % (unitdir)
+ def _free_device(self):
+ """Remove the device from the I/O ignore list to make it visible to the system.
+
+ :raises: ValueError if the device cannot be removed from the I/O ignore list
+ """
- if not os.path.exists(online):
+ if not os.path.exists(self._device_online_path):
log.info("Freeing zFCP device %s", self.devnum)
util.run_program(["zfcp_cio_free", "-d", self.devnum])
- if not os.path.exists(online):
+ if not os.path.exists(self._device_online_path):
raise ValueError(_("zFCP device %s not found, not even in device ignore list.") %
(self.devnum,))
+ def _set_zfcp_device_online(self):
+ """Set the zFCP device online.
+
+ :raises: ValueError if the device cannot be set online
+ """
+
try:
- f = open(online, "r")
- devonline = f.readline().strip()
- f.close()
+ with open(self._device_online_path) as f:
+ devonline = f.readline().strip()
if devonline != "1":
- logged_write_line_to_file(online, "1")
+ logged_write_line_to_file(self._device_online_path, "1")
except OSError as e:
raise ValueError(_("Could not set zFCP device %(devnum)s "
"online (%(e)s).")
% {'devnum': self.devnum, 'e': e})
+ def _set_zfcp_device_offline(self):
+ """Set the zFCP device offline.
+
+ :raises: ValueError if the device cannot be set offline
+ """
+
+ try:
+ logged_write_line_to_file(self._device_online_path, "0")
+ except OSError as e:
+ raise ValueError(_("Could not set zFCP device %(devnum)s "
+ "offline (%(e)s).")
+ % {'devnum': self.devnum, 'e': e})
+
+ def online_device(self):
+ """Initialize the device and make its storage block device(s) ready to use.
+
+ :returns: True if success
+ :raises: ValueError if the device cannot be initialized
+ """
+
+ self._free_device()
+ self._set_zfcp_device_online()
+ return True
+
+
+class ZFCPDevice(ZFCPDeviceBase):
+ """A class for zFCP devices that are not configured in NPIV mode. Such
+ devices have to be specified by a device number, WWPN and LUN.
+ """
+
+ def __init__(self, devnum, wwpn, fcplun):
+ super().__init__(devnum)
+
+ self.wwpn = blockdev.s390.zfcp_sanitize_wwpn_input(wwpn)
+ if not self.wwpn:
+ raise ValueError(_("You have not specified a worldwide port name or the name is invalid."))
+
+ self.fcplun = blockdev.s390.zfcp_sanitize_lun_input(fcplun)
+ if not self.fcplun:
+ raise ValueError(_("You have not specified a FCP LUN or the number is invalid."))
+
+ # Force str and unicode types in case any of the properties are unicode
+ def _to_string(self):
+ return "{} {} {}".format(self.devnum, self.wwpn, self.fcplun)
+
+ def online_device(self):
+ """Initialize the device and make its storage block device(s) ready to use.
+
+ :returns: True if success
+ :raises: ValueError if the device cannot be initialized
+ """
+
+ super().online_device()
+
+ portadd = "%s/%s/port_add" % (zfcpsysfs, self.devnum)
+ portdir = "%s/%s/%s" % (zfcpsysfs, self.devnum, self.wwpn)
+ unitadd = "%s/unit_add" % (portdir)
+ unitdir = "%s/%s" % (portdir, self.fcplun)
+ failed = "%s/failed" % (unitdir)
+
+ # create the sysfs directory for the WWPN/port
if not os.path.exists(portdir):
if os.path.exists(portadd):
# older zfcp sysfs interface
@@ -127,6 +182,7 @@ class ZFCPDevice:
"there.", {'wwpn': self.wwpn,
'devnum': self.devnum})
+ # create the sysfs directory for the LUN/unit
if not os.path.exists(unitdir):
try:
logged_write_line_to_file(unitadd, self.fcplun)
@@ -144,6 +200,7 @@ class ZFCPDevice:
'wwpn': self.wwpn,
'devnum': self.devnum})
+ # check the state of the LUN
fail = "0"
try:
f = open(failed, "r")
@@ -168,6 +225,8 @@ class ZFCPDevice:
return True
def offline_scsi_device(self):
+ """Find SCSI devices associated to the zFCP device and remove them from the system."""
+
# A list of existing SCSI devices in format Host:Bus:Target:Lun
scsi_devices = [f for f in os.listdir(scsidevsysfs) if re.search(r'^[0-9]+:[0-9]+:[0-9]+:[0-9]+$', f)]
@@ -196,7 +255,8 @@ class ZFCPDevice:
self.devnum, self.wwpn, self.fcplun)
def offline_device(self):
- offline = "%s/%s/online" % (zfcpsysfs, self.devnum)
+ """Remove the zFCP device from the system."""
+
portadd = "%s/%s/port_add" % (zfcpsysfs, self.devnum)
portremove = "%s/%s/port_remove" % (zfcpsysfs, self.devnum)
unitremove = "%s/%s/%s/unit_remove" % (zfcpsysfs, self.devnum, self.wwpn)
@@ -212,6 +272,7 @@ class ZFCPDevice:
% {'devnum': self.devnum, 'wwpn': self.wwpn,
'fcplun': self.fcplun, 'e': e})
+ # remove the LUN
try:
logged_write_line_to_file(unitremove, self.fcplun)
except OSError as e:
@@ -221,6 +282,7 @@ class ZFCPDevice:
% {'fcplun': self.fcplun, 'wwpn': self.wwpn,
'devnum': self.devnum, 'e': e})
+ # remove the WWPN only if there are no other LUNs attached
if os.path.exists(portadd):
# only try to remove ports with older zfcp sysfs interface
for lun in os.listdir(portdir):
@@ -238,6 +300,7 @@ class ZFCPDevice:
% {'wwpn': self.wwpn,
'devnum': self.devnum, 'e': e})
+ # check if there are other WWPNs existing for the zFCP device number
if os.path.exists(portadd):
# older zfcp sysfs interface
for port in os.listdir(devdir):
@@ -256,12 +319,8 @@ class ZFCPDevice:
self.devnum, luns[0])
return True
- try:
- logged_write_line_to_file(offline, "0")
- except OSError as e:
- raise ValueError(_("Could not set zFCP device %(devnum)s "
- "offline (%(e)s).")
- % {'devnum': self.devnum, 'e': e})
+ # no other WWPNs/LUNs exists for this device number, it's safe to bring it offline
+ self._set_zfcp_device_offline()
return True
--
2.36.1
From a9b9fe124dbc23104c0b60c8e0326cab3eb7a28d Mon Sep 17 00:00:00 2001
From: Jan Stodola <jstodola@redhat.com>
Date: Sun, 21 Nov 2021 02:35:05 +0100
Subject: [PATCH 3/8] Move offline_scsi_device() to the base class
---
blivet/zfcp.py | 74 ++++++++++++++++++++++++++++++--------------------
1 file changed, 44 insertions(+), 30 deletions(-)
diff --git a/blivet/zfcp.py b/blivet/zfcp.py
index 4a50f65f..af8f841d 100644
--- a/blivet/zfcp.py
+++ b/blivet/zfcp.py
@@ -110,6 +110,15 @@ class ZFCPDeviceBase(ABC):
"offline (%(e)s).")
% {'devnum': self.devnum, 'e': e})
+ def _is_scsi_associated_with_fcp(self, fcphbasysfs, _fcpwwpnsysfs, _fcplunsysfs):
+ """Decide if the SCSI device with the provided SCSI attributes
+ corresponds to the zFCP device.
+
+ :returns: True or False
+ """
+
+ return fcphbasysfs == self.devnum
+
def online_device(self):
"""Initialize the device and make its storage block device(s) ready to use.
@@ -121,6 +130,30 @@ class ZFCPDeviceBase(ABC):
self._set_zfcp_device_online()
return True
+ def offline_scsi_device(self):
+ """Find SCSI devices associated to the zFCP device and remove them from the system."""
+
+ # A list of existing SCSI devices in format Host:Bus:Target:Lun
+ scsi_devices = [f for f in os.listdir(scsidevsysfs) if re.search(r'^[0-9]+:[0-9]+:[0-9]+:[0-9]+$', f)]
+
+ for scsidev in scsi_devices:
+ fcpsysfs = os.path.join(scsidevsysfs, scsidev)
+
+ with open(os.path.join(fcpsysfs, "hba_id")) as f:
+ fcphbasysfs = f.readline().strip()
+ with open(os.path.join(fcpsysfs, "wwpn")) as f:
+ fcpwwpnsysfs = f.readline().strip()
+ with open(os.path.join(fcpsysfs, "fcp_lun")) as f:
+ fcplunsysfs = f.readline().strip()
+
+ if self._is_scsi_associated_with_fcp(fcphbasysfs, fcpwwpnsysfs, fcplunsysfs):
+ scsidel = os.path.join(scsidevsysfs, scsidev, "delete")
+ logged_write_line_to_file(scsidel, "1")
+ udev.settle()
+ return
+
+ log.warning("No scsi device found to delete for zfcp %s", self)
+
class ZFCPDevice(ZFCPDeviceBase):
"""A class for zFCP devices that are not configured in NPIV mode. Such
@@ -142,6 +175,17 @@ class ZFCPDevice(ZFCPDeviceBase):
def _to_string(self):
return "{} {} {}".format(self.devnum, self.wwpn, self.fcplun)
+ def _is_scsi_associated_with_fcp(self, fcphbasysfs, fcpwwpnsysfs, fcplunsysfs):
+ """Decide if the SCSI device with the provided SCSI attributes
+ corresponds to the zFCP device.
+
+ :returns: True or False
+ """
+
+ return (fcphbasysfs == self.devnum and
+ fcpwwpnsysfs == self.wwpn and
+ fcplunsysfs == self.fcplun)
+
def online_device(self):
"""Initialize the device and make its storage block device(s) ready to use.
@@ -224,36 +268,6 @@ class ZFCPDevice(ZFCPDeviceBase):
return True
- def offline_scsi_device(self):
- """Find SCSI devices associated to the zFCP device and remove them from the system."""
-
- # A list of existing SCSI devices in format Host:Bus:Target:Lun
- scsi_devices = [f for f in os.listdir(scsidevsysfs) if re.search(r'^[0-9]+:[0-9]+:[0-9]+:[0-9]+$', f)]
-
- for scsidev in scsi_devices:
- fcpsysfs = "%s/%s" % (scsidevsysfs, scsidev)
- scsidel = "%s/%s/delete" % (scsidevsysfs, scsidev)
-
- f = open("%s/hba_id" % (fcpsysfs), "r")
- fcphbasysfs = f.readline().strip()
- f.close()
- f = open("%s/wwpn" % (fcpsysfs), "r")
- fcpwwpnsysfs = f.readline().strip()
- f.close()
- f = open("%s/fcp_lun" % (fcpsysfs), "r")
- fcplunsysfs = f.readline().strip()
- f.close()
-
- if fcphbasysfs == self.devnum \
- and fcpwwpnsysfs == self.wwpn \
- and fcplunsysfs == self.fcplun:
- logged_write_line_to_file(scsidel, "1")
- udev.settle()
- return
-
- log.warning("no scsi device found to delete for zfcp %s %s %s",
- self.devnum, self.wwpn, self.fcplun)
-
def offline_device(self):
"""Remove the zFCP device from the system."""
--
2.36.1
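The refactor above is a textbook template method: the base class now owns the scan-and-delete loop, and subclasses only override the association predicate. A minimal sketch of that shape, with hypothetical class names standing in for ZFCPDeviceBase and ZFCPDevice:

class AdapterDevice:
    """Base-class sketch: a SCSI device matches on the adapter ID alone."""
    def __init__(self, devnum):
        self.devnum = devnum

    def _is_associated(self, hba, wwpn, lun):
        return hba == self.devnum

class FullPathDevice(AdapterDevice):
    """Subclass sketch: match the full devnum/WWPN/LUN triple instead."""
    def __init__(self, devnum, wwpn, lun):
        super().__init__(devnum)
        self.wwpn = wwpn
        self.lun = lun

    def _is_associated(self, hba, wwpn, lun):
        return (hba, wwpn, lun) == (self.devnum, self.wwpn, self.lun)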
From 47997255cf12505d743d6e01a40a51b23ed64a6d Mon Sep 17 00:00:00 2001
From: Jan Stodola <jstodola@redhat.com>
Date: Sat, 6 Nov 2021 21:27:52 +0100
Subject: [PATCH 4/8] Allow to delete more than one SCSI device
NPIV zFCP devices can attach more than one SCSI device, so allow
deleting them all. For non-NPIV devices this means a possible slowdown,
since all SCSI devices are now checked.
---
blivet/zfcp.py | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/blivet/zfcp.py b/blivet/zfcp.py
index af8f841d..3b3f623b 100644
--- a/blivet/zfcp.py
+++ b/blivet/zfcp.py
@@ -136,6 +136,7 @@ class ZFCPDeviceBase(ABC):
# A list of existing SCSI devices in format Host:Bus:Target:Lun
scsi_devices = [f for f in os.listdir(scsidevsysfs) if re.search(r'^[0-9]+:[0-9]+:[0-9]+:[0-9]+$', f)]
+ scsi_device_found = False
for scsidev in scsi_devices:
fcpsysfs = os.path.join(scsidevsysfs, scsidev)
@@ -147,12 +148,13 @@ class ZFCPDeviceBase(ABC):
fcplunsysfs = f.readline().strip()
if self._is_scsi_associated_with_fcp(fcphbasysfs, fcpwwpnsysfs, fcplunsysfs):
+ scsi_device_found = True
scsidel = os.path.join(scsidevsysfs, scsidev, "delete")
logged_write_line_to_file(scsidel, "1")
udev.settle()
- return
- log.warning("No scsi device found to delete for zfcp %s", self)
+ if not scsi_device_found:
+ log.warning("No scsi device found to delete for zfcp %s", self)
class ZFCPDevice(ZFCPDeviceBase):
--
2.36.1
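The behavioral change is easy to miss in the hunk: the old loop deleted the first matching SCSI device and returned, while the new one keeps scanning and warns only when nothing matched at all. A toy rendering with made-up Host:Bus:Target:Lun names:

scsi_devices = ["0:0:0:0", "0:0:0:1", "1:0:0:0"]  # hypothetical H:B:T:L list

def matches(dev):
    return dev.startswith("0:")  # stand-in for _is_scsi_associated_with_fcp()

found = False
for dev in scsi_devices:
    if matches(dev):
        found = True
        print("deleting", dev)  # the old code deleted one device and returned here
if not found:
    print("no scsi device found to delete")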
From feace41093c97dc88aa20b07a5ff6049df4bd01d Mon Sep 17 00:00:00 2001
From: Jan Stodola <jstodola@redhat.com>
Date: Sun, 21 Nov 2021 03:01:02 +0100
Subject: [PATCH 5/8] Add a function for reading the value of a kernel module
parameter
---
blivet/util.py | 33 +++++++++++++++++++++++++++++++++
tests/util_test.py | 11 +++++++++++
2 files changed, 44 insertions(+)
diff --git a/blivet/util.py b/blivet/util.py
index af60210b..cbef65e0 100644
--- a/blivet/util.py
+++ b/blivet/util.py
@@ -1131,3 +1131,36 @@ def detect_virt():
return False
else:
return vm[0] in ('qemu', 'kvm', 'xen')
+
+
+def natural_sort_key(device):
+ """ Sorting key for devices which makes sure partitions are sorted in natural
+ way, e.g. 'sda1, sda2, ..., sda10' and not like 'sda1, sda10, sda2, ...'
+ """
+ if device.type == "partition" and device.parted_partition and device.disk:
+ part_num = getattr(device.parted_partition, "number", -1)
+ return [device.disk.name, part_num]
+ else:
+ return [device.name, 0]
+
+
+def get_kernel_module_parameter(module, parameter):
+ """ Return the value of a given kernel module parameter
+
+ :param str module: a kernel module
+ :param str parameter: a module parameter
+ :returns: the value of the given kernel module parameter or None
+ :rtype: str
+ """
+
+ value = None
+
+ parameter_path = os.path.join("/sys/module", module, "parameters", parameter)
+ try:
+ with open(parameter_path) as f:
+ value = f.read().strip()
+ except IOError as e:
+ log.warning("Couldn't get the value of the parameter '%s' from the kernel module '%s': %s",
+ parameter, module, str(e))
+
+ return value
diff --git a/tests/util_test.py b/tests/util_test.py
index 853b6166..ed2549ad 100644
--- a/tests/util_test.py
+++ b/tests/util_test.py
@@ -180,3 +180,14 @@ class GetSysfsAttrTestCase(unittest.TestCase):
# the unicode replacement character (U+FFFD) should be used instead
model = util.get_sysfs_attr(sysfs, "model")
self.assertEqual(model, "test model\ufffd")
+
+
+class GetKernelModuleParameterTestCase(unittest.TestCase):
+
+ def test_nonexisting_kernel_module(self):
+ self.assertIsNone(util.get_kernel_module_parameter("unknown_module", "unknown_parameter"))
+
+ def test_get_kernel_module_parameter_value(self):
+ with mock.patch('blivet.util.open', mock.mock_open(read_data='value\n')):
+ value = util.get_kernel_module_parameter("module", "parameter")
+ self.assertEqual(value, "value")
--
2.36.1
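Usage of the new helper is straightforward; a short sketch, assuming a build of blivet with this patch applied and, for a non-None result, a loaded zfcp module (otherwise the helper logs a warning and returns None):

from blivet import util

allow_lun_scan = util.get_kernel_module_parameter("zfcp", "allow_lun_scan")
if allow_lun_scan is None:
    print("zfcp module not loaded or parameter missing")
else:
    print("allow_lun_scan =", allow_lun_scan)  # "Y" means LUN scanning is enabled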
From cea53c0f95793d8041391dd8e1edc58aa0f7868c Mon Sep 17 00:00:00 2001
From: Jan Stodola <jstodola@redhat.com>
Date: Sun, 21 Nov 2021 03:01:46 +0100
Subject: [PATCH 6/8] LUN and WWPN should not be used for NPIV zFCP devices
Log a warning when activating a zFCP device in NPIV mode if a WWPN or
LUN has been provided; they are superfluous for NPIV devices.
---
blivet/zfcp.py | 58 +++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 57 insertions(+), 1 deletion(-)
diff --git a/blivet/zfcp.py b/blivet/zfcp.py
index 3b3f623b..726e9364 100644
--- a/blivet/zfcp.py
+++ b/blivet/zfcp.py
@@ -22,6 +22,7 @@
import os
import re
from abc import ABC
+import glob
from . import udev
from . import util
from .i18n import _
@@ -47,6 +48,55 @@ zfcpsysfs = "/sys/bus/ccw/drivers/zfcp"
scsidevsysfs = "/sys/bus/scsi/devices"
zfcpconf = "/etc/zfcp.conf"
+
+def _is_lun_scan_allowed():
+ """Return True if automatic LUN scanning is enabled by the kernel."""
+
+ allow_lun_scan = util.get_kernel_module_parameter("zfcp", "allow_lun_scan")
+ return allow_lun_scan == "Y"
+
+
+def _is_port_in_npiv_mode(device_id):
+ """Return True if the device ID is configured in NPIV mode. See
+ https://www.ibm.com/docs/en/linux-on-systems?topic=devices-use-npiv
+ """
+
+ port_in_npiv_mode = False
+ port_type_path = "/sys/bus/ccw/devices/{}/host*/fc_host/host*/port_type".format(device_id)
+ port_type_paths = glob.glob(port_type_path)
+ try:
+ for filename in port_type_paths:
+ with open(filename) as f:
+ port_type = f.read()
+ if re.search(r"(^|\s)NPIV(\s|$)", port_type):
+ port_in_npiv_mode = True
+ except OSError as e:
+ log.warning("Couldn't read the port_type attribute of the %s device: %s", device_id, str(e))
+ port_in_npiv_mode = False
+
+ return port_in_npiv_mode
+
+
+def is_npiv_enabled(device_id):
+ """Return True if the given zFCP device ID is configured and usable in
+ NPIV (N_Port ID Virtualization) mode.
+
+ :returns: True or False
+ """
+
+ # LUN scanning disabled by the kernel module prevents using the device in NPIV mode
+ if not _is_lun_scan_allowed():
+ log.warning("Automatic LUN scanning is disabled by the zfcp kernel module.")
+ return False
+
+ # The port itself has to be configured in NPIV mode
+ if not _is_port_in_npiv_mode(device_id):
+ log.warning("The zFCP device %s is not configured in NPIV mode.", device_id)
+ return False
+
+ return True
+
+
class ZFCPDeviceBase(ABC):
"""An abstract base class for zFCP storage devices."""
@@ -203,6 +253,13 @@ class ZFCPDevice(ZFCPDeviceBase):
unitdir = "%s/%s" % (portdir, self.fcplun)
failed = "%s/failed" % (unitdir)
+ # Activating an NPIV enabled device using devnum, WWPN and LUN should still be possible
+ # as this method was used as a workaround until support for NPIV enabled devices was
+ # implemented. Just log a warning message and continue.
+ if is_npiv_enabled(self.devnum):
+ log.warning("zFCP device %s in NPIV mode brought online. All LUNs will be activated "
+ "automatically although WWPN and LUN have been provided.", self.devnum)
+
# create the sysfs directory for the WWPN/port
if not os.path.exists(portdir):
if os.path.exists(portadd):
@@ -327,7 +384,6 @@ class ZFCPDevice(ZFCPDeviceBase):
return True
else:
# newer zfcp sysfs interface with auto port scan
- import glob
luns = glob.glob("%s/0x????????????????/0x????????????????"
% (devdir,))
if len(luns) != 0:
--
2.36.1
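Both checks have to pass before a device is treated as NPIV-capable, and the new public helper encodes exactly that; a quick sketch using a placeholder device ID:

from blivet.zfcp import is_npiv_enabled

device_id = "0.0.fc00"  # placeholder zFCP device number
if is_npiv_enabled(device_id):
    print("%s runs in NPIV mode; WWPN and LUN are superfluous" % device_id)
else:
    print("%s needs an explicit WWPN and LUN" % device_id)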
From ff01832941a62fc3113983a51a22369566b3f900 Mon Sep 17 00:00:00 2001
From: Jan Stodola <jstodola@redhat.com>
Date: Sat, 6 Nov 2021 21:27:52 +0100
Subject: [PATCH 7/8] Add new class for NPIV-enabled devices
---
blivet/zfcp.py | 53 +++++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 50 insertions(+), 3 deletions(-)
diff --git a/blivet/zfcp.py b/blivet/zfcp.py
index 726e9364..e6c0e48a 100644
--- a/blivet/zfcp.py
+++ b/blivet/zfcp.py
@@ -397,6 +397,44 @@ class ZFCPDevice(ZFCPDeviceBase):
return True
+class ZFCPNPIVDevice(ZFCPDeviceBase):
+ """Class for zFCP devices configured in NPIV mode. Only a zFCP device number is
+ needed for such devices.
+ """
+
+ def online_device(self):
+ """Initialize the device and make its storage block device(s) ready to use.
+
+ :returns: True if success
+ :raises: ValueError if the device cannot be initialized
+ """
+
+ super().online_device()
+
+ if not is_npiv_enabled(self.devnum):
+ raise ValueError(_("zFCP device %s cannot be used in NPIV mode.") % self)
+
+ return True
+
+ def offline_device(self):
+ """Remove the zFCP device from the system.
+
+ :returns: True if success
+ :raises: ValueError if the device cannot be brought offline
+ """
+
+ try:
+ self.offline_scsi_device()
+ except OSError as e:
+ raise ValueError(_("Could not correctly delete SCSI device of "
+ "zFCP %(zfcpdev)s (%(e)s).")
+ % {'zfcpdev': self, 'e': e})
+
+ self._set_zfcp_device_offline()
+
+ return True
+
+
class zFCP:
""" ZFCP utility class.
@@ -439,7 +477,12 @@ class zFCP:
fields = line.split()
- if len(fields) == 3:
+ # NPIV enabled device
+ if len(fields) == 1:
+ devnum = fields[0]
+ wwpn = None
+ fcplun = None
+ elif len(fields) == 3:
devnum = fields[0]
wwpn = fields[1]
fcplun = fields[2]
@@ -458,8 +501,12 @@ class zFCP:
except ValueError as e:
log.warning("%s", str(e))
- def add_fcp(self, devnum, wwpn, fcplun):
- d = ZFCPDevice(devnum, wwpn, fcplun)
+ def add_fcp(self, devnum, wwpn=None, fcplun=None):
+ if wwpn and fcplun:
+ d = ZFCPDevice(devnum, wwpn, fcplun)
+ else:
+ d = ZFCPNPIVDevice(devnum)
+
if d.online_device():
self.fcpdevs.add(d)
--
2.36.1
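With the widened signature, callers select the device class implicitly; a hedged sketch with placeholder identifiers (note that add_fcp() also brings the device online, so this only succeeds on s390 hardware where the devices exist):

from blivet import zfcp

fcp = zfcp.zFCP()
# non-NPIV device: the full devnum/WWPN/LUN triple selects ZFCPDevice;
# in /etc/zfcp.conf this corresponds to a three-field line
fcp.add_fcp("0.0.fc00", "0x5005076300c213e9", "0x401040a000000000")
# NPIV device: the device number alone selects ZFCPNPIVDevice
# (a one-field line in /etc/zfcp.conf)
fcp.add_fcp("0.0.fc01")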
From ee5b0cdc2393775925fbd9d32caed16eee33fcb0 Mon Sep 17 00:00:00 2001
From: Jan Stodola <jstodola@redhat.com>
Date: Sat, 20 Nov 2021 23:12:43 +0100
Subject: [PATCH 8/8] Generate correct dracut boot arguments for NPIV devices
NPIV enabled devices need only the device ID. WWPNs/LUNs are discovered
automatically by the kernel module.
---
blivet/devices/disk.py | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index 67a01ba6..36278507 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -577,7 +577,15 @@ class ZFCPDiskDevice(DiskDevice):
'lun': self.fcp_lun}
def dracut_setup_args(self):
- return set(["rd.zfcp=%s,%s,%s" % (self.hba_id, self.wwpn, self.fcp_lun,)])
+ from ..zfcp import is_npiv_enabled
+
+ # zFCP devices in NPIV mode need only the device ID
+ if is_npiv_enabled(self.hba_id):
+ dracut_args = set(["rd.zfcp=%s" % self.hba_id])
+ else:
+ dracut_args = set(["rd.zfcp=%s,%s,%s" % (self.hba_id, self.wwpn, self.fcp_lun,)])
+
+ return dracut_args
class DASDDevice(DiskDevice):
--
2.36.1
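The resulting kernel command-line fragments differ only in how much dracut must be told; illustrated below with the same placeholder identifiers:

hba_id = "0.0.fc00"  # placeholder device ID
wwpn = "0x5005076300c213e9"
fcp_lun = "0x401040a000000000"

print("rd.zfcp=%s" % hba_id)                         # NPIV: device ID only
print("rd.zfcp=%s,%s,%s" % (hba_id, wwpn, fcp_lun))  # non-NPIV: full triple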

1
EMPTY
View File

@ -1 +0,0 @@

3264
python-blivet.spec Normal file

File diff suppressed because it is too large Load Diff

2
sources Normal file
View File

@ -0,0 +1,2 @@
SHA512 (blivet-3.4.0-tests.tar.gz) = 98351de3a3121d777b644537fedd24596843e609b9e8d9fa6f61729d6bf726bc343c2bb7712ba41138f82738235654756122a290645293df6c2a22944e0a08f7
SHA512 (blivet-3.4.0.tar.gz) = c6797765b82313c55157169489be3e52d03f7656978c6c75bb87c6f1715ceb2447f9fe79cbc3d1d2d00c56329fabca524e1ab7ab074877f6edc8daf62f524038