import python-blivet-3.4.0-16.el9

CentOS Sources 2022-11-15 01:42:47 -05:00 committed by Stepan Oksanichenko
parent a5b1cf812b
commit 958d38db77
12 changed files with 1918 additions and 2674 deletions


@@ -0,0 +1,29 @@
From dc6350f87a1dacdebdbb9cf0be43699bb5f7eadd Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 16 Aug 2021 09:50:34 +0200
Subject: [PATCH] Fix getting PV info in LVMPhysicalVolume from the cache
"self.device" is string for formats so accessing "self.device.path"
results in an AttributeError.
Resolves: rhbz#2079221
---
blivet/formats/lvmpv.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py
index 3fef667e..483b53a4 100644
--- a/blivet/formats/lvmpv.py
+++ b/blivet/formats/lvmpv.py
@@ -197,7 +197,7 @@ class LVMPhysicalVolume(DeviceFormat):
if self.exists:
# we don't have any actual value, but the PV exists and is
# active, we should try to determine it
- pv_info = pvs_info.cache.get(self.device.path)
+ pv_info = pvs_info.cache.get(self.device)
if pv_info is None:
log.error("Failed to get free space information for the PV '%s'", self.device)
self._free = Size(0)
--
2.34.3
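
The failure mode this fixes is easy to reproduce outside blivet: for format objects, "device" already holds the device node path as a plain string (unlike device objects, which expose a ".path" attribute), so dereferencing ".path" on it raises AttributeError, while the string itself is exactly the key the PV cache expects. A minimal standalone sketch with made-up values, not blivet code:

device = "/dev/sda1"                  # what LVMPhysicalVolume.device holds: a plain string
pvs_cache = {"/dev/sda1": "pv-info"}  # stand-in for pvs_info.cache

try:
    device.path                       # str has no attribute "path" -> AttributeError
except AttributeError:
    pass

print(pvs_cache.get(device))          # keying the cache by the string itself works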

File diff suppressed because it is too large


@@ -0,0 +1,41 @@
From 72ace5d66b567baefde10ff9c4197054830067f1 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 28 Apr 2022 14:13:04 +0200
Subject: [PATCH] Do not crash when changing disklabel on disks with active
devices
The _find_active_devices_on_action_disks function originally
prevented making any changes on disks with active devices
(active LVs, mounted partitions etc.). This was changed in
b72e957d2b23444824316331ae21d1c594371e9c and the check currently
prevents only reformatting the disklabel on such disks, which
should already be impossible on disks with an existing partition.
This change for the 3.4 stable branch keeps the current behaviour
where the active devices are torn down when running in installer
mode to avoid potential issues with the installer.
Resolves: rhbz#2078803
---
blivet/actionlist.py | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/blivet/actionlist.py b/blivet/actionlist.py
index f3977401..9c06228b 100644
--- a/blivet/actionlist.py
+++ b/blivet/actionlist.py
@@ -211,9 +211,8 @@ class ActionList(object):
except StorageError as e:
log.info("teardown of %s failed: %s", device.name, e)
else:
- raise RuntimeError("partitions in use on disks with changes "
- "pending: %s" %
- ",".join(problematic))
+ log.debug("ignoring devices in use on disks with changes: %s",
+ ",".join(problematic))
log.info("resetting parted disks...")
for device in devices:
--
2.34.3


@@ -0,0 +1,65 @@
From 070b33c1a80e5740abd7878118a23eaaca1e3460 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 13 Apr 2022 15:43:45 +0200
Subject: [PATCH] ActionDestroyDevice should not obsolete ActionRemoveMember
If we want to remove a PV from a VG and then remove the PV device,
the ActionDestroyDevice must not obsolete the ActionRemoveMember
action. Even though we are going to remove the device, we still
need to call "vgreduce" first.
Resolves: rhbz#2076956
---
blivet/deviceaction.py | 10 +++++-----
tests/action_test.py | 7 +++++++
2 files changed, 12 insertions(+), 5 deletions(-)
diff --git a/blivet/deviceaction.py b/blivet/deviceaction.py
index 0458e4be..78e113bf 100644
--- a/blivet/deviceaction.py
+++ b/blivet/deviceaction.py
@@ -463,8 +463,8 @@ class ActionDestroyDevice(DeviceAction):
- obsoletes all actions w/ lower id that act on the same device,
including self, if device does not exist
- - obsoletes all but ActionDestroyFormat actions w/ lower id on the
- same device if device exists
+ - obsoletes all but ActionDestroyFormat and ActionRemoveMember actions
+ w/ lower id on the same device if device exists
- obsoletes all actions that add a member to this action's
(container) device
@@ -474,9 +474,9 @@ class ActionDestroyDevice(DeviceAction):
if action.device.id == self.device.id:
if self.id >= action.id and not self.device.exists:
rc = True
- elif self.id > action.id and \
- self.device.exists and \
- not (action.is_destroy and action.is_format):
+ elif self.id > action.id and self.device.exists and \
+ not ((action.is_destroy and action.is_format) or
+ action.is_remove):
rc = True
elif action.is_add and (action.device == self.device):
rc = True
diff --git a/tests/action_test.py b/tests/action_test.py
index 8509ce35..626b9b49 100644
--- a/tests/action_test.py
+++ b/tests/action_test.py
@@ -1197,6 +1197,13 @@ class DeviceActionTestCase(StorageTestCase):
self.assertEqual(create_sdc2.requires(remove_sdc1), False)
self.assertEqual(remove_sdc1.requires(create_sdc2), False)
+ # destroy sdc1, the ActionRemoveMember should not be obsoleted
+ sdc1.exists = True
+ destroy_sdc1 = ActionDestroyDevice(sdc1)
+ destroy_sdc1.apply()
+ self.assertFalse(destroy_sdc1.obsoletes(remove_sdc1))
+ self.assertTrue(destroy_sdc1.requires(remove_sdc1))
+
def test_action_sorting(self, *args, **kwargs):
""" Verify correct functioning of action sorting. """
--
2.34.3
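
The ordering constraint can be sketched without blivet: destroying a device that still exists must not swallow a pending remove-member action, because the vgreduce step has to run before the device goes away. A simplified stand-in for the updated obsoletion check, with hypothetical dictionaries in place of blivet's action objects:

def destroy_obsoletes(destroy_id, device_exists, other):
    # mirrors the patched rule: keep destroy-format and remove-member actions alive
    if not device_exists:
        return destroy_id >= other["id"]
    return (destroy_id > other["id"] and
            not ((other["is_destroy"] and other["is_format"]) or other["is_remove"]))

remove_member = {"id": 1, "is_destroy": False, "is_format": False, "is_remove": True}
print(destroy_obsoletes(2, True, remove_member))   # False -- vgreduce still runs first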


@@ -0,0 +1,63 @@
From ea5054b0cab19f3fe09d7010f8721e7f18ae399e Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 2 May 2022 15:30:16 +0200
Subject: [PATCH] Correctly set vg_name after adding/removing a PV from a VG
Without setting the LVMPhysicalVolume.vg_name argument to None
after removing the PV from its VG, the PV is still considered
active and cannot be removed.
Resolves: rhbz#2081278
---
blivet/devices/lvm.py | 3 +++
tests/devices_test/lvm_test.py | 13 +++++++++++++
2 files changed, 16 insertions(+)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 9c230f1b..a971da8e 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -385,6 +385,8 @@ class LVMVolumeGroupDevice(ContainerDevice):
if not parent.format.exists:
parent.format.free = self._get_pv_usable_space(parent)
+ parent.format.vg_name = self.name
+
def _remove_parent(self, parent):
# XXX It would be nice to raise an exception if removing this member
# would not leave enough space, but the devicefactory relies on it
@@ -395,6 +397,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
super(LVMVolumeGroupDevice, self)._remove_parent(parent)
parent.format.free = None
parent.format.container_uuid = None
+ parent.format.vg_name = None
# We can't rely on lvm to tell us about our size, free space, &c
# since we could have modifications queued, unless the VG and all of
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
index 5efa369e..59c027da 100644
--- a/tests/devices_test/lvm_test.py
+++ b/tests/devices_test/lvm_test.py
@@ -454,6 +454,19 @@ class LVMDeviceTest(unittest.TestCase):
pool.autoset_md_size(enforced=True)
self.assertEqual(pool.chunk_size, Size("128 KiB"))
+ def test_add_remove_pv(self):
+ pv1 = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("1024 MiB"))
+ pv2 = StorageDevice("pv2", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("1024 MiB"))
+ vg = LVMVolumeGroupDevice("testvg", parents=[pv1])
+
+ vg._add_parent(pv2)
+ self.assertEqual(pv2.format.vg_name, vg.name)
+
+ vg._remove_parent(pv2)
+ self.assertEqual(pv2.format.vg_name, None)
+
class TypeSpecificCallsTest(unittest.TestCase):
def test_type_specific_calls(self):
--
2.34.3


@@ -0,0 +1,588 @@
From 08f0e12c74e4c2ba25629fe92108283dd5ae3ff3 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 30 Dec 2021 16:08:43 +0100
Subject: [PATCH 1/4] Add support for creating LVM cache pools
Resolves: rhbz#2055200
---
blivet/blivet.py | 9 +-
blivet/devicelibs/lvm.py | 9 ++
blivet/devices/lvm.py | 160 +++++++++++++++++++++++++++++++--
tests/devices_test/lvm_test.py | 26 ++++++
4 files changed, 196 insertions(+), 8 deletions(-)
diff --git a/blivet/blivet.py b/blivet/blivet.py
index c6908eb0..d29fadd0 100644
--- a/blivet/blivet.py
+++ b/blivet/blivet.py
@@ -576,6 +576,8 @@ class Blivet(object):
:type vdo_pool: bool
:keyword vdo_lv: whether to create a vdo lv
:type vdo_lv: bool
+ :keyword cache_pool: whether to create a cache pool
+ :type cache_pool: bool
:returns: the new device
:rtype: :class:`~.devices.LVMLogicalVolumeDevice`
@@ -594,6 +596,7 @@ class Blivet(object):
thin_pool = kwargs.pop("thin_pool", False)
vdo_pool = kwargs.pop("vdo_pool", False)
vdo_lv = kwargs.pop("vdo_lv", False)
+ cache_pool = kwargs.pop("cache_pool", False)
parent = kwargs.get("parents", [None])[0]
if (thin_volume or vdo_lv) and parent:
# kwargs["parents"] will contain the pool device, so...
@@ -609,6 +612,8 @@ class Blivet(object):
kwargs["seg_type"] = "vdo-pool"
if vdo_lv:
kwargs["seg_type"] = "vdo"
+ if cache_pool:
+ kwargs["seg_type"] = "cache-pool"
mountpoint = kwargs.pop("mountpoint", None)
if 'fmt_type' in kwargs:
@@ -640,7 +645,7 @@ class Blivet(object):
swap = False
prefix = ""
- if thin_pool or vdo_pool:
+ if thin_pool or vdo_pool or cache_pool:
prefix = "pool"
name = self.suggest_device_name(parent=vg,
@@ -651,7 +656,7 @@ class Blivet(object):
if "%s-%s" % (vg.name, name) in self.names:
raise ValueError("name '%s' is already in use" % name)
- if thin_pool or thin_volume or vdo_pool or vdo_lv:
+ if thin_pool or thin_volume or vdo_pool or vdo_lv or cache_pool:
cache_req = kwargs.pop("cache_request", None)
if cache_req:
raise ValueError("Creating cached thin and VDO volumes and pools is not supported")
diff --git a/blivet/devicelibs/lvm.py b/blivet/devicelibs/lvm.py
index bbde6303..23935009 100644
--- a/blivet/devicelibs/lvm.py
+++ b/blivet/devicelibs/lvm.py
@@ -54,6 +54,11 @@ LVM_THINP_MIN_CHUNK_SIZE = Size("64 KiB")
LVM_THINP_MAX_CHUNK_SIZE = Size("1 GiB")
LVM_THINP_ADDRESSABLE_CHUNK_SIZE = Size("17455015526400 B") # 15.88 TiB
+# cache constants
+LVM_CACHE_MIN_METADATA_SIZE = Size("8 MiB")
+LVM_CACHE_MAX_METADATA_SIZE = Size("16 GiB")
+LVM_CACHE_DEFAULT_MODE = blockdev.LVMCacheMode.WRITETHROUGH
+
raid_levels = raid.RAIDLevels(["linear", "striped", "raid1", "raid4", "raid5", "raid6", "raid10"])
raid_seg_types = list(itertools.chain.from_iterable([level.names for level in raid_levels if level.name != "linear"]))
@@ -248,3 +253,7 @@ def recommend_thpool_chunk_size(thpool_size):
# for every ~15.88 TiB of thinpool data size
return min(math.ceil(thpool_size / LVM_THINP_ADDRESSABLE_CHUNK_SIZE) * LVM_THINP_MIN_CHUNK_SIZE,
LVM_THINP_MAX_CHUNK_SIZE)
+
+
+def is_valid_cache_md_size(md_size):
+ return md_size >= LVM_CACHE_MIN_METADATA_SIZE and md_size <= LVM_CACHE_MAX_METADATA_SIZE
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index a971da8e..7cb482ab 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -43,6 +43,7 @@ from .. import util
from ..storage_log import log_method_call
from .. import udev
from ..size import Size, KiB, MiB, ROUND_UP, ROUND_DOWN
+from ..static_data.lvm_info import lvs_info
from ..tasks import availability
import logging
@@ -646,7 +647,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
percent=None, cache_request=None, pvs=None, from_lvs=None):
if not exists:
- if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo"] + lvm.raid_seg_types:
+ if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo", "cache-pool"] + lvm.raid_seg_types:
raise ValueError("Invalid or unsupported segment type: %s" % seg_type)
if seg_type and seg_type in lvm.raid_seg_types and not pvs:
raise ValueError("List of PVs has to be given for every non-linear LV")
@@ -690,8 +691,8 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
# we reserve space for it
self._metadata_size = self.vg.pe_size
self._size -= self._metadata_size
- elif self.seg_type == "thin-pool":
- # LVMThinPoolMixin sets self._metadata_size on its own
+ elif self.seg_type in ("thin-pool", "cache_pool"):
+ # LVMThinPoolMixin and LVMCachePoolMixin set self._metadata_size on their own
if not self.exists and not from_lvs and not grow:
# a thin pool we are not going to grow -> lets calculate metadata
# size now if not given explicitly
@@ -1619,7 +1620,6 @@ class LVMThinPoolMixin(object):
""" A list of this pool's LVs """
return self._lvs[:] # we don't want folks changing our list
- @util.requires_property("is_thin_pool")
def autoset_md_size(self, enforced=False):
""" If self._metadata_size not set already, it calculates the recommended value
and sets it while subtracting the size from self.size.
@@ -2032,9 +2032,142 @@ class LVMVDOLogicalVolumeMixin(object):
self.pool._add_log_vol(self)
+class LVMCachePoolMixin(object):
+ def __init__(self, metadata_size, cache_mode=None):
+ self._metadata_size = metadata_size or Size(0)
+ self._cache_mode = cache_mode
+
+ def _init_check(self):
+ if not self.is_cache_pool:
+ return
+
+ if self._metadata_size and not lvm.is_valid_cache_md_size(self._metadata_size):
+ raise ValueError("invalid metadatasize value")
+
+ if not self.exists and not self._pv_specs:
+ raise ValueError("at least one fast PV must be specified to create a cache pool")
+
+ def _check_from_lvs(self):
+ if self._from_lvs:
+ if len(self._from_lvs) != 2:
+ raise errors.DeviceError("two LVs required to create a cache pool")
+
+ def _convert_from_lvs(self):
+ data_lv, metadata_lv = self._from_lvs
+
+ data_lv.parent_lv = self # also adds the LV to self._internal_lvs
+ data_lv.int_lv_type = LVMInternalLVtype.data
+ metadata_lv.parent_lv = self
+ metadata_lv.int_lv_type = LVMInternalLVtype.meta
+
+ self.size = data_lv.size
+
+ @property
+ def is_cache_pool(self):
+ return self.seg_type == "cache-pool"
+
+ @property
+ def profile(self):
+ return self._profile
+
+ @property
+ def type(self):
+ return "lvmcachepool"
+
+ @property
+ def resizable(self):
+ return False
+
+ def read_current_size(self):
+ log_method_call(self, exists=self.exists, path=self.path,
+ sysfs_path=self.sysfs_path)
+ if self.size != Size(0):
+ return self.size
+
+ if self.exists:
+ # cache pools are not active and don't have th device mapper mapping
+ # so we can't get this from sysfs
+ lv_info = lvs_info.cache.get(self.name)
+ if lv_info is None:
+ log.error("Failed to get size for existing cache pool '%s'", self.name)
+ return Size(0)
+ else:
+ return Size(lv_info.size)
+
+ return Size(0)
+
+ def autoset_md_size(self, enforced=False):
+ """ If self._metadata_size not set already, it calculates the recommended value
+ and sets it while subtracting the size from self.size.
+
+ """
+
+ log.debug("Auto-setting cache pool metadata size")
+
+ if self._size <= Size(0):
+ log.debug("Cache pool size not bigger than 0, just setting metadata size to 0")
+ self._metadata_size = 0
+ return
+
+ old_md_size = self._metadata_size
+ if self._metadata_size == 0 or enforced:
+ self._metadata_size = blockdev.lvm.cache_get_default_md_size(self._size)
+ log.debug("Using recommended metadata size: %s", self._metadata_size)
+
+ self._metadata_size = self.vg.align(self._metadata_size, roundup=True)
+ log.debug("Rounded metadata size to extents: %s MiB", self._metadata_size.convert_to("MiB"))
+
+ if self._metadata_size == old_md_size:
+ log.debug("Rounded metadata size unchanged")
+ else:
+ new_size = self.size - (self._metadata_size - old_md_size)
+ log.debug("Adjusting size from %s MiB to %s MiB",
+ self.size.convert_to("MiB"), new_size.convert_to("MiB"))
+ self.size = new_size
+
+ def _pre_create(self):
+ # make sure all the LVs this LV should be created from exist (if any)
+ if self._from_lvs and any(not lv.exists for lv in self._from_lvs):
+ raise errors.DeviceError("Component LVs need to be created first")
+
+ def _create(self):
+ """ Create the device. """
+ log_method_call(self, self.name, status=self.status)
+ if self._cache_mode:
+ try:
+ cache_mode = blockdev.lvm.cache_get_mode_from_str(self._cache_mode)
+ except blockdev.LVMError as e:
+ raise errors.DeviceError from e
+ else:
+ cache_mode = lvm.LVM_CACHE_DEFAULT_MODE
+
+ if self._from_lvs:
+ extra = dict()
+ if self.mode:
+ # we need the string here, it will be passed directly to he lvm command
+ extra["cachemode"] = self._cache_mode
+ data_lv = six.next(lv for lv in self._internal_lvs if lv.int_lv_type == LVMInternalLVtype.data)
+ meta_lv = six.next(lv for lv in self._internal_lvs if lv.int_lv_type == LVMInternalLVtype.meta)
+ blockdev.lvm.cache_pool_convert(self.vg.name, data_lv.lvname, meta_lv.lvname, self.lvname, **extra)
+ else:
+ blockdev.lvm.cache_create_pool(self.vg.name, self.lvname, self.size,
+ self.metadata_size,
+ cache_mode,
+ 0,
+ [spec.pv.path for spec in self._pv_specs])
+
+ def dracut_setup_args(self):
+ return set()
+
+ @property
+ def direct(self):
+ """ Is this device directly accessible? """
+ return False
+
+
class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin, LVMSnapshotMixin,
LVMThinPoolMixin, LVMThinLogicalVolumeMixin, LVMVDOPoolMixin,
- LVMVDOLogicalVolumeMixin):
+ LVMVDOLogicalVolumeMixin, LVMCachePoolMixin):
""" An LVM Logical Volume """
# generally resizable, see :property:`resizable` for details
@@ -2046,7 +2179,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
parent_lv=None, int_type=None, origin=None, vorigin=False,
metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
compression=False, deduplication=False, index_memory=0,
- write_policy=None):
+ write_policy=None, cache_mode=None):
"""
:param name: the device name (generally a device node's basename)
:type name: str
@@ -2116,6 +2249,13 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
:keyword write_policy: write policy for the volume or None for default
:type write_policy: str
+ For cache pools only:
+
+ :keyword metadata_size: the size of the metadata LV
+ :type metadata_size: :class:`~.size.Size`
+ :keyword cache_mode: mode for the cache or None for default (writethrough)
+ :type cache_mode: str
+
"""
if isinstance(parents, (list, ParentList)):
@@ -2133,6 +2273,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
LVMSnapshotMixin.__init__(self, origin, vorigin)
LVMThinPoolMixin.__init__(self, metadata_size, chunk_size, profile)
LVMThinLogicalVolumeMixin.__init__(self)
+ LVMCachePoolMixin.__init__(self, metadata_size, cache_mode)
LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
fmt, exists, sysfs_path, grow, maxsize,
percent, cache_request, pvs, from_lvs)
@@ -2144,6 +2285,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
LVMSnapshotMixin._init_check(self)
LVMThinPoolMixin._init_check(self)
LVMThinLogicalVolumeMixin._init_check(self)
+ LVMCachePoolMixin._init_check(self)
if self._from_lvs:
self._check_from_lvs()
@@ -2169,6 +2311,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
ret.append(LVMVDOPoolMixin)
if self.is_vdo_lv:
ret.append(LVMVDOLogicalVolumeMixin)
+ if self.is_cache_pool:
+ ret.append(LVMCachePoolMixin)
return ret
def _try_specific_call(self, name, *args, **kwargs):
@@ -2552,6 +2696,10 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
return True
+ @type_specific
+ def autoset_md_size(self, enforced=False):
+ pass
+
def attach_cache(self, cache_pool_lv):
if self.is_thin_lv or self.is_snapshot_lv or self.is_internal_lv:
raise errors.DeviceError("Cannot attach a cache pool to the '%s' LV" % self.name)
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
index 59c027da..0105bcae 100644
--- a/tests/devices_test/lvm_test.py
+++ b/tests/devices_test/lvm_test.py
@@ -868,3 +868,29 @@ class BlivetLVMVDODependenciesTest(unittest.TestCase):
vdo_supported = devicefactory.is_supported_device_type(devicefactory.DEVICE_TYPE_LVM_VDO)
self.assertFalse(vdo_supported)
+
+
+@unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES), "some unsupported device classes required for this test")
+class BlivetNewLVMCachePoolDeviceTest(unittest.TestCase):
+
+ def test_new_cache_pool(self):
+ b = blivet.Blivet()
+ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("10 GiB"), exists=True)
+ vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
+
+ for dev in (pv, vg):
+ b.devicetree._add_device(dev)
+
+ # check that all the above devices are in the expected places
+ self.assertEqual(set(b.devices), {pv, vg})
+ self.assertEqual(set(b.vgs), {vg})
+
+ self.assertEqual(vg.size, Size("10236 MiB"))
+
+ cachepool = b.new_lv(name="cachepool", cache_pool=True,
+ parents=[vg], pvs=[pv])
+
+ b.create_device(cachepool)
+
+ self.assertEqual(cachepool.type, "lvmcachepool")
--
2.34.3
From bfb0e71a92f46baae098370207640962c97d8e77 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 30 Dec 2021 16:09:04 +0100
Subject: [PATCH 2/4] examples: Add LVM cache pool example
Related: rhbz#2055200
---
examples/lvm_cachepool.py | 59 +++++++++++++++++++++++++++++++++++++++
1 file changed, 59 insertions(+)
create mode 100644 examples/lvm_cachepool.py
diff --git a/examples/lvm_cachepool.py b/examples/lvm_cachepool.py
new file mode 100644
index 00000000..ab2e8a72
--- /dev/null
+++ b/examples/lvm_cachepool.py
@@ -0,0 +1,59 @@
+import os
+
+import blivet
+from blivet.size import Size
+from blivet.util import set_up_logging, create_sparse_tempfile
+
+
+set_up_logging()
+b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
+
+# create a disk image file on which to create new devices
+disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
+b.disk_images["disk1"] = disk1_file
+disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
+b.disk_images["disk2"] = disk2_file
+
+b.reset()
+
+try:
+ disk1 = b.devicetree.get_device_by_name("disk1")
+ disk2 = b.devicetree.get_device_by_name("disk2")
+
+ b.initialize_disk(disk1)
+ b.initialize_disk(disk2)
+
+ pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
+ b.create_device(pv)
+ pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
+ b.create_device(pv2)
+
+ # allocate the partitions (decide where and on which disks they'll reside)
+ blivet.partitioning.do_partitioning(b)
+
+ vg = b.new_vg(parents=[pv, pv2])
+ b.create_device(vg)
+
+ # new lv with base size 5GiB and growth up to 15GiB and an ext4 filesystem
+ lv = b.new_lv(fmt_type="ext4", size=Size("5GiB"), parents=[vg], name="cached")
+ b.create_device(lv)
+
+ # new cache pool
+ cpool = b.new_lv(size=Size("1 GiB"), parents=[vg], pvs=[pv2], cache_pool=True, name="fastlv")
+ b.create_device(cpool)
+
+ # write the new partitions to disk and format them as specified
+ b.do_it()
+ print(b.devicetree)
+
+ # attach the newly created cache pool to the "slow" LV
+ lv.attach_cache(cpool)
+
+ b.reset()
+ print(b.devicetree)
+
+ input("Check the state and hit ENTER to trigger cleanup")
+finally:
+ b.devicetree.teardown_disk_images()
+ os.unlink(disk1_file)
+ os.unlink(disk2_file)
--
2.34.3
From 1fece0e7f15f7b0d457d3db876d23c3272df09bd Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 30 Dec 2021 16:13:33 +0100
Subject: [PATCH 3/4] lvm: Use blivet static data when checking if the VG is
active
Instead of calling 'lvs' again in LVMVolumeGroupDevice.status
Related: rhbz#2055200
---
blivet/devices/lvm.py | 9 ++-------
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 7cb482ab..12d3d073 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -220,13 +220,8 @@ class LVMVolumeGroupDevice(ContainerDevice):
# special handling for incomplete VGs
if not self.complete:
- try:
- lvs_info = blockdev.lvm.lvs(vg_name=self.name)
- except blockdev.LVMError:
- lvs_info = []
-
- for lv_info in lvs_info:
- if lv_info.attr and lv_info.attr[4] == 'a':
+ for lv_info in lvs_info.cache.values():
+ if lv_info.vg_name == self.name and lv_info.attr and lv_info.attr[4] == 'a':
return True
return False
--
2.34.3
From 8d957f04c2d5f56386b978d1bf890450f38ad108 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 30 May 2022 17:02:43 +0200
Subject: [PATCH 4/4] Add option to attach a newly created cache pool to
existing LV
Because we do not have an action for attaching the cache pool, we
cannot schedule both adding the fast PV to the VG and attaching
the cache pool to an existing LV. This hack allows scheduling the
attach to happen after the cache pool is created.
Related: rhbz#2055200
---
blivet/devices/lvm.py | 38 +++++++++++++++++++++++++++++++++++---
1 file changed, 35 insertions(+), 3 deletions(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 12d3d073..feb92f2e 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -2028,9 +2028,10 @@ class LVMVDOLogicalVolumeMixin(object):
class LVMCachePoolMixin(object):
- def __init__(self, metadata_size, cache_mode=None):
+ def __init__(self, metadata_size, cache_mode=None, attach_to=None):
self._metadata_size = metadata_size or Size(0)
self._cache_mode = cache_mode
+ self._attach_to = attach_to
def _init_check(self):
if not self.is_cache_pool:
@@ -2042,6 +2043,9 @@ class LVMCachePoolMixin(object):
if not self.exists and not self._pv_specs:
raise ValueError("at least one fast PV must be specified to create a cache pool")
+ if self._attach_to and not self._attach_to.exists:
+ raise ValueError("cache pool can be attached only to an existing LV")
+
def _check_from_lvs(self):
if self._from_lvs:
if len(self._from_lvs) != 2:
@@ -2150,6 +2154,31 @@ class LVMCachePoolMixin(object):
cache_mode,
0,
[spec.pv.path for spec in self._pv_specs])
+ if self._attach_to:
+ self._attach_to.attach_cache(self)
+
+ def _post_create(self):
+ if self._attach_to:
+ # post_create tries to activate the LV and after attaching it no longer exists
+ return
+
+ # pylint: disable=bad-super-call
+ super(LVMLogicalVolumeBase, self)._post_create()
+
+ def add_hook(self, new=True):
+ if self._attach_to:
+ self._attach_to._cache = LVMCache(self._attach_to, size=self.size, exists=False,
+ pvs=self._pv_specs, mode=self._cache_mode)
+
+ # pylint: disable=bad-super-call
+ super(LVMLogicalVolumeBase, self).add_hook(new=new)
+
+ def remove_hook(self, modparent=True):
+ if self._attach_to:
+ self._attach_to._cache = None
+
+ # pylint: disable=bad-super-call
+ super(LVMLogicalVolumeBase, self).remove_hook(modparent=modparent)
def dracut_setup_args(self):
return set()
@@ -2174,7 +2203,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
parent_lv=None, int_type=None, origin=None, vorigin=False,
metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
compression=False, deduplication=False, index_memory=0,
- write_policy=None, cache_mode=None):
+ write_policy=None, cache_mode=None, attach_to=None):
"""
:param name: the device name (generally a device node's basename)
:type name: str
@@ -2250,6 +2279,9 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
:type metadata_size: :class:`~.size.Size`
:keyword cache_mode: mode for the cache or None for default (writethrough)
:type cache_mode: str
+ :keyword attach_to: for non-existing cache pools a logical volume the pool should
+ be attached to when created
+ :type attach_to: :class:`LVMLogicalVolumeDevice`
"""
@@ -2268,7 +2300,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
LVMSnapshotMixin.__init__(self, origin, vorigin)
LVMThinPoolMixin.__init__(self, metadata_size, chunk_size, profile)
LVMThinLogicalVolumeMixin.__init__(self)
- LVMCachePoolMixin.__init__(self, metadata_size, cache_mode)
+ LVMCachePoolMixin.__init__(self, metadata_size, cache_mode, attach_to)
LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
fmt, exists, sysfs_path, grow, maxsize,
percent, cache_request, pvs, from_lvs)
--
2.34.3
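
Taken together, the four patches above allow a workflow like the following sketch. It assumes a populated Blivet instance and uses hypothetical device names, so treat it as an illustration of the new cache_pool/attach_to keywords rather than a verified recipe:

import blivet
from blivet.size import Size

b = blivet.Blivet()
b.reset()

# hypothetical names: an existing VG, a slow LV in it, and a fast PV already added to the VG
vg = b.devicetree.get_device_by_name("testvg")
slow_lv = b.devicetree.get_device_by_name("testvg-slowlv")
fast_pv = b.devicetree.get_device_by_name("sdb1")

# schedule a cache pool on the fast PV and attach it to the existing LV once it is created
cpool = b.new_lv(name="fastpool", size=Size("1 GiB"), cache_pool=True,
                 parents=[vg], pvs=[fast_pv], attach_to=slow_lv)
b.create_device(cpool)
b.do_it()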


@@ -0,0 +1,29 @@
From 4103df5ddaae49d51640d01502e8456409a92be9 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 5 May 2022 16:35:37 +0200
Subject: [PATCH] Use LVM PV format current_size in
LVMVolumeGroupDevice._remove
The member format size is 0 when target size is not set.
Related: rhbz#2081278
---
blivet/devices/lvm.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index feb92f2e..facb1b76 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -289,7 +289,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
# do not run pvmove on empty PVs
member.format.update_size_info()
- if member.format.free < member.format.size:
+ if member.format.free < member.format.current_size:
blockdev.lvm.pvmove(member.path)
blockdev.lvm.vgreduce(self.name, member.path)
--
2.34.3
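
The one-line change matters because "size" on an existing PV format reflects the (unset) target size and therefore reads as 0, while "current_size" is the detected on-disk size; the pvmove-before-vgreduce decision only makes sense against the latter. A hedged sketch with plain numbers standing in for the format attributes:

from blivet.size import Size

free = Size("2 GiB")            # unused space reported for the PV
size = Size(0)                  # target size -- stays 0 when no resize is scheduled
current_size = Size("10 GiB")   # detected size of the existing PV

print(free < size)              # False -- the old check would skip the needed pvmove
print(free < current_size)      # True  -- data on the PV is moved before vgreduce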


@@ -0,0 +1,36 @@
From a709c4db1bcf2e7ff69158a54ed3a1ea92ba4f97 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 14 Oct 2021 08:48:05 +0200
Subject: [PATCH] tests: Mark "fake" disks in test_get_related_disks as
non-existing
We are using "real" disk names ("sda", "sdb"...) in this test, so
we need to avoid reading their real sizes, which we do for existing
devices using os.stat. The test can fail if the system has a disk with
the same name and a small (or zero) size.
Resolves: rhbz#2062690
---
tests/devicetree_test.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/tests/devicetree_test.py b/tests/devicetree_test.py
index 3be4d572..c1b97c52 100644
--- a/tests/devicetree_test.py
+++ b/tests/devicetree_test.py
@@ -406,9 +406,9 @@ class DeviceTreeTestCase(unittest.TestCase):
def test_get_related_disks(self):
tree = DeviceTree()
- sda = DiskDevice("sda", size=Size('300g'))
- sdb = DiskDevice("sdb", size=Size('300g'))
- sdc = DiskDevice("sdc", size=Size('300G'))
+ sda = DiskDevice("sda", size=Size('300g'), exists=False)
+ sdb = DiskDevice("sdb", size=Size('300g'), exists=False)
+ sdc = DiskDevice("sdc", size=Size('300G'), exists=False)
tree._add_device(sda)
tree._add_device(sdb)
--
2.34.3


@@ -0,0 +1,789 @@
From 35643156c511c8120f2d562c1664a3c7a5a48cfb Mon Sep 17 00:00:00 2001
From: Jan Stodola <jstodola@redhat.com>
Date: Thu, 28 Oct 2021 21:17:25 +0200
Subject: [PATCH 1/8] Fix removing zFCP SCSI devices
Values parsed from /proc/scsi/scsi were not correctly used to assemble
paths to SCSI devices.
For example:
/sys/bus/scsi/devices/0:0:00:00/
was incorrectly accessed instead of:
/sys/bus/scsi/devices/0:0:0:0/
Switch to a more reliable way of listing the available SCSI devices.
Related: rhbz#1937030
---
blivet/zfcp.py | 17 ++++-------------
1 file changed, 4 insertions(+), 13 deletions(-)
diff --git a/blivet/zfcp.py b/blivet/zfcp.py
index 93af5419..3747290e 100644
--- a/blivet/zfcp.py
+++ b/blivet/zfcp.py
@@ -20,6 +20,7 @@
#
import os
+import re
from . import udev
from . import util
from .i18n import _
@@ -167,20 +168,10 @@ class ZFCPDevice:
return True
def offline_scsi_device(self):
- f = open("/proc/scsi/scsi", "r")
- lines = f.readlines()
- f.close()
- # alternatively iterate over /sys/bus/scsi/devices/*:0:*:*/
+ # A list of existing SCSI devices in format Host:Bus:Target:Lun
+ scsi_devices = [f for f in os.listdir(scsidevsysfs) if re.search(r'^[0-9]+:[0-9]+:[0-9]+:[0-9]+$', f)]
- for line in lines:
- if not line.startswith("Host"):
- continue
- scsihost = line.split()
- host = scsihost[1]
- channel = "0"
- devid = scsihost[5]
- lun = scsihost[7]
- scsidev = "%s:%s:%s:%s" % (host[4:], channel, devid, lun)
+ for scsidev in scsi_devices:
fcpsysfs = "%s/%s" % (scsidevsysfs, scsidev)
scsidel = "%s/%s/delete" % (scsidevsysfs, scsidev)
--
2.34.3
From 771cbf623030b1fa51ec193a2b5e2db229420a7a Mon Sep 17 00:00:00 2001
From: Jan Stodola <jstodola@redhat.com>
Date: Sun, 21 Nov 2021 02:47:45 +0100
Subject: [PATCH 2/8] Refactor the ZFCPDevice class
Add a new base class for zFCP devices.
Move code to the new base class.
Improve documentation.
Related: rhbz#1937030
---
blivet/zfcp.py | 131 +++++++++++++++++++++++++++++++++++--------------
1 file changed, 95 insertions(+), 36 deletions(-)
diff --git a/blivet/zfcp.py b/blivet/zfcp.py
index 3747290e..4a50f65f 100644
--- a/blivet/zfcp.py
+++ b/blivet/zfcp.py
@@ -21,6 +21,7 @@
import os
import re
+from abc import ABC
from . import udev
from . import util
from .i18n import _
@@ -46,29 +47,19 @@ zfcpsysfs = "/sys/bus/ccw/drivers/zfcp"
scsidevsysfs = "/sys/bus/scsi/devices"
zfcpconf = "/etc/zfcp.conf"
+class ZFCPDeviceBase(ABC):
+ """An abstract base class for zFCP storage devices."""
-class ZFCPDevice:
- """
- .. warning::
- Since this is a singleton class, calling deepcopy() on the instance
- just returns ``self`` with no copy being created.
- """
-
- def __init__(self, devnum, wwpn, fcplun):
+ def __init__(self, devnum):
self.devnum = blockdev.s390.sanitize_dev_input(devnum)
- self.wwpn = blockdev.s390.zfcp_sanitize_wwpn_input(wwpn)
- self.fcplun = blockdev.s390.zfcp_sanitize_lun_input(fcplun)
-
if not self.devnum:
raise ValueError(_("You have not specified a device number or the number is invalid"))
- if not self.wwpn:
- raise ValueError(_("You have not specified a worldwide port name or the name is invalid."))
- if not self.fcplun:
- raise ValueError(_("You have not specified a FCP LUN or the number is invalid."))
+
+ self._device_online_path = os.path.join(zfcpsysfs, self.devnum, "online")
# Force str and unicode types in case any of the properties are unicode
def _to_string(self):
- return "%s %s %s" % (self.devnum, self.wwpn, self.fcplun)
+ return str(self.devnum)
def __str__(self):
return stringize(self._to_string())
@@ -76,33 +67,97 @@ class ZFCPDevice:
def __unicode__(self):
return unicodeize(self._to_string())
- def online_device(self):
- online = "%s/%s/online" % (zfcpsysfs, self.devnum)
- portadd = "%s/%s/port_add" % (zfcpsysfs, self.devnum)
- portdir = "%s/%s/%s" % (zfcpsysfs, self.devnum, self.wwpn)
- unitadd = "%s/unit_add" % (portdir)
- unitdir = "%s/%s" % (portdir, self.fcplun)
- failed = "%s/failed" % (unitdir)
+ def _free_device(self):
+ """Remove the device from the I/O ignore list to make it visible to the system.
+
+ :raises: ValueError if the device cannot be removed from the I/O ignore list
+ """
- if not os.path.exists(online):
+ if not os.path.exists(self._device_online_path):
log.info("Freeing zFCP device %s", self.devnum)
util.run_program(["zfcp_cio_free", "-d", self.devnum])
- if not os.path.exists(online):
+ if not os.path.exists(self._device_online_path):
raise ValueError(_("zFCP device %s not found, not even in device ignore list.") %
(self.devnum,))
+ def _set_zfcp_device_online(self):
+ """Set the zFCP device online.
+
+ :raises: ValueError if the device cannot be set online
+ """
+
try:
- f = open(online, "r")
- devonline = f.readline().strip()
- f.close()
+ with open(self._device_online_path) as f:
+ devonline = f.readline().strip()
if devonline != "1":
- logged_write_line_to_file(online, "1")
+ logged_write_line_to_file(self._device_online_path, "1")
except OSError as e:
raise ValueError(_("Could not set zFCP device %(devnum)s "
"online (%(e)s).")
% {'devnum': self.devnum, 'e': e})
+ def _set_zfcp_device_offline(self):
+ """Set the zFCP device offline.
+
+ :raises: ValueError if the device cannot be set offline
+ """
+
+ try:
+ logged_write_line_to_file(self._device_online_path, "0")
+ except OSError as e:
+ raise ValueError(_("Could not set zFCP device %(devnum)s "
+ "offline (%(e)s).")
+ % {'devnum': self.devnum, 'e': e})
+
+ def online_device(self):
+ """Initialize the device and make its storage block device(s) ready to use.
+
+ :returns: True if success
+ :raises: ValueError if the device cannot be initialized
+ """
+
+ self._free_device()
+ self._set_zfcp_device_online()
+ return True
+
+
+class ZFCPDevice(ZFCPDeviceBase):
+ """A class for zFCP devices that are not configured in NPIV mode. Such
+ devices have to be specified by a device number, WWPN and LUN.
+ """
+
+ def __init__(self, devnum, wwpn, fcplun):
+ super().__init__(devnum)
+
+ self.wwpn = blockdev.s390.zfcp_sanitize_wwpn_input(wwpn)
+ if not self.wwpn:
+ raise ValueError(_("You have not specified a worldwide port name or the name is invalid."))
+
+ self.fcplun = blockdev.s390.zfcp_sanitize_lun_input(fcplun)
+ if not self.fcplun:
+ raise ValueError(_("You have not specified a FCP LUN or the number is invalid."))
+
+ # Force str and unicode types in case any of the properties are unicode
+ def _to_string(self):
+ return "{} {} {}".format(self.devnum, self.wwpn, self.fcplun)
+
+ def online_device(self):
+ """Initialize the device and make its storage block device(s) ready to use.
+
+ :returns: True if success
+ :raises: ValueError if the device cannot be initialized
+ """
+
+ super().online_device()
+
+ portadd = "%s/%s/port_add" % (zfcpsysfs, self.devnum)
+ portdir = "%s/%s/%s" % (zfcpsysfs, self.devnum, self.wwpn)
+ unitadd = "%s/unit_add" % (portdir)
+ unitdir = "%s/%s" % (portdir, self.fcplun)
+ failed = "%s/failed" % (unitdir)
+
+ # create the sysfs directory for the WWPN/port
if not os.path.exists(portdir):
if os.path.exists(portadd):
# older zfcp sysfs interface
@@ -127,6 +182,7 @@ class ZFCPDevice:
"there.", {'wwpn': self.wwpn,
'devnum': self.devnum})
+ # create the sysfs directory for the LUN/unit
if not os.path.exists(unitdir):
try:
logged_write_line_to_file(unitadd, self.fcplun)
@@ -144,6 +200,7 @@ class ZFCPDevice:
'wwpn': self.wwpn,
'devnum': self.devnum})
+ # check the state of the LUN
fail = "0"
try:
f = open(failed, "r")
@@ -168,6 +225,8 @@ class ZFCPDevice:
return True
def offline_scsi_device(self):
+ """Find SCSI devices associated to the zFCP device and remove them from the system."""
+
# A list of existing SCSI devices in format Host:Bus:Target:Lun
scsi_devices = [f for f in os.listdir(scsidevsysfs) if re.search(r'^[0-9]+:[0-9]+:[0-9]+:[0-9]+$', f)]
@@ -196,7 +255,8 @@ class ZFCPDevice:
self.devnum, self.wwpn, self.fcplun)
def offline_device(self):
- offline = "%s/%s/online" % (zfcpsysfs, self.devnum)
+ """Remove the zFCP device from the system."""
+
portadd = "%s/%s/port_add" % (zfcpsysfs, self.devnum)
portremove = "%s/%s/port_remove" % (zfcpsysfs, self.devnum)
unitremove = "%s/%s/%s/unit_remove" % (zfcpsysfs, self.devnum, self.wwpn)
@@ -212,6 +272,7 @@ class ZFCPDevice:
% {'devnum': self.devnum, 'wwpn': self.wwpn,
'fcplun': self.fcplun, 'e': e})
+ # remove the LUN
try:
logged_write_line_to_file(unitremove, self.fcplun)
except OSError as e:
@@ -221,6 +282,7 @@ class ZFCPDevice:
% {'fcplun': self.fcplun, 'wwpn': self.wwpn,
'devnum': self.devnum, 'e': e})
+ # remove the WWPN only if there are no other LUNs attached
if os.path.exists(portadd):
# only try to remove ports with older zfcp sysfs interface
for lun in os.listdir(portdir):
@@ -238,6 +300,7 @@ class ZFCPDevice:
% {'wwpn': self.wwpn,
'devnum': self.devnum, 'e': e})
+ # check if there are other WWPNs existing for the zFCP device number
if os.path.exists(portadd):
# older zfcp sysfs interface
for port in os.listdir(devdir):
@@ -256,12 +319,8 @@ class ZFCPDevice:
self.devnum, luns[0])
return True
- try:
- logged_write_line_to_file(offline, "0")
- except OSError as e:
- raise ValueError(_("Could not set zFCP device %(devnum)s "
- "offline (%(e)s).")
- % {'devnum': self.devnum, 'e': e})
+ # no other WWPNs/LUNs exists for this device number, it's safe to bring it offline
+ self._set_zfcp_device_offline()
return True
--
2.34.3
From 2dc44c00f170d64458a7c89abc91cda61af8387f Mon Sep 17 00:00:00 2001
From: Jan Stodola <jstodola@redhat.com>
Date: Sun, 21 Nov 2021 02:35:05 +0100
Subject: [PATCH 3/8] Move offline_scsi_device() to the base class
Related: rhbz#1937030
---
blivet/zfcp.py | 74 ++++++++++++++++++++++++++++++--------------------
1 file changed, 44 insertions(+), 30 deletions(-)
diff --git a/blivet/zfcp.py b/blivet/zfcp.py
index 4a50f65f..af8f841d 100644
--- a/blivet/zfcp.py
+++ b/blivet/zfcp.py
@@ -110,6 +110,15 @@ class ZFCPDeviceBase(ABC):
"offline (%(e)s).")
% {'devnum': self.devnum, 'e': e})
+ def _is_scsi_associated_with_fcp(self, fcphbasysfs, _fcpwwpnsysfs, _fcplunsysfs):
+ """Decide if the SCSI device with the provided SCSI attributes
+ corresponds to the zFCP device.
+
+ :returns: True or False
+ """
+
+ return fcphbasysfs == self.devnum
+
def online_device(self):
"""Initialize the device and make its storage block device(s) ready to use.
@@ -121,6 +130,30 @@ class ZFCPDeviceBase(ABC):
self._set_zfcp_device_online()
return True
+ def offline_scsi_device(self):
+ """Find SCSI devices associated to the zFCP device and remove them from the system."""
+
+ # A list of existing SCSI devices in format Host:Bus:Target:Lun
+ scsi_devices = [f for f in os.listdir(scsidevsysfs) if re.search(r'^[0-9]+:[0-9]+:[0-9]+:[0-9]+$', f)]
+
+ for scsidev in scsi_devices:
+ fcpsysfs = os.path.join(scsidevsysfs, scsidev)
+
+ with open(os.path.join(fcpsysfs, "hba_id")) as f:
+ fcphbasysfs = f.readline().strip()
+ with open(os.path.join(fcpsysfs, "wwpn")) as f:
+ fcpwwpnsysfs = f.readline().strip()
+ with open(os.path.join(fcpsysfs, "fcp_lun")) as f:
+ fcplunsysfs = f.readline().strip()
+
+ if self._is_scsi_associated_with_fcp(fcphbasysfs, fcpwwpnsysfs, fcplunsysfs):
+ scsidel = os.path.join(scsidevsysfs, scsidev, "delete")
+ logged_write_line_to_file(scsidel, "1")
+ udev.settle()
+ return
+
+ log.warning("No scsi device found to delete for zfcp %s", self)
+
class ZFCPDevice(ZFCPDeviceBase):
"""A class for zFCP devices that are not configured in NPIV mode. Such
@@ -142,6 +175,17 @@ class ZFCPDevice(ZFCPDeviceBase):
def _to_string(self):
return "{} {} {}".format(self.devnum, self.wwpn, self.fcplun)
+ def _is_scsi_associated_with_fcp(self, fcphbasysfs, fcpwwpnsysfs, fcplunsysfs):
+ """Decide if the SCSI device with the provided SCSI attributes
+ corresponds to the zFCP device.
+
+ :returns: True or False
+ """
+
+ return (fcphbasysfs == self.devnum and
+ fcpwwpnsysfs == self.wwpn and
+ fcplunsysfs == self.fcplun)
+
def online_device(self):
"""Initialize the device and make its storage block device(s) ready to use.
@@ -224,36 +268,6 @@ class ZFCPDevice(ZFCPDeviceBase):
return True
- def offline_scsi_device(self):
- """Find SCSI devices associated to the zFCP device and remove them from the system."""
-
- # A list of existing SCSI devices in format Host:Bus:Target:Lun
- scsi_devices = [f for f in os.listdir(scsidevsysfs) if re.search(r'^[0-9]+:[0-9]+:[0-9]+:[0-9]+$', f)]
-
- for scsidev in scsi_devices:
- fcpsysfs = "%s/%s" % (scsidevsysfs, scsidev)
- scsidel = "%s/%s/delete" % (scsidevsysfs, scsidev)
-
- f = open("%s/hba_id" % (fcpsysfs), "r")
- fcphbasysfs = f.readline().strip()
- f.close()
- f = open("%s/wwpn" % (fcpsysfs), "r")
- fcpwwpnsysfs = f.readline().strip()
- f.close()
- f = open("%s/fcp_lun" % (fcpsysfs), "r")
- fcplunsysfs = f.readline().strip()
- f.close()
-
- if fcphbasysfs == self.devnum \
- and fcpwwpnsysfs == self.wwpn \
- and fcplunsysfs == self.fcplun:
- logged_write_line_to_file(scsidel, "1")
- udev.settle()
- return
-
- log.warning("no scsi device found to delete for zfcp %s %s %s",
- self.devnum, self.wwpn, self.fcplun)
-
def offline_device(self):
"""Remove the zFCP device from the system."""
--
2.34.3
From f194c6e591c3e409f227fd10d3e9923af91ea893 Mon Sep 17 00:00:00 2001
From: Jan Stodola <jstodola@redhat.com>
Date: Sat, 6 Nov 2021 21:27:52 +0100
Subject: [PATCH 4/8] Allow to delete more than one SCSI device
NPIV zFCP devices can attach more than one SCSI device, so allow
deleting them all. For non-NPIV devices this means a possible slowdown,
since all SCSI devices are now checked.
Related: rhbz#1937030
---
blivet/zfcp.py | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/blivet/zfcp.py b/blivet/zfcp.py
index af8f841d..3b3f623b 100644
--- a/blivet/zfcp.py
+++ b/blivet/zfcp.py
@@ -136,6 +136,7 @@ class ZFCPDeviceBase(ABC):
# A list of existing SCSI devices in format Host:Bus:Target:Lun
scsi_devices = [f for f in os.listdir(scsidevsysfs) if re.search(r'^[0-9]+:[0-9]+:[0-9]+:[0-9]+$', f)]
+ scsi_device_found = False
for scsidev in scsi_devices:
fcpsysfs = os.path.join(scsidevsysfs, scsidev)
@@ -147,12 +148,13 @@ class ZFCPDeviceBase(ABC):
fcplunsysfs = f.readline().strip()
if self._is_scsi_associated_with_fcp(fcphbasysfs, fcpwwpnsysfs, fcplunsysfs):
+ scsi_device_found = True
scsidel = os.path.join(scsidevsysfs, scsidev, "delete")
logged_write_line_to_file(scsidel, "1")
udev.settle()
- return
- log.warning("No scsi device found to delete for zfcp %s", self)
+ if not scsi_device_found:
+ log.warning("No scsi device found to delete for zfcp %s", self)
class ZFCPDevice(ZFCPDeviceBase):
--
2.34.3
From f6615be663434079b3f2a86be5db88b816d8a9e1 Mon Sep 17 00:00:00 2001
From: Jan Stodola <jstodola@redhat.com>
Date: Sun, 21 Nov 2021 03:01:02 +0100
Subject: [PATCH 5/8] Add a function for reading the value of a kernel module
parameter
Related: rhbz#1937030
---
blivet/util.py | 33 +++++++++++++++++++++++++++++++++
tests/util_test.py | 11 +++++++++++
2 files changed, 44 insertions(+)
diff --git a/blivet/util.py b/blivet/util.py
index af60210b..cbef65e0 100644
--- a/blivet/util.py
+++ b/blivet/util.py
@@ -1131,3 +1131,36 @@ def detect_virt():
return False
else:
return vm[0] in ('qemu', 'kvm', 'xen')
+
+
+def natural_sort_key(device):
+ """ Sorting key for devices which makes sure partitions are sorted in natural
+ way, e.g. 'sda1, sda2, ..., sda10' and not like 'sda1, sda10, sda2, ...'
+ """
+ if device.type == "partition" and device.parted_partition and device.disk:
+ part_num = getattr(device.parted_partition, "number", -1)
+ return [device.disk.name, part_num]
+ else:
+ return [device.name, 0]
+
+
+def get_kernel_module_parameter(module, parameter):
+ """ Return the value of a given kernel module parameter
+
+ :param str module: a kernel module
+ :param str parameter: a module parameter
+ :returns: the value of the given kernel module parameter or None
+ :rtype: str
+ """
+
+ value = None
+
+ parameter_path = os.path.join("/sys/module", module, "parameters", parameter)
+ try:
+ with open(parameter_path) as f:
+ value = f.read().strip()
+ except IOError as e:
+ log.warning("Couldn't get the value of the parameter '%s' from the kernel module '%s': %s",
+ parameter, module, str(e))
+
+ return value
diff --git a/tests/util_test.py b/tests/util_test.py
index b4f82c1b..805447c7 100644
--- a/tests/util_test.py
+++ b/tests/util_test.py
@@ -182,3 +182,14 @@ class GetSysfsAttrTestCase(unittest.TestCase):
# the unicode replacement character (U+FFFD) should be used instead
model = util.get_sysfs_attr(sysfs, "model")
self.assertEqual(model, "test model\ufffd")
+
+
+class GetKernelModuleParameterTestCase(unittest.TestCase):
+
+ def test_nonexisting_kernel_module(self):
+ self.assertIsNone(util.get_kernel_module_parameter("unknown_module", "unknown_parameter"))
+
+ def test_get_kernel_module_parameter_value(self):
+ with mock.patch('blivet.util.open', mock.mock_open(read_data='value\n')):
+ value = util.get_kernel_module_parameter("module", "parameter")
+ self.assertEqual(value, "value")
--
2.34.3
From 17c99a2444ef750bdbf5b24665c5fd3c52e687d9 Mon Sep 17 00:00:00 2001
From: Jan Stodola <jstodola@redhat.com>
Date: Sun, 21 Nov 2021 03:01:46 +0100
Subject: [PATCH 6/8] LUN and WWPN should not be used for NPIV zFCP devices
Log a warning if activating a zFCP device in NPIV mode and WWPN or
LUN have been provided. They are superfluous for NPIV devices.
Related: rhbz#1937030
---
blivet/zfcp.py | 58 +++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 57 insertions(+), 1 deletion(-)
diff --git a/blivet/zfcp.py b/blivet/zfcp.py
index 3b3f623b..726e9364 100644
--- a/blivet/zfcp.py
+++ b/blivet/zfcp.py
@@ -22,6 +22,7 @@
import os
import re
from abc import ABC
+import glob
from . import udev
from . import util
from .i18n import _
@@ -47,6 +48,55 @@ zfcpsysfs = "/sys/bus/ccw/drivers/zfcp"
scsidevsysfs = "/sys/bus/scsi/devices"
zfcpconf = "/etc/zfcp.conf"
+
+def _is_lun_scan_allowed():
+ """Return True if automatic LUN scanning is enabled by the kernel."""
+
+ allow_lun_scan = util.get_kernel_module_parameter("zfcp", "allow_lun_scan")
+ return allow_lun_scan == "Y"
+
+
+def _is_port_in_npiv_mode(device_id):
+ """Return True if the device ID is configured in NPIV mode. See
+ https://www.ibm.com/docs/en/linux-on-systems?topic=devices-use-npiv
+ """
+
+ port_in_npiv_mode = False
+ port_type_path = "/sys/bus/ccw/devices/{}/host*/fc_host/host*/port_type".format(device_id)
+ port_type_paths = glob.glob(port_type_path)
+ try:
+ for filename in port_type_paths:
+ with open(filename) as f:
+ port_type = f.read()
+ if re.search(r"(^|\s)NPIV(\s|$)", port_type):
+ port_in_npiv_mode = True
+ except OSError as e:
+ log.warning("Couldn't read the port_type attribute of the %s device: %s", device_id, str(e))
+ port_in_npiv_mode = False
+
+ return port_in_npiv_mode
+
+
+def is_npiv_enabled(device_id):
+ """Return True if the given zFCP device ID is configured and usable in
+ NPIV (N_Port ID Virtualization) mode.
+
+ :returns: True or False
+ """
+
+ # LUN scanning disabled by the kernel module prevents using the device in NPIV mode
+ if not _is_lun_scan_allowed():
+ log.warning("Automatic LUN scanning is disabled by the zfcp kernel module.")
+ return False
+
+ # The port itself has to be configured in NPIV mode
+ if not _is_port_in_npiv_mode(device_id):
+ log.warning("The zFCP device %s is not configured in NPIV mode.", device_id)
+ return False
+
+ return True
+
+
class ZFCPDeviceBase(ABC):
"""An abstract base class for zFCP storage devices."""
@@ -203,6 +253,13 @@ class ZFCPDevice(ZFCPDeviceBase):
unitdir = "%s/%s" % (portdir, self.fcplun)
failed = "%s/failed" % (unitdir)
+ # Activating an NPIV enabled device using devnum, WWPN and LUN should still be possible
+ # as this method was used as a workaround until the support for NPIV enabled devices has
+ # been implemented. Just log a warning message and continue.
+ if is_npiv_enabled(self.devnum):
+ log.warning("zFCP device %s in NPIV mode brought online. All LUNs will be activated "
+ "automatically although WWPN and LUN have been provided.", self.devnum)
+
# create the sysfs directory for the WWPN/port
if not os.path.exists(portdir):
if os.path.exists(portadd):
@@ -327,7 +384,6 @@ class ZFCPDevice(ZFCPDeviceBase):
return True
else:
# newer zfcp sysfs interface with auto port scan
- import glob
luns = glob.glob("%s/0x????????????????/0x????????????????"
% (devdir,))
if len(luns) != 0:
--
2.34.3
From a8f97bd0d74e3da9c18bd03d968f5d2f0c3ee46f Mon Sep 17 00:00:00 2001
From: Jan Stodola <jstodola@redhat.com>
Date: Sat, 6 Nov 2021 21:27:52 +0100
Subject: [PATCH 7/8] Add new class for NPIV-enabled devices
Related: rhbz#1937030
---
blivet/zfcp.py | 53 +++++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 50 insertions(+), 3 deletions(-)
diff --git a/blivet/zfcp.py b/blivet/zfcp.py
index 726e9364..e6c0e48a 100644
--- a/blivet/zfcp.py
+++ b/blivet/zfcp.py
@@ -397,6 +397,44 @@ class ZFCPDevice(ZFCPDeviceBase):
return True
+class ZFCPNPIVDevice(ZFCPDeviceBase):
+ """Class for zFCP devices configured in NPIV mode. Only a zFCP device number is
+ needed for such devices.
+ """
+
+ def online_device(self):
+ """Initialize the device and make its storage block device(s) ready to use.
+
+ :returns: True if success
+ :raises: ValueError if the device cannot be initialized
+ """
+
+ super().online_device()
+
+ if not is_npiv_enabled(self.devnum):
+ raise ValueError(_("zFCP device %s cannot be used in NPIV mode.") % self)
+
+ return True
+
+ def offline_device(self):
+ """Remove the zFCP device from the system.
+
+ :returns: True if success
+ :raises: ValueError if the device cannot be brought offline
+ """
+
+ try:
+ self.offline_scsi_device()
+ except OSError as e:
+ raise ValueError(_("Could not correctly delete SCSI device of "
+ "zFCP %(zfcpdev)s (%(e)s).")
+ % {'zfcpdev': self, 'e': e})
+
+ self._set_zfcp_device_offline()
+
+ return True
+
+
class zFCP:
""" ZFCP utility class.
@@ -439,7 +477,12 @@ class zFCP:
fields = line.split()
- if len(fields) == 3:
+ # NPIV enabled device
+ if len(fields) == 1:
+ devnum = fields[0]
+ wwpn = None
+ fcplun = None
+ elif len(fields) == 3:
devnum = fields[0]
wwpn = fields[1]
fcplun = fields[2]
@@ -458,8 +501,12 @@ class zFCP:
except ValueError as e:
log.warning("%s", str(e))
- def add_fcp(self, devnum, wwpn, fcplun):
- d = ZFCPDevice(devnum, wwpn, fcplun)
+ def add_fcp(self, devnum, wwpn=None, fcplun=None):
+ if wwpn and fcplun:
+ d = ZFCPDevice(devnum, wwpn, fcplun)
+ else:
+ d = ZFCPNPIVDevice(devnum)
+
if d.online_device():
self.fcpdevs.add(d)
--
2.34.3
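
With this change an /etc/zfcp.conf read by blivet can mix both entry styles; a hedged illustration with made-up device IDs, not taken from the patch:

# traditional (non-NPIV) entry: device number, WWPN and LUN
0.0.fc00 0x5105074308c212e9 0x401040a000000000
# NPIV-enabled device: the device number alone is enough, LUNs are scanned automatically
0.0.fa00
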
From 963822ff989c938e74d582216f4f7ded595eccc1 Mon Sep 17 00:00:00 2001
From: Jan Stodola <jstodola@redhat.com>
Date: Sat, 20 Nov 2021 23:12:43 +0100
Subject: [PATCH 8/8] Generate correct dracut boot arguments for NPIV devices
NPIV enabled devices need only the device ID. WWPNs/LUNs are discovered
automatically by the kernel module.
Resolves: rhbz#1937030
---
blivet/devices/disk.py | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index 67a01ba6..36278507 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -577,7 +577,15 @@ class ZFCPDiskDevice(DiskDevice):
'lun': self.fcp_lun}
def dracut_setup_args(self):
- return set(["rd.zfcp=%s,%s,%s" % (self.hba_id, self.wwpn, self.fcp_lun,)])
+ from ..zfcp import is_npiv_enabled
+
+ # zFCP devices in NPIV mode need only the device ID
+ if is_npiv_enabled(self.hba_id):
+ dracut_args = set(["rd.zfcp=%s" % self.hba_id])
+ else:
+ dracut_args = set(["rd.zfcp=%s,%s,%s" % (self.hba_id, self.wwpn, self.fcp_lun,)])
+
+ return dracut_args
class DASDDevice(DiskDevice):
--
2.34.3
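
The dracut boot arguments generated by the last patch then differ as follows (again with illustrative IDs):

rd.zfcp=0.0.fc00,0x5105074308c212e9,0x401040a000000000   (non-NPIV: device ID, WWPN and LUN)
rd.zfcp=0.0.fa00                                          (NPIV mode: device ID only)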


@@ -0,0 +1,129 @@
From 5d54b2ede698d5084aa6c780295fcc9aafbfa357 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 9 May 2022 13:38:50 +0200
Subject: [PATCH] Add a very simple NVMe module
This covers only the basic functionality needed by Anaconda right
now: populating the config files in /etc/nvme and copying them to
the installed system. The API for the NVMe singleton is based on
the similar modules for iSCSI and FCoE.
Resolves: rhbz#2073008
---
blivet/errors.py | 14 +++++++++
blivet/nvme.py | 81 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 95 insertions(+)
create mode 100644 blivet/nvme.py
diff --git a/blivet/errors.py b/blivet/errors.py
index fd51283f..b16cf2c5 100644
--- a/blivet/errors.py
+++ b/blivet/errors.py
@@ -307,3 +307,17 @@ class EventHandlingError(StorageError):
class ThreadError(StorageError):
""" An error occurred in a non-main thread. """
+
+# other
+
+
+class FCoEError(StorageError, OSError):
+ pass
+
+
+class ISCSIError(StorageError, OSError):
+ pass
+
+
+class NVMeError(StorageError, OSError):
+ pass
diff --git a/blivet/nvme.py b/blivet/nvme.py
new file mode 100644
index 00000000..17bead15
--- /dev/null
+++ b/blivet/nvme.py
@@ -0,0 +1,81 @@
+#
+# nvme.py - NVMe class
+#
+# Copyright (C) 2022 Red Hat, Inc. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import shutil
+
+from . import errors
+from . import util
+
+import logging
+log = logging.getLogger("blivet")
+
+HOSTNQN_FILE = "/etc/nvme/hostnqn"
+HOSTID_FILE = "/etc/nvme/hostid"
+
+
+class NVMe(object):
+ """ NVMe utility class.
+
+ .. warning::
+ Since this is a singleton class, calling deepcopy() on the instance
+ just returns ``self`` with no copy being created.
+ """
+
+ def __init__(self):
+ self.started = False
+
+ # So that users can write nvme() to get the singleton instance
+ def __call__(self):
+ return self
+
+ def __deepcopy__(self, memo_dict): # pylint: disable=unused-argument
+ return self
+
+ def startup(self):
+ if self.started:
+ return
+
+ rc, nqn = util.run_program_and_capture_output(["nvme", "gen-hostnqn"])
+ if rc != 0:
+ raise errors.NVMeError("Failed to generate hostnqn")
+
+ with open(HOSTNQN_FILE, "w") as f:
+ f.write(nqn)
+
+ rc, hid = util.run_program_and_capture_output(["dmidecode", "-s", "system-uuid"])
+ if rc != 0:
+ raise errors.NVMeError("Failed to generate host ID")
+
+ with open(HOSTID_FILE, "w") as f:
+ f.write(hid)
+
+ self.started = True
+
+ def write(self, root): # pylint: disable=unused-argument
+ # copy the hostnqn and hostid files
+ if not os.path.isdir(root + "/etc/nvme"):
+ os.makedirs(root + "/etc/nvme", 0o755)
+ shutil.copyfile(HOSTNQN_FILE, root + HOSTNQN_FILE)
+ shutil.copyfile(HOSTID_FILE, root + HOSTID_FILE)
+
+
+# Create nvme singleton
+nvme = NVMe()
+""" An instance of :class:`NVMe` """
--
2.34.3
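
A hedged sketch of how a caller such as the installer would drive the new singleton, matching the startup/write API added above; the target root path is illustrative:

from blivet.nvme import nvme

nvme.startup()              # writes /etc/nvme/hostnqn and /etc/nvme/hostid
# ... installation happens ...
nvme.write("/mnt/sysroot")  # copies both files into the target root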


@@ -0,0 +1,107 @@
From 3a64795bdb94f525b55375bd89e7e5c8bc3a8921 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 17 Aug 2022 14:24:21 +0200
Subject: [PATCH 1/3] Use MD populator instead of DM to handle DDF RAID format
---
blivet/formats/dmraid.py | 2 +-
blivet/formats/mdraid.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/blivet/formats/dmraid.py b/blivet/formats/dmraid.py
index 2ba9dcfe5..ce15905dc 100644
--- a/blivet/formats/dmraid.py
+++ b/blivet/formats/dmraid.py
@@ -43,7 +43,7 @@ class DMRaidMember(DeviceFormat):
#
# One problem that presents is the possibility of someone passing
# a dmraid member to the MDRaidArrayDevice constructor.
- _udev_types = ["adaptec_raid_member", "ddf_raid_member",
+ _udev_types = ["adaptec_raid_member",
"hpt37x_raid_member", "hpt45x_raid_member",
"isw_raid_member",
"jmicron_raid_member", "lsi_mega_raid_member",
diff --git a/blivet/formats/mdraid.py b/blivet/formats/mdraid.py
index 41ddef810..4aa3f3b07 100644
--- a/blivet/formats/mdraid.py
+++ b/blivet/formats/mdraid.py
@@ -41,7 +41,7 @@ class MDRaidMember(DeviceFormat):
""" An mdraid member disk. """
_type = "mdmember"
_name = N_("software RAID")
- _udev_types = ["linux_raid_member"]
+ _udev_types = ["linux_raid_member", "ddf_raid_member"]
parted_flag = PARTITION_RAID
_formattable = True # can be formatted
_supported = True # is supported
From 3ea946fa7ae18dbc413c17f1cd5a6eb23aaf1ea8 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 17 Aug 2022 14:24:58 +0200
Subject: [PATCH 2/3] Do not read DDF RAID UUID from udev
The UUID we get from udev isn't the array UUID; we need to get
that using libblockdev.
---
blivet/populator/helpers/mdraid.py | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/blivet/populator/helpers/mdraid.py b/blivet/populator/helpers/mdraid.py
index 3479e3f78..a7602d209 100644
--- a/blivet/populator/helpers/mdraid.py
+++ b/blivet/populator/helpers/mdraid.py
@@ -98,17 +98,21 @@ class MDFormatPopulator(FormatPopulator):
def _get_kwargs(self):
kwargs = super(MDFormatPopulator, self)._get_kwargs()
- try:
- # ID_FS_UUID contains the array UUID
- kwargs["md_uuid"] = udev.device_get_uuid(self.data)
- except KeyError:
- log.warning("mdraid member %s has no md uuid", udev.device_get_name(self.data))
+ kwargs["biosraid"] = udev.device_is_biosraid_member(self.data)
+ if not kwargs["biosraid"]:
+ try:
+ # ID_FS_UUID contains the array UUID
+ kwargs["md_uuid"] = udev.device_get_uuid(self.data)
+ except KeyError:
+ log.warning("mdraid member %s has no md uuid", udev.device_get_name(self.data))
+ else:
+ # for BIOS RAIDs we can't get the UUID from udev, we'll get it from mdadm in `run` below
+ kwargs["md_uuid"] = None
# reset the uuid to the member-specific value
# this will be None for members of v0 metadata arrays
kwargs["uuid"] = udev.device_get_md_device_uuid(self.data)
- kwargs["biosraid"] = udev.device_is_biosraid_member(self.data)
return kwargs
def run(self):
From 4e766bb6f2bb487003ed4fa9b8415760c436af81 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 17 Mar 2022 15:48:25 +0100
Subject: [PATCH 3/3] Do not crash when a disk populator doesn't return kwargs
This happens when trying to use Blivet on a system with a BIOS
RAID without dmraid installed. Because we don't fully support
BIOS RAIDs using MD, the MDBiosRaidDevicePopulator helper fails
to get kwargs for the BIOS RAID "disk" and populate fails.
---
blivet/populator/helpers/disk.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/blivet/populator/helpers/disk.py b/blivet/populator/helpers/disk.py
index 2e5026f7e..9db7b810d 100644
--- a/blivet/populator/helpers/disk.py
+++ b/blivet/populator/helpers/disk.py
@@ -68,6 +68,8 @@ def run(self):
log_method_call(self, name=name)
kwargs = self._get_kwargs()
+ if not kwargs:
+ return
device = self._device_class(name, **kwargs)
self._devicetree._add_device(device)
return device


@@ -23,7 +23,7 @@ Version: 3.4.0
#%%global prerelease .b2
# prerelease, if defined, should be something like .a1, .b1, .b2.dev1, or .c2
Release: 13%{?prerelease}%{?dist}
Release: 16%{?prerelease}%{?dist}
Epoch: 1
License: LGPLv2+
%global realname blivet
@@ -46,7 +46,16 @@ Patch12: 0014-LVM-devices-file-support.patch
Patch13: 0015-iscsi-Replace-all-log_exception_info-calls-with-log.patch
Patch14: 0016-Fix-log-message-for-the-LVM-devices-filter.patch
Patch15: 0017-Exclude-unusable-disks-from-PartitionFactory.patch
Patch16: 0018-PO-update.patch
Patch16: 0018-Fix-getting-PV-info-in-LVMPhysicalVolume-from-the-ca.patch
Patch17: 0019-Do-not-crash-when-changing-disklabel-on-disks-with-a.patch
Patch18: 0020-ActionDestroyDevice-should-not-obsolete-ActionRemove.patch
Patch19: 0021-Correctly-set-vg_name-after-adding-removing-a-PV-fro.patch
Patch20: 0022-Add-support-for-creating-LVM-cache-pools.patch
Patch21: 0023-Use-LVM-PV-format-current_size-in-LVMVolumeGroupDevi.patch
Patch22: 0024-tests-Mark-fake-disks-in-test_get_related_disks-as-n.patch
Patch23: 0025-Add-support-for-NPIV-enabled-zFCP-devices.patch
Patch24: 0026-Add-a-very-simple-NVMe-module.patch
Patch25: 0027-DDF-RAID-support-using-mdadm.patch
# Versions of required components (done so we make sure the buildrequires
# match the requires versions of things).
@@ -209,9 +218,37 @@ configuration.
%endif
%changelog
* Mon Mar 07 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-13
- RHEL 9.0 GA Localization
Resolves: rhbz#2017378
* Thu Aug 18 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-16
- DDF RAID support using mdadm
Resolves: rhbz#2109030
* Mon Jun 20 2022 Jan Pokorny <japokorn@redhat.com> - 3.4.0-15
- Add a very simple NVMe module
Resolves: rhbz#2073008
- Add support for NPIV-enabled zFCP devices
Resolves: rhbz#1937030
- Fix removing zFCP SCSI devices
Related: rhbz#1937030
- tests: Mark "fake" disks in test_get_related_disks as non-existing
Resolves: rhbz#2062690
* Thu Jun 02 2022 Jan Pokorny <japokorn@redhat.com> - 3.4.0-14
- Release version increase to fix RHEL upgrade path
Related: rhbz#2081278
* Thu Jun 02 2022 Jan Pokorny <japokorn@redhat.com> - 3.4.0-13
- Fix getting PV info in LVMPhysicalVolume from the cache
Resolves: rhbz#2079221
- Do not crash when changing disklabel on disks with active devices
Resolves: rhbz#2078803
- ActionDestroyDevice should not obsolete ActionRemoveMember
Resolves: rhbz#2076956
- Correctly set vg_name after adding/removing a PV from a VG
Resolves: rhbz#2081278
- Add support for creating LVM cache pools
Resolves: rhbz#2055200
- Use LVM PV format current_size in LVMVolumeGroupDevice._remove
Related: rhbz#2081278
* Tue Feb 01 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-12
- Fix log message for the LVM devices filter