import python-blivet-3.6.0-5.el9

CentOS Sources 2023-03-28 09:32:45 +00:00 committed by Stepan Oksanichenko
parent 4d78582eb0
commit 13571aa6f7
36 changed files with 1103 additions and 4089 deletions

.gitignore

@@ -1,2 +1,2 @@
SOURCES/blivet-3.4.0-tests.tar.gz
SOURCES/blivet-3.4.0.tar.gz
SOURCES/blivet-3.6.0-tests.tar.gz
SOURCES/blivet-3.6.0.tar.gz


@@ -1,2 +1,2 @@
d0a86df7bbaeda7be9990b7f7b15ec36b325ec7a SOURCES/blivet-3.4.0-tests.tar.gz
aafc429e224dfd204cb1c284bb70de52920f7b20 SOURCES/blivet-3.4.0.tar.gz
8393baa22cb433d1012e3923ad0bc232401116c6 SOURCES/blivet-3.6.0-tests.tar.gz
e9d95c1165703fed3da1f35a9199197bfff68f98 SOURCES/blivet-3.6.0.tar.gz


@@ -1,4 +1,4 @@
From 2f90040ff66eacc9715e370cd49ffb72d8d1f36f Mon Sep 17 00:00:00 2001
From 2759aaa9cbee38f80819bc136bb893184429380c Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 11 Jul 2018 15:36:24 +0200
Subject: [PATCH] Force command line based libblockdev LVM plugin
@@ -8,7 +8,7 @@ Subject: [PATCH] Force command line based libblockdev LVM plugin
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/blivet/__init__.py b/blivet/__init__.py
index c5a75bb..cb75917 100644
index dd8d0f54..62cc539a 100644
--- a/blivet/__init__.py
+++ b/blivet/__init__.py
@@ -63,11 +63,16 @@ gi.require_version("BlockDev", "2.0")
@@ -31,5 +31,5 @@ index c5a75bb..cb75917 100644
# do not check for dependencies during libblockdev initializtion, do runtime
# checks instead
--
1.8.3.1
2.37.3
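
The hunk that does the forcing is elided above, but the same lines appear in full inside the NVMe patch near the end of this commit. As a standalone sketch of the technique (the plugin name set is abbreviated, not the file's exact text):

    import gi
    gi.require_version("BlockDev", "2.0")
    from gi.repository import BlockDev as blockdev

    _requested_plugins = blockdev.plugin_specs_from_names({"swap", "crypto"})
    # Requesting LVM by explicit .so name makes libblockdev load the
    # command-line implementation (libbd_lvm.so.2) instead of the DBus
    # one, which may not be running in the installer environment.
    lvm_plugin = blockdev.PluginSpec()
    lvm_plugin.name = blockdev.Plugin.LVM
    lvm_plugin.so_name = "libbd_lvm.so.2"
    _requested_plugins.append(lvm_plugin)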


@@ -1,4 +1,4 @@
From 6bf3378d3d2a1b6a4338df0c4dd36a783a641633 Mon Sep 17 00:00:00 2001
From f27bdff18e98548f4c094b8cce23ca2d6270e30d Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 16 Jul 2018 14:26:11 +0200
Subject: [PATCH] Remove btrfs from requested libblockdev plugins
@@ -8,7 +8,7 @@ Subject: [PATCH] Remove btrfs from requested libblockdev plugins
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/blivet/__init__.py b/blivet/__init__.py
index cb75917..09f8b1c 100644
index 62cc539a..bbc7ea3a 100644
--- a/blivet/__init__.py
+++ b/blivet/__init__.py
@@ -63,9 +63,9 @@ gi.require_version("BlockDev", "2.0")
@@ -24,5 +24,5 @@ index cb75917..09f8b1c 100644
_requested_plugins = blockdev.plugin_specs_from_names(_REQUESTED_PLUGIN_NAMES)
# XXX force non-dbus LVM plugin
--
1.8.3.1
2.37.3


@@ -1,19 +1,19 @@
From 3a42d9e2afdf04dbbfd2c507f5b2392193fda25b Mon Sep 17 00:00:00 2001
From b9021fde8ccdd14cbe192b6597f7ca350b4bb585 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 26 May 2021 12:15:54 +0200
Subject: [PATCH] Revert "More consistent lvm errors (API break)"
This reverts commit 49ec071c6d0673224a0774d613904387c52c7381.
---
blivet/devices/lvm.py | 72 +++++++++++++++++-----------------
tests/devices_test/lvm_test.py | 14 +++----
blivet/devices/lvm.py | 72 +++++++++++------------
tests/unit_tests/devices_test/lvm_test.py | 14 ++---
2 files changed, 43 insertions(+), 43 deletions(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index a55515fc..6d23bfba 100644
index 38e49e18..b8595d63 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -307,7 +307,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
@@ -304,7 +304,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
def _add_log_vol(self, lv):
""" Add an LV to this VG. """
if lv in self._lvs:
@@ -22,7 +22,7 @@ index a55515fc..6d23bfba 100644
# verify we have the space, then add it
# do not verify for growing vg (because of ks)
@@ -340,7 +340,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
@@ -337,7 +337,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
def _remove_log_vol(self, lv):
""" Remove an LV from this VG. """
if lv not in self.lvs:
@@ -31,7 +31,7 @@ index a55515fc..6d23bfba 100644
self._lvs.remove(lv)
@@ -415,7 +415,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
@@ -430,7 +430,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
@thpool_reserve.setter
def thpool_reserve(self, value):
if value is not None and not isinstance(value, ThPoolReserveSpec):
@@ -40,8 +40,8 @@ index a55515fc..6d23bfba 100644
self._thpool_reserve = value
@property
@@ -646,14 +646,14 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo"] + lvm.raid_seg_types:
@@ -665,14 +665,14 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo", "cache-pool"] + lvm.raid_seg_types:
raise ValueError("Invalid or unsupported segment type: %s" % seg_type)
if seg_type and seg_type in lvm.raid_seg_types and not pvs:
- raise errors.DeviceError("List of PVs has to be given for every non-linear LV")
@@ -60,7 +60,7 @@ index a55515fc..6d23bfba 100644
# When this device's format is set in the superclass constructor it will
# try to access self.snapshots.
@@ -702,13 +702,13 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
@@ -721,13 +721,13 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
self._from_lvs = from_lvs
if self._from_lvs:
if exists:
@@ -78,7 +78,7 @@ index a55515fc..6d23bfba 100644
self._cache = None
if cache_request and not self.exists:
@@ -723,13 +723,13 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
@@ -746,13 +746,13 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
elif isinstance(pv_spec, StorageDevice):
self._pv_specs.append(LVPVSpec(pv_spec, Size(0)))
else:
@@ -94,7 +94,7 @@ index a55515fc..6d23bfba 100644
if self._pv_specs:
self._assign_pv_space()
@@ -1072,7 +1072,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
@@ -1130,7 +1130,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
else:
msg = "the specified internal LV '%s' doesn't belong to this LV ('%s')" % (int_lv.lv_name,
self.name)
@@ -103,7 +103,7 @@ index a55515fc..6d23bfba 100644
def populate_ksdata(self, data):
super(LVMLogicalVolumeBase, self).populate_ksdata(data)
@@ -1171,7 +1171,7 @@ class LVMInternalLogicalVolumeMixin(object):
@@ -1229,7 +1229,7 @@ class LVMInternalLogicalVolumeMixin(object):
def _init_check(self):
# an internal LV should have no parents
if self._parent_lv and self._parents:
@@ -112,7 +112,7 @@ index a55515fc..6d23bfba 100644
@property
def is_internal_lv(self):
@@ -1231,7 +1231,7 @@ class LVMInternalLogicalVolumeMixin(object):
@@ -1289,7 +1289,7 @@ class LVMInternalLogicalVolumeMixin(object):
@readonly.setter
def readonly(self, value): # pylint: disable=unused-argument
@@ -121,7 +121,7 @@ index a55515fc..6d23bfba 100644
@property
def type(self):
@@ -1267,7 +1267,7 @@ class LVMInternalLogicalVolumeMixin(object):
@@ -1325,7 +1325,7 @@ class LVMInternalLogicalVolumeMixin(object):
def _check_parents(self):
# an internal LV should have no parents
if self._parents:
@ -130,7 +130,7 @@ index a55515fc..6d23bfba 100644
def _add_to_parents(self):
# nothing to do here, an internal LV has no parents (in the DeviceTree's
@@ -1277,13 +1277,13 @@ class LVMInternalLogicalVolumeMixin(object):
@@ -1335,13 +1335,13 @@ class LVMInternalLogicalVolumeMixin(object):
# internal LVs follow different rules limitting size
def _set_size(self, newsize):
if not isinstance(newsize, Size):
@@ -146,7 +146,7 @@ index a55515fc..6d23bfba 100644
else:
# same rules apply as for any other LV
raise NotTypeSpecific()
@@ -1361,18 +1361,18 @@ class LVMSnapshotMixin(object):
@@ -1419,18 +1419,18 @@ class LVMSnapshotMixin(object):
return
if self.origin and not isinstance(self.origin, LVMLogicalVolumeDevice):
@@ -169,7 +169,7 @@ index a55515fc..6d23bfba 100644
@property
def is_snapshot_lv(self):
@@ -1544,7 +1544,7 @@ class LVMThinPoolMixin(object):
@@ -1606,7 +1606,7 @@ class LVMThinPoolMixin(object):
def _check_from_lvs(self):
if self._from_lvs:
if len(self._from_lvs) != 2:
@@ -178,7 +178,7 @@ index a55515fc..6d23bfba 100644
def _convert_from_lvs(self):
data_lv, metadata_lv = self._from_lvs
@@ -1590,7 +1590,7 @@ class LVMThinPoolMixin(object):
@@ -1652,7 +1652,7 @@ class LVMThinPoolMixin(object):
def _add_log_vol(self, lv):
""" Add an LV to this pool. """
if lv in self._lvs:
@@ -187,7 +187,7 @@ index a55515fc..6d23bfba 100644
# TODO: add some checking to prevent overcommit for preexisting
self.vg._add_log_vol(lv)
@@ -1601,7 +1601,7 @@ class LVMThinPoolMixin(object):
@@ -1663,7 +1663,7 @@ class LVMThinPoolMixin(object):
def _remove_log_vol(self, lv):
""" Remove an LV from this pool. """
if lv not in self._lvs:
@@ -196,7 +196,7 @@ index a55515fc..6d23bfba 100644
self._lvs.remove(lv)
self.vg._remove_log_vol(lv)
@@ -1711,14 +1711,14 @@ class LVMThinLogicalVolumeMixin(object):
@@ -1772,14 +1772,14 @@ class LVMThinLogicalVolumeMixin(object):
"""Check that this device has parents as expected"""
if isinstance(self.parents, (list, ParentList)):
if len(self.parents) != 1:
@@ -213,7 +213,7 @@ index a55515fc..6d23bfba 100644
@property
def is_thin_lv(self):
@@ -1755,7 +1755,7 @@ class LVMThinLogicalVolumeMixin(object):
@@ -1816,7 +1816,7 @@ class LVMThinLogicalVolumeMixin(object):
def _set_size(self, newsize):
if not isinstance(newsize, Size):
@ -222,7 +222,7 @@ index a55515fc..6d23bfba 100644
newsize = self.vg.align(newsize)
newsize = self.vg.align(util.numeric_type(newsize))
@@ -2229,7 +2229,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
@@ -2499,7 +2499,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
container = self.parents
if not isinstance(container, LVMVolumeGroupDevice):
@@ -231,7 +231,7 @@ index a55515fc..6d23bfba 100644
@type_specific
def _add_to_parents(self):
@@ -2240,12 +2240,12 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
@@ -2510,12 +2510,12 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
@type_specific
def _check_from_lvs(self):
"""Check the LVs to create this LV from"""
@@ -246,7 +246,7 @@ index a55515fc..6d23bfba 100644
@property
def external_dependencies(self):
@@ -2265,7 +2265,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
@@ -2535,7 +2535,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
@type_specific
def _set_size(self, newsize):
if not isinstance(newsize, Size):
@@ -255,7 +255,7 @@ index a55515fc..6d23bfba 100644
newsize = self.vg.align(newsize)
log.debug("trying to set lv %s size to %s", self.name, newsize)
@@ -2274,7 +2274,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
@@ -2544,7 +2544,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
# space for it. A similar reasoning applies to shrinking the LV.
if not self.exists and newsize > self.size and newsize > self.vg.free_space + self.vg_space_used:
log.error("failed to set size: %s short", newsize - (self.vg.free_space + self.vg_space_used))
@@ -264,7 +264,7 @@ index a55515fc..6d23bfba 100644
LVMLogicalVolumeBase._set_size(self, newsize)
@@ -2622,7 +2622,7 @@ class LVMCache(Cache):
@@ -2910,7 +2910,7 @@ class LVMCache(Cache):
spec.size = spec.pv.format.free
space_to_assign -= spec.pv.format.free
if space_to_assign > 0:
@@ -273,11 +273,11 @@ index a55515fc..6d23bfba 100644
@property
def size(self):
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
index 670c91c9..4156d0bf 100644
--- a/tests/devices_test/lvm_test.py
+++ b/tests/devices_test/lvm_test.py
@@ -36,10 +36,10 @@ class LVMDeviceTest(unittest.TestCase):
diff --git a/tests/unit_tests/devices_test/lvm_test.py b/tests/unit_tests/devices_test/lvm_test.py
index 47613fdc..995c2da4 100644
--- a/tests/unit_tests/devices_test/lvm_test.py
+++ b/tests/unit_tests/devices_test/lvm_test.py
@@ -32,10 +32,10 @@ class LVMDeviceTest(unittest.TestCase):
lv = LVMLogicalVolumeDevice("testlv", parents=[vg],
fmt=blivet.formats.get_format("xfs"))
@ -290,7 +290,7 @@ index 670c91c9..4156d0bf 100644
LVMLogicalVolumeDevice("snap1", parents=[vg], vorigin=True)
lv.exists = True
@@ -64,7 +64,7 @@ class LVMDeviceTest(unittest.TestCase):
@@ -60,7 +60,7 @@ class LVMDeviceTest(unittest.TestCase):
pool = LVMLogicalVolumeDevice("pool1", parents=[vg], size=Size("500 MiB"), seg_type="thin-pool")
thinlv = LVMLogicalVolumeDevice("thinlv", parents=[pool], size=Size("200 MiB"), seg_type="thin")
@ -299,7 +299,7 @@ index 670c91c9..4156d0bf 100644
LVMLogicalVolumeDevice("snap1", parents=[pool], origin=pv, seg_type="thin")
# now make the constructor succeed so we can test some properties
@@ -258,21 +258,21 @@ class LVMDeviceTest(unittest.TestCase):
@@ -310,21 +310,21 @@ class LVMDeviceTest(unittest.TestCase):
vg = LVMVolumeGroupDevice("testvg", parents=[pv, pv2])
# pvs have to be specified for non-linear LVs
@ -326,5 +326,5 @@ index 670c91c9..4156d0bf 100644
pv_spec2 = LVPVSpec(pv2, Size("250 MiB"))
lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
--
2.31.1
2.37.3
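
Seen from calling code, this revert restores the pre-break contract: invalid arguments raise the stock ValueError again rather than errors.DeviceError. A hedged illustration, with vg and lv standing for any volume group / logical volume pair built as in the tests above:

    try:
        vg._add_log_vol(lv)        # lv already belongs to this VG
    except ValueError as e:        # upstream 3.6 raises errors.DeviceError here
        print("cannot add LV: %s" % e)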


@@ -1,7 +1,7 @@
From 3a64795bdb94f525b55375bd89e7e5c8bc3a8921 Mon Sep 17 00:00:00 2001
From 4ad6f485a1e569feb5fd23ffcf78e08a7756e084 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 17 Aug 2022 14:24:21 +0200
Subject: [PATCH 1/3] Use MD populator instead of DM to handle DDF RAID format
Subject: [PATCH 1/2] Use MD populator instead of DM to handle DDF RAID format
---
blivet/formats/dmraid.py | 2 +-
@@ -9,7 +9,7 @@ Subject: [PATCH 1/3] Use MD populator instead of DM to handle DDF RAID format
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/blivet/formats/dmraid.py b/blivet/formats/dmraid.py
index 2ba9dcfe5..ce15905dc 100644
index 2ba9dcfe..ce15905d 100644
--- a/blivet/formats/dmraid.py
+++ b/blivet/formats/dmraid.py
@@ -43,7 +43,7 @@ class DMRaidMember(DeviceFormat):
@@ -22,7 +22,7 @@ index 2ba9dcfe5..ce15905dc 100644
"isw_raid_member",
"jmicron_raid_member", "lsi_mega_raid_member",
diff --git a/blivet/formats/mdraid.py b/blivet/formats/mdraid.py
index 41ddef810..4aa3f3b07 100644
index 41ddef81..4aa3f3b0 100644
--- a/blivet/formats/mdraid.py
+++ b/blivet/formats/mdraid.py
@@ -41,7 +41,7 @@ class MDRaidMember(DeviceFormat):
@@ -34,11 +34,14 @@ index 41ddef810..4aa3f3b07 100644
parted_flag = PARTITION_RAID
_formattable = True # can be formatted
_supported = True # is supported
--
2.37.3
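
The two one-line hunks are themselves elided by the outer diff; their effect is to move the "ddf_raid_member" udev type out of DMRaidMember._udev_types (the list whose tail is visible above) and into MDRaidMember._udev_types, so DDF members are claimed by the MD populator. An illustrative reduction, not the file's exact text:

    from blivet.formats import DeviceFormat

    class MDRaidMember(DeviceFormat):
        # after this patch udev's "ddf_raid_member" maps here, i.e. DDF
        # sets are assembled with mdadm rather than dmraid
        _udev_types = ["linux_raid_member", "ddf_raid_member"]
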
From 3ea946fa7ae18dbc413c17f1cd5a6eb23aaf1ea8 Mon Sep 17 00:00:00 2001
From abc7e018f43976cdab286d67207d515a74693d16 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 17 Aug 2022 14:24:58 +0200
Subject: [PATCH 2/3] Do not read DDF RAID UUID from udev
Subject: [PATCH 2/2] Do not read DDF RAID UUID from udev
The UUID we get from udev isn't the array UUID, we need to get
that using libblockdev.
@@ -47,7 +50,7 @@ that using libblockdev.
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/blivet/populator/helpers/mdraid.py b/blivet/populator/helpers/mdraid.py
index 3479e3f78..a7602d209 100644
index 3479e3f7..a7602d20 100644
--- a/blivet/populator/helpers/mdraid.py
+++ b/blivet/populator/helpers/mdraid.py
@@ -98,17 +98,21 @@ class MDFormatPopulator(FormatPopulator):
@ -78,30 +81,6 @@ index 3479e3f78..a7602d209 100644
return kwargs
def run(self):
--
2.37.3
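
Only context lines of the populator hunk survive above. The approach the message describes -- ask libblockdev for the array UUID instead of trusting udev's value -- can be sketched as follows (the helper name is illustrative, not the patch's code; assumes the mdraid plugin is loaded):

    import gi
    gi.require_version("BlockDev", "2.0")
    from gi.repository import BlockDev as blockdev

    def ddf_array_uuid(member_device):
        # udev reports the member format's UUID, not the array's, so
        # examine the device itself (wraps `mdadm --examine`)
        info = blockdev.md.examine(member_device)
        return info.uuid
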
From 4e766bb6f2bb487003ed4fa9b8415760c436af81 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 17 Mar 2022 15:48:25 +0100
Subject: [PATCH 3/3] Do not crash when a disk populator doesn't return kwargs
This happens when trying to use Blivet on a system with a BIOS
RAID without dmraid installed. Because we don't fully support
BIOS RAIDs using MD the MDBiosRaidDevicePopulator helper fails
to get kwargs for the BIOS RAID "disk" and populate fails.
---
blivet/populator/helpers/disk.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/blivet/populator/helpers/disk.py b/blivet/populator/helpers/disk.py
index 2e5026f7e..9db7b810d 100644
--- a/blivet/populator/helpers/disk.py
+++ b/blivet/populator/helpers/disk.py
@@ -68,6 +68,8 @@ def run(self):
log_method_call(self, name=name)
kwargs = self._get_kwargs()
+ if not kwargs:
+ return
device = self._device_class(name, **kwargs)
self._devicetree._add_device(device)
return device


@@ -1,538 +0,0 @@
From ad34dfa2f983bb3159af8b5780193e0427b505e9 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 10 Jun 2021 15:01:26 +0200
Subject: [PATCH] Fix/unify importing mock module in tests
mock is available in the unittest module since Python 3.3, we need
to use the old mock module not only with Python 2 but also with
the early versions of Python 3.
---
tests/action_test.py | 5 ++-
tests/dbus_test.py | 9 +++--
tests/devicefactory_test.py | 6 ++--
tests/devicelibs_test/disk_test.py | 6 ++--
tests/devicelibs_test/edd_test.py | 6 +++-
tests/devices_test/dependencies_test.py | 6 ++--
tests/devices_test/device_methods_test.py | 8 +++--
tests/devices_test/device_names_test.py | 6 ++--
tests/devices_test/device_properties_test.py | 8 ++---
tests/devices_test/disk_test.py | 7 ++--
tests/devices_test/lvm_test.py | 7 ++--
tests/devices_test/partition_test.py | 6 ++--
tests/devices_test/tags_test.py | 6 ++--
tests/devicetree_test.py | 6 ++--
tests/events_test.py | 6 ++--
tests/formats_test/disklabel_test.py | 6 ++--
tests/formats_test/luks_test.py | 6 ++--
tests/formats_test/lvmpv_test.py | 2 --
tests/formats_test/methods_test.py | 6 ++--
tests/formats_test/selinux_test.py | 6 ++--
tests/formats_test/swap_test.py | 2 --
tests/misc_test.py | 6 ++--
tests/partitioning_test.py | 6 ++--
tests/populator_test.py | 6 ++--
tests/storagetestcase.py | 5 ++-
tests/test_compat.py | 38 --------------------
tests/udev_test.py | 6 +++-
tests/unsupported_disklabel_test.py | 7 ++--
tests/util_test.py | 6 ++--
29 files changed, 104 insertions(+), 102 deletions(-)
delete mode 100644 tests/test_compat.py
diff --git a/tests/action_test.py b/tests/action_test.py
index 38a2e872..f60cf5d7 100644
--- a/tests/action_test.py
+++ b/tests/action_test.py
@@ -1,9 +1,8 @@
-from six import PY3
import unittest
-if PY3:
+try:
from unittest.mock import Mock
-else:
+except ImportError:
from mock import Mock
from tests.storagetestcase import StorageTestCase
diff --git a/tests/dbus_test.py b/tests/dbus_test.py
index 293ac073..9bb9102a 100644
--- a/tests/dbus_test.py
+++ b/tests/dbus_test.py
@@ -1,7 +1,10 @@
-import test_compat # pylint: disable=unused-import
-
import random
-from six.moves.mock import Mock, patch # pylint: disable=no-name-in-module,import-error
+
+try:
+ from unittest.mock import patch, Mock
+except ImportError:
+ from mock import patch, Mock
+
from unittest import TestCase
import dbus
diff --git a/tests/devicefactory_test.py b/tests/devicefactory_test.py
index dc0d6408..93c8bdb7 100644
--- a/tests/devicefactory_test.py
+++ b/tests/devicefactory_test.py
@@ -4,8 +4,10 @@ import unittest
from decimal import Decimal
import os
-import test_compat # pylint: disable=unused-import
-from six.moves.mock import patch # pylint: disable=no-name-in-module,import-error
+try:
+ from unittest.mock import patch
+except ImportError:
+ from mock import patch
import blivet
diff --git a/tests/devicelibs_test/disk_test.py b/tests/devicelibs_test/disk_test.py
index e67ef5b1..9cb39951 100644
--- a/tests/devicelibs_test/disk_test.py
+++ b/tests/devicelibs_test/disk_test.py
@@ -1,8 +1,10 @@
# pylint: skip-file
-import test_compat
+try:
+ from unittest.mock import Mock, patch, sentinel
+except ImportError:
+ from mock import Mock, patch, sentinel
import six
-from six.moves.mock import Mock, patch, sentinel
import unittest
from blivet.devicelibs import disk as disklib
diff --git a/tests/devicelibs_test/edd_test.py b/tests/devicelibs_test/edd_test.py
index 23d736f4..21bbcffc 100644
--- a/tests/devicelibs_test/edd_test.py
+++ b/tests/devicelibs_test/edd_test.py
@@ -1,5 +1,9 @@
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
import unittest
-import mock
import os
import inspect
import logging
diff --git a/tests/devices_test/dependencies_test.py b/tests/devices_test/dependencies_test.py
index c012751d..493d1c9f 100644
--- a/tests/devices_test/dependencies_test.py
+++ b/tests/devices_test/dependencies_test.py
@@ -1,8 +1,6 @@
-# vim:set fileencoding=utf-8
-from six import PY3
-if PY3:
+try:
from unittest.mock import patch, PropertyMock
-else:
+except ImportError:
from mock import patch, PropertyMock
import unittest
diff --git a/tests/devices_test/device_methods_test.py b/tests/devices_test/device_methods_test.py
index f00509be..8a70b5bb 100644
--- a/tests/devices_test/device_methods_test.py
+++ b/tests/devices_test/device_methods_test.py
@@ -1,9 +1,11 @@
-import test_compat # pylint: disable=unused-import
-
import six
-from six.moves.mock import patch, Mock, PropertyMock # pylint: disable=no-name-in-module,import-error
import unittest
+try:
+ from unittest.mock import patch, Mock, PropertyMock
+except ImportError:
+ from mock import patch, PropertyMock
+
from blivet.devices import StorageDevice
from blivet.devices import DiskDevice, PartitionDevice
from blivet.devices import LVMVolumeGroupDevice, LVMLogicalVolumeDevice
diff --git a/tests/devices_test/device_names_test.py b/tests/devices_test/device_names_test.py
index 2a66f983..ca44d38c 100644
--- a/tests/devices_test/device_names_test.py
+++ b/tests/devices_test/device_names_test.py
@@ -1,7 +1,9 @@
# vim:set fileencoding=utf-8
-import test_compat # pylint: disable=unused-import
+try:
+ from unittest.mock import patch
+except ImportError:
+ from mock import patch
-from six.moves.mock import patch # pylint: disable=no-name-in-module,import-error
import six
import unittest
diff --git a/tests/devices_test/device_properties_test.py b/tests/devices_test/device_properties_test.py
index 240ac088..8928707f 100644
--- a/tests/devices_test/device_properties_test.py
+++ b/tests/devices_test/device_properties_test.py
@@ -1,6 +1,3 @@
-# vim:set fileencoding=utf-8
-import test_compat # pylint: disable=unused-import
-
import six
import unittest
@@ -9,7 +6,10 @@ gi.require_version("BlockDev", "2.0")
from gi.repository import BlockDev as blockdev
-from six.moves.mock import Mock, patch # pylint: disable=no-name-in-module,import-error
+try:
+ from unittest.mock import patch, Mock
+except ImportError:
+ from mock import patch, Mock
import blivet
diff --git a/tests/devices_test/disk_test.py b/tests/devices_test/disk_test.py
index e9852303..cc8454e1 100644
--- a/tests/devices_test/disk_test.py
+++ b/tests/devices_test/disk_test.py
@@ -1,7 +1,8 @@
# pylint: skip-file
-import test_compat
-
-from six.moves.mock import patch
+try:
+ from unittest.mock import patch
+except ImportError:
+ from mock import patch
import unittest
from blivet.devices import DiskDevice
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
index 670c91c9..f50933c4 100644
--- a/tests/devices_test/lvm_test.py
+++ b/tests/devices_test/lvm_test.py
@@ -1,8 +1,9 @@
-# vim:set fileencoding=utf-8
-import test_compat # pylint: disable=unused-import
+try:
+ from unittest.mock import patch, PropertyMock
+except ImportError:
+ from mock import patch, PropertyMock
import six
-from six.moves.mock import patch, PropertyMock # pylint: disable=no-name-in-module,import-error
import unittest
import blivet
diff --git a/tests/devices_test/partition_test.py b/tests/devices_test/partition_test.py
index 0abd88df..4748dafe 100644
--- a/tests/devices_test/partition_test.py
+++ b/tests/devices_test/partition_test.py
@@ -1,5 +1,4 @@
# vim:set fileencoding=utf-8
-import test_compat # pylint: disable=unused-import
from collections import namedtuple
import os
@@ -7,7 +6,10 @@ import six
import unittest
import parted
-from six.moves.mock import Mock, patch # pylint: disable=no-name-in-module,import-error
+try:
+ from unittest.mock import patch, Mock
+except ImportError:
+ from mock import patch, Mock
from blivet.devices import DiskFile
from blivet.devices import PartitionDevice
diff --git a/tests/devices_test/tags_test.py b/tests/devices_test/tags_test.py
index 1edc37f6..49a2d72e 100644
--- a/tests/devices_test/tags_test.py
+++ b/tests/devices_test/tags_test.py
@@ -1,6 +1,8 @@
-import test_compat # pylint: disable=unused-import
+try:
+ from unittest.mock import patch
+except ImportError:
+ from mock import patch
-from six.moves.mock import patch # pylint: disable=no-name-in-module,import-error
import unittest
from blivet.devices import DiskDevice, FcoeDiskDevice, iScsiDiskDevice, MultipathDevice, StorageDevice, ZFCPDiskDevice
diff --git a/tests/devicetree_test.py b/tests/devicetree_test.py
index 11f8469d..fbf31c77 100644
--- a/tests/devicetree_test.py
+++ b/tests/devicetree_test.py
@@ -1,6 +1,8 @@
-import test_compat # pylint: disable=unused-import
+try:
+ from unittest.mock import patch, Mock, PropertyMock, sentinel
+except ImportError:
+ from mock import patch, Mock, PropertyMock, sentinel
-from six.moves.mock import Mock, patch, PropertyMock, sentinel # pylint: disable=no-name-in-module,import-error
import os
import six
import unittest
diff --git a/tests/events_test.py b/tests/events_test.py
index 5906b4e2..22666f6d 100644
--- a/tests/events_test.py
+++ b/tests/events_test.py
@@ -1,6 +1,8 @@
-import test_compat # pylint: disable=unused-import
+try:
+ from unittest.mock import patch, Mock
+except ImportError:
+ from mock import patch, Mock
-from six.moves.mock import Mock, patch # pylint: disable=no-name-in-module,import-error
import time
from unittest import TestCase
diff --git a/tests/formats_test/disklabel_test.py b/tests/formats_test/disklabel_test.py
index 0cfa736d..f514a778 100644
--- a/tests/formats_test/disklabel_test.py
+++ b/tests/formats_test/disklabel_test.py
@@ -1,7 +1,9 @@
-import test_compat # pylint: disable=unused-import
+try:
+ from unittest import mock
+except ImportError:
+ import mock
import parted
-from six.moves import mock # pylint: disable=no-name-in-module,import-error
import unittest
import blivet
diff --git a/tests/formats_test/luks_test.py b/tests/formats_test/luks_test.py
index be0d50b0..1edbdcb2 100644
--- a/tests/formats_test/luks_test.py
+++ b/tests/formats_test/luks_test.py
@@ -1,6 +1,8 @@
-import test_compat # pylint: disable=unused-import
+try:
+ from unittest.mock import patch
+except ImportError:
+ from mock import patch
-from six.moves.mock import patch # pylint: disable=no-name-in-module,import-error
import unittest
from blivet.formats.luks import LUKS
diff --git a/tests/formats_test/lvmpv_test.py b/tests/formats_test/lvmpv_test.py
index 792a2f1d..cbd2c419 100644
--- a/tests/formats_test/lvmpv_test.py
+++ b/tests/formats_test/lvmpv_test.py
@@ -1,5 +1,3 @@
-import test_compat # pylint: disable=unused-import
-
from blivet.formats.lvmpv import LVMPhysicalVolume
from blivet.size import Size
diff --git a/tests/formats_test/methods_test.py b/tests/formats_test/methods_test.py
index b2674ea7..2743b7db 100644
--- a/tests/formats_test/methods_test.py
+++ b/tests/formats_test/methods_test.py
@@ -1,7 +1,9 @@
-import test_compat # pylint: disable=unused-import
+try:
+ from unittest.mock import patch, sentinel, PropertyMock
+except ImportError:
+ from mock import patch, sentinel, PropertyMock
import six
-from six.moves.mock import patch, sentinel, PropertyMock # pylint: disable=no-name-in-module,import-error
import unittest
from blivet.errors import DeviceFormatError
diff --git a/tests/formats_test/selinux_test.py b/tests/formats_test/selinux_test.py
index 02e39011..26df5fe9 100644
--- a/tests/formats_test/selinux_test.py
+++ b/tests/formats_test/selinux_test.py
@@ -1,9 +1,9 @@
# pylint: disable=unused-import
import os
-from six import PY3
-if PY3:
+
+try:
from unittest.mock import patch, ANY
-else:
+except ImportError:
from mock import patch, ANY
import unittest
diff --git a/tests/formats_test/swap_test.py b/tests/formats_test/swap_test.py
index 56356144..8968ca15 100644
--- a/tests/formats_test/swap_test.py
+++ b/tests/formats_test/swap_test.py
@@ -1,5 +1,3 @@
-import test_compat # pylint: disable=unused-import
-
import six
import unittest
diff --git a/tests/misc_test.py b/tests/misc_test.py
index 3c8cf344..10ea1320 100644
--- a/tests/misc_test.py
+++ b/tests/misc_test.py
@@ -1,7 +1,9 @@
import unittest
-import test_compat # pylint: disable=unused-import
-from six.moves.mock import patch # pylint: disable=no-name-in-module,import-error
+try:
+ from unittest.mock import patch
+except ImportError:
+ from mock import patch
import blivet
diff --git a/tests/partitioning_test.py b/tests/partitioning_test.py
index b7aa5045..9b27f0c0 100644
--- a/tests/partitioning_test.py
+++ b/tests/partitioning_test.py
@@ -1,6 +1,8 @@
-import test_compat # pylint: disable=unused-import
+try:
+ from unittest.mock import patch, Mock
+except ImportError:
+ from mock import patch, Mock
-from six.moves.mock import Mock, patch # pylint: disable=no-name-in-module,import-error
import six
import unittest
diff --git a/tests/populator_test.py b/tests/populator_test.py
index a7748a9d..2a8532f0 100644
--- a/tests/populator_test.py
+++ b/tests/populator_test.py
@@ -1,7 +1,9 @@
-import test_compat # pylint: disable=unused-import
+try:
+ from unittest.mock import call, patch, sentinel, Mock, PropertyMock
+except ImportError:
+ from mock import call, patch, sentinel, Mock, PropertyMock
import gi
-from six.moves.mock import call, patch, sentinel, Mock, PropertyMock # pylint: disable=no-name-in-module,import-error
import six
import unittest
diff --git a/tests/storagetestcase.py b/tests/storagetestcase.py
index 1844dec5..1b856914 100644
--- a/tests/storagetestcase.py
+++ b/tests/storagetestcase.py
@@ -1,6 +1,9 @@
import unittest
-from mock import Mock
+try:
+ from unittest.mock import Mock
+except ImportError:
+ from mock import Mock
import parted
diff --git a/tests/test_compat.py b/tests/test_compat.py
deleted file mode 100644
index d0859e24..00000000
--- a/tests/test_compat.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# test_compat.py
-# Python (2 -v- 3) compatibility functions.
-#
-# Copyright (C) 2017 Red Hat, Inc.
-#
-# This copyrighted material is made available to anyone wishing to use,
-# modify, copy, or redistribute it subject to the terms and conditions of
-# the GNU Lesser General Public License v.2, or (at your option) any later
-# version. This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY expressed or implied, including the implied
-# warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
-# the GNU Lesser General Public License for more details. You should have
-# received a copy of the GNU Lesser General Public License along with this
-# program; if not, write to the Free Software Foundation, Inc., 51 Franklin
-# Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat trademarks
-# that are incorporated in the source code or documentation are not subject
-# to the GNU Lesser General Public License and may only be used or
-# replicated with the express permission of Red Hat, Inc.
-#
-# Red Hat Author(s): David Lehman <dlehman@redhat.com>
-#
-
-import six as _six
-
-mock_move = _six.MovedModule('mock', 'mock', 'unittest.mock')
-
-
-def add_move(mod):
- _six.add_move(mod)
- # https://bitbucket.org/gutworth/six/issues/116/enable-importing-from-within-custom
- _six._importer._add_module(mod, "moves." + mod.name)
-
-
-def setup():
- add_move(mock_move)
-
-
-setup()
diff --git a/tests/udev_test.py b/tests/udev_test.py
index f9b10620..569a144e 100644
--- a/tests/udev_test.py
+++ b/tests/udev_test.py
@@ -1,6 +1,10 @@
import unittest
-import mock
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
from udev_data import raid_data
diff --git a/tests/unsupported_disklabel_test.py b/tests/unsupported_disklabel_test.py
index f5b24779..38055333 100644
--- a/tests/unsupported_disklabel_test.py
+++ b/tests/unsupported_disklabel_test.py
@@ -1,7 +1,8 @@
-# vim:set fileencoding=utf-8
-import test_compat # pylint: disable=unused-import
+try:
+ from unittest.mock import patch, sentinel, DEFAULT
+except ImportError:
+ from mock import patch, sentinel, DEFAULT
-from six.moves.mock import patch, sentinel, DEFAULT # pylint: disable=no-name-in-module,import-error
import six
import unittest
diff --git a/tests/util_test.py b/tests/util_test.py
index 853b6166..b4f82c1b 100644
--- a/tests/util_test.py
+++ b/tests/util_test.py
@@ -1,7 +1,9 @@
# pylint: skip-file
-import test_compat
+try:
+ from unittest import mock
+except ImportError:
+ import mock
-from six.moves import mock
import os
import six
import tempfile
--
2.31.1


@@ -0,0 +1,77 @@
From 789dd296988aa9da17d97ece1efc33f9e232648e Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 13 Oct 2022 10:47:52 +0200
Subject: [PATCH] Revert "Remove the Blivet.roots attribute"
This reverts commit 19a826073345ca6b57a8f9a95ec855892320300e.
---
blivet/blivet.py | 21 +++++++++++++++++++++
blivet/devicefactory.py | 3 +++
2 files changed, 24 insertions(+)
diff --git a/blivet/blivet.py b/blivet/blivet.py
index bf72ee9c..dc066b03 100644
--- a/blivet/blivet.py
+++ b/blivet/blivet.py
@@ -88,6 +88,7 @@ class Blivet(object):
self.devicetree = DeviceTree(ignored_disks=self.ignored_disks,
exclusive_disks=self.exclusive_disks,
disk_images=self.disk_images)
+ self.roots = []
@property
def short_product_name(self):
@@ -1314,5 +1315,25 @@ class Blivet(object):
p = partition.disk.format.parted_disk.getPartitionByPath(partition.path)
partition.parted_partition = p
+ for root in new.roots:
+ root.swaps = [new.devicetree.get_device_by_id(d.id, hidden=True) for d in root.swaps]
+ root.swaps = [s for s in root.swaps if s]
+
+ removed = set()
+ for (mountpoint, old_dev) in root.mounts.items():
+ if old_dev is None:
+ continue
+
+ new_dev = new.devicetree.get_device_by_id(old_dev.id, hidden=True)
+ if new_dev is None:
+ # if the device has been removed don't include this
+ # mountpoint at all
+ removed.add(mountpoint)
+ else:
+ root.mounts[mountpoint] = new_dev
+
+ for mnt in removed:
+ del root.mounts[mnt]
+
log.debug("finished Blivet copy")
return new
diff --git a/blivet/devicefactory.py b/blivet/devicefactory.py
index 8105bfc7..6f460f6d 100644
--- a/blivet/devicefactory.py
+++ b/blivet/devicefactory.py
@@ -383,6 +383,7 @@ class DeviceFactory(object):
# used for error recovery
self.__devices = []
self.__actions = []
+ self.__roots = []
def _is_container_encrypted(self):
return all(isinstance(p, LUKSDevice) for p in self.device.container.parents)
@@ -994,10 +995,12 @@ class DeviceFactory(object):
_blivet_copy = self.storage.copy()
self.__devices = _blivet_copy.devicetree._devices
self.__actions = _blivet_copy.devicetree._actions
+ self.__roots = _blivet_copy.roots
def _revert_devicetree(self):
self.storage.devicetree._devices = self.__devices
self.storage.devicetree._actions = self.__actions
+ self.storage.roots = self.__roots
class PartitionFactory(DeviceFactory):
--
2.37.3


@@ -0,0 +1,45 @@
From 7931a74e691979dd23a16e7a017b4ef5bc296b79 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 18 Oct 2022 12:28:37 +0200
Subject: [PATCH] Fix potential AttributeError when getting stratis blockdev
info
---
blivet/static_data/stratis_info.py | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/blivet/static_data/stratis_info.py b/blivet/static_data/stratis_info.py
index bd1c5a18..42f230ee 100644
--- a/blivet/static_data/stratis_info.py
+++ b/blivet/static_data/stratis_info.py
@@ -124,20 +124,22 @@ class StratisInfo(object):
log.error("Failed to get DBus properties of '%s'", blockdev_path)
return None
+ blockdev_uuid = str(uuid.UUID(properties["Uuid"]))
+
pool_path = properties["Pool"]
if pool_path == "/":
pool_name = ""
+ return StratisBlockdevInfo(path=properties["Devnode"], uuid=blockdev_uuid,
+ pool_name="", pool_uuid="", object_path=blockdev_path)
else:
pool_info = self._get_pool_info(properties["Pool"])
if not pool_info:
return None
pool_name = pool_info.name
- blockdev_uuid = str(uuid.UUID(properties["Uuid"]))
-
- return StratisBlockdevInfo(path=properties["Devnode"], uuid=blockdev_uuid,
- pool_name=pool_name, pool_uuid=pool_info.uuid,
- object_path=blockdev_path)
+ return StratisBlockdevInfo(path=properties["Devnode"], uuid=blockdev_uuid,
+ pool_name=pool_name, pool_uuid=pool_info.uuid,
+ object_path=blockdev_path)
def _get_locked_pools_info(self):
locked_pools = []
--
2.37.3


@@ -1,23 +0,0 @@
From d609cebba48744c97ac7e0461f8827ab63198026 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 10 Jun 2021 16:58:42 +0200
Subject: [PATCH] Fix util.virt_detect on Xen
Xen is apparently still alive so we should return True for it too.
---
blivet/util.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/blivet/util.py b/blivet/util.py
index 3bebb003..af60210b 100644
--- a/blivet/util.py
+++ b/blivet/util.py
@@ -1130,4 +1130,4 @@ def detect_virt():
except (safe_dbus.DBusCallError, safe_dbus.DBusPropertyError):
return False
else:
- return vm[0] in ('qemu', 'kvm')
+ return vm[0] in ('qemu', 'kvm', 'xen')
--
2.31.1
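
For context, detect_virt asks systemd over DBus which hypervisor the system runs under; the patched line is the tail of roughly this logic (a sketch with the DBus constants inlined, not the file's exact text):

    from blivet import safe_dbus

    def detect_virt():
        try:
            vm = safe_dbus.get_property_sync('org.freedesktop.systemd1',
                                             '/org/freedesktop/systemd1',
                                             'org.freedesktop.systemd1.Manager',
                                             'Virtualization')
        except (safe_dbus.DBusCallError, safe_dbus.DBusPropertyError):
            return False
        # the property is e.g. "qemu", "kvm", "xen", or "" on bare metal;
        # get_property_sync returns a tuple, hence vm[0]
        return vm[0] in ('qemu', 'kvm', 'xen')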


@@ -1,35 +0,0 @@
From 8ece3da18b1abb89320d02f4475002e6a3ed7875 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 20 May 2021 13:40:26 +0200
Subject: [PATCH] Fix activating old style LVM snapshots
The old style snapshots are activated together with the origin LV
so we need to make sure it is activated to be able to remove the
snapshot or its format.
Resolves: rhbz#1961739
---
blivet/devices/lvm.py | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index a55515fcb..fb57804d9 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -1461,9 +1461,13 @@ def _set_format(self, fmt): # pylint: disable=unused-argument
self._update_format_from_origin()
@old_snapshot_specific
- def setup(self, orig=False):
- # the old snapshot cannot be setup and torn down
- pass
+ def setup(self, orig=False): # pylint: disable=unused-argument
+ # the old snapshot is activated together with the origin
+ if self.origin and not self.origin.status:
+ try:
+ self.origin.setup()
+ except blockdev.LVMError as lvmerr:
+ log.error("failed to activate origin LV: %s", lvmerr)
@old_snapshot_specific
def teardown(self, recursive=False):


@@ -0,0 +1,27 @@
From b747c4ed07937f54a546ffb2f2c8c95e0797dd6c Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 20 Oct 2022 15:19:29 +0200
Subject: [PATCH] tests: Skip XFS resize test on CentOS/RHEL 9
Partitions on loop devices are broken on CentOS/RHEL 9.
---
tests/skip.yml | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/tests/skip.yml b/tests/skip.yml
index 568c3fff..66b34493 100644
--- a/tests/skip.yml
+++ b/tests/skip.yml
@@ -29,3 +29,9 @@
- distro: "centos"
version: "9"
reason: "Creating RAID 1 LV on CentOS/RHEL 9 causes a system deadlock"
+
+- test: storage_tests.formats_test.fs_test.XFSTestCase.test_resize
+ skip_on:
+ - distro: ["centos", "enterprise_linux"]
+ version: "9"
+ reason: "Creating partitions on loop devices is broken on CentOS/RHEL 9 latest kernel"
--
2.37.3


@@ -1,75 +0,0 @@
From 344e624f91010b6041c22ee8a24c9305b82af969 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 18 May 2021 12:54:02 +0200
Subject: [PATCH] Fix resolving devices with names that look like BIOS drive
number
A RAID array named "10" will not be resolved because we try to
resolve it using EDD data and after this lookup fails, we don't
try the name.
Resolves: rhbz#1960798
---
blivet/devicetree.py | 18 +++++++++---------
tests/devicetree_test.py | 4 ++++
2 files changed, 13 insertions(+), 9 deletions(-)
diff --git a/blivet/devicetree.py b/blivet/devicetree.py
index 88e9f0e5..f4ae1968 100644
--- a/blivet/devicetree.py
+++ b/blivet/devicetree.py
@@ -634,20 +634,20 @@ class DeviceTreeBase(object):
(label.startswith("'") and label.endswith("'"))):
label = label[1:-1]
device = self.labels.get(label)
- elif re.match(r'(0x)?[A-Fa-f0-9]{2}(p\d+)?$', devspec):
- # BIOS drive number
- (drive, _p, partnum) = devspec.partition("p")
- spec = int(drive, 16)
- for (edd_name, edd_number) in self.edd_dict.items():
- if edd_number == spec:
- device = self.get_device_by_name(edd_name + partnum)
- break
elif options and "nodev" in options.split(","):
device = self.get_device_by_name(devspec)
if not device:
device = self.get_device_by_path(devspec)
else:
- if not devspec.startswith("/dev/"):
+ if re.match(r'(0x)?[A-Fa-f0-9]{2}(p\d+)?$', devspec):
+ # BIOS drive number
+ (drive, _p, partnum) = devspec.partition("p")
+ spec = int(drive, 16)
+ for (edd_name, edd_number) in self.edd_dict.items():
+ if edd_number == spec:
+ device = self.get_device_by_name(edd_name + partnum)
+ break
+ if not device and not devspec.startswith("/dev/"):
device = self.get_device_by_name(devspec)
if not device:
devspec = "/dev/" + devspec
diff --git a/tests/devicetree_test.py b/tests/devicetree_test.py
index 11f8469d..b033343d 100644
--- a/tests/devicetree_test.py
+++ b/tests/devicetree_test.py
@@ -49,6 +49,9 @@ class DeviceTreeTestCase(unittest.TestCase):
dev3 = StorageDevice("sdp2", exists=True)
dt._add_device(dev3)
+ dev4 = StorageDevice("10", exists=True)
+ dt._add_device(dev4)
+
dt.edd_dict.update({"dev1": 0x81,
"dev2": 0x82})
@@ -62,6 +65,7 @@ class DeviceTreeTestCase(unittest.TestCase):
self.assertEqual(dt.resolve_device("0x82"), dev2)
self.assertEqual(dt.resolve_device(dev3.name), dev3)
+ self.assertEqual(dt.resolve_device(dev4.name), dev4)
def test_device_name(self):
# check that devicetree.names property contains all device's names
--
2.31.1
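
The failure mode is easy to reproduce standalone -- any name that parses as a two-digit hex BIOS drive number used to take the EDD branch and never fall back to a plain name lookup (an illustration, not patch code):

    import re

    BIOS_DRIVE_RE = r'(0x)?[A-Fa-f0-9]{2}(p\d+)?$'

    # "10" matches, so the old code searched edd_dict for drive 0x10 and
    # returned nothing; the patch moves this branch into the fallback path
    # and adds the "if not device ..." name lookup seen above
    print(bool(re.match(BIOS_DRIVE_RE, "10")))   # True
    print(int("10", 16))                         # 16 -- the EDD number searched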


@@ -0,0 +1,160 @@
From 9618b84f94187efddc7316c2546bed923a91ecf9 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 3 Nov 2022 08:36:27 +0100
Subject: [PATCH 1/2] Revert "Set XFS minimal size to 300 MiB"
This reverts commit 307d49833771d161314bae50c68e70dc35c3bb36.
---
blivet/formats/fs.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py
index 8c346aa5..33922f3a 100644
--- a/blivet/formats/fs.py
+++ b/blivet/formats/fs.py
@@ -1091,7 +1091,7 @@ class XFS(FS):
_modules = ["xfs"]
_labelfs = fslabeling.XFSLabeling()
_uuidfs = fsuuid.XFSUUID()
- _min_size = Size("300 MiB")
+ _min_size = Size("16 MiB")
_max_size = Size("16 EiB")
_formattable = True
_linux_native = True
--
2.38.1
From 24d94922d6879baa85aaa101f6b21efa568a9cbc Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 3 Nov 2022 08:36:39 +0100
Subject: [PATCH 2/2] Revert "tests: Create bigger devices for XFS testing"
This reverts commit 467cb8024010b2cabb1e92d9e64f6d3cbe949ad9.
---
tests/storage_tests/formats_test/fs_test.py | 7 +++----
tests/storage_tests/formats_test/fslabeling.py | 4 +---
tests/storage_tests/formats_test/fsuuid.py | 4 +---
tests/storage_tests/formats_test/labeling_test.py | 2 --
tests/storage_tests/formats_test/uuid_test.py | 3 ---
5 files changed, 5 insertions(+), 15 deletions(-)
diff --git a/tests/storage_tests/formats_test/fs_test.py b/tests/storage_tests/formats_test/fs_test.py
index cf8fb441..97f4cbbe 100644
--- a/tests/storage_tests/formats_test/fs_test.py
+++ b/tests/storage_tests/formats_test/fs_test.py
@@ -54,7 +54,6 @@ class ReiserFSTestCase(fstesting.FSAsRoot):
class XFSTestCase(fstesting.FSAsRoot):
_fs_class = fs.XFS
- _DEVICE_SIZE = Size("500 MiB")
def can_resize(self, an_fs):
resize_tasks = (an_fs._resize, an_fs._size_info)
@@ -96,12 +95,12 @@ class XFSTestCase(fstesting.FSAsRoot):
self.assertFalse(an_fs.resizable)
# Not resizable, so can not do resizing actions.
with self.assertRaises(DeviceFormatError):
- an_fs.target_size = Size("300 MiB")
+ an_fs.target_size = Size("64 MiB")
with self.assertRaises(DeviceFormatError):
an_fs.do_resize()
else:
disk = DiskDevice(os.path.basename(self.loop_devices[0]))
- part = self._create_partition(disk, Size("300 MiB"))
+ part = self._create_partition(disk, Size("50 MiB"))
an_fs = self._fs_class()
an_fs.device = part.path
self.assertIsNone(an_fs.create())
@@ -114,7 +113,7 @@ class XFSTestCase(fstesting.FSAsRoot):
part = self._create_partition(disk, size=part.size + Size("40 MiB"))
# Try a reasonable target size
- TARGET_SIZE = Size("325 MiB")
+ TARGET_SIZE = Size("64 MiB")
an_fs.target_size = TARGET_SIZE
self.assertEqual(an_fs.target_size, TARGET_SIZE)
self.assertNotEqual(an_fs._size, TARGET_SIZE)
diff --git a/tests/storage_tests/formats_test/fslabeling.py b/tests/storage_tests/formats_test/fslabeling.py
index ebe0b70a..0e0dc261 100644
--- a/tests/storage_tests/formats_test/fslabeling.py
+++ b/tests/storage_tests/formats_test/fslabeling.py
@@ -21,10 +21,8 @@ class LabelingAsRoot(loopbackedtestcase.LoopBackedTestCase):
_invalid_label = abc.abstractproperty(
doc="A label which is invalid for this filesystem.")
- _DEVICE_SIZE = Size("100 MiB")
-
def __init__(self, methodName='run_test'):
- super(LabelingAsRoot, self).__init__(methodName=methodName, device_spec=[self._DEVICE_SIZE])
+ super(LabelingAsRoot, self).__init__(methodName=methodName, device_spec=[Size("100 MiB")])
def setUp(self):
an_fs = self._fs_class()
diff --git a/tests/storage_tests/formats_test/fsuuid.py b/tests/storage_tests/formats_test/fsuuid.py
index 0b9762fd..16aa19a6 100644
--- a/tests/storage_tests/formats_test/fsuuid.py
+++ b/tests/storage_tests/formats_test/fsuuid.py
@@ -23,11 +23,9 @@ class SetUUID(loopbackedtestcase.LoopBackedTestCase):
_invalid_uuid = abc.abstractproperty(
doc="An invalid UUID for this filesystem.")
- _DEVICE_SIZE = Size("100 MiB")
-
def __init__(self, methodName='run_test'):
super(SetUUID, self).__init__(methodName=methodName,
- device_spec=[self._DEVICE_SIZE])
+ device_spec=[Size("100 MiB")])
def setUp(self):
an_fs = self._fs_class()
diff --git a/tests/storage_tests/formats_test/labeling_test.py b/tests/storage_tests/formats_test/labeling_test.py
index 0702260a..d24e6619 100644
--- a/tests/storage_tests/formats_test/labeling_test.py
+++ b/tests/storage_tests/formats_test/labeling_test.py
@@ -1,7 +1,6 @@
import unittest
from blivet.formats import device_formats
-from blivet.size import Size
import blivet.formats.fs as fs
import blivet.formats.swap as swap
@@ -62,7 +61,6 @@ class InitializationTestCase(unittest.TestCase):
class XFSTestCase(fslabeling.CompleteLabelingAsRoot):
_fs_class = fs.XFS
_invalid_label = "root filesystem"
- _DEVICE_SIZE = Size("500 MiB")
class FATFSTestCase(fslabeling.CompleteLabelingAsRoot):
diff --git a/tests/storage_tests/formats_test/uuid_test.py b/tests/storage_tests/formats_test/uuid_test.py
index af35c0ee..ee8d452e 100644
--- a/tests/storage_tests/formats_test/uuid_test.py
+++ b/tests/storage_tests/formats_test/uuid_test.py
@@ -2,7 +2,6 @@ import unittest
import blivet.formats.fs as fs
import blivet.formats.swap as swap
-from blivet.size import Size
from . import fsuuid
@@ -53,14 +52,12 @@ class XFSTestCase(fsuuid.SetUUIDWithMkFs):
_fs_class = fs.XFS
_invalid_uuid = "abcdefgh-ijkl-mnop-qrst-uvwxyz123456"
_valid_uuid = "97e3d40f-dca8-497d-8b86-92f257402465"
- _DEVICE_SIZE = Size("500 MiB")
class XFSAfterTestCase(fsuuid.SetUUIDAfterMkFs):
_fs_class = fs.XFS
_invalid_uuid = "abcdefgh-ijkl-mnop-qrst-uvwxyz123456"
_valid_uuid = "97e3d40f-dca8-497d-8b86-92f257402465"
- _DEVICE_SIZE = Size("500 MiB")
class FATFSTestCase(fsuuid.SetUUIDWithMkFs):
--
2.38.1


@@ -0,0 +1,55 @@
From fed62af06eb1584adbacd821dfe79c2df52c6aa4 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 2 Nov 2022 12:14:28 +0100
Subject: [PATCH] Catch BlockDevNotImplementedError for btrfs plugin calls
This is a workaround for RHEL where the btrfs plugin is not
available and where we might still try to call some libblockdev
functions to gather information about preexisting btrfs devices.
---
blivet/devices/btrfs.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/blivet/devices/btrfs.py b/blivet/devices/btrfs.py
index 0e029715..1ae6a04d 100644
--- a/blivet/devices/btrfs.py
+++ b/blivet/devices/btrfs.py
@@ -362,7 +362,7 @@ class BTRFSVolumeDevice(BTRFSDevice, ContainerDevice, RaidDevice):
try:
subvols = blockdev.btrfs.list_subvolumes(mountpoint,
snapshots_only=snapshots_only)
- except blockdev.BtrfsError as e:
+ except (blockdev.BtrfsError, blockdev.BlockDevNotImplementedError) as e:
log.debug("failed to list subvolumes: %s", e)
else:
self._get_default_subvolume_id()
@@ -400,7 +400,7 @@ class BTRFSVolumeDevice(BTRFSDevice, ContainerDevice, RaidDevice):
with self._do_temp_mount() as mountpoint:
try:
subvolid = blockdev.btrfs.get_default_subvolume_id(mountpoint)
- except blockdev.BtrfsError as e:
+ except (blockdev.BtrfsError, blockdev.BlockDevNotImplementedError) as e:
log.debug("failed to get default subvolume id: %s", e)
self._default_subvolume_id = subvolid
@@ -413,7 +413,7 @@ class BTRFSVolumeDevice(BTRFSDevice, ContainerDevice, RaidDevice):
with self._do_temp_mount() as mountpoint:
try:
blockdev.btrfs.set_default_subvolume(mountpoint, vol_id)
- except blockdev.BtrfsError as e:
+ except (blockdev.BtrfsError, blockdev.BlockDevNotImplementedError) as e:
log.error("failed to set new default subvolume id (%s): %s",
vol_id, e)
# The only time we set a new default subvolume is so we can remove
@@ -471,7 +471,7 @@ class BTRFSVolumeDevice(BTRFSDevice, ContainerDevice, RaidDevice):
if not self.format.vol_uuid:
try:
bd_info = blockdev.btrfs.filesystem_info(self.parents[0].path)
- except blockdev.BtrfsError as e:
+ except (blockdev.BtrfsError, blockdev.BlockDevNotImplementedError) as e:
log.error("failed to get filesystem info for new btrfs volume %s", e)
else:
self.format.vol_uuid = bd_info.uuid
--
2.38.1
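
All four call sites get the same one-line treatment; distilled from the hunks above (names as in the surrounding file), the guard is simply:

    try:
        bd_info = blockdev.btrfs.filesystem_info(self.parents[0].path)
    except (blockdev.BtrfsError, blockdev.BlockDevNotImplementedError) as e:
        # a missing plugin now degrades to a log message instead of a crash
        log.error("failed to get filesystem info for new btrfs volume %s", e)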


@@ -1,151 +0,0 @@
From dc1e2fe7783748528cac2f7aa516c89d1959b052 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 29 Jul 2021 14:44:22 +0200
Subject: [PATCH] Do not set chunk size for RAID 1
Setting chunk size for RAID 1 doesn't make sense and latest
mdadm started returning error instead of ignoring the --chunk
option when creating an array.
Resolves: rhbz#1987170
---
blivet/devicelibs/raid.py | 12 ++++++++++
blivet/devices/md.py | 15 ++++++++++---
tests/devices_test/md_test.py | 41 +++++++++++++++++++++++++++++++++--
3 files changed, 63 insertions(+), 5 deletions(-)
diff --git a/blivet/devicelibs/raid.py b/blivet/devicelibs/raid.py
index 19c3fae98..a9e241c7a 100644
--- a/blivet/devicelibs/raid.py
+++ b/blivet/devicelibs/raid.py
@@ -462,6 +462,18 @@ def _pad(self, size, chunk_size):
def _get_recommended_stride(self, member_count):
return None
+ def get_size(self, member_sizes, num_members=None, chunk_size=None, superblock_size_func=None):
+ if not member_sizes:
+ return Size(0)
+
+ if num_members is None:
+ num_members = len(member_sizes)
+
+ min_size = min(member_sizes)
+ superblock_size = superblock_size_func(min_size)
+ min_data_size = self._trim(min_size - superblock_size, chunk_size)
+ return self.get_net_array_size(num_members, min_data_size)
+
RAID1 = RAID1()
ALL_LEVELS.add_raid_level(RAID1)
diff --git a/blivet/devices/md.py b/blivet/devices/md.py
index 69eee93a5..d1a2faf1f 100644
--- a/blivet/devices/md.py
+++ b/blivet/devices/md.py
@@ -138,7 +138,7 @@ def __init__(self, name, level=None, major=None, minor=None, size=None,
if self.exists:
self._chunk_size = self.read_chunk_size()
else:
- self._chunk_size = chunk_size or mdraid.MD_CHUNK_SIZE
+ self.chunk_size = chunk_size or Size(0)
if not self.exists and not isinstance(metadata_version, str):
self.metadata_version = "default"
@@ -208,8 +208,14 @@ def sector_size(self):
@property
def chunk_size(self):
- if self.exists and self._chunk_size == Size(0):
- self._chunk_size = self.read_chunk_size()
+ if self._chunk_size == Size(0):
+ if self.exists:
+ return self.read_chunk_size()
+ else:
+ if self.level == raid.RAID1:
+ return self._chunk_size
+ else:
+ return mdraid.MD_CHUNK_SIZE
return self._chunk_size
@chunk_size.setter
@@ -223,6 +229,9 @@ def chunk_size(self, newsize):
if self.exists:
raise ValueError("cannot set chunk size for an existing device")
+ if self.level == raid.RAID1 and newsize != Size(0):
+ raise ValueError("specifying chunk size is not allowed for raid1")
+
self._chunk_size = newsize
def read_chunk_size(self):
diff --git a/tests/devices_test/md_test.py b/tests/devices_test/md_test.py
index 46df76d3d..47a0fa0cc 100644
--- a/tests/devices_test/md_test.py
+++ b/tests/devices_test/md_test.py
@@ -1,6 +1,11 @@
import six
import unittest
+try:
+ from unittest.mock import patch
+except ImportError:
+ from mock import patch
+
import blivet
from blivet.devices import StorageDevice
@@ -27,9 +32,27 @@ def test_chunk_size1(self):
raid_array = MDRaidArrayDevice(name="raid", level="raid0", member_devices=2,
total_devices=2, parents=[member1, member2])
- # no chunk_size specified -- default value
+ # no chunk_size specified and RAID0 -- default value
self.assertEqual(raid_array.chunk_size, mdraid.MD_CHUNK_SIZE)
+ with patch("blivet.devices.md.blockdev.md.create") as md_create:
+ raid_array._create()
+ md_create.assert_called_with("/dev/md/raid", "raid0", ["/dev/member1", "/dev/member2"],
+ 0, version="default", bitmap=False,
+ chunk_size=mdraid.MD_CHUNK_SIZE)
+
+ raid_array = MDRaidArrayDevice(name="raid", level="raid1", member_devices=2,
+ total_devices=2, parents=[member1, member2])
+
+ # no chunk_size specified and RAID1 -- no chunk size set (0)
+ self.assertEqual(raid_array.chunk_size, Size(0))
+
+ with patch("blivet.devices.md.blockdev.md.create") as md_create:
+ raid_array._create()
+ md_create.assert_called_with("/dev/md/raid", "raid1", ["/dev/member1", "/dev/member2"],
+ 0, version="default", bitmap=True,
+ chunk_size=0)
+
def test_chunk_size2(self):
member1 = StorageDevice("member1", fmt=blivet.formats.get_format("mdmember"),
@@ -40,11 +63,25 @@ def test_chunk_size2(self):
raid_array = MDRaidArrayDevice(name="raid", level="raid0", member_devices=2,
total_devices=2, parents=[member1, member2],
chunk_size=Size("1024 KiB"))
-
self.assertEqual(raid_array.chunk_size, Size("1024 KiB"))
+ # for raid0 setting chunk_size = 0 means "default"
+ raid_array.chunk_size = Size(0)
+ self.assertEqual(raid_array.chunk_size, mdraid.MD_CHUNK_SIZE)
+
with six.assertRaisesRegex(self, ValueError, "new chunk size must be of type Size"):
raid_array.chunk_size = 1
with six.assertRaisesRegex(self, ValueError, "new chunk size must be multiple of 4 KiB"):
raid_array.chunk_size = Size("5 KiB")
+
+ with six.assertRaisesRegex(self, ValueError, "specifying chunk size is not allowed for raid1"):
+ MDRaidArrayDevice(name="raid", level="raid1", member_devices=2,
+ total_devices=2, parents=[member1, member2],
+ chunk_size=Size("1024 KiB"))
+
+ raid_array = MDRaidArrayDevice(name="raid", level="raid1", member_devices=2,
+ total_devices=2, parents=[member1, member2])
+
+ with six.assertRaisesRegex(self, ValueError, "specifying chunk size is not allowed for raid1"):
+ raid_array.chunk_size = Size("512 KiB")


@@ -0,0 +1,590 @@
From 9383855c8a15e6d7c4033cd8d7ae8310b462d166 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 18 Oct 2022 10:38:00 +0200
Subject: [PATCH 1/3] Add a basic support for NVMe and NVMe Fabrics devices
This adds two new device types: NVMeNamespaceDevice and
NVMeFabricsNamespaceDevice mostly to allow to differentiate
between "local" and "remote" NVMe devices. The new libblockdev
NVMe plugin is required for full functionality.
---
blivet/__init__.py | 6 +-
blivet/devices/__init__.py | 2 +-
blivet/devices/disk.py | 101 ++++++++++++++++++++++
blivet/devices/lib.py | 1 +
blivet/populator/helpers/__init__.py | 2 +-
blivet/populator/helpers/disk.py | 64 ++++++++++++++
blivet/udev.py | 33 +++++++
blivet/util.py | 9 ++
tests/unit_tests/populator_test.py | 124 +++++++++++++++++++++++++++
9 files changed, 339 insertions(+), 3 deletions(-)
diff --git a/blivet/__init__.py b/blivet/__init__.py
index bbc7ea3a..3b9e659e 100644
--- a/blivet/__init__.py
+++ b/blivet/__init__.py
@@ -67,6 +67,10 @@ if arch.is_s390():
else:
_REQUESTED_PLUGIN_NAMES = set(("swap", "crypto", "loop", "mdraid", "mpath", "dm", "nvdimm"))
+# nvme plugin is not generally available
+if hasattr(blockdev.Plugin, "NVME"):
+ _REQUESTED_PLUGIN_NAMES.add("nvme")
+
_requested_plugins = blockdev.plugin_specs_from_names(_REQUESTED_PLUGIN_NAMES)
# XXX force non-dbus LVM plugin
lvm_plugin = blockdev.PluginSpec()
@@ -74,7 +78,7 @@ lvm_plugin.name = blockdev.Plugin.LVM
lvm_plugin.so_name = "libbd_lvm.so.2"
_requested_plugins.append(lvm_plugin)
try:
- # do not check for dependencies during libblockdev initializtion, do runtime
+ # do not check for dependencies during libblockdev initialization, do runtime
# checks instead
blockdev.switch_init_checks(False)
succ_, avail_plugs = blockdev.try_reinit(require_plugins=_requested_plugins, reload=False, log_func=log_bd_message)
diff --git a/blivet/devices/__init__.py b/blivet/devices/__init__.py
index 8bb0a979..4d16466e 100644
--- a/blivet/devices/__init__.py
+++ b/blivet/devices/__init__.py
@@ -22,7 +22,7 @@
from .lib import device_path_to_name, device_name_to_disk_by_path, ParentList
from .device import Device
from .storage import StorageDevice
-from .disk import DiskDevice, DiskFile, DMRaidArrayDevice, MultipathDevice, iScsiDiskDevice, FcoeDiskDevice, DASDDevice, ZFCPDiskDevice, NVDIMMNamespaceDevice
+from .disk import DiskDevice, DiskFile, DMRaidArrayDevice, MultipathDevice, iScsiDiskDevice, FcoeDiskDevice, DASDDevice, ZFCPDiskDevice, NVDIMMNamespaceDevice, NVMeNamespaceDevice, NVMeFabricsNamespaceDevice
from .partition import PartitionDevice
from .dm import DMDevice, DMLinearDevice, DMCryptDevice, DMIntegrityDevice, DM_MAJORS
from .luks import LUKSDevice, IntegrityDevice
diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index bc4a1b5e..b5e25939 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -22,10 +22,13 @@
import gi
gi.require_version("BlockDev", "2.0")
+gi.require_version("GLib", "2.0")
from gi.repository import BlockDev as blockdev
+from gi.repository import GLib
import os
+from collections import namedtuple
from .. import errors
from .. import util
@@ -725,3 +728,101 @@ class NVDIMMNamespaceDevice(DiskDevice):
@property
def sector_size(self):
return self._sector_size
+
+
+NVMeController = namedtuple("NVMeController", ["name", "serial", "nvme_ver", "id", "subsysnqn"])
+
+
+class NVMeNamespaceDevice(DiskDevice):
+
+ """ NVMe namespace """
+ _type = "nvme"
+ _packages = ["nvme-cli"]
+
+ def __init__(self, device, **kwargs):
+ """
+ :param name: the device name (generally a device node's basename)
+ :type name: str
+ :keyword exists: does this device exist?
+ :type exists: bool
+ :keyword size: the device's size
+ :type size: :class:`~.size.Size`
+ :keyword parents: a list of parent devices
+ :type parents: list of :class:`StorageDevice`
+ :keyword format: this device's formatting
+ :type format: :class:`~.formats.DeviceFormat` or a subclass of it
+ :keyword nsid: namespace ID
+ :type nsid: int
+ """
+ self.nsid = kwargs.pop("nsid", 0)
+
+ DiskDevice.__init__(self, device, **kwargs)
+
+ self._clear_local_tags()
+ self.tags.add(Tags.local)
+ self.tags.add(Tags.nvme)
+
+ self._controllers = None
+
+ @property
+ def controllers(self):
+ if self._controllers is not None:
+ return self._controllers
+
+ self._controllers = []
+ if not hasattr(blockdev.Plugin, "NVME"):
+ # the nvme plugin is not generally available
+ log.debug("Failed to get controllers for %s: libblockdev NVME plugin is not available", self.name)
+ return self._controllers
+
+ try:
+ controllers = blockdev.nvme_find_ctrls_for_ns(self.sysfs_path)
+ except GLib.GError as err:
+ log.debug("Failed to get controllers for %s: %s", self.name, str(err))
+ return self._controllers
+
+ for controller in controllers:
+ try:
+ cpath = util.get_path_by_sysfs_path(controller, "char")
+ except RuntimeError as err:
+ log.debug("Failed to find controller %s: %s", controller, str(err))
+ continue
+ try:
+ cinfo = blockdev.nvme_get_controller_info(cpath)
+ except GLib.GError as err:
+ log.debug("Failed to get controller info for %s: %s", cpath, str(err))
+ continue
+ self._controllers.append(NVMeController(name=os.path.basename(cpath),
+ serial=cinfo.serial_number,
+ nvme_ver=cinfo.nvme_ver,
+ id=cinfo.ctrl_id,
+ subsysnqn=cinfo.subsysnqn))
+
+ return self._controllers
+
+
+class NVMeFabricsNamespaceDevice(NVMeNamespaceDevice, NetworkStorageDevice):
+
+ """ NVMe fabrics namespace """
+ _type = "nvme-fabrics"
+ _packages = ["nvme-cli"]
+
+ def __init__(self, device, **kwargs):
+ """
+ :param name: the device name (generally a device node's basename)
+ :type name: str
+ :keyword exists: does this device exist?
+ :type exists: bool
+ :keyword size: the device's size
+ :type size: :class:`~.size.Size`
+ :keyword parents: a list of parent devices
+ :type parents: list of :class:`StorageDevice`
+ :keyword format: this device's formatting
+ :type format: :class:`~.formats.DeviceFormat` or a subclass of it
+ """
+ NVMeNamespaceDevice.__init__(self, device, **kwargs)
+ NetworkStorageDevice.__init__(self)
+
+ self._clear_local_tags()
+ self.tags.add(Tags.remote)
+ self.tags.add(Tags.nvme)
diff --git a/blivet/devices/lib.py b/blivet/devices/lib.py
index 1bda0bab..b3c4c5b0 100644
--- a/blivet/devices/lib.py
+++ b/blivet/devices/lib.py
@@ -32,6 +32,7 @@ class Tags(str, Enum):
"""Tags that describe various classes of disk."""
local = 'local'
nvdimm = 'nvdimm'
+ nvme = 'nvme'
remote = 'remote'
removable = 'removable'
ssd = 'ssd'
diff --git a/blivet/populator/helpers/__init__.py b/blivet/populator/helpers/__init__.py
index c5ac412f..50ab4de8 100644
--- a/blivet/populator/helpers/__init__.py
+++ b/blivet/populator/helpers/__init__.py
@@ -6,7 +6,7 @@ from .formatpopulator import FormatPopulator
from .btrfs import BTRFSFormatPopulator
from .boot import AppleBootFormatPopulator, EFIFormatPopulator, MacEFIFormatPopulator
-from .disk import DiskDevicePopulator, iScsiDevicePopulator, FCoEDevicePopulator, MDBiosRaidDevicePopulator, DASDDevicePopulator, ZFCPDevicePopulator, NVDIMMNamespaceDevicePopulator
+from .disk import DiskDevicePopulator, iScsiDevicePopulator, FCoEDevicePopulator, MDBiosRaidDevicePopulator, DASDDevicePopulator, ZFCPDevicePopulator, NVDIMMNamespaceDevicePopulator, NVMeNamespaceDevicePopulator, NVMeFabricsNamespaceDevicePopulator
from .disklabel import DiskLabelFormatPopulator
from .dm import DMDevicePopulator
from .dmraid import DMRaidFormatPopulator
diff --git a/blivet/populator/helpers/disk.py b/blivet/populator/helpers/disk.py
index 9db7b810..9ed1eebe 100644
--- a/blivet/populator/helpers/disk.py
+++ b/blivet/populator/helpers/disk.py
@@ -22,13 +22,16 @@
import gi
gi.require_version("BlockDev", "2.0")
+gi.require_version("GLib", "2.0")
from gi.repository import BlockDev as blockdev
+from gi.repository import GLib
from ... import udev
from ... import util
from ...devices import DASDDevice, DiskDevice, FcoeDiskDevice, iScsiDiskDevice
from ...devices import MDBiosRaidArrayDevice, ZFCPDiskDevice, NVDIMMNamespaceDevice
+from ...devices import NVMeNamespaceDevice, NVMeFabricsNamespaceDevice
from ...devices import device_path_to_name
from ...storage_log import log_method_call
from .devicepopulator import DevicePopulator
@@ -251,3 +254,64 @@ class NVDIMMNamespaceDevicePopulator(DiskDevicePopulator):
log.info("%s is an NVDIMM namespace device", udev.device_get_name(self.data))
return kwargs
+
+
+class NVMeNamespaceDevicePopulator(DiskDevicePopulator):
+ priority = 20
+
+ _device_class = NVMeNamespaceDevice
+
+ @classmethod
+ def match(cls, data):
+ return (super(NVMeNamespaceDevicePopulator, NVMeNamespaceDevicePopulator).match(data) and
+ udev.device_is_nvme_namespace(data) and not udev.device_is_nvme_fabrics(data))
+
+ def _get_kwargs(self):
+ kwargs = super(NVMeNamespaceDevicePopulator, self)._get_kwargs()
+
+ log.info("%s is an NVMe local namespace device", udev.device_get_name(self.data))
+
+ if not hasattr(blockdev.Plugin, "NVME"):
+ # the nvme plugin is not generally available
+ return kwargs
+
+ path = udev.device_get_devname(self.data)
+ try:
+ ninfo = blockdev.nvme_get_namespace_info(path)
+ except GLib.GError as err:
+ log.debug("Failed to get namespace info for %s: %s", path, str(err))
+ else:
+ kwargs["nsid"] = ninfo.nsid
+
+ log.info("%s is an NVMe local namespace device", udev.device_get_name(self.data))
+ return kwargs
+
+
+class NVMeFabricsNamespaceDevicePopulator(DiskDevicePopulator):
+ priority = 20
+
+ _device_class = NVMeFabricsNamespaceDevice
+
+ @classmethod
+ def match(cls, data):
+ return (super(NVMeFabricsNamespaceDevicePopulator, NVMeFabricsNamespaceDevicePopulator).match(data) and
+ udev.device_is_nvme_namespace(data) and udev.device_is_nvme_fabrics(data))
+
+ def _get_kwargs(self):
+ kwargs = super(NVMeFabricsNamespaceDevicePopulator, self)._get_kwargs()
+
+ log.info("%s is an NVMe fabrics namespace device", udev.device_get_name(self.data))
+
+ if not hasattr(blockdev.Plugin, "NVME"):
+ # the nvme plugin is not generally available
+ return kwargs
+
+ path = udev.device_get_devname(self.data)
+ try:
+ ninfo = blockdev.nvme_get_namespace_info(path)
+ except GLib.GError as err:
+ log.debug("Failed to get namespace info for %s: %s", path, str(err))
+ else:
+ kwargs["nsid"] = ninfo.nsid
+
+ return kwargs
diff --git a/blivet/udev.py b/blivet/udev.py
index efbc53d6..533a1edc 100644
--- a/blivet/udev.py
+++ b/blivet/udev.py
@@ -1023,6 +1023,39 @@ def device_is_nvdimm_namespace(info):
return ninfo is not None
+def device_is_nvme_namespace(info):
+ if info.get("DEVTYPE") != "disk":
+ return False
+
+ if not info.get("SYS_PATH"):
+ return False
+
+ device = pyudev.Devices.from_sys_path(global_udev, info.get("SYS_PATH"))
+ while device:
+ if device.subsystem and device.subsystem.startswith("nvme"):
+ return True
+ device = device.parent
+
+ return False
+
+
+def device_is_nvme_fabrics(info):
+ if not device_is_nvme_namespace(info):
+ return False
+
+ if not hasattr(blockdev.Plugin, "NVME") or not blockdev.is_plugin_available(blockdev.Plugin.NVME): # pylint: disable=no-member
+ # nvme plugin is not available -- even if this is an nvme fabrics device we
+ # don't have tools to work with it, so we should pretend it's just a normal nvme
+ return False
+
+ controllers = blockdev.nvme_find_ctrls_for_ns(info.get("SYS_PATH", ""))
+ if not controllers:
+ return False
+
+ transport = util.get_sysfs_attr(controllers[0], "transport")
+ return transport in ("rdma", "fc", "tcp", "loop")
+
+
def device_is_hidden(info):
sysfs_path = device_get_sysfs_path(info)
hidden = util.get_sysfs_attr(sysfs_path, "hidden")
diff --git a/blivet/util.py b/blivet/util.py
index 0e578aea..3040ee5a 100644
--- a/blivet/util.py
+++ b/blivet/util.py
@@ -432,6 +432,15 @@ def get_sysfs_path_by_name(dev_node, class_name="block"):
"for '%s' (it is not at '%s')" % (dev_node, dev_path))
+def get_path_by_sysfs_path(sysfs_path, dev_type="block"):
+ """ Return device path for a given device sysfs path. """
+
+ dev = get_sysfs_attr(sysfs_path, "dev")
+ if not dev or not os.path.exists("/dev/%s/%s" % (dev_type, dev)):
+ raise RuntimeError("get_path_by_sysfs_path: Could not find device for %s" % sysfs_path)
+ return os.path.realpath("/dev/%s/%s" % (dev_type, dev))
+
+
def get_cow_sysfs_path(dev_path, dev_sysfsPath):
""" Return sysfs path of cow device for a given device.
"""
diff --git a/tests/unit_tests/populator_test.py b/tests/unit_tests/populator_test.py
index 369fe878..1ee29b57 100644
--- a/tests/unit_tests/populator_test.py
+++ b/tests/unit_tests/populator_test.py
@@ -13,6 +13,7 @@ from gi.repository import BlockDev as blockdev
from blivet.devices import DiskDevice, DMDevice, FileDevice, LoopDevice
from blivet.devices import MDRaidArrayDevice, MultipathDevice, OpticalDevice
from blivet.devices import PartitionDevice, StorageDevice, NVDIMMNamespaceDevice
+from blivet.devices import NVMeNamespaceDevice, NVMeFabricsNamespaceDevice
from blivet.devicelibs import lvm
from blivet.devicetree import DeviceTree
from blivet.formats import get_device_format_class, get_format, DeviceFormat
@@ -21,6 +22,7 @@ from blivet.populator.helpers import DiskDevicePopulator, DMDevicePopulator, Loo
from blivet.populator.helpers import LVMDevicePopulator, MDDevicePopulator, MultipathDevicePopulator
from blivet.populator.helpers import OpticalDevicePopulator, PartitionDevicePopulator
from blivet.populator.helpers import LVMFormatPopulator, MDFormatPopulator, NVDIMMNamespaceDevicePopulator
+from blivet.populator.helpers import NVMeNamespaceDevicePopulator, NVMeFabricsNamespaceDevicePopulator
from blivet.populator.helpers import get_format_helper, get_device_helper
from blivet.populator.helpers.boot import AppleBootFormatPopulator, EFIFormatPopulator, MacEFIFormatPopulator
from blivet.populator.helpers.formatpopulator import FormatPopulator
@@ -591,6 +593,128 @@ class NVDIMMNamespaceDevicePopulatorTestCase(PopulatorHelperTestCase):
self.assertTrue(device in devicetree.devices)
+class NVMeNamespaceDevicePopulatorTestCase(PopulatorHelperTestCase):
+ helper_class = NVMeNamespaceDevicePopulator
+
+ @patch("os.path.join")
+ @patch("blivet.udev.device_is_cdrom", return_value=False)
+ @patch("blivet.udev.device_is_dm", return_value=False)
+ @patch("blivet.udev.device_is_loop", return_value=False)
+ @patch("blivet.udev.device_is_md", return_value=False)
+ @patch("blivet.udev.device_is_partition", return_value=False)
+ @patch("blivet.udev.device_is_disk", return_value=True)
+ @patch("blivet.udev.device_is_nvme_fabrics", return_value=False)
+ @patch("blivet.udev.device_is_nvme_namespace", return_value=True)
+ def test_match(self, *args):
+ """Test matching of NVMe namespace device populator."""
+ device_is_nvme_namespace = args[0]
+ self.assertTrue(self.helper_class.match(None))
+ device_is_nvme_namespace.return_value = False
+ self.assertFalse(self.helper_class.match(None))
+
+ @patch("os.path.join")
+ @patch("blivet.udev.device_is_cdrom", return_value=False)
+ @patch("blivet.udev.device_is_dm", return_value=False)
+ @patch("blivet.udev.device_is_loop", return_value=False)
+ @patch("blivet.udev.device_is_md", return_value=False)
+ @patch("blivet.udev.device_is_partition", return_value=False)
+ @patch("blivet.udev.device_is_disk", return_value=True)
+ @patch("blivet.udev.device_is_nvme_fabrics", return_value=False)
+ @patch("blivet.udev.device_is_nvme_namespace", return_value=True)
+ def test_get_helper(self, *args):
+ """Test get_device_helper for NVMe namespaces."""
+ device_is_nvme_namespace = args[0]
+ data = {}
+ self.assertEqual(get_device_helper(data), self.helper_class)
+
+ # verify that setting one of the required True return values to False prevents success
+ device_is_nvme_namespace.return_value = False
+ self.assertNotEqual(get_device_helper(data), self.helper_class)
+ device_is_nvme_namespace.return_value = True
+
+ @patch("blivet.udev.device_get_name")
+ def test_run(self, *args):
+ """Test disk device populator."""
+ device_get_name = args[0]
+
+ devicetree = DeviceTree()
+
+ # set up some fake udev data to verify handling of specific entries
+ data = {'SYS_PATH': 'dummy', 'DEVNAME': 'dummy', 'ID_PATH': 'dummy'}
+
+ device_name = "nop"
+ device_get_name.return_value = device_name
+ helper = self.helper_class(devicetree, data)
+
+ device = helper.run()
+
+ self.assertIsInstance(device, NVMeNamespaceDevice)
+ self.assertTrue(device.exists)
+ self.assertTrue(device.is_disk)
+ self.assertTrue(device in devicetree.devices)
+
+
+class NVMeFabricsNamespaceDevicePopulatorTestCase(PopulatorHelperTestCase):
+ helper_class = NVMeFabricsNamespaceDevicePopulator
+
+ @patch("os.path.join")
+ @patch("blivet.udev.device_is_cdrom", return_value=False)
+ @patch("blivet.udev.device_is_dm", return_value=False)
+ @patch("blivet.udev.device_is_loop", return_value=False)
+ @patch("blivet.udev.device_is_md", return_value=False)
+ @patch("blivet.udev.device_is_partition", return_value=False)
+ @patch("blivet.udev.device_is_disk", return_value=True)
+ @patch("blivet.udev.device_is_nvme_namespace", return_value=True)
+ @patch("blivet.udev.device_is_nvme_fabrics", return_value=True)
+ def test_match(self, *args):
+ """Test matching of NVMe namespace device populator."""
+ device_is_nvme_fabrics = args[0]
+ self.assertTrue(self.helper_class.match(None))
+ device_is_nvme_fabrics.return_value = False
+ self.assertFalse(self.helper_class.match(None))
+
+ @patch("os.path.join")
+ @patch("blivet.udev.device_is_cdrom", return_value=False)
+ @patch("blivet.udev.device_is_dm", return_value=False)
+ @patch("blivet.udev.device_is_loop", return_value=False)
+ @patch("blivet.udev.device_is_md", return_value=False)
+ @patch("blivet.udev.device_is_partition", return_value=False)
+ @patch("blivet.udev.device_is_disk", return_value=True)
+ @patch("blivet.udev.device_is_nvme_namespace", return_value=True)
+ @patch("blivet.udev.device_is_nvme_fabrics", return_value=True)
+ def test_get_helper(self, *args):
+ """Test get_device_helper for NVMe namespaces."""
+ device_is_nvme_fabrics = args[0]
+ data = {}
+ self.assertEqual(get_device_helper(data), self.helper_class)
+
+ # verify that setting one of the required True return values to False prevents success
+ device_is_nvme_fabrics.return_value = False
+ self.assertNotEqual(get_device_helper(data), self.helper_class)
+ device_is_nvme_fabrics.return_value = True
+
+ @patch("blivet.udev.device_get_name")
+ def test_run(self, *args):
+ """Test disk device populator."""
+ device_get_name = args[0]
+
+ devicetree = DeviceTree()
+
+ # set up some fake udev data to verify handling of specific entries
+ data = {'SYS_PATH': 'dummy', 'DEVNAME': 'dummy', 'ID_PATH': 'dummy'}
+
+ device_name = "nop"
+ device_get_name.return_value = device_name
+ helper = self.helper_class(devicetree, data)
+
+ device = helper.run()
+
+ self.assertIsInstance(device, NVMeFabricsNamespaceDevice)
+ self.assertTrue(device.exists)
+ self.assertTrue(device.is_disk)
+ self.assertTrue(device in devicetree.devices)
+
+
class MDDevicePopulatorTestCase(PopulatorHelperTestCase):
helper_class = MDDevicePopulator
--
2.38.1
From af6ad7ff2f08180672690910d453158bcd463936 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 2 Dec 2022 12:20:47 +0100
Subject: [PATCH 2/3] Add transport and address to NVMeController info
---
blivet/devices/disk.py | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index b5e25939..796b5b03 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -730,7 +730,8 @@ class NVDIMMNamespaceDevice(DiskDevice):
return self._sector_size
-NVMeController = namedtuple("NVMeController", ["name", "serial", "nvme_ver", "id", "subsysnqn"])
+NVMeController = namedtuple("NVMeController", ["name", "serial", "nvme_ver", "id", "subsysnqn",
+ "transport", "transport_address"])
class NVMeNamespaceDevice(DiskDevice):
@@ -792,11 +793,15 @@ class NVMeNamespaceDevice(DiskDevice):
except GLib.GError as err:
log.debug("Failed to get controller info for %s: %s", cpath, str(err))
continue
+ ctrans = util.get_sysfs_attr(controller, "transport")
+ ctaddr = util.get_sysfs_attr(controller, "address")
self._controllers.append(NVMeController(name=os.path.basename(cpath),
serial=cinfo.serial_number,
nvme_ver=cinfo.nvme_ver,
id=cinfo.ctrl_id,
- subsysnqn=cinfo.subsysnqn))
+ subsysnqn=cinfo.subsysnqn,
+ transport=ctrans,
+ transport_address=ctaddr))
return self._controllers
--
2.38.1
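With the two extra fields in place, a consumer can tell a local
PCIe controller from a fabrics-attached one; a sketch, where ns is
assumed to be an NVMeNamespaceDevice from a populated device tree:

    for ctrl in ns.controllers:
        # transport comes from sysfs: "pcie" locally, "tcp"/"rdma"/"fc"
        # for fabrics-attached controllers
        print(ctrl.name, ctrl.transport, ctrl.transport_address)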
From a04538936ff62958c272b5e2b2657d177df1ef13 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 8 Dec 2022 13:15:33 +0100
Subject: [PATCH 3/3] Add additional identifiers to NVMeNamespaceDevice
---
blivet/devices/disk.py | 2 ++
blivet/populator/helpers/disk.py | 3 +++
2 files changed, 5 insertions(+)
diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index 796b5b03..8842b4dc 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -756,6 +756,8 @@ class NVMeNamespaceDevice(DiskDevice):
:type nsid: int
"""
self.nsid = kwargs.pop("nsid", 0)
+ self.eui64 = kwargs.pop("eui64", "")
+ self.nguid = kwargs.pop("nguid", "")
DiskDevice.__init__(self, device, **kwargs)
diff --git a/blivet/populator/helpers/disk.py b/blivet/populator/helpers/disk.py
index 9ed1eebe..cf20d302 100644
--- a/blivet/populator/helpers/disk.py
+++ b/blivet/populator/helpers/disk.py
@@ -282,6 +282,9 @@ class NVMeNamespaceDevicePopulator(DiskDevicePopulator):
log.debug("Failed to get namespace info for %s: %s", path, str(err))
else:
kwargs["nsid"] = ninfo.nsid
+ kwargs["uuid"] = ninfo.uuid
+ kwargs["eui64"] = ninfo.eui64
+ kwargs["nguid"] = ninfo.nguid
log.info("%s is an NVMe local namespace device", udev.device_get_name(self.data))
return kwargs
--
2.38.1

From 8dbb92d692db9cbfbca0c82a1ed10a0492208534 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 4 Aug 2021 13:00:53 +0200
Subject: [PATCH 1/3] edd_test: Locate the edd_data based on the test file
location
We can't use the blivet.edd module location when running tests
against an installed version of blivet.
---
tests/devicelibs_test/edd_test.py | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/tests/devicelibs_test/edd_test.py b/tests/devicelibs_test/edd_test.py
index 21bbcffc4..0d0824e6b 100644
--- a/tests/devicelibs_test/edd_test.py
+++ b/tests/devicelibs_test/edd_test.py
@@ -5,7 +5,6 @@
import unittest
import os
-import inspect
import logging
import copy
@@ -114,9 +113,9 @@ def root(self, name):
name = name[:-1]
if name.startswith("/"):
name = name[1:]
- dirname = os.path.dirname(inspect.getfile(edd))
+ dirname = os.path.abspath(os.path.dirname(__file__))
return os.path.join(dirname,
- "../../tests/devicelibs_test/edd_data/",
+ "edd_data/",
name)
def edd_debug(self, *args):
From 924bc805f692b0f050a8a8b8187769f36aea059f Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 4 Aug 2021 13:02:08 +0200
Subject: [PATCH 2/3] tests: Allow running tests without the tests directory in
PYTHONPATH
When running the tests against an installed version of blivet, the
"tests" directory is not in PYTHONPATH, so we need to import all
helper modules using relative paths.
---
tests/action_test.py | 2 +-
tests/devicelibs_test/edd_test.py | 2 +-
tests/{ => devicelibs_test}/lib.py | 0
tests/formats_test/fs_test.py | 2 +-
tests/formats_test/fslabeling.py | 2 +-
tests/formats_test/fstesting.py | 2 +-
tests/formats_test/fsuuid.py | 2 +-
tests/formats_test/labeling_test.py | 2 +-
tests/{ => formats_test}/loopbackedtestcase.py | 0
tests/formats_test/luks_test.py | 2 +-
tests/formats_test/lvmpv_test.py | 2 +-
tests/partitioning_test.py | 2 +-
12 files changed, 10 insertions(+), 10 deletions(-)
rename tests/{ => devicelibs_test}/lib.py (100%)
rename tests/{ => formats_test}/loopbackedtestcase.py (100%)
diff --git a/tests/action_test.py b/tests/action_test.py
index f60cf5d7e..8509ce35a 100644
--- a/tests/action_test.py
+++ b/tests/action_test.py
@@ -5,7 +5,7 @@
except ImportError:
from mock import Mock
-from tests.storagetestcase import StorageTestCase
+from storagetestcase import StorageTestCase
import blivet
from blivet.formats import get_format
from blivet.size import Size
diff --git a/tests/devicelibs_test/edd_test.py b/tests/devicelibs_test/edd_test.py
index 0d0824e6b..0db1fd16e 100644
--- a/tests/devicelibs_test/edd_test.py
+++ b/tests/devicelibs_test/edd_test.py
@@ -10,7 +10,7 @@
from blivet import arch
from blivet.devicelibs import edd
-from tests import lib
+from . import lib
class FakeDevice(object):
diff --git a/tests/lib.py b/tests/devicelibs_test/lib.py
similarity index 100%
rename from tests/lib.py
rename to tests/devicelibs_test/lib.py
diff --git a/tests/formats_test/fs_test.py b/tests/formats_test/fs_test.py
index ab3499a76..bd6433707 100644
--- a/tests/formats_test/fs_test.py
+++ b/tests/formats_test/fs_test.py
@@ -10,7 +10,7 @@
from blivet.formats import get_format
from blivet.devices import PartitionDevice, DiskDevice
-from tests import loopbackedtestcase
+from . import loopbackedtestcase
from . import fstesting
diff --git a/tests/formats_test/fslabeling.py b/tests/formats_test/fslabeling.py
index fbb28eee7..0e0dc2612 100644
--- a/tests/formats_test/fslabeling.py
+++ b/tests/formats_test/fslabeling.py
@@ -2,7 +2,7 @@
import abc
import six
-from tests import loopbackedtestcase
+from . import loopbackedtestcase
from blivet.errors import FSError, FSReadLabelError
from blivet.size import Size
diff --git a/tests/formats_test/fstesting.py b/tests/formats_test/fstesting.py
index 86b2a1168..e34584d88 100644
--- a/tests/formats_test/fstesting.py
+++ b/tests/formats_test/fstesting.py
@@ -5,7 +5,7 @@
import os
import tempfile
-from tests import loopbackedtestcase
+from . import loopbackedtestcase
from blivet.errors import FSError, FSResizeError, DeviceFormatError
from blivet.size import Size, ROUND_DOWN
from blivet.formats import fs
diff --git a/tests/formats_test/fsuuid.py b/tests/formats_test/fsuuid.py
index c80039457..16aa19a66 100644
--- a/tests/formats_test/fsuuid.py
+++ b/tests/formats_test/fsuuid.py
@@ -3,7 +3,7 @@
import six
from unittest import skipIf
-from tests import loopbackedtestcase
+from . import loopbackedtestcase
from blivet.errors import FSError, FSWriteUUIDError
from blivet.size import Size
from blivet.util import capture_output
diff --git a/tests/formats_test/labeling_test.py b/tests/formats_test/labeling_test.py
index e26cb7df1..d24e66191 100644
--- a/tests/formats_test/labeling_test.py
+++ b/tests/formats_test/labeling_test.py
@@ -1,10 +1,10 @@
import unittest
-from tests import loopbackedtestcase
from blivet.formats import device_formats
import blivet.formats.fs as fs
import blivet.formats.swap as swap
+from . import loopbackedtestcase
from . import fslabeling
diff --git a/tests/loopbackedtestcase.py b/tests/formats_test/loopbackedtestcase.py
similarity index 100%
rename from tests/loopbackedtestcase.py
rename to tests/formats_test/loopbackedtestcase.py
diff --git a/tests/formats_test/luks_test.py b/tests/formats_test/luks_test.py
index 1edbdcb28..5f25f0988 100644
--- a/tests/formats_test/luks_test.py
+++ b/tests/formats_test/luks_test.py
@@ -9,7 +9,7 @@
from blivet.size import Size
-from tests import loopbackedtestcase
+from . import loopbackedtestcase
class LUKSTestCase(loopbackedtestcase.LoopBackedTestCase):
diff --git a/tests/formats_test/lvmpv_test.py b/tests/formats_test/lvmpv_test.py
index cbd2c4195..cdc33ec4d 100644
--- a/tests/formats_test/lvmpv_test.py
+++ b/tests/formats_test/lvmpv_test.py
@@ -2,7 +2,7 @@
from blivet.size import Size
-from tests import loopbackedtestcase
+from . import loopbackedtestcase
class LVMPVTestCase(loopbackedtestcase.LoopBackedTestCase):
diff --git a/tests/partitioning_test.py b/tests/partitioning_test.py
index 9b27f0c0d..e7b7aa375 100644
--- a/tests/partitioning_test.py
+++ b/tests/partitioning_test.py
@@ -31,7 +31,7 @@
from blivet.errors import PartitioningError
-from tests.imagebackedtestcase import ImageBackedTestCase
+from imagebackedtestcase import ImageBackedTestCase
from blivet.blivet import Blivet
from blivet.util import sparsetmpfile
from blivet.formats import get_format
From 99385bd67ac944c43bc77f4b5465c672203e2679 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 6 Aug 2021 14:51:01 +0200
Subject: [PATCH 3/3] tests: Print version and blivet location when running
tests
---
tests/run_tests.py | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index 32e3f2d3a..8ad8b61ad 100644
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -32,6 +32,11 @@
testdir = os.path.abspath(os.path.dirname(__file__))
+ import blivet
+ print("Running tests with Blivet %s from %s" % (blivet.__version__,
+ os.path.abspath(os.path.dirname(blivet.__file__))),
+ file=sys.stderr)
+
if args.testname:
for n in args.testname:
suite.addTests(unittest.TestLoader().loadTestsFromName(n))

From 2aba050e74dc5df483da022dcf436b101c7a4301 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 11 Jan 2023 14:59:24 +0100
Subject: [PATCH] Default to encryption sector size 512 for LUKS devices
We are currently letting cryptsetup decide the optimal encryption
sector size for LUKS. The problem is that for disks with a physical
sector size of 4096, cryptsetup will default to a 4096 encryption
sector size even if the drive's logical sector size is 512, which
means these disks cannot be combined with other 512 logical sector
size disks in LVM. This requires a more sophisticated solution in
the future, but for now we just default to 512 unless specified
otherwise by the user.
Resolves: rhbz#2103800
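A sketch of the resulting behaviour, mirroring the adjusted unit
test below (format objects created without a backing device):

    from blivet.formats.luks import LUKS

    fmt = LUKS(luks_version="luks2")
    print(fmt.luks_sector_size)   # 512 -- the new fallback instead of 0

    # an explicit value still wins, e.g. for stacks of 4Kn-only disks
    fmt4k = LUKS(luks_version="luks2", luks_sector_size=4096)

    # and the argument remains LUKS2-only
    LUKS(luks_version="luks1", luks_sector_size=4096)   # raises ValueError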
---
blivet/formats/luks.py | 10 +++++++---
tests/unit_tests/formats_tests/luks_test.py | 2 +-
2 files changed, 8 insertions(+), 4 deletions(-)
diff --git a/blivet/formats/luks.py b/blivet/formats/luks.py
index 8de4911f..2637e0c5 100644
--- a/blivet/formats/luks.py
+++ b/blivet/formats/luks.py
@@ -166,9 +166,13 @@ class LUKS(DeviceFormat):
if self.pbkdf_args.type == "pbkdf2" and self.pbkdf_args.max_memory_kb:
log.warning("Memory limit is not used for pbkdf2 and it will be ignored.")
- self.luks_sector_size = kwargs.get("luks_sector_size") or 0
- if self.luks_sector_size and self.luks_version != "luks2":
- raise ValueError("Sector size argument is valid only for LUKS version 2.")
+ self.luks_sector_size = kwargs.get("luks_sector_size")
+ if self.luks_version == "luks2":
+ if self.luks_sector_size is None:
+ self.luks_sector_size = 512 # XXX we don't want cryptsetup choose automatically here so fallback to 512
+ else:
+ if self.luks_sector_size:
+ raise ValueError("Sector size argument is valid only for LUKS version 2.")
def __repr__(self):
s = DeviceFormat.__repr__(self)
diff --git a/tests/unit_tests/formats_tests/luks_test.py b/tests/unit_tests/formats_tests/luks_test.py
index 5ae6acfe..ec7b7592 100644
--- a/tests/unit_tests/formats_tests/luks_test.py
+++ b/tests/unit_tests/formats_tests/luks_test.py
@@ -53,7 +53,7 @@ class LUKSNodevTestCase(unittest.TestCase):
def test_sector_size(self):
fmt = LUKS()
- self.assertEqual(fmt.luks_sector_size, 0)
+ self.assertEqual(fmt.luks_sector_size, 512)
with self.assertRaises(ValueError):
fmt = LUKS(luks_version="luks1", luks_sector_size=4096)
--
2.39.0

From a977e8389a09615615dc76dee8aaaea1cc0ac54b Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 22 Jul 2021 09:26:54 +0200
Subject: [PATCH 1/2] Tell LVM to ignore the new devices file for now
We currently don't support working with the devices file, and it's
not possible to use lvm.conf filters together with the devices
file, so we need to tell LVM to ignore it until we can support it.
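The composed --config devices section then looks roughly like the
following sketch of _set_global_config() (the filter entry is only
present when something has been rejected; "sda" is hypothetical):

    devices_string = 'preferred_names=["^/dev/mapper/", "^/dev/md/", "^/dev/sd"]'
    devices_string += ' filter=["r|/sda$|"]'   # only if a filter is set
    devices_string += " use_devicesfile=0"     # the new knob added here
    config_string = " devices { %s } " % devices_string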
---
blivet/devicelibs/lvm.py | 4 ++++
blivet/tasks/availability.py | 1 +
2 files changed, 5 insertions(+)
diff --git a/blivet/devicelibs/lvm.py b/blivet/devicelibs/lvm.py
index d56a76edc..989ecccaf 100644
--- a/blivet/devicelibs/lvm.py
+++ b/blivet/devicelibs/lvm.py
@@ -91,6 +91,10 @@ def _set_global_config():
if filter_string:
devices_string += " %s" % filter_string
+ # for now ignore the LVM devices file and rely on our filters
+ if availability.LVMDEVICES.available:
+ devices_string += " use_devicesfile=0"
+
# devices_string can have (inside the brackets) "dir", "scan",
# "preferred_names", "filter", "cache_dir", "write_cache_state",
# "types", "sysfs_scan", "md_component_detection". see man lvm.conf.
diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py
index 1fd805901..5d3e295da 100644
--- a/blivet/tasks/availability.py
+++ b/blivet/tasks/availability.py
@@ -451,6 +451,7 @@ def available_resource(name):
HFORMAT_APP = application("hformat")
JFSTUNE_APP = application("jfs_tune")
KPARTX_APP = application("kpartx")
+LVMDEVICES = application("lvmdevices")
MKDOSFS_APP = application("mkdosfs")
MKDOSFS_NEW_APP = application_by_version("mkdosfs", DOSFSTOOLS_VERSION)
MKE2FS_APP = application_by_version("mke2fs", E2FSPROGS_VERSION)
From 08c137b5c98b24a9ba3df21f04cd20120c61198c Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 22 Jul 2021 11:33:46 +0200
Subject: [PATCH 2/2] Make sure LVM config is updated before running pvcreate
Our internal "global" LVM config is set during reset(), but we have
test cases that run pvcreate without running reset() first, so we
need to make sure to update the global config before running it,
because the config string now also controls whether the new LVM
devices file will be used.
---
blivet/formats/lvmpv.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py
index e4182adb2..ea84e9e4b 100644
--- a/blivet/formats/lvmpv.py
+++ b/blivet/formats/lvmpv.py
@@ -125,6 +125,8 @@ def _create(self, **kwargs):
log_method_call(self, device=self.device,
type=self.type, status=self.status)
+ lvm._set_global_config()
+
ea_yes = blockdev.ExtraArg.new("-y", "")
blockdev.lvm.pvcreate(self.device, data_alignment=self.data_alignment, extra=[ea_yes])

From 46335861073882b7162221fc0995dc1df3c67749 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 6 Aug 2021 16:37:51 +0200
Subject: [PATCH] Improve error message printed for missing dependencies
The existing error message can be confusing for people who don't
know the internals of blivet and libblockdev, and the information
about what is actually broken or not installed on the system is
missing completely. Example for LVM VDO with missing kvdo module:
Before:
device type lvmvdopool requires unavailable_dependencies:
libblockdev lvm plugin (vdo technology)
After:
device type lvmvdopool requires unavailable_dependencies:
libblockdev lvm plugin (vdo technology):
libblockdev plugin lvm is loaded but some required technologies
are not available (BD_LVM_TECH_VDO: Kernel module 'kvdo' not
available)
---
blivet/deviceaction.py | 2 +-
blivet/tasks/availability.py | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/blivet/deviceaction.py b/blivet/deviceaction.py
index 56e29215..0458e4be 100644
--- a/blivet/deviceaction.py
+++ b/blivet/deviceaction.py
@@ -173,7 +173,7 @@ class DeviceAction(util.ObjectID):
def _check_device_dependencies(self):
unavailable_dependencies = self.device.unavailable_dependencies
if unavailable_dependencies:
- dependencies_str = ", ".join(str(d) for d in unavailable_dependencies)
+ dependencies_str = ", ".join("%s:\n%s" % (str(d), ", ".join(d.availability_errors)) for d in unavailable_dependencies)
raise DependencyError("device type %s requires unavailable_dependencies: %s" % (self.device.type, dependencies_str))
def apply(self):
diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py
index 1fd80590..1537f3f5 100644
--- a/blivet/tasks/availability.py
+++ b/blivet/tasks/availability.py
@@ -224,7 +224,7 @@ class BlockDevMethod(Method):
try:
self._tech_info.check_fn(tech, mode)
except GLib.GError as e:
- errors.append(str(e))
+ errors.append("%s: %s" % (tech.value_name, e.message))
return errors
def availability_errors(self, resource):
@@ -242,7 +242,7 @@ class BlockDevMethod(Method):
tech_missing = self._check_technologies()
if tech_missing:
return ["libblockdev plugin %s is loaded but some required "
- "technologies are not available:\n%s" % (self._tech_info.plugin_name, tech_missing)]
+ "technologies are not available (%s)" % (self._tech_info.plugin_name, "; ".join(tech_missing))]
else:
return []
--
2.31.1

From 06cafbbbbff0aae3634eb2908d25d0dc46c2048b Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 9 Nov 2021 15:52:48 +0100
Subject: [PATCH] Use bigger chunk size for thinpools bigger than ~15.88 TiB
With our default chunk size of 64 KiB we cannot create thin pools
bigger than 15.88 TiB. Unfortunately, we need to specify the chunk
size to be able to calculate the thin metadata size properly, so we
can't simply leave it to LVM to determine the correct chunk size.
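The recommendation therefore grows in 64 KiB steps for every
~15.88 TiB of pool size, capped at the 1 GiB maximum; a worked
sketch of the helper this patch adds:

    import math
    from blivet.size import Size

    ADDRESSABLE = Size("17455015526400 B")   # ~15.88 TiB with 64 KiB chunks
    MIN_CHUNK = Size("64 KiB")
    MAX_CHUNK = Size("1 GiB")

    def recommend_chunk_size(thpool_size):
        return min(math.ceil(thpool_size / ADDRESSABLE) * MIN_CHUNK, MAX_CHUNK)

    print(recommend_chunk_size(Size("500 MiB")))   # 64 KiB
    print(recommend_chunk_size(Size("16 TiB")))    # 128 KiB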
---
blivet/devicelibs/lvm.py | 11 +++++++++++
blivet/devices/lvm.py | 6 +++---
tests/devices_test/lvm_test.py | 11 +++++++++++
3 files changed, 25 insertions(+), 3 deletions(-)
diff --git a/blivet/devicelibs/lvm.py b/blivet/devicelibs/lvm.py
index d56a76ed..cb6f655e 100644
--- a/blivet/devicelibs/lvm.py
+++ b/blivet/devicelibs/lvm.py
@@ -20,6 +20,7 @@
# Author(s): Dave Lehman <dlehman@redhat.com>
#
+import math
import os
import re
@@ -51,6 +52,7 @@ LVM_THINP_MIN_METADATA_SIZE = Size("2 MiB")
LVM_THINP_MAX_METADATA_SIZE = Size("16 GiB")
LVM_THINP_MIN_CHUNK_SIZE = Size("64 KiB")
LVM_THINP_MAX_CHUNK_SIZE = Size("1 GiB")
+LVM_THINP_ADDRESSABLE_CHUNK_SIZE = Size("17455015526400 B") # 15.88 TiB
raid_levels = raid.RAIDLevels(["linear", "striped", "raid1", "raid4", "raid5", "raid6", "raid10"])
raid_seg_types = list(itertools.chain.from_iterable([level.names for level in raid_levels if level.name != "linear"]))
@@ -225,3 +227,12 @@ def is_lvm_name_valid(name):
return False
return True
+
+
+def recommend_thpool_chunk_size(thpool_size):
+ # calculation of the recommended chunk size by LVM is so complicated that we
+ # can't really replicate it, but we know that 64 KiB chunk size gives us
+ # upper limit of ~15.88 TiB so we will just add 64 KiB to the chunk size
+ # for every ~15.88 TiB of thinpool data size
+ return min(math.ceil(thpool_size / LVM_THINP_ADDRESSABLE_CHUNK_SIZE) * LVM_THINP_MIN_CHUNK_SIZE,
+ LVM_THINP_MAX_CHUNK_SIZE)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 51d785d9..c61eeb4b 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -1634,9 +1634,9 @@ class LVMThinPoolMixin(object):
return
# we need to know chunk size to calculate recommended metadata size
- if self._chunk_size == 0:
- self._chunk_size = Size(blockdev.LVM_DEFAULT_CHUNK_SIZE)
- log.debug("Using default chunk size: %s", self._chunk_size)
+ if self._chunk_size == 0 or enforced:
+ self._chunk_size = lvm.recommend_thpool_chunk_size(self._size)
+ log.debug("Using recommended chunk size: %s", self._chunk_size)
old_md_size = self._metadata_size
old_pmspare_size = self.vg.pmspare_size
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
index 4156d0bf..336c5b99 100644
--- a/tests/devices_test/lvm_test.py
+++ b/tests/devices_test/lvm_test.py
@@ -442,6 +442,17 @@ class LVMDeviceTest(unittest.TestCase):
self.assertFalse(pool.exists)
self.assertTrue(lvm.lvremove.called)
+ def test_lvmthinpool_chunk_size(self):
+ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("100 TiB"))
+ vg = LVMVolumeGroupDevice("testvg", parents=[pv])
+ pool = LVMLogicalVolumeDevice("pool1", parents=[vg], size=Size("500 MiB"), seg_type="thin-pool")
+ self.assertEqual(pool.chunk_size, Size("64 KiB"))
+
+ pool.size = Size("16 TiB")
+ pool.autoset_md_size(enforced=True)
+ self.assertEqual(pool.chunk_size, Size("128 KiB"))
+
class TypeSpecificCallsTest(unittest.TestCase):
def test_type_specific_calls(self):
--
2.31.1

From 43c5a6ef094e5f333a6dd47c467a1516488e2097 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 24 May 2021 13:35:39 +0200
Subject: [PATCH 1/7] Remove action device from LVM reject list
Because the device doesn't depend on itself, the existing code
won't remove the device we are trying to modify from the list.
Resolves: rhbz#1955942
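In other words, depends_on() is not reflexive, so the existing
generator never yields the action's own device; a sketch with
hypothetical sda/sda1 disk and partition devices:

    sda1.depends_on(sda)    # True  -- a partition depends on its disk
    sda1.depends_on(sda1)   # False -- a device does not depend on itself,
                            # hence the explicit removal added below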
---
blivet/actionlist.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/blivet/actionlist.py b/blivet/actionlist.py
index d03e32b9..2de3fed3 100644
--- a/blivet/actionlist.py
+++ b/blivet/actionlist.py
@@ -260,6 +260,7 @@ class ActionList(object):
log.debug("action: %s", action)
# Remove lvm filters for devices we are operating on
+ lvm.lvm_cc_removeFilterRejectRegexp(action.device.name)
for device in (d for d in devices if d.depends_on(action.device)):
lvm.lvm_cc_removeFilterRejectRegexp(device.name)
--
2.31.1
From 2db8aa0aa6ea03c182f7e8e08cd1371ded13b71c Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 24 May 2021 14:49:12 +0200
Subject: [PATCH 2/7] Convert LVM filter lists to sets
To prevent devices from being added multiple times but removed
only once.
Related: rhbz#1955942
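A sketch of the failure mode with plain lists, and why sets fix it:

    rejects = []
    rejects.append("sda"); rejects.append("sda")   # scanned twice
    rejects.remove("sda")
    print("sda" in rejects)   # True -- a stale entry survives

    rejects = set()
    rejects.add("sda"); rejects.add("sda")         # second add is a no-op
    rejects.discard("sda")
    print("sda" in rejects)   # False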
---
blivet/devicelibs/lvm.py | 12 ++++++------
tests/devicetree_test.py | 6 +++---
2 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/blivet/devicelibs/lvm.py b/blivet/devicelibs/lvm.py
index 121797ce..9e396cca 100644
--- a/blivet/devicelibs/lvm.py
+++ b/blivet/devicelibs/lvm.py
@@ -72,8 +72,8 @@ safe_name_characters = "0-9a-zA-Z._-"
# Theoretically we can handle all that can be handled with the LVM --config
# argument. For every time we call an lvm_cc (lvm compose config) funciton
# we regenerate the config_args with all global info.
-config_args_data = {"filterRejects": [], # regular expressions to reject.
- "filterAccepts": []} # regexp to accept
+config_args_data = {"filterRejects": set(), # regular expressions to reject.
+ "filterAccepts": set()} # regexp to accept
def _set_global_config():
@@ -125,7 +125,7 @@ def needs_config_refresh(fn):
def lvm_cc_addFilterRejectRegexp(regexp):
""" Add a regular expression to the --config string."""
log.debug("lvm filter: adding %s to the reject list", regexp)
- config_args_data["filterRejects"].append(regexp)
+ config_args_data["filterRejects"].add(regexp)
@needs_config_refresh
@@ -134,15 +134,15 @@ def lvm_cc_removeFilterRejectRegexp(regexp):
log.debug("lvm filter: removing %s from the reject list", regexp)
try:
config_args_data["filterRejects"].remove(regexp)
- except ValueError:
+ except KeyError:
log.debug("%s wasn't in the reject list", regexp)
return
@needs_config_refresh
def lvm_cc_resetFilter():
- config_args_data["filterRejects"] = []
- config_args_data["filterAccepts"] = []
+ config_args_data["filterRejects"] = set()
+ config_args_data["filterAccepts"] = set()
def determine_parent_lv(internal_lv, lvs, lv_info):
diff --git a/tests/devicetree_test.py b/tests/devicetree_test.py
index d1f4d8f3..ef163c0a 100644
--- a/tests/devicetree_test.py
+++ b/tests/devicetree_test.py
@@ -125,7 +125,7 @@ class DeviceTreeTestCase(unittest.TestCase):
dt.actions._actions.append(Mock(name="fake action"))
lvm.lvm_cc_addFilterRejectRegexp("xxx")
- lvm.config_args_data["filterAccepts"].append("yyy")
+ lvm.config_args_data["filterAccepts"].add("yyy")
dt.ignored_disks.append(names[0])
dt.exclusive_disks.append(names[1])
@@ -144,8 +144,8 @@ class DeviceTreeTestCase(unittest.TestCase):
self.assertEqual(dt._hidden, empty_list)
- self.assertEqual(lvm.config_args_data["filterAccepts"], empty_list)
- self.assertEqual(lvm.config_args_data["filterRejects"], empty_list)
+ self.assertEqual(lvm.config_args_data["filterAccepts"], set())
+ self.assertEqual(lvm.config_args_data["filterRejects"], set())
self.assertEqual(dt.exclusive_disks, empty_list)
self.assertEqual(dt.ignored_disks, empty_list)
--
2.31.1
From e2540422945586ca45848a663e391a91b2fdd714 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 27 Jul 2021 14:07:05 +0200
Subject: [PATCH 3/7] Switch LVM devices filter from "reject" to "accept" by
default
We currently use the LVM reject filter to filter out hidden and
ignored devices; this commit changes the behaviour to reject all
devices by default and accept only physical volumes that are not
hidden or ignored. This is preparation for the switch from the
existing lvm.conf based filtering to the new devices file based
filtering introduced in LVM 2.03.12, which only allows listing
"accepted" devices. This lets us support both the "old" and "new"
style filtering using the same code.
---
blivet/actionlist.py | 5 +--
blivet/devicelibs/lvm.py | 62 +++++++++++----------------
blivet/devices/lvm.py | 4 +-
blivet/devicetree.py | 8 ++--
blivet/formats/lvmpv.py | 2 +
blivet/populator/helpers/lvm.py | 6 +++
blivet/populator/helpers/partition.py | 8 ----
blivet/populator/populator.py | 4 +-
tests/devicetree_test.py | 37 ++++++++++++++--
tests/populator_test.py | 6 ++-
10 files changed, 81 insertions(+), 61 deletions(-)
diff --git a/blivet/actionlist.py b/blivet/actionlist.py
index 2de3fed3..f3977401 100644
--- a/blivet/actionlist.py
+++ b/blivet/actionlist.py
@@ -259,10 +259,9 @@ class ActionList(object):
for action in self._actions:
log.debug("action: %s", action)
- # Remove lvm filters for devices we are operating on
- lvm.lvm_cc_removeFilterRejectRegexp(action.device.name)
for device in (d for d in devices if d.depends_on(action.device)):
- lvm.lvm_cc_removeFilterRejectRegexp(device.name)
+ if device.format.type == "lvmpv":
+ lvm.lvm_devices_add(device.path)
def _post_process(self, devices=None):
""" Clean up relics from action queue execution. """
diff --git a/blivet/devicelibs/lvm.py b/blivet/devicelibs/lvm.py
index 9e396cca..96d037b8 100644
--- a/blivet/devicelibs/lvm.py
+++ b/blivet/devicelibs/lvm.py
@@ -67,40 +67,29 @@ LVMETAD_SOCKET_PATH = "/run/lvm/lvmetad.socket"
safe_name_characters = "0-9a-zA-Z._-"
-# Start config_args handling code
-#
-# Theoretically we can handle all that can be handled with the LVM --config
-# argument. For every time we call an lvm_cc (lvm compose config) funciton
-# we regenerate the config_args with all global info.
-config_args_data = {"filterRejects": set(), # regular expressions to reject.
- "filterAccepts": set()} # regexp to accept
+# list of devices that LVM is allowed to use
+# with LVM >= 2.03.12 we'll use this for the --devices option and when creating
+# the /etc/lvm/devices/system.devices file
+# with older versions of LVM we will use this for the --config based filtering
+_lvm_devices = set()
def _set_global_config():
"""lvm command accepts lvm.conf type arguments preceded by --config. """
- filter_string = ""
- rejects = config_args_data["filterRejects"]
- for reject in rejects:
- filter_string += ("\"r|/%s$|\"," % reject)
+ device_string = ""
+
+ # now explicitly "accept" all LVM devices
+ for device in _lvm_devices:
+ device_string += "\"a|%s$|\"," % device
- if filter_string:
- filter_string = "filter=[%s]" % filter_string.strip(",")
+ # now add all devices to the "reject" filter
+ device_string += "\"r|.*|\""
- # XXX consider making /tmp/blivet.lvm.XXXXX, writing an lvm.conf there, and
- # setting LVM_SYSTEM_DIR
- devices_string = 'preferred_names=["^/dev/mapper/", "^/dev/md/", "^/dev/sd"]'
- if filter_string:
- devices_string += " %s" % filter_string
+ filter_string = "filter=[%s]" % device_string
- # for now ignore the LVM devices file and rely on our filters
- if availability.LVMDEVICES.available:
- devices_string += " use_devicesfile=0"
+ config_string = " devices { %s } " % filter_string
- # devices_string can have (inside the brackets) "dir", "scan",
- # "preferred_names", "filter", "cache_dir", "write_cache_state",
- # "types", "sysfs_scan", "md_component_detection". see man lvm.conf.
- config_string = " devices { %s } " % (devices_string) # strings can be added
if not flags.lvm_metadata_backup:
config_string += "backup {backup=0 archive=0} "
if flags.debug:
@@ -122,27 +111,26 @@ def needs_config_refresh(fn):
@needs_config_refresh
-def lvm_cc_addFilterRejectRegexp(regexp):
- """ Add a regular expression to the --config string."""
- log.debug("lvm filter: adding %s to the reject list", regexp)
- config_args_data["filterRejects"].add(regexp)
+def lvm_devices_add(path):
+ """ Add a device (PV) to the list of devices LVM is allowed to use """
+    log.debug("lvm filter: device %s added to the list of allowed devices", path)
+ _lvm_devices.add(path)
@needs_config_refresh
-def lvm_cc_removeFilterRejectRegexp(regexp):
- """ Remove a regular expression from the --config string."""
- log.debug("lvm filter: removing %s from the reject list", regexp)
+def lvm_devices_remove(path):
+    """ Remove a device (PV) from the list of devices LVM is allowed to use """
+    log.debug("lvm filter: device %s removed from the list of allowed devices", path)
try:
- config_args_data["filterRejects"].remove(regexp)
+ _lvm_devices.remove(path)
except KeyError:
- log.debug("%s wasn't in the reject list", regexp)
+ log.debug("%s wasn't in the devices list", path)
return
@needs_config_refresh
-def lvm_cc_resetFilter():
- config_args_data["filterRejects"] = set()
- config_args_data["filterAccepts"] = set()
+def lvm_devices_reset():
+ _lvm_devices.clear()
def determine_parent_lv(internal_lv, lvs, lv_info):
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index c61eeb4b..9c230f1b 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -273,8 +273,8 @@ class LVMVolumeGroupDevice(ContainerDevice):
log_method_call(self, self.name, status=self.status)
if not self.complete:
for pv in self.pvs:
- # Remove the PVs from the ignore filter so we can wipe them.
- lvm.lvm_cc_removeFilterRejectRegexp(pv.name)
+ # add PVS to the list of LVM devices so we can wipe them.
+ lvm.lvm_devices_add(pv.path)
# Don't run vgremove or vgreduce since there may be another VG with
# the same name that we want to keep/use.
diff --git a/blivet/devicetree.py b/blivet/devicetree.py
index f4ae1968..c6c1b440 100644
--- a/blivet/devicetree.py
+++ b/blivet/devicetree.py
@@ -96,7 +96,7 @@ class DeviceTreeBase(object):
self._hidden = []
- lvm.lvm_cc_resetFilter()
+ lvm.lvm_devices_reset()
self.exclusive_disks = exclusive_disks or []
self.ignored_disks = ignored_disks or []
@@ -879,7 +879,8 @@ class DeviceTreeBase(object):
self._remove_device(device, force=True, modparent=False)
self._hidden.append(device)
- lvm.lvm_cc_addFilterRejectRegexp(device.name)
+ if device.format.type == "lvmpv":
+ lvm.lvm_devices_remove(device.path)
def unhide(self, device):
""" Restore a device's visibility.
@@ -905,7 +906,8 @@ class DeviceTreeBase(object):
self._hidden.remove(hidden)
self._devices.append(hidden)
hidden.add_hook(new=False)
- lvm.lvm_cc_removeFilterRejectRegexp(hidden.name)
+ if hidden.format.type == "lvmpv":
+ lvm.lvm_devices_add(hidden.path)
def expand_taglist(self, taglist):
""" Expands tags in input list into devices.
diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py
index ea84e9e4..3b00951f 100644
--- a/blivet/formats/lvmpv.py
+++ b/blivet/formats/lvmpv.py
@@ -124,6 +124,7 @@ class LVMPhysicalVolume(DeviceFormat):
def _create(self, **kwargs):
log_method_call(self, device=self.device,
type=self.type, status=self.status)
+ lvm.lvm_devices_add(self.device)
lvm._set_global_config()
@@ -138,6 +139,7 @@ class LVMPhysicalVolume(DeviceFormat):
except blockdev.LVMError:
DeviceFormat._destroy(self, **kwargs)
finally:
+ lvm.lvm_devices_remove(self.device)
udev.settle()
@property
diff --git a/blivet/populator/helpers/lvm.py b/blivet/populator/helpers/lvm.py
index c7adfa4e..9e7e4630 100644
--- a/blivet/populator/helpers/lvm.py
+++ b/blivet/populator/helpers/lvm.py
@@ -87,6 +87,12 @@ class LVMFormatPopulator(FormatPopulator):
def _get_kwargs(self):
kwargs = super(LVMFormatPopulator, self)._get_kwargs()
+ # new PV, add it to the LVM devices list and re-run pvs/lvs/vgs
+ lvm.lvm_devices_add(self.device.path)
+ pvs_info.drop_cache()
+ vgs_info.drop_cache()
+ lvs_info.drop_cache()
+
pv_info = pvs_info.cache.get(self.device.path, None)
name = udev.device_get_name(self.data)
diff --git a/blivet/populator/helpers/partition.py b/blivet/populator/helpers/partition.py
index f00323d1..8659bd48 100644
--- a/blivet/populator/helpers/partition.py
+++ b/blivet/populator/helpers/partition.py
@@ -24,7 +24,6 @@ import copy
import six
from ... import udev
-from ...devicelibs import lvm
from ...devices import PartitionDevice
from ...errors import DeviceError
from ...formats import get_format
@@ -66,7 +65,6 @@ class PartitionDevicePopulator(DevicePopulator):
if disk is None:
# if the disk is still not in the tree something has gone wrong
log.error("failure finding disk for %s", name)
- lvm.lvm_cc_addFilterRejectRegexp(name)
return
if not disk.partitioned or not disk.format.supported:
@@ -78,12 +76,6 @@ class PartitionDevicePopulator(DevicePopulator):
# and instantiate a PartitionDevice so our view of the layout is
# complete.
if not disk.partitionable or disk.format.type == "iso9660" or disk.format.hidden:
- # there's no need to filter partitions on members of multipaths or
- # fwraid members from lvm since multipath and dmraid are already
- # active and lvm should therefore know to ignore them
- if not disk.format.hidden:
- lvm.lvm_cc_addFilterRejectRegexp(name)
-
log.debug("ignoring partition %s on %s", name, disk.format.type)
return
diff --git a/blivet/populator/populator.py b/blivet/populator/populator.py
index 75bb1741..958593ec 100644
--- a/blivet/populator/populator.py
+++ b/blivet/populator/populator.py
@@ -317,10 +317,10 @@ class PopulatorMixin(object):
continue
# Make sure lvm doesn't get confused by PVs that belong to
- # incomplete VGs. We will remove the PVs from the reject list when/if
+ # incomplete VGs. We will add the PVs to the accept list when/if
# the time comes to remove the incomplete VG and its PVs.
for pv in vg.pvs:
- lvm.lvm_cc_addFilterRejectRegexp(pv.name)
+ lvm.lvm_devices_remove(pv.path)
def set_disk_images(self, images):
""" Set the disk images and reflect them in exclusive_disks.
diff --git a/tests/devicetree_test.py b/tests/devicetree_test.py
index ef163c0a..3be4d572 100644
--- a/tests/devicetree_test.py
+++ b/tests/devicetree_test.py
@@ -124,8 +124,7 @@ class DeviceTreeTestCase(unittest.TestCase):
dt.actions._actions.append(Mock(name="fake action"))
- lvm.lvm_cc_addFilterRejectRegexp("xxx")
- lvm.config_args_data["filterAccepts"].add("yyy")
+ lvm.lvm_devices_add("xxx")
dt.ignored_disks.append(names[0])
dt.exclusive_disks.append(names[1])
@@ -144,8 +143,7 @@ class DeviceTreeTestCase(unittest.TestCase):
self.assertEqual(dt._hidden, empty_list)
- self.assertEqual(lvm.config_args_data["filterAccepts"], set())
- self.assertEqual(lvm.config_args_data["filterRejects"], set())
+ self.assertEqual(lvm._lvm_devices, set())
self.assertEqual(dt.exclusive_disks, empty_list)
self.assertEqual(dt.ignored_disks, empty_list)
@@ -438,6 +436,37 @@ class DeviceTreeTestCase(unittest.TestCase):
self.assertEqual(tree.get_related_disks(sda), set([sda, sdb]))
self.assertEqual(tree.get_related_disks(sdb), set([sda, sdb]))
+ def test_lvm_filter_hide_unhide(self):
+ tree = DeviceTree()
+
+ sda = DiskDevice("sda", size=Size("30 GiB"))
+ sdb = DiskDevice("sdb", size=Size("30 GiB"))
+
+ tree._add_device(sda)
+ tree._add_device(sdb)
+
+ self.assertTrue(sda in tree.devices)
+ self.assertTrue(sdb in tree.devices)
+
+ sda.format = get_format("lvmpv", device=sda.path)
+ sdb.format = get_format("lvmpv", device=sdb.path)
+
+ # LVMPhysicalVolume._create would do this
+ lvm.lvm_devices_add(sda.path)
+ lvm.lvm_devices_add(sdb.path)
+
+ self.assertSetEqual(lvm._lvm_devices, {sda.path, sdb.path})
+
+ tree.hide(sda)
+ self.assertSetEqual(lvm._lvm_devices, {sdb.path})
+ tree.hide(sdb)
+ self.assertSetEqual(lvm._lvm_devices, set())
+
+ tree.unhide(sda)
+ self.assertSetEqual(lvm._lvm_devices, {sda.path})
+ tree.unhide(sdb)
+ self.assertSetEqual(lvm._lvm_devices, {sda.path, sdb.path})
+
class DeviceTreeIgnoredExclusiveMultipathTestCase(unittest.TestCase):
diff --git a/tests/populator_test.py b/tests/populator_test.py
index 2a8532f0..dd36c16a 100644
--- a/tests/populator_test.py
+++ b/tests/populator_test.py
@@ -13,6 +13,7 @@ from gi.repository import BlockDev as blockdev
from blivet.devices import DiskDevice, DMDevice, FileDevice, LoopDevice
from blivet.devices import MDRaidArrayDevice, MultipathDevice, OpticalDevice
from blivet.devices import PartitionDevice, StorageDevice, NVDIMMNamespaceDevice
+from blivet.devicelibs import lvm
from blivet.devicetree import DeviceTree
from blivet.formats import get_device_format_class, get_format, DeviceFormat
from blivet.formats.disklabel import DiskLabel
@@ -393,8 +394,7 @@ class PartitionDevicePopulatorTestCase(PopulatorHelperTestCase):
@patch.object(DiskLabel, "parted_disk")
@patch.object(DiskLabel, "parted_device")
@patch.object(PartitionDevice, "probe")
- # TODO: fix the naming of the lvm filter functions
- @patch("blivet.devicelibs.lvm.lvm_cc_addFilterRejectRegexp")
+ @patch("blivet.devicelibs.lvm.lvm_devices_add")
@patch("blivet.udev.device_get_major", return_value=88)
@patch("blivet.udev.device_get_minor", return_value=19)
@patch.object(DeviceTree, "get_device_by_name")
@@ -973,6 +973,8 @@ class LVMFormatPopulatorTestCase(FormatPopulatorTestCase):
self.assertTrue(vg_device is not None)
devicetree._remove_device(vg_device)
+ self.assertIn(device.path, lvm._lvm_devices)
+
get_device_by_uuid.reset_mock()
# pv belongs to a valid vg not in the tree with two lvs
--
2.31.1
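The filter composed by _set_global_config() above now accepts
exactly the known PVs and rejects everything else, roughly (a
sketch with hypothetical PV paths):

    _lvm_devices = {"/dev/sda1", "/dev/sdb1"}
    device_string = "".join('"a|%s$|",' % dev for dev in _lvm_devices)
    device_string += '"r|.*|"'   # reject anything not explicitly accepted
    filter_string = "filter=[%s]" % device_string
    # -> filter=["a|/dev/sda1$|","a|/dev/sdb1$|","r|.*|"]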
From 15a63b01bd2b6e7fe197fade849f28b83407c166 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 30 Jul 2021 14:01:04 +0200
Subject: [PATCH 4/7] Use LVM devices for filtering LVM devices with LVM >=
2.03.12
---
blivet/devicelibs/lvm.py | 38 +++++++++++++++++++++++++++++---------
tests/populator_test.py | 9 ++++-----
2 files changed, 33 insertions(+), 14 deletions(-)
diff --git a/blivet/devicelibs/lvm.py b/blivet/devicelibs/lvm.py
index 96d037b8..3ab1540b 100644
--- a/blivet/devicelibs/lvm.py
+++ b/blivet/devicelibs/lvm.py
@@ -67,6 +67,16 @@ LVMETAD_SOCKET_PATH = "/run/lvm/lvmetad.socket"
safe_name_characters = "0-9a-zA-Z._-"
+if hasattr(blockdev.LVMTech, "DEVICES"):
+ try:
+ blockdev.lvm.is_tech_avail(blockdev.LVMTech.DEVICES, 0) # pylint: disable=no-member
+ except blockdev.LVMError:
+ HAVE_LVMDEVICES = False
+ else:
+ HAVE_LVMDEVICES = True
+else:
+ HAVE_LVMDEVICES = False
+
# list of devices that LVM is allowed to use
# with LVM >= 2.03.12 we'll use this for the --devices option and when creating
# the /etc/lvm/devices/system.devices file
@@ -79,25 +89,34 @@ def _set_global_config():
device_string = ""
- # now explicitly "accept" all LVM devices
- for device in _lvm_devices:
- device_string += "\"a|%s$|\"," % device
+ if not HAVE_LVMDEVICES:
+ # now explicitly "accept" all LVM devices
+ for device in _lvm_devices:
+ device_string += "\"a|%s$|\"," % device
- # now add all devices to the "reject" filter
- device_string += "\"r|.*|\""
+ # now add all devices to the "reject" filter
+ device_string += "\"r|.*|\""
- filter_string = "filter=[%s]" % device_string
+ filter_string = "filter=[%s]" % device_string
- config_string = " devices { %s } " % filter_string
+ config_string = " devices { %s } " % filter_string
+ else:
+ config_string = " "
if not flags.lvm_metadata_backup:
config_string += "backup {backup=0 archive=0} "
- if flags.debug:
- config_string += "log {level=7 file=/tmp/lvm.log syslog=0}"
+ config_string += "log {level=7 file=/tmp/lvm.log syslog=0}"
blockdev.lvm.set_global_config(config_string)
+def _set_lvm_devices():
+ if not HAVE_LVMDEVICES:
+ return
+
+ blockdev.lvm.set_devices_filter(list(_lvm_devices))
+
+
def needs_config_refresh(fn):
if not availability.BLOCKDEV_LVM_PLUGIN.available:
return lambda *args, **kwargs: None
@@ -105,6 +124,7 @@ def needs_config_refresh(fn):
def fn_with_refresh(*args, **kwargs):
ret = fn(*args, **kwargs)
_set_global_config()
+ _set_lvm_devices()
return ret
return fn_with_refresh
diff --git a/tests/populator_test.py b/tests/populator_test.py
index dd36c16a..a9584319 100644
--- a/tests/populator_test.py
+++ b/tests/populator_test.py
@@ -897,6 +897,7 @@ class LVMFormatPopulatorTestCase(FormatPopulatorTestCase):
device = Mock()
device.parents = []
device.size = Size("10g")
+ device.path = "/dev/sda1"
devicetree._add_device(device)
# pylint: disable=attribute-defined-outside-init
@@ -924,15 +925,13 @@ class LVMFormatPopulatorTestCase(FormatPopulatorTestCase):
pv_info.pe_start = 0
pv_info.pv_free = 0
- device.path = sentinel.pv_path
-
vg_device = Mock()
vg_device.parents = []
vg_device.lvs = []
get_device_by_uuid.return_value = vg_device
with patch("blivet.static_data.lvm_info.PVsInfo.cache", new_callable=PropertyMock) as mock_pvs_cache:
- mock_pvs_cache.return_value = {sentinel.pv_path: pv_info}
+ mock_pvs_cache.return_value = {device.path: pv_info}
with patch("blivet.udev.device_get_format", return_value=self.udev_type):
helper = self.helper_class(devicetree, data, device)
self.assertFalse(device in vg_device.parents)
@@ -957,7 +956,7 @@ class LVMFormatPopulatorTestCase(FormatPopulatorTestCase):
pv_info.vg_pv_count = 1
with patch("blivet.static_data.lvm_info.PVsInfo.cache", new_callable=PropertyMock) as mock_pvs_cache:
- mock_pvs_cache.return_value = {sentinel.pv_path: pv_info}
+ mock_pvs_cache.return_value = {device.path: pv_info}
with patch("blivet.static_data.lvm_info.VGsInfo.cache", new_callable=PropertyMock) as mock_vgs_cache:
mock_vgs_cache.return_value = {pv_info.vg_uuid: Mock()}
with patch("blivet.udev.device_get_format", return_value=self.udev_type):
@@ -1007,7 +1006,7 @@ class LVMFormatPopulatorTestCase(FormatPopulatorTestCase):
get_device_by_uuid.side_effect = gdbu
with patch("blivet.static_data.lvm_info.PVsInfo.cache", new_callable=PropertyMock) as mock_pvs_cache:
- mock_pvs_cache.return_value = {sentinel.pv_path: pv_info}
+ mock_pvs_cache.return_value = {device.path: pv_info}
with patch("blivet.static_data.lvm_info.VGsInfo.cache", new_callable=PropertyMock) as mock_vgs_cache:
mock_vgs_cache.return_value = {pv_info.vg_uuid: Mock()}
with patch("blivet.static_data.lvm_info.LVsInfo.cache", new_callable=PropertyMock) as mock_lvs_cache:
--
2.31.1
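
The patch hinges on the needs_config_refresh decorator: every function that mutates the device list re-pushes the filter to LVM afterwards, so the --devices list can never go stale. A self-contained sketch of the pattern, with print standing in for blockdev.lvm.set_devices_filter:

    _devices = set()

    def _set_lvm_devices():
        # stand-in for blockdev.lvm.set_devices_filter(list(_devices))
        print("devices filter:", sorted(_devices))

    def needs_config_refresh(fn):
        def fn_with_refresh(*args, **kwargs):
            ret = fn(*args, **kwargs)
            _set_lvm_devices()      # re-apply the filter after every change
            return ret
        return fn_with_refresh

    @needs_config_refresh
    def lvm_devices_add(path):
        _devices.add(path)

    lvm_devices_add("/dev/sda1")    # prints: devices filter: ['/dev/sda1']
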
From d4e1395de3691f30196b6b0e3b2c82e83b27afaf Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 30 Jul 2021 14:01:43 +0200
Subject: [PATCH 5/7] Make sure PVs are added to/removed from the LVM devices file
We are using the --devices option when running LVM commands, which
means the newly created PV won't be added to the device list by
pvcreate, so we need to do that manually.
---
blivet/formats/lvmpv.py | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py
index 3b00951f..71ec699f 100644
--- a/blivet/formats/lvmpv.py
+++ b/blivet/formats/lvmpv.py
@@ -131,6 +131,9 @@ class LVMPhysicalVolume(DeviceFormat):
ea_yes = blockdev.ExtraArg.new("-y", "")
blockdev.lvm.pvcreate(self.device, data_alignment=self.data_alignment, extra=[ea_yes])
+ if lvm.HAVE_LVMDEVICES:
+ blockdev.lvm.devices_add(self.device)
+
def _destroy(self, **kwargs):
log_method_call(self, device=self.device,
type=self.type, status=self.status)
@@ -141,6 +144,8 @@ class LVMPhysicalVolume(DeviceFormat):
finally:
lvm.lvm_devices_remove(self.device)
udev.settle()
+ if lvm.HAVE_LVMDEVICES:
+ blockdev.lvm.devices_delete(self.device)
@property
def destroyable(self):
--
2.31.1
From c221d313bde21fb2cba701b93fe0c57336cba8ec Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 14 Oct 2021 15:32:24 +0200
Subject: [PATCH 6/7] Ignore errors for LVM devices file actions
The LVM devices file feature might be disabled either locally or
globally by LVM config.
---
blivet/formats/lvmpv.py | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py
index 71ec699f..b27213cc 100644
--- a/blivet/formats/lvmpv.py
+++ b/blivet/formats/lvmpv.py
@@ -132,7 +132,10 @@ class LVMPhysicalVolume(DeviceFormat):
blockdev.lvm.pvcreate(self.device, data_alignment=self.data_alignment, extra=[ea_yes])
if lvm.HAVE_LVMDEVICES:
- blockdev.lvm.devices_add(self.device)
+ try:
+ blockdev.lvm.devices_add(self.device)
+ except blockdev.LVMError as e:
+ log.debug("Failed to add newly created PV %s to the LVM devices file: %s", self.device, str(e))
def _destroy(self, **kwargs):
log_method_call(self, device=self.device,
@@ -145,7 +148,10 @@ class LVMPhysicalVolume(DeviceFormat):
lvm.lvm_devices_remove(self.device)
udev.settle()
if lvm.HAVE_LVMDEVICES:
- blockdev.lvm.devices_delete(self.device)
+ try:
+ blockdev.lvm.devices_delete(self.device)
+ except blockdev.LVMError as e:
+ log.debug("Failed to remove PV %s from the LVM devices file: %s", self.device, str(e))
@property
def destroyable(self):
--
2.31.1
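
The effect of the patch is to make the devices-file calls best-effort: a failure is logged at debug level and the PV create/destroy continues. The generic shape of that, as a runnable sketch:

    import logging

    logging.basicConfig(level=logging.DEBUG)
    log = logging.getLogger(__name__)

    class LVMError(Exception):
        pass

    def devices_add(path):          # stand-in for blockdev.lvm.devices_add
        raise LVMError("devices file feature is disabled")

    device = "/dev/sda1"
    try:
        devices_add(device)
    except LVMError as e:
        # best effort: the PV itself was created fine, so only log and move on
        log.debug("Failed to add newly created PV %s to the LVM devices file: %s",
                  device, str(e))
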
From 6b96d4ead6890fffd95840b8935f71ecd9e310ef Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 19 Oct 2021 14:27:05 +0200
Subject: [PATCH 7/7] Add public functions to add/remove PV to/from the LVM
system.devices
Anaconda needs to be able to add preexisting PVs to the file
during installation.
---
blivet/formats/lvmpv.py | 28 ++++++++++++++++++++--------
1 file changed, 20 insertions(+), 8 deletions(-)
diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py
index b27213cc..3fef667e 100644
--- a/blivet/formats/lvmpv.py
+++ b/blivet/formats/lvmpv.py
@@ -121,6 +121,24 @@ class LVMPhysicalVolume(DeviceFormat):
def supported(self):
return super(LVMPhysicalVolume, self).supported and self._plugin.available
+ def lvmdevices_add(self):
+ if not lvm.HAVE_LVMDEVICES:
+ raise PhysicalVolumeError("LVM devices file feature is not supported")
+
+ try:
+ blockdev.lvm.devices_add(self.device)
+ except blockdev.LVMError as e:
+ log.debug("Failed to add PV %s to the LVM devices file: %s", self.device, str(e))
+
+ def lvmdevices_remove(self):
+ if not lvm.HAVE_LVMDEVICES:
+ raise PhysicalVolumeError("LVM devices file feature is not supported")
+
+ try:
+ blockdev.lvm.devices_delete(self.device)
+ except blockdev.LVMError as e:
+ log.debug("Failed to remove PV %s from the LVM devices file: %s", self.device, str(e))
+
def _create(self, **kwargs):
log_method_call(self, device=self.device,
type=self.type, status=self.status)
@@ -132,10 +150,7 @@ class LVMPhysicalVolume(DeviceFormat):
blockdev.lvm.pvcreate(self.device, data_alignment=self.data_alignment, extra=[ea_yes])
if lvm.HAVE_LVMDEVICES:
- try:
- blockdev.lvm.devices_add(self.device)
- except blockdev.LVMError as e:
- log.debug("Failed to add newly created PV %s to the LVM devices file: %s", self.device, str(e))
+ self.lvmdevices_add()
def _destroy(self, **kwargs):
log_method_call(self, device=self.device,
@@ -148,10 +163,7 @@ class LVMPhysicalVolume(DeviceFormat):
lvm.lvm_devices_remove(self.device)
udev.settle()
if lvm.HAVE_LVMDEVICES:
- try:
- blockdev.lvm.devices_delete(self.device)
- except blockdev.LVMError as e:
- log.debug("Failed to remove PV %s from the LVM devices file: %s", self.device, str(e))
+ self.lvmdevices_remove()
@property
def destroyable(self):
--
2.31.1

From 23b7573557e69d9b4dccdd6d52e82b82d8dba115 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 1 Dec 2021 16:28:15 +0100
Subject: [PATCH] iscsi: Replace all log_exception_info calls with log.info
We don't get any useful information from the exception; it's
always the same traceback from a failed DBus call, and we only use
these when a call failed because firmware iSCSI is not supported.
The resulting log message also looks like a failure, with the
traceback logged, rather than just debug information.
Resolves: rhbz#2028134
---
blivet/iscsi.py | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/blivet/iscsi.py b/blivet/iscsi.py
index 5ee2082b..bc77ca62 100644
--- a/blivet/iscsi.py
+++ b/blivet/iscsi.py
@@ -22,7 +22,6 @@ from . import udev
from . import util
from .flags import flags
from .i18n import _
-from .storage_log import log_exception_info
from . import safe_dbus
import os
import re
@@ -277,8 +276,8 @@ class iSCSI(object):
'org.freedesktop.DBus.ObjectManager',
'GetManagedObjects',
None)[0]
- except safe_dbus.DBusCallError:
- log_exception_info(log.info, "iscsi: Failed to get active sessions.")
+ except safe_dbus.DBusCallError as e:
+ log.info("iscsi: Failed to get active sessions: %s", str(e))
return []
sessions = (obj for obj in objects.keys() if re.match(r'.*/iscsi/session[0-9]+$', obj))
@@ -302,8 +301,8 @@ class iSCSI(object):
args = GLib.Variant("(a{sv})", ([], ))
try:
found_nodes, _n_nodes = self._call_initiator_method("DiscoverFirmware", args)
- except safe_dbus.DBusCallError:
- log_exception_info(log.info, "iscsi: No IBFT info found.")
+ except safe_dbus.DBusCallError as e:
+ log.info("iscsi: No IBFT info found: %s", str(e))
# an exception here means there is no ibft firmware, just return
return
--
2.31.1
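
The substance of the change, shown generically: instead of logging a full traceback (which reads like a crash), the exception text is folded into a single informational line. A runnable sketch of the new style:

    import logging

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger("iscsi")

    class DBusCallError(Exception):
        pass

    try:
        raise DBusCallError("org.freedesktop.DBus.Error.ServiceUnknown")
    except DBusCallError as e:
        # one tidy line instead of a logged traceback
        log.info("iscsi: Failed to get active sessions: %s", str(e))
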

From 52019b19caaf383daa5f2f0437e0c9e262adb45e Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 13 Dec 2021 14:18:12 +0100
Subject: [PATCH] Fix log message for the LVM devices filter
---
blivet/devicelibs/lvm.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/blivet/devicelibs/lvm.py b/blivet/devicelibs/lvm.py
index 3ab1540b..bbde6303 100644
--- a/blivet/devicelibs/lvm.py
+++ b/blivet/devicelibs/lvm.py
@@ -133,14 +133,14 @@ def needs_config_refresh(fn):
@needs_config_refresh
def lvm_devices_add(path):
""" Add a device (PV) to the list of devices LVM is allowed to use """
- log.debug("lvm filter: device %s added to the list of allowed devices")
+ log.debug("lvm filter: device %s added to the list of allowed devices", path)
_lvm_devices.add(path)
@needs_config_refresh
def lvm_devices_remove(path):
""" Remove a device (PV) to the list of devices LVM is allowed to use """
- log.debug("lvm filter: device %s removed from the list of allowed devices")
+ log.debug("lvm filter: device %s removed from the list of allowed devices", path)
try:
_lvm_devices.remove(path)
except KeyError:
--
2.34.1
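
The bug fixed here is easy to miss because logging formats lazily and never raises when the argument is simply absent: the literal placeholder ends up in the log. A runnable demonstration:

    import logging

    logging.basicConfig(level=logging.DEBUG)
    log = logging.getLogger(__name__)

    path = "/dev/sda1"
    # buggy: no argument supplied, the log line contains a literal "%s"
    log.debug("lvm filter: device %s added to the list of allowed devices")
    # fixed: the argument is supplied and interpolated
    log.debug("lvm filter: device %s added to the list of allowed devices", path)
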

From f02dbed9143664246d400b0f5654062dff5383fc Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 13 Jan 2022 16:53:30 +0100
Subject: [PATCH 1/2] Exclude unusable disks from PartitionFactory
We already remove disks that are too small or not partitionable
in the PartitionSetFactory which allows us to create partitions
on multipath devices where Anaconda tells us to use both the mpath
device and the backing disks, we should do the same for the
PartitionFactory.
Resolves: rhbz#2017432
---
blivet/devicefactory.py | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)
diff --git a/blivet/devicefactory.py b/blivet/devicefactory.py
index 0f7fdfa1..45b38b0f 100644
--- a/blivet/devicefactory.py
+++ b/blivet/devicefactory.py
@@ -1056,6 +1056,24 @@ class PartitionFactory(DeviceFactory):
**kwargs)
return device
+ def _configure(self):
+ disks = []
+ for disk in self.disks:
+ if not disk.partitioned:
+ log.debug("removing unpartitioned disk %s", disk.name)
+ elif not disk.format.supported:
+ log.debug("removing disk with unsupported format %s", disk.name)
+ else:
+ disks.append(disk)
+
+ if not disks:
+ raise DeviceFactoryError("no usable disks specified for partition")
+
+ log.debug("setting new factory disks to %s", [d.name for d in disks])
+ self.disks = disks # pylint: disable=attribute-defined-outside-init
+
+ super(PartitionFactory, self)._configure()
+
def _set_disks(self):
self.raw_device.req_disks = self.disks[:]
--
2.34.1
From a9adcb050a16ab8231c81ced68302d6ad685ccf4 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 13 Jan 2022 17:27:08 +0100
Subject: [PATCH 2/2] Show better error when using uninitialized disk in
do_partitioning
Now all we get is "KeyError: '/dev/sda'" for example.
Related: rhbz#2017432
---
blivet/partitioning.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/blivet/partitioning.py b/blivet/partitioning.py
index 53f9cc3f..23b150f9 100644
--- a/blivet/partitioning.py
+++ b/blivet/partitioning.py
@@ -764,7 +764,10 @@ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None):
growth = 0 # in sectors
# loop through disks
for _disk in req_disks:
- disklabel = disklabels[_disk.path]
+ try:
+ disklabel = disklabels[_disk.path]
+ except KeyError:
+ raise PartitioningError("Requested disk %s doesn't have a usable disklabel for partitioning" % _disk.name)
best = None
current_free = free
try:
--
2.34.1

From dc6350f87a1dacdebdbb9cf0be43699bb5f7eadd Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 16 Aug 2021 09:50:34 +0200
Subject: [PATCH] Fix getting PV info in LVMPhysicalVolume from the cache
"self.device" is string for formats so accessing "self.device.path"
results in an AttributeError.
Resolves: rhbz#2079221
---
blivet/formats/lvmpv.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py
index 3fef667e..483b53a4 100644
--- a/blivet/formats/lvmpv.py
+++ b/blivet/formats/lvmpv.py
@@ -197,7 +197,7 @@ class LVMPhysicalVolume(DeviceFormat):
if self.exists:
# we don't have any actual value, but the PV exists and is
# active, we should try to determine it
- pv_info = pvs_info.cache.get(self.device.path)
+ pv_info = pvs_info.cache.get(self.device)
if pv_info is None:
log.error("Failed to get free space information for the PV '%s'", self.device)
self._free = Size(0)
--
2.34.3
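
The root cause, reduced to a runnable illustration: a format object stores its device as a plain path string, so the PV cache has to be keyed by that string, and dereferencing .path on it would fail.

    class Format:
        def __init__(self, device):
            self.device = device        # a path string, e.g. "/dev/sda1"

    # pvs_info.cache is keyed by device path in blivet; a dict stands in here
    cache = {"/dev/sda1": "pv-info-for-sda1"}

    fmt = Format("/dev/sda1")
    print(cache.get(fmt.device))        # works: looked up by the path string
    # cache.get(fmt.device.path)        # AttributeError: 'str' has no 'path'
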

From 72ace5d66b567baefde10ff9c4197054830067f1 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 28 Apr 2022 14:13:04 +0200
Subject: [PATCH] Do not crash when changing disklabel on disks with active
devices
The _find_active_devices_on_action_disks function originally
prevented any changes from being made on disks with active devices
(active LVs, mounted partitions etc.). This was changed in
b72e957d2b23444824316331ae21d1c594371e9c and the check currently
prevents only reformatting the disklabel on such disks, which
should already be impossible on disks with an existing partition.
This change for the 3.4 stable branch keeps the current behaviour
where the active devices are torn down when running in installer
mode, to avoid potential issues with the installer.
Resolves: rhbz#2078803
---
blivet/actionlist.py | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/blivet/actionlist.py b/blivet/actionlist.py
index f3977401..9c06228b 100644
--- a/blivet/actionlist.py
+++ b/blivet/actionlist.py
@@ -211,9 +211,8 @@ class ActionList(object):
except StorageError as e:
log.info("teardown of %s failed: %s", device.name, e)
else:
- raise RuntimeError("partitions in use on disks with changes "
- "pending: %s" %
- ",".join(problematic))
+ log.debug("ignoring devices in use on disks with changes: %s",
+ ",".join(problematic))
log.info("resetting parted disks...")
for device in devices:
--
2.34.3

From 070b33c1a80e5740abd7878118a23eaaca1e3460 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 13 Apr 2022 15:43:45 +0200
Subject: [PATCH] ActionDestroyDevice should not obsolete ActionRemoveMember
If we want to remove a PV from a VG and then remove the PV device,
the ActionDestroyDevice must not obsolete the ActionRemoveMember
action. Even though we are going to remove the device, we still
need to call "vgreduce" first.
Resolves: rhbz#2076956
---
blivet/deviceaction.py | 10 +++++-----
tests/action_test.py | 7 +++++++
2 files changed, 12 insertions(+), 5 deletions(-)
diff --git a/blivet/deviceaction.py b/blivet/deviceaction.py
index 0458e4be..78e113bf 100644
--- a/blivet/deviceaction.py
+++ b/blivet/deviceaction.py
@@ -463,8 +463,8 @@ class ActionDestroyDevice(DeviceAction):
- obsoletes all actions w/ lower id that act on the same device,
including self, if device does not exist
- - obsoletes all but ActionDestroyFormat actions w/ lower id on the
- same device if device exists
+ - obsoletes all but ActionDestroyFormat and ActionRemoveMember actions
+ w/ lower id on the same device if device exists
- obsoletes all actions that add a member to this action's
(container) device
@@ -474,9 +474,9 @@ class ActionDestroyDevice(DeviceAction):
if action.device.id == self.device.id:
if self.id >= action.id and not self.device.exists:
rc = True
- elif self.id > action.id and \
- self.device.exists and \
- not (action.is_destroy and action.is_format):
+ elif self.id > action.id and self.device.exists and \
+ not ((action.is_destroy and action.is_format) or
+ action.is_remove):
rc = True
elif action.is_add and (action.device == self.device):
rc = True
diff --git a/tests/action_test.py b/tests/action_test.py
index 8509ce35..626b9b49 100644
--- a/tests/action_test.py
+++ b/tests/action_test.py
@@ -1197,6 +1197,13 @@ class DeviceActionTestCase(StorageTestCase):
self.assertEqual(create_sdc2.requires(remove_sdc1), False)
self.assertEqual(remove_sdc1.requires(create_sdc2), False)
+ # destroy sdc1, the ActionRemoveMember should not be obsoleted
+ sdc1.exists = True
+ destroy_sdc1 = ActionDestroyDevice(sdc1)
+ destroy_sdc1.apply()
+ self.assertFalse(destroy_sdc1.obsoletes(remove_sdc1))
+ self.assertTrue(destroy_sdc1.requires(remove_sdc1))
+
def test_action_sorting(self, *args, **kwargs):
""" Verify correct functioning of action sorting. """
--
2.34.3

From ea5054b0cab19f3fe09d7010f8721e7f18ae399e Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 2 May 2022 15:30:16 +0200
Subject: [PATCH] Correctly set vg_name after adding/removing a PV from a VG
Without setting the LVMPhysicalVolume.vg_name argument to None
after removing the PV from its VG, the PV is still considered
active and cannot be removed.
Resolves: rhbz#2081278
---
blivet/devices/lvm.py | 3 +++
tests/devices_test/lvm_test.py | 13 +++++++++++++
2 files changed, 16 insertions(+)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 9c230f1b..a971da8e 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -385,6 +385,8 @@ class LVMVolumeGroupDevice(ContainerDevice):
if not parent.format.exists:
parent.format.free = self._get_pv_usable_space(parent)
+ parent.format.vg_name = self.name
+
def _remove_parent(self, parent):
# XXX It would be nice to raise an exception if removing this member
# would not leave enough space, but the devicefactory relies on it
@@ -395,6 +397,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
super(LVMVolumeGroupDevice, self)._remove_parent(parent)
parent.format.free = None
parent.format.container_uuid = None
+ parent.format.vg_name = None
# We can't rely on lvm to tell us about our size, free space, &c
# since we could have modifications queued, unless the VG and all of
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
index 5efa369e..59c027da 100644
--- a/tests/devices_test/lvm_test.py
+++ b/tests/devices_test/lvm_test.py
@@ -454,6 +454,19 @@ class LVMDeviceTest(unittest.TestCase):
pool.autoset_md_size(enforced=True)
self.assertEqual(pool.chunk_size, Size("128 KiB"))
+ def test_add_remove_pv(self):
+ pv1 = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("1024 MiB"))
+ pv2 = StorageDevice("pv2", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("1024 MiB"))
+ vg = LVMVolumeGroupDevice("testvg", parents=[pv1])
+
+ vg._add_parent(pv2)
+ self.assertEqual(pv2.format.vg_name, vg.name)
+
+ vg._remove_parent(pv2)
+ self.assertEqual(pv2.format.vg_name, None)
+
class TypeSpecificCallsTest(unittest.TestCase):
def test_type_specific_calls(self):
--
2.34.3

From 08f0e12c74e4c2ba25629fe92108283dd5ae3ff3 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 30 Dec 2021 16:08:43 +0100
Subject: [PATCH 1/4] Add support for creating LVM cache pools
Resolves: rhbz#2055200
---
blivet/blivet.py | 9 +-
blivet/devicelibs/lvm.py | 9 ++
blivet/devices/lvm.py | 160 +++++++++++++++++++++++++++++++--
tests/devices_test/lvm_test.py | 26 ++++++
4 files changed, 196 insertions(+), 8 deletions(-)
diff --git a/blivet/blivet.py b/blivet/blivet.py
index c6908eb0..d29fadd0 100644
--- a/blivet/blivet.py
+++ b/blivet/blivet.py
@@ -576,6 +576,8 @@ class Blivet(object):
:type vdo_pool: bool
:keyword vdo_lv: whether to create a vdo lv
:type vdo_lv: bool
+ :keyword cache_pool: whether to create a cache pool
+ :type cache_pool: bool
:returns: the new device
:rtype: :class:`~.devices.LVMLogicalVolumeDevice`
@@ -594,6 +596,7 @@ class Blivet(object):
thin_pool = kwargs.pop("thin_pool", False)
vdo_pool = kwargs.pop("vdo_pool", False)
vdo_lv = kwargs.pop("vdo_lv", False)
+ cache_pool = kwargs.pop("cache_pool", False)
parent = kwargs.get("parents", [None])[0]
if (thin_volume or vdo_lv) and parent:
# kwargs["parents"] will contain the pool device, so...
@@ -609,6 +612,8 @@ class Blivet(object):
kwargs["seg_type"] = "vdo-pool"
if vdo_lv:
kwargs["seg_type"] = "vdo"
+ if cache_pool:
+ kwargs["seg_type"] = "cache-pool"
mountpoint = kwargs.pop("mountpoint", None)
if 'fmt_type' in kwargs:
@@ -640,7 +645,7 @@ class Blivet(object):
swap = False
prefix = ""
- if thin_pool or vdo_pool:
+ if thin_pool or vdo_pool or cache_pool:
prefix = "pool"
name = self.suggest_device_name(parent=vg,
@@ -651,7 +656,7 @@ class Blivet(object):
if "%s-%s" % (vg.name, name) in self.names:
raise ValueError("name '%s' is already in use" % name)
- if thin_pool or thin_volume or vdo_pool or vdo_lv:
+ if thin_pool or thin_volume or vdo_pool or vdo_lv or cache_pool:
cache_req = kwargs.pop("cache_request", None)
if cache_req:
raise ValueError("Creating cached thin and VDO volumes and pools is not supported")
diff --git a/blivet/devicelibs/lvm.py b/blivet/devicelibs/lvm.py
index bbde6303..23935009 100644
--- a/blivet/devicelibs/lvm.py
+++ b/blivet/devicelibs/lvm.py
@@ -54,6 +54,11 @@ LVM_THINP_MIN_CHUNK_SIZE = Size("64 KiB")
LVM_THINP_MAX_CHUNK_SIZE = Size("1 GiB")
LVM_THINP_ADDRESSABLE_CHUNK_SIZE = Size("17455015526400 B") # 15.88 TiB
+# cache constants
+LVM_CACHE_MIN_METADATA_SIZE = Size("8 MiB")
+LVM_CACHE_MAX_METADATA_SIZE = Size("16 GiB")
+LVM_CACHE_DEFAULT_MODE = blockdev.LVMCacheMode.WRITETHROUGH
+
raid_levels = raid.RAIDLevels(["linear", "striped", "raid1", "raid4", "raid5", "raid6", "raid10"])
raid_seg_types = list(itertools.chain.from_iterable([level.names for level in raid_levels if level.name != "linear"]))
@@ -248,3 +253,7 @@ def recommend_thpool_chunk_size(thpool_size):
# for every ~15.88 TiB of thinpool data size
return min(math.ceil(thpool_size / LVM_THINP_ADDRESSABLE_CHUNK_SIZE) * LVM_THINP_MIN_CHUNK_SIZE,
LVM_THINP_MAX_CHUNK_SIZE)
+
+
+def is_valid_cache_md_size(md_size):
+ return md_size >= LVM_CACHE_MIN_METADATA_SIZE and md_size <= LVM_CACHE_MAX_METADATA_SIZE
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index a971da8e..7cb482ab 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -43,6 +43,7 @@ from .. import util
from ..storage_log import log_method_call
from .. import udev
from ..size import Size, KiB, MiB, ROUND_UP, ROUND_DOWN
+from ..static_data.lvm_info import lvs_info
from ..tasks import availability
import logging
@@ -646,7 +647,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
percent=None, cache_request=None, pvs=None, from_lvs=None):
if not exists:
- if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo"] + lvm.raid_seg_types:
+ if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo", "cache-pool"] + lvm.raid_seg_types:
raise ValueError("Invalid or unsupported segment type: %s" % seg_type)
if seg_type and seg_type in lvm.raid_seg_types and not pvs:
raise ValueError("List of PVs has to be given for every non-linear LV")
@@ -690,8 +691,8 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
# we reserve space for it
self._metadata_size = self.vg.pe_size
self._size -= self._metadata_size
- elif self.seg_type == "thin-pool":
- # LVMThinPoolMixin sets self._metadata_size on its own
+ elif self.seg_type in ("thin-pool", "cache-pool"):
+ # LVMThinPoolMixin and LVMCachePoolMixin set self._metadata_size on their own
if not self.exists and not from_lvs and not grow:
# a thin pool we are not going to grow -> lets calculate metadata
# size now if not given explicitly
@@ -1619,7 +1620,6 @@ class LVMThinPoolMixin(object):
""" A list of this pool's LVs """
return self._lvs[:] # we don't want folks changing our list
- @util.requires_property("is_thin_pool")
def autoset_md_size(self, enforced=False):
""" If self._metadata_size not set already, it calculates the recommended value
and sets it while subtracting the size from self.size.
@@ -2032,9 +2032,142 @@ class LVMVDOLogicalVolumeMixin(object):
self.pool._add_log_vol(self)
+class LVMCachePoolMixin(object):
+ def __init__(self, metadata_size, cache_mode=None):
+ self._metadata_size = metadata_size or Size(0)
+ self._cache_mode = cache_mode
+
+ def _init_check(self):
+ if not self.is_cache_pool:
+ return
+
+ if self._metadata_size and not lvm.is_valid_cache_md_size(self._metadata_size):
+ raise ValueError("invalid metadatasize value")
+
+ if not self.exists and not self._pv_specs:
+ raise ValueError("at least one fast PV must be specified to create a cache pool")
+
+ def _check_from_lvs(self):
+ if self._from_lvs:
+ if len(self._from_lvs) != 2:
+ raise errors.DeviceError("two LVs required to create a cache pool")
+
+ def _convert_from_lvs(self):
+ data_lv, metadata_lv = self._from_lvs
+
+ data_lv.parent_lv = self # also adds the LV to self._internal_lvs
+ data_lv.int_lv_type = LVMInternalLVtype.data
+ metadata_lv.parent_lv = self
+ metadata_lv.int_lv_type = LVMInternalLVtype.meta
+
+ self.size = data_lv.size
+
+ @property
+ def is_cache_pool(self):
+ return self.seg_type == "cache-pool"
+
+ @property
+ def profile(self):
+ return self._profile
+
+ @property
+ def type(self):
+ return "lvmcachepool"
+
+ @property
+ def resizable(self):
+ return False
+
+ def read_current_size(self):
+ log_method_call(self, exists=self.exists, path=self.path,
+ sysfs_path=self.sysfs_path)
+ if self.size != Size(0):
+ return self.size
+
+ if self.exists:
+ # cache pools are not active and don't have the device mapper mapping
+ # so we can't get this from sysfs
+ lv_info = lvs_info.cache.get(self.name)
+ if lv_info is None:
+ log.error("Failed to get size for existing cache pool '%s'", self.name)
+ return Size(0)
+ else:
+ return Size(lv_info.size)
+
+ return Size(0)
+
+ def autoset_md_size(self, enforced=False):
+ """ If self._metadata_size not set already, it calculates the recommended value
+ and sets it while subtracting the size from self.size.
+
+ """
+
+ log.debug("Auto-setting cache pool metadata size")
+
+ if self._size <= Size(0):
+ log.debug("Cache pool size not bigger than 0, just setting metadata size to 0")
+ self._metadata_size = 0
+ return
+
+ old_md_size = self._metadata_size
+ if self._metadata_size == 0 or enforced:
+ self._metadata_size = blockdev.lvm.cache_get_default_md_size(self._size)
+ log.debug("Using recommended metadata size: %s", self._metadata_size)
+
+ self._metadata_size = self.vg.align(self._metadata_size, roundup=True)
+ log.debug("Rounded metadata size to extents: %s MiB", self._metadata_size.convert_to("MiB"))
+
+ if self._metadata_size == old_md_size:
+ log.debug("Rounded metadata size unchanged")
+ else:
+ new_size = self.size - (self._metadata_size - old_md_size)
+ log.debug("Adjusting size from %s MiB to %s MiB",
+ self.size.convert_to("MiB"), new_size.convert_to("MiB"))
+ self.size = new_size
+
+ def _pre_create(self):
+ # make sure all the LVs this LV should be created from exist (if any)
+ if self._from_lvs and any(not lv.exists for lv in self._from_lvs):
+ raise errors.DeviceError("Component LVs need to be created first")
+
+ def _create(self):
+ """ Create the device. """
+ log_method_call(self, self.name, status=self.status)
+ if self._cache_mode:
+ try:
+ cache_mode = blockdev.lvm.cache_get_mode_from_str(self._cache_mode)
+ except blockdev.LVMError as e:
+ raise errors.DeviceError from e
+ else:
+ cache_mode = lvm.LVM_CACHE_DEFAULT_MODE
+
+ if self._from_lvs:
+ extra = dict()
+ if self.mode:
+ # we need the string here, it will be passed directly to the lvm command
+ extra["cachemode"] = self._cache_mode
+ data_lv = six.next(lv for lv in self._internal_lvs if lv.int_lv_type == LVMInternalLVtype.data)
+ meta_lv = six.next(lv for lv in self._internal_lvs if lv.int_lv_type == LVMInternalLVtype.meta)
+ blockdev.lvm.cache_pool_convert(self.vg.name, data_lv.lvname, meta_lv.lvname, self.lvname, **extra)
+ else:
+ blockdev.lvm.cache_create_pool(self.vg.name, self.lvname, self.size,
+ self.metadata_size,
+ cache_mode,
+ 0,
+ [spec.pv.path for spec in self._pv_specs])
+
+ def dracut_setup_args(self):
+ return set()
+
+ @property
+ def direct(self):
+ """ Is this device directly accessible? """
+ return False
+
+
class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin, LVMSnapshotMixin,
LVMThinPoolMixin, LVMThinLogicalVolumeMixin, LVMVDOPoolMixin,
- LVMVDOLogicalVolumeMixin):
+ LVMVDOLogicalVolumeMixin, LVMCachePoolMixin):
""" An LVM Logical Volume """
# generally resizable, see :property:`resizable` for details
@@ -2046,7 +2179,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
parent_lv=None, int_type=None, origin=None, vorigin=False,
metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
compression=False, deduplication=False, index_memory=0,
- write_policy=None):
+ write_policy=None, cache_mode=None):
"""
:param name: the device name (generally a device node's basename)
:type name: str
@@ -2116,6 +2249,13 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
:keyword write_policy: write policy for the volume or None for default
:type write_policy: str
+ For cache pools only:
+
+ :keyword metadata_size: the size of the metadata LV
+ :type metadata_size: :class:`~.size.Size`
+ :keyword cache_mode: mode for the cache or None for default (writethrough)
+ :type cache_mode: str
+
"""
if isinstance(parents, (list, ParentList)):
@@ -2133,6 +2273,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
LVMSnapshotMixin.__init__(self, origin, vorigin)
LVMThinPoolMixin.__init__(self, metadata_size, chunk_size, profile)
LVMThinLogicalVolumeMixin.__init__(self)
+ LVMCachePoolMixin.__init__(self, metadata_size, cache_mode)
LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
fmt, exists, sysfs_path, grow, maxsize,
percent, cache_request, pvs, from_lvs)
@@ -2144,6 +2285,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
LVMSnapshotMixin._init_check(self)
LVMThinPoolMixin._init_check(self)
LVMThinLogicalVolumeMixin._init_check(self)
+ LVMCachePoolMixin._init_check(self)
if self._from_lvs:
self._check_from_lvs()
@@ -2169,6 +2311,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
ret.append(LVMVDOPoolMixin)
if self.is_vdo_lv:
ret.append(LVMVDOLogicalVolumeMixin)
+ if self.is_cache_pool:
+ ret.append(LVMCachePoolMixin)
return ret
def _try_specific_call(self, name, *args, **kwargs):
@@ -2552,6 +2696,10 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
return True
+ @type_specific
+ def autoset_md_size(self, enforced=False):
+ pass
+
def attach_cache(self, cache_pool_lv):
if self.is_thin_lv or self.is_snapshot_lv or self.is_internal_lv:
raise errors.DeviceError("Cannot attach a cache pool to the '%s' LV" % self.name)
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
index 59c027da..0105bcae 100644
--- a/tests/devices_test/lvm_test.py
+++ b/tests/devices_test/lvm_test.py
@@ -868,3 +868,29 @@ class BlivetLVMVDODependenciesTest(unittest.TestCase):
vdo_supported = devicefactory.is_supported_device_type(devicefactory.DEVICE_TYPE_LVM_VDO)
self.assertFalse(vdo_supported)
+
+
+@unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES), "some unsupported device classes required for this test")
+class BlivetNewLVMCachePoolDeviceTest(unittest.TestCase):
+
+ def test_new_cache_pool(self):
+ b = blivet.Blivet()
+ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("10 GiB"), exists=True)
+ vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
+
+ for dev in (pv, vg):
+ b.devicetree._add_device(dev)
+
+ # check that all the above devices are in the expected places
+ self.assertEqual(set(b.devices), {pv, vg})
+ self.assertEqual(set(b.vgs), {vg})
+
+ self.assertEqual(vg.size, Size("10236 MiB"))
+
+ cachepool = b.new_lv(name="cachepool", cache_pool=True,
+ parents=[vg], pvs=[pv])
+
+ b.create_device(cachepool)
+
+ self.assertEqual(cachepool.type, "lvmcachepool")
--
2.34.3
From bfb0e71a92f46baae098370207640962c97d8e77 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 30 Dec 2021 16:09:04 +0100
Subject: [PATCH 2/4] examples: Add LVM cache pool example
Related: rhbz#2055200
---
examples/lvm_cachepool.py | 59 +++++++++++++++++++++++++++++++++++++++
1 file changed, 59 insertions(+)
create mode 100644 examples/lvm_cachepool.py
diff --git a/examples/lvm_cachepool.py b/examples/lvm_cachepool.py
new file mode 100644
index 00000000..ab2e8a72
--- /dev/null
+++ b/examples/lvm_cachepool.py
@@ -0,0 +1,59 @@
+import os
+
+import blivet
+from blivet.size import Size
+from blivet.util import set_up_logging, create_sparse_tempfile
+
+
+set_up_logging()
+b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
+
+# create a disk image file on which to create new devices
+disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
+b.disk_images["disk1"] = disk1_file
+disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
+b.disk_images["disk2"] = disk2_file
+
+b.reset()
+
+try:
+ disk1 = b.devicetree.get_device_by_name("disk1")
+ disk2 = b.devicetree.get_device_by_name("disk2")
+
+ b.initialize_disk(disk1)
+ b.initialize_disk(disk2)
+
+ pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
+ b.create_device(pv)
+ pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
+ b.create_device(pv2)
+
+ # allocate the partitions (decide where and on which disks they'll reside)
+ blivet.partitioning.do_partitioning(b)
+
+ vg = b.new_vg(parents=[pv, pv2])
+ b.create_device(vg)
+
+ # new 5GiB LV with an ext4 filesystem
+ lv = b.new_lv(fmt_type="ext4", size=Size("5GiB"), parents=[vg], name="cached")
+ b.create_device(lv)
+
+ # new cache pool
+ cpool = b.new_lv(size=Size("1 GiB"), parents=[vg], pvs=[pv2], cache_pool=True, name="fastlv")
+ b.create_device(cpool)
+
+ # write the new partitions to disk and format them as specified
+ b.do_it()
+ print(b.devicetree)
+
+ # attach the newly created cache pool to the "slow" LV
+ lv.attach_cache(cpool)
+
+ b.reset()
+ print(b.devicetree)
+
+ input("Check the state and hit ENTER to trigger cleanup")
+finally:
+ b.devicetree.teardown_disk_images()
+ os.unlink(disk1_file)
+ os.unlink(disk2_file)
--
2.34.3
From 1fece0e7f15f7b0d457d3db876d23c3272df09bd Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 30 Dec 2021 16:13:33 +0100
Subject: [PATCH 3/4] lvm: Use blivet static data when checking if the VG is
active
Instead of calling 'lvs' again in LVMVolumeGroupDevice.status
Related: rhbz#2055200
---
blivet/devices/lvm.py | 9 ++-------
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 7cb482ab..12d3d073 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -220,13 +220,8 @@ class LVMVolumeGroupDevice(ContainerDevice):
# special handling for incomplete VGs
if not self.complete:
- try:
- lvs_info = blockdev.lvm.lvs(vg_name=self.name)
- except blockdev.LVMError:
- lvs_info = []
-
- for lv_info in lvs_info:
- if lv_info.attr and lv_info.attr[4] == 'a':
+ for lv_info in lvs_info.cache.values():
+ if lv_info.vg_name == self.name and lv_info.attr and lv_info.attr[4] == 'a':
return True
return False
--
2.34.3
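
The new check leans on the lvs attribute string, whose fifth character encodes the LV state ('a' meaning active), exactly as the diff's attr[4] test does. A standalone sketch with faked cache entries (illustrative names):

    class LVInfo:
        def __init__(self, vg_name, attr):
            self.vg_name = vg_name
            self.attr = attr

    # stand-in for blivet.static_data.lvm_info.lvs_info.cache
    cache = {
        "testvg-root": LVInfo("testvg", "-wi-ao----"),
        "othervg-lv0": LVInfo("othervg", "-wi-------"),
    }

    def vg_has_active_lv(vg_name):
        return any(lv.vg_name == vg_name and lv.attr and lv.attr[4] == "a"
                   for lv in cache.values())

    print(vg_has_active_lv("testvg"))     # True
    print(vg_has_active_lv("othervg"))    # False
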
From 8d957f04c2d5f56386b978d1bf890450f38ad108 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 30 May 2022 17:02:43 +0200
Subject: [PATCH 4/4] Add option to attach a newly created cache pool to
existing LV
Because we do not have an action for attaching the cache pool, we
cannot schedule both adding the fast PV to the VG and attaching
the cache pool to an existing LV. This hack allows scheduling the
attach to happen after the cache pool is created.
Related: rhbz#2055200
---
blivet/devices/lvm.py | 38 +++++++++++++++++++++++++++++++++++---
1 file changed, 35 insertions(+), 3 deletions(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 12d3d073..feb92f2e 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -2028,9 +2028,10 @@ class LVMVDOLogicalVolumeMixin(object):
class LVMCachePoolMixin(object):
- def __init__(self, metadata_size, cache_mode=None):
+ def __init__(self, metadata_size, cache_mode=None, attach_to=None):
self._metadata_size = metadata_size or Size(0)
self._cache_mode = cache_mode
+ self._attach_to = attach_to
def _init_check(self):
if not self.is_cache_pool:
@@ -2042,6 +2043,9 @@ class LVMCachePoolMixin(object):
if not self.exists and not self._pv_specs:
raise ValueError("at least one fast PV must be specified to create a cache pool")
+ if self._attach_to and not self._attach_to.exists:
+ raise ValueError("cache pool can be attached only to an existing LV")
+
def _check_from_lvs(self):
if self._from_lvs:
if len(self._from_lvs) != 2:
@@ -2150,6 +2154,31 @@ class LVMCachePoolMixin(object):
cache_mode,
0,
[spec.pv.path for spec in self._pv_specs])
+ if self._attach_to:
+ self._attach_to.attach_cache(self)
+
+ def _post_create(self):
+ if self._attach_to:
+ # post_create tries to activate the LV and after attaching it no longer exists
+ return
+
+ # pylint: disable=bad-super-call
+ super(LVMLogicalVolumeBase, self)._post_create()
+
+ def add_hook(self, new=True):
+ if self._attach_to:
+ self._attach_to._cache = LVMCache(self._attach_to, size=self.size, exists=False,
+ pvs=self._pv_specs, mode=self._cache_mode)
+
+ # pylint: disable=bad-super-call
+ super(LVMLogicalVolumeBase, self).add_hook(new=new)
+
+ def remove_hook(self, modparent=True):
+ if self._attach_to:
+ self._attach_to._cache = None
+
+ # pylint: disable=bad-super-call
+ super(LVMLogicalVolumeBase, self).remove_hook(modparent=modparent)
def dracut_setup_args(self):
return set()
@@ -2174,7 +2203,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
parent_lv=None, int_type=None, origin=None, vorigin=False,
metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
compression=False, deduplication=False, index_memory=0,
- write_policy=None, cache_mode=None):
+ write_policy=None, cache_mode=None, attach_to=None):
"""
:param name: the device name (generally a device node's basename)
:type name: str
@@ -2250,6 +2279,9 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
:type metadata_size: :class:`~.size.Size`
:keyword cache_mode: mode for the cache or None for default (writethrough)
:type cache_mode: str
+ :keyword attach_to: for non-existing cache pools a logical volume the pool should
+ be attached to when created
+ :type attach_to: :class:`LVMLogicalVolumeDevice`
"""
@@ -2268,7 +2300,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
LVMSnapshotMixin.__init__(self, origin, vorigin)
LVMThinPoolMixin.__init__(self, metadata_size, chunk_size, profile)
LVMThinLogicalVolumeMixin.__init__(self)
- LVMCachePoolMixin.__init__(self, metadata_size, cache_mode)
+ LVMCachePoolMixin.__init__(self, metadata_size, cache_mode, attach_to)
LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
fmt, exists, sysfs_path, grow, maxsize,
percent, cache_request, pvs, from_lvs)
--
2.34.3
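
How the new keyword is intended to be used, sketched against the lvm_cachepool.py example added earlier in this series (same b, vg, pv2 and lv objects as there; note that attach_to requires lv to already exist on disk, per the check added in _init_check, so this belongs in a second run after the LV was created):

    # schedule a cache pool that attaches itself to the existing LV once created
    cpool = b.new_lv(size=Size("1 GiB"), parents=[vg], pvs=[pv2],
                     cache_pool=True, name="fastlv", attach_to=lv)
    b.create_device(cpool)
    b.do_it()    # the pool is created and then attached via attach_cache()
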

From 4103df5ddaae49d51640d01502e8456409a92be9 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 5 May 2022 16:35:37 +0200
Subject: [PATCH] Use LVM PV format current_size in
LVMVolumeGroupDevice._remove
The member format's size is 0 when a target size is not set.
Related: rhbz#2081278
---
blivet/devices/lvm.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index feb92f2e..facb1b76 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -289,7 +289,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
# do not run pvmove on empty PVs
member.format.update_size_info()
- if member.format.free < member.format.size:
+ if member.format.free < member.format.current_size:
blockdev.lvm.pvmove(member.path)
blockdev.lvm.vgreduce(self.name, member.path)
--
2.34.3

From a709c4db1bcf2e7ff69158a54ed3a1ea92ba4f97 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 14 Oct 2021 08:48:05 +0200
Subject: [PATCH] tests: Mark "fake" disks in test_get_related_disks as
non-existing
We are using "real" disk names ("sda", "sdb"...) in this test so
we need to avoid reading their real sizes which we do for existing
devices using os.stat. The test can fail if we have a disk with
the same name and small (or zero) size.
Resolves: rhbz#2062690
---
tests/devicetree_test.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/tests/devicetree_test.py b/tests/devicetree_test.py
index 3be4d572..c1b97c52 100644
--- a/tests/devicetree_test.py
+++ b/tests/devicetree_test.py
@@ -406,9 +406,9 @@ class DeviceTreeTestCase(unittest.TestCase):
def test_get_related_disks(self):
tree = DeviceTree()
- sda = DiskDevice("sda", size=Size('300g'))
- sdb = DiskDevice("sdb", size=Size('300g'))
- sdc = DiskDevice("sdc", size=Size('300G'))
+ sda = DiskDevice("sda", size=Size('300g'), exists=False)
+ sdb = DiskDevice("sdb", size=Size('300g'), exists=False)
+ sdc = DiskDevice("sdc", size=Size('300G'), exists=False)
tree._add_device(sda)
tree._add_device(sdb)
--
2.34.3

From 35643156c511c8120f2d562c1664a3c7a5a48cfb Mon Sep 17 00:00:00 2001
From: Jan Stodola <jstodola@redhat.com>
Date: Thu, 28 Oct 2021 21:17:25 +0200
Subject: [PATCH 1/8] Fix removing zFCP SCSI devices
Values parsed from /proc/scsi/scsi were not correctly used to assemble
paths to SCSI devices.
For example:
/sys/bus/scsi/devices/0:0:00:00/
was incorrectly accessed instead of:
/sys/bus/scsi/devices/0:0:0:0/
Switch to a more reliable way of listing the available SCSI devices.
Related: rhbz#1937030
---
blivet/zfcp.py | 17 ++++-------------
1 file changed, 4 insertions(+), 13 deletions(-)
diff --git a/blivet/zfcp.py b/blivet/zfcp.py
index 93af5419..3747290e 100644
--- a/blivet/zfcp.py
+++ b/blivet/zfcp.py
@@ -20,6 +20,7 @@
#
import os
+import re
from . import udev
from . import util
from .i18n import _
@@ -167,20 +168,10 @@ class ZFCPDevice:
return True
def offline_scsi_device(self):
- f = open("/proc/scsi/scsi", "r")
- lines = f.readlines()
- f.close()
- # alternatively iterate over /sys/bus/scsi/devices/*:0:*:*/
+ # A list of existing SCSI devices in format Host:Bus:Target:Lun
+ scsi_devices = [f for f in os.listdir(scsidevsysfs) if re.search(r'^[0-9]+:[0-9]+:[0-9]+:[0-9]+$', f)]
- for line in lines:
- if not line.startswith("Host"):
- continue
- scsihost = line.split()
- host = scsihost[1]
- channel = "0"
- devid = scsihost[5]
- lun = scsihost[7]
- scsidev = "%s:%s:%s:%s" % (host[4:], channel, devid, lun)
+ for scsidev in scsi_devices:
fcpsysfs = "%s/%s" % (scsidevsysfs, scsidev)
scsidel = "%s/%s/delete" % (scsidevsysfs, scsidev)
--
2.34.3
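
The new listing keys off sysfs entry names of the form Host:Bus:Target:Lun instead of parsing /proc/scsi/scsi. The same filter, exercised against a faked directory listing:

    import re

    entries = ["0:0:0:0", "2:0:3:1073758338", "host0", "targets"]
    scsi_devices = [e for e in entries
                    if re.search(r'^[0-9]+:[0-9]+:[0-9]+:[0-9]+$', e)]
    print(scsi_devices)     # ['0:0:0:0', '2:0:3:1073758338']
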
From 771cbf623030b1fa51ec193a2b5e2db229420a7a Mon Sep 17 00:00:00 2001
From: Jan Stodola <jstodola@redhat.com>
Date: Sun, 21 Nov 2021 02:47:45 +0100
Subject: [PATCH 2/8] Refactor the ZFCPDevice class
Add a new base class for zFCP devices.
Move code to the new base class.
Improve documentation.
Related: rhbz#1937030
---
blivet/zfcp.py | 131 +++++++++++++++++++++++++++++++++++--------------
1 file changed, 95 insertions(+), 36 deletions(-)
diff --git a/blivet/zfcp.py b/blivet/zfcp.py
index 3747290e..4a50f65f 100644
--- a/blivet/zfcp.py
+++ b/blivet/zfcp.py
@@ -21,6 +21,7 @@
import os
import re
+from abc import ABC
from . import udev
from . import util
from .i18n import _
@@ -46,29 +47,19 @@ zfcpsysfs = "/sys/bus/ccw/drivers/zfcp"
scsidevsysfs = "/sys/bus/scsi/devices"
zfcpconf = "/etc/zfcp.conf"
+class ZFCPDeviceBase(ABC):
+ """An abstract base class for zFCP storage devices."""
-class ZFCPDevice:
- """
- .. warning::
- Since this is a singleton class, calling deepcopy() on the instance
- just returns ``self`` with no copy being created.
- """
-
- def __init__(self, devnum, wwpn, fcplun):
+ def __init__(self, devnum):
self.devnum = blockdev.s390.sanitize_dev_input(devnum)
- self.wwpn = blockdev.s390.zfcp_sanitize_wwpn_input(wwpn)
- self.fcplun = blockdev.s390.zfcp_sanitize_lun_input(fcplun)
-
if not self.devnum:
raise ValueError(_("You have not specified a device number or the number is invalid"))
- if not self.wwpn:
- raise ValueError(_("You have not specified a worldwide port name or the name is invalid."))
- if not self.fcplun:
- raise ValueError(_("You have not specified a FCP LUN or the number is invalid."))
+
+ self._device_online_path = os.path.join(zfcpsysfs, self.devnum, "online")
# Force str and unicode types in case any of the properties are unicode
def _to_string(self):
- return "%s %s %s" % (self.devnum, self.wwpn, self.fcplun)
+ return str(self.devnum)
def __str__(self):
return stringize(self._to_string())
@@ -76,33 +67,97 @@ class ZFCPDevice:
def __unicode__(self):
return unicodeize(self._to_string())
- def online_device(self):
- online = "%s/%s/online" % (zfcpsysfs, self.devnum)
- portadd = "%s/%s/port_add" % (zfcpsysfs, self.devnum)
- portdir = "%s/%s/%s" % (zfcpsysfs, self.devnum, self.wwpn)
- unitadd = "%s/unit_add" % (portdir)
- unitdir = "%s/%s" % (portdir, self.fcplun)
- failed = "%s/failed" % (unitdir)
+ def _free_device(self):
+ """Remove the device from the I/O ignore list to make it visible to the system.
+
+ :raises: ValueError if the device cannot be removed from the I/O ignore list
+ """
- if not os.path.exists(online):
+ if not os.path.exists(self._device_online_path):
log.info("Freeing zFCP device %s", self.devnum)
util.run_program(["zfcp_cio_free", "-d", self.devnum])
- if not os.path.exists(online):
+ if not os.path.exists(self._device_online_path):
raise ValueError(_("zFCP device %s not found, not even in device ignore list.") %
(self.devnum,))
+ def _set_zfcp_device_online(self):
+ """Set the zFCP device online.
+
+ :raises: ValueError if the device cannot be set online
+ """
+
try:
- f = open(online, "r")
- devonline = f.readline().strip()
- f.close()
+ with open(self._device_online_path) as f:
+ devonline = f.readline().strip()
if devonline != "1":
- logged_write_line_to_file(online, "1")
+ logged_write_line_to_file(self._device_online_path, "1")
except OSError as e:
raise ValueError(_("Could not set zFCP device %(devnum)s "
"online (%(e)s).")
% {'devnum': self.devnum, 'e': e})
+ def _set_zfcp_device_offline(self):
+ """Set the zFCP device offline.
+
+ :raises: ValueError if the device cannot be set offline
+ """
+
+ try:
+ logged_write_line_to_file(self._device_online_path, "0")
+ except OSError as e:
+ raise ValueError(_("Could not set zFCP device %(devnum)s "
+ "offline (%(e)s).")
+ % {'devnum': self.devnum, 'e': e})
+
+ def online_device(self):
+ """Initialize the device and make its storage block device(s) ready to use.
+
+ :returns: True if success
+ :raises: ValueError if the device cannot be initialized
+ """
+
+ self._free_device()
+ self._set_zfcp_device_online()
+ return True
+
+
+class ZFCPDevice(ZFCPDeviceBase):
+ """A class for zFCP devices that are not configured in NPIV mode. Such
+ devices have to be specified by a device number, WWPN and LUN.
+ """
+
+ def __init__(self, devnum, wwpn, fcplun):
+ super().__init__(devnum)
+
+ self.wwpn = blockdev.s390.zfcp_sanitize_wwpn_input(wwpn)
+ if not self.wwpn:
+ raise ValueError(_("You have not specified a worldwide port name or the name is invalid."))
+
+ self.fcplun = blockdev.s390.zfcp_sanitize_lun_input(fcplun)
+ if not self.fcplun:
+ raise ValueError(_("You have not specified a FCP LUN or the number is invalid."))
+
+ # Force str and unicode types in case any of the properties are unicode
+ def _to_string(self):
+ return "{} {} {}".format(self.devnum, self.wwpn, self.fcplun)
+
+ def online_device(self):
+ """Initialize the device and make its storage block device(s) ready to use.
+
+ :returns: True if success
+ :raises: ValueError if the device cannot be initialized
+ """
+
+ super().online_device()
+
+ portadd = "%s/%s/port_add" % (zfcpsysfs, self.devnum)
+ portdir = "%s/%s/%s" % (zfcpsysfs, self.devnum, self.wwpn)
+ unitadd = "%s/unit_add" % (portdir)
+ unitdir = "%s/%s" % (portdir, self.fcplun)
+ failed = "%s/failed" % (unitdir)
+
+ # create the sysfs directory for the WWPN/port
if not os.path.exists(portdir):
if os.path.exists(portadd):
# older zfcp sysfs interface
@@ -127,6 +182,7 @@ class ZFCPDevice:
"there.", {'wwpn': self.wwpn,
'devnum': self.devnum})
+ # create the sysfs directory for the LUN/unit
if not os.path.exists(unitdir):
try:
logged_write_line_to_file(unitadd, self.fcplun)
@@ -144,6 +200,7 @@ class ZFCPDevice:
'wwpn': self.wwpn,
'devnum': self.devnum})
+ # check the state of the LUN
fail = "0"
try:
f = open(failed, "r")
@@ -168,6 +225,8 @@ class ZFCPDevice:
return True
def offline_scsi_device(self):
+ """Find SCSI devices associated to the zFCP device and remove them from the system."""
+
# A list of existing SCSI devices in format Host:Bus:Target:Lun
scsi_devices = [f for f in os.listdir(scsidevsysfs) if re.search(r'^[0-9]+:[0-9]+:[0-9]+:[0-9]+$', f)]
@@ -196,7 +255,8 @@ class ZFCPDevice:
self.devnum, self.wwpn, self.fcplun)
def offline_device(self):
- offline = "%s/%s/online" % (zfcpsysfs, self.devnum)
+ """Remove the zFCP device from the system."""
+
portadd = "%s/%s/port_add" % (zfcpsysfs, self.devnum)
portremove = "%s/%s/port_remove" % (zfcpsysfs, self.devnum)
unitremove = "%s/%s/%s/unit_remove" % (zfcpsysfs, self.devnum, self.wwpn)
@@ -212,6 +272,7 @@ class ZFCPDevice:
% {'devnum': self.devnum, 'wwpn': self.wwpn,
'fcplun': self.fcplun, 'e': e})
+ # remove the LUN
try:
logged_write_line_to_file(unitremove, self.fcplun)
except OSError as e:
@@ -221,6 +282,7 @@ class ZFCPDevice:
% {'fcplun': self.fcplun, 'wwpn': self.wwpn,
'devnum': self.devnum, 'e': e})
+ # remove the WWPN only if there are no other LUNs attached
if os.path.exists(portadd):
# only try to remove ports with older zfcp sysfs interface
for lun in os.listdir(portdir):
@@ -238,6 +300,7 @@ class ZFCPDevice:
% {'wwpn': self.wwpn,
'devnum': self.devnum, 'e': e})
+ # check if there are other WWPNs existing for the zFCP device number
if os.path.exists(portadd):
# older zfcp sysfs interface
for port in os.listdir(devdir):
@@ -256,12 +319,8 @@ class ZFCPDevice:
self.devnum, luns[0])
return True
- try:
- logged_write_line_to_file(offline, "0")
- except OSError as e:
- raise ValueError(_("Could not set zFCP device %(devnum)s "
- "offline (%(e)s).")
- % {'devnum': self.devnum, 'e': e})
+ # no other WWPNs/LUNs exist for this device number, it's safe to bring it offline
+ self._set_zfcp_device_offline()
return True
--
2.34.3
From 2dc44c00f170d64458a7c89abc91cda61af8387f Mon Sep 17 00:00:00 2001
From: Jan Stodola <jstodola@redhat.com>
Date: Sun, 21 Nov 2021 02:35:05 +0100
Subject: [PATCH 3/8] Move offline_scsi_device() to the base class
Related: rhbz#1937030
---
blivet/zfcp.py | 74 ++++++++++++++++++++++++++++++--------------------
1 file changed, 44 insertions(+), 30 deletions(-)
diff --git a/blivet/zfcp.py b/blivet/zfcp.py
index 4a50f65f..af8f841d 100644
--- a/blivet/zfcp.py
+++ b/blivet/zfcp.py
@@ -110,6 +110,15 @@ class ZFCPDeviceBase(ABC):
"offline (%(e)s).")
% {'devnum': self.devnum, 'e': e})
+ def _is_scsi_associated_with_fcp(self, fcphbasysfs, _fcpwwpnsysfs, _fcplunsysfs):
+ """Decide if the SCSI device with the provided SCSI attributes
+ corresponds to the zFCP device.
+
+ :returns: True or False
+ """
+
+ return fcphbasysfs == self.devnum
+
def online_device(self):
"""Initialize the device and make its storage block device(s) ready to use.
@@ -121,6 +130,30 @@ class ZFCPDeviceBase(ABC):
self._set_zfcp_device_online()
return True
+ def offline_scsi_device(self):
+ """Find SCSI devices associated to the zFCP device and remove them from the system."""
+
+ # A list of existing SCSI devices in format Host:Bus:Target:Lun
+ scsi_devices = [f for f in os.listdir(scsidevsysfs) if re.search(r'^[0-9]+:[0-9]+:[0-9]+:[0-9]+$', f)]
+
+ for scsidev in scsi_devices:
+ fcpsysfs = os.path.join(scsidevsysfs, scsidev)
+
+ with open(os.path.join(fcpsysfs, "hba_id")) as f:
+ fcphbasysfs = f.readline().strip()
+ with open(os.path.join(fcpsysfs, "wwpn")) as f:
+ fcpwwpnsysfs = f.readline().strip()
+ with open(os.path.join(fcpsysfs, "fcp_lun")) as f:
+ fcplunsysfs = f.readline().strip()
+
+ if self._is_scsi_associated_with_fcp(fcphbasysfs, fcpwwpnsysfs, fcplunsysfs):
+ scsidel = os.path.join(scsidevsysfs, scsidev, "delete")
+ logged_write_line_to_file(scsidel, "1")
+ udev.settle()
+ return
+
+ log.warning("No scsi device found to delete for zfcp %s", self)
+
class ZFCPDevice(ZFCPDeviceBase):
"""A class for zFCP devices that are not configured in NPIV mode. Such
@@ -142,6 +175,17 @@ class ZFCPDevice(ZFCPDeviceBase):
def _to_string(self):
return "{} {} {}".format(self.devnum, self.wwpn, self.fcplun)
+ def _is_scsi_associated_with_fcp(self, fcphbasysfs, fcpwwpnsysfs, fcplunsysfs):
+ """Decide if the SCSI device with the provided SCSI attributes
+ corresponds to the zFCP device.
+
+ :returns: True or False
+ """
+
+ return (fcphbasysfs == self.devnum and
+ fcpwwpnsysfs == self.wwpn and
+ fcplunsysfs == self.fcplun)
+
def online_device(self):
"""Initialize the device and make its storage block device(s) ready to use.
@@ -224,36 +268,6 @@ class ZFCPDevice(ZFCPDeviceBase):
return True
- def offline_scsi_device(self):
- """Find SCSI devices associated to the zFCP device and remove them from the system."""
-
- # A list of existing SCSI devices in format Host:Bus:Target:Lun
- scsi_devices = [f for f in os.listdir(scsidevsysfs) if re.search(r'^[0-9]+:[0-9]+:[0-9]+:[0-9]+$', f)]
-
- for scsidev in scsi_devices:
- fcpsysfs = "%s/%s" % (scsidevsysfs, scsidev)
- scsidel = "%s/%s/delete" % (scsidevsysfs, scsidev)
-
- f = open("%s/hba_id" % (fcpsysfs), "r")
- fcphbasysfs = f.readline().strip()
- f.close()
- f = open("%s/wwpn" % (fcpsysfs), "r")
- fcpwwpnsysfs = f.readline().strip()
- f.close()
- f = open("%s/fcp_lun" % (fcpsysfs), "r")
- fcplunsysfs = f.readline().strip()
- f.close()
-
- if fcphbasysfs == self.devnum \
- and fcpwwpnsysfs == self.wwpn \
- and fcplunsysfs == self.fcplun:
- logged_write_line_to_file(scsidel, "1")
- udev.settle()
- return
-
- log.warning("no scsi device found to delete for zfcp %s %s %s",
- self.devnum, self.wwpn, self.fcplun)
-
def offline_device(self):
"""Remove the zFCP device from the system."""
--
2.34.3
From f194c6e591c3e409f227fd10d3e9923af91ea893 Mon Sep 17 00:00:00 2001
From: Jan Stodola <jstodola@redhat.com>
Date: Sat, 6 Nov 2021 21:27:52 +0100
Subject: [PATCH 4/8] Allow to delete more than one SCSI device
NPIV zFCP devices can attach more than one SCSI device, so allow
deleting them all. For non-NPIV devices this means a possible
slowdown, since all SCSI devices are now checked.
Related: rhbz#1937030
---
blivet/zfcp.py | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/blivet/zfcp.py b/blivet/zfcp.py
index af8f841d..3b3f623b 100644
--- a/blivet/zfcp.py
+++ b/blivet/zfcp.py
@@ -136,6 +136,7 @@ class ZFCPDeviceBase(ABC):
# A list of existing SCSI devices in format Host:Bus:Target:Lun
scsi_devices = [f for f in os.listdir(scsidevsysfs) if re.search(r'^[0-9]+:[0-9]+:[0-9]+:[0-9]+$', f)]
+ scsi_device_found = False
for scsidev in scsi_devices:
fcpsysfs = os.path.join(scsidevsysfs, scsidev)
@@ -147,12 +148,13 @@ class ZFCPDeviceBase(ABC):
fcplunsysfs = f.readline().strip()
if self._is_scsi_associated_with_fcp(fcphbasysfs, fcpwwpnsysfs, fcplunsysfs):
+ scsi_device_found = True
scsidel = os.path.join(scsidevsysfs, scsidev, "delete")
logged_write_line_to_file(scsidel, "1")
udev.settle()
- return
- log.warning("No scsi device found to delete for zfcp %s", self)
+ if not scsi_device_found:
+ log.warning("No scsi device found to delete for zfcp %s", self)
class ZFCPDevice(ZFCPDeviceBase):
--
2.34.3
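The behavioural change is small but easy to miss: the loop no longer returns after the first hit, it just remembers whether anything matched. A condensed, hypothetical sketch of the pattern:

def delete_all_matching(devices, matches, delete):
    # NPIV devices may expose several matching SCSI devices, so scan them all
    # instead of returning after the first match.
    found = False
    for dev in devices:
        if matches(dev):
            found = True
            delete(dev)
    if not found:
        print("no scsi device found to delete")

# Toy usage: "deletes" every even number.
delete_all_matching([1, 2, 3, 4], lambda d: d % 2 == 0, lambda d: print("deleting", d))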
From f6615be663434079b3f2a86be5db88b816d8a9e1 Mon Sep 17 00:00:00 2001
From: Jan Stodola <jstodola@redhat.com>
Date: Sun, 21 Nov 2021 03:01:02 +0100
Subject: [PATCH 5/8] Add a function for reading the value of a kernel module
parameter
Related: rhbz#1937030
---
blivet/util.py | 33 +++++++++++++++++++++++++++++++++
tests/util_test.py | 11 +++++++++++
2 files changed, 44 insertions(+)
diff --git a/blivet/util.py b/blivet/util.py
index af60210b..cbef65e0 100644
--- a/blivet/util.py
+++ b/blivet/util.py
@@ -1131,3 +1131,36 @@ def detect_virt():
return False
else:
return vm[0] in ('qemu', 'kvm', 'xen')
+
+
+def natural_sort_key(device):
+ """ Sorting key for devices which makes sure partitions are sorted in natural
+ way, e.g. 'sda1, sda2, ..., sda10' and not like 'sda1, sda10, sda2, ...'
+ """
+ if device.type == "partition" and device.parted_partition and device.disk:
+ part_num = getattr(device.parted_partition, "number", -1)
+ return [device.disk.name, part_num]
+ else:
+ return [device.name, 0]
+
+
+def get_kernel_module_parameter(module, parameter):
+ """ Return the value of a given kernel module parameter
+
+ :param str module: a kernel module
+ :param str parameter: a module parameter
+ :returns: the value of the given kernel module parameter or None
+ :rtype: str
+ """
+
+ value = None
+
+ parameter_path = os.path.join("/sys/module", module, "parameters", parameter)
+ try:
+ with open(parameter_path) as f:
+ value = f.read().strip()
+ except IOError as e:
+ log.warning("Couldn't get the value of the parameter '%s' from the kernel module '%s': %s",
+ parameter, module, str(e))
+
+ return value
diff --git a/tests/util_test.py b/tests/util_test.py
index b4f82c1b..805447c7 100644
--- a/tests/util_test.py
+++ b/tests/util_test.py
@@ -182,3 +182,14 @@ class GetSysfsAttrTestCase(unittest.TestCase):
# the unicode replacement character (U+FFFD) should be used instead
model = util.get_sysfs_attr(sysfs, "model")
self.assertEqual(model, "test model\ufffd")
+
+
+class GetKernelModuleParameterTestCase(unittest.TestCase):
+
+ def test_nonexisting_kernel_module(self):
+ self.assertIsNone(util.get_kernel_module_parameter("unknown_module", "unknown_parameter"))
+
+ def test_get_kernel_module_parameter_value(self):
+ with mock.patch('blivet.util.open', mock.mock_open(read_data='value\n')):
+ value = util.get_kernel_module_parameter("module", "parameter")
+ self.assertEqual(value, "value")
--
2.34.3
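Assuming the helper lands in blivet.util as shown above, a typical call looks like this (the zfcp module exists only on s390x, so elsewhere the call simply returns None and logs a warning):

from blivet import util

# Value of the zfcp module's allow_lun_scan parameter as a string, or None
# if the module is not loaded or the parameter cannot be read.
value = util.get_kernel_module_parameter("zfcp", "allow_lun_scan")
print("automatic LUN scanning enabled:", value == "Y")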
From 17c99a2444ef750bdbf5b24665c5fd3c52e687d9 Mon Sep 17 00:00:00 2001
From: Jan Stodola <jstodola@redhat.com>
Date: Sun, 21 Nov 2021 03:01:46 +0100
Subject: [PATCH 6/8] LUN and WWPN should not be used for NPIV zFCP devices
Log a warning when activating a zFCP device in NPIV mode if a WWPN or
LUN has been provided; these values are superfluous for NPIV devices.
Related: rhbz#1937030
---
blivet/zfcp.py | 58 +++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 57 insertions(+), 1 deletion(-)
diff --git a/blivet/zfcp.py b/blivet/zfcp.py
index 3b3f623b..726e9364 100644
--- a/blivet/zfcp.py
+++ b/blivet/zfcp.py
@@ -22,6 +22,7 @@
import os
import re
from abc import ABC
+import glob
from . import udev
from . import util
from .i18n import _
@@ -47,6 +48,55 @@ zfcpsysfs = "/sys/bus/ccw/drivers/zfcp"
scsidevsysfs = "/sys/bus/scsi/devices"
zfcpconf = "/etc/zfcp.conf"
+
+def _is_lun_scan_allowed():
+ """Return True if automatic LUN scanning is enabled by the kernel."""
+
+ allow_lun_scan = util.get_kernel_module_parameter("zfcp", "allow_lun_scan")
+ return allow_lun_scan == "Y"
+
+
+def _is_port_in_npiv_mode(device_id):
+ """Return True if the device ID is configured in NPIV mode. See
+ https://www.ibm.com/docs/en/linux-on-systems?topic=devices-use-npiv
+ """
+
+ port_in_npiv_mode = False
+ port_type_path = "/sys/bus/ccw/devices/{}/host*/fc_host/host*/port_type".format(device_id)
+ port_type_paths = glob.glob(port_type_path)
+ try:
+ for filename in port_type_paths:
+ with open(filename) as f:
+ port_type = f.read()
+ if re.search(r"(^|\s)NPIV(\s|$)", port_type):
+ port_in_npiv_mode = True
+ except OSError as e:
+ log.warning("Couldn't read the port_type attribute of the %s device: %s", device_id, str(e))
+ port_in_npiv_mode = False
+
+ return port_in_npiv_mode
+
+
+def is_npiv_enabled(device_id):
+ """Return True if the given zFCP device ID is configured and usable in
+ NPIV (N_Port ID Virtualization) mode.
+
+ :returns: True or False
+ """
+
+ # LUN scanning disabled by the kernel module prevents using the device in NPIV mode
+ if not _is_lun_scan_allowed():
+ log.warning("Automatic LUN scanning is disabled by the zfcp kernel module.")
+ return False
+
+ # The port itself has to be configured in NPIV mode
+ if not _is_port_in_npiv_mode(device_id):
+ log.warning("The zFCP device %s is not configured in NPIV mode.", device_id)
+ return False
+
+ return True
+
+
class ZFCPDeviceBase(ABC):
"""An abstract base class for zFCP storage devices."""
@@ -203,6 +253,13 @@ class ZFCPDevice(ZFCPDeviceBase):
unitdir = "%s/%s" % (portdir, self.fcplun)
failed = "%s/failed" % (unitdir)
+ # Activating an NPIV-enabled device using devnum, WWPN and LUN should still be possible,
+ # as this method was used as a workaround until support for NPIV-enabled devices was
+ # implemented. Just log a warning message and continue.
+ if is_npiv_enabled(self.devnum):
+ log.warning("zFCP device %s in NPIV mode brought online. All LUNs will be activated "
+ "automatically although WWPN and LUN have been provided.", self.devnum)
+
# create the sysfs directory for the WWPN/port
if not os.path.exists(portdir):
if os.path.exists(portadd):
@@ -327,7 +384,6 @@ class ZFCPDevice(ZFCPDeviceBase):
return True
else:
# newer zfcp sysfs interface with auto port scan
- import glob
luns = glob.glob("%s/0x????????????????/0x????????????????"
% (devdir,))
if len(luns) != 0:
--
2.34.3
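For reference, the port check can be reproduced outside blivet in a few lines. The sysfs layout matches the patch; the device bus ID below is an example, and on s390x an NPIV port typically reports a port_type such as "NPIV VPORT", hence the whole-word match:

import glob
import re

def port_is_npiv(device_id):
    # Each fc_host created on top of the CCW device publishes a port_type
    # attribute; a value containing the word NPIV indicates NPIV mode.
    pattern = "/sys/bus/ccw/devices/{}/host*/fc_host/host*/port_type".format(device_id)
    for filename in glob.glob(pattern):
        with open(filename) as f:
            if re.search(r"(^|\s)NPIV(\s|$)", f.read()):
                return True
    return False

print(port_is_npiv("0.0.fc00"))   # example device bus ID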
From a8f97bd0d74e3da9c18bd03d968f5d2f0c3ee46f Mon Sep 17 00:00:00 2001
From: Jan Stodola <jstodola@redhat.com>
Date: Sat, 6 Nov 2021 21:27:52 +0100
Subject: [PATCH 7/8] Add new class for NPIV-enabled devices
Related: rhbz#1937030
---
blivet/zfcp.py | 53 +++++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 50 insertions(+), 3 deletions(-)
diff --git a/blivet/zfcp.py b/blivet/zfcp.py
index 726e9364..e6c0e48a 100644
--- a/blivet/zfcp.py
+++ b/blivet/zfcp.py
@@ -397,6 +397,44 @@ class ZFCPDevice(ZFCPDeviceBase):
return True
+class ZFCPNPIVDevice(ZFCPDeviceBase):
+ """Class for zFCP devices configured in NPIV mode. Only a zFCP device number is
+ needed for such devices.
+ """
+
+ def online_device(self):
+ """Initialize the device and make its storage block device(s) ready to use.
+
+ :returns: True if success
+ :raises: ValueError if the device cannot be initialized
+ """
+
+ super().online_device()
+
+ if not is_npiv_enabled(self.devnum):
+ raise ValueError(_("zFCP device %s cannot be used in NPIV mode.") % self)
+
+ return True
+
+ def offline_device(self):
+ """Remove the zFCP device from the system.
+
+ :returns: True if success
+ :raises: ValueError if the device cannot be brought offline
+ """
+
+ try:
+ self.offline_scsi_device()
+ except OSError as e:
+ raise ValueError(_("Could not correctly delete SCSI device of "
+ "zFCP %(zfcpdev)s (%(e)s).")
+ % {'zfcpdev': self, 'e': e})
+
+ self._set_zfcp_device_offline()
+
+ return True
+
+
class zFCP:
""" ZFCP utility class.
@@ -439,7 +477,12 @@ class zFCP:
fields = line.split()
- if len(fields) == 3:
+ # NPIV enabled device
+ if len(fields) == 1:
+ devnum = fields[0]
+ wwpn = None
+ fcplun = None
+ elif len(fields) == 3:
devnum = fields[0]
wwpn = fields[1]
fcplun = fields[2]
@@ -458,8 +501,12 @@ class zFCP:
except ValueError as e:
log.warning("%s", str(e))
- def add_fcp(self, devnum, wwpn, fcplun):
- d = ZFCPDevice(devnum, wwpn, fcplun)
+ def add_fcp(self, devnum, wwpn=None, fcplun=None):
+ if wwpn and fcplun:
+ d = ZFCPDevice(devnum, wwpn, fcplun)
+ else:
+ d = ZFCPNPIVDevice(devnum)
+
if d.online_device():
self.fcpdevs.add(d)
--
2.34.3
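With add_fcp() now dispatching on the presence of WWPN/LUN, /etc/zfcp.conf may contain both one-field (NPIV) and three-field (traditional) lines. A minimal sketch of that parsing, independent of the class hierarchy (function name and error text are illustrative):

def parse_zfcp_conf_line(line):
    """Return (devnum, wwpn, fcplun); wwpn and fcplun are None for NPIV entries."""
    fields = line.split()
    if len(fields) == 1:        # NPIV-enabled device: device bus ID only
        return fields[0], None, None
    if len(fields) == 3:        # traditional entry: device bus ID, WWPN, LUN
        return fields[0], fields[1], fields[2]
    raise ValueError("unexpected number of fields in: %r" % line)

print(parse_zfcp_conf_line("0.0.fc00"))
print(parse_zfcp_conf_line("0.0.fc00 0x5005076300c213e9 0x401040a000000000"))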
From 963822ff989c938e74d582216f4f7ded595eccc1 Mon Sep 17 00:00:00 2001
From: Jan Stodola <jstodola@redhat.com>
Date: Sat, 20 Nov 2021 23:12:43 +0100
Subject: [PATCH 8/8] Generate correct dracut boot arguments for NPIV devices
NPIV-enabled devices need only the device ID; WWPNs and LUNs are discovered
automatically by the kernel module.
Resolves: rhbz#1937030
---
blivet/devices/disk.py | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index 67a01ba6..36278507 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -577,7 +577,15 @@ class ZFCPDiskDevice(DiskDevice):
'lun': self.fcp_lun}
def dracut_setup_args(self):
- return set(["rd.zfcp=%s,%s,%s" % (self.hba_id, self.wwpn, self.fcp_lun,)])
+ from ..zfcp import is_npiv_enabled
+
+ # zFCP devices in NPIV mode need only the device ID
+ if is_npiv_enabled(self.hba_id):
+ dracut_args = set(["rd.zfcp=%s" % self.hba_id])
+ else:
+ dracut_args = set(["rd.zfcp=%s,%s,%s" % (self.hba_id, self.wwpn, self.fcp_lun,)])
+
+ return dracut_args
class DASDDevice(DiskDevice):
--
2.34.3
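The effect on the generated kernel command line is easiest to see side by side; a hedged sketch of the two forms as a standalone function, with an example device triple:

def zfcp_dracut_args(hba_id, wwpn=None, fcp_lun=None, npiv=False):
    # In NPIV mode the zfcp kernel module discovers WWPNs and LUNs itself,
    # so dracut only needs the device bus ID.
    if npiv:
        return {"rd.zfcp=%s" % hba_id}
    return {"rd.zfcp=%s,%s,%s" % (hba_id, wwpn, fcp_lun)}

print(zfcp_dracut_args("0.0.fc00", npiv=True))
print(zfcp_dracut_args("0.0.fc00", "0x5005076300c213e9", "0x401040a000000000"))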

View File

@ -1,129 +0,0 @@
From 5d54b2ede698d5084aa6c780295fcc9aafbfa357 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 9 May 2022 13:38:50 +0200
Subject: [PATCH] Add a very simple NVMe module
This covers only the basic functionality needed by Anaconda right
now: populating the config files in /etc/nvme and copying them to
the installed system. The API for the NVMe singleton is based on
the similar modules for iSCSI and FCoE.
Resolves: rhbz#2073008
---
blivet/errors.py | 14 +++++++++
blivet/nvme.py | 81 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 95 insertions(+)
create mode 100644 blivet/nvme.py
diff --git a/blivet/errors.py b/blivet/errors.py
index fd51283f..b16cf2c5 100644
--- a/blivet/errors.py
+++ b/blivet/errors.py
@@ -307,3 +307,17 @@ class EventHandlingError(StorageError):
class ThreadError(StorageError):
""" An error occurred in a non-main thread. """
+
+# other
+
+
+class FCoEError(StorageError, OSError):
+ pass
+
+
+class ISCSIError(StorageError, OSError):
+ pass
+
+
+class NVMeError(StorageError, OSError):
+ pass
diff --git a/blivet/nvme.py b/blivet/nvme.py
new file mode 100644
index 00000000..17bead15
--- /dev/null
+++ b/blivet/nvme.py
@@ -0,0 +1,81 @@
+#
+# nvme.py - NVMe class
+#
+# Copyright (C) 2022 Red Hat, Inc. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import shutil
+
+from . import errors
+from . import util
+
+import logging
+log = logging.getLogger("blivet")
+
+HOSTNQN_FILE = "/etc/nvme/hostnqn"
+HOSTID_FILE = "/etc/nvme/hostid"
+
+
+class NVMe(object):
+ """ NVMe utility class.
+
+ .. warning::
+ Since this is a singleton class, calling deepcopy() on the instance
+ just returns ``self`` with no copy being created.
+ """
+
+ def __init__(self):
+ self.started = False
+
+ # So that users can write nvme() to get the singleton instance
+ def __call__(self):
+ return self
+
+ def __deepcopy__(self, memo_dict): # pylint: disable=unused-argument
+ return self
+
+ def startup(self):
+ if self.started:
+ return
+
+ rc, nqn = util.run_program_and_capture_output(["nvme", "gen-hostnqn"])
+ if rc != 0:
+ raise errors.NVMeError("Failed to generate hostnqn")
+
+ with open(HOSTNQN_FILE, "w") as f:
+ f.write(nqn)
+
+ rc, hid = util.run_program_and_capture_output(["dmidecode", "-s", "system-uuid"])
+ if rc != 0:
+ raise errors.NVMeError("Failed to generate host ID")
+
+ with open(HOSTID_FILE, "w") as f:
+ f.write(hid)
+
+ self.started = True
+
+ def write(self, root): # pylint: disable=unused-argument
+ # copy the hostnqn and hostid files
+ if not os.path.isdir(root + "/etc/nvme"):
+ os.makedirs(root + "/etc/nvme", 0o755)
+ shutil.copyfile(HOSTNQN_FILE, root + HOSTNQN_FILE)
+ shutil.copyfile(HOSTID_FILE, root + HOSTID_FILE)
+
+
+# Create nvme singleton
+nvme = NVMe()
+""" An instance of :class:`NVMe` """
--
2.34.3
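Given the API above, a consumer such as Anaconda would be expected to drive the singleton roughly as follows (a sketch: startup() shells out to the nvme and dmidecode tools, so it needs both installed and root privileges; the target root path is an example):

from blivet.nvme import nvme

# Generate /etc/nvme/hostnqn and /etc/nvme/hostid in the installer
# environment (no-op if startup() already ran).
nvme.startup()

# Copy both files into the installed system's root.
nvme.write("/mnt/sysimage")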

View File

@ -19,11 +19,11 @@
Summary: A python module for system storage configuration
Name: python-blivet
Url: https://storageapis.wordpress.com/projects/blivet
Version: 3.4.0
Version: 3.6.0
#%%global prerelease .b2
# prerelease, if defined, should be something like .a1, .b1, .b2.dev1, or .c2
Release: 16%{?prerelease}%{?dist}
Release: 5%{?prerelease}%{?dist}
Epoch: 1
License: LGPLv2+
%global realname blivet
@ -33,29 +33,14 @@ Source1: http://github.com/storaged-project/blivet/archive/%{realname}-%{realver
Patch0: 0001-force-lvm-cli-plugin.patch
Patch1: 0002-remove-btrfs-plugin.patch
Patch2: 0003-Revert-More-consistent-lvm-errors.patch
Patch3: 0005-Fix-unify-importing-mock-module-in-tests.patch
Patch4: 0006-Fix-util.virt_detect-on-Xen.patch
Patch5: 0007-Fix-activating-old-style-LVM-snapshots.patch
Patch6: 0008-Fix-resolving-devices-with-names-that-look-like-BIOS.patch
Patch7: 0009-Do-not-set-chunk-size-for-RAID1.patch
Patch8: 0010-Fix-running-tests-in-gating.patch
Patch9: 0011-Tell-LVM-to-ignore-the-new-devices-file-for-now.patch
Patch10: 0012-Improve-error-message-printed-for-missing-dependecie.patch
Patch11: 0013-Use-bigger-chunk-size-for-thinpools-bigger-than-15.8.patch
Patch12: 0014-LVM-devices-file-support.patch
Patch13: 0015-iscsi-Replace-all-log_exception_info-calls-with-log.patch
Patch14: 0016-Fix-log-message-for-the-LVM-devices-filter.patch
Patch15: 0017-Exclude-unusable-disks-from-PartitionFactory.patch
Patch16: 0018-Fix-getting-PV-info-in-LVMPhysicalVolume-from-the-ca.patch
Patch17: 0019-Do-not-crash-when-changing-disklabel-on-disks-with-a.patch
Patch18: 0020-ActionDestroyDevice-should-not-obsolete-ActionRemove.patch
Patch19: 0021-Correctly-set-vg_name-after-adding-removing-a-PV-fro.patch
Patch20: 0022-Add-support-for-creating-LVM-cache-pools.patch
Patch21: 0023-Use-LVM-PV-format-current_size-in-LVMVolumeGroupDevi.patch
Patch22: 0024-tests-Mark-fake-disks-in-test_get_related_disks-as-n.patch
Patch23: 0025-Add-support-for-NPIV-enabled-zFCP-devices.patch
Patch24: 0026-Add-a-very-simple-NVMe-module.patch
Patch25: 0027-DDF-RAID-support-using-mdadm.patch
Patch3: 0004-DDF-RAID-support-using-mdadm.patch
Patch4: 0005-Revert-Remove-the-Blivet.roots-attribute.patch
Patch5: 0006-Fix-potential-AttributeError-when-getting-stratis-bl.patch
Patch6: 0007-tests-Skip-XFS-resize-test-on-CentOS-RHEL-9.patch
Patch7: 0008-Revert-Adjust-to-new-XFS-min-size.patch
Patch8: 0009-Catch-BlockDevNotImplementedError-for-btrfs-plugin-c.patch
Patch9: 0010-Add-basic-support-for-NVMe-and-NVMe-Fabrics-devices.patch
Patch10: 0011-Default-to-encryption-sector-size-512-for-LUKS-devic.patch
# Versions of required components (done so we make sure the buildrequires
# match the requires versions of things).
@ -163,6 +148,7 @@ Recommends: libblockdev-lvm >= %{libblockdevver}
Recommends: libblockdev-mdraid >= %{libblockdevver}
Recommends: libblockdev-mpath >= %{libblockdevver}
Recommends: libblockdev-nvdimm >= %{libblockdevver}
Recommends: libblockdev-nvme >= %{libblockdevver}
Recommends: libblockdev-part >= %{libblockdevver}
Recommends: libblockdev-swap >= %{libblockdevver}
Recommends: libblockdev-s390 >= %{libblockdevver}
@ -218,6 +204,30 @@ configuration.
%endif
%changelog
* Thu Jan 19 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-5
- Default to encryption sector size 512 for LUKS devices
Resolves: rhbz#2103800
* Tue Dec 13 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-4
- Add basic support for NVMe and NVMe Fabrics devices
Resolves: rhbz#2123337
* Thu Nov 03 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-3
- Catch BlockDevNotImplementedError for btrfs plugin calls
Resolves: rhbz#2139166
- Revert "Adjust to new XFS min size"
Resolves: rhbz#2139189
* Thu Oct 20 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-2
- Fix potential AttributeError when getting stratis blockdev info
Related: rhbz#2123711
- tests: Skip XFS resize test on CentOS/RHEL 9
Related: rhbz#2123711
* Thu Oct 13 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-1
- Rebase to the latest upstream release 3.6.0
Resolves: rhbz#2123711
* Thu Aug 18 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-16
- DDF RAID support using mdadm
Resolves: rhbz#2109030