Compare commits
No commits in common. "c8" and "imports/c8-beta/python-blivet-3.1.0-18.el8" have entirely different histories.
.gitignore  (vendored, 4 changed lines)
@@ -1,2 +1,2 @@
-SOURCES/blivet-3.6.0-tests.tar.gz
-SOURCES/blivet-3.6.0.tar.gz
+SOURCES/blivet-3.1.0-tests.tar.gz
+SOURCES/blivet-3.1.0.tar.gz
.python-blivet.metadata  (new file, 2 lines)
@@ -0,0 +1,2 @@
+4bd8abd1cb7bffa644cffb017f6583a2fd7c19f9 SOURCES/blivet-3.1.0-tests.tar.gz
+f388d30e55dfaa9c22415c2e9e3f9670f9d08f27 SOURCES/blivet-3.1.0.tar.gz
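The two entries above follow the dist-git lookaside convention: each line pairs a SHA-1 checksum with a source tarball that is downloaded separately rather than stored in git. A minimal, hypothetical check of a fetched tarball against the recorded checksum (file names taken from the lines above; the helper itself is not part of this repository):

import hashlib

def sha1_of(path):
    # stream the file so large tarballs do not have to fit in memory
    digest = hashlib.sha1()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

# checksum copied from the .python-blivet.metadata line above
expected = "f388d30e55dfaa9c22415c2e9e3f9670f9d08f27"
assert sha1_of("SOURCES/blivet-3.1.0.tar.gz") == expected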
@@ -1,4 +1,4 @@
-From 83ccc9f9f14845fcce7a5ba5fa21fbb97b1dbbb7 Mon Sep 17 00:00:00 2001
+From 2f90040ff66eacc9715e370cd49ffb72d8d1f36f Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 11 Jul 2018 15:36:24 +0200
Subject: [PATCH] Force command line based libblockdev LVM plugin

@@ -8,7 +8,7 @@ Subject: [PATCH] Force command line based libblockdev LVM plugin
1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/blivet/__init__.py b/blivet/__init__.py
-index dd8d0f54..62cc539a 100644
+index c5a75bb..cb75917 100644
--- a/blivet/__init__.py
+++ b/blivet/__init__.py
@@ -63,11 +63,16 @@ gi.require_version("BlockDev", "2.0")

@@ -31,5 +31,5 @@ index dd8d0f54..62cc539a 100644
# do not check for dependencies during libblockdev initializtion, do runtime
# checks instead
--
-2.38.1
+1.8.3.1

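Only the regenerated patch header differs in this compare view; the patch itself makes blivet ask libblockdev for the command-line backed LVM plugin instead of the DBus one. A rough sketch of that pattern, assuming the libblockdev 2.x GObject API that blivet/__init__.py uses (the plugin-name set and the .so name below are illustrative, not taken from the patch body):

import gi
gi.require_version("BlockDev", "2.0")
from gi.repository import BlockDev as blockdev

# Request the usual plugins, then pin LVM to the command-line implementation
# by naming its shared object explicitly (assumed name, not from the patch body).
requested = blockdev.plugin_specs_from_names({"swap", "crypto", "dm", "lvm", "mdraid"})
lvm_plugin = blockdev.PluginSpec()
lvm_plugin.name = blockdev.Plugin.LVM
lvm_plugin.so_name = "libbd_lvm.so.2"
requested.append(lvm_plugin)
blockdev.try_reinit(require_plugins=requested, reload=False)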
@@ -1,4 +1,4 @@
-From c098d4112635b3ea55d5bd7e1817edbd519735fc Mon Sep 17 00:00:00 2001
+From 6bf3378d3d2a1b6a4338df0c4dd36a783a641633 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 16 Jul 2018 14:26:11 +0200
Subject: [PATCH] Remove btrfs from requested libblockdev plugins

@@ -8,7 +8,7 @@ Subject: [PATCH] Remove btrfs from requested libblockdev plugins
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/blivet/__init__.py b/blivet/__init__.py
-index 62cc539a..bbc7ea3a 100644
+index cb75917..09f8b1c 100644
--- a/blivet/__init__.py
+++ b/blivet/__init__.py
@@ -63,9 +63,9 @@ gi.require_version("BlockDev", "2.0")

@@ -24,5 +24,5 @@ index 62cc539a..bbc7ea3a 100644
_requested_plugins = blockdev.plugin_specs_from_names(_REQUESTED_PLUGIN_NAMES)
# XXX force non-dbus LVM plugin
--
-2.38.1
+1.8.3.1

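The companion change above drops "btrfs" from the plugin names blivet passes to libblockdev, so a missing btrfs plugin no longer blocks initialization. A short sketch of the resulting request, with an assumed plugin-name set (the real set lives in blivet/__init__.py, which this view does not show):

import gi
gi.require_version("BlockDev", "2.0")
from gi.repository import BlockDev as blockdev

# Assumed plugin-name set: "btrfs" is intentionally absent.
REQUESTED_PLUGIN_NAMES = {"swap", "crypto", "loop", "dm", "lvm", "mdraid", "mpath", "fs"}
requested = blockdev.plugin_specs_from_names(REQUESTED_PLUGIN_NAMES)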
@@ -1,330 +0,0 @@
From f6f90805020d7c6ac46f17a13a00f319fc4351f6 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 26 May 2021 12:15:54 +0200
Subject: [PATCH] Revert "More consistent lvm errors (API break)"

This reverts commit 49ec071c6d0673224a0774d613904387c52c7381.
---
blivet/devices/lvm.py | 72 +++++++++++------------
tests/unit_tests/devices_test/lvm_test.py | 14 ++---
2 files changed, 43 insertions(+), 43 deletions(-)

diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 38e49e18..b8595d63 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -304,7 +304,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
def _add_log_vol(self, lv):
""" Add an LV to this VG. """
if lv in self._lvs:
- raise errors.DeviceError("lv is already part of this vg")
+ raise ValueError("lv is already part of this vg")

# verify we have the space, then add it
# do not verify for growing vg (because of ks)
@@ -337,7 +337,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
def _remove_log_vol(self, lv):
""" Remove an LV from this VG. """
if lv not in self.lvs:
- raise errors.DeviceError("specified lv is not part of this vg")
+ raise ValueError("specified lv is not part of this vg")

self._lvs.remove(lv)

@@ -430,7 +430,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
@thpool_reserve.setter
def thpool_reserve(self, value):
if value is not None and not isinstance(value, ThPoolReserveSpec):
- raise AttributeError("Invalid thpool_reserve given, must be of type ThPoolReserveSpec")
+ raise ValueError("Invalid thpool_reserve given, must be of type ThPoolReserveSpec")
self._thpool_reserve = value

@property
@@ -665,14 +665,14 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo", "cache-pool"] + lvm.raid_seg_types:
raise ValueError("Invalid or unsupported segment type: %s" % seg_type)
if seg_type and seg_type in lvm.raid_seg_types and not pvs:
- raise errors.DeviceError("List of PVs has to be given for every non-linear LV")
+ raise ValueError("List of PVs has to be given for every non-linear LV")
elif (not seg_type or seg_type == "linear") and pvs:
if not all(isinstance(pv, LVPVSpec) for pv in pvs):
- raise errors.DeviceError("Invalid specification of PVs for a linear LV: either no or complete "
- "specification (with all space split into PVs has to be given")
+ raise ValueError("Invalid specification of PVs for a linear LV: either no or complete "
+ "specification (with all space split into PVs has to be given")
elif sum(spec.size for spec in pvs) != size:
- raise errors.DeviceError("Invalid specification of PVs for a linear LV: the sum of space "
- "assigned to PVs is not equal to the size of the LV")
+ raise ValueError("Invalid specification of PVs for a linear LV: the sum of space "
+ "assigned to PVs is not equal to the size of the LV")

# When this device's format is set in the superclass constructor it will
# try to access self.snapshots.
@@ -721,13 +721,13 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
self._from_lvs = from_lvs
if self._from_lvs:
if exists:
- raise errors.DeviceError("Only new LVs can be created from other LVs")
+ raise ValueError("Only new LVs can be created from other LVs")
if size or maxsize or percent:
- raise errors.DeviceError("Cannot specify size for a converted LV")
+ raise ValueError("Cannot specify size for a converted LV")
if fmt:
- raise errors.DeviceError("Cannot specify format for a converted LV")
+ raise ValueError("Cannot specify format for a converted LV")
if any(lv.vg != self.vg for lv in self._from_lvs):
- raise errors.DeviceError("Conversion of LVs only possible inside a VG")
+ raise ValueError("Conversion of LVs only possible inside a VG")

self._cache = None
if cache_request and not self.exists:
@@ -746,13 +746,13 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
elif isinstance(pv_spec, StorageDevice):
self._pv_specs.append(LVPVSpec(pv_spec, Size(0)))
else:
- raise AttributeError("Invalid PV spec '%s' for the '%s' LV" % (pv_spec, self.name))
+ raise ValueError("Invalid PV spec '%s' for the '%s' LV" % (pv_spec, self.name))
# Make sure any destination PVs are actually PVs in this VG
if not set(spec.pv for spec in self._pv_specs).issubset(set(self.vg.parents)):
missing = [r.name for r in
set(spec.pv for spec in self._pv_specs).difference(set(self.vg.parents))]
msg = "invalid destination PV(s) %s for LV %s" % (missing, self.name)
- raise errors.DeviceError(msg)
+ raise ValueError(msg)
if self._pv_specs:
self._assign_pv_space()

@@ -1130,7 +1130,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
else:
msg = "the specified internal LV '%s' doesn't belong to this LV ('%s')" % (int_lv.lv_name,
self.name)
- raise errors.DeviceError(msg)
+ raise ValueError(msg)

def populate_ksdata(self, data):
super(LVMLogicalVolumeBase, self).populate_ksdata(data)
@@ -1229,7 +1229,7 @@ class LVMInternalLogicalVolumeMixin(object):
def _init_check(self):
# an internal LV should have no parents
if self._parent_lv and self._parents:
- raise errors.DeviceError("an internal LV should have no parents")
+ raise ValueError("an internal LV should have no parents")

@property
def is_internal_lv(self):
@@ -1289,7 +1289,7 @@ class LVMInternalLogicalVolumeMixin(object):

@readonly.setter
def readonly(self, value): # pylint: disable=unused-argument
- raise errors.DeviceError("Cannot make an internal LV read-write")
+ raise ValueError("Cannot make an internal LV read-write")

@property
def type(self):
@@ -1325,7 +1325,7 @@ class LVMInternalLogicalVolumeMixin(object):
def _check_parents(self):
# an internal LV should have no parents
if self._parents:
- raise errors.DeviceError("an internal LV should have no parents")
+ raise ValueError("an internal LV should have no parents")

def _add_to_parents(self):
# nothing to do here, an internal LV has no parents (in the DeviceTree's
@@ -1335,13 +1335,13 @@ class LVMInternalLogicalVolumeMixin(object):
# internal LVs follow different rules limitting size
def _set_size(self, newsize):
if not isinstance(newsize, Size):
- raise AttributeError("new size must of type Size")
+ raise ValueError("new size must of type Size")

if not self.takes_extra_space:
if newsize <= self.parent_lv.size: # pylint: disable=no-member
self._size = newsize # pylint: disable=attribute-defined-outside-init
else:
- raise errors.DeviceError("Internal LV cannot be bigger than its parent LV")
+ raise ValueError("Internal LV cannot be bigger than its parent LV")
else:
# same rules apply as for any other LV
raise NotTypeSpecific()
@@ -1419,18 +1419,18 @@ class LVMSnapshotMixin(object):
return

if self.origin and not isinstance(self.origin, LVMLogicalVolumeDevice):
- raise errors.DeviceError("lvm snapshot origin must be a logical volume")
+ raise ValueError("lvm snapshot origin must be a logical volume")
if self.vorigin and not self.exists:
- raise errors.DeviceError("only existing vorigin snapshots are supported")
+ raise ValueError("only existing vorigin snapshots are supported")

if isinstance(self.origin, LVMLogicalVolumeDevice) and \
isinstance(self.parents[0], LVMVolumeGroupDevice) and \
self.origin.vg != self.parents[0]:
- raise errors.DeviceError("lvm snapshot and origin must be in the same vg")
+ raise ValueError("lvm snapshot and origin must be in the same vg")

if self.is_thin_lv:
if self.origin and self.size and not self.exists:
- raise errors.DeviceError("thin snapshot size is determined automatically")
+ raise ValueError("thin snapshot size is determined automatically")

@property
def is_snapshot_lv(self):
@@ -1606,7 +1606,7 @@ class LVMThinPoolMixin(object):
def _check_from_lvs(self):
if self._from_lvs:
if len(self._from_lvs) != 2:
- raise errors.DeviceError("two LVs required to create a thin pool")
+ raise ValueError("two LVs required to create a thin pool")

def _convert_from_lvs(self):
data_lv, metadata_lv = self._from_lvs
@@ -1652,7 +1652,7 @@ class LVMThinPoolMixin(object):
def _add_log_vol(self, lv):
""" Add an LV to this pool. """
if lv in self._lvs:
- raise errors.DeviceError("lv is already part of this vg")
+ raise ValueError("lv is already part of this vg")

# TODO: add some checking to prevent overcommit for preexisting
self.vg._add_log_vol(lv)
@@ -1663,7 +1663,7 @@ class LVMThinPoolMixin(object):
def _remove_log_vol(self, lv):
""" Remove an LV from this pool. """
if lv not in self._lvs:
- raise errors.DeviceError("specified lv is not part of this vg")
+ raise ValueError("specified lv is not part of this vg")

self._lvs.remove(lv)
self.vg._remove_log_vol(lv)
@@ -1772,14 +1772,14 @@ class LVMThinLogicalVolumeMixin(object):
"""Check that this device has parents as expected"""
if isinstance(self.parents, (list, ParentList)):
if len(self.parents) != 1:
- raise errors.DeviceError("constructor requires a single thin-pool LV")
+ raise ValueError("constructor requires a single thin-pool LV")

container = self.parents[0]
else:
container = self.parents

if not container or not isinstance(container, LVMLogicalVolumeDevice) or not container.is_thin_pool:
- raise errors.DeviceError("constructor requires a thin-pool LV")
+ raise ValueError("constructor requires a thin-pool LV")

@property
def is_thin_lv(self):
@@ -1816,7 +1816,7 @@ class LVMThinLogicalVolumeMixin(object):

def _set_size(self, newsize):
if not isinstance(newsize, Size):
- raise AttributeError("new size must of type Size")
+ raise ValueError("new size must of type Size")

newsize = self.vg.align(newsize)
newsize = self.vg.align(util.numeric_type(newsize))
@@ -2499,7 +2499,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
container = self.parents

if not isinstance(container, LVMVolumeGroupDevice):
- raise AttributeError("constructor requires a LVMVolumeGroupDevice")
+ raise ValueError("constructor requires a LVMVolumeGroupDevice")

@type_specific
def _add_to_parents(self):
@@ -2510,12 +2510,12 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
@type_specific
def _check_from_lvs(self):
"""Check the LVs to create this LV from"""
- raise errors.DeviceError("Cannot create a new LV of type '%s' from other LVs" % self.seg_type)
+ raise ValueError("Cannot create a new LV of type '%s' from other LVs" % self.seg_type)

@type_specific
def _convert_from_lvs(self):
"""Convert the LVs to create this LV from into its internal LVs"""
- raise errors.DeviceError("Cannot create a new LV of type '%s' from other LVs" % self.seg_type)
+ raise ValueError("Cannot create a new LV of type '%s' from other LVs" % self.seg_type)

@property
def external_dependencies(self):
@@ -2535,7 +2535,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
@type_specific
def _set_size(self, newsize):
if not isinstance(newsize, Size):
- raise AttributeError("new size must be of type Size")
+ raise ValueError("new size must be of type Size")

newsize = self.vg.align(newsize)
log.debug("trying to set lv %s size to %s", self.name, newsize)
@@ -2544,7 +2544,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
# space for it. A similar reasoning applies to shrinking the LV.
if not self.exists and newsize > self.size and newsize > self.vg.free_space + self.vg_space_used:
log.error("failed to set size: %s short", newsize - (self.vg.free_space + self.vg_space_used))
- raise errors.DeviceError("not enough free space in volume group")
+ raise ValueError("not enough free space in volume group")

LVMLogicalVolumeBase._set_size(self, newsize)

@@ -2910,7 +2910,7 @@ class LVMCache(Cache):
spec.size = spec.pv.format.free
space_to_assign -= spec.pv.format.free
if space_to_assign > 0:
- raise errors.DeviceError("Not enough free space in the PVs for this cache: %s short" % space_to_assign)
+ raise ValueError("Not enough free space in the PVs for this cache: %s short" % space_to_assign)

@property
def size(self):
diff --git a/tests/unit_tests/devices_test/lvm_test.py b/tests/unit_tests/devices_test/lvm_test.py
index 47613fdc..995c2da4 100644
--- a/tests/unit_tests/devices_test/lvm_test.py
+++ b/tests/unit_tests/devices_test/lvm_test.py
@@ -32,10 +32,10 @@ class LVMDeviceTest(unittest.TestCase):
lv = LVMLogicalVolumeDevice("testlv", parents=[vg],
fmt=blivet.formats.get_format("xfs"))

- with six.assertRaisesRegex(self, errors.DeviceError, "lvm snapshot origin must be a logical volume"):
+ with six.assertRaisesRegex(self, ValueError, "lvm snapshot origin must be a logical volume"):
LVMLogicalVolumeDevice("snap1", parents=[vg], origin=pv)

- with six.assertRaisesRegex(self, errors.DeviceError, "only existing vorigin snapshots are supported"):
+ with six.assertRaisesRegex(self, ValueError, "only existing vorigin snapshots are supported"):
LVMLogicalVolumeDevice("snap1", parents=[vg], vorigin=True)

lv.exists = True
@@ -60,7 +60,7 @@ class LVMDeviceTest(unittest.TestCase):
pool = LVMLogicalVolumeDevice("pool1", parents=[vg], size=Size("500 MiB"), seg_type="thin-pool")
thinlv = LVMLogicalVolumeDevice("thinlv", parents=[pool], size=Size("200 MiB"), seg_type="thin")

- with six.assertRaisesRegex(self, errors.DeviceError, "lvm snapshot origin must be a logical volume"):
+ with six.assertRaisesRegex(self, ValueError, "lvm snapshot origin must be a logical volume"):
LVMLogicalVolumeDevice("snap1", parents=[pool], origin=pv, seg_type="thin")

# now make the constructor succeed so we can test some properties
@@ -310,21 +310,21 @@ class LVMDeviceTest(unittest.TestCase):
vg = LVMVolumeGroupDevice("testvg", parents=[pv, pv2])

# pvs have to be specified for non-linear LVs
- with self.assertRaises(errors.DeviceError):
+ with self.assertRaises(ValueError):
lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
fmt=blivet.formats.get_format("xfs"),
exists=False, seg_type="raid1")
- with self.assertRaises(errors.DeviceError):
+ with self.assertRaises(ValueError):
lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
fmt=blivet.formats.get_format("xfs"),
exists=False, seg_type="striped")

# no or complete specification has to be given for linear LVs
- with self.assertRaises(errors.DeviceError):
+ with self.assertRaises(ValueError):
lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
fmt=blivet.formats.get_format("xfs"),
exists=False, pvs=[pv])
- with self.assertRaises(errors.DeviceError):
+ with self.assertRaises(ValueError):
pv_spec = LVPVSpec(pv, Size("256 MiB"))
pv_spec2 = LVPVSpec(pv2, Size("250 MiB"))
lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
--
2.38.1

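The deleted patch above reverts blivet's newer DeviceError-based validation in the LVM device classes back to plain ValueError, which is what the 3.1.0-18.el8 code and its tests expect. A small usage sketch based on the test changes shown (the setup mirrors lvm_test.py; sizes and names are illustrative):

import blivet
from blivet.devices import StorageDevice, LVMVolumeGroupDevice, LVMLogicalVolumeDevice
from blivet.size import Size

# Minimal sketch, assuming the reverted (3.1.0) behavior: invalid LVM
# arguments surface as ValueError rather than blivet.errors.DeviceError.
pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"), size=Size("1 GiB"))
vg = LVMVolumeGroupDevice("testvg", parents=[pv])

try:
    # a raid1 LV without an explicit PV list is rejected by the constructor
    LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
                           fmt=blivet.formats.get_format("xfs"),
                           exists=False, seg_type="raid1")
except ValueError as err:
    print("rejected as expected: %s" % err)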
SOURCES/0003-separate-dmraid-availability-check.patch  (new file, 56 lines)

@@ -0,0 +1,56 @@
From cd85b0a41f16c571675f04c58ec4c1a428a88a61 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 16 Aug 2018 13:00:35 +0200
Subject: [PATCH] Create a separate availability check for dmraid support

Resolves: rhbz#1617958
---
blivet/devices/disk.py | 2 +-
blivet/tasks/availability.py | 9 +++++++--
2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index 5b52330..012413c 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -225,7 +225,7 @@ class DMRaidArrayDevice(DMDevice, ContainerDevice):
_is_disk = True
_format_class_name = property(lambda s: "dmraidmember")
_format_uuid_attr = property(lambda s: None)
- _external_dependencies = [availability.BLOCKDEV_DM_PLUGIN]
+ _external_dependencies = [availability.BLOCKDEV_DM_PLUGIN_RAID]

def __init__(self, name, fmt=None,
size=None, parents=None, sysfs_path='', wwn=None):
diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py
index 24909a2..7f64c10 100644
--- a/blivet/tasks/availability.py
+++ b/blivet/tasks/availability.py
@@ -331,10 +331,14 @@ BLOCKDEV_DM_ALL_MODES = (blockdev.DMTechMode.CREATE_ACTIVATE |
blockdev.DMTechMode.QUERY)
BLOCKDEV_DM = BlockDevTechInfo(plugin_name="dm",
check_fn=blockdev.dm_is_tech_avail,
- technologies={blockdev.DMTech.MAP: BLOCKDEV_DM_ALL_MODES,
- blockdev.DMTech.RAID: BLOCKDEV_DM_ALL_MODES})
+ technologies={blockdev.DMTech.MAP: BLOCKDEV_DM_ALL_MODES})
BLOCKDEV_DM_TECH = BlockDevMethod(BLOCKDEV_DM)

+BLOCKDEV_DM_RAID = BlockDevTechInfo(plugin_name="dm",
+ check_fn=blockdev.dm_is_tech_avail,
+ technologies={blockdev.DMTech.RAID: BLOCKDEV_DM_ALL_MODES})
+BLOCKDEV_DM_TECH_RAID = BlockDevMethod(BLOCKDEV_DM_RAID)
+
# libblockdev loop plugin required technologies and modes
BLOCKDEV_LOOP_ALL_MODES = (blockdev.LoopTechMode.CREATE |
blockdev.LoopTechMode.CREATE |
@@ -399,6 +403,7 @@ BLOCKDEV_SWAP_TECH = BlockDevMethod(BLOCKDEV_SWAP)
BLOCKDEV_BTRFS_PLUGIN = blockdev_plugin("btrfs", BLOCKDEV_BTRFS_TECH)
BLOCKDEV_CRYPTO_PLUGIN = blockdev_plugin("crypto", BLOCKDEV_CRYPTO_TECH)
BLOCKDEV_DM_PLUGIN = blockdev_plugin("dm", BLOCKDEV_DM_TECH)
+BLOCKDEV_DM_PLUGIN_RAID = blockdev_plugin("dm", BLOCKDEV_DM_TECH_RAID)
BLOCKDEV_LOOP_PLUGIN = blockdev_plugin("loop", BLOCKDEV_LOOP_TECH)
BLOCKDEV_LVM_PLUGIN = blockdev_plugin("lvm", BLOCKDEV_LVM_TECH)
BLOCKDEV_MDRAID_PLUGIN = blockdev_plugin("mdraid", BLOCKDEV_MD_TECH)
--
1.8.3.1

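The new patch above gives dmraid its own libblockdev dependency object (BLOCKDEV_DM_PLUGIN_RAID) so the DM RAID technology is validated separately from the generic DM plugin check. A hedged sketch of querying such a dependency at runtime, reusing the names introduced by the patch (the available and name attributes are assumed from blivet's availability module, not shown in this view):

from blivet.devices import DMRaidArrayDevice
from blivet.tasks import availability

# DMRaidArrayDevice now lists only the RAID-specific dependency.
deps = DMRaidArrayDevice._external_dependencies  # [availability.BLOCKDEV_DM_PLUGIN_RAID]
missing = [dep for dep in deps if not dep.available]
if missing:
    print("dmraid support unavailable: %s" % ", ".join(dep.name for dep in missing))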
@@ -1,899 +0,0 @@
From d8a8d96450bf0d3458671b9b7d23d972aa540396 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 26 May 2021 12:27:34 +0200
Subject: [PATCH] Revert "Terminology cleanups"

This reverts following commits:
- 3d46339fe9cf12e9082fcbe4dc5acc9f92617e8d
- 63c9c7165e5cdfa4a47dcf0ed9d717b71e7921f2
- 8956b9af8a785ae25e0e7153d2ef0702ce2f567c
---
blivet/devicefactory.py | 24 +++----
blivet/devices/dm.py | 9 ++-
blivet/devices/loop.py | 20 +++---
blivet/devices/luks.py | 26 ++++---
blivet/errors.py | 2 +-
blivet/partitioning.py | 22 +++++-
blivet/populator/helpers/dm.py | 4 +-
blivet/populator/helpers/luks.py | 4 +-
blivet/populator/helpers/lvm.py | 2 +-
blivet/populator/helpers/mdraid.py | 14 ++--
blivet/populator/helpers/multipath.py | 8 +--
blivet/populator/populator.py | 67 ++++++++++---------
blivet/threads.py | 3 +-
blivet/udev.py | 34 +++++-----
tests/unit_tests/devicefactory_test.py | 10 +--
.../devices_test/device_size_test.py | 6 +-
tests/unit_tests/populator_test.py | 34 +++++-----
tests/unit_tests/udev_test.py | 12 ++--
tests/vmtests/vmbackedtestcase.py | 2 +-
19 files changed, 167 insertions(+), 136 deletions(-)

diff --git a/blivet/devicefactory.py b/blivet/devicefactory.py
index 6f460f6d..90082c28 100644
--- a/blivet/devicefactory.py
+++ b/blivet/devicefactory.py
@@ -859,12 +859,12 @@ class DeviceFactory(object):
parent_container.parents.remove(orig_device)

if self.encrypted and isinstance(self.device, LUKSDevice) and \
- self.raw_device.format.luks_version != self.luks_version:
- self.raw_device.format.luks_version = self.luks_version
+ self.device.slave.format.luks_version != self.luks_version:
+ self.device.slave.format.luks_version = self.luks_version

if self.encrypted and isinstance(self.device, LUKSDevice) and \
- self.raw_device.format.luks_sector_size != self.luks_sector_size:
- self.raw_device.format.luks_sector_size = self.luks_sector_size
+ self.device.slave.format.luks_sector_size != self.luks_sector_size:
+ self.device.slave.format.luks_sector_size = self.luks_sector_size

def _set_name(self):
if not self.device_name:
@@ -1201,11 +1201,11 @@ class PartitionSetFactory(PartitionFactory):
container.parents.remove(member)
self.storage.destroy_device(member)
members.remove(member)
- self.storage.format_device(member.raw_device,
+ self.storage.format_device(member.slave,
get_format(self.fstype))
- members.append(member.raw_device)
+ members.append(member.slave)
if container:
- container.parents.append(member.raw_device)
+ container.parents.append(member.slave)

continue

@@ -1227,10 +1227,10 @@ class PartitionSetFactory(PartitionFactory):

continue

- if member_encrypted and self.encrypted and self.luks_version != member.raw_device.format.luks_version:
- member.raw_device.format.luks_version = self.luks_version
- if member_encrypted and self.encrypted and self.luks_sector_size != member.raw_device.format.luks_sector_size:
- member.raw_device.format.luks_sector_size = self.luks_sector_size
+ if member_encrypted and self.encrypted and self.luks_version != member.slave.format.luks_version:
+ member.slave.format.luks_version = self.luks_version
+ if member_encrypted and self.encrypted and self.luks_sector_size != member.slave.format.luks_sector_size:
+ member.slave.format.luks_sector_size = self.luks_sector_size

##
# Prepare previously allocated member partitions for reallocation.
@@ -1290,7 +1290,7 @@ class PartitionSetFactory(PartitionFactory):

if isinstance(member, LUKSDevice):
self.storage.destroy_device(member)
- member = member.raw_device
+ member = member.slave

self.storage.destroy_device(member)

diff --git a/blivet/devices/dm.py b/blivet/devices/dm.py
index 2f936170..ae25e8e6 100644
--- a/blivet/devices/dm.py
+++ b/blivet/devices/dm.py
@@ -154,6 +154,11 @@ class DMDevice(StorageDevice):
log_method_call(self, self.name, status=self.status)
super(DMDevice, self)._set_name(value)

+ @property
+ def slave(self):
+ """ This device's backing device. """
+ return self.parents[0]
+

class DMLinearDevice(DMDevice):
_type = "dm-linear"
@@ -189,8 +194,8 @@ class DMLinearDevice(DMDevice):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status,
controllable=self.controllable)
- parent_length = self.parents[0].current_size / LINUX_SECTOR_SIZE
- blockdev.dm.create_linear(self.name, self.parents[0].path, parent_length,
+ slave_length = self.slave.current_size / LINUX_SECTOR_SIZE
+ blockdev.dm.create_linear(self.name, self.slave.path, slave_length,
self.dm_uuid)

def _post_setup(self):
diff --git a/blivet/devices/loop.py b/blivet/devices/loop.py
index 0f4d7775..78f88d7d 100644
--- a/blivet/devices/loop.py
+++ b/blivet/devices/loop.py
@@ -73,7 +73,7 @@ class LoopDevice(StorageDevice):

def update_name(self):
""" Update this device's name. """
- if not self.parents[0].status:
+ if not self.slave.status:
# if the backing device is inactive, so are we
return self.name

@@ -81,7 +81,7 @@ class LoopDevice(StorageDevice):
# if our name is loopN we must already be active
return self.name

- name = blockdev.loop.get_loop_name(self.parents[0].path)
+ name = blockdev.loop.get_loop_name(self.slave.path)
if name.startswith("loop"):
self.name = name

@@ -89,24 +89,24 @@ class LoopDevice(StorageDevice):

@property
def status(self):
- return (self.parents[0].status and
+ return (self.slave.status and
self.name.startswith("loop") and
- blockdev.loop.get_loop_name(self.parents[0].path) == self.name)
+ blockdev.loop.get_loop_name(self.slave.path) == self.name)

@property
def size(self):
- return self.parents[0].size
+ return self.slave.size

def _pre_setup(self, orig=False):
- if not os.path.exists(self.parents[0].path):
- raise errors.DeviceError("specified file (%s) does not exist" % self.parents[0].path)
+ if not os.path.exists(self.slave.path):
+ raise errors.DeviceError("specified file (%s) does not exist" % self.slave.path)
return StorageDevice._pre_setup(self, orig=orig)

def _setup(self, orig=False):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status,
controllable=self.controllable)
- blockdev.loop.setup(self.parents[0].path)
+ blockdev.loop.setup(self.slave.path)

def _post_setup(self):
StorageDevice._post_setup(self)
@@ -123,3 +123,7 @@ class LoopDevice(StorageDevice):
StorageDevice._post_teardown(self, recursive=recursive)
self.name = "tmploop%d" % self.id
self.sysfs_path = ''
+
+ @property
+ def slave(self):
+ return self.parents[0]
diff --git a/blivet/devices/luks.py b/blivet/devices/luks.py
index 2eb1f130..5ab840ea 100644
--- a/blivet/devices/luks.py
+++ b/blivet/devices/luks.py
@@ -66,13 +66,17 @@ class LUKSDevice(DMCryptDevice):

@property
def raw_device(self):
+ return self.slave
+
+ @property
+ def slave(self):
if self._has_integrity:
return self.parents[0].parents[0]
return self.parents[0]

def _get_size(self):
if not self.exists:
- size = self.raw_device.size - crypto.LUKS_METADATA_SIZE
+ size = self.slave.size - crypto.LUKS_METADATA_SIZE
elif self.resizable and self.target_size != Size(0):
size = self.target_size
else:
@@ -80,8 +84,8 @@ class LUKSDevice(DMCryptDevice):
return size

def _set_size(self, newsize):
- if not self.exists and not self.raw_device.exists:
- self.raw_device.size = newsize + crypto.LUKS_METADATA_SIZE
+ if not self.exists and not self.slave.exists:
+ self.slave.size = newsize + crypto.LUKS_METADATA_SIZE

# just run the StorageDevice._set_size to make sure we are in the format limits
super(LUKSDevice, self)._set_size(newsize - crypto.LUKS_METADATA_SIZE)
@@ -108,22 +112,22 @@ class LUKSDevice(DMCryptDevice):
raise ValueError("size is smaller than the minimum for this device")

# don't allow larger luks than size (or target size) of backing device
- if newsize > (self.raw_device.size - crypto.LUKS_METADATA_SIZE):
+ if newsize > (self.slave.size - crypto.LUKS_METADATA_SIZE):
log.error("requested size %s is larger than size of the backing device %s",
- newsize, self.raw_device.size)
+ newsize, self.slave.size)
raise ValueError("size is larger than the size of the backing device")

if self.align_target_size(newsize) != newsize:
raise ValueError("new size would violate alignment requirements")

def _get_target_size(self):
- return self.raw_device.format.target_size
+ return self.slave.format.target_size

@property
def max_size(self):
""" The maximum size this luks device can be. Maximum is based on the
maximum size of the backing device. """
- max_luks = self.raw_device.max_size - crypto.LUKS_METADATA_SIZE
+ max_luks = self.slave.max_size - crypto.LUKS_METADATA_SIZE
max_format = self.format.max_size
return min(max_luks, max_format) if max_format else max_luks

@@ -131,7 +135,7 @@ class LUKSDevice(DMCryptDevice):
def resizable(self):
""" Can this device be resized? """
return (self._resizable and self.exists and self.format.resizable and
- self.raw_device.resizable and not self._has_integrity)
+ self.slave.resizable and not self._has_integrity)

def resize(self):
# size of LUKSDevice depends on size of the LUKS format on backing
@@ -139,7 +143,7 @@ class LUKSDevice(DMCryptDevice):
log_method_call(self, self.name, status=self.status)

def _post_create(self):
- self.name = self.raw_device.format.map_name
+ self.name = self.slave.format.map_name
StorageDevice._post_create(self)

def _post_teardown(self, recursive=False):
@@ -162,10 +166,10 @@ class LUKSDevice(DMCryptDevice):
self.name = new_name

def dracut_setup_args(self):
- return set(["rd.luks.uuid=luks-%s" % self.raw_device.format.uuid])
+ return set(["rd.luks.uuid=luks-%s" % self.slave.format.uuid])

def populate_ksdata(self, data):
- self.raw_device.populate_ksdata(data)
+ self.slave.populate_ksdata(data)
data.encrypted = True
super(LUKSDevice, self).populate_ksdata(data)

diff --git a/blivet/errors.py b/blivet/errors.py
index b886ffec..30c9921a 100644
--- a/blivet/errors.py
+++ b/blivet/errors.py
@@ -201,7 +201,7 @@ class DeviceTreeError(StorageError):
pass


-class NoParentsError(DeviceTreeError):
+class NoSlavesError(DeviceTreeError):
pass


diff --git a/blivet/partitioning.py b/blivet/partitioning.py
index ce77e4eb..2cd6554c 100644
--- a/blivet/partitioning.py
+++ b/blivet/partitioning.py
@@ -32,7 +32,7 @@ import _ped

from .errors import DeviceError, PartitioningError, AlignmentError
from .flags import flags
-from .devices import Device, PartitionDevice, device_path_to_name
+from .devices import Device, PartitionDevice, LUKSDevice, device_path_to_name
from .size import Size
from .i18n import _
from .util import stringize, unicodeize, compare
@@ -1635,7 +1635,15 @@ class TotalSizeSet(object):
:param size: the target combined size
:type size: :class:`~.size.Size`
"""
- self.devices = [d.raw_device for d in devices]
+ self.devices = []
+ for device in devices:
+ if isinstance(device, LUKSDevice):
+ partition = device.slave
+ else:
+ partition = device
+
+ self.devices.append(partition)
+
self.size = size

self.requests = []
@@ -1673,7 +1681,15 @@ class SameSizeSet(object):
:keyword max_size: the maximum size for growable devices
:type max_size: :class:`~.size.Size`
"""
- self.devices = [d.raw_device for d in devices]
+ self.devices = []
+ for device in devices:
+ if isinstance(device, LUKSDevice):
+ partition = device.slave
+ else:
+ partition = device
+
+ self.devices.append(partition)
+
self.size = size / len(devices)
self.grow = grow
self.max_size = max_size
diff --git a/blivet/populator/helpers/dm.py b/blivet/populator/helpers/dm.py
index 4721390e..0ad065e2 100644
--- a/blivet/populator/helpers/dm.py
+++ b/blivet/populator/helpers/dm.py
@@ -47,13 +47,13 @@ class DMDevicePopulator(DevicePopulator):
name = udev.device_get_name(self.data)
log_method_call(self, name=name)
sysfs_path = udev.device_get_sysfs_path(self.data)
- parent_devices = self._devicetree._add_parent_devices(self.data)
+ slave_devices = self._devicetree._add_slave_devices(self.data)
device = self._devicetree.get_device_by_name(name)

if device is None:
device = DMDevice(name, dm_uuid=self.data.get('DM_UUID'),
sysfs_path=sysfs_path, exists=True,
- parents=[parent_devices[0]])
+ parents=[slave_devices[0]])
device.protected = True
device.controllable = False
self._devicetree._add_device(device)
diff --git a/blivet/populator/helpers/luks.py b/blivet/populator/helpers/luks.py
index 3221122a..9b5023f8 100644
--- a/blivet/populator/helpers/luks.py
+++ b/blivet/populator/helpers/luks.py
@@ -43,7 +43,7 @@ class LUKSDevicePopulator(DevicePopulator):
return udev.device_is_dm_luks(data)

def run(self):
- parents = self._devicetree._add_parent_devices(self.data)
+ parents = self._devicetree._add_slave_devices(self.data)
device = LUKSDevice(udev.device_get_name(self.data),
sysfs_path=udev.device_get_sysfs_path(self.data),
parents=parents,
@@ -58,7 +58,7 @@ class IntegrityDevicePopulator(DevicePopulator):
return udev.device_is_dm_integrity(data)

def run(self):
- parents = self._devicetree._add_parent_devices(self.data)
+ parents = self._devicetree._add_slave_devices(self.data)
name = udev.device_get_name(self.data)

try:
diff --git a/blivet/populator/helpers/lvm.py b/blivet/populator/helpers/lvm.py
index 6ef2f417..b549e8d3 100644
--- a/blivet/populator/helpers/lvm.py
+++ b/blivet/populator/helpers/lvm.py
@@ -58,7 +58,7 @@ class LVMDevicePopulator(DevicePopulator):
log.warning("found non-vg device with name %s", vg_name)
device = None

- self._devicetree._add_parent_devices(self.data)
+ self._devicetree._add_slave_devices(self.data)

# LVM provides no means to resolve conflicts caused by duplicated VG
# names, so we're just being optimistic here. Woo!
diff --git a/blivet/populator/helpers/mdraid.py b/blivet/populator/helpers/mdraid.py
index a7602d20..9bec11ef 100644
--- a/blivet/populator/helpers/mdraid.py
+++ b/blivet/populator/helpers/mdraid.py
@@ -31,7 +31,7 @@ from ... import udev
from ...devicelibs import raid
from ...devices import MDRaidArrayDevice, MDContainerDevice
from ...devices import device_path_to_name
-from ...errors import DeviceError, NoParentsError
+from ...errors import DeviceError, NoSlavesError
from ...flags import flags
from ...storage_log import log_method_call
from .devicepopulator import DevicePopulator
@@ -52,12 +52,12 @@ class MDDevicePopulator(DevicePopulator):
log_method_call(self, name=name)

try:
- self._devicetree._add_parent_devices(self.data)
- except NoParentsError:
- log.error("no parents found for mdarray %s, skipping", name)
+ self._devicetree._add_slave_devices(self.data)
+ except NoSlavesError:
+ log.error("no slaves found for mdarray %s, skipping", name)
return None

- # try to get the device again now that we've got all the parents
+ # try to get the device again now that we've got all the slaves
device = self._devicetree.get_device_by_name(name, incomplete=flags.allow_imperfect_devices)

if device is None:
@@ -74,8 +74,8 @@ class MDDevicePopulator(DevicePopulator):
device.name = name

if device is None:
- # if we get here, we found all of the parent devices and
- # something must be wrong -- if all of the parents are in
+ # if we get here, we found all of the slave devices and
+ # something must be wrong -- if all of the slaves are in
# the tree, this device should be as well
if name is None:
name = udev.device_get_name(self.data)
diff --git a/blivet/populator/helpers/multipath.py b/blivet/populator/helpers/multipath.py
index 96c0a9ad..10c745bf 100644
--- a/blivet/populator/helpers/multipath.py
+++ b/blivet/populator/helpers/multipath.py
@@ -40,13 +40,13 @@ class MultipathDevicePopulator(DevicePopulator):
name = udev.device_get_name(self.data)
log_method_call(self, name=name)

- parent_devices = self._devicetree._add_parent_devices(self.data)
+ slave_devices = self._devicetree._add_slave_devices(self.data)

device = None
- if parent_devices:
- device = MultipathDevice(name, parents=parent_devices,
+ if slave_devices:
+ device = MultipathDevice(name, parents=slave_devices,
sysfs_path=udev.device_get_sysfs_path(self.data),
- wwn=parent_devices[0].wwn)
+ wwn=slave_devices[0].wwn)
self._devicetree._add_device(device)

return device
diff --git a/blivet/populator/populator.py b/blivet/populator/populator.py
index 3a419418..068270b2 100644
--- a/blivet/populator/populator.py
+++ b/blivet/populator/populator.py
@@ -31,7 +31,7 @@ gi.require_version("BlockDev", "2.0")

from gi.repository import BlockDev as blockdev

-from ..errors import DeviceError, DeviceTreeError, NoParentsError
+from ..errors import DeviceError, DeviceTreeError, NoSlavesError
from ..devices import DMLinearDevice, DMRaidArrayDevice
from ..devices import FileDevice, LoopDevice
from ..devices import MDRaidArrayDevice
@@ -92,55 +92,56 @@ class PopulatorMixin(object):

self._cleanup = False

- def _add_parent_devices(self, info):
- """ Add all parents of a device, raising DeviceTreeError on failure.
+ def _add_slave_devices(self, info):
+ """ Add all slaves of a device, raising DeviceTreeError on failure.

:param :class:`pyudev.Device` info: the device's udev info
- :raises: :class:`~.errors.DeviceTreeError if no parents are found or
- if we fail to add any parent
- :returns: a list of parent devices
+ :raises: :class:`~.errors.DeviceTreeError if no slaves are found or
+ if we fail to add any slave
+ :returns: a list of slave devices
:rtype: list of :class:`~.StorageDevice`
"""
name = udev.device_get_name(info)
sysfs_path = udev.device_get_sysfs_path(info)
- parent_dir = os.path.normpath("%s/slaves" % sysfs_path)
- parent_names = os.listdir(parent_dir)
- parent_devices = []
- if not parent_names:
- log.error("no parents found for %s", name)
- raise NoParentsError("no parents found for device %s" % name)
-
- for parent_name in parent_names:
- path = os.path.normpath("%s/%s" % (parent_dir, parent_name))
- parent_info = udev.get_device(os.path.realpath(path))
-
- if not parent_info:
- msg = "unable to get udev info for %s" % parent_name
+ slave_dir = os.path.normpath("%s/slaves" % sysfs_path)
+ slave_names = os.listdir(slave_dir)
+ slave_devices = []
+ if not slave_names:
+ log.error("no slaves found for %s", name)
+ raise NoSlavesError("no slaves found for device %s" % name)
+
+ for slave_name in slave_names:
+ path = os.path.normpath("%s/%s" % (slave_dir, slave_name))
+ slave_info = udev.get_device(os.path.realpath(path))
+
+ if not slave_info:
+ msg = "unable to get udev info for %s" % slave_name
raise DeviceTreeError(msg)

# cciss in sysfs is "cciss!cXdYpZ" but we need "cciss/cXdYpZ"
- parent_name = udev.device_get_name(parent_info).replace("!", "/")
-
- parent_dev = self.get_device_by_name(parent_name)
- if not parent_dev and parent_info:
- # we haven't scanned the parent yet, so do it now
- self.handle_device(parent_info)
- parent_dev = self.get_device_by_name(parent_name)
- if parent_dev is None:
+ slave_name = udev.device_get_name(slave_info).replace("!", "/")
+
+ slave_dev = self.get_device_by_name(slave_name)
+ if not slave_dev and slave_info:
+ # we haven't scanned the slave yet, so do it now
+ self.handle_device(slave_info)
+ slave_dev = self.get_device_by_name(slave_name)
+ if slave_dev is None:
if udev.device_is_dm_lvm(info):
- if parent_name not in lvs_info.cache:
+ if slave_name not in lvs_info.cache:
# we do not expect hidden lvs to be in the tree
continue

- # if the current parent is still not in
+ # if the current slave is still not in
# the tree, something has gone wrong
- log.error("failure scanning device %s: could not add parent %s", name, parent_name)
- msg = "failed to add parent %s of device %s" % (parent_name, name)
+ log.error("failure scanning device %s: could not add slave %s", name, slave_name)
+ msg = "failed to add slave %s of device %s" % (slave_name,
+ name)
raise DeviceTreeError(msg)

- parent_devices.append(parent_dev)
+ slave_devices.append(slave_dev)

- return parent_devices
+ return slave_devices

def _add_name(self, name):
if name not in self.names:
diff --git a/blivet/threads.py b/blivet/threads.py
index 5e2dff3f..1a5cc6db 100644
--- a/blivet/threads.py
+++ b/blivet/threads.py
@@ -63,11 +63,12 @@ class SynchronizedMeta(type):
"""
def __new__(cls, name, bases, dct):
new_dct = {}
+ blacklist = dct.get('_unsynchronized_methods', [])

for n in dct:
obj = dct[n]
# Do not decorate class or static methods.
- if n in dct.get('_unsynchronized_methods', []):
+ if n in blacklist:
pass
elif isinstance(obj, FunctionType):
obj = exclusive(obj)
diff --git a/blivet/udev.py b/blivet/udev.py
index efbc53d6..ddc49a37 100644
--- a/blivet/udev.py
+++ b/blivet/udev.py
@@ -39,7 +39,7 @@ from gi.repository import BlockDev as blockdev

global_udev = pyudev.Context()
log = logging.getLogger("blivet")

-ignored_device_names = []
+device_name_blacklist = []
""" device name regexes to ignore; this should be empty by default """


@@ -77,7 +77,7 @@ def get_devices(subsystem="block"):

result = []
for device in global_udev.list_devices(subsystem=subsystem):
- if not __is_ignored_blockdev(device.sys_name):
+ if not __is_blacklisted_blockdev(device.sys_name):
dev = device_to_dict(device)
result.append(dev)

@@ -176,13 +176,13 @@ def resolve_glob(glob):
return ret


-def __is_ignored_blockdev(dev_name):
+def __is_blacklisted_blockdev(dev_name):
"""Is this a blockdev we never want for an install?"""
if dev_name.startswith("ram") or dev_name.startswith("fd"):
return True

- if ignored_device_names:
- if any(re.search(expr, dev_name) for expr in ignored_device_names):
+ if device_name_blacklist:
+ if any(re.search(expr, dev_name) for expr in device_name_blacklist):
return True

dev_path = "/sys/class/block/%s" % dev_name
@@ -375,7 +375,7 @@ def device_is_disk(info):
device_is_dm_crypt(info) or
device_is_dm_stratis(info) or
(device_is_md(info) and
- (not device_get_md_container(info) and not all(device_is_disk(d) for d in device_get_parents(info))))))
+ (not device_get_md_container(info) and not all(device_is_disk(d) for d in device_get_slaves(info))))))


def device_is_partition(info):
@@ -454,18 +454,18 @@ def device_get_devname(info):
return info.get('DEVNAME')


-def device_get_parents(info):
- """ Return a list of udev device objects representing this device's parents. """
- parents_dir = device_get_sysfs_path(info) + "/slaves/"
+def device_get_slaves(info):
+ """ Return a list of udev device objects representing this device's slaves. """
+ slaves_dir = device_get_sysfs_path(info) + "/slaves/"
names = list()
- if os.path.isdir(parents_dir):
- names = os.listdir(parents_dir)
+ if os.path.isdir(slaves_dir):
+ names = os.listdir(slaves_dir)

- parents = list()
+ slaves = list()
for name in names:
- parents.append(get_device(device_node="/dev/" + name))
+ slaves.append(get_device(device_node="/dev/" + name))

- return parents
+ return slaves


def device_get_holders(info):
@@ -742,7 +742,7 @@ def device_get_partition_disk(info):
disk = None
majorminor = info.get("ID_PART_ENTRY_DISK")
sysfs_path = device_get_sysfs_path(info)
- parents_dir = "%s/slaves" % sysfs_path
+ slaves_dir = "%s/slaves" % sysfs_path
if majorminor:
major, minor = majorminor.split(":")
for device in get_devices():
@@ -750,8 +750,8 @@ def device_get_partition_disk(info):
disk = device_get_name(device)
break
elif device_is_dm_partition(info):
- if os.path.isdir(parents_dir):
- parents = os.listdir(parents_dir)
+ if os.path.isdir(slaves_dir):
+ parents = os.listdir(slaves_dir)
if len(parents) == 1:
disk = resolve_devspec(parents[0].replace('!', '/'))
else:
diff --git a/tests/unit_tests/devicefactory_test.py b/tests/unit_tests/devicefactory_test.py
index ff6bcb9e..552aadc1 100644
--- a/tests/unit_tests/devicefactory_test.py
+++ b/tests/unit_tests/devicefactory_test.py
@@ -115,9 +115,9 @@ class DeviceFactoryTestCase(unittest.TestCase):
kwargs.get("encrypted", False) or
kwargs.get("container_encrypted", False))
if kwargs.get("encrypted", False):
- self.assertEqual(device.parents[0].format.luks_version,
+ self.assertEqual(device.slave.format.luks_version,
kwargs.get("luks_version", crypto.DEFAULT_LUKS_VERSION))
- self.assertEqual(device.raw_device.format.luks_sector_size,
+ self.assertEqual(device.slave.format.luks_sector_size,
kwargs.get("luks_sector_size", 0))

self.assertTrue(set(device.disks).issubset(kwargs["disks"]))
@@ -357,7 +357,7 @@ class LVMFactoryTestCase(DeviceFactoryTestCase):
device = args[0]

if kwargs.get("encrypted"):
- container = device.parents[0].container
+ container = device.slave.container
else:
container = device.container

@@ -376,7 +376,7 @@ class LVMFactoryTestCase(DeviceFactoryTestCase):
self.assertIsInstance(pv, member_class)

if pv.encrypted:
- self.assertEqual(pv.parents[0].format.luks_version,
+ self.assertEqual(pv.slave.format.luks_version,
kwargs.get("luks_version", crypto.DEFAULT_LUKS_VERSION))

@patch("blivet.formats.lvmpv.LVMPhysicalVolume.formattable", return_value=True)
@@ -592,7 +592,7 @@ class LVMThinPFactoryTestCase(LVMFactoryTestCase):
device = args[0]

if kwargs.get("encrypted", False):
- thinlv = device.parents[0]
+ thinlv = device.slave
else:
thinlv = device

diff --git a/tests/unit_tests/devices_test/device_size_test.py b/tests/unit_tests/devices_test/device_size_test.py
index d0c0a3f4..a1efa86d 100644
--- a/tests/unit_tests/devices_test/device_size_test.py
+++ b/tests/unit_tests/devices_test/device_size_test.py
@@ -107,8 +107,8 @@ class LUKSDeviceSizeTest(StorageDeviceSizeTest):

def _get_device(self, *args, **kwargs):
exists = kwargs.get("exists", False)
- parent = StorageDevice(*args, size=kwargs["size"] + crypto.LUKS_METADATA_SIZE, exists=exists)
- return LUKSDevice(*args, **kwargs, parents=[parent])
+ slave = StorageDevice(*args, size=kwargs["size"] + crypto.LUKS_METADATA_SIZE, exists=exists)
+ return LUKSDevice(*args, **kwargs, parents=[slave])

def test_size_getter(self):
initial_size = Size("10 GiB")
@@ -116,4 +116,4 @@ class LUKSDeviceSizeTest(StorageDeviceSizeTest):

# for LUKS size depends on the backing device size
self.assertEqual(dev.size, initial_size)
- self.assertEqual(dev.raw_device.size, initial_size + crypto.LUKS_METADATA_SIZE)
+ self.assertEqual(dev.slave.size, initial_size + crypto.LUKS_METADATA_SIZE)
diff --git a/tests/unit_tests/populator_test.py b/tests/unit_tests/populator_test.py
index 369fe878..7ba04bac 100644
--- a/tests/unit_tests/populator_test.py
+++ b/tests/unit_tests/populator_test.py
@@ -86,7 +86,7 @@ class DMDevicePopulatorTestCase(PopulatorHelperTestCase):
@patch.object(DeviceTree, "get_device_by_name")
@patch.object(DMDevice, "status", return_value=True)
@patch.object(DMDevice, "update_sysfs_path")
- @patch.object(DeviceTree, "_add_parent_devices")
+ @patch.object(DeviceTree, "_add_slave_devices")
@patch("blivet.udev.device_get_name")
@patch("blivet.udev.device_get_sysfs_path", return_value=sentinel.sysfs_path)
def test_run(self, *args):
@@ -95,7 +95,7 @@ class DMDevicePopulatorTestCase(PopulatorHelperTestCase):

devicetree = DeviceTree()

- # The general case for dm devices is that adding the parent devices
+ # The general case for dm devices is that adding the slave/parent devices
# will result in the dm device itself being in the tree.
device = Mock()
device.id = 0
@@ -106,7 +106,7 @@ class DMDevicePopulatorTestCase(PopulatorHelperTestCase):
parent = Mock()
parent.id = 0
parent.parents = []
- devicetree._add_parent_devices.return_value = [parent]
+ devicetree._add_slave_devices.return_value = [parent]
devicetree._add_device(parent)
devicetree.get_device_by_name.return_value = None
device_name = "dmdevice"
@@ -235,7 +235,7 @@ class LVMDevicePopulatorTestCase(PopulatorHelperTestCase):
# could be the first helper class checked.

@patch.object(DeviceTree, "get_device_by_name")
- @patch.object(DeviceTree, "_add_parent_devices")
+ @patch.object(DeviceTree, "_add_slave_devices")
@patch("blivet.udev.device_get_name")
@patch("blivet.udev.device_get_lv_vg_name")
def test_run(self, *args):
@@ -247,7 +247,7 @@ class LVMDevicePopulatorTestCase(PopulatorHelperTestCase):
devicetree = DeviceTree()
data = Mock()

- # Add parent devices and then look up the device.
+ # Add slave/parent devices and then look up the device.
device_get_name.return_value = sentinel.lv_name
devicetree.get_device_by_name.return_value = None

@@ -267,7 +267,7 @@ class LVMDevicePopulatorTestCase(PopulatorHelperTestCase):
call(sentinel.vg_name),
call(sentinel.lv_name)])

- # Add parent devices, but the device is still not in the tree
+ # Add slave/parent devices, but the device is still not in the tree
get_device_by_name.side_effect = None
get_device_by_name.return_value = None
self.assertEqual(helper.run(), None)
@@ -639,7 +639,7 @@ class MDDevicePopulatorTestCase(PopulatorHelperTestCase):
# could be the first helper class checked.

@patch.object(DeviceTree, "get_device_by_name")
- @patch.object(DeviceTree, "_add_parent_devices")
+ @patch.object(DeviceTree, "_add_slave_devices")
@patch("blivet.udev.device_get_name")
@patch("blivet.udev.device_get_md_uuid")
@patch("blivet.udev.device_get_md_name")
@@ -650,7 +650,7 @@ class MDDevicePopulatorTestCase(PopulatorHelperTestCase):

devicetree = DeviceTree()

- # base case: _add_parent_devices gets the array into the tree
+ # base case: _add_slave_devices gets the array into the tree
data = Mock()
device = Mock()
device.parents = []
@@ -713,12 +713,12 @@ class MultipathDevicePopulatorTestCase(PopulatorHelperTestCase):
# could be the first helper class checked.

@patch("blivet.udev.device_get_sysfs_path")
- @patch.object(DeviceTree, "_add_parent_devices")
+ @patch.object(DeviceTree, "_add_slave_devices")
@patch("blivet.udev.device_get_name")
def test_run(self, *args):
"""Test multipath device populator."""
device_get_name = args[0]
- add_parent_devices = args[1]
+ add_slave_devices = args[1]

devicetree = DeviceTree()
# set up some fake udev data to verify handling of specific entries
@@ -733,13 +733,13 @@ class MultipathDevicePopulatorTestCase(PopulatorHelperTestCase):

device_name = "mpathtest"
device_get_name.return_value = device_name
- parent_1 = Mock(tags=set(), wwn=wwn[2:], id=0)
- parent_1.parents = []
- parent_2 = Mock(tags=set(), wwn=wwn[2:], id=0)
- parent_2.parents = []
- devicetree._add_device(parent_1)
- devicetree._add_device(parent_2)
- add_parent_devices.return_value = [parent_1, parent_2]
+ slave_1 = Mock(tags=set(), wwn=wwn[2:], id=0)
+ slave_1.parents = []
+ slave_2 = Mock(tags=set(), wwn=wwn[2:], id=0)
+ slave_2.parents = []
+ devicetree._add_device(slave_1)
+ devicetree._add_device(slave_2)
+ add_slave_devices.return_value = [slave_1, slave_2]

helper = self.helper_class(devicetree, data)

diff --git a/tests/unit_tests/udev_test.py b/tests/unit_tests/udev_test.py
index b208efa8..ebcd59e2 100644
--- a/tests/unit_tests/udev_test.py
+++ b/tests/unit_tests/udev_test.py
@@ -49,11 +49,11 @@ class UdevTest(unittest.TestCase):
@mock.patch('blivet.udev.device_is_dm_crypt', return_value=False)
@mock.patch('blivet.udev.device_is_md')
@mock.patch('blivet.udev.device_get_md_container')
- @mock.patch('blivet.udev.device_get_parents')
+ @mock.patch('blivet.udev.device_get_slaves')
def test_udev_device_is_disk_md(self, *args):
import blivet.udev
info = dict(DEVTYPE='disk', SYS_PATH=mock.sentinel.md_path)
- (device_get_parents, device_get_md_container, device_is_md) = args[:3] # pylint: disable=unbalanced-tuple-unpacking
+ (device_get_slaves, device_get_md_container, device_is_md) = args[:3] # pylint: disable=unbalanced-tuple-unpacking

disk_parents = [dict(DEVTYPE="disk", SYS_PATH='/fake/path/2'),
dict(DEVTYPE="disk", SYS_PATH='/fake/path/3')]
@@ -68,20 +68,20 @@ class UdevTest(unittest.TestCase):
# Intel FW RAID (MD RAID w/ container layer)
# device_get_container will return some mock value which will evaluate to True
device_get_md_container.return_value = mock.sentinel.md_container
- device_get_parents.side_effect = lambda info: list()
+ device_get_slaves.side_effect = lambda info: list()
self.assertTrue(blivet.udev.device_is_disk(info))

# Normal MD RAID
- device_get_parents.side_effect = lambda info: partition_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
+ device_get_slaves.side_effect = lambda info: partition_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
device_get_md_container.return_value = None
|
||||
self.assertFalse(blivet.udev.device_is_disk(info))
|
||||
|
||||
# Dell FW RAID (MD RAID whose members are all whole disks)
|
||||
- device_get_parents.side_effect = lambda info: disk_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
|
||||
+ device_get_slaves.side_effect = lambda info: disk_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
|
||||
self.assertTrue(blivet.udev.device_is_disk(info))
|
||||
|
||||
# Normal MD RAID (w/ at least one non-disk member)
|
||||
- device_get_parents.side_effect = lambda info: mixed_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
|
||||
+ device_get_slaves.side_effect = lambda info: mixed_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
|
||||
self.assertFalse(blivet.udev.device_is_disk(info))
|
||||
|
||||
|
||||
diff --git a/tests/vmtests/vmbackedtestcase.py b/tests/vmtests/vmbackedtestcase.py
|
||||
index 797bac85..6255104f 100644
|
||||
--- a/tests/vmtests/vmbackedtestcase.py
|
||||
+++ b/tests/vmtests/vmbackedtestcase.py
|
||||
@@ -50,7 +50,7 @@ class VMBackedTestCase(unittest.TestCase):
|
||||
defined in set_up_disks.
|
||||
"""
|
||||
|
||||
- udev.ignored_device_names = [r'^zram']
|
||||
+ udev.device_name_blacklist = [r'^zram']
|
||||
|
||||
#
|
||||
# create disk images
|
||||
--
|
||||
2.38.1
|
||||
|
@ -0,0 +1,104 @@
|
||||
From fd07d14ad1f19c700d5344c8af11be6a1e314ceb Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Wed, 12 Sep 2018 10:45:41 +0200
|
||||
Subject: [PATCH 1/2] Allow removing btrfs volumes without btrfs support
|
||||
|
||||
Btrfs volumes are removed using wipefs so we don't need to check
|
||||
for the availability of device dependencies when removing the volume
|
||||
(btrfs support depends on libblockdev btrfs plugin).
|
||||
|
||||
Resolves: rhbz#1605213
|
||||
---
|
||||
blivet/deviceaction.py | 23 ++++++++++++++++++-----
|
||||
1 file changed, 18 insertions(+), 5 deletions(-)
|
||||
|
||||
diff --git a/blivet/deviceaction.py b/blivet/deviceaction.py
|
||||
index 3e337e18..b3e9e5f1 100644
|
||||
--- a/blivet/deviceaction.py
|
||||
+++ b/blivet/deviceaction.py
|
||||
@@ -160,15 +160,19 @@ def __init__(self, device):
|
||||
if not isinstance(device, StorageDevice):
|
||||
raise ValueError("arg 1 must be a StorageDevice instance")
|
||||
|
||||
- unavailable_dependencies = device.unavailable_dependencies
|
||||
- if unavailable_dependencies:
|
||||
- dependencies_str = ", ".join(str(d) for d in unavailable_dependencies)
|
||||
- raise DependencyError("device type %s requires unavailable_dependencies: %s" % (device.type, dependencies_str))
|
||||
-
|
||||
self.device = device
|
||||
+
|
||||
+ self._check_device_dependencies()
|
||||
+
|
||||
self.container = getattr(self.device, "container", None)
|
||||
self._applied = False
|
||||
|
||||
+ def _check_device_dependencies(self):
|
||||
+ unavailable_dependencies = self.device.unavailable_dependencies
|
||||
+ if unavailable_dependencies:
|
||||
+ dependencies_str = ", ".join(str(d) for d in unavailable_dependencies)
|
||||
+ raise DependencyError("device type %s requires unavailable_dependencies: %s" % (self.device.type, dependencies_str))
|
||||
+
|
||||
def apply(self):
|
||||
""" apply changes related to the action to the device(s) """
|
||||
self._applied = True
|
||||
@@ -379,6 +383,15 @@ def __init__(self, device):
|
||||
# XXX should we insist that device.fs be None?
|
||||
DeviceAction.__init__(self, device)
|
||||
|
||||
+ def _check_device_dependencies(self):
|
||||
+ if self.device.type == "btrfs volume":
|
||||
+ # XXX destroying a btrfs volume is a special case -- we don't destroy
|
||||
+ # the device, but use wipefs to destroy format on its parents so we
|
||||
+ # don't need btrfs plugin or btrfs-progs for this
|
||||
+ return
|
||||
+
|
||||
+ super(ActionDestroyDevice, self)._check_device_dependencies()
|
||||
+
|
||||
def execute(self, callbacks=None):
|
||||
super(ActionDestroyDevice, self).execute(callbacks=callbacks)
|
||||
self.device.destroy()
|
||||
|
||||
From b9f1b4acb654c5fb70be1a2200bcf3a34dcde467 Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Mon, 17 Sep 2018 10:25:24 +0200
|
||||
Subject: [PATCH 2/2] Check device dependencies only for device actions
|
||||
|
||||
We don't want to check device dependencies for format actions.
|
||||
It should be possible, for example, to format an opened LUKS device
without the libblockdev crypto plugin.
|
||||
|
||||
Related: rhbz#1605213
|
||||
---
|
||||
blivet/deviceaction.py | 3 ++-
|
||||
tests/devices_test/dependencies_test.py | 4 ----
|
||||
2 files changed, 2 insertions(+), 5 deletions(-)
|
||||
|
||||
diff --git a/blivet/deviceaction.py b/blivet/deviceaction.py
|
||||
index b3e9e5f1..14a06ff0 100644
|
||||
--- a/blivet/deviceaction.py
|
||||
+++ b/blivet/deviceaction.py
|
||||
@@ -162,7 +162,8 @@ def __init__(self, device):
|
||||
|
||||
self.device = device
|
||||
|
||||
- self._check_device_dependencies()
|
||||
+ if self.is_device:
|
||||
+ self._check_device_dependencies()
|
||||
|
||||
self.container = getattr(self.device, "container", None)
|
||||
self._applied = False
|
||||
diff --git a/tests/devices_test/dependencies_test.py b/tests/devices_test/dependencies_test.py
|
||||
index 0b44493e..e6b5bdb4 100644
|
||||
--- a/tests/devices_test/dependencies_test.py
|
||||
+++ b/tests/devices_test/dependencies_test.py
|
||||
@@ -97,10 +97,6 @@ def test_availability_mdraidplugin(self):
|
||||
ActionCreateDevice(self.luks)
|
||||
with self.assertRaises(DependencyError):
|
||||
ActionDestroyDevice(self.dev)
|
||||
- with self.assertRaises(DependencyError):
|
||||
- ActionCreateFormat(self.dev)
|
||||
- with self.assertRaises(DependencyError):
|
||||
- ActionDestroyFormat(self.dev)
|
||||
|
||||
def _clean_up(self):
|
||||
availability.BLOCKDEV_MDRAID_PLUGIN._method = self.mdraid_method
|
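The two patches above split the dependency check out of __init__ so that subclasses can relax it and so that format actions skip it entirely. A minimal standalone sketch of that pattern follows; the class and attribute names are illustrative stand-ins, not blivet's real deviceaction classes.

class DependencyError(Exception):
    pass


class Action(object):
    # device actions check dependencies; format actions skip the check
    is_device = True

    def __init__(self, device):
        self.device = device
        if self.is_device:
            self._check_device_dependencies()

    def _check_device_dependencies(self):
        missing = getattr(self.device, "unavailable_dependencies", [])
        if missing:
            raise DependencyError("device type %s requires unavailable dependencies: %s"
                                  % (self.device.type, ", ".join(str(d) for d in missing)))


class DestroyDeviceAction(Action):
    def _check_device_dependencies(self):
        if self.device.type == "btrfs volume":
            # the volume is destroyed by wiping its parents, so no btrfs plugin is needed
            return
        super(DestroyDeviceAction, self)._check_device_dependencies()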
@ -1,86 +0,0 @@
|
||||
From f6490c469904f4808c63a170210e53acc908b018 Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Wed, 17 Aug 2022 14:24:21 +0200
|
||||
Subject: [PATCH 1/2] Use MD populator instead of DM to handle DDF RAID format
|
||||
|
||||
---
|
||||
blivet/formats/dmraid.py | 2 +-
|
||||
blivet/formats/mdraid.py | 2 +-
|
||||
2 files changed, 2 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/blivet/formats/dmraid.py b/blivet/formats/dmraid.py
|
||||
index 2ba9dcfe..ce15905d 100644
|
||||
--- a/blivet/formats/dmraid.py
|
||||
+++ b/blivet/formats/dmraid.py
|
||||
@@ -43,7 +43,7 @@ class DMRaidMember(DeviceFormat):
|
||||
#
|
||||
# One problem that presents is the possibility of someone passing
|
||||
# a dmraid member to the MDRaidArrayDevice constructor.
|
||||
- _udev_types = ["adaptec_raid_member", "ddf_raid_member",
|
||||
+ _udev_types = ["adaptec_raid_member",
|
||||
"hpt37x_raid_member", "hpt45x_raid_member",
|
||||
"isw_raid_member",
|
||||
"jmicron_raid_member", "lsi_mega_raid_member",
|
||||
diff --git a/blivet/formats/mdraid.py b/blivet/formats/mdraid.py
|
||||
index 41ddef81..4aa3f3b0 100644
|
||||
--- a/blivet/formats/mdraid.py
|
||||
+++ b/blivet/formats/mdraid.py
|
||||
@@ -41,7 +41,7 @@ class MDRaidMember(DeviceFormat):
|
||||
""" An mdraid member disk. """
|
||||
_type = "mdmember"
|
||||
_name = N_("software RAID")
|
||||
- _udev_types = ["linux_raid_member"]
|
||||
+ _udev_types = ["linux_raid_member", "ddf_raid_member"]
|
||||
parted_flag = PARTITION_RAID
|
||||
_formattable = True # can be formatted
|
||||
_supported = True # is supported
|
||||
--
|
||||
2.38.1
|
||||
|
||||
|
||||
From 5fadd850aae217d7692a6c8a50b2dcd5e61a63cd Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Wed, 17 Aug 2022 14:24:58 +0200
|
||||
Subject: [PATCH 2/2] Do not read DDF RAID UUID from udev
|
||||
|
||||
The UUID we get from udev isn't the array UUID; we need to get
|
||||
that using libblockdev.
|
||||
---
|
||||
blivet/populator/helpers/mdraid.py | 16 ++++++++++------
|
||||
1 file changed, 10 insertions(+), 6 deletions(-)
|
||||
|
||||
diff --git a/blivet/populator/helpers/mdraid.py b/blivet/populator/helpers/mdraid.py
|
||||
index 3479e3f7..a7602d20 100644
|
||||
--- a/blivet/populator/helpers/mdraid.py
|
||||
+++ b/blivet/populator/helpers/mdraid.py
|
||||
@@ -98,17 +98,21 @@ class MDFormatPopulator(FormatPopulator):
|
||||
|
||||
def _get_kwargs(self):
|
||||
kwargs = super(MDFormatPopulator, self)._get_kwargs()
|
||||
- try:
|
||||
- # ID_FS_UUID contains the array UUID
|
||||
- kwargs["md_uuid"] = udev.device_get_uuid(self.data)
|
||||
- except KeyError:
|
||||
- log.warning("mdraid member %s has no md uuid", udev.device_get_name(self.data))
|
||||
+ kwargs["biosraid"] = udev.device_is_biosraid_member(self.data)
|
||||
+ if not kwargs["biosraid"]:
|
||||
+ try:
|
||||
+ # ID_FS_UUID contains the array UUID
|
||||
+ kwargs["md_uuid"] = udev.device_get_uuid(self.data)
|
||||
+ except KeyError:
|
||||
+ log.warning("mdraid member %s has no md uuid", udev.device_get_name(self.data))
|
||||
+ else:
|
||||
+ # for BIOS RAIDs we can't get the UUID from udev, we'll get it from mdadm in `run` below
|
||||
+ kwargs["md_uuid"] = None
|
||||
|
||||
# reset the uuid to the member-specific value
|
||||
# this will be None for members of v0 metadata arrays
|
||||
kwargs["uuid"] = udev.device_get_md_device_uuid(self.data)
|
||||
|
||||
- kwargs["biosraid"] = udev.device_is_biosraid_member(self.data)
|
||||
return kwargs
|
||||
|
||||
def run(self):
|
||||
--
|
||||
2.38.1
|
||||
|
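A condensed sketch of the UUID handling these two patches end up with; the helper parameters below are hypothetical stand-ins for the udev calls shown in the hunks above, passed in to keep the example self-contained.

def pick_md_uuid(data, device_is_biosraid_member, device_get_uuid):
    """Return the MD array UUID for a member, or None if it must come from mdadm.

    For BIOS RAID members (e.g. DDF) the ID_FS_UUID reported by udev is not
    the array UUID, so it is left unset here and filled in later via
    libblockdev/mdadm.
    """
    if device_is_biosraid_member(data):
        return None
    try:
        return device_get_uuid(data)   # ID_FS_UUID for plain MD members
    except KeyError:
        return None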
272
SOURCES/0005-arm7-cleanups.patch
Normal file
@ -0,0 +1,272 @@
|
||||
From 12a2bdf3fc5a7a4568ff56b244d3067b73f82681 Mon Sep 17 00:00:00 2001
|
||||
From: Peter Robinson <pbrobinson@gmail.com>
|
||||
Date: Tue, 7 Aug 2018 15:11:56 +0100
|
||||
Subject: [PATCH 1/6] arch: arm: drop omap specifics for partitioning
|
||||
|
||||
We've long stopped supporting or using any specifics around OMAP
|
||||
ARM machines and all ARM platforms support the extlinux means of
|
||||
doing things one way or another.
|
||||
|
||||
Signed-off-by: Peter Robinson <pbrobinson@gmail.com>
|
||||
---
|
||||
blivet/arch.py | 4 ----
|
||||
blivet/devices/partition.py | 3 ---
|
||||
2 files changed, 7 deletions(-)
|
||||
|
||||
diff --git a/blivet/arch.py b/blivet/arch.py
|
||||
index 20fe4f57..f30b2d8b 100644
|
||||
--- a/blivet/arch.py
|
||||
+++ b/blivet/arch.py
|
||||
@@ -352,10 +352,6 @@ def is_ipseries():
|
||||
return is_ppc() and get_ppc_machine() in ("iSeries", "pSeries")
|
||||
|
||||
|
||||
-def is_omap_arm():
|
||||
- return is_arm() and get_arm_machine() == "omap"
|
||||
-
|
||||
-
|
||||
def get_arch():
|
||||
"""
|
||||
:return: The hardware architecture
|
||||
diff --git a/blivet/devices/partition.py b/blivet/devices/partition.py
|
||||
index 47ff547b..623e1c9d 100644
|
||||
--- a/blivet/devices/partition.py
|
||||
+++ b/blivet/devices/partition.py
|
||||
@@ -421,9 +421,6 @@ def _get_weight(self):
|
||||
# On ARM images '/' must be the last partition.
|
||||
if self.format.mountpoint == "/":
|
||||
weight = -100
|
||||
- elif (arch.is_omap_arm() and
|
||||
- self.format.mountpoint == "/boot/uboot" and self.format.type == "vfat"):
|
||||
- weight = 5000
|
||||
elif arch.is_ppc():
|
||||
if arch.is_pmac() and self.format.type == "appleboot":
|
||||
weight = 5000
|
||||
|
||||
From ec978c3c625c74c387a9c8074d2378c4ecbeac47 Mon Sep 17 00:00:00 2001
|
||||
From: Peter Robinson <pbrobinson@gmail.com>
|
||||
Date: Thu, 16 Aug 2018 14:32:19 +0100
|
||||
Subject: [PATCH 2/6] arch: arm: drop get_arm_machine function
|
||||
|
||||
The get_arm_machine function was used when we had to detect which
ARM-specific kernel to install. The last user of this was the omap check for
|
||||
special partitioning which is no longer used due to extlinux support so we can
|
||||
now drop this function too.
|
||||
|
||||
Signed-off-by: Peter Robinson <pbrobinson@gmail.com>
|
||||
---
|
||||
blivet/arch.py | 22 ----------------------
|
||||
blivet/flags.py | 2 --
|
||||
2 files changed, 24 deletions(-)
|
||||
|
||||
diff --git a/blivet/arch.py b/blivet/arch.py
|
||||
index f30b2d8b..55ce8108 100644
|
||||
--- a/blivet/arch.py
|
||||
+++ b/blivet/arch.py
|
||||
@@ -33,7 +33,6 @@
|
||||
|
||||
import os
|
||||
|
||||
-from .flags import flags
|
||||
from .storage_log import log_exception_info
|
||||
|
||||
import logging
|
||||
@@ -182,27 +181,6 @@ def is_aarch64():
|
||||
return os.uname()[4] == 'aarch64'
|
||||
|
||||
|
||||
-def get_arm_machine():
|
||||
- """
|
||||
- :return: The ARM processor variety type, or None if not ARM.
|
||||
- :rtype: string
|
||||
-
|
||||
- """
|
||||
- if not is_arm():
|
||||
- return None
|
||||
-
|
||||
- if flags.arm_platform:
|
||||
- return flags.arm_platform
|
||||
-
|
||||
- arm_machine = os.uname()[2].rpartition('.')[2]
|
||||
-
|
||||
- if arm_machine.startswith('arm'):
|
||||
- # @TBD - Huh? Don't you want the arm machine name here?
|
||||
- return None
|
||||
- else:
|
||||
- return arm_machine
|
||||
-
|
||||
-
|
||||
def is_cell():
|
||||
"""
|
||||
:return: True if the hardware is the Cell platform, False otherwise.
|
||||
diff --git a/blivet/flags.py b/blivet/flags.py
|
||||
index 18401218..4e26d82f 100644
|
||||
--- a/blivet/flags.py
|
||||
+++ b/blivet/flags.py
|
||||
@@ -57,8 +57,6 @@ def __init__(self):
|
||||
self.jfs = True
|
||||
self.reiserfs = True
|
||||
|
||||
- self.arm_platform = None
|
||||
-
|
||||
self.gpt = False
|
||||
|
||||
# for this flag to take effect,
|
||||
|
||||
From e75049e9e9edac9da789cee2add2b4190159805d Mon Sep 17 00:00:00 2001
|
||||
From: Peter Robinson <pbrobinson@gmail.com>
|
||||
Date: Thu, 16 Aug 2018 14:35:30 +0100
|
||||
Subject: [PATCH 3/6] Aarch64 platforms: Fix gpt defaults for 64 bit arm
|
||||
platforms
|
||||
|
||||
The 46165f589d commit added support for msdos needed on some aarch64 devices
|
||||
but it messed up the gpt defaults. This was fixed in 4908746c3a, but the
default is msdos again, so we put in an aarch64 option to put gpt first again.
|
||||
|
||||
Signed-off-by: Peter Robinson <pbrobinson@gmail.com>
|
||||
---
|
||||
blivet/formats/disklabel.py | 2 ++
|
||||
1 file changed, 2 insertions(+)
|
||||
|
||||
diff --git a/blivet/formats/disklabel.py b/blivet/formats/disklabel.py
|
||||
index 44f9834c..e93a4c13 100644
|
||||
--- a/blivet/formats/disklabel.py
|
||||
+++ b/blivet/formats/disklabel.py
|
||||
@@ -223,6 +223,8 @@ def get_platform_label_types(cls):
|
||||
label_types = ["msdos", "gpt"]
|
||||
if arch.is_pmac():
|
||||
label_types = ["mac"]
|
||||
+ elif arch.is_aarch64():
|
||||
+ label_types = ["gpt", "msdos"]
|
||||
elif arch.is_efi() and not arch.is_aarch64():
|
||||
label_types = ["gpt"]
|
||||
elif arch.is_s390():
|
||||
|
||||
From dda51536e902def437872fcdb3005efaff231703 Mon Sep 17 00:00:00 2001
|
||||
From: Peter Robinson <pbrobinson@gmail.com>
|
||||
Date: Thu, 16 Aug 2018 14:38:16 +0100
|
||||
Subject: [PATCH 4/6] arm: add support for EFI on ARMv7
|
||||
|
||||
We can now support EFI on ARMv7, so add/enable the checks for ARM too.
|
||||
|
||||
Signed-off-by: Peter Robinson <pbrobinson@gmail.com>
|
||||
---
|
||||
blivet/formats/disklabel.py | 2 ++
|
||||
1 file changed, 2 insertions(+)
|
||||
|
||||
diff --git a/blivet/formats/disklabel.py b/blivet/formats/disklabel.py
|
||||
index e93a4c13..e13ab2f8 100644
|
||||
--- a/blivet/formats/disklabel.py
|
||||
+++ b/blivet/formats/disklabel.py
|
||||
@@ -225,6 +225,8 @@ def get_platform_label_types(cls):
|
||||
label_types = ["mac"]
|
||||
elif arch.is_aarch64():
|
||||
label_types = ["gpt", "msdos"]
|
||||
+ elif arch.is_efi() and arch.is_arm():
|
||||
+ label_types = ["msdos", "gpt"]
|
||||
elif arch.is_efi() and not arch.is_aarch64():
|
||||
label_types = ["gpt"]
|
||||
elif arch.is_s390():
|
||||
|
||||
From 1cdd509f2034f456402f39045425cbdfe62bde97 Mon Sep 17 00:00:00 2001
|
||||
From: Peter Robinson <pbrobinson@gmail.com>
|
||||
Date: Thu, 23 Aug 2018 14:23:38 +0100
|
||||
Subject: [PATCH 5/6] Update disk label tests for ARM platforms
|
||||
|
||||
UEFI supports either gpt or msdos but different platforms have different
|
||||
requirements. Update the disk label tests to test the following:
|
||||
- aarch64: gpt default but msdos option also supported
|
||||
- ARMv7 UEFI: msdos default but gpt option also supported
|
||||
- ARMv7 extlinux: msdos default, also support gpt
|
||||
|
||||
Signed-off-by: Peter Robinson <pbrobinson@gmail.com>
|
||||
---
|
||||
tests/formats_test/disklabel_test.py | 12 ++++++++++++
|
||||
1 file changed, 12 insertions(+)
|
||||
|
||||
diff --git a/tests/formats_test/disklabel_test.py b/tests/formats_test/disklabel_test.py
|
||||
index 4b6608f5..3edbdb0b 100644
|
||||
--- a/tests/formats_test/disklabel_test.py
|
||||
+++ b/tests/formats_test/disklabel_test.py
|
||||
@@ -71,6 +71,7 @@ def test_platform_label_types(self, arch):
|
||||
arch.is_s390.return_value = False
|
||||
arch.is_efi.return_value = False
|
||||
arch.is_aarch64.return_value = False
|
||||
+ arch.is_arm.return_value = False
|
||||
arch.is_pmac.return_value = False
|
||||
|
||||
self.assertEqual(disklabel_class.get_platform_label_types(), ["msdos", "gpt"])
|
||||
@@ -81,8 +82,18 @@ def test_platform_label_types(self, arch):
|
||||
|
||||
arch.is_efi.return_value = True
|
||||
self.assertEqual(disklabel_class.get_platform_label_types(), ["gpt"])
|
||||
+ arch.is_aarch64.return_value = True
|
||||
+ self.assertEqual(disklabel_class.get_platform_label_types(), ["gpt", "msdos"])
|
||||
+ arch.is_aarch64.return_value = False
|
||||
+ arch.is_arm.return_value = True
|
||||
+ self.assertEqual(disklabel_class.get_platform_label_types(), ["msdos", "gpt"])
|
||||
+ arch.is_arm.return_value = False
|
||||
arch.is_efi.return_value = False
|
||||
|
||||
+ arch.is_arm.return_value = True
|
||||
+ self.assertEqual(disklabel_class.get_platform_label_types(), ["msdos", "gpt"])
|
||||
+ arch.is_arm.return_value = False
|
||||
+
|
||||
arch.is_s390.return_value = True
|
||||
self.assertEqual(disklabel_class.get_platform_label_types(), ["msdos", "dasd"])
|
||||
arch.is_s390.return_value = False
|
||||
@@ -123,6 +134,7 @@ def test_best_label_type(self, arch):
|
||||
arch.is_s390.return_value = False
|
||||
arch.is_efi.return_value = False
|
||||
arch.is_aarch64.return_value = False
|
||||
+ arch.is_arm.return_value = False
|
||||
arch.is_pmac.return_value = False
|
||||
|
||||
with mock.patch.object(dl, '_label_type_size_check') as size_check:
|
||||
|
||||
From e0e6ac41cea805c3bf56852bfe2cd67d4bfe0b83 Mon Sep 17 00:00:00 2001
|
||||
From: Peter Robinson <pbrobinson@gmail.com>
|
||||
Date: Thu, 23 Aug 2018 15:54:51 +0100
|
||||
Subject: [PATCH 6/6] Drop omap partition table tests on ARM platforms
|
||||
|
||||
We no longer need the /boot/uboot tests for omap platforms, so
|
||||
drop them as they're obsolete.
|
||||
|
||||
Signed-off-by: Peter Robinson <pbrobinson@gmail.com>
|
||||
---
|
||||
tests/devices_test/partition_test.py | 14 ++------------
|
||||
1 file changed, 2 insertions(+), 12 deletions(-)
|
||||
|
||||
diff --git a/tests/devices_test/partition_test.py b/tests/devices_test/partition_test.py
|
||||
index 394ffc27..08c0447d 100644
|
||||
--- a/tests/devices_test/partition_test.py
|
||||
+++ b/tests/devices_test/partition_test.py
|
||||
@@ -26,11 +26,9 @@
|
||||
Weighted(fstype="efi", mountpoint="/boot/efi", true_funcs=['is_efi'], weight=5000),
|
||||
Weighted(fstype="prepboot", mountpoint=None, true_funcs=['is_ppc', 'is_ipseries'], weight=5000),
|
||||
Weighted(fstype="appleboot", mountpoint=None, true_funcs=['is_ppc', 'is_pmac'], weight=5000),
|
||||
- Weighted(fstype="vfat", mountpoint="/boot/uboot", true_funcs=['is_arm', 'is_omap_arm'], weight=5000),
|
||||
- Weighted(fstype=None, mountpoint="/", true_funcs=['is_arm'], weight=-100),
|
||||
- Weighted(fstype=None, mountpoint="/", true_funcs=['is_arm', 'is_omap_arm'], weight=-100)]
|
||||
+ Weighted(fstype=None, mountpoint="/", true_funcs=['is_arm'], weight=-100)]
|
||||
|
||||
-arch_funcs = ['is_arm', 'is_efi', 'is_ipseries', 'is_omap_arm', 'is_pmac', 'is_ppc', 'is_x86']
|
||||
+arch_funcs = ['is_arm', 'is_efi', 'is_ipseries', 'is_pmac', 'is_ppc', 'is_x86']
|
||||
|
||||
|
||||
class PartitionDeviceTestCase(unittest.TestCase):
|
||||
@@ -309,14 +307,6 @@ def test_weight_1(self, *patches):
|
||||
fmt.mountpoint = "/"
|
||||
self.assertEqual(dev.weight, -100)
|
||||
|
||||
- arch.is_omap_arm.return_value = False
|
||||
- fmt.mountpoint = "/boot/uboot"
|
||||
- fmt.type = "vfat"
|
||||
- self.assertEqual(dev.weight, 0)
|
||||
-
|
||||
- arch.is_omap_arm.return_value = True
|
||||
- self.assertEqual(dev.weight, 5000)
|
||||
-
|
||||
#
|
||||
# ppc
|
||||
#
|
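The net effect of the disk label changes in this patch set can be summarized in one small function. This is only an illustrative condensation of the ordering that get_platform_label_types() ends up with; the real method also handles s390 and the other cases shown above.

def platform_label_types(is_pmac, is_aarch64, is_arm, is_efi):
    if is_pmac:
        return ["mac"]
    if is_aarch64:
        return ["gpt", "msdos"]     # 64-bit ARM: gpt first, msdos still allowed
    if is_efi and is_arm:
        return ["msdos", "gpt"]     # ARMv7 UEFI: msdos default, gpt allowed
    if is_efi:
        return ["gpt"]
    return ["msdos", "gpt"]         # BIOS/extlinux style defaults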
44
SOURCES/0006-Fix-options-for-ISCSI-functions.patch
Normal file
@ -0,0 +1,44 @@
|
||||
From 04dc595e3921879fa3e5b0f82506d63fdea4d2c8 Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Wed, 3 Oct 2018 14:11:08 +0200
|
||||
Subject: [PATCH] Fix options for ISCSI functions
|
||||
|
||||
Correct mutual authentication options in UDisks are
|
||||
"reverse-username" and "reverse-password".
|
||||
|
||||
Resolves: rhbz#1635569
|
||||
---
|
||||
blivet/iscsi.py | 8 ++++----
|
||||
1 file changed, 4 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/blivet/iscsi.py b/blivet/iscsi.py
|
||||
index b979e01c..ca51f8ed 100644
|
||||
--- a/blivet/iscsi.py
|
||||
+++ b/blivet/iscsi.py
|
||||
@@ -385,9 +385,9 @@ class iSCSI(object):
|
||||
if password:
|
||||
auth_info["password"] = GLib.Variant("s", password)
|
||||
if r_username:
|
||||
- auth_info["r_username"] = GLib.Variant("s", r_username)
|
||||
+ auth_info["reverse-username"] = GLib.Variant("s", r_username)
|
||||
if r_password:
|
||||
- auth_info["r_password"] = GLib.Variant("s", r_password)
|
||||
+ auth_info["reverse-password"] = GLib.Variant("s", r_password)
|
||||
|
||||
args = GLib.Variant("(sqa{sv})", (ipaddr, int(port), auth_info))
|
||||
nodes, _n_nodes = self._call_initiator_method("DiscoverSendTargets", args)
|
||||
@@ -423,9 +423,9 @@ class iSCSI(object):
|
||||
if password:
|
||||
auth_info["password"] = GLib.Variant("s", password)
|
||||
if r_username:
|
||||
- auth_info["r_username"] = GLib.Variant("s", r_username)
|
||||
+ auth_info["reverse-username"] = GLib.Variant("s", r_username)
|
||||
if r_password:
|
||||
- auth_info["r_password"] = GLib.Variant("s", r_password)
|
||||
+ auth_info["reverse-password"] = GLib.Variant("s", r_password)
|
||||
|
||||
try:
|
||||
self._login(node, auth_info)
|
||||
--
|
||||
2.17.2
|
||||
|
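For reference, a minimal sketch of how the corrected auth dictionary is assembled for the UDisks call. It assumes PyGObject is available, as blivet itself requires, and shows only the keys touched by this patch.

from gi.repository import GLib


def chap_auth_info(password=None, r_username=None, r_password=None):
    auth_info = {}
    if password:
        auth_info["password"] = GLib.Variant("s", password)
    if r_username:
        # UDisks expects "reverse-username", not "r_username"
        auth_info["reverse-username"] = GLib.Variant("s", r_username)
    if r_password:
        auth_info["reverse-password"] = GLib.Variant("s", r_password)
    return auth_info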
@ -1,77 +0,0 @@
|
||||
From 62af1d7f96b8ed8eb8f2732787576161ae5da79f Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Thu, 13 Oct 2022 10:47:52 +0200
|
||||
Subject: [PATCH] Revert "Remove the Blivet.roots attribute"
|
||||
|
||||
This reverts commit 19a826073345ca6b57a8f9a95ec855892320300e.
|
||||
---
|
||||
blivet/blivet.py | 21 +++++++++++++++++++++
|
||||
blivet/devicefactory.py | 3 +++
|
||||
2 files changed, 24 insertions(+)
|
||||
|
||||
diff --git a/blivet/blivet.py b/blivet/blivet.py
|
||||
index bf72ee9c..dc066b03 100644
|
||||
--- a/blivet/blivet.py
|
||||
+++ b/blivet/blivet.py
|
||||
@@ -88,6 +88,7 @@ class Blivet(object):
|
||||
self.devicetree = DeviceTree(ignored_disks=self.ignored_disks,
|
||||
exclusive_disks=self.exclusive_disks,
|
||||
disk_images=self.disk_images)
|
||||
+ self.roots = []
|
||||
|
||||
@property
|
||||
def short_product_name(self):
|
||||
@@ -1314,5 +1315,25 @@ class Blivet(object):
|
||||
p = partition.disk.format.parted_disk.getPartitionByPath(partition.path)
|
||||
partition.parted_partition = p
|
||||
|
||||
+ for root in new.roots:
|
||||
+ root.swaps = [new.devicetree.get_device_by_id(d.id, hidden=True) for d in root.swaps]
|
||||
+ root.swaps = [s for s in root.swaps if s]
|
||||
+
|
||||
+ removed = set()
|
||||
+ for (mountpoint, old_dev) in root.mounts.items():
|
||||
+ if old_dev is None:
|
||||
+ continue
|
||||
+
|
||||
+ new_dev = new.devicetree.get_device_by_id(old_dev.id, hidden=True)
|
||||
+ if new_dev is None:
|
||||
+ # if the device has been removed don't include this
|
||||
+ # mountpoint at all
|
||||
+ removed.add(mountpoint)
|
||||
+ else:
|
||||
+ root.mounts[mountpoint] = new_dev
|
||||
+
|
||||
+ for mnt in removed:
|
||||
+ del root.mounts[mnt]
|
||||
+
|
||||
log.debug("finished Blivet copy")
|
||||
return new
|
||||
diff --git a/blivet/devicefactory.py b/blivet/devicefactory.py
|
||||
index 8105bfc7..6f460f6d 100644
|
||||
--- a/blivet/devicefactory.py
|
||||
+++ b/blivet/devicefactory.py
|
||||
@@ -383,6 +383,7 @@ class DeviceFactory(object):
|
||||
# used for error recovery
|
||||
self.__devices = []
|
||||
self.__actions = []
|
||||
+ self.__roots = []
|
||||
|
||||
def _is_container_encrypted(self):
|
||||
return all(isinstance(p, LUKSDevice) for p in self.device.container.parents)
|
||||
@@ -994,10 +995,12 @@ class DeviceFactory(object):
|
||||
_blivet_copy = self.storage.copy()
|
||||
self.__devices = _blivet_copy.devicetree._devices
|
||||
self.__actions = _blivet_copy.devicetree._actions
|
||||
+ self.__roots = _blivet_copy.roots
|
||||
|
||||
def _revert_devicetree(self):
|
||||
self.storage.devicetree._devices = self.__devices
|
||||
self.storage.devicetree._actions = self.__actions
|
||||
+ self.storage.roots = self.__roots
|
||||
|
||||
|
||||
class PartitionFactory(DeviceFactory):
|
||||
--
|
||||
2.38.1
|
||||
|
@ -1,45 +0,0 @@
|
||||
From 1561bfe8820118178bbb07021adc1cacd875c4c7 Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Tue, 18 Oct 2022 12:28:37 +0200
|
||||
Subject: [PATCH] Fix potential AttributeError when getting stratis blockdev
|
||||
info
|
||||
|
||||
---
|
||||
blivet/static_data/stratis_info.py | 12 +++++++-----
|
||||
1 file changed, 7 insertions(+), 5 deletions(-)
|
||||
|
||||
diff --git a/blivet/static_data/stratis_info.py b/blivet/static_data/stratis_info.py
|
||||
index bd1c5a18..42f230ee 100644
|
||||
--- a/blivet/static_data/stratis_info.py
|
||||
+++ b/blivet/static_data/stratis_info.py
|
||||
@@ -124,20 +124,22 @@ class StratisInfo(object):
|
||||
log.error("Failed to get DBus properties of '%s'", blockdev_path)
|
||||
return None
|
||||
|
||||
+ blockdev_uuid = str(uuid.UUID(properties["Uuid"]))
|
||||
+
|
||||
pool_path = properties["Pool"]
|
||||
if pool_path == "/":
|
||||
pool_name = ""
|
||||
+ return StratisBlockdevInfo(path=properties["Devnode"], uuid=blockdev_uuid,
|
||||
+ pool_name="", pool_uuid="", object_path=blockdev_path)
|
||||
else:
|
||||
pool_info = self._get_pool_info(properties["Pool"])
|
||||
if not pool_info:
|
||||
return None
|
||||
pool_name = pool_info.name
|
||||
|
||||
- blockdev_uuid = str(uuid.UUID(properties["Uuid"]))
|
||||
-
|
||||
- return StratisBlockdevInfo(path=properties["Devnode"], uuid=blockdev_uuid,
|
||||
- pool_name=pool_name, pool_uuid=pool_info.uuid,
|
||||
- object_path=blockdev_path)
|
||||
+ return StratisBlockdevInfo(path=properties["Devnode"], uuid=blockdev_uuid,
|
||||
+ pool_name=pool_name, pool_uuid=pool_info.uuid,
|
||||
+ object_path=blockdev_path)
|
||||
|
||||
def _get_locked_pools_info(self):
|
||||
locked_pools = []
|
||||
--
|
||||
2.38.1
|
||||
|
@ -0,0 +1,45 @@
|
||||
From 0b6f818f46e3b7c5b9be33216ef8438f59d7bcf1 Mon Sep 17 00:00:00 2001
|
||||
From: David Lehman <dlehman@redhat.com>
|
||||
Date: Thu, 18 Oct 2018 10:07:31 -0400
|
||||
Subject: [PATCH] Wipe all stale metadata after creating md array. (#1639682)
|
||||
|
||||
---
|
||||
blivet/devices/md.py | 4 ++++
|
||||
tests/devices_test/device_methods_test.py | 3 ++-
|
||||
2 files changed, 6 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/blivet/devices/md.py b/blivet/devices/md.py
|
||||
index dad099e8..6a837df0 100644
|
||||
--- a/blivet/devices/md.py
|
||||
+++ b/blivet/devices/md.py
|
||||
@@ -31,6 +31,7 @@
|
||||
from ..devicelibs import mdraid, raid
|
||||
|
||||
from .. import errors
|
||||
+from ..formats import DeviceFormat
|
||||
from .. import util
|
||||
from ..static_data import pvs_info
|
||||
from ..storage_log import log_method_call
|
||||
@@ -563,6 +564,9 @@ def remove_stale_lvm():
|
||||
|
||||
remove_stale_lvm()
|
||||
|
||||
+ # remove any other stale metadata before proceeding
|
||||
+ DeviceFormat(device=self.path, exists=True).destroy()
|
||||
+
|
||||
def _create(self):
|
||||
""" Create the device. """
|
||||
log_method_call(self, self.name, status=self.status)
|
||||
diff --git a/tests/devices_test/device_methods_test.py b/tests/devices_test/device_methods_test.py
|
||||
index 8e40e6b6..12d5f7d8 100644
|
||||
--- a/tests/devices_test/device_methods_test.py
|
||||
+++ b/tests/devices_test/device_methods_test.py
|
||||
@@ -404,6 +404,7 @@ def test_setup(self):
|
||||
self.assertTrue(self.patches["md"].activate.called)
|
||||
|
||||
def test_create(self):
|
||||
- super(MDRaidArrayDeviceMethodsTestCase, self).test_create()
|
||||
+ with patch("blivet.devices.md.DeviceFormat"):
|
||||
+ super(MDRaidArrayDeviceMethodsTestCase, self).test_create()
|
||||
self.device._create()
|
||||
self.assertTrue(self.patches["md"].create.called)
|
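blivet performs the wipe through the generic DeviceFormat(device=..., exists=True).destroy() call shown above. A rough command-line equivalent of that step, for illustration only, would be:

import subprocess


def wipe_stale_signatures(md_path):
    # clear leftover signatures (old LVM PV headers, filesystems, ...) that
    # may still be readable on the freshly created array
    subprocess.run(["wipefs", "--all", md_path], check=True)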
@ -0,0 +1,29 @@
|
||||
From 653a3df662d10d0c8cc7f34138efd89a61f531a3 Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Wed, 9 Jan 2019 13:03:49 +0100
|
||||
Subject: [PATCH] Copy the iSCSI initiator name file to the installed system
|
||||
|
||||
The initiatorname.iscsi file is used (sometimes) during boot so
|
||||
we need to write the configuration to the installed system.
|
||||
|
||||
Resolves: rhbz#1664587
|
||||
---
|
||||
blivet/iscsi.py | 5 +++++
|
||||
1 file changed, 5 insertions(+)
|
||||
|
||||
diff --git a/blivet/iscsi.py b/blivet/iscsi.py
|
||||
index 3e44e6ed..f053577d 100644
|
||||
--- a/blivet/iscsi.py
|
||||
+++ b/blivet/iscsi.py
|
||||
@@ -563,6 +563,11 @@ def write(self, root, storage): # pylint: disable=unused-argument
|
||||
shutil.copytree("/var/lib/iscsi", root + "/var/lib/iscsi",
|
||||
symlinks=True)
|
||||
|
||||
+ # copy the initiator file too
|
||||
+ if not os.path.isdir(root + "/etc/iscsi"):
|
||||
+ os.makedirs(root + "/etc/iscsi", 0o755)
|
||||
+ shutil.copyfile(INITIATOR_FILE, root + INITIATOR_FILE)
|
||||
+
|
||||
def get_node(self, name, address, port, iface):
|
||||
for node in self.active_nodes():
|
||||
if node.name == name and node.address == address and \
|
34896
SOURCES/0008-po-updates.patch
Normal file
File diff suppressed because it is too large
@ -1,27 +0,0 @@
|
||||
From b747c4ed07937f54a546ffb2f2c8c95e0797dd6c Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Thu, 20 Oct 2022 15:19:29 +0200
|
||||
Subject: [PATCH] tests: Skip XFS resize test on CentOS/RHEL 8
|
||||
|
||||
Partitions on loop devices are broken on CentOS/RHEL 8.
|
||||
---
|
||||
tests/skip.yml | 6 ++++++
|
||||
1 file changed, 6 insertions(+)
|
||||
|
||||
diff --git a/tests/skip.yml b/tests/skip.yml
|
||||
index 568c3fff..66b34493 100644
|
||||
--- a/tests/skip.yml
|
||||
+++ b/tests/skip.yml
|
||||
@@ -29,3 +29,9 @@
|
||||
- distro: "centos"
|
||||
version: "9"
|
||||
reason: "Creating RAID 1 LV on CentOS/RHEL 9 causes a system deadlock"
|
||||
+
|
||||
+- test: storage_tests.formats_test.fs_test.XFSTestCase.test_resize
|
||||
+ skip_on:
|
||||
+ - distro: ["centos", "enterprise_linux"]
|
||||
+ version: "8"
|
||||
+ reason: "Creating partitions on loop devices is broken on CentOS/RHEL 8 latest kernel"
|
||||
--
|
||||
2.37.3
|
||||
|
@ -0,0 +1,33 @@
|
||||
From 8adbf9cf56f486f2f974cf6cdfda657293aff141 Mon Sep 17 00:00:00 2001
|
||||
From: David Lehman <dlehman@redhat.com>
|
||||
Date: Fri, 19 Oct 2018 09:49:56 -0400
|
||||
Subject: [PATCH 1/2] Require libfc instead of fcoe for offloaded FCoE.
|
||||
(#1575953)
|
||||
|
||||
---
|
||||
blivet/fcoe.py | 4 ++--
|
||||
1 file changed, 2 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/blivet/fcoe.py b/blivet/fcoe.py
|
||||
index 1a2cf9d4..3a1887dc 100644
|
||||
--- a/blivet/fcoe.py
|
||||
+++ b/blivet/fcoe.py
|
||||
@@ -32,13 +32,13 @@ _fcoe_module_loaded = False
|
||||
def has_fcoe():
|
||||
global _fcoe_module_loaded
|
||||
if not _fcoe_module_loaded:
|
||||
- util.run_program(["modprobe", "fcoe"])
|
||||
+ util.run_program(["modprobe", "libfc"])
|
||||
_fcoe_module_loaded = True
|
||||
if "bnx2x" in util.lsmod():
|
||||
log.info("fcoe: loading bnx2fc")
|
||||
util.run_program(["modprobe", "bnx2fc"])
|
||||
|
||||
- return os.access("/sys/module/fcoe", os.X_OK)
|
||||
+ return os.access("/sys/module/libfc", os.X_OK)
|
||||
|
||||
|
||||
class FCoE(object):
|
||||
--
|
||||
2.17.2
|
||||
|
@ -1,160 +0,0 @@
|
||||
From 9618b84f94187efddc7316c2546bed923a91ecf9 Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Thu, 3 Nov 2022 08:36:27 +0100
|
||||
Subject: [PATCH 1/2] Revert "Set XFS minimal size to 300 MiB"
|
||||
|
||||
This reverts commit 307d49833771d161314bae50c68e70dc35c3bb36.
|
||||
---
|
||||
blivet/formats/fs.py | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py
|
||||
index 8c346aa5..33922f3a 100644
|
||||
--- a/blivet/formats/fs.py
|
||||
+++ b/blivet/formats/fs.py
|
||||
@@ -1091,7 +1091,7 @@ class XFS(FS):
|
||||
_modules = ["xfs"]
|
||||
_labelfs = fslabeling.XFSLabeling()
|
||||
_uuidfs = fsuuid.XFSUUID()
|
||||
- _min_size = Size("300 MiB")
|
||||
+ _min_size = Size("16 MiB")
|
||||
_max_size = Size("16 EiB")
|
||||
_formattable = True
|
||||
_linux_native = True
|
||||
--
|
||||
2.38.1
|
||||
|
||||
|
||||
From 24d94922d6879baa85aaa101f6b21efa568a9cbc Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Thu, 3 Nov 2022 08:36:39 +0100
|
||||
Subject: [PATCH 2/2] Revert "tests: Create bigger devices for XFS testing"
|
||||
|
||||
This reverts commit 467cb8024010b2cabb1e92d9e64f6d3cbe949ad9.
|
||||
---
|
||||
tests/storage_tests/formats_test/fs_test.py | 7 +++----
|
||||
tests/storage_tests/formats_test/fslabeling.py | 4 +---
|
||||
tests/storage_tests/formats_test/fsuuid.py | 4 +---
|
||||
tests/storage_tests/formats_test/labeling_test.py | 2 --
|
||||
tests/storage_tests/formats_test/uuid_test.py | 3 ---
|
||||
5 files changed, 5 insertions(+), 15 deletions(-)
|
||||
|
||||
diff --git a/tests/storage_tests/formats_test/fs_test.py b/tests/storage_tests/formats_test/fs_test.py
|
||||
index cf8fb441..97f4cbbe 100644
|
||||
--- a/tests/storage_tests/formats_test/fs_test.py
|
||||
+++ b/tests/storage_tests/formats_test/fs_test.py
|
||||
@@ -54,7 +54,6 @@ class ReiserFSTestCase(fstesting.FSAsRoot):
|
||||
|
||||
class XFSTestCase(fstesting.FSAsRoot):
|
||||
_fs_class = fs.XFS
|
||||
- _DEVICE_SIZE = Size("500 MiB")
|
||||
|
||||
def can_resize(self, an_fs):
|
||||
resize_tasks = (an_fs._resize, an_fs._size_info)
|
||||
@@ -96,12 +95,12 @@ class XFSTestCase(fstesting.FSAsRoot):
|
||||
self.assertFalse(an_fs.resizable)
|
||||
# Not resizable, so can not do resizing actions.
|
||||
with self.assertRaises(DeviceFormatError):
|
||||
- an_fs.target_size = Size("300 MiB")
|
||||
+ an_fs.target_size = Size("64 MiB")
|
||||
with self.assertRaises(DeviceFormatError):
|
||||
an_fs.do_resize()
|
||||
else:
|
||||
disk = DiskDevice(os.path.basename(self.loop_devices[0]))
|
||||
- part = self._create_partition(disk, Size("300 MiB"))
|
||||
+ part = self._create_partition(disk, Size("50 MiB"))
|
||||
an_fs = self._fs_class()
|
||||
an_fs.device = part.path
|
||||
self.assertIsNone(an_fs.create())
|
||||
@@ -114,7 +113,7 @@ class XFSTestCase(fstesting.FSAsRoot):
|
||||
part = self._create_partition(disk, size=part.size + Size("40 MiB"))
|
||||
|
||||
# Try a reasonable target size
|
||||
- TARGET_SIZE = Size("325 MiB")
|
||||
+ TARGET_SIZE = Size("64 MiB")
|
||||
an_fs.target_size = TARGET_SIZE
|
||||
self.assertEqual(an_fs.target_size, TARGET_SIZE)
|
||||
self.assertNotEqual(an_fs._size, TARGET_SIZE)
|
||||
diff --git a/tests/storage_tests/formats_test/fslabeling.py b/tests/storage_tests/formats_test/fslabeling.py
|
||||
index ebe0b70a..0e0dc261 100644
|
||||
--- a/tests/storage_tests/formats_test/fslabeling.py
|
||||
+++ b/tests/storage_tests/formats_test/fslabeling.py
|
||||
@@ -21,10 +21,8 @@ class LabelingAsRoot(loopbackedtestcase.LoopBackedTestCase):
|
||||
_invalid_label = abc.abstractproperty(
|
||||
doc="A label which is invalid for this filesystem.")
|
||||
|
||||
- _DEVICE_SIZE = Size("100 MiB")
|
||||
-
|
||||
def __init__(self, methodName='run_test'):
|
||||
- super(LabelingAsRoot, self).__init__(methodName=methodName, device_spec=[self._DEVICE_SIZE])
|
||||
+ super(LabelingAsRoot, self).__init__(methodName=methodName, device_spec=[Size("100 MiB")])
|
||||
|
||||
def setUp(self):
|
||||
an_fs = self._fs_class()
|
||||
diff --git a/tests/storage_tests/formats_test/fsuuid.py b/tests/storage_tests/formats_test/fsuuid.py
|
||||
index 0b9762fd..16aa19a6 100644
|
||||
--- a/tests/storage_tests/formats_test/fsuuid.py
|
||||
+++ b/tests/storage_tests/formats_test/fsuuid.py
|
||||
@@ -23,11 +23,9 @@ class SetUUID(loopbackedtestcase.LoopBackedTestCase):
|
||||
_invalid_uuid = abc.abstractproperty(
|
||||
doc="An invalid UUID for this filesystem.")
|
||||
|
||||
- _DEVICE_SIZE = Size("100 MiB")
|
||||
-
|
||||
def __init__(self, methodName='run_test'):
|
||||
super(SetUUID, self).__init__(methodName=methodName,
|
||||
- device_spec=[self._DEVICE_SIZE])
|
||||
+ device_spec=[Size("100 MiB")])
|
||||
|
||||
def setUp(self):
|
||||
an_fs = self._fs_class()
|
||||
diff --git a/tests/storage_tests/formats_test/labeling_test.py b/tests/storage_tests/formats_test/labeling_test.py
|
||||
index 0702260a..d24e6619 100644
|
||||
--- a/tests/storage_tests/formats_test/labeling_test.py
|
||||
+++ b/tests/storage_tests/formats_test/labeling_test.py
|
||||
@@ -1,7 +1,6 @@
|
||||
import unittest
|
||||
|
||||
from blivet.formats import device_formats
|
||||
-from blivet.size import Size
|
||||
import blivet.formats.fs as fs
|
||||
import blivet.formats.swap as swap
|
||||
|
||||
@@ -62,7 +61,6 @@ class InitializationTestCase(unittest.TestCase):
|
||||
class XFSTestCase(fslabeling.CompleteLabelingAsRoot):
|
||||
_fs_class = fs.XFS
|
||||
_invalid_label = "root filesystem"
|
||||
- _DEVICE_SIZE = Size("500 MiB")
|
||||
|
||||
|
||||
class FATFSTestCase(fslabeling.CompleteLabelingAsRoot):
|
||||
diff --git a/tests/storage_tests/formats_test/uuid_test.py b/tests/storage_tests/formats_test/uuid_test.py
|
||||
index af35c0ee..ee8d452e 100644
|
||||
--- a/tests/storage_tests/formats_test/uuid_test.py
|
||||
+++ b/tests/storage_tests/formats_test/uuid_test.py
|
||||
@@ -2,7 +2,6 @@ import unittest
|
||||
|
||||
import blivet.formats.fs as fs
|
||||
import blivet.formats.swap as swap
|
||||
-from blivet.size import Size
|
||||
|
||||
from . import fsuuid
|
||||
|
||||
@@ -53,14 +52,12 @@ class XFSTestCase(fsuuid.SetUUIDWithMkFs):
|
||||
_fs_class = fs.XFS
|
||||
_invalid_uuid = "abcdefgh-ijkl-mnop-qrst-uvwxyz123456"
|
||||
_valid_uuid = "97e3d40f-dca8-497d-8b86-92f257402465"
|
||||
- _DEVICE_SIZE = Size("500 MiB")
|
||||
|
||||
|
||||
class XFSAfterTestCase(fsuuid.SetUUIDAfterMkFs):
|
||||
_fs_class = fs.XFS
|
||||
_invalid_uuid = "abcdefgh-ijkl-mnop-qrst-uvwxyz123456"
|
||||
_valid_uuid = "97e3d40f-dca8-497d-8b86-92f257402465"
|
||||
- _DEVICE_SIZE = Size("500 MiB")
|
||||
|
||||
|
||||
class FATFSTestCase(fsuuid.SetUUIDWithMkFs):
|
||||
--
|
||||
2.38.1
|
||||
|
@ -1,55 +0,0 @@
|
||||
From fed62af06eb1584adbacd821dfe79c2df52c6aa4 Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Wed, 2 Nov 2022 12:14:28 +0100
|
||||
Subject: [PATCH] Catch BlockDevNotImplementedError for btrfs plugin calls
|
||||
|
||||
This is a workaround for RHEL where the btrfs plugin is not
|
||||
available and where we might still try to call some libblockdev
|
||||
functions to gather information about preexisting btrfs devices.
|
||||
---
|
||||
blivet/devices/btrfs.py | 8 ++++----
|
||||
1 file changed, 4 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/blivet/devices/btrfs.py b/blivet/devices/btrfs.py
|
||||
index 0e029715..1ae6a04d 100644
|
||||
--- a/blivet/devices/btrfs.py
|
||||
+++ b/blivet/devices/btrfs.py
|
||||
@@ -362,7 +362,7 @@ class BTRFSVolumeDevice(BTRFSDevice, ContainerDevice, RaidDevice):
|
||||
try:
|
||||
subvols = blockdev.btrfs.list_subvolumes(mountpoint,
|
||||
snapshots_only=snapshots_only)
|
||||
- except blockdev.BtrfsError as e:
|
||||
+ except (blockdev.BtrfsError, blockdev.BlockDevNotImplementedError) as e:
|
||||
log.debug("failed to list subvolumes: %s", e)
|
||||
else:
|
||||
self._get_default_subvolume_id()
|
||||
@@ -400,7 +400,7 @@ class BTRFSVolumeDevice(BTRFSDevice, ContainerDevice, RaidDevice):
|
||||
with self._do_temp_mount() as mountpoint:
|
||||
try:
|
||||
subvolid = blockdev.btrfs.get_default_subvolume_id(mountpoint)
|
||||
- except blockdev.BtrfsError as e:
|
||||
+ except (blockdev.BtrfsError, blockdev.BlockDevNotImplementedError) as e:
|
||||
log.debug("failed to get default subvolume id: %s", e)
|
||||
|
||||
self._default_subvolume_id = subvolid
|
||||
@@ -413,7 +413,7 @@ class BTRFSVolumeDevice(BTRFSDevice, ContainerDevice, RaidDevice):
|
||||
with self._do_temp_mount() as mountpoint:
|
||||
try:
|
||||
blockdev.btrfs.set_default_subvolume(mountpoint, vol_id)
|
||||
- except blockdev.BtrfsError as e:
|
||||
+ except (blockdev.BtrfsError, blockdev.BlockDevNotImplementedError) as e:
|
||||
log.error("failed to set new default subvolume id (%s): %s",
|
||||
vol_id, e)
|
||||
# The only time we set a new default subvolume is so we can remove
|
||||
@@ -471,7 +471,7 @@ class BTRFSVolumeDevice(BTRFSDevice, ContainerDevice, RaidDevice):
|
||||
if not self.format.vol_uuid:
|
||||
try:
|
||||
bd_info = blockdev.btrfs.filesystem_info(self.parents[0].path)
|
||||
- except blockdev.BtrfsError as e:
|
||||
+ except (blockdev.BtrfsError, blockdev.BlockDevNotImplementedError) as e:
|
||||
log.error("failed to get filesystem info for new btrfs volume %s", e)
|
||||
else:
|
||||
self.format.vol_uuid = bd_info.uuid
|
||||
--
|
||||
2.38.1
|
||||
|
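The pattern is the same in all four hunks: treat a missing libblockdev plugin like an ordinary probe failure instead of crashing. A standalone sketch, assuming the libblockdev GObject introspection bindings are importable as in blivet:

import logging

import gi
gi.require_version("BlockDev", "2.0")
from gi.repository import BlockDev as blockdev

log = logging.getLogger("blivet")


def list_btrfs_subvolumes(mountpoint):
    try:
        return blockdev.btrfs.list_subvolumes(mountpoint, snapshots_only=False)
    except (blockdev.BtrfsError, blockdev.BlockDevNotImplementedError) as e:
        # btrfs plugin missing (or probing failed): log and carry on
        log.debug("failed to list subvolumes: %s", e)
        return []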
@ -0,0 +1,64 @@
|
||||
From 8bdade5e60b746e8d992289e71123ad27146a7f1 Mon Sep 17 00:00:00 2001
|
||||
From: David Lehman <dlehman@redhat.com>
|
||||
Date: Wed, 24 Oct 2018 20:08:48 -0400
|
||||
Subject: [PATCH] Use udev to determine if disk is a multipath member.
|
||||
|
||||
Related: rhbz#1575953
|
||||
---
|
||||
blivet/populator/helpers/disklabel.py | 3 +--
|
||||
tests/populator_test.py | 6 ++----
|
||||
2 files changed, 3 insertions(+), 6 deletions(-)
|
||||
|
||||
diff --git a/blivet/populator/helpers/disklabel.py b/blivet/populator/helpers/disklabel.py
|
||||
index c2acb117..db10638e 100644
|
||||
--- a/blivet/populator/helpers/disklabel.py
|
||||
+++ b/blivet/populator/helpers/disklabel.py
|
||||
@@ -28,7 +28,6 @@
|
||||
from ...errors import InvalidDiskLabelError
|
||||
from ...storage_log import log_exception_info, log_method_call
|
||||
from .formatpopulator import FormatPopulator
|
||||
-from ...static_data import mpath_members
|
||||
|
||||
import logging
|
||||
log = logging.getLogger("blivet")
|
||||
@@ -44,7 +43,7 @@ def match(cls, data, device):
|
||||
return (bool(udev.device_get_disklabel_type(data)) and
|
||||
not udev.device_is_biosraid_member(data) and
|
||||
udev.device_get_format(data) != "iso9660" and
|
||||
- not (device.is_disk and mpath_members.is_mpath_member(device.path)))
|
||||
+ not (device.is_disk and udev.device_get_format(data) == "mpath_member"))
|
||||
|
||||
def _get_kwargs(self):
|
||||
kwargs = super(DiskLabelFormatPopulator, self)._get_kwargs()
|
||||
diff --git a/tests/populator_test.py b/tests/populator_test.py
|
||||
index b6f70319..d9c326d7 100644
|
||||
--- a/tests/populator_test.py
|
||||
+++ b/tests/populator_test.py
|
||||
@@ -827,7 +827,6 @@ class HFSPopulatorTestCase(FormatPopulatorTestCase):
|
||||
class DiskLabelPopulatorTestCase(PopulatorHelperTestCase):
|
||||
helper_class = DiskLabelFormatPopulator
|
||||
|
||||
- @patch("blivet.static_data.mpath_members.is_mpath_member", return_value=False)
|
||||
@patch("blivet.udev.device_is_biosraid_member", return_value=False)
|
||||
@patch("blivet.udev.device_get_format", return_value=None)
|
||||
@patch("blivet.udev.device_get_disklabel_type", return_value="dos")
|
||||
@@ -836,7 +835,6 @@ def test_match(self, *args):
|
||||
device_get_disklabel_type = args[0]
|
||||
device_get_format = args[1]
|
||||
device_is_biosraid_member = args[2]
|
||||
- is_mpath_member = args[3]
|
||||
|
||||
device = Mock()
|
||||
device.is_disk = True
|
||||
@@ -861,9 +859,9 @@ def test_match(self, *args):
|
||||
device_is_biosraid_member.return_value = False
|
||||
|
||||
# no match for multipath members
|
||||
- is_mpath_member.return_value = True
|
||||
+ device_get_format.return_value = "mpath_member"
|
||||
self.assertFalse(self.helper_class.match(data, device))
|
||||
- is_mpath_member.return_value = False
|
||||
+ device_get_format.return_value = None
|
||||
|
||||
@patch("blivet.static_data.mpath_members.is_mpath_member", return_value=False)
|
||||
@patch("blivet.udev.device_is_biosraid_member", return_value=False)
|
@ -1,57 +0,0 @@
|
||||
From 2aba050e74dc5df483da022dcf436b101c7a4301 Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Wed, 11 Jan 2023 14:59:24 +0100
|
||||
Subject: [PATCH] Default to encryption sector size 512 for LUKS devices
|
||||
|
||||
We are currently letting cryptsetup decide the optimal encryption
|
||||
sector size for LUKS. The problem is that for disks with physical
|
||||
sector size 4096 cryptsetup will default to 4096 encryption sector
|
||||
size even if the drive's logical sector size is 512, which means
|
||||
these disks cannot be combined with other 512 logical sector size
|
||||
disks in LVM. This requires a more sophisticated solution in the
|
||||
future, but for now just default to 512 if not specified by the
|
||||
user otherwise.
|
||||
|
||||
Resolves: rhbz#2103800
|
||||
---
|
||||
blivet/formats/luks.py | 10 +++++++---
|
||||
tests/unit_tests/formats_tests/luks_test.py | 2 +-
|
||||
2 files changed, 8 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/blivet/formats/luks.py b/blivet/formats/luks.py
|
||||
index 8de4911f..2637e0c5 100644
|
||||
--- a/blivet/formats/luks.py
|
||||
+++ b/blivet/formats/luks.py
|
||||
@@ -166,9 +166,13 @@ class LUKS(DeviceFormat):
|
||||
if self.pbkdf_args.type == "pbkdf2" and self.pbkdf_args.max_memory_kb:
|
||||
log.warning("Memory limit is not used for pbkdf2 and it will be ignored.")
|
||||
|
||||
- self.luks_sector_size = kwargs.get("luks_sector_size") or 0
|
||||
- if self.luks_sector_size and self.luks_version != "luks2":
|
||||
- raise ValueError("Sector size argument is valid only for LUKS version 2.")
|
||||
+ self.luks_sector_size = kwargs.get("luks_sector_size")
|
||||
+ if self.luks_version == "luks2":
|
||||
+ if self.luks_sector_size is None:
|
||||
+ self.luks_sector_size = 512 # XXX we don't want cryptsetup choose automatically here so fallback to 512
|
||||
+ else:
|
||||
+ if self.luks_sector_size:
|
||||
+ raise ValueError("Sector size argument is valid only for LUKS version 2.")
|
||||
|
||||
def __repr__(self):
|
||||
s = DeviceFormat.__repr__(self)
|
||||
diff --git a/tests/unit_tests/formats_tests/luks_test.py b/tests/unit_tests/formats_tests/luks_test.py
|
||||
index 5ae6acfe..ec7b7592 100644
|
||||
--- a/tests/unit_tests/formats_tests/luks_test.py
|
||||
+++ b/tests/unit_tests/formats_tests/luks_test.py
|
||||
@@ -53,7 +53,7 @@ class LUKSNodevTestCase(unittest.TestCase):
|
||||
|
||||
def test_sector_size(self):
|
||||
fmt = LUKS()
|
||||
- self.assertEqual(fmt.luks_sector_size, 0)
|
||||
+ self.assertEqual(fmt.luks_sector_size, 512)
|
||||
|
||||
with self.assertRaises(ValueError):
|
||||
fmt = LUKS(luks_version="luks1", luks_sector_size=4096)
|
||||
--
|
||||
2.39.0
|
||||
|
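The rule this patch implements can be stated in a few lines. The following is only a sketch of the decision, with a hypothetical helper name, not the LUKS format class itself:

def resolve_luks_sector_size(luks_version, requested=None):
    """Pick the encryption sector size to pass to cryptsetup."""
    if luks_version == "luks2":
        # don't let cryptsetup pick 4096 on 4Kn disks; default to 512
        return requested if requested is not None else 512
    if requested:
        raise ValueError("Sector size argument is valid only for LUKS version 2.")
    return None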
@ -0,0 +1,45 @@
|
||||
From 5b0b1ffcf0d27306e52476984ebd8eb3af4a11aa Mon Sep 17 00:00:00 2001
|
||||
From: David Lehman <dlehman@redhat.com>
|
||||
Date: Mon, 25 Feb 2019 11:14:30 -0500
|
||||
Subject: [PATCH] Don't crash if blockdev mpath plugin isn't available.
|
||||
(#1672971)
|
||||
|
||||
---
|
||||
blivet/static_data/mpath_info.py | 7 +++++--
|
||||
1 file changed, 5 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/blivet/static_data/mpath_info.py b/blivet/static_data/mpath_info.py
|
||||
index b16f3c65..49ba4709 100644
|
||||
--- a/blivet/static_data/mpath_info.py
|
||||
+++ b/blivet/static_data/mpath_info.py
|
||||
@@ -27,6 +27,8 @@ from gi.repository import BlockDev as blockdev
|
||||
import logging
|
||||
log = logging.getLogger("blivet")
|
||||
|
||||
+from ..tasks import availability
|
||||
+
|
||||
|
||||
class MpathMembers(object):
|
||||
"""A cache for querying multipath member devices"""
|
||||
@@ -40,7 +42,7 @@ class MpathMembers(object):
|
||||
:param str device: path of the device to query
|
||||
|
||||
"""
|
||||
- if self._members is None:
|
||||
+ if self._members is None and availability.BLOCKDEV_MPATH_PLUGIN.available:
|
||||
self._members = set(blockdev.mpath.get_mpath_members())
|
||||
|
||||
device = os.path.realpath(device)
|
||||
@@ -56,7 +58,8 @@ class MpathMembers(object):
|
||||
"""
|
||||
device = os.path.realpath(device)
|
||||
device = device[len("/dev/"):]
|
||||
- if blockdev.mpath.is_mpath_member(device):
|
||||
+
|
||||
+ if availability.BLOCKDEV_MPATH_PLUGIN.available and blockdev.mpath.is_mpath_member(device):
|
||||
self._members.add(device)
|
||||
|
||||
def drop_cache(self):
|
||||
--
|
||||
2.17.2
|
||||
|
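A stripped-down sketch of the guard this patch adds: only ask the libblockdev mpath plugin for members when blivet's availability check says the plugin is usable. This is illustrative only; the real cache lives in blivet/static_data/mpath_info.py, and the availability and blockdev objects are passed in here just to keep the example self-contained.

import os


def mpath_member_names(availability, blockdev):
    if availability.BLOCKDEV_MPATH_PLUGIN.available:
        return set(blockdev.mpath.get_mpath_members())
    return set()   # plugin missing: behave as if there were no mpath members


def is_mpath_member(device_path, availability, blockdev):
    name = os.path.realpath(device_path)[len("/dev/"):]
    return name in mpath_member_names(availability, blockdev)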
@ -1,172 +0,0 @@
|
||||
From 11c3e695d9a2130f325bb5459a9881ff70338f71 Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Thu, 9 Mar 2023 13:18:42 +0100
|
||||
Subject: [PATCH] Add support for specifying stripe size for RAID LVs
|
||||
|
||||
---
|
||||
blivet/devices/lvm.py | 28 +++++++++++++++++---
|
||||
tests/storage_tests/devices_test/lvm_test.py | 12 +++++++--
|
||||
tests/unit_tests/devices_test/lvm_test.py | 27 +++++++++++++++++++
|
||||
3 files changed, 61 insertions(+), 6 deletions(-)
|
||||
|
||||
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
|
||||
index b8595d63..41358e9b 100644
|
||||
--- a/blivet/devices/lvm.py
|
||||
+++ b/blivet/devices/lvm.py
|
||||
@@ -659,7 +659,8 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
|
||||
|
||||
def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None,
|
||||
fmt=None, exists=False, sysfs_path='', grow=None, maxsize=None,
|
||||
- percent=None, cache_request=None, pvs=None, from_lvs=None):
|
||||
+ percent=None, cache_request=None, pvs=None, from_lvs=None,
|
||||
+ stripe_size=0):
|
||||
|
||||
if not exists:
|
||||
if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo", "cache-pool"] + lvm.raid_seg_types:
|
||||
@@ -756,6 +757,15 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
|
||||
if self._pv_specs:
|
||||
self._assign_pv_space()
|
||||
|
||||
+ self._stripe_size = stripe_size
|
||||
+ if not self.exists and self._stripe_size:
|
||||
+ if self.seg_type not in lvm.raid_seg_types:
|
||||
+ raise errors.DeviceError("Stripe size can be specified only for RAID volumes")
|
||||
+ if self.seg_type in ("raid1", "RAID1", "1", 1, "mirror"):
|
||||
+ raise errors.DeviceError("Specifying stripe size is not allowed for RAID1 or mirror")
|
||||
+ if self.cache:
|
||||
+ raise errors.DeviceError("Creating cached LVs with custom stripe size is not supported")
|
||||
+
|
||||
def _assign_pv_space(self):
|
||||
if not self.is_raid_lv:
|
||||
# nothing to do for non-RAID (and thus non-striped) LVs here
|
||||
@@ -2295,7 +2305,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
|
||||
parent_lv=None, int_type=None, origin=None, vorigin=False,
|
||||
metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
|
||||
compression=False, deduplication=False, index_memory=0,
|
||||
- write_policy=None, cache_mode=None, attach_to=None):
|
||||
+ write_policy=None, cache_mode=None, attach_to=None, stripe_size=0):
|
||||
"""
|
||||
:param name: the device name (generally a device node's basename)
|
||||
:type name: str
|
||||
@@ -2375,6 +2385,11 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
|
||||
be attached to when created
|
||||
:type attach_to: :class:`LVMLogicalVolumeDevice`
|
||||
|
||||
+ For RAID LVs only:
|
||||
+
|
||||
+ :keyword stripe_size: size of the RAID stripe
|
||||
+ :type stripe_size: :class:`~.size.Size`
|
||||
+
|
||||
"""
|
||||
|
||||
if isinstance(parents, (list, ParentList)):
|
||||
@@ -2395,7 +2410,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
|
||||
LVMCachePoolMixin.__init__(self, metadata_size, cache_mode, attach_to)
|
||||
LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
|
||||
fmt, exists, sysfs_path, grow, maxsize,
|
||||
- percent, cache_request, pvs, from_lvs)
|
||||
+ percent, cache_request, pvs, from_lvs,
|
||||
+ stripe_size)
|
||||
LVMVDOPoolMixin.__init__(self, compression, deduplication, index_memory,
|
||||
write_policy)
|
||||
LVMVDOLogicalVolumeMixin.__init__(self)
|
||||
@@ -2651,8 +2667,12 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
|
||||
pvs = [spec.pv.path for spec in self._pv_specs]
|
||||
pvs = pvs or None
|
||||
|
||||
+ extra = dict()
|
||||
+ if self._stripe_size:
|
||||
+ extra["stripesize"] = str(int(self._stripe_size.convert_to("KiB")))
|
||||
+
|
||||
blockdev.lvm.lvcreate(self.vg.name, self._name, self.size,
|
||||
- type=self.seg_type, pv_list=pvs)
|
||||
+ type=self.seg_type, pv_list=pvs, **extra)
|
||||
else:
|
||||
fast_pvs = [pv.path for pv in self.cache.fast_pvs]
|
||||
|
||||
diff --git a/tests/storage_tests/devices_test/lvm_test.py b/tests/storage_tests/devices_test/lvm_test.py
|
||||
index a055fc27..97ef1c4b 100644
|
||||
--- a/tests/storage_tests/devices_test/lvm_test.py
|
||||
+++ b/tests/storage_tests/devices_test/lvm_test.py
|
||||
@@ -1,4 +1,5 @@
|
||||
import os
|
||||
+import subprocess
|
||||
|
||||
from ..storagetestcase import StorageTestCase
|
||||
|
||||
@@ -127,7 +128,7 @@ class LVMTestCase(StorageTestCase):
|
||||
self.assertTrue(snap.is_snapshot_lv)
|
||||
self.assertEqual(snap.origin, thinlv)
|
||||
|
||||
- def _test_lvm_raid(self, seg_type, raid_level):
|
||||
+ def _test_lvm_raid(self, seg_type, raid_level, stripe_size=0):
|
||||
disk1 = self.storage.devicetree.get_device_by_path(self.vdevs[0])
|
||||
self.assertIsNotNone(disk1)
|
||||
self.storage.initialize_disk(disk1)
|
||||
@@ -151,7 +152,7 @@ class LVMTestCase(StorageTestCase):
|
||||
|
||||
raidlv = self.storage.new_lv(fmt_type="ext4", size=blivet.size.Size("50 MiB"),
|
||||
parents=[vg], name="blivetTestRAIDLV",
|
||||
- seg_type=seg_type, pvs=[pv1, pv2])
|
||||
+ seg_type=seg_type, pvs=[pv1, pv2], stripe_size=stripe_size)
|
||||
self.storage.create_device(raidlv)
|
||||
|
||||
self.storage.do_it()
|
||||
@@ -163,9 +164,16 @@ class LVMTestCase(StorageTestCase):
|
||||
self.assertEqual(raidlv.raid_level, raid_level)
|
||||
self.assertEqual(raidlv.seg_type, seg_type)
|
||||
|
||||
+ if stripe_size:
|
||||
+ out = subprocess.check_output(["lvs", "-o", "stripe_size", "--noheadings", "--nosuffix", "--units=b", raidlv.vg.name + "/" + raidlv.lvname])
|
||||
+ self.assertEqual(out.decode().strip(), str(int(stripe_size.convert_to())))
|
||||
+
|
||||
def test_lvm_raid_raid0(self):
|
||||
self._test_lvm_raid("raid0", blivet.devicelibs.raid.RAID0)
|
||||
|
||||
+ def test_lvm_raid_raid0_stripe_size(self):
|
||||
+ self._test_lvm_raid("raid0", blivet.devicelibs.raid.RAID0, stripe_size=blivet.size.Size("1 MiB"))
|
||||
+
|
||||
def test_lvm_raid_striped(self):
|
||||
self._test_lvm_raid("striped", blivet.devicelibs.raid.Striped)
|
||||
|
||||
diff --git a/tests/unit_tests/devices_test/lvm_test.py b/tests/unit_tests/devices_test/lvm_test.py
|
||||
index 995c2da4..d7b55224 100644
|
||||
--- a/tests/unit_tests/devices_test/lvm_test.py
|
||||
+++ b/tests/unit_tests/devices_test/lvm_test.py
|
||||
@@ -363,6 +363,33 @@ class LVMDeviceTest(unittest.TestCase):
|
||||
self.assertEqual(pv.format.free, Size("264 MiB"))
|
||||
self.assertEqual(pv2.format.free, Size("256 MiB"))
|
||||
|
||||
+ def test_lvm_logical_volume_raid_stripe_size(self):
|
||||
+ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
|
||||
+ size=Size("1025 MiB"))
|
||||
+ pv2 = StorageDevice("pv2", fmt=blivet.formats.get_format("lvmpv"),
|
||||
+ size=Size("513 MiB"))
|
||||
+ vg = LVMVolumeGroupDevice("testvg", parents=[pv, pv2])
|
||||
+
|
||||
+ with self.assertRaises(blivet.errors.DeviceError):
|
||||
+ # non-raid LV
|
||||
+ lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("1 GiB"),
|
||||
+ fmt=blivet.formats.get_format("xfs"),
|
||||
+ exists=False, stripe_size=Size("1 MiB"))
|
||||
+
|
||||
+ with self.assertRaises(blivet.errors.DeviceError):
|
||||
+ # raid1 LV
|
||||
+ lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("1 GiB"),
|
||||
+ fmt=blivet.formats.get_format("xfs"),
|
||||
+ exists=False, seg_type="raid1", pvs=[pv, pv2],
|
||||
+ stripe_size=Size("1 MiB"))
|
||||
+
|
||||
+ lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("1 GiB"),
|
||||
+ fmt=blivet.formats.get_format("xfs"),
|
||||
+ exists=False, seg_type="raid0", pvs=[pv, pv2],
|
||||
+ stripe_size=Size("1 MiB"))
|
||||
+
|
||||
+ self.assertEqual(lv._stripe_size, Size("1 MiB"))
|
||||
+
|
||||
def test_target_size(self):
|
||||
pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
|
||||
size=Size("1 GiB"))
|
||||
--
|
||||
2.40.1
|
||||
|
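A minimal usage sketch for the stripe_size keyword introduced by the patch above, modelled on its unit test; it only builds the device objects in memory (nothing is written to disk), and the PV/VG/LV names are placeholders.

    from blivet.size import Size
    from blivet.formats import get_format
    from blivet.devices import StorageDevice, LVMVolumeGroupDevice, LVMLogicalVolumeDevice

    # two fake PVs and a VG, exactly as in the unit test above
    pv1 = StorageDevice("pv1", fmt=get_format("lvmpv"), size=Size("1025 MiB"))
    pv2 = StorageDevice("pv2", fmt=get_format("lvmpv"), size=Size("513 MiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv1, pv2])

    # a RAID0 LV with a 1 MiB stripe; raid1/mirror or cached LVs would raise DeviceError
    lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("1 GiB"),
                                fmt=get_format("xfs"), exists=False,
                                seg_type="raid0", pvs=[pv1, pv2],
                                stripe_size=Size("1 MiB"))

    # when such an LV is actually created, lvcreate receives the value as
    # "stripesize" in KiB (see the _create() hunk above)
    print(lv._stripe_size)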
@ -0,0 +1,31 @@
|
||||
From d01281a69e317d7bae4a7698edb6583b6310d5c1 Mon Sep 17 00:00:00 2001
|
||||
From: David Lehman <dlehman@redhat.com>
|
||||
Date: Tue, 19 Mar 2019 11:51:47 -0400
|
||||
Subject: [PATCH] Ensure correct type of mpath cache member list.
|
||||
|
||||
Related: rhbz#1672971
|
||||
---
|
||||
blivet/static_data/mpath_info.py | 7 +++++--
|
||||
1 file changed, 5 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/blivet/static_data/mpath_info.py b/blivet/static_data/mpath_info.py
|
||||
index 49ba4709..64958ba8 100644
|
||||
--- a/blivet/static_data/mpath_info.py
|
||||
+++ b/blivet/static_data/mpath_info.py
|
||||
@@ -42,8 +42,11 @@ class MpathMembers(object):
|
||||
:param str device: path of the device to query
|
||||
|
||||
"""
|
||||
- if self._members is None and availability.BLOCKDEV_MPATH_PLUGIN.available:
|
||||
- self._members = set(blockdev.mpath.get_mpath_members())
|
||||
+ if self._members is None:
|
||||
+ if availability.BLOCKDEV_MPATH_PLUGIN.available:
|
||||
+ self._members = set(blockdev.mpath.get_mpath_members())
|
||||
+ else:
|
||||
+ self._members = set()
|
||||
|
||||
device = os.path.realpath(device)
|
||||
device = device[len("/dev/"):]
|
||||
--
|
||||
2.20.1
|
||||
|
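The fix above follows a common lazy-cache pattern: build the member set on first use and fall back to an empty set when the backing plugin is unavailable, so callers never test membership against None. A small generic sketch of that pattern (not blivet API; names are illustrative):

    class LazyMembers(object):
        def __init__(self, backend=None):
            self._members = None          # filled on first query
            self._backend = backend       # callable returning member names, or None

        def is_member(self, device):
            if self._members is None:
                if self._backend is not None:
                    self._members = set(self._backend())
                else:
                    self._members = set()  # plugin missing: cache an empty set
            return device in self._members

    members = LazyMembers(backend=None)
    print(members.is_member("sda"))        # False instead of a TypeError on None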
@ -1,68 +0,0 @@
|
||||
From 1af0d3c37a93e431790e641a329a7f34dabf291a Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Thu, 2 Mar 2023 12:34:42 +0100
|
||||
Subject: [PATCH] Fix setting kickstart data
|
||||
|
||||
When changing our code to PEP8 compliant we also changed some
|
||||
pykickstart properties like onPart by accident. This PR fixes this.
|
||||
|
||||
Resolves: rhbz#2175166
|
||||
---
|
||||
blivet/devices/btrfs.py | 4 ++--
|
||||
blivet/devices/lvm.py | 2 +-
|
||||
blivet/devices/partition.py | 6 +++---
|
||||
3 files changed, 6 insertions(+), 6 deletions(-)
|
||||
|
||||
diff --git a/blivet/devices/btrfs.py b/blivet/devices/btrfs.py
|
||||
index 1ae6a04d..3f56624e 100644
|
||||
--- a/blivet/devices/btrfs.py
|
||||
+++ b/blivet/devices/btrfs.py
|
||||
@@ -498,8 +498,8 @@ class BTRFSVolumeDevice(BTRFSDevice, ContainerDevice, RaidDevice):
|
||||
|
||||
def populate_ksdata(self, data):
|
||||
super(BTRFSVolumeDevice, self).populate_ksdata(data)
|
||||
- data.data_level = self.data_level.name if self.data_level else None
|
||||
- data.metadata_level = self.metadata_level.name if self.metadata_level else None
|
||||
+ data.dataLevel = self.data_level.name if self.data_level else None
|
||||
+ data.metaDataLevel = self.metadata_level.name if self.metadata_level else None
|
||||
data.devices = ["btrfs.%d" % p.id for p in self.parents]
|
||||
data.preexist = self.exists
|
||||
|
||||
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
|
||||
index 41358e9b..c3132457 100644
|
||||
--- a/blivet/devices/lvm.py
|
||||
+++ b/blivet/devices/lvm.py
|
||||
@@ -1161,7 +1161,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
|
||||
|
||||
if self.req_grow:
|
||||
# base size could be literal or percentage
|
||||
- data.max_size_mb = self.req_max_size.convert_to(MiB)
|
||||
+ data.maxSizeMB = self.req_max_size.convert_to(MiB)
|
||||
elif data.resize:
|
||||
data.size = self.target_size.convert_to(MiB)
|
||||
|
||||
diff --git a/blivet/devices/partition.py b/blivet/devices/partition.py
|
||||
index 89d907c2..0e9250ce 100644
|
||||
--- a/blivet/devices/partition.py
|
||||
+++ b/blivet/devices/partition.py
|
||||
@@ -982,14 +982,14 @@ class PartitionDevice(StorageDevice):
|
||||
data.size = self.req_base_size.round_to_nearest(MiB, rounding=ROUND_DOWN).convert_to(spec=MiB)
|
||||
data.grow = self.req_grow
|
||||
if self.req_grow:
|
||||
- data.max_size_mb = self.req_max_size.convert_to(MiB)
|
||||
+ data.maxSizeMB = self.req_max_size.convert_to(MiB)
|
||||
|
||||
# data.disk = self.disk.name # by-id
|
||||
if self.req_disks and len(self.req_disks) == 1:
|
||||
data.disk = self.disk.name
|
||||
- data.prim_only = self.req_primary
|
||||
+ data.primOnly = self.req_primary
|
||||
else:
|
||||
- data.on_part = self.name # by-id
|
||||
+ data.onPart = self.name # by-id
|
||||
|
||||
if data.resize:
|
||||
# on s390x in particular, fractional sizes are reported, which
|
||||
--
|
||||
2.40.1
|
||||
|
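For context on the rename above: pykickstart's data objects expose camelCase attributes (onPart, maxSizeMB, primOnly, dataLevel, ...), so the snake_case assignments introduced by the PEP 8 cleanup silustrated silently set unrelated attributes instead of filling the kickstart data. A small illustration, with SimpleNamespace standing in for a pykickstart PartData object:

    from types import SimpleNamespace

    # stand-in for pykickstart partition data with the attribute names it expects
    data = SimpleNamespace(onPart="", maxSizeMB=0, primOnly=False)

    def populate_ksdata(data, name, max_size_mb, primary):
        data.onPart = name            # matches the corrected assignments above
        data.maxSizeMB = max_size_mb
        data.primOnly = primary

    populate_ksdata(data, "sda2", 4096, True)
    print(data.onPart, data.maxSizeMB, data.primOnly)   # sda2 4096 True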
122
SOURCES/0013-Various-test-fixes.patch
Normal file
@ -0,0 +1,122 @@
|
||||
From c495f74951caa0104636032e00704a83ab5f73b1 Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Tue, 26 Mar 2019 12:58:53 +0100
|
||||
Subject: [PATCH 1/3] Properly clean after availability test case
|
||||
|
||||
We need to set availability of the 'mkfs.hfsplus' utility back to
|
||||
its real value after changing it to "always available" for this
|
||||
test case.
|
||||
---
|
||||
tests/devices_test/dependencies_test.py | 3 +++
|
||||
1 file changed, 3 insertions(+)
|
||||
|
||||
diff --git a/tests/devices_test/dependencies_test.py b/tests/devices_test/dependencies_test.py
|
||||
index 9dbdd24d..76bf758b 100644
|
||||
--- a/tests/devices_test/dependencies_test.py
|
||||
+++ b/tests/devices_test/dependencies_test.py
|
||||
@@ -69,6 +69,7 @@ class MockingDeviceDependenciesTestCase1(unittest.TestCase):
|
||||
|
||||
self.mdraid_method = availability.BLOCKDEV_MDRAID_PLUGIN._method
|
||||
self.dm_method = availability.BLOCKDEV_DM_PLUGIN._method
|
||||
+ self.hfsplus_method = availability.MKFS_HFSPLUS_APP._method
|
||||
self.cache_availability = availability.CACHE_AVAILABILITY
|
||||
|
||||
self.addCleanup(self._clean_up)
|
||||
@@ -105,10 +106,12 @@ class MockingDeviceDependenciesTestCase1(unittest.TestCase):
|
||||
def _clean_up(self):
|
||||
availability.BLOCKDEV_MDRAID_PLUGIN._method = self.mdraid_method
|
||||
availability.BLOCKDEV_DM_PLUGIN._method = self.dm_method
|
||||
+ availability.MKFS_HFSPLUS_APP._method = self.hfsplus_method
|
||||
|
||||
availability.CACHE_AVAILABILITY = False
|
||||
availability.BLOCKDEV_MDRAID_PLUGIN.available # pylint: disable=pointless-statement
|
||||
availability.BLOCKDEV_DM_PLUGIN.available # pylint: disable=pointless-statement
|
||||
+ availability.MKFS_HFSPLUS_APP.available # pylint: disable=pointless-statement
|
||||
|
||||
availability.CACHE_AVAILABILITY = self.cache_availability
|
||||
|
||||
--
|
||||
2.20.1
|
||||
|
||||
|
||||
From a6798882f5ba5b1e0ea655255d6f1fd5eda85f64 Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Tue, 26 Mar 2019 13:00:40 +0100
|
||||
Subject: [PATCH 2/3] Skip weak dependencies test if we don't have all
|
||||
libblockdev plugins
|
||||
|
||||
This test checks that creating devices works when we have all
|
||||
plugins and fails "nicely" if we don't have all plugins, so we
|
||||
actually need all the plugins for this test case.
|
||||
---
|
||||
tests/devices_test/dependencies_test.py | 5 +++++
|
||||
1 file changed, 5 insertions(+)
|
||||
|
||||
diff --git a/tests/devices_test/dependencies_test.py b/tests/devices_test/dependencies_test.py
|
||||
index 76bf758b..308d6192 100644
|
||||
--- a/tests/devices_test/dependencies_test.py
|
||||
+++ b/tests/devices_test/dependencies_test.py
|
||||
@@ -157,6 +157,11 @@ class MissingWeakDependenciesTestCase(unittest.TestCase):
|
||||
self.disk1_file = create_sparse_tempfile("disk1", Size("2GiB"))
|
||||
self.plugins = blockdev.plugin_specs_from_names(blockdev.get_available_plugin_names())
|
||||
|
||||
+ loaded_plugins = self.load_all_plugins()
|
||||
+ if not all(p in loaded_plugins for p in ("btrfs", "crypto", "lvm", "md")):
|
||||
+ # we don't have all plugins needed for this test case
|
||||
+ self.skipTest("Missing libblockdev plugins needed from weak dependencies test.")
|
||||
+
|
||||
def _clean_up(self):
|
||||
# reload all libblockdev plugins
|
||||
self.load_all_plugins()
|
||||
--
|
||||
2.20.1
|
||||
|
||||
|
||||
From 151fce2c9a98dc5a7943b314828518518a755ec8 Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Tue, 26 Mar 2019 13:36:31 +0100
|
||||
Subject: [PATCH 3/3] Check for format tools availability in action_test
|
||||
|
||||
---
|
||||
tests/action_test.py | 16 ++++++++++++++++
|
||||
1 file changed, 16 insertions(+)
|
||||
|
||||
diff --git a/tests/action_test.py b/tests/action_test.py
|
||||
index 93ed9e57..101d5a21 100644
|
||||
--- a/tests/action_test.py
|
||||
+++ b/tests/action_test.py
|
||||
@@ -19,6 +19,13 @@ from blivet.devices import MDRaidArrayDevice
|
||||
from blivet.devices import LVMVolumeGroupDevice
|
||||
from blivet.devices import LVMLogicalVolumeDevice
|
||||
|
||||
+# format classes
|
||||
+from blivet.formats.fs import Ext2FS
|
||||
+from blivet.formats.fs import Ext3FS
|
||||
+from blivet.formats.fs import Ext4FS
|
||||
+from blivet.formats.fs import FATFS
|
||||
+from blivet.formats.fs import XFS
|
||||
+
|
||||
# action classes
|
||||
from blivet.deviceaction import ActionCreateDevice
|
||||
from blivet.deviceaction import ActionResizeDevice
|
||||
@@ -39,8 +46,17 @@ DEVICE_CLASSES = [
|
||||
LVMLogicalVolumeDevice
|
||||
]
|
||||
|
||||
+FORMAT_CLASSES = [
|
||||
+ Ext2FS,
|
||||
+ Ext3FS,
|
||||
+ Ext4FS,
|
||||
+ FATFS,
|
||||
+ XFS
|
||||
+]
|
||||
+
|
||||
|
||||
@unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES), "some unsupported device classes required for this test")
|
||||
+@unittest.skipUnless(not any(x().utils_available for x in FORMAT_CLASSES), "some unsupported format classes required for this test")
|
||||
class DeviceActionTestCase(StorageTestCase):
|
||||
|
||||
""" DeviceActionTestSuite """
|
||||
--
|
||||
2.20.1
|
||||
|
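The first of the three test fixes above relies on the usual save-and-restore pattern for module-level state in unittest; a generic sketch of that pattern (the class and attribute here are placeholders, not blivet's availability module):

    import unittest

    class FakeAvailability(object):            # placeholder for shared module state
        CACHE_AVAILABILITY = True

    class AvailabilityTestCase(unittest.TestCase):
        def setUp(self):
            # remember the real value and make sure it is restored even on failure
            self._saved = FakeAvailability.CACHE_AVAILABILITY
            self.addCleanup(self._clean_up)

        def _clean_up(self):
            FakeAvailability.CACHE_AVAILABILITY = self._saved

        def test_mutates_state(self):
            FakeAvailability.CACHE_AVAILABILITY = False    # safe: cleanup restores it
            self.assertFalse(FakeAvailability.CACHE_AVAILABILITY)

    if __name__ == "__main__":
        unittest.main()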
@ -1,133 +0,0 @@
|
||||
From c2b06150df0b876c7d442097b6c9ca90c9ca2ecc Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Thu, 4 May 2023 11:35:44 +0200
|
||||
Subject: [PATCH] Do not set memory limit for LUKS2 when running in FIPS mode
|
||||
|
||||
With FIPS enabled, LUKS uses pbkdf and not argon, so the memory
|
||||
limit is not a valid parameter.
|
||||
|
||||
Resolves: rhbz#2183437
|
||||
---
|
||||
blivet/devicelibs/crypto.py | 11 +++++++
|
||||
blivet/formats/luks.py | 12 ++++----
|
||||
tests/unit_tests/formats_tests/luks_test.py | 30 +++++++++++++++++++
|
||||
.../unit_tests/formats_tests/methods_test.py | 3 +-
|
||||
4 files changed, 50 insertions(+), 6 deletions(-)
|
||||
|
||||
diff --git a/blivet/devicelibs/crypto.py b/blivet/devicelibs/crypto.py
|
||||
index f0caf0f7..68e68db1 100644
|
||||
--- a/blivet/devicelibs/crypto.py
|
||||
+++ b/blivet/devicelibs/crypto.py
|
||||
@@ -21,6 +21,7 @@
|
||||
#
|
||||
|
||||
import hashlib
|
||||
+import os
|
||||
|
||||
import gi
|
||||
gi.require_version("BlockDev", "2.0")
|
||||
@@ -100,3 +101,13 @@ def calculate_integrity_metadata_size(device_size, algorithm=DEFAULT_INTEGRITY_A
|
||||
jsize = (jsize / SECTOR_SIZE + 1) * SECTOR_SIZE # round up to sector
|
||||
|
||||
return msize + jsize
|
||||
+
|
||||
+
|
||||
+def is_fips_enabled():
|
||||
+ if not os.path.exists("/proc/sys/crypto/fips_enabled"):
|
||||
+ # if the file doesn't exist, we are definitely not in FIPS mode
|
||||
+ return False
|
||||
+
|
||||
+ with open("/proc/sys/crypto/fips_enabled", "r") as f:
|
||||
+ enabled = f.read()
|
||||
+ return enabled.strip() == "1"
|
||||
diff --git a/blivet/formats/luks.py b/blivet/formats/luks.py
|
||||
index 2637e0c5..adf3c711 100644
|
||||
--- a/blivet/formats/luks.py
|
||||
+++ b/blivet/formats/luks.py
|
||||
@@ -303,11 +303,13 @@ class LUKS(DeviceFormat):
|
||||
if luks_data.pbkdf_args:
|
||||
self.pbkdf_args = luks_data.pbkdf_args
|
||||
else:
|
||||
- mem_limit = crypto.calculate_luks2_max_memory()
|
||||
- if mem_limit:
|
||||
- self.pbkdf_args = LUKS2PBKDFArgs(max_memory_kb=int(mem_limit.convert_to(KiB)))
|
||||
- luks_data.pbkdf_args = self.pbkdf_args
|
||||
- log.info("PBKDF arguments for LUKS2 not specified, using defaults with memory limit %s", mem_limit)
|
||||
+ # argon is not used with FIPS so we don't need to adjust the memory when in FIPS mode
|
||||
+ if not crypto.is_fips_enabled():
|
||||
+ mem_limit = crypto.calculate_luks2_max_memory()
|
||||
+ if mem_limit:
|
||||
+ self.pbkdf_args = LUKS2PBKDFArgs(max_memory_kb=int(mem_limit.convert_to(KiB)))
|
||||
+ luks_data.pbkdf_args = self.pbkdf_args
|
||||
+ log.info("PBKDF arguments for LUKS2 not specified, using defaults with memory limit %s", mem_limit)
|
||||
|
||||
if self.pbkdf_args:
|
||||
pbkdf = blockdev.CryptoLUKSPBKDF(type=self.pbkdf_args.type,
|
||||
diff --git a/tests/unit_tests/formats_tests/luks_test.py b/tests/unit_tests/formats_tests/luks_test.py
|
||||
index ec7b7592..1127e968 100644
|
||||
--- a/tests/unit_tests/formats_tests/luks_test.py
|
||||
+++ b/tests/unit_tests/formats_tests/luks_test.py
|
||||
@@ -6,9 +6,14 @@ except ImportError:
|
||||
import unittest
|
||||
|
||||
from blivet.formats.luks import LUKS
|
||||
+from blivet.size import Size
|
||||
+from blivet.static_data import luks_data
|
||||
|
||||
|
||||
class LUKSNodevTestCase(unittest.TestCase):
|
||||
+ def setUp(self):
|
||||
+ luks_data.pbkdf_args = None
|
||||
+
|
||||
def test_create_discard_option(self):
|
||||
# flags.discard_new=False --> no discard
|
||||
fmt = LUKS(exists=False)
|
||||
@@ -51,6 +56,31 @@ class LUKSNodevTestCase(unittest.TestCase):
|
||||
fmt = LUKS(cipher="aes-cbc-plain64")
|
||||
self.assertEqual(fmt.key_size, 0)
|
||||
|
||||
+ def test_luks2_pbkdf_memory_fips(self):
|
||||
+ fmt = LUKS()
|
||||
+ with patch("blivet.formats.luks.blockdev.crypto") as bd:
|
||||
+ # fips enabled, pbkdf memory should not be set
|
||||
+ with patch("blivet.formats.luks.crypto") as crypto:
|
||||
+ attrs = {"is_fips_enabled.return_value": True,
|
||||
+ "get_optimal_luks_sector_size.return_value": 0,
|
||||
+ "calculate_luks2_max_memory.return_value": Size("256 MiB")}
|
||||
+ crypto.configure_mock(**attrs)
|
||||
+
|
||||
+ fmt._create()
|
||||
+ crypto.calculate_luks2_max_memory.assert_not_called()
|
||||
+ self.assertEqual(bd.luks_format.call_args[1]["extra"].pbkdf.max_memory_kb, 0)
|
||||
+
|
||||
+ # fips disabled, pbkdf memory should be set
|
||||
+ with patch("blivet.formats.luks.crypto") as crypto:
|
||||
+ attrs = {"is_fips_enabled.return_value": False,
|
||||
+ "get_optimal_luks_sector_size.return_value": 0,
|
||||
+ "calculate_luks2_max_memory.return_value": Size("256 MiB")}
|
||||
+ crypto.configure_mock(**attrs)
|
||||
+
|
||||
+ fmt._create()
|
||||
+ crypto.calculate_luks2_max_memory.assert_called()
|
||||
+ self.assertEqual(bd.luks_format.call_args[1]["extra"].pbkdf.max_memory_kb, 256 * 1024)
|
||||
+
|
||||
def test_sector_size(self):
|
||||
fmt = LUKS()
|
||||
self.assertEqual(fmt.luks_sector_size, 512)
|
||||
diff --git a/tests/unit_tests/formats_tests/methods_test.py b/tests/unit_tests/formats_tests/methods_test.py
|
||||
index 2743b7db..5d30c260 100644
|
||||
--- a/tests/unit_tests/formats_tests/methods_test.py
|
||||
+++ b/tests/unit_tests/formats_tests/methods_test.py
|
||||
@@ -366,7 +366,8 @@ class LUKSMethodsTestCase(FormatMethodsTestCase):
|
||||
|
||||
def _test_create_backend(self):
|
||||
self.format.exists = False
|
||||
- self.format.create()
|
||||
+ with patch("blivet.devicelibs.crypto.is_fips_enabled", return_value=False):
|
||||
+ self.format.create()
|
||||
self.assertTrue(self.patches["blockdev"].crypto.luks_format.called) # pylint: disable=no-member
|
||||
|
||||
def _test_setup_backend(self):
|
||||
--
|
||||
2.40.1
|
||||
|
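A condensed sketch of the runtime check the patch above adds: FIPS mode is detected from /proc/sys/crypto/fips_enabled and the LUKS2 PBKDF memory limit is only applied when FIPS is off. The print calls merely mark where blivet builds LUKS2PBKDFArgs; treat the surrounding code as illustrative.

    import os

    def is_fips_enabled():
        path = "/proc/sys/crypto/fips_enabled"
        if not os.path.exists(path):
            return False                  # no such file: definitely not FIPS mode
        with open(path, "r") as f:
            return f.read().strip() == "1"

    if not is_fips_enabled():
        # here blivet computes a memory limit and builds
        # LUKS2PBKDFArgs(max_memory_kb=...) as in the hunk above
        print("FIPS off: a LUKS2 PBKDF memory limit may be set")
    else:
        print("FIPS on: leave the PBKDF memory limit alone (argon2 is not used)")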
110
SOURCES/0014-Tests-archive.patch
Normal file
@ -0,0 +1,110 @@
|
||||
From 545c41e6750d5e28743a7da9e43175302c4fa812 Mon Sep 17 00:00:00 2001
|
||||
From: David Lehman <dlehman@redhat.com>
|
||||
Date: Thu, 4 Apr 2019 13:52:54 -0400
|
||||
Subject: [PATCH 1/4] Remove profanity from an old comment.
|
||||
|
||||
---
|
||||
blivet/blivet.py | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
diff --git a/blivet/blivet.py b/blivet/blivet.py
|
||||
index 8128347f..ff4410ae 100644
|
||||
--- a/blivet/blivet.py
|
||||
+++ b/blivet/blivet.py
|
||||
@@ -875,7 +875,7 @@ def safe_device_name(self, name):
|
||||
|
||||
LVM limits lv names to 128 characters. I don't know the limits for
|
||||
the other various device types, so I'm going to pick a number so
|
||||
- that we don't have to have an entire fucking library to determine
|
||||
+ that we don't have to have an entire library to determine
|
||||
device name limits.
|
||||
"""
|
||||
max_len = 96 # No, you don't need longer names than this. Really.
|
||||
|
||||
From 7395fb481b7b7a5054a3ba12e07f40ba1c8d926a Mon Sep 17 00:00:00 2001
|
||||
From: David Lehman <dlehman@redhat.com>
|
||||
Date: Mon, 22 Apr 2019 17:44:42 -0400
|
||||
Subject: [PATCH 2/4] Add a target to create an archive of the unit tests.
|
||||
|
||||
---
|
||||
Makefile | 4 ++++
|
||||
1 file changed, 4 insertions(+)
|
||||
|
||||
diff --git a/Makefile b/Makefile
|
||||
index 76817278..f9b2066e 100644
|
||||
--- a/Makefile
|
||||
+++ b/Makefile
|
||||
@@ -120,6 +120,10 @@ archive: po-pull
|
||||
git checkout -- po/$(PKGNAME).pot
|
||||
@echo "The archive is in $(PKGNAME)-$(VERSION).tar.gz"
|
||||
|
||||
+tests-archive:
|
||||
+ git archive --format=tar --prefix=$(PKGNAME)-$(VERSION)/ $(VERSION_TAG) tests/ | gzip -9 > $(PKGNAME)-$(VERSION)-tests.tar.gz
|
||||
+ @echo "The test archive is in $(PKGNAME)-$(VERSION)-tests.tar.gz"
|
||||
+
|
||||
local: po-pull
|
||||
@make -B ChangeLog
|
||||
$(PYTHON) setup.py -q sdist --dist-dir .
|
||||
|
||||
From 28959739b46d22698c05f34494d2d9c67f37f0c4 Mon Sep 17 00:00:00 2001
|
||||
From: David Lehman <dlehman@redhat.com>
|
||||
Date: Mon, 22 Apr 2019 17:45:19 -0400
|
||||
Subject: [PATCH 3/4] Add spec file logic to include unit tests in SRPM.
|
||||
|
||||
---
|
||||
python-blivet.spec | 4 +++-
|
||||
1 file changed, 3 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/python-blivet.spec b/python-blivet.spec
|
||||
index 668e0913..23fa07f6 100644
|
||||
--- a/python-blivet.spec
|
||||
+++ b/python-blivet.spec
|
||||
@@ -29,6 +29,7 @@ License: LGPLv2+
|
||||
%global realname blivet
|
||||
%global realversion %{version}%{?prerelease}
|
||||
Source0: http://github.com/storaged-project/blivet/archive/%{realname}-%{realversion}.tar.gz
|
||||
+Source1: http://github.com/storaged-project/blivet/archive/%{realname}-%{realversion}-tests.tar.gz
|
||||
|
||||
# Versions of required components (done so we make sure the buildrequires
|
||||
# match the requires versions of things).
|
||||
@@ -165,7 +166,8 @@ configuration.
|
||||
%endif
|
||||
|
||||
%prep
|
||||
-%autosetup -n %{realname}-%{realversion} -p1
|
||||
+%autosetup -n %{realname}-%{realversion} -N
|
||||
+%autosetup -n %{realname}-%{realversion} -b1 -p1
|
||||
|
||||
%build
|
||||
%{?with_python2:make PYTHON=%{__python2}}
|
||||
|
||||
From 305c9b52ee5682baf53be660c501b7b263029699 Mon Sep 17 00:00:00 2001
|
||||
From: David Lehman <dlehman@redhat.com>
|
||||
Date: Fri, 26 Apr 2019 16:39:35 -0400
|
||||
Subject: [PATCH 4/4] Include tests archive where appropriate in make targets.
|
||||
|
||||
---
|
||||
Makefile | 3 +++
|
||||
1 file changed, 3 insertions(+)
|
||||
|
||||
diff --git a/Makefile b/Makefile
|
||||
index f9b2066e..552550a6 100644
|
||||
--- a/Makefile
|
||||
+++ b/Makefile
|
||||
@@ -119,6 +119,7 @@ archive: po-pull
|
||||
rm -rf $(PKGNAME)-$(VERSION)
|
||||
git checkout -- po/$(PKGNAME).pot
|
||||
@echo "The archive is in $(PKGNAME)-$(VERSION).tar.gz"
|
||||
+ @make tests-archive
|
||||
|
||||
tests-archive:
|
||||
git archive --format=tar --prefix=$(PKGNAME)-$(VERSION)/ $(VERSION_TAG) tests/ | gzip -9 > $(PKGNAME)-$(VERSION)-tests.tar.gz
|
||||
@@ -128,6 +129,8 @@ local: po-pull
|
||||
@make -B ChangeLog
|
||||
$(PYTHON) setup.py -q sdist --dist-dir .
|
||||
@echo "The archive is in $(PKGNAME)-$(VERSION).tar.gz"
|
||||
+ git ls-files tests/ | tar -T- -czf $(PKGNAME)-$(VERSION)-tests.tar.gz
|
||||
+ @echo "The test archive is in $(PKGNAME)-$(VERSION)-tests.tar.gz"
|
||||
|
||||
rpmlog:
|
||||
@git log --pretty="format:- %s (%ae)" $(RELEASE_TAG).. |sed -e 's/@.*)/)/'
|
@ -1,265 +0,0 @@
|
||||
From eb16230427fc1081f8515e6ad69ccf99ca521e5d Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Tue, 4 Apr 2023 13:31:40 +0200
|
||||
Subject: [PATCH 1/2] Add support for filesystem online resize
|
||||
|
||||
Resolves: rhbz#2168680
|
||||
---
|
||||
blivet/devices/lvm.py | 13 ++++++++-----
|
||||
blivet/devices/partition.py | 11 ++++++-----
|
||||
blivet/flags.py | 3 +++
|
||||
blivet/formats/fs.py | 32 ++++++++++++++++++++++++++++----
|
||||
blivet/formats/fslib.py | 7 +++++++
|
||||
5 files changed, 52 insertions(+), 14 deletions(-)
|
||||
|
||||
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
|
||||
index c3132457..ca45c4b5 100644
|
||||
--- a/blivet/devices/lvm.py
|
||||
+++ b/blivet/devices/lvm.py
|
||||
@@ -42,6 +42,7 @@ from .. import errors
|
||||
from .. import util
|
||||
from ..storage_log import log_method_call
|
||||
from .. import udev
|
||||
+from ..flags import flags
|
||||
from ..size import Size, KiB, MiB, ROUND_UP, ROUND_DOWN
|
||||
from ..static_data.lvm_info import lvs_info
|
||||
from ..tasks import availability
|
||||
@@ -2729,12 +2730,14 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
|
||||
# Setup VG parents (in case they are dmraid partitions for example)
|
||||
self.vg.setup_parents(orig=True)
|
||||
|
||||
- if self.original_format.exists:
|
||||
- self.original_format.teardown()
|
||||
- if self.format.exists:
|
||||
- self.format.teardown()
|
||||
+ if not flags.allow_online_fs_resize:
|
||||
+ if self.original_format.exists:
|
||||
+ self.original_format.teardown()
|
||||
+ if self.format.exists:
|
||||
+ self.format.teardown()
|
||||
+
|
||||
+ udev.settle()
|
||||
|
||||
- udev.settle()
|
||||
blockdev.lvm.lvresize(self.vg.name, self._name, self.size)
|
||||
|
||||
@type_specific
|
||||
diff --git a/blivet/devices/partition.py b/blivet/devices/partition.py
|
||||
index 0e9250ce..6ae4b8d3 100644
|
||||
--- a/blivet/devices/partition.py
|
||||
+++ b/blivet/devices/partition.py
|
||||
@@ -745,11 +745,12 @@ class PartitionDevice(StorageDevice):
|
||||
if not self.exists:
|
||||
raise errors.DeviceError("device has not been created")
|
||||
|
||||
- # don't teardown when resizing luks
|
||||
- if self.format.type == "luks" and self.children:
|
||||
- self.children[0].format.teardown()
|
||||
- else:
|
||||
- self.teardown()
|
||||
+ if not flags.allow_online_fs_resize:
|
||||
+ # don't teardown when resizing luks
|
||||
+ if self.format.type == "luks" and self.children:
|
||||
+ self.children[0].format.teardown()
|
||||
+ else:
|
||||
+ self.teardown()
|
||||
|
||||
if not self.sysfs_path:
|
||||
return
|
||||
diff --git a/blivet/flags.py b/blivet/flags.py
|
||||
index 6364164d..ecfa7ad7 100644
|
||||
--- a/blivet/flags.py
|
||||
+++ b/blivet/flags.py
|
||||
@@ -91,6 +91,9 @@ class Flags(object):
|
||||
|
||||
self.debug_threads = False
|
||||
|
||||
+ # Allow online filesystem resizes
|
||||
+ self.allow_online_fs_resize = False
|
||||
+
|
||||
def get_boot_cmdline(self):
|
||||
with open("/proc/cmdline") as f:
|
||||
buf = f.read().strip()
|
||||
diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py
|
||||
index 33922f3a..3f553eb0 100644
|
||||
--- a/blivet/formats/fs.py
|
||||
+++ b/blivet/formats/fs.py
|
||||
@@ -56,7 +56,7 @@ from ..i18n import N_
|
||||
from .. import udev
|
||||
from ..mounts import mounts_cache
|
||||
|
||||
-from .fslib import kernel_filesystems
|
||||
+from .fslib import kernel_filesystems, FSResize
|
||||
|
||||
import logging
|
||||
log = logging.getLogger("blivet")
|
||||
@@ -88,6 +88,9 @@ class FS(DeviceFormat):
|
||||
# value is already unpredictable and can change in the future...
|
||||
_metadata_size_factor = 1.0
|
||||
|
||||
+ # support for resize: grow/shrink, online/offline
|
||||
+ _resize_support = 0
|
||||
+
|
||||
config_actions_map = {"label": "write_label"}
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
@@ -436,12 +439,27 @@ class FS(DeviceFormat):
|
||||
self.write_uuid()
|
||||
|
||||
def _pre_resize(self):
|
||||
- # file systems need a check before being resized
|
||||
- self.do_check()
|
||||
+ if self.status:
|
||||
+ if flags.allow_online_fs_resize:
|
||||
+ if self.target_size > self.size and not self._resize_support & FSResize.ONLINE_GROW:
|
||||
+ raise FSError("This filesystem doesn't support online growing")
|
||||
+ if self.target_size < self.size and not self._resize_support & FSResize.ONLINE_SHRINK:
|
||||
+ raise FSError("This filesystem doesn't support online shrinking")
|
||||
+ else:
|
||||
+ raise FSError("Resizing of mounted filesystems is disabled")
|
||||
+
|
||||
+ if self.status:
|
||||
+ # fsck tools in general don't allow checks on mounted filesystems
|
||||
+ log.debug("Filesystem on %s is mounted, not checking", self.device)
|
||||
+ else:
|
||||
+ # file systems need a check before being resized
|
||||
+ self.do_check()
|
||||
+
|
||||
super(FS, self)._pre_resize()
|
||||
|
||||
def _post_resize(self):
|
||||
- self.do_check()
|
||||
+ if not self.status:
|
||||
+ self.do_check()
|
||||
super(FS, self)._post_resize()
|
||||
|
||||
def do_check(self):
|
||||
@@ -838,6 +856,7 @@ class Ext2FS(FS):
|
||||
_formattable = True
|
||||
_supported = True
|
||||
_resizable = True
|
||||
+ _resize_support = FSResize.ONLINE_GROW | FSResize.OFFLINE_GROW | FSResize.OFFLINE_SHRINK
|
||||
_linux_native = True
|
||||
_max_size = Size("8 TiB")
|
||||
_dump = True
|
||||
@@ -1097,6 +1116,7 @@ class XFS(FS):
|
||||
_linux_native = True
|
||||
_supported = True
|
||||
_resizable = True
|
||||
+ _resize_support = FSResize.ONLINE_GROW | FSResize.OFFLINE_GROW
|
||||
_packages = ["xfsprogs"]
|
||||
_fsck_class = fsck.XFSCK
|
||||
_info_class = fsinfo.XFSInfo
|
||||
@@ -1247,6 +1267,7 @@ class NTFS(FS):
|
||||
_labelfs = fslabeling.NTFSLabeling()
|
||||
_uuidfs = fsuuid.NTFSUUID()
|
||||
_resizable = True
|
||||
+ _resize_support = FSResize.OFFLINE_GROW | FSResize.OFFLINE_SHRINK
|
||||
_formattable = True
|
||||
_supported = True
|
||||
_min_size = Size("1 MiB")
|
||||
@@ -1490,6 +1511,9 @@ class TmpFS(NoDevFS):
|
||||
# same, nothing actually needs to be set
|
||||
pass
|
||||
|
||||
+ def _pre_resize(self):
|
||||
+ self.do_check()
|
||||
+
|
||||
def do_resize(self):
|
||||
# Override superclass method to record whether mount options
|
||||
# should include an explicit size specification.
|
||||
diff --git a/blivet/formats/fslib.py b/blivet/formats/fslib.py
|
||||
index ea93b1fd..8722e942 100644
|
||||
--- a/blivet/formats/fslib.py
|
||||
+++ b/blivet/formats/fslib.py
|
||||
@@ -36,3 +36,10 @@ def update_kernel_filesystems():
|
||||
|
||||
|
||||
update_kernel_filesystems()
|
||||
+
|
||||
+
|
||||
+class FSResize():
|
||||
+ OFFLINE_SHRINK = 1 << 1
|
||||
+ OFFLINE_GROW = 1 << 2
|
||||
+ ONLINE_SHRINK = 1 << 3
|
||||
+ ONLINE_GROW = 1 << 4
|
||||
--
|
||||
2.40.1
|
||||
|
||||
|
||||
From 3fce5d0bfd7b09a976ff49feed15077477c6a425 Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Thu, 6 Apr 2023 14:02:11 +0200
|
||||
Subject: [PATCH 2/2] Add a test case for filesystem online resize
|
||||
|
||||
Related: rhbz#2168680
|
||||
---
|
||||
tests/storage_tests/formats_test/fs_test.py | 43 ++++++++++++++++++++-
|
||||
1 file changed, 42 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/tests/storage_tests/formats_test/fs_test.py b/tests/storage_tests/formats_test/fs_test.py
|
||||
index 97f4cbbe..1d42dc21 100644
|
||||
--- a/tests/storage_tests/formats_test/fs_test.py
|
||||
+++ b/tests/storage_tests/formats_test/fs_test.py
|
||||
@@ -6,9 +6,10 @@ import parted
|
||||
|
||||
import blivet.formats.fs as fs
|
||||
from blivet.size import Size, ROUND_DOWN
|
||||
-from blivet.errors import DeviceFormatError
|
||||
+from blivet.errors import DeviceFormatError, FSError
|
||||
from blivet.formats import get_format
|
||||
from blivet.devices import PartitionDevice, DiskDevice
|
||||
+from blivet.flags import flags
|
||||
|
||||
from .loopbackedtestcase import LoopBackedTestCase
|
||||
|
||||
@@ -26,6 +27,46 @@ class Ext3FSTestCase(Ext2FSTestCase):
|
||||
class Ext4FSTestCase(Ext3FSTestCase):
|
||||
_fs_class = fs.Ext4FS
|
||||
|
||||
+ def test_online_resize(self):
|
||||
+ an_fs = self._fs_class()
|
||||
+ if not an_fs.formattable:
|
||||
+ self.skipTest("can not create filesystem %s" % an_fs.name)
|
||||
+ an_fs.device = self.loop_devices[0]
|
||||
+ self.assertIsNone(an_fs.create())
|
||||
+ an_fs.update_size_info()
|
||||
+
|
||||
+ if not self.can_resize(an_fs):
|
||||
+ self.skipTest("filesystem is not resizable")
|
||||
+
|
||||
+ # shrink offline first (ext doesn't support online shrinking)
|
||||
+ TARGET_SIZE = Size("64 MiB")
|
||||
+ an_fs.target_size = TARGET_SIZE
|
||||
+ self.assertEqual(an_fs.target_size, TARGET_SIZE)
|
||||
+ self.assertNotEqual(an_fs._size, TARGET_SIZE)
|
||||
+ self.assertIsNone(an_fs.do_resize())
|
||||
+
|
||||
+ with tempfile.TemporaryDirectory() as mountpoint:
|
||||
+ an_fs.mount(mountpoint=mountpoint)
|
||||
+
|
||||
+ # grow back when mounted
|
||||
+ TARGET_SIZE = Size("100 MiB")
|
||||
+ an_fs.target_size = TARGET_SIZE
|
||||
+ self.assertEqual(an_fs.target_size, TARGET_SIZE)
|
||||
+ self.assertNotEqual(an_fs._size, TARGET_SIZE)
|
||||
+
|
||||
+ # should fail, online resize disabled by default
|
||||
+ with self.assertRaisesRegex(FSError, "Resizing of mounted filesystems is disabled"):
|
||||
+ an_fs.do_resize()
|
||||
+
|
||||
+ # enable online resize
|
||||
+ flags.allow_online_fs_resize = True
|
||||
+ an_fs.do_resize()
|
||||
+ flags.allow_online_fs_resize = False
|
||||
+ self._test_sizes(an_fs)
|
||||
+ self.assertEqual(an_fs.system_mountpoint, mountpoint)
|
||||
+
|
||||
+ an_fs.unmount()
|
||||
+
|
||||
|
||||
class FATFSTestCase(fstesting.FSAsRoot):
|
||||
_fs_class = fs.FATFS
|
||||
--
|
||||
2.40.1
|
||||
|
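A short sketch of the new opt-in, following the Ext4 test case above: resizing a mounted filesystem still raises FSError by default and only proceeds once flags.allow_online_fs_resize is enabled. The device path is a placeholder, and the filesystem is assumed to exist, be mounted, and support online growing.

    import blivet.formats.fs as fs
    from blivet.flags import flags
    from blivet.size import Size

    an_fs = fs.Ext4FS(device="/dev/vdb1", exists=True)   # placeholder device, already mounted
    an_fs.update_size_info()
    an_fs.target_size = Size("100 MiB")

    flags.allow_online_fs_resize = True    # default False: do_resize() would raise FSError
    try:
        an_fs.do_resize()                  # grows the mounted filesystem online
    finally:
        flags.allow_online_fs_resize = False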
47
SOURCES/0015-Deactivate-incomplete-VGs.patch
Normal file
@ -0,0 +1,47 @@
|
||||
From 6528bb0149720b336c9da7b57eaea048d693871c Mon Sep 17 00:00:00 2001
|
||||
From: David Lehman <dlehman@redhat.com>
|
||||
Date: Wed, 20 Jun 2018 16:37:24 -0400
|
||||
Subject: [PATCH] Deactivate incomplete VGs along with everything else.
|
||||
|
||||
(cherry picked from commit 39637796ca1aa2f03c89b5ec86ac246eecca1570)
|
||||
---
|
||||
blivet/devices/lvm.py | 18 ++++++++++++++----
|
||||
1 file changed, 14 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
|
||||
index 0cb1a2ac..1e9da2a8 100644
|
||||
--- a/blivet/devices/lvm.py
|
||||
+++ b/blivet/devices/lvm.py
|
||||
@@ -216,15 +216,25 @@ class LVMVolumeGroupDevice(ContainerDevice):
|
||||
if lv.status:
|
||||
return True
|
||||
|
||||
+ # special handling for incomplete VGs
|
||||
+ if not self.complete:
|
||||
+ try:
|
||||
+ lvs_info = blockdev.lvm.lvs(vg_name=self.name)
|
||||
+ except blockdev.LVMError:
|
||||
+ lvs_info = dict()
|
||||
+
|
||||
+ for lv_info in lvs_info.values():
|
||||
+ lv_attr = udev.device_get_lv_attr(lv_info)
|
||||
+ if lv_attr and lv_attr[4] == 'a':
|
||||
+ return True
|
||||
+
|
||||
+ return False
|
||||
+
|
||||
# if any of our PVs are not active then we cannot be
|
||||
for pv in self.pvs:
|
||||
if not pv.status:
|
||||
return False
|
||||
|
||||
- # if we are missing some of our PVs we cannot be active
|
||||
- if not self.complete:
|
||||
- return False
|
||||
-
|
||||
return True
|
||||
|
||||
def _pre_setup(self, orig=False):
|
||||
--
|
||||
2.20.1
|
||||
|
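For reference, the activity test added above keys off LVM's lv_attr field, whose fifth character is the activation state ('a' means active). A standalone approximation that shells out to lvs instead of going through libblockdev (the VG name is a placeholder):

    import subprocess

    def vg_has_active_lv(vg_name):
        out = subprocess.check_output(
            ["lvs", "--noheadings", "-o", "lv_attr", vg_name])
        # lv_attr[4] is the state field; 'a' marks an active LV
        return any(line.strip()[4:5] == "a" for line in out.decode().splitlines())

    # example: vg_has_active_lv("testvg")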
@ -0,0 +1,31 @@
|
||||
From caec289d8220fc9a8d8b3d6e99271394f4ef83fe Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Wed, 27 Feb 2019 12:26:30 +0100
|
||||
Subject: [PATCH] Automatically adjust size of growable devices for new format
|
||||
|
||||
Without this, kickstart 'part /home --size=1 --grow --encrypted'
|
||||
will fail because the min size for LUKS is 2 MiB.
|
||||
|
||||
Resolves: rhbz#1680013
|
||||
---
|
||||
blivet/devices/storage.py | 7 ++++++-
|
||||
1 file changed, 6 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/blivet/devices/storage.py b/blivet/devices/storage.py
|
||||
index 904b60df..9d6001e8 100644
|
||||
--- a/blivet/devices/storage.py
|
||||
+++ b/blivet/devices/storage.py
|
||||
@@ -721,7 +721,12 @@ def _set_format(self, fmt):
|
||||
if fmt.max_size and fmt.max_size < self.size:
|
||||
raise errors.DeviceError("device is too large for new format")
|
||||
elif fmt.min_size and fmt.min_size > self.size:
|
||||
- raise errors.DeviceError("device is too small for new format")
|
||||
+ if self.growable:
|
||||
+ log.info("%s: using size %s instead of %s to accommodate "
|
||||
+ "format minimum size", self.name, fmt.min_size, self.size)
|
||||
+ self.size = fmt.min_size
|
||||
+ else:
|
||||
+ raise errors.DeviceError("device is too small for new format")
|
||||
|
||||
if self._format != fmt:
|
||||
callbacks.format_removed(device=self, fmt=self._format)
|
@ -1,382 +0,0 @@
|
||||
From d06c45db59d0e917dbab4c283f2f04c8f9206a6e Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Mon, 6 Mar 2023 10:51:42 +0100
|
||||
Subject: [PATCH 1/5] Allow changing iSCSI initiator name after setting it
|
||||
|
||||
Resolves: rhbz#2083139
|
||||
---
|
||||
blivet/iscsi.py | 13 +++++++++++--
|
||||
1 file changed, 11 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/blivet/iscsi.py b/blivet/iscsi.py
|
||||
index 86451db3..0d063f2a 100644
|
||||
--- a/blivet/iscsi.py
|
||||
+++ b/blivet/iscsi.py
|
||||
@@ -212,14 +212,23 @@ class iSCSI(object):
|
||||
@initiator.setter
|
||||
@storaged_iscsi_required(critical=True, eval_mode=util.EvalMode.onetime)
|
||||
def initiator(self, val):
|
||||
- if self.initiator_set and val != self._initiator:
|
||||
- raise ValueError(_("Unable to change iSCSI initiator name once set"))
|
||||
if len(val) == 0:
|
||||
raise ValueError(_("Must provide an iSCSI initiator name"))
|
||||
|
||||
+ active = self._get_active_sessions()
|
||||
+ if active:
|
||||
+ raise errors.ISCSIError(_("Cannot change initiator name with an active session"))
|
||||
+
|
||||
log.info("Setting up iSCSI initiator name %s", self.initiator)
|
||||
args = GLib.Variant("(sa{sv})", (val, None))
|
||||
self._call_initiator_method("SetInitiatorName", args)
|
||||
+
|
||||
+ if self.initiator_set and val != self._initiator:
|
||||
+ log.info("Restarting iscsid after initiator name change")
|
||||
+ rc = util.run_program(["systemctl", "restart", "iscsid"])
|
||||
+ if rc != 0:
|
||||
+ raise errors.ISCSIError(_("Failed to restart iscsid after initiator name change"))
|
||||
+
|
||||
self._initiator = val
|
||||
|
||||
def active_nodes(self, target=None):
|
||||
--
|
||||
2.40.1
|
||||
|
||||
|
||||
From b71991d65c270c023364b03c499b4bf3e245fbd0 Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Mon, 6 Mar 2023 15:10:28 +0100
|
||||
Subject: [PATCH 2/5] Add a basic test case for the iscsi module
|
||||
|
||||
Related: rhbz#2083139
|
||||
---
|
||||
tests/storage_tests/__init__.py | 2 +
|
||||
tests/storage_tests/iscsi_test.py | 157 +++++++++++++++++++++++++++++
|
||||
3 files changed, 162 insertions(+)
|
||||
create mode 100644 tests/storage_tests/iscsi_test.py
|
||||
|
||||
diff --git a/tests/storage_tests/__init__.py b/tests/storage_tests/__init__.py
|
||||
index 3b2a6cc4..e69fcc34 100644
|
||||
--- a/tests/storage_tests/__init__.py
|
||||
+++ b/tests/storage_tests/__init__.py
|
||||
@@ -3,3 +3,5 @@ from .formats_test import *
|
||||
|
||||
from .partitioning_test import *
|
||||
from .unsupported_disklabel_test import *
|
||||
+
|
||||
+from .iscsi_test import *
|
||||
diff --git a/tests/storage_tests/iscsi_test.py b/tests/storage_tests/iscsi_test.py
|
||||
new file mode 100644
|
||||
index 00000000..00cc7c36
|
||||
--- /dev/null
|
||||
+++ b/tests/storage_tests/iscsi_test.py
|
||||
@@ -0,0 +1,157 @@
|
||||
+import glob
|
||||
+import os
|
||||
+import re
|
||||
+import shutil
|
||||
+import subprocess
|
||||
+import unittest
|
||||
+
|
||||
+from contextlib import contextmanager
|
||||
+
|
||||
+from .storagetestcase import create_sparse_tempfile
|
||||
+
|
||||
+
|
||||
+def read_file(filename, mode="r"):
|
||||
+ with open(filename, mode) as f:
|
||||
+ content = f.read()
|
||||
+ return content
|
||||
+
|
||||
+
|
||||
+@contextmanager
|
||||
+def udev_settle():
|
||||
+ try:
|
||||
+ yield
|
||||
+ finally:
|
||||
+ os.system("udevadm settle")
|
||||
+
|
||||
+
|
||||
+def _delete_backstore(name):
|
||||
+ status = subprocess.call(["targetcli", "/backstores/fileio/ delete %s" % name],
|
||||
+ stdout=subprocess.DEVNULL)
|
||||
+ if status != 0:
|
||||
+ raise RuntimeError("Failed to delete the '%s' fileio backstore" % name)
|
||||
+
|
||||
+
|
||||
+def delete_iscsi_target(iqn, backstore=None):
|
||||
+ status = subprocess.call(["targetcli", "/iscsi delete %s" % iqn],
|
||||
+ stdout=subprocess.DEVNULL)
|
||||
+ if status != 0:
|
||||
+ raise RuntimeError("Failed to delete the '%s' iscsi device" % iqn)
|
||||
+
|
||||
+ if backstore is not None:
|
||||
+ _delete_backstore(backstore)
|
||||
+
|
||||
+
|
||||
+def create_iscsi_target(fpath, initiator_name=None):
|
||||
+ """
|
||||
+ Creates a new iSCSI target (using targetcli) on top of the
|
||||
+ :param:`fpath` backing file.
|
||||
+
|
||||
+ :param str fpath: path of the backing file
|
||||
+ :returns: iSCSI IQN, backstore name
|
||||
+ :rtype: tuple of str
|
||||
+
|
||||
+ """
|
||||
+
|
||||
+ # "register" the backing file as a fileio backstore
|
||||
+ store_name = os.path.basename(fpath)
|
||||
+ status = subprocess.call(["targetcli", "/backstores/fileio/ create %s %s" % (store_name, fpath)], stdout=subprocess.DEVNULL)
|
||||
+ if status != 0:
|
||||
+ raise RuntimeError("Failed to register '%s' as a fileio backstore" % fpath)
|
||||
+
|
||||
+ out = subprocess.check_output(["targetcli", "/backstores/fileio/%s info" % store_name])
|
||||
+ out = out.decode("utf-8")
|
||||
+ store_wwn = None
|
||||
+ for line in out.splitlines():
|
||||
+ if line.startswith("wwn: "):
|
||||
+ store_wwn = line[5:]
|
||||
+ if store_wwn is None:
|
||||
+ raise RuntimeError("Failed to determine '%s' backstore's wwn" % store_name)
|
||||
+
|
||||
+ # create a new iscsi device
|
||||
+ out = subprocess.check_output(["targetcli", "/iscsi create"])
|
||||
+ out = out.decode("utf-8")
|
||||
+ match = re.match(r'Created target (.*).', out)
|
||||
+ if match:
|
||||
+ iqn = match.groups()[0]
|
||||
+ else:
|
||||
+ _delete_backstore(store_name)
|
||||
+ raise RuntimeError("Failed to create a new iscsi target")
|
||||
+
|
||||
+ if initiator_name:
|
||||
+ status = subprocess.call(["targetcli", "/iscsi/%s/tpg1/acls create %s" % (iqn, initiator_name)], stdout=subprocess.DEVNULL)
|
||||
+ if status != 0:
|
||||
+ delete_iscsi_target(iqn, store_name)
|
||||
+ raise RuntimeError("Failed to set ACLs for '%s'" % iqn)
|
||||
+
|
||||
+ with udev_settle():
|
||||
+ status = subprocess.call(["targetcli", "/iscsi/%s/tpg1/luns create /backstores/fileio/%s" % (iqn, store_name)], stdout=subprocess.DEVNULL)
|
||||
+ if status != 0:
|
||||
+ delete_iscsi_target(iqn, store_name)
|
||||
+ raise RuntimeError("Failed to create a new LUN for '%s' using '%s'" % (iqn, store_name))
|
||||
+
|
||||
+ status = subprocess.call(["targetcli", "/iscsi/%s/tpg1 set attribute generate_node_acls=1" % iqn], stdout=subprocess.DEVNULL)
|
||||
+ if status != 0:
|
||||
+ raise RuntimeError("Failed to set ACLs for '%s'" % iqn)
|
||||
+
|
||||
+ return iqn, store_name
|
||||
+
|
||||
+
|
||||
+@unittest.skipUnless(os.geteuid() == 0, "requires root privileges")
|
||||
+@unittest.skipUnless(os.environ.get("JENKINS_HOME"), "jenkins only test")
|
||||
+@unittest.skipUnless(shutil.which("iscsiadm"), "iscsiadm not available")
|
||||
+class ISCSITestCase(unittest.TestCase):
|
||||
+
|
||||
+ _disk_size = 512 * 1024**2
|
||||
+ initiator = 'iqn.1994-05.com.redhat:iscsi-test'
|
||||
+
|
||||
+ def setUp(self):
|
||||
+ self.addCleanup(self._clean_up)
|
||||
+
|
||||
+ self._dev_file = None
|
||||
+ self.dev = None
|
||||
+
|
||||
+ self._dev_file = create_sparse_tempfile("blivet_test", self._disk_size)
|
||||
+ try:
|
||||
+ self.dev, self.backstore = create_iscsi_target(self._dev_file, self.initiator)
|
||||
+ except RuntimeError as e:
|
||||
+ raise RuntimeError("Failed to setup targetcli device for testing: %s" % e)
|
||||
+
|
||||
+ def _force_logout(self):
|
||||
+ subprocess.call(["iscsiadm", "--mode", "node", "--logout", "--name", self.dev], stdout=subprocess.DEVNULL)
|
||||
+
|
||||
+ def _clean_up(self):
|
||||
+ self._force_logout()
|
||||
+ delete_iscsi_target(self.dev, self.backstore)
|
||||
+ os.unlink(self._dev_file)
|
||||
+
|
||||
+ def test_discover_login(self):
|
||||
+ from blivet.iscsi import iscsi, has_iscsi
|
||||
+
|
||||
+ if not has_iscsi():
|
||||
+ self.skipTest("iSCSI not available, skipping")
|
||||
+
|
||||
+ iscsi.initiator = self.initiator
|
||||
+ nodes = iscsi.discover("127.0.0.1")
|
||||
+ self.assertTrue(nodes)
|
||||
+
|
||||
+ if len(nodes) > 1:
|
||||
+ self.skipTest("Discovered more than one iSCSI target on localhost, skipping")
|
||||
+
|
||||
+ self.assertEqual(nodes[0].address, "127.0.0.1")
|
||||
+ self.assertEqual(nodes[0].port, 3260)
|
||||
+ self.assertEqual(nodes[0].name, self.dev)
|
||||
+
|
||||
+ # change the initiator name
|
||||
+ iscsi.initiator = self.initiator + "_1"
|
||||
+ self.assertEqual(iscsi.initiator, self.initiator + "_1")
|
||||
+
|
||||
+ # try to login
|
||||
+ ret, err = iscsi.log_into_node(nodes[0])
|
||||
+ self.assertTrue(ret, "Login failed: %s" % err)
|
||||
+
|
||||
+ # check the session for initiator name
|
||||
+ sessions = glob.glob("/sys/class/iscsi_session/*/")
|
||||
+ self.assertTrue(sessions)
|
||||
+ self.assertEqual(len(sessions), 1)
|
||||
+ initiator = read_file(sessions[0] + "initiatorname").strip()
|
||||
+ self.assertEqual(initiator, iscsi.initiator)
|
||||
--
|
||||
2.40.1
|
||||
|
||||
|
||||
From 65e8150a7404e37dd2740841a88e7f2565836406 Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Mon, 6 Mar 2023 15:14:40 +0100
|
||||
Subject: [PATCH 3/5] tests: Use blivet-specific prefix for targetcli backing
|
||||
files
|
||||
|
||||
The code is originally from libblockdev, hence the "bd" prefix; we
|
||||
should use a different prefix for blivet to be able to identify
|
||||
which test suite failed to clean the files.
|
||||
|
||||
Related: rhbz#2083139
|
||||
---
|
||||
tests/storage_tests/storagetestcase.py | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
diff --git a/tests/storage_tests/storagetestcase.py b/tests/storage_tests/storagetestcase.py
|
||||
index 35d57ce9..9f859977 100644
|
||||
--- a/tests/storage_tests/storagetestcase.py
|
||||
+++ b/tests/storage_tests/storagetestcase.py
|
||||
@@ -39,7 +39,7 @@ def create_sparse_tempfile(name, size):
|
||||
:param size: the file size (in bytes)
|
||||
:returns: the path to the newly created file
|
||||
"""
|
||||
- (fd, path) = tempfile.mkstemp(prefix="bd.", suffix="-%s" % name)
|
||||
+ (fd, path) = tempfile.mkstemp(prefix="blivet.", suffix="-%s" % name)
|
||||
os.close(fd)
|
||||
create_sparse_file(path, size)
|
||||
return path
|
||||
--
|
||||
2.40.1
|
||||
|
||||
|
||||
From 41278ef1b3f949303fd30fff2ccdde75f713c9f8 Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Wed, 19 Jul 2023 13:57:39 +0200
|
||||
Subject: [PATCH 4/5] iscsi: Save firmware initiator name to
|
||||
/etc/iscsi/initiatorname.iscsi
|
||||
|
||||
Resolves: rhbz#2084043
|
||||
---
|
||||
blivet/iscsi.py | 5 +++++
|
||||
1 file changed, 5 insertions(+)
|
||||
|
||||
diff --git a/blivet/iscsi.py b/blivet/iscsi.py
|
||||
index 0d063f2a..8080a671 100644
|
||||
--- a/blivet/iscsi.py
|
||||
+++ b/blivet/iscsi.py
|
||||
@@ -160,6 +160,11 @@ class iSCSI(object):
|
||||
self._initiator = initiatorname
|
||||
except Exception as e: # pylint: disable=broad-except
|
||||
log.info("failed to get initiator name from iscsi firmware: %s", str(e))
|
||||
+ else:
|
||||
+ # write the firmware initiator to /etc/iscsi/initiatorname.iscsi
|
||||
+ log.info("Setting up firmware iSCSI initiator name %s", self.initiator)
|
||||
+ args = GLib.Variant("(sa{sv})", (initiatorname, None))
|
||||
+ self._call_initiator_method("SetInitiatorName", args)
|
||||
|
||||
# So that users can write iscsi() to get the singleton instance
|
||||
def __call__(self):
|
||||
--
|
||||
2.40.1
|
||||
|
||||
|
||||
From fce8b73965d968aab546bc7e0ecb65d1995da46f Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Wed, 19 Jul 2023 10:38:45 +0200
|
||||
Subject: [PATCH 5/5] tests: Improve iscsi_test.ISCSITestCase
|
||||
|
||||
Changed how we create the initiator name ACLs based on RTT test
|
||||
case for rhbz#2084043 and also improved the test case itself.
|
||||
|
||||
Related: rhbz#2083139
|
||||
---
|
||||
tests/storage_tests/iscsi_test.py | 36 +++++++++++++++++++++----------
|
||||
1 file changed, 25 insertions(+), 11 deletions(-)
|
||||
|
||||
diff --git a/tests/storage_tests/iscsi_test.py b/tests/storage_tests/iscsi_test.py
|
||||
index 00cc7c36..6cc83a59 100644
|
||||
--- a/tests/storage_tests/iscsi_test.py
|
||||
+++ b/tests/storage_tests/iscsi_test.py
|
||||
@@ -77,21 +77,17 @@ def create_iscsi_target(fpath, initiator_name=None):
|
||||
_delete_backstore(store_name)
|
||||
raise RuntimeError("Failed to create a new iscsi target")
|
||||
|
||||
- if initiator_name:
|
||||
- status = subprocess.call(["targetcli", "/iscsi/%s/tpg1/acls create %s" % (iqn, initiator_name)], stdout=subprocess.DEVNULL)
|
||||
- if status != 0:
|
||||
- delete_iscsi_target(iqn, store_name)
|
||||
- raise RuntimeError("Failed to set ACLs for '%s'" % iqn)
|
||||
-
|
||||
with udev_settle():
|
||||
status = subprocess.call(["targetcli", "/iscsi/%s/tpg1/luns create /backstores/fileio/%s" % (iqn, store_name)], stdout=subprocess.DEVNULL)
|
||||
if status != 0:
|
||||
delete_iscsi_target(iqn, store_name)
|
||||
raise RuntimeError("Failed to create a new LUN for '%s' using '%s'" % (iqn, store_name))
|
||||
|
||||
- status = subprocess.call(["targetcli", "/iscsi/%s/tpg1 set attribute generate_node_acls=1" % iqn], stdout=subprocess.DEVNULL)
|
||||
- if status != 0:
|
||||
- raise RuntimeError("Failed to set ACLs for '%s'" % iqn)
|
||||
+ if initiator_name:
|
||||
+ status = subprocess.call(["targetcli", "/iscsi/%s/tpg1/acls create %s" % (iqn, initiator_name)], stdout=subprocess.DEVNULL)
|
||||
+ if status != 0:
|
||||
+ delete_iscsi_target(iqn, store_name)
|
||||
+ raise RuntimeError("Failed to set ACLs for '%s'" % iqn)
|
||||
|
||||
return iqn, store_name
|
||||
|
||||
@@ -130,6 +126,7 @@ class ISCSITestCase(unittest.TestCase):
|
||||
if not has_iscsi():
|
||||
self.skipTest("iSCSI not available, skipping")
|
||||
|
||||
+ # initially set the initiator to the correct/allowed one
|
||||
iscsi.initiator = self.initiator
|
||||
nodes = iscsi.discover("127.0.0.1")
|
||||
self.assertTrue(nodes)
|
||||
@@ -141,11 +138,28 @@ class ISCSITestCase(unittest.TestCase):
|
||||
self.assertEqual(nodes[0].port, 3260)
|
||||
self.assertEqual(nodes[0].name, self.dev)
|
||||
|
||||
- # change the initiator name
|
||||
+ # change the initiator name to a wrong one
|
||||
iscsi.initiator = self.initiator + "_1"
|
||||
self.assertEqual(iscsi.initiator, self.initiator + "_1")
|
||||
|
||||
- # try to login
|
||||
+ # check the change made it to /etc/iscsi/initiatorname.iscsi
|
||||
+ initiator_file = read_file("/etc/iscsi/initiatorname.iscsi").strip()
|
||||
+ self.assertEqual(initiator_file, "InitiatorName=%s" % self.initiator + "_1")
|
||||
+
|
||||
+ # try to login (should fail)
|
||||
+ ret, err = iscsi.log_into_node(nodes[0])
|
||||
+ self.assertFalse(ret)
|
||||
+ self.assertIn("authorization failure", err)
|
||||
+
|
||||
+ # change the initiator name back to the correct one
|
||||
+ iscsi.initiator = self.initiator
|
||||
+ self.assertEqual(iscsi.initiator, self.initiator)
|
||||
+
|
||||
+ # check the change made it to /etc/iscsi/initiatorname.iscsi
|
||||
+ initiator_file = read_file("/etc/iscsi/initiatorname.iscsi").strip()
|
||||
+ self.assertEqual(initiator_file, "InitiatorName=%s" % self.initiator)
|
||||
+
|
||||
+ # try to login (should work now)
|
||||
ret, err = iscsi.log_into_node(nodes[0])
|
||||
self.assertTrue(ret, "Login failed: %s" % err)
|
||||
|
||||
--
|
||||
2.40.1
|
||||
|
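A minimal sketch of the behaviour covered by the patches above, based on their test case: the initiator name can now be (re)set as long as no session is active, and the name ends up in /etc/iscsi/initiatorname.iscsi. The portal address and IQN are placeholders.

    from blivet.iscsi import iscsi, has_iscsi

    if has_iscsi():
        iscsi.initiator = "iqn.1994-05.com.redhat:iscsi-test"   # allowed while no session is active
        nodes = iscsi.discover("127.0.0.1")                     # placeholder portal
        if nodes:
            ok, err = iscsi.log_into_node(nodes[0])
            print(ok, err)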
@ -0,0 +1,54 @@
|
||||
From ac5646f8e9e59389bdc651c63b5e7dcd2d693bf4 Mon Sep 17 00:00:00 2001
|
||||
From: Radek Vykydal <rvykydal@redhat.com>
|
||||
Date: Wed, 22 May 2019 13:35:01 +0200
|
||||
Subject: [PATCH] Add flag for protecting cdrom devices during populate
|
||||
|
||||
Resolves: rhbz#1719648
|
||||
---
|
||||
blivet/devices/optical.py | 14 ++++++++++++++
|
||||
blivet/flags.py | 3 +++
|
||||
2 files changed, 17 insertions(+)
|
||||
|
||||
diff --git a/blivet/devices/optical.py b/blivet/devices/optical.py
|
||||
index b9dba1f2..122825f2 100644
|
||||
--- a/blivet/devices/optical.py
|
||||
+++ b/blivet/devices/optical.py
|
||||
@@ -24,6 +24,7 @@
|
||||
from .. import errors
|
||||
from .. import util
|
||||
from ..storage_log import log_method_call
|
||||
+from ..flags import flags
|
||||
|
||||
import logging
|
||||
log = logging.getLogger("blivet")
|
||||
@@ -81,3 +82,16 @@ def eject(self):
|
||||
util.run_program(["eject", self.name])
|
||||
except OSError as e:
|
||||
log.warning("error ejecting cdrom %s: %s", self.name, e)
|
||||
+
|
||||
+ @property
|
||||
+ def protected(self):
|
||||
+ protected = super(OpticalDevice, self).protected
|
||||
+
|
||||
+ if flags.protect_cdroms:
|
||||
+ return True
|
||||
+ else:
|
||||
+ return protected
|
||||
+
|
||||
+ @protected.setter
|
||||
+ def protected(self, value):
|
||||
+ self._protected = value
|
||||
diff --git a/blivet/flags.py b/blivet/flags.py
|
||||
index 6500be30..a6a78edc 100644
|
||||
--- a/blivet/flags.py
|
||||
+++ b/blivet/flags.py
|
||||
@@ -77,6 +77,9 @@ def __init__(self):
|
||||
# (so far only for LUKS)
|
||||
self.discard_new = False
|
||||
|
||||
+ # whether cdroms should be protected
|
||||
+ self.protect_cdroms = False
|
||||
+
|
||||
self.boot_cmdline = {}
|
||||
|
||||
self.update_from_boot_cmdline()
|
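A short sketch of how the new flag above is meant to be used: enable flags.protect_cdroms before populating the devicetree so optical devices report themselves as protected during installation. The Blivet()/reset() calls are the usual populate path; treat the loop as illustrative.

    import blivet
    from blivet.flags import flags
    from blivet.devices import OpticalDevice

    flags.protect_cdroms = True     # must be set before populate/reset
    b = blivet.Blivet()
    b.reset()                       # populate the devicetree

    for dev in b.devicetree.devices:
        if isinstance(dev, OpticalDevice):
            print(dev.name, dev.protected)   # protected is now True for cdroms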
@ -1,206 +0,0 @@
From faef0408d2f7c61aade6d187389c61e64f9f373b Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 20 Apr 2023 12:35:30 +0200
Subject: [PATCH] Add support for creating shared LVM setups

This feature is requested by GFS2 for the storage role. This adds
support for creating shared VGs and activating LVs in shared mode.

Resolves: RHEL-14021
---
blivet/devices/lvm.py | 44 +++++++++++++++++++----
blivet/tasks/availability.py | 9 +++++
tests/unit_tests/devices_test/lvm_test.py | 25 +++++++++++++
3 files changed, 72 insertions(+), 6 deletions(-)

diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index ca45c4b5..068c5368 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -97,7 +97,8 @@ class LVMVolumeGroupDevice(ContainerDevice):

def __init__(self, name, parents=None, size=None, free=None,
pe_size=None, pe_count=None, pe_free=None, pv_count=None,
- uuid=None, exists=False, sysfs_path='', exported=False):
+ uuid=None, exists=False, sysfs_path='', exported=False,
+ shared=False):
"""
:param name: the device name (generally a device node's basename)
:type name: str
@@ -124,6 +125,11 @@ class LVMVolumeGroupDevice(ContainerDevice):
:type pv_count: int
:keyword uuid: the VG UUID
:type uuid: str
+
+ For non-existing VGs only:
+
+ :keyword shared: whether to create this VG as shared
+ :type shared: bool
"""
# These attributes are used by _add_parent, so they must be initialized
# prior to instantiating the superclass.
@@ -137,6 +143,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
self.pe_count = util.numeric_type(pe_count)
self.pe_free = util.numeric_type(pe_free)
self.exported = exported
+ self._shared = shared

# TODO: validate pe_size if given
if not self.pe_size:
@@ -254,7 +261,19 @@ class LVMVolumeGroupDevice(ContainerDevice):
""" Create the device. """
log_method_call(self, self.name, status=self.status)
pv_list = [pv.path for pv in self.parents]
- blockdev.lvm.vgcreate(self.name, pv_list, self.pe_size)
+ extra = dict()
+ if self._shared:
+ extra["shared"] = ""
+ blockdev.lvm.vgcreate(self.name, pv_list, self.pe_size, **extra)
+
+ if self._shared:
+ if availability.BLOCKDEV_LVM_PLUGIN_SHARED.available:
+ try:
+ blockdev.lvm.vglock_start(self.name)
+ except blockdev.LVMError as err:
+ raise errors.LVMError(err)
+ else:
+ raise errors.LVMError("Shared LVM is not fully supported: %s" % ",".join(availability.BLOCKDEV_LVM_PLUGIN_SHARED.availability_errors))

def _post_create(self):
self._complete = True
@@ -661,7 +680,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None,
fmt=None, exists=False, sysfs_path='', grow=None, maxsize=None,
percent=None, cache_request=None, pvs=None, from_lvs=None,
- stripe_size=0):
+ stripe_size=0, shared=False):

if not exists:
if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo", "cache-pool"] + lvm.raid_seg_types:
@@ -690,6 +709,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
self.seg_type = seg_type or "linear"
self._raid_level = None
self.ignore_skip_activation = 0
+ self._shared = shared

self.req_grow = None
self.req_max_size = Size(0)
@@ -2306,7 +2326,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
parent_lv=None, int_type=None, origin=None, vorigin=False,
metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
compression=False, deduplication=False, index_memory=0,
- write_policy=None, cache_mode=None, attach_to=None, stripe_size=0):
+ write_policy=None, cache_mode=None, attach_to=None, stripe_size=0,
+ shared=False):
"""
:param name: the device name (generally a device node's basename)
:type name: str
@@ -2337,6 +2358,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
:type cache_request: :class:`~.devices.lvm.LVMCacheRequest`
:keyword pvs: list of PVs to allocate extents from (size could be specified for each PV)
:type pvs: list of :class:`~.devices.StorageDevice` or :class:`LVPVSpec` objects (tuples)
+ :keyword shared: whether to activate the newly create LV in shared mode
+ :type shared: bool

For internal LVs only:

@@ -2412,7 +2435,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
fmt, exists, sysfs_path, grow, maxsize,
percent, cache_request, pvs, from_lvs,
- stripe_size)
+ stripe_size, shared)
LVMVDOPoolMixin.__init__(self, compression, deduplication, index_memory,
write_policy)
LVMVDOLogicalVolumeMixin.__init__(self)
@@ -2634,7 +2657,13 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
log_method_call(self, self.name, orig=orig, status=self.status,
controllable=self.controllable)
ignore_skip_activation = self.is_snapshot_lv or self.ignore_skip_activation > 0
- blockdev.lvm.lvactivate(self.vg.name, self._name, ignore_skip=ignore_skip_activation)
+ if self._shared:
+ if availability.BLOCKDEV_LVM_PLUGIN_SHARED.available:
+ blockdev.lvm.lvactivate(self.vg.name, self._name, ignore_skip=ignore_skip_activation, shared=True)
+ else:
+ raise errors.LVMError("Shared LVM is not fully supported: %s" % ",".join(availability.BLOCKDEV_LVM_PLUGIN_SHARED.availability_errors))
+ else:
+ blockdev.lvm.lvactivate(self.vg.name, self._name, ignore_skip=ignore_skip_activation)

@type_specific
def _pre_create(self):
@@ -2672,6 +2701,9 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
if self._stripe_size:
extra["stripesize"] = str(int(self._stripe_size.convert_to("KiB")))

+ if self._shared:
+ extra["activate"] = "sy"
+
blockdev.lvm.lvcreate(self.vg.name, self._name, self.size,
type=self.seg_type, pv_list=pvs, **extra)
else:
diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py
index bba1ba84..85945c77 100644
--- a/blivet/tasks/availability.py
+++ b/blivet/tasks/availability.py
@@ -435,6 +435,14 @@ if hasattr(blockdev.LVMTech, "VDO"):
else:
BLOCKDEV_LVM_TECH_VDO = _UnavailableMethod(error_msg="Installed version of libblockdev doesn't support LVM VDO technology")

+if hasattr(blockdev.LVMTech, "SHARED"):
+ BLOCKDEV_LVM_SHARED = BlockDevTechInfo(plugin_name="lvm",
+ check_fn=blockdev.lvm_is_tech_avail,
+ technologies={blockdev.LVMTech.SHARED: blockdev.LVMTechMode.MODIFY}) # pylint: disable=no-member
+ BLOCKDEV_LVM_TECH_SHARED = BlockDevMethod(BLOCKDEV_LVM_SHARED)
+else:
+ BLOCKDEV_LVM_TECH_SHARED = _UnavailableMethod(error_msg="Installed version of libblockdev doesn't support shared LVM technology")
+
# libblockdev mdraid plugin required technologies and modes
BLOCKDEV_MD_ALL_MODES = (blockdev.MDTechMode.CREATE |
blockdev.MDTechMode.DELETE |
@@ -476,6 +484,7 @@ BLOCKDEV_DM_PLUGIN_RAID = blockdev_plugin("libblockdev dm plugin (raid technolog
BLOCKDEV_LOOP_PLUGIN = blockdev_plugin("libblockdev loop plugin", BLOCKDEV_LOOP_TECH)
BLOCKDEV_LVM_PLUGIN = blockdev_plugin("libblockdev lvm plugin", BLOCKDEV_LVM_TECH)
BLOCKDEV_LVM_PLUGIN_VDO = blockdev_plugin("libblockdev lvm plugin (vdo technology)", BLOCKDEV_LVM_TECH_VDO)
+BLOCKDEV_LVM_PLUGIN_SHARED = blockdev_plugin("libblockdev lvm plugin (shared LVM technology)", BLOCKDEV_LVM_TECH_SHARED)
BLOCKDEV_MDRAID_PLUGIN = blockdev_plugin("libblockdev mdraid plugin", BLOCKDEV_MD_TECH)
BLOCKDEV_MPATH_PLUGIN = blockdev_plugin("libblockdev mpath plugin", BLOCKDEV_MPATH_TECH)
BLOCKDEV_SWAP_PLUGIN = blockdev_plugin("libblockdev swap plugin", BLOCKDEV_SWAP_TECH)
diff --git a/tests/unit_tests/devices_test/lvm_test.py b/tests/unit_tests/devices_test/lvm_test.py
index d7b55224..e645309f 100644
--- a/tests/unit_tests/devices_test/lvm_test.py
+++ b/tests/unit_tests/devices_test/lvm_test.py
@@ -476,6 +476,31 @@ class LVMDeviceTest(unittest.TestCase):
lv.setup()
lvm.lvactivate.assert_called_with(vg.name, lv.lvname, ignore_skip=False)

+ @patch("blivet.tasks.availability.BLOCKDEV_LVM_PLUGIN_SHARED",
+ new=blivet.tasks.availability.ExternalResource(blivet.tasks.availability.AvailableMethod, ""))
+ def test_lv_activate_shared(self):
+ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("1 GiB"), exists=True)
+ vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
+ lv = LVMLogicalVolumeDevice("data_lv", parents=[vg], size=Size("500 MiB"), exists=True, shared=True)
+
+ with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
+ with patch.object(lv, "_pre_setup"):
+ lv.setup()
+ lvm.lvactivate.assert_called_with(vg.name, lv.lvname, ignore_skip=False, shared=True)
+
+ @patch("blivet.tasks.availability.BLOCKDEV_LVM_PLUGIN_SHARED",
+ new=blivet.tasks.availability.ExternalResource(blivet.tasks.availability.AvailableMethod, ""))
+ def test_vg_create_shared(self):
+ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("1 GiB"), exists=True)
+ vg = LVMVolumeGroupDevice("testvg", parents=[pv], shared=True)
+
+ with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
+ vg._create()
+ lvm.vgcreate.assert_called_with(vg.name, [pv.path], Size("4 MiB"), shared="")
+ lvm.vglock_start.assert_called_with(vg.name)
+
def test_vg_is_empty(self):
pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
size=Size("1024 MiB"))
--
2.41.0

@ -0,0 +1,81 @@
From 8124b804915d54e341e80bdd84e84eec3a54aaba Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Tue, 27 Nov 2018 13:37:49 -0500
Subject: [PATCH 1/2] Only update sysfs path in ctor for active devices.

Related: rhbz#1579375
---
blivet/devices/storage.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/blivet/devices/storage.py b/blivet/devices/storage.py
index 3cc29436..904b60df 100644
--- a/blivet/devices/storage.py
+++ b/blivet/devices/storage.py
@@ -149,8 +149,8 @@ def __init__(self, name, fmt=None, uuid=None,
self.device_links = []

if self.exists:
- self.update_sysfs_path()
if self.status:
+ self.update_sysfs_path()
self.update_size()

def __str__(self):

From 4cc31c735db820896278a7b91bb761df00becdb5 Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Tue, 27 Nov 2018 14:03:40 -0500
Subject: [PATCH 2/2] Fix xfs sync of chrooted mountpoint.

Related: rhbz#1579375
---
blivet/tasks/fssync.py | 22 ++++++++++++++++------
1 file changed, 16 insertions(+), 6 deletions(-)

diff --git a/blivet/tasks/fssync.py b/blivet/tasks/fssync.py
index a15c8e1b..996fe782 100644
--- a/blivet/tasks/fssync.py
+++ b/blivet/tasks/fssync.py
@@ -49,11 +49,21 @@ class XFSSync(FSSync):

ext = availability.XFSFREEZE_APP

- def _freeze_command(self):
- return [str(self.ext), "-f", self.fs.system_mountpoint]
+ def _get_mountpoint(self, root=None):
+ mountpoint = self.fs.system_mountpoint
+ if root is not None and root.replace('/', ''):
+ if mountpoint == root:
+ mountpoint = '/'
+ else:
+ mountpoint = mountpoint[len(root):]

- def _unfreeze_command(self):
- return [str(self.ext), "-u", self.fs.system_mountpoint]
+ return mountpoint
+
+ def _freeze_command(self, root=None):
+ return [str(self.ext), "-f", self._get_mountpoint(root=root)]
+
+ def _unfreeze_command(self, root=None):
+ return [str(self.ext), "-u", self._get_mountpoint(root=root)]

def do_task(self, root="/"):
# pylint: disable=arguments-differ
@@ -63,13 +73,13 @@ def do_task(self, root="/"):

error_msg = None
try:
- rc = util.run_program(self._freeze_command(), root=root)
+ rc = util.run_program(self._freeze_command(root=root), root=root)
except OSError as e:
error_msg = "failed to sync filesytem: %s" % e
error_msg = error_msg or rc

try:
- rc = util.run_program(self._unfreeze_command(), root=root)
+ rc = util.run_program(self._unfreeze_command(root=root), root=root)
except OSError as e:
error_msg = error_msg or "failed to sync filesystem: %s" % e
error_msg = error_msg or rc
@ -0,0 +1,128 @@
From 1d9dc59ab2c471d7dcc39cd6982bd14380d5f726 Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Thu, 13 Jun 2019 11:22:16 -0400
Subject: [PATCH 1/3] Add a function to detect if running in a vm.

Related: rhbz#1676935
---
blivet/util.py | 14 ++++++++++++++
1 file changed, 14 insertions(+)

diff --git a/blivet/util.py b/blivet/util.py
index 542bc93f..fa5e9e35 100644
--- a/blivet/util.py
+++ b/blivet/util.py
@@ -1,4 +1,5 @@
import copy
+from distutils.spawn import find_executable
import functools
import glob
import itertools
@@ -1100,3 +1101,16 @@ def decorated(*args, **kwargs):
return None
return decorated
return decorator
+
+
+def detect_virt():
+ """ Return True if we are running in a virtual machine. """
+ in_vm = False
+ detect_virt_prog = find_executable('systemd-detect-virt')
+ if detect_virt_prog:
+ try:
+ in_vm = run_program([detect_virt_prog, "--vm"]) == 0
+ except OSError:
+ pass
+
+ return in_vm

From 26d4b48ab5eca44695dced52c6170ec04610bc1d Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Thu, 13 Jun 2019 10:57:48 -0400
Subject: [PATCH 2/3] Use dasd disklabel for vm disks backed by dasds.

Resolves: rhbz#1676935
---
blivet/formats/disklabel.py | 9 +++++++++
1 file changed, 9 insertions(+)

diff --git a/blivet/formats/disklabel.py b/blivet/formats/disklabel.py
index 8186d1a1..0c4fce35 100644
--- a/blivet/formats/disklabel.py
+++ b/blivet/formats/disklabel.py
@@ -261,6 +261,15 @@ def _get_best_label_type(self):
elif self.parted_device.type == parted.DEVICE_DASD:
# the device is DASD
return "dasd"
+ elif util.detect_virt():
+ # check for dasds exported into qemu as normal virtio/scsi disks
+ try:
+ _parted_disk = parted.Disk(device=self.parted_device)
+ except (_ped.DiskLabelException, _ped.IOException, NotImplementedError):
+ pass
+ else:
+ if _parted_disk.type == "dasd":
+ return "dasd"

for lt in label_types:
if self._label_type_size_check(lt):

From c93d1207bb2942736a390bd58adafda3deb1c25c Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Thu, 13 Jun 2019 12:04:23 -0400
Subject: [PATCH 3/3] Use DBus call to see if we're in a vm.

---
blivet/util.py | 22 +++++++++++++---------
1 file changed, 13 insertions(+), 9 deletions(-)

diff --git a/blivet/util.py b/blivet/util.py
index fa5e9e35..2932e8b5 100644
--- a/blivet/util.py
+++ b/blivet/util.py
@@ -1,5 +1,4 @@
import copy
-from distutils.spawn import find_executable
import functools
import glob
import itertools
@@ -20,6 +19,7 @@
from enum import Enum

from .errors import DependencyError
+from . import safe_dbus

import gi
gi.require_version("BlockDev", "2.0")
@@ -39,6 +39,12 @@
program_log_lock = Lock()


+SYSTEMD_SERVICE = "org.freedesktop.systemd1"
+SYSTEMD_MANAGER_PATH = "/org/freedesktop/systemd1/Manager"
+SYSTEMD_MANAGER_IFACE = "org.freedesktop.systemd1.Manager"
+VIRT_PROP_NAME = "Virtualization"
+
+
class Path(str):

""" Path(path, root=None) provides a filesystem path object, which

@@ -1105,12 +1111,10 @@ def decorated(*args, **kwargs):

def detect_virt():
""" Return True if we are running in a virtual machine. """
- in_vm = False
- detect_virt_prog = find_executable('systemd-detect-virt')
- if detect_virt_prog:
- try:
- in_vm = run_program([detect_virt_prog, "--vm"]) == 0
- except OSError:
- pass
+ try:
+ vm = safe_dbus.get_property_sync(SYSTEMD_SERVICE, SYSTEMD_MANAGER_PATH,
+ SYSTEMD_MANAGER_IFACE, VIRT_PROP_NAME)
+ except (safe_dbus.DBusCallError, safe_dbus.DBusPropertyError):
+ vm = None

- return in_vm
+ return vm in ('qemu', 'kvm')
@ -0,0 +1,30 @@
From 5097a0f3fba2960fc77cfd6ceb828287f60c930c Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 6 Dec 2018 10:32:58 +0100
Subject: [PATCH] Fix reading LV attributes in LVMVolumeGroupDevice.status

This was not adjusted to libblockdev API when cherry-picking fixes
from rhel7-branch in 3c8f8dbf78b0a093e120f69241b44a48ff07be30
---
blivet/devices/lvm.py | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 7835b7e8..8c4ee2ba 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -222,11 +222,10 @@ def status(self):
try:
lvs_info = blockdev.lvm.lvs(vg_name=self.name)
except blockdev.LVMError:
- lvs_info = dict()
+ lvs_info = []

- for lv_info in lvs_info.values():
- lv_attr = udev.device_get_lv_attr(lv_info)
- if lv_attr and lv_attr[4] == 'a':
+ for lv_info in lvs_info:
+ if lv_info.attr and lv_info.attr[4] == 'a':
return True

return False
@ -0,0 +1,65 @@
From 5f7dbb212b4d6da4f8f2609ae1415e8630d031cd Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 13 May 2019 12:49:52 +0200
Subject: [PATCH] Correctly handle non-unicode iSCSI initiator names

---
blivet/iscsi.py | 4 +++-
blivet/udev.py | 20 +++++++++++---------
2 files changed, 14 insertions(+), 10 deletions(-)

diff --git a/blivet/iscsi.py b/blivet/iscsi.py
index 74432505..f612cf15 100644
--- a/blivet/iscsi.py
+++ b/blivet/iscsi.py
@@ -206,7 +206,9 @@ def initiator(self):
if self._initiator != "":
return self._initiator

- return self._call_initiator_method("GetInitiatorName")[0]
+ # udisks returns initiatorname as a NULL terminated bytearray
+ raw_initiator = bytes(self._call_initiator_method("GetInitiatorNameRaw")[0][:-1])
+ return raw_initiator.decode("utf-8", errors="replace")

@initiator.setter
@storaged_iscsi_required(critical=True, eval_mode=util.EvalMode.onetime)
diff --git a/blivet/udev.py b/blivet/udev.py
index 51b69b76..a70e3e08 100644
--- a/blivet/udev.py
+++ b/blivet/udev.py
@@ -836,24 +836,26 @@ def device_get_iscsi_nic(info):


def device_get_iscsi_initiator(info):
- initiator = None
+ initiator_name = None
if device_is_partoff_iscsi(info):
host = re.match(r'.*/(host\d+)', device_get_sysfs_path(info)).groups()[0]
if host:
initiator_file = "/sys/class/iscsi_host/%s/initiatorname" % host
if os.access(initiator_file, os.R_OK):
- initiator = open(initiator_file).read().strip()
+ initiator = open(initiator_file, "rb").read().strip()
+ initiator_name = initiator.decode("utf-8", errors="replace")
log.debug("found offload iscsi initiatorname %s in file %s",
- initiator, initiator_file)
- if initiator.lstrip("(").rstrip(")").lower() == "null":
- initiator = None
- if initiator is None:
+ initiator_name, initiator_file)
+ if initiator_name.lstrip("(").rstrip(")").lower() == "null":
+ initiator_name = None
+ if initiator_name is None:
session = device_get_iscsi_session(info)
if session:
initiator = open("/sys/class/iscsi_session/%s/initiatorname" %
- session).read().strip()
- log.debug("found iscsi initiatorname %s", initiator)
- return initiator
+ session, "rb").read().strip()
+ initiator_name = initiator.decode("utf-8", errors="replace")
+ log.debug("found iscsi initiatorname %s", initiator_name)
+ return initiator_name


# fcoe disks have ID_PATH in the form of:
@ -0,0 +1,27 @@
From 408da7ad8eaedf9edb8dfa240af35a222fa8b481 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 11 Mar 2019 13:29:04 +0100
Subject: [PATCH] Do not crash if 'dm.get_member_raid_sets' fails (#1684851)

---
blivet/populator/helpers/dmraid.py | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/blivet/populator/helpers/dmraid.py b/blivet/populator/helpers/dmraid.py
index c8cc3a8e..ed48bd66 100644
--- a/blivet/populator/helpers/dmraid.py
+++ b/blivet/populator/helpers/dmraid.py
@@ -53,7 +53,12 @@ def run(self):
minor = udev.device_get_minor(self.data)

# Have we already created the DMRaidArrayDevice?
- rs_names = blockdev.dm.get_member_raid_sets(name, uuid, major, minor)
+ try:
+ rs_names = blockdev.dm.get_member_raid_sets(name, uuid, major, minor)
+ except blockdev.DMError as e:
+ log.error("Failed to get RAID sets information for '%s': %s", name, str(e))
+ return
+
if len(rs_names) == 0:
log.warning("dmraid member %s does not appear to belong to any "
"array", self.device.name)
166
SOURCES/0023-Minor-cleanups-to-reduce-log-noise.patch
Normal file
166
SOURCES/0023-Minor-cleanups-to-reduce-log-noise.patch
Normal file
@ -0,0 +1,166 @@
From c667dbb3ebf05eafeb4fb55d3ffa22d27c25420c Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Wed, 24 Oct 2018 20:12:20 -0400
Subject: [PATCH 1/3] Don't try to update sysfs path for non-block devices.
(#1579375)

---
blivet/devices/file.py | 3 +++
blivet/devices/nfs.py | 3 +++
blivet/devices/nodev.py | 3 +++
3 files changed, 9 insertions(+)

diff --git a/blivet/devices/file.py b/blivet/devices/file.py
index 55522c1d..fa3dfb8a 100644
--- a/blivet/devices/file.py
+++ b/blivet/devices/file.py
@@ -132,6 +132,9 @@ def is_name_valid(self, name):
# Override StorageDevice.is_name_valid to allow /
return not('\x00' in name or name == '.' or name == '..')

+ def update_sysfs_path(self):
+ pass
+

class SparseFileDevice(FileDevice):

diff --git a/blivet/devices/nfs.py b/blivet/devices/nfs.py
index 97cbe01e..a0142f91 100644
--- a/blivet/devices/nfs.py
+++ b/blivet/devices/nfs.py
@@ -77,3 +77,6 @@ def update_size(self, newsize=None):
def is_name_valid(self, name):
# Override StorageDevice.is_name_valid to allow /
return not('\x00' in name or name == '.' or name == '..')
+
+ def update_sysfs_path(self):
+ pass
diff --git a/blivet/devices/nodev.py b/blivet/devices/nodev.py
index f6129258..f1b87392 100644
--- a/blivet/devices/nodev.py
+++ b/blivet/devices/nodev.py
@@ -75,6 +75,9 @@ def destroy(self):
def update_size(self, newsize=None):
pass

+ def update_sysfs_path(self):
+ pass
+

class TmpFSDevice(NoDevice):


From acb0953ad89327b3ffd3571b6d45565762548203 Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Wed, 24 Oct 2018 20:27:22 -0400
Subject: [PATCH 2/3] Only try to set selinux context for lost+found on ext
file systems.

Related: rhbz#1579375
---
blivet/formats/fs.py | 19 ++++++++++++++-----
tests/formats_test/selinux_test.py | 5 ++++-
2 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py
index 81e367f4..b915a2de 100644
--- a/blivet/formats/fs.py
+++ b/blivet/formats/fs.py
@@ -569,11 +569,6 @@ def _post_setup(self, **kwargs):
ret = util.reset_file_context(mountpoint, chroot)
if not ret:
log.warning("Failed to reset SElinux context for newly mounted filesystem root directory to default.")
- lost_and_found_context = util.match_path_context("/lost+found")
- lost_and_found_path = os.path.join(mountpoint, "lost+found")
- ret = util.set_file_context(lost_and_found_path, lost_and_found_context, chroot)
- if not ret:
- log.warning("Failed to set SELinux context for newly mounted filesystem lost+found directory at %s to %s", lost_and_found_path, lost_and_found_context)

def _pre_teardown(self, **kwargs):
if not super(FS, self)._pre_teardown(**kwargs):
@@ -840,6 +835,20 @@ class Ext2FS(FS):
parted_system = fileSystemType["ext2"]
_metadata_size_factor = 0.93 # ext2 metadata may take 7% of space

+ def _post_setup(self, **kwargs):
+ super(Ext2FS, self)._post_setup(**kwargs)
+
+ options = kwargs.get("options", "")
+ chroot = kwargs.get("chroot", "/")
+ mountpoint = kwargs.get("mountpoint") or self.mountpoint
+
+ if flags.selinux and "ro" not in self._mount.mount_options(options).split(",") and flags.selinux_reset_fcon:
+ lost_and_found_context = util.match_path_context("/lost+found")
+ lost_and_found_path = os.path.join(mountpoint, "lost+found")
+ ret = util.set_file_context(lost_and_found_path, lost_and_found_context, chroot)
+ if not ret:
+ log.warning("Failed to set SELinux context for newly mounted filesystem lost+found directory at %s to %s", lost_and_found_path, lost_and_found_context)
+
register_device_format(Ext2FS)


diff --git a/tests/formats_test/selinux_test.py b/tests/formats_test/selinux_test.py
index 79c10327..028e084e 100644
--- a/tests/formats_test/selinux_test.py
+++ b/tests/formats_test/selinux_test.py
@@ -43,7 +43,10 @@ def exec_mount_selinux_format(self, formt, *args):

blivet.flags.flags.selinux_reset_fcon = True
fmt.setup(mountpoint="dummy") # param needed to pass string check
- lsetfilecon.assert_called_with(ANY, lost_found_context)
+ if isinstance(fmt, fs.Ext2FS):
+ lsetfilecon.assert_called_with(ANY, lost_found_context)
+ else:
+ lsetfilecon.assert_not_called()

lsetfilecon.reset_mock()


From 1b4e658f098bda3161ff0d5ffee07ea9be5c1d15 Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Wed, 24 Oct 2018 20:33:36 -0400
Subject: [PATCH 3/3] Don't try to set selinux context for nodev or vfat file
systems.

Related: rhbz#1579375
---
blivet/formats/fs.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py
index b915a2de..6f09eaff 100644
--- a/blivet/formats/fs.py
+++ b/blivet/formats/fs.py
@@ -76,6 +76,7 @@ class FS(DeviceFormat):
_sync_class = fssync.UnimplementedFSSync
_writelabel_class = fswritelabel.UnimplementedFSWriteLabel
_writeuuid_class = fswriteuuid.UnimplementedFSWriteUUID
+ _selinux_supported = True
# This constant is aquired by testing some filesystems
# and it's giving us percentage of space left after the format.
# This number is more guess than precise number because this
@@ -565,7 +566,7 @@ def _post_setup(self, **kwargs):
chroot = kwargs.get("chroot", "/")
mountpoint = kwargs.get("mountpoint") or self.mountpoint

- if flags.selinux and "ro" not in self._mount.mount_options(options).split(",") and flags.selinux_reset_fcon:
+ if self._selinux_supported and flags.selinux and "ro" not in self._mount.mount_options(options).split(",") and flags.selinux_reset_fcon:
ret = util.reset_file_context(mountpoint, chroot)
if not ret:
log.warning("Failed to reset SElinux context for newly mounted filesystem root directory to default.")
@@ -902,6 +903,7 @@ class FATFS(FS):
_metadata_size_factor = 0.99 # fat metadata may take 1% of space
# FIXME this should be fat32 in some cases
parted_system = fileSystemType["fat16"]
+ _selinux_supported = False

def generate_new_uuid(self):
ret = ""
@@ -1235,6 +1237,7 @@ class NoDevFS(FS):
""" nodev filesystem base class """
_type = "nodev"
_mount_class = fsmount.NoDevFSMount
+ _selinux_supported = False

def __init__(self, **kwargs):
FS.__init__(self, **kwargs)
91
SOURCES/0024-Fix-util.detect_virt-function.patch
Normal file
91
SOURCES/0024-Fix-util.detect_virt-function.patch
Normal file
@ -0,0 +1,91 @@
From 471d43cbfe99db1c8246fb863e3ce49b3403fc61 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 11 Sep 2019 10:48:19 +0200
Subject: [PATCH] Fix util.detect_virt function

Fixed the systemd Manager object path, also get_property_sync
returns a tuple so we need to check its first element.

Resolves: rhbz#1676935
---
blivet/util.py | 8 ++++----
tests/formats_test/disklabel_test.py | 26 ++++++++++++++------------
tests/util_test.py | 4 ++++
3 files changed, 22 insertions(+), 16 deletions(-)

diff --git a/blivet/util.py b/blivet/util.py
index 2932e8b5..27468992 100644
--- a/blivet/util.py
+++ b/blivet/util.py
@@ -40,7 +40,7 @@ program_log_lock = Lock()


SYSTEMD_SERVICE = "org.freedesktop.systemd1"
-SYSTEMD_MANAGER_PATH = "/org/freedesktop/systemd1/Manager"
+SYSTEMD_MANAGER_PATH = "/org/freedesktop/systemd1"
SYSTEMD_MANAGER_IFACE = "org.freedesktop.systemd1.Manager"
VIRT_PROP_NAME = "Virtualization"

@@ -1115,6 +1115,6 @@ def detect_virt():
vm = safe_dbus.get_property_sync(SYSTEMD_SERVICE, SYSTEMD_MANAGER_PATH,
SYSTEMD_MANAGER_IFACE, VIRT_PROP_NAME)
except (safe_dbus.DBusCallError, safe_dbus.DBusPropertyError):
- vm = None
-
- return vm in ('qemu', 'kvm')
+ return False
+ else:
+ return vm[0] in ('qemu', 'kvm')
diff --git a/tests/formats_test/disklabel_test.py b/tests/formats_test/disklabel_test.py
index 4b105da6..94f3775f 100644
--- a/tests/formats_test/disklabel_test.py
+++ b/tests/formats_test/disklabel_test.py
@@ -163,16 +163,18 @@ class DiskLabelTestCase(unittest.TestCase):
arch.is_efi.return_value = False

arch.is_s390.return_value = True
- with mock.patch.object(dl, '_label_type_size_check') as size_check:
- size_check.return_value = True
- with mock.patch("blivet.formats.disklabel.blockdev.s390") as _s390:
- _s390.dasd_is_fba.return_value = False
- self.assertEqual(dl._get_best_label_type(), "msdos")
-
- _s390.dasd_is_fba.return_value = True
- self.assertEqual(dl._get_best_label_type(), "msdos")
-
- _s390.dasd_is_fba.return_value = False
- dl._parted_device.type = parted.DEVICE_DASD
- self.assertEqual(dl._get_best_label_type(), "dasd")
+ with mock.patch('blivet.util.detect_virt') as virt:
+ virt.return_value = False
+ with mock.patch.object(dl, '_label_type_size_check') as size_check:
+ size_check.return_value = True
+ with mock.patch("blivet.formats.disklabel.blockdev.s390") as _s390:
+ _s390.dasd_is_fba.return_value = False
+ self.assertEqual(dl._get_best_label_type(), "msdos")
+
+ _s390.dasd_is_fba.return_value = True
+ self.assertEqual(dl._get_best_label_type(), "msdos")
+
+ _s390.dasd_is_fba.return_value = False
+ dl._parted_device.type = parted.DEVICE_DASD
+ self.assertEqual(dl._get_best_label_type(), "dasd")
arch.is_s390.return_value = False
diff --git a/tests/util_test.py b/tests/util_test.py
index 5fa3070e..9a2ff492 100644
--- a/tests/util_test.py
+++ b/tests/util_test.py
@@ -37,6 +37,10 @@ class MiscTest(unittest.TestCase):
# real deduplication
self.assertEqual([1, 2, 3, 4, 5, 6], util.dedup_list([1, 2, 3, 4, 2, 2, 2, 1, 3, 5, 3, 6, 6, 2, 3, 1, 5]))

+ def test_detect_virt(self):
+ in_virt = not util.run_program(["systemd-detect-virt", "--vm"])
+ self.assertEqual(util.detect_virt(), in_virt)
+

class TestDefaultNamedtuple(unittest.TestCase):
def test_default_namedtuple(self):
--
2.20.1

183
SOURCES/0025-Check-for-PV-sector-size-when-creating-new-VG.patch
Normal file
183
SOURCES/0025-Check-for-PV-sector-size-when-creating-new-VG.patch
Normal file
@ -0,0 +1,183 @@
From 83a42f3e232c7c4a02deb3539972c82b6dca284b Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 4 Oct 2019 12:30:03 +0200
Subject: [PATCH 1/2] Add a new "sector_size" property to storage devices.

This represents the logical sector size of the device.

Related: rhbz#1754446
---
blivet/devices/disk.py | 6 +++++-
blivet/devices/md.py | 11 +++++++++++
blivet/devices/partition.py | 7 +++++++
blivet/devices/storage.py | 15 +++++++++++++++
4 files changed, 38 insertions(+), 1 deletion(-)

diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index bf2f7a4f..7dfeabf0 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -687,7 +687,7 @@ def __init__(self, device, **kwargs):
"""
self.mode = kwargs.pop("mode")
self.devname = kwargs.pop("devname")
- self.sector_size = kwargs.pop("sector_size")
+ self._sector_size = kwargs.pop("sector_size")

DiskDevice.__init__(self, device, **kwargs)

@@ -710,3 +710,7 @@ def description(self):
% {'devname': self.devname,
'mode': self.mode,
'path': self.path}
+
+ @property
+ def sector_size(self):
+ return self._sector_size
diff --git a/blivet/devices/md.py b/blivet/devices/md.py
index 6a837df0..0b6da980 100644
--- a/blivet/devices/md.py
+++ b/blivet/devices/md.py
@@ -19,10 +19,13 @@
# Red Hat Author(s): David Lehman <dlehman@redhat.com>
#

+import math
import os
import six
import time

+from six.moves import reduce
+
import gi
gi.require_version("BlockDev", "2.0")

@@ -195,6 +198,14 @@ def level(self, value):

self._level = level

+ @property
+ def sector_size(self):
+ if not self.exists:
+ # Least common multiple of parents' sector sizes
+ return reduce(lambda a, b: a * b // math.gcd(a, b), (int(p.sector_size) for p in self.parents))
+
+ return super(MDRaidArrayDevice, self).sector_size
+
@property
def chunk_size(self):
if self.exists and self._chunk_size == Size(0):
diff --git a/blivet/devices/partition.py b/blivet/devices/partition.py
index 623e1c9d..73daa76f 100644
--- a/blivet/devices/partition.py
+++ b/blivet/devices/partition.py
@@ -729,6 +729,13 @@ def protected(self):
def protected(self, value):
self._protected = value

+ @property
+ def sector_size(self):
+ if self.disk:
+ return self.disk.sector_size
+
+ return super(PartitionDevice, self).sector_size
+
def _pre_resize(self):
if not self.exists:
raise errors.DeviceError("device has not been created", self.name)
diff --git a/blivet/devices/storage.py b/blivet/devices/storage.py
index e087fa64..91c5e60e 100644
--- a/blivet/devices/storage.py
+++ b/blivet/devices/storage.py
@@ -190,6 +190,21 @@ def raw_device(self):
""" The device itself, or when encrypted, the backing device. """
return self

+ @property
+ def sector_size(self):
+ """ Logical sector (block) size of this device """
+ if not self.exists:
+ if self.parents:
+ return self.parents[0].sector_size
+ else:
+ return LINUX_SECTOR_SIZE
+
+ block_size = util.get_sysfs_attr(self.sysfs_path, "queue/logical_block_size")
+ if block_size:
+ return int(block_size)
+ else:
+ return LINUX_SECTOR_SIZE
+
@property
def controllable(self):
return self._controllable and not flags.testing and not self.unavailable_type_dependencies()

From 9f81bd1ffb877862760223ba88f2086deebd2d06 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 4 Oct 2019 12:37:01 +0200
Subject: [PATCH 2/2] Do not allow creating VGs with PVs with different sector
size

New versions of LVM don't allow mixing PVs with different sector
sizes in one VG.

Resolves: rhbz#1754446
---
blivet/devices/lvm.py | 12 ++++++++++++
tests/devices_test/lvm_test.py | 13 ++++++++++++-
2 files changed, 24 insertions(+), 1 deletion(-)

diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 4347f483..b9da286a 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -356,6 +356,18 @@ def _remove_log_vol(self, lv):
def _add_parent(self, parent):
super(LVMVolumeGroupDevice, self)._add_parent(parent)

+ # we are creating new VG or adding a new PV to an existing (complete) one
+ if not self.exists or (self.exists and self._complete):
+ parent_sectors = set([p.sector_size for p in self.pvs] + [parent.sector_size])
+ if len(parent_sectors) != 1:
+ if not self.exists:
+ msg = "The volume group %s cannot be created. Selected disks have " \
+ "inconsistent sector sizes (%s)." % (self.name, parent_sectors)
+ else:
+ msg = "Disk %s cannot be added to this volume group. LVM doesn't " \
+ "allow using physical volumes with inconsistent (logical) sector sizes." % parent.name
+ raise ValueError(msg)
+
if (self.exists and parent.format.exists and
len(self.parents) + 1 == self.pv_count):
self._complete = True
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
index 8ed577f4..a32c1d83 100644
--- a/tests/devices_test/lvm_test.py
+++ b/tests/devices_test/lvm_test.py
@@ -2,7 +2,7 @@
import test_compat # pylint: disable=unused-import

import six
-from six.moves.mock import patch # pylint: disable=no-name-in-module,import-error
+from six.moves.mock import patch, PropertyMock # pylint: disable=no-name-in-module,import-error
import unittest

import blivet
@@ -352,6 +352,17 @@ def test_target_size(self):
self.assertEqual(lv.target_size, orig_size)
self.assertEqual(lv.size, orig_size)

+ def test_lvm_inconsistent_sector_size(self):
+ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("1024 MiB"))
+ pv2 = StorageDevice("pv2", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("1024 MiB"))
+
+ with patch("blivet.devices.StorageDevice.sector_size", new_callable=PropertyMock) as mock_property:
+ mock_property.__get__ = lambda _mock, pv, _class: 512 if pv.name == "pv1" else 4096
+ with six.assertRaisesRegex(self, ValueError, "The volume group testvg cannot be created."):
+ LVMVolumeGroupDevice("testvg", parents=[pv, pv2])
+

class TypeSpecificCallsTest(unittest.TestCase):
def test_type_specific_calls(self):
@ -19,41 +19,51 @@
Summary: A python module for system storage configuration
Name: python-blivet
Url: https://storageapis.wordpress.com/projects/blivet
Version: 3.6.0
Version: 3.1.0

#%%global prerelease .b2
# prerelease, if defined, should be something like .a1, .b1, .b2.dev1, or .c2
Release: 8%{?prerelease}%{?dist}
Release: 18%{?prerelease}%{?dist}
Epoch: 1
License: LGPLv2+
Group: System Environment/Libraries
%global realname blivet
%global realversion %{version}%{?prerelease}
Source0: http://github.com/storaged-project/blivet/archive/%{realname}-%{realversion}.tar.gz
Source1: http://github.com/storaged-project/blivet/archive/%{realname}-%{realversion}-tests.tar.gz
Patch0: 0001-force-lvm-cli.plugin
Patch1: 0002-remove-btrfs-plugin.patch
Patch2: 0003-Revert-More-consistent-lvm-errors.patch
Patch3: 0004-Revert-Terminology-cleanups.patch
Patch4: 0005-DDF-RAID-support-using-mdadm.patch
Patch5: 0006-Revert-Remove-the-Blivet.roots-attribute.patch
Patch6: 0007-Fix-potential-AttributeError-when-getting-stratis-bl.patch
Patch7: 0008-tests-Skip-XFS-resize-test-on-CentOS-RHEL-8.patch
Patch8: 0009-Revert-Adjust-to-new-XFS-min-size.patch
Patch9: 0010-Catch-BlockDevNotImplementedError-for-btrfs-plugin-c.patch
Patch10: 0011-Default-to-encryption-sector-size-512-for-LUKS-devic.patch
Patch11: 0012-Add-support-for-specifying-stripe-size-for-RAID-LVs.patch
Patch12: 0013-Fix-setting-kickstart-data.patch
Patch13: 0014-Do-not-set-memory-limit-for-LUKS2-when-running-in-FI.patch
Patch14: 0015-Add-support-for-filesystem-online-resize.patch
Patch15: 0016-Backport-iSCSI-initiator-name-related-fixes.patch
Patch16: 0017-Add-support-for-creating-shared-LVM-setups.patch
Patch2: 0003-separate-dmraid-availability-check.patch
Patch3: 0004-allow-removing-btrfs-volumes-without-btrfs-support.patch
Patch4: 0005-arm7-cleanups.patch
Patch5: 0006-Fix-options-for-ISCSI-functions.patch
Patch6: 0007-Wipe-all-stale-metadata-after-creating-md-array.patch
Patch7: 0008-Copy-the-iSCSI-initiator-name-file-to-the-installed-system.patch
Patch8: 0008-po-updates.patch
Patch9: 0009-Require-libfc-instead-of-fcoe-for-offloaded-FCoE.-15.patch
Patch10: 0010-Use-udev-to-determine-if-disk-is-a-multipath-member.patch
Patch11: 0011-Don-t-crash-if-blockdev-mpath-plugin-isn-t-available.patch
Patch12: 0012-Ensure-correct-type-of-mpath-cache-member-list.patch
Patch13: 0013-Various-test-fixes.patch
Patch14: 0014-Tests-archive.patch
Patch15: 0015-Deactivate-incomplete-VGs.patch
Patch16: 0016-Automatically-adjust-size-of-growable-devices-for-new-format.patch
Patch17: 0017-Add-flag-for-protecting-cdrom-devices-during-populate.patch
Patch18: 0018-Clean-up-some-errors-evident-in-installer-logs.patch
Patch19: 0019-Use-dasd-disklabel-for-vm-disks-backed-by-dasds.patch
Patch20: 0020-Fix-reading-LV-attributes-in-LVMVolumeGroupDevice.patch
Patch21: 0021-Correctly-handle-non-unicode-iSCSI-initiator-names.patch
Patch22: 0022-Do-not-crash-if-dm_get_member_raid_sets-fails.patch
Patch23: 0023-Minor-cleanups-to-reduce-log-noise.patch
Patch24: 0024-Fix-util.detect_virt-function.patch
Patch25: 0025-Check-for-PV-sector-size-when-creating-new-VG.patch

# Versions of required components (done so we make sure the buildrequires
# match the requires versions of things).
%global partedver 3.2
%global partedver 1.8.1
%global pypartedver 3.10.4
%global utillinuxver 2.15.1
%global libblockdevver 2.24
%global libblockdevver 2.17
%global libbytesizever 0.3
%global pyudevver 0.18

@ -66,7 +76,6 @@ storage configuration.
%package -n %{realname}-data
Summary: Data for the %{realname} python module.

BuildRequires: make
BuildRequires: systemd

Conflicts: python-blivet < 1:2.0.0
@ -178,6 +187,7 @@ configuration.
%autosetup -n %{realname}-%{realversion} -N
%autosetup -n %{realname}-%{realversion} -b1 -p1


%build
%{?with_python2:make PYTHON=%{__python2}}
%{?with_python3:make PYTHON=%{__python3}}
@ -197,199 +207,18 @@ configuration.
%if %{with python2}
%files -n python2-%{realname}
%license COPYING
%doc README.md ChangeLog examples
%doc README ChangeLog examples
%{python2_sitelib}/*
%endif

%if %{with python3}
%files -n python3-%{realname}
%license COPYING
%doc README.md ChangeLog examples
%doc README ChangeLog examples
%{python3_sitelib}/*
%endif

%changelog
* Mon Oct 30 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-8
- Add support for creating shared LVM setups
Resolves: RHEL-14021

* Mon Jul 24 2023 Jan Pokorny <japokorn@redhat.com> - 3.6.0-7
Backport iSCSI initiator name related fixes:
- Allow changing iSCSI initiator name after setting it
Resolves: rhbz#2083139
- Add a basic test case for the iscsi module
Related: rhbz#2083139
- tests: Use blivet-specific prefix for targetcli backing files
Related: rhbz#2083139
- iscsi: Save firmware initiator name to /etc/iscsi/initiatorname.iscsi
Resolves: rhbz#2084043
- tests: Improve iscsi_test.ISCSITestCase
Related: rhbz#2083139

* Thu May 18 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-6
- Fix setting kickstart data
Resolves: rhbz#2175166
- Do not set memory limit for LUKS2 when running in FIPS mode
Resolves: rhbz#2183437
- Add support for filesystem online resize
Resolves: rhbz#2168680

* Tue May 02 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-5
- Add support for specifying stripe size for RAID LVs
Resolves: rhbz#2142550

* Thu Jan 19 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-4
- Default to encryption sector size 512 for LUKS devices
Resolves: rhbz#2160465

* Thu Nov 03 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-3
- Catch BlockDevNotImplementedError for btrfs plugin calls
Resolves: rhbz#2139169
- Revert "Adjust to new XFS min size"
Resolves: rhbz#2139187

* Fri Oct 21 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-2
- Skip XFS resize test on CentOS/RHEL 8
Related: rhbz#2123712

* Fri Oct 21 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-1
- Rebase to the latest upstream release 3.6.0
Resolves: rhbz#2123712

* Thu Aug 18 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-13
- DDF RAID support using mdadm
Resolves: rhbz#2063791

* Mon Jun 20 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-12
- Add support for NPIV-enabled zFCP devices
Resolves: rhbz#1497087

* Thu Jun 02 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-11
- Fix running gating tests on AWS/Xen machines
Resolves: rhbz#2093207

* Thu Jun 02 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-10
- Fix getting PV info in LVMPhysicalVolume from the cache
Resolves: rhbz#2079220
- Do not crash when changing disklabel on disks with active devices
Resolves: rhbz#2078801
- ActionDestroyDevice should not obsolete ActionRemoveMember
Resolves: rhbz#2076958
- Correctly set vg_name after adding/removing a PV from a VG
Resolves: rhbz#2081276
- Use LVM PV format current_size in LVMVolumeGroupDevice._remove
Related: rhbz#2081276
- Add support for creating LVM cache pools
Resolves: rhbz#2055198

* Mon Jan 10 2022 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-9
- Translation update
Resolves: rhbz#2003050

* Tue Dec 14 2021 ojtech Trefny <vtrefny@redhat.com> - 3.4.0-8
- Replace all log_exception_info calls with log.info
Resolves: rhbz#2028134

* Fri Nov 26 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-7
- Release number bump
Related: rhbz#1988276

* Fri Nov 26 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-6
- Improve error message printed for missing dependecies
Resolves: rhbz#1988276
- Use bigger chunk size for thinpools bigger than ~15.88 TiB
Resolves: rhbz#1949953

* Wed Aug 4 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-5
- Fix running upstream test suite in gating
Resolves: rhbz#1990232

* Mon Aug 2 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-4
- Do not set chunk size for RAID 1
Resolves: rhbz#1987170

* Wed Jul 21 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-3
- Fix resolving devices with names that look like BIOS drive number
Resolves: rhbz#1983309

* Wed Jul 7 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-2
- Fix activating old style LVM snapshots
Resolves: rhbz#1961739

* Wed May 5 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-1
- Rebase to latest upstream release 3.4.0
Resolves: rhbz#1918357

* Tue Feb 9 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-9
- LVM VDO support
Resolves: rhbz#1509337

* Mon Jan 11 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-8
- Let parted fix fixable issues with partition table
Resolves: rhbz#1846869
- Fix possible UnicodeDecodeError when reading sysfs attributes
Resolves: rhbz#1849326

* Wed Nov 18 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-7
- Add support for XFS format grow
Resolves: rhbz#1862349
- Do not limit swap to 128 GiB
Resolves: rhbz#1656485
- Use UnusableConfigurationError for partially hidden multipath devices
Resolves: rhbz#1877052
- Fix possible UnicodeDecodeError when reading model from sysfs
Resolves: rhbz#1849326
- Add basic support for LVM VDO devices
Resolves: rhbz#1828745

* Thu Aug 20 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-6
- Fix name resolution for MD devices and partitions on them
Resolves: rhbz#1862904
- Fix ignoring disk devices with parents or children
Resolves: rhbz#1866243

* Thu Jul 16 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-5
- set allowed disk labels for s390x as standard ones (msdos + gpt) plus dasd
Resolves: rhbz#1855200
- Do not use BlockDev.utils_have_kernel_module to check for modules
Resolves: rhbz#1855344

* Thu Jul 09 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-4
- Blivet RHEL 8.3 localization update
Resolves: rhbz#182056
- Do not use FSAVAIL and FSUSE% options when running lsblk
Resolves: rhbz#1853624

* Tue Jun 30 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-3
- Round down to nearest MiB value when writing ks parittion info
Resolves: rhbz#1850670

* Wed Jun 24 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-2
- Add extra sleep after pvremove call
Resolves: rhbz#1640601

* Fri May 22 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-1
- Rebase to the latest upstream release 3.2.2
Resolves: rhbz#1714970

* Mon Mar 02 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-20
- add `-y' to lvm.pvcreate
Resolves: rhbz#1768494

* Wed Jan 29 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-19
- Override LVM skip-activation to allow for thorough removal
Resolves: rhbz#1766498
- Make sure LVs are writable before wiping
Related: rhbz#1766498
- Fix udev test names so they actually get run.
Related: rhbz#1758102
- Add recognition of Dell FW RAID to udev.device_is_disk.
Resolves: rhbz#1758102
- Align base sizes up if smaller than min I/O size.
Resolves: rhbz#1781106
- Make minimal and optimal alignment getters public.
Related: rhbz#1781106

* Tue Nov 19 2019 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-18
- Check for PV sector size when creating new VG
Resolves: rhbz#1754446