import python-blivet-3.4.0-5.el8

Authored by CentOS Sources on 2021-10-05 15:18:52 -04:00; committed by Stepan Oksanichenko
parent b991a48d68
commit 67b639ee16
26 changed files with 1776 additions and 5033 deletions

.gitignore

@@ -1,2 +1,2 @@
-SOURCES/blivet-3.2.2-tests.tar.gz
-SOURCES/blivet-3.2.2.tar.gz
+SOURCES/blivet-3.4.0-tests.tar.gz
+SOURCES/blivet-3.4.0.tar.gz

.python-blivet.metadata

@@ -1,2 +1,2 @@
-84988ad63a9a9ddd9f2075b82b36bd98261df9e9 SOURCES/blivet-3.2.2-tests.tar.gz
-a89000bc2e9cfc8a1cfe09a58bf5e4e609f9b517 SOURCES/blivet-3.2.2.tar.gz
+d0a86df7bbaeda7be9990b7f7b15ec36b325ec7a SOURCES/blivet-3.4.0-tests.tar.gz
+aafc429e224dfd204cb1c284bb70de52920f7b20 SOURCES/blivet-3.4.0.tar.gz


@@ -0,0 +1,330 @@
From 3a42d9e2afdf04dbbfd2c507f5b2392193fda25b Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 26 May 2021 12:15:54 +0200
Subject: [PATCH] Revert "More consistent lvm errors (API break)"
This reverts commit 49ec071c6d0673224a0774d613904387c52c7381.
---
blivet/devices/lvm.py | 72 +++++++++++++++++-----------------
tests/devices_test/lvm_test.py | 14 +++----
2 files changed, 43 insertions(+), 43 deletions(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index a55515fc..6d23bfba 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -307,7 +307,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
def _add_log_vol(self, lv):
""" Add an LV to this VG. """
if lv in self._lvs:
- raise errors.DeviceError("lv is already part of this vg")
+ raise ValueError("lv is already part of this vg")
# verify we have the space, then add it
# do not verify for growing vg (because of ks)
@@ -340,7 +340,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
def _remove_log_vol(self, lv):
""" Remove an LV from this VG. """
if lv not in self.lvs:
- raise errors.DeviceError("specified lv is not part of this vg")
+ raise ValueError("specified lv is not part of this vg")
self._lvs.remove(lv)
@@ -415,7 +415,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
@thpool_reserve.setter
def thpool_reserve(self, value):
if value is not None and not isinstance(value, ThPoolReserveSpec):
- raise AttributeError("Invalid thpool_reserve given, must be of type ThPoolReserveSpec")
+ raise ValueError("Invalid thpool_reserve given, must be of type ThPoolReserveSpec")
self._thpool_reserve = value
@property
@@ -646,14 +646,14 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo"] + lvm.raid_seg_types:
raise ValueError("Invalid or unsupported segment type: %s" % seg_type)
if seg_type and seg_type in lvm.raid_seg_types and not pvs:
- raise errors.DeviceError("List of PVs has to be given for every non-linear LV")
+ raise ValueError("List of PVs has to be given for every non-linear LV")
elif (not seg_type or seg_type == "linear") and pvs:
if not all(isinstance(pv, LVPVSpec) for pv in pvs):
- raise errors.DeviceError("Invalid specification of PVs for a linear LV: either no or complete "
- "specification (with all space split into PVs has to be given")
+ raise ValueError("Invalid specification of PVs for a linear LV: either no or complete "
+ "specification (with all space split into PVs has to be given")
elif sum(spec.size for spec in pvs) != size:
- raise errors.DeviceError("Invalid specification of PVs for a linear LV: the sum of space "
- "assigned to PVs is not equal to the size of the LV")
+ raise ValueError("Invalid specification of PVs for a linear LV: the sum of space "
+ "assigned to PVs is not equal to the size of the LV")
# When this device's format is set in the superclass constructor it will
# try to access self.snapshots.
@@ -702,13 +702,13 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
self._from_lvs = from_lvs
if self._from_lvs:
if exists:
- raise errors.DeviceError("Only new LVs can be created from other LVs")
+ raise ValueError("Only new LVs can be created from other LVs")
if size or maxsize or percent:
- raise errors.DeviceError("Cannot specify size for a converted LV")
+ raise ValueError("Cannot specify size for a converted LV")
if fmt:
- raise errors.DeviceError("Cannot specify format for a converted LV")
+ raise ValueError("Cannot specify format for a converted LV")
if any(lv.vg != self.vg for lv in self._from_lvs):
- raise errors.DeviceError("Conversion of LVs only possible inside a VG")
+ raise ValueError("Conversion of LVs only possible inside a VG")
self._cache = None
if cache_request and not self.exists:
@@ -723,13 +723,13 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
elif isinstance(pv_spec, StorageDevice):
self._pv_specs.append(LVPVSpec(pv_spec, Size(0)))
else:
- raise AttributeError("Invalid PV spec '%s' for the '%s' LV" % (pv_spec, self.name))
+ raise ValueError("Invalid PV spec '%s' for the '%s' LV" % (pv_spec, self.name))
# Make sure any destination PVs are actually PVs in this VG
if not set(spec.pv for spec in self._pv_specs).issubset(set(self.vg.parents)):
missing = [r.name for r in
set(spec.pv for spec in self._pv_specs).difference(set(self.vg.parents))]
msg = "invalid destination PV(s) %s for LV %s" % (missing, self.name)
- raise errors.DeviceError(msg)
+ raise ValueError(msg)
if self._pv_specs:
self._assign_pv_space()
@@ -1072,7 +1072,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
else:
msg = "the specified internal LV '%s' doesn't belong to this LV ('%s')" % (int_lv.lv_name,
self.name)
- raise errors.DeviceError(msg)
+ raise ValueError(msg)
def populate_ksdata(self, data):
super(LVMLogicalVolumeBase, self).populate_ksdata(data)
@@ -1171,7 +1171,7 @@ class LVMInternalLogicalVolumeMixin(object):
def _init_check(self):
# an internal LV should have no parents
if self._parent_lv and self._parents:
- raise errors.DeviceError("an internal LV should have no parents")
+ raise ValueError("an internal LV should have no parents")
@property
def is_internal_lv(self):
@@ -1231,7 +1231,7 @@ class LVMInternalLogicalVolumeMixin(object):
@readonly.setter
def readonly(self, value): # pylint: disable=unused-argument
- raise errors.DeviceError("Cannot make an internal LV read-write")
+ raise ValueError("Cannot make an internal LV read-write")
@property
def type(self):
@@ -1267,7 +1267,7 @@ class LVMInternalLogicalVolumeMixin(object):
def _check_parents(self):
# an internal LV should have no parents
if self._parents:
- raise errors.DeviceError("an internal LV should have no parents")
+ raise ValueError("an internal LV should have no parents")
def _add_to_parents(self):
# nothing to do here, an internal LV has no parents (in the DeviceTree's
@@ -1277,13 +1277,13 @@ class LVMInternalLogicalVolumeMixin(object):
# internal LVs follow different rules limitting size
def _set_size(self, newsize):
if not isinstance(newsize, Size):
- raise AttributeError("new size must of type Size")
+ raise ValueError("new size must of type Size")
if not self.takes_extra_space:
if newsize <= self.parent_lv.size: # pylint: disable=no-member
self._size = newsize # pylint: disable=attribute-defined-outside-init
else:
- raise errors.DeviceError("Internal LV cannot be bigger than its parent LV")
+ raise ValueError("Internal LV cannot be bigger than its parent LV")
else:
# same rules apply as for any other LV
raise NotTypeSpecific()
@@ -1361,18 +1361,18 @@ class LVMSnapshotMixin(object):
return
if self.origin and not isinstance(self.origin, LVMLogicalVolumeDevice):
- raise errors.DeviceError("lvm snapshot origin must be a logical volume")
+ raise ValueError("lvm snapshot origin must be a logical volume")
if self.vorigin and not self.exists:
- raise errors.DeviceError("only existing vorigin snapshots are supported")
+ raise ValueError("only existing vorigin snapshots are supported")
if isinstance(self.origin, LVMLogicalVolumeDevice) and \
isinstance(self.parents[0], LVMVolumeGroupDevice) and \
self.origin.vg != self.parents[0]:
- raise errors.DeviceError("lvm snapshot and origin must be in the same vg")
+ raise ValueError("lvm snapshot and origin must be in the same vg")
if self.is_thin_lv:
if self.origin and self.size and not self.exists:
- raise errors.DeviceError("thin snapshot size is determined automatically")
+ raise ValueError("thin snapshot size is determined automatically")
@property
def is_snapshot_lv(self):
@@ -1544,7 +1544,7 @@ class LVMThinPoolMixin(object):
def _check_from_lvs(self):
if self._from_lvs:
if len(self._from_lvs) != 2:
- raise errors.DeviceError("two LVs required to create a thin pool")
+ raise ValueError("two LVs required to create a thin pool")
def _convert_from_lvs(self):
data_lv, metadata_lv = self._from_lvs
@@ -1590,7 +1590,7 @@ class LVMThinPoolMixin(object):
def _add_log_vol(self, lv):
""" Add an LV to this pool. """
if lv in self._lvs:
- raise errors.DeviceError("lv is already part of this vg")
+ raise ValueError("lv is already part of this vg")
# TODO: add some checking to prevent overcommit for preexisting
self.vg._add_log_vol(lv)
@@ -1601,7 +1601,7 @@ class LVMThinPoolMixin(object):
def _remove_log_vol(self, lv):
""" Remove an LV from this pool. """
if lv not in self._lvs:
- raise errors.DeviceError("specified lv is not part of this vg")
+ raise ValueError("specified lv is not part of this vg")
self._lvs.remove(lv)
self.vg._remove_log_vol(lv)
@@ -1711,14 +1711,14 @@ class LVMThinLogicalVolumeMixin(object):
"""Check that this device has parents as expected"""
if isinstance(self.parents, (list, ParentList)):
if len(self.parents) != 1:
- raise errors.DeviceError("constructor requires a single thin-pool LV")
+ raise ValueError("constructor requires a single thin-pool LV")
container = self.parents[0]
else:
container = self.parents
if not container or not isinstance(container, LVMLogicalVolumeDevice) or not container.is_thin_pool:
- raise errors.DeviceError("constructor requires a thin-pool LV")
+ raise ValueError("constructor requires a thin-pool LV")
@property
def is_thin_lv(self):
@@ -1755,7 +1755,7 @@ class LVMThinLogicalVolumeMixin(object):
def _set_size(self, newsize):
if not isinstance(newsize, Size):
- raise AttributeError("new size must of type Size")
+ raise ValueError("new size must of type Size")
newsize = self.vg.align(newsize)
newsize = self.vg.align(util.numeric_type(newsize))
@@ -2229,7 +2229,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
container = self.parents
if not isinstance(container, LVMVolumeGroupDevice):
- raise AttributeError("constructor requires a LVMVolumeGroupDevice")
+ raise ValueError("constructor requires a LVMVolumeGroupDevice")
@type_specific
def _add_to_parents(self):
@@ -2240,12 +2240,12 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
@type_specific
def _check_from_lvs(self):
"""Check the LVs to create this LV from"""
- raise errors.DeviceError("Cannot create a new LV of type '%s' from other LVs" % self.seg_type)
+ raise ValueError("Cannot create a new LV of type '%s' from other LVs" % self.seg_type)
@type_specific
def _convert_from_lvs(self):
"""Convert the LVs to create this LV from into its internal LVs"""
- raise errors.DeviceError("Cannot create a new LV of type '%s' from other LVs" % self.seg_type)
+ raise ValueError("Cannot create a new LV of type '%s' from other LVs" % self.seg_type)
@property
def external_dependencies(self):
@@ -2265,7 +2265,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
@type_specific
def _set_size(self, newsize):
if not isinstance(newsize, Size):
- raise AttributeError("new size must be of type Size")
+ raise ValueError("new size must be of type Size")
newsize = self.vg.align(newsize)
log.debug("trying to set lv %s size to %s", self.name, newsize)
@@ -2274,7 +2274,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
# space for it. A similar reasoning applies to shrinking the LV.
if not self.exists and newsize > self.size and newsize > self.vg.free_space + self.vg_space_used:
log.error("failed to set size: %s short", newsize - (self.vg.free_space + self.vg_space_used))
- raise errors.DeviceError("not enough free space in volume group")
+ raise ValueError("not enough free space in volume group")
LVMLogicalVolumeBase._set_size(self, newsize)
@@ -2622,7 +2622,7 @@ class LVMCache(Cache):
spec.size = spec.pv.format.free
space_to_assign -= spec.pv.format.free
if space_to_assign > 0:
- raise errors.DeviceError("Not enough free space in the PVs for this cache: %s short" % space_to_assign)
+ raise ValueError("Not enough free space in the PVs for this cache: %s short" % space_to_assign)
@property
def size(self):
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
index 670c91c9..4156d0bf 100644
--- a/tests/devices_test/lvm_test.py
+++ b/tests/devices_test/lvm_test.py
@@ -36,10 +36,10 @@ class LVMDeviceTest(unittest.TestCase):
lv = LVMLogicalVolumeDevice("testlv", parents=[vg],
fmt=blivet.formats.get_format("xfs"))
- with six.assertRaisesRegex(self, errors.DeviceError, "lvm snapshot origin must be a logical volume"):
+ with six.assertRaisesRegex(self, ValueError, "lvm snapshot origin must be a logical volume"):
LVMLogicalVolumeDevice("snap1", parents=[vg], origin=pv)
- with six.assertRaisesRegex(self, errors.DeviceError, "only existing vorigin snapshots are supported"):
+ with six.assertRaisesRegex(self, ValueError, "only existing vorigin snapshots are supported"):
LVMLogicalVolumeDevice("snap1", parents=[vg], vorigin=True)
lv.exists = True
@@ -64,7 +64,7 @@ class LVMDeviceTest(unittest.TestCase):
pool = LVMLogicalVolumeDevice("pool1", parents=[vg], size=Size("500 MiB"), seg_type="thin-pool")
thinlv = LVMLogicalVolumeDevice("thinlv", parents=[pool], size=Size("200 MiB"), seg_type="thin")
- with six.assertRaisesRegex(self, errors.DeviceError, "lvm snapshot origin must be a logical volume"):
+ with six.assertRaisesRegex(self, ValueError, "lvm snapshot origin must be a logical volume"):
LVMLogicalVolumeDevice("snap1", parents=[pool], origin=pv, seg_type="thin")
# now make the constructor succeed so we can test some properties
@@ -258,21 +258,21 @@ class LVMDeviceTest(unittest.TestCase):
vg = LVMVolumeGroupDevice("testvg", parents=[pv, pv2])
# pvs have to be specified for non-linear LVs
- with self.assertRaises(errors.DeviceError):
+ with self.assertRaises(ValueError):
lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
fmt=blivet.formats.get_format("xfs"),
exists=False, seg_type="raid1")
- with self.assertRaises(errors.DeviceError):
+ with self.assertRaises(ValueError):
lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
fmt=blivet.formats.get_format("xfs"),
exists=False, seg_type="striped")
# no or complete specification has to be given for linear LVs
- with self.assertRaises(errors.DeviceError):
+ with self.assertRaises(ValueError):
lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
fmt=blivet.formats.get_format("xfs"),
exists=False, pvs=[pv])
- with self.assertRaises(errors.DeviceError):
+ with self.assertRaises(ValueError):
pv_spec = LVPVSpec(pv, Size("256 MiB"))
pv_spec2 = LVPVSpec(pv2, Size("250 MiB"))
lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
--
2.31.1

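What the revert above restores for callers, as a minimal sketch (assuming a blivet build that carries this patch; device names and sizes are illustrative): invalid constructor arguments raise ValueError again, the pre-3.4.0 contract, instead of errors.DeviceError.

import blivet
from blivet.size import Size
from blivet.devices import StorageDevice, LVMVolumeGroupDevice, LVMLogicalVolumeDevice

pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                   size=Size("1 GiB"), exists=False)
vg = LVMVolumeGroupDevice("testvg", parents=[pv])

try:
    # a raid1 LV without an explicit list of PVs is rejected, as in the tests above
    LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
                           exists=False, seg_type="raid1")
except ValueError as err:
    print("rejected as expected: %s" % err)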

@@ -1,29 +0,0 @@
From 760f08bbf7b801acd393a6d2b7447ca6ff28d590 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 22 May 2020 12:35:11 +0200
Subject: [PATCH] Skip test_mounting for filesystems that are not mountable
We can have tools to create the filesystem without having kernel
module for mounting it.
---
tests/formats_test/fstesting.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tests/formats_test/fstesting.py b/tests/formats_test/fstesting.py
index aa1b42e5..62f806f9 100644
--- a/tests/formats_test/fstesting.py
+++ b/tests/formats_test/fstesting.py
@@ -156,8 +156,8 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase):
# FIXME: BTRFS fails to mount
if isinstance(an_fs, fs.BTRFS):
self.skipTest("no mounting filesystem %s" % an_fs.name)
- if not an_fs.formattable:
- self.skipTest("can not create filesystem %s" % an_fs.name)
+ if not an_fs.formattable or not an_fs.mountable:
+ self.skipTest("can not create or mount filesystem %s" % an_fs.name)
an_fs.device = self.loop_devices[0]
self.assertIsNone(an_fs.create())
self.assertTrue(an_fs.test_mount())
--
2.25.4

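The two capabilities the widened skip condition checks are independent, as this short sketch against blivet's filesystem classes shows (assuming a blivet install; the printed values depend on the host's tools and kernel):

from blivet.formats import fs

an_fs = fs.XFS()
print(an_fs.formattable)  # True when the mkfs tool (mkfs.xfs) is installed
print(an_fs.mountable)    # True only when the kernel can also mount xfs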

@@ -1,38 +0,0 @@
From 6a62a81d326a1121a2768735e52a8e1c5e5d6f0d Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 24 Jun 2020 14:43:47 +0200
Subject: [PATCH] Add extra sleep after pvremove call
To give enough time for the async pvscan to finish scanning the
partition before removing it.
Resolves: rhbz#1640601
---
blivet/formats/lvmpv.py | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py
index e4182adb..9f53ec6b 100644
--- a/blivet/formats/lvmpv.py
+++ b/blivet/formats/lvmpv.py
@@ -26,6 +26,7 @@ gi.require_version("BlockDev", "2.0")
from gi.repository import BlockDev as blockdev
import os
+import time
from ..storage_log import log_method_call
from parted import PARTITION_LVM
@@ -137,6 +138,9 @@ class LVMPhysicalVolume(DeviceFormat):
DeviceFormat._destroy(self, **kwargs)
finally:
udev.settle()
+ # LVM now has async pvscan jobs so udev.settle doesn't help and if we try to remove
+ # the partition immediately after the pvremove we get an error
+ time.sleep(5)
@property
def destroyable(self):
--
2.25.4

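The resulting teardown ordering, sketched as a hypothetical standalone helper (udev.settle() and the 5-second sleep come from the patch; the helper name and signature are illustrative):

import time
from blivet import udev

def destroy_pv_format(fmt, **kwargs):
    try:
        fmt.destroy(**kwargs)  # runs pvremove under the hood
    finally:
        udev.settle()          # flush pending udev events first
        time.sleep(5)          # then wait out LVM's asynchronous pvscan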

@@ -0,0 +1,908 @@
From 42042e7fb6177d3cfe5568e358a38278925a2624 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 26 May 2021 12:27:34 +0200
Subject: [PATCH] Revert "Terminology cleanups"
This reverts following commits:
- 3d46339fe9cf12e9082fcbe4dc5acc9f92617e8d
- 63c9c7165e5cdfa4a47dcf0ed9d717b71e7921f2
- 8956b9af8a785ae25e0e7153d2ef0702ce2f567c
---
blivet/devicefactory.py | 24 +++++-----
blivet/devices/dm.py | 9 +++-
blivet/devices/loop.py | 20 ++++----
blivet/devices/luks.py | 26 +++++-----
blivet/errors.py | 2 +-
blivet/partitioning.py | 22 +++++++--
blivet/populator/helpers/dm.py | 4 +-
blivet/populator/helpers/luks.py | 4 +-
blivet/populator/helpers/lvm.py | 2 +-
blivet/populator/helpers/mdraid.py | 14 +++---
blivet/populator/helpers/multipath.py | 8 ++--
blivet/populator/populator.py | 69 ++++++++++++++-------------
blivet/threads.py | 3 +-
blivet/udev.py | 34 ++++++-------
tests/devicefactory_test.py | 10 ++--
tests/devices_test/size_test.py | 6 +--
tests/populator_test.py | 34 ++++++-------
tests/udev_test.py | 12 ++---
tests/vmtests/vmbackedtestcase.py | 2 +-
19 files changed, 168 insertions(+), 137 deletions(-)
diff --git a/blivet/devicefactory.py b/blivet/devicefactory.py
index 0f7fdfa1..f56bd9a3 100644
--- a/blivet/devicefactory.py
+++ b/blivet/devicefactory.py
@@ -849,12 +849,12 @@ class DeviceFactory(object):
parent_container.parents.remove(orig_device)
if self.encrypted and isinstance(self.device, LUKSDevice) and \
- self.raw_device.format.luks_version != self.luks_version:
- self.raw_device.format.luks_version = self.luks_version
+ self.device.slave.format.luks_version != self.luks_version:
+ self.device.slave.format.luks_version = self.luks_version
if self.encrypted and isinstance(self.device, LUKSDevice) and \
- self.raw_device.format.luks_sector_size != self.luks_sector_size:
- self.raw_device.format.luks_sector_size = self.luks_sector_size
+ self.device.slave.format.luks_sector_size != self.luks_sector_size:
+ self.device.slave.format.luks_sector_size = self.luks_sector_size
def _set_name(self):
if not self.device_name:
@@ -1173,11 +1173,11 @@ class PartitionSetFactory(PartitionFactory):
container.parents.remove(member)
self.storage.destroy_device(member)
members.remove(member)
- self.storage.format_device(member.raw_device,
+ self.storage.format_device(member.slave,
get_format(self.fstype))
- members.append(member.raw_device)
+ members.append(member.slave)
if container:
- container.parents.append(member.raw_device)
+ container.parents.append(member.slave)
continue
@@ -1199,10 +1199,10 @@ class PartitionSetFactory(PartitionFactory):
continue
- if member_encrypted and self.encrypted and self.luks_version != member.raw_device.format.luks_version:
- member.raw_device.format.luks_version = self.luks_version
- if member_encrypted and self.encrypted and self.luks_sector_size != member.raw_device.format.luks_sector_size:
- member.raw_device.format.luks_sector_size = self.luks_sector_size
+ if member_encrypted and self.encrypted and self.luks_version != member.slave.format.luks_version:
+ member.slave.format.luks_version = self.luks_version
+ if member_encrypted and self.encrypted and self.luks_sector_size != member.slave.format.luks_sector_size:
+ member.slave.format.luks_sector_size = self.luks_sector_size
##
# Prepare previously allocated member partitions for reallocation.
@@ -1262,7 +1262,7 @@ class PartitionSetFactory(PartitionFactory):
if isinstance(member, LUKSDevice):
self.storage.destroy_device(member)
- member = member.raw_device
+ member = member.slave
self.storage.destroy_device(member)
diff --git a/blivet/devices/dm.py b/blivet/devices/dm.py
index 3529f61c..508a6f89 100644
--- a/blivet/devices/dm.py
+++ b/blivet/devices/dm.py
@@ -154,6 +154,11 @@ class DMDevice(StorageDevice):
log_method_call(self, self.name, status=self.status)
super(DMDevice, self)._set_name(value)
+ @property
+ def slave(self):
+ """ This device's backing device. """
+ return self.parents[0]
+
class DMLinearDevice(DMDevice):
_type = "dm-linear"
@@ -189,8 +194,8 @@ class DMLinearDevice(DMDevice):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status,
controllable=self.controllable)
- parent_length = self.parents[0].current_size / LINUX_SECTOR_SIZE
- blockdev.dm.create_linear(self.name, self.parents[0].path, parent_length,
+ slave_length = self.slave.current_size / LINUX_SECTOR_SIZE
+ blockdev.dm.create_linear(self.name, self.slave.path, slave_length,
self.dm_uuid)
def _post_setup(self):
diff --git a/blivet/devices/loop.py b/blivet/devices/loop.py
index 0f4d7775..78f88d7d 100644
--- a/blivet/devices/loop.py
+++ b/blivet/devices/loop.py
@@ -73,7 +73,7 @@ class LoopDevice(StorageDevice):
def update_name(self):
""" Update this device's name. """
- if not self.parents[0].status:
+ if not self.slave.status:
# if the backing device is inactive, so are we
return self.name
@@ -81,7 +81,7 @@ class LoopDevice(StorageDevice):
# if our name is loopN we must already be active
return self.name
- name = blockdev.loop.get_loop_name(self.parents[0].path)
+ name = blockdev.loop.get_loop_name(self.slave.path)
if name.startswith("loop"):
self.name = name
@@ -89,24 +89,24 @@ class LoopDevice(StorageDevice):
@property
def status(self):
- return (self.parents[0].status and
+ return (self.slave.status and
self.name.startswith("loop") and
- blockdev.loop.get_loop_name(self.parents[0].path) == self.name)
+ blockdev.loop.get_loop_name(self.slave.path) == self.name)
@property
def size(self):
- return self.parents[0].size
+ return self.slave.size
def _pre_setup(self, orig=False):
- if not os.path.exists(self.parents[0].path):
- raise errors.DeviceError("specified file (%s) does not exist" % self.parents[0].path)
+ if not os.path.exists(self.slave.path):
+ raise errors.DeviceError("specified file (%s) does not exist" % self.slave.path)
return StorageDevice._pre_setup(self, orig=orig)
def _setup(self, orig=False):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status,
controllable=self.controllable)
- blockdev.loop.setup(self.parents[0].path)
+ blockdev.loop.setup(self.slave.path)
def _post_setup(self):
StorageDevice._post_setup(self)
@@ -123,3 +123,7 @@ class LoopDevice(StorageDevice):
StorageDevice._post_teardown(self, recursive=recursive)
self.name = "tmploop%d" % self.id
self.sysfs_path = ''
+
+ @property
+ def slave(self):
+ return self.parents[0]
diff --git a/blivet/devices/luks.py b/blivet/devices/luks.py
index 5d6d6c65..555f1acd 100644
--- a/blivet/devices/luks.py
+++ b/blivet/devices/luks.py
@@ -66,13 +66,17 @@ class LUKSDevice(DMCryptDevice):
@property
def raw_device(self):
+ return self.slave
+
+ @property
+ def slave(self):
if self._has_integrity:
return self.parents[0].parents[0]
return self.parents[0]
def _get_size(self):
if not self.exists:
- size = self.raw_device.size - crypto.LUKS_METADATA_SIZE
+ size = self.slave.size - crypto.LUKS_METADATA_SIZE
elif self.resizable and self.target_size != Size(0):
size = self.target_size
else:
@@ -80,8 +84,8 @@ class LUKSDevice(DMCryptDevice):
return size
def _set_size(self, newsize):
- if not self.exists and not self.raw_device.exists:
- self.raw_device.size = newsize + crypto.LUKS_METADATA_SIZE
+ if not self.exists and not self.slave.exists:
+ self.slave.size = newsize + crypto.LUKS_METADATA_SIZE
# just run the StorageDevice._set_size to make sure we are in the format limits
super(LUKSDevice, self)._set_size(newsize - crypto.LUKS_METADATA_SIZE)
@@ -108,22 +112,22 @@ class LUKSDevice(DMCryptDevice):
raise ValueError("size is smaller than the minimum for this device")
# don't allow larger luks than size (or target size) of backing device
- if newsize > (self.raw_device.size - crypto.LUKS_METADATA_SIZE):
+ if newsize > (self.slave.size - crypto.LUKS_METADATA_SIZE):
log.error("requested size %s is larger than size of the backing device %s",
- newsize, self.raw_device.size)
+ newsize, self.slave.size)
raise ValueError("size is larger than the size of the backing device")
if self.align_target_size(newsize) != newsize:
raise ValueError("new size would violate alignment requirements")
def _get_target_size(self):
- return self.raw_device.format.target_size
+ return self.slave.format.target_size
@property
def max_size(self):
""" The maximum size this luks device can be. Maximum is based on the
maximum size of the backing device. """
- max_luks = self.raw_device.max_size - crypto.LUKS_METADATA_SIZE
+ max_luks = self.slave.max_size - crypto.LUKS_METADATA_SIZE
max_format = self.format.max_size
return min(max_luks, max_format) if max_format else max_luks
@@ -131,7 +135,7 @@ class LUKSDevice(DMCryptDevice):
def resizable(self):
""" Can this device be resized? """
return (self._resizable and self.exists and self.format.resizable and
- self.raw_device.resizable and not self._has_integrity)
+ self.slave.resizable and not self._has_integrity)
def resize(self):
# size of LUKSDevice depends on size of the LUKS format on backing
@@ -139,7 +143,7 @@ class LUKSDevice(DMCryptDevice):
log_method_call(self, self.name, status=self.status)
def _post_create(self):
- self.name = self.raw_device.format.map_name
+ self.name = self.slave.format.map_name
StorageDevice._post_create(self)
def _post_teardown(self, recursive=False):
@@ -162,10 +166,10 @@ class LUKSDevice(DMCryptDevice):
self.name = new_name
def dracut_setup_args(self):
- return set(["rd.luks.uuid=luks-%s" % self.raw_device.format.uuid])
+ return set(["rd.luks.uuid=luks-%s" % self.slave.format.uuid])
def populate_ksdata(self, data):
- self.raw_device.populate_ksdata(data)
+ self.slave.populate_ksdata(data)
data.encrypted = True
super(LUKSDevice, self).populate_ksdata(data)
diff --git a/blivet/errors.py b/blivet/errors.py
index fd51283f..f6bf853a 100644
--- a/blivet/errors.py
+++ b/blivet/errors.py
@@ -192,7 +192,7 @@ class DeviceTreeError(StorageError):
pass
-class NoParentsError(DeviceTreeError):
+class NoSlavesError(DeviceTreeError):
pass
diff --git a/blivet/partitioning.py b/blivet/partitioning.py
index 53f9cc3f..ca0a55d1 100644
--- a/blivet/partitioning.py
+++ b/blivet/partitioning.py
@@ -32,7 +32,7 @@ import _ped
from .errors import DeviceError, PartitioningError, AlignmentError
from .flags import flags
-from .devices import Device, PartitionDevice, device_path_to_name
+from .devices import Device, PartitionDevice, LUKSDevice, device_path_to_name
from .size import Size
from .i18n import _
from .util import stringize, unicodeize, compare
@@ -1632,7 +1632,15 @@ class TotalSizeSet(object):
:param size: the target combined size
:type size: :class:`~.size.Size`
"""
- self.devices = [d.raw_device for d in devices]
+ self.devices = []
+ for device in devices:
+ if isinstance(device, LUKSDevice):
+ partition = device.slave
+ else:
+ partition = device
+
+ self.devices.append(partition)
+
self.size = size
self.requests = []
@@ -1670,7 +1678,15 @@ class SameSizeSet(object):
:keyword max_size: the maximum size for growable devices
:type max_size: :class:`~.size.Size`
"""
- self.devices = [d.raw_device for d in devices]
+ self.devices = []
+ for device in devices:
+ if isinstance(device, LUKSDevice):
+ partition = device.slave
+ else:
+ partition = device
+
+ self.devices.append(partition)
+
self.size = size / len(devices)
self.grow = grow
self.max_size = max_size
diff --git a/blivet/populator/helpers/dm.py b/blivet/populator/helpers/dm.py
index 30e99aa1..770736b0 100644
--- a/blivet/populator/helpers/dm.py
+++ b/blivet/populator/helpers/dm.py
@@ -46,13 +46,13 @@ class DMDevicePopulator(DevicePopulator):
name = udev.device_get_name(self.data)
log_method_call(self, name=name)
sysfs_path = udev.device_get_sysfs_path(self.data)
- parent_devices = self._devicetree._add_parent_devices(self.data)
+ slave_devices = self._devicetree._add_slave_devices(self.data)
device = self._devicetree.get_device_by_name(name)
if device is None:
device = DMDevice(name, dm_uuid=self.data.get('DM_UUID'),
sysfs_path=sysfs_path, exists=True,
- parents=[parent_devices[0]])
+ parents=[slave_devices[0]])
device.protected = True
device.controllable = False
self._devicetree._add_device(device)
diff --git a/blivet/populator/helpers/luks.py b/blivet/populator/helpers/luks.py
index 52795a98..51488691 100644
--- a/blivet/populator/helpers/luks.py
+++ b/blivet/populator/helpers/luks.py
@@ -43,7 +43,7 @@ class LUKSDevicePopulator(DevicePopulator):
return udev.device_is_dm_luks(data)
def run(self):
- parents = self._devicetree._add_parent_devices(self.data)
+ parents = self._devicetree._add_slave_devices(self.data)
device = LUKSDevice(udev.device_get_name(self.data),
sysfs_path=udev.device_get_sysfs_path(self.data),
parents=parents,
@@ -58,7 +58,7 @@ class IntegrityDevicePopulator(DevicePopulator):
return udev.device_is_dm_integrity(data)
def run(self):
- parents = self._devicetree._add_parent_devices(self.data)
+ parents = self._devicetree._add_slave_devices(self.data)
device = IntegrityDevice(udev.device_get_name(self.data),
sysfs_path=udev.device_get_sysfs_path(self.data),
parents=parents,
diff --git a/blivet/populator/helpers/lvm.py b/blivet/populator/helpers/lvm.py
index c7adfa4e..b1626306 100644
--- a/blivet/populator/helpers/lvm.py
+++ b/blivet/populator/helpers/lvm.py
@@ -57,7 +57,7 @@ class LVMDevicePopulator(DevicePopulator):
log.warning("found non-vg device with name %s", vg_name)
device = None
- self._devicetree._add_parent_devices(self.data)
+ self._devicetree._add_slave_devices(self.data)
# LVM provides no means to resolve conflicts caused by duplicated VG
# names, so we're just being optimistic here. Woo!
diff --git a/blivet/populator/helpers/mdraid.py b/blivet/populator/helpers/mdraid.py
index 3479e3f7..76aebf25 100644
--- a/blivet/populator/helpers/mdraid.py
+++ b/blivet/populator/helpers/mdraid.py
@@ -31,7 +31,7 @@ from ... import udev
from ...devicelibs import raid
from ...devices import MDRaidArrayDevice, MDContainerDevice
from ...devices import device_path_to_name
-from ...errors import DeviceError, NoParentsError
+from ...errors import DeviceError, NoSlavesError
from ...flags import flags
from ...storage_log import log_method_call
from .devicepopulator import DevicePopulator
@@ -52,12 +52,12 @@ class MDDevicePopulator(DevicePopulator):
log_method_call(self, name=name)
try:
- self._devicetree._add_parent_devices(self.data)
- except NoParentsError:
- log.error("no parents found for mdarray %s, skipping", name)
+ self._devicetree._add_slave_devices(self.data)
+ except NoSlavesError:
+ log.error("no slaves found for mdarray %s, skipping", name)
return None
- # try to get the device again now that we've got all the parents
+ # try to get the device again now that we've got all the slaves
device = self._devicetree.get_device_by_name(name, incomplete=flags.allow_imperfect_devices)
if device is None:
@@ -74,8 +74,8 @@ class MDDevicePopulator(DevicePopulator):
device.name = name
if device is None:
- # if we get here, we found all of the parent devices and
- # something must be wrong -- if all of the parents are in
+ # if we get here, we found all of the slave devices and
+ # something must be wrong -- if all of the slaves are in
# the tree, this device should be as well
if name is None:
name = udev.device_get_name(self.data)
diff --git a/blivet/populator/helpers/multipath.py b/blivet/populator/helpers/multipath.py
index 96c0a9ad..10c745bf 100644
--- a/blivet/populator/helpers/multipath.py
+++ b/blivet/populator/helpers/multipath.py
@@ -40,13 +40,13 @@ class MultipathDevicePopulator(DevicePopulator):
name = udev.device_get_name(self.data)
log_method_call(self, name=name)
- parent_devices = self._devicetree._add_parent_devices(self.data)
+ slave_devices = self._devicetree._add_slave_devices(self.data)
device = None
- if parent_devices:
- device = MultipathDevice(name, parents=parent_devices,
+ if slave_devices:
+ device = MultipathDevice(name, parents=slave_devices,
sysfs_path=udev.device_get_sysfs_path(self.data),
- wwn=parent_devices[0].wwn)
+ wwn=slave_devices[0].wwn)
self._devicetree._add_device(device)
return device
diff --git a/blivet/populator/populator.py b/blivet/populator/populator.py
index 75bb1741..d252281d 100644
--- a/blivet/populator/populator.py
+++ b/blivet/populator/populator.py
@@ -31,7 +31,7 @@ gi.require_version("BlockDev", "2.0")
from gi.repository import BlockDev as blockdev
-from ..errors import DeviceError, DeviceTreeError, NoParentsError
+from ..errors import DeviceError, DeviceTreeError, NoSlavesError
from ..devices import DMLinearDevice, DMRaidArrayDevice
from ..devices import FileDevice, LoopDevice
from ..devices import MDRaidArrayDevice
@@ -92,55 +92,56 @@ class PopulatorMixin(object):
self._cleanup = False
- def _add_parent_devices(self, info):
- """ Add all parents of a device, raising DeviceTreeError on failure.
+ def _add_slave_devices(self, info):
+ """ Add all slaves of a device, raising DeviceTreeError on failure.
:param :class:`pyudev.Device` info: the device's udev info
- :raises: :class:`~.errors.DeviceTreeError if no parents are found or
- if we fail to add any parent
- :returns: a list of parent devices
+ :raises: :class:`~.errors.DeviceTreeError if no slaves are found or
+ if we fail to add any slave
+ :returns: a list of slave devices
:rtype: list of :class:`~.StorageDevice`
"""
name = udev.device_get_name(info)
sysfs_path = udev.device_get_sysfs_path(info)
- parent_dir = os.path.normpath("%s/slaves" % sysfs_path)
- parent_names = os.listdir(parent_dir)
- parent_devices = []
- if not parent_names:
- log.error("no parents found for %s", name)
- raise NoParentsError("no parents found for device %s" % name)
-
- for parent_name in parent_names:
- path = os.path.normpath("%s/%s" % (parent_dir, parent_name))
- parent_info = udev.get_device(os.path.realpath(path))
-
- if not parent_info:
- msg = "unable to get udev info for %s" % parent_name
+ slave_dir = os.path.normpath("%s/slaves" % sysfs_path)
+ slave_names = os.listdir(slave_dir)
+ slave_devices = []
+ if not slave_names:
+ log.error("no slaves found for %s", name)
+ raise NoSlavesError("no slaves found for device %s" % name)
+
+ for slave_name in slave_names:
+ path = os.path.normpath("%s/%s" % (slave_dir, slave_name))
+ slave_info = udev.get_device(os.path.realpath(path))
+
+ if not slave_info:
+ msg = "unable to get udev info for %s" % slave_name
raise DeviceTreeError(msg)
# cciss in sysfs is "cciss!cXdYpZ" but we need "cciss/cXdYpZ"
- parent_name = udev.device_get_name(parent_info).replace("!", "/")
-
- parent_dev = self.get_device_by_name(parent_name)
- if not parent_dev and parent_info:
- # we haven't scanned the parent yet, so do it now
- self.handle_device(parent_info)
- parent_dev = self.get_device_by_name(parent_name)
- if parent_dev is None:
+ slave_name = udev.device_get_name(slave_info).replace("!", "/")
+
+ slave_dev = self.get_device_by_name(slave_name)
+ if not slave_dev and slave_info:
+ # we haven't scanned the slave yet, so do it now
+ self.handle_device(slave_info)
+ slave_dev = self.get_device_by_name(slave_name)
+ if slave_dev is None:
if udev.device_is_dm_lvm(info):
- if parent_name not in lvs_info.cache:
+ if slave_name not in lvs_info.cache:
# we do not expect hidden lvs to be in the tree
continue
- # if the current parent is still not in
+ # if the current slave is still not in
# the tree, something has gone wrong
- log.error("failure scanning device %s: could not add parent %s", name, parent_name)
- msg = "failed to add parent %s of device %s" % (parent_name, name)
+ log.error("failure scanning device %s: could not add slave %s", name, slave_name)
+ msg = "failed to add slave %s of device %s" % (slave_name,
+ name)
raise DeviceTreeError(msg)
- parent_devices.append(parent_dev)
+ slave_devices.append(slave_dev)
- return parent_devices
+ return slave_devices
def _add_name(self, name):
if name not in self.names:
@@ -317,7 +318,7 @@ class PopulatorMixin(object):
continue
# Make sure lvm doesn't get confused by PVs that belong to
- # incomplete VGs. We will remove the PVs from the reject list when/if
+ # incomplete VGs. We will remove the PVs from the blacklist when/if
# the time comes to remove the incomplete VG and its PVs.
for pv in vg.pvs:
lvm.lvm_cc_addFilterRejectRegexp(pv.name)
diff --git a/blivet/threads.py b/blivet/threads.py
index 7e6d3105..a70deb69 100644
--- a/blivet/threads.py
+++ b/blivet/threads.py
@@ -63,11 +63,12 @@ class SynchronizedMeta(type):
"""
def __new__(cls, name, bases, dct):
new_dct = {}
+ blacklist = dct.get('_unsynchronized_methods', [])
for n in dct:
obj = dct[n]
# Do not decorate class or static methods.
- if n in dct.get('_unsynchronized_methods', []):
+ if n in blacklist:
pass
elif isinstance(obj, FunctionType):
obj = exclusive(obj)
diff --git a/blivet/udev.py b/blivet/udev.py
index a8297f3f..e1b67845 100644
--- a/blivet/udev.py
+++ b/blivet/udev.py
@@ -39,7 +39,7 @@ from gi.repository import BlockDev as blockdev
global_udev = pyudev.Context()
log = logging.getLogger("blivet")
-ignored_device_names = []
+device_name_blacklist = []
""" device name regexes to ignore; this should be empty by default """
@@ -77,7 +77,7 @@ def get_devices(subsystem="block"):
result = []
for device in global_udev.list_devices(subsystem=subsystem):
- if not __is_ignored_blockdev(device.sys_name):
+ if not __is_blacklisted_blockdev(device.sys_name):
dev = device_to_dict(device)
result.append(dev)
@@ -176,13 +176,13 @@ def resolve_glob(glob):
return ret
-def __is_ignored_blockdev(dev_name):
+def __is_blacklisted_blockdev(dev_name):
"""Is this a blockdev we never want for an install?"""
if dev_name.startswith("ram") or dev_name.startswith("fd"):
return True
- if ignored_device_names:
- if any(re.search(expr, dev_name) for expr in ignored_device_names):
+ if device_name_blacklist:
+ if any(re.search(expr, dev_name) for expr in device_name_blacklist):
return True
dev_path = "/sys/class/block/%s" % dev_name
@@ -374,7 +374,7 @@ def device_is_disk(info):
device_is_dm_lvm(info) or
device_is_dm_crypt(info) or
(device_is_md(info) and
- (not device_get_md_container(info) and not all(device_is_disk(d) for d in device_get_parents(info))))))
+ (not device_get_md_container(info) and not all(device_is_disk(d) for d in device_get_slaves(info))))))
def device_is_partition(info):
@@ -453,18 +453,18 @@ def device_get_devname(info):
return info.get('DEVNAME')
-def device_get_parents(info):
- """ Return a list of udev device objects representing this device's parents. """
- parents_dir = device_get_sysfs_path(info) + "/slaves/"
+def device_get_slaves(info):
+ """ Return a list of udev device objects representing this device's slaves. """
+ slaves_dir = device_get_sysfs_path(info) + "/slaves/"
names = list()
- if os.path.isdir(parents_dir):
- names = os.listdir(parents_dir)
+ if os.path.isdir(slaves_dir):
+ names = os.listdir(slaves_dir)
- parents = list()
+ slaves = list()
for name in names:
- parents.append(get_device(device_node="/dev/" + name))
+ slaves.append(get_device(device_node="/dev/" + name))
- return parents
+ return slaves
def device_get_holders(info):
@@ -736,7 +736,7 @@ def device_get_partition_disk(info):
disk = None
majorminor = info.get("ID_PART_ENTRY_DISK")
sysfs_path = device_get_sysfs_path(info)
- parents_dir = "%s/slaves" % sysfs_path
+ slaves_dir = "%s/slaves" % sysfs_path
if majorminor:
major, minor = majorminor.split(":")
for device in get_devices():
@@ -744,8 +744,8 @@ def device_get_partition_disk(info):
disk = device_get_name(device)
break
elif device_is_dm_partition(info):
- if os.path.isdir(parents_dir):
- parents = os.listdir(parents_dir)
+ if os.path.isdir(slaves_dir):
+ parents = os.listdir(slaves_dir)
if len(parents) == 1:
disk = resolve_devspec(parents[0].replace('!', '/'))
else:
diff --git a/tests/devicefactory_test.py b/tests/devicefactory_test.py
index dc0d6408..dfd78a7a 100644
--- a/tests/devicefactory_test.py
+++ b/tests/devicefactory_test.py
@@ -112,9 +112,9 @@ class DeviceFactoryTestCase(unittest.TestCase):
kwargs.get("encrypted", False) or
kwargs.get("container_encrypted", False))
if kwargs.get("encrypted", False):
- self.assertEqual(device.parents[0].format.luks_version,
+ self.assertEqual(device.slave.format.luks_version,
kwargs.get("luks_version", crypto.DEFAULT_LUKS_VERSION))
- self.assertEqual(device.raw_device.format.luks_sector_size,
+ self.assertEqual(device.slave.format.luks_sector_size,
kwargs.get("luks_sector_size", 0))
self.assertTrue(set(device.disks).issubset(kwargs["disks"]))
@@ -354,7 +354,7 @@ class LVMFactoryTestCase(DeviceFactoryTestCase):
device = args[0]
if kwargs.get("encrypted"):
- container = device.parents[0].container
+ container = device.slave.container
else:
container = device.container
@@ -373,7 +373,7 @@ class LVMFactoryTestCase(DeviceFactoryTestCase):
self.assertIsInstance(pv, member_class)
if pv.encrypted:
- self.assertEqual(pv.parents[0].format.luks_version,
+ self.assertEqual(pv.slave.format.luks_version,
kwargs.get("luks_version", crypto.DEFAULT_LUKS_VERSION))
@patch("blivet.formats.lvmpv.LVMPhysicalVolume.formattable", return_value=True)
@@ -589,7 +589,7 @@ class LVMThinPFactoryTestCase(LVMFactoryTestCase):
device = args[0]
if kwargs.get("encrypted", False):
- thinlv = device.parents[0]
+ thinlv = device.slave
else:
thinlv = device
diff --git a/tests/devices_test/size_test.py b/tests/devices_test/size_test.py
index d0c0a3f4..a1efa86d 100644
--- a/tests/devices_test/size_test.py
+++ b/tests/devices_test/size_test.py
@@ -107,8 +107,8 @@ class LUKSDeviceSizeTest(StorageDeviceSizeTest):
def _get_device(self, *args, **kwargs):
exists = kwargs.get("exists", False)
- parent = StorageDevice(*args, size=kwargs["size"] + crypto.LUKS_METADATA_SIZE, exists=exists)
- return LUKSDevice(*args, **kwargs, parents=[parent])
+ slave = StorageDevice(*args, size=kwargs["size"] + crypto.LUKS_METADATA_SIZE, exists=exists)
+ return LUKSDevice(*args, **kwargs, parents=[slave])
def test_size_getter(self):
initial_size = Size("10 GiB")
@@ -116,4 +116,4 @@ class LUKSDeviceSizeTest(StorageDeviceSizeTest):
# for LUKS size depends on the backing device size
self.assertEqual(dev.size, initial_size)
- self.assertEqual(dev.raw_device.size, initial_size + crypto.LUKS_METADATA_SIZE)
+ self.assertEqual(dev.slave.size, initial_size + crypto.LUKS_METADATA_SIZE)
diff --git a/tests/populator_test.py b/tests/populator_test.py
index a7748a9d..531ec74b 100644
--- a/tests/populator_test.py
+++ b/tests/populator_test.py
@@ -81,7 +81,7 @@ class DMDevicePopulatorTestCase(PopulatorHelperTestCase):
@patch.object(DeviceTree, "get_device_by_name")
@patch.object(DMDevice, "status", return_value=True)
@patch.object(DMDevice, "update_sysfs_path")
- @patch.object(DeviceTree, "_add_parent_devices")
+ @patch.object(DeviceTree, "_add_slave_devices")
@patch("blivet.udev.device_get_name")
@patch("blivet.udev.device_get_sysfs_path", return_value=sentinel.sysfs_path)
def test_run(self, *args):
@@ -90,7 +90,7 @@ class DMDevicePopulatorTestCase(PopulatorHelperTestCase):
devicetree = DeviceTree()
- # The general case for dm devices is that adding the parent devices
+ # The general case for dm devices is that adding the slave/parent devices
# will result in the dm device itself being in the tree.
device = Mock()
devicetree.get_device_by_name.return_value = device
@@ -99,7 +99,7 @@ class DMDevicePopulatorTestCase(PopulatorHelperTestCase):
parent = Mock()
parent.parents = []
- devicetree._add_parent_devices.return_value = [parent]
+ devicetree._add_slave_devices.return_value = [parent]
devicetree._add_device(parent)
devicetree.get_device_by_name.return_value = None
device_name = "dmdevice"
@@ -228,7 +228,7 @@ class LVMDevicePopulatorTestCase(PopulatorHelperTestCase):
# could be the first helper class checked.
@patch.object(DeviceTree, "get_device_by_name")
- @patch.object(DeviceTree, "_add_parent_devices")
+ @patch.object(DeviceTree, "_add_slave_devices")
@patch("blivet.udev.device_get_name")
@patch("blivet.udev.device_get_lv_vg_name")
def test_run(self, *args):
@@ -240,7 +240,7 @@ class LVMDevicePopulatorTestCase(PopulatorHelperTestCase):
devicetree = DeviceTree()
data = Mock()
- # Add parent devices and then look up the device.
+ # Add slave/parent devices and then look up the device.
device_get_name.return_value = sentinel.lv_name
devicetree.get_device_by_name.return_value = None
@@ -260,7 +260,7 @@ class LVMDevicePopulatorTestCase(PopulatorHelperTestCase):
call(sentinel.vg_name),
call(sentinel.lv_name)])
- # Add parent devices, but the device is still not in the tree
+ # Add slave/parent devices, but the device is still not in the tree
get_device_by_name.side_effect = None
get_device_by_name.return_value = None
self.assertEqual(helper.run(), None)
@@ -625,7 +625,7 @@ class MDDevicePopulatorTestCase(PopulatorHelperTestCase):
# could be the first helper class checked.
@patch.object(DeviceTree, "get_device_by_name")
- @patch.object(DeviceTree, "_add_parent_devices")
+ @patch.object(DeviceTree, "_add_slave_devices")
@patch("blivet.udev.device_get_name")
@patch("blivet.udev.device_get_md_uuid")
@patch("blivet.udev.device_get_md_name")
@@ -636,7 +636,7 @@ class MDDevicePopulatorTestCase(PopulatorHelperTestCase):
devicetree = DeviceTree()
- # base case: _add_parent_devices gets the array into the tree
+ # base case: _add_slave_devices gets the array into the tree
data = Mock()
device = Mock()
device.parents = []
@@ -699,12 +699,12 @@ class MultipathDevicePopulatorTestCase(PopulatorHelperTestCase):
# could be the first helper class checked.
@patch("blivet.udev.device_get_sysfs_path")
- @patch.object(DeviceTree, "_add_parent_devices")
+ @patch.object(DeviceTree, "_add_slave_devices")
@patch("blivet.udev.device_get_name")
def test_run(self, *args):
"""Test multipath device populator."""
device_get_name = args[0]
- add_parent_devices = args[1]
+ add_slave_devices = args[1]
devicetree = DeviceTree()
# set up some fake udev data to verify handling of specific entries
@@ -719,13 +719,13 @@ class MultipathDevicePopulatorTestCase(PopulatorHelperTestCase):
device_name = "mpathtest"
device_get_name.return_value = device_name
- parent_1 = Mock(tags=set(), wwn=wwn[2:])
- parent_1.parents = []
- parent_2 = Mock(tags=set(), wwn=wwn[2:])
- parent_2.parents = []
- devicetree._add_device(parent_1)
- devicetree._add_device(parent_2)
- add_parent_devices.return_value = [parent_1, parent_2]
+ slave_1 = Mock(tags=set(), wwn=wwn[2:])
+ slave_1.parents = []
+ slave_2 = Mock(tags=set(), wwn=wwn[2:])
+ slave_2.parents = []
+ devicetree._add_device(slave_1)
+ devicetree._add_device(slave_2)
+ add_slave_devices.return_value = [slave_1, slave_2]
helper = self.helper_class(devicetree, data)
diff --git a/tests/udev_test.py b/tests/udev_test.py
index f9b10620..d30a647b 100644
--- a/tests/udev_test.py
+++ b/tests/udev_test.py
@@ -45,11 +45,11 @@ class UdevTest(unittest.TestCase):
@mock.patch('blivet.udev.device_is_dm_crypt', return_value=False)
@mock.patch('blivet.udev.device_is_md')
@mock.patch('blivet.udev.device_get_md_container')
- @mock.patch('blivet.udev.device_get_parents')
+ @mock.patch('blivet.udev.device_get_slaves')
def test_udev_device_is_disk_md(self, *args):
import blivet.udev
info = dict(DEVTYPE='disk', SYS_PATH=mock.sentinel.md_path)
- (device_get_parents, device_get_md_container, device_is_md) = args[:3] # pylint: disable=unbalanced-tuple-unpacking
+ (device_get_slaves, device_get_md_container, device_is_md) = args[:3] # pylint: disable=unbalanced-tuple-unpacking
disk_parents = [dict(DEVTYPE="disk", SYS_PATH='/fake/path/2'),
dict(DEVTYPE="disk", SYS_PATH='/fake/path/3')]
@@ -64,20 +64,20 @@ class UdevTest(unittest.TestCase):
# Intel FW RAID (MD RAID w/ container layer)
# device_get_container will return some mock value which will evaluate to True
device_get_md_container.return_value = mock.sentinel.md_container
- device_get_parents.side_effect = lambda info: list()
+ device_get_slaves.side_effect = lambda info: list()
self.assertTrue(blivet.udev.device_is_disk(info))
# Normal MD RAID
- device_get_parents.side_effect = lambda info: partition_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
+ device_get_slaves.side_effect = lambda info: partition_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
device_get_md_container.return_value = None
self.assertFalse(blivet.udev.device_is_disk(info))
# Dell FW RAID (MD RAID whose members are all whole disks)
- device_get_parents.side_effect = lambda info: disk_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
+ device_get_slaves.side_effect = lambda info: disk_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
self.assertTrue(blivet.udev.device_is_disk(info))
# Normal MD RAID (w/ at least one non-disk member)
- device_get_parents.side_effect = lambda info: mixed_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
+ device_get_slaves.side_effect = lambda info: mixed_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
self.assertFalse(blivet.udev.device_is_disk(info))
diff --git a/tests/vmtests/vmbackedtestcase.py b/tests/vmtests/vmbackedtestcase.py
index 797bac85..6255104f 100644
--- a/tests/vmtests/vmbackedtestcase.py
+++ b/tests/vmtests/vmbackedtestcase.py
@@ -50,7 +50,7 @@ class VMBackedTestCase(unittest.TestCase):
defined in set_up_disks.
"""
- udev.ignored_device_names = [r'^zram']
+ udev.device_name_blacklist = [r'^zram']
#
# create disk images
--
2.31.1

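The attribute surface this revert brings back, as a short sketch (assuming a blivet build that carries it): the .slave property returns to DMDevice, LoopDevice and LUKSDevice, and LUKSDevice.raw_device stays available as an alias for it.

import blivet
from blivet.size import Size
from blivet.devices import StorageDevice, LUKSDevice

backing = StorageDevice("backing", fmt=blivet.formats.get_format("luks"),
                        size=Size("1 GiB"), exists=False)
luks = LUKSDevice("luks-backing", parents=[backing])

assert luks.slave is backing          # the backing device, under its pre-3.4 name
assert luks.raw_device is luks.slave  # alias kept, per the luks.py hunk above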

@@ -0,0 +1,35 @@
From 8ece3da18b1abb89320d02f4475002e6a3ed7875 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 20 May 2021 13:40:26 +0200
Subject: [PATCH] Fix activating old style LVM snapshots
The old style snapshots are activated together with the origin LV
so we need to make sure it is activated to be able to remove the
snapshot or its format.
Resolves: rhbz#1961739
---
blivet/devices/lvm.py | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index a55515fcb..fb57804d9 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -1461,9 +1461,13 @@ def _set_format(self, fmt): # pylint: disable=unused-argument
self._update_format_from_origin()
@old_snapshot_specific
- def setup(self, orig=False):
- # the old snapshot cannot be setup and torn down
- pass
+ def setup(self, orig=False): # pylint: disable=unused-argument
+ # the old snapshot is activated together with the origin
+ if self.origin and not self.origin.status:
+ try:
+ self.origin.setup()
+ except blockdev.LVMError as lvmerr:
+ log.error("failed to activate origin LV: %s", lvmerr)
@old_snapshot_specific
def teardown(self, recursive=False):

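The restored activation logic as a hypothetical standalone helper, mirroring the setup() body in the hunk above (snap stands for any old-style snapshot LV):

import gi
gi.require_version("BlockDev", "2.0")
from gi.repository import BlockDev as blockdev

def activate_old_snapshot(snap):
    # old-style snapshots are activated together with their origin
    if snap.origin and not snap.origin.status:
        try:
            snap.origin.setup()
        except blockdev.LVMError as lvmerr:
            print("failed to activate origin LV: %s" % lvmerr)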

@@ -1,48 +0,0 @@
From dc964f10d24499ea7fc90fd896a8b50c9c5e2d74 Mon Sep 17 00:00:00 2001
From: "Samantha N. Bueno" <sbueno+anaconda@redhat.com>
Date: Wed, 8 Jun 2016 13:47:40 -0400
Subject: [PATCH] Round down to nearest MiB value when writing ks partition
info.
On s390x in particular, some partition alignment issue is causing fractional
sizes to be reported. Pykickstart doesn't take anything except int values for
partition info, hence the call to roundToNearest.
This change only affects the data that is written to ks.cfg.
Resolves: rhbz#1850670
---
blivet/devices/partition.py | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/blivet/devices/partition.py b/blivet/devices/partition.py
index 0c56a6e7..76048aed 100644
--- a/blivet/devices/partition.py
+++ b/blivet/devices/partition.py
@@ -35,7 +35,7 @@
from ..storage_log import log_method_call
from .. import udev
from ..formats import DeviceFormat, get_format
-from ..size import Size, MiB
+from ..size import Size, MiB, ROUND_DOWN
import logging
log = logging.getLogger("blivet")
@@ -967,7 +967,8 @@ def populate_ksdata(self, data):
data.resize = (self.exists and self.target_size and
self.target_size != self.current_size)
if not self.exists:
- data.size = self.req_base_size.convert_to(MiB)
+ # round this to nearest MiB before doing anything else
+ data.size = self.req_base_size.round_to_nearest(MiB, rounding=ROUND_DOWN).convert_to(spec=MiB)
data.grow = self.req_grow
if self.req_grow:
data.max_size_mb = self.req_max_size.convert_to(MiB)
@@ -980,4 +981,6 @@ def populate_ksdata(self, data):
data.on_part = self.name # by-id
if data.resize:
- data.size = self.size.convert_to(MiB)
+ # on s390x in particular, fractional sizes are reported, which
+ # cause issues when writing to ks.cfg
+ data.size = self.size.round_to_nearest(MiB, rounding=ROUND_DOWN).convert_to(spec=MiB)

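A worked example of the rounding the patch introduces (the fractional input is made up; the API calls are the ones used above): a fractional size as reported on s390x becomes a whole MiB count that pykickstart accepts.

from blivet.size import Size, MiB, ROUND_DOWN

reported = Size("500.6 MiB")  # hypothetical fractional size seen on s390x
ks_size = reported.round_to_nearest(MiB, rounding=ROUND_DOWN).convert_to(spec=MiB)
print(ks_size)  # 500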

@@ -1,438 +0,0 @@
From 44d7e9669fe55fd4b2b3a6c96f23e2d0669f8dbb Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 9 Jul 2020 13:42:31 +0200
Subject: [PATCH] Blivet RHEL 8.3 localization update
Resolves: rhbz#1820565
---
po/ja.po | 33 ++++++++++-----------
po/ko.po | 83 ++++++++++++++++++++++++-----------------------------
po/zh_CN.po | 28 +++++++++---------
3 files changed, 68 insertions(+), 76 deletions(-)
diff --git a/po/ja.po b/po/ja.po
index 733e63a0..b4c864c2 100644
--- a/po/ja.po
+++ b/po/ja.po
@@ -29,17 +29,17 @@ msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2020-05-21 12:42+0200\n"
-"PO-Revision-Date: 2018-09-21 01:08+0000\n"
-"Last-Translator: Copied by Zanata <copied-by-zanata@zanata.org>\n"
-"Language-Team: Japanese (http://www.transifex.com/projects/p/blivet/language/"
-"ja/)\n"
+"POT-Creation-Date: 2020-01-29 14:04+0100\n"
+"PO-Revision-Date: 2020-07-03 07:42+0000\n"
+"Last-Translator: Ludek Janda <ljanda@redhat.com>\n"
+"Language-Team: Japanese <https://translate.fedoraproject.org/projects/blivet/"
+"blivet-rhel8/ja/>\n"
"Language: ja\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=1; plural=0;\n"
-"X-Generator: Zanata 4.6.2\n"
+"X-Generator: Weblate 4.1.1\n"
#: ../blivet/errors.py:210
msgid ""
@@ -47,6 +47,8 @@ msgid ""
"of the UUID value which should be unique. In that case you can either "
"disconnect one of the devices or reformat it."
msgstr ""
+"これは通常、デバイスイメージを複製したことで、一意であるはずのUUID値が重複することが原因です。その場合は、いずれかのデバイスを切断するか、再フォーマッ"
+"トしてください。"
#: ../blivet/errors.py:217
msgid ""
@@ -54,9 +56,8 @@ msgid ""
"kernel is reporting partitions on. It is unclear what the exact problem is. "
"Please file a bug at http://bugzilla.redhat.com"
msgstr ""
-"なんらかの理由により、kernel がパーティションを報告しているディスク上でディス"
-"クラベルを見つけられませんでした。何が問題となっているかは不明です。バグを "
-"http://bugzilla.redhat.com に提出してください。"
+"なんらかの理由により、kernel がパーティションを報告しているディスク上でディスクラベルを見つけられませんでした。何が問題となっているかは不明です。"
+"バグを http://bugzilla.redhat.com に提出してください"
#: ../blivet/errors.py:224
msgid ""
@@ -84,7 +85,7 @@ msgstr "FCoE は使用できません"
#: ../blivet/zfcp.py:62
msgid "You have not specified a device number or the number is invalid"
-msgstr "デバイス番号を指定していないか番号が無効です。"
+msgstr "デバイス番号を指定していないか番号が無効です"
#: ../blivet/zfcp.py:64
msgid "You have not specified a worldwide port name or the name is invalid."
@@ -202,7 +203,7 @@ msgstr "iSCSI ノードが何も探索できませんでした"
#: ../blivet/iscsi.py:550
msgid "No new iSCSI nodes discovered"
-msgstr "新しい iSCSI ノードは見つかりませんでした。"
+msgstr "新しい iSCSI ノードは見つかりませんでした"
#: ../blivet/iscsi.py:553
msgid "Could not log in to any of the discovered nodes"
@@ -257,7 +258,7 @@ msgstr "要求を超えたサイズを再利用することができません"
#: ../blivet/partitioning.py:1419
msgid "DiskChunk requests must be of type PartitionRequest"
-msgstr "DiskChunk 要求には PartitionResquest タイプが必要です。"
+msgstr "DiskChunk 要求には PartitionResquest タイプが必要です"
#: ../blivet/partitioning.py:1432
msgid "partitions allocated outside disklabel limits"
@@ -265,7 +266,7 @@ msgstr "ディスクラベルの範囲外に割り当てられたパーティシ
#: ../blivet/partitioning.py:1517
msgid "VGChunk requests must be of type LVRequest"
-msgstr "VGChunk 要求には LVResquest タイプが必要です。"
+msgstr "VGChunk 要求には LVResquest タイプが必要です"
#. by now we have allocated the PVs so if there isn't enough
#. space in the VG we have a real problem
@@ -368,15 +369,15 @@ msgstr ""
msgid "Cannot remove a member from existing %s array"
msgstr "既存の %s 配列からメンバーを削除できません"
-#: ../blivet/formats/fs.py:934
+#: ../blivet/formats/fs.py:932
msgid "EFI System Partition"
msgstr "EFI システムパーティション"
-#: ../blivet/formats/fs.py:1139
+#: ../blivet/formats/fs.py:1137
msgid "Apple Bootstrap"
msgstr "Apple ブートストラップ"
-#: ../blivet/formats/fs.py:1175
+#: ../blivet/formats/fs.py:1173
msgid "Linux HFS+ ESP"
msgstr "Linux HFS+ ESP"
diff --git a/po/ko.po b/po/ko.po
index 66789af0..747b00c5 100644
--- a/po/ko.po
+++ b/po/ko.po
@@ -20,17 +20,17 @@ msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2020-05-21 12:42+0200\n"
-"PO-Revision-Date: 2018-09-21 01:08+0000\n"
-"Last-Translator: Copied by Zanata <copied-by-zanata@zanata.org>\n"
-"Language-Team: Korean (http://www.transifex.com/projects/p/blivet/language/"
-"ko/)\n"
+"POT-Creation-Date: 2020-01-29 14:04+0100\n"
+"PO-Revision-Date: 2020-07-03 07:42+0000\n"
+"Last-Translator: Ludek Janda <ljanda@redhat.com>\n"
+"Language-Team: Korean <https://translate.fedoraproject.org/projects/blivet/"
+"blivet-rhel8/ko/>\n"
"Language: ko\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=1; plural=0;\n"
-"X-Generator: Zanata 4.6.2\n"
+"X-Generator: Weblate 4.1.1\n"
#: ../blivet/errors.py:210
msgid ""
@@ -38,6 +38,8 @@ msgid ""
"of the UUID value which should be unique. In that case you can either "
"disconnect one of the devices or reformat it."
msgstr ""
+"이는 일반적으로 장치 이미지 복제로 인해 고유한 UUID 값이 복제되기 때문에 발생합니다. 이 경우 장치 중 하나를 분리하거나 다시 "
+"포맷할 수 있습니다."
#: ../blivet/errors.py:217
msgid ""
@@ -45,9 +47,8 @@ msgid ""
"kernel is reporting partitions on. It is unclear what the exact problem is. "
"Please file a bug at http://bugzilla.redhat.com"
msgstr ""
-"일부 이유로 커널이 파티션 설정을 보고하는 디스크에 디스크 레이블을 배치할 수 "
-"없습니다. 정확한 문제가 무엇인지 알 수 없습니다. http://bugzilla.redhat.com"
-"에 버그 리포트를 제출해 주십시오."
+"일부 이유로 커널이 파티션 설정을 보고하는 디스크에 디스크 레이블을 배치할 수 없습니다. 정확한 문제가 무엇인지 알 수 없습니다. "
+"http://bugzilla.redhat.com에 버그 리포트를 제출해 주십시오"
#: ../blivet/errors.py:224
msgid ""
@@ -78,11 +79,11 @@ msgstr "장치 번호를 지정하지 않았거나, 번호가 맞지 않습니
#: ../blivet/zfcp.py:64
msgid "You have not specified a worldwide port name or the name is invalid."
-msgstr "세계 포트 이름(WWPN)을 지정하지 않았거나, 포트 번호가 맞지 않습니다"
+msgstr "세계 포트 이름(WWPN)을 지정하지 않았거나, 포트 번호가 맞지 않습니다."
#: ../blivet/zfcp.py:66
msgid "You have not specified a FCP LUN or the number is invalid."
-msgstr "FCP LUN을 지정하지 않았거나, 번호가 맞지 않습니다"
+msgstr "FCP LUN을 지정하지 않았거나, 번호가 맞지 않습니다."
#: ../blivet/zfcp.py:91
#, python-format
@@ -103,7 +104,7 @@ msgstr "WWPN %(wwpn)s을(를) zFCP 장치 %(devnum)s (%(e)s)에 추가할 수
#: ../blivet/zfcp.py:119
#, python-format
msgid "WWPN %(wwpn)s not found at zFCP device %(devnum)s."
-msgstr "WWPN %(wwpn)s을(를) zFCP 장치 %(devnum)s에서 찾을 수 없습니다. "
+msgstr "WWPN %(wwpn)s을(를) zFCP 장치 %(devnum)s에서 찾을 수 없습니다."
#: ../blivet/zfcp.py:134
#, python-format
@@ -111,8 +112,7 @@ msgid ""
"Could not add LUN %(fcplun)s to WWPN %(wwpn)s on zFCP device %(devnum)s "
"(%(e)s)."
msgstr ""
-"zFCP 장치 %(devnum)s (%(e)s)에서 LUN %(fcplun)s을(를) WWPN %(wwpn)s에 추가할 "
-"수 없습니다. "
+"zFCP 장치 %(devnum)s (%(e)s)에서 LUN %(fcplun)s을(를) WWPN %(wwpn)s에 추가할 수 없습니다."
#: ../blivet/zfcp.py:140
#, python-format
@@ -136,18 +136,14 @@ msgstr ""
msgid ""
"Failed LUN %(fcplun)s at WWPN %(wwpn)s on zFCP device %(devnum)s removed "
"again."
-msgstr ""
-"zFCP 장치 %(devnum)s에 있는 WWPN %(wwpn)s에서 실패한 LUN %(fcplun)s이 다시 삭"
-"제되었습니다. "
+msgstr "zFCP 장치 %(devnum)s에 있는 WWPN %(wwpn)s에서 실패한 LUN %(fcplun)s이 다시 삭제되었습니다."
#: ../blivet/zfcp.py:218
#, python-format
msgid ""
"Could not correctly delete SCSI device of zFCP %(devnum)s %(wwpn)s "
"%(fcplun)s (%(e)s)."
-msgstr ""
-"zFCP %(devnum)s %(wwpn)s %(fcplun)s (%(e)s)의 SCSI 장치를 올바르게 삭제할 수 "
-"없습니다. "
+msgstr "zFCP %(devnum)s %(wwpn)s %(fcplun)s (%(e)s)의 SCSI 장치를 올바르게 삭제할 수 없습니다."
#: ../blivet/zfcp.py:227
#, python-format
@@ -161,41 +157,40 @@ msgstr ""
#: ../blivet/zfcp.py:245
#, python-format
msgid "Could not remove WWPN %(wwpn)s on zFCP device %(devnum)s (%(e)s)."
-msgstr ""
-"zFCP 장치 %(devnum)s (%(e)s)에서 WWPN %(wwpn)s을(를) 제거할 수 없습니다. "
+msgstr "zFCP 장치 %(devnum)s (%(e)s)에서 WWPN %(wwpn)s을(를) 제거할 수 없습니다."
#: ../blivet/zfcp.py:271
#, python-format
msgid "Could not set zFCP device %(devnum)s offline (%(e)s)."
-msgstr "zFCP 장치 %(devnum)s를 오프라인 (%(e)s)으로 설정할 수 없습니다. "
+msgstr "zFCP 장치 %(devnum)s를 오프라인 (%(e)s)으로 설정할 수 없습니다."
#: ../blivet/iscsi.py:217
msgid "Unable to change iSCSI initiator name once set"
-msgstr "iSCSI 개시자 이름이 설정되면 이를 변경할 수 없음 "
+msgstr "iSCSI 개시자 이름이 설정되면 이를 변경할 수 없음"
#: ../blivet/iscsi.py:219
msgid "Must provide an iSCSI initiator name"
-msgstr "iSCSI 개시자 이름을 지정하십시오 "
+msgstr "iSCSI 개시자 이름을 지정하십시오"
#: ../blivet/iscsi.py:410
msgid "iSCSI not available"
-msgstr "iSCSI 사용 불가능 "
+msgstr "iSCSI 사용 불가능"
#: ../blivet/iscsi.py:412
msgid "No initiator name set"
-msgstr "이니셰이터 이름이 설정되지 않음 "
+msgstr "이니셰이터 이름이 설정되지 않음"
#: ../blivet/iscsi.py:530
msgid "No iSCSI nodes discovered"
-msgstr "iSCSI 노드를 찾을 수 없음 "
+msgstr "iSCSI 노드를 찾을 수 없음"
#: ../blivet/iscsi.py:550
msgid "No new iSCSI nodes discovered"
-msgstr "새 iSCSI 노드를 찾을 수 없음 "
+msgstr "새 iSCSI 노드를 찾을 수 없음"
#: ../blivet/iscsi.py:553
msgid "Could not log in to any of the discovered nodes"
-msgstr "검색된 노드로 로그인할 수 없음 "
+msgstr "검색된 노드로 로그인할 수 없음"
#: ../blivet/partitioning.py:454
msgid "unable to allocate aligned partition"
@@ -265,7 +260,7 @@ msgstr "LVM 요청에 필요한 공간이 충분하지 않습니다"
#: ../blivet/deviceaction.py:194
#, python-format
msgid "Executing %(action)s"
-msgstr "%(action)s 실행 "
+msgstr "%(action)s 실행"
#: ../blivet/deviceaction.py:322
msgid "create device"
@@ -286,7 +281,7 @@ msgstr "포맷 생성"
#: ../blivet/deviceaction.py:613
#, python-format
msgid "Creating %(type)s on %(device)s"
-msgstr "%(device)s에 %(type)s 생성 "
+msgstr "%(device)s에 %(type)s 생성"
#: ../blivet/deviceaction.py:640
#, python-format
@@ -327,11 +322,11 @@ msgstr "컨테이너 멤버 삭제"
#: ../blivet/deviceaction.py:1058
msgid "configure format"
-msgstr "포맷 설정 "
+msgstr "포맷 설정"
#: ../blivet/deviceaction.py:1114
msgid "configure device"
-msgstr "장치 설정 "
+msgstr "장치 설정"
#: ../blivet/devices/raid.py:58
#, python-format
@@ -341,32 +336,28 @@ msgid ""
msgid_plural ""
"RAID level %(raid_level)s requires that device have at least %(min_members)d "
"members."
-msgstr[0] ""
-"RAID 레벨 %(raid_level)s에는 최소 %(min_members)d개의 장치 구성원이 필요합니"
-"다. "
+msgstr[0] "RAID 레벨 %(raid_level)s에는 최소 %(min_members)d개의 장치 구성원이 필요합니다."
#: ../blivet/devices/raid.py:79
#, python-format
msgid ""
"RAID level %(raid_level)s is an invalid value. Must be one of (%(levels)s)."
-msgstr ""
-"RAID 레벨 %(raid_level)s이/가 유효한 값이 아닙니다. (%(levels)s) 중 하나여야 "
-"합니다. "
+msgstr "RAID 레벨 %(raid_level)s이/가 유효한 값이 아닙니다. (%(levels)s) 중 하나여야 합니다."
#: ../blivet/devices/raid.py:104
#, python-format
msgid "Cannot remove a member from existing %s array"
-msgstr "기존 %s 어레이에서 장치 구성원을 제거할 수 없습니다 "
+msgstr "기존 %s 어레이에서 장치 구성원을 제거할 수 없습니다"
-#: ../blivet/formats/fs.py:934
+#: ../blivet/formats/fs.py:932
msgid "EFI System Partition"
-msgstr "EFI 시스템 파티션 "
+msgstr "EFI 시스템 파티션"
-#: ../blivet/formats/fs.py:1139
+#: ../blivet/formats/fs.py:1137
msgid "Apple Bootstrap"
msgstr "Apple 부트스트랩"
-#: ../blivet/formats/fs.py:1175
+#: ../blivet/formats/fs.py:1173
msgid "Linux HFS+ ESP"
msgstr "Linux HFS+ ESP"
@@ -384,7 +375,7 @@ msgstr "암호화됨"
#: ../blivet/formats/luks.py:388
msgid "DM Integrity"
-msgstr "DM 무결성 "
+msgstr "DM 무결성"
#: ../blivet/formats/__init__.py:148
msgid "Unknown"
diff --git a/po/zh_CN.po b/po/zh_CN.po
index 480801de..2be6d492 100644
--- a/po/zh_CN.po
+++ b/po/zh_CN.po
@@ -20,24 +20,24 @@ msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2020-05-21 12:42+0200\n"
-"PO-Revision-Date: 2018-09-13 02:13+0000\n"
-"Last-Translator: Copied by Zanata <copied-by-zanata@zanata.org>\n"
-"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/blivet/"
-"language/zh_CN/)\n"
+"POT-Creation-Date: 2020-01-29 14:04+0100\n"
+"PO-Revision-Date: 2020-07-03 07:42+0000\n"
+"Last-Translator: Ludek Janda <ljanda@redhat.com>\n"
+"Language-Team: Chinese (Simplified) <https://translate.fedoraproject.org/"
+"projects/blivet/blivet-rhel8/zh_CN/>\n"
"Language: zh_CN\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=1; plural=0;\n"
-"X-Generator: Zanata 4.6.2\n"
+"X-Generator: Weblate 4.1.1\n"
#: ../blivet/errors.py:210
msgid ""
"This is usually caused by cloning the device image resulting in duplication "
"of the UUID value which should be unique. In that case you can either "
"disconnect one of the devices or reformat it."
-msgstr ""
+msgstr "这通常是由于克隆设备镜像导致 UUID 值重复造成的,而 UUID 值应该是唯一的。如果是这种情况,可以断开其中一个设备或重新格式化它。"
#: ../blivet/errors.py:217
msgid ""
@@ -45,8 +45,8 @@ msgid ""
"kernel is reporting partitions on. It is unclear what the exact problem is. "
"Please file a bug at http://bugzilla.redhat.com"
msgstr ""
-"由于某些原因无法定位内核报告中显示在其中进行分区的磁盘的磁盘标签。尚不了解具"
-"体问题所在。请在 http://bugzilla.redhat.com 提交 bug。"
+"由于某些原因无法定位内核报告中显示在其中进行分区的磁盘的磁盘标签。尚不了解具体问题所在。请在 http://bugzilla.redhat.com 提交 "
+"bug"
#: ../blivet/errors.py:224
msgid ""
@@ -170,7 +170,7 @@ msgstr "设定后就无法更改 iSCSI 启动程序名称"
#: ../blivet/iscsi.py:219
msgid "Must provide an iSCSI initiator name"
-msgstr "您必须提供一个 iSCSI 启动程序名称。"
+msgstr "您必须提供一个 iSCSI 启动程序名称"
#: ../blivet/iscsi.py:410
msgid "iSCSI not available"
@@ -223,7 +223,7 @@ msgstr ""
#: ../blivet/partitioning.py:962
msgid "Unable to allocate requested partition scheme."
-msgstr "无法分配所需分区方案"
+msgstr "无法分配所需分区方案。"
#: ../blivet/partitioning.py:997
msgid "not enough free space after creating extended partition"
@@ -347,15 +347,15 @@ msgstr ""
msgid "Cannot remove a member from existing %s array"
msgstr "无法从存在的 %s 阵列中删除一个成员"
-#: ../blivet/formats/fs.py:934
+#: ../blivet/formats/fs.py:932
msgid "EFI System Partition"
msgstr "EFI 系统分区"
-#: ../blivet/formats/fs.py:1139
+#: ../blivet/formats/fs.py:1137
msgid "Apple Bootstrap"
msgstr "Apple Bootstrap"
-#: ../blivet/formats/fs.py:1175
+#: ../blivet/formats/fs.py:1173
msgid "Linux HFS+ ESP"
msgstr "Linux HFS+ ESP"
--
2.25.4


@ -0,0 +1,75 @@
From 344e624f91010b6041c22ee8a24c9305b82af969 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 18 May 2021 12:54:02 +0200
Subject: [PATCH] Fix resolving devices with names that look like BIOS drive
number
A RAID array named "10" is not resolved because we first try to
resolve the spec using EDD data, and when that lookup fails we
never fall back to trying the name itself.
Resolves: rhbz#1960798
---
blivet/devicetree.py | 18 +++++++++---------
tests/devicetree_test.py | 4 ++++
2 files changed, 13 insertions(+), 9 deletions(-)
diff --git a/blivet/devicetree.py b/blivet/devicetree.py
index 88e9f0e5..f4ae1968 100644
--- a/blivet/devicetree.py
+++ b/blivet/devicetree.py
@@ -634,20 +634,20 @@ class DeviceTreeBase(object):
(label.startswith("'") and label.endswith("'"))):
label = label[1:-1]
device = self.labels.get(label)
- elif re.match(r'(0x)?[A-Fa-f0-9]{2}(p\d+)?$', devspec):
- # BIOS drive number
- (drive, _p, partnum) = devspec.partition("p")
- spec = int(drive, 16)
- for (edd_name, edd_number) in self.edd_dict.items():
- if edd_number == spec:
- device = self.get_device_by_name(edd_name + partnum)
- break
elif options and "nodev" in options.split(","):
device = self.get_device_by_name(devspec)
if not device:
device = self.get_device_by_path(devspec)
else:
- if not devspec.startswith("/dev/"):
+ if re.match(r'(0x)?[A-Fa-f0-9]{2}(p\d+)?$', devspec):
+ # BIOS drive number
+ (drive, _p, partnum) = devspec.partition("p")
+ spec = int(drive, 16)
+ for (edd_name, edd_number) in self.edd_dict.items():
+ if edd_number == spec:
+ device = self.get_device_by_name(edd_name + partnum)
+ break
+ if not device and not devspec.startswith("/dev/"):
device = self.get_device_by_name(devspec)
if not device:
devspec = "/dev/" + devspec
diff --git a/tests/devicetree_test.py b/tests/devicetree_test.py
index 11f8469d..b033343d 100644
--- a/tests/devicetree_test.py
+++ b/tests/devicetree_test.py
@@ -49,6 +49,9 @@ class DeviceTreeTestCase(unittest.TestCase):
dev3 = StorageDevice("sdp2", exists=True)
dt._add_device(dev3)
+ dev4 = StorageDevice("10", exists=True)
+ dt._add_device(dev4)
+
dt.edd_dict.update({"dev1": 0x81,
"dev2": 0x82})
@@ -62,6 +65,7 @@ class DeviceTreeTestCase(unittest.TestCase):
self.assertEqual(dt.resolve_device("0x82"), dev2)
self.assertEqual(dt.resolve_device(dev3.name), dev3)
+ self.assertEqual(dt.resolve_device(dev4.name), dev4)
def test_device_name(self):
# check that devicetree.names property contains all device's names
--
2.31.1
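
To illustrate the ambiguity the commit message describes, a quick standalone check with the same regular expression (hypothetical snippet, not part of the patch):

import re

BIOS_SPEC = re.compile(r'(0x)?[A-Fa-f0-9]{2}(p\d+)?$')

# "10" is a perfectly good array name, but it also parses as hex BIOS
# drive 0x10, so the EDD lookup runs first and must fall back to a
# plain name lookup when it finds nothing.
print(bool(BIOS_SPEC.match("10")))    # True  -- tried as EDD drive 0x10 first
print(bool(BIOS_SPEC.match("0x82")))  # True  -- classic BIOS drive spec
print(bool(BIOS_SPEC.match("sda")))   # False -- resolved directly by name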


@ -0,0 +1,151 @@
From dc1e2fe7783748528cac2f7aa516c89d1959b052 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 29 Jul 2021 14:44:22 +0200
Subject: [PATCH] Do not set chunk size for RAID 1
Setting a chunk size for RAID 1 doesn't make sense, and the latest
mdadm started returning an error instead of ignoring the --chunk
option when creating an array.
Resolves: rhbz#1987170
---
blivet/devicelibs/raid.py | 12 ++++++++++
blivet/devices/md.py | 15 ++++++++++---
tests/devices_test/md_test.py | 41 +++++++++++++++++++++++++++++++++--
3 files changed, 63 insertions(+), 5 deletions(-)
diff --git a/blivet/devicelibs/raid.py b/blivet/devicelibs/raid.py
index 19c3fae98..a9e241c7a 100644
--- a/blivet/devicelibs/raid.py
+++ b/blivet/devicelibs/raid.py
@@ -462,6 +462,18 @@ def _pad(self, size, chunk_size):
def _get_recommended_stride(self, member_count):
return None
+ def get_size(self, member_sizes, num_members=None, chunk_size=None, superblock_size_func=None):
+ if not member_sizes:
+ return Size(0)
+
+ if num_members is None:
+ num_members = len(member_sizes)
+
+ min_size = min(member_sizes)
+ superblock_size = superblock_size_func(min_size)
+ min_data_size = self._trim(min_size - superblock_size, chunk_size)
+ return self.get_net_array_size(num_members, min_data_size)
+
RAID1 = RAID1()
ALL_LEVELS.add_raid_level(RAID1)
diff --git a/blivet/devices/md.py b/blivet/devices/md.py
index 69eee93a5..d1a2faf1f 100644
--- a/blivet/devices/md.py
+++ b/blivet/devices/md.py
@@ -138,7 +138,7 @@ def __init__(self, name, level=None, major=None, minor=None, size=None,
if self.exists:
self._chunk_size = self.read_chunk_size()
else:
- self._chunk_size = chunk_size or mdraid.MD_CHUNK_SIZE
+ self.chunk_size = chunk_size or Size(0)
if not self.exists and not isinstance(metadata_version, str):
self.metadata_version = "default"
@@ -208,8 +208,14 @@ def sector_size(self):
@property
def chunk_size(self):
- if self.exists and self._chunk_size == Size(0):
- self._chunk_size = self.read_chunk_size()
+ if self._chunk_size == Size(0):
+ if self.exists:
+ return self.read_chunk_size()
+ else:
+ if self.level == raid.RAID1:
+ return self._chunk_size
+ else:
+ return mdraid.MD_CHUNK_SIZE
return self._chunk_size
@chunk_size.setter
@@ -223,6 +229,9 @@ def chunk_size(self, newsize):
if self.exists:
raise ValueError("cannot set chunk size for an existing device")
+ if self.level == raid.RAID1 and newsize != Size(0):
+ raise ValueError("specifying chunk size is not allowed for raid1")
+
self._chunk_size = newsize
def read_chunk_size(self):
diff --git a/tests/devices_test/md_test.py b/tests/devices_test/md_test.py
index 46df76d3d..47a0fa0cc 100644
--- a/tests/devices_test/md_test.py
+++ b/tests/devices_test/md_test.py
@@ -1,6 +1,11 @@
import six
import unittest
+try:
+ from unittest.mock import patch
+except ImportError:
+ from mock import patch
+
import blivet
from blivet.devices import StorageDevice
@@ -27,9 +32,27 @@ def test_chunk_size1(self):
raid_array = MDRaidArrayDevice(name="raid", level="raid0", member_devices=2,
total_devices=2, parents=[member1, member2])
- # no chunk_size specified -- default value
+ # no chunk_size specified and RAID0 -- default value
self.assertEqual(raid_array.chunk_size, mdraid.MD_CHUNK_SIZE)
+ with patch("blivet.devices.md.blockdev.md.create") as md_create:
+ raid_array._create()
+ md_create.assert_called_with("/dev/md/raid", "raid0", ["/dev/member1", "/dev/member2"],
+ 0, version="default", bitmap=False,
+ chunk_size=mdraid.MD_CHUNK_SIZE)
+
+ raid_array = MDRaidArrayDevice(name="raid", level="raid1", member_devices=2,
+ total_devices=2, parents=[member1, member2])
+
+ # no chunk_size specified and RAID1 -- no chunk size set (0)
+ self.assertEqual(raid_array.chunk_size, Size(0))
+
+ with patch("blivet.devices.md.blockdev.md.create") as md_create:
+ raid_array._create()
+ md_create.assert_called_with("/dev/md/raid", "raid1", ["/dev/member1", "/dev/member2"],
+ 0, version="default", bitmap=True,
+ chunk_size=0)
+
def test_chunk_size2(self):
member1 = StorageDevice("member1", fmt=blivet.formats.get_format("mdmember"),
@@ -40,11 +63,25 @@ def test_chunk_size2(self):
raid_array = MDRaidArrayDevice(name="raid", level="raid0", member_devices=2,
total_devices=2, parents=[member1, member2],
chunk_size=Size("1024 KiB"))
-
self.assertEqual(raid_array.chunk_size, Size("1024 KiB"))
+ # for raid0 setting chunk_size = 0 means "default"
+ raid_array.chunk_size = Size(0)
+ self.assertEqual(raid_array.chunk_size, mdraid.MD_CHUNK_SIZE)
+
with six.assertRaisesRegex(self, ValueError, "new chunk size must be of type Size"):
raid_array.chunk_size = 1
with six.assertRaisesRegex(self, ValueError, "new chunk size must be multiple of 4 KiB"):
raid_array.chunk_size = Size("5 KiB")
+
+ with six.assertRaisesRegex(self, ValueError, "specifying chunk size is not allowed for raid1"):
+ MDRaidArrayDevice(name="raid", level="raid1", member_devices=2,
+ total_devices=2, parents=[member1, member2],
+ chunk_size=Size("1024 KiB"))
+
+ raid_array = MDRaidArrayDevice(name="raid", level="raid1", member_devices=2,
+ total_devices=2, parents=[member1, member2])
+
+ with six.assertRaisesRegex(self, ValueError, "specifying chunk size is not allowed for raid1"):
+ raid_array.chunk_size = Size("512 KiB")
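
A condensed sketch of the chunk size resolution the device patch above implements; the function name and the 512 KiB stand-in for mdraid.MD_CHUNK_SIZE are illustrative assumptions:

from blivet.size import Size

DEFAULT_CHUNK = Size("512 KiB")   # stands in for mdraid.MD_CHUNK_SIZE

def effective_chunk_size(requested, level, exists, read_from_disk=lambda: Size(0)):
    """What the chunk_size property above resolves to for a given array."""
    if requested == Size(0):
        if exists:
            return read_from_disk()   # existing array: ask the kernel
        # new arrays: RAID 1 gets no chunk size, everything else the default
        return Size(0) if level == "raid1" else DEFAULT_CHUNK
    if level == "raid1":
        # mirrors the setter: RAID 1 has no chunks to size
        raise ValueError("specifying chunk size is not allowed for raid1")
    return requested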


@ -1,24 +0,0 @@
From 7bc4e324580656585adad0cbe51d60ed3540b766 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 3 Jul 2020 13:04:23 +0200
Subject: [PATCH] Do not use FSAVAIL and FSUSE% options when running lsblk
These options were added in util-linux 2.33, which is not available
on older systems, so we should not use them.
---
blivet/blivet.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/blivet/blivet.py b/blivet/blivet.py
index fcc2080b..e7dbd37b 100644
--- a/blivet/blivet.py
+++ b/blivet/blivet.py
@@ -77,7 +77,7 @@ def __init__(self):
self._dump_file = "%s/storage.state" % tempfile.gettempdir()
try:
- options = "NAME,SIZE,OWNER,GROUP,MODE,FSTYPE,LABEL,UUID,PARTUUID,FSAVAIL,FSUSE%,MOUNTPOINT"
+ options = "NAME,SIZE,OWNER,GROUP,MODE,FSTYPE,LABEL,UUID,PARTUUID,MOUNTPOINT"
out = capture_output(["lsblk", "--bytes", "-a", "-o", options])
except Exception: # pylint: disable=broad-except
pass
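
If one did want to keep the richer columns where available, a hypothetical runtime probe (not what blivet does) could ask lsblk which columns the installed version supports:

import subprocess

# "lsblk --help" lists the output columns the installed version understands,
# so a version-tolerant caller could enable the new columns conditionally.
help_text = subprocess.run(["lsblk", "--help"],
                           capture_output=True, text=True).stdout
extra = ",FSAVAIL,FSUSE%" if "FSAVAIL" in help_text else ""
options = "NAME,SIZE,OWNER,GROUP,MODE,FSTYPE,LABEL,UUID,PARTUUID" + extra + ",MOUNTPOINT"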


@ -0,0 +1,240 @@
From 3b9a781e138830a190d16c8dd970b800a086de46 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 4 Aug 2021 13:00:53 +0200
Subject: [PATCH 1/3] edd_test: Locate the edd_data based on the test file
location
We can't use the blivet.edd module location when running tests
against an installed version of blivet.
---
tests/devicelibs_test/edd_test.py | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/tests/devicelibs_test/edd_test.py b/tests/devicelibs_test/edd_test.py
index 23d736f4..7ec8d1e6 100644
--- a/tests/devicelibs_test/edd_test.py
+++ b/tests/devicelibs_test/edd_test.py
@@ -1,7 +1,6 @@
import unittest
import mock
import os
-import inspect
import logging
import copy
@@ -110,9 +109,9 @@ class EddTestCase(unittest.TestCase):
name = name[:-1]
if name.startswith("/"):
name = name[1:]
- dirname = os.path.dirname(inspect.getfile(edd))
+ dirname = os.path.abspath(os.path.dirname(__file__))
return os.path.join(dirname,
- "../../tests/devicelibs_test/edd_data/",
+ "edd_data/",
name)
def edd_debug(self, *args):
--
2.31.1
From 7ad3824fceb98e2741820b76a9cfea5add338343 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 4 Aug 2021 13:02:08 +0200
Subject: [PATCH 2/3] tests: Allow running tests without the tests directory in
PYTHONPATH
When running the tests against an installed version of blivet, the
"tests" directory is not in PYTHONPATH, so we need to import all
helper modules using relative paths.
---
tests/action_test.py | 2 +-
tests/devicelibs_test/edd_test.py | 2 +-
tests/{ => devicelibs_test}/lib.py | 0
tests/formats_test/fs_test.py | 2 +-
tests/formats_test/fslabeling.py | 2 +-
tests/formats_test/fstesting.py | 2 +-
tests/formats_test/fsuuid.py | 2 +-
tests/formats_test/labeling_test.py | 2 +-
tests/{ => formats_test}/loopbackedtestcase.py | 0
tests/formats_test/luks_test.py | 2 +-
tests/formats_test/lvmpv_test.py | 2 +-
tests/partitioning_test.py | 2 +-
12 files changed, 10 insertions(+), 10 deletions(-)
rename tests/{ => devicelibs_test}/lib.py (100%)
rename tests/{ => formats_test}/loopbackedtestcase.py (100%)
diff --git a/tests/action_test.py b/tests/action_test.py
index 38a2e872..1e84c20b 100644
--- a/tests/action_test.py
+++ b/tests/action_test.py
@@ -6,7 +6,7 @@ if PY3:
else:
from mock import Mock
-from tests.storagetestcase import StorageTestCase
+from storagetestcase import StorageTestCase
import blivet
from blivet.formats import get_format
from blivet.size import Size
diff --git a/tests/devicelibs_test/edd_test.py b/tests/devicelibs_test/edd_test.py
index 7ec8d1e6..379c7aeb 100644
--- a/tests/devicelibs_test/edd_test.py
+++ b/tests/devicelibs_test/edd_test.py
@@ -6,7 +6,7 @@ import copy
from blivet import arch
from blivet.devicelibs import edd
-from tests import lib
+from . import lib
class FakeDevice(object):
diff --git a/tests/lib.py b/tests/devicelibs_test/lib.py
similarity index 100%
rename from tests/lib.py
rename to tests/devicelibs_test/lib.py
diff --git a/tests/formats_test/fs_test.py b/tests/formats_test/fs_test.py
index ab3499a7..bd643370 100644
--- a/tests/formats_test/fs_test.py
+++ b/tests/formats_test/fs_test.py
@@ -10,7 +10,7 @@ from blivet.errors import DeviceFormatError
from blivet.formats import get_format
from blivet.devices import PartitionDevice, DiskDevice
-from tests import loopbackedtestcase
+from . import loopbackedtestcase
from . import fstesting
diff --git a/tests/formats_test/fslabeling.py b/tests/formats_test/fslabeling.py
index fbb28eee..0e0dc261 100644
--- a/tests/formats_test/fslabeling.py
+++ b/tests/formats_test/fslabeling.py
@@ -2,7 +2,7 @@
import abc
import six
-from tests import loopbackedtestcase
+from . import loopbackedtestcase
from blivet.errors import FSError, FSReadLabelError
from blivet.size import Size
diff --git a/tests/formats_test/fstesting.py b/tests/formats_test/fstesting.py
index 86b2a116..e34584d8 100644
--- a/tests/formats_test/fstesting.py
+++ b/tests/formats_test/fstesting.py
@@ -5,7 +5,7 @@ from six import add_metaclass
import os
import tempfile
-from tests import loopbackedtestcase
+from . import loopbackedtestcase
from blivet.errors import FSError, FSResizeError, DeviceFormatError
from blivet.size import Size, ROUND_DOWN
from blivet.formats import fs
diff --git a/tests/formats_test/fsuuid.py b/tests/formats_test/fsuuid.py
index c8003945..16aa19a6 100644
--- a/tests/formats_test/fsuuid.py
+++ b/tests/formats_test/fsuuid.py
@@ -3,7 +3,7 @@ import abc
import six
from unittest import skipIf
-from tests import loopbackedtestcase
+from . import loopbackedtestcase
from blivet.errors import FSError, FSWriteUUIDError
from blivet.size import Size
from blivet.util import capture_output
diff --git a/tests/formats_test/labeling_test.py b/tests/formats_test/labeling_test.py
index e26cb7df..d24e6619 100644
--- a/tests/formats_test/labeling_test.py
+++ b/tests/formats_test/labeling_test.py
@@ -1,10 +1,10 @@
import unittest
-from tests import loopbackedtestcase
from blivet.formats import device_formats
import blivet.formats.fs as fs
import blivet.formats.swap as swap
+from . import loopbackedtestcase
from . import fslabeling
diff --git a/tests/loopbackedtestcase.py b/tests/formats_test/loopbackedtestcase.py
similarity index 100%
rename from tests/loopbackedtestcase.py
rename to tests/formats_test/loopbackedtestcase.py
diff --git a/tests/formats_test/luks_test.py b/tests/formats_test/luks_test.py
index be0d50b0..5423ebdf 100644
--- a/tests/formats_test/luks_test.py
+++ b/tests/formats_test/luks_test.py
@@ -7,7 +7,7 @@ from blivet.formats.luks import LUKS
from blivet.size import Size
-from tests import loopbackedtestcase
+from . import loopbackedtestcase
class LUKSTestCase(loopbackedtestcase.LoopBackedTestCase):
diff --git a/tests/formats_test/lvmpv_test.py b/tests/formats_test/lvmpv_test.py
index 792a2f1d..da7270d9 100644
--- a/tests/formats_test/lvmpv_test.py
+++ b/tests/formats_test/lvmpv_test.py
@@ -4,7 +4,7 @@ from blivet.formats.lvmpv import LVMPhysicalVolume
from blivet.size import Size
-from tests import loopbackedtestcase
+from . import loopbackedtestcase
class LVMPVTestCase(loopbackedtestcase.LoopBackedTestCase):
diff --git a/tests/partitioning_test.py b/tests/partitioning_test.py
index b7aa5045..a713aaa1 100644
--- a/tests/partitioning_test.py
+++ b/tests/partitioning_test.py
@@ -29,7 +29,7 @@ from blivet.devices.lvm import LVMCacheRequest
from blivet.errors import PartitioningError
-from tests.imagebackedtestcase import ImageBackedTestCase
+from imagebackedtestcase import ImageBackedTestCase
from blivet.blivet import Blivet
from blivet.util import sparsetmpfile
from blivet.formats import get_format
--
2.31.1
From 9ee41c8b60c56ce752e305be73001c7089f43011 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 6 Aug 2021 14:51:01 +0200
Subject: [PATCH 3/3] tests: Print version and blivet location when running
tests
---
tests/run_tests.py | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index 32e3f2d3..8ad8b61a 100644
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -32,6 +32,11 @@ if __name__ == '__main__':
testdir = os.path.abspath(os.path.dirname(__file__))
+ import blivet
+ print("Running tests with Blivet %s from %s" % (blivet.__version__,
+ os.path.abspath(os.path.dirname(blivet.__file__))),
+ file=sys.stderr)
+
if args.testname:
for n in args.testname:
suite.addTests(unittest.TestLoader().loadTestsFromName(n))
--
2.31.1


@ -1,39 +0,0 @@
From 462099a9137fb7997140360c07665a21615a0fea Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Dan=20Hor=C3=A1k?= <dan@danny.cz>
Date: Tue, 7 Jul 2020 13:19:02 +0200
Subject: [PATCH] set allowed disk labels for s390x as standard ones (msdos +
gpt) plus dasd
This solves issues when a SCSI or NVMe disk with a GPT partition table
is used with an s390x machine (rhbz#1827066, rhbz#1854110).
---
blivet/formats/disklabel.py | 2 +-
tests/formats_test/disklabel_test.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/blivet/formats/disklabel.py b/blivet/formats/disklabel.py
index 3dcac12b..53e2c010 100644
--- a/blivet/formats/disklabel.py
+++ b/blivet/formats/disklabel.py
@@ -230,7 +230,7 @@ def get_platform_label_types(cls):
elif arch.is_efi() and not arch.is_aarch64():
label_types = ["gpt", "msdos"]
elif arch.is_s390():
- label_types = ["msdos", "dasd"]
+ label_types += ["dasd"]
return label_types
diff --git a/tests/formats_test/disklabel_test.py b/tests/formats_test/disklabel_test.py
index 94f3775f..3068dc07 100644
--- a/tests/formats_test/disklabel_test.py
+++ b/tests/formats_test/disklabel_test.py
@@ -95,7 +95,7 @@ def test_platform_label_types(self, arch):
arch.is_arm.return_value = False
arch.is_s390.return_value = True
- self.assertEqual(disklabel_class.get_platform_label_types(), ["msdos", "dasd"])
+ self.assertEqual(disklabel_class.get_platform_label_types(), ["msdos", "gpt", "dasd"])
arch.is_s390.return_value = False
def test_label_type_size_check(self):
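
The effect of the one-line change, as a simplified standalone sketch (arch.is_s390 is real blivet API; the surrounding wiring is condensed):

from blivet import arch

label_types = ["msdos", "gpt"]    # the standard defaults
if arch.is_s390():
    # append "dasd" instead of replacing the whole list, so GPT-labelled
    # SCSI/NVMe disks keep working on s390x
    label_types += ["dasd"]
print(label_types)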


@ -1,47 +0,0 @@
From 7303f4a3f2fe3280339f6303dcff31b6ade12176 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 9 Jul 2020 16:30:55 +0200
Subject: [PATCH] Do not use BlockDev.utils_have_kernel_module to check for
modules
The function unfortunately searches only by the exact module name,
but we need alias resolution for modules like ext2 and ext3, so we
have to use "modprobe --dry-run" instead.
---
blivet/formats/fs.py | 12 +++---------
1 file changed, 3 insertions(+), 9 deletions(-)
diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py
index eee15aaa..bcfbc08e 100644
--- a/blivet/formats/fs.py
+++ b/blivet/formats/fs.py
@@ -60,12 +60,6 @@
import logging
log = logging.getLogger("blivet")
-import gi
-gi.require_version("GLib", "2.0")
-gi.require_version("BlockDev", "2.0")
-
-from gi.repository import GLib
-from gi.repository import BlockDev
AVAILABLE_FILESYSTEMS = kernel_filesystems
@@ -462,13 +456,13 @@ def check_module(self):
for module in self._modules:
try:
- succ = BlockDev.utils_have_kernel_module(module)
- except GLib.GError as e:
+ rc = util.run_program(["modprobe", "--dry-run", module])
+ except OSError as e:
log.error("Could not check kernel module availability %s: %s", module, e)
self._supported = False
return
- if not succ:
+ if rc:
log.debug("Kernel module %s not available", module)
self._supported = False
return
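
A minimal standalone version of the same check (assumes modprobe is in PATH; blivet itself goes through util.run_program rather than subprocess):

import subprocess

def kernel_module_available(module):
    # "modprobe --dry-run" resolves aliases (e.g. ext2/ext3 served by the
    # ext4 module on modern kernels), which a name-only lookup cannot do.
    rc = subprocess.call(["modprobe", "--dry-run", module],
                         stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    return rc == 0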


@ -1,844 +0,0 @@
From 18ce766bc90abdf0d8ca54bdf578463392a52ee9 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 12 Aug 2020 10:57:19 +0200
Subject: [PATCH 1/2] Fix name resolution for MD devices and partitions on them
UDev data for both member disks/partitions and partitions on arrays
contain the MD_* properties, so we must be extra careful when deciding
what name to use for the device.
Resolves: rhbz#1862904
---
blivet/udev.py | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/blivet/udev.py b/blivet/udev.py
index 41c99496..c85eb3dc 100644
--- a/blivet/udev.py
+++ b/blivet/udev.py
@@ -202,9 +202,16 @@ def device_get_name(udev_info):
""" Return the best name for a device based on the udev db data. """
if "DM_NAME" in udev_info:
name = udev_info["DM_NAME"]
- elif "MD_DEVNAME" in udev_info and os.path.exists(device_get_sysfs_path(udev_info) + "/md"):
+ elif "MD_DEVNAME" in udev_info:
mdname = udev_info["MD_DEVNAME"]
- if device_is_partition(udev_info):
+ if device_is_md(udev_info):
+ # MD RAID array -> use MD_DEVNAME
+ name = mdname
+ elif device_get_format(udev_info) == "linux_raid_member":
+ # MD RAID member -> use SYS_NAME
+ name = udev_info["SYS_NAME"]
+ elif device_is_partition(udev_info):
+ # partition on RAID -> construct name from MD_DEVNAME + partition number
# for partitions on named RAID we want to use the raid name, not
# the node, e.g. "raid1" instead of "md127p1"
partnum = udev_info["ID_PART_ENTRY_NUMBER"]
@@ -213,6 +220,7 @@ def device_get_name(udev_info):
else:
name = mdname + partnum
else:
+ # something else -> default to MD_DEVNAME
name = mdname
else:
name = udev_info["SYS_NAME"]
--
2.25.4
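
Condensed, the decision the hunk above encodes looks roughly like this (the property names follow the udev data in the companion test patch; the helper wiring is assumed):

def best_md_name(udev_info, device_is_md):
    """Pick a device name from the udev properties of an MD-related device."""
    mdname = udev_info["MD_DEVNAME"]
    if device_is_md(udev_info):
        return mdname                                # the array itself
    if udev_info.get("ID_FS_TYPE") == "linux_raid_member":
        return udev_info["SYS_NAME"]                 # a member disk/partition
    if udev_info.get("DEVTYPE") == "partition":
        partnum = udev_info["ID_PART_ENTRY_NUMBER"]
        # "raid1" + "1" would be ambiguous, so insert "p" after a digit
        return mdname + "p" + partnum if mdname[-1].isdigit() else mdname + partnum
    return mdname                                    # anything else
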
From dc96961adcb9dd6ef6d09e4daaa0a5eaae1ffe60 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 12 Aug 2020 11:10:03 +0200
Subject: [PATCH 2/2] Add tests for udev.device_get_name for RAID devices
This includes sample UDev data for various combinations of RAID
devices configuration.
Related: rhbz#1862904
---
tests/udev_data/__init__.py | 0
tests/udev_data/raid_data.py | 705 +++++++++++++++++++++++++++++++++++
tests/udev_test.py | 46 +++
3 files changed, 751 insertions(+)
create mode 100644 tests/udev_data/__init__.py
create mode 100644 tests/udev_data/raid_data.py
diff --git a/tests/udev_data/__init__.py b/tests/udev_data/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/udev_data/raid_data.py b/tests/udev_data/raid_data.py
new file mode 100644
index 00000000..509cbfbd
--- /dev/null
+++ b/tests/udev_data/raid_data.py
@@ -0,0 +1,705 @@
+# Sample UDev data for various MD RAID devices:
+# - member_boot: data for the member disk or partition after booting the system
+# - member_assemble: data for the member disk or partition after re-assembling a stopped array using
+# 'mdadm --assemble --scan' (yes, this is different from member_boot)
+# - raid_device: data for the RAID array device
+# - raid_partition: data for a partition on the array
+#
+# We have data for different combinations of member "types", MD metadata versions and named vs. unnamed
+# RAID devices.
+# The data were gathered on Fedora 32.
+
+
+class RaidOnDisk1():
+ member_name = "sda"
+ raid_name = "127"
+ raid_node = "md127"
+ metadata_version = "1.2"
+
+ member_boot = {'DEVLINKS': '/dev/disk/by-path/pci-0000:00:11.0-scsi-0:0:0:0 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-0',
+ 'DEVNAME': '/dev/sda',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:0/block/sda',
+ 'DEVTYPE': 'disk',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_LABEL': 'localhost.localdomain:127',
+ 'ID_FS_LABEL_ENC': 'localhost.localdomain:127',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': '54956eb2-6983-8759-e2ad-4c40acc92e4b',
+ 'ID_FS_UUID_ENC': '54956eb2-6983-8759-e2ad-4c40acc92e4b',
+ 'ID_FS_UUID_SUB': '64f96f0b-e97c-9157-d393-1fe457f3dd59',
+ 'ID_FS_UUID_SUB_ENC': '64f96f0b-e97c-9157-d393-1fe457f3dd59',
+ 'ID_FS_VERSION': '1.2',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PATH': 'pci-0000:00:11.0-scsi-0:0:0:0',
+ 'ID_PATH_TAG': 'pci-0000_00_11_0-scsi-0_0_0_0',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-0',
+ 'ID_SERIAL_SHORT': 'drive-scsi0-0-0-0',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MD_DEVICE': 'md127',
+ 'MD_DEVNAME': '127',
+ 'MD_FOREIGN': 'no',
+ 'MD_STARTED': 'unsafe',
+ 'MINOR': '0',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdadm-last-resort@md127.timer',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '5529231',
+ 'SYS_NAME': 'sda',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:0/block/sda'}
+
+ member_assemble = {'DEVLINKS': '/dev/disk/by-path/pci-0000:00:11.0-scsi-0:0:0:0 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-0',
+ 'DEVNAME': '/dev/sda',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:0/block/sda',
+ 'DEVTYPE': 'disk',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_LABEL': 'localhost.localdomain:127',
+ 'ID_FS_LABEL_ENC': 'localhost.localdomain:127',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': '54956eb2-6983-8759-e2ad-4c40acc92e4b',
+ 'ID_FS_UUID_ENC': '54956eb2-6983-8759-e2ad-4c40acc92e4b',
+ 'ID_FS_UUID_SUB': '64f96f0b-e97c-9157-d393-1fe457f3dd59',
+ 'ID_FS_UUID_SUB_ENC': '64f96f0b-e97c-9157-d393-1fe457f3dd59',
+ 'ID_FS_VERSION': '1.2',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PATH': 'pci-0000:00:11.0-scsi-0:0:0:0',
+ 'ID_PATH_TAG': 'pci-0000_00_11_0-scsi-0_0_0_0',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-0',
+ 'ID_SERIAL_SHORT': 'drive-scsi0-0-0-0',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MINOR': '0',
+ 'SUBSYSTEM': 'block',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '5529231',
+ 'SYS_NAME': 'sda',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:0/block/sda'}
+
+ raid_device = {'DEVLINKS': '/dev/disk/by-id/md-name-localhost.localdomain:127 /dev/disk/by-id/md-uuid-54956eb2:69838759:e2ad4c40:acc92e4b /dev/md/127',
+ 'DEVNAME': '/dev/md127',
+ 'DEVPATH': '/devices/virtual/block/md127',
+ 'DEVTYPE': 'disk',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': '4eec0361',
+ 'MAJOR': '9',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sda_DEV': '/dev/sda',
+ 'MD_DEVICE_ev_sda_ROLE': '0',
+ 'MD_DEVICE_ev_sdb_DEV': '/dev/sdb',
+ 'MD_DEVICE_ev_sdb_ROLE': '1',
+ 'MD_DEVNAME': '127',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '1.2',
+ 'MD_NAME': 'localhost.localdomain:127',
+ 'MD_UUID': '54956eb2:69838759:e2ad4c40:acc92e4b',
+ 'MINOR': '127',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '603606045',
+ 'SYS_NAME': 'md127',
+ 'SYS_PATH': '/sys/devices/virtual/block/md127'}
+
+ raid_partition = {'DEVLINKS': '/dev/md/127p1 /dev/disk/by-id/md-uuid-54956eb2:69838759:e2ad4c40:acc92e4b-part1 /dev/disk/by-id/md-name-localhost.localdomain:127-part1',
+ 'DEVNAME': '/dev/md127p1',
+ 'DEVPATH': '/devices/virtual/block/md127/md127p1',
+ 'DEVTYPE': 'partition',
+ 'ID_PART_ENTRY_DISK': '9:127',
+ 'ID_PART_ENTRY_NUMBER': '1',
+ 'ID_PART_ENTRY_OFFSET': '2048',
+ 'ID_PART_ENTRY_SCHEME': 'dos',
+ 'ID_PART_ENTRY_SIZE': '2091008',
+ 'ID_PART_ENTRY_TYPE': '0x83',
+ 'ID_PART_ENTRY_UUID': '4eec0361-01',
+ 'MAJOR': '259',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sda_DEV': '/dev/sda',
+ 'MD_DEVICE_ev_sda_ROLE': '0',
+ 'MD_DEVICE_ev_sdb_DEV': '/dev/sdb',
+ 'MD_DEVICE_ev_sdb_ROLE': '1',
+ 'MD_DEVNAME': '127',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '1.2',
+ 'MD_NAME': 'localhost.localdomain:127',
+ 'MD_UUID': '54956eb2:69838759:e2ad4c40:acc92e4b',
+ 'MINOR': '2',
+ 'PARTN': '1',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '603714783',
+ 'SYS_NAME': 'md127p1',
+ 'SYS_PATH': '/sys/devices/virtual/block/md127/md127p1'}
+
+
+class RaidOnDisk2():
+ member_name = "sdc"
+ raid_name = "name"
+ raid_node = "md127"
+ metadata_version = "1.2"
+
+ member_boot = {'DEVLINKS': '/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-4 /dev/disk/by-path/pci-0000:00:11.0-scsi-0:0:0:4',
+ 'DEVNAME': '/dev/sdc',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:4/block/sdc',
+ 'DEVTYPE': 'disk',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_LABEL': 'localhost.localdomain:name',
+ 'ID_FS_LABEL_ENC': 'localhost.localdomain:name',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': '143d480c-12c3-909f-5476-98a9f94a1c4f',
+ 'ID_FS_UUID_ENC': '143d480c-12c3-909f-5476-98a9f94a1c4f',
+ 'ID_FS_UUID_SUB': '121f2b71-3634-4183-dc9c-08bfceda765c',
+ 'ID_FS_UUID_SUB_ENC': '121f2b71-3634-4183-dc9c-08bfceda765c',
+ 'ID_FS_VERSION': '1.2',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PATH': 'pci-0000:00:11.0-scsi-0:0:0:4',
+ 'ID_PATH_TAG': 'pci-0000_00_11_0-scsi-0_0_0_4',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-4',
+ 'ID_SERIAL_SHORT': 'drive-scsi0-0-0-4',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MD_DEVICE': 'md127',
+ 'MD_DEVNAME': 'name',
+ 'MD_FOREIGN': 'no',
+ 'MD_STARTED': 'yes',
+ 'MINOR': '32',
+ 'SUBSYSTEM': 'block',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '6109555',
+ 'SYS_NAME': 'sdc',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:4/block/sdc'}
+
+ member_assemble = {'DEVLINKS': '/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-4 /dev/disk/by-path/pci-0000:00:11.0-scsi-0:0:0:4',
+ 'DEVNAME': '/dev/sdc',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:4/block/sdc',
+ 'DEVTYPE': 'disk',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_LABEL': 'localhost.localdomain:name',
+ 'ID_FS_LABEL_ENC': 'localhost.localdomain:name',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': '143d480c-12c3-909f-5476-98a9f94a1c4f',
+ 'ID_FS_UUID_ENC': '143d480c-12c3-909f-5476-98a9f94a1c4f',
+ 'ID_FS_UUID_SUB': '121f2b71-3634-4183-dc9c-08bfceda765c',
+ 'ID_FS_UUID_SUB_ENC': '121f2b71-3634-4183-dc9c-08bfceda765c',
+ 'ID_FS_VERSION': '1.2',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PATH': 'pci-0000:00:11.0-scsi-0:0:0:4',
+ 'ID_PATH_TAG': 'pci-0000_00_11_0-scsi-0_0_0_4',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-4',
+ 'ID_SERIAL_SHORT': 'drive-scsi0-0-0-4',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MINOR': '32',
+ 'SUBSYSTEM': 'block',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '6109555',
+ 'SYS_NAME': 'sdc',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:4/block/sdc'}
+
+ raid_device = {'DEVLINKS': '/dev/disk/by-id/md-name-localhost.localdomain:name /dev/disk/by-id/md-uuid-143d480c:12c3909f:547698a9:f94a1c4f /dev/md/name',
+ 'DEVNAME': '/dev/md127',
+ 'DEVPATH': '/devices/virtual/block/md127',
+ 'DEVTYPE': 'disk',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': '19e9cb5b',
+ 'MAJOR': '9',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sdc_DEV': '/dev/sdc',
+ 'MD_DEVICE_ev_sdc_ROLE': '0',
+ 'MD_DEVICE_ev_sdd_DEV': '/dev/sdd',
+ 'MD_DEVICE_ev_sdd_ROLE': '1',
+ 'MD_DEVNAME': 'name',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '1.2',
+ 'MD_NAME': 'localhost.localdomain:name',
+ 'MD_UUID': '143d480c:12c3909f:547698a9:f94a1c4f',
+ 'MINOR': '127',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '5844744',
+ 'SYS_NAME': 'md127',
+ 'SYS_PATH': '/sys/devices/virtual/block/md127'}
+
+ raid_partition = {'DEVLINKS': '/dev/disk/by-id/md-uuid-143d480c:12c3909f:547698a9:f94a1c4f-part1 /dev/disk/by-id/md-name-localhost.localdomain:name-part1 /dev/md/name1',
+ 'DEVNAME': '/dev/md127p1',
+ 'DEVPATH': '/devices/virtual/block/md127/md127p1',
+ 'DEVTYPE': 'partition',
+ 'ID_PART_ENTRY_DISK': '9:127',
+ 'ID_PART_ENTRY_NUMBER': '1',
+ 'ID_PART_ENTRY_OFFSET': '2048',
+ 'ID_PART_ENTRY_SCHEME': 'dos',
+ 'ID_PART_ENTRY_SIZE': '2091008',
+ 'ID_PART_ENTRY_TYPE': '0x83',
+ 'ID_PART_ENTRY_UUID': '19e9cb5b-01',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': 'ec985633',
+ 'MAJOR': '259',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sdc_DEV': '/dev/sdc',
+ 'MD_DEVICE_ev_sdc_ROLE': '0',
+ 'MD_DEVICE_ev_sdd_DEV': '/dev/sdd',
+ 'MD_DEVICE_ev_sdd_ROLE': '1',
+ 'MD_DEVNAME': 'name',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '1.2',
+ 'MD_NAME': 'localhost.localdomain:name',
+ 'MD_UUID': '143d480c:12c3909f:547698a9:f94a1c4f',
+ 'MINOR': '1',
+ 'PARTN': '1',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '5928255',
+ 'SYS_NAME': 'md127p1',
+ 'SYS_PATH': '/sys/devices/virtual/block/md127/md127p1'}
+
+
+class RaidOnDisk3():
+ member_name = "sde"
+ raid_name = "125"
+ raid_node = "md125"
+ metadata_version = "0.9"
+
+ member_boot = {'DEVLINKS': '/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-1 /dev/disk/by-path/pci-0000:00:11.0-scsi-0:0:0:1',
+ 'DEVNAME': '/dev/sde',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:1/block/sde',
+ 'DEVTYPE': 'disk',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': 'c4ef60f5-e374-5f70-bfe7-8010bc810f04',
+ 'ID_FS_UUID_ENC': 'c4ef60f5-e374-5f70-bfe7-8010bc810f04',
+ 'ID_FS_VERSION': '0.90.0',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PATH': 'pci-0000:00:11.0-scsi-0:0:0:1',
+ 'ID_PATH_TAG': 'pci-0000_00_11_0-scsi-0_0_0_1',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-1',
+ 'ID_SERIAL_SHORT': 'drive-scsi0-0-0-1',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MD_DEVICE': 'md125',
+ 'MD_DEVNAME': '125',
+ 'MD_FOREIGN': 'no',
+ 'MD_STARTED': 'unsafe',
+ 'MINOR': '64',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdadm-last-resort@md125.timer',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '5538551',
+ 'SYS_NAME': 'sde',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:1/block/sde'}
+
+ member_assemble = {'DEVLINKS': '/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-1 /dev/disk/by-path/pci-0000:00:11.0-scsi-0:0:0:1',
+ 'DEVNAME': '/dev/sde',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:1/block/sde',
+ 'DEVTYPE': 'disk',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': 'c4ef60f5-e374-5f70-bfe7-8010bc810f04',
+ 'ID_FS_UUID_ENC': 'c4ef60f5-e374-5f70-bfe7-8010bc810f04',
+ 'ID_FS_VERSION': '0.90.0',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PATH': 'pci-0000:00:11.0-scsi-0:0:0:1',
+ 'ID_PATH_TAG': 'pci-0000_00_11_0-scsi-0_0_0_1',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-1',
+ 'ID_SERIAL_SHORT': 'drive-scsi0-0-0-1',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MINOR': '64',
+ 'SUBSYSTEM': 'block',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '5538551',
+ 'SYS_NAME': 'sde',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:1/block/sde'}
+
+ raid_device = {'DEVLINKS': '/dev/md/125 /dev/disk/by-id/md-uuid-c4ef60f5:e3745f70:bfe78010:bc810f04',
+ 'DEVNAME': '/dev/md125',
+ 'DEVPATH': '/devices/virtual/block/md125',
+ 'DEVTYPE': 'disk',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': 'e74877cd',
+ 'MAJOR': '9',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sde_DEV': '/dev/sde',
+ 'MD_DEVICE_ev_sde_ROLE': '0',
+ 'MD_DEVICE_ev_sdf_DEV': '/dev/sdf',
+ 'MD_DEVICE_ev_sdf_ROLE': '1',
+ 'MD_DEVNAME': '125',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '0.90',
+ 'MD_UUID': 'c4ef60f5:e3745f70:bfe78010:bc810f04',
+ 'MINOR': '125',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '5786380',
+ 'SYS_NAME': 'md125',
+ 'SYS_PATH': '/sys/devices/virtual/block/md125'}
+
+ raid_partition = {'DEVLINKS': '/dev/md/125p1 /dev/disk/by-id/md-uuid-c4ef60f5:e3745f70:bfe78010:bc810f04-part1',
+ 'DEVNAME': '/dev/md125p1',
+ 'DEVPATH': '/devices/virtual/block/md125/md125p1',
+ 'DEVTYPE': 'partition',
+ 'ID_PART_ENTRY_DISK': '9:125',
+ 'ID_PART_ENTRY_NUMBER': '1',
+ 'ID_PART_ENTRY_OFFSET': '2048',
+ 'ID_PART_ENTRY_SCHEME': 'dos',
+ 'ID_PART_ENTRY_SIZE': '2094976',
+ 'ID_PART_ENTRY_TYPE': '0x83',
+ 'ID_PART_ENTRY_UUID': 'e74877cd-01',
+ 'MAJOR': '259',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sde_DEV': '/dev/sde',
+ 'MD_DEVICE_ev_sde_ROLE': '0',
+ 'MD_DEVICE_ev_sdf_DEV': '/dev/sdf',
+ 'MD_DEVICE_ev_sdf_ROLE': '1',
+ 'MD_DEVNAME': '125',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '0.90',
+ 'MD_UUID': 'c4ef60f5:e3745f70:bfe78010:bc810f04',
+ 'MINOR': '3',
+ 'PARTN': '1',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '8808457',
+ 'SYS_NAME': 'md125p1',
+ 'SYS_PATH': '/sys/devices/virtual/block/md125/md125p1'}
+
+
+class RaidOnPartition1():
+ member_name = "sdh3"
+ raid_name = "122"
+ raid_node = "md122"
+ metadata_version = "1.2"
+
+ member_boot = {'DEVLINKS': '/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:2:0-part3 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1-0-2-part3 /dev/disk/by-partuuid/73eb11a9-03',
+ 'DEVNAME': '/dev/sdh3',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh3',
+ 'DEVTYPE': 'partition',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_LABEL': 'localhost.localdomain:122',
+ 'ID_FS_LABEL_ENC': 'localhost.localdomain:122',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': '0628d995-eb60-ebd1-a767-51730b16f212',
+ 'ID_FS_UUID_ENC': '0628d995-eb60-ebd1-a767-51730b16f212',
+ 'ID_FS_UUID_SUB': 'b301779b-f759-ad7d-5324-b38d4b6d944d',
+ 'ID_FS_UUID_SUB_ENC': 'b301779b-f759-ad7d-5324-b38d4b6d944d',
+ 'ID_FS_VERSION': '1.2',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PART_ENTRY_DISK': '8:112',
+ 'ID_PART_ENTRY_NUMBER': '3',
+ 'ID_PART_ENTRY_OFFSET': '411648',
+ 'ID_PART_ENTRY_SCHEME': 'dos',
+ 'ID_PART_ENTRY_SIZE': '204800',
+ 'ID_PART_ENTRY_TYPE': '0x83',
+ 'ID_PART_ENTRY_UUID': '73eb11a9-03',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': '73eb11a9',
+ 'ID_PATH': 'pci-0000:00:07.0-scsi-0:0:2:0',
+ 'ID_PATH_TAG': 'pci-0000_00_07_0-scsi-0_0_2_0',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi1-0-2',
+ 'ID_SERIAL_SHORT': 'drive-scsi1-0-2',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MD_DEVICE': 'md122',
+ 'MD_DEVNAME': '122',
+ 'MD_FOREIGN': 'no',
+ 'MD_STARTED': 'yes',
+ 'MINOR': '115',
+ 'PARTN': '3',
+ 'SUBSYSTEM': 'block',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '8920462',
+ 'SYS_NAME': 'sdh3',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh3'}
+
+ member_assemble = {'DEVLINKS': '/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:2:0-part3 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1-0-2-part3 /dev/disk/by-partuuid/73eb11a9-03',
+ 'DEVNAME': '/dev/sdh3',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh3',
+ 'DEVTYPE': 'partition',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_LABEL': 'localhost.localdomain:122',
+ 'ID_FS_LABEL_ENC': 'localhost.localdomain:122',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': '0628d995-eb60-ebd1-a767-51730b16f212',
+ 'ID_FS_UUID_ENC': '0628d995-eb60-ebd1-a767-51730b16f212',
+ 'ID_FS_UUID_SUB': 'b301779b-f759-ad7d-5324-b38d4b6d944d',
+ 'ID_FS_UUID_SUB_ENC': 'b301779b-f759-ad7d-5324-b38d4b6d944d',
+ 'ID_FS_VERSION': '1.2',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PART_ENTRY_DISK': '8:112',
+ 'ID_PART_ENTRY_NUMBER': '3',
+ 'ID_PART_ENTRY_OFFSET': '411648',
+ 'ID_PART_ENTRY_SCHEME': 'dos',
+ 'ID_PART_ENTRY_SIZE': '204800',
+ 'ID_PART_ENTRY_TYPE': '0x83',
+ 'ID_PART_ENTRY_UUID': '73eb11a9-03',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': '73eb11a9',
+ 'ID_PATH': 'pci-0000:00:07.0-scsi-0:0:2:0',
+ 'ID_PATH_TAG': 'pci-0000_00_07_0-scsi-0_0_2_0',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi1-0-2',
+ 'ID_SERIAL_SHORT': 'drive-scsi1-0-2',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MINOR': '115',
+ 'PARTN': '3',
+ 'SUBSYSTEM': 'block',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '8920462',
+ 'SYS_NAME': 'sdh3',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh3'}
+
+ raid_device = {'DEVLINKS': '/dev/disk/by-id/md-uuid-0628d995:eb60ebd1:a7675173:0b16f212 /dev/disk/by-id/md-name-localhost.localdomain:122 /dev/md/122',
+ 'DEVNAME': '/dev/md122',
+ 'DEVPATH': '/devices/virtual/block/md122',
+ 'DEVTYPE': 'disk',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': '6dc80b3b',
+ 'MAJOR': '9',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sdh3_DEV': '/dev/sdh3',
+ 'MD_DEVICE_ev_sdh3_ROLE': '0',
+ 'MD_DEVICE_ev_sdh5_DEV': '/dev/sdh5',
+ 'MD_DEVICE_ev_sdh5_ROLE': '1',
+ 'MD_DEVNAME': '122',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '1.2',
+ 'MD_NAME': 'localhost.localdomain:122',
+ 'MD_UUID': '0628d995:eb60ebd1:a7675173:0b16f212',
+ 'MINOR': '122',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '8770105',
+ 'SYS_NAME': 'md122',
+ 'SYS_PATH': '/sys/devices/virtual/block/md122'}
+
+ raid_partition = {'DEVLINKS': '/dev/disk/by-id/md-uuid-0628d995:eb60ebd1:a7675173:0b16f212-part1 /dev/disk/by-id/md-name-localhost.localdomain:122-part1 /dev/md/122p1',
+ 'DEVNAME': '/dev/md122p1',
+ 'DEVPATH': '/devices/virtual/block/md122/md122p1',
+ 'DEVTYPE': 'partition',
+ 'ID_PART_ENTRY_DISK': '9:122',
+ 'ID_PART_ENTRY_NUMBER': '1',
+ 'ID_PART_ENTRY_OFFSET': '2048',
+ 'ID_PART_ENTRY_SCHEME': 'dos',
+ 'ID_PART_ENTRY_SIZE': '200704',
+ 'ID_PART_ENTRY_TYPE': '0x83',
+ 'ID_PART_ENTRY_UUID': '6dc80b3b-01',
+ 'MAJOR': '259',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sdh3_DEV': '/dev/sdh3',
+ 'MD_DEVICE_ev_sdh3_ROLE': '0',
+ 'MD_DEVICE_ev_sdh5_DEV': '/dev/sdh5',
+ 'MD_DEVICE_ev_sdh5_ROLE': '1',
+ 'MD_DEVNAME': '122',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '1.2',
+ 'MD_NAME': 'localhost.localdomain:122',
+ 'MD_UUID': '0628d995:eb60ebd1:a7675173:0b16f212',
+ 'MINOR': '6',
+ 'PARTN': '1',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '9003885',
+ 'SYS_NAME': 'md122p1',
+ 'SYS_PATH': '/sys/devices/virtual/block/md122/md122p1'}
+
+
+class RaidOnPartition2():
+ member_name = "sdh1"
+ raid_name = "123"
+ raid_node = "md123"
+ metadata_version = "0.9"
+
+ member_boot = {'DEVLINKS': '/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:2:0-part1 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1-0-2-part1 /dev/disk/by-partuuid/73eb11a9-01',
+ 'DEVNAME': '/dev/sdh1',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh1',
+ 'DEVTYPE': 'partition',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': '335b35e0-f1af-8e86-bfe7-8010bc810f04',
+ 'ID_FS_UUID_ENC': '335b35e0-f1af-8e86-bfe7-8010bc810f04',
+ 'ID_FS_VERSION': '0.90.0',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PART_ENTRY_DISK': '8:112',
+ 'ID_PART_ENTRY_NUMBER': '1',
+ 'ID_PART_ENTRY_OFFSET': '2048',
+ 'ID_PART_ENTRY_SCHEME': 'dos',
+ 'ID_PART_ENTRY_SIZE': '204800',
+ 'ID_PART_ENTRY_TYPE': '0x83',
+ 'ID_PART_ENTRY_UUID': '73eb11a9-01',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': '73eb11a9',
+ 'ID_PATH': 'pci-0000:00:07.0-scsi-0:0:2:0',
+ 'ID_PATH_TAG': 'pci-0000_00_07_0-scsi-0_0_2_0',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi1-0-2',
+ 'ID_SERIAL_SHORT': 'drive-scsi1-0-2',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MD_DEVICE': 'md123',
+ 'MD_DEVNAME': '123',
+ 'MD_FOREIGN': 'no',
+ 'MD_STARTED': 'unsafe',
+ 'MINOR': '113',
+ 'PARTN': '1',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdadm-last-resort@md123.timer',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '8778733',
+ 'SYS_NAME': 'sdh1',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh1'}
+
+ member_assemble = {'DEVLINKS': '/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:2:0-part1 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1-0-2-part1 /dev/disk/by-partuuid/73eb11a9-01',
+ 'DEVNAME': '/dev/sdh1',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh1',
+ 'DEVTYPE': 'partition',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': '335b35e0-f1af-8e86-bfe7-8010bc810f04',
+ 'ID_FS_UUID_ENC': '335b35e0-f1af-8e86-bfe7-8010bc810f04',
+ 'ID_FS_VERSION': '0.90.0',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PART_ENTRY_DISK': '8:112',
+ 'ID_PART_ENTRY_NUMBER': '1',
+ 'ID_PART_ENTRY_OFFSET': '2048',
+ 'ID_PART_ENTRY_SCHEME': 'dos',
+ 'ID_PART_ENTRY_SIZE': '204800',
+ 'ID_PART_ENTRY_TYPE': '0x83',
+ 'ID_PART_ENTRY_UUID': '73eb11a9-01',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': '73eb11a9',
+ 'ID_PATH': 'pci-0000:00:07.0-scsi-0:0:2:0',
+ 'ID_PATH_TAG': 'pci-0000_00_07_0-scsi-0_0_2_0',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi1-0-2',
+ 'ID_SERIAL_SHORT': 'drive-scsi1-0-2',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MINOR': '113',
+ 'PARTN': '1',
+ 'SUBSYSTEM': 'block',
+ 'TAGS': ':systemd:',
+ 'UDISKS_MD_MEMBER_DEVICES': '2',
+ 'UDISKS_MD_MEMBER_EVENTS': '18',
+ 'UDISKS_MD_MEMBER_LEVEL': 'raid1',
+ 'UDISKS_MD_MEMBER_UPDATE_TIME': '1597143914',
+ 'UDISKS_MD_MEMBER_UUID': '335b35e0:f1af8e86:bfe78010:bc810f04',
+ 'USEC_INITIALIZED': '8778733',
+ 'SYS_NAME': 'sdh1',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh1'}
+
+ raid_device = {'DEVLINKS': '/dev/md/123 /dev/disk/by-id/md-uuid-335b35e0:f1af8e86:bfe78010:bc810f04',
+ 'DEVNAME': '/dev/md123',
+ 'DEVPATH': '/devices/virtual/block/md123',
+ 'DEVTYPE': 'disk',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': '653f84c8',
+ 'MAJOR': '9',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sdh1_DEV': '/dev/sdh1',
+ 'MD_DEVICE_ev_sdh1_ROLE': '0',
+ 'MD_DEVICE_ev_sdh2_DEV': '/dev/sdh2',
+ 'MD_DEVICE_ev_sdh2_ROLE': '1',
+ 'MD_DEVNAME': '123',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '0.90',
+ 'MD_UUID': '335b35e0:f1af8e86:bfe78010:bc810f04',
+ 'MINOR': '123',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '8760382',
+ 'SYS_NAME': 'md123',
+ 'SYS_PATH': '/sys/devices/virtual/block/md123'}
+
+ raid_partition = {'DEVLINKS': '/dev/disk/by-id/md-uuid-335b35e0:f1af8e86:bfe78010:bc810f04-part1 /dev/md/123p1',
+ 'DEVNAME': '/dev/md123p1',
+ 'DEVPATH': '/devices/virtual/block/md123/md123p1',
+ 'DEVTYPE': 'partition',
+ 'ID_PART_ENTRY_DISK': '9:123',
+ 'ID_PART_ENTRY_NUMBER': '1',
+ 'ID_PART_ENTRY_OFFSET': '2048',
+ 'ID_PART_ENTRY_SCHEME': 'dos',
+ 'ID_PART_ENTRY_SIZE': '202624',
+ 'ID_PART_ENTRY_TYPE': '0x83',
+ 'ID_PART_ENTRY_UUID': '653f84c8-01',
+ 'MAJOR': '259',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sdh1_DEV': '/dev/sdh1',
+ 'MD_DEVICE_ev_sdh1_ROLE': '0',
+ 'MD_DEVICE_ev_sdh2_DEV': '/dev/sdh2',
+ 'MD_DEVICE_ev_sdh2_ROLE': '1',
+ 'MD_DEVNAME': '123',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '0.90',
+ 'MD_UUID': '335b35e0:f1af8e86:bfe78010:bc810f04',
+ 'MINOR': '5',
+ 'PARTN': '1',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '8952876',
+ 'SYS_NAME': 'md123p1',
+ 'SYS_PATH': '/sys/devices/virtual/block/md123/md123p1'}
diff --git a/tests/udev_test.py b/tests/udev_test.py
index 653eeb6d..d30a647b 100644
--- a/tests/udev_test.py
+++ b/tests/udev_test.py
@@ -2,6 +2,8 @@
import unittest
import mock
+from udev_data import raid_data
+
class UdevTest(unittest.TestCase):
@@ -77,3 +79,47 @@ class UdevTest(unittest.TestCase):
# Normal MD RAID (w/ at least one non-disk member)
device_get_slaves.side_effect = lambda info: mixed_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
self.assertFalse(blivet.udev.device_is_disk(info))
+
+
+class UdevGetNameRaidTest(unittest.TestCase):
+
+ def _test_raid_name(self, udev_data):
+ import blivet.udev
+
+ # members don't have the device_get_sysfs_path(info) + "/md" folder
+ with mock.patch("blivet.udev.device_is_md", return_value=False):
+ member_name = blivet.udev.device_get_name(udev_data.member_boot)
+ self.assertEqual(member_name, udev_data.member_name)
+
+ member_name = blivet.udev.device_get_name(udev_data.member_assemble)
+ self.assertEqual(member_name, udev_data.member_name)
+
+ with mock.patch("blivet.udev.device_is_md", return_value=True):
+ raid_name = blivet.udev.device_get_name(udev_data.raid_device)
+ self.assertEqual(raid_name, udev_data.raid_name)
+
+ # partitions also don't have the device_get_sysfs_path(info) + "/md" folder
+ with mock.patch("blivet.udev.device_is_md", return_value=False):
+ part_name = blivet.udev.device_get_name(udev_data.raid_partition)
+ expected_name = udev_data.raid_name + "p1" if udev_data.raid_name[-1].isdigit() else udev_data.raid_name + "1"
+ self.assertEqual(part_name, expected_name)
+
+ def test_raid_name_on_disk_no_name(self):
+ data = raid_data.RaidOnDisk1()
+ self._test_raid_name(data)
+
+ def test_raid_name_on_disk_with_name(self):
+ data = raid_data.RaidOnDisk2()
+ self._test_raid_name(data)
+
+ def test_raid_name_on_disk_old_metadata(self):
+ data = raid_data.RaidOnDisk3()
+ self._test_raid_name(data)
+
+ def test_raid_name_on_part_no_name(self):
+ data = raid_data.RaidOnPartition1()
+ self._test_raid_name(data)
+
+ def test_raid_name_on_part_old_metadata(self):
+ data = raid_data.RaidOnPartition2()
+ self._test_raid_name(data)
--
2.25.4
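A minimal sketch of the naming rule the tests above encode (an illustration with made-up inputs, not blivet's actual implementation; device_get_name handles more cases): an MD array is named after MD_DEVNAME, and a partition on it only gets a "p" separator when the array name ends in a digit.

    def md_partition_name(raid_name, part_number):
        # "123" -> "123p1", but "data" -> "data1"
        if raid_name[-1].isdigit():
            return "%sp%d" % (raid_name, part_number)
        return "%s%d" % (raid_name, part_number)

    assert md_partition_name("123", 1) == "123p1"
    assert md_partition_name("data", 1) == "data1"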

View File

@@ -1,269 +0,0 @@
From f19140993e94be9e58c8a01c18f1907792f59927 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 5 Aug 2020 13:44:38 +0200
Subject: [PATCH] Fix ignoring disk devices with parents or children
For disk-like devices such as multipath we should allow ignoring
them either by ignoring the mpath device itself or by ignoring all of its
member drives.
- when ignoring the "mpatha" device we should also ignore "sda" and
"sdb"
- when ignoring both "sda" and "sdb" we should also ignore "mpatha"
- when ignoring only "sda" we should not ignore "mpatha" (we don't
want to deal with an "incomplete" multipath device in the tree)
This is consistent with the existing behaviour when using exclusive
disks (or "ignoredisks --only-use" in kickstart).
Resolves: rhbz#1866243
---
blivet/devicetree.py | 51 ++++++++-----
tests/devicetree_test.py | 157 ++++++++++++++++++++++++++++-----------
2 files changed, 146 insertions(+), 62 deletions(-)
diff --git a/blivet/devicetree.py b/blivet/devicetree.py
index 5cc360e1..2afb0d0e 100644
--- a/blivet/devicetree.py
+++ b/blivet/devicetree.py
@@ -907,31 +907,48 @@ class DeviceTreeBase(object):
hidden.add_hook(new=False)
lvm.lvm_cc_removeFilterRejectRegexp(hidden.name)
+ def _disk_in_taglist(self, disk, taglist):
+ # Taglist is a list containing mix of disk names and tags into which disk may belong.
+ # Check if it does. Raise ValueError if unknown tag is encountered.
+ if disk.name in taglist:
+ return True
+ tags = [t[1:] for t in taglist if t.startswith("@")]
+ for tag in tags:
+ if tag not in Tags.__members__:
+ raise ValueError("unknown ignoredisk tag '@%s' encountered" % tag)
+ if Tags(tag) in disk.tags:
+ return True
+ return False
+
def _is_ignored_disk(self, disk):
""" Checks config for lists of exclusive and ignored disks
and returns if the given one should be ignored
"""
-
- def disk_in_taglist(disk, taglist):
- # Taglist is a list containing mix of disk names and tags into which disk may belong.
- # Check if it does. Raise ValueError if unknown tag is encountered.
- if disk.name in taglist:
- return True
- tags = [t[1:] for t in taglist if t.startswith("@")]
- for tag in tags:
- if tag not in Tags.__members__:
- raise ValueError("unknown ignoredisk tag '@%s' encountered" % tag)
- if Tags(tag) in disk.tags:
- return True
- return False
-
- return ((self.ignored_disks and disk_in_taglist(disk, self.ignored_disks)) or
- (self.exclusive_disks and not disk_in_taglist(disk, self.exclusive_disks)))
+ return ((self.ignored_disks and self._disk_in_taglist(disk, self.ignored_disks)) or
+ (self.exclusive_disks and not self._disk_in_taglist(disk, self.exclusive_disks)))
def _hide_ignored_disks(self):
# hide any subtrees that begin with an ignored disk
for disk in [d for d in self._devices if d.is_disk]:
- if self._is_ignored_disk(disk):
+ is_ignored = self.ignored_disks and self._disk_in_taglist(disk, self.ignored_disks)
+ is_exclusive = self.exclusive_disks and self._disk_in_taglist(disk, self.exclusive_disks)
+
+ if is_ignored:
+ if len(disk.children) == 1:
+ if not all(self._is_ignored_disk(d) for d in disk.children[0].parents):
+ raise DeviceTreeError("Including only a subset of raid/multipath member disks is not allowed.")
+
+ # and also children like fwraid or mpath
+ self.hide(disk.children[0])
+
+ # this disk is ignored: ignore it and all its potential parents
+ for p in disk.parents:
+ self.hide(p)
+
+ # and finally hide the disk itself
+ self.hide(disk)
+
+ if self.exclusive_disks and not is_exclusive:
ignored = True
# If the filter allows all members of a fwraid or mpath, the
# fwraid or mpath itself is implicitly allowed as well. I don't
diff --git a/tests/devicetree_test.py b/tests/devicetree_test.py
index a8f369cf..6032e7f6 100644
--- a/tests/devicetree_test.py
+++ b/tests/devicetree_test.py
@@ -370,51 +370,6 @@ class DeviceTreeTestCase(unittest.TestCase):
self.assertTrue(sdb in tree.devices)
self.assertTrue(sdc in tree.devices)
- # now test exclusive_disks special cases for multipath
- sda.format = get_format("multipath_member", exists=True)
- sdb.format = get_format("multipath_member", exists=True)
- sdc.format = get_format("multipath_member", exists=True)
- mpatha = MultipathDevice("mpatha", parents=[sda, sdb, sdc])
- tree._add_device(mpatha)
-
- tree.ignored_disks = []
- tree.exclusive_disks = ["mpatha"]
-
- with patch.object(tree, "hide") as hide:
- tree._hide_ignored_disks()
- self.assertFalse(hide.called)
-
- tree._hide_ignored_disks()
- self.assertTrue(sda in tree.devices)
- self.assertTrue(sdb in tree.devices)
- self.assertTrue(sdc in tree.devices)
- self.assertTrue(mpatha in tree.devices)
-
- # all members in exclusive_disks implies the mpath in exclusive_disks
- tree.exclusive_disks = ["sda", "sdb", "sdc"]
- with patch.object(tree, "hide") as hide:
- tree._hide_ignored_disks()
- self.assertFalse(hide.called)
-
- tree._hide_ignored_disks()
- self.assertTrue(sda in tree.devices)
- self.assertTrue(sdb in tree.devices)
- self.assertTrue(sdc in tree.devices)
- self.assertTrue(mpatha in tree.devices)
-
- tree.exclusive_disks = ["sda", "sdb"]
- with patch.object(tree, "hide") as hide:
- tree._hide_ignored_disks()
- hide.assert_any_call(mpatha)
- hide.assert_any_call(sdc)
-
- # verify that hide works as expected
- tree._hide_ignored_disks()
- self.assertTrue(sda in tree.devices)
- self.assertTrue(sdb in tree.devices)
- self.assertFalse(sdc in tree.devices)
- self.assertFalse(mpatha in tree.devices)
-
def test_get_related_disks(self):
tree = DeviceTree()
@@ -447,3 +402,115 @@ class DeviceTreeTestCase(unittest.TestCase):
tree.unhide(sda)
self.assertEqual(tree.get_related_disks(sda), set([sda, sdb]))
self.assertEqual(tree.get_related_disks(sdb), set([sda, sdb]))
+
+
+class DeviceTreeIgnoredExclusiveMultipathTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.tree = DeviceTree()
+
+ self.sda = DiskDevice("sda")
+ self.sdb = DiskDevice("sdb")
+ self.sdc = DiskDevice("sdc")
+
+ self.tree._add_device(self.sda)
+ self.tree._add_device(self.sdb)
+ self.tree._add_device(self.sdc)
+
+ self.assertTrue(self.sda in self.tree.devices)
+ self.assertTrue(self.sdb in self.tree.devices)
+ self.assertTrue(self.sdc in self.tree.devices)
+
+ # now test exclusive_disks special cases for multipath
+ self.sda.format = get_format("multipath_member", exists=True)
+ self.sdb.format = get_format("multipath_member", exists=True)
+ self.sdc.format = get_format("multipath_member", exists=True)
+ self.mpatha = MultipathDevice("mpatha", parents=[self.sda, self.sdb, self.sdc])
+ self.tree._add_device(self.mpatha)
+
+ def test_exclusive_disks_multipath_1(self):
+ # multipath is exclusive -> all disks should be exclusive
+ self.tree.ignored_disks = []
+ self.tree.exclusive_disks = ["mpatha"]
+
+ with patch.object(self.tree, "hide") as hide:
+ self.tree._hide_ignored_disks()
+ self.assertFalse(hide.called)
+
+ self.tree._hide_ignored_disks()
+ self.assertTrue(self.sda in self.tree.devices)
+ self.assertTrue(self.sdb in self.tree.devices)
+ self.assertTrue(self.sdc in self.tree.devices)
+ self.assertTrue(self.mpatha in self.tree.devices)
+
+ def test_exclusive_disks_multipath_2(self):
+ # all disks exclusive -> mpath should also be exclusive
+ self.tree.exclusive_disks = ["sda", "sdb", "sdc"]
+ with patch.object(self.tree, "hide") as hide:
+ self.tree._hide_ignored_disks()
+ self.assertFalse(hide.called)
+
+ self.tree._hide_ignored_disks()
+ self.assertTrue(self.sda in self.tree.devices)
+ self.assertTrue(self.sdb in self.tree.devices)
+ self.assertTrue(self.sdc in self.tree.devices)
+ self.assertTrue(self.mpatha in self.tree.devices)
+
+ def test_exclusive_disks_multipath_3(self):
+ # some disks exclusive -> mpath should be hidden
+ self.tree.exclusive_disks = ["sda", "sdb"]
+ with patch.object(self.tree, "hide") as hide:
+ self.tree._hide_ignored_disks()
+ hide.assert_any_call(self.mpatha)
+ hide.assert_any_call(self.sdc)
+
+ # verify that hide works as expected
+ self.tree._hide_ignored_disks()
+ self.assertTrue(self.sda in self.tree.devices)
+ self.assertTrue(self.sdb in self.tree.devices)
+ self.assertFalse(self.sdc in self.tree.devices)
+ self.assertFalse(self.mpatha in self.tree.devices)
+
+ def test_ignored_disks_multipath_1(self):
+ # mpatha ignored -> disks should be hidden
+ self.tree.ignored_disks = ["mpatha"]
+ self.tree.exclusive_disks = []
+
+ with patch.object(self.tree, "hide") as hide:
+ self.tree._hide_ignored_disks()
+ hide.assert_any_call(self.mpatha)
+ hide.assert_any_call(self.sda)
+ hide.assert_any_call(self.sdb)
+ hide.assert_any_call(self.sdc)
+
+ self.tree._hide_ignored_disks()
+ self.assertFalse(self.sda in self.tree.devices)
+ self.assertFalse(self.sdb in self.tree.devices)
+ self.assertFalse(self.sdc in self.tree.devices)
+ self.assertFalse(self.mpatha in self.tree.devices)
+
+ def test_ignored_disks_multipath_2(self):
+ # all disks ignored -> mpath should be hidden
+ self.tree.ignored_disks = ["sda", "sdb", "sdc"]
+ self.tree.exclusive_disks = []
+
+ with patch.object(self.tree, "hide") as hide:
+ self.tree._hide_ignored_disks()
+ hide.assert_any_call(self.mpatha)
+ hide.assert_any_call(self.sda)
+ hide.assert_any_call(self.sdb)
+ hide.assert_any_call(self.sdc)
+
+ self.tree._hide_ignored_disks()
+ self.assertFalse(self.sda in self.tree.devices)
+ self.assertFalse(self.sdb in self.tree.devices)
+ self.assertFalse(self.sdc in self.tree.devices)
+ self.assertFalse(self.mpatha in self.tree.devices)
+
+ def test_ignored_disks_multipath_3(self):
+ # some disks ignored -> error
+ self.tree.ignored_disks = ["sda", "sdb"]
+ self.tree.exclusive_disks = []
+
+ with self.assertRaises(DeviceTreeError):
+ self.tree._hide_ignored_disks()
--
2.25.4
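The rules described in the commit message boil down to a small model (a sketch with made-up names; the real logic lives in DeviceTreeBase._hide_ignored_disks and operates on the device tree):

    def disks_to_hide(mpath, members, ignored):
        # ignoring the mpath device also ignores all of its member disks
        if mpath in ignored:
            return {mpath} | set(members)
        # ignoring every member also ignores the mpath device itself
        if all(m in ignored for m in members):
            return {mpath} | set(members)
        # ignoring only a subset of members is an error: we don't want an
        # "incomplete" multipath device in the tree
        if any(m in ignored for m in members):
            raise ValueError("only a subset of multipath members is ignored")
        return set()

    assert disks_to_hide("mpatha", ["sda", "sdb"], {"sda", "sdb"}) == {"mpatha", "sda", "sdb"}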

View File

@@ -1,459 +0,0 @@
From 433d863cd8a57e5fc30948ff905e6a477ed5f17c Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 14 Jul 2020 11:27:08 +0200
Subject: [PATCH 1/4] Add support for XFS format grow
---
blivet/formats/fs.py | 2 ++
blivet/tasks/availability.py | 1 +
blivet/tasks/fsresize.py | 54 ++++++++++++++++++++++++++++++++++++
3 files changed, 57 insertions(+)
diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py
index eee15aaa..12cb9885 100644
--- a/blivet/formats/fs.py
+++ b/blivet/formats/fs.py
@@ -1089,11 +1089,13 @@ class XFS(FS):
_formattable = True
_linux_native = True
_supported = True
+ _resizable = True
_packages = ["xfsprogs"]
_info_class = fsinfo.XFSInfo
_mkfs_class = fsmkfs.XFSMkfs
_readlabel_class = fsreadlabel.XFSReadLabel
_size_info_class = fssize.XFSSize
+ _resize_class = fsresize.XFSResize
_sync_class = fssync.XFSSync
_writelabel_class = fswritelabel.XFSWriteLabel
_writeuuid_class = fswriteuuid.XFSWriteUUID
diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py
index b6b5955a..df62780c 100644
--- a/blivet/tasks/availability.py
+++ b/blivet/tasks/availability.py
@@ -455,5 +455,6 @@ TUNE2FS_APP = application_by_version("tune2fs", E2FSPROGS_VERSION)
XFSADMIN_APP = application("xfs_admin")
XFSDB_APP = application("xfs_db")
XFSFREEZE_APP = application("xfs_freeze")
+XFSRESIZE_APP = application("xfs_growfs")
MOUNT_APP = application("mount")
diff --git a/blivet/tasks/fsresize.py b/blivet/tasks/fsresize.py
index e7e26984..12c0367f 100644
--- a/blivet/tasks/fsresize.py
+++ b/blivet/tasks/fsresize.py
@@ -20,7 +20,10 @@
# Red Hat Author(s): Anne Mulhern <amulhern@redhat.com>
import abc
+import os
+import tempfile
+from contextlib import contextmanager
from six import add_metaclass
from ..errors import FSError
@@ -32,6 +35,9 @@ from . import task
from . import fstask
from . import dfresize
+import logging
+log = logging.getLogger("blivet")
+
@add_metaclass(abc.ABCMeta)
class FSResizeTask(fstask.FSTask):
@@ -115,6 +121,54 @@ class NTFSResize(FSResize):
]
+class XFSResize(FSResize):
+ ext = availability.XFSRESIZE_APP
+ unit = B
+ size_fmt = None
+
+ @contextmanager
+ def _do_temp_mount(self):
+ if self.fs.status:
+ yield
+ else:
+ dev_name = os.path.basename(self.fs.device)
+ tmpdir = tempfile.mkdtemp(prefix="xfs-tempmount-%s" % dev_name)
+ log.debug("mounting XFS on '%s' to '%s' for resize", self.fs.device, tmpdir)
+ try:
+ self.fs.mount(mountpoint=tmpdir)
+ except FSError as e:
+ raise FSError("Failed to mount XFS filesystem for resize: %s" % str(e))
+
+ try:
+ yield
+ finally:
+ util.umount(mountpoint=tmpdir)
+ os.rmdir(tmpdir)
+
+ def _get_block_size(self):
+ if self.fs._current_info:
+ # this should be set by update_size_info()
+ for line in self.fs._current_info.split("\n"):
+ if line.startswith("blocksize ="):
+ return int(line.split("=")[-1])
+
+ raise FSError("Failed to get XFS filesystem block size for resize")
+
+ def size_spec(self):
+ # size for xfs_growfs is in blocks
+ return str(self.fs.target_size.convert_to(self.unit) / self._get_block_size())
+
+ @property
+ def args(self):
+ return [self.fs.system_mountpoint, "-D", self.size_spec()]
+
+ def do_task(self):
+ """ Resizes the XFS format. """
+
+ with self._do_temp_mount():
+ super(XFSResize, self).do_task()
+
+
class TmpFSResize(FSResize):
ext = availability.MOUNT_APP
--
2.26.2
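For reference, a rough standalone equivalent of what the new resize task runs (paths and sizes are made up; the real task mounts through blivet and checks tool availability first). xfs_growfs only operates on a mounted filesystem, which is why the task mounts the device to a temporary directory, and "-D" takes the new size in filesystem blocks rather than bytes:

    import subprocess

    def grow_xfs(mountpoint, target_bytes, block_size):
        # convert the byte size to fs blocks, as size_spec() does above
        blocks = target_bytes // block_size
        subprocess.check_call(["xfs_growfs", "-D", str(blocks), mountpoint])

    # e.g. grow a filesystem mounted at /mnt/test to 64 MiB with 4 KiB blocks:
    # grow_xfs("/mnt/test", 64 * 1024 * 1024, 4096)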
From 56d05334231c30699a9c77dedbc23fdb021b9dee Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 14 Jul 2020 11:27:51 +0200
Subject: [PATCH 2/4] Add tests for XFS resize
XFS supports only grow, so we can't reuse most of the fstesting
code, and we also need to test the resize on a partition because
XFS won't allow growing to a size bigger than the underlying block
device.
---
tests/formats_test/fs_test.py | 91 +++++++++++++++++++++++++++++++++
tests/formats_test/fstesting.py | 33 ++++++------
2 files changed, 107 insertions(+), 17 deletions(-)
diff --git a/tests/formats_test/fs_test.py b/tests/formats_test/fs_test.py
index 15fc0c35..9bc5d20d 100644
--- a/tests/formats_test/fs_test.py
+++ b/tests/formats_test/fs_test.py
@@ -2,8 +2,13 @@ import os
import tempfile
import unittest
+import parted
+
import blivet.formats.fs as fs
from blivet.size import Size, ROUND_DOWN
+from blivet.errors import DeviceFormatError
+from blivet.formats import get_format
+from blivet.devices import PartitionDevice, DiskDevice
from tests import loopbackedtestcase
@@ -50,6 +55,92 @@ class ReiserFSTestCase(fstesting.FSAsRoot):
class XFSTestCase(fstesting.FSAsRoot):
_fs_class = fs.XFS
+ def can_resize(self, an_fs):
+ resize_tasks = (an_fs._resize, an_fs._size_info)
+ return not any(t.availability_errors for t in resize_tasks)
+
+ def _create_partition(self, disk, size):
+ disk.format = get_format("disklabel", device=disk.path, label_type="msdos")
+ disk.format.create()
+ pstart = disk.format.alignment.grainSize
+ pend = pstart + int(Size(size) / disk.format.parted_device.sectorSize)
+ disk.format.add_partition(pstart, pend, parted.PARTITION_NORMAL)
+ disk.format.parted_disk.commit()
+ part = disk.format.parted_disk.getPartitionBySector(pstart)
+
+ device = PartitionDevice(os.path.basename(part.path))
+ device.disk = disk
+ device.exists = True
+ device.parted_partition = part
+
+ return device
+
+ def _remove_partition(self, partition, disk):
+ disk.format.remove_partition(partition.parted_partition)
+ disk.format.parted_disk.commit()
+
+ def test_resize(self):
+ an_fs = self._fs_class()
+ if not an_fs.formattable:
+ self.skipTest("can not create filesystem %s" % an_fs.name)
+ an_fs.device = self.loop_devices[0]
+ self.assertIsNone(an_fs.create())
+ an_fs.update_size_info()
+
+ self._test_sizes(an_fs)
+ # CHECKME: target size is still 0 after update_size_info is called.
+ self.assertEqual(an_fs.size, Size(0) if an_fs.resizable else an_fs._size)
+
+ if not self.can_resize(an_fs):
+ self.assertFalse(an_fs.resizable)
+ # Not resizable, so can not do resizing actions.
+ with self.assertRaises(DeviceFormatError):
+ an_fs.target_size = Size("64 MiB")
+ with self.assertRaises(DeviceFormatError):
+ an_fs.do_resize()
+ else:
+ disk = DiskDevice(os.path.basename(self.loop_devices[0]))
+ part = self._create_partition(disk, Size("50 MiB"))
+ an_fs = self._fs_class()
+ an_fs.device = part.path
+ self.assertIsNone(an_fs.create())
+ an_fs.update_size_info()
+
+ self.assertTrue(an_fs.resizable)
+
+ # grow the partition so we can grow the filesystem
+ self._remove_partition(part, disk)
+ part = self._create_partition(disk, size=part.size + Size("40 MiB"))
+
+ # Try a reasonable target size
+ TARGET_SIZE = Size("64 MiB")
+ an_fs.target_size = TARGET_SIZE
+ self.assertEqual(an_fs.target_size, TARGET_SIZE)
+ self.assertNotEqual(an_fs._size, TARGET_SIZE)
+ self.assertIsNone(an_fs.do_resize())
+ ACTUAL_SIZE = TARGET_SIZE.round_to_nearest(an_fs._resize.unit, rounding=ROUND_DOWN)
+ self.assertEqual(an_fs.size, ACTUAL_SIZE)
+ self.assertEqual(an_fs._size, ACTUAL_SIZE)
+ self._test_sizes(an_fs)
+
+ self._remove_partition(part, disk)
+
+ # and no errors should occur when checking
+ self.assertIsNone(an_fs.do_check())
+
+ def test_shrink(self):
+ self.skipTest("Not checking resize for this test category.")
+
+ def test_too_small(self):
+ self.skipTest("Not checking resize for this test category.")
+
+ def test_no_explicit_target_size2(self):
+ self.skipTest("Not checking resize for this test category.")
+
+ def test_too_big2(self):
+ # XXX this test assumes that resizing to max size - 1 B will fail, but xfs_growfs won't
+ self.skipTest("Not checking resize for this test category.")
+
class HFSTestCase(fstesting.FSAsRoot):
_fs_class = fs.HFS
diff --git a/tests/formats_test/fstesting.py b/tests/formats_test/fstesting.py
index 62f806f9..86b2a116 100644
--- a/tests/formats_test/fstesting.py
+++ b/tests/formats_test/fstesting.py
@@ -11,16 +11,6 @@ from blivet.size import Size, ROUND_DOWN
from blivet.formats import fs
-def can_resize(an_fs):
- """ Returns True if this filesystem has all necessary resizing tools
- available.
-
- :param an_fs: a filesystem object
- """
- resize_tasks = (an_fs._resize, an_fs._size_info, an_fs._minsize)
- return not any(t.availability_errors for t in resize_tasks)
-
-
@add_metaclass(abc.ABCMeta)
class FSAsRoot(loopbackedtestcase.LoopBackedTestCase):
@@ -32,6 +22,15 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase):
def __init__(self, methodName='run_test'):
super(FSAsRoot, self).__init__(methodName=methodName, device_spec=[self._DEVICE_SIZE])
+ def can_resize(self, an_fs):
+ """ Returns True if this filesystem has all necessary resizing tools
+ available.
+
+ :param an_fs: a filesystem object
+ """
+ resize_tasks = (an_fs._resize, an_fs._size_info, an_fs._minsize)
+ return not any(t.availability_errors for t in resize_tasks)
+
def _test_sizes(self, an_fs):
""" Test relationships between different size values.
@@ -190,7 +189,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase):
# CHECKME: target size is still 0 after update_size_info is called.
self.assertEqual(an_fs.size, Size(0) if an_fs.resizable else an_fs._size)
- if not can_resize(an_fs):
+ if not self.can_resize(an_fs):
self.assertFalse(an_fs.resizable)
# Not resizable, so can not do resizing actions.
with self.assertRaises(DeviceFormatError):
@@ -221,7 +220,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase):
# in constructor call behavior would be different.
an_fs = self._fs_class()
- if not can_resize(an_fs):
+ if not self.can_resize(an_fs):
self.skipTest("Not checking resize for this test category.")
if not an_fs.formattable:
self.skipTest("can not create filesystem %s" % an_fs.name)
@@ -244,7 +243,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase):
"""
SIZE = Size("64 MiB")
an_fs = self._fs_class(size=SIZE)
- if not can_resize(an_fs):
+ if not self.can_resize(an_fs):
self.skipTest("Not checking resize for this test category.")
if not an_fs.formattable:
self.skipTest("can not create filesystem %s" % an_fs.name)
@@ -264,7 +263,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase):
def test_shrink(self):
an_fs = self._fs_class()
- if not can_resize(an_fs):
+ if not self.can_resize(an_fs):
self.skipTest("Not checking resize for this test category.")
if not an_fs.formattable:
self.skipTest("can not create filesystem %s" % an_fs.name)
@@ -296,7 +295,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase):
def test_too_small(self):
an_fs = self._fs_class()
- if not can_resize(an_fs):
+ if not self.can_resize(an_fs):
self.skipTest("Not checking resize for this test category.")
if not an_fs.formattable:
self.skipTest("can not create or resize filesystem %s" % an_fs.name)
@@ -315,7 +314,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase):
def test_too_big(self):
an_fs = self._fs_class()
- if not can_resize(an_fs):
+ if not self.can_resize(an_fs):
self.skipTest("Not checking resize for this test category.")
if not an_fs.formattable:
self.skipTest("can not create filesystem %s" % an_fs.name)
@@ -334,7 +333,7 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase):
def test_too_big2(self):
an_fs = self._fs_class()
- if not can_resize(an_fs):
+ if not self.can_resize(an_fs):
self.skipTest("Not checking resize for this test category.")
if not an_fs.formattable:
self.skipTest("can not create filesystem %s" % an_fs.name)
--
2.26.2
From 51acc04f4639f143b55789a06a68aae988a91296 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 15 Jul 2020 12:59:04 +0200
Subject: [PATCH 3/4] Add support for checking and fixing XFS using xfs_repair
---
blivet/formats/fs.py | 1 +
blivet/tasks/availability.py | 1 +
blivet/tasks/fsck.py | 12 ++++++++++++
tests/formats_test/fs_test.py | 6 +++---
4 files changed, 17 insertions(+), 3 deletions(-)
diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py
index 12cb9885..06fbdf10 100644
--- a/blivet/formats/fs.py
+++ b/blivet/formats/fs.py
@@ -1091,6 +1091,7 @@ class XFS(FS):
_supported = True
_resizable = True
_packages = ["xfsprogs"]
+ _fsck_class = fsck.XFSCK
_info_class = fsinfo.XFSInfo
_mkfs_class = fsmkfs.XFSMkfs
_readlabel_class = fsreadlabel.XFSReadLabel
diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py
index df62780c..f3b76650 100644
--- a/blivet/tasks/availability.py
+++ b/blivet/tasks/availability.py
@@ -456,5 +456,6 @@ XFSADMIN_APP = application("xfs_admin")
XFSDB_APP = application("xfs_db")
XFSFREEZE_APP = application("xfs_freeze")
XFSRESIZE_APP = application("xfs_growfs")
+XFSREPAIR_APP = application("xfs_repair")
MOUNT_APP = application("mount")
diff --git a/blivet/tasks/fsck.py b/blivet/tasks/fsck.py
index 5274f13a..8477f5f8 100644
--- a/blivet/tasks/fsck.py
+++ b/blivet/tasks/fsck.py
@@ -123,6 +123,18 @@ class Ext2FSCK(FSCK):
return "\n".join(msgs) or None
+class XFSCK(FSCK):
+ _fsck_errors = {1: "Runtime error encountered during repair operation.",
+ 2: "XFS repair was unable to proceed due to a dirty log."}
+
+ ext = availability.XFSREPAIR_APP
+ options = []
+
+ def _error_message(self, rc):
+ msgs = (self._fsck_errors[c] for c in self._fsck_errors.keys() if rc & c)
+ return "\n".join(msgs) or None
+
+
class HFSPlusFSCK(FSCK):
_fsck_errors = {3: "Quick check found a dirty filesystem; no repairs done.",
4: "Root filesystem was dirty. System should be rebooted.",
diff --git a/tests/formats_test/fs_test.py b/tests/formats_test/fs_test.py
index 9bc5d20d..8fb099fd 100644
--- a/tests/formats_test/fs_test.py
+++ b/tests/formats_test/fs_test.py
@@ -123,10 +123,10 @@ class XFSTestCase(fstesting.FSAsRoot):
self.assertEqual(an_fs._size, ACTUAL_SIZE)
self._test_sizes(an_fs)
- self._remove_partition(part, disk)
+ # and no errors should occur when checking
+ self.assertIsNone(an_fs.do_check())
- # and no errors should occur when checking
- self.assertIsNone(an_fs.do_check())
+ self._remove_partition(part, disk)
def test_shrink(self):
self.skipTest("Not checking resize for this test category.")
--
2.26.2
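XFSCK._error_message treats the xfs_repair return code as a bit field; the same pattern in isolation (illustration only):

    fsck_errors = {1: "Runtime error encountered during repair operation.",
                   2: "XFS repair was unable to proceed due to a dirty log."}

    def error_message(rc):
        # collect one message line per known error bit set in the return code
        msgs = (fsck_errors[c] for c in fsck_errors if rc & c)
        return "\n".join(msgs) or None

    assert error_message(0) is None       # success: no message
    assert error_message(3) is not None   # both error bits set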
From 2a6947098e66f880193f3bac2282a6c7857ca5f7 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 16 Jul 2020 09:05:35 +0200
Subject: [PATCH 4/4] Use xfs_db in read-only mode when getting XFS information
This way it will also work on mounted filesystems.
---
blivet/tasks/fsinfo.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/blivet/tasks/fsinfo.py b/blivet/tasks/fsinfo.py
index af208f5d..41ff700f 100644
--- a/blivet/tasks/fsinfo.py
+++ b/blivet/tasks/fsinfo.py
@@ -95,7 +95,7 @@ class ReiserFSInfo(FSInfo):
class XFSInfo(FSInfo):
ext = availability.XFSDB_APP
- options = ["-c", "sb 0", "-c", "p dblocks", "-c", "p blocksize"]
+ options = ["-c", "sb 0", "-c", "p dblocks", "-c", "p blocksize", "-r"]
class UnimplementedFSInfo(fstask.UnimplementedFSTask):
--
2.26.2
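The resulting invocation looks roughly like this (a sketch; the device path is made up, and the output lines shown are the ones the info and size parsers look for):

    import subprocess

    # "-r" opens the device read-only, so this also works while mounted
    out = subprocess.check_output(["xfs_db", "-c", "sb 0", "-c", "p dblocks",
                                   "-c", "p blocksize", "-r", "/dev/sdh1"])
    # expected to contain lines such as:
    #   dblocks = 25600
    #   blocksize = 4096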

View File

@@ -1,76 +0,0 @@
From aa4ce218fe9b4ee3571d872ff1575a499596181c Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 29 May 2020 12:14:30 +0200
Subject: [PATCH 1/2] Do not limit swap to 128 GiB
The limit was part of a change to limit the suggested swap size in
kickstart, which doesn't use SwapSpace._max_size, so there is no
reason to keep this limit for manual installations.
16 TiB seems to be the maximum usable swap size, based on the mkswap code.
Resolves: rhbz#1656485
---
blivet/formats/swap.py | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/blivet/formats/swap.py b/blivet/formats/swap.py
index 4b8a7edf..3cc59138 100644
--- a/blivet/formats/swap.py
+++ b/blivet/formats/swap.py
@@ -52,8 +52,7 @@ class SwapSpace(DeviceFormat):
_linux_native = True # for clearpart
_plugin = availability.BLOCKDEV_SWAP_PLUGIN
- # see rhbz#744129 for details
- _max_size = Size("128 GiB")
+ _max_size = Size("16 TiB")
config_actions_map = {"label": "write_label"}
--
2.26.2
From 93aa6ad87116f1c86616d73dbe561251c4a0c286 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 11 Jun 2020 14:27:44 +0200
Subject: [PATCH 2/2] Add test for SwapSpace max size
---
tests/formats_test/swap_test.py | 24 ++++++++++++++++++++++++
1 file changed, 24 insertions(+)
create mode 100644 tests/formats_test/swap_test.py
diff --git a/tests/formats_test/swap_test.py b/tests/formats_test/swap_test.py
new file mode 100644
index 00000000..56356144
--- /dev/null
+++ b/tests/formats_test/swap_test.py
@@ -0,0 +1,24 @@
+import test_compat # pylint: disable=unused-import
+
+import six
+import unittest
+
+from blivet.devices.storage import StorageDevice
+from blivet.errors import DeviceError
+from blivet.formats import get_format
+
+from blivet.size import Size
+
+
+class SwapNodevTestCase(unittest.TestCase):
+
+ def test_swap_max_size(self):
+ StorageDevice("dev", size=Size("129 GiB"),
+ fmt=get_format("swap"))
+
+ StorageDevice("dev", size=Size("15 TiB"),
+ fmt=get_format("swap"))
+
+ with six.assertRaisesRegex(self, DeviceError, "device is too large for new format"):
+ StorageDevice("dev", size=Size("17 TiB"),
+ fmt=get_format("swap"))
--
2.26.2

View File

@@ -1,78 +0,0 @@
From 4e6a322d32d2a12f8a87ab763a6286cf3d7b5c27 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 8 Sep 2020 13:57:40 +0200
Subject: [PATCH] Use UnusableConfigurationError for partially hidden multipath
devices
Follow-up for https://github.com/storaged-project/blivet/pull/883
to make Anaconda show an error message instead of crashing.
Resolves: rhbz#1877052
---
blivet/devicetree.py | 4 ++--
blivet/errors.py | 6 ++++++
tests/devicetree_test.py | 4 ++--
3 files changed, 10 insertions(+), 4 deletions(-)
diff --git a/blivet/devicetree.py b/blivet/devicetree.py
index 2afb0d0e..57a9bbd7 100644
--- a/blivet/devicetree.py
+++ b/blivet/devicetree.py
@@ -32,7 +32,7 @@ from gi.repository import BlockDev as blockdev
from .actionlist import ActionList
from .callbacks import callbacks
-from .errors import DeviceError, DeviceTreeError, StorageError, DuplicateUUIDError
+from .errors import DeviceError, DeviceTreeError, StorageError, DuplicateUUIDError, InvalidMultideviceSelection
from .deviceaction import ActionDestroyDevice, ActionDestroyFormat
from .devices import BTRFSDevice, NoDevice, PartitionDevice
from .devices import LVMLogicalVolumeDevice, LVMVolumeGroupDevice
@@ -936,7 +936,7 @@ class DeviceTreeBase(object):
if is_ignored:
if len(disk.children) == 1:
if not all(self._is_ignored_disk(d) for d in disk.children[0].parents):
- raise DeviceTreeError("Including only a subset of raid/multipath member disks is not allowed.")
+ raise InvalidMultideviceSelection("Including only a subset of raid/multipath member disks is not allowed.")
# and also children like fwraid or mpath
self.hide(disk.children[0])
diff --git a/blivet/errors.py b/blivet/errors.py
index 811abf81..7a93f1ce 100644
--- a/blivet/errors.py
+++ b/blivet/errors.py
@@ -233,6 +233,12 @@ class DuplicateVGError(UnusableConfigurationError):
"Hint 2: You can get the VG UUIDs by running "
"'pvs -o +vg_uuid'.")
+
+class InvalidMultideviceSelection(UnusableConfigurationError):
+ suggestion = N_("All parent devices must be selected when choosing exclusive "
+ "or ignored disks for a multipath or firmware RAID device.")
+
+
# DeviceAction
diff --git a/tests/devicetree_test.py b/tests/devicetree_test.py
index 6032e7f6..4e47ffc3 100644
--- a/tests/devicetree_test.py
+++ b/tests/devicetree_test.py
@@ -5,7 +5,7 @@ import six
import unittest
from blivet.actionlist import ActionList
-from blivet.errors import DeviceTreeError, DuplicateUUIDError
+from blivet.errors import DeviceTreeError, DuplicateUUIDError, InvalidMultideviceSelection
from blivet.deviceaction import ACTION_TYPE_DESTROY, ACTION_OBJECT_DEVICE
from blivet.devicelibs import lvm
from blivet.devices import DiskDevice
@@ -512,5 +512,5 @@ class DeviceTreeIgnoredExclusiveMultipathTestCase(unittest.TestCase):
self.tree.ignored_disks = ["sda", "sdb"]
self.tree.exclusive_disks = []
- with self.assertRaises(DeviceTreeError):
+ with self.assertRaises(InvalidMultideviceSelection):
self.tree._hide_ignored_disks()
--
2.26.2
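A sketch of how a consumer such as an installer might surface this (hypothetical usage; assumes a Blivet instance named storage whose populate run hits the unusable configuration):

    from blivet import Blivet
    from blivet.errors import UnusableConfigurationError

    storage = Blivet()
    try:
        storage.reset()  # scans devices and populates the device tree
    except UnusableConfigurationError as e:
        # subclasses carry a translatable hint in .suggestion
        print("Error: %s" % e)
        print("Hint: %s" % e.suggestion)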

View File

@@ -1,32 +0,0 @@
From 866a48e6c3d8246d2897bb402a191df5f2848aa4 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 23 Jun 2020 10:33:33 +0200
Subject: [PATCH] Fix possible UnicodeDecodeError when reading model from sysfs
Some Innovation IT NVMe devices have invalid Unicode characters in
their model name.
Resolves: rhbz#1849326
---
blivet/udev.py | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/blivet/udev.py b/blivet/udev.py
index 41c99496..2c795225 100644
--- a/blivet/udev.py
+++ b/blivet/udev.py
@@ -185,8 +185,9 @@ def __is_blacklisted_blockdev(dev_name):
if any(re.search(expr, dev_name) for expr in device_name_blacklist):
return True
- if os.path.exists("/sys/class/block/%s/device/model" % (dev_name,)):
- model = open("/sys/class/block/%s/device/model" % (dev_name,)).read()
+ model_path = "/sys/class/block/%s/device/model" % dev_name
+ if os.path.exists(model_path):
+ model = open(model_path, encoding="utf-8", errors="replace").read()
for bad in ("IBM *STMF KERNEL", "SCEI Flash-5", "DGC LUNZ"):
if model.find(bad) != -1:
log.info("ignoring %s with model %s", dev_name, model)
--
2.26.2
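The effect of errors="replace" in isolation (a minimal illustration with a made-up model string):

    raw = b"Innovation IT\xef disk"            # invalid UTF-8 byte in the name
    text = raw.decode("utf-8", errors="replace")
    assert text == "Innovation IT\ufffd disk"  # U+FFFD replacement character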

View File

@@ -1,415 +0,0 @@
From 3f6bbf52442609b8e6e3919a3fdd8c5af64923e6 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 12 May 2020 12:48:41 +0200
Subject: [PATCH 1/3] Add basic support for LVM VDO devices
This adds support for detecting LVM VDO devices during populate
and allows removing both VDO LVs and VDO pools using actions.
---
blivet/devices/lvm.py | 150 +++++++++++++++++++++++++++++++-
blivet/populator/helpers/lvm.py | 16 +++-
tests/action_test.py | 39 +++++++++
tests/devices_test/lvm_test.py | 34 ++++++++
tests/storagetestcase.py | 11 ++-
5 files changed, 245 insertions(+), 5 deletions(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 97de6acd..d9e24a33 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -1789,8 +1789,132 @@ class LVMThinLogicalVolumeMixin(object):
data.pool_name = self.pool.lvname
+class LVMVDOPoolMixin(object):
+ def __init__(self):
+ self._lvs = []
+
+ @property
+ def is_vdo_pool(self):
+ return self.seg_type == "vdo-pool"
+
+ @property
+ def type(self):
+ return "lvmvdopool"
+
+ @property
+ def resizable(self):
+ return False
+
+ @util.requires_property("is_vdo_pool")
+ def _add_log_vol(self, lv):
+ """ Add an LV to this VDO pool. """
+ if lv in self._lvs:
+ raise ValueError("lv is already part of this VDO pool")
+
+ self.vg._add_log_vol(lv)
+ log.debug("Adding %s/%s to %s", lv.name, lv.size, self.name)
+ self._lvs.append(lv)
+
+ @util.requires_property("is_vdo_pool")
+ def _remove_log_vol(self, lv):
+ """ Remove an LV from this VDO pool. """
+ if lv not in self._lvs:
+ raise ValueError("specified lv is not part of this VDO pool")
+
+ self._lvs.remove(lv)
+ self.vg._remove_log_vol(lv)
+
+ @property
+ @util.requires_property("is_vdo_pool")
+ def lvs(self):
+ """ A list of this VDO pool's LVs """
+ return self._lvs[:] # we don't want folks changing our list
+
+ @property
+ def direct(self):
+ """ Is this device directly accessible? """
+ return False
+
+ def _create(self):
+ """ Create the device. """
+ raise NotImplementedError
+
+
+class LVMVDOLogicalVolumeMixin(object):
+ def __init__(self):
+ pass
+
+ def _init_check(self):
+ pass
+
+ def _check_parents(self):
+ """Check that this device has parents as expected"""
+ if isinstance(self.parents, (list, ParentList)):
+ if len(self.parents) != 1:
+ raise ValueError("constructor requires a single vdo-pool LV")
+
+ container = self.parents[0]
+ else:
+ container = self.parents
+
+ if not container or not isinstance(container, LVMLogicalVolumeDevice) or not container.is_vdo_pool:
+ raise ValueError("constructor requires a vdo-pool LV")
+
+ @property
+ def vg_space_used(self):
+ return Size(0) # the pool's size is already accounted for in the vg
+
+ @property
+ def is_vdo_lv(self):
+ return self.seg_type == "vdo"
+
+ @property
+ def vg(self):
+ # parents[0] is the pool, not the VG so set the VG here
+ return self.pool.vg
+
+ @property
+ def type(self):
+ return "vdolv"
+
+ @property
+ def resizable(self):
+ return False
+
+ @property
+ @util.requires_property("is_vdo_lv")
+ def pool(self):
+ return self.parents[0]
+
+ def _create(self):
+ """ Create the device. """
+ raise NotImplementedError
+
+ def _destroy(self):
+ # nothing to do here, VDO LV is destroyed automatically together with
+ # the VDO pool
+ pass
+
+ def remove_hook(self, modparent=True):
+ if modparent:
+ self.pool._remove_log_vol(self)
+
+ # pylint: disable=bad-super-call
+ super(LVMLogicalVolumeBase, self).remove_hook(modparent=modparent)
+
+ def add_hook(self, new=True):
+ # pylint: disable=bad-super-call
+ super(LVMLogicalVolumeBase, self).add_hook(new=new)
+ if new:
+ return
+
+ if self not in self.pool.lvs:
+ self.pool._add_log_vol(self)
+
+
class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin, LVMSnapshotMixin,
- LVMThinPoolMixin, LVMThinLogicalVolumeMixin):
+ LVMThinPoolMixin, LVMThinLogicalVolumeMixin, LVMVDOPoolMixin,
+ LVMVDOLogicalVolumeMixin):
""" An LVM Logical Volume """
# generally resizable, see :property:`resizable` for details
@@ -1879,6 +2003,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
fmt, exists, sysfs_path, grow, maxsize,
percent, cache_request, pvs, from_lvs)
+ LVMVDOPoolMixin.__init__(self)
+ LVMVDOLogicalVolumeMixin.__init__(self)
LVMInternalLogicalVolumeMixin._init_check(self)
LVMSnapshotMixin._init_check(self)
@@ -1905,6 +2031,10 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
ret.append(LVMThinPoolMixin)
if self.is_thin_lv:
ret.append(LVMThinLogicalVolumeMixin)
+ if self.is_vdo_pool:
+ ret.append(LVMVDOPoolMixin)
+ if self.is_vdo_lv:
+ ret.append(LVMVDOLogicalVolumeMixin)
return ret
def _try_specific_call(self, name, *args, **kwargs):
@@ -2066,6 +2196,11 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
def display_lv_name(self):
return self.lvname
+ @property
+ @type_specific
+ def pool(self):
+ return super(LVMLogicalVolumeDevice, self).pool
+
def _setup(self, orig=False):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status,
@@ -2167,6 +2302,19 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
udev.settle()
blockdev.lvm.lvresize(self.vg.name, self._name, self.size)
+ @type_specific
+ def _add_log_vol(self, lv):
+ pass
+
+ @type_specific
+ def _remove_log_vol(self, lv):
+ pass
+
+ @property
+ @type_specific
+ def lvs(self):
+ return []
+
@property
@type_specific
def direct(self):
diff --git a/blivet/populator/helpers/lvm.py b/blivet/populator/helpers/lvm.py
index 4b674fac..ff8bf59f 100644
--- a/blivet/populator/helpers/lvm.py
+++ b/blivet/populator/helpers/lvm.py
@@ -211,9 +211,6 @@ class LVMFormatPopulator(FormatPopulator):
origin = self._devicetree.get_device_by_name(origin_device_name)
lv_kwargs["origin"] = origin
- elif lv_attr[0] == 'v':
- # skip vorigins
- return
elif lv_attr[0] in 'IrielTCo' and lv_name.endswith(']'):
# an internal LV, add an instance of the appropriate class
# to internal_lvs for later processing when non-internal LVs are
@@ -237,6 +234,19 @@ class LVMFormatPopulator(FormatPopulator):
origin = self._devicetree.get_device_by_name(origin_device_name)
lv_kwargs["origin"] = origin
+ lv_parents = [self._devicetree.get_device_by_name(pool_device_name)]
+ elif lv_attr[0] == 'd':
+ # vdo pool
+ # nothing to do here
+ pass
+ elif lv_attr[0] == 'v':
+ if lv_type != "vdo":
+ # skip vorigins
+ return
+ pool_name = blockdev.lvm.vdolvpoolname(vg_name, lv_name)
+ pool_device_name = "%s-%s" % (vg_name, pool_name)
+ add_required_lv(pool_device_name, "failed to look up VDO pool")
+
lv_parents = [self._devicetree.get_device_by_name(pool_device_name)]
elif lv_name.endswith(']'):
# unrecognized Internal LVM2 device
diff --git a/tests/action_test.py b/tests/action_test.py
index 90c1b312..8f9a7424 100644
--- a/tests/action_test.py
+++ b/tests/action_test.py
@@ -1252,6 +1252,45 @@ class DeviceActionTestCase(StorageTestCase):
self.assertEqual(set(self.storage.lvs), {pool})
self.assertEqual(set(pool._internal_lvs), {lv1, lv2})
+ def test_lvm_vdo_destroy(self):
+ self.destroy_all_devices()
+ sdc = self.storage.devicetree.get_device_by_name("sdc")
+ sdc1 = self.new_device(device_class=PartitionDevice, name="sdc1",
+ size=Size("50 GiB"), parents=[sdc],
+ fmt=blivet.formats.get_format("lvmpv"))
+ self.schedule_create_device(sdc1)
+
+ vg = self.new_device(device_class=LVMVolumeGroupDevice,
+ name="vg", parents=[sdc1])
+ self.schedule_create_device(vg)
+
+ pool = self.new_device(device_class=LVMLogicalVolumeDevice,
+ name="data", parents=[vg],
+ size=Size("10 GiB"),
+ seg_type="vdo-pool", exists=True)
+ self.storage.devicetree._add_device(pool)
+ lv = self.new_device(device_class=LVMLogicalVolumeDevice,
+ name="meta", parents=[pool],
+ size=Size("50 GiB"),
+ seg_type="vdo", exists=True)
+ self.storage.devicetree._add_device(lv)
+
+ remove_lv = self.schedule_destroy_device(lv)
+ self.assertListEqual(pool.lvs, [])
+ self.assertNotIn(lv, vg.lvs)
+
+ # cancelling the action should put lv back to both vg and pool lvs
+ self.storage.devicetree.actions.remove(remove_lv)
+ self.assertListEqual(pool.lvs, [lv])
+ self.assertIn(lv, vg.lvs)
+
+ # can't remove non-leaf pool
+ with self.assertRaises(ValueError):
+ self.schedule_destroy_device(pool)
+
+ self.schedule_destroy_device(lv)
+ self.schedule_destroy_device(pool)
+
class ConfigurationActionsTest(unittest.TestCase):
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
index 9e701d18..204cb99a 100644
--- a/tests/devices_test/lvm_test.py
+++ b/tests/devices_test/lvm_test.py
@@ -405,6 +405,40 @@ class LVMDeviceTest(unittest.TestCase):
exists=False)
self.assertFalse(vg.is_empty)
+ def test_lvm_vdo_pool(self):
+ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("1 GiB"), exists=True)
+ vg = LVMVolumeGroupDevice("testvg", parents=[pv])
+ pool = LVMLogicalVolumeDevice("testpool", parents=[vg], size=Size("512 MiB"),
+ seg_type="vdo-pool", exists=True)
+ self.assertTrue(pool.is_vdo_pool)
+
+ free = vg.free_space
+ lv = LVMLogicalVolumeDevice("testlv", parents=[pool], size=Size("2 GiB"),
+ seg_type="vdo", exists=True)
+ self.assertTrue(lv.is_vdo_lv)
+ self.assertEqual(lv.vg, vg)
+ self.assertEqual(lv.pool, pool)
+
+ # free space in the vg shouldn't be affected by the vdo lv
+ self.assertEqual(lv.vg_space_used, 0)
+ self.assertEqual(free, vg.free_space)
+
+ self.assertListEqual(pool.lvs, [lv])
+
+ # now try to destroy both the pool and the vdo lv
+ # for the lv this should be a no-op, destroying the pool should destroy both
+ with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
+ lv.destroy()
+ lv.remove_hook()
+ self.assertFalse(lv.exists)
+ self.assertFalse(lvm.lvremove.called)
+ self.assertListEqual(pool.lvs, [])
+
+ pool.destroy()
+ self.assertFalse(pool.exists)
+ self.assertTrue(lvm.lvremove.called)
+
class TypeSpecificCallsTest(unittest.TestCase):
def test_type_specific_calls(self):
diff --git a/tests/storagetestcase.py b/tests/storagetestcase.py
index e581bca6..1844dec5 100644
--- a/tests/storagetestcase.py
+++ b/tests/storagetestcase.py
@@ -96,7 +96,16 @@ class StorageTestCase(unittest.TestCase):
def new_device(self, *args, **kwargs):
""" Return a new Device instance suitable for testing. """
device_class = kwargs.pop("device_class")
- exists = kwargs.pop("exists", False)
+
+ # we intentionally don't pass the "exists" kwarg to the constructor
+ # because this causes issues with some devices (especially partitions)
+ # but we still need it for some LVs like VDO because we can't create
+ # those so we need to fake their existence even for the constructor
+ if device_class is blivet.devices.LVMLogicalVolumeDevice:
+ exists = kwargs.get("exists", False)
+ else:
+ exists = kwargs.pop("exists", False)
+
part_type = kwargs.pop("part_type", parted.PARTITION_NORMAL)
device = device_class(*args, **kwargs)
--
2.26.2
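For orientation, the lv_attr dispatch added to the populator boils down to this mapping (a simplified sketch; the attr strings below are made up and the real code also resolves the pool device via vdolvpoolname):

    def lv_kind(lv_attr, lv_type):
        if lv_attr[0] == "d":                      # vdo pool
            return "vdo-pool"
        if lv_attr[0] == "v" and lv_type == "vdo":
            return "vdo-lv"
        if lv_attr[0] == "v":                      # vorigin, still skipped
            return "skip"
        return "other"

    assert lv_kind("dwi-ao----", "") == "vdo-pool"
    assert lv_kind("vwi-a-v---", "vdo") == "vdo-lv"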
From f05a66e1bed1ca1f3cd7d7ffecd6693ab4d7f32a Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 12 May 2020 12:52:47 +0200
Subject: [PATCH 2/3] Fix checking for filesystem support in action_test
---
tests/action_test.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/action_test.py b/tests/action_test.py
index 8f9a7424..228eb97a 100644
--- a/tests/action_test.py
+++ b/tests/action_test.py
@@ -56,7 +56,7 @@ FORMAT_CLASSES = [
@unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES), "some unsupported device classes required for this test")
-@unittest.skipUnless(not any(x().utils_available for x in FORMAT_CLASSES), "some unsupported format classes required for this test")
+@unittest.skipUnless(all(x().utils_available for x in FORMAT_CLASSES), "some unsupported format classes required for this test")
class DeviceActionTestCase(StorageTestCase):
""" DeviceActionTestSuite """
--
2.26.2
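The fix flips an inverted guard; condensed (names are stand-ins):

    available = [True, True, False]   # utils_available per format class

    # before: ran the tests only when *no* format tools were available
    old_condition = not any(available)   # False here, True only on a bare system
    # after: run the tests only when *all* format tools are available
    new_condition = all(available)       # False -> suite correctly skipped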
From 69bd2e69e21c8779377a6f54b3d83cb35138867a Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 12 May 2020 12:54:03 +0200
Subject: [PATCH 3/3] Fix LV min size for resize in test_action_dependencies
We've recently changed the minimum size for all filesystems, so we
can't resize the LV to the device's minimal size.
This was overlooked in the original change because these tests
were skipped.
---
tests/action_test.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/action_test.py b/tests/action_test.py
index 228eb97a..77176f46 100644
--- a/tests/action_test.py
+++ b/tests/action_test.py
@@ -870,7 +870,7 @@ class DeviceActionTestCase(StorageTestCase):
name="testlv2", parents=[testvg])
testlv2.format = self.new_format("ext4", device=testlv2.path,
exists=True, device_instance=testlv2)
- shrink_lv2 = ActionResizeDevice(testlv2, testlv2.size - Size("10 GiB"))
+ shrink_lv2 = ActionResizeDevice(testlv2, testlv2.size - Size("10 GiB") + Ext4FS._min_size)
shrink_lv2.apply()
self.assertTrue(grow_lv.requires(shrink_lv2))
--
2.26.2

View File

@@ -1,30 +0,0 @@
From d477f8d076789cbe1c0a85545ea8b5133fdc4bdf Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 18 Sep 2020 13:58:48 +0200
Subject: [PATCH] Let parted fix fixable issues with partition table
This will automatically fix issues like the GPT partition table not
covering the whole device after a disk size change.
Resolves: rhbz#1846869
---
blivet/populator/populator.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/blivet/populator/populator.py b/blivet/populator/populator.py
index 465c272d..fe566816 100644
--- a/blivet/populator/populator.py
+++ b/blivet/populator/populator.py
@@ -64,6 +64,9 @@ def parted_exn_handler(exn_type, exn_options, exn_msg):
if exn_type == parted.EXCEPTION_TYPE_ERROR and \
exn_options == parted.EXCEPTION_OPT_YES_NO:
ret = parted.EXCEPTION_RESOLVE_YES
+ elif exn_type == parted.EXCEPTION_TYPE_WARNING and \
+ exn_options & parted.EXCEPTION_RESOLVE_FIX:
+ ret = parted.EXCEPTION_RESOLVE_FIX
return ret
--
2.29.2
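A sketch of how such a handler is wired up with pyparted (simplified from blivet's populate code; assumes pyparted's register_exn_handler/clear_exn_handler):

    import parted

    def exn_handler(exn_type, exn_options, exn_msg):
        # answer "yes" to yes/no error prompts, accept suggested fixes
        if exn_type == parted.EXCEPTION_TYPE_ERROR and \
                exn_options == parted.EXCEPTION_OPT_YES_NO:
            return parted.EXCEPTION_RESOLVE_YES
        if exn_type == parted.EXCEPTION_TYPE_WARNING and \
                exn_options & parted.EXCEPTION_RESOLVE_FIX:
            return parted.EXCEPTION_RESOLVE_FIX
        return parted.EXCEPTION_RESOLVE_UNHANDLED

    parted.register_exn_handler(exn_handler)
    try:
        disk = parted.newDisk(parted.getDevice("/dev/sdh"))  # may prompt
    finally:
        parted.clear_exn_handler()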

View File

@@ -1,112 +0,0 @@
From 430cd2cdba8fba434b5bed2d2a7ed97803c62f6d Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 5 Jan 2021 16:56:52 +0100
Subject: [PATCH 1/3] Fix possible UnicodeDecodeError when reading sysfs
attributes
This is a follow-up for https://github.com/storaged-project/blivet/pull/861,
where we fixed reading the device model in "__is_blacklisted_blockdev".
We read the device model from other places too, so it makes
more sense to "fix" all sysfs attribute reads.
---
blivet/util.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/blivet/util.py b/blivet/util.py
index 2fa9c8fc..48b7818f 100644
--- a/blivet/util.py
+++ b/blivet/util.py
@@ -379,7 +379,7 @@ def get_sysfs_attr(path, attr, root=None):
log.warning("%s is not a valid attribute", attr)
return None
- f = open(fullattr, "r")
+ f = open(fullattr, "r", encoding="utf-8", errors="replace")
data = f.read()
f.close()
sdata = "".join(["%02x" % (ord(x),) for x in data])
--
2.29.2
From 15350b52f30910d4fadad92da0195710adcb69a0 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 5 Jan 2021 16:59:14 +0100
Subject: [PATCH 2/3] Use util.get_sysfs_attr in __is_blacklisted_blockdev to read
device model
---
blivet/udev.py | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/blivet/udev.py b/blivet/udev.py
index 2c795225..25375459 100644
--- a/blivet/udev.py
+++ b/blivet/udev.py
@@ -185,9 +185,8 @@ def __is_blacklisted_blockdev(dev_name):
if any(re.search(expr, dev_name) for expr in device_name_blacklist):
return True
- model_path = "/sys/class/block/%s/device/model" % dev_name
- if os.path.exists(model_path):
- model = open(model_path, encoding="utf-8", errors="replace").read()
+ model = util.get_sysfs_attr("/sys/class/block/%s" % dev_name, "device/model")
+ if model:
for bad in ("IBM *STMF KERNEL", "SCEI Flash-5", "DGC LUNZ"):
if model.find(bad) != -1:
log.info("ignoring %s with model %s", dev_name, model)
--
2.29.2
From 64ece8c0dafb550bbde4798a766515fb04f44568 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 6 Jan 2021 12:34:49 +0100
Subject: [PATCH 3/3] Add test for util.get_sysfs_attr
---
tests/util_test.py | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+)
diff --git a/tests/util_test.py b/tests/util_test.py
index 9a2ff492..853b6166 100644
--- a/tests/util_test.py
+++ b/tests/util_test.py
@@ -2,7 +2,9 @@
import test_compat
from six.moves import mock
+import os
import six
+import tempfile
import unittest
from decimal import Decimal
@@ -157,3 +159,24 @@ class DependencyGuardTestCase(unittest.TestCase):
with mock.patch.object(_requires_something, '_check_avail', return_value=True):
self.assertEqual(self._test_dependency_guard_non_critical(), True)
self.assertEqual(self._test_dependency_guard_critical(), True)
+
+
+class GetSysfsAttrTestCase(unittest.TestCase):
+
+ def test_get_sysfs_attr(self):
+
+ with tempfile.TemporaryDirectory() as sysfs:
+ model_file = os.path.join(sysfs, "model")
+ with open(model_file, "w") as f:
+ f.write("test model\n")
+
+ model = util.get_sysfs_attr(sysfs, "model")
+ self.assertEqual(model, "test model")
+
+ # now with some invalid byte in the model
+ with open(model_file, "wb") as f:
+ f.write(b"test model\xef\n")
+
+ # the unicode replacement character (U+FFFD) should be used instead
+ model = util.get_sysfs_attr(sysfs, "model")
+ self.assertEqual(model, "test model\ufffd")
--
2.29.2

File diff suppressed because it is too large

View File

@@ -19,44 +19,32 @@
 Summary: A python module for system storage configuration
 Name: python-blivet
 Url: https://storageapis.wordpress.com/projects/blivet
-Version: 3.2.2
+Version: 3.4.0
 #%%global prerelease .b2
 # prerelease, if defined, should be something like .a1, .b1, .b2.dev1, or .c2
-Release: 9%{?prerelease}%{?dist}
+Release: 5%{?prerelease}%{?dist}
 Epoch: 1
 License: LGPLv2+
-Group: System Environment/Libraries
 
 %global realname blivet
 %global realversion %{version}%{?prerelease}
 
 Source0: http://github.com/storaged-project/blivet/archive/%{realname}-%{realversion}.tar.gz
 Source1: http://github.com/storaged-project/blivet/archive/%{realname}-%{realversion}-tests.tar.gz
 Patch0: 0001-force-lvm-cli.plugin
 Patch1: 0002-remove-btrfs-plugin.patch
-Patch2: 0003-Skip-test_mounting-for-filesystems-that-are-not-moun.patch
-Patch3: 0004-Add-extra-sleep-after-pvremove-call.patch
-Patch4: 0005-Round-down-to-nearest-MiB-value-when-writing-ks-parittion-info.ks
-Patch5: 0006-Blivet-RHEL-8.3-localization-update.patch
-Patch6: 0007-Do-not-use-FSAVAIL-and-FSUSE-options-when-running-lsblk.patch
-Patch7: 0008-set-allowed-disk-labels-for-s390x-as-standard-ones-plus-dasd.patch
-Patch8: 0009-Do-not-use-BlockDev-utils_have_kernel_module-to-check-for-modules.patch
-Patch9: 0010-Fix-name-resolution-for-MD-devices-and-partitions-on.patch
-Patch10: 0011-Fix-ignoring-disk-devices-with-parents-or-children.patch
-Patch11: 0012-xfs-grow-support.patch
-Patch12: 0013-Do-not-limit-swap-to-128-GiB.patch
-Patch13: 0014-Use-UnusableConfigurationError-for-patially-hidden-multipath-devices.patch
-Patch14: 0015-Fix-possible-UnicodeDecodeError-when-reading-model-f.patch
-Patch15: 0016-Basic-LVM-VDO-support.patch
-Patch16: 0017-Let-parted-fix-fixable-issues-with-partition-table.patch
-Patch17: 0018-Fix-possible-UnicodeDecodeError-when-reading-sysfs-a.patch
-Patch18: 0019-LVM-VDO-support.patch
+Patch2: 0003-Revert-More-consistent-lvm-errors.patch
+Patch3: 0004-Revert-Terminology-cleanups.patch
+Patch4: 0005-Fix-activating-old-style-LVM-snapshots.patch
+Patch5: 0006-Fix-resolving-devices-with-names-that-look-like-BIOS.patch
+Patch6: 0007-Do-not-set-chunk-size-for-RAID1.patch
+Patch7: 0008-Fix-running-tests-in-gating.patch
 
 # Versions of required components (done so we make sure the buildrequires
 # match the requires versions of things).
-%global partedver 1.8.1
+%global partedver 3.2
 %global pypartedver 3.10.4
 %global utillinuxver 2.15.1
-%global libblockdevver 2.19
+%global libblockdevver 2.24
 %global libbytesizever 0.3
 %global pyudevver 0.18
@@ -69,6 +57,7 @@ storage configuration.
 
 %package -n %{realname}-data
 Summary: Data for the %{realname} python module.
+BuildRequires: make
 BuildRequires: systemd
 
 Conflicts: python-blivet < 1:2.0.0
@@ -199,18 +188,38 @@ configuration.
 %if %{with python2}
 %files -n python2-%{realname}
 %license COPYING
-%doc README ChangeLog examples
+%doc README.md ChangeLog examples
 %{python2_sitelib}/*
 %endif
 
 %if %{with python3}
 %files -n python3-%{realname}
 %license COPYING
-%doc README ChangeLog examples
+%doc README.md ChangeLog examples
 %{python3_sitelib}/*
 %endif
 
 %changelog
+* Wed Aug 4 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-5
+- Fix running upstream test suite in gating
+  Resolves: rhbz#1990232
+
+* Mon Aug 2 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-4
+- Do not set chunk size for RAID 1
+  Resolves: rhbz#1987170
+
+* Wed Jul 21 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-3
+- Fix resolving devices with names that look like BIOS drive number
+  Resolves: rhbz#1983309
+
+* Wed Jul 7 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-2
+- Fix activating old style LVM snapshots
+  Resolves: rhbz#1961739
+
+* Wed May 5 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.4.0-1
+- Rebase to latest upstream release 3.4.0
+  Resolves: rhbz#1918357
+
 * Tue Feb 9 2021 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-9
 - LVM VDO support
   Resolves: rhbz#1509337