import python-blivet-3.2.2-6.el8

This commit is contained in:
CentOS Sources 2020-11-03 07:02:10 -05:00 committed by Andrew Lukoshko
parent 96122335ce
commit e0d4f52514
42 changed files with 1821 additions and 37582 deletions

4
.gitignore vendored
View File

@ -1,2 +1,2 @@
SOURCES/blivet-3.1.0-tests.tar.gz SOURCES/blivet-3.2.2-tests.tar.gz
SOURCES/blivet-3.1.0.tar.gz SOURCES/blivet-3.2.2.tar.gz

View File

@ -1,2 +1,2 @@
4bd8abd1cb7bffa644cffb017f6583a2fd7c19f9 SOURCES/blivet-3.1.0-tests.tar.gz 84988ad63a9a9ddd9f2075b82b36bd98261df9e9 SOURCES/blivet-3.2.2-tests.tar.gz
f388d30e55dfaa9c22415c2e9e3f9670f9d08f27 SOURCES/blivet-3.1.0.tar.gz a89000bc2e9cfc8a1cfe09a58bf5e4e609f9b517 SOURCES/blivet-3.2.2.tar.gz

View File

@ -0,0 +1,29 @@
From 760f08bbf7b801acd393a6d2b7447ca6ff28d590 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 22 May 2020 12:35:11 +0200
Subject: [PATCH] Skip test_mounting for filesystems that are not mountable
We can have tools to create the filesystem without having kernel
module for mounting it.
---
tests/formats_test/fstesting.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tests/formats_test/fstesting.py b/tests/formats_test/fstesting.py
index aa1b42e5..62f806f9 100644
--- a/tests/formats_test/fstesting.py
+++ b/tests/formats_test/fstesting.py
@@ -156,8 +156,8 @@ class FSAsRoot(loopbackedtestcase.LoopBackedTestCase):
# FIXME: BTRFS fails to mount
if isinstance(an_fs, fs.BTRFS):
self.skipTest("no mounting filesystem %s" % an_fs.name)
- if not an_fs.formattable:
- self.skipTest("can not create filesystem %s" % an_fs.name)
+ if not an_fs.formattable or not an_fs.mountable:
+ self.skipTest("can not create or mount filesystem %s" % an_fs.name)
an_fs.device = self.loop_devices[0]
self.assertIsNone(an_fs.create())
self.assertTrue(an_fs.test_mount())
--
2.25.4

View File

@ -1,56 +0,0 @@
From cd85b0a41f16c571675f04c58ec4c1a428a88a61 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 16 Aug 2018 13:00:35 +0200
Subject: [PATCH] Create a separate availability check for dmraid support
Resolves: rhbz#1617958
---
blivet/devices/disk.py | 2 +-
blivet/tasks/availability.py | 9 +++++++--
2 files changed, 8 insertions(+), 3 deletions(-)
diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index 5b52330..012413c 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -225,7 +225,7 @@ class DMRaidArrayDevice(DMDevice, ContainerDevice):
_is_disk = True
_format_class_name = property(lambda s: "dmraidmember")
_format_uuid_attr = property(lambda s: None)
- _external_dependencies = [availability.BLOCKDEV_DM_PLUGIN]
+ _external_dependencies = [availability.BLOCKDEV_DM_PLUGIN_RAID]
def __init__(self, name, fmt=None,
size=None, parents=None, sysfs_path='', wwn=None):
diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py
index 24909a2..7f64c10 100644
--- a/blivet/tasks/availability.py
+++ b/blivet/tasks/availability.py
@@ -331,10 +331,14 @@ BLOCKDEV_DM_ALL_MODES = (blockdev.DMTechMode.CREATE_ACTIVATE |
blockdev.DMTechMode.QUERY)
BLOCKDEV_DM = BlockDevTechInfo(plugin_name="dm",
check_fn=blockdev.dm_is_tech_avail,
- technologies={blockdev.DMTech.MAP: BLOCKDEV_DM_ALL_MODES,
- blockdev.DMTech.RAID: BLOCKDEV_DM_ALL_MODES})
+ technologies={blockdev.DMTech.MAP: BLOCKDEV_DM_ALL_MODES})
BLOCKDEV_DM_TECH = BlockDevMethod(BLOCKDEV_DM)
+BLOCKDEV_DM_RAID = BlockDevTechInfo(plugin_name="dm",
+ check_fn=blockdev.dm_is_tech_avail,
+ technologies={blockdev.DMTech.RAID: BLOCKDEV_DM_ALL_MODES})
+BLOCKDEV_DM_TECH_RAID = BlockDevMethod(BLOCKDEV_DM_RAID)
+
# libblockdev loop plugin required technologies and modes
BLOCKDEV_LOOP_ALL_MODES = (blockdev.LoopTechMode.CREATE |
blockdev.LoopTechMode.CREATE |
@@ -399,6 +403,7 @@ BLOCKDEV_SWAP_TECH = BlockDevMethod(BLOCKDEV_SWAP)
BLOCKDEV_BTRFS_PLUGIN = blockdev_plugin("btrfs", BLOCKDEV_BTRFS_TECH)
BLOCKDEV_CRYPTO_PLUGIN = blockdev_plugin("crypto", BLOCKDEV_CRYPTO_TECH)
BLOCKDEV_DM_PLUGIN = blockdev_plugin("dm", BLOCKDEV_DM_TECH)
+BLOCKDEV_DM_PLUGIN_RAID = blockdev_plugin("dm", BLOCKDEV_DM_TECH_RAID)
BLOCKDEV_LOOP_PLUGIN = blockdev_plugin("loop", BLOCKDEV_LOOP_TECH)
BLOCKDEV_LVM_PLUGIN = blockdev_plugin("lvm", BLOCKDEV_LVM_TECH)
BLOCKDEV_MDRAID_PLUGIN = blockdev_plugin("mdraid", BLOCKDEV_MD_TECH)
--
1.8.3.1

View File

@ -0,0 +1,38 @@
From 6a62a81d326a1121a2768735e52a8e1c5e5d6f0d Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 24 Jun 2020 14:43:47 +0200
Subject: [PATCH] Add extra sleep after pvremove call
To give enough time for the async pvscan to finish scanning the
partition before removing it.
Resolves: rhbz#1640601
---
blivet/formats/lvmpv.py | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py
index e4182adb..9f53ec6b 100644
--- a/blivet/formats/lvmpv.py
+++ b/blivet/formats/lvmpv.py
@@ -26,6 +26,7 @@ gi.require_version("BlockDev", "2.0")
from gi.repository import BlockDev as blockdev
import os
+import time
from ..storage_log import log_method_call
from parted import PARTITION_LVM
@@ -137,6 +138,9 @@ class LVMPhysicalVolume(DeviceFormat):
DeviceFormat._destroy(self, **kwargs)
finally:
udev.settle()
+ # LVM now has async pvscan jobs so udev.settle doesn't help and if we try to remove
+ # the partition immediately after the pvremove we get an error
+ time.sleep(5)
@property
def destroyable(self):
--
2.25.4

View File

@ -1,104 +0,0 @@
From fd07d14ad1f19c700d5344c8af11be6a1e314ceb Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 12 Sep 2018 10:45:41 +0200
Subject: [PATCH 1/2] Allow removing btrfs volumes without btrfs support
Btrfs volumes are removed using wipefs so we don't need to check
for device dependencies availability when removing the volume
(btrfs support depends on libblockdev btrfs plugin).
Resolves: rhbz#1605213
---
blivet/deviceaction.py | 23 ++++++++++++++++++-----
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/blivet/deviceaction.py b/blivet/deviceaction.py
index 3e337e18..b3e9e5f1 100644
--- a/blivet/deviceaction.py
+++ b/blivet/deviceaction.py
@@ -160,15 +160,19 @@ def __init__(self, device):
if not isinstance(device, StorageDevice):
raise ValueError("arg 1 must be a StorageDevice instance")
- unavailable_dependencies = device.unavailable_dependencies
- if unavailable_dependencies:
- dependencies_str = ", ".join(str(d) for d in unavailable_dependencies)
- raise DependencyError("device type %s requires unavailable_dependencies: %s" % (device.type, dependencies_str))
-
self.device = device
+
+ self._check_device_dependencies()
+
self.container = getattr(self.device, "container", None)
self._applied = False
+ def _check_device_dependencies(self):
+ unavailable_dependencies = self.device.unavailable_dependencies
+ if unavailable_dependencies:
+ dependencies_str = ", ".join(str(d) for d in unavailable_dependencies)
+ raise DependencyError("device type %s requires unavailable_dependencies: %s" % (self.device.type, dependencies_str))
+
def apply(self):
""" apply changes related to the action to the device(s) """
self._applied = True
@@ -379,6 +383,15 @@ def __init__(self, device):
# XXX should we insist that device.fs be None?
DeviceAction.__init__(self, device)
+ def _check_device_dependencies(self):
+ if self.device.type == "btrfs volume":
+ # XXX destroying a btrfs volume is a special case -- we don't destroy
+ # the device, but use wipefs to destroy format on its parents so we
+ # don't need btrfs plugin or btrfs-progs for this
+ return
+
+ super(ActionDestroyDevice, self)._check_device_dependencies()
+
def execute(self, callbacks=None):
super(ActionDestroyDevice, self).execute(callbacks=callbacks)
self.device.destroy()
From b9f1b4acb654c5fb70be1a2200bcf3a34dcde467 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 17 Sep 2018 10:25:24 +0200
Subject: [PATCH 2/2] Check device dependencies only for device actions
We don't want to check device dependencies for format actions.
It should be possible to for example format an opened LUKS device
without libblockdev crypto plugin.
Related: rhbz#1605213
---
blivet/deviceaction.py | 3 ++-
tests/devices_test/dependencies_test.py | 4 ----
2 files changed, 2 insertions(+), 5 deletions(-)
diff --git a/blivet/deviceaction.py b/blivet/deviceaction.py
index b3e9e5f1..14a06ff0 100644
--- a/blivet/deviceaction.py
+++ b/blivet/deviceaction.py
@@ -162,7 +162,8 @@ def __init__(self, device):
self.device = device
- self._check_device_dependencies()
+ if self.is_device:
+ self._check_device_dependencies()
self.container = getattr(self.device, "container", None)
self._applied = False
diff --git a/tests/devices_test/dependencies_test.py b/tests/devices_test/dependencies_test.py
index 0b44493e..e6b5bdb4 100644
--- a/tests/devices_test/dependencies_test.py
+++ b/tests/devices_test/dependencies_test.py
@@ -97,10 +97,6 @@ def test_availability_mdraidplugin(self):
ActionCreateDevice(self.luks)
with self.assertRaises(DependencyError):
ActionDestroyDevice(self.dev)
- with self.assertRaises(DependencyError):
- ActionCreateFormat(self.dev)
- with self.assertRaises(DependencyError):
- ActionDestroyFormat(self.dev)
def _clean_up(self):
availability.BLOCKDEV_MDRAID_PLUGIN._method = self.mdraid_method

View File

@ -0,0 +1,48 @@
From dc964f10d24499ea7fc90fd896a8b50c9c5e2d74 Mon Sep 17 00:00:00 2001
From: "Samantha N. Bueno" <sbueno+anaconda@redhat.com>
Date: Wed, 8 Jun 2016 13:47:40 -0400
Subject: [PATCH] Round down to nearest MiB value when writing ks partition
info.
On s390x in particular, some partition alignment issue is causing fractional
sizes to be reported. Pykickstart doesn't take anything except int values for
partition info, hence the call to roundToNearest.
This change only affects the data that is written to ks.cfg.
Resolves: rhbz#1850670
---
blivet/devices/partition.py | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/blivet/devices/partition.py b/blivet/devices/partition.py
index 0c56a6e7..76048aed 100644
--- a/blivet/devices/partition.py
+++ b/blivet/devices/partition.py
@@ -35,7 +35,7 @@
from ..storage_log import log_method_call
from .. import udev
from ..formats import DeviceFormat, get_format
-from ..size import Size, MiB
+from ..size import Size, MiB, ROUND_DOWN
import logging
log = logging.getLogger("blivet")
@@ -967,7 +967,8 @@ def populate_ksdata(self, data):
data.resize = (self.exists and self.target_size and
self.target_size != self.current_size)
if not self.exists:
- data.size = self.req_base_size.convert_to(MiB)
+ # round this to nearest MiB before doing anything else
+ data.size = self.req_base_size.round_to_nearest(MiB, rounding=ROUND_DOWN).convert_to(spec=MiB)
data.grow = self.req_grow
if self.req_grow:
data.max_size_mb = self.req_max_size.convert_to(MiB)
@@ -980,4 +981,6 @@ def populate_ksdata(self, data):
data.on_part = self.name # by-id
if data.resize:
- data.size = self.size.convert_to(MiB)
+ # on s390x in particular, fractional sizes are reported, which
+ # cause issues when writing to ks.cfg
+ data.size = self.size.round_to_nearest(MiB, rounding=ROUND_DOWN).convert_to(spec=MiB)

View File

@ -1,272 +0,0 @@
From 12a2bdf3fc5a7a4568ff56b244d3067b73f82681 Mon Sep 17 00:00:00 2001
From: Peter Robinson <pbrobinson@gmail.com>
Date: Tue, 7 Aug 2018 15:11:56 +0100
Subject: [PATCH 1/6] arch: arm: drop omap specifics for partitioning
We've long stopped supporting or using any specifics around OMAP
ARM machines and all ARM platforms support the extlinux means of
doing things one way or another.
Signed-off-by: Peter Robinson <pbrobinson@gmail.com>
---
blivet/arch.py | 4 ----
blivet/devices/partition.py | 3 ---
2 files changed, 7 deletions(-)
diff --git a/blivet/arch.py b/blivet/arch.py
index 20fe4f57..f30b2d8b 100644
--- a/blivet/arch.py
+++ b/blivet/arch.py
@@ -352,10 +352,6 @@ def is_ipseries():
return is_ppc() and get_ppc_machine() in ("iSeries", "pSeries")
-def is_omap_arm():
- return is_arm() and get_arm_machine() == "omap"
-
-
def get_arch():
"""
:return: The hardware architecture
diff --git a/blivet/devices/partition.py b/blivet/devices/partition.py
index 47ff547b..623e1c9d 100644
--- a/blivet/devices/partition.py
+++ b/blivet/devices/partition.py
@@ -421,9 +421,6 @@ def _get_weight(self):
# On ARM images '/' must be the last partition.
if self.format.mountpoint == "/":
weight = -100
- elif (arch.is_omap_arm() and
- self.format.mountpoint == "/boot/uboot" and self.format.type == "vfat"):
- weight = 5000
elif arch.is_ppc():
if arch.is_pmac() and self.format.type == "appleboot":
weight = 5000
From ec978c3c625c74c387a9c8074d2378c4ecbeac47 Mon Sep 17 00:00:00 2001
From: Peter Robinson <pbrobinson@gmail.com>
Date: Thu, 16 Aug 2018 14:32:19 +0100
Subject: [PATCH 2/6] arch: arm: drop get_arm_machine function
The get_arm_machine function was used when we had to have detection for which
arm specific kernel to install. The last user of this was the omap check for
special partitioning which is no longer used due to extlinux support so we can
now drop this function too.
Signed-off-by: Peter Robinson <pbrobinson@gmail.com>
---
blivet/arch.py | 22 ----------------------
blivet/flags.py | 2 --
2 files changed, 24 deletions(-)
diff --git a/blivet/arch.py b/blivet/arch.py
index f30b2d8b..55ce8108 100644
--- a/blivet/arch.py
+++ b/blivet/arch.py
@@ -33,7 +33,6 @@
import os
-from .flags import flags
from .storage_log import log_exception_info
import logging
@@ -182,27 +181,6 @@ def is_aarch64():
return os.uname()[4] == 'aarch64'
-def get_arm_machine():
- """
- :return: The ARM processor variety type, or None if not ARM.
- :rtype: string
-
- """
- if not is_arm():
- return None
-
- if flags.arm_platform:
- return flags.arm_platform
-
- arm_machine = os.uname()[2].rpartition('.')[2]
-
- if arm_machine.startswith('arm'):
- # @TBD - Huh? Don't you want the arm machine name here?
- return None
- else:
- return arm_machine
-
-
def is_cell():
"""
:return: True if the hardware is the Cell platform, False otherwise.
diff --git a/blivet/flags.py b/blivet/flags.py
index 18401218..4e26d82f 100644
--- a/blivet/flags.py
+++ b/blivet/flags.py
@@ -57,8 +57,6 @@ def __init__(self):
self.jfs = True
self.reiserfs = True
- self.arm_platform = None
-
self.gpt = False
# for this flag to take effect,
From e75049e9e9edac9da789cee2add2b4190159805d Mon Sep 17 00:00:00 2001
From: Peter Robinson <pbrobinson@gmail.com>
Date: Thu, 16 Aug 2018 14:35:30 +0100
Subject: [PATCH 3/6] Aarch64 platforms: Fix gpt defaults for 64 bit arm
platforms
The 46165f589d commit added support for msdos needed on some aarch64 devices
but it messed up the gpt defaults, this was fixed in 4908746c3a but this now
defaults back to msdos so we put in an aarch64 options to put gpt first again.
Signed-off-by: Peter Robinson <pbrobinson@gmail.com>
---
blivet/formats/disklabel.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/blivet/formats/disklabel.py b/blivet/formats/disklabel.py
index 44f9834c..e93a4c13 100644
--- a/blivet/formats/disklabel.py
+++ b/blivet/formats/disklabel.py
@@ -223,6 +223,8 @@ def get_platform_label_types(cls):
label_types = ["msdos", "gpt"]
if arch.is_pmac():
label_types = ["mac"]
+ elif arch.is_aarch64():
+ label_types = ["gpt", "msdos"]
elif arch.is_efi() and not arch.is_aarch64():
label_types = ["gpt"]
elif arch.is_s390():
From dda51536e902def437872fcdb3005efaff231703 Mon Sep 17 00:00:00 2001
From: Peter Robinson <pbrobinson@gmail.com>
Date: Thu, 16 Aug 2018 14:38:16 +0100
Subject: [PATCH 4/6] arm: add support for EFI on ARMv7
We now can support EFI for ARMv7 so add/enabled the checks for ARM too.
Signed-off-by: Peter Robinson <pbrobinson@gmail.com>
---
blivet/formats/disklabel.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/blivet/formats/disklabel.py b/blivet/formats/disklabel.py
index e93a4c13..e13ab2f8 100644
--- a/blivet/formats/disklabel.py
+++ b/blivet/formats/disklabel.py
@@ -225,6 +225,8 @@ def get_platform_label_types(cls):
label_types = ["mac"]
elif arch.is_aarch64():
label_types = ["gpt", "msdos"]
+ elif arch.is_efi() and arch.is_arm():
+ label_types = ["msdos", "gpt"]
elif arch.is_efi() and not arch.is_aarch64():
label_types = ["gpt"]
elif arch.is_s390():
From 1cdd509f2034f456402f39045425cbdfe62bde97 Mon Sep 17 00:00:00 2001
From: Peter Robinson <pbrobinson@gmail.com>
Date: Thu, 23 Aug 2018 14:23:38 +0100
Subject: [PATCH 5/6] Update disk label tests for ARM platforms
UEFI supports either gpt or msdos but different platforms have different
requirements. Update the disk label tests to test the following:
- aarch64: gpt default but msdos option also supported
- ARMv7 UEFI: msdos default but gpt option also supported
- ARMv7 extlinux: msdos default, also support gpt
Signed-off-by: Peter Robinson <pbrobinson@gmail.com>
---
tests/formats_test/disklabel_test.py | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/tests/formats_test/disklabel_test.py b/tests/formats_test/disklabel_test.py
index 4b6608f5..3edbdb0b 100644
--- a/tests/formats_test/disklabel_test.py
+++ b/tests/formats_test/disklabel_test.py
@@ -71,6 +71,7 @@ def test_platform_label_types(self, arch):
arch.is_s390.return_value = False
arch.is_efi.return_value = False
arch.is_aarch64.return_value = False
+ arch.is_arm.return_value = False
arch.is_pmac.return_value = False
self.assertEqual(disklabel_class.get_platform_label_types(), ["msdos", "gpt"])
@@ -81,8 +82,18 @@ def test_platform_label_types(self, arch):
arch.is_efi.return_value = True
self.assertEqual(disklabel_class.get_platform_label_types(), ["gpt"])
+ arch.is_aarch64.return_value = True
+ self.assertEqual(disklabel_class.get_platform_label_types(), ["gpt", "msdos"])
+ arch.is_aarch64.return_value = False
+ arch.is_arm.return_value = True
+ self.assertEqual(disklabel_class.get_platform_label_types(), ["msdos", "gpt"])
+ arch.is_arm.return_value = False
arch.is_efi.return_value = False
+ arch.is_arm.return_value = True
+ self.assertEqual(disklabel_class.get_platform_label_types(), ["msdos", "gpt"])
+ arch.is_arm.return_value = False
+
arch.is_s390.return_value = True
self.assertEqual(disklabel_class.get_platform_label_types(), ["msdos", "dasd"])
arch.is_s390.return_value = False
@@ -123,6 +134,7 @@ def test_best_label_type(self, arch):
arch.is_s390.return_value = False
arch.is_efi.return_value = False
arch.is_aarch64.return_value = False
+ arch.is_arm.return_value = False
arch.is_pmac.return_value = False
with mock.patch.object(dl, '_label_type_size_check') as size_check:
From e0e6ac41cea805c3bf56852bfe2cd67d4bfe0b83 Mon Sep 17 00:00:00 2001
From: Peter Robinson <pbrobinson@gmail.com>
Date: Thu, 23 Aug 2018 15:54:51 +0100
Subject: [PATCH 6/6] Drop omap partition table tests on ARM platforms
We no longer need to test the /boot/uboot tests for omap platforms so
drop them as they're obsolete.
Signed-off-by: Peter Robinson <pbrobinson@gmail.com>
---
tests/devices_test/partition_test.py | 14 ++------------
1 file changed, 2 insertions(+), 12 deletions(-)
diff --git a/tests/devices_test/partition_test.py b/tests/devices_test/partition_test.py
index 394ffc27..08c0447d 100644
--- a/tests/devices_test/partition_test.py
+++ b/tests/devices_test/partition_test.py
@@ -26,11 +26,9 @@
Weighted(fstype="efi", mountpoint="/boot/efi", true_funcs=['is_efi'], weight=5000),
Weighted(fstype="prepboot", mountpoint=None, true_funcs=['is_ppc', 'is_ipseries'], weight=5000),
Weighted(fstype="appleboot", mountpoint=None, true_funcs=['is_ppc', 'is_pmac'], weight=5000),
- Weighted(fstype="vfat", mountpoint="/boot/uboot", true_funcs=['is_arm', 'is_omap_arm'], weight=5000),
- Weighted(fstype=None, mountpoint="/", true_funcs=['is_arm'], weight=-100),
- Weighted(fstype=None, mountpoint="/", true_funcs=['is_arm', 'is_omap_arm'], weight=-100)]
+ Weighted(fstype=None, mountpoint="/", true_funcs=['is_arm'], weight=-100)]
-arch_funcs = ['is_arm', 'is_efi', 'is_ipseries', 'is_omap_arm', 'is_pmac', 'is_ppc', 'is_x86']
+arch_funcs = ['is_arm', 'is_efi', 'is_ipseries', 'is_pmac', 'is_ppc', 'is_x86']
class PartitionDeviceTestCase(unittest.TestCase):
@@ -309,14 +307,6 @@ def test_weight_1(self, *patches):
fmt.mountpoint = "/"
self.assertEqual(dev.weight, -100)
- arch.is_omap_arm.return_value = False
- fmt.mountpoint = "/boot/uboot"
- fmt.type = "vfat"
- self.assertEqual(dev.weight, 0)
-
- arch.is_omap_arm.return_value = True
- self.assertEqual(dev.weight, 5000)
-
#
# ppc
#

View File

@ -0,0 +1,438 @@
From 44d7e9669fe55fd4b2b3a6c96f23e2d0669f8dbb Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 9 Jul 2020 13:42:31 +0200
Subject: [PATCH] Blivet RHEL 8.3 localization update
Resolves: rhbz#1820565
---
po/ja.po | 33 ++++++++++-----------
po/ko.po | 83 ++++++++++++++++++++++++-----------------------------
po/zh_CN.po | 28 +++++++++---------
3 files changed, 68 insertions(+), 76 deletions(-)
diff --git a/po/ja.po b/po/ja.po
index 733e63a0..b4c864c2 100644
--- a/po/ja.po
+++ b/po/ja.po
@@ -29,17 +29,17 @@ msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2020-05-21 12:42+0200\n"
-"PO-Revision-Date: 2018-09-21 01:08+0000\n"
-"Last-Translator: Copied by Zanata <copied-by-zanata@zanata.org>\n"
-"Language-Team: Japanese (http://www.transifex.com/projects/p/blivet/language/"
-"ja/)\n"
+"POT-Creation-Date: 2020-01-29 14:04+0100\n"
+"PO-Revision-Date: 2020-07-03 07:42+0000\n"
+"Last-Translator: Ludek Janda <ljanda@redhat.com>\n"
+"Language-Team: Japanese <https://translate.fedoraproject.org/projects/blivet/"
+"blivet-rhel8/ja/>\n"
"Language: ja\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=1; plural=0;\n"
-"X-Generator: Zanata 4.6.2\n"
+"X-Generator: Weblate 4.1.1\n"
#: ../blivet/errors.py:210
msgid ""
@@ -47,6 +47,8 @@ msgid ""
"of the UUID value which should be unique. In that case you can either "
"disconnect one of the devices or reformat it."
msgstr ""
+"これは通常、デバイスイメージを複製したことで、一意であるはずのUUID値が重複することが原因です。その場合は、いずれかのデバイスを切断するか、再フォーマッ"
+"トしてください。"
#: ../blivet/errors.py:217
msgid ""
@@ -54,9 +56,8 @@ msgid ""
"kernel is reporting partitions on. It is unclear what the exact problem is. "
"Please file a bug at http://bugzilla.redhat.com"
msgstr ""
-"なんらかの理由により、kernel がパーティションを報告しているディスク上でディス"
-"クラベルを見つけられませんでした。何が問題となっているかは不明です。バグを "
-"http://bugzilla.redhat.com に提出してください。"
+"なんらかの理由により、kernel がパーティションを報告しているディスク上でディスクラベルを見つけられませんでした。何が問題となっているかは不明です。"
+"バグを http://bugzilla.redhat.com に提出してください"
#: ../blivet/errors.py:224
msgid ""
@@ -84,7 +85,7 @@ msgstr "FCoE は使用できません"
#: ../blivet/zfcp.py:62
msgid "You have not specified a device number or the number is invalid"
-msgstr "デバイス番号を指定していないか番号が無効です。"
+msgstr "デバイス番号を指定していないか番号が無効です"
#: ../blivet/zfcp.py:64
msgid "You have not specified a worldwide port name or the name is invalid."
@@ -202,7 +203,7 @@ msgstr "iSCSI ノードが何も探索できませんでした"
#: ../blivet/iscsi.py:550
msgid "No new iSCSI nodes discovered"
-msgstr "新しい iSCSI ノードは見つかりませんでした。"
+msgstr "新しい iSCSI ノードは見つかりませんでした"
#: ../blivet/iscsi.py:553
msgid "Could not log in to any of the discovered nodes"
@@ -257,7 +258,7 @@ msgstr "要求を超えたサイズを再利用することができません"
#: ../blivet/partitioning.py:1419
msgid "DiskChunk requests must be of type PartitionRequest"
-msgstr "DiskChunk 要求には PartitionResquest タイプが必要です。"
+msgstr "DiskChunk 要求には PartitionResquest タイプが必要です"
#: ../blivet/partitioning.py:1432
msgid "partitions allocated outside disklabel limits"
@@ -265,7 +266,7 @@ msgstr "ディスクラベルの範囲外に割り当てられたパーティシ
#: ../blivet/partitioning.py:1517
msgid "VGChunk requests must be of type LVRequest"
-msgstr "VGChunk 要求には LVResquest タイプが必要です。"
+msgstr "VGChunk 要求には LVResquest タイプが必要です"
#. by now we have allocated the PVs so if there isn't enough
#. space in the VG we have a real problem
@@ -368,15 +369,15 @@ msgstr ""
msgid "Cannot remove a member from existing %s array"
msgstr "既存の %s 配列からメンバーを削除できません"
-#: ../blivet/formats/fs.py:934
+#: ../blivet/formats/fs.py:932
msgid "EFI System Partition"
msgstr "EFI システムパーティション"
-#: ../blivet/formats/fs.py:1139
+#: ../blivet/formats/fs.py:1137
msgid "Apple Bootstrap"
msgstr "Apple ブートストラップ"
-#: ../blivet/formats/fs.py:1175
+#: ../blivet/formats/fs.py:1173
msgid "Linux HFS+ ESP"
msgstr "Linux HFS+ ESP"
diff --git a/po/ko.po b/po/ko.po
index 66789af0..747b00c5 100644
--- a/po/ko.po
+++ b/po/ko.po
@@ -20,17 +20,17 @@ msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2020-05-21 12:42+0200\n"
-"PO-Revision-Date: 2018-09-21 01:08+0000\n"
-"Last-Translator: Copied by Zanata <copied-by-zanata@zanata.org>\n"
-"Language-Team: Korean (http://www.transifex.com/projects/p/blivet/language/"
-"ko/)\n"
+"POT-Creation-Date: 2020-01-29 14:04+0100\n"
+"PO-Revision-Date: 2020-07-03 07:42+0000\n"
+"Last-Translator: Ludek Janda <ljanda@redhat.com>\n"
+"Language-Team: Korean <https://translate.fedoraproject.org/projects/blivet/"
+"blivet-rhel8/ko/>\n"
"Language: ko\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=1; plural=0;\n"
-"X-Generator: Zanata 4.6.2\n"
+"X-Generator: Weblate 4.1.1\n"
#: ../blivet/errors.py:210
msgid ""
@@ -38,6 +38,8 @@ msgid ""
"of the UUID value which should be unique. In that case you can either "
"disconnect one of the devices or reformat it."
msgstr ""
+"이는 일반적으로 장치 이미지 복제로 인해 고유한 UUID 값이 복제되기 때문에 발생합니다. 이 경우 장치 중 하나를 분리하거나 다시 "
+"포맷할 수 있습니다."
#: ../blivet/errors.py:217
msgid ""
@@ -45,9 +47,8 @@ msgid ""
"kernel is reporting partitions on. It is unclear what the exact problem is. "
"Please file a bug at http://bugzilla.redhat.com"
msgstr ""
-"일부 이유로 커널이 파티션 설정을 보고하는 디스크에 디스크 레이블을 배치할 수 "
-"없습니다. 정확한 문제가 무엇인지 알 수 없습니다. http://bugzilla.redhat.com"
-"에 버그 리포트를 제출해 주십시오."
+"일부 이유로 커널이 파티션 설정을 보고하는 디스크에 디스크 레이블을 배치할 수 없습니다. 정확한 문제가 무엇인지 알 수 없습니다. "
+"http://bugzilla.redhat.com에 버그 리포트를 제출해 주십시오"
#: ../blivet/errors.py:224
msgid ""
@@ -78,11 +79,11 @@ msgstr "장치 번호를 지정하지 않았거나, 번호가 맞지 않습니
#: ../blivet/zfcp.py:64
msgid "You have not specified a worldwide port name or the name is invalid."
-msgstr "세계 포트 이름(WWPN)을 지정하지 않았거나, 포트 번호가 맞지 않습니다"
+msgstr "세계 포트 이름(WWPN)을 지정하지 않았거나, 포트 번호가 맞지 않습니다."
#: ../blivet/zfcp.py:66
msgid "You have not specified a FCP LUN or the number is invalid."
-msgstr "FCP LUN을 지정하지 않았거나, 번호가 맞지 않습니다"
+msgstr "FCP LUN을 지정하지 않았거나, 번호가 맞지 않습니다."
#: ../blivet/zfcp.py:91
#, python-format
@@ -103,7 +104,7 @@ msgstr "WWPN %(wwpn)s을(를) zFCP 장치 %(devnum)s (%(e)s)에 추가할 수
#: ../blivet/zfcp.py:119
#, python-format
msgid "WWPN %(wwpn)s not found at zFCP device %(devnum)s."
-msgstr "WWPN %(wwpn)s을(를) zFCP 장치 %(devnum)s에서 찾을 수 없습니다. "
+msgstr "WWPN %(wwpn)s을(를) zFCP 장치 %(devnum)s에서 찾을 수 없습니다."
#: ../blivet/zfcp.py:134
#, python-format
@@ -111,8 +112,7 @@ msgid ""
"Could not add LUN %(fcplun)s to WWPN %(wwpn)s on zFCP device %(devnum)s "
"(%(e)s)."
msgstr ""
-"zFCP 장치 %(devnum)s (%(e)s)에서 LUN %(fcplun)s을(를) WWPN %(wwpn)s에 추가할 "
-"수 없습니다. "
+"zFCP 장치 %(devnum)s (%(e)s)에서 LUN %(fcplun)s을(를) WWPN %(wwpn)s에 추가할 수 없습니다."
#: ../blivet/zfcp.py:140
#, python-format
@@ -136,18 +136,14 @@ msgstr ""
msgid ""
"Failed LUN %(fcplun)s at WWPN %(wwpn)s on zFCP device %(devnum)s removed "
"again."
-msgstr ""
-"zFCP 장치 %(devnum)s에 있는 WWPN %(wwpn)s에서 실패한 LUN %(fcplun)s이 다시 삭"
-"제되었습니다. "
+msgstr "zFCP 장치 %(devnum)s에 있는 WWPN %(wwpn)s에서 실패한 LUN %(fcplun)s이 다시 삭제되었습니다."
#: ../blivet/zfcp.py:218
#, python-format
msgid ""
"Could not correctly delete SCSI device of zFCP %(devnum)s %(wwpn)s "
"%(fcplun)s (%(e)s)."
-msgstr ""
-"zFCP %(devnum)s %(wwpn)s %(fcplun)s (%(e)s)의 SCSI 장치를 올바르게 삭제할 수 "
-"없습니다. "
+msgstr "zFCP %(devnum)s %(wwpn)s %(fcplun)s (%(e)s)의 SCSI 장치를 올바르게 삭제할 수 없습니다."
#: ../blivet/zfcp.py:227
#, python-format
@@ -161,41 +157,40 @@ msgstr ""
#: ../blivet/zfcp.py:245
#, python-format
msgid "Could not remove WWPN %(wwpn)s on zFCP device %(devnum)s (%(e)s)."
-msgstr ""
-"zFCP 장치 %(devnum)s (%(e)s)에서 WWPN %(wwpn)s을(를) 제거할 수 없습니다. "
+msgstr "zFCP 장치 %(devnum)s (%(e)s)에서 WWPN %(wwpn)s을(를) 제거할 수 없습니다."
#: ../blivet/zfcp.py:271
#, python-format
msgid "Could not set zFCP device %(devnum)s offline (%(e)s)."
-msgstr "zFCP 장치 %(devnum)s를 오프라인 (%(e)s)으로 설정할 수 없습니다. "
+msgstr "zFCP 장치 %(devnum)s를 오프라인 (%(e)s)으로 설정할 수 없습니다."
#: ../blivet/iscsi.py:217
msgid "Unable to change iSCSI initiator name once set"
-msgstr "iSCSI 개시자 이름이 설정되면 이를 변경할 수 없음 "
+msgstr "iSCSI 개시자 이름이 설정되면 이를 변경할 수 없음"
#: ../blivet/iscsi.py:219
msgid "Must provide an iSCSI initiator name"
-msgstr "iSCSI 개시자 이름을 지정하십시오 "
+msgstr "iSCSI 개시자 이름을 지정하십시오"
#: ../blivet/iscsi.py:410
msgid "iSCSI not available"
-msgstr "iSCSI 사용 불가능 "
+msgstr "iSCSI 사용 불가능"
#: ../blivet/iscsi.py:412
msgid "No initiator name set"
-msgstr "이니셰이터 이름이 설정되지 않음 "
+msgstr "이니셰이터 이름이 설정되지 않음"
#: ../blivet/iscsi.py:530
msgid "No iSCSI nodes discovered"
-msgstr "iSCSI 노드를 찾을 수 없음 "
+msgstr "iSCSI 노드를 찾을 수 없음"
#: ../blivet/iscsi.py:550
msgid "No new iSCSI nodes discovered"
-msgstr "새 iSCSI 노드를 찾을 수 없음 "
+msgstr "새 iSCSI 노드를 찾을 수 없음"
#: ../blivet/iscsi.py:553
msgid "Could not log in to any of the discovered nodes"
-msgstr "검색된 노드로 로그인할 수 없음 "
+msgstr "검색된 노드로 로그인할 수 없음"
#: ../blivet/partitioning.py:454
msgid "unable to allocate aligned partition"
@@ -265,7 +260,7 @@ msgstr "LVM 요청에 필요한 공간이 충분하지 않습니다"
#: ../blivet/deviceaction.py:194
#, python-format
msgid "Executing %(action)s"
-msgstr "%(action)s 실행 "
+msgstr "%(action)s 실행"
#: ../blivet/deviceaction.py:322
msgid "create device"
@@ -286,7 +281,7 @@ msgstr "포맷 생성"
#: ../blivet/deviceaction.py:613
#, python-format
msgid "Creating %(type)s on %(device)s"
-msgstr "%(device)s에 %(type)s 생성 "
+msgstr "%(device)s에 %(type)s 생성"
#: ../blivet/deviceaction.py:640
#, python-format
@@ -327,11 +322,11 @@ msgstr "컨테이너 멤버 삭제"
#: ../blivet/deviceaction.py:1058
msgid "configure format"
-msgstr "포맷 설정 "
+msgstr "포맷 설정"
#: ../blivet/deviceaction.py:1114
msgid "configure device"
-msgstr "장치 설정 "
+msgstr "장치 설정"
#: ../blivet/devices/raid.py:58
#, python-format
@@ -341,32 +336,28 @@ msgid ""
msgid_plural ""
"RAID level %(raid_level)s requires that device have at least %(min_members)d "
"members."
-msgstr[0] ""
-"RAID 레벨 %(raid_level)s에는 최소 %(min_members)d개의 장치 구성원이 필요합니"
-"다. "
+msgstr[0] "RAID 레벨 %(raid_level)s에는 최소 %(min_members)d개의 장치 구성원이 필요합니다."
#: ../blivet/devices/raid.py:79
#, python-format
msgid ""
"RAID level %(raid_level)s is an invalid value. Must be one of (%(levels)s)."
-msgstr ""
-"RAID 레벨 %(raid_level)s이/가 유효한 값이 아닙니다. (%(levels)s) 중 하나여야 "
-"합니다. "
+msgstr "RAID 레벨 %(raid_level)s이/가 유효한 값이 아닙니다. (%(levels)s) 중 하나여야 합니다."
#: ../blivet/devices/raid.py:104
#, python-format
msgid "Cannot remove a member from existing %s array"
-msgstr "기존 %s 어레이에서 장치 구성원을 제거할 수 없습니다 "
+msgstr "기존 %s 어레이에서 장치 구성원을 제거할 수 없습니다"
-#: ../blivet/formats/fs.py:934
+#: ../blivet/formats/fs.py:932
msgid "EFI System Partition"
-msgstr "EFI 시스템 파티션 "
+msgstr "EFI 시스템 파티션"
-#: ../blivet/formats/fs.py:1139
+#: ../blivet/formats/fs.py:1137
msgid "Apple Bootstrap"
msgstr "Apple 부트스트랩"
-#: ../blivet/formats/fs.py:1175
+#: ../blivet/formats/fs.py:1173
msgid "Linux HFS+ ESP"
msgstr "Linux HFS+ ESP"
@@ -384,7 +375,7 @@ msgstr "암호화됨"
#: ../blivet/formats/luks.py:388
msgid "DM Integrity"
-msgstr "DM 무결성 "
+msgstr "DM 무결성"
#: ../blivet/formats/__init__.py:148
msgid "Unknown"
diff --git a/po/zh_CN.po b/po/zh_CN.po
index 480801de..2be6d492 100644
--- a/po/zh_CN.po
+++ b/po/zh_CN.po
@@ -20,24 +20,24 @@ msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2020-05-21 12:42+0200\n"
-"PO-Revision-Date: 2018-09-13 02:13+0000\n"
-"Last-Translator: Copied by Zanata <copied-by-zanata@zanata.org>\n"
-"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/blivet/"
-"language/zh_CN/)\n"
+"POT-Creation-Date: 2020-01-29 14:04+0100\n"
+"PO-Revision-Date: 2020-07-03 07:42+0000\n"
+"Last-Translator: Ludek Janda <ljanda@redhat.com>\n"
+"Language-Team: Chinese (Simplified) <https://translate.fedoraproject.org/"
+"projects/blivet/blivet-rhel8/zh_CN/>\n"
"Language: zh_CN\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=1; plural=0;\n"
-"X-Generator: Zanata 4.6.2\n"
+"X-Generator: Weblate 4.1.1\n"
#: ../blivet/errors.py:210
msgid ""
"This is usually caused by cloning the device image resulting in duplication "
"of the UUID value which should be unique. In that case you can either "
"disconnect one of the devices or reformat it."
-msgstr ""
+msgstr "这通常是由于克隆设备镜像导致 UUID 值重复造成的,而 UUID 值应该是唯一的。如果是这种情况,可以断开其中一个设备或重新格式化它。"
#: ../blivet/errors.py:217
msgid ""
@@ -45,8 +45,8 @@ msgid ""
"kernel is reporting partitions on. It is unclear what the exact problem is. "
"Please file a bug at http://bugzilla.redhat.com"
msgstr ""
-"由于某些原因无法定位内核报告中显示在其中进行分区的磁盘的磁盘标签。尚不了解具"
-"体问题所在。请在 http://bugzilla.redhat.com 提交 bug。"
+"由于某些原因无法定位内核报告中显示在其中进行分区的磁盘的磁盘标签。尚不了解具体问题所在。请在 http://bugzilla.redhat.com 提交 "
+"bug"
#: ../blivet/errors.py:224
msgid ""
@@ -170,7 +170,7 @@ msgstr "设定后就无法更改 iSCSI 启动程序名称"
#: ../blivet/iscsi.py:219
msgid "Must provide an iSCSI initiator name"
-msgstr "您必须提供一个 iSCSI 启动程序名称。"
+msgstr "您必须提供一个 iSCSI 启动程序名称"
#: ../blivet/iscsi.py:410
msgid "iSCSI not available"
@@ -223,7 +223,7 @@ msgstr ""
#: ../blivet/partitioning.py:962
msgid "Unable to allocate requested partition scheme."
-msgstr "无法分配所需分区方案"
+msgstr "无法分配所需分区方案。"
#: ../blivet/partitioning.py:997
msgid "not enough free space after creating extended partition"
@@ -347,15 +347,15 @@ msgstr ""
msgid "Cannot remove a member from existing %s array"
msgstr "无法从存在的 %s 阵列中删除一个成员"
-#: ../blivet/formats/fs.py:934
+#: ../blivet/formats/fs.py:932
msgid "EFI System Partition"
msgstr "EFI 系统分区"
-#: ../blivet/formats/fs.py:1139
+#: ../blivet/formats/fs.py:1137
msgid "Apple Bootstrap"
msgstr "Apple Bootstrap"
-#: ../blivet/formats/fs.py:1175
+#: ../blivet/formats/fs.py:1173
msgid "Linux HFS+ ESP"
msgstr "Linux HFS+ ESP"
--
2.25.4

View File

@ -1,44 +0,0 @@
From 04dc595e3921879fa3e5b0f82506d63fdea4d2c8 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 3 Oct 2018 14:11:08 +0200
Subject: [PATCH] Fix options for ISCSI functions
Correct mutual authentication options in UDisks are
"reverse-username" and "reverse-password".
Resolves: rhbz#1635569
---
blivet/iscsi.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/blivet/iscsi.py b/blivet/iscsi.py
index b979e01c..ca51f8ed 100644
--- a/blivet/iscsi.py
+++ b/blivet/iscsi.py
@@ -385,9 +385,9 @@ class iSCSI(object):
if password:
auth_info["password"] = GLib.Variant("s", password)
if r_username:
- auth_info["r_username"] = GLib.Variant("s", r_username)
+ auth_info["reverse-username"] = GLib.Variant("s", r_username)
if r_password:
- auth_info["r_password"] = GLib.Variant("s", r_password)
+ auth_info["reverse-password"] = GLib.Variant("s", r_password)
args = GLib.Variant("(sqa{sv})", (ipaddr, int(port), auth_info))
nodes, _n_nodes = self._call_initiator_method("DiscoverSendTargets", args)
@@ -423,9 +423,9 @@ class iSCSI(object):
if password:
auth_info["password"] = GLib.Variant("s", password)
if r_username:
- auth_info["r_username"] = GLib.Variant("s", r_username)
+ auth_info["reverse-username"] = GLib.Variant("s", r_username)
if r_password:
- auth_info["r_password"] = GLib.Variant("s", r_password)
+ auth_info["reverse-password"] = GLib.Variant("s", r_password)
try:
self._login(node, auth_info)
--
2.17.2

View File

@ -0,0 +1,24 @@
From 7bc4e324580656585adad0cbe51d60ed3540b766 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 3 Jul 2020 13:04:23 +0200
Subject: [PATCH] Do not use FSAVAIL and FSUSE% options when running lsblk
These options were added in util-linux 2.33 which is not available
on older systems so we should not use these.
---
blivet/blivet.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/blivet/blivet.py b/blivet/blivet.py
index fcc2080b..e7dbd37b 100644
--- a/blivet/blivet.py
+++ b/blivet/blivet.py
@@ -77,7 +77,7 @@ def __init__(self):
self._dump_file = "%s/storage.state" % tempfile.gettempdir()
try:
- options = "NAME,SIZE,OWNER,GROUP,MODE,FSTYPE,LABEL,UUID,PARTUUID,FSAVAIL,FSUSE%,MOUNTPOINT"
+ options = "NAME,SIZE,OWNER,GROUP,MODE,FSTYPE,LABEL,UUID,PARTUUID,MOUNTPOINT"
out = capture_output(["lsblk", "--bytes", "-a", "-o", options])
except Exception: # pylint: disable=broad-except
pass

View File

@ -1,45 +0,0 @@
From 0b6f818f46e3b7c5b9be33216ef8438f59d7bcf1 Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Thu, 18 Oct 2018 10:07:31 -0400
Subject: [PATCH] Wipe all stale metadata after creating md array. (#1639682)
---
blivet/devices/md.py | 4 ++++
tests/devices_test/device_methods_test.py | 3 ++-
2 files changed, 6 insertions(+), 1 deletion(-)
diff --git a/blivet/devices/md.py b/blivet/devices/md.py
index dad099e8..6a837df0 100644
--- a/blivet/devices/md.py
+++ b/blivet/devices/md.py
@@ -31,6 +31,7 @@
from ..devicelibs import mdraid, raid
from .. import errors
+from ..formats import DeviceFormat
from .. import util
from ..static_data import pvs_info
from ..storage_log import log_method_call
@@ -563,6 +564,9 @@ def remove_stale_lvm():
remove_stale_lvm()
+ # remove any other stale metadata before proceeding
+ DeviceFormat(device=self.path, exists=True).destroy()
+
def _create(self):
""" Create the device. """
log_method_call(self, self.name, status=self.status)
diff --git a/tests/devices_test/device_methods_test.py b/tests/devices_test/device_methods_test.py
index 8e40e6b6..12d5f7d8 100644
--- a/tests/devices_test/device_methods_test.py
+++ b/tests/devices_test/device_methods_test.py
@@ -404,6 +404,7 @@ def test_setup(self):
self.assertTrue(self.patches["md"].activate.called)
def test_create(self):
- super(MDRaidArrayDeviceMethodsTestCase, self).test_create()
+ with patch("blivet.devices.md.DeviceFormat"):
+ super(MDRaidArrayDeviceMethodsTestCase, self).test_create()
self.device._create()
self.assertTrue(self.patches["md"].create.called)

View File

@ -1,29 +0,0 @@
From 653a3df662d10d0c8cc7f34138efd89a61f531a3 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 9 Jan 2019 13:03:49 +0100
Subject: [PATCH] Copy the iSCSI initiator name file to the installed system
The initiatorname.iscsi file is used (sometimes) during boot so
we need to write the configuration to the installed system.
Resolves: rhbz#1664587
---
blivet/iscsi.py | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/blivet/iscsi.py b/blivet/iscsi.py
index 3e44e6ed..f053577d 100644
--- a/blivet/iscsi.py
+++ b/blivet/iscsi.py
@@ -563,6 +563,11 @@ def write(self, root, storage): # pylint: disable=unused-argument
shutil.copytree("/var/lib/iscsi", root + "/var/lib/iscsi",
symlinks=True)
+ # copy the initiator file too
+ if not os.path.isdir(root + "/etc/iscsi"):
+ os.makedirs(root + "/etc/iscsi", 0o755)
+ shutil.copyfile(INITIATOR_FILE, root + INITIATOR_FILE)
+
def get_node(self, name, address, port, iface):
for node in self.active_nodes():
if node.name == name and node.address == address and \

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,39 @@
From 462099a9137fb7997140360c07665a21615a0fea Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Dan=20Hor=C3=A1k?= <dan@danny.cz>
Date: Tue, 7 Jul 2020 13:19:02 +0200
Subject: [PATCH] set allowed disk labels for s390x as standard ones (msdos +
gpt) plus dasd
This will solve issues when a SCSI or NVMe disk with GPT partition table
is used with a s390x machine (rhbz#1827066, rhbz#1854110).
---
blivet/formats/disklabel.py | 2 +-
tests/formats_test/disklabel_test.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/blivet/formats/disklabel.py b/blivet/formats/disklabel.py
index 3dcac12b..53e2c010 100644
--- a/blivet/formats/disklabel.py
+++ b/blivet/formats/disklabel.py
@@ -230,7 +230,7 @@ def get_platform_label_types(cls):
elif arch.is_efi() and not arch.is_aarch64():
label_types = ["gpt", "msdos"]
elif arch.is_s390():
- label_types = ["msdos", "dasd"]
+ label_types += ["dasd"]
return label_types
diff --git a/tests/formats_test/disklabel_test.py b/tests/formats_test/disklabel_test.py
index 94f3775f..3068dc07 100644
--- a/tests/formats_test/disklabel_test.py
+++ b/tests/formats_test/disklabel_test.py
@@ -95,7 +95,7 @@ def test_platform_label_types(self, arch):
arch.is_arm.return_value = False
arch.is_s390.return_value = True
- self.assertEqual(disklabel_class.get_platform_label_types(), ["msdos", "dasd"])
+ self.assertEqual(disklabel_class.get_platform_label_types(), ["msdos", "gpt", "dasd"])
arch.is_s390.return_value = False
def test_label_type_size_check(self):

View File

@ -0,0 +1,47 @@
From 7303f4a3f2fe3280339f6303dcff31b6ade12176 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 9 Jul 2020 16:30:55 +0200
Subject: [PATCH] Do not use BlockDev.utils_have_kernel_module to check for
modules
The function unfortunately uses only the name when searching for
the module and we need to use aliases for modules like ext2 and
ext3. So we need to use "modprobe --dry-run" instead.
---
blivet/formats/fs.py | 12 +++---------
1 file changed, 3 insertions(+), 9 deletions(-)
diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py
index eee15aaa..bcfbc08e 100644
--- a/blivet/formats/fs.py
+++ b/blivet/formats/fs.py
@@ -60,12 +60,6 @@
import logging
log = logging.getLogger("blivet")
-import gi
-gi.require_version("GLib", "2.0")
-gi.require_version("BlockDev", "2.0")
-
-from gi.repository import GLib
-from gi.repository import BlockDev
AVAILABLE_FILESYSTEMS = kernel_filesystems
@@ -462,13 +456,13 @@ def check_module(self):
for module in self._modules:
try:
- succ = BlockDev.utils_have_kernel_module(module)
- except GLib.GError as e:
+ rc = util.run_program(["modprobe", "--dry-run", module])
+ except OSError as e:
log.error("Could not check kernel module availability %s: %s", module, e)
self._supported = False
return
- if not succ:
+ if rc:
log.debug("Kernel module %s not available", module)
self._supported = False
return

View File

@ -1,33 +0,0 @@
From 8adbf9cf56f486f2f974cf6cdfda657293aff141 Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Fri, 19 Oct 2018 09:49:56 -0400
Subject: [PATCH 1/2] Require libfc instead of fcoe for offloaded FCoE.
(#1575953)
---
blivet/fcoe.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/blivet/fcoe.py b/blivet/fcoe.py
index 1a2cf9d4..3a1887dc 100644
--- a/blivet/fcoe.py
+++ b/blivet/fcoe.py
@@ -32,13 +32,13 @@ _fcoe_module_loaded = False
def has_fcoe():
global _fcoe_module_loaded
if not _fcoe_module_loaded:
- util.run_program(["modprobe", "fcoe"])
+ util.run_program(["modprobe", "libfc"])
_fcoe_module_loaded = True
if "bnx2x" in util.lsmod():
log.info("fcoe: loading bnx2fc")
util.run_program(["modprobe", "bnx2fc"])
- return os.access("/sys/module/fcoe", os.X_OK)
+ return os.access("/sys/module/libfc", os.X_OK)
class FCoE(object):
--
2.17.2

View File

@ -0,0 +1,844 @@
From 18ce766bc90abdf0d8ca54bdf578463392a52ee9 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 12 Aug 2020 10:57:19 +0200
Subject: [PATCH 1/2] Fix name resolution for MD devices and partitions on them
UDev data for both member disks/partitions and partitions on arrays
contain the MD_* properties we must be extra careful when deciding
what name we'll use for the device.
Resolves: rhbz#1862904
---
blivet/udev.py | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/blivet/udev.py b/blivet/udev.py
index 41c99496..c85eb3dc 100644
--- a/blivet/udev.py
+++ b/blivet/udev.py
@@ -202,9 +202,16 @@ def device_get_name(udev_info):
""" Return the best name for a device based on the udev db data. """
if "DM_NAME" in udev_info:
name = udev_info["DM_NAME"]
- elif "MD_DEVNAME" in udev_info and os.path.exists(device_get_sysfs_path(udev_info) + "/md"):
+ elif "MD_DEVNAME" in udev_info:
mdname = udev_info["MD_DEVNAME"]
- if device_is_partition(udev_info):
+ if device_is_md(udev_info):
+ # MD RAID array -> use MD_DEVNAME
+ name = mdname
+ elif device_get_format(udev_info) == "linux_raid_member":
+ # MD RAID member -> use SYS_NAME
+ name = udev_info["SYS_NAME"]
+ elif device_is_partition(udev_info):
+ # partition on RAID -> construct name from MD_DEVNAME + partition number
# for partitions on named RAID we want to use the raid name, not
# the node, e.g. "raid1" instead of "md127p1"
partnum = udev_info["ID_PART_ENTRY_NUMBER"]
@@ -213,6 +220,7 @@ def device_get_name(udev_info):
else:
name = mdname + partnum
else:
+ # something else -> default to MD_DEVNAME
name = mdname
else:
name = udev_info["SYS_NAME"]
--
2.25.4
From dc96961adcb9dd6ef6d09e4daaa0a5eaae1ffe60 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 12 Aug 2020 11:10:03 +0200
Subject: [PATCH 2/2] Add tests for udev.device_get_name for RAID devices
This includes sample UDev data for various combinations of RAID
devices configuration.
Related: rhbz#1862904
---
tests/udev_data/__init__.py | 0
tests/udev_data/raid_data.py | 705 +++++++++++++++++++++++++++++++++++
tests/udev_test.py | 46 +++
3 files changed, 751 insertions(+)
create mode 100644 tests/udev_data/__init__.py
create mode 100644 tests/udev_data/raid_data.py
diff --git a/tests/udev_data/__init__.py b/tests/udev_data/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/udev_data/raid_data.py b/tests/udev_data/raid_data.py
new file mode 100644
index 00000000..509cbfbd
--- /dev/null
+++ b/tests/udev_data/raid_data.py
@@ -0,0 +1,705 @@
+# Sample UDev data for various MD RAID devices:
+# - member_boot: data for the member disk or partition after booting the system
+# - member_assemble: data for the member disk or partition after re-assembling stopped array using
+# 'mdadm --assemble --scan' (yes, this is different from member_boot)
+# - raid_device: data for the RAID array device
+# - raid_partition: data for partition on the array
+#
+# We have data for different combinations of member "types", MD metadata versions and named v unnamed
+# RAID devices.
+# The data were gathered on Fedora 32.
+
+
+class RaidOnDisk1():
+ member_name = "sda"
+ raid_name = "127"
+ raid_node = "md127"
+ metadata_version = "1.2"
+
+ member_boot = {'DEVLINKS': '/dev/disk/by-path/pci-0000:00:11.0-scsi-0:0:0:0 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-0',
+ 'DEVNAME': '/dev/sda',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:0/block/sda',
+ 'DEVTYPE': 'disk',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_LABEL': 'localhost.localdomain:127',
+ 'ID_FS_LABEL_ENC': 'localhost.localdomain:127',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': '54956eb2-6983-8759-e2ad-4c40acc92e4b',
+ 'ID_FS_UUID_ENC': '54956eb2-6983-8759-e2ad-4c40acc92e4b',
+ 'ID_FS_UUID_SUB': '64f96f0b-e97c-9157-d393-1fe457f3dd59',
+ 'ID_FS_UUID_SUB_ENC': '64f96f0b-e97c-9157-d393-1fe457f3dd59',
+ 'ID_FS_VERSION': '1.2',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PATH': 'pci-0000:00:11.0-scsi-0:0:0:0',
+ 'ID_PATH_TAG': 'pci-0000_00_11_0-scsi-0_0_0_0',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-0',
+ 'ID_SERIAL_SHORT': 'drive-scsi0-0-0-0',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MD_DEVICE': 'md127',
+ 'MD_DEVNAME': '127',
+ 'MD_FOREIGN': 'no',
+ 'MD_STARTED': 'unsafe',
+ 'MINOR': '0',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdadm-last-resort@md127.timer',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '5529231',
+ 'SYS_NAME': 'sda',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:0/block/sda'}
+
+ member_assemble = {'DEVLINKS': '/dev/disk/by-path/pci-0000:00:11.0-scsi-0:0:0:0 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-0',
+ 'DEVNAME': '/dev/sda',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:0/block/sda',
+ 'DEVTYPE': 'disk',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_LABEL': 'localhost.localdomain:127',
+ 'ID_FS_LABEL_ENC': 'localhost.localdomain:127',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': '54956eb2-6983-8759-e2ad-4c40acc92e4b',
+ 'ID_FS_UUID_ENC': '54956eb2-6983-8759-e2ad-4c40acc92e4b',
+ 'ID_FS_UUID_SUB': '64f96f0b-e97c-9157-d393-1fe457f3dd59',
+ 'ID_FS_UUID_SUB_ENC': '64f96f0b-e97c-9157-d393-1fe457f3dd59',
+ 'ID_FS_VERSION': '1.2',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PATH': 'pci-0000:00:11.0-scsi-0:0:0:0',
+ 'ID_PATH_TAG': 'pci-0000_00_11_0-scsi-0_0_0_0',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-0',
+ 'ID_SERIAL_SHORT': 'drive-scsi0-0-0-0',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MINOR': '0',
+ 'SUBSYSTEM': 'block',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '5529231',
+ 'SYS_NAME': 'sda',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:0/block/sda'}
+
+ raid_device = {'DEVLINKS': '/dev/disk/by-id/md-name-localhost.localdomain:127 /dev/disk/by-id/md-uuid-54956eb2:69838759:e2ad4c40:acc92e4b /dev/md/127',
+ 'DEVNAME': '/dev/md127',
+ 'DEVPATH': '/devices/virtual/block/md127',
+ 'DEVTYPE': 'disk',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': '4eec0361',
+ 'MAJOR': '9',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sda_DEV': '/dev/sda',
+ 'MD_DEVICE_ev_sda_ROLE': '0',
+ 'MD_DEVICE_ev_sdb_DEV': '/dev/sdb',
+ 'MD_DEVICE_ev_sdb_ROLE': '1',
+ 'MD_DEVNAME': '127',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '1.2',
+ 'MD_NAME': 'localhost.localdomain:127',
+ 'MD_UUID': '54956eb2:69838759:e2ad4c40:acc92e4b',
+ 'MINOR': '127',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '603606045',
+ 'SYS_NAME': 'md127',
+ 'SYS_PATH': '/sys/devices/virtual/block/md127'}
+
+ raid_partition = {'DEVLINKS': '/dev/md/127p1 /dev/disk/by-id/md-uuid-54956eb2:69838759:e2ad4c40:acc92e4b-part1 /dev/disk/by-id/md-name-localhost.localdomain:127-part1',
+ 'DEVNAME': '/dev/md127p1',
+ 'DEVPATH': '/devices/virtual/block/md127/md127p1',
+ 'DEVTYPE': 'partition',
+ 'ID_PART_ENTRY_DISK': '9:127',
+ 'ID_PART_ENTRY_NUMBER': '1',
+ 'ID_PART_ENTRY_OFFSET': '2048',
+ 'ID_PART_ENTRY_SCHEME': 'dos',
+ 'ID_PART_ENTRY_SIZE': '2091008',
+ 'ID_PART_ENTRY_TYPE': '0x83',
+ 'ID_PART_ENTRY_UUID': '4eec0361-01',
+ 'MAJOR': '259',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sda_DEV': '/dev/sda',
+ 'MD_DEVICE_ev_sda_ROLE': '0',
+ 'MD_DEVICE_ev_sdb_DEV': '/dev/sdb',
+ 'MD_DEVICE_ev_sdb_ROLE': '1',
+ 'MD_DEVNAME': '127',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '1.2',
+ 'MD_NAME': 'localhost.localdomain:127',
+ 'MD_UUID': '54956eb2:69838759:e2ad4c40:acc92e4b',
+ 'MINOR': '2',
+ 'PARTN': '1',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '603714783',
+ 'SYS_NAME': 'md127p1',
+ 'SYS_PATH': '/sys/devices/virtual/block/md127/md127p1'}
+
+
+class RaidOnDisk2():
+ member_name = "sdc"
+ raid_name = "name"
+ raid_node = "md127"
+ metadata_version = "1.2"
+
+ member_boot = {'DEVLINKS': '/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-4 /dev/disk/by-path/pci-0000:00:11.0-scsi-0:0:0:4',
+ 'DEVNAME': '/dev/sdc',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:4/block/sdc',
+ 'DEVTYPE': 'disk',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_LABEL': 'localhost.localdomain:name',
+ 'ID_FS_LABEL_ENC': 'localhost.localdomain:name',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': '143d480c-12c3-909f-5476-98a9f94a1c4f',
+ 'ID_FS_UUID_ENC': '143d480c-12c3-909f-5476-98a9f94a1c4f',
+ 'ID_FS_UUID_SUB': '121f2b71-3634-4183-dc9c-08bfceda765c',
+ 'ID_FS_UUID_SUB_ENC': '121f2b71-3634-4183-dc9c-08bfceda765c',
+ 'ID_FS_VERSION': '1.2',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PATH': 'pci-0000:00:11.0-scsi-0:0:0:4',
+ 'ID_PATH_TAG': 'pci-0000_00_11_0-scsi-0_0_0_4',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-4',
+ 'ID_SERIAL_SHORT': 'drive-scsi0-0-0-4',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MD_DEVICE': 'md127',
+ 'MD_DEVNAME': 'name',
+ 'MD_FOREIGN': 'no',
+ 'MD_STARTED': 'yes',
+ 'MINOR': '32',
+ 'SUBSYSTEM': 'block',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '6109555',
+ 'SYS_NAME': 'sdc',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:4/block/sdc'}
+
+ member_assemble = {'DEVLINKS': '/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-4 /dev/disk/by-path/pci-0000:00:11.0-scsi-0:0:0:4',
+ 'DEVNAME': '/dev/sdc',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:4/block/sdc',
+ 'DEVTYPE': 'disk',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_LABEL': 'localhost.localdomain:name',
+ 'ID_FS_LABEL_ENC': 'localhost.localdomain:name',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': '143d480c-12c3-909f-5476-98a9f94a1c4f',
+ 'ID_FS_UUID_ENC': '143d480c-12c3-909f-5476-98a9f94a1c4f',
+ 'ID_FS_UUID_SUB': '121f2b71-3634-4183-dc9c-08bfceda765c',
+ 'ID_FS_UUID_SUB_ENC': '121f2b71-3634-4183-dc9c-08bfceda765c',
+ 'ID_FS_VERSION': '1.2',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PATH': 'pci-0000:00:11.0-scsi-0:0:0:4',
+ 'ID_PATH_TAG': 'pci-0000_00_11_0-scsi-0_0_0_4',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-4',
+ 'ID_SERIAL_SHORT': 'drive-scsi0-0-0-4',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MINOR': '32',
+ 'SUBSYSTEM': 'block',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '6109555',
+ 'SYS_NAME': 'sdc',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:4/block/sdc'}
+
+ raid_device = {'DEVLINKS': '/dev/disk/by-id/md-name-localhost.localdomain:name /dev/disk/by-id/md-uuid-143d480c:12c3909f:547698a9:f94a1c4f /dev/md/name',
+ 'DEVNAME': '/dev/md127',
+ 'DEVPATH': '/devices/virtual/block/md127',
+ 'DEVTYPE': 'disk',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': '19e9cb5b',
+ 'MAJOR': '9',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sdc_DEV': '/dev/sdc',
+ 'MD_DEVICE_ev_sdc_ROLE': '0',
+ 'MD_DEVICE_ev_sdd_DEV': '/dev/sdd',
+ 'MD_DEVICE_ev_sdd_ROLE': '1',
+ 'MD_DEVNAME': 'name',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '1.2',
+ 'MD_NAME': 'localhost.localdomain:name',
+ 'MD_UUID': '143d480c:12c3909f:547698a9:f94a1c4f',
+ 'MINOR': '127',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '5844744',
+ 'SYS_NAME': 'md127',
+ 'SYS_PATH': '/sys/devices/virtual/block/md127'}
+
+ raid_partition = {'DEVLINKS': '/dev/disk/by-id/md-uuid-143d480c:12c3909f:547698a9:f94a1c4f-part1 /dev/disk/by-id/md-name-localhost.localdomain:name-part1 /dev/md/name1',
+ 'DEVNAME': '/dev/md127p1',
+ 'DEVPATH': '/devices/virtual/block/md127/md127p1',
+ 'DEVTYPE': 'partition',
+ 'ID_PART_ENTRY_DISK': '9:127',
+ 'ID_PART_ENTRY_NUMBER': '1',
+ 'ID_PART_ENTRY_OFFSET': '2048',
+ 'ID_PART_ENTRY_SCHEME': 'dos',
+ 'ID_PART_ENTRY_SIZE': '2091008',
+ 'ID_PART_ENTRY_TYPE': '0x83',
+ 'ID_PART_ENTRY_UUID': '19e9cb5b-01',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': 'ec985633',
+ 'MAJOR': '259',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sdc_DEV': '/dev/sdc',
+ 'MD_DEVICE_ev_sdc_ROLE': '0',
+ 'MD_DEVICE_ev_sdd_DEV': '/dev/sdd',
+ 'MD_DEVICE_ev_sdd_ROLE': '1',
+ 'MD_DEVNAME': 'name',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '1.2',
+ 'MD_NAME': 'localhost.localdomain:name',
+ 'MD_UUID': '143d480c:12c3909f:547698a9:f94a1c4f',
+ 'MINOR': '1',
+ 'PARTN': '1',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '5928255',
+ 'SYS_NAME': 'md127p1',
+ 'SYS_PATH': '/sys/devices/virtual/block/md127/md127p1'}
+
+
+class RaidOnDisk3():
+ member_name = "sde"
+ raid_name = "125"
+ raid_node = "md125"
+ metadata_version = "0.9"
+
+ member_boot = {'DEVLINKS': '/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-1 /dev/disk/by-path/pci-0000:00:11.0-scsi-0:0:0:1',
+ 'DEVNAME': '/dev/sde',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:1/block/sde',
+ 'DEVTYPE': 'disk',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': 'c4ef60f5-e374-5f70-bfe7-8010bc810f04',
+ 'ID_FS_UUID_ENC': 'c4ef60f5-e374-5f70-bfe7-8010bc810f04',
+ 'ID_FS_VERSION': '0.90.0',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PATH': 'pci-0000:00:11.0-scsi-0:0:0:1',
+ 'ID_PATH_TAG': 'pci-0000_00_11_0-scsi-0_0_0_1',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-1',
+ 'ID_SERIAL_SHORT': 'drive-scsi0-0-0-1',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MD_DEVICE': 'md125',
+ 'MD_DEVNAME': '125',
+ 'MD_FOREIGN': 'no',
+ 'MD_STARTED': 'unsafe',
+ 'MINOR': '64',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdadm-last-resort@md125.timer',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '5538551',
+ 'SYS_NAME': 'sde',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:1/block/sde'}
+
+ member_assemble = {'DEVLINKS': '/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-1 /dev/disk/by-path/pci-0000:00:11.0-scsi-0:0:0:1',
+ 'DEVNAME': '/dev/sde',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:1/block/sde',
+ 'DEVTYPE': 'disk',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': 'c4ef60f5-e374-5f70-bfe7-8010bc810f04',
+ 'ID_FS_UUID_ENC': 'c4ef60f5-e374-5f70-bfe7-8010bc810f04',
+ 'ID_FS_VERSION': '0.90.0',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PATH': 'pci-0000:00:11.0-scsi-0:0:0:1',
+ 'ID_PATH_TAG': 'pci-0000_00_11_0-scsi-0_0_0_1',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-1',
+ 'ID_SERIAL_SHORT': 'drive-scsi0-0-0-1',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MINOR': '64',
+ 'SUBSYSTEM': 'block',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '5538551',
+ 'SYS_NAME': 'sde',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:11.0/virtio5/host8/target8:0:0/8:0:0:1/block/sde'}
+
+ raid_device = {'DEVLINKS': '/dev/md/125 /dev/disk/by-id/md-uuid-c4ef60f5:e3745f70:bfe78010:bc810f04',
+ 'DEVNAME': '/dev/md125',
+ 'DEVPATH': '/devices/virtual/block/md125',
+ 'DEVTYPE': 'disk',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': 'e74877cd',
+ 'MAJOR': '9',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sde_DEV': '/dev/sde',
+ 'MD_DEVICE_ev_sde_ROLE': '0',
+ 'MD_DEVICE_ev_sdf_DEV': '/dev/sdf',
+ 'MD_DEVICE_ev_sdf_ROLE': '1',
+ 'MD_DEVNAME': '125',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '0.90',
+ 'MD_UUID': 'c4ef60f5:e3745f70:bfe78010:bc810f04',
+ 'MINOR': '125',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '5786380',
+ 'SYS_NAME': 'md125',
+ 'SYS_PATH': '/sys/devices/virtual/block/md125'}
+
+ raid_partition = {'DEVLINKS': '/dev/md/125p1 /dev/disk/by-id/md-uuid-c4ef60f5:e3745f70:bfe78010:bc810f04-part1',
+ 'DEVNAME': '/dev/md125p1',
+ 'DEVPATH': '/devices/virtual/block/md125/md125p1',
+ 'DEVTYPE': 'partition',
+ 'ID_PART_ENTRY_DISK': '9:125',
+ 'ID_PART_ENTRY_NUMBER': '1',
+ 'ID_PART_ENTRY_OFFSET': '2048',
+ 'ID_PART_ENTRY_SCHEME': 'dos',
+ 'ID_PART_ENTRY_SIZE': '2094976',
+ 'ID_PART_ENTRY_TYPE': '0x83',
+ 'ID_PART_ENTRY_UUID': 'e74877cd-01',
+ 'MAJOR': '259',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sde_DEV': '/dev/sde',
+ 'MD_DEVICE_ev_sde_ROLE': '0',
+ 'MD_DEVICE_ev_sdf_DEV': '/dev/sdf',
+ 'MD_DEVICE_ev_sdf_ROLE': '1',
+ 'MD_DEVNAME': '125',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '0.90',
+ 'MD_UUID': 'c4ef60f5:e3745f70:bfe78010:bc810f04',
+ 'MINOR': '3',
+ 'PARTN': '1',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '8808457',
+ 'SYS_NAME': 'md125p1',
+ 'SYS_PATH': '/sys/devices/virtual/block/md125/md125p1'}
+
+
+class RaidOnPartition1():
+ member_name = "sdh3"
+ raid_name = "122"
+ raid_node = "md122"
+ metadata_version = "1.2"
+
+ member_boot = {'DEVLINKS': '/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:2:0-part3 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1-0-2-part3 /dev/disk/by-partuuid/73eb11a9-03',
+ 'DEVNAME': '/dev/sdh3',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh3',
+ 'DEVTYPE': 'partition',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_LABEL': 'localhost.localdomain:122',
+ 'ID_FS_LABEL_ENC': 'localhost.localdomain:122',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': '0628d995-eb60-ebd1-a767-51730b16f212',
+ 'ID_FS_UUID_ENC': '0628d995-eb60-ebd1-a767-51730b16f212',
+ 'ID_FS_UUID_SUB': 'b301779b-f759-ad7d-5324-b38d4b6d944d',
+ 'ID_FS_UUID_SUB_ENC': 'b301779b-f759-ad7d-5324-b38d4b6d944d',
+ 'ID_FS_VERSION': '1.2',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PART_ENTRY_DISK': '8:112',
+ 'ID_PART_ENTRY_NUMBER': '3',
+ 'ID_PART_ENTRY_OFFSET': '411648',
+ 'ID_PART_ENTRY_SCHEME': 'dos',
+ 'ID_PART_ENTRY_SIZE': '204800',
+ 'ID_PART_ENTRY_TYPE': '0x83',
+ 'ID_PART_ENTRY_UUID': '73eb11a9-03',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': '73eb11a9',
+ 'ID_PATH': 'pci-0000:00:07.0-scsi-0:0:2:0',
+ 'ID_PATH_TAG': 'pci-0000_00_07_0-scsi-0_0_2_0',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi1-0-2',
+ 'ID_SERIAL_SHORT': 'drive-scsi1-0-2',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MD_DEVICE': 'md122',
+ 'MD_DEVNAME': '122',
+ 'MD_FOREIGN': 'no',
+ 'MD_STARTED': 'yes',
+ 'MINOR': '115',
+ 'PARTN': '3',
+ 'SUBSYSTEM': 'block',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '8920462',
+ 'SYS_NAME': 'sdh3',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh3'}
+
+ member_assemble = {'DEVLINKS': '/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:2:0-part3 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1-0-2-part3 /dev/disk/by-partuuid/73eb11a9-03',
+ 'DEVNAME': '/dev/sdh3',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh3',
+ 'DEVTYPE': 'partition',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_LABEL': 'localhost.localdomain:122',
+ 'ID_FS_LABEL_ENC': 'localhost.localdomain:122',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': '0628d995-eb60-ebd1-a767-51730b16f212',
+ 'ID_FS_UUID_ENC': '0628d995-eb60-ebd1-a767-51730b16f212',
+ 'ID_FS_UUID_SUB': 'b301779b-f759-ad7d-5324-b38d4b6d944d',
+ 'ID_FS_UUID_SUB_ENC': 'b301779b-f759-ad7d-5324-b38d4b6d944d',
+ 'ID_FS_VERSION': '1.2',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PART_ENTRY_DISK': '8:112',
+ 'ID_PART_ENTRY_NUMBER': '3',
+ 'ID_PART_ENTRY_OFFSET': '411648',
+ 'ID_PART_ENTRY_SCHEME': 'dos',
+ 'ID_PART_ENTRY_SIZE': '204800',
+ 'ID_PART_ENTRY_TYPE': '0x83',
+ 'ID_PART_ENTRY_UUID': '73eb11a9-03',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': '73eb11a9',
+ 'ID_PATH': 'pci-0000:00:07.0-scsi-0:0:2:0',
+ 'ID_PATH_TAG': 'pci-0000_00_07_0-scsi-0_0_2_0',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi1-0-2',
+ 'ID_SERIAL_SHORT': 'drive-scsi1-0-2',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MINOR': '115',
+ 'PARTN': '3',
+ 'SUBSYSTEM': 'block',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '8920462',
+ 'SYS_NAME': 'sdh3',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh3'}
+
+ raid_device = {'DEVLINKS': '/dev/disk/by-id/md-uuid-0628d995:eb60ebd1:a7675173:0b16f212 /dev/disk/by-id/md-name-localhost.localdomain:122 /dev/md/122',
+ 'DEVNAME': '/dev/md122',
+ 'DEVPATH': '/devices/virtual/block/md122',
+ 'DEVTYPE': 'disk',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': '6dc80b3b',
+ 'MAJOR': '9',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sdh3_DEV': '/dev/sdh3',
+ 'MD_DEVICE_ev_sdh3_ROLE': '0',
+ 'MD_DEVICE_ev_sdh5_DEV': '/dev/sdh5',
+ 'MD_DEVICE_ev_sdh5_ROLE': '1',
+ 'MD_DEVNAME': '122',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '1.2',
+ 'MD_NAME': 'localhost.localdomain:122',
+ 'MD_UUID': '0628d995:eb60ebd1:a7675173:0b16f212',
+ 'MINOR': '122',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '8770105',
+ 'SYS_NAME': 'md122',
+ 'SYS_PATH': '/sys/devices/virtual/block/md122'}
+
+ raid_partition = {'DEVLINKS': '/dev/disk/by-id/md-uuid-0628d995:eb60ebd1:a7675173:0b16f212-part1 /dev/disk/by-id/md-name-localhost.localdomain:122-part1 /dev/md/122p1',
+ 'DEVNAME': '/dev/md122p1',
+ 'DEVPATH': '/devices/virtual/block/md122/md122p1',
+ 'DEVTYPE': 'partition',
+ 'ID_PART_ENTRY_DISK': '9:122',
+ 'ID_PART_ENTRY_NUMBER': '1',
+ 'ID_PART_ENTRY_OFFSET': '2048',
+ 'ID_PART_ENTRY_SCHEME': 'dos',
+ 'ID_PART_ENTRY_SIZE': '200704',
+ 'ID_PART_ENTRY_TYPE': '0x83',
+ 'ID_PART_ENTRY_UUID': '6dc80b3b-01',
+ 'MAJOR': '259',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sdh3_DEV': '/dev/sdh3',
+ 'MD_DEVICE_ev_sdh3_ROLE': '0',
+ 'MD_DEVICE_ev_sdh5_DEV': '/dev/sdh5',
+ 'MD_DEVICE_ev_sdh5_ROLE': '1',
+ 'MD_DEVNAME': '122',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '1.2',
+ 'MD_NAME': 'localhost.localdomain:122',
+ 'MD_UUID': '0628d995:eb60ebd1:a7675173:0b16f212',
+ 'MINOR': '6',
+ 'PARTN': '1',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '9003885',
+ 'SYS_NAME': 'md122p1',
+ 'SYS_PATH': '/sys/devices/virtual/block/md122/md122p1'}
+
+
+class RaidOnPartition2():
+ member_name = "sdh1"
+ raid_name = "123"
+ raid_node = "md123"
+ metadata_version = "0.9"
+
+ member_boot = {'DEVLINKS': '/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:2:0-part1 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1-0-2-part1 /dev/disk/by-partuuid/73eb11a9-01',
+ 'DEVNAME': '/dev/sdh1',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh1',
+ 'DEVTYPE': 'partition',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': '335b35e0-f1af-8e86-bfe7-8010bc810f04',
+ 'ID_FS_UUID_ENC': '335b35e0-f1af-8e86-bfe7-8010bc810f04',
+ 'ID_FS_VERSION': '0.90.0',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PART_ENTRY_DISK': '8:112',
+ 'ID_PART_ENTRY_NUMBER': '1',
+ 'ID_PART_ENTRY_OFFSET': '2048',
+ 'ID_PART_ENTRY_SCHEME': 'dos',
+ 'ID_PART_ENTRY_SIZE': '204800',
+ 'ID_PART_ENTRY_TYPE': '0x83',
+ 'ID_PART_ENTRY_UUID': '73eb11a9-01',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': '73eb11a9',
+ 'ID_PATH': 'pci-0000:00:07.0-scsi-0:0:2:0',
+ 'ID_PATH_TAG': 'pci-0000_00_07_0-scsi-0_0_2_0',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi1-0-2',
+ 'ID_SERIAL_SHORT': 'drive-scsi1-0-2',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MD_DEVICE': 'md123',
+ 'MD_DEVNAME': '123',
+ 'MD_FOREIGN': 'no',
+ 'MD_STARTED': 'unsafe',
+ 'MINOR': '113',
+ 'PARTN': '1',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdadm-last-resort@md123.timer',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '8778733',
+ 'SYS_NAME': 'sdh1',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh1'}
+
+ member_assemble = {'DEVLINKS': '/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:2:0-part1 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1-0-2-part1 /dev/disk/by-partuuid/73eb11a9-01',
+ 'DEVNAME': '/dev/sdh1',
+ 'DEVPATH': '/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh1',
+ 'DEVTYPE': 'partition',
+ 'ID_BUS': 'scsi',
+ 'ID_FS_TYPE': 'linux_raid_member',
+ 'ID_FS_USAGE': 'raid',
+ 'ID_FS_UUID': '335b35e0-f1af-8e86-bfe7-8010bc810f04',
+ 'ID_FS_UUID_ENC': '335b35e0-f1af-8e86-bfe7-8010bc810f04',
+ 'ID_FS_VERSION': '0.90.0',
+ 'ID_MODEL': 'QEMU_HARDDISK',
+ 'ID_MODEL_ENC': 'QEMU\\x20HARDDISK\\x20\\x20\\x20',
+ 'ID_PART_ENTRY_DISK': '8:112',
+ 'ID_PART_ENTRY_NUMBER': '1',
+ 'ID_PART_ENTRY_OFFSET': '2048',
+ 'ID_PART_ENTRY_SCHEME': 'dos',
+ 'ID_PART_ENTRY_SIZE': '204800',
+ 'ID_PART_ENTRY_TYPE': '0x83',
+ 'ID_PART_ENTRY_UUID': '73eb11a9-01',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': '73eb11a9',
+ 'ID_PATH': 'pci-0000:00:07.0-scsi-0:0:2:0',
+ 'ID_PATH_TAG': 'pci-0000_00_07_0-scsi-0_0_2_0',
+ 'ID_REVISION': '2.5+',
+ 'ID_SCSI': '1',
+ 'ID_SERIAL': '0QEMU_QEMU_HARDDISK_drive-scsi1-0-2',
+ 'ID_SERIAL_SHORT': 'drive-scsi1-0-2',
+ 'ID_TYPE': 'disk',
+ 'ID_VENDOR': 'QEMU',
+ 'ID_VENDOR_ENC': 'QEMU\\x20\\x20\\x20\\x20',
+ 'MAJOR': '8',
+ 'MINOR': '113',
+ 'PARTN': '1',
+ 'SUBSYSTEM': 'block',
+ 'TAGS': ':systemd:',
+ 'UDISKS_MD_MEMBER_DEVICES': '2',
+ 'UDISKS_MD_MEMBER_EVENTS': '18',
+ 'UDISKS_MD_MEMBER_LEVEL': 'raid1',
+ 'UDISKS_MD_MEMBER_UPDATE_TIME': '1597143914',
+ 'UDISKS_MD_MEMBER_UUID': '335b35e0:f1af8e86:bfe78010:bc810f04',
+ 'USEC_INITIALIZED': '8778733',
+ 'SYS_NAME': 'sdh1',
+ 'SYS_PATH': '/sys/devices/pci0000:00/0000:00:07.0/host9/target9:0:2/9:0:2:0/block/sdh/sdh1'}
+
+ raid_device = {'DEVLINKS': '/dev/md/123 /dev/disk/by-id/md-uuid-335b35e0:f1af8e86:bfe78010:bc810f04',
+ 'DEVNAME': '/dev/md123',
+ 'DEVPATH': '/devices/virtual/block/md123',
+ 'DEVTYPE': 'disk',
+ 'ID_PART_TABLE_TYPE': 'dos',
+ 'ID_PART_TABLE_UUID': '653f84c8',
+ 'MAJOR': '9',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sdh1_DEV': '/dev/sdh1',
+ 'MD_DEVICE_ev_sdh1_ROLE': '0',
+ 'MD_DEVICE_ev_sdh2_DEV': '/dev/sdh2',
+ 'MD_DEVICE_ev_sdh2_ROLE': '1',
+ 'MD_DEVNAME': '123',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '0.90',
+ 'MD_UUID': '335b35e0:f1af8e86:bfe78010:bc810f04',
+ 'MINOR': '123',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '8760382',
+ 'SYS_NAME': 'md123',
+ 'SYS_PATH': '/sys/devices/virtual/block/md123'}
+
+ raid_partition = {'DEVLINKS': '/dev/disk/by-id/md-uuid-335b35e0:f1af8e86:bfe78010:bc810f04-part1 /dev/md/123p1',
+ 'DEVNAME': '/dev/md123p1',
+ 'DEVPATH': '/devices/virtual/block/md123/md123p1',
+ 'DEVTYPE': 'partition',
+ 'ID_PART_ENTRY_DISK': '9:123',
+ 'ID_PART_ENTRY_NUMBER': '1',
+ 'ID_PART_ENTRY_OFFSET': '2048',
+ 'ID_PART_ENTRY_SCHEME': 'dos',
+ 'ID_PART_ENTRY_SIZE': '202624',
+ 'ID_PART_ENTRY_TYPE': '0x83',
+ 'ID_PART_ENTRY_UUID': '653f84c8-01',
+ 'MAJOR': '259',
+ 'MD_DEVICES': '2',
+ 'MD_DEVICE_ev_sdh1_DEV': '/dev/sdh1',
+ 'MD_DEVICE_ev_sdh1_ROLE': '0',
+ 'MD_DEVICE_ev_sdh2_DEV': '/dev/sdh2',
+ 'MD_DEVICE_ev_sdh2_ROLE': '1',
+ 'MD_DEVNAME': '123',
+ 'MD_LEVEL': 'raid1',
+ 'MD_METADATA': '0.90',
+ 'MD_UUID': '335b35e0:f1af8e86:bfe78010:bc810f04',
+ 'MINOR': '5',
+ 'PARTN': '1',
+ 'SUBSYSTEM': 'block',
+ 'SYSTEMD_WANTS': 'mdmonitor.service',
+ 'TAGS': ':systemd:',
+ 'USEC_INITIALIZED': '8952876',
+ 'SYS_NAME': 'md123p1',
+ 'SYS_PATH': '/sys/devices/virtual/block/md123/md123p1'}
diff --git a/tests/udev_test.py b/tests/udev_test.py
index 653eeb6d..d30a647b 100644
--- a/tests/udev_test.py
+++ b/tests/udev_test.py
@@ -2,6 +2,8 @@
import unittest
import mock
+from udev_data import raid_data
+
class UdevTest(unittest.TestCase):
@@ -77,3 +79,47 @@ class UdevTest(unittest.TestCase):
# Normal MD RAID (w/ at least one non-disk member)
device_get_slaves.side_effect = lambda info: mixed_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
self.assertFalse(blivet.udev.device_is_disk(info))
+
+
+class UdevGetNameRaidTest(unittest.TestCase):
+
+ def _test_raid_name(self, udev_data):
+ import blivet.udev
+
+ # members don't have the device_get_sysfs_path(info) + "/md" folder
+ with mock.patch("blivet.udev.device_is_md", return_value=False):
+ member_name = blivet.udev.device_get_name(udev_data.member_boot)
+ self.assertEqual(member_name, udev_data.member_name)
+
+ member_name = blivet.udev.device_get_name(udev_data.member_assemble)
+ self.assertEqual(member_name, udev_data.member_name)
+
+ with mock.patch("blivet.udev.device_is_md", return_value=True):
+ raid_name = blivet.udev.device_get_name(udev_data.raid_device)
+ self.assertEqual(raid_name, udev_data.raid_name)
+
+ # partitions also don't have the device_get_sysfs_path(info) + "/md" folder
+ with mock.patch("blivet.udev.device_is_md", return_value=False):
+ part_name = blivet.udev.device_get_name(udev_data.raid_partition)
+ expected_name = udev_data.raid_name + "p1" if udev_data.raid_name[-1].isdigit() else udev_data.raid_name + "1"
+ self.assertEqual(part_name, expected_name)
+
+ def test_raid_name_on_disk_no_name(self):
+ data = raid_data.RaidOnDisk1()
+ self._test_raid_name(data)
+
+ def test_raid_name_on_disk__with_name(self):
+ data = raid_data.RaidOnDisk2()
+ self._test_raid_name(data)
+
+ def test_raid_name_on_disk_old_metadata(self):
+ data = raid_data.RaidOnDisk3()
+ self._test_raid_name(data)
+
+ def test_raid_name_on_part_no_name(self):
+ data = raid_data.RaidOnPartition1()
+ self._test_raid_name(data)
+
+ def test_raid_name_on_part_old_metadata(self):
+ data = raid_data.RaidOnPartition2()
+ self._test_raid_name(data)
--
2.25.4

View File

@ -1,64 +0,0 @@
From 8bdade5e60b746e8d992289e71123ad27146a7f1 Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Wed, 24 Oct 2018 20:08:48 -0400
Subject: [PATCH] Use udev to determine if disk is a multipath member.
Related: rhbz#1575953
---
blivet/populator/helpers/disklabel.py | 3 +--
tests/populator_test.py | 6 ++----
2 files changed, 3 insertions(+), 6 deletions(-)
diff --git a/blivet/populator/helpers/disklabel.py b/blivet/populator/helpers/disklabel.py
index c2acb117..db10638e 100644
--- a/blivet/populator/helpers/disklabel.py
+++ b/blivet/populator/helpers/disklabel.py
@@ -28,7 +28,6 @@
from ...errors import InvalidDiskLabelError
from ...storage_log import log_exception_info, log_method_call
from .formatpopulator import FormatPopulator
-from ...static_data import mpath_members
import logging
log = logging.getLogger("blivet")
@@ -44,7 +43,7 @@ def match(cls, data, device):
return (bool(udev.device_get_disklabel_type(data)) and
not udev.device_is_biosraid_member(data) and
udev.device_get_format(data) != "iso9660" and
- not (device.is_disk and mpath_members.is_mpath_member(device.path)))
+ not (device.is_disk and udev.device_get_format(data) == "mpath_member"))
def _get_kwargs(self):
kwargs = super(DiskLabelFormatPopulator, self)._get_kwargs()
diff --git a/tests/populator_test.py b/tests/populator_test.py
index b6f70319..d9c326d7 100644
--- a/tests/populator_test.py
+++ b/tests/populator_test.py
@@ -827,7 +827,6 @@ class HFSPopulatorTestCase(FormatPopulatorTestCase):
class DiskLabelPopulatorTestCase(PopulatorHelperTestCase):
helper_class = DiskLabelFormatPopulator
- @patch("blivet.static_data.mpath_members.is_mpath_member", return_value=False)
@patch("blivet.udev.device_is_biosraid_member", return_value=False)
@patch("blivet.udev.device_get_format", return_value=None)
@patch("blivet.udev.device_get_disklabel_type", return_value="dos")
@@ -836,7 +835,6 @@ def test_match(self, *args):
device_get_disklabel_type = args[0]
device_get_format = args[1]
device_is_biosraid_member = args[2]
- is_mpath_member = args[3]
device = Mock()
device.is_disk = True
@@ -861,9 +859,9 @@ def test_match(self, *args):
device_is_biosraid_member.return_value = False
# no match for multipath members
- is_mpath_member.return_value = True
+ device_get_format.return_value = "mpath_member"
self.assertFalse(self.helper_class.match(data, device))
- is_mpath_member.return_value = False
+ device_get_format.return_value = None
@patch("blivet.static_data.mpath_members.is_mpath_member", return_value=False)
@patch("blivet.udev.device_is_biosraid_member", return_value=False)

View File

@ -1,45 +0,0 @@
From 5b0b1ffcf0d27306e52476984ebd8eb3af4a11aa Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Mon, 25 Feb 2019 11:14:30 -0500
Subject: [PATCH] Don't crash if blockdev mpath plugin isn't available.
(#1672971)
---
blivet/static_data/mpath_info.py | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/blivet/static_data/mpath_info.py b/blivet/static_data/mpath_info.py
index b16f3c65..49ba4709 100644
--- a/blivet/static_data/mpath_info.py
+++ b/blivet/static_data/mpath_info.py
@@ -27,6 +27,8 @@ from gi.repository import BlockDev as blockdev
import logging
log = logging.getLogger("blivet")
+from ..tasks import availability
+
class MpathMembers(object):
"""A cache for querying multipath member devices"""
@@ -40,7 +42,7 @@ class MpathMembers(object):
:param str device: path of the device to query
"""
- if self._members is None:
+ if self._members is None and availability.BLOCKDEV_MPATH_PLUGIN.available:
self._members = set(blockdev.mpath.get_mpath_members())
device = os.path.realpath(device)
@@ -56,7 +58,8 @@ class MpathMembers(object):
"""
device = os.path.realpath(device)
device = device[len("/dev/"):]
- if blockdev.mpath.is_mpath_member(device):
+
+ if availability.BLOCKDEV_MPATH_PLUGIN.available and blockdev.mpath.is_mpath_member(device):
self._members.add(device)
def drop_cache(self):
--
2.17.2

View File

@ -0,0 +1,269 @@
From f19140993e94be9e58c8a01c18f1907792f59927 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 5 Aug 2020 13:44:38 +0200
Subject: [PATCH] Fix ignoring disk devices with parents or children
For disk-like devices like multipath we should allow to ignore
these by simply ignoring the mpath device or by ignoring all of its
drives.
- when ignoring the "mpatha" device we should also ignore "sda" and
"sdb"
- when ignoring both "sda" and "sdb" we should also ignore "mpatha"
- when ignoring only "sda" we should not ignore "mpatha" (we don't
want to deal with an "incomplete" multipath device in the tree)
This is consistent with the existing behaviour when using exclusive
disks (or "ignoredisks --only-use" in kickstart).
Resolves: rhbz#1866243
---
blivet/devicetree.py | 51 ++++++++-----
tests/devicetree_test.py | 157 ++++++++++++++++++++++++++++-----------
2 files changed, 146 insertions(+), 62 deletions(-)
diff --git a/blivet/devicetree.py b/blivet/devicetree.py
index 5cc360e1..2afb0d0e 100644
--- a/blivet/devicetree.py
+++ b/blivet/devicetree.py
@@ -907,31 +907,48 @@ class DeviceTreeBase(object):
hidden.add_hook(new=False)
lvm.lvm_cc_removeFilterRejectRegexp(hidden.name)
+ def _disk_in_taglist(self, disk, taglist):
+ # Taglist is a list containing mix of disk names and tags into which disk may belong.
+ # Check if it does. Raise ValueError if unknown tag is encountered.
+ if disk.name in taglist:
+ return True
+ tags = [t[1:] for t in taglist if t.startswith("@")]
+ for tag in tags:
+ if tag not in Tags.__members__:
+ raise ValueError("unknown ignoredisk tag '@%s' encountered" % tag)
+ if Tags(tag) in disk.tags:
+ return True
+ return False
+
def _is_ignored_disk(self, disk):
""" Checks config for lists of exclusive and ignored disks
and returns if the given one should be ignored
"""
-
- def disk_in_taglist(disk, taglist):
- # Taglist is a list containing mix of disk names and tags into which disk may belong.
- # Check if it does. Raise ValueError if unknown tag is encountered.
- if disk.name in taglist:
- return True
- tags = [t[1:] for t in taglist if t.startswith("@")]
- for tag in tags:
- if tag not in Tags.__members__:
- raise ValueError("unknown ignoredisk tag '@%s' encountered" % tag)
- if Tags(tag) in disk.tags:
- return True
- return False
-
- return ((self.ignored_disks and disk_in_taglist(disk, self.ignored_disks)) or
- (self.exclusive_disks and not disk_in_taglist(disk, self.exclusive_disks)))
+ return ((self.ignored_disks and self._disk_in_taglist(disk, self.ignored_disks)) or
+ (self.exclusive_disks and not self._disk_in_taglist(disk, self.exclusive_disks)))
def _hide_ignored_disks(self):
# hide any subtrees that begin with an ignored disk
for disk in [d for d in self._devices if d.is_disk]:
- if self._is_ignored_disk(disk):
+ is_ignored = self.ignored_disks and self._disk_in_taglist(disk, self.ignored_disks)
+ is_exclusive = self.exclusive_disks and self._disk_in_taglist(disk, self.exclusive_disks)
+
+ if is_ignored:
+ if len(disk.children) == 1:
+ if not all(self._is_ignored_disk(d) for d in disk.children[0].parents):
+ raise DeviceTreeError("Including only a subset of raid/multipath member disks is not allowed.")
+
+ # and also children like fwraid or mpath
+ self.hide(disk.children[0])
+
+ # this disk is ignored: ignore it and all it's potential parents
+ for p in disk.parents:
+ self.hide(p)
+
+ # and finally hide the disk itself
+ self.hide(disk)
+
+ if self.exclusive_disks and not is_exclusive:
ignored = True
# If the filter allows all members of a fwraid or mpath, the
# fwraid or mpath itself is implicitly allowed as well. I don't
diff --git a/tests/devicetree_test.py b/tests/devicetree_test.py
index a8f369cf..6032e7f6 100644
--- a/tests/devicetree_test.py
+++ b/tests/devicetree_test.py
@@ -370,51 +370,6 @@ class DeviceTreeTestCase(unittest.TestCase):
self.assertTrue(sdb in tree.devices)
self.assertTrue(sdc in tree.devices)
- # now test exclusive_disks special cases for multipath
- sda.format = get_format("multipath_member", exists=True)
- sdb.format = get_format("multipath_member", exists=True)
- sdc.format = get_format("multipath_member", exists=True)
- mpatha = MultipathDevice("mpatha", parents=[sda, sdb, sdc])
- tree._add_device(mpatha)
-
- tree.ignored_disks = []
- tree.exclusive_disks = ["mpatha"]
-
- with patch.object(tree, "hide") as hide:
- tree._hide_ignored_disks()
- self.assertFalse(hide.called)
-
- tree._hide_ignored_disks()
- self.assertTrue(sda in tree.devices)
- self.assertTrue(sdb in tree.devices)
- self.assertTrue(sdc in tree.devices)
- self.assertTrue(mpatha in tree.devices)
-
- # all members in exclusive_disks implies the mpath in exclusive_disks
- tree.exclusive_disks = ["sda", "sdb", "sdc"]
- with patch.object(tree, "hide") as hide:
- tree._hide_ignored_disks()
- self.assertFalse(hide.called)
-
- tree._hide_ignored_disks()
- self.assertTrue(sda in tree.devices)
- self.assertTrue(sdb in tree.devices)
- self.assertTrue(sdc in tree.devices)
- self.assertTrue(mpatha in tree.devices)
-
- tree.exclusive_disks = ["sda", "sdb"]
- with patch.object(tree, "hide") as hide:
- tree._hide_ignored_disks()
- hide.assert_any_call(mpatha)
- hide.assert_any_call(sdc)
-
- # verify that hide works as expected
- tree._hide_ignored_disks()
- self.assertTrue(sda in tree.devices)
- self.assertTrue(sdb in tree.devices)
- self.assertFalse(sdc in tree.devices)
- self.assertFalse(mpatha in tree.devices)
-
def test_get_related_disks(self):
tree = DeviceTree()
@@ -447,3 +402,115 @@ class DeviceTreeTestCase(unittest.TestCase):
tree.unhide(sda)
self.assertEqual(tree.get_related_disks(sda), set([sda, sdb]))
self.assertEqual(tree.get_related_disks(sdb), set([sda, sdb]))
+
+
+class DeviceTreeIgnoredExclusiveMultipathTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.tree = DeviceTree()
+
+ self.sda = DiskDevice("sda")
+ self.sdb = DiskDevice("sdb")
+ self.sdc = DiskDevice("sdc")
+
+ self.tree._add_device(self.sda)
+ self.tree._add_device(self.sdb)
+ self.tree._add_device(self.sdc)
+
+ self.assertTrue(self.sda in self.tree.devices)
+ self.assertTrue(self.sdb in self.tree.devices)
+ self.assertTrue(self.sdc in self.tree.devices)
+
+ # now test exclusive_disks special cases for multipath
+ self.sda.format = get_format("multipath_member", exists=True)
+ self.sdb.format = get_format("multipath_member", exists=True)
+ self.sdc.format = get_format("multipath_member", exists=True)
+ self.mpatha = MultipathDevice("mpatha", parents=[self.sda, self.sdb, self.sdc])
+ self.tree._add_device(self.mpatha)
+
+ def test_exclusive_disks_multipath_1(self):
+ # multipath is exclusive -> all disks should be exclusive
+ self.tree.ignored_disks = []
+ self.tree.exclusive_disks = ["mpatha"]
+
+ with patch.object(self.tree, "hide") as hide:
+ self.tree._hide_ignored_disks()
+ self.assertFalse(hide.called)
+
+ self.tree._hide_ignored_disks()
+ self.assertTrue(self.sda in self.tree.devices)
+ self.assertTrue(self.sdb in self.tree.devices)
+ self.assertTrue(self.sdc in self.tree.devices)
+ self.assertTrue(self.mpatha in self.tree.devices)
+
+ def test_exclusive_disks_multipath_2(self):
+ # all disks exclusive -> mpath should also be exclusive
+ self.tree.exclusive_disks = ["sda", "sdb", "sdc"]
+ with patch.object(self.tree, "hide") as hide:
+ self.tree._hide_ignored_disks()
+ self.assertFalse(hide.called)
+
+ self.tree._hide_ignored_disks()
+ self.assertTrue(self.sda in self.tree.devices)
+ self.assertTrue(self.sdb in self.tree.devices)
+ self.assertTrue(self.sdc in self.tree.devices)
+ self.assertTrue(self.mpatha in self.tree.devices)
+
+ def test_exclusive_disks_multipath_3(self):
+ # some disks exclusive -> mpath should be hidden
+ self.tree.exclusive_disks = ["sda", "sdb"]
+ with patch.object(self.tree, "hide") as hide:
+ self.tree._hide_ignored_disks()
+ hide.assert_any_call(self.mpatha)
+ hide.assert_any_call(self.sdc)
+
+ # verify that hide works as expected
+ self.tree._hide_ignored_disks()
+ self.assertTrue(self.sda in self.tree.devices)
+ self.assertTrue(self.sdb in self.tree.devices)
+ self.assertFalse(self.sdc in self.tree.devices)
+ self.assertFalse(self.mpatha in self.tree.devices)
+
+ def test_ignored_disks_multipath_1(self):
+ # mpatha ignored -> disks should be hidden
+ self.tree.ignored_disks = ["mpatha"]
+ self.tree.exclusive_disks = []
+
+ with patch.object(self.tree, "hide") as hide:
+ self.tree._hide_ignored_disks()
+ hide.assert_any_call(self.mpatha)
+ hide.assert_any_call(self.sda)
+ hide.assert_any_call(self.sdb)
+ hide.assert_any_call(self.sdc)
+
+ self.tree._hide_ignored_disks()
+ self.assertFalse(self.sda in self.tree.devices)
+ self.assertFalse(self.sdb in self.tree.devices)
+ self.assertFalse(self.sdc in self.tree.devices)
+ self.assertFalse(self.mpatha in self.tree.devices)
+
+ def test_ignored_disks_multipath_2(self):
+ # all disks ignored -> mpath should be hidden
+ self.tree.ignored_disks = ["sda", "sdb", "sdc"]
+ self.tree.exclusive_disks = []
+
+ with patch.object(self.tree, "hide") as hide:
+ self.tree._hide_ignored_disks()
+ hide.assert_any_call(self.mpatha)
+ hide.assert_any_call(self.sda)
+ hide.assert_any_call(self.sdb)
+ hide.assert_any_call(self.sdc)
+
+ self.tree._hide_ignored_disks()
+ self.assertFalse(self.sda in self.tree.devices)
+ self.assertFalse(self.sdb in self.tree.devices)
+ self.assertFalse(self.sdc in self.tree.devices)
+ self.assertFalse(self.mpatha in self.tree.devices)
+
+ def test_ignored_disks_multipath_3(self):
+ # some disks ignored -> error
+ self.tree.ignored_disks = ["sda", "sdb"]
+ self.tree.exclusive_disks = []
+
+ with self.assertRaises(DeviceTreeError):
+ self.tree._hide_ignored_disks()
--
2.25.4

View File

@ -1,31 +0,0 @@
From d01281a69e317d7bae4a7698edb6583b6310d5c1 Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Tue, 19 Mar 2019 11:51:47 -0400
Subject: [PATCH] Ensure correct type of mpath cache member list.
Related: rhbz#1672971
---
blivet/static_data/mpath_info.py | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/blivet/static_data/mpath_info.py b/blivet/static_data/mpath_info.py
index 49ba4709..64958ba8 100644
--- a/blivet/static_data/mpath_info.py
+++ b/blivet/static_data/mpath_info.py
@@ -42,8 +42,11 @@ class MpathMembers(object):
:param str device: path of the device to query
"""
- if self._members is None and availability.BLOCKDEV_MPATH_PLUGIN.available:
- self._members = set(blockdev.mpath.get_mpath_members())
+ if self._members is None:
+ if availability.BLOCKDEV_MPATH_PLUGIN.available:
+ self._members = set(blockdev.mpath.get_mpath_members())
+ else:
+ self._members = set()
device = os.path.realpath(device)
device = device[len("/dev/"):]
--
2.20.1

View File

@ -1,122 +0,0 @@
From c495f74951caa0104636032e00704a83ab5f73b1 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 26 Mar 2019 12:58:53 +0100
Subject: [PATCH 1/3] Properly clean after availability test case
We need to set availability of the 'mkfs.hfsplus' utility back to
it's real value after changing it to "always available" for this
test case.
---
tests/devices_test/dependencies_test.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/tests/devices_test/dependencies_test.py b/tests/devices_test/dependencies_test.py
index 9dbdd24d..76bf758b 100644
--- a/tests/devices_test/dependencies_test.py
+++ b/tests/devices_test/dependencies_test.py
@@ -69,6 +69,7 @@ class MockingDeviceDependenciesTestCase1(unittest.TestCase):
self.mdraid_method = availability.BLOCKDEV_MDRAID_PLUGIN._method
self.dm_method = availability.BLOCKDEV_DM_PLUGIN._method
+ self.hfsplus_method = availability.MKFS_HFSPLUS_APP._method
self.cache_availability = availability.CACHE_AVAILABILITY
self.addCleanup(self._clean_up)
@@ -105,10 +106,12 @@ class MockingDeviceDependenciesTestCase1(unittest.TestCase):
def _clean_up(self):
availability.BLOCKDEV_MDRAID_PLUGIN._method = self.mdraid_method
availability.BLOCKDEV_DM_PLUGIN._method = self.dm_method
+ availability.MKFS_HFSPLUS_APP._method = self.hfsplus_method
availability.CACHE_AVAILABILITY = False
availability.BLOCKDEV_MDRAID_PLUGIN.available # pylint: disable=pointless-statement
availability.BLOCKDEV_DM_PLUGIN.available # pylint: disable=pointless-statement
+ availability.MKFS_HFSPLUS_APP.available # pylint: disable=pointless-statement
availability.CACHE_AVAILABILITY = self.cache_availability
--
2.20.1
From a6798882f5ba5b1e0ea655255d6f1fd5eda85f64 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 26 Mar 2019 13:00:40 +0100
Subject: [PATCH 2/3] Skip weak dependencies test if we don't have all
libblockdev plugins
This test checks that creating devices works when we have all
plugins and fails "nicely" if we don't have all plugins so we
actually need all the plugins for this test case.
---
tests/devices_test/dependencies_test.py | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/tests/devices_test/dependencies_test.py b/tests/devices_test/dependencies_test.py
index 76bf758b..308d6192 100644
--- a/tests/devices_test/dependencies_test.py
+++ b/tests/devices_test/dependencies_test.py
@@ -157,6 +157,11 @@ class MissingWeakDependenciesTestCase(unittest.TestCase):
self.disk1_file = create_sparse_tempfile("disk1", Size("2GiB"))
self.plugins = blockdev.plugin_specs_from_names(blockdev.get_available_plugin_names())
+ loaded_plugins = self.load_all_plugins()
+ if not all(p in loaded_plugins for p in ("btrfs", "crypto", "lvm", "md")):
+ # we don't have all plugins needed for this test case
+ self.skipTest("Missing libblockdev plugins needed from weak dependencies test.")
+
def _clean_up(self):
# reload all libblockdev plugins
self.load_all_plugins()
--
2.20.1
From 151fce2c9a98dc5a7943b314828518518a755ec8 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 26 Mar 2019 13:36:31 +0100
Subject: [PATCH 3/3] Check for format tools availability in action_test
---
tests/action_test.py | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/tests/action_test.py b/tests/action_test.py
index 93ed9e57..101d5a21 100644
--- a/tests/action_test.py
+++ b/tests/action_test.py
@@ -19,6 +19,13 @@ from blivet.devices import MDRaidArrayDevice
from blivet.devices import LVMVolumeGroupDevice
from blivet.devices import LVMLogicalVolumeDevice
+# format classes
+from blivet.formats.fs import Ext2FS
+from blivet.formats.fs import Ext3FS
+from blivet.formats.fs import Ext4FS
+from blivet.formats.fs import FATFS
+from blivet.formats.fs import XFS
+
# action classes
from blivet.deviceaction import ActionCreateDevice
from blivet.deviceaction import ActionResizeDevice
@@ -39,8 +46,17 @@ DEVICE_CLASSES = [
LVMLogicalVolumeDevice
]
+FORMAT_CLASSES = [
+ Ext2FS,
+ Ext3FS,
+ Ext4FS,
+ FATFS,
+ XFS
+]
+
@unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES), "some unsupported device classes required for this test")
+@unittest.skipUnless(not any(x().utils_available for x in FORMAT_CLASSES), "some unsupported format classes required for this test")
class DeviceActionTestCase(StorageTestCase):
""" DeviceActionTestSuite """
--
2.20.1

View File

@ -1,110 +0,0 @@
From 545c41e6750d5e28743a7da9e43175302c4fa812 Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Thu, 4 Apr 2019 13:52:54 -0400
Subject: [PATCH 1/4] Remove profanity from an old comment.
---
blivet/blivet.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/blivet/blivet.py b/blivet/blivet.py
index 8128347f..ff4410ae 100644
--- a/blivet/blivet.py
+++ b/blivet/blivet.py
@@ -875,7 +875,7 @@ def safe_device_name(self, name):
LVM limits lv names to 128 characters. I don't know the limits for
the other various device types, so I'm going to pick a number so
- that we don't have to have an entire fucking library to determine
+ that we don't have to have an entire library to determine
device name limits.
"""
max_len = 96 # No, you don't need longer names than this. Really.
From 7395fb481b7b7a5054a3ba12e07f40ba1c8d926a Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Mon, 22 Apr 2019 17:44:42 -0400
Subject: [PATCH 2/4] Add a target to create an archive of the unit tests.
---
Makefile | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/Makefile b/Makefile
index 76817278..f9b2066e 100644
--- a/Makefile
+++ b/Makefile
@@ -120,6 +120,10 @@ archive: po-pull
git checkout -- po/$(PKGNAME).pot
@echo "The archive is in $(PKGNAME)-$(VERSION).tar.gz"
+tests-archive:
+ git archive --format=tar --prefix=$(PKGNAME)-$(VERSION)/ $(VERSION_TAG) tests/ | gzip -9 > $(PKGNAME)-$(VERSION)-tests.tar.gz
+ @echo "The test archive is in $(PKGNAME)-$(VERSION)-tests.tar.gz"
+
local: po-pull
@make -B ChangeLog
$(PYTHON) setup.py -q sdist --dist-dir .
From 28959739b46d22698c05f34494d2d9c67f37f0c4 Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Mon, 22 Apr 2019 17:45:19 -0400
Subject: [PATCH 3/4] Add spec file logic to include unit tests in SRPM.
---
python-blivet.spec | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/python-blivet.spec b/python-blivet.spec
index 668e0913..23fa07f6 100644
--- a/python-blivet.spec
+++ b/python-blivet.spec
@@ -29,6 +29,7 @@ License: LGPLv2+
%global realname blivet
%global realversion %{version}%{?prerelease}
Source0: http://github.com/storaged-project/blivet/archive/%{realname}-%{realversion}.tar.gz
+Source1: http://github.com/storaged-project/blivet/archive/%{realname}-%{realversion}-tests.tar.gz
# Versions of required components (done so we make sure the buildrequires
# match the requires versions of things).
@@ -165,7 +166,8 @@ configuration.
%endif
%prep
-%autosetup -n %{realname}-%{realversion} -p1
+%autosetup -n %{realname}-%{realversion} -N
+%autosetup -n %{realname}-%{realversion} -b1 -p1
%build
%{?with_python2:make PYTHON=%{__python2}}
From 305c9b52ee5682baf53be660c501b7b263029699 Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Fri, 26 Apr 2019 16:39:35 -0400
Subject: [PATCH 4/4] Include tests archive where appropriate in make targets.
---
Makefile | 3 +++
1 file changed, 3 insertions(+)
diff --git a/Makefile b/Makefile
index f9b2066e..552550a6 100644
--- a/Makefile
+++ b/Makefile
@@ -119,6 +119,7 @@ archive: po-pull
rm -rf $(PKGNAME)-$(VERSION)
git checkout -- po/$(PKGNAME).pot
@echo "The archive is in $(PKGNAME)-$(VERSION).tar.gz"
+ @make tests-archive
tests-archive:
git archive --format=tar --prefix=$(PKGNAME)-$(VERSION)/ $(VERSION_TAG) tests/ | gzip -9 > $(PKGNAME)-$(VERSION)-tests.tar.gz
@@ -128,6 +129,8 @@ local: po-pull
@make -B ChangeLog
$(PYTHON) setup.py -q sdist --dist-dir .
@echo "The archive is in $(PKGNAME)-$(VERSION).tar.gz"
+ git ls-files tests/ | tar -T- -czf $(PKGNAME)-$(VERSION)-tests.tar.gz
+ @echo "The test archive is in $(PKGNAME)-$(VERSION)-tests.tar.gz"
rpmlog:
@git log --pretty="format:- %s (%ae)" $(RELEASE_TAG).. |sed -e 's/@.*)/)/'

View File

@ -1,47 +0,0 @@
From 6528bb0149720b336c9da7b57eaea048d693871c Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Wed, 20 Jun 2018 16:37:24 -0400
Subject: [PATCH] Deactivate incomplete VGs along with everything else.
(cherry picked from commit 39637796ca1aa2f03c89b5ec86ac246eecca1570)
---
blivet/devices/lvm.py | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 0cb1a2ac..1e9da2a8 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -216,15 +216,25 @@ class LVMVolumeGroupDevice(ContainerDevice):
if lv.status:
return True
+ # special handling for incomplete VGs
+ if not self.complete:
+ try:
+ lvs_info = blockdev.lvm.lvs(vg_name=self.name)
+ except blockdev.LVMError:
+ lvs_info = dict()
+
+ for lv_info in lvs_info.values():
+ lv_attr = udev.device_get_lv_attr(lv_info)
+ if lv_attr and lv_attr[4] == 'a':
+ return True
+
+ return False
+
# if any of our PVs are not active then we cannot be
for pv in self.pvs:
if not pv.status:
return False
- # if we are missing some of our PVs we cannot be active
- if not self.complete:
- return False
-
return True
def _pre_setup(self, orig=False):
--
2.20.1

View File

@ -1,31 +0,0 @@
From caec289d8220fc9a8d8b3d6e99271394f4ef83fe Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 27 Feb 2019 12:26:30 +0100
Subject: [PATCH] Automatically adjust size of growable devices for new format
Without this kickstart 'part /home --size=1 --grow --encrypted'
will fail because min size for LUKS is 2 MiB.
Resolves: rhbz#1680013
---
blivet/devices/storage.py | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/blivet/devices/storage.py b/blivet/devices/storage.py
index 904b60df..9d6001e8 100644
--- a/blivet/devices/storage.py
+++ b/blivet/devices/storage.py
@@ -721,7 +721,12 @@ def _set_format(self, fmt):
if fmt.max_size and fmt.max_size < self.size:
raise errors.DeviceError("device is too large for new format")
elif fmt.min_size and fmt.min_size > self.size:
- raise errors.DeviceError("device is too small for new format")
+ if self.growable:
+ log.info("%s: using size %s instead of %s to accommodate "
+ "format minimum size", self.name, fmt.min_size, self.size)
+ self.size = fmt.min_size
+ else:
+ raise errors.DeviceError("device is too small for new format")
if self._format != fmt:
callbacks.format_removed(device=self, fmt=self._format)

View File

@ -1,54 +0,0 @@
From ac5646f8e9e59389bdc651c63b5e7dcd2d693bf4 Mon Sep 17 00:00:00 2001
From: Radek Vykydal <rvykydal@redhat.com>
Date: Wed, 22 May 2019 13:35:01 +0200
Subject: [PATCH] Add flag for protecting cdrom devices during populate
Resolves: rhbz#1719648
---
blivet/devices/optical.py | 14 ++++++++++++++
blivet/flags.py | 3 +++
2 files changed, 17 insertions(+)
diff --git a/blivet/devices/optical.py b/blivet/devices/optical.py
index b9dba1f2..122825f2 100644
--- a/blivet/devices/optical.py
+++ b/blivet/devices/optical.py
@@ -24,6 +24,7 @@
from .. import errors
from .. import util
from ..storage_log import log_method_call
+from ..flags import flags
import logging
log = logging.getLogger("blivet")
@@ -81,3 +82,16 @@ def eject(self):
util.run_program(["eject", self.name])
except OSError as e:
log.warning("error ejecting cdrom %s: %s", self.name, e)
+
+ @property
+ def protected(self):
+ protected = super(OpticalDevice, self).protected
+
+ if flags.protect_cdroms:
+ return True
+ else:
+ return protected
+
+ @protected.setter
+ def protected(self, value):
+ self._protected = value
diff --git a/blivet/flags.py b/blivet/flags.py
index 6500be30..a6a78edc 100644
--- a/blivet/flags.py
+++ b/blivet/flags.py
@@ -77,6 +77,9 @@ def __init__(self):
# (so far only for LUKS)
self.discard_new = False
+ # whether cdroms should be protected
+ self.protect_cdroms = False
+
self.boot_cmdline = {}
self.update_from_boot_cmdline()

View File

@ -1,81 +0,0 @@
From 8124b804915d54e341e80bdd84e84eec3a54aaba Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Tue, 27 Nov 2018 13:37:49 -0500
Subject: [PATCH 1/2] Only update sysfs path in ctor for active devices.
Related: rhbz#1579375
---
blivet/devices/storage.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/blivet/devices/storage.py b/blivet/devices/storage.py
index 3cc29436..904b60df 100644
--- a/blivet/devices/storage.py
+++ b/blivet/devices/storage.py
@@ -149,8 +149,8 @@ def __init__(self, name, fmt=None, uuid=None,
self.device_links = []
if self.exists:
- self.update_sysfs_path()
if self.status:
+ self.update_sysfs_path()
self.update_size()
def __str__(self):
From 4cc31c735db820896278a7b91bb761df00becdb5 Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Tue, 27 Nov 2018 14:03:40 -0500
Subject: [PATCH 2/2] Fix xfs sync of chrooted mountpoint.
Related: rhbz#1579375
---
blivet/tasks/fssync.py | 22 ++++++++++++++++------
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/blivet/tasks/fssync.py b/blivet/tasks/fssync.py
index a15c8e1b..996fe782 100644
--- a/blivet/tasks/fssync.py
+++ b/blivet/tasks/fssync.py
@@ -49,11 +49,21 @@ class XFSSync(FSSync):
ext = availability.XFSFREEZE_APP
- def _freeze_command(self):
- return [str(self.ext), "-f", self.fs.system_mountpoint]
+ def _get_mountpoint(self, root=None):
+ mountpoint = self.fs.system_mountpoint
+ if root is not None and root.replace('/', ''):
+ if mountpoint == root:
+ mountpoint = '/'
+ else:
+ mountpoint = mountpoint[len(root):]
- def _unfreeze_command(self):
- return [str(self.ext), "-u", self.fs.system_mountpoint]
+ return mountpoint
+
+ def _freeze_command(self, root=None):
+ return [str(self.ext), "-f", self._get_mountpoint(root=root)]
+
+ def _unfreeze_command(self, root=None):
+ return [str(self.ext), "-u", self._get_mountpoint(root=root)]
def do_task(self, root="/"):
# pylint: disable=arguments-differ
@@ -63,13 +73,13 @@ def do_task(self, root="/"):
error_msg = None
try:
- rc = util.run_program(self._freeze_command(), root=root)
+ rc = util.run_program(self._freeze_command(root=root), root=root)
except OSError as e:
error_msg = "failed to sync filesytem: %s" % e
error_msg = error_msg or rc
try:
- rc = util.run_program(self._unfreeze_command(), root=root)
+ rc = util.run_program(self._unfreeze_command(root=root), root=root)
except OSError as e:
error_msg = error_msg or "failed to sync filesystem: %s" % e
error_msg = error_msg or rc

View File

@ -1,128 +0,0 @@
From 1d9dc59ab2c471d7dcc39cd6982bd14380d5f726 Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Thu, 13 Jun 2019 11:22:16 -0400
Subject: [PATCH 1/3] Add a function to detect if running in a vm.
Related: rhbz#1676935
---
blivet/util.py | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/blivet/util.py b/blivet/util.py
index 542bc93f..fa5e9e35 100644
--- a/blivet/util.py
+++ b/blivet/util.py
@@ -1,4 +1,5 @@
import copy
+from distutils.spawn import find_executable
import functools
import glob
import itertools
@@ -1100,3 +1101,16 @@ def decorated(*args, **kwargs):
return None
return decorated
return decorator
+
+
+def detect_virt():
+ """ Return True if we are running in a virtual machine. """
+ in_vm = False
+ detect_virt_prog = find_executable('systemd-detect-virt')
+ if detect_virt_prog:
+ try:
+ in_vm = run_program([detect_virt_prog, "--vm"]) == 0
+ except OSError:
+ pass
+
+ return in_vm
From 26d4b48ab5eca44695dced52c6170ec04610bc1d Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Thu, 13 Jun 2019 10:57:48 -0400
Subject: [PATCH 2/3] Use dasd disklabel for vm disks backed by dasds.
Resolves: rhbz#1676935
---
blivet/formats/disklabel.py | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/blivet/formats/disklabel.py b/blivet/formats/disklabel.py
index 8186d1a1..0c4fce35 100644
--- a/blivet/formats/disklabel.py
+++ b/blivet/formats/disklabel.py
@@ -261,6 +261,15 @@ def _get_best_label_type(self):
elif self.parted_device.type == parted.DEVICE_DASD:
# the device is DASD
return "dasd"
+ elif util.detect_virt():
+ # check for dasds exported into qemu as normal virtio/scsi disks
+ try:
+ _parted_disk = parted.Disk(device=self.parted_device)
+ except (_ped.DiskLabelException, _ped.IOException, NotImplementedError):
+ pass
+ else:
+ if _parted_disk.type == "dasd":
+ return "dasd"
for lt in label_types:
if self._label_type_size_check(lt):
From c93d1207bb2942736a390bd58adafda3deb1c25c Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Thu, 13 Jun 2019 12:04:23 -0400
Subject: [PATCH 3/3] Use DBus call to see if we're in a vm.
---
blivet/util.py | 22 +++++++++++++---------
1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/blivet/util.py b/blivet/util.py
index fa5e9e35..2932e8b5 100644
--- a/blivet/util.py
+++ b/blivet/util.py
@@ -1,5 +1,4 @@
import copy
-from distutils.spawn import find_executable
import functools
import glob
import itertools
@@ -20,6 +19,7 @@
from enum import Enum
from .errors import DependencyError
+from . import safe_dbus
import gi
gi.require_version("BlockDev", "2.0")
@@ -39,6 +39,12 @@
program_log_lock = Lock()
+SYSTEMD_SERVICE = "org.freedesktop.systemd1"
+SYSTEMD_MANAGER_PATH = "/org/freedesktop/systemd1/Manager"
+SYSTEMD_MANAGER_IFACE = "org.freedesktop.systemd1.Manager"
+VIRT_PROP_NAME = "Virtualization"
+
+
class Path(str):
""" Path(path, root=None) provides a filesystem path object, which
@@ -1105,12 +1111,10 @@ def decorated(*args, **kwargs):
def detect_virt():
""" Return True if we are running in a virtual machine. """
- in_vm = False
- detect_virt_prog = find_executable('systemd-detect-virt')
- if detect_virt_prog:
- try:
- in_vm = run_program([detect_virt_prog, "--vm"]) == 0
- except OSError:
- pass
+ try:
+ vm = safe_dbus.get_property_sync(SYSTEMD_SERVICE, SYSTEMD_MANAGER_PATH,
+ SYSTEMD_MANAGER_IFACE, VIRT_PROP_NAME)
+ except (safe_dbus.DBusCallError, safe_dbus.DBusPropertyError):
+ vm = None
- return in_vm
+ return vm in ('qemu', 'kvm')

View File

@ -1,30 +0,0 @@
From 5097a0f3fba2960fc77cfd6ceb828287f60c930c Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 6 Dec 2018 10:32:58 +0100
Subject: [PATCH] Fix reading LV attributes in LVMVolumeGroupDevice.status
This was not adjusted to libblockdev API when cherry-picking fixes
from rhel7-branch in 3c8f8dbf78b0a093e120f69241b44a48ff07be30
---
blivet/devices/lvm.py | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 7835b7e8..8c4ee2ba 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -222,11 +222,10 @@ def status(self):
try:
lvs_info = blockdev.lvm.lvs(vg_name=self.name)
except blockdev.LVMError:
- lvs_info = dict()
+ lvs_info = []
- for lv_info in lvs_info.values():
- lv_attr = udev.device_get_lv_attr(lv_info)
- if lv_attr and lv_attr[4] == 'a':
+ for lv_info in lvs_info:
+ if lv_info.attr and lv_info.attr[4] == 'a':
return True
return False

View File

@ -1,65 +0,0 @@
From 5f7dbb212b4d6da4f8f2609ae1415e8630d031cd Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 13 May 2019 12:49:52 +0200
Subject: [PATCH] Correctly handle non-unicode iSCSI initiator names
---
blivet/iscsi.py | 4 +++-
blivet/udev.py | 20 +++++++++++---------
2 files changed, 14 insertions(+), 10 deletions(-)
diff --git a/blivet/iscsi.py b/blivet/iscsi.py
index 74432505..f612cf15 100644
--- a/blivet/iscsi.py
+++ b/blivet/iscsi.py
@@ -206,7 +206,9 @@ def initiator(self):
if self._initiator != "":
return self._initiator
- return self._call_initiator_method("GetInitiatorName")[0]
+ # udisks returns initiatorname as a NULL terminated bytearray
+ raw_initiator = bytes(self._call_initiator_method("GetInitiatorNameRaw")[0][:-1])
+ return raw_initiator.decode("utf-8", errors="replace")
@initiator.setter
@storaged_iscsi_required(critical=True, eval_mode=util.EvalMode.onetime)
diff --git a/blivet/udev.py b/blivet/udev.py
index 51b69b76..a70e3e08 100644
--- a/blivet/udev.py
+++ b/blivet/udev.py
@@ -836,24 +836,26 @@ def device_get_iscsi_nic(info):
def device_get_iscsi_initiator(info):
- initiator = None
+ initiator_name = None
if device_is_partoff_iscsi(info):
host = re.match(r'.*/(host\d+)', device_get_sysfs_path(info)).groups()[0]
if host:
initiator_file = "/sys/class/iscsi_host/%s/initiatorname" % host
if os.access(initiator_file, os.R_OK):
- initiator = open(initiator_file).read().strip()
+ initiator = open(initiator_file, "rb").read().strip()
+ initiator_name = initiator.decode("utf-8", errors="replace")
log.debug("found offload iscsi initiatorname %s in file %s",
- initiator, initiator_file)
- if initiator.lstrip("(").rstrip(")").lower() == "null":
- initiator = None
- if initiator is None:
+ initiator_name, initiator_file)
+ if initiator_name.lstrip("(").rstrip(")").lower() == "null":
+ initiator_name = None
+ if initiator_name is None:
session = device_get_iscsi_session(info)
if session:
initiator = open("/sys/class/iscsi_session/%s/initiatorname" %
- session).read().strip()
- log.debug("found iscsi initiatorname %s", initiator)
- return initiator
+ session, "rb").read().strip()
+ initiator_name = initiator.decode("utf-8", errors="replace")
+ log.debug("found iscsi initiatorname %s", initiator_name)
+ return initiator_name
# fcoe disks have ID_PATH in the form of:

View File

@ -1,27 +0,0 @@
From 408da7ad8eaedf9edb8dfa240af35a222fa8b481 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 11 Mar 2019 13:29:04 +0100
Subject: [PATCH] Do not crash if 'dm.get_member_raid_sets' fails (#1684851)
---
blivet/populator/helpers/dmraid.py | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/blivet/populator/helpers/dmraid.py b/blivet/populator/helpers/dmraid.py
index c8cc3a8e..ed48bd66 100644
--- a/blivet/populator/helpers/dmraid.py
+++ b/blivet/populator/helpers/dmraid.py
@@ -53,7 +53,12 @@ def run(self):
minor = udev.device_get_minor(self.data)
# Have we already created the DMRaidArrayDevice?
- rs_names = blockdev.dm.get_member_raid_sets(name, uuid, major, minor)
+ try:
+ rs_names = blockdev.dm.get_member_raid_sets(name, uuid, major, minor)
+ except blockdev.DMError as e:
+ log.error("Failed to get RAID sets information for '%s': %s", name, str(e))
+ return
+
if len(rs_names) == 0:
log.warning("dmraid member %s does not appear to belong to any "
"array", self.device.name)

View File

@ -1,166 +0,0 @@
From c667dbb3ebf05eafeb4fb55d3ffa22d27c25420c Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Wed, 24 Oct 2018 20:12:20 -0400
Subject: [PATCH 1/3] Don't try to update sysfs path for non-block devices.
(#1579375)
---
blivet/devices/file.py | 3 +++
blivet/devices/nfs.py | 3 +++
blivet/devices/nodev.py | 3 +++
3 files changed, 9 insertions(+)
diff --git a/blivet/devices/file.py b/blivet/devices/file.py
index 55522c1d..fa3dfb8a 100644
--- a/blivet/devices/file.py
+++ b/blivet/devices/file.py
@@ -132,6 +132,9 @@ def is_name_valid(self, name):
# Override StorageDevice.is_name_valid to allow /
return not('\x00' in name or name == '.' or name == '..')
+ def update_sysfs_path(self):
+ pass
+
class SparseFileDevice(FileDevice):
diff --git a/blivet/devices/nfs.py b/blivet/devices/nfs.py
index 97cbe01e..a0142f91 100644
--- a/blivet/devices/nfs.py
+++ b/blivet/devices/nfs.py
@@ -77,3 +77,6 @@ def update_size(self, newsize=None):
def is_name_valid(self, name):
# Override StorageDevice.is_name_valid to allow /
return not('\x00' in name or name == '.' or name == '..')
+
+ def update_sysfs_path(self):
+ pass
diff --git a/blivet/devices/nodev.py b/blivet/devices/nodev.py
index f6129258..f1b87392 100644
--- a/blivet/devices/nodev.py
+++ b/blivet/devices/nodev.py
@@ -75,6 +75,9 @@ def destroy(self):
def update_size(self, newsize=None):
pass
+ def update_sysfs_path(self):
+ pass
+
class TmpFSDevice(NoDevice):
From acb0953ad89327b3ffd3571b6d45565762548203 Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Wed, 24 Oct 2018 20:27:22 -0400
Subject: [PATCH 2/3] Only try to set selinux context for lost+found on ext
file systems.
Related: rhbz#1579375
---
blivet/formats/fs.py | 19 ++++++++++++++-----
tests/formats_test/selinux_test.py | 5 ++++-
2 files changed, 18 insertions(+), 6 deletions(-)
diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py
index 81e367f4..b915a2de 100644
--- a/blivet/formats/fs.py
+++ b/blivet/formats/fs.py
@@ -569,11 +569,6 @@ def _post_setup(self, **kwargs):
ret = util.reset_file_context(mountpoint, chroot)
if not ret:
log.warning("Failed to reset SElinux context for newly mounted filesystem root directory to default.")
- lost_and_found_context = util.match_path_context("/lost+found")
- lost_and_found_path = os.path.join(mountpoint, "lost+found")
- ret = util.set_file_context(lost_and_found_path, lost_and_found_context, chroot)
- if not ret:
- log.warning("Failed to set SELinux context for newly mounted filesystem lost+found directory at %s to %s", lost_and_found_path, lost_and_found_context)
def _pre_teardown(self, **kwargs):
if not super(FS, self)._pre_teardown(**kwargs):
@@ -840,6 +835,20 @@ class Ext2FS(FS):
parted_system = fileSystemType["ext2"]
_metadata_size_factor = 0.93 # ext2 metadata may take 7% of space
+ def _post_setup(self, **kwargs):
+ super(Ext2FS, self)._post_setup(**kwargs)
+
+ options = kwargs.get("options", "")
+ chroot = kwargs.get("chroot", "/")
+ mountpoint = kwargs.get("mountpoint") or self.mountpoint
+
+ if flags.selinux and "ro" not in self._mount.mount_options(options).split(",") and flags.selinux_reset_fcon:
+ lost_and_found_context = util.match_path_context("/lost+found")
+ lost_and_found_path = os.path.join(mountpoint, "lost+found")
+ ret = util.set_file_context(lost_and_found_path, lost_and_found_context, chroot)
+ if not ret:
+ log.warning("Failed to set SELinux context for newly mounted filesystem lost+found directory at %s to %s", lost_and_found_path, lost_and_found_context)
+
register_device_format(Ext2FS)
diff --git a/tests/formats_test/selinux_test.py b/tests/formats_test/selinux_test.py
index 79c10327..028e084e 100644
--- a/tests/formats_test/selinux_test.py
+++ b/tests/formats_test/selinux_test.py
@@ -43,7 +43,10 @@ def exec_mount_selinux_format(self, formt, *args):
blivet.flags.flags.selinux_reset_fcon = True
fmt.setup(mountpoint="dummy") # param needed to pass string check
- lsetfilecon.assert_called_with(ANY, lost_found_context)
+ if isinstance(fmt, fs.Ext2FS):
+ lsetfilecon.assert_called_with(ANY, lost_found_context)
+ else:
+ lsetfilecon.assert_not_called()
lsetfilecon.reset_mock()
From 1b4e658f098bda3161ff0d5ffee07ea9be5c1d15 Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Wed, 24 Oct 2018 20:33:36 -0400
Subject: [PATCH 3/3] Don't try to set selinux context for nodev or vfat file
systems.
Related: rhbz#1579375
---
blivet/formats/fs.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py
index b915a2de..6f09eaff 100644
--- a/blivet/formats/fs.py
+++ b/blivet/formats/fs.py
@@ -76,6 +76,7 @@ class FS(DeviceFormat):
_sync_class = fssync.UnimplementedFSSync
_writelabel_class = fswritelabel.UnimplementedFSWriteLabel
_writeuuid_class = fswriteuuid.UnimplementedFSWriteUUID
+ _selinux_supported = True
# This constant is aquired by testing some filesystems
# and it's giving us percentage of space left after the format.
# This number is more guess than precise number because this
@@ -565,7 +566,7 @@ def _post_setup(self, **kwargs):
chroot = kwargs.get("chroot", "/")
mountpoint = kwargs.get("mountpoint") or self.mountpoint
- if flags.selinux and "ro" not in self._mount.mount_options(options).split(",") and flags.selinux_reset_fcon:
+ if self._selinux_supported and flags.selinux and "ro" not in self._mount.mount_options(options).split(",") and flags.selinux_reset_fcon:
ret = util.reset_file_context(mountpoint, chroot)
if not ret:
log.warning("Failed to reset SElinux context for newly mounted filesystem root directory to default.")
@@ -902,6 +903,7 @@ class FATFS(FS):
_metadata_size_factor = 0.99 # fat metadata may take 1% of space
# FIXME this should be fat32 in some cases
parted_system = fileSystemType["fat16"]
+ _selinux_supported = False
def generate_new_uuid(self):
ret = ""
@@ -1235,6 +1237,7 @@ class NoDevFS(FS):
""" nodev filesystem base class """
_type = "nodev"
_mount_class = fsmount.NoDevFSMount
+ _selinux_supported = False
def __init__(self, **kwargs):
FS.__init__(self, **kwargs)

View File

@ -1,91 +0,0 @@
From 471d43cbfe99db1c8246fb863e3ce49b3403fc61 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 11 Sep 2019 10:48:19 +0200
Subject: [PATCH] Fix util.detect_virt function
Fixed the systemd Manager object path, also get_property_sync
returns a tuple so we need to check its first element.
Resolves: rhbz#1676935
---
blivet/util.py | 8 ++++----
tests/formats_test/disklabel_test.py | 26 ++++++++++++++------------
tests/util_test.py | 4 ++++
3 files changed, 22 insertions(+), 16 deletions(-)
diff --git a/blivet/util.py b/blivet/util.py
index 2932e8b5..27468992 100644
--- a/blivet/util.py
+++ b/blivet/util.py
@@ -40,7 +40,7 @@ program_log_lock = Lock()
SYSTEMD_SERVICE = "org.freedesktop.systemd1"
-SYSTEMD_MANAGER_PATH = "/org/freedesktop/systemd1/Manager"
+SYSTEMD_MANAGER_PATH = "/org/freedesktop/systemd1"
SYSTEMD_MANAGER_IFACE = "org.freedesktop.systemd1.Manager"
VIRT_PROP_NAME = "Virtualization"
@@ -1115,6 +1115,6 @@ def detect_virt():
vm = safe_dbus.get_property_sync(SYSTEMD_SERVICE, SYSTEMD_MANAGER_PATH,
SYSTEMD_MANAGER_IFACE, VIRT_PROP_NAME)
except (safe_dbus.DBusCallError, safe_dbus.DBusPropertyError):
- vm = None
-
- return vm in ('qemu', 'kvm')
+ return False
+ else:
+ return vm[0] in ('qemu', 'kvm')
diff --git a/tests/formats_test/disklabel_test.py b/tests/formats_test/disklabel_test.py
index 4b105da6..94f3775f 100644
--- a/tests/formats_test/disklabel_test.py
+++ b/tests/formats_test/disklabel_test.py
@@ -163,16 +163,18 @@ class DiskLabelTestCase(unittest.TestCase):
arch.is_efi.return_value = False
arch.is_s390.return_value = True
- with mock.patch.object(dl, '_label_type_size_check') as size_check:
- size_check.return_value = True
- with mock.patch("blivet.formats.disklabel.blockdev.s390") as _s390:
- _s390.dasd_is_fba.return_value = False
- self.assertEqual(dl._get_best_label_type(), "msdos")
-
- _s390.dasd_is_fba.return_value = True
- self.assertEqual(dl._get_best_label_type(), "msdos")
-
- _s390.dasd_is_fba.return_value = False
- dl._parted_device.type = parted.DEVICE_DASD
- self.assertEqual(dl._get_best_label_type(), "dasd")
+ with mock.patch('blivet.util.detect_virt') as virt:
+ virt.return_value = False
+ with mock.patch.object(dl, '_label_type_size_check') as size_check:
+ size_check.return_value = True
+ with mock.patch("blivet.formats.disklabel.blockdev.s390") as _s390:
+ _s390.dasd_is_fba.return_value = False
+ self.assertEqual(dl._get_best_label_type(), "msdos")
+
+ _s390.dasd_is_fba.return_value = True
+ self.assertEqual(dl._get_best_label_type(), "msdos")
+
+ _s390.dasd_is_fba.return_value = False
+ dl._parted_device.type = parted.DEVICE_DASD
+ self.assertEqual(dl._get_best_label_type(), "dasd")
arch.is_s390.return_value = False
diff --git a/tests/util_test.py b/tests/util_test.py
index 5fa3070e..9a2ff492 100644
--- a/tests/util_test.py
+++ b/tests/util_test.py
@@ -37,6 +37,10 @@ class MiscTest(unittest.TestCase):
# real deduplication
self.assertEqual([1, 2, 3, 4, 5, 6], util.dedup_list([1, 2, 3, 4, 2, 2, 2, 1, 3, 5, 3, 6, 6, 2, 3, 1, 5]))
+ def test_detect_virt(self):
+ in_virt = not util.run_program(["systemd-detect-virt", "--vm"])
+ self.assertEqual(util.detect_virt(), in_virt)
+
class TestDefaultNamedtuple(unittest.TestCase):
def test_default_namedtuple(self):
--
2.20.1

View File

@ -1,183 +0,0 @@
From 83a42f3e232c7c4a02deb3539972c82b6dca284b Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 4 Oct 2019 12:30:03 +0200
Subject: [PATCH 1/2] Add a new "sector_size" property to storage devices.
This represents the logical sector size of the device.
Related: rhbz#1754446
---
blivet/devices/disk.py | 6 +++++-
blivet/devices/md.py | 11 +++++++++++
blivet/devices/partition.py | 7 +++++++
blivet/devices/storage.py | 15 +++++++++++++++
4 files changed, 38 insertions(+), 1 deletion(-)
diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index bf2f7a4f..7dfeabf0 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -687,7 +687,7 @@ def __init__(self, device, **kwargs):
"""
self.mode = kwargs.pop("mode")
self.devname = kwargs.pop("devname")
- self.sector_size = kwargs.pop("sector_size")
+ self._sector_size = kwargs.pop("sector_size")
DiskDevice.__init__(self, device, **kwargs)
@@ -710,3 +710,7 @@ def description(self):
% {'devname': self.devname,
'mode': self.mode,
'path': self.path}
+
+ @property
+ def sector_size(self):
+ return self._sector_size
diff --git a/blivet/devices/md.py b/blivet/devices/md.py
index 6a837df0..0b6da980 100644
--- a/blivet/devices/md.py
+++ b/blivet/devices/md.py
@@ -19,10 +19,13 @@
# Red Hat Author(s): David Lehman <dlehman@redhat.com>
#
+import math
import os
import six
import time
+from six.moves import reduce
+
import gi
gi.require_version("BlockDev", "2.0")
@@ -195,6 +198,14 @@ def level(self, value):
self._level = level
+ @property
+ def sector_size(self):
+ if not self.exists:
+ # Least common multiple of parents' sector sizes
+ return reduce(lambda a, b: a * b // math.gcd(a, b), (int(p.sector_size) for p in self.parents))
+
+ return super(MDRaidArrayDevice, self).sector_size
+
@property
def chunk_size(self):
if self.exists and self._chunk_size == Size(0):
diff --git a/blivet/devices/partition.py b/blivet/devices/partition.py
index 623e1c9d..73daa76f 100644
--- a/blivet/devices/partition.py
+++ b/blivet/devices/partition.py
@@ -729,6 +729,13 @@ def protected(self):
def protected(self, value):
self._protected = value
+ @property
+ def sector_size(self):
+ if self.disk:
+ return self.disk.sector_size
+
+ return super(PartitionDevice, self).sector_size
+
def _pre_resize(self):
if not self.exists:
raise errors.DeviceError("device has not been created", self.name)
diff --git a/blivet/devices/storage.py b/blivet/devices/storage.py
index e087fa64..91c5e60e 100644
--- a/blivet/devices/storage.py
+++ b/blivet/devices/storage.py
@@ -190,6 +190,21 @@ def raw_device(self):
""" The device itself, or when encrypted, the backing device. """
return self
+ @property
+ def sector_size(self):
+ """ Logical sector (block) size of this device """
+ if not self.exists:
+ if self.parents:
+ return self.parents[0].sector_size
+ else:
+ return LINUX_SECTOR_SIZE
+
+ block_size = util.get_sysfs_attr(self.sysfs_path, "queue/logical_block_size")
+ if block_size:
+ return int(block_size)
+ else:
+ return LINUX_SECTOR_SIZE
+
@property
def controllable(self):
return self._controllable and not flags.testing and not self.unavailable_type_dependencies()
From 9f81bd1ffb877862760223ba88f2086deebd2d06 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 4 Oct 2019 12:37:01 +0200
Subject: [PATCH 2/2] Do not allow creating VGs with PVs with different sector
size
New versions of LVM don't allow mixing PVs with different sector
sizes in one VG.
Resolves: rhbz#1754446
---
blivet/devices/lvm.py | 12 ++++++++++++
tests/devices_test/lvm_test.py | 13 ++++++++++++-
2 files changed, 24 insertions(+), 1 deletion(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 4347f483..b9da286a 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -356,6 +356,18 @@ def _remove_log_vol(self, lv):
def _add_parent(self, parent):
super(LVMVolumeGroupDevice, self)._add_parent(parent)
+ # we are creating new VG or adding a new PV to an existing (complete) one
+ if not self.exists or (self.exists and self._complete):
+ parent_sectors = set([p.sector_size for p in self.pvs] + [parent.sector_size])
+ if len(parent_sectors) != 1:
+ if not self.exists:
+ msg = "The volume group %s cannot be created. Selected disks have " \
+ "inconsistent sector sizes (%s)." % (self.name, parent_sectors)
+ else:
+ msg = "Disk %s cannot be added to this volume group. LVM doesn't " \
+ "allow using physical volumes with inconsistent (logical) sector sizes." % parent.name
+ raise ValueError(msg)
+
if (self.exists and parent.format.exists and
len(self.parents) + 1 == self.pv_count):
self._complete = True
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
index 8ed577f4..a32c1d83 100644
--- a/tests/devices_test/lvm_test.py
+++ b/tests/devices_test/lvm_test.py
@@ -2,7 +2,7 @@
import test_compat # pylint: disable=unused-import
import six
-from six.moves.mock import patch # pylint: disable=no-name-in-module,import-error
+from six.moves.mock import patch, PropertyMock # pylint: disable=no-name-in-module,import-error
import unittest
import blivet
@@ -352,6 +352,17 @@ def test_target_size(self):
self.assertEqual(lv.target_size, orig_size)
self.assertEqual(lv.size, orig_size)
+ def test_lvm_inconsistent_sector_size(self):
+ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("1024 MiB"))
+ pv2 = StorageDevice("pv2", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("1024 MiB"))
+
+ with patch("blivet.devices.StorageDevice.sector_size", new_callable=PropertyMock) as mock_property:
+ mock_property.__get__ = lambda _mock, pv, _class: 512 if pv.name == "pv1" else 4096
+ with six.assertRaisesRegex(self, ValueError, "The volume group testvg cannot be created."):
+ LVMVolumeGroupDevice("testvg", parents=[pv, pv2])
+
class TypeSpecificCallsTest(unittest.TestCase):
def test_type_specific_calls(self):

View File

@ -1,309 +0,0 @@
From c85a80ca54eabb1cf2458a3e17b3472ba2eb0914 Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Fri, 1 Nov 2019 12:07:43 -0400
Subject: [PATCH 1/2] Override LVM skip-activation to allow for thorough
removal.
When we have been told to remove the LV or manage the formatting we
must tell LVM to ignore the skip-activation bit. Otherwise we have
no way to properly perform the requested management.
Resolves: rhbz#1766498
---
blivet/deviceaction.py | 35 ++++++++++++++++++++++++++++++++++
blivet/devices/lvm.py | 12 ++++--------
tests/action_test.py | 16 ++++++++++++++++
tests/devices_test/lvm_test.py | 29 ++++++++++++++++++++++++++++
4 files changed, 84 insertions(+), 8 deletions(-)
diff --git a/blivet/deviceaction.py b/blivet/deviceaction.py
index 14a06ff0..57115662 100644
--- a/blivet/deviceaction.py
+++ b/blivet/deviceaction.py
@@ -393,10 +393,29 @@ class ActionDestroyDevice(DeviceAction):
super(ActionDestroyDevice, self)._check_device_dependencies()
+ def apply(self):
+ """ apply changes related to the action to the device(s) """
+ if self._applied:
+ return
+
+ if hasattr(self.device, 'ignore_skip_activation'):
+ self.device.ignore_skip_activation += 1
+
+ super(ActionDestroyDevice, self).apply()
+
def execute(self, callbacks=None):
super(ActionDestroyDevice, self).execute(callbacks=callbacks)
self.device.destroy()
+ def cancel(self):
+ if not self._applied:
+ return
+
+ if hasattr(self.device, 'ignore_skip_activation'):
+ self.device.ignore_skip_activation -= 1
+
+ super(ActionDestroyDevice, self).cancel()
+
def requires(self, action):
""" Return True if self requires action.
@@ -715,6 +734,9 @@ class ActionDestroyFormat(DeviceAction):
return
self.device.format = None
+ if hasattr(self.device, 'ignore_skip_activation'):
+ self.device.ignore_skip_activation += 1
+
super(ActionDestroyFormat, self).apply()
def execute(self, callbacks=None):
@@ -739,6 +761,8 @@ class ActionDestroyFormat(DeviceAction):
return
self.device.format = self.orig_format
+ if hasattr(self.device, 'ignore_skip_activation'):
+ self.device.ignore_skip_activation -= 1
super(ActionDestroyFormat, self).cancel()
@property
@@ -834,6 +858,9 @@ class ActionResizeFormat(DeviceAction):
return
self.device.format.target_size = self._target_size
+ if hasattr(self.device, 'ignore_skip_activation'):
+ self.device.ignore_skip_activation += 1
+
super(ActionResizeFormat, self).apply()
def execute(self, callbacks=None):
@@ -854,6 +881,9 @@ class ActionResizeFormat(DeviceAction):
return
self.device.format.target_size = self.orig_size
+ if hasattr(self.device, 'ignore_skip_activation'):
+ self.device.ignore_skip_activation -= 1
+
super(ActionResizeFormat, self).cancel()
def requires(self, action):
@@ -1056,6 +1086,9 @@ class ActionConfigureFormat(DeviceAction):
return
setattr(self.device.format, self.attr, self.new_value)
+ if hasattr(self.device, 'ignore_skip_activation'):
+ self.device.ignore_skip_activation += 1
+
super(ActionConfigureFormat, self).apply()
def cancel(self):
@@ -1063,6 +1096,8 @@ class ActionConfigureFormat(DeviceAction):
return
setattr(self.device.format, self.attr, self.old_value)
+ if hasattr(self.device, 'ignore_skip_activation'):
+ self.device.ignore_skip_activation -= 1
def execute(self, callbacks=None):
super(ActionConfigureFormat, self).execute(callbacks=callbacks)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 06191110..58adf5cf 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -628,6 +628,8 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
self.uuid = uuid
self.seg_type = seg_type or "linear"
self._raid_level = None
+ self.ignore_skip_activation = 0
+
if self.seg_type in lvm.raid_seg_types:
self._raid_level = lvm.raid_levels.raid_level(self.seg_type)
else:
@@ -1367,12 +1369,6 @@ class LVMSnapshotMixin(object):
# the old snapshot cannot be setup and torn down
pass
- def _setup(self, orig=False):
- """ Open, or set up, a device. """
- log_method_call(self, self.name, orig=orig, status=self.status,
- controllable=self.controllable)
- blockdev.lvm.lvactivate(self.vg.name, self._name, ignore_skip=True)
-
@old_snapshot_specific
def teardown(self, recursive=False):
# the old snapshot cannot be setup and torn down
@@ -1969,12 +1965,12 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
def display_lv_name(self):
return self.lvname
- @type_specific
def _setup(self, orig=False):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status,
controllable=self.controllable)
- blockdev.lvm.lvactivate(self.vg.name, self._name)
+ ignore_skip_activation = self.is_snapshot_lv or self.ignore_skip_activation > 0
+ blockdev.lvm.lvactivate(self.vg.name, self._name, ignore_skip=ignore_skip_activation)
@type_specific
def _pre_create(self):
diff --git a/tests/action_test.py b/tests/action_test.py
index 101d5a21..24ed10b2 100644
--- a/tests/action_test.py
+++ b/tests/action_test.py
@@ -1025,12 +1025,28 @@ class DeviceActionTestCase(StorageTestCase):
# ActionDestroyFormat
original_format = lv_root.format
action = ActionDestroyFormat(lv_root)
+ orig_ignore_skip = lv_root.ignore_skip_activation
self.assertEqual(lv_root.format, original_format)
self.assertNotEqual(lv_root.format.type, None)
action.apply()
self.assertEqual(lv_root.format.type, None)
+ self.assertEqual(lv_root.ignore_skip_activation, orig_ignore_skip + 1)
action.cancel()
self.assertEqual(lv_root.format, original_format)
+ self.assertEqual(lv_root.ignore_skip_activation, orig_ignore_skip)
+
+ # ActionDestroyDevice
+ action1 = ActionDestroyFormat(lv_root)
+ orig_ignore_skip = lv_root.ignore_skip_activation
+ action1.apply()
+ self.assertEqual(lv_root.ignore_skip_activation, orig_ignore_skip + 1)
+ action2 = ActionDestroyDevice(lv_root)
+ action2.apply()
+ self.assertEqual(lv_root.ignore_skip_activation, orig_ignore_skip + 2)
+ action2.cancel()
+ self.assertEqual(lv_root.ignore_skip_activation, orig_ignore_skip + 1)
+ action1.cancel()
+ self.assertEqual(lv_root.ignore_skip_activation, orig_ignore_skip)
sdc = self.storage.devicetree.get_device_by_name("sdc")
sdc.format = None
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
index 76a3a5db..c4c50748 100644
--- a/tests/devices_test/lvm_test.py
+++ b/tests/devices_test/lvm_test.py
@@ -360,6 +360,35 @@ class LVMDeviceTest(unittest.TestCase):
with six.assertRaisesRegex(self, ValueError, "The volume group testvg cannot be created."):
LVMVolumeGroupDevice("testvg", parents=[pv, pv2])
+ def test_skip_activate(self):
+ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("1 GiB"), exists=True)
+ vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
+ lv = LVMLogicalVolumeDevice("data_lv", parents=[vg], size=Size("500 MiB"), exists=True)
+
+ with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
+ with patch.object(lv, "_pre_setup"):
+ lv.setup()
+ self.assertTrue(lvm.lvactivate.called_with(vg.name, lv.lvname, ignore_skip=False))
+
+ lv.ignore_skip_activation += 1
+ with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
+ with patch.object(lv, "_pre_setup"):
+ lv.setup()
+ self.assertTrue(lvm.lvactivate.called_with(vg.name, lv.lvname, ignore_skip=True))
+
+ lv.ignore_skip_activation += 1
+ with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
+ with patch.object(lv, "_pre_setup"):
+ lv.setup()
+ self.assertTrue(lvm.lvactivate.called_with(vg.name, lv.lvname, ignore_skip=True))
+
+ lv.ignore_skip_activation -= 2
+ with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
+ with patch.object(lv, "_pre_setup"):
+ lv.setup()
+ self.assertTrue(lvm.lvactivate.called_with(vg.name, lv.lvname, ignore_skip=False))
+
class TypeSpecificCallsTest(unittest.TestCase):
def test_type_specific_calls(self):
--
2.24.1
From 0e19f91ff0917b7c498cdc2e6d5484847cf18cee Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Tue, 17 Dec 2019 14:43:02 -0500
Subject: [PATCH 2/2] Make sure LVs are writable before wiping.
Related: rhbz#1766498
---
blivet/deviceaction.py | 3 +++
blivet/devicelibs/lvm.py | 18 ++++++++++++++++++
blivet/devices/lvm.py | 4 ++++
3 files changed, 25 insertions(+)
diff --git a/blivet/deviceaction.py b/blivet/deviceaction.py
index 57115662..ac89365b 100644
--- a/blivet/deviceaction.py
+++ b/blivet/deviceaction.py
@@ -745,6 +745,9 @@ class ActionDestroyFormat(DeviceAction):
super(ActionDestroyFormat, self).execute(callbacks=callbacks)
status = self.device.status
self.device.setup(orig=True)
+ if hasattr(self.device, 'set_rw'):
+ self.device.set_rw()
+
self.format.destroy()
udev.settle()
if isinstance(self.device, PartitionDevice) and self.device.disklabel_supported:
diff --git a/blivet/devicelibs/lvm.py b/blivet/devicelibs/lvm.py
index 8eea9d19..65dc425e 100644
--- a/blivet/devicelibs/lvm.py
+++ b/blivet/devicelibs/lvm.py
@@ -38,7 +38,9 @@ from . import raid
from ..size import Size
from ..i18n import N_
from ..flags import flags
+from ..static_data import lvs_info
from ..tasks import availability
+from ..util import run_program
# some of lvm's defaults that we have no way to ask it for
LVM_PE_START = Size("1 MiB")
@@ -187,6 +189,22 @@ def lvmetad_socket_exists():
return os.path.exists(LVMETAD_SOCKET_PATH)
+def ensure_lv_is_writable(vg_name, lv_name):
+ lv_info = lvs_info.cache.get("%s-%s" % (vg_name, lv_name))
+ if lv_info is None:
+ return
+
+ if lv_info.attr[1] == 'w':
+ return
+
+ try:
+ rc = run_program(['lvchange', '-prw', "%s/%s" % (vg_name, lv_name)])
+ except OSError:
+ rc = -1
+
+ return rc == 0
+
+
def is_lvm_name_valid(name):
# No . or ..
if name == '.' or name == '..':
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 58adf5cf..dbecc1e5 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -951,6 +951,10 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
# set up the vg's pvs so lvm can remove the lv
self.vg.setup_parents(orig=True)
+ def set_rw(self):
+ """ Run lvchange as needed to ensure the lv is not read-only. """
+ lvm.ensure_lv_is_writable(self.vg.name, self.lvname)
+
@property
def lvname(self):
""" The LV's name (not including VG name). """
--
2.24.1

View File

@ -1,195 +0,0 @@
From 16db72b7adc5e1a295ecd52c0a53ee5a12111878 Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Tue, 7 Jan 2020 17:10:24 -0500
Subject: [PATCH 1/2] Make minimal and optimal alignment getters public.
Related: rhbz#1781106
---
blivet/formats/disklabel.py | 10 +++++-----
tests/formats_test/disklabel_test.py | 6 +++---
2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/blivet/formats/disklabel.py b/blivet/formats/disklabel.py
index a435bc59..a3f9d04b 100644
--- a/blivet/formats/disklabel.py
+++ b/blivet/formats/disklabel.py
@@ -462,7 +462,7 @@ class DiskLabel(DeviceFormat):
return self._disk_label_alignment
- def _get_minimal_alignment(self):
+ def get_minimal_alignment(self):
""" Return the device's minimal alignment for new partitions.
:rtype: :class:`parted.Alignment`
@@ -484,7 +484,7 @@ class DiskLabel(DeviceFormat):
return self._minimal_alignment
- def _get_optimal_alignment(self):
+ def get_optimal_alignment(self):
""" Return the device's optimal alignment for new partitions.
:rtype: :class:`parted.Alignment`
@@ -502,7 +502,7 @@ class DiskLabel(DeviceFormat):
# if there is no optimal alignment, use the minimal alignment,
# which has already been intersected with the disklabel
# alignment
- alignment = self._get_minimal_alignment()
+ alignment = self.get_minimal_alignment()
else:
try:
alignment = optimal_alignment.intersect(disklabel_alignment)
@@ -524,13 +524,13 @@ class DiskLabel(DeviceFormat):
small to be aligned
"""
# default to the optimal alignment
- alignment = self._get_optimal_alignment()
+ alignment = self.get_optimal_alignment()
if size is None:
return alignment
# use the minimal alignment if the requested size is smaller than the
# optimal io size
- minimal_alignment = self._get_minimal_alignment()
+ minimal_alignment = self.get_minimal_alignment()
optimal_grain_size = Size(alignment.grainSize * self.sector_size)
minimal_grain_size = Size(minimal_alignment.grainSize * self.sector_size)
if size < minimal_grain_size:
diff --git a/tests/formats_test/disklabel_test.py b/tests/formats_test/disklabel_test.py
index 93ce8c4a..6a1187e1 100644
--- a/tests/formats_test/disklabel_test.py
+++ b/tests/formats_test/disklabel_test.py
@@ -41,8 +41,8 @@ class DiskLabelTestCase(unittest.TestCase):
# make sure the private methods all return the expected values
self.assertEqual(dl._get_disk_label_alignment(), disklabel_alignment)
- self.assertEqual(dl._get_minimal_alignment(), minimal_alignment)
- self.assertEqual(dl._get_optimal_alignment(), optimal_alignment)
+ self.assertEqual(dl.get_minimal_alignment(), minimal_alignment)
+ self.assertEqual(dl.get_optimal_alignment(), optimal_alignment)
# validate result when passing a start alignment to get_end_alignment
self.assertEqual(dl.get_end_alignment(alignment=optimal_alignment),
@@ -61,7 +61,7 @@ class DiskLabelTestCase(unittest.TestCase):
minimal_end_alignment)
# test the old deprecated properties' values
- self.assertEqual(dl.alignment, dl._get_optimal_alignment())
+ self.assertEqual(dl.alignment, dl.get_optimal_alignment())
self.assertEqual(dl.end_alignment, dl.get_end_alignment())
@patch("blivet.formats.disklabel.arch")
--
2.24.1
From f5810a412048bd445dbed02ce0d01e50a1d083ec Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Tue, 7 Jan 2020 17:11:43 -0500
Subject: [PATCH 2/2] Align base sizes up if smaller than min I/O size.
Resolves: rhbz#1781106
---
blivet/partitioning.py | 18 +++++++++++++++---
tests/partitioning_test.py | 34 ++++++++++++++++++++++++++++++++++
2 files changed, 49 insertions(+), 3 deletions(-)
diff --git a/blivet/partitioning.py b/blivet/partitioning.py
index 026a3f8c..bc0fe237 100644
--- a/blivet/partitioning.py
+++ b/blivet/partitioning.py
@@ -408,7 +408,11 @@ def add_partition(disklabel, free, part_type, size, start=None, end=None):
else:
_size = size
- alignment = disklabel.get_alignment(size=_size)
+ try:
+ alignment = disklabel.get_alignment(size=_size)
+ except AlignmentError:
+ alignment = disklabel.get_minimal_alignment()
+
end_alignment = disklabel.get_end_alignment(alignment=alignment)
else:
alignment = parted.Alignment(grainSize=1, offset=0)
@@ -646,7 +650,12 @@ def do_partitioning(storage, boot_disk=None):
def align_size_for_disklabel(size, disklabel):
# Align the base size to the disk's grain size.
- grain_size = Size(disklabel.alignment.grainSize)
+ try:
+ alignment = disklabel.get_alignment(size=size)
+ except AlignmentError:
+ alignment = disklabel.get_minimal_alignment()
+
+ grain_size = Size(alignment.grainSize)
grains, rem = divmod(size, grain_size)
return (grains * grain_size) + (grain_size if rem else Size(0))
@@ -751,7 +760,10 @@ def allocate_partitions(storage, disks, partitions, freespace, boot_disk=None):
disklabel = disklabels[_disk.path]
best = None
current_free = free
- alignment = disklabel.get_alignment(size=_part.req_size)
+ try:
+ alignment = disklabel.get_alignment(size=_part.req_size)
+ except AlignmentError:
+ alignment = disklabel.get_minimal_alignment()
# for growable requests, we don't want to pass the current free
# geometry to get_best_free_region -- this allows us to try the
diff --git a/tests/partitioning_test.py b/tests/partitioning_test.py
index ebd05260..4fe87ebe 100644
--- a/tests/partitioning_test.py
+++ b/tests/partitioning_test.py
@@ -179,6 +179,8 @@ class PartitioningTestCase(unittest.TestCase):
min_str = 'parted.Device.minimumAlignment'
opt_al = parted.Alignment(offset=0, grainSize=8192) # 4 MiB
min_al = parted.Alignment(offset=0, grainSize=2048) # 1 MiB
+ disk.format._minimal_alignment = None # drop cache
+ disk.format._optimal_alignment = None # drop cache
with patch(opt_str, opt_al) as optimal, patch(min_str, min_al) as minimal:
optimal_end = disk.format.get_end_alignment(alignment=optimal)
minimal_end = disk.format.get_end_alignment(alignment=minimal)
@@ -201,6 +203,38 @@ class PartitioningTestCase(unittest.TestCase):
disk.format.remove_partition(part)
self.assertEqual(len(disk.format.partitions), 0)
+ #
+ # adding a partition smaller than the minimal io size should yield
+ # a partition whose size is aligned up to the minimal io size
+ #
+ opt_str = 'parted.Device.optimumAlignment'
+ min_str = 'parted.Device.minimumAlignment'
+ opt_al = parted.Alignment(offset=0, grainSize=8192) # 4 MiB
+ min_al = parted.Alignment(offset=0, grainSize=2048) # 1 MiB
+ disk.format._minimal_alignment = None # drop cache
+ disk.format._optimal_alignment = None # drop cache
+ with patch(opt_str, opt_al) as optimal, patch(min_str, min_al) as minimal:
+ optimal_end = disk.format.get_end_alignment(alignment=optimal)
+ minimal_end = disk.format.get_end_alignment(alignment=minimal)
+
+ sector_size = Size(disk.format.sector_size)
+ length = 1024 # 512 KiB
+ size = Size(sector_size * length)
+ part = add_partition(disk.format, free, parted.PARTITION_NORMAL,
+ size)
+ self.assertEqual(part.geometry.length, min_al.grainSize)
+ self.assertEqual(optimal.isAligned(free, part.geometry.start),
+ False)
+ self.assertEqual(minimal.isAligned(free, part.geometry.start),
+ True)
+ self.assertEqual(optimal_end.isAligned(free, part.geometry.end),
+ False)
+ self.assertEqual(minimal_end.isAligned(free, part.geometry.end),
+ True)
+
+ disk.format.remove_partition(part)
+ self.assertEqual(len(disk.format.partitions), 0)
+
#
# add a partition with an unaligned start sector
#
--
2.24.1

View File

@ -1,130 +0,0 @@
From 4e23e410bb5fcab5db931ad42a9b46af6be4fb3d Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Thu, 16 Jan 2020 13:14:29 -0500
Subject: [PATCH 1/2] Add recognition of Dell FW RAID to udev.device_is_disk.
Resolves: rhbz#1758102
---
blivet/udev.py | 16 +++++++++++++++-
tests/udev_test.py | 42 ++++++++++++++++++++++++++++++++++++++++++
2 files changed, 57 insertions(+), 1 deletion(-)
diff --git a/blivet/udev.py b/blivet/udev.py
index 53e7b7ca..df2b4e64 100644
--- a/blivet/udev.py
+++ b/blivet/udev.py
@@ -353,7 +353,7 @@ def device_is_disk(info):
device_is_dm_lvm(info) or
device_is_dm_crypt(info) or
(device_is_md(info) and
- not device_get_md_container(info))))
+ (not device_get_md_container(info) and not all(device_is_disk(d) for d in device_get_slaves(info))))))
def device_is_partition(info):
@@ -432,6 +432,20 @@ def device_get_devname(info):
return info.get('DEVNAME')
+def device_get_slaves(info):
+ """ Return a list of udev device objects representing this device's slaves. """
+ slaves_dir = device_get_sysfs_path(info) + "/slaves/"
+ names = list()
+ if os.path.isdir(slaves_dir):
+ names = os.listdir(slaves_dir)
+
+ slaves = list()
+ for name in names:
+ slaves.append(get_device(device_node="/dev/" + name))
+
+ return slaves
+
+
def device_get_md_level(info):
""" Returns the RAID level of the array of which this device is a member.
diff --git a/tests/udev_test.py b/tests/udev_test.py
index 5cc81a05..beb8109c 100644
--- a/tests/udev_test.py
+++ b/tests/udev_test.py
@@ -35,3 +35,45 @@ class UdevTest(unittest.TestCase):
import blivet.udev
blivet.udev.trigger()
self.assertTrue(blivet.udev.util.run_program.called)
+
+ @mock.patch('blivet.udev.device_is_cdrom', return_value=False)
+ @mock.patch('blivet.udev.device_is_partition', return_value=False)
+ @mock.patch('blivet.udev.device_is_dm_partition', return_value=False)
+ @mock.patch('blivet.udev.device_is_dm_lvm', return_value=False)
+ @mock.patch('blivet.udev.device_is_dm_crypt', return_value=False)
+ @mock.patch('blivet.udev.device_is_md')
+ @mock.patch('blivet.udev.device_get_md_container')
+ @mock.patch('blivet.udev.device_get_slaves')
+ def test_udev_device_is_disk_md(self, *args):
+ import blivet.udev
+ info = dict(DEVTYPE='disk', SYS_PATH=mock.sentinel.md_path)
+ (device_get_slaves, device_get_md_container, device_is_md) = args[:3] # pylint: disable=unbalanced-tuple-unpacking
+
+ disk_parents = [dict(DEVTYPE="disk", SYS_PATH='/fake/path/2'),
+ dict(DEVTYPE="disk", SYS_PATH='/fake/path/3')]
+ partition_parents = [dict(DEVTYPE="partition", SYS_PATH='/fake/path/2'),
+ dict(DEVTYPE="partition", SYS_PATH='/fake/path/3')]
+ mixed_parents = [dict(DEVTYPE="partition", SYS_PATH='/fake/path/2'),
+ dict(DEVTYPE="partition", SYS_PATH='/fake/path/3')]
+
+ blivet.udev.os.path.exists.return_value = False # has_range checked in device_is_disk
+ device_is_md.return_value = True
+
+ # Intel FW RAID (MD RAID w/ container layer)
+ # device_get_container will return some mock value which will evaluate to True
+ device_get_md_container.return_value = mock.sentinel.md_container
+ device_get_slaves.side_effect = lambda info: list()
+ self.assertTrue(blivet.udev.device_is_disk(info))
+
+ # Normal MD RAID
+ device_get_slaves.side_effect = lambda info: partition_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
+ device_get_md_container.return_value = None
+ self.assertFalse(blivet.udev.device_is_disk(info))
+
+ # Dell FW RAID (MD RAID whose members are all whole disks)
+ device_get_slaves.side_effect = lambda info: disk_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
+ self.assertTrue(blivet.udev.device_is_disk(info))
+
+ # Normal MD RAID (w/ at least one non-disk member)
+ device_get_slaves.side_effect = lambda info: mixed_parents if info['SYS_PATH'] == mock.sentinel.md_path else list()
+ self.assertFalse(blivet.udev.device_is_disk(info))
--
2.24.1
From 1d75298702f55830a3d69858c3b0b7defa7bf6f2 Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Tue, 21 Jan 2020 15:28:27 -0500
Subject: [PATCH 2/2] Fix udev test names so they actually get run.
---
tests/udev_test.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tests/udev_test.py b/tests/udev_test.py
index beb8109c..653eeb6d 100644
--- a/tests/udev_test.py
+++ b/tests/udev_test.py
@@ -26,12 +26,12 @@ class UdevTest(unittest.TestCase):
for device in devices:
self.assertNotEqual(blivet.udev.get_device(device.sys_path), None)
- def udev_settle_test(self):
+ def test_udev_settle(self):
import blivet.udev
blivet.udev.settle()
self.assertTrue(blivet.udev.util.run_program.called)
- def udev_trigger_test(self):
+ def test_udev_trigger(self):
import blivet.udev
blivet.udev.trigger()
self.assertTrue(blivet.udev.util.run_program.called)
--
2.24.1

View File

@ -1,71 +0,0 @@
From a873679b9440105740e7e34f5a3fc9ce0f2c2ace Mon Sep 17 00:00:00 2001
From: Hongxu Jia <hongxu.jia@windriver.com>
Date: Tue, 28 Aug 2018 09:41:38 +0800
Subject: [PATCH 1/2] add `-y' to lvm.pvcreate
While reinstall a crypt fs, it occasionally failed
[snip]
|gi.overrides.BlockDev.LVMError: Process reported exit code 5:
WARNING: atari signature detected on /dev/mapper/luks-0e5f891c
-7701-48bc-a41e-8d626b6ef953 at offset 466. Wipe it? [y/n]:
[snip]
Add `-y' to lvm.pvcreate
Signed-off-by: Hongxu Jia <hongxu.jia@windriver.com>
---
blivet/formats/lvmpv.py | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py
index 260cc0bd..96d25394 100644
--- a/blivet/formats/lvmpv.py
+++ b/blivet/formats/lvmpv.py
@@ -120,9 +120,8 @@ class LVMPhysicalVolume(DeviceFormat):
log_method_call(self, device=self.device,
type=self.type, status=self.status)
- # Consider use of -Z|--zero
- # -f|--force or -y|--yes may be required
- blockdev.lvm.pvcreate(self.device, data_alignment=self.data_alignment)
+ ea_yes = blockdev.ExtraArg.new("-y", "")
+ blockdev.lvm.pvcreate(self.device, data_alignment=self.data_alignment, extra=[ea_yes])
def _destroy(self, **kwargs):
log_method_call(self, device=self.device,
--
2.24.1
From d3d86ec2383bbd8e2797ebaaed551a3fbe8ee437 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 29 Aug 2018 10:05:29 +0200
Subject: [PATCH 2/2] Adjust LVMPhysicalVolumeMethodsTestCase to new pvcreate
option
Adjust tests to changes in f8a7ee3dbd6617eb9a0add96b2c4d124d78a1b98
---
tests/formats_test/methods_test.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/tests/formats_test/methods_test.py b/tests/formats_test/methods_test.py
index 741c4f15..710fa1c5 100644
--- a/tests/formats_test/methods_test.py
+++ b/tests/formats_test/methods_test.py
@@ -389,10 +389,12 @@ class LVMPhysicalVolumeMethodsTestCase(FormatMethodsTestCase):
self.patches["blockdev"].lvm.pvremove.assert_called_with(self.format.device)
def _test_create_backend(self):
+ self.patches["blockdev"].ExtraArg.new.return_value = sentinel.extra_arg
self.format.exists = False
self.format.create()
self.patches["blockdev"].lvm.pvcreate.assert_called_with(self.format.device,
- data_alignment=self.format.data_alignment) # pylint: disable=no-member
+ data_alignment=self.format.data_alignment, # pylint: disable=no-member
+ extra=[sentinel.extra_arg])
class MDRaidMemberMethodsTestCase(FormatMethodsTestCase):
--
2.24.1

View File

@ -1,47 +0,0 @@
From 6d1863f3298fa7115b35e46ffd42ff56b5634256 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 20 Nov 2019 11:19:36 +0100
Subject: [PATCH 1/2] Add setters for requested_size/percent form
LVMVolumeGroupDevice
Anaconda needs these to be able to set reserved size or percent
specified in kickstart.
Resolves: rhbz#1737490
---
blivet/devices/lvm.py | 19 +++++++++++++++++++
1 file changed, 19 insertions(+)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index dbecc1e5..3b33104d 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -427,6 +427,25 @@ class LVMVolumeGroupDevice(ContainerDevice):
return self.align(reserved, roundup=True)
+ @reserved_space.setter
+ def reserved_space(self, value):
+ if self.exists:
+ raise ValueError("Can't set reserved space for an existing VG")
+
+ self._reserved_space = value
+
+ @property
+ def reserved_percent(self):
+ """ Reserved space in this VG in percent """
+ return self._reserved_percent
+
+ @reserved_percent.setter
+ def reserved_percent(self, value):
+ if self.exists:
+ raise ValueError("Can't set reserved percent for an existing VG")
+
+ self._reserved_percent = value
+
def _get_pv_usable_space(self, pv):
if isinstance(pv, MDRaidArrayDevice):
return self.align(pv.size - 2 * pv.format.pe_start)
--
2.25.4

View File

@ -1,33 +0,0 @@
From 2970b30815943edaa1575095cbf434fa9fc288a8 Mon Sep 17 00:00:00 2001
From: David Lehman <dlehman@redhat.com>
Date: Wed, 11 Mar 2020 12:58:50 -0400
Subject: [PATCH 2/2] Allow for reserved vg space and a growable thin pool.
(#1783946)
---
blivet/devices/lvm.py | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 3b33104d..ed25fd1a 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -417,10 +417,11 @@ class LVMVolumeGroupDevice(ContainerDevice):
reserved = self._reserved_percent * Decimal('0.01') * self.size
elif self._reserved_space > Size(0):
reserved = self._reserved_space
- elif self._thpool_reserve and any(lv.is_thin_pool for lv in self._lvs):
- reserved = min(max(self._thpool_reserve.percent * Decimal(0.01) * self.size,
- self._thpool_reserve.min),
- self._thpool_reserve.max)
+
+ if self._thpool_reserve and any(lv.is_thin_pool for lv in self._lvs):
+ reserved += min(max(self._thpool_reserve.percent * Decimal(0.01) * self.size,
+ self._thpool_reserve.min),
+ self._thpool_reserve.max)
# reserve space for the pmspare LV LVM creates behind our back
reserved += self.pmspare_size
--
2.25.4

View File

@ -19,11 +19,11 @@
Summary: A python module for system storage configuration Summary: A python module for system storage configuration
Name: python-blivet Name: python-blivet
Url: https://storageapis.wordpress.com/projects/blivet Url: https://storageapis.wordpress.com/projects/blivet
Version: 3.1.0 Version: 3.2.2
#%%global prerelease .b2 #%%global prerelease .b2
# prerelease, if defined, should be something like .a1, .b1, .b2.dev1, or .c2 # prerelease, if defined, should be something like .a1, .b1, .b2.dev1, or .c2
Release: 21%{?prerelease}%{?dist} Release: 6%{?prerelease}%{?dist}
Epoch: 1 Epoch: 1
License: LGPLv2+ License: LGPLv2+
Group: System Environment/Libraries Group: System Environment/Libraries
@ -33,43 +33,22 @@ Source0: http://github.com/storaged-project/blivet/archive/%{realname}-%{realver
Source1: http://github.com/storaged-project/blivet/archive/%{realname}-%{realversion}-tests.tar.gz Source1: http://github.com/storaged-project/blivet/archive/%{realname}-%{realversion}-tests.tar.gz
Patch0: 0001-force-lvm-cli.plugin Patch0: 0001-force-lvm-cli.plugin
Patch1: 0002-remove-btrfs-plugin.patch Patch1: 0002-remove-btrfs-plugin.patch
Patch2: 0003-separate-dmraid-availability-check.patch Patch2: 0003-Skip-test_mounting-for-filesystems-that-are-not-moun.patch
Patch3: 0004-allow-removing-btrfs-volumes-without-btrfs-support.patch Patch3: 0004-Add-extra-sleep-after-pvremove-call.patch
Patch4: 0005-arm7-cleanups.patch Patch4: 0005-Round-down-to-nearest-MiB-value-when-writing-ks-parittion-info.ks
Patch5: 0006-Fix-options-for-ISCSI-functions.patch Patch5: 0006-Blivet-RHEL-8.3-localization-update.patch
Patch6: 0007-Wipe-all-stale-metadata-after-creating-md-array.patch Patch6: 0007-Do-not-use-FSAVAIL-and-FSUSE-options-when-running-lsblk.patch
Patch7: 0008-Copy-the-iSCSI-initiator-name-file-to-the-installed-system.patch Patch7: 0008-set-allowed-disk-labels-for-s390x-as-standard-ones-plus-dasd.patch
Patch8: 0008-po-updates.patch Patch8: 0009-Do-not-use-BlockDev-utils_have_kernel_module-to-check-for-modules.patch
Patch9: 0009-Require-libfc-instead-of-fcoe-for-offloaded-FCoE.-15.patch Patch9: 0010-Fix-name-resolution-for-MD-devices-and-partitions-on.patch
Patch10: 0010-Use-udev-to-determine-if-disk-is-a-multipath-member.patch Patch10: 0011-Fix-ignoring-disk-devices-with-parents-or-children.patch
Patch11: 0011-Don-t-crash-if-blockdev-mpath-plugin-isn-t-available.patch
Patch12: 0012-Ensure-correct-type-of-mpath-cache-member-list.patch
Patch13: 0013-Various-test-fixes.patch
Patch14: 0014-Tests-archive.patch
Patch15: 0015-Deactivate-incomplete-VGs.patch
Patch16: 0016-Automatically-adjust-size-of-growable-devices-for-new-format.patch
Patch17: 0017-Add-flag-for-protecting-cdrom-devices-during-populate.patch
Patch18: 0018-Clean-up-some-errors-evident-in-installer-logs.patch
Patch19: 0019-Use-dasd-disklabel-for-vm-disks-backed-by-dasds.patch
Patch20: 0020-Fix-reading-LV-attributes-in-LVMVolumeGroupDevice.patch
Patch21: 0021-Correctly-handle-non-unicode-iSCSI-initiator-names.patch
Patch22: 0022-Do-not-crash-if-dm_get_member_raid_sets-fails.patch
Patch23: 0023-Minor-cleanups-to-reduce-log-noise.patch
Patch24: 0024-Fix-util.detect_virt-function.patch
Patch25: 0025-Check-for-PV-sector-size-when-creating-new-VG.patch
Patch26: 0026-Tell-lvm-to-ignore-skip-activation-flag-on-lvs-we-are-removing-or-otherwise-modifying.patch
Patch27: 0027-Align-base-partition-sizes-in-PartitionFactory.patch
Patch28: 0028-Add-recognition-of-Dell-FW-RAID-to-udev-device_is_disk.patch
Patch29: 0029-add-y-to-lvm.pvcreate.patch
Patch30: 0030-Add-setters-for-requested_size-percent-form-LVMVolum.patch
Patch31: 0031-Allow-for-reserved-vg-space-and-a-growable-thin-pool.patch
# Versions of required components (done so we make sure the buildrequires # Versions of required components (done so we make sure the buildrequires
# match the requires versions of things). # match the requires versions of things).
%global partedver 1.8.1 %global partedver 1.8.1
%global pypartedver 3.10.4 %global pypartedver 3.10.4
%global utillinuxver 2.15.1 %global utillinuxver 2.15.1
%global libblockdevver 2.17 %global libblockdevver 2.19
%global libbytesizever 0.3 %global libbytesizever 0.3
%global pyudevver 0.18 %global pyudevver 0.18
@ -193,7 +172,6 @@ configuration.
%autosetup -n %{realname}-%{realversion} -N %autosetup -n %{realname}-%{realversion} -N
%autosetup -n %{realname}-%{realversion} -b1 -p1 %autosetup -n %{realname}-%{realversion} -b1 -p1
%build %build
%{?with_python2:make PYTHON=%{__python2}} %{?with_python2:make PYTHON=%{__python2}}
%{?with_python3:make PYTHON=%{__python3}} %{?with_python3:make PYTHON=%{__python3}}
@ -225,11 +203,35 @@ configuration.
%endif %endif
%changelog %changelog
* Wed Jun 10 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-21 * Thu Aug 20 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-6
- Add setters for requested_size/percent form LVMVolumeGroupDevice - Fix name resolution for MD devices and partitions on them
Related: rhbz#1841131 Resolves: rhbz#1862904
- Allow for reserved vg space and a growable thin pool. - Fix ignoring disk devices with parents or children
Resolves: rhbz#1841131 Resolves: rhbz#1866243
* Thu Jul 16 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-5
- set allowed disk labels for s390x as standard ones (msdos + gpt) plus dasd
Resolves: rhbz#1855200
- Do not use BlockDev.utils_have_kernel_module to check for modules
Resolves: rhbz#1855344
* Thu Jul 09 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-4
- Blivet RHEL 8.3 localization update
Resolves: rhbz#182056
- Do not use FSAVAIL and FSUSE% options when running lsblk
Resolves: rhbz#1853624
* Tue Jun 30 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-3
- Round down to nearest MiB value when writing ks partition info
Resolves: rhbz#1850670
* Wed Jun 24 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-2
- Add extra sleep after pvremove call
Resolves: rhbz#1640601
* Fri May 22 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.2.2-1
- Rebase to the latest upstream release 3.2.2
Resolves: rhbz#1714970
* Mon Mar 02 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-20 * Mon Mar 02 2020 Vojtech Trefny <vtrefny@redhat.com> - 3.1.0-20
- add `-y' to lvm.pvcreate - add `-y' to lvm.pvcreate