Import from AlmaLinux stable repository

eabdullin 2024-05-15 08:39:37 +00:00
parent 6a62cce02a
commit f90fd69f37
13 changed files with 1813 additions and 1 deletion

@@ -0,0 +1,205 @@
From 7a86d4306e3022b73035e21f66d515174264700e Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 9 Mar 2023 13:18:42 +0100
Subject: [PATCH 1/2] Add support for specifying stripe size for RAID LVs
---
blivet/devices/lvm.py | 28 +++++++++++++++++---
tests/storage_tests/devices_test/lvm_test.py | 12 +++++++--
tests/unit_tests/devices_test/lvm_test.py | 27 +++++++++++++++++++
3 files changed, 61 insertions(+), 6 deletions(-)
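
In practice the new keyword is passed straight through to lvcreate's stripesize; a minimal usage sketch, modeled on the unit test added below (device names and sizes are illustrative):

    import blivet
    from blivet.size import Size
    from blivet.devices import StorageDevice, LVMVolumeGroupDevice, LVMLogicalVolumeDevice

    pv1 = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"), size=Size("1025 MiB"))
    pv2 = StorageDevice("pv2", fmt=blivet.formats.get_format("lvmpv"), size=Size("513 MiB"))
    vg = LVMVolumeGroupDevice("testvg", parents=[pv1, pv2])

    # stripe_size is accepted only for striped RAID segment types;
    # non-RAID, raid1 and mirror LVs raise DeviceError
    lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("1 GiB"),
                                fmt=blivet.formats.get_format("xfs"),
                                exists=False, seg_type="raid0", pvs=[pv1, pv2],
                                stripe_size=Size("1 MiB"))
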
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index b8595d63..41358e9b 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -659,7 +659,8 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None,
fmt=None, exists=False, sysfs_path='', grow=None, maxsize=None,
- percent=None, cache_request=None, pvs=None, from_lvs=None):
+ percent=None, cache_request=None, pvs=None, from_lvs=None,
+ stripe_size=0):
if not exists:
if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo", "cache-pool"] + lvm.raid_seg_types:
@@ -756,6 +757,15 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
if self._pv_specs:
self._assign_pv_space()
+ self._stripe_size = stripe_size
+ if not self.exists and self._stripe_size:
+ if self.seg_type not in lvm.raid_seg_types:
+ raise errors.DeviceError("Stripe size can be specified only for RAID volumes")
+ if self.seg_type in ("raid1", "RAID1", "1", 1, "mirror"):
+ raise errors.DeviceError("Specifying stripe size is not allowed for RAID1 or mirror")
+ if self.cache:
+ raise errors.DeviceError("Creating cached LVs with custom stripe size is not supported")
+
def _assign_pv_space(self):
if not self.is_raid_lv:
# nothing to do for non-RAID (and thus non-striped) LVs here
@@ -2295,7 +2305,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
parent_lv=None, int_type=None, origin=None, vorigin=False,
metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
compression=False, deduplication=False, index_memory=0,
- write_policy=None, cache_mode=None, attach_to=None):
+ write_policy=None, cache_mode=None, attach_to=None, stripe_size=0):
"""
:param name: the device name (generally a device node's basename)
:type name: str
@@ -2375,6 +2385,11 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
be attached to when created
:type attach_to: :class:`LVMLogicalVolumeDevice`
+ For RAID LVs only:
+
+ :keyword stripe_size: size of the RAID stripe
+ :type stripe_size: :class:`~.size.Size`
+
"""
if isinstance(parents, (list, ParentList)):
@@ -2395,7 +2410,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
LVMCachePoolMixin.__init__(self, metadata_size, cache_mode, attach_to)
LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
fmt, exists, sysfs_path, grow, maxsize,
- percent, cache_request, pvs, from_lvs)
+ percent, cache_request, pvs, from_lvs,
+ stripe_size)
LVMVDOPoolMixin.__init__(self, compression, deduplication, index_memory,
write_policy)
LVMVDOLogicalVolumeMixin.__init__(self)
@@ -2651,8 +2667,12 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
pvs = [spec.pv.path for spec in self._pv_specs]
pvs = pvs or None
+ extra = dict()
+ if self._stripe_size:
+ extra["stripesize"] = str(int(self._stripe_size.convert_to("KiB")))
+
blockdev.lvm.lvcreate(self.vg.name, self._name, self.size,
- type=self.seg_type, pv_list=pvs)
+ type=self.seg_type, pv_list=pvs, **extra)
else:
fast_pvs = [pv.path for pv in self.cache.fast_pvs]
diff --git a/tests/storage_tests/devices_test/lvm_test.py b/tests/storage_tests/devices_test/lvm_test.py
index a055fc27..97ef1c4b 100644
--- a/tests/storage_tests/devices_test/lvm_test.py
+++ b/tests/storage_tests/devices_test/lvm_test.py
@@ -1,4 +1,5 @@
import os
+import subprocess
from ..storagetestcase import StorageTestCase
@@ -127,7 +128,7 @@ class LVMTestCase(StorageTestCase):
self.assertTrue(snap.is_snapshot_lv)
self.assertEqual(snap.origin, thinlv)
- def _test_lvm_raid(self, seg_type, raid_level):
+ def _test_lvm_raid(self, seg_type, raid_level, stripe_size=0):
disk1 = self.storage.devicetree.get_device_by_path(self.vdevs[0])
self.assertIsNotNone(disk1)
self.storage.initialize_disk(disk1)
@@ -151,7 +152,7 @@ class LVMTestCase(StorageTestCase):
raidlv = self.storage.new_lv(fmt_type="ext4", size=blivet.size.Size("50 MiB"),
parents=[vg], name="blivetTestRAIDLV",
- seg_type=seg_type, pvs=[pv1, pv2])
+ seg_type=seg_type, pvs=[pv1, pv2], stripe_size=stripe_size)
self.storage.create_device(raidlv)
self.storage.do_it()
@@ -163,9 +164,16 @@ class LVMTestCase(StorageTestCase):
self.assertEqual(raidlv.raid_level, raid_level)
self.assertEqual(raidlv.seg_type, seg_type)
+ if stripe_size:
+ out = subprocess.check_output(["lvs", "-o", "stripe_size", "--noheadings", "--nosuffix", "--units=b", raidlv.vg.name + "/" + raidlv.lvname])
+ self.assertEqual(out.decode().strip(), str(int(stripe_size.convert_to())))
+
def test_lvm_raid_raid0(self):
self._test_lvm_raid("raid0", blivet.devicelibs.raid.RAID0)
+ def test_lvm_raid_raid0_stripe_size(self):
+ self._test_lvm_raid("raid0", blivet.devicelibs.raid.RAID0, stripe_size=blivet.size.Size("1 MiB"))
+
def test_lvm_raid_striped(self):
self._test_lvm_raid("striped", blivet.devicelibs.raid.Striped)
diff --git a/tests/unit_tests/devices_test/lvm_test.py b/tests/unit_tests/devices_test/lvm_test.py
index 995c2da4..d7b55224 100644
--- a/tests/unit_tests/devices_test/lvm_test.py
+++ b/tests/unit_tests/devices_test/lvm_test.py
@@ -363,6 +363,33 @@ class LVMDeviceTest(unittest.TestCase):
self.assertEqual(pv.format.free, Size("264 MiB"))
self.assertEqual(pv2.format.free, Size("256 MiB"))
+ def test_lvm_logical_volume_raid_stripe_size(self):
+ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("1025 MiB"))
+ pv2 = StorageDevice("pv2", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("513 MiB"))
+ vg = LVMVolumeGroupDevice("testvg", parents=[pv, pv2])
+
+ with self.assertRaises(blivet.errors.DeviceError):
+ # non-raid LV
+ lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("1 GiB"),
+ fmt=blivet.formats.get_format("xfs"),
+ exists=False, stripe_size=Size("1 MiB"))
+
+ with self.assertRaises(blivet.errors.DeviceError):
+ # raid1 LV
+ lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("1 GiB"),
+ fmt=blivet.formats.get_format("xfs"),
+ exists=False, seg_type="raid1", pvs=[pv, pv2],
+ stripe_size=Size("1 MiB"))
+
+ lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("1 GiB"),
+ fmt=blivet.formats.get_format("xfs"),
+ exists=False, seg_type="raid0", pvs=[pv, pv2],
+ stripe_size=Size("1 MiB"))
+
+ self.assertEqual(lv._stripe_size, Size("1 MiB"))
+
def test_target_size(self):
pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
size=Size("1 GiB"))
--
2.40.1
From bbfd1a70abe8271f5fe3d29fe2be3bb8a1c6ecbc Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 3 May 2023 08:55:31 +0200
Subject: [PATCH 2/2] Revert "tests: Skip test_lvcreate_type on CentOS/RHEL 9"
This reverts commit 16b90071145d2d0f19a38f3003561a0cc9d6e281.
The kernel issue has been resolved, so we no longer need to skip the test.
---
tests/skip.yml | 6 ------
1 file changed, 6 deletions(-)
diff --git a/tests/skip.yml b/tests/skip.yml
index 66b34493..c0ca0eaf 100644
--- a/tests/skip.yml
+++ b/tests/skip.yml
@@ -24,12 +24,6 @@
---
-- test: storage_tests.devices_test.lvm_test.LVMTestCase.test_lvm_raid
- skip_on:
- - distro: "centos"
- version: "9"
- reason: "Creating RAID 1 LV on CentOS/RHEL 9 causes a system deadlock"
-
- test: storage_tests.formats_test.fs_test.XFSTestCase.test_resize
skip_on:
- distro: ["centos", "enterprise_linux"]
--
2.40.1

@@ -0,0 +1,68 @@
From 1af0d3c37a93e431790e641a329a7f34dabf291a Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 2 Mar 2023 12:34:42 +0100
Subject: [PATCH] Fix setting kickstart data
When making our code PEP 8 compliant we also accidentally
renamed some pykickstart properties like onPart. This PR fixes that.
Resolves: rhbz#2174296
---
blivet/devices/btrfs.py | 4 ++--
blivet/devices/lvm.py | 2 +-
blivet/devices/partition.py | 6 +++---
3 files changed, 6 insertions(+), 6 deletions(-)
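
The root cause: pykickstart's data objects keep their historical camelCase attribute names, so the snake_case assignments introduced by the PEP 8 conversion silently created new, unused attributes instead of setting the real fields. A self-contained illustration (PartData here is a stand-in, not the actual pykickstart class):

    # Stand-in for a pykickstart data object with camelCase fields.
    class PartData(object):
        def __init__(self):
            self.onPart = ""

    data = PartData()
    data.on_part = "sda1"   # the bug: this creates a brand new attribute
    print(data.onPart)      # -> "" (pykickstart never sees the value)

    data.onPart = "sda1"    # the fix: use the name pykickstart actually reads
    print(data.onPart)      # -> "sda1"
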
diff --git a/blivet/devices/btrfs.py b/blivet/devices/btrfs.py
index 1ae6a04d..3f56624e 100644
--- a/blivet/devices/btrfs.py
+++ b/blivet/devices/btrfs.py
@@ -498,8 +498,8 @@ class BTRFSVolumeDevice(BTRFSDevice, ContainerDevice, RaidDevice):
def populate_ksdata(self, data):
super(BTRFSVolumeDevice, self).populate_ksdata(data)
- data.data_level = self.data_level.name if self.data_level else None
- data.metadata_level = self.metadata_level.name if self.metadata_level else None
+ data.dataLevel = self.data_level.name if self.data_level else None
+ data.metaDataLevel = self.metadata_level.name if self.metadata_level else None
data.devices = ["btrfs.%d" % p.id for p in self.parents]
data.preexist = self.exists
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 41358e9b..c3132457 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -1161,7 +1161,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
if self.req_grow:
# base size could be literal or percentage
- data.max_size_mb = self.req_max_size.convert_to(MiB)
+ data.maxSizeMB = self.req_max_size.convert_to(MiB)
elif data.resize:
data.size = self.target_size.convert_to(MiB)
diff --git a/blivet/devices/partition.py b/blivet/devices/partition.py
index 89d907c2..0e9250ce 100644
--- a/blivet/devices/partition.py
+++ b/blivet/devices/partition.py
@@ -982,14 +982,14 @@ class PartitionDevice(StorageDevice):
data.size = self.req_base_size.round_to_nearest(MiB, rounding=ROUND_DOWN).convert_to(spec=MiB)
data.grow = self.req_grow
if self.req_grow:
- data.max_size_mb = self.req_max_size.convert_to(MiB)
+ data.maxSizeMB = self.req_max_size.convert_to(MiB)
# data.disk = self.disk.name # by-id
if self.req_disks and len(self.req_disks) == 1:
data.disk = self.disk.name
- data.prim_only = self.req_primary
+ data.primOnly = self.req_primary
else:
- data.on_part = self.name # by-id
+ data.onPart = self.name # by-id
if data.resize:
# on s390x in particular, fractional sizes are reported, which
--
2.40.1

@@ -0,0 +1,133 @@
From c2b06150df0b876c7d442097b6c9ca90c9ca2ecc Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 4 May 2023 11:35:44 +0200
Subject: [PATCH] Do not set memory limit for LUKS2 when running in FIPS mode
With FIPS enabled, LUKS2 uses PBKDF2 rather than Argon2, so the
memory limit is not a valid parameter.
Resolves: rhbz#2193096
---
blivet/devicelibs/crypto.py | 11 +++++++
blivet/formats/luks.py | 12 ++++----
tests/unit_tests/formats_tests/luks_test.py | 30 +++++++++++++++++++
.../unit_tests/formats_tests/methods_test.py | 3 +-
4 files changed, 50 insertions(+), 6 deletions(-)
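
FIPS detection itself is a one-file procfs read; a standalone sketch equivalent to the helper this patch adds to blivet/devicelibs/crypto.py:

    import os

    FIPS_FILE = "/proc/sys/crypto/fips_enabled"

    def is_fips_enabled():
        if not os.path.exists(FIPS_FILE):
            # if the file doesn't exist, the kernel is definitely not in FIPS mode
            return False
        with open(FIPS_FILE) as f:
            return f.read().strip() == "1"

    print(is_fips_enabled())
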
diff --git a/blivet/devicelibs/crypto.py b/blivet/devicelibs/crypto.py
index f0caf0f7..68e68db1 100644
--- a/blivet/devicelibs/crypto.py
+++ b/blivet/devicelibs/crypto.py
@@ -21,6 +21,7 @@
#
import hashlib
+import os
import gi
gi.require_version("BlockDev", "2.0")
@@ -100,3 +101,13 @@ def calculate_integrity_metadata_size(device_size, algorithm=DEFAULT_INTEGRITY_A
jsize = (jsize / SECTOR_SIZE + 1) * SECTOR_SIZE # round up to sector
return msize + jsize
+
+
+def is_fips_enabled():
+ if not os.path.exists("/proc/sys/crypto/fips_enabled"):
+ # if the file doesn't exist, we are definitely not in FIPS mode
+ return False
+
+ with open("/proc/sys/crypto/fips_enabled", "r") as f:
+ enabled = f.read()
+ return enabled.strip() == "1"
diff --git a/blivet/formats/luks.py b/blivet/formats/luks.py
index 2637e0c5..adf3c711 100644
--- a/blivet/formats/luks.py
+++ b/blivet/formats/luks.py
@@ -303,11 +303,13 @@ class LUKS(DeviceFormat):
if luks_data.pbkdf_args:
self.pbkdf_args = luks_data.pbkdf_args
else:
- mem_limit = crypto.calculate_luks2_max_memory()
- if mem_limit:
- self.pbkdf_args = LUKS2PBKDFArgs(max_memory_kb=int(mem_limit.convert_to(KiB)))
- luks_data.pbkdf_args = self.pbkdf_args
- log.info("PBKDF arguments for LUKS2 not specified, using defaults with memory limit %s", mem_limit)
+ # argon is not used with FIPS so we don't need to adjust the memory when in FIPS mode
+ if not crypto.is_fips_enabled():
+ mem_limit = crypto.calculate_luks2_max_memory()
+ if mem_limit:
+ self.pbkdf_args = LUKS2PBKDFArgs(max_memory_kb=int(mem_limit.convert_to(KiB)))
+ luks_data.pbkdf_args = self.pbkdf_args
+ log.info("PBKDF arguments for LUKS2 not specified, using defaults with memory limit %s", mem_limit)
if self.pbkdf_args:
pbkdf = blockdev.CryptoLUKSPBKDF(type=self.pbkdf_args.type,
diff --git a/tests/unit_tests/formats_tests/luks_test.py b/tests/unit_tests/formats_tests/luks_test.py
index ec7b7592..1127e968 100644
--- a/tests/unit_tests/formats_tests/luks_test.py
+++ b/tests/unit_tests/formats_tests/luks_test.py
@@ -6,9 +6,14 @@ except ImportError:
import unittest
from blivet.formats.luks import LUKS
+from blivet.size import Size
+from blivet.static_data import luks_data
class LUKSNodevTestCase(unittest.TestCase):
+ def setUp(self):
+ luks_data.pbkdf_args = None
+
def test_create_discard_option(self):
# flags.discard_new=False --> no discard
fmt = LUKS(exists=False)
@@ -51,6 +56,31 @@ class LUKSNodevTestCase(unittest.TestCase):
fmt = LUKS(cipher="aes-cbc-plain64")
self.assertEqual(fmt.key_size, 0)
+ def test_luks2_pbkdf_memory_fips(self):
+ fmt = LUKS()
+ with patch("blivet.formats.luks.blockdev.crypto") as bd:
+ # fips enabled, pbkdf memory should not be set
+ with patch("blivet.formats.luks.crypto") as crypto:
+ attrs = {"is_fips_enabled.return_value": True,
+ "get_optimal_luks_sector_size.return_value": 0,
+ "calculate_luks2_max_memory.return_value": Size("256 MiB")}
+ crypto.configure_mock(**attrs)
+
+ fmt._create()
+ crypto.calculate_luks2_max_memory.assert_not_called()
+ self.assertEqual(bd.luks_format.call_args[1]["extra"].pbkdf.max_memory_kb, 0)
+
+ # fips disabled, pbkdf memory should be set
+ with patch("blivet.formats.luks.crypto") as crypto:
+ attrs = {"is_fips_enabled.return_value": False,
+ "get_optimal_luks_sector_size.return_value": 0,
+ "calculate_luks2_max_memory.return_value": Size("256 MiB")}
+ crypto.configure_mock(**attrs)
+
+ fmt._create()
+ crypto.calculate_luks2_max_memory.assert_called()
+ self.assertEqual(bd.luks_format.call_args[1]["extra"].pbkdf.max_memory_kb, 256 * 1024)
+
def test_sector_size(self):
fmt = LUKS()
self.assertEqual(fmt.luks_sector_size, 512)
diff --git a/tests/unit_tests/formats_tests/methods_test.py b/tests/unit_tests/formats_tests/methods_test.py
index 2743b7db..5d30c260 100644
--- a/tests/unit_tests/formats_tests/methods_test.py
+++ b/tests/unit_tests/formats_tests/methods_test.py
@@ -366,7 +366,8 @@ class LUKSMethodsTestCase(FormatMethodsTestCase):
def _test_create_backend(self):
self.format.exists = False
- self.format.create()
+ with patch("blivet.devicelibs.crypto.is_fips_enabled", return_value=False):
+ self.format.create()
self.assertTrue(self.patches["blockdev"].crypto.luks_format.called) # pylint: disable=no-member
def _test_setup_backend(self):
--
2.40.1

@@ -0,0 +1,265 @@
From eb16230427fc1081f8515e6ad69ccf99ca521e5d Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 4 Apr 2023 13:31:40 +0200
Subject: [PATCH 1/2] Add support for filesystem online resize
Resolves: rhbz#2168680
---
blivet/devices/lvm.py | 13 ++++++++-----
blivet/devices/partition.py | 11 ++++++-----
blivet/flags.py | 3 +++
blivet/formats/fs.py | 32 ++++++++++++++++++++++++++++----
blivet/formats/fslib.py | 7 +++++++
5 files changed, 52 insertions(+), 14 deletions(-)
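
Online resize stays opt-in behind the new flag; a sketch of the intended flow, assuming a mounted, grow-capable filesystem on an existing blivet-managed device (the device name is hypothetical):

    import blivet
    from blivet.flags import flags
    from blivet.size import Size

    b = blivet.Blivet()
    b.reset()
    dev = b.devicetree.get_device_by_name("testvg-datalv")  # hypothetical mounted LV

    flags.allow_online_fs_resize = True   # off by default; _pre_resize() raises FSError otherwise
    try:
        b.resize_device(dev, Size("100 GiB"))  # grow while mounted (e.g. ext4/XFS)
        b.do_it()
    finally:
        flags.allow_online_fs_resize = False
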
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index c3132457..ca45c4b5 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -42,6 +42,7 @@ from .. import errors
from .. import util
from ..storage_log import log_method_call
from .. import udev
+from ..flags import flags
from ..size import Size, KiB, MiB, ROUND_UP, ROUND_DOWN
from ..static_data.lvm_info import lvs_info
from ..tasks import availability
@@ -2729,12 +2730,14 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
# Setup VG parents (in case they are dmraid partitions for example)
self.vg.setup_parents(orig=True)
- if self.original_format.exists:
- self.original_format.teardown()
- if self.format.exists:
- self.format.teardown()
+ if not flags.allow_online_fs_resize:
+ if self.original_format.exists:
+ self.original_format.teardown()
+ if self.format.exists:
+ self.format.teardown()
+
+ udev.settle()
- udev.settle()
blockdev.lvm.lvresize(self.vg.name, self._name, self.size)
@type_specific
diff --git a/blivet/devices/partition.py b/blivet/devices/partition.py
index 0e9250ce..6ae4b8d3 100644
--- a/blivet/devices/partition.py
+++ b/blivet/devices/partition.py
@@ -745,11 +745,12 @@ class PartitionDevice(StorageDevice):
if not self.exists:
raise errors.DeviceError("device has not been created")
- # don't teardown when resizing luks
- if self.format.type == "luks" and self.children:
- self.children[0].format.teardown()
- else:
- self.teardown()
+ if not flags.allow_online_fs_resize:
+ # don't teardown when resizing luks
+ if self.format.type == "luks" and self.children:
+ self.children[0].format.teardown()
+ else:
+ self.teardown()
if not self.sysfs_path:
return
diff --git a/blivet/flags.py b/blivet/flags.py
index 6364164d..ecfa7ad7 100644
--- a/blivet/flags.py
+++ b/blivet/flags.py
@@ -91,6 +91,9 @@ class Flags(object):
self.debug_threads = False
+ # Allow online filesystem resizes
+ self.allow_online_fs_resize = False
+
def get_boot_cmdline(self):
with open("/proc/cmdline") as f:
buf = f.read().strip()
diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py
index 33922f3a..3f553eb0 100644
--- a/blivet/formats/fs.py
+++ b/blivet/formats/fs.py
@@ -56,7 +56,7 @@ from ..i18n import N_
from .. import udev
from ..mounts import mounts_cache
-from .fslib import kernel_filesystems
+from .fslib import kernel_filesystems, FSResize
import logging
log = logging.getLogger("blivet")
@@ -88,6 +88,9 @@ class FS(DeviceFormat):
# value is already unpredictable and can change in the future...
_metadata_size_factor = 1.0
+ # support for resize: grow/shrink, online/offline
+ _resize_support = 0
+
config_actions_map = {"label": "write_label"}
def __init__(self, **kwargs):
@@ -436,12 +439,27 @@ class FS(DeviceFormat):
self.write_uuid()
def _pre_resize(self):
- # file systems need a check before being resized
- self.do_check()
+ if self.status:
+ if flags.allow_online_fs_resize:
+ if self.target_size > self.size and not self._resize_support & FSResize.ONLINE_GROW:
+ raise FSError("This filesystem doesn't support online growing")
+ if self.target_size < self.size and not self._resize_support & FSResize.ONLINE_SHRINK:
+ raise FSError("This filesystem doesn't support online shrinking")
+ else:
+ raise FSError("Resizing of mounted filesystems is disabled")
+
+ if self.status:
+ # fsck tools in general don't allow checks on mounted filesystems
+ log.debug("Filesystem on %s is mounted, not checking", self.device)
+ else:
+ # file systems need a check before being resized
+ self.do_check()
+
super(FS, self)._pre_resize()
def _post_resize(self):
- self.do_check()
+ if not self.status:
+ self.do_check()
super(FS, self)._post_resize()
def do_check(self):
@@ -838,6 +856,7 @@ class Ext2FS(FS):
_formattable = True
_supported = True
_resizable = True
+ _resize_support = FSResize.ONLINE_GROW | FSResize.OFFLINE_GROW | FSResize.OFFLINE_SHRINK
_linux_native = True
_max_size = Size("8 TiB")
_dump = True
@@ -1097,6 +1116,7 @@ class XFS(FS):
_linux_native = True
_supported = True
_resizable = True
+ _resize_support = FSResize.ONLINE_GROW | FSResize.OFFLINE_GROW
_packages = ["xfsprogs"]
_fsck_class = fsck.XFSCK
_info_class = fsinfo.XFSInfo
@@ -1247,6 +1267,7 @@ class NTFS(FS):
_labelfs = fslabeling.NTFSLabeling()
_uuidfs = fsuuid.NTFSUUID()
_resizable = True
+ _resize_support = FSResize.OFFLINE_GROW | FSResize.OFFLINE_SHRINK
_formattable = True
_supported = True
_min_size = Size("1 MiB")
@@ -1490,6 +1511,9 @@ class TmpFS(NoDevFS):
# same, nothing actually needs to be set
pass
+ def _pre_resize(self):
+ self.do_check()
+
def do_resize(self):
# Override superclass method to record whether mount options
# should include an explicit size specification.
diff --git a/blivet/formats/fslib.py b/blivet/formats/fslib.py
index ea93b1fd..8722e942 100644
--- a/blivet/formats/fslib.py
+++ b/blivet/formats/fslib.py
@@ -36,3 +36,10 @@ def update_kernel_filesystems():
update_kernel_filesystems()
+
+
+class FSResize():
+ OFFLINE_SHRINK = 1 << 1
+ OFFLINE_GROW = 1 << 2
+ ONLINE_SHRINK = 1 << 3
+ ONLINE_GROW = 1 << 4
--
2.40.1
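
The capability declaration is a plain bitmask: each filesystem class ORs together the operations it supports and _pre_resize() tests the direction-specific bit. The flag arithmetic in isolation:

    class FSResize():
        OFFLINE_SHRINK = 1 << 1
        OFFLINE_GROW = 1 << 2
        ONLINE_SHRINK = 1 << 3
        ONLINE_GROW = 1 << 4

    # ext4 in this patch: online grow plus offline grow/shrink
    support = FSResize.ONLINE_GROW | FSResize.OFFLINE_GROW | FSResize.OFFLINE_SHRINK

    print(bool(support & FSResize.ONLINE_GROW))    # True  -> allowed while mounted
    print(bool(support & FSResize.ONLINE_SHRINK))  # False -> raises FSError
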
From 3fce5d0bfd7b09a976ff49feed15077477c6a425 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 6 Apr 2023 14:02:11 +0200
Subject: [PATCH 2/2] Add a test case for filesystem online resize
Related: rhbz#2168680
---
tests/storage_tests/formats_test/fs_test.py | 43 ++++++++++++++++++++-
1 file changed, 42 insertions(+), 1 deletion(-)
diff --git a/tests/storage_tests/formats_test/fs_test.py b/tests/storage_tests/formats_test/fs_test.py
index 97f4cbbe..1d42dc21 100644
--- a/tests/storage_tests/formats_test/fs_test.py
+++ b/tests/storage_tests/formats_test/fs_test.py
@@ -6,9 +6,10 @@ import parted
import blivet.formats.fs as fs
from blivet.size import Size, ROUND_DOWN
-from blivet.errors import DeviceFormatError
+from blivet.errors import DeviceFormatError, FSError
from blivet.formats import get_format
from blivet.devices import PartitionDevice, DiskDevice
+from blivet.flags import flags
from .loopbackedtestcase import LoopBackedTestCase
@@ -26,6 +27,46 @@ class Ext3FSTestCase(Ext2FSTestCase):
class Ext4FSTestCase(Ext3FSTestCase):
_fs_class = fs.Ext4FS
+ def test_online_resize(self):
+ an_fs = self._fs_class()
+ if not an_fs.formattable:
+ self.skipTest("can not create filesystem %s" % an_fs.name)
+ an_fs.device = self.loop_devices[0]
+ self.assertIsNone(an_fs.create())
+ an_fs.update_size_info()
+
+ if not self.can_resize(an_fs):
+ self.skipTest("filesystem is not resizable")
+
+ # shrink offline first (ext doesn't support online shrinking)
+ TARGET_SIZE = Size("64 MiB")
+ an_fs.target_size = TARGET_SIZE
+ self.assertEqual(an_fs.target_size, TARGET_SIZE)
+ self.assertNotEqual(an_fs._size, TARGET_SIZE)
+ self.assertIsNone(an_fs.do_resize())
+
+ with tempfile.TemporaryDirectory() as mountpoint:
+ an_fs.mount(mountpoint=mountpoint)
+
+ # grow back when mounted
+ TARGET_SIZE = Size("100 MiB")
+ an_fs.target_size = TARGET_SIZE
+ self.assertEqual(an_fs.target_size, TARGET_SIZE)
+ self.assertNotEqual(an_fs._size, TARGET_SIZE)
+
+ # should fail, online resize disabled by default
+ with self.assertRaisesRegex(FSError, "Resizing of mounted filesystems is disabled"):
+ an_fs.do_resize()
+
+ # enable online resize
+ flags.allow_online_fs_resize = True
+ an_fs.do_resize()
+ flags.allow_online_fs_resize = False
+ self._test_sizes(an_fs)
+ self.assertEqual(an_fs.system_mountpoint, mountpoint)
+
+ an_fs.unmount()
+
class FATFSTestCase(fstesting.FSAsRoot):
_fs_class = fs.FATFS
--
2.40.1

@@ -0,0 +1,383 @@
From 2d26f69abc2d793e753c0cddb3086264ca2b4e71 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 6 Mar 2023 10:51:42 +0100
Subject: [PATCH 1/5] Allow changing iSCSI initiator name after setting it
Resolves: rhbz#2221935
---
blivet/iscsi.py | 13 +++++++++++--
1 file changed, 11 insertions(+), 2 deletions(-)
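
A sketch of the relaxed setter semantics (initiator names are illustrative):

    from blivet.iscsi import iscsi

    iscsi.initiator = "iqn.1994-05.com.example:node-a"
    # Renaming is now allowed as long as no session is active; the setter
    # also restarts iscsid so the new name takes effect. With an active
    # session it raises ISCSIError instead of the old ValueError.
    iscsi.initiator = "iqn.1994-05.com.example:node-b"
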
diff --git a/blivet/iscsi.py b/blivet/iscsi.py
index 86451db3..0d063f2a 100644
--- a/blivet/iscsi.py
+++ b/blivet/iscsi.py
@@ -212,14 +212,23 @@ class iSCSI(object):
@initiator.setter
@storaged_iscsi_required(critical=True, eval_mode=util.EvalMode.onetime)
def initiator(self, val):
- if self.initiator_set and val != self._initiator:
- raise ValueError(_("Unable to change iSCSI initiator name once set"))
if len(val) == 0:
raise ValueError(_("Must provide an iSCSI initiator name"))
+ active = self._get_active_sessions()
+ if active:
+ raise errors.ISCSIError(_("Cannot change initiator name with an active session"))
+
log.info("Setting up iSCSI initiator name %s", self.initiator)
args = GLib.Variant("(sa{sv})", (val, None))
self._call_initiator_method("SetInitiatorName", args)
+
+ if self.initiator_set and val != self._initiator:
+ log.info("Restarting iscsid after initiator name change")
+ rc = util.run_program(["systemctl", "restart", "iscsid"])
+ if rc != 0:
+ raise errors.ISCSIError(_("Failed to restart iscsid after initiator name change"))
+
self._initiator = val
def active_nodes(self, target=None):
--
2.40.1
From 7c226ed0e14efcdd6e562e357d8f3465ad43ef33 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 6 Mar 2023 15:10:28 +0100
Subject: [PATCH 2/5] Add a basic test case for the iscsi module
Related: rhbz#2221935
---
misc/install-test-dependencies.yml | 3 +
tests/storage_tests/__init__.py | 2 +
tests/storage_tests/iscsi_test.py | 157 +++++++++++++++++++++++++++++
3 files changed, 162 insertions(+)
create mode 100644 tests/storage_tests/iscsi_test.py
diff --git a/tests/storage_tests/__init__.py b/tests/storage_tests/__init__.py
index 3b2a6cc4..e69fcc34 100644
--- a/tests/storage_tests/__init__.py
+++ b/tests/storage_tests/__init__.py
@@ -3,3 +3,5 @@ from .formats_test import *
from .partitioning_test import *
from .unsupported_disklabel_test import *
+
+from .iscsi_test import *
diff --git a/tests/storage_tests/iscsi_test.py b/tests/storage_tests/iscsi_test.py
new file mode 100644
index 00000000..00cc7c36
--- /dev/null
+++ b/tests/storage_tests/iscsi_test.py
@@ -0,0 +1,157 @@
+import glob
+import os
+import re
+import shutil
+import subprocess
+import unittest
+
+from contextlib import contextmanager
+
+from .storagetestcase import create_sparse_tempfile
+
+
+def read_file(filename, mode="r"):
+ with open(filename, mode) as f:
+ content = f.read()
+ return content
+
+
+@contextmanager
+def udev_settle():
+ try:
+ yield
+ finally:
+ os.system("udevadm settle")
+
+
+def _delete_backstore(name):
+ status = subprocess.call(["targetcli", "/backstores/fileio/ delete %s" % name],
+ stdout=subprocess.DEVNULL)
+ if status != 0:
+ raise RuntimeError("Failed to delete the '%s' fileio backstore" % name)
+
+
+def delete_iscsi_target(iqn, backstore=None):
+ status = subprocess.call(["targetcli", "/iscsi delete %s" % iqn],
+ stdout=subprocess.DEVNULL)
+ if status != 0:
+ raise RuntimeError("Failed to delete the '%s' iscsi device" % iqn)
+
+ if backstore is not None:
+ _delete_backstore(backstore)
+
+
+def create_iscsi_target(fpath, initiator_name=None):
+ """
+ Creates a new iSCSI target (using targetcli) on top of the
+ :param:`fpath` backing file.
+
+ :param str fpath: path of the backing file
+ :returns: iSCSI IQN, backstore name
+ :rtype: tuple of str
+
+ """
+
+ # "register" the backing file as a fileio backstore
+ store_name = os.path.basename(fpath)
+ status = subprocess.call(["targetcli", "/backstores/fileio/ create %s %s" % (store_name, fpath)], stdout=subprocess.DEVNULL)
+ if status != 0:
+ raise RuntimeError("Failed to register '%s' as a fileio backstore" % fpath)
+
+ out = subprocess.check_output(["targetcli", "/backstores/fileio/%s info" % store_name])
+ out = out.decode("utf-8")
+ store_wwn = None
+ for line in out.splitlines():
+ if line.startswith("wwn: "):
+ store_wwn = line[5:]
+ if store_wwn is None:
+ raise RuntimeError("Failed to determine '%s' backstore's wwn" % store_name)
+
+ # create a new iscsi device
+ out = subprocess.check_output(["targetcli", "/iscsi create"])
+ out = out.decode("utf-8")
+ match = re.match(r'Created target (.*).', out)
+ if match:
+ iqn = match.groups()[0]
+ else:
+ _delete_backstore(store_name)
+ raise RuntimeError("Failed to create a new iscsi target")
+
+ if initiator_name:
+ status = subprocess.call(["targetcli", "/iscsi/%s/tpg1/acls create %s" % (iqn, initiator_name)], stdout=subprocess.DEVNULL)
+ if status != 0:
+ delete_iscsi_target(iqn, store_name)
+ raise RuntimeError("Failed to set ACLs for '%s'" % iqn)
+
+ with udev_settle():
+ status = subprocess.call(["targetcli", "/iscsi/%s/tpg1/luns create /backstores/fileio/%s" % (iqn, store_name)], stdout=subprocess.DEVNULL)
+ if status != 0:
+ delete_iscsi_target(iqn, store_name)
+ raise RuntimeError("Failed to create a new LUN for '%s' using '%s'" % (iqn, store_name))
+
+ status = subprocess.call(["targetcli", "/iscsi/%s/tpg1 set attribute generate_node_acls=1" % iqn], stdout=subprocess.DEVNULL)
+ if status != 0:
+ raise RuntimeError("Failed to set ACLs for '%s'" % iqn)
+
+ return iqn, store_name
+
+
+@unittest.skipUnless(os.geteuid() == 0, "requires root privileges")
+@unittest.skipUnless(os.environ.get("JENKINS_HOME"), "jenkins only test")
+@unittest.skipUnless(shutil.which("iscsiadm"), "iscsiadm not available")
+class ISCSITestCase(unittest.TestCase):
+
+ _disk_size = 512 * 1024**2
+ initiator = 'iqn.1994-05.com.redhat:iscsi-test'
+
+ def setUp(self):
+ self.addCleanup(self._clean_up)
+
+ self._dev_file = None
+ self.dev = None
+
+ self._dev_file = create_sparse_tempfile("blivet_test", self._disk_size)
+ try:
+ self.dev, self.backstore = create_iscsi_target(self._dev_file, self.initiator)
+ except RuntimeError as e:
+ raise RuntimeError("Failed to setup targetcli device for testing: %s" % e)
+
+ def _force_logout(self):
+ subprocess.call(["iscsiadm", "--mode", "node", "--logout", "--name", self.dev], stdout=subprocess.DEVNULL)
+
+ def _clean_up(self):
+ self._force_logout()
+ delete_iscsi_target(self.dev, self.backstore)
+ os.unlink(self._dev_file)
+
+ def test_discover_login(self):
+ from blivet.iscsi import iscsi, has_iscsi
+
+ if not has_iscsi():
+ self.skipTest("iSCSI not available, skipping")
+
+ iscsi.initiator = self.initiator
+ nodes = iscsi.discover("127.0.0.1")
+ self.assertTrue(nodes)
+
+ if len(nodes) > 1:
+ self.skipTest("Discovered more than one iSCSI target on localhost, skipping")
+
+ self.assertEqual(nodes[0].address, "127.0.0.1")
+ self.assertEqual(nodes[0].port, 3260)
+ self.assertEqual(nodes[0].name, self.dev)
+
+ # change the initiator name
+ iscsi.initiator = self.initiator + "_1"
+ self.assertEqual(iscsi.initiator, self.initiator + "_1")
+
+ # try to login
+ ret, err = iscsi.log_into_node(nodes[0])
+ self.assertTrue(ret, "Login failed: %s" % err)
+
+ # check the session for initiator name
+ sessions = glob.glob("/sys/class/iscsi_session/*/")
+ self.assertTrue(sessions)
+ self.assertEqual(len(sessions), 1)
+ initiator = read_file(sessions[0] + "initiatorname").strip()
+ self.assertEqual(initiator, iscsi.initiator)
--
2.40.1
From dfd0c59a901f54ecfd8f538a2bb004a2e5ab6eec Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 6 Mar 2023 15:14:40 +0100
Subject: [PATCH 3/5] tests: Use blivet-specific prefix for targetcli backing
files
The code is originally from libblockdev hence the "bd" prefix, we
should use a different prefix for blivet to be able to identify
which test suite failed to clean the files.
Related: rhbz#2221935
---
tests/storage_tests/storagetestcase.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/storage_tests/storagetestcase.py b/tests/storage_tests/storagetestcase.py
index 35d57ce9..9f859977 100644
--- a/tests/storage_tests/storagetestcase.py
+++ b/tests/storage_tests/storagetestcase.py
@@ -39,7 +39,7 @@ def create_sparse_tempfile(name, size):
:param size: the file size (in bytes)
:returns: the path to the newly created file
"""
- (fd, path) = tempfile.mkstemp(prefix="bd.", suffix="-%s" % name)
+ (fd, path) = tempfile.mkstemp(prefix="blivet.", suffix="-%s" % name)
os.close(fd)
create_sparse_file(path, size)
return path
--
2.40.1
From 492fc9b8dfc2d4aa7cb44baa4408570babcb5e96 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 19 Jul 2023 13:57:39 +0200
Subject: [PATCH 4/5] iscsi: Save firmware initiator name to
/etc/iscsi/initiatorname.iscsi
Resolves: rhbz#2221932
---
blivet/iscsi.py | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/blivet/iscsi.py b/blivet/iscsi.py
index 0d063f2a..8080a671 100644
--- a/blivet/iscsi.py
+++ b/blivet/iscsi.py
@@ -160,6 +160,11 @@ class iSCSI(object):
self._initiator = initiatorname
except Exception as e: # pylint: disable=broad-except
log.info("failed to get initiator name from iscsi firmware: %s", str(e))
+ else:
+ # write the firmware initiator to /etc/iscsi/initiatorname.iscsi
+ log.info("Setting up firmware iSCSI initiator name %s", self.initiator)
+ args = GLib.Variant("(sa{sv})", (initiatorname, None))
+ self._call_initiator_method("SetInitiatorName", args)
# So that users can write iscsi() to get the singleton instance
def __call__(self):
--
2.40.1
From 768d90815b7f95d0d6d278397fd6fd12a0490b5d Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 19 Jul 2023 10:38:45 +0200
Subject: [PATCH 5/5] tests: Improve iscsi_test.ISCSITestCase
Changed how we create the initiator name ACLs, based on the RTT
test case for rhbz#2084043, and also improved the test case itself.
Related: rhbz#2221935
---
tests/storage_tests/iscsi_test.py | 36 +++++++++++++++++++++----------
1 file changed, 25 insertions(+), 11 deletions(-)
diff --git a/tests/storage_tests/iscsi_test.py b/tests/storage_tests/iscsi_test.py
index 00cc7c36..6cc83a59 100644
--- a/tests/storage_tests/iscsi_test.py
+++ b/tests/storage_tests/iscsi_test.py
@@ -77,21 +77,17 @@ def create_iscsi_target(fpath, initiator_name=None):
_delete_backstore(store_name)
raise RuntimeError("Failed to create a new iscsi target")
- if initiator_name:
- status = subprocess.call(["targetcli", "/iscsi/%s/tpg1/acls create %s" % (iqn, initiator_name)], stdout=subprocess.DEVNULL)
- if status != 0:
- delete_iscsi_target(iqn, store_name)
- raise RuntimeError("Failed to set ACLs for '%s'" % iqn)
-
with udev_settle():
status = subprocess.call(["targetcli", "/iscsi/%s/tpg1/luns create /backstores/fileio/%s" % (iqn, store_name)], stdout=subprocess.DEVNULL)
if status != 0:
delete_iscsi_target(iqn, store_name)
raise RuntimeError("Failed to create a new LUN for '%s' using '%s'" % (iqn, store_name))
- status = subprocess.call(["targetcli", "/iscsi/%s/tpg1 set attribute generate_node_acls=1" % iqn], stdout=subprocess.DEVNULL)
- if status != 0:
- raise RuntimeError("Failed to set ACLs for '%s'" % iqn)
+ if initiator_name:
+ status = subprocess.call(["targetcli", "/iscsi/%s/tpg1/acls create %s" % (iqn, initiator_name)], stdout=subprocess.DEVNULL)
+ if status != 0:
+ delete_iscsi_target(iqn, store_name)
+ raise RuntimeError("Failed to set ACLs for '%s'" % iqn)
return iqn, store_name
@@ -130,6 +126,7 @@ class ISCSITestCase(unittest.TestCase):
if not has_iscsi():
self.skipTest("iSCSI not available, skipping")
+ # initially set the initiator to the correct/allowed one
iscsi.initiator = self.initiator
nodes = iscsi.discover("127.0.0.1")
self.assertTrue(nodes)
@@ -141,11 +138,28 @@ class ISCSITestCase(unittest.TestCase):
self.assertEqual(nodes[0].port, 3260)
self.assertEqual(nodes[0].name, self.dev)
- # change the initiator name
+ # change the initiator name to a wrong one
iscsi.initiator = self.initiator + "_1"
self.assertEqual(iscsi.initiator, self.initiator + "_1")
- # try to login
+ # check the change made it to /etc/iscsi/initiatorname.iscsi
+ initiator_file = read_file("/etc/iscsi/initiatorname.iscsi").strip()
+ self.assertEqual(initiator_file, "InitiatorName=%s" % self.initiator + "_1")
+
+ # try to login (should fail)
+ ret, err = iscsi.log_into_node(nodes[0])
+ self.assertFalse(ret)
+ self.assertIn("authorization failure", err)
+
+ # change the initiator name back to the correct one
+ iscsi.initiator = self.initiator
+ self.assertEqual(iscsi.initiator, self.initiator)
+
+ # check the change made it to /etc/iscsi/initiatorname.iscsi
+ initiator_file = read_file("/etc/iscsi/initiatorname.iscsi").strip()
+ self.assertEqual(initiator_file, "InitiatorName=%s" % self.initiator)
+
+ # try to login (should work now)
ret, err = iscsi.log_into_node(nodes[0])
self.assertTrue(ret, "Login failed: %s" % err)
--
2.40.1

@@ -0,0 +1,26 @@
From 9dcd32dd85f7f45c3fe6c8d7b1de3b4c322c6807 Mon Sep 17 00:00:00 2001
From: Tomas Bzatek <tbzatek@redhat.com>
Date: Mon, 11 Sep 2023 13:50:24 +0200
Subject: [PATCH] nvme: Require additional rpms for dracut
The '95nvmf' dracut module needs a couple more packages
for the NBFT (NVMe over TCP) to work - such as networking.
Local PCIe NVMe devices have no special needs.
---
blivet/devices/disk.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index 2b49ef685..5053f7bb8 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -725,7 +725,8 @@ class NVMeFabricsNamespaceDevice(NVMeNamespaceDevice, NetworkStorageDevice):
""" NVMe fabrics namespace """
_type = "nvme-fabrics"
- _packages = ["nvme-cli"]
+ # dracut '95nvmf' module dependencies
+ _packages = ["nvme-cli", "dracut-network"]
def __init__(self, device, **kwargs):
"""

@@ -0,0 +1,107 @@
From 06597099906be55b106c234b3bf0c87ec7d90a07 Mon Sep 17 00:00:00 2001
From: Tomas Bzatek <tbzatek@redhat.com>
Date: Thu, 17 Aug 2023 14:45:18 +0200
Subject: [PATCH] nvme: Align HostNQN and HostID format to TP4126
Also don't overwrite existing files during startup() since they
might have been supplied by early boot stages.
---
blivet/nvme.py | 62 +++++++++++++++++++++++++++++++-------------------
1 file changed, 39 insertions(+), 23 deletions(-)
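
Per TP4126, a uuid-based HostNQN embeds the HostID, which is how the new code derives one from the other; the string handling in isolation (NQN value illustrative):

    nqn = "nqn.2014-08.org.nvmexpress:uuid:01234567-8900-abcd-efff-abcdabcdabcd"
    if "uuid:" not in nqn:
        raise ValueError("missing UUID part in HostNQN '%s'" % nqn)
    hostid = nqn.split("uuid:")[1]
    print(hostid)  # 01234567-8900-abcd-efff-abcdabcdabcd
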
diff --git a/blivet/nvme.py b/blivet/nvme.py
index 17bead15e..5ac41cffa 100644
--- a/blivet/nvme.py
+++ b/blivet/nvme.py
@@ -18,16 +18,20 @@
#
import os
-import shutil
from . import errors
-from . import util
+
+import gi
+gi.require_version("BlockDev", "2.0")
+
+from gi.repository import BlockDev as blockdev
import logging
log = logging.getLogger("blivet")
-HOSTNQN_FILE = "/etc/nvme/hostnqn"
-HOSTID_FILE = "/etc/nvme/hostid"
+ETC_NVME_PATH = "/etc/nvme/"
+HOSTNQN_FILE = ETC_NVME_PATH + "hostnqn"
+HOSTID_FILE = ETC_NVME_PATH + "hostid"
class NVMe(object):
@@ -40,6 +44,8 @@ class NVMe(object):
def __init__(self):
self.started = False
+ self._hostnqn = None
+ self._hostid = None
# So that users can write nvme() to get the singleton instance
def __call__(self):
@@ -52,28 +58,38 @@ def startup(self):
if self.started:
return
- rc, nqn = util.run_program_and_capture_output(["nvme", "gen-hostnqn"])
- if rc != 0:
- raise errors.NVMeError("Failed to generate hostnqn")
-
- with open(HOSTNQN_FILE, "w") as f:
- f.write(nqn)
-
- rc, hid = util.run_program_and_capture_output(["dmidecode", "-s", "system-uuid"])
- if rc != 0:
- raise errors.NVMeError("Failed to generate host ID")
-
- with open(HOSTID_FILE, "w") as f:
- f.write(hid)
+ self._hostnqn = blockdev.nvme_get_host_nqn()
+ self._hostid = blockdev.nvme_get_host_id()
+ if not self._hostnqn:
+ self._hostnqn = blockdev.nvme_generate_host_nqn()
+ if not self._hostnqn:
+ raise errors.NVMeError("Failed to generate HostNQN")
+ if not self._hostid:
+ if 'uuid:' not in self._hostnqn:
+ raise errors.NVMeError("Missing UUID part in the HostNQN string '%s'" % self._hostnqn)
+ # derive HostID from HostNQN's UUID part
+ self._hostid = self._hostnqn.split('uuid:')[1]
+
+ # do not overwrite existing files, taken e.g. from initramfs
+ self.write("/", overwrite=False)
self.started = True
- def write(self, root): # pylint: disable=unused-argument
- # copy the hostnqn and hostid files
- if not os.path.isdir(root + "/etc/nvme"):
- os.makedirs(root + "/etc/nvme", 0o755)
- shutil.copyfile(HOSTNQN_FILE, root + HOSTNQN_FILE)
- shutil.copyfile(HOSTID_FILE, root + HOSTID_FILE)
+ def write(self, root, overwrite=True): # pylint: disable=unused-argument
+ # write down the hostnqn and hostid files
+ p = root + ETC_NVME_PATH
+ if not os.path.isdir(p):
+ os.makedirs(p, 0o755)
+ p = root + HOSTNQN_FILE
+ if overwrite or not os.path.isfile(p):
+ with open(p, "w") as f:
+ f.write(self._hostnqn)
+ f.write("\n")
+ p = root + HOSTID_FILE
+ if overwrite or not os.path.isfile(p):
+ with open(p, "w") as f:
+ f.write(self._hostid)
+ f.write("\n")
# Create nvme singleton

@@ -0,0 +1,58 @@
From 63da3cb8a40500c889c8faa4326f81d16997a3c8 Mon Sep 17 00:00:00 2001
From: Tomas Bzatek <tbzatek@redhat.com>
Date: Mon, 27 Nov 2023 18:55:55 +0100
Subject: [PATCH] nvme: Retrieve HostNQN from a first active fabrics connection
When no /etc/nvme/hostnqn exists, look for any active NVMe over
Fabrics connections and take the values from the first one, rather
than generating new ones.
---
blivet/nvme.py | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)
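
Each active fabrics controller exports the hostnqn/hostid it connected with in sysfs; a standalone sketch of the scan the patch adds:

    import glob
    import os

    def first_fabrics_hostnqn():
        # values live under /sys/class/nvme-fabrics/ctl/nvme*/
        for d in glob.glob("/sys/class/nvme-fabrics/ctl/nvme*/"):
            try:
                with open(os.path.join(d, "hostnqn")) as f:
                    hostnqn = f.readline().strip()
                if hostnqn:
                    return hostnqn
            except OSError:
                continue
        return None
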
diff --git a/blivet/nvme.py b/blivet/nvme.py
index 5ac41cffa..2e4686e68 100644
--- a/blivet/nvme.py
+++ b/blivet/nvme.py
@@ -18,6 +18,7 @@
#
import os
+import glob
from . import errors
@@ -54,6 +55,22 @@ def __call__(self):
def __deepcopy__(self, memo_dict): # pylint: disable=unused-argument
return self
+ def _retrieve_fabrics_hostnqn(self):
+ for d in glob.glob('/sys/class/nvme-fabrics/ctl/nvme*/'):
+ try:
+ # invalidate old values
+ self._hostnqn = None
+ self._hostid = None
+ # read from sysfs
+ with open(os.path.join(d, 'hostnqn')) as f:
+ self._hostnqn = f.readline().strip()
+ with open(os.path.join(d, 'hostid')) as f:
+ self._hostid = f.readline().strip()
+ if self._hostnqn:
+ break
+ except Exception: # pylint: disable=broad-except
+ pass
+
def startup(self):
if self.started:
return
@@ -61,6 +78,10 @@ def startup(self):
self._hostnqn = blockdev.nvme_get_host_nqn()
self._hostid = blockdev.nvme_get_host_id()
if not self._hostnqn:
+ # see if there are any active fabrics connections and take their values over
+ self._retrieve_fabrics_hostnqn()
+ if not self._hostnqn:
+ # generate new values
self._hostnqn = blockdev.nvme_generate_host_nqn()
if not self._hostnqn:
raise errors.NVMeError("Failed to generate HostNQN")

@@ -0,0 +1,67 @@
From c807e234dfd07f3d0005c71501f0300284cd580b Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 6 Dec 2023 11:47:31 +0100
Subject: [PATCH] tests: Add a simple unit test for the NVMe module
---
tests/unit_tests/__init__.py | 1 +
tests/unit_tests/nvme_test.py | 38 +++++++++++++++++++++++++++++++++++
2 files changed, 39 insertions(+)
create mode 100644 tests/unit_tests/nvme_test.py
diff --git a/tests/unit_tests/__init__.py b/tests/unit_tests/__init__.py
index 589366e0f..62bef67f5 100644
--- a/tests/unit_tests/__init__.py
+++ b/tests/unit_tests/__init__.py
@@ -9,6 +9,7 @@
from .devicetree_test import *
from .events_test import *
from .misc_test import *
+from .nvme_test import *
from .parentlist_test import *
from .populator_test import *
from .size_test import *
diff --git a/tests/unit_tests/nvme_test.py b/tests/unit_tests/nvme_test.py
new file mode 100644
index 000000000..cb948687f
--- /dev/null
+++ b/tests/unit_tests/nvme_test.py
@@ -0,0 +1,38 @@
+import unittest
+
+try:
+ from unittest.mock import patch
+except ImportError:
+ from mock import patch
+
+from blivet.nvme import nvme
+
+
+class NVMeModuleTestCase(unittest.TestCase):
+
+ host_nqn = "nqn.2014-08.org.nvmexpress:uuid:01234567-8900-abcd-efff-abcdabcdabcd"
+
+ @patch("blivet.nvme.os")
+ @patch("blivet.nvme.blockdev")
+ def test_nvme_module(self, bd, os):
+ self.assertIsNotNone(nvme)
+ bd.nvme_get_host_nqn.return_value = self.host_nqn
+ bd.nvme_get_host_id.return_value = None # None = generate from host_nqn
+ os.path.isdir.return_value = False
+
+ # startup
+ with patch.object(nvme, "write") as write:
+ nvme.startup()
+ write.assert_called_once_with("/", overwrite=False)
+
+ self.assertTrue(nvme.started)
+ self.assertEqual(nvme._hostnqn, self.host_nqn)
+ self.assertEqual(nvme._hostid, "01234567-8900-abcd-efff-abcdabcdabcd")
+
+ # write
+ with patch("blivet.nvme.open") as op:
+ nvme.write("/test")
+
+ os.makedirs.assert_called_with("/test/etc/nvme/", 0o755)
+ op.assert_any_call("/test/etc/nvme/hostnqn", "w")
+ op.assert_any_call("/test/etc/nvme/hostid", "w")

@@ -0,0 +1,206 @@
From c20296b2df89a9edc4ea9cc41f94df89a8fbfd26 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 20 Apr 2023 12:35:30 +0200
Subject: [PATCH] Add support for creating shared LVM setups
This feature is requested by GFS2 for the storage role. This adds
support for creating shared VGs and activating LVs in shared mode.
Resolves: RHEL-324
---
blivet/devices/lvm.py | 44 +++++++++++++++++++----
blivet/tasks/availability.py | 9 +++++
tests/unit_tests/devices_test/lvm_test.py | 25 +++++++++++++
3 files changed, 72 insertions(+), 6 deletions(-)
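
A usage sketch of the two new keywords, assuming lvmlockd is set up on the cluster nodes and libblockdev reports the shared LVM technology as available (names illustrative):

    import blivet
    from blivet.size import Size
    from blivet.devices import StorageDevice, LVMVolumeGroupDevice, LVMLogicalVolumeDevice

    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("10 GiB"), exists=True)
    # shared=True -> 'vgcreate --shared' plus starting the VG lockspace
    vg = LVMVolumeGroupDevice("sharedvg", parents=[pv], shared=True)
    # shared=True -> the LV is created and activated in shared mode
    lv = LVMLogicalVolumeDevice("gfs2lv", parents=[vg], size=Size("5 GiB"),
                                shared=True)
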
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index ca45c4b5..068c5368 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -97,7 +97,8 @@ class LVMVolumeGroupDevice(ContainerDevice):
def __init__(self, name, parents=None, size=None, free=None,
pe_size=None, pe_count=None, pe_free=None, pv_count=None,
- uuid=None, exists=False, sysfs_path='', exported=False):
+ uuid=None, exists=False, sysfs_path='', exported=False,
+ shared=False):
"""
:param name: the device name (generally a device node's basename)
:type name: str
@@ -124,6 +125,11 @@ class LVMVolumeGroupDevice(ContainerDevice):
:type pv_count: int
:keyword uuid: the VG UUID
:type uuid: str
+
+ For non-existing VGs only:
+
+ :keyword shared: whether to create this VG as shared
+ :type shared: bool
"""
# These attributes are used by _add_parent, so they must be initialized
# prior to instantiating the superclass.
@@ -137,6 +143,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
self.pe_count = util.numeric_type(pe_count)
self.pe_free = util.numeric_type(pe_free)
self.exported = exported
+ self._shared = shared
# TODO: validate pe_size if given
if not self.pe_size:
@@ -254,7 +261,19 @@ class LVMVolumeGroupDevice(ContainerDevice):
""" Create the device. """
log_method_call(self, self.name, status=self.status)
pv_list = [pv.path for pv in self.parents]
- blockdev.lvm.vgcreate(self.name, pv_list, self.pe_size)
+ extra = dict()
+ if self._shared:
+ extra["shared"] = ""
+ blockdev.lvm.vgcreate(self.name, pv_list, self.pe_size, **extra)
+
+ if self._shared:
+ if availability.BLOCKDEV_LVM_PLUGIN_SHARED.available:
+ try:
+ blockdev.lvm.vglock_start(self.name)
+ except blockdev.LVMError as err:
+ raise errors.LVMError(err)
+ else:
+ raise errors.LVMError("Shared LVM is not fully supported: %s" % ",".join(availability.BLOCKDEV_LVM_PLUGIN_SHARED.availability_errors))
def _post_create(self):
self._complete = True
@@ -661,7 +680,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None,
fmt=None, exists=False, sysfs_path='', grow=None, maxsize=None,
percent=None, cache_request=None, pvs=None, from_lvs=None,
- stripe_size=0):
+ stripe_size=0, shared=False):
if not exists:
if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo", "cache-pool"] + lvm.raid_seg_types:
@@ -690,6 +709,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
self.seg_type = seg_type or "linear"
self._raid_level = None
self.ignore_skip_activation = 0
+ self._shared = shared
self.req_grow = None
self.req_max_size = Size(0)
@@ -2306,7 +2326,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
parent_lv=None, int_type=None, origin=None, vorigin=False,
metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
compression=False, deduplication=False, index_memory=0,
- write_policy=None, cache_mode=None, attach_to=None, stripe_size=0):
+ write_policy=None, cache_mode=None, attach_to=None, stripe_size=0,
+ shared=False):
"""
:param name: the device name (generally a device node's basename)
:type name: str
@@ -2337,6 +2358,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
:type cache_request: :class:`~.devices.lvm.LVMCacheRequest`
:keyword pvs: list of PVs to allocate extents from (size could be specified for each PV)
:type pvs: list of :class:`~.devices.StorageDevice` or :class:`LVPVSpec` objects (tuples)
+ :keyword shared: whether to activate the newly create LV in shared mode
+ :type shared: bool
For internal LVs only:
@@ -2412,7 +2435,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
fmt, exists, sysfs_path, grow, maxsize,
percent, cache_request, pvs, from_lvs,
- stripe_size)
+ stripe_size, shared)
LVMVDOPoolMixin.__init__(self, compression, deduplication, index_memory,
write_policy)
LVMVDOLogicalVolumeMixin.__init__(self)
@@ -2634,7 +2657,13 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
log_method_call(self, self.name, orig=orig, status=self.status,
controllable=self.controllable)
ignore_skip_activation = self.is_snapshot_lv or self.ignore_skip_activation > 0
- blockdev.lvm.lvactivate(self.vg.name, self._name, ignore_skip=ignore_skip_activation)
+ if self._shared:
+ if availability.BLOCKDEV_LVM_PLUGIN_SHARED.available:
+ blockdev.lvm.lvactivate(self.vg.name, self._name, ignore_skip=ignore_skip_activation, shared=True)
+ else:
+ raise errors.LVMError("Shared LVM is not fully supported: %s" % ",".join(availability.BLOCKDEV_LVM_PLUGIN_SHARED.availability_errors))
+ else:
+ blockdev.lvm.lvactivate(self.vg.name, self._name, ignore_skip=ignore_skip_activation)
@type_specific
def _pre_create(self):
@@ -2672,6 +2701,9 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
if self._stripe_size:
extra["stripesize"] = str(int(self._stripe_size.convert_to("KiB")))
+ if self._shared:
+ extra["activate"] = "sy"
+
blockdev.lvm.lvcreate(self.vg.name, self._name, self.size,
type=self.seg_type, pv_list=pvs, **extra)
else:
diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py
index bba1ba84..85945c77 100644
--- a/blivet/tasks/availability.py
+++ b/blivet/tasks/availability.py
@@ -435,6 +435,14 @@ if hasattr(blockdev.LVMTech, "VDO"):
else:
BLOCKDEV_LVM_TECH_VDO = _UnavailableMethod(error_msg="Installed version of libblockdev doesn't support LVM VDO technology")
+if hasattr(blockdev.LVMTech, "SHARED"):
+ BLOCKDEV_LVM_SHARED = BlockDevTechInfo(plugin_name="lvm",
+ check_fn=blockdev.lvm_is_tech_avail,
+ technologies={blockdev.LVMTech.SHARED: blockdev.LVMTechMode.MODIFY}) # pylint: disable=no-member
+ BLOCKDEV_LVM_TECH_SHARED = BlockDevMethod(BLOCKDEV_LVM_SHARED)
+else:
+ BLOCKDEV_LVM_TECH_SHARED = _UnavailableMethod(error_msg="Installed version of libblockdev doesn't support shared LVM technology")
+
# libblockdev mdraid plugin required technologies and modes
BLOCKDEV_MD_ALL_MODES = (blockdev.MDTechMode.CREATE |
blockdev.MDTechMode.DELETE |
@@ -476,6 +484,7 @@ BLOCKDEV_DM_PLUGIN_RAID = blockdev_plugin("libblockdev dm plugin (raid technolog
BLOCKDEV_LOOP_PLUGIN = blockdev_plugin("libblockdev loop plugin", BLOCKDEV_LOOP_TECH)
BLOCKDEV_LVM_PLUGIN = blockdev_plugin("libblockdev lvm plugin", BLOCKDEV_LVM_TECH)
BLOCKDEV_LVM_PLUGIN_VDO = blockdev_plugin("libblockdev lvm plugin (vdo technology)", BLOCKDEV_LVM_TECH_VDO)
+BLOCKDEV_LVM_PLUGIN_SHARED = blockdev_plugin("libblockdev lvm plugin (shared LVM technology)", BLOCKDEV_LVM_TECH_SHARED)
BLOCKDEV_MDRAID_PLUGIN = blockdev_plugin("libblockdev mdraid plugin", BLOCKDEV_MD_TECH)
BLOCKDEV_MPATH_PLUGIN = blockdev_plugin("libblockdev mpath plugin", BLOCKDEV_MPATH_TECH)
BLOCKDEV_SWAP_PLUGIN = blockdev_plugin("libblockdev swap plugin", BLOCKDEV_SWAP_TECH)
diff --git a/tests/unit_tests/devices_test/lvm_test.py b/tests/unit_tests/devices_test/lvm_test.py
index d7b55224..e645309f 100644
--- a/tests/unit_tests/devices_test/lvm_test.py
+++ b/tests/unit_tests/devices_test/lvm_test.py
@@ -476,6 +476,31 @@ class LVMDeviceTest(unittest.TestCase):
lv.setup()
lvm.lvactivate.assert_called_with(vg.name, lv.lvname, ignore_skip=False)
+ @patch("blivet.tasks.availability.BLOCKDEV_LVM_PLUGIN_SHARED",
+ new=blivet.tasks.availability.ExternalResource(blivet.tasks.availability.AvailableMethod, ""))
+ def test_lv_activate_shared(self):
+ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("1 GiB"), exists=True)
+ vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
+ lv = LVMLogicalVolumeDevice("data_lv", parents=[vg], size=Size("500 MiB"), exists=True, shared=True)
+
+ with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
+ with patch.object(lv, "_pre_setup"):
+ lv.setup()
+ lvm.lvactivate.assert_called_with(vg.name, lv.lvname, ignore_skip=False, shared=True)
+
+ @patch("blivet.tasks.availability.BLOCKDEV_LVM_PLUGIN_SHARED",
+ new=blivet.tasks.availability.ExternalResource(blivet.tasks.availability.AvailableMethod, ""))
+ def test_vg_create_shared(self):
+ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("1 GiB"), exists=True)
+ vg = LVMVolumeGroupDevice("testvg", parents=[pv], shared=True)
+
+ with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
+ vg._create()
+ lvm.vgcreate.assert_called_with(vg.name, [pv.path], Size("4 MiB"), shared="")
+ lvm.vglock_start.assert_called_with(vg.name)
+
def test_vg_is_empty(self):
pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
size=Size("1024 MiB"))
--
2.41.0

@@ -0,0 +1,60 @@
From d7708bca72f4a7d0bfa732912e2087bd6aa8f379 Mon Sep 17 00:00:00 2001
From: Steffen Maier <maier@linux.ibm.com>
Date: Thu, 23 Feb 2023 13:28:50 +0100
Subject: [PATCH] add udev-builtin-path_id property to zfcp-attached SCSI disks
so anaconda can use it to display path_id information for multipath
members
Signed-off-by: Steffen Maier <maier@linux.ibm.com>
---
blivet/devices/disk.py | 2 ++
blivet/populator/helpers/disk.py | 1 +
tests/unit_tests/tags_test.py | 2 +-
3 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index 8842b4dc..746f6d58 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -556,10 +556,12 @@ class ZFCPDiskDevice(DiskDevice):
:keyword hba_id: ???
:keyword wwpn: ???
:keyword fcp_lun: ???
+ :keyword id_path: string from udev-builtin-path_id
"""
self.hba_id = kwargs.pop("hba_id")
self.wwpn = kwargs.pop("wwpn")
self.fcp_lun = kwargs.pop("fcp_lun")
+ self.id_path = kwargs.pop("id_path")
DiskDevice.__init__(self, device, **kwargs)
self._clear_local_tags()
self.tags.add(Tags.remote)
diff --git a/blivet/populator/helpers/disk.py b/blivet/populator/helpers/disk.py
index cf20d302..92e85688 100644
--- a/blivet/populator/helpers/disk.py
+++ b/blivet/populator/helpers/disk.py
@@ -223,6 +223,7 @@ class ZFCPDevicePopulator(DiskDevicePopulator):
def _get_kwargs(self):
kwargs = super(ZFCPDevicePopulator, self)._get_kwargs()
+ kwargs["id_path"] = udev.device_get_path(self.data)
for attr in ['hba_id', 'wwpn', 'fcp_lun']:
kwargs[attr] = udev.device_get_zfcp_attribute(self.data, attr=attr)
diff --git a/tests/unit_tests/tags_test.py b/tests/unit_tests/tags_test.py
index 49a2d72e..15fa2a40 100644
--- a/tests/unit_tests/tags_test.py
+++ b/tests/unit_tests/tags_test.py
@@ -72,7 +72,7 @@ class DeviceTagsTest(unittest.TestCase):
fcoe_device = FcoeDiskDevice('test6', nic=None, identifier=None, id_path=None)
self.assertIn(Tags.remote, fcoe_device.tags)
self.assertNotIn(Tags.local, fcoe_device.tags)
- zfcp_device = ZFCPDiskDevice('test7', hba_id=None, wwpn=None, fcp_lun=None)
+ zfcp_device = ZFCPDiskDevice('test7', hba_id=None, wwpn=None, fcp_lun=None, id_path=None)
self.assertIn(Tags.remote, zfcp_device.tags)
self.assertNotIn(Tags.local, zfcp_device.tags)
--
2.43.0

@@ -0,0 +1,172 @@
From 517f17481685afbabea6750b57d71a736f9a157e Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 25 May 2023 17:02:39 +0200
Subject: [PATCH] Do not add new PVs to the LVM devices file if it doesn't
exist and VGs are present
If there is a preexisting VG on the system when we create a new PV
and the LVM devices file doesn't exist, we would create the file and
add only the new PV to it, which means the preexisting VG would now
be ignored by LVM tools. This change skips adding newly created PVs
to the devices file in such cases, the same way 'pvcreate' and
'vgcreate' do.
---
blivet/devicelibs/lvm.py | 3 +
blivet/formats/lvmpv.py | 17 ++++-
tests/unit_tests/formats_tests/__init__.py | 1 +
tests/unit_tests/formats_tests/lvmpv_test.py | 73 ++++++++++++++++++++
4 files changed, 91 insertions(+), 3 deletions(-)
create mode 100644 tests/unit_tests/formats_tests/lvmpv_test.py
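
The guard reduces to a single condition; a condensed, standalone restatement of the new check in lvmdevices_add() (not a separate blivet API):

    import os

    LVM_DEVICES_FILE = "/etc/lvm/devices/system.devices"

    def skip_devices_add(vgs_present, force=False):
        # If pvcreate would implicitly create the devices file on a system
        # that already has VGs, listing only the new PV there would hide
        # those VGs from the LVM tools -- so don't add the PV at all.
        return not force and not os.path.exists(LVM_DEVICES_FILE) and vgs_present

    print(skip_devices_add(vgs_present=True))   # True  -> PV is not added
    print(skip_devices_add(vgs_present=False))  # False -> safe to add
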
diff --git a/blivet/devicelibs/lvm.py b/blivet/devicelibs/lvm.py
index 16a8e8f8..dc7d0cbe 100644
--- a/blivet/devicelibs/lvm.py
+++ b/blivet/devicelibs/lvm.py
@@ -84,6 +84,9 @@ if hasattr(blockdev.LVMTech, "DEVICES"):
else:
HAVE_LVMDEVICES = False
+
+LVM_DEVICES_FILE = "/etc/lvm/devices/system.devices"
+
# list of devices that LVM is allowed to use
# with LVM >= 2.0.13 we'll use this for the --devices option and when creating
# the /etc/lvm/devices/system.devices file
diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py
index cb01b2f3..65acedbe 100644
--- a/blivet/formats/lvmpv.py
+++ b/blivet/formats/lvmpv.py
@@ -36,7 +36,7 @@ from ..size import Size
from ..errors import PhysicalVolumeError
from . import DeviceFormat, register_device_format
from .. import udev
-from ..static_data.lvm_info import pvs_info
+from ..static_data.lvm_info import pvs_info, vgs_info
import logging
log = logging.getLogger("blivet")
@@ -121,10 +121,21 @@ class LVMPhysicalVolume(DeviceFormat):
def supported(self):
return super(LVMPhysicalVolume, self).supported and self._plugin.available
- def lvmdevices_add(self):
+ def lvmdevices_add(self, force=True):
+ """ Add this PV to the LVM system devices file
+ :keyword force: whether to add the PV even if the system devices file doesn't exist and
+ VGs are present in the system
+ :type force: bool
+ """
+
if not lvm.HAVE_LVMDEVICES:
raise PhysicalVolumeError("LVM devices file feature is not supported")
+ if not os.path.exists(lvm.LVM_DEVICES_FILE) and vgs_info.cache and not force:
+ log.debug("Not adding %s to devices file: %s doesn't exist and there are VGs present in the system",
+ self.device, lvm.LVM_DEVICES_FILE)
+ return
+
try:
blockdev.lvm.devices_add(self.device)
except blockdev.LVMError as e:
@@ -151,7 +162,7 @@ class LVMPhysicalVolume(DeviceFormat):
# with lvmdbusd we need to call the pvcreate without --devices otherwise lvmdbusd
# wouldn't be able to find the newly created pv and the call would fail
blockdev.lvm.pvcreate(self.device, data_alignment=self.data_alignment, extra=[ea_yes])
- self.lvmdevices_add()
+ self.lvmdevices_add(force=False)
else:
blockdev.lvm.pvcreate(self.device, data_alignment=self.data_alignment, extra=[ea_yes])
diff --git a/tests/unit_tests/formats_tests/__init__.py b/tests/unit_tests/formats_tests/__init__.py
index d678900b..95c7a25b 100644
--- a/tests/unit_tests/formats_tests/__init__.py
+++ b/tests/unit_tests/formats_tests/__init__.py
@@ -2,6 +2,7 @@ from .device_test import *
from .disklabel_test import *
from .init_test import *
from .luks_test import *
+from .lvmpv_test import *
from .methods_test import *
from .misc_test import *
from .selinux_test import *
diff --git a/tests/unit_tests/formats_tests/lvmpv_test.py b/tests/unit_tests/formats_tests/lvmpv_test.py
new file mode 100644
index 00000000..6490c7d4
--- /dev/null
+++ b/tests/unit_tests/formats_tests/lvmpv_test.py
@@ -0,0 +1,73 @@
+try:
+ from unittest.mock import patch
+except ImportError:
+ from mock import patch
+
+from contextlib import contextmanager
+
+import unittest
+
+from blivet.formats.lvmpv import LVMPhysicalVolume
+
+
+class LVMPVNodevTestCase(unittest.TestCase):
+
+ @contextmanager
+ def patches(self):
+ patchers = dict()
+ mocks = dict()
+
+ patchers["blockdev"] = patch("blivet.formats.lvmpv.blockdev")
+ patchers["lvm"] = patch("blivet.formats.lvmpv.lvm")
+ patchers["vgs_info"] = patch("blivet.formats.lvmpv.vgs_info")
+ patchers["os"] = patch("blivet.formats.lvmpv.os")
+
+ for name, patcher in patchers.items():
+ mocks[name] = patcher.start()
+
+ yield mocks
+
+ for patcher in patchers.values():
+ patcher.stop()
+
+ def test_lvm_devices(self):
+ fmt = LVMPhysicalVolume(device="/dev/test")
+
+ with self.patches() as mock:
+ # LVM devices file not enabled/supported -> devices_add should not be called
+ mock["lvm"].HAVE_LVMDEVICES = False
+
+ fmt._create()
+
+ mock["blockdev"].lvm.devices_add.assert_not_called()
+
+ with self.patches() as mock:
+ # LVM devices file enabled and devices file exists -> devices_add should be called
+ mock["lvm"].HAVE_LVMDEVICES = True
+ mock["os"].path.exists.return_value = True
+
+ fmt._create()
+
+ mock["blockdev"].lvm.devices_add.assert_called_with("/dev/test")
+
+ with self.patches() as mock:
+ # LVM devices file enabled and devices file doesn't exist
+ # and no existing VGs present -> devices_add should be called
+ mock["lvm"].HAVE_LVMDEVICES = True
+ mock["os"].path.exists.return_value = False
+ mock["vgs_info"].cache = {}
+
+ fmt._create()
+
+ mock["blockdev"].lvm.devices_add.assert_called_with("/dev/test")
+
+ with self.patches() as mock:
+ # LVM devices file enabled and devices file doesn't exist
+ # and existing VGs present -> devices_add should not be called
+ mock["lvm"].HAVE_LVMDEVICES = True
+ mock["os"].path.exists.return_value = False
+ mock["vgs_info"].cache = {"fake_vg_uuid": "fake_vg_data"}
+
+ fmt._create()
+
+ mock["blockdev"].lvm.devices_add.assert_not_called()
--
2.43.0
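To make the new behaviour easier to follow, here is a standalone sketch of the decision lvmdevices_add() now makes. The helper name and arguments are invented for illustration; only the condition itself comes from the patch (the sketch also returns False where the real method raises PhysicalVolumeError for an unsupported feature).

import os

# Path taken from the patch above.
LVM_DEVICES_FILE = "/etc/lvm/devices/system.devices"

def should_add_to_devices_file(have_lvmdevices, existing_vgs, force=False):
    """Sketch: should a newly created PV be added to system.devices?"""
    if not have_lvmdevices:
        # LVM devices file feature not available at all.
        return False
    if not os.path.exists(LVM_DEVICES_FILE) and existing_vgs and not force:
        # No devices file yet, but VGs already exist: creating the file
        # with only the new PV in it would hide those VGs from the LVM
        # tools, so do nothing -- exactly what 'pvcreate'/'vgcreate' do.
        return False
    return True

Because _create() now calls lvmdevices_add(force=False), a PV created on a system with preexisting VGs and no devices file is skipped, while all other cases keep the old behaviour.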

View File

@ -23,7 +23,7 @@ Version: 3.6.0
#%%global prerelease .b2
# prerelease, if defined, should be something like .a1, .b1, .b2.dev1, or .c2
-Release: 5%{?prerelease}%{?dist}
+Release: 14%{?prerelease}%{?dist}
Epoch: 1
License: LGPLv2+
%global realname blivet
@ -41,6 +41,18 @@ Patch7: 0008-Revert-Adjust-to-new-XFS-min-size.patch
Patch8: 0009-Catch-BlockDevNotImplementedError-for-btrfs-plugin-c.patch
Patch9: 0010-Add-basic-support-for-NVMe-and-NVMe-Fabrics-devices.patch
Patch10: 0011-Default-to-encryption-sector-size-512-for-LUKS-devic.patch
+Patch11: 0012-Add-support-for-specifying-stripe-size-for-RAID-LVs.patch
+Patch12: 0013-Fix-setting-kickstart-data.patch
+Patch13: 0014-Do-not-set-memory-limit-for-LUKS2-when-running-in-FI.patch
+Patch14: 0015-Add-support-for-filesystem-online-resize.patch
+Patch15: 0016-Backport-iSCSI-initiator-name-related-fixes.patch
+Patch16: 0017-nvme-additional-rpms-for-dracut.patch
+Patch17: 0018-nvme-TP4126-fixes-1.patch
+Patch18: 0019-nvme-hostnqn_from_active_fabrics_connection.patch
+Patch19: 0020-nvme-add_unit_tests.patch
+Patch20: 0021-Add-support-for-creating-shared-LVM-setups.patch
+Patch21: 0022-add-udev-builtin-path_id-property-to-zfcp-attached-S.patch
+Patch22: 0023-Do-not-add-new-PVs-to-the-LVM-devices-file-if-it-doe.patch
# Versions of required components (done so we make sure the buildrequires
# match the requires versions of things).
@ -204,6 +216,56 @@
%endif
%changelog
+* Fri Feb 09 2024 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-14
+- Do not add new PVs to the LVM devices file if it doesn't exist and VGs are present
+  Resolves: RHEL-473
+* Thu Jan 18 2024 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-13
+- add udev-builtin-path_id property to zfcp-attached SCSI disks
+  Resolves: RHEL-22007
+* Wed Dec 13 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-12
+- Add support for creating shared LVM setups
+  Resolves: RHEL-324
+* Mon Dec 11 2023 Tomas Bzatek <tbzatek@redhat.com> - 3.6.0-11
+- nvme: Retrieve HostNQN from a first active fabrics connection
+- tests: Add a simple unit test for the NVMe module
+  Resolves: RHEL-11541
+* Tue Sep 26 2023 Tomas Bzatek <tbzatek@redhat.com> - 3.6.0-10
+- nvme: Require additional rpms for dracut
+  Resolves: RHEL-2855
+- nvme: Align HostNQN and HostID format to TP-4126
+  Resolves: RHEL-1254
+* Mon Jul 24 2023 Jan Pokorny <japokorn@redhat.com> - 3.6.0-9
+  Backport iSCSI initiator name related fixes:
+- Allow changing iSCSI initiator name after setting it
+  Resolves: rhbz#2221935
+- Add a basic test case for the iscsi module
+  Resolves: rhbz#2221935
+- tests: Use blivet-specific prefix for targetcli backing files
+  Resolves: rhbz#2221935
+- iscsi: Save firmware initiator name to /etc/iscsi/initiatorname.iscsi
+  Resolves: rhbz#2221932
+- tests: Improve iscsi_test.ISCSITestCase
+  Resolves: rhbz#2221935
+* Wed May 24 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-8
+- Add support for filesystem online resize
+  Resolves: RHEL-326
+* Thu May 18 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-7
+- Fix setting kickstart data
+  Resolves: rhbz#2174296
+- Do not set memory limit for LUKS2 when running in FIPS mode
+  Resolves: rhbz#2193096
+* Tue May 02 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-6
+- Add support for specifying stripe size for RAID LVs
+  Resolves: RHEL-327
* Thu Jan 19 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-5
- Default to encryption sector size 512 for LUKS devices
  Resolves: rhbz#2103800