forked from rpms/python-blivet

Import from AlmaLinux stable repository

parent 3d132c95da
commit a76ce8e66f
				| @ -1,2 +0,0 @@ | ||||
| 8393baa22cb433d1012e3923ad0bc232401116c6 SOURCES/blivet-3.6.0-tests.tar.gz | ||||
| e9d95c1165703fed3da1f35a9199197bfff68f98 SOURCES/blivet-3.6.0.tar.gz | ||||
| @ -0,0 +1,172 @@ | ||||
| From 11c3e695d9a2130f325bb5459a9881ff70338f71 Mon Sep 17 00:00:00 2001 | ||||
| From: Vojtech Trefny <vtrefny@redhat.com> | ||||
| Date: Thu, 9 Mar 2023 13:18:42 +0100 | ||||
| Subject: [PATCH] Add support for specifying stripe size for RAID LVs | ||||
| 
 | ||||
| ---
 | ||||
|  blivet/devices/lvm.py                        | 28 +++++++++++++++++--- | ||||
|  tests/storage_tests/devices_test/lvm_test.py | 12 +++++++-- | ||||
|  tests/unit_tests/devices_test/lvm_test.py    | 27 +++++++++++++++++++ | ||||
|  3 files changed, 61 insertions(+), 6 deletions(-) | ||||
| 
 | ||||
| diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
 | ||||
| index b8595d63..41358e9b 100644
 | ||||
| --- a/blivet/devices/lvm.py
 | ||||
| +++ b/blivet/devices/lvm.py
 | ||||
| @@ -659,7 +659,8 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
 | ||||
|   | ||||
|      def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None, | ||||
|                   fmt=None, exists=False, sysfs_path='', grow=None, maxsize=None, | ||||
| -                 percent=None, cache_request=None, pvs=None, from_lvs=None):
 | ||||
| +                 percent=None, cache_request=None, pvs=None, from_lvs=None,
 | ||||
| +                 stripe_size=0):
 | ||||
|   | ||||
|          if not exists: | ||||
|              if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo", "cache-pool"] + lvm.raid_seg_types: | ||||
| @@ -756,6 +757,15 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
 | ||||
|          if self._pv_specs: | ||||
|              self._assign_pv_space() | ||||
|   | ||||
| +        self._stripe_size = stripe_size
 | ||||
| +        if not self.exists and self._stripe_size:
 | ||||
| +            if self.seg_type not in lvm.raid_seg_types:
 | ||||
| +                raise errors.DeviceError("Stripe size can be specified only for RAID volumes")
 | ||||
| +            if self.seg_type in ("raid1", "RAID1", "1", 1, "mirror"):
 | ||||
| +                raise errors.DeviceError("Specifying stripe size is not allowed for RAID1 or mirror")
 | ||||
| +            if self.cache:
 | ||||
| +                raise errors.DeviceError("Creating cached LVs with custom stripe size is not supported")
 | ||||
| +
 | ||||
|      def _assign_pv_space(self): | ||||
|          if not self.is_raid_lv: | ||||
|              # nothing to do for non-RAID (and thus non-striped) LVs here | ||||
| @@ -2295,7 +2305,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
 | ||||
|                   parent_lv=None, int_type=None, origin=None, vorigin=False, | ||||
|                   metadata_size=None, chunk_size=None, profile=None, from_lvs=None, | ||||
|                   compression=False, deduplication=False, index_memory=0, | ||||
| -                 write_policy=None, cache_mode=None, attach_to=None):
 | ||||
| +                 write_policy=None, cache_mode=None, attach_to=None, stripe_size=0):
 | ||||
|          """ | ||||
|              :param name: the device name (generally a device node's basename) | ||||
|              :type name: str | ||||
| @@ -2375,6 +2385,11 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
 | ||||
|                                  be attached to when created | ||||
|              :type attach_to: :class:`LVMLogicalVolumeDevice` | ||||
|   | ||||
| +            For RAID LVs only:
 | ||||
| +
 | ||||
| +            :keyword stripe_size: size of the RAID stripe
 | ||||
| +            :type stripe_size: :class:`~.size.Size`
 | ||||
| +
 | ||||
|          """ | ||||
|   | ||||
|          if isinstance(parents, (list, ParentList)): | ||||
| @@ -2395,7 +2410,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
 | ||||
|          LVMCachePoolMixin.__init__(self, metadata_size, cache_mode, attach_to) | ||||
|          LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type, | ||||
|                                        fmt, exists, sysfs_path, grow, maxsize, | ||||
| -                                      percent, cache_request, pvs, from_lvs)
 | ||||
| +                                      percent, cache_request, pvs, from_lvs,
 | ||||
| +                                      stripe_size)
 | ||||
|          LVMVDOPoolMixin.__init__(self, compression, deduplication, index_memory, | ||||
|                                   write_policy) | ||||
|          LVMVDOLogicalVolumeMixin.__init__(self) | ||||
| @@ -2651,8 +2667,12 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
 | ||||
|              pvs = [spec.pv.path for spec in self._pv_specs] | ||||
|              pvs = pvs or None | ||||
|   | ||||
| +            extra = dict()
 | ||||
| +            if self._stripe_size:
 | ||||
| +                extra["stripesize"] = str(int(self._stripe_size.convert_to("KiB")))
 | ||||
| +
 | ||||
|              blockdev.lvm.lvcreate(self.vg.name, self._name, self.size, | ||||
| -                                  type=self.seg_type, pv_list=pvs)
 | ||||
| +                                  type=self.seg_type, pv_list=pvs, **extra)
 | ||||
|          else: | ||||
|              fast_pvs = [pv.path for pv in self.cache.fast_pvs] | ||||
|   | ||||
| diff --git a/tests/storage_tests/devices_test/lvm_test.py b/tests/storage_tests/devices_test/lvm_test.py
 | ||||
| index a055fc27..97ef1c4b 100644
 | ||||
| --- a/tests/storage_tests/devices_test/lvm_test.py
 | ||||
| +++ b/tests/storage_tests/devices_test/lvm_test.py
 | ||||
| @@ -1,4 +1,5 @@
 | ||||
|  import os | ||||
| +import subprocess
 | ||||
|   | ||||
|  from ..storagetestcase import StorageTestCase | ||||
|   | ||||
| @@ -127,7 +128,7 @@ class LVMTestCase(StorageTestCase):
 | ||||
|          self.assertTrue(snap.is_snapshot_lv) | ||||
|          self.assertEqual(snap.origin, thinlv) | ||||
|   | ||||
| -    def _test_lvm_raid(self, seg_type, raid_level):
 | ||||
| +    def _test_lvm_raid(self, seg_type, raid_level, stripe_size=0):
 | ||||
|          disk1 = self.storage.devicetree.get_device_by_path(self.vdevs[0]) | ||||
|          self.assertIsNotNone(disk1) | ||||
|          self.storage.initialize_disk(disk1) | ||||
| @@ -151,7 +152,7 @@ class LVMTestCase(StorageTestCase):
 | ||||
|   | ||||
|          raidlv = self.storage.new_lv(fmt_type="ext4", size=blivet.size.Size("50 MiB"), | ||||
|                                       parents=[vg], name="blivetTestRAIDLV", | ||||
| -                                     seg_type=seg_type, pvs=[pv1, pv2])
 | ||||
| +                                     seg_type=seg_type, pvs=[pv1, pv2], stripe_size=stripe_size)
 | ||||
|          self.storage.create_device(raidlv) | ||||
|   | ||||
|          self.storage.do_it() | ||||
| @@ -163,9 +164,16 @@ class LVMTestCase(StorageTestCase):
 | ||||
|          self.assertEqual(raidlv.raid_level, raid_level) | ||||
|          self.assertEqual(raidlv.seg_type, seg_type) | ||||
|   | ||||
| +        if stripe_size:
 | ||||
| +            out = subprocess.check_output(["lvs", "-o", "stripe_size", "--noheadings", "--nosuffix", "--units=b", raidlv.vg.name + "/" + raidlv.lvname])
 | ||||
| +            self.assertEqual(out.decode().strip(), str(int(stripe_size.convert_to())))
 | ||||
| +
 | ||||
|      def test_lvm_raid_raid0(self): | ||||
|          self._test_lvm_raid("raid0", blivet.devicelibs.raid.RAID0) | ||||
|   | ||||
| +    def test_lvm_raid_raid0_stripe_size(self):
 | ||||
| +        self._test_lvm_raid("raid0", blivet.devicelibs.raid.RAID0, stripe_size=blivet.size.Size("1 MiB"))
 | ||||
| +
 | ||||
|      def test_lvm_raid_striped(self): | ||||
|          self._test_lvm_raid("striped", blivet.devicelibs.raid.Striped) | ||||
|   | ||||
| diff --git a/tests/unit_tests/devices_test/lvm_test.py b/tests/unit_tests/devices_test/lvm_test.py
 | ||||
| index 995c2da4..d7b55224 100644
 | ||||
| --- a/tests/unit_tests/devices_test/lvm_test.py
 | ||||
| +++ b/tests/unit_tests/devices_test/lvm_test.py
 | ||||
| @@ -363,6 +363,33 @@ class LVMDeviceTest(unittest.TestCase):
 | ||||
|          self.assertEqual(pv.format.free, Size("264 MiB")) | ||||
|          self.assertEqual(pv2.format.free, Size("256 MiB")) | ||||
|   | ||||
| +    def test_lvm_logical_volume_raid_stripe_size(self):
 | ||||
| +        pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
 | ||||
| +                           size=Size("1025 MiB"))
 | ||||
| +        pv2 = StorageDevice("pv2", fmt=blivet.formats.get_format("lvmpv"),
 | ||||
| +                            size=Size("513 MiB"))
 | ||||
| +        vg = LVMVolumeGroupDevice("testvg", parents=[pv, pv2])
 | ||||
| +
 | ||||
| +        with self.assertRaises(blivet.errors.DeviceError):
 | ||||
| +            # non-raid LV
 | ||||
| +            lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("1 GiB"),
 | ||||
| +                                        fmt=blivet.formats.get_format("xfs"),
 | ||||
| +                                        exists=False, stripe_size=Size("1 MiB"))
 | ||||
| +
 | ||||
| +        with self.assertRaises(blivet.errors.DeviceError):
 | ||||
| +            # raid1 LV
 | ||||
| +            lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("1 GiB"),
 | ||||
| +                                        fmt=blivet.formats.get_format("xfs"),
 | ||||
| +                                        exists=False, seg_type="raid1", pvs=[pv, pv2],
 | ||||
| +                                        stripe_size=Size("1 MiB"))
 | ||||
| +
 | ||||
| +        lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("1 GiB"),
 | ||||
| +                                    fmt=blivet.formats.get_format("xfs"),
 | ||||
| +                                    exists=False, seg_type="raid0", pvs=[pv, pv2],
 | ||||
| +                                    stripe_size=Size("1 MiB"))
 | ||||
| +
 | ||||
| +        self.assertEqual(lv._stripe_size, Size("1 MiB"))
 | ||||
| +
 | ||||
|      def test_target_size(self): | ||||
|          pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"), | ||||
|                             size=Size("1 GiB")) | ||||
| -- 
 | ||||
| 2.40.1 | ||||
| 
 | ||||
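The storage test in this patch exercises the new keyword through the public Blivet API. A minimal sketch of the same call path, assuming an already scanned blivet.Blivet instance plus a VG named "testvg" backed by PVs "pv1" and "pv2" (these names are illustrative, not part of the patch):

    import blivet
    from blivet.size import Size

    b = blivet.Blivet()
    b.reset()   # scan existing storage

    vg = b.devicetree.get_device_by_name("testvg")
    pv1 = b.devicetree.get_device_by_name("pv1")
    pv2 = b.devicetree.get_device_by_name("pv2")

    # stripe_size is only accepted for striped RAID segment types; raid1,
    # mirror and cached LVs raise DeviceError in the constructor
    raidlv = b.new_lv(fmt_type="ext4", size=Size("50 MiB"), parents=[vg],
                      name="raidlv", seg_type="raid0", pvs=[pv1, pv2],
                      stripe_size=Size("1 MiB"))
    b.create_device(raidlv)
    b.do_it()   # lvcreate is then called with stripesize converted to KiB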
							
								
								
									
SOURCES/0013-Fix-setting-kickstart-data.patch (new file, 68 lines)
							| @ -0,0 +1,68 @@ | ||||
| From 1af0d3c37a93e431790e641a329a7f34dabf291a Mon Sep 17 00:00:00 2001 | ||||
| From: Vojtech Trefny <vtrefny@redhat.com> | ||||
| Date: Thu, 2 Mar 2023 12:34:42 +0100 | ||||
| Subject: [PATCH] Fix setting kickstart data | ||||
| 
 | ||||
| When changing our code to be PEP 8 compliant we also accidentally | ||||
| renamed some pykickstart properties like onPart. This PR fixes that. | ||||
| 
 | ||||
| Resolves: rhbz#2175166 | ||||
| ---
 | ||||
|  blivet/devices/btrfs.py     | 4 ++-- | ||||
|  blivet/devices/lvm.py       | 2 +- | ||||
|  blivet/devices/partition.py | 6 +++--- | ||||
|  3 files changed, 6 insertions(+), 6 deletions(-) | ||||
| 
 | ||||
| diff --git a/blivet/devices/btrfs.py b/blivet/devices/btrfs.py
 | ||||
| index 1ae6a04d..3f56624e 100644
 | ||||
| --- a/blivet/devices/btrfs.py
 | ||||
| +++ b/blivet/devices/btrfs.py
 | ||||
| @@ -498,8 +498,8 @@ class BTRFSVolumeDevice(BTRFSDevice, ContainerDevice, RaidDevice):
 | ||||
|   | ||||
|      def populate_ksdata(self, data): | ||||
|          super(BTRFSVolumeDevice, self).populate_ksdata(data) | ||||
| -        data.data_level = self.data_level.name if self.data_level else None
 | ||||
| -        data.metadata_level = self.metadata_level.name if self.metadata_level else None
 | ||||
| +        data.dataLevel = self.data_level.name if self.data_level else None
 | ||||
| +        data.metaDataLevel = self.metadata_level.name if self.metadata_level else None
 | ||||
|          data.devices = ["btrfs.%d" % p.id for p in self.parents] | ||||
|          data.preexist = self.exists | ||||
|   | ||||
| diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
 | ||||
| index 41358e9b..c3132457 100644
 | ||||
| --- a/blivet/devices/lvm.py
 | ||||
| +++ b/blivet/devices/lvm.py
 | ||||
| @@ -1161,7 +1161,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
 | ||||
|   | ||||
|              if self.req_grow: | ||||
|                  # base size could be literal or percentage | ||||
| -                data.max_size_mb = self.req_max_size.convert_to(MiB)
 | ||||
| +                data.maxSizeMB = self.req_max_size.convert_to(MiB)
 | ||||
|          elif data.resize: | ||||
|              data.size = self.target_size.convert_to(MiB) | ||||
|   | ||||
| diff --git a/blivet/devices/partition.py b/blivet/devices/partition.py
 | ||||
| index 89d907c2..0e9250ce 100644
 | ||||
| --- a/blivet/devices/partition.py
 | ||||
| +++ b/blivet/devices/partition.py
 | ||||
| @@ -982,14 +982,14 @@ class PartitionDevice(StorageDevice):
 | ||||
|              data.size = self.req_base_size.round_to_nearest(MiB, rounding=ROUND_DOWN).convert_to(spec=MiB) | ||||
|              data.grow = self.req_grow | ||||
|              if self.req_grow: | ||||
| -                data.max_size_mb = self.req_max_size.convert_to(MiB)
 | ||||
| +                data.maxSizeMB = self.req_max_size.convert_to(MiB)
 | ||||
|   | ||||
|              # data.disk = self.disk.name                      # by-id | ||||
|              if self.req_disks and len(self.req_disks) == 1: | ||||
|                  data.disk = self.disk.name | ||||
| -            data.prim_only = self.req_primary
 | ||||
| +            data.primOnly = self.req_primary
 | ||||
|          else: | ||||
| -            data.on_part = self.name                     # by-id
 | ||||
| +            data.onPart = self.name                     # by-id
 | ||||
|   | ||||
|              if data.resize: | ||||
|                  # on s390x in particular, fractional sizes are reported, which | ||||
| -- 
 | ||||
| 2.40.1 | ||||
| 
 | ||||
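The rename matters because pykickstart data objects expose camelCase attributes; assigning the snake_case spellings introduced by the PEP 8 cleanup just creates new, unused attributes. A small illustration, assuming pykickstart is installed (exact handler classes vary by version):

    from pykickstart.version import makeVersion

    handler = makeVersion()   # handler for the default kickstart syntax version
    part = handler.PartData()

    part.on_part = "vda1"     # snake_case: silently ignored by pykickstart
    part.onPart = "vda1"      # camelCase: the attribute pykickstart actually reads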
| @ -0,0 +1,133 @@ | ||||
| From c2b06150df0b876c7d442097b6c9ca90c9ca2ecc Mon Sep 17 00:00:00 2001 | ||||
| From: Vojtech Trefny <vtrefny@redhat.com> | ||||
| Date: Thu, 4 May 2023 11:35:44 +0200 | ||||
| Subject: [PATCH] Do not set memory limit for LUKS2 when running in FIPS mode | ||||
| 
 | ||||
| With FIPS enabled, LUKS2 uses PBKDF2 instead of Argon2, so the memory | ||||
| limit is not a valid parameter. | ||||
| 
 | ||||
| Resolves: rhbz#2183437 | ||||
| ---
 | ||||
|  blivet/devicelibs/crypto.py                   | 11 +++++++ | ||||
|  blivet/formats/luks.py                        | 12 ++++---- | ||||
|  tests/unit_tests/formats_tests/luks_test.py   | 30 +++++++++++++++++++ | ||||
|  .../unit_tests/formats_tests/methods_test.py  |  3 +- | ||||
|  4 files changed, 50 insertions(+), 6 deletions(-) | ||||
| 
 | ||||
| diff --git a/blivet/devicelibs/crypto.py b/blivet/devicelibs/crypto.py
 | ||||
| index f0caf0f7..68e68db1 100644
 | ||||
| --- a/blivet/devicelibs/crypto.py
 | ||||
| +++ b/blivet/devicelibs/crypto.py
 | ||||
| @@ -21,6 +21,7 @@
 | ||||
|  # | ||||
|   | ||||
|  import hashlib | ||||
| +import os
 | ||||
|   | ||||
|  import gi | ||||
|  gi.require_version("BlockDev", "2.0") | ||||
| @@ -100,3 +101,13 @@ def calculate_integrity_metadata_size(device_size, algorithm=DEFAULT_INTEGRITY_A
 | ||||
|      jsize = (jsize / SECTOR_SIZE + 1) * SECTOR_SIZE  # round up to sector | ||||
|   | ||||
|      return msize + jsize | ||||
| +
 | ||||
| +
 | ||||
| +def is_fips_enabled():
 | ||||
| +    if not os.path.exists("/proc/sys/crypto/fips_enabled"):
 | ||||
| +        # if the file doesn't exist, we are definitely not in FIPS mode
 | ||||
| +        return False
 | ||||
| +
 | ||||
| +    with open("/proc/sys/crypto/fips_enabled", "r") as f:
 | ||||
| +        enabled = f.read()
 | ||||
| +    return enabled.strip() == "1"
 | ||||
| diff --git a/blivet/formats/luks.py b/blivet/formats/luks.py
 | ||||
| index 2637e0c5..adf3c711 100644
 | ||||
| --- a/blivet/formats/luks.py
 | ||||
| +++ b/blivet/formats/luks.py
 | ||||
| @@ -303,11 +303,13 @@ class LUKS(DeviceFormat):
 | ||||
|              if luks_data.pbkdf_args: | ||||
|                  self.pbkdf_args = luks_data.pbkdf_args | ||||
|              else: | ||||
| -                mem_limit = crypto.calculate_luks2_max_memory()
 | ||||
| -                if mem_limit:
 | ||||
| -                    self.pbkdf_args = LUKS2PBKDFArgs(max_memory_kb=int(mem_limit.convert_to(KiB)))
 | ||||
| -                    luks_data.pbkdf_args = self.pbkdf_args
 | ||||
| -                    log.info("PBKDF arguments for LUKS2 not specified, using defaults with memory limit %s", mem_limit)
 | ||||
| +                # argon is not used with FIPS so we don't need to adjust the memory when in FIPS mode
 | ||||
| +                if not crypto.is_fips_enabled():
 | ||||
| +                    mem_limit = crypto.calculate_luks2_max_memory()
 | ||||
| +                    if mem_limit:
 | ||||
| +                        self.pbkdf_args = LUKS2PBKDFArgs(max_memory_kb=int(mem_limit.convert_to(KiB)))
 | ||||
| +                        luks_data.pbkdf_args = self.pbkdf_args
 | ||||
| +                        log.info("PBKDF arguments for LUKS2 not specified, using defaults with memory limit %s", mem_limit)
 | ||||
|   | ||||
|          if self.pbkdf_args: | ||||
|              pbkdf = blockdev.CryptoLUKSPBKDF(type=self.pbkdf_args.type, | ||||
| diff --git a/tests/unit_tests/formats_tests/luks_test.py b/tests/unit_tests/formats_tests/luks_test.py
 | ||||
| index ec7b7592..1127e968 100644
 | ||||
| --- a/tests/unit_tests/formats_tests/luks_test.py
 | ||||
| +++ b/tests/unit_tests/formats_tests/luks_test.py
 | ||||
| @@ -6,9 +6,14 @@ except ImportError:
 | ||||
|  import unittest | ||||
|   | ||||
|  from blivet.formats.luks import LUKS | ||||
| +from blivet.size import Size
 | ||||
| +from blivet.static_data import luks_data
 | ||||
|   | ||||
|   | ||||
|  class LUKSNodevTestCase(unittest.TestCase): | ||||
| +    def setUp(self):
 | ||||
| +        luks_data.pbkdf_args = None
 | ||||
| +
 | ||||
|      def test_create_discard_option(self): | ||||
|          # flags.discard_new=False --> no discard | ||||
|          fmt = LUKS(exists=False) | ||||
| @@ -51,6 +56,31 @@ class LUKSNodevTestCase(unittest.TestCase):
 | ||||
|          fmt = LUKS(cipher="aes-cbc-plain64") | ||||
|          self.assertEqual(fmt.key_size, 0) | ||||
|   | ||||
| +    def test_luks2_pbkdf_memory_fips(self):
 | ||||
| +        fmt = LUKS()
 | ||||
| +        with patch("blivet.formats.luks.blockdev.crypto") as bd:
 | ||||
| +            # fips enabled, pbkdf memory should not be set
 | ||||
| +            with patch("blivet.formats.luks.crypto") as crypto:
 | ||||
| +                attrs = {"is_fips_enabled.return_value": True,
 | ||||
| +                         "get_optimal_luks_sector_size.return_value": 0,
 | ||||
| +                         "calculate_luks2_max_memory.return_value": Size("256 MiB")}
 | ||||
| +                crypto.configure_mock(**attrs)
 | ||||
| +
 | ||||
| +                fmt._create()
 | ||||
| +                crypto.calculate_luks2_max_memory.assert_not_called()
 | ||||
| +                self.assertEqual(bd.luks_format.call_args[1]["extra"].pbkdf.max_memory_kb, 0)
 | ||||
| +
 | ||||
| +            # fips disabled, pbkdf memory should be set
 | ||||
| +            with patch("blivet.formats.luks.crypto") as crypto:
 | ||||
| +                attrs = {"is_fips_enabled.return_value": False,
 | ||||
| +                         "get_optimal_luks_sector_size.return_value": 0,
 | ||||
| +                         "calculate_luks2_max_memory.return_value": Size("256 MiB")}
 | ||||
| +                crypto.configure_mock(**attrs)
 | ||||
| +
 | ||||
| +                fmt._create()
 | ||||
| +                crypto.calculate_luks2_max_memory.assert_called()
 | ||||
| +                self.assertEqual(bd.luks_format.call_args[1]["extra"].pbkdf.max_memory_kb, 256 * 1024)
 | ||||
| +
 | ||||
|      def test_sector_size(self): | ||||
|          fmt = LUKS() | ||||
|          self.assertEqual(fmt.luks_sector_size, 512) | ||||
| diff --git a/tests/unit_tests/formats_tests/methods_test.py b/tests/unit_tests/formats_tests/methods_test.py
 | ||||
| index 2743b7db..5d30c260 100644
 | ||||
| --- a/tests/unit_tests/formats_tests/methods_test.py
 | ||||
| +++ b/tests/unit_tests/formats_tests/methods_test.py
 | ||||
| @@ -366,7 +366,8 @@ class LUKSMethodsTestCase(FormatMethodsTestCase):
 | ||||
|   | ||||
|      def _test_create_backend(self): | ||||
|          self.format.exists = False | ||||
| -        self.format.create()
 | ||||
| +        with patch("blivet.devicelibs.crypto.is_fips_enabled", return_value=False):
 | ||||
| +            self.format.create()
 | ||||
|          self.assertTrue(self.patches["blockdev"].crypto.luks_format.called)  # pylint: disable=no-member | ||||
|   | ||||
|      def _test_setup_backend(self): | ||||
| -- 
 | ||||
| 2.40.1 | ||||
| 
 | ||||
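The new guard only affects the default path: callers that provide luks_data.pbkdf_args themselves bypass calculate_luks2_max_memory() entirely, with or without FIPS. A hedged sketch using the LUKS2PBKDFArgs helper referenced in the patch (the values are illustrative):

    from blivet.formats.luks import LUKS2PBKDFArgs
    from blivet.static_data import luks_data

    # Explicit PBKDF arguments win over the automatic memory-limit logic;
    # max_memory_kb=0 leaves the limit unset, which is what the FIPS path
    # now gets by default.
    luks_data.pbkdf_args = LUKS2PBKDFArgs(type="pbkdf2", max_memory_kb=0)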
							
								
								
									
SOURCES/0015-Add-support-for-filesystem-online-resize.patch (new file, 265 lines)
							| @ -0,0 +1,265 @@ | ||||
| From eb16230427fc1081f8515e6ad69ccf99ca521e5d Mon Sep 17 00:00:00 2001 | ||||
| From: Vojtech Trefny <vtrefny@redhat.com> | ||||
| Date: Tue, 4 Apr 2023 13:31:40 +0200 | ||||
| Subject: [PATCH 1/2] Add support for filesystem online resize | ||||
| 
 | ||||
| Resolves: rhbz#2168680 | ||||
| ---
 | ||||
|  blivet/devices/lvm.py       | 13 ++++++++----- | ||||
|  blivet/devices/partition.py | 11 ++++++----- | ||||
|  blivet/flags.py             |  3 +++ | ||||
|  blivet/formats/fs.py        | 32 ++++++++++++++++++++++++++++---- | ||||
|  blivet/formats/fslib.py     |  7 +++++++ | ||||
|  5 files changed, 52 insertions(+), 14 deletions(-) | ||||
| 
 | ||||
| diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
 | ||||
| index c3132457..ca45c4b5 100644
 | ||||
| --- a/blivet/devices/lvm.py
 | ||||
| +++ b/blivet/devices/lvm.py
 | ||||
| @@ -42,6 +42,7 @@ from .. import errors
 | ||||
|  from .. import util | ||||
|  from ..storage_log import log_method_call | ||||
|  from .. import udev | ||||
| +from ..flags import flags
 | ||||
|  from ..size import Size, KiB, MiB, ROUND_UP, ROUND_DOWN | ||||
|  from ..static_data.lvm_info import lvs_info | ||||
|  from ..tasks import availability | ||||
| @@ -2729,12 +2730,14 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
 | ||||
|          # Setup VG parents (in case they are dmraid partitions for example) | ||||
|          self.vg.setup_parents(orig=True) | ||||
|   | ||||
| -        if self.original_format.exists:
 | ||||
| -            self.original_format.teardown()
 | ||||
| -        if self.format.exists:
 | ||||
| -            self.format.teardown()
 | ||||
| +        if not flags.allow_online_fs_resize:
 | ||||
| +            if self.original_format.exists:
 | ||||
| +                self.original_format.teardown()
 | ||||
| +            if self.format.exists:
 | ||||
| +                self.format.teardown()
 | ||||
| +
 | ||||
| +            udev.settle()
 | ||||
|   | ||||
| -        udev.settle()
 | ||||
|          blockdev.lvm.lvresize(self.vg.name, self._name, self.size) | ||||
|   | ||||
|      @type_specific | ||||
| diff --git a/blivet/devices/partition.py b/blivet/devices/partition.py
 | ||||
| index 0e9250ce..6ae4b8d3 100644
 | ||||
| --- a/blivet/devices/partition.py
 | ||||
| +++ b/blivet/devices/partition.py
 | ||||
| @@ -745,11 +745,12 @@ class PartitionDevice(StorageDevice):
 | ||||
|          if not self.exists: | ||||
|              raise errors.DeviceError("device has not been created") | ||||
|   | ||||
| -        # don't teardown when resizing luks
 | ||||
| -        if self.format.type == "luks" and self.children:
 | ||||
| -            self.children[0].format.teardown()
 | ||||
| -        else:
 | ||||
| -            self.teardown()
 | ||||
| +        if not flags.allow_online_fs_resize:
 | ||||
| +            # don't teardown when resizing luks
 | ||||
| +            if self.format.type == "luks" and self.children:
 | ||||
| +                self.children[0].format.teardown()
 | ||||
| +            else:
 | ||||
| +                self.teardown()
 | ||||
|   | ||||
|          if not self.sysfs_path: | ||||
|              return | ||||
| diff --git a/blivet/flags.py b/blivet/flags.py
 | ||||
| index 6364164d..ecfa7ad7 100644
 | ||||
| --- a/blivet/flags.py
 | ||||
| +++ b/blivet/flags.py
 | ||||
| @@ -91,6 +91,9 @@ class Flags(object):
 | ||||
|   | ||||
|          self.debug_threads = False | ||||
|   | ||||
| +        # Allow online filesystem resizes
 | ||||
| +        self.allow_online_fs_resize = False
 | ||||
| +
 | ||||
|      def get_boot_cmdline(self): | ||||
|          with open("/proc/cmdline") as f: | ||||
|              buf = f.read().strip() | ||||
| diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py
 | ||||
| index 33922f3a..3f553eb0 100644
 | ||||
| --- a/blivet/formats/fs.py
 | ||||
| +++ b/blivet/formats/fs.py
 | ||||
| @@ -56,7 +56,7 @@ from ..i18n import N_
 | ||||
|  from .. import udev | ||||
|  from ..mounts import mounts_cache | ||||
|   | ||||
| -from .fslib import kernel_filesystems
 | ||||
| +from .fslib import kernel_filesystems, FSResize
 | ||||
|   | ||||
|  import logging | ||||
|  log = logging.getLogger("blivet") | ||||
| @@ -88,6 +88,9 @@ class FS(DeviceFormat):
 | ||||
|      # value is already unpredictable and can change in the future... | ||||
|      _metadata_size_factor = 1.0 | ||||
|   | ||||
| +    # support for resize: grow/shrink, online/offline
 | ||||
| +    _resize_support = 0
 | ||||
| +
 | ||||
|      config_actions_map = {"label": "write_label"} | ||||
|   | ||||
|      def __init__(self, **kwargs): | ||||
| @@ -436,12 +439,27 @@ class FS(DeviceFormat):
 | ||||
|              self.write_uuid() | ||||
|   | ||||
|      def _pre_resize(self): | ||||
| -        # file systems need a check before being resized
 | ||||
| -        self.do_check()
 | ||||
| +        if self.status:
 | ||||
| +            if flags.allow_online_fs_resize:
 | ||||
| +                if self.target_size > self.size and not self._resize_support & FSResize.ONLINE_GROW:
 | ||||
| +                    raise FSError("This filesystem doesn't support online growing")
 | ||||
| +                if self.target_size < self.size and not self._resize_support & FSResize.ONLINE_SHRINK:
 | ||||
| +                    raise FSError("This filesystem doesn't support online shrinking")
 | ||||
| +            else:
 | ||||
| +                raise FSError("Resizing of mounted filesystems is disabled")
 | ||||
| +
 | ||||
| +        if self.status:
 | ||||
| +            # fsck tools in general don't allow checks on mounted filesystems
 | ||||
| +            log.debug("Filesystem on %s is mounted, not checking", self.device)
 | ||||
| +        else:
 | ||||
| +            # file systems need a check before being resized
 | ||||
| +            self.do_check()
 | ||||
| +
 | ||||
|          super(FS, self)._pre_resize() | ||||
|   | ||||
|      def _post_resize(self): | ||||
| -        self.do_check()
 | ||||
| +        if not self.status:
 | ||||
| +            self.do_check()
 | ||||
|          super(FS, self)._post_resize() | ||||
|   | ||||
|      def do_check(self): | ||||
| @@ -838,6 +856,7 @@ class Ext2FS(FS):
 | ||||
|      _formattable = True | ||||
|      _supported = True | ||||
|      _resizable = True | ||||
| +    _resize_support = FSResize.ONLINE_GROW | FSResize.OFFLINE_GROW | FSResize.OFFLINE_SHRINK
 | ||||
|      _linux_native = True | ||||
|      _max_size = Size("8 TiB") | ||||
|      _dump = True | ||||
| @@ -1097,6 +1116,7 @@ class XFS(FS):
 | ||||
|      _linux_native = True | ||||
|      _supported = True | ||||
|      _resizable = True | ||||
| +    _resize_support = FSResize.ONLINE_GROW | FSResize.OFFLINE_GROW
 | ||||
|      _packages = ["xfsprogs"] | ||||
|      _fsck_class = fsck.XFSCK | ||||
|      _info_class = fsinfo.XFSInfo | ||||
| @@ -1247,6 +1267,7 @@ class NTFS(FS):
 | ||||
|      _labelfs = fslabeling.NTFSLabeling() | ||||
|      _uuidfs = fsuuid.NTFSUUID() | ||||
|      _resizable = True | ||||
| +    _resize_support = FSResize.OFFLINE_GROW | FSResize.OFFLINE_SHRINK
 | ||||
|      _formattable = True | ||||
|      _supported = True | ||||
|      _min_size = Size("1 MiB") | ||||
| @@ -1490,6 +1511,9 @@ class TmpFS(NoDevFS):
 | ||||
|          # same, nothing actually needs to be set | ||||
|          pass | ||||
|   | ||||
| +    def _pre_resize(self):
 | ||||
| +        self.do_check()
 | ||||
| +
 | ||||
|      def do_resize(self): | ||||
|          # Override superclass method to record whether mount options | ||||
|          # should include an explicit size specification. | ||||
| diff --git a/blivet/formats/fslib.py b/blivet/formats/fslib.py
 | ||||
| index ea93b1fd..8722e942 100644
 | ||||
| --- a/blivet/formats/fslib.py
 | ||||
| +++ b/blivet/formats/fslib.py
 | ||||
| @@ -36,3 +36,10 @@ def update_kernel_filesystems():
 | ||||
|   | ||||
|   | ||||
|  update_kernel_filesystems() | ||||
| +
 | ||||
| +
 | ||||
| +class FSResize():
 | ||||
| +    OFFLINE_SHRINK = 1 << 1
 | ||||
| +    OFFLINE_GROW = 1 << 2
 | ||||
| +    ONLINE_SHRINK = 1 << 3
 | ||||
| +    ONLINE_GROW = 1 << 4
 | ||||
| -- 
 | ||||
| 2.40.1 | ||||
| 
 | ||||
| 
 | ||||
| From 3fce5d0bfd7b09a976ff49feed15077477c6a425 Mon Sep 17 00:00:00 2001 | ||||
| From: Vojtech Trefny <vtrefny@redhat.com> | ||||
| Date: Thu, 6 Apr 2023 14:02:11 +0200 | ||||
| Subject: [PATCH 2/2] Add a test case for filesystem online resize | ||||
| 
 | ||||
| Related: rhbz#2168680 | ||||
| ---
 | ||||
|  tests/storage_tests/formats_test/fs_test.py | 43 ++++++++++++++++++++- | ||||
|  1 file changed, 42 insertions(+), 1 deletion(-) | ||||
| 
 | ||||
| diff --git a/tests/storage_tests/formats_test/fs_test.py b/tests/storage_tests/formats_test/fs_test.py
 | ||||
| index 97f4cbbe..1d42dc21 100644
 | ||||
| --- a/tests/storage_tests/formats_test/fs_test.py
 | ||||
| +++ b/tests/storage_tests/formats_test/fs_test.py
 | ||||
| @@ -6,9 +6,10 @@ import parted
 | ||||
|   | ||||
|  import blivet.formats.fs as fs | ||||
|  from blivet.size import Size, ROUND_DOWN | ||||
| -from blivet.errors import DeviceFormatError
 | ||||
| +from blivet.errors import DeviceFormatError, FSError
 | ||||
|  from blivet.formats import get_format | ||||
|  from blivet.devices import PartitionDevice, DiskDevice | ||||
| +from blivet.flags import flags
 | ||||
|   | ||||
|  from .loopbackedtestcase import LoopBackedTestCase | ||||
|   | ||||
| @@ -26,6 +27,46 @@ class Ext3FSTestCase(Ext2FSTestCase):
 | ||||
|  class Ext4FSTestCase(Ext3FSTestCase): | ||||
|      _fs_class = fs.Ext4FS | ||||
|   | ||||
| +    def test_online_resize(self):
 | ||||
| +        an_fs = self._fs_class()
 | ||||
| +        if not an_fs.formattable:
 | ||||
| +            self.skipTest("can not create filesystem %s" % an_fs.name)
 | ||||
| +        an_fs.device = self.loop_devices[0]
 | ||||
| +        self.assertIsNone(an_fs.create())
 | ||||
| +        an_fs.update_size_info()
 | ||||
| +
 | ||||
| +        if not self.can_resize(an_fs):
 | ||||
| +            self.skipTest("filesystem is not resizable")
 | ||||
| +
 | ||||
| +        # shrink offline first (ext doesn't support online shrinking)
 | ||||
| +        TARGET_SIZE = Size("64 MiB")
 | ||||
| +        an_fs.target_size = TARGET_SIZE
 | ||||
| +        self.assertEqual(an_fs.target_size, TARGET_SIZE)
 | ||||
| +        self.assertNotEqual(an_fs._size, TARGET_SIZE)
 | ||||
| +        self.assertIsNone(an_fs.do_resize())
 | ||||
| +
 | ||||
| +        with tempfile.TemporaryDirectory() as mountpoint:
 | ||||
| +            an_fs.mount(mountpoint=mountpoint)
 | ||||
| +
 | ||||
| +            # grow back when mounted
 | ||||
| +            TARGET_SIZE = Size("100 MiB")
 | ||||
| +            an_fs.target_size = TARGET_SIZE
 | ||||
| +            self.assertEqual(an_fs.target_size, TARGET_SIZE)
 | ||||
| +            self.assertNotEqual(an_fs._size, TARGET_SIZE)
 | ||||
| +
 | ||||
| +            # should fail, online resize disabled by default
 | ||||
| +            with self.assertRaisesRegex(FSError, "Resizing of mounted filesystems is disabled"):
 | ||||
| +                an_fs.do_resize()
 | ||||
| +
 | ||||
| +            # enable online resize
 | ||||
| +            flags.allow_online_fs_resize = True
 | ||||
| +            an_fs.do_resize()
 | ||||
| +            flags.allow_online_fs_resize = False
 | ||||
| +            self._test_sizes(an_fs)
 | ||||
| +            self.assertEqual(an_fs.system_mountpoint, mountpoint)
 | ||||
| +
 | ||||
| +            an_fs.unmount()
 | ||||
| +
 | ||||
|   | ||||
|  class FATFSTestCase(fstesting.FSAsRoot): | ||||
|      _fs_class = fs.FATFS | ||||
| -- 
 | ||||
| 2.40.1 | ||||
| 
 | ||||
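Online growing now has to be opted into explicitly; with the flag left at its default, do_resize() on a mounted filesystem raises FSError, exactly as the new test expects. A minimal sketch against an existing device (the device name "sda3" is illustrative, not part of the patch):

    import blivet
    from blivet.flags import flags
    from blivet.size import Size

    b = blivet.Blivet()
    b.reset()

    dev = b.devicetree.get_device_by_name("sda3")   # a mounted ext4/xfs device
    dev.format.update_size_info()                   # refresh current/min size

    flags.allow_online_fs_resize = True             # allow resizing while mounted
    dev.format.target_size = Size("20 GiB")         # grow only; online shrink is refused
    dev.format.do_resize()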
							
								
								
									
SOURCES/0016-Backport-iSCSI-initiator-name-related-fixes.patch (new file, 382 lines)
							| @ -0,0 +1,382 @@ | ||||
| From d06c45db59d0e917dbab4c283f2f04c8f9206a6e Mon Sep 17 00:00:00 2001 | ||||
| From: Vojtech Trefny <vtrefny@redhat.com> | ||||
| Date: Mon, 6 Mar 2023 10:51:42 +0100 | ||||
| Subject: [PATCH 1/5] Allow changing iSCSI initiator name after setting it | ||||
| 
 | ||||
| Resolves: rhbz#2083139 | ||||
| ---
 | ||||
|  blivet/iscsi.py | 13 +++++++++++-- | ||||
|  1 file changed, 11 insertions(+), 2 deletions(-) | ||||
| 
 | ||||
| diff --git a/blivet/iscsi.py b/blivet/iscsi.py
 | ||||
| index 86451db3..0d063f2a 100644
 | ||||
| --- a/blivet/iscsi.py
 | ||||
| +++ b/blivet/iscsi.py
 | ||||
| @@ -212,14 +212,23 @@ class iSCSI(object):
 | ||||
|      @initiator.setter | ||||
|      @storaged_iscsi_required(critical=True, eval_mode=util.EvalMode.onetime) | ||||
|      def initiator(self, val): | ||||
| -        if self.initiator_set and val != self._initiator:
 | ||||
| -            raise ValueError(_("Unable to change iSCSI initiator name once set"))
 | ||||
|          if len(val) == 0: | ||||
|              raise ValueError(_("Must provide an iSCSI initiator name")) | ||||
|   | ||||
| +        active = self._get_active_sessions()
 | ||||
| +        if active:
 | ||||
| +            raise errors.ISCSIError(_("Cannot change initiator name with an active session"))
 | ||||
| +
 | ||||
|          log.info("Setting up iSCSI initiator name %s", self.initiator) | ||||
|          args = GLib.Variant("(sa{sv})", (val, None)) | ||||
|          self._call_initiator_method("SetInitiatorName", args) | ||||
| +
 | ||||
| +        if self.initiator_set and val != self._initiator:
 | ||||
| +            log.info("Restarting iscsid after initiator name change")
 | ||||
| +            rc = util.run_program(["systemctl", "restart", "iscsid"])
 | ||||
| +            if rc != 0:
 | ||||
| +                raise errors.ISCSIError(_("Failed to restart iscsid after initiator name change"))
 | ||||
| +
 | ||||
|          self._initiator = val | ||||
|   | ||||
|      def active_nodes(self, target=None): | ||||
| -- 
 | ||||
| 2.40.1 | ||||
| 
 | ||||
| 
 | ||||
| From b71991d65c270c023364b03c499b4bf3e245fbd0 Mon Sep 17 00:00:00 2001 | ||||
| From: Vojtech Trefny <vtrefny@redhat.com> | ||||
| Date: Mon, 6 Mar 2023 15:10:28 +0100 | ||||
| Subject: [PATCH 2/5] Add a basic test case for the iscsi module | ||||
| 
 | ||||
| Related: rhbz#2083139 | ||||
| ---
 | ||||
|  tests/storage_tests/__init__.py    |   2 + | ||||
|  tests/storage_tests/iscsi_test.py  | 157 +++++++++++++++++++++++++++++ | ||||
|  3 files changed, 162 insertions(+) | ||||
|  create mode 100644 tests/storage_tests/iscsi_test.py | ||||
| 
 | ||||
| diff --git a/tests/storage_tests/__init__.py b/tests/storage_tests/__init__.py
 | ||||
| index 3b2a6cc4..e69fcc34 100644
 | ||||
| --- a/tests/storage_tests/__init__.py
 | ||||
| +++ b/tests/storage_tests/__init__.py
 | ||||
| @@ -3,3 +3,5 @@ from .formats_test import *
 | ||||
|   | ||||
|  from .partitioning_test import * | ||||
|  from .unsupported_disklabel_test import * | ||||
| +
 | ||||
| +from .iscsi_test import *
 | ||||
| diff --git a/tests/storage_tests/iscsi_test.py b/tests/storage_tests/iscsi_test.py
 | ||||
| new file mode 100644 | ||||
| index 00000000..00cc7c36
 | ||||
| --- /dev/null
 | ||||
| +++ b/tests/storage_tests/iscsi_test.py
 | ||||
| @@ -0,0 +1,157 @@
 | ||||
| +import glob
 | ||||
| +import os
 | ||||
| +import re
 | ||||
| +import shutil
 | ||||
| +import subprocess
 | ||||
| +import unittest
 | ||||
| +
 | ||||
| +from contextlib import contextmanager
 | ||||
| +
 | ||||
| +from .storagetestcase import create_sparse_tempfile
 | ||||
| +
 | ||||
| +
 | ||||
| +def read_file(filename, mode="r"):
 | ||||
| +    with open(filename, mode) as f:
 | ||||
| +        content = f.read()
 | ||||
| +    return content
 | ||||
| +
 | ||||
| +
 | ||||
| +@contextmanager
 | ||||
| +def udev_settle():
 | ||||
| +    try:
 | ||||
| +        yield
 | ||||
| +    finally:
 | ||||
| +        os.system("udevadm settle")
 | ||||
| +
 | ||||
| +
 | ||||
| +def _delete_backstore(name):
 | ||||
| +    status = subprocess.call(["targetcli", "/backstores/fileio/ delete %s" % name],
 | ||||
| +                             stdout=subprocess.DEVNULL)
 | ||||
| +    if status != 0:
 | ||||
| +        raise RuntimeError("Failed to delete the '%s' fileio backstore" % name)
 | ||||
| +
 | ||||
| +
 | ||||
| +def delete_iscsi_target(iqn, backstore=None):
 | ||||
| +    status = subprocess.call(["targetcli", "/iscsi delete %s" % iqn],
 | ||||
| +                             stdout=subprocess.DEVNULL)
 | ||||
| +    if status != 0:
 | ||||
| +        raise RuntimeError("Failed to delete the '%s' iscsi device" % iqn)
 | ||||
| +
 | ||||
| +    if backstore is not None:
 | ||||
| +        _delete_backstore(backstore)
 | ||||
| +
 | ||||
| +
 | ||||
| +def create_iscsi_target(fpath, initiator_name=None):
 | ||||
| +    """
 | ||||
| +    Creates a new iSCSI target (using targetcli) on top of the
 | ||||
| +    :param:`fpath` backing file.
 | ||||
| +
 | ||||
| +    :param str fpath: path of the backing file
 | ||||
| +    :returns: iSCSI IQN, backstore name
 | ||||
| +    :rtype: tuple of str
 | ||||
| +
 | ||||
| +    """
 | ||||
| +
 | ||||
| +    # "register" the backing file as a fileio backstore
 | ||||
| +    store_name = os.path.basename(fpath)
 | ||||
| +    status = subprocess.call(["targetcli", "/backstores/fileio/ create %s %s" % (store_name, fpath)], stdout=subprocess.DEVNULL)
 | ||||
| +    if status != 0:
 | ||||
| +        raise RuntimeError("Failed to register '%s' as a fileio backstore" % fpath)
 | ||||
| +
 | ||||
| +    out = subprocess.check_output(["targetcli", "/backstores/fileio/%s info" % store_name])
 | ||||
| +    out = out.decode("utf-8")
 | ||||
| +    store_wwn = None
 | ||||
| +    for line in out.splitlines():
 | ||||
| +        if line.startswith("wwn: "):
 | ||||
| +            store_wwn = line[5:]
 | ||||
| +    if store_wwn is None:
 | ||||
| +        raise RuntimeError("Failed to determine '%s' backstore's wwn" % store_name)
 | ||||
| +
 | ||||
| +    # create a new iscsi device
 | ||||
| +    out = subprocess.check_output(["targetcli", "/iscsi create"])
 | ||||
| +    out = out.decode("utf-8")
 | ||||
| +    match = re.match(r'Created target (.*).', out)
 | ||||
| +    if match:
 | ||||
| +        iqn = match.groups()[0]
 | ||||
| +    else:
 | ||||
| +        _delete_backstore(store_name)
 | ||||
| +        raise RuntimeError("Failed to create a new iscsi target")
 | ||||
| +
 | ||||
| +    if initiator_name:
 | ||||
| +        status = subprocess.call(["targetcli", "/iscsi/%s/tpg1/acls create %s" % (iqn, initiator_name)], stdout=subprocess.DEVNULL)
 | ||||
| +        if status != 0:
 | ||||
| +            delete_iscsi_target(iqn, store_name)
 | ||||
| +            raise RuntimeError("Failed to set ACLs for '%s'" % iqn)
 | ||||
| +
 | ||||
| +    with udev_settle():
 | ||||
| +        status = subprocess.call(["targetcli", "/iscsi/%s/tpg1/luns create /backstores/fileio/%s" % (iqn, store_name)], stdout=subprocess.DEVNULL)
 | ||||
| +    if status != 0:
 | ||||
| +        delete_iscsi_target(iqn, store_name)
 | ||||
| +        raise RuntimeError("Failed to create a new LUN for '%s' using '%s'" % (iqn, store_name))
 | ||||
| +
 | ||||
| +    status = subprocess.call(["targetcli", "/iscsi/%s/tpg1 set attribute generate_node_acls=1" % iqn], stdout=subprocess.DEVNULL)
 | ||||
| +    if status != 0:
 | ||||
| +        raise RuntimeError("Failed to set ACLs for '%s'" % iqn)
 | ||||
| +
 | ||||
| +    return iqn, store_name
 | ||||
| +
 | ||||
| +
 | ||||
| +@unittest.skipUnless(os.geteuid() == 0, "requires root privileges")
 | ||||
| +@unittest.skipUnless(os.environ.get("JENKINS_HOME"), "jenkins only test")
 | ||||
| +@unittest.skipUnless(shutil.which("iscsiadm"), "iscsiadm not available")
 | ||||
| +class ISCSITestCase(unittest.TestCase):
 | ||||
| +
 | ||||
| +    _disk_size = 512 * 1024**2
 | ||||
| +    initiator = 'iqn.1994-05.com.redhat:iscsi-test'
 | ||||
| +
 | ||||
| +    def setUp(self):
 | ||||
| +        self.addCleanup(self._clean_up)
 | ||||
| +
 | ||||
| +        self._dev_file = None
 | ||||
| +        self.dev = None
 | ||||
| +
 | ||||
| +        self._dev_file = create_sparse_tempfile("blivet_test", self._disk_size)
 | ||||
| +        try:
 | ||||
| +            self.dev, self.backstore = create_iscsi_target(self._dev_file, self.initiator)
 | ||||
| +        except RuntimeError as e:
 | ||||
| +            raise RuntimeError("Failed to setup targetcli device for testing: %s" % e)
 | ||||
| +
 | ||||
| +    def _force_logout(self):
 | ||||
| +        subprocess.call(["iscsiadm", "--mode", "node", "--logout", "--name", self.dev], stdout=subprocess.DEVNULL)
 | ||||
| +
 | ||||
| +    def _clean_up(self):
 | ||||
| +        self._force_logout()
 | ||||
| +        delete_iscsi_target(self.dev, self.backstore)
 | ||||
| +        os.unlink(self._dev_file)
 | ||||
| +
 | ||||
| +    def test_discover_login(self):
 | ||||
| +        from blivet.iscsi import iscsi, has_iscsi
 | ||||
| +
 | ||||
| +        if not has_iscsi():
 | ||||
| +            self.skipTest("iSCSI not available, skipping")
 | ||||
| +
 | ||||
| +        iscsi.initiator = self.initiator
 | ||||
| +        nodes = iscsi.discover("127.0.0.1")
 | ||||
| +        self.assertTrue(nodes)
 | ||||
| +
 | ||||
| +        if len(nodes) > 1:
 | ||||
| +            self.skipTest("Discovered more than one iSCSI target on localhost, skipping")
 | ||||
| +
 | ||||
| +        self.assertEqual(nodes[0].address, "127.0.0.1")
 | ||||
| +        self.assertEqual(nodes[0].port, 3260)
 | ||||
| +        self.assertEqual(nodes[0].name, self.dev)
 | ||||
| +
 | ||||
| +        # change the initiator name
 | ||||
| +        iscsi.initiator = self.initiator + "_1"
 | ||||
| +        self.assertEqual(iscsi.initiator, self.initiator + "_1")
 | ||||
| +
 | ||||
| +        # try to login
 | ||||
| +        ret, err = iscsi.log_into_node(nodes[0])
 | ||||
| +        self.assertTrue(ret, "Login failed: %s" % err)
 | ||||
| +
 | ||||
| +        # check the session for initiator name
 | ||||
| +        sessions = glob.glob("/sys/class/iscsi_session/*/")
 | ||||
| +        self.assertTrue(sessions)
 | ||||
| +        self.assertEqual(len(sessions), 1)
 | ||||
| +        initiator = read_file(sessions[0] + "initiatorname").strip()
 | ||||
| +        self.assertEqual(initiator, iscsi.initiator)
 | ||||
| -- 
 | ||||
| 2.40.1 | ||||
| 
 | ||||
| 
 | ||||
| From 65e8150a7404e37dd2740841a88e7f2565836406 Mon Sep 17 00:00:00 2001 | ||||
| From: Vojtech Trefny <vtrefny@redhat.com> | ||||
| Date: Mon, 6 Mar 2023 15:14:40 +0100 | ||||
| Subject: [PATCH 3/5] tests: Use blivet-specific prefix for targetcli backing | ||||
|  files | ||||
| 
 | ||||
| The code originally comes from libblockdev, hence the "bd" prefix. We | ||||
| should use a different prefix for blivet to be able to identify | ||||
| which test suite failed to clean up the files. | ||||
| 
 | ||||
| Related: rhbz#2083139 | ||||
| ---
 | ||||
|  tests/storage_tests/storagetestcase.py | 2 +- | ||||
|  1 file changed, 1 insertion(+), 1 deletion(-) | ||||
| 
 | ||||
| diff --git a/tests/storage_tests/storagetestcase.py b/tests/storage_tests/storagetestcase.py
 | ||||
| index 35d57ce9..9f859977 100644
 | ||||
| --- a/tests/storage_tests/storagetestcase.py
 | ||||
| +++ b/tests/storage_tests/storagetestcase.py
 | ||||
| @@ -39,7 +39,7 @@ def create_sparse_tempfile(name, size):
 | ||||
|          :param size: the file size (in bytes) | ||||
|          :returns: the path to the newly created file | ||||
|      """ | ||||
| -    (fd, path) = tempfile.mkstemp(prefix="bd.", suffix="-%s" % name)
 | ||||
| +    (fd, path) = tempfile.mkstemp(prefix="blivet.", suffix="-%s" % name)
 | ||||
|      os.close(fd) | ||||
|      create_sparse_file(path, size) | ||||
|      return path | ||||
| -- 
 | ||||
| 2.40.1 | ||||
| 
 | ||||
| 
 | ||||
| From 41278ef1b3f949303fd30fff2ccdde75f713c9f8 Mon Sep 17 00:00:00 2001 | ||||
| From: Vojtech Trefny <vtrefny@redhat.com> | ||||
| Date: Wed, 19 Jul 2023 13:57:39 +0200 | ||||
| Subject: [PATCH 4/5] iscsi: Save firmware initiator name to | ||||
|  /etc/iscsi/initiatorname.iscsi | ||||
| 
 | ||||
| Resolves: rhbz#2084043 | ||||
| ---
 | ||||
|  blivet/iscsi.py | 5 +++++ | ||||
|  1 file changed, 5 insertions(+) | ||||
| 
 | ||||
| diff --git a/blivet/iscsi.py b/blivet/iscsi.py
 | ||||
| index 0d063f2a..8080a671 100644
 | ||||
| --- a/blivet/iscsi.py
 | ||||
| +++ b/blivet/iscsi.py
 | ||||
| @@ -160,6 +160,11 @@ class iSCSI(object):
 | ||||
|                  self._initiator = initiatorname | ||||
|              except Exception as e:  # pylint: disable=broad-except | ||||
|                  log.info("failed to get initiator name from iscsi firmware: %s", str(e)) | ||||
| +            else:
 | ||||
| +                # write the firmware initiator to /etc/iscsi/initiatorname.iscsi
 | ||||
| +                log.info("Setting up firmware iSCSI initiator name %s", self.initiator)
 | ||||
| +                args = GLib.Variant("(sa{sv})", (initiatorname, None))
 | ||||
| +                self._call_initiator_method("SetInitiatorName", args)
 | ||||
|   | ||||
|      # So that users can write iscsi() to get the singleton instance | ||||
|      def __call__(self): | ||||
| -- 
 | ||||
| 2.40.1 | ||||
| 
 | ||||
| 
 | ||||
| From fce8b73965d968aab546bc7e0ecb65d1995da46f Mon Sep 17 00:00:00 2001 | ||||
| From: Vojtech Trefny <vtrefny@redhat.com> | ||||
| Date: Wed, 19 Jul 2023 10:38:45 +0200 | ||||
| Subject: [PATCH 5/5] tests: Improve iscsi_test.ISCSITestCase | ||||
| 
 | ||||
| Changed how we create the initiator name ACLs, based on the RTT test | ||||
| case for rhbz#2084043, and also improved the test case itself. | ||||
| 
 | ||||
| Related: rhbz#2083139 | ||||
| ---
 | ||||
|  tests/storage_tests/iscsi_test.py | 36 +++++++++++++++++++++---------- | ||||
|  1 file changed, 25 insertions(+), 11 deletions(-) | ||||
| 
 | ||||
| diff --git a/tests/storage_tests/iscsi_test.py b/tests/storage_tests/iscsi_test.py
 | ||||
| index 00cc7c36..6cc83a59 100644
 | ||||
| --- a/tests/storage_tests/iscsi_test.py
 | ||||
| +++ b/tests/storage_tests/iscsi_test.py
 | ||||
| @@ -77,21 +77,17 @@ def create_iscsi_target(fpath, initiator_name=None):
 | ||||
|          _delete_backstore(store_name) | ||||
|          raise RuntimeError("Failed to create a new iscsi target") | ||||
|   | ||||
| -    if initiator_name:
 | ||||
| -        status = subprocess.call(["targetcli", "/iscsi/%s/tpg1/acls create %s" % (iqn, initiator_name)], stdout=subprocess.DEVNULL)
 | ||||
| -        if status != 0:
 | ||||
| -            delete_iscsi_target(iqn, store_name)
 | ||||
| -            raise RuntimeError("Failed to set ACLs for '%s'" % iqn)
 | ||||
| -
 | ||||
|      with udev_settle(): | ||||
|          status = subprocess.call(["targetcli", "/iscsi/%s/tpg1/luns create /backstores/fileio/%s" % (iqn, store_name)], stdout=subprocess.DEVNULL) | ||||
|      if status != 0: | ||||
|          delete_iscsi_target(iqn, store_name) | ||||
|          raise RuntimeError("Failed to create a new LUN for '%s' using '%s'" % (iqn, store_name)) | ||||
|   | ||||
| -    status = subprocess.call(["targetcli", "/iscsi/%s/tpg1 set attribute generate_node_acls=1" % iqn], stdout=subprocess.DEVNULL)
 | ||||
| -    if status != 0:
 | ||||
| -        raise RuntimeError("Failed to set ACLs for '%s'" % iqn)
 | ||||
| +    if initiator_name:
 | ||||
| +        status = subprocess.call(["targetcli", "/iscsi/%s/tpg1/acls create %s" % (iqn, initiator_name)], stdout=subprocess.DEVNULL)
 | ||||
| +        if status != 0:
 | ||||
| +            delete_iscsi_target(iqn, store_name)
 | ||||
| +            raise RuntimeError("Failed to set ACLs for '%s'" % iqn)
 | ||||
|   | ||||
|      return iqn, store_name | ||||
|   | ||||
| @@ -130,6 +126,7 @@ class ISCSITestCase(unittest.TestCase):
 | ||||
|          if not has_iscsi(): | ||||
|              self.skipTest("iSCSI not available, skipping") | ||||
|   | ||||
| +        # initially set the initiator to the correct/allowed one
 | ||||
|          iscsi.initiator = self.initiator | ||||
|          nodes = iscsi.discover("127.0.0.1") | ||||
|          self.assertTrue(nodes) | ||||
| @@ -141,11 +138,28 @@ class ISCSITestCase(unittest.TestCase):
 | ||||
|          self.assertEqual(nodes[0].port, 3260) | ||||
|          self.assertEqual(nodes[0].name, self.dev) | ||||
|   | ||||
| -        # change the initiator name
 | ||||
| +        # change the initiator name to a wrong one
 | ||||
|          iscsi.initiator = self.initiator + "_1" | ||||
|          self.assertEqual(iscsi.initiator, self.initiator + "_1") | ||||
|   | ||||
| -        # try to login
 | ||||
| +        # check the change made it to /etc/iscsi/initiatorname.iscsi
 | ||||
| +        initiator_file = read_file("/etc/iscsi/initiatorname.iscsi").strip()
 | ||||
| +        self.assertEqual(initiator_file, "InitiatorName=%s" % self.initiator + "_1")
 | ||||
| +
 | ||||
| +        # try to login (should fail)
 | ||||
| +        ret, err = iscsi.log_into_node(nodes[0])
 | ||||
| +        self.assertFalse(ret)
 | ||||
| +        self.assertIn("authorization failure", err)
 | ||||
| +
 | ||||
| +        # change the initiator name back to the correct one
 | ||||
| +        iscsi.initiator = self.initiator
 | ||||
| +        self.assertEqual(iscsi.initiator, self.initiator)
 | ||||
| +
 | ||||
| +        # check the change made it to /etc/iscsi/initiatorname.iscsi
 | ||||
| +        initiator_file = read_file("/etc/iscsi/initiatorname.iscsi").strip()
 | ||||
| +        self.assertEqual(initiator_file, "InitiatorName=%s" % self.initiator)
 | ||||
| +
 | ||||
| +        # try to login (should work now)
 | ||||
|          ret, err = iscsi.log_into_node(nodes[0]) | ||||
|          self.assertTrue(ret, "Login failed: %s" % err) | ||||
|   | ||||
| -- 
 | ||||
| 2.40.1 | ||||
| 
 | ||||
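The new storage test above captures the intended flow; a compressed sketch of the same API, assuming a reachable target portal (the portal address and IQNs are illustrative):

    from blivet.iscsi import iscsi, has_iscsi

    if has_iscsi():
        iscsi.initiator = "iqn.1994-05.com.redhat:example"
        nodes = iscsi.discover("192.0.2.10")

        # Renaming is allowed again as long as no session is active; the new
        # name is written to /etc/iscsi/initiatorname.iscsi and iscsid is
        # restarted so the change takes effect.
        iscsi.initiator = "iqn.1994-05.com.redhat:example-new"

        ret, err = iscsi.log_into_node(nodes[0])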
							
								
								
									
SOURCES/0017-Add-support-for-creating-shared-LVM-setups.patch (new file, 206 lines)
							| @ -0,0 +1,206 @@ | ||||
| From faef0408d2f7c61aade6d187389c61e64f9f373b Mon Sep 17 00:00:00 2001 | ||||
| From: Vojtech Trefny <vtrefny@redhat.com> | ||||
| Date: Thu, 20 Apr 2023 12:35:30 +0200 | ||||
| Subject: [PATCH] Add support for creating shared LVM setups | ||||
| 
 | ||||
| This feature is requested by GFS2 for the storage role. This adds | ||||
| support for creating shared VGs and activating LVs in shared mode. | ||||
| 
 | ||||
| Resolves: RHEL-14021 | ||||
| ---
 | ||||
|  blivet/devices/lvm.py                     | 44 +++++++++++++++++++---- | ||||
|  blivet/tasks/availability.py              |  9 +++++ | ||||
|  tests/unit_tests/devices_test/lvm_test.py | 25 +++++++++++++ | ||||
|  3 files changed, 72 insertions(+), 6 deletions(-) | ||||
| 
 | ||||
| diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
 | ||||
| index ca45c4b5..068c5368 100644
 | ||||
| --- a/blivet/devices/lvm.py
 | ||||
| +++ b/blivet/devices/lvm.py
 | ||||
| @@ -97,7 +97,8 @@ class LVMVolumeGroupDevice(ContainerDevice):
 | ||||
|   | ||||
|      def __init__(self, name, parents=None, size=None, free=None, | ||||
|                   pe_size=None, pe_count=None, pe_free=None, pv_count=None, | ||||
| -                 uuid=None, exists=False, sysfs_path='', exported=False):
 | ||||
| +                 uuid=None, exists=False, sysfs_path='', exported=False,
 | ||||
| +                 shared=False):
 | ||||
|          """ | ||||
|              :param name: the device name (generally a device node's basename) | ||||
|              :type name: str | ||||
| @@ -124,6 +125,11 @@ class LVMVolumeGroupDevice(ContainerDevice):
 | ||||
|              :type pv_count: int | ||||
|              :keyword uuid: the VG UUID | ||||
|              :type uuid: str | ||||
| +
 | ||||
| +            For non-existing VGs only:
 | ||||
| +
 | ||||
| +            :keyword shared: whether to create this VG as shared
 | ||||
| +            :type shared: bool
 | ||||
|          """ | ||||
|          # These attributes are used by _add_parent, so they must be initialized | ||||
|          # prior to instantiating the superclass. | ||||
| @@ -137,6 +143,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
 | ||||
|          self.pe_count = util.numeric_type(pe_count) | ||||
|          self.pe_free = util.numeric_type(pe_free) | ||||
|          self.exported = exported | ||||
| +        self._shared = shared
 | ||||
|   | ||||
|          # TODO: validate pe_size if given | ||||
|          if not self.pe_size: | ||||
| @@ -254,7 +261,19 @@ class LVMVolumeGroupDevice(ContainerDevice):
 | ||||
|          """ Create the device. """ | ||||
|          log_method_call(self, self.name, status=self.status) | ||||
|          pv_list = [pv.path for pv in self.parents] | ||||
| -        blockdev.lvm.vgcreate(self.name, pv_list, self.pe_size)
 | ||||
| +        extra = dict()
 | ||||
| +        if self._shared:
 | ||||
| +            extra["shared"] = ""
 | ||||
| +        blockdev.lvm.vgcreate(self.name, pv_list, self.pe_size, **extra)
 | ||||
| +
 | ||||
| +        if self._shared:
 | ||||
| +            if availability.BLOCKDEV_LVM_PLUGIN_SHARED.available:
 | ||||
| +                try:
 | ||||
| +                    blockdev.lvm.vglock_start(self.name)
 | ||||
| +                except blockdev.LVMError as err:
 | ||||
| +                    raise errors.LVMError(err)
 | ||||
| +            else:
 | ||||
| +                raise errors.LVMError("Shared LVM is not fully supported: %s" % ",".join(availability.BLOCKDEV_LVM_PLUGIN_SHARED.availability_errors))
 | ||||
|   | ||||
|      def _post_create(self): | ||||
|          self._complete = True | ||||
| @@ -661,7 +680,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
 | ||||
|      def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None, | ||||
|                   fmt=None, exists=False, sysfs_path='', grow=None, maxsize=None, | ||||
|                   percent=None, cache_request=None, pvs=None, from_lvs=None, | ||||
| -                 stripe_size=0):
 | ||||
| +                 stripe_size=0, shared=False):
 | ||||
|   | ||||
|          if not exists: | ||||
|              if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo", "cache-pool"] + lvm.raid_seg_types: | ||||
| @@ -690,6 +709,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
 | ||||
|          self.seg_type = seg_type or "linear" | ||||
|          self._raid_level = None | ||||
|          self.ignore_skip_activation = 0 | ||||
| +        self._shared = shared
 | ||||
|   | ||||
|          self.req_grow = None | ||||
|          self.req_max_size = Size(0) | ||||
| @@ -2306,7 +2326,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
 | ||||
|                   parent_lv=None, int_type=None, origin=None, vorigin=False, | ||||
|                   metadata_size=None, chunk_size=None, profile=None, from_lvs=None, | ||||
|                   compression=False, deduplication=False, index_memory=0, | ||||
| -                 write_policy=None, cache_mode=None, attach_to=None, stripe_size=0):
 | ||||
| +                 write_policy=None, cache_mode=None, attach_to=None, stripe_size=0,
 | ||||
| +                 shared=False):
 | ||||
|          """ | ||||
|              :param name: the device name (generally a device node's basename) | ||||
|              :type name: str | ||||
| @@ -2337,6 +2358,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
 | ||||
|              :type cache_request: :class:`~.devices.lvm.LVMCacheRequest` | ||||
|              :keyword pvs: list of PVs to allocate extents from (size could be specified for each PV) | ||||
|              :type pvs: list of :class:`~.devices.StorageDevice` or :class:`LVPVSpec` objects (tuples) | ||||
| +            :keyword shared: whether to activate the newly created LV in shared mode
 | ||||
| +            :type shared: bool
 | ||||
|   | ||||
|              For internal LVs only: | ||||
|   | ||||
| @@ -2412,7 +2435,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
 | ||||
|          LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type, | ||||
|                                        fmt, exists, sysfs_path, grow, maxsize, | ||||
|                                        percent, cache_request, pvs, from_lvs, | ||||
| -                                      stripe_size)
 | ||||
| +                                      stripe_size, shared)
 | ||||
|          LVMVDOPoolMixin.__init__(self, compression, deduplication, index_memory, | ||||
|                                   write_policy) | ||||
|          LVMVDOLogicalVolumeMixin.__init__(self) | ||||
| @@ -2634,7 +2657,13 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
 | ||||
|          log_method_call(self, self.name, orig=orig, status=self.status, | ||||
|                          controllable=self.controllable) | ||||
|          ignore_skip_activation = self.is_snapshot_lv or self.ignore_skip_activation > 0 | ||||
| -        blockdev.lvm.lvactivate(self.vg.name, self._name, ignore_skip=ignore_skip_activation)
 | ||||
| +        if self._shared:
 | ||||
| +            if availability.BLOCKDEV_LVM_PLUGIN_SHARED.available:
 | ||||
| +                blockdev.lvm.lvactivate(self.vg.name, self._name, ignore_skip=ignore_skip_activation, shared=True)
 | ||||
| +            else:
 | ||||
| +                raise errors.LVMError("Shared LVM is not fully supported: %s" % ",".join(availability.BLOCKDEV_LVM_PLUGIN_SHARED.availability_errors))
 | ||||
| +        else:
 | ||||
| +            blockdev.lvm.lvactivate(self.vg.name, self._name, ignore_skip=ignore_skip_activation)
 | ||||
|   | ||||
|      @type_specific | ||||
|      def _pre_create(self): | ||||
| @@ -2672,6 +2701,9 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
 | ||||
|              if self._stripe_size: | ||||
|                  extra["stripesize"] = str(int(self._stripe_size.convert_to("KiB"))) | ||||
|   | ||||
| +            if self._shared:
 | ||||
| +                extra["activate"] = "sy"
 | ||||
| +
 | ||||
|              blockdev.lvm.lvcreate(self.vg.name, self._name, self.size, | ||||
|                                    type=self.seg_type, pv_list=pvs, **extra) | ||||
|          else: | ||||
| diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py
 | ||||
| index bba1ba84..85945c77 100644
 | ||||
| --- a/blivet/tasks/availability.py
 | ||||
| +++ b/blivet/tasks/availability.py
 | ||||
| @@ -435,6 +435,14 @@ if hasattr(blockdev.LVMTech, "VDO"):
 | ||||
|  else: | ||||
|      BLOCKDEV_LVM_TECH_VDO = _UnavailableMethod(error_msg="Installed version of libblockdev doesn't support LVM VDO technology") | ||||
|   | ||||
| +if hasattr(blockdev.LVMTech, "SHARED"):
 | ||||
| +    BLOCKDEV_LVM_SHARED = BlockDevTechInfo(plugin_name="lvm",
 | ||||
| +                                           check_fn=blockdev.lvm_is_tech_avail,
 | ||||
| +                                           technologies={blockdev.LVMTech.SHARED: blockdev.LVMTechMode.MODIFY})  # pylint: disable=no-member
 | ||||
| +    BLOCKDEV_LVM_TECH_SHARED = BlockDevMethod(BLOCKDEV_LVM_SHARED)
 | ||||
| +else:
 | ||||
| +    BLOCKDEV_LVM_TECH_SHARED = _UnavailableMethod(error_msg="Installed version of libblockdev doesn't support shared LVM technology")
 | ||||
| +
 | ||||
|  # libblockdev mdraid plugin required technologies and modes | ||||
|  BLOCKDEV_MD_ALL_MODES = (blockdev.MDTechMode.CREATE | | ||||
|                           blockdev.MDTechMode.DELETE | | ||||
| @@ -476,6 +484,7 @@ BLOCKDEV_DM_PLUGIN_RAID = blockdev_plugin("libblockdev dm plugin (raid technolog
 | ||||
|  BLOCKDEV_LOOP_PLUGIN = blockdev_plugin("libblockdev loop plugin", BLOCKDEV_LOOP_TECH) | ||||
|  BLOCKDEV_LVM_PLUGIN = blockdev_plugin("libblockdev lvm plugin", BLOCKDEV_LVM_TECH) | ||||
|  BLOCKDEV_LVM_PLUGIN_VDO = blockdev_plugin("libblockdev lvm plugin (vdo technology)", BLOCKDEV_LVM_TECH_VDO) | ||||
| +BLOCKDEV_LVM_PLUGIN_SHARED = blockdev_plugin("libblockdev lvm plugin (shared LVM technology)", BLOCKDEV_LVM_TECH_SHARED)
 | ||||
|  BLOCKDEV_MDRAID_PLUGIN = blockdev_plugin("libblockdev mdraid plugin", BLOCKDEV_MD_TECH) | ||||
|  BLOCKDEV_MPATH_PLUGIN = blockdev_plugin("libblockdev mpath plugin", BLOCKDEV_MPATH_TECH) | ||||
|  BLOCKDEV_SWAP_PLUGIN = blockdev_plugin("libblockdev swap plugin", BLOCKDEV_SWAP_TECH) | ||||
| diff --git a/tests/unit_tests/devices_test/lvm_test.py b/tests/unit_tests/devices_test/lvm_test.py
 | ||||
| index d7b55224..e645309f 100644
 | ||||
| --- a/tests/unit_tests/devices_test/lvm_test.py
 | ||||
| +++ b/tests/unit_tests/devices_test/lvm_test.py
 | ||||
| @@ -476,6 +476,31 @@ class LVMDeviceTest(unittest.TestCase):
 | ||||
|                  lv.setup() | ||||
|                  lvm.lvactivate.assert_called_with(vg.name, lv.lvname, ignore_skip=False) | ||||
|   | ||||
| +    @patch("blivet.tasks.availability.BLOCKDEV_LVM_PLUGIN_SHARED",
 | ||||
| +           new=blivet.tasks.availability.ExternalResource(blivet.tasks.availability.AvailableMethod, ""))
 | ||||
| +    def test_lv_activate_shared(self):
 | ||||
| +        pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
 | ||||
| +                           size=Size("1 GiB"), exists=True)
 | ||||
| +        vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
 | ||||
| +        lv = LVMLogicalVolumeDevice("data_lv", parents=[vg], size=Size("500 MiB"), exists=True, shared=True)
 | ||||
| +
 | ||||
| +        with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
 | ||||
| +            with patch.object(lv, "_pre_setup"):
 | ||||
| +                lv.setup()
 | ||||
| +                lvm.lvactivate.assert_called_with(vg.name, lv.lvname, ignore_skip=False, shared=True)
 | ||||
| +
 | ||||
| +    @patch("blivet.tasks.availability.BLOCKDEV_LVM_PLUGIN_SHARED",
 | ||||
| +           new=blivet.tasks.availability.ExternalResource(blivet.tasks.availability.AvailableMethod, ""))
 | ||||
| +    def test_vg_create_shared(self):
 | ||||
| +        pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
 | ||||
| +                           size=Size("1 GiB"), exists=True)
 | ||||
| +        vg = LVMVolumeGroupDevice("testvg", parents=[pv], shared=True)
 | ||||
| +
 | ||||
| +        with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
 | ||||
| +            vg._create()
 | ||||
| +            lvm.vgcreate.assert_called_with(vg.name, [pv.path], Size("4 MiB"), shared="")
 | ||||
| +            lvm.vglock_start.assert_called_with(vg.name)
 | ||||
| +
 | ||||
|      def test_vg_is_empty(self): | ||||
|          pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"), | ||||
|                             size=Size("1024 MiB")) | ||||
| -- 
 | ||||
| 2.41.0 | ||||
| 
 | ||||
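For context, the patch above extends blivet's LVM device classes with a "shared" keyword: a shared volume group is created by passing the "shared" extra option to vgcreate and then starting the lockspace with vglock_start(), and a shared logical volume is created with activate="sy" and activated with shared=True, both guarded by the BLOCKDEV_LVM_PLUGIN_SHARED availability check. A minimal sketch of how the new keyword might be used, modelled on the unit tests in the patch (device names and sizes are illustrative, and a real system additionally needs lvmlockd running and a libblockdev LVM plugin that reports the SHARED technology):

    import blivet
    from blivet.devices import StorageDevice, LVMVolumeGroupDevice, LVMLogicalVolumeDevice
    from blivet.size import Size

    # Illustrative PV-formatted backing device; in practice blivet discovers PVs itself.
    pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                       size=Size("1 GiB"), exists=True)

    # shared=True makes VG creation call vgcreate with the "shared" extra option
    # and then start the lockspace via vglock_start().
    vg = LVMVolumeGroupDevice("testvg", parents=[pv], shared=True)

    # shared=True on the LV requests activate="sy" at lvcreate time and
    # lvactivate(..., shared=True) when the LV is set up.
    lv = LVMLogicalVolumeDevice("data_lv", parents=[vg], size=Size("500 MiB"),
                                shared=True)

Constructing the devices only records the request; the shared-specific calls are issued when the devices are actually created or activated, which is what the new unit tests assert.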
| @ -23,7 +23,7 @@ Version: 3.6.0 | ||||
| 
 | ||||
| #%%global prerelease .b2 | ||||
| # prerelease, if defined, should be something like .a1, .b1, .b2.dev1, or .c2 | ||||
| Release: 4%{?prerelease}%{?dist} | ||||
| Release: 8%{?prerelease}%{?dist} | ||||
| Epoch: 1 | ||||
| License: LGPLv2+ | ||||
| %global realname blivet | ||||
| @ -41,6 +41,12 @@ Patch7: 0008-tests-Skip-XFS-resize-test-on-CentOS-RHEL-8.patch | ||||
| Patch8: 0009-Revert-Adjust-to-new-XFS-min-size.patch | ||||
| Patch9: 0010-Catch-BlockDevNotImplementedError-for-btrfs-plugin-c.patch | ||||
| Patch10: 0011-Default-to-encryption-sector-size-512-for-LUKS-devic.patch | ||||
| Patch11: 0012-Add-support-for-specifying-stripe-size-for-RAID-LVs.patch | ||||
| Patch12: 0013-Fix-setting-kickstart-data.patch | ||||
| Patch13: 0014-Do-not-set-memory-limit-for-LUKS2-when-running-in-FI.patch | ||||
| Patch14: 0015-Add-support-for-filesystem-online-resize.patch | ||||
| Patch15: 0016-Backport-iSCSI-initiator-name-related-fixes.patch | ||||
| Patch16: 0017-Add-support-for-creating-shared-LVM-setups.patch | ||||
| 
 | ||||
| # Versions of required components (done so we make sure the buildrequires | ||||
| # match the requires versions of things). | ||||
| @ -203,6 +209,35 @@ configuration. | ||||
| %endif | ||||
| 
 | ||||
| %changelog | ||||
| * Mon Oct 30 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-8 | ||||
| - Add support for creating shared LVM setups | ||||
|   Resolves: RHEL-14021 | ||||
| 
 | ||||
| * Mon Jul 24 2023 Jan Pokorny <japokorn@redhat.com> - 3.6.0-7 | ||||
| Backport iSCSI initiator name related fixes: | ||||
| - Allow changing iSCSI initiator name after setting it | ||||
|   Resolves: rhbz#2083139 | ||||
| - Add a basic test case for the iscsi module | ||||
|   Related: rhbz#2083139 | ||||
| - tests: Use blivet-specific prefix for targetcli backing files | ||||
|   Related: rhbz#2083139 | ||||
| - iscsi: Save firmware initiator name to /etc/iscsi/initiatorname.iscsi | ||||
|   Resolves: rhbz#2084043 | ||||
| - tests: Improve iscsi_test.ISCSITestCase | ||||
|   Related: rhbz#2083139 | ||||
| 
 | ||||
| * Thu May 18 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-6 | ||||
| - Fix setting kickstart data | ||||
|   Resolves: rhbz#2175166 | ||||
| - Do not set memory limit for LUKS2 when running in FIPS mode | ||||
|   Resolves: rhbz#2183437 | ||||
| - Add support for filesystem online resize | ||||
|   Resolves: rhbz#2168680 | ||||
| 
 | ||||
| * Tue May 02 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-5 | ||||
| - Add support for specifying stripe size for RAID LVs | ||||
|   Resolves: rhbz#2142550 | ||||
| 
 | ||||
| * Thu Jan 19 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-4 | ||||
| - Default to encryption sector size 512 for LUKS devices | ||||
|   Resolves: rhbz#2160465 | ||||
|  | ||||
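The 3.6.0-5 entry above (Patch11, "Add support for specifying stripe size for RAID LVs") adds the related stripe_size keyword; per the diff earlier in this commit it is passed through LVMLogicalVolumeDevice and converted to KiB before being handed to lvcreate as the "stripesize" extra option. A minimal, illustrative sketch (the segment type, PV layout and the 64 KiB value are assumptions for the example, not part of the patch):

    import blivet
    from blivet.devices import StorageDevice, LVMVolumeGroupDevice, LVMLogicalVolumeDevice
    from blivet.size import Size

    pvs = [StorageDevice("pv%d" % i, fmt=blivet.formats.get_format("lvmpv"),
                         size=Size("1 GiB"), exists=True) for i in (1, 2)]
    vg = LVMVolumeGroupDevice("testvg", parents=pvs)

    # A striped RAID LV over both PVs with an explicit 64 KiB stripe size;
    # on create this becomes lvcreate's "stripesize" extra option.
    lv = LVMLogicalVolumeDevice("data_lv", parents=[vg], size=Size("500 MiB"),
                                seg_type="raid0", pvs=pvs,
                                stripe_size=Size("64 KiB"))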