From 18f05802f07f580ed31f38931b1103842397d598 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 2 Nov 2020 14:19:52 +0100
Subject: [PATCH 01/17] Fix type of LVM VDO logical volumes

We should use "lvmvdolv" to make it similar to other "lvmXYZ"
types.
---
 blivet/devices/lvm.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index d9e24a33..9639256d 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -1875,7 +1875,7 @@ def vg(self):

     @property
     def type(self):
-        return "vdolv"
+        return "lvmvdolv"

     @property
     def resizable(self):

From 7f4815e14075550f55f2afb44bfba461eacea1c4 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 2 Nov 2020 14:21:33 +0100
Subject: [PATCH 02/17] Add VDO pool data LV to internal LVs during populate

---
 blivet/devices/lvm.py           | 9 ++++++++-
 blivet/populator/helpers/lvm.py | 2 +-
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 9639256d..d0957d6a 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -1119,7 +1119,7 @@ class LVMInternalLVtype(Enum):

     @classmethod
     def get_type(cls, lv_attr, lv_name):  # pylint: disable=unused-argument
-        attr_letters = {cls.data: ("T", "C"),
+        attr_letters = {cls.data: ("T", "C", "D"),
                         cls.meta: ("e",),
                         cls.log: ("l", "L"),
                         cls.image: ("i", "I"),
@@ -1824,6 +1824,13 @@ def _remove_log_vol(self, lv):
         self._lvs.remove(lv)
         self.vg._remove_log_vol(lv)

+    @property
+    @util.requires_property("is_vdo_pool")
+    def _vdopool_data_lv(self):
+        if not self._internal_lvs:
+            return None
+        return self._internal_lvs[0]
+
     @property
     @util.requires_property("is_vdo_pool")
     def lvs(self):
diff --git a/blivet/populator/helpers/lvm.py b/blivet/populator/helpers/lvm.py
index ff8bf59f..b1626306 100644
--- a/blivet/populator/helpers/lvm.py
+++ b/blivet/populator/helpers/lvm.py
@@ -211,7 +211,7 @@ def add_lv(lv):
                 origin = self._devicetree.get_device_by_name(origin_device_name)

             lv_kwargs["origin"] = origin
-        elif lv_attr[0] in 'IrielTCo' and lv_name.endswith(']'):
+        elif lv_attr[0] in 'IrielTCoD' and lv_name.endswith(']'):
            # an internal LV, add the an instance of the appropriate class
            # to internal_lvs for later processing when non-internal LVs are
            # processed

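The practical effect of the new "D" attribute letter: a hidden LV whose lv_attr starts with "D" is now classified as the internal data LV of its VDO pool. A minimal sketch of the mapping (the lv_attr string and LV name are illustrative, not captured from a real system):

    from blivet.devices.lvm import LVMInternalLVtype

    # VDO pool data LVs show up as e.g. "[vdopool_vdata]" with an attr
    # string starting with "D"
    lv_type = LVMInternalLVtype.get_type("Dwi-ao----", "[vdopool_vdata]")
    assert lv_type is LVMInternalLVtype.data
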
From c164864955e371aef78b5020f28bf0c9d235ac7c Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 2 Nov 2020 14:22:12 +0100
Subject: [PATCH 03/17] Add availability functions for LVM VDO

VDO is currently available only on RHEL/CentOS so we need a
separate availability check for LVM VDO devices.
---
 blivet/devices/lvm.py        | 6 ++++++
 blivet/tasks/availability.py | 8 ++++++++
 2 files changed, 14 insertions(+)

diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index d0957d6a..ffc65dcd 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -1790,6 +1790,9 @@ def populate_ksdata(self, data):


 class LVMVDOPoolMixin(object):
+
+    _external_dependencies = [availability.BLOCKDEV_LVM_PLUGIN, availability.BLOCKDEV_LVM_PLUGIN_VDO]
+
     def __init__(self):
         self._lvs = []

@@ -1848,6 +1851,9 @@ def _create(self):


 class LVMVDOLogicalVolumeMixin(object):
+
+    _external_dependencies = [availability.BLOCKDEV_LVM_PLUGIN, availability.BLOCKDEV_LVM_PLUGIN_VDO]
+
     def __init__(self):
         pass

diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py
index f3b76650..b107428e 100644
--- a/blivet/tasks/availability.py
+++ b/blivet/tasks/availability.py
@@ -372,6 +372,13 @@ def available_resource(name):
                                                      blockdev.LVMTechMode.MODIFY)})
 BLOCKDEV_LVM_TECH = BlockDevMethod(BLOCKDEV_LVM)

+BLOCKDEV_LVM_VDO = BlockDevTechInfo(plugin_name="lvm",
+                                    check_fn=blockdev.lvm_is_tech_avail,
+                                    technologies={blockdev.LVMTech.VDO: (blockdev.LVMTechMode.CREATE |
+                                                                         blockdev.LVMTechMode.REMOVE |
+                                                                         blockdev.LVMTechMode.QUERY)})
+BLOCKDEV_LVM_TECH_VDO = BlockDevMethod(BLOCKDEV_LVM_VDO)
+
 # libblockdev mdraid plugin required technologies and modes
 BLOCKDEV_MD_ALL_MODES = (blockdev.MDTechMode.CREATE |
                          blockdev.MDTechMode.DELETE |
@@ -410,6 +417,7 @@ def available_resource(name):
 BLOCKDEV_DM_PLUGIN_RAID = blockdev_plugin("dm", BLOCKDEV_DM_TECH_RAID)
 BLOCKDEV_LOOP_PLUGIN = blockdev_plugin("loop", BLOCKDEV_LOOP_TECH)
 BLOCKDEV_LVM_PLUGIN = blockdev_plugin("lvm", BLOCKDEV_LVM_TECH)
+BLOCKDEV_LVM_PLUGIN_VDO = blockdev_plugin("lvm", BLOCKDEV_LVM_TECH_VDO)
 BLOCKDEV_MDRAID_PLUGIN = blockdev_plugin("mdraid", BLOCKDEV_MD_TECH)
 BLOCKDEV_MPATH_PLUGIN = blockdev_plugin("mpath", BLOCKDEV_MPATH_TECH)
 BLOCKDEV_SWAP_PLUGIN = blockdev_plugin("swap", BLOCKDEV_SWAP_TECH)

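Callers should not assume VDO is present just because the LVM plugin is; the two checks are deliberately separate. A minimal caller-side guard, mirroring the check that patch 08 later uses in devicefactory.is_supported_device_type():

    from blivet.devices.lvm import LVMVDOPoolMixin

    # each dependency is an availability resource with an `available` property
    if any(not e.available for e in LVMVDOPoolMixin._external_dependencies):
        print("LVM VDO is not available on this system")
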
From d782620129d47a7b79b0e6b80455e6d93f8bcc88 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 2 Nov 2020 14:27:55 +0100
Subject: [PATCH 04/17] Read the LVM VDO pool current size from the internal
 data LV

The device-mapper device for the pool is always 512k when active.
---
 blivet/devices/lvm.py | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index ffc65dcd..73743fa8 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -1845,6 +1845,15 @@ def direct(self):
         """ Is this device directly accessible? """
         return False

+    def read_current_size(self):
+        log_method_call(self, exists=self.exists, path=self.path,
+                        sysfs_path=self.sysfs_path)
+        if self.size != Size(0):
+            return self.size
+        if self._vdopool_data_lv:
+            return self._vdopool_data_lv.read_current_size()
+        return Size(0)
+
     def _create(self):
         """ Create the device. """
         raise NotImplementedError

From 2da48ae84f4eac84e8cf998ee2402249a5a52626 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 2 Nov 2020 14:29:43 +0100
Subject: [PATCH 05/17] Add "vdo_lv" property to LVMVDOPoolMixin

---
 blivet/devices/lvm.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 73743fa8..2f93fa22 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -1840,6 +1840,13 @@ def lvs(self):
         """ A list of this VDO pool's LVs """
         return self._lvs[:]  # we don't want folks changing our list

+    @property
+    @util.requires_property("is_vdo_pool")
+    def vdo_lv(self):
+        if not self._lvs:
+            return None
+        return self._lvs[0]
+
     @property
     def direct(self):
         """ Is this device directly accessible? """

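Paired with the ``pool`` property on the VDO LV side (visible as context in the next patch), this gives simple two-way navigation between the two devices. A hedged sketch, with an illustrative device name:

    pool = b.devicetree.get_device_by_name("data-vdopool")
    if pool is not None and pool.is_vdo_pool:
        vdolv = pool.vdo_lv                      # the single VDO LV, or None
        assert vdolv is None or vdolv.pool is pool  # and back again
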
From bbfa2cbdc6cb85d405b895c66eb4867cea4218b4 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 2 Nov 2020 14:30:37 +0100
Subject: [PATCH 06/17] Add support for creating LVM VDO pools and LVM VDO
 volumes

The pool and the volume are created by one call, but they can have
different properties (like size) and are in fact two block devices
when created, so we also need to create two devices and add them to
the devicetree. The pool device must always be created first; the
_create function for the VDO volume is a no-op.
---
 blivet/devices/lvm.py | 63 +++++++++++++++++++++++++++++++++++++------
 1 file changed, 55 insertions(+), 8 deletions(-)

diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 2f93fa22..0802e2de 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -311,7 +311,7 @@ def _add_log_vol(self, lv):

         # verify we have the space, then add it
         # do not verify for growing vg (because of ks)
-        if not lv.exists and not self.growable and not lv.is_thin_lv and lv.size > self.free_space:
+        if not lv.exists and not self.growable and not (lv.is_thin_lv or lv.is_vdo_lv) and lv.size > self.free_space:
             raise errors.DeviceError("new lv is too large to fit in free space", self.name)

         log.debug("Adding %s/%s to %s", lv.name, lv.size, self.name)
@@ -639,7 +639,7 @@ def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None,
                  percent=None, cache_request=None, pvs=None, from_lvs=None):

         if not exists:
-            if seg_type not in [None, "linear", "thin", "thin-pool", "cache"] + lvm.raid_seg_types:
+            if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo"] + lvm.raid_seg_types:
                 raise ValueError("Invalid or unsupported segment type: %s" % seg_type)
             if seg_type and seg_type in lvm.raid_seg_types and not pvs:
                 raise ValueError("List of PVs has to be given for every non-linear LV")
@@ -1793,7 +1793,11 @@ class LVMVDOPoolMixin(object):

     _external_dependencies = [availability.BLOCKDEV_LVM_PLUGIN, availability.BLOCKDEV_LVM_PLUGIN_VDO]

-    def __init__(self):
+    def __init__(self, compression=True, deduplication=True, index_memory=0, write_policy=None):
+        self.compression = compression
+        self.deduplication = deduplication
+        self.index_memory = index_memory
+        self.write_policy = write_policy
         self._lvs = []

     @property
@@ -1863,7 +1867,19 @@ def read_current_size(self):

     def _create(self):
         """ Create the device. """
-        raise NotImplementedError
+
+        if not self.vdo_lv:
+            raise errors.DeviceError("Cannot create new VDO pool without a VDO LV.")
+
+        if self.write_policy:
+            write_policy = blockdev.lvm_get_vdo_write_policy_str(self.write_policy)
+        else:
+            write_policy = blockdev.LVMVDOWritePolicy.AUTO
+
+        blockdev.lvm.vdo_pool_create(self.vg.name, self.vdo_lv.lvname, self.lvname,
+                                     self.size, self.vdo_lv.size, self.index_memory,
+                                     self.compression, self.deduplication,
+                                     write_policy)


 class LVMVDOLogicalVolumeMixin(object):
@@ -1915,9 +1931,26 @@ def resizable(self):
     def pool(self):
         return self.parents[0]

+    def _set_size(self, newsize):
+        if not isinstance(newsize, Size):
+            raise AttributeError("new size must be of type Size")
+
+        newsize = self.vg.align(newsize)
+        newsize = self.vg.align(util.numeric_type(newsize))
+        # just make sure the size is set (no VG size/free space check needed for
+        # a VDO LV)
+        DMDevice._set_size(self, newsize)
+
+    def _pre_create(self):
+        # skip LVMLogicalVolumeDevice's _pre_create() method as it checks for
+        # free space in a VG which doesn't make sense for a VDO LV and causes a
+        # bug by limiting the VDO LV's size to VG free space which is nonsense
+        super(LVMLogicalVolumeBase, self)._pre_create()  # pylint: disable=bad-super-call
+
     def _create(self):
-        """ Create the device. """
-        raise NotImplementedError
+        # nothing to do here, VDO LV is created automatically together with
+        # the VDO pool
+        pass

     def _destroy(self):
         # nothing to do here, VDO LV is destroyed automatically together with
@@ -1953,7 +1986,9 @@ def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None,
                  fmt=None, exists=False, sysfs_path='', grow=None, maxsize=None,
                  percent=None, cache_request=None, pvs=None,
                  parent_lv=None, int_type=None, origin=None, vorigin=False,
-                 metadata_size=None, chunk_size=None, profile=None, from_lvs=None):
+                 metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
+                 compression=False, deduplication=False, index_memory=0,
+                 write_policy=None):
         """
             :param name: the device name (generally a device node's basename)
             :type name: str
@@ -2012,6 +2047,17 @@ def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None,
             :keyword from_lvs: LVs to create the new LV from (in the (data_lv, metadata_lv) order)
             :type from_lvs: tuple of :class:`LVMLogicalVolumeDevice`

+            For VDO pools only:
+
+            :keyword compression: whether to enable compression on the VDO pool
+            :type compression: bool
+            :keyword deduplication: whether to enable deduplication on the VDO pool
+            :type deduplication: bool
+            :keyword index_memory: amount of index memory (in bytes) or 0 for default
+            :type index_memory: int
+            :keyword write_policy: write policy for the volume or None for default
+            :type write_policy: str
+
         """

         if isinstance(parents, (list, ParentList)):
@@ -2032,7 +2078,8 @@ def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None,
         LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
                                       fmt, exists, sysfs_path, grow, maxsize,
                                       percent, cache_request, pvs, from_lvs)
-        LVMVDOPoolMixin.__init__(self)
+        LVMVDOPoolMixin.__init__(self, compression, deduplication, index_memory,
+                                 write_policy)
         LVMVDOLogicalVolumeMixin.__init__(self)

         LVMInternalLogicalVolumeMixin._init_check(self)

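Everything above funnels into a single libblockdev call issued by the pool device. A sketch of the call shape used in _create() above, with illustrative argument values:

    # executed once, by the pool device -- the VDO LV's _create() is a no-op
    blockdev.lvm.vdo_pool_create("data",           # VG name
                                 "vdolv",          # name of the VDO LV
                                 "vdopool",        # name of the pool LV
                                 Size("8 GiB"),    # physical (pool) size
                                 Size("40 GiB"),   # virtual (LV) size
                                 0,                # index_memory, 0 == default
                                 True,             # compression
                                 True,             # deduplication
                                 blockdev.LVMVDOWritePolicy.AUTO)
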
From 2d1593b50dc6232e213b4df86dfbf5cf6d282dcd Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 2 Nov 2020 14:31:35 +0100
Subject: [PATCH 07/17] Allow creating LVM VDO pools and volumes using
 "blivet.new_lv"

The steps to create the VDO devices would typically look like:

    pool = b.new_lv(vdo_pool=True, parents=[data], size=Size("8 GiB"))
    vdolv = b.new_lv(vdo_lv=True, parents=[pool], size=Size("40 GiB"))
    b.create_device(pool)
    b.create_device(vdolv)
    b.do_it()
---
 blivet/blivet.py               | 18 ++++++++++++++----
 tests/devices_test/lvm_test.py | 31 +++++++++++++++++++++++++++++++
 2 files changed, 45 insertions(+), 4 deletions(-)

diff --git a/blivet/blivet.py b/blivet/blivet.py
index e7dbd37b..754eb152 100644
--- a/blivet/blivet.py
+++ b/blivet/blivet.py
@@ -573,6 +573,10 @@ def new_lv(self, *args, **kwargs):
             :type thin_pool: bool
             :keyword thin_volume: whether to create a thin volume
             :type thin_volume: bool
+            :keyword vdo_pool: whether to create a vdo pool
+            :type vdo_pool: bool
+            :keyword vdo_lv: whether to create a vdo lv
+            :type vdo_lv: bool
             :returns: the new device
             :rtype: :class:`~.devices.LVMLogicalVolumeDevice`

@@ -589,8 +593,10 @@ def new_lv(self, *args, **kwargs):
         """
         thin_volume = kwargs.pop("thin_volume", False)
         thin_pool = kwargs.pop("thin_pool", False)
+        vdo_pool = kwargs.pop("vdo_pool", False)
+        vdo_lv = kwargs.pop("vdo_lv", False)
         parent = kwargs.get("parents", [None])[0]
-        if thin_volume and parent:
+        if (thin_volume or vdo_lv) and parent:
            # kwargs["parents"] will contain the pool device, so...
            vg = parent.vg
        else:
@@ -600,6 +606,10 @@ def new_lv(self, *args, **kwargs):
             kwargs["seg_type"] = "thin"
         if thin_pool:
             kwargs["seg_type"] = "thin-pool"
+        if vdo_pool:
+            kwargs["seg_type"] = "vdo-pool"
+        if vdo_lv:
+            kwargs["seg_type"] = "vdo"

         mountpoint = kwargs.pop("mountpoint", None)
         if 'fmt_type' in kwargs:
@@ -625,7 +635,7 @@ def new_lv(self, *args, **kwargs):
             swap = False

         prefix = ""
-        if thin_pool:
+        if thin_pool or vdo_pool:
             prefix = "pool"

         name = self.suggest_device_name(parent=vg,
@@ -636,10 +646,10 @@ def new_lv(self, *args, **kwargs):
         if "%s-%s" % (vg.name, name) in self.names:
             raise ValueError("name already in use")

-        if thin_pool or thin_volume:
+        if thin_pool or thin_volume or vdo_pool or vdo_lv:
             cache_req = kwargs.pop("cache_request", None)
             if cache_req:
-                raise ValueError("Creating cached thin volumes and pools is not supported")
+                raise ValueError("Creating cached thin and VDO volumes and pools is not supported")

         return LVMLogicalVolumeDevice(name, *args, **kwargs)

diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
index 204cb99a..493d3ba1 100644
--- a/tests/devices_test/lvm_test.py
+++ b/tests/devices_test/lvm_test.py
@@ -689,3 +689,34 @@ def test_new_lv_from_non_existing_lvs(self):
             with patch.object(pool, "_pre_create"):
                 pool.create()
                 self.assertTrue(lvm.thpool_convert.called)
+
+    def test_new_vdo_pool(self):
+        b = blivet.Blivet()
+        pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+                           size=Size("10 GiB"), exists=True)
+        vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
+
+        for dev in (pv, vg):
+            b.devicetree._add_device(dev)
+
+        # check that all the above devices are in the expected places
+        self.assertEqual(set(b.devices), {pv, vg})
+        self.assertEqual(set(b.vgs), {vg})
+
+        self.assertEqual(vg.size, Size("10236 MiB"))
+
+        vdopool = b.new_lv(name="vdopool", vdo_pool=True,
+                           parents=[vg], compression=True,
+                           deduplication=True,
+                           size=blivet.size.Size("8 GiB"))
+
+        vdolv = b.new_lv(name="vdolv", vdo_lv=True,
+                         parents=[vdopool],
+                         size=blivet.size.Size("40 GiB"))
+
+        b.create_device(vdopool)
+        b.create_device(vdolv)
+
+        self.assertEqual(vdopool.children[0], vdolv)
+        self.assertEqual(vdolv.parents[0], vdopool)
+        self.assertListEqual(vg.lvs, [vdopool, vdolv])

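Teardown mirrors this, in reverse order: the VDO LV must be scheduled for destruction before its pool. A hedged sketch continuing the example from the commit message:

    # remove the LV first, then the pool (the reverse of creation) ...
    b.destroy_device(vdolv)
    b.destroy_device(pool)
    b.do_it()

    # ... or let blivet work out the ordering itself:
    b.recursive_remove(pool)
    b.do_it()
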
From 31ec429ad7bd0857a768e2dfebe1de088dafc144 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 2 Nov 2020 14:32:47 +0100
Subject: [PATCH 08/17] Add LVM VDO device factory

---
 blivet/devicefactory.py     | 100 +++++++++++++++++++++++++++-
 tests/devicefactory_test.py | 128 +++++++++++++++++++++++++++++++++---
 2 files changed, 218 insertions(+), 10 deletions(-)

diff --git a/blivet/devicefactory.py b/blivet/devicefactory.py
index 9214ad54..c95037cc 100644
--- a/blivet/devicefactory.py
+++ b/blivet/devicefactory.py
@@ -27,7 +27,7 @@
 from .devices import BTRFSDevice, DiskDevice
 from .devices import LUKSDevice, LVMLogicalVolumeDevice
 from .devices import PartitionDevice, MDRaidArrayDevice
-from .devices.lvm import DEFAULT_THPOOL_RESERVE
+from .devices.lvm import LVMVDOPoolMixin, DEFAULT_THPOOL_RESERVE
 from .formats import get_format
 from .devicelibs import btrfs
 from .devicelibs import mdraid
@@ -58,6 +58,7 @@
 DEVICE_TYPE_BTRFS = 3
 DEVICE_TYPE_DISK = 4
 DEVICE_TYPE_LVM_THINP = 5
+DEVICE_TYPE_LVM_VDO = 6


 def is_supported_device_type(device_type):
@@ -69,6 +70,9 @@ def is_supported_device_type(device_type):
     :returns: True if this device type is supported
     :rtype: bool
     """
+    if device_type == DEVICE_TYPE_LVM_VDO:
+        return not any(e for e in LVMVDOPoolMixin._external_dependencies if not e.available)
+
     devices = []
     if device_type == DEVICE_TYPE_BTRFS:
         devices = [BTRFSDevice]
@@ -96,7 +100,7 @@ def get_supported_raid_levels(device_type):
     pkg = None
     if device_type == DEVICE_TYPE_BTRFS:
         pkg = btrfs
-    elif device_type in (DEVICE_TYPE_LVM, DEVICE_TYPE_LVM_THINP):
+    elif device_type in (DEVICE_TYPE_LVM, DEVICE_TYPE_LVM_THINP, DEVICE_TYPE_LVM_VDO):
         pkg = lvm
     elif device_type == DEVICE_TYPE_MD:
         pkg = mdraid
@@ -116,6 +120,8 @@ def get_device_type(device):
                     "lvmlv": DEVICE_TYPE_LVM,
                     "lvmthinlv": DEVICE_TYPE_LVM_THINP,
                     "lvmthinpool": DEVICE_TYPE_LVM,
+                    "lvmvdolv": DEVICE_TYPE_LVM_VDO,
+                    "lvmvdopool": DEVICE_TYPE_LVM,
                     "btrfs subvolume": DEVICE_TYPE_BTRFS,
                     "btrfs volume": DEVICE_TYPE_BTRFS,
                     "mdarray": DEVICE_TYPE_MD}
@@ -136,6 +142,7 @@ def get_device_factory(blivet, device_type=DEVICE_TYPE_LVM, **kwargs):
                    DEVICE_TYPE_PARTITION: PartitionFactory,
                    DEVICE_TYPE_MD: MDFactory,
                    DEVICE_TYPE_LVM_THINP: LVMThinPFactory,
+                   DEVICE_TYPE_LVM_VDO: LVMVDOFactory,
                    DEVICE_TYPE_DISK: DeviceFactory}

     factory_class = class_table[device_type]
@@ -1738,6 +1745,95 @@ def _get_new_device(self, *args, **kwargs):
         return super(LVMThinPFactory, self)._get_new_device(*args, **kwargs)


+class LVMVDOFactory(LVMFactory):
+
+    """ Factory for creating LVM VDO volumes.
+
+        :keyword pool_name: name for the VDO pool; if not specified, a unique name will be generated
+        :type pool_name: str
+        :keyword virtual_size: size for the VDO volume, usually bigger than the pool size; if not
+                               specified, the physical size (pool size) will be used
+        :type virtual_size: :class:`~.size.Size`
+        :keyword compression: whether to enable compression (defaults to True)
+        :type compression: bool
+        :keyword deduplication: whether to enable deduplication (defaults to True)
+        :type deduplication: bool
+    """
+
+    def __init__(self, storage, **kwargs):
+        self.pool_name = kwargs.pop("pool_name", None)
+        self.virtual_size = kwargs.pop("virtual_size", None)
+        self.compression = kwargs.pop("compression", True)
+        self.deduplication = kwargs.pop("deduplication", True)
+        super(LVMVDOFactory, self).__init__(storage, **kwargs)
+
+    def _get_new_pool(self, *args, **kwargs):
+        kwargs["vdo_pool"] = True
+        return super(LVMVDOFactory, self)._get_new_device(*args, **kwargs)
+
+    def _set_device_size(self):
+        """ Set the size of the factory device. """
+        super(LVMVDOFactory, self)._set_device_size()
+
+        self.device.pool.size = self.size
+        self._reconfigure_container()
+
+        if not self.virtual_size or self.virtual_size < self.size:
+            # virtual_size is not set or smaller than the current size --> it should be the same as the pool size
+            self.device.size = self.size
+        else:
+            self.device.size = self.virtual_size
+
+    def _set_pool_name(self):
+        safe_new_name = self.storage.safe_device_name(self.pool_name)
+        if self.device.pool.name != safe_new_name:
+            if not safe_new_name:
+                log.error("not renaming '%s' to invalid name '%s'",
+                          self.device.pool.name, self.pool_name)
+                return
+            if safe_new_name in self.storage.names:
+                log.error("not renaming '%s' to in-use name '%s'",
+                          self.device.pool.name, safe_new_name)
+                return
+
+            log.debug("renaming device '%s' to '%s'",
+                      self.device.pool.name, safe_new_name)
+            self.device.pool.raw_device.name = safe_new_name
+
+    def _set_name(self):
+        super(LVMVDOFactory, self)._set_name()
+        if self.pool_name:
+            self._set_pool_name()
+
+    def _reconfigure_device(self):
+        super(LVMVDOFactory, self)._reconfigure_device()
+
+        self.device.pool.compression = self.compression
+        self.device.pool.deduplication = self.deduplication
+
+    #
+    # methods to configure the factory's device
+    #
+    def _get_new_device(self, *args, **kwargs):
+        """ Create and return the factory device as a StorageDevice. """
+        pool = self._get_new_pool(name=self.pool_name,
+                                  size=self.size,
+                                  parents=[self.vg],
+                                  compression=self.compression,
+                                  deduplication=self.deduplication)
+        self.storage.create_device(pool)
+
+        kwargs["parents"] = [pool]
+        kwargs["vdo_lv"] = True
+
+        if self.virtual_size:
+            vdolv_kwargs = kwargs.copy()
+            vdolv_kwargs["size"] = self.virtual_size
+        else:
+            vdolv_kwargs = kwargs
+        return super(LVMVDOFactory, self)._get_new_device(*args, **vdolv_kwargs)
+
+
 class MDFactory(DeviceFactory):

     """ Factory for creating MD RAID devices. """
diff --git a/tests/devicefactory_test.py b/tests/devicefactory_test.py
index 08068779..7cdb51c5 100644
--- a/tests/devicefactory_test.py
+++ b/tests/devicefactory_test.py
@@ -4,6 +4,9 @@
 from decimal import Decimal
 import os

+import test_compat  # pylint: disable=unused-import
+from six.moves.mock import patch  # pylint: disable=no-name-in-module,import-error
+
 import blivet

 from blivet import devicefactory
@@ -93,10 +96,12 @@ def _validate_factory_device(self, *args, **kwargs):
             self.assertEqual(device.format.label,
                              kwargs.get('label'))

-        self.assertLessEqual(device.size, kwargs.get("size"))
-        self.assertGreaterEqual(device.size, device.format.min_size)
-        if device.format.max_size:
-            self.assertLessEqual(device.size, device.format.max_size)
+        # sizes with VDO are special, we have a special check in LVMVDOFactoryTestCase._validate_factory_device
+        if device_type != devicefactory.DEVICE_TYPE_LVM_VDO:
+            self.assertLessEqual(device.size, kwargs.get("size"))
+            self.assertGreaterEqual(device.size, device.format.min_size)
+            if device.format.max_size:
+                self.assertLessEqual(device.size, device.format.max_size)

         self.assertEqual(device.encrypted,
                          kwargs.get("encrypted", False) or
@@ -115,7 +120,11 @@ def test_device_factory(self):
                   "mountpoint": '/factorytest'}
         device = self._factory_device(device_type, **kwargs)
         self._validate_factory_device(device, device_type, **kwargs)
-        self.b.recursive_remove(device)
+
+        if device.type == "lvmvdolv":
+            self.b.recursive_remove(device.pool)
+        else:
+            self.b.recursive_remove(device)

         if self.encryption_supported:
             # Encrypt the leaf device
@@ -157,6 +166,12 @@ def test_device_factory(self):
         device = self._factory_device(device_type, **kwargs)
         self._validate_factory_device(device, device_type, **kwargs)

+        # change size up
+        kwargs["device"] = device
+        kwargs["size"] = Size("900 MiB")
+        device = self._factory_device(device_type, **kwargs)
+        self._validate_factory_device(device, device_type, **kwargs)
+
         # Change LUKS version
         kwargs["luks_version"] = "luks1"
         device = self._factory_device(device_type, **kwargs)
@@ -179,7 +194,7 @@ def _get_size_delta(self, devices=None):
         """
         return Size("1 MiB")

-    def test_get_free_disk_space(self):
+    def test_get_free_disk_space(self, *args):  # pylint: disable=unused-argument
         # get_free_disk_space should return the total free space on disks
         kwargs = self._get_test_factory_args()
         kwargs["size"] = Size("500 MiB")
@@ -206,7 +221,7 @@ def test_get_free_disk_space(self):
                                sum(d.size for d in self.b.disks) - device_space,
                                delta=self._get_size_delta(devices=[device]))

-    def test_normalize_size(self):
+    def test_normalize_size(self, *args):  # pylint: disable=unused-argument
         # _normalize_size should adjust target size to within the format limits
         fstype = "ext2"
         ext2 = get_format(fstype)
@@ -258,7 +273,7 @@ def test_default_factory_type(self):
         factory = devicefactory.get_device_factory(self.b)
         self.assertIsInstance(factory, devicefactory.LVMFactory)

-    def test_factory_defaults(self):
+    def test_factory_defaults(self, *args):  # pylint: disable=unused-argument
         ctor_kwargs = self._get_test_factory_args()
         factory = devicefactory.get_device_factory(self.b, self.device_type, **ctor_kwargs)
         for setting, value in factory._default_settings.items():
@@ -522,6 +537,103 @@ def _get_size_delta(self, devices=None):
         return delta


+class LVMVDOFactoryTestCase(LVMFactoryTestCase):
+    device_class = LVMLogicalVolumeDevice
+    device_type = devicefactory.DEVICE_TYPE_LVM_VDO
+    encryption_supported = False
+
+    def _validate_factory_device(self, *args, **kwargs):
+        super(LVMVDOFactoryTestCase, self)._validate_factory_device(*args,
+                                                                    **kwargs)
+        device = args[0]
+
+        if kwargs.get("encrypted", False):
+            vdolv = device.parents[0]
+        else:
+            vdolv = device
+
+        self.assertTrue(hasattr(vdolv, "pool"))
+
+        virtual_size = kwargs.get("virtual_size", 0)
+        if virtual_size:
+            self.assertEqual(vdolv.size, virtual_size)
+        else:
+            self.assertEqual(vdolv.size, vdolv.pool.size)
+        self.assertGreaterEqual(vdolv.size, vdolv.pool.size)
+
+        compression = kwargs.get("compression", True)
+        self.assertEqual(vdolv.pool.compression, compression)
+
+        deduplication = kwargs.get("deduplication", True)
+        self.assertEqual(vdolv.pool.deduplication, deduplication)
+
+        pool_name = kwargs.get("pool_name", None)
+        if pool_name:
+            self.assertEqual(vdolv.pool.lvname, pool_name)
+
+        return device
+
+    @patch("blivet.formats.lvmpv.LVMPhysicalVolume.formattable", return_value=True)
+    @patch("blivet.formats.lvmpv.LVMPhysicalVolume.destroyable", return_value=True)
+    @patch("blivet.static_data.lvm_info.blockdev.lvm.lvs", return_value=[])
+    @patch("blivet.devices.lvm.LVMVolumeGroupDevice.type_external_dependencies", return_value=set())
+    @patch("blivet.devices.lvm.LVMLogicalVolumeBase.type_external_dependencies", return_value=set())
+    def test_device_factory(self, *args):  # pylint: disable=unused-argument,arguments-differ
+        device_type = self.device_type
+        kwargs = {"disks": self.b.disks,
+                  "size": Size("400 MiB"),
+                  "fstype": 'ext4',
+                  "mountpoint": '/factorytest'}
+        device = self._factory_device(device_type, **kwargs)
+        self._validate_factory_device(device, device_type, **kwargs)
+        self.b.recursive_remove(device.pool)
+
+        kwargs = {"disks": self.b.disks,
+                  "size": Size("400 MiB"),
+                  "fstype": 'ext4',
+                  "mountpoint": '/factorytest',
+                  "pool_name": "vdopool",
+                  "deduplication": True,
+                  "compression": True}
+        device = self._factory_device(device_type, **kwargs)
+        self._validate_factory_device(device, device_type, **kwargs)
+
+        # change size without specifying virtual_size: both sizes should grow
+        kwargs["size"] = Size("600 MiB")
+        kwargs["device"] = device
+        device = self._factory_device(device_type, **kwargs)
+        self._validate_factory_device(device, device_type, **kwargs)
+
+        # change virtual size
+        kwargs["virtual_size"] = Size("6 GiB")
+        kwargs["device"] = device
+        device = self._factory_device(device_type, **kwargs)
+        self._validate_factory_device(device, device_type, **kwargs)
+
+        # change virtual size again (still larger than the pool size)
+        kwargs["virtual_size"] = Size("500 GiB")
+        kwargs["device"] = device
+        device = self._factory_device(device_type, **kwargs)
+        self._validate_factory_device(device, device_type, **kwargs)
+
+        # change deduplication and compression
+        kwargs["deduplication"] = False
+        kwargs["device"] = device
+        device = self._factory_device(device_type, **kwargs)
+        self._validate_factory_device(device, device_type, **kwargs)
+
+        kwargs["compression"] = False
+        kwargs["device"] = device
+        device = self._factory_device(device_type, **kwargs)
+        self._validate_factory_device(device, device_type, **kwargs)
+
+        # rename the pool
+        kwargs["pool_name"] = "vdopool2"
+        kwargs["device"] = device
+        device = self._factory_device(device_type, **kwargs)
+        self._validate_factory_device(device, device_type, **kwargs)
+
+
 class MDFactoryTestCase(DeviceFactoryTestCase):
     device_type = devicefactory.DEVICE_TYPE_MD
     device_class = MDRaidArrayDevice

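Note the asymmetry in the get_device_type() table above: only the VDO LV maps to the new factory; the pool itself is still handled as a plain LVM device. A short usage sketch under that assumption (names and sizes are illustrative):

    from blivet import devicefactory

    if devicefactory.is_supported_device_type(devicefactory.DEVICE_TYPE_LVM_VDO):
        factory = devicefactory.get_device_factory(b, devicefactory.DEVICE_TYPE_LVM_VDO,
                                                   size=Size("5 GiB"), disks=b.disks,
                                                   pool_name="vdopool",
                                                   virtual_size=Size("20 GiB"))
        factory.configure()
        vdolv = factory.device  # the VDO LV; vdolv.pool is the 'lvmvdopool' device
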
From 22ba2b96111d5f153a3b55d3c56d84e597cf9a90 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 2 Nov 2020 14:33:06 +0100
Subject: [PATCH 09/17] Add VM test for LVM VDO

---
 tests/vmtests/blivet_reset_vmtest.py | 15 +++++++++++++++
 tests/vmtests/runvmtests.py          |  3 ++-
 2 files changed, 17 insertions(+), 1 deletion(-)

diff --git a/tests/vmtests/blivet_reset_vmtest.py b/tests/vmtests/blivet_reset_vmtest.py
index 8743d51e..47fc84c4 100644
--- a/tests/vmtests/blivet_reset_vmtest.py
+++ b/tests/vmtests/blivet_reset_vmtest.py
@@ -192,6 +192,21 @@ def setUp(self):
         self.collect_expected_data()


+class LVMVDOTestCase(BlivetResetTestCase):
+
+    def _set_up_storage(self):
+        if not devicefactory.is_supported_device_type(devicefactory.DEVICE_TYPE_LVM_VDO):
+            self.skipTest("VDO not supported, skipping")
+
+        self.blivet.factory_device(devicefactory.DEVICE_TYPE_LVM_VDO,
+                                   size=Size("10 GiB"),
+                                   fstype="ext4",
+                                   disks=self.blivet.disks[:],
+                                   name="vdolv",
+                                   pool_name="vdopool",
+                                   virtual_size=Size("40 GiB"))
+
+
 @unittest.skip("temporarily disabled due to issues with raids with metadata version 0.90")
 class MDRaid0TestCase(BlivetResetTestCase):

diff --git a/tests/vmtests/runvmtests.py b/tests/vmtests/runvmtests.py
index 88143d3a..6f20484f 100644
--- a/tests/vmtests/runvmtests.py
+++ b/tests/vmtests/runvmtests.py
@@ -12,7 +12,8 @@
          "tests.vmtests.blivet_reset_vmtest.LVMThinSnapShotTestCase",
          "tests.vmtests.blivet_reset_vmtest.LVMRaidTestCase",
          "tests.vmtests.blivet_reset_vmtest.MDRaid0TestCase",
-         "tests.vmtests.blivet_reset_vmtest.LVMOnMDTestCase"]
+         "tests.vmtests.blivet_reset_vmtest.LVMOnMDTestCase",
+         "tests.vmtests.blivet_reset_vmtest.LVMVDOTestCase"]

 SNAP_NAME = "snapshot"

From 52b37bb86e856f1ede71f7cceb7284a639d741f4 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 19 Nov 2020 13:07:17 +0100
Subject: [PATCH 10/17] Allow adding nodiscard option when running mkfs

For filesystems that support it we might want to add a nodiscard
option to mkfs when creating a format on devices like LVM VDO
volumes, where discard is very slow and doesn't really make sense
when running mkfs.
---
 blivet/formats/fs.py               | 12 +++++-
 blivet/tasks/fsmkfs.py             | 59 +++++++++++++++++++++++++++---
 tests/formats_test/methods_test.py |  3 +-
 3 files changed, 66 insertions(+), 8 deletions(-)

diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py
index 4ba83e6d..e61e5b86 100644
--- a/blivet/formats/fs.py
+++ b/blivet/formats/fs.py
@@ -132,6 +132,7 @@ def __init__(self, **kwargs):
         self.mountopts = kwargs.get("mountopts", "")
         self.label = kwargs.get("label")
         self.fsprofile = kwargs.get("fsprofile")
+        self._mkfs_nodiscard = kwargs.get("nodiscard", False)

         self._user_mountopts = self.mountopts

@@ -263,6 +264,14 @@ def label_format_ok(self, label):
     label = property(lambda s: s._get_label(), lambda s, l: s._set_label(l),
                      doc="this filesystem's label")

+    def can_nodiscard(self):
+        """Returns True if this filesystem supports the nodiscard option during
+        creation, otherwise False.
+
+        :rtype: bool
+        """
+        return self._mkfs.can_nodiscard and self._mkfs.available
+
     def can_set_uuid(self):
         """Returns True if this filesystem supports setting an UUID during
         creation, otherwise False.
@@ -402,7 +411,8 @@ def _create(self, **kwargs):
         try:
             self._mkfs.do_task(options=kwargs.get("options"),
                                label=not self.relabels(),
-                               set_uuid=self.can_set_uuid())
+                               set_uuid=self.can_set_uuid(),
+                               nodiscard=self.can_nodiscard())
         except FSWriteLabelError as e:
             log.warning("Choosing not to apply label (%s) during creation of filesystem %s. Label format is unacceptable for this filesystem.", self.label, self.type)
         except FSWriteUUIDError as e:
diff --git a/blivet/tasks/fsmkfs.py b/blivet/tasks/fsmkfs.py
index ad166aa0..c982f7e7 100644
--- a/blivet/tasks/fsmkfs.py
+++ b/blivet/tasks/fsmkfs.py
@@ -37,6 +37,7 @@ class FSMkfsTask(fstask.FSTask):

     can_label = abc.abstractproperty(doc="whether this task labels")
     can_set_uuid = abc.abstractproperty(doc="whether this task can set UUID")
+    can_nodiscard = abc.abstractproperty(doc="whether this task can set nodiscard option")


 @add_metaclass(abc.ABCMeta)
@@ -48,6 +49,9 @@ class FSMkfs(task.BasicApplication, FSMkfsTask):
     label_option = abc.abstractproperty(
         doc="Option for setting a filesystem label.")

+    nodiscard_option = abc.abstractproperty(
+        doc="Option for setting the nodiscard option for mkfs.")
+
     args = abc.abstractproperty(doc="options for creating filesystem")

     @abc.abstractmethod
@@ -80,6 +84,15 @@ def can_set_uuid(self):
         """
         return self.get_uuid_args is not None

+    @property
+    def can_nodiscard(self):
+        """Whether this task can set the nodiscard option for a filesystem.
+
+        :returns: True if nodiscard can be set
+        :rtype: bool
+        """
+        return self.nodiscard_option is not None
+
     @property
     def _label_options(self):
         """ Any labeling options that a particular filesystem may use.
@@ -100,6 +113,23 @@ def _label_options(self):
         else:
             raise FSWriteLabelError("Choosing not to apply label (%s) during creation of filesystem %s. Label format is unacceptable for this filesystem." % (self.fs.label, self.fs.type))

+    @property
+    def _nodiscard_option(self):
+        """ Any nodiscard options that a particular filesystem may use.
+
+        :returns: nodiscard options
+        :rtype: list of str
+        """
+        # Do not know how to set nodiscard while formatting.
+        if self.nodiscard_option is None:
+            return []
+
+        # nodiscard option not requested
+        if not self.fs._mkfs_nodiscard:
+            return []
+
+        return self.nodiscard_option
+
     @property
     def _uuid_options(self):
         """Any UUID options that a particular filesystem may use.
@@ -119,7 +149,7 @@ def _uuid_options(self):
                                   " is unacceptable for this filesystem."
                                   % (self.fs.uuid, self.fs.type))

-    def _format_options(self, options=None, label=False, set_uuid=False):
+    def _format_options(self, options=None, label=False, set_uuid=False, nodiscard=False):
         """Get a list of format options to be used when creating the
            filesystem.

@@ -135,11 +165,12 @@ def _format_options(self, options=None, label=False, set_uuid=False):

         label_options = self._label_options if label else []
         uuid_options = self._uuid_options if set_uuid else []
+        nodiscard_option = self._nodiscard_option if nodiscard else []
         create_options = shlex.split(self.fs.create_options or "")
         return (options + self.args + label_options + uuid_options +
-                create_options + [self.fs.device])
+                nodiscard_option + create_options + [self.fs.device])

-    def _mkfs_command(self, options, label, set_uuid):
+    def _mkfs_command(self, options, label, set_uuid, nodiscard):
         """Return the command to make the filesystem.

            :param options: any special options
@@ -148,12 +179,14 @@ def _mkfs_command(self, options, label, set_uuid):
            :type label: bool
            :param set_uuid: whether to set an UUID
            :type set_uuid: bool
+           :param nodiscard: whether to run mkfs with the nodiscard option
+           :type nodiscard: bool
            :returns: the mkfs command
            :rtype: list of str
         """
-        return [str(self.ext)] + self._format_options(options, label, set_uuid)
+        return [str(self.ext)] + self._format_options(options, label, set_uuid, nodiscard)

-    def do_task(self, options=None, label=False, set_uuid=False):
+    def do_task(self, options=None, label=False, set_uuid=False, nodiscard=False):
         """Create the format on the device and label if possible and desired.

            :param options: any special options, may be None
@@ -168,7 +201,7 @@ def do_task(self, options=None, label=False, set_uuid=False):
             raise FSError("\n".join(error_msgs))

         options = options or []
-        cmd = self._mkfs_command(options, label, set_uuid)
+        cmd = self._mkfs_command(options, label, set_uuid, nodiscard)
         try:
             ret = util.run_program(cmd)
         except OSError as e:
@@ -181,6 +214,7 @@ def do_task(self, options=None, label=False, set_uuid=False):
 class BTRFSMkfs(FSMkfs):
     ext = availability.MKFS_BTRFS_APP
     label_option = None
+    nodiscard_option = ["--nodiscard"]

     def get_uuid_args(self, uuid):
         return ["-U", uuid]
@@ -193,6 +227,7 @@ def args(self):
 class Ext2FSMkfs(FSMkfs):
     ext = availability.MKE2FS_APP
     label_option = "-L"
+    nodiscard_option = ["-E", "nodiscard"]

     _opts = []

@@ -215,6 +250,7 @@ class Ext4FSMkfs(Ext3FSMkfs):
 class FATFSMkfs(FSMkfs):
     ext = availability.MKDOSFS_APP
     label_option = "-n"
+    nodiscard_option = None

     def get_uuid_args(self, uuid):
         return ["-i", uuid.replace('-', '')]
@@ -227,6 +263,7 @@ def args(self):
 class GFS2Mkfs(FSMkfs):
     ext = availability.MKFS_GFS2_APP
     label_option = None
+    nodiscard_option = None
     get_uuid_args = None

     @property
@@ -237,6 +274,7 @@ def args(self):
 class HFSMkfs(FSMkfs):
     ext = availability.HFORMAT_APP
     label_option = "-l"
+    nodiscard_option = None
     get_uuid_args = None

     @property
@@ -247,6 +285,7 @@ def args(self):
 class HFSPlusMkfs(FSMkfs):
     ext = availability.MKFS_HFSPLUS_APP
     label_option = "-v"
+    nodiscard_option = None
     get_uuid_args = None

     @property
@@ -257,6 +296,7 @@ def args(self):
 class JFSMkfs(FSMkfs):
     ext = availability.MKFS_JFS_APP
     label_option = "-L"
+    nodiscard_option = None
     get_uuid_args = None

     @property
@@ -267,6 +307,7 @@ def args(self):
 class NTFSMkfs(FSMkfs):
     ext = availability.MKNTFS_APP
     label_option = "-L"
+    nodiscard_option = None
     get_uuid_args = None

     @property
@@ -277,6 +318,7 @@ def args(self):
 class ReiserFSMkfs(FSMkfs):
     ext = availability.MKREISERFS_APP
     label_option = "-l"
+    nodiscard_option = None

     def get_uuid_args(self, uuid):
         return ["-u", uuid]
@@ -289,6 +331,7 @@ def args(self):
 class XFSMkfs(FSMkfs):
     ext = availability.MKFS_XFS_APP
     label_option = "-L"
+    nodiscard_option = ["-K"]

     def get_uuid_args(self, uuid):
         return ["-m", "uuid=" + uuid]
@@ -307,3 +350,7 @@ def can_label(self):
     @property
     def can_set_uuid(self):
         return False
+
+    @property
+    def can_nodiscard(self):
+        return False
diff --git a/tests/formats_test/methods_test.py b/tests/formats_test/methods_test.py
index 710fa1c5..b2674ea7 100644
--- a/tests/formats_test/methods_test.py
+++ b/tests/formats_test/methods_test.py
@@ -307,7 +307,8 @@ def _test_create_backend(self):
         self.format._mkfs.do_task.assert_called_with(
             options=None,
             label=not self.format.relabels(),
-            set_uuid=self.format.can_set_uuid()
+            set_uuid=self.format.can_set_uuid(),
+            nodiscard=self.format.can_nodiscard()
         )

     def _test_setup_backend(self):

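With this in place, a format constructed with nodiscard requested picks up the per-filesystem flags declared above. A hedged sketch of the effect (the device path is illustrative):

    from blivet.formats import get_format

    fmt = get_format("xfs", device="/dev/data/vdolv", nodiscard=True)
    # fmt.create() would now assemble roughly: mkfs.xfs -K /dev/data/vdolv
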
From ac04f74fa9bc8ded3facd302ca74ec033009a0bd Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 19 Nov 2020 13:19:21 +0100
Subject: [PATCH 11/17] Add nodiscard option by default when creating VDO
 logical volumes

The user can override this by passing "nodiscard=False" in "fmt_args",
but we want nodiscard by default.
---
 blivet/blivet.py            | 8 +++++++-
 blivet/devicefactory.py     | 6 ++++++
 tests/devicefactory_test.py | 7 +++++++
 3 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/blivet/blivet.py b/blivet/blivet.py
index 754eb152..e4115691 100644
--- a/blivet/blivet.py
+++ b/blivet/blivet.py
@@ -613,9 +613,15 @@ def new_lv(self, *args, **kwargs):

         mountpoint = kwargs.pop("mountpoint", None)
         if 'fmt_type' in kwargs:
+            fmt_args = kwargs.pop("fmt_args", {})
+            if vdo_lv and "nodiscard" not in fmt_args.keys():
+                # we don't want to run discard on a VDO LV during mkfs, so unless
+                # the user tells us not to, we add the nodiscard option to mkfs
+                fmt_args["nodiscard"] = True
+
             kwargs["fmt"] = get_format(kwargs.pop("fmt_type"),
                                        mountpoint=mountpoint,
-                                       **kwargs.pop("fmt_args", {}))
+                                       **fmt_args)

         name = kwargs.pop("name", None)
         if name:
diff --git a/blivet/devicefactory.py b/blivet/devicefactory.py
index c95037cc..085f2fd6 100644
--- a/blivet/devicefactory.py
+++ b/blivet/devicefactory.py
@@ -1811,6 +1811,12 @@ def _reconfigure_device(self):
         self.device.pool.compression = self.compression
         self.device.pool.deduplication = self.deduplication

+    def _set_format(self):
+        super(LVMVDOFactory, self)._set_format()
+
+        # preserve the nodiscard mkfs option after changing the filesystem
+        self.device.format._mkfs_nodiscard = True
+
     #
     # methods to configure the factory's device
     #
diff --git a/tests/devicefactory_test.py b/tests/devicefactory_test.py
index 7cdb51c5..4de1e05b 100644
--- a/tests/devicefactory_test.py
+++ b/tests/devicefactory_test.py
@@ -571,6 +571,10 @@ def _validate_factory_device(self, *args, **kwargs):
         if pool_name:
             self.assertEqual(vdolv.pool.lvname, pool_name)

+        # nodiscard should be always set for VDO LV format
+        if vdolv.format.type:
+            self.assertTrue(vdolv.format._mkfs_nodiscard)
+
         return device

     @patch("blivet.formats.lvmpv.LVMPhysicalVolume.formattable", return_value=True)
@@ -633,6 +637,9 @@ def test_device_factory(self, *args):  # pylint: disable=unused-argument,argumen
         device = self._factory_device(device_type, **kwargs)
         self._validate_factory_device(device, device_type, **kwargs)

+        # change fstype
+        kwargs["fstype"] = "xfs"
+

 class MDFactoryTestCase(DeviceFactoryTestCase):
     device_type = devicefactory.DEVICE_TYPE_MD

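Opting back into discard is then just a matter of passing the format argument explicitly; a short sketch using the fmt_args keyword that new_lv already supports:

    lv = b.new_lv(size=Size("40 GiB"), parents=[pool], name="vdolv", vdo_lv=True,
                  fmt_type="ext4", fmt_args={"nodiscard": False})
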
From 43f25ce84729c321d1ff2bbba2f50489f6d736b4 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 19 Nov 2020 13:31:40 +0100
Subject: [PATCH 12/17] Add LVM VDO example

---
 examples/lvm_vdo.py | 61 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 61 insertions(+)
 create mode 100644 examples/lvm_vdo.py

diff --git a/examples/lvm_vdo.py b/examples/lvm_vdo.py
new file mode 100644
index 00000000..ad081642
--- /dev/null
+++ b/examples/lvm_vdo.py
@@ -0,0 +1,61 @@
+import os
+
+import blivet
+from blivet.size import Size
+from blivet.util import set_up_logging, create_sparse_tempfile
+
+set_up_logging()
+b = blivet.Blivet()   # create an instance of Blivet (don't add system devices)
+
+# create a disk image file on which to create new devices
+disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
+b.disk_images["disk1"] = disk1_file
+disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
+b.disk_images["disk2"] = disk2_file
+
+b.reset()
+
+try:
+    disk1 = b.devicetree.get_device_by_name("disk1")
+    disk2 = b.devicetree.get_device_by_name("disk2")
+
+    b.initialize_disk(disk1)
+    b.initialize_disk(disk2)
+
+    pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
+    b.create_device(pv)
+    pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
+    b.create_device(pv2)
+
+    # allocate the partitions (decide where and on which disks they'll reside)
+    blivet.partitioning.do_partitioning(b)
+
+    vg = b.new_vg(parents=[pv, pv2])
+    b.create_device(vg)
+
+    # create an 80 GiB VDO pool
+    # there can be only one VDO LV on the pool and the two are created together
+    # by a single LVM call; we use 2 separate devices because there are two
+    # block devices in the end, and this allows control over both the "physical"
+    # size of the pool and the "logical" size of the VDO LV (usually bigger,
+    # accounting for the space saved by deduplication and/or compression)
+    pool = b.new_lv(size=Size("80GiB"), parents=[vg], name="vdopool", vdo_pool=True,
+                    deduplication=True, compression=True)
+    b.create_device(pool)
+
+    # create the VDO LV with 400 GiB "virtual size" and an ext4 filesystem on
+    # the VDO pool
+    lv = b.new_lv(size=Size("400GiB"), parents=[pool], name="vdolv", vdo_lv=True,
+                  fmt_type="ext4")
+    b.create_device(lv)
+
+    print(b.devicetree)
+
+    # write the new partitions to disk and format them as specified
+    b.do_it()
+    print(b.devicetree)
+    input("Check the state and hit ENTER to trigger cleanup")
+finally:
+    b.devicetree.teardown_disk_images()
+    os.unlink(disk1_file)
+    os.unlink(disk2_file)

From c487a1e6023b54f5beea8d99ba2f5da5d80590ee Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 25 Nov 2020 13:30:15 +0100
Subject: [PATCH 13/17] Add LVM VDO documentation

---
 doc/lvmvdo.rst | 86 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 86 insertions(+)
 create mode 100644 doc/lvmvdo.rst

diff --git a/doc/lvmvdo.rst b/doc/lvmvdo.rst
new file mode 100644
index 00000000..3965abd3
--- /dev/null
+++ b/doc/lvmvdo.rst
@@ -0,0 +1,86 @@
+LVM VDO support
+===============
+
+Support for creating LVM VDO devices has been added in Blivet 3.4.
+
+These devices are similar to LVM thinly provisioned volumes, but there are some special steps
+and limitations when creating these devices which this document describes.
+
+LVM VDO in Blivet
+-----------------
+
+LVM VDO devices are represented by two ``LVMLogicalVolumeDevice`` devices:
+
+- VDO Pool logical volume with type 'lvmvdopool'
+- VDO logical volume with type 'lvmvdolv' which is the child of the VDO Pool device
+
+Existing LVM VDO setup in Blivet::
+
+    existing 20 GiB disk vdb (265) with existing msdos disklabel
+      existing 20 GiB partition vdb1 (275) with existing lvmpv
+        existing 20 GiB lvmvg data (284)
+          existing 10 GiB lvmvdopool data-vdopool (288)
+            existing 50 GiB lvmvdolv data-vdolv (295)
+
+When creating an LVM VDO setup using Blivet, these two devices must be created together as they
+are created by a single LVM command.
+
+It currently isn't possible to create additional VDO logical volumes in the pool. It is however
+possible to create multiple VDO pools in a single volume group.
+
+Deduplication and compression are properties of the VDO pool. The size specified for the VDO pool
+volume will be used as the "physical" size for the pool and the size specified for the VDO logical
+volume will be used as the "virtual" size for the VDO volume.
+
+A format must be created on the VDO logical volume. For filesystems with discard support,
+the nodiscard option will be automatically added when calling the ``mkfs`` command
+(e.g. ``-K`` for ``mkfs.xfs``).
+
+Example for creating an *80 GiB* VDO pool with a *400 GiB* VDO logical volume with an *ext4* format
+with both deduplication and compression enabled::
+
+    pool = b.new_lv(size=Size("80GiB"), parents=[vg], name="vdopool", vdo_pool=True,
+                    deduplication=True, compression=True)
+    b.create_device(pool)
+
+    lv = b.new_lv(size=Size("400GiB"), parents=[pool], name="vdolv", vdo_lv=True,
+                  fmt_type="ext4")
+    b.create_device(lv)
+
+When removing existing LVM VDO devices, both devices must be removed from the devicetree and the VDO
+logical volume must be removed first (``recursive_remove`` can be used to automate these two steps).
+
+Managing existing LVM VDO devices is currently not supported.
+
+
+LVM VDO in Devicefactory
+------------------------
+
+For top-down specified creation using device factories, a new ``LVMVDOFactory`` factory has been
+added. The factory device in this case is the VDO logical volume, and it is again created
+automatically together with the VDO pool.
+
+Example of creating a new LVM VDO setup using the ``devicefactory`` module::
+
+    factory = blivet.devicefactory.LVMVDOFactory(b, size=Size("5 GiB"), virtual_size=Size("50 GiB"),
+                                                 disks=disks, fstype="xfs",
+                                                 container_name="data",
+                                                 pool_name="myvdopool",
+                                                 compression=True, deduplication=True)
+    factory.configure()
+    factory.device
+
+        LVMLogicalVolumeDevice instance (0x7f14d17422b0) --
+            name = data-00  status = False  id = 528
+            children = []
+            parents = ['non-existent 5 GiB lvmvdopool data-myvdopool (519)']
+            ...
+
+``size`` in this case sets the pool (physical) size; the VDO logical volume size can be specified
+with ``virtual_size`` (if not specified, it will be the same as the pool size). A name for the VDO
+volume can be specified using the ``name`` keyword argument. The ``pool_name`` argument is optional
+and a unique name will be generated if it is omitted. Both ``compression`` and ``deduplication``
+default to ``True`` (enabled) if not specified.
+
+This factory can create only a single VDO logical volume in a single VDO pool, but additional VDO
+pools can be added by repeating the steps used to create the first one.

From c6c776cf137b5c6ae454487df469e9a6dba8a5d1 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 9 Dec 2020 14:06:27 +0100
Subject: [PATCH 14/17] Set minimum size for LVM VDO pool devices

---
 blivet/devicefactory.py        |  3 +++
 blivet/devices/lvm.py          | 26 ++++++++++++++++++++++++++
 tests/devicefactory_test.py    | 29 ++++++++++++++++++++---------
 tests/devices_test/lvm_test.py |  6 ++++++
 4 files changed, 55 insertions(+), 9 deletions(-)

diff --git a/blivet/devicefactory.py b/blivet/devicefactory.py
index 085f2fd6..5e47eb9a 100644
--- a/blivet/devicefactory.py
+++ b/blivet/devicefactory.py
@@ -277,6 +277,7 @@ class DeviceFactory(object):
                          "container_size": SIZE_POLICY_AUTO,
                          "container_raid_level": None,
                          "container_encrypted": None}
+    _device_min_size = Size(0)  # no limit by default, limited only by filesystem size

     def __init__(self, storage, **kwargs):
         """
@@ -1760,6 +1761,8 @@ class LVMVDOFactory(LVMFactory):
         :type deduplication: bool
     """

+    _device_min_size = LVMVDOPoolMixin._min_size
+
     def __init__(self, storage, **kwargs):
         self.pool_name = kwargs.pop("pool_name", None)
         self.virtual_size = kwargs.pop("virtual_size", None)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 0802e2de..785fa2d2 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -1792,6 +1792,7 @@ def populate_ksdata(self, data):
 class LVMVDOPoolMixin(object):

     _external_dependencies = [availability.BLOCKDEV_LVM_PLUGIN, availability.BLOCKDEV_LVM_PLUGIN_VDO]
+    _min_size = Size("5 GiB")  # 2.5 GiB for index and one 2 GiB slab rounded up to 5 GiB

     def __init__(self, compression=True, deduplication=True, index_memory=0, write_policy=None):
         self.compression = compression
@@ -1800,6 +1801,9 @@ def __init__(self, compression=True, deduplication=True, index_memory=0, write_p
         self.write_policy = write_policy
         self._lvs = []

+        if not self.exists and self.size < self.min_size:
+            raise ValueError("Requested size %s is smaller than minimum %s" % (self.size, self.min_size))
+
     @property
     def is_vdo_pool(self):
         return self.seg_type == "vdo-pool"
@@ -1856,6 +1860,23 @@ def direct(self):
         """ Is this device directly accessible? """
         return False

+    @property
+    @util.requires_property("is_vdo_pool")
+    def min_size(self):
+        if self.exists:
+            return self.current_size
+
+        return self._min_size
+
+    def _set_size(self, newsize):
+        if not isinstance(newsize, Size):
+            raise AttributeError("new size must be of type Size")
+
+        if newsize < self.min_size:
+            raise ValueError("Requested size %s is smaller than minimum %s" % (newsize, self.min_size))
+
+        DMDevice._set_size(self, newsize)
+
     def read_current_size(self):
         log_method_call(self, exists=self.exists, path=self.path,
                         sysfs_path=self.sysfs_path)
@@ -2229,6 +2250,11 @@ def max_size(self):
         max_format = self.format.max_size
         return min(max_lv, max_format) if max_format else max_lv

+    @property
+    @type_specific
+    def min_size(self):
+        return super(LVMLogicalVolumeDevice, self).min_size
+
     @property
     @type_specific
     def vg_space_used(self):
diff --git a/tests/devicefactory_test.py b/tests/devicefactory_test.py
index 4de1e05b..a1334cda 100644
--- a/tests/devicefactory_test.py
+++ b/tests/devicefactory_test.py
@@ -49,13 +49,18 @@ class DeviceFactoryTestCase(unittest.TestCase):
     encryption_supported = True
     """ whether encryption of this device type is supported by blivet """

+    factory_class = None
+    """ devicefactory class used in this test case """
+
+    _disk_size = Size("2 GiB")
+
     def setUp(self):
         if self.device_type is None:
             raise unittest.SkipTest("abstract base class")

         self.b = blivet.Blivet()  # don't populate it
-        self.disk_files = [create_sparse_tempfile("factorytest", Size("2 GiB")),
-                           create_sparse_tempfile("factorytest", Size("2 GiB"))]
+        self.disk_files = [create_sparse_tempfile("factorytest", self._disk_size),
+                           create_sparse_tempfile("factorytest", self._disk_size)]
         for filename in self.disk_files:
             disk = DiskFile(filename)
             self.b.devicetree._add_device(disk)
@@ -197,7 +202,7 @@ def _get_size_delta(self, devices=None):
     def test_get_free_disk_space(self, *args):  # pylint: disable=unused-argument
         # get_free_disk_space should return the total free space on disks
         kwargs = self._get_test_factory_args()
-        kwargs["size"] = Size("500 MiB")
+        kwargs["size"] = max(Size("500 MiB"), self.factory_class._device_min_size)
         factory = devicefactory.get_device_factory(self.b,
                                                    self.device_type,
                                                    disks=self.b.disks,
@@ -285,7 +290,7 @@ def test_factory_defaults(self, *args):  # pylint: disable=unused-argument
         kwargs = self._get_test_factory_args()
         kwargs.update({"disks": self.b.disks[:],
                        "fstype": "swap",
-                       "size": Size("2GiB"),
+                       "size": max(Size("2GiB"), self.factory_class._device_min_size),
                        "label": "SWAP"})
         device = self._factory_device(self.device_type, **kwargs)
         factory = devicefactory.get_device_factory(self.b, self.device_type,
@@ -302,6 +307,7 @@ def test_factory_defaults(self, *args):  # pylint: disable=unused-argument
 class PartitionFactoryTestCase(DeviceFactoryTestCase):
     device_class = PartitionDevice
     device_type = devicefactory.DEVICE_TYPE_PARTITION
+    factory_class = devicefactory.PartitionFactory

     def test_bug1178884(self):
         # Test a change of format and size where old size is too large for the
@@ -330,6 +336,7 @@ def _get_size_delta(self, devices=None):
 class LVMFactoryTestCase(DeviceFactoryTestCase):
     device_class = LVMLogicalVolumeDevice
     device_type = devicefactory.DEVICE_TYPE_LVM
+    factory_class = devicefactory.LVMFactory

     def _validate_factory_device(self, *args, **kwargs):
         super(LVMFactoryTestCase, self)._validate_factory_device(*args, **kwargs)
@@ -510,6 +517,7 @@ class LVMThinPFactoryTestCase(LVMFactoryTestCase):
     device_class = LVMLogicalVolumeDevice
     device_type = devicefactory.DEVICE_TYPE_LVM_THINP
     encryption_supported = False
+    factory_class = devicefactory.LVMThinPFactory

     def _validate_factory_device(self, *args, **kwargs):
         super(LVMThinPFactoryTestCase, self)._validate_factory_device(*args,
@@ -541,6 +549,8 @@ class LVMVDOFactoryTestCase(LVMFactoryTestCase):
     device_class = LVMLogicalVolumeDevice
     device_type = devicefactory.DEVICE_TYPE_LVM_VDO
     encryption_supported = False
+    _disk_size = Size("10 GiB")  # we need bigger disks for VDO
+    factory_class = devicefactory.LVMVDOFactory

     def _validate_factory_device(self, *args, **kwargs):
         super(LVMVDOFactoryTestCase, self)._validate_factory_device(*args,
@@ -585,7 +595,7 @@ def _validate_factory_device(self, *args, **kwargs):
|
|
def test_device_factory(self, *args): # pylint: disable=unused-argument,arguments-differ
|
|
device_type = self.device_type
|
|
kwargs = {"disks": self.b.disks,
|
|
- "size": Size("400 MiB"),
|
|
+ "size": Size("6 GiB"),
|
|
"fstype": 'ext4',
|
|
"mountpoint": '/factorytest'}
|
|
device = self._factory_device(device_type, **kwargs)
|
|
@@ -593,7 +603,7 @@ def test_device_factory(self, *args): # pylint: disable=unused-argument,argumen
|
|
self.b.recursive_remove(device.pool)
|
|
|
|
kwargs = {"disks": self.b.disks,
|
|
- "size": Size("400 MiB"),
|
|
+ "size": Size("6 GiB"),
|
|
"fstype": 'ext4',
|
|
"mountpoint": '/factorytest',
|
|
"pool_name": "vdopool",
|
|
@@ -603,19 +613,19 @@ def test_device_factory(self, *args): # pylint: disable=unused-argument,argumen
|
|
self._validate_factory_device(device, device_type, **kwargs)
|
|
|
|
# change size without specifying virtual_size: both sizes should grow
|
|
- kwargs["size"] = Size("600 MiB")
|
|
+ kwargs["size"] = Size("8 GiB")
|
|
kwargs["device"] = device
|
|
device = self._factory_device(device_type, **kwargs)
|
|
self._validate_factory_device(device, device_type, **kwargs)
|
|
|
|
# change virtual size
|
|
- kwargs["virtual_size"] = Size("6 GiB")
|
|
+ kwargs["virtual_size"] = Size("40 GiB")
|
|
kwargs["device"] = device
|
|
device = self._factory_device(device_type, **kwargs)
|
|
self._validate_factory_device(device, device_type, **kwargs)
|
|
|
|
# change virtual size to smaller than size
|
|
- kwargs["virtual_size"] = Size("500 GiB")
|
|
+ kwargs["virtual_size"] = Size("10 GiB")
|
|
kwargs["device"] = device
|
|
device = self._factory_device(device_type, **kwargs)
|
|
self._validate_factory_device(device, device_type, **kwargs)
|
|
@@ -644,6 +654,7 @@ def test_device_factory(self, *args): # pylint: disable=unused-argument,argumen
|
|
class MDFactoryTestCase(DeviceFactoryTestCase):
|
|
device_type = devicefactory.DEVICE_TYPE_MD
|
|
device_class = MDRaidArrayDevice
|
|
+ factory_class = devicefactory.MDFactory
|
|
|
|
def test_device_factory(self):
|
|
# RAID0 across two disks
|
|
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
|
|
index 493d3ba1..78b140ba 100644
|
|
--- a/tests/devices_test/lvm_test.py
|
|
+++ b/tests/devices_test/lvm_test.py
|
|
@@ -705,6 +705,12 @@ def test_new_vdo_pool(self):
|
|
|
|
self.assertEqual(vg.size, Size("10236 MiB"))
|
|
|
|
+ with self.assertRaises(ValueError):
|
|
+ vdopool = b.new_lv(name="vdopool", vdo_pool=True,
|
|
+ parents=[vg], compression=True,
|
|
+ deduplication=True,
|
|
+ size=blivet.size.Size("1 GiB"))
|
|
+
|
|
vdopool = b.new_lv(name="vdopool", vdo_pool=True,
|
|
parents=[vg], compression=True,
|
|
deduplication=True,
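
The new floor is easy to see in action. Below is a minimal sketch, illustrative
rather than part of the patch; it assumes "b" is a blivet.Blivet instance and
"vg" an LVM volume group in its device tree, set up as in test_new_vdo_pool
above:

    from blivet.size import Size

    # Requesting a VDO pool below the 5 GiB minimum now fails fast with the
    # ValueError added in LVMVDOPoolMixin.__init__ above.
    try:
        b.new_lv(name="vdopool", vdo_pool=True, parents=[vg],
                 size=Size("1 GiB"))
    except ValueError as exc:
        print(exc)  # Requested size 1 GiB is smaller than minimum 5 GiB
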
From 197f2877709e702c101ada6b9a055a88f09320c8 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 11 Dec 2020 14:20:48 +0100
Subject: [PATCH 15/17] Use better description for libblockdev plugins in
 tasks.availability

The old names were quite confusing: an error could report that "lvm"
is missing when in fact the libblockdev LVM plugin is missing. With
LVM VDO we also need to be able to tell the difference between a
missing LVM plugin and missing LVM VDO support.
---
blivet/tasks/availability.py | 26 +++++++++++++-------------
1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py
index b107428e..52418685 100644
--- a/blivet/tasks/availability.py
+++ b/blivet/tasks/availability.py
@@ -236,13 +236,13 @@ def availability_errors(self, resource):
:returns: [] if the name of the plugin is loaded
:rtype: list of str
"""
- if resource.name not in blockdev.get_available_plugin_names(): # pylint: disable=no-value-for-parameter
- return ["libblockdev plugin %s not loaded" % resource.name]
+ if self._tech_info.plugin_name not in blockdev.get_available_plugin_names(): # pylint: disable=no-value-for-parameter
+ return ["libblockdev plugin %s not loaded" % self._tech_info.plugin_name]
else:
tech_missing = self._check_technologies()
if tech_missing:
return ["libblockdev plugin %s is loaded but some required "
- "technologies are not available:\n%s" % (resource.name, tech_missing)]
+ "technologies are not available:\n%s" % (self._tech_info.plugin_name, tech_missing)]
else:
return []

@@ -411,16 +411,16 @@ def available_resource(name):
# we can't just check if the plugin is loaded, we also need to make sure
# that all technologies required by us our supported (some may be missing
# due to missing dependencies)
-BLOCKDEV_BTRFS_PLUGIN = blockdev_plugin("btrfs", BLOCKDEV_BTRFS_TECH)
-BLOCKDEV_CRYPTO_PLUGIN = blockdev_plugin("crypto", BLOCKDEV_CRYPTO_TECH)
-BLOCKDEV_DM_PLUGIN = blockdev_plugin("dm", BLOCKDEV_DM_TECH)
-BLOCKDEV_DM_PLUGIN_RAID = blockdev_plugin("dm", BLOCKDEV_DM_TECH_RAID)
-BLOCKDEV_LOOP_PLUGIN = blockdev_plugin("loop", BLOCKDEV_LOOP_TECH)
-BLOCKDEV_LVM_PLUGIN = blockdev_plugin("lvm", BLOCKDEV_LVM_TECH)
-BLOCKDEV_LVM_PLUGIN_VDO = blockdev_plugin("lvm", BLOCKDEV_LVM_TECH_VDO)
-BLOCKDEV_MDRAID_PLUGIN = blockdev_plugin("mdraid", BLOCKDEV_MD_TECH)
-BLOCKDEV_MPATH_PLUGIN = blockdev_plugin("mpath", BLOCKDEV_MPATH_TECH)
-BLOCKDEV_SWAP_PLUGIN = blockdev_plugin("swap", BLOCKDEV_SWAP_TECH)
+BLOCKDEV_BTRFS_PLUGIN = blockdev_plugin("libblockdev btrfs plugin", BLOCKDEV_BTRFS_TECH)
+BLOCKDEV_CRYPTO_PLUGIN = blockdev_plugin("libblockdev crypto plugin", BLOCKDEV_CRYPTO_TECH)
+BLOCKDEV_DM_PLUGIN = blockdev_plugin("libblockdev dm plugin", BLOCKDEV_DM_TECH)
+BLOCKDEV_DM_PLUGIN_RAID = blockdev_plugin("libblockdev dm plugin (raid technology)", BLOCKDEV_DM_TECH_RAID)
+BLOCKDEV_LOOP_PLUGIN = blockdev_plugin("libblockdev loop plugin", BLOCKDEV_LOOP_TECH)
+BLOCKDEV_LVM_PLUGIN = blockdev_plugin("libblockdev lvm plugin", BLOCKDEV_LVM_TECH)
+BLOCKDEV_LVM_PLUGIN_VDO = blockdev_plugin("libblockdev lvm plugin (vdo technology)", BLOCKDEV_LVM_TECH_VDO)
+BLOCKDEV_MDRAID_PLUGIN = blockdev_plugin("libblockdev mdraid plugin", BLOCKDEV_MD_TECH)
+BLOCKDEV_MPATH_PLUGIN = blockdev_plugin("libblockdev mpath plugin", BLOCKDEV_MPATH_TECH)
+BLOCKDEV_SWAP_PLUGIN = blockdev_plugin("libblockdev swap plugin", BLOCKDEV_SWAP_TECH)

# applications with versions
# we need e2fsprogs newer than 1.41 and we are checking the version by running
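
With the renamed resources, callers that report missing functionality can show
the full, self-describing plugin description. A small sketch (not from the
patch; it relies only on the "available" and "name" attributes these resources
are shown to have elsewhere in this series):

    from blivet.tasks import availability

    # Before this patch the message would have named the plugin just "lvm";
    # now it names "libblockdev lvm plugin (vdo technology)".
    res = availability.BLOCKDEV_LVM_PLUGIN_VDO
    if not res.available:
        print("missing dependency: %s" % res.name)
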
From 5fc047b48b0de18fa249f102d2a7163ac2d6e6a6 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 11 Dec 2020 14:24:18 +0100
Subject: [PATCH 16/17] Fix external dependencies for LVM VDO devices

The external and unavailable dependencies code is mostly supposed
to work with class objects rather than instances. This is a problem
for LVM devices: LVMLogicalVolumeDevice can't depend on LVM VDO,
and the special LVM VDO device mixin classes don't inherit from the
Device class, so they are missing some availability functions.

This fix adds the necessary functions to the LVM VDO mixin classes
to make sure both "unavailable_type_dependencies" and
"type_external_dependencies" work with LVMVDOLogicalVolumeMixin
and LVMVDOPoolMixin. When working with an LVMLogicalVolumeDevice
instance, its dependencies are correctly set based on the type of
the logical volume.
---
blivet/devicefactory.py | 7 +--
blivet/devices/lvm.py | 31 ++++++++++
tests/action_test.py | 7 +++
tests/devicefactory_test.py | 32 ++++++++++
tests/devices_test/lvm_test.py | 106 +++++++++++++++++++++++++++++++++
5 files changed, 179 insertions(+), 4 deletions(-)

diff --git a/blivet/devicefactory.py b/blivet/devicefactory.py
index 5e47eb9a..b29a107a 100644
--- a/blivet/devicefactory.py
+++ b/blivet/devicefactory.py
@@ -27,7 +27,7 @@
from .devices import BTRFSDevice, DiskDevice
from .devices import LUKSDevice, LVMLogicalVolumeDevice
from .devices import PartitionDevice, MDRaidArrayDevice
-from .devices.lvm import LVMVDOPoolMixin, DEFAULT_THPOOL_RESERVE
+from .devices.lvm import LVMVDOPoolMixin, LVMVDOLogicalVolumeMixin, DEFAULT_THPOOL_RESERVE
from .formats import get_format
from .devicelibs import btrfs
from .devicelibs import mdraid
@@ -70,9 +70,6 @@ def is_supported_device_type(device_type):
:returns: True if this device type is supported
:rtype: bool
"""
- if device_type == DEVICE_TYPE_LVM_VDO:
- return not any(e for e in LVMVDOPoolMixin._external_dependencies if not e.available)
-
devices = []
if device_type == DEVICE_TYPE_BTRFS:
devices = [BTRFSDevice]
@@ -84,6 +81,8 @@ def is_supported_device_type(device_type):
devices = [PartitionDevice]
elif device_type == DEVICE_TYPE_MD:
devices = [MDRaidArrayDevice]
+ elif device_type == DEVICE_TYPE_LVM_VDO:
+ devices = [LVMLogicalVolumeDevice, LVMVDOPoolMixin, LVMVDOLogicalVolumeMixin]

return not any(c.unavailable_type_dependencies() for c in devices)

diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 785fa2d2..ac900bf3 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -1804,6 +1804,17 @@ def __init__(self, compression=True, deduplication=True, index_memory=0, write_p
if not self.exists and self.size < self.min_size:
raise ValueError("Requested size %s is smaller than minimum %s" % (self.size, self.min_size))

+ # these two methods are defined in Device but LVMVDOPoolMixin doesn't inherit from
+ # it and we can't have this code in LVMLogicalVolumeDevice because we need to be able
+ # to get dependencies without creating instance of the class
+ @classmethod
+ def type_external_dependencies(cls):
+ return set(d for d in cls._external_dependencies) | LVMLogicalVolumeDevice.type_external_dependencies()
+
+ @classmethod
+ def unavailable_type_dependencies(cls):
+ return set(e for e in cls.type_external_dependencies() if not e.available)
+
@property
def is_vdo_pool(self):
return self.seg_type == "vdo-pool"
@@ -1926,6 +1937,17 @@ def _check_parents(self):
if not container or not isinstance(container, LVMLogicalVolumeDevice) or not container.is_vdo_pool:
raise ValueError("constructor requires a vdo-pool LV")

+ # these two methods are defined in Device but LVMVDOLogicalVolumeMixin doesn't inherit
+ # from it and we can't have this code in LVMLogicalVolumeDevice because we need to be
+ # able to get dependencies without creating instance of the class
+ @classmethod
+ def type_external_dependencies(cls):
+ return set(d for d in cls._external_dependencies) | LVMLogicalVolumeDevice.type_external_dependencies()
+
+ @classmethod
+ def unavailable_type_dependencies(cls):
+ return set(e for e in cls.type_external_dependencies() if not e.available)
+
@property
def vg_space_used(self):
return Size(0) # the pool's size is already accounted for in the vg
@@ -2217,6 +2239,15 @@ def _convert_from_lvs(self):
"""Convert the LVs to create this LV from into its internal LVs"""
raise ValueError("Cannot create a new LV of type '%s' from other LVs" % self.seg_type)

+ @property
+ def external_dependencies(self):
+ deps = super(LVMLogicalVolumeBase, self).external_dependencies
+ if self.is_vdo_pool:
+ deps.update(LVMVDOPoolMixin.type_external_dependencies())
+ if self.is_vdo_lv:
+ deps.update(LVMVDOLogicalVolumeMixin.type_external_dependencies())
+ return deps
+
@property
@type_specific
def vg(self):
diff --git a/tests/action_test.py b/tests/action_test.py
index 77176f46..38a2e872 100644
--- a/tests/action_test.py
+++ b/tests/action_test.py
@@ -18,6 +18,8 @@
from blivet.devices import MDRaidArrayDevice
from blivet.devices import LVMVolumeGroupDevice
from blivet.devices import LVMLogicalVolumeDevice
+from blivet.devices.lvm import LVMVDOPoolMixin
+from blivet.devices.lvm import LVMVDOLogicalVolumeMixin

# format classes
from blivet.formats.fs import Ext2FS
@@ -1252,6 +1254,11 @@ def test_lv_from_lvs_actions(self):
self.assertEqual(set(self.storage.lvs), {pool})
self.assertEqual(set(pool._internal_lvs), {lv1, lv2})

+
+@unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES + [LVMVDOPoolMixin, LVMVDOLogicalVolumeMixin]), "some unsupported device classes required for this test")
+@unittest.skipUnless(all(x().utils_available for x in FORMAT_CLASSES), "some unsupported format classes required for this test")
+class DeviceActionLVMVDOTestCase(DeviceActionTestCase):
+
def test_lvm_vdo_destroy(self):
self.destroy_all_devices()
sdc = self.storage.devicetree.get_device_by_name("sdc")
diff --git a/tests/devicefactory_test.py b/tests/devicefactory_test.py
index a1334cda..e4210ead 100644
--- a/tests/devicefactory_test.py
+++ b/tests/devicefactory_test.py
@@ -592,6 +592,8 @@ def _validate_factory_device(self, *args, **kwargs):
@patch("blivet.static_data.lvm_info.blockdev.lvm.lvs", return_value=[])
@patch("blivet.devices.lvm.LVMVolumeGroupDevice.type_external_dependencies", return_value=set())
@patch("blivet.devices.lvm.LVMLogicalVolumeBase.type_external_dependencies", return_value=set())
+ @patch("blivet.devices.lvm.LVMVDOPoolMixin.type_external_dependencies", return_value=set())
+ @patch("blivet.devices.lvm.LVMVDOLogicalVolumeMixin.type_external_dependencies", return_value=set())
def test_device_factory(self, *args): # pylint: disable=unused-argument,arguments-differ
device_type = self.device_type
kwargs = {"disks": self.b.disks,
@@ -650,6 +652,36 @@ def test_device_factory(self, *args): # pylint: disable=unused-argument,argumen
# change fstype
kwargs["fstype"] = "xfs"

+ @patch("blivet.formats.lvmpv.LVMPhysicalVolume.formattable", return_value=True)
+ @patch("blivet.formats.lvmpv.LVMPhysicalVolume.destroyable", return_value=True)
+ @patch("blivet.static_data.lvm_info.blockdev.lvm.lvs", return_value=[])
+ @patch("blivet.devices.lvm.LVMVolumeGroupDevice.type_external_dependencies", return_value=set())
+ @patch("blivet.devices.lvm.LVMLogicalVolumeBase.type_external_dependencies", return_value=set())
+ @patch("blivet.devices.lvm.LVMVDOPoolMixin.type_external_dependencies", return_value=set())
+ @patch("blivet.devices.lvm.LVMVDOLogicalVolumeMixin.type_external_dependencies", return_value=set())
+ def test_factory_defaults(self, *args): # pylint: disable=unused-argument
+ super(LVMVDOFactoryTestCase, self).test_factory_defaults()
+
+ @patch("blivet.formats.lvmpv.LVMPhysicalVolume.formattable", return_value=True)
+ @patch("blivet.formats.lvmpv.LVMPhysicalVolume.destroyable", return_value=True)
+ @patch("blivet.static_data.lvm_info.blockdev.lvm.lvs", return_value=[])
+ @patch("blivet.devices.lvm.LVMVolumeGroupDevice.type_external_dependencies", return_value=set())
+ @patch("blivet.devices.lvm.LVMLogicalVolumeBase.type_external_dependencies", return_value=set())
+ @patch("blivet.devices.lvm.LVMVDOPoolMixin.type_external_dependencies", return_value=set())
+ @patch("blivet.devices.lvm.LVMVDOLogicalVolumeMixin.type_external_dependencies", return_value=set())
+ def test_get_free_disk_space(self, *args):
+ super(LVMVDOFactoryTestCase, self).test_get_free_disk_space()
+
+ @patch("blivet.formats.lvmpv.LVMPhysicalVolume.formattable", return_value=True)
+ @patch("blivet.formats.lvmpv.LVMPhysicalVolume.destroyable", return_value=True)
+ @patch("blivet.static_data.lvm_info.blockdev.lvm.lvs", return_value=[])
+ @patch("blivet.devices.lvm.LVMVolumeGroupDevice.type_external_dependencies", return_value=set())
+ @patch("blivet.devices.lvm.LVMLogicalVolumeBase.type_external_dependencies", return_value=set())
+ @patch("blivet.devices.lvm.LVMVDOPoolMixin.type_external_dependencies", return_value=set())
+ @patch("blivet.devices.lvm.LVMVDOLogicalVolumeMixin.type_external_dependencies", return_value=set())
+ def test_normalize_size(self, *args): # pylint: disable=unused-argument
+ super(LVMVDOFactoryTestCase, self).test_normalize_size()
+

class MDFactoryTestCase(DeviceFactoryTestCase):
device_type = devicefactory.DEVICE_TYPE_MD
diff --git a/tests/devices_test/lvm_test.py b/tests/devices_test/lvm_test.py
index 78b140ba..d938144d 100644
--- a/tests/devices_test/lvm_test.py
+++ b/tests/devices_test/lvm_test.py
@@ -10,10 +10,13 @@
from blivet.devices import StorageDevice
from blivet.devices import LVMLogicalVolumeDevice
from blivet.devices import LVMVolumeGroupDevice
+from blivet.devices.lvm import LVMVDOPoolMixin
+from blivet.devices.lvm import LVMVDOLogicalVolumeMixin
from blivet.devices.lvm import LVMCacheRequest
from blivet.devices.lvm import LVPVSpec, LVMInternalLVtype
from blivet.size import Size
from blivet.devicelibs import raid
+from blivet import devicefactory
from blivet import errors

DEVICE_CLASSES = [
@@ -690,6 +693,10 @@ def test_new_lv_from_non_existing_lvs(self):
pool.create()
self.assertTrue(lvm.thpool_convert.called)

+
+@unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES + [LVMVDOPoolMixin, LVMVDOLogicalVolumeMixin]), "some unsupported device classes required for this test")
+class BlivetNewLVMVDODeviceTest(unittest.TestCase):
+
def test_new_vdo_pool(self):
b = blivet.Blivet()
pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
@@ -726,3 +733,102 @@ def test_new_vdo_pool(self):
self.assertEqual(vdopool.children[0], vdolv)
self.assertEqual(vdolv.parents[0], vdopool)
self.assertListEqual(vg.lvs, [vdopool, vdolv])
+
+
+@unittest.skipUnless(not any(x.unavailable_type_dependencies() for x in DEVICE_CLASSES), "some unsupported device classes required for this test")
+class BlivetLVMVDODependenciesTest(unittest.TestCase):
+ def test_vdo_dependencies(self):
+ blivet.tasks.availability.CACHE_AVAILABILITY = False
+
+ b = blivet.Blivet()
+ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("10 GiB"), exists=True)
+ vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
+
+ for dev in (pv, vg):
+ b.devicetree._add_device(dev)
+
+ # check that all the above devices are in the expected places
+ self.assertEqual(set(b.devices), {pv, vg})
+ self.assertEqual(set(b.vgs), {vg})
+
+ self.assertEqual(vg.size, Size("10236 MiB"))
+
+ vdopool = b.new_lv(name="vdopool", vdo_pool=True,
+ parents=[vg], compression=True,
+ deduplication=True,
+ size=blivet.size.Size("8 GiB"))
+
+ vdolv = b.new_lv(name="vdolv", vdo_lv=True,
+ parents=[vdopool],
+ size=blivet.size.Size("40 GiB"))
+
+ # Dependencies check: for VDO types these should be combination of "normal"
+ # LVM dependencies (LVM libblockdev plugin + kpartx and DM plugin from DMDevice)
+ # and LVM VDO technology from the LVM plugin
+ lvm_vdo_dependencies = ["kpartx",
+ "libblockdev dm plugin",
+ "libblockdev lvm plugin",
+ "libblockdev lvm plugin (vdo technology)"]
+ pool_deps = [d.name for d in vdopool.external_dependencies]
+ six.assertCountEqual(self, pool_deps, lvm_vdo_dependencies)
+
+ vdolv_deps = [d.name for d in vdolv.external_dependencies]
+ six.assertCountEqual(self, vdolv_deps, lvm_vdo_dependencies)
+
+ # same dependencies should be returned when checking with class not instance
+ pool_type_deps = [d.name for d in LVMVDOPoolMixin.type_external_dependencies()]
+ six.assertCountEqual(self, pool_type_deps, lvm_vdo_dependencies)
+
+ vdolv_type_deps = [d.name for d in LVMVDOLogicalVolumeMixin.type_external_dependencies()]
+ six.assertCountEqual(self, vdolv_type_deps, lvm_vdo_dependencies)
+
+ # just to be sure LVM VDO specific code didn't break "normal" LVs
+ normallv = b.new_lv(name="lvol0",
+ parents=[vg],
+ size=blivet.size.Size("1 GiB"))
+
+ normalvl_deps = [d.name for d in normallv.external_dependencies]
+ six.assertCountEqual(self, normalvl_deps, ["kpartx",
+ "libblockdev dm plugin",
+ "libblockdev lvm plugin"])
+
+ with patch("blivet.devices.lvm.LVMVDOPoolMixin._external_dependencies",
+ new=[blivet.tasks.availability.unavailable_resource("VDO unavailability test")]):
+ with patch("blivet.devices.lvm.LVMVDOLogicalVolumeMixin._external_dependencies",
+ new=[blivet.tasks.availability.unavailable_resource("VDO unavailability test")]):
+
+ pool_deps = [d.name for d in vdopool.unavailable_dependencies]
+ self.assertEqual(pool_deps, ["VDO unavailability test"])
+
+ vdolv_deps = [d.name for d in vdolv.unavailable_dependencies]
+ self.assertEqual(vdolv_deps, ["VDO unavailability test"])
+
+ # same dependencies should be returned when checking with class not instance
+ pool_type_deps = [d.name for d in LVMVDOPoolMixin.unavailable_type_dependencies()]
+ six.assertCountEqual(self, pool_type_deps, ["VDO unavailability test"])
+
+ vdolv_type_deps = [d.name for d in LVMVDOLogicalVolumeMixin.unavailable_type_dependencies()]
+ six.assertCountEqual(self, vdolv_type_deps, ["VDO unavailability test"])
+
+ normallv_deps = [d.name for d in normallv.unavailable_dependencies]
+ self.assertEqual(normallv_deps, [])
+
+ with self.assertRaises(errors.DependencyError):
+ b.create_device(vdopool)
+ b.create_device(vdolv)
+
+ b.create_device(normallv)
+
+ def test_vdo_dependencies_devicefactory(self):
+ with patch("blivet.devices.lvm.LVMVDOPoolMixin._external_dependencies",
+ new=[blivet.tasks.availability.unavailable_resource("VDO unavailability test")]):
+ with patch("blivet.devices.lvm.LVMVDOLogicalVolumeMixin._external_dependencies",
+ new=[blivet.tasks.availability.unavailable_resource("VDO unavailability test")]):
+
+ # shouldn't affect "normal" LVM
+ lvm_supported = devicefactory.is_supported_device_type(devicefactory.DEVICE_TYPE_LVM)
+ self.assertTrue(lvm_supported)
+
+ vdo_supported = devicefactory.is_supported_device_type(devicefactory.DEVICE_TYPE_LVM_VDO)
+ self.assertFalse(vdo_supported)
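
The class methods added here make it possible to ask about LVM VDO support
without instantiating any device, which is exactly how the reworked
is_supported_device_type() uses them. A minimal sketch of that pattern
(illustrative, not part of the patch):

    from blivet.devices.lvm import LVMVDOPoolMixin, LVMVDOLogicalVolumeMixin

    # Query dependencies at class level; no device instance is required.
    for mixin in (LVMVDOPoolMixin, LVMVDOLogicalVolumeMixin):
        missing = mixin.unavailable_type_dependencies()
        if missing:
            print("LVM VDO not usable, missing: %s"
                  % ", ".join(d.name for d in missing))
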
From c7fb125ec552ee5070f8180f92fe5545709192ff Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 11 Dec 2020 15:02:05 +0100
Subject: [PATCH 17/17] Bump required libblockdev version to 2.24

LVM VDO support was added in 2.24.
---
python-blivet.spec | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python-blivet.spec b/python-blivet.spec
index ffd4210e..58cad0b2 100644
--- a/python-blivet.spec
+++ b/python-blivet.spec
@@ -36,7 +36,7 @@ Source1: http://github.com/storaged-project/blivet/archive/%{realname}-%{realver
%global partedver 1.8.1
%global pypartedver 3.10.4
%global utillinuxver 2.15.1
-%global libblockdevver 2.19
+%global libblockdevver 2.24
%global libbytesizever 0.3
%global pyudevver 0.18