import rhel-system-roles-1.16.2-1.el9_0.3

This commit is contained in:
CentOS Sources 2022-08-09 05:42:07 -04:00 committed by Stepan Oksanichenko
parent 0440365642
commit 9c094cac15
3 changed files with 357 additions and 1 deletions

View File

@ -0,0 +1,151 @@
From acb99e74a24fa07863c596fe59d2999adc28c249 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 2 Jun 2022 15:18:19 +0200
Subject: [PATCH] LVM RAID raid0 level support (#272)
* Add workaround for missing LVM raid0 support in blivet
Blivet supports creating LVs with segment type "raid0" but it is
not in the list of supported RAID levels. This will be fixed in
blivet, see https://github.com/storaged-project/blivet/pull/1047
* Add a test for LVM RAID raid0 level
* README: Remove "striped" from the list of supported RAID for pools
We use MD RAID for RAIDs on the pool level, which doesn't support the
"striped" level.
* README: Clarify supported volume RAID levels
We support different levels for LVM RAID and MD RAID.
(cherry picked from commit 8b868a348155b08479743945aba88271121ad4b0)
---
README.md | 7 ++-
library/blivet.py | 7 +++
tests/tests_create_raid_pool_then_remove.yml | 54 ++++++++++++++++++++
3 files changed, 66 insertions(+), 2 deletions(-)
diff --git a/README.md b/README.md
index f8e3daa..bd123d7 100644
--- a/README.md
+++ b/README.md
@@ -54,7 +54,7 @@ device node basename (like `sda` or `mpathb`), /dev/disk/ symlink
##### `raid_level`
When used with `type: lvm` it manages a volume group with a mdraid array of given level
on it. Input `disks` are in this case used as RAID members.
-Accepted values are: `linear`, `striped`, `raid0`, `raid1`, `raid4`, `raid5`, `raid6`, `raid10`
+Accepted values are: `linear`, `raid0`, `raid1`, `raid4`, `raid5`, `raid6`, `raid10`
##### `volumes`
This is a list of volumes that belong to the current pool. It follows the
@@ -136,7 +136,10 @@ Specifies RAID level. LVM RAID can be created as well.
"Regular" RAID volume requires type to be `raid`.
LVM RAID needs that volume has `storage_pools` parent with type `lvm`,
`raid_disks` need to be specified as well.
-Accepted values are: `linear` (N/A for LVM RAID), `striped`, `raid0`, `raid1`, `raid4`, `raid5`, `raid6`, `raid10`
+Accepted values are:
+* for LVM RAID volume: `raid0`, `raid1`, `raid4`, `raid5`, `raid6`, `raid10`, `striped`, `mirror`
+* for RAID volume: `linear`, `raid0`, `raid1`, `raid4`, `raid5`, `raid6`, `raid10`
+
__WARNING__: Changing `raid_level` for a volume is a destructive operation, meaning
all data on that volume will be lost as part of the process of
removing old and adding new RAID. RAID reshaping is currently not
diff --git a/library/blivet.py b/library/blivet.py
index 29552fa..33c93b2 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -118,6 +118,7 @@ LIB_IMP_ERR = ""
try:
from blivet3 import Blivet
from blivet3.callbacks import callbacks
+ from blivet3 import devicelibs
from blivet3 import devices
from blivet3.deviceaction import ActionConfigureFormat
from blivet3.flags import flags as blivet_flags
@@ -132,6 +133,7 @@ except ImportError:
try:
from blivet import Blivet
from blivet.callbacks import callbacks
+ from blivet import devicelibs
from blivet import devices
from blivet.deviceaction import ActionConfigureFormat
from blivet.flags import flags as blivet_flags
@@ -152,6 +154,11 @@ if BLIVET_PACKAGE:
set_up_logging()
log = logging.getLogger(BLIVET_PACKAGE + ".ansible")
+ # XXX add support for LVM RAID raid0 level
+ devicelibs.lvm.raid_levels.add_raid_level(devicelibs.raid.RAID0)
+ if "raid0" not in devicelibs.lvm.raid_seg_types:
+ devicelibs.lvm.raid_seg_types.append("raid0")
+
MAX_TRIM_PERCENT = 2
diff --git a/tests/tests_create_raid_pool_then_remove.yml b/tests/tests_create_raid_pool_then_remove.yml
index d81680d..1fb4e15 100644
--- a/tests/tests_create_raid_pool_then_remove.yml
+++ b/tests/tests_create_raid_pool_then_remove.yml
@@ -150,3 +150,57 @@
raid_disks: "{{ [unused_disks[0], unused_disks[1]] }}"
- include_tasks: verify-role-results.yml
+
+ - name: Create a RAID0 lvm raid device
+ include_role:
+ name: linux-system-roles.storage
+ vars:
+ storage_pools:
+ - name: vg1
+ disks: "{{ unused_disks }}"
+ type: lvm
+ state: present
+ volumes:
+ - name: lv1
+ size: "{{ volume1_size }}"
+ mount_point: "{{ mount_location1 }}"
+ raid_disks: "{{ [unused_disks[0], unused_disks[1]] }}"
+ raid_level: raid0
+
+ - include_tasks: verify-role-results.yml
+
+ - name: Repeat the previous invocation to verify idempotence
+ include_role:
+ name: linux-system-roles.storage
+ vars:
+ storage_pools:
+ - name: vg1
+ disks: "{{ unused_disks }}"
+ type: lvm
+ state: present
+ volumes:
+ - name: lv1
+ size: "{{ volume1_size }}"
+ mount_point: "{{ mount_location1 }}"
+ raid_level: raid0
+ raid_disks: "{{ [unused_disks[0], unused_disks[1]] }}"
+
+ - include_tasks: verify-role-results.yml
+
+ - name: Remove the device created above
+ include_role:
+ name: linux-system-roles.storage
+ vars:
+ storage_pools:
+ - name: vg1
+ disks: "{{ unused_disks }}"
+ type: lvm
+ state: absent
+ volumes:
+ - name: lv1
+ size: "{{ volume1_size }}"
+ mount_point: "{{ mount_location1 }}"
+ raid_level: raid0
+ raid_disks: "{{ [unused_disks[0], unused_disks[1]] }}"
+
+ - include_tasks: verify-role-results.yml
--
2.35.3

View File

@ -0,0 +1,192 @@
From ba8a97039805f488c26b4d857f0137a349359c23 Mon Sep 17 00:00:00 2001
From: Richard Megginson <rmeggins@redhat.com>
Date: Mon, 16 May 2022 07:51:43 -0600
Subject: [PATCH] add support for mount_options (#270)
* add support for mount_options
When support for argument validation was added, that support did not
include the `mount_options` parameter. This fix adds back that
parameter. In addition, the volume module arguments are refactored
so that the common volume parameters such as `mount_options` can be
specified in one place.
This adds a test for the `mount_options` parameter, and adds
verification for that parameter.
* only check mount_options if requested
(cherry picked from commit ecf3d04bb704db5c1a095aaef40c2372fd45d4d6)
---
library/blivet.py | 78 ++++++++++++++----------------
tests/test-verify-volume-fstab.yml | 22 ++++++++-
tests/tests_misc.yml | 3 ++
3 files changed, 60 insertions(+), 43 deletions(-)
diff --git a/library/blivet.py b/library/blivet.py
index 80575bb..29552fa 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -105,6 +105,7 @@ volumes:
elements: dict
'''
+import copy
import logging
import os
import traceback
@@ -1500,6 +1501,39 @@ def activate_swaps(b, pools, volumes):
def run_module():
# available arguments/parameters that a user can pass
+ common_volume_opts = dict(encryption=dict(type='bool'),
+ encryption_cipher=dict(type='str'),
+ encryption_key=dict(type='str'),
+ encryption_key_size=dict(type='int'),
+ encryption_luks_version=dict(type='str'),
+ encryption_password=dict(type='str'),
+ fs_create_options=dict(type='str'),
+ fs_label=dict(type='str', default=''),
+ fs_type=dict(type='str'),
+ mount_options=dict(type='str'),
+ mount_point=dict(type='str'),
+ name=dict(type='str'),
+ raid_level=dict(type='str'),
+ size=dict(type='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ type=dict(type='str'))
+ volume_opts = copy.deepcopy(common_volume_opts)
+ volume_opts.update(
+ dict(disks=dict(type='list'),
+ raid_device_count=dict(type='int'),
+ raid_spare_count=dict(type='int'),
+ raid_metadata_version=dict(type='str')))
+ pool_volume_opts = copy.deepcopy(common_volume_opts)
+ pool_volume_opts.update(
+ dict(cached=dict(type='bool'),
+ cache_devices=dict(type='list', elements='str', default=list()),
+ cache_mode=dict(type='str'),
+ cache_size=dict(type='str'),
+ compression=dict(type='bool'),
+ deduplication=dict(type='bool'),
+ raid_disks=dict(type='list', elements='str', default=list()),
+ vdo_pool_size=dict(type='str')))
+
module_args = dict(
pools=dict(type='list', elements='dict',
options=dict(disks=dict(type='list', elements='str', default=list()),
@@ -1517,49 +1551,9 @@ def run_module():
state=dict(type='str', default='present', choices=['present', 'absent']),
type=dict(type='str'),
volumes=dict(type='list', elements='dict', default=list(),
- options=dict(cached=dict(type='bool'),
- cache_devices=dict(type='list', elements='str', default=list()),
- cache_mode=dict(type='str'),
- cache_size=dict(type='str'),
- compression=dict(type='bool'),
- deduplication=dict(type='bool'),
- encryption=dict(type='bool'),
- encryption_cipher=dict(type='str'),
- encryption_key=dict(type='str'),
- encryption_key_size=dict(type='int'),
- encryption_luks_version=dict(type='str'),
- encryption_password=dict(type='str'),
- fs_create_options=dict(type='str'),
- fs_label=dict(type='str', default=''),
- fs_type=dict(type='str'),
- mount_point=dict(type='str'),
- name=dict(type='str'),
- raid_disks=dict(type='list', elements='str', default=list()),
- raid_level=dict(type='str'),
- size=dict(type='str'),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- type=dict(type='str'),
- vdo_pool_size=dict(type='str'))))),
+ options=pool_volume_opts))),
volumes=dict(type='list', elements='dict',
- options=dict(disks=dict(type='list'),
- encryption=dict(type='bool'),
- encryption_cipher=dict(type='str'),
- encryption_key=dict(type='str'),
- encryption_key_size=dict(type='int'),
- encryption_luks_version=dict(type='str'),
- encryption_password=dict(type='str'),
- fs_create_options=dict(type='str'),
- fs_label=dict(type='str', default=''),
- fs_type=dict(type='str'),
- mount_point=dict(type='str'),
- name=dict(type='str'),
- raid_level=dict(type='str'),
- raid_device_count=dict(type='int'),
- raid_spare_count=dict(type='int'),
- raid_metadata_version=dict(type='str'),
- size=dict(type='str'),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- type=dict(type='str'))),
+ options=volume_opts),
packages_only=dict(type='bool', required=False, default=False),
disklabel_type=dict(type='str', required=False, default=None),
safe_mode=dict(type='bool', required=False, default=True),
diff --git a/tests/test-verify-volume-fstab.yml b/tests/test-verify-volume-fstab.yml
index 80d78f0..0091084 100644
--- a/tests/test-verify-volume-fstab.yml
+++ b/tests/test-verify-volume-fstab.yml
@@ -11,6 +11,15 @@
storage_test_fstab_expected_mount_point_matches: "{{ 1
if (_storage_test_volume_present and storage_test_volume.mount_point and storage_test_volume.mount_point.startswith('/'))
else 0 }}"
+ storage_test_fstab_mount_options_matches: "{{ storage_test_fstab.stdout_lines |
+ map('regex_search', ' ' + storage_test_volume.mount_point + ' .* ' + storage_test_volume.mount_options + ' +') |
+ select('string')|list if (
+ storage_test_volume.mount_options|d('none',true) != 'none'
+ and storage_test_volume.mount_point|d('none',true) != 'none'
+ ) else [] }}"
+ storage_test_fstab_expected_mount_options_matches: "{{ 1
+ if (_storage_test_volume_present and storage_test_volume.mount_options)
+ else 0 }}"
# device id
- name: Verify that the device identifier appears in /etc/fstab
@@ -26,7 +35,16 @@
msg: "Expected number ({{ storage_test_fstab_expected_mount_point_matches }}) of
entries with volume '{{ storage_test_volume.name }}' mount point not found in /etc/fstab."
-# todo: options
+# mount options
+- name: Verify mount_options
+ assert:
+ that: storage_test_fstab_mount_options_matches|length == storage_test_fstab_expected_mount_options_matches|int
+ msg: "Expected number ({{ storage_test_fstab_expected_mount_options_matches }}) of
+ entries with volume '{{ storage_test_volume.name }}' mount options not found in /etc/fstab."
+ when:
+ - __storage_verify_mount_options | d(false)
+ - "'mount_options' in storage_test_volume"
+ - "'mount_point' in storage_test_volume"
- name: Clean up variables
set_fact:
@@ -34,3 +52,5 @@
storage_test_fstab_mount_point_matches: null
storage_test_fstab_expected_id_matches: null
storage_test_fstab_expected_mount_point_matches: null
+ storage_test_fstab_mount_options_matches: null
+ storage_test_fstab_expected_mount_options_matches: null
diff --git a/tests/tests_misc.yml b/tests/tests_misc.yml
index 159c959..97c1627 100644
--- a/tests/tests_misc.yml
+++ b/tests/tests_misc.yml
@@ -189,8 +189,11 @@
fs_type: 'ext4'
fs_create_options: '-F'
mount_point: "{{ mount_location }}"
+ mount_options: rw,noatime,defaults
- include_tasks: verify-role-results.yml
+ vars:
+ __storage_verify_mount_options: true
- name: Remove the disk volume created above
include_role:
--
2.35.3

View File

@ -31,7 +31,7 @@ Name: linux-system-roles
Url: https://github.com/linux-system-roles
Summary: Set of interfaces for unified system management
Version: 1.16.2
Release: 1%{?dist}.2
Release: 1%{?dist}.3
#Group: Development/Libraries
License: GPLv3+ and MIT and BSD and Python
@ -242,6 +242,9 @@ Source998: collection_readme.sh
Patch51: network-disable-bondtests.diff
Patch61: Bug-2098224-storage-role-cannot-set-mount_options-for-volumes.patch
Patch62: Bug-2098223-storage-role-raid_level-striped-is-not-supported.patch
Patch1501: ansible-sshd.patch
BuildArch: noarch
@ -335,6 +338,10 @@ cd ../..
cd %{rolename5}
%patch51 -p1
cd ..
cd %{rolename6}
%patch61 -p1
%patch62 -p1
cd ..
cd %{rolename15}
%patch1501 -p1
sed -r -i -e "s/ansible-sshd/linux-system-roles.sshd/" tests/*.yml examples/*.yml
@ -713,6 +720,12 @@ fi
%endif
%changelog
* Fri Jun 17 2022 Rich Megginson <rmeggins@redhat.com> - 1.16.2-1.3
- storage role cannot set mount_options for volumes
Resolves: rhbz#2098224
- storage role raid_level "striped" is not supported
Resolves: rhbz#2098223
* Thu Apr 21 2022 Rich Megginson <rmeggins@redhat.com> - 1.16.2-1.2
- sshd - FIPS mode detection in SSHD role is wrong
Resolves: rhbz#2077475 (EL9)