import from CS git rhel-system-roles-1.23.0-4.el8

parent 260649931c
commit a0a1e851cc

.gitignore (vendored): 18 lines changed
@@ -1,12 +1,12 @@
-SOURCES/ad_integration-1.4.2.tar.gz
+SOURCES/ad_integration-1.4.6.tar.gz
 SOURCES/ansible-posix-1.5.4.tar.gz
-SOURCES/ansible-sshd-v0.23.2.tar.gz
+SOURCES/ansible-sshd-v0.25.0.tar.gz
 SOURCES/auto-maintenance-11ad785c9bb72611244e7909450ca4247e12db4d.tar.gz
-SOURCES/bootloader-1.0.3.tar.gz
+SOURCES/bootloader-1.0.7.tar.gz
 SOURCES/certificate-1.3.3.tar.gz
-SOURCES/cockpit-1.5.5.tar.gz
+SOURCES/cockpit-1.5.10.tar.gz
 SOURCES/community-general-8.3.0.tar.gz
-SOURCES/containers-podman-1.12.0.tar.gz
+SOURCES/containers-podman-1.15.4.tar.gz
 SOURCES/crypto_policies-1.3.2.tar.gz
 SOURCES/fapolicyd-1.1.1.tar.gz
 SOURCES/firewall-1.7.4.tar.gz
@@ -15,9 +15,9 @@ SOURCES/journald-1.2.3.tar.gz
 SOURCES/kdump-1.4.4.tar.gz
 SOURCES/kernel_settings-1.2.2.tar.gz
 SOURCES/keylime_server-1.1.2.tar.gz
-SOURCES/logging-1.12.4.tar.gz
+SOURCES/logging-1.13.4.tar.gz
 SOURCES/metrics-1.10.1.tar.gz
-SOURCES/nbde_client-1.2.17.tar.gz
+SOURCES/nbde_client-1.3.0.tar.gz
 SOURCES/nbde_server-1.4.3.tar.gz
 SOURCES/network-1.15.1.tar.gz
 SOURCES/podman-1.4.7.tar.gz
@@ -29,6 +29,6 @@ SOURCES/snapshot-1.3.1.tar.gz
 SOURCES/ssh-1.3.2.tar.gz
 SOURCES/storage-1.16.2.tar.gz
 SOURCES/systemd-1.1.2.tar.gz
-SOURCES/timesync-1.8.2.tar.gz
+SOURCES/timesync-1.9.0.tar.gz
 SOURCES/tlog-1.3.3.tar.gz
-SOURCES/vpn-1.6.3.tar.gz
+SOURCES/vpn-1.6.3.tar.gz
.rhel-system-roles.metadata (new file): 34 lines
@@ -0,0 +1,34 @@
11b58e43e1b78cb75eda26724359f4d748173d5f SOURCES/ad_integration-1.4.6.tar.gz
da646eb9ba655f1693cc950ecb5c24af39ee1af6 SOURCES/ansible-posix-1.5.4.tar.gz
5829f61d848d1fe52ecd1702c055eeed8ef56e70 SOURCES/ansible-sshd-v0.25.0.tar.gz
e4df3548cf129b61c40b2d013917e07be2f3ba4e SOURCES/auto-maintenance-11ad785c9bb72611244e7909450ca4247e12db4d.tar.gz
7ae4b79529d14c0c8958cf9633f8d560d718f4e7 SOURCES/bootloader-1.0.7.tar.gz
9eaac83b306b2fb8dd8e82bc4b03b30285d2024f SOURCES/certificate-1.3.3.tar.gz
15677bec6ddafb75911d7c29fe1eb1c24b9b4f1c SOURCES/cockpit-1.5.10.tar.gz
15fd2f2c08ae17cc47efb76bd14fb9ab6f33bc26 SOURCES/community-general-8.3.0.tar.gz
2c0a98aedb2c031bfc94609bc9553d192224b159 SOURCES/containers-podman-1.15.4.tar.gz
6705818b1fdf3cc82083937265f7942e3d3ccc2d SOURCES/crypto_policies-1.3.2.tar.gz
29505121f6798f527045c5f66656fd5c19bed5fe SOURCES/fapolicyd-1.1.1.tar.gz
1a7a875cebbd3e146f6ca554269ee20845cf877b SOURCES/firewall-1.7.4.tar.gz
53e8991ca7e0c5c97ab010e843bc1a7c4a98eb96 SOURCES/ha_cluster-1.14.0.tar.gz
e96ba9f5b3ae08a12dbf072f118e316036553b94 SOURCES/journald-1.2.3.tar.gz
de6c6103b7023aa21782906696e712b428600a92 SOURCES/kdump-1.4.4.tar.gz
0f28a0919874f650ef0149409116bae12d2363e0 SOURCES/kernel_settings-1.2.2.tar.gz
85c14c7e260b247eb7947c8706af82ff5aac07d2 SOURCES/keylime_server-1.1.2.tar.gz
4825923fc0fa29e80c08864b0afc50e2e075be91 SOURCES/logging-1.13.4.tar.gz
e795238995d2dfb2cbdb5cc9bf4923f7410ac49a SOURCES/metrics-1.10.1.tar.gz
544c5c9e53beef034b0d39ecf944e0bb13231535 SOURCES/nbde_client-1.3.0.tar.gz
dce6435ca145b3143c1326a8e413e8173e5655ef SOURCES/nbde_server-1.4.3.tar.gz
e89a4d6974a089f035b1f3fc79a1f9cacfa1f933 SOURCES/network-1.15.1.tar.gz
fc242b6f776088720ef04e5891c75fd33e6e1e96 SOURCES/podman-1.4.7.tar.gz
ddb7e2a575e4b96666ce13dbdbaea97cc2f83954 SOURCES/postfix-1.4.3.tar.gz
bf0f12e78bfc2120d85c5458aa7d53b15738e73c SOURCES/postgresql-1.3.5.tar.gz
b519a4e35b55e97bf954916d77f1f1f82ec2615b SOURCES/rhc-1.6.0.tar.gz
458b076a73a1c3597485b60bc734b225f3079a86 SOURCES/selinux-1.7.4.tar.gz
8fdcd362f021d41165c4a959ba79136491389343 SOURCES/snapshot-1.3.1.tar.gz
d2c153993e51ce949db861db2aa15e8ec90b45af SOURCES/ssh-1.3.2.tar.gz
e08c1df6c6842f6ad37fff34d2e9d96e9cdddd70 SOURCES/storage-1.16.2.tar.gz
df8f2896ad761da73872d17a0f0cd8cfd34e0671 SOURCES/systemd-1.1.2.tar.gz
0a9df710ddd8a43e74cbd77e4414d5ea7e90d7b9 SOURCES/timesync-1.9.0.tar.gz
6d559dc44f44bc7e505602b36b51b4d1b60f2754 SOURCES/tlog-1.3.3.tar.gz
27395883fa555658257e70287e709f8ccc1d8392 SOURCES/vpn-1.6.3.tar.gz
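Each entry above pairs a SHA-1 checksum with the source tarball it covers (the dist-git lookaside-cache layout). A minimal Python sketch of how such a metadata file could be checked locally; the relative paths are an assumption about where the script is run, and the script itself is not part of the package.

```python
#!/usr/bin/env python3
# Illustrative check only: recompute the SHA-1 of each listed tarball and compare
# it with the value recorded in .rhel-system-roles.metadata. Paths are assumed to
# be relative to the unpacked dist-git checkout.
import hashlib
from pathlib import Path

def sha1sum(path, bufsize=1 << 20):
    digest = hashlib.sha1()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(bufsize), b""):
            digest.update(chunk)
    return digest.hexdigest()

for line in Path(".rhel-system-roles.metadata").read_text().splitlines():
    expected, name = line.split(None, 1)
    if Path(name).exists():
        status = "OK" if sha1sum(name) == expected else "MISMATCH"
    else:
        status = "MISSING"
    print(f"{status:8} {name}")
```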
@@ -0,0 +1,74 @@
From 8b3cfc1a30da1ab681eb8c250baa2d6395ecc0d2 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 3 Apr 2024 15:12:00 +0200
Subject: [PATCH 01/10] test: fix sector-based disk size calculation from
 ansible_devices

Device sizes specified in sectors are in general in 512 sectors
regardless of the actual device physical sector size. Example of
ansible_devices facts for a 4k sector size drive:

...
    "sectors": "41943040",
    "sectorsize": "4096",
    "size": "20.00 GB"
...

Resolves: RHEL-30959

Signed-off-by: Vojtech Trefny <vtrefny@redhat.com>
(cherry picked from commit bb1eb23ccd6e9475cd698f0a6f2f497ffefbccd2)
---
 tests/tests_create_lv_size_equal_to_vg.yml | 3 +--
 tests/tests_misc.yml                       | 3 +--
 tests/tests_resize.yml                     | 6 ++----
 3 files changed, 4 insertions(+), 8 deletions(-)

diff --git a/tests/tests_create_lv_size_equal_to_vg.yml b/tests/tests_create_lv_size_equal_to_vg.yml
index cab4f08..535f73b 100644
--- a/tests/tests_create_lv_size_equal_to_vg.yml
+++ b/tests/tests_create_lv_size_equal_to_vg.yml
@@ -8,8 +8,7 @@
     volume_group_size: '10g'
     lv_size: '10g'
     unused_disk_subfact: '{{ ansible_devices[unused_disks[0]] }}'
-    disk_size: '{{ unused_disk_subfact.sectors | int *
-                   unused_disk_subfact.sectorsize | int }}'
+    disk_size: '{{ unused_disk_subfact.sectors | int * 512 }}'
   tags:
     - tests::lvm

diff --git a/tests/tests_misc.yml b/tests/tests_misc.yml
index 6373897..363d843 100644
--- a/tests/tests_misc.yml
+++ b/tests/tests_misc.yml
@@ -8,8 +8,7 @@
     volume_group_size: "5g"
     volume1_size: "4g"
     unused_disk_subfact: "{{ ansible_devices[unused_disks[0]] }}"
-    too_large_size: "{{ (unused_disk_subfact.sectors | int * 1.2) *
-                        unused_disk_subfact.sectorsize | int }}"
+    too_large_size: "{{ (unused_disk_subfact.sectors | int * 1.2) * 512 }}"
   tags:
     - tests::lvm
   tasks:
diff --git a/tests/tests_resize.yml b/tests/tests_resize.yml
index 06fb375..1cd2176 100644
--- a/tests/tests_resize.yml
+++ b/tests/tests_resize.yml
@@ -11,10 +11,8 @@
     invalid_size1: xyz GiB
     invalid_size2: none
     unused_disk_subfact: '{{ ansible_devices[unused_disks[0]] }}'
-    too_large_size: '{{ unused_disk_subfact.sectors | int * 1.2 *
-                        unused_disk_subfact.sectorsize | int }}'
-    disk_size: '{{ unused_disk_subfact.sectors | int *
-                   unused_disk_subfact.sectorsize | int }}'
+    too_large_size: '{{ unused_disk_subfact.sectors | int * 1.2 * 512 }}'
+    disk_size: '{{ unused_disk_subfact.sectors | int * 512 }}'
   tags:
     - tests::lvm
   tasks:
--
2.46.0
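The point of the patch above: `ansible_devices[...].sectors` is always reported in 512-byte units, even when `sectorsize` is 4096, so multiplying the two overstates the disk size. A small Python illustration using the fact values quoted in the commit message:

```python
# Values copied from the ansible_devices example in the commit message above
# (a 4 KiB-sector, 20 GB disk). Not role code; illustration only.
facts = {"sectors": "41943040", "sectorsize": "4096", "size": "20.00 GB"}

old_calculation = int(facts["sectors"]) * int(facts["sectorsize"])  # 171798691840 bytes (~160 GiB), wrong
new_calculation = int(facts["sectors"]) * 512                       # 21474836480 bytes (20 GiB), matches "size"

print(old_calculation, new_calculation)
```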
@@ -0,0 +1,64 @@
From 9f561445271a14fee598e9a793f72297f66eae56 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 10 Apr 2024 17:05:46 +0200
Subject: [PATCH 02/10] fix: Fix recreate check for formats without labelling
 support

Formats like LUKS or LVMPV don't support labels so we need to skip
the label check in BlivetVolume._reformat.

Resolves: RHEL-29874
(cherry picked from commit a70e8108110e30ebc5e7c404d39339c511f9bd09)
---
 library/blivet.py              |  3 +++
 tests/tests_volume_relabel.yml | 20 ++++++++++++++++++++
 2 files changed, 23 insertions(+)

diff --git a/library/blivet.py b/library/blivet.py
index 20389ea..18807de 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -826,6 +826,9 @@ class BlivetVolume(BlivetBase):
         if ((fmt is None and self._device.format.type is None)
                 or (fmt is not None and self._device.format.type == fmt.type)):
             # format is the same, no need to run reformatting
+            if not hasattr(self._device.format, "label"):
+                # not all formats support labels
+                return
             dev_label = '' if self._device.format.label is None else self._device.format.label
             if dev_label != fmt.label:
                 # ...but the label has changed - schedule modification action
diff --git a/tests/tests_volume_relabel.yml b/tests/tests_volume_relabel.yml
index 8916b73..6624fbd 100644
--- a/tests/tests_volume_relabel.yml
+++ b/tests/tests_volume_relabel.yml
@@ -111,6 +111,26 @@
     - name: Verify role results
       include_tasks: verify-role-results.yml

+    - name: Format the device to LVMPV which doesn't support labels
+      include_role:
+        name: linux-system-roles.storage
+      vars:
+        storage_volumes:
+          - name: test1
+            type: disk
+            fs_type: lvmpv
+            disks: "{{ unused_disks }}"
+
+    - name: Rerun to check we don't try to relabel preexisitng LVMPV (regression test for RHEL-29874)
+      include_role:
+        name: linux-system-roles.storage
+      vars:
+        storage_volumes:
+          - name: test1
+            type: disk
+            fs_type: lvmpv
+            disks: "{{ unused_disks }}"
+
     - name: Clean up
       include_role:
         name: linux-system-roles.storage
--
2.46.0
SOURCES/0003-fix-Fix-incorrent-populate-call.patch (new file): 28 lines
@@ -0,0 +1,28 @@
From 7abfaeddab812e4eec0c3d3d6bcbabe047722c4f Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 10 Apr 2024 17:08:20 +0200
Subject: [PATCH 03/10] fix: Fix incorrent populate call

`populate()` is method of DeviceTree, not Blivet.

(cherry picked from commit 6471e65abd429c82df37cbcf07fdf909e4277aa8)
---
 library/blivet.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/library/blivet.py b/library/blivet.py
index 18807de..d82b86b 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -630,7 +630,7 @@ class BlivetVolume(BlivetBase):
             device.original_format._key_file = self._volume.get('encryption_key')
             device.original_format.passphrase = self._volume.get('encryption_password')
             if device.isleaf:
-                self._blivet.populate()
+                self._blivet.devicetree.populate()

             if not device.isleaf:
                 device = device.children[0]
--
2.46.0
@@ -0,0 +1,174 @@
|
||||
From 912c33982d9cc412eb72bc9baeab6696e29e7f27 Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Tue, 28 May 2024 16:23:48 +0200
|
||||
Subject: [PATCH 04/10] tests: Add a new 'match_sector_size' argument to
|
||||
find_unused_disks
|
||||
|
||||
Some storage pools cannot be created on disks with different
|
||||
sector sizes so we want to be able to find unused disks with the
|
||||
same sector sizes for our tests.
|
||||
|
||||
Related: RHEL-25994
|
||||
(cherry picked from commit 368ecd0214dbaad7c42547eeac0565e51c924546)
|
||||
---
|
||||
library/find_unused_disk.py | 79 ++++++++++++++++++++++------------
|
||||
tests/get_unused_disk.yml | 1 +
|
||||
tests/unit/test_unused_disk.py | 6 +--
|
||||
3 files changed, 56 insertions(+), 30 deletions(-)
|
||||
|
||||
diff --git a/library/find_unused_disk.py b/library/find_unused_disk.py
|
||||
index 09b8ad5..098f235 100644
|
||||
--- a/library/find_unused_disk.py
|
||||
+++ b/library/find_unused_disk.py
|
||||
@@ -39,6 +39,11 @@ options:
|
||||
description: Specifies which disk interface will be accepted (scsi, virtio, nvme).
|
||||
default: null
|
||||
type: str
|
||||
+
|
||||
+ match_sector_size:
|
||||
+ description: Specifies whether all returned disks must have the same (logical) sector size.
|
||||
+ default: false
|
||||
+ type: bool
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
@@ -138,13 +143,13 @@ def get_partitions(disk_path):
|
||||
|
||||
|
||||
def get_disks(module):
|
||||
- buf = module.run_command(["lsblk", "-p", "--pairs", "--bytes", "-o", "NAME,TYPE,SIZE,FSTYPE"])[1]
|
||||
+ buf = module.run_command(["lsblk", "-p", "--pairs", "--bytes", "-o", "NAME,TYPE,SIZE,FSTYPE,LOG-SEC"])[1]
|
||||
disks = dict()
|
||||
for line in buf.splitlines():
|
||||
if not line:
|
||||
continue
|
||||
|
||||
- m = re.search(r'NAME="(?P<path>[^"]*)" TYPE="(?P<type>[^"]*)" SIZE="(?P<size>\d+)" FSTYPE="(?P<fstype>[^"]*)"', line)
|
||||
+ m = re.search(r'NAME="(?P<path>[^"]*)" TYPE="(?P<type>[^"]*)" SIZE="(?P<size>\d+)" FSTYPE="(?P<fstype>[^"]*)" LOG-SEC="(?P<ssize>\d+)"', line)
|
||||
if m is None:
|
||||
module.log(line)
|
||||
continue
|
||||
@@ -152,31 +157,16 @@ def get_disks(module):
|
||||
if m.group('type') != "disk":
|
||||
continue
|
||||
|
||||
- disks[m.group('path')] = {"type": m.group('type'), "size": m.group('size'), "fstype": m.group('fstype')}
|
||||
+ disks[m.group('path')] = {"type": m.group('type'), "size": m.group('size'),
|
||||
+ "fstype": m.group('fstype'), "ssize": m.group('ssize')}
|
||||
|
||||
return disks
|
||||
|
||||
|
||||
-def run_module():
|
||||
- """Create the module"""
|
||||
- module_args = dict(
|
||||
- max_return=dict(type='int', required=False, default=10),
|
||||
- min_size=dict(type='str', required=False, default='0'),
|
||||
- max_size=dict(type='str', required=False, default='0'),
|
||||
- with_interface=dict(type='str', required=False, default=None)
|
||||
- )
|
||||
-
|
||||
- result = dict(
|
||||
- changed=False,
|
||||
- disks=[]
|
||||
- )
|
||||
-
|
||||
- module = AnsibleModule(
|
||||
- argument_spec=module_args,
|
||||
- supports_check_mode=True
|
||||
- )
|
||||
-
|
||||
+def filter_disks(module):
|
||||
+ disks = {}
|
||||
max_size = Size(module.params['max_size'])
|
||||
+
|
||||
for path, attrs in get_disks(module).items():
|
||||
if is_ignored(path):
|
||||
continue
|
||||
@@ -204,14 +194,49 @@ def run_module():
|
||||
if not can_open(path):
|
||||
continue
|
||||
|
||||
- result['disks'].append(os.path.basename(path))
|
||||
- if len(result['disks']) >= module.params['max_return']:
|
||||
- break
|
||||
+ disks[path] = attrs
|
||||
+
|
||||
+ return disks
|
||||
+
|
||||
+
|
||||
+def run_module():
|
||||
+ """Create the module"""
|
||||
+ module_args = dict(
|
||||
+ max_return=dict(type='int', required=False, default=10),
|
||||
+ min_size=dict(type='str', required=False, default='0'),
|
||||
+ max_size=dict(type='str', required=False, default='0'),
|
||||
+ with_interface=dict(type='str', required=False, default=None),
|
||||
+ match_sector_size=dict(type='bool', required=False, default=False)
|
||||
+ )
|
||||
+
|
||||
+ result = dict(
|
||||
+ changed=False,
|
||||
+ disks=[]
|
||||
+ )
|
||||
+
|
||||
+ module = AnsibleModule(
|
||||
+ argument_spec=module_args,
|
||||
+ supports_check_mode=True
|
||||
+ )
|
||||
+
|
||||
+ disks = filter_disks(module)
|
||||
+
|
||||
+ if module.params['match_sector_size']:
|
||||
+ # pick the most disks with the same sector size
|
||||
+ sector_sizes = dict()
|
||||
+ for path, ss in [(path, disks[path]["ssize"]) for path in disks.keys()]:
|
||||
+ if ss in sector_sizes.keys():
|
||||
+ sector_sizes[ss].append(path)
|
||||
+ else:
|
||||
+ sector_sizes[ss] = [path]
|
||||
+ disks = [os.path.basename(p) for p in max(sector_sizes.values(), key=len)]
|
||||
+ else:
|
||||
+ disks = [os.path.basename(p) for p in disks.keys()]
|
||||
|
||||
- if not result['disks']:
|
||||
+ if not disks:
|
||||
result['disks'] = "Unable to find unused disk"
|
||||
else:
|
||||
- result['disks'].sort()
|
||||
+ result['disks'] = sorted(disks)[:int(module.params['max_return'])]
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
diff --git a/tests/get_unused_disk.yml b/tests/get_unused_disk.yml
|
||||
index 685541f..a61487e 100644
|
||||
--- a/tests/get_unused_disk.yml
|
||||
+++ b/tests/get_unused_disk.yml
|
||||
@@ -19,6 +19,7 @@
|
||||
max_size: "{{ max_size | d(omit) }}"
|
||||
max_return: "{{ max_return | d(omit) }}"
|
||||
with_interface: "{{ storage_test_use_interface | d(omit) }}"
|
||||
+ match_sector_size: "{{ match_sector_size | d(omit) }}"
|
||||
register: unused_disks_return
|
||||
|
||||
- name: Set unused_disks if necessary
|
||||
diff --git a/tests/unit/test_unused_disk.py b/tests/unit/test_unused_disk.py
|
||||
index 74c9cf1..ca44d0f 100644
|
||||
--- a/tests/unit/test_unused_disk.py
|
||||
+++ b/tests/unit/test_unused_disk.py
|
||||
@@ -10,9 +10,9 @@ import os
|
||||
blkid_data_pttype = [('/dev/sdx', '/dev/sdx: PTTYPE=\"dos\"'),
|
||||
('/dev/sdy', '/dev/sdy: PTTYPE=\"test\"')]
|
||||
|
||||
-blkid_data = [('/dev/sdx', 'UUID=\"hello-1234-56789\" TYPE=\"crypto_LUKS\"'),
|
||||
- ('/dev/sdy', 'UUID=\"this-1s-a-t3st-f0r-ansible\" VERSION=\"LVM2 001\" TYPE=\"LVM2_member\" USAGE=\"raid\"'),
|
||||
- ('/dev/sdz', 'LABEL=\"/data\" UUID=\"a12bcdef-345g-67h8-90i1-234j56789k10\" VERSION=\"1.0\" TYPE=\"ext4\" USAGE=\"filesystem\"')]
|
||||
+blkid_data = [('/dev/sdx', 'UUID=\"hello-1234-56789\" TYPE=\"crypto_LUKS\" LOG-SEC=\"512\"'),
|
||||
+ ('/dev/sdy', 'UUID=\"this-1s-a-t3st-f0r-ansible\" VERSION=\"LVM2 001\" TYPE=\"LVM2_member\" USAGE=\"raid\" LOG-SEC=\"512\"'),
|
||||
+ ('/dev/sdz', 'LABEL=\"/data\" UUID=\"a12bcdef-345g-67h8-90i1-234j56789k10\" VERSION=\"1.0\" TYPE=\"ext4\" USAGE=\"filesystem\" LOG-SEC=\"512\"')]
|
||||
|
||||
holders_data_none = [('/dev/sdx', ''),
|
||||
('/dev/dm-99', '')]
|
||||
--
|
||||
2.46.0
|
||||
|
@@ -0,0 +1,96 @@
|
||||
From da871866f07e2990f37b3fdea404bbaf091d81b6 Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Thu, 30 May 2024 10:41:26 +0200
|
||||
Subject: [PATCH 05/10] tests: Require same sector size disks for LVM tests
|
||||
|
||||
LVM VGs cannot be created on top of disks with different sector
|
||||
sizes so for tests that need multiple disks we need to make sure
|
||||
we get unused disks with the same sector size.
|
||||
|
||||
Resolves: RHEL-25994
|
||||
(cherry picked from commit d8c5938c28417cc905a647ec30246a0fc4d19297)
|
||||
---
|
||||
tests/tests_change_fs_use_partitions.yml | 2 +-
|
||||
tests/tests_create_lvm_cache_then_remove.yml | 1 +
|
||||
tests/tests_create_thinp_then_remove.yml | 1 +
|
||||
tests/tests_fatals_cache_volume.yml | 1 +
|
||||
tests/tests_lvm_multiple_disks_multiple_volumes.yml | 1 +
|
||||
tests/tests_lvm_pool_members.yml | 1 +
|
||||
6 files changed, 6 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/tests/tests_change_fs_use_partitions.yml b/tests/tests_change_fs_use_partitions.yml
|
||||
index 52afb7f..87fed69 100644
|
||||
--- a/tests/tests_change_fs_use_partitions.yml
|
||||
+++ b/tests/tests_change_fs_use_partitions.yml
|
||||
@@ -31,7 +31,7 @@
|
||||
include_tasks: get_unused_disk.yml
|
||||
vars:
|
||||
min_size: "{{ volume_size }}"
|
||||
- max_return: 2
|
||||
+ max_return: 1
|
||||
|
||||
- name: Create an LVM partition with the default file system type
|
||||
include_role:
|
||||
diff --git a/tests/tests_create_lvm_cache_then_remove.yml b/tests/tests_create_lvm_cache_then_remove.yml
|
||||
index 1769a78..6b5d0a5 100644
|
||||
--- a/tests/tests_create_lvm_cache_then_remove.yml
|
||||
+++ b/tests/tests_create_lvm_cache_then_remove.yml
|
||||
@@ -57,6 +57,7 @@
|
||||
min_size: "{{ volume_group_size }}"
|
||||
max_return: 2
|
||||
disks_needed: 2
|
||||
+ match_sector_size: true
|
||||
|
||||
- name: Create a cached LVM logical volume under volume group 'foo'
|
||||
include_role:
|
||||
diff --git a/tests/tests_create_thinp_then_remove.yml b/tests/tests_create_thinp_then_remove.yml
|
||||
index bf6c4b1..2e7f046 100644
|
||||
--- a/tests/tests_create_thinp_then_remove.yml
|
||||
+++ b/tests/tests_create_thinp_then_remove.yml
|
||||
@@ -23,6 +23,7 @@
|
||||
include_tasks: get_unused_disk.yml
|
||||
vars:
|
||||
max_return: 3
|
||||
+ match_sector_size: true
|
||||
|
||||
- name: Create a thinpool device
|
||||
include_role:
|
||||
diff --git a/tests/tests_fatals_cache_volume.yml b/tests/tests_fatals_cache_volume.yml
|
||||
index c14cf3f..fcfdbb8 100644
|
||||
--- a/tests/tests_fatals_cache_volume.yml
|
||||
+++ b/tests/tests_fatals_cache_volume.yml
|
||||
@@ -29,6 +29,7 @@
|
||||
vars:
|
||||
max_return: 2
|
||||
disks_needed: 2
|
||||
+ match_sector_size: true
|
||||
|
||||
- name: Verify that creating a cached partition volume fails
|
||||
include_tasks: verify-role-failed.yml
|
||||
diff --git a/tests/tests_lvm_multiple_disks_multiple_volumes.yml b/tests/tests_lvm_multiple_disks_multiple_volumes.yml
|
||||
index 9a01ec5..68f2e76 100644
|
||||
--- a/tests/tests_lvm_multiple_disks_multiple_volumes.yml
|
||||
+++ b/tests/tests_lvm_multiple_disks_multiple_volumes.yml
|
||||
@@ -29,6 +29,7 @@
|
||||
min_size: "{{ volume_group_size }}"
|
||||
max_return: 2
|
||||
disks_needed: 2
|
||||
+ match_sector_size: true
|
||||
|
||||
- name: >-
|
||||
Create a logical volume spanning two physical volumes that changes its
|
||||
diff --git a/tests/tests_lvm_pool_members.yml b/tests/tests_lvm_pool_members.yml
|
||||
index d1b941d..63c10c7 100644
|
||||
--- a/tests/tests_lvm_pool_members.yml
|
||||
+++ b/tests/tests_lvm_pool_members.yml
|
||||
@@ -59,6 +59,7 @@
|
||||
vars:
|
||||
min_size: "{{ volume_group_size }}"
|
||||
disks_needed: 3
|
||||
+ match_sector_size: true
|
||||
|
||||
- name: Create volume group 'foo' with 3 PVs
|
||||
include_role:
|
||||
--
|
||||
2.46.0
|
||||
|
@@ -0,0 +1,41 @@
|
||||
From 705a9db65a230013a9118481082d2bb548cd113d Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Fri, 31 May 2024 06:31:52 +0200
|
||||
Subject: [PATCH 06/10] fix: Fix 'possibly-used-before-assignment' pylint
|
||||
issues (#440)
|
||||
|
||||
Latest pylint added a new check for values used before assignment.
|
||||
This fixes these issues found in the blivet module. Some of these
|
||||
are false positives, some real potential issues.
|
||||
|
||||
(cherry picked from commit bfaae50586681bb4b0fcad5df6f6adde2b7c8502)
|
||||
---
|
||||
library/blivet.py | 5 +++++
|
||||
1 file changed, 5 insertions(+)
|
||||
|
||||
diff --git a/library/blivet.py b/library/blivet.py
|
||||
index d82b86b..a6715d9 100644
|
||||
--- a/library/blivet.py
|
||||
+++ b/library/blivet.py
|
||||
@@ -642,6 +642,9 @@ class BlivetVolume(BlivetBase):
|
||||
self._device = None
|
||||
return # TODO: see if we can create this device w/ the specified name
|
||||
|
||||
+ # pylint doesn't understand that "luks_fmt" is always set when "encrypted" is true
|
||||
+ # pylint: disable=unknown-option-value
|
||||
+ # pylint: disable=possibly-used-before-assignment
|
||||
def _update_from_device(self, param_name):
|
||||
""" Return True if param_name's value was retrieved from a looked-up device. """
|
||||
log.debug("Updating volume settings from device: %r", self._device)
|
||||
@@ -1717,6 +1720,8 @@ class BlivetLVMPool(BlivetPool):
|
||||
|
||||
if auto_size_dev_count > 0:
|
||||
calculated_thinlv_size = available_space / auto_size_dev_count
|
||||
+ else:
|
||||
+ calculated_thinlv_size = available_space
|
||||
|
||||
for thinlv in thinlvs_to_create:
|
||||
|
||||
--
|
||||
2.46.0
|
||||
|
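The second hunk of the patch above adds an `else` branch so `calculated_thinlv_size` is always bound before it is read. A toy Python example of the pattern the new pylint check flags and of the fix; the function below is illustrative, not role code.

```python
# Illustration of pylint's possibly-used-before-assignment warning and the fix
# applied in the patch: assign the variable on every path before it is used.
def thinlv_size(available_space, auto_size_dev_count):
    if auto_size_dev_count > 0:
        calculated_thinlv_size = available_space / auto_size_dev_count
    else:
        # without this branch pylint reports the name below as possibly unbound
        calculated_thinlv_size = available_space
    return calculated_thinlv_size

print(thinlv_size(100, 4), thinlv_size(100, 0))
```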
SOURCES/0007-test-lsblk-can-return-LOG_SEC-or-LOG-SEC.patch (new file): 54 lines
@@ -0,0 +1,54 @@
|
||||
From 18edc9af26684f03e44fe2e22c82a8f93182da4a Mon Sep 17 00:00:00 2001
|
||||
From: Rich Megginson <rmeggins@redhat.com>
|
||||
Date: Wed, 5 Jun 2024 08:49:19 -0600
|
||||
Subject: [PATCH 07/10] test: lsblk can return LOG_SEC or LOG-SEC
|
||||
|
||||
get_unused_disk is broken on some systems because `lsblk ... LOG-SEC` can
|
||||
return `LOG_SEC` with an underscore instead of the requested
|
||||
`LOG-SEC` with a dash.
|
||||
|
||||
(cherry picked from commit 64333ce8aa42f4b961c39a443ac43cc6590097b3)
|
||||
---
|
||||
library/find_unused_disk.py | 4 ++--
|
||||
tests/get_unused_disk.yml | 9 +++++++++
|
||||
2 files changed, 11 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/library/find_unused_disk.py b/library/find_unused_disk.py
|
||||
index 098f235..270fb58 100644
|
||||
--- a/library/find_unused_disk.py
|
||||
+++ b/library/find_unused_disk.py
|
||||
@@ -149,9 +149,9 @@ def get_disks(module):
|
||||
if not line:
|
||||
continue
|
||||
|
||||
- m = re.search(r'NAME="(?P<path>[^"]*)" TYPE="(?P<type>[^"]*)" SIZE="(?P<size>\d+)" FSTYPE="(?P<fstype>[^"]*)" LOG-SEC="(?P<ssize>\d+)"', line)
|
||||
+ m = re.search(r'NAME="(?P<path>[^"]*)" TYPE="(?P<type>[^"]*)" SIZE="(?P<size>\d+)" FSTYPE="(?P<fstype>[^"]*)" LOG[_-]SEC="(?P<ssize>\d+)"', line)
|
||||
if m is None:
|
||||
- module.log(line)
|
||||
+ module.log("Line did not match: " + line)
|
||||
continue
|
||||
|
||||
if m.group('type') != "disk":
|
||||
diff --git a/tests/get_unused_disk.yml b/tests/get_unused_disk.yml
|
||||
index a61487e..0402770 100644
|
||||
--- a/tests/get_unused_disk.yml
|
||||
+++ b/tests/get_unused_disk.yml
|
||||
@@ -22,6 +22,15 @@
|
||||
match_sector_size: "{{ match_sector_size | d(omit) }}"
|
||||
register: unused_disks_return
|
||||
|
||||
+- name: Debug why there are no unused disks
|
||||
+ shell: |
|
||||
+ set -x
|
||||
+ exec 1>&2
|
||||
+ lsblk -p --pairs --bytes -o NAME,TYPE,SIZE,FSTYPE,LOG-SEC
|
||||
+ journalctl -ex
|
||||
+ changed_when: false
|
||||
+ when: "'Unable to find unused disk' in unused_disks_return.disks"
|
||||
+
|
||||
- name: Set unused_disks if necessary
|
||||
set_fact:
|
||||
unused_disks: "{{ unused_disks_return.disks }}"
|
||||
--
|
||||
2.46.0
|
||||
|
SOURCES/0008-test-lvm-pool-members-test-fix.patch (new file): 34 lines
@@ -0,0 +1,34 @@
|
||||
From aa6e494963a3bded3b1ca7ef5a81e0106e68d5bc Mon Sep 17 00:00:00 2001
|
||||
From: Jan Pokorny <japokorn@redhat.com>
|
||||
Date: Thu, 6 Jun 2024 11:54:48 +0200
|
||||
Subject: [PATCH 08/10] test: lvm pool members test fix
|
||||
|
||||
tests_lvm_pool_members started to fail. It tried to create a device with
|
||||
a requested size (20m) that was less than minimal allowed size (300m) for that type of
|
||||
volume. Role automatically resized the device to allowed size. That lead to discrepancy
|
||||
in actual and expected size values.
|
||||
|
||||
Increasing the requested device size to be same or larger than minimal fixes the
|
||||
issue.
|
||||
|
||||
(cherry picked from commit ee740b7b14d09e09a26dd5eb95e8950aeb15147d)
|
||||
---
|
||||
tests/tests_lvm_pool_members.yml | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
diff --git a/tests/tests_lvm_pool_members.yml b/tests/tests_lvm_pool_members.yml
|
||||
index 63c10c7..320626e 100644
|
||||
--- a/tests/tests_lvm_pool_members.yml
|
||||
+++ b/tests/tests_lvm_pool_members.yml
|
||||
@@ -6,7 +6,7 @@
|
||||
storage_safe_mode: false
|
||||
storage_use_partitions: true
|
||||
volume_group_size: '10g'
|
||||
- volume_size: '20m'
|
||||
+ volume_size: '300m'
|
||||
tags:
|
||||
- tests::lvm
|
||||
|
||||
--
|
||||
2.46.0
|
||||
|
@@ -0,0 +1,40 @@
|
||||
From d2b59ac3758f51ffac5156e9f006b7ce9d8a28eb Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Tue, 4 Jun 2024 10:30:03 +0200
|
||||
Subject: [PATCH 09/10] fix: Fix expected error message in tests_misc.yml
|
||||
|
||||
Different versions of blivet return a different error message when
|
||||
trying to create a filesystem with invalid parameters.
|
||||
|
||||
On Fedora 39 and older:
|
||||
"Failed to commit changes to disk: (FSError('format failed: 1'),
|
||||
'/dev/mapper/foo-test1')"
|
||||
|
||||
On Fedora 40 and newer:
|
||||
"Failed to commit changes to disk: Process reported exit code 1:
|
||||
mke2fs: invalid block size - 512\n"
|
||||
|
||||
(cherry picked from commit 7ef66d85bd52f339483b24dbb8bc66e22054b378)
|
||||
---
|
||||
tests/tests_misc.yml | 5 +++--
|
||||
1 file changed, 3 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/tests/tests_misc.yml b/tests/tests_misc.yml
|
||||
index 363d843..432ec16 100644
|
||||
--- a/tests/tests_misc.yml
|
||||
+++ b/tests/tests_misc.yml
|
||||
@@ -68,8 +68,9 @@
|
||||
include_tasks: verify-role-failed.yml
|
||||
vars:
|
||||
__storage_failed_regex: >-
|
||||
- Failed to commit changes to disk.*FSError.*format failed:
|
||||
- 1.*/dev/mapper/foo-test1
|
||||
+ Failed to commit changes to disk.*(FSError.*format failed:
|
||||
+ 1.*/dev/mapper/foo-test1|
|
||||
+ Process reported exit code 1: mke2fs: invalid block size - 512)
|
||||
__storage_failed_msg: >-
|
||||
Unexpected behavior when creating ext4 filesystem with invalid
|
||||
parameter
|
||||
--
|
||||
2.46.0
|
||||
|
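The updated `__storage_failed_regex` accepts either blivet failure message through an alternation. A quick Python check of an equivalent single-line pattern against the two messages quoted in the commit message (shortened here for illustration):

```python
import re

# Equivalent single-line form of the alternation used in the patch above.
regex = (r"Failed to commit changes to disk.*(FSError.*format failed: "
         r"1.*/dev/mapper/foo-test1|Process reported exit code 1: "
         r"mke2fs: invalid block size - 512)")

older_blivet = ("Failed to commit changes to disk: (FSError('format failed: 1'), "
                "'/dev/mapper/foo-test1')")
newer_blivet = ("Failed to commit changes to disk: Process reported exit code 1: "
                "mke2fs: invalid block size - 512\n")

for message in (older_blivet, newer_blivet):
    print(bool(re.search(regex, message)))  # True for both
```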
@@ -0,0 +1,180 @@
|
||||
From a86f7e013fe881e477b65509363bbb5af851662f Mon Sep 17 00:00:00 2001
|
||||
From: Vojtech Trefny <vtrefny@redhat.com>
|
||||
Date: Fri, 12 Apr 2024 14:45:15 +0200
|
||||
Subject: [PATCH 10/10] tests: Use blockdev_info to check volume mount points
|
||||
|
||||
We can use the information from `lsblk` we already use for other
|
||||
checks instead of using the Ansible mountinfo facts. This makes
|
||||
the check simpler and also makes it easier to check for Stratis
|
||||
volume mount points, because of the complicated Stratis devices
|
||||
structure in /dev.
|
||||
|
||||
(cherry picked from commit 10e657bde68ffa9495b2441ed9f472cf79edbb19)
|
||||
---
|
||||
library/blockdev_info.py | 2 +-
|
||||
tests/test-verify-volume-fs.yml | 51 ++++++++++++++++--------------
|
||||
tests/test-verify-volume-mount.yml | 48 +++++-----------------------
|
||||
3 files changed, 37 insertions(+), 64 deletions(-)
|
||||
|
||||
diff --git a/library/blockdev_info.py b/library/blockdev_info.py
|
||||
index 13858fb..ec018de 100644
|
||||
--- a/library/blockdev_info.py
|
||||
+++ b/library/blockdev_info.py
|
||||
@@ -64,7 +64,7 @@ def fixup_md_path(path):
|
||||
|
||||
|
||||
def get_block_info(module):
|
||||
- buf = module.run_command(["lsblk", "-o", "NAME,FSTYPE,LABEL,UUID,TYPE,SIZE", "-p", "-P", "-a"])[1]
|
||||
+ buf = module.run_command(["lsblk", "-o", "NAME,FSTYPE,LABEL,UUID,TYPE,SIZE,MOUNTPOINT", "-p", "-P", "-a"])[1]
|
||||
info = dict()
|
||||
for line in buf.splitlines():
|
||||
dev = dict()
|
||||
diff --git a/tests/test-verify-volume-fs.yml b/tests/test-verify-volume-fs.yml
|
||||
index 8e488c5..63b2770 100644
|
||||
--- a/tests/test-verify-volume-fs.yml
|
||||
+++ b/tests/test-verify-volume-fs.yml
|
||||
@@ -1,26 +1,31 @@
|
||||
---
|
||||
# type
|
||||
-- name: Verify fs type
|
||||
- assert:
|
||||
- that: storage_test_blkinfo.info[storage_test_volume._device].fstype ==
|
||||
- storage_test_volume.fs_type or
|
||||
- (storage_test_blkinfo.info[storage_test_volume._device].fstype | length
|
||||
- == 0 and storage_test_volume.fs_type == "unformatted")
|
||||
- when: storage_test_volume.fs_type and _storage_test_volume_present
|
||||
+- name: Check volume filesystem
|
||||
+ when: storage_test_volume.type != "stratis"
|
||||
+ block:
|
||||
+ - name: Verify fs type
|
||||
+ assert:
|
||||
+ that: storage_test_blkinfo.info[storage_test_volume._device].fstype ==
|
||||
+ storage_test_volume.fs_type or
|
||||
+ (storage_test_blkinfo.info[storage_test_volume._device].fstype | length
|
||||
+ == 0 and storage_test_volume.fs_type == "unformatted")
|
||||
+ when:
|
||||
+ - storage_test_volume.fs_type
|
||||
+ - _storage_test_volume_present
|
||||
|
||||
-# label
|
||||
-- name: Verify fs label
|
||||
- assert:
|
||||
- that: storage_test_blkinfo.info[storage_test_volume._device].label ==
|
||||
- storage_test_volume.fs_label
|
||||
- msg: >-
|
||||
- Volume '{{ storage_test_volume.name }}' labels do not match when they
|
||||
- should
|
||||
- ('{{ storage_test_blkinfo.info[storage_test_volume._device].label }}',
|
||||
- '{{ storage_test_volume.fs_label }}')
|
||||
- when:
|
||||
- - _storage_test_volume_present | bool
|
||||
- # label for GFS2 is set manually with the extra `-t` fs_create_options
|
||||
- # so we can't verify it here because it was not set with fs_label so
|
||||
- # the label from blkinfo doesn't match the expected "empty" fs_label
|
||||
- - storage_test_volume.fs_type != "gfs2"
|
||||
+ # label
|
||||
+ - name: Verify fs label
|
||||
+ assert:
|
||||
+ that: storage_test_blkinfo.info[storage_test_volume._device].label ==
|
||||
+ storage_test_volume.fs_label
|
||||
+ msg: >-
|
||||
+ Volume '{{ storage_test_volume.name }}' labels do not match when they
|
||||
+ should
|
||||
+ ('{{ storage_test_blkinfo.info[storage_test_volume._device].label }}',
|
||||
+ '{{ storage_test_volume.fs_label }}')
|
||||
+ when:
|
||||
+ - _storage_test_volume_present | bool
|
||||
+ # label for GFS2 is set manually with the extra `-t` fs_create_options
|
||||
+ # so we can't verify it here because it was not set with fs_label so
|
||||
+ # the label from blkinfo doesn't match the expected "empty" fs_label
|
||||
+ - storage_test_volume.fs_type != "gfs2"
|
||||
diff --git a/tests/test-verify-volume-mount.yml b/tests/test-verify-volume-mount.yml
|
||||
index cf86b34..17d2a01 100644
|
||||
--- a/tests/test-verify-volume-mount.yml
|
||||
+++ b/tests/test-verify-volume-mount.yml
|
||||
@@ -15,20 +15,13 @@
|
||||
|
||||
- name: Set some facts
|
||||
set_fact:
|
||||
- storage_test_mount_device_matches: "{{ ansible_mounts |
|
||||
- selectattr('device', 'match', '^' ~ storage_test_device_path ~ '$') |
|
||||
- list }}"
|
||||
- storage_test_mount_point_matches: "{{ ansible_mounts |
|
||||
- selectattr('mount', 'match',
|
||||
- '^' ~ mount_prefix ~ storage_test_volume.mount_point ~ '$') |
|
||||
- list if storage_test_volume.mount_point else [] }}"
|
||||
- storage_test_mount_expected_match_count: "{{ 1
|
||||
- if _storage_test_volume_present and storage_test_volume.mount_point and
|
||||
- storage_test_volume.mount_point.startswith('/')
|
||||
- else 0 }}"
|
||||
storage_test_swap_expected_matches: "{{ 1 if
|
||||
_storage_test_volume_present and
|
||||
storage_test_volume.fs_type == 'swap' else 0 }}"
|
||||
+ storage_test_mount_expected_mount_point: "{{
|
||||
+ '[SWAP]' if storage_test_volume.fs_type == 'swap' else
|
||||
+ '' if storage_test_volume.mount_point == 'none' else
|
||||
+ mount_prefix + storage_test_volume.mount_point if storage_test_volume.mount_point else '' }}"
|
||||
vars:
|
||||
# assumes /opt which is /var/opt in ostree
|
||||
mount_prefix: "{{ '/var' if __storage_is_ostree | d(false)
|
||||
@@ -50,23 +43,12 @@
|
||||
#
|
||||
- name: Verify the current mount state by device
|
||||
assert:
|
||||
- that: storage_test_mount_device_matches | length ==
|
||||
- storage_test_mount_expected_match_count | int
|
||||
+ that: storage_test_blkinfo.info[storage_test_volume._device].mountpoint ==
|
||||
+ storage_test_mount_expected_mount_point
|
||||
msg: >-
|
||||
Found unexpected mount state for volume
|
||||
'{{ storage_test_volume.name }}' device
|
||||
- when: _storage_test_volume_present and storage_test_volume.mount_point
|
||||
-
|
||||
-#
|
||||
-# Verify mount directory (state, owner, group, permissions).
|
||||
-#
|
||||
-- name: Verify the current mount state by mount point
|
||||
- assert:
|
||||
- that: storage_test_mount_point_matches | length ==
|
||||
- storage_test_mount_expected_match_count | int
|
||||
- msg: >-
|
||||
- Found unexpected mount state for volume
|
||||
- '{{ storage_test_volume.name }}' mount point
|
||||
+ when: _storage_test_volume_present
|
||||
|
||||
- name: Verify mount directory user
|
||||
assert:
|
||||
@@ -104,18 +86,6 @@
|
||||
storage_test_volume.mount_point and
|
||||
storage_test_volume.mount_mode
|
||||
|
||||
-#
|
||||
-# Verify mount fs type.
|
||||
-#
|
||||
-- name: Verify the mount fs type
|
||||
- assert:
|
||||
- that: storage_test_mount_point_matches[0].fstype ==
|
||||
- storage_test_volume.fs_type
|
||||
- msg: >-
|
||||
- Found unexpected mount state for volume
|
||||
- '{{ storage_test_volume.name }} fs type
|
||||
- when: storage_test_mount_expected_match_count | int == 1
|
||||
-
|
||||
#
|
||||
# Verify swap status.
|
||||
#
|
||||
@@ -145,10 +115,8 @@
|
||||
|
||||
- name: Unset facts
|
||||
set_fact:
|
||||
- storage_test_mount_device_matches: null
|
||||
- storage_test_mount_point_matches: null
|
||||
- storage_test_mount_expected_match_count: null
|
||||
storage_test_swap_expected_matches: null
|
||||
storage_test_sys_node: null
|
||||
storage_test_swaps: null
|
||||
storage_test_found_mount_stat: null
|
||||
+ storage_test_mount_expected_mount_point: null
|
||||
--
|
||||
2.46.0
|
||||
|
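With MOUNTPOINT added to the lsblk call, `get_block_info()` can report each device's mount point alongside its fstype and label. A rough sketch of that parsing step; the sample line is invented, and the real module builds its dict slightly differently.

```python
import re

# One lsblk --pairs line (invented) after MOUNTPOINT is added to the -o list.
line = ('NAME="/dev/sda1" FSTYPE="xfs" LABEL="" UUID="abcd-1234" '
        'TYPE="part" SIZE="1073741824" MOUNTPOINT="/boot"')

pairs = dict(re.findall(r'(\w+)="([^"]*)"', line))
info = {pairs["NAME"]: {key.lower(): value for key, value in pairs.items() if key != "NAME"}}
print(info["/dev/sda1"]["mountpoint"])  # /boot
```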
SOURCES/0101-fix-Add-support-for-check-flag.patch (new file): 26 lines
@@ -0,0 +1,26 @@
|
||||
From 36acf32d30d106159ba9f2fa88d723d9577c9f15 Mon Sep 17 00:00:00 2001
|
||||
From: Samuel Bancal <Samuel.Bancal@groupe-t2i.com>
|
||||
Date: Thu, 14 Mar 2024 10:15:11 +0100
|
||||
Subject: [PATCH 101/115] fix: Add support for --check flag
|
||||
|
||||
Fix: https://github.com/linux-system-roles/podman/issues/133
|
||||
(cherry picked from commit a47e6a95e2a5ee70714bf315d3e03310365d3650)
|
||||
---
|
||||
tasks/main.yml | 1 +
|
||||
1 file changed, 1 insertion(+)
|
||||
|
||||
diff --git a/tasks/main.yml b/tasks/main.yml
|
||||
index 1b9ca4a..61f1d1c 100644
|
||||
--- a/tasks/main.yml
|
||||
+++ b/tasks/main.yml
|
||||
@@ -21,6 +21,7 @@
|
||||
when: (__podman_packages | difference(ansible_facts.packages))
|
||||
|
||||
- name: Get podman version
|
||||
+ check_mode: false
|
||||
command: podman --version
|
||||
changed_when: false
|
||||
register: __podman_version_output
|
||||
--
|
||||
2.46.0
|
||||
|
@@ -0,0 +1,56 @@
|
||||
From 53f83475c59092e2c23d1957c2fc24c8ca4b6ad9 Mon Sep 17 00:00:00 2001
|
||||
From: Rich Megginson <rmeggins@redhat.com>
|
||||
Date: Tue, 9 Apr 2024 18:27:25 -0600
|
||||
Subject: [PATCH 102/115] fix: use correct user for cancel linger file name
|
||||
|
||||
Cause: When processing a list of kube or quadlet items, the
|
||||
code was using the user id associated with the list, not the
|
||||
item, to specify the linger filename.
|
||||
|
||||
Consequence: The linger file does not exist, so the code
|
||||
does not cancel linger for the actual user.
|
||||
|
||||
Fix: Use the correct username to construct the linger filename.
|
||||
|
||||
Result: Lingering is cancelled for the correct users.
|
||||
|
||||
QE: The test is now in tests_basic.yml
|
||||
|
||||
Signed-off-by: Rich Megginson <rmeggins@redhat.com>
|
||||
(cherry picked from commit 67b88b9aa0a1b1123c2ae24bb7ca4a527924cd13)
|
||||
---
|
||||
tasks/cancel_linger.yml | 2 +-
|
||||
tests/tests_basic.yml | 7 +++++++
|
||||
2 files changed, 8 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/tasks/cancel_linger.yml b/tasks/cancel_linger.yml
|
||||
index 761778b..ede71fe 100644
|
||||
--- a/tasks/cancel_linger.yml
|
||||
+++ b/tasks/cancel_linger.yml
|
||||
@@ -59,4 +59,4 @@
|
||||
- __podman_linger_secrets.stdout == ""
|
||||
changed_when: true
|
||||
args:
|
||||
- removes: /var/lib/systemd/linger/{{ __podman_user }}
|
||||
+ removes: /var/lib/systemd/linger/{{ __podman_linger_user }}
|
||||
diff --git a/tests/tests_basic.yml b/tests/tests_basic.yml
|
||||
index a9f01c9..d4f9238 100644
|
||||
--- a/tests/tests_basic.yml
|
||||
+++ b/tests/tests_basic.yml
|
||||
@@ -409,6 +409,13 @@
|
||||
^[ ]*podman-kube@.+-{{ item[0] }}[.]yml[.]service[ ]+loaded[
|
||||
]+active
|
||||
|
||||
+ - name: Ensure no linger
|
||||
+ stat:
|
||||
+ path: /var/lib/systemd/linger/{{ item[1] }}
|
||||
+ loop: "{{ test_names_users }}"
|
||||
+ register: __stat
|
||||
+ failed_when: __stat.stat.exists
|
||||
+
|
||||
rescue:
|
||||
- name: Dump journal
|
||||
command: journalctl -ex
|
||||
--
|
||||
2.46.0
|
||||
|
SOURCES/0103-test-do-not-check-for-root-linger.patch (new file): 28 lines
@@ -0,0 +1,28 @@
|
||||
From dd93ef65b0d1929184d458914386086fca8b8d7a Mon Sep 17 00:00:00 2001
|
||||
From: Rich Megginson <rmeggins@redhat.com>
|
||||
Date: Wed, 10 Apr 2024 16:06:28 -0600
|
||||
Subject: [PATCH 103/115] test: do not check for root linger
|
||||
|
||||
Do not check if there is a linger file for root.
|
||||
|
||||
Signed-off-by: Rich Megginson <rmeggins@redhat.com>
|
||||
(cherry picked from commit 2b29e049daa28ba6c3b38f514cff9c62be5f3caf)
|
||||
---
|
||||
tests/tests_basic.yml | 1 +
|
||||
1 file changed, 1 insertion(+)
|
||||
|
||||
diff --git a/tests/tests_basic.yml b/tests/tests_basic.yml
|
||||
index d4f9238..d578b15 100644
|
||||
--- a/tests/tests_basic.yml
|
||||
+++ b/tests/tests_basic.yml
|
||||
@@ -412,6 +412,7 @@
|
||||
- name: Ensure no linger
|
||||
stat:
|
||||
path: /var/lib/systemd/linger/{{ item[1] }}
|
||||
+ when: item[1] != "root"
|
||||
loop: "{{ test_names_users }}"
|
||||
register: __stat
|
||||
failed_when: __stat.stat.exists
|
||||
--
|
||||
2.46.0
|
||||
|
@@ -0,0 +1,210 @@
|
||||
From b2e79348094ea8d89b71727d82a80a9f3cfbb1ce Mon Sep 17 00:00:00 2001
|
||||
From: Rich Megginson <rmeggins@redhat.com>
|
||||
Date: Tue, 9 Apr 2024 18:28:19 -0600
|
||||
Subject: [PATCH 104/115] fix: do not use become for changing hostdir
|
||||
ownership, and expose subuid/subgid info
|
||||
|
||||
When creating host directories, do not use `become`, because if
|
||||
it needs to change ownership, that must be done by `root`, not
|
||||
as the rootless podman user.
|
||||
|
||||
In order to test this, I have changed the role to export the subuid and subgid
|
||||
information for the rootless users as two dictionaries:
|
||||
`podman_subuid_info` and `podman_subgid_info`. See `README.md` for
|
||||
usage.
|
||||
|
||||
NOTE that depending on the namespace used by your containers, you might not
|
||||
be able to use the subuid and subgid information, which comes from `getsubids`
|
||||
if available, or directly from the files `/etc/subuid` and `/etc/subgid` on
|
||||
the host.
|
||||
|
||||
QE: The test tests_basic.yml has been extended for this.
|
||||
|
||||
Signed-off-by: Rich Megginson <rmeggins@redhat.com>
|
||||
(cherry picked from commit 3d02eb725355088df6c707717547f5ad6b7c400c)
|
||||
---
|
||||
README.md | 28 ++++++++++++
|
||||
tasks/create_update_kube_spec.yml | 2 -
|
||||
tasks/create_update_quadlet_spec.yml | 2 -
|
||||
tasks/handle_user_group.yml | 66 +++++++++++++++++++++-------
|
||||
tests/tests_basic.yml | 2 +
|
||||
5 files changed, 79 insertions(+), 21 deletions(-)
|
||||
|
||||
diff --git a/README.md b/README.md
|
||||
index ea1edfb..e5a7c12 100644
|
||||
--- a/README.md
|
||||
+++ b/README.md
|
||||
@@ -418,6 +418,34 @@ PodmanArgs=--secret=my-app-pwd,type=env,target=MYAPP_PASSWORD
|
||||
{% endif %}
|
||||
```
|
||||
|
||||
+### podman_subuid_info, podman_subgid_info
|
||||
+
|
||||
+The role needs to ensure any users and groups are present in the subuid and
|
||||
+subgid information. Once it extracts this data, it will be available in
|
||||
+`podman_subuid_info` and `podman_subgid_info`. These are dicts. The key is the
|
||||
+user or group name, and the value is a `dict` with two fields:
|
||||
+
|
||||
+* `start` - the start of the id range for that user or group, as an `int`
|
||||
+* `range` - the id range for that user or group, as an `int`
|
||||
+
|
||||
+```yaml
|
||||
+podman_host_directories:
|
||||
+ "/var/lib/db":
|
||||
+ mode: "0777"
|
||||
+ owner: "{{ 1001 + podman_subuid_info['dbuser']['start'] - 1 }}"
|
||||
+ group: "{{ 1001 + podman_subgid_info['dbgroup']['start'] - 1 }}"
|
||||
+```
|
||||
+
|
||||
+Where `1001` is the uid for user `dbuser`, and `1001` is the gid for group
|
||||
+`dbgroup`.
|
||||
+
|
||||
+**NOTE**: depending on the namespace used by your containers, you might not be
|
||||
+able to use the subuid and subgid information, which comes from `getsubids` if
|
||||
+available, or directly from the files `/etc/subuid` and `/etc/subgid` on the
|
||||
+host. See
|
||||
+[podman user namespace modes](https://www.redhat.com/sysadmin/rootless-podman-user-namespace-modes)
|
||||
+for more information.
|
||||
+
|
||||
## Example Playbooks
|
||||
|
||||
Create rootless container with volume mount:
|
||||
diff --git a/tasks/create_update_kube_spec.yml b/tasks/create_update_kube_spec.yml
|
||||
index 95d7d35..7a8ba9c 100644
|
||||
--- a/tasks/create_update_kube_spec.yml
|
||||
+++ b/tasks/create_update_kube_spec.yml
|
||||
@@ -32,8 +32,6 @@
|
||||
__defaults: "{{ {'path': item} | combine(__podman_hostdirs_defaults) |
|
||||
combine(__owner_group) }}"
|
||||
loop: "{{ __podman_volumes }}"
|
||||
- become: "{{ __podman_rootless | ternary(true, omit) }}"
|
||||
- become_user: "{{ __podman_rootless | ternary(__podman_user, omit) }}"
|
||||
when:
|
||||
- podman_create_host_directories | bool
|
||||
- __podman_volumes | d([]) | length > 0
|
||||
diff --git a/tasks/create_update_quadlet_spec.yml b/tasks/create_update_quadlet_spec.yml
|
||||
index c3e0095..062c105 100644
|
||||
--- a/tasks/create_update_quadlet_spec.yml
|
||||
+++ b/tasks/create_update_quadlet_spec.yml
|
||||
@@ -16,8 +16,6 @@
|
||||
__defaults: "{{ {'path': item} | combine(__podman_hostdirs_defaults) |
|
||||
combine(__owner_group) }}"
|
||||
loop: "{{ __podman_volumes }}"
|
||||
- become: "{{ __podman_rootless | ternary(true, omit) }}"
|
||||
- become_user: "{{ __podman_rootless | ternary(__podman_user, omit) }}"
|
||||
when:
|
||||
- podman_create_host_directories | bool
|
||||
- __podman_volumes | d([]) | length > 0
|
||||
diff --git a/tasks/handle_user_group.yml b/tasks/handle_user_group.yml
|
||||
index 17300b6..ea9984d 100644
|
||||
--- a/tasks/handle_user_group.yml
|
||||
+++ b/tasks/handle_user_group.yml
|
||||
@@ -52,10 +52,26 @@
|
||||
- name: Check user with getsubids
|
||||
command: getsubids {{ __podman_user | quote }}
|
||||
changed_when: false
|
||||
+ register: __podman_register_subuids
|
||||
|
||||
- name: Check group with getsubids
|
||||
command: getsubids -g {{ __podman_group_name | quote }}
|
||||
changed_when: false
|
||||
+ register: __podman_register_subgids
|
||||
+
|
||||
+ - name: Set user subuid and subgid info
|
||||
+ set_fact:
|
||||
+ podman_subuid_info: "{{ podman_subuid_info | d({}) |
|
||||
+ combine({__podman_user:
|
||||
+ {'start': __subuid_data[2] | int, 'range': __subuid_data[3] | int}})
|
||||
+ if __subuid_data | length > 0 else podman_subuid_info | d({}) }}"
|
||||
+ podman_subgid_info: "{{ podman_subgid_info | d({}) |
|
||||
+ combine({__podman_group_name:
|
||||
+ {'start': __subgid_data[2] | int, 'range': __subgid_data[3] | int}})
|
||||
+ if __subgid_data | length > 0 else podman_subgid_info | d({}) }}"
|
||||
+ vars:
|
||||
+ __subuid_data: "{{ __podman_register_subuids.stdout.split() | list }}"
|
||||
+ __subgid_data: "{{ __podman_register_subgids.stdout.split() | list }}"
|
||||
|
||||
- name: Check subuid, subgid files if no getsubids
|
||||
when:
|
||||
@@ -63,32 +79,48 @@
|
||||
- __podman_user not in ["root", "0"]
|
||||
- __podman_group not in ["root", "0"]
|
||||
block:
|
||||
- - name: Check if user is in subuid file
|
||||
- find:
|
||||
- path: /etc
|
||||
- pattern: subuid
|
||||
- use_regex: true
|
||||
- contains: "^{{ __podman_user }}:.*$"
|
||||
- register: __podman_uid_line_found
|
||||
+ - name: Get subuid file
|
||||
+ slurp:
|
||||
+ path: /etc/subuid
|
||||
+ register: __podman_register_subuids
|
||||
+
|
||||
+ - name: Get subgid file
|
||||
+ slurp:
|
||||
+ path: /etc/subgid
|
||||
+ register: __podman_register_subgids
|
||||
+
|
||||
+ - name: Set user subuid and subgid info
|
||||
+ set_fact:
|
||||
+ podman_subuid_info: "{{ podman_subuid_info | d({}) |
|
||||
+ combine({__podman_user:
|
||||
+ {'start': __subuid_data[1] | int, 'range': __subuid_data[2] | int}})
|
||||
+ if __subuid_data else podman_subuid_info | d({}) }}"
|
||||
+ podman_subgid_info: "{{ podman_subgid_info | d({}) |
|
||||
+ combine({__podman_group_name:
|
||||
+ {'start': __subgid_data[1] | int, 'range': __subgid_data[2] | int}})
|
||||
+ if __subgid_data else podman_subgid_info | d({}) }}"
|
||||
+ vars:
|
||||
+ __subuid_match_line: "{{
|
||||
+ (__podman_register_subuids.content | b64decode).split('\n') | list |
|
||||
+ select('match', '^' ~ __podman_user ~ ':') | list }}"
|
||||
+ __subuid_data: "{{ __subuid_match_line[0].split(':') | list
|
||||
+ if __subuid_match_line else null }}"
|
||||
+ __subgid_match_line: "{{
|
||||
+ (__podman_register_subgids.content | b64decode).split('\n') | list |
|
||||
+ select('match', '^' ~ __podman_group_name ~ ':') | list }}"
|
||||
+ __subgid_data: "{{ __subgid_match_line[0].split(':') | list
|
||||
+ if __subgid_match_line else null }}"
|
||||
|
||||
- name: Fail if user not in subuid file
|
||||
fail:
|
||||
msg: >
|
||||
The given podman user [{{ __podman_user }}] is not in the
|
||||
/etc/subuid file - cannot continue
|
||||
- when: not __podman_uid_line_found.matched
|
||||
-
|
||||
- - name: Check if group is in subgid file
|
||||
- find:
|
||||
- path: /etc
|
||||
- pattern: subgid
|
||||
- use_regex: true
|
||||
- contains: "^{{ __podman_group_name }}:.*$"
|
||||
- register: __podman_gid_line_found
|
||||
+ when: not __podman_user in podman_subuid_info
|
||||
|
||||
- name: Fail if group not in subgid file
|
||||
fail:
|
||||
msg: >
|
||||
The given podman group [{{ __podman_group_name }}] is not in the
|
||||
/etc/subgid file - cannot continue
|
||||
- when: not __podman_gid_line_found.matched
|
||||
+ when: not __podman_group_name in podman_subuid_info
|
||||
diff --git a/tests/tests_basic.yml b/tests/tests_basic.yml
|
||||
index d578b15..121c3a7 100644
|
||||
--- a/tests/tests_basic.yml
|
||||
+++ b/tests/tests_basic.yml
|
||||
@@ -8,6 +8,8 @@
|
||||
podman_host_directories:
|
||||
"/tmp/httpd1-create":
|
||||
mode: "0777"
|
||||
+ owner: "{{ 1001 + podman_subuid_info['user1']['start'] - 1 }}"
|
||||
+ group: "{{ 1001 + podman_subgid_info['user1']['start'] - 1 }}"
|
||||
podman_run_as_user: root
|
||||
test_names_users:
|
||||
- [httpd1, user1, 1001]
|
||||
--
|
||||
2.46.0
|
||||
|
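The README text added above computes a host UID from a rootless user's subuid range (`1001 + podman_subuid_info[...]['start'] - 1`). A hedged sketch of that lookup; the username `dbuser` and the in-container UID 1001 come from the README example, `/etc/subuid` is assumed to exist on the host, and the helper is not part of the role.

```python
# Sketch of the podman_subuid_info idea: read the user's row from /etc/subuid and
# map an in-container UID to the host UID that owns it.
def subid_start(path, name):
    with open(path) as f:
        for line in f:
            fields = line.strip().split(":")
            if len(fields) >= 2 and fields[0] == name:
                return int(fields[1])
    return None

start = subid_start("/etc/subuid", "dbuser")  # assumed user from the README example
if start is not None:
    container_uid = 1001
    host_uid = container_uid + start - 1
    print(host_uid)
```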
@@ -0,0 +1,42 @@
|
||||
From 7978bed4d52e44feae114ba56e9b5035b7dd2c1c Mon Sep 17 00:00:00 2001
|
||||
From: Rich Megginson <rmeggins@redhat.com>
|
||||
Date: Wed, 17 Apr 2024 10:14:21 -0600
|
||||
Subject: [PATCH 105/115] chore: change no_log false to true; fix comment
|
||||
|
||||
Forgot to change a `no_log: false` back to `no_log: true` when debugging.
|
||||
Fix an error in a comment
|
||||
|
||||
Signed-off-by: Rich Megginson <rmeggins@redhat.com>
|
||||
(cherry picked from commit b37ee8fc7e12317660cca765760c32bd4ba91035)
|
||||
---
|
||||
tasks/handle_secret.yml | 2 +-
|
||||
vars/main.yml | 2 +-
|
||||
2 files changed, 2 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/tasks/handle_secret.yml b/tasks/handle_secret.yml
|
||||
index b3677ef..02bc15b 100644
|
||||
--- a/tasks/handle_secret.yml
|
||||
+++ b/tasks/handle_secret.yml
|
||||
@@ -39,7 +39,7 @@
|
||||
become: "{{ __podman_rootless | ternary(true, omit) }}"
|
||||
become_user: "{{ __podman_rootless | ternary(__podman_user, omit) }}"
|
||||
when: not __podman_rootless or __podman_xdg_stat.stat.exists
|
||||
- no_log: false
|
||||
+ no_log: true
|
||||
vars:
|
||||
__params: |
|
||||
{% set rc = {} %}
|
||||
diff --git a/vars/main.yml b/vars/main.yml
|
||||
index 47293c5..38402ff 100644
|
||||
--- a/vars/main.yml
|
||||
+++ b/vars/main.yml
|
||||
@@ -74,5 +74,5 @@ __podman_user_kube_path: "/.config/containers/ansible-kubernetes.d"
|
||||
# location for system quadlet files
|
||||
__podman_system_quadlet_path: "/etc/containers/systemd"
|
||||
|
||||
-# location for user kubernetes yaml files
|
||||
+# location for user quadlet files
|
||||
__podman_user_quadlet_path: "/.config/containers/systemd"
|
||||
--
|
||||
2.46.0
|
||||
|
SOURCES/0106-fix-make-kube-cleanup-idempotent.patch (new file): 214 lines
@@ -0,0 +1,214 @@
|
||||
From 07053a415b4a0bde557f28f6f607250915e908e6 Mon Sep 17 00:00:00 2001
|
||||
From: Rich Megginson <rmeggins@redhat.com>
|
||||
Date: Wed, 17 Apr 2024 11:35:52 -0600
|
||||
Subject: [PATCH 106/115] fix: make kube cleanup idempotent
|
||||
|
||||
Cause: The task that calls podman_play was not checking if the kube yaml
|
||||
file existed when cleaning up.
|
||||
|
||||
Consequence: The task would give an error that the pod could not be
|
||||
removed.
|
||||
|
||||
Fix: Do not attempt to remove the pod if the kube yaml file does not
|
||||
exist.
|
||||
|
||||
Result: Calling the podman role repeatedly to remove a kube spec
|
||||
will not fail and will not report changes for subsequent removals.
|
||||
|
||||
QE: tests_basic.yml has been changed to check for this case
|
||||
|
||||
Signed-off-by: Rich Megginson <rmeggins@redhat.com>
|
||||
(cherry picked from commit e506f39b6608613a5801190091a72b013b85a888)
---
tasks/cleanup_kube_spec.yml | 9 +++++-
tests/tests_basic.yml | 62 ++++++++++++++++++++++++++-----------
2 files changed, 52 insertions(+), 19 deletions(-)

diff --git a/tasks/cleanup_kube_spec.yml b/tasks/cleanup_kube_spec.yml
index c864179..b6b47bd 100644
--- a/tasks/cleanup_kube_spec.yml
+++ b/tasks/cleanup_kube_spec.yml
@@ -25,6 +25,11 @@
vars:
__service_error: Could not find the requested service

+- name: Check if kube file exists
+ stat:
+ path: "{{ __podman_kube_file }}"
+ register: __podman_kube_file_stat
+
- name: Remove pod/containers
containers.podman.podman_play: "{{ __podman_kube_spec |
combine({'kube_file': __podman_kube_file}) }}"
@@ -33,7 +38,9 @@
become: "{{ __podman_rootless | ternary(true, omit) }}"
become_user: "{{ __podman_rootless | ternary(__podman_user, omit) }}"
register: __podman_removed
- when: not __podman_rootless or __podman_xdg_stat.stat.exists
+ when:
+ - not __podman_rootless or __podman_xdg_stat.stat.exists
+ - __podman_kube_file_stat.stat.exists

- name: Remove kubernetes yaml file
file:
diff --git a/tests/tests_basic.yml b/tests/tests_basic.yml
index 121c3a7..b8ddc50 100644
--- a/tests/tests_basic.yml
+++ b/tests/tests_basic.yml
@@ -6,13 +6,16 @@
- vars/test_vars.yml
vars:
podman_host_directories:
- "/tmp/httpd1-create":
+ "{{ __test_tmpdir.path ~ '/httpd1-create' }}":
mode: "0777"
- owner: "{{ 1001 + podman_subuid_info['user1']['start'] - 1 }}"
- group: "{{ 1001 + podman_subgid_info['user1']['start'] - 1 }}"
+ owner: "{{ 1001 +
+ podman_subuid_info[__podman_test_username]['start'] - 1 }}"
+ group: "{{ 1001 +
+ podman_subgid_info[__podman_test_username]['start'] - 1 }}"
podman_run_as_user: root
+ __podman_test_username: podman_basic_user
test_names_users:
- - [httpd1, user1, 1001]
+ - [httpd1, "{{ __podman_test_username }}", 1001]
- [httpd2, root, 0]
- [httpd3, root, 0]
podman_create_host_directories: true
@@ -26,7 +29,7 @@
- state: started
debug: true
log_level: debug
- run_as_user: user1
+ run_as_user: "{{ __podman_test_username }}"
kube_file_content:
apiVersion: v1
kind: Pod
@@ -57,10 +60,10 @@
volumes:
- name: www
hostPath:
- path: /tmp/httpd1
+ path: "{{ __test_tmpdir.path ~ '/httpd1' }}"
- name: create
hostPath:
- path: /tmp/httpd1-create
+ path: "{{ __test_tmpdir.path ~ '/httpd1-create' }}"
- state: started
debug: true
log_level: debug
@@ -94,10 +97,10 @@
volumes:
- name: www
hostPath:
- path: /tmp/httpd2
+ path: "{{ __test_tmpdir.path ~ '/httpd2' }}"
- name: create
hostPath:
- path: /tmp/httpd2-create
+ path: "{{ __test_tmpdir.path ~ '/httpd2-create' }}"
__podman_kube_file_content: |
apiVersion: v1
kind: Pod
@@ -128,11 +131,23 @@
volumes:
- name: www
hostPath:
- path: /tmp/httpd3
+ path: "{{ __test_tmpdir.path ~ '/httpd3' }}"
- name: create
hostPath:
- path: /tmp/httpd3-create
+ path: "{{ __test_tmpdir.path ~ '/httpd3-create' }}"
tasks:
+ - name: Create tmpdir for testing
+ tempfile:
+ state: directory
+ prefix: lsr_
+ suffix: _podman
+ register: __test_tmpdir
+
+ - name: Change tmpdir permissions
+ file:
+ path: "{{ __test_tmpdir.path }}"
+ mode: "0777"
+
- name: Run basic tests
vars:
__podman_use_kube_file:
@@ -156,7 +171,7 @@

- name: Create user
user:
- name: user1
+ name: "{{ __podman_test_username }}"
uid: 1001

- name: Create tempfile for kube_src
@@ -171,12 +186,12 @@
copy:
content: "{{ __podman_kube_file_content }}"
dest: "{{ __kube_file_src.path }}"
- mode: 0600
+ mode: "0600"
delegate_to: localhost

- name: Create host directories for data
file:
- path: /tmp/{{ item[0] }}
+ path: "{{ __test_tmpdir.path ~ '/' ~ item[0] }}"
state: directory
mode: "0755"
owner: "{{ item[1] }}"
@@ -184,7 +199,7 @@

- name: Create data files
copy:
- dest: /tmp/{{ item[0] }}/index.txt
+ dest: "{{ __test_tmpdir.path ~ '/' ~ item[0] ~ '/index.txt' }}"
content: "123"
mode: "0644"
owner: "{{ item[1] }}"
@@ -315,7 +330,7 @@
loop: [15001, 15002]

- name: Check host directories
- command: ls -alrtF /tmp/{{ item[0] }}-create
+ command: ls -alrtF {{ __test_tmpdir.path ~ '/' ~ item[0] }}-create
loop: "{{ test_names_users }}"
changed_when: false

@@ -419,6 +434,18 @@
register: __stat
failed_when: __stat.stat.exists

+ - name: Remove pods and units again - test idempotence
+ include_role:
+ name: linux-system-roles.podman
+ vars:
+ # noqa jinja[spacing]
+ podman_kube_specs: "{{ __podman_kube_specs |
+ union([__podman_use_kube_file]) |
+ map('combine', {'state':'absent'}) | list }}"
+ podman_create_host_directories: false
+ podman_firewall: []
+ podman_selinux_ports: []
+
rescue:
- name: Dump journal
command: journalctl -ex
@@ -438,9 +465,8 @@

- name: Clean up host directories
file:
- path: /tmp/{{ item }}
+ path: "{{ __test_tmpdir.path }}"
state: absent
- loop: [httpd1, httpd2, httpd3]
tags:
- tests::cleanup

--
2.46.0

35
SOURCES/0107-chore-use-none-in-jinja-code-not-null.patch
Normal file
@@ -0,0 +1,35 @@
From 0a8ce32cdc093c388718d4fe28007259ac86854d Mon Sep 17 00:00:00 2001
From: Rich Megginson <rmeggins@redhat.com>
Date: Thu, 18 Apr 2024 08:39:33 -0600
Subject: [PATCH 107/115] chore: use none in jinja code, not null

Must use `none` in Jinja code, not `null`, which is used in YAML.

Signed-off-by: Rich Megginson <rmeggins@redhat.com>
(cherry picked from commit fdf98595e9ecdacfed80d40c2539b18c7d715368)
---
tasks/handle_user_group.yml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tasks/handle_user_group.yml b/tasks/handle_user_group.yml
index ea9984d..0b98d99 100644
--- a/tasks/handle_user_group.yml
+++ b/tasks/handle_user_group.yml
@@ -104,12 +104,12 @@
(__podman_register_subuids.content | b64decode).split('\n') | list |
select('match', '^' ~ __podman_user ~ ':') | list }}"
__subuid_data: "{{ __subuid_match_line[0].split(':') | list
- if __subuid_match_line else null }}"
+ if __subuid_match_line else none }}"
__subgid_match_line: "{{
(__podman_register_subgids.content | b64decode).split('\n') | list |
select('match', '^' ~ __podman_group_name ~ ':') | list }}"
__subgid_data: "{{ __subgid_match_line[0].split(':') | list
- if __subgid_match_line else null }}"
+ if __subgid_match_line else none }}"

- name: Fail if user not in subuid file
fail:
--
2.46.0

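Side note, not part of the patch above: a minimal sketch of the `none` vs `null` distinction the commit message describes. Inside a Jinja expression, `none` is the Python-style literal, while `null` would be looked up as an (undefined) variable; `null` only has meaning at the YAML layer. The task and variable names below (some_list, first_or_none) are made up for illustration.

    - name: Show YAML null vs Jinja none
      set_fact:
        # YAML spelling: the value of this key is null
        empty_from_yaml: null
        # Jinja spelling: the expression evaluates to Python None
        first_or_none: "{{ some_list[0] if some_list | length > 0 else none }}"
      vars:
        some_list: []
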
44
SOURCES/0108-uid-1001-conflicts-on-some-test-systems.patch
Normal file
@@ -0,0 +1,44 @@
From 4824891e596c197e49557d9d2679cabc76e598e9 Mon Sep 17 00:00:00 2001
From: Rich Megginson <rmeggins@redhat.com>
Date: Fri, 19 Apr 2024 07:33:41 -0600
Subject: [PATCH 108/115] uid 1001 conflicts on some test systems

(cherry picked from commit 5b7ad16d23b78f6f0f68638c0d69015ebb26b3b0)
---
tests/tests_basic.yml | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/tests/tests_basic.yml b/tests/tests_basic.yml
index b8ddc50..c91cc5f 100644
--- a/tests/tests_basic.yml
+++ b/tests/tests_basic.yml
@@ -8,14 +8,14 @@
podman_host_directories:
"{{ __test_tmpdir.path ~ '/httpd1-create' }}":
mode: "0777"
- owner: "{{ 1001 +
+ owner: "{{ 3001 +
podman_subuid_info[__podman_test_username]['start'] - 1 }}"
- group: "{{ 1001 +
+ group: "{{ 3001 +
podman_subgid_info[__podman_test_username]['start'] - 1 }}"
podman_run_as_user: root
__podman_test_username: podman_basic_user
test_names_users:
- - [httpd1, "{{ __podman_test_username }}", 1001]
+ - [httpd1, "{{ __podman_test_username }}", 3001]
- [httpd2, root, 0]
- [httpd3, root, 0]
podman_create_host_directories: true
@@ -172,7 +172,7 @@
- name: Create user
user:
name: "{{ __podman_test_username }}"
- uid: 1001
+ uid: 3001

- name: Create tempfile for kube_src
tempfile:
--
2.46.0

26
SOURCES/0109-fix-ansible-lint-octal-value-issues.patch
Normal file
@@ -0,0 +1,26 @@
From 2343663a17a42e71aa5b78ad5deca72823a0afb0 Mon Sep 17 00:00:00 2001
From: Rich Megginson <rmeggins@redhat.com>
Date: Mon, 3 Jun 2024 13:15:07 -0600
Subject: [PATCH 109/115] fix ansible-lint octal value issues

(cherry picked from commit c684c68151f106b4a494bed865e138a0b54ecb43)
---
tests/tests_quadlet_demo.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/tests_quadlet_demo.yml b/tests/tests_quadlet_demo.yml
index a719f9c..259a694 100644
--- a/tests/tests_quadlet_demo.yml
+++ b/tests/tests_quadlet_demo.yml
@@ -98,7 +98,7 @@
get_url:
url: https://localhost:8000
dest: /run/out
- mode: 0600
+ mode: "0600"
validate_certs: false
register: __web_status
until: __web_status is success
--
2.46.0

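Aside, not part of the patch above: the reason ansible-lint flags bare octal modes. Under YAML 1.1 parsing (which Ansible uses), an unquoted leading-zero value such as 0600 is loaded as the integer 384 rather than the string "0600", so quoting the mode keeps the intent unambiguous. A small hypothetical example:

    - name: Write a file with an explicitly quoted mode
      copy:
        content: "example"        # hypothetical content
        dest: /tmp/example.conf   # hypothetical path
        mode: "0600"              # quoted, passed to the module as a string
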
@@ -0,0 +1,308 @@
From 6a5722ce2a591c57e50ac4ff702c810bf452431d Mon Sep 17 00:00:00 2001
From: Rich Megginson <rmeggins@redhat.com>
Date: Thu, 6 Jun 2024 15:20:22 -0600
Subject: [PATCH 110/115] fix: grab name of network to remove from quadlet file

Cause: The code was using "systemd-" + name of quadlet for
the network name when removing networks.

Consequence: If the quadlet had a different NetworkName, the
removal would fail.

Fix: Grab the network quadlet file and grab the NetworkName from
the file to use to remove the network.

Result: The removal of quadlet networks will work both with and
without a custom NetworkName in the quadlet file.

Signed-off-by: Rich Megginson <rmeggins@redhat.com>

This also adds a fix for el10 and Fedora which installs the iptables-nft
package to allow rootless podman to manage networks using nftables.

(cherry picked from commit bcd5a750250736a07605c72f98e50c1babcddf16)
---
.ostree/packages-runtime-CentOS-10.txt | 3 ++
.ostree/packages-runtime-Fedora.txt | 3 ++
.ostree/packages-runtime-RedHat-10.txt | 3 ++
tasks/cleanup_quadlet_spec.yml | 43 +++++++++++++++++++++++++-
tests/files/quadlet-basic.network | 5 +++
tests/tests_quadlet_basic.yml | 31 +++++++------------
tests/tests_quadlet_demo.yml | 19 +++---------
vars/CentOS_10.yml | 7 +++++
vars/Fedora.yml | 7 +++++
vars/RedHat_10.yml | 7 +++++
10 files changed, 94 insertions(+), 34 deletions(-)
create mode 100644 .ostree/packages-runtime-CentOS-10.txt
create mode 100644 .ostree/packages-runtime-Fedora.txt
create mode 100644 .ostree/packages-runtime-RedHat-10.txt
create mode 100644 tests/files/quadlet-basic.network
create mode 100644 vars/CentOS_10.yml
create mode 100644 vars/Fedora.yml
create mode 100644 vars/RedHat_10.yml

diff --git a/.ostree/packages-runtime-CentOS-10.txt b/.ostree/packages-runtime-CentOS-10.txt
new file mode 100644
index 0000000..16b8eae
--- /dev/null
+++ b/.ostree/packages-runtime-CentOS-10.txt
@@ -0,0 +1,3 @@
+iptables-nft
+podman
+shadow-utils-subid
diff --git a/.ostree/packages-runtime-Fedora.txt b/.ostree/packages-runtime-Fedora.txt
new file mode 100644
index 0000000..16b8eae
--- /dev/null
+++ b/.ostree/packages-runtime-Fedora.txt
@@ -0,0 +1,3 @@
+iptables-nft
+podman
+shadow-utils-subid
diff --git a/.ostree/packages-runtime-RedHat-10.txt b/.ostree/packages-runtime-RedHat-10.txt
new file mode 100644
index 0000000..16b8eae
--- /dev/null
+++ b/.ostree/packages-runtime-RedHat-10.txt
@@ -0,0 +1,3 @@
+iptables-nft
+podman
+shadow-utils-subid
diff --git a/tasks/cleanup_quadlet_spec.yml b/tasks/cleanup_quadlet_spec.yml
index ba68771..8ea069b 100644
--- a/tasks/cleanup_quadlet_spec.yml
+++ b/tasks/cleanup_quadlet_spec.yml
@@ -30,6 +30,43 @@
vars:
__service_error: Could not find the requested service

+- name: See if quadlet file exists
+ stat:
+ path: "{{ __podman_quadlet_file }}"
+ register: __podman_network_stat
+ when: __podman_quadlet_type == "network"
+
+- name: Get network quadlet network name
+ when:
+ - __podman_quadlet_type == "network"
+ - __podman_network_stat.stat.exists
+ block:
+ - name: Create tempdir
+ tempfile:
+ prefix: podman_
+ suffix: _lsr.ini
+ state: directory
+ register: __podman_network_tmpdir
+ delegate_to: localhost
+
+ - name: Fetch the network quadlet
+ fetch:
+ dest: "{{ __podman_network_tmpdir.path }}/network.ini"
+ src: "{{ __podman_quadlet_file }}"
+ flat: true
+
+ - name: Get the network name
+ set_fact:
+ __podman_network_name: "{{
+ lookup('ini', 'NetworkName section=Network file=' ~
+ __podman_network_tmpdir.path ~ '/network.ini') }}"
+ always:
+ - name: Remove tempdir
+ file:
+ path: "{{ __podman_network_tmpdir.path }}"
+ state: absent
+ delegate_to: localhost
+
- name: Remove quadlet file
file:
path: "{{ __podman_quadlet_file }}"
@@ -62,10 +99,14 @@
changed_when: true

- name: Remove network
- command: podman network rm systemd-{{ __podman_quadlet_name }}
+ command: podman network rm {{ __name | quote }}
changed_when: true
when: __podman_quadlet_type == "network"