Compare commits

No commits in common. "c8" and "imports/c8/rhel-system-roles-1.0-21.el8" have entirely different histories.

61 changed files with 3508 additions and 5780 deletions

.gitignore

@@ -1,34 +1,13 @@
-SOURCES/ad_integration-1.4.6.tar.gz
-SOURCES/ansible-posix-1.5.4.tar.gz
-SOURCES/ansible-sshd-v0.25.0.tar.gz
-SOURCES/auto-maintenance-11ad785c9bb72611244e7909450ca4247e12db4d.tar.gz
-SOURCES/bootloader-1.0.7.tar.gz
-SOURCES/certificate-1.3.3.tar.gz
-SOURCES/cockpit-1.5.10.tar.gz
-SOURCES/community-general-8.3.0.tar.gz
-SOURCES/containers-podman-1.15.4.tar.gz
-SOURCES/crypto_policies-1.3.2.tar.gz
-SOURCES/fapolicyd-1.1.1.tar.gz
-SOURCES/firewall-1.7.4.tar.gz
-SOURCES/ha_cluster-1.14.0.tar.gz
-SOURCES/journald-1.2.3.tar.gz
-SOURCES/kdump-1.4.4.tar.gz
-SOURCES/kernel_settings-1.2.2.tar.gz
-SOURCES/keylime_server-1.1.2.tar.gz
-SOURCES/logging-1.13.4.tar.gz
-SOURCES/metrics-1.10.1.tar.gz
-SOURCES/nbde_client-1.3.0.tar.gz
-SOURCES/nbde_server-1.4.3.tar.gz
-SOURCES/network-1.15.1.tar.gz
-SOURCES/podman-1.4.7.tar.gz
-SOURCES/postfix-1.4.3.tar.gz
-SOURCES/postgresql-1.3.5.tar.gz
-SOURCES/rhc-1.6.0.tar.gz
-SOURCES/selinux-1.7.4.tar.gz
-SOURCES/snapshot-1.3.1.tar.gz
-SOURCES/ssh-1.3.2.tar.gz
-SOURCES/storage-1.16.2.tar.gz
-SOURCES/systemd-1.1.2.tar.gz
-SOURCES/timesync-1.9.0.tar.gz
-SOURCES/tlog-1.3.3.tar.gz
-SOURCES/vpn-1.6.3.tar.gz
+SOURCES/certificate-fedef6e.tar.gz
+SOURCES/kdump-0c2bb28.tar.gz
+SOURCES/kernel_settings-901a73a.tar.gz
+SOURCES/logging-fe3f658.tar.gz
+SOURCES/metrics-7f94b49.tar.gz
+SOURCES/nbde_client-6306def.tar.gz
+SOURCES/nbde_server-4b6cfca.tar.gz
+SOURCES/network-bf4501b.tar.gz
+SOURCES/postfix-0.1.tar.gz
+SOURCES/selinux-6cd1ec8.tar.gz
+SOURCES/storage-81f30ab.tar.gz
+SOURCES/timesync-924650d.tar.gz
+SOURCES/tlog-cfa70b6.tar.gz

.rhel-system-roles.metadata

@@ -1,34 +1,13 @@
-11b58e43e1b78cb75eda26724359f4d748173d5f SOURCES/ad_integration-1.4.6.tar.gz
-da646eb9ba655f1693cc950ecb5c24af39ee1af6 SOURCES/ansible-posix-1.5.4.tar.gz
-5829f61d848d1fe52ecd1702c055eeed8ef56e70 SOURCES/ansible-sshd-v0.25.0.tar.gz
-e4df3548cf129b61c40b2d013917e07be2f3ba4e SOURCES/auto-maintenance-11ad785c9bb72611244e7909450ca4247e12db4d.tar.gz
-7ae4b79529d14c0c8958cf9633f8d560d718f4e7 SOURCES/bootloader-1.0.7.tar.gz
-9eaac83b306b2fb8dd8e82bc4b03b30285d2024f SOURCES/certificate-1.3.3.tar.gz
-15677bec6ddafb75911d7c29fe1eb1c24b9b4f1c SOURCES/cockpit-1.5.10.tar.gz
-15fd2f2c08ae17cc47efb76bd14fb9ab6f33bc26 SOURCES/community-general-8.3.0.tar.gz
-2c0a98aedb2c031bfc94609bc9553d192224b159 SOURCES/containers-podman-1.15.4.tar.gz
-6705818b1fdf3cc82083937265f7942e3d3ccc2d SOURCES/crypto_policies-1.3.2.tar.gz
-29505121f6798f527045c5f66656fd5c19bed5fe SOURCES/fapolicyd-1.1.1.tar.gz
-1a7a875cebbd3e146f6ca554269ee20845cf877b SOURCES/firewall-1.7.4.tar.gz
-53e8991ca7e0c5c97ab010e843bc1a7c4a98eb96 SOURCES/ha_cluster-1.14.0.tar.gz
-e96ba9f5b3ae08a12dbf072f118e316036553b94 SOURCES/journald-1.2.3.tar.gz
-de6c6103b7023aa21782906696e712b428600a92 SOURCES/kdump-1.4.4.tar.gz
-0f28a0919874f650ef0149409116bae12d2363e0 SOURCES/kernel_settings-1.2.2.tar.gz
-85c14c7e260b247eb7947c8706af82ff5aac07d2 SOURCES/keylime_server-1.1.2.tar.gz
-4825923fc0fa29e80c08864b0afc50e2e075be91 SOURCES/logging-1.13.4.tar.gz
-e795238995d2dfb2cbdb5cc9bf4923f7410ac49a SOURCES/metrics-1.10.1.tar.gz
-544c5c9e53beef034b0d39ecf944e0bb13231535 SOURCES/nbde_client-1.3.0.tar.gz
-dce6435ca145b3143c1326a8e413e8173e5655ef SOURCES/nbde_server-1.4.3.tar.gz
-e89a4d6974a089f035b1f3fc79a1f9cacfa1f933 SOURCES/network-1.15.1.tar.gz
-fc242b6f776088720ef04e5891c75fd33e6e1e96 SOURCES/podman-1.4.7.tar.gz
-ddb7e2a575e4b96666ce13dbdbaea97cc2f83954 SOURCES/postfix-1.4.3.tar.gz
-bf0f12e78bfc2120d85c5458aa7d53b15738e73c SOURCES/postgresql-1.3.5.tar.gz
-b519a4e35b55e97bf954916d77f1f1f82ec2615b SOURCES/rhc-1.6.0.tar.gz
-458b076a73a1c3597485b60bc734b225f3079a86 SOURCES/selinux-1.7.4.tar.gz
-8fdcd362f021d41165c4a959ba79136491389343 SOURCES/snapshot-1.3.1.tar.gz
-d2c153993e51ce949db861db2aa15e8ec90b45af SOURCES/ssh-1.3.2.tar.gz
-e08c1df6c6842f6ad37fff34d2e9d96e9cdddd70 SOURCES/storage-1.16.2.tar.gz
-df8f2896ad761da73872d17a0f0cd8cfd34e0671 SOURCES/systemd-1.1.2.tar.gz
-0a9df710ddd8a43e74cbd77e4414d5ea7e90d7b9 SOURCES/timesync-1.9.0.tar.gz
-6d559dc44f44bc7e505602b36b51b4d1b60f2754 SOURCES/tlog-1.3.3.tar.gz
-27395883fa555658257e70287e709f8ccc1d8392 SOURCES/vpn-1.6.3.tar.gz
+5aa98ec9e109c5ebfae327718e5cad1d3c837e4f SOURCES/certificate-fedef6e.tar.gz
+36b200d1c6a8d1cb1ea87e3e9aa8c4f6bbd8155d SOURCES/kdump-0c2bb28.tar.gz
+263a6bbe7b25fbbc13c60b6b30861b63ec2648cd SOURCES/kernel_settings-901a73a.tar.gz
+9f365ee569d0d6e542983842ffd7c81c82e2c3ca SOURCES/logging-fe3f658.tar.gz
+3c25f49356e9325ba694d14ece036c8ea3aa16f6 SOURCES/metrics-7f94b49.tar.gz
+435fed277e03b6c409ebbfa421c15f97ba15e8c8 SOURCES/nbde_client-6306def.tar.gz
+e936390ddc7440e25190d6ff98cf5e5b3bf1fc3b SOURCES/nbde_server-4b6cfca.tar.gz
+d1e3e5cd724e7a61a9b3f4eb2bf669d6ed6f9cde SOURCES/network-bf4501b.tar.gz
+66c82331f4ac9598c506c3999965b4d07dbfe49d SOURCES/postfix-0.1.tar.gz
+246383bd6823533ed3a51a0501b75e38ba852908 SOURCES/selinux-6cd1ec8.tar.gz
+d1ba125b693ac5b8705e79d92b13f24c01c51a86 SOURCES/storage-81f30ab.tar.gz
+ffd2a706e4e3007684aa9874c8457ad5c8920050 SOURCES/timesync-924650d.tar.gz
+66538d3279cb5972f73a70960a4407d2abe56883 SOURCES/tlog-cfa70b6.tar.gz

@@ -1,74 +0,0 @@
From 8b3cfc1a30da1ab681eb8c250baa2d6395ecc0d2 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 3 Apr 2024 15:12:00 +0200
Subject: [PATCH 01/10] test: fix sector-based disk size calculation from
ansible_devices
Device sizes specified in sectors are generally given in 512-byte sectors,
regardless of the device's actual physical sector size. Example
ansible_devices facts for a drive with a 4k sector size:
...
"sectors": "41943040",
"sectorsize": "4096",
"size": "20.00 GB"
...
Resolves: RHEL-30959
Signed-off-by: Vojtech Trefny <vtrefny@redhat.com>
(cherry picked from commit bb1eb23ccd6e9475cd698f0a6f2f497ffefbccd2)
---
tests/tests_create_lv_size_equal_to_vg.yml | 3 +--
tests/tests_misc.yml | 3 +--
tests/tests_resize.yml | 6 ++----
3 files changed, 4 insertions(+), 8 deletions(-)
diff --git a/tests/tests_create_lv_size_equal_to_vg.yml b/tests/tests_create_lv_size_equal_to_vg.yml
index cab4f08..535f73b 100644
--- a/tests/tests_create_lv_size_equal_to_vg.yml
+++ b/tests/tests_create_lv_size_equal_to_vg.yml
@@ -8,8 +8,7 @@
volume_group_size: '10g'
lv_size: '10g'
unused_disk_subfact: '{{ ansible_devices[unused_disks[0]] }}'
- disk_size: '{{ unused_disk_subfact.sectors | int *
- unused_disk_subfact.sectorsize | int }}'
+ disk_size: '{{ unused_disk_subfact.sectors | int * 512 }}'
tags:
- tests::lvm
diff --git a/tests/tests_misc.yml b/tests/tests_misc.yml
index 6373897..363d843 100644
--- a/tests/tests_misc.yml
+++ b/tests/tests_misc.yml
@@ -8,8 +8,7 @@
volume_group_size: "5g"
volume1_size: "4g"
unused_disk_subfact: "{{ ansible_devices[unused_disks[0]] }}"
- too_large_size: "{{ (unused_disk_subfact.sectors | int * 1.2) *
- unused_disk_subfact.sectorsize | int }}"
+ too_large_size: "{{ (unused_disk_subfact.sectors | int * 1.2) * 512 }}"
tags:
- tests::lvm
tasks:
diff --git a/tests/tests_resize.yml b/tests/tests_resize.yml
index 06fb375..1cd2176 100644
--- a/tests/tests_resize.yml
+++ b/tests/tests_resize.yml
@@ -11,10 +11,8 @@
invalid_size1: xyz GiB
invalid_size2: none
unused_disk_subfact: '{{ ansible_devices[unused_disks[0]] }}'
- too_large_size: '{{ unused_disk_subfact.sectors | int * 1.2 *
- unused_disk_subfact.sectorsize | int }}'
- disk_size: '{{ unused_disk_subfact.sectors | int *
- unused_disk_subfact.sectorsize | int }}'
+ too_large_size: '{{ unused_disk_subfact.sectors | int * 1.2 * 512 }}'
+ disk_size: '{{ unused_disk_subfact.sectors | int * 512 }}'
tags:
- tests::lvm
tasks:
--
2.46.0
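
As a quick sanity check of the arithmetic in this patch, here is a Python sketch using the fact values quoted in the commit message; only the fixed 512-byte calculation matches the reported size:

sectors = 41943040           # "sectors" from the quoted ansible_devices facts
physical_sector_size = 4096  # "sectorsize" reported for the 4k drive

# The sector count is always in 512-byte units, so only the first
# result matches the reported "size": "20.00 GB":
print(sectors * 512 / 1024 ** 3)                   # 20.0
print(sectors * physical_sector_size / 1024 ** 3)  # 160.0 (the old, wrong math)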

@@ -1,64 +0,0 @@
From 9f561445271a14fee598e9a793f72297f66eae56 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 10 Apr 2024 17:05:46 +0200
Subject: [PATCH 02/10] fix: Fix recreate check for formats without labelling
support
Formats like LUKS or LVMPV don't support labels, so we need to skip
the label check in BlivetVolume._reformat.
Resolves: RHEL-29874
(cherry picked from commit a70e8108110e30ebc5e7c404d39339c511f9bd09)
---
library/blivet.py | 3 +++
tests/tests_volume_relabel.yml | 20 ++++++++++++++++++++
2 files changed, 23 insertions(+)
diff --git a/library/blivet.py b/library/blivet.py
index 20389ea..18807de 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -826,6 +826,9 @@ class BlivetVolume(BlivetBase):
if ((fmt is None and self._device.format.type is None)
or (fmt is not None and self._device.format.type == fmt.type)):
# format is the same, no need to run reformatting
+ if not hasattr(self._device.format, "label"):
+ # not all formats support labels
+ return
dev_label = '' if self._device.format.label is None else self._device.format.label
if dev_label != fmt.label:
# ...but the label has changed - schedule modification action
diff --git a/tests/tests_volume_relabel.yml b/tests/tests_volume_relabel.yml
index 8916b73..6624fbd 100644
--- a/tests/tests_volume_relabel.yml
+++ b/tests/tests_volume_relabel.yml
@@ -111,6 +111,26 @@
- name: Verify role results
include_tasks: verify-role-results.yml
+ - name: Format the device to LVMPV which doesn't support labels
+ include_role:
+ name: linux-system-roles.storage
+ vars:
+ storage_volumes:
+ - name: test1
+ type: disk
+ fs_type: lvmpv
+ disks: "{{ unused_disks }}"
+
+ - name: Rerun to check we don't try to relabel preexisting LVMPV (regression test for RHEL-29874)
+ include_role:
+ name: linux-system-roles.storage
+ vars:
+ storage_volumes:
+ - name: test1
+ type: disk
+ fs_type: lvmpv
+ disks: "{{ unused_disks }}"
+
- name: Clean up
include_role:
name: linux-system-roles.storage
--
2.46.0
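
The core of the fix is an attribute guard before the label comparison. A minimal Python sketch of the pattern; the format classes here are hypothetical stand-ins for blivet's format objects:

class LuksFormat:
    """Stand-in for a format without label support (no `label` attribute)."""
    type = "luks"

class Ext4Format:
    """Stand-in for a labelled filesystem format."""
    type = "ext4"
    label = "mydata"

def needs_relabel(device_format, requested_label):
    if not hasattr(device_format, "label"):
        return False  # not all formats support labels; skip the check
    current = device_format.label or ""
    return current != requested_label

print(needs_relabel(LuksFormat(), ""))       # False - nothing to relabel
print(needs_relabel(Ext4Format(), "other"))  # True - schedule a relabel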

@@ -1,28 +0,0 @@
From 7abfaeddab812e4eec0c3d3d6bcbabe047722c4f Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 10 Apr 2024 17:08:20 +0200
Subject: [PATCH 03/10] fix: Fix incorrect populate call
`populate()` is a method of DeviceTree, not Blivet.
(cherry picked from commit 6471e65abd429c82df37cbcf07fdf909e4277aa8)
---
library/blivet.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/library/blivet.py b/library/blivet.py
index 18807de..d82b86b 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -630,7 +630,7 @@ class BlivetVolume(BlivetBase):
device.original_format._key_file = self._volume.get('encryption_key')
device.original_format.passphrase = self._volume.get('encryption_password')
if device.isleaf:
- self._blivet.populate()
+ self._blivet.devicetree.populate()
if not device.isleaf:
device = device.children[0]
--
2.46.0

@@ -1,174 +0,0 @@
From 912c33982d9cc412eb72bc9baeab6696e29e7f27 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 28 May 2024 16:23:48 +0200
Subject: [PATCH 04/10] tests: Add a new 'match_sector_size' argument to
find_unused_disks
Some storage pools cannot be created on disks with different
sector sizes, so we want to be able to find unused disks with the
same sector size for our tests.
Related: RHEL-25994
(cherry picked from commit 368ecd0214dbaad7c42547eeac0565e51c924546)
---
library/find_unused_disk.py | 79 ++++++++++++++++++++++------------
tests/get_unused_disk.yml | 1 +
tests/unit/test_unused_disk.py | 6 +--
3 files changed, 56 insertions(+), 30 deletions(-)
diff --git a/library/find_unused_disk.py b/library/find_unused_disk.py
index 09b8ad5..098f235 100644
--- a/library/find_unused_disk.py
+++ b/library/find_unused_disk.py
@@ -39,6 +39,11 @@ options:
description: Specifies which disk interface will be accepted (scsi, virtio, nvme).
default: null
type: str
+
+ match_sector_size:
+ description: Specifies whether all returned disks must have the same (logical) sector size.
+ default: false
+ type: bool
'''
EXAMPLES = '''
@@ -138,13 +143,13 @@ def get_partitions(disk_path):
def get_disks(module):
- buf = module.run_command(["lsblk", "-p", "--pairs", "--bytes", "-o", "NAME,TYPE,SIZE,FSTYPE"])[1]
+ buf = module.run_command(["lsblk", "-p", "--pairs", "--bytes", "-o", "NAME,TYPE,SIZE,FSTYPE,LOG-SEC"])[1]
disks = dict()
for line in buf.splitlines():
if not line:
continue
- m = re.search(r'NAME="(?P<path>[^"]*)" TYPE="(?P<type>[^"]*)" SIZE="(?P<size>\d+)" FSTYPE="(?P<fstype>[^"]*)"', line)
+ m = re.search(r'NAME="(?P<path>[^"]*)" TYPE="(?P<type>[^"]*)" SIZE="(?P<size>\d+)" FSTYPE="(?P<fstype>[^"]*)" LOG-SEC="(?P<ssize>\d+)"', line)
if m is None:
module.log(line)
continue
@@ -152,31 +157,16 @@ def get_disks(module):
if m.group('type') != "disk":
continue
- disks[m.group('path')] = {"type": m.group('type'), "size": m.group('size'), "fstype": m.group('fstype')}
+ disks[m.group('path')] = {"type": m.group('type'), "size": m.group('size'),
+ "fstype": m.group('fstype'), "ssize": m.group('ssize')}
return disks
-def run_module():
- """Create the module"""
- module_args = dict(
- max_return=dict(type='int', required=False, default=10),
- min_size=dict(type='str', required=False, default='0'),
- max_size=dict(type='str', required=False, default='0'),
- with_interface=dict(type='str', required=False, default=None)
- )
-
- result = dict(
- changed=False,
- disks=[]
- )
-
- module = AnsibleModule(
- argument_spec=module_args,
- supports_check_mode=True
- )
-
+def filter_disks(module):
+ disks = {}
max_size = Size(module.params['max_size'])
+
for path, attrs in get_disks(module).items():
if is_ignored(path):
continue
@@ -204,14 +194,49 @@ def run_module():
if not can_open(path):
continue
- result['disks'].append(os.path.basename(path))
- if len(result['disks']) >= module.params['max_return']:
- break
+ disks[path] = attrs
+
+ return disks
+
+
+def run_module():
+ """Create the module"""
+ module_args = dict(
+ max_return=dict(type='int', required=False, default=10),
+ min_size=dict(type='str', required=False, default='0'),
+ max_size=dict(type='str', required=False, default='0'),
+ with_interface=dict(type='str', required=False, default=None),
+ match_sector_size=dict(type='bool', required=False, default=False)
+ )
+
+ result = dict(
+ changed=False,
+ disks=[]
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True
+ )
+
+ disks = filter_disks(module)
+
+ if module.params['match_sector_size']:
+ # pick the most disks with the same sector size
+ sector_sizes = dict()
+ for path, ss in [(path, disks[path]["ssize"]) for path in disks.keys()]:
+ if ss in sector_sizes.keys():
+ sector_sizes[ss].append(path)
+ else:
+ sector_sizes[ss] = [path]
+ disks = [os.path.basename(p) for p in max(sector_sizes.values(), key=len)]
+ else:
+ disks = [os.path.basename(p) for p in disks.keys()]
- if not result['disks']:
+ if not disks:
result['disks'] = "Unable to find unused disk"
else:
- result['disks'].sort()
+ result['disks'] = sorted(disks)[:int(module.params['max_return'])]
module.exit_json(**result)
diff --git a/tests/get_unused_disk.yml b/tests/get_unused_disk.yml
index 685541f..a61487e 100644
--- a/tests/get_unused_disk.yml
+++ b/tests/get_unused_disk.yml
@@ -19,6 +19,7 @@
max_size: "{{ max_size | d(omit) }}"
max_return: "{{ max_return | d(omit) }}"
with_interface: "{{ storage_test_use_interface | d(omit) }}"
+ match_sector_size: "{{ match_sector_size | d(omit) }}"
register: unused_disks_return
- name: Set unused_disks if necessary
diff --git a/tests/unit/test_unused_disk.py b/tests/unit/test_unused_disk.py
index 74c9cf1..ca44d0f 100644
--- a/tests/unit/test_unused_disk.py
+++ b/tests/unit/test_unused_disk.py
@@ -10,9 +10,9 @@ import os
blkid_data_pttype = [('/dev/sdx', '/dev/sdx: PTTYPE=\"dos\"'),
('/dev/sdy', '/dev/sdy: PTTYPE=\"test\"')]
-blkid_data = [('/dev/sdx', 'UUID=\"hello-1234-56789\" TYPE=\"crypto_LUKS\"'),
- ('/dev/sdy', 'UUID=\"this-1s-a-t3st-f0r-ansible\" VERSION=\"LVM2 001\" TYPE=\"LVM2_member\" USAGE=\"raid\"'),
- ('/dev/sdz', 'LABEL=\"/data\" UUID=\"a12bcdef-345g-67h8-90i1-234j56789k10\" VERSION=\"1.0\" TYPE=\"ext4\" USAGE=\"filesystem\"')]
+blkid_data = [('/dev/sdx', 'UUID=\"hello-1234-56789\" TYPE=\"crypto_LUKS\" LOG-SEC=\"512\"'),
+ ('/dev/sdy', 'UUID=\"this-1s-a-t3st-f0r-ansible\" VERSION=\"LVM2 001\" TYPE=\"LVM2_member\" USAGE=\"raid\" LOG-SEC=\"512\"'),
+ ('/dev/sdz', 'LABEL=\"/data\" UUID=\"a12bcdef-345g-67h8-90i1-234j56789k10\" VERSION=\"1.0\" TYPE=\"ext4\" USAGE=\"filesystem\" LOG-SEC=\"512\"')]
holders_data_none = [('/dev/sdx', ''),
('/dev/dm-99', '')]
--
2.46.0
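
The selection logic this patch adds amounts to grouping candidate disks by logical sector size and keeping the largest group. A minimal sketch with hypothetical lsblk-derived data:

from collections import defaultdict

def pick_same_sector_size(disks):
    """Return the device paths of the largest same-sector-size group."""
    groups = defaultdict(list)
    for path, attrs in disks.items():
        groups[attrs["ssize"]].append(path)
    return max(groups.values(), key=len)

candidates = {  # hypothetical output of get_disks()
    "/dev/sda": {"ssize": "512"},
    "/dev/sdb": {"ssize": "512"},
    "/dev/sdc": {"ssize": "4096"},
}
print(pick_same_sector_size(candidates))  # ['/dev/sda', '/dev/sdb']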

@@ -1,96 +0,0 @@
From da871866f07e2990f37b3fdea404bbaf091d81b6 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 30 May 2024 10:41:26 +0200
Subject: [PATCH 05/10] tests: Require same sector size disks for LVM tests
LVM VGs cannot be created on top of disks with different sector
sizes, so for tests that need multiple disks we have to make sure
we get unused disks with the same sector size.
Resolves: RHEL-25994
(cherry picked from commit d8c5938c28417cc905a647ec30246a0fc4d19297)
---
tests/tests_change_fs_use_partitions.yml | 2 +-
tests/tests_create_lvm_cache_then_remove.yml | 1 +
tests/tests_create_thinp_then_remove.yml | 1 +
tests/tests_fatals_cache_volume.yml | 1 +
tests/tests_lvm_multiple_disks_multiple_volumes.yml | 1 +
tests/tests_lvm_pool_members.yml | 1 +
6 files changed, 6 insertions(+), 1 deletion(-)
diff --git a/tests/tests_change_fs_use_partitions.yml b/tests/tests_change_fs_use_partitions.yml
index 52afb7f..87fed69 100644
--- a/tests/tests_change_fs_use_partitions.yml
+++ b/tests/tests_change_fs_use_partitions.yml
@@ -31,7 +31,7 @@
include_tasks: get_unused_disk.yml
vars:
min_size: "{{ volume_size }}"
- max_return: 2
+ max_return: 1
- name: Create an LVM partition with the default file system type
include_role:
diff --git a/tests/tests_create_lvm_cache_then_remove.yml b/tests/tests_create_lvm_cache_then_remove.yml
index 1769a78..6b5d0a5 100644
--- a/tests/tests_create_lvm_cache_then_remove.yml
+++ b/tests/tests_create_lvm_cache_then_remove.yml
@@ -57,6 +57,7 @@
min_size: "{{ volume_group_size }}"
max_return: 2
disks_needed: 2
+ match_sector_size: true
- name: Create a cached LVM logical volume under volume group 'foo'
include_role:
diff --git a/tests/tests_create_thinp_then_remove.yml b/tests/tests_create_thinp_then_remove.yml
index bf6c4b1..2e7f046 100644
--- a/tests/tests_create_thinp_then_remove.yml
+++ b/tests/tests_create_thinp_then_remove.yml
@@ -23,6 +23,7 @@
include_tasks: get_unused_disk.yml
vars:
max_return: 3
+ match_sector_size: true
- name: Create a thinpool device
include_role:
diff --git a/tests/tests_fatals_cache_volume.yml b/tests/tests_fatals_cache_volume.yml
index c14cf3f..fcfdbb8 100644
--- a/tests/tests_fatals_cache_volume.yml
+++ b/tests/tests_fatals_cache_volume.yml
@@ -29,6 +29,7 @@
vars:
max_return: 2
disks_needed: 2
+ match_sector_size: true
- name: Verify that creating a cached partition volume fails
include_tasks: verify-role-failed.yml
diff --git a/tests/tests_lvm_multiple_disks_multiple_volumes.yml b/tests/tests_lvm_multiple_disks_multiple_volumes.yml
index 9a01ec5..68f2e76 100644
--- a/tests/tests_lvm_multiple_disks_multiple_volumes.yml
+++ b/tests/tests_lvm_multiple_disks_multiple_volumes.yml
@@ -29,6 +29,7 @@
min_size: "{{ volume_group_size }}"
max_return: 2
disks_needed: 2
+ match_sector_size: true
- name: >-
Create a logical volume spanning two physical volumes that changes its
diff --git a/tests/tests_lvm_pool_members.yml b/tests/tests_lvm_pool_members.yml
index d1b941d..63c10c7 100644
--- a/tests/tests_lvm_pool_members.yml
+++ b/tests/tests_lvm_pool_members.yml
@@ -59,6 +59,7 @@
vars:
min_size: "{{ volume_group_size }}"
disks_needed: 3
+ match_sector_size: true
- name: Create volume group 'foo' with 3 PVs
include_role:
--
2.46.0

@@ -1,41 +0,0 @@
From 705a9db65a230013a9118481082d2bb548cd113d Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 31 May 2024 06:31:52 +0200
Subject: [PATCH 06/10] fix: Fix 'possibly-used-before-assignment' pylint
issues (#440)
The latest pylint added a new check for values possibly used before
assignment. This fixes the issues it found in the blivet module. Some
of these are false positives, some are real potential issues.
(cherry picked from commit bfaae50586681bb4b0fcad5df6f6adde2b7c8502)
---
library/blivet.py | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/library/blivet.py b/library/blivet.py
index d82b86b..a6715d9 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -642,6 +642,9 @@ class BlivetVolume(BlivetBase):
self._device = None
return # TODO: see if we can create this device w/ the specified name
+ # pylint doesn't understand that "luks_fmt" is always set when "encrypted" is true
+ # pylint: disable=unknown-option-value
+ # pylint: disable=possibly-used-before-assignment
def _update_from_device(self, param_name):
""" Return True if param_name's value was retrieved from a looked-up device. """
log.debug("Updating volume settings from device: %r", self._device)
@@ -1717,6 +1720,8 @@ class BlivetLVMPool(BlivetPool):
if auto_size_dev_count > 0:
calculated_thinlv_size = available_space / auto_size_dev_count
+ else:
+ calculated_thinlv_size = available_space
for thinlv in thinlvs_to_create:
--
2.46.0
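
The second hunk addresses the classic shape this pylint check flags: a variable assigned only in one conditional branch and read afterwards. A minimal reproduction, simplified from the thin-LV sizing code:

def thinlv_size(available_space, auto_size_dev_count):
    if auto_size_dev_count > 0:
        calculated_thinlv_size = available_space / auto_size_dev_count
    else:
        # Without this branch, pylint reports
        # 'possibly-used-before-assignment' on the return below.
        calculated_thinlv_size = available_space
    return calculated_thinlv_size

print(thinlv_size(100, 4))  # 25.0
print(thinlv_size(100, 0))  # 100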

@@ -1,54 +0,0 @@
From 18edc9af26684f03e44fe2e22c82a8f93182da4a Mon Sep 17 00:00:00 2001
From: Rich Megginson <rmeggins@redhat.com>
Date: Wed, 5 Jun 2024 08:49:19 -0600
Subject: [PATCH 07/10] test: lsblk can return LOG_SEC or LOG-SEC
get_unused_disk is broken on some systems because `lsblk ... LOG-SEC` can
return `LOG_SEC` with an underscore instead of the requested
`LOG-SEC` with a dash.
(cherry picked from commit 64333ce8aa42f4b961c39a443ac43cc6590097b3)
---
library/find_unused_disk.py | 4 ++--
tests/get_unused_disk.yml | 9 +++++++++
2 files changed, 11 insertions(+), 2 deletions(-)
diff --git a/library/find_unused_disk.py b/library/find_unused_disk.py
index 098f235..270fb58 100644
--- a/library/find_unused_disk.py
+++ b/library/find_unused_disk.py
@@ -149,9 +149,9 @@ def get_disks(module):
if not line:
continue
- m = re.search(r'NAME="(?P<path>[^"]*)" TYPE="(?P<type>[^"]*)" SIZE="(?P<size>\d+)" FSTYPE="(?P<fstype>[^"]*)" LOG-SEC="(?P<ssize>\d+)"', line)
+ m = re.search(r'NAME="(?P<path>[^"]*)" TYPE="(?P<type>[^"]*)" SIZE="(?P<size>\d+)" FSTYPE="(?P<fstype>[^"]*)" LOG[_-]SEC="(?P<ssize>\d+)"', line)
if m is None:
- module.log(line)
+ module.log("Line did not match: " + line)
continue
if m.group('type') != "disk":
diff --git a/tests/get_unused_disk.yml b/tests/get_unused_disk.yml
index a61487e..0402770 100644
--- a/tests/get_unused_disk.yml
+++ b/tests/get_unused_disk.yml
@@ -22,6 +22,15 @@
match_sector_size: "{{ match_sector_size | d(omit) }}"
register: unused_disks_return
+- name: Debug why there are no unused disks
+ shell: |
+ set -x
+ exec 1>&2
+ lsblk -p --pairs --bytes -o NAME,TYPE,SIZE,FSTYPE,LOG-SEC
+ journalctl -ex
+ changed_when: false
+ when: "'Unable to find unused disk' in unused_disks_return.disks"
+
- name: Set unused_disks if necessary
set_fact:
unused_disks: "{{ unused_disks_return.disks }}"
--
2.46.0
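
The relaxed pattern accepts both spellings lsblk may emit. A small Python sketch of the match against two hypothetical sample lines:

import re

LINE_RE = re.compile(
    r'NAME="(?P<path>[^"]*)" TYPE="(?P<type>[^"]*)" SIZE="(?P<size>\d+)"'
    r' FSTYPE="(?P<fstype>[^"]*)" LOG[_-]SEC="(?P<ssize>\d+)"'
)

for line in (
    'NAME="/dev/sdb" TYPE="disk" SIZE="21474836480" FSTYPE="" LOG-SEC="512"',
    'NAME="/dev/sdb" TYPE="disk" SIZE="21474836480" FSTYPE="" LOG_SEC="512"',
):
    m = LINE_RE.search(line)
    print(m.group("path"), m.group("ssize"))  # matches either spelling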

@@ -1,34 +0,0 @@
From aa6e494963a3bded3b1ca7ef5a81e0106e68d5bc Mon Sep 17 00:00:00 2001
From: Jan Pokorny <japokorn@redhat.com>
Date: Thu, 6 Jun 2024 11:54:48 +0200
Subject: [PATCH 08/10] test: lvm pool members test fix
tests_lvm_pool_members started to fail. It tried to create a device with
a requested size (20m) that was less than the minimal allowed size (300m) for
that type of volume. The role automatically resized the device to the allowed
size, which led to a discrepancy between the actual and expected size values.
Increasing the requested device size to be the same as or larger than the
minimum fixes the issue.
(cherry picked from commit ee740b7b14d09e09a26dd5eb95e8950aeb15147d)
---
tests/tests_lvm_pool_members.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/tests_lvm_pool_members.yml b/tests/tests_lvm_pool_members.yml
index 63c10c7..320626e 100644
--- a/tests/tests_lvm_pool_members.yml
+++ b/tests/tests_lvm_pool_members.yml
@@ -6,7 +6,7 @@
storage_safe_mode: false
storage_use_partitions: true
volume_group_size: '10g'
- volume_size: '20m'
+ volume_size: '300m'
tags:
- tests::lvm
--
2.46.0

@@ -1,40 +0,0 @@
From d2b59ac3758f51ffac5156e9f006b7ce9d8a28eb Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 4 Jun 2024 10:30:03 +0200
Subject: [PATCH 09/10] fix: Fix expected error message in tests_misc.yml
Different versions of blivet return a different error message when
trying to create a filesystem with invalid parameters.
On Fedora 39 and older:
"Failed to commit changes to disk: (FSError('format failed: 1'),
'/dev/mapper/foo-test1')"
On Fedora 40 and newer:
"Failed to commit changes to disk: Process reported exit code 1:
mke2fs: invalid block size - 512\n"
(cherry picked from commit 7ef66d85bd52f339483b24dbb8bc66e22054b378)
---
tests/tests_misc.yml | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/tests/tests_misc.yml b/tests/tests_misc.yml
index 363d843..432ec16 100644
--- a/tests/tests_misc.yml
+++ b/tests/tests_misc.yml
@@ -68,8 +68,9 @@
include_tasks: verify-role-failed.yml
vars:
__storage_failed_regex: >-
- Failed to commit changes to disk.*FSError.*format failed:
- 1.*/dev/mapper/foo-test1
+ Failed to commit changes to disk.*(FSError.*format failed:
+ 1.*/dev/mapper/foo-test1|
+ Process reported exit code 1: mke2fs: invalid block size - 512)
__storage_failed_msg: >-
Unexpected behavior when creating ext4 filesystem with invalid
parameter
--
2.46.0
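
The reworked check is a single regex with an alternation covering both blivet error formats. A sketch of the idea, tested against the two messages quoted in the commit message:

import re

pattern = (r"Failed to commit changes to disk.*"
           r"(FSError.*format failed: 1.*/dev/mapper/foo-test1"
           r"|Process reported exit code 1: mke2fs: invalid block size - 512)")

old_msg = ("Failed to commit changes to disk: (FSError('format failed: 1'), "
           "'/dev/mapper/foo-test1')")
new_msg = ("Failed to commit changes to disk: Process reported exit code 1: "
           "mke2fs: invalid block size - 512")

print(bool(re.search(pattern, old_msg)))  # True (Fedora 39 and older)
print(bool(re.search(pattern, new_msg)))  # True (Fedora 40 and newer)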

@@ -1,180 +0,0 @@
From a86f7e013fe881e477b65509363bbb5af851662f Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 12 Apr 2024 14:45:15 +0200
Subject: [PATCH 10/10] tests: Use blockdev_info to check volume mount points
We can use the information from `lsblk` that we already use for other
checks instead of the Ansible mountinfo facts. This makes the check
simpler and also makes it easier to verify Stratis volume mount
points, given the complicated structure of Stratis devices in /dev.
(cherry picked from commit 10e657bde68ffa9495b2441ed9f472cf79edbb19)
---
library/blockdev_info.py | 2 +-
tests/test-verify-volume-fs.yml | 51 ++++++++++++++++--------------
tests/test-verify-volume-mount.yml | 48 +++++-----------------------
3 files changed, 37 insertions(+), 64 deletions(-)
diff --git a/library/blockdev_info.py b/library/blockdev_info.py
index 13858fb..ec018de 100644
--- a/library/blockdev_info.py
+++ b/library/blockdev_info.py
@@ -64,7 +64,7 @@ def fixup_md_path(path):
def get_block_info(module):
- buf = module.run_command(["lsblk", "-o", "NAME,FSTYPE,LABEL,UUID,TYPE,SIZE", "-p", "-P", "-a"])[1]
+ buf = module.run_command(["lsblk", "-o", "NAME,FSTYPE,LABEL,UUID,TYPE,SIZE,MOUNTPOINT", "-p", "-P", "-a"])[1]
info = dict()
for line in buf.splitlines():
dev = dict()
diff --git a/tests/test-verify-volume-fs.yml b/tests/test-verify-volume-fs.yml
index 8e488c5..63b2770 100644
--- a/tests/test-verify-volume-fs.yml
+++ b/tests/test-verify-volume-fs.yml
@@ -1,26 +1,31 @@
---
# type
-- name: Verify fs type
- assert:
- that: storage_test_blkinfo.info[storage_test_volume._device].fstype ==
- storage_test_volume.fs_type or
- (storage_test_blkinfo.info[storage_test_volume._device].fstype | length
- == 0 and storage_test_volume.fs_type == "unformatted")
- when: storage_test_volume.fs_type and _storage_test_volume_present
+- name: Check volume filesystem
+ when: storage_test_volume.type != "stratis"
+ block:
+ - name: Verify fs type
+ assert:
+ that: storage_test_blkinfo.info[storage_test_volume._device].fstype ==
+ storage_test_volume.fs_type or
+ (storage_test_blkinfo.info[storage_test_volume._device].fstype | length
+ == 0 and storage_test_volume.fs_type == "unformatted")
+ when:
+ - storage_test_volume.fs_type
+ - _storage_test_volume_present
-# label
-- name: Verify fs label
- assert:
- that: storage_test_blkinfo.info[storage_test_volume._device].label ==
- storage_test_volume.fs_label
- msg: >-
- Volume '{{ storage_test_volume.name }}' labels do not match when they
- should
- ('{{ storage_test_blkinfo.info[storage_test_volume._device].label }}',
- '{{ storage_test_volume.fs_label }}')
- when:
- - _storage_test_volume_present | bool
- # label for GFS2 is set manually with the extra `-t` fs_create_options
- # so we can't verify it here because it was not set with fs_label so
- # the label from blkinfo doesn't match the expected "empty" fs_label
- - storage_test_volume.fs_type != "gfs2"
+ # label
+ - name: Verify fs label
+ assert:
+ that: storage_test_blkinfo.info[storage_test_volume._device].label ==
+ storage_test_volume.fs_label
+ msg: >-
+ Volume '{{ storage_test_volume.name }}' labels do not match when they
+ should
+ ('{{ storage_test_blkinfo.info[storage_test_volume._device].label }}',
+ '{{ storage_test_volume.fs_label }}')
+ when:
+ - _storage_test_volume_present | bool
+ # label for GFS2 is set manually with the extra `-t` fs_create_options
+ # so we can't verify it here because it was not set with fs_label so
+ # the label from blkinfo doesn't match the expected "empty" fs_label
+ - storage_test_volume.fs_type != "gfs2"
diff --git a/tests/test-verify-volume-mount.yml b/tests/test-verify-volume-mount.yml
index cf86b34..17d2a01 100644
--- a/tests/test-verify-volume-mount.yml
+++ b/tests/test-verify-volume-mount.yml
@@ -15,20 +15,13 @@
- name: Set some facts
set_fact:
- storage_test_mount_device_matches: "{{ ansible_mounts |
- selectattr('device', 'match', '^' ~ storage_test_device_path ~ '$') |
- list }}"
- storage_test_mount_point_matches: "{{ ansible_mounts |
- selectattr('mount', 'match',
- '^' ~ mount_prefix ~ storage_test_volume.mount_point ~ '$') |
- list if storage_test_volume.mount_point else [] }}"
- storage_test_mount_expected_match_count: "{{ 1
- if _storage_test_volume_present and storage_test_volume.mount_point and
- storage_test_volume.mount_point.startswith('/')
- else 0 }}"
storage_test_swap_expected_matches: "{{ 1 if
_storage_test_volume_present and
storage_test_volume.fs_type == 'swap' else 0 }}"
+ storage_test_mount_expected_mount_point: "{{
+ '[SWAP]' if storage_test_volume.fs_type == 'swap' else
+ '' if storage_test_volume.mount_point == 'none' else
+ mount_prefix + storage_test_volume.mount_point if storage_test_volume.mount_point else '' }}"
vars:
# assumes /opt which is /var/opt in ostree
mount_prefix: "{{ '/var' if __storage_is_ostree | d(false)
@@ -50,23 +43,12 @@
#
- name: Verify the current mount state by device
assert:
- that: storage_test_mount_device_matches | length ==
- storage_test_mount_expected_match_count | int
+ that: storage_test_blkinfo.info[storage_test_volume._device].mountpoint ==
+ storage_test_mount_expected_mount_point
msg: >-
Found unexpected mount state for volume
'{{ storage_test_volume.name }}' device
- when: _storage_test_volume_present and storage_test_volume.mount_point
-
-#
-# Verify mount directory (state, owner, group, permissions).
-#
-- name: Verify the current mount state by mount point
- assert:
- that: storage_test_mount_point_matches | length ==
- storage_test_mount_expected_match_count | int
- msg: >-
- Found unexpected mount state for volume
- '{{ storage_test_volume.name }}' mount point
+ when: _storage_test_volume_present
- name: Verify mount directory user
assert:
@@ -104,18 +86,6 @@
storage_test_volume.mount_point and
storage_test_volume.mount_mode
-#
-# Verify mount fs type.
-#
-- name: Verify the mount fs type
- assert:
- that: storage_test_mount_point_matches[0].fstype ==
- storage_test_volume.fs_type
- msg: >-
- Found unexpected mount state for volume
- '{{ storage_test_volume.name }} fs type
- when: storage_test_mount_expected_match_count | int == 1
-
#
# Verify swap status.
#
@@ -145,10 +115,8 @@
- name: Unset facts
set_fact:
- storage_test_mount_device_matches: null
- storage_test_mount_point_matches: null
- storage_test_mount_expected_match_count: null
storage_test_swap_expected_matches: null
storage_test_sys_node: null
storage_test_swaps: null
storage_test_found_mount_stat: null
+ storage_test_mount_expected_mount_point: null
--
2.46.0
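
With MOUNTPOINT added to the lsblk columns, the mount check reduces to comparing one field of blockdev_info against an expected string ('[SWAP]' for swap, empty for unmounted). A sketch parsing one hypothetical lsblk -P line:

import re

line = ('NAME="/dev/sda1" FSTYPE="ext4" LABEL="" UUID="c3f9a1" '
        'TYPE="part" SIZE="1073741824" MOUNTPOINT="/opt/test1"')

dev = dict(re.findall(r'([A-Z-]+)="([^"]*)"', line))
expected_mount_point = "/opt/test1"
print(dev["MOUNTPOINT"] == expected_mount_point)  # True
# A swap volume would report MOUNTPOINT="[SWAP]", an unmounted one "".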

@@ -1,26 +0,0 @@
From 36acf32d30d106159ba9f2fa88d723d9577c9f15 Mon Sep 17 00:00:00 2001
From: Samuel Bancal <Samuel.Bancal@groupe-t2i.com>
Date: Thu, 14 Mar 2024 10:15:11 +0100
Subject: [PATCH 101/115] fix: Add support for --check flag
Fix: https://github.com/linux-system-roles/podman/issues/133
(cherry picked from commit a47e6a95e2a5ee70714bf315d3e03310365d3650)
---
tasks/main.yml | 1 +
1 file changed, 1 insertion(+)
diff --git a/tasks/main.yml b/tasks/main.yml
index 1b9ca4a..61f1d1c 100644
--- a/tasks/main.yml
+++ b/tasks/main.yml
@@ -21,6 +21,7 @@
when: (__podman_packages | difference(ansible_facts.packages))
- name: Get podman version
+ check_mode: false
command: podman --version
changed_when: false
register: __podman_version_output
--
2.46.0

@@ -1,56 +0,0 @@
From 53f83475c59092e2c23d1957c2fc24c8ca4b6ad9 Mon Sep 17 00:00:00 2001
From: Rich Megginson <rmeggins@redhat.com>
Date: Tue, 9 Apr 2024 18:27:25 -0600
Subject: [PATCH 102/115] fix: use correct user for cancel linger file name
Cause: When processing a list of kube or quadlet items, the
code was using the user id associated with the list, not the
item, to specify the linger filename.
Consequence: The linger file does not exist, so the code
does not cancel linger for the actual user.
Fix: Use the correct username to construct the linger filename.
Result: Lingering is cancelled for the correct users.
QE: The test is now in tests_basic.yml
Signed-off-by: Rich Megginson <rmeggins@redhat.com>
(cherry picked from commit 67b88b9aa0a1b1123c2ae24bb7ca4a527924cd13)
---
tasks/cancel_linger.yml | 2 +-
tests/tests_basic.yml | 7 +++++++
2 files changed, 8 insertions(+), 1 deletion(-)
diff --git a/tasks/cancel_linger.yml b/tasks/cancel_linger.yml
index 761778b..ede71fe 100644
--- a/tasks/cancel_linger.yml
+++ b/tasks/cancel_linger.yml
@@ -59,4 +59,4 @@
- __podman_linger_secrets.stdout == ""
changed_when: true
args:
- removes: /var/lib/systemd/linger/{{ __podman_user }}
+ removes: /var/lib/systemd/linger/{{ __podman_linger_user }}
diff --git a/tests/tests_basic.yml b/tests/tests_basic.yml
index a9f01c9..d4f9238 100644
--- a/tests/tests_basic.yml
+++ b/tests/tests_basic.yml
@@ -409,6 +409,13 @@
^[ ]*podman-kube@.+-{{ item[0] }}[.]yml[.]service[ ]+loaded[
]+active
+ - name: Ensure no linger
+ stat:
+ path: /var/lib/systemd/linger/{{ item[1] }}
+ loop: "{{ test_names_users }}"
+ register: __stat
+ failed_when: __stat.stat.exists
+
rescue:
- name: Dump journal
command: journalctl -ex
--
2.46.0

@@ -1,28 +0,0 @@
From dd93ef65b0d1929184d458914386086fca8b8d7a Mon Sep 17 00:00:00 2001
From: Rich Megginson <rmeggins@redhat.com>
Date: Wed, 10 Apr 2024 16:06:28 -0600
Subject: [PATCH 103/115] test: do not check for root linger
Do not check if there is a linger file for root.
Signed-off-by: Rich Megginson <rmeggins@redhat.com>
(cherry picked from commit 2b29e049daa28ba6c3b38f514cff9c62be5f3caf)
---
tests/tests_basic.yml | 1 +
1 file changed, 1 insertion(+)
diff --git a/tests/tests_basic.yml b/tests/tests_basic.yml
index d4f9238..d578b15 100644
--- a/tests/tests_basic.yml
+++ b/tests/tests_basic.yml
@@ -412,6 +412,7 @@
- name: Ensure no linger
stat:
path: /var/lib/systemd/linger/{{ item[1] }}
+ when: item[1] != "root"
loop: "{{ test_names_users }}"
register: __stat
failed_when: __stat.stat.exists
--
2.46.0

@@ -1,210 +0,0 @@
From b2e79348094ea8d89b71727d82a80a9f3cfbb1ce Mon Sep 17 00:00:00 2001
From: Rich Megginson <rmeggins@redhat.com>
Date: Tue, 9 Apr 2024 18:28:19 -0600
Subject: [PATCH 104/115] fix: do not use become for changing hostdir
ownership, and expose subuid/subgid info
When creating host directories, do not use `become`, because if
it needs to change ownership, that must be done by `root`, not
as the rootless podman user.
In order to test this, I have changed the role to export the subuid and subgid
information for the rootless users as two dictionaries:
`podman_subuid_info` and `podman_subgid_info`. See `README.md` for
usage.
NOTE that depending on the namespace used by your containers, you might not
be able to use the subuid and subgid information, which comes from `getsubids`
if available, or directly from the files `/etc/subuid` and `/etc/subgid` on
the host.
QE: The test tests_basic.yml has been extended for this.
Signed-off-by: Rich Megginson <rmeggins@redhat.com>
(cherry picked from commit 3d02eb725355088df6c707717547f5ad6b7c400c)
---
README.md | 28 ++++++++++++
tasks/create_update_kube_spec.yml | 2 -
tasks/create_update_quadlet_spec.yml | 2 -
tasks/handle_user_group.yml | 66 +++++++++++++++++++++-------
tests/tests_basic.yml | 2 +
5 files changed, 79 insertions(+), 21 deletions(-)
diff --git a/README.md b/README.md
index ea1edfb..e5a7c12 100644
--- a/README.md
+++ b/README.md
@@ -418,6 +418,34 @@ PodmanArgs=--secret=my-app-pwd,type=env,target=MYAPP_PASSWORD
{% endif %}
```
+### podman_subuid_info, podman_subgid_info
+
+The role needs to ensure any users and groups are present in the subuid and
+subgid information. Once it extracts this data, it will be available in
+`podman_subuid_info` and `podman_subgid_info`. These are dicts. The key is the
+user or group name, and the value is a `dict` with two fields:
+
+* `start` - the start of the id range for that user or group, as an `int`
+* `range` - the id range for that user or group, as an `int`
+
+```yaml
+podman_host_directories:
+ "/var/lib/db":
+ mode: "0777"
+ owner: "{{ 1001 + podman_subuid_info['dbuser']['start'] - 1 }}"
+ group: "{{ 1001 + podman_subgid_info['dbgroup']['start'] - 1 }}"
+```
+
+Where `1001` is the uid for user `dbuser`, and `1001` is the gid for group
+`dbgroup`.
+
+**NOTE**: depending on the namespace used by your containers, you might not be
+able to use the subuid and subgid information, which comes from `getsubids` if
+available, or directly from the files `/etc/subuid` and `/etc/subgid` on the
+host. See
+[podman user namespace modes](https://www.redhat.com/sysadmin/rootless-podman-user-namespace-modes)
+for more information.
+
## Example Playbooks
Create rootless container with volume mount:
diff --git a/tasks/create_update_kube_spec.yml b/tasks/create_update_kube_spec.yml
index 95d7d35..7a8ba9c 100644
--- a/tasks/create_update_kube_spec.yml
+++ b/tasks/create_update_kube_spec.yml
@@ -32,8 +32,6 @@
__defaults: "{{ {'path': item} | combine(__podman_hostdirs_defaults) |
combine(__owner_group) }}"
loop: "{{ __podman_volumes }}"
- become: "{{ __podman_rootless | ternary(true, omit) }}"
- become_user: "{{ __podman_rootless | ternary(__podman_user, omit) }}"
when:
- podman_create_host_directories | bool
- __podman_volumes | d([]) | length > 0
diff --git a/tasks/create_update_quadlet_spec.yml b/tasks/create_update_quadlet_spec.yml
index c3e0095..062c105 100644
--- a/tasks/create_update_quadlet_spec.yml
+++ b/tasks/create_update_quadlet_spec.yml
@@ -16,8 +16,6 @@
__defaults: "{{ {'path': item} | combine(__podman_hostdirs_defaults) |
combine(__owner_group) }}"
loop: "{{ __podman_volumes }}"
- become: "{{ __podman_rootless | ternary(true, omit) }}"
- become_user: "{{ __podman_rootless | ternary(__podman_user, omit) }}"
when:
- podman_create_host_directories | bool
- __podman_volumes | d([]) | length > 0
diff --git a/tasks/handle_user_group.yml b/tasks/handle_user_group.yml
index 17300b6..ea9984d 100644
--- a/tasks/handle_user_group.yml
+++ b/tasks/handle_user_group.yml
@@ -52,10 +52,26 @@
- name: Check user with getsubids
command: getsubids {{ __podman_user | quote }}
changed_when: false
+ register: __podman_register_subuids
- name: Check group with getsubids
command: getsubids -g {{ __podman_group_name | quote }}
changed_when: false
+ register: __podman_register_subgids
+
+ - name: Set user subuid and subgid info
+ set_fact:
+ podman_subuid_info: "{{ podman_subuid_info | d({}) |
+ combine({__podman_user:
+ {'start': __subuid_data[2] | int, 'range': __subuid_data[3] | int}})
+ if __subuid_data | length > 0 else podman_subuid_info | d({}) }}"
+ podman_subgid_info: "{{ podman_subgid_info | d({}) |
+ combine({__podman_group_name:
+ {'start': __subgid_data[2] | int, 'range': __subgid_data[3] | int}})
+ if __subgid_data | length > 0 else podman_subgid_info | d({}) }}"
+ vars:
+ __subuid_data: "{{ __podman_register_subuids.stdout.split() | list }}"
+ __subgid_data: "{{ __podman_register_subgids.stdout.split() | list }}"
- name: Check subuid, subgid files if no getsubids
when:
@@ -63,32 +79,48 @@
- __podman_user not in ["root", "0"]
- __podman_group not in ["root", "0"]
block:
- - name: Check if user is in subuid file
- find:
- path: /etc
- pattern: subuid
- use_regex: true
- contains: "^{{ __podman_user }}:.*$"
- register: __podman_uid_line_found
+ - name: Get subuid file
+ slurp:
+ path: /etc/subuid
+ register: __podman_register_subuids
+
+ - name: Get subgid file
+ slurp:
+ path: /etc/subgid
+ register: __podman_register_subgids
+
+ - name: Set user subuid and subgid info
+ set_fact:
+ podman_subuid_info: "{{ podman_subuid_info | d({}) |
+ combine({__podman_user:
+ {'start': __subuid_data[1] | int, 'range': __subuid_data[2] | int}})
+ if __subuid_data else podman_subuid_info | d({}) }}"
+ podman_subgid_info: "{{ podman_subgid_info | d({}) |
+ combine({__podman_group_name:
+ {'start': __subgid_data[1] | int, 'range': __subgid_data[2] | int}})
+ if __subgid_data else podman_subgid_info | d({}) }}"
+ vars:
+ __subuid_match_line: "{{
+ (__podman_register_subuids.content | b64decode).split('\n') | list |
+ select('match', '^' ~ __podman_user ~ ':') | list }}"
+ __subuid_data: "{{ __subuid_match_line[0].split(':') | list
+ if __subuid_match_line else null }}"
+ __subgid_match_line: "{{
+ (__podman_register_subgids.content | b64decode).split('\n') | list |
+ select('match', '^' ~ __podman_group_name ~ ':') | list }}"
+ __subgid_data: "{{ __subgid_match_line[0].split(':') | list
+ if __subgid_match_line else null }}"
- name: Fail if user not in subuid file
fail:
msg: >
The given podman user [{{ __podman_user }}] is not in the
/etc/subuid file - cannot continue
- when: not __podman_uid_line_found.matched
-
- - name: Check if group is in subgid file
- find:
- path: /etc
- pattern: subgid
- use_regex: true
- contains: "^{{ __podman_group_name }}:.*$"
- register: __podman_gid_line_found
+ when: not __podman_user in podman_subuid_info
- name: Fail if group not in subgid file
fail:
msg: >
The given podman group [{{ __podman_group_name }}] is not in the
/etc/subgid file - cannot continue
- when: not __podman_gid_line_found.matched
+ when: not __podman_group_name in podman_subuid_info
diff --git a/tests/tests_basic.yml b/tests/tests_basic.yml
index d578b15..121c3a7 100644
--- a/tests/tests_basic.yml
+++ b/tests/tests_basic.yml
@@ -8,6 +8,8 @@
podman_host_directories:
"/tmp/httpd1-create":
mode: "0777"
+ owner: "{{ 1001 + podman_subuid_info['user1']['start'] - 1 }}"
+ group: "{{ 1001 + podman_subgid_info['user1']['start'] - 1 }}"
podman_run_as_user: root
test_names_users:
- [httpd1, user1, 1001]
--
2.46.0
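
The shape of the exported dicts can be illustrated by parsing one /etc/subuid line by hand; 'dbuser:100000:65536' is a hypothetical entry:

def parse_subid_line(line):
    """Map 'name:start:range' to the podman_subuid_info value shape."""
    name, start, id_range = line.strip().split(":")
    return name, {"start": int(start), "range": int(id_range)}

user, info = parse_subid_line("dbuser:100000:65536")
podman_subuid_info = {user: info}

# Host uid that container uid 1001 maps to, as in the README example:
print(1001 + podman_subuid_info["dbuser"]["start"] - 1)  # 101000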

@@ -1,42 +0,0 @@
From 7978bed4d52e44feae114ba56e9b5035b7dd2c1c Mon Sep 17 00:00:00 2001
From: Rich Megginson <rmeggins@redhat.com>
Date: Wed, 17 Apr 2024 10:14:21 -0600
Subject: [PATCH 105/115] chore: change no_log false to true; fix comment
Forgot to change a `no_log: false` back to `no_log: true` after debugging.
Also fix an error in a comment.
Signed-off-by: Rich Megginson <rmeggins@redhat.com>
(cherry picked from commit b37ee8fc7e12317660cca765760c32bd4ba91035)
---
tasks/handle_secret.yml | 2 +-
vars/main.yml | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/tasks/handle_secret.yml b/tasks/handle_secret.yml
index b3677ef..02bc15b 100644
--- a/tasks/handle_secret.yml
+++ b/tasks/handle_secret.yml
@@ -39,7 +39,7 @@
become: "{{ __podman_rootless | ternary(true, omit) }}"
become_user: "{{ __podman_rootless | ternary(__podman_user, omit) }}"
when: not __podman_rootless or __podman_xdg_stat.stat.exists
- no_log: false
+ no_log: true
vars:
__params: |
{% set rc = {} %}
diff --git a/vars/main.yml b/vars/main.yml
index 47293c5..38402ff 100644
--- a/vars/main.yml
+++ b/vars/main.yml
@@ -74,5 +74,5 @@ __podman_user_kube_path: "/.config/containers/ansible-kubernetes.d"
# location for system quadlet files
__podman_system_quadlet_path: "/etc/containers/systemd"
-# location for user kubernetes yaml files
+# location for user quadlet files
__podman_user_quadlet_path: "/.config/containers/systemd"
--
2.46.0

@@ -1,214 +0,0 @@
From 07053a415b4a0bde557f28f6f607250915e908e6 Mon Sep 17 00:00:00 2001
From: Rich Megginson <rmeggins@redhat.com>
Date: Wed, 17 Apr 2024 11:35:52 -0600
Subject: [PATCH 106/115] fix: make kube cleanup idempotent
Cause: The task that calls podman_play was not checking if the kube yaml
file existed when cleaning up.
Consequence: The task would give an error that the pod could not be
removed.
Fix: Do not attempt to remove the pod if the kube yaml file does not
exist.
Result: Calling the podman role repeatedly to remove a kube spec
will not fail and will not report changes for subsequent removals.
QE: tests_basic.yml has been changed to check for this case
Signed-off-by: Rich Megginson <rmeggins@redhat.com>
(cherry picked from commit e506f39b6608613a5801190091a72b013b85a888)
---
tasks/cleanup_kube_spec.yml | 9 +++++-
tests/tests_basic.yml | 62 ++++++++++++++++++++++++++-----------
2 files changed, 52 insertions(+), 19 deletions(-)
diff --git a/tasks/cleanup_kube_spec.yml b/tasks/cleanup_kube_spec.yml
index c864179..b6b47bd 100644
--- a/tasks/cleanup_kube_spec.yml
+++ b/tasks/cleanup_kube_spec.yml
@@ -25,6 +25,11 @@
vars:
__service_error: Could not find the requested service
+- name: Check if kube file exists
+ stat:
+ path: "{{ __podman_kube_file }}"
+ register: __podman_kube_file_stat
+
- name: Remove pod/containers
containers.podman.podman_play: "{{ __podman_kube_spec |
combine({'kube_file': __podman_kube_file}) }}"
@@ -33,7 +38,9 @@
become: "{{ __podman_rootless | ternary(true, omit) }}"
become_user: "{{ __podman_rootless | ternary(__podman_user, omit) }}"
register: __podman_removed
- when: not __podman_rootless or __podman_xdg_stat.stat.exists
+ when:
+ - not __podman_rootless or __podman_xdg_stat.stat.exists
+ - __podman_kube_file_stat.stat.exists
- name: Remove kubernetes yaml file
file:
diff --git a/tests/tests_basic.yml b/tests/tests_basic.yml
index 121c3a7..b8ddc50 100644
--- a/tests/tests_basic.yml
+++ b/tests/tests_basic.yml
@@ -6,13 +6,16 @@
- vars/test_vars.yml
vars:
podman_host_directories:
- "/tmp/httpd1-create":
+ "{{ __test_tmpdir.path ~ '/httpd1-create' }}":
mode: "0777"
- owner: "{{ 1001 + podman_subuid_info['user1']['start'] - 1 }}"
- group: "{{ 1001 + podman_subgid_info['user1']['start'] - 1 }}"
+ owner: "{{ 1001 +
+ podman_subuid_info[__podman_test_username]['start'] - 1 }}"
+ group: "{{ 1001 +
+ podman_subgid_info[__podman_test_username]['start'] - 1 }}"
podman_run_as_user: root
+ __podman_test_username: podman_basic_user
test_names_users:
- - [httpd1, user1, 1001]
+ - [httpd1, "{{ __podman_test_username }}", 1001]
- [httpd2, root, 0]
- [httpd3, root, 0]
podman_create_host_directories: true
@@ -26,7 +29,7 @@
- state: started
debug: true
log_level: debug
- run_as_user: user1
+ run_as_user: "{{ __podman_test_username }}"
kube_file_content:
apiVersion: v1
kind: Pod
@@ -57,10 +60,10 @@
volumes:
- name: www
hostPath:
- path: /tmp/httpd1
+ path: "{{ __test_tmpdir.path ~ '/httpd1' }}"
- name: create
hostPath:
- path: /tmp/httpd1-create
+ path: "{{ __test_tmpdir.path ~ '/httpd1-create' }}"
- state: started
debug: true
log_level: debug
@@ -94,10 +97,10 @@
volumes:
- name: www
hostPath:
- path: /tmp/httpd2
+ path: "{{ __test_tmpdir.path ~ '/httpd2' }}"
- name: create
hostPath:
- path: /tmp/httpd2-create
+ path: "{{ __test_tmpdir.path ~ '/httpd2-create' }}"
__podman_kube_file_content: |
apiVersion: v1
kind: Pod
@@ -128,11 +131,23 @@
volumes:
- name: www
hostPath:
- path: /tmp/httpd3
+ path: "{{ __test_tmpdir.path ~ '/httpd3' }}"
- name: create
hostPath:
- path: /tmp/httpd3-create
+ path: "{{ __test_tmpdir.path ~ '/httpd3-create' }}"
tasks:
+ - name: Create tmpdir for testing
+ tempfile:
+ state: directory
+ prefix: lsr_
+ suffix: _podman
+ register: __test_tmpdir
+
+ - name: Change tmpdir permissions
+ file:
+ path: "{{ __test_tmpdir.path }}"
+ mode: "0777"
+
- name: Run basic tests
vars:
__podman_use_kube_file:
@@ -156,7 +171,7 @@
- name: Create user
user:
- name: user1
+ name: "{{ __podman_test_username }}"
uid: 1001
- name: Create tempfile for kube_src
@@ -171,12 +186,12 @@
copy:
content: "{{ __podman_kube_file_content }}"
dest: "{{ __kube_file_src.path }}"
- mode: 0600
+ mode: "0600"
delegate_to: localhost
- name: Create host directories for data
file:
- path: /tmp/{{ item[0] }}
+ path: "{{ __test_tmpdir.path ~ '/' ~ item[0] }}"
state: directory
mode: "0755"
owner: "{{ item[1] }}"
@@ -184,7 +199,7 @@
- name: Create data files
copy:
- dest: /tmp/{{ item[0] }}/index.txt
+ dest: "{{ __test_tmpdir.path ~ '/' ~ item[0] ~ '/index.txt' }}"
content: "123"
mode: "0644"
owner: "{{ item[1] }}"
@@ -315,7 +330,7 @@
loop: [15001, 15002]
- name: Check host directories
- command: ls -alrtF /tmp/{{ item[0] }}-create
+ command: ls -alrtF {{ __test_tmpdir.path ~ '/' ~ item[0] }}-create
loop: "{{ test_names_users }}"
changed_when: false
@@ -419,6 +434,18 @@
register: __stat
failed_when: __stat.stat.exists
+ - name: Remove pods and units again - test idempotence
+ include_role:
+ name: linux-system-roles.podman
+ vars:
+ # noqa jinja[spacing]
+ podman_kube_specs: "{{ __podman_kube_specs |
+ union([__podman_use_kube_file]) |
+ map('combine', {'state':'absent'}) | list }}"
+ podman_create_host_directories: false
+ podman_firewall: []
+ podman_selinux_ports: []
+
rescue:
- name: Dump journal
command: journalctl -ex
@@ -438,9 +465,8 @@
- name: Clean up host directories
file:
- path: /tmp/{{ item }}
+ path: "{{ __test_tmpdir.path }}"
state: absent
- loop: [httpd1, httpd2, httpd3]
tags:
- tests::cleanup
--
2.46.0

@@ -1,35 +0,0 @@
From 0a8ce32cdc093c388718d4fe28007259ac86854d Mon Sep 17 00:00:00 2001
From: Rich Megginson <rmeggins@redhat.com>
Date: Thu, 18 Apr 2024 08:39:33 -0600
Subject: [PATCH 107/115] chore: use none in jinja code, not null
Must use `none` in Jinja code, not `null`, which is used in YAML.
Signed-off-by: Rich Megginson <rmeggins@redhat.com>
(cherry picked from commit fdf98595e9ecdacfed80d40c2539b18c7d715368)
---
tasks/handle_user_group.yml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tasks/handle_user_group.yml b/tasks/handle_user_group.yml
index ea9984d..0b98d99 100644
--- a/tasks/handle_user_group.yml
+++ b/tasks/handle_user_group.yml
@@ -104,12 +104,12 @@
(__podman_register_subuids.content | b64decode).split('\n') | list |
select('match', '^' ~ __podman_user ~ ':') | list }}"
__subuid_data: "{{ __subuid_match_line[0].split(':') | list
- if __subuid_match_line else null }}"
+ if __subuid_match_line else none }}"
__subgid_match_line: "{{
(__podman_register_subgids.content | b64decode).split('\n') | list |
select('match', '^' ~ __podman_group_name ~ ':') | list }}"
__subgid_data: "{{ __subgid_match_line[0].split(':') | list
- if __subgid_match_line else null }}"
+ if __subgid_match_line else none }}"
- name: Fail if user not in subuid file
fail:
--
2.46.0
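
The distinction is easy to demonstrate with the jinja2 library directly: `none` is a Jinja literal, while `null` is just an undefined variable name. A sketch using StrictUndefined to make the failure visible:

from jinja2 import Environment, StrictUndefined
from jinja2.exceptions import UndefinedError

env = Environment(undefined=StrictUndefined)

# `none` is Jinja's null literal - this renders fine:
print(env.from_string("{{ data if data else none }}").render(data=[]))

# `null` only exists in YAML; in Jinja it is an undefined variable:
try:
    env.from_string("{{ data if data else null }}").render(data=[])
except UndefinedError as err:
    print("undefined:", err)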

@@ -1,44 +0,0 @@
From 4824891e596c197e49557d9d2679cabc76e598e9 Mon Sep 17 00:00:00 2001
From: Rich Megginson <rmeggins@redhat.com>
Date: Fri, 19 Apr 2024 07:33:41 -0600
Subject: [PATCH 108/115] uid 1001 conflicts on some test systems
(cherry picked from commit 5b7ad16d23b78f6f0f68638c0d69015ebb26b3b0)
---
tests/tests_basic.yml | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/tests/tests_basic.yml b/tests/tests_basic.yml
index b8ddc50..c91cc5f 100644
--- a/tests/tests_basic.yml
+++ b/tests/tests_basic.yml
@@ -8,14 +8,14 @@
podman_host_directories:
"{{ __test_tmpdir.path ~ '/httpd1-create' }}":
mode: "0777"
- owner: "{{ 1001 +
+ owner: "{{ 3001 +
podman_subuid_info[__podman_test_username]['start'] - 1 }}"
- group: "{{ 1001 +
+ group: "{{ 3001 +
podman_subgid_info[__podman_test_username]['start'] - 1 }}"
podman_run_as_user: root
__podman_test_username: podman_basic_user
test_names_users:
- - [httpd1, "{{ __podman_test_username }}", 1001]
+ - [httpd1, "{{ __podman_test_username }}", 3001]
- [httpd2, root, 0]
- [httpd3, root, 0]
podman_create_host_directories: true
@@ -172,7 +172,7 @@
- name: Create user
user:
name: "{{ __podman_test_username }}"
- uid: 1001
+ uid: 3001
- name: Create tempfile for kube_src
tempfile:
--
2.46.0

@@ -1,26 +0,0 @@
From 2343663a17a42e71aa5b78ad5deca72823a0afb0 Mon Sep 17 00:00:00 2001
From: Rich Megginson <rmeggins@redhat.com>
Date: Mon, 3 Jun 2024 13:15:07 -0600
Subject: [PATCH 109/115] fix ansible-lint octal value issues
(cherry picked from commit c684c68151f106b4a494bed865e138a0b54ecb43)
---
tests/tests_quadlet_demo.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/tests_quadlet_demo.yml b/tests/tests_quadlet_demo.yml
index a719f9c..259a694 100644
--- a/tests/tests_quadlet_demo.yml
+++ b/tests/tests_quadlet_demo.yml
@@ -98,7 +98,7 @@
get_url:
url: https://localhost:8000
dest: /run/out
- mode: 0600
+ mode: "0600"
validate_certs: false
register: __web_status
until: __web_status is success
--
2.46.0

@@ -1,308 +0,0 @@
From 6a5722ce2a591c57e50ac4ff702c810bf452431d Mon Sep 17 00:00:00 2001
From: Rich Megginson <rmeggins@redhat.com>
Date: Thu, 6 Jun 2024 15:20:22 -0600
Subject: [PATCH 110/115] fix: grab name of network to remove from quadlet file
Cause: The code was using "systemd-" + name of quadlet for
the network name when removing networks.
Consequence: If the quadlet had a different NetworkName, the
removal would fail.
Fix: Fetch the network quadlet file and read the NetworkName from
it to use when removing the network.
Result: The removal of quadlet networks will work both with and
without a custom NetworkName in the quadlet file.
Signed-off-by: Rich Megginson <rmeggins@redhat.com>
This also adds a fix for el10 and Fedora that installs the iptables-nft
package, allowing rootless podman to manage networks using nftables.
(cherry picked from commit bcd5a750250736a07605c72f98e50c1babcddf16)
---
.ostree/packages-runtime-CentOS-10.txt | 3 ++
.ostree/packages-runtime-Fedora.txt | 3 ++
.ostree/packages-runtime-RedHat-10.txt | 3 ++
tasks/cleanup_quadlet_spec.yml | 43 +++++++++++++++++++++++++-
tests/files/quadlet-basic.network | 5 +++
tests/tests_quadlet_basic.yml | 31 +++++++------------
tests/tests_quadlet_demo.yml | 19 +++---------
vars/CentOS_10.yml | 7 +++++
vars/Fedora.yml | 7 +++++
vars/RedHat_10.yml | 7 +++++
10 files changed, 94 insertions(+), 34 deletions(-)
create mode 100644 .ostree/packages-runtime-CentOS-10.txt
create mode 100644 .ostree/packages-runtime-Fedora.txt
create mode 100644 .ostree/packages-runtime-RedHat-10.txt
create mode 100644 tests/files/quadlet-basic.network
create mode 100644 vars/CentOS_10.yml
create mode 100644 vars/Fedora.yml
create mode 100644 vars/RedHat_10.yml
diff --git a/.ostree/packages-runtime-CentOS-10.txt b/.ostree/packages-runtime-CentOS-10.txt
new file mode 100644
index 0000000..16b8eae
--- /dev/null
+++ b/.ostree/packages-runtime-CentOS-10.txt
@@ -0,0 +1,3 @@
+iptables-nft
+podman
+shadow-utils-subid
diff --git a/.ostree/packages-runtime-Fedora.txt b/.ostree/packages-runtime-Fedora.txt
new file mode 100644
index 0000000..16b8eae
--- /dev/null
+++ b/.ostree/packages-runtime-Fedora.txt
@@ -0,0 +1,3 @@
+iptables-nft
+podman
+shadow-utils-subid
diff --git a/.ostree/packages-runtime-RedHat-10.txt b/.ostree/packages-runtime-RedHat-10.txt
new file mode 100644
index 0000000..16b8eae
--- /dev/null
+++ b/.ostree/packages-runtime-RedHat-10.txt
@@ -0,0 +1,3 @@
+iptables-nft
+podman
+shadow-utils-subid
diff --git a/tasks/cleanup_quadlet_spec.yml b/tasks/cleanup_quadlet_spec.yml
index ba68771..8ea069b 100644
--- a/tasks/cleanup_quadlet_spec.yml
+++ b/tasks/cleanup_quadlet_spec.yml
@@ -30,6 +30,43 @@
vars:
__service_error: Could not find the requested service
+- name: See if quadlet file exists
+ stat:
+ path: "{{ __podman_quadlet_file }}"
+ register: __podman_network_stat
+ when: __podman_quadlet_type == "network"
+
+- name: Get network quadlet network name
+ when:
+ - __podman_quadlet_type == "network"
+ - __podman_network_stat.stat.exists
+ block:
+ - name: Create tempdir
+ tempfile:
+ prefix: podman_
+ suffix: _lsr.ini
+ state: directory
+ register: __podman_network_tmpdir
+ delegate_to: localhost
+
+ - name: Fetch the network quadlet
+ fetch:
+ dest: "{{ __podman_network_tmpdir.path }}/network.ini"
+ src: "{{ __podman_quadlet_file }}"
+ flat: true
+
+ - name: Get the network name
+ set_fact:
+ __podman_network_name: "{{
+ lookup('ini', 'NetworkName section=Network file=' ~
+ __podman_network_tmpdir.path ~ '/network.ini') }}"
+ always:
+ - name: Remove tempdir
+ file:
+ path: "{{ __podman_network_tmpdir.path }}"
+ state: absent
+ delegate_to: localhost
+
- name: Remove quadlet file
file:
path: "{{ __podman_quadlet_file }}"
@@ -62,10 +99,14 @@
changed_when: true
- name: Remove network
- command: podman network rm systemd-{{ __podman_quadlet_name }}
+ command: podman network rm {{ __name | quote }}
changed_when: true
when: __podman_quadlet_type == "network"
environment:
XDG_RUNTIME_DIR: "{{ __podman_xdg_runtime_dir }}"
become: "{{ __podman_rootless | ternary(true, omit) }}"
become_user: "{{ __podman_rootless | ternary(__podman_user, omit) }}"
+ vars:
+ __name: "{{ __podman_network_name if
+ __podman_network_name | d('') | length > 0
+ else 'systemd-' ~ __podman_quadlet_name }}"
diff --git a/tests/files/quadlet-basic.network b/tests/files/quadlet-basic.network
new file mode 100644
index 0000000..7db6e0d
--- /dev/null
+++ b/tests/files/quadlet-basic.network
@@ -0,0 +1,5 @@
+[Network]
+Subnet=192.168.29.0/24
+Gateway=192.168.29.1
+Label=app=wordpress
+NetworkName=quadlet-basic
diff --git a/tests/tests_quadlet_basic.yml b/tests/tests_quadlet_basic.yml
index 1b472be..2891b1a 100644
--- a/tests/tests_quadlet_basic.yml
+++ b/tests/tests_quadlet_basic.yml
@@ -19,12 +19,8 @@
state: present
data: "{{ __json_secret_data | string }}"
__podman_quadlet_specs:
- - name: quadlet-basic
- type: network
- Network:
- Subnet: 192.168.29.0/24
- Gateway: 192.168.29.1
- Label: app=wordpress
+ - file_src: files/quadlet-basic.network
+ state: started
- name: quadlet-basic-mysql
type: volume
Volume: {}
@@ -197,7 +193,8 @@
failed_when: not __stat.stat.exists
# must clean up networks last - cannot remove a network
- # in use by a container
+ # in use by a container - using reverse assumes the network
+ # is defined first in the list
- name: Cleanup user
include_role:
name: linux-system-roles.podman
@@ -206,10 +203,7 @@
__absent: {"state":"absent"}
podman_secrets: "{{ __podman_secrets | map('combine', __absent) |
list }}"
- podman_quadlet_specs: "{{ ((__podman_quadlet_specs |
- rejectattr('type', 'match', '^network$') | list) +
- (__podman_quadlet_specs |
- selectattr('type', 'match', '^network$') | list)) |
+ podman_quadlet_specs: "{{ __podman_quadlet_specs | reverse |
map('combine', __absent) | list }}"
- name: Ensure no linger
@@ -242,6 +236,11 @@
changed_when: false
rescue:
+ - name: Check AVCs
+ command: grep type=AVC /var/log/audit/audit.log
+ changed_when: false
+ failed_when: false
+
- name: Dump journal
command: journalctl -ex
changed_when: false
@@ -258,10 +257,7 @@
__absent: {"state":"absent"}
podman_secrets: "{{ __podman_secrets |
map('combine', __absent) | list }}"
- podman_quadlet_specs: "{{ ((__podman_quadlet_specs |
- rejectattr('type', 'match', '^network$') | list) +
- (__podman_quadlet_specs |
- selectattr('type', 'match', '^network$') | list)) |
+ podman_quadlet_specs: "{{ __podman_quadlet_specs | reverse |
map('combine', __absent) | list }}"
- name: Remove test user
@@ -277,10 +273,7 @@
__absent: {"state":"absent"}
podman_secrets: "{{ __podman_secrets |
map('combine', __absent) | list }}"
- podman_quadlet_specs: "{{ ((__podman_quadlet_specs |
- rejectattr('type', 'match', '^network$') | list) +
- (__podman_quadlet_specs |
- selectattr('type', 'match', '^network$') | list)) |
+ podman_quadlet_specs: "{{ __podman_quadlet_specs | reverse |
map('combine', __absent) | list }}"
rescue:
diff --git a/tests/tests_quadlet_demo.yml b/tests/tests_quadlet_demo.yml
index 259a694..b6c27ef 100644
--- a/tests/tests_quadlet_demo.yml
+++ b/tests/tests_quadlet_demo.yml
@@ -11,7 +11,7 @@
podman_use_copr: false # disable copr for CI testing
podman_fail_if_too_old: false
podman_create_host_directories: true
- podman_quadlet_specs:
+ __podman_quadlet_specs:
- file_src: quadlet-demo.network
- file_src: quadlet-demo-mysql.volume
- template_src: quadlet-demo-mysql.container.j2
@@ -45,6 +45,7 @@
include_role:
name: linux-system-roles.podman
vars:
+ podman_quadlet_specs: "{{ __podman_quadlet_specs }}"
podman_pull_retry: true
podman_secrets:
- name: mysql-root-password-container
@@ -149,19 +150,9 @@
include_role:
name: linux-system-roles.podman
vars:
- podman_quadlet_specs:
- - template_src: quadlet-demo-mysql.container.j2
- state: absent
- - file_src: quadlet-demo-mysql.volume
- state: absent
- - file_src: envoy-proxy-configmap.yml
- state: absent
- - file_src: quadlet-demo.kube
- state: absent
- - template_src: quadlet-demo.yml.j2
- state: absent
- - file_src: quadlet-demo.network
- state: absent
+ __absent: {"state":"absent"}
+ podman_quadlet_specs: "{{ __podman_quadlet_specs |
+ reverse | map('combine', __absent) | list }}"
podman_secrets:
- name: mysql-root-password-container
state: absent
diff --git a/vars/CentOS_10.yml b/vars/CentOS_10.yml
new file mode 100644
index 0000000..83589d5
--- /dev/null
+++ b/vars/CentOS_10.yml
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: MIT
+---
+# shadow-utils-subid for getsubids
+__podman_packages:
+ - iptables-nft
+ - podman
+ - shadow-utils-subid
diff --git a/vars/Fedora.yml b/vars/Fedora.yml
new file mode 100644
index 0000000..83589d5
--- /dev/null
+++ b/vars/Fedora.yml
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: MIT
+---
+# shadow-utils-subid for getsubids
+__podman_packages:
+ - iptables-nft
+ - podman
+ - shadow-utils-subid
diff --git a/vars/RedHat_10.yml b/vars/RedHat_10.yml
new file mode 100644
index 0000000..83589d5
--- /dev/null
+++ b/vars/RedHat_10.yml
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: MIT
+---
+# shadow-utils-subid for getsubids
+__podman_packages:
+ - iptables-nft
+ - podman
+ - shadow-utils-subid
--
2.46.0
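For reference, the core of this fix can be reproduced standalone: read NetworkName from the [Network] section of the quadlet file with Ansible's ini lookup, falling back to podman's default systemd-<quadlet name>. A minimal sketch, assuming a hypothetical fetched copy at /tmp/network.ini and a hypothetical quadlet named quadlet-basic:

```yaml
# Minimal sketch of the NetworkName resolution added in this patch.
# /tmp/network.ini and quadlet-basic are hypothetical example values.
- hosts: localhost
  gather_facts: false
  vars:
    quadlet_name: quadlet-basic
    quadlet_copy: /tmp/network.ini
  tasks:
    - name: Read NetworkName from the [Network] section, if present
      set_fact:
        network_name: "{{ lookup('ini',
          'NetworkName section=Network file=' ~ quadlet_copy,
          errors='ignore') | d('', true) }}"

    - name: Show the name that "podman network rm" would be given
      debug:
        msg: "{{ network_name if network_name | length > 0
          else 'systemd-' ~ quadlet_name }}"
```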


@@ -1,615 +0,0 @@
From e11e1ff198f0840fcef6cbe75c74ca69dd22f694 Mon Sep 17 00:00:00 2001
From: Rich Megginson <rmeggins@redhat.com>
Date: Mon, 8 Jul 2024 16:35:29 -0600
Subject: [PATCH 111/115] fix: proper cleanup for networks; ensure cleanup of
resources
Cause: The code was not managing network systemd quadlet units.
Consequence: Network systemd quadlet units were not being stopped and
disabled. Subsequent runs would fail due to the network units not
being cleaned up properly.
Fix: The role manages network systemd quadlet units, including stopping
and removing.
Result: Systemd quadlet network units are properly cleaned up.
In addition, this improves the removal of all types of quadlet resources
and includes code that can be used to test and debug quadlet resource
removal.
(cherry picked from commit a85908ec7f6f8e19908f8d4d18d6d7b64ab1d31e)
---
README.md | 6 +
defaults/main.yml | 4 +
tasks/cancel_linger.yml | 2 +-
tasks/cleanup_quadlet_spec.yml | 188 +++++++++++++-----
tasks/handle_quadlet_spec.yml | 2 +
tasks/manage_linger.yml | 2 +-
tasks/parse_quadlet_file.yml | 57 ++++++
tests/files/quadlet-basic.network | 2 +-
.../templates/quadlet-demo-mysql.container.j2 | 2 +-
tests/tests_quadlet_basic.yml | 69 ++++++-
tests/tests_quadlet_demo.yml | 33 +++
11 files changed, 309 insertions(+), 58 deletions(-)
create mode 100644 tasks/parse_quadlet_file.yml
diff --git a/README.md b/README.md
index e5a7c12..8b6496e 100644
--- a/README.md
+++ b/README.md
@@ -388,6 +388,12 @@ a newer version. For example, if you attempt to manage quadlet or secrets with
podman 4.3 or earlier, the role will fail with an error. If you want the role to
be skipped instead, use `podman_fail_if_too_old: false`.
+### podman_prune_images
+
+Boolean - default is `false` - by default, the role will not prune unused images
+when removing quadlets and other resources. Set this to `true` to tell the role
+to remove unused images when cleaning up.
+
## Variables Exported by the Role
### podman_version
diff --git a/defaults/main.yml b/defaults/main.yml
index 92e4eb8..02453c9 100644
--- a/defaults/main.yml
+++ b/defaults/main.yml
@@ -109,3 +109,7 @@ podman_continue_if_pull_fails: false
# If true, if a pull attempt fails, it will be retried according
# to the default Ansible `until` behavior.
podman_pull_retry: false
+
+# Prune images when removing quadlets/kube specs -
+# this will remove all unused/unreferenced images
+podman_prune_images: false
diff --git a/tasks/cancel_linger.yml b/tasks/cancel_linger.yml
index ede71fe..f233fc4 100644
--- a/tasks/cancel_linger.yml
+++ b/tasks/cancel_linger.yml
@@ -49,7 +49,7 @@
when: __podman_xdg_stat.stat.exists
- name: Cancel linger if no more resources are in use
- command: loginctl disable-linger {{ __podman_linger_user }}
+ command: loginctl disable-linger {{ __podman_linger_user | quote }}
when:
- __podman_xdg_stat.stat.exists
- __podman_container_info.containers | length == 0
diff --git a/tasks/cleanup_quadlet_spec.yml b/tasks/cleanup_quadlet_spec.yml
index 8ea069b..df69243 100644
--- a/tasks/cleanup_quadlet_spec.yml
+++ b/tasks/cleanup_quadlet_spec.yml
@@ -33,39 +33,11 @@
- name: See if quadlet file exists
stat:
path: "{{ __podman_quadlet_file }}"
- register: __podman_network_stat
- when: __podman_quadlet_type == "network"
+ register: __podman_quadlet_stat
-- name: Get network quadlet network name
- when:
- - __podman_quadlet_type == "network"
- - __podman_network_stat.stat.exists
- block:
- - name: Create tempdir
- tempfile:
- prefix: podman_
- suffix: _lsr.ini
- state: directory
- register: __podman_network_tmpdir
- delegate_to: localhost
-
- - name: Fetch the network quadlet
- fetch:
- dest: "{{ __podman_network_tmpdir.path }}/network.ini"
- src: "{{ __podman_quadlet_file }}"
- flat: true
-
- - name: Get the network name
- set_fact:
- __podman_network_name: "{{
- lookup('ini', 'NetworkName section=Network file=' ~
- __podman_network_tmpdir.path ~ '/network.ini') }}"
- always:
- - name: Remove tempdir
- file:
- path: "{{ __podman_network_tmpdir.path }}"
- state: absent
- delegate_to: localhost
+- name: Parse quadlet file
+ include_tasks: parse_quadlet_file.yml
+ when: __podman_quadlet_stat.stat.exists
- name: Remove quadlet file
file:
@@ -73,40 +45,158 @@
state: absent
register: __podman_file_removed
+- name: Refresh systemd # noqa no-handler
+ systemd:
+ daemon_reload: true
+ scope: "{{ __podman_systemd_scope }}"
+ become: "{{ __podman_rootless | ternary(true, omit) }}"
+ become_user: "{{ __podman_rootless | ternary(__podman_user, omit) }}"
+ environment:
+ XDG_RUNTIME_DIR: "{{ __podman_xdg_runtime_dir }}"
+ when: __podman_file_removed is changed # noqa no-handler
+
+- name: Remove managed resource
+ command: >-
+ podman {{ 'rm' if __podman_quadlet_type == 'container'
+ else 'network rm' if __podman_quadlet_type == 'network'
+ else 'volume rm' if __podman_quadlet_type == 'volume' }}
+ {{ __podman_quadlet_resource_name | quote }}
+ register: __podman_rm
+ failed_when:
+ - __podman_rm is failed
+ - not __podman_rm.stderr is search(__str)
+ changed_when: __podman_rm.rc == 0
+ become: "{{ __podman_rootless | ternary(true, omit) }}"
+ become_user: "{{ __podman_rootless | ternary(__podman_user, omit) }}"
+ environment:
+ XDG_RUNTIME_DIR: "{{ __podman_xdg_runtime_dir }}"
+ vars:
+ __str: " found: no such "
+ __type_to_name: # map quadlet type to quadlet property name
+ container:
+ section: Container
+ name: ContainerName
+ network:
+ section: Network
+ name: NetworkName
+ volume:
+ section: Volume
+ name: VolumeName
+ __section: "{{ __type_to_name[__podman_quadlet_type]['section'] }}"
+ __name: "{{ __type_to_name[__podman_quadlet_type]['name'] }}"
+ __podman_quadlet_resource_name: "{{
+ __podman_quadlet_parsed[__section][__name]
+ if __section in __podman_quadlet_parsed
+ and __name in __podman_quadlet_parsed[__section]
+ else 'systemd-' ~ __podman_quadlet_name }}"
+ when:
+ - __podman_file_removed is changed # noqa no-handler
+ - __podman_quadlet_type in __type_to_name
+ - not __podman_rootless or __podman_xdg_stat.stat.exists
+ - __podman_service_name | length > 0
+ no_log: true
+
+- name: Remove volumes
+ command: podman volume rm {{ item | quote }}
+ loop: "{{ __volume_names }}"
+ when:
+ - __podman_file_removed is changed # noqa no-handler
+ - not __podman_rootless or __podman_xdg_stat.stat.exists
+ - __podman_service_name | length == 0
+ - __podman_quadlet_file.endswith(".yml") or
+ __podman_quadlet_file.endswith(".yaml")
+ changed_when: true
+ vars:
+ __volumes: "{{ __podman_quadlet_parsed |
+ selectattr('apiVersion', 'defined') | selectattr('spec', 'defined') |
+ map(attribute='spec') | selectattr('volumes', 'defined') |
+ map(attribute='volumes') | flatten }}"
+ __config_maps: "{{ __volumes | selectattr('configMap', 'defined') |
+ map(attribute='configMap') | selectattr('name', 'defined') |
+ map(attribute='name') | list }}"
+ __secrets: "{{ __volumes | selectattr('secret', 'defined') |
+ map(attribute='secret') | selectattr('secretName', 'defined') |
+ map(attribute='secretName') | list }}"
+ __pvcs: "{{ __volumes | selectattr('persistentVolumeClaim', 'defined') |
+ map(attribute='persistentVolumeClaim') | selectattr('claimName', 'defined') |
+ map(attribute='claimName') | list }}"
+ __volume_names: "{{ __config_maps + __secrets + __pvcs }}"
+ no_log: true
+
+- name: Clear parsed podman variable
+ set_fact:
+ __podman_quadlet_parsed: null
+
+- name: Prune images no longer in use
+ command: podman image prune --all -f
+ when:
+ - podman_prune_images | bool
+ - not __podman_rootless or __podman_xdg_stat.stat.exists
+ changed_when: true
+ become: "{{ __podman_rootless | ternary(true, omit) }}"
+ become_user: "{{ __podman_rootless | ternary(__podman_user, omit) }}"
+ environment:
+ XDG_RUNTIME_DIR: "{{ __podman_xdg_runtime_dir }}"
+
- name: Manage linger
include_tasks: manage_linger.yml
vars:
__podman_item_state: absent
-- name: Cleanup container resources
- when: __podman_file_removed is changed # noqa no-handler
+- name: Collect information for testing/debugging
+ when:
+ - __podman_test_debug | d(false)
+ - not __podman_rootless or __podman_xdg_stat.stat.exists
block:
- - name: Reload systemctl # noqa no-handler
- systemd:
- daemon_reload: true
- scope: "{{ __podman_systemd_scope }}"
+ - name: For testing and debugging - images
+ command: podman images -n
+ register: __podman_test_debug_images
+ changed_when: false
become: "{{ __podman_rootless | ternary(true, omit) }}"
become_user: "{{ __podman_rootless | ternary(__podman_user, omit) }}"
environment:
XDG_RUNTIME_DIR: "{{ __podman_xdg_runtime_dir }}"
- - name: Prune images no longer in use
- command: podman image prune -f
+ - name: For testing and debugging - volumes
+ command: podman volume ls -n
+ register: __podman_test_debug_volumes
+ changed_when: false
+ become: "{{ __podman_rootless | ternary(true, omit) }}"
+ become_user: "{{ __podman_rootless | ternary(__podman_user, omit) }}"
environment:
XDG_RUNTIME_DIR: "{{ __podman_xdg_runtime_dir }}"
+
+ - name: For testing and debugging - containers
+ command: podman ps --noheading
+ register: __podman_test_debug_containers
+ changed_when: false
become: "{{ __podman_rootless | ternary(true, omit) }}"
become_user: "{{ __podman_rootless | ternary(__podman_user, omit) }}"
- changed_when: true
+ environment:
+ XDG_RUNTIME_DIR: "{{ __podman_xdg_runtime_dir }}"
+
+ - name: For testing and debugging - networks
+ command: podman network ls -n -q
+ register: __podman_test_debug_networks
+ changed_when: false
+ become: "{{ __podman_rootless | ternary(true, omit) }}"
+ become_user: "{{ __podman_rootless | ternary(__podman_user, omit) }}"
+ environment:
+ XDG_RUNTIME_DIR: "{{ __podman_xdg_runtime_dir }}"
- - name: Remove network
- command: podman network rm {{ __name | quote }}
- changed_when: true
- when: __podman_quadlet_type == "network"
+ - name: For testing and debugging - secrets
+ command: podman secret ls -n -q
+ register: __podman_test_debug_secrets
+ changed_when: false
+ no_log: true
+ become: "{{ __podman_rootless | ternary(true, omit) }}"
+ become_user: "{{ __podman_rootless | ternary(__podman_user, omit) }}"
environment:
XDG_RUNTIME_DIR: "{{ __podman_xdg_runtime_dir }}"
+
+ - name: For testing and debugging - services
+ service_facts:
become: "{{ __podman_rootless | ternary(true, omit) }}"
become_user: "{{ __podman_rootless | ternary(__podman_user, omit) }}"
- vars:
- __name: "{{ __podman_network_name if
- __podman_network_name | d('') | length > 0
- else 'systemd-' ~ __podman_quadlet_name }}"
+ environment:
+ XDG_RUNTIME_DIR: "{{ __podman_xdg_runtime_dir }}"
diff --git a/tasks/handle_quadlet_spec.yml b/tasks/handle_quadlet_spec.yml
index ce6ef67..851c8a3 100644
--- a/tasks/handle_quadlet_spec.yml
+++ b/tasks/handle_quadlet_spec.yml
@@ -129,6 +129,8 @@
if __podman_quadlet_type in ['container', 'kube']
else __podman_quadlet_name ~ '-volume.service'
if __podman_quadlet_type in ['volume']
+ else __podman_quadlet_name ~ '-network.service'
+ if __podman_quadlet_type in ['network']
else none }}"
- name: Set per-container variables part 4
diff --git a/tasks/manage_linger.yml b/tasks/manage_linger.yml
index b506b70..be69490 100644
--- a/tasks/manage_linger.yml
+++ b/tasks/manage_linger.yml
@@ -10,7 +10,7 @@
- __podman_item_state | d('present') != 'absent'
block:
- name: Enable linger if needed
- command: loginctl enable-linger {{ __podman_user }}
+ command: loginctl enable-linger {{ __podman_user | quote }}
when: __podman_rootless | bool
args:
creates: /var/lib/systemd/linger/{{ __podman_user }}
diff --git a/tasks/parse_quadlet_file.yml b/tasks/parse_quadlet_file.yml
new file mode 100644
index 0000000..5f5297f
--- /dev/null
+++ b/tasks/parse_quadlet_file.yml
@@ -0,0 +1,57 @@
+---
+# Input:
+# * __podman_quadlet_file - path to quadlet file to parse
+# Output:
+# * __podman_quadlet_parsed - dict
+- name: Slurp quadlet file
+ slurp:
+ path: "{{ __podman_quadlet_file }}"
+ register: __podman_quadlet_raw
+ no_log: true
+
+- name: Parse quadlet file
+ set_fact:
+ __podman_quadlet_parsed: |-
+ {% set rv = {} %}
+ {% set section = ["DEFAULT"] %}
+ {% for line in __val %}
+ {% if line.startswith("[") %}
+ {% set val = line.replace("[", "").replace("]", "") %}
+ {% set _ = section.__setitem__(0, val) %}
+ {% else %}
+ {% set ary = line.split("=", 1) %}
+ {% set key = ary[0] %}
+ {% set val = ary[1] %}
+ {% if key in rv.get(section[0], {}) %}
+ {% set curval = rv[section[0]][key] %}
+ {% if curval is string %}
+ {% set newary = [curval, val] %}
+ {% set _ = rv[section[0]].__setitem__(key, newary) %}
+ {% else %}
+ {% set _ = rv[section[0]][key].append(val) %}
+ {% endif %}
+ {% else %}
+ {% set _ = rv.setdefault(section[0], {}).__setitem__(key, val) %}
+ {% endif %}
+ {% endif %}
+ {% endfor %}
+ {{ rv }}
+ vars:
+ __val: "{{ (__podman_quadlet_raw.content | b64decode).split('\n') |
+ select | reject('match', '#') | list }}"
+ when: __podman_service_name | length > 0
+ no_log: true
+
+- name: Parse quadlet yaml file
+ set_fact:
+ __podman_quadlet_parsed: "{{ __podman_quadlet_raw.content | b64decode |
+ from_yaml_all }}"
+ when:
+ - __podman_service_name | length == 0
+ - __podman_quadlet_file.endswith(".yml") or
+ __podman_quadlet_file.endswith(".yaml")
+ no_log: true
+
+- name: Reset raw variable
+ set_fact:
+ __podman_quadlet_raw: null
diff --git a/tests/files/quadlet-basic.network b/tests/files/quadlet-basic.network
index 7db6e0d..5b002ba 100644
--- a/tests/files/quadlet-basic.network
+++ b/tests/files/quadlet-basic.network
@@ -2,4 +2,4 @@
Subnet=192.168.29.0/24
Gateway=192.168.29.1
Label=app=wordpress
-NetworkName=quadlet-basic
+NetworkName=quadlet-basic-name
diff --git a/tests/templates/quadlet-demo-mysql.container.j2 b/tests/templates/quadlet-demo-mysql.container.j2
index c84f0e8..92097d4 100644
--- a/tests/templates/quadlet-demo-mysql.container.j2
+++ b/tests/templates/quadlet-demo-mysql.container.j2
@@ -9,7 +9,7 @@ Volume=/tmp/quadlet_demo:/var/lib/quadlet_demo:Z
Network=quadlet-demo.network
{% if podman_version is version("4.5", ">=") %}
Secret=mysql-root-password-container,type=env,target=MYSQL_ROOT_PASSWORD
-HealthCmd=/usr/bin/true
+HealthCmd=/bin/true
HealthOnFailure=kill
{% else %}
PodmanArgs=--secret=mysql-root-password-container,type=env,target=MYSQL_ROOT_PASSWORD
diff --git a/tests/tests_quadlet_basic.yml b/tests/tests_quadlet_basic.yml
index 2891b1a..0fdced4 100644
--- a/tests/tests_quadlet_basic.yml
+++ b/tests/tests_quadlet_basic.yml
@@ -21,7 +21,14 @@
__podman_quadlet_specs:
- file_src: files/quadlet-basic.network
state: started
+ - name: quadlet-basic-unused-network
+ type: network
+ Network: {}
- name: quadlet-basic-mysql
+ type: volume
+ Volume:
+ VolumeName: quadlet-basic-mysql-name
+ - name: quadlet-basic-unused-volume
type: volume
Volume: {}
- name: quadlet-basic-mysql
@@ -30,7 +37,7 @@
WantedBy: default.target
Container:
Image: "{{ mysql_image }}"
- ContainerName: quadlet-basic-mysql
+ ContainerName: quadlet-basic-mysql-name
Volume: quadlet-basic-mysql.volume:/var/lib/mysql
Network: quadlet-basic.network
# Once 4.5 is released change this line to use the quadlet Secret key
@@ -192,13 +199,14 @@
register: __stat
failed_when: not __stat.stat.exists
- # must clean up networks last - cannot remove a network
- # in use by a container - using reverse assumes the network
- # is defined first in the list
+ # must clean up in the reverse order of creating - and
+ # ensure networks are removed last
- name: Cleanup user
include_role:
name: linux-system-roles.podman
vars:
+ podman_prune_images: true
+ __podman_test_debug: true
podman_run_as_user: user_quadlet_basic
__absent: {"state":"absent"}
podman_secrets: "{{ __podman_secrets | map('combine', __absent) |
@@ -206,6 +214,22 @@
podman_quadlet_specs: "{{ __podman_quadlet_specs | reverse |
map('combine', __absent) | list }}"
+ - name: Ensure no resources
+ assert:
+ that:
+ - __podman_test_debug_images.stdout == ""
+ - __podman_test_debug_networks.stdout_lines |
+ reject("match", "^podman$") |
+ reject("match", "^podman-default-kube-network$") |
+ list | length == 0
+ - __podman_test_debug_volumes.stdout == ""
+ - __podman_test_debug_containers.stdout == ""
+ - __podman_test_debug_secrets.stdout == ""
+ - ansible_facts["services"] | dict2items |
+ rejectattr("value.status", "match", "not-found") |
+ selectattr("key", "match", "quadlet-demo") |
+ list | length == 0
+
- name: Ensure no linger
stat:
path: /var/lib/systemd/linger/user_quadlet_basic
@@ -230,12 +254,28 @@
- quadlet-basic-mysql.volume
- name: Check JSON
- command: podman exec quadlet-basic-mysql cat /tmp/test.json
+ command: podman exec quadlet-basic-mysql-name cat /tmp/test.json
register: __result
failed_when: __result.stdout != __json_secret_data
changed_when: false
rescue:
+ - name: Debug3
+ shell: |
+ set -x
+ set -o pipefail
+ exec 1>&2
+ #podman volume rm --all
+ #podman network prune -f
+ podman volume ls
+ podman network ls
+ podman secret ls
+ podman container ls
+ podman pod ls
+ podman images
+ systemctl list-units | grep quadlet
+ changed_when: false
+
- name: Check AVCs
command: grep type=AVC /var/log/audit/audit.log
changed_when: false
@@ -253,6 +293,7 @@
include_role:
name: linux-system-roles.podman
vars:
+ podman_prune_images: true
podman_run_as_user: user_quadlet_basic
__absent: {"state":"absent"}
podman_secrets: "{{ __podman_secrets |
@@ -270,12 +311,30 @@
include_role:
name: linux-system-roles.podman
vars:
+ podman_prune_images: true
+ __podman_test_debug: true
__absent: {"state":"absent"}
podman_secrets: "{{ __podman_secrets |
map('combine', __absent) | list }}"
podman_quadlet_specs: "{{ __podman_quadlet_specs | reverse |
map('combine', __absent) | list }}"
+ - name: Ensure no resources
+ assert:
+ that:
+ - __podman_test_debug_images.stdout == ""
+ - __podman_test_debug_networks.stdout_lines |
+ reject("match", "^podman$") |
+ reject("match", "^podman-default-kube-network$") |
+ list | length == 0
+ - __podman_test_debug_volumes.stdout == ""
+ - __podman_test_debug_containers.stdout == ""
+ - __podman_test_debug_secrets.stdout == ""
+ - ansible_facts["services"] | dict2items |
+ rejectattr("value.status", "match", "not-found") |
+ selectattr("key", "match", "quadlet-demo") |
+ list | length == 0
+
rescue:
- name: Dump journal
command: journalctl -ex
diff --git a/tests/tests_quadlet_demo.yml b/tests/tests_quadlet_demo.yml
index b6c27ef..1cc7e62 100644
--- a/tests/tests_quadlet_demo.yml
+++ b/tests/tests_quadlet_demo.yml
@@ -84,6 +84,11 @@
changed_when: false
failed_when: false
+ - name: Check volumes
+ command: podman volume ls
+ changed_when: false
+ failed_when: false
+
- name: Check pods
command: podman pod ps --ctr-ids --ctr-names --ctr-status
changed_when: false
@@ -150,6 +155,8 @@
include_role:
name: linux-system-roles.podman
vars:
+ podman_prune_images: true
+ __podman_test_debug: true
__absent: {"state":"absent"}
podman_quadlet_specs: "{{ __podman_quadlet_specs |
reverse | map('combine', __absent) | list }}"
@@ -161,7 +168,33 @@
- name: envoy-certificates
state: absent
+ - name: Ensure no resources
+ assert:
+ that:
+ - __podman_test_debug_images.stdout == ""
+ - __podman_test_debug_networks.stdout_lines |
+ reject("match", "^podman$") |
+ reject("match", "^podman-default-kube-network$") |
+ list | length == 0
+ - __podman_test_debug_volumes.stdout == ""
+ - __podman_test_debug_containers.stdout == ""
+ - __podman_test_debug_secrets.stdout == ""
+ - ansible_facts["services"] | dict2items |
+ rejectattr("value.status", "match", "not-found") |
+ selectattr("key", "match", "quadlet-demo") |
+ list | length == 0
+
rescue:
+ - name: Debug
+ shell: |
+ exec 1>&2
+ set -x
+ set -o pipefail
+ systemctl list-units --plain -l --all | grep quadlet || :
+ systemctl list-unit-files --all | grep quadlet || :
+ systemctl list-units --plain --failed -l --all | grep quadlet || :
+ changed_when: false
+
- name: Get journald
command: journalctl -ex
changed_when: false
--
2.46.0
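To make the new parse_quadlet_file.yml behavior concrete: for the tests/files/quadlet-basic.network file shown above, the Jinja INI parser produces a nested dict keyed by section. A sketch of the expected shape of __podman_quadlet_parsed (a repeated key, such as several Label= lines, would be collected into a list of values):

```yaml
Network:
  Subnet: 192.168.29.0/24
  Gateway: 192.168.29.1
  Label: app=wordpress
  NetworkName: quadlet-basic-name
```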


@@ -1,72 +0,0 @@
From 7473a31e3a0201131e42281bce9bbf9c88ac04ca Mon Sep 17 00:00:00 2001
From: Rich Megginson <rmeggins@redhat.com>
Date: Wed, 31 Jul 2024 18:52:57 -0600
Subject: [PATCH 112/115] fix: Ensure user linger is closed on EL10
Cause: There is an issue with loginctl on EL10 - doing cancel-linger
will leave the user session in the closing state.
Consequence: User sessions accumulate, and the test user cannot
be removed.
Fix: As suggested in the systemd issue, the fix is to shut down and
restart systemd-logind in this situation.
Result: User cancel-linger works as expected.
Signed-off-by: Rich Megginson <rmeggins@redhat.com>
(cherry picked from commit 0ceea96a12bf0b462ca62d012d86cdcbd4f20eaa)
---
tasks/cancel_linger.yml | 37 +++++++++++++++++++++++++++++++++++++
1 file changed, 37 insertions(+)
diff --git a/tasks/cancel_linger.yml b/tasks/cancel_linger.yml
index f233fc4..00d38c2 100644
--- a/tasks/cancel_linger.yml
+++ b/tasks/cancel_linger.yml
@@ -58,5 +58,42 @@
list | length == 0
- __podman_linger_secrets.stdout == ""
changed_when: true
+ register: __cancel_linger
args:
removes: /var/lib/systemd/linger/{{ __podman_linger_user }}
+
+- name: Wait for user session to exit closing state # noqa no-handler
+ command: loginctl show-user -P State {{ __podman_linger_user | quote }}
+ register: __user_state
+ changed_when: false
+ until: __user_state.stdout != "closing"
+ when: __cancel_linger is changed
+ ignore_errors: true
+
+# see https://github.com/systemd/systemd/issues/26744#issuecomment-2261509208
+- name: Handle user stuck in closing state
+ when:
+ - __cancel_linger is changed
+ - __user_state is failed
+ block:
+ - name: Stop logind
+ service:
+ name: systemd-logind
+ state: stopped
+
+ - name: Wait for user session to exit closing state
+ command: loginctl show-user -P State {{ __podman_linger_user | quote }}
+ changed_when: false
+ register: __user_state
+ until: __user_state.stderr is match(__pat) or
+ __user_state.stdout != "closing"
+ failed_when:
+ - not __user_state.stderr is match(__pat)
+ - __user_state.stdout == "closing"
+ vars:
+ __pat: "Failed to get user: User ID .* is not logged in or lingering"
+
+ - name: Restart logind
+ service:
+ name: systemd-logind
+ state: started
--
2.46.0
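The workaround from the systemd issue can be sketched as a standalone play; this collapses the patch's stop/wait/start sequence into a single restart for brevity, and user_quadlet_basic is a hypothetical example user:

```yaml
- hosts: all
  become: true
  vars:
    linger_user: user_quadlet_basic  # hypothetical example user
  tasks:
    - name: Get the user session state
      command: loginctl show-user -P State {{ linger_user | quote }}
      register: user_state
      changed_when: false
      failed_when: false  # the user may already be gone

    - name: Restart logind if the session is stuck in the closing state
      service:
        name: systemd-logind
        state: restarted
      when: user_state.stdout == "closing"
```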


@@ -1,70 +0,0 @@
From acc8e5458170cd653681beee8cec162e1d3e4f1f Mon Sep 17 00:00:00 2001
From: Rich Megginson <rmeggins@redhat.com>
Date: Mon, 19 Aug 2024 10:11:05 -0600
Subject: [PATCH 113/115] test: skip quadlet tests on non-x86_64
The images we currently use for quadlet testing are only available
on x86_64
Signed-off-by: Rich Megginson <rmeggins@redhat.com>
(cherry picked from commit 4a2ab77cafd9ae330f9260a5180680036707bf92)
---
tests/tests_quadlet_basic.yml | 11 +++++++++++
tests/tests_quadlet_demo.yml | 12 ++++++++++++
2 files changed, 23 insertions(+)
diff --git a/tests/tests_quadlet_basic.yml b/tests/tests_quadlet_basic.yml
index 0fdced4..5a06864 100644
--- a/tests/tests_quadlet_basic.yml
+++ b/tests/tests_quadlet_basic.yml
@@ -48,6 +48,17 @@
- FOO=/bin/busybox-extras
- BAZ=test
tasks:
+ - name: Test is only supported on x86_64
+ debug:
+ msg: >
+ This test is only supported on x86_64 because the test images used are only
+ available on that platform.
+ when: ansible_facts["architecture"] != "x86_64"
+
+ - name: End test
+ meta: end_play
+ when: ansible_facts["architecture"] != "x86_64"
+
- name: Run test
block:
- name: See if not pulling images fails
diff --git a/tests/tests_quadlet_demo.yml b/tests/tests_quadlet_demo.yml
index 1cc7e62..f08d482 100644
--- a/tests/tests_quadlet_demo.yml
+++ b/tests/tests_quadlet_demo.yml
@@ -2,6 +2,7 @@
---
- name: Deploy the quadlet demo app
hosts: all
+ gather_facts: true
vars_files:
- vars/test_vars.yml
vars:
@@ -28,6 +29,17 @@
"/tmp/quadlet_demo":
mode: "0777"
tasks:
+ - name: Test is only supported on x86_64
+ debug:
+ msg: >
+ This test is only supported on x86_64 because the test images used are only
+ available on that platform.
+ when: ansible_facts["architecture"] != "x86_64"
+
+ - name: End test
+ meta: end_play
+ when: ansible_facts["architecture"] != "x86_64"
+
- name: Run tests
block:
- name: Generate certificates
--
2.46.0


@@ -1,208 +0,0 @@
From 5367219c4d12b988b531b00a90625cb6747baf13 Mon Sep 17 00:00:00 2001
From: Rich Megginson <rmeggins@redhat.com>
Date: Thu, 29 Aug 2024 08:47:03 -0600
Subject: [PATCH 114/115] fix: subgid maps user to gids, not group to gids
Cause: The podman role was looking up groups in the subgid values, not
users.
Consequence: If the user name was different from the group name, the role
would fail to look up the subgid values.
Fix: Ensure that the user is used to look up the subgid values.
Result: The subgid values are looked up correctly.
Signed-off-by: Rich Megginson <rmeggins@redhat.com>
(cherry picked from commit ad01b0091707fc4eae6f98f694f1a213fb9f8521)
---
README.md | 45 ++++++++++++++++++-------------------
tasks/handle_user_group.yml | 32 ++++++++------------------
2 files changed, 31 insertions(+), 46 deletions(-)
diff --git a/README.md b/README.md
index 8b6496e..6222098 100644
--- a/README.md
+++ b/README.md
@@ -35,12 +35,11 @@ restrictions:
* They must be already present on the system - the role will not create the
users or groups - the role will exit with an error if a non-existent user or
group is specified
-* They must already exist in `/etc/subuid` and `/etc/subgid`, or are otherwise
- provided by your identity management system - the role will exit with an error
- if a specified user is not present in `/etc/subuid`, or if a specified group
- is not in `/etc/subgid`. The role uses `getsubids` to check the user and
- group if available, or checks the files directly if `getsubids` is not
- available.
+* The user must already exist in `/etc/subuid` and `/etc/subgid`, or otherwise
+ be provided by your identity management system - the role will exit with an
+ error if a specified user is not present in `/etc/subuid` and `/etc/subgid`.
+ The role uses `getsubids` to check the user and group if available, or checks
+ the files directly if `getsubids` is not available.
## Role Variables
@@ -56,14 +55,15 @@ except for the following:
* `started` - Create the pods and systemd services, and start them running
* `created` - Create the pods and systemd services, but do not start them
* `absent` - Remove the pods and systemd services
-* `run_as_user` - Use this to specify a per-pod user. If you do not
- specify this, then the global default `podman_run_as_user` value will be used.
+* `run_as_user` - Use this to specify a per-pod user. If you do not specify
+ this, then the global default `podman_run_as_user` value will be used.
Otherwise, `root` will be used. NOTE: The user must already exist - the role
- will not create one. The user must be present in `/etc/subuid`.
-* `run_as_group` - Use this to specify a per-pod group. If you do not
- specify this, then the global default `podman_run_as_group` value will be
- used. Otherwise, `root` will be used. NOTE: The group must already exist -
- the role will not create one. The group must be present in `/etc/subgid`.
+ will not create one. The user must be present in `/etc/subuid` and
+ `/etc/subgid`.
+* `run_as_group` - Use this to specify a per-pod group. If you do not specify
+ this, then the global default `podman_run_as_group` value will be used.
+ Otherwise, `root` will be used. NOTE: The group must already exist - the role
+ will not create one.
* `systemd_unit_scope` - The scope to use for the systemd unit. If you do not
specify this, then the global default `podman_systemd_unit_scope` will be
used. Otherwise, the scope will be `system` for root containers, and `user`
@@ -278,14 +278,13 @@ podman_selinux_ports:
This is the name of the user to use for all rootless containers. You can also
specify per-container username with `run_as_user` in `podman_kube_specs`. NOTE:
The user must already exist - the role will not create one. The user must be
-present in `/etc/subuid`.
+present in `/etc/subuid` and `/etc/subgid`.
### podman_run_as_group
This is the name of the group to use for all rootless containers. You can also
specify per-container group name with `run_as_group` in `podman_kube_specs`.
-NOTE: The group must already exist - the role will not create one. The group must
-be present in `/etc/subgid`.
+NOTE: The group must already exist - the role will not create one.
### podman_systemd_unit_scope
@@ -426,24 +425,24 @@ PodmanArgs=--secret=my-app-pwd,type=env,target=MYAPP_PASSWORD
### podman_subuid_info, podman_subgid_info
-The role needs to ensure any users and groups are present in the subuid and
+The role needs to ensure any users are present in the subuid and
subgid information. Once it extracts this data, it will be available in
`podman_subuid_info` and `podman_subgid_info`. These are dicts. The key is the
-user or group name, and the value is a `dict` with two fields:
+user name, and the value is a `dict` with two fields:
-* `start` - the start of the id range for that user or group, as an `int`
-* `range` - the id range for that user or group, as an `int`
+* `start` - the start of the id range for that user, as an `int`
+* `range` - the id range for that user, as an `int`
```yaml
podman_host_directories:
"/var/lib/db":
mode: "0777"
owner: "{{ 1001 + podman_subuid_info['dbuser']['start'] - 1 }}"
- group: "{{ 1001 + podman_subgid_info['dbgroup']['start'] - 1 }}"
+ group: "{{ 2001 + podman_subgid_info['dbuser']['start'] - 1 }}"
```
-Where `1001` is the uid for user `dbuser`, and `1001` is the gid for group
-`dbgroup`.
+Where `1001` is the uid for user `dbuser`, and `2001` is the gid for the
+group you want to use.
**NOTE**: depending on the namespace used by your containers, you might not be
able to use the subuid and subgid information, which comes from `getsubids` if
diff --git a/tasks/handle_user_group.yml b/tasks/handle_user_group.yml
index 0b98d99..2e19cdd 100644
--- a/tasks/handle_user_group.yml
+++ b/tasks/handle_user_group.yml
@@ -25,19 +25,6 @@
{{ ansible_facts["getent_passwd"][__podman_user][2] }}
{%- endif -%}
-- name: Get group information
- getent:
- database: group
- key: "{{ __podman_group }}"
- fail_key: false
- when: "'getent_group' not in ansible_facts or
- __podman_group not in ansible_facts['getent_group']"
-
-- name: Set group name
- set_fact:
- __podman_group_name: "{{ ansible_facts['getent_group'].keys() |
- list | first }}"
-
- name: See if getsubids exists
stat:
path: /usr/bin/getsubids
@@ -49,13 +36,13 @@
- __podman_user not in ["root", "0"]
- __podman_stat_getsubids.stat.exists
block:
- - name: Check user with getsubids
+ - name: Check with getsubids for user subuids
command: getsubids {{ __podman_user | quote }}
changed_when: false
register: __podman_register_subuids
- - name: Check group with getsubids
- command: getsubids -g {{ __podman_group_name | quote }}
+ - name: Check with getsubids for user subgids
+ command: getsubids -g {{ __podman_user | quote }}
changed_when: false
register: __podman_register_subgids
@@ -66,7 +53,7 @@
{'start': __subuid_data[2] | int, 'range': __subuid_data[3] | int}})
if __subuid_data | length > 0 else podman_subuid_info | d({}) }}"
podman_subgid_info: "{{ podman_subgid_info | d({}) |
- combine({__podman_group_name:
+ combine({__podman_user:
{'start': __subgid_data[2] | int, 'range': __subgid_data[3] | int}})
if __subgid_data | length > 0 else podman_subgid_info | d({}) }}"
vars:
@@ -77,7 +64,6 @@
when:
- not __podman_stat_getsubids.stat.exists
- __podman_user not in ["root", "0"]
- - __podman_group not in ["root", "0"]
block:
- name: Get subuid file
slurp:
@@ -96,7 +82,7 @@
{'start': __subuid_data[1] | int, 'range': __subuid_data[2] | int}})
if __subuid_data else podman_subuid_info | d({}) }}"
podman_subgid_info: "{{ podman_subgid_info | d({}) |
- combine({__podman_group_name:
+ combine({__podman_user:
{'start': __subgid_data[1] | int, 'range': __subgid_data[2] | int}})
if __subgid_data else podman_subgid_info | d({}) }}"
vars:
@@ -107,7 +93,7 @@
if __subuid_match_line else none }}"
__subgid_match_line: "{{
(__podman_register_subgids.content | b64decode).split('\n') | list |
- select('match', '^' ~ __podman_group_name ~ ':') | list }}"
+ select('match', '^' ~ __podman_user ~ ':') | list }}"
__subgid_data: "{{ __subgid_match_line[0].split(':') | list
if __subgid_match_line else none }}"
@@ -118,9 +104,9 @@
/etc/subuid file - cannot continue
when: not __podman_user in podman_subuid_info
- - name: Fail if group not in subgid file
+ - name: Fail if user not in subgid file
fail:
msg: >
- The given podman group [{{ __podman_group_name }}] is not in the
+ The given podman user [{{ __podman_user }}] is not in the
/etc/subgid file - cannot continue
- when: not __podman_group_name in podman_subuid_info
+ when: not __podman_user in podman_subgid_info
--
2.46.0
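The distinction this patch corrects is visible in the data formats themselves: both /etc/subuid and /etc/subgid are keyed by user name, and getsubids -g takes a user as well. A sketch with hypothetical ID ranges:

```
$ grep user_quadlet_basic /etc/subuid /etc/subgid    # both keyed by user name
/etc/subuid:user_quadlet_basic:524288:65536
/etc/subgid:user_quadlet_basic:524288:65536
$ getsubids user_quadlet_basic                       # "<index>: <user> <start> <range>"
0: user_quadlet_basic 524288 65536
$ getsubids -g user_quadlet_basic                    # subgids for the same user
0: user_quadlet_basic 524288 65536
```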


@@ -1,38 +0,0 @@
From c78741d6d5a782f599ee42c6deb89b80426e403d Mon Sep 17 00:00:00 2001
From: Rich Megginson <rmeggins@redhat.com>
Date: Fri, 6 Sep 2024 14:15:20 -0600
Subject: [PATCH 115/115] fix: Cannot remove volumes from kube yaml - need to
convert yaml to list
Cause: __podman_quadlet_parsed was not converted to a list.
Consequence: On older versions of Ansible, the volumes from the kube yaml
were not removed when removing quadlets.
Fix: Convert __podman_quadlet_parsed to a list after parsing.
Result: Older versions of Ansible can remove volumes specified
in kube yaml files.
Signed-off-by: Rich Megginson <rmeggins@redhat.com>
(cherry picked from commit 423c98342c82893aca891d49c63713193dc96222)
---
tasks/parse_quadlet_file.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tasks/parse_quadlet_file.yml b/tasks/parse_quadlet_file.yml
index 5f5297f..2d58c4e 100644
--- a/tasks/parse_quadlet_file.yml
+++ b/tasks/parse_quadlet_file.yml
@@ -45,7 +45,7 @@
- name: Parse quadlet yaml file
set_fact:
__podman_quadlet_parsed: "{{ __podman_quadlet_raw.content | b64decode |
- from_yaml_all }}"
+ from_yaml_all | list }}"
when:
- __podman_service_name | length == 0
- __podman_quadlet_file.endswith(".yml") or
--
2.46.0
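The underlying issue: on older Ansible, from_yaml_all yields a generator, so a later filter chain can consume it and leave nothing for subsequent references. A minimal sketch, assuming a hypothetical multi-document file /tmp/kube.yml:

```yaml
- name: Parse all documents from a kube yaml file (note the trailing | list)
  set_fact:
    parsed_docs: "{{ lookup('file', '/tmp/kube.yml') | from_yaml_all | list }}"

- name: The list can now be iterated repeatedly without being exhausted
  debug:
    msg: "{{ parsed_docs | selectattr('kind', 'defined') |
      map(attribute='kind') | list }}"
```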


@@ -1,68 +0,0 @@
From e2040d110ac24ec044973674afc8269ab9ef7c11 Mon Sep 17 00:00:00 2001
From: Rich Megginson <rmeggins@redhat.com>
Date: Fri, 25 Oct 2024 08:55:27 -0600
Subject: [PATCH 116/117] fix: ignore pod not found errors when removing kube
specs
Cause: The module uses the `podman kube play --down` command to remove
the pod specified by the kube spec, but does not check if the pod has
already been removed. That is, it is not idempotent. The command
gives an error if the pod is not found. This only happens with
podman 4.4.1 on EL8.8 and EL9.2.
Consequence: The podman role gives an error that the pod specified
by the kube spec cannot be found when removing.
Fix: The role ignores the 'pod not found' error when removing
a kube spec.
Result: The role does not give an error when removing a kube
spec.
NOTE: This has been fixed in the containers.podman.podman_play
module upstream but has not yet been released.
https://github.com/containers/ansible-podman-collections/pull/863/files#diff-6672fb7f52e2bec3450c2dd7ed9a4385accd9bab8429ea6eecf4d56447f5a1b8R304
Signed-off-by: Rich Megginson <rmeggins@redhat.com>
(cherry picked from commit 3edc125005c5912926add1539be96cf3b990bb96)
---
tasks/cleanup_kube_spec.yml | 13 +++++++++++++
1 file changed, 13 insertions(+)
diff --git a/tasks/cleanup_kube_spec.yml b/tasks/cleanup_kube_spec.yml
index b6b47bd..36610e6 100644
--- a/tasks/cleanup_kube_spec.yml
+++ b/tasks/cleanup_kube_spec.yml
@@ -30,6 +30,11 @@
path: "{{ __podman_kube_file }}"
register: __podman_kube_file_stat
+# NOTE: removing kube specs is not idempotent and will give an error on
+# RHEL 8.8 and 9.2 - seems ok on other platforms - this was fixed in the
+# module but is not released yet (as of 20241024)
+# https://github.com/containers/ansible-podman-collections/pull/863/files#diff-6672fb7f52e2bec3450c2dd7ed9a4385accd9bab8429ea6eecf4d56447f5a1b8R304
+# remove this hack when the fix is available
- name: Remove pod/containers
containers.podman.podman_play: "{{ __podman_kube_spec |
combine({'kube_file': __podman_kube_file}) }}"
@@ -38,9 +43,17 @@
become: "{{ __podman_rootless | ternary(true, omit) }}"
become_user: "{{ __podman_rootless | ternary(__podman_user, omit) }}"
register: __podman_removed
+ failed_when:
+ - __podman_removed is failed
+ - not __podman_removed.msg is search(__err_msg)
+ - not __is_affected_platform
when:
- not __podman_rootless or __podman_xdg_stat.stat.exists
- __podman_kube_file_stat.stat.exists
+ vars:
+ __err_msg: Failed to delete .* with {{ __podman_kube_file }}
+ __is_affected_platform: "{{ ansible_facts['distribution'] == 'RedHat' and
+ ansible_facts['distribution_version'] in ['8.8', '9.2'] }}"
- name: Remove kubernetes yaml file
file:
--
2.47.0
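The error-tolerance idiom used above, as a standalone sketch (the kube file path is hypothetical; the message pattern mirrors the one the role searches for):

```yaml
- name: Remove pod/containers, ignoring "pod not found" on podman 4.4.1
  containers.podman.podman_play:
    kube_file: /etc/containers/ansible-kubernetes.d/demo.yml  # hypothetical
    state: absent
  register: podman_removed
  failed_when:
    - podman_removed is failed
    - not podman_removed.msg is search("Failed to delete .* with")
```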


@@ -1,33 +0,0 @@
From f5d7e3088a8662798ced2294ca9059799b7e1c33 Mon Sep 17 00:00:00 2001
From: Rich Megginson <rmeggins@redhat.com>
Date: Fri, 25 Oct 2024 11:12:08 -0600
Subject: [PATCH 117/117] test: need grubby for el8 testing for ostree
EL8 tests need grubby for ostree building
Signed-off-by: Rich Megginson <rmeggins@redhat.com>
(cherry picked from commit 881a03569b6dbebaf9fc9720ffe85039d1d0b72d)
---
.ostree/packages-testing-CentOS-8.txt | 1 +
.ostree/packages-testing-RedHat-8.txt | 1 +
2 files changed, 2 insertions(+)
create mode 100644 .ostree/packages-testing-CentOS-8.txt
create mode 100644 .ostree/packages-testing-RedHat-8.txt
diff --git a/.ostree/packages-testing-CentOS-8.txt b/.ostree/packages-testing-CentOS-8.txt
new file mode 100644
index 0000000..ae5e93e
--- /dev/null
+++ b/.ostree/packages-testing-CentOS-8.txt
@@ -0,0 +1 @@
+grubby
diff --git a/.ostree/packages-testing-RedHat-8.txt b/.ostree/packages-testing-RedHat-8.txt
new file mode 100644
index 0000000..ae5e93e
--- /dev/null
+++ b/.ostree/packages-testing-RedHat-8.txt
@@ -0,0 +1 @@
+grubby
--
2.47.0


@@ -1,89 +0,0 @@
From e8961d4e5ca7765e97d76a76e4741825e697aa8d Mon Sep 17 00:00:00 2001
From: Rich Megginson <rmeggins@redhat.com>
Date: Mon, 28 Oct 2024 10:27:59 -0600
Subject: [PATCH] fix: make role work on el 8.8 and el 9.2 and podman version
less than 4.7.0
Cause: The role was using podman and loginctl features not supported on EL 8.8/9.2
and podman versions less than 4.7.0. NetworkName and VolumeName are not supported
until podman 4.7.0. loginctl -P is not supported on EL 8.8/9.2.
Consequence: The role would give failures when managing EL 8.8/9.2 machines.
Fix: Do not test with NetworkName and VolumeName when the podman version is less
than 4.7.0. Use loginctl --value -p instead of -P, which works on all
versions.
Result: The role can manage EL 8.8/9.2 machines.
Signed-off-by: Rich Megginson <rmeggins@redhat.com>
(cherry picked from commit f16c3fb3c884cf3af446d19aeda86f27dafd1d1e)
---
tasks/cancel_linger.yml | 4 ++--
.../quadlet-basic.network.j2} | 2 ++
tests/tests_quadlet_basic.yml | 6 +++---
3 files changed, 7 insertions(+), 5 deletions(-)
rename tests/{files/quadlet-basic.network => templates/quadlet-basic.network.j2} (62%)
diff --git a/tasks/cancel_linger.yml b/tasks/cancel_linger.yml
index 00d38c2..9eb67ff 100644
--- a/tasks/cancel_linger.yml
+++ b/tasks/cancel_linger.yml
@@ -63,7 +63,7 @@
removes: /var/lib/systemd/linger/{{ __podman_linger_user }}
- name: Wait for user session to exit closing state # noqa no-handler
- command: loginctl show-user -P State {{ __podman_linger_user | quote }}
+ command: loginctl show-user --value -p State {{ __podman_linger_user | quote }}
register: __user_state
changed_when: false
until: __user_state.stdout != "closing"
@@ -82,7 +82,7 @@
state: stopped
- name: Wait for user session to exit closing state
- command: loginctl show-user -P State {{ __podman_linger_user | quote }}
+ command: loginctl show-user --value -p State {{ __podman_linger_user | quote }}
changed_when: false
register: __user_state
until: __user_state.stderr is match(__pat) or
diff --git a/tests/files/quadlet-basic.network b/tests/templates/quadlet-basic.network.j2
similarity index 62%
rename from tests/files/quadlet-basic.network
rename to tests/templates/quadlet-basic.network.j2
index 5b002ba..3419e3d 100644
--- a/tests/files/quadlet-basic.network
+++ b/tests/templates/quadlet-basic.network.j2
@@ -2,4 +2,6 @@
Subnet=192.168.29.0/24
Gateway=192.168.29.1
Label=app=wordpress
+{% if podman_version is version("4.7.0", ">=") %}
NetworkName=quadlet-basic-name
+{% endif %}
diff --git a/tests/tests_quadlet_basic.yml b/tests/tests_quadlet_basic.yml
index 5a06864..9563a60 100644
--- a/tests/tests_quadlet_basic.yml
+++ b/tests/tests_quadlet_basic.yml
@@ -19,15 +19,15 @@
state: present
data: "{{ __json_secret_data | string }}"
__podman_quadlet_specs:
- - file_src: files/quadlet-basic.network
+ - template_src: templates/quadlet-basic.network.j2
state: started
- name: quadlet-basic-unused-network
type: network
Network: {}
- name: quadlet-basic-mysql
type: volume
- Volume:
- VolumeName: quadlet-basic-mysql-name
+ Volume: "{{ {} if podman_version is version('4.7.0', '<')
+ else {'VolumeName': 'quadlet-basic-mysql-name'} }}"
- name: quadlet-basic-unused-volume
type: volume
Volume: {}
--
2.47.0
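For the loginctl change: -P is a newer shorthand for --value -p (print only the property's value), and only the long form exists on the older systemd shipped in EL 8.8/9.2. A portable sketch (linger_user is a hypothetical variable):

```yaml
- name: Get the user session state portably
  command: loginctl show-user --value -p State {{ linger_user | quote }}
  register: user_state
  changed_when: false
```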


@@ -1,738 +0,0 @@
Changelog
=========
[1.23.0-3] - 2024-09-11
----------------------------
### New Features
- [bootloader - bootloader role tests do not work on ostree [rhel-8.10.z]](https://issues.redhat.com/browse/RHEL-58917)
- [logging - RFE - system-roles - logging: Add truncate options for local file inputs [rhel-8.10.z]](https://issues.redhat.com/browse/RHEL-58485)
- [logging - redhat.rhel_system_roles.logging role fails to process logging_outputs: of type: "custom" [rhel-8.10.z]](https://issues.redhat.com/browse/RHEL-58481)
- [logging - [RFE] Add the umask settings or enable a variable in linux-system-roles.logging [rhel-8.10.z]](https://issues.redhat.com/browse/RHEL-58477)
- [nbde_client - feat: Allow initrd configuration to be skipped [rhel-8.10.z]](https://issues.redhat.com/browse/RHEL-58519)
### Bug Fixes
- [ - package rhel-system-roles.noarch does not provide docs for ansible-doc [rhel-8.10.z]](https://issues.redhat.com/browse/RHEL-58465)
- [ad_integration - fix: Sets domain name lower case in realmd.conf section header [rhel-8.10.z]](https://issues.redhat.com/browse/RHEL-58494)
- [bootloader - fix: Set user.cfg path to /boot/grub2/ on EL 9 UEFI [rhel-8]](https://issues.redhat.com/browse/RHEL-45711)
- [cockpit - cockpit install all wildcard match does not work in newer el9 [rhel-8.10.z]](https://issues.redhat.com/browse/RHEL-58515)
- [logging - Setup imuxsock using rhel-system-roles.logging causing an error EL8](https://issues.redhat.com/browse/RHEL-37550)
- [podman - fix: proper cleanup for networks; ensure cleanup of resources [rhel-8.10.z]](https://issues.redhat.com/browse/RHEL-58525)
- [podman - fix: grab name of network to remove from quadlet file [rhel-8.10.z]](https://issues.redhat.com/browse/RHEL-58511)
- [podman - Create podman secret when skip_existing=True and it does not exist [rhel-8.10.z]](https://issues.redhat.com/browse/RHEL-58507)
- [podman - fix: do not use become for changing hostdir ownership, and expose subuid/subgid info [rhel-8.10.z]](https://issues.redhat.com/browse/RHEL-58503)
- [podman - fix: use correct user for cancel linger file name [rhel-8.10.z]](https://issues.redhat.com/browse/RHEL-58498)
- [podman - redhat.rhel_system_roles.podman fails to configure and run containers with podman rootless using different username and groupname. [rhel-8.10.z]](https://issues.redhat.com/browse/RHEL-58460)
- [sshd - second SSHD service broken [rhel-8.10.z]](https://issues.redhat.com/browse/RHEL-58473)
- [storage - rhel-system-role.storage is not idempotent [rhel-8.10.z]](https://issues.redhat.com/browse/RHEL-58469)
- [timesync - System Roles: No module documentation [rhel-8.10.z]](https://issues.redhat.com/browse/RHEL-58489)
[1.23.0] - 2024-01-15
----------------------------
### New Features
- [RHEL for Edge support in system roles](https://issues.redhat.com/browse/RHEL-15872)
- [ad_integration - feat: Add sssd custom settings](https://issues.redhat.com/browse/RHEL-17667)
- [ad_integration - Enable AD dynamic DNS updates](https://issues.redhat.com/browse/RHEL-1119)
- [ad_integration - feat: add ad_integration_preserve_authselect_profile](https://issues.redhat.com/browse/RHEL-21383)
- [ad_integration - feat: Add SSSD parameters support](https://issues.redhat.com/browse/RHEL-21134)
- [bootloader - Create bootloader role (MVP)](https://issues.redhat.com/browse/RHEL-3241)
- [fapolicyd - feat: Import code for fapolicyd system role](https://issues.redhat.com/browse/RHEL-16542)
- [ha_cluster - [RFE] HA Cluster system role should be able to enable Resilient Storage repository](https://issues.redhat.com/browse/RHEL-14090)
- [ha_cluster - [FutureFeature] Allow ha_cluster role to configure fencing topology](https://issues.redhat.com/browse/RHEL-4624)
- [ha_cluster - [FutureFeature] Allow ha_cluster role to configure all qdevice options](https://issues.redhat.com/browse/RHEL-3264)
- [ha_cluster - Setting cluster members attributes](https://issues.redhat.com/browse/RHEL-22108)
- [journald - feat: Add support for ForwardToSyslog](https://issues.redhat.com/browse/RHEL-21123)
- [logging - feat: Add support for the global config option preserveFQDN with a new logg…](https://issues.redhat.com/browse/RHEL-15933)
- [logging - feat: Add support for general queue and general action parameters](https://issues.redhat.com/browse/RHEL-15440)
- [metrics - [RFE] Metrics system role support for configuring PMIE webhooks](https://issues.redhat.com/browse/RHEL-18170)
- [network - Add blackhole type route](https://issues.redhat.com/browse/RHEL-21491)
- [postgresql - feat: Enable support for Postgresql 16](https://issues.redhat.com/browse/RHEL-18963)
- [rhc - support RHEL 7 managed nodes](https://issues.redhat.com/browse/RHEL-16977)
- [rhc - new rhc_insights.ansible_host parameter](https://issues.redhat.com/browse/RHEL-16975)
- [rhc - new rhc_insights.display_name parameter](https://issues.redhat.com/browse/RHEL-16965)
- [snapshot - New Role for storage snapshot management (lvm, etc.)](https://issues.redhat.com/browse/RHEL-16553)
- [sshd - ansible-sshd Manage SSH certificates](https://issues.redhat.com/browse/RHEL-5985)
- [storage - feat: Support for creating volumes without a FS](https://issues.redhat.com/browse/RHEL-16213)
- [storage - Basic support for creating shared logical volumes (RHEL 8)](https://issues.redhat.com/browse/RHEL-14022)
### Bug Fixes
- [ha_cluster - high-availability firewall service is not added on qdevice node](https://issues.redhat.com/browse/RHEL-17874)
- [ha_cluster - Timeout issue between SBD with delay-start and systemd unit](https://issues.redhat.com/browse/RHEL-4684)
- [kdump - fix: retry read of kexec_crash_size](https://issues.redhat.com/browse/RHEL-3354)
- [keylime_server - won't detect registrar start failure](https://issues.redhat.com/browse/RHEL-21946)
- [logging - fix: check that logging_max_message_size is set, not rsyslog_max_message_size](https://issues.redhat.com/browse/RHEL-15038)
- [nbde_server - fix: Allow tangd socket override directory to be managed outside of the role](https://issues.redhat.com/browse/RHEL-25509)
- [network - Ansible RHEL network system role issue with ipv6.routing-rules the prefix length for 'from' cannot be zero"](https://issues.redhat.com/browse/RHEL-16501)
- [podman - fix: cast secret data to string in order to allow JSON valued strings](https://issues.redhat.com/browse/RHEL-22310)
- [podman - fix: name of volume quadlet service should be basename-volume.service](https://issues.redhat.com/browse/RHEL-21402)
- [podman - fix: add no_log: true for tasks that can log secret data](https://issues.redhat.com/browse/RHEL-19242)
- [podman - fix: user linger needed before secrets](https://issues.redhat.com/browse/RHEL-22229)
- [postgresql - PostgreSQL system role: unable to install PostgreSQL version 15 on RHEL 9](https://issues.redhat.com/browse/RHEL-21400)
- [selinux - fix: Use `ignore_selinux_state` module option](https://issues.redhat.com/browse/RHEL-15871)
- [selinux - fix: Print an error message when module to be created doesn't exist](https://issues.redhat.com/browse/RHEL-19044)
- [selinux - fix: no longer use "item" as a loop variable](https://issues.redhat.com/browse/RHEL-19042)
[1.22.0] - 2023-08-15
----------------------------
### New Features
- [ALL - fingerprint in config files managed by roles](https://bugzilla.redhat.com/show_bug.cgi?id=2186910)
- [ad_integration - add ad_integration_force_rejoin](https://bugzilla.redhat.com/show_bug.cgi?id=2211723)
- [certificate - add mode parameter to change permissions for cert files](https://bugzilla.redhat.com/show_bug.cgi?id=2218204)
- [firewall - missing module in linux-system-roles.firewall to create an ipset](https://bugzilla.redhat.com/show_bug.cgi?id=2140880)
- [firewall - fix: reload on resetting to defaults](https://bugzilla.redhat.com/show_bug.cgi?id=2224648)
- [firewall - should have option to disable conflicting services](https://bugzilla.redhat.com/show_bug.cgi?id=2222809)
- [ha_cluster - Add possibility to load SBD watchdog kernel modules](https://bugzilla.redhat.com/show_bug.cgi?id=2190478)
- [ha_cluster - cluster and quorum can have distinct passwords](https://bugzilla.redhat.com/show_bug.cgi?id=2216485)
- [ha_cluster - support for resource and operation defaults](https://bugzilla.redhat.com/show_bug.cgi?id=2190483)
- [kdump - support auto_reset_crashkernel, dracut_args, deprecate /etc/sysconfig/kdump](https://bugzilla.redhat.com/show_bug.cgi?id=2211272)
- [keylime_server - system role for managing keylime servers](https://bugzilla.redhat.com/show_bug.cgi?id=2224387)
- [network - Support configuring auto-dns setting](https://bugzilla.redhat.com/show_bug.cgi?id=2211273)
- [network - Support no-aaaa DNS option](https://bugzilla.redhat.com/show_bug.cgi?id=2218595)
- [podman - allow container networking configuration](https://bugzilla.redhat.com/show_bug.cgi?id=2220963)
- [podman - support for healthchecks and healthcheck actions](https://bugzilla.redhat.com/show_bug.cgi?id=2220961)
- [podman - support quadlet units](https://bugzilla.redhat.com/show_bug.cgi?id=2220962)
- [postgresql - [RFE] system role for PostgreSQL management](https://bugzilla.redhat.com/show_bug.cgi?id=2151371)
- [rhc - implement rhc_proxy.scheme](https://bugzilla.redhat.com/show_bug.cgi?id=2211778)
- [rhc - [RFE] New role for Red Hat subscription management, insights management [rhel-8.9.0]](https://bugzilla.redhat.com/show_bug.cgi?id=2179016)
- [ssh - add ssh_backup option with default true](https://bugzilla.redhat.com/show_bug.cgi?id=2216759)
- [storage - RFE for the storage system role to support configuring the stripe size for RAID LVM volumes](https://bugzilla.redhat.com/show_bug.cgi?id=2141961)
- [storage - [RFE] user-specified mount point owner and permissions](https://bugzilla.redhat.com/show_bug.cgi?id=2181661)
- [systemd - system role for managing systemd units](https://bugzilla.redhat.com/show_bug.cgi?id=2224388)
### Bug Fixes
- [ALL - facts being gathered unnecessarily](https://bugzilla.redhat.com/show_bug.cgi?id=2223036)
- [ad_integration - leaks credentials when in check_mode](https://bugzilla.redhat.com/show_bug.cgi?id=2233183)
- [certificate - rhel-system-roles.certificate does not re-issue after updating key_size](https://bugzilla.redhat.com/show_bug.cgi?id=2186057)
- [firewall - fix: reload on resetting to defaults](https://bugzilla.redhat.com/show_bug.cgi?id=2224648)
- [firewall - Check mode fails with replacing previous rules](https://issues.redhat.com/browse/RHEL-899)
- [firewall - Check mode fails when creating new firewall service](https://bugzilla.redhat.com/show_bug.cgi?id=2222433)
- [firewall - Ansible RHEL firewall system role not idempotent when configuring the interface using the role in rhel9](https://issues.redhat.com/browse/RHEL-918)
- [firewall - Don't install python(3)-firewall; it's a dependency of firewalld](https://bugzilla.redhat.com/show_bug.cgi?id=2216521)
- [firewall - fix: files: overwrite firewalld.conf on previous replaced](https://issues.redhat.com/browse/RHEL-1496)
- [kdump - use failure_action instead of default on EL9 and later](https://issues.redhat.com/browse/RHEL-907)
- [kdump - role: "Write new authorized_keys if needed" task idempotency issues](https://bugzilla.redhat.com/show_bug.cgi?id=2232391)
- [kdump - system role fails if kdump_ssh_user doesn't have a .ssh/authorized_keys file in home directory](https://bugzilla.redhat.com/show_bug.cgi?id=2232392)
- [kdump - fix: ensure .ssh directory exists for kdump_ssh_user on kdump_ssh_server](https://issues.redhat.com/browse/RHEL-1398)
- [kdump - fix: Ensure authorized_keys management works with multiple hosts](https://issues.redhat.com/browse/RHEL-1500)
- [podman - Podman system role: Unable to use podman_registries_conf to set unqualified-search-registries](https://bugzilla.redhat.com/show_bug.cgi?id=2226077)
- [rhc - system role does not apply Insights tags](https://bugzilla.redhat.com/show_bug.cgi?id=2209441)
- [storage - Cannot set chunk size for RAID: Unsupported parameters for (blivet) module: pools.raid_chunk_size](https://bugzilla.redhat.com/show_bug.cgi?id=2193057)
- [storage - RAID volume pre cleanup - remove existing data from member disks as needed before creation](https://bugzilla.redhat.com/show_bug.cgi?id=2224094)
- [storage - Storage: mounted devices that are in use cannot be resized](https://bugzilla.redhat.com/show_bug.cgi?id=2168738)
- [storage - fix: use stat.pw_name, stat.gr_name instead of owner, group](https://issues.redhat.com/browse/RHEL-1498)
- [tlog - use the proxy provider - the files provider is deprecated in sssd](https://bugzilla.redhat.com/show_bug.cgi?id=2191702)
[1.21.1] - 2023-03-16
----------------------------
### New Features
- [rhc - New Role - Red Hat subscription management, insights management](https://bugzilla.redhat.com/show_bug.cgi?id=2144877)
### Bug Fixes
- none
[1.21.0] - 2023-02-20
----------------------------
### New Features
- [ad_integration - [RFE] new role to support AD integration, join to AD domain](https://bugzilla.redhat.com/show_bug.cgi?id=2144876)
- [cockpit - [RFE] convert cockpit role to use firewall, selinux role, and certificate role](https://bugzilla.redhat.com/show_bug.cgi?id=2137667)
- [ha_cluster - Allow quorum device configuration](https://bugzilla.redhat.com/show_bug.cgi?id=2143814)
- [ha_cluster - [RFE] convert ha_cluster role to use firewall, selinux and certificate role](https://bugzilla.redhat.com/show_bug.cgi?id=2130019)
- [journald - New role - journald - manage systemd-journald](https://bugzilla.redhat.com/show_bug.cgi?id=2165176)
- [logging - [RFE] convert logging role to use firewall, selinux role, and certificate role](https://bugzilla.redhat.com/show_bug.cgi?id=2130362)
- [metrics - [RFE] convert metrics role to use firewall and selinux role](https://bugzilla.redhat.com/show_bug.cgi?id=2133532)
- [nbde_server - [RFE] convert nbde_server role to use firewall and selinux role](https://bugzilla.redhat.com/show_bug.cgi?id=2133931)
- [network - Support cloned MAC address](https://bugzilla.redhat.com/show_bug.cgi?id=2143458)
- [network - [RFE] Support setting the metric of the default route for initscripts provider](https://bugzilla.redhat.com/show_bug.cgi?id=2134201)
- [network - [RFE] Support the DNS priority](https://bugzilla.redhat.com/show_bug.cgi?id=2133856)
- [network - Support looking up named route table in routing rule](https://bugzilla.redhat.com/show_bug.cgi?id=2129620)
- [podman - [RFE] role for managing podman containers and systemd](https://bugzilla.redhat.com/show_bug.cgi?id=2066864)
- [postfix - [RFE] convert postfix role to use firewall and selinux role](https://bugzilla.redhat.com/show_bug.cgi?id=2130332)
- [selinux - add support for the 'local' parameter](https://bugzilla.redhat.com/show_bug.cgi?id=2143385)
- [vpn - Add parameters shared_key_content, ike, esp, type, leftid, rightid](https://bugzilla.redhat.com/show_bug.cgi?id=2119600)
- [vpn - [RFE] convert vpn role to use firewall and selinux role](https://bugzilla.redhat.com/show_bug.cgi?id=2130345)
### Bug Fixes
- [ha_cluster - Fix stonith watchdog timeout](https://bugzilla.redhat.com/show_bug.cgi?id=2167941)
- [ha_cluster - Allow enabled SBD on disabled cluster](https://bugzilla.redhat.com/show_bug.cgi?id=2153081)
- [ha_cluster - use no_log in tasks looping over potentially secret parameters](https://bugzilla.redhat.com/show_bug.cgi?id=2127497)
- [nbde_client - nbde_client_clevis fails with a traceback and prints sensitive data](https://bugzilla.redhat.com/show_bug.cgi?id=2159972)
- [nbde_client - must handle clevis-luks-askpass and clevis-luks-askpass@ systemd unit names](https://bugzilla.redhat.com/show_bug.cgi?id=2126960)
- [network - should route traffic via correct bond](https://bugzilla.redhat.com/show_bug.cgi?id=2168733)
- [selinux - managing modules is not idempotent](https://bugzilla.redhat.com/show_bug.cgi?id=2164879)
- [sshd,ssh,timesync - Unexpected templating type error - expected str instance, int found](https://bugzilla.redhat.com/show_bug.cgi?id=2143401)
- [tlog - Unconditionally enable the files provider](https://bugzilla.redhat.com/show_bug.cgi?id=2153080)
[1.20.0] - 2022-08-09
----------------------------
### New Features
- [cockpit - Add customization of port](https://bugzilla.redhat.com/show_bug.cgi?id=2115159)
- [firewall - RFE: firewall-system-role: add ability to add interface to zone by PCI device ID](https://bugzilla.redhat.com/show_bug.cgi?id=2100939)
- [firewall - support for firewall_config - gather firewall facts](https://bugzilla.redhat.com/show_bug.cgi?id=2115160)
- [logging - [RFE] Support startmsg.regex and endmsg.regex in the files inputs](https://bugzilla.redhat.com/show_bug.cgi?id=2112143)
- [selinux - Added setting of seuser and selevel for completeness](https://bugzilla.redhat.com/show_bug.cgi?id=2115162)
### Bug Fixes
- [nbde_client - Sets proper spacing for parameter rd.neednet=1](https://bugzilla.redhat.com/show_bug.cgi?id=2115161)
- [network - fix IPRouteUtils.get_route_tables_mapping() to accept any whitespace sequence](https://bugzilla.redhat.com/show_bug.cgi?id=2115884)
- [ssh sshd - ssh, sshd: RSAMinSize parameter definition is missing](https://bugzilla.redhat.com/show_bug.cgi?id=2109997)
- [storage - [RHEL8] [WARNING]: The loop variable 'storage_test_volume' is already in use. You should set the `loop_var` value in the `loop_control` option for the task to something else to avoid variable collisions and unexpected behavior.](https://bugzilla.redhat.com/show_bug.cgi?id=2082391)
[1.19.3] - 2022-07-01
----------------------------
### New Features
- [firewall - support add/modify/delete services](https://bugzilla.redhat.com/show_bug.cgi?id=2100297)
- [network - [RFE] [network] Support managing the network through nmstate schema](https://bugzilla.redhat.com/show_bug.cgi?id=2100979)
- [storage - support for adding/removing disks to/from storage pools](https://bugzilla.redhat.com/show_bug.cgi?id=2066880)
- [storage - support for attaching cache volumes to existing volumes](https://bugzilla.redhat.com/show_bug.cgi?id=2066881)
### Bug Fixes
- [firewall - forward_port should accept list of string or list of dict](https://bugzilla.redhat.com/show_bug.cgi?id=2101607)
- [metrics - document minimum supported redis version required by rhel-system-roles](https://bugzilla.redhat.com/show_bug.cgi?id=2100285)
- [metrics - restart pmie, pmlogger if changed, do not wait for handler](https://bugzilla.redhat.com/show_bug.cgi?id=2100298)
[1.19.2] - 2022-06-15
----------------------------
### New Features
- [sshd - system role should be able to optionally manage /etc/ssh/sshd_config on RHEL 9](https://bugzilla.redhat.com/show_bug.cgi?id=2086935)
### Bug Fixes
- none
[1.19.1] - 2022-06-13
----------------------------
### New Features
- [storage - support for creating and managing LVM thin pools/LVs](https://bugzilla.redhat.com/show_bug.cgi?id=2066876)
- [All roles should support running with gather_facts: false](https://bugzilla.redhat.com/show_bug.cgi?id=2079008)
### Bug Fixes
- none
[1.19.0] - 2022-06-06
----------------------------
### New Features
- [storage - support for creating and managing LVM thin pools/LVs](https://bugzilla.redhat.com/show_bug.cgi?id=2066876)
- [firewall - state no longer required for masquerade and ICMP block inversion](https://bugzilla.redhat.com/show_bug.cgi?id=2093437)
### Bug Fixes
- [storage - role raid_level "striped" is not supported](https://bugzilla.redhat.com/show_bug.cgi?id=2083426)
[1.18.0] - 2022-05-26
----------------------------
### New Features
- [firewall - [Improvement] Allow System Role to reset to default Firewalld Settings](https://bugzilla.redhat.com/show_bug.cgi?id=2043009)
- [metrics - [RFE] add an option to the metrics role to enable postfix metric collection](https://bugzilla.redhat.com/show_bug.cgi?id=2079114)
- [network - Rework the infiniband support](https://bugzilla.redhat.com/show_bug.cgi?id=2086869)
- [sshd - system role should not assume that RHEL 9 /etc/ssh/sshd_config has "Include > /etc/ssh/sshd_config.d/*.conf"](https://bugzilla.redhat.com/show_bug.cgi?id=2086934)
- [sshd - system role should be able to optionally manage /etc/ssh/sshd_config on RHEL 9](https://bugzilla.redhat.com/show_bug.cgi?id=2086935)
### Bug Fixes
- [storage - role cannot set mount_options for volumes](https://bugzilla.redhat.com/show_bug.cgi?id=2083378)
[1.17.0] - 2022-04-25
----------------------------
### New Features
- [All roles should support running with gather_facts: false](https://bugzilla.redhat.com/show_bug.cgi?id=2079008)
- [ha_cluster - support advanced corosync configuration](https://bugzilla.redhat.com/show_bug.cgi?id=2065339)
- [ha_cluster - support SBD fencing](https://bugzilla.redhat.com/show_bug.cgi?id=2066868)
- [ha_cluster - add support for configuring bundle resources](https://bugzilla.redhat.com/show_bug.cgi?id=2073518)
- [logging - Logging - RFE - support template, severity and facility options](https://bugzilla.redhat.com/show_bug.cgi?id=2075116)
- [metrics - consistently use ansible_managed in configuration files managed by role [rhel-8.7.0]](https://bugzilla.redhat.com/show_bug.cgi?id=2065215)
- [metrics - [RFE] add an option to the metrics role to enable postfix metric collection](https://bugzilla.redhat.com/show_bug.cgi?id=2079114)
- [network - [RFE] Extend rhel-system-roles.network feature set to support routing rules](https://bugzilla.redhat.com/show_bug.cgi?id=1996731)
- [network - consistently use ansible_managed in configuration files managed by role [rhel-8.7.0]](https://bugzilla.redhat.com/show_bug.cgi?id=2065670)
- [postfix - consistently use ansible_managed in configuration files managed by role [rhel-8.7.0]](https://bugzilla.redhat.com/show_bug.cgi?id=2065216)
- [postfix - Postfix RHEL System Role should provide the ability to replace config and reset configuration back to default [rhel-8.7.0]](https://bugzilla.redhat.com/show_bug.cgi?id=2065218)
### Bug Fixes
- [firewall - Firewall system role Ansible deprecation warning related to "include"](https://bugzilla.redhat.com/show_bug.cgi?id=2078650)
- [kernel_settings - error configobj not found on RHEL 8.6 managed hosts [rhel-8.7.0]](https://bugzilla.redhat.com/show_bug.cgi?id=2060378)
- [metrics - Metrics role, with "metrics_from_mssql" option does not configure /var/lib/pcp/pmdas/mssql/mssql.conf on first run [rhel-8.7.0]](https://bugzilla.redhat.com/show_bug.cgi?id=2060377)
- [nbde_client - NBDE client system role does not support servers with static IP addresses [rhel-8.7.0]](https://bugzilla.redhat.com/show_bug.cgi?id=2071011)
- [network - bond: fix typo in supporting the infiniband ports in active-backup mode](https://bugzilla.redhat.com/show_bug.cgi?id=2064067)
- [sshd - FIPS mode detection in SSHD role is wrong](https://bugzilla.redhat.com/show_bug.cgi?id=2075338)
- [storage - RFE storage Less verbosity by default](https://bugzilla.redhat.com/show_bug.cgi?id=2056480)
- [tlog - Tlog role - Enabling session recording configuration does not work due to RHEL9 SSSD files provider default](https://bugzilla.redhat.com/show_bug.cgi?id=2072749)
[1.16.3] - 2022-04-07
----------------------------
### New Features
- none
### Bug Fixes
- [tlog - Tlog role - Enabling session recording configuration does not work due to RHEL9 SSSD files provider default](https://bugzilla.redhat.com/show_bug.cgi?id=2072749)
[1.16.2] - 2022-04-06
----------------------------
### New Features
- [nbde_client - NBDE client system role does not support servers with static IP addresses](https://bugzilla.redhat.com/show_bug.cgi?id=1985022)
### Bug Fixes
- none
[1.16.1] - 2022-03-29
----------------------------
### New Features
- [nbde_client - NBDE client system role does not support servers with static IP addresses](https://bugzilla.redhat.com/show_bug.cgi?id=1985022)
### Bug Fixes
- none
[1.16.0] - 2022-03-22
----------------------------
### New Features
- [network - consistently use ansible_managed in configuration files managed by role](https://bugzilla.redhat.com/show_bug.cgi?id=2057656)
- [metrics - consistently use ansible_managed in configuration files managed by role](https://bugzilla.redhat.com/show_bug.cgi?id=2057645)
- [postfix - consistently use ansible_managed in configuration files managed by role](https://bugzilla.redhat.com/show_bug.cgi?id=2057661)
- [postfix - Postfix RHEL System Role should provide the ability to replace config and reset configuration back to default](https://bugzilla.redhat.com/show_bug.cgi?id=2044657)
### Bug Fixes
- [network - bond: fix typo in supporting the infiniband ports in active-backup mode](https://bugzilla.redhat.com/show_bug.cgi?id=2064388)
[1.15.1] - 2022-03-03
----------------------------
### New Features
- none
### Bug Fixes
- [kernel_settings - error configobj not found on RHEL 8.6 managed hosts](https://bugzilla.redhat.com/show_bug.cgi?id=2058772)
- [timesync - timesync: basic-smoke test failure in timesync/tests_ntp.yml](https://bugzilla.redhat.com/show_bug.cgi?id=2058645)
[1.15.0] - 2022-03-01
----------------------------
### New Features
- [firewall - [RFE] - Firewall RHEL System Role should be able to set default zone](https://bugzilla.redhat.com/show_bug.cgi?id=2022458)
### Bug Fixes
- [metrics - Metrics role, with "metrics_from_mssql" option does not configure /var/lib/pcp/pmdas/mssql/mssql.conf on first run](https://bugzilla.redhat.com/show_bug.cgi?id=2058655)
- [firewall - ensure target changes take effect immediately](https://bugzilla.redhat.com/show_bug.cgi?id=2057172)
[1.14.0] - 2022-02-14
----------------------------
### New Features
- [network - [RFE] Add more bonding options to rhel-system-roles.network](https://bugzilla.redhat.com/show_bug.cgi?id=2008931)
- [certificate - should consistently use ansible_managed in hook scripts](https://bugzilla.redhat.com/show_bug.cgi?id=2054364)
- [tlog - consistently use ansible_managed in configuration files managed by role](https://bugzilla.redhat.com/show_bug.cgi?id=2054363)
- [vpn - consistently use ansible_managed in configuration files managed by role](https://bugzilla.redhat.com/show_bug.cgi?id=2054365)
### Bug Fixes
- [ha_cluster - set permissions for haclient group](https://bugzilla.redhat.com/show_bug.cgi?id=2049747)
[1.13.1] - 2022-02-08
----------------------------
### New Features
- none
### Bug Fixes
- [vpn - vpn: template error while templating string: no filter named 'vpn_ipaddr'](https://bugzilla.redhat.com/show_bug.cgi?id=2050341)
- [kdump - kdump: Unable to start service kdump: Job for kdump.service failed because the control process exited with error code.](https://bugzilla.redhat.com/show_bug.cgi?id=2052105)
[1.13.0] - 2022-02-01
----------------------------
### New Features
- [storage - RFE: Add support for RAID volumes (lvm-only)](https://bugzilla.redhat.com/show_bug.cgi?id=2016514)
- [storage - RFE: Add support for cached volumes (lvm-only)](https://bugzilla.redhat.com/show_bug.cgi?id=2016511)
- [nbde_client - NBDE client system role does not support servers with static IP addresses](https://bugzilla.redhat.com/show_bug.cgi?id=1985022)
- [ha_cluster - [RFE] ha_cluster - Support for creating resource constraints (Location, Ordering, etc.)](https://bugzilla.redhat.com/show_bug.cgi?id=2041635)
- [network - RFE: Support Routing Tables in static routes in Network Role](https://bugzilla.redhat.com/show_bug.cgi?id=2031521)
### Bug Fixes
- [metrics - role can't be re-run if the Grafana admin password has been changed](https://bugzilla.redhat.com/show_bug.cgi?id=1967321)
- [network - Failure to activate connection: nm-manager-error-quark: No suitable device found for this connection](https://bugzilla.redhat.com/show_bug.cgi?id=2034908)
- [network - Set DNS search setting only for enabled IP protocols](https://bugzilla.redhat.com/show_bug.cgi?id=2041627)
[1.12.0] - 2022-01-27
----------------------------
### New Features
- none
### Bug Fixes
- [logging - Logging role "logging_purge_confs" option not properly working](https://bugzilla.redhat.com/show_bug.cgi?id=2040812)
- [kernel_settings - role should use ansible_managed in its configuration file](https://bugzilla.redhat.com/show_bug.cgi?id=2047504)
[1.11.0] - 2022-01-20
----------------------------
### New Features
- [Support ansible-core 2.11+](https://bugzilla.redhat.com/show_bug.cgi?id=2012316)
- [cockpit - Please include "cockpit" role](https://bugzilla.redhat.com/show_bug.cgi?id=2021661)
- [ssh - ssh/tests_all_options.yml: "assertion": "'StdinNull yes' in config.content | b64decode ", failure](https://bugzilla.redhat.com/show_bug.cgi?id=2029614)
### Bug Fixes
- [timesync - timesync: Failure related to missing ntp/ntpd package/service on RHEL-9 host](https://bugzilla.redhat.com/show_bug.cgi?id=2029463)
- [logging - role missing quotes for immark module interval value](https://bugzilla.redhat.com/show_bug.cgi?id=2021678)
- [kdump - kdump: support reboot required and reboot ok](https://bugzilla.redhat.com/show_bug.cgi?id=2029605)
- [sshd - should detect FIPS mode and handle tasks correctly in FIPS mode](https://bugzilla.redhat.com/show_bug.cgi?id=1979714)
[1.10.0] - 2021-11-08
----------------------------
### New Features
- [cockpit - Please include "cockpit" role](https://bugzilla.redhat.com/show_bug.cgi?id=2021661)
- [firewall - Ansible Roles for RHEL Firewall](https://bugzilla.redhat.com/show_bug.cgi?id=1854988)
- [firewall - RFE: firewall-system-role: add ability to add-source](https://bugzilla.redhat.com/show_bug.cgi?id=1932678)
- [firewall - RFE: firewall-system-role: allow user defined zones](https://bugzilla.redhat.com/show_bug.cgi?id=1850768)
- [firewall - RFE: firewall-system-role: allow specifying the zone](https://bugzilla.redhat.com/show_bug.cgi?id=1850753)
- [Support ansible-core 2.11+](https://bugzilla.redhat.com/show_bug.cgi?id=2012316)
- [network - role: Allow to specify PCI address to configure profiles](https://bugzilla.redhat.com/show_bug.cgi?id=1695634)
- [network - [RFE] support wifi Enhanced Open (OWE)](https://bugzilla.redhat.com/show_bug.cgi?id=1993379)
- [network - [RFE] support WPA3 Simultaneous Authentication of Equals(SAE)](https://bugzilla.redhat.com/show_bug.cgi?id=1993311)
- [network - [Network] RFE: Support ignoring default gateway retrieved by DHCP/IPv6-RA](https://bugzilla.redhat.com/show_bug.cgi?id=1897565)
- [logging - [RFE] logging - Add user and password](https://bugzilla.redhat.com/show_bug.cgi?id=2010327)
### Bug Fixes
- [Replace `# {{ ansible_managed }}` with `{{ ansible_managed | comment }}`](https://bugzilla.redhat.com/show_bug.cgi?id=2006230)
- [logging - role missing quotes for immark module interval value](https://bugzilla.redhat.com/show_bug.cgi?id=2021678)
- [logging - Logging - Performance improvement](https://bugzilla.redhat.com/show_bug.cgi?id=2005727)
- [nbde_client - add regenerate-all to the dracut command](https://bugzilla.redhat.com/show_bug.cgi?id=2021682)
- [certificate - certificates: "group" option keeps certificates inaccessible to the group](https://bugzilla.redhat.com/show_bug.cgi?id=2021683)
[1.7.3] - 2021-08-26
----------------------------
### New Features
- [storage - RFE: Request that VDO be added to the Ansible (redhat-system-roles)](https://bugzilla.redhat.com/show_bug.cgi?id=1978488)
### Bug Fixes
- none
[1.7.2] - 2021-08-24
----------------------------
### New Features
- none
### Bug Fixes
- [logging - Update the certificates copy tasks](https://bugzilla.redhat.com/show_bug.cgi?id=1996777)
[1.7.1] - 2021-08-16
----------------------------
### New Features
- none
### Bug Fixes
- [metrics - role: the bpftrace role does not properly configure bpftrace agent](https://bugzilla.redhat.com/show_bug.cgi?id=1994180)
[1.7.0] - 2021-08-12
----------------------------
### New Features
- [drop support for Ansible 2.8](https://bugzilla.redhat.com/show_bug.cgi?id=1989197)
### Bug Fixes
- [sshd - sshd: failed to validate: error:Missing Match criteria for all Bad Match condition](https://bugzilla.redhat.com/show_bug.cgi?id=1991598)
[1.6.6] - 2021-08-06
----------------------------
### New Features
- [logging - [RFE] logging - Add a support for list value to server_host in the elasticsearch output](https://bugzilla.redhat.com/show_bug.cgi?id=1986460)
### Bug Fixes
- none
[1.6.2] - 2021-07-30
----------------------------
### New Features
- none
### Bug Fixes
- [metrics - role: Grafana dashboard not working after metrics role run unless services manually restarted](https://bugzilla.redhat.com/show_bug.cgi?id=1984150)
[1.6.0] - 2021-07-28
----------------------------
### New Features
- [storage - [RFE] storage: support volume sizes as a percentage of pool](https://bugzilla.redhat.com/show_bug.cgi?id=1984583)
### Bug Fixes
- none
[1.5.0] - 2021-07-15
----------------------------
### New Features
- [ha_cluster - RFE: ha_cluster - add pacemaker cluster properties configuration](https://bugzilla.redhat.com/show_bug.cgi?id=1982913)
### Bug Fixes
- none
[1.4.3] - 2021-07-15
----------------------------
### New Features
- [crypto_policies - rename 'policy modules' to 'subpolicies'](https://bugzilla.redhat.com/show_bug.cgi?id=1982896)
### Bug Fixes
- none
[1.4.2] - 2021-07-15
----------------------------
### New Features
- [storage - storage: relabel doesn't support](https://bugzilla.redhat.com/show_bug.cgi?id=1876315)
### Bug Fixes
- none
[1.4.1] - 2021-07-09
----------------------------
### New Features
- none
### Bug Fixes
- [network - Re-running the network system role results in "changed: true" when nothing has actually changed](https://bugzilla.redhat.com/show_bug.cgi?id=1943384)
[1.4.0] - 2021-07-08
----------------------------
### New Features
- [storage - RFE: Request that VDO be added to the Ansible (redhat-system-roles)](https://bugzilla.redhat.com/show_bug.cgi?id=1882475)
### Bug Fixes
- none
[1.3.0] - 2021-06-23
----------------------------
### New Features
- [ha_cluster - RFE: add pacemaker resources configuration](https://bugzilla.redhat.com/show_bug.cgi?id=1963283)
- [network - [Network] RFE: Support ignoring default gateway retrieved by DHCP/IPv6-RA](https://bugzilla.redhat.com/show_bug.cgi?id=1897565)
- [storage - RFE: Request that VDO be added to the Ansible (redhat-system-roles)](https://bugzilla.redhat.com/show_bug.cgi?id=1882475)
- [sshd - RFE: sshd - support for appending a snippet to configuration file](https://bugzilla.redhat.com/show_bug.cgi?id=1970642)
- [timesync - RFE: timesync support for Network Time Security (NTS)](https://bugzilla.redhat.com/show_bug.cgi?id=1970664)
### Bug Fixes
- [postfix - Postfix RHEL system role README.md missing variables under the "Role Variables" section](https://bugzilla.redhat.com/show_bug.cgi?id=1961858)
- [postfix - the postfix role is not idempotent](https://bugzilla.redhat.com/show_bug.cgi?id=1960375)
- [selinux - task for semanage says Fedora in name but also runs on RHEL/CentOS 8](https://bugzilla.redhat.com/show_bug.cgi?id=1966681)
- [metrics - role task to enable logging for targeted hosts not working](https://bugzilla.redhat.com/show_bug.cgi?id=1967335)
- [sshd ssh - Unable to set sshd_hostkey_group and sshd_hostkey_mode](https://bugzilla.redhat.com/show_bug.cgi?id=1966711)
[1.2.3] - 2021-06-17
----------------------------
### New Features
- [main.yml: Add EL 9 support for all roles](https://bugzilla.redhat.com/show_bug.cgi?id=1952887)
### Bug Fixes
- none
[1.2.2] - 2021-06-15
----------------------------
### New Features
- [timesync - Add hybrid_e2e option to PTP domain](https://bugzilla.redhat.com/show_bug.cgi?id=1957849)
### Bug Fixes
- [Internal links in README.md are broken](https://bugzilla.redhat.com/show_bug.cgi?id=1962976)
- [ha_cluster - cannot read preshared key in binary format](https://bugzilla.redhat.com/show_bug.cgi?id=1952620)
[1.2.1] - 2021-05-21
----------------------------
### New Features
- none
### Bug Fixes
- [Internal links in README.md are broken](https://bugzilla.redhat.com/show_bug.cgi?id=1962976)
[1.2.0] - 2021-05-17
----------------------------
### New Features
- [network - role: Support ethtool -G|--set-ring options](https://bugzilla.redhat.com/show_bug.cgi?id=1959649)
### Bug Fixes
- [postfix - postfix: Use FQRN in README](https://bugzilla.redhat.com/show_bug.cgi?id=1958963)
- [postfix - Documentation error in rhel-system-roles postfix readme file](https://bugzilla.redhat.com/show_bug.cgi?id=1866544)
- [storage - storage: calltrace observed when set type: partition for storage_pools](https://bugzilla.redhat.com/show_bug.cgi?id=1854187)
[1.1.0] - 2021-05-13
----------------------------
### New Features
- [timesync - [RFE] support for free form configuration for chrony](https://bugzilla.redhat.com/show_bug.cgi?id=1938023)
- [timesync - [RFE] support for timesync_max_distance to configure maxdistance/maxdist parameter](https://bugzilla.redhat.com/show_bug.cgi?id=1938016)
- [timesync - [RFE] support for ntp xleave, filter, and hw timestamping](https://bugzilla.redhat.com/show_bug.cgi?id=1938020)
- [selinux - [RFE] Ability to install custom SELinux module via Ansible](https://bugzilla.redhat.com/show_bug.cgi?id=1848683)
- [network - support for ipv6_disabled to disable ipv6 for address](https://bugzilla.redhat.com/show_bug.cgi?id=1939711)
- [vpn - [RFE] Release Ansible role for vpn in rhel-system-roles](https://bugzilla.redhat.com/show_bug.cgi?id=1943679)
### Bug Fixes
- [Bug fixes for Collection/Automation Hub](https://bugzilla.redhat.com/show_bug.cgi?id=1954747)
- [timesync - do not use ignore_errors in timesync role](https://bugzilla.redhat.com/show_bug.cgi?id=1938014)
- [selinux - rhel-system-roles should not reload the SELinux policy if it's not changed](https://bugzilla.redhat.com/show_bug.cgi?id=1757869)
[1.0.0] - 2021-02-23
----------------------------
### New Features
- [network - RFE: [network] Support of DNS with options](https://bugzilla.redhat.com/show_bug.cgi?id=1893959)
- [network - RFE: [network] Embrace Inclusive language](https://bugzilla.redhat.com/show_bug.cgi?id=1893957)
- [ssh - [8.4] [RFE] Release Ansible role for ssh client in rhel-system-roles](https://bugzilla.redhat.com/show_bug.cgi?id=1893712)
- [clusterha - [8.4] [RFE] Release Ansible role for cluster HA in rhel-system-roles](https://bugzilla.redhat.com/show_bug.cgi?id=1893743)
- [logging - Logging - Support RELP secure transport in the logging role configuration](https://bugzilla.redhat.com/show_bug.cgi?id=1889484)
- [metrics - [8.4] [RFE] add exporting-metric-data-to-elasticsearch functionality in the metrics role](https://bugzilla.redhat.com/show_bug.cgi?id=1895188)
- [metrics - release SQL server configuration support in the metrics role](https://bugzilla.redhat.com/show_bug.cgi?id=1893908)
- [[8.4] Package rhel-system-roles in the collection format in addition to the legacy role format](https://bugzilla.redhat.com/show_bug.cgi?id=1893906)
### Bug Fixes
- [logging - Logging - Integrating ELK with RHV-4.4 fails as RHVH is missing 'rsyslog-gnutls' package.](https://bugzilla.redhat.com/show_bug.cgi?id=1927943)
- [storage - storage: omitted parameters on existing pool/volume is interpreted as "use the default"](https://bugzilla.redhat.com/show_bug.cgi?id=1894651)
- [storage - storage: must list disks in order to identify an existing pool](https://bugzilla.redhat.com/show_bug.cgi?id=1894676)
- [storage - storage: pool metadata usage must be accounted for by the user](https://bugzilla.redhat.com/show_bug.cgi?id=1894647)
- [selinux - Merged fix incorrect default value (there is no variable named "present")](https://bugzilla.redhat.com/show_bug.cgi?id=1926947)
- [storage - storage: tests_luks.yml partition case failed with nvme disk](https://bugzilla.redhat.com/show_bug.cgi?id=1865990)
[1.0] - 2021-01-15
----------------------------
### New Features
- [tlog - Add exclude_users and exclude_groups support](https://bugzilla.redhat.com/show_bug.cgi?id=1895472)
- [crypto_policies - [8.4] [RFE] Release Ansible role for crypto policies in rhel-system-roles](https://bugzilla.redhat.com/show_bug.cgi?id=1893699)
- [sshd - [8.4] [RFE] Release Ansible role for sshd in rhel-system-roles](https://bugzilla.redhat.com/show_bug.cgi?id=1893696)
- [metrics - role should automate the setup of Grafana datasources](https://bugzilla.redhat.com/show_bug.cgi?id=1855544)
- [network role: Support -K|--features|--offload ethtool options](https://bugzilla.redhat.com/show_bug.cgi?id=1696703)
- [network role: Atomic changes](https://bugzilla.redhat.com/show_bug.cgi?id=1695161)
### Bug Fixes
- [storage - safe mode of storage role does not prevent accidentally losing data when toggling encryption on a volume, disk or pool](https://bugzilla.redhat.com/show_bug.cgi?id=1881524)
- [storage - storage: ext2/3/4 resize function doesn't work](https://bugzilla.redhat.com/show_bug.cgi?id=1862867)
- [logging - [logging role] cannot setup machine with tls](https://bugzilla.redhat.com/show_bug.cgi?id=1861318)
- [certificate - role: The role is not idempotent in rhel7](https://bugzilla.redhat.com/show_bug.cgi?id=1859547)
- [logging - Logging - Bug fixes](https://bugzilla.redhat.com/show_bug.cgi?id=1854546)
- [logging - [logging role] support scenario for client without key/cert, just CA cert](https://bugzilla.redhat.com/show_bug.cgi?id=1860896)
- [metrics - role incorrectly sets up multiple primary pmie processes in multi-host mode](https://bugzilla.redhat.com/show_bug.cgi?id=1855539)
- [certificate - role cannot manage EL7 hosts](https://bugzilla.redhat.com/show_bug.cgi?id=1848745)
- [network - [network] Support state:down persistent_state:absent for non-existent profile](https://bugzilla.redhat.com/show_bug.cgi?id=1822777)
- [network - Creating active bonded interface fails with the initscripts provider](https://bugzilla.redhat.com/show_bug.cgi?id=1848472)
- [logging - Logging role had performance issues](https://bugzilla.redhat.com/show_bug.cgi?id=1848762)
- [certificate - role does not work on controller hosts which use jinja2 2.10](https://bugzilla.redhat.com/show_bug.cgi?id=1848742)
- [nbde_client - fix idempotency, check_mode issues with nbde_client role](https://bugzilla.redhat.com/show_bug.cgi?id=1848766)
- [storage - Storage role can remove existing filesystems and volume groups without warning](https://bugzilla.redhat.com/show_bug.cgi?id=1763242)
- [network role: Minimize service disruption](https://bugzilla.redhat.com/show_bug.cgi?id=1695157)
- [typo in selinux/tests/tests_selinux_disabled.yml: Invalid options for assert: mgs](https://bugzilla.redhat.com/show_bug.cgi?id=1677743)
- [Check mode problems in rhel-system-roles](https://bugzilla.redhat.com/show_bug.cgi?id=1685904)
[0.6] - 2018-05-11
----------------------------
### New Features
- [RFE: Ansible rhel-system-roles.network: add ETHTOOL_OPTS, LINKDELAY, IPV4_FAILURE_FATAL](https://bugzilla.redhat.com/show_bug.cgi?id=1478576)
### Bug Fixes
- none


@@ -1 +0,0 @@
See docs/CHANGELOG.md


@@ -1,21 +0,0 @@
# Helper macros originally from macros.ansible by Igor Raits <ignatenkobrain>
# This file is for maintaining the compatibility with macros and other
# functionality (generators) provided by ansible-packaging on Fedora.
Provides: ansible-collection(%{collection_namespace}.%{collection_name}) = %{collection_version}
# ansible-galaxy is available by ansible-core on RHEL 8.6 and newer at buildtime.
%define ansible_collection_build() ansible-galaxy collection build
%define ansible_collection_install() ansible-galaxy collection install -n -p %{buildroot}%{_datadir}/ansible/collections %{collection_namespace}-%{collection_name}-%{version}.tar.gz
%define ansible_roles_dir %{_datadir}/ansible/roles
%define ansible_collections_dir %{_datadir}/ansible/collections/ansible_collections
# TODO: Officially deprecate this macro and add the following line to the macro
# def after the new approach has gotten more testing and adoption:
# %%{warn: %%{ansible_collection_files} is deprecated. Use %%files -f %%{ansible_collection_filelist} instead.}
%define ansible_collection_files %{shrink:
%{ansible_collections_dir}/%{collection_namespace}/
}
%define ansible_collection_filelist %{__ansible_builddir}/ansible_collection_files
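# A minimal, hypothetical spec-file sketch (not the real rhel-system-roles
# spec) showing how the macros above are meant to be used together;
# the namespace, name, and version values are illustrative only:
#   %global collection_namespace redhat
#   %global collection_name rhel_system_roles
#   %global collection_version 1.0.0
#   %build
#   %ansible_collection_build
#   %install
#   %ansible_collection_install
#   %files -f %{ansible_collection_filelist}
#   # or, with the older macro slated for deprecation:
#   # %files plus %{ansible_collection_files}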


@@ -1,14 +0,0 @@
#!/bin/bash
# Rebrand an upstream collection README for RHEL. The sed expressions, in
# order: collapse everything from "## Currently supported distributions"
# through "## Dependencies" into a single "## Dependencies" heading; replace
# "Linux" with "Red Hat Enterprise Linux" and "Ansible Galaxy" with
# "Automation Hub"; rename the fedora.linux_system_roles namespace to
# redhat.rhel_system_roles; rename linux-system-roles to rhel-system-roles;
# replace the body of "## Documentation" with a pointer to the RHEL product
# documentation; and strip trailing spaces.
set -euxo pipefail
readme_md=${1:-"lsr_role2collection/collection_readme.md"}
sed -i -e '/## Currently supported distributions/{:1;/## Dependencies/!{N;b 1};s|.*|## Dependencies|}' \
    -e 's/Linux/Red Hat Enterprise Linux/g' \
    -e 's/Ansible Galaxy/Automation Hub/g' \
    -e 's/fedora\(.\)linux_system_roles/redhat\1rhel_system_roles/g' \
    -e 's/linux-system-roles/rhel-system-roles/g' \
    -e '/## Documentation/{:a;/## Support/!{N;b a};s|.*|## Documentation\nThe official RHEL System Roles documentation can be found in the [Product Documentation section of the Red Hat Customer Portal](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/administration_and_configuration_tasks_using_system_roles_in_rhel/index).\n\n## Support|}' \
    -e 's/ $//' \
    "$readme_md"
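# Example invocation (the path below is hypothetical; with no argument the
# script falls back to lsr_role2collection/collection_readme.md):
#   bash collection_readme.sh build/collection/README.md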


@@ -1,10 +0,0 @@
Source801: https://galaxy.ansible.com/download/ansible-posix-1.5.4.tar.gz
Source901: https://galaxy.ansible.com/download/community-general-8.3.0.tar.gz
Source902: https://galaxy.ansible.com/download/containers-podman-1.15.4.tar.gz
Provides: bundled(ansible-collection(ansible.posix)) = 1.5.4
Provides: bundled(ansible-collection(community.general)) = 8.3.0
Provides: bundled(ansible-collection(containers.podman)) = 1.15.4
Source996: CHANGELOG.rst
Source998: collection_readme.sh
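# Note: each bundled(ansible-collection(...)) Provides above must track the
# version of its corresponding Source tarball (ansible.posix 1.5.4,
# community.general 8.3.0, containers.podman 1.15.4).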


@@ -0,0 +1,13 @@
diff --git a/meta/main.yml b/meta/main.yml
index 2478fa6..ad8f4c6 100644
--- a/meta/main.yml
+++ b/meta/main.yml
@@ -7,6 +7,6 @@ galaxy_info:
min_ansible_version: 2.4
platforms:
- name: Fedora
- versions: [ 27, 28 ]
+ versions: [ 31, 32 ]
- name: EL
- versions: [ 6, 7 ]
+ versions: [ 6, 7, 8 ]


@@ -0,0 +1,142 @@
diff --git a/tests/commonvars.yml b/tests/commonvars.yml
new file mode 100644
index 0000000..2cd3566
--- /dev/null
+++ b/tests/commonvars.yml
@@ -0,0 +1,2 @@
+restore_services:
+ - kdump
diff --git a/tests/get_services_state.yml b/tests/get_services_state.yml
new file mode 100644
index 0000000..4fe5d36
--- /dev/null
+++ b/tests/get_services_state.yml
@@ -0,0 +1,4 @@
+- name: Get initial state of services
+ tags: tests::cleanup
+ service_facts:
+ register: initial_state
diff --git a/tests/restore_services_state.yml b/tests/restore_services_state.yml
new file mode 100644
index 0000000..2035dfc
--- /dev/null
+++ b/tests/restore_services_state.yml
@@ -0,0 +1,22 @@
+- block:
+ - name: load common vars
+ include_vars:
+ file: commonvars.yml
+
+ - name: Get final state of services
+ service_facts:
+ register: final_state
+
+ - name: Restore state of services
+ service:
+ name: "{{ item }}"
+ state: "{{ 'started' if
+ initial_state.ansible_facts.services[sname]['state']
+ == 'running' else 'stopped' }}"
+ when:
+ - sname in final_state.ansible_facts.services
+ - sname in initial_state.ansible_facts.services
+ vars:
+ sname: "{{ item + '.service' }}"
+ with_items: "{{ restore_services }}"
+ tags: tests::cleanup
diff --git a/tests/tests_default.yml b/tests/tests_default.yml
index 4c93830..9e7743a 100644
--- a/tests/tests_default.yml
+++ b/tests/tests_default.yml
@@ -4,3 +4,13 @@
roles:
- kdump
+
+ pre_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: get_services_state.yml
+
+ post_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: restore_services_state.yml
diff --git a/tests/tests_default_wrapper.yml b/tests/tests_default_wrapper.yml
index 2763fbd..95b3886 100644
--- a/tests/tests_default_wrapper.yml
+++ b/tests/tests_default_wrapper.yml
@@ -1,6 +1,9 @@
---
- name: Create static inventory from hostvars
hosts: all
+ tags:
+# - 'tests::tier1'
+ - 'tests::slow'
tasks:
- name: create temporary file
tempfile:
@@ -17,10 +20,16 @@
- name: Run tests_default.yml normally
+ tags:
+# - 'tests::tier1'
+ - 'tests::slow'
import_playbook: tests_default.yml
- name: Run tests_default.yml in check_mode
hosts: all
+ tags:
+# - 'tests::tier1'
+ - 'tests::slow'
tasks:
- name: Run ansible-playbook with tests_default.yml in check mode
command: ansible-playbook -vvv -i {{ tempinventory.path }} --check tests_default.yml
diff --git a/tests/tests_ssh.yml b/tests/tests_ssh.yml
index 14a59d9..23bc7eb 100644
--- a/tests/tests_ssh.yml
+++ b/tests/tests_ssh.yml
@@ -11,6 +11,13 @@
# this is the address at which the ssh dump server can be reached
# from the managed host. Dumps will be uploaded there.
kdump_ssh_server_inside: "{{ kdump_ssh_source if kdump_ssh_source in hostvars[kdump_ssh_server_outside]['ansible_all_ipv4_addresses'] + hostvars[kdump_ssh_server_outside]['ansible_all_ipv6_addresses'] else hostvars[kdump_ssh_server_outside]['ansible_default_ipv4']['address'] }}"
+ tags:
+ # this test executes some tasks on localhost and relies on
+ # localhost being a different host than the managed host
+ # (localhost is being used as a second host in multihost
+ # scenario). This also means that localhost must be capable
+ # enough (not just a container - must be running an sshd).
+ - 'tests::multihost_localhost'
tasks:
- name: gather facts from {{ kdump_ssh_server_outside }}
diff --git a/tests/tests_ssh_wrapper.yml b/tests/tests_ssh_wrapper.yml
index 9a8ecfd..1a6db73 100644
--- a/tests/tests_ssh_wrapper.yml
+++ b/tests/tests_ssh_wrapper.yml
@@ -1,6 +1,8 @@
---
- name: Create static inventory from hostvars
hosts: all
+ tags:
+ - 'tests::slow'
tasks:
- name: create temporary file
tempfile:
@@ -17,10 +19,15 @@
- name: Run tests_ssh.yml normally
+ tags:
+ - 'tests::slow'
import_playbook: tests_ssh.yml
- name: Run tests_ssh.yml in check_mode
hosts: all
+ tags:
+ - 'tests::slow'
+ - 'tests::multihost_localhost'
tasks:
- name: Run ansible-playbook with tests_ssh.yml in check mode
command: ansible-playbook -vvv -i {{ tempinventory.path }} --check tests_ssh.yml


@@ -0,0 +1,136 @@
From 90952a1bb7ddbba45ed8cbd62e6a8e0edb6f6148 Mon Sep 17 00:00:00 2001
From: Noriko Hosoi <nhosoi@redhat.com>
Date: Tue, 25 Aug 2020 09:05:03 -0700
Subject: [PATCH 1/7] Test playbooks enhancement
In the code that checks whether the log message was successfully logged
in the /var/log/messages file, add "until: __result is success"
and retry for up to 5 seconds.
---
tests/tests_basics_files.yml | 4 ++++
tests/tests_basics_files2.yml | 4 ++++
tests/tests_basics_files_forwards.yml | 4 ++++
tests/tests_basics_files_log_dir.yml | 4 ++++
tests/tests_basics_forwards_implicit_files.yml | 4 ++++
tests/tests_combination.yml | 4 ++++
tests/tests_combination2.yml | 4 ++++
tests/tests_imuxsock_files.yml | 4 ++++
8 files changed, 32 insertions(+)
diff --git a/tests/tests_basics_files.yml b/tests/tests_basics_files.yml
index 080890f..87950d8 100644
--- a/tests/tests_basics_files.yml
+++ b/tests/tests_basics_files.yml
@@ -74,4 +74,8 @@
- name: Check the test log message in {{ __default_system_log }}
command: /bin/grep testMessage0000 {{ __default_system_log }}
+ register: __result
+ until: __result is success
+ retries: 5
+ delay: 1
changed_when: false
diff --git a/tests/tests_basics_files2.yml b/tests/tests_basics_files2.yml
index ae61be2..094b125 100644
--- a/tests/tests_basics_files2.yml
+++ b/tests/tests_basics_files2.yml
@@ -99,4 +99,8 @@
- name: Check the test log message in {{ __default_system_log }}
command: /bin/grep testMessage0000 "{{ __default_system_log }}"
+ register: __result
+ until: __result is success
+ retries: 5
+ delay: 1
changed_when: false
diff --git a/tests/tests_basics_files_forwards.yml b/tests/tests_basics_files_forwards.yml
index f43b8eb..d08a207 100644
--- a/tests/tests_basics_files_forwards.yml
+++ b/tests/tests_basics_files_forwards.yml
@@ -105,6 +105,10 @@
- name: Check the test log message in {{ __default_system_log }}
command: /bin/grep testMessage0000 '{{ __default_system_log }}'
+ register: __result
+ until: __result is success
+ retries: 5
+ delay: 1
changed_when: false
- name: Check if the forwarding config exists
diff --git a/tests/tests_basics_files_log_dir.yml b/tests/tests_basics_files_log_dir.yml
index ca900b8..f5ca266 100644
--- a/tests/tests_basics_files_log_dir.yml
+++ b/tests/tests_basics_files_log_dir.yml
@@ -78,6 +78,10 @@
- name: Check the files output config that the path is {{ logging_system_log_dir }}/messages
command: /bin/grep '\*.info;mail.none;authpriv.none;cron.none.*{{ logging_system_log_dir }}/messages' {{ __test_files_conf }}
+ register: __result
+ until: __result is success
+ retries: 5
+ delay: 1
changed_when: false
- name: Check the test log message in {{ logging_system_log_dir }}/messages
diff --git a/tests/tests_basics_forwards_implicit_files.yml b/tests/tests_basics_forwards_implicit_files.yml
index 6744d53..1d23911 100644
--- a/tests/tests_basics_forwards_implicit_files.yml
+++ b/tests/tests_basics_forwards_implicit_files.yml
@@ -92,6 +92,10 @@
- name: Check if the test message is in {{ __default_system_log }}
command: /bin/grep testMessage0000 '{{ __default_system_log }}'
+ register: __result
+ until: __result is success
+ retries: 5
+ delay: 1
changed_when: false
- name: Get the forwarding config stat
diff --git a/tests/tests_combination.yml b/tests/tests_combination.yml
index 99d57dc..8aae855 100644
--- a/tests/tests_combination.yml
+++ b/tests/tests_combination.yml
@@ -129,6 +129,10 @@
- name: Check the test log message in {{ __default_system_log }}
command: /bin/grep testMessage0000 '{{ __default_system_log }}'
+ register: __result
+ until: __result is success
+ retries: 5
+ delay: 1
changed_when: false
- name: Generated a file to check severity_and_facility
diff --git a/tests/tests_combination2.yml b/tests/tests_combination2.yml
index 5d49a57..5fe43cb 100644
--- a/tests/tests_combination2.yml
+++ b/tests/tests_combination2.yml
@@ -138,6 +138,10 @@
- name: Check the test log message in {{ __default_system_log }}
command: /bin/grep testMessage0000 '{{ __default_system_log }}'
+ register: __result
+ until: __result is success
+ retries: 5
+ delay: 1
changed_when: false
- name: Check the forwarding config stat
diff --git a/tests/tests_imuxsock_files.yml b/tests/tests_imuxsock_files.yml
index 2d6840d..35db253 100644
--- a/tests/tests_imuxsock_files.yml
+++ b/tests/tests_imuxsock_files.yml
@@ -76,4 +76,8 @@
- name: Check the test log message in {{ __default_system_log }}
command: /bin/grep testMessage0000 "{{ __default_system_log }}"
+ register: __result
+ until: __result is success
+ retries: 5
+ delay: 1
changed_when: false
--
2.26.2


@@ -0,0 +1,81 @@
From e7f255a64a1ffe83b06e93c944c73b8079f1db3a Mon Sep 17 00:00:00 2001
From: Noriko Hosoi <nhosoi@redhat.com>
Date: Thu, 10 Sep 2020 17:15:32 -0700
Subject: [PATCH 2/7] Fixing a logic bug in elasticsearch output template.
When evaluated, the retryfailures value was negated by "not", which
should not have been. Removing the "not" and adding a test case to
tests_files_elasticsearch_use_local_cert.yml.
(cherry picked from commit 108f06926f7bec929fdfc24ce2fbcfe195078ae2)
---
roles/rsyslog/templates/output_elasticsearch.j2 | 2 +-
.../tests_files_elasticsearch_use_local_cert.yml | 16 +++++++++++++---
2 files changed, 14 insertions(+), 4 deletions(-)
diff --git a/roles/rsyslog/templates/output_elasticsearch.j2 b/roles/rsyslog/templates/output_elasticsearch.j2
index c3cd1df..c4db10f 100644
--- a/roles/rsyslog/templates/output_elasticsearch.j2
+++ b/roles/rsyslog/templates/output_elasticsearch.j2
@@ -44,7 +44,7 @@ ruleset(name="{{ item.name }}") {
bulkid="{{ item.bulkid | d("id_template") }}"
dynbulkid="{{ item.dynbulkid | d('on') }}"
allowUnsignedCerts="{{ item.allowUnsignedCerts | d("off") }}"
-{% if not item.retryfailures | d(true) %}
+{% if item.retryfailures | d(true) %}
{% if item.retryruleset | d() | length > 0 %}
retryfailures="on"
retryruleset="{{ item.retryruleset }}"
diff --git a/tests/tests_files_elasticsearch_use_local_cert.yml b/tests/tests_files_elasticsearch_use_local_cert.yml
index 2559ce7..8b1eaa4 100644
--- a/tests/tests_files_elasticsearch_use_local_cert.yml
+++ b/tests/tests_files_elasticsearch_use_local_cert.yml
@@ -44,6 +44,7 @@
__test_ca_cert: /tmp/es-ca.crt
__test_cert: /tmp/es-cert.pem
__test_key: /tmp/es-key.pem
+ __test_el: elasticsearch_output
tasks:
- name: Generate fake key/certs files.
@@ -60,13 +61,13 @@
- name: deploy config to send to elasticsearch
vars:
logging_outputs:
- - name: elasticsearch_output
+ - name: "{{ __test_el }}"
type: elasticsearch
server_host: logging-es
server_port: 9200
index_prefix: project.
input_type: ovirt
- retryfailures: false
+ retryfailures: on
ca_cert_src: "{{ __test_ca_cert }}"
cert_src: "{{ __test_cert }}"
private_key_src: "{{ __test_key }}"
@@ -77,7 +78,7 @@
logging_flows:
- name: flow_0
inputs: [files_input]
- outputs: [elasticsearch_output, elasticsearch_output_ops]
+ outputs: "[{{ __test_el }}]"
include_role:
name: linux-system-roles.logging
@@ -119,3 +120,12 @@
- mycert: "{{ __test_cert }}"
- myprivkey: "{{ __test_key }}"
changed_when: false
+
+ - name: Check retryfailures in {{ __test_outputfiles_conf }}
+ command: /bin/grep 'retryfailures="on"' {{ __test_outputfiles_conf }}
+ changed_when: false
+
+ - name: Check retryruleset in {{ __test_outputfiles_conf }}
+ command: /bin/grep 'retryruleset="{{ __test_el }}"' {{ __test_outputfiles_conf }}
+ changed_when: false
+
--
2.26.2


@@ -0,0 +1,55 @@
From 76b4418f937fd1dbaa1061fa5f83f11ea046dc40 Mon Sep 17 00:00:00 2001
From: Noriko Hosoi <nhosoi@redhat.com>
Date: Thu, 10 Sep 2020 16:35:43 -0700
Subject: [PATCH 3/7] Adding "Port and SELinux" section to README.
(cherry picked from commit 5f144bc74edbcd80a53a2fe84aa464f7ea9f44ef)
---
README.md | 16 +++++++++++++---
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/README.md b/README.md
index 0eafde8..db29dc5 100644
--- a/README.md
+++ b/README.md
@@ -19,6 +19,7 @@
* [Standalone configuration](#standalone-configuration)
* [Client configuration](#client-configuration)
* [Server configuration](#server-configuration)
+ * [Port and SELinux](#port-and-selinux)
* [Providers](#providers)
* [Tests](#tests)
* [Implementation Details](#implementation-details)
@@ -111,10 +112,10 @@ This is a schematic logging configuration to show log messages from input_nameA
- `ovirt` type - `ovirt` input supports oVirt specific inputs.<br>
For the details, visit [oVirt Support](../../design_docs/rsyslog_ovirt_support.md).
-- `remote` type - `remote` input supports receiving logs from the remote logging system over the network. This input type makes rsyslog a server.<br>
+- `remote` type - `remote` input supports receiving logs from the remote logging system over the network.<br>
**available options**
- - `udp_ports`: List of UDP port numbers to listen. If set, the `remote` input listens on the UDP ports. No defaults. If both `udp_ports` and `tcp_ports` are set in a `remote` input item, `udp_ports` is used and `tcp_ports` is dropped.
- - `tcp_ports`: List of TCP port numbers to listen. If set, the `remote` input listens on the TCP ports. Default to `[514]`. If both `udp_ports` and `tcp_ports` are set in a `remote` input item, `udp_ports` is used and `tcp_ports` is dropped. If both `udp_ports` and `tcp_ports` are not set in a `remote` input item, `tcp_ports: [514]` is added to the item.
+ - `udp_ports`: List of UDP port numbers to listen. If set, the `remote` input listens on the UDP ports. No defaults. If both `udp_ports` and `tcp_ports` are set in a `remote` input item, `udp_ports` is used and `tcp_ports` is dropped. See also [Port and SELinux](#port-and-selinux).
+ - `tcp_ports`: List of TCP port numbers to listen. If set, the `remote` input listens on the TCP ports. Default to `[514]`. If both `udp_ports` and `tcp_ports` are set in a `remote` input item, `udp_ports` is used and `tcp_ports` is dropped. If both `udp_ports` and `tcp_ports` are not set in a `remote` input item, `tcp_ports: [514]` is added to the item. See also [Port and SELinux](#port-and-selinux).
- `tls`: Set to `true` to encrypt the connection using the default TLS implementation used by the provider. Default to `false`.
- `pki_authmode`: Specifying the default network driver authentication mode. `x509/name`, `x509/fingerprint`, `anon` is accepted. Default to `x509/name`.
- `permitted_clients`: List of hostnames, IP addresses, fingerprints(sha1), and wildcard DNS domains which will be allowed by the `logging` server to connect and send logs over TLS. Default to `['*.{{ logging_domain }}']`
@@ -591,6 +592,15 @@ The following playbook generates the same logging configuration files.
outputs: [remote_files_output0, remote_files_output1]
```
+### Port and SELinux
+
+SELinux is only configured to allow sending and receiving on the following ports by default:
+```
+syslogd_port_t tcp 514, 20514
+syslogd_port_t udp 514, 20514
+```
+If other ports need to be configured, you can use [linux-system-roles/selinux](https://github.com/linux-system-roles/selinux) to manage SELinux contexts.
+
## Providers
[Rsyslog](roles/rsyslog) - This documentation contains rsyslog specific information.
--
2.26.2


@@ -0,0 +1,31 @@
From 6ef1f1020abb074525724e9060ddada526ad0102 Mon Sep 17 00:00:00 2001
From: Noriko Hosoi <nhosoi@redhat.com>
Date: Tue, 29 Sep 2020 15:50:03 -0700
Subject: [PATCH 4/7] Fixing yamllint errors.
(cherry picked from commit b131f9e26b3fd74d759b237d7b3b26b6732371d2)
---
tests/tests_files_elasticsearch_use_local_cert.yml | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/tests/tests_files_elasticsearch_use_local_cert.yml b/tests/tests_files_elasticsearch_use_local_cert.yml
index 8b1eaa4..90b12af 100644
--- a/tests/tests_files_elasticsearch_use_local_cert.yml
+++ b/tests/tests_files_elasticsearch_use_local_cert.yml
@@ -67,7 +67,7 @@
server_port: 9200
index_prefix: project.
input_type: ovirt
- retryfailures: on
+ retryfailures: true
ca_cert_src: "{{ __test_ca_cert }}"
cert_src: "{{ __test_cert }}"
private_key_src: "{{ __test_key }}"
@@ -128,4 +128,3 @@
- name: Check retryruleset in {{ __test_outputfiles_conf }}
command: /bin/grep 'retryruleset="{{ __test_el }}"' {{ __test_outputfiles_conf }}
changed_when: false
-
--
2.26.2


@@ -0,0 +1,324 @@
From b72e8a48be07a1cebce8b2237d7344220678c2ec Mon Sep 17 00:00:00 2001
From: Noriko Hosoi <nhosoi@redhat.com>
Date: Fri, 16 Oct 2020 08:15:11 -0700
Subject: [PATCH 5/7] Logging - support property-based filters in the files and
forwards outputs
Adding property-based filter options to files, forwards and remote_files output.
A test case is added to tests_basics_files2.yml.
In addition, fixing a bug caused by a leftover file from the previous tests.
Issue - https://github.com/linux-system-roles/logging/issues/179
(cherry picked from commit 6ac8f9ff680a4b0230446062f5927f5921829f80)
---
README.md | 68 ++++++++++++-------
roles/rsyslog/templates/output_files.j2 | 4 +-
roles/rsyslog/templates/output_forwards.j2 | 4 +-
.../rsyslog/templates/output_remote_files.j2 | 4 +-
tests/tests_basics_files2.yml | 40 +++++++++--
tests/tests_basics_forwards_cert.yml | 8 +++
tests/tests_basics_forwards_cert_missing.yml | 4 ++
tests/tests_server_conflict.yml | 8 +++
8 files changed, 108 insertions(+), 32 deletions(-)
diff --git a/README.md b/README.md
index db29dc5..4352ee7 100644
--- a/README.md
+++ b/README.md
@@ -180,11 +180,16 @@ This is a schematic logging configuration to show log messages from input_nameA
- `files` type - `files` output supports storing logs in the local files usually in /var/log.<br>
**available options**
- - `facility`: Facility; default to `*`.
- - `severity`: Severity; default to `*`.
- - `exclude`: Exclude list; default to none.
+ - `facility`: Facility in selector; default to `*`.
+ - `severity`: Severity in selector; default to `*`.
+ - `exclude`: Exclude list used in selector; default to none.
+ - `property`: Property in property-based filter; no default
+ - `prop_op`: Operation in property-based filter; In case of not `!`, put the `prop_op` value in quotes; default to `contains`
+ - `prop_value`: Value in property-based filter; default to `error`
- `path`: Path to the output file.
+ Selector options and property-based filter options are exclusive. If Property-based filter options are defined, selector options will be ignored.
+
Unless the above options are given, these local file outputs are configured.
```
kern.* /dev/console
@@ -199,8 +204,12 @@ This is a schematic logging configuration to show log messages from input_nameA
- `forwards` type - `forwards` output sends logs to the remote logging system over the network. This is for the client rsyslog.<br>
**available options**
- - `facility`: Facility; default to `*`.
- - `severity`: Severity; default to `*`.
+ - `facility`: Facility in selector; default to `*`.
+ - `severity`: Severity in selector; default to `*`.
+ - `exclude`: Exclude list used in selector; default to none.
+ - `property`: Property in property-based filter; no default
+ - `prop_op`: Operation in property-based filter; In case of not `!`, put the `prop_op` value in quotes; default to `contains`
+ - `prop_value`: Value in property-based filter; default to `error`
- `target`: Target host (fqdn). **Required**.
- `udp_port`: UDP port number. Default to `514`.
- `tcp_port`: TCP port number. Default to `514`.
@@ -208,11 +217,16 @@ This is a schematic logging configuration to show log messages from input_nameA
- `pki_authmode`: Specifying the default network driver authentication mode. `x509/name`, `x509/fingerprint`, `anon` is accepted. Default to `x509/name`.
- `permitted_server`: Hostname, IP address, fingerprint(sha1) or wildcard DNS domain of the server which this client will be allowed to connect and send logs over TLS. Default to `*.{{ logging_domain }}`
+ Selector options and property-based filter options are exclusive. If Property-based filter options are defined, selector options will be ignored.
+
- `remote_files` type - `remote_files` output stores logs to the local files per remote host and program name originated the logs.<br>
**available options**
- - `facility`: Facility; default to `*`.
- - `severity`: Severity; default to `*`.
- - `exclude`: Exclude list; default to none.
+ - `facility`: Facility in selector; default to `*`.
+ - `severity`: Severity in selector; default to `*`.
+ - `exclude`: Exclude list used in selector; default to none.
+ - `property`: Property in property-based filter; no default
+ - `prop_op`: Operation in property-based filter; In case of not `!`, put the `prop_op` value in quotes; default to `contains`
+ - `prop_value`: Value in property-based filter; default to `error`
- `async_writing`: If set to `true`, the files are written asynchronously. Allowed value is `true` or `false`. Default to `false`.
- `client_count`: Count of client logging system supported this rsyslog server. Default to `10`.
- `io_buffer_size`: Buffer size used to write output data. Default to `65536` bytes.
@@ -221,6 +235,8 @@ This is a schematic logging configuration to show log messages from input_nameA
`/path/to/output/dir/%HOSTNAME%/%PROGRAMNAME:::secpath-replace%.log`
- `remote_sub_path`: Relative path to logging_system_log_dir to store the filtered logs.
+ Selector options and property-based filter options are exclusive. If Property-based filter options are defined, selector options will be ignored.
+
if both `remote_log_path` and `remote_sub_path` are _not_ specified, the remote_file output configured with the following settings.
```
template(
@@ -446,32 +462,38 @@ The following playbook generates the same logging configuration files.
outputs: [files_output0, files_output1]
```
-5. Deploying `files input` reading logs from a local file and `elasticsearch output` to store the logs. Assuming the ca_cert, cert and key to connect to Elasticsearch are prepared.
+5. Deploying `files input` reading logs from local files and `files output` writing to local files chosen by property-based filters.
```yaml
---
-- name: Deploying basic input and elasticsearch output
+- name: Deploying files input and configured files output
hosts: all
roles:
- linux-system-roles.logging
vars:
logging_inputs:
- - name: files_input
+ - name: files_input0
type: files
- input_log_path: /var/log/containers/*.log
+ input_log_path: /var/log/containerA/*.log
+ - name: files_input1
+ type: files
+ input_log_path: /var/log/containerB/*.log
logging_outputs:
- - name: elasticsearch_output
- type: elasticsearch
- server_host: your_target_host
- server_port: 9200
- index_prefix: project.
- input_type: ovirt
- ca_cert_src: /local/path/to/ca_cert
- cert_src: /local/path/to/cert
- private_key_src: /local/path/to/key
+ - name: files_output0
+ type: files
+ property: msg
+ prop_op: contains
+ prop_value: error
+ path: /var/log/errors.log
+ - name: files_output1
+ type: files
+ property: msg
+ prop_op: "!contains"
+ prop_value: error
+ path: /var/log/others.log
logging_flows:
- name: flow0
- inputs: [files_input]
- outputs: [elasticsearch_output]
+ inputs: [files_input0, files_input1]
+ outputs: [files_output0, files_output1]
```
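With the outputs above, the role renders each `files` output as a classic rsyslog property-based filter line, e.g. `:msg, contains, "error" /var/log/errors.log` for `files_output0` (see `output_files.j2` below). A minimal sanity check, assuming the `30-output-files-<name>.conf` naming scheme used in the tests, could look like:
```yaml
# Illustrative sketch only; the config path assumes the tests' naming scheme.
- name: Check the generated property-based filter line
  command: >-
    /bin/grep -F ':msg, contains, "error" /var/log/errors.log'
    /etc/rsyslog.d/30-output-files-files_output0.conf
  changed_when: false
```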
### Client configuration
diff --git a/roles/rsyslog/templates/output_files.j2 b/roles/rsyslog/templates/output_files.j2
index d994414..e15e4cd 100644
--- a/roles/rsyslog/templates/output_files.j2
+++ b/roles/rsyslog/templates/output_files.j2
@@ -1,6 +1,8 @@
{% if item.path is defined %}
ruleset(name="{{ item.name }}") {
-{% if item.exclude | d([]) %}
+{% if item.property | d() %}
+ :{{ item.property }}, {{ item.prop_op | d('contains') }}, "{{ item.prop_value | d('error') }}" {{ item.path }}
+{% elif item.exclude | d([]) %}
{{ item.facility | d('*') }}.{{ item.severity | d('*') }};{{ item.exclude | join(';') }} {{ item.path }}
{% else %}
{{ item.facility | d('*') }}.{{ item.severity | d('*') }} {{ item.path }}
diff --git a/roles/rsyslog/templates/output_forwards.j2 b/roles/rsyslog/templates/output_forwards.j2
index 61254ee..35030b4 100644
--- a/roles/rsyslog/templates/output_forwards.j2
+++ b/roles/rsyslog/templates/output_forwards.j2
@@ -9,7 +9,9 @@
{% set __forwards_protocol = '' %}
{% endif %}
ruleset(name="{{ item.name }}") {
-{% if item.exclude | d([]) %}
+{% if item.property | d() %}
+ :{{ item.property }}, {{ item.prop_op | d('contains') }}, "{{ item.prop_value | d('error') }}" action(name="{{ item.name }}"
+{% elif item.exclude | d([]) %}
{{ item.facility | d('*') }}.{{ item.severity | d('*') }};{{ item.exclude | join(';') }} action(name="{{ item.name }}"
{% else %}
{{ item.facility | d('*') }}.{{ item.severity | d('*') }} action(name="{{ item.name }}"
diff --git a/roles/rsyslog/templates/output_remote_files.j2 b/roles/rsyslog/templates/output_remote_files.j2
index 3c9339f..aaf547e 100644
--- a/roles/rsyslog/templates/output_remote_files.j2
+++ b/roles/rsyslog/templates/output_remote_files.j2
@@ -17,7 +17,9 @@ ruleset(name="{{ item.name }}"
queue.size="{{ logging_server_queue_size }}"
queue.workerThreads="{{ logging_server_threads }}") {
# Store remote logs in separate logfiles
-{% if item.exclude | d([]) %}
+{% if item.property | d() %}
+ :{{ item.property }}, {{ item.prop_op | d('contains') }}, "{{ item.prop_value | d('error') }}" action(name="{{ item.name }}" type="omfile" DynaFile="{{ item.name }}_template" DynaFileCacheSize="{{ item.client_count | d(10) }}" ioBufferSize="{{ item.io_buffer_size | d('65536') }}" asyncWriting="{{ 'on' if item.async_writing | d(false) | bool else 'off' }}")
+{% elif item.exclude | d([]) %}
{{ item.facility | d('*') }}.{{ item.severity | d('*') }};{{ item.exclude | join(';') }} action(name="{{ item.name }}" type="omfile" DynaFile="{{ item.name }}_template" DynaFileCacheSize="{{ item.client_count | d(10) }}" ioBufferSize="{{ item.io_buffer_size | d('65536') }}" asyncWriting="{{ 'on' if item.async_writing | d(false) | bool else 'off' }}")
{% else %}
{{ item.facility | d('*') }}.{{ item.severity | d('*') }} action(name="{{ item.name }}" type="omfile" DynaFile="{{ item.name }}_template" DynaFileCacheSize="{{ item.client_count | d(10) }}" ioBufferSize="{{ item.io_buffer_size | d('65536') }}" asyncWriting="{{ 'on' if item.async_writing | d(false) | bool else 'off' }}")
diff --git a/tests/tests_basics_files2.yml b/tests/tests_basics_files2.yml
index 094b125..b1a0f62 100644
--- a/tests/tests_basics_files2.yml
+++ b/tests/tests_basics_files2.yml
@@ -10,9 +10,9 @@
# If logging role is executed, the file size is about 100 bytes.
# Thus, assert the size is less than 1000.
# 2. Check file count in /etc/rsyslog.d.
-# If logging role is executed, 8 config files are generated.
+# If logging role is executed, 9 config files are generated.
# By setting logging_purge_confs, pre-existing config files are deleted.
-# Thus, assert the count is equal to 8.
+# Thus, assert the count is equal to 9.
# 3. Check systemctl status of rsyslog as well as error or specific message in the output.
# 4. To verify the generated filename is correct, check the config file of files output exists.
# 4.1 Check the config file contains the expected filter and the output file as configured.
@@ -24,6 +24,8 @@
vars:
__test_files_conf: /etc/rsyslog.d/30-output-files-files_output1.conf
__default_system_log: /var/log/messages
+ __prop_based_log0: /var/log/property_based_filter_in.log
+ __prop_based_log1: /var/log/property_based_filter_out.log
tasks:
- name: deploy config to output into local files
@@ -49,15 +51,23 @@
path: :omusrmsg:*
- name: files_output3
type: files
- facility: local7
- path: /var/log/boot.log
+ property: msg
+ prop_op: contains
+ prop_value: property_based_filter_test
+ path: "{{ __prop_based_log0 }}"
+ - name: files_output4
+ type: files
+ property: msg
+ prop_op: "!contains"
+ prop_value: property_based_filter_test
+ path: "{{ __prop_based_log1 }}"
logging_inputs:
- name: basic_input
type: basics
logging_flows:
- name: flow_0
inputs: [basic_input]
- outputs: [files_output0, files_output1, files_output2, files_output3]
+ outputs: [files_output0, files_output1, files_output2, files_output3, files_output4]
include_role:
name: linux-system-roles.logging
@@ -74,7 +84,7 @@
- name: Check file counts in rsyslog.d
assert:
- that: rsyslog_d_file_count.matched == 8
+ that: rsyslog_d_file_count.matched == 9
# Checking 'error' in stdout from systemctl status is for detecting the case in which rsyslog is running,
# but some functionality is disabled due to some error, e.g., error: 'tls.cacert' file couldn't be accessed.
@@ -104,3 +114,21 @@
retries: 5
delay: 1
changed_when: false
+
+ - name: Run logger to generate a test log message containing property_based_filter_test
+ command: /bin/logger -i -p local6.info -t testTag1 property_based_filter_test
+ changed_when: false
+
+ - name: Check the test log message in {{ __prop_based_log0 }}
+ command: /bin/grep property_based_filter_test "{{ __prop_based_log0 }}"
+ register: __result
+ until: __result is success
+ retries: 5
+ delay: 1
+ changed_when: false
+
+ - name: Check the test log message not in {{ __prop_based_log1 }}
+ command: /bin/grep property_based_filter_test "{{ __prop_based_log1 }}"
+ register: __result
+ changed_when: false
+ failed_when: "__result is not failed"
diff --git a/tests/tests_basics_forwards_cert.yml b/tests/tests_basics_forwards_cert.yml
index e27e016..48263ae 100644
--- a/tests/tests_basics_forwards_cert.yml
+++ b/tests/tests_basics_forwards_cert.yml
@@ -139,3 +139,11 @@
- /etc/pki/tls/certs/{{ __test_ca_cert_name }}
- /etc/pki/tls/certs/{{ __test_cert_name }}
- /etc/pki/tls/private/{{ __test_key_name }}
+
+ - name: clean up test files
+ file: path="{{ item }}" state=absent
+ loop:
+ - "{{ __test_ca_cert }}"
+ - "{{ __test_cert }}"
+ - "{{ __test_key }}"
+ delegate_to: localhost
diff --git a/tests/tests_basics_forwards_cert_missing.yml b/tests/tests_basics_forwards_cert_missing.yml
index 3e82856..0ad0569 100644
--- a/tests/tests_basics_forwards_cert_missing.yml
+++ b/tests/tests_basics_forwards_cert_missing.yml
@@ -63,6 +63,10 @@
assert:
that: "'{{ ansible_failed_result.results.0.msg }}' is match('{{ __expected_error }}')"
+ - name: clean up test files
+ file: path="{{ __test_key }}" state=absent
+ delegate_to: localhost
+
- name: default run for cleanup
vars:
logging_inputs:
diff --git a/tests/tests_server_conflict.yml b/tests/tests_server_conflict.yml
index 36eeeb7..8c182f6 100644
--- a/tests/tests_server_conflict.yml
+++ b/tests/tests_server_conflict.yml
@@ -76,3 +76,11 @@
- assert:
that: item.msg is not defined or item.msg is defined and item.msg == __expected_error
loop: "{{ ansible_failed_result.results }}"
+
+ - name: clean up test files
+ file: path="{{ item }}" state=absent
+ loop:
+ - "{{ __test_ca_cert }}"
+ - "{{ __test_cert }}"
+ - "{{ __test_key }}"
+ delegate_to: localhost
--
2.26.2


@@ -0,0 +1,136 @@
From ca2baffbfc14fba077c7c70d849c02b9c69c9e1f Mon Sep 17 00:00:00 2001
From: Noriko Hosoi <nhosoi@redhat.com>
Date: Fri, 16 Oct 2020 11:08:00 -0700
Subject: [PATCH 6/7] Replacing prop_op with property_op and prop_value with
property_value.
(cherry picked from commit 1c951e6acef886548029151dbca9d002f20ef425)
---
README.md | 20 +++++++++----------
roles/rsyslog/templates/output_files.j2 | 2 +-
roles/rsyslog/templates/output_forwards.j2 | 2 +-
.../rsyslog/templates/output_remote_files.j2 | 2 +-
tests/tests_basics_files2.yml | 8 ++++----
5 files changed, 17 insertions(+), 17 deletions(-)
diff --git a/README.md b/README.md
index 4352ee7..d94ec04 100644
--- a/README.md
+++ b/README.md
@@ -184,8 +184,8 @@ This is a schematic logging configuration to show log messages from input_nameA
- `severity`: Severity in selector; default to `*`.
- `exclude`: Exclude list used in selector; default to none.
- `property`: Property in property-based filter; no default
- `prop_op`: Operation in property-based filter; if the operation is negated with `!` (e.g., `!contains`), put the `prop_op` value in quotes; default to `contains`
- - `prop_value`: Value in property-based filter; default to `error`
+ - `property_op`: Operation in property-based filter; if the operation is negated with `!` (e.g., `!contains`), put the `property_op` value in quotes; default to `contains`
+ - `property_value`: Value in property-based filter; default to `error`
- `path`: Path to the output file.
Selector options and property-based filter options are mutually exclusive. If property-based filter options are defined, selector options will be ignored.
@@ -208,8 +208,8 @@ This is a schematic logging configuration to show log messages from input_nameA
- `severity`: Severity in selector; default to `*`.
- `exclude`: Exclude list used in selector; default to none.
- `property`: Property in property-based filter; no default
- `prop_op`: Operation in property-based filter; if the operation is negated with `!` (e.g., `!contains`), put the `prop_op` value in quotes; default to `contains`
- - `prop_value`: Value in property-based filter; default to `error`
+ - `property_op`: Operation in property-based filter; if the operation is negated with `!` (e.g., `!contains`), put the `property_op` value in quotes; default to `contains`
+ - `property_value`: Value in property-based filter; default to `error`
- `target`: Target host (fqdn). **Required**.
- `udp_port`: UDP port number. Default to `514`.
- `tcp_port`: TCP port number. Default to `514`.
@@ -225,8 +225,8 @@ This is a schematic logging configuration to show log messages from input_nameA
- `severity`: Severity in selector; default to `*`.
- `exclude`: Exclude list used in selector; default to none.
- `property`: Property in property-based filter; no default
- `prop_op`: Operation in property-based filter; if the operation is negated with `!` (e.g., `!contains`), put the `prop_op` value in quotes; default to `contains`
- - `prop_value`: Value in property-based filter; default to `error`
+ - `property_op`: Operation in property-based filter; if the operation is negated with `!` (e.g., `!contains`), put the `property_op` value in quotes; default to `contains`
+ - `property_value`: Value in property-based filter; default to `error`
- `async_writing`: If set to `true`, the files are written asynchronously. Allowed value is `true` or `false`. Default to `false`.
- `client_count`: Count of client logging systems supported by this rsyslog server. Default to `10`.
- `io_buffer_size`: Buffer size used to write output data. Default to `65536` bytes.
@@ -481,14 +481,14 @@ The following playbook generates the same logging configuration files.
- name: files_output0
type: files
property: msg
- prop_op: contains
- prop_value: error
+ property_op: contains
+ property_value: error
path: /var/log/errors.log
- name: files_output1
type: files
property: msg
- prop_op: "!contains"
- prop_value: error
+ property_op: "!contains"
+ property_value: error
path: /var/log/others.log
logging_flows:
- name: flow0
diff --git a/roles/rsyslog/templates/output_files.j2 b/roles/rsyslog/templates/output_files.j2
index e15e4cd..40f5b90 100644
--- a/roles/rsyslog/templates/output_files.j2
+++ b/roles/rsyslog/templates/output_files.j2
@@ -1,7 +1,7 @@
{% if item.path is defined %}
ruleset(name="{{ item.name }}") {
{% if item.property | d() %}
- :{{ item.property }}, {{ item.prop_op | d('contains') }}, "{{ item.prop_value | d('error') }}" {{ item.path }}
+ :{{ item.property }}, {{ item.property_op | d('contains') }}, "{{ item.property_value | d('error') }}" {{ item.path }}
{% elif item.exclude | d([]) %}
{{ item.facility | d('*') }}.{{ item.severity | d('*') }};{{ item.exclude | join(';') }} {{ item.path }}
{% else %}
diff --git a/roles/rsyslog/templates/output_forwards.j2 b/roles/rsyslog/templates/output_forwards.j2
index 35030b4..87d7a09 100644
--- a/roles/rsyslog/templates/output_forwards.j2
+++ b/roles/rsyslog/templates/output_forwards.j2
@@ -10,7 +10,7 @@
{% endif %}
ruleset(name="{{ item.name }}") {
{% if item.property | d() %}
- :{{ item.property }}, {{ item.prop_op | d('contains') }}, "{{ item.prop_value | d('error') }}" action(name="{{ item.name }}"
+ :{{ item.property }}, {{ item.property_op | d('contains') }}, "{{ item.property_value | d('error') }}" action(name="{{ item.name }}"
{% elif item.exclude | d([]) %}
{{ item.facility | d('*') }}.{{ item.severity | d('*') }};{{ item.exclude | join(';') }} action(name="{{ item.name }}"
{% else %}
diff --git a/roles/rsyslog/templates/output_remote_files.j2 b/roles/rsyslog/templates/output_remote_files.j2
index aaf547e..84317f2 100644
--- a/roles/rsyslog/templates/output_remote_files.j2
+++ b/roles/rsyslog/templates/output_remote_files.j2
@@ -18,7 +18,7 @@ ruleset(name="{{ item.name }}"
queue.workerThreads="{{ logging_server_threads }}") {
# Store remote logs in separate logfiles
{% if item.property | d() %}
- :{{ item.property }}, {{ item.prop_op | d('contains') }}, "{{ item.prop_value | d('error') }}" action(name="{{ item.name }}" type="omfile" DynaFile="{{ item.name }}_template" DynaFileCacheSize="{{ item.client_count | d(10) }}" ioBufferSize="{{ item.io_buffer_size | d('65536') }}" asyncWriting="{{ 'on' if item.async_writing | d(false) | bool else 'off' }}")
+ :{{ item.property }}, {{ item.property_op | d('contains') }}, "{{ item.property_value | d('error') }}" action(name="{{ item.name }}" type="omfile" DynaFile="{{ item.name }}_template" DynaFileCacheSize="{{ item.client_count | d(10) }}" ioBufferSize="{{ item.io_buffer_size | d('65536') }}" asyncWriting="{{ 'on' if item.async_writing | d(false) | bool else 'off' }}")
{% elif item.exclude | d([]) %}
{{ item.facility | d('*') }}.{{ item.severity | d('*') }};{{ item.exclude | join(';') }} action(name="{{ item.name }}" type="omfile" DynaFile="{{ item.name }}_template" DynaFileCacheSize="{{ item.client_count | d(10) }}" ioBufferSize="{{ item.io_buffer_size | d('65536') }}" asyncWriting="{{ 'on' if item.async_writing | d(false) | bool else 'off' }}")
{% else %}
diff --git a/tests/tests_basics_files2.yml b/tests/tests_basics_files2.yml
index b1a0f62..9f69ed5 100644
--- a/tests/tests_basics_files2.yml
+++ b/tests/tests_basics_files2.yml
@@ -52,14 +52,14 @@
- name: files_output3
type: files
property: msg
- prop_op: contains
- prop_value: property_based_filter_test
+ property_op: contains
+ property_value: property_based_filter_test
path: "{{ __prop_based_log0 }}"
- name: files_output4
type: files
property: msg
- prop_op: "!contains"
- prop_value: property_based_filter_test
+ property_op: "!contains"
+ property_value: property_based_filter_test
path: "{{ __prop_based_log1 }}"
logging_inputs:
- name: basic_input
--
2.26.2


@@ -0,0 +1,114 @@
From 3967a2b0e7e61dfb6317296a4cf15d0fe91a1638 Mon Sep 17 00:00:00 2001
From: Noriko Hosoi <nhosoi@redhat.com>
Date: Thu, 15 Oct 2020 10:52:29 -0700
Subject: [PATCH 7/7] RHELPLAN-56807 - Logging - elasticsearch - need to adjust
jinja2 boolean values to the rsyslog config values
Resetting the values of the following params as rsyslog expects.
dynSearchIndex, bulkmode, dynbulkid, allowUnsignedCerts, usehttps
Adding test cases to tests_ovirt_elasticsearch_params.yml
(cherry picked from commit c98aabd864f6d07c11d6db991bf0af0aaee7f123)
---
.../rsyslog/templates/output_elasticsearch.j2 | 13 ++++-----
tests/tests_ovirt_elasticsearch_params.yml | 29 +++++++++++++++++--
2 files changed, 33 insertions(+), 9 deletions(-)
diff --git a/roles/rsyslog/templates/output_elasticsearch.j2 b/roles/rsyslog/templates/output_elasticsearch.j2
index c4db10f..6c6255b 100644
--- a/roles/rsyslog/templates/output_elasticsearch.j2
+++ b/roles/rsyslog/templates/output_elasticsearch.j2
@@ -37,25 +37,24 @@ ruleset(name="{{ item.name }}") {
serverport="{{ item.server_port | d(9200) | int }}"
template="{{ item.template | d("es_template") }}"
searchIndex="{{ item.searchIndex | d("index_template") }}"
- dynSearchIndex="{{ item.dynSearchIndex | d("on") }}"
+ dynSearchIndex="{{ item.dynSearchIndex | d(true) | ternary('on', 'off') }}"
searchType="{{ item.searchType | d("com.redhat.viaq.common") }}"
- bulkmode="{{ item.bulkmode | d("on") }}"
+ bulkmode="{{ item.bulkmode | d(true) | ternary('on', 'off') }}"
writeoperation="{{ item.writeoperation | d("create") }}"
bulkid="{{ item.bulkid | d("id_template") }}"
- dynbulkid="{{ item.dynbulkid | d('on') }}"
- allowUnsignedCerts="{{ item.allowUnsignedCerts | d("off") }}"
+ dynbulkid="{{ item.dynbulkid | d(true) | ternary('on', 'off') }}"
+ allowUnsignedCerts="{{ item.allowUnsignedCerts | d(false) | ternary('on', 'off') }}"
{% if item.retryfailures | d(true) %}
-{% if item.retryruleset | d() | length > 0 %}
retryfailures="on"
+{% if item.retryruleset | d() | length > 0 %}
retryruleset="{{ item.retryruleset }}"
{% else %}
- retryfailures="on"
retryruleset="{{ item.name }}"
{% endif %}
{% else %}
retryfailures="off"
{% endif %}
- usehttps="{{ item.usehttps | default("on") }}"
+ usehttps="{{ item.usehttps | d(true) | ternary('on', 'off') }}"
{% if item.use_cert | default(true) %}
tls.cacert="{{ item.ca_cert | default('/etc/rsyslog.d/es-ca.crt') }}"
tls.mycert="{{ item.cert | default('/etc/rsyslog.d/es-cert.pem') }}"
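The `ternary` filter above maps Ansible booleans to the literal `on`/`off` strings rsyslog expects. A standalone sketch of the same mapping (demo only, not part of the role):
```yaml
- name: Show how a boolean renders as an rsyslog on/off value
  debug:
    msg: "usehttps={{ usehttps | default(true) | ternary('on', 'off') }}"
  vars:
    usehttps: false  # renders "usehttps=off"; omit the var to get the default "on"
```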
diff --git a/tests/tests_ovirt_elasticsearch_params.yml b/tests/tests_ovirt_elasticsearch_params.yml
index 34d9e1d..4fefe59 100644
--- a/tests/tests_ovirt_elasticsearch_params.yml
+++ b/tests/tests_ovirt_elasticsearch_params.yml
@@ -34,6 +34,8 @@
__test_ovirt_engine_conf: /etc/rsyslog.d/90-input-ovirt-ovirt_engine_input.conf
__test_ovirt_vdsm_conf: /etc/rsyslog.d/90-input-ovirt-ovirt_vdsm_input.conf
__test_ovirt_bogus_conf: /etc/rsyslog.d/90-input-ovirt-ovirt_bogus_input.conf
+ __test_es_conf: /etc/rsyslog.d/31-output-elasticsearch-elasticsearch_output.conf
+ __test_es_ops_conf: /etc/rsyslog.d/31-output-elasticsearch-elasticsearch_output_ops.conf
__test_collectd_name: ovirt_collectd_input
__test_engine_name: ovirt_engine_input
__test_vdsm_name: ovirt_vdsm_input
@@ -56,7 +58,6 @@
server_port: 9200
index_prefix: project.
input_type: ovirt
- retryfailures: false
ca_cert: "/etc/rsyslog.d/es-ca.crt"
cert: "/etc/rsyslog.d/es-cert.pem"
private_key: "/etc/rsyslog.d/es-key.pem"
@@ -70,6 +71,11 @@
ca_cert: "/etc/rsyslog.d/es-ca.crt"
cert: "/etc/rsyslog.d/es-cert.pem"
private_key: "/etc/rsyslog.d/es-key.pem"
+ dynSearchIndex: false
+ bulkmode: false
+ dynbulkid: false
+ allowUnsignedCerts: true
+ usehttps: false
logging_inputs:
- name: basic_input
type: basics
@@ -164,4 +170,23 @@
- name: Check index_prefix is "{{ __test_logs_index }}" in "{{ __test_ovirt_vdsm_conf }}"
command: /bin/grep 'set $.index_prefix = "{{ __test_logs_index }}"' {{ __test_ovirt_vdsm_conf }}
- changed_when: false
+
+ - name: Check default config params in "{{ __test_es_conf }}"
+ command: /bin/grep {{ item }} {{ __test_es_conf }}
+ loop:
+ - "dynSearchIndex=.on."
+ - "bulkmode=.on."
+ - "dynbulkid=.on."
+ - "allowUnsignedCerts=.off."
+ - "usehttps=.on."
+ - "retryfailures=.on."
+
+ - name: Check modified config params in "{{ __test_es_ops_conf }}"
+ command: /bin/grep {{ item }} {{ __test_es_ops_conf }}
+ loop:
+ - "dynSearchIndex=.off."
+ - "bulkmode=.off."
+ - "dynbulkid=.off."
+ - "allowUnsignedCerts=.on."
+ - "usehttps=.off."
+ - "retryfailures=.off."
--
2.26.2

SOURCES/md2html.sh Normal file

@@ -0,0 +1,10 @@
#!/bin/bash
set -euxo pipefail
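# Convert each markdown file given on the command line to HTML:
# markdown -> asciidoc (pandoc), then asciidoc -> HTML (asciidoc).
# touch -r keeps the source timestamp; TZ=UTC makes the output reproducible.
# Usage: md2html.sh README.md [MORE.md ...] writes README.html next to the source.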
for file in "$@"; do
    pandoc -f markdown_github "${file}" -t asciidoc -o "${file%.md}.tmp.adoc"
    touch -r "${file}" "${file%.md}.tmp.adoc"
    TZ=UTC asciidoc -o "${file%.md}.html" -a footer-style=none -a toc2 -a source-highlighter=highlight "${file%.md}.tmp.adoc"
    rm "${file%.md}.tmp.adoc"
done


@@ -0,0 +1,6 @@
diff --git a/library/network_connections.py b/library/network_connections.py
old mode 100755
new mode 100644
diff --git a/tests/unit/test_network_connections.py b/tests/unit/test_network_connections.py
old mode 100755
new mode 100644


@@ -0,0 +1,531 @@
diff --git a/tests/playbooks/tests_802_1x.yml b/tests/playbooks/tests_802_1x.yml
index 9cce1ae..76d99e9 100644
--- a/tests/playbooks/tests_802_1x.yml
+++ b/tests/playbooks/tests_802_1x.yml
@@ -1,5 +1,10 @@
# SPDX-License-Identifier: BSD-3-Clause
---
+- name: Save host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/save_state.yml
+
- hosts: all
vars:
interface: 802-1x-test
@@ -122,3 +127,8 @@
command: update-ca-trust
tags:
- "tests::cleanup"
+
+- name: Restore host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/restore_state.yml
diff --git a/tests/playbooks/tests_bond.yml b/tests/playbooks/tests_bond.yml
index ab3ee43..d646a0b 100644
--- a/tests/playbooks/tests_bond.yml
+++ b/tests/playbooks/tests_bond.yml
@@ -1,5 +1,10 @@
# SPDX-License-Identifier: BSD-3-Clause
---
+- name: Save host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/save_state.yml
+
- hosts: all
vars:
master_profile: bond0
@@ -94,3 +99,8 @@
- import_tasks: tasks/remove_test_interfaces_with_dhcp.yml
tags:
- "tests::cleanup"
+
+- name: Restore host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/restore_state.yml
diff --git a/tests/playbooks/tests_bridge.yml b/tests/playbooks/tests_bridge.yml
index d79d6ad..c8cf3cd 100644
--- a/tests/playbooks/tests_bridge.yml
+++ b/tests/playbooks/tests_bridge.yml
@@ -1,5 +1,10 @@
# SPDX-License-Identifier: BSD-3-Clause
---
+- name: Save host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/save_state.yml
+
- name: Test configuring bridges
hosts: all
vars:
@@ -14,6 +19,8 @@
- name: Add test bridge
hosts: all
+ tags:
+ - 'tests::net::bridge'
vars:
network_connections:
- name: "{{ interface }}"
@@ -36,11 +43,15 @@
task: tasks/assert_profile_present.yml
- import_playbook: down_profile.yml
+ tags:
+ - 'tests::net::bridge'
vars:
profile: "{{ interface }}"
# FIXME: assert profile/device down
- import_playbook: remove_profile.yml
+ tags:
+ - 'tests::net::bridge'
vars:
profile: "{{ interface }}"
@@ -51,5 +62,19 @@
# FIXME: Devices might still be left when profile is absent
# - import_playbook: run_tasks.yml
-# vars:
+# vars:
# task: tasks/assert_device_absent.yml
+
+- name: Remove test bridge
+ hosts: all
+ tags:
+ - 'tests::cleanup'
+ - 'tests::net::bridge::cleanup'
+ tasks:
+ - command: 'ip link delete "{{ interface }}"'
+ ignore_errors: yes
+
+- name: Restore host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/restore_state.yml
diff --git a/tests/playbooks/tests_checkpoint_cleanup.yml b/tests/playbooks/tests_checkpoint_cleanup.yml
index 18e3fd7..3b5a41a 100644
--- a/tests/playbooks/tests_checkpoint_cleanup.yml
+++ b/tests/playbooks/tests_checkpoint_cleanup.yml
@@ -4,6 +4,11 @@
# mark a device as unmanaged for NM and then tries to activate it using NM.
# This failed without removing the checkpoint.
---
+- name: Save host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/save_state.yml
+
- hosts: all
vars:
interface: cptstbr
@@ -80,3 +85,8 @@
ignore_errors: true
tags:
- "tests::cleanup"
+
+- name: Restore host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/restore_state.yml
diff --git a/tests/playbooks/tests_ethernet.yml b/tests/playbooks/tests_ethernet.yml
index cd02579..adcffee 100644
--- a/tests/playbooks/tests_ethernet.yml
+++ b/tests/playbooks/tests_ethernet.yml
@@ -1,5 +1,10 @@
# SPDX-License-Identifier: BSD-3-Clause
---
+- name: Save host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/save_state.yml
+
- hosts: all
tasks:
- debug:
@@ -9,6 +14,8 @@
- name: Test configuring ethernet devices
hosts: all
+ tags:
+ - 'tests::net::veth'
vars:
type: veth
interface: lsr27
@@ -26,6 +33,8 @@
- name: Test static interface up
hosts: all
+ tags:
+ - 'tests::net::reconf'
vars:
network_connections:
- name: "{{ interface }}"
@@ -48,17 +57,29 @@
# FIXME: assert profile present
# FIXME: assert profile/device up + IP address
- import_playbook: down_profile.yml
+ tags:
+ - 'tests::cleanup'
vars:
profile: "{{ interface }}"
# FIXME: assert profile/device down
- import_playbook: remove_profile.yml
+ tags:
+ - 'tests::cleanup'
vars:
profile: "{{ interface }}"
# FIXME: assert profile away
- name: Remove interfaces
hosts: all
+ tags:
+ - 'tests::cleanup'
+ - 'tests::net::veth::cleanup'
tasks:
- include_tasks: tasks/manage_test_interface.yml
vars:
state: absent
- include_tasks: tasks/assert_device_absent.yml
+
+- name: Restore host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/restore_state.yml
diff --git a/tests/playbooks/tests_ethtool_features.yml b/tests/playbooks/tests_ethtool_features.yml
index 43fddc3..d1a87fe 100644
--- a/tests/playbooks/tests_ethtool_features.yml
+++ b/tests/playbooks/tests_ethtool_features.yml
@@ -1,5 +1,10 @@
# SPDX-License-Identifier: BSD-3-Clause
---
+- name: Save host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/save_state.yml
+
- hosts: all
vars:
interface: testnic1
@@ -198,3 +203,8 @@
state: absent
tags:
- "tests::cleanup"
+
+- name: Restore host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/restore_state.yml
diff --git a/tests/playbooks/tests_provider.yml b/tests/playbooks/tests_provider.yml
index 1db2d08..e097b4b 100644
--- a/tests/playbooks/tests_provider.yml
+++ b/tests/playbooks/tests_provider.yml
@@ -1,5 +1,10 @@
# SPDX-License-Identifier: BSD-3-Clause
---
+- name: Save host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/save_state.yml
+
- hosts: all
vars:
interface: testnic1
@@ -33,3 +38,8 @@
- tasks/cleanup_profile+device.yml
tags:
- tests::provider:initscripts_to_nm
+
+- name: Restore host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/restore_state.yml
diff --git a/tests/playbooks/tests_reapply.yml b/tests/playbooks/tests_reapply.yml
index 4b1cb09..6995607 100644
--- a/tests/playbooks/tests_reapply.yml
+++ b/tests/playbooks/tests_reapply.yml
@@ -4,6 +4,11 @@
# of via Ansible. Until there is better test support for this, just check the
# log output for the respective log message.
---
+- name: Save host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/save_state.yml
+
- hosts: all
vars:
interface: rpltstbr
@@ -64,3 +69,8 @@
ignore_errors: true
tags:
- "tests::cleanup"
+
+- name: Restore host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/restore_state.yml
diff --git a/tests/playbooks/tests_states.yml b/tests/playbooks/tests_states.yml
index eec27c0..a8d0ecd 100644
--- a/tests/playbooks/tests_states.yml
+++ b/tests/playbooks/tests_states.yml
@@ -135,3 +135,23 @@
- tasks/cleanup_profile+device.yml
tags:
- tests::states:remove_down_twice
+
+ pre_tasks:
+ - name: Save host state
+ import_tasks: tasks/save_state.yml
+
+ post_tasks:
+ - name: Remove test profile
+ tags:
+ - 'tests::cleanup'
+ - 'tests::net::bridge::cleanup'
+ import_role:
+ name: linux-system-roles.network
+ vars:
+ network_connections:
+ - name: statebr
+ state: down
+ persistent_state: absent
+
+ - name: Restore host state
+ import_tasks: tasks/restore_state.yml
diff --git a/tests/playbooks/tests_vlan_mtu.yml b/tests/playbooks/tests_vlan_mtu.yml
index 029b599..378d5fe 100644
--- a/tests/playbooks/tests_vlan_mtu.yml
+++ b/tests/playbooks/tests_vlan_mtu.yml
@@ -10,6 +10,8 @@
- include_tasks: tasks/manage_test_interface.yml
vars:
state: present
+ tags:
+ - 'tests::net::veth'
- include_tasks: tasks/assert_device_present.yml
- name: >-
TEST: I can configure the MTU for a vlan interface without autoconnect.
@@ -38,6 +40,8 @@
ip:
dhcp4: false
auto6: false
+ tags:
+ - 'tests::net::reconf'
- include_tasks: tasks/assert_device_present.yml
vars:
interface: "{{ vlan_interface }}"
@@ -62,6 +66,20 @@
persistent_state: absent
state: down
ignore_errors: true
+ tags:
+ - 'tests::cleanup'
+ - 'tests::net::veth::cleanup'
- include_tasks: tasks/manage_test_interface.yml
vars:
state: absent
+ tags:
+ - 'tests::cleanup'
+ - 'tests::net::veth::cleanup'
+
+ pre_tasks:
+ - name: Save host state
+ import_tasks: tasks/save_state.yml
+
+ post_tasks:
+ - name: Restore host state
+ import_tasks: tasks/restore_state.yml
diff --git a/tests/playbooks/tests_wireless.yml b/tests/playbooks/tests_wireless.yml
index 822a15e..52661bd 100644
--- a/tests/playbooks/tests_wireless.yml
+++ b/tests/playbooks/tests_wireless.yml
@@ -1,5 +1,10 @@
# SPDX-License-Identifier: BSD-3-Clause
---
+- name: Save host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/save_state.yml
+
- hosts: all
vars:
interface: wlan0
@@ -86,3 +91,8 @@
- include_tasks: tasks/cleanup_mock_wifi.yml
tags:
- "tests::cleanup"
+
+- name: Restore host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/restore_state.yml
diff --git a/tests/tasks/commonvars.yml b/tests/tasks/commonvars.yml
new file mode 100644
index 0000000..50452f7
--- /dev/null
+++ b/tests/tasks/commonvars.yml
@@ -0,0 +1,2 @@
+restore_services:
+ - NetworkManager
diff --git a/tests/tasks/get_services_state.yml b/tests/tasks/get_services_state.yml
new file mode 100644
index 0000000..4fe5d36
--- /dev/null
+++ b/tests/tasks/get_services_state.yml
@@ -0,0 +1,4 @@
+- name: Get initial state of services
+ tags: tests::cleanup
+ service_facts:
+ register: initial_state
diff --git a/tests/tasks/restore_services_state.yml b/tests/tasks/restore_services_state.yml
new file mode 100644
index 0000000..2035dfc
--- /dev/null
+++ b/tests/tasks/restore_services_state.yml
@@ -0,0 +1,22 @@
+- block:
+ - name: load common vars
+ include_vars:
+ file: commonvars.yml
+
+ - name: Get final state of services
+ service_facts:
+ register: final_state
+
+ - name: Restore state of services
+ service:
+ name: "{{ item }}"
+ state: "{{ 'started' if
+ initial_state.ansible_facts.services[sname]['state']
+ == 'running' else 'stopped' }}"
+ when:
+ - sname in final_state.ansible_facts.services
+ - sname in initial_state.ansible_facts.services
+ vars:
+ sname: "{{ item + '.service' }}"
+ with_items: "{{ restore_services }}"
+ tags: tests::cleanup
diff --git a/tests/tasks/restore_state.yml b/tests/tasks/restore_state.yml
new file mode 100644
index 0000000..f4e3d5f
--- /dev/null
+++ b/tests/tasks/restore_state.yml
@@ -0,0 +1,24 @@
+---
+- name: Remove /etc/sysconfig/network if it did not exist before
+ tags:
+ - 'tests::cleanup'
+ file:
+ path: /etc/sysconfig/network
+ state: absent
+ when:
+ - etc_sysconfig_network_stat is defined
+ - not etc_sysconfig_network_stat.stat.exists
+
+- name: Restore services
+ import_tasks: restore_services_state.yml
+
+- name: reload NetworkManager
+ tags:
+ - 'tests::cleanup'
+ command: nmcli connection reload
+ when:
+ - sname in final_state.ansible_facts.services
+ - sname in initial_state.ansible_facts.services
+ - final_state.ansible_facts.services[sname]['state'] == 'running'
+ vars:
+ sname: NetworkManager.service
diff --git a/tests/tasks/save_state.yml b/tests/tasks/save_state.yml
new file mode 100644
index 0000000..5690aed
--- /dev/null
+++ b/tests/tasks/save_state.yml
@@ -0,0 +1,11 @@
+---
+- name: Get services state
+ import_tasks: get_services_state.yml
+
+- name: Investigate /etc/sysconfig/network presence
+ tags:
+ - 'tests::cleanup'
+ stat:
+ path: /etc/sysconfig/network
+ register: etc_sysconfig_network_stat
+ ignore_errors: yes
diff --git a/tests/tests_802_1x_nm.yml b/tests/tests_802_1x_nm.yml
index 3bd0719..77cf2d9 100644
--- a/tests/tests_802_1x_nm.yml
+++ b/tests/tests_802_1x_nm.yml
@@ -4,6 +4,8 @@
# set network provider and gather facts
- hosts: all
name: Run playbook 'playbooks/tests_802_1x.yml' with nm as provider
+ tags:
+ - tests::expfail
tasks:
- name: Set network provider to 'nm'
set_fact:
@@ -21,3 +23,5 @@
- import_playbook: playbooks/tests_802_1x.yml
when:
- ansible_distribution_major_version != '6'
+ tags:
+ - tests::expfail
diff --git a/tests/tests_802_1x_updated_nm.yml b/tests/tests_802_1x_updated_nm.yml
index 0d4c741..ca666a6 100644
--- a/tests/tests_802_1x_updated_nm.yml
+++ b/tests/tests_802_1x_updated_nm.yml
@@ -4,6 +4,8 @@
# set network provider and gather facts
- hosts: all
name: Run playbook 'playbooks/tests_802_1x_updated.yml' with nm as provider
+ tags:
+ - tests::expfail
tasks:
- name: Set network provider to 'nm'
set_fact:
@@ -21,3 +23,5 @@
- import_playbook: playbooks/tests_802_1x_updated.yml
when:
- ansible_distribution_major_version != '6'
+ tags:
+ - tests::expfail
diff --git a/tests/tests_default.yml b/tests/tests_default.yml
index f6f7550..98e3c7e 100644
--- a/tests/tests_default.yml
+++ b/tests/tests_default.yml
@@ -10,3 +10,11 @@
that:
- "'warnings' not in __network_connections_result"
msg: "There are warnings"
+
+ pre_tasks:
+ - name: Save host state
+ import_tasks: tasks/save_state.yml
+
+ post_tasks:
+ - name: Restore host state
+ import_tasks: tasks/restore_state.yml
diff --git a/tests/tests_helpers_and_asserts.yml b/tests/tests_helpers_and_asserts.yml
index 5514182..d9bfa11 100644
--- a/tests/tests_helpers_and_asserts.yml
+++ b/tests/tests_helpers_and_asserts.yml
@@ -15,6 +15,14 @@
type: dummy
interface: dummy1298
+ pre_tasks:
+ - name: Save host state
+ import_tasks: tasks/save_state.yml
+
+ post_tasks:
+ - name: Restore host state
+ import_tasks: tasks/restore_state.yml
+
# FIXME: when: does not seem to work with include_tasks, therefore this cannot
# be safely tested for now
# - name: test tap interfaces


@@ -0,0 +1,16 @@
diff --git a/meta/main.yml b/meta/main.yml
index a0ef6f4..da22270 100644
--- a/meta/main.yml
+++ b/meta/main.yml
@@ -7,8 +7,8 @@ galaxy_info:
min_ansible_version: 2.2
platforms:
- name: Fedora
- versions: [ 24, 25 ]
+ versions: [ 31, 32 ]
- name: EL
- versions: [ 6, 7 ]
+ versions: [ 6, 7, 8 ]


@@ -0,0 +1,80 @@
diff --git a/tasks/ssh.yml b/tasks/ssh.yml
index 1a4e858..b05d01a 100644
--- a/tasks/ssh.yml
+++ b/tasks/ssh.yml
@@ -18,3 +18,15 @@
key: "{{ keydata.content|b64decode }}"
state: present
delegate_to: "{{ kdump_ssh_server }}"
+
+- name: Fetch the servers public key
+ slurp:
+ src: /etc/ssh/ssh_host_rsa_key.pub
+ register: serverpubkey
+ delegate_to: "{{ kdump_ssh_server }}"
+
+- name: Add the servers public key to known_hosts on managed node
+ known_hosts:
+ key: "{{ kdump_ssh_server_location }} {{ serverpubkey.content | b64decode }}"
+ name: "{{ kdump_ssh_server_location }}"
+ path: /etc/ssh/ssh_known_hosts
diff --git a/templates/kdump.conf.j2 b/templates/kdump.conf.j2
index bf24210..504ff34 100644
--- a/templates/kdump.conf.j2
+++ b/templates/kdump.conf.j2
@@ -1,12 +1,17 @@
# {{ ansible_managed }}
{% if kdump_target %}
-{{ kdump_target.type }} {{ kdump_target.location }}
-{% endif %}
+{% if kdump_target.type == "ssh" %}
+ssh {{ kdump_target.location | d(kdump_ssh_user ~ '@' ~ kdump_ssh_server) }}
-{% if kdump_target and kdump_target.type == "ssh" and kdump_sshkey != '/root/.ssh/kdump_id_rsa' %}
+{% if kdump_sshkey != '/root/.ssh/kdump_id_rsa' %}
sshkey {{ kdump_sshkey }}
{% endif %}
+{% else %}
+{{ kdump_target.type }} {{ kdump_target.location }}
+
+{% endif %}
+{% endif %}
path {{ kdump_path }}
{% if kdump_core_collector %}
diff --git a/tests/tests_ssh.yml b/tests/tests_ssh.yml
index 679148e..14a59d9 100644
--- a/tests/tests_ssh.yml
+++ b/tests/tests_ssh.yml
@@ -6,6 +6,11 @@
# known and ansible is supposed to be configured to be able to
# connect to it (via inventory).
kdump_ssh_server_outside: localhost
+ kdump_ssh_source: "{{ ansible_env['SSH_CONNECTION'].split()[0] }}"
+
+ # this is the address at which the ssh dump server can be reached
+ # from the managed host. Dumps will be uploaded there.
+ kdump_ssh_server_inside: "{{ kdump_ssh_source if kdump_ssh_source in hostvars[kdump_ssh_server_outside]['ansible_all_ipv4_addresses'] + hostvars[kdump_ssh_server_outside]['ansible_all_ipv6_addresses'] else hostvars[kdump_ssh_server_outside]['ansible_default_ipv4']['address'] }}"
tasks:
- name: gather facts from {{ kdump_ssh_server_outside }}
@@ -25,8 +30,5 @@
type: ssh
# This is the ssh dump server address visible from inside
# the machine being configured. Dumps are to be copied
- # there. We make here the assumption that this machine is
- # being run as a VM and the dump server is the VM host
- # (i.e. for ansible this is localhost). From the VM its
- # address is then identical to the default route.
- location: "{{ kdump_ssh_user }}@{{ ansible_default_ipv4.gateway }}"
+ # there.
+ location: "{{ kdump_ssh_user }}@{{ kdump_ssh_server_inside }}"
diff --git a/vars/main.yml b/vars/main.yml
new file mode 100644
index 0000000..34d2d62
--- /dev/null
+++ b/vars/main.yml
@@ -0,0 +1,2 @@
+# determine the managed node facing ssh server address
+kdump_ssh_server_location: "{{ kdump_target.location | regex_replace('.*@(.*)$', '\\1') if kdump_target.location is defined else kdump_ssh_server }}"
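The `regex_replace` above keeps only the host part of a `user@host` location, so the `known_hosts` entry matches the host ssh actually contacts. For example (hypothetical value, demo only):
```yaml
- name: Show the location-to-host extraction
  debug:
    msg: "{{ 'kdump@dump.example.com' | regex_replace('.*@(.*)$', '\\1') }}"
  # prints "dump.example.com"; without a "user@" prefix the value passes through unchanged
```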


@@ -0,0 +1,148 @@
diff --git a/examples/bond_simple.yml b/examples/bond_simple.yml
index 4ca9811..f6f5897 100644
--- a/examples/bond_simple.yml
+++ b/examples/bond_simple.yml
@@ -32,5 +32,5 @@
interface_name: eth2
master: bond0
roles:
- - linux-system-roles.network
+ - rhel-system-roles.network
...
diff --git a/examples/bond_with_vlan.yml b/examples/bond_with_vlan.yml
index 2e6be23..3b7a6dc 100644
--- a/examples/bond_with_vlan.yml
+++ b/examples/bond_with_vlan.yml
@@ -35,4 +35,4 @@
- "192.0.2.{{ network_iphost }}/24"
roles:
- - linux-system-roles.network
+ - rhel-system-roles.network
diff --git a/examples/bridge_with_vlan.yml b/examples/bridge_with_vlan.yml
index 037ff8e..83c586d 100644
--- a/examples/bridge_with_vlan.yml
+++ b/examples/bridge_with_vlan.yml
@@ -33,4 +33,4 @@
- "192.0.2.{{ network_iphost }}/24"
roles:
- - linux-system-roles.network
+ - rhel-system-roles.network
diff --git a/examples/eth_simple_auto.yml b/examples/eth_simple_auto.yml
index 0ba168a..e4c4a54 100644
--- a/examples/eth_simple_auto.yml
+++ b/examples/eth_simple_auto.yml
@@ -15,4 +15,4 @@
mtu: 1450
roles:
- - linux-system-roles.network
+ - rhel-system-roles.network
diff --git a/examples/eth_with_802_1x.yml b/examples/eth_with_802_1x.yml
index 92a93a9..7731b7d 100644
--- a/examples/eth_with_802_1x.yml
+++ b/examples/eth_with_802_1x.yml
@@ -27,4 +27,4 @@
- client.pem
- cacert.pem
roles:
- - linux-system-roles.network
+ - rhel-system-roles.network
diff --git a/examples/eth_with_vlan.yml b/examples/eth_with_vlan.yml
index 69da673..e0c2f11 100644
--- a/examples/eth_with_vlan.yml
+++ b/examples/eth_with_vlan.yml
@@ -26,4 +26,4 @@
- "192.0.2.{{ network_iphost }}/24"
roles:
- - linux-system-roles.network
+ - rhel-system-roles.network
diff --git a/examples/ethtool_features.yml b/examples/ethtool_features.yml
index c580f89..0881316 100644
--- a/examples/ethtool_features.yml
+++ b/examples/ethtool_features.yml
@@ -3,7 +3,7 @@
- hosts: all
tasks:
- include_role:
- name: linux-system-roles.network
+ name: rhel-system-roles.network
vars:
network_connections:
- name: "{{ network_interface_name1 }}"
diff --git a/examples/ethtool_features_default.yml b/examples/ethtool_features_default.yml
index 78965e6..3cdd731 100644
--- a/examples/ethtool_features_default.yml
+++ b/examples/ethtool_features_default.yml
@@ -3,7 +3,7 @@
- hosts: all
tasks:
- include_role:
- name: linux-system-roles.network
+ name: rhel-system-roles.network
vars:
network_connections:
- name: "{{ network_interface_name1 }}"
diff --git a/examples/infiniband.yml b/examples/infiniband.yml
index 22603d9..9e7e267 100644
--- a/examples/infiniband.yml
+++ b/examples/infiniband.yml
@@ -23,4 +23,4 @@
- 198.51.100.133/30
roles:
- - linux-system-roles.network
+ - rhel-system-roles.network
diff --git a/examples/macvlan.yml b/examples/macvlan.yml
index 90cd09d..0064ad4 100644
--- a/examples/macvlan.yml
+++ b/examples/macvlan.yml
@@ -26,4 +26,4 @@
- 192.168.1.1/24
roles:
- - linux-system-roles.network
+ - rhel-system-roles.network
diff --git a/examples/remove+down_profile.yml b/examples/remove+down_profile.yml
index da2b1b8..f2d93e8 100644
--- a/examples/remove+down_profile.yml
+++ b/examples/remove+down_profile.yml
@@ -8,5 +8,5 @@
persistent_state: absent
state: down
roles:
- - linux-system-roles.network
+ - rhel-system-roles.network
...
diff --git a/examples/wireless_wpa_psk.yml b/examples/wireless_wpa_psk.yml
index eeec22f..60b0d83 100644
--- a/examples/wireless_wpa_psk.yml
+++ b/examples/wireless_wpa_psk.yml
@@ -12,4 +12,4 @@
# see https://docs.ansible.com/ansible/latest/user_guide/vault.html
password: "p@55w0rD"
roles:
- - linux-system-roles.network
+ - rhel-system-roles.network
diff --git a/tests/playbooks/down_profile.yml b/tests/playbooks/down_profile.yml
index 5087240..65e542d 100644
--- a/tests/playbooks/down_profile.yml
+++ b/tests/playbooks/down_profile.yml
@@ -7,4 +7,4 @@
- name: "{{ profile }}"
state: down
roles:
- - linux-system-roles.network
+ - rhel-system-roles.network
diff --git a/tests/playbooks/remove_profile.yml b/tests/playbooks/remove_profile.yml
index a50e848..b6e6796 100644
--- a/tests/playbooks/remove_profile.yml
+++ b/tests/playbooks/remove_profile.yml
@@ -7,4 +7,4 @@
- name: "{{ profile }}"
persistent_state: absent
roles:
- - linux-system-roles.network
+ - rhel-system-roles.network


@@ -0,0 +1,40 @@
diff --git a/README.md b/README.md
index 5950215..df64284 100644
--- a/README.md
+++ b/README.md
@@ -17,7 +17,7 @@ Example Playbook
Install and enable postfix. Configure "relay_domains=$mydestination" and
-```
+```yaml
---
- hosts: all
vars:
@@ -31,7 +31,7 @@ Install and enable postfix. Configure "relay_domains=$mydestination" and
Install and enable postfix. Do not run 'postfix check' before restarting
postfix:
-```
+```yaml
---
- hosts: all
vars:
@@ -43,7 +43,7 @@ postfix:
Install and enable postfix. Do single backup of main.cf (older backup will be
rewritten) and configure "relay_host=example.com":
-```
+```yaml
---
- hosts: all
vars:
@@ -58,7 +58,7 @@ Install and enable postfix. Do timestamped backup of main.cf and
configure "relay_host=example.com" (if postfix_backup_multiple is
set to true postfix_backup is ignored):
-```
+```yaml
---
- hosts: all
vars:


@@ -0,0 +1,40 @@
diff --git a/README.md b/README.md
index 5950215..a59d72f 100644
--- a/README.md
+++ b/README.md
@@ -25,7 +25,7 @@ Install and enable postfix. Configure "relay_domains=$mydestination" and
relay_domains: "$mydestination"
relay_host: "example.com"
roles:
- - postfix
+ - linux-system-roles.postfix
```
Install and enable postfix. Do not run 'postfix check' before restarting
@@ -37,7 +37,7 @@ postfix:
vars:
postfix_check: false
roles:
- - postfix
+ - linux-system-roles.postfix
```
Install and enable postfix. Do single backup of main.cf (older backup will be
@@ -51,7 +51,7 @@ rewritten) and configure "relay_host=example.com":
relay_host: "example.com"
postfix_backup: true
roles:
- - postfix
+ - linux-system-roles.postfix
```
Install and enable postfix. Do timestamped backup of main.cf and
@@ -66,7 +66,7 @@ set to true postfix_backup is ignored):
relay_host: "example.com"
postfix_backup_multiple: true
roles:
- - postfix
+ - linux-system-roles.postfix
```


@@ -0,0 +1,32 @@
diff --git a/README.md b/README.md
index a0385b0..6efc62d 100644
--- a/README.md
+++ b/README.md
@@ -42,7 +42,7 @@ This role can be configured using variab
vars:
[ see below ]
roles:
- - role: linux-system-roles.selinux
+ - role: rhel-system-roles.selinux
become: true
```
diff --git a/selinux-playbook.yml b/selinux-playbook.yml
index 78d3953..b2348d5 100644
--- a/selinux-playbook.yml
+++ b/selinux-playbook.yml
@@ -31,7 +31,7 @@
- name: execute the role and catch errors
block:
- include_role:
- name: linux-system-roles.selinux
+ name: rhel-system-roles.selinux
rescue:
# Fail if failed for a different reason than selinux_reboot_required.
- name: handle errors
@@ -52,4 +52,4 @@
- name: reapply the role
include_role:
- name: linux-system-roles.selinux
+ name: rhel-system-roles.selinux


@@ -0,0 +1,13 @@
diff --git a/README.md b/README.md
index c2debc9..d9e40b3 100644
--- a/README.md
+++ b/README.md
@@ -154,7 +154,7 @@ Example Playbook
- hosts: all
roles:
- - name: linux-system-roles.storage
+ - name: rhel-system-roles.storage
storage_pools:
- name: app
disks:


@@ -0,0 +1,46 @@
diff -up timesync-1.0.0/README.md.orig timesync-1.0.0/README.md
--- timesync-1.0.0/README.md.orig 2018-08-21 11:46:41.000000000 +0200
+++ timesync-1.0.0/README.md 2018-11-06 22:29:14.586770442 +0100
@@ -82,7 +82,7 @@ Install and configure ntp to synchronize
- hostname: baz.example.com
iburst: yes
roles:
- - linux-system-roles.timesync
+ - rhel-system-roles.timesync
```
Install and configure linuxptp to synchronize the system clock with a
@@ -95,7 +95,7 @@ grandmaster in PTP domain number 0, whic
- number: 0
interfaces: [ eth0 ]
roles:
- - linux-system-roles.timesync
+ - rhel-system-roles.timesync
```
Install and configure chrony and linuxptp to synchronize the system clock with
@@ -122,5 +122,5 @@ synchronization:
transport: UDPv4
delay: 0.000010
roles:
- - linux-system-roles.timesync
+ - rhel-system-roles.timesync
```
diff -up timesync-85b90feedee2a5b3148fd3f72b229b44ec597682/examples/multiple-ntp-servers.yml.orig timesync-85b90feedee2a5b3148fd3f72b229b44ec597682/examples/multiple-ntp-servers.yml
--- timesync-85b90feedee2a5b3148fd3f72b229b44ec597682/examples/multiple-ntp-servers.yml.orig 2019-06-03 18:03:18.081868584 +0200
+++ timesync-85b90feedee2a5b3148fd3f72b229b44ec597682/examples/multiple-ntp-servers.yml 2019-06-03 18:03:26.718704991 +0200
@@ -11,4 +11,4 @@
- hostname: 3.pool.ntp.org
iburst: yes
roles:
- - linux-system-roles.timesync
+ - rhel-system-roles.timesync
diff -up timesync-85b90feedee2a5b3148fd3f72b229b44ec597682/examples/single-pool.yml.orig timesync-85b90feedee2a5b3148fd3f72b229b44ec597682/examples/single-pool.yml
--- timesync-85b90feedee2a5b3148fd3f72b229b44ec597682/examples/single-pool.yml.orig 2019-06-03 16:36:40.000000000 +0200
+++ timesync-85b90feedee2a5b3148fd3f72b229b44ec597682/examples/single-pool.yml 2019-06-03 18:03:36.721515519 +0200
@@ -6,4 +6,4 @@
pool: yes
iburst: yes
roles:
- - linux-system-roles.timesync
+ - rhel-system-roles.timesync


@@ -0,0 +1,162 @@
diff --git a/tests/set_selinux_variables.yml b/tests/set_selinux_variables.yml
index f294101..7571066 100644
--- a/tests/set_selinux_variables.yml
+++ b/tests/set_selinux_variables.yml
@@ -1,4 +1,12 @@
---
+- name: Install SELinux tool semanage on Fedora and RHEL/CentOS 8
+ package:
+ name:
+ - policycoreutils-python-utils
+ state: present
+ when: ansible_distribution == "Fedora" or
+ ( ansible_distribution_major_version > "7" and
+ ( ansible_distribution == "CentOS" or ansible_distribution == "RedHat" ))
- name: Get local modifications - boolean
command: /usr/sbin/semanage boolean -l -n -C
register: selinux_role_boolean
diff --git a/tests/tests_all_purge.yml b/tests/tests_all_purge.yml
index 03dfe05..c686837 100644
--- a/tests/tests_all_purge.yml
+++ b/tests/tests_all_purge.yml
@@ -14,7 +14,9 @@
name:
- policycoreutils-python-utils
state: present
- when: ansible_distribution == "Fedora"
+ when: ansible_distribution == "Fedora" or
+ ( ansible_distribution_major_version > "7" and
+ ( ansible_distribution == "CentOS" or ansible_distribution == "RedHat" ))
- name: Add a Linux System Roles SELinux User
user:
diff --git a/tests/tests_all_transitions.yml b/tests/tests_all_transitions.yml
index f608a42..d0d209b 100644
--- a/tests/tests_all_transitions.yml
+++ b/tests/tests_all_transitions.yml
@@ -1,6 +1,8 @@
- name: Test all the possible selinux_state transitions
hosts: all
become: true
+ tags:
+ - 'tests::reboot'
vars:
states:
- permissive
diff --git a/tests/tests_boolean.yml b/tests/tests_boolean.yml
index 47eafc0..2aa0025 100644
--- a/tests/tests_boolean.yml
+++ b/tests/tests_boolean.yml
@@ -1,5 +1,6 @@
- name: Check if selinux role sets SELinux booleans
+ tags: tests::expfail
hosts: all
become: true
@@ -12,7 +13,7 @@
selinux_booleans:
- { name: 'samba_enable_home_dirs', state: 'on', persistent: 'yes' }
- - include: set_selinux_variables.yml
+ - import_tasks: set_selinux_variables.yml
- name: save state after initial changes and before other changes
set_fact:
boolean_before: "{{ selinux_role_boolean.stdout_lines }}"
diff --git a/tests/tests_fcontext.yml b/tests/tests_fcontext.yml
index 0a411fb..f6f1bf4 100644
--- a/tests/tests_fcontext.yml
+++ b/tests/tests_fcontext.yml
@@ -13,7 +13,7 @@
selinux_fcontexts:
- { target: '/tmp/test_dir1(/.*)?', setype: 'user_home_dir_t', ftype: 'd' }
- - include: set_selinux_variables.yml
+ - import_tasks: set_selinux_variables.yml
- name: save state after initial changes and before other changes
set_fact:
fcontext_before: "{{ selinux_role_fcontext.stdout }}"
diff --git a/tests/tests_login.yml b/tests/tests_login.yml
index efa826d..c7ce462 100644
--- a/tests/tests_login.yml
+++ b/tests/tests_login.yml
@@ -18,7 +18,7 @@
- { login: 'sar-user', seuser: 'staff_u', serange: 's0-s0:c0.c1023', state: 'present' }
- - include: set_selinux_variables.yml
+ - import_tasks: set_selinux_variables.yml
- name: save state after initial changes and before other changes
set_fact:
login_before: "{{ selinux_role_login.stdout }}"
diff --git a/tests/tests_port.yml b/tests/tests_port.yml
index 446f79d..7bb112e 100644
--- a/tests/tests_port.yml
+++ b/tests/tests_port.yml
@@ -29,7 +29,7 @@
selinux_ports:
- { ports: '22022', proto: 'tcp', setype: 'ssh_port_t', state: 'present' }
- - include: set_selinux_variables.yml
+ - import_tasks: set_selinux_variables.yml
- name: save state after other changes
set_fact:
port_after: "{{ selinux_role_port.stdout }}"
diff --git a/tests/tests_selinux_disabled.yml b/tests/tests_selinux_disabled.yml
index afd23e4..706882f 100644
--- a/tests/tests_selinux_disabled.yml
+++ b/tests/tests_selinux_disabled.yml
@@ -18,7 +18,9 @@
name:
- policycoreutils-python-utils
state: present
- when: ansible_distribution == "Fedora"
+ when: ansible_distribution == "Fedora" or
+ ( ansible_distribution_major_version > "7" and
+ ( ansible_distribution == "CentOS" or ansible_distribution == "RedHat" ))
- name: Add a Linux System Roles SELinux User
user:
@@ -67,17 +69,28 @@
assert:
that: "{{ ansible_selinux.config_mode == 'enforcing' }}"
msg: "SELinux config mode should be enforcing instead of {{ ansible_selinux.config_mode }}"
- - name: Restore original /etc/selinux/config
- copy:
- remote_src: true
- dest: /etc/selinux/config
- src: /etc/selinux/config.test_selinux_disabled
- - name: Remove /etc/selinux/config backup
- file:
- path: /etc/selinux/config.test_selinux_disabled
- state: absent
- - name: Remove Linux System Roles SELinux User
- user:
- name: sar-user
- remove: yes
- state: absent
+
+ - name: Cleanup
+ tags: [ 'tests::cleanup' ]
+ block:
+ - name: Restore original /etc/selinux/config
+ copy:
+ remote_src: true
+ dest: /etc/selinux/config
+ src: /etc/selinux/config.test_selinux_disabled
+
+ - name: Remove /etc/selinux/config backup
+ file:
+ path: /etc/selinux/config.test_selinux_disabled
+ state: absent
+
+ - name: Remove Linux System Roles SELinux User
+ user:
+ name: sar-user
+ remove: yes
+ state: absent
+
+ - import_role:
+ name: selinux
+ vars:
+ selinux_all_purge: true


@@ -0,0 +1,602 @@
diff --git a/library/blivet.py b/library/blivet.py
index cb48e71..e1903f3 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -167,11 +167,16 @@ class BlivetBase(object):
raise NotImplementedError()
def _manage_one_encryption(self, device):
+ global safe_mode
ret = device
# Make sure to handle adjusting both existing stacks and future stacks.
if device == device.raw_device and self._spec_dict['encryption']:
# add luks
luks_name = "luks-%s" % device._name
+ if safe_mode and (device.original_format.type is not None or
+ device.original_format.name != get_format(None).name):
+ raise BlivetAnsibleError("cannot remove existing formatting on device '%s' in safe mode due to adding encryption" %
+ device._name)
if not device.format.exists:
fmt = device.format
else:
@@ -196,6 +201,10 @@ class BlivetBase(object):
ret = luks_device
elif device != device.raw_device and not self._spec_dict['encryption']:
# remove luks
+ if safe_mode and (device.original_format.type is not None or
+ device.original_format.name != get_format(None).name):
+ raise BlivetAnsibleError("cannot remove existing formatting on device '%s' in safe mode due to encryption removal" %
+ device._name)
if not device.format.exists:
fmt = device.format
else:
@@ -823,17 +832,21 @@ class BlivetPool(BlivetBase):
def manage(self):
""" Schedule actions to configure this pool according to the yaml input. """
+ global safe_mode
# look up the device
self._look_up_disks()
self._look_up_device()
# schedule destroy if appropriate, including member type change
- if not self.ultimately_present or self._member_management_is_destructive():
- if not self.ultimately_present:
- self._manage_volumes()
+ if not self.ultimately_present:
+ self._manage_volumes()
self._destroy()
- if not self.ultimately_present:
- return
+ return
+ elif self._member_management_is_destructive():
+ if safe_mode:
+ raise BlivetAnsibleError("cannot remove and recreate existing pool '%s' in safe mode" % self._pool['name'])
+ else:
+ self._destroy()
# schedule create if appropriate
self._create()
diff --git a/tests/create-test-file.yml b/tests/create-test-file.yml
new file mode 100644
index 0000000..d1091e2
--- /dev/null
+++ b/tests/create-test-file.yml
@@ -0,0 +1,13 @@
+# Create a file that will be checked to verify it still exists and that no data loss has occurred.
+# To use:
+# - set testfile to a path under the mountpoint being tested
+# - include this file (create-test-file.yml) before executing the
+# operation to be tested
+# - execute the operation that could potentially result in a loss of
+# data in the filesystem where testfile is located
+# - include verify-data-preservation.yml
+
+- name: create a file
+ file:
+ path: "{{ testfile }}"
+ state: touch
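The `verify-data-preservation.yml` referenced above is not part of this comparison; a minimal sketch of what such a check could contain (an assumption based on these comments, not the actual file):
```yaml
- name: stat the file created before the tested operation
  stat:
    path: "{{ testfile }}"
  register: __testfile_stat

- name: assert the file survived, i.e. no data loss
  assert:
    that: __testfile_stat.stat.exists
    msg: "data lost: {{ testfile }} is missing"
```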
diff --git a/tests/tests_luks.yml b/tests/tests_luks.yml
index f93efe5..f733714 100644
--- a/tests/tests_luks.yml
+++ b/tests/tests_luks.yml
@@ -2,8 +2,8 @@
- hosts: all
become: true
vars:
- storage_safe_mode: false
mount_location: '/opt/test1'
+ testfile: "{{ mount_location }}/quux"
volume_size: '5g'
tasks:
@@ -64,10 +64,47 @@
- include_tasks: verify-role-results.yml
+ - import_tasks: create-test-file.yml
+
+ - name: Test for correct handling of safe_mode
+ block:
+ - name: Remove the encryption layer
+ include_role:
+ name: storage
+ vars:
+ storage_volumes:
+ - name: foo
+ type: disk
+ disks: "{{ unused_disks }}"
+ mount_point: "{{ mount_location }}"
+ encryption: false
+ encryption_password: 'yabbadabbadoo'
+ - name: unreachable task
+ fail:
+ msg: UNREACH
+ rescue:
+ - name: Check that we failed in the role
+ assert:
+ that:
+ - ansible_failed_result.msg != 'UNREACH'
+ msg: "Role has not failed when it should have"
+
+ - name: Verify the output of the safe_mode test
+ assert:
+ that: "blivet_output.failed and
+ blivet_output.msg
+ |regex_search('cannot remove existing
+ formatting.*in safe mode due to encryption removal')
+ and not blivet_output.changed"
+ msg: "Unexpected behavior w/ existing filesystem in safe mode"
+
+ - import_tasks: verify-data-preservation.yml
+
- name: Remove the encryption layer
include_role:
name: storage
vars:
+ storage_safe_mode: false
storage_volumes:
- name: foo
type: disk
@@ -78,10 +115,47 @@
- include_tasks: verify-role-results.yml
+ - import_tasks: create-test-file.yml
+
+ - name: Test for correct handling of safe_mode
+ block:
+ - name: Add encryption to the volume
+ include_role:
+ name: storage
+ vars:
+ storage_volumes:
+ - name: foo
+ type: disk
+ disks: "{{ unused_disks }}"
+ mount_point: "{{ mount_location }}"
+ encryption: true
+ encryption_password: 'yabbadabbadoo'
+ - name: unreachable task
+ fail:
+ msg: UNREACH
+ rescue:
+ - name: Check that we failed in the role
+ assert:
+ that:
+ - ansible_failed_result.msg != 'UNREACH'
+ msg: "Role has not failed when it should have"
+
+ - name: Verify the output of the safe_mode test
+ assert:
+ that: "blivet_output.failed and
+ blivet_output.msg
+ |regex_search('cannot remove existing
+ formatting.*in safe mode due to adding encryption')
+ and not blivet_output.changed"
+ msg: "Unexpected behavior w/ existing filesystem in safe mode"
+
+ - import_tasks: verify-data-preservation.yml
+
- name: Add encryption to the volume
include_role:
name: storage
vars:
+ storage_safe_mode: false
storage_volumes:
- name: foo
type: disk
@@ -102,6 +176,7 @@
include_role:
name: storage
vars:
+ storage_safe_mode: false
storage_pools:
- name: foo
type: partition
@@ -135,6 +210,7 @@
include_role:
name: storage
vars:
+ storage_safe_mode: false
storage_pools:
- name: foo
type: partition
@@ -149,10 +225,51 @@
- include_tasks: verify-role-results.yml
+ - import_tasks: create-test-file.yml
+
+ - name: Test for correct handling of safe_mode
+ block:
+ - name: Remove the encryption layer
+ include_role:
+ name: storage
+ vars:
+ storage_pools:
+ - name: foo
+ type: partition
+ disks: "{{ unused_disks }}"
+ volumes:
+ - name: test1
+ type: partition
+ mount_point: "{{ mount_location }}"
+ size: 4g
+ encryption: false
+ encryption_password: 'yabbadabbadoo'
+ - name: unreachable task
+ fail:
+ msg: UNREACH
+ rescue:
+ - name: Check that we failed in the role
+ assert:
+ that:
+ - ansible_failed_result.msg != 'UNREACH'
+ msg: "Role has not failed when it should have"
+
+ - name: Verify the output of the safe_mode test
+ assert:
+ that: "blivet_output.failed and
+ blivet_output.msg
+ |regex_search('cannot remove existing
+ formatting.*in safe mode due to encryption removal')
+ and not blivet_output.changed"
+ msg: "Unexpected behavior w/ existing filesystem in safe mode"
+
+ - import_tasks: verify-data-preservation.yml
+
- name: Remove the encryption layer
include_role:
name: storage
vars:
+ storage_safe_mode: false
storage_pools:
- name: foo
type: partition
@@ -167,6 +284,48 @@
- include_tasks: verify-role-results.yml
+ - import_tasks: create-test-file.yml
+
+ - name: Test for correct handling of safe_mode
+ block:
+ - name: Add encryption to the volume
+ include_role:
+ name: storage
+ vars:
+ storage_pools:
+ - name: foo
+ type: partition
+ disks: "{{ unused_disks }}"
+ volumes:
+ - name: test1
+ type: partition
+ mount_point: "{{ mount_location }}"
+ size: 4g
+ encryption: true
+ encryption_password: 'yabbadabbadoo'
+
+ - name: unreachable task
+ fail:
+ msg: UNREACH
+
+ rescue:
+ - name: Check that we failed in the role
+ assert:
+ that:
+ - ansible_failed_result.msg != 'UNREACH'
+ msg: "Role has not failed when it should have"
+
+ - name: Verify the output of the safe_mode test
+ assert:
+ that: "blivet_output.failed and
+ blivet_output.msg
+ |regex_search('cannot remove existing
+ formatting.*in safe mode due to adding encryption')
+ and not blivet_output.changed"
+ msg: "Unexpected behavior w/ existing volume in safe mode"
+
+ - import_tasks: verify-data-preservation.yml
+
- name: Test key file handling
block:
- name: Create a key file
@@ -186,6 +345,7 @@
include_role:
name: storage
vars:
+ storage_safe_mode: false
storage_pools:
- name: foo
type: partition
@@ -216,6 +376,7 @@
include_role:
name: storage
vars:
+ storage_safe_mode: false
storage_pools:
- name: foo
type: lvm
@@ -248,6 +409,7 @@
include_role:
name: storage
vars:
+ storage_safe_mode: false
storage_pools:
- name: foo
type: lvm
@@ -264,10 +426,52 @@
- include_tasks: verify-role-results.yml
+ - import_tasks: create-test-file.yml
+
+ - name: Test for correct handling of safe_mode
+ block:
+ - name: Remove the encryption layer
+ include_role:
+ name: storage
+ vars:
+ storage_pools:
+ - name: foo
+ type: lvm
+ disks: "{{ unused_disks }}"
+ volumes:
+ - name: test1
+ mount_point: "{{ mount_location }}"
+ size: 4g
+ encryption: false
+ encryption_password: 'yabbadabbadoo'
+
+ - name: unreachable task
+ fail:
+ msg: UNREACH
+
+ rescue:
+ - name: Check that we failed in the role
+ assert:
+ that:
+ - ansible_failed_result.msg != 'UNREACH'
+ msg: "Role has not failed when it should have"
+
+ - name: Verify the output of the safe_mode test
+ assert:
+ that: "blivet_output.failed and
+ blivet_output.msg
+ |regex_search('cannot remove existing
+ formatting.*in safe mode due to encryption removal')
+ and not blivet_output.changed"
+ msg: "Unexpected behavior w/ existing volume in safe mode"
+
+ - import_tasks: verify-data-preservation.yml
+
- name: Remove the encryption layer
include_role:
name: storage
vars:
+ storage_safe_mode: false
storage_pools:
- name: foo
type: lvm
@@ -281,10 +485,52 @@
- include_tasks: verify-role-results.yml
+ - import_tasks: create-test-file.yml
+
+ - name: Test for correct handling of safe_mode
+ block:
+ - name: Add encryption to the volume
+ include_role:
+ name: storage
+ vars:
+ storage_pools:
+ - name: foo
+ type: lvm
+ disks: "{{ unused_disks }}"
+ volumes:
+ - name: test1
+ mount_point: "{{ mount_location }}"
+ size: 4g
+ encryption: true
+ encryption_password: 'yabbadabbadoo'
+
+ - name: unreachable task
+ fail:
+ msg: UNREACH
+
+ rescue:
+ - name: Check that we failed in the role
+ assert:
+ that:
+ - ansible_failed_result.msg != 'UNREACH'
+ msg: "Role has not failed when it should have"
+
+ - name: Verify the output of the safe_mode test
+ assert:
+ that: "blivet_output.failed and
+ blivet_output.msg
+ |regex_search('cannot remove existing
+ formatting.*in safe mode due to adding encryption')
+ and not blivet_output.changed"
+ msg: "Unexpected behavior w/ existing volume in safe mode"
+
+ - import_tasks: verify-data-preservation.yml
+
- name: Add encryption to the volume
include_role:
name: storage
vars:
+ storage_safe_mode: false
storage_pools:
- name: foo
type: lvm
diff --git a/tests/tests_luks_pool.yml b/tests/tests_luks_pool.yml
index b20b806..f44916f 100644
--- a/tests/tests_luks_pool.yml
+++ b/tests/tests_luks_pool.yml
@@ -2,9 +2,10 @@
- hosts: all
become: true
vars:
- storage_safe_mode: false
mount_location: '/opt/test1'
mount_location_2: '/opt/test2'
+ testfile: "{{ mount_location }}/quux"
+ testfile_location_2: "{{ mount_location_2 }}/quux"
volume_size: '5g'
tasks:
@@ -92,10 +93,50 @@
state: absent
changed_when: false
+ - import_tasks: create-test-file.yml
+
+ - name: Test for correct handling of safe_mode
+ block:
+ - name: Remove the encryption layer
+ include_role:
+ name: storage
+ vars:
+ storage_pools:
+ - name: foo
+ type: lvm
+ disks: "{{ unused_disks }}"
+ encryption: false
+ encryption_password: 'yabbadabbadoo'
+ volumes:
+ - name: test1
+ mount_point: "{{ mount_location }}"
+ size: 4g
+ - name: unreachable task
+ fail:
+ msg: UNREACH
+ rescue:
+ - name: Check that we failed in the role
+ assert:
+ that:
+ - ansible_failed_result.msg != 'UNREACH'
+ msg: "Role has not failed when it should have"
+
+ - name: Verify the output of the safe_mode test
+ assert:
+ that: "blivet_output.failed and
+ blivet_output.msg
+ |regex_search('cannot remove and recreate existing
+ pool.*in safe mode')
+ and not blivet_output.changed"
+ msg: "Unexpected behavior w/ existing pool in safe mode"
+
+ - import_tasks: verify-data-preservation.yml
+
- name: Remove the encryption layer
include_role:
name: storage
vars:
+ storage_safe_mode: false
storage_pools:
- name: foo
type: lvm
@@ -109,10 +150,53 @@
- include_tasks: verify-role-results.yml
- - name: Add encryption to the volume
+ - import_tasks: create-test-file.yml
+
+ - name: Test for correct handling of safe_mode
+ block:
+ - name: Add encryption to the pool
+ include_role:
+ name: storage
+ vars:
+ storage_pools:
+ - name: foo
+ type: lvm
+ disks: "{{ unused_disks }}"
+ encryption: true
+ encryption_password: 'yabbadabbadoo'
+ encryption_luks_version: luks1
+ encryption_key_size: 512
+ encryption_cipher: 'serpent-xts-plain64'
+ volumes:
+ - name: test1
+ mount_point: "{{ mount_location }}"
+ size: 4g
+ - name: unreachable task
+ fail:
+ msg: UNREACH
+ rescue:
+ - name: Check that we failed in the role
+ assert:
+ that:
+ - ansible_failed_result.msg != 'UNREACH'
+ msg: "Role has not failed when it should have"
+
+ - name: Verify the output of the safe_mode test
+ assert:
+ that: "blivet_output.failed and
+ blivet_output.msg
+ |regex_search('cannot remove and recreate existing
+ pool.*in safe mode')
+ and not blivet_output.changed"
+ msg: "Unexpected behavior w/ existing pool in safe mode"
+
+ - import_tasks: verify-data-preservation.yml
+
+ - name: Add encryption to the pool
include_role:
name: storage
vars:
+ storage_safe_mode: false
storage_pools:
- name: foo
type: lvm
@@ -129,6 +213,8 @@
- include_tasks: verify-role-results.yml
+ - import_tasks: create-test-file.yml
+
- name: Change the mountpoint, leaving encryption in place
include_role:
name: storage
@@ -144,6 +230,10 @@
mount_point: "{{ mount_location_2 }}"
size: 4g
+ - import_tasks: verify-data-preservation.yml
+ vars:
+ testfile: "{{ testfile_location_2 }}"
+
- include_tasks: verify-role-results.yml
- name: Clean up
diff --git a/tests/verify-data-preservation.yml b/tests/verify-data-preservation.yml
new file mode 100644
index 0000000..eed790f
--- /dev/null
+++ b/tests/verify-data-preservation.yml
@@ -0,0 +1,19 @@
+# Verify that the file still exists and that no data loss has occurred.
+# To use:
+# - set testfile to a path under the mountpoint being tested
+# - include create-test-file.yml before executing the operation to be
+# tested
+# - execute the operation that could potentially result in a loss of
+# data in the filesystem where testfile is located
+# - include this file (verify-data-preservation.yml)
+
+- name: stat the file
+ stat:
+ path: "{{ testfile }}"
+ register: stat_r
+
+- name: assert file presence
+ assert:
+ that:
+ stat_r.stat.isreg is defined and stat_r.stat.isreg
+ msg: "data lost!"
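
Together with create-test-file.yml, the intended flow in a test playbook looks like this (the storage operation in the middle is only a placeholder for whatever destructive change is under test):

    - import_tasks: create-test-file.yml

    - name: Operation that could destroy data on the filesystem under test
      include_role:
        name: storage
      vars:
        storage_volumes:
          - name: foo
            type: disk
            disks: "{{ unused_disks }}"
            mount_point: "{{ mount_location }}"

    - import_tasks: verify-data-preservation.yml

with testfile pointing at a path under the mount point, as the LUKS tests above set it.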


@ -0,0 +1,335 @@
diff --git a/tests/get_services_state.yml b/tests/get_services_state.yml
new file mode 100644
index 0000000..4fe5d36
--- /dev/null
+++ b/tests/get_services_state.yml
@@ -0,0 +1,4 @@
+- name: Get initial state of services
+ tags: tests::cleanup
+ service_facts:
+ register: initial_state
diff --git a/tests/restore_services_state.yml b/tests/restore_services_state.yml
new file mode 100644
index 0000000..3d48975
--- /dev/null
+++ b/tests/restore_services_state.yml
@@ -0,0 +1,19 @@
+- name: Get final state of services
+ tags: tests::cleanup
+ service_facts:
+ register: final_state
+
+- name: Restore state of services
+ tags: tests::cleanup
+ service:
+ name: "{{ item }}"
+ state: "{{ 'started' if initial_state.ansible_facts.services[item + '.service']['state'] == 'running' else 'stopped' }}"
+ when:
+ - item + '.service' in final_state.ansible_facts.services
+ - item + '.service' in initial_state.ansible_facts.services
+ with_items:
+ - chronyd
+ - ntpd
+ - ptp4l
+ - phc2sys
+ - timemaster
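
The restore task consults the dict that service_facts publishes under ansible_facts.services, keyed by unit name. A trimmed, illustrative snapshot of the structure being compared (values are examples, not captured output):

    ansible_facts:
      services:
        chronyd.service:
          name: chronyd
          source: systemd
          state: running
          status: enabled
        ntpd.service:
          name: ntpd
          source: systemd
          state: stopped
          status: disabled

A service is only touched when it appears in both the initial and final snapshots, and it is then started or stopped to match its initial state.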
diff --git a/tests/tests_default.yml b/tests/tests_default.yml
index 856ebe5..fb298c9 100644
--- a/tests/tests_default.yml
+++ b/tests/tests_default.yml
@@ -3,4 +4,14 @@
hosts: all
roles:
- linux-system-roles.timesync
+
+ pre_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: get_services_state.yml
+
+ post_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: restore_services_state.yml
diff --git a/tests/tests_default_wrapper.yml b/tests/tests_default_wrapper.yml
index a768f4c..b0c0ab3 100644
--- a/tests/tests_default_wrapper.yml
+++ b/tests/tests_default_wrapper.yml
@@ -1,5 +1,8 @@
---
- name: Create static inventory from hostvars
+ tags:
+# - 'tests::tier1'
+ - 'tests::slow'
hosts: all
tasks:
- name: create temporary file
@@ -17,9 +20,15 @@
- name: Run tests_default.yml normally
+ tags:
+# - 'tests::tier1'
+ - 'tests::slow'
import_playbook: tests_default.yml
- name: Run tests_default.yml in check_mode
+ tags:
+# - 'tests::tier1'
+ - 'tests::slow'
hosts: all
tasks:
- name: Run ansible-playbook with tests_default.yml in check mode
diff --git a/tests/tests_ntp.yml b/tests/tests_ntp.yml
index e4b1b5e..446f1dc 100644
--- a/tests/tests_ntp.yml
+++ b/tests/tests_ntp.yml
@@ -18,6 +19,11 @@
roles:
- linux-system-roles.timesync
+ pre_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: get_services_state.yml
+
tasks:
- meta: flush_handlers
@@ -35,3 +41,8 @@
- "'172.16.123.1' in sources.stdout"
- "'172.16.123.2' in sources.stdout"
- "'172.16.123.3' in sources.stdout"
+
+ post_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: restore_services_state.yml
diff --git a/tests/tests_ntp_provider1.yml b/tests/tests_ntp_provider1.yml
index 08ecab9..9fe0db3 100644
--- a/tests/tests_ntp_provider1.yml
+++ b/tests/tests_ntp_provider1.yml
@@ -8,6 +9,10 @@
- linux-system-roles.timesync
pre_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: get_services_state.yml
+
- name: Remove NTP providers
package: name={{ item }} state=absent
with_items:
@@ -27,3 +32,7 @@
assert:
that:
- "'172.16.123.1' in sources.stdout"
+
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: restore_services_state.yml
diff --git a/tests/tests_ntp_provider2.yml b/tests/tests_ntp_provider2.yml
index 5476ae4..e0d5c96 100644
--- a/tests/tests_ntp_provider2.yml
+++ b/tests/tests_ntp_provider2.yml
@@ -8,6 +9,10 @@
- linux-system-roles.timesync
pre_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: get_services_state.yml
+
- name: Remove ntp
package: name=ntp state=absent
@@ -29,3 +34,7 @@
- name: Check chronyd service
shell: chronyc -n tracking
+
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: restore_services_state.yml
diff --git a/tests/tests_ntp_provider3.yml b/tests/tests_ntp_provider3.yml
index 44ca101..d440a64 100644
--- a/tests/tests_ntp_provider3.yml
+++ b/tests/tests_ntp_provider3.yml
@@ -8,6 +9,10 @@
- linux-system-roles.timesync
pre_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: get_services_state.yml
+
- name: Remove chrony
package: name=chrony state=absent
@@ -29,3 +34,7 @@
- name: Check ntpd service
shell: ntpq -c rv | grep 'associd=0'
+
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: restore_services_state.yml
diff --git a/tests/tests_ntp_provider4.yml b/tests/tests_ntp_provider4.yml
index 8b452b8..8bccba0 100644
--- a/tests/tests_ntp_provider4.yml
+++ b/tests/tests_ntp_provider4.yml
@@ -9,6 +10,10 @@
- linux-system-roles.timesync
pre_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: get_services_state.yml
+
- name: Install chrony
package: name=chrony state=present
register: package_install
@@ -27,3 +32,7 @@
- name: Check chronyd service
shell: chronyc -n tracking
+
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: restore_services_state.yml
diff --git a/tests/tests_ntp_provider5.yml b/tests/tests_ntp_provider5.yml
index 1740164..98a054f 100644
--- a/tests/tests_ntp_provider5.yml
+++ b/tests/tests_ntp_provider5.yml
@@ -9,6 +10,10 @@
- linux-system-roles.timesync
pre_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: get_services_state.yml
+
- name: Install ntp
package: name=ntp state=present
register: package_install
@@ -27,3 +32,7 @@
- name: Check ntpd service
shell: ntpq -c rv | grep 'associd=0'
+
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: restore_services_state.yml
diff --git a/tests/tests_ntp_provider6.yml b/tests/tests_ntp_provider6.yml
index 21a2039..fb41824 100644
--- a/tests/tests_ntp_provider6.yml
+++ b/tests/tests_ntp_provider6.yml
@@ -6,6 +7,10 @@
both_avail: true
tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: get_services_state.yml
+
- name: Check for availability of both NTP providers
package: name={{ item }} state=present
register: package_install
@@ -71,3 +76,7 @@
shell: chronyc -n tracking
when:
- not is_ntp_default
+
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: restore_services_state.yml
diff --git a/tests/tests_ntp_ptp.yml b/tests/tests_ntp_ptp.yml
index cab706f..7f4cdfc 100644
--- a/tests/tests_ntp_ptp.yml
+++ b/tests/tests_ntp_ptp.yml
@@ -22,6 +23,11 @@
roles:
- linux-system-roles.timesync
+ pre_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: get_services_state.yml
+
tasks:
- meta: flush_handlers
@@ -48,3 +54,8 @@
- "'PTP1' in sources.stdout"
when: "'SOF_TIMESTAMPING_TX_' in ethtool.stdout"
+
+ post_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: restore_services_state.yml
diff --git a/tests/tests_ptp_multi.yml b/tests/tests_ptp_multi.yml
index d52d439..936e467 100644
--- a/tests/tests_ptp_multi.yml
+++ b/tests/tests_ptp_multi.yml
@@ -1,5 +1,6 @@
- name: Configure time synchronization with multiple PTP domains
+ tags: tests::expfail
hosts: all
vars:
timesync_ptp_domains:
@@ -16,6 +17,11 @@
roles:
- linux-system-roles.timesync
+ pre_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: get_services_state.yml
+
tasks:
- meta: flush_handlers
@@ -58,3 +64,8 @@
- "'domainNumber 1' in pmc.stdout"
when: "'SOF_TIMESTAMPING_TX_' in ethtool.stdout"
+
+ post_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: restore_services_state.yml
diff --git a/tests/tests_ptp_single.yml b/tests/tests_ptp_single.yml
index 74da310..36d141e 100644
--- a/tests/tests_ptp_single.yml
+++ b/tests/tests_ptp_single.yml
@@ -1,5 +1,6 @@
- name: Configure time synchronization with single PTP domain
+ tags: tests::expfail
hosts: all
vars:
timesync_ptp_domains:
@@ -8,6 +9,11 @@
roles:
- linux-system-roles.timesync
+ pre_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: get_services_state.yml
+
tasks:
- meta: flush_handlers
@@ -31,3 +37,8 @@
- "'domainNumber 3' in pmc.stdout"
when: "'SOF_TIMESTAMPING_TX_' in ethtool.stdout"
+
+ post_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: restore_services_state.yml


@ -1,152 +0,0 @@
# maps the source file to the roles that use that file
# value can be string or space delimited list of strings
# role name `__collection` means - do not vendor into
# role, just vendor directly into the collection
declare -A plugin_map=(
[ansible/posix/plugins/modules/selinux.py]=selinux
[ansible/posix/plugins/modules/seboolean.py]=selinux
[ansible/posix/plugins/modules/mount.py]=storage
[ansible/posix/plugins/modules/rhel_facts.py]=__collection
[ansible/posix/plugins/modules/rhel_rpm_ostree.py]=__collection
[ansible/posix/plugins/module_utils/mount.py]=storage
[community/general/plugins/modules/ini_file.py]="tlog ad_integration"
[community/general/plugins/modules/modprobe.py]=ha_cluster
[community/general/plugins/modules/redhat_subscription.py]=rhc
[community/general/plugins/modules/rhsm_release.py]=rhc
[community/general/plugins/modules/rhsm_repository.py]=rhc
[community/general/plugins/modules/seport.py]=selinux
[community/general/plugins/modules/sefcontext.py]=selinux
[community/general/plugins/modules/selogin.py]=selinux
[containers/podman/plugins/modules/podman_container_info.py]=podman
[containers/podman/plugins/modules/podman_image.py]=podman
[containers/podman/plugins/modules/podman_play.py]=podman
[containers/podman/plugins/modules/podman_secret.py]=podman
[containers/podman/plugins/module_utils/podman/common.py]=podman
[containers/podman/plugins/module_utils/podman/quadlet.py]=podman
)
# fix the following issue
# ERROR: Found 1 pylint issue(s) which need to be resolved:
# ERROR: plugins/modules/rhsm_repository.py:263:8: wrong-collection-deprecated: Wrong collection name ('community.general') found in call to Display.deprecated or AnsibleModule.deprecate
sed "s/collection_name='community.general'/collection_name='%{collection_namespace}.%{collection_name}'/" \
-i .external/community/general/plugins/modules/rhsm_repository.py
fix_module_documentation() {
local module_src doc_fragment_name df_dest_dir
local -a paths
module_src=".external/$1"
sed ':a;N;$!ba;s/description:\n\( *\)/description:\n\1- "WARNING: Do not use this plugin directly! It is only for role internal use."\n\1/' \
-i "$module_src"
# grab documentation fragments
for doc_fragment_name in $(awk -F'[ -]+' '/^extends_documentation_fragment:/ {reading = 1; next}; /^[ -]/ {if (reading) {print $2}; next}; /^[^ -]/ {if (reading) {exit}}' "$module_src"); do
if [ "$doc_fragment_name" = files ]; then continue; fi # this one is built-in
df_dest_dir="%{collection_build_path}/plugins/doc_fragments"
if [ ! -d "$df_dest_dir" ]; then
mkdir -p "$df_dest_dir"
fi
paths=(${doc_fragment_name//./ })
# if two different collections ever provide a doc_fragment with the same name
# but different contents, this breaks: the doc fragment files would have to be
# made unique, and $dest edited to use the unique name
cp ".external/${paths[0]}/${paths[1]}/plugins/doc_fragments/${paths[2]}.py" "$df_dest_dir"
done
}
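# The function above prepends a "role internal use" warning to each vendored
# module's DOCUMENTATION and copies any doc fragments it references into the
# collection. The awk script extracts the fragment names from documentation of
# this shape (names here are illustrative):
#   extends_documentation_fragment:
#   - community.general.attributes
#   - files
# paths=(${doc_fragment_name//./ }) then splits a dotted name into namespace,
# collection, and fragment ("community general attributes") to locate the
# source file; the built-in "files" fragment is skipped.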
declare -a modules mod_utils collection_plugins
declare -A dests
# vendor in plugin files - fix documentation, fragments
for src in "${!plugin_map[@]}"; do
roles="${plugin_map["$src"]}"
if [ "$roles" = __collection ]; then
collection_plugins+=("$src")
case "$src" in
*/plugins/modules/*) fix_module_documentation "$src";;
esac
else
case "$src" in
*/plugins/modules/*) srcdir=plugins/modules; subdir=library; modules+=("$src"); fix_module_documentation "$src";;
*/plugins/module_utils/*) srcdir=plugins/module_utils; mod_utils+=("$src") ;;
*/plugins/action/*) srcdir=plugins/action ;;
esac
fi
for role in $roles; do
if [ "$role" = __collection ]; then
dest="%{collection_build_path}/plugins${src/#*plugins/}"
dests["$dest"]=__collection
else
case "$src" in
*/plugins/module_utils/*) subdir="module_utils/${role}_lsr" ;;
esac
dest="$role/${src/#*${srcdir}/${subdir}}"
dests["$dest"]="$role"
fi
destdir="$(dirname "$dest")"
if [ ! -d "$destdir" ]; then
mkdir -p "$destdir"
fi
cp -pL ".external/$src" "$dest"
done
done
# remove the temporary .external directory after vendoring
rm -rf .external
# fix python imports to point from the old name to the new name
for dest in "${!dests[@]}"; do
role="${dests["$dest"]}"
for module in "${modules[@]}"; do
python_name="$(dirname "$module")"
python_name="${python_name////[.]}"
sed -e "s/ansible_collections[.]${python_name}[.]/ansible.modules./" -i "$dest"
done
for mod_util in "${mod_utils[@]}"; do
# some mod_utils have subdirs, some do not
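# split the path on "/" into an array, e.g.
# ansible/posix/plugins/module_utils/mount.py -> (ansible posix plugins module_utils mount.py)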
split=(${mod_util//// })
python_name="ansible_collections[.]${split[0]}[.]${split[1]}[.]plugins[.]module_utils[.]"
sed -e "s/${python_name}/ansible.module_utils.${role}_lsr./" -i "$dest"
done
for plugin in "${collection_plugins[@]}"; do
python_name="$(dirname "$plugin")"
dest_python_name="%{collection_namespace}/%{collection_name}/plugins${python_name/#*plugins/}"
src_python_name="ansible_collections.${python_name////[.]}"
dest_python_name="ansible_collections.${dest_python_name////.}"
sed -e "s/${src_python_name}/${dest_python_name}/" -i "$dest"
done
done
# Replacing "linux-system-roles.rolename" with "rhel-system-roles.rolename" in each role
# Replacing "fedora.linux_system_roles." with "redhat.rhel_system_roles" in each role
# This is for the "roles calling other roles" case
# for podman, change the FQCN - using a non-FQCN module name doesn't seem to work,
# even for the legacy role format
for rolename in %{rolenames}; do
find "$rolename" -type f -exec \
sed -e "s/linux-system-roles[.]${rolename}\\>/%{roleinstprefix}${rolename}/g" \
-e "s/fedora[.]linux_system_roles[.]/%{collection_namespace}.%{collection_name}./g" \
-e "s/containers[.]podman[.]/%{collection_namespace}.%{collection_name}./g" \
-e "s/community[.]general[.]/%{collection_namespace}.%{collection_name}./g" \
-e "s/ansible[.]posix[.]/%{collection_namespace}.%{collection_name}./g" \
-i {} \;
done
# add ansible-test ignores needed due to vendoring
for ansible_ver in 2.14 2.15 2.16; do
ignore_file="podman/.sanity-ansible-ignore-${ansible_ver}.txt"
cat >> "$ignore_file" <<EOF
plugins/module_utils/podman_lsr/podman/quadlet.py compile-2.7!skip
plugins/module_utils/podman_lsr/podman/quadlet.py import-2.7!skip
plugins/modules/podman_image.py import-2.7!skip
plugins/modules/podman_play.py import-2.7!skip
EOF
done
# these platforms still use python 3.5
for ansible_ver in 2.14 2.15; do
ignore_file="podman/.sanity-ansible-ignore-${ansible_ver}.txt"
cat >> "$ignore_file" <<EOF
plugins/module_utils/podman_lsr/podman/quadlet.py compile-3.5!skip
plugins/module_utils/podman_lsr/podman/quadlet.py import-3.5!skip
plugins/modules/podman_image.py import-3.5!skip
plugins/modules/podman_play.py import-3.5!skip
EOF
done


@ -1,12 +0,0 @@
# Untar vendored collection tarballs to corresponding directories
for file in %{SOURCE801} %{SOURCE901} %{SOURCE902}; do
if [[ "$(basename $file)" =~ ([^-]+)-([^-]+)-(.+).tar.gz ]]; then
ns=${BASH_REMATCH[1]}
name=${BASH_REMATCH[2]}
ver=${BASH_REMATCH[3]}
mkdir -p .external/$ns/$name
pushd .external/$ns/$name > /dev/null
tar xfz "$file"
popd > /dev/null
fi
done
File diff suppressed because it is too large.