Merged update from upstream sources

This is an automated DistroBaker update from upstream sources.
If you do not know what this is about or would like to opt out,
contact the OSCI team.

Source: https://src.fedoraproject.org/rpms/linux-system-roles.git#490700ef996ddf949520cb4c0fedd290cc657aed
DistroBaker 2021-02-22 23:36:10 +01:00
parent 1e20571cae
commit f6ce71d308
27 changed files with 3419 additions and 0 deletions

.gitignore vendored

@ -0,0 +1,32 @@
/kdump-1.0.0.tar.gz
/network-64b2d76.tar.gz
/postfix-611754b.tar.gz
/selinux-1.0.0.tar.gz
/timesync-1.0.0.tar.gz
/kdump-77596fdd976c6160d6152c200a5432c609725a14.tar.gz
/postfix-0.1.tar.gz
/timesync-924650d0cd4117f73a7f0413ab745a8632bc5cec.tar.gz
/network-675c7e8073f33ead5143cd5fc11e332f18524317.tar.gz
/selinux-1.1.1.tar.gz
/storage-1.2.2.tar.gz
/metrics-e81b2650108727f38b1c856699aad26af0f44a46.tar.gz
/tlog-1.1.0.tar.gz
/kernel_settings-1.0.1.tar.gz
/logging-fe3f658e72b2883d2a1460d453105c7a53dd70e8.tar.gz
/nbde_client-1.0.1.tar.gz
/nbde_server-1.0.1.tar.gz
/certificate-19115b5f824141330861359a137d08ac32082502.tar.gz
/crypto_policies-76b2d5b0460dba22c5d290c1af96e4fdb3434cb9.tar.gz
/ansible-sshd-e1de59b3c54e9d48a010eeca73755df339c7e628.tar.gz
/auto-maintenance-627b7a83a2c5b2a3c1f775751c689ca40f5fb294.tar.gz
/network-bda206d45c87ee8c1a5284de84f5acf5e629de97.tar.gz
/storage-485de47b0dc0787aea077ba448ecb954f53e40c4.tar.gz
/logging-193e9034f9f74bbd8a0e9cd5b30e782a7e77dc6a.tar.gz
/auto-maintenance-e5ed203b2d7224c0bf0c3fd55452456c8f468cad.tar.gz
/kernel_settings-e5e5abb35fb695e22ccffa855c98ab882650480e.tar.gz
/nbde_client-3af7452e4861ee2363b29b23bf78bf11e06be142.tar.gz
/certificate-daecdc51cedaf67bf821f1f9f8f6c3cc0ca0d03f.tar.gz
/ssh-effa0a0d993832dee726290f263a2182cf3eacda.tar.gz
/ha_cluster-779bb78559de58bb5a1f25a4b92039c373ef59a4.tar.gz
/logging-4b07edf4e84882c9d0fb979092ba5953aac0b4d5.tar.gz
/certificate-50041ce55348fcce34aba4cbe3ea160c5d890ab3.tar.gz

README.md Normal file

@ -0,0 +1,3 @@
# linux-system-roles
The linux-system-roles package

collection_readme.sh Executable file

@ -0,0 +1,15 @@
#!/bin/bash
set -euxo pipefail
readme_md=${1:-"lsr_role2collection/collection_readme.md"}
sed -i -e '/## Currently supported distributions/{:1;/## Dependencies/!{N;b 1};s|.*|## Dependencies|}' \
-e 's/\(Linux System Roles is a set of roles for managing Linux system components.\)/\1\n\nThis collection is available as a Technology Preview./' \
-e 's/Linux/RHEL/g' \
-e 's/Ansible Galaxy/Automation Hub/g' \
-e 's/fedora\(.\)linux_system_roles/redhat\1rhel_system_roles/g' \
-e 's/linux-system-roles/rhel-system-roles/g' \
-e '/## Documentation/{:a;/## Support/!{N;b a};s|.*|## Documentation\nThe official RHEL System Roles documentation can be found in the [Product Documentation section of the Red Hat Customer Portal](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/administration_and_configuration_tasks_using_system_roles_in_rhel/index).\n\n## Support|}' \
-e 's/ $//' \
$readme_md

kdump-fix-newline.diff Normal file

@ -0,0 +1,28 @@
commit cafd95d0b03360d12e86170eb10fc1fc3dcade06
Author: Pavel Cahyna <pcahyna@redhat.com>
Date: Thu Jan 14 11:42:48 2021 +0100
Get rid of the extra final newline in string
Use the `-` chomping indicator to indicate that the trailing newline is
not intended as a part of the string.
https://yaml.org/spec/1.1/#chomping/
The trailing newline was causing an actual problem in the test.
Also use the `>` folded style, which is more appropriate here than the
`|` literal style.
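A minimal illustration of the two styles (not part of the patch), assuming the variable resolves to "root":

kdump_ssh_user: |
  root
# parsed value: "root\n" - the literal style keeps the trailing newline

kdump_ssh_user: >-
  root
# parsed value: "root" - the folded style with the `-` chomping indicator drops it
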
diff --git a/tests/tests_ssh.yml b/tests/tests_ssh.yml
index 6d3699c..d3503f7 100644
--- a/tests/tests_ssh.yml
+++ b/tests/tests_ssh.yml
@@ -27,7 +27,7 @@
- include_role:
name: linux-system-roles.kdump
vars:
- kdump_ssh_user: |
+ kdump_ssh_user: >-
{{ hostvars[kdump_ssh_server_outside]['ansible_user_id'] }}
# This is the outside address. Ansible will connect to it to
# copy the ssh key.

kdump-meta-el8.diff Normal file

@ -0,0 +1,13 @@
diff --git a/meta/main.yml b/meta/main.yml
index 2478fa6..ad8f4c6 100644
--- a/meta/main.yml
+++ b/meta/main.yml
@@ -7,6 +7,6 @@ galaxy_info:
min_ansible_version: 2.4
platforms:
- name: Fedora
- versions: [ 27, 28 ]
+ versions: [ 31, 32 ]
- name: EL
- versions: [ 6, 7 ]
+ versions: [ 6, 7, 8 ]

kdump-tier1-tags.diff Normal file

@ -0,0 +1,142 @@
diff --git a/tests/commonvars.yml b/tests/commonvars.yml
new file mode 100644
index 0000000..2cd3566
--- /dev/null
+++ b/tests/commonvars.yml
@@ -0,0 +1,2 @@
+restore_services:
+ - kdump
diff --git a/tests/get_services_state.yml b/tests/get_services_state.yml
new file mode 100644
index 0000000..4fe5d36
--- /dev/null
+++ b/tests/get_services_state.yml
@@ -0,0 +1,4 @@
+- name: Get initial state of services
+ tags: tests::cleanup
+ service_facts:
+ register: initial_state
diff --git a/tests/restore_services_state.yml b/tests/restore_services_state.yml
new file mode 100644
index 0000000..2035dfc
--- /dev/null
+++ b/tests/restore_services_state.yml
@@ -0,0 +1,22 @@
+- block:
+ - name: load common vars
+ include_vars:
+ file: commonvars.yml
+
+ - name: Get final state of services
+ service_facts:
+ register: final_state
+
+ - name: Restore state of services
+ service:
+ name: "{{ item }}"
+ state: "{{ 'started' if
+ initial_state.ansible_facts.services[sname]['state']
+ == 'running' else 'stopped' }}"
+ when:
+ - sname in final_state.ansible_facts.services
+ - sname in initial_state.ansible_facts.services
+ vars:
+ sname: "{{ item + '.service' }}"
+ with_items: "{{ restore_services }}"
+ tags: tests::cleanup
diff --git a/tests/tests_default.yml b/tests/tests_default.yml
index af0b2a0..6ce5241 100644
--- a/tests/tests_default.yml
+++ b/tests/tests_default.yml
@@ -3,3 +3,13 @@
roles:
- linux-system-roles.kdump
+
+ pre_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: get_services_state.yml
+
+ post_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: restore_services_state.yml
diff --git a/tests/tests_default_wrapper.yml b/tests/tests_default_wrapper.yml
index eba31a0..857aab8 100644
--- a/tests/tests_default_wrapper.yml
+++ b/tests/tests_default_wrapper.yml
@@ -1,6 +1,9 @@
---
- name: Create static inventory from hostvars
hosts: all
+ tags:
+# - 'tests::tier1'
+ - 'tests::slow'
tasks:
- name: create temporary file
tempfile:
@@ -17,10 +20,16 @@
- name: Run tests_default.yml normally
+ tags:
+# - 'tests::tier1'
+ - 'tests::slow'
import_playbook: tests_default.yml
- name: Run tests_default.yml in check_mode
hosts: all
+ tags:
+# - 'tests::tier1'
+ - 'tests::slow'
tasks:
- name: Run ansible-playbook with tests_default.yml in check mode
command: >
diff --git a/tests/tests_ssh.yml b/tests/tests_ssh.yml
index d12e884..6d3699c 100644
--- a/tests/tests_ssh.yml
+++ b/tests/tests_ssh.yml
@@ -10,6 +10,13 @@
# this is the address at which the ssh dump server can be reached
# from the managed host. Dumps will be uploaded there.
kdump_ssh_server_inside: "{{ kdump_ssh_source if kdump_ssh_source in hostvars[kdump_ssh_server_outside]['ansible_all_ipv4_addresses'] + hostvars[kdump_ssh_server_outside]['ansible_all_ipv6_addresses'] else hostvars[kdump_ssh_server_outside]['ansible_default_ipv4']['address'] }}"
+ tags:
+ # this test executes some tasks on localhost and relies on
+ # localhost being a different host than the managed host
+ # (localhost is being used as a second host in multihost
+ # scenario). This also means that localhost must be capable
+ # enough (not just a container - must be running an sshd).
+ - 'tests::multihost_localhost'
tasks:
- name: gather facts from {{ kdump_ssh_server_outside }}
diff --git a/tests/tests_ssh_wrapper.yml b/tests/tests_ssh_wrapper.yml
index 2203f3f..96a764e 100644
--- a/tests/tests_ssh_wrapper.yml
+++ b/tests/tests_ssh_wrapper.yml
@@ -1,6 +1,8 @@
---
- name: Create static inventory from hostvars
hosts: all
+ tags:
+ - 'tests::slow'
tasks:
- name: create temporary file
tempfile:
@@ -17,10 +19,15 @@
- name: Run tests_ssh.yml normally
+ tags:
+ - 'tests::slow'
import_playbook: tests_ssh.yml
- name: Run tests_ssh.yml in check_mode
hosts: all
+ tags:
+ - 'tests::slow'
+ - 'tests::multihost_localhost'
tasks:
- name: Run ansible-playbook with tests_ssh.yml in check mode
command: |

linux-system-roles.spec Normal file

@ -0,0 +1,869 @@
%if 0%{?rhel} && ! 0%{?epel}
%bcond_with ansible
%else
%bcond_without ansible
%endif
%if 0%{?rhel}
Name: rhel-system-roles
%else
Name: linux-system-roles
%endif
Url: https://github.com/linux-system-roles/
Summary: Set of interfaces for unified system management
Version: 1.0.0
Release: 16%{?dist}
#Group: Development/Libraries
License: GPLv3+ and MIT and BSD
%global installbase %{_datadir}/linux-system-roles
%global _pkglicensedir %{_licensedir}/%{name}
%global rolealtprefix linux-system-roles.
%global roleprefix %{name}.
%global roleinstprefix %{nil}
%global rolealtrelpath ../../linux-system-roles/
%if 0%{?rhel}
%global roleinstprefix %{roleprefix}
%global installbase %{_datadir}/ansible/roles
%global rolealtrelpath %{nil}
%endif
%if 0%{?rhel}
%global collection_namespace redhat
%global collection_name rhel_system_roles
%else
%global collection_namespace fedora
%global collection_name linux_system_roles
%endif
%global subrole_prefix "private_${role}_subrole_"
%global collection_version %{version}
# Helper macros originally from macros.ansible by Igor Raits <ignatenkobrain>
# Not available on RHEL, so we must define those macros locally here without using ansible-galaxy
# Not used (yet). Could be made to point to AH in RHEL - but what about CentOS Stream?
#%%{!?ansible_collection_url:%%define ansible_collection_url() https://galaxy.ansible.com/%%{collection_namespace}/%%{collection_name}}
%{!?ansible_collection_files:%define ansible_collection_files %{_datadir}/ansible/collections/ansible_collections/%{collection_namespace}/}
%if %{with ansible}
BuildRequires: ansible >= 2.9.10
%endif
%if %{undefined ansible_collection_build}
%if %{without ansible}
# Empty command. We don't have ansible-galaxy.
%define ansible_collection_build() :
%else
%define ansible_collection_build() ansible-galaxy collection build
%endif
%endif
%if %{undefined ansible_collection_install}
%if %{without ansible}
# Simply copy everything instead of galaxy-installing the built artifact.
%define ansible_collection_install() mkdir -p %{buildroot}%{ansible_collection_files}; cp -a . %{buildroot}%{ansible_collection_files}/%{collection_name}/
%else
%define ansible_collection_install() ansible-galaxy collection install -n -p %{buildroot}%{_datadir}/ansible/collections %{collection_namespace}-%{collection_name}-%{version}.tar.gz
%endif
%endif
# For each role, call either defcommit() or deftag(). The other macros
# (%%id and %%shortid) can be then used in the same way in both cases.
# This way the rest of the spec file does not need to know whether we are
# dealing with a tag or a commit.
%global archiveext tar.gz
# list of role names
%global rolenames %nil
# list of assignments that can be used to populate a bash associative array variable
%global rolestodir %nil
%define getarchivedir() %(p=%{basename:%{S:%{1}}}; echo ${p%%.%{archiveext}})
%define defcommit() %{expand:%%global ref%{1} %{2}
%%global shortcommit%{1} %%(c=%%{ref%{1}}; echo ${c:0:7})
%%global extractdir%{1} %%{expand:%%getarchivedir %{1}}
%%{!?repo%{1}:%%global repo%{1} %%{rolename%{1}}}
%%global archiveurl%{1} %%{?forgeorg%{1}}%%{!?forgeorg%{1}:%%{url}}%%{repo%{1}}/archive/%%{ref%{1}}/%%{repo%{1}}-%%{ref%{1}}.tar.gz
%%global rolenames %%{?rolenames} %%{rolename%{1}}
%%global roletodir%{1} [%{rolename%{1}}]="%{extractdir%{1}}"
%%global rolestodir %%{?rolestodir} %{roletodir%{1}}
}
%define deftag() %{expand:%%global ref%{1} %{2}
%%global extractdir%{1} %%{expand:%%getarchivedir %{1}}
%%{!?repo%{1}:%%global repo%{1} %%{rolename%{1}}}
%%global archiveurl%{1} %%{?forgeorg%{1}}%%{!?forgeorg%{1}:%%{url}}%%{repo%{1}}/archive/%%{ref%{1}}/%%{repo%{1}}-%%{ref%{1}}.tar.gz
%%global rolenames %%{?rolenames} %%{rolename%{1}}
%%global roletodir%{1} [%{rolename%{1}}]="%{extractdir%{1}}"
%%global rolestodir %%{?rolestodir} %%{roletodir%{1}}
}
#%%defcommit 1 43eec5668425d295dce3801216c19b1916df1f9b
%global rolename1 postfix
%deftag 1 0.1
#%%defcommit 2 6cd1ec8fdebdb92a789b14e5a44fe77f0a3d8ecd
%global rolename2 selinux
%deftag 2 1.1.1
%defcommit 3 924650d0cd4117f73a7f0413ab745a8632bc5cec
%global rolename3 timesync
#%%deftag 3 1.0.0
%defcommit 4 77596fdd976c6160d6152c200a5432c609725a14
%global rolename4 kdump
#%%deftag 4 1.0.0
%defcommit 5 bda206d45c87ee8c1a5284de84f5acf5e629de97
%global rolename5 network
#%%deftag 5 1.0.0
%defcommit 6 485de47b0dc0787aea077ba448ecb954f53e40c4
%global rolename6 storage
#%%deftag 6 1.2.2
%defcommit 7 e81b2650108727f38b1c856699aad26af0f44a46
%global rolename7 metrics
#%%deftag 7 0.1.0
#%%defcommit 8 cfa70b6b5910b3198aba2679f8fc36aad45ca45a
%global rolename8 tlog
%deftag 8 1.1.0
%defcommit 9 e5e5abb35fb695e22ccffa855c98ab882650480e
%global rolename9 kernel_settings
#%%deftag 9 1.0.1
%defcommit 10 4b07edf4e84882c9d0fb979092ba5953aac0b4d5
%global rolename10 logging
#%%deftag 10 0.2.0
#%%defcommit 11 4b6cfca4dd24e53a4bc4e07635601d7c104346c1
%global rolename11 nbde_server
%deftag 11 1.0.1
%defcommit 12 3af7452e4861ee2363b29b23bf78bf11e06be142
%global rolename12 nbde_client
#%%deftag 12 1.0.1
%defcommit 13 50041ce55348fcce34aba4cbe3ea160c5d890ab3
%global rolename13 certificate
#%%deftag 13 1.0.1
%defcommit 14 76b2d5b0460dba22c5d290c1af96e4fdb3434cb9
%global rolename14 crypto_policies
%global forgeorg15 https://github.com/willshersystems/
%global repo15 ansible-sshd
%global rolename15 sshd
%defcommit 15 e1de59b3c54e9d48a010eeca73755df339c7e628
%defcommit 16 effa0a0d993832dee726290f263a2182cf3eacda
%global rolename16 ssh
%defcommit 17 779bb78559de58bb5a1f25a4b92039c373ef59a4
%global rolename17 ha_cluster
%global mainid e5ed203b2d7224c0bf0c3fd55452456c8f468cad
Source: %{url}auto-maintenance/archive/%{mainid}/auto-maintenance-%{mainid}.tar.gz
Source1: %{archiveurl1}
Source2: %{archiveurl2}
Source3: %{archiveurl3}
Source4: %{archiveurl4}
Source5: %{archiveurl5}
Source6: %{archiveurl6}
Source7: %{archiveurl7}
Source8: %{archiveurl8}
Source9: %{archiveurl9}
Source10: %{archiveurl10}
Source11: %{archiveurl11}
Source12: %{archiveurl12}
Source13: %{archiveurl13}
Source14: %{archiveurl14}
Source15: %{archiveurl15}
Source16: %{archiveurl16}
Source17: %{archiveurl17}
# Script to convert the collection README to Automation Hub.
# Not used on Fedora.
Source998: collection_readme.sh
Patch11: rhel-system-roles-postfix-pr5.diff
Patch12: postfix-meta-el8.diff
Patch21: selinux-tier1-tags.diff
Patch22: selinux-bz-1926947-no-variable-named-present.diff
Patch31: timesync-tier1-tags.diff
Patch41: rhel-system-roles-kdump-pr22.diff
Patch42: kdump-tier1-tags.diff
Patch43: kdump-meta-el8.diff
Patch44: kdump-fix-newline.diff
Patch51: network-epel-minimal.diff
# Not suitable for upstream, since the files need to be executable there
Patch52: network-permissions.diff
Patch53: network-tier1-tags.diff
Patch55: network-disable-bondtests.diff
Patch62: storage-partition-name.diff
Patch63: storage-no-disks-existing.diff
Patch64: storage-trim-volume-size.diff
Patch71: metrics-mssql-x86.diff
Patch151: sshd-example.diff
Patch152: sshd-work-on-ansible28-jinja27.diff
BuildArch: noarch
# These are needed for md2html.sh to build the documentation
BuildRequires: asciidoc
BuildRequires: pandoc
BuildRequires: highlight
BuildRequires: python3
BuildRequires: python3-six
BuildRequires: python3dist(ruamel.yaml)
Requires: python3-jmespath
Obsoletes: rhel-system-roles-techpreview < 1.0-3
%if %{undefined __ansible_provides}
Provides: ansible-collection(%{collection_namespace}.%{collection_name}) = %{collection_version}
%endif
# be compatible with the usual Fedora Provides:
Provides: ansible-collection-%{collection_namespace}-%{collection_name} = %{version}-%{release}
# We need to put %%description within the if block to avoid empty
# lines showing up.
%if 0%{?rhel}
%description
Collection of Ansible roles and modules that provide a stable and
consistent configuration interface for managing multiple versions
of Red Hat Enterprise Linux.
%else
%description
Collection of Ansible roles and modules that provide a stable and
consistent configuration interface for managing multiple versions
of Fedora, Red Hat Enterprise Linux & CentOS.
%endif
%prep
%setup -q -a1 -a2 -a3 -a4 -a5 -a6 -a7 -a8 -a9 -a10 -a11 -a12 -a13 -a14 -a15 -a16 -a17 -n %{getarchivedir 0}
declare -A ROLESTODIR=(%{rolestodir})
for rolename in %{rolenames}; do
mv "${ROLESTODIR[${rolename}]}" ${rolename}
done
cd %{rolename1}
%patch11 -p1
%patch12 -p1
cd ..
cd %{rolename2}
%patch21 -p1
%patch22 -p1
cd ..
cd %{rolename3}
%patch31 -p1
cd ..
cd %{rolename4}
%patch41 -p1
%patch42 -p1
%patch43 -p1
%patch44 -p1
cd ..
cd %{rolename5}
%patch51 -p1
%patch52 -p1
%patch53 -p1
%patch55 -p1
cd ..
cd %{rolename6}
%patch62 -p1
%patch63 -p1
%patch64 -p1
cd ..
cd %{rolename7}
%patch71 -p1
cd ..
cd %{rolename15}
%patch151 -p1
%patch152 -p1
sed -r -i -e "s/ansible-sshd/linux-system-roles.sshd/" tests/*.yml examples/*.yml README.md
cd ..
# Replacing "linux-system-roles.rolename" with "rhel-system-roles.rolename" in each role
%if "%{roleprefix}" != "linux-system-roles."
for rolename in %{rolenames}; do
find $rolename -type f -exec \
sed "s/linux-system-roles[.]${rolename}\\>/%{roleprefix}${rolename}/g" -i {} \;
done
%endif
# Removing symlinks in tests/roles
for rolename in %{rolenames}; do
if [ -d ${rolename}/tests/roles ]; then
find ${rolename}/tests/roles -type l -exec rm {} \;
if [ -d ${rolename}/tests/roles/linux-system-roles.${rolename} ]; then
rm -r ${rolename}/tests/roles/linux-system-roles.${rolename}
fi
fi
done
rm %{rolename5}/tests/modules
rm %{rolename5}/tests/module_utils
rm %{rolename5}/tests/playbooks/roles
# transform ambiguous #!/usr/bin/env python shebangs to python3 to stop brp-mangle-shebangs complaining
find -type f -executable -name '*.py' -exec \
sed -i -r -e '1s@^(#! */usr/bin/env python)(\s|$)@#\13\2@' '{}' +
%build
sh md2html.sh \
%{rolename1}/README.md \
%{rolename2}/README.md \
%{rolename3}/README.md \
%{rolename4}/README.md \
%{rolename5}/README.md \
%{rolename6}/README.md \
%{rolename7}/README.md \
%{rolename8}/README.md \
%{rolename9}/README.md \
%{rolename10}/README.md \
%{rolename11}/README.md \
%{rolename12}/README.md \
%{rolename13}/README.md \
%{rolename14}/README.md \
%{rolename15}/README.md \
%{rolename16}/README.md \
%{rolename17}/README.md
mkdir .collections
%if 0%{?rhel}
# Convert the upstream collection readme to the downstream one
%{SOURCE998} lsr_role2collection/collection_readme.md
%endif
./galaxy_transform.py "%{collection_namespace}" "%{collection_name}" "%{collection_version}" > galaxy.yml.tmp
mv galaxy.yml.tmp galaxy.yml
for role in %{rolename1} %{rolename2} %{rolename3} \
%{rolename4} %{rolename5} %{rolename6} \
%{rolename7} %{rolename8} %{rolename9} \
%{rolename10} %{rolename11} %{rolename12} \
%{rolename13} %{rolename14} %{rolename15} \
%{rolename16} %{rolename17}; do
python3 lsr_role2collection.py --role "$role" --src-path "$role" \
--src-owner %{name} --subrole-prefix %{subrole_prefix} --dest-path .collections \
--readme lsr_role2collection/collection_readme.md \
--namespace %{collection_namespace} --collection %{collection_name}
done
cp -p galaxy.yml lsr_role2collection/.ansible-lint \
.collections/ansible_collections/%{collection_namespace}/%{collection_name}
mkdir -p .collections/ansible_collections/%{collection_namespace}/%{collection_name}/tests/sanity
cp -p lsr_role2collection/ignore-2.9.txt \
.collections/ansible_collections/%{collection_namespace}/%{collection_name}/tests/sanity
cd .collections/ansible_collections/%{collection_namespace}/%{collection_name}/
%ansible_collection_build
%install
mkdir -p $RPM_BUILD_ROOT%{installbase}
mkdir -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles
cp -pR %{rolename1} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename1}
cp -pR %{rolename2} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename2}
cp -pR %{rolename3} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename3}
cp -pR %{rolename4} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename4}
cp -pR %{rolename5} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename5}
cp -pR %{rolename6} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename6}
cp -pR %{rolename7} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename7}
cp -pR %{rolename8} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename8}
cp -pR %{rolename9} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename9}
cp -pR %{rolename10} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename10}
cp -pR %{rolename11} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename11}
cp -pR %{rolename12} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename12}
cp -pR %{rolename13} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename13}
cp -pR %{rolename14} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename14}
cp -pR %{rolename15} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename15}
cp -pR %{rolename16} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename16}
cp -pR %{rolename17} $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}%{rolename17}
%if 0%{?rolealtprefix:1}
ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename1} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename1}
ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename2} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename2}
ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename3} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename3}
ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename4} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename4}
ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename5} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename5}
ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename6} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename6}
ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename7} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename7}
ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename8} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename8}
ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename9} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename9}
ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename10} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename10}
ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename11} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename11}
ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename12} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename12}
ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename13} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename13}
ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename14} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename14}
ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename15} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename15}
ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename16} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename16}
ln -s %{rolealtrelpath}%{roleinstprefix}%{rolename17} $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{rolealtprefix}%{rolename17}
%endif
mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/kdump
mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/postfix
mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/selinux
mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/timesync
mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/network
mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/storage
mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/metrics
mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/tlog
mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/kernel_settings
mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/logging
mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/nbde_server
mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/nbde_client
mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/certificate
mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/crypto_policies
mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/sshd
mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/ssh
mkdir -p $RPM_BUILD_ROOT%{_pkgdocdir}/ha_cluster
mkdir -p $RPM_BUILD_ROOT%{_pkglicensedir}
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}kdump/README.md \
$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}kdump/README.html \
$RPM_BUILD_ROOT%{_pkgdocdir}/kdump
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}kdump/COPYING \
$RPM_BUILD_ROOT%{_pkglicensedir}/kdump.COPYING
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}postfix/README.md \
$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}postfix/README.html \
$RPM_BUILD_ROOT%{_pkgdocdir}/postfix
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}postfix/COPYING \
$RPM_BUILD_ROOT%{_pkglicensedir}/postfix.COPYING
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}selinux/README.md \
$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}selinux/README.html \
$RPM_BUILD_ROOT%{_pkgdocdir}/selinux
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}selinux/COPYING \
$RPM_BUILD_ROOT%{_pkglicensedir}/selinux.COPYING
mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}selinux/selinux-playbook.yml \
$RPM_BUILD_ROOT%{_pkgdocdir}/selinux/example-selinux-playbook.yml
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}timesync/README.md \
$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}timesync/README.html \
$RPM_BUILD_ROOT%{_pkgdocdir}/timesync
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}timesync/COPYING \
$RPM_BUILD_ROOT%{_pkglicensedir}/timesync.COPYING
mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}timesync/examples/multiple-ntp-servers.yml \
$RPM_BUILD_ROOT%{_pkgdocdir}/timesync/example-timesync-playbook.yml
mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}timesync/examples/single-pool.yml \
$RPM_BUILD_ROOT%{_pkgdocdir}/timesync/example-timesync-pool-playbook.yml
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/README.md \
$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/README.html \
$RPM_BUILD_ROOT%{_pkgdocdir}/network
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/LICENSE \
$RPM_BUILD_ROOT%{_pkglicensedir}/network.LICENSE
mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/bond_with_vlan.yml \
$RPM_BUILD_ROOT%{_pkgdocdir}/network/example-bond_with_vlan-playbook.yml
mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/bridge_with_vlan.yml \
$RPM_BUILD_ROOT%{_pkgdocdir}/network/example-bridge_with_vlan-playbook.yml
mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/eth_simple_auto.yml \
$RPM_BUILD_ROOT%{_pkgdocdir}/network/example-eth_simple_auto-playbook.yml
mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/eth_with_vlan.yml \
$RPM_BUILD_ROOT%{_pkgdocdir}/network/example-eth_with_vlan-playbook.yml
mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/infiniband.yml \
$RPM_BUILD_ROOT%{_pkgdocdir}/network/example-infiniband-playbook.yml
mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/macvlan.yml \
$RPM_BUILD_ROOT%{_pkgdocdir}/network/example-macvlan-playbook.yml
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/remove_profile.yml \
$RPM_BUILD_ROOT%{_pkgdocdir}/network/example-remove_profile-playbook.yml
rm $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/remove_profile.yml
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/down_profile.yml \
$RPM_BUILD_ROOT%{_pkgdocdir}/network/example-down_profile-playbook.yml
rm $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/down_profile.yml
mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/inventory \
$RPM_BUILD_ROOT%{_pkgdocdir}/network/example-inventory
mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/ethtool_features.yml \
$RPM_BUILD_ROOT%{_pkgdocdir}/network/example-ethtool_features-playbook.yml
mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/ethtool_features_default.yml \
$RPM_BUILD_ROOT%{_pkgdocdir}/network/example-ethtool_features_default-playbook.yml
mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/bond_simple.yml \
$RPM_BUILD_ROOT%{_pkgdocdir}/network/example-bond_simple-playbook.yml
mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/eth_with_802_1x.yml \
$RPM_BUILD_ROOT%{_pkgdocdir}/network/example-eth_with_802_1x-playbook.yml
mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/wireless_wpa_psk.yml \
$RPM_BUILD_ROOT%{_pkgdocdir}/network/example-wireless_wpa_psk-playbook.yml
mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/remove+down_profile.yml \
$RPM_BUILD_ROOT%{_pkgdocdir}/network/example-remove+down_profile-playbook.yml
mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/dummy_simple.yml \
$RPM_BUILD_ROOT%{_pkgdocdir}/network/example-dummy_simple-playbook.yml
mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/ethtool_coalesce.yml \
$RPM_BUILD_ROOT%{_pkgdocdir}/network/example-ethtool_coalesce-playbook.yml
mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/team_simple.yml \
$RPM_BUILD_ROOT%{_pkgdocdir}/network/example-team_simple-playbook.yml
mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}network/examples/eth_dns_support.yml \
$RPM_BUILD_ROOT%{_pkgdocdir}/network/example-eth_dns_support-playbook.yml
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}storage/README.md \
$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}storage/README.html \
$RPM_BUILD_ROOT%{_pkgdocdir}/storage
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}storage/LICENSE \
$RPM_BUILD_ROOT%{_pkglicensedir}/storage.LICENSE
rm $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}*/semaphore
rm -r $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}*/molecule
rm -r $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}*/.[A-Za-z]*
rm $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}*/tests/.git*
rm $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples/roles
rmdir $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}network/examples
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}metrics/README.md \
$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}metrics/README.html \
$RPM_BUILD_ROOT%{_pkgdocdir}/metrics
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}metrics/LICENSE \
$RPM_BUILD_ROOT%{_pkglicensedir}/metrics.LICENSE
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}tlog/README.md \
$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}tlog/README.html \
$RPM_BUILD_ROOT%{_pkgdocdir}/tlog
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}tlog/LICENSE \
$RPM_BUILD_ROOT%{_pkglicensedir}/tlog.LICENSE
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}kernel_settings/README.md \
$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}kernel_settings/README.html \
$RPM_BUILD_ROOT%{_pkgdocdir}/kernel_settings
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}kernel_settings/LICENSE \
$RPM_BUILD_ROOT%{_pkglicensedir}/kernel_settings.LICENSE
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}kernel_settings/COPYING \
$RPM_BUILD_ROOT%{_pkglicensedir}/kernel_settings.COPYING
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}logging/README.md \
$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}logging/README.html \
$RPM_BUILD_ROOT%{_pkgdocdir}/logging
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}logging/LICENSE \
$RPM_BUILD_ROOT%{_pkglicensedir}/logging.LICENSE
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}logging/COPYING \
$RPM_BUILD_ROOT%{_pkglicensedir}/logging.COPYING
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}nbde_server/README.md \
$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}nbde_server/README.html \
$RPM_BUILD_ROOT%{_pkgdocdir}/nbde_server
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}nbde_server/LICENSE \
$RPM_BUILD_ROOT%{_pkglicensedir}/nbde_server.LICENSE
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}nbde_client/README.md \
$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}nbde_client/README.html \
$RPM_BUILD_ROOT%{_pkgdocdir}/nbde_client
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}nbde_client/LICENSE \
$RPM_BUILD_ROOT%{_pkglicensedir}/nbde_client.LICENSE
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}certificate/README.md \
$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}certificate/README.html \
$RPM_BUILD_ROOT%{_pkgdocdir}/certificate
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}certificate/LICENSE \
$RPM_BUILD_ROOT%{_pkglicensedir}/certificate.LICENSE
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}crypto_policies/README.md \
$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}crypto_policies/README.html \
$RPM_BUILD_ROOT%{_pkgdocdir}/crypto_policies
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}crypto_policies/LICENSE \
$RPM_BUILD_ROOT%{_pkglicensedir}/crypto_policies.LICENSE
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}sshd/README.md \
$RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}sshd/README.html \
$RPM_BUILD_ROOT%{_pkgdocdir}/sshd
cp -p $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}sshd/LICENSE \
$RPM_BUILD_ROOT%{_pkglicensedir}/sshd.LICENSE
# referenced in the configuring-openssh-servers-using-the-sshd-system-role documentation module
# must be updated if changing the file path
mv $RPM_BUILD_ROOT%{installbase}/%{roleinstprefix}sshd/examples/example-root-login.yml \
$RPM_BUILD_ROOT%{_pkgdocdir}/sshd/example-root-login-playbook.yml
rmdir $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}sshd/examples
cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}ssh/README.md \
$RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}ssh/README.html \
$RPM_BUILD_ROOT%{_pkgdocdir}/ssh
cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}ssh/LICENSE \
$RPM_BUILD_ROOT%{_pkglicensedir}/ssh.LICENSE
cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}ha_cluster/README.md \
$RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}ha_cluster/README.html \
$RPM_BUILD_ROOT%{_pkgdocdir}/ha_cluster
cp -p $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}ha_cluster/LICENSE \
$RPM_BUILD_ROOT%{_pkglicensedir}/ha_cluster.LICENSE
mv $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}ha_cluster/examples/simple.yml \
$RPM_BUILD_ROOT%{_pkgdocdir}/ha_cluster/example-simple-playbook.yml
rmdir $RPM_BUILD_ROOT%{_datadir}/ansible/roles/%{roleprefix}ha_cluster/examples
cd .collections/ansible_collections/%{collection_namespace}/%{collection_name}/
%ansible_collection_install
%files
%if %{without ansible}
%dir %{_datadir}/ansible
%dir %{_datadir}/ansible/roles
%endif
%if "%{installbase}" != "%{_datadir}/ansible/roles"
%dir %{installbase}
%endif
%if 0%{?rolealtprefix:1}
%{_datadir}/ansible/roles/%{rolealtprefix}kdump
%{_datadir}/ansible/roles/%{rolealtprefix}postfix
%{_datadir}/ansible/roles/%{rolealtprefix}selinux
%{_datadir}/ansible/roles/%{rolealtprefix}timesync
%{_datadir}/ansible/roles/%{rolealtprefix}network
%{_datadir}/ansible/roles/%{rolealtprefix}storage
%{_datadir}/ansible/roles/%{rolealtprefix}metrics
%{_datadir}/ansible/roles/%{rolealtprefix}tlog
%{_datadir}/ansible/roles/%{rolealtprefix}kernel_settings
%{_datadir}/ansible/roles/%{rolealtprefix}logging
%{_datadir}/ansible/roles/%{rolealtprefix}nbde_server
%{_datadir}/ansible/roles/%{rolealtprefix}nbde_client
%{_datadir}/ansible/roles/%{rolealtprefix}certificate
%{_datadir}/ansible/roles/%{rolealtprefix}crypto_policies
%{_datadir}/ansible/roles/%{rolealtprefix}sshd
%{_datadir}/ansible/roles/%{rolealtprefix}ssh
%{_datadir}/ansible/roles/%{rolealtprefix}ha_cluster
%endif
%{installbase}/%{roleinstprefix}kdump
%{installbase}/%{roleinstprefix}postfix
%{installbase}/%{roleinstprefix}selinux
%{installbase}/%{roleinstprefix}timesync
%{installbase}/%{roleinstprefix}network
%{installbase}/%{roleinstprefix}storage
%{installbase}/%{roleinstprefix}metrics
%{installbase}/%{roleinstprefix}tlog
%{installbase}/%{roleinstprefix}kernel_settings
%{installbase}/%{roleinstprefix}logging
%{installbase}/%{roleinstprefix}nbde_server
%{installbase}/%{roleinstprefix}nbde_client
%{installbase}/%{roleinstprefix}certificate
%{installbase}/%{roleinstprefix}crypto_policies
%{installbase}/%{roleinstprefix}sshd
%{installbase}/%{roleinstprefix}ssh
%{installbase}/%{roleinstprefix}ha_cluster
%{_pkgdocdir}/*/example-*-playbook.yml
%{_pkgdocdir}/network/example-inventory
%{_pkgdocdir}/*/README.md
%{_pkgdocdir}/*/README.html
%doc %{installbase}/%{roleinstprefix}kdump/README.md
%doc %{installbase}/%{roleinstprefix}postfix/README.md
%doc %{installbase}/%{roleinstprefix}selinux/README.md
%doc %{installbase}/%{roleinstprefix}timesync/README.md
%doc %{installbase}/%{roleinstprefix}network/README.md
%doc %{installbase}/%{roleinstprefix}storage/README.md
%doc %{installbase}/%{roleinstprefix}metrics/README.md
%doc %{installbase}/%{roleinstprefix}tlog/README.md
%doc %{installbase}/%{roleinstprefix}kernel_settings/README.md
%doc %{installbase}/%{roleinstprefix}logging/README.md
%doc %{installbase}/%{roleinstprefix}nbde_server/README.md
%doc %{installbase}/%{roleinstprefix}nbde_client/README.md
%doc %{installbase}/%{roleinstprefix}certificate/README.md
%doc %{installbase}/%{roleinstprefix}crypto_policies/README.md
%doc %{installbase}/%{roleinstprefix}sshd/README.md
%doc %{installbase}/%{roleinstprefix}ssh/README.md
%doc %{installbase}/%{roleinstprefix}kdump/README.html
%doc %{installbase}/%{roleinstprefix}postfix/README.html
%doc %{installbase}/%{roleinstprefix}selinux/README.html
%doc %{installbase}/%{roleinstprefix}timesync/README.html
%doc %{installbase}/%{roleinstprefix}network/README.html
%doc %{installbase}/%{roleinstprefix}storage/README.html
%doc %{installbase}/%{roleinstprefix}metrics/README.html
%doc %{installbase}/%{roleinstprefix}tlog/README.html
%doc %{installbase}/%{roleinstprefix}kernel_settings/README.html
%doc %{installbase}/%{roleinstprefix}logging/README.html
%doc %{installbase}/%{roleinstprefix}nbde_server/README.html
%doc %{installbase}/%{roleinstprefix}nbde_client/README.html
%doc %{installbase}/%{roleinstprefix}certificate/README.html
%doc %{installbase}/%{roleinstprefix}crypto_policies/README.html
%doc %{installbase}/%{roleinstprefix}sshd/README.html
%doc %{installbase}/%{roleinstprefix}ssh/README.html
%doc %{installbase}/%{roleinstprefix}ha_cluster/README.html
%license %{_pkglicensedir}/*
%license %{installbase}/%{roleinstprefix}kdump/COPYING
%license %{installbase}/%{roleinstprefix}postfix/COPYING
%license %{installbase}/%{roleinstprefix}selinux/COPYING
%license %{installbase}/%{roleinstprefix}timesync/COPYING
%license %{installbase}/%{roleinstprefix}network/LICENSE
%license %{installbase}/%{roleinstprefix}storage/LICENSE
%license %{installbase}/%{roleinstprefix}metrics/LICENSE
%license %{installbase}/%{roleinstprefix}tlog/LICENSE
%license %{installbase}/%{roleinstprefix}kernel_settings/LICENSE
%license %{installbase}/%{roleinstprefix}kernel_settings/COPYING
%license %{installbase}/%{roleinstprefix}logging/LICENSE
%license %{installbase}/%{roleinstprefix}logging/COPYING
%license %{installbase}/%{roleinstprefix}nbde_server/LICENSE
%license %{installbase}/%{roleinstprefix}nbde_client/LICENSE
%license %{installbase}/%{roleinstprefix}certificate/LICENSE
%license %{installbase}/%{roleinstprefix}crypto_policies/LICENSE
%license %{installbase}/%{roleinstprefix}sshd/LICENSE
%license %{installbase}/%{roleinstprefix}ssh/LICENSE
%license %{installbase}/%{roleinstprefix}ha_cluster/LICENSE
%{ansible_collection_files}
%changelog
* Mon Feb 22 2021 Pavel Cahyna <pcahyna@redhat.com> - 1.0.0-16
- Sync with RHEL version 1.0.0-31
Rebase certificate role to pick up a test fix
Rebase logging role to fix default private key path,
upstream PR #218
Update collection doc transformation to match a modified text
and include the Tech Preview note again (for RHEL)
* Fri Feb 19 2021 Pavel Cahyna <pcahyna@redhat.com> - 1.0.0-15
- Sync with RHEL version 1.0.0-29
Added roles: ssh, ha_cluster
Updated roles: certificate, kernel_settings, nbde_client,
logging, network
Improvements to collection build and metadata
- Two further improvements from RHEL:
Corrected merge botch in files list - make ssh README a docfile
Dynamically update galaxy.yml with our metadata even on Fedora,
we can't rely on correct version number in auto-maintenance
* Tue Feb 9 2021 Pavel Cahyna <pcahyna@redhat.com> - 1.0.0-14
- Synchronize with RHEL, new roles added:
storage, metrics, tlog, kernel_settings, logging, nbde_server,
nbde_client, certificate, crypto_policies, sshd, and the
fedora.linux_system_roles collection.
* Tue Jan 26 2021 Fedora Release Engineering <releng@fedoraproject.org> - 1.0-13
- Rebuilt for https://fedoraproject.org/wiki/Fedora_34_Mass_Rebuild
* Tue Jul 28 2020 Fedora Release Engineering <releng@fedoraproject.org> - 1.0-12
- Rebuilt for https://fedoraproject.org/wiki/Fedora_33_Mass_Rebuild
* Wed Jan 29 2020 Fedora Release Engineering <releng@fedoraproject.org> - 1.0-11
- Rebuilt for https://fedoraproject.org/wiki/Fedora_32_Mass_Rebuild
* Thu Jul 25 2019 Fedora Release Engineering <releng@fedoraproject.org> - 1.0-10
- Rebuilt for https://fedoraproject.org/wiki/Fedora_31_Mass_Rebuild
* Fri Feb 01 2019 Fedora Release Engineering <releng@fedoraproject.org> - 1.0-9
- Rebuilt for https://fedoraproject.org/wiki/Fedora_30_Mass_Rebuild
* Wed Dec 05 2018 Till Maas <opensource@till.name> - 1.0-8
- Install roles at /usr/share/linux-system-roles, use symlinks in
/usr/share/ansible/roles/ to allow using alternatives
* Wed Nov 14 2018 Mike DePaulo <mikedep333@gmail.com> - 1.0-7
- spec file improvement: Remove unnecessary %%doc for files under _pkgdocdir
- Install license files under /usr/share/licenses instead of /usr/share/doc
* Tue Nov 06 2018 Mike DePaulo <mikedep333@gmail.com> - 1.0-7
- Fix rpm build for added example timesync example playbooks
- Misc spec file comments fixes
- Fix rpmlint error by escaping a previous changelog entry with a macro
- Comply with Fedora guidelines by always using "cp -p" in %%install
- Update %%description to be different for Fedora.
* Wed Oct 24 2018 Pavel Cahyna <pcahyna@redhat.com> - 1.0-7
- Update to latest versions of selinux, kdump and timesync.
- Update to the latest revision of postfix, fixes README markup
- Add Obsoletes for the -techpreview subpackage introduced mistakenly in 1.0-1
- spec file improvement: Unify the source macros with deftag() and defcommit()
* Tue Oct 23 2018 Till Maas <opensource@till.name> - 1.0-6
- Update Network system role to latest commit to include Fedora 29 fixes
- Update example timesync example playbooks
- Add comments about upstream status
* Tue Aug 14 2018 Pavel Cahyna <pcahyna@redhat.com> - 1.0-4
- Format the READMEs as html, by vdolezal, with changes to use highlight
(source-highlight does not understand YAML)
* Thu Aug 9 2018 Pavel Cahyna <pcahyna@redhat.com> - 1.0-3
- Rebase the network role to the last revision (d866422).
Many improvements to tests, introduces autodetection of the current provider
and defaults to using profile name as interface name.
- Rebase the selinux, timesync and kdump roles to their 1.0rc1 versions.
Many changes to the role interfaces to make them more consistent
and conforming to Ansible best practices.
- Update the description.
* Fri May 11 2018 Pavel Cahyna <pcahyna@redhat.com> - 0.6-4
- Fix complaints about /usr/bin/python during RPM build by making the affected scripts non-exec
- Fix merge botch
* Mon Mar 19 2018 Troy Dawson <tdawson@redhat.com> - 0.6-3.1
- Use -a (after cd) instead of -b (before cd) in %%setup
* Wed Mar 14 2018 Pavel Cahyna <pcahyna@redhat.com> - 0.6-3
- Minor corrections of the previous change by Till Maas.
* Fri Mar 9 2018 Pavel Cahyna <pcahyna@redhat.com> - 0.6-2
- Document network role options: static routes, ethernet, dns
Upstream PR#36, bz1550128, documents bz1487747 and bz1478576
* Tue Jan 30 2018 Pavel Cahyna <pcahyna@redhat.com> - 0.6-1
- Drop hard dependency on ansible (#1525655), patch from Yaakov Selkowitz
- Update the network role to version 0.4, solves bz#1487747, bz#1478576
* Tue Dec 19 2017 Pavel Cahyna <pcahyna@redhat.com> - 0.5-3
- kdump: fix the wrong conditional for ssh checking and improve test (PR#10)
* Tue Nov 07 2017 Pavel Cahyna <pcahyna@redhat.com> - 0.5-2
- kdump: add ssh support. upstream PR#9, rhbz1478707
* Tue Oct 03 2017 Pavel Cahyna <pcahyna@redhat.com> - 0.5-1
- SELinux: fix policy reload when SELinux is disabled on CentOS/RHEL 6
(bz#1493574)
- network: update to b856c7481bf5274d419f71fb62029ea0044b3ec1 :
makes the network role idempotent (bz#1476053) and fixes manual
network provider selection (bz#1485074).
* Mon Aug 28 2017 Pavel Cahyna <pcahyna@redhat.com> - 0.4-1
- network: update to b9b6f0a7969e400d8d6ba0ac97f69593aa1e8fa5:
ensure that state:absent followed by state:up works (bz#1478910), and change
the example IP addresses to the IANA-assigned ones.
- SELinux: fix the case when SELinux is disabled (bz#1479546).
* Tue Aug 8 2017 Pavel Cahyna <pcahyna@redhat.com> - 0.3-2
- We can't change directories to symlinks (rpm bug #447156) so keep the old
names and create the new names as symlinks.
* Tue Aug 8 2017 Pavel Cahyna <pcahyna@redhat.com> - 0.3-1
- Change the prefix to linux-system-roles., keeping compatibility
symlinks.
- Update the network role to dace7654feb7b5629ded0734c598e087c2713265:
adds InfiniBand support and other fixes.
- Drop a patch included upstream.
* Mon Jun 26 2017 Pavel Cahyna <pcahyna@redhat.com> - 0.2-2
- Leave a copy of README and COPYING in every role's directory, as suggested by T. Bowling.
- Move the network example inventory to the documentation directory together
with the example playbooks and delete the now empty "examples" directory.
- Use proper reserved (by RFC 7042) MAC addresses in the network examples.
* Tue Jun 6 2017 Pavel Cahyna <pcahyna@redhat.com> - 0.2-1
- Update the networking role to version 0.2 (#1459203)
- Version every role and the package separately. They live in separate repos
and upstream release tags are not coordinated.
* Mon May 22 2017 Pavel Cahyna <pcahyna@redhat.com> - 0.1-2
- Prefix the roles in examples and documentation with rhel-system-roles.
* Thu May 18 2017 Pavel Cahyna <pcahyna@redhat.com> - 0.1-1
- Update to 0.1 (first upstream release).
- Remove the tuned role, it is not ready yet.
- Move the example playbooks to /usr/share/doc/rhel-system-roles/$SUBSYSTEM
directly to get rid of an extra directory.
- Depend on ansible.
* Thu May 4 2017 Pavel Cahyna <pcahyna@redhat.com> - 0-0.1.20170504
- Initial release.
- kdump r. fe8bb81966b60fa8979f3816a12b0c7120d71140
- postfix r. 43eec5668425d295dce3801216c19b1916df1f9b
- selinux r. 1e4a21f929455e5e76dda0b12867abaa63795ae7
- timesync r. 33a1a8c349de10d6281ed83d4c791e9177d7a141
- tuned r. 2e8bb068b9815bc84287e9b6dc6177295ffdf38b
- network r. 03ff040df78a14409a0d89eba1235b8f3e50a750

md2html.sh Normal file

@ -0,0 +1,16 @@
# Copyright 2018 Red Hat, Inc.
# Author: Václav Doležal <vdolezal@redhat.com>
# THIS FILE IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
# OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
#
# Permission is hereby granted to use or copy this program
# for any purpose, provided the above notices are retained on all copies.
# Permission to modify the code and to distribute modified code is granted,
# provided the above notices are retained, and a notice that the code was
# modified is included with the above copyright notice.
for file in "$@"; do
    pandoc -f markdown_github "${file}" -t asciidoc -o "${file%.md}.tmp.adoc" ||exit $?
    touch -r "${file}" "${file%.md}.tmp.adoc" ||exit $?
    TZ=UTC asciidoc -o "${file%.md}.html" -a footer-style=none -a toc2 -a source-highlighter=highlight "${file%.md}.tmp.adoc" ||exit $?
    rm "${file%.md}.tmp.adoc"
done

metrics-mssql-x86.diff Normal file

@ -0,0 +1,24 @@
From 7ff86f2fa05998afcd8ae87d9cdd660ef5b6ee2c Mon Sep 17 00:00:00 2001
From: Jan Kurik <jkurik@redhat.com>
Date: Thu, 18 Feb 2021 17:09:48 +1100
Subject: [PATCH] Update mssql test to exclude non-x86_64 architectures
pcp-pmda-mssql (and SQL Server itself) are x86_64-only.
---
tests/tests_sanity_mssql.yml | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/tests/tests_sanity_mssql.yml b/tests/tests_sanity_mssql.yml
index 6f1e2cc..8602c36 100644
--- a/tests/tests_sanity_mssql.yml
+++ b/tests/tests_sanity_mssql.yml
@@ -12,7 +12,8 @@
- meta: end_host
when: (ansible_distribution in ['RedHat'] and
( ansible_facts['distribution_version'] is version('8.4', '<'))) or
- ansible_distribution not in ['Fedora', 'RedHat']
+ ansible_distribution not in ['Fedora', 'RedHat'] or
+ ansible_architecture not in ['x86_64']
- name: Save state of services
import_tasks: get_services_state.yml

multiple-ntp-servers.yml Normal file

@ -0,0 +1,14 @@
---
- hosts: "{{ target }}"
  vars:
    timesync_ntp_servers:
      - hostname: 0.pool.ntp.org
        iburst: yes
      - hostname: 1.pool.ntp.org
        iburst: yes
      - hostname: 2.pool.ntp.org
        iburst: yes
      - hostname: 3.pool.ntp.org
        iburst: yes
  roles:
    - linux-system-roles.timesync

network-disable-bondtests.diff Normal file

@ -0,0 +1,48 @@
diff --git a/tests/playbooks/tests_bond.yml b/tests/playbooks/tests_bond.yml
index d646a0b..8689d59 100644
--- a/tests/playbooks/tests_bond_deprecated.yml
+++ b/tests/playbooks/tests_bond_deprecated.yml
@@ -8,6 +8,8 @@
dhcp_interface1: test1
slave2_profile: bond0.1
dhcp_interface2: test2
+ tags:
+ - "tests::expfail"
tasks:
- name: "INIT Prepare setup"
debug:
diff --git a/tests/tests_bond_initscripts.yml b/tests/tests_bond_initscripts.yml
index 8fa74c5..6a231c4 100644
--- a/tests/tests_bond_deprecated_initscripts.yml
+++ b/tests/tests_bond_deprecated_initscripts.yml
@@ -9,5 +9,6 @@
network_provider: initscripts
tags:
- always
+ - "tests::expfail"
- import_playbook: playbooks/tests_bond_deprecated.yml
diff --git a/tests/playbooks/tests_bond.yml b/tests/playbooks/tests_bond.yml
index d646a0b..8689d59 100644
--- a/tests/playbooks/tests_bond.yml
+++ b/tests/playbooks/tests_bond.yml
@@ -13,6 +13,8 @@
dhcp_interface1: test1
port2_profile: bond0.1
dhcp_interface2: test2
+ tags:
+ - "tests::expfail"
tasks:
- name: "INIT Prepare setup"
debug:
diff --git a/tests/tests_bond_initscripts.yml b/tests/tests_bond_initscripts.yml
index 8fa74c5..6a231c4 100644
--- a/tests/tests_bond_initscripts.yml
+++ b/tests/tests_bond_initscripts.yml
@@ -9,5 +9,6 @@
network_provider: initscripts
tags:
- always
+ - "tests::expfail"
- import_playbook: playbooks/tests_bond.yml

network-epel-minimal.diff Normal file

@ -0,0 +1,401 @@
diff --git a/tests/playbooks/integration_pytest_python3.yml b/tests/playbooks/integration_pytest_python3.yml
index 075355b..5fc9dea 100644
--- a/tests/playbooks/integration_pytest_python3.yml
+++ b/tests/playbooks/integration_pytest_python3.yml
@@ -9,6 +9,11 @@
- rsync
tasks:
+ - name: Install EPEL for RHEL and CentOS
+ # yamllint disable-line rule:line-length
+ command: "yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm"
+ when: ansible_distribution in ["CentOS", "RedHat"]
+
- name: Install rpm dependencies
package:
state: present
diff --git a/tests/tasks/el_repo_setup.yml b/tests/tasks/el_repo_setup.yml
deleted file mode 100644
index 0656e8c..0000000
--- a/tests/tasks/el_repo_setup.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-- name: Fix CentOS6 Base repo
- copy:
- dest: /etc/yum.repos.d/CentOS-Base.repo
- content: |
- [base]
- name=CentOS-$releasever - Base
- baseurl=https://vault.centos.org/6.10/os/$basearch/
- gpgcheck=1
- gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6
-
- [updates]
- name=CentOS-$releasever - Updates
- baseurl=https://vault.centos.org/6.10/updates/$basearch/
- gpgcheck=1
- gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6
-
- [extras]
- name=CentOS-$releasever - Extras
- baseurl=https://vault.centos.org/6.10/extras/$basearch/
- gpgcheck=1
- gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6
- when:
- - ansible_distribution == 'CentOS'
- - ansible_distribution_major_version == '6'
-- include_tasks: enable_epel.yml
diff --git a/tests/tasks/setup_802_1x_server.yml b/tests/tasks/setup_802_1x_server.yml
index 49d1ce1..3bf16a9 100644
--- a/tests/tasks/setup_802_1x_server.yml
+++ b/tests/tasks/setup_802_1x_server.yml
@@ -1,5 +1,7 @@
# SPDX-License-Identifier: BSD-3-Clause
---
+- include_tasks: enable_epel.yml
+
- name: Install hostapd
package:
name: hostapd
diff --git a/tests/tasks/setup_mock_wifi.yml b/tests/tasks/setup_mock_wifi.yml
index 997b704..d7a1e22 100644
--- a/tests/tasks/setup_mock_wifi.yml
+++ b/tests/tasks/setup_mock_wifi.yml
@@ -1,5 +1,7 @@
# SPDX-License-Identifier: BSD-3-Clause
---
+- include_tasks: enable_epel.yml
+
- name: Install packages required to set up mock wifi network
package:
name:
diff --git a/tests/tests_802_1x_nm.yml b/tests/tests_802_1x_nm.yml
index a27d8ea..288cd5d 100644
--- a/tests/tests_802_1x_nm.yml
+++ b/tests/tests_802_1x_nm.yml
@@ -5,7 +5,6 @@
- hosts: all
name: Run playbook 'playbooks/tests_802_1x.yml' with nm as provider
tasks:
- - include_tasks: tasks/el_repo_setup.yml
- name: Set network provider to 'nm'
set_fact:
network_provider: nm
diff --git a/tests/tests_802_1x_updated_nm.yml b/tests/tests_802_1x_updated_nm.yml
index 5a25f5b..bd335e4 100644
--- a/tests/tests_802_1x_updated_nm.yml
+++ b/tests/tests_802_1x_updated_nm.yml
@@ -5,7 +5,6 @@
- hosts: all
name: Run playbook 'playbooks/tests_802_1x_updated.yml' with nm as provider
tasks:
- - include_tasks: tasks/el_repo_setup.yml
- name: Set network provider to 'nm'
set_fact:
network_provider: nm
diff --git a/tests/tests_bond_deprecated_initscripts.yml b/tests/tests_bond_deprecated_initscripts.yml
index 1e74bcc..383b488 100644
--- a/tests/tests_bond_deprecated_initscripts.yml
+++ b/tests/tests_bond_deprecated_initscripts.yml
@@ -4,7 +4,6 @@
- hosts: all
name: Run playbook 'playbooks/tests_bond_deprecated.yml' with initscripts
tasks:
- - include_tasks: tasks/el_repo_setup.yml
- name: Set network provider to 'initscripts'
set_fact:
network_provider: initscripts
diff --git a/tests/tests_bond_initscripts.yml b/tests/tests_bond_initscripts.yml
index 32fcc32..8fa74c5 100644
--- a/tests/tests_bond_initscripts.yml
+++ b/tests/tests_bond_initscripts.yml
@@ -4,7 +4,6 @@
- hosts: all
name: Run playbook 'playbooks/tests_bond.yml' with initscripts as provider
tasks:
- - include_tasks: tasks/el_repo_setup.yml
- name: Set network provider to 'initscripts'
set_fact:
network_provider: initscripts
diff --git a/tests/tests_bond_nm.yml b/tests/tests_bond_nm.yml
index 7075d95..8ac6cbd 100644
--- a/tests/tests_bond_nm.yml
+++ b/tests/tests_bond_nm.yml
@@ -5,7 +5,6 @@
- hosts: all
name: Run playbook 'playbooks/tests_bond.yml' with nm as provider
tasks:
- - include_tasks: tasks/el_repo_setup.yml
- name: Set network provider to 'nm'
set_fact:
network_provider: nm
diff --git a/tests/tests_bridge_initscripts.yml b/tests/tests_bridge_initscripts.yml
index 8ce42e6..db5663c 100644
--- a/tests/tests_bridge_initscripts.yml
+++ b/tests/tests_bridge_initscripts.yml
@@ -4,7 +4,6 @@
- hosts: all
name: Run playbook 'playbooks/tests_bridge.yml' with initscripts as provider
tasks:
- - include_tasks: tasks/el_repo_setup.yml
- name: Set network provider to 'initscripts'
set_fact:
network_provider: initscripts
diff --git a/tests/tests_bridge_nm.yml b/tests/tests_bridge_nm.yml
index 3d1b53a..c565952 100644
--- a/tests/tests_bridge_nm.yml
+++ b/tests/tests_bridge_nm.yml
@@ -5,7 +5,6 @@
- hosts: all
name: Run playbook 'playbooks/tests_bridge.yml' with nm as provider
tasks:
- - include_tasks: tasks/el_repo_setup.yml
- name: Set network provider to 'nm'
set_fact:
network_provider: nm
diff --git a/tests/tests_default.yml b/tests/tests_default.yml
index e196314..f6f7550 100644
--- a/tests/tests_default.yml
+++ b/tests/tests_default.yml
@@ -5,7 +5,6 @@
roles:
- linux-system-roles.network
tasks:
- - include_tasks: tasks/el_repo_setup.yml
- name: Test warning and info logs
assert:
that:
diff --git a/tests/tests_default_initscripts.yml b/tests/tests_default_initscripts.yml
index 006889c..cc8b875 100644
--- a/tests/tests_default_initscripts.yml
+++ b/tests/tests_default_initscripts.yml
@@ -2,7 +2,6 @@
---
- hosts: all
tasks:
- - include_tasks: tasks/el_repo_setup.yml
- name: Set network provider to 'initscripts'
set_fact:
network_provider: initscripts
diff --git a/tests/tests_default_nm.yml b/tests/tests_default_nm.yml
index 54bc3e1..8138ca9 100644
--- a/tests/tests_default_nm.yml
+++ b/tests/tests_default_nm.yml
@@ -2,7 +2,6 @@
---
- hosts: all
tasks:
- - include_tasks: tasks/el_repo_setup.yml
- name: Set network provider to 'nm'
set_fact:
network_provider: nm
diff --git a/tests/tests_ethernet_initscripts.yml b/tests/tests_ethernet_initscripts.yml
index 366b052..62e75fe 100644
--- a/tests/tests_ethernet_initscripts.yml
+++ b/tests/tests_ethernet_initscripts.yml
@@ -4,8 +4,6 @@
- hosts: all
name: Run playbook 'playbooks/tests_ethernet.yml' with initscripts as provider
tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
- name: Set network provider to 'initscripts'
set_fact:
network_provider: initscripts
diff --git a/tests/tests_ethernet_nm.yml b/tests/tests_ethernet_nm.yml
index 238172d..ecefa14 100644
--- a/tests/tests_ethernet_nm.yml
+++ b/tests/tests_ethernet_nm.yml
@@ -5,8 +5,6 @@
- hosts: all
name: Run playbook 'playbooks/tests_ethernet.yml' with nm as provider
tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
- name: Set network provider to 'nm'
set_fact:
network_provider: nm
diff --git a/tests/tests_ethtool_features_initscripts.yml b/tests/tests_ethtool_features_initscripts.yml
index 5bac5d3..6aea73b 100644
--- a/tests/tests_ethtool_features_initscripts.yml
+++ b/tests/tests_ethtool_features_initscripts.yml
@@ -2,7 +2,6 @@
# set network provider and gather facts
- hosts: all
tasks:
- - include_tasks: tasks/el_repo_setup.yml
- name: Set network provider to 'initscripts'
set_fact:
network_provider: initscripts
diff --git a/tests/tests_ethtool_features_nm.yml b/tests/tests_ethtool_features_nm.yml
index 2027862..30c6faa 100644
--- a/tests/tests_ethtool_features_nm.yml
+++ b/tests/tests_ethtool_features_nm.yml
@@ -5,8 +5,6 @@
- hosts: all
name: Run playbook 'playbooks/tests_ethtool_features.yml' with nm as provider
tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
- name: Set network provider to 'nm'
set_fact:
network_provider: nm
diff --git a/tests/tests_helpers_and_asserts.yml b/tests/tests_helpers_and_asserts.yml
index 64e2875..5514182 100644
--- a/tests/tests_helpers_and_asserts.yml
+++ b/tests/tests_helpers_and_asserts.yml
@@ -3,8 +3,6 @@
- name: Check that creating and removing test devices and assertions work
hosts: all
tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
- name: test veth interface management
include_tasks: tasks/create_and_remove_interface.yml
vars:
diff --git a/tests/tests_integration_pytest.yml b/tests/tests_integration_pytest.yml
index 9b80bd4..153214d 100644
--- a/tests/tests_integration_pytest.yml
+++ b/tests/tests_integration_pytest.yml
@@ -1,8 +1,7 @@
# SPDX-License-Identifier: BSD-3-Clause
---
-- hosts: all
- tasks:
- - include_tasks: tasks/el_repo_setup.yml
+- name: Empty play to gather facts
+ hosts: all
- import_playbook: playbooks/integration_pytest_python3.yml
when: (ansible_distribution in ["CentOS", "RedHat"] and
diff --git a/tests/tests_provider_nm.yml b/tests/tests_provider_nm.yml
index 67fcffe..99306a1 100644
--- a/tests/tests_provider_nm.yml
+++ b/tests/tests_provider_nm.yml
@@ -5,8 +5,6 @@
- hosts: all
name: Run playbook 'playbooks/tests_provider.yml' with nm as provider
tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
- name: Set network provider to 'nm'
set_fact:
network_provider: nm
diff --git a/tests/tests_reapply_nm.yml b/tests/tests_reapply_nm.yml
index eb48ddb..69fb208 100644
--- a/tests/tests_reapply_nm.yml
+++ b/tests/tests_reapply_nm.yml
@@ -5,8 +5,6 @@
- hosts: all
name: Run playbook 'playbooks/tests_reapply.yml' with nm as provider
tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
- name: Set network provider to 'nm'
set_fact:
network_provider: nm
diff --git a/tests/tests_regression_nm.yml b/tests/tests_regression_nm.yml
index b2c46e9..9eb8084 100644
--- a/tests/tests_regression_nm.yml
+++ b/tests/tests_regression_nm.yml
@@ -3,8 +3,6 @@
# set network provider and gather facts
- hosts: all
tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
- name: Set network provider to 'nm'
set_fact:
network_provider: nm
diff --git a/tests/tests_states_initscripts.yml b/tests/tests_states_initscripts.yml
index fa94103..3e55a43 100644
--- a/tests/tests_states_initscripts.yml
+++ b/tests/tests_states_initscripts.yml
@@ -4,8 +4,6 @@
- hosts: all
name: Run playbook 'playbooks/tests_states.yml' with initscripts as provider
tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
- name: Set network provider to 'initscripts'
set_fact:
network_provider: initscripts
diff --git a/tests/tests_states_nm.yml b/tests/tests_states_nm.yml
index 34c8a24..3164a3a 100644
--- a/tests/tests_states_nm.yml
+++ b/tests/tests_states_nm.yml
@@ -5,8 +5,6 @@
- hosts: all
name: Run playbook 'playbooks/tests_states.yml' with nm as provider
tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
- name: Set network provider to 'nm'
set_fact:
network_provider: nm
diff --git a/tests/tests_team_nm.yml b/tests/tests_team_nm.yml
index 8048029..0516765 100644
--- a/tests/tests_team_nm.yml
+++ b/tests/tests_team_nm.yml
@@ -5,8 +5,6 @@
- hosts: all
name: Run playbook 'playbooks/tests_team.yml' with nm as provider
tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
- name: Set network provider to 'nm'
set_fact:
network_provider: nm
diff --git a/tests/tests_unit.yml b/tests/tests_unit.yml
index 44dfaec..8c5388b 100644
--- a/tests/tests_unit.yml
+++ b/tests/tests_unit.yml
@@ -3,7 +3,7 @@
- hosts: all
name: Setup for test running
tasks:
- - include_tasks: tasks/el_repo_setup.yml
+ - include_tasks: tasks/enable_epel.yml
- name: Install dependencies
package:
diff --git a/tests/tests_vlan_mtu_initscripts.yml b/tests/tests_vlan_mtu_initscripts.yml
index dcd5d74..37770a9 100644
--- a/tests/tests_vlan_mtu_initscripts.yml
+++ b/tests/tests_vlan_mtu_initscripts.yml
@@ -4,8 +4,6 @@
- hosts: all
name: Run playbook 'playbooks/tests_vlan_mtu.yml' with initscripts as provider
tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
- name: Set network provider to 'initscripts'
set_fact:
network_provider: initscripts
diff --git a/tests/tests_vlan_mtu_nm.yml b/tests/tests_vlan_mtu_nm.yml
index c38263c..f201de3 100644
--- a/tests/tests_vlan_mtu_nm.yml
+++ b/tests/tests_vlan_mtu_nm.yml
@@ -5,8 +5,6 @@
- hosts: all
name: Run playbook 'playbooks/tests_vlan_mtu.yml' with nm as provider
tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
- name: Set network provider to 'nm'
set_fact:
network_provider: nm
diff --git a/tests/tests_wireless_nm.yml b/tests/tests_wireless_nm.yml
index 03b5ad6..86baf67 100644
--- a/tests/tests_wireless_nm.yml
+++ b/tests/tests_wireless_nm.yml
@@ -5,8 +5,6 @@
- hosts: all
name: Run playbook 'playbooks/tests_wireless.yml' with nm as provider
tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
- name: Set network provider to 'nm'
set_fact:
network_provider: nm

network-permissions.diff Normal file

@@ -0,0 +1,6 @@
diff --git a/library/network_connections.py b/library/network_connections.py
old mode 100755
new mode 100644
diff --git a/tests/unit/test_network_connections.py b/tests/unit/test_network_connections.py
old mode 100755
new mode 100644

network-tier1-tags.diff Normal file

@@ -0,0 +1,531 @@
diff --git a/tests/playbooks/tests_802_1x.yml b/tests/playbooks/tests_802_1x.yml
index 9cce1ae..76d99e9 100644
--- a/tests/playbooks/tests_802_1x.yml
+++ b/tests/playbooks/tests_802_1x.yml
@@ -1,5 +1,10 @@
# SPDX-License-Identifier: BSD-3-Clause
---
+- name: Save host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/save_state.yml
+
- hosts: all
vars:
interface: 802-1x-test
@@ -122,3 +127,8 @@
command: update-ca-trust
tags:
- "tests::cleanup"
+
+- name: Restore host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/restore_state.yml
diff --git a/tests/playbooks/tests_bond.yml b/tests/playbooks/tests_bond.yml
index 69f07f8..1e45788 100644
--- a/tests/playbooks/tests_bond.yml
+++ b/tests/playbooks/tests_bond.yml
@@ -1,5 +1,10 @@
# SPDX-License-Identifier: BSD-3-Clause
---
+- name: Save host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/save_state.yml
+
- hosts: all
vars:
controller_profile: bond0
@@ -95,3 +100,8 @@
- import_tasks: tasks/remove_test_interfaces_with_dhcp.yml
tags:
- "tests::cleanup"
+
+- name: Restore host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/restore_state.yml
diff --git a/tests/playbooks/tests_bridge.yml b/tests/playbooks/tests_bridge.yml
index d79d6ad..c8cf3cd 100644
--- a/tests/playbooks/tests_bridge.yml
+++ b/tests/playbooks/tests_bridge.yml
@@ -1,5 +1,10 @@
# SPDX-License-Identifier: BSD-3-Clause
---
+- name: Save host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/save_state.yml
+
- name: Test configuring bridges
hosts: all
vars:
@@ -14,6 +19,8 @@
- name: Add test bridge
hosts: all
+ tags:
+ - 'tests::net::bridge'
vars:
network_connections:
- name: "{{ interface }}"
@@ -36,11 +43,15 @@
task: tasks/assert_profile_present.yml
- import_playbook: down_profile.yml
+ tags:
+ - 'tests::net::bridge'
vars:
profile: "{{ interface }}"
# FIXME: assert profile/device down
- import_playbook: remove_profile.yml
+ tags:
+ - 'tests::net::bridge'
vars:
profile: "{{ interface }}"
@@ -51,5 +62,19 @@
# FIXME: Devices might still be left when profile is absent
# - import_playbook: run_tasks.yml
-# vars:
+# vars:
# task: tasks/assert_device_absent.yml
+
+- name: Remove test bridge
+ hosts: all
+ tags:
+ - 'tests::cleanup'
+ - 'tests::net::bridge::cleanup'
+ tasks:
+ - command: 'ip link delete "{{ interface }}"'
+ ignore_errors: yes
+
+- name: Restore host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/restore_state.yml
diff --git a/tests/playbooks/tests_checkpoint_cleanup.yml b/tests/playbooks/tests_checkpoint_cleanup.yml
index 18e3fd7..3b5a41a 100644
--- a/tests/playbooks/tests_checkpoint_cleanup.yml
+++ b/tests/playbooks/tests_checkpoint_cleanup.yml
@@ -4,6 +4,11 @@
# mark a device as unmanaged for NM and then tries to activiate it using NM.
# This failed without removing the checkpoint.
---
+- name: Save host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/save_state.yml
+
- hosts: all
vars:
interface: cptstbr
@@ -80,3 +85,8 @@
ignore_errors: true
tags:
- "tests::cleanup"
+
+- name: Restore host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/restore_state.yml
diff --git a/tests/playbooks/tests_ethernet.yml b/tests/playbooks/tests_ethernet.yml
index cd02579..adcffee 100644
--- a/tests/playbooks/tests_ethernet.yml
+++ b/tests/playbooks/tests_ethernet.yml
@@ -1,5 +1,10 @@
# SPDX-License-Identifier: BSD-3-Clause
---
+- name: Save host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/save_state.yml
+
- hosts: all
tasks:
- debug:
@@ -9,6 +14,8 @@
- name: Test configuring ethernet devices
hosts: all
+ tags:
+ - 'tests::net::veth'
vars:
type: veth
interface: lsr27
@@ -26,6 +33,8 @@
- name: Test static interface up
hosts: all
+ tags:
+ - 'tests::net::reconf'
vars:
network_connections:
- name: "{{ interface }}"
@@ -48,17 +57,29 @@
# FIXME: assert profile present
# FIXME: assert profile/device up + IP address
- import_playbook: down_profile.yml
+ tags:
+ - 'tests::cleanup'
vars:
profile: "{{ interface }}"
# FIXME: assert profile/device down
- import_playbook: remove_profile.yml
+ tags:
+ - 'tests::cleanup'
vars:
profile: "{{ interface }}"
# FIXME: assert profile away
- name: Remove interfaces
hosts: all
+ tags:
+ - 'tests::cleanup'
+ - 'tests::net::veth::cleanup'
tasks:
- include_tasks: tasks/manage_test_interface.yml
vars:
state: absent
- include_tasks: tasks/assert_device_absent.yml
+
+- name: Restore host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/restore_state.yml
diff --git a/tests/playbooks/tests_ethtool_features.yml b/tests/playbooks/tests_ethtool_features.yml
index 43fddc3..d1a87fe 100644
--- a/tests/playbooks/tests_ethtool_features.yml
+++ b/tests/playbooks/tests_ethtool_features.yml
@@ -1,5 +1,10 @@
# SPDX-License-Identifier: BSD-3-Clause
---
+- name: Save host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/save_state.yml
+
- hosts: all
vars:
interface: testnic1
@@ -198,3 +203,8 @@
state: absent
tags:
- "tests::cleanup"
+
+- name: Restore host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/restore_state.yml
diff --git a/tests/playbooks/tests_provider.yml b/tests/playbooks/tests_provider.yml
index 1db2d08..e097b4b 100644
--- a/tests/playbooks/tests_provider.yml
+++ b/tests/playbooks/tests_provider.yml
@@ -1,5 +1,10 @@
# SPDX-License-Identifier: BSD-3-Clause
---
+- name: Save host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/save_state.yml
+
- hosts: all
vars:
interface: testnic1
@@ -33,3 +38,8 @@
- tasks/cleanup_profile+device.yml
tags:
- tests::provider:initscripts_to_nm
+
+- name: Restore host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/restore_state.yml
diff --git a/tests/playbooks/tests_reapply.yml b/tests/playbooks/tests_reapply.yml
index 4b1cb09..6995607 100644
--- a/tests/playbooks/tests_reapply.yml
+++ b/tests/playbooks/tests_reapply.yml
@@ -4,6 +4,11 @@
# of via Ansible. Until there is better test support for this, just check the
# log output for the respective log message.
---
+- name: Save host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/save_state.yml
+
- hosts: all
vars:
interface: rpltstbr
@@ -64,3 +69,8 @@
ignore_errors: true
tags:
- "tests::cleanup"
+
+- name: Restore host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/restore_state.yml
diff --git a/tests/playbooks/tests_states.yml b/tests/playbooks/tests_states.yml
index eec27c0..a8d0ecd 100644
--- a/tests/playbooks/tests_states.yml
+++ b/tests/playbooks/tests_states.yml
@@ -135,3 +135,23 @@
- tasks/cleanup_profile+device.yml
tags:
- tests::states:remove_down_twice
+
+ pre_tasks:
+ - name: Save host state
+ import_tasks: tasks/save_state.yml
+
+ post_tasks:
+ - name: Remove test profile
+ tags:
+ - 'tests::cleanup'
+ - 'tests::net::bridge::cleanup'
+ import_role:
+ name: linux-system-roles.network
+ vars:
+ network_connections:
+ - name: statebr
+ state: down
+ persistent_state: absent
+
+ - name: Restore host state
+ import_tasks: tasks/restore_state.yml
diff --git a/tests/playbooks/tests_vlan_mtu.yml b/tests/playbooks/tests_vlan_mtu.yml
index 029b599..378d5fe 100644
--- a/tests/playbooks/tests_vlan_mtu.yml
+++ b/tests/playbooks/tests_vlan_mtu.yml
@@ -10,6 +10,8 @@
- include_tasks: tasks/manage_test_interface.yml
vars:
state: present
+ tags:
+ - 'tests::net::veth'
- include_tasks: tasks/assert_device_present.yml
- name: >-
TEST: I can configure the MTU for a vlan interface without autoconnect.
@@ -38,6 +40,8 @@
ip:
dhcp4: false
auto6: false
+ tags:
+ - 'tests::net::reconf'
- include_tasks: tasks/assert_device_present.yml
vars:
interface: "{{ vlan_interface }}"
@@ -62,6 +66,20 @@
persistent_state: absent
state: down
ignore_errors: true
+ tags:
+ - 'tests::cleanup'
+ - 'tests::net::veth::cleanup'
- include_tasks: tasks/manage_test_interface.yml
vars:
state: absent
+ tags:
+ - 'tests::cleanup'
+ - 'tests::net::veth::cleanup'
+
+ pre_tasks:
+ - name: Save host state
+ import_tasks: tasks/save_state.yml
+
+ post_tasks:
+ - name: Restore host state
+ import_tasks: tasks/restore_state.yml
diff --git a/tests/playbooks/tests_wireless.yml b/tests/playbooks/tests_wireless.yml
index 822a15e..52661bd 100644
--- a/tests/playbooks/tests_wireless.yml
+++ b/tests/playbooks/tests_wireless.yml
@@ -1,5 +1,10 @@
# SPDX-License-Identifier: BSD-3-Clause
---
+- name: Save host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/save_state.yml
+
- hosts: all
vars:
interface: wlan0
@@ -86,3 +91,8 @@
- include_tasks: tasks/cleanup_mock_wifi.yml
tags:
- "tests::cleanup"
+
+- name: Restore host state
+ hosts: all
+ tasks:
+ - import_tasks: tasks/restore_state.yml
diff --git a/tests/tasks/commonvars.yml b/tests/tasks/commonvars.yml
new file mode 100644
index 0000000..50452f7
--- /dev/null
+++ b/tests/tasks/commonvars.yml
@@ -0,0 +1,2 @@
+restore_services:
+ - NetworkManager
diff --git a/tests/tasks/get_services_state.yml b/tests/tasks/get_services_state.yml
new file mode 100644
index 0000000..4fe5d36
--- /dev/null
+++ b/tests/tasks/get_services_state.yml
@@ -0,0 +1,4 @@
+- name: Get initial state of services
+ tags: tests::cleanup
+ service_facts:
+ register: initial_state
diff --git a/tests/tasks/restore_services_state.yml b/tests/tasks/restore_services_state.yml
new file mode 100644
index 0000000..2035dfc
--- /dev/null
+++ b/tests/tasks/restore_services_state.yml
@@ -0,0 +1,22 @@
+- block:
+ - name: load common vars
+ include_vars:
+ file: commonvars.yml
+
+ - name: Get final state of services
+ service_facts:
+ register: final_state
+
+ - name: Restore state of services
+ service:
+ name: "{{ item }}"
+ state: "{{ 'started' if
+ initial_state.ansible_facts.services[sname]['state']
+ == 'running' else 'stopped' }}"
+ when:
+ - sname in final_state.ansible_facts.services
+ - sname in initial_state.ansible_facts.services
+ vars:
+ sname: "{{ item + '.service' }}"
+ with_items: "{{ restore_services }}"
+ tags: tests::cleanup
diff --git a/tests/tasks/restore_state.yml b/tests/tasks/restore_state.yml
new file mode 100644
index 0000000..f4e3d5f
--- /dev/null
+++ b/tests/tasks/restore_state.yml
@@ -0,0 +1,24 @@
+---
+- name: Remove /etc/sysconfig/network if there was no one
+ tags:
+ - 'tests::cleanup'
+ file:
+ path: /etc/sysconfig/network
+ state: absent
+ when:
+ - etc_sysconfig_network_stat is defined
+ - not etc_sysconfig_network_stat.stat.exists
+
+- name: Restore services
+ import_tasks: restore_services_state.yml
+
+- name: reload NetworkManager
+ tags:
+ - 'tests::cleanup'
+ command: nmcli connection reload
+ when:
+ - sname in final_state.ansible_facts.services
+ - sname in initial_state.ansible_facts.services
+ - final_state.ansible_facts.services[sname]['state'] == 'running'
+ vars:
+ sname: NetworkManager.service
diff --git a/tests/tasks/save_state.yml b/tests/tasks/save_state.yml
new file mode 100644
index 0000000..5690aed
--- /dev/null
+++ b/tests/tasks/save_state.yml
@@ -0,0 +1,11 @@
+---
+- name: Get services state
+ import_tasks: get_services_state.yml
+
+- name: Investigate /etc/sysconfig/network presence
+ tags:
+ - 'tests::cleanup'
+ stat:
+ path: /etc/sysconfig/network
+ register: etc_sysconfig_network_stat
+ ignore_errors: yes
diff --git a/tests/tests_802_1x_nm.yml b/tests/tests_802_1x_nm.yml
index 288cd5d..840958d 100644
--- a/tests/tests_802_1x_nm.yml
+++ b/tests/tests_802_1x_nm.yml
@@ -4,6 +4,8 @@
# set network provider and gather facts
- hosts: all
name: Run playbook 'playbooks/tests_802_1x.yml' with nm as provider
+ tags:
+ - tests::expfail
tasks:
- name: Set network provider to 'nm'
set_fact:
@@ -17,3 +19,5 @@
- import_playbook: playbooks/tests_802_1x.yml
when:
- ansible_distribution_major_version != '6'
+ tags:
+ - tests::expfail
diff --git a/tests/tests_802_1x_updated_nm.yml b/tests/tests_802_1x_updated_nm.yml
index bd335e4..4ebcaf9 100644
--- a/tests/tests_802_1x_updated_nm.yml
+++ b/tests/tests_802_1x_updated_nm.yml
@@ -4,6 +4,8 @@
# set network provider and gather facts
- hosts: all
name: Run playbook 'playbooks/tests_802_1x_updated.yml' with nm as provider
+ tags:
+ - tests::expfail
tasks:
- name: Set network provider to 'nm'
set_fact:
@@ -17,3 +19,5 @@
- import_playbook: playbooks/tests_802_1x_updated.yml
when:
- ansible_distribution_major_version != '6'
+ tags:
+ - tests::expfail
diff --git a/tests/tests_default.yml b/tests/tests_default.yml
index f6f7550..98e3c7e 100644
--- a/tests/tests_default.yml
+++ b/tests/tests_default.yml
@@ -10,3 +10,11 @@
that:
- "'warnings' not in __network_connections_result"
msg: "There are warnings"
+
+ pre_tasks:
+ - name: Save host state
+ import_tasks: tasks/save_state.yml
+
+ post_tasks:
+ - name: Restore host state
+ import_tasks: tasks/restore_state.yml
diff --git a/tests/tests_helpers_and_asserts.yml b/tests/tests_helpers_and_asserts.yml
index 5514182..d9bfa11 100644
--- a/tests/tests_helpers_and_asserts.yml
+++ b/tests/tests_helpers_and_asserts.yml
@@ -15,6 +15,14 @@
type: dummy
interface: dummy1298
+ pre_tasks:
+ - name: Save host state
+ import_tasks: tasks/save_state.yml
+
+ post_tasks:
+ - name: Restore host state
+ import_tasks: tasks/restore_state.yml
+
# FIXME: when: does not seem to work with include_tasks, therefore this cannot
# be safely tested for now
# - name: test tap interfaces
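
The tier-1 patch above repeatedly wraps test plays with imports of tasks/save_state.yml and tasks/restore_state.yml so that service state and /etc/sysconfig/network are put back after each run. A minimal sketch of that wrapping pattern follows; it assumes the task files that the patch introduces, and the role name is only an example.

```yaml
---
# Illustrative sketch of the save/restore wrapping added by the patch above;
# it assumes the tasks/save_state.yml and tasks/restore_state.yml files that
# the patch introduces, and the role name is only an example.
- name: Example test play wrapped with host state save and restore
  hosts: all
  pre_tasks:
    - name: Save host state
      import_tasks: tasks/save_state.yml
  roles:
    - linux-system-roles.network
  post_tasks:
    - name: Restore host state
      import_tasks: tasks/restore_state.yml
```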

postfix-meta-el8.diff Normal file

@@ -0,0 +1,16 @@
diff --git a/meta/main.yml b/meta/main.yml
index a0ef6f4..da22270 100644
--- a/meta/main.yml
+++ b/meta/main.yml
@@ -7,8 +7,8 @@ galaxy_info:
min_ansible_version: 2.2
platforms:
- name: Fedora
- versions: [ 24, 25 ]
+ versions: [ 31, 32 ]
- name: EL
- versions: [ 6, 7 ]
+ versions: [ 6, 7, 8 ]


@@ -0,0 +1,80 @@
diff --git a/tasks/ssh.yml b/tasks/ssh.yml
index 1a4e858..b05d01a 100644
--- a/tasks/ssh.yml
+++ b/tasks/ssh.yml
@@ -18,3 +18,15 @@
key: "{{ keydata.content|b64decode }}"
state: present
delegate_to: "{{ kdump_ssh_server }}"
+
+- name: Fetch the servers public key
+ slurp:
+ src: /etc/ssh/ssh_host_rsa_key.pub
+ register: serverpubkey
+ delegate_to: "{{ kdump_ssh_server }}"
+
+- name: Add the servers public key to known_hosts on managed node
+ known_hosts:
+ key: "{{ kdump_ssh_server_location }} {{ serverpubkey.content | b64decode }}"
+ name: "{{ kdump_ssh_server_location }}"
+ path: /etc/ssh/ssh_known_hosts
diff --git a/templates/kdump.conf.j2 b/templates/kdump.conf.j2
index bf24210..504ff34 100644
--- a/templates/kdump.conf.j2
+++ b/templates/kdump.conf.j2
@@ -1,12 +1,17 @@
# {{ ansible_managed }}
{% if kdump_target %}
-{{ kdump_target.type }} {{ kdump_target.location }}
-{% endif %}
+{% if kdump_target.type == "ssh" %}
+ssh {{ kdump_target.location | d(kdump_ssh_user ~ '@' ~ kdump_ssh_server) }}
-{% if kdump_target and kdump_target.type == "ssh" and kdump_sshkey != '/root/.ssh/kdump_id_rsa' %}
+{% if kdump_sshkey != '/root/.ssh/kdump_id_rsa' %}
sshkey {{ kdump_sshkey }}
{% endif %}
+{% else %}
+{{ kdump_target.type }} {{ kdump_target.location }}
+
+{% endif %}
+{% endif %}
path {{ kdump_path }}
{% if kdump_core_collector %}
diff --git a/tests/tests_ssh.yml b/tests/tests_ssh.yml
index 1da99df..d12e884 100644
--- a/tests/tests_ssh.yml
+++ b/tests/tests_ssh.yml
@@ -5,6 +5,11 @@
# known and ansible is supposed to be configured to be able to
# connect to it (via inventory).
kdump_ssh_server_outside: localhost
+ kdump_ssh_source: "{{ ansible_env['SSH_CONNECTION'].split()[0] }}"
+
+ # this is the address at which the ssh dump server can be reached
+ # from the managed host. Dumps will be uploaded there.
+ kdump_ssh_server_inside: "{{ kdump_ssh_source if kdump_ssh_source in hostvars[kdump_ssh_server_outside]['ansible_all_ipv4_addresses'] + hostvars[kdump_ssh_server_outside]['ansible_all_ipv6_addresses'] else hostvars[kdump_ssh_server_outside]['ansible_default_ipv4']['address'] }}"
tasks:
- name: gather facts from {{ kdump_ssh_server_outside }}
@@ -25,8 +30,5 @@
type: ssh
# This is the ssh dump server address visible from inside
# the machine being configured. Dumps are to be copied
- # there. We make here the assumption that this machine is
- # being run as a VM and the dump server is the VM host
- # (i.e. for ansible this is localhost). From the VM its
- # address is then identical to the default route.
- location: "{{ kdump_ssh_user }}@{{ ansible_default_ipv4.gateway }}"
+ # there.
+ location: "{{ kdump_ssh_user }}@{{ kdump_ssh_server_inside }}"
diff --git a/vars/main.yml b/vars/main.yml
new file mode 100644
index 0000000..34d2d62
--- /dev/null
+++ b/vars/main.yml
@@ -0,0 +1,2 @@
+# determine the managed node facing ssh server address
+kdump_ssh_server_location: "{{ kdump_target.location | regex_replace('.*@(.*)$', '\\1') if kdump_target.location is defined else kdump_ssh_server }}"
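
The vars/main.yml hunk above derives the managed-node-facing ssh server address from a location of the form user@host. As a rough illustration of the same regex_replace expression outside the role, a standalone playbook could look like the sketch below; the playbook and variable names are invented for the example and are not part of the role.

```yaml
---
# Illustrative only: mirrors the regex_replace used in vars/main.yml to pull
# the host part out of an ssh dump target; "example_location" is an invented
# variable name, not part of the role.
- hosts: localhost
  gather_facts: false
  vars:
    example_location: "kdump@192.0.2.10"
  tasks:
    - name: Derive the ssh server address from a user@host location
      debug:
        msg: "{{ example_location | regex_replace('.*@(.*)$', '\\1') }}"
```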


@@ -0,0 +1,40 @@
diff --git a/README.md b/README.md
index 5950215..df64284 100644
--- a/README.md
+++ b/README.md
@@ -17,7 +17,7 @@ Example Playbook
Install and enable postfix. Configure "relay_domains=$mydestination" and
-```
+```yaml
---
- hosts: all
vars:
@@ -31,7 +31,7 @@ Install and enable postfix. Configure "relay_domains=$mydestination" and
Install and enable postfix. Do not run 'postfix check' before restarting
postfix:
-```
+```yaml
---
- hosts: all
vars:
@@ -43,7 +43,7 @@ postfix:
Install and enable postfix. Do single backup of main.cf (older backup will be
rewritten) and configure "relay_host=example.com":
-```
+```yaml
---
- hosts: all
vars:
@@ -58,7 +58,7 @@ Install and enable postfix. Do timestamped backup of main.cf and
configure "relay_host=example.com" (if postfix_backup_multiple is
set to true postfix_backup is ignored):
-```
+```yaml
---
- hosts: all
vars:


@@ -0,0 +1,34 @@
From 035a9b2db26af071a95e02a0af08bcbb73b69abf Mon Sep 17 00:00:00 2001
From: Florian Bachmann <fbachmann.public@gmail.com>
Date: Fri, 5 Feb 2021 11:48:53 +0100
Subject: [PATCH] fix incorrect default value (there is no variable named
"present")
---
tasks/main.yml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tasks/main.yml b/tasks/main.yml
index afbe81f..702e369 100644
--- a/tasks/main.yml
+++ b/tasks/main.yml
@@ -118,7 +118,7 @@
ports: "{{ item.ports }}"
proto: "{{ item.proto | default('tcp') }}"
setype: "{{ item.setype }}"
- state: "{{ item.state | default(present) }}"
+ state: "{{ item.state | default('present') }}"
with_items: "{{ selinux_ports }}"
- name: Set linux user to SELinux user mapping
@@ -126,6 +126,6 @@
login: "{{ item.login }}"
seuser: "{{ item.seuser }}"
serange: "{{ item.serange | default('s0') }}"
- state: "{{ item.state | default(present) }}"
+ state: "{{ item.state | default('present') }}"
reload: "{{ item.reload | default(False) }}"
with_items: "{{ selinux_logins }}"
--
2.29.2
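
The patch above replaces default(present) with default('present'). As a hedged illustration of why the quoting matters, assuming nothing beyond stock Ansible: default('present') supplies the literal string, whereas a bare present is looked up as a variable that does not exist.

```yaml
---
# Illustrative only: default('present') yields the literal string "present"
# when item.state is missing, while default(present) would reference an
# undefined variable named "present" and make the template fail.
- hosts: localhost
  gather_facts: false
  tasks:
    - name: Show the corrected default filter usage
      debug:
        msg: "state={{ item.state | default('present') }}"
      loop:
        - { login: 'sar-user' }
        - { login: 'other-user', state: 'absent' }
```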

selinux-tier1-tags.diff Normal file

@@ -0,0 +1,176 @@
diff --git a/tests/set_selinux_variables.yml b/tests/set_selinux_variables.yml
index f294101..7571066 100644
--- a/tests/set_selinux_variables.yml
+++ b/tests/set_selinux_variables.yml
@@ -1,4 +1,12 @@
---
+- name: Install SELinux tool semanage on Fedora
+ package:
+ name:
+ - policycoreutils-python-utils
+ state: present
+ when: ansible_distribution == "Fedora" or
+ ( ansible_distribution_major_version > "7" and
+ ( ansible_distribution == "CentOS" or ansible_distribution == "RedHat" ))
- name: Get local modifications - boolean
command: /usr/sbin/semanage boolean -l -n -C
register: selinux_role_boolean
diff --git a/tests/tests_all_purge.yml b/tests/tests_all_purge.yml
index 03dfe05..6775847 100644
--- a/tests/tests_all_purge.yml
+++ b/tests/tests_all_purge.yml
@@ -8,13 +8,17 @@
fcontext -a -t user_home_dir_t /tmp/test_dir
login -a -s staff_u sar-user
+ tags:
+ - 'tests::avc'
tasks:
- name: Install SELinux tool semanage on Fedora
package:
name:
- policycoreutils-python-utils
state: present
- when: ansible_distribution == "Fedora"
+ when: ansible_distribution == "Fedora" or
+ ( ansible_distribution_major_version > "7" and
+ ( ansible_distribution == "CentOS" or ansible_distribution == "RedHat" ))
- name: Add a Linux System Roles SELinux User
user:
diff --git a/tests/tests_all_transitions.yml b/tests/tests_all_transitions.yml
index f608a42..d0d209b 100644
--- a/tests/tests_all_transitions.yml
+++ b/tests/tests_all_transitions.yml
@@ -1,6 +1,8 @@
- name: Test all the possible selinux_state transitions
hosts: all
become: true
+ tags:
+ - 'tests::reboot'
vars:
states:
- permissive
diff --git a/tests/tests_boolean.yml b/tests/tests_boolean.yml
index 47eafc0..2aa0025 100644
--- a/tests/tests_boolean.yml
+++ b/tests/tests_boolean.yml
@@ -1,4 +1,5 @@
- name: Check if selinux role sets SELinux booleans
+ tags: tests::expfail
hosts: all
become: true
@@ -12,7 +13,7 @@
selinux_booleans:
- { name: 'samba_enable_home_dirs', state: 'on', persistent: 'yes' }
- - include: set_selinux_variables.yml
+ - import_tasks: set_selinux_variables.yml
- name: save state after initial changes and before other changes
set_fact:
boolean_before: "{{ selinux_role_boolean.stdout_lines }}"
diff --git a/tests/tests_fcontext.yml b/tests/tests_fcontext.yml
index 0a411fb..f6f1bf4 100644
--- a/tests/tests_fcontext.yml
+++ b/tests/tests_fcontext.yml
@@ -13,7 +13,7 @@
selinux_fcontexts:
- { target: '/tmp/test_dir1(/.*)?', setype: 'user_home_dir_t', ftype: 'd' }
- - include: set_selinux_variables.yml
+ - import_tasks: set_selinux_variables.yml
- name: save state after initial changes and before other changes
set_fact:
fcontext_before: "{{ selinux_role_fcontext.stdout }}"
diff --git a/tests/tests_login.yml b/tests/tests_login.yml
index efa826d..c7ce462 100644
--- a/tests/tests_login.yml
+++ b/tests/tests_login.yml
@@ -18,6 +18,6 @@
- { login: 'sar-user', seuser: 'staff_u', serange: 's0-s0:c0.c1023', state: 'present' }
- - include: set_selinux_variables.yml
+ - import_tasks: set_selinux_variables.yml
- name: save state after initial changes and before other changes
set_fact:
login_before: "{{ selinux_role_login.stdout }}"
diff --git a/tests/tests_port.yml b/tests/tests_port.yml
index 446f79d..7bb112e 100644
--- a/tests/tests_port.yml
+++ b/tests/tests_port.yml
@@ -29,7 +29,7 @@
selinux_ports:
- { ports: '22022', proto: 'tcp', setype: 'ssh_port_t', state: 'present' }
- - include: set_selinux_variables.yml
+ - import_tasks: set_selinux_variables.yml
- name: save state after other changes
set_fact:
port_after: "{{ selinux_role_port.stdout }}"
diff --git a/tests/tests_selinux_disabled.yml b/tests/tests_selinux_disabled.yml
index afd23e4..883dc6d 100644
--- a/tests/tests_selinux_disabled.yml
+++ b/tests/tests_selinux_disabled.yml
@@ -12,13 +12,17 @@
fcontext -a -t user_home_dir_t /tmp/test_dir
login -a -s staff_u sar-user
+ tags:
+ - 'tests::avc'
tasks:
- name: Install SELinux tool semanage on Fedora
package:
name:
- policycoreutils-python-utils
state: present
- when: ansible_distribution == "Fedora"
+ when: ansible_distribution == "Fedora" or
+ ( ansible_distribution_major_version > "7" and
+ ( ansible_distribution == "CentOS" or ansible_distribution == "RedHat" ))
- name: Add a Linux System Roles SELinux User
user:
@@ -67,17 +69,28 @@
assert:
that: "{{ ansible_selinux.config_mode == 'enforcing' }}"
msg: "SELinux config mode should be enforcing instead of {{ ansible_selinux.config_mode }}"
- - name: Restore original /etc/selinux/config
- copy:
- remote_src: true
- dest: /etc/selinux/config
- src: /etc/selinux/config.test_selinux_disabled
- - name: Remove /etc/selinux/config backup
- file:
- path: /etc/selinux/config.test_selinux_disabled
- state: absent
- - name: Remove Linux System Roles SELinux User
- user:
- name: sar-user
- remove: yes
- state: absent
+
+ - name: Cleanup
+ tags: [ 'tests::cleanup' ]
+ block:
+ - name: Restore original /etc/selinux/config
+ copy:
+ remote_src: true
+ dest: /etc/selinux/config
+ src: /etc/selinux/config.test_selinux_disabled
+
+ - name: Remove /etc/selinux/config backup
+ file:
+ path: /etc/selinux/config.test_selinux_disabled
+ state: absent
+
+ - name: Remove Linux System Roles SELinux User
+ user:
+ name: sar-user
+ remove: yes
+ state: absent
+
+ - import_role:
+ name: linux-system-roles.selinux
+ vars:
+ selinux_all_purge: true

single-pool.yml Normal file

@@ -0,0 +1,9 @@
---
- hosts: "{{ targets }}"
vars:
timesync_ntp_servers:
- hostname: 2.pool.ntp.org
pool: yes
iburst: yes
roles:
- linux-system-roles.timesync

sources Normal file

@@ -0,0 +1,21 @@
SHA512 (kdump-77596fdd976c6160d6152c200a5432c609725a14.tar.gz) = a33c9fc3e31b98d5039865685ab8a1cadd6b7df1ff54b1f375f8e79c62a71d659e3330e4a71340d82cd2476d56847799f3a321a2b9ca053c4da49ff1ef6259bb
SHA512 (postfix-0.1.tar.gz) = 37362befe7fe60802e9b8b4e5c5c8360d3bf17efa6a4d1bbf53e8ce42cd5d5f59cad9f87c2fd586133bc80fd727435de66ce30aecdff73d5493c6cb9b8fbd3a9
SHA512 (timesync-924650d0cd4117f73a7f0413ab745a8632bc5cec.tar.gz) = e31a68269ee7d724c1c1feb18910d2a9a9c28ff67d10c0de887eab453b4fa642853f1d6022742bcd9f356d7c6204dc0fa9baf1a38807a39717f121dd58ab632f
SHA512 (network-bda206d45c87ee8c1a5284de84f5acf5e629de97.tar.gz) = c4dcc9ef7ef83fb6588861f9340ee0f2495f81dce22edb81d8907ff6522f451d72abdf8ea5bf7e7e8319d7a4dd75b62339390635b33aa3dbf50caa549094a0e8
SHA512 (selinux-1.1.1.tar.gz) = f31024375785dd9c382c1398ebd09bd64347b2fb500aa338ffe25ff75fb512a46f9442e1e3d4b86d9ae5b83b7d18a61a087ca9ad79ab25eef937d95a218c39b6
SHA512 (storage-485de47b0dc0787aea077ba448ecb954f53e40c4.tar.gz) = 8a1a0cd52b5e3d6a6abade8de8746ed172155989627bd8cf775f866b3547d461e310a77978a50f8ac0bef0ce8bee55c97aff52ed436152f1a8c4b56266eca0da
SHA512 (metrics-e81b2650108727f38b1c856699aad26af0f44a46.tar.gz) = 738685dd645eda7d5647cb2dbba90bc0343430dc7b2aaebe9a9e75e67e13011872eb1df234840352844bc0732ee1f88f0a5ff0e2012c44dac6e8394a0ff2b9ec
SHA512 (tlog-1.1.0.tar.gz) = f9f36fb2bfc76687652424ecab70c1e11b435551cb373a483c1fde6c7c68013a95a1b313e1187ff5495b22fdf7b56f9e6c97f3015d8144f21bed6617916c07e9
SHA512 (kernel_settings-1.0.1.tar.gz) = 788ff4c36833979ca2d697ff705b45374361aec7b8b131f87a9cb89072334c85d3be6e6161f5e2a3bcb1bbb120fdb10032dae7534b121206d38859712a78e753
SHA512 (logging-4b07edf4e84882c9d0fb979092ba5953aac0b4d5.tar.gz) = c1d338456f7eeb8167c13c7b70669145aa6ccec4f8e6d3b71b2c01bb1637e140e44a8688d4cfa534aa523cc80043594cdd83f203cec325abfd822c1d1f1d327b
SHA512 (nbde_client-1.0.1.tar.gz) = 55d8d76cf13d14373497821022e7e9019ec43adf65084e473b012df329592ea2f7e25271e7dc7054dd1e56e80fa64dcbc108075af26641bfc2a37d94feabcbf4
SHA512 (nbde_server-1.0.1.tar.gz) = 83b954f4e4e68982f2781d671a4372ccf1d3c2afd8933a4ac0686d934884b3e06d554167dc5f5d42755c0772840c4526a7363e071b3882317129c4e0a119ab2f
SHA512 (certificate-50041ce55348fcce34aba4cbe3ea160c5d890ab3.tar.gz) = 567b4d76fdd6d6bf6b61b6109c9c1fefbd4b7207691cf8b2ac1854071d4a1643009f5bf61a2a0bd1aa63a8f7fe5bb80db628b8d8821c90160fe5866a56c586d7
SHA512 (crypto_policies-76b2d5b0460dba22c5d290c1af96e4fdb3434cb9.tar.gz) = 96f18f7bb43c0055d5035f1ea69c2b9f6e03f3e7dd6932c1829b31a214ff8c3936107912c33477ce2f7554328faa152e002ce88afd19acb854a69be27d14b32a
SHA512 (ansible-sshd-e1de59b3c54e9d48a010eeca73755df339c7e628.tar.gz) = 089c5ac20f223bfdc9efaab77efb1e590c4ceb7a21c5ffc2218171942150fa0797c2d5ac38f38a81f4194dd8da9bba9a32df9db4d56255a99e0bd9675bffa355
SHA512 (auto-maintenance-e5ed203b2d7224c0bf0c3fd55452456c8f468cad.tar.gz) = ff87208c54e4104b60117bd6bfc8f9af4ba55082cd1d024492098e2fb9620994adbcb7e767482e653a81113afadfd2035aa139ee540f6229c78502edf6fe4edf
SHA512 (kernel_settings-e5e5abb35fb695e22ccffa855c98ab882650480e.tar.gz) = 46913084a0fa62b2e3ef6eee2be3f58431fbf07469eadb9372d4d3d9260172c4cc24e837c615f4dd0898d711d0da6d242da1f9b2d01cb8dba871947bbbeb12bb
SHA512 (nbde_client-3af7452e4861ee2363b29b23bf78bf11e06be142.tar.gz) = 75c154f46a909a7d60d684009f4f9f4065a9d63b72fef036e63a921d1a4b070f327eba0c27cc176079c7b40e3c2fd9a45fc5aa2240928fef48e521ca5a2f52e3
SHA512 (certificate-daecdc51cedaf67bf821f1f9f8f6c3cc0ca0d03f.tar.gz) = 92eb87ccbc42be71cbcb8dae89647f8c9072bc349c882a56b838aec6b3700c194d3a001a82b0b467205788ea4d7e2000d267c89f312eaa1e74e4f782122d29c3
SHA512 (ssh-effa0a0d993832dee726290f263a2182cf3eacda.tar.gz) = 7dd8f2c2bceb5af4bcd322512cea832a1228c83932084d12157eaa61787818406997b14f94b98daab9ab6090d2b3dd601c79495179f0ba07c7dc9ffbd018ab60
SHA512 (ha_cluster-779bb78559de58bb5a1f25a4b92039c373ef59a4.tar.gz) = cbaaf9f8d35942c7c8fe72f67374ef588c52ca8752450df0312b9a3c5fd4f9c47aef3c5a1f8d8849e499f9baba1fc3080e1359ed06424c99fc692065df652879

sshd-example.diff Normal file

@@ -0,0 +1,43 @@
diff --git a/README.md b/README.md
index 676ad72..dc06d85 100644
--- a/README.md
+++ b/README.md
@@ -190,7 +190,7 @@ defaults. This is useful if the role is used in deployment stage to make sure
the service is able to start on the first attempt. To disable this check, set
this to empty list.
-* `sshd_hostkey_owner`, `sshd_hostkey_group`, `sshd_hostkey_group`
+* `sshd_hostkey_owner`, `sshd_hostkey_group`, `sshd_hostkey_mode`
Use these variables to set the ownership and permissions for the host keys from
the above list.
@@ -273,6 +273,8 @@ for example:
X11Forwarding: yes
```
+More example playbooks can be found in [`examples/`](examples/) directory.
+
Template Generation
-------------------
diff --git a/examples/example-root-login.yml b/examples/example-root-login.yml
new file mode 100644
index 0000000..156e629
--- /dev/null
+++ b/examples/example-root-login.yml
@@ -0,0 +1,15 @@
+---
+- hosts: all
+ tasks:
+ - name: Configure sshd to prevent root and password login except from particular subnet
+ include_role:
+ name: ansible-sshd
+ vars:
+ sshd:
+ # root login and password login is enabled only from a particular subnet
+ PermitRootLogin: no
+ PasswordAuthentication: no
+ Match:
+ - Condition: "Address 192.0.2.0/24"
+ PermitRootLogin: yes
+ PasswordAuthentication: yes


@@ -0,0 +1,25 @@
From bb612fb6c5f76a40fce368acb43d2847e699213d Mon Sep 17 00:00:00 2001
From: Rich Megginson <rmeggins@redhat.com>
Date: Thu, 28 Jan 2021 15:56:14 -0700
Subject: [PATCH] use state: absent instead of state: missing
---
tests/tests_hostkeys_missing.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/tests_hostkeys_missing.yml b/tests/tests_hostkeys_missing.yml
index 9dfe77b..5790684 100644
--- a/tests/tests_hostkeys_missing.yml
+++ b/tests/tests_hostkeys_missing.yml
@@ -40,7 +40,7 @@
- name: Make sure the key was not created
file:
path: /tmp/missing_ssh_host_rsa_key
- state: missing
+ state: absent
register: key
failed_when: key.changed
tags: tests::verify
--
2.29.2


@@ -0,0 +1,142 @@
diff --git a/library/blivet.py b/library/blivet.py
index eb8bb11..e927121 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -104,6 +104,7 @@ try:
from blivet3.formats import get_format
from blivet3.partitioning import do_partitioning
from blivet3.size import Size
+ from blivet3.udev import trigger
from blivet3.util import set_up_logging
BLIVET_PACKAGE = 'blivet3'
except ImportError:
@@ -116,6 +117,7 @@ except ImportError:
from blivet.formats import get_format
from blivet.partitioning import do_partitioning
from blivet.size import Size
+ from blivet.udev import trigger
from blivet.util import set_up_logging
BLIVET_PACKAGE = 'blivet'
except ImportError:
@@ -821,7 +823,10 @@ class BlivetPool(BlivetBase):
def _look_up_disks(self):
""" Look up the pool's disks in blivet's device tree. """
- if not self._pool['disks']:
+ if self._disks:
+ return
+
+ if not self._device and not self._pool['disks']:
raise BlivetAnsibleError("no disks specified for pool '%s'" % self._pool['name'])
elif not isinstance(self._pool['disks'], list):
raise BlivetAnsibleError("pool disks must be specified as a list")
@@ -832,7 +837,7 @@ class BlivetPool(BlivetBase):
if device is not None: # XXX fail if any disk isn't resolved?
disks.append(device)
- if self._pool['disks'] and not disks:
+ if self._pool['disks'] and not self._device and not disks:
raise BlivetAnsibleError("unable to resolve any disks specified for pool '%s' (%s)" % (self._pool['name'], self._pool['disks']))
self._disks = disks
@@ -974,9 +979,9 @@ class BlivetPool(BlivetBase):
""" Schedule actions to configure this pool according to the yaml input. """
global safe_mode
# look up the device
- self._look_up_disks()
self._look_up_device()
self._apply_defaults()
+ self._look_up_disks()
# schedule destroy if appropriate, including member type change
if not self.ultimately_present:
@@ -999,6 +1004,7 @@ class BlivetPartitionPool(BlivetPool):
return self._device.partitionable
def _look_up_device(self):
+ self._look_up_disks()
self._device = self._disks[0]
def _create(self):
@@ -1354,6 +1360,13 @@ def run_module():
actions.append(action)
+ def ensure_udev_update(action):
+ if action.is_create:
+ sys_path = action.device.path
+ if os.path.islink(sys_path):
+ sys_path = os.readlink(action.device.path)
+ trigger(action='change', subsystem='block', name=os.path.basename(sys_path))
+
def action_dict(action):
return dict(action=action.type_desc_str,
fs_type=action.format.type if action.is_format else None,
@@ -1395,6 +1408,7 @@ def run_module():
if scheduled:
# execute the scheduled actions, committing changes to disk
callbacks.action_executed.add(record_action)
+ callbacks.action_executed.add(ensure_udev_update)
try:
b.devicetree.actions.process(devices=b.devicetree.devices, dry_run=module.check_mode)
except Exception as e:
diff --git a/tests/tests_existing_lvm_pool.yml b/tests/tests_existing_lvm_pool.yml
new file mode 100644
index 0000000..854ac0d
--- /dev/null
+++ b/tests/tests_existing_lvm_pool.yml
@@ -0,0 +1,54 @@
+---
+- hosts: all
+ become: true
+ vars:
+ mount_location: '/opt/test1'
+ volume_group_size: '5g'
+ volume_size: '4g'
+ pool_name: foo
+
+ tasks:
+ - include_role:
+ name: linux-system-roles.storage
+
+ - include_tasks: get_unused_disk.yml
+ vars:
+ min_size: "{{ volume_group_size }}"
+ max_return: 1
+
+ - name: Create one LVM logical volume under one volume group
+ include_role:
+ name: linux-system-roles.storage
+ vars:
+ storage_pools:
+ - name: "{{ pool_name }}"
+ disks: "{{ unused_disks }}"
+ volumes:
+ - name: test1
+ size: "{{ volume_size }}"
+
+ - include_tasks: verify-role-results.yml
+
+ - name: Create another volume in the existing pool, identified only by name.
+ include_role:
+ name: linux-system-roles.storage
+ vars:
+ storage_pools:
+ - name: "{{ pool_name }}"
+ volumes:
+ - name: newvol
+ size: '2 GiB'
+ fs_type: ext4
+ fs_label: newvol
+
+ - include_tasks: verify-role-results.yml
+
+ - name: Clean up.
+ include_role:
+ name: linux-system-roles.storage
+ vars:
+ storage_pools:
+ - name: "{{ pool_name }}"
+ state: absent
+
+ - include_tasks: verify-role-results.yml


@@ -0,0 +1,30 @@
commit effb7faf20301ddcee8ee36a1b156a0b9f006bb0
Author: David Lehman <dlehman@redhat.com>
Date: Tue Aug 4 16:00:33 2020 -0400
Be smarter in choosing expected partition name.
BlivetVolume._get_device_id is only used to look up pre-existing
volumes, so we don't have to try too hard to guess it by name.
We can just see if the disk has a single partition and, if so,
return the name of that partition.
Fixes: #141
diff --git a/library/blivet.py b/library/blivet.py
index eb8bb11..0f7ce98 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -554,7 +554,11 @@ class BlivetPartitionVolume(BlivetVolume):
return self._device.raw_device.type == 'partition'
def _get_device_id(self):
- return self._blivet_pool._disks[0].name + '1'
+ device_id = None
+ if self._blivet_pool._disks[0].partitioned and len(self._blivet_pool._disks[0].children) == 1:
+ device_id = self._blivet_pool._disks[0].children[0].name
+
+ return device_id
def _resize(self):
pass


@@ -0,0 +1,326 @@
diff --git a/library/blivet.py b/library/blivet.py
index e927121..f59f821 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -130,6 +130,9 @@ if BLIVET_PACKAGE:
set_up_logging()
log = logging.getLogger(BLIVET_PACKAGE + ".ansible")
+
+MAX_TRIM_PERCENT = 2
+
use_partitions = None # create partitions on pool backing device disks?
disklabel_type = None # user-specified disklabel type
safe_mode = None # do not remove any existing devices or formatting
@@ -445,8 +448,16 @@ class BlivetVolume(BlivetBase):
if not self._device.resizable:
return
- if self._device.format.resizable:
- self._device.format.update_size_info()
+ trim_percent = (1.0 - float(self._device.max_size / size))*100
+ log.debug("resize: size=%s->%s ; trim=%s", self._device.size, size, trim_percent)
+ if size > self._device.max_size and trim_percent <= MAX_TRIM_PERCENT:
+ log.info("adjusting %s resize target from %s to %s to fit in free space",
+ self._volume['name'],
+ size,
+ self._device.max_size)
+ size = self._device.max_size
+ if size == self._device.size:
+ return
if not self._device.min_size <= size <= self._device.max_size:
raise BlivetAnsibleError("volume '%s' cannot be resized to '%s'" % (self._volume['name'], size))
@@ -610,10 +621,18 @@ class BlivetLVMVolume(BlivetVolume):
raise BlivetAnsibleError("invalid size '%s' specified for volume '%s'" % (self._volume['size'], self._volume['name']))
fmt = self._get_format()
+ trim_percent = (1.0 - float(parent.free_space / size))*100
+ log.debug("size: %s ; %s", size, trim_percent)
if size > parent.free_space:
- raise BlivetAnsibleError("specified size for volume '%s' exceeds available space in pool '%s' (%s)" % (size,
- parent.name,
- parent.free_space))
+ if trim_percent > MAX_TRIM_PERCENT:
+ raise BlivetAnsibleError("specified size for volume '%s' exceeds available space in pool '%s' (%s)"
+ % (size, parent.name, parent.free_space))
+ else:
+ log.info("adjusting %s size from %s to %s to fit in %s free space", self._volume['name'],
+ size,
+ parent.free_space,
+ parent.name)
+ size = parent.free_space
try:
device = self._blivet.new_lv(name=self._volume['name'],
diff --git a/tests/tests_create_lv_size_equal_to_vg.yml b/tests/tests_create_lv_size_equal_to_vg.yml
new file mode 100644
index 0000000..21a5788
--- /dev/null
+++ b/tests/tests_create_lv_size_equal_to_vg.yml
@@ -0,0 +1,48 @@
+---
+- hosts: all
+ become: true
+ vars:
+ storage_safe_mode: false
+ mount_location: '/opt/test1'
+ volume_group_size: '10g'
+ lv_size: '10g'
+ unused_disk_subfact: '{{ ansible_devices[unused_disks[0]] }}'
+ disk_size: '{{ unused_disk_subfact.sectors|int *
+ unused_disk_subfact.sectorsize|int }}'
+
+ tasks:
+ - include_role:
+ name: linux-system-roles.storage
+
+ - include_tasks: get_unused_disk.yml
+ vars:
+ min_size: "{{ volume_group_size }}"
+ max_return: 1
+
+ - name: Create one lv which size is equal to vg size
+ include_role:
+ name: linux-system-roles.storage
+ vars:
+ storage_pools:
+ - name: foo
+ disks: "{{ unused_disks }}"
+ volumes:
+ - name: test1
+ size: "{{ lv_size }}"
+ mount_point: "{{ mount_location }}"
+
+ - include_tasks: verify-role-results.yml
+
+ - name: Clean up
+ include_role:
+ name: linux-system-roles.storage
+ vars:
+ storage_pools:
+ - name: foo
+ disks: "{{ unused_disks }}"
+ state: "absent"
+ volumes:
+ - name: test1
+ mount_point: "{{ mount_location }}"
+
+ - include_tasks: verify-role-results.yml
diff --git a/tests/tests_lvm_auto_size_cap.yml b/tests/tests_lvm_auto_size_cap.yml
new file mode 100644
index 0000000..fb17c23
--- /dev/null
+++ b/tests/tests_lvm_auto_size_cap.yml
@@ -0,0 +1,89 @@
+---
+- hosts: all
+ become: true
+
+ tasks:
+ - include_role:
+ name: linux-system-roles.storage
+
+ - include_tasks: get_unused_disk.yml
+ vars:
+ min_size: 10g
+ max_return: 1
+
+ - command: lsblk -b -l --noheadings -o NAME,SIZE
+ register: storage_test_lsblk
+
+ - set_fact:
+ test_disk_size: "{{ storage_test_lsblk.stdout_lines|map('regex_search', '^' + unused_disks[0] + '\\s+\\d+$')|select('string')|first|regex_replace('^\\w+\\s+', '') }}"
+
+ - package:
+ name: bc
+ state: installed
+
+ - command:
+ cmd: bc
+ stdin: "{{ test_disk_size }} *2"
+ register: doubled_size
+
+ - name: Test handling of too-large LVM volume size
+ block:
+ - name: Try to create a pool containing one volume twice the size of the backing disk
+ include_role:
+ name: linux-system-roles.storage
+ vars:
+ storage_pools:
+ - name: foo
+ type: lvm
+ disks: "{{ unused_disks }}"
+ volumes:
+ - name: test1
+ size: "{{ doubled_size.stdout|trim }}"
+ - name: unreachable task
+ fail:
+ msg: UNREACH
+ rescue:
+ - name: Check that we failed in the role
+ assert:
+ that:
+ - ansible_failed_result.msg != 'UNREACH'
+ - blivet_output.failed and
+ blivet_output.msg|regex_search('specified size for volume.+exceeds available')
+ msg: "Role has not failed when it should have"
+
+ - name: Create a pool containing one volume the same size as the backing disk
+ include_role:
+ name: linux-system-roles.storage
+ vars:
+ storage_pools:
+ - name: foo
+ disks: "{{ unused_disks }}"
+ volumes:
+ - name: test1
+ size: "{{ test_disk_size }}"
+
+ - include_tasks: verify-role-results.yml
+
+ - name: Repeat the previous invocation to verify idempotence
+ include_role:
+ name: linux-system-roles.storage
+ vars:
+ storage_pools:
+ - name: foo
+ type: lvm
+ disks: "{{ unused_disks }}"
+ volumes:
+ - name: test1
+ size: "{{ test_disk_size }}"
+
+ - include_tasks: verify-role-results.yml
+
+ - name: Clean up
+ include_role:
+ name: linux-system-roles.storage
+ vars:
+ storage_pools:
+ - name: foo
+ disks: "{{ unused_disks }}"
+ state: absent
+ volumes: []
diff --git a/tests/tests_lvm_errors.yml b/tests/tests_lvm_errors.yml
index 37d41dc..e8dc4f4 100644
--- a/tests/tests_lvm_errors.yml
+++ b/tests/tests_lvm_errors.yml
@@ -11,8 +11,6 @@
- '/non/existent/disk'
invalid_size: 'xyz GiB'
unused_disk_subfact: '{{ ansible_devices[unused_disks[0]] }}'
- too_large_size: '{{ (unused_disk_subfact.sectors|int + 1) *
- unused_disk_subfact.sectorsize|int }}'
tasks:
- include_role:
@@ -86,39 +84,6 @@
- ansible_failed_result.msg != 'UNREACH'
msg: "Role has not failed when it should have"
- # the following does not work properly
- # - name: Verify the output
- # assert:
- # that: "{{ blivet_output.failed and
- # blivet_output.msg|regex_search('invalid size.+for volume') and
- # not blivet_output.changed }}"
- # msg: "Unexpected behavior w/ invalid volume size"
-
- - name: Test for correct handling of too-large volume size.
- block:
- - name: Try to create LVM with a too-large volume size.
- include_role:
- name: linux-system-roles.storage
- vars:
- storage_pools:
- - name: foo
- disks: "{{ unused_disks }}"
- volumes:
- - name: test1
- size: "{{ too_large_size }}"
- mount_point: "{{ mount_location1 }}"
-
- - name: unreachable task
- fail:
- msg: UNREACH
-
- rescue:
- - name: Check that we failed in the role
- assert:
- that:
- - ansible_failed_result.msg != 'UNREACH'
- msg: "Role has not failed when it should have"
-
# the following does not work properly
# - name: Verify the output
# assert:
@@ -138,7 +103,7 @@
disks: "{{ unused_disks[0] }}"
volumes:
- name: test1
- size: "{{ too_large_size }}"
+ size: "{{ volume_size }}"
mount_point: "{{ mount_location1 }}"
- name: unreachable task
@@ -171,7 +136,7 @@
disks: []
volumes:
- name: test1
- size: "{{ too_large_size }}"
+ size: "{{ volume1_size }}"
mount_point: "{{ mount_location1 }}"
- name: unreachable task
diff --git a/tests/tests_misc.yml b/tests/tests_misc.yml
index a69ee98..3139bc7 100644
--- a/tests/tests_misc.yml
+++ b/tests/tests_misc.yml
@@ -7,7 +7,7 @@
volume_group_size: '5g'
volume1_size: '4g'
unused_disk_subfact: '{{ ansible_devices[unused_disks[0]] }}'
- too_large_size: '{{ (unused_disk_subfact.sectors|int + 1) *
+ too_large_size: '{{ (unused_disk_subfact.sectors|int * 1.2) *
unused_disk_subfact.sectorsize|int }}'
tasks:
diff --git a/tests/tests_resize.yml b/tests/tests_resize.yml
index 9eeb2b9..209d129 100644
--- a/tests/tests_resize.yml
+++ b/tests/tests_resize.yml
@@ -9,7 +9,7 @@
invalid_size1: 'xyz GiB'
invalid_size2: 'none'
unused_disk_subfact: '{{ ansible_devices[unused_disks[0]] }}'
- too_large_size: '{{ (unused_disk_subfact.sectors|int + 1) *
+ too_large_size: '{{ unused_disk_subfact.sectors|int * 1.2 *
unused_disk_subfact.sectorsize|int }}'
disk_size: '{{ unused_disk_subfact.sectors|int *
unused_disk_subfact.sectorsize|int }}'
@@ -122,23 +122,7 @@
size: "{{ disk_size }}"
mount_point: "{{ mount_location }}"
- - name: Unreachable task
- fail:
- msg: UNREACH
-
- rescue:
- - name: Check that we failed in the role
- assert:
- that:
- - ansible_failed_result.msg != 'UNREACH'
- msg: "Role has not failed when it should have"
-
- - name: Verify the output
- assert:
- that: "blivet_output.failed and
- blivet_output.msg|regex_search('volume.+cannot be resized to.+') and
- not blivet_output.changed"
- msg: "Unexpected behavior w/ invalid volume size"
+ - include_tasks: verify-role-results.yml
- name: Test for correct handling of invalid size specification
block:

335
timesync-tier1-tags.diff Normal file
View File

@@ -0,0 +1,335 @@
diff --git a/tests/get_services_state.yml b/tests/get_services_state.yml
new file mode 100644
index 0000000..4fe5d36
--- /dev/null
+++ b/tests/get_services_state.yml
@@ -0,0 +1,4 @@
+- name: Get initial state of services
+ tags: tests::cleanup
+ service_facts:
+ register: initial_state
diff --git a/tests/restore_services_state.yml b/tests/restore_services_state.yml
new file mode 100644
index 0000000..3d48975
--- /dev/null
+++ b/tests/restore_services_state.yml
@@ -0,0 +1,19 @@
+- name: Get final state of services
+ tags: tests::cleanup
+ service_facts:
+ register: final_state
+
+- name: Restore state of services
+ tags: tests::cleanup
+ service:
+ name: "{{ item }}"
+ state: "{{ 'started' if initial_state.ansible_facts.services[item + '.service']['state'] == 'running' else 'stopped' }}"
+ when:
+ - item + '.service' in final_state.ansible_facts.services
+ - item + '.service' in initial_state.ansible_facts.services
+ with_items:
+ - chronyd
+ - ntpd
+ - ptp4l
+ - phc2sys
+ - timemaster
diff --git a/tests/tests_default.yml b/tests/tests_default.yml
index 856ebe5..fb298c9 100644
--- a/tests/tests_default.yml
+++ b/tests/tests_default.yml
@@ -3,4 +4,14 @@
hosts: all
roles:
- linux-system-roles.timesync
+
+ pre_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: get_services_state.yml
+
+ post_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: restore_services_state.yml
diff --git a/tests/tests_default_wrapper.yml b/tests/tests_default_wrapper.yml
index a768f4c..b0c0ab3 100644
--- a/tests/tests_default_wrapper.yml
+++ b/tests/tests_default_wrapper.yml
@@ -1,5 +1,8 @@
---
- name: Create static inventory from hostvars
+ tags:
+# - 'tests::tier1'
+ - 'tests::slow'
hosts: all
tasks:
- name: create temporary file
@@ -17,9 +20,15 @@
- name: Run tests_default.yml normally
+ tags:
+# - 'tests::tier1'
+ - 'tests::slow'
import_playbook: tests_default.yml
- name: Run tests_default.yml in check_mode
+ tags:
+# - 'tests::tier1'
+ - 'tests::slow'
hosts: all
tasks:
- name: Run ansible-playbook with tests_default.yml in check mode
diff --git a/tests/tests_ntp.yml b/tests/tests_ntp.yml
index e4b1b5e..446f1dc 100644
--- a/tests/tests_ntp.yml
+++ b/tests/tests_ntp.yml
@@ -18,6 +19,11 @@
roles:
- linux-system-roles.timesync
+ pre_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: get_services_state.yml
+
tasks:
- meta: flush_handlers
@@ -35,3 +41,8 @@
- "'172.16.123.1' in sources.stdout"
- "'172.16.123.2' in sources.stdout"
- "'172.16.123.3' in sources.stdout"
+
+ post_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: restore_services_state.yml
diff --git a/tests/tests_ntp_provider1.yml b/tests/tests_ntp_provider1.yml
index 08ecab9..9fe0db3 100644
--- a/tests/tests_ntp_provider1.yml
+++ b/tests/tests_ntp_provider1.yml
@@ -8,6 +9,10 @@
- linux-system-roles.timesync
pre_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: get_services_state.yml
+
- name: Remove NTP providers
package: name={{ item }} state=absent
with_items:
@@ -27,3 +32,7 @@
assert:
that:
- "'172.16.123.1' in sources.stdout"
+
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: restore_services_state.yml
diff --git a/tests/tests_ntp_provider2.yml b/tests/tests_ntp_provider2.yml
index 5476ae4..e0d5c96 100644
--- a/tests/tests_ntp_provider2.yml
+++ b/tests/tests_ntp_provider2.yml
@@ -8,6 +9,10 @@
- linux-system-roles.timesync
pre_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: get_services_state.yml
+
- name: Remove ntp
package: name=ntp state=absent
@@ -29,3 +34,7 @@
- name: Check chronyd service
shell: chronyc -n tracking
+
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: restore_services_state.yml
diff --git a/tests/tests_ntp_provider3.yml b/tests/tests_ntp_provider3.yml
index 44ca101..d440a64 100644
--- a/tests/tests_ntp_provider3.yml
+++ b/tests/tests_ntp_provider3.yml
@@ -8,6 +9,10 @@
- linux-system-roles.timesync
pre_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: get_services_state.yml
+
- name: Remove chrony
package: name=chrony state=absent
@@ -29,3 +34,7 @@
- name: Check ntpd service
shell: ntpq -c rv | grep 'associd=0'
+
+ - name: Import tasks
+ tags: tests::tier1::cleanup
+ import_tasks: restore_services_state.yml
diff --git a/tests/tests_ntp_provider4.yml b/tests/tests_ntp_provider4.yml
index 8b452b8..8bccba0 100644
--- a/tests/tests_ntp_provider4.yml
+++ b/tests/tests_ntp_provider4.yml
@@ -9,6 +10,10 @@
- linux-system-roles.timesync
pre_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: get_services_state.yml
+
- name: Install chrony
package: name=chrony state=present
register: package_install
@@ -27,3 +32,7 @@
- name: Check chronyd service
shell: chronyc -n tracking
+
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: restore_services_state.yml
diff --git a/tests/tests_ntp_provider5.yml b/tests/tests_ntp_provider5.yml
index 1740164..98a054f 100644
--- a/tests/tests_ntp_provider5.yml
+++ b/tests/tests_ntp_provider5.yml
@@ -9,6 +10,10 @@
- linux-system-roles.timesync
pre_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: get_services_state.yml
+
- name: Install ntp
package: name=ntp state=present
register: package_install
@@ -27,3 +32,7 @@
- name: Check ntpd service
shell: ntpq -c rv | grep 'associd=0'
+
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: restore_services_state.yml
diff --git a/tests/tests_ntp_provider6.yml b/tests/tests_ntp_provider6.yml
index 21a2039..fb41824 100644
--- a/tests/tests_ntp_provider6.yml
+++ b/tests/tests_ntp_provider6.yml
@@ -6,6 +7,10 @@
both_avail: true
tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: get_services_state.yml
+
- name: Check for availability of both NTP providers
package: name={{ item }} state=present
register: package_install
@@ -71,3 +76,7 @@
shell: chronyc -n tracking
when:
- not is_ntp_default
+
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: restore_services_state.yml
diff --git a/tests/tests_ntp_ptp.yml b/tests/tests_ntp_ptp.yml
index cab706f..7f4cdfc 100644
--- a/tests/tests_ntp_ptp.yml
+++ b/tests/tests_ntp_ptp.yml
@@ -22,6 +23,11 @@
roles:
- linux-system-roles.timesync
+ pre_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: get_services_state.yml
+
tasks:
- meta: flush_handlers
@@ -48,3 +54,8 @@
- "'PTP1' in sources.stdout"
when: "'SOF_TIMESTAMPING_TX_' in ethtool.stdout"
+
+ post_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: restore_services_state.yml
diff --git a/tests/tests_ptp_multi.yml b/tests/tests_ptp_multi.yml
index d52d439..936e467 100644
--- a/tests/tests_ptp_multi.yml
+++ b/tests/tests_ptp_multi.yml
@@ -1,5 +1,6 @@
- name: Configure time synchronization with multiple PTP domains
+ tags: tests::expfail
hosts: all
vars:
timesync_ptp_domains:
@@ -16,6 +17,11 @@
roles:
- linux-system-roles.timesync
+ pre_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: get_services_state.yml
+
tasks:
- meta: flush_handlers
@@ -58,3 +64,8 @@
- "'domainNumber 1' in pmc.stdout"
when: "'SOF_TIMESTAMPING_TX_' in ethtool.stdout"
+
+ post_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: restore_services_state.yml
diff --git a/tests/tests_ptp_single.yml b/tests/tests_ptp_single.yml
index 74da310..36d141e 100644
--- a/tests/tests_ptp_single.yml
+++ b/tests/tests_ptp_single.yml
@@ -1,5 +1,6 @@
- name: Configure time synchronization with single PTP domain
+ tags: tests::expfail
hosts: all
vars:
timesync_ptp_domains:
@@ -8,6 +9,11 @@
roles:
- linux-system-roles.timesync
+ pre_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: get_services_state.yml
+
tasks:
- meta: flush_handlers
@@ -31,3 +37,8 @@
- "'domainNumber 3' in pmc.stdout"
when: "'SOF_TIMESTAMPING_TX_' in ethtool.stdout"
+
+ post_tasks:
+ - name: Import tasks
+# tags: tests::tier1::cleanup
+ import_tasks: restore_services_state.yml