leapp-repository/SOURCES/leapp-repository-0.23.0-elevate.patch

diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml
index 3e595e32..4b07e4b3 100644
--- a/.github/workflows/codespell.yml
+++ b/.github/workflows/codespell.yml
@@ -14,7 +14,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
- uses: codespell-project/actions-codespell@v2
with:
ignore_words_list: ro,fo,couldn,repositor,zeor,bootup
diff --git a/.github/workflows/differential-shellcheck.yml b/.github/workflows/differential-shellcheck.yml
index e1bafb93..6c81713c 100644
--- a/.github/workflows/differential-shellcheck.yml
+++ b/.github/workflows/differential-shellcheck.yml
@@ -19,7 +19,7 @@ jobs:
steps:
- name: Repository checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
with:
fetch-depth: 0
diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml
index ed82e0e5..d1b8fb2a 100644
--- a/.github/workflows/unit-tests.yml
+++ b/.github/workflows/unit-tests.yml
@@ -19,40 +19,40 @@ jobs:
- name: 'Unit tests (python:3.12; repos:el9toel10,common)'
python: python3.12
repos: 'el9toel10,common'
- container: ubi9
+ container: el9
- name: 'Linters (python:3.12; repos:el9toel10,common)'
python: python3.12
repos: 'el9toel10,common'
- container: ubi9-lint
+ container: el9-lint
- name: 'Unit tests (python:3.9; repos:el9toel10,common)'
python: python3.9
repos: 'el9toel10,common'
- container: ubi9
+ container: el9
- name: 'Linters (python:3.9; repos:el9toel10,common)'
python: python3.9
repos: 'el9toel10,common'
- container: ubi9-lint
+ container: el9-lint
# 8to9
- name: 'Unit tests (python:3.9; repos:el8toel9,common)'
python: python3.9
repos: 'el8toel9,common'
- container: ubi9
+ container: el9
- name: 'Linters (python:3.9; repos:el8toel9,common)'
python: python3.9
repos: 'el8toel9,common'
- container: ubi9-lint
+ container: el9-lint
- name: 'Unit tests (python:3.6; repos:el8toel9,common)'
python: python3.6
repos: 'el8toel9,common'
- container: ubi8
+ container: el8
- name: 'Linters (python:3.6; repos:el8toel9,common)'
python: python3.6
repos: 'el8toel9,common'
- container: ubi8-lint
+ container: el8-lint
steps:
- name: Checkout code
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
with:
# NOTE(ivasilev) fetch-depth 0 is critical here as leapp deps discovery depends on specific substring in
# commit message and default 1 option will get us just merge commit which has an unrelevant message.
@@ -63,4 +63,10 @@ jobs:
run: |
git branch -f main origin/main
- name: ${{matrix.scenarios.name}}
- run: script -e -c /bin/bash -c 'TERM=xterm podman build --security-opt=seccomp=unconfined -t leapp-tests -f utils/container-tests/Containerfile.${{matrix.scenarios.container}} utils/container-tests && PYTHON_VENV=${{matrix.scenarios.python}} REPOSITORIES=${{matrix.scenarios.repos}} podman run --security-opt=seccomp=unconfined --rm -ti -v ${PWD}:/payload --env=PYTHON_VENV --env=REPOSITORIES leapp-tests'
+ run: |
+ script -e -c /bin/bash -c \
+ 'TERM=xterm \
+ podman build -t leapp-tests -f utils/container-tests/ci/Containerfile.${{matrix.scenarios.container}} . && \
+ PYTHON_VENV=${{matrix.scenarios.python}} \
+ REPOSITORIES=${{matrix.scenarios.repos}} \
+ podman run --rm -ti -v ${PWD}:/payload --env=PYTHON_VENV --env=REPOSITORIES leapp-tests'
diff --git a/.gitignore b/.gitignore
index 0bb92d3d..a04c7ded 100644
--- a/.gitignore
+++ b/.gitignore
@@ -115,6 +115,7 @@ ENV/
# visual studio code configuration
.vscode
+*.code-workspace
# pycharm
.idea
diff --git a/Makefile b/Makefile
index 81b16376..754c2c63 100644
--- a/Makefile
+++ b/Makefile
@@ -51,7 +51,7 @@ _COPR_CONFIG=$${COPR_CONFIG:-~/.config/copr_rh_oamg.conf}
_CONTAINER_TOOL=$${CONTAINER_TOOL:-podman}
# container to run tests in
-_TEST_CONTAINER=$${TEST_CONTAINER:-rhel8}
+_TEST_CONTAINER=$${TEST_CONTAINER:-el8}
# In case just specific CHROOTs should be used for the COPR build, you can
# set the multiple CHROOTs separated by comma in the COPR_CHROOT envar, e.g.
@@ -129,7 +129,7 @@ help:
@echo " test lint source code and run tests"
@echo " test_no_lint run tests without linting the source code"
@echo " test_container run lint and tests in container"
- @echo " - default container is 'rhel8'"
+ @echo " - default container is 'el8'"
@echo " - can be changed by setting TEST_CONTAINER env"
@echo " test_container_all run lint and tests in all available containers"
@echo " test_container_no_lint run tests without linting in container, see test_container"
@@ -164,9 +164,9 @@ help:
@echo " PR=7 SUFFIX='my_additional_suffix' make <target>"
@echo " MR=6 COPR_CONFIG='path/to/the/config/copr/file' make <target>"
@echo " ACTOR=<actor> TEST_LIBS=y make test"
- @echo " BUILD_CONTAINER=rhel8 make build_container"
- @echo " TEST_CONTAINER=f34 make test_container"
- @echo " CONTAINER_TOOL=docker TEST_CONTAINER=rhel8 make test_container_no_lint"
+ @echo " BUILD_CONTAINER=el8 make build_container"
+ @echo " TEST_CONTAINER=f42 make test_container"
+ @echo " CONTAINER_TOOL=docker TEST_CONTAINER=el8 make test_container_no_lint"
@echo ""
clean:
@@ -252,10 +252,10 @@ build_container:
echo "--- Build RPM ${PKGNAME}-${VERSION}-${RELEASE}.el$(DIST_VERSION).rpm in container ---";
case "$(BUILD_CONTAINER)" in \
el8) \
- CONT_FILE="utils/container-builds/Containerfile.ubi8"; \
+ CONT_FILE="utils/container-builds/Containerfile.el8"; \
;; \
el9) \
- CONT_FILE="utils/container-builds/Containerfile.ubi9"; \
+ CONT_FILE="utils/container-builds/Containerfile.el9"; \
;; \
"") \
echo "BUILD_CONTAINER must be set"; \
@@ -379,7 +379,6 @@ test_no_lint:
done; \
$(_PYTHON_VENV) -m pytest $(REPORT_ARG) $(TEST_PATHS) $(LIBRARY_PATH) $(PYTEST_ARGS)
-
test: lint test_no_lint
# container images act like a cache so that dependencies can only be downloaded once
@@ -416,7 +415,7 @@ lint_container:
@_TEST_CONT_TARGET="lint" $(MAKE) test_container
lint_container_all:
- @for container in "f34" "rhel8" "rhel9"; do \
+ @for container in f42 el{8,9}; do \
TEST_CONTAINER=$$container $(MAKE) lint_container || exit 1; \
done
@@ -426,20 +425,20 @@ lint_container_all:
# because e.g RHEL8 to RHEL9 IPU must work on python3.6 and python3.9.
test_container:
@case $(_TEST_CONTAINER) in \
- f34) \
- export CONT_FILE="utils/container-tests/Containerfile.f34"; \
- export _VENV="python3.9"; \
+ f42) \
+ export CONT_FILE="utils/container-tests/Containerfile.f42"; \
+ export _VENV="python3.13"; \
;; \
- rhel8) \
- export CONT_FILE="utils/container-tests/Containerfile.rhel8"; \
+ el8) \
+ export CONT_FILE="utils/container-tests/Containerfile.el8"; \
export _VENV="python3.6"; \
;; \
- rhel9) \
- export CONT_FILE="utils/container-tests/Containerfile.rhel9"; \
+ el9) \
+ export CONT_FILE="utils/container-tests/Containerfile.el9"; \
export _VENV="python3.9"; \
;; \
*) \
- echo "Error: Available containers are: f34, rhel8, rhel9"; exit 1; \
+ echo "Error: Available containers are: f42, el8, el9"; exit 1; \
;; \
esac; \
export TEST_IMAGE="leapp-repo-tests-$(_TEST_CONTAINER)"; \
@@ -471,7 +470,7 @@ test_container:
exit $$res
test_container_all:
- @for container in "f34" "rhel8" "rhel9"; do \
+ @for container in "f42" "el8" "el9"; do \
TEST_CONTAINER=$$container $(MAKE) test_container || exit 1; \
done
@@ -479,14 +478,13 @@ test_container_no_lint:
@_TEST_CONT_TARGET="test_no_lint" $(MAKE) test_container
test_container_all_no_lint:
- @for container in "f34" "rhel8" "rhel9"; do \
+ @for container in f42 el{8,9}; do \
TEST_CONTAINER=$$container $(MAKE) test_container_no_lint || exit 1; \
done
# clean all testing and building containers and their images
clean_containers:
- @for i in "leapp-repo-tests-f34" "leapp-repo-tests-rhel8" \
- "leapp-repo-tests-rhel9" "leapp-repo-build-el8"; do \
+ @for i in leapp-repo-tests-f42 leapp-repo-tests-el{8,9} leapp-repo-build-el{8,9}; do \
$(_CONTAINER_TOOL) kill "$$i-cont" || :; \
$(_CONTAINER_TOOL) rm "$$i-cont" || :; \
$(_CONTAINER_TOOL) rmi "$$i" || :; \
diff --git a/ci/.gitignore b/ci/.gitignore
new file mode 100644
index 00000000..e6f97f0f
--- /dev/null
+++ b/ci/.gitignore
@@ -0,0 +1 @@
+**/.vagrant
diff --git a/ci/ansible/ansible.cfg b/ci/ansible/ansible.cfg
new file mode 100644
index 00000000..d5c13036
--- /dev/null
+++ b/ci/ansible/ansible.cfg
@@ -0,0 +1,4 @@
+[defaults]
+callbacks_enabled=ansible.posix.profile_tasks
+stdout_callback=community.general.yaml
+pipelining=True
diff --git a/ci/ansible/docker-ce.yaml b/ci/ansible/docker-ce.yaml
new file mode 100644
index 00000000..bba5f3df
--- /dev/null
+++ b/ci/ansible/docker-ce.yaml
@@ -0,0 +1,6 @@
+---
+- name: Docker CE configuration
+ hosts: all
+ become: yes
+ roles:
+ - docker-ce
diff --git a/ci/ansible/minimal.yaml b/ci/ansible/minimal.yaml
new file mode 100644
index 00000000..517cc81b
--- /dev/null
+++ b/ci/ansible/minimal.yaml
@@ -0,0 +1,6 @@
+---
+- name: Minimal configuration
+ hosts: all
+ become: yes
+ roles:
+ - minimal
diff --git a/ci/ansible/requirements.yaml b/ci/ansible/requirements.yaml
new file mode 100644
index 00000000..13ca0224
--- /dev/null
+++ b/ci/ansible/requirements.yaml
@@ -0,0 +1,3 @@
+collections:
+ - name: community.general
+ - name: ansible.posix
diff --git a/ci/ansible/roles/docker-ce/README.md b/ci/ansible/roles/docker-ce/README.md
new file mode 100644
index 00000000..860444b1
--- /dev/null
+++ b/ci/ansible/roles/docker-ce/README.md
@@ -0,0 +1,43 @@
+Docker CE Install and configuration
+=========
+
+Install the latest version of Docker CE Engine from the upstream repository. Start and enable the services after installation.
+
+Requirements
+------------
+
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+
+Role Variables
+--------------
+
+`docker_ce_repo_checksum` in defaults/main.yaml. SHA512 Checksum of the docker-ce.repo file.
+A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
+
+Dependencies
+------------
+
+A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+
+Example Playbook
+----------------
+
+Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
+
+ - hosts: all
+ become: yes
+ roles:
+ - role: docker
+ vars:
+ docker_ce_repo_checksum: sha512:XXXX # You can provide a new checksum if the default one is outdated
+
+
+License
+-------
+
+GPL-3.0-or-later
+
+Author Information
+------------------
+
+AlmaLinux OS Foundation
diff --git a/ci/ansible/roles/docker-ce/defaults/main.yaml b/ci/ansible/roles/docker-ce/defaults/main.yaml
new file mode 100644
index 00000000..d0fd0c09
--- /dev/null
+++ b/ci/ansible/roles/docker-ce/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
+# defaults file for docker-ce
+docker_ce_repo_checksum: sha512:1de0b99cbb427e974144f226451711dc491caef6b1256cb599ff307a687ba2d7dd959a016d4e4cfdd4acbd83423ba1f78fa89db61bab35351e35f1152aedaf5c
diff --git a/ci/ansible/roles/docker-ce/handlers/main.yaml b/ci/ansible/roles/docker-ce/handlers/main.yaml
new file mode 100644
index 00000000..a7236219
--- /dev/null
+++ b/ci/ansible/roles/docker-ce/handlers/main.yaml
@@ -0,0 +1,2 @@
+---
+# handlers file for docker-ce
diff --git a/ci/ansible/roles/docker-ce/meta/main.yaml b/ci/ansible/roles/docker-ce/meta/main.yaml
new file mode 100644
index 00000000..aa67ded8
--- /dev/null
+++ b/ci/ansible/roles/docker-ce/meta/main.yaml
@@ -0,0 +1,25 @@
+galaxy_info:
+ author: AlmaLinux OS Community
+ description: Install and configure Docker CE Engine
+ company: AlmaLinux OS Foundation
+
+ license: GPL-3.0-or-later
+
+ min_ansible_version: 2.11
+
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ - 8
+ - 9
+
+ galaxy_tags:
+ - docker
+ - el7
+ - el8
+ - el9
+ - almalinux
+
+dependencies:
+ - minimal
diff --git a/ci/ansible/roles/docker-ce/tasks/install_docker_el7.yaml b/ci/ansible/roles/docker-ce/tasks/install_docker_el7.yaml
new file mode 100644
index 00000000..320477af
--- /dev/null
+++ b/ci/ansible/roles/docker-ce/tasks/install_docker_el7.yaml
@@ -0,0 +1,11 @@
+---
+# Install Docker
+- name: Install Docker CE Stable
+ ansible.builtin.yum:
+ name:
+ - docker-ce
+ - docker-ce-cli
+ - containerd.io
+ - docker-compose-plugin
+ update_cache: yes
+ state: present
diff --git a/ci/ansible/roles/docker-ce/tasks/install_docker_el8.yaml b/ci/ansible/roles/docker-ce/tasks/install_docker_el8.yaml
new file mode 100644
index 00000000..d44a202a
--- /dev/null
+++ b/ci/ansible/roles/docker-ce/tasks/install_docker_el8.yaml
@@ -0,0 +1,11 @@
+---
+# Install Docker
+- name: Install Docker CE Stable
+ ansible.builtin.dnf:
+ name:
+ - docker-ce
+ - docker-ce-cli
+ - containerd.io
+ - docker-compose-plugin
+ update_cache: yes
+ state: present
diff --git a/ci/ansible/roles/docker-ce/tasks/main.yaml b/ci/ansible/roles/docker-ce/tasks/main.yaml
new file mode 100644
index 00000000..989af23f
--- /dev/null
+++ b/ci/ansible/roles/docker-ce/tasks/main.yaml
@@ -0,0 +1,38 @@
+---
+# tasks file for docker-ce
+- name: Add Docker CE repository
+ ansible.builtin.get_url:
+ url: https://download.docker.com/linux/centos/docker-ce.repo
+ dest: /etc/yum.repos.d/docker-ce.repo
+ checksum: "{{ docker_ce_repo_checksum }}"
+ owner: root
+ group: root
+ mode: '0644'
+ seuser: system_u
+ serole: object_r
+ setype: system_conf_t
+
+- name: Remove older versions of Docker on EL7
+ ansible.builtin.include_tasks: remove_old_docker_el7.yaml
+ when: ansible_facts['distribution_major_version'] == '7'
+
+- name: Remove older versions of Docker on >= EL8
+ ansible.builtin.include_tasks: remove_old_docker_el8.yaml
+ when: ansible_facts['distribution_major_version'] == '8'
+
+- name: Install Docker CE Stable on EL7
+ ansible.builtin.include_tasks: install_docker_el7.yaml
+ when: ansible_facts['distribution_major_version'] == '7'
+
+- name: Install Docker CE Stable on >= EL8
+ ansible.builtin.include_tasks: install_docker_el8.yaml
+ when: ansible_facts['distribution_major_version'] == '8'
+
+- name: Start and Enable Docker services
+ ansible.builtin.systemd:
+ name: "{{ item }}"
+ enabled: yes
+ state: started
+ loop:
+ - docker.service
+ - containerd.service
diff --git a/ci/ansible/roles/docker-ce/tasks/remove_old_docker_el7.yaml b/ci/ansible/roles/docker-ce/tasks/remove_old_docker_el7.yaml
new file mode 100644
index 00000000..db9e0960
--- /dev/null
+++ b/ci/ansible/roles/docker-ce/tasks/remove_old_docker_el7.yaml
@@ -0,0 +1,15 @@
+---
+# Remove older versions of Docker
+- name: Uninstall older versions of Docker
+ ansible.builtin.yum:
+ name:
+ - docker
+ - docker-client
+ - docker-client-latest
+ - docker-common
+ - docker-latest
+ - docker-latest-logrotate
+ - docker-logrotate
+ - docker-engine
+ autoremove: yes
+ state: absent
diff --git a/ci/ansible/roles/docker-ce/tasks/remove_old_docker_el8.yaml b/ci/ansible/roles/docker-ce/tasks/remove_old_docker_el8.yaml
new file mode 100644
index 00000000..88f860cf
--- /dev/null
+++ b/ci/ansible/roles/docker-ce/tasks/remove_old_docker_el8.yaml
@@ -0,0 +1,15 @@
+---
+# Remove older versions of Docker
+- name: Uninstall older versions of Docker
+ ansible.builtin.dnf:
+ name:
+ - docker
+ - docker-client
+ - docker-client-latest
+ - docker-common
+ - docker-latest
+ - docker-latest-logrotate
+ - docker-logrotate
+ - docker-engine
+ autoremove: yes
+ state: absent
diff --git a/ci/ansible/roles/docker-ce/tests/inventory b/ci/ansible/roles/docker-ce/tests/inventory
new file mode 100644
index 00000000..878877b0
--- /dev/null
+++ b/ci/ansible/roles/docker-ce/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/ci/ansible/roles/docker-ce/tests/test.yaml b/ci/ansible/roles/docker-ce/tests/test.yaml
new file mode 100644
index 00000000..789ba96e
--- /dev/null
+++ b/ci/ansible/roles/docker-ce/tests/test.yaml
@@ -0,0 +1,5 @@
+---
+- hosts: localhost
+ remote_user: root
+ roles:
+ - docker-ce
diff --git a/ci/ansible/roles/docker-ce/vars/main.yaml b/ci/ansible/roles/docker-ce/vars/main.yaml
new file mode 100644
index 00000000..7ff8a18f
--- /dev/null
+++ b/ci/ansible/roles/docker-ce/vars/main.yaml
@@ -0,0 +1,2 @@
+---
+# vars file for docker-ce
diff --git a/ci/ansible/roles/minimal/README.md b/ci/ansible/roles/minimal/README.md
new file mode 100644
index 00000000..225dd44b
--- /dev/null
+++ b/ci/ansible/roles/minimal/README.md
@@ -0,0 +1,38 @@
+Role Name
+=========
+
+A brief description of the role goes here.
+
+Requirements
+------------
+
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+
+Role Variables
+--------------
+
+A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
+
+Dependencies
+------------
+
+A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+
+Example Playbook
+----------------
+
+Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
+
+ - hosts: servers
+ roles:
+ - { role: username.rolename, x: 42 }
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+An optional section for the role authors to include contact information, or a website (HTML is not allowed).
diff --git a/ci/ansible/roles/minimal/defaults/main.yaml b/ci/ansible/roles/minimal/defaults/main.yaml
new file mode 100644
index 00000000..4a5a46cd
--- /dev/null
+++ b/ci/ansible/roles/minimal/defaults/main.yaml
@@ -0,0 +1,2 @@
+---
+# defaults file for minimal
diff --git a/ci/ansible/roles/minimal/handlers/main.yaml b/ci/ansible/roles/minimal/handlers/main.yaml
new file mode 100644
index 00000000..89105fec
--- /dev/null
+++ b/ci/ansible/roles/minimal/handlers/main.yaml
@@ -0,0 +1,2 @@
+---
+# handlers file for minimal
diff --git a/ci/ansible/roles/minimal/meta/main.yaml b/ci/ansible/roles/minimal/meta/main.yaml
new file mode 100644
index 00000000..ecc81ab7
--- /dev/null
+++ b/ci/ansible/roles/minimal/meta/main.yaml
@@ -0,0 +1,23 @@
+galaxy_info:
+ author: AlmaLinux OS Community
+ description: Minimal configuration for ELevate
+ company: AlmaLinux OS Foundation
+
+ license: GPL-3.0-or-later
+
+ min_ansible_version: 2.11
+
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ - 8
+ - 9
+
+ galaxy_tags:
+ - elevate
+ - upgrade
+ - cleanup
+ - el7
+ - el8
+ - el9
diff --git a/ci/ansible/roles/minimal/tasks/cleanup_el7.yaml b/ci/ansible/roles/minimal/tasks/cleanup_el7.yaml
new file mode 100644
index 00000000..1b4af7c6
--- /dev/null
+++ b/ci/ansible/roles/minimal/tasks/cleanup_el7.yaml
@@ -0,0 +1,10 @@
+---
+# Remove old kernels
+- name: Install the yum-utils
+ ansible.builtin.yum:
+ name: yum-utils
+ state: present
+ update_cache: yes
+
+- name: Remove the old kernels on EL7
+ ansible.builtin.command: package-cleanup -y --oldkernels --count=1
diff --git a/ci/ansible/roles/minimal/tasks/cleanup_el8.yaml b/ci/ansible/roles/minimal/tasks/cleanup_el8.yaml
new file mode 100644
index 00000000..56aeefd3
--- /dev/null
+++ b/ci/ansible/roles/minimal/tasks/cleanup_el8.yaml
@@ -0,0 +1,7 @@
+---
+# Remove old kernels
+- name: Remove old kernels on EL8
+ ansible.builtin.command: dnf -y remove --oldinstallonly
+ register: removeoldkernels
+ changed_when: removeoldkernels.rc == 0
+ failed_when: removeoldkernels.rc > 1
diff --git a/ci/ansible/roles/minimal/tasks/main.yaml b/ci/ansible/roles/minimal/tasks/main.yaml
new file mode 100644
index 00000000..8c1b35bd
--- /dev/null
+++ b/ci/ansible/roles/minimal/tasks/main.yaml
@@ -0,0 +1,21 @@
+---
+# tasks file for minimal
+- name: Upgrade the packages on EL7
+ ansible.builtin.include_tasks: upgrade_el7.yaml
+ when: ansible_facts['distribution_major_version'] == '7'
+
+- name: Upgrade the packages on EL8
+ ansible.builtin.include_tasks: upgrade_el8.yaml
+ when: ansible_facts['distribution_major_version'] == '8'
+
+- name: Reboot the system
+ ansible.builtin.reboot:
+ when: upgrade_status is changed
+
+- name: Cleanup the older kernels on EL7
+ ansible.builtin.include_tasks: cleanup_el7.yaml
+ when: ansible_facts['distribution_major_version'] == '7'
+
+- name: Cleanup the older kernels on EL8
+ ansible.builtin.include_tasks: cleanup_el8.yaml
+ when: ansible_facts['distribution_major_version'] == '8'
diff --git a/ci/ansible/roles/minimal/tasks/upgrade_el7.yaml b/ci/ansible/roles/minimal/tasks/upgrade_el7.yaml
new file mode 100644
index 00000000..7648a586
--- /dev/null
+++ b/ci/ansible/roles/minimal/tasks/upgrade_el7.yaml
@@ -0,0 +1,8 @@
+---
+# Upgrade the system
+- name: Upgrade the system
+ ansible.builtin.yum:
+ name: "*"
+ state: latest
+ update_cache: yes
+ register: upgrade_status
diff --git a/ci/ansible/roles/minimal/tasks/upgrade_el8.yaml b/ci/ansible/roles/minimal/tasks/upgrade_el8.yaml
new file mode 100644
index 00000000..0d4a5d2a
--- /dev/null
+++ b/ci/ansible/roles/minimal/tasks/upgrade_el8.yaml
@@ -0,0 +1,8 @@
+---
+# Upgrade the system
+- name: Upgrade the system
+ ansible.builtin.dnf:
+ name: "*"
+ state: latest
+ update_cache: yes
+ register: upgrade_status
diff --git a/ci/ansible/roles/minimal/tests/inventory b/ci/ansible/roles/minimal/tests/inventory
new file mode 100644
index 00000000..878877b0
--- /dev/null
+++ b/ci/ansible/roles/minimal/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/ci/ansible/roles/minimal/tests/test.yaml b/ci/ansible/roles/minimal/tests/test.yaml
new file mode 100644
index 00000000..db5c4c17
--- /dev/null
+++ b/ci/ansible/roles/minimal/tests/test.yaml
@@ -0,0 +1,5 @@
+---
+- hosts: localhost
+ remote_user: root
+ roles:
+ - minimal
diff --git a/ci/ansible/roles/minimal/vars/main.yaml b/ci/ansible/roles/minimal/vars/main.yaml
new file mode 100644
index 00000000..b24df080
--- /dev/null
+++ b/ci/ansible/roles/minimal/vars/main.yaml
@@ -0,0 +1,2 @@
+---
+# vars file for minimal
diff --git a/ci/jenkins/ELevate_el7toel8_Development.jenkinsfile b/ci/jenkins/ELevate_el7toel8_Development.jenkinsfile
new file mode 100644
index 00000000..317209ef
--- /dev/null
+++ b/ci/jenkins/ELevate_el7toel8_Development.jenkinsfile
@@ -0,0 +1,249 @@
+RETRY = params.RETRY
+TIMEOUT = params.TIMEOUT
+
+pipeline {
+ agent {
+ label 'x86_64 && bm'
+ }
+ options {
+ timestamps()
+ parallelsAlwaysFailFast()
+ }
+ parameters {
+ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a target distro or all for ELevation')
+ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration')
+ string(name: 'LEAPP_SRC_GIT_USER', defaultValue: 'AlmaLinux', description: 'Input name of Git user of LEAPP source', trim: true)
+ string(name: 'LEAPP_SRC_GIT_BRANCH', defaultValue: 'almalinux', description: 'Input name of Git branch of LEAPP source', trim: true)
+ string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true)
+ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true)
+ }
+ environment {
+ VAGRANT_NO_COLOR = '1'
+ }
+ stages {
+ stage('Prepare') {
+ steps {
+ sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml',
+ label: 'Install Ansible collections'
+ sh script: 'python3.11 -m venv .venv',
+ label: 'Create Python virtual environment'
+ sh script: '. .venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko',
+ label: 'Install Testinfra'
+ sh script: 'git clone https://github.com/AlmaLinux/leapp-data.git --branch devel',
+ label: 'Fetch devel version of leapp data'
+ }
+ }
+ stage('CreateSingleMachine') {
+ when {
+ expression { params.TARGET_DISTRO_FILTER != 'all' }
+ }
+ environment {
+ CONFIG = "${CONF_FILTER}"
+ }
+ steps {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER)
+
+ sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile',
+ label: 'Generate Vagrantfile'
+ sh script: "vagrant up $targetDistro.vmName",
+ label: 'Create source VM'
+ }
+ }
+ }
+ stage('CreateMultiMachine') {
+ when {
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
+ }
+ environment {
+ CONFIG = "${CONF_FILTER}"
+ }
+ steps {
+ sh script: 'cp ci/vagrant/el7toel8_multi.rb Vagrantfile',
+ label: 'Generate Vagrantfile'
+ sh script: 'vagrant up',
+ label: 'Create source VM'
+ }
+ }
+ stage('ELevationAndTest') {
+ matrix {
+ when {
+ anyOf {
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
+ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO }
+ }
+ }
+ axes {
+ axis {
+ name 'TARGET_DISTRO'
+ values 'almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8'
+ }
+ }
+ stages {
+ stage('ELevate') {
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
+
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum-config-manager --add-repo https://repo.almalinux.org/elevate/testing/elevate-testing.repo\"",
+ label: 'Add testing repo of ELevate'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y leapp-upgrade\"",
+ label: 'Install testing version of ELevate'
+ sh script: "vagrant upload ci/scripts/install_elevate_dev.sh install_elevate_dev.sh $targetDistro.vmName",
+ label: 'Upload installer script to VMs'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo bash install_elevate_dev.sh -u ${LEAPP_SRC_GIT_USER} -b ${LEAPP_SRC_GIT_BRANCH}\"",
+ label: 'Install development version of ELevate',
+ returnStatus: true
+ sh script: "vagrant upload leapp-data/ leapp-data/ --compress $targetDistro.vmName",
+ label: 'Upload devel branch of leapp data'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo mkdir -p /etc/leapp/files/vendors.d\"",
+ label: 'Create directory structure of leapp data'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo install -t /etc/leapp/files leapp-data/files/${targetDistro.leappData}/*\"",
+ label: 'Install devel version of leapp data'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo install -t /etc/leapp/files/vendors.d leapp-data/vendors.d/*\"",
+ label: 'Install devel version of leapp vendor data'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo mv -f /etc/leapp/files/leapp_upgrade_repositories.repo.el8 /etc/leapp/files/leapp_upgrade_repositories.repo\"",
+ label: 'Configure leapp upgrade repositories for EL7toEL8'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo mv -f /etc/leapp/files/repomap.json.el8 /etc/leapp/files/repomap.json\"",
+ label: 'Configure leapp repository mapping for EL7toEL8'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum -y install tree && sudo tree -ha /etc/leapp\"",
+ label: 'Check if development version of leapp data installed correctly'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"",
+ label: 'Start pre-upgrade check',
+ returnStatus: true
+ sh script: "vagrant ssh $targetDistro.vmName -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"",
+ label: 'Permit ssh as root login'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"",
+ label: 'Answer the leapp question'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"",
+ label: 'Start the Upgrade'
+ sh script: "vagrant reload $targetDistro.vmName",
+ label: 'Reboot to the ELevate initramfs'
+ sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config",
+ label: 'Generate the ssh-config file'
+ }
+ }
+ }
+ }
+ }
+ stage('Distro Tests') {
+ when {
+ anyOf {
+ expression { params.CONF_FILTER == 'minimal' }
+ expression { params.CONF_FILTER == 'docker-ce' }
+ }
+ }
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
+
+ sh script: 'rm -f conftest.py pytest.ini',
+ label: 'Delete root conftest.py file'
+ sh script: """
+ . .venv/bin/activate \
+ && py.test -v --hosts=${targetDistro.vmName} \
+ --ssh-config=.vagrant/ssh-config \
+ --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \
+ ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py
+ """,
+ label: 'Run the distro specific tests'
+ }
+ }
+ }
+ }
+ }
+ stage('Docker Tests') {
+ when {
+ anyOf {
+ expression { params.CONF_FILTER == 'docker-ce' }
+ }
+ }
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
+
+ sh script: """
+ . .venv/bin/activate \
+ && py.test -v --hosts=${targetDistro.vmName} \
+ --ssh-config=.vagrant/ssh-config \
+ --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \
+ ci/tests/tests/docker/test_docker_ce.py
+ """,
+ label: 'Run the docker specific tests'
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ post {
+ success {
+ junit testResults: 'ci/tests/tests/**/**_junit.xml',
+ skipPublishingChecks: true
+ }
+ cleanup {
+ sh script: 'vagrant destroy -f --no-parallel -g',
+ label: 'Destroy VMs'
+ cleanWs()
+ }
+ }
+}
+
+def targetDistroSpec(distro) {
+ def spec = [:]
+
+ switch (distro) {
+ case 'almalinux-8':
+ vm = 'almalinux_8'
+ ldata = 'almalinux'
+
+ spec = [
+ vmName: vm,
+ leappData: ldata
+ ]
+ break
+ case 'centos-stream-8':
+ vm = 'centosstream_8'
+ ldata = 'centos'
+
+ spec = [
+ vmName: vm,
+ leappData: ldata
+ ]
+ break
+ case 'oraclelinux-8':
+ vm = 'oraclelinux_8'
+ ldata = 'oraclelinux'
+
+ spec = [
+ vmName: vm,
+ leappData: ldata
+ ]
+ break
+ case 'rocky-8':
+ vm = 'rocky_8'
+ ldata = 'rocky'
+
+ spec = [
+ vmName: vm,
+ leappData: ldata
+ ]
+ break
+ default:
+ spec = [
+ vmName: 'unknown',
+ leappData: 'unknown'
+ ]
+ break
+ }
+ return spec
+}
diff --git a/ci/jenkins/ELevate_el7toel8_Internal.jenkinsfile b/ci/jenkins/ELevate_el7toel8_Internal.jenkinsfile
new file mode 100644
index 00000000..97f900fe
--- /dev/null
+++ b/ci/jenkins/ELevate_el7toel8_Internal.jenkinsfile
@@ -0,0 +1,230 @@
+RETRY = params.RETRY
+TIMEOUT = params.TIMEOUT
+
+pipeline {
+ agent {
+ label 'x86_64 && bm'
+ }
+ options {
+ timestamps()
+ parallelsAlwaysFailFast()
+ }
+ parameters {
+ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a target distro or all for ELevation')
+ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration')
+ string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true)
+ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true)
+ }
+ environment {
+ VAGRANT_NO_COLOR = '1'
+ }
+ stages {
+ stage('Prepare') {
+ steps {
+ sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml',
+ label: 'Install Ansible collections'
+ sh script: 'python3.11 -m venv .venv',
+ label: 'Create Python virtual environment'
+ sh script: '. .venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko',
+ label: 'Install Testinfra'
+ }
+ }
+ stage('CreateSingleMachine') {
+ when {
+ expression { params.TARGET_DISTRO_FILTER != 'all' }
+ }
+ environment {
+ CONFIG = "${CONF_FILTER}"
+ }
+ steps {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER)
+
+ sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile',
+ label: 'Generate Vagrantfile'
+ sh script: "vagrant up $targetDistro.vmName",
+ label: 'Create source VM'
+ }
+ }
+ }
+ stage('CreateMultiMachine') {
+ when {
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
+ }
+ environment {
+ CONFIG = "${CONF_FILTER}"
+ }
+ steps {
+ sh script: 'cp ci/vagrant/el7toel8_multi.rb Vagrantfile',
+ label: 'Generate Vagrantfile'
+ sh script: 'vagrant up',
+ label: 'Create source VM'
+ }
+ }
+ stage('ELevationAndTest') {
+ matrix {
+ when {
+ anyOf {
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
+ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO }
+ }
+ }
+ axes {
+ axis {
+ name 'TARGET_DISTRO'
+ values 'almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8'
+ }
+ }
+ stages {
+ stage('ELevate') {
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
+
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y https://repo.almalinux.org/elevate/elevate-release-latest-el7.noarch.rpm\"",
+ label: 'Install the elevate-release-latest rpm packages for EL7'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"wget https://build.almalinux.org/pulp/content/copr/eabdullin1-leapp-data-internal-almalinux-8-x86_64-dr/config.repo -O /etc/yum.repos.d/internal-leapp.repo\"",
+ label: 'Add pulp repository'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y leapp-upgrade\"",
+ label: 'Install the leapp rpm package'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y $targetDistro.leappData\"",
+ label: 'Install the leapp migration data rpm packages'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"",
+ label: 'Start the Pre-Upgrade check',
+ returnStatus: true
+ sh script: "vagrant ssh $targetDistro.vmName -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"",
+ label: 'Permit ssh as root login'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"",
+ label: 'Answer the leapp question'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"",
+ label: 'Start the Upgrade'
+ sh script: "vagrant reload $targetDistro.vmName",
+ label: 'Reboot to the ELevate initramfs'
+ sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config",
+ label: 'Generate the ssh-config file'
+ }
+ }
+ }
+ }
+ }
+ stage('Distro Tests') {
+ when {
+ anyOf {
+ expression { params.CONF_FILTER == 'minimal' }
+ expression { params.CONF_FILTER == 'docker-ce' }
+ }
+ }
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
+
+ sh script: 'rm -f conftest.py pytest.ini',
+ label: 'Delete root conftest.py file'
+ sh script: """
+ . .venv/bin/activate \
+ && py.test -v --hosts=${targetDistro.vmName} \
+ --ssh-config=.vagrant/ssh-config \
+ --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \
+ ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py
+ """,
+ label: 'Run the distro specific tests'
+ }
+ }
+ }
+ }
+ }
+ stage('Docker Tests') {
+ when {
+ anyOf {
+ expression { params.CONF_FILTER == 'docker-ce' }
+ }
+ }
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
+
+ sh script: """
+ . .venv/bin/activate \
+ && py.test -v --hosts=${targetDistro.vmName} \
+ --ssh-config=.vagrant/ssh-config \
+ --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \
+ ci/tests/tests/docker/test_docker_ce.py
+ """,
+ label: 'Run the docker specific tests'
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ post {
+ success {
+ junit testResults: 'ci/tests/tests/**/**_junit.xml',
+ skipPublishingChecks: true
+ }
+ cleanup {
+ sh script: 'vagrant destroy -f --no-parallel -g',
+ label: 'Destroy VMs'
+ cleanWs()
+ }
+ }
+}
+
+def targetDistroSpec(distro) {
+ def spec = [:]
+
+ switch (distro) {
+ case 'almalinux-8':
+ vm = 'almalinux_8'
+ ldata = 'leapp-data-almalinux'
+
+ spec = [
+ vmName: vm,
+ leappData: ldata
+ ]
+ break
+ case 'centos-stream-8':
+ vm = 'centosstream_8'
+ ldata = 'leapp-data-centos'
+
+ spec = [
+ vmName: vm,
+ leappData: ldata
+ ]
+ break
+ case 'oraclelinux-8':
+ vm = 'oraclelinux_8'
+ ldata = 'leapp-data-oraclelinux'
+
+ spec = [
+ vmName: vm,
+ leappData: ldata
+ ]
+ break
+ case 'rocky-8':
+ vm = 'rocky_8'
+ ldata = 'leapp-data-rocky'
+
+ spec = [
+ vmName: vm,
+ leappData: ldata
+ ]
+ break
+ default:
+ spec = [
+ vmName: 'unknown',
+ leappData: 'unknown'
+ ]
+ break
+ }
+ return spec
+}
diff --git a/ci/jenkins/ELevate_el7toel8_Internal_Dev.jenkinsfile b/ci/jenkins/ELevate_el7toel8_Internal_Dev.jenkinsfile
new file mode 100644
index 00000000..af2fabe2
--- /dev/null
+++ b/ci/jenkins/ELevate_el7toel8_Internal_Dev.jenkinsfile
@@ -0,0 +1,253 @@
+RETRY = params.RETRY
+TIMEOUT = params.TIMEOUT
+
+pipeline {
+ agent {
+ label 'x86_64 && bm'
+ }
+ options {
+ timestamps()
+ parallelsAlwaysFailFast()
+ }
+ parameters {
+ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a target distro or all for ELevation')
+ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration')
+ string(name: 'LEAPP_SRC_GIT_USER', defaultValue: 'AlmaLinux', description: 'Input name of Git user of LEAPP source', trim: true)
+ string(name: 'LEAPP_SRC_GIT_BRANCH', defaultValue: 'almalinux', description: 'Input name of Git branch of LEAPP source', trim: true)
+ string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true)
+ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true)
+ }
+ environment {
+ VAGRANT_NO_COLOR = '1'
+ }
+ stages {
+ stage('Prepare') {
+ steps {
+ sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml',
+ label: 'Install Ansible collections'
+ sh script: 'python3.11 -m venv .venv',
+ label: 'Create Python virtual environment'
+ sh script: '. .venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko',
+ label: 'Install Testinfra'
+ sh script: 'git clone https://github.com/AlmaLinux/leapp-data.git --branch devel',
+ label: 'Fetch devel version of leapp data'
+ }
+ }
+ stage('CreateSingleMachine') {
+ when {
+ expression { params.TARGET_DISTRO_FILTER != 'all' }
+ }
+ environment {
+ CONFIG = "${CONF_FILTER}"
+ }
+ steps {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER)
+
+ sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile',
+ label: 'Generate Vagrantfile'
+ sh script: "vagrant up $targetDistro.vmName",
+ label: 'Create source VM'
+ }
+ }
+ }
+ stage('CreateMultiMachine') {
+ when {
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
+ }
+ environment {
+ CONFIG = "${CONF_FILTER}"
+ }
+ steps {
+ sh script: 'cp ci/vagrant/el7toel8_multi.rb Vagrantfile',
+ label: 'Generate Vagrantfile'
+ sh script: 'vagrant up',
+ label: 'Create source VM'
+ }
+ }
+ stage('ELevationAndTest') {
+ matrix {
+ when {
+ anyOf {
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
+ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO }
+ }
+ }
+ axes {
+ axis {
+ name 'TARGET_DISTRO'
+ values 'almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8'
+ }
+ }
+ stages {
+ stage('ELevate') {
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
+
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum-config-manager --add-repo https://repo.almalinux.org/elevate/testing/elevate-testing.repo\"",
+ label: 'Add testing repo of ELevate'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo wget https://build.almalinux.org/pulp/content/copr/eabdullin1-leapp-data-internal-centos7-x86_64-dr/config.repo -O /etc/yum.repos.d/internal-leapp.repo\"",
+ label: 'Add pulp repository'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo sed -i 's|enabled=1|enabled=1\\npriority=80|' /etc/yum.repos.d/internal-leapp.repo\"",
+ label: 'Set priority for pulp repository'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y leapp-upgrade\"",
+ label: 'Install testing version of ELevate'
+ sh script: "vagrant upload ci/scripts/install_elevate_dev.sh install_elevate_dev.sh $targetDistro.vmName",
+ label: 'Upload installer script to VMs'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo bash install_elevate_dev.sh -u ${LEAPP_SRC_GIT_USER} -b ${LEAPP_SRC_GIT_BRANCH}\"",
+ label: 'Install development version of ELevate',
+ returnStatus: true
+ sh script: "vagrant upload leapp-data/ leapp-data/ --compress $targetDistro.vmName",
+ label: 'Upload devel branch of leapp data'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo mkdir -p /etc/leapp/files/vendors.d\"",
+ label: 'Create directory structure of leapp data'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo install -t /etc/leapp/files leapp-data/files/${targetDistro.leappData}/*\"",
+ label: 'Install devel version of leapp data'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo install -t /etc/leapp/files/vendors.d leapp-data/vendors.d/*\"",
+ label: 'Install devel version of leapp vendor data'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo mv -f /etc/leapp/files/leapp_upgrade_repositories.repo.el8 /etc/leapp/files/leapp_upgrade_repositories.repo\"",
+ label: 'Configure leapp upgrade repositories for EL7toEL8'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo mv -f /etc/leapp/files/repomap.json.el8 /etc/leapp/files/repomap.json\"",
+ label: 'Configure leapp repository mapping for EL7toEL8'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum -y install tree && sudo tree -ha /etc/leapp\"",
+ label: 'Check if development version of leapp data installed correctly'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"",
+ label: 'Start pre-upgrade check',
+ returnStatus: true
+ sh script: "vagrant ssh $targetDistro.vmName -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"",
+ label: 'Permit ssh as root login'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"",
+ label: 'Answer the leapp question'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"",
+ label: 'Start the Upgrade'
+ sh script: "vagrant reload $targetDistro.vmName",
+ label: 'Reboot to the ELevate initramfs'
+ sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config",
+ label: 'Generate the ssh-config file'
+ }
+ }
+ }
+ }
+ }
+ stage('Distro Tests') {
+ when {
+ anyOf {
+ expression { params.CONF_FILTER == 'minimal' }
+ expression { params.CONF_FILTER == 'docker-ce' }
+ }
+ }
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
+
+ sh script: 'rm -f conftest.py pytest.ini',
+ label: 'Delete root conftest.py file'
+ sh script: """
+ . .venv/bin/activate \
+ && py.test -v --hosts=${targetDistro.vmName} \
+ --ssh-config=.vagrant/ssh-config \
+ --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \
+ ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py
+ """,
+ label: 'Run the distro specific tests'
+ }
+ }
+ }
+ }
+ }
+ stage('Docker Tests') {
+ when {
+ anyOf {
+ expression { params.CONF_FILTER == 'docker-ce' }
+ }
+ }
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
+
+ sh script: """
+ . .venv/bin/activate \
+ && py.test -v --hosts=${targetDistro.vmName} \
+ --ssh-config=.vagrant/ssh-config \
+ --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \
+ ci/tests/tests/docker/test_docker_ce.py
+ """,
+ label: 'Run the docker specific tests'
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ post {
+ success {
+ junit testResults: 'ci/tests/tests/**/**_junit.xml',
+ skipPublishingChecks: true
+ }
+ cleanup {
+ sh script: 'vagrant destroy -f --no-parallel -g',
+ label: 'Destroy VMs'
+ cleanWs()
+ }
+ }
+}
+
+def targetDistroSpec(distro) {
+ def spec = [:]
+
+ switch (distro) {
+ case 'almalinux-8':
+ vm = 'almalinux_8'
+ ldata = 'almalinux'
+
+ spec = [
+ vmName: vm,
+ leappData: ldata
+ ]
+ break
+ case 'centos-stream-8':
+ vm = 'centosstream_8'
+ ldata = 'centos'
+
+ spec = [
+ vmName: vm,
+ leappData: ldata
+ ]
+ break
+ case 'oraclelinux-8':
+ vm = 'oraclelinux_8'
+ ldata = 'oraclelinux'
+
+ spec = [
+ vmName: vm,
+ leappData: ldata
+ ]
+ break
+ case 'rocky-8':
+ vm = 'rocky_8'
+ ldata = 'rocky'
+
+ spec = [
+ vmName: vm,
+ leappData: ldata
+ ]
+ break
+ default:
+ spec = [
+ vmName: 'unknown',
+ leappData: 'unknown'
+ ]
+ break
+ }
+ return spec
+}
diff --git a/ci/jenkins/ELevate_el7toel8_Stable.jenkinsfile b/ci/jenkins/ELevate_el7toel8_Stable.jenkinsfile
new file mode 100644
index 00000000..ae9bdb57
--- /dev/null
+++ b/ci/jenkins/ELevate_el7toel8_Stable.jenkinsfile
@@ -0,0 +1,228 @@
+RETRY = params.RETRY
+TIMEOUT = params.TIMEOUT
+
+pipeline {
+ agent {
+ label 'x86_64 && bm'
+ }
+ options {
+ timestamps()
+ parallelsAlwaysFailFast()
+ }
+ parameters {
+ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a target distro or all for ELevation')
+ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration')
+ string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true)
+ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true)
+ }
+ environment {
+ VAGRANT_NO_COLOR = '1'
+ }
+ stages {
+ stage('Prepare') {
+ steps {
+ sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml',
+ label: 'Install Ansible collections'
+ sh script: 'python3.11 -m venv .venv',
+ label: 'Create Python virtual environment'
+ sh script: '. .venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko',
+ label: 'Install Testinfra'
+ }
+ }
+ stage('CreateSingleMachine') {
+ when {
+ expression { params.TARGET_DISTRO_FILTER != 'all' }
+ }
+ environment {
+ CONFIG = "${CONF_FILTER}"
+ }
+ steps {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER)
+
+ sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile',
+ label: 'Generate Vagrantfile'
+ sh script: "vagrant up $targetDistro.vmName",
+ label: 'Create source VM'
+ }
+ }
+ }
+ stage('CreateMultiMachine') {
+ when {
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
+ }
+ environment {
+ CONFIG = "${CONF_FILTER}"
+ }
+ steps {
+ sh script: 'cp ci/vagrant/el7toel8_multi.rb Vagrantfile',
+ label: 'Generate Vagrantfile'
+ sh script: 'vagrant up',
+ label: 'Create source VM'
+ }
+ }
+ stage('ELevationAndTest') {
+ matrix {
+ when {
+ anyOf {
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
+ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO }
+ }
+ }
+ axes {
+ axis {
+ name 'TARGET_DISTRO'
+ values 'almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8'
+ }
+ }
+ stages {
+ stage('ELevate') {
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
+
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y https://repo.almalinux.org/elevate/elevate-release-latest-el7.noarch.rpm\"",
+ label: 'Install the elevate-release-latest rpm packages for EL7'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y leapp-upgrade\"",
+ label: 'Install the leapp rpm package'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y $targetDistro.leappData\"",
+ label: 'Install the leapp migration data rpm packages'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"",
+ label: 'Start the Pre-Upgrade check',
+ returnStatus: true
+ sh script: "vagrant ssh $targetDistro.vmName -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"",
+ label: 'Permit ssh as root login'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"",
+ label: 'Answer the leapp question'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"",
+ label: 'Start the Upgrade'
+ sh script: "vagrant reload $targetDistro.vmName",
+ label: 'Reboot to the ELevate initramfs'
+ sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config",
+ label: 'Generate the ssh-config file'
+ }
+ }
+ }
+ }
+ }
+ stage('Distro Tests') {
+ when {
+ anyOf {
+ expression { params.CONF_FILTER == 'minimal' }
+ expression { params.CONF_FILTER == 'docker-ce' }
+ }
+ }
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
+
+ sh script: 'rm -f conftest.py pytest.ini',
+ label: 'Delete root conftest.py file'
+ sh script: """
+ . .venv/bin/activate \
+ && py.test -v --hosts=${targetDistro.vmName} \
+ --ssh-config=.vagrant/ssh-config \
+ --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \
+ ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py
+ """,
+ label: 'Run the distro specific tests'
+ }
+ }
+ }
+ }
+ }
+ stage('Docker Tests') {
+ when {
+ anyOf {
+ expression { params.CONF_FILTER == 'docker-ce' }
+ }
+ }
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
+
+ sh script: """
+ . .venv/bin/activate \
+ && py.test -v --hosts=${targetDistro.vmName} \
+ --ssh-config=.vagrant/ssh-config \
+ --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \
+ ci/tests/tests/docker/test_docker_ce.py
+ """,
+ label: 'Run the docker specific tests'
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ post {
+ success {
+ junit testResults: 'ci/tests/tests/**/**_junit.xml',
+ skipPublishingChecks: true
+ }
+ cleanup {
+ sh script: 'vagrant destroy -f --no-parallel -g',
+ label: 'Destroy VMs'
+ cleanWs()
+ }
+ }
+}
+
+def targetDistroSpec(distro) {
+ def spec = [:]
+
+ switch (distro) {
+ case 'almalinux-8':
+ vm = 'almalinux_8'
+ ldata = 'leapp-data-almalinux'
+
+ spec = [
+ vmName: vm,
+ leappData: ldata
+ ]
+ break
+ case 'centos-stream-8':
+ vm = 'centosstream_8'
+ ldata = 'leapp-data-centos'
+
+ spec = [
+ vmName: vm,
+ leappData: ldata
+ ]
+ break
+ case 'oraclelinux-8':
+ vm = 'oraclelinux_8'
+ ldata = 'leapp-data-oraclelinux'
+
+ spec = [
+ vmName: vm,
+ leappData: ldata
+ ]
+ break
+ case 'rocky-8':
+ vm = 'rocky_8'
+ ldata = 'leapp-data-rocky'
+
+ spec = [
+ vmName: vm,
+ leappData: ldata
+ ]
+ break
+ default:
+ spec = [
+ vmName: 'unknown',
+ leappData: 'unknown'
+ ]
+ break
+ }
+ return spec
+}
diff --git a/ci/jenkins/ELevate_el7toel8_Testing.jenkinsfile b/ci/jenkins/ELevate_el7toel8_Testing.jenkinsfile
new file mode 100644
index 00000000..0f37cf2e
--- /dev/null
+++ b/ci/jenkins/ELevate_el7toel8_Testing.jenkinsfile
@@ -0,0 +1,228 @@
+RETRY = params.RETRY
+TIMEOUT = params.TIMEOUT
+
+pipeline {
+ agent {
+ label 'x86_64 && bm'
+ }
+ options {
+ timestamps()
+ parallelsAlwaysFailFast()
+ }
+ parameters {
+ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a target distro or all for ELevation')
+ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration')
+ string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true)
+ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true)
+ }
+ environment {
+ VAGRANT_NO_COLOR = '1'
+ }
+ stages {
+ stage('Prepare') {
+ steps {
+ sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml',
+ label: 'Install Ansible collections'
+ sh script: 'python3.11 -m venv .venv',
+ label: 'Create Python virtual environment'
+ sh script: '. .venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko',
+ label: 'Install Testinfra'
+ }
+ }
+ stage('CreateSingleMachine') {
+ when {
+ expression { params.TARGET_DISTRO_FILTER != 'all' }
+ }
+ environment {
+ CONFIG = "${CONF_FILTER}"
+ }
+ steps {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER)
+
+ sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile',
+ label: 'Generate Vagrantfile'
+ sh script: "vagrant up $targetDistro.vmName",
+ label: 'Create source VM'
+ }
+ }
+ }
+ stage('CreateMultiMachine') {
+ when {
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
+ }
+ environment {
+ CONFIG = "${CONF_FILTER}"
+ }
+ steps {
+ sh script: 'cp ci/vagrant/el7toel8_multi.rb Vagrantfile',
+ label: 'Generate Vagrantfile'
+ sh script: 'vagrant up',
+ label: 'Create source VM'
+ }
+ }
+ stage('ELevationAndTest') {
+ matrix {
+ when {
+ anyOf {
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
+ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO }
+ }
+ }
+ axes {
+ axis {
+ name 'TARGET_DISTRO'
+ values 'almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8'
+ }
+ }
+ stages {
+ stage('ELevate') {
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
+
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum-config-manager --add-repo https://repo.almalinux.org/elevate/testing/elevate-testing.repo\"",
+ label: 'Add testing repo of ELevate'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y leapp-upgrade\"",
+ label: 'Install the leapp rpm package'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y $targetDistro.leappData\"",
+ label: 'Install the leapp migration data rpm packages'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"",
+ label: 'Start the Pre-Upgrade check',
+ returnStatus: true
+ sh script: "vagrant ssh $targetDistro.vmName -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"",
+ label: 'Permit ssh as root login'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"",
+ label: 'Answer the leapp question'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"",
+ label: 'Start the Upgrade'
+ sh script: "vagrant reload $targetDistro.vmName",
+ label: 'Reboot to the ELevate initramfs'
+ sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config",
+ label: 'Generate the ssh-config file'
+ }
+ }
+ }
+ }
+ }
+ stage('Distro Tests') {
+ when {
+ anyOf {
+ expression { params.CONF_FILTER == 'minimal' }
+ expression { params.CONF_FILTER == 'docker-ce' }
+ }
+ }
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
+
+ sh script: 'rm -f conftest.py pytest.ini',
+ label: 'Delete root conftest.py file'
+ sh script: """
+ . .venv/bin/activate \
+ && py.test -v --hosts=${targetDistro.vmName} \
+ --ssh-config=.vagrant/ssh-config \
+ --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \
+ ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py
+ """,
+ label: 'Run the distro specific tests'
+ }
+ }
+ }
+ }
+ }
+ stage('Docker Tests') {
+ when {
+ anyOf {
+ expression { params.CONF_FILTER == 'docker-ce' }
+ }
+ }
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
+
+ sh script: """
+ . .venv/bin/activate \
+ && py.test -v --hosts=${targetDistro.vmName} \
+ --ssh-config=.vagrant/ssh-config \
+ --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \
+ ci/tests/tests/docker/test_docker_ce.py
+ """,
+ label: 'Run the docker specific tests'
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ post {
+ success {
+ junit testResults: 'ci/tests/tests/**/**_junit.xml',
+ skipPublishingChecks: true
+ }
+ cleanup {
+ sh script: 'vagrant destroy -f --no-parallel -g',
+ label: 'Destroy VMs'
+ cleanWs()
+ }
+ }
+}
+
+def targetDistroSpec(distro) {
+ def spec = [:]
+
+ switch (distro) {
+ case 'almalinux-8':
+ vm = 'almalinux_8'
+ ldata = 'leapp-data-almalinux'
+
+ spec = [
+ vmName: vm,
+ leappData: ldata
+ ]
+ break
+ case 'centos-stream-8':
+ vm = 'centosstream_8'
+ ldata = 'leapp-data-centos'
+
+ spec = [
+ vmName: vm,
+ leappData: ldata
+ ]
+ break
+ case 'oraclelinux-8':
+ vm = 'oraclelinux_8'
+ ldata = 'leapp-data-oraclelinux'
+
+ spec = [
+ vmName: vm,
+ leappData: ldata
+ ]
+ break
+ case 'rocky-8':
+ vm = 'rocky_8'
+ ldata = 'leapp-data-rocky'
+
+ spec = [
+ vmName: vm,
+ leappData: ldata
+ ]
+ break
+ default:
+ spec = [
+ vmName: 'unknown',
+ leappData: 'unknown'
+ ]
+ break
+ }
+ return spec
+}
diff --git a/ci/jenkins/ELevate_el8toel9_Development.jenkinsfile b/ci/jenkins/ELevate_el8toel9_Development.jenkinsfile
new file mode 100644
index 00000000..7eb5430b
--- /dev/null
+++ b/ci/jenkins/ELevate_el8toel9_Development.jenkinsfile
@@ -0,0 +1,200 @@
+RETRY = params.RETRY
+TIMEOUT = params.TIMEOUT
+
+pipeline {
+ agent {
+ label params.AGENT
+ }
+ options {
+ timestamps()
+ }
+ parameters {
+ string(name: 'AGENT', defaultValue: 'almalinux-8-vagrant-libvirt-x86_64', description: 'Input label of the Jenkins Agent', trim: true)
+ string(name: 'RETRY', defaultValue: '3', description: 'Number of retries', trim: true)
+ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true)
+ string(name: 'REPO_URL', defaultValue: 'https://github.com/LKHN/el-test-auto-dev.git', description: 'URL of the pipeline repository', trim: true)
+ string(name: 'REPO_BRANCH', defaultValue: 'main', description: 'Branch of the pipeline repository', trim: true)
+ choice(name: 'SOURCE_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a source distro or all for ELevation')
+ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'centos-stream-9', 'oraclelinux-9', 'rocky-9', 'all'], description: 'Select a target distro or all for ELevation')
+ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration')
+ }
+ stages {
+ stage('Source') {
+ steps {
+ git url: REPO_URL,
+ branch: REPO_BRANCH,
+ credentialsId: 'github-almalinuxautobot'
+ }
+ }
+ stage('Prepare Build and Test environment') {
+ steps {
+ sh script: 'cp Vagrantfile.el8toel9 Vagrantfile',
+ label: 'Generate the el8toel9 Vagrantfile'
+ sh script: 'sudo dnf -y install python39-devel python39-wheel',
+ label: 'Install Python 3.9, PIP and Wheel'
+ sh script: 'sudo python3 -m pip install --no-cache-dir --upgrade -r requirements.txt',
+ label: 'Install TestInfra'
+ sh script: 'git clone https://github.com/AlmaLinux/leapp-data.git --branch devel',
+ label: 'Clone the leapp-data git repository'
+ }
+ }
+ stage('ELevation') {
+ matrix {
+ when {
+ allOf {
+ anyOf {
+ expression { params.SOURCE_DISTRO_FILTER == 'all' }
+ expression { params.SOURCE_DISTRO_FILTER == env.SOURCE_DISTRO }
+ }
+ anyOf {
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
+ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO }
+ }
+ }
+ }
+ axes {
+ axis {
+ name 'SOURCE_DISTRO'
+ values 'almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8'
+ }
+ axis {
+ name 'TARGET_DISTRO'
+ values 'almalinux-9', 'centos-stream-9', 'oraclelinux-9', 'rocky-9'
+ }
+ }
+ stages {
+ stage('Create and Configure Machines') {
+ environment {
+ CONFIG = "${CONF_FILTER}"
+ }
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ sh script: 'vagrant destroy -f $SOURCE_DISTRO',
+ label: 'Make sure no machine is present from the last retry'
+ sh script: 'vagrant up $SOURCE_DISTRO',
+ label: 'Create the source machines'
+ }
+ }
+ }
+ }
+ stage('ELevate to all target distros') {
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf config-manager --add-repo https://repo.almalinux.org/elevate/testing/elevate-testing.repo\"',
+ label: 'Add the ELevate Testing RPM repository'
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf install -y leapp-upgrade\"',
+ label: 'Install the leapp-upgrade rpm package'
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo bash /vagrant/scripts/install_elevate_dev.sh\"',
+ label: 'Install Development version of ELevate',
+ returnStatus: true
+ script {
+ def LEAPP_DATA = getLeappDataDistro(TARGET_DISTRO)
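+ // LEAPP_DATA resolves to the distro name (e.g. 'almalinux') and selects the matching subdirectory of /vagrant/leapp-data/files below.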
+ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo mkdir -p /etc/leapp/files/vendors.d\"",
+ label:'Create the LEAPP directory')
+ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo install -t /etc/leapp/files /vagrant/leapp-data/files/${LEAPP_DATA}/*\"",
+ label:"Install the LEAPP DATA")
+ sh(script:'vagrant ssh $SOURCE_DISTRO -c \"sudo install -t /etc/leapp/files/vendors.d /vagrant/leapp-data/vendors.d/*\"',
+ label:"Install the Vendor DATA")
+ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo mv -f /etc/leapp/files/leapp_upgrade_repositories.repo.el9 /etc/leapp/files/leapp_upgrade_repositories.repo\"",
+ label:'Set LEAPP Repos for EL8')
+ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo mv -f /etc/leapp/files/repomap.json.el9 /etc/leapp/files/repomap.json\"",
+ label:'Set LEAPP Repo map for EL8')
+ sh(script:'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf -y install tree && sudo tree -ha /etc/leapp\"',
+ label:"Debug: Data paths")
+ }
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp preupgrade\"',
+ label: 'Start the Pre-Upgrade check',
+ returnStatus: true
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"',
+ label: 'Permit ssh as root login'
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"',
+ label: 'Answer the leapp question'
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp upgrade\"',
+ label: 'Start the Upgrade'
+ sh script: 'vagrant reload $SOURCE_DISTRO',
+ label: 'Reboot to the ELevate initramfs'
+ sh script: 'vagrant ssh-config $SOURCE_DISTRO >> .vagrant/ssh-config',
+ label: 'Generate the ssh-config file'
+ }
+ }
+ }
+ }
+ stage('Distro Tests') {
+ when {
+ anyOf {
+ expression { params.CONF_FILTER == 'minimal'}
+ expression { params.CONF_FILTER == 'docker-ce'}
+ }
+ }
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ sh script: 'py.test -v --hosts=$SOURCE_DISTRO --ssh-config=.vagrant/ssh-config --junit-xml tests/distro/test_osinfo_$SOURCE_DISTRO-junit.xml tests/distro/test_osinfo_$SOURCE_DISTRO.py',
+ label: 'Run the distro specific tests'
+ }
+ }
+ }
+ }
+ stage('Docker Tests') {
+ when {
+ anyOf {
+ expression { params.CONF_FILTER == 'docker-ce'}
+ }
+ }
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ sh script: 'py.test -v --hosts=$SOURCE_DISTRO --ssh-config=.vagrant/ssh-config --junit-xml tests/docker/test_docker_ce_$SOURCE_DISTRO-junit.xml tests/docker/test_docker_ce.py',
+ label: 'Run the docker specific tests'
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ post {
+ success {
+ junit testResults: '**/tests/**/**-junit.xml',
+ skipPublishingChecks: true
+ }
+ cleanup {
+ sh script: 'vagrant destroy -f',
+ label: 'Destroy All Machines'
+ cleanWs()
+ }
+ }
+}
+
+/*
+* Common Functions
+*/
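+// Derives the leapp-data distro name from TARGET_DISTRO by dropping the version suffix, e.g. 'centos-stream-9' -> 'centos'.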
+def getLeappDataDistro(TARGET_DISTRO) {
+ def leapp_data = ""
+
+ switch(TARGET_DISTRO) {
+ case "almalinux-9":
+ leapp_data = TARGET_DISTRO.substring(0, 9)
+ break
+
+ case "centos-stream-9":
+ leapp_data = TARGET_DISTRO.substring(0, 6)
+ break
+
+ case "oraclelinux-9":
+ leapp_data = TARGET_DISTRO.substring(0, 11)
+ break
+
+ case "rocky-9":
+ leapp_data = TARGET_DISTRO.substring(0, 5)
+ break
+
+ default:
+ leapp_data = "Error: Target Distro Not Supported"
+ break
+ }
+ return leapp_data
+}
diff --git a/ci/jenkins/ELevate_el8toel9_Internal.jenkinsfile b/ci/jenkins/ELevate_el8toel9_Internal.jenkinsfile
new file mode 100644
index 00000000..aa6be967
--- /dev/null
+++ b/ci/jenkins/ELevate_el8toel9_Internal.jenkinsfile
@@ -0,0 +1,214 @@
+RETRY = params.RETRY
+TIMEOUT = params.TIMEOUT
+
+pipeline {
+ agent {
+ label 'x86_64 && bm'
+ }
+ options {
+ timestamps()
+ parallelsAlwaysFailFast()
+ }
+ parameters {
+ // choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'centos-stream-9', 'rocky-9', 'all'], description: 'Select a target distro or all for ELevation')
+ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'rocky-9', 'all'], description: 'Select a target distro or all for ELevation')
+ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration')
+ string(name: 'RETRY', defaultValue: '3', description: 'Number of retries', trim: true)
+ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true)
+ }
+ environment {
+ VAGRANT_NO_COLOR = '1'
+ }
+ stages {
+ stage('Prepare') {
+ steps {
+ sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml',
+ label: 'Install Ansible collections'
+ sh script: 'python3.11 -m venv .venv',
+ label: 'Create Python virtual environment'
+ sh script: '. .venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko',
+ label: 'Install Testinfra'
+ }
+ }
+ stage('CreateSingleMachine') {
+ when {
+ expression { params.TARGET_DISTRO_FILTER != 'all' }
+ }
+ environment {
+ CONFIG = "${CONF_FILTER}"
+ }
+ steps {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER)
+
+ sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile',
+ label: 'Generate Vagrantfile'
+ sh script: "vagrant up $targetDistro.vmName",
+ label: 'Create source VM'
+ }
+ }
+ }
+ stage('CreateMultiMachine') {
+ when {
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
+ }
+ environment {
+ CONFIG = "${CONF_FILTER}"
+ }
+ steps {
+ sh script: 'cp ci/vagrant/el8toel9_multi.rb Vagrantfile',
+ label: 'Generate Vagrantfile'
+ sh script: 'vagrant up',
+ label: 'Create source VMs'
+ }
+ }
+ stage('ELevationAndTest') {
+ matrix {
+ when {
+ anyOf {
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
+ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO }
+ }
+ }
+ axes {
+ axis {
+ name 'TARGET_DISTRO'
+ // values 'almalinux-9', 'centos-stream-9', 'rocky-9'
+ values 'almalinux-9', 'rocky-9'
+ }
+ }
+ stages {
+ stage('ELevate') {
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
+
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo dnf install -y https://repo.almalinux.org/elevate/elevate-release-latest-el8.noarch.rpm\"",
+ label: 'Install the elevate-release-latest rpm packages for EL8'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo wget https://build.almalinux.org/pulp/content/copr/eabdullin1-leapp-data-internal-centos7-x86_64-dr/config.repo -O /etc/yum.repos.d/internal-leapp.repo\"",
+ label: 'Add pulp repository'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo dnf install -y leapp-upgrade\"",
+ label: 'Install the leapp-upgrade rpm package'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo dnf install -y $targetDistro.leappData\"",
+ label: 'Install the leapp migration data rpm packages'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"",
+ label: 'Start the Pre-Upgrade check',
+ returnStatus: true
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo sed -i \'s/^AllowZoneDrifting=.*/AllowZoneDrifting=no/\' /etc/firewalld/firewalld.conf\"",
+ label: 'Disable AllowZoneDrifting in the firewalld configuration'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section check_vdo.no_vdo_devices=True\"",
+ label: 'Answer the leapp check_vdo question'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"",
+ label: 'Start the Upgrade'
+ sh script: "vagrant reload $targetDistro.vmName",
+ label: 'Reboot to the ELevate initramfs'
+ sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config",
+ label: 'Generate the ssh-config file'
+ }
+ }
+ }
+ }
+ }
+ stage('Distro Tests') {
+ when {
+ anyOf {
+ expression { params.CONF_FILTER == 'minimal' }
+ expression { params.CONF_FILTER == 'docker-ce' }
+ }
+ }
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
+
+ sh script: 'rm -f conftest.py pytest.ini',
+ label: 'Delete the root conftest.py and pytest.ini files'
+ sh script: """
+ . .venv/bin/activate \
+ && py.test -v --hosts=${targetDistro.vmName} \
+ --ssh-config=.vagrant/ssh-config \
+ --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \
+ ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py
+ """,
+ label: 'Run the distro specific tests'
+ }
+ }
+ }
+ }
+ }
+ stage('Docker Tests') {
+ when {
+ anyOf {
+ expression { params.CONF_FILTER == 'docker-ce' }
+ }
+ }
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
+
+ sh script: """
+ . .venv/bin/activate \
+ && py.test -v --hosts=${targetDistro.vmName} \
+ --ssh-config=.vagrant/ssh-config \
+ --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \
+ ci/tests/tests/docker/test_docker_ce.py
+ """,
+ label: 'Run the docker specific tests'
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ post {
+ success {
+ junit testResults: 'ci/tests/tests/**/**_junit.xml',
+ skipPublishingChecks: true
+ }
+ cleanup {
+ sh script: 'vagrant destroy -f --no-parallel -g',
+ label: 'Destroy VMs'
+ cleanWs()
+ }
+ }
+}
+
+def targetDistroSpec(distro) {
+ def spec = [:]
+
+ switch (distro) {
+ case 'almalinux-9':
+ vm = 'almalinux_9'
+ ldata = 'leapp-data-almalinux'
+
+ spec = [
+ vmName: vm,
+ leappData: ldata
+ ]
+ break
+ case 'rocky-9':
+ vm = 'rocky_9'
+ ldata = 'leapp-data-rocky'
+
+ spec = [
+ vmName: vm,
+ leappData: ldata
+ ]
+ break
+ default:
+ spec = [
+ vmName: 'unknown',
+ leappData: 'unknown'
+ ]
+ break
+ }
+ return spec
+}
diff --git a/ci/jenkins/ELevate_el8toel9_Internal_Dev.jenkinsfile b/ci/jenkins/ELevate_el8toel9_Internal_Dev.jenkinsfile
new file mode 100644
index 00000000..82626697
--- /dev/null
+++ b/ci/jenkins/ELevate_el8toel9_Internal_Dev.jenkinsfile
@@ -0,0 +1,206 @@
+RETRY = params.RETRY
+TIMEOUT = params.TIMEOUT
+
+pipeline {
+ agent {
+ label params.AGENT
+ }
+ options {
+ timestamps()
+ }
+ parameters {
+ string(name: 'AGENT', defaultValue: 'almalinux-8-vagrant-libvirt-x86_64', description: 'Input label of the Jenkins Agent', trim: true)
+ string(name: 'RETRY', defaultValue: '3', description: 'Number of retries', trim: true)
+ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true)
+ string(name: 'REPO_URL', defaultValue: 'https://github.com/LKHN/el-test-auto-dev.git', description: 'URL of the pipeline repository', trim: true)
+ string(name: 'REPO_BRANCH', defaultValue: 'main', description: 'Branch of the pipeline repository', trim: true)
+ choice(name: 'SOURCE_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a source distro or all for ELevation')
+ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'centos-stream-9', 'oraclelinux-9', 'rocky-9', 'all'], description: 'Select a target distro or all for ELevation')
+ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration')
+ }
+ stages {
+ stage('Source') {
+ steps {
+ git url: REPO_URL,
+ branch: REPO_BRANCH,
+ credentialsId: 'github-almalinuxautobot'
+ }
+ }
+ stage('Prepare Build and Test environment') {
+ steps {
+ sh script: 'cp Vagrantfile.el8toel9 Vagrantfile',
+ label: 'Generate the el8toel9 Vagrantfile'
+ sh script: 'sudo dnf -y install python39-devel python39-wheel',
+ label: 'Install Python 3.9, PIP and Wheel'
+ sh script: 'sudo python3 -m pip install --no-cache-dir --upgrade -r requirements.txt',
+ label: 'Install TestInfra'
+ sh script: 'git clone https://github.com/AlmaLinux/leapp-data.git --branch devel',
+ label: 'Clone the leapp-data git repository'
+ }
+ }
+ stage('ELevation') {
+ matrix {
+ when {
+ allOf {
+ anyOf {
+ expression { params.SOURCE_DISTRO_FILTER == 'all' }
+ expression { params.SOURCE_DISTRO_FILTER == env.SOURCE_DISTRO }
+ }
+ anyOf {
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
+ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO }
+ }
+ }
+ }
+ axes {
+ axis {
+ name 'SOURCE_DISTRO'
+ values 'almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8'
+ }
+ axis {
+ name 'TARGET_DISTRO'
+ values 'almalinux-9', 'centos-stream-9', 'oraclelinux-9', 'rocky-9'
+ }
+ }
+ stages {
+ stage('Create and Configure Machines') {
+ environment {
+ CONFIG = "${CONF_FILTER}"
+ }
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ sh script: 'vagrant destroy -f $SOURCE_DISTRO',
+ label: 'Make sure no machine is present from the last retry'
+ sh script: 'vagrant up $SOURCE_DISTRO',
+ label: 'Create the source machines'
+ }
+ }
+ }
+ }
+ stage('ELevate to all target distros') {
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf config-manager --add-repo https://repo.almalinux.org/elevate/testing/elevate-testing.repo\"',
+ label: 'Add the ELevate Testing RPM repository'
+ sh script: "vagrant ssh $SOURCE_DISTRO -c \"sudo dnf install -y wget\"",
+ label: 'Install wget'
+ sh script: "vagrant ssh $SOURCE_DISTRO -c \"sudo wget https://build.almalinux.org/pulp/content/copr/eabdullin1-leapp-data-internal-almalinux-8-x86_64-dr/config.repo -O /etc/yum.repos.d/internal-leapp.repo\"",
+ label: 'Add pulp repository'
+ sh script: "vagrant ssh $SOURCE_DISTRO -c \"sudo sed -i 's|enabled=1|enabled=1\\npriority=80|' /etc/yum.repos.d/internal-leapp.repo\"",
+ label: 'Set priority for pulp repository'
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf install -y leapp-upgrade\"',
+ label: 'Install the leapp-upgrade rpm package'
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo bash /vagrant/scripts/install_elevate_dev.sh\"',
+ label: 'Install Development version of ELevate',
+ returnStatus: true
+ script {
+ def LEAPP_DATA = getLeappDataDistro(TARGET_DISTRO)
+ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo mkdir -p /etc/leapp/files/vendors.d\"",
+ label:'Create the LEAPP directory')
+ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo install -t /etc/leapp/files /vagrant/leapp-data/files/${LEAPP_DATA}/*\"",
+ label:"Install the LEAPP DATA")
+ sh(script:'vagrant ssh $SOURCE_DISTRO -c \"sudo install -t /etc/leapp/files/vendors.d /vagrant/leapp-data/vendors.d/*\"',
+ label:"Install the Vendor DATA")
+ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo mv -f /etc/leapp/files/leapp_upgrade_repositories.repo.el9 /etc/leapp/files/leapp_upgrade_repositories.repo\"",
+ label:'Set LEAPP Repos for EL8')
+ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo mv -f /etc/leapp/files/repomap.json.el9 /etc/leapp/files/repomap.json\"",
+ label:'Set LEAPP Repo map for EL8')
+ sh(script:'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf -y install tree && sudo tree -ha /etc/leapp\"',
+ label:"Debug: Data paths")
+ }
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp preupgrade\"',
+ label: 'Start the Pre-Upgrade check',
+ returnStatus: true
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"',
+ label: 'Permit ssh as root login'
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"',
+ label: 'Answer the leapp question'
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp upgrade\"',
+ label: 'Start the Upgrade'
+ sh script: 'vagrant reload $SOURCE_DISTRO',
+ label: 'Reboot to the ELevate initramfs'
+ sh script: 'vagrant ssh-config $SOURCE_DISTRO >> .vagrant/ssh-config',
+ label: 'Generate the ssh-config file'
+ }
+ }
+ }
+ }
+ stage('Distro Tests') {
+ when {
+ anyOf {
+ expression { params.CONF_FILTER == 'minimal'}
+ expression { params.CONF_FILTER == 'docker-ce'}
+ }
+ }
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ sh script: 'py.test -v --hosts=$SOURCE_DISTRO --ssh-config=.vagrant/ssh-config --junit-xml tests/distro/test_osinfo_$SOURCE_DISTRO-junit.xml tests/distro/test_osinfo_$SOURCE_DISTRO.py',
+ label: 'Run the distro specific tests'
+ }
+ }
+ }
+ }
+ stage('Docker Tests') {
+ when {
+ anyOf {
+ expression { params.CONF_FILTER == 'docker-ce'}
+ }
+ }
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ sh script: 'py.test -v --hosts=$SOURCE_DISTRO --ssh-config=.vagrant/ssh-config --junit-xml tests/docker/test_docker_ce_$SOURCE_DISTRO-junit.xml tests/docker/test_docker_ce.py',
+ label: 'Run the docker specific tests'
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ post {
+ success {
+ junit testResults: '**/tests/**/**-junit.xml',
+ skipPublishingChecks: true
+ }
+ cleanup {
+ sh script: 'vagrant destroy -f',
+ label: 'Destroy All Machines'
+ cleanWs()
+ }
+ }
+}
+
+/*
+* Common Functions
+*/
+def getLeappDataDistro(TARGET_DISTRO) {
+ def leapp_data = ""
+
+ switch(TARGET_DISTRO) {
+ case "almalinux-9":
+ leapp_data = TARGET_DISTRO.substring(0, 9)
+ break
+
+ case "centos-stream-9":
+ leapp_data = TARGET_DISTRO.substring(0, 6)
+ break
+
+ case "oraclelinux-9":
+ leapp_data = TARGET_DISTRO.substring(0, 11)
+ break
+
+ case "rocky-9":
+ leapp_data = TARGET_DISTRO.substring(0, 5)
+ break
+
+ default:
+ leapp_data = "Error: Target Distro Not Supported"
+ break
+ }
+ return leapp_data
+}
diff --git a/ci/jenkins/ELevate_el8toel9_Stable.jenkinsfile b/ci/jenkins/ELevate_el8toel9_Stable.jenkinsfile
new file mode 100644
index 00000000..68f00165
--- /dev/null
+++ b/ci/jenkins/ELevate_el8toel9_Stable.jenkinsfile
@@ -0,0 +1,212 @@
+RETRY = params.RETRY
+TIMEOUT = params.TIMEOUT
+
+pipeline {
+ agent {
+ label 'x86_64 && bm'
+ }
+ options {
+ timestamps()
+ parallelsAlwaysFailFast()
+ }
+ parameters {
+ // choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'centos-stream-9', 'rocky-9', 'all'], description: 'Select a target distro or all for ELevation')
+ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'rocky-9', 'all'], description: 'Select a target distro or all for ELevation')
+ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration')
+ string(name: 'RETRY', defaultValue: '3', description: 'Number of retries', trim: true)
+ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true)
+ }
+ environment {
+ VAGRANT_NO_COLOR = '1'
+ }
+ stages {
+ stage('Prepare') {
+ steps {
+ sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml',
+ label: 'Install Ansible collections'
+ sh script: 'python3.11 -m venv .venv',
+ label: 'Create Python virtual environment'
+ sh script: '. .venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko',
+ label: 'Install Testinfra'
+ }
+ }
+ stage('CreateSingleMachine') {
+ when {
+ expression { params.TARGET_DISTRO_FILTER != 'all' }
+ }
+ environment {
+ CONFIG = "${CONF_FILTER}"
+ }
+ steps {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER)
+
+ sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile',
+ label: 'Generate Vagrantfile'
+ sh script: "vagrant up $targetDistro.vmName",
+ label: 'Create source VM'
+ }
+ }
+ }
+ stage('CreateMultiMachine') {
+ when {
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
+ }
+ environment {
+ CONFIG = "${CONF_FILTER}"
+ }
+ steps {
+ sh script: 'cp ci/vagrant/el8toel9_multi.rb Vagrantfile',
+ label: 'Generate Vagrantfile'
+ sh script: 'vagrant up',
+ label: 'Create source VMs'
+ }
+ }
+ stage('ELevationAndTest') {
+ matrix {
+ when {
+ anyOf {
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
+ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO }
+ }
+ }
+ axes {
+ axis {
+ name 'TARGET_DISTRO'
+ // values 'almalinux-9', 'centos-stream-9', 'rocky-9'
+ values 'almalinux-9', 'rocky-9'
+ }
+ }
+ stages {
+ stage('ELevate') {
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
+
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo dnf install -y https://repo.almalinux.org/elevate/elevate-release-latest-el8.noarch.rpm\"",
+ label: 'Install the elevate-release-latest rpm packages for EL8'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo dnf install -y leapp-upgrade\"",
+ label: 'Install the leapp-upgrade rpm package'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo dnf install -y $targetDistro.leappData\"",
+ label: 'Install the leapp migration data rpm packages'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"",
+ label: 'Start the Pre-Upgrade check',
+ returnStatus: true
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo sed -i \'s/^AllowZoneDrifting=.*/AllowZoneDrifting=no/\' /etc/firewalld/firewalld.conf\"",
+ label: 'Disable AllowZoneDrifting in the firewalld configuration'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section check_vdo.no_vdo_devices=True\"",
+ label: 'Answer the leapp check_vdo question'
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"",
+ label: 'Start the Upgrade'
+ sh script: "vagrant reload $targetDistro.vmName",
+ label: 'Reboot to the ELevate initramfs'
+ sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config",
+ label: 'Generate the ssh-config file'
+ }
+ }
+ }
+ }
+ }
+ stage('Distro Tests') {
+ when {
+ anyOf {
+ expression { params.CONF_FILTER == 'minimal' }
+ expression { params.CONF_FILTER == 'docker-ce' }
+ }
+ }
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
+
+ sh script: 'rm -f conftest.py pytest.ini',
+ label: 'Delete the root conftest.py and pytest.ini files'
+ sh script: """
+ . .venv/bin/activate \
+ && py.test -v --hosts=${targetDistro.vmName} \
+ --ssh-config=.vagrant/ssh-config \
+ --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \
+ ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py
+ """,
+ label: 'Run the distro specific tests'
+ }
+ }
+ }
+ }
+ }
+ stage('Docker Tests') {
+ when {
+ anyOf {
+ expression { params.CONF_FILTER == 'docker-ce' }
+ }
+ }
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ script {
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
+
+ sh script: """
+ . .venv/bin/activate \
+ && py.test -v --hosts=${targetDistro.vmName} \
+ --ssh-config=.vagrant/ssh-config \
+ --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \
+ ci/tests/tests/docker/test_docker_ce.py
+ """,
+ label: 'Run the docker specific tests'
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ post {
+ success {
+ junit testResults: 'ci/tests/tests/**/**_junit.xml',
+ skipPublishingChecks: true
+ }
+ cleanup {
+ sh script: 'vagrant destroy -f --no-parallel -g',
+ label: 'Destroy VMs'
+ cleanWs()
+ }
+ }
+}
+
+def targetDistroSpec(distro) {
+ def spec = [:]
+
+ switch (distro) {
+ case 'almalinux-9':
+ vm = 'almalinux_9'
+ ldata = 'leapp-data-almalinux'
+
+ spec = [
+ vmName: vm,
+ leappData: ldata
+ ]
+ break
+ case 'rocky-9':
+ vm = 'rocky_9'
+ ldata = 'leapp-data-rocky'
+
+ spec = [
+ vmName: vm,
+ leappData: ldata
+ ]
+ break
+ default:
+ spec = [
+ vmName: 'unknown',
+ leappData: 'unknown'
+ ]
+ break
+ }
+ return spec
+}
diff --git a/ci/jenkins/ELevate_el8toel9_Testing.jenkinsfile b/ci/jenkins/ELevate_el8toel9_Testing.jenkinsfile
new file mode 100644
index 00000000..79cdd472
--- /dev/null
+++ b/ci/jenkins/ELevate_el8toel9_Testing.jenkinsfile
@@ -0,0 +1,187 @@
+RETRY = params.RETRY
+TIMEOUT = params.TIMEOUT
+
+pipeline {
+ agent {
+ label params.AGENT
+ }
+ options {
+ timestamps()
+ }
+ parameters {
+ string(name: 'AGENT', defaultValue: 'almalinux-8-vagrant-libvirt-x86_64', description: 'Input label of the Jenkins Agent', trim: true)
+ string(name: 'RETRY', defaultValue: '3', description: 'Number of retries', trim: true)
+ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true)
+ string(name: 'REPO_URL', defaultValue: 'https://github.com/LKHN/el-test-auto-dev.git', description: 'URL of the pipeline repository', trim: true)
+ string(name: 'REPO_BRANCH', defaultValue: 'main', description: 'Branch of the pipeline repository', trim: true)
+ choice(name: 'SOURCE_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a source distro or all for ELevation')
+ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'centos-stream-9', 'oraclelinux-9', 'rocky-9', 'all'], description: 'Select a target distro or all for ELevation')
+ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration')
+ }
+ stages {
+ stage('Source') {
+ steps {
+ git url: REPO_URL,
+ branch: REPO_BRANCH,
+ credentialsId: 'github-almalinuxautobot'
+ }
+ }
+ stage('Prepare Build and Test environment') {
+ steps {
+ sh script: 'cp Vagrantfile.el8toel9 Vagrantfile',
+ label: 'Generate the el8toel9 Vagrantfile'
+ sh script: 'sudo dnf -y install python39-devel python39-wheel',
+ label: 'Install Python 3.9, PIP and Wheel'
+ sh script: 'sudo python3 -m pip install --no-cache-dir --upgrade -r requirements.txt',
+ label: 'Install TestInfra'
+ }
+ }
+ stage('ELevation') {
+ matrix {
+ when {
+ allOf {
+ anyOf {
+ expression { params.SOURCE_DISTRO_FILTER == 'all' }
+ expression { params.SOURCE_DISTRO_FILTER == env.SOURCE_DISTRO }
+ }
+ anyOf {
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
+ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO }
+ }
+ }
+ }
+ axes {
+ axis {
+ name 'SOURCE_DISTRO'
+ values 'almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8'
+ }
+ axis {
+ name 'TARGET_DISTRO'
+ values 'almalinux-9', 'centos-stream-9', 'oraclelinux-9', 'rocky-9'
+ }
+ }
+ stages {
+ stage('Create and Configure Machines') {
+ environment {
+ CONFIG = "${CONF_FILTER}"
+ }
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ sh script: 'vagrant destroy -f $SOURCE_DISTRO',
+ label: 'Make sure no machine is present from the last retry'
+ sh script: 'vagrant up $SOURCE_DISTRO',
+ label: 'Create the source machines'
+ }
+ }
+ }
+ }
+ stage('ELevate to all target distros') {
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf config-manager --add-repo https://repo.almalinux.org/elevate/testing/elevate-testing.repo\"',
+ label: 'Add the ELevate Testing RPM repository'
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf -y install leapp-upgrade\"',
+ label: 'Install the leapp-upgrade rpm package'
+ script {
+ def LEAPP_DATA = getLeappDataDistro(TARGET_DISTRO)
+ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo dnf -y install leapp-data-$LEAPP_DATA\"",
+ label:'Install the leapp migration data rpm packages')
+ sh(script:'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf -y install tree && sudo tree -ha /etc/leapp\"',
+ label:'Debug: Data paths')
+ }
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp preupgrade\"',
+ label: 'Start the Pre-Upgrade check',
+ returnStatus: true
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"',
+ label: 'Permit ssh as root login'
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"',
+ label: 'Answer the leapp question'
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp upgrade\"',
+ label: 'Start the Upgrade'
+ sh script: 'vagrant reload $SOURCE_DISTRO',
+ label: 'Reboot to the ELevate initramfs'
+ sh script: 'vagrant ssh-config $SOURCE_DISTRO >> .vagrant/ssh-config',
+ label: 'Generate the ssh-config file'
+ }
+ }
+ }
+ }
+ stage('Distro Tests') {
+ when {
+ anyOf {
+ expression { params.CONF_FILTER == 'minimal'}
+ expression { params.CONF_FILTER == 'docker-ce'}
+ }
+ }
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ sh script: 'py.test -v --hosts=$SOURCE_DISTRO --ssh-config=.vagrant/ssh-config --junit-xml tests/distro/test_osinfo_$TARGET_DISTRO-junit.xml tests/distro/test_osinfo_$TARGET_DISTRO.py',
+ label: 'Run the distro specific tests'
+ }
+ }
+ }
+ }
+ stage('Docker Tests') {
+ when {
+ anyOf {
+ expression { params.CONF_FILTER == 'docker-ce'}
+ }
+ }
+ steps {
+ retry(RETRY) {
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
+ sh script: 'py.test -v --hosts=$SOURCE_DISTRO --ssh-config=.vagrant/ssh-config --junit-xml tests/docker/test_docker_ce_$SOURCE_DISTRO-junit.xml tests/docker/test_docker_ce.py',
+ label: 'Run the docker specific tests'
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ post {
+ success {
+ junit testResults: '**/tests/**/**-junit.xml',
+ skipPublishingChecks: true
+ }
+ cleanup {
+ sh script: 'vagrant destroy -f',
+ label: 'Destroy All Machines'
+ cleanWs()
+ }
+ }
+}
+
+/*
+* Common Functions
+*/
+def getLeappDataDistro(TARGET_DISTRO) {
+ def leapp_data = ""
+
+ switch(TARGET_DISTRO) {
+ case "almalinux-9":
+ leapp_data = TARGET_DISTRO.substring(0, 9)
+ break
+
+ case "centos-stream-9":
+ leapp_data = TARGET_DISTRO.substring(0, 6)
+ break
+
+ case "oraclelinux-9":
+ leapp_data = TARGET_DISTRO.substring(0, 11)
+ break
+
+ case "rocky-9":
+ leapp_data = TARGET_DISTRO.substring(0, 5)
+ break
+
+ default:
+ leapp_data = "Error: Target Distro Not Supported"
+ break
+ }
+ return leapp_data
+}
diff --git a/ci/scripts/install_elevate_dev.sh b/ci/scripts/install_elevate_dev.sh
new file mode 100644
index 00000000..f9cc2903
--- /dev/null
+++ b/ci/scripts/install_elevate_dev.sh
@@ -0,0 +1,117 @@
+#!/usr/bin/env bash
+
+USER='AlmaLinux'
+BRANCH='almalinux'
+
+show_usage() {
+ echo 'Usage: install_elevate_dev.sh [OPTION]...'
+ echo ''
+ echo ' -h, --help show this message and exit'
+ echo ' -u, --user github user name (default: AlmaLinux)'
+ echo ' -b, --branch github branch name (default: almalinux)'
+}
+
+while [[ $# -gt 0 ]]; do
+ opt="$1"
+ case ${opt} in
+ -h|--help)
+ show_usage
+ exit 0
+ ;;
+ -u|--user)
+ USER="$2"
+ shift
+ shift
+ ;;
+ -b|--branch)
+ BRANCH="$2"
+ shift
+ shift
+ ;;
+ *)
+ echo -e "Error: unknown option ${opt}" >&2
+ exit 2
+ ;;
+ esac
+done
+
+RHEL_MAJOR_VERSION=$(rpm --eval %rhel)
+WORK_DIR="$HOME"
+NEW_LEAPP_NAME="leapp-repository-$BRANCH"
+NEW_LEAPP_DIR="$WORK_DIR/$NEW_LEAPP_NAME/"
+LEAPP_PATH='/usr/share/leapp-repository/repositories/'
+LEAPP_GPG_PATH='/etc/leapp/repos.d/system_upgrade/common/files/rpm-gpg'
+EXCLUDE_PATH='
+/usr/share/leapp-repository/repositories/system_upgrade/el7toel8/files/bundled-rpms
+/usr/share/leapp-repository/repositories/system_upgrade/el7toel8/files
+/usr/share/leapp-repository/repositories/system_upgrade/el7toel8
+/usr/share/leapp-repository/repositories/system_upgrade/el8toel9/files/bundled-rpms
+/usr/share/leapp-repository/repositories/system_upgrade/el8toel9/files
+/usr/share/leapp-repository/repositories/system_upgrade/el8toel9
+/usr/share/leapp-repository/repositories/system_upgrade
+/usr/share/leapp-repository/repositories/
+'
+
+
+echo "RHEL_MAJOR_VERSION=$RHEL_MAJOR_VERSION"
+echo "WORK_DIR=$WORK_DIR"
+echo "EXCLUDED_PATHS=$EXCLUDE_PATH"
+
+echo "Preserve GPG keys if any"
+for major in 8 9; do
+ test -e ${LEAPP_GPG_PATH}/${major} && mv ${LEAPP_GPG_PATH}/${major} ${WORK_DIR}/
+done
+
+
+echo 'Remove old files'
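+# Remove every directory under LEAPP_PATH except those listed in EXCLUDE_PATH, so the files/ and bundled-rpms directories survive the refresh.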
+for dir in $(find $LEAPP_PATH -type d);
+do
+ skip=0
+ for exclude in $(echo $EXCLUDE_PATH);
+ do
+ if [[ $exclude == $dir ]];then
+ skip=1
+ break
+ fi
+ done
+ if [ $skip -eq 0 ];then
+ rm -rf $dir
+ fi
+done
+
+echo "Download new tarball from https://github.com/$USER/leapp-repository/archive/$BRANCH/leapp-repository-$BRANCH.tar.gz"
+curl -s -L https://github.com/$USER/leapp-repository/archive/$BRANCH/leapp-repository-$BRANCH.tar.gz | tar -xmz -C $WORK_DIR/ || exit 1
+
+echo 'Deleting files as in spec file'
+rm -rf $NEW_LEAPP_DIR/repos/common/actors/testactor
+find $NEW_LEAPP_DIR/repos/common -name "test.py" -delete
+rm -rf `find $NEW_LEAPP_DIR -name "tests" -type d`
+find $NEW_LEAPP_DIR -name "Makefile" -delete
+if [ $RHEL_MAJOR_VERSION -eq '7' ]; then
+ rm -rf $NEW_LEAPP_DIR/repos/system_upgrade/el8toel9
+else
+ rm -rf $NEW_LEAPP_DIR/repos/system_upgrade/el7toel8
+ rm -rf $NEW_LEAPP_DIR/repos/system_upgrade/cloudlinux
+fi
+
+echo 'Copy new data to system'
+cp -r $NEW_LEAPP_DIR/repos/* $LEAPP_PATH || exit 1
+
+for DIRECTORY in $(find $LEAPP_PATH -mindepth 1 -maxdepth 1 -type d);
+do
+ REPOSITORY=$(basename $DIRECTORY)
+ if ! [ -e /etc/leapp/repos.d/$REPOSITORY ];then
+ echo "Enabling repository $REPOSITORY"
+ ln -s $LEAPP_PATH/$REPOSITORY /etc/leapp/repos.d/$REPOSITORY || exit 1
+ fi
+done
+
+echo "Restore GPG keys if any"
+for major in 8 9; do
+ rm -rf ${LEAPP_GPG_PATH}/${major}
+ test -e ${WORK_DIR}/${major} && mv ${WORK_DIR}/${major} ${LEAPP_GPG_PATH}/
+done
+
+rm -rf $NEW_LEAPP_DIR
+
+exit 0
diff --git a/ci/tests/tests/conftest.py b/ci/tests/tests/conftest.py
new file mode 100644
index 00000000..01f9443e
--- /dev/null
+++ b/ci/tests/tests/conftest.py
@@ -0,0 +1,52 @@
+import pytest
+import re
+
+
+@pytest.fixture(scope="module")
+def get_os_release(host):
+ """Get content of the /etc/os-release"""
+ os_release = host.file("/etc/os-release")
+ return os_release
+
+
+@pytest.fixture(scope="module")
+def get_redhat_release(host):
+ """Get content of the /etc/redhat-release"""
+ redhat_release = host.file("/etc/redhat-release")
+ return redhat_release
+
+
+@pytest.fixture(scope="module")
+def get_kernel_info(host):
+ """Get kernel version and vendor information"""
+ kernel_ver_pattern = re.compile(
+ r"^([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}).*"
+ )
+ kernel_ver_output = host.check_output("uname -r")
+ kernel_version = kernel_ver_pattern.match(kernel_ver_output).group(1)
+
+ with host.sudo():
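+ # The signing-key entries in /proc/keys carry the kernel vendor name; squashing runs of spaces into ':' makes it the 9th field.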
+ kernel_vendor = host.check_output(
+ "grep -Ei '(.*kernel signing key|.*CA Server|.*Build)' /proc/keys | sed -E"
+ " 's/ +/:/g' | cut -d ':' -f 9 | uniq"
+ )
+ kernel_info = (kernel_version, kernel_vendor)
+ return kernel_info
+
+
+@pytest.fixture(scope="module", params=["glibc", "systemd", "coreutils", "rpm"])
+def get_pkg_info(host, request):
+ """Get vendor and version of installed packages"""
+ pkg_name = request.param
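+ # rpm -qa can list multiple installed versions (e.g. for kernel); sed '$p;d' keeps only the last line of the output.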
+ pkg_vendor = host.check_output(
+ f"rpm -qa --queryformat \"%{{VENDOR}}\n\" {request.param} | sed '$p;d' "
+ )
+ pkg_version = host.check_output(
+ f'rpm -qa --queryformat "%{{VERSION}}\n" {request.param} | sort -n | sed'
+ " '$p;d'"
+ )
+ pkg_info = (pkg_name, pkg_vendor, pkg_version)
+ # print(pkg_name)
+ # print(pkg_vendor)
+ # print(pkg_version)
+ return pkg_info
diff --git a/ci/tests/tests/distro/test_osinfo_almalinux_8.py b/ci/tests/tests/distro/test_osinfo_almalinux_8.py
new file mode 100644
index 00000000..c5219b35
--- /dev/null
+++ b/ci/tests/tests/distro/test_osinfo_almalinux_8.py
@@ -0,0 +1,43 @@
+import pytest
+
+
+@pytest.mark.usefixtures("get_os_release")
+class TestOSRelease:
+ """Test values of NAME, ID and VERSION_ID"""
+
+ def test_os_rel_name(self, get_os_release):
+ assert get_os_release.contains('NAME="AlmaLinux"')
+
+ def test_os_rel_id(self, get_os_release):
+ assert get_os_release.contains('ID="almalinux"')
+
+ def test_os_rel_version_id(self, get_os_release):
+ assert get_os_release.contains('VERSION_ID="8.*"')
+
+
+@pytest.mark.usefixtures("get_redhat_release")
+class TestRHRelease:
+ """Test contents of the /etc/redhat-release"""
+
+ def test_redhat_release(self, get_redhat_release):
+ assert get_redhat_release.contains("AlmaLinux release 8.*")
+
+
+@pytest.mark.usefixtures("get_pkg_info")
+class TestPkgInfo:
+ """Test vendor and version of packages"""
+
+ def test_pkg_vendor(self, get_pkg_info):
+ assert get_pkg_info[1] == "AlmaLinux"
+
+ def test_pkg_version(self, get_pkg_info):
+ if get_pkg_info[0] == "kernel":
+ assert get_pkg_info[2] == "4.18.0"
+ elif get_pkg_info[0] == "glibc":
+ assert get_pkg_info[2] == "2.28"
+ elif get_pkg_info[0] == "systemd":
+ assert get_pkg_info[2] == "239"
+ elif get_pkg_info[0] == "coreutils":
+ assert get_pkg_info[2] == "8.30"
+ else:
+ assert get_pkg_info[2] == "4.14.3"
diff --git a/ci/tests/tests/distro/test_osinfo_almalinux_9.py b/ci/tests/tests/distro/test_osinfo_almalinux_9.py
new file mode 100644
index 00000000..1536e52b
--- /dev/null
+++ b/ci/tests/tests/distro/test_osinfo_almalinux_9.py
@@ -0,0 +1,52 @@
+import pytest
+
+
+@pytest.mark.usefixtures("get_os_release")
+class TestOSRelease:
+ """Test values of NAME, ID and VERSION_ID"""
+
+ def test_os_rel_name(self, get_os_release):
+ assert get_os_release.contains('NAME="AlmaLinux"')
+
+ def test_os_rel_id(self, get_os_release):
+ assert get_os_release.contains('ID="almalinux"')
+
+ def test_os_rel_version_id(self, get_os_release):
+ assert get_os_release.contains('VERSION_ID="9.*"')
+
+
+@pytest.mark.usefixtures("get_redhat_release")
+class TestRHRelease:
+ """Test contents of the /etc/redhat-release"""
+
+ def test_redhat_release(self, get_redhat_release):
+ assert get_redhat_release.contains("AlmaLinux release 9.*")
+
+
+@pytest.mark.usefixtures("get_kernel_info")
+class TestKernelInfo:
+ """Test version and vendor of running kernel"""
+
+ def test_kernel_version(self, get_kernel_info):
+ assert get_kernel_info[0] == "5.14.0"
+
+ def test_kernel_vendor(self, get_kernel_info):
+ assert get_kernel_info[1] == "AlmaLinux"
+
+
+@pytest.mark.usefixtures("get_pkg_info")
+class TestPkgInfo:
+ """Test vendor and version of packages"""
+
+ def test_pkg_vendor(self, get_pkg_info):
+ assert get_pkg_info[1] == "AlmaLinux"
+
+ def test_pkg_version(self, get_pkg_info):
+ if get_pkg_info[0] == "glibc":
+ assert get_pkg_info[2] == "2.34"
+ elif get_pkg_info[0] == "systemd":
+ assert get_pkg_info[2] == "252"
+ elif get_pkg_info[0] == "coreutils":
+ assert get_pkg_info[2] == "8.32"
+ else:
+ assert get_pkg_info[2] == "4.16.1.3"
diff --git a/ci/tests/tests/distro/test_osinfo_centosstream_8.py b/ci/tests/tests/distro/test_osinfo_centosstream_8.py
new file mode 100644
index 00000000..995ae61e
--- /dev/null
+++ b/ci/tests/tests/distro/test_osinfo_centosstream_8.py
@@ -0,0 +1,23 @@
+import pytest
+
+
+@pytest.mark.usefixtures("get_os_release")
+class TestOSRelease:
+ """Test values of NAME, ID and VERSION_ID"""
+
+ def test_os_rel_name(self, get_os_release):
+ assert get_os_release.contains('NAME="CentOS Stream"')
+
+ def test_os_rel_id(self, get_os_release):
+ assert get_os_release.contains('ID="centos"')
+
+ def test_os_rel_version_id(self, get_os_release):
+ assert get_os_release.contains('VERSION_ID="8"')
+
+
+@pytest.mark.usefixtures("get_redhat_release")
+class TestRHRelease:
+ """Test contents of the /etc/redhat-release"""
+
+ def test_redhat_release(self, get_redhat_release):
+ assert get_redhat_release.contains("CentOS Stream release 8")
diff --git a/ci/tests/tests/distro/test_osinfo_centosstream_9.py b/ci/tests/tests/distro/test_osinfo_centosstream_9.py
new file mode 100644
index 00000000..28e47202
--- /dev/null
+++ b/ci/tests/tests/distro/test_osinfo_centosstream_9.py
@@ -0,0 +1,23 @@
+import pytest
+
+
+@pytest.mark.usefixtures("get_os_release")
+class TestOSRelease:
+ """Test values of NAME, ID and VERSION_ID"""
+
+ def test_os_rel_name(self, get_os_release):
+ assert get_os_release.contains('NAME="CentOS Stream"')
+
+ def test_os_rel_id(self, get_os_release):
+ assert get_os_release.contains('ID="centos"')
+
+ def test_os_rel_version_id(self, get_os_release):
+ assert get_os_release.contains('VERSION_ID="9"')
+
+
+@pytest.mark.usefixtures("get_redhat_release")
+class TestRHRelease:
+ """Test contents of the /etc/redhat-release"""
+
+ def test_redhat_release(self, get_redhat_release):
+ assert get_redhat_release.contains("CentOS Stream release 9")
diff --git a/ci/tests/tests/distro/test_osinfo_oraclelinux_8.py b/ci/tests/tests/distro/test_osinfo_oraclelinux_8.py
new file mode 100644
index 00000000..2080fd2f
--- /dev/null
+++ b/ci/tests/tests/distro/test_osinfo_oraclelinux_8.py
@@ -0,0 +1,23 @@
+import pytest
+
+
+@pytest.mark.usefixtures("get_os_release")
+class TestOSRelease:
+ """Test values of NAME, ID and VERSION_ID"""
+
+ def test_os_rel_name(self, get_os_release):
+ assert get_os_release.contains('NAME="Oracle Linux Server"')
+
+ def test_os_rel_id(self, get_os_release):
+ assert get_os_release.contains('ID="ol"')
+
+ def test_os_rel_version_id(self, get_os_release):
+ assert get_os_release.contains('VERSION_ID="8.*"')
+
+
+@pytest.mark.usefixtures("get_redhat_release")
+class TestRHRelease:
+ """Test contents of the /etc/redhat-release"""
+
+ def test_redhat_release(self, get_redhat_release):
+ assert get_redhat_release.contains("Red Hat Enterprise Linux release 8.*")
diff --git a/ci/tests/tests/distro/test_osinfo_oraclelinux_9.py b/ci/tests/tests/distro/test_osinfo_oraclelinux_9.py
new file mode 100644
index 00000000..bd5044bb
--- /dev/null
+++ b/ci/tests/tests/distro/test_osinfo_oraclelinux_9.py
@@ -0,0 +1,23 @@
+import pytest
+
+
+@pytest.mark.usefixtures("get_os_release")
+class TestOSRelease:
+ """Test values of NAME, ID and VERSION_ID"""
+
+ def test_os_rel_name(self, get_os_release):
+ assert get_os_release.contains('NAME="Oracle Linux Server"')
+
+ def test_os_rel_id(self, get_os_release):
+ assert get_os_release.contains('ID="ol"')
+
+ def test_os_rel_version_id(self, get_os_release):
+ assert get_os_release.contains('VERSION_ID="9.*"')
+
+
+@pytest.mark.usefixtures("get_redhat_release")
+class TestRHRelease:
+ """Test contents of the /etc/redhat-release"""
+
+ def test_redhat_release(self, get_redhat_release):
+ assert get_redhat_release.contains("Red Hat Enterprise Linux release 9.*")
diff --git a/ci/tests/tests/distro/test_osinfo_rocky_8.py b/ci/tests/tests/distro/test_osinfo_rocky_8.py
new file mode 100644
index 00000000..cce5d668
--- /dev/null
+++ b/ci/tests/tests/distro/test_osinfo_rocky_8.py
@@ -0,0 +1,23 @@
+import pytest
+
+
+@pytest.mark.usefixtures("get_os_release")
+class TestOSRelease:
+ """Test values of NAME, ID and VERSION_ID"""
+
+ def test_os_rel_name(self, get_os_release):
+ assert get_os_release.contains('NAME="Rocky Linux"')
+
+ def test_os_rel_id(self, get_os_release):
+ assert get_os_release.contains('ID="rocky"')
+
+ def test_os_rel_version_id(self, get_os_release):
+ assert get_os_release.contains('VERSION_ID="8.*"')
+
+
+@pytest.mark.usefixtures("get_redhat_release")
+class TestRHRelease:
+ """Test contents of the /etc/redhat-release"""
+
+ def test_redhat_release(self, get_redhat_release):
+ assert get_redhat_release.contains("Rocky Linux release 8.*")
diff --git a/ci/tests/tests/distro/test_osinfo_rocky_9.py b/ci/tests/tests/distro/test_osinfo_rocky_9.py
new file mode 100644
index 00000000..ce8cccdb
--- /dev/null
+++ b/ci/tests/tests/distro/test_osinfo_rocky_9.py
@@ -0,0 +1,23 @@
+import pytest
+
+
+@pytest.mark.usefixtures("get_os_release")
+class TestOSRelease:
+ """Test values of NAME, ID and VERSION_ID"""
+
+ def test_os_rel_name(self, get_os_release):
+ assert get_os_release.contains('NAME="Rocky Linux"')
+
+ def test_os_rel_id(self, get_os_release):
+ assert get_os_release.contains('ID="rocky"')
+
+ def test_os_rel_version_id(self, get_os_release):
+ assert get_os_release.contains('VERSION_ID="9.*"')
+
+
+@pytest.mark.usefixtures("get_redhat_release")
+class TestRHRelease:
+ """Test contents of the /etc/redhat-release"""
+
+ def test_redhat_release(self, get_redhat_release):
+ assert get_redhat_release.contains("Rocky Linux release 9.*")
diff --git a/ci/tests/tests/docker/test_docker_ce.py b/ci/tests/tests/docker/test_docker_ce.py
new file mode 100644
index 00000000..3c2550c7
--- /dev/null
+++ b/ci/tests/tests/docker/test_docker_ce.py
@@ -0,0 +1,26 @@
+import pytest
+
+
+class TestDockerServices:
+ """Test docker and containerd services running and enabled"""
+
+ def test_docker_is_running(self, host):
+ assert host.service("docker.service").is_running
+
+ def test_containerd_is_running(self, host):
+ assert host.service("containerd.service").is_running
+
+ def test_docker_is_enabled(self, host):
+ assert host.service("docker.service").is_enabled
+
+ def test_containerd_is_enabled(self, host):
+ assert host.service("containerd.service").is_enabled
+
+
+class TestDockerWorking:
+ """Test docker working with the hello world container"""
+
+ def test_docker_is_working(self, host):
+ with host.sudo():
+ cmd = host.run("sudo docker run --rm hello-world")
+ assert cmd.succeeded
diff --git a/ci/vagrant/el7toel8_multi.rb b/ci/vagrant/el7toel8_multi.rb
new file mode 100644
index 00000000..a18da81d
--- /dev/null
+++ b/ci/vagrant/el7toel8_multi.rb
@@ -0,0 +1,40 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+configuration = ENV['CONFIG']
+
+Vagrant.configure('2') do |config|
+ config.vagrant.plugins = 'vagrant-libvirt'
+
+ config.vm.synced_folder '.', '/vagrant', disabled: true
+ config.vm.box = 'generic/centos7'
+ config.vm.boot_timeout = 3600
+
+ config.vm.provider 'libvirt' do |v|
+ v.uri = 'qemu:///system'
+ v.memory = 4096
+ v.machine_type = 'q35'
+ v.cpu_mode = 'host-passthrough'
+ v.cpus = 2
+ v.disk_bus = 'scsi'
+ v.disk_driver cache: 'writeback', discard: 'unmap'
+ v.random_hostname = true
+ end
+
+ target_distros = ['almalinux', 'centosstream', 'oraclelinux', 'rocky']
+
+ target_distros.each do |target_distro|
+ config.vm.define "#{target_distro}_8" do |machine|
+ machine.vm.hostname = "#{target_distro}-8.test"
+
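+ # Provision with Ansible only once, from the last defined machine; limit 'all' lets that single run configure every VM.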
+ if target_distro == target_distros[-1]
+ machine.vm.provision 'ansible' do |ansible|
+ ansible.compatibility_mode = '2.0'
+ ansible.limit = 'all'
+ ansible.playbook = "ci/ansible/#{configuration}.yaml"
+ ansible.config_file = 'ci/ansible/ansible.cfg'
+ end
+ end
+ end
+ end
+end
diff --git a/ci/vagrant/el7toel8toel9_single.rb b/ci/vagrant/el7toel8toel9_single.rb
new file mode 100644
index 00000000..8cd05ac3
--- /dev/null
+++ b/ci/vagrant/el7toel8toel9_single.rb
@@ -0,0 +1,53 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+configuration = ENV['CONFIG']
+
+Vagrant.configure('2') do |config|
+ config.vagrant.plugins = 'vagrant-libvirt'
+
+ config.vm.synced_folder '.', '/vagrant', disabled: true
+ config.ssh.disable_deprecated_algorithms = true
+ config.vm.boot_timeout = 3600
+
+ config.vm.provider 'libvirt' do |v|
+ v.uri = 'qemu:///system'
+ v.memory = 4096
+ v.machine_type = 'q35'
+ v.cpu_mode = 'host-passthrough'
+ v.cpus = 2
+ v.disk_bus = 'scsi'
+ v.disk_driver cache: 'writeback', discard: 'unmap'
+ v.random_hostname = true
+ end
+
+ # EL7toEL8
+ target_distros = ['almalinux', 'centosstream', 'oraclelinux', 'rocky']
+
+ target_distros.each do |target_distro|
+ config.vm.define "#{target_distro}_8" do |machine|
+ machine.vm.box = 'generic/centos7'
+ machine.vm.hostname = "#{target_distro}-8.test"
+ end
+ end
+
+ # EL8toEL9
+ target_distros_el9 = {
+ almalinux: 'almalinux/8',
+ # centosstream: 'generic/centos8s',
+ rocky: 'generic/rocky8'
+ }
+
+ target_distros_el9.each_pair do |vm, box|
+ config.vm.define "#{vm}_9" do |machine|
+ machine.vm.box = "#{box}"
+ machine.vm.hostname = "#{vm}-9.test"
+ end
+ end
+
+ config.vm.provision 'ansible' do |ansible|
+ ansible.compatibility_mode = '2.0'
+ ansible.playbook = "ci/ansible/#{configuration}.yaml"
+ ansible.config_file = 'ci/ansible/ansible.cfg'
+ end
+end
diff --git a/ci/vagrant/el8toel9_multi.rb b/ci/vagrant/el8toel9_multi.rb
new file mode 100644
index 00000000..370758e6
--- /dev/null
+++ b/ci/vagrant/el8toel9_multi.rb
@@ -0,0 +1,45 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+configuration = ENV['CONFIG']
+
+Vagrant.configure('2') do |config|
+ config.vagrant.plugins = 'vagrant-libvirt'
+
+ config.vm.synced_folder '.', '/vagrant', disabled: true
+ config.ssh.disable_deprecated_algorithms = true
+ config.vm.boot_timeout = 3600
+
+ config.vm.provider 'libvirt' do |v|
+ v.uri = 'qemu:///system'
+ v.memory = 4096
+ v.machine_type = 'q35'
+ v.cpu_mode = 'host-passthrough'
+ v.cpus = 2
+ v.disk_bus = 'scsi'
+ v.disk_driver cache: 'writeback', discard: 'unmap'
+ v.random_hostname = true
+ end
+
+ target_distros = {
+ almalinux: 'almalinux/8',
+ # centosstream: 'generic/centos8s',
+ rocky: 'generic/rocky8'
+ }
+
+ target_distros.each_pair do |vm, box|
+ config.vm.define "#{vm}_9" do |machine|
+ machine.vm.box = "#{box}"
+ machine.vm.hostname = "#{vm}-9.test"
+
+ if [vm, box] == target_distros.to_a.last
+ machine.vm.provision 'ansible' do |ansible|
+ ansible.compatibility_mode = '2.0'
+ ansible.limit = 'all'
+ ansible.playbook = "ci/ansible/#{configuration}.yaml"
+ ansible.config_file = 'ci/ansible/ansible.cfg'
+ end
+ end
+ end
+ end
+end
diff --git a/commands/tests/test_upgrade_paths.py b/commands/tests/test_upgrade_paths.py
index 89b5eb71..9bdf5792 100644
--- a/commands/tests/test_upgrade_paths.py
+++ b/commands/tests/test_upgrade_paths.py
@@ -42,6 +42,11 @@ def test_get_target_version(mock_open, monkeypatch):
},
)
def test_get_target_release(mock_open, monkeypatch): # do not remove mock_open
+ # Make it look like it's RHEL even on CentOS, because that's what the test
+ # assumes.
+ # Otherwise the test, when run on CentOS, fails because it works
+ # with the MAJOR.MINOR version format while CentOS uses the MAJOR format.
+ monkeypatch.setattr(command_utils, 'get_distro_id', lambda: 'rhel')
monkeypatch.setattr(command_utils, 'get_os_release_version_id', lambda x: '8.6')
# make sure env var LEAPP_DEVEL_TARGET_RELEASE takes precedence
diff --git a/etc/leapp/transaction/to_reinstall b/etc/leapp/transaction/to_reinstall
new file mode 100644
index 00000000..c6694a8e
--- /dev/null
+++ b/etc/leapp/transaction/to_reinstall
@@ -0,0 +1,3 @@
+### List of packages (each on a new line) to be reinstalled during the upgrade transaction
+### Useful for packages that have identical version strings but contain binary changes between major OS versions
+### Packages that aren't installed will be skipped
diff --git a/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py b/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py
index b28ec57c..6882488a 100644
--- a/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py
+++ b/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py
@@ -91,7 +91,7 @@ def figure_out_commands_needed_to_add_entry(kernel_path, initramfs_path, args_to
'/usr/sbin/grubby',
'--add-kernel', '{0}'.format(kernel_path),
'--initrd', '{0}'.format(initramfs_path),
- '--title', 'RHEL-Upgrade-Initramfs',
+ '--title', 'ELevate-Upgrade-Initramfs',
'--copy-default',
'--make-default',
'--args', args_to_add_str
diff --git a/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py b/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py
index e5f632bc..3e8d8c7b 100644
--- a/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py
+++ b/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py
@@ -53,7 +53,7 @@ run_args_add = [
'/usr/sbin/grubby',
'--add-kernel', '/abc',
'--initrd', '/def',
- '--title', 'RHEL-Upgrade-Initramfs',
+ '--title', 'ELevate-Upgrade-Initramfs',
'--copy-default',
'--make-default',
'--args',
diff --git a/repos/system_upgrade/common/actors/checkdnfpluginpath/actor.py b/repos/system_upgrade/common/actors/checkdnfpluginpath/actor.py
new file mode 100644
index 00000000..34055886
--- /dev/null
+++ b/repos/system_upgrade/common/actors/checkdnfpluginpath/actor.py
@@ -0,0 +1,22 @@
+from leapp.actors import Actor
+from leapp.libraries.actor.checkdnfpluginpath import perform_check
+from leapp.models import DnfPluginPathDetected
+from leapp.reporting import Report
+from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
+
+
+class CheckDnfPluginPath(Actor):
+ """
+ Inhibits the upgrade if a custom DNF plugin path is configured.
+
+ This actor checks whether the pluginpath option is configured in /etc/dnf/dnf.conf and produces a report if it is.
+ If the option is detected with any value, the upgrade is inhibited.
+ """
+
+ name = 'check_dnf_pluginpath'
+ consumes = (DnfPluginPathDetected,)
+ produces = (Report,)
+ tags = (ChecksPhaseTag, IPUWorkflowTag)
+
+ def process(self):
+ perform_check()
diff --git a/repos/system_upgrade/common/actors/checkdnfpluginpath/libraries/checkdnfpluginpath.py b/repos/system_upgrade/common/actors/checkdnfpluginpath/libraries/checkdnfpluginpath.py
new file mode 100644
index 00000000..ce705361
--- /dev/null
+++ b/repos/system_upgrade/common/actors/checkdnfpluginpath/libraries/checkdnfpluginpath.py
@@ -0,0 +1,35 @@
+from leapp import reporting
+from leapp.libraries.stdlib import api
+from leapp.models import DnfPluginPathDetected
+
+DNF_CONFIG_PATH = '/etc/dnf/dnf.conf'
+
+
+def check_dnf_pluginpath(dnf_pluginpath_detected):
+ """Create an inhibitor when pluginpath is detected in DNF configuration."""
+ if not dnf_pluginpath_detected.is_pluginpath_detected:
+ return
+ reporting.create_report([
+ reporting.Title('Detected specified pluginpath in DNF configuration.'),
+ reporting.Summary(
+ 'The "pluginpath" option is set in the {} file. The path to DNF plugins differs between '
+ 'system major releases due to different versions of Python. '
+ 'This breaks the in-place upgrades if defined explicitly as DNF plugins '
+ 'are stored on a different path on the new system.'
+ .format(DNF_CONFIG_PATH)
+ ),
+ reporting.Remediation(
+ hint='Remove or comment out the pluginpath option in the DNF '
+ 'configuration file to be able to upgrade the system',
+ commands=[['sed', '-i', '\'s/^pluginpath[[:space:]]*=/#pluginpath=/\'', DNF_CONFIG_PATH]],
+ ),
+ reporting.Severity(reporting.Severity.HIGH),
+ reporting.Groups([reporting.Groups.INHIBITOR]),
+ reporting.RelatedResource('file', DNF_CONFIG_PATH),
+ ])
+
+
+def perform_check():
+ dnf_pluginpath_detected = next(api.consume(DnfPluginPathDetected), None)
+ if dnf_pluginpath_detected:
+ check_dnf_pluginpath(dnf_pluginpath_detected)
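The check above only consumes DnfPluginPathDetected; the scanner actor producing that message is not part of this hunk. A minimal sketch, assuming Python 3's standard configparser, of how the producing side could detect the option and decide the value of the is_pluginpath_detected field (illustration only, not the actual scanner from this patch):

from configparser import ConfigParser

DNF_CONFIG_PATH = '/etc/dnf/dnf.conf'


def is_pluginpath_set(config_path=DNF_CONFIG_PATH):
    # Return True when the [main] section of dnf.conf defines a pluginpath option.
    # has_option() returns False when the section or option is missing, so a clean
    # dnf.conf yields False and no inhibitor report would be generated.
    parser = ConfigParser()
    parser.read([config_path])
    return parser.has_option('main', 'pluginpath')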
diff --git a/repos/system_upgrade/common/actors/checkdnfpluginpath/tests/test_checkdnfpluginpath.py b/repos/system_upgrade/common/actors/checkdnfpluginpath/tests/test_checkdnfpluginpath.py
new file mode 100644
index 00000000..7dd8bbf2
--- /dev/null
+++ b/repos/system_upgrade/common/actors/checkdnfpluginpath/tests/test_checkdnfpluginpath.py
@@ -0,0 +1,34 @@
+import pytest
+
+from leapp import reporting
+from leapp.libraries.actor.checkdnfpluginpath import check_dnf_pluginpath, perform_check
+from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked
+from leapp.libraries.stdlib import api
+from leapp.models import DnfPluginPathDetected
+from leapp.utils.report import is_inhibitor
+
+
+@pytest.mark.parametrize('is_detected', [False, True])
+def test_check_dnf_pluginpath(monkeypatch, is_detected):
+ actor_reports = create_report_mocked()
+ msg = DnfPluginPathDetected(is_pluginpath_detected=is_detected)
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[msg]))
+ monkeypatch.setattr(reporting, 'create_report', actor_reports)
+
+ perform_check()
+
+ assert bool(actor_reports.called) == is_detected
+
+ if is_detected:
+ assert is_inhibitor(actor_reports.report_fields)
+
+
+def test_perform_check_no_message_available(monkeypatch):
+ """Test perform_check when no DnfPluginPathDetected message is available."""
+ actor_reports = create_report_mocked()
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked())
+ monkeypatch.setattr(reporting, 'create_report', actor_reports)
+
+ perform_check()
+
+ assert not actor_reports.called
diff --git a/repos/system_upgrade/common/actors/checkenabledvendorrepos/actor.py b/repos/system_upgrade/common/actors/checkenabledvendorrepos/actor.py
new file mode 100644
index 00000000..52f5af9d
--- /dev/null
+++ b/repos/system_upgrade/common/actors/checkenabledvendorrepos/actor.py
@@ -0,0 +1,53 @@
+from leapp.actors import Actor
+from leapp.libraries.stdlib import api
+from leapp.models import (
+ RepositoriesFacts,
+ VendorSourceRepos,
+ ActiveVendorList,
+)
+from leapp.tags import FactsPhaseTag, IPUWorkflowTag
+
+
+class CheckEnabledVendorRepos(Actor):
+ """
+ Create a list of vendors whose repositories are present on the system and enabled.
+ Only those vendors' configurations (new repositories, PES actions, etc.)
+ will be included in the upgrade process.
+ """
+
+ name = "check_enabled_vendor_repos"
+ consumes = (RepositoriesFacts, VendorSourceRepos)
+ produces = (ActiveVendorList,)
+ tags = (IPUWorkflowTag, FactsPhaseTag.Before)
+
+ def process(self):
+ vendor_mapping_data = {}
+ active_vendors = set()
+
+ # Make a dict for easy mapping of repoid -> corresponding vendor name.
+ for vendor_src_repodata in api.consume(VendorSourceRepos):
+ for vendor_src_repo in vendor_src_repodata.source_repoids:
+ vendor_mapping_data[vendor_src_repo] = vendor_src_repodata.vendor
+
+ # Is the repo listed in the vendor map as from_repoid present on the system?
+ for repos_facts in api.consume(RepositoriesFacts):
+ for repo_file in repos_facts.repositories:
+ for repo_data in repo_file.data:
+ self.log.debug(
+ "Looking for repository {} in vendor maps".format(repo_data.repoid)
+ )
+ if repo_data.enabled and repo_data.repoid in vendor_mapping_data:
+ # If the vendor's repository is present in the system and enabled, count the vendor as active.
+ new_vendor = vendor_mapping_data[repo_data.repoid]
+ self.log.debug(
+ "Repository {} found and enabled, enabling vendor {}".format(
+ repo_data.repoid, new_vendor
+ )
+ )
+ active_vendors.add(new_vendor)
+
+ if active_vendors:
+ self.log.debug("Active vendor list: {}".format(active_vendors))
+ api.produce(ActiveVendorList(data=list(active_vendors)))
+ else:
+ self.log.info("No active vendors found, vendor list not generated")
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
index 56a94b5d..46c5d9b6 100755
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
@@ -390,4 +390,3 @@ getarg 'rd.break=leapp-logs' 'rd.upgrade.break=leapp-finish' && {
sync
mount -o "remount,$old_opts" "$NEWROOT"
exit $result
-
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/module-setup.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/module-setup.sh
index d73060cb..45f98148 100755
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/module-setup.sh
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/module-setup.sh
@@ -102,7 +102,6 @@ install() {
inst_binary grep
# script to actually run the upgrader binary
- inst_hook upgrade 49 "$_moddir/mount_usr.sh"
inst_hook upgrade 50 "$_moddir/do-upgrade.sh"
#NOTE: some clean up?.. ideally, everything should be inside the leapp*
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/mount_usr.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/mount_usr.sh
deleted file mode 100755
index 9366ac13..00000000
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/mount_usr.sh
+++ /dev/null
@@ -1,148 +0,0 @@
-#!/bin/sh
-# -*- mode: shell-script; indent-tabs-mode: nil; sh-basic-offset: 4; -*-
-# ex: ts=8 sw=4 sts=4 et filetype=sh
-
-type info >/dev/null 2>&1 || . /lib/dracut-lib.sh
-
-export NEWROOT=${NEWROOT:-"/sysroot"}
-
-filtersubvol() {
- _oldifs="$IFS"
- IFS=","
- set "$@"
- IFS="$_oldifs"
- while [ $# -gt 0 ]; do
- case $1 in
- subvol=*) :;;
- *) printf '%s' "${1}," ;;
- esac
- shift
- done
-}
-
-mount_usr()
-{
- #
- # mount_usr [true | false]
- # Expected a "true" value for the last attempt to mount /usr. On the last
- # attempt, in case of failure drop to shell.
- #
- # Return 0 when everything is all right
- # In case of failure and /usr has been detected:
- # return 2 when $1 is "true" (drop to shell invoked)
- # (note: possibly it's nonsense, but to be sure..)
- # return 1 otherwise
- #
- _last_attempt="$1"
- # check, if we have to mount the /usr filesystem
- while read -r _dev _mp _fs _opts _freq _passno; do
- [ "${_dev%%#*}" != "$_dev" ] && continue
- if [ "$_mp" = "/usr" ]; then
- case "$_dev" in
- LABEL=*)
- _dev="$(echo "$_dev" | sed 's,/,\\x2f,g')"
- _dev="/dev/disk/by-label/${_dev#LABEL=}"
- ;;
- UUID=*)
- _dev="${_dev#block:}"
- _dev="/dev/disk/by-uuid/${_dev#UUID=}"
- ;;
- esac
-
- # shellcheck disable=SC2154 # Variable root is assigned by dracut
- _root_dev=${root#block:}
-
- if strstr "$_opts" "subvol=" && \
- [ "$(stat -c '%D:%i' "$_root_dev")" = "$(stat -c '%D:%i' "$_dev")" ] && \
- [ -n "$rflags" ]; then
- # for btrfs subvolumes we have to mount /usr with the same rflags
- rflags=$(filtersubvol "$rflags")
- rflags=${rflags%%,}
- _opts="${_opts:+${_opts},}${rflags}"
- elif getargbool 0 ro; then
- # if "ro" is specified, we want /usr to be mounted read-only
- _opts="${_opts:+${_opts},}ro"
- elif getargbool 0 rw; then
- # if "rw" is specified, we want /usr to be mounted read-write
- _opts="${_opts:+${_opts},}rw"
- fi
- echo "$_dev ${NEWROOT}${_mp} $_fs ${_opts} $_freq $_passno"
- _usr_found="1"
- break
- fi
- done < "${NEWROOT}/etc/fstab" >> /etc/fstab
-
- if [ "$_usr_found" = "" ]; then
- # nothing to do
- return 0
- fi
-
- info "Mounting /usr with -o $_opts"
- mount "${NEWROOT}/usr" 2>&1 | vinfo
- mount -o remount,rw "${NEWROOT}/usr"
-
- if ismounted "${NEWROOT}/usr"; then
- # success!!
- return 0
- fi
-
- if [ "$_last_attempt" = "true" ]; then
- warn "Mounting /usr to ${NEWROOT}/usr failed"
- warn "*** Dropping you to a shell; the system will continue"
- warn "*** when you leave the shell."
- action_on_fail
- return 2
- fi
-
- return 1
-}
-
-
-try_to_mount_usr() {
- _last_attempt="$1"
- if [ ! -f "${NEWROOT}/etc/fstab" ]; then
- warn "File ${NEWROOT}/etc/fstab doesn't exist."
- return 1
- fi
-
- # In case we have the LVM command available try make it activate all partitions
- if command -v lvm 2>/dev/null 1>/dev/null; then
- lvm vgchange --sysinit -a y || {
- warn "Detected problem when tried to activate LVM VG."
- if [ "$_last_attempt" != "true" ]; then
- # this is not last execution, retry
- return 1
- fi
- # NOTE(pstodulk):
- # last execution, so call mount_usr anyway
- # I am not 100% about lvm vgchange exit codes and I am aware of
- # possible warnings, in this last run, let's keep it on mount_usr
- # anyway..
- }
- fi
-
- mount_usr "$1"
-}
-
-_sleep_timeout=15
-_last_attempt="false"
-for i in 0 1 2 3 4 5 6 7 8 9 10 11; do
- info "Storage initialisation: Attempt $i of 11. Wait $_sleep_timeout seconds."
- sleep $_sleep_timeout
- if [ $i -eq 11 ]; then
- _last_attempt="true"
- fi
- try_to_mount_usr "$_last_attempt" && break
-
- # something is wrong. In some cases, storage needs more time for the
- # initialisation - especially in case of SAN.
-
- if [ "$_last_attempt" = "true" ]; then
- warn "The last attempt to initialize storage has not been successful."
- warn "Unknown state of the storage. It is possible that upgrade will be stopped."
- break
- fi
-
- warn "Failed attempt to initialize the storage. Retry..."
-done
-
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/initrd-cleanup-override.conf b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/initrd-cleanup-override.conf
new file mode 100644
index 00000000..d24e0ef0
--- /dev/null
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/initrd-cleanup-override.conf
@@ -0,0 +1,3 @@
+[Service]
+ExecStart=
+ExecStart=-/usr/bin/true
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/module-setup.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/module-setup.sh
index 06479fb5..30ae57b3 100755
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/module-setup.sh
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/module-setup.sh
@@ -54,6 +54,17 @@ install() {
ln -sf "../${s}.service" "$upgrade_wantsdir"
done
+ # Set up a modified initrd-cleanup.service in the upgrade initramfs to enable
+ # storage initialisation using systemd-fstab-generator. We want to run
+ # initrd-parse-etc.service, but that one also triggers initrd-cleanup.service,
+ # which triggers the switch-root and isolate actions that basically kill
+ # the original upgrade service when used.
+ # The initrd-parse-etc.service has different content across RHEL systems,
+ # so we override initrd-cleanup.service instead, as we do not need that one
+ # during the upgrade process.
+ mkdir -p "${unitdir}/initrd-cleanup.service.d"
+ inst_simple "${_moddir}/initrd-cleanup-override.conf" "${unitdir}/initrd-cleanup.service.d/initrd-cleanup-override.conf"
+
# just try : set another services into the wantsdir
# sysroot.mount \
# dracut-mount \
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/upgrade.target b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/upgrade.target
index 366b5cab..d2bf7313 100644
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/upgrade.target
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/upgrade.target
@@ -2,7 +2,7 @@
Description=System Upgrade
Documentation=man:upgrade.target(7)
# ##sysinit.target sockets.target initrd-root-fs.target initrd-root-device.target initrd-fs.target
-Wants=initrd-root-fs.target initrd-root-device.target initrd-fs.target initrd-usr-fs.target
+Wants=initrd-root-fs.target initrd-root-device.target initrd-fs.target initrd-usr-fs.target initrd-parse-etc.service
Requires=basic.target sysroot.mount
-After=basic.target sysroot.mount
+After=basic.target sysroot.mount initrd-fs.target
AllowIsolate=yes
diff --git a/repos/system_upgrade/common/actors/createresumeservice/tests/test_createresumeservice.py b/repos/system_upgrade/common/actors/createresumeservice/tests/test_createresumeservice.py
index 5302cdd2..c1cefc37 100644
--- a/repos/system_upgrade/common/actors/createresumeservice/tests/test_createresumeservice.py
+++ b/repos/system_upgrade/common/actors/createresumeservice/tests/test_createresumeservice.py
@@ -6,7 +6,7 @@ import pytest
@pytest.mark.skipif(os.getuid() != 0, reason='User is not a root')
@pytest.mark.skipif(
- distro.linux_distribution()[0] == 'Fedora',
+ distro.id() == 'fedora',
reason='default.target.wants does not exists on Fedora distro',
)
def test_create_resume_service(current_actor_context):
diff --git a/repos/system_upgrade/common/actors/distributionsignedrpmscanner/actor.py b/repos/system_upgrade/common/actors/distributionsignedrpmscanner/actor.py
index 003f3fc5..9e7bbf4a 100644
--- a/repos/system_upgrade/common/actors/distributionsignedrpmscanner/actor.py
+++ b/repos/system_upgrade/common/actors/distributionsignedrpmscanner/actor.py
@@ -1,6 +1,6 @@
from leapp.actors import Actor
from leapp.libraries.actor import distributionsignedrpmscanner
-from leapp.models import DistributionSignedRPM, InstalledRPM, InstalledUnsignedRPM, ThirdPartyRPM
+from leapp.models import DistributionSignedRPM, InstalledRPM, InstalledUnsignedRPM, ThirdPartyRPM, VendorSignatures
from leapp.tags import FactsPhaseTag, IPUWorkflowTag
from leapp.utils.deprecation import suppress_deprecation
@@ -8,7 +8,7 @@ from leapp.utils.deprecation import suppress_deprecation
@suppress_deprecation(InstalledUnsignedRPM)
class DistributionSignedRpmScanner(Actor):
"""
- Provide data about distribution signed & third-party RPM packages.
+ Provide data about distribution-signed, vendor-signed and third-party RPM packages.
For various checks and actions done during the upgrade it's important to
know what packages are signed by GPG keys of the installed linux system
@@ -22,11 +22,18 @@ class DistributionSignedRpmScanner(Actor):
common/files/distro/<distro>/gpg_signatures.json
where <distro> is distribution ID of the installed system (e.g. centos, rhel).
- If the file for the installed distribution is not found, end with error.
+ Fingerprints of vendor GPG keys are stored under
+ /etc/leapp/files/vendors.d/<vendor>.sigs
+ where <vendor> is the name of the vendor (e.g. mariadb, postgresql).
+
+ The "Distribution" in the name of the actor is a historical artifact - the actor
+ handles both the distribution and all vendors present in the config files.
+
+ If the file for the installed distribution is not found, end with an error.
"""
name = 'distribution_signed_rpm_scanner'
- consumes = (InstalledRPM,)
+ consumes = (InstalledRPM, VendorSignatures)
produces = (DistributionSignedRPM, InstalledUnsignedRPM, ThirdPartyRPM)
tags = (IPUWorkflowTag, FactsPhaseTag)
diff --git a/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py b/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py
index f42909f0..6383a56f 100644
--- a/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py
+++ b/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py
@@ -1,17 +1,117 @@
+import os
+import re
+
+from leapp.libraries.stdlib import run, api
from leapp.actors import Actor
-from leapp.libraries.common import efi_reboot_fix
+from leapp.models import InstalledTargetKernelVersion, KernelCmdlineArg, FirmwareFacts, MountEntry
from leapp.tags import FinalizationPhaseTag, IPUWorkflowTag
+from leapp.exceptions import StopActorExecutionError
class EfiFinalizationFix(Actor):
"""
- Adjust EFI boot entry for final reboot
+ Ensure that the EFI boot order is updated, which is particularly necessary
+ when upgrading to a different OS distro. Also rebuild the grub config
+ if necessary.
"""
name = 'efi_finalization_fix'
- consumes = ()
+ consumes = (KernelCmdlineArg, InstalledTargetKernelVersion, FirmwareFacts, MountEntry)
produces = ()
- tags = (FinalizationPhaseTag, IPUWorkflowTag)
+ tags = (FinalizationPhaseTag.Before, IPUWorkflowTag)
def process(self):
- efi_reboot_fix.maybe_emit_updated_boot_entry()
+ is_system_efi = False
+ ff = next(self.consume(FirmwareFacts), None)
+
+ dirname = {
+ 'AlmaLinux': 'almalinux',
+ 'CentOS Linux': 'centos',
+ 'CentOS Stream': 'centos',
+ 'Oracle Linux Server': 'redhat',
+ 'Red Hat Enterprise Linux': 'redhat',
+ 'Rocky Linux': 'rocky',
+ 'Scientific Linux': 'redhat',
+ }
+
+ efi_shimname_dict = {
+ 'x86_64': 'shimx64.efi',
+ 'aarch64': 'shimaa64.efi'
+ }
+
+ def devparts(dev):
+ """
+ NVMe block devices aren't named like SCSI/ATA/etc block devices and must be parsed differently.
+ SCSI/ATA/etc devices have a syntax resembling /dev/sdb4 for the 4th partition on the 2nd disk.
+ NVMe devices have a syntax resembling /dev/nvme0n2p4 for the 4th partition on the 2nd disk.
+ """
+ if '/dev/nvme' in dev:
+ # NVMe: the partition is the trailing 'pN' suffix of
+ # the device path, so strip it from the device name
+ # and keep only the number.
+ part = next(re.finditer(r'p\d+$', dev)).group(0)
+ dev = dev[:-len(part)]
+ part = part[1:]
+ else:
+ # Non-NVMe (SCSI, ATA, etc): the partition is just the
+ # trailing number of the device path, so strip it from
+ # the device name.
+ part = next(re.finditer(r'\d+$', dev)).group(0)
+ dev = dev[:-len(part)]
+ return [dev, part]
+
+ with open('/etc/system-release', 'r') as sr:
+ release_line = next(line for line in sr if 'release' in line)
+ distro = release_line.split(' release ', 1)[0]
+
+ efi_bootentry_label = distro
+ distro_dir = dirname.get(distro, 'default')
+ shim_filename = efi_shimname_dict.get(api.current_actor().configuration.architecture, 'shimx64.efi')
+
+ shim_path = '/boot/efi/EFI/' + distro_dir + '/' + shim_filename
+ grub_cfg_path = '/boot/efi/EFI/' + distro_dir + '/grub.cfg'
+ bootmgr_path = '\\EFI\\' + distro_dir + '\\' + shim_filename
+
+ has_efibootmgr = os.path.exists('/sbin/efibootmgr')
+ has_shim = os.path.exists(shim_path)
+ has_grub_cfg = os.path.exists(grub_cfg_path)
+
+ if not ff:
+ raise StopActorExecutionError(
+ 'Could not identify system firmware',
+ details={'details': 'Actor did not receive FirmwareFacts message.'}
+ )
+
+ if not has_efibootmgr:
+ return
+
+ for fact in self.consume(FirmwareFacts):
+ if fact.firmware == 'efi':
+ is_system_efi = True
+ break
+
+ if is_system_efi and has_shim:
+ efidevlist = []
+ with open('/proc/mounts', 'r') as fp:
+ for line in fp:
+ if '/boot/efi' in line:
+ efidevpath = line.split(' ', 1)[0]
+ efidevpart = efidevpath.split('/')[-1]
+ if os.path.exists('/proc/mdstat'):
+ with open('/proc/mdstat', 'r') as mds:
+ for line in mds:
+ if line.startswith(efidevpart):
+ mddev = line.split(' ')
+ for md in mddev:
+ if '[' in md:
+ efimd = md.split('[', 1)[0]
+ efidp = efidevpath.replace(efidevpart, efimd)
+ efidevlist.append(efidp)
+ if len(efidevlist) == 0:
+ efidevlist.append(efidevpath)
+ for devpath in efidevlist:
+ efidev, efipart = devparts(devpath)
+ run(['/sbin/efibootmgr', '-c', '-d', efidev, '-p', efipart, '-l', bootmgr_path, '-L', efi_bootentry_label])
+
+ if not has_grub_cfg:
+ run(['/sbin/grub2-mkconfig', '-o', grub_cfg_path])
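The devparts() helper above encodes the difference between NVMe and SCSI/ATA partition naming. A standalone sketch of the same parsing, runnable outside the actor, using illustrative device paths:

import re


def split_device_and_partition(dev):
    # NVMe partitions end with a 'pN' suffix, SCSI/ATA partitions with a bare number;
    # this mirrors the logic of devparts() above under the same naming assumptions.
    if '/dev/nvme' in dev:
        part = next(re.finditer(r'p\d+$', dev)).group(0)
        return dev[:-len(part)], part[1:]
    part = next(re.finditer(r'\d+$', dev)).group(0)
    return dev[:-len(part)], part


assert split_device_and_partition('/dev/nvme0n2p4') == ('/dev/nvme0n2', '4')
assert split_device_and_partition('/dev/sdb4') == ('/dev/sdb', '4')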
diff --git a/repos/system_upgrade/common/actors/filterrpmtransactionevents/actor.py b/repos/system_upgrade/common/actors/filterrpmtransactionevents/actor.py
index 582a5821..18f2c33f 100644
--- a/repos/system_upgrade/common/actors/filterrpmtransactionevents/actor.py
+++ b/repos/system_upgrade/common/actors/filterrpmtransactionevents/actor.py
@@ -32,6 +32,7 @@ class FilterRpmTransactionTasks(Actor):
to_remove = set()
to_keep = set()
to_upgrade = set()
+ to_reinstall = set()
modules_to_enable = {}
modules_to_reset = {}
for event in self.consume(RpmTransactionTasks, PESRpmTransactionTasks):
@@ -39,13 +40,14 @@ class FilterRpmTransactionTasks(Actor):
to_install.update(event.to_install)
to_remove.update(installed_pkgs.intersection(event.to_remove))
to_keep.update(installed_pkgs.intersection(event.to_keep))
+ to_reinstall.update(installed_pkgs.intersection(event.to_reinstall))
modules_to_enable.update({'{}:{}'.format(m.name, m.stream): m for m in event.modules_to_enable})
modules_to_reset.update({'{}:{}'.format(m.name, m.stream): m for m in event.modules_to_reset})
to_remove.difference_update(to_keep)
# run upgrade for the rest of RH signed pkgs which we do not have rule for
- to_upgrade = installed_pkgs - (to_install | to_remove)
+ to_upgrade = installed_pkgs - (to_install | to_remove | to_reinstall)
self.produce(FilteredRpmTransactionTasks(
local_rpms=list(local_rpms),
@@ -53,5 +55,6 @@ class FilterRpmTransactionTasks(Actor):
to_remove=list(to_remove),
to_keep=list(to_keep),
to_upgrade=list(to_upgrade),
+ to_reinstall=list(to_reinstall),
modules_to_reset=list(modules_to_reset.values()),
modules_to_enable=list(modules_to_enable.values())))
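A tiny worked example of the set arithmetic introduced above: packages marked for reinstall are now excluded from the generic upgrade set, alongside those to install and remove. All package names are made up:

installed_pkgs = {'bash', 'vim', 'httpd', 'openssl'}
to_install = {'new-tool'}
to_remove = {'httpd'}
to_reinstall = {'openssl'}   # e.g. identical version string, different binaries

# Everything installed that has no explicit rule is left to a plain upgrade.
to_upgrade = installed_pkgs - (to_install | to_remove | to_reinstall)
assert to_upgrade == {'bash', 'vim'}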
diff --git a/repos/system_upgrade/common/actors/initramfs/enable_lvm_autoactivation/actor.py b/repos/system_upgrade/common/actors/initramfs/enable_lvm_autoactivation/actor.py
new file mode 100644
index 00000000..aba60645
--- /dev/null
+++ b/repos/system_upgrade/common/actors/initramfs/enable_lvm_autoactivation/actor.py
@@ -0,0 +1,21 @@
+from leapp.actors import Actor
+from leapp.libraries.actor import enable_lvm_autoactivation as enable_lvm_autoactivation_lib
+from leapp.models import DistributionSignedRPM, UpgradeInitramfsTasks
+from leapp.tags import FactsPhaseTag, IPUWorkflowTag
+
+
+class EnableLVMAutoactivation(Actor):
+ """
+ Enable LVM autoactivation in upgrade initramfs.
+
+ Produce instructions for upgrade initramfs generation that will result in LVM
+ autoactivation in the initramfs.
+ """
+
+ name = 'enable_lvm_autoactivation'
+ consumes = (DistributionSignedRPM,)
+ produces = (UpgradeInitramfsTasks, )
+ tags = (FactsPhaseTag, IPUWorkflowTag)
+
+ def process(self):
+ enable_lvm_autoactivation_lib.emit_lvm_autoactivation_instructions()
diff --git a/repos/system_upgrade/common/actors/initramfs/enable_lvm_autoactivation/libraries/enable_lvm_autoactivation.py b/repos/system_upgrade/common/actors/initramfs/enable_lvm_autoactivation/libraries/enable_lvm_autoactivation.py
new file mode 100644
index 00000000..e312277b
--- /dev/null
+++ b/repos/system_upgrade/common/actors/initramfs/enable_lvm_autoactivation/libraries/enable_lvm_autoactivation.py
@@ -0,0 +1,21 @@
+from leapp.libraries.common.rpms import has_package
+from leapp.libraries.stdlib import api
+from leapp.models import DistributionSignedRPM, UpgradeInitramfsTasks
+
+
+def emit_lvm_autoactivation_instructions():
+ if not has_package(DistributionSignedRPM, 'lvm2'):
+ api.current_logger().debug(
+ 'Upgrade initramfs will not autoenable LVM devices - `lvm2` RPM is not installed.'
+ )
+ return
+
+ # the 69-dm-lvm.rules trigger pvscan and vgchange when LVM device is detected
+ files_to_include = [
+ '/usr/sbin/pvscan',
+ '/usr/sbin/vgchange',
+ '/usr/lib/udev/rules.d/69-dm-lvm.rules'
+ ]
+ lvm_autoactivation_instructions = UpgradeInitramfsTasks(include_files=files_to_include)
+
+ api.produce(lvm_autoactivation_instructions)
diff --git a/repos/system_upgrade/common/actors/initramfs/enable_lvm_autoactivation/tests/test_lvm_autoactivation_enablement.py b/repos/system_upgrade/common/actors/initramfs/enable_lvm_autoactivation/tests/test_lvm_autoactivation_enablement.py
new file mode 100644
index 00000000..c5150aea
--- /dev/null
+++ b/repos/system_upgrade/common/actors/initramfs/enable_lvm_autoactivation/tests/test_lvm_autoactivation_enablement.py
@@ -0,0 +1,50 @@
+from leapp.libraries.actor import enable_lvm_autoactivation
+from leapp.libraries.common.testutils import CurrentActorMocked, produce_mocked
+from leapp.libraries.stdlib import api
+from leapp.models import DistributionSignedRPM, RPM, UpgradeInitramfsTasks
+
+
+def test_emit_lvm_autoactivation_instructions_produces_correct_message(monkeypatch):
+ """Test that emit_lvm_autoactivation_instructions produces UpgradeInitramfsTasks with correct files."""
+ lvm_package = RPM(
+ name='lvm2',
+ version='2',
+ release='1',
+ epoch='1',
+ packager='',
+ arch='x86_64',
+ pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'
+ )
+
+ msgs = [
+ DistributionSignedRPM(items=[lvm_package])
+ ]
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs))
+ monkeypatch.setattr(api, 'produce', produce_mocked())
+
+ enable_lvm_autoactivation.emit_lvm_autoactivation_instructions()
+
+ assert api.produce.called == 1
+
+ produced_msg = api.produce.model_instances[0]
+
+ assert isinstance(produced_msg, UpgradeInitramfsTasks)
+
+ expected_files = [
+ '/usr/sbin/pvscan',
+ '/usr/sbin/vgchange',
+ '/usr/lib/udev/rules.d/69-dm-lvm.rules'
+ ]
+ assert produced_msg.include_files == expected_files
+
+
+def test_no_action_if_lvm_rpm_missing(monkeypatch):
+ msgs = [
+ DistributionSignedRPM(items=[])
+ ]
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs))
+ monkeypatch.setattr(api, 'produce', produce_mocked())
+
+ enable_lvm_autoactivation.emit_lvm_autoactivation_instructions()
+
+ assert api.produce.called == 0
diff --git a/repos/system_upgrade/common/actors/initramfs/mount_units_generator/actor.py b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/actor.py
new file mode 100644
index 00000000..5fe25515
--- /dev/null
+++ b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/actor.py
@@ -0,0 +1,22 @@
+from leapp.actors import Actor
+from leapp.libraries.actor import mount_unit_generator as mount_unit_generator_lib
+from leapp.models import TargetUserSpaceInfo, UpgradeInitramfsTasks
+from leapp.tags import InterimPreparationPhaseTag, IPUWorkflowTag
+
+
+class MountUnitGenerator(Actor):
+ """
+ Sets up storage initialization using systemd's mount units in the upgrade container.
+ """
+
+ name = 'mount_unit_generator'
+ consumes = (
+ TargetUserSpaceInfo,
+ )
+ produces = (
+ UpgradeInitramfsTasks,
+ )
+ tags = (IPUWorkflowTag, InterimPreparationPhaseTag)
+
+ def process(self):
+ mount_unit_generator_lib.setup_storage_initialization()
diff --git a/repos/system_upgrade/common/actors/initramfs/mount_units_generator/libraries/mount_unit_generator.py b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/libraries/mount_unit_generator.py
new file mode 100644
index 00000000..e1060559
--- /dev/null
+++ b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/libraries/mount_unit_generator.py
@@ -0,0 +1,307 @@
+import os
+import shutil
+import tempfile
+
+from leapp.exceptions import StopActorExecutionError
+from leapp.libraries.common import mounting
+from leapp.libraries.stdlib import api, CalledProcessError, run
+from leapp.models import TargetUserSpaceInfo, UpgradeInitramfsTasks
+
+
+def run_systemd_fstab_generator(output_directory):
+ api.current_logger().debug(
+ 'Generating mount units for the source system into {}'.format(output_directory)
+ )
+
+ try:
+ generator_cmd = [
+ '/usr/lib/systemd/system-generators/systemd-fstab-generator',
+ output_directory,
+ output_directory,
+ output_directory
+ ]
+ run(generator_cmd)
+ except CalledProcessError as error:
+ api.current_logger().error(
+ 'Failed to generate mount units using systemd-fstab-generator. Error: {}'.format(error)
+ )
+ details = {'details': str(error)}
+ raise StopActorExecutionError(
+ 'Failed to generate mount units using systemd-fstab-generator',
+ details
+ )
+
+ api.current_logger().debug(
+ 'Mount units successfully generated into {}'.format(output_directory)
+ )
+
+
+def _read_unit_file_lines(unit_file_path): # Encapsulate IO for tests
+ with open(unit_file_path) as unit_file:
+ return unit_file.readlines()
+
+
+def _write_unit_file_lines(unit_file_path, lines): # Encapsulate IO for tests
+ with open(unit_file_path, 'w') as unit_file:
+ unit_file.write('\n'.join(lines) + '\n')
+
+
+def _delete_file(file_path):
+ os.unlink(file_path)
+
+
+def _prefix_mount_unit_with_sysroot(mount_unit_path, new_unit_destination):
+ """
+ Prefix the mount target with /sysroot as expected in the upgrade initramfs.
+
+ A new mount unit file is written to new_unit_destination.
+ """
+ # NOTE(pstodulk): Note that right now we update just the 'Where' key, however
+ # what about RequiresMountsFor, .. there could be some hidden dragons.
+ # In case of issues, investigate these values in generated unit files.
+ api.current_logger().debug(
+ 'Prefixing {}\'s mount target with /sysroot. Output will be written to {}'.format(
+ mount_unit_path,
+ new_unit_destination
+ )
+ )
+ unit_lines = _read_unit_file_lines(mount_unit_path)
+
+ output_lines = []
+ for line in unit_lines:
+ line = line.strip()
+ if not line.startswith('Where='):
+ output_lines.append(line)
+ continue
+
+ _, destination = line.split('=', 1)
+ new_destination = os.path.join('/sysroot', destination.lstrip('/'))
+
+ output_lines.append('Where={}'.format(new_destination))
+
+ _write_unit_file_lines(new_unit_destination, output_lines)
+
+ api.current_logger().debug(
+ 'Done. Modified mount unit successfully written to {}'.format(new_unit_destination)
+ )
+
+
+def prefix_all_mount_units_with_sysroot(dir_containing_units):
+ for unit_file_path in os.listdir(dir_containing_units):
+ # systemd requires mount path to be in the unit name
+ modified_unit_destination = 'sysroot-{}'.format(unit_file_path)
+ modified_unit_destination = os.path.join(dir_containing_units, modified_unit_destination)
+
+ unit_file_path = os.path.join(dir_containing_units, unit_file_path)
+
+ if not unit_file_path.endswith('.mount'):
+ api.current_logger().debug(
+ 'Skipping {} when prefixing mount units with /sysroot - not a mount unit.'.format(
+ unit_file_path
+ )
+ )
+ continue
+
+ _prefix_mount_unit_with_sysroot(unit_file_path, modified_unit_destination)
+
+ _delete_file(unit_file_path)
+ api.current_logger().debug('Original mount unit {} removed.'.format(unit_file_path))
+
+
+def _fix_symlinks_in_dir(dir_containing_mount_units, target_dir):
+ """
+ Fix broken symlinks in given target_dir due to us modifying (renaming) the mount units.
+
+ The target_dir contains symlinks to the (mount) units that are required
+ in order for the local-fs.target to be reached. However, we renamed these units to reflect
+ that we have changed their mount destinations by prefixing the mount destination with /sysroot.
+ Hence, we regenerate the symlinks.
+ """
+
+ target_dir_path = os.path.join(dir_containing_mount_units, target_dir)
+ if not os.path.exists(target_dir_path):
+ api.current_logger().debug(
+ 'The {} directory does not exist. Skipping'
+ .format(target_dir)
+ )
+ return
+
+ api.current_logger().debug(
+ 'Removing the old {} directory from {}.'
+ .format(target_dir, dir_containing_mount_units)
+ )
+
+ shutil.rmtree(target_dir_path)
+ os.mkdir(target_dir_path)
+
+ api.current_logger().debug('Populating {} with new symlinks.'.format(target_dir))
+
+ for unit_file in os.listdir(dir_containing_mount_units):
+ if not unit_file.endswith('.mount'):
+ continue
+
+ place_fastlink_at = os.path.join(target_dir_path, unit_file)
+ fastlink_points_to = os.path.join('../', unit_file)
+ try:
+ run(['ln', '-s', fastlink_points_to, place_fastlink_at])
+
+ api.current_logger().debug(
+ 'Dependency on {} created.'.format(unit_file)
+ )
+ except CalledProcessError as err:
+ err_descr = (
+ 'Failed to create required unit dependencies under {} for the upgrade initramfs.'
+ .format(target_dir)
+ )
+ details = {'details': str(err)}
+ raise StopActorExecutionError(err_descr, details=details)
+
+
+def fix_symlinks_in_targets(dir_containing_mount_units):
+ """
+ Fix broken symlinks in *.target.* directories caused by earlier modified mount units.
+
+ Generated mount unit files are part of one of the systemd targets (list below),
+ which means that a symlink from a systemd target exists for each of
+ them. Based on this, systemd knows whether (for local or remote file systems)
+ they must (".requires" suffix) or could (".wants" suffix) be mounted.
+ See man 5 systemd.mount for more details on how mount units are split into
+ these targets.
+
+ The list of possible target directories where these mount units could end:
+ * local-fs.target.requires
+ * local-fs.target.wants
+ * local-fs-pre.target.requires
+ * local-fs-pre.target.wants
+ * remote-fs.target.requires
+ * remote-fs.target.wants
+ * remote-fs-pre.target.requires
+ * remote-fs-pre.target.wants
+ Most likely, unit files are not generated for "*pre*" targets, but they are
+ listed just to be sure. A longer list does not cause any issues in this code.
+
+ In most cases, "local-fs.target.requires" is the only important directory
+ for us during the upgrade. But in some (sometimes common) cases we will
+ need some of the others as well.
+
+ These directories do not necessarily have to exist if there are no mount
+ unit files that could be put there. But most likely "local-fs.target.requires"
+ will always exist.
+ """
+ dir_list = [
+ 'local-fs.target.requires',
+ 'local-fs.target.wants',
+ 'local-fs-pre.target.requires',
+ 'local-fs-pre.target.wants',
+ 'remote-fs.target.requires',
+ 'remote-fs.target.wants',
+ 'remote-fs-pre.target.requires',
+ 'remote-fs-pre.target.wants',
+ ]
+ for tdir in dir_list:
+ _fix_symlinks_in_dir(dir_containing_mount_units, tdir)
+
+
+def copy_units_into_system_location(upgrade_container_ctx, dir_with_our_mount_units):
+ """
+ Copy units and their .wants/.requires directories into the target userspace container.
+
+ :return: A list of files in the target userspace that were created by copying.
+ :rtype: list[str]
+ """
+ dest_inside_container = '/usr/lib/systemd/system'
+
+ api.current_logger().debug(
+ 'Copying generated mount units for upgrade from {} to {}'.format(
+ dir_with_our_mount_units,
+ upgrade_container_ctx.full_path(dest_inside_container)
+ )
+ )
+
+ copied_files = []
+ prefix_len_to_drop = len(upgrade_container_ctx.base_dir)
+
+ # We cannot rely on mounting library when copying into container
+ # as we want to control what happens to symlinks and
+ # shutil.copytree in Python3.6 fails if dst directory exists already
+ # - which happens in some cases when copying these files.
+ for root, dummy_dirs, files in os.walk(dir_with_our_mount_units):
+ rel_path = os.path.relpath(root, dir_with_our_mount_units)
+ if rel_path == '.':
+ rel_path = ''
+ dst_dir = os.path.join(upgrade_container_ctx.full_path(dest_inside_container), rel_path)
+ os.makedirs(dst_dir, mode=0o755, exist_ok=True)
+
+ for file in files:
+ src_file = os.path.join(root, file)
+ dst_file = os.path.join(dst_dir, file)
+ api.current_logger().debug(
+ 'Copying mount unit file {} to {}'.format(src_file, dst_file)
+ )
+ if os.path.islink(dst_file):
+ # If the destination file already exists and is a symlink, the copy
+ # could fail or write through the link instead of replacing it.
+ # NOTE(pstodulk): You could think that this cannot happen, but it
+ # possibly could in the future, so let's rather be careful and
+ # handle it. If the dst file exists, we want to be sure it gets
+ # overwritten.
+ _delete_file(dst_file)
+ shutil.copy2(src_file, dst_file, follow_symlinks=False)
+ copied_files.append(dst_file[prefix_len_to_drop:])
+
+ return copied_files
+
+
+def remove_units_for_targets_that_are_already_mounted_by_dracut(dir_with_our_mount_units):
+ """
+ Remove mount units for mount targets that are already mounted by dracut.
+
+ Namely, remove mount units:
+ '-.mount' (mounts /)
+ 'usr.mount' (mounts /usr)
+ """
+
+ # NOTE: remount-fs.service creates dependency cycles that are nondeterministically broken
+ # by systemd, causing unpredictable failures. The service is supposed to remount root
+ # and /usr, reapplying mount options from /etc/fstab. However, the fstab file present in
+ # the initramfs is not the fstab from the source system, and, therefore, it is pointless
+ # to require the service. It would make sense after we switched root during normal boot
+ # process.
+ already_mounted_units = [
+ '-.mount',
+ 'usr.mount',
+ 'local-fs.target.wants/systemd-remount-fs.service'
+ ]
+
+ for unit in already_mounted_units:
+ unit_location = os.path.join(dir_with_our_mount_units, unit)
+
+ if not os.path.exists(unit_location):
+ api.current_logger().debug('The {} unit does not exists, no need to remove it.'.format(unit))
+ continue
+
+ _delete_file(unit_location)
+
+
+def request_units_inclusion_in_initramfs(files_to_include):
+ api.current_logger().debug('Including the following files into initramfs: {}'.format(files_to_include))
+
+ additional_files = [
+ '/usr/sbin/swapon' # If the system has swap, we have also generated a swap unit to activate it
+ ]
+
+ tasks = UpgradeInitramfsTasks(include_files=files_to_include + additional_files)
+ api.produce(tasks)
+
+
+def setup_storage_initialization():
+ userspace_info = next(api.consume(TargetUserSpaceInfo), None)
+
+ with mounting.NspawnActions(base_dir=userspace_info.path) as upgrade_container_ctx:
+ with tempfile.TemporaryDirectory(dir='/var/lib/leapp/', prefix='tmp_systemd_fstab_') as workspace_path:
+ run_systemd_fstab_generator(workspace_path)
+ remove_units_for_targets_that_are_already_mounted_by_dracut(workspace_path)
+ prefix_all_mount_units_with_sysroot(workspace_path)
+ fix_symlinks_in_targets(workspace_path)
+ mount_unit_files = copy_units_into_system_location(upgrade_container_ctx, workspace_path)
+ request_units_inclusion_in_initramfs(mount_unit_files)
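The core transformation in _prefix_mount_unit_with_sysroot() is rewriting the Where= key of each generated mount unit. A minimal in-memory sketch of that rewrite, using an illustrative unit for /home instead of real files:

import os

unit_lines = [
    '[Unit]',
    'Description=/home',
    '',
    '[Mount]',
    'What=/dev/mapper/vg-home',   # illustrative device
    'Where=/home',
]

rewritten = []
for line in unit_lines:
    if line.startswith('Where='):
        _, destination = line.split('=', 1)
        # Same prefixing as in the library: the mount point gains a /sysroot prefix
        # so the unit mounts under the not-yet-switched root inside the initramfs.
        line = 'Where={}'.format(os.path.join('/sysroot', destination.lstrip('/')))
    rewritten.append(line)

assert 'Where=/sysroot/home' in rewritten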
diff --git a/repos/system_upgrade/common/actors/initramfs/mount_units_generator/tests/test_mount_unit_generation.py b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/tests/test_mount_unit_generation.py
new file mode 100644
index 00000000..b814f6ce
--- /dev/null
+++ b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/tests/test_mount_unit_generation.py
@@ -0,0 +1,269 @@
+import os
+import shutil
+
+import pytest
+
+from leapp.exceptions import StopActorExecutionError
+from leapp.libraries.actor import mount_unit_generator
+from leapp.libraries.common.testutils import logger_mocked
+from leapp.libraries.stdlib import api, CalledProcessError
+from leapp.models import TargetUserSpaceInfo, UpgradeInitramfsTasks
+
+
+def test_run_systemd_fstab_generator_successful_generation(monkeypatch):
+ """Test successful mount unit generation."""
+
+ output_dir = '/tmp/test_output'
+ expected_cmd = [
+ '/usr/lib/systemd/system-generators/systemd-fstab-generator',
+ output_dir,
+ output_dir,
+ output_dir
+ ]
+
+ def mock_run(command):
+ assert command == expected_cmd
+
+ return {
+ "stdout": "",
+ "stderr": "",
+ "exit_code": 0,
+ }
+
+ monkeypatch.setattr(mount_unit_generator, 'run', mock_run)
+ mount_unit_generator.run_systemd_fstab_generator(output_dir)
+
+
+def test_run_systemd_fstab_generator_failure(monkeypatch):
+ """Test handling of systemd-fstab-generator failure."""
+ output_dir = '/tmp/test_output'
+ expected_cmd = [
+ '/usr/lib/systemd/system-generators/systemd-fstab-generator',
+ output_dir,
+ output_dir,
+ output_dir
+ ]
+
+ def mock_run(command):
+ assert command == expected_cmd
+ raise CalledProcessError(message='Generator failed', command=['test'], result={'exit_code': 1})
+
+ monkeypatch.setattr(mount_unit_generator, 'run', mock_run)
+ monkeypatch.setattr(api, 'current_logger', logger_mocked())
+
+ with pytest.raises(StopActorExecutionError):
+ mount_unit_generator.run_systemd_fstab_generator(output_dir)
+
+
+def test_prefix_mount_unit_with_sysroot(monkeypatch):
+ """Test prefixing a single mount unit with /sysroot."""
+ monkeypatch.setattr(api, 'current_logger', logger_mocked())
+
+ input_content = [
+ "[Unit]\n",
+ "Description=Test Mount\n",
+ "[Mount]\n",
+ "Where=/home\n",
+ "What=/dev/sda1\n"
+ ]
+
+ expected_output_lines = [
+ "[Unit]",
+ "Description=Test Mount",
+ "[Mount]",
+ "Where=/sysroot/home",
+ "What=/dev/sda1"
+ ]
+
+ def mock_read_unit_file_lines(unit_file_path):
+ return input_content
+
+ def mock_write_unit_file_lines(unit_file_path, lines):
+ assert unit_file_path == '/test/output.mount'
+ assert lines == expected_output_lines
+
+ monkeypatch.setattr(mount_unit_generator, '_read_unit_file_lines', mock_read_unit_file_lines)
+ monkeypatch.setattr(mount_unit_generator, '_write_unit_file_lines', mock_write_unit_file_lines)
+
+ mount_unit_generator._prefix_mount_unit_with_sysroot(
+ '/test/input.mount',
+ '/test/output.mount'
+ )
+
+
+def test_prefix_all_mount_units_with_sysroot(monkeypatch):
+ """Test prefixing all mount units in a directory."""
+
+ expected_changes = {
+ '/test/dir/home.mount': {
+ 'new_unit_destination': '/test/dir/sysroot-home.mount',
+ 'should_be_deleted': True,
+ 'deleted': False,
+ },
+ '/test/dir/var.mount': {
+ 'new_unit_destination': '/test/dir/sysroot-var.mount',
+ 'should_be_deleted': True,
+ 'deleted': False,
+ },
+ '/test/dir/not-a-mount.service': {
+ 'new_unit_destination': None,
+ 'should_be_deleted': False,
+ 'deleted': False,
+ }
+ }
+
+ def mock_listdir(dir_path):
+ return ['home.mount', 'var.mount', 'not-a-mount.service']
+
+ def mock_delete_file(file_path):
+ assert file_path in expected_changes
+ expected_changes[file_path]['deleted'] = True
+
+ def mock_prefix(unit_file_path, new_unit_destination):
+ assert expected_changes[unit_file_path]['new_unit_destination'] == new_unit_destination
+
+ monkeypatch.setattr('os.listdir', mock_listdir)
+ monkeypatch.setattr(mount_unit_generator, '_delete_file', mock_delete_file)
+ monkeypatch.setattr(mount_unit_generator, '_prefix_mount_unit_with_sysroot', mock_prefix)
+
+ mount_unit_generator.prefix_all_mount_units_with_sysroot('/test/dir')
+
+ for original_mount_unit_location in expected_changes:
+ should_be_deleted = expected_changes[original_mount_unit_location]['should_be_deleted']
+ was_deleted = expected_changes[original_mount_unit_location]['deleted']
+ assert should_be_deleted == was_deleted
+
+
+@pytest.mark.parametrize('dirname', (
+ 'local-fs.target.requires',
+ 'local-fs.target.wants',
+ 'local-fs-pre.target.requires',
+ 'local-fs-pre.target.wants',
+ 'remote-fs.target.requires',
+ 'remote-fs.target.wants',
+ 'remote-fs-pre.target.requires',
+ 'remote-fs-pre.target.wants',
+))
+def test_fix_symlinks_in_dir(monkeypatch, dirname):
+ """Test fixing local-fs.target.requires symlinks."""
+
+ DIR_PATH = os.path.join('/test/dir/', dirname)
+
+ def mock_rmtree(dir_path):
+ assert dir_path == DIR_PATH
+
+ def mock_mkdir(dir_path):
+ assert dir_path == DIR_PATH
+
+ def mock_listdir(dir_path):
+ return ['sysroot-home.mount', 'sysroot-var.mount', 'not-a-mount.service']
+
+ def mock_os_path_exist(dir_path):
+ assert dir_path == DIR_PATH
+ return dir_path == DIR_PATH
+
+ expected_calls = [
+ ['ln', '-s', '../sysroot-home.mount', os.path.join(DIR_PATH, 'sysroot-home.mount')],
+ ['ln', '-s', '../sysroot-var.mount', os.path.join(DIR_PATH, 'sysroot-var.mount')]
+ ]
+ call_count = 0
+
+ def mock_run(command):
+ nonlocal call_count
+ assert command in expected_calls
+ call_count += 1
+ return {
+ "stdout": "",
+ "stderr": "",
+ "exit_code": 0,
+ }
+
+ monkeypatch.setattr('shutil.rmtree', mock_rmtree)
+ monkeypatch.setattr('os.mkdir', mock_mkdir)
+ monkeypatch.setattr('os.listdir', mock_listdir)
+ monkeypatch.setattr('os.path.exists', mock_os_path_exist)
+ monkeypatch.setattr(mount_unit_generator, 'run', mock_run)
+
+ mount_unit_generator._fix_symlinks_in_dir('/test/dir', dirname)
+
+
+# Test the copy_units_into_system_location function
+def test_copy_units_mixed_content(monkeypatch):
+ """Test copying units with mixed files and directories."""
+
+ def mock_walk(dir_path):
+ tuples_to_yield = [
+ ('/source/dir', ['local-fs.target.requires'], ['unit1.mount', 'unit2.mount']),
+ ('/source/dir/local-fs.target.requires', [], ['unit1.mount', 'unit2.mount']),
+ ]
+ for i in tuples_to_yield:
+ yield i
+
+ def mock_isdir(path):
+ return 'local-fs.target.requires' in path
+
+ def _make_couple(sub_path):
+ return (
+ os.path.join('/source/dir/', sub_path),
+ os.path.join('/container/usr/lib/systemd/system/', sub_path)
+ )
+
+ def mock_copy2(src, dst, follow_symlinks=True):
+ valid_combinations = [
+ _make_couple('unit1.mount'),
+ _make_couple('unit2.mount'),
+ _make_couple('local-fs.target.requires/unit1.mount'),
+ _make_couple('local-fs.target.requires/unit2.mount'),
+ ]
+ assert not follow_symlinks
+ assert (src, dst) in valid_combinations
+
+ def mock_islink(file_path):
+ return file_path == '/container/usr/lib/systemd/system/local-fs.target.requires/unit2.mount'
+
+ class MockedDeleteFile:
+ def __init__(self):
+ self.removal_called = False
+
+ def __call__(self, file_path):
+ assert file_path == '/container/usr/lib/systemd/system/local-fs.target.requires/unit2.mount'
+ self.removal_called = True
+
+ def mock_makedirs(dst_dir, mode=0o777, exist_ok=False):
+ assert exist_ok
+ assert mode == 0o755
+
+ allowed_paths = [
+ '/container/usr/lib/systemd/system',
+ '/container/usr/lib/systemd/system/local-fs.target.requires'
+ ]
+ assert dst_dir.rstrip('/') in allowed_paths
+
+ monkeypatch.setattr(os, 'walk', mock_walk)
+ monkeypatch.setattr(os, 'makedirs', mock_makedirs)
+ monkeypatch.setattr(os.path, 'isdir', mock_isdir)
+ monkeypatch.setattr(os.path, 'islink', mock_islink)
+ monkeypatch.setattr(mount_unit_generator, '_delete_file', MockedDeleteFile())
+ monkeypatch.setattr(shutil, 'copy2', mock_copy2)
+
+ class MockedContainerContext:
+ def __init__(self):
+ self.base_dir = '/container'
+
+ def full_path(self, path):
+ return os.path.join('/container', path.lstrip('/'))
+
+ mock_container = MockedContainerContext()
+
+ files = mount_unit_generator.copy_units_into_system_location(
+ mock_container, '/source/dir'
+ )
+
+ expected_files = [
+ '/usr/lib/systemd/system/unit1.mount',
+ '/usr/lib/systemd/system/unit2.mount',
+ '/usr/lib/systemd/system/local-fs.target.requires/unit1.mount',
+ '/usr/lib/systemd/system/local-fs.target.requires/unit2.mount',
+ ]
+ assert sorted(files) == sorted(expected_files)
+ assert mount_unit_generator._delete_file.removal_called
diff --git a/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py
index 32e4527b..1e595e9a 100644
--- a/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py
+++ b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py
@@ -152,11 +152,11 @@ def _report(title, summary, keys, inhibitor=False):
)
hint = (
'Check the path to the listed GPG keys is correct, the keys are valid and'
- ' import them into the host RPM DB or store them inside the {} directory'
+ ' import them into the host RPM DB or store them inside one of the {} directories'
' prior the upgrade.'
' If you want to proceed the in-place upgrade without checking any RPM'
' signatures, execute leapp with the `--nogpgcheck` option.'
- .format(get_path_to_gpg_certs())
+ .format(','.join(get_path_to_gpg_certs()))
)
groups = [reporting.Groups.REPOSITORY]
if inhibitor:
@@ -188,7 +188,7 @@ def _report_missing_keys(keys):
summary = (
'Some of the target repositories require GPG keys that are not installed'
' in the current RPM DB or are not stored in the {trust_dir} directory.'
- .format(trust_dir=get_path_to_gpg_certs())
+ .format(trust_dir=','.join(get_path_to_gpg_certs()))
)
_report('Detected unknown GPG keys for target system repositories', summary, keys, True)
@@ -262,11 +262,12 @@ def _report_repos_missing_keys(repos):
def register_dnfworkaround():
- api.produce(DNFWorkaround(
- display_name='import trusted gpg keys to RPM DB',
- script_path=api.current_actor().get_common_tool_path('importrpmgpgkeys'),
- script_args=[get_path_to_gpg_certs()],
- ))
+ for trust_certs_dir in get_path_to_gpg_certs():
+ api.produce(DNFWorkaround(
+ display_name='import trusted gpg keys to RPM DB',
+ script_path=api.current_actor().get_common_tool_path('importrpmgpgkeys'),
+ script_args=[trust_certs_dir],
+ ))
@suppress_deprecation(TMPTargetRepositoriesFacts)
diff --git a/repos/system_upgrade/common/actors/peseventsscanner/actor.py b/repos/system_upgrade/common/actors/peseventsscanner/actor.py
index f801f1a1..cb911471 100644
--- a/repos/system_upgrade/common/actors/peseventsscanner/actor.py
+++ b/repos/system_upgrade/common/actors/peseventsscanner/actor.py
@@ -10,7 +10,8 @@ from leapp.models import (
RepositoriesMapping,
RepositoriesSetupTasks,
RHUIInfo,
- RpmTransactionTasks
+ RpmTransactionTasks,
+ ActiveVendorList,
)
from leapp.reporting import Report
from leapp.tags import FactsPhaseTag, IPUWorkflowTag
@@ -33,6 +34,7 @@ class PesEventsScanner(Actor):
RepositoriesMapping,
RHUIInfo,
RpmTransactionTasks,
+ ActiveVendorList,
)
produces = (ConsumedDataAsset, PESRpmTransactionTasks, RepositoriesSetupTasks, Report)
tags = (IPUWorkflowTag, FactsPhaseTag)
diff --git a/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_event_parsing.py b/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_event_parsing.py
index f24dda68..7ee5d016 100644
--- a/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_event_parsing.py
+++ b/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_event_parsing.py
@@ -58,6 +58,7 @@ class Action(IntEnum):
MERGED = 5
MOVED = 6
RENAMED = 7
+ REINSTALLED = 8
def get_pes_events(pes_json_directory, pes_json_filename):
@@ -72,13 +73,14 @@ def get_pes_events(pes_json_directory, pes_json_filename):
# a case as we have no work to do in such a case here.
events_data = fetch.load_data_asset(api.current_actor(),
pes_json_filename,
+ asset_directory=pes_json_directory,
asset_fulltext_name='PES events file',
docs_url='',
docs_title='')
if not events_data:
return None
- if not events_data.get('packageinfo'):
+ if events_data.get('packageinfo') is None:
raise ValueError('Found PES data with invalid structure')
all_events = list(chain(*[parse_entry(entry) for entry in events_data['packageinfo']]))
diff --git a/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py b/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py
index e6741293..7a7e9ebf 100644
--- a/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py
+++ b/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py
@@ -1,5 +1,6 @@
from collections import defaultdict, namedtuple
from functools import partial
+import os
from leapp import reporting
from leapp.exceptions import StopActorExecutionError
@@ -7,6 +8,7 @@ from leapp.libraries.actor import peseventsscanner_repomap
from leapp.libraries.actor.pes_event_parsing import Action, get_pes_events, Package
from leapp.libraries.common import rpms
from leapp.libraries.common.config import version
+from leapp.libraries.common.repomaputils import combine_repomap_messages
from leapp.libraries.stdlib import api
from leapp.libraries.stdlib.config import is_verbose
from leapp.models import (
@@ -20,7 +22,8 @@ from leapp.models import (
RepositoriesMapping,
RepositoriesSetupTasks,
RHUIInfo,
- RpmTransactionTasks
+ RpmTransactionTasks,
+ ActiveVendorList,
)
SKIPPED_PKGS_MSG = (
@@ -31,8 +34,9 @@ SKIPPED_PKGS_MSG = (
'for details.\nThe list of these packages:'
)
+VENDORS_DIR = "/etc/leapp/files/vendors.d"
-TransactionConfiguration = namedtuple('TransactionConfiguration', ('to_install', 'to_remove', 'to_keep'))
+TransactionConfiguration = namedtuple('TransactionConfiguration', ('to_install', 'to_remove', 'to_keep', 'to_reinstall'))
def get_cloud_provider_name(cloud_provider_variant):
@@ -86,7 +90,7 @@ def get_transaction_configuration():
:return: TransactionConfiguration
"""
- transaction_configuration = TransactionConfiguration(to_install=set(), to_remove=set(), to_keep=set())
+ transaction_configuration = TransactionConfiguration(to_install=set(), to_remove=set(), to_keep=set(), to_reinstall=set())
_Pkg = partial(Package, repository=None, modulestream=None)
@@ -94,6 +98,7 @@ def get_transaction_configuration():
transaction_configuration.to_install.update(_Pkg(name=pkg_name) for pkg_name in tasks.to_install)
transaction_configuration.to_remove.update(_Pkg(name=pkg_name) for pkg_name in tasks.to_remove)
transaction_configuration.to_keep.update(_Pkg(name=pkg_name) for pkg_name in tasks.to_keep)
+ transaction_configuration.to_reinstall.update(_Pkg(name=pkg_name) for pkg_name in tasks.to_reinstall)
return transaction_configuration
@@ -133,6 +138,7 @@ def compute_pkg_changes_between_consequent_releases(source_installed_pkgs,
logger = api.current_logger()
# Start with the installed packages and modify the set according to release events
target_pkgs = set(source_installed_pkgs)
+ pkgs_to_reinstall = set()
release_events = [e for e in events if e.to_release == release]
@@ -176,9 +182,12 @@ def compute_pkg_changes_between_consequent_releases(source_installed_pkgs,
target_pkgs = target_pkgs.difference(event.out_pkgs)
target_pkgs = target_pkgs.union(event.out_pkgs)
+ if (event.action == Action.REINSTALLED and is_any_in_pkg_present):
+ pkgs_to_reinstall = pkgs_to_reinstall.union(event.in_pkgs)
+
pkgs_to_demodularize = pkgs_to_demodularize.difference(event.in_pkgs)
- return (target_pkgs, pkgs_to_demodularize)
+ return (target_pkgs, pkgs_to_demodularize, pkgs_to_reinstall)
def remove_undesired_events(events, relevant_to_releases):
@@ -244,15 +253,17 @@ def compute_packages_on_target_system(source_pkgs, events, releases):
did_processing_cross_major_version = True
pkgs_to_demodularize = {pkg for pkg in target_pkgs if pkg.modulestream}
- target_pkgs, pkgs_to_demodularize = compute_pkg_changes_between_consequent_releases(target_pkgs, events,
- release, seen_pkgs,
- pkgs_to_demodularize)
+ target_pkgs, pkgs_to_demodularize, pkgs_to_reinstall = compute_pkg_changes_between_consequent_releases(
+ target_pkgs, events,
+ release, seen_pkgs,
+ pkgs_to_demodularize
+ )
seen_pkgs = seen_pkgs.union(target_pkgs)
demodularized_pkgs = {Package(pkg.name, pkg.repository, None) for pkg in pkgs_to_demodularize}
demodularized_target_pkgs = target_pkgs.difference(pkgs_to_demodularize).union(demodularized_pkgs)
- return (demodularized_target_pkgs, pkgs_to_demodularize)
+ return (demodularized_target_pkgs, pkgs_to_demodularize, pkgs_to_reinstall)
def compute_rpm_tasks_from_pkg_set_diff(source_pkgs, target_pkgs, pkgs_to_demodularize):
@@ -356,15 +367,13 @@ def get_pesid_to_repoid_map(target_pesids):
:return: Dictionary mapping the target_pesids to their corresponding repoid
"""
- repositories_map_msgs = api.consume(RepositoriesMapping)
- repositories_map_msg = next(repositories_map_msgs, None)
- if list(repositories_map_msgs):
- api.current_logger().warning('Unexpectedly received more than one RepositoriesMapping message.')
- if not repositories_map_msg:
+ repositories_map_msgs = list(api.consume(RepositoriesMapping))
+ if not repositories_map_msgs:
raise StopActorExecutionError(
'Cannot parse RepositoriesMapping data properly',
details={'Problem': 'Did not receive a message with mapped repositories'}
)
+ repositories_map_msg = combine_repomap_messages(repositories_map_msgs)
rhui_info = next(api.consume(RHUIInfo), None)
cloud_provider = rhui_info.provider if rhui_info else ''
@@ -554,6 +563,19 @@ def process():
if not events:
return
+ active_vendors = []
+ for vendor_list in api.consume(ActiveVendorList):
+ active_vendors.extend(vendor_list.data)
+
+ pes_json_suffix = "_pes.json"
+ if os.path.isdir(VENDORS_DIR):
+ vendor_pesfiles = list(filter(lambda vfile: pes_json_suffix in vfile, os.listdir(VENDORS_DIR)))
+
+ for pesfile in vendor_pesfiles:
+ if pesfile[:-len(pes_json_suffix)] in active_vendors:
+ vendor_events = get_pes_events(VENDORS_DIR, pesfile)
+ events.extend(vendor_events)
+
releases = get_relevant_releases(events)
installed_pkgs = get_installed_pkgs()
transaction_configuration = get_transaction_configuration()
@@ -567,7 +589,7 @@ def process():
events = remove_undesired_events(events, releases)
# Apply events - compute what packages should the target system have
- target_pkgs, pkgs_to_demodularize = compute_packages_on_target_system(pkgs_to_begin_computation_with,
+ target_pkgs, pkgs_to_demodularize, pkgs_to_reinstall = compute_packages_on_target_system(pkgs_to_begin_computation_with,
events, releases)
# Packages coming out of the events have PESID as their repository, however, we need real repoid
@@ -587,4 +609,5 @@ def process():
rpm_tasks = include_instructions_from_transaction_configuration(rpm_tasks, transaction_configuration,
installed_pkgs)
if rpm_tasks:
+ rpm_tasks.to_reinstall = sorted(pkgs_to_reinstall)
api.produce(rpm_tasks)
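
A standalone sketch of the vendor PES file discovery added to process() above, with the leapp API calls stripped out (VENDORS_DIR and the "_pes.json" suffix come from this patch; the endswith() check is a simplification of the substring match used in the hunk):

    import os

    VENDORS_DIR = '/etc/leapp/files/vendors.d'
    PES_SUFFIX = '_pes.json'

    def vendor_pes_files(active_vendors, vendors_dir=VENDORS_DIR):
        # Only vendors reported in ActiveVendorList contribute extra PES events.
        if not os.path.isdir(vendors_dir):
            return []
        return sorted(
            fname for fname in os.listdir(vendors_dir)
            if fname.endswith(PES_SUFFIX) and fname[:-len(PES_SUFFIX)] in active_vendors
        )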
diff --git a/repos/system_upgrade/common/actors/repositoriesmapping/libraries/repositoriesmapping.py b/repos/system_upgrade/common/actors/repositoriesmapping/libraries/repositoriesmapping.py
index d4a64793..4ec1d6e0 100644
--- a/repos/system_upgrade/common/actors/repositoriesmapping/libraries/repositoriesmapping.py
+++ b/repos/system_upgrade/common/actors/repositoriesmapping/libraries/repositoriesmapping.py
@@ -3,6 +3,7 @@ from collections import defaultdict
from leapp.exceptions import StopActorExecutionError
from leapp.libraries.common.config.version import get_source_major_version, get_target_major_version
+from leapp.libraries.common.repomaputils import RepoMapData
from leapp.libraries.common.fetch import load_data_asset
from leapp.libraries.common.rpms import get_leapp_packages, LeappComponents
from leapp.libraries.stdlib import api
@@ -16,121 +17,6 @@ REPOMAP_FILE = 'repomap.json'
"""The name of the new repository mapping file."""
-class RepoMapData(object):
- VERSION_FORMAT = '1.3.0'
-
- def __init__(self):
- self.repositories = []
- self.mapping = {}
-
- def add_repository(self, data, pesid):
- """
- Add new PESIDRepositoryEntry with given pesid from the provided dictionary.
-
- :param data: A dict containing the data of the added repository. The dictionary structure corresponds
- to the repositories entries in the repository mapping JSON schema.
- :type data: Dict[str, str]
- :param pesid: PES id of the repository family that the newly added repository belongs to.
- :type pesid: str
- """
- self.repositories.append(PESIDRepositoryEntry(
- repoid=data['repoid'],
- channel=data['channel'],
- rhui=data.get('rhui', ''),
- repo_type=data['repo_type'],
- arch=data['arch'],
- major_version=data['major_version'],
- pesid=pesid,
- distro=data['distro'],
- ))
-
- def get_repositories(self, valid_major_versions):
- """
- Return the list of PESIDRepositoryEntry object matching the specified major versions.
- """
- return [repo for repo in self.repositories if repo.major_version in valid_major_versions]
-
- def add_mapping(self, source_major_version, target_major_version, source_pesid, target_pesid):
- """
- Add a new mapping entry that is mapping the source pesid to the destination pesid(s),
- relevant in an IPU from the supplied source major version to the supplied target
- major version.
-
- :param str source_major_version: Specifies the major version of the source system
- for which the added mapping applies.
- :param str target_major_version: Specifies the major version of the target system
- for which the added mapping applies.
- :param str source_pesid: PESID of the source repository.
- :param Union[str|List[str]] target_pesid: A single target PESID or a list of target
- PESIDs of the added mapping.
- """
- # NOTE: it could be more simple, but I prefer to be sure the input data
- # contains just one map per source PESID.
- key = '{}:{}'.format(source_major_version, target_major_version)
- rmap = self.mapping.get(key, defaultdict(set))
- self.mapping[key] = rmap
- if isinstance(target_pesid, list):
- rmap[source_pesid].update(target_pesid)
- else:
- rmap[source_pesid].add(target_pesid)
-
- def get_mappings(self, src_major_version, dst_major_version):
- """
- Return the list of RepoMapEntry objects for the specified upgrade path.
-
- IOW, the whole mapping for specified IPU.
- """
- key = '{}:{}'.format(src_major_version, dst_major_version)
- rmap = self.mapping.get(key, None)
- if not rmap:
- return None
- map_list = []
- for src_pesid in sorted(rmap.keys()):
- map_list.append(RepoMapEntry(source=src_pesid, target=sorted(rmap[src_pesid])))
- return map_list
-
- @staticmethod
- def load_from_dict(data):
- if data['version_format'] != RepoMapData.VERSION_FORMAT:
- raise ValueError(
- 'The obtained repomap data has unsupported version of format.'
- ' Get {} required {}'
- .format(data['version_format'], RepoMapData.VERSION_FORMAT)
- )
-
- repomap = RepoMapData()
-
- # Load reposiories
- existing_pesids = set()
- for repo_family in data['repositories']:
- existing_pesids.add(repo_family['pesid'])
- for repo in repo_family['entries']:
- repomap.add_repository(repo, repo_family['pesid'])
-
- # Load mappings
- for mapping in data['mapping']:
- for entry in mapping['entries']:
- if not isinstance(entry['target'], list):
- raise ValueError(
- 'The target field of a mapping entry is not a list: {}'
- .format(entry)
- )
-
- for pesid in [entry['source']] + entry['target']:
- if pesid not in existing_pesids:
- raise ValueError(
- 'The {} pesid is not related to any repository.'
- .format(pesid)
- )
- repomap.add_mapping(
- source_major_version=mapping['source_major_version'],
- target_major_version=mapping['target_major_version'],
- source_pesid=entry['source'],
- target_pesid=entry['target'],
- )
- return repomap
-
-
def _inhibit_upgrade(msg):
local_path = os.path.join('/etc/leapp/file', REPOMAP_FILE)
hint = (
diff --git a/repos/system_upgrade/common/actors/rpmtransactionconfigtaskscollector/libraries/rpmtransactionconfigtaskscollector.py b/repos/system_upgrade/common/actors/rpmtransactionconfigtaskscollector/libraries/rpmtransactionconfigtaskscollector.py
index 43ac1fc4..62aefaf4 100644
--- a/repos/system_upgrade/common/actors/rpmtransactionconfigtaskscollector/libraries/rpmtransactionconfigtaskscollector.py
+++ b/repos/system_upgrade/common/actors/rpmtransactionconfigtaskscollector/libraries/rpmtransactionconfigtaskscollector.py
@@ -18,21 +18,37 @@ def load_tasks_file(path, logger):
return []
+def filter_out(installed_rpm_names, to_filter, debug_msg):
+ # These are the packages that aren't installed on the system.
+ filtered_ok = [pkg for pkg in to_filter if pkg not in installed_rpm_names]
+
+    # And these are the ones that are installed.
+ filtered_out = list(set(to_filter) - set(filtered_ok))
+ if filtered_out:
+ api.current_logger().debug(
+ debug_msg +
+ '\n- ' + '\n- '.join(filtered_out)
+ )
+ # We may want to use either of the two sets.
+ return filtered_ok, filtered_out
+
+
def load_tasks(base_dir, logger):
# Loads configuration files to_install, to_keep, and to_remove from the given base directory
rpms = next(api.consume(DistributionSignedRPM))
rpm_names = [rpm.name for rpm in rpms.items]
+
to_install = load_tasks_file(os.path.join(base_dir, 'to_install'), logger)
+ install_debug_msg = 'The following packages from "to_install" file will be ignored as they are already installed:'
# we do not want to put into rpm transaction what is already installed (it will go to "to_upgrade" bucket)
- to_install_filtered = [pkg for pkg in to_install if pkg not in rpm_names]
+ to_install_filtered, _ = filter_out(rpm_names, to_install, install_debug_msg)
- filtered = set(to_install) - set(to_install_filtered)
- if filtered:
- api.current_logger().debug(
- 'The following packages from "to_install" file will be ignored as they are already installed:'
- '\n- ' + '\n- '.join(filtered))
+ to_reinstall = load_tasks_file(os.path.join(base_dir, 'to_reinstall'), logger)
+ reinstall_debug_msg = 'The following packages from "to_reinstall" file will be ignored as they are not installed:'
+ _, to_reinstall_filtered = filter_out(rpm_names, to_reinstall, reinstall_debug_msg)
return RpmTransactionTasks(
to_install=to_install_filtered,
+ to_reinstall=to_reinstall_filtered,
to_keep=load_tasks_file(os.path.join(base_dir, 'to_keep'), logger),
to_remove=load_tasks_file(os.path.join(base_dir, 'to_remove'), logger))
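
A minimal standalone sketch of the split performed by the new filter_out() helper above (names here are illustrative; the real helper also logs the filtered-out packages through the actor logger):

    def split_by_installed(installed_rpm_names, to_filter):
        # Packages from to_filter that are not installed on the system.
        not_installed = [pkg for pkg in to_filter if pkg not in installed_rpm_names]
        # Packages from to_filter that are installed.
        installed = sorted(set(to_filter) - set(not_installed))
        return not_installed, installed

    # "to_install" keeps the not-installed part, "to_reinstall" keeps the installed part.
    assert split_by_installed({'bash', 'vim'}, ['vim', 'git']) == (['git'], ['vim'])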
diff --git a/repos/system_upgrade/common/actors/scandnfpluginpath/actor.py b/repos/system_upgrade/common/actors/scandnfpluginpath/actor.py
new file mode 100644
index 00000000..e43a691e
--- /dev/null
+++ b/repos/system_upgrade/common/actors/scandnfpluginpath/actor.py
@@ -0,0 +1,21 @@
+from leapp.actors import Actor
+from leapp.libraries.actor.scandnfpluginpath import scan_dnf_pluginpath
+from leapp.models import DnfPluginPathDetected
+from leapp.tags import FactsPhaseTag, IPUWorkflowTag
+
+
+class ScanDnfPluginPath(Actor):
+ """
+    Scan the DNF configuration for a custom pluginpath option.
+
+    This actor collects information about whether the pluginpath option is configured in the DNF configuration
+    and produces a DnfPluginPathDetected message containing that information.
+ """
+
+ name = 'scan_dnf_pluginpath'
+ consumes = ()
+ produces = (DnfPluginPathDetected,)
+ tags = (FactsPhaseTag, IPUWorkflowTag)
+
+ def process(self):
+ scan_dnf_pluginpath()
diff --git a/repos/system_upgrade/common/actors/scandnfpluginpath/libraries/scandnfpluginpath.py b/repos/system_upgrade/common/actors/scandnfpluginpath/libraries/scandnfpluginpath.py
new file mode 100644
index 00000000..818f7700
--- /dev/null
+++ b/repos/system_upgrade/common/actors/scandnfpluginpath/libraries/scandnfpluginpath.py
@@ -0,0 +1,30 @@
+import os
+
+from six.moves import configparser
+
+from leapp.libraries.stdlib import api
+from leapp.models import DnfPluginPathDetected
+
+DNF_CONFIG_PATH = '/etc/dnf/dnf.conf'
+
+
+def _is_pluginpath_set(config_path):
+ """Check if pluginpath option is set in DNF configuration file."""
+ if not os.path.isfile(config_path):
+ api.current_logger().warning('The %s file is missing.', config_path)
+ return False
+
+ parser = configparser.ConfigParser()
+
+ try:
+ parser.read(config_path)
+ return parser.has_option('main', 'pluginpath')
+ except (configparser.Error, IOError) as e:
+ api.current_logger().warning('The DNF config file %s couldn\'t be parsed: %s', config_path, e)
+ return False
+
+
+def scan_dnf_pluginpath():
+ """Scan DNF configuration and produce DnfPluginPathDetected message."""
+ is_detected = _is_pluginpath_set(DNF_CONFIG_PATH)
+ api.produce(DnfPluginPathDetected(is_pluginpath_detected=is_detected))
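
A minimal sketch of the check performed by _is_pluginpath_set() above, reduced to plain configparser so it runs standalone (the actor itself reads /etc/dnf/dnf.conf through six.moves.configparser and logs a warning when the file is missing or unparsable):

    import configparser

    def has_pluginpath(dnf_conf_text):
        # True when the [main] section defines a pluginpath option.
        parser = configparser.ConfigParser()
        parser.read_string(dnf_conf_text)
        return parser.has_option('main', 'pluginpath')

    assert has_pluginpath('[main]\ngpgcheck=1\n') is False
    assert has_pluginpath('[main]\npluginpath=/usr/lib/python3.6/site-packages/dnf-plugins\n') is True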
diff --git a/repos/system_upgrade/common/actors/scandnfpluginpath/tests/files/dnf_config_incorrect_pluginpath b/repos/system_upgrade/common/actors/scandnfpluginpath/tests/files/dnf_config_incorrect_pluginpath
new file mode 100644
index 00000000..aa29db09
--- /dev/null
+++ b/repos/system_upgrade/common/actors/scandnfpluginpath/tests/files/dnf_config_incorrect_pluginpath
@@ -0,0 +1,7 @@
+[main]
+gpgcheck=1
+installonly_limit=3
+clean_requirements_on_remove=True
+best=True
+skip_if_unavailable=False
+pluginpathincorrect=/usr/lib/python3.6/site-packages/dnf-plugins
diff --git a/repos/system_upgrade/common/actors/scandnfpluginpath/tests/files/dnf_config_no_pluginpath b/repos/system_upgrade/common/actors/scandnfpluginpath/tests/files/dnf_config_no_pluginpath
new file mode 100644
index 00000000..3d08d075
--- /dev/null
+++ b/repos/system_upgrade/common/actors/scandnfpluginpath/tests/files/dnf_config_no_pluginpath
@@ -0,0 +1,6 @@
+[main]
+gpgcheck=1
+installonly_limit=3
+clean_requirements_on_remove=True
+best=True
+skip_if_unavailable=False
diff --git a/repos/system_upgrade/common/actors/scandnfpluginpath/tests/files/dnf_config_with_pluginpath b/repos/system_upgrade/common/actors/scandnfpluginpath/tests/files/dnf_config_with_pluginpath
new file mode 100644
index 00000000..09a81e64
--- /dev/null
+++ b/repos/system_upgrade/common/actors/scandnfpluginpath/tests/files/dnf_config_with_pluginpath
@@ -0,0 +1,7 @@
+[main]
+gpgcheck=1
+installonly_limit=3
+clean_requirements_on_remove=True
+best=True
+skip_if_unavailable=False
+pluginpath=/usr/lib/python3.6/site-packages/dnf-plugins
diff --git a/repos/system_upgrade/common/actors/scandnfpluginpath/tests/test_scandnfpluginpath.py b/repos/system_upgrade/common/actors/scandnfpluginpath/tests/test_scandnfpluginpath.py
new file mode 100644
index 00000000..fefb9d3f
--- /dev/null
+++ b/repos/system_upgrade/common/actors/scandnfpluginpath/tests/test_scandnfpluginpath.py
@@ -0,0 +1,53 @@
+import os
+
+import pytest
+
+from leapp.libraries.actor import scandnfpluginpath
+from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked, produce_mocked
+from leapp.libraries.stdlib import api
+from leapp.models import DnfPluginPathDetected
+
+
+@pytest.mark.parametrize('is_detected', [False, True])
+def test_scan_detects_pluginpath(monkeypatch, is_detected):
+ mocked_producer = produce_mocked()
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked())
+ monkeypatch.setattr(api, 'produce', mocked_producer)
+
+ monkeypatch.setattr(scandnfpluginpath, '_is_pluginpath_set',
+ lambda path: is_detected)
+
+ scandnfpluginpath.scan_dnf_pluginpath()
+
+ assert mocked_producer.called == 1
+ assert mocked_producer.model_instances[0].is_pluginpath_detected is is_detected
+
+
+@pytest.mark.parametrize(('config_file', 'result'), [
+ ('files/dnf_config_no_pluginpath', False),
+ ('files/dnf_config_with_pluginpath', True),
+ ('files/dnf_config_incorrect_pluginpath', False),
+ ('files/not_existing_file.conf', False)
+])
+def test_is_pluginpath_set(config_file, result):
+ CUR_DIR = os.path.dirname(os.path.abspath(__file__))
+
+ assert scandnfpluginpath._is_pluginpath_set(os.path.join(CUR_DIR, config_file)) == result
+
+
+def test_scan_no_config_file(monkeypatch):
+ mocked_producer = produce_mocked()
+ logger = logger_mocked()
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked())
+ monkeypatch.setattr(api, 'produce', mocked_producer)
+ monkeypatch.setattr(api, 'current_logger', lambda: logger)
+
+ filename = 'files/not_existing_file.conf'
+ monkeypatch.setattr(scandnfpluginpath, 'DNF_CONFIG_PATH', filename)
+ scandnfpluginpath.scan_dnf_pluginpath()
+
+ assert mocked_producer.called == 1
+ assert mocked_producer.model_instances[0].is_pluginpath_detected is False
+
+ assert 'The %s file is missing.' in logger.warnmsg
+ assert filename in logger.warnmsg
diff --git a/repos/system_upgrade/common/actors/scanvendorrepofiles/actor.py b/repos/system_upgrade/common/actors/scanvendorrepofiles/actor.py
new file mode 100644
index 00000000..a5e481cb
--- /dev/null
+++ b/repos/system_upgrade/common/actors/scanvendorrepofiles/actor.py
@@ -0,0 +1,26 @@
+from leapp.actors import Actor
+from leapp.libraries.actor import scanvendorrepofiles
+from leapp.models import (
+ CustomTargetRepositoryFile,
+ ActiveVendorList,
+ VendorCustomTargetRepositoryList,
+)
+from leapp.tags import FactsPhaseTag, IPUWorkflowTag
+
+
+class ScanVendorRepofiles(Actor):
+ """
+ Load and produce custom repository data from vendor-provided files.
+ Only those vendors whose source system repoids were found on the system will be included.
+ """
+
+ name = "scan_vendor_repofiles"
+    consumes = (ActiveVendorList,)
+ produces = (
+ CustomTargetRepositoryFile,
+ VendorCustomTargetRepositoryList,
+ )
+ tags = (FactsPhaseTag, IPUWorkflowTag)
+
+ def process(self):
+ scanvendorrepofiles.process()
diff --git a/repos/system_upgrade/common/actors/scanvendorrepofiles/libraries/scanvendorrepofiles.py b/repos/system_upgrade/common/actors/scanvendorrepofiles/libraries/scanvendorrepofiles.py
new file mode 100644
index 00000000..84392101
--- /dev/null
+++ b/repos/system_upgrade/common/actors/scanvendorrepofiles/libraries/scanvendorrepofiles.py
@@ -0,0 +1,72 @@
+import os
+
+from leapp.libraries.common import repofileutils
+from leapp.libraries.stdlib import api
+from leapp.models import (
+ CustomTargetRepository,
+ CustomTargetRepositoryFile,
+ ActiveVendorList,
+ VendorCustomTargetRepositoryList,
+)
+
+
+VENDORS_DIR = "/etc/leapp/files/vendors.d/"
+REPOFILE_SUFFIX = ".repo"
+
+
+def process():
+ """
+    Produce CustomTargetRepository msgs for the vendor repo files inside the
+    <VENDORS_DIR>.
+
+    The CustomTargetRepository messages are produced only if a "from" vendor repository
+    listed inside its map matches one of the repositories active on the system.
+ """
+ if not os.path.isdir(VENDORS_DIR):
+ api.current_logger().debug(
+ "The {} directory doesn't exist. Nothing to do.".format(VENDORS_DIR)
+ )
+ return
+
+ for repofile_name in os.listdir(VENDORS_DIR):
+ if not repofile_name.endswith(REPOFILE_SUFFIX):
+ continue
+ # Cut the .repo part to get only the name.
+        vendor_name = repofile_name[:-len(REPOFILE_SUFFIX)]
+
+ active_vendors = []
+ for vendor_list in api.consume(ActiveVendorList):
+ active_vendors.extend(vendor_list.data)
+
+ api.current_logger().debug("Active vendor list: {}".format(active_vendors))
+
+ if vendor_name not in active_vendors:
+ api.current_logger().debug(
+ "Vendor {} not in active list, skipping".format(vendor_name)
+ )
+ continue
+
+ full_repo_path = os.path.join(VENDORS_DIR, repofile_name)
+ parsed_repofile = repofileutils.parse_repofile(full_repo_path)
+ api.current_logger().debug(
+ "Vendor {} found in active list, processing file {}".format(vendor_name, repofile_name)
+ )
+
+ api.produce(CustomTargetRepositoryFile(file=full_repo_path))
+
+ custom_vendor_repos = [
+ CustomTargetRepository(
+ repoid=repo.repoid,
+ name=repo.name,
+ baseurl=repo.baseurl,
+ enabled=repo.enabled,
+ ) for repo in parsed_repofile.data
+ ]
+
+ api.produce(
+ VendorCustomTargetRepositoryList(vendor=vendor_name, repos=custom_vendor_repos)
+ )
+
+ api.current_logger().info(
+ "The {} directory exists, vendor repositories loaded.".format(VENDORS_DIR)
+ )
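
For orientation, all vendor inputs consumed by the actors in this patch live under /etc/leapp/files/vendors.d/. A hypothetical vendor named "examplevendor" would ship files like these (names illustrative, suffixes taken from the respective actors):

    examplevendor_map.json   # repository mapping, read by vendor_repositories_mapping
    examplevendor.repo       # target repositories, read by scan_vendor_repofiles
    examplevendor.sigs       # trusted RPM signatures, read by vendor_repo_signature_scanner
    examplevendor_pes.json   # extra PES events, read by pes_events_scanner
    rpm-gpg/                 # vendor GPG keys, returned by get_path_to_gpg_certs()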
diff --git a/repos/system_upgrade/common/actors/scanvendorrepofiles/tests/test_scanvendorrepofiles.py b/repos/system_upgrade/common/actors/scanvendorrepofiles/tests/test_scanvendorrepofiles.py
new file mode 100644
index 00000000..cb5c7ab7
--- /dev/null
+++ b/repos/system_upgrade/common/actors/scanvendorrepofiles/tests/test_scanvendorrepofiles.py
@@ -0,0 +1,131 @@
+import os
+
+from leapp.libraries.actor import scancustomrepofile
+from leapp.libraries.common import repofileutils
+from leapp.libraries.common.testutils import produce_mocked
+from leapp.libraries.stdlib import api
+
+from leapp.models import (CustomTargetRepository, CustomTargetRepositoryFile,
+ RepositoryData, RepositoryFile)
+
+
+_REPODATA = [
+ RepositoryData(repoid="repo1", name="repo1name", baseurl="repo1url", enabled=True),
+ RepositoryData(repoid="repo2", name="repo2name", baseurl="repo2url", enabled=False),
+ RepositoryData(repoid="repo3", name="repo3name", enabled=True),
+ RepositoryData(repoid="repo4", name="repo4name", mirrorlist="mirror4list", enabled=True),
+]
+
+_CUSTOM_REPOS = [
+ CustomTargetRepository(repoid="repo1", name="repo1name", baseurl="repo1url", enabled=True),
+ CustomTargetRepository(repoid="repo2", name="repo2name", baseurl="repo2url", enabled=False),
+ CustomTargetRepository(repoid="repo3", name="repo3name", baseurl=None, enabled=True),
+ CustomTargetRepository(repoid="repo4", name="repo4name", baseurl=None, enabled=True),
+]
+
+_CUSTOM_REPO_FILE_MSG = CustomTargetRepositoryFile(file=scancustomrepofile.CUSTOM_REPO_PATH)
+
+
+_TESTING_REPODATA = [
+ RepositoryData(repoid="repo1-stable", name="repo1name", baseurl="repo1url", enabled=True),
+ RepositoryData(repoid="repo2-testing", name="repo2name", baseurl="repo2url", enabled=False),
+ RepositoryData(repoid="repo3-stable", name="repo3name", enabled=False),
+ RepositoryData(repoid="repo4-testing", name="repo4name", mirrorlist="mirror4list", enabled=True),
+]
+
+_TESTING_CUSTOM_REPOS_STABLE_TARGET = [
+ CustomTargetRepository(repoid="repo1-stable", name="repo1name", baseurl="repo1url", enabled=True),
+ CustomTargetRepository(repoid="repo2-testing", name="repo2name", baseurl="repo2url", enabled=False),
+ CustomTargetRepository(repoid="repo3-stable", name="repo3name", baseurl=None, enabled=False),
+ CustomTargetRepository(repoid="repo4-testing", name="repo4name", baseurl=None, enabled=True),
+]
+
+_TESTING_CUSTOM_REPOS_BETA_TARGET = [
+ CustomTargetRepository(repoid="repo1-stable", name="repo1name", baseurl="repo1url", enabled=True),
+ CustomTargetRepository(repoid="repo2-testing", name="repo2name", baseurl="repo2url", enabled=True),
+ CustomTargetRepository(repoid="repo3-stable", name="repo3name", baseurl=None, enabled=False),
+ CustomTargetRepository(repoid="repo4-testing", name="repo4name", baseurl=None, enabled=True),
+]
+
+_PROCESS_STABLE_TARGET = "stable"
+_PROCESS_BETA_TARGET = "beta"
+
+
+class LoggerMocked(object):
+ def __init__(self):
+ self.infomsg = None
+ self.debugmsg = None
+
+ def info(self, msg):
+ self.infomsg = msg
+
+ def debug(self, msg):
+ self.debugmsg = msg
+
+ def __call__(self):
+ return self
+
+
+def test_no_repofile(monkeypatch):
+ monkeypatch.setattr(os.path, 'isfile', lambda dummy: False)
+ monkeypatch.setattr(api, 'produce', produce_mocked())
+ monkeypatch.setattr(api, 'current_logger', LoggerMocked())
+ scancustomrepofile.process()
+ msg = "The {} file doesn't exist. Nothing to do.".format(scancustomrepofile.CUSTOM_REPO_PATH)
+ assert api.current_logger.debugmsg == msg
+ assert not api.produce.called
+
+
+def test_valid_repofile_exists(monkeypatch):
+ def _mocked_parse_repofile(fpath):
+ return RepositoryFile(file=fpath, data=_REPODATA)
+ monkeypatch.setattr(os.path, 'isfile', lambda dummy: True)
+ monkeypatch.setattr(api, 'produce', produce_mocked())
+ monkeypatch.setattr(repofileutils, 'parse_repofile', _mocked_parse_repofile)
+ monkeypatch.setattr(api, 'current_logger', LoggerMocked())
+ scancustomrepofile.process()
+ msg = "The {} file exists, custom repositories loaded.".format(scancustomrepofile.CUSTOM_REPO_PATH)
+ assert api.current_logger.infomsg == msg
+ assert api.produce.called == len(_CUSTOM_REPOS) + 1
+ assert _CUSTOM_REPO_FILE_MSG in api.produce.model_instances
+ for crepo in _CUSTOM_REPOS:
+ assert crepo in api.produce.model_instances
+
+
+def test_target_stable_repos(monkeypatch):
+ def _mocked_parse_repofile(fpath):
+ return RepositoryFile(file=fpath, data=_TESTING_REPODATA)
+ monkeypatch.setattr(os.path, 'isfile', lambda dummy: True)
+ monkeypatch.setattr(api, 'produce', produce_mocked())
+ monkeypatch.setattr(repofileutils, 'parse_repofile', _mocked_parse_repofile)
+
+ scancustomrepofile.process(_PROCESS_STABLE_TARGET)
+ assert api.produce.called == len(_TESTING_CUSTOM_REPOS_STABLE_TARGET) + 1
+ for crepo in _TESTING_CUSTOM_REPOS_STABLE_TARGET:
+ assert crepo in api.produce.model_instances
+
+
+def test_target_beta_repos(monkeypatch):
+ def _mocked_parse_repofile(fpath):
+ return RepositoryFile(file=fpath, data=_TESTING_REPODATA)
+ monkeypatch.setattr(os.path, 'isfile', lambda dummy: True)
+ monkeypatch.setattr(api, 'produce', produce_mocked())
+ monkeypatch.setattr(repofileutils, 'parse_repofile', _mocked_parse_repofile)
+
+ scancustomrepofile.process(_PROCESS_BETA_TARGET)
+ assert api.produce.called == len(_TESTING_CUSTOM_REPOS_BETA_TARGET) + 1
+ for crepo in _TESTING_CUSTOM_REPOS_BETA_TARGET:
+ assert crepo in api.produce.model_instances
+
+
+def test_empty_repofile_exists(monkeypatch):
+ def _mocked_parse_repofile(fpath):
+ return RepositoryFile(file=fpath, data=[])
+ monkeypatch.setattr(os.path, 'isfile', lambda dummy: True)
+ monkeypatch.setattr(api, 'produce', produce_mocked())
+ monkeypatch.setattr(repofileutils, 'parse_repofile', _mocked_parse_repofile)
+ monkeypatch.setattr(api, 'current_logger', LoggerMocked())
+ scancustomrepofile.process()
+ msg = "The {} file exists, but is empty. Nothing to do.".format(scancustomrepofile.CUSTOM_REPO_PATH)
+ assert api.current_logger.infomsg == msg
+ assert not api.produce.called
diff --git a/repos/system_upgrade/common/actors/setuptargetrepos/actor.py b/repos/system_upgrade/common/actors/setuptargetrepos/actor.py
index 91855818..3a7e955b 100644
--- a/repos/system_upgrade/common/actors/setuptargetrepos/actor.py
+++ b/repos/system_upgrade/common/actors/setuptargetrepos/actor.py
@@ -10,7 +10,8 @@ from leapp.models import (
RHUIInfo,
SkippedRepositories,
TargetRepositories,
- UsedRepositories
+ UsedRepositories,
+ VendorCustomTargetRepositoryList
)
from leapp.tags import FactsPhaseTag, IPUWorkflowTag
@@ -37,7 +38,8 @@ class SetupTargetRepos(Actor):
RepositoriesFacts,
RepositoriesBlacklisted,
RHUIInfo,
- UsedRepositories)
+ UsedRepositories,
+ VendorCustomTargetRepositoryList)
produces = (TargetRepositories, SkippedRepositories)
tags = (IPUWorkflowTag, FactsPhaseTag)
diff --git a/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py b/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py
index a6073aa3..dfa565c1 100644
--- a/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py
+++ b/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py
@@ -1,6 +1,7 @@
from leapp.libraries.actor import setuptargetrepos_repomap
from leapp.libraries.common.config.version import get_source_major_version, get_source_version, get_target_version
+from leapp.libraries.common.repomaputils import combine_repomap_messages
from leapp.libraries.stdlib import api
from leapp.models import (
CustomTargetRepository,
@@ -13,7 +14,8 @@ from leapp.models import (
RHUIInfo,
SkippedRepositories,
TargetRepositories,
- UsedRepositories
+ UsedRepositories,
+ VendorCustomTargetRepositoryList
)
RHUI_CLIENT_REPOIDS_RHEL88_TO_RHEL810 = {
@@ -80,13 +82,62 @@ def _get_mapped_repoids(repomap, src_repoids):
return mapped_repoids
+def _get_vendor_custom_repos(enabled_repos, mapping_list):
+    # Look at which source repos from the vendor mapping were enabled.
+    # If any of them belong to the beta channel, include the vendor's beta repos in the list.
+    # Otherwise filter the beta repos out.
+
+ result = []
+
+ # Build a dict of vendor mappings for easy lookup.
+ map_dict = {mapping.vendor: mapping for mapping in mapping_list if mapping.vendor}
+
+ for vendor_repolist in api.consume(VendorCustomTargetRepositoryList):
+ vendor_repomap = map_dict[vendor_repolist.vendor]
+
+ # Find the beta channel repositories for the vendor.
+ beta_repos = [
+ x.repoid for x in vendor_repomap.repositories if x.channel == "beta"
+ ]
+ api.current_logger().debug(
+ "Vendor {} beta repos: {}".format(vendor_repolist.vendor, beta_repos)
+ )
+
+ # Are any of the beta repos present and enabled on the system?
+ if any(rep in beta_repos for rep in enabled_repos):
+ # If so, use all repos including beta in the upgrade.
+ vendor_repos = vendor_repolist.repos
+ else:
+ # Otherwise filter beta repos out.
+ vendor_repos = [repo for repo in vendor_repolist.repos if repo.repoid not in beta_repos]
+
+ result.extend([CustomTargetRepository(
+ repoid=repo.repoid,
+ name=repo.name,
+ baseurl=repo.baseurl,
+ enabled=repo.enabled,
+ ) for repo in vendor_repos])
+
+ return result
+
+
def process():
# Load relevant data from messages
used_repoids_dict = _get_used_repo_dict()
enabled_repoids = _get_enabled_repoids()
excluded_repoids = _get_blacklisted_repoids()
+
+    # Remember that we can't just grab one message; each vendor can have its own mapping.
+ repo_mapping_list = list(api.consume(RepositoriesMapping))
+
custom_repos = _get_custom_target_repos()
repoids_from_installed_packages = _get_repoids_from_installed_packages()
+ vendor_repos = _get_vendor_custom_repos(enabled_repoids, repo_mapping_list)
+ custom_repos.extend(vendor_repos)
+
+ api.current_logger().debug(
+ "Vendor repolist: {}".format([repo.repoid for repo in vendor_repos])
+ )
# Setup repomap handler
repo_mappig_msg = next(api.consume(RepositoriesMapping), RepositoriesMapping())
@@ -168,6 +219,10 @@ def process():
custom_repos = [repo for repo in custom_repos if repo.repoid not in excluded_repoids]
custom_repos = sorted(custom_repos, key=lambda x: x.repoid)
+ api.current_logger().debug(
+ "Final repolist: {}".format([repo.repoid for repo in custom_repos])
+ )
+
# produce message about skipped repositories
enabled_repoids_with_mapping = _get_mapped_repoids(repomap, enabled_repoids)
skipped_repoids = enabled_repoids & set(used_repoids_dict.keys()) - enabled_repoids_with_mapping
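
A standalone sketch of the beta-channel decision made by _get_vendor_custom_repos() above, with plain strings in place of the leapp models (repoids are illustrative):

    def pick_vendor_repos(enabled_repoids, vendor_target_repoids, beta_repoids):
        # If any beta repo from the vendor's mapping is enabled on the source system,
        # keep everything including the beta target repos; otherwise drop the beta ones.
        if any(repoid in beta_repoids for repoid in enabled_repoids):
            return list(vendor_target_repoids)
        return [r for r in vendor_target_repoids if r not in beta_repoids]

    assert pick_vendor_repos(
        {'vendor-el8'},                          # enabled on the source system
        ['vendor-el9', 'vendor-el9-beta'],       # vendor's target repos
        {'vendor-el8-beta', 'vendor-el9-beta'},  # beta repoids from the vendor mapping
    ) == ['vendor-el9']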
diff --git a/repos/system_upgrade/common/actors/systemfacts/actor.py b/repos/system_upgrade/common/actors/systemfacts/actor.py
index 59b12c87..85d4a09e 100644
--- a/repos/system_upgrade/common/actors/systemfacts/actor.py
+++ b/repos/system_upgrade/common/actors/systemfacts/actor.py
@@ -47,7 +47,7 @@ class SystemFactsActor(Actor):
GrubCfgBios,
Report
)
- tags = (IPUWorkflowTag, FactsPhaseTag,)
+ tags = (IPUWorkflowTag, FactsPhaseTag.Before,)
def process(self):
self.produce(systemfacts.get_sysctls_status())
diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
index 55877d05..0b7a5b3a 100644
--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
+++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
@@ -152,9 +152,10 @@ def _import_gpg_keys(context, install_root_dir, target_major_version):
# Import the RHEL X+1 GPG key to be able to verify the installation of initial packages
try:
# Import also any other keys provided by the customer in the same directory
- for certname in os.listdir(certs_path):
- cmd = ['rpm', '--root', install_root_dir, '--import', os.path.join(certs_path, certname)]
- context.call(cmd, callback_raw=utils.logging_handler)
+ for trusted_dir in certs_path:
+ for certname in os.listdir(trusted_dir):
+ cmd = ['rpm', '--root', install_root_dir, '--import', os.path.join(trusted_dir, certname)]
+ context.call(cmd, callback_raw=utils.logging_handler)
except CalledProcessError as exc:
raise StopActorExecutionError(
message=(
@@ -641,6 +642,7 @@ def _prep_repository_access(context, target_userspace):
run(["chroot", target_userspace, "/bin/bash", "-c", "su - -c update-ca-trust"])
if not rhsm.skip_rhsm():
+ _copy_certificates(context, target_userspace)
run(['rm', '-rf', os.path.join(target_etc, 'rhsm')])
context.copytree_from('/etc/rhsm', os.path.join(target_etc, 'rhsm'))
diff --git a/repos/system_upgrade/common/actors/trustedgpgkeysscanner/libraries/trustedgpgkeys.py b/repos/system_upgrade/common/actors/trustedgpgkeysscanner/libraries/trustedgpgkeys.py
index 6377f767..4c5420f6 100644
--- a/repos/system_upgrade/common/actors/trustedgpgkeysscanner/libraries/trustedgpgkeys.py
+++ b/repos/system_upgrade/common/actors/trustedgpgkeysscanner/libraries/trustedgpgkeys.py
@@ -13,13 +13,14 @@ def _get_pubkeys(installed_rpms):
pubkeys = get_pubkeys_from_rpms(installed_rpms)
db_pubkeys = [key.fingerprint for key in pubkeys]
certs_path = get_path_to_gpg_certs()
- for certname in os.listdir(certs_path):
- key_file = os.path.join(certs_path, certname)
- fps = get_gpg_fp_from_file(key_file)
- for fp in fps:
- if fp not in db_pubkeys:
- pubkeys.append(GpgKey(fingerprint=fp, rpmdb=False, filename=key_file))
- db_pubkeys += fp
+ for trusted_dir in certs_path:
+ for certname in os.listdir(trusted_dir):
+ key_file = os.path.join(trusted_dir, certname)
+ fps = get_gpg_fp_from_file(key_file)
+ for fp in fps:
+ if fp not in db_pubkeys:
+ pubkeys.append(GpgKey(fingerprint=fp, rpmdb=False, filename=key_file))
+ db_pubkeys += fp
return pubkeys
diff --git a/repos/system_upgrade/common/actors/vendorreposignaturescanner/actor.py b/repos/system_upgrade/common/actors/vendorreposignaturescanner/actor.py
new file mode 100644
index 00000000..dbf86974
--- /dev/null
+++ b/repos/system_upgrade/common/actors/vendorreposignaturescanner/actor.py
@@ -0,0 +1,72 @@
+import os
+
+from leapp.actors import Actor
+from leapp.models import VendorSignatures, ActiveVendorList
+from leapp.tags import FactsPhaseTag, IPUWorkflowTag
+
+
+VENDORS_DIR = "/etc/leapp/files/vendors.d/"
+SIGFILE_SUFFIX = ".sigs"
+
+
+class VendorRepoSignatureScanner(Actor):
+ """
+ Produce VendorSignatures messages for the vendor signature files inside the
+ <VENDORS_DIR>.
+    These messages are used to extend the list of packages Leapp will consider
+    signed and will attempt to upgrade.
+
+    The messages are produced only if a "from" vendor repository
+    listed inside its map matches one of the repositories active on the system.
+ """
+
+ name = 'vendor_repo_signature_scanner'
+    consumes = (ActiveVendorList,)
+    produces = (VendorSignatures,)
+ tags = (IPUWorkflowTag, FactsPhaseTag.Before)
+
+ def process(self):
+ if not os.path.isdir(VENDORS_DIR):
+ self.log.debug(
+ "The {} directory doesn't exist. Nothing to do.".format(VENDORS_DIR)
+ )
+ return
+
+ active_vendors = []
+ for vendor_list in self.consume(ActiveVendorList):
+ active_vendors.extend(vendor_list.data)
+
+ self.log.debug(
+ "Active vendor list: {}".format(active_vendors)
+ )
+
+ for sigfile_name in os.listdir(VENDORS_DIR):
+ if not sigfile_name.endswith(SIGFILE_SUFFIX):
+ continue
+ # Cut the suffix part to get only the name.
+            vendor_name = sigfile_name[:-len(SIGFILE_SUFFIX)]
+
+ if vendor_name not in active_vendors:
+ self.log.debug(
+ "Vendor {} not in active list, skipping".format(vendor_name)
+ )
+ continue
+
+ self.log.debug(
+ "Vendor {} found in active list, processing file {}".format(vendor_name, sigfile_name)
+ )
+
+ full_sigfile_path = os.path.join(VENDORS_DIR, sigfile_name)
+ with open(full_sigfile_path) as f:
+ signatures = [line for line in f.read().splitlines() if line]
+
+ self.produce(
+ VendorSignatures(
+ vendor=vendor_name,
+ sigs=signatures,
+ )
+ )
+
+ self.log.info(
+ "The {} directory exists, vendor signatures loaded.".format(VENDORS_DIR)
+ )
diff --git a/repos/system_upgrade/common/actors/vendorrepositoriesmapping/actor.py b/repos/system_upgrade/common/actors/vendorrepositoriesmapping/actor.py
new file mode 100644
index 00000000..13256476
--- /dev/null
+++ b/repos/system_upgrade/common/actors/vendorrepositoriesmapping/actor.py
@@ -0,0 +1,19 @@
+from leapp.actors import Actor
+# from leapp.libraries.common.repomaputils import scan_vendor_repomaps, VENDOR_REPOMAP_DIR
+from leapp.libraries.actor.vendorrepositoriesmapping import scan_vendor_repomaps
+from leapp.models import VendorSourceRepos, RepositoriesMapping
+from leapp.tags import FactsPhaseTag, IPUWorkflowTag
+
+
+class VendorRepositoriesMapping(Actor):
+ """
+ Scan the vendor repository mapping files and provide the data to other actors.
+ """
+
+ name = "vendor_repositories_mapping"
+ consumes = ()
+ produces = (RepositoriesMapping, VendorSourceRepos,)
+ tags = (IPUWorkflowTag, FactsPhaseTag.Before)
+
+ def process(self):
+ scan_vendor_repomaps()
diff --git a/repos/system_upgrade/common/actors/vendorrepositoriesmapping/libraries/vendorrepositoriesmapping.py b/repos/system_upgrade/common/actors/vendorrepositoriesmapping/libraries/vendorrepositoriesmapping.py
new file mode 100644
index 00000000..6a41d4e5
--- /dev/null
+++ b/repos/system_upgrade/common/actors/vendorrepositoriesmapping/libraries/vendorrepositoriesmapping.py
@@ -0,0 +1,92 @@
+import os
+import json
+
+from leapp.libraries.common import fetch
+from leapp.libraries.common.config.version import get_target_major_version, get_source_major_version
+from leapp.libraries.common.repomaputils import RepoMapData
+from leapp.libraries.stdlib import api
+from leapp.models import VendorSourceRepos, RepositoriesMapping
+from leapp.models.fields import ModelViolationError
+from leapp.exceptions import StopActorExecutionError
+
+
+VENDORS_DIR = "/etc/leapp/files/vendors.d"
+"""The folder containing the vendor repository mapping files."""
+
+
+def inhibit_upgrade(msg):
+ raise StopActorExecutionError(
+ msg,
+ details={'hint': ('Read documentation at the following link for more'
+ ' information about how to retrieve the valid file:'
+ ' https://access.redhat.com/articles/3664871')})
+
+
+def read_repofile(repofile, repodir):
+ try:
+ return json.loads(fetch.read_or_fetch(repofile, directory=repodir, allow_download=False))
+ except ValueError:
+ # The data does not contain a valid json
+ inhibit_upgrade('The repository mapping file is invalid: file does not contain a valid JSON object.')
+ return None
+
+
+def read_repomap_file(repomap_file, read_repofile_func, vendor_name):
+ json_data = read_repofile_func(repomap_file, VENDORS_DIR)
+ try:
+ repomap_data = RepoMapData.load_from_dict(json_data)
+
+ source_major = get_source_major_version()
+ target_major = get_target_major_version()
+
+ api.produce(VendorSourceRepos(
+ vendor=vendor_name,
+ source_repoids=repomap_data.get_version_repoids(source_major)
+ ))
+
+ mapping = repomap_data.get_mappings(source_major, target_major)
+ valid_major_versions = [source_major, target_major]
+
+ api.produce(RepositoriesMapping(
+ mapping=mapping,
+ repositories=repomap_data.get_repositories(valid_major_versions),
+ vendor=vendor_name
+ ))
+ except ModelViolationError as err:
+ err_message = (
+ 'The repository mapping file is invalid: '
+ 'the JSON does not match required schema (wrong field type/value): {}. '
+ 'Ensure that the current upgrade path is correct and is present in the mappings: {} -> {}'
+ .format(err, source_major, target_major)
+ )
+ inhibit_upgrade(err_message)
+ except KeyError as err:
+ inhibit_upgrade(
+ 'The repository mapping file is invalid: the JSON is missing a required field: {}'.format(err))
+ except ValueError as err:
+ # The error should contain enough information, so we do not need to clarify it further
+ inhibit_upgrade('The repository mapping file is invalid: {}'.format(err))
+
+
+def scan_vendor_repomaps(read_repofile_func=read_repofile):
+ """
+ Scan the repository mapping file and produce RepositoriesMapping msg.
+
+ See the description of the actor for more details.
+ """
+
+ map_json_suffix = "_map.json"
+ if os.path.isdir(VENDORS_DIR):
+ vendor_mapfiles = list(filter(lambda vfile: map_json_suffix in vfile, os.listdir(VENDORS_DIR)))
+
+ for mapfile in vendor_mapfiles:
+ read_repomap_file(mapfile, read_repofile_func, mapfile[:-len(map_json_suffix)])
+ else:
+ api.current_logger().debug(
+ "The {} directory doesn't exist. Nothing to do.".format(VENDORS_DIR)
+ )
+ # vendor_repomap_collection = scan_vendor_repomaps(VENDOR_REPOMAP_DIR)
+ # if vendor_repomap_collection:
+ # self.produce(vendor_repomap_collection)
+ # for repomap in vendor_repomap_collection.maps:
+ # self.produce(repomap)
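
For illustration, a minimal hypothetical vendors.d/examplevendor_map.json that RepoMapData.load_from_dict() (moved to repomaputils.py below) would accept; the field names follow add_repository() and load_from_dict(), and all values are made up:

    {
      "version_format": "1.3.0",
      "repositories": [
        {"pesid": "examplevendor8", "entries": [
          {"repoid": "examplevendor-el8", "channel": "ga", "repo_type": "rpm",
           "arch": "x86_64", "major_version": "8", "distro": "rhel"}
        ]},
        {"pesid": "examplevendor9", "entries": [
          {"repoid": "examplevendor-el9", "channel": "ga", "repo_type": "rpm",
           "arch": "x86_64", "major_version": "9", "distro": "rhel"}
        ]}
      ],
      "mapping": [
        {"source_major_version": "8", "target_major_version": "9", "entries": [
          {"source": "examplevendor8", "target": ["examplevendor9"]}
        ]}
      ]
    }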
diff --git a/repos/system_upgrade/common/files/distro/almalinux/gpg-signatures.json b/repos/system_upgrade/common/files/distro/almalinux/gpg-signatures.json
index 24bc93ba..0629d123 100644
--- a/repos/system_upgrade/common/files/distro/almalinux/gpg-signatures.json
+++ b/repos/system_upgrade/common/files/distro/almalinux/gpg-signatures.json
@@ -8,11 +8,19 @@
],
"obsoleted-keys": {
"7": [],
- "8": [],
+ "8": [
+ "gpg-pubkey-2fa658e0-45700c69",
+ "gpg-pubkey-37017186-45761324",
+ "gpg-pubkey-db42a60e-37ea5438"
+ ],
"9": [
+ "gpg-pubkey-d4082792-5b32db75",
"gpg-pubkey-3abb34f8-5ffd890e",
+ "gpg-pubkey-6275f250-5e26cb2e",
+ "gpg-pubkey-73e3b907-6581b071",
"gpg-pubkey-ced7258b-6525146f"
],
"10": ["gpg-pubkey-b86b3716-61e69f29"]
}
+
}
diff --git a/repos/system_upgrade/common/files/distro/centos/gpg-signatures.json b/repos/system_upgrade/common/files/distro/centos/gpg-signatures.json
index fe85e03c..6dfa5b0f 100644
--- a/repos/system_upgrade/common/files/distro/centos/gpg-signatures.json
+++ b/repos/system_upgrade/common/files/distro/centos/gpg-signatures.json
@@ -2,9 +2,24 @@
"keys": [
"24c6a8a7f4a80eb5",
"05b555b38483c65d",
- "4eb84e71f2ee9d55"
+ "4eb84e71f2ee9d55",
+ "429785e181b961a5",
+ "d07bf2a08d50eb66",
+ "6c7cb6ef305d49d6"
],
"obsoleted-keys": {
+ "7": [],
+ "8": [
+ "gpg-pubkey-2fa658e0-45700c69",
+ "gpg-pubkey-37017186-45761324",
+ "gpg-pubkey-db42a60e-37ea5438"
+ ],
+ "9": [
+ "gpg-pubkey-d4082792-5b32db75",
+ "gpg-pubkey-3abb34f8-5ffd890e",
+ "gpg-pubkey-6275f250-5e26cb2e",
+ "gpg-pubkey-73e3b907-6581b071"
+ ],
"10": ["gpg-pubkey-8483c65d-5ccc5b19"]
}
}
diff --git a/repos/system_upgrade/common/files/distro/cloudlinux/gpg-signatures.json b/repos/system_upgrade/common/files/distro/cloudlinux/gpg-signatures.json
new file mode 100644
index 00000000..acad9006
--- /dev/null
+++ b/repos/system_upgrade/common/files/distro/cloudlinux/gpg-signatures.json
@@ -0,0 +1,22 @@
+{
+ "keys": [
+ "8c55a6628608cb71",
+ "d07bf2a08d50eb66",
+ "429785e181b961a5"
+ ],
+ "obsoleted-keys": {
+ "7": [],
+ "8": [
+ "gpg-pubkey-2fa658e0-45700c69",
+ "gpg-pubkey-37017186-45761324",
+ "gpg-pubkey-db42a60e-37ea5438"
+ ],
+ "9": [
+ "gpg-pubkey-d4082792-5b32db75",
+ "gpg-pubkey-3abb34f8-5ffd890e",
+ "gpg-pubkey-6275f250-5e26cb2e",
+ "gpg-pubkey-73e3b907-6581b071"
+ ],
+ "10": []
+ }
+}
diff --git a/repos/system_upgrade/common/files/distro/ol/gpg-signatures.json b/repos/system_upgrade/common/files/distro/ol/gpg-signatures.json
new file mode 100644
index 00000000..a53775cf
--- /dev/null
+++ b/repos/system_upgrade/common/files/distro/ol/gpg-signatures.json
@@ -0,0 +1,24 @@
+{
+ "keys": [
+ "72f97b74ec551f03",
+ "82562ea9ad986da3",
+ "bc4d06a08d8b756f",
+ "429785e181b961a5",
+ "d07bf2a08d50eb66"
+ ],
+ "obsoleted-keys": {
+ "7": [],
+ "8": [
+ "gpg-pubkey-2fa658e0-45700c69",
+ "gpg-pubkey-37017186-45761324",
+ "gpg-pubkey-db42a60e-37ea5438"
+ ],
+ "9": [
+ "gpg-pubkey-d4082792-5b32db75",
+ "gpg-pubkey-3abb34f8-5ffd890e",
+ "gpg-pubkey-6275f250-5e26cb2e",
+ "gpg-pubkey-73e3b907-6581b071"
+ ],
+ "10": []
+ }
+}
diff --git a/repos/system_upgrade/common/files/distro/rhel/gpg-signatures.json b/repos/system_upgrade/common/files/distro/rhel/gpg-signatures.json
index 3cc67f82..c1f4acf4 100644
--- a/repos/system_upgrade/common/files/distro/rhel/gpg-signatures.json
+++ b/repos/system_upgrade/common/files/distro/rhel/gpg-signatures.json
@@ -4,7 +4,9 @@
"5326810137017186",
"938a80caf21541eb",
"fd372689897da07a",
- "45689c882fa658e0"
+ "45689c882fa658e0",
+ "429785e181b961a5",
+ "d07bf2a08d50eb66"
],
"obsoleted-keys": {
"7": [],
@@ -13,7 +15,12 @@
"gpg-pubkey-37017186-45761324",
"gpg-pubkey-db42a60e-37ea5438"
],
- "9": ["gpg-pubkey-d4082792-5b32db75"],
+ "9": [
+ "gpg-pubkey-d4082792-5b32db75",
+ "gpg-pubkey-3abb34f8-5ffd890e",
+ "gpg-pubkey-6275f250-5e26cb2e",
+ "gpg-pubkey-73e3b907-6581b071"
+ ],
"10": ["gpg-pubkey-fd431d51-4ae0493b"]
}
}
diff --git a/repos/system_upgrade/common/files/distro/rocky/gpg-signatures.json b/repos/system_upgrade/common/files/distro/rocky/gpg-signatures.json
new file mode 100644
index 00000000..f1738e79
--- /dev/null
+++ b/repos/system_upgrade/common/files/distro/rocky/gpg-signatures.json
@@ -0,0 +1,23 @@
+{
+ "keys": [
+ "15af5dac6d745a60",
+ "702d426d350d275d",
+ "429785e181b961a5",
+ "d07bf2a08d50eb66"
+ ],
+ "obsoleted-keys": {
+ "7": [],
+ "8": [
+ "gpg-pubkey-2fa658e0-45700c69",
+ "gpg-pubkey-37017186-45761324",
+ "gpg-pubkey-db42a60e-37ea5438"
+ ],
+ "9": [
+ "gpg-pubkey-d4082792-5b32db75",
+ "gpg-pubkey-3abb34f8-5ffd890e",
+ "gpg-pubkey-6275f250-5e26cb2e",
+ "gpg-pubkey-73e3b907-6581b071"
+ ],
+ "10": []
+ }
+}
diff --git a/repos/system_upgrade/common/files/distro/scientific/gpg-signatures.json b/repos/system_upgrade/common/files/distro/scientific/gpg-signatures.json
new file mode 100644
index 00000000..df764b53
--- /dev/null
+++ b/repos/system_upgrade/common/files/distro/scientific/gpg-signatures.json
@@ -0,0 +1,22 @@
+{
+ "keys": [
+ "b0b4183f192a7d7d",
+ "429785e181b961a5",
+ "d07bf2a08d50eb66"
+ ],
+ "obsoleted-keys": {
+ "7": [],
+ "8": [
+ "gpg-pubkey-2fa658e0-45700c69",
+ "gpg-pubkey-37017186-45761324",
+ "gpg-pubkey-db42a60e-37ea5438"
+ ],
+ "9": [
+ "gpg-pubkey-d4082792-5b32db75",
+ "gpg-pubkey-3abb34f8-5ffd890e",
+ "gpg-pubkey-6275f250-5e26cb2e",
+ "gpg-pubkey-73e3b907-6581b071"
+ ],
+ "10": []
+ }
+}
diff --git a/repos/system_upgrade/common/files/rhel_upgrade.py b/repos/system_upgrade/common/files/rhel_upgrade.py
index 4f76a61d..27824406 100644
--- a/repos/system_upgrade/common/files/rhel_upgrade.py
+++ b/repos/system_upgrade/common/files/rhel_upgrade.py
@@ -185,6 +185,7 @@ class RhelUpgradeCommand(dnf.cli.Command):
to_install = self.plugin_data['pkgs_info']['to_install']
to_remove = self.plugin_data['pkgs_info']['to_remove']
to_upgrade = self.plugin_data['pkgs_info']['to_upgrade']
+ to_reinstall = self.plugin_data['pkgs_info']['to_reinstall']
# Modules to enable
self._process_entities(entities=[available_modules_to_enable],
@@ -197,6 +198,9 @@ class RhelUpgradeCommand(dnf.cli.Command):
self._process_entities(entities=to_install, op=self.base.install, entity_name='Package')
# Packages to be upgraded
self._process_entities(entities=to_upgrade, op=self.base.upgrade, entity_name='Package')
+ # Packages to be reinstalled
+ self._process_entities(entities=to_reinstall, op=self.base.reinstall, entity_name='Package')
+
self.base.distro_sync()
if self.opts.tid[0] == 'check':
diff --git a/repos/system_upgrade/common/files/upgrade_paths.json b/repos/system_upgrade/common/files/upgrade_paths.json
index 22e0fd7d..47ca28d5 100644
--- a/repos/system_upgrade/common/files/upgrade_paths.json
+++ b/repos/system_upgrade/common/files/upgrade_paths.json
@@ -32,6 +32,7 @@
"almalinux": {
"default": {
"8.10": ["9.0", "9.1", "9.2", "9.3", "9.4", "9.5", "9.6", "9.7"],
+ "9.6": ["10.0"],
"9.7": ["10.0", "10.1"]
}
}
diff --git a/repos/system_upgrade/common/libraries/distro.py b/repos/system_upgrade/common/libraries/distro.py
index 2ed5eacd..219d31d1 100644
--- a/repos/system_upgrade/common/libraries/distro.py
+++ b/repos/system_upgrade/common/libraries/distro.py
@@ -3,6 +3,7 @@ import os
from leapp.exceptions import StopActorExecutionError
from leapp.libraries.stdlib import api
+from leapp.models import VendorSignatures
def get_distribution_data(distribution):
@@ -11,8 +12,14 @@ def get_distribution_data(distribution):
distribution_config = os.path.join(distributions_path, distribution, 'gpg-signatures.json')
if os.path.exists(distribution_config):
with open(distribution_config) as distro_config_file:
- return json.load(distro_config_file)
+ distro_config_json = json.load(distro_config_file)
else:
raise StopActorExecutionError(
'Cannot find distribution signature configuration.',
details={'Problem': 'Distribution {} was not found in {}.'.format(distribution, distributions_path)})
+
+ # Extend with Vendors signatures
+ for siglist in api.consume(VendorSignatures):
+ distro_config_json["keys"].extend(siglist.sigs)
+
+ return distro_config_json
diff --git a/repos/system_upgrade/common/libraries/dnfplugin.py b/repos/system_upgrade/common/libraries/dnfplugin.py
index 4f0c3a99..0f31f101 100644
--- a/repos/system_upgrade/common/libraries/dnfplugin.py
+++ b/repos/system_upgrade/common/libraries/dnfplugin.py
@@ -90,6 +90,7 @@ def build_plugin_data(target_repoids, debug, test, tasks, on_aws):
'to_install': sorted(tasks.to_install),
'to_remove': sorted(tasks.to_remove),
'to_upgrade': sorted(tasks.to_upgrade),
+ 'to_reinstall': sorted(tasks.to_reinstall),
'modules_to_enable': sorted(['{}:{}'.format(m.name, m.stream) for m in tasks.modules_to_enable]),
},
'dnf_conf': {
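
With the new key in place, the pkgs_info section built by build_plugin_data() and consumed by rhel_upgrade.py (see the hunk above) looks roughly like this (package names illustrative, unrelated keys omitted):

    pkgs_info = {
        'to_install': ['new-pkg'],
        'to_remove': ['old-pkg'],
        'to_upgrade': ['kept-pkg'],
        'to_reinstall': ['rebranded-pkg'],   # handed to base.reinstall() by the DNF plugin
        'modules_to_enable': ['nodejs:18'],
    }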
diff --git a/repos/system_upgrade/common/libraries/fetch.py b/repos/system_upgrade/common/libraries/fetch.py
index 82bf4ff3..cb20d775 100644
--- a/repos/system_upgrade/common/libraries/fetch.py
+++ b/repos/system_upgrade/common/libraries/fetch.py
@@ -146,7 +146,8 @@ def load_data_asset(actor_requesting_asset,
asset_filename,
asset_fulltext_name,
docs_url,
- docs_title):
+ docs_title,
+ asset_directory="/etc/leapp/files"):
"""
Load the content of the data asset with given asset_filename
and produce :class:`leapp.model.ConsumedDataAsset` message.
@@ -183,7 +184,7 @@ def load_data_asset(actor_requesting_asset,
try:
# The asset family ID has the form (major, minor), include only `major` in the URL
- raw_asset_contents = read_or_fetch(asset_filename, data_stream=data_stream_major, allow_download=False)
+ raw_asset_contents = read_or_fetch(asset_filename, directory=asset_directory, data_stream=data_stream_major, allow_download=False)
asset_contents = json.loads(raw_asset_contents)
except ValueError:
msg = 'The {0} file (at {1}) does not contain a valid JSON object.'.format(asset_fulltext_name, asset_filename)
diff --git a/repos/system_upgrade/common/libraries/gpg.py b/repos/system_upgrade/common/libraries/gpg.py
index c9c3f1fc..96907be0 100644
--- a/repos/system_upgrade/common/libraries/gpg.py
+++ b/repos/system_upgrade/common/libraries/gpg.py
@@ -122,12 +122,15 @@ def get_path_to_gpg_certs():
if target_product_type == 'beta':
certs_dir = '{}beta'.format(target_major_version)
distro = api.current_actor().configuration.os_release.release_id
- return os.path.join(
- api.get_common_folder_path('distro'),
- distro,
- GPG_CERTS_FOLDER,
- certs_dir
- )
+ return [
+ "/etc/leapp/files/vendors.d/rpm-gpg/",
+ os.path.join(
+ api.get_common_folder_path('distro'),
+ distro,
+ GPG_CERTS_FOLDER,
+ certs_dir
+ )
+ ]
def is_nogpgcheck_set():
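
Because get_path_to_gpg_certs() now returns a list of directories instead of a single path, callers iterate over it, as the userspacegen and trustedgpgkeys hunks above show. A minimal consuming sketch; unlike those hunks, it also skips directories that do not exist, which the vendor rpm-gpg directory may not:

    import os

    def iter_cert_files(cert_dirs):
        # Yield every key file found in any of the trusted certificate directories.
        for trusted_dir in cert_dirs:
            if not os.path.isdir(trusted_dir):
                continue  # e.g. /etc/leapp/files/vendors.d/rpm-gpg/ may be absent
            for certname in sorted(os.listdir(trusted_dir)):
                yield os.path.join(trusted_dir, certname)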
diff --git a/repos/system_upgrade/common/libraries/repomaputils.py b/repos/system_upgrade/common/libraries/repomaputils.py
new file mode 100644
index 00000000..39b7d662
--- /dev/null
+++ b/repos/system_upgrade/common/libraries/repomaputils.py
@@ -0,0 +1,141 @@
+from collections import defaultdict
+from leapp.models import PESIDRepositoryEntry, RepoMapEntry, RepositoriesMapping
+
+class RepoMapData(object):
+ VERSION_FORMAT = '1.3.0'
+
+ def __init__(self):
+ self.repositories = []
+ self.mapping = {}
+
+ def add_repository(self, data, pesid):
+ """
+ Add new PESIDRepositoryEntry with given pesid from the provided dictionary.
+
+ :param data: A dict containing the data of the added repository. The dictionary structure corresponds
+ to the repositories entries in the repository mapping JSON schema.
+ :type data: Dict[str, str]
+ :param pesid: PES id of the repository family that the newly added repository belongs to.
+ :type pesid: str
+ """
+ self.repositories.append(PESIDRepositoryEntry(
+ repoid=data['repoid'],
+ channel=data['channel'],
+ rhui=data.get('rhui', ''),
+ repo_type=data['repo_type'],
+ arch=data['arch'],
+ major_version=data['major_version'],
+ pesid=pesid,
+ distro=data['distro'],
+ ))
+
+ def get_repositories(self, valid_major_versions):
+ """
+        Return the list of PESIDRepositoryEntry objects matching the specified major versions.
+ """
+ return [repo for repo in self.repositories if repo.major_version in valid_major_versions]
+
+ def get_version_repoids(self, major_version):
+ """
+ Return the list of repository ID strings for repositories matching the specified major version.
+ """
+ return [repo.repoid for repo in self.repositories if repo.major_version == major_version]
+
+ def add_mapping(self, source_major_version, target_major_version, source_pesid, target_pesid):
+ """
+ Add a new mapping entry that is mapping the source pesid to the destination pesid(s),
+ relevant in an IPU from the supplied source major version to the supplied target
+ major version.
+
+ :param str source_major_version: Specifies the major version of the source system
+ for which the added mapping applies.
+ :param str target_major_version: Specifies the major version of the target system
+ for which the added mapping applies.
+ :param str source_pesid: PESID of the source repository.
+ :param Union[str|List[str]] target_pesid: A single target PESID or a list of target
+ PESIDs of the added mapping.
+ """
+        # NOTE: it could be simpler, but I prefer to be sure the input data
+ # contains just one map per source PESID.
+ key = '{}:{}'.format(source_major_version, target_major_version)
+ rmap = self.mapping.get(key, defaultdict(set))
+ self.mapping[key] = rmap
+ if isinstance(target_pesid, list):
+ rmap[source_pesid].update(target_pesid)
+ else:
+ rmap[source_pesid].add(target_pesid)
+
+ def get_mappings(self, src_major_version, dst_major_version):
+ """
+ Return the list of RepoMapEntry objects for the specified upgrade path.
+
+ IOW, the whole mapping for specified IPU.
+ """
+ key = '{}:{}'.format(src_major_version, dst_major_version)
+ rmap = self.mapping.get(key, None)
+ if not rmap:
+ return None
+ map_list = []
+ for src_pesid in sorted(rmap.keys()):
+ map_list.append(RepoMapEntry(source=src_pesid, target=sorted(rmap[src_pesid])))
+ return map_list
+
+ @staticmethod
+ def load_from_dict(data):
+ if data['version_format'] != RepoMapData.VERSION_FORMAT:
+ raise ValueError(
+                'The obtained repomap data has an unsupported format version.'
+                ' Got {}, required {}'
+ .format(data['version_format'], RepoMapData.VERSION_FORMAT)
+ )
+
+ repomap = RepoMapData()
+
+        # Load repositories
+ existing_pesids = set()
+ for repo_family in data['repositories']:
+ existing_pesids.add(repo_family['pesid'])
+ for repo in repo_family['entries']:
+ repomap.add_repository(repo, repo_family['pesid'])
+
+ # Load mappings
+ for mapping in data['mapping']:
+ for entry in mapping['entries']:
+ if not isinstance(entry['target'], list):
+ raise ValueError(
+ 'The target field of a mapping entry is not a list: {}'
+ .format(entry)
+ )
+
+ for pesid in [entry['source']] + entry['target']:
+ if pesid not in existing_pesids:
+ raise ValueError(
+ 'The {} pesid is not related to any repository.'
+ .format(pesid)
+ )
+ repomap.add_mapping(
+ source_major_version=mapping['source_major_version'],
+ target_major_version=mapping['target_major_version'],
+ source_pesid=entry['source'],
+ target_pesid=entry['target'],
+ )
+ return repomap
+
+def combine_repomap_messages(mapping_list):
+ """
+    Combine multiple RepositoriesMapping messages into one.
+ Needed because we might get more than one message if there are vendors present.
+ """
+ combined_mapping = []
+ combined_repositories = []
+ # Depending on whether there are any vendors present, we might get more than one message.
+ for msg in mapping_list:
+ combined_mapping.extend(msg.mapping)
+ combined_repositories.extend(msg.repositories)
+
+ combined_repomapping = RepositoriesMapping(
+ mapping=combined_mapping,
+ repositories=combined_repositories
+ )
+
+ return combined_repomapping
diff --git a/repos/system_upgrade/common/models/activevendorlist.py b/repos/system_upgrade/common/models/activevendorlist.py
new file mode 100644
index 00000000..de4056fb
--- /dev/null
+++ b/repos/system_upgrade/common/models/activevendorlist.py
@@ -0,0 +1,7 @@
+from leapp.models import Model, fields
+from leapp.topics import VendorTopic
+
+
+class ActiveVendorList(Model):
+ topic = VendorTopic
+ data = fields.List(fields.String())
diff --git a/repos/system_upgrade/common/models/dnfpluginpathdetected.py b/repos/system_upgrade/common/models/dnfpluginpathdetected.py
new file mode 100644
index 00000000..c5474857
--- /dev/null
+++ b/repos/system_upgrade/common/models/dnfpluginpathdetected.py
@@ -0,0 +1,14 @@
+from leapp.models import fields, Model
+from leapp.topics import SystemInfoTopic
+
+
+class DnfPluginPathDetected(Model):
+ """
+    This model contains information about whether the DNF pluginpath option is configured in /etc/dnf/dnf.conf.
+ """
+ topic = SystemInfoTopic
+
+ is_pluginpath_detected = fields.Boolean()
+ """
+    True if the pluginpath option is found in /etc/dnf/dnf.conf, False otherwise.
+ """
diff --git a/repos/system_upgrade/common/models/repositoriesmap.py b/repos/system_upgrade/common/models/repositoriesmap.py
index 842cd807..fc740606 100644
--- a/repos/system_upgrade/common/models/repositoriesmap.py
+++ b/repos/system_upgrade/common/models/repositoriesmap.py
@@ -96,3 +96,4 @@ class RepositoriesMapping(Model):
mapping = fields.List(fields.Model(RepoMapEntry), default=[])
repositories = fields.List(fields.Model(PESIDRepositoryEntry), default=[])
+ vendor = fields.Nullable(fields.String())
diff --git a/repos/system_upgrade/common/models/rpmtransactiontasks.py b/repos/system_upgrade/common/models/rpmtransactiontasks.py
index 7e2870d0..05d4e941 100644
--- a/repos/system_upgrade/common/models/rpmtransactiontasks.py
+++ b/repos/system_upgrade/common/models/rpmtransactiontasks.py
@@ -10,6 +10,7 @@ class RpmTransactionTasks(Model):
to_keep = fields.List(fields.String(), default=[])
to_remove = fields.List(fields.String(), default=[])
to_upgrade = fields.List(fields.String(), default=[])
+ to_reinstall = fields.List(fields.String(), default=[])
modules_to_enable = fields.List(fields.Model(Module), default=[])
modules_to_reset = fields.List(fields.Model(Module), default=[])
diff --git a/repos/system_upgrade/common/models/targetrepositories.py b/repos/system_upgrade/common/models/targetrepositories.py
index 02c6c5e5..f9fd4238 100644
--- a/repos/system_upgrade/common/models/targetrepositories.py
+++ b/repos/system_upgrade/common/models/targetrepositories.py
@@ -21,6 +21,12 @@ class CustomTargetRepository(TargetRepositoryBase):
enabled = fields.Boolean(default=True)
+class VendorCustomTargetRepositoryList(Model):
+ topic = TransactionTopic
+ vendor = fields.String()
+ repos = fields.List(fields.Model(CustomTargetRepository))
+
+
class TargetRepositories(Model):
"""
Repositories supposed to be used during the IPU process
diff --git a/repos/system_upgrade/common/models/vendorsignatures.py b/repos/system_upgrade/common/models/vendorsignatures.py
new file mode 100644
index 00000000..f456aec5
--- /dev/null
+++ b/repos/system_upgrade/common/models/vendorsignatures.py
@@ -0,0 +1,8 @@
+from leapp.models import Model, fields
+from leapp.topics import VendorTopic
+
+
+class VendorSignatures(Model):
+ topic = VendorTopic
+ vendor = fields.String()
+ sigs = fields.List(fields.String())
diff --git a/repos/system_upgrade/common/models/vendorsourcerepos.py b/repos/system_upgrade/common/models/vendorsourcerepos.py
new file mode 100644
index 00000000..b7a219b4
--- /dev/null
+++ b/repos/system_upgrade/common/models/vendorsourcerepos.py
@@ -0,0 +1,12 @@
+from leapp.models import Model, fields
+from leapp.topics import VendorTopic
+
+
+class VendorSourceRepos(Model):
+ """
+ This model contains the data on all source repositories associated with a specific vendor.
+    Its data is used to determine whether the vendor should be included in the upgrade process.
+ """
+ topic = VendorTopic
+ vendor = fields.String()
+ source_repoids = fields.List(fields.String())
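
The docstring above describes the intended use; a sketch of a possible consumer follows (not part of the patch — vendor_is_relevant() and its arguments are hypothetical, only leapp's api.consume() is taken as given):

    # Illustrative sketch, not part of the patch.
    from leapp.libraries.stdlib import api
    from leapp.models import VendorSourceRepos

    def vendor_is_relevant(vendor_name, enabled_repoids):
        # Hypothetical helper: treat a vendor as active when at least one of its
        # source repositories is enabled on the system being upgraded.
        for msg in api.consume(VendorSourceRepos):
            if msg.vendor == vendor_name:
                return bool(set(msg.source_repoids) & set(enabled_repoids))
        return False
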
diff --git a/repos/system_upgrade/common/topics/vendortopic.py b/repos/system_upgrade/common/topics/vendortopic.py
new file mode 100644
index 00000000..014b7afb
--- /dev/null
+++ b/repos/system_upgrade/common/topics/vendortopic.py
@@ -0,0 +1,5 @@
+from leapp.topics import Topic
+
+
+class VendorTopic(Topic):
+ name = 'vendor_topic'
diff --git a/repos/system_upgrade/el8toel9/actors/addarmbootloaderworkaround/libraries/addupgradebootloader.py b/repos/system_upgrade/el8toel9/actors/addarmbootloaderworkaround/libraries/addupgradebootloader.py
index c076fe6b..2455a2f6 100644
--- a/repos/system_upgrade/el8toel9/actors/addarmbootloaderworkaround/libraries/addupgradebootloader.py
+++ b/repos/system_upgrade/el8toel9/actors/addarmbootloaderworkaround/libraries/addupgradebootloader.py
@@ -14,6 +14,22 @@ from leapp.libraries.common.grub import (
from leapp.libraries.stdlib import api, CalledProcessError, run
from leapp.models import ArmWorkaroundEFIBootloaderInfo, EFIBootEntry, TargetUserSpaceInfo
+dirname = {
+ 'AlmaLinux': 'almalinux',
+ 'CentOS Linux': 'centos',
+ 'CentOS Stream': 'centos',
+ 'Oracle Linux Server': 'redhat',
+ 'Red Hat Enterprise Linux': 'redhat',
+ 'Rocky Linux': 'rocky',
+ 'Scientific Linux': 'redhat',
+}
+
+with open('/etc/system-release', 'r') as sr:
+ release_line = next(line for line in sr if 'release' in line)
+ distro = release_line.split(' release ', 1)[0]
+
+distro_dir = dirname.get(distro, 'default')
+
UPGRADE_EFI_ENTRY_LABEL = 'Leapp Upgrade'
ARM_SHIM_PACKAGE_NAME = 'shim-aa64'
@@ -21,7 +37,7 @@ ARM_GRUB_PACKAGE_NAME = 'grub2-efi-aa64'
EFI_MOUNTPOINT = '/boot/efi/'
LEAPP_EFIDIR_CANONICAL_PATH = os.path.join(EFI_MOUNTPOINT, 'EFI/leapp/')
-RHEL_EFIDIR_CANONICAL_PATH = os.path.join(EFI_MOUNTPOINT, 'EFI/redhat/')
+RHEL_EFIDIR_CANONICAL_PATH = os.path.join(EFI_MOUNTPOINT, 'EFI/', distro_dir)
UPGRADE_BLS_DIR = '/boot/upgrade-loader'
CONTAINER_DOWNLOAD_DIR = '/tmp_pkg_download_dir'
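
To illustrate how the module-level detection above resolves the bootloader directory, here is a short walk-through (not part of the patch; the release string is an example):

    # Illustrative walk-through, not part of the patch.
    release_line = 'AlmaLinux release 8.10 (Cerulean Leopard)'   # example first 'release' line
    distro = release_line.split(' release ', 1)[0]               # -> 'AlmaLinux'
    distro_dir = dirname.get(distro, 'default')                  # -> 'almalinux'
    # os.path.join('/boot/efi/', 'EFI/', 'almalinux') -> '/boot/efi/EFI/almalinux'
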
diff --git a/repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/tests/unit_test_networkmanagerconnectionscanner.py b/repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/tests/unit_test_networkmanagerconnectionscanner.py
index 46af07c1..7558b307 100644
--- a/repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/tests/unit_test_networkmanagerconnectionscanner.py
+++ b/repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/tests/unit_test_networkmanagerconnectionscanner.py
@@ -1,4 +1,5 @@
import errno
+import sys
import textwrap
import pytest
@@ -57,7 +58,16 @@ def test_no_conf(monkeypatch):
assert not api.produce.called
-@pytest.mark.skipif(not nmconnscanner.libnm_available, reason="NetworkManager g-ir not installed")
+@pytest.mark.skipif(
+ sys.version_info.major != 3 or sys.version_info.minor != 6,
+ # On Python > 3.6 the GLib and NM libraries apparently behave differently and
+    # the test fails. Let's skip it since the actor is only ever run with
+    # Python 3.6 (el8toel9 repo and FactsPhase).
+ reason="Only runs on Python 3.6",
+)
+@pytest.mark.skipif(
+ not nmconnscanner.libnm_available, reason="NetworkManager g-ir not installed"
+)
def test_nm_conn(monkeypatch):
"""
Check a basic keyfile
diff --git a/repos/system_upgrade/el8toel9/actors/removeupgradeefientry/libraries/removeupgradeefientry.py b/repos/system_upgrade/el8toel9/actors/removeupgradeefientry/libraries/removeupgradeefientry.py
index daa7b2ca..dd604d8b 100644
--- a/repos/system_upgrade/el8toel9/actors/removeupgradeefientry/libraries/removeupgradeefientry.py
+++ b/repos/system_upgrade/el8toel9/actors/removeupgradeefientry/libraries/removeupgradeefientry.py
@@ -5,9 +5,25 @@ from leapp.exceptions import StopActorExecutionError
from leapp.libraries.stdlib import api, CalledProcessError, run
from leapp.models import ArmWorkaroundEFIBootloaderInfo
+dirname = {
+ 'AlmaLinux': 'almalinux',
+ 'CentOS Linux': 'centos',
+ 'CentOS Stream': 'centos',
+ 'Oracle Linux Server': 'redhat',
+ 'Red Hat Enterprise Linux': 'redhat',
+ 'Rocky Linux': 'rocky',
+ 'Scientific Linux': 'redhat',
+}
+
+with open('/etc/system-release', 'r') as sr:
+ release_line = next(line for line in sr if 'release' in line)
+ distro = release_line.split(' release ', 1)[0]
+
+distro_dir = dirname.get(distro, 'default')
+
EFI_MOUNTPOINT = '/boot/efi/'
LEAPP_EFIDIR_CANONICAL_PATH = os.path.join(EFI_MOUNTPOINT, 'EFI/leapp/')
-RHEL_EFIDIR_CANONICAL_PATH = os.path.join(EFI_MOUNTPOINT, 'EFI/redhat/')
+RHEL_EFIDIR_CANONICAL_PATH = os.path.join(EFI_MOUNTPOINT, 'EFI/', distro_dir)
def get_workaround_efi_info():
diff --git a/repos/system_upgrade/el9toel10/actors/sssd/sssdchecks/libraries/sssdchecks.py b/repos/system_upgrade/el9toel10/actors/sssd/sssdchecks/libraries/sssdchecks.py
index 0a86fa7b..cb95026c 100644
--- a/repos/system_upgrade/el9toel10/actors/sssd/sssdchecks/libraries/sssdchecks.py
+++ b/repos/system_upgrade/el9toel10/actors/sssd/sssdchecks/libraries/sssdchecks.py
@@ -15,8 +15,8 @@ def check_config(model):
'SSSD\'s sss_ssh_knownhostsproxy tool is replaced by the more '
'reliable sss_ssh_knownhosts tool. SSH\'s configuration will be updated '
'to reflect this by updating every mention of sss_ssh_knownhostsproxy by '
- 'the corresponding mention of sss_ssh_knownhosts, even those commented out.\n'
- 'SSSD\'s ssh service will be enabled if not already done.\n'
+ 'the corresponding mention of sss_ssh_knownhosts, even those commented out. '
+ 'SSSD\'s ssh service will be enabled if not already done.\n\n'
'The following files will be updated:{}{}'.format(
FMT_LIST_SEPARATOR,
FMT_LIST_SEPARATOR.join(model.sssd_config_files + model.ssh_config_files)
diff --git a/repos/system_upgrade/el9toel10/actors/sssd/sssdfacts/libraries/sssdfacts.py b/repos/system_upgrade/el9toel10/actors/sssd/sssdfacts/libraries/sssdfacts.py
index 0ae9d93f..7d343229 100644
--- a/repos/system_upgrade/el9toel10/actors/sssd/sssdfacts/libraries/sssdfacts.py
+++ b/repos/system_upgrade/el9toel10/actors/sssd/sssdfacts/libraries/sssdfacts.py
@@ -19,7 +19,10 @@ def _does_file_contain_expression(file_path, expression):
)
return False
except OSError as e:
- raise StopActorExecutionError('Could not open file ' + file_path, details={'details': str(e)})
+ raise StopActorExecutionError(
+ 'Could not open configuration file',
+            details={'details': 'Couldn\'t open file {} with error: {}.'.format(file_path, str(e))}
+ )
def _look_for_files(expression: str, path_list: list[str]) -> list[str]:
diff --git a/repos/system_upgrade/el9toel10/actors/sssd/sssdupdate/libraries/sssdupdate.py b/repos/system_upgrade/el9toel10/actors/sssd/sssdupdate/libraries/sssdupdate.py
index 6d745ead..5b96bcc6 100644
--- a/repos/system_upgrade/el9toel10/actors/sssd/sssdupdate/libraries/sssdupdate.py
+++ b/repos/system_upgrade/el9toel10/actors/sssd/sssdupdate/libraries/sssdupdate.py
@@ -1,7 +1,7 @@
import os
import re
-from leapp.exceptions import StopActorExecutionError
+from leapp.libraries.stdlib import api
def _process_knownhosts(line: str) -> str:
@@ -29,30 +29,26 @@ def _process_enable_svc(line: str) -> str:
def _update_file(filename, process_function):
- newname = filename + '.new'
- oldname = filename + '.old'
+ newname = '{}.leappnew'.format(filename)
+ oldname = '{}.leappsave'.format(filename)
try:
- with open(filename, 'r') as input_file, open(newname, 'x') as output_file:
+ with open(filename, 'r') as input_file, open(newname, 'w') as output_file:
istat = os.fstat(input_file.fileno())
os.fchmod(output_file.fileno(), istat.st_mode)
for line in input_file:
try:
output_file.write(process_function(line))
except OSError as e:
- raise StopActorExecutionError('Failed to write to {}'.format(newname),
- details={'details': str(e)})
+                    api.current_logger().warning('Failed to write to {}: {}'.format(newname, str(e)))
- except FileExistsError as e:
- raise StopActorExecutionError('Temporary file already exists: {}'.format(newname),
- details={'details': str(e)})
except OSError as e:
try:
os.unlink(newname)
except FileNotFoundError:
pass
- raise StopActorExecutionError('Failed to access the required files', details={'details': str(e)})
+        api.current_logger().error('Failed to access the required files: {}'.format(str(e)))
- # Let's make sure the old configuration is preserverd if something goes wrong
+ # Let's make sure the old configuration is preserved if something goes wrong
os.replace(filename, oldname)
os.replace(newname, filename)
os.unlink(oldname)
diff --git a/utils/container-builds/Containerfile.centos7 b/utils/container-builds/Containerfile.centos7
deleted file mode 100644
index af00eddb..00000000
--- a/utils/container-builds/Containerfile.centos7
+++ /dev/null
@@ -1,15 +0,0 @@
-FROM centos:7
-
-VOLUME /repo
-
-# mirror.centos.org is dead, comment out mirrorlist and set baseurl to vault.centos.org
-RUN sed -i s/mirror.centos.org/vault.centos.org/ /etc/yum.repos.d/CentOS-*.repo
-RUN sed -i s/^#\s*baseurl=http/baseurl=http/ /etc/yum.repos.d/CentOS-*.repo
-RUN sed -i s/^mirrorlist=http/#mirrorlist=http/ /etc/yum.repos.d/CentOS-*.repo
-
-RUN yum update -y && \
- yum install -y rpm-build python-devel make git
-
-WORKDIR /repo
-ENV DIST_VERSION 7
-ENTRYPOINT make _build_local
diff --git a/utils/container-builds/Containerfile.ubi8 b/utils/container-builds/Containerfile.el8
similarity index 100%
rename from utils/container-builds/Containerfile.ubi8
rename to utils/container-builds/Containerfile.el8
diff --git a/utils/container-builds/Containerfile.ubi9 b/utils/container-builds/Containerfile.el9
similarity index 100%
rename from utils/container-builds/Containerfile.ubi9
rename to utils/container-builds/Containerfile.el9
diff --git a/utils/container-tests/Containerfile.el8 b/utils/container-tests/Containerfile.el8
new file mode 100644
index 00000000..b92e8742
--- /dev/null
+++ b/utils/container-tests/Containerfile.el8
@@ -0,0 +1,24 @@
+FROM centos:8
+
+RUN sed -i s/mirror.centos.org/vault.centos.org/ /etc/yum.repos.d/CentOS-*.repo
+RUN sed -i s/^#\s*baseurl=http/baseurl=http/ /etc/yum.repos.d/CentOS-*.repo
+RUN sed -i s/^mirrorlist=http/#mirrorlist=http/ /etc/yum.repos.d/CentOS-*.repo
+
+VOLUME /repo
+
+RUN dnf update -y && \
+ dnf install -y git make rsync \
+ python3-virtualenv python3-setuptools python3-pip \
+ python3-gobject NetworkManager-libnm
+
+ENV PYTHON_VENV python3.6
+
+COPY . /repocopy
+
+WORKDIR /repocopy
+
+RUN rm -rf tut*
+
+RUN make clean && make install-deps-fedora
+
+WORKDIR /
diff --git a/utils/container-tests/Containerfile.rhel9 b/utils/container-tests/Containerfile.el9
similarity index 100%
rename from utils/container-tests/Containerfile.rhel9
rename to utils/container-tests/Containerfile.el9
diff --git a/utils/container-tests/Containerfile.f34 b/utils/container-tests/Containerfile.f42
similarity index 84%
rename from utils/container-tests/Containerfile.f34
rename to utils/container-tests/Containerfile.f42
index a9346635..46f0f63a 100644
--- a/utils/container-tests/Containerfile.f34
+++ b/utils/container-tests/Containerfile.f42
@@ -1,11 +1,11 @@
-FROM fedora:34
+FROM fedora:42
VOLUME /repo
RUN dnf update -y && \
dnf install -y findutils make rsync python3-gobject-base NetworkManager-libnm
-ENV PYTHON_VENV python3.9
+ENV PYTHON_VENV python3.13
COPY . /repocopy
diff --git a/utils/container-tests/Containerfile.rhel7 b/utils/container-tests/Containerfile.rhel7
deleted file mode 100644
index 0a0c384a..00000000
--- a/utils/container-tests/Containerfile.rhel7
+++ /dev/null
@@ -1,24 +0,0 @@
-FROM registry.access.redhat.com/ubi7/ubi:7.9
-
-VOLUME /repo
-
-RUN yum update -y && \
- yum install -y python-virtualenv python-setuptools make git rsync
-
-# see ./Containerfile.ubi7 for explanation
-RUN yum -y install python27-python-pip && \
- scl enable python27 -- pip install -U --target /usr/lib/python2.7/site-packages/ pip==20.3.0 && \
- python -m pip install --ignore-installed pip==20.3.4 ipaddress virtualenv
-
-ENV PYTHON_VENV python2.7
-
-COPY . /repocopy
-
-WORKDIR /repocopy
-
-RUN rm -rf tut*
-
-RUN make clean && make install-deps
-
-WORKDIR /
-
diff --git a/utils/container-tests/Containerfile.rhel8 b/utils/container-tests/Containerfile.rhel8
deleted file mode 100644
index 6f21839b..00000000
--- a/utils/container-tests/Containerfile.rhel8
+++ /dev/null
@@ -1,18 +0,0 @@
-FROM registry.access.redhat.com/ubi8/ubi:latest
-
-VOLUME /repo
-
-RUN dnf update -y && \
- dnf install -y python3-virtualenv python3-setuptools python3-pip make git rsync
-
-ENV PYTHON_VENV python3.6
-
-COPY . /repocopy
-
-WORKDIR /repocopy
-
-RUN rm -rf tut*
-
-RUN make clean && make install-deps-fedora
-
-WORKDIR /
diff --git a/utils/container-tests/Containerfile.ubi7 b/utils/container-tests/Containerfile.ubi7
deleted file mode 100644
index 44625a76..00000000
--- a/utils/container-tests/Containerfile.ubi7
+++ /dev/null
@@ -1,25 +0,0 @@
-FROM registry.access.redhat.com/ubi7/ubi:7.9
-
-VOLUME /payload
-
-RUN yum update -y && \
- yum install python-virtualenv python-setuptools make git -y
-
-# NOTE(ivasilev,pstodulk) We need at least pip v10.0.1, however centos:7
-# provides just v8.1.2 (via EPEL). So do this: install epel repos -> install
-# python2-pip -> use pip to update to specific pip version we require. period
-# NOTE(pstodulk) I see we take care about pip for py3 inside the Makefile,
-# however I am afraid of additional possible troubles in future because of the
-# archaic pip3 version (v9.0.1). As we want to run tests for Py2 and Py3 in ci
-# always anyway, let's put py3 installation here as well..
-# Dropped Python3 as it is now added in its own container on RHEL8
-
-# This is some trickery: We install python27-python-pip from the scl, use the scl to bootstrap the python
-# module of pip version 20.3.0 and then make it update to 20.3.4 resulting the 'pip' command to be available.
-# The --target approach doesn't add it, but at least we now have pip 20.3.4 installed ;-)
-RUN yum -y install python27-python-pip && \
- scl enable python27 -- pip install -U --target /usr/lib/python2.7/site-packages/ pip==20.3.0 && \
- python -m pip install --ignore-installed pip==20.3.4 ipaddress virtualenv
-
-WORKDIR /payload
-ENTRYPOINT make install-deps && make test_no_lint
diff --git a/utils/container-tests/Containerfile.ubi7-lint b/utils/container-tests/Containerfile.ubi7-lint
deleted file mode 100644
index ed548985..00000000
--- a/utils/container-tests/Containerfile.ubi7-lint
+++ /dev/null
@@ -1,25 +0,0 @@
-FROM registry.access.redhat.com/ubi7/ubi:7.9
-
-VOLUME /payload
-
-RUN yum update -y && \
- yum install python-virtualenv python-setuptools make git -y
-
-# NOTE(ivasilev,pstodulk) We need at least pip v10.0.1, however centos:7
-# provides just v8.1.2 (via EPEL). So do this: install epel repos -> install
-# python2-pip -> use pip to update to specific pip version we require. period
-# NOTE(pstodulk) I see we take care about pip for py3 inside the Makefile,
-# however I am afraid of additional possible troubles in future because of the
-# archaic pip3 version (v9.0.1). As we want to run tests for Py2 and Py3 in ci
-# always anyway, let's put py3 installation here as well..
-# Dropped Python3 as it is now added in its own container on RHEL8
-
-# This is some trickery: We install python27-python-pip from the scl, use the scl to bootstrap the python
-# module of pip version 20.3.0 and then make it update to 20.3.4 resulting the 'pip' command to be available.
-# The --target approach doesn't add it, but at least we now have pip 20.3.4 installed ;-)
-RUN yum -y install python27-python-pip && \
- scl enable python27 -- pip install -U --target /usr/lib/python2.7/site-packages/ pip==20.3.0 && \
- python -m pip install --ignore-installed pip==20.3.4 ipaddress virtualenv
-
-WORKDIR /payload
-ENTRYPOINT make install-deps && make lint
diff --git a/utils/container-tests/Containerfile.ubi8 b/utils/container-tests/Containerfile.ubi8
deleted file mode 100644
index 4da60c18..00000000
--- a/utils/container-tests/Containerfile.ubi8
+++ /dev/null
@@ -1,9 +0,0 @@
-FROM registry.access.redhat.com/ubi8/ubi:latest
-
-VOLUME /payload
-
-RUN dnf update -y && \
- dnf install python3-virtualenv python3-setuptools python3-pip make git -y
-
-WORKDIR /payload
-ENTRYPOINT make install-deps && make test_no_lint
diff --git a/utils/container-tests/ci/Containerfile.el8 b/utils/container-tests/ci/Containerfile.el8
new file mode 100644
index 00000000..4a19092e
--- /dev/null
+++ b/utils/container-tests/ci/Containerfile.el8
@@ -0,0 +1,15 @@
+FROM centos:8
+
+RUN sed -i s/mirror.centos.org/vault.centos.org/ /etc/yum.repos.d/CentOS-*.repo
+RUN sed -i s/^#\s*baseurl=http/baseurl=http/ /etc/yum.repos.d/CentOS-*.repo
+RUN sed -i s/^mirrorlist=http/#mirrorlist=http/ /etc/yum.repos.d/CentOS-*.repo
+
+VOLUME /payload
+
+RUN dnf update -y && \
+ dnf install -y make git \
+ python3-virtualenv python3-setuptools python3-pip \
+ python3-gobject NetworkManager-libnm
+
+WORKDIR /payload
+ENTRYPOINT make install-deps && make test_no_lint
diff --git a/utils/container-tests/Containerfile.ubi8-lint b/utils/container-tests/ci/Containerfile.el8-lint
similarity index 100%
rename from utils/container-tests/Containerfile.ubi8-lint
rename to utils/container-tests/ci/Containerfile.el8-lint
diff --git a/utils/container-tests/Containerfile.ubi9 b/utils/container-tests/ci/Containerfile.el9
similarity index 100%
rename from utils/container-tests/Containerfile.ubi9
rename to utils/container-tests/ci/Containerfile.el9
diff --git a/utils/container-tests/Containerfile.ubi9-lint b/utils/container-tests/ci/Containerfile.el9-lint
similarity index 100%
rename from utils/container-tests/Containerfile.ubi9-lint
rename to utils/container-tests/ci/Containerfile.el9-lint