forked from rpms/leapp-repository
9017 lines
382 KiB
Diff
9017 lines
382 KiB
Diff
diff --git a/.github/workflows/pr-welcome-msg.yml b/.github/workflows/pr-welcome-msg.yml
|
||
index f056fb79..4c12ab2a 100644
|
||
--- a/.github/workflows/pr-welcome-msg.yml
|
||
+++ b/.github/workflows/pr-welcome-msg.yml
|
||
@@ -14,7 +14,7 @@ jobs:
|
||
runs-on: ubuntu-latest
|
||
steps:
|
||
- name: Create comment
|
||
- uses: peter-evans/create-or-update-comment@v4
|
||
+ uses: peter-evans/create-or-update-comment@v5
|
||
with:
|
||
issue-number: ${{ github.event.pull_request.number }}
|
||
body: |
|
||
diff --git a/.gitignore b/.gitignore
|
||
index 0bb92d3d..a04c7ded 100644
|
||
--- a/.gitignore
|
||
+++ b/.gitignore
|
||
@@ -115,6 +115,7 @@ ENV/
|
||
|
||
# visual studio code configuration
|
||
.vscode
|
||
+*.code-workspace
|
||
|
||
# pycharm
|
||
.idea
|
||
diff --git a/.packit.yaml b/.packit.yaml
|
||
index 0c3f682a..e158c7e4 100644
|
||
--- a/.packit.yaml
|
||
+++ b/.packit.yaml
|
||
@@ -110,7 +110,7 @@ jobs:
|
||
job: tests
|
||
trigger: ignore
|
||
fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests"
|
||
- fmf_ref: "main"
|
||
+ fmf_ref: "next"
|
||
use_internal_tf: True
|
||
labels:
|
||
- sanity
|
||
@@ -447,7 +447,7 @@ jobs:
|
||
job: tests
|
||
trigger: ignore
|
||
fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests"
|
||
- fmf_ref: "main"
|
||
+ fmf_ref: "next"
|
||
use_internal_tf: True
|
||
labels:
|
||
- sanity
|
||
@@ -460,6 +460,15 @@ jobs:
|
||
tmt:
|
||
plan_filter: 'tag:9to10'
|
||
environments:
|
||
+ - &tmt-env-settings-centos9to10
|
||
+ tmt:
|
||
+ context: &tmt-context-centos9to10
|
||
+ distro: "centos-9"
|
||
+ distro_target: "centos-10"
|
||
+ settings:
|
||
+ provisioning:
|
||
+ tags:
|
||
+ BusinessUnit: sst_upgrades@leapp_upstream_test
|
||
- &tmt-env-settings-96to100
|
||
tmt:
|
||
context: &tmt-context-96to100
|
||
@@ -478,6 +487,15 @@ jobs:
|
||
provisioning:
|
||
tags:
|
||
BusinessUnit: sst_upgrades@leapp_upstream_test
|
||
+ - &tmt-env-settings-centos9torhel101
|
||
+ tmt:
|
||
+ context: &tmt-context-centos9torhel101
|
||
+ distro: "centos-9"
|
||
+ distro_target: "rhel-10.1"
|
||
+ settings:
|
||
+ provisioning:
|
||
+ tags:
|
||
+ BusinessUnit: sst_upgrades@leapp_upstream_test
|
||
- &tmt-env-settings-98to102
|
||
tmt:
|
||
context: &tmt-context-98to102
|
||
@@ -487,6 +505,15 @@ jobs:
|
||
provisioning:
|
||
tags:
|
||
BusinessUnit: sst_upgrades@leapp_upstream_test
|
||
+ - &tmt-env-settings-centos9torhel102
|
||
+ tmt:
|
||
+ context: &tmt-context-centos9torhel102
|
||
+ distro: "centos-9"
|
||
+ distro_target: "rhel-10.2"
|
||
+ settings:
|
||
+ provisioning:
|
||
+ tags:
|
||
+ BusinessUnit: sst_upgrades@leapp_upstream_test
|
||
|
||
- &sanity-abstract-9to10-aws
|
||
<<: *sanity-abstract-9to10
|
||
@@ -705,3 +732,79 @@ jobs:
|
||
env:
|
||
<<: *env-98to102
|
||
|
||
+# ###################################################################### #
|
||
+# ########################## CentOS Stream ############################# #
|
||
+# ###################################################################### #
|
||
+
|
||
+# ###################################################################### #
|
||
+# ###################### CentOS Stream > RHEL ########################## #
|
||
+# ###################################################################### #
|
||
+
|
||
+# ###################################################################### #
|
||
+# ############################ 9 > 10.1 ################################ #
|
||
+# ###################################################################### #
|
||
+
|
||
+- &sanity-centos9torhel101
|
||
+ <<: *sanity-abstract-9to10
|
||
+ trigger: pull_request
|
||
+ identifier: sanity-CentOS9toRHEL10.1
|
||
+ targets:
|
||
+ epel-9-x86_64:
|
||
+ distros: [CentOS-Stream-9]
|
||
+ tf_extra_params:
|
||
+ test:
|
||
+ tmt:
|
||
+ plan_filter: 'tag:9to10 & tag:tier0 & enabled:true & tag:-rhsm'
|
||
+ environments:
|
||
+ - *tmt-env-settings-centos9torhel101
|
||
+ env: &env-centos9to101
|
||
+ SOURCE_RELEASE: "9"
|
||
+ TARGET_RELEASE: "10.1"
|
||
+
|
||
+# ###################################################################### #
|
||
+# ############################ 9 > 10.2 ################################ #
|
||
+# ###################################################################### #
|
||
+
|
||
+- &sanity-centos9torhel102
|
||
+ <<: *sanity-abstract-9to10
|
||
+ trigger: pull_request
|
||
+ identifier: sanity-CentOS9toRHEL10.2
|
||
+ targets:
|
||
+ epel-9-x86_64:
|
||
+ distros: [CentOS-Stream-9]
|
||
+ tf_extra_params:
|
||
+ test:
|
||
+ tmt:
|
||
+ plan_filter: 'tag:9to10 & tag:tier0 & enabled:true & tag:-rhsm'
|
||
+ name:
|
||
+ environments:
|
||
+ - *tmt-env-settings-centos9torhel102
|
||
+ env: &env-centos9torhel102
|
||
+ SOURCE_RELEASE: "9"
|
||
+ TARGET_RELEASE: "10.2"
|
||
+
|
||
+# ###################################################################### #
|
||
+# ################## CentOS Stream > CentOS Stream ##################### #
|
||
+# ###################################################################### #
|
||
+
|
||
+# ###################################################################### #
|
||
+# ############################## 9 > 10 ################################ #
|
||
+# ###################################################################### #
|
||
+
|
||
+- &sanity-centos-9to10
|
||
+ <<: *sanity-abstract-9to10
|
||
+ trigger: pull_request
|
||
+ identifier: sanity-CentOS9to10
|
||
+ targets:
|
||
+ epel-9-x86_64:
|
||
+ distros: [CentOS-Stream-9]
|
||
+ tf_extra_params:
|
||
+ test:
|
||
+ tmt:
|
||
+ plan_filter: 'tag:9to10 & tag:tier0 & enabled:true & tag:-rhsm'
|
||
+ environments:
|
||
+ - *tmt-env-settings-centos9to10
|
||
+ env: &env-centos9to10
|
||
+ SOURCE_RELEASE: "9"
|
||
+ TARGET_RELEASE: "10"
|
||
+ TARGET_OS: "centos"
|
||
diff --git a/ci/.gitignore b/ci/.gitignore
|
||
new file mode 100644
|
||
index 00000000..e6f97f0f
|
||
--- /dev/null
|
||
+++ b/ci/.gitignore
|
||
@@ -0,0 +1 @@
|
||
+**/.vagrant
|
||
diff --git a/ci/ansible/ansible.cfg b/ci/ansible/ansible.cfg
|
||
new file mode 100644
|
||
index 00000000..d5c13036
|
||
--- /dev/null
|
||
+++ b/ci/ansible/ansible.cfg
|
||
@@ -0,0 +1,4 @@
|
||
+[defaults]
|
||
+callbacks_enabled=ansible.posix.profile_tasks
|
||
+stdout_callback=community.general.yaml
|
||
+pipelining=True
|
||
diff --git a/ci/ansible/docker-ce.yaml b/ci/ansible/docker-ce.yaml
|
||
new file mode 100644
|
||
index 00000000..bba5f3df
|
||
--- /dev/null
|
||
+++ b/ci/ansible/docker-ce.yaml
|
||
@@ -0,0 +1,6 @@
|
||
+---
|
||
+- name: Docker CE configuration
|
||
+ hosts: all
|
||
+ become: yes
|
||
+ roles:
|
||
+ - docker-ce
|
||
diff --git a/ci/ansible/minimal.yaml b/ci/ansible/minimal.yaml
|
||
new file mode 100644
|
||
index 00000000..517cc81b
|
||
--- /dev/null
|
||
+++ b/ci/ansible/minimal.yaml
|
||
@@ -0,0 +1,6 @@
|
||
+---
|
||
+- name: Minimal configuration
|
||
+ hosts: all
|
||
+ become: yes
|
||
+ roles:
|
||
+ - minimal
|
||
diff --git a/ci/ansible/requirements.yaml b/ci/ansible/requirements.yaml
|
||
new file mode 100644
|
||
index 00000000..13ca0224
|
||
--- /dev/null
|
||
+++ b/ci/ansible/requirements.yaml
|
||
@@ -0,0 +1,3 @@
|
||
+collections:
|
||
+ - name: community.general
|
||
+ - name: ansible.posix
|
||
diff --git a/ci/ansible/roles/docker-ce/README.md b/ci/ansible/roles/docker-ce/README.md
|
||
new file mode 100644
|
||
index 00000000..860444b1
|
||
--- /dev/null
|
||
+++ b/ci/ansible/roles/docker-ce/README.md
|
||
@@ -0,0 +1,43 @@
|
||
+Docker CE Install and configuration
|
||
+=========
|
||
+
|
||
+Install latest version of Docker CE Engine form upstream repository. Start and enable services after installation.
|
||
+
|
||
+Requirements
|
||
+------------
|
||
+
|
||
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
|
||
+
|
||
+Role Variables
|
||
+--------------
|
||
+
|
||
+`docker_ce_repo_checksum` in defaults/main.yaml. SHA512 Checksum of the docker-ce.repo file.
|
||
+A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
|
||
+
|
||
+Dependencies
|
||
+------------
|
||
+
|
||
+A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
|
||
+
|
||
+Example Playbook
|
||
+----------------
|
||
+
|
||
+Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
|
||
+
|
||
+ - hosts: all
|
||
+ become: yes
|
||
+ roles:
|
||
+ - role: docker
|
||
+ vars:
|
||
+ docker_ce_repo_checksum: sha512:XXXX # You can provide the new checksum if the default one not actual
|
||
+
|
||
+
|
||
+License
|
||
+-------
|
||
+
|
||
+GPL-3.0-or-later
|
||
+
|
||
+Author Information
|
||
+------------------
|
||
+
|
||
+AlmaLinux OS Foundation
|
||
diff --git a/ci/ansible/roles/docker-ce/defaults/main.yaml b/ci/ansible/roles/docker-ce/defaults/main.yaml
|
||
new file mode 100644
|
||
index 00000000..d0fd0c09
|
||
--- /dev/null
|
||
+++ b/ci/ansible/roles/docker-ce/defaults/main.yaml
|
||
@@ -0,0 +1,3 @@
|
||
+---
|
||
+# defaults file for docker-ce
|
||
+docker_ce_repo_checksum: sha512:1de0b99cbb427e974144f226451711dc491caef6b1256cb599ff307a687ba2d7dd959a016d4e4cfdd4acbd83423ba1f78fa89db61bab35351e35f1152aedaf5c
|
||
diff --git a/ci/ansible/roles/docker-ce/handlers/main.yaml b/ci/ansible/roles/docker-ce/handlers/main.yaml
|
||
new file mode 100644
|
||
index 00000000..a7236219
|
||
--- /dev/null
|
||
+++ b/ci/ansible/roles/docker-ce/handlers/main.yaml
|
||
@@ -0,0 +1,2 @@
|
||
+---
|
||
+# handlers file for docker-ce
|
||
diff --git a/ci/ansible/roles/docker-ce/meta/main.yaml b/ci/ansible/roles/docker-ce/meta/main.yaml
|
||
new file mode 100644
|
||
index 00000000..aa67ded8
|
||
--- /dev/null
|
||
+++ b/ci/ansible/roles/docker-ce/meta/main.yaml
|
||
@@ -0,0 +1,25 @@
|
||
+galaxy_info:
|
||
+ author: AlmaLinux OS Community
|
||
+ description: Install and configure Docker CE Engine
|
||
+ company: AlmaLinux OS Foundation
|
||
+
|
||
+ license: GPL-3.0-or-later
|
||
+
|
||
+ min_ansible_version: 2.11
|
||
+
|
||
+ platforms:
|
||
+ - name: EL
|
||
+ versions:
|
||
+ - 7
|
||
+ - 8
|
||
+ - 9
|
||
+
|
||
+ galaxy_tags:
|
||
+ - docker
|
||
+ - el7
|
||
+ - el8
|
||
+ - el9
|
||
+ - almalinux
|
||
+
|
||
+dependencies:
|
||
+ - minimal
|
||
diff --git a/ci/ansible/roles/docker-ce/tasks/install_docker_el7.yaml b/ci/ansible/roles/docker-ce/tasks/install_docker_el7.yaml
|
||
new file mode 100644
|
||
index 00000000..320477af
|
||
--- /dev/null
|
||
+++ b/ci/ansible/roles/docker-ce/tasks/install_docker_el7.yaml
|
||
@@ -0,0 +1,11 @@
|
||
+---
|
||
+# Install Docker
|
||
+- name: Install Docker CE Stable
|
||
+ ansible.builtin.yum:
|
||
+ name:
|
||
+ - docker-ce
|
||
+ - docker-ce-cli
|
||
+ - containerd.io
|
||
+ - docker-compose-plugin
|
||
+ update_cache: yes
|
||
+ state: present
|
||
diff --git a/ci/ansible/roles/docker-ce/tasks/install_docker_el8.yaml b/ci/ansible/roles/docker-ce/tasks/install_docker_el8.yaml
|
||
new file mode 100644
|
||
index 00000000..d44a202a
|
||
--- /dev/null
|
||
+++ b/ci/ansible/roles/docker-ce/tasks/install_docker_el8.yaml
|
||
@@ -0,0 +1,11 @@
|
||
+---
|
||
+# Install Docker
|
||
+- name: Install Docker CE Stable
|
||
+ ansible.builtin.dnf:
|
||
+ name:
|
||
+ - docker-ce
|
||
+ - docker-ce-cli
|
||
+ - containerd.io
|
||
+ - docker-compose-plugin
|
||
+ update_cache: yes
|
||
+ state: present
|
||
diff --git a/ci/ansible/roles/docker-ce/tasks/main.yaml b/ci/ansible/roles/docker-ce/tasks/main.yaml
|
||
new file mode 100644
|
||
index 00000000..989af23f
|
||
--- /dev/null
|
||
+++ b/ci/ansible/roles/docker-ce/tasks/main.yaml
|
||
@@ -0,0 +1,38 @@
|
||
+---
|
||
+# tasks file for docker-ce
|
||
+- name: Add Docker CE repository
|
||
+ ansible.builtin.get_url:
|
||
+ url: https://download.docker.com/linux/centos/docker-ce.repo
|
||
+ dest: /etc/yum.repos.d/docker-ce.repo
|
||
+ checksum: "{{ docker_ce_repo_checksum }}"
|
||
+ owner: root
|
||
+ group: root
|
||
+ mode: '0644'
|
||
+ seuser: system_u
|
||
+ serole: object_r
|
||
+ setype: system_conf_t
|
||
+
|
||
+- name: Remove older versions of Docker on EL7
|
||
+ ansible.builtin.include_tasks: remove_old_docker_el7.yaml
|
||
+ when: ansible_facts['distribution_major_version'] == '7'
|
||
+
|
||
+- name: Remove older versions of Docker on >= EL8
|
||
+ ansible.builtin.include_tasks: remove_old_docker_el8.yaml
|
||
+ when: ansible_facts['distribution_major_version'] == '8'
|
||
+
|
||
+- name: Install Docker CE Stable on EL7
|
||
+ ansible.builtin.include_tasks: install_docker_el7.yaml
|
||
+ when: ansible_facts['distribution_major_version'] == '7'
|
||
+
|
||
+- name: Install Docker CE Stable on >= EL8
|
||
+ ansible.builtin.include_tasks: install_docker_el8.yaml
|
||
+ when: ansible_facts['distribution_major_version'] == '8'
|
||
+
|
||
+- name: Start and Enable Docker services
|
||
+ ansible.builtin.systemd:
|
||
+ name: "{{ item }}"
|
||
+ enabled: yes
|
||
+ state: started
|
||
+ loop:
|
||
+ - docker.service
|
||
+ - containerd.service
|
||
diff --git a/ci/ansible/roles/docker-ce/tasks/remove_old_docker_el7.yaml b/ci/ansible/roles/docker-ce/tasks/remove_old_docker_el7.yaml
|
||
new file mode 100644
|
||
index 00000000..db9e0960
|
||
--- /dev/null
|
||
+++ b/ci/ansible/roles/docker-ce/tasks/remove_old_docker_el7.yaml
|
||
@@ -0,0 +1,15 @@
|
||
+---
|
||
+# Remove older versions of Docker
|
||
+- name: Uninstall older versions of Docker
|
||
+ ansible.builtin.yum:
|
||
+ name:
|
||
+ - docker
|
||
+ - docker-client
|
||
+ - docker-client-latest
|
||
+ - docker-common
|
||
+ - docker-latest
|
||
+ - docker-latest-logrotate
|
||
+ - docker-logrotate
|
||
+ - docker-engine
|
||
+ autoremove: yes
|
||
+ state: absent
|
||
diff --git a/ci/ansible/roles/docker-ce/tasks/remove_old_docker_el8.yaml b/ci/ansible/roles/docker-ce/tasks/remove_old_docker_el8.yaml
|
||
new file mode 100644
|
||
index 00000000..88f860cf
|
||
--- /dev/null
|
||
+++ b/ci/ansible/roles/docker-ce/tasks/remove_old_docker_el8.yaml
|
||
@@ -0,0 +1,15 @@
|
||
+---
|
||
+# Remove older versions of Docker
|
||
+- name: Uninstall older versions of Docker
|
||
+ ansible.builtin.dnf:
|
||
+ name:
|
||
+ - docker
|
||
+ - docker-client
|
||
+ - docker-client-latest
|
||
+ - docker-common
|
||
+ - docker-latest
|
||
+ - docker-latest-logrotate
|
||
+ - docker-logrotate
|
||
+ - docker-engine
|
||
+ autoremove: yes
|
||
+ state: absent
|
||
diff --git a/ci/ansible/roles/docker-ce/tests/inventory b/ci/ansible/roles/docker-ce/tests/inventory
|
||
new file mode 100644
|
||
index 00000000..878877b0
|
||
--- /dev/null
|
||
+++ b/ci/ansible/roles/docker-ce/tests/inventory
|
||
@@ -0,0 +1,2 @@
|
||
+localhost
|
||
+
|
||
diff --git a/ci/ansible/roles/docker-ce/tests/test.yaml b/ci/ansible/roles/docker-ce/tests/test.yaml
|
||
new file mode 100644
|
||
index 00000000..789ba96e
|
||
--- /dev/null
|
||
+++ b/ci/ansible/roles/docker-ce/tests/test.yaml
|
||
@@ -0,0 +1,5 @@
|
||
+---
|
||
+- hosts: localhost
|
||
+ remote_user: root
|
||
+ roles:
|
||
+ - docker-ce
|
||
diff --git a/ci/ansible/roles/docker-ce/vars/main.yaml b/ci/ansible/roles/docker-ce/vars/main.yaml
|
||
new file mode 100644
|
||
index 00000000..7ff8a18f
|
||
--- /dev/null
|
||
+++ b/ci/ansible/roles/docker-ce/vars/main.yaml
|
||
@@ -0,0 +1,2 @@
|
||
+---
|
||
+# vars file for docker-ce
|
||
diff --git a/ci/ansible/roles/minimal/README.md b/ci/ansible/roles/minimal/README.md
|
||
new file mode 100644
|
||
index 00000000..225dd44b
|
||
--- /dev/null
|
||
+++ b/ci/ansible/roles/minimal/README.md
|
||
@@ -0,0 +1,38 @@
|
||
+Role Name
|
||
+=========
|
||
+
|
||
+A brief description of the role goes here.
|
||
+
|
||
+Requirements
|
||
+------------
|
||
+
|
||
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
|
||
+
|
||
+Role Variables
|
||
+--------------
|
||
+
|
||
+A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
|
||
+
|
||
+Dependencies
|
||
+------------
|
||
+
|
||
+A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
|
||
+
|
||
+Example Playbook
|
||
+----------------
|
||
+
|
||
+Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
|
||
+
|
||
+ - hosts: servers
|
||
+ roles:
|
||
+ - { role: username.rolename, x: 42 }
|
||
+
|
||
+License
|
||
+-------
|
||
+
|
||
+BSD
|
||
+
|
||
+Author Information
|
||
+------------------
|
||
+
|
||
+An optional section for the role authors to include contact information, or a website (HTML is not allowed).
|
||
diff --git a/ci/ansible/roles/minimal/defaults/main.yaml b/ci/ansible/roles/minimal/defaults/main.yaml
|
||
new file mode 100644
|
||
index 00000000..4a5a46cd
|
||
--- /dev/null
|
||
+++ b/ci/ansible/roles/minimal/defaults/main.yaml
|
||
@@ -0,0 +1,2 @@
|
||
+---
|
||
+# defaults file for minimal
|
||
diff --git a/ci/ansible/roles/minimal/handlers/main.yaml b/ci/ansible/roles/minimal/handlers/main.yaml
|
||
new file mode 100644
|
||
index 00000000..89105fec
|
||
--- /dev/null
|
||
+++ b/ci/ansible/roles/minimal/handlers/main.yaml
|
||
@@ -0,0 +1,2 @@
|
||
+---
|
||
+# handlers file for minimal
|
||
diff --git a/ci/ansible/roles/minimal/meta/main.yaml b/ci/ansible/roles/minimal/meta/main.yaml
|
||
new file mode 100644
|
||
index 00000000..ecc81ab7
|
||
--- /dev/null
|
||
+++ b/ci/ansible/roles/minimal/meta/main.yaml
|
||
@@ -0,0 +1,23 @@
|
||
+galaxy_info:
|
||
+ author: AlmaLinux OS Community
|
||
+ description: Minimal configuration for ELevate
|
||
+ company: AlmaLinux OS Foundation
|
||
+
|
||
+ license: GPL-3.0-or-later
|
||
+
|
||
+ min_ansible_version: 2.11
|
||
+
|
||
+ platforms:
|
||
+ - name: EL
|
||
+ versions:
|
||
+ - 7
|
||
+ - 8
|
||
+ - 9
|
||
+
|
||
+ galaxy_tags:
|
||
+ - elevate
|
||
+ - upgrade
|
||
+ - cleanup
|
||
+ - el7
|
||
+ - el8
|
||
+ - el9
|
||
diff --git a/ci/ansible/roles/minimal/tasks/cleanup_el7.yaml b/ci/ansible/roles/minimal/tasks/cleanup_el7.yaml
|
||
new file mode 100644
|
||
index 00000000..1b4af7c6
|
||
--- /dev/null
|
||
+++ b/ci/ansible/roles/minimal/tasks/cleanup_el7.yaml
|
||
@@ -0,0 +1,10 @@
|
||
+---
|
||
+# Remove old kernels
|
||
+- name: Install the yum-utils
|
||
+ ansible.builtin.yum:
|
||
+ name: yum-utils
|
||
+ state: present
|
||
+ update_cache: yes
|
||
+
|
||
+- name: Remove the old kernels on EL7
|
||
+ ansible.builtin.command: package-cleanup -y --oldkernels --count=1
|
||
diff --git a/ci/ansible/roles/minimal/tasks/cleanup_el8.yaml b/ci/ansible/roles/minimal/tasks/cleanup_el8.yaml
|
||
new file mode 100644
|
||
index 00000000..56aeefd3
|
||
--- /dev/null
|
||
+++ b/ci/ansible/roles/minimal/tasks/cleanup_el8.yaml
|
||
@@ -0,0 +1,7 @@
|
||
+---
|
||
+# Remove old kernels
|
||
+- name: Remove old kernels on EL8
|
||
+ ansible.builtin.command: dnf -y remove --oldinstallonly
|
||
+ register: removeoldkernels
|
||
+ changed_when: removeoldkernels.rc == 0
|
||
+ failed_when: removeoldkernels.rc > 1
|
||
diff --git a/ci/ansible/roles/minimal/tasks/main.yaml b/ci/ansible/roles/minimal/tasks/main.yaml
|
||
new file mode 100644
|
||
index 00000000..8c1b35bd
|
||
--- /dev/null
|
||
+++ b/ci/ansible/roles/minimal/tasks/main.yaml
|
||
@@ -0,0 +1,21 @@
|
||
+---
|
||
+# tasks file for minimal
|
||
+- name: Upgrade the packages on EL7
|
||
+ ansible.builtin.include_tasks: upgrade_el7.yaml
|
||
+ when: ansible_facts['distribution_major_version'] == '7'
|
||
+
|
||
+- name: Upgrade the packages on EL8
|
||
+ ansible.builtin.include_tasks: upgrade_el8.yaml
|
||
+ when: ansible_facts['distribution_major_version'] == '8'
|
||
+
|
||
+- name: Reboot the system
|
||
+ ansible.builtin.reboot:
|
||
+ when: upgrade_status is changed
|
||
+
|
||
+- name: Cleanup the older kernels on EL7
|
||
+ ansible.builtin.include_tasks: cleanup_el7.yaml
|
||
+ when: ansible_facts['distribution_major_version'] == '7'
|
||
+
|
||
+- name: Cleanup the older kernels on El8
|
||
+ ansible.builtin.include_tasks: cleanup_el8.yaml
|
||
+ when: ansible_facts['distribution_major_version'] == '8'
|
||
diff --git a/ci/ansible/roles/minimal/tasks/upgrade_el7.yaml b/ci/ansible/roles/minimal/tasks/upgrade_el7.yaml
|
||
new file mode 100644
|
||
index 00000000..7648a586
|
||
--- /dev/null
|
||
+++ b/ci/ansible/roles/minimal/tasks/upgrade_el7.yaml
|
||
@@ -0,0 +1,8 @@
|
||
+---
|
||
+# Upgrade the system
|
||
+- name: Upgrade the system
|
||
+ ansible.builtin.yum:
|
||
+ name: "*"
|
||
+ state: latest
|
||
+ update_cache: yes
|
||
+ register: upgrade_status
|
||
diff --git a/ci/ansible/roles/minimal/tasks/upgrade_el8.yaml b/ci/ansible/roles/minimal/tasks/upgrade_el8.yaml
|
||
new file mode 100644
|
||
index 00000000..0d4a5d2a
|
||
--- /dev/null
|
||
+++ b/ci/ansible/roles/minimal/tasks/upgrade_el8.yaml
|
||
@@ -0,0 +1,8 @@
|
||
+---
|
||
+# Upgrade the system
|
||
+- name: Upgrade the system
|
||
+ ansible.builtin.dnf:
|
||
+ name: "*"
|
||
+ state: latest
|
||
+ update_cache: yes
|
||
+ register: upgrade_status
|
||
diff --git a/ci/ansible/roles/minimal/tests/inventory b/ci/ansible/roles/minimal/tests/inventory
|
||
new file mode 100644
|
||
index 00000000..878877b0
|
||
--- /dev/null
|
||
+++ b/ci/ansible/roles/minimal/tests/inventory
|
||
@@ -0,0 +1,2 @@
|
||
+localhost
|
||
+
|
||
diff --git a/ci/ansible/roles/minimal/tests/test.yaml b/ci/ansible/roles/minimal/tests/test.yaml
|
||
new file mode 100644
|
||
index 00000000..db5c4c17
|
||
--- /dev/null
|
||
+++ b/ci/ansible/roles/minimal/tests/test.yaml
|
||
@@ -0,0 +1,5 @@
|
||
+---
|
||
+- hosts: localhost
|
||
+ remote_user: root
|
||
+ roles:
|
||
+ - minimal
|
||
diff --git a/ci/ansible/roles/minimal/vars/main.yaml b/ci/ansible/roles/minimal/vars/main.yaml
|
||
new file mode 100644
|
||
index 00000000..b24df080
|
||
--- /dev/null
|
||
+++ b/ci/ansible/roles/minimal/vars/main.yaml
|
||
@@ -0,0 +1,2 @@
|
||
+---
|
||
+# vars file for minimal
|
||
diff --git a/ci/jenkins/ELevate_el7toel8_Development.jenkinsfile b/ci/jenkins/ELevate_el7toel8_Development.jenkinsfile
|
||
new file mode 100644
|
||
index 00000000..317209ef
|
||
--- /dev/null
|
||
+++ b/ci/jenkins/ELevate_el7toel8_Development.jenkinsfile
|
||
@@ -0,0 +1,249 @@
|
||
+RETRY = params.RETRY
|
||
+TIMEOUT = params.TIMEOUT
|
||
+
|
||
+pipeline {
|
||
+ agent {
|
||
+ label 'x86_64 && bm'
|
||
+ }
|
||
+ options {
|
||
+ timestamps()
|
||
+ parallelsAlwaysFailFast()
|
||
+ }
|
||
+ parameters {
|
||
+ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a target distro or all for ELevation')
|
||
+ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration')
|
||
+ string(name: 'LEAPP_SRC_GIT_USER', defaultValue: 'AlmaLinux', description: 'Input name of Git user of LEAPP source', trim: true)
|
||
+ string(name: 'LEAPP_SRC_GIT_BRANCH', defaultValue: 'almalinux', description: 'Input name of Git branch of LEAPP source', trim: true)
|
||
+ string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true)
|
||
+ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true)
|
||
+ }
|
||
+ environment {
|
||
+ VAGRANT_NO_COLOR = '1'
|
||
+ }
|
||
+ stages {
|
||
+ stage('Prepare') {
|
||
+ steps {
|
||
+ sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml',
|
||
+ label: 'Install Ansible collections'
|
||
+ sh script: 'python3.11 -m venv .venv',
|
||
+ label: 'Create Python virtual environment'
|
||
+ sh script: '. .venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko',
|
||
+ label: 'Install Testinfra'
|
||
+ sh script: 'git clone https://github.com/AlmaLinux/leapp-data.git --branch devel',
|
||
+ label: 'Fetch devel version of leapp data'
|
||
+ }
|
||
+ }
|
||
+ stage('CreateSingleMachine') {
|
||
+ when {
|
||
+ expression { params.TARGET_DISTRO_FILTER != 'all' }
|
||
+ }
|
||
+ environment {
|
||
+ CONFIG = "${CONF_FILTER}"
|
||
+ }
|
||
+ steps {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER)
|
||
+
|
||
+ sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile',
|
||
+ label: 'Generate Vagrantfile'
|
||
+ sh script: "vagrant up $targetDistro.vmName",
|
||
+ label: 'Create source VM'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('CreateMultiMachine') {
|
||
+ when {
|
||
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
|
||
+ }
|
||
+ environment {
|
||
+ CONFIG = "${CONF_FILTER}"
|
||
+ }
|
||
+ steps {
|
||
+ sh script: 'cp ci/vagrant/el7toel8_multi.rb Vagrantfile',
|
||
+ label: 'Generate Vagrantfile'
|
||
+ sh script: 'vagrant up',
|
||
+ label: 'Create source VM'
|
||
+ }
|
||
+ }
|
||
+ stage('ELevationAndTest') {
|
||
+ matrix {
|
||
+ when {
|
||
+ anyOf {
|
||
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
|
||
+ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO }
|
||
+ }
|
||
+ }
|
||
+ axes {
|
||
+ axis {
|
||
+ name 'TARGET_DISTRO'
|
||
+ values 'almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8'
|
||
+ }
|
||
+ }
|
||
+ stages {
|
||
+ stage('ELevate') {
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
|
||
+
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum-config-manager --add-repo https://repo.almalinux.org/elevate/testing/elevate-testing.repo\"",
|
||
+ label: 'Add testing repo of ELevate'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y leapp-upgrade\"",
|
||
+ label: 'Install testing version of ELevate'
|
||
+ sh script: "vagrant upload ci/scripts/install_elevate_dev.sh install_elevate_dev.sh $targetDistro.vmName",
|
||
+ label: 'Upload installer script to VMs'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo bash install_elevate_dev.sh -u ${LEAPP_SRC_GIT_USER} -b ${LEAPP_SRC_GIT_BRANCH}\"",
|
||
+ label: 'Install development version of ELevate',
|
||
+ returnStatus: true
|
||
+ sh script: "vagrant upload leapp-data/ leapp-data/ --compress $targetDistro.vmName",
|
||
+ label: 'Upload devel branch of leapp data'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo mkdir -p /etc/leapp/files/vendors.d\"",
|
||
+ label: 'Create directory structrue of leapp data'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo install -t /etc/leapp/files leapp-data/files/${targetDistro.leappData}/*\"",
|
||
+ label: 'Install devel version of leapp data'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo install -t /etc/leapp/files/vendors.d leapp-data/vendors.d/*\"",
|
||
+ label: 'Install devel version of leapp vendor data'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo mv -f /etc/leapp/files/leapp_upgrade_repositories.repo.el8 /etc/leapp/files/leapp_upgrade_repositories.repo\"",
|
||
+ label: 'Configure leapp upgrade repositories for EL7toEL8'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo mv -f /etc/leapp/files/repomap.json.el8 /etc/leapp/files/repomap.json\"",
|
||
+ label: 'Configure leapp repository mapping for EL7toEL8'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum -y install tree && sudo tree -ha /etc/leapp\"",
|
||
+ label: 'Check if development version of leapp data installed correctly'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"",
|
||
+ label: 'Start pre-upgrade check',
|
||
+ returnStatus: true
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"",
|
||
+ label: 'Permit ssh as root login'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"",
|
||
+ label: 'Answer the leapp question'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"",
|
||
+ label: 'Start the Upgrade'
|
||
+ sh script: "vagrant reload $targetDistro.vmName",
|
||
+ label: 'Reboot to the ELevate initramfs'
|
||
+ sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config",
|
||
+ label: 'Generate the ssh-config file'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('Distro Tests') {
|
||
+ when {
|
||
+ anyOf {
|
||
+ expression { params.CONF_FILTER == 'minimal' }
|
||
+ expression { params.CONF_FILTER == 'docker-ce' }
|
||
+ }
|
||
+ }
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
|
||
+
|
||
+ sh script: 'rm -f conftest.py pytest.ini',
|
||
+ label: 'Delete root conftest.py file'
|
||
+ sh script: """
|
||
+ . .venv/bin/activate \
|
||
+ && py.test -v --hosts=${targetDistro.vmName} \
|
||
+ --ssh-config=.vagrant/ssh-config \
|
||
+ --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \
|
||
+ ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py
|
||
+ """,
|
||
+ label: 'Run the distro specific tests'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('Docker Tests') {
|
||
+ when {
|
||
+ anyOf {
|
||
+ expression { params.CONF_FILTER == 'docker-ce' }
|
||
+ }
|
||
+ }
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
|
||
+
|
||
+ sh script: """
|
||
+ . .venv/bin/activate \
|
||
+ && py.test -v --hosts=${targetDistro.vmName} \
|
||
+ --ssh-config=.vagrant/ssh-config \
|
||
+ --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \
|
||
+ ci/tests/tests/docker/test_docker_ce.py
|
||
+ """,
|
||
+ label: 'Run the docker specific tests'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ post {
|
||
+ success {
|
||
+ junit testResults: 'ci/tests/tests/**/**_junit.xml',
|
||
+ skipPublishingChecks: true
|
||
+ }
|
||
+ cleanup {
|
||
+ sh script: 'vagrant destroy -f --no-parallel -g',
|
||
+ label: 'Destroy VMs'
|
||
+ cleanWs()
|
||
+ }
|
||
+ }
|
||
+}
|
||
+
|
||
+def targetDistroSpec(distro) {
|
||
+ def spec = [:]
|
||
+
|
||
+ switch (distro) {
|
||
+ case 'almalinux-8':
|
||
+ vm = 'almalinux_8'
|
||
+ ldata = 'almalinux'
|
||
+
|
||
+ spec = [
|
||
+ vmName: vm,
|
||
+ leappData: ldata
|
||
+ ]
|
||
+ break
|
||
+ case 'centos-stream-8':
|
||
+ vm = 'centosstream_8'
|
||
+ ldata = 'centos'
|
||
+
|
||
+ spec = [
|
||
+ vmName: vm,
|
||
+ leappData: ldata
|
||
+ ]
|
||
+ break
|
||
+ case 'oraclelinux-8':
|
||
+ vm = 'oraclelinux_8'
|
||
+ ldata = 'oraclelinux'
|
||
+
|
||
+ spec = [
|
||
+ vmName: vm,
|
||
+ leappData: ldata
|
||
+ ]
|
||
+ break
|
||
+ case 'rocky-8':
|
||
+ vm = 'rocky_8'
|
||
+ ldata = 'rocky'
|
||
+
|
||
+ spec = [
|
||
+ vmName: vm,
|
||
+ leappData: ldata
|
||
+ ]
|
||
+ break
|
||
+ default:
|
||
+ spec = [
|
||
+ vmName: 'unknown',
|
||
+ leappData: 'unknown'
|
||
+ ]
|
||
+ break
|
||
+ }
|
||
+ return spec
|
||
+}
|
||
diff --git a/ci/jenkins/ELevate_el7toel8_Internal.jenkinsfile b/ci/jenkins/ELevate_el7toel8_Internal.jenkinsfile
|
||
new file mode 100644
|
||
index 00000000..97f900fe
|
||
--- /dev/null
|
||
+++ b/ci/jenkins/ELevate_el7toel8_Internal.jenkinsfile
|
||
@@ -0,0 +1,230 @@
|
||
+RETRY = params.RETRY
|
||
+TIMEOUT = params.TIMEOUT
|
||
+
|
||
+pipeline {
|
||
+ agent {
|
||
+ label 'x86_64 && bm'
|
||
+ }
|
||
+ options {
|
||
+ timestamps()
|
||
+ parallelsAlwaysFailFast()
|
||
+ }
|
||
+ parameters {
|
||
+ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a target distro or all for ELevation')
|
||
+ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration')
|
||
+ string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true)
|
||
+ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true)
|
||
+ }
|
||
+ environment {
|
||
+ VAGRANT_NO_COLOR = '1'
|
||
+ }
|
||
+ stages {
|
||
+ stage('Prepare') {
|
||
+ steps {
|
||
+ sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml',
|
||
+ label: 'Install Ansible collections'
|
||
+ sh script: 'python3.11 -m venv .venv',
|
||
+ label: 'Create Python virtual environment'
|
||
+ sh script: '. .venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko',
|
||
+ label: 'Install Testinfra'
|
||
+ }
|
||
+ }
|
||
+ stage('CreateSingleMachine') {
|
||
+ when {
|
||
+ expression { params.TARGET_DISTRO_FILTER != 'all' }
|
||
+ }
|
||
+ environment {
|
||
+ CONFIG = "${CONF_FILTER}"
|
||
+ }
|
||
+ steps {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER)
|
||
+
|
||
+ sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile',
|
||
+ label: 'Generate Vagrantfile'
|
||
+ sh script: "vagrant up $targetDistro.vmName",
|
||
+ label: 'Create source VM'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('CreateMultiMachine') {
|
||
+ when {
|
||
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
|
||
+ }
|
||
+ environment {
|
||
+ CONFIG = "${CONF_FILTER}"
|
||
+ }
|
||
+ steps {
|
||
+ sh script: 'cp ci/vagrant/el7toel8_multi.rb Vagrantfile',
|
||
+ label: 'Generate Vagrantfile'
|
||
+ sh script: 'vagrant up',
|
||
+ label: 'Create source VM'
|
||
+ }
|
||
+ }
|
||
+ stage('ELevationAndTest') {
|
||
+ matrix {
|
||
+ when {
|
||
+ anyOf {
|
||
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
|
||
+ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO }
|
||
+ }
|
||
+ }
|
||
+ axes {
|
||
+ axis {
|
||
+ name 'TARGET_DISTRO'
|
||
+ values 'almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8'
|
||
+ }
|
||
+ }
|
||
+ stages {
|
||
+ stage('ELevate') {
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
|
||
+
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y https://repo.almalinux.org/elevate/elevate-release-latest-el7.noarch.rpm\"",
|
||
+ label: 'Install the elevate-release-latest rpm packages for EL7'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"wget https://build.almalinux.org/pulp/content/copr/eabdullin1-leapp-data-internal-almalinux-8-x86_64-dr/config.repo -O /etc/yum.repos.d/internal-leapp.repo\"",
|
||
+ label: 'Add pulp repository'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y leapp-upgrade\"",
|
||
+ label: 'Install the leap rpm package'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y $targetDistro.leappData\"",
|
||
+ label: 'Install the LEAP migration data rpm packages'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"",
|
||
+ label: 'Start the Pre-Upgrade check',
|
||
+ returnStatus: true
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"",
|
||
+ label: 'Permit ssh as root login'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"",
|
||
+ label: 'Answer the LEAP question'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"",
|
||
+ label: 'Start the Upgrade'
|
||
+ sh script: "vagrant reload $targetDistro.vmName",
|
||
+ label: 'Reboot to the ELevate initramfs'
|
||
+ sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config",
|
||
+ label: 'Generate the ssh-config file'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('Distro Tests') {
|
||
+ when {
|
||
+ anyOf {
|
||
+ expression { params.CONF_FILTER == 'minimal' }
|
||
+ expression { params.CONF_FILTER == 'docker-ce' }
|
||
+ }
|
||
+ }
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
|
||
+
|
||
+ sh script: 'rm -f conftest.py pytest.ini',
|
||
+ label: 'Delete root conftest.py file'
|
||
+ sh script: """
|
||
+ . .venv/bin/activate \
|
||
+ && py.test -v --hosts=${targetDistro.vmName} \
|
||
+ --ssh-config=.vagrant/ssh-config \
|
||
+ --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \
|
||
+ ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py
|
||
+ """,
|
||
+ label: 'Run the distro specific tests'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('Docker Tests') {
|
||
+ when {
|
||
+ anyOf {
|
||
+ expression { params.CONF_FILTER == 'docker-ce' }
|
||
+ }
|
||
+ }
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
|
||
+
|
||
+ sh script: """
|
||
+ . .venv/bin/activate \
|
||
+ && py.test -v --hosts=${targetDistro.vmName} \
|
||
+ --ssh-config=.vagrant/ssh-config \
|
||
+ --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \
|
||
+ ci/tests/tests/docker/test_docker_ce.py
|
||
+ """,
|
||
+ label: 'Run the docker specific tests'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ post {
|
||
+ success {
|
||
+ junit testResults: 'ci/tests/tests/**/**_junit.xml',
|
||
+ skipPublishingChecks: true
|
||
+ }
|
||
+ cleanup {
|
||
+ sh script: 'vagrant destroy -f --no-parallel -g',
|
||
+ label: 'Destroy VMs'
|
||
+ cleanWs()
|
||
+ }
|
||
+ }
|
||
+}
|
||
+
|
||
+def targetDistroSpec(distro) {
|
||
+ def spec = [:]
|
||
+
|
||
+ switch (distro) {
|
||
+ case 'almalinux-8':
|
||
+ vm = 'almalinux_8'
|
||
+ ldata = 'leapp-data-almalinux'
|
||
+
|
||
+ spec = [
|
||
+ vmName: vm,
|
||
+ leappData: ldata
|
||
+ ]
|
||
+ break
|
||
+ case 'centos-stream-8':
|
||
+ vm = 'centosstream_8'
|
||
+ ldata = 'leapp-data-centos'
|
||
+
|
||
+ spec = [
|
||
+ vmName: vm,
|
||
+ leappData: ldata
|
||
+ ]
|
||
+ break
|
||
+ case 'oraclelinux-8':
|
||
+ vm = 'oraclelinux_8'
|
||
+ ldata = 'leapp-data-oraclelinux'
|
||
+
|
||
+ spec = [
|
||
+ vmName: vm,
|
||
+ leappData: ldata
|
||
+ ]
|
||
+ break
|
||
+ case 'rocky-8':
|
||
+ vm = 'rocky_8'
|
||
+ ldata = 'leapp-data-rocky'
|
||
+
|
||
+ spec = [
|
||
+ vmName: vm,
|
||
+ leappData: ldata
|
||
+ ]
|
||
+ break
|
||
+ default:
|
||
+ spec = [
|
||
+ vmName: 'unknown',
|
||
+ leappData: 'unknown'
|
||
+ ]
|
||
+ break
|
||
+ }
|
||
+ return spec
|
||
+}
|
||
diff --git a/ci/jenkins/ELevate_el7toel8_Internal_Dev.jenkinsfile b/ci/jenkins/ELevate_el7toel8_Internal_Dev.jenkinsfile
|
||
new file mode 100644
|
||
index 00000000..af2fabe2
|
||
--- /dev/null
|
||
+++ b/ci/jenkins/ELevate_el7toel8_Internal_Dev.jenkinsfile
|
||
@@ -0,0 +1,253 @@
|
||
+RETRY = params.RETRY
|
||
+TIMEOUT = params.TIMEOUT
|
||
+
|
||
+pipeline {
|
||
+ agent {
|
||
+ label 'x86_64 && bm'
|
||
+ }
|
||
+ options {
|
||
+ timestamps()
|
||
+ parallelsAlwaysFailFast()
|
||
+ }
|
||
+ parameters {
|
||
+ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a target distro or all for ELevation')
|
||
+ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration')
|
||
+ string(name: 'LEAPP_SRC_GIT_USER', defaultValue: 'AlmaLinux', description: 'Input name of Git user of LEAPP source', trim: true)
|
||
+ string(name: 'LEAPP_SRC_GIT_BRANCH', defaultValue: 'almalinux', description: 'Input name of Git branch of LEAPP source', trim: true)
|
||
+ string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true)
|
||
+ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true)
|
||
+ }
|
||
+ environment {
|
||
+ VAGRANT_NO_COLOR = '1'
|
||
+ }
|
||
+ stages {
|
||
+ stage('Prepare') {
|
||
+ steps {
|
||
+ sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml',
|
||
+ label: 'Install Ansible collections'
|
||
+ sh script: 'python3.11 -m venv .venv',
|
||
+ label: 'Create Python virtual environment'
|
||
+ sh script: '. .venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko',
|
||
+ label: 'Install Testinfra'
|
||
+ sh script: 'git clone https://github.com/AlmaLinux/leapp-data.git --branch devel',
|
||
+ label: 'Fetch devel version of leapp data'
|
||
+ }
|
||
+ }
|
||
+ stage('CreateSingleMachine') {
|
||
+ when {
|
||
+ expression { params.TARGET_DISTRO_FILTER != 'all' }
|
||
+ }
|
||
+ environment {
|
||
+ CONFIG = "${CONF_FILTER}"
|
||
+ }
|
||
+ steps {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER)
|
||
+
|
||
+ sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile',
|
||
+ label: 'Generate Vagrantfile'
|
||
+ sh script: "vagrant up $targetDistro.vmName",
|
||
+ label: 'Create source VM'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('CreateMultiMachine') {
|
||
+ when {
|
||
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
|
||
+ }
|
||
+ environment {
|
||
+ CONFIG = "${CONF_FILTER}"
|
||
+ }
|
||
+ steps {
|
||
+ sh script: 'cp ci/vagrant/el7toel8_multi.rb Vagrantfile',
|
||
+ label: 'Generate Vagrantfile'
|
||
+ sh script: 'vagrant up',
|
||
+ label: 'Create source VM'
|
||
+ }
|
||
+ }
|
||
+ stage('ELevationAndTest') {
|
||
+ matrix {
|
||
+ when {
|
||
+ anyOf {
|
||
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
|
||
+ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO }
|
||
+ }
|
||
+ }
|
||
+ axes {
|
||
+ axis {
|
||
+ name 'TARGET_DISTRO'
|
||
+ values 'almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8'
|
||
+ }
|
||
+ }
|
||
+ stages {
|
||
+ stage('ELevate') {
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
|
||
+
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum-config-manager --add-repo https://repo.almalinux.org/elevate/testing/elevate-testing.repo\"",
|
||
+ label: 'Add testing repo of ELevate'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo wget https://build.almalinux.org/pulp/content/copr/eabdullin1-leapp-data-internal-centos7-x86_64-dr/config.repo -O /etc/yum.repos.d/internal-leapp.repo\"",
|
||
+ label: 'Add pulp repository'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo sed -i 's|enabled=1|enabled=1\\npriority=80|' /etc/yum.repos.d/internal-leapp.repo\"",
|
||
+ label: 'Set priority for pulp repository'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y leapp-upgrade\"",
|
||
+ label: 'Install testing version of ELevate'
|
||
+ sh script: "vagrant upload ci/scripts/install_elevate_dev.sh install_elevate_dev.sh $targetDistro.vmName",
|
||
+ label: 'Upload installer script to VMs'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo bash install_elevate_dev.sh -u ${LEAPP_SRC_GIT_USER} -b ${LEAPP_SRC_GIT_BRANCH}\"",
|
||
+ label: 'Install development version of ELevate',
|
||
+ returnStatus: true
|
||
+ sh script: "vagrant upload leapp-data/ leapp-data/ --compress $targetDistro.vmName",
|
||
+ label: 'Upload devel branch of leapp data'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo mkdir -p /etc/leapp/files/vendors.d\"",
|
||
+ label: 'Create directory structrue of leapp data'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo install -t /etc/leapp/files leapp-data/files/${targetDistro.leappData}/*\"",
|
||
+ label: 'Install devel version of leapp data'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo install -t /etc/leapp/files/vendors.d leapp-data/vendors.d/*\"",
|
||
+ label: 'Install devel version of leapp vendor data'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo mv -f /etc/leapp/files/leapp_upgrade_repositories.repo.el8 /etc/leapp/files/leapp_upgrade_repositories.repo\"",
|
||
+ label: 'Configure leapp upgrade repositories for EL7toEL8'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo mv -f /etc/leapp/files/repomap.json.el8 /etc/leapp/files/repomap.json\"",
|
||
+ label: 'Configure leapp repository mapping for EL7toEL8'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum -y install tree && sudo tree -ha /etc/leapp\"",
|
||
+ label: 'Check if development version of leapp data installed correctly'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"",
|
||
+ label: 'Start pre-upgrade check',
|
||
+ returnStatus: true
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"",
|
||
+ label: 'Permit ssh as root login'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"",
|
||
+ label: 'Answer the leapp question'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"",
|
||
+ label: 'Start the Upgrade'
|
||
+ sh script: "vagrant reload $targetDistro.vmName",
|
||
+ label: 'Reboot to the ELevate initramfs'
|
||
+ sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config",
|
||
+ label: 'Generate the ssh-config file'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('Distro Tests') {
|
||
+ when {
|
||
+ anyOf {
|
||
+ expression { params.CONF_FILTER == 'minimal' }
|
||
+ expression { params.CONF_FILTER == 'docker-ce' }
|
||
+ }
|
||
+ }
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
|
||
+
|
||
+ sh script: 'rm -f conftest.py pytest.ini',
|
||
+ label: 'Delete root conftest.py file'
|
||
+ sh script: """
|
||
+ . .venv/bin/activate \
|
||
+ && py.test -v --hosts=${targetDistro.vmName} \
|
||
+ --ssh-config=.vagrant/ssh-config \
|
||
+ --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \
|
||
+ ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py
|
||
+ """,
|
||
+ label: 'Run the distro specific tests'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('Docker Tests') {
|
||
+ when {
|
||
+ anyOf {
|
||
+ expression { params.CONF_FILTER == 'docker-ce' }
|
||
+ }
|
||
+ }
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
|
||
+
|
||
+ sh script: """
|
||
+ . .venv/bin/activate \
|
||
+ && py.test -v --hosts=${targetDistro.vmName} \
|
||
+ --ssh-config=.vagrant/ssh-config \
|
||
+ --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \
|
||
+ ci/tests/tests/docker/test_docker_ce.py
|
||
+ """,
|
||
+ label: 'Run the docker specific tests'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ post {
|
||
+ success {
|
||
+ junit testResults: 'ci/tests/tests/**/**_junit.xml',
|
||
+ skipPublishingChecks: true
|
||
+ }
|
||
+ cleanup {
|
||
+ sh script: 'vagrant destroy -f --no-parallel -g',
|
||
+ label: 'Destroy VMs'
|
||
+ cleanWs()
|
||
+ }
|
||
+ }
|
||
+}
|
||
+
|
||
+def targetDistroSpec(distro) {
|
||
+ def spec = [:]
|
||
+
|
||
+ switch (distro) {
|
||
+ case 'almalinux-8':
|
||
+ vm = 'almalinux_8'
|
||
+ ldata = 'almalinux'
|
||
+
|
||
+ spec = [
|
||
+ vmName: vm,
|
||
+ leappData: ldata
|
||
+ ]
|
||
+ break
|
||
+ case 'centos-stream-8':
|
||
+ vm = 'centosstream_8'
|
||
+ ldata = 'centos'
|
||
+
|
||
+ spec = [
|
||
+ vmName: vm,
|
||
+ leappData: ldata
|
||
+ ]
|
||
+ break
|
||
+ case 'oraclelinux-8':
|
||
+ vm = 'oraclelinux_8'
|
||
+ ldata = 'oraclelinux'
|
||
+
|
||
+ spec = [
|
||
+ vmName: vm,
|
||
+ leappData: ldata
|
||
+ ]
|
||
+ break
|
||
+ case 'rocky-8':
|
||
+ vm = 'rocky_8'
|
||
+ ldata = 'rocky'
|
||
+
|
||
+ spec = [
|
||
+ vmName: vm,
|
||
+ leappData: ldata
|
||
+ ]
|
||
+ break
|
||
+ default:
|
||
+ spec = [
|
||
+ vmName: 'unknown',
|
||
+ leappData: 'unknown'
|
||
+ ]
|
||
+ break
|
||
+ }
|
||
+ return spec
|
||
+}
|
||
diff --git a/ci/jenkins/ELevate_el7toel8_Stable.jenkinsfile b/ci/jenkins/ELevate_el7toel8_Stable.jenkinsfile
|
||
new file mode 100644
|
||
index 00000000..ae9bdb57
|
||
--- /dev/null
|
||
+++ b/ci/jenkins/ELevate_el7toel8_Stable.jenkinsfile
|
||
@@ -0,0 +1,228 @@
|
||
+RETRY = params.RETRY
|
||
+TIMEOUT = params.TIMEOUT
|
||
+
|
||
+pipeline {
|
||
+ agent {
|
||
+ label 'x86_64 && bm'
|
||
+ }
|
||
+ options {
|
||
+ timestamps()
|
||
+ parallelsAlwaysFailFast()
|
||
+ }
|
||
+ parameters {
|
||
+ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a target distro or all for ELevation')
|
||
+ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration')
|
||
+ string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true)
|
||
+ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true)
|
||
+ }
|
||
+ environment {
|
||
+ VAGRANT_NO_COLOR = '1'
|
||
+ }
|
||
+ stages {
|
||
+ stage('Prepare') {
|
||
+ steps {
|
||
+ sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml',
|
||
+ label: 'Install Ansible collections'
|
||
+ sh script: 'python3.11 -m venv .venv',
|
||
+ label: 'Create Python virtual environment'
|
||
+ sh script: '. .venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko',
|
||
+ label: 'Install Testinfra'
|
||
+ }
|
||
+ }
|
||
+ stage('CreateSingleMachine') {
|
||
+ when {
|
||
+ expression { params.TARGET_DISTRO_FILTER != 'all' }
|
||
+ }
|
||
+ environment {
|
||
+ CONFIG = "${CONF_FILTER}"
|
||
+ }
|
||
+ steps {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER)
|
||
+
|
||
+ sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile',
|
||
+ label: 'Generate Vagrantfile'
|
||
+ sh script: "vagrant up $targetDistro.vmName",
|
||
+ label: 'Create source VM'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('CreateMultiMachine') {
|
||
+ when {
|
||
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
|
||
+ }
|
||
+ environment {
|
||
+ CONFIG = "${CONF_FILTER}"
|
||
+ }
|
||
+ steps {
|
||
+ sh script: 'cp ci/vagrant/el7toel8_multi.rb Vagrantfile',
|
||
+ label: 'Generate Vagrantfile'
|
||
+ sh script: 'vagrant up',
|
||
+ label: 'Create source VM'
|
||
+ }
|
||
+ }
|
||
+ stage('ELevationAndTest') {
|
||
+ matrix {
|
||
+ when {
|
||
+ anyOf {
|
||
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
|
||
+ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO }
|
||
+ }
|
||
+ }
|
||
+ axes {
|
||
+ axis {
|
||
+ name 'TARGET_DISTRO'
|
||
+ values 'almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8'
|
||
+ }
|
||
+ }
|
||
+ stages {
|
||
+ stage('ELevate') {
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
|
||
+
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y https://repo.almalinux.org/elevate/elevate-release-latest-el7.noarch.rpm\"",
|
||
+ label: 'Install the elevate-release-latest rpm packages for EL7'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y leapp-upgrade\"",
|
||
+ label: 'Install the leap rpm package'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y $targetDistro.leappData\"",
|
||
+ label: 'Install the LEAP migration data rpm packages'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"",
|
||
+ label: 'Start the Pre-Upgrade check',
|
||
+ returnStatus: true
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"",
|
||
+ label: 'Permit ssh as root login'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"",
|
||
+ label: 'Answer the LEAP question'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"",
|
||
+ label: 'Start the Upgrade'
|
||
+ sh script: "vagrant reload $targetDistro.vmName",
|
||
+ label: 'Reboot to the ELevate initramfs'
|
||
+ sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config",
|
||
+ label: 'Generate the ssh-config file'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('Distro Tests') {
|
||
+ when {
|
||
+ anyOf {
|
||
+ expression { params.CONF_FILTER == 'minimal' }
|
||
+ expression { params.CONF_FILTER == 'docker-ce' }
|
||
+ }
|
||
+ }
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
|
||
+
|
||
+ sh script: 'rm -f conftest.py pytest.ini',
|
||
+ label: 'Delete root conftest.py file'
|
||
+ sh script: """
|
||
+ . .venv/bin/activate \
|
||
+ && py.test -v --hosts=${targetDistro.vmName} \
|
||
+ --ssh-config=.vagrant/ssh-config \
|
||
+ --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \
|
||
+ ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py
|
||
+ """,
|
||
+ label: 'Run the distro specific tests'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('Docker Tests') {
|
||
+ when {
|
||
+ anyOf {
|
||
+ expression { params.CONF_FILTER == 'docker-ce' }
|
||
+ }
|
||
+ }
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
|
||
+
|
||
+ sh script: """
|
||
+ . .venv/bin/activate \
|
||
+ && py.test -v --hosts=${targetDistro.vmName} \
|
||
+ --ssh-config=.vagrant/ssh-config \
|
||
+ --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \
|
||
+ ci/tests/tests/docker/test_docker_ce.py
|
||
+ """,
|
||
+ label: 'Run the docker specific tests'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ post {
|
||
+ success {
|
||
+ junit testResults: 'ci/tests/tests/**/**_junit.xml',
|
||
+ skipPublishingChecks: true
|
||
+ }
|
||
+ cleanup {
|
||
+ sh script: 'vagrant destroy -f --no-parallel -g',
|
||
+ label: 'Destroy VMs'
|
||
+ cleanWs()
|
||
+ }
|
||
+ }
|
||
+}
|
||
+
|
||
+def targetDistroSpec(distro) {
|
||
+ def spec = [:]
|
||
+
|
||
+ switch (distro) {
|
||
+ case 'almalinux-8':
|
||
+ vm = 'almalinux_8'
|
||
+ ldata = 'leapp-data-almalinux'
|
||
+
|
||
+ spec = [
|
||
+ vmName: vm,
|
||
+ leappData: ldata
|
||
+ ]
|
||
+ break
|
||
+ case 'centos-stream-8':
|
||
+ vm = 'centosstream_8'
|
||
+ ldata = 'leapp-data-centos'
|
||
+
|
||
+ spec = [
|
||
+ vmName: vm,
|
||
+ leappData: ldata
|
||
+ ]
|
||
+ break
|
||
+ case 'oraclelinux-8':
|
||
+ vm = 'oraclelinux_8'
|
||
+ ldata = 'leapp-data-oraclelinux'
|
||
+
|
||
+ spec = [
|
||
+ vmName: vm,
|
||
+ leappData: ldata
|
||
+ ]
|
||
+ break
|
||
+ case 'rocky-8':
|
||
+ vm = 'rocky_8'
|
||
+ ldata = 'leapp-data-rocky'
|
||
+
|
||
+ spec = [
|
||
+ vmName: vm,
|
||
+ leappData: ldata
|
||
+ ]
|
||
+ break
|
||
+ default:
|
||
+ spec = [
|
||
+ vmName: 'unknown',
|
||
+ leappData: 'unknown'
|
||
+ ]
|
||
+ break
|
||
+ }
|
||
+ return spec
|
||
+}
|
||
diff --git a/ci/jenkins/ELevate_el7toel8_Testing.jenkinsfile b/ci/jenkins/ELevate_el7toel8_Testing.jenkinsfile
|
||
new file mode 100644
|
||
index 00000000..0f37cf2e
|
||
--- /dev/null
|
||
+++ b/ci/jenkins/ELevate_el7toel8_Testing.jenkinsfile
|
||
@@ -0,0 +1,228 @@
|
||
+RETRY = params.RETRY
|
||
+TIMEOUT = params.TIMEOUT
|
||
+
|
||
+pipeline {
|
||
+ agent {
|
||
+ label 'x86_64 && bm'
|
||
+ }
|
||
+ options {
|
||
+ timestamps()
|
||
+ parallelsAlwaysFailFast()
|
||
+ }
|
||
+ parameters {
|
||
+ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a target distro or all for ELevation')
|
||
+ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration')
|
||
+ string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true)
|
||
+ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true)
|
||
+ }
|
||
+ environment {
|
||
+ VAGRANT_NO_COLOR = '1'
|
||
+ }
|
||
+ stages {
|
||
+ stage('Prepare') {
|
||
+ steps {
|
||
+ sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml',
|
||
+ label: 'Install Ansible collections'
|
||
+ sh script: 'python3.11 -m venv .venv',
|
||
+ label: 'Create Python virtual environment'
|
||
+ sh script: '. .venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko',
|
||
+ label: 'Install Testinfra'
|
||
+ }
|
||
+ }
|
||
+ stage('CreateSingleMachine') {
|
||
+ when {
|
||
+ expression { params.TARGET_DISTRO_FILTER != 'all' }
|
||
+ }
|
||
+ environment {
|
||
+ CONFIG = "${CONF_FILTER}"
|
||
+ }
|
||
+ steps {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER)
|
||
+
|
||
+ sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile',
|
||
+ label: 'Generate Vagrantfile'
|
||
+ sh script: "vagrant up $targetDistro.vmName",
|
||
+ label: 'Create source VM'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('CreateMultiMachine') {
|
||
+ when {
|
||
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
|
||
+ }
|
||
+ environment {
|
||
+ CONFIG = "${CONF_FILTER}"
|
||
+ }
|
||
+ steps {
|
||
+ sh script: 'cp ci/vagrant/el7toel8_multi.rb Vagrantfile',
|
||
+ label: 'Generate Vagrantfile'
|
||
+ sh script: 'vagrant up',
|
||
+ label: 'Create source VM'
|
||
+ }
|
||
+ }
|
||
+ stage('ELevationAndTest') {
|
||
+ matrix {
|
||
+ when {
|
||
+ anyOf {
|
||
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
|
||
+ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO }
|
||
+ }
|
||
+ }
|
||
+ axes {
|
||
+ axis {
|
||
+ name 'TARGET_DISTRO'
|
||
+ values 'almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8'
|
||
+ }
|
||
+ }
|
||
+ stages {
|
||
+ stage('ELevate') {
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
|
||
+
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum-config-manager --add-repo https://repo.almalinux.org/elevate/testing/elevate-testing.repo\"",
|
||
+ label: 'Install the elevate-release-latest rpm packages for EL7'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y leapp-upgrade\"",
|
||
+ label: 'Install the leap rpm package'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y $targetDistro.leappData\"",
|
||
+ label: 'Install the LEAP migration data rpm packages'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"",
|
||
+ label: 'Start the Pre-Upgrade check',
|
||
+ returnStatus: true
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"",
|
||
+ label: 'Permit ssh as root login'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"",
|
||
+ label: 'Answer the LEAP question'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"",
|
||
+ label: 'Start the Upgrade'
|
||
+ sh script: "vagrant reload $targetDistro.vmName",
|
||
+ label: 'Reboot to the ELevate initramfs'
|
||
+ sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config",
|
||
+ label: 'Generate the ssh-config file'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('Distro Tests') {
|
||
+ when {
|
||
+ anyOf {
|
||
+ expression { params.CONF_FILTER == 'minimal' }
|
||
+ expression { params.CONF_FILTER == 'docker-ce' }
|
||
+ }
|
||
+ }
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
|
||
+
|
||
+ sh script: 'rm -f conftest.py pytest.ini',
|
||
+ label: 'Delete root conftest.py file'
|
||
+ sh script: """
|
||
+ . .venv/bin/activate \
|
||
+ && py.test -v --hosts=${targetDistro.vmName} \
|
||
+ --ssh-config=.vagrant/ssh-config \
|
||
+ --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \
|
||
+ ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py
|
||
+ """,
|
||
+ label: 'Run the distro specific tests'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('Docker Tests') {
|
||
+ when {
|
||
+ anyOf {
|
||
+ expression { params.CONF_FILTER == 'docker-ce' }
|
||
+ }
|
||
+ }
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
|
||
+
|
||
+ sh script: """
|
||
+ . .venv/bin/activate \
|
||
+ && py.test -v --hosts=${targetDistro.vmName} \
|
||
+ --ssh-config=.vagrant/ssh-config \
|
||
+ --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \
|
||
+ ci/tests/tests/docker/test_docker_ce.py
|
||
+ """,
|
||
+ label: 'Run the docker specific tests'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ post {
|
||
+ success {
|
||
+ junit testResults: 'ci/tests/tests/**/**_junit.xml',
|
||
+ skipPublishingChecks: true
|
||
+ }
|
||
+ cleanup {
|
||
+ sh script: 'vagrant destroy -f --no-parallel -g',
|
||
+ label: 'Destroy VMs'
|
||
+ cleanWs()
|
||
+ }
|
||
+ }
|
||
+}
|
||
+
|
||
+def targetDistroSpec(distro) {
|
||
+ def spec = [:]
|
||
+
|
||
+ switch (distro) {
|
||
+ case 'almalinux-8':
|
||
+ vm = 'almalinux_8'
|
||
+ ldata = 'leapp-data-almalinux'
|
||
+
|
||
+ spec = [
|
||
+ vmName: vm,
|
||
+ leappData: ldata
|
||
+ ]
|
||
+ break
|
||
+ case 'centos-stream-8':
|
||
+ vm = 'centosstream_8'
|
||
+ ldata = 'leapp-data-centos'
|
||
+
|
||
+ spec = [
|
||
+ vmName: vm,
|
||
+ leappData: ldata
|
||
+ ]
|
||
+ break
|
||
+ case 'oraclelinux-8':
|
||
+ vm = 'oraclelinux_8'
|
||
+ ldata = 'leapp-data-oraclelinux'
|
||
+
|
||
+ spec = [
|
||
+ vmName: vm,
|
||
+ leappData: ldata
|
||
+ ]
|
||
+ break
|
||
+ case 'rocky-8':
|
||
+ vm = 'rocky_8'
|
||
+ ldata = 'leapp-data-rocky'
|
||
+
|
||
+ spec = [
|
||
+ vmName: vm,
|
||
+ leappData: ldata
|
||
+ ]
|
||
+ break
|
||
+ default:
|
||
+ spec = [
|
||
+ vmName: 'unknown',
|
||
+ leappData: 'unknown'
|
||
+ ]
|
||
+ break
|
||
+ }
|
||
+ return spec
|
||
+}
|
||
diff --git a/ci/jenkins/ELevate_el8toel9_Development.jenkinsfile b/ci/jenkins/ELevate_el8toel9_Development.jenkinsfile
|
||
new file mode 100644
|
||
index 00000000..7eb5430b
|
||
--- /dev/null
|
||
+++ b/ci/jenkins/ELevate_el8toel9_Development.jenkinsfile
|
||
@@ -0,0 +1,200 @@
|
||
+RETRY = params.RETRY
|
||
+TIMEOUT = params.TIMEOUT
|
||
+
|
||
+pipeline {
|
||
+ agent {
|
||
+ label params.AGENT
|
||
+ }
|
||
+ options {
|
||
+ timestamps()
|
||
+ }
|
||
+ parameters {
|
||
+ string(name: 'AGENT', defaultValue: 'almalinux-8-vagrant-libvirt-x86_64', description: 'Input label of the Jenkins Agent', trim: true)
|
||
+ string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true)
|
||
+ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true)
|
||
+ string(name: 'REPO_URL', defaultValue: 'https://github.com/LKHN/el-test-auto-dev.git', description: 'URL of the pipeline repository', trim: true)
|
||
+ string(name: 'REPO_BRANCH', defaultValue: 'main', description: 'Branch of the pipeline repository', trim: true)
|
||
+ choice(name: 'SOURCE_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a source distro or all for ELevation')
|
||
+ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'centos-stream-9', 'oraclelinux-9', 'rocky-9', 'all'], description: 'Select a target distro or all to ELevation')
|
||
+ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration')
|
||
+ }
|
||
+ stages {
|
||
+ stage('Source') {
|
||
+ steps {
|
||
+ git url: REPO_URL,
|
||
+ branch: REPO_BRANCH,
|
||
+ credentialsId: 'github-almalinuxautobot'
|
||
+ }
|
||
+ }
|
||
+ stage('Prepare Build and Test enviroment') {
|
||
+ steps {
|
||
+ sh script: 'cp Vagrantfile.el8toel9 Vagrantfile',
|
||
+ label: 'Generate the el8toel9 Vagrantfile'
|
||
+ sh script: 'sudo dnf -y install python39-devel python39-wheel',
|
||
+ label: 'Install Python 3.9, PIP and Wheel'
|
||
+ sh script: 'sudo python3 -m pip install --no-cache-dir --upgrade -r requirements.txt',
|
||
+ label: 'Install TestInfra'
|
||
+ sh script: 'git clone https://github.com/AlmaLinux/leapp-data.git --branch devel',
|
||
+ label: 'Clone the leapp-data git repository'
|
||
+ }
|
||
+ }
|
||
+ stage('ELevation') {
|
||
+ matrix {
|
||
+ when {
|
||
+ allOf {
|
||
+ anyOf {
|
||
+ expression { params.SOURCE_DISTRO_FILTER == 'all' }
|
||
+ expression { params.SOURCE_DISTRO_FILTER == env.SOURCE_DISTRO }
|
||
+ }
|
||
+ anyOf {
|
||
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
|
||
+ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ axes {
|
||
+ axis {
|
||
+ name 'SOURCE_DISTRO'
|
||
+ values 'almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8'
|
||
+ }
|
||
+ axis {
|
||
+ name 'TARGET_DISTRO'
|
||
+ values 'almalinux-9', 'centos-stream-9', 'oraclelinux-9', 'rocky-9'
|
||
+ }
|
||
+ }
|
||
+ stages {
|
||
+ stage('Create and Configure Machines') {
|
||
+ environment {
|
||
+ CONFIG = "${CONF_FILTER}"
|
||
+ }
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ sh script: 'vagrant destroy -f $SOURCE_DISTRO',
|
||
+ label: 'Make sure no machine present from the last retry'
|
||
+ sh script: 'vagrant up $SOURCE_DISTRO',
|
||
+ label: 'Create the source machines'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('ELevate to the all target distros') {
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf config-manager --add-repo https://repo.almalinux.org/elevate/testing/elevate-testing.repo\"',
|
||
+ label: 'Add the ELevate Testing RPM repository'
|
||
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf install -y leapp-upgrade\"',
|
||
+ label: 'Install the leap rpm package'
|
||
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo bash /vagrant/scripts/install_elevate_dev.sh\"',
|
||
+ label: 'Install Development version of ELevate',
|
||
+ returnStatus: true
|
||
+ script {
|
||
+ def LEAPP_DATA = getLeappDataDistro(TARGET_DISTRO)
|
||
+ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo mkdir -p /etc/leapp/files/vendors.d\"",
|
||
+ label:'Create the LEAPP directory')
|
||
+ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo install -t /etc/leapp/files /vagrant/leapp-data/files/${LEAPP_DATA}/*\"",
|
||
+ label:"Install the LEAPP DATA")
|
||
+ sh(script:'vagrant ssh $SOURCE_DISTRO -c \"sudo install -t /etc/leapp/files/vendors.d /vagrant/leapp-data/vendors.d/*\"',
|
||
+ label:"Install the Vendor DATA")
|
||
+ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo mv -f /etc/leapp/files/leapp_upgrade_repositories.repo.el9 /etc/leapp/files/leapp_upgrade_repositories.repo\"",
|
||
+ label:'Set LEAPP Repos for EL8')
|
||
+ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo mv -f /etc/leapp/files/repomap.json.el9 /etc/leapp/files/repomap.json\"",
|
||
+ label:'Set LEAPP Repo map for EL8')
|
||
+ sh(script:'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf -y install tree && sudo tree -ha /etc/leapp\"',
|
||
+ label:"Debug: Data paths")
|
||
+ }
|
||
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp preupgrade\"',
|
||
+ label: 'Start the Pre-Upgrade check',
|
||
+ returnStatus: true
|
||
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"',
|
||
+ label: 'Permit ssh as root login'
|
||
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"',
|
||
+ label: 'Answer the LEAP question'
|
||
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp upgrade\"',
|
||
+ label: 'Start the Upgrade'
|
||
+ sh script: 'vagrant reload $SOURCE_DISTRO',
|
||
+ label: 'Reboot to the ELevate initramfs'
|
||
+ sh script: 'vagrant ssh-config $SOURCE_DISTRO >> .vagrant/ssh-config',
|
||
+ label: 'Generate the ssh-config file'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('Distro Tests') {
|
||
+ when {
|
||
+ anyOf {
|
||
+ expression { params.CONF_FILTER == 'minimal'}
|
||
+ expression { params.CONF_FILTER == 'docker-ce'}
|
||
+ }
|
||
+ }
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ sh script: 'py.test -v --hosts=$SOURCE_DISTRO --ssh-config=.vagrant/ssh-config --junit-xml tests/distro/test_osinfo_$SOURCE_DISTRO-junit.xml tests/distro/test_osinfo_$SOURCE_DISTRO.py',
|
||
+ label: 'Run the distro specific tests'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('Docker Tests') {
|
||
+ when {
|
||
+ anyOf {
|
||
+ expression { params.CONF_FILTER == 'docker-ce'}
|
||
+ }
|
||
+ }
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ sh script: 'py.test -v --hosts=$SOURCE_DISTRO --ssh-config=.vagrant/ssh-config --junit-xml tests/docker/test_docker_ce_$SOURCE_DISTRO-junit.xml tests/docker/test_docker_ce.py',
|
||
+ label: 'Run the distro specific tests'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ post {
|
||
+ success {
|
||
+ junit testResults: '**/tests/**/**-junit.xml',
|
||
+ skipPublishingChecks: true
|
||
+ }
|
||
+ cleanup {
|
||
+ sh script: 'vagrant destroy -f',
|
||
+ label: 'Destroy All Machines'
|
||
+ cleanWs()
|
||
+ }
|
||
+ }
|
||
+}
|
||
+
|
||
+/*
|
||
+* Common Functions
|
||
+*/
|
||
+def getLeappDataDistro(TARGET_DISTRO) {
|
||
+ def leapp_data = ""
|
||
+
|
||
+ switch(TARGET_DISTRO) {
|
||
+ case "almalinux-9":
|
||
+ leapp_data = TARGET_DISTRO.substring(0, 9)
|
||
+ break
|
||
+
|
||
+ case "centos-stream-9":
|
||
+ leapp_data = TARGET_DISTRO.substring(0, 6)
|
||
+ break
|
||
+
|
||
+ case "oraclelinux-9":
|
||
+ leapp_data = TARGET_DISTRO.substring(0, 11)
|
||
+ break
|
||
+
|
||
+ case "rocky-9":
|
||
+ leapp_data = TARGET_DISTRO.substring(0, 5)
|
||
+ break
|
||
+
|
||
+ default:
|
||
+ leap_data = "Error: Target Distro Not Supported"
|
||
+ break
|
||
+ }
|
||
+ return leapp_data
|
||
+}
|
||
diff --git a/ci/jenkins/ELevate_el8toel9_Internal.jenkinsfile b/ci/jenkins/ELevate_el8toel9_Internal.jenkinsfile
|
||
new file mode 100644
|
||
index 00000000..aa6be967
|
||
--- /dev/null
|
||
+++ b/ci/jenkins/ELevate_el8toel9_Internal.jenkinsfile
|
||
@@ -0,0 +1,214 @@
|
||
+RETRY = params.RETRY
|
||
+TIMEOUT = params.TIMEOUT
|
||
+
|
||
+pipeline {
|
||
+ agent {
|
||
+ label 'x86_64 && bm'
|
||
+ }
|
||
+ options {
|
||
+ timestamps()
|
||
+ parallelsAlwaysFailFast()
|
||
+ }
|
||
+ parameters {
|
||
+ // choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'centos-stream-9', 'rocky-9', 'all'], description: 'Select a target distro or all for ELevation')
|
||
+ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'rocky-9', 'all'], description: 'Select a target distro or all for ELevation')
|
||
+ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration')
|
||
+ string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true)
|
||
+ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true)
|
||
+ }
|
||
+ environment {
|
||
+ VAGRANT_NO_COLOR = '1'
|
||
+ }
|
||
+ stages {
|
||
+ stage('Prepare') {
|
||
+ steps {
|
||
+ sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml',
|
||
+ label: 'Install Ansible collections'
|
||
+ sh script: 'python3.11 -m venv .venv',
|
||
+ label: 'Create Python virtual environment'
|
||
+ sh script: '. .venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko',
|
||
+ label: 'Install Testinfra'
|
||
+ }
|
||
+ }
|
||
+ stage('CreateSingleMachine') {
|
||
+ when {
|
||
+ expression { params.TARGET_DISTRO_FILTER != 'all' }
|
||
+ }
|
||
+ environment {
|
||
+ CONFIG = "${CONF_FILTER}"
|
||
+ }
|
||
+ steps {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER)
|
||
+
|
||
+ sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile',
|
||
+ label: 'Generate Vagrantfile'
|
||
+ sh script: "vagrant up $targetDistro.vmName",
|
||
+ label: 'Create source VM'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('CreateMultiMachine') {
|
||
+ when {
|
||
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
|
||
+ }
|
||
+ environment {
|
||
+ CONFIG = "${CONF_FILTER}"
|
||
+ }
|
||
+ steps {
|
||
+ sh script: 'cp ci/vagrant/el8toel9_multi.rb Vagrantfile',
|
||
+ label: 'Generate Vagrantfile'
|
||
+ sh script: 'vagrant up',
|
||
+ label: 'Create source VM'
|
||
+ }
|
||
+ }
|
||
+ stage('ELevationAndTest') {
|
||
+ matrix {
|
||
+ when {
|
||
+ anyOf {
|
||
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
|
||
+ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO }
|
||
+ }
|
||
+ }
|
||
+ axes {
|
||
+ axis {
|
||
+ name 'TARGET_DISTRO'
|
||
+ // values 'almalinux-9', 'centos-stream-9', 'rocky-9'
|
||
+ values 'almalinux-9', 'rocky-9'
|
||
+ }
|
||
+ }
|
||
+ stages {
|
||
+ stage('ELevate') {
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
|
||
+
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo dnf install -y https://repo.almalinux.org/elevate/elevate-release-latest-el8.noarch.rpm\"",
|
||
+ label: 'Install the elevate-release-latest rpm packages for EL8'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"wget https://build.almalinux.org/pulp/content/copr/eabdullin1-leapp-data-internal-centos7-x86_64-dr/config.repo -O /etc/yum.repos.d/internal-leapp.repo\"",
|
||
+ label: 'Add pulp repository'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo dnf install -y leapp-upgrade\"",
|
||
+ label: 'Install the leap rpm package'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo dnf install -y $targetDistro.leappData\"",
|
||
+ label: 'Install the LEAP migration data rpm packages'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"",
|
||
+ label: 'Start the Pre-Upgrade check',
|
||
+ returnStatus: true
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo sed -i \'s/^AllowZoneDrifting=.*/AllowZoneDrifting=no/\' /etc/firewalld/firewalld.conf\"",
|
||
+ label: 'TODO'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section check_vdo.no_vdo_devices=True\"",
|
||
+ label: 'TODO'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"",
|
||
+ label: 'Start the Upgrade'
|
||
+ sh script: "vagrant reload $targetDistro.vmName",
|
||
+ label: 'Reboot to the ELevate initramfs'
|
||
+ sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config",
|
||
+ label: 'Generate the ssh-config file'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('Distro Tests') {
|
||
+ when {
|
||
+ anyOf {
|
||
+ expression { params.CONF_FILTER == 'minimal' }
|
||
+ expression { params.CONF_FILTER == 'docker-ce' }
|
||
+ }
|
||
+ }
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
|
||
+
|
||
+ sh script: 'rm -f conftest.py pytest.ini',
|
||
+ label: 'Delete root conftest.py file'
|
||
+ sh script: """
|
||
+ . .venv/bin/activate \
|
||
+ && py.test -v --hosts=${targetDistro.vmName} \
|
||
+ --ssh-config=.vagrant/ssh-config \
|
||
+ --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \
|
||
+ ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py
|
||
+ """,
|
||
+ label: 'Run the distro specific tests'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('Docker Tests') {
|
||
+ when {
|
||
+ anyOf {
|
||
+ expression { params.CONF_FILTER == 'docker-ce' }
|
||
+ }
|
||
+ }
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
|
||
+
|
||
+ sh script: """
|
||
+ . .venv/bin/activate \
|
||
+ && py.test -v --hosts=${targetDistro.vmName} \
|
||
+ --ssh-config=.vagrant/ssh-config \
|
||
+ --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \
|
||
+ ci/tests/tests/docker/test_docker_ce.py
|
||
+ """,
|
||
+ label: 'Run the docker specific tests'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ post {
|
||
+ success {
|
||
+ junit testResults: 'ci/tests/tests/**/**_junit.xml',
|
||
+ skipPublishingChecks: true
|
||
+ }
|
||
+ cleanup {
|
||
+ sh script: 'vagrant destroy -f --no-parallel -g',
|
||
+ label: 'Destroy VMs'
|
||
+ cleanWs()
|
||
+ }
|
||
+ }
|
||
+}
|
||
+
|
||
+def targetDistroSpec(distro) {
|
||
+ def spec = [:]
|
||
+
|
||
+ switch (distro) {
|
||
+ case 'almalinux-9':
|
||
+ vm = 'almalinux_9'
|
||
+ ldata = 'leapp-data-almalinux'
|
||
+
|
||
+ spec = [
|
||
+ vmName: vm,
|
||
+ leappData: ldata
|
||
+ ]
|
||
+ break
|
||
+ case 'rocky-9':
|
||
+ vm = 'rocky_9'
|
||
+ ldata = 'leapp-data-rocky'
|
||
+
|
||
+ spec = [
|
||
+ vmName: vm,
|
||
+ leappData: ldata
|
||
+ ]
|
||
+ break
|
||
+ default:
|
||
+ spec = [
|
||
+ vmName: 'unknown',
|
||
+ leappData: 'unknown'
|
||
+ ]
|
||
+ break
|
||
+ }
|
||
+ return spec
|
||
+}
|
||
diff --git a/ci/jenkins/ELevate_el8toel9_Internal_Dev.jenkinsfile b/ci/jenkins/ELevate_el8toel9_Internal_Dev.jenkinsfile
|
||
new file mode 100644
|
||
index 00000000..82626697
|
||
--- /dev/null
|
||
+++ b/ci/jenkins/ELevate_el8toel9_Internal_Dev.jenkinsfile
|
||
@@ -0,0 +1,206 @@
|
||
+RETRY = params.RETRY
|
||
+TIMEOUT = params.TIMEOUT
|
||
+
|
||
+pipeline {
|
||
+ agent {
|
||
+ label params.AGENT
|
||
+ }
|
||
+ options {
|
||
+ timestamps()
|
||
+ }
|
||
+ parameters {
|
||
+ string(name: 'AGENT', defaultValue: 'almalinux-8-vagrant-libvirt-x86_64', description: 'Input label of the Jenkins Agent', trim: true)
|
||
+ string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true)
|
||
+ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true)
|
||
+ string(name: 'REPO_URL', defaultValue: 'https://github.com/LKHN/el-test-auto-dev.git', description: 'URL of the pipeline repository', trim: true)
|
||
+ string(name: 'REPO_BRANCH', defaultValue: 'main', description: 'Branch of the pipeline repository', trim: true)
|
||
+ choice(name: 'SOURCE_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a source distro or all for ELevation')
|
||
+ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'centos-stream-9', 'oraclelinux-9', 'rocky-9', 'all'], description: 'Select a target distro or all to ELevation')
|
||
+ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration')
|
||
+ }
|
||
+ stages {
|
||
+ stage('Source') {
|
||
+ steps {
|
||
+ git url: REPO_URL,
|
||
+ branch: REPO_BRANCH,
|
||
+ credentialsId: 'github-almalinuxautobot'
|
||
+ }
|
||
+ }
|
||
+ stage('Prepare Build and Test enviroment') {
|
||
+ steps {
|
||
+ sh script: 'cp Vagrantfile.el8toel9 Vagrantfile',
|
||
+ label: 'Generate the el8toel9 Vagrantfile'
|
||
+ sh script: 'sudo dnf -y install python39-devel python39-wheel',
|
||
+ label: 'Install Python 3.9, PIP and Wheel'
|
||
+ sh script: 'sudo python3 -m pip install --no-cache-dir --upgrade -r requirements.txt',
|
||
+ label: 'Install TestInfra'
|
||
+ sh script: 'git clone https://github.com/AlmaLinux/leapp-data.git --branch devel',
|
||
+ label: 'Clone the leapp-data git repository'
|
||
+ }
|
||
+ }
|
||
+ stage('ELevation') {
|
||
+ matrix {
|
||
+ when {
|
||
+ allOf {
|
||
+ anyOf {
|
||
+ expression { params.SOURCE_DISTRO_FILTER == 'all' }
|
||
+ expression { params.SOURCE_DISTRO_FILTER == env.SOURCE_DISTRO }
|
||
+ }
|
||
+ anyOf {
|
||
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
|
||
+ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ axes {
|
||
+ axis {
|
||
+ name 'SOURCE_DISTRO'
|
||
+ values 'almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8'
|
||
+ }
|
||
+ axis {
|
||
+ name 'TARGET_DISTRO'
|
||
+ values 'almalinux-9', 'centos-stream-9', 'oraclelinux-9', 'rocky-9'
|
||
+ }
|
||
+ }
|
||
+ stages {
|
||
+ stage('Create and Configure Machines') {
|
||
+ environment {
|
||
+ CONFIG = "${CONF_FILTER}"
|
||
+ }
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ sh script: 'vagrant destroy -f $SOURCE_DISTRO',
|
||
+ label: 'Make sure no machine present from the last retry'
|
||
+ sh script: 'vagrant up $SOURCE_DISTRO',
|
||
+ label: 'Create the source machines'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('ELevate to the all target distros') {
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf config-manager --add-repo https://repo.almalinux.org/elevate/testing/elevate-testing.repo\"',
|
||
+ label: 'Add the ELevate Testing RPM repository'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo dnf install -y wget\"",
|
||
+ label: 'Install wget'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo wget https://build.almalinux.org/pulp/content/copr/eabdullin1-leapp-data-internal-almalinux-8-x86_64-dr/config.repo -O /etc/yum.repos.d/internal-leapp.repo\"",
|
||
+ label: 'Add pulp repository'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo sed -i 's|enabled=1|enabled=1\\npriority=80|' /etc/yum.repos.d/internal-leapp.repo\"",
|
||
+ label: 'Set priority for pulp repository'
|
||
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf install -y leapp-upgrade\"',
|
||
+ label: 'Install the leap rpm package'
|
||
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo bash /vagrant/scripts/install_elevate_dev.sh\"',
|
||
+ label: 'Install Development version of ELevate',
|
||
+ returnStatus: true
|
||
+ script {
|
||
+ def LEAPP_DATA = getLeappDataDistro(TARGET_DISTRO)
|
||
+ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo mkdir -p /etc/leapp/files/vendors.d\"",
|
||
+ label:'Create the LEAPP directory')
|
||
+ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo install -t /etc/leapp/files /vagrant/leapp-data/files/${LEAPP_DATA}/*\"",
|
||
+ label:"Install the LEAPP DATA")
|
||
+ sh(script:'vagrant ssh $SOURCE_DISTRO -c \"sudo install -t /etc/leapp/files/vendors.d /vagrant/leapp-data/vendors.d/*\"',
|
||
+ label:"Install the Vendor DATA")
|
||
+ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo mv -f /etc/leapp/files/leapp_upgrade_repositories.repo.el9 /etc/leapp/files/leapp_upgrade_repositories.repo\"",
|
||
+ label:'Set LEAPP Repos for EL8')
|
||
+ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo mv -f /etc/leapp/files/repomap.json.el9 /etc/leapp/files/repomap.json\"",
|
||
+ label:'Set LEAPP Repo map for EL8')
|
||
+ sh(script:'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf -y install tree && sudo tree -ha /etc/leapp\"',
|
||
+ label:"Debug: Data paths")
|
||
+ }
|
||
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp preupgrade\"',
|
||
+ label: 'Start the Pre-Upgrade check',
|
||
+ returnStatus: true
|
||
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"',
|
||
+ label: 'Permit ssh as root login'
|
||
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"',
|
||
+ label: 'Answer the LEAP question'
|
||
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp upgrade\"',
|
||
+ label: 'Start the Upgrade'
|
||
+ sh script: 'vagrant reload $SOURCE_DISTRO',
|
||
+ label: 'Reboot to the ELevate initramfs'
|
||
+ sh script: 'vagrant ssh-config $SOURCE_DISTRO >> .vagrant/ssh-config',
|
||
+ label: 'Generate the ssh-config file'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('Distro Tests') {
|
||
+ when {
|
||
+ anyOf {
|
||
+ expression { params.CONF_FILTER == 'minimal'}
|
||
+ expression { params.CONF_FILTER == 'docker-ce'}
|
||
+ }
|
||
+ }
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ sh script: 'py.test -v --hosts=$SOURCE_DISTRO --ssh-config=.vagrant/ssh-config --junit-xml tests/distro/test_osinfo_$SOURCE_DISTRO-junit.xml tests/distro/test_osinfo_$SOURCE_DISTRO.py',
|
||
+ label: 'Run the distro specific tests'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('Docker Tests') {
|
||
+ when {
|
||
+ anyOf {
|
||
+ expression { params.CONF_FILTER == 'docker-ce'}
|
||
+ }
|
||
+ }
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ sh script: 'py.test -v --hosts=$SOURCE_DISTRO --ssh-config=.vagrant/ssh-config --junit-xml tests/docker/test_docker_ce_$SOURCE_DISTRO-junit.xml tests/docker/test_docker_ce.py',
|
||
+ label: 'Run the distro specific tests'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ post {
|
||
+ success {
|
||
+ junit testResults: '**/tests/**/**-junit.xml',
|
||
+ skipPublishingChecks: true
|
||
+ }
|
||
+ cleanup {
|
||
+ sh script: 'vagrant destroy -f',
|
||
+ label: 'Destroy All Machines'
|
||
+ cleanWs()
|
||
+ }
|
||
+ }
|
||
+}
|
||
+
|
||
+/*
|
||
+* Common Functions
|
||
+*/
|
||
+def getLeappDataDistro(TARGET_DISTRO) {
|
||
+ def leapp_data = ""
|
||
+
|
||
+ switch(TARGET_DISTRO) {
|
||
+ case "almalinux-9":
|
||
+ leapp_data = TARGET_DISTRO.substring(0, 9)
|
||
+ break
|
||
+
|
||
+ case "centos-stream-9":
|
||
+ leapp_data = TARGET_DISTRO.substring(0, 6)
|
||
+ break
|
||
+
|
||
+ case "oraclelinux-9":
|
||
+ leapp_data = TARGET_DISTRO.substring(0, 11)
|
||
+ break
|
||
+
|
||
+ case "rocky-9":
|
||
+ leapp_data = TARGET_DISTRO.substring(0, 5)
|
||
+ break
|
||
+
|
||
+ default:
|
||
+ leap_data = "Error: Target Distro Not Supported"
|
||
+ break
|
||
+ }
|
||
+ return leapp_data
|
||
+}
|
||
diff --git a/ci/jenkins/ELevate_el8toel9_Stable.jenkinsfile b/ci/jenkins/ELevate_el8toel9_Stable.jenkinsfile
|
||
new file mode 100644
|
||
index 00000000..68f00165
|
||
--- /dev/null
|
||
+++ b/ci/jenkins/ELevate_el8toel9_Stable.jenkinsfile
|
||
@@ -0,0 +1,212 @@
|
||
+RETRY = params.RETRY
|
||
+TIMEOUT = params.TIMEOUT
|
||
+
|
||
+pipeline {
|
||
+ agent {
|
||
+ label 'x86_64 && bm'
|
||
+ }
|
||
+ options {
|
||
+ timestamps()
|
||
+ parallelsAlwaysFailFast()
|
||
+ }
|
||
+ parameters {
|
||
+ // choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'centos-stream-9', 'rocky-9', 'all'], description: 'Select a target distro or all for ELevation')
|
||
+ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'rocky-9', 'all'], description: 'Select a target distro or all for ELevation')
|
||
+ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration')
|
||
+ string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true)
|
||
+ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true)
|
||
+ }
|
||
+ environment {
|
||
+ VAGRANT_NO_COLOR = '1'
|
||
+ }
|
||
+ stages {
|
||
+ stage('Prepare') {
|
||
+ steps {
|
||
+ sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml',
|
||
+ label: 'Install Ansible collections'
|
||
+ sh script: 'python3.11 -m venv .venv',
|
||
+ label: 'Create Python virtual environment'
|
||
+ sh script: '. .venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko',
|
||
+ label: 'Install Testinfra'
|
||
+ }
|
||
+ }
|
||
+ stage('CreateSingleMachine') {
|
||
+ when {
|
||
+ expression { params.TARGET_DISTRO_FILTER != 'all' }
|
||
+ }
|
||
+ environment {
|
||
+ CONFIG = "${CONF_FILTER}"
|
||
+ }
|
||
+ steps {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER)
|
||
+
|
||
+ sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile',
|
||
+ label: 'Generate Vagrantfile'
|
||
+ sh script: "vagrant up $targetDistro.vmName",
|
||
+ label: 'Create source VM'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('CreateMultiMachine') {
|
||
+ when {
|
||
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
|
||
+ }
|
||
+ environment {
|
||
+ CONFIG = "${CONF_FILTER}"
|
||
+ }
|
||
+ steps {
|
||
+ sh script: 'cp ci/vagrant/el8toel9_multi.rb Vagrantfile',
|
||
+ label: 'Generate Vagrantfile'
|
||
+ sh script: 'vagrant up',
|
||
+ label: 'Create source VM'
|
||
+ }
|
||
+ }
|
||
+ stage('ELevationAndTest') {
|
||
+ matrix {
|
||
+ when {
|
||
+ anyOf {
|
||
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
|
||
+ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO }
|
||
+ }
|
||
+ }
|
||
+ axes {
|
||
+ axis {
|
||
+ name 'TARGET_DISTRO'
|
||
+ // values 'almalinux-9', 'centos-stream-9', 'rocky-9'
|
||
+ values 'almalinux-9', 'rocky-9'
|
||
+ }
|
||
+ }
|
||
+ stages {
|
||
+ stage('ELevate') {
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
|
||
+
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo dnf install -y https://repo.almalinux.org/elevate/elevate-release-latest-el8.noarch.rpm\"",
|
||
+ label: 'Install the elevate-release-latest rpm packages for EL8'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo dnf install -y leapp-upgrade\"",
|
||
+ label: 'Install the leap rpm package'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo dnf install -y $targetDistro.leappData\"",
|
||
+ label: 'Install the LEAP migration data rpm packages'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"",
|
||
+ label: 'Start the Pre-Upgrade check',
|
||
+ returnStatus: true
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo sed -i \'s/^AllowZoneDrifting=.*/AllowZoneDrifting=no/\' /etc/firewalld/firewalld.conf\"",
|
||
+ label: 'TODO'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section check_vdo.no_vdo_devices=True\"",
|
||
+ label: 'TODO'
|
||
+ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"",
|
||
+ label: 'Start the Upgrade'
|
||
+ sh script: "vagrant reload $targetDistro.vmName",
|
||
+ label: 'Reboot to the ELevate initramfs'
|
||
+ sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config",
|
||
+ label: 'Generate the ssh-config file'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('Distro Tests') {
|
||
+ when {
|
||
+ anyOf {
|
||
+ expression { params.CONF_FILTER == 'minimal' }
|
||
+ expression { params.CONF_FILTER == 'docker-ce' }
|
||
+ }
|
||
+ }
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
|
||
+
|
||
+ sh script: 'rm -f conftest.py pytest.ini',
|
||
+ label: 'Delete root conftest.py file'
|
||
+ sh script: """
|
||
+ . .venv/bin/activate \
|
||
+ && py.test -v --hosts=${targetDistro.vmName} \
|
||
+ --ssh-config=.vagrant/ssh-config \
|
||
+ --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \
|
||
+ ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py
|
||
+ """,
|
||
+ label: 'Run the distro specific tests'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('Docker Tests') {
|
||
+ when {
|
||
+ anyOf {
|
||
+ expression { params.CONF_FILTER == 'docker-ce' }
|
||
+ }
|
||
+ }
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ script {
|
||
+ def targetDistro = targetDistroSpec(TARGET_DISTRO)
|
||
+
|
||
+ sh script: """
|
||
+ . .venv/bin/activate \
|
||
+ && py.test -v --hosts=${targetDistro.vmName} \
|
||
+ --ssh-config=.vagrant/ssh-config \
|
||
+ --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \
|
||
+ ci/tests/tests/docker/test_docker_ce.py
|
||
+ """,
|
||
+ label: 'Run the docker specific tests'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ post {
|
||
+ success {
|
||
+ junit testResults: 'ci/tests/tests/**/**_junit.xml',
|
||
+ skipPublishingChecks: true
|
||
+ }
|
||
+ cleanup {
|
||
+ sh script: 'vagrant destroy -f --no-parallel -g',
|
||
+ label: 'Destroy VMs'
|
||
+ cleanWs()
|
||
+ }
|
||
+ }
|
||
+}
|
||
+
|
||
+def targetDistroSpec(distro) {
|
||
+ def spec = [:]
|
||
+
|
||
+ switch (distro) {
|
||
+ case 'almalinux-9':
|
||
+ vm = 'almalinux_9'
|
||
+ ldata = 'leapp-data-almalinux'
|
||
+
|
||
+ spec = [
|
||
+ vmName: vm,
|
||
+ leappData: ldata
|
||
+ ]
|
||
+ break
|
||
+ case 'rocky-9':
|
||
+ vm = 'rocky_9'
|
||
+ ldata = 'leapp-data-rocky'
|
||
+
|
||
+ spec = [
|
||
+ vmName: vm,
|
||
+ leappData: ldata
|
||
+ ]
|
||
+ break
|
||
+ default:
|
||
+ spec = [
|
||
+ vmName: 'unknown',
|
||
+ leappData: 'unknown'
|
||
+ ]
|
||
+ break
|
||
+ }
|
||
+ return spec
|
||
+}
|
||
diff --git a/ci/jenkins/ELevate_el8toel9_Testing.jenkinsfile b/ci/jenkins/ELevate_el8toel9_Testing.jenkinsfile
|
||
new file mode 100644
|
||
index 00000000..79cdd472
|
||
--- /dev/null
|
||
+++ b/ci/jenkins/ELevate_el8toel9_Testing.jenkinsfile
|
||
@@ -0,0 +1,187 @@
|
||
+RETRY = params.RETRY
|
||
+TIMEOUT = params.TIMEOUT
|
||
+
|
||
+pipeline {
|
||
+ agent {
|
||
+ label params.AGENT
|
||
+ }
|
||
+ options {
|
||
+ timestamps()
|
||
+ }
|
||
+ parameters {
|
||
+ string(name: 'AGENT', defaultValue: 'almalinux-8-vagrant-libvirt-x86_64', description: 'Input label of the Jenkins Agent', trim: true)
|
||
+ string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true)
|
||
+ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true)
|
||
+ string(name: 'REPO_URL', defaultValue: 'https://github.com/LKHN/el-test-auto-dev.git', description: 'URL of the pipeline repository', trim: true)
|
||
+ string(name: 'REPO_BRANCH', defaultValue: 'main', description: 'Branch of the pipeline repository', trim: true)
|
||
+ choice(name: 'SOURCE_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a source distro or all for ELevation')
|
||
+ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'centos-stream-9', 'oraclelinux-9', 'rocky-9', 'all'], description: 'Select a target distro or all to ELevation')
|
||
+ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration')
|
||
+ }
|
||
+ stages {
|
||
+ stage('Source') {
|
||
+ steps {
|
||
+ git url: REPO_URL,
|
||
+ branch: REPO_BRANCH,
|
||
+ credentialsId: 'github-almalinuxautobot'
|
||
+ }
|
||
+ }
|
||
+        stage('Prepare Build and Test environment') {
|
||
+ steps {
|
||
+ sh script: 'cp Vagrantfile.el8toel9 Vagrantfile',
|
||
+ label: 'Generate the el8toel9 Vagrantfile'
|
||
+ sh script: 'sudo dnf -y install python39-devel python39-wheel',
|
||
+ label: 'Install Python 3.9, PIP and Wheel'
|
||
+ sh script: 'sudo python3 -m pip install --no-cache-dir --upgrade -r requirements.txt',
|
||
+ label: 'Install TestInfra'
|
||
+ }
|
||
+ }
|
||
+ stage('ELevation') {
|
||
+ matrix {
|
||
+ when {
|
||
+ allOf {
|
||
+ anyOf {
|
||
+ expression { params.SOURCE_DISTRO_FILTER == 'all' }
|
||
+ expression { params.SOURCE_DISTRO_FILTER == env.SOURCE_DISTRO }
|
||
+ }
|
||
+ anyOf {
|
||
+ expression { params.TARGET_DISTRO_FILTER == 'all' }
|
||
+ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ axes {
|
||
+ axis {
|
||
+ name 'SOURCE_DISTRO'
|
||
+ values 'almalinux-8', 'centos-stream-8', 'oraclelinux-8', 'rocky-8'
|
||
+ }
|
||
+ axis {
|
||
+ name 'TARGET_DISTRO'
|
||
+ values 'almalinux-9', 'centos-stream-9', 'oraclelinux-9', 'rocky-9'
|
||
+ }
|
||
+ }
|
||
+ stages {
|
||
+ stage('Create and Configure Machines') {
|
||
+ environment {
|
||
+ CONFIG = "${CONF_FILTER}"
|
||
+ }
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ sh script: 'vagrant destroy -f $SOURCE_DISTRO',
|
||
+ label: 'Make sure no machine present from the last retry'
|
||
+ sh script: 'vagrant up $SOURCE_DISTRO',
|
||
+ label: 'Create the source machines'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('ELevate to the all target distros') {
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf config-manager --add-repo https://repo.almalinux.org/elevate/testing/elevate-testing.repo\"',
|
||
+ label: 'Add the ELevate Testing RPM repository'
|
||
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf -y install leapp-upgrade\"',
|
||
+                                    label: 'Install the leapp rpm package'
|
||
+ script {
|
||
+ def LEAPP_DATA = getLeappDataDistro(TARGET_DISTRO)
|
||
+ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo dnf -y install leapp-data-$LEAPP_DATA\"",
|
||
+ label:'Install the LEAP migration data rpm packages')
|
||
+ sh(script:'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf -y install tree && sudo tree -ha /etc/leapp\"',
|
||
+ label:'Debug: Data paths')
|
||
+ }
|
||
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp preupgrade\"',
|
||
+ label: 'Start the Pre-Upgrade check',
|
||
+ returnStatus: true
|
||
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"',
|
||
+ label: 'Permit ssh as root login'
|
||
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"',
|
||
+ label: 'Answer the LEAP question'
|
||
+ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp upgrade\"',
|
||
+ label: 'Start the Upgrade'
|
||
+ sh script: 'vagrant reload $SOURCE_DISTRO',
|
||
+ label: 'Reboot to the ELevate initramfs'
|
||
+ sh script: 'vagrant ssh-config $SOURCE_DISTRO >> .vagrant/ssh-config',
|
||
+ label: 'Generate the ssh-config file'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('Distro Tests') {
|
||
+ when {
|
||
+ anyOf {
|
||
+ expression { params.CONF_FILTER == 'minimal'}
|
||
+ expression { params.CONF_FILTER == 'docker-ce'}
|
||
+ }
|
||
+ }
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ sh script: 'py.test -v --hosts=$SOURCE_DISTRO --ssh-config=.vagrant/ssh-config --junit-xml tests/distro/test_osinfo_$TARGET_DISTRO-junit.xml tests/distro/test_osinfo_$TARGET_DISTRO.py',
|
||
+ label: 'Run the distro specific tests'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ stage('Docker Tests') {
|
||
+ when {
|
||
+ anyOf {
|
||
+ expression { params.CONF_FILTER == 'docker-ce'}
|
||
+ }
|
||
+ }
|
||
+ steps {
|
||
+ retry(RETRY) {
|
||
+ timeout(time: TIMEOUT, unit: 'MINUTES') {
|
||
+ sh script: 'py.test -v --hosts=$SOURCE_DISTRO --ssh-config=.vagrant/ssh-config --junit-xml tests/docker/test_docker_ce_$SOURCE_DISTRO-junit.xml tests/docker/test_docker_ce.py',
|
||
+                                    label: 'Run the docker specific tests'
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ post {
|
||
+ success {
|
||
+ junit testResults: '**/tests/**/**-junit.xml',
|
||
+ skipPublishingChecks: true
|
||
+ }
|
||
+ cleanup {
|
||
+ sh script: 'vagrant destroy -f',
|
||
+ label: 'Destroy All Machines'
|
||
+ cleanWs()
|
||
+ }
|
||
+ }
|
||
+}
|
||
+
|
||
+/*
|
||
+* Common Functions
|
||
+*/
|
||
+def getLeappDataDistro(TARGET_DISTRO) {
|
||
+ def leapp_data = ""
|
||
+
|
||
+ switch(TARGET_DISTRO) {
|
||
+ case "almalinux-9":
|
||
+ leapp_data = TARGET_DISTRO.substring(0, 9)
|
||
+ break
|
||
+
|
||
+ case "centos-stream-9":
|
||
+ leapp_data = TARGET_DISTRO.substring(0, 6)
|
||
+ break
|
||
+
|
||
+ case "oraclelinux-9":
|
||
+ leapp_data = TARGET_DISTRO.substring(0, 11)
|
||
+ break
|
||
+
|
||
+ case "rocky-9":
|
||
+ leapp_data = TARGET_DISTRO.substring(0, 5)
|
||
+ break
|
||
+
|
||
+ default:
|
||
+            leapp_data = "Error: Target Distro Not Supported"
|
||
+ break
|
||
+ }
|
||
+ return leapp_data
|
||
+}
|
||
diff --git a/ci/scripts/install_elevate_dev.sh b/ci/scripts/install_elevate_dev.sh
|
||
new file mode 100644
|
||
index 00000000..f9cc2903
|
||
--- /dev/null
|
||
+++ b/ci/scripts/install_elevate_dev.sh
|
||
@@ -0,0 +1,117 @@
|
||
+#!/usr/bin/env bash
|
||
+
|
||
+USER='AlmaLinux'
|
||
+BRANCH='almalinux'
|
||
+
|
||
+show_usage() {
|
||
+    echo 'Usage: install_elevate_dev.sh [OPTION]...'
|
||
+ echo ''
|
||
+ echo ' -h, --help show this message and exit'
|
||
+ echo ' -u, --user github user name (default: AlmaLinux)'
|
||
+ echo ' -b, --branch github branch name (default: almalinux)'
|
||
+}
|
||
+
|
||
+while [[ $# -gt 0 ]]; do
|
||
+ opt="$1"
|
||
+ case ${opt} in
|
||
+ -h|--help)
|
||
+ show_usage
|
||
+ exit 0
|
||
+ ;;
|
||
+ -u|--user)
|
||
+ USER="$2"
|
||
+ shift
|
||
+ shift
|
||
+ ;;
|
||
+ -b|--branch)
|
||
+ BRANCH="$2"
|
||
+ shift
|
||
+ shift
|
||
+ ;;
|
||
+ *)
|
||
+ echo -e "Error: unknown option ${opt}" >&2
|
||
+ exit 2
|
||
+ ;;
|
||
+ esac
|
||
+done
|
||
+
|
||
+RHEL_MAJOR_VERSION=$(rpm --eval %rhel)
|
||
+WORK_DIR="$HOME"
|
||
+NEW_LEAPP_NAME="leapp-repository-$BRANCH"
|
||
+NEW_LEAPP_DIR="$WORK_DIR/$NEW_LEAPP_NAME/"
|
||
+LEAPP_PATH='/usr/share/leapp-repository/repositories/'
|
||
+LEAPP_GPG_PATH='/etc/leapp/repos.d/system_upgrade/common/files/rpm-gpg'
|
||
+EXCLUDE_PATH='
|
||
+/usr/share/leapp-repository/repositories/system_upgrade/el7toel8/files/bundled-rpms
|
||
+/usr/share/leapp-repository/repositories/system_upgrade/el7toel8/files
|
||
+/usr/share/leapp-repository/repositories/system_upgrade/el7toel8
|
||
+/usr/share/leapp-repository/repositories/system_upgrade/el8toel9/files/bundled-rpms
|
||
+/usr/share/leapp-repository/repositories/system_upgrade/el8toel9/files
|
||
+/usr/share/leapp-repository/repositories/system_upgrade/el8toel9
|
||
+/usr/share/leapp-repository/repositories/system_upgrade
|
||
+/usr/share/leapp-repository/repositories/
|
||
+'
|
||
+
|
||
+
|
||
+echo "RHEL_MAJOR_VERSION=$RHEL_MAJOR_VERSION"
|
||
+echo "WORK_DIR=$WORK_DIR"
|
||
+echo "EXCLUDED_PATHS=$EXCLUDE_PATH"
|
||
+
|
||
+echo "Preserve GPG keys if any"
|
||
+for major in 8 9; do
|
||
+ test -e ${LEAPP_GPG_PATH}/${major} && mv ${LEAPP_GPG_PATH}/${major} ${WORK_DIR}/
|
||
+done
|
||
+
|
||
+
|
||
+echo 'Remove old files'
|
||
+for dir in $(find $LEAPP_PATH -type d);
|
||
+do
|
||
+ skip=0
|
||
+ for exclude in $(echo $EXCLUDE_PATH);
|
||
+ do
|
||
+ if [[ $exclude == $dir ]];then
|
||
+ skip=1
|
||
+ break
|
||
+ fi
|
||
+ done
|
||
+ if [ $skip -eq 0 ];then
|
||
+ rm -rf $dir
|
||
+ fi
|
||
+done
|
||
+
|
||
+echo "Download new tarball from https://github.com/$USER/leapp-repository/archive/$BRANCH/leapp-repository-$BRANCH.tar.gz"
|
||
+curl -s -L https://github.com/$USER/leapp-repository/archive/$BRANCH/leapp-repository-$BRANCH.tar.gz | tar -xmz -C $WORK_DIR/ || exit 1
|
||
+
|
||
+echo 'Deleting files as in spec file'
|
||
+rm -rf $NEW_LEAPP_DIR/repos/common/actors/testactor
|
||
+find $NEW_LEAPP_DIR/repos/common -name "test.py" -delete
|
||
+rm -rf `find $NEW_LEAPP_DIR -name "tests" -type d`
|
||
+find $NEW_LEAPP_DIR -name "Makefile" -delete
|
||
+if [ $RHEL_MAJOR_VERSION -eq '7' ]; then
|
||
+ rm -rf $NEW_LEAPP_DIR/repos/system_upgrade/el8toel9
|
||
+else
|
||
+ rm -rf $NEW_LEAPP_DIR/repos/system_upgrade/el7toel8
|
||
+ rm -rf $NEW_LEAPP_DIR/repos/system_upgrade/cloudlinux
|
||
+fi
|
||
+
|
||
+echo 'Copy new data to system'
|
||
+cp -r $NEW_LEAPP_DIR/repos/* $LEAPP_PATH || exit 1
|
||
+
|
||
+for DIRECTORY in $(find $LEAPP_PATH -mindepth 1 -maxdepth 1 -type d);
|
||
+do
|
||
+ REPOSITORY=$(basename $DIRECTORY)
|
||
+ if ! [ -e /etc/leapp/repos.d/$REPOSITORY ];then
|
||
+ echo "Enabling repository $REPOSITORY"
|
||
+ ln -s $LEAPP_PATH/$REPOSITORY /etc/leapp/repos.d/$REPOSITORY || exit 1
|
||
+ fi
|
||
+done
|
||
+
|
||
+echo "Restore GPG keys if any"
|
||
+for major in 8 9; do
|
||
+ rm -rf ${LEAPP_GPG_PATH}/${major}
|
||
+ test -e ${WORK_DIR}/${major} && mv ${WORK_DIR}/${major} ${LEAPP_GPG_PATH}/
|
||
+done
|
||
+
|
||
+rm -rf $NEW_LEAPP_DIR
|
||
+
|
||
+exit 0
|
||
diff --git a/ci/tests/tests/conftest.py b/ci/tests/tests/conftest.py
|
||
new file mode 100644
|
||
index 00000000..01f9443e
|
||
--- /dev/null
|
||
+++ b/ci/tests/tests/conftest.py
|
||
@@ -0,0 +1,52 @@
|
||
+import pytest
|
||
+import re
|
||
+
|
||
+
|
||
+@pytest.fixture(scope="module")
|
||
+def get_os_release(host):
|
||
+ """Get content of the /etc/os-release"""
|
||
+ os_release = host.file("/etc/os-release")
|
||
+ return os_release
|
||
+
|
||
+
|
||
+@pytest.fixture(scope="module")
|
||
+def get_redhat_release(host):
|
||
+ """Get content of the /etc/redhat-release"""
|
||
+ redhat_release = host.file("/etc/redhat-release")
|
||
+ return redhat_release
|
||
+
|
||
+
|
||
+@pytest.fixture(scope="module")
|
||
+def get_kernel_info(host):
|
||
+ """Get kernel version and vendor information"""
|
||
+ kernel_ver_pattern = re.compile(
|
||
+        r".*(^[0-9][0-9]?[0-9]?\.[0-9][0-9]?[0-9]?\.[0-9][0-9]?[0-9]?).*"
|
||
+ )
|
||
+ kernel_ver_output = host.check_output("uname -r")
|
||
+ kernel_version = kernel_ver_pattern.match(kernel_ver_output).group(1)
|
||
+
|
||
+ with host.sudo():
|
||
+ kernel_vendor = host.check_output(
|
||
+ "grep -Ei '(.*kernel signing key|.*CA Server|.*Build)' /proc/keys | sed -E"
|
||
+ " 's/ +/:/g' | cut -d ':' -f 9 | uniq"
|
||
+ )
|
||
+ kernel_info = (kernel_version, kernel_vendor)
|
||
+ return kernel_info
|
||
+
|
||
+
|
||
+@pytest.fixture(scope="module", params=["glibc", "systemd", "coreutils", "rpm"])
|
||
+def get_pkg_info(host, request):
|
||
+ """Get vendor and version of installed packages"""
|
||
+ pkg_name = request.param
|
||
+ pkg_vendor = host.check_output(
|
||
+ f"rpm -qa --queryformat \"%{{VENDOR}}\n\" {request.param} | sed '$p;d' "
|
||
+ )
|
||
+ pkg_version = host.check_output(
|
||
+ f'rpm -qa --queryformat "%{{VERSION}}\n" {request.param} | sort -n | sed'
|
||
+ " '$p;d'"
|
||
+ )
|
||
+ pkg_info = (pkg_name, pkg_vendor, pkg_version)
|
||
+ # print(pkg_name)
|
||
+ # print(pkg_vendor)
|
||
+ # print(pkg_version)
|
||
+ return pkg_info
|
||
diff --git a/ci/tests/tests/distro/test_osinfo_almalinux_8.py b/ci/tests/tests/distro/test_osinfo_almalinux_8.py
|
||
new file mode 100644
|
||
index 00000000..c5219b35
|
||
--- /dev/null
|
||
+++ b/ci/tests/tests/distro/test_osinfo_almalinux_8.py
|
||
@@ -0,0 +1,43 @@
|
||
+import pytest
|
||
+
|
||
+
|
||
+@pytest.mark.usefixtures("get_os_release")
|
||
+class TestOSRelease:
|
||
+ """Test values of NAME, ID and VERSION_ID"""
|
||
+
|
||
+ def test_os_rel_name(self, get_os_release):
|
||
+ assert get_os_release.contains('NAME="AlmaLinux"')
|
||
+
|
||
+ def test_os_rel_id(self, get_os_release):
|
||
+ assert get_os_release.contains('ID="almalinux"')
|
||
+
|
||
+ def test_os_rel_version_id(self, get_os_release):
|
||
+ assert get_os_release.contains('VERSION_ID="8.*"')
|
||
+
|
||
+
|
||
+@pytest.mark.usefixtures("get_redhat_release")
|
||
+class TestRHRelease:
|
||
+ """Test contents of the /etc/redhat-release"""
|
||
+
|
||
+ def test_redhat_release(self, get_redhat_release):
|
||
+ assert get_redhat_release.contains("AlmaLinux release 8.*")
|
||
+
|
||
+
|
||
+@pytest.mark.usefixtures("get_pkg_info")
|
||
+class TestPkgInfo:
|
||
+ """Test vendor and version of packages"""
|
||
+
|
||
+ def test_pkg_vendor(self, get_pkg_info):
|
||
+ assert get_pkg_info[1] == "AlmaLinux"
|
||
+
|
||
+ def test_pkg_version(self, get_pkg_info):
|
||
+ if get_pkg_info[0] == "kernel":
|
||
+ assert get_pkg_info[2] == "4.18.0"
|
||
+ elif get_pkg_info[0] == "glibc":
|
||
+ assert get_pkg_info[2] == "2.28"
|
||
+ elif get_pkg_info[0] == "systemd":
|
||
+ assert get_pkg_info[2] == "239"
|
||
+ elif get_pkg_info[0] == "coreutils":
|
||
+ assert get_pkg_info[2] == "8.30"
|
||
+ else:
|
||
+ assert get_pkg_info[2] == "4.14.3"
|
||
diff --git a/ci/tests/tests/distro/test_osinfo_almalinux_9.py b/ci/tests/tests/distro/test_osinfo_almalinux_9.py
|
||
new file mode 100644
|
||
index 00000000..1536e52b
|
||
--- /dev/null
|
||
+++ b/ci/tests/tests/distro/test_osinfo_almalinux_9.py
|
||
@@ -0,0 +1,52 @@
|
||
+import pytest
|
||
+
|
||
+
|
||
+@pytest.mark.usefixtures("get_os_release")
|
||
+class TestOSRelease:
|
||
+ """Test values of NAME, ID and VERSION_ID"""
|
||
+
|
||
+ def test_os_rel_name(self, get_os_release):
|
||
+ assert get_os_release.contains('NAME="AlmaLinux"')
|
||
+
|
||
+ def test_os_rel_id(self, get_os_release):
|
||
+ assert get_os_release.contains('ID="almalinux"')
|
||
+
|
||
+ def test_os_rel_version_id(self, get_os_release):
|
||
+ assert get_os_release.contains('VERSION_ID="9.*"')
|
||
+
|
||
+
|
||
+@pytest.mark.usefixtures("get_redhat_release")
|
||
+class TestRHRelease:
|
||
+ """Test contents of the /etc/redhat-release"""
|
||
+
|
||
+ def test_redhat_release(self, get_redhat_release):
|
||
+ assert get_redhat_release.contains("AlmaLinux release 9.*")
|
||
+
|
||
+
|
||
+@pytest.mark.usefixtures("get_kernel_info")
|
||
+class TestKernelInfo:
|
||
+ """Test version and vendor of running kernel"""
|
||
+
|
||
+ def test_kernel_version(self, get_kernel_info):
|
||
+ assert get_kernel_info[0] == "5.14.0"
|
||
+
|
||
+ def test_kernel_vendor(self, get_kernel_info):
|
||
+ assert get_kernel_info[1] == "AlmaLinux"
|
||
+
|
||
+
|
||
+@pytest.mark.usefixtures("get_pkg_info")
|
||
+class TestPkgInfo:
|
||
+ """Test vendor and version of packages"""
|
||
+
|
||
+ def test_pkg_vendor(self, get_pkg_info):
|
||
+ assert get_pkg_info[1] == "AlmaLinux"
|
||
+
|
||
+ def test_pkg_version(self, get_pkg_info):
|
||
+ if get_pkg_info[0] == "glibc":
|
||
+ assert get_pkg_info[2] == "2.34"
|
||
+ elif get_pkg_info[0] == "systemd":
|
||
+ assert get_pkg_info[2] == "252"
|
||
+ elif get_pkg_info[0] == "coreutils":
|
||
+ assert get_pkg_info[2] == "8.32"
|
||
+ else:
|
||
+ assert get_pkg_info[2] == "4.16.1.3"
|
||
diff --git a/ci/tests/tests/distro/test_osinfo_centosstream_8.py b/ci/tests/tests/distro/test_osinfo_centosstream_8.py
|
||
new file mode 100644
|
||
index 00000000..995ae61e
|
||
--- /dev/null
|
||
+++ b/ci/tests/tests/distro/test_osinfo_centosstream_8.py
|
||
@@ -0,0 +1,23 @@
|
||
+import pytest
|
||
+
|
||
+
|
||
+@pytest.mark.usefixtures("get_os_release")
|
||
+class TestOSRelease:
|
||
+ """Test values of NAME, ID and VERSION_ID"""
|
||
+
|
||
+ def test_os_rel_name(self, get_os_release):
|
||
+ assert get_os_release.contains('NAME="CentOS Stream"')
|
||
+
|
||
+ def test_os_rel_id(self, get_os_release):
|
||
+ assert get_os_release.contains('ID="centos"')
|
||
+
|
||
+ def test_os_rel_version_id(self, get_os_release):
|
||
+ assert get_os_release.contains('VERSION_ID="8"')
|
||
+
|
||
+
|
||
+@pytest.mark.usefixtures("get_redhat_release")
|
||
+class TestRHRelease:
|
||
+ """Test contents of the /etc/redhat-release"""
|
||
+
|
||
+ def test_redhat_release(self, get_redhat_release):
|
||
+ assert get_redhat_release.contains("CentOS Stream release 8")
|
||
diff --git a/ci/tests/tests/distro/test_osinfo_centosstream_9.py b/ci/tests/tests/distro/test_osinfo_centosstream_9.py
|
||
new file mode 100644
|
||
index 00000000..28e47202
|
||
--- /dev/null
|
||
+++ b/ci/tests/tests/distro/test_osinfo_centosstream_9.py
|
||
@@ -0,0 +1,23 @@
|
||
+import pytest
|
||
+
|
||
+
|
||
+@pytest.mark.usefixtures("get_os_release")
|
||
+class TestOSRelease:
|
||
+ """Test values of NAME, ID and VERSION_ID"""
|
||
+
|
||
+ def test_os_rel_name(self, get_os_release):
|
||
+ assert get_os_release.contains('NAME="CentOS Stream"')
|
||
+
|
||
+ def test_os_rel_id(self, get_os_release):
|
||
+ assert get_os_release.contains('ID="centos"')
|
||
+
|
||
+ def test_os_rel_version_id(self, get_os_release):
|
||
+ assert get_os_release.contains('VERSION_ID="9"')
|
||
+
|
||
+
|
||
+@pytest.mark.usefixtures("get_redhat_release")
|
||
+class TestRHRelease:
|
||
+ """Test contents of the /etc/redhat-release"""
|
||
+
|
||
+ def test_redhat_release(self, get_redhat_release):
|
||
+ assert get_redhat_release.contains("CentOS Stream release 9")
|
||
diff --git a/ci/tests/tests/distro/test_osinfo_oraclelinux_8.py b/ci/tests/tests/distro/test_osinfo_oraclelinux_8.py
|
||
new file mode 100644
|
||
index 00000000..2080fd2f
|
||
--- /dev/null
|
||
+++ b/ci/tests/tests/distro/test_osinfo_oraclelinux_8.py
|
||
@@ -0,0 +1,23 @@
|
||
+import pytest
|
||
+
|
||
+
|
||
+@pytest.mark.usefixtures("get_os_release")
|
||
+class TestOSRelease:
|
||
+ """Test values of NAME, ID and VERSION_ID"""
|
||
+
|
||
+ def test_os_rel_name(self, get_os_release):
|
||
+ assert get_os_release.contains('NAME="Oracle Linux Server"')
|
||
+
|
||
+ def test_os_rel_id(self, get_os_release):
|
||
+ assert get_os_release.contains('ID="ol"')
|
||
+
|
||
+ def test_os_rel_version_id(self, get_os_release):
|
||
+ assert get_os_release.contains('VERSION_ID="8.*"')
|
||
+
|
||
+
|
||
+@pytest.mark.usefixtures("get_redhat_release")
|
||
+class TestRHRelease:
|
||
+ """Test contents of the /etc/redhat-release"""
|
||
+
|
||
+ def test_redhat_release(self, get_redhat_release):
|
||
+ assert get_redhat_release.contains("Red Hat Enterprise Linux release 8.*")
|
||
diff --git a/ci/tests/tests/distro/test_osinfo_oraclelinux_9.py b/ci/tests/tests/distro/test_osinfo_oraclelinux_9.py
|
||
new file mode 100644
|
||
index 00000000..bd5044bb
|
||
--- /dev/null
|
||
+++ b/ci/tests/tests/distro/test_osinfo_oraclelinux_9.py
|
||
@@ -0,0 +1,23 @@
|
||
+import pytest
|
||
+
|
||
+
|
||
+@pytest.mark.usefixtures("get_os_release")
|
||
+class TestOSRelease:
|
||
+ """Test values of NAME, ID and VERSION_ID"""
|
||
+
|
||
+ def test_os_rel_name(self, get_os_release):
|
||
+ assert get_os_release.contains('NAME="Oracle Linux Server"')
|
||
+
|
||
+ def test_os_rel_id(self, get_os_release):
|
||
+ assert get_os_release.contains('ID="ol"')
|
||
+
|
||
+ def test_os_rel_version_id(self, get_os_release):
|
||
+ assert get_os_release.contains('VERSION_ID="9.*"')
|
||
+
|
||
+
|
||
+@pytest.mark.usefixtures("get_redhat_release")
|
||
+class TestRHRelease:
|
||
+ """Test contents of the /etc/redhat-release"""
|
||
+
|
||
+ def test_redhat_release(self, get_redhat_release):
|
||
+ assert get_redhat_release.contains("Red Hat Enterprise Linux release 9.*")
|
||
diff --git a/ci/tests/tests/distro/test_osinfo_rocky_8.py b/ci/tests/tests/distro/test_osinfo_rocky_8.py
|
||
new file mode 100644
|
||
index 00000000..cce5d668
|
||
--- /dev/null
|
||
+++ b/ci/tests/tests/distro/test_osinfo_rocky_8.py
|
||
@@ -0,0 +1,23 @@
|
||
+import pytest
|
||
+
|
||
+
|
||
+@pytest.mark.usefixtures("get_os_release")
|
||
+class TestOSRelease:
|
||
+ """Test values of NAME, ID and VERSION_ID"""
|
||
+
|
||
+ def test_os_rel_name(self, get_os_release):
|
||
+ assert get_os_release.contains('NAME="Rocky Linux"')
|
||
+
|
||
+ def test_os_rel_id(self, get_os_release):
|
||
+ assert get_os_release.contains('ID="rocky"')
|
||
+
|
||
+ def test_os_rel_version_id(self, get_os_release):
|
||
+ assert get_os_release.contains('VERSION_ID="8.*"')
|
||
+
|
||
+
|
||
+@pytest.mark.usefixtures("get_redhat_release")
|
||
+class TestRHRelease:
|
||
+ """Test contents of the /etc/redhat-release"""
|
||
+
|
||
+ def test_redhat_release(self, get_redhat_release):
|
||
+ assert get_redhat_release.contains("Rocky Linux release 8.*")
|
||
diff --git a/ci/tests/tests/distro/test_osinfo_rocky_9.py b/ci/tests/tests/distro/test_osinfo_rocky_9.py
|
||
new file mode 100644
|
||
index 00000000..ce8cccdb
|
||
--- /dev/null
|
||
+++ b/ci/tests/tests/distro/test_osinfo_rocky_9.py
|
||
@@ -0,0 +1,23 @@
|
||
+import pytest
|
||
+
|
||
+
|
||
+@pytest.mark.usefixtures("get_os_release")
|
||
+class TestOSRelease:
|
||
+ """Test values of NAME, ID and VERSION_ID"""
|
||
+
|
||
+ def test_os_rel_name(self, get_os_release):
|
||
+ assert get_os_release.contains('NAME="Rocky Linux"')
|
||
+
|
||
+ def test_os_rel_id(self, get_os_release):
|
||
+ assert get_os_release.contains('ID="rocky"')
|
||
+
|
||
+ def test_os_rel_version_id(self, get_os_release):
|
||
+ assert get_os_release.contains('VERSION_ID="9.*"')
|
||
+
|
||
+
|
||
+@pytest.mark.usefixtures("get_redhat_release")
|
||
+class TestRHRelease:
|
||
+ """Test contents of the /etc/redhat-release"""
|
||
+
|
||
+ def test_redhat_release(self, get_redhat_release):
|
||
+ assert get_redhat_release.contains("Rocky Linux release 9.*")
|
||
diff --git a/ci/tests/tests/docker/test_docker_ce.py b/ci/tests/tests/docker/test_docker_ce.py
|
||
new file mode 100644
|
||
index 00000000..3c2550c7
|
||
--- /dev/null
|
||
+++ b/ci/tests/tests/docker/test_docker_ce.py
|
||
@@ -0,0 +1,26 @@
|
||
+import pytest
|
||
+
|
||
+
|
||
+class TestDockerServices:
|
||
+ """Test docker and containerd services running and enabled"""
|
||
+
|
||
+ def test_docker_is_running(self, host):
|
||
+ assert host.service("docker.service").is_running
|
||
+
|
||
+ def test_containerd_is_running(self, host):
|
||
+ assert host.service("containerd.service").is_running
|
||
+
|
||
+ def test_docker_is_enabled(self, host):
|
||
+ assert host.service("docker.service").is_enabled
|
||
+
|
||
+ def test_containerd_is_enabled(self, host):
|
||
+ assert host.service("containerd.service").is_enabled
|
||
+
|
||
+
|
||
+class TestDockerWorking:
|
||
+ """Test docker working with the hello world container"""
|
||
+
|
||
+ def test_docker_is_working(self, host):
|
||
+ with host.sudo():
|
||
+ cmd = host.run("sudo docker run --rm hello-world")
|
||
+ assert cmd.succeeded
|
||
diff --git a/ci/vagrant/el7toel8_multi.rb b/ci/vagrant/el7toel8_multi.rb
|
||
new file mode 100644
|
||
index 00000000..a18da81d
|
||
--- /dev/null
|
||
+++ b/ci/vagrant/el7toel8_multi.rb
|
||
@@ -0,0 +1,40 @@
|
||
+# -*- mode: ruby -*-
|
||
+# vi: set ft=ruby :
|
||
+
|
||
+configuration = ENV['CONFIG']
|
||
+
|
||
+Vagrant.configure('2') do |config|
|
||
+ config.vagrant.plugins = 'vagrant-libvirt'
|
||
+
|
||
+ config.vm.synced_folder '.', '/vagrant', disabled: true
|
||
+ config.vm.box = 'generic/centos7'
|
||
+ config.vm.boot_timeout = 3600
|
||
+
|
||
+ config.vm.provider 'libvirt' do |v|
|
||
+ v.uri = 'qemu:///system'
|
||
+ v.memory = 4096
|
||
+ v.machine_type = 'q35'
|
||
+ v.cpu_mode = 'host-passthrough'
|
||
+ v.cpus = 2
|
||
+ v.disk_bus = 'scsi'
|
||
+ v.disk_driver cache: 'writeback', discard: 'unmap'
|
||
+ v.random_hostname = true
|
||
+ end
|
||
+
|
||
+ target_distros = ['almalinux', 'centosstream', 'oraclelinux', 'rocky']
|
||
+
|
||
+ target_distros.each do |target_distro|
|
||
+ config.vm.define "#{target_distro}_8" do |machine|
|
||
+ machine.vm.hostname = "#{target_distro}-8.test"
|
||
+
|
||
+ if target_distro == target_distros[-1]
|
||
+ machine.vm.provision 'ansible' do |ansible|
|
||
+ ansible.compatibility_mode = '2.0'
|
||
+ ansible.limit = 'all'
|
||
+ ansible.playbook = "ci/ansible/#{configuration}.yaml"
|
||
+ ansible.config_file = 'ci/ansible/ansible.cfg'
|
||
+ end
|
||
+ end
|
||
+ end
|
||
+ end
|
||
+end
|
||
diff --git a/ci/vagrant/el7toel8toel9_single.rb b/ci/vagrant/el7toel8toel9_single.rb
|
||
new file mode 100644
|
||
index 00000000..8cd05ac3
|
||
--- /dev/null
|
||
+++ b/ci/vagrant/el7toel8toel9_single.rb
|
||
@@ -0,0 +1,53 @@
|
||
+# -*- mode: ruby -*-
|
||
+# vi: set ft=ruby :
|
||
+
|
||
+configuration = ENV['CONFIG']
|
||
+
|
||
+Vagrant.configure('2') do |config|
|
||
+ config.vagrant.plugins = 'vagrant-libvirt'
|
||
+
|
||
+ config.vm.synced_folder '.', '/vagrant', disabled: true
|
||
+ config.ssh.disable_deprecated_algorithms = true
|
||
+ config.vm.boot_timeout = 3600
|
||
+
|
||
+ config.vm.provider 'libvirt' do |v|
|
||
+ v.uri = 'qemu:///system'
|
||
+ v.memory = 4096
|
||
+ v.machine_type = 'q35'
|
||
+ v.cpu_mode = 'host-passthrough'
|
||
+ v.cpus = 2
|
||
+ v.disk_bus = 'scsi'
|
||
+ v.disk_driver cache: 'writeback', discard: 'unmap'
|
||
+ v.random_hostname = true
|
||
+ end
|
||
+
|
||
+ # EL7toEL8
|
||
+ target_distros = ['almalinux', 'centosstream', 'oraclelinux', 'rocky']
|
||
+
|
||
+ target_distros.each do |target_distro|
|
||
+ config.vm.define "#{target_distro}_8" do |machine|
|
||
+ machine.vm.box = 'generic/centos7'
|
||
+ machine.vm.hostname = "#{target_distro}-8.test"
|
||
+ end
|
||
+ end
|
||
+
|
||
+ # EL8toEL9
|
||
+ target_distros_el9 = {
|
||
+ almalinux: 'almalinux/8',
|
||
+ # centosstream: 'generic/centos8s',
|
||
+ rocky: 'generic/rocky8'
|
||
+ }
|
||
+
|
||
+ target_distros_el9.each_pair do |vm, box|
|
||
+ config.vm.define "#{vm}_9" do |machine|
|
||
+ machine.vm.box = "#{box}"
|
||
+ machine.vm.hostname = "#{vm}-9.test"
|
||
+ end
|
||
+ end
|
||
+
|
||
+ config.vm.provision 'ansible' do |ansible|
|
||
+ ansible.compatibility_mode = '2.0'
|
||
+ ansible.playbook = "ci/ansible/#{configuration}.yaml"
|
||
+ ansible.config_file = 'ci/ansible/ansible.cfg'
|
||
+ end
|
||
+end
|
||
diff --git a/ci/vagrant/el8toel9_multi.rb b/ci/vagrant/el8toel9_multi.rb
|
||
new file mode 100644
|
||
index 00000000..370758e6
|
||
--- /dev/null
|
||
+++ b/ci/vagrant/el8toel9_multi.rb
|
||
@@ -0,0 +1,45 @@
|
||
+# -*- mode: ruby -*-
|
||
+# vi: set ft=ruby :
|
||
+
|
||
+configuration = ENV['CONFIG']
|
||
+
|
||
+Vagrant.configure('2') do |config|
|
||
+ config.vagrant.plugins = 'vagrant-libvirt'
|
||
+
|
||
+ config.vm.synced_folder '.', '/vagrant', disabled: true
|
||
+ config.ssh.disable_deprecated_algorithms = true
|
||
+ config.vm.boot_timeout = 3600
|
||
+
|
||
+ config.vm.provider 'libvirt' do |v|
|
||
+ v.uri = 'qemu:///system'
|
||
+ v.memory = 4096
|
||
+ v.machine_type = 'q35'
|
||
+ v.cpu_mode = 'host-passthrough'
|
||
+ v.cpus = 2
|
||
+ v.disk_bus = 'scsi'
|
||
+ v.disk_driver cache: 'writeback', discard: 'unmap'
|
||
+ v.random_hostname = true
|
||
+ end
|
||
+
|
||
+ target_distros = {
|
||
+ almalinux: 'almalinux/8',
|
||
+ # centosstream: 'generic/centos8s',
|
||
+ rocky: 'generic/rocky8'
|
||
+ }
|
||
+
|
||
+ target_distros.each_pair do |vm, box|
|
||
+ config.vm.define "#{vm}_9" do |machine|
|
||
+ machine.vm.box = "#{box}"
|
||
+ machine.vm.hostname = "#{vm}-9.test"
|
||
+
|
||
+ if [vm, box] == target_distros.to_a.last
|
||
+ machine.vm.provision 'ansible' do |ansible|
|
||
+ ansible.compatibility_mode = '2.0'
|
||
+ ansible.limit = 'all'
|
||
+ ansible.playbook = "ci/ansible/#{configuration}.yaml"
|
||
+ ansible.config_file = 'ci/ansible/ansible.cfg'
|
||
+ end
|
||
+ end
|
||
+ end
|
||
+ end
|
||
+end
|
||
diff --git a/docs/source/conf.py b/docs/source/conf.py
|
||
index a0e6a1de..dd39d3fa 100644
|
||
--- a/docs/source/conf.py
|
||
+++ b/docs/source/conf.py
|
||
@@ -40,7 +40,6 @@ exclude_patterns = []
|
||
|
||
html_static_path = ['_static']
|
||
html_theme = 'sphinx_rtd_theme'
|
||
-html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
|
||
|
||
pygments_style = 'sphinx'
|
||
|
||
diff --git a/docs/source/contrib-and-devel-guidelines.md b/docs/source/contributing/coding-guidelines.md
|
||
similarity index 68%
|
||
rename from docs/source/contrib-and-devel-guidelines.md
|
||
rename to docs/source/contributing/coding-guidelines.md
|
||
index 3229c8a4..d06d0200 100644
|
||
--- a/docs/source/contrib-and-devel-guidelines.md
|
||
+++ b/docs/source/contributing/coding-guidelines.md
|
||
@@ -1,5 +1,4 @@
|
||
-# Contribution and development guidelines
|
||
-## Code guidelines
|
||
+# Coding guidelines
|
||
|
||
Your code should follow the [Python Coding Guidelines](https://leapp.readthedocs.io/en/latest/contributing.html#follow-python-coding-guidelines) used for the leapp project. On top of these rules follow instructions
|
||
below.
|
||
@@ -84,53 +83,3 @@ guaranteed to exist and executable.
|
||
The use of the {py:mod}`subprocess` library is forbidden in leapp repositories.
|
||
Use of the library would require very good reasoning, why the
|
||
{py:func}`~leapp.libraries.stdlib.run` function cannot be used.
|
||
-
|
||
-## Commits and pull requests (PRs)
|
||
-### PR description
|
||
-The description should contain information about all introduced changes:
|
||
-* What has been changed
|
||
-* How it has been changed
|
||
-* The reason for the change
|
||
-* How could people try/test the PR
|
||
-* Reference to a Jira ticket, Github issue, ... if applicable
|
||
-
|
||
-Good description provides all information for readers without the need to
|
||
-read the code. Note that reviewers can decline to review the PR with a poor
|
||
-description.
|
||
-
|
||
-### Commit messages
|
||
-When your pull-request is ready to be reviewed, every commit needs to include
|
||
-a title and a body continuing a description of the change --- what problem is
|
||
-being solved and how. The end of the commit body should contain Jira issue
|
||
-number (if applicable), GitHub issue that is being fixed, etc.:
|
||
-```
|
||
- Commit title
|
||
-
|
||
- Commit message body on multiple lines
|
||
-
|
||
- Jira-ref: <ticket-number>
|
||
-```
|
||
-
|
||
-Note that good commit message should provide information in similar way like
|
||
-the PR description. Poorly written commit messages can block the merge of PR
|
||
-or proper review.
|
||
-
|
||
-### Granularity of commits
|
||
-The granularity of commits depends strongly on the problem being solved. However,
|
||
-a large number of small commits is typically undesired. If possible, aim a
|
||
-Git history such that commits can be reverted individually, without requiring reverting
|
||
-numerous other dependent commits in order to get the `main` branch into a working state.
|
||
-
|
||
-Note that commits fixing problems of other commits in the PR are expected to be
|
||
-squashed before the final review and merge of the PR. Using of `git commit --fixup ...`
|
||
-and `git commit --squash ...` commands can help you to prepare such commits
|
||
-properly in advance and make the rebase later easier using `git rebase -i --autosquash`.
|
||
-We suggest you to get familiar with these commands as it can make your work really
|
||
-easier. Note that when you are starting to get higher number of such fixing commits
|
||
-in your PR, it's good practice to use the rebase more often. High numbers of such
|
||
-commits could make the final rebase more tricky in the end. So your PR should not
|
||
-have more than 15 commits at any time.
|
||
-
|
||
-### Create a separate git branch for your changes
|
||
-TBD
|
||
-
|
||
diff --git a/docs/source/contributing/community-upgrades.md b/docs/source/contributing/community-upgrades.md
|
||
new file mode 100644
|
||
index 00000000..cbec0a24
|
||
--- /dev/null
|
||
+++ b/docs/source/contributing/community-upgrades.md
|
||
@@ -0,0 +1,39 @@
|
||
+# Community upgrades for CentOS-like distros
|
||
+
|
||
+In the past, this project was solely focused on Red Hat Enterprise Linux upgrades. Recently, we've been extending and refactoring the `leapp-repository` codebase to allow upgrades of other distributions, such as CentOS Stream and also upgrades + conversions between different distributions in one step.
|
||
+
|
||
+This document outlines the state of support for upgrades of distributions other than RHEL. Note that support in this case doesn't mean what the codebase allows, but what the core leapp team supports in terms of issues, bugfixes, feature requests, testing, etc.
|
||
+
|
||
+RHEL upgrades and upgrades + conversions *to* RHEL are the only officially supported upgrade paths and are the primary focus of leapp developers. However, we are open to and welcome contributions from the community, allowing other upgrade (and conversion) paths in the codebase. For example, we've already integrated a contribution introducing upgrade paths for Alma Linux upgrades.
|
||
+
|
||
+This does not mean that we won't offer help outside of the outlined scope, but it is primarily up to the contributors contributing a particular upgrade path to maintain and test it. Also, it can take us some time to get to such PRs, so be patient please.
|
||
+
|
||
+Upon agreement we can also update the upgrade paths (in `upgrade_paths.json`) when there is a new release of the particular distribution. However note that we might include some upgrade paths required for conversions *to* RHEL on top of that.
|
||
+
|
||
+Contributions improving the overall upgrade experience are also welcome, as they always have been.
|
||
+
|
||
+```{note}
|
||
+By default, upgrade + conversion paths are automatically derived from upgrade paths. If this is not desired or other paths are required, feel free to open a pull request or open a [discussion](https://github.com/oamg/leapp-repository/discussions) on that topic.
|
||
+```
|
||
+
|
||
+## How to contribute
|
||
+
|
||
+Currently, the process for enabling upgrades and conversions for other distributions is not fully documented. In the meantime you can use the [pull request introducing Alma Linux upgrades](https://github.com/oamg/leapp-repository/pull/1391/) as reference. However, note that the leapp upgrade data files have special rules for updates, described below.
|
||
+
|
||
+### Leapp data files
|
||
+
|
||
+#### repomap.json
|
||
+
|
||
+To use correct target repositories during the upgrade automatically, the `repomap.json` data file needs to be updated to cover repositories of the newly added distribution. However, the file cannot be updated manually as its content is generated, hence any manual changes would be overwritten with the next update. Currently there is no straightforward way for the community to update our generators, but you can
|
||
+
|
||
+- submit a separate PR of how the resulting `repomap.json` file should look like, for an example you can take a look at [this PR](https://github.com/oamg/leapp-repository/pull/1395)
|
||
+- or provide the list of repositories (possibly also architectures) present on the distribution
|
||
+
|
||
+and we will update the generators accordingly, asking you to review the result then. We are discussing an improvement to make this more community friendly.
|
||
+
|
||
+#### pes-events.json and device_driver_deprecation_data.json
|
||
+
|
||
+Both PES events and device driver deprecation data only contain data for RHEL in the upstream `leapp-repository` and we will not include any data unrelated to RHEL. If you find a bug in the data, you can open a bug in the [RHEL Jira](https://issues.redhat.com/) for the `leapp-repository` component.
|
||
+
|
||
+Before contributing, make sure your PR conforms to our {doc}`Coding guidelines<coding-guidelines>`
|
||
+ and {doc}`PR guidelines<pr-guidelines>`.
|
||
diff --git a/docs/source/contributing/index.rst b/docs/source/contributing/index.rst
|
||
new file mode 100644
|
||
index 00000000..ebdc9151
|
||
--- /dev/null
|
||
+++ b/docs/source/contributing/index.rst
|
||
@@ -0,0 +1,18 @@
|
||
+Contributing
|
||
+========================================================
|
||
+
|
||
+.. toctree::
|
||
+ :maxdepth: 4
|
||
+ :caption: Contents:
|
||
+ :glob:
|
||
+
|
||
+ coding-guidelines
|
||
+ pr-guidelines
|
||
+ community-upgrades
|
||
+
|
||
+.. Indices and tables
|
||
+.. ==================
|
||
+..
|
||
+.. * :ref:`genindex`
|
||
+.. * :ref:`modindex`
|
||
+.. * :ref:`search`
|
||
diff --git a/docs/source/contributing/pr-guidelines.md b/docs/source/contributing/pr-guidelines.md
|
||
new file mode 100644
|
||
index 00000000..4f6ee4fe
|
||
--- /dev/null
|
||
+++ b/docs/source/contributing/pr-guidelines.md
|
||
@@ -0,0 +1,48 @@
|
||
+# Commits and pull requests (PRs)
|
||
+## PR description
|
||
+The description should contain information about all introduced changes:
|
||
+* What has been changed
|
||
+* How it has been changed
|
||
+* The reason for the change
|
||
+* How could people try/test the PR
|
||
+* Reference to a Jira ticket, Github issue, ... if applicable
|
||
+
|
||
+Good description provides all information for readers without the need to
|
||
+read the code. Note that reviewers can decline to review the PR with a poor
|
||
+description.
|
||
+
|
||
+## Commit messages
|
||
+When your pull-request is ready to be reviewed, every commit needs to include
|
||
+a title and a body containing a description of the change --- what problem is
|
||
+being solved and how. The end of the commit body should contain Jira issue
|
||
+number (if applicable), GitHub issue that is being fixed, etc.:
|
||
+```
|
||
+ Commit title
|
||
+
|
||
+ Commit message body on multiple lines
|
||
+
|
||
+ Jira-ref: <ticket-number>
|
||
+```
|
||
+
|
||
+Note that a good commit message should provide information in a similar way to
|
||
+the PR description. Poorly written commit messages can block the merge of PR
|
||
+or proper review.
|
||
+
|
||
+## Granularity of commits
|
||
+The granularity of commits depends strongly on the problem being solved. However,
|
||
+a large number of small commits is typically undesired. If possible, aim for a
|
||
+Git history such that commits can be reverted individually, without requiring reverting
|
||
+numerous other dependent commits in order to get the `main` branch into a working state.
|
||
+
|
||
+Note that commits fixing problems of other commits in the PR are expected to be
|
||
+squashed before the final review and merge of the PR. Using of `git commit --fixup ...`
|
||
+and `git commit --squash ...` commands can help you to prepare such commits
|
||
+properly in advance and make the rebase later easier using `git rebase -i --autosquash`.
|
||
+We suggest you to get familiar with these commands as it can make your work really
|
||
+easier. Note that when you are starting to get higher number of such fixing commits
|
||
+in your PR, it's good practice to use the rebase more often. High numbers of such
|
||
+commits could make the final rebase more tricky in the end. So your PR should not
|
||
+have more than 15 commits at any time.
|
||
+
|
||
+## Create a separate git branch for your changes
|
||
+TBD
|
||
diff --git a/docs/source/index.rst b/docs/source/index.rst
|
||
index 27537ca4..ed68f751 100644
|
||
--- a/docs/source/index.rst
|
||
+++ b/docs/source/index.rst
|
||
@@ -21,7 +21,7 @@ providing Red Hat Enterprise Linux in-place upgrade functionality.
|
||
upgrade-architecture-and-workflow/index
|
||
configuring-ipu/index
|
||
libraries-and-api/index
|
||
- contrib-and-devel-guidelines
|
||
+ contributing/index
|
||
faq
|
||
|
||
.. Indices and tables
|
||
diff --git a/docs/source/tutorials/howto-single-actor-run.md b/docs/source/tutorials/howto-single-actor-run.md
|
||
new file mode 100644
|
||
index 00000000..728ca083
|
||
--- /dev/null
|
||
+++ b/docs/source/tutorials/howto-single-actor-run.md
|
||
@@ -0,0 +1,155 @@
|
||
+# Running a single Actor
|
||
+
|
||
+During development or debugging of actors there may appear a need of running single actor instead of the entire workflow. The advantages of such approach include:
|
||
+- **Time and resource efficiency** - Running the entire workflow takes time and resources. Source system is scanned, information is collected and stored, in-place upgrade process goes through several phases. All these actions take time, actors are run multiple times during debugging or development process, so preparing single actor execution lets us save time.
|
||
+- **Isolation of problem** - When the debugged issue is related to a single actor, this approach allows isolating the issue without interference from other actors.
|
||
+
|
||
+
|
||
+```{hint}
|
||
+In practice, running a single actor for debugging does not have to be the best way to start when you do not have much experience with Leapp and IPU yet. However, in some cases it's still very valuable and helpful.
|
||
+```
|
||
+
|
||
+The execution of an actor using the `snactor` tool seems simple. In case of system upgrade leapp repositories it's not so straightforward and
|
||
+it can be quite complicated. In this guide we share our experience how to use `snactor` correctly, describing typical problems that developers hit.
|
||
+
|
||
+There are two main approaches:
|
||
+- **Running an actor with an empty or non-existent leapp database** -- applicable when a crafted data (or no data at all) is needed. Usually during development.
|
||
+- **Running an actor with leapp database filled by previous leapp execution** -- useful for debugging when the leapp.db file is available and want to run the actor in the same context as it has been previously executed when an error occurred.
|
||
+
|
||
+```{note}
|
||
+The leapp database refers to the `leapp.db` file. In case of using snactor, it's by default present in the `.leapp` directory of the used leapp repository
|
||
+scope.
|
||
+```
|
||
+
|
||
+````{tip}
|
||
+Cleaning the database can be managed with `snactor` tool command:
|
||
+```shell
|
||
+snactor messages clear
|
||
+```
|
||
+In other way, the database file can be also simply removed instead of using snactor.
|
||
+````
|
||
+
|
||
+
|
||
+Since an actor seems to be an independent piece of code, there is a dependency chain to resolve inside a workflow, especially around consumed messages and configuration which have to be resolved. When running entire In-Place Upgrade process, those dependencies needed for each actor are satisfied by assignment of each actor to specific phase, where actors emit and consume messages in desired sequence. Single actor usually needs specific list of such requirements, which can be fulfilled by manual preparation of this dependency chain. This very limited amount of resources needed for single actor can be called minimal context.
|
||
+
|
||
+
|
||
+## Running a single actor with minimal context
|
||
+
|
||
+It is possible to run a single actor without proceeding with `leapp preupgrade` machinery.
|
||
+This solution is based on the snactor tool. However, this solution requires minimal context to run.
|
||
+
|
||
+As mentioned before and described in [article](https://leapp.readthedocs.io/en/stable/building-blocks-and-architecture.html#architecture-overview)
|
||
+about workflow architecture, most of the actors are part of the produce/consume chain of messages. Important step in this procedure is to recreate the sequence of actors to be run to fulfill a chain of dependencies and provide necessary variables.
|
||
+
|
||
+Let's explain these steps based on a real case. The following example will be based on the `scan_fips` actor.
|
||
+
|
||
+
|
||
+### Initial configuration
|
||
+
|
||
+All actors (even those which are not depending on any message emitted by other actors) depend on some initial configuration which is provided by the `ipu_workflow_config` [actor](https://github.com/oamg/leapp-repository/blob/main/repos/system_upgrade/common/actors/ipuworkflowconfig/libraries/ipuworkflowconfig.py). No matter what actor you would like to run, the first step is always to run the `ipu_workflow_config` actor.
|
||
+
|
||
+Due to some missing initial variables, which usually are set by the framework, those variables need to be exported manually. Note that following vars are example ones, adjust them to your needs depending on your system configuration:
|
||
+```shell
|
||
+
|
||
+export LEAPP_UPGRADE_PATH_FLAVOUR=default
|
||
+export LEAPP_UPGRADE_PATH_TARGET_RELEASE=9.8
|
||
+export LEAPP_TARGET_OS=9.8
|
||
+```
|
||
+
|
||
+The `ipu_workflow_config` actor produces `IPUWorkflow` message, which contains all required initial config, so at the beginning execute:
|
||
+
|
||
+```shell
|
||
+snactor run ipu_workflow_config --print-output --save-output
|
||
+```
|
||
+
|
||
+```{note}
|
||
+Option `--save-output` is necessary to preserve output of this command in Leapp database. Without saving the message, it will not be available for other actors. Option `--print-output` is optional.
|
||
+```
|
||
+
|
||
+### Resolving actor's message dependencies
|
||
+
|
||
+All basic information about what an actor consumes and produces can be found in each `actor.py` [code](https://github.com/oamg/leapp-repository/blob/main/repos/system_upgrade/common/actors/scanfips/actor.py#L13-L14). In case of the `scan_fips` actor it's:
|
||
+
|
||
+```shell
|
||
+ consumes = (KernelCmdline,)
|
||
+ produces = (FIPSInfo,)
|
||
+```
|
||
+
|
||
+This actor consumes one message and produces another. Now we need to track the consumed message, which is `KernelCmdline`. Grep the cloned repository to find that the actor which produces such [message](https://github.com/oamg/leapp-repository/blob/main/repos/system_upgrade/common/actors/scankernelcmdline/actor.py#L14) is `scan_kernel_cmdline`.
|
||
+
|
||
+```shell
|
||
+snactor run scan_kernel_cmdline --print-output --save-output --actor-config IPUConfig
|
||
+```
|
||
+
|
||
+```{note}
|
||
+Important step here is to point out what actor config needs to be used, `IPUConfig` in that case.
|
||
+This parameter needs to be specified every time you want to run an actor, pointing to proper configuration.
|
||
+```
|
||
+
|
||
+This [scan_kernel_cmdline](https://github.com/oamg/leapp-repository/blob/main/repos/system_upgrade/common/actors/scankernelcmdline/actor.py#L13) doesn't consume anything: `consumes = ()`. So finally the desired actor can be run:
|
||
+
|
||
+```shell
|
||
+snactor run scan_fips --print-output --save-output --actor-config IPUConfig
|
||
+```
|
||
+
|
||
+### Limitations
|
||
+Note that not all cases will be as simple as the presented one, sometimes actors depend on multiple messages originating from other actors, requiring longer session of environment recreation.
|
||
+
|
||
+Also actors designed to run on other architectures will not be able to run.
|
||
+
|
||
+## Run single actor with existing database
|
||
+
|
||
+In contrast to the previous paragraph, where we operated only on self-created minimal context, the tutorial below will explain how to work with existing or provided context.
|
||
+Sometimes - especially for debugging and reproduction of the bug it is very convenient to use provided Leapp database *leapp.db*. This is a file containing all information needed to run Leapp framework on a system, including messages and configurations. Usually all necessary environment for actors is set up by
|
||
+first run of `leapp preupgrade` command, when starting from scratch. In this case, we already have `leapp.db` (e.g. transferred from other system) database file.
|
||
+
|
||
+Every new run of `leapp` command creates another entry in the database. It creates
|
||
+another row in execution table with specific ID, so each context can be easily tracked and
|
||
+reproduced.
|
||
+
|
||
+See the list of executions using the [leapp-inspector](https://leapp-repository.readthedocs.io/latest/tutorials/troubleshooting-debugging.html#troubleshooting-with-leapp-inspector) tool.
|
||
+
|
||
+```shell
|
||
+leapp-inspector --db path/to/leapp.db executions
|
||
+```
|
||
+Example output:
|
||
+```shell
|
||
+##################################################################
|
||
+ Executions of Leapp
|
||
+##################################################################
|
||
+Execution | Timestamp
|
||
+------------------------------------ | ---------------------------
|
||
+d146e105-fafd-43a2-a791-54e141eeab9c | 2025-11-26T19:39:20.563594Z
|
||
+b7fd5dca-a49f-4af7-b70c-8bbcc28a4338 | 2025-11-26T19:39:38.034070Z
|
||
+50b5289f-be4d-4206-a6e0-73e3caa1f9ed | 2025-11-26T19:41:40.401273Z
|
||
+
|
||
+```
|
||
+
|
||
+
|
||
+To determine which context (execution) `leapp` will run, there are two variables: `LEAPP_DEBUG_PRESERVE_CONTEXT`
|
||
+and `LEAPP_EXECUTION_ID`. When the `LEAPP_DEBUG_PRESERVE_CONTEXT` is set to 1 and the environment has
|
||
+`LEAPP_EXECUTION_ID` set, the `LEAPP_EXECUTION_ID` is not overwritten with snactor's execution ID.
|
||
+This allows the developer to run actors in the same way as if the actor was run during the last leapp's
|
||
+execution, thus, avoiding to rerun the entire upgrade process.
|
||
+
|
||
+
|
||
+Set variables:
|
||
+```shell
|
||
+
|
||
+export LEAPP_DEBUG_PRESERVE_CONTEXT=1
|
||
+export LEAPP_EXECUTION_ID=50b5289f-be4d-4206-a6e0-73e3caa1f9ed
|
||
+```
|
||
+
|
||
+Run desired actors or the entire upgrade process safely now. Output will not be preserved as another context entry.
|
||
+```shell
|
||
+
|
||
+snactor run --config /etc/leapp/leapp.conf --actor-config IPUConfig <ActorName> --print-output
|
||
+```
|
||
+
|
||
+```{note}
|
||
+Point to `leapp.conf` file with *--config* option. By default this file is located in `/etc/leapp/` and, among others, it contains Leapp database (`leapp.db`) location. When working with given database, either adjust configuration file or place database file in default location.
|
||
+```
|
||
+
|
||
+### Limitations
|
||
+
|
||
+Even though the context was provided, it is not possible to run actors which are designed for different architecture than source system.
|
||
diff --git a/docs/source/tutorials/index.rst b/docs/source/tutorials/index.rst
|
||
index a04fc183..6059e76a 100644
|
||
--- a/docs/source/tutorials/index.rst
|
||
+++ b/docs/source/tutorials/index.rst
|
||
@@ -19,6 +19,7 @@ write leapp actors for **In-Place Upgrades (IPU)** with the leapp framework.
|
||
|
||
setup-devel-env
|
||
howto-first-actor-upgrade
|
||
+ howto-single-actor-run
|
||
custom-content
|
||
configurable-actors
|
||
templates/index
|
||
diff --git a/etc/leapp/transaction/to_reinstall b/etc/leapp/transaction/to_reinstall
|
||
new file mode 100644
|
||
index 00000000..c6694a8e
|
||
--- /dev/null
|
||
+++ b/etc/leapp/transaction/to_reinstall
|
||
@@ -0,0 +1,3 @@
|
||
+### List of packages (each on new line) to be reinstalled to the upgrade transaction
|
||
+### Useful for packages that have identical version strings but contain binary changes between major OS versions
|
||
+### Packages that aren't installed will be skipped
|
||
diff --git a/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py b/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py
|
||
index b28ec57c..6882488a 100644
|
||
--- a/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py
|
||
+++ b/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py
|
||
@@ -91,7 +91,7 @@ def figure_out_commands_needed_to_add_entry(kernel_path, initramfs_path, args_to
|
||
'/usr/sbin/grubby',
|
||
'--add-kernel', '{0}'.format(kernel_path),
|
||
'--initrd', '{0}'.format(initramfs_path),
|
||
- '--title', 'RHEL-Upgrade-Initramfs',
|
||
+ '--title', 'ELevate-Upgrade-Initramfs',
|
||
'--copy-default',
|
||
'--make-default',
|
||
'--args', args_to_add_str
|
||
diff --git a/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py b/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py
|
||
index 7341602b..b2ced8ae 100644
|
||
--- a/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py
|
||
+++ b/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py
|
||
@@ -53,7 +53,7 @@ run_args_add = [
|
||
'/usr/sbin/grubby',
|
||
'--add-kernel', '/abc',
|
||
'--initrd', '/def',
|
||
- '--title', 'RHEL-Upgrade-Initramfs',
|
||
+ '--title', 'ELevate-Upgrade-Initramfs',
|
||
'--copy-default',
|
||
'--make-default',
|
||
'--args',
|
||
diff --git a/repos/system_upgrade/common/actors/checkenabledvendorrepos/actor.py b/repos/system_upgrade/common/actors/checkenabledvendorrepos/actor.py
|
||
new file mode 100644
|
||
index 00000000..52f5af9d
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/actors/checkenabledvendorrepos/actor.py
|
||
@@ -0,0 +1,53 @@
|
||
+from leapp.actors import Actor
|
||
+from leapp.libraries.stdlib import api
|
||
+from leapp.models import (
|
||
+ RepositoriesFacts,
|
||
+ VendorSourceRepos,
|
||
+ ActiveVendorList,
|
||
+)
|
||
+from leapp.tags import FactsPhaseTag, IPUWorkflowTag
|
||
+
|
||
+
|
||
+class CheckEnabledVendorRepos(Actor):
|
||
+ """
|
||
+ Create a list of vendors whose repositories are present on the system and enabled.
|
||
+ Only those vendors' configurations (new repositories, PES actions, etc.)
|
||
+ will be included in the upgrade process.
|
||
+ """
|
||
+
|
||
+ name = "check_enabled_vendor_repos"
|
||
+ consumes = (RepositoriesFacts, VendorSourceRepos)
|
||
+ produces = (ActiveVendorList)
|
||
+ tags = (IPUWorkflowTag, FactsPhaseTag.Before)
|
||
+
|
||
+ def process(self):
|
||
+ vendor_mapping_data = {}
|
||
+ active_vendors = set()
|
||
+
|
||
+ # Make a dict for easy mapping of repoid -> corresponding vendor name.
|
||
+ for vendor_src_repodata in api.consume(VendorSourceRepos):
|
||
+ for vendor_src_repo in vendor_src_repodata.source_repoids:
|
||
+ vendor_mapping_data[vendor_src_repo] = vendor_src_repodata.vendor
|
||
+
|
||
+ # Is the repo listed in the vendor map as from_repoid present on the system?
|
||
+ for repos_facts in api.consume(RepositoriesFacts):
|
||
+ for repo_file in repos_facts.repositories:
|
||
+ for repo_data in repo_file.data:
|
||
+ self.log.debug(
|
||
+ "Looking for repository {} in vendor maps".format(repo_data.repoid)
|
||
+ )
|
||
+ if repo_data.enabled and repo_data.repoid in vendor_mapping_data:
|
||
+ # If the vendor's repository is present in the system and enabled, count the vendor as active.
|
||
+ new_vendor = vendor_mapping_data[repo_data.repoid]
|
||
+ self.log.debug(
|
||
+ "Repository {} found and enabled, enabling vendor {}".format(
|
||
+ repo_data.repoid, new_vendor
|
||
+ )
|
||
+ )
|
||
+ active_vendors.add(new_vendor)
|
||
+
|
||
+ if active_vendors:
|
||
+ self.log.debug("Active vendor list: {}".format(active_vendors))
|
||
+ api.produce(ActiveVendorList(data=list(active_vendors)))
|
||
+ else:
|
||
+ self.log.info("No active vendors found, vendor list not generated")
|
||
diff --git a/repos/system_upgrade/common/actors/checklvm/actor.py b/repos/system_upgrade/common/actors/checklvm/actor.py
|
||
new file mode 100644
|
||
index 00000000..167698db
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/actors/checklvm/actor.py
|
||
@@ -0,0 +1,24 @@
|
||
+from leapp.actors import Actor
|
||
+from leapp.libraries.actor.checklvm import check_lvm
|
||
+from leapp.models import DistributionSignedRPM, LVMConfig, TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks
|
||
+from leapp.reporting import Report
|
||
+from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
|
||
+
|
||
+
|
||
+class CheckLVM(Actor):
|
||
+ """
|
||
+ Check if the LVM is installed and ensure the target userspace container
|
||
+ and initramfs are prepared to support it.
|
||
+
|
||
+ The LVM configuration files are copied into the target userspace container
|
||
+ so that the dracut is able to use them while creating the initramfs.
|
||
+ The dracut LVM module is enabled by this actor as well.
|
||
+ """
|
||
+
|
||
+ name = 'check_lvm'
|
||
+ consumes = (DistributionSignedRPM, LVMConfig)
|
||
+ produces = (Report, TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks)
|
||
+ tags = (ChecksPhaseTag, IPUWorkflowTag)
|
||
+
|
||
+ def process(self):
|
||
+ check_lvm()
|
||
diff --git a/repos/system_upgrade/common/actors/checklvm/libraries/checklvm.py b/repos/system_upgrade/common/actors/checklvm/libraries/checklvm.py
|
||
new file mode 100644
|
||
index 00000000..073bfbf4
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/actors/checklvm/libraries/checklvm.py
|
||
@@ -0,0 +1,74 @@
|
||
+import os
|
||
+
|
||
+from leapp import reporting
|
||
+from leapp.libraries.common.rpms import has_package
|
||
+from leapp.libraries.stdlib import api
|
||
+from leapp.models import (
|
||
+ CopyFile,
|
||
+ DistributionSignedRPM,
|
||
+ DracutModule,
|
||
+ LVMConfig,
|
||
+ TargetUserSpaceUpgradeTasks,
|
||
+ UpgradeInitramfsTasks
|
||
+)
|
||
+
|
||
+LVM_CONFIG_PATH = '/etc/lvm/lvm.conf'
|
||
+LVM_DEVICES_FILE_PATH_PREFIX = '/etc/lvm/devices'
|
||
+
|
||
+
|
||
+def _report_filter_detection():
|
||
+ title = 'LVM filter definition detected.'
|
||
+ summary = (
|
||
+ 'Beginning with RHEL 9, LVM devices file is used by default to select devices used by '
|
||
+ f'LVM. Since leapp detected the use of LVM filter in the {LVM_CONFIG_PATH} configuration '
|
||
+ 'file, the configuration won\'t be modified to use devices file during the upgrade and '
|
||
+ 'the LVM filter will remain in use after the upgrade.'
|
||
+ )
|
||
+
|
||
+ remediation_hint = (
|
||
+ 'While not required, switching to the LVM devices file from the LVM filter is possible '
|
||
+ 'using the following command. The command uses the existing LVM filter to create the system.devices '
|
||
+ 'file which is then used instead of the LVM filter. Before running the command, '
|
||
+ f'make sure that \'use_devicesfile=1\' is set in {LVM_CONFIG_PATH}.'
|
||
+ )
|
||
+ remediation_command = ['vgimportdevices']
|
||
+
|
||
+ reporting.create_report([
|
||
+ reporting.Title(title),
|
||
+ reporting.Summary(summary),
|
||
+ reporting.Remediation(hint=remediation_hint, commands=[remediation_command]),
|
||
+ reporting.ExternalLink(
|
||
+ title='Limiting LVM device visibility and usage',
|
||
+ url='https://red.ht/limiting-lvm-devices-visibility-and-usage',
|
||
+ ),
|
||
+ reporting.Severity(reporting.Severity.INFO),
|
||
+ ])
|
||
+
|
||
+
|
||
+def check_lvm():
|
||
+ if not has_package(DistributionSignedRPM, 'lvm2'):
|
||
+ return
|
||
+
|
||
+ lvm_config = next(api.consume(LVMConfig), None)
|
||
+ if not lvm_config:
|
||
+ return
|
||
+
|
||
+ lvm_devices_file_path = os.path.join(LVM_DEVICES_FILE_PATH_PREFIX, lvm_config.devices.devicesfile)
|
||
+ lvm_devices_file_exists = os.path.isfile(lvm_devices_file_path)
|
||
+
|
||
+ filters_used = not lvm_config.devices.use_devicesfile or not lvm_devices_file_exists
|
||
+ if filters_used:
|
||
+ _report_filter_detection()
|
||
+
|
||
+ api.current_logger().debug('Including lvm dracut module.')
|
||
+ api.produce(UpgradeInitramfsTasks(include_dracut_modules=[DracutModule(name='lvm')]))
|
||
+
|
||
+ copy_files = []
|
||
+ api.current_logger().debug('Copying "{}" to the target userspace.'.format(LVM_CONFIG_PATH))
|
||
+ copy_files.append(CopyFile(src=LVM_CONFIG_PATH))
|
||
+
|
||
+ if lvm_devices_file_exists and lvm_config.devices.use_devicesfile:
|
||
+ api.current_logger().debug('Copying "{}" to the target userspace.'.format(lvm_devices_file_path))
|
||
+ copy_files.append(CopyFile(src=lvm_devices_file_path))
|
||
+
|
||
+ api.produce(TargetUserSpaceUpgradeTasks(copy_files=copy_files))
|
||
diff --git a/repos/system_upgrade/common/actors/checklvm/tests/test_checklvm.py b/repos/system_upgrade/common/actors/checklvm/tests/test_checklvm.py
|
||
new file mode 100644
|
||
index 00000000..a7da8050
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/actors/checklvm/tests/test_checklvm.py
|
||
@@ -0,0 +1,92 @@
|
||
+import os
|
||
+
|
||
+import pytest
|
||
+
|
||
+from leapp.libraries.actor import checklvm
|
||
+from leapp.libraries.common.testutils import produce_mocked
|
||
+from leapp.libraries.stdlib import api
|
||
+from leapp.models import (
|
||
+ DistributionSignedRPM,
|
||
+ LVMConfig,
|
||
+ LVMConfigDevicesSection,
|
||
+ RPM,
|
||
+ TargetUserSpaceUpgradeTasks,
|
||
+ UpgradeInitramfsTasks
|
||
+)
|
||
+
|
||
+
|
||
+def test_check_lvm_when_lvm_not_installed(monkeypatch):
|
||
+ def consume_mocked(model):
|
||
+ if model == LVMConfig:
|
||
+ assert False
|
||
+ if model == DistributionSignedRPM:
|
||
+ yield DistributionSignedRPM(items=[])
|
||
+
|
||
+ monkeypatch.setattr(api, 'produce', produce_mocked())
|
||
+ monkeypatch.setattr(api, 'consume', consume_mocked)
|
||
+
|
||
+ checklvm.check_lvm()
|
||
+
|
||
+ assert not api.produce.called
|
||
+
|
||
+
|
||
+@pytest.mark.parametrize(
|
||
+ ('config', 'create_report', 'devices_file_exists'),
|
||
+ [
|
||
+ (LVMConfig(devices=LVMConfigDevicesSection(use_devicesfile=False)), True, False),
|
||
+ (LVMConfig(devices=LVMConfigDevicesSection(use_devicesfile=True)), False, True),
|
||
+ (LVMConfig(devices=LVMConfigDevicesSection(use_devicesfile=True)), True, False),
|
||
+ (LVMConfig(devices=LVMConfigDevicesSection(use_devicesfile=False, devicesfile="test.devices")), True, False),
|
||
+ (LVMConfig(devices=LVMConfigDevicesSection(use_devicesfile=True, devicesfile="test.devices")), False, True),
|
||
+ (LVMConfig(devices=LVMConfigDevicesSection(use_devicesfile=True, devicesfile="test.devices")), True, False),
|
||
+ ]
|
||
+)
|
||
+def test_scan_when_lvm_installed(monkeypatch, config, create_report, devices_file_exists):
|
||
+ lvm_package = RPM(
|
||
+ name='lvm2',
|
||
+ version='2',
|
||
+ release='1',
|
||
+ epoch='1',
|
||
+ packager='',
|
||
+ arch='x86_64',
|
||
+ pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'
|
||
+ )
|
||
+
|
||
+ def isfile_mocked(_):
|
||
+ return devices_file_exists
|
||
+
|
||
+ def consume_mocked(model):
|
||
+ if model == LVMConfig:
|
||
+ yield config
|
||
+ if model == DistributionSignedRPM:
|
||
+ yield DistributionSignedRPM(items=[lvm_package])
|
||
+
|
||
+ def report_filter_detection_mocked():
|
||
+ assert create_report
|
||
+
|
||
+ monkeypatch.setattr(api, 'produce', produce_mocked())
|
||
+ monkeypatch.setattr(api, 'consume', consume_mocked)
|
||
+ monkeypatch.setattr(os.path, 'isfile', isfile_mocked)
|
||
+ monkeypatch.setattr(checklvm, '_report_filter_detection', report_filter_detection_mocked)
|
||
+
|
||
+ checklvm.check_lvm()
|
||
+
|
||
+ # The lvm is installed, thus the dracut module is enabled and at least the lvm.conf is copied
|
||
+ assert api.produce.called == 2
|
||
+ assert len(api.produce.model_instances) == 2
|
||
+
|
||
+ expected_copied_files = [checklvm.LVM_CONFIG_PATH]
|
||
+ if devices_file_exists and config.devices.use_devicesfile:
|
||
+ devices_file_path = os.path.join(checklvm.LVM_DEVICES_FILE_PATH_PREFIX, config.devices.devicesfile)
|
||
+ expected_copied_files.append(devices_file_path)
|
||
+
|
||
+ for produced_model in api.produce.model_instances:
|
||
+ assert isinstance(produced_model, (UpgradeInitramfsTasks, TargetUserSpaceUpgradeTasks))
|
||
+
|
||
+ if isinstance(produced_model, UpgradeInitramfsTasks):
|
||
+ assert len(produced_model.include_dracut_modules) == 1
|
||
+ assert produced_model.include_dracut_modules[0].name == 'lvm'
|
||
+ else:
|
||
+ assert len(produced_model.copy_files) == len(expected_copied_files)
|
||
+ for file in produced_model.copy_files:
|
||
+ assert file.src in expected_copied_files
|
||
diff --git a/repos/system_upgrade/common/actors/checkrootsymlinks/actor.py b/repos/system_upgrade/common/actors/checkrootsymlinks/actor.py
|
||
index c35272b2..7b89bf7a 100644
|
||
--- a/repos/system_upgrade/common/actors/checkrootsymlinks/actor.py
|
||
+++ b/repos/system_upgrade/common/actors/checkrootsymlinks/actor.py
|
||
@@ -55,7 +55,7 @@ class CheckRootSymlinks(Actor):
|
||
os.path.relpath(item.target, '/'),
|
||
os.path.join('/', item.name)])
|
||
commands.append(command)
|
||
- rem_commands = [['sh', '-c', ' && '.join(commands)]]
|
||
+ rem_commands = [['sh', '-c', '"{}"'.format(' && '.join(commands))]]
|
||
# Generate reports about non-utf8 absolute links presence
|
||
nonutf_count = len(absolute_links_nonutf)
|
||
if nonutf_count > 0:
|
||
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
|
||
index 56a94b5d..46c5d9b6 100755
|
||
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
|
||
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
|
||
@@ -390,4 +390,3 @@ getarg 'rd.break=leapp-logs' 'rd.upgrade.break=leapp-finish' && {
|
||
sync
|
||
mount -o "remount,$old_opts" "$NEWROOT"
|
||
exit $result
|
||
-
|
||
diff --git a/repos/system_upgrade/common/actors/convert/swapdistropackages/actor.py b/repos/system_upgrade/common/actors/convert/swapdistropackages/actor.py
|
||
new file mode 100644
|
||
index 00000000..f8d9c446
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/actors/convert/swapdistropackages/actor.py
|
||
@@ -0,0 +1,20 @@
|
||
+from leapp.actors import Actor
|
||
+from leapp.libraries.actor import swapdistropackages
|
||
+from leapp.models import DistributionSignedRPM, RpmTransactionTasks
|
||
+from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
|
||
+
|
||
+
|
||
+class SwapDistroPackages(Actor):
|
||
+ """
|
||
+ Swap distribution specific packages.
|
||
+
|
||
+ Does nothing if not converting.
|
||
+ """
|
||
+
|
||
+ name = 'swap_distro_packages'
|
||
+ consumes = (DistributionSignedRPM,)
|
||
+ produces = (RpmTransactionTasks,)
|
||
+ tags = (IPUWorkflowTag, ChecksPhaseTag)
|
||
+
|
||
+ def process(self):
|
||
+ swapdistropackages.process()
|
||
diff --git a/repos/system_upgrade/common/actors/convert/swapdistropackages/libraries/swapdistropackages.py b/repos/system_upgrade/common/actors/convert/swapdistropackages/libraries/swapdistropackages.py
|
||
new file mode 100644
|
||
index 00000000..f7e2ce68
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/actors/convert/swapdistropackages/libraries/swapdistropackages.py
|
||
@@ -0,0 +1,111 @@
|
||
+import fnmatch
|
||
+
|
||
+from leapp.exceptions import StopActorExecutionError
|
||
+from leapp.libraries.common.config import get_source_distro_id, get_target_distro_id
|
||
+from leapp.libraries.common.config.version import get_target_major_version
|
||
+from leapp.libraries.stdlib import api
|
||
+from leapp.models import DistributionSignedRPM, RpmTransactionTasks
|
||
+
|
||
+# Config for swapping distribution-specific RPMs
|
||
+# The keys can be in 2 "formats":
|
||
+# (<source_distro_id>, <target_distro_id>)
|
||
+# (<source_distro_id>, <target_distro_id>, <target_major_version as int>)
|
||
+# The "swap" dict maps packages on the source distro to their replacements on
|
||
+# the target distro
|
||
+# The "remove" set lists packages or glob pattern for matching packages from
|
||
+# the source distro to remove without any replacement.
|
||
+_CONFIG = {
|
||
+ ("centos", "rhel"): {
|
||
+ "swap": {
|
||
+ "centos-logos": "redhat-logos",
|
||
+ "centos-logos-httpd": "redhat-logos-httpd",
|
||
+ "centos-logos-ipa": "redhat-logos-ipa",
|
||
+ "centos-indexhtml": "redhat-indexhtml",
|
||
+ "centos-backgrounds": "redhat-backgrounds",
|
||
+ "centos-stream-release": "redhat-release",
|
||
+ },
|
||
+ "remove": {
|
||
+ "centos-gpg-keys",
|
||
+ "centos-stream-repos",
|
||
+ # various release packages, typically contain repofiles
|
||
+ "centos-release-*",
|
||
+ # present on Centos (not Stream) 8, let's include them if they are potentially leftover
|
||
+ "centos-linux-release",
|
||
+ "centos-linux-repos",
|
||
+ "centos-obsolete-packages",
|
||
+ },
|
||
+ },
|
||
+ ("almalinux", "rhel"): {
|
||
+ "swap": {
|
||
+ "almalinux-logos": "redhat-logos",
|
||
+ "almalinux-logos-httpd": "redhat-logos-httpd",
|
||
+ "almalinux-logos-ipa": "redhat-logos-ipa",
|
||
+ "almalinux-indexhtml": "redhat-indexhtml",
|
||
+ "almalinux-backgrounds": "redhat-backgrounds",
|
||
+ "almalinux-release": "redhat-release",
|
||
+ },
|
||
+ "remove": {
|
||
+ "almalinux-repos",
|
||
+ "almalinux-gpg-keys",
|
||
+
|
||
+ "almalinux-release-*",
|
||
+ "centos-release-*",
|
||
+ "elrepo-release",
|
||
+ "epel-release",
|
||
+ },
|
||
+ },
|
||
+}
|
||
+
|
||
+
|
||
+def _get_config(source_distro, target_distro, target_major):
|
||
+ key = (source_distro, target_distro, target_major)
|
||
+ config = _CONFIG.get(key)
|
||
+ if config:
|
||
+ return config
|
||
+
|
||
+ key = (source_distro, target_distro)
|
||
+ return _CONFIG.get(key)
|
||
+
|
||
+
|
||
+def _glob_match_rpms(rpms, pattern):
|
||
+ return [rpm for rpm in rpms if fnmatch.fnmatch(rpm, pattern)]
|
||
+
|
||
+
|
||
+def _make_transaction_tasks(config, rpms):
|
||
+ to_install = set()
|
||
+ to_remove = set()
|
||
+ for source_pkg, target_pkg in config.get("swap", {}).items():
|
||
+ if source_pkg in rpms:
|
||
+ to_remove.add(source_pkg)
|
||
+ to_install.add(target_pkg)
|
||
+
|
||
+ for pkg in config.get("remove", {}):
|
||
+ matches = _glob_match_rpms(rpms, pkg)
|
||
+ to_remove.update(matches)
|
||
+
|
||
+ return RpmTransactionTasks(to_install=list(to_install), to_remove=list(to_remove))
|
||
+
|
||
+
|
||
+def process():
|
||
+ rpms_msg = next(api.consume(DistributionSignedRPM), None)
|
||
+ if not rpms_msg:
|
||
+ raise StopActorExecutionError("Did not receive DistributionSignedRPM message")
|
||
+
|
||
+ source_distro = get_source_distro_id()
|
||
+ target_distro = get_target_distro_id()
|
||
+
|
||
+ if source_distro == target_distro:
|
||
+ return
|
||
+
|
||
+ config = _get_config(source_distro, target_distro, get_target_major_version())
|
||
+ if not config:
|
||
+ api.current_logger().warning(
|
||
+ "Could not find config for handling distro specific packages for {}->{} upgrade.".format(
|
||
+ source_distro, target_distro
|
||
+ )
|
||
+ )
|
||
+ return
|
||
+
|
||
+ rpms = {rpm.name for rpm in rpms_msg.items}
|
||
+ task = _make_transaction_tasks(config, rpms)
|
||
+ api.produce(task)
|
||
diff --git a/repos/system_upgrade/common/actors/convert/swapdistropackages/tests/test_swapdistropackages.py b/repos/system_upgrade/common/actors/convert/swapdistropackages/tests/test_swapdistropackages.py
|
||
new file mode 100644
|
||
index 00000000..99bb9c20
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/actors/convert/swapdistropackages/tests/test_swapdistropackages.py
|
||
@@ -0,0 +1,291 @@
|
||
+from unittest import mock
|
||
+
|
||
+import pytest
|
||
+
|
||
+from leapp.exceptions import StopActorExecutionError
|
||
+from leapp.libraries.actor import swapdistropackages
|
||
+from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked, produce_mocked
|
||
+from leapp.libraries.stdlib import api
|
||
+from leapp.models import DistributionSignedRPM, RPM, RpmTransactionTasks
|
||
+
|
||
+
|
||
+def test_get_config(monkeypatch):
|
||
+ test_config = {
|
||
+ ("centos", "rhel"): {
|
||
+ "swap": {"pkgA": "pkgB"},
|
||
+ "remove": {
|
||
+ "pkgC",
|
||
+ },
|
||
+ },
|
||
+ ("centos", "rhel", 10): {"swap": {"pkg1": "pkg2"}},
|
||
+ }
|
||
+ monkeypatch.setattr(swapdistropackages, "_CONFIG", test_config)
|
||
+
|
||
+ expect = {
|
||
+ "swap": {"pkgA": "pkgB"},
|
||
+ "remove": {
|
||
+ "pkgC",
|
||
+ },
|
||
+ }
|
||
+ # fallback to (centos, rhel) when there is no target version specific config
|
||
+ cfg = swapdistropackages._get_config("centos", "rhel", 9)
|
||
+ assert cfg == expect
|
||
+
|
||
+ # has it's own target version specific config
|
||
+ cfg = swapdistropackages._get_config("centos", "rhel", 10)
|
||
+ assert cfg == {"swap": {"pkg1": "pkg2"}}
|
||
+
|
||
+ # not mapped
|
||
+ cfg = swapdistropackages._get_config("almalinux", "rhel", 9)
|
||
+ assert not cfg
|
||
+
|
||
+
|
||
+@pytest.mark.parametrize(
|
||
+ "rpms,config,expected",
|
||
+ [
|
||
+ (
|
||
+ ["pkgA", "pkgB", "pkgC"],
|
||
+ {
|
||
+ "swap": {"pkgA": "pkgB"},
|
||
+ "remove": {
|
||
+ "pkgC",
|
||
+ },
|
||
+ },
|
||
+ RpmTransactionTasks(to_install=["pkgB"], to_remove=["pkgA", "pkgC"]),
|
||
+ ),
|
||
+ # only some pkgs present
|
||
+ (
|
||
+ ["pkg1", "pkgA", "pkg-other"],
|
||
+ {
|
||
+ "swap": {"pkgX": "pkgB", "pkg1": "pkg2"},
|
||
+ "remove": {"pkg*"},
|
||
+ },
|
||
+ RpmTransactionTasks(
|
||
+ to_install=["pkg2"], to_remove=["pkgA", "pkg1", "pkg-other"]
|
||
+ ),
|
||
+ ),
|
||
+ (
|
||
+ ["pkgA", "pkgB"],
|
||
+ {},
|
||
+ RpmTransactionTasks(to_install=[], to_remove=[]),
|
||
+ ),
|
||
+ ],
|
||
+)
|
||
+def test__make_transaction_tasks(rpms, config, expected):
|
||
+ tasks = swapdistropackages._make_transaction_tasks(config, rpms)
|
||
+ assert set(tasks.to_install) == set(expected.to_install)
|
||
+ assert set(tasks.to_remove) == set(expected.to_remove)
|
||
+
|
||
+
|
||
+def test_process_ok(monkeypatch):
|
||
+ def _msg_pkgs(pkgnames):
|
||
+ rpms = []
|
||
+ for name in pkgnames:
|
||
+ rpms.append(RPM(
|
||
+ name=name,
|
||
+ epoch="0",
|
||
+ packager="packager",
|
||
+ version="1.2",
|
||
+ release="el9",
|
||
+ arch="noarch",
|
||
+ pgpsig="",
|
||
+ ))
|
||
+ return DistributionSignedRPM(items=rpms)
|
||
+
|
||
+ rpms = [
|
||
+ "centos-logos",
|
||
+ "centos-logos-httpd",
|
||
+ "centos-logos-ipa",
|
||
+ "centos-indexhtml",
|
||
+ "centos-backgrounds",
|
||
+ "centos-stream-release",
|
||
+ "centos-gpg-keys",
|
||
+ "centos-stream-repos",
|
||
+ "centos-linux-release",
|
||
+ "centos-linux-repos",
|
||
+ "centos-obsolete-packages",
|
||
+ "centos-release-automotive",
|
||
+ "centos-release-automotive-experimental",
|
||
+ "centos-release-autosd",
|
||
+ "centos-release-ceph-pacific",
|
||
+ "centos-release-ceph-quincy",
|
||
+ "centos-release-ceph-reef",
|
||
+ "centos-release-ceph-squid",
|
||
+ "centos-release-ceph-tentacle",
|
||
+ "centos-release-cloud",
|
||
+ "centos-release-gluster10",
|
||
+ "centos-release-gluster11",
|
||
+ "centos-release-gluster9",
|
||
+ "centos-release-hyperscale",
|
||
+ "centos-release-hyperscale-experimental",
|
||
+ "centos-release-hyperscale-experimental-testing",
|
||
+ "centos-release-hyperscale-spin",
|
||
+ "centos-release-hyperscale-spin-testing",
|
||
+ "centos-release-hyperscale-testing",
|
||
+ "centos-release-isa-override",
|
||
+ "centos-release-kmods",
|
||
+ "centos-release-kmods-kernel",
|
||
+ "centos-release-kmods-kernel-6",
|
||
+ "centos-release-messaging",
|
||
+ "centos-release-nfs-ganesha4",
|
||
+ "centos-release-nfs-ganesha5",
|
||
+ "centos-release-nfs-ganesha6",
|
||
+ "centos-release-nfs-ganesha7",
|
||
+ "centos-release-nfs-ganesha8",
|
||
+ "centos-release-nfv-common",
|
||
+ "centos-release-nfv-openvswitch",
|
||
+ "centos-release-okd-4",
|
||
+ "centos-release-openstack-antelope",
|
||
+ "centos-release-openstack-bobcat",
|
||
+ "centos-release-openstack-caracal",
|
||
+ "centos-release-openstack-dalmatian",
|
||
+ "centos-release-openstack-epoxy",
|
||
+ "centos-release-openstack-yoga",
|
||
+ "centos-release-openstack-zed",
|
||
+ "centos-release-openstackclient-xena",
|
||
+ "centos-release-opstools",
|
||
+ "centos-release-ovirt45",
|
||
+ "centos-release-ovirt45-testing",
|
||
+ "centos-release-proposed_updates",
|
||
+ "centos-release-rabbitmq-38",
|
||
+ "centos-release-samba414",
|
||
+ "centos-release-samba415",
|
||
+ "centos-release-samba416",
|
||
+ "centos-release-samba417",
|
||
+ "centos-release-samba418",
|
||
+ "centos-release-samba419",
|
||
+ "centos-release-samba420",
|
||
+ "centos-release-samba421",
|
||
+ "centos-release-samba422",
|
||
+ "centos-release-samba423",
|
||
+ "centos-release-storage-common",
|
||
+ "centos-release-virt-common",
|
||
+ ]
|
||
+ curr_actor_mocked = CurrentActorMocked(
|
||
+ src_distro="centos",
|
||
+ dst_distro="rhel",
|
||
+ msgs=[_msg_pkgs(rpms)],
|
||
+ )
|
||
+ monkeypatch.setattr(api, 'current_actor', curr_actor_mocked)
|
||
+ produce_mock = produce_mocked()
|
||
+ monkeypatch.setattr(api, 'produce', produce_mock)
|
||
+
|
||
+ swapdistropackages.process()
|
||
+
|
||
+ expected = RpmTransactionTasks(
|
||
+ to_install=[
|
||
+ "redhat-logos",
|
||
+ "redhat-logos-httpd",
|
||
+ "redhat-logos-ipa",
|
||
+ "redhat-indexhtml",
|
||
+ "redhat-backgrounds",
|
||
+ "redhat-release",
|
||
+ ],
|
||
+ to_remove=rpms,
|
||
+ )
|
||
+
|
||
+ assert produce_mock.called == 1
|
||
+ produced = produce_mock.model_instances[0]
|
||
+ assert set(produced.to_install) == set(expected.to_install)
|
||
+ assert set(produced.to_remove) == set(expected.to_remove)
|
||
+
|
||
+
|
||
+def test_process_no_config_skip(monkeypatch):
|
||
+ curr_actor_mocked = CurrentActorMocked(
|
||
+ src_distro="distroA", dst_distro="distroB", msgs=[DistributionSignedRPM()]
|
||
+ )
|
||
+ monkeypatch.setattr(api, "current_actor", curr_actor_mocked)
|
||
+ monkeypatch.setattr(swapdistropackages, "_get_config", lambda *args: None)
|
||
+ monkeypatch.setattr(api, "current_logger", logger_mocked())
|
||
+ produce_mock = produce_mocked()
|
||
+ monkeypatch.setattr(api, "produce", produce_mock)
|
||
+
|
||
+ swapdistropackages.process()
|
||
+
|
||
+ assert produce_mock.called == 0
|
||
+ assert (
|
||
+ "Could not find config for handling distro specific packages for distroA->distroB upgrade"
|
||
+ ) in api.current_logger.warnmsg[0]
|
||
+
|
||
+
|
||
+@pytest.mark.parametrize("distro", ["rhel", "centos"])
|
||
+def test_process_not_converting_skip(monkeypatch, distro):
|
||
+ curr_actor_mocked = CurrentActorMocked(
|
||
+ src_distro=distro, dst_distro=distro, msgs=[DistributionSignedRPM()]
|
||
+ )
|
||
+ monkeypatch.setattr(api, "current_actor", curr_actor_mocked)
|
||
+ monkeypatch.setattr(api, "current_logger", logger_mocked())
|
||
+ produce_mock = produce_mocked()
|
||
+ monkeypatch.setattr(api, "produce", produce_mock)
|
||
+
|
||
+ with mock.patch(
|
||
+ "leapp.libraries.actor.swapdistropackages._get_config"
|
||
+ ) as _get_config_mocked:
|
||
+ swapdistropackages.process()
|
||
+ _get_config_mocked.assert_not_called()
|
||
+ assert produce_mock.called == 0
|
||
+
|
||
+
|
||
+def test_process_no_rpms_mgs(monkeypatch):
|
||
+ curr_actor_mocked = CurrentActorMocked(src_distro='centos', dst_distro='rhel')
|
||
+ monkeypatch.setattr(api, "current_actor", curr_actor_mocked)
|
||
+ produce_mock = produce_mocked()
|
||
+ monkeypatch.setattr(api, "produce", produce_mock)
|
||
+
|
||
+ with pytest.raises(
|
||
+ StopActorExecutionError,
|
||
+ match="Did not receive DistributionSignedRPM message"
|
||
+ ):
|
||
+ swapdistropackages.process()
|
||
+
|
||
+ assert produce_mock.called == 0
|
||
+
|
||
+
|
||
+@pytest.mark.parametrize(
|
||
+ "pattern, expect",
|
||
+ [
|
||
+ (
|
||
+ "centos-release-*",
|
||
+ [
|
||
+ "centos-release-samba420",
|
||
+ "centos-release-okd-4",
|
||
+ "centos-release-opstools",
|
||
+ ],
|
||
+ ),
|
||
+ (
|
||
+ "almalinux-release-*",
|
||
+ [
|
||
+ "almalinux-release-testing",
|
||
+ "almalinux-release-devel",
|
||
+ ],
|
||
+ ),
|
||
+ (
|
||
+ "epel-release",
|
||
+ ["epel-release"],
|
||
+ ),
|
||
+ ],
|
||
+)
|
||
+def test_glob_match_rpms(pattern, expect):
|
||
+ """
|
||
+ A simple test making sure the fnmatch works correctly for RPM names
|
||
+ since it was originally meant for filepaths.
|
||
+ """
|
||
+
|
||
+ TEST_GLOB_RPMS = [
|
||
+ "centos-release-samba420",
|
||
+ "centos-stream-repos",
|
||
+ "centos-release-okd-4",
|
||
+ "centos-release",
|
||
+ "centos-release-opstools",
|
||
+ "release-centos",
|
||
+ "almalinux-release-devel",
|
||
+ "almalinux-release",
|
||
+ "almalinux-repos",
|
||
+ "release-almalinux",
|
||
+ "vim",
|
||
+ "epel-release",
|
||
+ "almalinux-release-testing",
|
||
+ "gcc-devel"
|
||
+ ]
|
||
+ actual = swapdistropackages._glob_match_rpms(TEST_GLOB_RPMS, pattern)
|
||
+ assert set(actual) == set(expect)
|
||
diff --git a/repos/system_upgrade/common/actors/distributionsignedrpmscanner/actor.py b/repos/system_upgrade/common/actors/distributionsignedrpmscanner/actor.py
|
||
index 003f3fc5..9e7bbf4a 100644
|
||
--- a/repos/system_upgrade/common/actors/distributionsignedrpmscanner/actor.py
|
||
+++ b/repos/system_upgrade/common/actors/distributionsignedrpmscanner/actor.py
|
||
@@ -1,6 +1,6 @@
|
||
from leapp.actors import Actor
|
||
from leapp.libraries.actor import distributionsignedrpmscanner
|
||
-from leapp.models import DistributionSignedRPM, InstalledRPM, InstalledUnsignedRPM, ThirdPartyRPM
|
||
+from leapp.models import DistributionSignedRPM, InstalledRPM, InstalledUnsignedRPM, ThirdPartyRPM, VendorSignatures
|
||
from leapp.tags import FactsPhaseTag, IPUWorkflowTag
|
||
from leapp.utils.deprecation import suppress_deprecation
|
||
|
||
@@ -8,7 +8,7 @@ from leapp.utils.deprecation import suppress_deprecation
|
||
@suppress_deprecation(InstalledUnsignedRPM)
|
||
class DistributionSignedRpmScanner(Actor):
|
||
"""
|
||
- Provide data about distribution signed & third-party RPM packages.
|
||
+ Provide data about distribution signed & third-party plus vendors RPM packages.
|
||
|
||
For various checks and actions done during the upgrade it's important to
|
||
know what packages are signed by GPG keys of the installed linux system
|
||
@@ -22,11 +22,18 @@ class DistributionSignedRpmScanner(Actor):
|
||
common/files/distro/<distro>/gpg_signatures.json
|
||
where <distro> is distribution ID of the installed system (e.g. centos, rhel).
|
||
|
||
- If the file for the installed distribution is not found, end with error.
|
||
+ Fingerprints of vendors GPG keys are stored under
|
||
+ /etc/leapp/files/vendors.d/<vendor>.sigs
|
||
+ where <vendor> is name of the vendor (e.g. mariadb, postgresql).
|
||
+
|
||
+ The "Distribution" in the name of the actor is a historical artifact - the actor
|
||
+ is used for both distribution and all vendors present in config files.
|
||
+
|
||
+ If the file for the installed distribution is not find, end with error.
|
||
"""
|
||
|
||
name = 'distribution_signed_rpm_scanner'
|
||
- consumes = (InstalledRPM,)
|
||
+ consumes = (InstalledRPM, VendorSignatures)
|
||
produces = (DistributionSignedRPM, InstalledUnsignedRPM, ThirdPartyRPM)
|
||
tags = (IPUWorkflowTag, FactsPhaseTag)
|
||
|
||
diff --git a/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py b/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py
|
||
index f42909f0..6383a56f 100644
|
||
--- a/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py
|
||
+++ b/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py
|
||
@@ -1,17 +1,117 @@
|
||
+import os
|
||
+import re
|
||
+
|
||
+from leapp.libraries.stdlib import run, api
|
||
from leapp.actors import Actor
|
||
-from leapp.libraries.common import efi_reboot_fix
|
||
+from leapp.models import InstalledTargetKernelVersion, KernelCmdlineArg, FirmwareFacts, MountEntry
|
||
from leapp.tags import FinalizationPhaseTag, IPUWorkflowTag
|
||
+from leapp.exceptions import StopActorExecutionError
|
||
|
||
|
||
class EfiFinalizationFix(Actor):
|
||
"""
|
||
- Adjust EFI boot entry for final reboot
|
||
+ Ensure that EFI boot order is updated, which is particularly necessary
|
||
+ when upgrading to a different OS distro. Also rebuilds grub config
|
||
+ if necessary.
|
||
"""
|
||
|
||
name = 'efi_finalization_fix'
|
||
- consumes = ()
|
||
+ consumes = (KernelCmdlineArg, InstalledTargetKernelVersion, FirmwareFacts, MountEntry)
|
||
produces = ()
|
||
- tags = (FinalizationPhaseTag, IPUWorkflowTag)
|
||
+ tags = (FinalizationPhaseTag.Before, IPUWorkflowTag)
|
||
|
||
def process(self):
|
||
- efi_reboot_fix.maybe_emit_updated_boot_entry()
|
||
+ is_system_efi = False
|
||
+ ff = next(self.consume(FirmwareFacts), None)
|
||
+
|
||
+ dirname = {
|
||
+ 'AlmaLinux': 'almalinux',
|
||
+ 'CentOS Linux': 'centos',
|
||
+ 'CentOS Stream': 'centos',
|
||
+ 'Oracle Linux Server': 'redhat',
|
||
+ 'Red Hat Enterprise Linux': 'redhat',
|
||
+ 'Rocky Linux': 'rocky',
|
||
+ 'Scientific Linux': 'redhat',
|
||
+ }
|
||
+
|
||
+ efi_shimname_dict = {
|
||
+ 'x86_64': 'shimx64.efi',
|
||
+ 'aarch64': 'shimaa64.efi'
|
||
+ }
|
||
+
|
||
+ def devparts(dev):
|
||
+ """
|
||
+ NVMe block devices aren't named like SCSI/ATA/etc block devices and must be parsed differently.
|
||
+ SCSI/ATA/etc devices have a syntax resembling /dev/sdb4 for the 4th partition on the 2nd disk.
|
||
+ NVMe devices have a syntax resembling /dev/nvme0n2p4 for the 4th partition on the 2nd disk.
|
||
+ """
|
||
+ if '/dev/nvme' in dev:
|
||
+ """
|
||
+ NVMe
|
||
+ """
|
||
+ part = next(re.finditer(r'p\d+$', dev)).group(0)
|
||
+ dev = dev[:-len(part)]
|
||
+ part = part[1:]
|
||
+ else:
|
||
+ """
|
||
+ Non-NVMe (SCSI, ATA, etc)
|
||
+ """
|
||
+ part = next(re.finditer(r'\d+$', dev)).group(0)
|
||
+ dev = dev[:-len(part)]
|
||
+ return [dev, part];
|
||
+
|
||
+ with open('/etc/system-release', 'r') as sr:
|
||
+ release_line = next(line for line in sr if 'release' in line)
|
||
+ distro = release_line.split(' release ', 1)[0]
|
||
+
|
||
+ efi_bootentry_label = distro
|
||
+ distro_dir = dirname.get(distro, 'default')
|
||
+ shim_filename = efi_shimname_dict.get(api.current_actor().configuration.architecture, 'shimx64.efi')
|
||
+
|
||
+ shim_path = '/boot/efi/EFI/' + distro_dir + '/' + shim_filename
|
||
+ grub_cfg_path = '/boot/efi/EFI/' + distro_dir + '/grub.cfg'
|
||
+ bootmgr_path = '\\EFI\\' + distro_dir + '\\' + shim_filename
|
||
+
|
||
+ has_efibootmgr = os.path.exists('/sbin/efibootmgr')
|
||
+ has_shim = os.path.exists(shim_path)
|
||
+ has_grub_cfg = os.path.exists(grub_cfg_path)
|
||
+
|
||
+ if not ff:
|
||
+ raise StopActorExecutionError(
|
||
+ 'Could not identify system firmware',
|
||
+ details={'details': 'Actor did not receive FirmwareFacts message.'}
|
||
+ )
|
||
+
|
||
+ if not has_efibootmgr:
|
||
+ return
|
||
+
|
||
+ for fact in self.consume(FirmwareFacts):
|
||
+ if fact.firmware == 'efi':
|
||
+ is_system_efi = True
|
||
+ break
|
||
+
|
||
+ if is_system_efi and has_shim:
|
||
+ efidevlist = []
|
||
+ with open('/proc/mounts', 'r') as fp:
|
||
+ for line in fp:
|
||
+ if '/boot/efi' in line:
|
||
+ efidevpath = line.split(' ', 1)[0]
|
||
+ efidevpart = efidevpath.split('/')[-1]
|
||
+ if os.path.exists('/proc/mdstat'):
|
||
+ with open('/proc/mdstat', 'r') as mds:
|
||
+ for line in mds:
|
||
+ if line.startswith(efidevpart):
|
||
+ mddev = line.split(' ')
|
||
+ for md in mddev:
|
||
+ if '[' in md:
|
||
+ efimd = md.split('[', 1)[0]
|
||
+ efidp = efidevpath.replace(efidevpart, efimd)
|
||
+ efidevlist.append(efidp)
|
||
+ if len(efidevlist) == 0:
|
||
+ efidevlist.append(efidevpath)
|
||
+ for devpath in efidevlist:
|
||
+ efidev, efipart = devparts(devpath)
|
||
+ run(['/sbin/efibootmgr', '-c', '-d', efidev, '-p', efipart, '-l', bootmgr_path, '-L', efi_bootentry_label])
|
||
+
|
||
+ if not has_grub_cfg:
|
||
+ run(['/sbin/grub2-mkconfig', '-o', grub_cfg_path])
|
||
diff --git a/repos/system_upgrade/common/actors/filterrpmtransactionevents/actor.py b/repos/system_upgrade/common/actors/filterrpmtransactionevents/actor.py
|
||
index 582a5821..18f2c33f 100644
|
||
--- a/repos/system_upgrade/common/actors/filterrpmtransactionevents/actor.py
|
||
+++ b/repos/system_upgrade/common/actors/filterrpmtransactionevents/actor.py
|
||
@@ -32,6 +32,7 @@ class FilterRpmTransactionTasks(Actor):
|
||
to_remove = set()
|
||
to_keep = set()
|
||
to_upgrade = set()
|
||
+ to_reinstall = set()
|
||
modules_to_enable = {}
|
||
modules_to_reset = {}
|
||
for event in self.consume(RpmTransactionTasks, PESRpmTransactionTasks):
|
||
@@ -39,13 +40,14 @@ class FilterRpmTransactionTasks(Actor):
|
||
to_install.update(event.to_install)
|
||
to_remove.update(installed_pkgs.intersection(event.to_remove))
|
||
to_keep.update(installed_pkgs.intersection(event.to_keep))
|
||
+ to_reinstall.update(installed_pkgs.intersection(event.to_reinstall))
|
||
modules_to_enable.update({'{}:{}'.format(m.name, m.stream): m for m in event.modules_to_enable})
|
||
modules_to_reset.update({'{}:{}'.format(m.name, m.stream): m for m in event.modules_to_reset})
|
||
|
||
to_remove.difference_update(to_keep)
|
||
|
||
# run upgrade for the rest of RH signed pkgs which we do not have rule for
|
||
- to_upgrade = installed_pkgs - (to_install | to_remove)
|
||
+ to_upgrade = installed_pkgs - (to_install | to_remove | to_reinstall)
|
||
|
||
self.produce(FilteredRpmTransactionTasks(
|
||
local_rpms=list(local_rpms),
|
||
@@ -53,5 +55,6 @@ class FilterRpmTransactionTasks(Actor):
|
||
to_remove=list(to_remove),
|
||
to_keep=list(to_keep),
|
||
to_upgrade=list(to_upgrade),
|
||
+ to_reinstall=list(to_reinstall),
|
||
modules_to_reset=list(modules_to_reset.values()),
|
||
modules_to_enable=list(modules_to_enable.values())))
|
||
diff --git a/repos/system_upgrade/common/actors/initramfs/mount_units_generator/files/bundled_units/boot.mount b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/files/bundled_units/boot.mount
|
||
index 869c5e4c..531f6c75 100644
|
||
--- a/repos/system_upgrade/common/actors/initramfs/mount_units_generator/files/bundled_units/boot.mount
|
||
+++ b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/files/bundled_units/boot.mount
|
||
@@ -1,8 +1,8 @@
|
||
[Unit]
|
||
DefaultDependencies=no
|
||
Before=local-fs.target
|
||
-After=sysroot-boot.target
|
||
-Requires=sysroot-boot.target
|
||
+After=sysroot-boot.mount
|
||
+Requires=sysroot-boot.mount
|
||
|
||
[Mount]
|
||
What=/sysroot/boot
|
||
diff --git a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/actor.py b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/actor.py
|
||
index d99bab48..c0c93036 100644
|
||
--- a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/actor.py
|
||
+++ b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/actor.py
|
||
@@ -6,6 +6,7 @@ from leapp.models import (
|
||
BootContent,
|
||
FIPSInfo,
|
||
LiveModeConfig,
|
||
+ LVMConfig,
|
||
TargetOSInstallationImage,
|
||
TargetUserSpaceInfo,
|
||
TargetUserSpaceUpgradeTasks,
|
||
@@ -31,6 +32,7 @@ class UpgradeInitramfsGenerator(Actor):
|
||
consumes = (
|
||
FIPSInfo,
|
||
LiveModeConfig,
|
||
+ LVMConfig,
|
||
RequiredUpgradeInitramPackages, # deprecated
|
||
TargetOSInstallationImage,
|
||
TargetUserSpaceInfo,
|
||
diff --git a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/libraries/upgradeinitramfsgenerator.py b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/libraries/upgradeinitramfsgenerator.py
|
||
index f7e4a8af..03447b7c 100644
|
||
--- a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/libraries/upgradeinitramfsgenerator.py
|
||
+++ b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/libraries/upgradeinitramfsgenerator.py
|
||
@@ -12,6 +12,7 @@ from leapp.models import UpgradeDracutModule # deprecated
|
||
from leapp.models import (
|
||
BootContent,
|
||
LiveModeConfig,
|
||
+ LVMConfig,
|
||
TargetOSInstallationImage,
|
||
TargetUserSpaceInfo,
|
||
TargetUserSpaceUpgradeTasks,
|
||
@@ -193,6 +194,7 @@ def _copy_files(context, files):
|
||
context.remove_tree(file_task.dst)
|
||
context.copytree_to(file_task.src, file_task.dst)
|
||
else:
|
||
+ context.makedirs(os.path.dirname(file_task.dst))
|
||
context.copy_to(file_task.src, file_task.dst)
|
||
|
||
|
||
@@ -363,20 +365,29 @@ def generate_initram_disk(context):
|
||
def fmt_module_list(module_list):
|
||
return ','.join(mod.name for mod in module_list)
|
||
|
||
+ env_variables = [
|
||
+ 'LEAPP_KERNEL_VERSION={kernel_version}',
|
||
+ 'LEAPP_ADD_DRACUT_MODULES="{dracut_modules}"',
|
||
+ 'LEAPP_KERNEL_ARCH={arch}',
|
||
+ 'LEAPP_ADD_KERNEL_MODULES="{kernel_modules}"',
|
||
+ 'LEAPP_DRACUT_INSTALL_FILES="{files}"'
|
||
+ ]
|
||
+
|
||
+ if next(api.consume(LVMConfig), None):
|
||
+ env_variables.append('LEAPP_DRACUT_LVMCONF="1"')
|
||
+
|
||
+ env_variables = ' '.join(env_variables)
|
||
+ env_variables = env_variables.format(
|
||
+ kernel_version=_get_target_kernel_version(context),
|
||
+ dracut_modules=fmt_module_list(initramfs_includes.dracut_modules),
|
||
+ kernel_modules=fmt_module_list(initramfs_includes.kernel_modules),
|
||
+ arch=api.current_actor().configuration.architecture,
|
||
+ files=' '.join(initramfs_includes.files)
|
||
+ )
|
||
+ cmd = os.path.join('/', INITRAM_GEN_SCRIPT_NAME)
|
||
+
|
||
# FIXME: issue #376
|
||
- context.call([
|
||
- '/bin/sh', '-c',
|
||
- 'LEAPP_KERNEL_VERSION={kernel_version} '
|
||
- 'LEAPP_ADD_DRACUT_MODULES="{dracut_modules}" LEAPP_KERNEL_ARCH={arch} '
|
||
- 'LEAPP_ADD_KERNEL_MODULES="{kernel_modules}" '
|
||
- 'LEAPP_DRACUT_INSTALL_FILES="{files}" {cmd}'.format(
|
||
- kernel_version=_get_target_kernel_version(context),
|
||
- dracut_modules=fmt_module_list(initramfs_includes.dracut_modules),
|
||
- kernel_modules=fmt_module_list(initramfs_includes.kernel_modules),
|
||
- arch=api.current_actor().configuration.architecture,
|
||
- files=' '.join(initramfs_includes.files),
|
||
- cmd=os.path.join('/', INITRAM_GEN_SCRIPT_NAME))
|
||
- ], env=env)
|
||
+ context.call(['/bin/sh', '-c', f'{env_variables} {cmd}'], env=env)
|
||
|
||
boot_files_info = copy_boot_files(context)
|
||
return boot_files_info
|
||
diff --git a/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py
|
||
index 32e4527b..1e595e9a 100644
|
||
--- a/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py
|
||
+++ b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py
|
||
@@ -152,11 +152,11 @@ def _report(title, summary, keys, inhibitor=False):
|
||
)
|
||
hint = (
|
||
'Check the path to the listed GPG keys is correct, the keys are valid and'
|
||
- ' import them into the host RPM DB or store them inside the {} directory'
|
||
+ ' import them into the host RPM DB or store them inside on of the {} directories'
|
||
' prior the upgrade.'
|
||
' If you want to proceed the in-place upgrade without checking any RPM'
|
||
' signatures, execute leapp with the `--nogpgcheck` option.'
|
||
- .format(get_path_to_gpg_certs())
|
||
+ .format(','.format(get_path_to_gpg_certs()))
|
||
)
|
||
groups = [reporting.Groups.REPOSITORY]
|
||
if inhibitor:
|
||
@@ -188,7 +188,7 @@ def _report_missing_keys(keys):
|
||
summary = (
|
||
'Some of the target repositories require GPG keys that are not installed'
|
||
' in the current RPM DB or are not stored in the {trust_dir} directory.'
|
||
- .format(trust_dir=get_path_to_gpg_certs())
|
||
+ .format(trust_dir=','.join(get_path_to_gpg_certs()))
|
||
)
|
||
_report('Detected unknown GPG keys for target system repositories', summary, keys, True)
|
||
|
||
@@ -262,11 +262,12 @@ def _report_repos_missing_keys(repos):
|
||
|
||
|
||
def register_dnfworkaround():
|
||
- api.produce(DNFWorkaround(
|
||
- display_name='import trusted gpg keys to RPM DB',
|
||
- script_path=api.current_actor().get_common_tool_path('importrpmgpgkeys'),
|
||
- script_args=[get_path_to_gpg_certs()],
|
||
- ))
|
||
+ for trust_certs_dir in get_path_to_gpg_certs():
|
||
+ api.produce(DNFWorkaround(
|
||
+ display_name='import trusted gpg keys to RPM DB',
|
||
+ script_path=api.current_actor().get_common_tool_path('importrpmgpgkeys'),
|
||
+ script_args=[trust_certs_dir],
|
||
+ ))
|
||
|
||
|
||
@suppress_deprecation(TMPTargetRepositoriesFacts)
|
||
diff --git a/repos/system_upgrade/common/actors/multipath/config_reader/actor.py b/repos/system_upgrade/common/actors/multipath/config_reader/actor.py
|
||
new file mode 100644
|
||
index 00000000..a7238a25
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/actors/multipath/config_reader/actor.py
|
||
@@ -0,0 +1,28 @@
|
||
+from leapp.actors import Actor
|
||
+from leapp.libraries.actor import multipathconfread
|
||
+from leapp.models import DistributionSignedRPM, MultipathConfFacts8to9, MultipathInfo
|
||
+from leapp.tags import FactsPhaseTag, IPUWorkflowTag
|
||
+
|
||
+
|
||
+class MultipathConfRead(Actor):
|
||
+ """
|
||
+ Read multipath configuration files and extract the necessary information
|
||
+
|
||
+ Related files:
|
||
+ - /etc/multipath.conf
|
||
+ - /etc/multipath/ - any files inside the directory
|
||
+ - /etc/xdrdevices.conf
|
||
+
|
||
+ Two kinds of messages are generated:
|
||
+ - MultipathInfo - general information about multipath, version agnostic
|
||
+ - upgrade-path-specific messages such as MultipathConfFacts8to9 (produced only
|
||
+ when upgrading from 8 to 9)
|
||
+ """
|
||
+
|
||
+ name = 'multipath_conf_read'
|
||
+ consumes = (DistributionSignedRPM,)
|
||
+ produces = (MultipathInfo, MultipathConfFacts8to9)
|
||
+ tags = (FactsPhaseTag, IPUWorkflowTag)
|
||
+
|
||
+ def process(self):
|
||
+ multipathconfread.scan_and_emit_multipath_info()
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfread/libraries/multipathconfread.py b/repos/system_upgrade/common/actors/multipath/config_reader/libraries/multipathconfread.py
|
||
similarity index 54%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfread/libraries/multipathconfread.py
|
||
rename to repos/system_upgrade/common/actors/multipath/config_reader/libraries/multipathconfread.py
|
||
index 5b1cef50..e733500b 100644
|
||
--- a/repos/system_upgrade/el8toel9/actors/multipathconfread/libraries/multipathconfread.py
|
||
+++ b/repos/system_upgrade/common/actors/multipath/config_reader/libraries/multipathconfread.py
|
||
@@ -2,15 +2,10 @@ import errno
|
||
import os
|
||
|
||
from leapp.libraries.common import multipathutil
|
||
+from leapp.libraries.common.config.version import get_source_major_version
|
||
from leapp.libraries.common.rpms import has_package
|
||
from leapp.libraries.stdlib import api
|
||
-from leapp.models import (
|
||
- CopyFile,
|
||
- DistributionSignedRPM,
|
||
- MultipathConfFacts8to9,
|
||
- MultipathConfig8to9,
|
||
- TargetUserSpaceUpgradeTasks
|
||
-)
|
||
+from leapp.models import DistributionSignedRPM, MultipathConfFacts8to9, MultipathConfig8to9, MultipathInfo
|
||
|
||
_regexes = ('vendor', 'product', 'revision', 'product_blacklist', 'devnode',
|
||
'wwid', 'property', 'protocol')
|
||
@@ -88,46 +83,30 @@ def is_processable():
|
||
return res
|
||
|
||
|
||
-def get_multipath_conf_facts(config_file='/etc/multipath.conf'):
|
||
- res_configs = []
|
||
- conf = _parse_config(config_file)
|
||
- if not conf:
|
||
- return None
|
||
- res_configs.append(conf)
|
||
- if conf.config_dir:
|
||
- res_configs.extend(_parse_config_dir(conf.config_dir))
|
||
- else:
|
||
- res_configs.extend(_parse_config_dir('/etc/multipath/conf.d'))
|
||
- return MultipathConfFacts8to9(configs=res_configs)
|
||
-
|
||
+def scan_and_emit_multipath_info(default_config_path='/etc/multipath.conf'):
|
||
+ if not is_processable():
|
||
+ return
|
||
|
||
-def produce_copy_to_target_task():
|
||
- """
|
||
- Produce task to copy files into the target userspace
|
||
+ primary_config = _parse_config(default_config_path)
|
||
+ if not primary_config:
|
||
+ api.current_logger().debug(
|
||
+ 'Primary multipath config /etc/multipath.conf is not present - multipath '
|
||
+ 'is not used.'
|
||
+ )
|
||
+ mpath_info = MultipathInfo(is_configured=False)
|
||
+ api.produce(mpath_info)
|
||
+ return
|
||
|
||
- The multipath configuration files are needed when the upgrade init ramdisk
|
||
- is generated to ensure we are able to boot into the upgrade environment
|
||
- and start the upgrade process itself. By this msg it's told that these
|
||
- files/dirs will be available when the upgrade init ramdisk is generated.
|
||
+ multipath_info = MultipathInfo(
|
||
+ is_configured=True,
|
||
+ config_dir=primary_config.config_dir or '/etc/multipath/conf.d'
|
||
+ )
|
||
+ api.produce(multipath_info)
|
||
|
||
- See TargetUserSpaceUpgradeTasks and UpgradeInitramfsTasks for more info.
|
||
- """
|
||
- # TODO(pstodulk): move the function to the multipathconfcheck actor
|
||
- # and get rid of the hardcoded stuff.
|
||
- # - The current behaviour looks from the user POV same as before this
|
||
- # * commit. I am going to keep the proper fix for additional PR as we do
|
||
- # * not want to make the current PR even more complex than now and the solution
|
||
- # * is not so trivial.
|
||
- # - As well, I am missing some information around xDR devices, which are
|
||
- # * possibly not handled correctly (maybe missing some executables?..)
|
||
- # * Update: practically we do not have enough info about xDR drivers, but
|
||
- # * discussed with Ben Marzinski, as the multipath dracut module includes
|
||
- # * the xDR utils stuff, we should handle it in the same way.
|
||
- # * See xdrgetuid, xdrgetinfo (these two utils are now missing in our initramfs)
|
||
- copy_files = []
|
||
- for fname in ['/etc/multipath.conf', '/etc/multipath', '/etc/xdrdevices.conf']:
|
||
- if os.path.exists(fname):
|
||
- copy_files.append(CopyFile(src=fname))
|
||
+ # Handle upgrade-path-specific config actions
|
||
+ if get_source_major_version() == '8':
|
||
+ secondary_configs = _parse_config_dir(multipath_info.config_dir)
|
||
+ all_configs = [primary_config] + secondary_configs
|
||
|
||
- if copy_files:
|
||
- api.produce(TargetUserSpaceUpgradeTasks(copy_files=copy_files))
|
||
+ config_facts_for_8to9 = MultipathConfFacts8to9(configs=all_configs)
|
||
+ api.produce(config_facts_for_8to9)
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/all_the_things.conf b/repos/system_upgrade/common/actors/multipath/config_reader/tests/files/all_the_things.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/all_the_things.conf
|
||
rename to repos/system_upgrade/common/actors/multipath/config_reader/tests/files/all_the_things.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/allow_usb.conf b/repos/system_upgrade/common/actors/multipath/config_reader/tests/files/allow_usb.conf
|
||
similarity index 99%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/allow_usb.conf
|
||
rename to repos/system_upgrade/common/actors/multipath/config_reader/tests/files/allow_usb.conf
|
||
index 57b6f97b..39681b85 100644
|
||
--- a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/allow_usb.conf
|
||
+++ b/repos/system_upgrade/common/actors/multipath/config_reader/tests/files/allow_usb.conf
|
||
@@ -1074,5 +1074,5 @@ multipaths {
|
||
multipath {
|
||
wwid "33333333000001388"
|
||
alias "foo"
|
||
- }
|
||
+ }
|
||
}
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/complicated.conf b/repos/system_upgrade/common/actors/multipath/config_reader/tests/files/complicated.conf
|
||
similarity index 99%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/complicated.conf
|
||
rename to repos/system_upgrade/common/actors/multipath/config_reader/tests/files/complicated.conf
|
||
index 23d93ecf..c889461c 100644
|
||
--- a/repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/complicated.conf
|
||
+++ b/repos/system_upgrade/common/actors/multipath/config_reader/tests/files/complicated.conf
|
||
@@ -1103,5 +1103,5 @@ multipaths {
|
||
multipath {
|
||
wwid "33333333000001388"
|
||
alias "foo"
|
||
- }
|
||
+ }
|
||
}
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/conf1.d/empty.conf b/repos/system_upgrade/common/actors/multipath/config_reader/tests/files/conf1.d/empty.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/conf1.d/empty.conf
|
||
rename to repos/system_upgrade/common/actors/multipath/config_reader/tests/files/conf1.d/empty.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/conf1.d/nothing_important.conf b/repos/system_upgrade/common/actors/multipath/config_reader/tests/files/conf1.d/nothing_important.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/conf1.d/nothing_important.conf
|
||
rename to repos/system_upgrade/common/actors/multipath/config_reader/tests/files/conf1.d/nothing_important.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/conf2.d/all_true.conf b/repos/system_upgrade/common/actors/multipath/config_reader/tests/files/conf2.d/all_true.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/conf2.d/all_true.conf
|
||
rename to repos/system_upgrade/common/actors/multipath/config_reader/tests/files/conf2.d/all_true.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/conf3.d/README b/repos/system_upgrade/common/actors/multipath/config_reader/tests/files/conf3.d/README
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/conf3.d/README
|
||
rename to repos/system_upgrade/common/actors/multipath/config_reader/tests/files/conf3.d/README
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/converted_the_things.conf b/repos/system_upgrade/common/actors/multipath/config_reader/tests/files/converted_the_things.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/converted_the_things.conf
|
||
rename to repos/system_upgrade/common/actors/multipath/config_reader/tests/files/converted_the_things.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/default_rhel8.conf b/repos/system_upgrade/common/actors/multipath/config_reader/tests/files/default_rhel8.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/default_rhel8.conf
|
||
rename to repos/system_upgrade/common/actors/multipath/config_reader/tests/files/default_rhel8.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/empty.conf b/repos/system_upgrade/common/actors/multipath/config_reader/tests/files/empty.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/empty.conf
|
||
rename to repos/system_upgrade/common/actors/multipath/config_reader/tests/files/empty.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/empty_dir.conf b/repos/system_upgrade/common/actors/multipath/config_reader/tests/files/empty_dir.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/empty_dir.conf
|
||
rename to repos/system_upgrade/common/actors/multipath/config_reader/tests/files/empty_dir.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/missing_dir.conf b/repos/system_upgrade/common/actors/multipath/config_reader/tests/files/missing_dir.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/missing_dir.conf
|
||
rename to repos/system_upgrade/common/actors/multipath/config_reader/tests/files/missing_dir.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/no_defaults.conf b/repos/system_upgrade/common/actors/multipath/config_reader/tests/files/no_defaults.conf
|
||
similarity index 99%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/no_defaults.conf
|
||
rename to repos/system_upgrade/common/actors/multipath/config_reader/tests/files/no_defaults.conf
|
||
index f7885ca8..ec8ddee2 100644
|
||
--- a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/no_defaults.conf
|
||
+++ b/repos/system_upgrade/common/actors/multipath/config_reader/tests/files/no_defaults.conf
|
||
@@ -1045,5 +1045,5 @@ multipaths {
|
||
multipath {
|
||
wwid "33333333000001388"
|
||
alias "foo"
|
||
- }
|
||
+ }
|
||
}
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/no_foreign.conf b/repos/system_upgrade/common/actors/multipath/config_reader/tests/files/no_foreign.conf
|
||
similarity index 99%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/no_foreign.conf
|
||
rename to repos/system_upgrade/common/actors/multipath/config_reader/tests/files/no_foreign.conf
|
||
index 9525731c..87f9a24c 100644
|
||
--- a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/no_foreign.conf
|
||
+++ b/repos/system_upgrade/common/actors/multipath/config_reader/tests/files/no_foreign.conf
|
||
@@ -1085,5 +1085,5 @@ multipaths {
|
||
multipath {
|
||
wwid "33333333000001388"
|
||
alias "foo"
|
||
- }
|
||
+ }
|
||
}
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/not_set_dir.conf b/repos/system_upgrade/common/actors/multipath/config_reader/tests/files/not_set_dir.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/not_set_dir.conf
|
||
rename to repos/system_upgrade/common/actors/multipath/config_reader/tests/files/not_set_dir.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/set_in_dir.conf b/repos/system_upgrade/common/actors/multipath/config_reader/tests/files/set_in_dir.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/set_in_dir.conf
|
||
rename to repos/system_upgrade/common/actors/multipath/config_reader/tests/files/set_in_dir.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/two_defaults.conf b/repos/system_upgrade/common/actors/multipath/config_reader/tests/files/two_defaults.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/two_defaults.conf
|
||
rename to repos/system_upgrade/common/actors/multipath/config_reader/tests/files/two_defaults.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfread/tests/test_multipath_conf_read_8to9.py b/repos/system_upgrade/common/actors/multipath/config_reader/tests/test_multipath_conf_read_8to9.py
|
||
similarity index 58%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfread/tests/test_multipath_conf_read_8to9.py
|
||
rename to repos/system_upgrade/common/actors/multipath/config_reader/tests/test_multipath_conf_read_8to9.py
|
||
index 9134e1d7..e593a857 100644
|
||
--- a/repos/system_upgrade/el8toel9/actors/multipathconfread/tests/test_multipath_conf_read_8to9.py
|
||
+++ b/repos/system_upgrade/common/actors/multipath/config_reader/tests/test_multipath_conf_read_8to9.py
|
||
@@ -1,7 +1,11 @@
|
||
import os
|
||
|
||
+import pytest
|
||
+
|
||
from leapp.libraries.actor import multipathconfread
|
||
-from leapp.models import MultipathConfig8to9
|
||
+from leapp.libraries.common.testutils import CurrentActorMocked, produce_mocked
|
||
+from leapp.libraries.stdlib import api
|
||
+from leapp.models import MultipathConfFacts8to9, MultipathConfig8to9, MultipathInfo
|
||
|
||
TEST_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'files')
|
||
|
||
@@ -100,45 +104,71 @@ def test_parse_config():
|
||
assert_config(config, expected_data)
|
||
|
||
|
||
-def test_get_facts_missing_dir(monkeypatch):
|
||
+@pytest.mark.parametrize(
|
||
+ ('primary_config', 'expected_configs'),
|
||
+ [
|
||
+ ('missing_dir.conf', [missing_dir_conf]),
|
||
+ ('empty_dir.conf', [empty_dir_conf]),
|
||
+ ('not_set_dir.conf', [not_set_dir_conf, empty1_conf, nothing_important_conf]),
|
||
+ ('set_in_dir.conf', [set_in_dir_conf, all_true_conf]),
|
||
+ ]
|
||
+)
|
||
+def test_get_facts_missing_dir(monkeypatch, primary_config, expected_configs):
|
||
monkeypatch.setattr(multipathconfread, '_parse_config_orig', multipathconfread._parse_config, raising=False)
|
||
monkeypatch.setattr(multipathconfread, '_parse_config', mock_parse_config)
|
||
+ monkeypatch.setattr(multipathconfread, 'is_processable', lambda: True)
|
||
|
||
- facts = multipathconfread.get_multipath_conf_facts(os.path.join(TEST_DIR, 'missing_dir.conf'))
|
||
- assert facts
|
||
- assert len(facts.configs) == 1
|
||
- assert_config(facts.configs[0], missing_dir_conf)
|
||
+ produce_mock = produce_mocked()
|
||
+ monkeypatch.setattr(api, 'produce', produce_mock)
|
||
|
||
+ actor_mock = CurrentActorMocked(src_ver='8.10', dst_ver='9.6')
|
||
+ monkeypatch.setattr(api, 'current_actor', actor_mock)
|
||
|
||
-def test_get_facts_empty_dir(monkeypatch):
|
||
- monkeypatch.setattr(multipathconfread, '_parse_config_orig', multipathconfread._parse_config, raising=False)
|
||
- monkeypatch.setattr(multipathconfread, '_parse_config', mock_parse_config)
|
||
+ config_to_use = os.path.join(TEST_DIR, primary_config)
|
||
+ multipathconfread.scan_and_emit_multipath_info(config_to_use)
|
||
|
||
- facts = multipathconfread.get_multipath_conf_facts(os.path.join(TEST_DIR, 'empty_dir.conf'))
|
||
- assert facts
|
||
- assert len(facts.configs) == 1
|
||
- assert_config(facts.configs[0], empty_dir_conf)
|
||
+ assert produce_mock.called
|
||
|
||
+ general_info = [msg for msg in produce_mock.model_instances if isinstance(msg, MultipathInfo)]
|
||
+ assert len(general_info) == 1
|
||
+ assert general_info[0].is_configured
|
||
+ # general_info[0].config_dir is verified together with the MultipathConfFacts8to9 messages below
|
||
|
||
-def test_get_facts_not_set_dir(monkeypatch):
|
||
- monkeypatch.setattr(multipathconfread, '_parse_config_orig', multipathconfread._parse_config, raising=False)
|
||
- monkeypatch.setattr(multipathconfread, '_parse_config', mock_parse_config)
|
||
+ msgs = [msg for msg in produce_mock.model_instances if isinstance(msg, MultipathConfFacts8to9)]
|
||
+ assert len(msgs) == 1
|
||
|
||
- expected_configs = (not_set_dir_conf, empty1_conf, nothing_important_conf)
|
||
- facts = multipathconfread.get_multipath_conf_facts(os.path.join(TEST_DIR, 'not_set_dir.conf'))
|
||
- assert facts
|
||
- assert len(facts.configs) == 3
|
||
- for i in range(len(facts.configs)):
|
||
- assert_config(facts.configs[i], expected_configs[i])
|
||
+ actual_configs = msgs[0].configs
|
||
+ assert len(actual_configs) == len(expected_configs)
|
||
|
||
+ for actual_config, expected_config in zip(actual_configs, expected_configs):
|
||
+ assert_config(actual_config, expected_config)
|
||
|
||
-def test_get_facts_set_in_dir(monkeypatch):
|
||
- monkeypatch.setattr(multipathconfread, '_parse_config_orig', multipathconfread._parse_config, raising=False)
|
||
- monkeypatch.setattr(multipathconfread, '_parse_config', mock_parse_config)
|
||
|
||
- expected_configs = (set_in_dir_conf, all_true_conf)
|
||
- facts = multipathconfread.get_multipath_conf_facts(os.path.join(TEST_DIR, 'set_in_dir.conf'))
|
||
- assert facts
|
||
- assert len(facts.configs) == 2
|
||
- for i in range(len(facts.configs)):
|
||
- assert_config(facts.configs[i], expected_configs[i])
|
||
+def test_only_general_info_is_produced_on_9to10(monkeypatch):
|
||
+ default_config_path = '/etc/multipath.conf'
|
||
+
|
||
+ def parse_config_mock(path):
|
||
+ assert path == default_config_path
|
||
+ return MultipathConfig8to9(pathname=path)
|
||
+
|
||
+ monkeypatch.setattr(multipathconfread, '_parse_config', parse_config_mock)
|
||
+ monkeypatch.setattr(multipathconfread, 'is_processable', lambda: True)
|
||
+
|
||
+ produce_mock = produce_mocked()
|
||
+ monkeypatch.setattr(api, 'produce', produce_mock)
|
||
+
|
||
+ actor_mock = CurrentActorMocked(src_ver='9.6', dst_ver='10.0')
|
||
+ monkeypatch.setattr(api, 'current_actor', actor_mock)
|
||
+
|
||
+ multipathconfread.scan_and_emit_multipath_info(default_config_path)
|
||
+
|
||
+ assert produce_mock.called
|
||
+
|
||
+ general_info_msgs = [msg for msg in produce_mock.model_instances if isinstance(msg, MultipathInfo)]
|
||
+ assert len(general_info_msgs) == 1
|
||
+ general_info = general_info_msgs[0]
|
||
+ assert general_info.is_configured
|
||
+ assert general_info.config_dir == '/etc/multipath/conf.d'
|
||
+
|
||
+ msgs = [msg for msg in produce_mock.model_instances if isinstance(msg, MultipathConfFacts8to9)]
|
||
+ assert not msgs
|
||
diff --git a/repos/system_upgrade/common/actors/multipath/system_conf_patcher/actor.py b/repos/system_upgrade/common/actors/multipath/system_conf_patcher/actor.py
|
||
new file mode 100644
|
||
index 00000000..44d4fd3b
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/actors/multipath/system_conf_patcher/actor.py
|
||
@@ -0,0 +1,23 @@
|
||
+from leapp.actors import Actor
|
||
+from leapp.libraries.actor import system_config_patcher
|
||
+from leapp.models import MultipathConfigUpdatesInfo
|
||
+from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag
|
||
+
|
||
+
|
||
+class MultipathSystemConfigPatcher(Actor):
|
||
+ """
|
||
+ Propagate any modified multipath configs to the source system.
|
||
+
|
||
+ We copy, modify and use multipath configs from the source system in the upgrade initramfs
|
||
+ as the configs might be incompatible with the target system. Once the upgrade is performed,
|
||
+ actual system's configs need to be modified in the same fashion. This is achieved by simply
|
||
+ copying our modified multipath configs that were used to upgrade the system.
|
||
+ """
|
||
+
|
||
+ name = 'multipath_system_config_patcher'
|
||
+ consumes = (MultipathConfigUpdatesInfo,)
|
||
+ produces = ()
|
||
+ tags = (ApplicationsPhaseTag, IPUWorkflowTag)
|
||
+
|
||
+ def process(self):
|
||
+ system_config_patcher.patch_system_configs()
|
||
diff --git a/repos/system_upgrade/common/actors/multipath/system_conf_patcher/libraries/system_config_patcher.py b/repos/system_upgrade/common/actors/multipath/system_conf_patcher/libraries/system_config_patcher.py
|
||
new file mode 100644
|
||
index 00000000..0d873322
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/actors/multipath/system_conf_patcher/libraries/system_config_patcher.py
|
||
@@ -0,0 +1,17 @@
|
||
+import shutil
|
||
+
|
||
+from leapp.libraries.stdlib import api
|
||
+from leapp.models import MultipathConfigUpdatesInfo
|
||
+
|
||
+
|
||
+def patch_system_configs():
|
||
+ for config_updates in api.consume(MultipathConfigUpdatesInfo):
|
||
+ for modified_config in config_updates.updates:
|
||
+ api.current_logger().debug(
|
||
+ 'Copying modified multipath config {} to {}.'.format(
|
||
+ modified_config.updated_config_location,
|
||
+ modified_config.target_path
|
||
+ )
|
||
+ )
|
||
+
|
||
+ shutil.copy(modified_config.updated_config_location, modified_config.target_path)
|
||
diff --git a/repos/system_upgrade/common/actors/multipath/system_conf_patcher/tests/test_config_patcher.py b/repos/system_upgrade/common/actors/multipath/system_conf_patcher/tests/test_config_patcher.py
|
||
new file mode 100644
|
||
index 00000000..1151fb69
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/actors/multipath/system_conf_patcher/tests/test_config_patcher.py
|
||
@@ -0,0 +1,41 @@
|
||
+import shutil
|
||
+
|
||
+from leapp.libraries.actor import system_config_patcher
|
||
+from leapp.libraries.common.testutils import CurrentActorMocked
|
||
+from leapp.libraries.stdlib import api
|
||
+from leapp.models import MultipathConfigUpdatesInfo, UpdatedMultipathConfig
|
||
+
|
||
+
|
||
+def test_config_patcher(monkeypatch):
|
||
+ modified_configs = [
|
||
+ UpdatedMultipathConfig(
|
||
+ updated_config_location='/var/lib/leapp/planned_conf_modifications/etc/multipath.conf',
|
||
+ target_path='/etc/multipath.conf'
|
||
+ ),
|
||
+ UpdatedMultipathConfig(
|
||
+ updated_config_location='/var/lib/leapp/planned_conf_modifications/etc/multipath/conf.d/myconfig.conf',
|
||
+ target_path='/etc/multipath/conf.d/myconfig.conf'
|
||
+ )
|
||
+ ]
|
||
+ config_update_info = MultipathConfigUpdatesInfo(updates=modified_configs)
|
||
+
|
||
+ actor_mock = CurrentActorMocked(msgs=[config_update_info])
|
||
+ monkeypatch.setattr(api, 'current_actor', actor_mock)
|
||
+
|
||
+ copies_performed = []
|
||
+
|
||
+ def copy_mock(src, dst, *args, **kwargs):
|
||
+ copies_performed.append((src, dst))
|
||
+
|
||
+ monkeypatch.setattr(shutil, 'copy', copy_mock)
|
||
+ system_config_patcher.patch_system_configs()
|
||
+
|
||
+ expected_copies = [
|
||
+ ('/var/lib/leapp/planned_conf_modifications/etc/multipath.conf', '/etc/multipath.conf'),
|
||
+ (
|
||
+ '/var/lib/leapp/planned_conf_modifications/etc/multipath/conf.d/myconfig.conf',
|
||
+ '/etc/multipath/conf.d/myconfig.conf'
|
||
+ )
|
||
+ ]
|
||
+
|
||
+ assert sorted(copies_performed) == expected_copies
|
||
diff --git a/repos/system_upgrade/common/actors/multipath/target_uspace_configs/actor.py b/repos/system_upgrade/common/actors/multipath/target_uspace_configs/actor.py
|
||
new file mode 100644
|
||
index 00000000..bfe0219e
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/actors/multipath/target_uspace_configs/actor.py
|
||
@@ -0,0 +1,22 @@
|
||
+from leapp.actors import Actor
|
||
+from leapp.libraries.actor import target_uspace_multipath_configs
|
||
+from leapp.models import MultipathConfigUpdatesInfo, MultipathInfo, TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks
|
||
+from leapp.tags import IPUWorkflowTag, TargetTransactionChecksPhaseTag
|
||
+
|
||
+
|
||
+class RequestMultipathConfsInTargetUserspace(Actor):
|
||
+ """
|
||
+ Aggregates information about multipath configs.
|
||
+
|
||
+ Produces uniform information consisting of copy instructions about which
|
||
+ multipath configs (original/updated) should be put into the target
|
||
+ userspace.
|
||
+ """
|
||
+
|
||
+ name = 'request_multipath_conf_in_target_userspace'
|
||
+ consumes = (MultipathInfo, MultipathConfigUpdatesInfo)
|
||
+ produces = (TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks)
|
||
+ tags = (TargetTransactionChecksPhaseTag, IPUWorkflowTag)
|
||
+
|
||
+ def process(self):
|
||
+ target_uspace_multipath_configs.process()
|
||
diff --git a/repos/system_upgrade/common/actors/multipath/target_uspace_configs/libraries/target_uspace_multipath_configs.py b/repos/system_upgrade/common/actors/multipath/target_uspace_configs/libraries/target_uspace_multipath_configs.py
|
||
new file mode 100644
|
||
index 00000000..72afc477
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/actors/multipath/target_uspace_configs/libraries/target_uspace_multipath_configs.py
|
||
@@ -0,0 +1,80 @@
|
||
+import os
|
||
+
|
||
+from leapp.libraries.stdlib import api
|
||
+from leapp.models import (
|
||
+ CopyFile,
|
||
+ DracutModule,
|
||
+ MultipathConfigUpdatesInfo,
|
||
+ MultipathInfo,
|
||
+ TargetUserSpaceUpgradeTasks,
|
||
+ UpgradeInitramfsTasks
|
||
+)
|
||
+
|
||
+
|
||
+def request_mpath_dracut_module_for_upgrade_initramfs():
|
||
+ multipath_mod = DracutModule(name='multipath')
|
||
+ request = UpgradeInitramfsTasks(include_dracut_modules=[multipath_mod])
|
||
+ api.produce(request)
|
||
+
|
||
+
|
||
+def request_mpath_confs(multipath_info):
|
||
+ files_to_put_into_uspace = { # source system path -> target uspace destination
|
||
+ '/etc/multipath.conf': '/etc/multipath.conf' # default config
|
||
+ }
|
||
+
|
||
+ if os.path.exists(multipath_info.config_dir):
|
||
+ for filename in os.listdir(multipath_info.config_dir):
|
||
+ config_path = os.path.join(multipath_info.config_dir, filename)
|
||
+ if not config_path.endswith('.conf'):
|
||
+ api.current_logger().debug(
|
||
+ 'Skipping {} as it does not have .conf extension'.format(config_path)
|
||
+ )
|
||
+ continue
|
||
+ files_to_put_into_uspace[config_path] = config_path
|
||
+
|
||
+ for config_updates in api.consume(MultipathConfigUpdatesInfo):
|
||
+ for update in config_updates.updates:
|
||
+ # Detect /etc/multipath.conf > /etc/multipath.conf, and replace it with the patched
|
||
+ # version PATCHED > /etc/multipath.conf
|
||
+ if update.target_path in files_to_put_into_uspace:
|
||
+ del files_to_put_into_uspace[update.target_path]
|
||
+
|
||
+ files_to_put_into_uspace[update.updated_config_location] = update.target_path
|
||
+
|
||
+ # Note: original implementation would copy the /etc/multipath directory, which contains
|
||
+ # /etc/multipath/conf.d location for drop-in files. The current logic includes it automatically,
|
||
+ # if the user does not override this default location. In case that the default drop-in location
|
||
+ # is changed, this new location is used.
|
||
+ additional_files = ['/etc/xdrdevices.conf']
|
||
+ for additional_file in additional_files:
|
||
+ if os.path.exists(additional_file):
|
||
+ files_to_put_into_uspace[additional_file] = additional_file
|
||
+
|
||
+ copy_tasks = []
|
||
+ for source_system_path, target_uspace_path in files_to_put_into_uspace.items():
|
||
+ task = CopyFile(src=source_system_path, dst=target_uspace_path)
|
||
+ copy_tasks.append(task)
|
||
+
|
||
+ tasks = TargetUserSpaceUpgradeTasks(copy_files=copy_tasks)
|
||
+ api.produce(tasks)
|
||
+
|
||
+
|
||
+def process():
|
||
+ multipath_info = next(api.consume(MultipathInfo), None)
|
||
+
|
||
+ if not multipath_info:
|
||
+ api.current_logger().debug(
|
||
+ 'Received no MultipathInfo message. No config files will '
|
||
+ 'be requested to be placed into target userspace.'
|
||
+ )
|
||
+ return
|
||
+
|
||
+ if not multipath_info.is_configured:
|
||
+ api.current_logger().debug(
|
||
+ 'Multipath is not configured. No config files will '
|
||
+ 'be requested to be placed into target userspace.'
|
||
+ )
|
||
+ return
|
||
+
|
||
+ request_mpath_confs(multipath_info)
|
||
+ request_mpath_dracut_module_for_upgrade_initramfs()
|
||
diff --git a/repos/system_upgrade/common/actors/multipath/target_uspace_configs/tests/test_target_uspace_configs.py b/repos/system_upgrade/common/actors/multipath/target_uspace_configs/tests/test_target_uspace_configs.py
|
||
new file mode 100644
|
||
index 00000000..ffb63322
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/actors/multipath/target_uspace_configs/tests/test_target_uspace_configs.py
|
||
@@ -0,0 +1,86 @@
|
||
+import os
|
||
+import shutil
|
||
+
|
||
+import pytest
|
||
+
|
||
+from leapp.libraries.actor import target_uspace_multipath_configs as actor_lib
|
||
+from leapp.libraries.common.testutils import CurrentActorMocked, produce_mocked
|
||
+from leapp.libraries.stdlib import api
|
||
+from leapp.models import (
|
||
+ MultipathConfigUpdatesInfo,
|
||
+ MultipathInfo,
|
||
+ TargetUserSpaceUpgradeTasks,
|
||
+ UpdatedMultipathConfig,
|
||
+ UpgradeInitramfsTasks
|
||
+)
|
||
+
|
||
+
|
||
+@pytest.mark.parametrize(
|
||
+ ('multipath_info', 'should_produce'),
|
||
+ [
|
||
+ (None, False), # No multipath info message
|
||
+ (MultipathInfo(is_configured=False), False), # Multipath is not configured
|
||
+ (MultipathInfo(is_configured=True, config_dir='/etc/multipath/conf.d'), True)
|
||
+ ]
|
||
+)
|
||
+def test_production_conditions(monkeypatch, multipath_info, should_produce):
|
||
+ """ Test whether messages are produced under right conditions. """
|
||
+ produce_mock = produce_mocked()
|
||
+ monkeypatch.setattr(api, 'produce', produce_mock)
|
||
+
|
||
+ msgs = [multipath_info] if multipath_info else []
|
||
+ if multipath_info and multipath_info.is_configured:
|
||
+ update = UpdatedMultipathConfig(
|
||
+ updated_config_location='/var/lib/leapp/proposed_changes/etc/multipath/conf.d/config.conf',
|
||
+ target_path='/etc/multipath/conf.d/config.conf'
|
||
+ )
|
||
+ msgs.append(MultipathConfigUpdatesInfo(updates=[update]))
|
||
+
|
||
+ actor_mock = CurrentActorMocked(msgs=msgs)
|
||
+ monkeypatch.setattr(api, 'current_actor', actor_mock)
|
||
+
|
||
+ def listdir_mock(path):
|
||
+ assert path == '/etc/multipath/conf.d'
|
||
+ return ['config.conf', 'config-not-to-be-touched.conf']
|
||
+
|
||
+ def exists_mock(path):
|
||
+ return path == '/etc/multipath/conf.d'
|
||
+
|
||
+ monkeypatch.setattr(os.path, 'exists', exists_mock)
|
||
+ monkeypatch.setattr(os, 'listdir', listdir_mock)
|
||
+
|
||
+ actor_lib.process()
|
||
+
|
||
+ if should_produce:
|
||
+ _target_uspace_tasks = [
|
||
+ msg for msg in produce_mock.model_instances if isinstance(msg, TargetUserSpaceUpgradeTasks)
|
||
+ ]
|
||
+ assert len(_target_uspace_tasks) == 1
|
||
+
|
||
+ target_uspace_tasks = _target_uspace_tasks[0]
|
||
+
|
||
+ copies = sorted((copy.src, copy.dst) for copy in target_uspace_tasks.copy_files)
|
||
+ expected_copies = [
|
||
+ (
|
||
+ '/etc/multipath.conf',
|
||
+ '/etc/multipath.conf'
|
||
+ ),
|
||
+ (
|
||
+ '/var/lib/leapp/proposed_changes/etc/multipath/conf.d/config.conf',
|
||
+ '/etc/multipath/conf.d/config.conf'
|
||
+ ),
|
||
+ (
|
||
+ '/etc/multipath/conf.d/config-not-to-be-touched.conf',
|
||
+ '/etc/multipath/conf.d/config-not-to-be-touched.conf'
|
||
+ )
|
||
+ ]
|
||
+ assert copies == sorted(expected_copies)
|
||
+
|
||
+ _upgrade_initramfs_tasks = [m for m in produce_mock.model_instances if isinstance(m, UpgradeInitramfsTasks)]
|
||
+ assert len(_upgrade_initramfs_tasks) == 1
|
||
+ upgrade_initramfs_tasks = _upgrade_initramfs_tasks[0]
|
||
+
|
||
+ dracut_modules = [dracut_mod.name for dracut_mod in upgrade_initramfs_tasks.include_dracut_modules]
|
||
+ assert dracut_modules == ['multipath']
|
||
+ else:
|
||
+ assert not produce_mock.called
|
||
diff --git a/repos/system_upgrade/common/actors/opensshconfigscanner/libraries/readopensshconfig.py b/repos/system_upgrade/common/actors/opensshconfigscanner/libraries/readopensshconfig.py
|
||
index 50e37092..f467676b 100644
|
||
--- a/repos/system_upgrade/common/actors/opensshconfigscanner/libraries/readopensshconfig.py
|
||
+++ b/repos/system_upgrade/common/actors/opensshconfigscanner/libraries/readopensshconfig.py
|
||
@@ -7,6 +7,7 @@ from leapp.exceptions import StopActorExecutionError
|
||
from leapp.libraries.common.rpms import check_file_modification
|
||
from leapp.libraries.stdlib import api
|
||
from leapp.models import OpenSshConfig, OpenSshPermitRootLogin
|
||
+from leapp.models.fields import ModelViolationError
|
||
|
||
CONFIG = '/etc/ssh/sshd_config'
|
||
DEPRECATED_DIRECTIVES = ['showpatchlevel']
|
||
@@ -60,12 +61,35 @@ def parse_config(config, base_config=None, current_cfg_depth=0):
|
||
# convert deprecated alias
|
||
if value == "without-password":
|
||
value = "prohibit-password"
|
||
- v = OpenSshPermitRootLogin(value=value, in_match=in_match)
|
||
+ try:
|
||
+ v = OpenSshPermitRootLogin(value=value, in_match=in_match)
|
||
+ except ModelViolationError:
|
||
+ valid_values = OpenSshPermitRootLogin.value.serialize()['choices']
|
||
+ raise StopActorExecutionError(
|
||
+ 'Invalid SSH configuration: Invalid value for PermitRootLogin',
|
||
+ details={
|
||
+ 'details': 'Invalid value "{}" for PermitRootLogin in {}. '
|
||
+ 'Arguments for SSH configuration options are case-sensitive. '
|
||
+ 'Valid values are: {}.'
|
||
+ .format(value, CONFIG, ', '.join(valid_values))
|
||
+ }
|
||
+ )
|
||
ret.permit_root_login.append(v)
|
||
|
||
elif el[0].lower() == 'useprivilegeseparation':
|
||
# Record only first occurrence, which is effective
|
||
if not ret.use_privilege_separation:
|
||
+ valid_values = OpenSshConfig.use_privilege_separation.serialize()['choices']
|
||
+ if value not in valid_values:
|
||
+ raise StopActorExecutionError(
|
||
+ 'Invalid SSH configuration: Invalid value for UsePrivilegeSeparation',
|
||
+ details={
|
||
+ 'details': 'Invalid value "{}" for UsePrivilegeSeparation in {}. '
|
||
+ 'Arguments for SSH configuration options are case-sensitive. '
|
||
+ 'Valid values are: {}.'
|
||
+ .format(value, CONFIG, ', '.join(valid_values))
|
||
+ }
|
||
+ )
|
||
ret.use_privilege_separation = value
|
||
|
||
elif el[0].lower() == 'protocol':
|
||
diff --git a/repos/system_upgrade/common/actors/opensshconfigscanner/tests/test_readopensshconfig_opensshconfigscanner.py b/repos/system_upgrade/common/actors/opensshconfigscanner/tests/test_readopensshconfig_opensshconfigscanner.py
|
||
index 64c16f7f..1a6a1c9f 100644
|
||
--- a/repos/system_upgrade/common/actors/opensshconfigscanner/tests/test_readopensshconfig_opensshconfigscanner.py
|
||
+++ b/repos/system_upgrade/common/actors/opensshconfigscanner/tests/test_readopensshconfig_opensshconfigscanner.py
|
||
@@ -351,6 +351,19 @@ def test_produce_config():
|
||
assert cfg.subsystem_sftp == 'internal-sftp'
|
||
|
||
|
||
+@pytest.mark.parametrize('config_line,option_name,invalid_value', [
|
||
+ ('PermitRootLogin NO', 'PermitRootLogin', 'NO'),
|
||
+ ('UsePrivilegeSeparation YES', 'UsePrivilegeSeparation', 'YES'),
|
||
+])
|
||
+def test_parse_config_invalid_option_case(config_line, option_name, invalid_value):
|
||
+ config = [config_line]
|
||
+
|
||
+ with pytest.raises(StopActorExecutionError) as err:
|
||
+ parse_config(config)
|
||
+
|
||
+ assert str(err.value).startswith('Invalid SSH configuration')
|
||
+
|
||
+
|
||
def test_actor_execution(current_actor_context):
|
||
current_actor_context.run()
|
||
assert current_actor_context.consume(OpenSshConfig)
|
||
diff --git a/repos/system_upgrade/common/actors/peseventsscanner/actor.py b/repos/system_upgrade/common/actors/peseventsscanner/actor.py
|
||
index f801f1a1..cb911471 100644
|
||
--- a/repos/system_upgrade/common/actors/peseventsscanner/actor.py
|
||
+++ b/repos/system_upgrade/common/actors/peseventsscanner/actor.py
|
||
@@ -10,7 +10,8 @@ from leapp.models import (
|
||
RepositoriesMapping,
|
||
RepositoriesSetupTasks,
|
||
RHUIInfo,
|
||
- RpmTransactionTasks
|
||
+ RpmTransactionTasks,
|
||
+ ActiveVendorList,
|
||
)
|
||
from leapp.reporting import Report
|
||
from leapp.tags import FactsPhaseTag, IPUWorkflowTag
|
||
@@ -33,6 +34,7 @@ class PesEventsScanner(Actor):
|
||
RepositoriesMapping,
|
||
RHUIInfo,
|
||
RpmTransactionTasks,
|
||
+ ActiveVendorList,
|
||
)
|
||
produces = (ConsumedDataAsset, PESRpmTransactionTasks, RepositoriesSetupTasks, Report)
|
||
tags = (IPUWorkflowTag, FactsPhaseTag)
|
||
diff --git a/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_event_parsing.py b/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_event_parsing.py
|
||
index f24dda68..7ee5d016 100644
|
||
--- a/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_event_parsing.py
|
||
+++ b/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_event_parsing.py
|
||
@@ -58,6 +58,7 @@ class Action(IntEnum):
|
||
MERGED = 5
|
||
MOVED = 6
|
||
RENAMED = 7
|
||
+ REINSTALLED = 8
|
||
|
||
|
||
def get_pes_events(pes_json_directory, pes_json_filename):
|
||
@@ -72,13 +73,14 @@ def get_pes_events(pes_json_directory, pes_json_filename):
|
||
# a case as we have no work to do in such a case here.
|
||
events_data = fetch.load_data_asset(api.current_actor(),
|
||
pes_json_filename,
|
||
+ asset_directory=pes_json_directory,
|
||
asset_fulltext_name='PES events file',
|
||
docs_url='',
|
||
docs_title='')
|
||
if not events_data:
|
||
return None
|
||
|
||
- if not events_data.get('packageinfo'):
|
||
+ if events_data.get('packageinfo') is None:
|
||
raise ValueError('Found PES data with invalid structure')
|
||
|
||
all_events = list(chain(*[parse_entry(entry) for entry in events_data['packageinfo']]))
|
||
diff --git a/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py b/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py
|
||
index 67e517d1..ec7d001a 100644
|
||
--- a/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py
|
||
+++ b/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py
|
||
@@ -1,5 +1,6 @@
|
||
from collections import defaultdict, namedtuple
|
||
from functools import partial
|
||
+import os
|
||
|
||
from leapp import reporting
|
||
from leapp.exceptions import StopActorExecutionError
|
||
@@ -7,6 +8,7 @@ from leapp.libraries.actor import peseventsscanner_repomap
|
||
from leapp.libraries.actor.pes_event_parsing import Action, get_pes_events, Package
|
||
from leapp.libraries.common import rpms
|
||
from leapp.libraries.common.config import get_target_distro_id, version
|
||
+from leapp.libraries.common.repomaputils import combine_repomap_messages
|
||
from leapp.libraries.stdlib import api
|
||
from leapp.libraries.stdlib.config import is_verbose
|
||
from leapp.models import (
|
||
@@ -20,7 +22,8 @@ from leapp.models import (
|
||
RepositoriesMapping,
|
||
RepositoriesSetupTasks,
|
||
RHUIInfo,
|
||
- RpmTransactionTasks
|
||
+ RpmTransactionTasks,
|
||
+ ActiveVendorList,
|
||
)
|
||
|
||
SKIPPED_PKGS_MSG = (
|
||
@@ -31,8 +34,9 @@ SKIPPED_PKGS_MSG = (
|
||
'for details.\nThe list of these packages:'
|
||
)
|
||
|
||
+VENDORS_DIR = "/etc/leapp/files/vendors.d"
|
||
|
||
-TransactionConfiguration = namedtuple('TransactionConfiguration', ('to_install', 'to_remove', 'to_keep'))
|
||
+TransactionConfiguration = namedtuple('TransactionConfiguration', ('to_install', 'to_remove', 'to_keep', 'to_reinstall'))
|
||
|
||
|
||
def get_cloud_provider_name(cloud_provider_variant):
|
||
@@ -86,7 +90,7 @@ def get_transaction_configuration():
|
||
|
||
:return: TransactionConfiguration
|
||
"""
|
||
- transaction_configuration = TransactionConfiguration(to_install=set(), to_remove=set(), to_keep=set())
|
||
+ transaction_configuration = TransactionConfiguration(to_install=set(), to_remove=set(), to_keep=set(), to_reinstall=set())
|
||
|
||
_Pkg = partial(Package, repository=None, modulestream=None)
|
||
|
||
@@ -94,6 +98,7 @@ def get_transaction_configuration():
|
||
transaction_configuration.to_install.update(_Pkg(name=pkg_name) for pkg_name in tasks.to_install)
|
||
transaction_configuration.to_remove.update(_Pkg(name=pkg_name) for pkg_name in tasks.to_remove)
|
||
transaction_configuration.to_keep.update(_Pkg(name=pkg_name) for pkg_name in tasks.to_keep)
|
||
+ transaction_configuration.to_reinstall.update(_Pkg(name=pkg_name) for pkg_name in tasks.to_reinstall)
|
||
return transaction_configuration
|
||
|
||
|
||
@@ -133,6 +138,7 @@ def compute_pkg_changes_between_consequent_releases(source_installed_pkgs,
|
||
logger = api.current_logger()
|
||
# Start with the installed packages and modify the set according to release events
|
||
target_pkgs = set(source_installed_pkgs)
|
||
+ pkgs_to_reinstall = set()
|
||
|
||
release_events = [e for e in events if e.to_release == release]
|
||
|
||
@@ -176,9 +182,12 @@ def compute_pkg_changes_between_consequent_releases(source_installed_pkgs,
|
||
target_pkgs = target_pkgs.difference(event.out_pkgs)
|
||
target_pkgs = target_pkgs.union(event.out_pkgs)
|
||
|
||
+ if (event.action == Action.REINSTALLED and is_any_in_pkg_present):
|
||
+ pkgs_to_reinstall = pkgs_to_reinstall.union(event.in_pkgs)
|
||
+
|
||
pkgs_to_demodularize = pkgs_to_demodularize.difference(event.in_pkgs)
|
||
|
||
- return (target_pkgs, pkgs_to_demodularize)
|
||
+ return (target_pkgs, pkgs_to_demodularize, pkgs_to_reinstall)
|
||
|
||
|
||
def remove_undesired_events(events, relevant_to_releases):
|
||
@@ -244,15 +253,17 @@ def compute_packages_on_target_system(source_pkgs, events, releases):
|
||
did_processing_cross_major_version = True
|
||
pkgs_to_demodularize = {pkg for pkg in target_pkgs if pkg.modulestream}
|
||
|
||
- target_pkgs, pkgs_to_demodularize = compute_pkg_changes_between_consequent_releases(target_pkgs, events,
|
||
- release, seen_pkgs,
|
||
- pkgs_to_demodularize)
|
||
+ target_pkgs, pkgs_to_demodularize, pkgs_to_reinstall = compute_pkg_changes_between_consequent_releases(
|
||
+ target_pkgs, events,
|
||
+ release, seen_pkgs,
|
||
+ pkgs_to_demodularize
|
||
+ )
|
||
seen_pkgs = seen_pkgs.union(target_pkgs)
|
||
|
||
demodularized_pkgs = {Package(pkg.name, pkg.repository, None) for pkg in pkgs_to_demodularize}
|
||
demodularized_target_pkgs = target_pkgs.difference(pkgs_to_demodularize).union(demodularized_pkgs)
|
||
|
||
- return (demodularized_target_pkgs, pkgs_to_demodularize)
|
||
+ return (demodularized_target_pkgs, pkgs_to_demodularize, pkgs_to_reinstall)
|
||
|
||
|
||
def compute_rpm_tasks_from_pkg_set_diff(source_pkgs, target_pkgs, pkgs_to_demodularize):
|
||
@@ -356,15 +367,13 @@ def get_pesid_to_repoid_map(target_pesids):
|
||
:return: Dictionary mapping the target_pesids to their corresponding repoid
|
||
"""
|
||
|
||
- repositories_map_msgs = api.consume(RepositoriesMapping)
|
||
- repositories_map_msg = next(repositories_map_msgs, None)
|
||
- if list(repositories_map_msgs):
|
||
- api.current_logger().warning('Unexpectedly received more than one RepositoriesMapping message.')
|
||
- if not repositories_map_msg:
|
||
+ repositories_map_msgs = list(api.consume(RepositoriesMapping))
|
||
+ if not repositories_map_msgs:
|
||
raise StopActorExecutionError(
|
||
'Cannot parse RepositoriesMapping data properly',
|
||
details={'Problem': 'Did not receive a message with mapped repositories'}
|
||
)
|
||
+ repositories_map_msg = combine_repomap_messages(repositories_map_msgs)
|
||
|
||
rhui_info = next(api.consume(RHUIInfo), None)
|
||
cloud_provider = rhui_info.provider if rhui_info else ''
|
||
@@ -554,6 +563,19 @@ def process():
|
||
if not events:
|
||
return
|
||
|
||
+ active_vendors = []
|
||
+ for vendor_list in api.consume(ActiveVendorList):
|
||
+ active_vendors.extend(vendor_list.data)
|
||
+
|
||
+ pes_json_suffix = "_pes.json"
|
||
+ if os.path.isdir(VENDORS_DIR):
|
||
+ vendor_pesfiles = list(filter(lambda vfile: pes_json_suffix in vfile, os.listdir(VENDORS_DIR)))
|
||
+
|
||
+ for pesfile in vendor_pesfiles:
|
||
+ if pesfile[:-len(pes_json_suffix)] in active_vendors:
|
||
+ vendor_events = get_pes_events(VENDORS_DIR, pesfile)
|
||
+ events.extend(vendor_events)
|
||
+
|
||
releases = get_relevant_releases(events)
|
||
installed_pkgs = get_installed_pkgs()
|
||
transaction_configuration = get_transaction_configuration()
|
||
@@ -567,7 +589,7 @@ def process():
|
||
events = remove_undesired_events(events, releases)
|
||
|
||
# Apply events - compute what packages should the target system have
|
||
- target_pkgs, pkgs_to_demodularize = compute_packages_on_target_system(pkgs_to_begin_computation_with,
|
||
+ target_pkgs, pkgs_to_demodularize, pkgs_to_reinstall = compute_packages_on_target_system(pkgs_to_begin_computation_with,
|
||
events, releases)
|
||
|
||
# Packages coming out of the events have PESID as their repository, however, we need real repoid
|
||
@@ -587,4 +609,5 @@ def process():
|
||
rpm_tasks = include_instructions_from_transaction_configuration(rpm_tasks, transaction_configuration,
|
||
installed_pkgs)
|
||
if rpm_tasks:
|
||
+ rpm_tasks.to_reinstall = sorted(pkgs_to_reinstall)
|
||
api.produce(rpm_tasks)
|
||
diff --git a/repos/system_upgrade/common/actors/removeobsoletegpgkeys/actor.py b/repos/system_upgrade/common/actors/removeobsoletegpgkeys/actor.py
|
||
index 5674ee3f..58b15a84 100644
|
||
--- a/repos/system_upgrade/common/actors/removeobsoletegpgkeys/actor.py
|
||
+++ b/repos/system_upgrade/common/actors/removeobsoletegpgkeys/actor.py
|
||
@@ -8,9 +8,14 @@ class RemoveObsoleteGpgKeys(Actor):
|
||
"""
|
||
Remove obsoleted RPM GPG keys.
|
||
|
||
- New version might make existing RPM GPG keys obsolete. This might be caused
|
||
- for example by the hashing algorithm becoming deprecated or by the key
|
||
- getting replaced.
|
||
+ The definition of what keys are considered obsolete depends on whether the
|
||
+ upgrade also does a conversion:
|
||
+ - If not converting, the obsolete keys are those that are no longer valid
|
||
+ on the target version. This might be caused for example by the hashing
|
||
+ algorithm becoming deprecated or by the key getting replaced. Note that
|
||
+ only keys provided by the vendor of the OS are handled.
|
||
+ - If converting, the obsolete keys are all of the keys provided by the
|
||
+ vendor of the source distribution.
|
||
|
||
A DNFWorkaround is registered to actually remove the keys.
|
||
"""
|
||
diff --git a/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py b/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py
|
||
index df08e6fa..7d047395 100644
|
||
--- a/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py
|
||
+++ b/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py
|
||
@@ -1,3 +1,5 @@
|
||
+import itertools
|
||
+
|
||
from leapp.libraries.common.config import get_source_distro_id, get_target_distro_id
|
||
from leapp.libraries.common.config.version import get_target_major_version
|
||
from leapp.libraries.common.distro import get_distribution_data
|
||
@@ -6,18 +8,25 @@ from leapp.libraries.stdlib import api
|
||
from leapp.models import DNFWorkaround, InstalledRPM
|
||
|
||
|
||
+def _is_key_installed(key):
|
||
+ """
|
||
+ :param key: The NVR of the gpg key RPM (e.g. gpg-pubkey-1d997668-61bae63b)
|
||
+ """
|
||
+ name, version, release = key.rsplit("-", 2)
|
||
+ return has_package(InstalledRPM, name, version=version, release=release)
|
||
+
|
||
+
|
||
def _get_obsolete_keys():
|
||
"""
|
||
- Return keys obsoleted in target and previous versions
|
||
+ Get keys obsoleted in target and previous major versions
|
||
"""
|
||
distribution = get_target_distro_id()
|
||
- obsoleted_keys_map = get_distribution_data(distribution).get('obsoleted-keys', {})
|
||
+ obsoleted_keys_map = get_distribution_data(distribution).get("obsoleted-keys", {})
|
||
keys = []
|
||
for version in range(7, int(get_target_major_version()) + 1):
|
||
try:
|
||
for key in obsoleted_keys_map[str(version)]:
|
||
- name, version, release = key.rsplit("-", 2)
|
||
- if has_package(InstalledRPM, name, version=version, release=release):
|
||
+ if _is_key_installed(key):
|
||
keys.append(key)
|
||
except KeyError:
|
||
pass
|
||
@@ -25,6 +34,22 @@ def _get_obsolete_keys():
|
||
return keys
|
||
|
||
|
||
+def _get_source_distro_keys():
|
||
+ """
|
||
+ Get all known keys of the source distro
|
||
+
|
||
+ This includes keys from all relevant previous OS versions as all of those
|
||
+ might be present on the system.
|
||
+ """
|
||
+ distribution = get_source_distro_id()
|
||
+ keys = get_distribution_data(distribution).get("keys", {})
|
||
+ return [
|
||
+ key
|
||
+ for key in itertools.chain.from_iterable(keys.values())
|
||
+ if _is_key_installed(key)
|
||
+ ]
|
||
+
|
||
+
|
||
def register_dnfworkaround(keys):
|
||
api.produce(
|
||
DNFWorkaround(
|
||
@@ -36,13 +61,12 @@ def register_dnfworkaround(keys):
|
||
|
||
|
||
def process():
|
||
- if get_source_distro_id() != get_target_distro_id():
|
||
- # TODO adjust for conversions, in the current state it would not have
|
||
- # any effect, just skip it
|
||
- return
|
||
-
|
||
- keys = _get_obsolete_keys()
|
||
- if not keys:
|
||
- return
|
||
+ if get_source_distro_id() == get_target_distro_id():
|
||
+ # only upgrading - remove keys obsoleted in previous versions
|
||
+ keys = _get_obsolete_keys()
|
||
+ else:
|
||
+ # also converting - we need to remove all keys from the source distro
|
||
+ keys = _get_source_distro_keys()
|
||
|
||
- register_dnfworkaround(keys)
|
||
+ if keys:
|
||
+ register_dnfworkaround(keys)
|
||
diff --git a/repos/system_upgrade/common/actors/removeobsoletegpgkeys/tests/test_removeobsoleterpmgpgkeys.py b/repos/system_upgrade/common/actors/removeobsoletegpgkeys/tests/test_removeobsoleterpmgpgkeys.py
|
||
index b78174cc..8b9b842b 100644
|
||
--- a/repos/system_upgrade/common/actors/removeobsoletegpgkeys/tests/test_removeobsoleterpmgpgkeys.py
|
||
+++ b/repos/system_upgrade/common/actors/removeobsoletegpgkeys/tests/test_removeobsoleterpmgpgkeys.py
|
||
@@ -1,77 +1,79 @@
|
||
import os
|
||
+import unittest.mock as mock
|
||
|
||
import pytest
|
||
|
||
from leapp.libraries.actor import removeobsoleterpmgpgkeys
|
||
-from leapp.libraries.common.config.version import get_target_major_version
|
||
-from leapp.libraries.common.rpms import has_package
|
||
from leapp.libraries.common.testutils import CurrentActorMocked, produce_mocked
|
||
from leapp.libraries.stdlib import api
|
||
-from leapp.models import DNFWorkaround, InstalledRPM, RPM
|
||
+from leapp.models import InstalledRPM, RPM
|
||
|
||
+_CUR_DIR = os.path.dirname(os.path.abspath(__file__))
|
||
|
||
-def _get_test_installedrpm():
|
||
- return InstalledRPM(
|
||
+
|
||
+def common_folder_path_mocked(folder):
|
||
+ return os.path.join(_CUR_DIR, "../../../files/", folder)
|
||
+
|
||
+
|
||
+def test_is_key_installed(monkeypatch):
|
||
+ installed_rpms = InstalledRPM(
|
||
items=[
|
||
RPM(
|
||
- name='gpg-pubkey',
|
||
- version='d4082792',
|
||
- release='5b32db75',
|
||
- epoch='0',
|
||
- packager='Red Hat, Inc. (auxiliary key 2) <security@redhat.com>',
|
||
- arch='noarch',
|
||
- pgpsig=''
|
||
+ name="gpg-pubkey",
|
||
+ version="d4082792",
|
||
+ release="5b32db75",
|
||
+ epoch="0",
|
||
+ packager="Red Hat, Inc. (auxiliary key 2) <security@redhat.com>",
|
||
+ arch="noarch",
|
||
+ pgpsig="",
|
||
),
|
||
RPM(
|
||
- name='gpg-pubkey',
|
||
- version='2fa658e0',
|
||
- release='45700c69',
|
||
- epoch='0',
|
||
- packager='Red Hat, Inc. (auxiliary key) <security@redhat.com>',
|
||
- arch='noarch',
|
||
- pgpsig=''
|
||
+ name="gpg-pubkey",
|
||
+ version="2fa658e0",
|
||
+ release="45700c69",
|
||
+ epoch="0",
|
||
+ packager="Red Hat, Inc. (auxiliary key) <security@redhat.com>",
|
||
+ arch="noarch",
|
||
+ pgpsig="",
|
||
),
|
||
RPM(
|
||
- name='gpg-pubkey',
|
||
- version='12345678',
|
||
- release='abcdefgh',
|
||
- epoch='0',
|
||
- packager='made up',
|
||
- arch='noarch',
|
||
- pgpsig=''
|
||
+ name="gpg-pubkey",
|
||
+ version="12345678",
|
||
+ release="abcdefgh",
|
||
+ epoch="0",
|
||
+ packager="made up",
|
||
+ arch="noarch",
|
||
+ pgpsig="",
|
||
),
|
||
]
|
||
)
|
||
|
||
+ monkeypatch.setattr(
|
||
+ api, "current_actor", CurrentActorMocked(msgs=[installed_rpms])
|
||
+ )
|
||
+
|
||
+ assert removeobsoleterpmgpgkeys._is_key_installed("gpg-pubkey-d4082792-5b32db75")
|
||
+ assert removeobsoleterpmgpgkeys._is_key_installed("gpg-pubkey-2fa658e0-45700c69")
|
||
+ assert removeobsoleterpmgpgkeys._is_key_installed("gpg-pubkey-12345678-abcdefgh")
|
||
+ assert not removeobsoleterpmgpgkeys._is_key_installed(
|
||
+ "gpg-pubkey-db42a60e-37ea5438"
|
||
+ )
|
||
+
|
||
|
||
@pytest.mark.parametrize(
|
||
"version, expected",
|
||
[
|
||
- (9, ["gpg-pubkey-d4082792-5b32db75", "gpg-pubkey-2fa658e0-45700c69"]),
|
||
- (8, ["gpg-pubkey-2fa658e0-45700c69"])
|
||
+ ("9", ["gpg-pubkey-d4082792-5b32db75", "gpg-pubkey-2fa658e0-45700c69"]),
|
||
+ ("8", ["gpg-pubkey-2fa658e0-45700c69"])
|
||
]
|
||
)
|
||
def test_get_obsolete_keys(monkeypatch, version, expected):
|
||
- def get_target_major_version_mocked():
|
||
- return version
|
||
-
|
||
- monkeypatch.setattr(
|
||
- removeobsoleterpmgpgkeys,
|
||
- "get_target_major_version",
|
||
- get_target_major_version_mocked,
|
||
- )
|
||
-
|
||
+ monkeypatch.setattr(api, "current_actor", CurrentActorMocked(dst_ver=version))
|
||
+ monkeypatch.setattr(api, "get_common_folder_path", common_folder_path_mocked)
|
||
monkeypatch.setattr(
|
||
- api,
|
||
- "current_actor",
|
||
- CurrentActorMocked(
|
||
- msgs=[_get_test_installedrpm()]
|
||
- ),
|
||
+ removeobsoleterpmgpgkeys, "_is_key_installed", lambda key: key in expected
|
||
)
|
||
|
||
- cur_dir = os.path.dirname(os.path.abspath(__file__))
|
||
- monkeypatch.setattr(api, 'get_common_folder_path', lambda folder: os.path.join(cur_dir, '../../../files/', folder))
|
||
-
|
||
keys = removeobsoleterpmgpgkeys._get_obsolete_keys()
|
||
assert set(keys) == set(expected)
|
||
|
||
@@ -79,50 +81,83 @@ def test_get_obsolete_keys(monkeypatch, version, expected):
|
||
@pytest.mark.parametrize(
|
||
"version, obsoleted_keys, expected",
|
||
[
|
||
- (10, None, []),
|
||
- (10, {}, []),
|
||
- (10, {"8": ["gpg-pubkey-888-abc"], "10": ["gpg-pubkey-10-10"]}, ["gpg-pubkey-888-abc", "gpg-pubkey-10-10"]),
|
||
- (9, {"8": ["gpg-pubkey-888-abc"], "9": ["gpg-pubkey-999-def"]}, ["gpg-pubkey-999-def", "gpg-pubkey-888-abc"]),
|
||
- (8, {"8": ["gpg-pubkey-888-abc"], "9": ["gpg-pubkey-999-def"]}, ["gpg-pubkey-888-abc"])
|
||
- ]
|
||
+ ("10", None, []),
|
||
+ ("10", {}, []),
|
||
+ (
|
||
+ "10",
|
||
+ {"8": ["gpg-pubkey-888-abc"], "10": ["gpg-pubkey-10-10"]},
|
||
+ ["gpg-pubkey-888-abc", "gpg-pubkey-10-10"],
|
||
+ ),
|
||
+ (
|
||
+ "9",
|
||
+ {"8": ["gpg-pubkey-888-abc"], "9": ["gpg-pubkey-999-def"]},
|
||
+ ["gpg-pubkey-999-def", "gpg-pubkey-888-abc"],
|
||
+ ),
|
||
+ (
|
||
+ "8",
|
||
+ {"8": ["gpg-pubkey-888-abc"], "9": ["gpg-pubkey-999-def"]},
|
||
+ ["gpg-pubkey-888-abc"],
|
||
+ ),
|
||
+ ],
|
||
)
|
||
-def test_get_obsolete_keys_incomplete_data(monkeypatch, version, obsoleted_keys, expected):
|
||
- def get_target_major_version_mocked():
|
||
- return version
|
||
+def test_get_obsolete_keys_incomplete_data(
|
||
+ monkeypatch, version, obsoleted_keys, expected
|
||
+):
|
||
+ monkeypatch.setattr(api, "current_actor", CurrentActorMocked(dst_ver=version))
|
||
+ monkeypatch.setattr(
|
||
+ removeobsoleterpmgpgkeys, "_is_key_installed", lambda key: key in expected
|
||
+ )
|
||
|
||
def get_distribution_data_mocked(_distro):
|
||
if obsoleted_keys is None:
|
||
return {}
|
||
- return {'obsoleted-keys': obsoleted_keys}
|
||
-
|
||
- def has_package_mocked(*args, **kwargs):
|
||
- return True
|
||
+ return {"obsoleted-keys": obsoleted_keys}
|
||
|
||
monkeypatch.setattr(
|
||
- removeobsoleterpmgpgkeys,
|
||
- "get_target_major_version",
|
||
- get_target_major_version_mocked,
|
||
+ removeobsoleterpmgpgkeys, "get_distribution_data", get_distribution_data_mocked
|
||
)
|
||
|
||
- monkeypatch.setattr(
|
||
- removeobsoleterpmgpgkeys,
|
||
- "get_distribution_data",
|
||
- get_distribution_data_mocked,
|
||
- )
|
||
+ keys = removeobsoleterpmgpgkeys._get_obsolete_keys()
|
||
+ assert set(keys) == set(expected)
|
||
|
||
- monkeypatch.setattr(
|
||
- removeobsoleterpmgpgkeys,
|
||
- "has_package",
|
||
- has_package_mocked,
|
||
- )
|
||
|
||
+@pytest.mark.parametrize(
|
||
+ "distro, expected",
|
||
+ [
|
||
+ (
|
||
+ "centos",
|
||
+ [
|
||
+ "gpg-pubkey-8483c65d-5ccc5b19",
|
||
+ "gpg-pubkey-1d997668-621e3cac",
|
||
+ "gpg-pubkey-1d997668-61bae63b",
|
||
+ ],
|
||
+ ),
|
||
+ (
|
||
+ "rhel",
|
||
+ [
|
||
+ "gpg-pubkey-fd431d51-4ae0493b",
|
||
+ "gpg-pubkey-37017186-45761324",
|
||
+ "gpg-pubkey-f21541eb-4a5233e8",
|
||
+ "gpg-pubkey-897da07a-3c979a7f",
|
||
+ "gpg-pubkey-2fa658e0-45700c69",
|
||
+ "gpg-pubkey-d4082792-5b32db75",
|
||
+ "gpg-pubkey-5a6340b3-6229229e",
|
||
+ "gpg-pubkey-db42a60e-37ea5438",
|
||
+ ],
|
||
+ ),
|
||
+ ],
|
||
+)
|
||
+def test_get_source_distro_keys(monkeypatch, distro, expected):
|
||
+ """
|
||
+ Test that the correct keys are returned for each distro.
|
||
+ """
|
||
+ monkeypatch.setattr(api, "current_actor", CurrentActorMocked(src_distro=distro))
|
||
+ monkeypatch.setattr(api, "get_common_folder_path", common_folder_path_mocked)
|
||
monkeypatch.setattr(
|
||
- api,
|
||
- "current_actor",
|
||
- CurrentActorMocked(),
|
||
+ removeobsoleterpmgpgkeys, "_is_key_installed", lambda _key: True
|
||
)
|
||
|
||
- keys = removeobsoleterpmgpgkeys._get_obsolete_keys()
|
||
+ keys = removeobsoleterpmgpgkeys._get_source_distro_keys()
|
||
assert set(keys) == set(expected)
|
||
|
||
|
||
@@ -134,16 +169,61 @@ def test_get_obsolete_keys_incomplete_data(monkeypatch, version, obsoleted_keys,
|
||
]
|
||
)
|
||
def test_workaround_should_register(monkeypatch, keys, should_register):
|
||
- def get_obsolete_keys_mocked():
|
||
- return keys
|
||
-
|
||
monkeypatch.setattr(
|
||
- removeobsoleterpmgpgkeys,
|
||
- '_get_obsolete_keys',
|
||
- get_obsolete_keys_mocked
|
||
+ removeobsoleterpmgpgkeys, "_get_obsolete_keys", lambda: keys
|
||
)
|
||
- monkeypatch.setattr(api, 'produce', produce_mocked())
|
||
+ monkeypatch.setattr(api, "produce", produce_mocked())
|
||
monkeypatch.setattr(api, "current_actor", CurrentActorMocked())
|
||
|
||
removeobsoleterpmgpgkeys.process()
|
||
assert api.produce.called == should_register
|
||
+
|
||
+
|
||
+def test_process(monkeypatch):
|
||
+ """
|
||
+ Test that the correct path is taken depending on whether also converting
|
||
+ """
|
||
+ obsolete = ["gpg-pubkey-12345678-abcdefgh"]
|
||
+ source_distro = ["gpg-pubkey-87654321-hgfedcba"]
|
||
+
|
||
+ monkeypatch.setattr(
|
||
+ removeobsoleterpmgpgkeys, "_get_obsolete_keys", lambda: obsolete
|
||
+ )
|
||
+ monkeypatch.setattr(
|
||
+ removeobsoleterpmgpgkeys, "_get_source_distro_keys", lambda: source_distro,
|
||
+ )
|
||
+
|
||
+ # upgrade only path
|
||
+ monkeypatch.setattr(
|
||
+ api, "current_actor", CurrentActorMocked(src_distro="rhel", dst_distro="rhel")
|
||
+ )
|
||
+ with mock.patch(
|
||
+ "leapp.libraries.actor.removeobsoleterpmgpgkeys.register_dnfworkaround"
|
||
+ ):
|
||
+ removeobsoleterpmgpgkeys.process()
|
||
+ removeobsoleterpmgpgkeys.register_dnfworkaround.assert_called_once_with(
|
||
+ obsolete
|
||
+ )
|
||
+
|
||
+ # upgrade + conversion paths
|
||
+ monkeypatch.setattr(
|
||
+ api, "current_actor", CurrentActorMocked(src_distro="rhel", dst_distro="centos")
|
||
+ )
|
||
+ with mock.patch(
|
||
+ "leapp.libraries.actor.removeobsoleterpmgpgkeys.register_dnfworkaround"
|
||
+ ):
|
||
+ removeobsoleterpmgpgkeys.process()
|
||
+ removeobsoleterpmgpgkeys.register_dnfworkaround.assert_called_once_with(
|
||
+ source_distro
|
||
+ )
|
||
+
|
||
+ monkeypatch.setattr(
|
||
+ api, "current_actor", CurrentActorMocked(src_distro="centos", dst_distro="rhel")
|
||
+ )
|
||
+ with mock.patch(
|
||
+ "leapp.libraries.actor.removeobsoleterpmgpgkeys.register_dnfworkaround"
|
||
+ ):
|
||
+ removeobsoleterpmgpgkeys.process()
|
||
+ removeobsoleterpmgpgkeys.register_dnfworkaround.assert_called_once_with(
|
||
+ source_distro
|
||
+ )
|
||
diff --git a/repos/system_upgrade/common/actors/repositoriesmapping/libraries/repositoriesmapping.py b/repos/system_upgrade/common/actors/repositoriesmapping/libraries/repositoriesmapping.py
|
||
index 503e66a3..4ec1d6e0 100644
|
||
--- a/repos/system_upgrade/common/actors/repositoriesmapping/libraries/repositoriesmapping.py
|
||
+++ b/repos/system_upgrade/common/actors/repositoriesmapping/libraries/repositoriesmapping.py
|
||
@@ -3,6 +3,7 @@ from collections import defaultdict
|
||
|
||
from leapp.exceptions import StopActorExecutionError
|
||
from leapp.libraries.common.config.version import get_source_major_version, get_target_major_version
|
||
+from leapp.libraries.common.repomaputils import RepoMapData
|
||
from leapp.libraries.common.fetch import load_data_asset
|
||
from leapp.libraries.common.rpms import get_leapp_packages, LeappComponents
|
||
from leapp.libraries.stdlib import api
|
||
@@ -16,121 +17,6 @@ REPOMAP_FILE = 'repomap.json'
|
||
"""The name of the new repository mapping file."""
|
||
|
||
|
||
-class RepoMapData:
|
||
- VERSION_FORMAT = '1.3.0'
|
||
-
|
||
- def __init__(self):
|
||
- self.repositories = []
|
||
- self.mapping = {}
|
||
-
|
||
- def add_repository(self, data, pesid):
|
||
- """
|
||
- Add new PESIDRepositoryEntry with given pesid from the provided dictionary.
|
||
-
|
||
- :param data: A dict containing the data of the added repository. The dictionary structure corresponds
|
||
- to the repositories entries in the repository mapping JSON schema.
|
||
- :type data: Dict[str, str]
|
||
- :param pesid: PES id of the repository family that the newly added repository belongs to.
|
||
- :type pesid: str
|
||
- """
|
||
- self.repositories.append(PESIDRepositoryEntry(
|
||
- repoid=data['repoid'],
|
||
- channel=data['channel'],
|
||
- rhui=data.get('rhui', ''),
|
||
- repo_type=data['repo_type'],
|
||
- arch=data['arch'],
|
||
- major_version=data['major_version'],
|
||
- pesid=pesid,
|
||
- distro=data['distro'],
|
||
- ))
|
||
-
|
||
- def get_repositories(self, valid_major_versions):
|
||
- """
|
||
- Return the list of PESIDRepositoryEntry object matching the specified major versions.
|
||
- """
|
||
- return [repo for repo in self.repositories if repo.major_version in valid_major_versions]
|
||
-
|
||
- def add_mapping(self, source_major_version, target_major_version, source_pesid, target_pesid):
|
||
- """
|
||
- Add a new mapping entry that is mapping the source pesid to the destination pesid(s),
|
||
- relevant in an IPU from the supplied source major version to the supplied target
|
||
- major version.
|
||
-
|
||
- :param str source_major_version: Specifies the major version of the source system
|
||
- for which the added mapping applies.
|
||
- :param str target_major_version: Specifies the major version of the target system
|
||
- for which the added mapping applies.
|
||
- :param str source_pesid: PESID of the source repository.
|
||
- :param Union[str|List[str]] target_pesid: A single target PESID or a list of target
|
||
- PESIDs of the added mapping.
|
||
- """
|
||
- # NOTE: it could be more simple, but I prefer to be sure the input data
|
||
- # contains just one map per source PESID.
|
||
- key = '{}:{}'.format(source_major_version, target_major_version)
|
||
- rmap = self.mapping.get(key, defaultdict(set))
|
||
- self.mapping[key] = rmap
|
||
- if isinstance(target_pesid, list):
|
||
- rmap[source_pesid].update(target_pesid)
|
||
- else:
|
||
- rmap[source_pesid].add(target_pesid)
|
||
-
|
||
- def get_mappings(self, src_major_version, dst_major_version):
|
||
- """
|
||
- Return the list of RepoMapEntry objects for the specified upgrade path.
|
||
-
|
||
- IOW, the whole mapping for specified IPU.
|
||
- """
|
||
- key = '{}:{}'.format(src_major_version, dst_major_version)
|
||
- rmap = self.mapping.get(key, None)
|
||
- if not rmap:
|
||
- return None
|
||
- map_list = []
|
||
- for src_pesid in sorted(rmap.keys()):
|
||
- map_list.append(RepoMapEntry(source=src_pesid, target=sorted(rmap[src_pesid])))
|
||
- return map_list
|
||
-
|
||
- @staticmethod
|
||
- def load_from_dict(data):
|
||
- if data['version_format'] != RepoMapData.VERSION_FORMAT:
|
||
- raise ValueError(
|
||
- 'The obtained repomap data has unsupported version of format.'
|
||
- ' Get {} required {}'
|
||
- .format(data['version_format'], RepoMapData.VERSION_FORMAT)
|
||
- )
|
||
-
|
||
- repomap = RepoMapData()
|
||
-
|
||
- # Load reposiories
|
||
- existing_pesids = set()
|
||
- for repo_family in data['repositories']:
|
||
- existing_pesids.add(repo_family['pesid'])
|
||
- for repo in repo_family['entries']:
|
||
- repomap.add_repository(repo, repo_family['pesid'])
|
||
-
|
||
- # Load mappings
|
||
- for mapping in data['mapping']:
|
||
- for entry in mapping['entries']:
|
||
- if not isinstance(entry['target'], list):
|
||
- raise ValueError(
|
||
- 'The target field of a mapping entry is not a list: {}'
|
||
- .format(entry)
|
||
- )
|
||
-
|
||
- for pesid in [entry['source']] + entry['target']:
|
||
- if pesid not in existing_pesids:
|
||
- raise ValueError(
|
||
- 'The {} pesid is not related to any repository.'
|
||
- .format(pesid)
|
||
- )
|
||
- repomap.add_mapping(
|
||
- source_major_version=mapping['source_major_version'],
|
||
- target_major_version=mapping['target_major_version'],
|
||
- source_pesid=entry['source'],
|
||
- target_pesid=entry['target'],
|
||
- )
|
||
- return repomap
|
||
-
|
||
-
|
||
def _inhibit_upgrade(msg):
|
||
local_path = os.path.join('/etc/leapp/file', REPOMAP_FILE)
|
||
hint = (
|
||
diff --git a/repos/system_upgrade/common/actors/rpmtransactionconfigtaskscollector/libraries/rpmtransactionconfigtaskscollector.py b/repos/system_upgrade/common/actors/rpmtransactionconfigtaskscollector/libraries/rpmtransactionconfigtaskscollector.py
|
||
index 84895f83..62aefaf4 100644
|
||
--- a/repos/system_upgrade/common/actors/rpmtransactionconfigtaskscollector/libraries/rpmtransactionconfigtaskscollector.py
|
||
+++ b/repos/system_upgrade/common/actors/rpmtransactionconfigtaskscollector/libraries/rpmtransactionconfigtaskscollector.py
|
||
@@ -18,22 +18,37 @@ def load_tasks_file(path, logger):
|
||
return []
|
||
|
||
|
||
+def filter_out(installed_rpm_names, to_filter, debug_msg):
|
||
+ # These are the packages that aren't installed on the system.
|
||
+ filtered_ok = [pkg for pkg in to_filter if pkg not in installed_rpm_names]
|
||
+
|
||
+ # And these ones are the ones that are.
|
||
+ filtered_out = list(set(to_filter) - set(filtered_ok))
|
||
+ if filtered_out:
|
||
+ api.current_logger().debug(
|
||
+ debug_msg +
|
||
+ '\n- ' + '\n- '.join(filtered_out)
|
||
+ )
|
||
+ # We may want to use either of the two sets.
|
||
+ return filtered_ok, filtered_out
|
||
+
|
||
+
|
||
def load_tasks(base_dir, logger):
|
||
# Loads configuration files to_install, to_keep, and to_remove from the given base directory
|
||
rpms = next(api.consume(DistributionSignedRPM))
|
||
rpm_names = [rpm.name for rpm in rpms.items]
|
||
+
|
||
to_install = load_tasks_file(os.path.join(base_dir, 'to_install'), logger)
|
||
+ install_debug_msg = 'The following packages from "to_install" file will be ignored as they are already installed:'
|
||
# we do not want to put into rpm transaction what is already installed (it will go to "to_upgrade" bucket)
|
||
- to_install_filtered = [pkg for pkg in to_install if pkg not in rpm_names]
|
||
+ to_install_filtered, _ = filter_out(rpm_names, to_install, install_debug_msg)
|
||
|
||
- filtered = set(to_install) - set(to_install_filtered)
|
||
- if filtered:
|
||
- api.current_logger().debug(
|
||
- 'The following packages from "to_install" file will be ignored as they are already installed:\n- %s',
|
||
- '\n- '.join(filtered)
|
||
- )
|
||
+ to_reinstall = load_tasks_file(os.path.join(base_dir, 'to_reinstall'), logger)
|
||
+ reinstall_debug_msg = 'The following packages from "to_reinstall" file will be ignored as they are not installed:'
|
||
+ _, to_reinstall_filtered = filter_out(rpm_names, to_reinstall, reinstall_debug_msg)
|
||
|
||
return RpmTransactionTasks(
|
||
to_install=to_install_filtered,
|
||
+ to_reinstall=to_reinstall_filtered,
|
||
to_keep=load_tasks_file(os.path.join(base_dir, 'to_keep'), logger),
|
||
to_remove=load_tasks_file(os.path.join(base_dir, 'to_remove'), logger))
|
||
diff --git a/repos/system_upgrade/common/actors/scanlvmconfig/actor.py b/repos/system_upgrade/common/actors/scanlvmconfig/actor.py
|
||
new file mode 100644
|
||
index 00000000..23ed032d
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/actors/scanlvmconfig/actor.py
|
||
@@ -0,0 +1,18 @@
|
||
+from leapp.actors import Actor
|
||
+from leapp.libraries.actor import scanlvmconfig
|
||
+from leapp.models import DistributionSignedRPM, LVMConfig
|
||
+from leapp.tags import FactsPhaseTag, IPUWorkflowTag
|
||
+
|
||
+
|
||
+class ScanLVMConfig(Actor):
|
||
+ """
|
||
+ Scan LVM configuration.
|
||
+ """
|
||
+
|
||
+ name = 'scan_lvm_config'
|
||
+ consumes = (DistributionSignedRPM,)
|
||
+ produces = (LVMConfig,)
|
||
+ tags = (FactsPhaseTag, IPUWorkflowTag)
|
||
+
|
||
+ def process(self):
|
||
+ scanlvmconfig.scan()
|
||
diff --git a/repos/system_upgrade/common/actors/scanlvmconfig/libraries/scanlvmconfig.py b/repos/system_upgrade/common/actors/scanlvmconfig/libraries/scanlvmconfig.py
|
||
new file mode 100644
|
||
index 00000000..37755e7c
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/actors/scanlvmconfig/libraries/scanlvmconfig.py
|
||
@@ -0,0 +1,52 @@
|
||
+import os
|
||
+
|
||
+from leapp.libraries.common.config import version
|
||
+from leapp.libraries.common.rpms import has_package
|
||
+from leapp.libraries.stdlib import api
|
||
+from leapp.models import DistributionSignedRPM, LVMConfig, LVMConfigDevicesSection
|
||
+
|
||
+LVM_CONFIG_PATH = '/etc/lvm/lvm.conf'
|
||
+
|
||
+
|
||
+def _lvm_config_devices_parser(lvm_config_lines):
|
||
+ in_section = False
|
||
+ config = {}
|
||
+ for line in lvm_config_lines:
|
||
+ line = line.split("#", 1)[0].strip()
|
||
+ if not line:
|
||
+ continue
|
||
+ if "devices {" in line:
|
||
+ in_section = True
|
||
+ continue
|
||
+ if in_section and "}" in line:
|
||
+ in_section = False
|
||
+ if in_section:
|
||
+ value = line.split("=", 1)
|
||
+ config[value[0].strip()] = value[1].strip().strip('"')
|
||
+ return config
|
||
+
|
||
+
|
||
+def _read_config_lines(path):
|
||
+ with open(path) as lvm_conf_file:
|
||
+ return lvm_conf_file.readlines()
|
||
+
|
||
+
|
||
+def scan():
|
||
+ if not has_package(DistributionSignedRPM, 'lvm2'):
|
||
+ return
|
||
+
|
||
+ if not os.path.isfile(LVM_CONFIG_PATH):
|
||
+ api.current_logger().debug('The "{}" is not present on the system.'.format(LVM_CONFIG_PATH))
|
||
+ return
|
||
+
|
||
+ lvm_config_lines = _read_config_lines(LVM_CONFIG_PATH)
|
||
+ devices_section = _lvm_config_devices_parser(lvm_config_lines)
|
||
+
|
||
+ lvm_config_devices = LVMConfigDevicesSection(use_devicesfile=int(version.get_source_major_version()) > 8)
|
||
+ if 'devicesfile' in devices_section:
|
||
+ lvm_config_devices.devicesfile = devices_section['devicesfile']
|
||
+
|
||
+ if 'use_devicesfile' in devices_section and devices_section['use_devicesfile'] in ['0', '1']:
|
||
+ lvm_config_devices.use_devicesfile = devices_section['use_devicesfile'] == '1'
|
||
+
|
||
+ api.produce(LVMConfig(devices=lvm_config_devices))
|
||
diff --git a/repos/system_upgrade/common/actors/scanlvmconfig/tests/test_scanlvmconfig.py b/repos/system_upgrade/common/actors/scanlvmconfig/tests/test_scanlvmconfig.py
|
||
new file mode 100644
|
||
index 00000000..26728fd8
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/actors/scanlvmconfig/tests/test_scanlvmconfig.py
|
||
@@ -0,0 +1,176 @@
|
||
+import os
|
||
+
|
||
+import pytest
|
||
+
|
||
+from leapp.libraries.actor import scanlvmconfig
|
||
+from leapp.libraries.common.config import version
|
||
+from leapp.libraries.common.testutils import CurrentActorMocked, produce_mocked
|
||
+from leapp.libraries.stdlib import api
|
||
+from leapp.models import DistributionSignedRPM, LVMConfig, LVMConfigDevicesSection, RPM
|
||
+
|
||
+
|
||
+@pytest.mark.parametrize(
|
||
+ ("config_as_lines", "config_as_dict"),
|
||
+ [
|
||
+ ([], {}),
|
||
+ (
|
||
+ ['devices {\n',
|
||
+ '\t# comment\n'
|
||
+ '}\n'],
|
||
+ {}
|
||
+ ),
|
||
+ (
|
||
+ ['global {\n',
|
||
+ 'use_lvmetad = 1\n',
|
||
+ '}\n'],
|
||
+ {}
|
||
+ ),
|
||
+ (
|
||
+ ['devices {\n',
|
||
+ 'filter = [ "r|/dev/cdrom|", "a|.*|" ]\n',
|
||
+ 'use_devicesfile=0\n',
|
||
+ 'devicesfile="file-name.devices"\n',
|
||
+ '}'],
|
||
+ {'filter': '[ "r|/dev/cdrom|", "a|.*|" ]',
|
||
+ 'use_devicesfile': '0',
|
||
+ 'devicesfile': 'file-name.devices'}
|
||
+ ),
|
||
+ (
|
||
+ ['devices {\n',
|
||
+ 'use_devicesfile = 1\n',
|
||
+ 'devicesfile = "file-name.devices"\n',
|
||
+ ' }\n'],
|
||
+ {'use_devicesfile': '1',
|
||
+ 'devicesfile': 'file-name.devices'}
|
||
+ ),
|
||
+ (
|
||
+ ['devices {\n',
|
||
+ ' # comment\n',
|
||
+ 'use_devicesfile = 1 # comment\n',
|
||
+ '#devicesfile = "file-name.devices"\n',
|
||
+ ' }\n'],
|
||
+ {'use_devicesfile': '1'}
|
||
+ ),
|
||
+ (
|
||
+ ['config {\n',
|
||
+ '# configuration section\n',
|
||
+ '\tabort_on_errors = 1\n',
|
||
+ '\tprofile_dir = "/etc/lvm/prifile\n',
|
||
+ '}\n',
|
||
+ 'devices {\n',
|
||
+ ' \n',
|
||
+ '\tfilter = ["a|.*|"] \n',
|
||
+ '\tuse_devicesfile=0\n',
|
||
+ '}\n',
|
||
+ 'allocation {\n',
|
||
+ '\tcling_tag_list = [ "@site1", "@site2" ]\n',
|
||
+ '\tcache_settings {\n',
|
||
+ '\t}\n',
|
||
+ '}\n'
|
||
+ ],
|
||
+ {'filter': '["a|.*|"]', 'use_devicesfile': '0'}
|
||
+ ),
|
||
+ ]
|
||
+
|
||
+)
|
||
+def test_lvm_config_devices_parser(config_as_lines, config_as_dict):
|
||
+ lvm_config = scanlvmconfig._lvm_config_devices_parser(config_as_lines)
|
||
+ assert lvm_config == config_as_dict
|
||
+
|
||
+
|
||
+def test_scan_when_lvm_not_installed(monkeypatch):
|
||
+ def isfile_mocked(_):
|
||
+ assert False
|
||
+
|
||
+ def read_config_lines_mocked(_):
|
||
+ assert False
|
||
+
|
||
+ msgs = [
|
||
+ DistributionSignedRPM(items=[])
|
||
+ ]
|
||
+
|
||
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs))
|
||
+ monkeypatch.setattr(api, 'produce', produce_mocked())
|
||
+ monkeypatch.setattr(os.path, 'isfile', isfile_mocked)
|
||
+ monkeypatch.setattr(scanlvmconfig, '_read_config_lines', read_config_lines_mocked)
|
||
+
|
||
+ scanlvmconfig.scan()
|
||
+
|
||
+ assert not api.produce.called
|
||
+
|
||
+
|
||
+@pytest.mark.parametrize(
|
||
+ ('source_major_version', 'devices_section_dict', 'produced_devices_section'),
|
||
+ [
|
||
+ ('8', {}, LVMConfigDevicesSection(use_devicesfile=False)),
|
||
+ ('9', {}, LVMConfigDevicesSection(use_devicesfile=True)),
|
||
+ ('8', {
|
||
+ 'use_devicesfile': '0',
|
||
+ }, LVMConfigDevicesSection(use_devicesfile=False,
|
||
+ devicesfile='system.devices')
|
||
+ ),
|
||
+ ('9', {
|
||
+ 'use_devicesfile': '0',
|
||
+ 'devicesfile': 'file-name.devices'
|
||
+ }, LVMConfigDevicesSection(use_devicesfile=False,
|
||
+ devicesfile='file-name.devices')
|
||
+ ),
|
||
+
|
||
+ ('8', {
|
||
+ 'use_devicesfile': '1',
|
||
+ 'devicesfile': 'file-name.devices'
|
||
+ }, LVMConfigDevicesSection(use_devicesfile=True,
|
||
+ devicesfile='file-name.devices')
|
||
+ ),
|
||
+ ('9', {
|
||
+ 'use_devicesfile': '1',
|
||
+ }, LVMConfigDevicesSection(use_devicesfile=True,
|
||
+ devicesfile='system.devices')
|
||
+ ),
|
||
+
|
||
+ ]
|
||
+
|
||
+)
|
||
+def test_scan_when_lvm_installed(monkeypatch, source_major_version, devices_section_dict, produced_devices_section):
|
||
+
|
||
+ def isfile_mocked(file):
|
||
+ assert file == scanlvmconfig.LVM_CONFIG_PATH
|
||
+ return True
|
||
+
|
||
+ def read_config_lines_mocked(file):
|
||
+ assert file == scanlvmconfig.LVM_CONFIG_PATH
|
||
+ return ["test_line"]
|
||
+
|
||
+ def lvm_config_devices_parser_mocked(lines):
|
||
+ assert lines == ["test_line"]
|
||
+ return devices_section_dict
|
||
+
|
||
+ lvm_package = RPM(
|
||
+ name='lvm2',
|
||
+ version='2',
|
||
+ release='1',
|
||
+ epoch='1',
|
||
+ packager='',
|
||
+ arch='x86_64',
|
||
+ pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'
|
||
+ )
|
||
+
|
||
+ msgs = [
|
||
+ DistributionSignedRPM(items=[lvm_package])
|
||
+ ]
|
||
+
|
||
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs))
|
||
+ monkeypatch.setattr(api, 'produce', produce_mocked())
|
||
+ monkeypatch.setattr(version, 'get_source_major_version', lambda: source_major_version)
|
||
+ monkeypatch.setattr(os.path, 'isfile', isfile_mocked)
|
||
+ monkeypatch.setattr(scanlvmconfig, '_read_config_lines', read_config_lines_mocked)
|
||
+ monkeypatch.setattr(scanlvmconfig, '_lvm_config_devices_parser', lvm_config_devices_parser_mocked)
|
||
+
|
||
+ scanlvmconfig.scan()
|
||
+
|
||
+ assert api.produce.called == 1
|
||
+ assert len(api.produce.model_instances) == 1
|
||
+
|
||
+ produced_model = api.produce.model_instances[0]
|
||
+ assert isinstance(produced_model, LVMConfig)
|
||
+ assert produced_model.devices == produced_devices_section
|
||
diff --git a/repos/system_upgrade/common/actors/scanvendorrepofiles/actor.py b/repos/system_upgrade/common/actors/scanvendorrepofiles/actor.py
|
||
new file mode 100644
|
||
index 00000000..a5e481cb
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/actors/scanvendorrepofiles/actor.py
|
||
@@ -0,0 +1,26 @@
|
||
+from leapp.actors import Actor
|
||
+from leapp.libraries.actor import scanvendorrepofiles
|
||
+from leapp.models import (
|
||
+ CustomTargetRepositoryFile,
|
||
+ ActiveVendorList,
|
||
+ VendorCustomTargetRepositoryList,
|
||
+)
|
||
+from leapp.tags import FactsPhaseTag, IPUWorkflowTag
|
||
+
|
||
+
|
||
+class ScanVendorRepofiles(Actor):
|
||
+ """
|
||
+ Load and produce custom repository data from vendor-provided files.
|
||
+ Only those vendors whose source system repoids were found on the system will be included.
|
||
+ """
|
||
+
|
||
+ name = "scan_vendor_repofiles"
|
||
+ consumes = ActiveVendorList
|
||
+ produces = (
|
||
+ CustomTargetRepositoryFile,
|
||
+ VendorCustomTargetRepositoryList,
|
||
+ )
|
||
+ tags = (FactsPhaseTag, IPUWorkflowTag)
|
||
+
|
||
+ def process(self):
|
||
+ scanvendorrepofiles.process()
|
||
diff --git a/repos/system_upgrade/common/actors/scanvendorrepofiles/libraries/scanvendorrepofiles.py b/repos/system_upgrade/common/actors/scanvendorrepofiles/libraries/scanvendorrepofiles.py
|
||
new file mode 100644
|
||
index 00000000..84392101
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/actors/scanvendorrepofiles/libraries/scanvendorrepofiles.py
|
||
@@ -0,0 +1,72 @@
|
||
+import os
|
||
+
|
||
+from leapp.libraries.common import repofileutils
|
||
+from leapp.libraries.stdlib import api
|
||
+from leapp.models import (
|
||
+ CustomTargetRepository,
|
||
+ CustomTargetRepositoryFile,
|
||
+ ActiveVendorList,
|
||
+ VendorCustomTargetRepositoryList,
|
||
+)
|
||
+
|
||
+
|
||
+VENDORS_DIR = "/etc/leapp/files/vendors.d/"
|
||
+REPOFILE_SUFFIX = ".repo"
|
||
+
|
||
+
|
||
+def process():
|
||
+ """
|
||
+ Produce CustomTargetRepository msgs for the vendor repo files inside the
|
||
+ <CUSTOM_REPO_DIR>.
|
||
+
|
||
+ The CustomTargetRepository messages are produced only if a "from" vendor repository
|
||
+ listed indide its map matched one of the repositories active on the system.
|
||
+ """
|
||
+ if not os.path.isdir(VENDORS_DIR):
|
||
+ api.current_logger().debug(
|
||
+ "The {} directory doesn't exist. Nothing to do.".format(VENDORS_DIR)
|
||
+ )
|
||
+ return
|
||
+
|
||
+ for repofile_name in os.listdir(VENDORS_DIR):
|
||
+ if not repofile_name.endswith(REPOFILE_SUFFIX):
|
||
+ continue
|
||
+ # Cut the .repo part to get only the name.
|
||
+ vendor_name = repofile_name[:-5]
|
||
+
|
||
+ active_vendors = []
|
||
+ for vendor_list in api.consume(ActiveVendorList):
|
||
+ active_vendors.extend(vendor_list.data)
|
||
+
|
||
+ api.current_logger().debug("Active vendor list: {}".format(active_vendors))
|
||
+
|
||
+ if vendor_name not in active_vendors:
|
||
+ api.current_logger().debug(
|
||
+ "Vendor {} not in active list, skipping".format(vendor_name)
|
||
+ )
|
||
+ continue
|
||
+
|
||
+ full_repo_path = os.path.join(VENDORS_DIR, repofile_name)
|
||
+ parsed_repofile = repofileutils.parse_repofile(full_repo_path)
|
||
+ api.current_logger().debug(
|
||
+ "Vendor {} found in active list, processing file {}".format(vendor_name, repofile_name)
|
||
+ )
|
||
+
|
||
+ api.produce(CustomTargetRepositoryFile(file=full_repo_path))
|
||
+
|
||
+ custom_vendor_repos = [
|
||
+ CustomTargetRepository(
|
||
+ repoid=repo.repoid,
|
||
+ name=repo.name,
|
||
+ baseurl=repo.baseurl,
|
||
+ enabled=repo.enabled,
|
||
+ ) for repo in parsed_repofile.data
|
||
+ ]
|
||
+
|
||
+ api.produce(
|
||
+ VendorCustomTargetRepositoryList(vendor=vendor_name, repos=custom_vendor_repos)
|
||
+ )
|
||
+
|
||
+ api.current_logger().info(
|
||
+ "The {} directory exists, vendor repositories loaded.".format(VENDORS_DIR)
|
||
+ )
|
||
diff --git a/repos/system_upgrade/common/actors/scanvendorrepofiles/tests/test_scanvendorrepofiles.py b/repos/system_upgrade/common/actors/scanvendorrepofiles/tests/test_scanvendorrepofiles.py
|
||
new file mode 100644
|
||
index 00000000..cb5c7ab7
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/actors/scanvendorrepofiles/tests/test_scanvendorrepofiles.py
|
||
@@ -0,0 +1,131 @@
|
||
+import os
|
||
+
|
||
+from leapp.libraries.actor import scancustomrepofile
|
||
+from leapp.libraries.common import repofileutils
|
||
+from leapp.libraries.common.testutils import produce_mocked
|
||
+from leapp.libraries.stdlib import api
|
||
+
|
||
+from leapp.models import (CustomTargetRepository, CustomTargetRepositoryFile,
|
||
+ RepositoryData, RepositoryFile)
|
||
+
|
||
+
|
||
+_REPODATA = [
|
||
+ RepositoryData(repoid="repo1", name="repo1name", baseurl="repo1url", enabled=True),
|
||
+ RepositoryData(repoid="repo2", name="repo2name", baseurl="repo2url", enabled=False),
|
||
+ RepositoryData(repoid="repo3", name="repo3name", enabled=True),
|
||
+ RepositoryData(repoid="repo4", name="repo4name", mirrorlist="mirror4list", enabled=True),
|
||
+]
|
||
+
|
||
+_CUSTOM_REPOS = [
|
||
+ CustomTargetRepository(repoid="repo1", name="repo1name", baseurl="repo1url", enabled=True),
|
||
+ CustomTargetRepository(repoid="repo2", name="repo2name", baseurl="repo2url", enabled=False),
|
||
+ CustomTargetRepository(repoid="repo3", name="repo3name", baseurl=None, enabled=True),
|
||
+ CustomTargetRepository(repoid="repo4", name="repo4name", baseurl=None, enabled=True),
|
||
+]
|
||
+
|
||
+_CUSTOM_REPO_FILE_MSG = CustomTargetRepositoryFile(file=scancustomrepofile.CUSTOM_REPO_PATH)
|
||
+
|
||
+
|
||
+_TESTING_REPODATA = [
|
||
+ RepositoryData(repoid="repo1-stable", name="repo1name", baseurl="repo1url", enabled=True),
|
||
+ RepositoryData(repoid="repo2-testing", name="repo2name", baseurl="repo2url", enabled=False),
|
||
+ RepositoryData(repoid="repo3-stable", name="repo3name", enabled=False),
|
||
+ RepositoryData(repoid="repo4-testing", name="repo4name", mirrorlist="mirror4list", enabled=True),
|
||
+]
|
||
+
|
||
+_TESTING_CUSTOM_REPOS_STABLE_TARGET = [
|
||
+ CustomTargetRepository(repoid="repo1-stable", name="repo1name", baseurl="repo1url", enabled=True),
|
||
+ CustomTargetRepository(repoid="repo2-testing", name="repo2name", baseurl="repo2url", enabled=False),
|
||
+ CustomTargetRepository(repoid="repo3-stable", name="repo3name", baseurl=None, enabled=False),
|
||
+ CustomTargetRepository(repoid="repo4-testing", name="repo4name", baseurl=None, enabled=True),
|
||
+]
|
||
+
|
||
+_TESTING_CUSTOM_REPOS_BETA_TARGET = [
|
||
+ CustomTargetRepository(repoid="repo1-stable", name="repo1name", baseurl="repo1url", enabled=True),
|
||
+ CustomTargetRepository(repoid="repo2-testing", name="repo2name", baseurl="repo2url", enabled=True),
|
||
+ CustomTargetRepository(repoid="repo3-stable", name="repo3name", baseurl=None, enabled=False),
|
||
+ CustomTargetRepository(repoid="repo4-testing", name="repo4name", baseurl=None, enabled=True),
|
||
+]
|
||
+
|
||
+_PROCESS_STABLE_TARGET = "stable"
|
||
+_PROCESS_BETA_TARGET = "beta"
|
||
+
|
||
+
|
||
+class LoggerMocked(object):
|
||
+ def __init__(self):
|
||
+ self.infomsg = None
|
||
+ self.debugmsg = None
|
||
+
|
||
+ def info(self, msg):
|
||
+ self.infomsg = msg
|
||
+
|
||
+ def debug(self, msg):
|
||
+ self.debugmsg = msg
|
||
+
|
||
+ def __call__(self):
|
||
+ return self
|
||
+
|
||
+
|
||
+def test_no_repofile(monkeypatch):
|
||
+ monkeypatch.setattr(os.path, 'isfile', lambda dummy: False)
|
||
+ monkeypatch.setattr(api, 'produce', produce_mocked())
|
||
+ monkeypatch.setattr(api, 'current_logger', LoggerMocked())
|
||
+ scancustomrepofile.process()
|
||
+ msg = "The {} file doesn't exist. Nothing to do.".format(scancustomrepofile.CUSTOM_REPO_PATH)
|
||
+ assert api.current_logger.debugmsg == msg
|
||
+ assert not api.produce.called
|
||
+
|
||
+
|
||
+def test_valid_repofile_exists(monkeypatch):
|
||
+ def _mocked_parse_repofile(fpath):
|
||
+ return RepositoryFile(file=fpath, data=_REPODATA)
|
||
+ monkeypatch.setattr(os.path, 'isfile', lambda dummy: True)
|
||
+ monkeypatch.setattr(api, 'produce', produce_mocked())
|
||
+ monkeypatch.setattr(repofileutils, 'parse_repofile', _mocked_parse_repofile)
|
||
+ monkeypatch.setattr(api, 'current_logger', LoggerMocked())
|
||
+ scancustomrepofile.process()
|
||
+ msg = "The {} file exists, custom repositories loaded.".format(scancustomrepofile.CUSTOM_REPO_PATH)
|
||
+ assert api.current_logger.infomsg == msg
|
||
+ assert api.produce.called == len(_CUSTOM_REPOS) + 1
|
||
+ assert _CUSTOM_REPO_FILE_MSG in api.produce.model_instances
|
||
+ for crepo in _CUSTOM_REPOS:
|
||
+ assert crepo in api.produce.model_instances
|
||
+
|
||
+
|
||
+def test_target_stable_repos(monkeypatch):
|
||
+ def _mocked_parse_repofile(fpath):
|
||
+ return RepositoryFile(file=fpath, data=_TESTING_REPODATA)
|
||
+ monkeypatch.setattr(os.path, 'isfile', lambda dummy: True)
|
||
+ monkeypatch.setattr(api, 'produce', produce_mocked())
|
||
+ monkeypatch.setattr(repofileutils, 'parse_repofile', _mocked_parse_repofile)
|
||
+
|
||
+ scancustomrepofile.process(_PROCESS_STABLE_TARGET)
|
||
+ assert api.produce.called == len(_TESTING_CUSTOM_REPOS_STABLE_TARGET) + 1
|
||
+ for crepo in _TESTING_CUSTOM_REPOS_STABLE_TARGET:
|
||
+ assert crepo in api.produce.model_instances
|
||
+
|
||
+
|
||
+def test_target_beta_repos(monkeypatch):
|
||
+ def _mocked_parse_repofile(fpath):
|
||
+ return RepositoryFile(file=fpath, data=_TESTING_REPODATA)
|
||
+ monkeypatch.setattr(os.path, 'isfile', lambda dummy: True)
|
||
+ monkeypatch.setattr(api, 'produce', produce_mocked())
|
||
+ monkeypatch.setattr(repofileutils, 'parse_repofile', _mocked_parse_repofile)
|
||
+
|
||
+ scancustomrepofile.process(_PROCESS_BETA_TARGET)
|
||
+ assert api.produce.called == len(_TESTING_CUSTOM_REPOS_BETA_TARGET) + 1
|
||
+ for crepo in _TESTING_CUSTOM_REPOS_BETA_TARGET:
|
||
+ assert crepo in api.produce.model_instances
|
||
+
|
||
+
|
||
+def test_empty_repofile_exists(monkeypatch):
|
||
+ def _mocked_parse_repofile(fpath):
|
||
+ return RepositoryFile(file=fpath, data=[])
|
||
+ monkeypatch.setattr(os.path, 'isfile', lambda dummy: True)
|
||
+ monkeypatch.setattr(api, 'produce', produce_mocked())
|
||
+ monkeypatch.setattr(repofileutils, 'parse_repofile', _mocked_parse_repofile)
|
||
+ monkeypatch.setattr(api, 'current_logger', LoggerMocked())
|
||
+ scancustomrepofile.process()
|
||
+ msg = "The {} file exists, but is empty. Nothing to do.".format(scancustomrepofile.CUSTOM_REPO_PATH)
|
||
+ assert api.current_logger.infomsg == msg
|
||
+ assert not api.produce.called
|
||
diff --git a/repos/system_upgrade/common/actors/setuptargetrepos/actor.py b/repos/system_upgrade/common/actors/setuptargetrepos/actor.py
|
||
index 91855818..3a7e955b 100644
|
||
--- a/repos/system_upgrade/common/actors/setuptargetrepos/actor.py
|
||
+++ b/repos/system_upgrade/common/actors/setuptargetrepos/actor.py
|
||
@@ -10,7 +10,8 @@ from leapp.models import (
|
||
RHUIInfo,
|
||
SkippedRepositories,
|
||
TargetRepositories,
|
||
- UsedRepositories
|
||
+ UsedRepositories,
|
||
+ VendorCustomTargetRepositoryList
|
||
)
|
||
from leapp.tags import FactsPhaseTag, IPUWorkflowTag
|
||
|
||
@@ -37,7 +38,8 @@ class SetupTargetRepos(Actor):
|
||
RepositoriesFacts,
|
||
RepositoriesBlacklisted,
|
||
RHUIInfo,
|
||
- UsedRepositories)
|
||
+ UsedRepositories,
|
||
+ VendorCustomTargetRepositoryList)
|
||
produces = (TargetRepositories, SkippedRepositories)
|
||
tags = (IPUWorkflowTag, FactsPhaseTag)
|
||
|
||
diff --git a/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py b/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py
|
||
index df17a217..41e10247 100644
|
||
--- a/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py
|
||
+++ b/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py
|
||
@@ -1,6 +1,7 @@
|
||
from leapp.libraries.actor import setuptargetrepos_repomap
|
||
from leapp.libraries.common.config import get_source_distro_id, get_target_distro_id
|
||
from leapp.libraries.common.config.version import get_source_major_version, get_source_version
|
||
+from leapp.libraries.common.repomaputils import combine_repomap_messages
|
||
from leapp.libraries.stdlib import api
|
||
from leapp.models import (
|
||
CustomTargetRepository,
|
||
@@ -14,7 +15,8 @@ from leapp.models import (
|
||
RHUIInfo,
|
||
SkippedRepositories,
|
||
TargetRepositories,
|
||
- UsedRepositories
|
||
+ UsedRepositories,
|
||
+ VendorCustomTargetRepositoryList
|
||
)
|
||
from leapp.utils.deprecation import suppress_deprecation
|
||
|
||
@@ -83,14 +85,63 @@ def _get_mapped_repoids(repomap, src_repoids):
|
||
return mapped_repoids
|
||
|
||
|
||
+def _get_vendor_custom_repos(enabled_repos, mapping_list):
|
||
+ # Look at what source repos from the vendor mapping were enabled.
|
||
+ # If any of them are in beta, include vendor's custom repos in the list.
|
||
+ # Otherwise skip them.
|
||
+
|
||
+ result = []
|
||
+
|
||
+ # Build a dict of vendor mappings for easy lookup.
|
||
+ map_dict = {mapping.vendor: mapping for mapping in mapping_list if mapping.vendor}
|
||
+
|
||
+ for vendor_repolist in api.consume(VendorCustomTargetRepositoryList):
|
||
+ vendor_repomap = map_dict[vendor_repolist.vendor]
|
||
+
|
||
+ # Find the beta channel repositories for the vendor.
|
||
+ beta_repos = [
|
||
+ x.repoid for x in vendor_repomap.repositories if x.channel == "beta"
|
||
+ ]
|
||
+ api.current_logger().debug(
|
||
+ "Vendor {} beta repos: {}".format(vendor_repolist.vendor, beta_repos)
|
||
+ )
|
||
+
|
||
+ # Are any of the beta repos present and enabled on the system?
|
||
+ if any(rep in beta_repos for rep in enabled_repos):
|
||
+ # If so, use all repos including beta in the upgrade.
|
||
+ vendor_repos = vendor_repolist.repos
|
||
+ else:
|
||
+ # Otherwise filter beta repos out.
|
||
+ vendor_repos = [repo for repo in vendor_repolist.repos if repo.repoid not in beta_repos]
|
||
+
|
||
+ result.extend([CustomTargetRepository(
|
||
+ repoid=repo.repoid,
|
||
+ name=repo.name,
|
||
+ baseurl=repo.baseurl,
|
||
+ enabled=repo.enabled,
|
||
+ ) for repo in vendor_repos])
|
||
+
|
||
+ return result
|
||
+
|
||
+
|
||
@suppress_deprecation(RHELTargetRepository)
|
||
def process():
|
||
# Load relevant data from messages
|
||
used_repoids_dict = _get_used_repo_dict()
|
||
enabled_repoids = _get_enabled_repoids()
|
||
excluded_repoids = _get_blacklisted_repoids()
|
||
+
|
||
+ # Remember that we can't just grab one message, each vendor can have its own mapping.
|
||
+ repo_mapping_list = list(api.consume(RepositoriesMapping))
|
||
+
|
||
custom_repos = _get_custom_target_repos()
|
||
repoids_from_installed_packages = _get_repoids_from_installed_packages()
|
||
+ vendor_repos = _get_vendor_custom_repos(enabled_repoids, repo_mapping_list)
|
||
+ custom_repos.extend(vendor_repos)
|
||
+
|
||
+ api.current_logger().debug(
|
||
+ "Vendor repolist: {}".format([repo.repoid for repo in vendor_repos])
|
||
+ )
|
||
|
||
# Setup repomap handler
|
||
repo_mappig_msg = next(api.consume(RepositoriesMapping), RepositoriesMapping())
|
||
@@ -166,6 +217,10 @@ def process():
|
||
custom_repos = [repo for repo in custom_repos if repo.repoid not in excluded_repoids]
|
||
custom_repos = sorted(custom_repos, key=lambda x: x.repoid)
|
||
|
||
+ api.current_logger().debug(
|
||
+ "Final repolist: {}".format([repo.repoid for repo in custom_repos])
|
||
+ )
|
||
+
|
||
# produce message about skipped repositories
|
||
enabled_repoids_with_mapping = _get_mapped_repoids(repomap, enabled_repoids)
|
||
skipped_repoids = enabled_repoids & set(used_repoids_dict.keys()) - enabled_repoids_with_mapping
|
||
diff --git a/repos/system_upgrade/common/actors/systemfacts/actor.py b/repos/system_upgrade/common/actors/systemfacts/actor.py
|
||
index 59b12c87..85d4a09e 100644
|
||
--- a/repos/system_upgrade/common/actors/systemfacts/actor.py
|
||
+++ b/repos/system_upgrade/common/actors/systemfacts/actor.py
|
||
@@ -47,7 +47,7 @@ class SystemFactsActor(Actor):
|
||
GrubCfgBios,
|
||
Report
|
||
)
|
||
- tags = (IPUWorkflowTag, FactsPhaseTag,)
|
||
+ tags = (IPUWorkflowTag, FactsPhaseTag.Before,)
|
||
|
||
def process(self):
|
||
self.produce(systemfacts.get_sysctls_status())
|
||
diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
|
||
index c825c731..62a84a85 100644
|
||
--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
|
||
+++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
|
||
@@ -155,9 +155,10 @@ def _import_gpg_keys(context, install_root_dir, target_major_version):
|
||
# installation of initial packages
|
||
try:
|
||
# Import also any other keys provided by the customer in the same directory
|
||
- for certname in os.listdir(certs_path):
|
||
- cmd = ['rpm', '--root', install_root_dir, '--import', os.path.join(certs_path, certname)]
|
||
- context.call(cmd, callback_raw=utils.logging_handler)
|
||
+ for trusted_dir in certs_path:
|
||
+ for certname in os.listdir(trusted_dir):
|
||
+ cmd = ['rpm', '--root', install_root_dir, '--import', os.path.join(trusted_dir, certname)]
|
||
+ context.call(cmd, callback_raw=utils.logging_handler)
|
||
except CalledProcessError as exc:
|
||
raise StopActorExecutionError(
|
||
message=(
|
||
@@ -660,6 +661,7 @@ def _prep_repository_access(context, target_userspace):
|
||
run(["chroot", target_userspace, "/bin/bash", "-c", "su - -c update-ca-trust"])
|
||
|
||
if not rhsm.skip_rhsm():
|
||
+ _copy_certificates(context, target_userspace)
|
||
run(['rm', '-rf', os.path.join(target_etc, 'rhsm')])
|
||
context.copytree_from('/etc/rhsm', os.path.join(target_etc, 'rhsm'))
|
||
|
||
diff --git a/repos/system_upgrade/common/actors/trustedgpgkeysscanner/libraries/trustedgpgkeys.py b/repos/system_upgrade/common/actors/trustedgpgkeysscanner/libraries/trustedgpgkeys.py
|
||
index 6377f767..4c5420f6 100644
|
||
--- a/repos/system_upgrade/common/actors/trustedgpgkeysscanner/libraries/trustedgpgkeys.py
|
||
+++ b/repos/system_upgrade/common/actors/trustedgpgkeysscanner/libraries/trustedgpgkeys.py
|
||
@@ -13,13 +13,14 @@ def _get_pubkeys(installed_rpms):
|
||
pubkeys = get_pubkeys_from_rpms(installed_rpms)
|
||
db_pubkeys = [key.fingerprint for key in pubkeys]
|
||
certs_path = get_path_to_gpg_certs()
|
||
- for certname in os.listdir(certs_path):
|
||
- key_file = os.path.join(certs_path, certname)
|
||
- fps = get_gpg_fp_from_file(key_file)
|
||
- for fp in fps:
|
||
- if fp not in db_pubkeys:
|
||
- pubkeys.append(GpgKey(fingerprint=fp, rpmdb=False, filename=key_file))
|
||
- db_pubkeys += fp
|
||
+ for trusted_dir in certs_path:
|
||
+ for certname in os.listdir(trusted_dir):
|
||
+ key_file = os.path.join(trusted_dir, certname)
|
||
+ fps = get_gpg_fp_from_file(key_file)
|
||
+ for fp in fps:
|
||
+ if fp not in db_pubkeys:
|
||
+ pubkeys.append(GpgKey(fingerprint=fp, rpmdb=False, filename=key_file))
|
||
+ db_pubkeys += fp
|
||
return pubkeys
|
||
|
||
|
||
diff --git a/repos/system_upgrade/common/actors/vendorreposignaturescanner/actor.py b/repos/system_upgrade/common/actors/vendorreposignaturescanner/actor.py
|
||
new file mode 100644
|
||
index 00000000..dbf86974
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/actors/vendorreposignaturescanner/actor.py
|
||
@@ -0,0 +1,72 @@
|
||
+import os
|
||
+
|
||
+from leapp.actors import Actor
|
||
+from leapp.models import VendorSignatures, ActiveVendorList
|
||
+from leapp.tags import FactsPhaseTag, IPUWorkflowTag
|
||
+
|
||
+
|
||
+VENDORS_DIR = "/etc/leapp/files/vendors.d/"
|
||
+SIGFILE_SUFFIX = ".sigs"
|
||
+
|
||
+
|
||
+class VendorRepoSignatureScanner(Actor):
|
||
+ """
|
||
+ Produce VendorSignatures messages for the vendor signature files inside the
|
||
+ <VENDORS_DIR>.
|
||
+ These messages are used to extend the list of packages Leapp will consider
|
||
+ signed and will attempt to upgrade.
|
||
+
|
||
+ The messages are produced only if a "from" vendor repository
|
||
+ listed inside its map matched one of the repositories active on the system.
|
||
+ """
|
||
+
|
||
+ name = 'vendor_repo_signature_scanner'
|
||
+ consumes = (ActiveVendorList)
|
||
+ produces = (VendorSignatures)
|
||
+ tags = (IPUWorkflowTag, FactsPhaseTag.Before)
|
||
+
|
||
+ def process(self):
|
||
+ if not os.path.isdir(VENDORS_DIR):
|
||
+ self.log.debug(
|
||
+ "The {} directory doesn't exist. Nothing to do.".format(VENDORS_DIR)
|
||
+ )
|
||
+ return
|
||
+
|
||
+ active_vendors = []
|
||
+ for vendor_list in self.consume(ActiveVendorList):
|
||
+ active_vendors.extend(vendor_list.data)
|
||
+
|
||
+ self.log.debug(
|
||
+ "Active vendor list: {}".format(active_vendors)
|
||
+ )
|
||
+
|
||
+ for sigfile_name in os.listdir(VENDORS_DIR):
|
||
+ if not sigfile_name.endswith(SIGFILE_SUFFIX):
|
||
+ continue
|
||
+ # Cut the suffix part to get only the name.
|
||
+ vendor_name = sigfile_name[:-5]
|
||
+
|
||
+ if vendor_name not in active_vendors:
|
||
+ self.log.debug(
|
||
+ "Vendor {} not in active list, skipping".format(vendor_name)
|
||
+ )
|
||
+ continue
|
||
+
|
||
+ self.log.debug(
|
||
+ "Vendor {} found in active list, processing file {}".format(vendor_name, sigfile_name)
|
||
+ )
|
||
+
|
||
+ full_sigfile_path = os.path.join(VENDORS_DIR, sigfile_name)
|
||
+ with open(full_sigfile_path) as f:
|
||
+ signatures = [line for line in f.read().splitlines() if line]
|
||
+
|
||
+ self.produce(
|
||
+ VendorSignatures(
|
||
+ vendor=vendor_name,
|
||
+ sigs=signatures,
|
||
+ )
|
||
+ )
|
||
+
|
||
+ self.log.info(
|
||
+ "The {} directory exists, vendor signatures loaded.".format(VENDORS_DIR)
|
||
+ )
|
||
diff --git a/repos/system_upgrade/common/actors/vendorrepositoriesmapping/actor.py b/repos/system_upgrade/common/actors/vendorrepositoriesmapping/actor.py
|
||
new file mode 100644
|
||
index 00000000..13256476
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/actors/vendorrepositoriesmapping/actor.py
|
||
@@ -0,0 +1,19 @@
|
||
+from leapp.actors import Actor
|
||
+# from leapp.libraries.common.repomaputils import scan_vendor_repomaps, VENDOR_REPOMAP_DIR
|
||
+from leapp.libraries.actor.vendorrepositoriesmapping import scan_vendor_repomaps
|
||
+from leapp.models import VendorSourceRepos, RepositoriesMapping
|
||
+from leapp.tags import FactsPhaseTag, IPUWorkflowTag
|
||
+
|
||
+
|
||
+class VendorRepositoriesMapping(Actor):
|
||
+ """
|
||
+ Scan the vendor repository mapping files and provide the data to other actors.
|
||
+ """
|
||
+
|
||
+ name = "vendor_repositories_mapping"
|
||
+ consumes = ()
|
||
+ produces = (RepositoriesMapping, VendorSourceRepos,)
|
||
+ tags = (IPUWorkflowTag, FactsPhaseTag.Before)
|
||
+
|
||
+ def process(self):
|
||
+ scan_vendor_repomaps()
|
||
diff --git a/repos/system_upgrade/common/actors/vendorrepositoriesmapping/libraries/vendorrepositoriesmapping.py b/repos/system_upgrade/common/actors/vendorrepositoriesmapping/libraries/vendorrepositoriesmapping.py
|
||
new file mode 100644
|
||
index 00000000..6a41d4e5
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/actors/vendorrepositoriesmapping/libraries/vendorrepositoriesmapping.py
|
||
@@ -0,0 +1,92 @@
|
||
+import os
|
||
+import json
|
||
+
|
||
+from leapp.libraries.common import fetch
|
||
+from leapp.libraries.common.config.version import get_target_major_version, get_source_major_version
|
||
+from leapp.libraries.common.repomaputils import RepoMapData
|
||
+from leapp.libraries.stdlib import api
|
||
+from leapp.models import VendorSourceRepos, RepositoriesMapping
|
||
+from leapp.models.fields import ModelViolationError
|
||
+from leapp.exceptions import StopActorExecutionError
|
||
+
|
||
+
|
||
+VENDORS_DIR = "/etc/leapp/files/vendors.d"
|
||
+"""The folder containing the vendor repository mapping files."""
|
||
+
|
||
+
|
||
+def inhibit_upgrade(msg):
|
||
+ raise StopActorExecutionError(
|
||
+ msg,
|
||
+ details={'hint': ('Read documentation at the following link for more'
|
||
+ ' information about how to retrieve the valid file:'
|
||
+ ' https://access.redhat.com/articles/3664871')})
|
||
+
|
||
+
|
||
+def read_repofile(repofile, repodir):
|
||
+ try:
|
||
+ return json.loads(fetch.read_or_fetch(repofile, directory=repodir, allow_download=False))
|
||
+ except ValueError:
|
||
+ # The data does not contain a valid json
|
||
+ inhibit_upgrade('The repository mapping file is invalid: file does not contain a valid JSON object.')
|
||
+ return None
|
||
+
|
||
+
|
||
+def read_repomap_file(repomap_file, read_repofile_func, vendor_name):
|
||
+ json_data = read_repofile_func(repomap_file, VENDORS_DIR)
|
||
+ try:
|
||
+ repomap_data = RepoMapData.load_from_dict(json_data)
|
||
+
|
||
+ source_major = get_source_major_version()
|
||
+ target_major = get_target_major_version()
|
||
+
|
||
+ api.produce(VendorSourceRepos(
|
||
+ vendor=vendor_name,
|
||
+ source_repoids=repomap_data.get_version_repoids(source_major)
|
||
+ ))
|
||
+
|
||
+ mapping = repomap_data.get_mappings(source_major, target_major)
|
||
+ valid_major_versions = [source_major, target_major]
|
||
+
|
||
+ api.produce(RepositoriesMapping(
|
||
+ mapping=mapping,
|
||
+ repositories=repomap_data.get_repositories(valid_major_versions),
|
||
+ vendor=vendor_name
|
||
+ ))
|
||
+ except ModelViolationError as err:
|
||
+ err_message = (
|
||
+ 'The repository mapping file is invalid: '
|
||
+ 'the JSON does not match required schema (wrong field type/value): {}. '
|
||
+ 'Ensure that the current upgrade path is correct and is present in the mappings: {} -> {}'
|
||
+ .format(err, source_major, target_major)
|
||
+ )
|
||
+ inhibit_upgrade(err_message)
|
||
+ except KeyError as err:
|
||
+ inhibit_upgrade(
|
||
+ 'The repository mapping file is invalid: the JSON is missing a required field: {}'.format(err))
|
||
+ except ValueError as err:
|
||
+ # The error should contain enough information, so we do not need to clarify it further
|
||
+ inhibit_upgrade('The repository mapping file is invalid: {}'.format(err))
|
||
+
|
||
+
|
||
+def scan_vendor_repomaps(read_repofile_func=read_repofile):
|
||
+ """
|
||
+ Scan the repository mapping file and produce RepositoriesMapping msg.
|
||
+
|
||
+ See the description of the actor for more details.
|
||
+ """
|
||
+
|
||
+ map_json_suffix = "_map.json"
|
||
+ if os.path.isdir(VENDORS_DIR):
|
||
+ vendor_mapfiles = list(filter(lambda vfile: map_json_suffix in vfile, os.listdir(VENDORS_DIR)))
|
||
+
|
||
+ for mapfile in vendor_mapfiles:
|
||
+ read_repomap_file(mapfile, read_repofile_func, mapfile[:-len(map_json_suffix)])
|
||
+ else:
|
||
+ api.current_logger().debug(
|
||
+ "The {} directory doesn't exist. Nothing to do.".format(VENDORS_DIR)
|
||
+ )
|
||
+ # vendor_repomap_collection = scan_vendor_repomaps(VENDOR_REPOMAP_DIR)
|
||
+ # if vendor_repomap_collection:
|
||
+ # self.produce(vendor_repomap_collection)
|
||
+ # for repomap in vendor_repomap_collection.maps:
|
||
+ # self.produce(repomap)
|
||
diff --git a/repos/system_upgrade/common/files/distro/almalinux/gpg-signatures.json b/repos/system_upgrade/common/files/distro/almalinux/gpg-signatures.json
|
||
index 24bc93ba..3bd7376c 100644
|
||
--- a/repos/system_upgrade/common/files/distro/almalinux/gpg-signatures.json
|
||
+++ b/repos/system_upgrade/common/files/distro/almalinux/gpg-signatures.json
|
||
@@ -1,18 +1,26 @@
|
||
{
|
||
- "keys": [
|
||
- "51d6647ec21ad6ea",
|
||
- "d36cb86cb86b3716",
|
||
- "2ae81e8aced7258b",
|
||
- "429785e181b961a5",
|
||
- "d07bf2a08d50eb66"
|
||
- ],
|
||
+ "keys": {
|
||
+ "51d6647ec21ad6ea": ["gpg-pubkey-3abb34f8-5ffd890e"],
|
||
+ "d36cb86cb86b3716": ["gpg-pubkey-ced7258b-6525146f"],
|
||
+ "2ae81e8aced7258b": ["gpg-pubkey-b86b3716-61e69f29"],
|
||
+ "429785e181b961a5": ["gpg-pubkey-81b961a5-64106f70"],
|
||
+ "d07bf2a08d50eb66": []
|
||
+ },
|
||
"obsoleted-keys": {
|
||
"7": [],
|
||
- "8": [],
|
||
+ "8": [
|
||
+ "gpg-pubkey-2fa658e0-45700c69",
|
||
+ "gpg-pubkey-37017186-45761324",
|
||
+ "gpg-pubkey-db42a60e-37ea5438"
|
||
+ ],
|
||
"9": [
|
||
+ "gpg-pubkey-d4082792-5b32db75",
|
||
"gpg-pubkey-3abb34f8-5ffd890e",
|
||
+ "gpg-pubkey-6275f250-5e26cb2e",
|
||
+ "gpg-pubkey-73e3b907-6581b071",
|
||
"gpg-pubkey-ced7258b-6525146f"
|
||
],
|
||
"10": ["gpg-pubkey-b86b3716-61e69f29"]
|
||
}
|
||
+
|
||
}
|
||
diff --git a/repos/system_upgrade/common/files/distro/centos/gpg-signatures.json b/repos/system_upgrade/common/files/distro/centos/gpg-signatures.json
|
||
index fe85e03c..8056e825 100644
|
||
--- a/repos/system_upgrade/common/files/distro/centos/gpg-signatures.json
|
||
+++ b/repos/system_upgrade/common/files/distro/centos/gpg-signatures.json
|
||
@@ -1,10 +1,24 @@
|
||
{
|
||
- "keys": [
|
||
- "24c6a8a7f4a80eb5",
|
||
- "05b555b38483c65d",
|
||
- "4eb84e71f2ee9d55"
|
||
- ],
|
||
+ "keys": {
|
||
+ "24c6a8a7f4a80eb5": [],
|
||
+ "4eb84e71f2ee9d55": [],
|
||
+ "05b555b38483c65d": ["gpg-pubkey-8483c65d-5ccc5b19"],
|
||
+ "1ff6a2171d997668": ["gpg-pubkey-1d997668-621e3cac", "gpg-pubkey-1d997668-61bae63b"],
|
||
+ "6c7cb6ef305d49d6": []
|
||
+ },
|
||
"obsoleted-keys": {
|
||
+ "7": [],
|
||
+ "8": [
|
||
+ "gpg-pubkey-2fa658e0-45700c69",
|
||
+ "gpg-pubkey-37017186-45761324",
|
||
+ "gpg-pubkey-db42a60e-37ea5438"
|
||
+ ],
|
||
+ "9": [
|
||
+ "gpg-pubkey-d4082792-5b32db75",
|
||
+ "gpg-pubkey-3abb34f8-5ffd890e",
|
||
+ "gpg-pubkey-6275f250-5e26cb2e",
|
||
+ "gpg-pubkey-73e3b907-6581b071"
|
||
+ ],
|
||
"10": ["gpg-pubkey-8483c65d-5ccc5b19"]
|
||
}
|
||
}
|
||
diff --git a/repos/system_upgrade/common/files/distro/cloudlinux/gpg-signatures.json b/repos/system_upgrade/common/files/distro/cloudlinux/gpg-signatures.json
|
||
new file mode 100644
|
||
index 00000000..acad9006
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/files/distro/cloudlinux/gpg-signatures.json
|
||
@@ -0,0 +1,22 @@
|
||
+{
|
||
+ "keys": [
|
||
+ "8c55a6628608cb71",
|
||
+ "d07bf2a08d50eb66",
|
||
+ "429785e181b961a5"
|
||
+ ],
|
||
+ "obsoleted-keys": {
|
||
+ "7": [],
|
||
+ "8": [
|
||
+ "gpg-pubkey-2fa658e0-45700c69",
|
||
+ "gpg-pubkey-37017186-45761324",
|
||
+ "gpg-pubkey-db42a60e-37ea5438"
|
||
+ ],
|
||
+ "9": [
|
||
+ "gpg-pubkey-d4082792-5b32db75",
|
||
+ "gpg-pubkey-3abb34f8-5ffd890e",
|
||
+ "gpg-pubkey-6275f250-5e26cb2e",
|
||
+ "gpg-pubkey-73e3b907-6581b071"
|
||
+ ],
|
||
+ "10": []
|
||
+ }
|
||
+}
|
||
diff --git a/repos/system_upgrade/common/files/distro/ol/gpg-signatures.json b/repos/system_upgrade/common/files/distro/ol/gpg-signatures.json
|
||
new file mode 100644
|
||
index 00000000..a53775cf
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/files/distro/ol/gpg-signatures.json
|
||
@@ -0,0 +1,24 @@
|
||
+{
|
||
+ "keys": [
|
||
+ "72f97b74ec551f03",
|
||
+ "82562ea9ad986da3",
|
||
+ "bc4d06a08d8b756f",
|
||
+ "429785e181b961a5",
|
||
+ "d07bf2a08d50eb66"
|
||
+ ],
|
||
+ "obsoleted-keys": {
|
||
+ "7": [],
|
||
+ "8": [
|
||
+ "gpg-pubkey-2fa658e0-45700c69",
|
||
+ "gpg-pubkey-37017186-45761324",
|
||
+ "gpg-pubkey-db42a60e-37ea5438"
|
||
+ ],
|
||
+ "9": [
|
||
+ "gpg-pubkey-d4082792-5b32db75",
|
||
+ "gpg-pubkey-3abb34f8-5ffd890e",
|
||
+ "gpg-pubkey-6275f250-5e26cb2e",
|
||
+ "gpg-pubkey-73e3b907-6581b071"
|
||
+ ],
|
||
+ "10": []
|
||
+ }
|
||
+}
|
||
diff --git a/repos/system_upgrade/common/files/distro/rhel/gpg-signatures.json b/repos/system_upgrade/common/files/distro/rhel/gpg-signatures.json
|
||
index 3cc67f82..0b989984 100644
|
||
--- a/repos/system_upgrade/common/files/distro/rhel/gpg-signatures.json
|
||
+++ b/repos/system_upgrade/common/files/distro/rhel/gpg-signatures.json
|
||
@@ -1,19 +1,28 @@
|
||
{
|
||
- "keys": [
|
||
- "199e2f91fd431d51",
|
||
- "5326810137017186",
|
||
- "938a80caf21541eb",
|
||
- "fd372689897da07a",
|
||
- "45689c882fa658e0"
|
||
- ],
|
||
+ "keys": {
|
||
+ "199e2f91fd431d51": ["gpg-pubkey-fd431d51-4ae0493b"],
|
||
+ "5326810137017186": ["gpg-pubkey-37017186-45761324"],
|
||
+ "938a80caf21541eb": ["gpg-pubkey-f21541eb-4a5233e8"],
|
||
+ "fd372689897da07a": ["gpg-pubkey-897da07a-3c979a7f"],
|
||
+ "45689c882fa658e0": ["gpg-pubkey-2fa658e0-45700c69"],
|
||
+ "f76f66c3d4082792": ["gpg-pubkey-d4082792-5b32db75"],
|
||
+ "5054e4a45a6340b3": ["gpg-pubkey-5a6340b3-6229229e"],
|
||
+ "219180cddb42a60e": ["gpg-pubkey-db42a60e-37ea5438"]
|
||
+ },
|
||
"obsoleted-keys": {
|
||
"7": [],
|
||
"8": [
|
||
"gpg-pubkey-2fa658e0-45700c69",
|
||
"gpg-pubkey-37017186-45761324",
|
||
- "gpg-pubkey-db42a60e-37ea5438"
|
||
+ "gpg-pubkey-db42a60e-37ea5438",
|
||
+ "gpg-pubkey-897da07a-3c979a7f"
|
||
+ ],
|
||
+ "9": [
|
||
+ "gpg-pubkey-d4082792-5b32db75",
|
||
+ "gpg-pubkey-3abb34f8-5ffd890e",
|
||
+ "gpg-pubkey-6275f250-5e26cb2e",
|
||
+ "gpg-pubkey-73e3b907-6581b071"
|
||
],
|
||
- "9": ["gpg-pubkey-d4082792-5b32db75"],
|
||
"10": ["gpg-pubkey-fd431d51-4ae0493b"]
|
||
}
|
||
}
|
||
diff --git a/repos/system_upgrade/common/files/distro/rocky/gpg-signatures.json b/repos/system_upgrade/common/files/distro/rocky/gpg-signatures.json
|
||
new file mode 100644
|
||
index 00000000..f1738e79
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/files/distro/rocky/gpg-signatures.json
|
||
@@ -0,0 +1,23 @@
|
||
+{
|
||
+ "keys": [
|
||
+ "15af5dac6d745a60",
|
||
+ "702d426d350d275d",
|
||
+ "429785e181b961a5",
|
||
+ "d07bf2a08d50eb66"
|
||
+ ],
|
||
+ "obsoleted-keys": {
|
||
+ "7": [],
|
||
+ "8": [
|
||
+ "gpg-pubkey-2fa658e0-45700c69",
|
||
+ "gpg-pubkey-37017186-45761324",
|
||
+ "gpg-pubkey-db42a60e-37ea5438"
|
||
+ ],
|
||
+ "9": [
|
||
+ "gpg-pubkey-d4082792-5b32db75",
|
||
+ "gpg-pubkey-3abb34f8-5ffd890e",
|
||
+ "gpg-pubkey-6275f250-5e26cb2e",
|
||
+ "gpg-pubkey-73e3b907-6581b071"
|
||
+ ],
|
||
+ "10": []
|
||
+ }
|
||
+}
|
||
diff --git a/repos/system_upgrade/common/files/distro/scientific/gpg-signatures.json b/repos/system_upgrade/common/files/distro/scientific/gpg-signatures.json
|
||
new file mode 100644
|
||
index 00000000..df764b53
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/files/distro/scientific/gpg-signatures.json
|
||
@@ -0,0 +1,22 @@
|
||
+{
|
||
+ "keys": [
|
||
+ "b0b4183f192a7d7d",
|
||
+ "429785e181b961a5",
|
||
+ "d07bf2a08d50eb66"
|
||
+ ],
|
||
+ "obsoleted-keys": {
|
||
+ "7": [],
|
||
+ "8": [
|
||
+ "gpg-pubkey-2fa658e0-45700c69",
|
||
+ "gpg-pubkey-37017186-45761324",
|
||
+ "gpg-pubkey-db42a60e-37ea5438"
|
||
+ ],
|
||
+ "9": [
|
||
+ "gpg-pubkey-d4082792-5b32db75",
|
||
+ "gpg-pubkey-3abb34f8-5ffd890e",
|
||
+ "gpg-pubkey-6275f250-5e26cb2e",
|
||
+ "gpg-pubkey-73e3b907-6581b071"
|
||
+ ],
|
||
+ "10": []
|
||
+ }
|
||
+}
|
||
diff --git a/repos/system_upgrade/common/files/rhel_upgrade.py b/repos/system_upgrade/common/files/rhel_upgrade.py
|
||
index 63910fe0..4e8b380d 100644
|
||
--- a/repos/system_upgrade/common/files/rhel_upgrade.py
|
||
+++ b/repos/system_upgrade/common/files/rhel_upgrade.py
|
||
@@ -188,6 +188,7 @@ class RhelUpgradeCommand(dnf.cli.Command):
|
||
to_install = self.plugin_data['pkgs_info']['to_install']
|
||
to_remove = self.plugin_data['pkgs_info']['to_remove']
|
||
to_upgrade = self.plugin_data['pkgs_info']['to_upgrade']
|
||
+ to_reinstall = self.plugin_data['pkgs_info']['to_reinstall']
|
||
|
||
# Modules to enable
|
||
self._process_entities(entities=[available_modules_to_enable],
|
||
@@ -200,6 +201,9 @@ class RhelUpgradeCommand(dnf.cli.Command):
|
||
self._process_entities(entities=to_install, op=self.base.install, entity_name='Package')
|
||
# Packages to be upgraded
|
||
self._process_entities(entities=to_upgrade, op=self.base.upgrade, entity_name='Package')
|
||
+ # Packages to be reinstalled
|
||
+ self._process_entities(entities=to_reinstall, op=self.base.reinstall, entity_name='Package')
|
||
+
|
||
self.base.distro_sync()
|
||
|
||
if self.opts.tid[0] == 'check':
|
||
diff --git a/repos/system_upgrade/common/libraries/distro.py b/repos/system_upgrade/common/libraries/distro.py
|
||
index 04e553ac..b342d4fc 100644
|
||
--- a/repos/system_upgrade/common/libraries/distro.py
|
||
+++ b/repos/system_upgrade/common/libraries/distro.py
|
||
@@ -7,6 +7,7 @@ from leapp.libraries.common.config import get_target_distro_id
|
||
from leapp.libraries.common.config.architecture import ARCH_ACCEPTED, ARCH_X86_64
|
||
from leapp.libraries.common.config.version import get_target_major_version
|
||
from leapp.libraries.stdlib import api
|
||
+from leapp.models import VendorSignatures
|
||
|
||
|
||
def get_distribution_data(distribution):
|
||
@@ -15,12 +16,19 @@ def get_distribution_data(distribution):
|
||
distribution_config = os.path.join(distributions_path, distribution, 'gpg-signatures.json')
|
||
if os.path.exists(distribution_config):
|
||
with open(distribution_config) as distro_config_file:
|
||
- return json.load(distro_config_file)
|
||
+ distro_config_json = json.load(distro_config_file)
|
||
else:
|
||
raise StopActorExecutionError(
|
||
'Cannot find distribution signature configuration.',
|
||
details={'Problem': 'Distribution {} was not found in {}.'.format(distribution, distributions_path)})
|
||
|
||
+ # Extend with Vendors signatures
|
||
+ for siglist in api.consume(VendorSignatures):
|
||
+ for sig in siglist.sigs:
|
||
+ # Add vendor signature as a new key with empty package list
|
||
+ distro_config_json["keys"][sig] = []
|
||
+
|
||
+ return distro_config_json
|
||
|
||
# distro -> major_version -> repofile -> tuple of architectures where it's present
|
||
_DISTRO_REPOFILES_MAP = {
|
||
@@ -68,6 +76,7 @@ _DISTRO_REPOFILES_MAP = {
|
||
'/etc/yum.repos.d/almalinux.repo': ARCH_ACCEPTED,
|
||
},
|
||
'9': {
|
||
+ '/etc/yum.repos.d/almalinux.repo': ARCH_ACCEPTED,
|
||
'/etc/yum.repos.d/almalinux-appstream.repo': ARCH_ACCEPTED,
|
||
'/etc/yum.repos.d/almalinux-baseos.repo': ARCH_ACCEPTED,
|
||
'/etc/yum.repos.d/almalinux-crb.repo': ARCH_ACCEPTED,
|
||
diff --git a/repos/system_upgrade/common/libraries/dnfplugin.py b/repos/system_upgrade/common/libraries/dnfplugin.py
|
||
index 1af52dc5..66b89aed 100644
|
||
--- a/repos/system_upgrade/common/libraries/dnfplugin.py
|
||
+++ b/repos/system_upgrade/common/libraries/dnfplugin.py
|
||
@@ -90,6 +90,7 @@ def build_plugin_data(target_repoids, debug, test, tasks, on_aws):
|
||
'to_install': sorted(tasks.to_install),
|
||
'to_remove': sorted(tasks.to_remove),
|
||
'to_upgrade': sorted(tasks.to_upgrade),
|
||
+ 'to_reinstall': sorted(tasks.to_reinstall),
|
||
'modules_to_enable': sorted(['{}:{}'.format(m.name, m.stream) for m in tasks.modules_to_enable]),
|
||
},
|
||
'dnf_conf': {
|
||
diff --git a/repos/system_upgrade/common/libraries/fetch.py b/repos/system_upgrade/common/libraries/fetch.py
|
||
index baf2c4eb..44abe66b 100644
|
||
--- a/repos/system_upgrade/common/libraries/fetch.py
|
||
+++ b/repos/system_upgrade/common/libraries/fetch.py
|
||
@@ -146,7 +146,8 @@ def load_data_asset(actor_requesting_asset,
|
||
asset_filename,
|
||
asset_fulltext_name,
|
||
docs_url,
|
||
- docs_title):
|
||
+ docs_title,
|
||
+ asset_directory="/etc/leapp/files"):
|
||
"""
|
||
Load the content of the data asset with given asset_filename
|
||
and produce :class:`leapp.model.ConsumedDataAsset` message.
|
||
@@ -183,7 +184,7 @@ def load_data_asset(actor_requesting_asset,
|
||
|
||
try:
|
||
# The asset family ID has the form (major, minor), include only `major` in the URL
|
||
- raw_asset_contents = read_or_fetch(asset_filename, data_stream=data_stream_major, allow_download=False)
|
||
+ raw_asset_contents = read_or_fetch(asset_filename, directory=asset_directory, data_stream=data_stream_major, allow_download=False)
|
||
asset_contents = json.loads(raw_asset_contents)
|
||
except ValueError:
|
||
msg = 'The {0} file (at {1}) does not contain a valid JSON object.'.format(asset_fulltext_name, asset_filename)
|
||
diff --git a/repos/system_upgrade/common/libraries/gpg.py b/repos/system_upgrade/common/libraries/gpg.py
|
||
index 9990cdcf..0c83a889 100644
|
||
--- a/repos/system_upgrade/common/libraries/gpg.py
|
||
+++ b/repos/system_upgrade/common/libraries/gpg.py
|
||
@@ -124,12 +124,15 @@ def get_path_to_gpg_certs():
|
||
if target_product_type == 'beta':
|
||
certs_dir = '{}beta'.format(target_major_version)
|
||
distro = config.get_target_distro_id()
|
||
- return os.path.join(
|
||
- api.get_common_folder_path('distro'),
|
||
- distro,
|
||
- GPG_CERTS_FOLDER,
|
||
- certs_dir
|
||
- )
|
||
+ return [
|
||
+ "/etc/leapp/files/vendors.d/rpm-gpg/",
|
||
+ os.path.join(
|
||
+ api.get_common_folder_path('distro'),
|
||
+ distro,
|
||
+ GPG_CERTS_FOLDER,
|
||
+ certs_dir
|
||
+ )
|
||
+ ]
|
||
|
||
|
||
def is_nogpgcheck_set():
|
||
diff --git a/repos/system_upgrade/common/libraries/repomaputils.py b/repos/system_upgrade/common/libraries/repomaputils.py
|
||
new file mode 100644
|
||
index 00000000..40a6f001
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/libraries/repomaputils.py
|
||
@@ -0,0 +1,141 @@
|
||
+from collections import defaultdict
|
||
+from leapp.models import PESIDRepositoryEntry, RepoMapEntry, RepositoriesMapping
|
||
+
|
||
+class RepoMapData:
|
||
+ VERSION_FORMAT = '1.3.0'
|
||
+
|
||
+ def __init__(self):
|
||
+ self.repositories = []
|
||
+ self.mapping = {}
|
||
+
|
||
+ def add_repository(self, data, pesid):
|
||
+ """
|
||
+ Add new PESIDRepositoryEntry with given pesid from the provided dictionary.
|
||
+
|
||
+ :param data: A dict containing the data of the added repository. The dictionary structure corresponds
|
||
+ to the repositories entries in the repository mapping JSON schema.
|
||
+ :type data: Dict[str, str]
|
||
+ :param pesid: PES id of the repository family that the newly added repository belongs to.
|
||
+ :type pesid: str
|
||
+ """
|
||
+ self.repositories.append(PESIDRepositoryEntry(
|
||
+ repoid=data['repoid'],
|
||
+ channel=data['channel'],
|
||
+ rhui=data.get('rhui', ''),
|
||
+ repo_type=data['repo_type'],
|
||
+ arch=data['arch'],
|
||
+ major_version=data['major_version'],
|
||
+ pesid=pesid,
|
||
+ distro=data['distro'],
|
||
+ ))
|
||
+
|
||
+ def get_repositories(self, valid_major_versions):
|
||
+ """
|
||
+ Return the list of PESIDRepositoryEntry object matching the specified major versions.
|
||
+ """
|
||
+ return [repo for repo in self.repositories if repo.major_version in valid_major_versions]
|
||
+
|
||
+ def get_version_repoids(self, major_version):
|
||
+ """
|
||
+ Return the list of repository ID strings for repositories matching the specified major version.
|
||
+ """
|
||
+ return [repo.repoid for repo in self.repositories if repo.major_version == major_version]
|
||
+
|
||
+ def add_mapping(self, source_major_version, target_major_version, source_pesid, target_pesid):
|
||
+ """
|
||
+ Add a new mapping entry that is mapping the source pesid to the destination pesid(s),
|
||
+ relevant in an IPU from the supplied source major version to the supplied target
|
||
+ major version.
|
||
+
|
||
+ :param str source_major_version: Specifies the major version of the source system
|
||
+ for which the added mapping applies.
|
||
+ :param str target_major_version: Specifies the major version of the target system
|
||
+ for which the added mapping applies.
|
||
+ :param str source_pesid: PESID of the source repository.
|
||
+ :param Union[str|List[str]] target_pesid: A single target PESID or a list of target
|
||
+ PESIDs of the added mapping.
|
||
+ """
|
||
+ # NOTE: it could be more simple, but I prefer to be sure the input data
|
||
+ # contains just one map per source PESID.
|
||
+ key = '{}:{}'.format(source_major_version, target_major_version)
|
||
+ rmap = self.mapping.get(key, defaultdict(set))
|
||
+ self.mapping[key] = rmap
|
||
+ if isinstance(target_pesid, list):
|
||
+ rmap[source_pesid].update(target_pesid)
|
||
+ else:
|
||
+ rmap[source_pesid].add(target_pesid)
|
||
+
|
||
+ def get_mappings(self, src_major_version, dst_major_version):
|
||
+ """
|
||
+ Return the list of RepoMapEntry objects for the specified upgrade path.
|
||
+
|
||
+ IOW, the whole mapping for specified IPU.
|
||
+ """
|
||
+ key = '{}:{}'.format(src_major_version, dst_major_version)
|
||
+ rmap = self.mapping.get(key, None)
|
||
+ if not rmap:
|
||
+ return None
|
||
+ map_list = []
|
||
+ for src_pesid in sorted(rmap.keys()):
|
||
+ map_list.append(RepoMapEntry(source=src_pesid, target=sorted(rmap[src_pesid])))
|
||
+ return map_list
|
||
+
|
||
+ @staticmethod
|
||
+ def load_from_dict(data):
|
||
+ if data['version_format'] != RepoMapData.VERSION_FORMAT:
|
||
+ raise ValueError(
|
||
+ 'The obtained repomap data has unsupported version of format.'
|
||
+ ' Get {} required {}'
|
||
+ .format(data['version_format'], RepoMapData.VERSION_FORMAT)
|
||
+ )
|
||
+
|
||
+ repomap = RepoMapData()
|
||
+
|
||
+ # Load repositories
|
||
+ existing_pesids = set()
|
||
+ for repo_family in data['repositories']:
|
||
+ existing_pesids.add(repo_family['pesid'])
|
||
+ for repo in repo_family['entries']:
|
||
+ repomap.add_repository(repo, repo_family['pesid'])
|
||
+
|
||
+ # Load mappings
|
||
+ for mapping in data['mapping']:
|
||
+ for entry in mapping['entries']:
|
||
+ if not isinstance(entry['target'], list):
|
||
+ raise ValueError(
|
||
+ 'The target field of a mapping entry is not a list: {}'
|
||
+ .format(entry)
|
||
+ )
|
||
+
|
||
+ for pesid in [entry['source']] + entry['target']:
|
||
+ if pesid not in existing_pesids:
|
||
+ raise ValueError(
|
||
+ 'The {} pesid is not related to any repository.'
|
||
+ .format(pesid)
|
||
+ )
|
||
+ repomap.add_mapping(
|
||
+ source_major_version=mapping['source_major_version'],
|
||
+ target_major_version=mapping['target_major_version'],
|
||
+ source_pesid=entry['source'],
|
||
+ target_pesid=entry['target'],
|
||
+ )
|
||
+ return repomap
|
||
+
|
||
+def combine_repomap_messages(mapping_list):
|
||
+ """
|
||
+ Combine multiple RepositoryMapping messages into one.
|
||
+ Needed because we might get more than one message if there are vendors present.
|
||
+ """
|
||
+ combined_mapping = []
|
||
+ combined_repositories = []
|
||
+ # Depending on whether there are any vendors present, we might get more than one message.
|
||
+ for msg in mapping_list:
|
||
+ combined_mapping.extend(msg.mapping)
|
||
+ combined_repositories.extend(msg.repositories)
|
||
+
|
||
+ combined_repomapping = RepositoriesMapping(
|
||
+ mapping=combined_mapping,
|
||
+ repositories=combined_repositories
|
||
+ )
|
||
+
|
||
+ return combined_repomapping
|
||
diff --git a/repos/system_upgrade/common/models/activevendorlist.py b/repos/system_upgrade/common/models/activevendorlist.py
|
||
new file mode 100644
|
||
index 00000000..de4056fb
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/models/activevendorlist.py
|
||
@@ -0,0 +1,7 @@
|
||
+from leapp.models import Model, fields
|
||
+from leapp.topics import VendorTopic
|
||
+
|
||
+
|
||
+class ActiveVendorList(Model):
|
||
+ topic = VendorTopic
|
||
+ data = fields.List(fields.String())
|
||
diff --git a/repos/system_upgrade/common/models/lvmconfig.py b/repos/system_upgrade/common/models/lvmconfig.py
|
||
new file mode 100644
|
||
index 00000000..ab5e7815
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/models/lvmconfig.py
|
||
@@ -0,0 +1,26 @@
|
||
+from leapp.models import fields, Model
|
||
+from leapp.topics import SystemInfoTopic
|
||
+
|
||
+
|
||
+class LVMConfigDevicesSection(Model):
|
||
+ """The devices section from the LVM configuration."""
|
||
+ topic = SystemInfoTopic
|
||
+
|
||
+ use_devicesfile = fields.Boolean()
|
||
+ """
|
||
+ Determines whether only the devices in the devices file are used by LVM. Note
|
||
+ that the default value changed on the RHEL 9 to True.
|
||
+ """
|
||
+
|
||
+ devicesfile = fields.String(default="system.devices")
|
||
+ """
|
||
+ Defines the name of the devices file that should be used. The default devices
|
||
+ file is located in '/etc/lvm/devices/system.devices'.
|
||
+ """
|
||
+
|
||
+
|
||
+class LVMConfig(Model):
|
||
+ """LVM configuration split into sections."""
|
||
+ topic = SystemInfoTopic
|
||
+
|
||
+ devices = fields.Model(LVMConfigDevicesSection)
|
||
diff --git a/repos/system_upgrade/common/models/multipath.py b/repos/system_upgrade/common/models/multipath.py
|
||
new file mode 100644
|
||
index 00000000..1d1c53b5
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/models/multipath.py
|
||
@@ -0,0 +1,78 @@
|
||
+from leapp.models import fields, Model
|
||
+from leapp.topics import SystemInfoTopic
|
||
+
|
||
+
|
||
+class MultipathInfo(Model):
|
||
+ """ Available information about multpath devices of the source system. """
|
||
+ topic = SystemInfoTopic
|
||
+
|
||
+ is_configured = fields.Boolean(default=False)
|
||
+ """
|
||
+ True if multipath is configured on the system.
|
||
+
|
||
+ Detected based on checking whether /etc/multipath.conf exists.
|
||
+ """
|
||
+
|
||
+ config_dir = fields.Nullable(fields.String())
|
||
+ """ Value of config_dir in the defaults section. None if not set. """
|
||
+
|
||
+
|
||
+class UpdatedMultipathConfig(Model):
|
||
+ """ Information about multipath config that needed to be modified for the target system. """
|
||
+ topic = SystemInfoTopic
|
||
+
|
||
+ updated_config_location = fields.String()
|
||
+ """ Location of the updated config that should be propagated to the source system. """
|
||
+
|
||
+ target_path = fields.String()
|
||
+ """ Location where should be the updated config placed. """
|
||
+
|
||
+
|
||
+class MultipathConfigUpdatesInfo(Model):
|
||
+ """ Aggregate information about multipath configs that were updated. """
|
||
+ topic = SystemInfoTopic
|
||
+
|
||
+ updates = fields.List(fields.Model(UpdatedMultipathConfig), default=[])
|
||
+ """ Collection of multipath config updates that must be performed during the upgrade. """
|
||
+
|
||
+
|
||
+class MultipathConfig8to9(Model):
|
||
+ """
|
||
+ Model information about multipath configuration file important for the 8>9 upgrade path.
|
||
+
|
||
+ Note: This model is in the common repository due to the technical reasons
|
||
+ (reusing parser code in a single actor), and it should not be emitted on
|
||
+ non-8to9 upgrade paths. In the future, this model will likely be moved into
|
||
+ el8toel9 repository.
|
||
+ """
|
||
+ topic = SystemInfoTopic
|
||
+
|
||
+ pathname = fields.String()
|
||
+ """Config file path name"""
|
||
+
|
||
+ config_dir = fields.Nullable(fields.String())
|
||
+ """Value of config_dir in the defaults section. None if not set"""
|
||
+
|
||
+ enable_foreign_exists = fields.Boolean(default=False)
|
||
+ """True if enable_foreign is set in the defaults section"""
|
||
+
|
||
+ invalid_regexes_exist = fields.Boolean(default=False)
|
||
+ """True if any regular expressions have the value of "*" """
|
||
+
|
||
+ allow_usb_exists = fields.Boolean(default=False)
|
||
+ """True if allow_usb_devices is set in the defaults section."""
|
||
+
|
||
+
|
||
+class MultipathConfFacts8to9(Model):
|
||
+ """
|
||
+ Model representing information from multipath configuration files important for the 8>9 upgrade path.
|
||
+
|
||
+ Note: This model is in the common repository due to the technical reasons
|
||
+ (reusing parser code in a single actor), and it should not be emitted on
|
||
+ non-8to9 upgrade paths. In the future, this model will likely be moved into
|
||
+ el8toel9 repository.
|
||
+ """
|
||
+ topic = SystemInfoTopic
|
||
+
|
||
+ configs = fields.List(fields.Model(MultipathConfig8to9), default=[])
|
||
+ """List of multipath configuration files"""
|
||
diff --git a/repos/system_upgrade/common/models/repositoriesmap.py b/repos/system_upgrade/common/models/repositoriesmap.py
|
||
index 842cd807..fc740606 100644
|
||
--- a/repos/system_upgrade/common/models/repositoriesmap.py
|
||
+++ b/repos/system_upgrade/common/models/repositoriesmap.py
|
||
@@ -96,3 +96,4 @@ class RepositoriesMapping(Model):
|
||
|
||
mapping = fields.List(fields.Model(RepoMapEntry), default=[])
|
||
repositories = fields.List(fields.Model(PESIDRepositoryEntry), default=[])
|
||
+ vendor = fields.Nullable(fields.String())
|
||
diff --git a/repos/system_upgrade/common/models/rpmtransactiontasks.py b/repos/system_upgrade/common/models/rpmtransactiontasks.py
|
||
index 7e2870d0..05d4e941 100644
|
||
--- a/repos/system_upgrade/common/models/rpmtransactiontasks.py
|
||
+++ b/repos/system_upgrade/common/models/rpmtransactiontasks.py
|
||
@@ -10,6 +10,7 @@ class RpmTransactionTasks(Model):
|
||
to_keep = fields.List(fields.String(), default=[])
|
||
to_remove = fields.List(fields.String(), default=[])
|
||
to_upgrade = fields.List(fields.String(), default=[])
|
||
+ to_reinstall = fields.List(fields.String(), default=[])
|
||
modules_to_enable = fields.List(fields.Model(Module), default=[])
|
||
modules_to_reset = fields.List(fields.Model(Module), default=[])
|
||
|
||
diff --git a/repos/system_upgrade/common/models/targetrepositories.py b/repos/system_upgrade/common/models/targetrepositories.py
|
||
index e1a0b646..e1d44d80 100644
|
||
--- a/repos/system_upgrade/common/models/targetrepositories.py
|
||
+++ b/repos/system_upgrade/common/models/targetrepositories.py
|
||
@@ -30,6 +30,12 @@ class CustomTargetRepository(TargetRepositoryBase):
|
||
enabled = fields.Boolean(default=True)
|
||
|
||
|
||
+class VendorCustomTargetRepositoryList(Model):
|
||
+ topic = TransactionTopic
|
||
+ vendor = fields.String()
|
||
+ repos = fields.List(fields.Model(CustomTargetRepository))
|
||
+
|
||
+
|
||
class TargetRepositories(Model):
|
||
"""
|
||
Repositories supposed to be used during the IPU process
|
||
diff --git a/repos/system_upgrade/common/models/vendorsignatures.py b/repos/system_upgrade/common/models/vendorsignatures.py
|
||
new file mode 100644
|
||
index 00000000..f456aec5
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/models/vendorsignatures.py
|
||
@@ -0,0 +1,8 @@
|
||
+from leapp.models import Model, fields
|
||
+from leapp.topics import VendorTopic
|
||
+
|
||
+
|
||
+class VendorSignatures(Model):
|
||
+ topic = VendorTopic
|
||
+ vendor = fields.String()
|
||
+ sigs = fields.List(fields.String())
|
||
diff --git a/repos/system_upgrade/common/models/vendorsourcerepos.py b/repos/system_upgrade/common/models/vendorsourcerepos.py
|
||
new file mode 100644
|
||
index 00000000..b7a219b4
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/models/vendorsourcerepos.py
|
||
@@ -0,0 +1,12 @@
|
||
+from leapp.models import Model, fields
|
||
+from leapp.topics import VendorTopic
|
||
+
|
||
+
|
||
+class VendorSourceRepos(Model):
|
||
+ """
|
||
+ This model contains the data on all source repositories associated with a specific vendor.
|
||
+ Its data is used to determine whether the vendor should be included into the upgrade process.
|
||
+ """
|
||
+ topic = VendorTopic
|
||
+ vendor = fields.String()
|
||
+ source_repoids = fields.List(fields.String())
|
||
diff --git a/repos/system_upgrade/common/topics/vendortopic.py b/repos/system_upgrade/common/topics/vendortopic.py
|
||
new file mode 100644
|
||
index 00000000..014b7afb
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/common/topics/vendortopic.py
|
||
@@ -0,0 +1,5 @@
|
||
+from leapp.topics import Topic
|
||
+
|
||
+
|
||
+class VendorTopic(Topic):
|
||
+ name = 'vendor_topic'
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/addarmbootloaderworkaround/libraries/addupgradebootloader.py b/repos/system_upgrade/el8toel9/actors/addarmbootloaderworkaround/libraries/addupgradebootloader.py
|
||
index c076fe6b..2455a2f6 100644
|
||
--- a/repos/system_upgrade/el8toel9/actors/addarmbootloaderworkaround/libraries/addupgradebootloader.py
|
||
+++ b/repos/system_upgrade/el8toel9/actors/addarmbootloaderworkaround/libraries/addupgradebootloader.py
|
||
@@ -14,6 +14,22 @@ from leapp.libraries.common.grub import (
|
||
from leapp.libraries.stdlib import api, CalledProcessError, run
|
||
from leapp.models import ArmWorkaroundEFIBootloaderInfo, EFIBootEntry, TargetUserSpaceInfo
|
||
|
||
+dirname = {
|
||
+ 'AlmaLinux': 'almalinux',
|
||
+ 'CentOS Linux': 'centos',
|
||
+ 'CentOS Stream': 'centos',
|
||
+ 'Oracle Linux Server': 'redhat',
|
||
+ 'Red Hat Enterprise Linux': 'redhat',
|
||
+ 'Rocky Linux': 'rocky',
|
||
+ 'Scientific Linux': 'redhat',
|
||
+}
|
||
+
|
||
+with open('/etc/system-release', 'r') as sr:
|
||
+ release_line = next(line for line in sr if 'release' in line)
|
||
+ distro = release_line.split(' release ', 1)[0]
|
||
+
|
||
+distro_dir = dirname.get(distro, 'default')
|
||
+
|
||
UPGRADE_EFI_ENTRY_LABEL = 'Leapp Upgrade'
|
||
|
||
ARM_SHIM_PACKAGE_NAME = 'shim-aa64'
|
||
@@ -21,7 +37,7 @@ ARM_GRUB_PACKAGE_NAME = 'grub2-efi-aa64'
|
||
|
||
EFI_MOUNTPOINT = '/boot/efi/'
|
||
LEAPP_EFIDIR_CANONICAL_PATH = os.path.join(EFI_MOUNTPOINT, 'EFI/leapp/')
|
||
-RHEL_EFIDIR_CANONICAL_PATH = os.path.join(EFI_MOUNTPOINT, 'EFI/redhat/')
|
||
+RHEL_EFIDIR_CANONICAL_PATH = os.path.join(EFI_MOUNTPOINT, 'EFI/', distro_dir)
|
||
UPGRADE_BLS_DIR = '/boot/upgrade-loader'
|
||
|
||
CONTAINER_DOWNLOAD_DIR = '/tmp_pkg_download_dir'
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/actor.py b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/actor.py
|
||
similarity index 57%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/actor.py
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/actor.py
|
||
index 6c3ef41b..ce6a1ebc 100644
|
||
--- a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/actor.py
|
||
+++ b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/actor.py
|
||
@@ -1,27 +1,26 @@
|
||
from leapp.actors import Actor
|
||
from leapp.libraries.actor import multipathconfupdate
|
||
-from leapp.models import MultipathConfFacts8to9
|
||
-from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag
|
||
+from leapp.models import MultipathConfFacts8to9, MultipathConfigUpdatesInfo
|
||
+from leapp.tags import IPUWorkflowTag, TargetTransactionChecksPhaseTag
|
||
|
||
|
||
-class MultipathConfUpdate8to9(Actor):
|
||
+class MultipathUpgradeConfUpdate8to9(Actor):
|
||
"""
|
||
- Modifies multipath configuration files on the target RHEL-9 system so that
|
||
- they will run properly. This is done in three ways
|
||
+ Modifies multipath configuration files on the target RHEL-9 upgrade userspace so that
|
||
+ we can mount multipath devices during the upgrade. This is done in three ways
|
||
1. Adding the allow_usb_devices and enable_foreign options to
|
||
/etc/multipath.conf if they are not present, to retain RHEL-8 behavior
|
||
2. Converting any "*" regular expression strings to ".*"
|
||
"""
|
||
|
||
- name = 'multipath_conf_update_8to9'
|
||
+ name = 'multipath_upgrade_conf_update_8to9'
|
||
consumes = (MultipathConfFacts8to9,)
|
||
- produces = ()
|
||
- tags = (ApplicationsPhaseTag, IPUWorkflowTag)
|
||
+ produces = (MultipathConfigUpdatesInfo,)
|
||
+ tags = (TargetTransactionChecksPhaseTag, IPUWorkflowTag)
|
||
|
||
def process(self):
|
||
facts = next(self.consume(MultipathConfFacts8to9), None)
|
||
if facts is None:
|
||
- self.log.debug('Skipping execution. No MultipathConfFacts8to9 has '
|
||
- 'been produced')
|
||
+ self.log.debug('Skipping execution. No MultipathConfFacts8to9 has been produced')
|
||
return
|
||
multipathconfupdate.update_configs(facts)
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/libraries/multipathconfupdate.py b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/libraries/multipathconfupdate.py
|
||
similarity index 67%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/libraries/multipathconfupdate.py
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/libraries/multipathconfupdate.py
|
||
index 9e49d78f..2dfde7b1 100644
|
||
--- a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/libraries/multipathconfupdate.py
|
||
+++ b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/libraries/multipathconfupdate.py
|
||
@@ -1,4 +1,11 @@
|
||
+import os
|
||
+import shutil
|
||
+
|
||
from leapp.libraries.common import multipathutil
|
||
+from leapp.libraries.stdlib import api
|
||
+from leapp.models import MultipathConfigUpdatesInfo, UpdatedMultipathConfig
|
||
+
|
||
+MODIFICATIONS_STORE_PATH = '/var/lib/leapp/proposed_modifications'
|
||
|
||
_regexes = ('vendor', 'product', 'revision', 'product_blacklist', 'devnode',
|
||
'wwid', 'property', 'protocol')
|
||
@@ -71,10 +78,37 @@ def _update_config(need_foreign, need_allow_usb, config):
|
||
return contents
|
||
|
||
|
||
+def prepare_destination_for_file(file_path):
|
||
+ dirname = os.path.dirname(file_path)
|
||
+ os.makedirs(dirname, exist_ok=True)
|
||
+
|
||
+
|
||
+def prepare_place_for_config_modifications(workspace_path=MODIFICATIONS_STORE_PATH):
|
||
+ if os.path.exists(workspace_path):
|
||
+ shutil.rmtree(workspace_path)
|
||
+ os.mkdir(workspace_path)
|
||
+
|
||
+
|
||
def update_configs(facts):
|
||
need_foreign = not any(x for x in facts.configs if x.enable_foreign_exists)
|
||
need_allow_usb = not any(x for x in facts.configs if x.allow_usb_exists)
|
||
+
|
||
+ config_updates = []
|
||
+ prepare_place_for_config_modifications()
|
||
+
|
||
for config in facts.configs:
|
||
+ original_config_location = config.pathname
|
||
+
|
||
+ rootless_path = config.pathname.lstrip('/')
|
||
+ path_to_config_copy = os.path.join(MODIFICATIONS_STORE_PATH, rootless_path)
|
||
+ api.current_logger().debug(
|
||
+ 'Instead of modyfing {}, preparing modified config at {}'.format(
|
||
+ config.pathname,
|
||
+ path_to_config_copy
|
||
+ )
|
||
+ )
|
||
+ updated_config_location = path_to_config_copy
|
||
+
|
||
contents = _update_config(need_foreign, need_allow_usb, config)
|
||
need_foreign = False
|
||
need_allow_usb = False
|
||
@@ -83,4 +117,11 @@ def update_configs(facts):
|
||
config file.
|
||
"""
|
||
if contents:
|
||
- multipathutil.write_config(config.pathname, contents)
|
||
+ prepare_destination_for_file(updated_config_location)
|
||
+ multipathutil.write_config(updated_config_location, contents)
|
||
+
|
||
+ update = UpdatedMultipathConfig(updated_config_location=updated_config_location,
|
||
+ target_path=original_config_location)
|
||
+ config_updates.append(update)
|
||
+
|
||
+ api.produce(MultipathConfigUpdatesInfo(updates=config_updates))
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/all_the_things.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/all_the_things.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/all_the_things.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/all_the_things.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/allow_usb.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/allow_usb.conf
|
||
similarity index 99%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/allow_usb.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/allow_usb.conf
|
||
index e7a9c23e..0d7ad283 100644
|
||
--- a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/allow_usb.conf
|
||
+++ b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/allow_usb.conf
|
||
@@ -1075,5 +1075,5 @@ multipaths {
|
||
multipath {
|
||
wwid "33333333000001388"
|
||
alias "foo"
|
||
- }
|
||
+ }
|
||
}
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/complicated.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/complicated.conf
|
||
similarity index 99%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/complicated.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/complicated.conf
|
||
index cbfaf801..31d3b61d 100644
|
||
--- a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/complicated.conf
|
||
+++ b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/complicated.conf
|
||
@@ -1104,5 +1104,5 @@ multipaths {
|
||
multipath {
|
||
wwid "33333333000001388"
|
||
alias "foo"
|
||
- }
|
||
+ }
|
||
}
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/conf2.d/all_true.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/conf2.d/all_true.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/conf2.d/all_true.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/conf2.d/all_true.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/default_rhel8.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/default_rhel8.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/default_rhel8.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/default_rhel8.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/empty.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/empty.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/empty.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/empty.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/empty_dir.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/empty_dir.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/empty_dir.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/empty_dir.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/missing_dir.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/missing_dir.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/missing_dir.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/missing_dir.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/no_defaults.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/no_defaults.conf
|
||
similarity index 99%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/no_defaults.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/no_defaults.conf
|
||
index 02d7c1a2..d50d6a71 100644
|
||
--- a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/no_defaults.conf
|
||
+++ b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/no_defaults.conf
|
||
@@ -1045,7 +1045,7 @@ multipaths {
|
||
multipath {
|
||
wwid "33333333000001388"
|
||
alias "foo"
|
||
- }
|
||
+ }
|
||
}
|
||
|
||
defaults { # section added by Leapp
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/no_foreign.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/no_foreign.conf
|
||
similarity index 99%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/no_foreign.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/no_foreign.conf
|
||
index 9abffc40..d3d29c29 100644
|
||
--- a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/no_foreign.conf
|
||
+++ b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/no_foreign.conf
|
||
@@ -1086,5 +1086,5 @@ multipaths {
|
||
multipath {
|
||
wwid "33333333000001388"
|
||
alias "foo"
|
||
- }
|
||
+ }
|
||
}
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/not_set_dir.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/not_set_dir.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/not_set_dir.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/not_set_dir.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/two_defaults.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/two_defaults.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/after/two_defaults.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/after/two_defaults.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/all_the_things.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/all_the_things.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/all_the_things.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/all_the_things.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/allow_usb.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/allow_usb.conf
|
||
similarity index 99%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/allow_usb.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/allow_usb.conf
|
||
index 57b6f97b..39681b85 100644
|
||
--- a/repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/allow_usb.conf
|
||
+++ b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/allow_usb.conf
|
||
@@ -1074,5 +1074,5 @@ multipaths {
|
||
multipath {
|
||
wwid "33333333000001388"
|
||
alias "foo"
|
||
- }
|
||
+ }
|
||
}
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/complicated.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/complicated.conf
|
||
similarity index 99%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/complicated.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/complicated.conf
|
||
index 23d93ecf..c889461c 100644
|
||
--- a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/complicated.conf
|
||
+++ b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/complicated.conf
|
||
@@ -1103,5 +1103,5 @@ multipaths {
|
||
multipath {
|
||
wwid "33333333000001388"
|
||
alias "foo"
|
||
- }
|
||
+ }
|
||
}
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/conf1.d/empty.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/conf1.d/empty.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/conf1.d/empty.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/conf1.d/empty.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/conf1.d/nothing_important.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/conf1.d/nothing_important.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/conf1.d/nothing_important.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/conf1.d/nothing_important.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/conf2.d/all_true.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/conf2.d/all_true.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/conf2.d/all_true.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/conf2.d/all_true.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/conf3.d/README b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/conf3.d/README
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/conf3.d/README
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/conf3.d/README
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/converted_the_things.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/converted_the_things.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/converted_the_things.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/converted_the_things.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/default_rhel8.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/default_rhel8.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/default_rhel8.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/default_rhel8.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/empty.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/empty.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/empty.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/empty.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/empty_dir.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/empty_dir.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/empty_dir.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/empty_dir.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/missing_dir.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/missing_dir.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/missing_dir.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/missing_dir.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/no_defaults.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/no_defaults.conf
|
||
similarity index 99%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/no_defaults.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/no_defaults.conf
|
||
index f7885ca8..ec8ddee2 100644
|
||
--- a/repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/no_defaults.conf
|
||
+++ b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/no_defaults.conf
|
||
@@ -1045,5 +1045,5 @@ multipaths {
|
||
multipath {
|
||
wwid "33333333000001388"
|
||
alias "foo"
|
||
- }
|
||
+ }
|
||
}
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/no_foreign.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/no_foreign.conf
|
||
similarity index 99%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/no_foreign.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/no_foreign.conf
|
||
index 9525731c..87f9a24c 100644
|
||
--- a/repos/system_upgrade/el8toel9/actors/multipathconfread/tests/files/no_foreign.conf
|
||
+++ b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/no_foreign.conf
|
||
@@ -1085,5 +1085,5 @@ multipaths {
|
||
multipath {
|
||
wwid "33333333000001388"
|
||
alias "foo"
|
||
- }
|
||
+ }
|
||
}
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/not_set_dir.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/not_set_dir.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/not_set_dir.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/not_set_dir.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/set_in_dir.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/set_in_dir.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/set_in_dir.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/set_in_dir.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/two_defaults.conf b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/two_defaults.conf
|
||
similarity index 100%
|
||
rename from repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/files/before/two_defaults.conf
|
||
rename to repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/files/before/two_defaults.conf
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/test_multipath_conf_update_8to9.py b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/test_multipath_conf_update_8to9.py
|
||
new file mode 100644
|
||
index 00000000..4ca73791
|
||
--- /dev/null
|
||
+++ b/repos/system_upgrade/el8toel9/actors/multipath_upgrade_conf_patcher/tests/test_multipath_conf_update_8to9.py
|
||
@@ -0,0 +1,179 @@
|
||
+import os
|
||
+
|
||
+import pytest
|
||
+
|
||
+from leapp.libraries.actor import multipathconfupdate
|
||
+from leapp.libraries.common import multipathutil
|
||
+from leapp.libraries.common.testutils import CurrentActorMocked, produce_mocked
|
||
+from leapp.libraries.stdlib import api
|
||
+from leapp.models import MultipathConfFacts8to9, MultipathConfig8to9
|
||
+
|
||
+BEFORE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'files/before')
|
||
+AFTER_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'files/after')
|
||
+
|
||
+
|
||
+def build_config(pathname, config_dir, enable_foreign_exists, invalid_regexes_exist, allow_usb_exists):
|
||
+ return MultipathConfig8to9(
|
||
+ pathname=pathname,
|
||
+ config_dir=config_dir,
|
||
+ enable_foreign_exists=enable_foreign_exists,
|
||
+ invalid_regexes_exist=invalid_regexes_exist,
|
||
+ allow_usb_exists=allow_usb_exists,
|
||
+ )
|
||
+
|
||
+
|
||
+def build_facts(confs):
|
||
+ return MultipathConfFacts8to9(configs=confs)
|
||
+
|
||
+
|
||
+def mock_read_config(path):
|
||
+ """convert to full pathname"""
|
||
+ return multipathutil.read_config_orig(os.path.join(BEFORE_DIR, path))
|
||
+
|
||
+
|
||
+default_rhel8_conf = build_config(
|
||
+ 'default_rhel8.conf', None, True, False, False)
|
||
+
|
||
+all_the_things_conf = build_config(
|
||
+ 'all_the_things.conf', None, False, True, False)
|
||
+
|
||
+converted_the_things_conf = build_config(
|
||
+ 'converted_the_things.conf', None, True, False, True)
|
||
+
|
||
+idempotent_conf = build_config(
|
||
+ 'converted_the_things.conf', None, False, True, False)
|
||
+
|
||
+complicated_conf = build_config(
|
||
+ 'complicated.conf', '/etc/multipath/conf.d', True, True, False)
|
||
+
|
||
+no_foreign_conf = build_config(
|
||
+ 'no_foreign.conf', None, False, True, True)
|
||
+
|
||
+allow_usb_conf = build_config(
|
||
+ 'allow_usb.conf', None, False, False, True)
|
||
+
|
||
+no_defaults_conf = build_config(
|
||
+ 'no_defaults.conf', None, False, True, False)
|
||
+
|
||
+two_defaults_conf = build_config(
|
||
+ 'two_defaults.conf', None, True, False, False)
|
||
+
|
||
+empty_conf = build_config(
|
||
+ 'empty.conf', None, False, False, False)
|
||
+
|
||
+missing_dir_conf = build_config(
|
||
+ 'missing_dir.conf', 'missing', False, True, False)
|
||
+
|
||
+not_set_dir_conf = build_config(
|
||
+ 'not_set_dir.conf', 'conf1.d', False, True, False)
|
||
+
|
||
+empty1_conf = build_config(
|
||
+ 'conf1.d/empty.conf', None, False, False, False)
|
||
+
|
||
+nothing_important_conf = build_config(
|
||
+ 'conf1.d/nothing_important.conf', 'this_gets_ignored', False, False, False)
|
||
+
|
||
+set_in_dir_conf = build_config(
|
||
+ 'set_in_dir.conf', 'conf2.d', False, False, False)
|
||
+
|
||
+all_true_conf = build_config(
|
||
+ 'conf2.d/all_true.conf', None, True, True, True)
|
||
+
|
||
+empty_dir_conf = build_config(
|
||
+ 'empty_dir.conf', 'conf3.d', False, False, False)
|
||
+
|
||
+
|
||
+@pytest.mark.parametrize(
|
||
+ 'config_facts',
|
||
+ [
|
||
+ build_facts([default_rhel8_conf]),
|
||
+ build_facts([all_the_things_conf]),
|
||
+ build_facts([converted_the_things_conf]),
|
||
+ build_facts([complicated_conf]),
|
||
+ build_facts([no_foreign_conf]),
|
||
+ build_facts([allow_usb_conf]),
|
||
+ build_facts([no_defaults_conf]),
|
||
+ build_facts([two_defaults_conf]),
|
||
+ build_facts([empty_conf]),
|
||
+ build_facts([missing_dir_conf]),
|
||
+ build_facts([empty_dir_conf]),
|
||
+ build_facts([not_set_dir_conf, empty1_conf, nothing_important_conf]),
|
||
+ build_facts([set_in_dir_conf, all_true_conf]),
|
||
+ build_facts([idempotent_conf])
|
||
+ ]
|
||
+)
|
||
+def test_all_facts(monkeypatch, config_facts):
|
||
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked())
|
||
+
|
||
+ produce_mock = produce_mocked()
|
||
+ monkeypatch.setattr(api, 'produce', produce_mock)
|
||
+
|
||
+ config_writes = {}
|
||
+
|
||
+ def write_config_mock(location, contents):
|
||
+ config_writes[location] = contents
|
||
+
|
||
+ monkeypatch.setattr(multipathutil, 'read_config_orig', multipathutil.read_config, raising=False)
|
||
+ monkeypatch.setattr(multipathutil, 'read_config', mock_read_config)
|
||
+ monkeypatch.setattr(multipathutil, 'write_config', write_config_mock)
|
||
+ monkeypatch.setattr(multipathconfupdate, 'prepare_destination_for_file', lambda file_path: None)
|
||
+ monkeypatch.setattr(multipathconfupdate, 'prepare_place_for_config_modifications', lambda: None)
|
||
+
|
||
+ multipathconfupdate.update_configs(config_facts)
|
||
+
|
||
+ config_updates = {}
|
||
+ for config_updates_msg in produce_mock.model_instances:
|
||
+ for update in config_updates_msg.updates:
|
||
+ config_updates[update.target_path] = update.updated_config_location
|
||
+
|
||
+ for config in config_facts.configs:
|
||
+ expected_conf_location = os.path.join(AFTER_DIR, config.pathname)
|
||
+
|
||
+ if config.pathname not in config_updates:
|
||
+ assert not os.path.exists(expected_conf_location)
|
||
+ continue
|
||
+
|
||
+ updated_config_location = config_updates[config.pathname]
|
||
+ actual_contents = config_writes[updated_config_location]
|
||
+
|
||
+ updated_config_expected_location = os.path.join(
|
||
+ multipathconfupdate.MODIFICATIONS_STORE_PATH,
|
||
+ config.pathname.lstrip('/')
|
||
+ )
|
||
+
|
||
+ assert updated_config_location == updated_config_expected_location
|
||
+
|
||
+ expected_contents = multipathutil.read_config_orig(expected_conf_location)
|
||
+ assert actual_contents == expected_contents
|
||
+
|
||
+
|
||
+def test_proposed_config_updates_store(monkeypatch):
|
||
+ """ Check whether configs are being stored in the expected path. """
|
||
+ config = MultipathConfig8to9(
|
||
+ pathname='/etc/multipath.conf.d/xy.conf',
|
||
+ config_dir='',
|
||
+ enable_foreign_exists=False,
|
||
+ invalid_regexes_exist=False,
|
||
+ allow_usb_exists=False,
|
||
+ )
|
||
+
|
||
+ produce_mock = produce_mocked()
|
||
+ monkeypatch.setattr(api, 'produce', produce_mock)
|
||
+
|
||
+ config_writes = {}
|
||
+
|
||
+ def write_config_mock(location, contents):
|
||
+ config_writes[location] = contents
|
||
+
|
||
+ monkeypatch.setattr(multipathutil, 'write_config', write_config_mock)
|
||
+ monkeypatch.setattr(multipathconfupdate, '_update_config', lambda *args: 'new config content')
|
||
+ monkeypatch.setattr(multipathconfupdate, 'prepare_destination_for_file', lambda file_path: None)
|
||
+ monkeypatch.setattr(multipathconfupdate, 'prepare_place_for_config_modifications', lambda: None)
|
||
+
|
||
+ multipathconfupdate.update_configs(MultipathConfFacts8to9(configs=[config]))
|
||
+
|
||
+ expected_updated_config_path = os.path.join(
|
||
+ multipathconfupdate.MODIFICATIONS_STORE_PATH,
|
||
+ 'etc/multipath.conf.d/xy.conf'
|
||
+ )
|
||
+ assert expected_updated_config_path in config_writes
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfread/actor.py b/repos/system_upgrade/el8toel9/actors/multipathconfread/actor.py
|
||
deleted file mode 100644
|
||
index 2b41ae8b..00000000
|
||
--- a/repos/system_upgrade/el8toel9/actors/multipathconfread/actor.py
|
||
+++ /dev/null
|
||
@@ -1,33 +0,0 @@
|
||
-from leapp.actors import Actor
|
||
-from leapp.libraries.actor import multipathconfread
|
||
-from leapp.models import DistributionSignedRPM, MultipathConfFacts8to9, TargetUserSpaceUpgradeTasks
|
||
-from leapp.tags import FactsPhaseTag, IPUWorkflowTag
|
||
-
|
||
-
|
||
-class MultipathConfRead8to9(Actor):
|
||
- """
|
||
- Read multipath configuration files and extract the necessary information
|
||
-
|
||
- Related files:
|
||
- - /etc/multipath.conf
|
||
- - /etc/multipath/ - any files inside the directory
|
||
- - /etc/xdrdevices.conf
|
||
-
|
||
- As well, create task (msg) to copy all needed multipath files into
|
||
- the target container as the files are needed to create proper initramfs.
|
||
- This covers the files mentioned above.
|
||
- """
|
||
-
|
||
- name = 'multipath_conf_read_8to9'
|
||
- consumes = (DistributionSignedRPM,)
|
||
- produces = (MultipathConfFacts8to9, TargetUserSpaceUpgradeTasks)
|
||
- tags = (FactsPhaseTag, IPUWorkflowTag)
|
||
-
|
||
- def process(self):
|
||
- if multipathconfread.is_processable():
|
||
- res = multipathconfread.get_multipath_conf_facts()
|
||
- if res:
|
||
- self.produce(res)
|
||
- # Create task to copy multipath config files Iff facts
|
||
- # are generated
|
||
- multipathconfread.produce_copy_to_target_task()
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/test_multipath_conf_update_8to9.py b/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/test_multipath_conf_update_8to9.py
|
||
deleted file mode 100644
|
||
index c18d6b85..00000000
|
||
--- a/repos/system_upgrade/el8toel9/actors/multipathconfupdate/tests/test_multipath_conf_update_8to9.py
|
||
+++ /dev/null
|
||
@@ -1,119 +0,0 @@
|
||
-import os
|
||
-
|
||
-from leapp.libraries.actor import multipathconfupdate
|
||
-from leapp.libraries.common import multipathutil
|
||
-from leapp.models import MultipathConfFacts8to9, MultipathConfig8to9
|
||
-
|
||
-BEFORE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'files/before')
|
||
-AFTER_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'files/after')
|
||
-
|
||
-converted_data = {}
|
||
-
|
||
-
|
||
-def build_config(pathname, config_dir, enable_foreign_exists, invalid_regexes_exist, allow_usb_exists):
|
||
- return MultipathConfig8to9(
|
||
- pathname=pathname,
|
||
- config_dir=config_dir,
|
||
- enable_foreign_exists=enable_foreign_exists,
|
||
- invalid_regexes_exist=invalid_regexes_exist,
|
||
- allow_usb_exists=allow_usb_exists,
|
||
- )
|
||
-
|
||
-
|
||
-def build_facts(confs):
|
||
- return MultipathConfFacts8to9(configs=confs)
|
||
-
|
||
-
|
||
-def mock_read_config(path):
|
||
- """convert to full pathname"""
|
||
- return multipathutil.read_config_orig(os.path.join(BEFORE_DIR, path))
|
||
-
|
||
-
|
||
-def mock_write_config(path, contents):
|
||
- converted_data[path] = contents
|
||
-
|
||
-
|
||
-default_rhel8_conf = build_config(
|
||
- 'default_rhel8.conf', None, True, False, False)
|
||
-
|
||
-all_the_things_conf = build_config(
|
||
- 'all_the_things.conf', None, False, True, False)
|
||
-
|
||
-converted_the_things_conf = build_config(
|
||
- 'converted_the_things.conf', None, True, False, True)
|
||
-
|
||
-idempotent_conf = build_config(
|
||
- 'converted_the_things.conf', None, False, True, False)
|
||
-
|
||
-complicated_conf = build_config(
|
||
- 'complicated.conf', '/etc/multipath/conf.d', True, True, False)
|
||
-
|
||
-no_foreign_conf = build_config(
|
||
- 'no_foreign.conf', None, False, True, True)
|
||
-
|
||
-allow_usb_conf = build_config(
|
||
- 'allow_usb.conf', None, False, False, True)
|
||
-
|
||
-no_defaults_conf = build_config(
|
||
- 'no_defaults.conf', None, False, True, False)
|
||
-
|
||
-two_defaults_conf = build_config(
|
||
- 'two_defaults.conf', None, True, False, False)
|
||
-
|
||
-empty_conf = build_config(
|
||
- 'empty.conf', None, False, False, False)
|
||
-
|
||
-missing_dir_conf = build_config(
|
||
- 'missing_dir.conf', 'missing', False, True, False)
|
||
-
|
||
-not_set_dir_conf = build_config(
|
||
- 'not_set_dir.conf', 'conf1.d', False, True, False)
|
||
-
|
||
-empty1_conf = build_config(
|
||
- 'conf1.d/empty.conf', None, False, False, False)
|
||
-
|
||
-nothing_important_conf = build_config(
|
||
- 'conf1.d/nothing_important.conf', 'this_gets_ignored', False, False, False)
|
||
-
|
||
-set_in_dir_conf = build_config(
|
||
- 'set_in_dir.conf', 'conf2.d', False, False, False)
|
||
-
|
||
-all_true_conf = build_config(
|
||
- 'conf2.d/all_true.conf', None, True, True, True)
|
||
-
|
||
-empty_dir_conf = build_config(
|
||
- 'empty_dir.conf', 'conf3.d', False, False, False)
|
||
-
|
||
-facts_list = [build_facts([default_rhel8_conf]),
|
||
- build_facts([all_the_things_conf]),
|
||
- build_facts([converted_the_things_conf]),
|
||
- build_facts([complicated_conf]),
|
||
- build_facts([no_foreign_conf]),
|
||
- build_facts([allow_usb_conf]),
|
||
- build_facts([no_defaults_conf]),
|
||
- build_facts([two_defaults_conf]),
|
||
- build_facts([empty_conf]),
|
||
- build_facts([missing_dir_conf]),
|
||
- build_facts([empty_dir_conf]),
|
||
- build_facts([not_set_dir_conf, empty1_conf, nothing_important_conf]),
|
||
- build_facts([set_in_dir_conf, all_true_conf]),
|
||
- build_facts([idempotent_conf])]
|
||
-
|
||
-
|
||
-def _test_facts(facts):
|
||
- multipathconfupdate.update_configs(facts)
|
||
- for config in facts.configs:
|
||
- expected_data = multipathutil.read_config_orig(os.path.join(AFTER_DIR, config.pathname))
|
||
- if config.pathname in converted_data:
|
||
- assert converted_data[config.pathname] == expected_data
|
||
- else:
|
||
- assert expected_data is None
|
||
-
|
||
-
|
||
-def test_all_facts(monkeypatch):
|
||
- monkeypatch.setattr(multipathutil, 'read_config_orig', multipathutil.read_config, raising=False)
|
||
- monkeypatch.setattr(multipathutil, 'read_config', mock_read_config)
|
||
- monkeypatch.setattr(multipathutil, 'write_config', mock_write_config)
|
||
- for facts in facts_list:
|
||
- _test_facts(facts)
|
||
- converted_data.clear()
|
||
diff --git a/repos/system_upgrade/el8toel9/actors/removeupgradeefientry/libraries/removeupgradeefientry.py b/repos/system_upgrade/el8toel9/actors/removeupgradeefientry/libraries/removeupgradeefientry.py
|
||
index daa7b2ca..dd604d8b 100644
|
||
--- a/repos/system_upgrade/el8toel9/actors/removeupgradeefientry/libraries/removeupgradeefientry.py
|
||
+++ b/repos/system_upgrade/el8toel9/actors/removeupgradeefientry/libraries/removeupgradeefientry.py
|
||
@@ -5,9 +5,25 @@ from leapp.exceptions import StopActorExecutionError
|
||
from leapp.libraries.stdlib import api, CalledProcessError, run
|
||
from leapp.models import ArmWorkaroundEFIBootloaderInfo
|
||
|
||
+dirname = {
|
||
+ 'AlmaLinux': 'almalinux',
|
||
+ 'CentOS Linux': 'centos',
|
||
+ 'CentOS Stream': 'centos',
|
||
+ 'Oracle Linux Server': 'redhat',
|
||
+ 'Red Hat Enterprise Linux': 'redhat',
|
||
+ 'Rocky Linux': 'rocky',
|
||
+ 'Scientific Linux': 'redhat',
|
||
+}
|
||
+
|
||
+with open('/etc/system-release', 'r') as sr:
|
||
+ release_line = next(line for line in sr if 'release' in line)
|
||
+ distro = release_line.split(' release ', 1)[0]
|
||
+
|
||
+distro_dir = dirname.get(distro, 'default')
|
||
+
|
||
EFI_MOUNTPOINT = '/boot/efi/'
|
||
LEAPP_EFIDIR_CANONICAL_PATH = os.path.join(EFI_MOUNTPOINT, 'EFI/leapp/')
|
||
-RHEL_EFIDIR_CANONICAL_PATH = os.path.join(EFI_MOUNTPOINT, 'EFI/redhat/')
|
||
+RHEL_EFIDIR_CANONICAL_PATH = os.path.join(EFI_MOUNTPOINT, 'EFI/', distro_dir)
|
||
|
||
|
||
def get_workaround_efi_info():
|
||
diff --git a/repos/system_upgrade/el8toel9/models/multipathconffacts.py b/repos/system_upgrade/el8toel9/models/multipathconffacts.py
|
||
deleted file mode 100644
|
||
index 91d3ce35..00000000
|
||
--- a/repos/system_upgrade/el8toel9/models/multipathconffacts.py
|
||
+++ /dev/null
|
||
@@ -1,30 +0,0 @@
|
||
-from leapp.models import fields, Model
|
||
-from leapp.topics import SystemInfoTopic
|
||
-
|
||
-
|
||
-class MultipathConfig8to9(Model):
|
||
- """Model representing information about a multipath configuration file"""
|
||
- topic = SystemInfoTopic
|
||
-
|
||
- pathname = fields.String()
|
||
- """Config file path name"""
|
||
-
|
||
- config_dir = fields.Nullable(fields.String())
|
||
- """Value of config_dir in the defaults section. None if not set"""
|
||
-
|
||
- enable_foreign_exists = fields.Boolean(default=False)
|
||
- """True if enable_foreign is set in the defaults section"""
|
||
-
|
||
- invalid_regexes_exist = fields.Boolean(default=False)
|
||
- """True if any regular expressions have the value of "*" """
|
||
-
|
||
- allow_usb_exists = fields.Boolean(default=False)
|
||
- """True if allow_usb_devices is set in the defaults section."""
|
||
-
|
||
-
|
||
-class MultipathConfFacts8to9(Model):
|
||
- """Model representing information from multipath configuration files"""
|
||
- topic = SystemInfoTopic
|
||
-
|
||
- configs = fields.List(fields.Model(MultipathConfig8to9), default=[])
|
||
- """List of multipath configuration files"""
|
||
diff --git a/repos/system_upgrade/el9toel10/actors/inhibitcgroupsv1/libraries/inhibitcgroupsv1.py b/repos/system_upgrade/el9toel10/actors/inhibitcgroupsv1/libraries/inhibitcgroupsv1.py
|
||
index 6c891f22..0a38ace3 100644
|
||
--- a/repos/system_upgrade/el9toel10/actors/inhibitcgroupsv1/libraries/inhibitcgroupsv1.py
|
||
+++ b/repos/system_upgrade/el9toel10/actors/inhibitcgroupsv1/libraries/inhibitcgroupsv1.py
|
||
@@ -48,7 +48,7 @@ def process():
|
||
[
|
||
"grubby",
|
||
"--update-kernel=ALL",
|
||
- '--remove-args="{}"'.format(",".join(remediation_cmd_args)),
|
||
+ '--remove-args="{}"'.format(" ".join(remediation_cmd_args)),
|
||
],
|
||
],
|
||
),
|