From 75c9028095226b5cd38d72cdbfddb1590d03a9ad Mon Sep 17 00:00:00 2001
From: Petr Stodulka
Date: Thu, 16 Nov 2023 20:15:43 +0100
Subject: [PATCH] RHEL 8.10: CTC1 candidate
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Enable new upgrade path for RHEL 8.10 -> RHEL 9.4 (including RHEL with SAP HANA)
- Introduce generic transition of systemd service states during the IPU
- Introduce the possibility to upgrade with local repositories
- Improve possibilities of upgrade when a proxy is configured in the DNF configuration file
- Fix handling of symlinks under /etc/pki when managing certificates
- Fix the upgrade with custom https repositories
- Default to the NO_RHSM mode when subscription-manager is not installed
- Detect customized configuration of the dynamic linker
- Drop the invalid `tuv` target channel for the --channel option
- Fix the issue of going out of bounds in the isccfg parser
- Fix traceback when saving the rhsm facts results and the /etc/rhsm/facts directory doesn’t exist yet
- Load all rpm repository substitutions that dnf knows about, not just "releasever"
- Simplify handling of upgrades on systems using RHUI, reducing the maintenance burden for cloud providers
- Detect whether unexpected RPM GPG keys have been installed during the RPM transaction
- Resolves: RHEL-16729
---
 0001-Further-narrow-down-packit-tests.patch | 57 +
 0002-Bring-back-uefi_test.patch | 54 +
 ...dd-7.9-8.9-and-8.9-9.3-upgrade-paths.patch | 125 ++
 ...s-into-default-on-push-and-on-demand.patch | 273 +++
 0005-Add-labels-to-all-tests.patch | 145 ++
 0006-Refactor-using-YAML-anchors.patch | 396 ++++
 ...sts-and-switch-to-sanity-for-default.patch | 118 ++
 0008-Minor-label-enhancements.patch | 155 ++
 0009-Update-pr-welcome-message.patch | 31 +
 0010-Address-ddiblik-s-review-comments.patch | 256 +++
 0011-Address-mmoran-s-review-comments.patch | 173 ++
 ...d-isccfg-library-manual-running-mode.patch | 50 +
 0013-Avoid-warnings-on-python2.patch | 26 +
 ...makefile-add-dev_test_no_lint-target.patch | 172 ++
 ...-going-out-of-bounds-in-the-isccfg-p.patch | 82 +
 ...ke-pylint-and-spellcheck-happy-again.patch | 209 ++
 ...e-TUV-from-supported-target-channels.patch | 93 +
 ...ystemd-service-states-during-upgrade.patch | 531 +++++
 ...-obsoleted-enablersyncdservice-actor.patch | 190 ++
 ...SM-mode-when-subscription-manager-is.patch | 26 +
 ...ir-when-trying-to-create-etc-rhsm-fa.patch | 55 +
 ...switch-to-container-mode-for-new-RHS.patch | 2 +-
 0023-load-all-substitutions-from-etc.patch | 61 +
 ...ngling-symlinks-for-containerized-RH.patch | 2 +-
 ...hen-figuring-out-major-version-in-in.patch | 68 +
 ...arget-rhui-clients-in-scratch-contai.patch | 1738 +++++++++++++++++
 ...patibility-for-leapp-rhui-aws-azure-.patch | 167 ++
 ...checknfs-do-not-check-systemd-mounts.patch | 134 ++
 ...om-plan-name-regex-to-filter-by-tags.patch | 327 ++++
 ...k-reference-to-oamg-leapp-tests-repo.patch | 29 +
 ...ty-to-upgrade-with-a-local-repositor.patch | 543 +++++
 0032-Fix-certificate-symlink-handling.patch | 455 +++++
 ...certs-and-upgrade-paths-for-8.10-9.4.patch | 701 +++++++
 0034-pylint-ignore-too-many-lines.patch | 29 +
 0035-Update-upgrade-paths-Add-8.10-9.4.patch | 66 +
 ...-target-userspace-and-allow-a-custom.patch | 275 +++
 ...-suppress-unwanted-deprecation-repor.patch | 36 +
 ...r-custom-libraries-registered-by-ld..patch | 616 ++++++
 ...-Fix-several-typos-and-Makefile-help.patch | 60 +
 ...andling-GPG-keys-to-separate-library.patch | 1381 +++++++++++++
 ...xpected-keys-were-installed-during-t.patch | 184 ++
leapp-repository.spec | 101 +- 42 files changed, 10187 insertions(+), 5 deletions(-) create mode 100644 0001-Further-narrow-down-packit-tests.patch create mode 100644 0002-Bring-back-uefi_test.patch create mode 100644 0003-Add-7.9-8.9-and-8.9-9.3-upgrade-paths.patch create mode 100644 0004-Split-tier1-tests-into-default-on-push-and-on-demand.patch create mode 100644 0005-Add-labels-to-all-tests.patch create mode 100644 0006-Refactor-using-YAML-anchors.patch create mode 100644 0007-Add-kernel-rt-tests-and-switch-to-sanity-for-default.patch create mode 100644 0008-Minor-label-enhancements.patch create mode 100644 0009-Update-pr-welcome-message.patch create mode 100644 0010-Address-ddiblik-s-review-comments.patch create mode 100644 0011-Address-mmoran-s-review-comments.patch create mode 100644 0012-Add-isccfg-library-manual-running-mode.patch create mode 100644 0013-Avoid-warnings-on-python2.patch create mode 100644 0014-makefile-add-dev_test_no_lint-target.patch create mode 100644 0015-Fix-the-issue-of-going-out-of-bounds-in-the-isccfg-p.patch create mode 100644 0016-make-pylint-and-spellcheck-happy-again.patch create mode 100644 0017-Remove-TUV-from-supported-target-channels.patch create mode 100644 0018-Transition-systemd-service-states-during-upgrade.patch create mode 100644 0019-Remove-obsoleted-enablersyncdservice-actor.patch create mode 100644 0020-default-to-NO_RHSM-mode-when-subscription-manager-is.patch create mode 100644 0021-call-correct-mkdir-when-trying-to-create-etc-rhsm-fa.patch rename 0001-RHSM-Adjust-the-switch-to-container-mode-for-new-RHS.patch => 0022-RHSM-Adjust-the-switch-to-container-mode-for-new-RHS.patch (94%) create mode 100644 0023-load-all-substitutions-from-etc.patch rename 0002-Do-not-create-dangling-symlinks-for-containerized-RH.patch => 0024-Do-not-create-dangling-symlinks-for-containerized-RH.patch (97%) create mode 100644 0025-be-less-strict-when-figuring-out-major-version-in-in.patch create mode 100644 0026-rhui-bootstrap-target-rhui-clients-in-scratch-contai.patch create mode 100644 0027-add-backward-compatibility-for-leapp-rhui-aws-azure-.patch create mode 100644 0028-checknfs-do-not-check-systemd-mounts.patch create mode 100644 0029-Switch-from-plan-name-regex-to-filter-by-tags.patch create mode 100644 0030-Bring-back-reference-to-oamg-leapp-tests-repo.patch create mode 100644 0031-add-the-posibility-to-upgrade-with-a-local-repositor.patch create mode 100644 0032-Fix-certificate-symlink-handling.patch create mode 100644 0033-Add-prod-certs-and-upgrade-paths-for-8.10-9.4.patch create mode 100644 0034-pylint-ignore-too-many-lines.patch create mode 100644 0035-Update-upgrade-paths-Add-8.10-9.4.patch create mode 100644 0036-Copy-dnf.conf-to-target-userspace-and-allow-a-custom.patch create mode 100644 0037-adjustlocalrepos-suppress-unwanted-deprecation-repor.patch create mode 100644 0038-add-detection-for-custom-libraries-registered-by-ld..patch create mode 100644 0039-Fix-several-typos-and-Makefile-help.patch create mode 100644 0040-Move-code-handling-GPG-keys-to-separate-library.patch create mode 100644 0041-Check-no-new-unexpected-keys-were-installed-during-t.patch diff --git a/0001-Further-narrow-down-packit-tests.patch b/0001-Further-narrow-down-packit-tests.patch new file mode 100644 index 0000000..cd222f7 --- /dev/null +++ b/0001-Further-narrow-down-packit-tests.patch @@ -0,0 +1,57 @@ +From 0f4212f989ad5907091651c6c1c179240c21f4cb Mon Sep 17 00:00:00 2001 +From: Inessa Vasilevskaya +Date: Thu, 10 Aug 2023 14:01:32 +0200 +Subject: [PATCH 01/38] Further 
narrow down packit tests
+
+- Get rid of the sad uefi_upgrade test for now;
+- Reduce time-consuming partitioning tests to 3.
+
+On-demand /rerun command-scheduled tests will still run
+the full destructive test set (no max_sst though).
+---
+ .packit.yaml | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/.packit.yaml b/.packit.yaml
+index b7b4c0be..50a50747 100644
+--- a/.packit.yaml
++++ b/.packit.yaml
+@@ -94,7 +94,7 @@ jobs:
+     epel-7-x86_64:
+       distros: [RHEL-7.9-ZStream]
+   identifier: tests-7.9to8.6
+-  tmt_plan: "^(?!.*max_sst)(.*tier1)"
++  tmt_plan: "((?!.*uefi_upgrade)(?!.*max_sst)(?!.*partitioning)(.*tier1)|(?!.*uefi_upgrade)(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog))"
+   tf_extra_params:
+     environments:
+       - tmt:
+@@ -120,7 +120,7 @@ jobs:
+     epel-7-x86_64:
+       distros: [RHEL-7.9-ZStream]
+   identifier: tests-7.9to8.8
+-  tmt_plan: "^(?!.*max_sst)(.*tier1)"
++  tmt_plan: "((?!.*uefi_upgrade)(?!.*max_sst)(?!.*partitioning)(.*tier1)|(?!.*uefi_upgrade)(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog))"
+   tf_extra_params:
+     environments:
+       - tmt:
+@@ -193,7 +193,7 @@ jobs:
+     epel-8-x86_64:
+       distros: [RHEL-8.6.0-Nightly]
+   identifier: tests-8.6to9.0
+-  tmt_plan: "^(?!.*max_sst)(.*tier1)"
++  tmt_plan: "((?!.*uefi_upgrade)(?!.*max_sst)(?!.*partitioning)(.*tier1)|(?!.*uefi_upgrade)(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog))"
+   tf_extra_params:
+     environments:
+       - tmt:
+@@ -220,7 +220,7 @@ jobs:
+     epel-8-x86_64:
+       distros: [RHEL-8.8.0-Nightly]
+   identifier: tests-8.8to9.2
+-  tmt_plan: "^(?!.*max_sst)(.*tier1)"
++  tmt_plan: "((?!.*uefi_upgrade)(?!.*max_sst)(?!.*partitioning)(.*tier1)|(?!.*uefi_upgrade)(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog))"
+   tf_extra_params:
+     environments:
+       - tmt:
+--
+2.41.0
+
diff --git a/0002-Bring-back-uefi_test.patch b/0002-Bring-back-uefi_test.patch
new file mode 100644
index 0000000..26b5482
--- /dev/null
+++ b/0002-Bring-back-uefi_test.patch
@@ -0,0 +1,54 @@
+From 9890df46356bb28a941bc5659b16f890918c8b4f Mon Sep 17 00:00:00 2001
+From: Inessa Vasilevskaya
+Date: Fri, 11 Aug 2023 10:49:41 +0200
+Subject: [PATCH 02/38] Bring back uefi_test
+
+A fix for the issue that was causing this test to fail has been
+merged, so let's bring back that test.
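As a quick sanity check of these `tmt_plan` filters, the narrowed regular expression from the two patches above can be exercised directly in Python. The plan names below are made up for illustration, and the exact matching semantics in tmt/packit may differ slightly from a plain `re.match`:

    import re

    # The narrowed tier1 filter from patch 0001: skip uefi_upgrade, max_sst and
    # generic partitioning plans, but keep two specific partitioning plans.
    plan_filter = re.compile(
        r"((?!.*uefi_upgrade)(?!.*max_sst)(?!.*partitioning)(.*tier1)"
        r"|(?!.*uefi_upgrade)(?!.*max_sst)(.*tier1)"
        r"(.*partitioning_monolithic|.*separate_var_usr_varlog))"
    )

    for plan in ("/plans/tier1/basic",
                 "/plans/tier1/max_sst",
                 "/plans/tier1/partitioning_monolithic",
                 "/plans/tier1/partitioning_other"):
        print(plan, bool(plan_filter.match(plan)))
    # basic and partitioning_monolithic match; the other two are filtered out.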
+---
+ .packit.yaml | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/.packit.yaml b/.packit.yaml
+index 50a50747..820d2151 100644
+--- a/.packit.yaml
++++ b/.packit.yaml
+@@ -94,7 +94,7 @@ jobs:
+     epel-7-x86_64:
+       distros: [RHEL-7.9-ZStream]
+   identifier: tests-7.9to8.6
+-  tmt_plan: "((?!.*uefi_upgrade)(?!.*max_sst)(?!.*partitioning)(.*tier1)|(?!.*uefi_upgrade)(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog))"
++  tmt_plan: "((?!.*max_sst)(?!.*partitioning)(.*tier1)|(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog))"
+   tf_extra_params:
+     environments:
+       - tmt:
+@@ -120,7 +120,7 @@ jobs:
+     epel-7-x86_64:
+       distros: [RHEL-7.9-ZStream]
+   identifier: tests-7.9to8.8
+-  tmt_plan: "((?!.*uefi_upgrade)(?!.*max_sst)(?!.*partitioning)(.*tier1)|(?!.*uefi_upgrade)(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog))"
++  tmt_plan: "((?!.*max_sst)(?!.*partitioning)(.*tier1)|(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog))"
+   tf_extra_params:
+     environments:
+       - tmt:
+@@ -193,7 +193,7 @@ jobs:
+     epel-8-x86_64:
+       distros: [RHEL-8.6.0-Nightly]
+   identifier: tests-8.6to9.0
+-  tmt_plan: "((?!.*uefi_upgrade)(?!.*max_sst)(?!.*partitioning)(.*tier1)|(?!.*uefi_upgrade)(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog))"
++  tmt_plan: "((?!.*max_sst)(?!.*partitioning)(.*tier1)|(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog))"
+   tf_extra_params:
+     environments:
+       - tmt:
+@@ -220,7 +220,7 @@ jobs:
+     epel-8-x86_64:
+       distros: [RHEL-8.8.0-Nightly]
+   identifier: tests-8.8to9.2
+-  tmt_plan: "((?!.*uefi_upgrade)(?!.*max_sst)(?!.*partitioning)(.*tier1)|(?!.*uefi_upgrade)(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog))"
++  tmt_plan: "((?!.*max_sst)(?!.*partitioning)(.*tier1)|(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog))"
+   tf_extra_params:
+     environments:
+       - tmt:
+--
+2.41.0
+
diff --git a/0003-Add-7.9-8.9-and-8.9-9.3-upgrade-paths.patch b/0003-Add-7.9-8.9-and-8.9-9.3-upgrade-paths.patch
new file mode 100644
index 0000000..7055e98
--- /dev/null
+++ b/0003-Add-7.9-8.9-and-8.9-9.3-upgrade-paths.patch
@@ -0,0 +1,125 @@
+From ecffc19fd75ea3caa9d36b8ce311bcf5a36aa998 Mon Sep 17 00:00:00 2001
+From: Inessa Vasilevskaya
+Date: Fri, 11 Aug 2023 12:38:33 +0200
+Subject: [PATCH 03/38] Add 7.9->8.9 and 8.9->9.3 upgrade paths
+
+Also let's get rid of the commented-out max_sst tests. There
+is no way packit lets us support customized runs in the near
+future, and keeping dead commented-out code is not cool.
+--- + .packit.yaml | 92 +++++++++++++++++++++++++++++----------------------- + 1 file changed, 52 insertions(+), 40 deletions(-) + +diff --git a/.packit.yaml b/.packit.yaml +index 820d2151..9c30e0c8 100644 +--- a/.packit.yaml ++++ b/.packit.yaml +@@ -137,25 +137,31 @@ jobs: + TARGET_RELEASE: "8.8" + LEAPPDATA_BRANCH: "upstream" + +-# - job: tests +-# fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests" +-# fmf_ref: "master" +-# use_internal_tf: True +-# trigger: pull_request +-# targets: +-# epel-7-x86_64: +-# distros: [RHEL-7.9-ZStream] +-# identifier: tests-7.9to8.8-sst +-# tmt_plan: "^(?!.*tier[2-3].*)(.*max_sst.*)" +-# tf_post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" +-# tf_extra_params: +-# environments: +-# - tmt: +-# context: +-# distro: "rhel-7.9" +-# env: +-# SOURCE_RELEASE: "7.9" +-# TARGET_RELEASE: "8.8" ++- job: tests ++ fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests" ++ fmf_ref: "master" ++ use_internal_tf: True ++ trigger: pull_request ++ targets: ++ epel-7-x86_64: ++ distros: [RHEL-7.9-ZStream] ++ identifier: tests-7.9to8.9 ++ tmt_plan: "((?!.*max_sst)(?!.*partitioning)(.*tier1)|(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog))" ++ tf_extra_params: ++ environments: ++ - tmt: ++ context: ++ distro: "rhel-7.9" ++ # tag resources as sst_upgrades to enable cost metrics collection ++ settings: ++ provisioning: ++ post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" ++ tags: ++ BusinessUnit: sst_upgrades ++ env: ++ SOURCE_RELEASE: "7.9" ++ TARGET_RELEASE: "8.9" ++ LEAPPDATA_BRANCH: "upstream" + + - job: tests + fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests" +@@ -239,27 +245,33 @@ jobs: + LEAPPDATA_BRANCH: "upstream" + LEAPP_DEVEL_TARGET_RELEASE: "9.2" + +-# - job: tests +-# fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests" +-# fmf_ref: "master" +-# use_internal_tf: True +-# trigger: pull_request +-# targets: +-# epel-8-x86_64: +-# distros: [RHEL-8.6.0-Nightly] +-# identifier: tests-8.6to9.0-sst +-# tmt_plan: "^(?!.*tier[2-3].*)(.*max_sst.*)" +-# tf_post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" +-# tf_extra_params: +-# environments: +-# - tmt: +-# context: +-# distro: "rhel-8.6" +-# env: +-# SOURCE_RELEASE: "8.6" +-# TARGET_RELEASE: "9.0" +-# RHSM_REPOS: "rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms" +-# LEAPPDATA_BRANCH: "upstream" ++- job: tests ++ fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests" ++ fmf_ref: "master" ++ use_internal_tf: True ++ trigger: pull_request ++ targets: ++ epel-8-x86_64: ++ distros: [RHEL-8.9.0-Nightly] ++ identifier: tests-8.9to9.3 ++ tmt_plan: "((?!.*max_sst)(?!.*partitioning)(.*tier1)|(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog))" ++ tf_extra_params: ++ environments: ++ - tmt: ++ context: ++ distro: "rhel-8.9" ++ # tag resources as sst_upgrades to enable cost metrics collection ++ settings: ++ provisioning: ++ post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" ++ tags: ++ BusinessUnit: sst_upgrades ++ env: ++ SOURCE_RELEASE: "8.9" ++ TARGET_RELEASE: "9.3" ++ RHSM_REPOS: "rhel-8-for-x86_64-appstream-beta-rpms,rhel-8-for-x86_64-baseos-beta-rpms" ++ LEAPPDATA_BRANCH: "upstream" ++ LEAPP_DEVEL_TARGET_RELEASE: "9.3" + + - job: tests + fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests" +-- +2.41.0 + diff --git 
a/0004-Split-tier1-tests-into-default-on-push-and-on-demand.patch b/0004-Split-tier1-tests-into-default-on-push-and-on-demand.patch new file mode 100644 index 0000000..bec83c9 --- /dev/null +++ b/0004-Split-tier1-tests-into-default-on-push-and-on-demand.patch @@ -0,0 +1,273 @@ +From 63963200e5fdc02d4ad2a0abb1632c26774af8bb Mon Sep 17 00:00:00 2001 +From: Inessa Vasilevskaya +Date: Tue, 22 Aug 2023 14:50:15 +0200 +Subject: [PATCH 04/38] Split tier1 tests into default-on-push and on-demand + +Default test set will have the fastest tests that run on cloud only. +On demand tests will contain the minimal beaker test set and will +be triggered via /packit test --labels minimal-beaker + +Later we could add labels-based triggering that will remove the +need to manually leave a comment. + +https://packit.dev/posts/manual-triggering#manual-only-triggering-of-jobs + +OAMG-9458 +--- + .packit.yaml | 198 +++++++++++++++++++++++++++++++++++++++++++++++++-- + 1 file changed, 192 insertions(+), 6 deletions(-) + +diff --git a/.packit.yaml b/.packit.yaml +index 9c30e0c8..32d2b02e 100644 +--- a/.packit.yaml ++++ b/.packit.yaml +@@ -85,6 +85,7 @@ jobs: + # builds from master branch should start with 100 release, to have high priority + - bash -c "sed -i \"s/1%{?dist}/100%{?dist}/g\" packaging/leapp-repository.spec" + ++ + - job: tests + fmf_url: "https://gitlab.cee.redhat.com/oamg/tmt-plans" + fmf_ref: "master" +@@ -94,7 +95,37 @@ jobs: + epel-7-x86_64: + distros: [RHEL-7.9-ZStream] + identifier: tests-7.9to8.6 +- tmt_plan: "((?!.*max_sst)(?!.*partitioning)(.*tier1)|(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog))" ++ tmt_plan: "(?!.*uefi)(?!.*max_sst)(?!.*partitioning)(.*tier1)" ++ tf_extra_params: ++ environments: ++ - tmt: ++ context: ++ distro: "rhel-7.9" ++ # tag resources as sst_upgrades to enable cost metrics collection ++ settings: ++ provisioning: ++ post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" ++ tags: ++ BusinessUnit: sst_upgrades ++ env: ++ SOURCE_RELEASE: "7.9" ++ TARGET_RELEASE: "8.6" ++ LEAPPDATA_BRANCH: "upstream" ++ ++# On-demand minimal beaker tests ++- job: tests ++ fmf_url: "https://gitlab.cee.redhat.com/oamg/tmt-plans" ++ fmf_ref: "master" ++ use_internal_tf: True ++ trigger: pull_request ++ manual_trigger: True ++ labels: ++ - minimal-beaker ++ targets: ++ epel-7-x86_64: ++ distros: [RHEL-7.9-ZStream] ++ identifier: tests-7.9to8.6-minimal-beaker ++ tmt_plan: "(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog|.*uefi)" + tf_extra_params: + environments: + - tmt: +@@ -120,7 +151,37 @@ jobs: + epel-7-x86_64: + distros: [RHEL-7.9-ZStream] + identifier: tests-7.9to8.8 +- tmt_plan: "((?!.*max_sst)(?!.*partitioning)(.*tier1)|(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog))" ++ tmt_plan: "(?!.*uefi)(?!.*max_sst)(?!.*partitioning)(.*tier1)" ++ tf_extra_params: ++ environments: ++ - tmt: ++ context: ++ distro: "rhel-7.9" ++ # tag resources as sst_upgrades to enable cost metrics collection ++ settings: ++ provisioning: ++ post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" ++ tags: ++ BusinessUnit: sst_upgrades ++ env: ++ SOURCE_RELEASE: "7.9" ++ TARGET_RELEASE: "8.8" ++ LEAPPDATA_BRANCH: "upstream" ++ ++# On-demand minimal beaker tests ++- job: tests ++ fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests" ++ fmf_ref: "master" ++ use_internal_tf: True ++ trigger: pull_request ++ manual_trigger: True ++ labels: ++ - 
minimal-beaker ++ targets: ++ epel-7-x86_64: ++ distros: [RHEL-7.9-ZStream] ++ identifier: tests-7.9to8.8-minimal-beaker ++ tmt_plan: "(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog|.*uefi)" + tf_extra_params: + environments: + - tmt: +@@ -146,7 +207,37 @@ jobs: + epel-7-x86_64: + distros: [RHEL-7.9-ZStream] + identifier: tests-7.9to8.9 +- tmt_plan: "((?!.*max_sst)(?!.*partitioning)(.*tier1)|(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog))" ++ tmt_plan: "(?!.*uefi)(?!.*max_sst)(?!.*partitioning)(.*tier1)" ++ tf_extra_params: ++ environments: ++ - tmt: ++ context: ++ distro: "rhel-7.9" ++ # tag resources as sst_upgrades to enable cost metrics collection ++ settings: ++ provisioning: ++ post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" ++ tags: ++ BusinessUnit: sst_upgrades ++ env: ++ SOURCE_RELEASE: "7.9" ++ TARGET_RELEASE: "8.9" ++ LEAPPDATA_BRANCH: "upstream" ++ ++# On-demand minimal beaker tests ++- job: tests ++ fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests" ++ fmf_ref: "master" ++ use_internal_tf: True ++ trigger: pull_request ++ manual_trigger: True ++ labels: ++ - minimal-beaker ++ targets: ++ epel-7-x86_64: ++ distros: [RHEL-7.9-ZStream] ++ identifier: tests-7.9to8.9-minimal-beaker ++ tmt_plan: "(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog|.*uefi)" + tf_extra_params: + environments: + - tmt: +@@ -199,7 +290,38 @@ jobs: + epel-8-x86_64: + distros: [RHEL-8.6.0-Nightly] + identifier: tests-8.6to9.0 +- tmt_plan: "((?!.*max_sst)(?!.*partitioning)(.*tier1)|(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog))" ++ tmt_plan: "(?!.*uefi)(?!.*max_sst)(?!.*partitioning)(.*tier1)" ++ tf_extra_params: ++ environments: ++ - tmt: ++ context: ++ distro: "rhel-8.6" ++ # tag resources as sst_upgrades to enable cost metrics collection ++ settings: ++ provisioning: ++ post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" ++ tags: ++ BusinessUnit: sst_upgrades ++ env: ++ SOURCE_RELEASE: "8.6" ++ TARGET_RELEASE: "9.0" ++ RHSM_REPOS: "rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms" ++ LEAPPDATA_BRANCH: "upstream" ++ ++# On-demand minimal beaker tests ++- job: tests ++ fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests" ++ fmf_ref: "master" ++ use_internal_tf: True ++ trigger: pull_request ++ manual_trigger: True ++ labels: ++ - minimal-beaker ++ targets: ++ epel-8-x86_64: ++ distros: [RHEL-8.6.0-Nightly] ++ identifier: tests-8.6to9.0-minimal-beaker ++ tmt_plan: "(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog|.*uefi)" + tf_extra_params: + environments: + - tmt: +@@ -226,7 +348,39 @@ jobs: + epel-8-x86_64: + distros: [RHEL-8.8.0-Nightly] + identifier: tests-8.8to9.2 +- tmt_plan: "((?!.*max_sst)(?!.*partitioning)(.*tier1)|(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog))" ++ tmt_plan: "(?!.*uefi)(?!.*max_sst)(?!.*partitioning)(.*tier1)" ++ tf_extra_params: ++ environments: ++ - tmt: ++ context: ++ distro: "rhel-8.8" ++ # tag resources as sst_upgrades to enable cost metrics collection ++ settings: ++ provisioning: ++ post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" ++ tags: ++ BusinessUnit: sst_upgrades ++ env: ++ SOURCE_RELEASE: "8.8" ++ TARGET_RELEASE: "9.2" ++ RHSM_REPOS: "rhel-8-for-x86_64-appstream-beta-rpms,rhel-8-for-x86_64-baseos-beta-rpms" ++ LEAPPDATA_BRANCH: "upstream" 
++ LEAPP_DEVEL_TARGET_RELEASE: "9.2" ++ ++# On-demand minimal beaker tests ++- job: tests ++ fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests" ++ fmf_ref: "master" ++ use_internal_tf: True ++ trigger: pull_request ++ manual_trigger: True ++ labels: ++ - minimal-beaker ++ targets: ++ epel-8-x86_64: ++ distros: [RHEL-8.8.0-Nightly] ++ identifier: tests-8.8to9.2-minimal-beaker ++ tmt_plan: "(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog|.*uefi)" + tf_extra_params: + environments: + - tmt: +@@ -254,7 +408,39 @@ jobs: + epel-8-x86_64: + distros: [RHEL-8.9.0-Nightly] + identifier: tests-8.9to9.3 +- tmt_plan: "((?!.*max_sst)(?!.*partitioning)(.*tier1)|(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog))" ++ tmt_plan: "(?!.*uefi)(?!.*max_sst)(?!.*partitioning)(.*tier1)" ++ tf_extra_params: ++ environments: ++ - tmt: ++ context: ++ distro: "rhel-8.9" ++ # tag resources as sst_upgrades to enable cost metrics collection ++ settings: ++ provisioning: ++ post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" ++ tags: ++ BusinessUnit: sst_upgrades ++ env: ++ SOURCE_RELEASE: "8.9" ++ TARGET_RELEASE: "9.3" ++ RHSM_REPOS: "rhel-8-for-x86_64-appstream-beta-rpms,rhel-8-for-x86_64-baseos-beta-rpms" ++ LEAPPDATA_BRANCH: "upstream" ++ LEAPP_DEVEL_TARGET_RELEASE: "9.3" ++ ++# On-demand minimal beaker tests ++- job: tests ++ fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests" ++ fmf_ref: "master" ++ use_internal_tf: True ++ trigger: pull_request ++ manual_trigger: True ++ labels: ++ - minimal-beaker ++ targets: ++ epel-8-x86_64: ++ distros: [RHEL-8.9.0-Nightly] ++ identifier: tests-8.9to9.3-minimal-beaker ++ tmt_plan: "(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog|.*uefi)" + tf_extra_params: + environments: + - tmt: +-- +2.41.0 + diff --git a/0005-Add-labels-to-all-tests.patch b/0005-Add-labels-to-all-tests.patch new file mode 100644 index 0000000..8e72f7a --- /dev/null +++ b/0005-Add-labels-to-all-tests.patch @@ -0,0 +1,145 @@ +From 78542a7a58c3ee1a719cdbbd139409319402de0f Mon Sep 17 00:00:00 2001 +From: Inessa Vasilevskaya +Date: Tue, 22 Aug 2023 15:39:48 +0200 +Subject: [PATCH 05/38] Add labels to all tests + +- On-demand minimal beaker tests will have a generic + minimal-beaker label and minimal-beaker-XtoY label to + enable micro control over test scheduling +- Aws tests will have aws label +- Tests triggered automatically will have default label. 
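For example (a hypothetical PR comment, using the labels introduced above), scheduling the minimal beaker set only for the 7.9 to 8.6 path would look like:

    /packit test --labels minimal-beaker-7.9to8.6

while `--labels minimal-beaker` alone would schedule that set for every upgrade path.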
+ +OAMG-9458 +--- + .packit.yaml | 24 ++++++++++++++++++++++++ + 1 file changed, 24 insertions(+) + +diff --git a/.packit.yaml b/.packit.yaml +index 32d2b02e..9a697838 100644 +--- a/.packit.yaml ++++ b/.packit.yaml +@@ -91,6 +91,8 @@ jobs: + fmf_ref: "master" + use_internal_tf: True + trigger: pull_request ++ labels: ++ - default + targets: + epel-7-x86_64: + distros: [RHEL-7.9-ZStream] +@@ -121,6 +123,7 @@ jobs: + manual_trigger: True + labels: + - minimal-beaker ++ - minimal-beaker-7.9to8.6 + targets: + epel-7-x86_64: + distros: [RHEL-7.9-ZStream] +@@ -147,6 +150,8 @@ jobs: + fmf_ref: "master" + use_internal_tf: True + trigger: pull_request ++ labels: ++ - default + targets: + epel-7-x86_64: + distros: [RHEL-7.9-ZStream] +@@ -177,6 +182,7 @@ jobs: + manual_trigger: True + labels: + - minimal-beaker ++ - minimal-beaker-7.9to8.8 + targets: + epel-7-x86_64: + distros: [RHEL-7.9-ZStream] +@@ -203,6 +209,8 @@ jobs: + fmf_ref: "master" + use_internal_tf: True + trigger: pull_request ++ labels: ++ - default + targets: + epel-7-x86_64: + distros: [RHEL-7.9-ZStream] +@@ -233,6 +241,7 @@ jobs: + manual_trigger: True + labels: + - minimal-beaker ++ - minimal-beaker-7.9to8.9 + targets: + epel-7-x86_64: + distros: [RHEL-7.9-ZStream] +@@ -259,6 +268,9 @@ jobs: + fmf_ref: "master" + use_internal_tf: True + trigger: pull_request ++ labels: ++ - default ++ - aws + targets: + epel-7-x86_64: + distros: [RHEL-7.9-rhui] +@@ -286,6 +298,8 @@ jobs: + fmf_ref: "master" + use_internal_tf: True + trigger: pull_request ++ labels: ++ - default + targets: + epel-8-x86_64: + distros: [RHEL-8.6.0-Nightly] +@@ -317,6 +331,7 @@ jobs: + manual_trigger: True + labels: + - minimal-beaker ++ - minimal-beaker-8.6to9.0 + targets: + epel-8-x86_64: + distros: [RHEL-8.6.0-Nightly] +@@ -344,6 +359,8 @@ jobs: + fmf_ref: "master" + use_internal_tf: True + trigger: pull_request ++ labels: ++ - default + targets: + epel-8-x86_64: + distros: [RHEL-8.8.0-Nightly] +@@ -376,6 +393,7 @@ jobs: + manual_trigger: True + labels: + - minimal-beaker ++ - minimal-beaker-8.8to9.2 + targets: + epel-8-x86_64: + distros: [RHEL-8.8.0-Nightly] +@@ -404,6 +422,8 @@ jobs: + fmf_ref: "master" + use_internal_tf: True + trigger: pull_request ++ labels: ++ - default + targets: + epel-8-x86_64: + distros: [RHEL-8.9.0-Nightly] +@@ -436,6 +456,7 @@ jobs: + manual_trigger: True + labels: + - minimal-beaker ++ - minimal-beaker-8.9to9.3 + targets: + epel-8-x86_64: + distros: [RHEL-8.9.0-Nightly] +@@ -464,6 +485,9 @@ jobs: + fmf_ref: "master" + use_internal_tf: True + trigger: pull_request ++ labels: ++ - default ++ - aws + targets: + epel-8-x86_64: + distros: [RHEL-8.6-rhui] +-- +2.41.0 + diff --git a/0006-Refactor-using-YAML-anchors.patch b/0006-Refactor-using-YAML-anchors.patch new file mode 100644 index 0000000..6c04495 --- /dev/null +++ b/0006-Refactor-using-YAML-anchors.patch @@ -0,0 +1,396 @@ +From 6bb005605732e18b1921bf207898fa8499ceedc6 Mon Sep 17 00:00:00 2001 +From: Inessa Vasilevskaya +Date: Wed, 23 Aug 2023 14:49:27 +0200 +Subject: [PATCH 06/38] Refactor using YAML anchors + +Let's remove duplication during similar test jobs definition +by using YAML anchors. 
+--- + .packit.yaml | 228 ++++++++++----------------------------------------- + 1 file changed, 43 insertions(+), 185 deletions(-) + +diff --git a/.packit.yaml b/.packit.yaml +index 9a697838..06c681b3 100644 +--- a/.packit.yaml ++++ b/.packit.yaml +@@ -85,8 +85,8 @@ jobs: + # builds from master branch should start with 100 release, to have high priority + - bash -c "sed -i \"s/1%{?dist}/100%{?dist}/g\" packaging/leapp-repository.spec" + +- +-- job: tests ++- &default-79to86 ++ job: tests + fmf_url: "https://gitlab.cee.redhat.com/oamg/tmt-plans" + fmf_ref: "master" + use_internal_tf: True +@@ -97,7 +97,7 @@ jobs: + epel-7-x86_64: + distros: [RHEL-7.9-ZStream] + identifier: tests-7.9to8.6 +- tmt_plan: "(?!.*uefi)(?!.*max_sst)(?!.*partitioning)(.*tier1)" ++ tmt_plan: "(?!.*uefi)(?!.*max_sst)(?!.*partitioning)(?!.*oamg4250_lvm_var_xfs_ftype0)(?!.*kernel-rt)(.*tier1)" + tf_extra_params: + environments: + - tmt: +@@ -114,21 +114,16 @@ jobs: + TARGET_RELEASE: "8.6" + LEAPPDATA_BRANCH: "upstream" + +-# On-demand minimal beaker tests +-- job: tests +- fmf_url: "https://gitlab.cee.redhat.com/oamg/tmt-plans" +- fmf_ref: "master" +- use_internal_tf: True +- trigger: pull_request +- manual_trigger: True ++- &default-79to86-aws ++ <<: *default-79to86 + labels: +- - minimal-beaker +- - minimal-beaker-7.9to8.6 ++ - default ++ - aws + targets: + epel-7-x86_64: +- distros: [RHEL-7.9-ZStream] +- identifier: tests-7.9to8.6-minimal-beaker +- tmt_plan: "(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog|.*uefi)" ++ distros: [RHEL-7.9-rhui] ++ identifier: tests-7to8-aws-e2e ++ tmt_plan: "(?!.*sap)(.*e2e)" + tf_extra_params: + environments: + - tmt: +@@ -137,174 +132,71 @@ jobs: + # tag resources as sst_upgrades to enable cost metrics collection + settings: + provisioning: +- post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" ++ post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys; yum-config-manager --enable rhel-7-server-rhui-optional-rpms" + tags: + BusinessUnit: sst_upgrades + env: + SOURCE_RELEASE: "7.9" + TARGET_RELEASE: "8.6" ++ RHUI: "aws" + LEAPPDATA_BRANCH: "upstream" + +-- job: tests +- fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests" +- fmf_ref: "master" +- use_internal_tf: True +- trigger: pull_request ++# On-demand minimal beaker tests ++- &beaker-minimal-79to86 ++ <<: *default-79to86 ++ manual_trigger: True + labels: +- - default +- targets: +- epel-7-x86_64: +- distros: [RHEL-7.9-ZStream] ++ - minimal-beaker ++ - minimal-beaker-7.9to8.6 ++ identifier: tests-7.9to8.6-minimal-beaker ++ tmt_plan: "(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog|.*uefi|.*oamg4250_lvm_var_xfs_ftype0)" ++ ++- &default-79to88 ++ <<: *default-79to86 + identifier: tests-7.9to8.8 +- tmt_plan: "(?!.*uefi)(?!.*max_sst)(?!.*partitioning)(.*tier1)" +- tf_extra_params: +- environments: +- - tmt: +- context: +- distro: "rhel-7.9" +- # tag resources as sst_upgrades to enable cost metrics collection +- settings: +- provisioning: +- post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" +- tags: +- BusinessUnit: sst_upgrades + env: + SOURCE_RELEASE: "7.9" + TARGET_RELEASE: "8.8" + LEAPPDATA_BRANCH: "upstream" + + # On-demand minimal beaker tests +-- job: tests +- fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests" +- fmf_ref: "master" +- use_internal_tf: True +- trigger: pull_request +- manual_trigger: True ++- &beaker-minimal-79to88 ++ <<: 
*beaker-minimal-79to86 + labels: + - minimal-beaker + - minimal-beaker-7.9to8.8 +- targets: +- epel-7-x86_64: +- distros: [RHEL-7.9-ZStream] + identifier: tests-7.9to8.8-minimal-beaker +- tmt_plan: "(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog|.*uefi)" +- tf_extra_params: +- environments: +- - tmt: +- context: +- distro: "rhel-7.9" +- # tag resources as sst_upgrades to enable cost metrics collection +- settings: +- provisioning: +- post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" +- tags: +- BusinessUnit: sst_upgrades + env: + SOURCE_RELEASE: "7.9" + TARGET_RELEASE: "8.8" + LEAPPDATA_BRANCH: "upstream" + +-- job: tests +- fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests" +- fmf_ref: "master" +- use_internal_tf: True +- trigger: pull_request +- labels: +- - default +- targets: +- epel-7-x86_64: +- distros: [RHEL-7.9-ZStream] ++- &default-79to89 ++ <<: *default-79to86 + identifier: tests-7.9to8.9 +- tmt_plan: "(?!.*uefi)(?!.*max_sst)(?!.*partitioning)(.*tier1)" +- tf_extra_params: +- environments: +- - tmt: +- context: +- distro: "rhel-7.9" +- # tag resources as sst_upgrades to enable cost metrics collection +- settings: +- provisioning: +- post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" +- tags: +- BusinessUnit: sst_upgrades + env: + SOURCE_RELEASE: "7.9" + TARGET_RELEASE: "8.9" + LEAPPDATA_BRANCH: "upstream" + + # On-demand minimal beaker tests +-- job: tests +- fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests" +- fmf_ref: "master" +- use_internal_tf: True +- trigger: pull_request +- manual_trigger: True ++- &beaker-minimal-79to89 ++ <<: *beaker-minimal-79to86 + labels: + - minimal-beaker + - minimal-beaker-7.9to8.9 +- targets: +- epel-7-x86_64: +- distros: [RHEL-7.9-ZStream] + identifier: tests-7.9to8.9-minimal-beaker +- tmt_plan: "(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog|.*uefi)" +- tf_extra_params: +- environments: +- - tmt: +- context: +- distro: "rhel-7.9" +- # tag resources as sst_upgrades to enable cost metrics collection +- settings: +- provisioning: +- post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" +- tags: +- BusinessUnit: sst_upgrades + env: + SOURCE_RELEASE: "7.9" + TARGET_RELEASE: "8.9" + LEAPPDATA_BRANCH: "upstream" + +-- job: tests +- fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests" +- fmf_ref: "master" +- use_internal_tf: True +- trigger: pull_request +- labels: +- - default +- - aws +- targets: +- epel-7-x86_64: +- distros: [RHEL-7.9-rhui] +- identifier: tests-7to8-aws-e2e +- tmt_plan: "^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*8to9)(.*e2e)" +- tf_extra_params: +- environments: +- - tmt: +- context: +- distro: "rhel-7.9" +- # tag resources as sst_upgrades to enable cost metrics collection +- settings: +- provisioning: +- post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys; yum-config-manager --enable rhel-7-server-rhui-optional-rpms" +- tags: +- BusinessUnit: sst_upgrades +- env: +- SOURCE_RELEASE: "7.9" +- TARGET_RELEASE: "8.6" +- RHUI: "aws" +- LEAPPDATA_BRANCH: "upstream" +- +-- job: tests +- fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests" +- fmf_ref: "master" +- use_internal_tf: True +- trigger: pull_request +- labels: +- - default ++- &default-86to90 ++ <<: *default-79to86 + targets: + epel-8-x86_64: + distros: [RHEL-8.6.0-Nightly] + identifier: tests-8.6to9.0 +- 
tmt_plan: "(?!.*uefi)(?!.*max_sst)(?!.*partitioning)(.*tier1)" + tf_extra_params: + environments: + - tmt: +@@ -323,12 +215,8 @@ jobs: + LEAPPDATA_BRANCH: "upstream" + + # On-demand minimal beaker tests +-- job: tests +- fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests" +- fmf_ref: "master" +- use_internal_tf: True +- trigger: pull_request +- manual_trigger: True ++- &beaker-minimal-86to90 ++ <<: *beaker-minimal-79to86 + labels: + - minimal-beaker + - minimal-beaker-8.6to9.0 +@@ -336,7 +224,6 @@ jobs: + epel-8-x86_64: + distros: [RHEL-8.6.0-Nightly] + identifier: tests-8.6to9.0-minimal-beaker +- tmt_plan: "(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog|.*uefi)" + tf_extra_params: + environments: + - tmt: +@@ -354,18 +241,12 @@ jobs: + RHSM_REPOS: "rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms" + LEAPPDATA_BRANCH: "upstream" + +-- job: tests +- fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests" +- fmf_ref: "master" +- use_internal_tf: True +- trigger: pull_request +- labels: +- - default ++- &default-88to92 ++ <<: *default-86to90 + targets: + epel-8-x86_64: + distros: [RHEL-8.8.0-Nightly] + identifier: tests-8.8to9.2 +- tmt_plan: "(?!.*uefi)(?!.*max_sst)(?!.*partitioning)(.*tier1)" + tf_extra_params: + environments: + - tmt: +@@ -385,12 +266,8 @@ jobs: + LEAPP_DEVEL_TARGET_RELEASE: "9.2" + + # On-demand minimal beaker tests +-- job: tests +- fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests" +- fmf_ref: "master" +- use_internal_tf: True +- trigger: pull_request +- manual_trigger: True ++- &beaker-minimal-88to92 ++ <<: *beaker-minimal-86to90 + labels: + - minimal-beaker + - minimal-beaker-8.8to9.2 +@@ -398,7 +275,6 @@ jobs: + epel-8-x86_64: + distros: [RHEL-8.8.0-Nightly] + identifier: tests-8.8to9.2-minimal-beaker +- tmt_plan: "(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog|.*uefi)" + tf_extra_params: + environments: + - tmt: +@@ -417,18 +293,12 @@ jobs: + LEAPPDATA_BRANCH: "upstream" + LEAPP_DEVEL_TARGET_RELEASE: "9.2" + +-- job: tests +- fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests" +- fmf_ref: "master" +- use_internal_tf: True +- trigger: pull_request +- labels: +- - default ++- &default-89to93 ++ <<: *default-88to92 + targets: + epel-8-x86_64: + distros: [RHEL-8.9.0-Nightly] + identifier: tests-8.9to9.3 +- tmt_plan: "(?!.*uefi)(?!.*max_sst)(?!.*partitioning)(.*tier1)" + tf_extra_params: + environments: + - tmt: +@@ -448,12 +318,8 @@ jobs: + LEAPP_DEVEL_TARGET_RELEASE: "9.3" + + # On-demand minimal beaker tests +-- job: tests +- fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests" +- fmf_ref: "master" +- use_internal_tf: True +- trigger: pull_request +- manual_trigger: True ++- &beaker-minimal-89to93 ++ <<: *beaker-minimal-88to92 + labels: + - minimal-beaker + - minimal-beaker-8.9to9.3 +@@ -461,7 +327,6 @@ jobs: + epel-8-x86_64: + distros: [RHEL-8.9.0-Nightly] + identifier: tests-8.9to9.3-minimal-beaker +- tmt_plan: "(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog|.*uefi)" + tf_extra_params: + environments: + - tmt: +@@ -480,19 +345,12 @@ jobs: + LEAPPDATA_BRANCH: "upstream" + LEAPP_DEVEL_TARGET_RELEASE: "9.3" + +-- job: tests +- fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests" +- fmf_ref: "master" +- use_internal_tf: True +- trigger: pull_request +- labels: +- - default +- - aws ++- &default-86to90-aws ++ <<: *default-79to86-aws + targets: + epel-8-x86_64: + distros: [RHEL-8.6-rhui] + identifier: tests-8to9-aws-e2e +- tmt_plan: 
"^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*7to8)(.*e2e)" + tf_extra_params: + environments: + - tmt: +-- +2.41.0 + diff --git a/0007-Add-kernel-rt-tests-and-switch-to-sanity-for-default.patch b/0007-Add-kernel-rt-tests-and-switch-to-sanity-for-default.patch new file mode 100644 index 0000000..31d0cef --- /dev/null +++ b/0007-Add-kernel-rt-tests-and-switch-to-sanity-for-default.patch @@ -0,0 +1,118 @@ +From 622fa64abe818294ade9d533f2bffdf320849b0f Mon Sep 17 00:00:00 2001 +From: Inessa Vasilevskaya +Date: Wed, 23 Aug 2023 15:24:57 +0200 +Subject: [PATCH 07/38] Add kernel-rt tests and switch to sanity for default + +Instead if a bulky regex sanity test plan will be used. +Also kernel-rt tests have been specified as a separate +test set with kernel-rt label. +--- + .packit.yaml | 50 +++++++++++++++++++++++++++++++++++++++++++++++++- + 1 file changed, 49 insertions(+), 1 deletion(-) + +diff --git a/.packit.yaml b/.packit.yaml +index 06c681b3..eb08c9f5 100644 +--- a/.packit.yaml ++++ b/.packit.yaml +@@ -97,7 +97,7 @@ jobs: + epel-7-x86_64: + distros: [RHEL-7.9-ZStream] + identifier: tests-7.9to8.6 +- tmt_plan: "(?!.*uefi)(?!.*max_sst)(?!.*partitioning)(?!.*oamg4250_lvm_var_xfs_ftype0)(?!.*kernel-rt)(.*tier1)" ++ tmt_plan: "sanity_plan" + tf_extra_params: + environments: + - tmt: +@@ -151,6 +151,14 @@ jobs: + identifier: tests-7.9to8.6-minimal-beaker + tmt_plan: "(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog|.*uefi|.*oamg4250_lvm_var_xfs_ftype0)" + ++# On-demand kernel-rt tests ++- &kernel-rt-79to86 ++ <<: *beaker-minimal-79to86 ++ labels: ++ - kernel-rt ++ identifier: tests-7.9to8.6-kernel-rt ++ tmt_plan: "(?!.*max_sst)(.*tier1)(.*kernel-rt)" ++ + - &default-79to88 + <<: *default-79to86 + identifier: tests-7.9to8.8 +@@ -171,6 +179,14 @@ jobs: + TARGET_RELEASE: "8.8" + LEAPPDATA_BRANCH: "upstream" + ++# On-demand kernel-rt tests ++- &kernel-rt-79to88 ++ <<: *beaker-minimal-79to88 ++ labels: ++ - kernel-rt ++ identifier: tests-7.9to8.8-kernel-rt ++ tmt_plan: "(?!.*max_sst)(.*tier1)(.*kernel-rt)" ++ + - &default-79to89 + <<: *default-79to86 + identifier: tests-7.9to8.9 +@@ -191,6 +207,14 @@ jobs: + TARGET_RELEASE: "8.9" + LEAPPDATA_BRANCH: "upstream" + ++# On-demand kernel-rt tests ++- &kernel-rt-79to89 ++ <<: *beaker-minimal-79to89 ++ labels: ++ - kernel-rt ++ identifier: tests-7.9to8.9-kernel-rt ++ tmt_plan: "(?!.*max_sst)(.*tier1)(.*kernel-rt)" ++ + - &default-86to90 + <<: *default-79to86 + targets: +@@ -241,6 +265,14 @@ jobs: + RHSM_REPOS: "rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms" + LEAPPDATA_BRANCH: "upstream" + ++# On-demand kernel-rt tests ++- &kernel-rt-86to90 ++ <<: *beaker-minimal-86to90 ++ labels: ++ - kernel-rt ++ identifier: tests-8.6to9.0-kernel-rt ++ tmt_plan: "(?!.*max_sst)(.*tier1)(.*kernel-rt)" ++ + - &default-88to92 + <<: *default-86to90 + targets: +@@ -293,6 +325,14 @@ jobs: + LEAPPDATA_BRANCH: "upstream" + LEAPP_DEVEL_TARGET_RELEASE: "9.2" + ++# On-demand kernel-rt tests ++- &kernel-rt-88to92 ++ <<: *beaker-minimal-88to92 ++ labels: ++ - kernel-rt ++ identifier: tests-8.8to9.2-kernel-rt ++ tmt_plan: "(?!.*max_sst)(.*tier1)(.*kernel-rt)" ++ + - &default-89to93 + <<: *default-88to92 + targets: +@@ -345,6 +385,14 @@ jobs: + LEAPPDATA_BRANCH: "upstream" + LEAPP_DEVEL_TARGET_RELEASE: "9.3" + ++# On-demand kernel-rt tests ++- &kernel-rt-89to93 ++ <<: *beaker-minimal-89to93 ++ labels: ++ - kernel-rt ++ identifier: tests-8.9to9.3-kernel-rt ++ tmt_plan: "(?!.*max_sst)(.*tier1)(.*kernel-rt)" ++ + - 
&default-86to90-aws + <<: *default-79to86-aws + targets: +-- +2.41.0 + diff --git a/0008-Minor-label-enhancements.patch b/0008-Minor-label-enhancements.patch new file mode 100644 index 0000000..166ba8c --- /dev/null +++ b/0008-Minor-label-enhancements.patch @@ -0,0 +1,155 @@ +From 4932e5ad0baac10db5efae9d57f8b57d2072b976 Mon Sep 17 00:00:00 2001 +From: Inessa Vasilevskaya +Date: Thu, 24 Aug 2023 11:34:15 +0200 +Subject: [PATCH 08/38] Minor label enhancements + +- minimal-beaker label has been renamed to beaker-minimal to match + with test job names; +- kernel-rt-XtoY labels have been added to each test to allow for + separate test launch. +--- + .packit.yaml | 42 ++++++++++++++++++++++++------------------ + 1 file changed, 24 insertions(+), 18 deletions(-) + +diff --git a/.packit.yaml b/.packit.yaml +index eb08c9f5..a183674c 100644 +--- a/.packit.yaml ++++ b/.packit.yaml +@@ -146,9 +146,9 @@ jobs: + <<: *default-79to86 + manual_trigger: True + labels: +- - minimal-beaker +- - minimal-beaker-7.9to8.6 +- identifier: tests-7.9to8.6-minimal-beaker ++ - beaker-minimal ++ - beaker-minimal-7.9to8.6 ++ identifier: tests-7.9to8.6-beaker-minimal + tmt_plan: "(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog|.*uefi|.*oamg4250_lvm_var_xfs_ftype0)" + + # On-demand kernel-rt tests +@@ -156,6 +156,7 @@ jobs: + <<: *beaker-minimal-79to86 + labels: + - kernel-rt ++ - kernel-rt-7.9to8.6 + identifier: tests-7.9to8.6-kernel-rt + tmt_plan: "(?!.*max_sst)(.*tier1)(.*kernel-rt)" + +@@ -171,9 +172,9 @@ jobs: + - &beaker-minimal-79to88 + <<: *beaker-minimal-79to86 + labels: +- - minimal-beaker +- - minimal-beaker-7.9to8.8 +- identifier: tests-7.9to8.8-minimal-beaker ++ - beaker-minimal ++ - beaker-minimal-7.9to8.8 ++ identifier: tests-7.9to8.8-beaker-minimal + env: + SOURCE_RELEASE: "7.9" + TARGET_RELEASE: "8.8" +@@ -184,6 +185,7 @@ jobs: + <<: *beaker-minimal-79to88 + labels: + - kernel-rt ++ - kernel-rt-7.9to8.8 + identifier: tests-7.9to8.8-kernel-rt + tmt_plan: "(?!.*max_sst)(.*tier1)(.*kernel-rt)" + +@@ -199,9 +201,9 @@ jobs: + - &beaker-minimal-79to89 + <<: *beaker-minimal-79to86 + labels: +- - minimal-beaker +- - minimal-beaker-7.9to8.9 +- identifier: tests-7.9to8.9-minimal-beaker ++ - beaker-minimal ++ - beaker-minimal-7.9to8.9 ++ identifier: tests-7.9to8.9-beaker-minimal + env: + SOURCE_RELEASE: "7.9" + TARGET_RELEASE: "8.9" +@@ -212,6 +214,7 @@ jobs: + <<: *beaker-minimal-79to89 + labels: + - kernel-rt ++ - kernel-rt-7.9to8.9 + identifier: tests-7.9to8.9-kernel-rt + tmt_plan: "(?!.*max_sst)(.*tier1)(.*kernel-rt)" + +@@ -242,12 +245,12 @@ jobs: + - &beaker-minimal-86to90 + <<: *beaker-minimal-79to86 + labels: +- - minimal-beaker +- - minimal-beaker-8.6to9.0 ++ - beaker-minimal ++ - beaker-minimal-8.6to9.0 + targets: + epel-8-x86_64: + distros: [RHEL-8.6.0-Nightly] +- identifier: tests-8.6to9.0-minimal-beaker ++ identifier: tests-8.6to9.0-beaker-minimal + tf_extra_params: + environments: + - tmt: +@@ -270,6 +273,7 @@ jobs: + <<: *beaker-minimal-86to90 + labels: + - kernel-rt ++ - kernel-rt-8.6to9.0 + identifier: tests-8.6to9.0-kernel-rt + tmt_plan: "(?!.*max_sst)(.*tier1)(.*kernel-rt)" + +@@ -301,12 +305,12 @@ jobs: + - &beaker-minimal-88to92 + <<: *beaker-minimal-86to90 + labels: +- - minimal-beaker +- - minimal-beaker-8.8to9.2 ++ - beaker-minimal ++ - beaker-minimal-8.8to9.2 + targets: + epel-8-x86_64: + distros: [RHEL-8.8.0-Nightly] +- identifier: tests-8.8to9.2-minimal-beaker ++ identifier: tests-8.8to9.2-beaker-minimal + tf_extra_params: + environments: + - tmt: +@@ -330,6 
+334,7 @@ jobs: + <<: *beaker-minimal-88to92 + labels: + - kernel-rt ++ - kernel-rt-8.8to9.2 + identifier: tests-8.8to9.2-kernel-rt + tmt_plan: "(?!.*max_sst)(.*tier1)(.*kernel-rt)" + +@@ -361,12 +366,12 @@ jobs: + - &beaker-minimal-89to93 + <<: *beaker-minimal-88to92 + labels: +- - minimal-beaker +- - minimal-beaker-8.9to9.3 ++ - beaker-minimal ++ - beaker-minimal-8.9to9.3 + targets: + epel-8-x86_64: + distros: [RHEL-8.9.0-Nightly] +- identifier: tests-8.9to9.3-minimal-beaker ++ identifier: tests-8.9to9.3-beaker-minimal + tf_extra_params: + environments: + - tmt: +@@ -390,6 +395,7 @@ jobs: + <<: *beaker-minimal-89to93 + labels: + - kernel-rt ++ - kernel-rt-8.9to9.3 + identifier: tests-8.9to9.3-kernel-rt + tmt_plan: "(?!.*max_sst)(.*tier1)(.*kernel-rt)" + +-- +2.41.0 + diff --git a/0009-Update-pr-welcome-message.patch b/0009-Update-pr-welcome-message.patch new file mode 100644 index 0000000..6b13b40 --- /dev/null +++ b/0009-Update-pr-welcome-message.patch @@ -0,0 +1,31 @@ +From 0b6d2df149754f26829734240f1b05be2e9d16a4 Mon Sep 17 00:00:00 2001 +From: Inessa Vasilevskaya +Date: Thu, 24 Aug 2023 14:00:35 +0200 +Subject: [PATCH 09/38] Update pr-welcome message + +List on-demand packit test launch possibilities. +--- + .github/workflows/pr-welcome-msg.yml | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/.github/workflows/pr-welcome-msg.yml b/.github/workflows/pr-welcome-msg.yml +index cec7c778..e791340e 100644 +--- a/.github/workflows/pr-welcome-msg.yml ++++ b/.github/workflows/pr-welcome-msg.yml +@@ -26,7 +26,12 @@ jobs: + + Packit will automatically schedule regression tests for this PR's build and latest upstream leapp build. If you need a different version of leapp from PR#42, use `/packit test oamg/leapp#42` + +- To launch regression testing public members of oamg organization can leave the following comment: ++ It is possible to schedule specific on-demand tests as well. Currently 2 test sets are supported, `beaker-minimal` and `kernel-rt`, both can be used to be run on all upgrade paths or just a couple of specific ones. 
++ To launch on-demand tests with packit: ++ - **/packit test --labels kernel-rt** to schedule `kernel-rt` tests set for all upgrade paths ++ - **/packit test --labels beaker-minimal-8.9to9.3,kernel-rt-8.9to9.3** to schedule `kernel-rt` and `beaker-minimal` test sets for 8.9->9.3 upgrade path ++ ++ [Deprecated] To launch on-demand regression testing public members of oamg organization can leave the following comment: + - **/rerun** to schedule basic regression tests using this pr build and latest upstream leapp build as artifacts + - **/rerun 42** to schedule basic regression tests using this pr build and leapp\*PR42\* as artifacts + - **/rerun-sst** to schedule sst tests using this pr build and latest upstream leapp build as artifacts +-- +2.41.0 + diff --git a/0010-Address-ddiblik-s-review-comments.patch b/0010-Address-ddiblik-s-review-comments.patch new file mode 100644 index 0000000..adcb11b --- /dev/null +++ b/0010-Address-ddiblik-s-review-comments.patch @@ -0,0 +1,256 @@ +From ab94d25f067afa0b974dc6b850687023d982f52f Mon Sep 17 00:00:00 2001 +From: Inessa Vasilevskaya +Date: Mon, 28 Aug 2023 15:12:38 +0200 +Subject: [PATCH 10/38] Address ddiblik's review comments + +- Rename default tests to sanity +- Add XtoY label to on-demand test sets for specific upgrade + paths +--- + .packit.yaml | 88 +++++++++++++++++++++++++++++----------------------- + 1 file changed, 50 insertions(+), 38 deletions(-) + +diff --git a/.packit.yaml b/.packit.yaml +index a183674c..3085ec0a 100644 +--- a/.packit.yaml ++++ b/.packit.yaml +@@ -85,18 +85,18 @@ jobs: + # builds from master branch should start with 100 release, to have high priority + - bash -c "sed -i \"s/1%{?dist}/100%{?dist}/g\" packaging/leapp-repository.spec" + +-- &default-79to86 ++- &sanity-79to86 + job: tests + fmf_url: "https://gitlab.cee.redhat.com/oamg/tmt-plans" + fmf_ref: "master" + use_internal_tf: True + trigger: pull_request + labels: +- - default ++ - sanity + targets: + epel-7-x86_64: + distros: [RHEL-7.9-ZStream] +- identifier: tests-7.9to8.6 ++ identifier: sanity-7.9to8.6 + tmt_plan: "sanity_plan" + tf_extra_params: + environments: +@@ -114,15 +114,15 @@ jobs: + TARGET_RELEASE: "8.6" + LEAPPDATA_BRANCH: "upstream" + +-- &default-79to86-aws +- <<: *default-79to86 ++- &sanity-79to86-aws ++ <<: *sanity-79to86 + labels: +- - default ++ - sanity + - aws + targets: + epel-7-x86_64: + distros: [RHEL-7.9-rhui] +- identifier: tests-7to8-aws-e2e ++ identifier: sanity-7to8-aws-e2e + tmt_plan: "(?!.*sap)(.*e2e)" + tf_extra_params: + environments: +@@ -143,12 +143,13 @@ jobs: + + # On-demand minimal beaker tests + - &beaker-minimal-79to86 +- <<: *default-79to86 ++ <<: *sanity-79to86 + manual_trigger: True + labels: + - beaker-minimal + - beaker-minimal-7.9to8.6 +- identifier: tests-7.9to8.6-beaker-minimal ++ - 7.9to8.6 ++ identifier: sanity-7.9to8.6-beaker-minimal + tmt_plan: "(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog|.*uefi|.*oamg4250_lvm_var_xfs_ftype0)" + + # On-demand kernel-rt tests +@@ -157,12 +158,13 @@ jobs: + labels: + - kernel-rt + - kernel-rt-7.9to8.6 +- identifier: tests-7.9to8.6-kernel-rt ++ - 7.9to8.6 ++ identifier: sanity-7.9to8.6-kernel-rt + tmt_plan: "(?!.*max_sst)(.*tier1)(.*kernel-rt)" + +-- &default-79to88 +- <<: *default-79to86 +- identifier: tests-7.9to8.8 ++- &sanity-79to88 ++ <<: *sanity-79to86 ++ identifier: sanity-7.9to8.8 + env: + SOURCE_RELEASE: "7.9" + TARGET_RELEASE: "8.8" +@@ -174,7 +176,8 @@ jobs: + labels: + - beaker-minimal + - beaker-minimal-7.9to8.8 +- identifier: 
tests-7.9to8.8-beaker-minimal ++ - 7.9to8.8 ++ identifier: sanity-7.9to8.8-beaker-minimal + env: + SOURCE_RELEASE: "7.9" + TARGET_RELEASE: "8.8" +@@ -186,12 +189,13 @@ jobs: + labels: + - kernel-rt + - kernel-rt-7.9to8.8 +- identifier: tests-7.9to8.8-kernel-rt ++ - 7.9to8.8 ++ identifier: sanity-7.9to8.8-kernel-rt + tmt_plan: "(?!.*max_sst)(.*tier1)(.*kernel-rt)" + +-- &default-79to89 +- <<: *default-79to86 +- identifier: tests-7.9to8.9 ++- &sanity-79to89 ++ <<: *sanity-79to86 ++ identifier: sanity-7.9to8.9 + env: + SOURCE_RELEASE: "7.9" + TARGET_RELEASE: "8.9" +@@ -203,7 +207,8 @@ jobs: + labels: + - beaker-minimal + - beaker-minimal-7.9to8.9 +- identifier: tests-7.9to8.9-beaker-minimal ++ - 7.9to8.9 ++ identifier: sanity-7.9to8.9-beaker-minimal + env: + SOURCE_RELEASE: "7.9" + TARGET_RELEASE: "8.9" +@@ -215,15 +220,16 @@ jobs: + labels: + - kernel-rt + - kernel-rt-7.9to8.9 +- identifier: tests-7.9to8.9-kernel-rt ++ - 7.9to8.9 ++ identifier: sanity-7.9to8.9-kernel-rt + tmt_plan: "(?!.*max_sst)(.*tier1)(.*kernel-rt)" + +-- &default-86to90 +- <<: *default-79to86 ++- &sanity-86to90 ++ <<: *sanity-79to86 + targets: + epel-8-x86_64: + distros: [RHEL-8.6.0-Nightly] +- identifier: tests-8.6to9.0 ++ identifier: sanity-8.6to9.0 + tf_extra_params: + environments: + - tmt: +@@ -247,10 +253,11 @@ jobs: + labels: + - beaker-minimal + - beaker-minimal-8.6to9.0 ++ - 8.6to9.0 + targets: + epel-8-x86_64: + distros: [RHEL-8.6.0-Nightly] +- identifier: tests-8.6to9.0-beaker-minimal ++ identifier: sanity-8.6to9.0-beaker-minimal + tf_extra_params: + environments: + - tmt: +@@ -274,15 +281,16 @@ jobs: + labels: + - kernel-rt + - kernel-rt-8.6to9.0 +- identifier: tests-8.6to9.0-kernel-rt ++ - 8.6to9.0 ++ identifier: sanity-8.6to9.0-kernel-rt + tmt_plan: "(?!.*max_sst)(.*tier1)(.*kernel-rt)" + +-- &default-88to92 +- <<: *default-86to90 ++- &sanity-88to92 ++ <<: *sanity-86to90 + targets: + epel-8-x86_64: + distros: [RHEL-8.8.0-Nightly] +- identifier: tests-8.8to9.2 ++ identifier: sanity-8.8to9.2 + tf_extra_params: + environments: + - tmt: +@@ -307,10 +315,11 @@ jobs: + labels: + - beaker-minimal + - beaker-minimal-8.8to9.2 ++ - 8.6to9.2 + targets: + epel-8-x86_64: + distros: [RHEL-8.8.0-Nightly] +- identifier: tests-8.8to9.2-beaker-minimal ++ identifier: sanity-8.8to9.2-beaker-minimal + tf_extra_params: + environments: + - tmt: +@@ -335,15 +344,16 @@ jobs: + labels: + - kernel-rt + - kernel-rt-8.8to9.2 +- identifier: tests-8.8to9.2-kernel-rt ++ - 8.8to9.2 ++ identifier: sanity-8.8to9.2-kernel-rt + tmt_plan: "(?!.*max_sst)(.*tier1)(.*kernel-rt)" + +-- &default-89to93 +- <<: *default-88to92 ++- &sanity-89to93 ++ <<: *sanity-88to92 + targets: + epel-8-x86_64: + distros: [RHEL-8.9.0-Nightly] +- identifier: tests-8.9to9.3 ++ identifier: sanity-8.9to9.3 + tf_extra_params: + environments: + - tmt: +@@ -368,10 +378,11 @@ jobs: + labels: + - beaker-minimal + - beaker-minimal-8.9to9.3 ++ - 8.9to9.3 + targets: + epel-8-x86_64: + distros: [RHEL-8.9.0-Nightly] +- identifier: tests-8.9to9.3-beaker-minimal ++ identifier: sanity-8.9to9.3-beaker-minimal + tf_extra_params: + environments: + - tmt: +@@ -396,15 +407,16 @@ jobs: + labels: + - kernel-rt + - kernel-rt-8.9to9.3 +- identifier: tests-8.9to9.3-kernel-rt ++ - 8.9to9.3 ++ identifier: sanity-8.9to9.3-kernel-rt + tmt_plan: "(?!.*max_sst)(.*tier1)(.*kernel-rt)" + +-- &default-86to90-aws +- <<: *default-79to86-aws ++- &sanity-86to90-aws ++ <<: *sanity-79to86-aws + targets: + epel-8-x86_64: + distros: [RHEL-8.6-rhui] +- identifier: tests-8to9-aws-e2e ++ identifier: 
sanity-8to9-aws-e2e + tf_extra_params: + environments: + - tmt: +-- +2.41.0 + diff --git a/0011-Address-mmoran-s-review-comments.patch b/0011-Address-mmoran-s-review-comments.patch new file mode 100644 index 0000000..0ee5335 --- /dev/null +++ b/0011-Address-mmoran-s-review-comments.patch @@ -0,0 +1,173 @@ +From 93c6fd4f150229a01ba43ce74214043cffaf7dce Mon Sep 17 00:00:00 2001 +From: Inessa Vasilevskaya +Date: Tue, 29 Aug 2023 18:18:01 +0200 +Subject: [PATCH 11/38] Address mmoran's review comments + +- Use RHSM_REPOS_EUS='eus' instead of RHSM_REPOS for 8.6->9.0 +- Remove beta repos from 8.8->9.2 +- Change BusinessUnit tag value to sst_upgrades@leapp_upstream_test +--- + .packit.yaml | 43 +++++++++++++++++++++---------------------- + 1 file changed, 21 insertions(+), 22 deletions(-) + +diff --git a/.packit.yaml b/.packit.yaml +index 3085ec0a..cd6dd7d1 100644 +--- a/.packit.yaml ++++ b/.packit.yaml +@@ -103,12 +103,12 @@ jobs: + - tmt: + context: + distro: "rhel-7.9" +- # tag resources as sst_upgrades to enable cost metrics collection ++ # tag resources as sst_upgrades@leapp_upstream_test to enable cost metrics collection + settings: + provisioning: + post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" + tags: +- BusinessUnit: sst_upgrades ++ BusinessUnit: sst_upgrades@leapp_upstream_test + env: + SOURCE_RELEASE: "7.9" + TARGET_RELEASE: "8.6" +@@ -129,12 +129,12 @@ jobs: + - tmt: + context: + distro: "rhel-7.9" +- # tag resources as sst_upgrades to enable cost metrics collection ++ # tag resources as sst_upgrades@leapp_upstream_test to enable cost metrics collection + settings: + provisioning: + post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys; yum-config-manager --enable rhel-7-server-rhui-optional-rpms" + tags: +- BusinessUnit: sst_upgrades ++ BusinessUnit: sst_upgrades@leapp_upstream_test + env: + SOURCE_RELEASE: "7.9" + TARGET_RELEASE: "8.6" +@@ -235,16 +235,16 @@ jobs: + - tmt: + context: + distro: "rhel-8.6" +- # tag resources as sst_upgrades to enable cost metrics collection ++ # tag resources as sst_upgrades@leapp_upstream_test to enable cost metrics collection + settings: + provisioning: + post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" + tags: +- BusinessUnit: sst_upgrades ++ BusinessUnit: sst_upgrades@leapp_upstream_test + env: + SOURCE_RELEASE: "8.6" + TARGET_RELEASE: "9.0" +- RHSM_REPOS: "rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms" ++ RHSM_REPOS_EUS: "eus" + LEAPPDATA_BRANCH: "upstream" + + # On-demand minimal beaker tests +@@ -263,16 +263,16 @@ jobs: + - tmt: + context: + distro: "rhel-8.6" +- # tag resources as sst_upgrades to enable cost metrics collection ++ # tag resources as sst_upgrades@leapp_upstream_test to enable cost metrics collection + settings: + provisioning: + post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" + tags: +- BusinessUnit: sst_upgrades ++ BusinessUnit: sst_upgrades@leapp_upstream_test + env: + SOURCE_RELEASE: "8.6" + TARGET_RELEASE: "9.0" +- RHSM_REPOS: "rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms" ++ RHSM_REPOS_EUS: "eus" + LEAPPDATA_BRANCH: "upstream" + + # On-demand kernel-rt tests +@@ -296,16 +296,16 @@ jobs: + - tmt: + context: + distro: "rhel-8.8" +- # tag resources as sst_upgrades to enable cost metrics collection ++ # tag resources as sst_upgrades@leapp_upstream_test to enable cost metrics collection + settings: + 
provisioning: + post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" + tags: +- BusinessUnit: sst_upgrades ++ BusinessUnit: sst_upgrades@leapp_upstream_test + env: + SOURCE_RELEASE: "8.8" + TARGET_RELEASE: "9.2" +- RHSM_REPOS: "rhel-8-for-x86_64-appstream-beta-rpms,rhel-8-for-x86_64-baseos-beta-rpms" ++ RHSM_REPOS_EUS: "eus" + LEAPPDATA_BRANCH: "upstream" + LEAPP_DEVEL_TARGET_RELEASE: "9.2" + +@@ -325,16 +325,15 @@ jobs: + - tmt: + context: + distro: "rhel-8.8" +- # tag resources as sst_upgrades to enable cost metrics collection ++ # tag resources as sst_upgrades@leapp_upstream_test to enable cost metrics collection + settings: + provisioning: + post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" + tags: +- BusinessUnit: sst_upgrades ++ BusinessUnit: sst_upgrades@leapp_upstream_test + env: + SOURCE_RELEASE: "8.8" + TARGET_RELEASE: "9.2" +- RHSM_REPOS: "rhel-8-for-x86_64-appstream-beta-rpms,rhel-8-for-x86_64-baseos-beta-rpms" + LEAPPDATA_BRANCH: "upstream" + LEAPP_DEVEL_TARGET_RELEASE: "9.2" + +@@ -359,12 +358,12 @@ jobs: + - tmt: + context: + distro: "rhel-8.9" +- # tag resources as sst_upgrades to enable cost metrics collection ++ # tag resources as sst_upgrades@leapp_upstream_test to enable cost metrics collection + settings: + provisioning: + post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" + tags: +- BusinessUnit: sst_upgrades ++ BusinessUnit: sst_upgrades@leapp_upstream_test + env: + SOURCE_RELEASE: "8.9" + TARGET_RELEASE: "9.3" +@@ -388,12 +387,12 @@ jobs: + - tmt: + context: + distro: "rhel-8.9" +- # tag resources as sst_upgrades to enable cost metrics collection ++ # tag resources as sst_upgrades@leapp_upstream_test to enable cost metrics collection + settings: + provisioning: + post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" + tags: +- BusinessUnit: sst_upgrades ++ BusinessUnit: sst_upgrades@leapp_upstream_test + env: + SOURCE_RELEASE: "8.9" + TARGET_RELEASE: "9.3" +@@ -422,12 +421,12 @@ jobs: + - tmt: + context: + distro: "rhel-8.6" +- # tag resources as sst_upgrades to enable cost metrics collection ++ # tag resources as sst_upgrades@leapp_upstream_test to enable cost metrics collection + settings: + provisioning: + post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" + tags: +- BusinessUnit: sst_upgrades ++ BusinessUnit: sst_upgrades@leapp_upstream_test + env: + SOURCE_RELEASE: "8.6" + TARGET_RELEASE: "9.0" +-- +2.41.0 + diff --git a/0012-Add-isccfg-library-manual-running-mode.patch b/0012-Add-isccfg-library-manual-running-mode.patch new file mode 100644 index 0000000..30e95b2 --- /dev/null +++ b/0012-Add-isccfg-library-manual-running-mode.patch @@ -0,0 +1,50 @@ +From f83702c6e78b535a9511e0842c478773a1271cad Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Petr=20Men=C5=A1=C3=ADk?= +Date: Wed, 30 Aug 2023 16:58:45 +0200 +Subject: [PATCH 12/38] Add isccfg library manual running mode + +For simplified manual testing add waking mode to parser script. Allows +direct test run displaying just chosen statements or blocks. 
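+
+A minimal sketch of driving the same walk from Python rather than from the
+command line (the config path and the importable `isccfg` name are just an
+illustration; `print_cb` mirrors the callback added below):
+
+    from isccfg import IscConfigParser
+
+    def print_cb(section, state):
+        print(section)
+
+    parser = IscConfigParser('/etc/named.conf')
+    for cfg in parser.FILES_TO_CHECK:
+        # print every `options` and `zone` statement found in the config
+        parser.walk(cfg.root_section(), {'options': print_cb, 'zone': print_cb})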
+--- + .../el7toel8/libraries/isccfg.py | 28 +++++++++++++++++++ + 1 file changed, 28 insertions(+) + +diff --git a/repos/system_upgrade/el7toel8/libraries/isccfg.py b/repos/system_upgrade/el7toel8/libraries/isccfg.py +index dff9bf24..1d29ff21 100644 +--- a/repos/system_upgrade/el7toel8/libraries/isccfg.py ++++ b/repos/system_upgrade/el7toel8/libraries/isccfg.py +@@ -948,3 +948,31 @@ class IscConfigParser(object): + self.load_main_config() + self.load_included_files() + pass ++ ++ ++if __name__ == '__main__': ++ """Run parser to default path or path in the first argument. ++ ++ Additional parameters are statements or blocks to print. ++ Defaults to options and zone. ++ """ ++ ++ from sys import argv ++ ++ def print_cb(section, state): ++ print(section) ++ ++ cfgpath = IscConfigParser.CONFIG_FILE ++ if len(argv) > 1: ++ cfgpath = argv[1] ++ if len(argv) > 2: ++ cb = {} ++ for key in argv[2:]: ++ cb[key] = print_cb ++ else: ++ cb = {'options': print_cb, 'zone': print_cb} ++ ++ parser = IscConfigParser(cfgpath) ++ for section in parser.FILES_TO_CHECK: ++ print("# Walking file '{}'".format(section.path)) ++ parser.walk(section.root_section(), cb) +-- +2.41.0 + diff --git a/0013-Avoid-warnings-on-python2.patch b/0013-Avoid-warnings-on-python2.patch new file mode 100644 index 0000000..b993940 --- /dev/null +++ b/0013-Avoid-warnings-on-python2.patch @@ -0,0 +1,26 @@ +From fa0773ddd5d27762d10ad769c119ef87b1684e5e Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Petr=20Men=C5=A1=C3=ADk?= +Date: Thu, 31 Aug 2023 13:04:34 +0200 +Subject: [PATCH 13/38] Avoid warnings on python2 + +Use python3 compatible print function +--- + repos/system_upgrade/el7toel8/libraries/isccfg.py | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/repos/system_upgrade/el7toel8/libraries/isccfg.py b/repos/system_upgrade/el7toel8/libraries/isccfg.py +index 1d29ff21..45baba0b 100644 +--- a/repos/system_upgrade/el7toel8/libraries/isccfg.py ++++ b/repos/system_upgrade/el7toel8/libraries/isccfg.py +@@ -2,6 +2,8 @@ + # + # Simplified parsing of bind configuration, with include support and nested sections. + ++from __future__ import print_function ++ + import re + import string + +-- +2.41.0 + diff --git a/0014-makefile-add-dev_test_no_lint-target.patch b/0014-makefile-add-dev_test_no_lint-target.patch new file mode 100644 index 0000000..ee610bd --- /dev/null +++ b/0014-makefile-add-dev_test_no_lint-target.patch @@ -0,0 +1,172 @@ +From 6ae2d5aadbf6a626cf27ca4594a3945e2c249122 Mon Sep 17 00:00:00 2001 +From: mhecko +Date: Tue, 1 Aug 2023 12:44:47 +0200 +Subject: [PATCH 14/38] makefile: add dev_test_no_lint target + +Add a target for testing individual actors with almost-instant +execution time. Testing individual actors currently involves +a process in which every actor is instantiated in a separate +process, the created instance reports actor information such as actor's +name and then exits. As many processes are created, this process is +time consuming (cca 7s) which disrupts developer's workflow and causes +attention shift. + +A newly added target `dev_test_no_lint` uses an introduced script +`find_actors`. To achieve the similar level of framework protection +as spawning actors in a separate process, the `find_actors` script +does not execute actors at all, and instead works on their ASTs. +Specifically, the script looks for all files named `actor.py`, finds +all classes that (explicitely) subclass Actor, and reads its `name` +attribute. 
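+
+The detection itself is roughly the following sketch (simplified; the full
+script below additionally copes with the ast.Str/ast.Constant rename in
+Python 3.8 and extracts the class-level `name` assignment):
+
+    import ast
+
+    tree = ast.parse(open('actor.py').read())
+    for node in tree.body:
+        is_actor = isinstance(node, ast.ClassDef) and any(
+            isinstance(base, ast.Name) and base.id == 'Actor'
+            for base in node.bases)
+        if is_actor:
+            print('actor class found:', node.name)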
+ +Usage example: + ACTOR=check_target_iso make dev_test_no_lint +--- + Makefile | 15 +++++--- + utils/find_actors.py | 81 ++++++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 91 insertions(+), 5 deletions(-) + create mode 100644 utils/find_actors.py + +diff --git a/Makefile b/Makefile +index b63192e3..e3c40e01 100644 +--- a/Makefile ++++ b/Makefile +@@ -16,9 +16,12 @@ REPOSITORIES ?= $(shell ls $(_SYSUPG_REPOS) | xargs echo | tr " " ",") + SYSUPG_TEST_PATHS=$(shell echo $(REPOSITORIES) | sed -r "s|(,\\|^)| $(_SYSUPG_REPOS)/|g") + TEST_PATHS:=commands repos/common $(SYSUPG_TEST_PATHS) + ++# python version to run test with ++_PYTHON_VENV=$${PYTHON_VENV:-python2.7} + + ifdef ACTOR +- TEST_PATHS=`python utils/actor_path.py $(ACTOR)` ++ TEST_PATHS=`$(_PYTHON_VENV) utils/actor_path.py $(ACTOR)` ++ APPROX_TEST_PATHS=$(shell $(_PYTHON_VENV) utils/find_actors.py -C repos $(ACTOR)) # Dev only + endif + + ifeq ($(TEST_LIBS),y) +@@ -32,9 +35,6 @@ endif + # needed only in case the Python2 should be used + _USE_PYTHON_INTERPRETER=$${_PYTHON_INTERPRETER} + +-# python version to run test with +-_PYTHON_VENV=$${PYTHON_VENV:-python2.7} +- + # by default use values you can see below, but in case the COPR_* var is defined + # use it instead of the default + _COPR_REPO=$${COPR_REPO:-leapp} +@@ -127,6 +127,7 @@ help: + @echo " - can be changed by setting TEST_CONTAINER env" + @echo " test_container_all run lint and tests in all available containers" + @echo " test_container_no_lint run tests without linting in container, see test_container" ++ @echo " dev_test_no_lint (advanced users) run only tests of a single actor specified by the ACTOR variable" + @echo " test_container_all_no_lint run tests without linting in all available containers" + @echo " clean_containers clean all testing and building container images (to force a rebuild for example)" + @echo "" +@@ -486,6 +487,10 @@ fast_lint: + echo "No files to lint."; \ + fi + ++dev_test_no_lint: ++ . $(VENVNAME)/bin/activate; \ ++ $(_PYTHON_VENV) -m pytest $(REPORT_ARG) $(APPROX_TEST_PATHS) $(LIBRARY_PATH) ++ + dashboard_data: + . 
$(VENVNAME)/bin/activate; \ + snactor repo find --path repos/; \ +@@ -494,4 +499,4 @@ dashboard_data: + popd + + .PHONY: help build clean prepare source srpm copr_build _build_local build_container print_release register install-deps install-deps-fedora lint test_no_lint test dashboard_data fast_lint +-.PHONY: test_container test_container_no_lint test_container_all test_container_all_no_lint clean_containers _build_container_image _test_container_ipu ++.PHONY: test_container test_container_no_lint test_container_all test_container_all_no_lint clean_containers _build_container_image _test_container_ipu dev_test_no_lint +diff --git a/utils/find_actors.py b/utils/find_actors.py +new file mode 100644 +index 00000000..25cc2217 +--- /dev/null ++++ b/utils/find_actors.py +@@ -0,0 +1,81 @@ ++import argparse ++import ast ++import os ++import sys ++ ++ ++def is_direct_actor_def(ast_node): ++ if not isinstance(ast_node, ast.ClassDef): ++ return False ++ ++ direcly_named_bases = (base for base in ast_node.bases if isinstance(base, ast.Name)) ++ for class_base in direcly_named_bases: ++ # We are looking for direct name 'Actor' ++ if class_base.id == 'Actor': ++ return True ++ ++ return False ++ ++ ++def extract_actor_name_from_def(actor_class_def): ++ assignment_value_class = ast.Str if sys.version_info < (3,8) else ast.Constant ++ assignment_value_attrib = 's' if sys.version_info < (3,8) else 'value' ++ ++ actor_name = None ++ class_level_assignments = (child for child in actor_class_def.body if isinstance(child, ast.Assign)) ++ # Search for class-level assignment specifying actor's name: `name = 'name'` ++ for child in class_level_assignments: ++ assignment = child ++ for target in assignment.targets: ++ assignment_adds_name_attrib = isinstance(target, ast.Name) and target.id == 'name' ++ assignment_uses_a_constant_string = isinstance(assignment.value, assignment_value_class) ++ if assignment_adds_name_attrib and assignment_uses_a_constant_string: ++ rhs = assignment.value # = ++ actor_name = getattr(rhs, assignment_value_attrib) ++ break ++ if actor_name is not None: ++ break ++ return actor_name ++ ++ ++def get_actor_names(actor_path): ++ with open(actor_path) as actor_file: ++ try: ++ actor_def = ast.parse(actor_file.read()) ++ except SyntaxError: ++ error = ('Failed to parse {0}. 
The actor might contain syntax errors, or perhaps it ' ++ 'is written with Python3-specific syntax?\n') ++ sys.stderr.write(error.format(actor_path)) ++ return [] ++ actor_defs = [ast_node for ast_node in actor_def.body if is_direct_actor_def(ast_node)] ++ actors = [extract_actor_name_from_def(actor_def) for actor_def in actor_defs] ++ return actors ++ ++ ++def make_parser(): ++ parser = argparse.ArgumentParser() ++ parser.add_argument('actor_names', nargs='+', ++ help='Actor names (the name attribute of the actor class) to look for.') ++ parser.add_argument('-C', '--change-dir', dest='cwd', ++ help='Path in which the actors will be looked for.', default='.') ++ return parser ++ ++ ++if __name__ == '__main__': ++ parser = make_parser() ++ args = parser.parse_args() ++ cwd = os.path.abspath(args.cwd) ++ actor_names_to_search_for = set(args.actor_names) ++ ++ actor_paths = [] ++ for directory, dummy_subdirs, dir_files in os.walk(cwd): ++ for actor_path in dir_files: ++ actor_path = os.path.join(directory, actor_path) ++ if os.path.basename(actor_path) != 'actor.py': ++ continue ++ ++ defined_actor_names = set(get_actor_names(actor_path)) ++ if defined_actor_names.intersection(actor_names_to_search_for): ++ actor_module_path = directory ++ actor_paths.append(actor_module_path) ++ print('\n'.join(actor_paths)) +-- +2.41.0 + diff --git a/0015-Fix-the-issue-of-going-out-of-bounds-in-the-isccfg-p.patch b/0015-Fix-the-issue-of-going-out-of-bounds-in-the-isccfg-p.patch new file mode 100644 index 0000000..158497a --- /dev/null +++ b/0015-Fix-the-issue-of-going-out-of-bounds-in-the-isccfg-p.patch @@ -0,0 +1,82 @@ +From 4d8ad1c0363fc21f5d8a557f3319a6efacac9f2a Mon Sep 17 00:00:00 2001 +From: SandakovMM +Date: Thu, 24 Aug 2023 16:01:39 +0300 +Subject: [PATCH 15/38] Fix the issue of going out of bounds in the isccfg + parser. + +This problem can occur when attempting to parse an empty file. 
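+
+The failure comes down to operand order and short-circuit evaluation; an
+illustrative sketch (CHAR_KEYWORD here is a stand-in for the parser's
+keyword character set):
+
+    CHAR_KEYWORD = set('abcdefghijklmnopqrstuvwxyz')
+    istr = ''                        # buffer of an empty configuration file
+    index, end_index = 0, len(istr)
+
+    # The old operand order indexed first and raised IndexError on '':
+    #     while istr[index] in CHAR_KEYWORD and index < end_index:
+    # Checking the bound first short-circuits before indexing:
+    while index < end_index and istr[index] in CHAR_KEYWORD:
+        index += 1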
+--- + .../el7toel8/libraries/isccfg.py | 5 ++- + .../el7toel8/libraries/tests/test_isccfg.py | 32 +++++++++++++++++++ + 2 files changed, 36 insertions(+), 1 deletion(-) + +diff --git a/repos/system_upgrade/el7toel8/libraries/isccfg.py b/repos/system_upgrade/el7toel8/libraries/isccfg.py +index 45baba0b..6cebb289 100644 +--- a/repos/system_upgrade/el7toel8/libraries/isccfg.py ++++ b/repos/system_upgrade/el7toel8/libraries/isccfg.py +@@ -688,9 +688,12 @@ class IscConfigParser(object): + + while index != -1: + keystart = index +- while istr[index] in self.CHAR_KEYWORD and index < end_index: ++ while index < end_index and istr[index] in self.CHAR_KEYWORD: + index += 1 + ++ if index >= end_index: ++ break ++ + if keystart < index <= end_index and istr[index] not in self.CHAR_KEYWORD: + # key has been found + return ConfigSection(cfg, istr[keystart:index], keystart, index-1) +diff --git a/repos/system_upgrade/el7toel8/libraries/tests/test_isccfg.py b/repos/system_upgrade/el7toel8/libraries/tests/test_isccfg.py +index 7438fa37..00753681 100644 +--- a/repos/system_upgrade/el7toel8/libraries/tests/test_isccfg.py ++++ b/repos/system_upgrade/el7toel8/libraries/tests/test_isccfg.py +@@ -116,6 +116,10 @@ view "v2" { + }; + """) + ++config_empty = isccfg.MockConfig('') ++ ++config_empty_include = isccfg.MockConfig('options { include "/dev/null"; };') ++ + + def check_in_section(parser, section, key, value): + """ Helper to check some section was found +@@ -343,5 +347,33 @@ def test_walk(): + assert 'dnssec-validation' not in state + + ++def test_empty_config(): ++ """ Test empty configuration """ ++ ++ callbacks = {} ++ ++ parser = isccfg.IscConfigParser(config_empty) ++ assert len(parser.FILES_TO_CHECK) == 1 ++ cfg = parser.FILES_TO_CHECK[0] ++ parser.walk(cfg.root_section(), callbacks) ++ assert cfg.buffer == '' ++ ++ ++def test_empty_include_config(): ++ """ Test empty configuration """ ++ ++ callbacks = {} ++ ++ parser = isccfg.IscConfigParser(config_empty_include) ++ assert len(parser.FILES_TO_CHECK) == 2 ++ cfg = parser.FILES_TO_CHECK[0] ++ parser.walk(cfg.root_section(), callbacks) ++ assert cfg.buffer == 'options { include "/dev/null"; };' ++ ++ null_cfg = parser.FILES_TO_CHECK[1] ++ parser.walk(null_cfg.root_section(), callbacks) ++ assert null_cfg.buffer == '' ++ ++ + if __name__ == '__main__': + test_key_views_lookaside() +-- +2.41.0 + diff --git a/0016-make-pylint-and-spellcheck-happy-again.patch b/0016-make-pylint-and-spellcheck-happy-again.patch new file mode 100644 index 0000000..479baf1 --- /dev/null +++ b/0016-make-pylint-and-spellcheck-happy-again.patch @@ -0,0 +1,209 @@ +From d74ff90e46c1acc2a16dc387a863f2aaf86f85d1 Mon Sep 17 00:00:00 2001 +From: PeterMocary +Date: Mon, 9 Oct 2023 23:35:30 +0200 +Subject: [PATCH 16/38] make pylint and spellcheck happy again + +--- + .pylintrc | 4 +++- + .../common/actors/checksaphana/libraries/checksaphana.py | 4 ++-- + .../actors/checktargetiso/libraries/check_target_iso.py | 2 +- + .../files/dracut/85sys-upgrade-redhat/do-upgrade.sh | 2 +- + .../actors/createisorepofile/libraries/create_iso_repofile.py | 2 +- + .../repositoriesmapping/libraries/repositoriesmapping.py | 2 +- + .../system_upgrade/common/actors/scancpu/libraries/scancpu.py | 2 +- + .../common/actors/scansaphana/libraries/scansaphana.py | 4 ++-- + .../actors/scantargetiso/libraries/scan_target_os_iso.py | 4 ++-- + .../actors/targetuserspacecreator/libraries/userspacegen.py | 4 ++-- + repos/system_upgrade/common/libraries/rhui.py | 2 +- + 
repos/system_upgrade/common/libraries/tests/test_rhsm.py | 2 +- + 12 files changed, 18 insertions(+), 16 deletions(-) + +diff --git a/.pylintrc b/.pylintrc +index 2ef31167..0adb7dcc 100644 +--- a/.pylintrc ++++ b/.pylintrc +@@ -54,7 +54,9 @@ disable= + duplicate-string-formatting-argument, # TMP: will be fixed in close future + consider-using-f-string, # sorry, not gonna happen, still have to support py2 + use-dict-literal, +- redundant-u-string-prefix # still have py2 to support ++ redundant-u-string-prefix, # still have py2 to support ++ logging-format-interpolation, ++ logging-not-lazy + + [FORMAT] + # Maximum number of characters on a single line. +diff --git a/repos/system_upgrade/common/actors/checksaphana/libraries/checksaphana.py b/repos/system_upgrade/common/actors/checksaphana/libraries/checksaphana.py +index 1b08f3d2..7cd83de8 100644 +--- a/repos/system_upgrade/common/actors/checksaphana/libraries/checksaphana.py ++++ b/repos/system_upgrade/common/actors/checksaphana/libraries/checksaphana.py +@@ -132,7 +132,7 @@ def _major_version_check(instance): + return False + return True + except (ValueError, IndexError): +- api.current_logger().warn( ++ api.current_logger().warning( + 'Failed to parse manifest release field for instance {}'.format(instance.name), exc_info=True) + return False + +@@ -164,7 +164,7 @@ def _sp_rev_patchlevel_check(instance, patchlevels): + return True + return False + # if not 'len(number) > 2 and number.isdigit()' +- api.current_logger().warn( ++ api.current_logger().warning( + 'Invalid rev-number field value `{}` in manifest for instance {}'.format(number, instance.name)) + return False + +diff --git a/repos/system_upgrade/common/actors/checktargetiso/libraries/check_target_iso.py b/repos/system_upgrade/common/actors/checktargetiso/libraries/check_target_iso.py +index b5b66901..fcb23028 100644 +--- a/repos/system_upgrade/common/actors/checktargetiso/libraries/check_target_iso.py ++++ b/repos/system_upgrade/common/actors/checktargetiso/libraries/check_target_iso.py +@@ -170,7 +170,7 @@ def perform_target_iso_checks(): + return + + if next(requested_target_iso_msg_iter, None): +- api.current_logger().warn('Received multiple msgs with target ISO to use.') ++ api.current_logger().warning('Received multiple msgs with target ISO to use.') + + # Cascade the inhibiting conditions so that we do not spam the user with inhibitors + is_iso_invalid = inhibit_if_not_valid_iso_file(target_iso) +diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh +index 491b85ec..c181c5cf 100755 +--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh ++++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh +@@ -130,7 +130,7 @@ ibdmp() { + # + # 1. encode tarball using base64 + # +- # 2. pre-pend line `chunks=CHUNKS,md5=MD5` where ++ # 2. 
prepend line `chunks=CHUNKS,md5=MD5` where + # MD5 is the MD5 digest of original tarball and + # CHUNKS is number of upcoming Base64 chunks + # +diff --git a/repos/system_upgrade/common/actors/createisorepofile/libraries/create_iso_repofile.py b/repos/system_upgrade/common/actors/createisorepofile/libraries/create_iso_repofile.py +index b4470b68..3f4f75e0 100644 +--- a/repos/system_upgrade/common/actors/createisorepofile/libraries/create_iso_repofile.py ++++ b/repos/system_upgrade/common/actors/createisorepofile/libraries/create_iso_repofile.py +@@ -13,7 +13,7 @@ def produce_repofile_if_iso_used(): + return + + if next(target_iso_msgs_iter, None): +- api.current_logger().warn('Received multiple TargetISInstallationImage messages, using the first one') ++ api.current_logger().warning('Received multiple TargetISInstallationImage messages, using the first one') + + # Mounting was successful, create a repofile to copy into target userspace + repofile_entry_template = ('[{repoid}]\n' +diff --git a/repos/system_upgrade/common/actors/repositoriesmapping/libraries/repositoriesmapping.py b/repos/system_upgrade/common/actors/repositoriesmapping/libraries/repositoriesmapping.py +index 416034ac..6f2b2e0f 100644 +--- a/repos/system_upgrade/common/actors/repositoriesmapping/libraries/repositoriesmapping.py ++++ b/repos/system_upgrade/common/actors/repositoriesmapping/libraries/repositoriesmapping.py +@@ -145,7 +145,7 @@ def _inhibit_upgrade(msg): + def _read_repofile(repofile): + # NOTE: what about catch StopActorExecution error when the file cannot be + # obtained -> then check whether old_repomap file exists and in such a case +- # inform user they have to provde the new repomap.json file (we have the ++ # inform user they have to provide the new repomap.json file (we have the + # warning now only which could be potentially overlooked) + repofile_data = load_data_asset(api.current_actor(), + repofile, +diff --git a/repos/system_upgrade/common/actors/scancpu/libraries/scancpu.py b/repos/system_upgrade/common/actors/scancpu/libraries/scancpu.py +index e5555f99..9de50fae 100644 +--- a/repos/system_upgrade/common/actors/scancpu/libraries/scancpu.py ++++ b/repos/system_upgrade/common/actors/scancpu/libraries/scancpu.py +@@ -133,7 +133,7 @@ def _find_deprecation_data_entries(lscpu): + if is_detected(lscpu, entry) + ] + +- api.current_logger().warn('Unsupported platform could not detect relevant CPU information') ++ api.current_logger().warning('Unsupported platform could not detect relevant CPU information') + return [] + + +diff --git a/repos/system_upgrade/common/actors/scansaphana/libraries/scansaphana.py b/repos/system_upgrade/common/actors/scansaphana/libraries/scansaphana.py +index 04195b57..99490477 100644 +--- a/repos/system_upgrade/common/actors/scansaphana/libraries/scansaphana.py ++++ b/repos/system_upgrade/common/actors/scansaphana/libraries/scansaphana.py +@@ -37,7 +37,7 @@ def parse_manifest(path): + # Most likely an empty line, but we're being permissive here and ignore failures. + # In the end it's all about having the right values available. + if line: +- api.current_logger().warn( ++ api.current_logger().warning( + 'Failed to parse line in manifest: {file}. Line was: `{line}`'.format(file=path, + line=line), + exc_info=True) +@@ -128,6 +128,6 @@ def get_instance_status(instance_number, sapcontrol_path, admin_name): + # In that case there are always more than 7 lines. 
+ return len(output['stdout'].split('\n')) > 7 + except CalledProcessError: +- api.current_logger().warn( ++ api.current_logger().warning( + 'Failed to retrieve SAP HANA instance status from sapcontrol - Considering it as not running.') + return False +diff --git a/repos/system_upgrade/common/actors/scantargetiso/libraries/scan_target_os_iso.py b/repos/system_upgrade/common/actors/scantargetiso/libraries/scan_target_os_iso.py +index 281389cf..a5f0750a 100644 +--- a/repos/system_upgrade/common/actors/scantargetiso/libraries/scan_target_os_iso.py ++++ b/repos/system_upgrade/common/actors/scantargetiso/libraries/scan_target_os_iso.py +@@ -18,8 +18,8 @@ def determine_rhel_version_from_iso_mountpoint(iso_mountpoint): + return '' # We did not determine anything + + if len(redhat_release_pkgs) > 1: +- api.current_logger().warn('Multiple packages with name redhat-release* found when ' +- 'determining RHEL version of the supplied installation ISO.') ++ api.current_logger().warning('Multiple packages with name redhat-release* found when ' ++ 'determining RHEL version of the supplied installation ISO.') + + redhat_release_pkg = redhat_release_pkgs[0] + +diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py +index 9dfa0f14..0982a796 100644 +--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py ++++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py +@@ -347,7 +347,7 @@ def _get_files_owned_by_rpms(context, dirpath, pkgs=None, recursive=False): + + def _copy_certificates(context, target_userspace): + """ +- Copy the needed cetificates into the container, but preserve original ones ++ Copy the needed certificates into the container, but preserve original ones + + Some certificates are already installed in the container and those are + default certificates for the target OS, so we preserve these. 
+@@ -378,7 +378,7 @@ def _copy_certificates(context, target_userspace): + + # The path original path of the broken symlink in the container + report_path = os.path.join(target_pki, os.path.relpath(src_path, backup_pki)) +- api.current_logger().warn('File {} is a broken symlink!'.format(report_path)) ++ api.current_logger().warning('File {} is a broken symlink!'.format(report_path)) + break + + src_path = next_path +diff --git a/repos/system_upgrade/common/libraries/rhui.py b/repos/system_upgrade/common/libraries/rhui.py +index 4578ecd2..14a91c42 100644 +--- a/repos/system_upgrade/common/libraries/rhui.py ++++ b/repos/system_upgrade/common/libraries/rhui.py +@@ -258,7 +258,7 @@ def gen_rhui_files_map(): + + def copy_rhui_data(context, provider): + """ +- Copy relevant RHUI cerificates and key into the target userspace container ++ Copy relevant RHUI certificates and key into the target userspace container + """ + rhui_dir = api.get_common_folder_path('rhui') + data_dir = os.path.join(rhui_dir, provider) +diff --git a/repos/system_upgrade/common/libraries/tests/test_rhsm.py b/repos/system_upgrade/common/libraries/tests/test_rhsm.py +index a6dbea96..957616f4 100644 +--- a/repos/system_upgrade/common/libraries/tests/test_rhsm.py ++++ b/repos/system_upgrade/common/libraries/tests/test_rhsm.py +@@ -249,7 +249,7 @@ def test_get_release_with_release_not_set(monkeypatch, actor_mocked, context_moc + + release = rhsm.get_release(context_mocked) + +- fail_description = 'The release information was obtained, even if "No release set" was repored by rhsm.' ++ fail_description = 'The release information was obtained, even if "No release set" was reported by rhsm.' + assert not release, fail_description + + +-- +2.41.0 + diff --git a/0017-Remove-TUV-from-supported-target-channels.patch b/0017-Remove-TUV-from-supported-target-channels.patch new file mode 100644 index 0000000..95604f9 --- /dev/null +++ b/0017-Remove-TUV-from-supported-target-channels.patch @@ -0,0 +1,93 @@ +From 84d6ce3073e646e8740b72a5e7edda056c1b324a Mon Sep 17 00:00:00 2001 +From: Martin Kluson +Date: Tue, 10 Oct 2023 14:57:02 +0200 +Subject: [PATCH 17/38] Remove TUV from supported target channels + +TUS (mispelled as TUV) is not suported channel for inplace upgrade, +removed from the code. + +Jira: OAMG-7288 +--- + commands/preupgrade/__init__.py | 2 +- + commands/upgrade/__init__.py | 2 +- + .../common/actors/setuptargetrepos/tests/test_repomapping.py | 4 ++-- + repos/system_upgrade/common/libraries/config/__init__.py | 2 +- + repos/system_upgrade/common/models/repositoriesmap.py | 2 +- + 5 files changed, 6 insertions(+), 6 deletions(-) + +diff --git a/commands/preupgrade/__init__.py b/commands/preupgrade/__init__.py +index 03209419..5a89069f 100644 +--- a/commands/preupgrade/__init__.py ++++ b/commands/preupgrade/__init__.py +@@ -25,7 +25,7 @@ from leapp.utils.output import beautify_actor_exception, report_errors, report_i + help='Enable specified repository. 
Can be used multiple times.') + @command_opt('channel', + help='Set preferred channel for the IPU target.', +- choices=['ga', 'tuv', 'e4s', 'eus', 'aus'], ++ choices=['ga', 'e4s', 'eus', 'aus'], + value_type=str.lower) # This allows the choices to be case insensitive + @command_opt('iso', help='Use provided target RHEL installation image to perform the in-place upgrade.') + @command_opt('target', choices=command_utils.get_supported_target_versions(), +diff --git a/commands/upgrade/__init__.py b/commands/upgrade/__init__.py +index 18edcb9b..c42b7cba 100644 +--- a/commands/upgrade/__init__.py ++++ b/commands/upgrade/__init__.py +@@ -31,7 +31,7 @@ from leapp.utils.output import beautify_actor_exception, report_errors, report_i + help='Enable specified repository. Can be used multiple times.') + @command_opt('channel', + help='Set preferred channel for the IPU target.', +- choices=['ga', 'tuv', 'e4s', 'eus', 'aus'], ++ choices=['ga', 'e4s', 'eus', 'aus'], + value_type=str.lower) # This allows the choices to be case insensitive + @command_opt('iso', help='Use provided target RHEL installation image to perform the in-place upgrade.') + @command_opt('target', choices=command_utils.get_supported_target_versions(), +diff --git a/repos/system_upgrade/common/actors/setuptargetrepos/tests/test_repomapping.py b/repos/system_upgrade/common/actors/setuptargetrepos/tests/test_repomapping.py +index 53897614..ba5906f4 100644 +--- a/repos/system_upgrade/common/actors/setuptargetrepos/tests/test_repomapping.py ++++ b/repos/system_upgrade/common/actors/setuptargetrepos/tests/test_repomapping.py +@@ -614,14 +614,14 @@ def test_get_expected_target_pesid_repos_with_priority_channel_set(monkeypatch): + make_pesid_repo('pesid1', '7', 'pesid1-repoid-ga'), + make_pesid_repo('pesid2', '8', 'pesid2-repoid-ga'), + make_pesid_repo('pesid2', '8', 'pesid2-repoid-eus', channel='eus'), +- make_pesid_repo('pesid2', '8', 'pesid2-repoid-tuv', channel='tuv'), ++ make_pesid_repo('pesid2', '8', 'pesid2-repoid-aus', channel='aus'), + make_pesid_repo('pesid3', '8', 'pesid3-repoid-ga') + ] + ) + + handler = RepoMapDataHandler(repositories_mapping) + # Set defaults to verify that the priority channel is not overwritten by defaults +- handler.set_default_channels(['tuv', 'ga']) ++ handler.set_default_channels(['aus', 'ga']) + target_repoids = handler.get_expected_target_pesid_repos(['pesid1-repoid-ga']) + + fail_description = 'get_expected_target_peid_repos does not correctly respect preferred channel.' +diff --git a/repos/system_upgrade/common/libraries/config/__init__.py b/repos/system_upgrade/common/libraries/config/__init__.py +index c37a35cf..b3697a4d 100644 +--- a/repos/system_upgrade/common/libraries/config/__init__.py ++++ b/repos/system_upgrade/common/libraries/config/__init__.py +@@ -2,7 +2,7 @@ from leapp.exceptions import StopActorExecutionError + from leapp.libraries.stdlib import api + + # The devel variable for target product channel can also contain 'beta' +-SUPPORTED_TARGET_CHANNELS = {'ga', 'tuv', 'e4s', 'eus', 'aus'} ++SUPPORTED_TARGET_CHANNELS = {'ga', 'e4s', 'eus', 'aus'} + CONSUMED_DATA_STREAM_ID = '2.0' + + +diff --git a/repos/system_upgrade/common/models/repositoriesmap.py b/repos/system_upgrade/common/models/repositoriesmap.py +index 12639e19..7ef0bdb4 100644 +--- a/repos/system_upgrade/common/models/repositoriesmap.py ++++ b/repos/system_upgrade/common/models/repositoriesmap.py +@@ -61,7 +61,7 @@ class PESIDRepositoryEntry(Model): + too. 
+ """ + +- channel = fields.StringEnum(['ga', 'tuv', 'e4s', 'eus', 'aus', 'beta']) ++ channel = fields.StringEnum(['ga', 'e4s', 'eus', 'aus', 'beta']) + """ + The 'channel' of the repository. + +-- +2.41.0 + diff --git a/0018-Transition-systemd-service-states-during-upgrade.patch b/0018-Transition-systemd-service-states-during-upgrade.patch new file mode 100644 index 0000000..95bbea5 --- /dev/null +++ b/0018-Transition-systemd-service-states-during-upgrade.patch @@ -0,0 +1,531 @@ +From f50de2d3f541ca64934b4488dd1a403c8783a5da Mon Sep 17 00:00:00 2001 +From: Matej Matuska +Date: Tue, 14 Mar 2023 23:26:30 +0100 +Subject: [PATCH 18/38] Transition systemd service states during upgrade + +Sometimes after the upgrade some services end up disabled even if they +have been enabled on the source system. + +There are already two separate actors that fix this for +`device_cio_free.service` and `rsyncd.service`. + +A new actor `transition-systemd-services-states` handles this generally +for all services. A "desired" state is determined depending on state and +vendor preset of both source and target system and a +SystemdServicesTasks message is produced with each service that isn't +already in the "desired" state. + +Jira ref.: OAMG-1745 +--- + .../transitionsystemdservicesstates/actor.py | 53 +++++ + .../transitionsystemdservicesstates.py | 211 +++++++++++++++++ + .../test_transitionsystemdservicesstates.py | 219 ++++++++++++++++++ + 3 files changed, 483 insertions(+) + create mode 100644 repos/system_upgrade/common/actors/systemd/transitionsystemdservicesstates/actor.py + create mode 100644 repos/system_upgrade/common/actors/systemd/transitionsystemdservicesstates/libraries/transitionsystemdservicesstates.py + create mode 100644 repos/system_upgrade/common/actors/systemd/transitionsystemdservicesstates/tests/test_transitionsystemdservicesstates.py + +diff --git a/repos/system_upgrade/common/actors/systemd/transitionsystemdservicesstates/actor.py b/repos/system_upgrade/common/actors/systemd/transitionsystemdservicesstates/actor.py +new file mode 100644 +index 00000000..139f9f6b +--- /dev/null ++++ b/repos/system_upgrade/common/actors/systemd/transitionsystemdservicesstates/actor.py +@@ -0,0 +1,53 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import transitionsystemdservicesstates ++from leapp.models import ( ++ SystemdServicesInfoSource, ++ SystemdServicesInfoTarget, ++ SystemdServicesPresetInfoSource, ++ SystemdServicesPresetInfoTarget, ++ SystemdServicesTasks ++) ++from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag ++ ++ ++class TransitionSystemdServicesStates(Actor): ++ """ ++ Transition states of systemd services between source and target systems ++ ++ Services on the target system might end up in incorrect/unexpected state ++ after an upgrade. This actor puts such services into correct/expected ++ state. ++ ++ A SystemdServicesTasks message is produced containing all tasks that need ++ to be executed to put all services into the correct states. 
++ ++ The correct states are determined according to following rules: ++ - All enabled services remain enabled ++ - All masked services remain masked ++ - Disabled services will be enabled if they are disabled by default on ++ the source system (by preset files), but enabled by default on target ++ system, otherwise they will remain disabled ++ - Runtime enabled service (state == runtime-enabled) are treated ++ the same as disabled services ++ - Services in other states are not handled as they can't be ++ enabled/disabled ++ ++ Two reports are generated: ++ - Report with services that were corrected from disabled to enabled on ++ the upgraded system ++ - Report with services that were newly enabled on the upgraded system ++ by a preset ++ """ ++ ++ name = 'transition_systemd_services_states' ++ consumes = ( ++ SystemdServicesInfoSource, ++ SystemdServicesInfoTarget, ++ SystemdServicesPresetInfoSource, ++ SystemdServicesPresetInfoTarget ++ ) ++ produces = (SystemdServicesTasks,) ++ tags = (ApplicationsPhaseTag, IPUWorkflowTag) ++ ++ def process(self): ++ transitionsystemdservicesstates.process() +diff --git a/repos/system_upgrade/common/actors/systemd/transitionsystemdservicesstates/libraries/transitionsystemdservicesstates.py b/repos/system_upgrade/common/actors/systemd/transitionsystemdservicesstates/libraries/transitionsystemdservicesstates.py +new file mode 100644 +index 00000000..494271ae +--- /dev/null ++++ b/repos/system_upgrade/common/actors/systemd/transitionsystemdservicesstates/libraries/transitionsystemdservicesstates.py +@@ -0,0 +1,211 @@ ++from leapp import reporting ++from leapp.exceptions import StopActorExecutionError ++from leapp.libraries.stdlib import api ++from leapp.models import ( ++ SystemdServicesInfoSource, ++ SystemdServicesInfoTarget, ++ SystemdServicesPresetInfoSource, ++ SystemdServicesPresetInfoTarget, ++ SystemdServicesTasks ++) ++ ++FMT_LIST_SEPARATOR = "\n - " ++ ++ ++def _get_desired_service_state(state_source, preset_source, preset_target): ++ """ ++ Get the desired service state on the target system ++ ++ :param state_source: State on the source system ++ :param preset_source: Preset on the source system ++ :param preset_target: Preset on the target system ++ :return: The desired state on the target system ++ """ ++ ++ if state_source in ("disabled", "enabled-runtime"): ++ if preset_source == "disable": ++ return preset_target + "d" # use the default from target ++ ++ return state_source ++ ++ ++def _get_desired_states( ++ services_source, presets_source, services_target, presets_target ++): ++ "Get the states that services should be in on the target system" ++ desired_states = {} ++ ++ for service in services_target: ++ state_source = services_source.get(service.name) ++ preset_target = _get_service_preset(service.name, presets_target) ++ preset_source = _get_service_preset(service.name, presets_source) ++ ++ desired_state = _get_desired_service_state( ++ state_source, preset_source, preset_target ++ ) ++ desired_states[service.name] = desired_state ++ ++ return desired_states ++ ++ ++def _get_service_task(service_name, desired_state, state_target, tasks): ++ """ ++ Get the task to set the desired state of the service on the target system ++ ++ :param service_name: Then name of the service ++ :param desired_state: The state the service should set to ++ :param state_target: State on the target system ++ :param tasks: The tasks to append the task to ++ """ ++ if desired_state == state_target: ++ return ++ ++ if desired_state == "enabled": ++ 
tasks.to_enable.append(service_name) ++ if desired_state == "disabled": ++ tasks.to_disable.append(service_name) ++ ++ ++def _get_service_preset(service_name, presets): ++ preset = presets.get(service_name) ++ if not preset: ++ # shouldn't really happen as there is usually a `disable *` glob as ++ # the last statement in the presets ++ api.current_logger().debug( ++ 'No presets found for service "{}", assuming "disable"'.format(service_name) ++ ) ++ return "disable" ++ return preset ++ ++ ++def _filter_services(services_source, services_target): ++ """ ++ Filter out irrelevant services ++ """ ++ filtered = [] ++ for service in services_target: ++ if service.state not in ("enabled", "disabled", "enabled-runtime"): ++ # Enabling/disabling of services is only relevant to these states ++ continue ++ ++ state_source = services_source.get(service.name) ++ if not state_source: ++ # The service doesn't exist on the source system ++ continue ++ ++ if state_source == "masked-runtime": ++ # TODO(mmatuska): It's not possible to get the persistent ++ # (non-runtime) state of a service with `systemctl`. One solution ++ # might be to check symlinks ++ api.current_logger().debug( ++ 'Skipping service in "masked-runtime" state: {}'.format(service.name) ++ ) ++ continue ++ ++ filtered.append(service) ++ ++ return filtered ++ ++ ++def _get_required_tasks(services_target, desired_states): ++ """ ++ Get the required tasks to set the services on the target system to their desired state ++ ++ :return: The tasks required to be executed ++ :rtype: SystemdServicesTasks ++ """ ++ tasks = SystemdServicesTasks() ++ ++ for service in services_target: ++ desired_state = desired_states[service.name] ++ _get_service_task(service.name, desired_state, service.state, tasks) ++ ++ return tasks ++ ++ ++def _report_kept_enabled(tasks): ++ summary = ( ++ "Systemd services which were enabled on the system before the upgrade" ++ " were kept enabled after the upgrade. 
" ++ ) ++ if tasks: ++ summary += ( ++ "The following services were originally disabled on the upgraded system" ++ " and Leapp attempted to enable them:{}{}" ++ ).format(FMT_LIST_SEPARATOR, FMT_LIST_SEPARATOR.join(sorted(tasks.to_enable))) ++ # TODO(mmatuska): When post-upgrade reports are implemented in ++ # `setsystemdservicesstates actor, add a note here to check the reports ++ # if the enabling failed ++ ++ reporting.create_report( ++ [ ++ reporting.Title("Previously enabled systemd services were kept enabled"), ++ reporting.Summary(summary), ++ reporting.Severity(reporting.Severity.INFO), ++ reporting.Groups([reporting.Groups.POST]), ++ ] ++ ) ++ ++ ++def _get_newly_enabled(services_source, desired_states): ++ newly_enabled = [] ++ for service, state in desired_states.items(): ++ state_source = services_source[service] ++ if state_source == "disabled" and state == "enabled": ++ newly_enabled.append(service) ++ ++ return newly_enabled ++ ++ ++def _report_newly_enabled(newly_enabled): ++ summary = ( ++ "The following services were disabled before the upgrade and were set" ++ "to enabled by a systemd preset after the upgrade:{}{}.".format( ++ FMT_LIST_SEPARATOR, FMT_LIST_SEPARATOR.join(sorted(newly_enabled)) ++ ) ++ ) ++ ++ reporting.create_report( ++ [ ++ reporting.Title("Some systemd services were newly enabled"), ++ reporting.Summary(summary), ++ reporting.Severity(reporting.Severity.INFO), ++ reporting.Groups([reporting.Groups.POST]), ++ ] ++ ) ++ ++ ++def _expect_message(model): ++ """ ++ Get the expected message or throw an error ++ """ ++ message = next(api.consume(model), None) ++ if not message: ++ raise StopActorExecutionError( ++ "Expected {} message, but didn't get any".format(model.__name__) ++ ) ++ return message ++ ++ ++def process(): ++ services_source = _expect_message(SystemdServicesInfoSource).service_files ++ services_target = _expect_message(SystemdServicesInfoTarget).service_files ++ presets_source = _expect_message(SystemdServicesPresetInfoSource).presets ++ presets_target = _expect_message(SystemdServicesPresetInfoTarget).presets ++ ++ services_source = dict((p.name, p.state) for p in services_source) ++ presets_source = dict((p.service, p.state) for p in presets_source) ++ presets_target = dict((p.service, p.state) for p in presets_target) ++ ++ services_target = _filter_services(services_source, services_target) ++ ++ desired_states = _get_desired_states( ++ services_source, presets_source, services_target, presets_target ++ ) ++ tasks = _get_required_tasks(services_target, desired_states) ++ ++ api.produce(tasks) ++ _report_kept_enabled(tasks) ++ ++ newly_enabled = _get_newly_enabled(services_source, desired_states) ++ _report_newly_enabled(newly_enabled) +diff --git a/repos/system_upgrade/common/actors/systemd/transitionsystemdservicesstates/tests/test_transitionsystemdservicesstates.py b/repos/system_upgrade/common/actors/systemd/transitionsystemdservicesstates/tests/test_transitionsystemdservicesstates.py +new file mode 100644 +index 00000000..a19afc7f +--- /dev/null ++++ b/repos/system_upgrade/common/actors/systemd/transitionsystemdservicesstates/tests/test_transitionsystemdservicesstates.py +@@ -0,0 +1,219 @@ ++import pytest ++ ++from leapp import reporting ++from leapp.libraries.actor import transitionsystemdservicesstates ++from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked, produce_mocked ++from leapp.libraries.stdlib import api ++from leapp.models import ( ++ SystemdServiceFile, ++ SystemdServicePreset, ++ 
SystemdServicesInfoSource, ++ SystemdServicesInfoTarget, ++ SystemdServicesPresetInfoSource, ++ SystemdServicesPresetInfoTarget, ++ SystemdServicesTasks ++) ++ ++ ++@pytest.mark.parametrize( ++ "state_source, preset_source, preset_target, expected", ++ ( ++ ["enabled", "disable", "enable", "enabled"], ++ ["enabled", "disable", "disable", "enabled"], ++ ["disabled", "disable", "disable", "disabled"], ++ ["disabled", "disable", "enable", "enabled"], ++ ["masked", "disable", "enable", "masked"], ++ ["masked", "disable", "disable", "masked"], ++ ["enabled", "enable", "enable", "enabled"], ++ ["enabled", "enable", "disable", "enabled"], ++ ["masked", "enable", "enable", "masked"], ++ ["masked", "enable", "disable", "masked"], ++ ["disabled", "enable", "enable", "disabled"], ++ ["disabled", "enable", "disable", "disabled"], ++ ), ++) ++def test_get_desired_service_state( ++ state_source, preset_source, preset_target, expected ++): ++ target_state = transitionsystemdservicesstates._get_desired_service_state( ++ state_source, preset_source, preset_target ++ ) ++ ++ assert target_state == expected ++ ++ ++@pytest.mark.parametrize( ++ "desired_state, state_target, expected", ++ ( ++ ("enabled", "enabled", SystemdServicesTasks()), ++ ("enabled", "disabled", SystemdServicesTasks(to_enable=["test.service"])), ++ ("disabled", "enabled", SystemdServicesTasks(to_disable=["test.service"])), ++ ("disabled", "disabled", SystemdServicesTasks()), ++ ), ++) ++def test_get_service_task(monkeypatch, desired_state, state_target, expected): ++ def _get_desired_service_state_mocked(*args): ++ return desired_state ++ ++ monkeypatch.setattr( ++ transitionsystemdservicesstates, ++ "_get_desired_service_state", ++ _get_desired_service_state_mocked, ++ ) ++ ++ tasks = SystemdServicesTasks() ++ transitionsystemdservicesstates._get_service_task( ++ "test.service", desired_state, state_target, tasks ++ ) ++ assert tasks == expected ++ ++ ++def test_filter_services_services_filtered(): ++ services_source = { ++ "test2.service": "static", ++ "test3.service": "masked", ++ "test4.service": "indirect", ++ "test5.service": "indirect", ++ "test6.service": "indirect", ++ } ++ services_target = [ ++ SystemdServiceFile(name="test1.service", state="enabled"), ++ SystemdServiceFile(name="test2.service", state="masked"), ++ SystemdServiceFile(name="test3.service", state="indirect"), ++ SystemdServiceFile(name="test4.service", state="static"), ++ SystemdServiceFile(name="test5.service", state="generated"), ++ SystemdServiceFile(name="test6.service", state="masked-runtime"), ++ ] ++ ++ filtered = transitionsystemdservicesstates._filter_services( ++ services_source, services_target ++ ) ++ ++ assert not filtered ++ ++ ++def test_filter_services_services_not_filtered(): ++ services_source = { ++ "test1.service": "enabled", ++ "test2.service": "disabled", ++ "test3.service": "static", ++ "test4.service": "indirect", ++ } ++ services_target = [ ++ SystemdServiceFile(name="test1.service", state="enabled"), ++ SystemdServiceFile(name="test2.service", state="disabled"), ++ SystemdServiceFile(name="test3.service", state="enabled-runtime"), ++ SystemdServiceFile(name="test4.service", state="enabled"), ++ ] ++ ++ filtered = transitionsystemdservicesstates._filter_services( ++ services_source, services_target ++ ) ++ ++ assert len(filtered) == len(services_target) ++ ++ ++@pytest.mark.parametrize( ++ "presets", ++ [ ++ dict(), ++ {"other.service": "enable"}, ++ ], ++) ++def test_service_preset_missing_presets(presets): ++ preset = 
transitionsystemdservicesstates._get_service_preset( ++ "test.service", presets ++ ) ++ assert preset == "disable" ++ ++ ++def test_tasks_produced_reports_created(monkeypatch): ++ services_source = [ ++ SystemdServiceFile(name="rsyncd.service", state="enabled"), ++ SystemdServiceFile(name="test.service", state="enabled"), ++ ] ++ service_info_source = SystemdServicesInfoSource(service_files=services_source) ++ ++ presets_source = [ ++ SystemdServicePreset(service="rsyncd.service", state="enable"), ++ SystemdServicePreset(service="test.service", state="enable"), ++ ] ++ preset_info_source = SystemdServicesPresetInfoSource(presets=presets_source) ++ ++ services_target = [ ++ SystemdServiceFile(name="rsyncd.service", state="disabled"), ++ SystemdServiceFile(name="test.service", state="enabled"), ++ ] ++ service_info_target = SystemdServicesInfoTarget(service_files=services_target) ++ ++ presets_target = [ ++ SystemdServicePreset(service="rsyncd.service", state="enable"), ++ SystemdServicePreset(service="test.service", state="enable"), ++ ] ++ preset_info_target = SystemdServicesPresetInfoTarget(presets=presets_target) ++ ++ monkeypatch.setattr( ++ api, ++ "current_actor", ++ CurrentActorMocked( ++ msgs=[ ++ service_info_source, ++ service_info_target, ++ preset_info_source, ++ preset_info_target, ++ ] ++ ), ++ ) ++ monkeypatch.setattr(api, "produce", produce_mocked()) ++ created_reports = create_report_mocked() ++ monkeypatch.setattr(reporting, "create_report", created_reports) ++ ++ expected_tasks = SystemdServicesTasks(to_enable=["rsyncd.service"], to_disable=[]) ++ transitionsystemdservicesstates.process() ++ ++ assert created_reports.called == 2 ++ assert api.produce.called ++ assert api.produce.model_instances[0].to_enable == expected_tasks.to_enable ++ assert api.produce.model_instances[0].to_disable == expected_tasks.to_disable ++ ++ ++def test_report_kept_enabled(monkeypatch): ++ created_reports = create_report_mocked() ++ monkeypatch.setattr(reporting, "create_report", created_reports) ++ ++ tasks = SystemdServicesTasks( ++ to_enable=["test.service", "other.service"], to_disable=["another.service"] ++ ) ++ transitionsystemdservicesstates._report_kept_enabled(tasks) ++ ++ assert created_reports.called ++ assert all([s in created_reports.report_fields["summary"] for s in tasks.to_enable]) ++ ++ ++def test_get_newly_enabled(): ++ services_source = { ++ "test.service": "disabled", ++ "other.service": "enabled", ++ "another.service": "enabled", ++ } ++ desired_states = { ++ "test.service": "enabled", ++ "other.service": "enabled", ++ "another.service": "disabled", ++ } ++ ++ newly_enabled = transitionsystemdservicesstates._get_newly_enabled( ++ services_source, desired_states ++ ) ++ assert newly_enabled == ['test.service'] ++ ++ ++def test_report_newly_enabled(monkeypatch): ++ created_reports = create_report_mocked() ++ monkeypatch.setattr(reporting, "create_report", created_reports) ++ ++ newly_enabled = ["test.service", "other.service"] ++ transitionsystemdservicesstates._report_newly_enabled(newly_enabled) ++ ++ assert created_reports.called ++ assert all([s in created_reports.report_fields["summary"] for s in newly_enabled]) +-- +2.41.0 + diff --git a/0019-Remove-obsoleted-enablersyncdservice-actor.patch b/0019-Remove-obsoleted-enablersyncdservice-actor.patch new file mode 100644 index 0000000..e19a4fb --- /dev/null +++ b/0019-Remove-obsoleted-enablersyncdservice-actor.patch @@ -0,0 +1,190 @@ +From bea0f89bd858736418a535de37ddcfeef0ec4d31 Mon Sep 17 00:00:00 2001 +From: Matej Matuska 
+Date: Wed, 15 Mar 2023 16:35:35 +0100 +Subject: [PATCH 19/38] Remove obsoleted enablersyncdservice actor + +The `transitionsystemdservicesstates` actor now handles all such +services generically, which makes this actor obsolete. +--- + .../transitionsystemdservicesstates.py | 10 +++--- + .../test_transitionsystemdservicesstates.py | 33 +++++++++++++++---- + .../actors/enablersyncdservice/actor.py | 21 ------------ + .../libraries/enablersyncdservice.py | 21 ------------ + .../tests/test_enablersyncdservice.py | 24 -------------- + 5 files changed, 32 insertions(+), 77 deletions(-) + delete mode 100644 repos/system_upgrade/el7toel8/actors/enablersyncdservice/actor.py + delete mode 100644 repos/system_upgrade/el7toel8/actors/enablersyncdservice/libraries/enablersyncdservice.py + delete mode 100644 repos/system_upgrade/el7toel8/actors/enablersyncdservice/tests/test_enablersyncdservice.py + +diff --git a/repos/system_upgrade/common/actors/systemd/transitionsystemdservicesstates/libraries/transitionsystemdservicesstates.py b/repos/system_upgrade/common/actors/systemd/transitionsystemdservicesstates/libraries/transitionsystemdservicesstates.py +index 494271ae..b487366b 100644 +--- a/repos/system_upgrade/common/actors/systemd/transitionsystemdservicesstates/libraries/transitionsystemdservicesstates.py ++++ b/repos/system_upgrade/common/actors/systemd/transitionsystemdservicesstates/libraries/transitionsystemdservicesstates.py +@@ -130,8 +130,8 @@ def _report_kept_enabled(tasks): + ) + if tasks: + summary += ( +- "The following services were originally disabled on the upgraded system" +- " and Leapp attempted to enable them:{}{}" ++ "The following services were originally disabled by preset on the" ++ " upgraded system and Leapp attempted to enable them:{}{}" + ).format(FMT_LIST_SEPARATOR, FMT_LIST_SEPARATOR.join(sorted(tasks.to_enable))) + # TODO(mmatuska): When post-upgrade reports are implemented in + # `setsystemdservicesstates actor, add a note here to check the reports +@@ -193,9 +193,9 @@ def process(): + presets_source = _expect_message(SystemdServicesPresetInfoSource).presets + presets_target = _expect_message(SystemdServicesPresetInfoTarget).presets + +- services_source = dict((p.name, p.state) for p in services_source) +- presets_source = dict((p.service, p.state) for p in presets_source) +- presets_target = dict((p.service, p.state) for p in presets_target) ++ services_source = {p.name: p.state for p in services_source} ++ presets_source = {p.service: p.state for p in presets_source} ++ presets_target = {p.service: p.state for p in presets_target} + + services_target = _filter_services(services_source, services_target) + +diff --git a/repos/system_upgrade/common/actors/systemd/transitionsystemdservicesstates/tests/test_transitionsystemdservicesstates.py b/repos/system_upgrade/common/actors/systemd/transitionsystemdservicesstates/tests/test_transitionsystemdservicesstates.py +index a19afc7f..e0611859 100644 +--- a/repos/system_upgrade/common/actors/systemd/transitionsystemdservicesstates/tests/test_transitionsystemdservicesstates.py ++++ b/repos/system_upgrade/common/actors/systemd/transitionsystemdservicesstates/tests/test_transitionsystemdservicesstates.py +@@ -177,17 +177,38 @@ def test_tasks_produced_reports_created(monkeypatch): + assert api.produce.model_instances[0].to_disable == expected_tasks.to_disable + + +-def test_report_kept_enabled(monkeypatch): ++@pytest.mark.parametrize( ++ "tasks, expect_extended_summary", ++ ( ++ ( ++ SystemdServicesTasks( ++ 
to_enable=["test.service", "other.service"], ++ to_disable=["another.service"], ++ ), ++ True, ++ ), ++ (None, False), ++ ), ++) ++def test_report_kept_enabled(monkeypatch, tasks, expect_extended_summary): + created_reports = create_report_mocked() + monkeypatch.setattr(reporting, "create_report", created_reports) + +- tasks = SystemdServicesTasks( +- to_enable=["test.service", "other.service"], to_disable=["another.service"] +- ) + transitionsystemdservicesstates._report_kept_enabled(tasks) + ++ extended_summary_str = ( ++ "The following services were originally disabled by preset on the" ++ " upgraded system and Leapp attempted to enable them" ++ ) ++ + assert created_reports.called +- assert all([s in created_reports.report_fields["summary"] for s in tasks.to_enable]) ++ if expect_extended_summary: ++ assert extended_summary_str in created_reports.report_fields["summary"] ++ assert all( ++ [s in created_reports.report_fields["summary"] for s in tasks.to_enable] ++ ) ++ else: ++ assert extended_summary_str not in created_reports.report_fields["summary"] + + + def test_get_newly_enabled(): +@@ -205,7 +226,7 @@ def test_get_newly_enabled(): + newly_enabled = transitionsystemdservicesstates._get_newly_enabled( + services_source, desired_states + ) +- assert newly_enabled == ['test.service'] ++ assert newly_enabled == ["test.service"] + + + def test_report_newly_enabled(monkeypatch): +diff --git a/repos/system_upgrade/el7toel8/actors/enablersyncdservice/actor.py b/repos/system_upgrade/el7toel8/actors/enablersyncdservice/actor.py +deleted file mode 100644 +index bdf2e63e..00000000 +--- a/repos/system_upgrade/el7toel8/actors/enablersyncdservice/actor.py ++++ /dev/null +@@ -1,21 +0,0 @@ +-from leapp.actors import Actor +-from leapp.libraries.actor import enablersyncdservice +-from leapp.models import SystemdServicesInfoSource, SystemdServicesTasks +-from leapp.tags import ChecksPhaseTag, IPUWorkflowTag +- +- +-class EnableDeviceCioFreeService(Actor): +- """ +- Enables rsyncd.service systemd service if it is enabled on source system +- +- After an upgrade this service ends up disabled even if it was enabled on +- the source system. 
+- """ +- +- name = 'enable_rsyncd_service' +- consumes = (SystemdServicesInfoSource,) +- produces = (SystemdServicesTasks,) +- tags = (ChecksPhaseTag, IPUWorkflowTag) +- +- def process(self): +- enablersyncdservice.process() +diff --git a/repos/system_upgrade/el7toel8/actors/enablersyncdservice/libraries/enablersyncdservice.py b/repos/system_upgrade/el7toel8/actors/enablersyncdservice/libraries/enablersyncdservice.py +deleted file mode 100644 +index 216ebca9..00000000 +--- a/repos/system_upgrade/el7toel8/actors/enablersyncdservice/libraries/enablersyncdservice.py ++++ /dev/null +@@ -1,21 +0,0 @@ +-from leapp.exceptions import StopActorExecutionError +-from leapp.libraries.stdlib import api +-from leapp.models import SystemdServicesInfoSource, SystemdServicesTasks +- +-SERVICE_NAME = "rsyncd.service" +- +- +-def _service_enabled_source(service_info, name): +- service_file = next((s for s in service_info.service_files if s.name == name), None) +- return service_file and service_file.state == "enabled" +- +- +-def process(): +- service_info_source = next(api.consume(SystemdServicesInfoSource), None) +- if not service_info_source: +- raise StopActorExecutionError( +- "Expected SystemdServicesInfoSource message, but didn't get any" +- ) +- +- if _service_enabled_source(service_info_source, SERVICE_NAME): +- api.produce(SystemdServicesTasks(to_enable=[SERVICE_NAME])) +diff --git a/repos/system_upgrade/el7toel8/actors/enablersyncdservice/tests/test_enablersyncdservice.py b/repos/system_upgrade/el7toel8/actors/enablersyncdservice/tests/test_enablersyncdservice.py +deleted file mode 100644 +index 34a25afe..00000000 +--- a/repos/system_upgrade/el7toel8/actors/enablersyncdservice/tests/test_enablersyncdservice.py ++++ /dev/null +@@ -1,24 +0,0 @@ +-import pytest +- +-from leapp.libraries.actor import enablersyncdservice +-from leapp.libraries.common.testutils import CurrentActorMocked, produce_mocked +-from leapp.libraries.stdlib import api +-from leapp.models import SystemdServiceFile, SystemdServicesInfoSource, SystemdServicesTasks +- +- +-@pytest.mark.parametrize('service_file, should_produce', [ +- (SystemdServiceFile(name='rsyncd.service', state='enabled'), True), +- (SystemdServiceFile(name='rsyncd.service', state='disabled'), False), +- (SystemdServiceFile(name='not-rsyncd.service', state='enabled'), False), +- (SystemdServiceFile(name='not-rsyncd.service', state='disabled'), False), +-]) +-def test_task_produced(monkeypatch, service_file, should_produce): +- service_info = SystemdServicesInfoSource(service_files=[service_file]) +- monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[service_info])) +- monkeypatch.setattr(api, "produce", produce_mocked()) +- +- enablersyncdservice.process() +- +- assert api.produce.called == should_produce +- if should_produce: +- assert api.produce.model_instances[0].to_enable == ['rsyncd.service'] +-- +2.41.0 + diff --git a/0020-default-to-NO_RHSM-mode-when-subscription-manager-is.patch b/0020-default-to-NO_RHSM-mode-when-subscription-manager-is.patch new file mode 100644 index 0000000..b4dba86 --- /dev/null +++ b/0020-default-to-NO_RHSM-mode-when-subscription-manager-is.patch @@ -0,0 +1,26 @@ +From 6661e496143c47e92cd1d83ed1e4f1da8d0d617a Mon Sep 17 00:00:00 2001 +From: Evgeni Golov +Date: Sat, 21 Oct 2023 16:26:17 +0200 +Subject: [PATCH 20/38] default to NO_RHSM mode when subscription-manager is + not found + +--- + commands/upgrade/util.py | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/commands/upgrade/util.py 
b/commands/upgrade/util.py +index b52da25c..b11265ee 100644 +--- a/commands/upgrade/util.py ++++ b/commands/upgrade/util.py +@@ -191,6 +191,8 @@ def prepare_configuration(args): + os.environ['LEAPP_UNSUPPORTED'] = '0' if os.getenv('LEAPP_UNSUPPORTED', '0') == '0' else '1' + if args.no_rhsm: + os.environ['LEAPP_NO_RHSM'] = '1' ++ elif not os.path.exists('/usr/sbin/subscription-manager'): ++ os.environ['LEAPP_NO_RHSM'] = '1' + elif os.getenv('LEAPP_NO_RHSM') != '1': + os.environ['LEAPP_NO_RHSM'] = os.getenv('LEAPP_DEVEL_SKIP_RHSM', '0') + +-- +2.41.0 + diff --git a/0021-call-correct-mkdir-when-trying-to-create-etc-rhsm-fa.patch b/0021-call-correct-mkdir-when-trying-to-create-etc-rhsm-fa.patch new file mode 100644 index 0000000..136b717 --- /dev/null +++ b/0021-call-correct-mkdir-when-trying-to-create-etc-rhsm-fa.patch @@ -0,0 +1,55 @@ +From 17c88d9451774cd3910f81eaa889d4ff14615e1c Mon Sep 17 00:00:00 2001 +From: Evgeni Golov +Date: Mon, 30 Oct 2023 17:36:23 +0100 +Subject: [PATCH 21/38] call correct mkdir when trying to create + /etc/rhsm/facts (#1132) + +os.path has no mkdir, but os does. + +traceback without the patch: + + Traceback (most recent call last): + File "/bin/leapp", line 11, in + load_entry_point('leapp==0.16.0', 'console_scripts', 'leapp')() + File "/usr/lib/python3.6/site-packages/leapp/cli/__init__.py", line 45, in main + cli.command.execute('leapp version {}'.format(VERSION)) + File "/usr/lib/python3.6/site-packages/leapp/utils/clicmd.py", line 111, in execute + args.func(args) + File "/usr/lib/python3.6/site-packages/leapp/utils/clicmd.py", line 133, in called + self.target(args) + File "/usr/lib/python3.6/site-packages/leapp/cli/commands/upgrade/breadcrumbs.py", line 170, in wrapper + breadcrumbs.save() + File "/usr/lib/python3.6/site-packages/leapp/cli/commands/upgrade/breadcrumbs.py", line 116, in save + self._save_rhsm_facts(doc['activities']) + File "/usr/lib/python3.6/site-packages/leapp/cli/commands/upgrade/breadcrumbs.py", line 64, in _save_rhsm_facts + os.path.mkdir('/etc/rhsm/facts') + AttributeError: module 'posixpath' has no attribute 'mkdir' + +While at it, also catch OSError with errno 17, to safeguard against race +conditions if anything has created the directory between us checking for +it and us trying to create it. +--- + commands/upgrade/breadcrumbs.py | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/commands/upgrade/breadcrumbs.py b/commands/upgrade/breadcrumbs.py +index 16903ee0..3a3dcde3 100644 +--- a/commands/upgrade/breadcrumbs.py ++++ b/commands/upgrade/breadcrumbs.py +@@ -61,7 +61,12 @@ class _BreadCrumbs(object): + if not os.path.exists('/etc/rhsm'): + # If there's no /etc/rhsm folder just skip it + return +- os.path.mkdir('/etc/rhsm/facts') ++ try: ++ os.mkdir('/etc/rhsm/facts') ++ except OSError as e: ++ if e.errno == 17: ++ # The directory already exists which is all we need. 
++ pass + try: + with open('/etc/rhsm/facts/leapp.facts', 'w') as f: + json.dump(_flattened({ +-- +2.41.0 + diff --git a/0001-RHSM-Adjust-the-switch-to-container-mode-for-new-RHS.patch b/0022-RHSM-Adjust-the-switch-to-container-mode-for-new-RHS.patch similarity index 94% rename from 0001-RHSM-Adjust-the-switch-to-container-mode-for-new-RHS.patch rename to 0022-RHSM-Adjust-the-switch-to-container-mode-for-new-RHS.patch index 241a920..1173ca6 100644 --- a/0001-RHSM-Adjust-the-switch-to-container-mode-for-new-RHS.patch +++ b/0022-RHSM-Adjust-the-switch-to-container-mode-for-new-RHS.patch @@ -1,7 +1,7 @@ From b6e409e1055b5d8b7f27e5df9eae096eb592a9c7 Mon Sep 17 00:00:00 2001 From: Petr Stodulka Date: Fri, 27 Oct 2023 13:34:38 +0200 -Subject: [PATCH] RHSM: Adjust the switch to container mode for new RHSM +Subject: [PATCH 22/38] RHSM: Adjust the switch to container mode for new RHSM RHSM in RHEL 8.9+ & RHEL 9.3+ requires newly for the switch to the container mode existence and content under /etc/pki/entitlement-host, diff --git a/0023-load-all-substitutions-from-etc.patch b/0023-load-all-substitutions-from-etc.patch new file mode 100644 index 0000000..f072dcd --- /dev/null +++ b/0023-load-all-substitutions-from-etc.patch @@ -0,0 +1,61 @@ +From 5b0c1d9d6bc96e9718949a03dd717bb4cbc04c10 Mon Sep 17 00:00:00 2001 +From: Evgeni Golov +Date: Sat, 21 Oct 2023 19:36:19 +0200 +Subject: [PATCH 23/38] load all substitutions from etc + +On some distributions (like CentOS Stream and Oracle Linux), we need +more substitutions to be able to load repositories properly. + +DNF has a helper for that: conf.substitutions.update_from_etc. + +On pure DNF distributions, calling this should be sufficient. +On EL7, where the primary tool is YUM, DNF does not load vars from +/etc/yum, only from /etc/dnf, so we have to help it a bit and explicitly +try to load releasever from /etc/yum. +(DNF since 4.2.15 *does* also load substitutions from /etc/yum, but EL7 +ships with 4.0.x) +--- + .../system_upgrade/common/libraries/module.py | 23 +++++++++++-------- + 1 file changed, 14 insertions(+), 9 deletions(-) + +diff --git a/repos/system_upgrade/common/libraries/module.py b/repos/system_upgrade/common/libraries/module.py +index abde69e7..7d4e8aa4 100644 +--- a/repos/system_upgrade/common/libraries/module.py ++++ b/repos/system_upgrade/common/libraries/module.py +@@ -1,4 +1,3 @@ +-import os + import warnings + + from leapp.libraries.common.config.version import get_source_major_version +@@ -23,14 +22,20 @@ def _create_or_get_dnf_base(base=None): + # have repositories only for the exact system version (including the minor number). In a case when + # /etc/yum/vars/releasever is present, read its contents so that we can access repositores on such systems. 
+     conf = dnf.conf.Conf()
+-    pkg_manager = 'yum' if get_source_major_version() == '7' else 'dnf'
+-    releasever_path = '/etc/{0}/vars/releasever'.format(pkg_manager)
+-    if os.path.exists(releasever_path):
+-        with open(releasever_path) as releasever_file:
+-            releasever = releasever_file.read().strip()
+-        conf.substitutions['releasever'] = releasever
+-    else:
+-        conf.substitutions['releasever'] = get_source_major_version()
++
++    # preload releasever from what we know, this will be our fallback
++    conf.substitutions['releasever'] = get_source_major_version()
++
++    # dnf on EL7 doesn't load vars from /etc/yum, so we need to help it a bit
++    if get_source_major_version() == '7':
++        try:
++            with open('/etc/yum/vars/releasever') as releasever_file:
++                conf.substitutions['releasever'] = releasever_file.read().strip()
++        except IOError:
++            pass
++
++    # load all substitutions from etc
++    conf.substitutions.update_from_etc('/')
+ 
+     base = dnf.Base(conf=conf)
+     base.init_plugins()
+-- 
+2.41.0
+
diff --git a/0002-Do-not-create-dangling-symlinks-for-containerized-RH.patch b/0024-Do-not-create-dangling-symlinks-for-containerized-RH.patch
similarity index 97%
rename from 0002-Do-not-create-dangling-symlinks-for-containerized-RH.patch
rename to 0024-Do-not-create-dangling-symlinks-for-containerized-RH.patch
index 7739103..42bd5e5 100644
--- a/0002-Do-not-create-dangling-symlinks-for-containerized-RH.patch
+++ b/0024-Do-not-create-dangling-symlinks-for-containerized-RH.patch
@@ -1,7 +1,7 @@
 From d1f28cbd143f2dce85f7f175308437954847aba8 Mon Sep 17 00:00:00 2001
 From: Petr Stodulka
 Date: Thu, 2 Nov 2023 14:20:11 +0100
-Subject: [PATCH] Do not create dangling symlinks for containerized RHSM
+Subject: [PATCH 24/38] Do not create dangling symlinks for containerized RHSM
 
 When setting RHSM into the container mode, we are creating symlinks
 to /etc/rhsm and /etc/pki/entitlement directories. However, this
diff --git a/0025-be-less-strict-when-figuring-out-major-version-in-in.patch b/0025-be-less-strict-when-figuring-out-major-version-in-in.patch
new file mode 100644
index 0000000..0690351
--- /dev/null
+++ b/0025-be-less-strict-when-figuring-out-major-version-in-in.patch
@@ -0,0 +1,68 @@
+From 64ec2ec60eac7abd4910c5b2a1a43794d3df11cf Mon Sep 17 00:00:00 2001
+From: Evgeni Golov
+Date: Sat, 4 Nov 2023 19:54:19 +0100
+Subject: [PATCH 25/38] be less strict when figuring out major version in
+ initrd
+
+We only care about the major part of the version, so it's sufficient to
+grep without the dot, which is not present on CentOS initrd.
+
+CentOS Stream 8:
+
+    VERSION="8 dracut-049-224.git20230330.el8"
+    VERSION_ID=049-224.git20230330.el8
+
+CentOS Stream 9:
+
+    VERSION="9 dracut-057-38.git20230725.el9"
+    VERSION_ID="9"
+
+RHEL 8.8:
+
+    VERSION="8.8 (Ootpa) dracut-049-223.git20230119.el8"
+    VERSION_ID=049-223.git20230119.el8
+
+RHEL 9.2:
+
+    VERSION="9.2 (Plow) dracut-057-21.git20230214.el9"
+    VERSION_ID="9.2"
+
+Ideally, we would just use the major part of VERSION_ID, but this is set
+to the underlying OS's VERSION_ID only since dracut 050 [1] and EL8
+ships with 049. 
+
+[1] https://github.com/dracutdevs/dracut/commit/72ae1c4fe73c5637eb8f6843b9a127a6d69469d6
+---
+ .../files/dracut/85sys-upgrade-redhat/do-upgrade.sh             | 2 +-
+ .../files/dracut/90sys-upgrade/initrd-system-upgrade-generator  | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
+index c181c5cf..95be87b5 100755
+--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
++++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
+@@ -9,7 +9,7 @@ type getarg >/dev/null 2>&1 || . /lib/dracut-lib.sh
+ 
+ get_rhel_major_release() {
+     local os_version
+-    os_version=$(grep -o '^VERSION="[0-9][0-9]*\.' /etc/initrd-release | grep -o '[0-9]*')
++    os_version=$(grep -o '^VERSION="[0-9][0-9]*' /etc/initrd-release | grep -o '[0-9]*')
+     [ -z "$os_version" ] && {
+         # This should not happen as /etc/initrd-release is supposed to have API
+         # stability, but check is better than broken system.
+diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/initrd-system-upgrade-generator b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/initrd-system-upgrade-generator
+index 5cc6fd92..fe81626f 100755
+--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/initrd-system-upgrade-generator
++++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/initrd-system-upgrade-generator
+@@ -1,7 +1,7 @@
+ #!/bin/sh
+ 
+ get_rhel_major_release() {
+-    _os_version=$(cat /etc/initrd-release | grep -o '^VERSION="[0-9][0-9]*\.' | grep -o '[0-9]*')
++    _os_version=$(cat /etc/initrd-release | grep -o '^VERSION="[0-9][0-9]*' | grep -o '[0-9]*')
+     [ -z "$_os_version" ] && {
+         # This should not happen as /etc/initrd-release is supposed to have API
+         # stability, but check is better than broken system.
+-- 
+2.41.0
+
diff --git a/0026-rhui-bootstrap-target-rhui-clients-in-scratch-contai.patch b/0026-rhui-bootstrap-target-rhui-clients-in-scratch-contai.patch
new file mode 100644
index 0000000..8f2fb42
--- /dev/null
+++ b/0026-rhui-bootstrap-target-rhui-clients-in-scratch-contai.patch
@@ -0,0 +1,1738 @@
+From bbed72d18dabb9c47aed4f2e760ee637decc30f1 Mon Sep 17 00:00:00 2001
+From: Michal Hecko
+Date: Wed, 8 Mar 2023 12:41:03 +0100
+Subject: [PATCH 26/38] rhui: bootstrap target rhui clients in scratch
+ container
+
+In order to upgrade a RHUI system leapp uses custom `leapp-rhui-
+X` packages providing leapp with the necessary repository definitions as
+well as certs and keys to access these repositories. The content of
+the `leapp-rhui-X` packages is therefore almost identical to the RHUI
+client(s) found on the target systems, implying that leapp's rhui
+packages must actively mirror any changes to the target client packages.
+This patch modifies leapp so that leapp uses the `leapp-rhui-X` package
+only to provide a definition of the repository where a target RHUI client
+can be found. The current RHUI client and the target RHUI client are then
+atomically swapped (bootstrapped) in the scratch container, allowing the
+upgrade process to access target content. This change thus minimizes
+the effort put into maintaining leapp-rhui-X. 
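+
+To illustrate the client swap (a minimal sketch only; the package names
+and the src_clients/tgt_clients variables are illustrative, the real
+logic lives in the new checkrhui library below):
+
+    # Client sets taken from the matched source and target setup descriptions.
+    src_clients = {'old-rhui-client'}   # clients installed on the source system
+    tgt_clients = {'new-rhui-client'}   # clients expected on the target system
+
+    # Anything target-only gets installed, anything source-only gets removed.
+    to_install = sorted(tgt_clients - src_clients)
+    to_remove = sorted(src_clients - tgt_clients)
+
+When the two sets are equal, no swap is needed and the client is simply
+upgraded during the RPM transaction.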
+
+This patch also redesigns the "cloud map" to contain declarative
+descriptions of setups, making it possible to produce different client
+bootstrap steps if desired (not implemented). The new map also contains
+information about the content channel used on known RHUI systems, laying the
+necessary foundation for better error messages when the user forgets to
+run leapp with --channel.
+
+Finally, the RHUI-handling logic has been mostly isolated into a fully
+unit-tested actor, whereas the implemented userspacegen modifications
+simply follow the instructions produced by the RHUI actor.
+
+Jira: OAMG-8599
+---
+ .../tests/test_checketcreleasever.py          |  36 +-
+ .../libraries/checkhybridimage.py             |  17 +-
+ .../common/actors/cloud/checkrhui/actor.py    |  93 +----
+ .../cloud/checkrhui/libraries/checkrhui.py    | 250 +++++++++++++
+ .../tests/component_test_checkrhui.py         | 339 ++++++++++++++++--
+ .../libraries/pes_events_scanner.py           |   5 +-
+ .../actors/redhatsignedrpmscanner/actor.py    |  21 +-
+ .../tests/test_setetcreleasever.py            |  25 +-
+ .../libraries/setuptargetrepos.py             |   7 +-
+ .../libraries/userspacegen.py                 | 196 +++++++---
+ .../tests/unit_test_targetuserspacecreator.py |   7 +-
+ repos/system_upgrade/common/libraries/rhui.py | 266 ++++++++++++--
+ .../system_upgrade/common/models/rhuiinfo.py  |  52 ++-
+ 13 files changed, 1066 insertions(+), 248 deletions(-)
+ create mode 100644 repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py
+
+diff --git a/repos/system_upgrade/common/actors/checketcreleasever/tests/test_checketcreleasever.py b/repos/system_upgrade/common/actors/checketcreleasever/tests/test_checketcreleasever.py
+index 82eb0847..1310ace2 100644
+--- a/repos/system_upgrade/common/actors/checketcreleasever/tests/test_checketcreleasever.py
++++ b/repos/system_upgrade/common/actors/checketcreleasever/tests/test_checketcreleasever.py
+@@ -4,13 +4,16 @@ import pytest
+ 
+ from leapp import reporting
+ from leapp.libraries.actor import checketcreleasever
+-from leapp.libraries.common.testutils import (
+-    create_report_mocked,
+-    CurrentActorMocked,
+-    logger_mocked
+-)
++from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked, logger_mocked
+ from leapp.libraries.stdlib import api
+-from leapp.models import PkgManagerInfo, Report, RHUIInfo
++from leapp.models import (
++    PkgManagerInfo,
++    Report,
++    RHUIInfo,
++    TargetRHUIPostInstallTasks,
++    TargetRHUIPreInstallTasks,
++    TargetRHUISetupInfo
++)
+ 
+ 
+ @pytest.mark.parametrize('exists', [True, False])
+@@ -55,9 +58,24 @@ def test_etc_releasever_empty(monkeypatch):
+     assert api.current_logger.dbgmsg
+ 
+ 
++def mk_rhui_info():
++    preinstall_tasks = TargetRHUIPreInstallTasks()
++    postinstall_tasks = TargetRHUIPostInstallTasks()
++    setup_info = TargetRHUISetupInfo(preinstall_tasks=preinstall_tasks, postinstall_tasks=postinstall_tasks)
++    rhui_info = RHUIInfo(provider='aws',
++                         src_client_pkg_names=['rh-amazon-rhui-client'],
++                         target_client_pkg_names=['rh-amazon-rhui-client'],
++                         target_client_setup_info=setup_info)
++    return rhui_info
++
++
+ @pytest.mark.parametrize('is_rhui', [True, False])
+ def test_etc_releasever_rhui(monkeypatch, is_rhui):
+-    rhui_msg = [RHUIInfo(provider='aws')] if is_rhui else []
++    if is_rhui:
++        rhui_msg = [mk_rhui_info()]
++    else:
++        rhui_msg = []
++
+     expected_rel_ver = '6.10'
+ 
+     mocked_report = create_report_mocked()
+@@ -92,7 +110,9 @@ def test_etc_releasever_neither(monkeypatch):
+ 
+ 
+ def test_etc_releasever_both(monkeypatch):
+-    msgs = 
[RHUIInfo(provider='aws'), PkgManagerInfo(etc_releasever='7.7')] ++ rhui_info = mk_rhui_info() ++ ++ msgs = [rhui_info, PkgManagerInfo(etc_releasever='7.7')] + expected_rel_ver = '6.10' + + mocked_report = create_report_mocked() +diff --git a/repos/system_upgrade/common/actors/cloud/checkhybridimage/libraries/checkhybridimage.py b/repos/system_upgrade/common/actors/cloud/checkhybridimage/libraries/checkhybridimage.py +index e894683b..e2b7f5b2 100644 +--- a/repos/system_upgrade/common/actors/cloud/checkhybridimage/libraries/checkhybridimage.py ++++ b/repos/system_upgrade/common/actors/cloud/checkhybridimage/libraries/checkhybridimage.py +@@ -2,6 +2,7 @@ import os + + from leapp import reporting + from leapp.libraries.common import rhui ++from leapp.libraries.common.config.version import get_source_major_version + from leapp.libraries.common.rpms import has_package + from leapp.libraries.stdlib import api + from leapp.models import FirmwareFacts, HybridImage, InstalledRPM +@@ -20,8 +21,20 @@ def is_grubenv_symlink_to_efi(): + + def is_azure_agent_installed(): + """Check whether 'WALinuxAgent' package is installed.""" +- upg_path = rhui.get_upg_path() +- agent_pkg = rhui.RHUI_CLOUD_MAP[upg_path].get('azure', {}).get('agent_pkg', '') ++ src_ver_major = get_source_major_version() ++ ++ family = rhui.RHUIFamily(rhui.RHUIProvider.AZURE) ++ azure_setups = rhui.RHUI_SETUPS.get(family, []) ++ ++ agent_pkg = None ++ for setup in azure_setups: ++ if setup.os_version == src_ver_major: ++ agent_pkg = setup.extra_info.get('agent_pkg') ++ break ++ ++ if not agent_pkg: ++ return False ++ + return has_package(InstalledRPM, agent_pkg) + + +diff --git a/repos/system_upgrade/common/actors/cloud/checkrhui/actor.py b/repos/system_upgrade/common/actors/cloud/checkrhui/actor.py +index 9cf69dad..593e73e5 100644 +--- a/repos/system_upgrade/common/actors/cloud/checkrhui/actor.py ++++ b/repos/system_upgrade/common/actors/cloud/checkrhui/actor.py +@@ -1,11 +1,5 @@ +-import os +- +-from leapp import reporting + from leapp.actors import Actor +-from leapp.libraries.common import rhsm, rhui +-from leapp.libraries.common.config.version import get_source_major_version +-from leapp.libraries.common.rpms import has_package +-from leapp.libraries.stdlib import api ++from leapp.libraries.actor import checkrhui as checkrhui_lib + from leapp.models import ( + CopyFile, + DNFPluginTask, +@@ -16,7 +10,7 @@ from leapp.models import ( + RpmTransactionTasks, + TargetUserSpacePreupgradeTasks + ) +-from leapp.reporting import create_report, Report ++from leapp.reporting import Report + from leapp.tags import FactsPhaseTag, IPUWorkflowTag + + +@@ -40,85 +34,4 @@ class CheckRHUI(Actor): + tags = (FactsPhaseTag, IPUWorkflowTag) + + def process(self): +- upg_path = rhui.get_upg_path() +- for provider, info in rhui.RHUI_CLOUD_MAP[upg_path].items(): +- if has_package(InstalledRPM, info['src_pkg']): +- # we need to do this workaround in order to overcome our RHUI handling limitation +- # in case there are more client packages on the source system +- # @Note(mhecko): Azure has changed the structure of their images to not use a pair of RHUI clients and +- # # instead they started to use a single package. However, it could happen that a user +- # # does not run `dnf upgrade` and thus has both packages installed. 
+- if 'azure' in info['src_pkg']: +- azure_sap_variants = ['azure-sap-ha', 'azure-sap-apps'] +- for azure_sap_variant in azure_sap_variants: +- sap_variant_info = rhui.RHUI_CLOUD_MAP[upg_path][azure_sap_variant] +- if has_package(InstalledRPM, sap_variant_info['src_pkg']): +- info = sap_variant_info +- provider = azure_sap_variant +- +- if provider.startswith('google'): +- rhui_dir = api.get_common_folder_path('rhui') +- repofile = os.path.join(rhui_dir, provider, 'leapp-{}.repo'.format(provider)) +- api.produce( +- TargetUserSpacePreupgradeTasks( +- copy_files=[CopyFile(src=repofile, dst='/etc/yum.repos.d/leapp-google-copied.repo')] +- ) +- ) +- +- if not rhsm.skip_rhsm(): +- create_report([ +- reporting.Title('Upgrade initiated with RHSM on public cloud with RHUI infrastructure'), +- reporting.Summary( +- 'Leapp detected this system is on public cloud with RHUI infrastructure ' +- 'but the process was initiated without "--no-rhsm" command line option ' +- 'which implies RHSM usage (valid subscription is needed).' +- ), +- reporting.Severity(reporting.Severity.INFO), +- reporting.Groups([reporting.Groups.PUBLIC_CLOUD]), +- ]) +- return +- +- # When upgrading with RHUI we cannot switch certs and let RHSM provide us repos for target OS content. +- # Instead, Leapp's provider-specific package containing target OS certs and repos has to be installed. +- if not has_package(InstalledRPM, info['leapp_pkg']): +- create_report([ +- reporting.Title('Package "{}" is missing'.format(info['leapp_pkg'])), +- reporting.Summary( +- 'On {} using RHUI infrastructure, a package "{}" is needed for ' +- 'in-place upgrade'.format(provider.upper(), info['leapp_pkg']) +- ), +- reporting.Severity(reporting.Severity.HIGH), +- reporting.RelatedResource('package', info['leapp_pkg']), +- reporting.Groups([reporting.Groups.INHIBITOR]), +- reporting.Groups([reporting.Groups.PUBLIC_CLOUD, reporting.Groups.RHUI]), +- reporting.Remediation(commands=[['yum', 'install', '-y', info['leapp_pkg']]]) +- ]) +- return +- +- # there are several "variants" related to the *AWS* provider (aws, aws-sap) +- if provider.startswith('aws'): +- # We have to disable Amazon-id plugin in the initramdisk phase as the network +- # is down at the time +- self.produce(DNFPluginTask(name='amazon-id', disable_in=['upgrade'])) +- +- # If source OS and target OS packages differ we must remove the source pkg, and install the target pkg. +- # If the packages do not differ, it is sufficient to upgrade them during the upgrade +- if info['src_pkg'] != info['target_pkg']: +- self.produce(RpmTransactionTasks(to_install=[info['target_pkg']])) +- self.produce(RpmTransactionTasks(to_remove=[info['src_pkg']])) +- +- # Although SAP systems on Azure should not rely on a pair of RHUI clients, it is still possible +- # that the source system has both clients installed, and it is safer to remove both of them. 
+- azure_nonsap_pkg = None +- if provider == 'azure-sap-ha': +- azure_nonsap_pkg = rhui.RHUI_CLOUD_MAP[upg_path]['azure']['src_pkg'] +- elif provider == 'azure-sap-apps': +- # SAP Apps systems have EUS content channel from RHEL8+ +- src_rhel_content_type = 'azure' if get_source_major_version() == '7' else 'azure-eus' +- azure_nonsap_pkg = rhui.RHUI_CLOUD_MAP[upg_path][src_rhel_content_type]['src_pkg'] +- if azure_nonsap_pkg and has_package(InstalledRPM, azure_nonsap_pkg): +- self.produce(RpmTransactionTasks(to_remove=[azure_nonsap_pkg])) +- +- self.produce(RHUIInfo(provider=provider)) +- self.produce(RequiredTargetUserspacePackages(packages=[info['target_pkg']])) +- return ++ checkrhui_lib.process() +diff --git a/repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py b/repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py +new file mode 100644 +index 00000000..84ab40e3 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py +@@ -0,0 +1,250 @@ ++import itertools ++import os ++from collections import namedtuple ++ ++from leapp import reporting ++from leapp.exceptions import StopActorExecutionError ++from leapp.libraries.common import rhsm, rhui ++from leapp.libraries.common.config import version ++from leapp.libraries.stdlib import api ++from leapp.models import ( ++ CopyFile, ++ DNFPluginTask, ++ InstalledRPM, ++ RHUIInfo, ++ RpmTransactionTasks, ++ TargetRHUIPostInstallTasks, ++ TargetRHUIPreInstallTasks, ++ TargetRHUISetupInfo, ++ TargetUserSpacePreupgradeTasks ++) ++ ++MatchingSetup = namedtuple('MatchingSetup', ['family', 'description']) ++ ++ ++def into_set(pkgs): ++ if isinstance(pkgs, set): ++ return pkgs ++ if isinstance(pkgs, str): ++ return {pkgs} ++ return set(pkgs) ++ ++ ++def find_rhui_setup_matching_src_system(installed_pkgs, rhui_map): ++ src_ver = version.get_source_major_version() ++ arch = api.current_actor().configuration.architecture ++ ++ matching_setups = [] ++ for rhui_family, family_setups in rhui_map.items(): ++ if rhui_family.arch != arch: ++ continue ++ ++ for setup in family_setups: ++ if setup.os_version != src_ver: ++ continue ++ if setup.clients.issubset(installed_pkgs): ++ matching_setups.append(MatchingSetup(family=rhui_family, description=setup)) ++ ++ if not matching_setups: ++ return None ++ ++ # In case that a RHUI variant uses a combination of clients identify the maximal client set ++ matching_setups_by_size = sorted(matching_setups, key=lambda match: -len(match.description.clients)) ++ ++ match = matching_setups_by_size[0] # Matching setup with the highest number of clients ++ if len(matching_setups) == 1: ++ return match ++ ++ if len(matching_setups_by_size[0].description.clients) == len(matching_setups_by_size[1].description.clients): ++ # Should not happen as no cloud providers use multi-client setups (at the moment) ++ msg = 'Could not identify the source RHUI setup (ambiguous setup)' ++ ++ variant_detail_table = { ++ rhui.RHUIVariant.ORDINARY: '', ++ rhui.RHUIVariant.SAP: ' for SAP', ++ rhui.RHUIVariant.SAP_APPS: ' for SAP Applications', ++ rhui.RHUIVariant.SAP_HA: ' for SAP HA', ++ } ++ ++ match0 = matching_setups_by_size[0] ++ variant0_detail = variant_detail_table[match0.family.variant] ++ clients0 = ' '.join(match0.description.clients) ++ ++ match1 = matching_setups_by_size[1] ++ variant1_detail = variant_detail_table[match1.family.variant] ++ clients1 = ' '.join(match1.description.clients) ++ ++ details = ('Leapp uses client-based identification of the used RHUI 
setup in order to determine what the '
++               'target RHEL content should be. According to the installed RHUI clients the system should be '
++               'RHEL {os_major}{variant0_detail} ({provider0}) (identified by clients {clients0}) but also '
++               'RHEL {os_major}{variant1_detail} ({provider1}) (identified by clients {clients1}).')
++    details = details.format(os_major=version.get_source_major_version(),
++                             variant0_detail=variant0_detail, clients0=clients0, provider0=match0.family.provider,
++                             variant1_detail=variant1_detail, clients1=clients1, provider1=match1.family.provider)
++
++    raise StopActorExecutionError(message=msg, details={'details': details})
++
++    return match
++
++
++def determine_target_setup_desc(cloud_map, rhui_family):
++    variant_setups = cloud_map[rhui_family]
++    target_major = version.get_target_major_version()
++
++    for setup in variant_setups:
++        if setup.os_version == target_major:
++            return setup
++    return None
++
++
++def inhibit_if_leapp_pkg_to_access_target_missing(installed_pkgs, rhui_family, target_setup_desc):
++    pkg_name = target_setup_desc.leapp_pkg
++
++    if pkg_name not in installed_pkgs:
++        summary = 'On {provider} the "{pkg}" is required to perform an in-place upgrade'
++        summary = summary.format(provider=rhui_family.provider, pkg=pkg_name)
++        reporting.create_report([
++            reporting.Title('Package "{}" is not installed'.format(pkg_name)),
++            reporting.Summary(summary),
++            reporting.Severity(reporting.Severity.HIGH),
++            reporting.RelatedResource('package', pkg_name),
++            reporting.Groups([reporting.Groups.INHIBITOR]),
++            reporting.Groups([reporting.Groups.PUBLIC_CLOUD, reporting.Groups.RHUI]),
++            reporting.Remediation(commands=[['yum', 'install', '-y', pkg_name]])
++        ])
++        return True
++    return False
++
++
++def stop_due_to_unknown_target_system_setup(rhui_family):
++    msg = 'Failed to identify target RHUI setup'
++    variant_detail = ' ({})'.format(rhui_family.variant) if rhui_family.variant != rhui.RHUIVariant.ORDINARY else ''
++    details = ('Leapp successfully identified the current RHUI setup as a system provided by '
++               '{provider}{variant_detail}, but it failed to determine'
++               ' an equivalent RHUI setup for the target OS.')
++    details = details.format(provider=rhui_family.provider, variant_detail=variant_detail)
++    raise StopActorExecutionError(message=msg, details={'details': details})
++
++
++def customize_rhui_setup_for_gcp(rhui_family, setup_info):
++    if rhui_family.provider != rhui.RHUIProvider.GOOGLE:
++        return
++
++    # The google-cloud.repo repofile provides the repository containing the target clients. However, its repoid is the
++    # same across all rhel versions, therefore, we need to remove the source google-cloud.repo to enable the
++    # correct target one. 
++    setup_info.preinstall_tasks.files_to_remove.append('/etc/yum.repos.d/google-cloud.repo')
++
++
++def customize_rhui_setup_for_aws(rhui_family, setup_info):
++    if rhui_family.provider != rhui.RHUIProvider.AWS:
++        return
++
++    target_version = version.get_target_major_version()
++    if target_version == '8':
++        return  # The rhel8 plugin is packed into leapp-rhui-aws as we need a python2-compatible client
++
++    amazon_plugin_copy_task = CopyFile(src='/usr/lib/python3.9/site-packages/dnf-plugins/amazon-id.py',
++                                       dst='/usr/lib/python3.6/site-packages/dnf-plugins/')
++    setup_info.postinstall_tasks.files_to_copy.append(amazon_plugin_copy_task)
++
++
++def produce_rhui_info_to_setup_target(rhui_family, source_setup_desc, target_setup_desc):
++    rhui_files_location = os.path.join(api.get_common_folder_path('rhui'), rhui_family.client_files_folder)
++
++    files_to_access_target_client_repo = []
++    for filename, target_path in target_setup_desc.mandatory_files:
++        src_path = os.path.join(rhui_files_location, filename)
++        files_to_access_target_client_repo.append(CopyFile(src=src_path, dst=target_path))
++
++    for filename, target_path in target_setup_desc.optional_files:
++        src_path = os.path.join(rhui_files_location, filename)
++
++        if not os.path.exists(src_path):
++            msg = "Optional file {} is not present, skipping it when setting up target RHUI access."
++            api.current_logger().debug(msg.format(src_path))
++            continue
++
++        files_to_access_target_client_repo.append(CopyFile(src=src_path, dst=target_path))
++
++    preinstall_tasks = TargetRHUIPreInstallTasks(files_to_copy_into_overlay=files_to_access_target_client_repo)
++
++    files_supporting_client_operation = sorted(
++        os.path.join(rhui_files_location, file) for file in target_setup_desc.files_supporting_client_operation
++    )
++
++    target_client_setup_info = TargetRHUISetupInfo(
++        preinstall_tasks=preinstall_tasks,
++        postinstall_tasks=TargetRHUIPostInstallTasks(),
++        files_supporting_client_operation=files_supporting_client_operation
++    )
++
++    customize_rhui_setup_for_gcp(rhui_family, target_client_setup_info)
++    customize_rhui_setup_for_aws(rhui_family, target_client_setup_info)
++
++    rhui_info = RHUIInfo(
++        provider=rhui_family.provider.lower(),
++        variant=rhui_family.variant,
++        src_client_pkg_names=sorted(source_setup_desc.clients),
++        target_client_pkg_names=sorted(target_setup_desc.clients),
++        target_client_setup_info=target_client_setup_info
++    )
++    api.produce(rhui_info)
++
++
++def produce_rpms_to_install_into_target(source_setup, target_setup):
++    to_install = sorted(target_setup.clients - source_setup.clients)
++    to_remove = sorted(source_setup.clients - target_setup.clients)
++
++    api.produce(TargetUserSpacePreupgradeTasks(install_rpms=sorted(target_setup.clients)))
++    if to_install or to_remove:
++        api.produce(RpmTransactionTasks(to_install=to_install, to_remove=to_remove))
++
++
++def inform_about_upgrade_with_rhui_without_no_rhsm():
++    if not rhsm.skip_rhsm():
++        reporting.create_report([
++            reporting.Title('Upgrade initiated with RHSM on public cloud with RHUI infrastructure'),
++            reporting.Summary(
++                'Leapp detected this system is on public cloud with RHUI infrastructure '
++                'but the process was initiated without "--no-rhsm" command line option '
++                'which implies RHSM usage (valid subscription is needed).' 
++            ),
++            reporting.Severity(reporting.Severity.INFO),
++            reporting.Groups([reporting.Groups.PUBLIC_CLOUD]),
++        ])
++        return True
++    return False
++
++
++def process():
++    installed_rpm = itertools.chain(*[installed_rpm_msg.items for installed_rpm_msg in api.consume(InstalledRPM)])
++    installed_pkgs = {rpm.name for rpm in installed_rpm}
++
++    src_rhui_setup = find_rhui_setup_matching_src_system(installed_pkgs, rhui.RHUI_SETUPS)
++    if not src_rhui_setup:
++        return
++    api.current_logger().debug("The RHUI family of the source system is {}".format(src_rhui_setup.family))
++
++    target_setup_desc = determine_target_setup_desc(rhui.RHUI_SETUPS, src_rhui_setup.family)
++
++    if not target_setup_desc:
++        # We know that we are on RHUI because we have identified what RHUI variant it is, but we don't know what
++        # the target system looks like. Likely, our knowledge of what RHUI setups are there (RHUI_SETUPS) is incomplete.
++        stop_due_to_unknown_target_system_setup(src_rhui_setup.family)
++        return
++
++    if inform_about_upgrade_with_rhui_without_no_rhsm():
++        return
++
++    if inhibit_if_leapp_pkg_to_access_target_missing(installed_pkgs, src_rhui_setup.family, target_setup_desc):
++        return
++
++    # Instructions on how to access the target content
++    produce_rhui_info_to_setup_target(src_rhui_setup.family, src_rhui_setup.description, target_setup_desc)
++
++    produce_rpms_to_install_into_target(src_rhui_setup.description, target_setup_desc)
++
++    if src_rhui_setup.family.provider == rhui.RHUIProvider.AWS:
++        # We have to disable the Amazon-id plugin in the initramdisk phase as there is no network
++        api.produce(DNFPluginTask(name='amazon-id', disable_in=['upgrade']))
+diff --git a/repos/system_upgrade/common/actors/cloud/checkrhui/tests/component_test_checkrhui.py b/repos/system_upgrade/common/actors/cloud/checkrhui/tests/component_test_checkrhui.py
+index fde5ea72..93f13a00 100644
+--- a/repos/system_upgrade/common/actors/cloud/checkrhui/tests/component_test_checkrhui.py
++++ b/repos/system_upgrade/common/actors/cloud/checkrhui/tests/component_test_checkrhui.py
+@@ -1,60 +1,329 @@
+ from collections import namedtuple
++from enum import Enum
+ 
+ import pytest
+ 
+-from leapp.libraries.common import rhsm
+-from leapp.libraries.common.config import mock_configs
++from leapp import reporting
++from leapp.exceptions import StopActorExecutionError
++from leapp.libraries.actor import checkrhui as checkrhui_lib
++from leapp.libraries.common import rhsm, rhui
++from leapp.libraries.common.config import mock_configs, version
++from leapp.libraries.common.rhui import mk_rhui_setup, RHUIFamily
++from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked, produce_mocked
++from leapp.libraries.stdlib import api
+ from leapp.models import (
++    CopyFile,
+     InstalledRedHatSignedRPM,
+     InstalledRPM,
+     RequiredTargetUserspacePackages,
+     RHUIInfo,
+-    RPM
++    RPM,
++    RpmTransactionTasks,
++    TargetRHUIPostInstallTasks,
++    TargetRHUIPreInstallTasks,
++    TargetRHUISetupInfo,
++    TargetUserSpacePreupgradeTasks
+ )
+ from leapp.reporting import Report
+ from leapp.snactor.fixture import current_actor_context
+ 
+ RH_PACKAGER = 'Red Hat, Inc. 
' + +-NO_RHUI = [ +- RPM(name='yolo', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', +- pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'), +-] + +-ON_AWS_WITHOUT_LEAPP_PKG = [ +- RPM(name='rh-amazon-rhui-client', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, +- arch='noarch', pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'), +-] ++def mk_pkg(name): ++ return RPM(name=name, version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', ++ pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51') + +-ON_AWS_WITH_LEAPP_PKG = [ +- RPM(name='rh-amazon-rhui-client', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, +- arch='noarch', pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'), +- RPM(name='leapp-rhui-aws', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, +- arch='noarch', pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51') +-] + ++def mk_setup_info(): ++ pre_tasks = TargetRHUIPreInstallTasks() ++ post_tasks = TargetRHUIPostInstallTasks() ++ return TargetRHUISetupInfo(preinstall_tasks=pre_tasks, postinstall_tasks=post_tasks) + +-def create_modulesfacts(installed_rpm): +- return InstalledRPM(items=installed_rpm) + ++def iter_known_rhui_setups(): ++ for upgrade_path, providers in rhui.RHUI_CLOUD_MAP.items(): ++ for provider_variant, variant_description in providers.items(): ++ src_clients = variant_description['src_pkg'] ++ if isinstance(src_clients, str): ++ src_clients = {src_clients, } + +-msgs_received = namedtuple('MsgsReceived', ['report', 'rhui_info', 'req_target_userspace']) ++ yield provider_variant, upgrade_path, src_clients + + +-@pytest.mark.parametrize('skip_rhsm, msgs_received, installed_rpms', [ +- (False, msgs_received(False, False, False), NO_RHUI), +- (True, msgs_received(True, False, False), ON_AWS_WITHOUT_LEAPP_PKG), +- (True, msgs_received(False, True, True), ON_AWS_WITH_LEAPP_PKG), +- (False, msgs_received(True, False, False), ON_AWS_WITH_LEAPP_PKG) +-]) +-def test_check_rhui_actor( +- monkeypatch, current_actor_context, skip_rhsm, msgs_received, installed_rpms +-): ++def mk_cloud_map(variants): ++ upg_path = {} ++ for variant_desc in variants: ++ provider, desc = next(iter(variant_desc.items())) ++ upg_path[provider] = desc ++ return upg_path ++ ++ ++@pytest.mark.parametrize( ++ ('extra_pkgs', 'rhui_setups', 'expected_result'), ++ [ ++ ( ++ ['client'], ++ {RHUIFamily('provider'): [mk_rhui_setup(clients={'client'})]}, ++ RHUIFamily('provider') ++ ), ++ ( ++ ['client'], ++ {RHUIFamily('provider'): [mk_rhui_setup(clients={'missing_client'})]}, ++ None ++ ), ++ ( ++ ['clientA', 'clientB'], ++ {RHUIFamily('provider'): [mk_rhui_setup(clients={'clientB'})]}, ++ RHUIFamily('provider') ++ ), ++ ( ++ ['clientA', 'clientB'], ++ { ++ RHUIFamily('provider'): [mk_rhui_setup(clients={'clientA'})], ++ RHUIFamily('provider+'): [mk_rhui_setup(clients={'clientA', 'clientB'})], ++ }, ++ RHUIFamily('provider+') ++ ), ++ ( ++ ['client'], ++ { ++ RHUIFamily('providerA'): [mk_rhui_setup(clients={'client'})], ++ RHUIFamily('providerB'): [mk_rhui_setup(clients={'client'})], ++ }, ++ StopActorExecutionError ++ ), ++ ] ++) ++def test_determine_rhui_src_variant(monkeypatch, extra_pkgs, rhui_setups, expected_result): ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(src_ver='7.9')) ++ installed_pkgs = {'zip', 'zsh', 'bash', 
'grubby'}.union(set(extra_pkgs)) ++ ++ if expected_result and not isinstance(expected_result, RHUIFamily): # An exception ++ with pytest.raises(expected_result) as err: ++ checkrhui_lib.find_rhui_setup_matching_src_system(installed_pkgs, rhui_setups) ++ assert 'ambiguous' in str(err) ++ return ++ ++ variant_setup_pair = checkrhui_lib.find_rhui_setup_matching_src_system(installed_pkgs, rhui_setups) ++ if not expected_result: ++ assert variant_setup_pair == expected_result ++ else: ++ variant = variant_setup_pair[0] ++ assert variant == expected_result ++ ++ ++@pytest.mark.parametrize( ++ ('extra_pkgs', 'target_rhui_setup', 'should_inhibit'), ++ [ ++ (['pkg'], mk_rhui_setup(leapp_pkg='pkg'), False), ++ ([], mk_rhui_setup(leapp_pkg='pkg'), True), ++ ] ++) ++def test_inhibit_on_missing_leapp_rhui_pkg(monkeypatch, extra_pkgs, target_rhui_setup, should_inhibit): ++ installed_pkgs = set(['bash', 'zsh', 'zip'] + extra_pkgs) ++ monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) ++ checkrhui_lib.inhibit_if_leapp_pkg_to_access_target_missing(installed_pkgs, ++ RHUIFamily('rhui-variant'), ++ target_rhui_setup) ++ assert bool(reporting.create_report.called) == should_inhibit ++ ++ ++def are_setup_infos_eq(actual, expected): ++ eq = True ++ eq &= actual.enable_only_repoids_in_copied_files == expected.enable_only_repoids_in_copied_files ++ eq &= actual.files_supporting_client_operation == expected.files_supporting_client_operation ++ eq &= actual.preinstall_tasks.files_to_remove == expected.preinstall_tasks.files_to_remove ++ eq &= actual.preinstall_tasks.files_to_copy_into_overlay == expected.preinstall_tasks.files_to_copy_into_overlay ++ eq &= actual.postinstall_tasks.files_to_copy == expected.postinstall_tasks.files_to_copy ++ return eq ++ ++ ++@pytest.mark.parametrize( ++ ('provider', 'should_mutate'), ++ [ ++ (RHUIFamily(rhui.RHUIProvider.GOOGLE), True), ++ (RHUIFamily(rhui.RHUIProvider.GOOGLE, variant=rhui.RHUIVariant.SAP), True), ++ (RHUIFamily('azure'), False), ++ ] ++) ++def test_google_specific_customization(provider, should_mutate): ++ setup_info = mk_setup_info() ++ checkrhui_lib.customize_rhui_setup_for_gcp(provider, setup_info) ++ ++ if should_mutate: ++ assert setup_info != mk_setup_info() ++ else: ++ assert setup_info == mk_setup_info() ++ ++ ++@pytest.mark.parametrize( ++ ('rhui_family', 'target_major', 'should_mutate'), ++ [ ++ (RHUIFamily(rhui.RHUIProvider.AWS), '8', False), ++ (RHUIFamily(rhui.RHUIProvider.AWS), '9', True), ++ (RHUIFamily(rhui.RHUIProvider.AWS, variant=rhui.RHUIVariant.SAP), '9', True), ++ (RHUIFamily('azure'), '9', False), ++ ] ++) ++def test_aws_specific_customization(monkeypatch, rhui_family, target_major, should_mutate): ++ dst_ver = '{major}.0'.format(major=target_major) ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(dst_ver=dst_ver)) ++ ++ setup_info = mk_setup_info() ++ checkrhui_lib.customize_rhui_setup_for_aws(rhui_family, setup_info) ++ ++ was_mutated = not are_setup_infos_eq(setup_info, mk_setup_info()) ++ assert should_mutate == was_mutated ++ ++ ++def produce_rhui_info_to_setup_target(monkeypatch): ++ source_rhui_setup = mk_rhui_setup( ++ clients={'src_pkg'}, ++ leapp_pkg='leapp_pkg', ++ mandatory_files=[('src_file1', '/etc'), ('src_file2', '/var')], ++ ) ++ ++ target_rhui_setup = mk_rhui_setup( ++ clients={'target_pkg'}, ++ leapp_pkg='leapp_pkg', ++ mandatory_files=[('target_file1', '/etc'), ('target_file2', '/var')], ++ ) ++ ++ monkeypatch.setattr(api, 'get_common_folder_path', lambda dummy: 'common_folder') ++ 
monkeypatch.setattr(api, 'produce', produce_mocked()) ++ ++ checkrhui_lib.produce_rhui_info_to_setup_target('provider', source_rhui_setup, target_rhui_setup) ++ ++ assert len(api.produce.model_instances) == 1 ++ ++ rhui_info = api.produce.model_instances[0] ++ assert rhui_info.provider == 'provider' ++ assert rhui_info.src_client_pkg_names == ['src_pkg'] ++ assert rhui_info.target_client_pkg_names == ['target_pkg'] ++ ++ setup_info = rhui_info.target_client_setup_info ++ ++ expected_copies = { ++ ('common_folder/provider/target_file1', '/etc'), ++ ('common_folder/provider/target_file2', '/var') ++ } ++ actual_copies = {(instr.src, instr.dst) for instr in setup_info.preinstall_tasks.files_to_copy_in} ++ ++ assert expected_copies == actual_copies ++ ++ assert not setup_info.postinstall_tasks.files_to_copy ++ ++ ++def test_produce_rpms_to_install_into_target(monkeypatch): ++ source_rhui_setup = mk_rhui_setup(clients={'src_pkg'}, leapp_pkg='leapp_pkg') ++ target_rhui_setup = mk_rhui_setup(clients={'target_pkg'}, leapp_pkg='leapp_pkg') ++ ++ monkeypatch.setattr(api, 'produce', produce_mocked()) ++ ++ checkrhui_lib.produce_rpms_to_install_into_target(source_rhui_setup, target_rhui_setup) ++ ++ assert len(api.produce.model_instances) == 2 ++ userspace_tasks, target_rpm_tasks = api.produce.model_instances[0], api.produce.model_instances[1] ++ ++ if isinstance(target_rpm_tasks, TargetUserSpacePreupgradeTasks): ++ userspace_tasks, target_rpm_tasks = target_rpm_tasks, userspace_tasks ++ ++ assert 'target_pkg' in target_rpm_tasks.to_install ++ assert 'src_pkg' in target_rpm_tasks.to_remove ++ assert 'target_pkg' in userspace_tasks.install_rpms ++ ++ ++@pytest.mark.parametrize('skip_rhsm', (True, False)) ++def test_inform_about_upgrade_with_rhui_without_no_rhsm(monkeypatch, skip_rhsm): ++ monkeypatch.setattr(rhsm, 'skip_rhsm', lambda: skip_rhsm) ++ monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) ++ ++ checkrhui_lib.inform_about_upgrade_with_rhui_without_no_rhsm() ++ ++ assert bool(reporting.create_report.called) is not skip_rhsm ++ ++ ++class ExpectedAction(Enum): ++ NOTHING = 1 # Actor should not produce anything ++ INHIBIT = 2 ++ PRODUCE = 3 # Actor should produce RHUI related info ++ ++ ++# Scenarios to cover: ++# 1. source client + NO_RHSM -> RPMs are produced, and setup info is produced ++# 2. source client -> inhibit ++# 3. 
leapp pkg missing -> inhibit ++@pytest.mark.parametrize( ++ ('extra_installed_pkgs', 'skip_rhsm', 'expected_action'), ++ [ ++ (['src_pkg', 'leapp_pkg'], True, ExpectedAction.PRODUCE), # Everything OK ++ (['src_pkg', 'leapp_pkg'], False, ExpectedAction.INHIBIT), # No --no-rhsm ++ (['src_pkg'], True, ExpectedAction.INHIBIT), # Missing leapp-rhui package ++ ([], True, ExpectedAction.NOTHING) # Not a RHUI system ++ ] ++) ++def test_process(monkeypatch, extra_installed_pkgs, skip_rhsm, expected_action): ++ known_setups = { ++ RHUIFamily('rhui-variant'): [ ++ mk_rhui_setup(clients={'src_pkg'}, os_version='7'), ++ mk_rhui_setup(clients={'target_pkg'}, os_version='8', leapp_pkg='leapp_pkg', ++ mandatory_files=[('file1', '/etc'), ('file2', '/var')]), ++ ] ++ } ++ ++ installed_pkgs = {'zip', 'kernel-core', 'python'}.union(set(extra_installed_pkgs)) ++ installed_pkgs = [mk_pkg(pkg_name) for pkg_name in installed_pkgs] ++ installed_rpms = InstalledRPM(items=installed_pkgs) ++ ++ monkeypatch.setattr(api, 'produce', produce_mocked()) ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(src_ver='7.9', msgs=[installed_rpms])) ++ monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) + monkeypatch.setattr(rhsm, 'skip_rhsm', lambda: skip_rhsm) ++ monkeypatch.setattr(rhui, 'RHUI_SETUPS', known_setups) ++ ++ checkrhui_lib.process() ++ ++ if expected_action == ExpectedAction.NOTHING: ++ assert not api.produce.called ++ assert not reporting.create_report.called ++ elif expected_action == ExpectedAction.INHIBIT: ++ assert not api.produce.called ++ assert len(reporting.create_report.reports) == 1 ++ else: # expected_action = ExpectedAction.PRODUCE ++ assert not reporting.create_report.called ++ assert len(api.produce.model_instances) == 3 ++ assert any(isinstance(pkg, RpmTransactionTasks) for pkg in api.produce.model_instances) ++ assert any(isinstance(pkg, RHUIInfo) for pkg in api.produce.model_instances) ++ assert any(isinstance(pkg, TargetUserSpacePreupgradeTasks) for pkg in api.produce.model_instances) ++ ++ ++@pytest.mark.parametrize('is_target_setup_known', (False, True)) ++def test_unknown_target_rhui_setup(monkeypatch, is_target_setup_known): ++ rhui_family = RHUIFamily('rhui-variant') ++ known_setups = { ++ rhui_family: [ ++ mk_rhui_setup(clients={'src_pkg'}, os_version='7'), ++ ] ++ } ++ ++ if is_target_setup_known: ++ target_setup = mk_rhui_setup(clients={'target_pkg'}, os_version='8', leapp_pkg='leapp_pkg') ++ known_setups[rhui_family].append(target_setup) ++ ++ installed_pkgs = {'zip', 'kernel-core', 'python', 'src_pkg', 'leapp_pkg'} ++ installed_pkgs = [mk_pkg(pkg_name) for pkg_name in installed_pkgs] ++ installed_rpms = InstalledRPM(items=installed_pkgs) ++ ++ monkeypatch.setattr(api, 'produce', produce_mocked()) ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(src_ver='7.9', msgs=[installed_rpms])) ++ monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) ++ monkeypatch.setattr(rhsm, 'skip_rhsm', lambda: True) ++ monkeypatch.setattr(rhui, 'RHUI_SETUPS', known_setups) + +- current_actor_context.feed(create_modulesfacts(installed_rpm=installed_rpms)) +- current_actor_context.run(config_model=mock_configs.CONFIG) +- assert bool(current_actor_context.consume(Report)) is msgs_received.report +- assert bool(current_actor_context.consume(RHUIInfo)) is msgs_received.rhui_info +- assert bool(current_actor_context.consume( +- RequiredTargetUserspacePackages)) is msgs_received.req_target_userspace ++ if is_target_setup_known: ++ 
checkrhui_lib.process() ++ assert api.produce.called ++ else: ++ with pytest.raises(StopActorExecutionError): ++ checkrhui_lib.process() +diff --git a/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py b/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py +index 01457f2a..f8d8dcfc 100644 +--- a/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py ++++ b/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py +@@ -355,9 +355,10 @@ def get_pesid_to_repoid_map(target_pesids): + details={'Problem': 'Did not receive a message with mapped repositories'} + ) + +- rhui_info = next(api.consume(RHUIInfo), RHUIInfo(provider='')) ++ rhui_info = next(api.consume(RHUIInfo), None) ++ cloud_provider = rhui_info.provider if rhui_info else '' + +- repomap = peseventsscanner_repomap.RepoMapDataHandler(repositories_map_msg, cloud_provider=rhui_info.provider) ++ repomap = peseventsscanner_repomap.RepoMapDataHandler(repositories_map_msg, cloud_provider=cloud_provider) + + # NOTE: We have to calculate expected target repositories like in the setuptargetrepos actor. + # It's planned to handle this in different a way in future... +diff --git a/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py b/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py +index 1085beee..41f9d343 100644 +--- a/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py ++++ b/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py +@@ -54,26 +54,7 @@ class RedHatSignedRpmScanner(Actor): + """Whitelist the katello package.""" + return pkg.name.startswith('katello-ca-consumer') + +- upg_path = rhui.get_upg_path() +- # AWS RHUI packages do not have to be whitelisted because they are signed by RedHat +- whitelisted_cloud_flavours = ( +- 'azure', +- 'azure-eus', +- 'azure-sap-ha', +- 'azure-sap-apps', +- 'google', +- 'google-sap', +- 'alibaba' +- ) +- whitelisted_cloud_pkgs = { +- rhui.RHUI_CLOUD_MAP[upg_path].get(flavour, {}).get('src_pkg') for flavour in whitelisted_cloud_flavours +- } +- whitelisted_cloud_pkgs.update( +- rhui.RHUI_CLOUD_MAP[upg_path].get(flavour, {}).get('target_pkg') for flavour in whitelisted_cloud_flavours +- ) +- whitelisted_cloud_pkgs.update( +- rhui.RHUI_CLOUD_MAP[upg_path].get(flavour, {}).get('leapp_pkg') for flavour in whitelisted_cloud_flavours +- ) ++ whitelisted_cloud_pkgs = rhui.get_all_known_rhui_pkgs_for_current_upg() + + for rpm_pkgs in self.consume(InstalledRPM): + for pkg in rpm_pkgs.items: +diff --git a/repos/system_upgrade/common/actors/setetcreleasever/tests/test_setetcreleasever.py b/repos/system_upgrade/common/actors/setetcreleasever/tests/test_setetcreleasever.py +index d86ac926..a14dd2b8 100644 +--- a/repos/system_upgrade/common/actors/setetcreleasever/tests/test_setetcreleasever.py ++++ b/repos/system_upgrade/common/actors/setetcreleasever/tests/test_setetcreleasever.py +@@ -3,13 +3,15 @@ import os + import pytest + + from leapp.libraries.actor import setetcreleasever +-from leapp.libraries.common.testutils import ( +- create_report_mocked, +- CurrentActorMocked, +- logger_mocked +-) ++from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked, logger_mocked + from leapp.libraries.stdlib import api +-from leapp.models import PkgManagerInfo, RHUIInfo ++from leapp.models import ( ++ PkgManagerInfo, ++ RHUIInfo, ++ TargetRHUIPostInstallTasks, ++ TargetRHUIPreInstallTasks, ++ TargetRHUISetupInfo ++) + + 
CUR_DIR = os.path.dirname(os.path.abspath(__file__)) + +@@ -33,8 +35,15 @@ class mocked_set_releasever(object): + + + def test_set_releasever(monkeypatch, current_actor_context): +- +- msgs = [RHUIInfo(provider='aws'), PkgManagerInfo(etc_releasever='7.7')] ++ preinstall_tasks = TargetRHUIPreInstallTasks() ++ postinstall_tasks = TargetRHUIPostInstallTasks() ++ setup_info = TargetRHUISetupInfo(preinstall_tasks=preinstall_tasks, postinstall_tasks=postinstall_tasks) ++ rhui_info = RHUIInfo(provider='aws', ++ src_client_pkg_names=['rh-amazon-rhui-client'], ++ target_client_pkg_names=['rh-amazon-rhui-client'], ++ target_client_setup_info=setup_info) ++ ++ msgs = [rhui_info, PkgManagerInfo(etc_releasever='7.7')] + + expected_rel_ver = '8.0' + monkeypatch.setattr(setetcreleasever, '_set_releasever', mocked_set_releasever()) +diff --git a/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py b/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py +index 4b8405d0..2b14a29a 100644 +--- a/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py ++++ b/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py +@@ -85,8 +85,11 @@ def process(): + + # Setup repomap handler + repo_mappig_msg = next(api.consume(RepositoriesMapping), RepositoriesMapping()) +- rhui_info = next(api.consume(RHUIInfo), RHUIInfo(provider='')) +- repomap = setuptargetrepos_repomap.RepoMapDataHandler(repo_mappig_msg, cloud_provider=rhui_info.provider) ++ ++ rhui_info = next(api.consume(RHUIInfo), None) ++ cloud_provider = rhui_info.provider if rhui_info else '' ++ ++ repomap = setuptargetrepos_repomap.RepoMapDataHandler(repo_mappig_msg, cloud_provider=cloud_provider) + + # Filter set of repoids from installed packages so that it contains only repoids with mapping + repoids_from_installed_packages_with_mapping = _get_mapped_repoids(repomap, repoids_from_installed_packages) +diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py +index 0982a796..039b99a5 100644 +--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py ++++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py +@@ -6,7 +6,7 @@ import shutil + from leapp import reporting + from leapp.exceptions import StopActorExecution, StopActorExecutionError + from leapp.libraries.actor import constants +-from leapp.libraries.common import dnfplugin, mounting, overlaygen, repofileutils, rhsm, rhui, utils ++from leapp.libraries.common import dnfplugin, mounting, overlaygen, repofileutils, rhsm, utils + from leapp.libraries.common.config import get_env, get_product_type + from leapp.libraries.common.config.version import get_target_major_version + from leapp.libraries.stdlib import api, CalledProcessError, config, run +@@ -282,25 +282,11 @@ def prepare_target_userspace(context, userspace_dir, enabled_repos, packages): + raise StopActorExecutionError(message=message, details=details) + + +-def _get_all_rhui_pkgs(): +- """ +- Return the list of rhui packages +- +- Currently, do not care about what rhui we have, release, etc. +- Just take all packages. We need them just for the purpose of filtering +- what files we have to remove (see _prep_repository_access) and it's ok +- for us to use whatever rhui rpms (the relevant rpms catch the problem, +- the others are just taking bytes in memory...). 
It's a hot-fix. We are going +- to refactor the library later completely.. +- """ +- upg_path = rhui.get_upg_path() +- pkgs = [] +- for rhui_map in rhui.RHUI_CLOUD_MAP[upg_path].values(): +- for key in rhui_map.keys(): +- if not key.endswith('pkg'): +- continue +- pkgs.append(rhui_map[key]) +- return pkgs ++def _query_rpm_for_pkg_files(context, pkgs): ++ files_owned_by_rpm = set() ++ rpm_query_result = context.call(['rpm', '-ql'] + pkgs, split=True) ++ files_owned_by_rpm.update(rpm_query_result['stdout']) ++ return files_owned_by_rpm + + + def _get_files_owned_by_rpms(context, dirpath, pkgs=None, recursive=False): +@@ -405,42 +391,30 @@ def _prep_repository_access(context, target_userspace): + if not rhsm.skip_rhsm(): + run(['rm', '-rf', os.path.join(target_etc, 'rhsm')]) + context.copytree_from('/etc/rhsm', os.path.join(target_etc, 'rhsm')) +- # NOTE: we cannot just remove the original target yum.repos.d dir +- # as e.g. in case of RHUI a special RHUI repofiles are installed by a pkg +- # when the target userspace container is created. Removing these files we loose +- # RHUI target repositories. So ...-> +- # -> detect such a files... ++ ++ # NOTE: We cannot just remove the target yum.repos.d dir and replace it with yum.repos.d from the scratch ++ # # that we've used to obtain the new DNF stack and install it into the target userspace. Although ++ # # RHUI clients are being installed in both scratch and target containers, users can request their package ++ # # to be installed into target userspace that might add some repos to yum.repos.d that are not in scratch. ++ ++ # Detect files that are owned by some RPM - these cannot be deleted + with mounting.NspawnActions(base_dir=target_userspace) as target_context: + files_owned_by_rpms = _get_files_owned_by_rpms(target_context, '/etc/yum.repos.d') + +- # -> backup the orig dir & install the new one ++ # Backup the target yum.repos.d so we can always copy the files installed by some RPM back into yum.repos.d ++ # when we modify it + run(['mv', target_yum_repos_d, backup_yum_repos_d]) +- context.copytree_from('/etc/yum.repos.d', target_yum_repos_d) + +- # -> find old rhui repo files (we have to remove these as they cause duplicates) +- rhui_pkgs = _get_all_rhui_pkgs() +- old_files_owned_by_rhui_rpms = _get_files_owned_by_rpms(context, '/etc/yum.repos.d', rhui_pkgs) +- for fname in old_files_owned_by_rhui_rpms: +- api.current_logger().debug('Remove the old repofile: {}'.format(fname)) +- run(['rm', '-f', os.path.join(target_yum_repos_d, fname)]) +- # .. continue: remove our leapp rhui repo file (do not care if we are on rhui or not) +- for rhui_map in rhui.gen_rhui_files_map().values(): +- for item in rhui_map: +- if item[1] != rhui.YUM_REPOS_PATH: +- continue +- target_leapp_repofile = os.path.join(target_yum_repos_d, item[0]) +- if not os.path.isfile(target_leapp_repofile): +- continue +- # we found it!! +- run(['rm', '-f', target_leapp_repofile]) +- break ++ # Copy the yum.repos.d from scratch - preserve any custom repositories. No need to clean-up old RHUI clients, ++ # we swap them for the new RHUI client in scratch (so the old one is not installed). 
++ context.copytree_from('/etc/yum.repos.d', target_yum_repos_d) + +- # -> copy expected files back ++ # Copy back files owned by some RPM + for fname in files_owned_by_rpms: + api.current_logger().debug('Copy the backed up repo file: {}'.format(fname)) + run(['mv', os.path.join(backup_yum_repos_d, fname), os.path.join(target_yum_repos_d, fname)]) + +- # -> remove the backed up dir ++ # Cleanup - remove the backed up dir + run(['rm', '-rf', backup_yum_repos_d]) + + +@@ -637,22 +611,71 @@ def _get_rhui_available_repoids(context, cloud_repo): + return set(repoids) + + ++def get_copy_location_from_copy_in_task(context, copy_task): ++ basename = os.path.basename(copy_task.src) ++ dest_in_container = context.full_path(copy_task.dst) ++ if os.path.isdir(dest_in_container): ++ return os.path.join(copy_task.dst, basename) ++ return copy_task.dst ++ ++ + def _get_rh_available_repoids(context, indata): + """ + RH repositories are provided either by RHSM or are stored in the expected repo file provided by + RHUI special packages (every cloud provider has itw own rpm). + """ + +- upg_path = rhui.get_upg_path() +- + rh_repoids = _get_rhsm_available_repoids(context) + ++ # If we are upgrading a RHUI system, check what repositories are provided by the (already installed) target clients + if indata and indata.rhui_info: +- cloud_repo = os.path.join( +- '/etc/yum.repos.d/', rhui.RHUI_CLOUD_MAP[upg_path][indata.rhui_info.provider]['leapp_pkg_repo'] ++ files_provided_by_clients = _query_rpm_for_pkg_files(context, indata.rhui_info.target_client_pkg_names) ++ ++ def is_repofile(path): ++ return os.path.dirname(path) == '/etc/yum.repos.d' and os.path.basename(path).endswith('.repo') ++ ++ def extract_repoid_from_line(line): ++ return line.split(':', 1)[1].strip() ++ ++ target_ver = api.current_actor().configuration.version.target ++ setup_tasks = indata.rhui_info.target_client_setup_info.preinstall_tasks.files_to_copy_into_overlay ++ ++ yum_repos_d = context.full_path('/etc/yum.repos.d') ++ all_repofiles = {os.path.join(yum_repos_d, path) for path in os.listdir(yum_repos_d) if path.endswith('.repo')} ++ client_repofiles = {context.full_path(path) for path in files_provided_by_clients if is_repofile(path)} ++ ++ # Exclude repofiles used to setup the target rhui access as on some platforms the repos provided by ++ # the client are not sufficient to install the client into target userspace (GCP) ++ rhui_setup_repofile_tasks = [task for task in setup_tasks if task.src.endswith('repo')] ++ rhui_setup_repofiles = ( ++ get_copy_location_from_copy_in_task(context, copy_task) for copy_task in rhui_setup_repofile_tasks + ) +- rhui_repoids = _get_rhui_available_repoids(context, cloud_repo) +- rh_repoids.update(rhui_repoids) ++ rhui_setup_repofiles = {context.full_path(repofile) for repofile in rhui_setup_repofiles} ++ ++ foreign_repofiles = all_repofiles - client_repofiles - rhui_setup_repofiles ++ ++ # Rename non-client repofiles so they will not be recognized when running dnf repolist ++ for foreign_repofile in foreign_repofiles: ++ os.rename(foreign_repofile, '{0}.back'.format(foreign_repofile)) ++ ++ try: ++ dnf_cmd = ['dnf', 'repolist', '--releasever', target_ver, '-v'] ++ repolist_result = context.call(dnf_cmd)['stdout'] ++ repoid_lines = [line for line in repolist_result.split('\n') if line.startswith('Repo-id')] ++ rhui_repoids = {extract_repoid_from_line(line) for line in repoid_lines} ++ rh_repoids.update(rhui_repoids) ++ ++ except CalledProcessError as err: ++ details = {'err': err.stderr, 'details': str(err)} 
++            raise StopActorExecutionError(
++                message='Failed to retrieve repoids provided by target RHUI clients.',
++                details=details
++            )
++
++        finally:
++            # Revert the renaming of non-client repofiles
++            for foreign_repofile in foreign_repofiles:
++                os.rename('{0}.back'.format(foreign_repofile), foreign_repofile)
+ 
+     return rh_repoids
+ 
+@@ -790,8 +813,7 @@ def _gather_target_repositories(context, indata, prod_cert_path):
+     """
+     rhsm.set_container_mode(context)
+     rhsm.switch_certificate(context, indata.rhsm_info, prod_cert_path)
+-    if indata.rhui_info:
+-        rhui.copy_rhui_data(context, indata.rhui_info.provider)
++
+     _install_custom_repofiles(context, indata.custom_repofiles)
+     return gather_target_repositories(context, indata)
+ 
+@@ -834,6 +856,69 @@ def _create_target_userspace(context, packages, files, target_repoids):
+         rhsm.set_container_mode(target_context)
+ 
+ 
++def install_target_rhui_client_if_needed(context, indata):
++    if not indata.rhui_info:
++        return
++
++    target_major_version = get_target_major_version()
++    userspace_dir = _get_target_userspace()
++    _create_target_userspace_directories(userspace_dir)
++
++    setup_info = indata.rhui_info.target_client_setup_info
++    if setup_info.preinstall_tasks:
++        preinstall_tasks = setup_info.preinstall_tasks
++
++        for file_to_remove in preinstall_tasks.files_to_remove:
++            context.remove(file_to_remove)
++
++        for copy_info in preinstall_tasks.files_to_copy_into_overlay:
++            context.makedirs(os.path.dirname(copy_info.dst), exists_ok=True)
++            context.copy_to(copy_info.src, copy_info.dst)
++
++    cmd = ['dnf', '-y']
++
++    if setup_info.enable_only_repoids_in_copied_files and setup_info.preinstall_tasks:
++        copy_tasks = setup_info.preinstall_tasks.files_to_copy_into_overlay
++        copied_repofiles = [copy.src for copy in copy_tasks if copy.src.endswith('.repo')]
++        copied_repoids = set()
++        for repofile in copied_repofiles:
++            repofile_contents = repofileutils.parse_repofile(repofile)
++            copied_repoids.update(entry.repoid for entry in repofile_contents.data)
++
++        cmd += ['--disablerepo', '*']
++        for copied_repoid in copied_repoids:
++            cmd.extend(('--enablerepo', copied_repoid))
++
++    src_client_remove_steps = ['remove {0}'.format(client) for client in indata.rhui_info.src_client_pkg_names]
++    target_client_install_steps = ['install {0}'.format(client) for client in indata.rhui_info.target_client_pkg_names]
++
++    dnf_transaction_steps = src_client_remove_steps + target_client_install_steps + ['transaction run']
++
++    cmd += [
++        '--setopt=module_platform_id=platform:el{}'.format(target_major_version),
++        '--setopt=keepcache=1',
++        '--releasever', api.current_actor().configuration.version.target,
++        '--disableplugin', 'subscription-manager',
++        'shell'
++    ]
++
++    context.call(cmd, callback_raw=utils.logging_handler, stdin='\n'.join(dnf_transaction_steps))
++
++    if setup_info.postinstall_tasks:
++        for copy_info in setup_info.postinstall_tasks.files_to_copy:
++            context.makedirs(os.path.dirname(copy_info.dst), exists_ok=True)
++            context.call(['cp', copy_info.src, copy_info.dst])
++
++    # Do a cleanup so there are no duplicate repoids
++    files_owned_by_clients = _query_rpm_for_pkg_files(context, indata.rhui_info.target_client_pkg_names)
++
++    for copy_task in setup_info.preinstall_tasks.files_to_copy_into_overlay:
++        dest = get_copy_location_from_copy_in_task(context, copy_task)
++        can_be_cleaned_up = copy_task.src not in setup_info.files_supporting_client_operation
++        if dest not in files_owned_by_clients and can_be_cleaned_up:
++            context.remove(dest)
++
++
+ 
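++# A sketch (for illustration only, mirroring the logic above) of the dnf
++# shell transaction assembled by install_target_rhui_client_if_needed,
++# assuming src_client_pkg_names == target_client_pkg_names ==
++# ['rh-amazon-rhui-client'], as on AWS:
++#
++#     steps = ['remove rh-amazon-rhui-client',
++#              'install rh-amazon-rhui-client',
++#              'transaction run']
++#     context.call(['dnf', '-y', 'shell'], stdin='\n'.join(steps))
++#
++# Swapping the source client for the target client within a single DNF
++# transaction avoids a window in which the container has no RHUI client
++# installed and therefore no access to the target repositories.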
@suppress_deprecation(TMPTargetRepositoriesFacts) + def perform(): + # NOTE: this one action is out of unit-tests completely; we do not use +@@ -853,6 +938,9 @@ def perform(): + # Mount the ISO into the scratch container + target_iso = next(api.consume(TargetOSInstallationImage), None) + with mounting.mount_upgrade_iso_to_root_dir(overlay.target, target_iso): ++ ++ install_target_rhui_client_if_needed(context, indata) ++ + target_repoids = _gather_target_repositories(context, indata, prod_cert_path) + _create_target_userspace(context, indata.packages, indata.files, target_repoids) + # TODO: this is tmp solution as proper one needs significant refactoring +diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py +index a519275e..cc684c7d 100644 +--- a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py ++++ b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py +@@ -85,7 +85,12 @@ def _gen_packages_msgs(): + + _PACKAGES_MSGS = _gen_packages_msgs() + _RHSMINFO_MSG = models.RHSMInfo(attached_skus=['testing-sku']) +-_RHUIINFO_MSG = models.RHUIInfo(provider='aws') ++_RHUIINFO_MSG = models.RHUIInfo(provider='aws', ++ src_client_pkg_names=['rh-amazon-rhui-client'], ++ target_client_pkg_names=['rh-amazon-rhui-client'], ++ target_client_setup_info=models.TargetRHUISetupInfo( ++ preinstall_tasks=models.TargetRHUIPreInstallTasks(), ++ postinstall_tasks=models.TargetRHUIPostInstallTasks())) + _XFS_MSG = models.XFSPresence() + _STORAGEINFO_MSG = models.StorageInfo() + _CTRF_MSGS = [ +diff --git a/repos/system_upgrade/common/libraries/rhui.py b/repos/system_upgrade/common/libraries/rhui.py +index 14a91c42..aa40b597 100644 +--- a/repos/system_upgrade/common/libraries/rhui.py ++++ b/repos/system_upgrade/common/libraries/rhui.py +@@ -1,9 +1,12 @@ + import os ++from collections import namedtuple + + import six + +-from leapp.libraries.common.config.version import get_target_major_version ++from leapp.libraries.common.config import architecture as arch ++from leapp.libraries.common.config.version import get_source_major_version, get_target_major_version + from leapp.libraries.stdlib import api ++from leapp.utils.deprecation import deprecated + + # when on AWS and upgrading from RHEL 7, we need also Python2 version of "Amazon-id" dnf + # plugin which is served by "leapp-rhui-aws" rpm package (please note this package is not +@@ -18,10 +21,233 @@ RHUI_PKI_PRIVATE_DIR = os.path.join(RHUI_PKI_DIR, 'private') + AWS_DNF_PLUGIN_NAME = 'amazon-id.py' + + ++class ContentChannel(object): ++ GA = 'ga' ++ TUV = 'tuv' ++ E4S = 'e4s' ++ EUS = 'eus' ++ AUS = 'aus' ++ BETA = 'beta' ++ ++ ++class RHUIVariant(object): ++ ORDINARY = 'ordinary' # Special value - not displayed in report/errors ++ SAP = 'sap' ++ SAP_APPS = 'sap-apps' ++ SAP_HA = 'sap-ha' ++ ++ ++class RHUIProvider(object): ++ GOOGLE = 'Google' ++ AZURE = 'Azure' ++ AWS = 'AWS' ++ ALIBABA = 'Alibaba' ++ ++ + # The files in 'files_map' are provided by special Leapp rpms (per cloud) and + # are delivered into "repos/system_upgrade/common/files/rhui/ + ++RHUISetup = namedtuple( ++ 'RHUISetup', ++ ('clients', 'leapp_pkg', 'mandatory_files', 'optional_files', 'extra_info', 'os_version', ++ 'arch', 'content_channel', 'files_supporting_client_operation') ++) ++"""RHUI-Setup-specific details used during IPU ++.. 
py:attribute:: clients
++    A set of RHUI clients present on the system.
++.. py:attribute:: leapp_pkg
++    The name of leapp's rhui-specific pkg providing repofiles, certs and keys to access packages of the setup.
++.. py:attribute:: mandatory_files
++    Mandatory files and their destinations to copy into target userspace container required to access the target OS
++    content. If not present, an exception will be raised.
++.. py:attribute:: optional_files
++    Optional files and their destinations to copy into target userspace container required to access the target OS
++    content. Nonexistence of any of these files is ignored.
++.. py:attribute:: extra_info
++    Extra information about the setup.
++.. py:attribute:: os_version
++    The major OS version of the RHUI system.
++.. py:attribute:: content_channel
++    Content channel used by the RHUI setup.
++.. py:attribute:: files_supporting_client_operation
++    A subset of files from ``mandatory_files`` that are necessary for the client to work (cannot be cleaned up).
++"""
++
++
++class RHUIFamily(object):
++    def __init__(self, provider, client_files_folder='', variant=RHUIVariant.ORDINARY, arch=arch.ARCH_X86_64,):
++        self.provider = provider
++        self.client_files_folder = client_files_folder
++        self.variant = variant
++        self.arch = arch
++
++    def __hash__(self):
++        return hash((self.provider, self.variant, self.arch))
++
++    def __eq__(self, other):
++        if not isinstance(other, RHUIFamily):
++            return False
++        self_repr = (self.provider, self.variant, self.arch)
++        other_repr = (other.provider, other.variant, other.arch)
++        return self_repr == other_repr
++
++    def full_eq(self, other):
++        partial_eq = self == other
++        return partial_eq and self.client_files_folder == other.client_files_folder
++
++    def __str__(self):
++        template = 'RHUIFamily(provider={provider}, variant={variant}, arch={arch})'
++        return template.format(provider=self.provider, variant=self.variant, arch=self.arch)
++
++
++def mk_rhui_setup(clients=None, leapp_pkg='', mandatory_files=None, optional_files=None,
++                  extra_info=None, os_version='7', arch=arch.ARCH_X86_64, content_channel=ContentChannel.GA,
++                  files_supporting_client_operation=None):
++    clients = clients or set()
++    mandatory_files = mandatory_files or []
++    extra_info = extra_info or {}
++    files_supporting_client_operation = files_supporting_client_operation or []
+
++    # Since the default optional files are not [], we cannot use the same construction as above
++    # to allow the caller to specify empty optional files
++    default_opt_files = [('content-leapp.crt', RHUI_PKI_PRODUCT_DIR), ('key-leapp.pem', RHUI_PKI_DIR)]
++    optional_files = default_opt_files if optional_files is None else optional_files
++
++    return RHUISetup(clients=clients, leapp_pkg=leapp_pkg, mandatory_files=mandatory_files, arch=arch,
++                     content_channel=content_channel, optional_files=optional_files, extra_info=extra_info,
++                     os_version=os_version, files_supporting_client_operation=files_supporting_client_operation)
++
++
++# This will be the new "cloud map". Essentially a directed graph with edges defined implicitly by OS versions +
++# setup family identification. In theory, we can make the variant be part of rhui setups, but this way we don't
++# have to repeatedly write it to every known setup there is (a sort of compression). Furthermore, it limits
++# the search for a target equivalent to setups sharing the same family, thus reducing the chance of error.
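++#
++# To illustrate the intended lookup (a sketch only; the actual matching logic
++# lives in the RHUI actors and may differ): RHUIFamily hashes and compares
++# only on (provider, variant, arch), so the setup for a given OS major
++# version can be picked from within a family like this:
++#
++#     def find_setup(family, os_version):
++#         for setup in RHUI_SETUPS.get(family, []):
++#             if setup.os_version == os_version:
++#                 return setup
++#         return None
++#
++#     src = find_setup(RHUIFamily(RHUIProvider.AWS), '8')  # source setup
++#     tgt = find_setup(RHUIFamily(RHUIProvider.AWS), '9')  # target setup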
++RHUI_SETUPS = { ++ RHUIFamily(RHUIProvider.AWS, client_files_folder='aws'): [ ++ mk_rhui_setup(clients={'rh-amazon-rhui-client'}, optional_files=[], os_version='7'), ++ mk_rhui_setup(clients={'rh-amazon-rhui-client'}, leapp_pkg='leapp-rhui-aws', ++ mandatory_files=[ ++ ('rhui-client-config-server-8.crt', RHUI_PKI_PRODUCT_DIR), ++ ('rhui-client-config-server-8.key', RHUI_PKI_DIR), ++ (AWS_DNF_PLUGIN_NAME, DNF_PLUGIN_PATH_PY2), ++ ('leapp-aws.repo', YUM_REPOS_PATH) ++ ], ++ files_supporting_client_operation=[AWS_DNF_PLUGIN_NAME], ++ optional_files=[], os_version='8'), ++ # @Note(mhecko): We don't need to deal with AWS_DNF_PLUGIN_NAME here as on rhel8+ we can use the plugin ++ # # provided by the target client - there is no Python2 incompatibility issue there. ++ mk_rhui_setup(clients={'rh-amazon-rhui-client'}, leapp_pkg='leapp-rhui-aws', ++ mandatory_files=[ ++ ('rhui-client-config-server-9.crt', RHUI_PKI_PRODUCT_DIR), ++ ('rhui-client-config-server-9.key', RHUI_PKI_DIR), ++ ('leapp-aws.repo', YUM_REPOS_PATH) ++ ], ++ optional_files=[], os_version='9'), ++ ], ++ RHUIFamily(RHUIProvider.AWS, arch=arch.ARCH_ARM64, client_files_folder='aws'): [ ++ mk_rhui_setup(clients={'rh-amazon-rhui-client-arm'}, optional_files=[], os_version='7', arch=arch.ARCH_ARM64), ++ mk_rhui_setup(clients={'rh-amazon-rhui-client-arm'}, leapp_pkg='leapp-rhui-aws', ++ mandatory_files=[ ++ ('rhui-client-config-server-8.crt', RHUI_PKI_PRODUCT_DIR), ++ ('rhui-client-config-server-8.key', RHUI_PKI_DIR), ++ (AWS_DNF_PLUGIN_NAME, DNF_PLUGIN_PATH_PY2), ++ ('leapp-aws.repo', YUM_REPOS_PATH) ++ ], ++ files_supporting_client_operation=[AWS_DNF_PLUGIN_NAME], ++ optional_files=[], os_version='8', arch=arch.ARCH_ARM64), ++ mk_rhui_setup(clients={'rh-amazon-rhui-client-arm'}, leapp_pkg='leapp-rhui-aws', ++ mandatory_files=[ ++ ('rhui-client-config-server-9.crt', RHUI_PKI_PRODUCT_DIR), ++ ('rhui-client-config-server-9.key', RHUI_PKI_DIR), ++ ('leapp-aws.repo', YUM_REPOS_PATH) ++ ], ++ optional_files=[], os_version='9', arch=arch.ARCH_ARM64), ++ ], ++ RHUIFamily(RHUIProvider.AWS, variant=RHUIVariant.SAP, client_files_folder='aws-sap-e4s'): [ ++ mk_rhui_setup(clients={'rh-amazon-rhui-client-sap-bundle'}, optional_files=[], os_version='7', ++ content_channel=ContentChannel.E4S), ++ mk_rhui_setup(clients={'rh-amazon-rhui-client-sap-bundle-e4s'}, leapp_pkg='leapp-rhui-aws-sap-e4s', ++ mandatory_files=[ ++ ('rhui-client-config-server-8-sap-bundle.crt', RHUI_PKI_PRODUCT_DIR), ++ ('rhui-client-config-server-8-sap-bundle.key', RHUI_PKI_DIR), ++ (AWS_DNF_PLUGIN_NAME, DNF_PLUGIN_PATH_PY2), ++ ('leapp-aws-sap-e4s.repo', YUM_REPOS_PATH) ++ ], ++ files_supporting_client_operation=[AWS_DNF_PLUGIN_NAME], ++ optional_files=[], os_version='8', content_channel=ContentChannel.E4S), ++ mk_rhui_setup(clients={'rh-amazon-rhui-client-sap-bundle-e4s'}, leapp_pkg='leapp-rhui-aws-sap-e4s', ++ mandatory_files=[ ++ ('rhui-client-config-server-9-sap-bundle.crt', RHUI_PKI_PRODUCT_DIR), ++ ('rhui-client-config-server-9-sap-bundle.key', RHUI_PKI_DIR), ++ ('leapp-aws-sap-e4s.repo', YUM_REPOS_PATH) ++ ], ++ optional_files=[], os_version='9', content_channel=ContentChannel.E4S), ++ ], ++ RHUIFamily(RHUIProvider.AZURE, client_files_folder='azure'): [ ++ mk_rhui_setup(clients={'rhui-azure-rhel7'}, os_version='7', ++ extra_info={'agent_pkg': 'WALinuxAgent'}), ++ mk_rhui_setup(clients={'rhui-azure-rhel8'}, leapp_pkg='leapp-rhui-azure', ++ mandatory_files=[('leapp-azure.repo', YUM_REPOS_PATH)], ++ extra_info={'agent_pkg': 'WALinuxAgent'}, ++ os_version='8'), ++ 
mk_rhui_setup(clients={'rhui-azure-rhel9'}, leapp_pkg='leapp-rhui-azure', ++ mandatory_files=[('leapp-azure.repo', YUM_REPOS_PATH)], ++ extra_info={'agent_pkg': 'WALinuxAgent'}, ++ os_version='9'), ++ ], ++ RHUIFamily(RHUIProvider.AZURE, variant=RHUIVariant.SAP_APPS, client_files_folder='azure-sap-apps'): [ ++ mk_rhui_setup(clients={'rhui-azure-rhel7-base-sap-apps'}, os_version='7', content_channel=ContentChannel.EUS), ++ mk_rhui_setup(clients={'rhui-azure-rhel8-sapapps'}, leapp_pkg='leapp-rhui-azure-sap', ++ mandatory_files=[('leapp-azure-sap-apps.repo', YUM_REPOS_PATH)], ++ extra_info={'agent_pkg': 'WALinuxAgent'}, ++ os_version='8', content_channel=ContentChannel.EUS), ++ mk_rhui_setup(clients={'rhui-azure-rhel9-sapapps'}, leapp_pkg='leapp-rhui-azure-sap', ++ mandatory_files=[('leapp-azure-sap-apps.repo', YUM_REPOS_PATH)], ++ extra_info={'agent_pkg': 'WALinuxAgent'}, ++ os_version='9', content_channel=ContentChannel.EUS), ++ ], ++ RHUIFamily(RHUIProvider.AZURE, variant=RHUIVariant.SAP_HA, client_files_folder='azure-sap-ha'): [ ++ mk_rhui_setup(clients={'rhui-azure-rhel7-base-sap-ha'}, os_version='7', content_channel=ContentChannel.E4S), ++ mk_rhui_setup(clients={'rhui-azure-rhel8-sap-ha'}, leapp_pkg='leapp-rhui-azure-sap', ++ mandatory_files=[('leapp-azure-sap-ha.repo', YUM_REPOS_PATH)], ++ extra_info={'agent_pkg': 'WALinuxAgent'}, ++ os_version='8', content_channel=ContentChannel.E4S), ++ mk_rhui_setup(clients={'rhui-azure-rhel9-sap-ha'}, leapp_pkg='leapp-rhui-azure-sap', ++ mandatory_files=[('leapp-azure-sap-ha.repo', YUM_REPOS_PATH)], ++ extra_info={'agent_pkg': 'WALinuxAgent'}, ++ os_version='9', content_channel=ContentChannel.E4S), ++ ], ++ RHUIFamily(RHUIProvider.GOOGLE, client_files_folder='google'): [ ++ mk_rhui_setup(clients={'google-rhui-client-rhel7'}, os_version='7'), ++ mk_rhui_setup(clients={'google-rhui-client-rhel8'}, leapp_pkg='leapp-rhui-google', ++ mandatory_files=[('leapp-google.repo', YUM_REPOS_PATH)], ++ files_supporting_client_operation=['leapp-google.repo'], ++ os_version='8'), ++ mk_rhui_setup(clients={'google-rhui-client-rhel9'}, leapp_pkg='leapp-rhui-google', ++ mandatory_files=[('leapp-google.repo', YUM_REPOS_PATH)], ++ files_supporting_client_operation=['leapp-google.repo'], ++ os_version='9'), ++ ], ++ RHUIFamily(RHUIProvider.GOOGLE, variant=RHUIVariant.SAP, client_files_folder='google-sap'): [ ++ mk_rhui_setup(clients={'google-rhui-client-rhel79-sap'}, os_version='7', content_channel=ContentChannel.E4S), ++ mk_rhui_setup(clients={'google-rhui-client-rhel8-sap'}, leapp_pkg='leapp-rhui-google-sap', ++ mandatory_files=[('leapp-google-sap.repo', YUM_REPOS_PATH)], ++ files_supporting_client_operation=['leapp-google-sap.repo'], ++ os_version='8', content_channel=ContentChannel.E4S), ++ mk_rhui_setup(clients={'google-rhui-client-rhel9-sap'}, leapp_pkg='leapp-rhui-google-sap', ++ mandatory_files=[('leapp-google-sap.repo', YUM_REPOS_PATH)], ++ files_supporting_client_operation=['leapp-google-sap.repo'], ++ os_version='9', content_channel=ContentChannel.E4S), ++ ], ++ RHUIFamily(RHUIProvider.ALIBABA, client_files_folder='alibaba'): [ ++ mk_rhui_setup(clients={'client-rhel7'}, os_version='7'), ++ mk_rhui_setup(clients={'aliyun_rhui_rhel8'}, leapp_pkg='leapp-rhui-alibaba', ++ mandatory_files=[('leapp-alibaba.repo', YUM_REPOS_PATH)], os_version='8'), ++ ] ++} ++ ++ ++# DEPRECATED, use RHUI_SETUPS instead + RHUI_CLOUD_MAP = { + '7to8': { + 'aws': { +@@ -32,8 +258,6 @@ RHUI_CLOUD_MAP = { + 'files_map': [ + ('rhui-client-config-server-8.crt', RHUI_PKI_PRODUCT_DIR), + 
('rhui-client-config-server-8.key', RHUI_PKI_DIR), +- ('content-rhel8.crt', RHUI_PKI_PRODUCT_DIR), +- ('content-rhel8.key', RHUI_PKI_DIR), + ('cdn.redhat.com-chain.crt', RHUI_PKI_DIR), + (AWS_DNF_PLUGIN_NAME, DNF_PLUGIN_PATH_PY2), + ('leapp-aws.repo', YUM_REPOS_PATH) +@@ -47,8 +271,6 @@ RHUI_CLOUD_MAP = { + 'files_map': [ + ('rhui-client-config-server-8-sap-bundle.crt', RHUI_PKI_PRODUCT_DIR), + ('rhui-client-config-server-8-sap-bundle.key', RHUI_PKI_DIR), +- ('content-rhel8-sap.crt', RHUI_PKI_PRODUCT_DIR), +- ('content-rhel8-sap.key', RHUI_PKI_DIR), + ('cdn.redhat.com-chain.crt', RHUI_PKI_DIR), + (AWS_DNF_PLUGIN_NAME, DNF_PLUGIN_PATH_PY2), + ('leapp-aws-sap-e4s.repo', YUM_REPOS_PATH) +@@ -61,8 +283,6 @@ RHUI_CLOUD_MAP = { + 'leapp_pkg': 'leapp-rhui-azure', + 'leapp_pkg_repo': 'leapp-azure.repo', + 'files_map': [ +- ('content.crt', RHUI_PKI_PRODUCT_DIR), +- ('key.pem', RHUI_PKI_PRIVATE_DIR), + ('leapp-azure.repo', YUM_REPOS_PATH) + ], + }, +@@ -73,8 +293,6 @@ RHUI_CLOUD_MAP = { + 'leapp_pkg': 'leapp-rhui-azure-sap', + 'leapp_pkg_repo': 'leapp-azure-sap-apps.repo', + 'files_map': [ +- ('content-sapapps.crt', RHUI_PKI_PRODUCT_DIR), +- ('key-sapapps.pem', RHUI_PKI_PRIVATE_DIR), + ('leapp-azure-sap-apps.repo', YUM_REPOS_PATH), + ], + }, +@@ -85,8 +303,6 @@ RHUI_CLOUD_MAP = { + 'leapp_pkg': 'leapp-rhui-azure-sap', + 'leapp_pkg_repo': 'leapp-azure-sap-ha.repo', + 'files_map': [ +- ('content-sap-ha.crt', RHUI_PKI_PRODUCT_DIR), +- ('key-sap-ha.pem', RHUI_PKI_PRIVATE_DIR), + ('leapp-azure-sap-ha.repo', YUM_REPOS_PATH) + ], + }, +@@ -133,8 +349,6 @@ RHUI_CLOUD_MAP = { + 'files_map': [ + ('rhui-client-config-server-9.crt', RHUI_PKI_PRODUCT_DIR), + ('rhui-client-config-server-9.key', RHUI_PKI_DIR), +- ('content-rhel9.crt', RHUI_PKI_PRODUCT_DIR), +- ('content-rhel9.key', RHUI_PKI_DIR), + ('cdn.redhat.com-chain.crt', RHUI_PKI_DIR), + ('leapp-aws.repo', YUM_REPOS_PATH) + ], +@@ -147,8 +361,6 @@ RHUI_CLOUD_MAP = { + 'files_map': [ + ('rhui-client-config-server-9-sap-bundle.crt', RHUI_PKI_PRODUCT_DIR), + ('rhui-client-config-server-9-sap-bundle.key', RHUI_PKI_DIR), +- ('content-rhel9-sap-bundle-e4s.crt', RHUI_PKI_PRODUCT_DIR), +- ('content-rhel9-sap-bundle-e4s.key', RHUI_PKI_DIR), + ('cdn.redhat.com-chain.crt', RHUI_PKI_DIR), + ('leapp-aws-sap-e4s.repo', YUM_REPOS_PATH) + ], +@@ -160,8 +372,6 @@ RHUI_CLOUD_MAP = { + 'leapp_pkg': 'leapp-rhui-azure', + 'leapp_pkg_repo': 'leapp-azure.repo', + 'files_map': [ +- ('content.crt', RHUI_PKI_PRODUCT_DIR), +- ('key.pem', RHUI_PKI_PRIVATE_DIR), + ('leapp-azure.repo', YUM_REPOS_PATH) + ], + }, +@@ -178,8 +388,6 @@ RHUI_CLOUD_MAP = { + 'leapp_pkg': 'leapp-rhui-azure-eus', + 'leapp_pkg_repo': 'leapp-azure.repo', + 'files_map': [ +- ('content.crt', RHUI_PKI_PRODUCT_DIR), +- ('key.pem', RHUI_PKI_PRIVATE_DIR), + ('leapp-azure.repo', YUM_REPOS_PATH) + ], + }, +@@ -190,8 +398,6 @@ RHUI_CLOUD_MAP = { + 'leapp_pkg': 'leapp-rhui-azure-sap', + 'leapp_pkg_repo': 'leapp-azure-sap-ha.repo', + 'files_map': [ +- ('content-sap-ha.crt', RHUI_PKI_PRODUCT_DIR), +- ('key-sap-ha.pem', RHUI_PKI_DIR), + ('leapp-azure-sap-ha.repo', YUM_REPOS_PATH) + ], + }, +@@ -202,8 +408,6 @@ RHUI_CLOUD_MAP = { + 'leapp_pkg': 'leapp-rhui-azure-sap', + 'leapp_pkg_repo': 'leapp-azure-sap-apps.repo', + 'files_map': [ +- ('content-sapapps.crt', RHUI_PKI_PRODUCT_DIR), +- ('key-sapapps.pem', RHUI_PKI_PRIVATE_DIR), + ('leapp-azure-sap-apps.repo', YUM_REPOS_PATH) + ], + }, +@@ -240,6 +444,7 @@ def get_upg_path(): + return '7to8' if get_target_major_version() == '8' else '8to9' + + ++@deprecated(since='2023-07-27', 
message='This functionality has been replaced with the RHUIInfo message.') + def gen_rhui_files_map(): + """ + Generate RHUI files map based on architecture and upgrade path +@@ -256,6 +461,7 @@ def gen_rhui_files_map(): + return files_map + + ++@deprecated(since='2023-07-27', message='This functionality has been integrated into target_userspace_creator.') + def copy_rhui_data(context, provider): + """ + Copy relevant RHUI certificates and key into the target userspace container +@@ -268,3 +474,17 @@ def copy_rhui_data(context, provider): + + for path_ in gen_rhui_files_map().get(provider, ()): + context.copy_to(os.path.join(data_dir, path_[0]), path_[1]) ++ ++ ++def get_all_known_rhui_pkgs_for_current_upg(): ++ upg_major_versions = (get_source_major_version(), get_target_major_version()) ++ ++ known_pkgs = set() ++ for setup_family in RHUI_SETUPS.values(): ++ for setup in setup_family: ++ if setup.os_version not in upg_major_versions: ++ continue ++ known_pkgs.update(setup.clients) ++ known_pkgs.add(setup.leapp_pkg) ++ ++ return known_pkgs +diff --git a/repos/system_upgrade/common/models/rhuiinfo.py b/repos/system_upgrade/common/models/rhuiinfo.py +index 0b518928..3eaa4826 100644 +--- a/repos/system_upgrade/common/models/rhuiinfo.py ++++ b/repos/system_upgrade/common/models/rhuiinfo.py +@@ -1,12 +1,58 @@ +-from leapp.models import fields, Model ++from leapp.models import CopyFile, fields, Model + from leapp.topics import SystemInfoTopic + + ++class TargetRHUIPreInstallTasks(Model): ++ """Tasks required to be executed before target RHUI clients are installed""" ++ topic = SystemInfoTopic ++ ++ files_to_remove = fields.List(fields.String(), default=[]) ++ """Files to remove from the source system in order to setup target RHUI access""" ++ ++ files_to_copy_into_overlay = fields.List(fields.Model(CopyFile), default=[]) ++ """Files to copy into the scratch (overlayfs) container in order to setup target RHUI access""" ++ ++ ++class TargetRHUIPostInstallTasks(Model): ++ """Tasks required to be executed after target RHUI clients are installed to facilitate access to target content.""" ++ topic = SystemInfoTopic ++ ++ files_to_copy = fields.List(fields.Model(CopyFile), default=[]) ++ """Source and destination are paths inside the container""" ++ ++ ++class TargetRHUISetupInfo(Model): ++ topic = SystemInfoTopic ++ ++ enable_only_repoids_in_copied_files = fields.Boolean(default=True) ++ """If True (default) only the repoids from copied files will be enabled during client installation""" ++ ++ preinstall_tasks = fields.Model(TargetRHUIPreInstallTasks) ++ """Tasks that must be performed before attempting to install the target client(s)""" ++ ++ postinstall_tasks = fields.Model(TargetRHUIPostInstallTasks) ++ """Tasks that must be performed after the target client is installed (before any other content is accessed)""" ++ ++ files_supporting_client_operation = fields.List(fields.String(), default=[]) ++ """A subset of files copied in preinstall tasks that should not be cleaned up.""" ++ ++ + class RHUIInfo(Model): + """ +- Facts about public cloud provider and RHUI infrastructure ++ Facts about public cloud variant and RHUI infrastructure + """ + topic = SystemInfoTopic + + provider = fields.String() +- """ Provider name """ ++ """Provider name""" ++ ++ variant = fields.StringEnum(['ordinary', 'sap', 'sap-apps', 'sap-ha'], default='ordinary') ++ """Variant of the system""" ++ ++ src_client_pkg_names = fields.List(fields.String()) ++ """Names of the RHUI client packages providing repofiles to the source 
system""" ++ ++ target_client_pkg_names = fields.List(fields.String()) ++ """Names of the RHUI client packages providing repofiles to the target system""" ++ ++ target_client_setup_info = fields.Model(TargetRHUISetupInfo) +-- +2.41.0 + diff --git a/0027-add-backward-compatibility-for-leapp-rhui-aws-azure-.patch b/0027-add-backward-compatibility-for-leapp-rhui-aws-azure-.patch new file mode 100644 index 0000000..e3ffad5 --- /dev/null +++ b/0027-add-backward-compatibility-for-leapp-rhui-aws-azure-.patch @@ -0,0 +1,167 @@ +From 594cdb92171ebd66a07c558bfa5c914593569810 Mon Sep 17 00:00:00 2001 +From: PeterMocary +Date: Wed, 18 Oct 2023 15:34:22 +0200 +Subject: [PATCH 27/38] add backward compatibility for leapp-rhui-(aws|azure) + packages + +--- + repos/system_upgrade/common/libraries/rhui.py | 76 +++++++++++++++---- + 1 file changed, 62 insertions(+), 14 deletions(-) + +diff --git a/repos/system_upgrade/common/libraries/rhui.py b/repos/system_upgrade/common/libraries/rhui.py +index aa40b597..b31eba0b 100644 +--- a/repos/system_upgrade/common/libraries/rhui.py ++++ b/repos/system_upgrade/common/libraries/rhui.py +@@ -127,13 +127,17 @@ RHUI_SETUPS = { + mk_rhui_setup(clients={'rh-amazon-rhui-client'}, optional_files=[], os_version='7'), + mk_rhui_setup(clients={'rh-amazon-rhui-client'}, leapp_pkg='leapp-rhui-aws', + mandatory_files=[ +- ('rhui-client-config-server-8.crt', RHUI_PKI_PRODUCT_DIR), +- ('rhui-client-config-server-8.key', RHUI_PKI_DIR), +- (AWS_DNF_PLUGIN_NAME, DNF_PLUGIN_PATH_PY2), +- ('leapp-aws.repo', YUM_REPOS_PATH) ++ ('rhui-client-config-server-8.crt', RHUI_PKI_PRODUCT_DIR), ++ ('rhui-client-config-server-8.key', RHUI_PKI_DIR), ++ (AWS_DNF_PLUGIN_NAME, DNF_PLUGIN_PATH_PY2), ++ ('leapp-aws.repo', YUM_REPOS_PATH) + ], + files_supporting_client_operation=[AWS_DNF_PLUGIN_NAME], +- optional_files=[], os_version='8'), ++ optional_files=[ ++ ('content-rhel8.key', RHUI_PKI_DIR), ++ ('cdn.redhat.com-chain.crt', RHUI_PKI_DIR), ++ ('content-rhel8.crt', RHUI_PKI_PRODUCT_DIR) ++ ], os_version='8'), + # @Note(mhecko): We don't need to deal with AWS_DNF_PLUGIN_NAME here as on rhel8+ we can use the plugin + # # provided by the target client - there is no Python2 incompatibility issue there. 
+ mk_rhui_setup(clients={'rh-amazon-rhui-client'}, leapp_pkg='leapp-rhui-aws', +@@ -142,26 +146,38 @@ RHUI_SETUPS = { + ('rhui-client-config-server-9.key', RHUI_PKI_DIR), + ('leapp-aws.repo', YUM_REPOS_PATH) + ], +- optional_files=[], os_version='9'), ++ optional_files=[ ++ ('content-rhel9.key', RHUI_PKI_DIR), ++ ('cdn.redhat.com-chain.crt', RHUI_PKI_DIR), ++ ('content-rhel9.crt', RHUI_PKI_PRODUCT_DIR) ++ ], os_version='9'), + ], + RHUIFamily(RHUIProvider.AWS, arch=arch.ARCH_ARM64, client_files_folder='aws'): [ + mk_rhui_setup(clients={'rh-amazon-rhui-client-arm'}, optional_files=[], os_version='7', arch=arch.ARCH_ARM64), + mk_rhui_setup(clients={'rh-amazon-rhui-client-arm'}, leapp_pkg='leapp-rhui-aws', + mandatory_files=[ +- ('rhui-client-config-server-8.crt', RHUI_PKI_PRODUCT_DIR), +- ('rhui-client-config-server-8.key', RHUI_PKI_DIR), +- (AWS_DNF_PLUGIN_NAME, DNF_PLUGIN_PATH_PY2), +- ('leapp-aws.repo', YUM_REPOS_PATH) ++ ('rhui-client-config-server-8.crt', RHUI_PKI_PRODUCT_DIR), ++ ('rhui-client-config-server-8.key', RHUI_PKI_DIR), ++ (AWS_DNF_PLUGIN_NAME, DNF_PLUGIN_PATH_PY2), ++ ('leapp-aws.repo', YUM_REPOS_PATH) + ], + files_supporting_client_operation=[AWS_DNF_PLUGIN_NAME], +- optional_files=[], os_version='8', arch=arch.ARCH_ARM64), ++ optional_files=[ ++ ('content-rhel8.key', RHUI_PKI_DIR), ++ ('cdn.redhat.com-chain.crt', RHUI_PKI_DIR), ++ ('content-rhel8.crt', RHUI_PKI_PRODUCT_DIR) ++ ], os_version='8', arch=arch.ARCH_ARM64), + mk_rhui_setup(clients={'rh-amazon-rhui-client-arm'}, leapp_pkg='leapp-rhui-aws', + mandatory_files=[ + ('rhui-client-config-server-9.crt', RHUI_PKI_PRODUCT_DIR), + ('rhui-client-config-server-9.key', RHUI_PKI_DIR), + ('leapp-aws.repo', YUM_REPOS_PATH) + ], +- optional_files=[], os_version='9', arch=arch.ARCH_ARM64), ++ optional_files=[ ++ ('content-rhel9.key', RHUI_PKI_DIR), ++ ('cdn.redhat.com-chain.crt', RHUI_PKI_DIR), ++ ('content-rhel9.crt', RHUI_PKI_PRODUCT_DIR) ++ ], os_version='9', arch=arch.ARCH_ARM64), + ], + RHUIFamily(RHUIProvider.AWS, variant=RHUIVariant.SAP, client_files_folder='aws-sap-e4s'): [ + mk_rhui_setup(clients={'rh-amazon-rhui-client-sap-bundle'}, optional_files=[], os_version='7', +@@ -174,24 +190,40 @@ RHUI_SETUPS = { + ('leapp-aws-sap-e4s.repo', YUM_REPOS_PATH) + ], + files_supporting_client_operation=[AWS_DNF_PLUGIN_NAME], +- optional_files=[], os_version='8', content_channel=ContentChannel.E4S), ++ optional_files=[ ++ ('content-rhel8-sap.key', RHUI_PKI_DIR), ++ ('cdn.redhat.com-chain.crt', RHUI_PKI_DIR), ++ ('content-rhel8-sap.crt', RHUI_PKI_PRODUCT_DIR) ++ ], os_version='8', content_channel=ContentChannel.E4S), + mk_rhui_setup(clients={'rh-amazon-rhui-client-sap-bundle-e4s'}, leapp_pkg='leapp-rhui-aws-sap-e4s', + mandatory_files=[ + ('rhui-client-config-server-9-sap-bundle.crt', RHUI_PKI_PRODUCT_DIR), + ('rhui-client-config-server-9-sap-bundle.key', RHUI_PKI_DIR), + ('leapp-aws-sap-e4s.repo', YUM_REPOS_PATH) + ], +- optional_files=[], os_version='9', content_channel=ContentChannel.E4S), ++ optional_files=[ ++ ('content-rhel9-sap-bundle-e4s.key', RHUI_PKI_DIR), ++ ('cdn.redhat.com-chain.crt', RHUI_PKI_DIR), ++ ('content-rhel9-sap-bundle-e4s.crt', RHUI_PKI_PRODUCT_DIR) ++ ], os_version='9', content_channel=ContentChannel.E4S), + ], + RHUIFamily(RHUIProvider.AZURE, client_files_folder='azure'): [ + mk_rhui_setup(clients={'rhui-azure-rhel7'}, os_version='7', + extra_info={'agent_pkg': 'WALinuxAgent'}), + mk_rhui_setup(clients={'rhui-azure-rhel8'}, leapp_pkg='leapp-rhui-azure', + mandatory_files=[('leapp-azure.repo', YUM_REPOS_PATH)], 
++                      optional_files=[
++                          ('key.pem', RHUI_PKI_DIR),
++                          ('content.crt', RHUI_PKI_PRODUCT_DIR)
++                      ],
+                       extra_info={'agent_pkg': 'WALinuxAgent'},
+                       os_version='8'),
+         mk_rhui_setup(clients={'rhui-azure-rhel9'}, leapp_pkg='leapp-rhui-azure',
+                       mandatory_files=[('leapp-azure.repo', YUM_REPOS_PATH)],
++                      optional_files=[
++                          ('key.pem', RHUI_PKI_DIR),
++                          ('content.crt', RHUI_PKI_PRODUCT_DIR)
++                      ],
+                       extra_info={'agent_pkg': 'WALinuxAgent'},
+                       os_version='9'),
+     ],
+@@ -199,10 +231,18 @@ RHUI_SETUPS = {
+         mk_rhui_setup(clients={'rhui-azure-rhel7-base-sap-apps'}, os_version='7', content_channel=ContentChannel.EUS),
+         mk_rhui_setup(clients={'rhui-azure-rhel8-sapapps'}, leapp_pkg='leapp-rhui-azure-sap',
+                       mandatory_files=[('leapp-azure-sap-apps.repo', YUM_REPOS_PATH)],
++                      optional_files=[
++                          ('key-sapapps.pem', RHUI_PKI_DIR),
++                          ('content-sapapps.crt', RHUI_PKI_PRODUCT_DIR)
++                      ],
+                       extra_info={'agent_pkg': 'WALinuxAgent'},
+                       os_version='8', content_channel=ContentChannel.EUS),
+         mk_rhui_setup(clients={'rhui-azure-rhel9-sapapps'}, leapp_pkg='leapp-rhui-azure-sap',
+                       mandatory_files=[('leapp-azure-sap-apps.repo', YUM_REPOS_PATH)],
++                      optional_files=[
++                          ('key-sapapps.pem', RHUI_PKI_DIR),
++                          ('content-sapapps.crt', RHUI_PKI_PRODUCT_DIR)
++                      ],
+                       extra_info={'agent_pkg': 'WALinuxAgent'},
+                       os_version='9', content_channel=ContentChannel.EUS),
+     ],
+@@ -210,10 +250,18 @@ RHUI_SETUPS = {
+         mk_rhui_setup(clients={'rhui-azure-rhel7-base-sap-ha'}, os_version='7', content_channel=ContentChannel.E4S),
+         mk_rhui_setup(clients={'rhui-azure-rhel8-sap-ha'}, leapp_pkg='leapp-rhui-azure-sap',
+                       mandatory_files=[('leapp-azure-sap-ha.repo', YUM_REPOS_PATH)],
++                      optional_files=[
++                          ('key-sap-ha.pem', RHUI_PKI_DIR),
++                          ('content-sap-ha.crt', RHUI_PKI_PRODUCT_DIR)
++                      ],
+                       extra_info={'agent_pkg': 'WALinuxAgent'},
+                       os_version='8', content_channel=ContentChannel.E4S),
+         mk_rhui_setup(clients={'rhui-azure-rhel9-sap-ha'}, leapp_pkg='leapp-rhui-azure-sap',
+                       mandatory_files=[('leapp-azure-sap-ha.repo', YUM_REPOS_PATH)],
++                      optional_files=[
++                          ('key-sap-ha.pem', RHUI_PKI_DIR),
++                          ('content-sap-ha.crt', RHUI_PKI_PRODUCT_DIR)
++                      ],
+                       extra_info={'agent_pkg': 'WALinuxAgent'},
+                       os_version='9', content_channel=ContentChannel.E4S),
+     ],
+-- 
+2.41.0
+
diff --git a/0028-checknfs-do-not-check-systemd-mounts.patch b/0028-checknfs-do-not-check-systemd-mounts.patch
new file mode 100644
index 0000000..3602ad2
--- /dev/null
+++ b/0028-checknfs-do-not-check-systemd-mounts.patch
@@ -0,0 +1,134 @@
+From bf866cb33d9aefb2d6d79fc6ea0e326c6c2a0cf3 Mon Sep 17 00:00:00 2001
+From: mhecko
+Date: Thu, 14 Sep 2023 13:43:37 +0200
+Subject: [PATCH 28/38] checknfs: do not check systemd mounts
+
+Systemd mounts contain only *block* devices. Therefore, the list can
+never contain NFS shares at all and the check is redundant. This is
+apparent if one reads storagescanner/libraries/storagescanner.py:L251.
+This patch, therefore, removes the check for systemd mounts altogether.
+---
+ .../common/actors/checknfs/actor.py           | 15 +-------
+ .../actors/checknfs/tests/test_checknfs.py    | 37 ++-----------------
+ 2 files changed, 5 insertions(+), 47 deletions(-)
+
+diff --git a/repos/system_upgrade/common/actors/checknfs/actor.py b/repos/system_upgrade/common/actors/checknfs/actor.py
+index 40ca834e..208c5dd9 100644
+--- a/repos/system_upgrade/common/actors/checknfs/actor.py
++++ b/repos/system_upgrade/common/actors/checknfs/actor.py
+@@ -10,7 +10,7 @@ class CheckNfs(Actor):
+     """
+     Check if NFS filesystem is in use. If yes, inhibit the upgrade process.
+ 
+-    Actor looks for NFS in the following sources: /ets/fstab, mount and systemd-mount.
++    Actor looks for NFS in the following sources: /etc/fstab and mount.
+     If there is NFS in any of the mentioned sources, actors inhibits the upgrade.
+     """
+     name = "check_nfs"
+@@ -41,14 +41,7 @@ class CheckNfs(Actor):
+             if _is_nfs(mount.tp):
+                 nfs_mounts.append(" - {} {}\n".format(mount.name, mount.mount))
+ 
+-        # Check systemd-mount
+-        systemd_nfs_mounts = []
+-        for systemdmount in storage.systemdmount:
+-            if _is_nfs(systemdmount.fs_type):
+-                # mountpoint is not available in the model
+-                systemd_nfs_mounts.append(" - {}\n".format(systemdmount.node))
+-
+-        if any((fstab_nfs_mounts, nfs_mounts, systemd_nfs_mounts)):
++        if any((fstab_nfs_mounts, nfs_mounts)):
+             if fstab_nfs_mounts:
+                 details += "- NFS shares found in /etc/fstab:\n"
+                 details += ''.join(fstab_nfs_mounts)
+@@ -57,10 +50,6 @@ class CheckNfs(Actor):
+                 details += "- NFS shares currently mounted:\n"
+                 details += ''.join(nfs_mounts)
+ 
+-            if systemd_nfs_mounts:
+-                details += "- NFS mounts configured with systemd-mount:\n"
+-                details += ''.join(systemd_nfs_mounts)
+-
+             fstab_related_resource = [reporting.RelatedResource('file', '/etc/fstab')] if fstab_nfs_mounts else []
+ 
+             create_report([
+diff --git a/repos/system_upgrade/common/actors/checknfs/tests/test_checknfs.py b/repos/system_upgrade/common/actors/checknfs/tests/test_checknfs.py
+index 907dca40..739b3a83 100644
+--- a/repos/system_upgrade/common/actors/checknfs/tests/test_checknfs.py
++++ b/repos/system_upgrade/common/actors/checknfs/tests/test_checknfs.py
+@@ -1,37 +1,12 @@
+ import pytest
+ 
+ from leapp.libraries.common import config
+-from leapp.models import FstabEntry, MountEntry, StorageInfo, SystemdMountEntry
++from leapp.models import FstabEntry, MountEntry, StorageInfo
+ from leapp.reporting import Report
+ from leapp.snactor.fixture import current_actor_context
+ from leapp.utils.report import is_inhibitor
+ 
+ 
+-@pytest.mark.parametrize('nfs_fstype', ('nfs', 'nfs4'))
+-def test_actor_with_systemdmount_entry(current_actor_context, nfs_fstype, monkeypatch):
+-    monkeypatch.setattr(config, 'get_env', lambda x, y: y)
+-    with_systemdmount_entry = [SystemdMountEntry(node="nfs", path="n/a", model="n/a",
+-                                                 wwn="n/a", fs_type=nfs_fstype, label="n/a",
+-                                                 uuid="n/a")]
+-    current_actor_context.feed(StorageInfo(systemdmount=with_systemdmount_entry))
+-    current_actor_context.run()
+-    report_fields = current_actor_context.consume(Report)[0].report
+-    assert is_inhibitor(report_fields)
+-
+-
+-def test_actor_without_systemdmount_entry(current_actor_context, monkeypatch):
+-    monkeypatch.setattr(config, 'get_env', lambda x, y: y)
+-    without_systemdmount_entry = [SystemdMountEntry(node="/dev/sda1",
+-                                                    path="pci-0000:00:17.0-ata-2",
+-                                                    model="TOSHIBA_THNSNJ512GDNU_A",
+-                                                    wwn="0x500080d9108e8753",
+-                                                    fs_type="ext4", label="n/a",
+-                                                    uuid="5675d309-eff7-4eb1-9c27-58bc5880ec72")]
+-    current_actor_context.feed(StorageInfo(systemdmount=without_systemdmount_entry))
+-    current_actor_context.run()
+-    assert not current_actor_context.consume(Report)
+-
+-
+ @pytest.mark.parametrize('nfs_fstype', ('nfs', 'nfs4'))
+ def test_actor_with_fstab_entry(current_actor_context, nfs_fstype, monkeypatch):
+     monkeypatch.setattr(config, 'get_env', lambda x, y: y)
+@@ -89,15 +64,12 @@ def test_actor_skipped_if_initram_network_enabled(current_actor_context, monkeyp
+     monkeypatch.setattr(config, 'get_env', lambda x, y: 'network-manager' if x == 'LEAPP_DEVEL_INITRAM_NETWORK' else y)
+     with_mount_share = 
[MountEntry(name="nfs", mount="/mnt/data", tp='nfs',
+                                    options="rw,nosuid,nodev,relatime,user_id=1000,group_id=1000")]
+-    with_systemdmount_entry = [SystemdMountEntry(node="nfs", path="n/a", model="n/a",
+-                                                 wwn="n/a", fs_type='nfs', label="n/a",
+-                                                 uuid="n/a")]
+     with_fstab_entry = [FstabEntry(fs_spec="lithium:/mnt/data", fs_file="/mnt/data",
+                                    fs_vfstype='nfs',
+                                    fs_mntops="noauto,noatime,rsize=32768,wsize=32768",
+                                    fs_freq="0", fs_passno="0")]
+     current_actor_context.feed(StorageInfo(mount=with_mount_share,
+-                                           systemdmount=with_systemdmount_entry,
++                                           systemdmount=[],
+                                            fstab=with_fstab_entry))
+     current_actor_context.run()
+     assert not current_actor_context.consume(Report)
+@@ -108,15 +80,12 @@ def test_actor_not_skipped_if_initram_network_empty(current_actor_context, monke
+     monkeypatch.setattr(config, 'get_env', lambda x, y: '' if x == 'LEAPP_DEVEL_INITRAM_NETWORK' else y)
+     with_mount_share = [MountEntry(name="nfs", mount="/mnt/data", tp='nfs',
+                                    options="rw,nosuid,nodev,relatime,user_id=1000,group_id=1000")]
+-    with_systemdmount_entry = [SystemdMountEntry(node="nfs", path="n/a", model="n/a",
+-                                                 wwn="n/a", fs_type='nfs', label="n/a",
+-                                                 uuid="n/a")]
+     with_fstab_entry = [FstabEntry(fs_spec="lithium:/mnt/data", fs_file="/mnt/data",
+                                    fs_vfstype='nfs',
+                                    fs_mntops="noauto,noatime,rsize=32768,wsize=32768",
+                                    fs_freq="0", fs_passno="0")]
+     current_actor_context.feed(StorageInfo(mount=with_mount_share,
+-                                           systemdmount=with_systemdmount_entry,
++                                           systemdmount=[],
+                                            fstab=with_fstab_entry))
+     current_actor_context.run()
+     report_fields = current_actor_context.consume(Report)[0].report
+-- 
+2.41.0
+
diff --git a/0029-Switch-from-plan-name-regex-to-filter-by-tags.patch b/0029-Switch-from-plan-name-regex-to-filter-by-tags.patch
new file mode 100644
index 0000000..7780fce
--- /dev/null
+++ b/0029-Switch-from-plan-name-regex-to-filter-by-tags.patch
@@ -0,0 +1,327 @@
+From 88e1e14090bd32acf5635959010c8e9b515fd9c5 Mon Sep 17 00:00:00 2001
+From: Inessa Vasilevskaya
+Date: Fri, 10 Nov 2023 13:39:39 +0100
+Subject: [PATCH 29/38] Switch from plan name regex to filter by tags
+
+Necessary work to adapt upstream tests to the big refactoring
+changes brought by MR303.
+--- + .packit.yaml | 130 ++++++++++++++++++++++++++++++++++++++++----------- + 1 file changed, 102 insertions(+), 28 deletions(-) + +diff --git a/.packit.yaml b/.packit.yaml +index cd6dd7d1..02cc6d52 100644 +--- a/.packit.yaml ++++ b/.packit.yaml +@@ -87,8 +87,8 @@ jobs: + + - &sanity-79to86 + job: tests +- fmf_url: "https://gitlab.cee.redhat.com/oamg/tmt-plans" +- fmf_ref: "master" ++ fmf_url: "https://gitlab.cee.redhat.com/ivasilev/tmt-plans" ++ fmf_ref: "pocgenerator" + use_internal_tf: True + trigger: pull_request + labels: +@@ -97,16 +97,17 @@ jobs: + epel-7-x86_64: + distros: [RHEL-7.9-ZStream] + identifier: sanity-7.9to8.6 +- tmt_plan: "sanity_plan" ++ tmt_plan: "" + tf_extra_params: ++ test: ++ tmt: ++ plan_filter: 'tag:sanity' + environments: + - tmt: + context: + distro: "rhel-7.9" +- # tag resources as sst_upgrades@leapp_upstream_test to enable cost metrics collection + settings: + provisioning: +- post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" + tags: + BusinessUnit: sst_upgrades@leapp_upstream_test + env: +@@ -123,13 +124,16 @@ jobs: + epel-7-x86_64: + distros: [RHEL-7.9-rhui] + identifier: sanity-7to8-aws-e2e +- tmt_plan: "(?!.*sap)(.*e2e)" ++ # NOTE(ivasilev) Unfortunately to use yaml templates we need to rewrite the whole tf_extra_params dict ++ # to use plan_filter (can't just specify one section test.tmt.plan_filter, need to specify environments.* as well) + tf_extra_params: ++ test: ++ tmt: ++ plan_filter: 'tag:e2e' + environments: + - tmt: + context: + distro: "rhel-7.9" +- # tag resources as sst_upgrades@leapp_upstream_test to enable cost metrics collection + settings: + provisioning: + post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys; yum-config-manager --enable rhel-7-server-rhui-optional-rpms" +@@ -150,7 +154,18 @@ jobs: + - beaker-minimal-7.9to8.6 + - 7.9to8.6 + identifier: sanity-7.9to8.6-beaker-minimal +- tmt_plan: "(?!.*max_sst)(.*tier1)(.*partitioning_monolithic|.*separate_var_usr_varlog|.*uefi|.*oamg4250_lvm_var_xfs_ftype0)" ++ tf_extra_params: ++ test: ++ tmt: ++ plan_filter: 'tag:partitioning & tag:7to8' ++ environments: ++ - tmt: ++ context: ++ distro: "rhel-7.9" ++ settings: ++ provisioning: ++ tags: ++ BusinessUnit: sst_upgrades@leapp_upstream_test + + # On-demand kernel-rt tests + - &kernel-rt-79to86 +@@ -160,7 +175,18 @@ jobs: + - kernel-rt-7.9to8.6 + - 7.9to8.6 + identifier: sanity-7.9to8.6-kernel-rt +- tmt_plan: "(?!.*max_sst)(.*tier1)(.*kernel-rt)" ++ tf_extra_params: ++ test: ++ tmt: ++ plan_filter: 'tag:kernel-rt & tag:7to8' ++ environments: ++ - tmt: ++ context: ++ distro: "rhel-7.9" ++ settings: ++ provisioning: ++ tags: ++ BusinessUnit: sst_upgrades@leapp_upstream_test + + - &sanity-79to88 + <<: *sanity-79to86 +@@ -185,13 +211,16 @@ jobs: + + # On-demand kernel-rt tests + - &kernel-rt-79to88 +- <<: *beaker-minimal-79to88 ++ <<: *kernel-rt-79to86 + labels: + - kernel-rt + - kernel-rt-7.9to8.8 + - 7.9to8.8 + identifier: sanity-7.9to8.8-kernel-rt +- tmt_plan: "(?!.*max_sst)(.*tier1)(.*kernel-rt)" ++ env: ++ SOURCE_RELEASE: "7.9" ++ TARGET_RELEASE: "8.8" ++ LEAPPDATA_BRANCH: "upstream" + + - &sanity-79to89 + <<: *sanity-79to86 +@@ -216,13 +245,16 @@ jobs: + + # On-demand kernel-rt tests + - &kernel-rt-79to89 +- <<: *beaker-minimal-79to89 ++ <<: *kernel-rt-79to88 + labels: + - kernel-rt + - kernel-rt-7.9to8.9 + - 7.9to8.9 + identifier: sanity-7.9to8.9-kernel-rt +- tmt_plan: "(?!.*max_sst)(.*tier1)(.*kernel-rt)" ++ env: ++ SOURCE_RELEASE: "7.9" ++ 
TARGET_RELEASE: "8.9" ++ LEAPPDATA_BRANCH: "upstream" + + - &sanity-86to90 + <<: *sanity-79to86 +@@ -231,14 +263,15 @@ jobs: + distros: [RHEL-8.6.0-Nightly] + identifier: sanity-8.6to9.0 + tf_extra_params: ++ test: ++ tmt: ++ plan_filter: 'tag:sanity & tag:8to9' + environments: + - tmt: + context: + distro: "rhel-8.6" +- # tag resources as sst_upgrades@leapp_upstream_test to enable cost metrics collection + settings: + provisioning: +- post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" + tags: + BusinessUnit: sst_upgrades@leapp_upstream_test + env: +@@ -259,14 +292,15 @@ jobs: + distros: [RHEL-8.6.0-Nightly] + identifier: sanity-8.6to9.0-beaker-minimal + tf_extra_params: ++ test: ++ tmt: ++ plan_filter: 'tag:partitioning & tag:8to9' + environments: + - tmt: + context: + distro: "rhel-8.6" +- # tag resources as sst_upgrades@leapp_upstream_test to enable cost metrics collection + settings: + provisioning: +- post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" + tags: + BusinessUnit: sst_upgrades@leapp_upstream_test + env: +@@ -283,7 +317,18 @@ jobs: + - kernel-rt-8.6to9.0 + - 8.6to9.0 + identifier: sanity-8.6to9.0-kernel-rt +- tmt_plan: "(?!.*max_sst)(.*tier1)(.*kernel-rt)" ++ tf_extra_params: ++ test: ++ tmt: ++ plan_filter: 'tag:kernel-rt & tag:8to9' ++ environments: ++ - tmt: ++ context: ++ distro: "rhel-8.6" ++ settings: ++ provisioning: ++ tags: ++ BusinessUnit: sst_upgrades@leapp_upstream_test + + - &sanity-88to92 + <<: *sanity-86to90 +@@ -292,14 +337,15 @@ jobs: + distros: [RHEL-8.8.0-Nightly] + identifier: sanity-8.8to9.2 + tf_extra_params: ++ test: ++ tmt: ++ plan_filter: 'tag:sanity & tag:8to9' + environments: + - tmt: + context: + distro: "rhel-8.8" +- # tag resources as sst_upgrades@leapp_upstream_test to enable cost metrics collection + settings: + provisioning: +- post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" + tags: + BusinessUnit: sst_upgrades@leapp_upstream_test + env: +@@ -321,11 +367,13 @@ jobs: + distros: [RHEL-8.8.0-Nightly] + identifier: sanity-8.8to9.2-beaker-minimal + tf_extra_params: ++ test: ++ tmt: ++ plan_filter: 'tag:partitioning & tag:8to9' + environments: + - tmt: + context: + distro: "rhel-8.8" +- # tag resources as sst_upgrades@leapp_upstream_test to enable cost metrics collection + settings: + provisioning: + post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" +@@ -345,7 +393,18 @@ jobs: + - kernel-rt-8.8to9.2 + - 8.8to9.2 + identifier: sanity-8.8to9.2-kernel-rt +- tmt_plan: "(?!.*max_sst)(.*tier1)(.*kernel-rt)" ++ tf_extra_params: ++ test: ++ tmt: ++ plan_filter: 'tag:kernel-rt & tag:8to9' ++ environments: ++ - tmt: ++ context: ++ distro: "rhel-8.8" ++ settings: ++ provisioning: ++ tags: ++ BusinessUnit: sst_upgrades@leapp_upstream_test + + - &sanity-89to93 + <<: *sanity-88to92 +@@ -354,14 +413,15 @@ jobs: + distros: [RHEL-8.9.0-Nightly] + identifier: sanity-8.9to9.3 + tf_extra_params: ++ test: ++ tmt: ++ plan_filter: 'tag:sanity & tag:8to9' + environments: + - tmt: + context: + distro: "rhel-8.9" +- # tag resources as sst_upgrades@leapp_upstream_test to enable cost metrics collection + settings: + provisioning: +- post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys" + tags: + BusinessUnit: sst_upgrades@leapp_upstream_test + env: +@@ -383,14 +443,15 @@ jobs: + distros: [RHEL-8.9.0-Nightly] + identifier: sanity-8.9to9.3-beaker-minimal + 
tf_extra_params:
++        test:
++          tmt:
++            plan_filter: 'tag:partitioning & tag:8to9'
+         environments:
+         - tmt:
+             context:
+               distro: "rhel-8.9"
+-          # tag resources as sst_upgrades@leapp_upstream_test to enable cost metrics collection
+           settings:
+             provisioning:
+-              post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys"
+               tags:
+                 BusinessUnit: sst_upgrades@leapp_upstream_test
+       env:
+@@ -408,7 +469,18 @@ jobs:
+       - kernel-rt-8.9to9.3
+       - 8.9to9.3
+     identifier: sanity-8.9to9.3-kernel-rt
+-    tmt_plan: "(?!.*max_sst)(.*tier1)(.*kernel-rt)"
++    tf_extra_params:
++      test:
++        tmt:
++          plan_filter: 'tag:kernel-rt & tag:8to9'
++      environments:
++      - tmt:
++          context:
++            distro: "rhel-8.9"
++        settings:
++          provisioning:
++            tags:
++              BusinessUnit: sst_upgrades@leapp_upstream_test
+
+   - &sanity-86to90-aws
+     <<: *sanity-79to86-aws
+@@ -417,11 +489,13 @@ jobs:
+       distros: [RHEL-8.6-rhui]
+     identifier: sanity-8to9-aws-e2e
+     tf_extra_params:
++      test:
++        tmt:
++          plan_filter: 'tag:e2e'
+       environments:
+       - tmt:
+           context:
+             distro: "rhel-8.6"
+-          # tag resources as sst_upgrades@leapp_upstream_test to enable cost metrics collection
+           settings:
+             provisioning:
+               post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys"
+-- 
+2.41.0
+
diff --git a/0030-Bring-back-reference-to-oamg-leapp-tests-repo.patch b/0030-Bring-back-reference-to-oamg-leapp-tests-repo.patch
new file mode 100644
index 0000000..715574e
--- /dev/null
+++ b/0030-Bring-back-reference-to-oamg-leapp-tests-repo.patch
@@ -0,0 +1,29 @@
+From 60190ff19cc8c1f840ee2d0e20f6b63fdd6e8947 Mon Sep 17 00:00:00 2001
+From: Inessa Vasilevskaya
+Date: Mon, 13 Nov 2023 14:26:07 +0100
+Subject: [PATCH 30/38] Bring back reference to oamg/leapp-tests repo
+
+After MR303 is merged to master there is no need
+to point to my fork anymore.
+---
+ .packit.yaml | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/.packit.yaml b/.packit.yaml
+index 02cc6d52..2e606a40 100644
+--- a/.packit.yaml
++++ b/.packit.yaml
+@@ -87,8 +87,8 @@ jobs:
+
+ - &sanity-79to86
+   job: tests
+-  fmf_url: "https://gitlab.cee.redhat.com/ivasilev/tmt-plans"
+-  fmf_ref: "pocgenerator"
++  fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests"
++  fmf_ref: "master"
+   use_internal_tf: True
+   trigger: pull_request
+   labels:
+-- 
+2.41.0
+
diff --git a/0031-add-the-posibility-to-upgrade-with-a-local-repositor.patch b/0031-add-the-posibility-to-upgrade-with-a-local-repositor.patch
new file mode 100644
index 0000000..7790396
--- /dev/null
+++ b/0031-add-the-posibility-to-upgrade-with-a-local-repositor.patch
@@ -0,0 +1,543 @@
+From e9f899c27688007d2e87144ccfd038b8b0a655d1 Mon Sep 17 00:00:00 2001
+From: PeterMocary
+Date: Wed, 12 Jul 2023 22:24:48 +0200
+Subject: [PATCH 31/38] add the possibility to upgrade with a local repository
+
+Upgrading with a local repository requires the repository to be hosted
+locally so that it is visible from the target user-space container during
+the upgrade. The added actor ensures that the local repository is visible
+from the container by adjusting its path, simply prefixing the host root
+mount bind '/installroot' to it. The local_repos_inhibit actor is no
+longer needed and was therefore removed.
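+
+To sketch the core of the adjustment (illustrative helper name adjust_url;
+the real code lives in _adjust_local_file_url in adjustlocalrepos.py below,
+which additionally handles 'mirrorlist' entries, quoting and commented-out
+lines), a local 'file://' URL is rewritten roughly like this:
+
+    import os
+
+    HOST_ROOT_MOUNT_BIND_PATH = '/installroot'
+    LOCAL_FILE_URL_PREFIX = 'file://'
+
+    def adjust_url(url):
+        # 'file:///my/repo' -> 'file:///installroot/my/repo'
+        path = url[len(LOCAL_FILE_URL_PREFIX):]
+        return LOCAL_FILE_URL_PREFIX + os.path.join(
+            HOST_ROOT_MOUNT_BIND_PATH, path.lstrip('/'))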
+--- + .../common/actors/adjustlocalrepos/actor.py | 48 ++++++ + .../libraries/adjustlocalrepos.py | 100 ++++++++++++ + .../tests/test_adjustlocalrepos.py | 151 ++++++++++++++++++ + .../common/actors/localreposinhibit/actor.py | 89 ----------- + .../tests/test_unit_localreposinhibit.py | 81 ---------- + .../common/libraries/dnfplugin.py | 5 +- + 6 files changed, 302 insertions(+), 172 deletions(-) + create mode 100644 repos/system_upgrade/common/actors/adjustlocalrepos/actor.py + create mode 100644 repos/system_upgrade/common/actors/adjustlocalrepos/libraries/adjustlocalrepos.py + create mode 100644 repos/system_upgrade/common/actors/adjustlocalrepos/tests/test_adjustlocalrepos.py + delete mode 100644 repos/system_upgrade/common/actors/localreposinhibit/actor.py + delete mode 100644 repos/system_upgrade/common/actors/localreposinhibit/tests/test_unit_localreposinhibit.py + +diff --git a/repos/system_upgrade/common/actors/adjustlocalrepos/actor.py b/repos/system_upgrade/common/actors/adjustlocalrepos/actor.py +new file mode 100644 +index 00000000..064e7f3e +--- /dev/null ++++ b/repos/system_upgrade/common/actors/adjustlocalrepos/actor.py +@@ -0,0 +1,48 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import adjustlocalrepos ++from leapp.libraries.common import mounting ++from leapp.libraries.stdlib import api ++from leapp.models import ( ++ TargetOSInstallationImage, ++ TargetUserSpaceInfo, ++ TMPTargetRepositoriesFacts, ++ UsedTargetRepositories ++) ++from leapp.tags import IPUWorkflowTag, TargetTransactionChecksPhaseTag ++ ++ ++class AdjustLocalRepos(Actor): ++ """ ++ Adjust local repositories to the target user-space container. ++ ++ Changes the path of local file urls (starting with 'file://') for 'baseurl' and ++ 'mirrorlist' fields to the container space for the used repositories. This is ++ done by prefixing host root mount bind ('/installroot') to the path. It ensures ++ that the files will be accessible from the container and thus proper functionality ++ of the local repository. 
++ """ ++ ++ name = 'adjust_local_repos' ++ consumes = (TargetOSInstallationImage, ++ TargetUserSpaceInfo, ++ TMPTargetRepositoriesFacts, ++ UsedTargetRepositories) ++ produces = () ++ tags = (IPUWorkflowTag, TargetTransactionChecksPhaseTag) ++ ++ def process(self): ++ target_userspace_info = next(self.consume(TargetUserSpaceInfo), None) ++ used_target_repos = next(self.consume(UsedTargetRepositories), None) ++ target_repos_facts = next(self.consume(TMPTargetRepositoriesFacts), None) ++ target_iso = next(self.consume(TargetOSInstallationImage), None) ++ ++ if not all([target_userspace_info, used_target_repos, target_repos_facts]): ++ api.current_logger().error("Missing required information to proceed!") ++ return ++ ++ target_repos_facts = target_repos_facts.repositories ++ iso_repoids = set(repo.repoid for repo in target_iso.repositories) if target_iso else set() ++ used_target_repoids = set(repo.repoid for repo in used_target_repos.repos) ++ ++ with mounting.NspawnActions(base_dir=target_userspace_info.path) as context: ++ adjustlocalrepos.process(context, target_repos_facts, iso_repoids, used_target_repoids) +diff --git a/repos/system_upgrade/common/actors/adjustlocalrepos/libraries/adjustlocalrepos.py b/repos/system_upgrade/common/actors/adjustlocalrepos/libraries/adjustlocalrepos.py +new file mode 100644 +index 00000000..55a0d075 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/adjustlocalrepos/libraries/adjustlocalrepos.py +@@ -0,0 +1,100 @@ ++import os ++ ++from leapp.libraries.stdlib import api ++ ++HOST_ROOT_MOUNT_BIND_PATH = '/installroot' ++LOCAL_FILE_URL_PREFIX = 'file://' ++ ++ ++def _adjust_local_file_url(repo_file_line): ++ """ ++ Adjusts a local file url to the target user-space container in a provided ++ repo file line by prefixing host root mount bind '/installroot' to it ++ when needed. ++ ++ :param str repo_file_line: a line from a repo file ++ :returns str: adjusted line or the provided line if no changes are needed ++ """ ++ adjust_fields = ['baseurl', 'mirrorlist'] ++ ++ if LOCAL_FILE_URL_PREFIX in repo_file_line and not repo_file_line.startswith('#'): ++ entry_field, entry_value = repo_file_line.strip().split('=', 1) ++ if not any(entry_field.startswith(field) for field in adjust_fields): ++ return repo_file_line ++ ++ entry_value = entry_value.strip('\'\"') ++ path = entry_value[len(LOCAL_FILE_URL_PREFIX):] ++ new_entry_value = LOCAL_FILE_URL_PREFIX + os.path.join(HOST_ROOT_MOUNT_BIND_PATH, path.lstrip('/')) ++ new_repo_file_line = entry_field + '=' + new_entry_value ++ return new_repo_file_line ++ return repo_file_line ++ ++ ++def _extract_repos_from_repofile(context, repo_file): ++ """ ++ Generator function that extracts repositories from a repo file in the given context ++ and yields them as list of lines that belong to the repository. 
++ ++ :param context: target user-space context ++ :param str repo_file: path to repository file (inside the provided context) ++ """ ++ with context.open(repo_file, 'r') as rf: ++ repo_file_lines = rf.readlines() ++ ++ # Detect repo and remove lines before first repoid ++ repo_found = False ++ for idx, line in enumerate(repo_file_lines): ++ if line.startswith('['): ++ repo_file_lines = repo_file_lines[idx:] ++ repo_found = True ++ break ++ ++ if not repo_found: ++ return ++ ++ current_repo = [] ++ for line in repo_file_lines: ++ line = line.strip() ++ ++ if line.startswith('[') and current_repo: ++ yield current_repo ++ current_repo = [] ++ ++ current_repo.append(line) ++ yield current_repo ++ ++ ++def _adjust_local_repos_to_container(context, repo_file, local_repoids): ++ new_repo_file = [] ++ for repo in _extract_repos_from_repofile(context, repo_file): ++ repoid = repo[0].strip('[]') ++ adjusted_repo = repo ++ if repoid in local_repoids: ++ adjusted_repo = [_adjust_local_file_url(line) for line in repo] ++ new_repo_file.append(adjusted_repo) ++ ++ # Combine the repo file contents into a string and write it back to the file ++ new_repo_file = ['\n'.join(repo) for repo in new_repo_file] ++ new_repo_file = '\n'.join(new_repo_file) ++ with context.open(repo_file, 'w') as rf: ++ rf.write(new_repo_file) ++ ++ ++def process(context, target_repos_facts, iso_repoids, used_target_repoids): ++ for repo_file_facts in target_repos_facts: ++ repo_file_path = repo_file_facts.file ++ local_repoids = set() ++ for repo in repo_file_facts.data: ++ # Skip repositories that aren't used or are provided by ISO ++ if repo.repoid not in used_target_repoids or repo.repoid in iso_repoids: ++ continue ++ # Note repositories that contain local file url ++ if repo.baseurl and LOCAL_FILE_URL_PREFIX in repo.baseurl or \ ++ repo.mirrorlist and LOCAL_FILE_URL_PREFIX in repo.mirrorlist: ++ local_repoids.add(repo.repoid) ++ ++ if local_repoids: ++ api.current_logger().debug( ++ 'Adjusting following repos in the repo file - {}: {}'.format(repo_file_path, ++ ', '.join(local_repoids))) ++ _adjust_local_repos_to_container(context, repo_file_path, local_repoids) +diff --git a/repos/system_upgrade/common/actors/adjustlocalrepos/tests/test_adjustlocalrepos.py b/repos/system_upgrade/common/actors/adjustlocalrepos/tests/test_adjustlocalrepos.py +new file mode 100644 +index 00000000..41cff200 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/adjustlocalrepos/tests/test_adjustlocalrepos.py +@@ -0,0 +1,151 @@ ++import pytest ++ ++from leapp.libraries.actor import adjustlocalrepos ++ ++REPO_FILE_1_LOCAL_REPOIDS = ['myrepo1'] ++REPO_FILE_1 = [['[myrepo1]', ++ 'name=mylocalrepo', ++ 'baseurl=file:///home/user/.local/myrepos/repo1' ++ ]] ++REPO_FILE_1_ADJUSTED = [['[myrepo1]', ++ 'name=mylocalrepo', ++ 'baseurl=file:///installroot/home/user/.local/myrepos/repo1' ++ ]] ++ ++REPO_FILE_2_LOCAL_REPOIDS = ['myrepo3'] ++REPO_FILE_2 = [['[myrepo2]', ++ 'name=mynotlocalrepo', ++ 'baseurl=https://www.notlocal.com/packages' ++ ], ++ ['[myrepo3]', ++ 'name=mylocalrepo', ++ 'baseurl=file:///home/user/.local/myrepos/repo3', ++ 'mirrorlist=file:///home/user/.local/mymirrors/repo3.txt' ++ ]] ++REPO_FILE_2_ADJUSTED = [['[myrepo2]', ++ 'name=mynotlocalrepo', ++ 'baseurl=https://www.notlocal.com/packages' ++ ], ++ ['[myrepo3]', ++ 'name=mylocalrepo', ++ 'baseurl=file:///installroot/home/user/.local/myrepos/repo3', ++ 'mirrorlist=file:///installroot/home/user/.local/mymirrors/repo3.txt' ++ ]] ++ ++REPO_FILE_3_LOCAL_REPOIDS = ['myrepo4', 'myrepo5'] 
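++
++# A quick sanity sketch of the helper under test (the input below is
++# hypothetical, not one of the fixtures in this file):
++#     adjustlocalrepos._adjust_local_file_url('baseurl=file:///srv/repo')
++#     returns 'baseurl=file:///installroot/srv/repo'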
++REPO_FILE_3 = [['[myrepo4]', ++ 'name=myrepowithlocalgpgkey', ++ 'baseurl="file:///home/user/.local/myrepos/repo4"', ++ 'gpgkey=file:///home/user/.local/pki/gpgkey', ++ 'gpgcheck=1' ++ ], ++ ['[myrepo5]', ++ 'name=myrepowithcomment', ++ 'baseurl=file:///home/user/.local/myrepos/repo5', ++ '#baseurl=file:///home/user/.local/myotherrepos/repo5', ++ 'enabled=1', ++ 'exclude=sed']] ++REPO_FILE_3_ADJUSTED = [['[myrepo4]', ++ 'name=myrepowithlocalgpgkey', ++ 'baseurl=file:///installroot/home/user/.local/myrepos/repo4', ++ 'gpgkey=file:///home/user/.local/pki/gpgkey', ++ 'gpgcheck=1' ++ ], ++ ['[myrepo5]', ++ 'name=myrepowithcomment', ++ 'baseurl=file:///installroot/home/user/.local/myrepos/repo5', ++ '#baseurl=file:///home/user/.local/myotherrepos/repo5', ++ 'enabled=1', ++ 'exclude=sed']] ++REPO_FILE_EMPTY = [] ++ ++ ++@pytest.mark.parametrize('repo_file_line, expected_adjusted_repo_file_line', ++ [('baseurl=file:///home/user/.local/repositories/repository', ++ 'baseurl=file:///installroot/home/user/.local/repositories/repository'), ++ ('baseurl="file:///home/user/my-repo"', ++ 'baseurl=file:///installroot/home/user/my-repo'), ++ ('baseurl=https://notlocal.com/packages', ++ 'baseurl=https://notlocal.com/packages'), ++ ('mirrorlist=file:///some_mirror_list.txt', ++ 'mirrorlist=file:///installroot/some_mirror_list.txt'), ++ ('gpgkey=file:///etc/pki/some.key', ++ 'gpgkey=file:///etc/pki/some.key'), ++ ('#baseurl=file:///home/user/my-repo', ++ '#baseurl=file:///home/user/my-repo'), ++ ('', ''), ++ ('[repoid]', '[repoid]')]) ++def test_adjust_local_file_url(repo_file_line, expected_adjusted_repo_file_line): ++ adjusted_repo_file_line = adjustlocalrepos._adjust_local_file_url(repo_file_line) ++ if 'file://' not in repo_file_line: ++ assert adjusted_repo_file_line == repo_file_line ++ return ++ assert adjusted_repo_file_line == expected_adjusted_repo_file_line ++ ++ ++class MockedFileDescriptor(object): ++ ++ def __init__(self, repo_file, expected_new_repo_file): ++ self.repo_file = repo_file ++ self.expected_new_repo_file = expected_new_repo_file ++ ++ @staticmethod ++ def _create_repo_file_lines(repo_file): ++ repo_file_lines = [] ++ for repo in repo_file: ++ repo = [line+'\n' for line in repo] ++ repo_file_lines += repo ++ return repo_file_lines ++ ++ def __enter__(self): ++ return self ++ ++ def __exit__(self, *args, **kwargs): ++ return ++ ++ def readlines(self): ++ return self._create_repo_file_lines(self.repo_file) ++ ++ def write(self, new_contents): ++ assert self.expected_new_repo_file ++ repo_file_lines = self._create_repo_file_lines(self.expected_new_repo_file) ++ expected_repo_file_contents = ''.join(repo_file_lines).rstrip('\n') ++ assert expected_repo_file_contents == new_contents ++ ++ ++class MockedContext(object): ++ ++ def __init__(self, repo_contents, expected_repo_contents): ++ self.repo_contents = repo_contents ++ self.expected_repo_contents = expected_repo_contents ++ ++ def open(self, path, mode): ++ return MockedFileDescriptor(self.repo_contents, self.expected_repo_contents) ++ ++ ++@pytest.mark.parametrize('repo_file, local_repoids, expected_repo_file', ++ [(REPO_FILE_1, REPO_FILE_1_LOCAL_REPOIDS, REPO_FILE_1_ADJUSTED), ++ (REPO_FILE_2, REPO_FILE_2_LOCAL_REPOIDS, REPO_FILE_2_ADJUSTED), ++ (REPO_FILE_3, REPO_FILE_3_LOCAL_REPOIDS, REPO_FILE_3_ADJUSTED)]) ++def test_adjust_local_repos_to_container(repo_file, local_repoids, expected_repo_file): ++ # The checks for expected_repo_file comparison to a adjusted form of the ++ # repo_file can be found in the 
MockedFileDescriptor.write(). ++ context = MockedContext(repo_file, expected_repo_file) ++ adjustlocalrepos._adjust_local_repos_to_container(context, '', local_repoids) ++ ++ ++@pytest.mark.parametrize('expected_repo_file, add_empty_lines', [(REPO_FILE_EMPTY, False), ++ (REPO_FILE_1, False), ++ (REPO_FILE_2, True)]) ++def test_extract_repos_from_repofile(expected_repo_file, add_empty_lines): ++ repo_file = expected_repo_file[:] ++ if add_empty_lines: # add empty lines before the first repo ++ repo_file[0] = ['', ''] + repo_file[0] ++ ++ context = MockedContext(repo_file, None) ++ repo_gen = adjustlocalrepos._extract_repos_from_repofile(context, '') ++ ++ for repo in expected_repo_file: ++ assert repo == next(repo_gen, None) ++ ++ assert next(repo_gen, None) is None +diff --git a/repos/system_upgrade/common/actors/localreposinhibit/actor.py b/repos/system_upgrade/common/actors/localreposinhibit/actor.py +deleted file mode 100644 +index 2bde7f15..00000000 +--- a/repos/system_upgrade/common/actors/localreposinhibit/actor.py ++++ /dev/null +@@ -1,89 +0,0 @@ +-from leapp import reporting +-from leapp.actors import Actor +-from leapp.models import TargetOSInstallationImage, TMPTargetRepositoriesFacts, UsedTargetRepositories +-from leapp.reporting import Report +-from leapp.tags import IPUWorkflowTag, TargetTransactionChecksPhaseTag +-from leapp.utils.deprecation import suppress_deprecation +- +- +-@suppress_deprecation(TMPTargetRepositoriesFacts) +-class LocalReposInhibit(Actor): +- """Inhibits the upgrade if local repositories were found.""" +- +- name = "local_repos_inhibit" +- consumes = ( +- UsedTargetRepositories, +- TargetOSInstallationImage, +- TMPTargetRepositoriesFacts, +- ) +- produces = (Report,) +- tags = (IPUWorkflowTag, TargetTransactionChecksPhaseTag) +- +- def collect_target_repoids_with_local_url(self, used_target_repos, target_repos_facts, target_iso): +- """Collects all repoids that have a local (file://) URL. +- +- UsedTargetRepositories doesn't contain baseurl attribute. So gathering +- them from model TMPTargetRepositoriesFacts. 
+- """ +- used_target_repoids = set(repo.repoid for repo in used_target_repos.repos) +- iso_repoids = set(iso_repo.repoid for iso_repo in target_iso.repositories) if target_iso else set() +- +- target_repofile_data = (repofile.data for repofile in target_repos_facts.repositories) +- +- local_repoids = [] +- for repo_data in target_repofile_data: +- for target_repo in repo_data: +- # Check only in repositories that are used and are not provided by the upgrade ISO, if any +- if target_repo.repoid not in used_target_repoids or target_repo.repoid in iso_repoids: +- continue +- +- # Repo fields potentially containing local URLs have different importance, check based on their prio +- url_field_to_check = target_repo.mirrorlist or target_repo.metalink or target_repo.baseurl or '' +- +- if url_field_to_check.startswith("file://"): +- local_repoids.append(target_repo.repoid) +- return local_repoids +- +- def process(self): +- used_target_repos = next(self.consume(UsedTargetRepositories), None) +- target_repos_facts = next(self.consume(TMPTargetRepositoriesFacts), None) +- target_iso = next(self.consume(TargetOSInstallationImage), None) +- +- if not used_target_repos or not target_repos_facts: +- return +- +- local_repoids = self.collect_target_repoids_with_local_url(used_target_repos, target_repos_facts, target_iso) +- if local_repoids: +- suffix, verb = ("y", "has") if len(local_repoids) == 1 else ("ies", "have") +- local_repoids_str = ", ".join(local_repoids) +- +- warn_msg = ("The following local repositor{suffix} {verb} been found: {local_repoids} " +- "(their baseurl starts with file:///). Currently leapp does not support this option.") +- warn_msg = warn_msg.format(suffix=suffix, verb=verb, local_repoids=local_repoids_str) +- self.log.warning(warn_msg) +- +- reporting.create_report( +- [ +- reporting.Title("Local repositor{suffix} detected".format(suffix=suffix)), +- reporting.Summary(warn_msg), +- reporting.Severity(reporting.Severity.HIGH), +- reporting.Groups([reporting.Groups.REPOSITORY]), +- reporting.Groups([reporting.Groups.INHIBITOR]), +- reporting.Remediation( +- hint=( +- "By using Apache HTTP Server you can expose " +- "your local repository via http. See the linked " +- "article for details. 
" +- ) +- ), +- reporting.ExternalLink( +- title=( +- "Customizing your Red Hat Enterprise Linux " +- "in-place upgrade" +- ), +- url=( +- "https://red.ht/ipu-customisation-repos-known-issues" +- ), +- ), +- ] +- ) +diff --git a/repos/system_upgrade/common/actors/localreposinhibit/tests/test_unit_localreposinhibit.py b/repos/system_upgrade/common/actors/localreposinhibit/tests/test_unit_localreposinhibit.py +deleted file mode 100644 +index 64a79e80..00000000 +--- a/repos/system_upgrade/common/actors/localreposinhibit/tests/test_unit_localreposinhibit.py ++++ /dev/null +@@ -1,81 +0,0 @@ +-import pytest +- +-from leapp.models import ( +- RepositoryData, +- RepositoryFile, +- TargetOSInstallationImage, +- TMPTargetRepositoriesFacts, +- UsedTargetRepositories, +- UsedTargetRepository +-) +-from leapp.snactor.fixture import ActorContext +- +- +-@pytest.mark.parametrize( +- ("baseurl", "mirrorlist", "metalink", "exp_msgs_len"), +- [ +- ("file:///root/crb", None, None, 1), +- ("http://localhost/crb", None, None, 0), +- (None, "file:///root/crb", None, 1), +- (None, "http://localhost/crb", None, 0), +- (None, None, "file:///root/crb", 1), +- (None, None, "http://localhost/crb", 0), +- ("http://localhost/crb", "file:///root/crb", None, 1), +- ("file:///root/crb", "http://localhost/crb", None, 0), +- ("http://localhost/crb", None, "file:///root/crb", 1), +- ("file:///root/crb", None, "http://localhost/crb", 0), +- ], +-) +-def test_unit_localreposinhibit(current_actor_context, baseurl, mirrorlist, metalink, exp_msgs_len): +- """Ensure the Report is generated when local path is used as a baseurl. +- +- :type current_actor_context: ActorContext +- """ +- with pytest.deprecated_call(): +- current_actor_context.feed( +- TMPTargetRepositoriesFacts( +- repositories=[ +- RepositoryFile( +- file="the/path/to/some/file", +- data=[ +- RepositoryData( +- name="BASEOS", +- baseurl=( +- "http://example.com/path/to/repo/BaseOS/x86_64/os/" +- ), +- repoid="BASEOS", +- ), +- RepositoryData( +- name="APPSTREAM", +- baseurl=( +- "http://example.com/path/to/repo/AppStream/x86_64/os/" +- ), +- repoid="APPSTREAM", +- ), +- RepositoryData( +- name="CRB", repoid="CRB", baseurl=baseurl, +- mirrorlist=mirrorlist, metalink=metalink +- ), +- ], +- ) +- ] +- ) +- ) +- current_actor_context.feed( +- UsedTargetRepositories( +- repos=[ +- UsedTargetRepository(repoid="BASEOS"), +- UsedTargetRepository(repoid="CRB"), +- ] +- ) +- ) +- current_actor_context.run() +- assert len(current_actor_context.messages()) == exp_msgs_len +- +- +-def test_upgrade_not_inhibited_if_iso_used(current_actor_context): +- repofile = RepositoryFile(file="path/to/some/file", +- data=[RepositoryData(name="BASEOS", baseurl="file:///path", repoid="BASEOS")]) +- current_actor_context.feed(TMPTargetRepositoriesFacts(repositories=[repofile])) +- current_actor_context.feed(UsedTargetRepositories(repos=[UsedTargetRepository(repoid="BASEOS")])) +- current_actor_context.feed(TargetOSInstallationImage(path='', mountpoint='', repositories=[])) +diff --git a/repos/system_upgrade/common/libraries/dnfplugin.py b/repos/system_upgrade/common/libraries/dnfplugin.py +index ffde211f..26810e94 100644 +--- a/repos/system_upgrade/common/libraries/dnfplugin.py ++++ b/repos/system_upgrade/common/libraries/dnfplugin.py +@@ -334,8 +334,9 @@ def install_initramdisk_requirements(packages, target_userspace_info, used_repos + """ + Performs the installation of packages into the initram disk + """ +- with _prepare_transaction(used_repos=used_repos, +- 
target_userspace_info=target_userspace_info) as (context, target_repoids, _unused):
++    mount_binds = ['/:/installroot']
++    with _prepare_transaction(used_repos=used_repos, target_userspace_info=target_userspace_info,
++                              binds=mount_binds) as (context, target_repoids, _unused):
+     if get_target_major_version() == '9':
+         _rebuild_rpm_db(context)
+     repos_opt = [['--enablerepo', repo] for repo in target_repoids]
+-- 
+2.41.0
+
diff --git a/0032-Fix-certificate-symlink-handling.patch b/0032-Fix-certificate-symlink-handling.patch
new file mode 100644
index 0000000..9bf7e44
--- /dev/null
+++ b/0032-Fix-certificate-symlink-handling.patch
@@ -0,0 +1,455 @@
+From 5202c9b126c06057e9145b4b7e02afe50c1f879d Mon Sep 17 00:00:00 2001
+From: David Kubek
+Date: Tue, 24 Oct 2023 11:49:16 +0200
+Subject: [PATCH 32/38] Fix certificate symlink handling
+
+In response to the flaws identified in the fix originally delivered for
+the feature enabling HTTP repositories, this commit addresses the
+following issues:
+
+1. Previously, files installed via RPMs that were originally symlinks
+   were being switched to regular files. This issue has been resolved
+   by preserving symlinks within the /etc/pki directory. Any symlink
+   pointing to a file within the /etc/pki directory (whether present in
+   the source system or installed by a package in the container) will be
+   present in the container, ensuring changes to certificates are
+   properly propagated.
+
+2. Lists of trusted CAs were not being updated, as the update-ca-trust
+   call was missing inside the container. This commit now includes the
+   necessary update-ca-trust call.
+
+The solution specification has been modified as follows:
+
+  - Certificate _files_ in /etc/pki (excluding symlinks) are copied to
+    the container as in the original solution.
+  - Files installed by packages within the container are preserved and
+    given higher priority.
+  - Handling of symlinks is enhanced, ensuring that symlinks within
+    the /etc/pki directory are preserved, while any symlink pointing
+    outside the /etc/pki directory will be copied as a file.
+  - Certificates are updated using `update-ca-trust`.
+---
+ .../libraries/userspacegen.py                 | 124 ++++++++--
+ .../tests/unit_test_targetuserspacecreator.py | 224 ++++++++++++++++++
+ 2 files changed, 332 insertions(+), 16 deletions(-)
+
+diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
+index 039b99a5..050ad7fe 100644
+--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
++++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
+@@ -331,12 +331,80 @@ def _get_files_owned_by_rpms(context, dirpath, pkgs=None, recursive=False):
+     return files_owned_by_rpms
+ 
+ 
++def _copy_decouple(srcdir, dstdir):
++    """
++    Copy `srcdir` to `dstdir` while decoupling symlinks.
++
++    What we mean by decoupling the `srcdir` is that any symlinks pointing
++    outside the directory will be copied as regular files. This means that the
++    directory becomes independent from its surroundings with respect to
++    symlinks. Any symlink (or symlink chain) within the directory will be
++    preserved.
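++
++    A sketch of the intended effect on a hypothetical layout (example paths,
++    not from a real system): with symlinks '/etc/pki/a -> /etc/pki/b' and
++    '/etc/pki/c -> /usr/share/f', copying '/etc/pki' keeps 'a' as a symlink
++    (its target stays inside srcdir), while 'c' is materialized as a regular
++    file with the content of '/usr/share/f'.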
++ ++ """ ++ ++ for root, dummy_dirs, files in os.walk(srcdir): ++ for filename in files: ++ relpath = os.path.relpath(root, srcdir) ++ source_filepath = os.path.join(root, filename) ++ target_filepath = os.path.join(dstdir, relpath, filename) ++ ++ # Skip and report broken symlinks ++ if not os.path.exists(source_filepath): ++ api.current_logger().warning( ++ 'File {} is a broken symlink! Will not copy the file.'.format(source_filepath)) ++ continue ++ ++ # Copy symlinks to the target userspace ++ source_is_symlink = os.path.islink(source_filepath) ++ pointee = None ++ if source_is_symlink: ++ pointee = os.readlink(source_filepath) ++ ++ # If source file is a symlink within `srcdir` then preserve it, ++ # otherwise resolve and copy it as a file it points to ++ if pointee is not None and not pointee.startswith(srcdir): ++ # Follow the path until we hit a file or get back to /etc/pki ++ while not pointee.startswith(srcdir) and os.path.islink(pointee): ++ pointee = os.readlink(pointee) ++ ++ # Pointee points to a _regular file_ outside /etc/pki so we ++ # copy it instead ++ if not pointee.startswith(srcdir) and not os.path.islink(pointee): ++ source_is_symlink = False ++ source_filepath = pointee ++ else: ++ # pointee points back to /etc/pki ++ pass ++ ++ # Ensure parent directory exists ++ parent_dir = os.path.dirname(target_filepath) ++ # Note: This is secure because we know that parent_dir is located ++ # inside of `$target_userspace/etc/pki` which is a directory that ++ # is not writable by unprivileged users. If this function is used ++ # elsewhere we may need to be more careful before running `mkdir -p`. ++ run(['mkdir', '-p', parent_dir]) ++ ++ if source_is_symlink: ++ # Preserve the owner and permissions of the original symlink ++ run(['ln', '-s', pointee, target_filepath]) ++ run(['chmod', '--reference={}'.format(source_filepath), target_filepath]) ++ continue ++ ++ run(['cp', '-a', source_filepath, target_filepath]) ++ ++ + def _copy_certificates(context, target_userspace): + """ +- Copy the needed certificates into the container, but preserve original ones ++ Copy certificates from source system into the container, but preserve ++ original ones + + Some certificates are already installed in the container and those are + default certificates for the target OS, so we preserve these. ++ ++ We respect the symlink hierarchy of the source system within the /etc/pki ++ folder. Dangling symlinks will be ignored. ++ + """ + + target_pki = os.path.join(target_userspace, 'etc', 'pki') +@@ -346,36 +414,56 @@ def _copy_certificates(context, target_userspace): + files_owned_by_rpms = _get_files_owned_by_rpms(target_context, '/etc/pki', recursive=True) + api.current_logger().debug('Files owned by rpms: {}'.format(' '.join(files_owned_by_rpms))) + ++ # Backup container /etc/pki + run(['mv', target_pki, backup_pki]) +- context.copytree_from('/etc/pki', target_pki) + ++ # Copy source /etc/pki to the container ++ _copy_decouple('/etc/pki', target_pki) ++ ++ # Assertion: after running _copy_decouple(), no broken symlinks exist in /etc/pki in the container ++ # So any broken symlinks created will be by the installed packages. 
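++    #
++    # Illustration (file name assumed for the example): if both the host
++    # and a package installed in the container provide
++    # 'tls/certs/ca-bundle.crt', the loop below restores the container's
++    # copy from backup_pki, so the package-owned file wins.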
++
++    # Recover installed packages as they always get precedence
+     for filepath in files_owned_by_rpms:
+         src_path = os.path.join(backup_pki, filepath)
+         dst_path = os.path.join(target_pki, filepath)
+ 
+         # Resolve and skip any broken symlinks
+         is_broken_symlink = False
+-        while os.path.islink(src_path):
+-            # The symlink points to a path relative to the target userspace so
+-            # we need to readjust it
+-            next_path = os.path.join(target_userspace, os.readlink(src_path)[1:])
+-            if not os.path.exists(next_path):
+-                is_broken_symlink = True
+-
+-                # The path original path of the broken symlink in the container
+-                report_path = os.path.join(target_pki, os.path.relpath(src_path, backup_pki))
+-                api.current_logger().warning('File {} is a broken symlink!'.format(report_path))
+-                break
+-
+-            src_path = next_path
++        pointee = None
++        if os.path.islink(src_path):
++            pointee = os.path.join(target_userspace, os.readlink(src_path)[1:])
++
++            seen = set()
++            while os.path.islink(pointee):
++                # The symlink points to a path relative to the target userspace so
++                # we need to readjust it
++                pointee = os.path.join(target_userspace, os.readlink(pointee)[1:])
++                if not os.path.exists(pointee) or pointee in seen:
++                    is_broken_symlink = True
++
++                    # The original path of the broken symlink in the container
++                    report_path = os.path.join(target_pki, os.path.relpath(src_path, backup_pki))
++                    api.current_logger().warning(
++                        'File {} is a broken symlink! Will not copy!'.format(report_path))
++                    break
++
++                seen.add(pointee)
+ 
+         if is_broken_symlink:
+             continue
+ 
++        # Cleanup conflicting files
+         run(['rm', '-rf', dst_path])
++
++        # Ensure destination exists
+         parent_dir = os.path.dirname(dst_path)
+         run(['mkdir', '-p', parent_dir])
+-        run(['cp', '-a', src_path, dst_path])
++
++        # Copy the new file
++        run(['cp', '-R', '--preserve=all', src_path, dst_path])
++
++    run(['rm', '-rf', backup_pki])
+ 
+ 
+ def _prep_repository_access(context, target_userspace):
+@@ -387,6 +475,10 @@ def _prep_repository_access(context, target_userspace):
+     backup_yum_repos_d = os.path.join(target_etc, 'yum.repos.d.backup')
+ 
+     _copy_certificates(context, target_userspace)
++    # NOTE(dkubek): context.call(['update-ca-trust']) seems to not be working.
++    #              I am not really sure why. The changes to files are not
++ run(["chroot", target_userspace, "/bin/bash", "-c", "su - -c update-ca-trust"]) + + if not rhsm.skip_rhsm(): + run(['rm', '-rf', os.path.join(target_etc, 'rhsm')]) +diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py +index cc684c7d..1a1ee56e 100644 +--- a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py ++++ b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py +@@ -1,4 +1,8 @@ ++from __future__ import division ++ + import os ++import subprocess ++import sys + from collections import namedtuple + + import pytest +@@ -11,6 +15,12 @@ from leapp.libraries.common.config import architecture + from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked, produce_mocked + from leapp.utils.deprecation import suppress_deprecation + ++if sys.version_info < (2, 8): ++ from pathlib2 import Path ++else: ++ from pathlib import Path ++ ++ + CUR_DIR = os.path.dirname(os.path.abspath(__file__)) + _CERTS_PATH = os.path.join(CUR_DIR, '../../../files', userspacegen.PROD_CERTS_FOLDER) + _DEFAULT_CERT_PATH = os.path.join(_CERTS_PATH, '8.1', '479.pem') +@@ -48,6 +58,220 @@ class MockedMountingBase(object): + pass + + ++def traverse_structure(structure, root=Path('/')): ++ for filename, links_to in structure.items(): ++ filepath = root / filename ++ ++ if isinstance(links_to, dict): ++ for pair in traverse_structure(links_to, filepath): ++ yield pair ++ else: ++ yield (filepath, links_to) ++ ++ ++def assert_directory_structure_matches(root, initial, expected): ++ # Assert every file that is supposed to be present is present ++ for filepath, links_to in traverse_structure(expected, root=root / 'expected'): ++ assert filepath.exists() ++ ++ if links_to is None: ++ assert filepath.is_file() ++ continue ++ ++ assert filepath.is_symlink() ++ assert os.readlink(str(filepath)) == str(root / 'initial' / links_to.lstrip('/')) ++ ++ # Assert there are no extra files ++ result_dir = str(root / 'expected') ++ for fileroot, dummy_dirs, files in os.walk(result_dir): ++ for filename in files: ++ dir_path = os.path.relpath(fileroot, result_dir).split('/') ++ ++ cwd = expected ++ for directory in dir_path: ++ cwd = cwd[directory] ++ ++ assert filename in cwd ++ ++ filepath = os.path.join(fileroot, filename) ++ if os.path.islink(filepath): ++ links_to = '/' + os.path.relpath(os.readlink(filepath), str(root / 'initial')) ++ assert cwd[filename] == links_to ++ ++ ++@pytest.fixture ++def temp_directory_layout(tmp_path, initial_structure): ++ for filepath, links_to in traverse_structure(initial_structure, root=tmp_path / 'initial'): ++ file_path = tmp_path / filepath ++ file_path.parent.mkdir(parents=True, exist_ok=True) ++ ++ if links_to is None: ++ file_path.touch() ++ continue ++ ++ file_path.symlink_to(tmp_path / 'initial' / links_to.lstrip('/')) ++ ++ (tmp_path / 'expected').mkdir() ++ assert (tmp_path / 'expected').exists() ++ ++ return tmp_path ++ ++ ++# The semantics of initial_structure and expected_structure are as follows: ++# ++# 1. The outermost dictionary encodes the root of a directory structure ++# ++# 2. Depending on the value for a key in a dict, each key in the dictionary ++# denotes the name of either a: ++# a) directory -- if value is dict ++# b) regular file -- if value is None ++# c) symlink -- if a value is str ++# ++# 3. 
The value of a symlink entry is a absolute path to a file in the context of ++# the structure. ++# ++@pytest.mark.parametrize('initial_structure,expected_structure', [ ++ ({ # Copy a regular file ++ 'dir': { ++ 'fileA': None ++ } ++ }, { ++ 'dir': { ++ 'fileA': None ++ } ++ }), ++ ({ # Do not copy a broken symlink ++ 'dir': { ++ 'fileA': 'nonexistent' ++ } ++ }, { ++ 'dir': {} ++ }), ++ ({ # Copy a regular symlink ++ 'dir': { ++ 'fileA': '/dir/fileB', ++ 'fileB': None ++ } ++ }, { ++ 'dir': { ++ 'fileA': '/dir/fileB', ++ 'fileB': None ++ } ++ }), ++ ({ # Do not copy a chain of broken symlinks ++ 'dir': { ++ 'fileA': '/dir/fileB', ++ 'fileB': 'nonexistent' ++ } ++ }, { ++ 'dir': {} ++ }), ++ ({ # Copy a chain of symlinks ++ 'dir': { ++ 'fileA': '/dir/fileB', ++ 'fileB': '/dir/fileC', ++ 'fileC': None ++ } ++ }, { ++ 'dir': { ++ 'fileA': '/dir/fileB', ++ 'fileB': '/dir/fileC', ++ 'fileC': None ++ } ++ }), ++ ({ # Circular symlinks ++ 'dir': { ++ 'fileA': '/dir/fileB', ++ 'fileB': '/dir/fileC', ++ 'fileC': '/dir/fileC', ++ } ++ }, { ++ 'dir': {} ++ }), ++ ({ # Copy a link to a file outside the considered directory as file ++ 'dir': { ++ 'fileA': '/dir/fileB', ++ 'fileB': '/dir/fileC', ++ 'fileC': '/outside/fileOut', ++ 'fileE': None ++ }, ++ 'outside': { ++ 'fileOut': '/outside/fileD', ++ 'fileD': '/dir/fileE' ++ } ++ }, { ++ 'dir': { ++ 'fileA': '/dir/fileB', ++ 'fileB': '/dir/fileC', ++ 'fileC': '/dir/fileE', ++ 'fileE': None, ++ } ++ }), ++ ({ # Same test with a nested structure within the source dir ++ 'dir': { ++ 'nested': { ++ 'fileA': '/dir/nested/fileB', ++ 'fileB': '/dir/nested/fileC', ++ 'fileC': '/outside/fileOut', ++ 'fileE': None ++ } ++ }, ++ 'outside': { ++ 'fileOut': '/outside/fileD', ++ 'fileD': '/dir/nested/fileE' ++ } ++ }, { ++ 'dir': { ++ 'nested': { ++ 'fileA': '/dir/nested/fileB', ++ 'fileB': '/dir/nested/fileC', ++ 'fileC': '/dir/nested/fileE', ++ 'fileE': None ++ } ++ } ++ }), ++ ({ # Same test with a nested structure in the outside dir ++ 'dir': { ++ 'fileA': '/dir/fileB', ++ 'fileB': '/dir/fileC', ++ 'fileC': '/outside/nested/fileOut', ++ 'fileE': None ++ }, ++ 'outside': { ++ 'nested': { ++ 'fileOut': '/outside/nested/fileD', ++ 'fileD': '/dir/fileE' ++ } ++ } ++ }, { ++ 'dir': { ++ 'fileA': '/dir/fileB', ++ 'fileB': '/dir/fileC', ++ 'fileC': '/dir/fileE', ++ 'fileE': None, ++ } ++ }), ++] ++) ++def test_copy_decouple(monkeypatch, temp_directory_layout, initial_structure, expected_structure): ++ ++ def run_mocked(command): ++ subprocess.Popen( ++ ' '.join(command), ++ shell=True, ++ stdout=subprocess.PIPE, ++ stderr=subprocess.STDOUT, ++ ).wait() ++ ++ monkeypatch.setattr(userspacegen, 'run', run_mocked) ++ userspacegen._copy_decouple( ++ str(temp_directory_layout / 'initial' / 'dir'), ++ str(temp_directory_layout / 'expected' / 'dir'), ++ ) ++ ++ assert_directory_structure_matches(temp_directory_layout, initial_structure, expected_structure) ++ ++ + @pytest.mark.parametrize('result,dst_ver,arch,prod_type', [ + (os.path.join(_CERTS_PATH, '8.1', '479.pem'), '8.1', architecture.ARCH_X86_64, 'ga'), + (os.path.join(_CERTS_PATH, '8.1', '419.pem'), '8.1', architecture.ARCH_ARM64, 'ga'), +-- +2.41.0 + diff --git a/0033-Add-prod-certs-and-upgrade-paths-for-8.10-9.4.patch b/0033-Add-prod-certs-and-upgrade-paths-for-8.10-9.4.patch new file mode 100644 index 0000000..4a031c5 --- /dev/null +++ b/0033-Add-prod-certs-and-upgrade-paths-for-8.10-9.4.patch @@ -0,0 +1,701 @@ +From b099660b5a11ca09b3bc80bab105ba89322a331f Mon Sep 17 00:00:00 2001 +From: Petr Stodulka +Date: Wed, 15 
Nov 2023 15:10:15 +0100 +Subject: [PATCH 33/38] Add prod certs and upgrade paths for 8.10 & 9.4 + +--- + .../common/files/prod-certs/8.10/279.pem | 35 ++++++++++++++++++ + .../common/files/prod-certs/8.10/362.pem | 36 +++++++++++++++++++ + .../common/files/prod-certs/8.10/363.pem | 35 ++++++++++++++++++ + .../common/files/prod-certs/8.10/419.pem | 35 ++++++++++++++++++ + .../common/files/prod-certs/8.10/433.pem | 35 ++++++++++++++++++ + .../common/files/prod-certs/8.10/479.pem | 35 ++++++++++++++++++ + .../common/files/prod-certs/8.10/486.pem | 35 ++++++++++++++++++ + .../common/files/prod-certs/8.10/72.pem | 35 ++++++++++++++++++ + .../common/files/prod-certs/9.4/279.pem | 35 ++++++++++++++++++ + .../common/files/prod-certs/9.4/362.pem | 36 +++++++++++++++++++ + .../common/files/prod-certs/9.4/363.pem | 35 ++++++++++++++++++ + .../common/files/prod-certs/9.4/419.pem | 35 ++++++++++++++++++ + .../common/files/prod-certs/9.4/433.pem | 35 ++++++++++++++++++ + .../common/files/prod-certs/9.4/479.pem | 35 ++++++++++++++++++ + .../common/files/prod-certs/9.4/486.pem | 35 ++++++++++++++++++ + .../common/files/prod-certs/9.4/72.pem | 35 ++++++++++++++++++ + 16 files changed, 562 insertions(+) + create mode 100644 repos/system_upgrade/common/files/prod-certs/8.10/279.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/8.10/362.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/8.10/363.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/8.10/419.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/8.10/433.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/8.10/479.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/8.10/486.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/8.10/72.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/9.4/279.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/9.4/362.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/9.4/363.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/9.4/419.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/9.4/433.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/9.4/479.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/9.4/486.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/9.4/72.pem + +diff --git a/repos/system_upgrade/common/files/prod-certs/8.10/279.pem b/repos/system_upgrade/common/files/prod-certs/8.10/279.pem +new file mode 100644 +index 00000000..e5cd4895 +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/8.10/279.pem +@@ -0,0 +1,35 @@ ++-----BEGIN CERTIFICATE----- ++MIIGJjCCBA6gAwIBAgIJALDxRLt/tVC4MA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIzMDcxMjIxMjMzOFoXDTQzMDcx ++MjIxMjMzOFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFtkOTE4MGJk ++ZS1jZjdiLTRlMzktODY3Yy01YjlhZjQwYTczM2ZdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I 
++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBrzCBrDAJBgNVHRMEAjAAMEMGDCsGAQQBkggJAYIXAQQzDDFSZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIFBvd2VyLCBsaXR0bGUgZW5kaWFuMBYGDCsG ++AQQBkggJAYIXAgQGDAQ4LjEwMBkGDCsGAQQBkggJAYIXAwQJDAdwcGM2NGxlMCcG ++DCsGAQQBkggJAYIXBAQXDBVyaGVsLTgscmhlbC04LXBwYzY0bGUwDQYJKoZIhvcN ++AQELBQADggIBAIekB01efwoAed6nHz/siMJ+F4M/AiuaVxl6BoPDxTEC2nLf0pJH ++qaA1wWUltdP7W6YDNuq3KjdigeOG0MYcL6jphWEyC2s94XGdIMpU1lXCIKrjlt/D ++HD2MqYNwMsLOTt7CCayVwkZN0tLpLMybrhPjdMq6hOu3Fg1qyf8KQAjkKRF98n6Y ++dQuEW2rpwaSPAyucgIAKy8w7vwL/ABSNlHO7vL3yNarKSN0cNjS3b/pjBnC1LClL ++zQJY89GzYV2vgctjBqKkpJMccHDwVXkzZIcD5tFOOnq4GwGcKHucQJs7uMY8xvKB ++/7S917v2ryVveHYKm6bUD1AwnXGFd1timpKHxvRqIJqGi0tzTITD2joiLdyF0iPf ++bbet4WWgpwudwLc6Q6lI7SSXMWPOp3eZTtYAQhOcM7BymbST5jum5Rs+lzvY3lHn ++SIJsZnx4Q+31c0D412BH4hLHVrDgzQBIlbDwToVJFays/8dX8nixEZkUlHBZTSHk ++XSYFml/GgKMJ6C3aytK8B84mIzZlc3YMwVEmlqVWwylSufTnK678jBNHjVE/Nm1V ++VgwhNZXacSf5Q0/WBN5GqmkqQqktNlKdIDenr/f1psh9Tvz3j5aJQPV6UOYm6m5A ++FrdJMf4Gc4Snn1WAa/bElspZBc3pXnJkZBkxsk5UvvKMlEvCWqFYtQfY ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/8.10/362.pem b/repos/system_upgrade/common/files/prod-certs/8.10/362.pem +new file mode 100644 +index 00000000..51ce132a +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/8.10/362.pem +@@ -0,0 +1,36 @@ ++-----BEGIN CERTIFICATE----- ++MIIGNTCCBB2gAwIBAgIJALDxRLt/tVCiMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIzMDcxMjIxMjMyMFoXDTQzMDcx ++MjIxMjMyMFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFthOWU3ZmM1 ++Mi05MDgyLTRiYWUtODJiMi0yYTEyZDkxYmNiMzZdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBvjCBuzAJBgNVHRMEAjAAMEgGDCsGAQQBkggJAYJqAQQ4DDZSZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIFBvd2VyLCBsaXR0bGUgZW5kaWFuIEJldGEw ++GwYMKwYBBAGSCAkBgmoCBAsMCTguMTAgQmV0YTAZBgwrBgEEAZIICQGCagMECQwH ++cHBjNjRsZTAsBgwrBgEEAZIICQGCagQEHAwacmhlbC04LHJoZWwtOC1iZXRhLXBw ++YzY0bGUwDQYJKoZIhvcNAQELBQADggIBAB/7qr5HDUDdX2MBQSGStHTvT2Bepy1L ++ZWWrjFmoGOobW+Lee8/J7hPU5PNka7zqOjFFwi3oWOiPTMnJj3AkqWPaUnPemS/Q ++Jy9YDd14GZGefUAiczJYw5ZeY4HbOBEvPBnU/gSn3qbNiKZzWRR+cpD2SLF1pgIL ++05LU0+EKlIT8SNvTui3pFOqjuOeXPHeCF7sGG8r0ZEFtkyrqFReNT8iXy8wadG7k 
++NcwMFttl0XR5qUWJbhkhMasMsyy2JZmdTzmqodxYvlhfpe+4naPOVH8brKkwM+iH ++sDZ2fFL+KOOUmybeV5bsOjGtcfbkKJ5g+h2JyyyO2O2p5hXsnpf7cSjwF2c07QaT ++SihdvNPA5V2UUPCScF9eAXveJeMFS+JOJDDyohxpr8uzg8Pz4dlMFe9YX4YUBP6I ++Kx3BWh5yagrGCyMAlw27IUeoVELWQXRaZnXngDO+2y/RDj2wVJi3gcajsrcHsjSn ++s5yQfNOb2hu6W13QbjXqFj8NZoszG120F3G09oC/wzYf5PCD+7PeVMKKefZfeWSw ++NEWrrBBZI6mJyVVeH1MLLdehI8Qt5ymBNELjNy5l8ITBFWFVqHYoRvY0kyDF1d8X ++o7Vk8hgiqShporkHWvW/sz/rFjvW6VRUu5Qx3KiXWnGIIM/Vq4FF9CjogvIvKWTN ++Oi1mTwT3Uq5c ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/8.10/363.pem b/repos/system_upgrade/common/files/prod-certs/8.10/363.pem +new file mode 100644 +index 00000000..7e7377f5 +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/8.10/363.pem +@@ -0,0 +1,35 @@ ++-----BEGIN CERTIFICATE----- ++MIIGJzCCBA+gAwIBAgIJALDxRLt/tVChMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIzMDcxMjIxMjMyMFoXDTQzMDcx ++MjIxMjMyMFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs2NDA1ZWIw ++My04OTQzLTQ1ZTAtYjFiMC1mOTBiZmEzZDk2YjNdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBsDCBrTAJBgNVHRMEAjAAMDoGDCsGAQQBkggJAYJrAQQqDChSZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIEFSTSA2NCBCZXRhMBsGDCsGAQQBkggJAYJr ++AgQLDAk4LjEwIEJldGEwGQYMKwYBBAGSCAkBgmsDBAkMB2FhcmNoNjQwLAYMKwYB ++BAGSCAkBgmsEBBwMGnJoZWwtOCxyaGVsLTgtYmV0YS1hYXJjaDY0MA0GCSqGSIb3 ++DQEBCwUAA4ICAQA6dNrnTJSRXP0e4YM0m+0YRtI7Zh5mI67FA0HcYpnI6VDSl6KC ++9jQwb8AXT/FN35UJ68V/0TdR7K2FFbk4iZ/zTJACQBV+SnjkJRa/a46FA3qgftiW ++Lo74gTdYTqgo3pOgCYDrY8YnEtrTLdTdzVc95MLV5DdQuqyI1whawzW5b/DSildc ++f0rwI7kaSEl4NSc4ZZEiT9Qq3S/QGd2pIYGpDA+4WYXA2Nnlt/W31Khm7G+r7suj ++j9NNYs8Ddc63o86NBSLyKrCwry9lrn/1Vt8j5LQsiuHhjmxu5YMemvUPGR9o87r5 ++1dEMAN4fwY4RULy072UjLoyWLHlRx8N9lCcHtQjbakmq9Ic+le2onvlq9yJ3nsWS ++kd1SUHtl/Ag/t6Qe5a+tWxZpUY2sG/nrrtdEK3zlMK665qlWoHuCRPcjQFa2UltR ++8qtO4AehozcKjR8HSS2BeDsR9IyBxDUYLkwY7sS33CbJAJcFfsV2h7usM9gEogp4 ++xuzxgEQEEwi/z3dXYvDuw9RPKE7jEYG+7xrYuG5KGz2bD1NEo2pMs5T9ZkklmRGQ ++JOrDe2uI9X1x0Rz+DbFvR6vUYrZ9aYtPOQ5u3VU0pGszwXNZDNILc9W8Qakci4y3 ++BBHqh7EVE4MN1PEDoT0NnvXsYBXoEwxBg4KihqgKqPT9titqeFWzUOWtRw== ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/8.10/419.pem b/repos/system_upgrade/common/files/prod-certs/8.10/419.pem +new file mode 100644 +index 00000000..7f3e91af +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/8.10/419.pem +@@ -0,0 +1,35 @@ ++-----BEGIN CERTIFICATE----- ++MIIGGDCCBACgAwIBAgIJALDxRLt/tVC3MA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI 
++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIzMDcxMjIxMjMzOFoXDTQzMDcx ++MjIxMjMzOFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFtiNGFlN2Yx ++OS04OGU5LTRhNmItYTU3Ni0xYjllMjU1YWNkZTZdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBoTCBnjAJBgNVHRMEAjAAMDUGDCsGAQQBkggJAYMjAQQlDCNSZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIEFSTSA2NDAWBgwrBgEEAZIICQGDIwIEBgwE ++OC4xMDAZBgwrBgEEAZIICQGDIwMECQwHYWFyY2g2NDAnBgwrBgEEAZIICQGDIwQE ++FwwVcmhlbC04LHJoZWwtOC1hYXJjaDY0MA0GCSqGSIb3DQEBCwUAA4ICAQBsYUdo ++XoheLJKoMUt8cnq0eLfsXORLtYbFsFpemGx0i/Bc1Gy/ZO99Z5X50fn7jjGI1jFg ++GkRdz0q+inZilds3xD4hIhMHrX5nxupC6Ao5n1jDLQNYFFpLlKODStQHjv8KUMzY ++iFY4kCnC1AmfClEx+oM32gEb5O9okyNDAZhuQYUT6YMhpbcm2tVNtw08OvcJfXqP ++lQWzzB21jlqW79cBm3u/5mrHWBFSkbqOys6WjznMVBo77y32W4y3/TYebN64IfRA ++QouQasPXJ+PPP34rXZmTMhSEbU712fYmby913w+17M6u6FWQjLpGA3pancWLrXqo ++Fu1THyO0eyZDRf6IoMFlNZTqJs4Sd96zhNQOcetDnebR9n9oDSjs8zO8AmDtAUox ++Ni6hR2SF4JAgViARPC9kqEWNKg957mySz0JifPVCKW+uWhLAej2AaJMWaPsrtQfj ++k4EiDPrgXFw6C6s5ilf1653QT1PN3d4PLVh8K4iTwfanPHIQ5lJX8tYXWBDCwJ6n ++aY5SX340p542uMuP0/LkGu2Q0I8gH2Qv4v12zkQ8lAp1PND79xwbP9QK0Swuc8TP ++ob9tipL9hhp2SJqHjiD5lbP8r3NpZ+NEEKfnv1mH0iMVCRg6Nz4MJyV/u4Zk3bvw ++2vYet0eK5Dy9amxFK+uun5IyPi2xTm29T8E5Nw== ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/8.10/433.pem b/repos/system_upgrade/common/files/prod-certs/8.10/433.pem +new file mode 100644 +index 00000000..d2374e61 +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/8.10/433.pem +@@ -0,0 +1,35 @@ ++-----BEGIN CERTIFICATE----- ++MIIGKjCCBBKgAwIBAgIJALDxRLt/tVCjMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIzMDcxMjIxMjMyMVoXDTQzMDcx ++MjIxMjMyMVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs0Y2M0ZWZk ++Ni03NzlhLTQwNmQtOTNhMy0zZTI5YzM0NThkNGNdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ 
++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBszCBsDAJBgNVHRMEAjAAMEEGDCsGAQQBkggJAYMxAQQxDC9SZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIElCTSB6IFN5c3RlbXMgQmV0YTAbBgwrBgEE ++AZIICQGDMQIECwwJOC4xMCBCZXRhMBcGDCsGAQQBkggJAYMxAwQHDAVzMzkweDAq ++BgwrBgEEAZIICQGDMQQEGgwYcmhlbC04LHJoZWwtOC1iZXRhLXMzOTB4MA0GCSqG ++SIb3DQEBCwUAA4ICAQBwAhNSGFtdCSq4Q9NnnUXaH96asoGlwkW1KqcDUOJjw8nP ++j6svp9BB/ieqNpNL4Xa5Yf5oFDb2i0pYVUE1rTsVzsakqg0Ny87ARCZ/Ofwh4J9C ++9as722+VquxVWhvGL+Wx2LNrFseRJsD91dD2kUbKGSPDyW3dwpdTsfKF22LVVcwn ++oWc92VyoPm949wt8nytW2H4Rd4mCGLPpd2xoLemf6fgbDgqdbZEs8EUC0vlRon97 ++ZEtNBFYEWNJCi/VMGPasele2rdn1/uYghVlLgQGwk0C0aj0a4P/DIyC9gmL+Wcmo ++ZOslsdAl5wl/7hQ/myRMsjCtd9CTFiXACNmHT+16jjvw09xae3vivd4XaDrUpVPn ++TelOfBM9GDd1yqFDa6t6SdS/SNCw2XV0S41gFvDeeskJjvfvpuJ63otjbc/RATMD ++oIlU7YaL5l0Wx/3IOHX8bo08xxILlBywVOxLYjdjJA0jwWW1rUSXvsZqHHPVObYW ++9eLybvkZ+8Ob72QzgNZA6yCuYrVLQV53pAfliVljB+fQVM6Qh/G8OO9CpiY8fnBr ++z+XbIJb+WlSuHmuCVayTG4/VDlYOMpUvOWw6x3fq8qxj8eX2C8r5v3qa0L2joF+Z ++wlVQOuIsrS5i8lmqBO5+Qg07zmCM7xWEfwxOCVbMMoXmjMlLQDMS2slXRwtKaQ== ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/8.10/479.pem b/repos/system_upgrade/common/files/prod-certs/8.10/479.pem +new file mode 100644 +index 00000000..9e98c9c5 +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/8.10/479.pem +@@ -0,0 +1,35 @@ ++-----BEGIN CERTIFICATE----- ++MIIGFjCCA/6gAwIBAgIJALDxRLt/tVC6MA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIzMDcxMjIxMjMzOVoXDTQzMDcx ++MjIxMjMzOVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFtlMmY2ZTE4 ++ZS05OGE3LTRiZjktYWNkYS0yZGVjZjk3Yzg1NzddMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBnzCBnDAJBgNVHRMEAjAAMDUGDCsGAQQBkggJAYNfAQQlDCNSZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIHg4Nl82NDAWBgwrBgEEAZIICQGDXwIEBgwE ++OC4xMDAYBgwrBgEEAZIICQGDXwMECAwGeDg2XzY0MCYGDCsGAQQBkggJAYNfBAQW ++DBRyaGVsLTgscmhlbC04LXg4Nl82NDANBgkqhkiG9w0BAQsFAAOCAgEAWP58UYdf ++h7NZhrgxvfDqQBP8tcJaoG96PQUXhOCfNOOwExxncFM7EDn/OvRgNJFQ3Hm+T25m ++xPg6FoCtuVdEtWyKLpuKLdc7NaFVNqqid6ULJyHvsZ8ju3lMaCAGZIV6qwiftHTb ++JhIzbpEak2UeNbLHNJ6WtAQ1pssJYrmq6eK8ScewQ2DtUCnyVF6gJS86bzy+tbst ++8KBImeasSXMjc+XGx22aNBHV+G2LSpi/bSHstqjPHmfFOJvIYGG7grKDVTka/TmX ++yJDl5yydHIPkWlBTu/VLb9m5V4Ke7Zu1nnMkaXoXdtx8DGcfEv8Eqqp5jAiFRUP0 ++KfvF4yRcFdsVGeHXiWt3fN8EbwXiNHWO69/9fQgzJXXhkfMHbHAWbGcAgYl7A2r9 ++w4SfACOvJAXSgaGr2KAKzNuWiLDDl2UJTLsF5IeGudc/lOlaDUM8RWKmWIOh+jup ++T/g/KuYTtNukyqiwPuaWkwwM6kyuqsm/3z2d76ZbiCkcqTfqfHvOA2fzgxWocUPi ++pg0PQ0NoxJRss1fZ3qu97d0e5p21M92UI1dn+uo/dyw7Xg3Ka2+AWfIs5HP0Fh2e ++lal4LKNjRx+bpApcPSQ2y7exTr1Jni4yHVBC8CQeomoQqmgKLnJ4RB9gsxx4lvf/ 
++GryScFMDmJk5elrgja1hA5cuV5Rqb3gvyy4= ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/8.10/486.pem b/repos/system_upgrade/common/files/prod-certs/8.10/486.pem +new file mode 100644 +index 00000000..491f9f2d +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/8.10/486.pem +@@ -0,0 +1,35 @@ ++-----BEGIN CERTIFICATE----- ++MIIGJTCCBA2gAwIBAgIJALDxRLt/tVCkMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIzMDcxMjIxMjMyMVoXDTQzMDcx ++MjIxMjMyMVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFtmOTk3ZmE5 ++NC0xMDRlLTQ0YjMtYjA4Yy04MjQzNjA0MjhlZjBdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBrjCBqzAJBgNVHRMEAjAAMDoGDCsGAQQBkggJAYNmAQQqDChSZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIHg4Nl82NCBCZXRhMBsGDCsGAQQBkggJAYNm ++AgQLDAk4LjEwIEJldGEwGAYMKwYBBAGSCAkBg2YDBAgMBng4Nl82NDArBgwrBgEE ++AZIICQGDZgQEGwwZcmhlbC04LHJoZWwtOC1iZXRhLXg4Nl82NDANBgkqhkiG9w0B ++AQsFAAOCAgEAvlpN7okXL9T74DNAdmb6YBSCI2fl1By5kSsDkvA6HlWY2nSHJILN ++FCCCl9ygvUXIrxP6ObKV/ylZ5m+p20aa1fvmsmCsgh2QHAxWHlW17lRouQrtd1b6 ++EzNnoL0eVkPa2RF1h5Jwr1m5rLLeQH6A46Xgf3cSj4qD22Ru/b6pBGgJxqHMCIaX ++cyC1biwLT3JTJCTe3Y/gi326jPDaIMsKa28y/Tu5swg+7VhhbUNqqC3pMaKzhtF+ ++yT33d3X3An8iJ+i8cv6SdqovLV/C8DVM7ZWzFXDWlj1/wmSZ7IBeu6beUhUUkz0x ++VdN1Ud2DFaALFK09LK+JL5SV+thk5q6VmSTzfaIVnIqsbHVcLGjol/ePlm9kGVtr ++shyBYVpbNfSTqXnDsRyK6i7QRGix17b+nwPsVtRW1dBhy2pQ4vnJ53bZ3OnRm9ZW ++9qWu4N7uFtxRqtcEHKOYH7S88RWpjlyaNNAD+NYpnwBq3hSukQx/II619fm5zkR3 ++63WyoSQThBxM7D9ZNEVD0ibtNd3Q+8SJB0BFKXCrrWziMD9B7KGVyhK7GbdsBDzU ++fUlvxqCST2bd/beTIuPHanYAGFao4CyIlH7rSgpyR3ikSVrIzVYiR4KpkXzGfaBU ++CJ1v9WRDjALqjx5YABSD0AoP88darao26o6UsxxV4NMjWUc+WLdPpak= ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/8.10/72.pem b/repos/system_upgrade/common/files/prod-certs/8.10/72.pem +new file mode 100644 +index 00000000..eff0add4 +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/8.10/72.pem +@@ -0,0 +1,35 @@ ++-----BEGIN CERTIFICATE----- ++MIIGFzCCA/+gAwIBAgIJALDxRLt/tVC5MA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIzMDcxMjIxMjMzOVoXDTQzMDcx ++MjIxMjMzOVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFthNTEwOTUx ++NS04ZGUzLTQwYzItOTM4Yy0yZjhlNDgxMDA1NzFdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x 
++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBoDCBnTAJBgNVHRMEAjAAMDsGCysGAQQBkggJAUgBBCwMKlJlZCBIYXQg ++RW50ZXJwcmlzZSBMaW51eCBmb3IgSUJNIHogU3lzdGVtczAVBgsrBgEEAZIICQFI ++AgQGDAQ4LjEwMBYGCysGAQQBkggJAUgDBAcMBXMzOTB4MCQGCysGAQQBkggJAUgE ++BBUME3JoZWwtOCxyaGVsLTgtczM5MHgwDQYJKoZIhvcNAQELBQADggIBAITSTmUd ++W7DTBY77dm05tIRnAiDn4LqMLj2r1jt/6UBGH8e3WDNHmWlZa1kctewwvdbp4wEd ++RJAOpCoNucoy9TDg5atgyTKaqAYZXBu9fCi4faCl7mmTlvbOmIK3+WKOtRXG1pot ++ijq+RRQrS6h8hwOB2Ye/QXfY0C9fHz3OuI26rJ+n24MM9K3AYYOGZ+Xp/neBTLho ++fC0lwkyfZc45N+X/sAgaERz44Zd4JcW98XykFGyUJ0R0tHk2TvWbR7MyVKNaqEVW ++OwZxnlltpe15Dbz8SY5km0gRWfeXpEtmSjBST3cPREcOapL7sL4iJifKYaIJNg+I ++JED+K8BEfKbUH4OHqDS6QYRS+G7B++wkpmyBnlg7/It/dotZM82BIch32jifRj8S ++L2DkxScapLVc/QjyP6yHzUYMvdHHLAmaHZqf3X0TCDuBZ5VOyy2vYaWzroDbuJds ++S0ECnNG20P+IS5kWBXaw8cQ/iQP2HXylraHlXnsQ3xCBAISTbXKI0tHbcfITsb0I ++W+EKJnRyKGUvenffsTHetZ/NqekmNMCNweavg27jmikrFIoZaEGyMd5fterUbHoi ++hejh8bgzh95+r3tiO8lV/ZfGDB6kjlzqGJDFYoVsNIEwVxZ/OqWFbWsiwMpLax+9 ++T7gkLBXtuu/5Prp7fT0wy8PqJFaxUCVj27oh ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/9.4/279.pem b/repos/system_upgrade/common/files/prod-certs/9.4/279.pem +new file mode 100644 +index 00000000..da9b52bf +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/9.4/279.pem +@@ -0,0 +1,35 @@ ++-----BEGIN CERTIFICATE----- ++MIIGJTCCBA2gAwIBAgIJALDxRLt/tVDOMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIzMDcxOTE2MzQwOFoXDTQzMDcx ++OTE2MzQwOFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFtmZTk3MWE4 ++Mi1iNzViLTRlMTgtYjM5YS0yY2E3ODlhNmVlNTZdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBrjCBqzAJBgNVHRMEAjAAMEMGDCsGAQQBkggJAYIXAQQzDDFSZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIFBvd2VyLCBsaXR0bGUgZW5kaWFuMBUGDCsG ++AQQBkggJAYIXAgQFDAM5LjQwGQYMKwYBBAGSCAkBghcDBAkMB3BwYzY0bGUwJwYM ++KwYBBAGSCAkBghcEBBcMFXJoZWwtOSxyaGVsLTktcHBjNjRsZTANBgkqhkiG9w0B ++AQsFAAOCAgEAT/uBV7Mh+0zatuSO6rTBpTa0kFeVsbhpqc7cMDD7+lRxrKtdtkp5 ++WzU/0hw46I11jI29lkd0lLruX9EUxU2AtADK7HonQwpCBPK/3jLduOjx0IRjl8i5 
++YbMeKRHWTRiPrb/Avi7dA0ZkacBp9vCWVE1t6s972KgiQEKb85SS+5NvMpVcRaCo ++t5NNmi2+qZU/r/N47EUb9tJtFUPSV30GV97x/xlQgoVy8QAdomVo2wH1fuwgDZRy ++1ylniX/D/638wgYVJQV/H3Fr7CFxcXGTX1gIB9/uyYIjY5fOqVKqQwYYqG3AlNQd ++bIrztMR1b8FjsmX3nmCKYfJTvCOGhwgil9AYQR0g6poEquLYGI95cYxLml1kWTXN ++y4KPxosPwZVSgJ7G+xQLS61Pzk0mdk4+upTrnetqR64VQ/dyja8tSZw8bCga0R6K ++nLOEn55pkJPmDUgRFyyZT016+X8kFYaJqaNT2A2u4fA6hGf1vTqGqluNad2K9DSs ++TTzGiY0RD1aacOCIM2MtVNyIw15TTt9p9RCmwOLnJOn/KhqG51coIKfLgtDXvOoI ++6YTKqIM8Tb06ik12LnyHRj0fn8quqPwSmARMPP4JSLAVPv3Xf7s7CsWEBg89GTs+ ++gJln+L+kJPqT9GwUizz2v++ZYe9ZrGJ2Lguyvd+YGJs7HEreU+5uxxM= ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/9.4/362.pem b/repos/system_upgrade/common/files/prod-certs/9.4/362.pem +new file mode 100644 +index 00000000..f86ad9c8 +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/9.4/362.pem +@@ -0,0 +1,36 @@ ++-----BEGIN CERTIFICATE----- ++MIIGNDCCBBygAwIBAgIJALDxRLt/tVDkMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIzMDcxOTE2MzQ1M1oXDTQzMDcx ++OTE2MzQ1M1owRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFszMjE1Nzg2 ++NS01MDZiLTRjZmYtYmU1My01MWViOGY3ZGM2OWNdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBvTCBujAJBgNVHRMEAjAAMEgGDCsGAQQBkggJAYJqAQQ4DDZSZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIFBvd2VyLCBsaXR0bGUgZW5kaWFuIEJldGEw ++GgYMKwYBBAGSCAkBgmoCBAoMCDkuNCBCZXRhMBkGDCsGAQQBkggJAYJqAwQJDAdw ++cGM2NGxlMCwGDCsGAQQBkggJAYJqBAQcDBpyaGVsLTkscmhlbC05LWJldGEtcHBj ++NjRsZTANBgkqhkiG9w0BAQsFAAOCAgEAz10M4vhpDqQ0Jq80QGPZZCBpcERMTlbo ++hzqPX21O6pqeCIqS0AN/nuOQ+nfABvixsLZFjJ2cbXDRNQBCIfs3Yhhai/FLOKl1 ++zJ4CSfFoVBjp5zOJePS0EI6w7OVZJySzEWIWDWA1aPm+/wivKBQ/jYmGzahvtgLi ++hBdIawe6Mgfb4CMbbhpX9fxjYEohiUxXmxmfVxkXfqthgt6UXodykgk/UkT+Ir4N ++FTBFCm0/3ptaUAISV9/B7Pes7DBrbaYfSlamyRFtnDKBIc4tHJW0Iu6LZDRJzEDL ++yemaYFWRDuM3AodRDPez+leMoyXJOzLfYy9LhriFdZyOMzZCWTUCdIRJVWO7i2Lt ++OSrm7VzpWEno5EBd1tuo6KW7ZW2fJo3VV1Z54elNiItIxvFC9ZI38f1LMcueVpzC ++qZuXT9sICi+CMWXaFGb+3INU5tDqXrX5DyccFmIUJeGMuifLrAJmakT9S0f5AF8z ++QhGQm0pY2CO9IChKxxX1w+Yb4iNQ/GV0vTmFhC4+s7bFsQ/1yazrI91XTKrK125Q ++80KWUuQad8MYw6bs5K04OTdeUn5dEHqcVZLTmNHgpi6+8x3LShIZqqgrNNkzBIZD ++FmbrWIU2YilmX1hRTFn6OaVPmo5OWBcwgwQ/q4LDcxEvWO3C70A/cBn8QOuU8lUm ++bnNddM3PSgc= ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/9.4/363.pem b/repos/system_upgrade/common/files/prod-certs/9.4/363.pem +new file mode 100644 +index 00000000..c381a298 +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/9.4/363.pem +@@ -0,0 +1,35 @@ ++-----BEGIN CERTIFICATE----- ++MIIGJjCCBA6gAwIBAgIJALDxRLt/tVDjMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD 
++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIzMDcxOTE2MzQ1M1oXDTQzMDcx ++OTE2MzQ1M1owRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFtlYzI0NDY1 ++ZC1iODRkLTRkZDctYjA0Mi05MzFjZDkxNmQzOTRdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBrzCBrDAJBgNVHRMEAjAAMDoGDCsGAQQBkggJAYJrAQQqDChSZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIEFSTSA2NCBCZXRhMBoGDCsGAQQBkggJAYJr ++AgQKDAg5LjQgQmV0YTAZBgwrBgEEAZIICQGCawMECQwHYWFyY2g2NDAsBgwrBgEE ++AZIICQGCawQEHAwacmhlbC05LHJoZWwtOS1iZXRhLWFhcmNoNjQwDQYJKoZIhvcN ++AQELBQADggIBAE4lU1YTA5lGbC1uO2JQv7mSkI6FbhOjw2TE67exhzZfTuzNB0eq ++amuJrMJEN4n3yteBb2zmzYXxHeEkpvN2cpXW61fhC39X2LA51DQTelfXNGLH/kw0 ++lpXW47uG9o3qOyU25i1qZdapLUJvGwS6fMwPJrEeIwltbCGgpOen1aIs29KOfNzF ++JRmx1aNV0SA6nhwxPwPCnbHBnSsWYBKWhWxutUdN7SFwCQrJ72LbfkOwBBlf0P8A ++miWTVqJ1ZM051goF0m/5hgjMAW/UN4QsP8k2o+3YLjVho9Zd25d5U1PEqVwjBcxt ++Yjz74LpcZwrvx9MNPSijUZTXSHBD7ATkD+Tj32Wsxcoyce2PlyWpQlMAZdWZh8ve ++osOxNFjt8+sVB9i3gvO5aQibIvRTPIayuMCTla0A776BMv27AKETOclvHBCyEAa+ ++BQk4Th51gLnMPrFZEdt75AuZ9Hq3SgNzFnL7cw7KP1KjwicBkHnhNP5+vRTo3JWT ++lNtSeNGxzgtI1HlBnbOalirOBdi3GruEtVIdGkqgJo4bi7t6wj2KscRKwL/193q6 ++oJeFxo9To2Kc7V9+jEfYDmToGS6QezjO1wlLT63wpJXstpNdPRnMcHnGQ7iYV1dD ++hY2PTPWCHcKdjOa/Lff2K7MUNTmkhKsPivv4hO1MIbKKzyVoO12jo7Q2 ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/9.4/419.pem b/repos/system_upgrade/common/files/prod-certs/9.4/419.pem +new file mode 100644 +index 00000000..be9677f7 +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/9.4/419.pem +@@ -0,0 +1,35 @@ ++-----BEGIN CERTIFICATE----- ++MIIGFzCCA/+gAwIBAgIJALDxRLt/tVDNMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIzMDcxOTE2MzQwN1oXDTQzMDcx ++OTE2MzQwN1owRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFtlZjU2MDdh ++Ni1mOThjLTRkYTUtYTQ5MC1jNGRjYTVlODkyNjJdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v 
++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBoDCBnTAJBgNVHRMEAjAAMDUGDCsGAQQBkggJAYMjAQQlDCNSZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIEFSTSA2NDAVBgwrBgEEAZIICQGDIwIEBQwD ++OS40MBkGDCsGAQQBkggJAYMjAwQJDAdhYXJjaDY0MCcGDCsGAQQBkggJAYMjBAQX ++DBVyaGVsLTkscmhlbC05LWFhcmNoNjQwDQYJKoZIhvcNAQELBQADggIBAIhATeEW ++1cnsBWCy26c17gOqduTK1QQEjxoJzr0T1jVT2el7CsNm/TLjASdA3JEIupXhDV1y ++ej1VcdLQjaxwBah4dBPPrISD4mD7i4R9AsY73m4bg0UySkipYFgFJOHoPKXkbd/f ++Uy1/nBcLGKtBwWncI5hQ9qovPOndD9PSlNMN5gteG35d3IIPv4ugDF5hw2wabiaK ++TvUNZVFpCNR7lo11aONhJxfoygWjiNR1L1If3Uvgf7UAixTdIMlAb+Ioqa8o9ZMN ++fJclzk+ltVnWfphw+QdCWSJv1+0rJJzTHnm3S4UtGAIgrabo9WXAopLelwBgnP8l ++GhXWOhzU11FFjzp5pQ2VONUTGKXYfUjdclDj4w94fE3GRXXbwaqc3jaNRHb9JjNB ++aNfQ59O3nl7y2PwZkzCVtGwT3GwCOxrUcUVFdjDTs6WHfGSpt2wwsQl03oS55C+s ++xo8m+1LpQ+iWpxfiFqpKpPV+j3U9L2sTAInx3yuxtnRLhFma7qxJN6GVdrIEYXoi ++H5opy2YTZisvmHtd/pwjzB+yVdHcqvHkqt06mag84Pve3FUV2JQ7VfuCCyN9HsyO ++rdHvOCZK2cSkK+020Q40zTtQQDOmnHb6aLy2vLMNdvufylm6cchXRr+2avYzwEV5 ++LcgfwpsgtJFW3GgvR1ElBgJlXKEJlyxQzFws ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/9.4/433.pem b/repos/system_upgrade/common/files/prod-certs/9.4/433.pem +new file mode 100644 +index 00000000..c381be24 +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/9.4/433.pem +@@ -0,0 +1,35 @@ ++-----BEGIN CERTIFICATE----- ++MIIGKTCCBBGgAwIBAgIJALDxRLt/tVDlMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIzMDcxOTE2MzQ1M1oXDTQzMDcx ++OTE2MzQ1M1owRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFtkZGFjYmZm ++NS1hZDViLTQwNmQtYjA1OS1hYTI0Zjg2YmMyOWVdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBsjCBrzAJBgNVHRMEAjAAMEEGDCsGAQQBkggJAYMxAQQxDC9SZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIElCTSB6IFN5c3RlbXMgQmV0YTAaBgwrBgEE ++AZIICQGDMQIECgwIOS40IEJldGEwFwYMKwYBBAGSCAkBgzEDBAcMBXMzOTB4MCoG ++DCsGAQQBkggJAYMxBAQaDBhyaGVsLTkscmhlbC05LWJldGEtczM5MHgwDQYJKoZI ++hvcNAQELBQADggIBAI4wHOkCmr5rNJA1agaVflNz7KTFOQdX41VV20MM2PuBxIsi ++pj/pS2MzHFhdCcSZ2YMl2ztGVKLpheoFZi1EA62VppasmnkskayGxaeGh+yvv1x/ ++frUW26izPWUNeqpi4oMsO2ByKCySYWyMIZfyPV8LpqU5/VSchohYB0FNzXUdHpVg ++FJSnkiHS28UwQ4RDKp+0uKKY3S9Zq6u3YBer0wf2v0uuVz3R2pFNC86lybe/wihm ++XTjlJOT33zpGUm49jp+xgM1FSx+g1CSQKT9SZJiMQzD+yappyRaYbReZ4a3AWaUn ++juAES9tgBfYNrsmj9vNJ94isRTXifhh6pU5gKjdvbddYFNfaSFRmnOQK+SNcgUr6 ++/RqC6yivGKGeZ+W+jn6hlSQPQISmsoy3D0/X+yKJShAVXvEZwtME9iKmVSqtLMKJ ++Exu4t6vguy5frm5rBbuB2XfaGX6de8jF5742bBODj5hdQoNQUw/6E4QHj6HXRWTW ++InpfhOA9Uk8+n4+QmJfJjp9O+cTwbDx2+GAPSu/pMhFE1yfWPb0ZLBQHcSlD1uga 
++rVeFld3c1p0MZkVZVU/G6I+aGq1fNSKdtAd068z1/AJr7lLJ5vY3ckwR0sGhMccA ++3BiXXyTbciwVX9ShA/bRa3YXNDYCu2zNaX38arTP8JSq5h8a1zJDG+vnsRfr ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/9.4/479.pem b/repos/system_upgrade/common/files/prod-certs/9.4/479.pem +new file mode 100644 +index 00000000..1ea1cd3d +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/9.4/479.pem +@@ -0,0 +1,35 @@ ++-----BEGIN CERTIFICATE----- ++MIIGFTCCA/2gAwIBAgIJALDxRLt/tVDQMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIzMDcxOTE2MzQwOFoXDTQzMDcx ++OTE2MzQwOFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFsxZDg0ZDQ5 ++Ny1jZmNmLTQxNjEtOTM0YS0zNzk2MDU4M2ZmZGZdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBnjCBmzAJBgNVHRMEAjAAMDUGDCsGAQQBkggJAYNfAQQlDCNSZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIHg4Nl82NDAVBgwrBgEEAZIICQGDXwIEBQwD ++OS40MBgGDCsGAQQBkggJAYNfAwQIDAZ4ODZfNjQwJgYMKwYBBAGSCAkBg18EBBYM ++FHJoZWwtOSxyaGVsLTkteDg2XzY0MA0GCSqGSIb3DQEBCwUAA4ICAQCGUDPFBrLs ++sK/RITJothRhKhKNX3zu9TWRG0WKxszCx/y7c4yEfH1TV/yd7BNB2RubaoayWz8E ++TQjcRW8BnVu9JrlbdpWJm4eN+dOOpcESPilLnkz4Tr0WYDsT1/jk/uiorK4h21S0 ++EwMicuSuEmm0OUEX0zj2X/IyveFRtpJpH/JktznCkvexysc1JRzqMCbal8GipRX9 ++Xf7Oko6QiaUpu5GDLN2OXhizYHdR2f3l+Sn2cScsbi3fSVv+DLsnaz6J0kZ4U8q3 ++lYk/ZYifJjG+/7cv3e+usixpmK/qYlpOvunUDnqOkDfUs4/4bZjH8e8CdqJk4YvU ++RRtLr7muXEJsaqF7lxAViXnKxT/z/+1kOgN/+Oyzjs4QDsk2HQpWHFgNYSSG9Mmz ++PUS8tk2T0j5sN55X7QRRl5c0oqrBU5XaWyL26QcfONYcR8dBaKawjxg8CI9KzsYY ++sb2jjS+fBkB1OI2c6z4OZRd+0N6FQ6gq++KiXOLFvi/QSFNi9Veb56c5tR2l6fBk ++0pSH06Gg2s0aQg20NdMIr+HaYsVdJRsE1FgQ2tlfFx9rGkcqhgwV3Za/abgtRb2o ++YVwps28DLm41DXf5DnXK+BXFHrtR/3YAZtga+R7OL/RvcF0kc2kudlxqd/8Y33uL ++nqnoATy31FTW4J4rEfanJTQgTpatZmbaLQ== ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/9.4/486.pem b/repos/system_upgrade/common/files/prod-certs/9.4/486.pem +new file mode 100644 +index 00000000..8c6cc292 +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/9.4/486.pem +@@ -0,0 +1,35 @@ ++-----BEGIN CERTIFICATE----- ++MIIGJDCCBAygAwIBAgIJALDxRLt/tVDmMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIzMDcxOTE2MzQ1NFoXDTQzMDcx ++OTE2MzQ1NFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFthMThhM2Iz ++MC01MTIxLTQ4YmYtOWFjYS01YWUwMTY5Zjk3MDFdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk 
++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBrTCBqjAJBgNVHRMEAjAAMDoGDCsGAQQBkggJAYNmAQQqDChSZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIHg4Nl82NCBCZXRhMBoGDCsGAQQBkggJAYNm ++AgQKDAg5LjQgQmV0YTAYBgwrBgEEAZIICQGDZgMECAwGeDg2XzY0MCsGDCsGAQQB ++kggJAYNmBAQbDBlyaGVsLTkscmhlbC05LWJldGEteDg2XzY0MA0GCSqGSIb3DQEB ++CwUAA4ICAQCKLxIlbpPv+pvTx79IsbuZeTgjeTyJ5swT0R6WoAgjjVf3BInjnu5n ++tOqxTFy9f6Vg1sU8/DCNQdY87gQmnDLgx+E/fJRb3DlBqTVMdRQbafdS8H0PK/A8 ++wnGuwfiI6IUv/G1nb4Gp9SxzBO6c6iJDfp+UN/v+i0FxpIwq5n5vsGDx9qG7YkC/ ++wfgiXB7dvzMjx9GIf0Q0ouTMrB0CN07CBa5qwjLLVAOV4jfXl/PK6DbhmIjCsDEp ++BWmHZKVvn610301W/efrMtzZjH9KgIMmylEPY3QrYXaFjZcKRAl/jEGTSROQmycY ++hF+pmKKqqzRT6ab3aM6zO4LoMj8+VgyJOn1Pep7ETb3uxReYZU0vSKCqa0dYcpsP ++ufmLLYmAThwEoOEEQMn0zOFDLhdBKiP+JaBWVFLyVVquEfWVEsIVGamAdVZUDX1v ++ILhzV4imgboajVPYo/C5yEsuHPkw8idA2L9phZY9kPY2DhYBnfV2ccQSik5wBKpf ++lWajuFMSQFNiUet43YHQGzqmZLA08PgoaQkLRfENTvlhHFOrphnoIu4yNbdzuM3y ++bOjGFem5WwOPwPBs7m0wEpvpUp4UoqbIn6vihtLq7q2mFxwz/iDh7rHDrTkMD7fB ++nSrKb/v4Gnp2k+/fU52rWaV2tjesevGJeWw17YMerzZYhrF+KTt3pQ== ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/9.4/72.pem b/repos/system_upgrade/common/files/prod-certs/9.4/72.pem +new file mode 100644 +index 00000000..d5832c16 +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/9.4/72.pem +@@ -0,0 +1,35 @@ ++-----BEGIN CERTIFICATE----- ++MIIGFjCCA/6gAwIBAgIJALDxRLt/tVDPMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIzMDcxOTE2MzQwOFoXDTQzMDcx ++OTE2MzQwOFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFszYzk0ZTRj ++OS1kYjU5LTQ2ZDktYjBmNS04YmZmNDRkMDFiMjVdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBnzCBnDAJBgNVHRMEAjAAMDsGCysGAQQBkggJAUgBBCwMKlJlZCBIYXQg ++RW50ZXJwcmlzZSBMaW51eCBmb3IgSUJNIHogU3lzdGVtczAUBgsrBgEEAZIICQFI ++AgQFDAM5LjQwFgYLKwYBBAGSCAkBSAMEBwwFczM5MHgwJAYLKwYBBAGSCAkBSAQE ++FQwTcmhlbC05LHJoZWwtOS1zMzkweDANBgkqhkiG9w0BAQsFAAOCAgEAvzalgsaq ++pRPmiEeCjm43KBazl284ua9GBeDVjKAWrlAUmoa6HROrU5x55qH4VMQlDB8q0GIb 
++cF5Nde2EhIDeTTomrSD8bA0I/vwAF4mxsxA9Qjm2NqaRN8AwLkhy9Mnl/SDZXarB
++ebOtwSlI7NUFj8+2C6kVCAV37EA2TMkBOjleBVU9y16yFnbgmVoJZQ9DeZreWt/i
++igkpybNE5rdqbnp/cXMgsZgisGt2SyHa6oyuUK/goDN0MAfVrLf7JJWZY7r6Q/Yy
++8NRvIzniWAZEkX6ywoT9f5GsVuiOzGSIvf0uSS9cPrKxSbZeliVSpwZk7GLr5cv/
++rxjEuNNPTv/+KqEfrACAPqx4IuCd+wRD2qbhiWwfG/XBd0qnHbw+TyUdhzVxgVj7
++7curyQUSqJtpAQ868cdGBoqpCR6yV4ZN4ZekqmPdcmGXIBWsvI3Arv7BZO9P4Pt9
++yxBA4hwP6X6+PsVVdOdSV48m6bcFj8QCy1+Q6OyEDtY5NGNISlxa4U4613jKc/rA
++4NAc6sbqaLtRhEC3Bx4jCIP/+ReY+C8RR3569HCz1NU8Bb+xRXsRiV8Zgj8eKSMJ
++6+RrbOCb+MooF1HMPtaSgJJNOkcVFdHAw9xz0iFf2TWm8yVyZtLh0g9pYT+n8UiF
++ILtIL4wWtg67tJLTuXJ2QwLpu/Eow7CXT6M=
++-----END CERTIFICATE-----
+--
+2.41.0
+
diff --git a/0034-pylint-ignore-too-many-lines.patch b/0034-pylint-ignore-too-many-lines.patch
new file mode 100644
index 0000000..bb44ace
--- /dev/null
+++ b/0034-pylint-ignore-too-many-lines.patch
@@ -0,0 +1,29 @@
+From 81e85bd5ebadfa90851e22999a851375f7de363e Mon Sep 17 00:00:00 2001
+From: Petr Stodulka
+Date: Thu, 16 Nov 2023 09:30:22 +0100
+Subject: [PATCH 34/38] pylint: ignore too-many-lines
+
+It limits modules to 1000 lines of code, and the targetuserspacecreator
+actor's lib is beyond that limit. This is not the type of problem
+we want to deal with anyway.
+---
+ .pylintrc | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/.pylintrc b/.pylintrc
+index 0adb7dcc..57259bcb 100644
+--- a/.pylintrc
++++ b/.pylintrc
+@@ -56,7 +56,8 @@ disable=
+ use-dict-literal,
+ redundant-u-string-prefix, # still have py2 to support
+ logging-format-interpolation,
+- logging-not-lazy
++ logging-not-lazy,
++ too-many-lines # we do not want to take care about that one
+
+ [FORMAT]
+ # Maximum number of characters on a single line.
+--
+2.41.0
+
diff --git a/0035-Update-upgrade-paths-Add-8.10-9.4.patch b/0035-Update-upgrade-paths-Add-8.10-9.4.patch
new file mode 100644
index 0000000..e2a9a53
--- /dev/null
+++ b/0035-Update-upgrade-paths-Add-8.10-9.4.patch
@@ -0,0 +1,66 @@
+From 5a3bded4be67e6a99ba739e15c3b9d533134f35b Mon Sep 17 00:00:00 2001
+From: Petr Stodulka
+Date: Thu, 16 Nov 2023 08:46:55 +0100
+Subject: [PATCH 35/38] Update upgrade paths: Add 8.10/9.4
+
+Adding upgrade paths (RHEL and RHEL with SAP HANA):
+ 7.9 -> 8.10
+ 8.10 -> 9.4
+
+The following upgrade paths will be dropped later in this release.
+Consider them deprecated now.
+ 7.9 -> 8.6 (RHEL and RHEL with SAP HANA)
+ 7.9 -> 8.9
+ 8.6 -> 9.0 (RHEL and RHEL with SAP HANA)
+ 8.9 -> 9.3
+---
+ .../system_upgrade/common/files/upgrade_paths.json | 14 ++++++++------
+ .../common/libraries/config/version.py | 2 +-
+ 2 files changed, 9 insertions(+), 7 deletions(-)
+
+diff --git a/repos/system_upgrade/common/files/upgrade_paths.json b/repos/system_upgrade/common/files/upgrade_paths.json
+index 2069e26d..25c6db7c 100644
+--- a/repos/system_upgrade/common/files/upgrade_paths.json
++++ b/repos/system_upgrade/common/files/upgrade_paths.json
+@@ -1,17 +1,19 @@
+ {
+ "default": {
+- "7.9": ["8.6", "8.8", "8.9"],
++ "7.9": ["8.6", "8.8", "8.9", "8.10"],
+ "8.6": ["9.0"],
+ "8.8": ["9.2"],
+ "8.9": ["9.3"],
+- "7": ["8.6", "8.8", "8.9"],
+- "8": ["9.3"]
++ "8.10": ["9.4"],
++ "7": ["8.6", "8.8", "8.9", "8.10"],
++ "8": ["9.0", "9.2", "9.3", "9.4"]
+ },
+ "saphana": {
+- "7.9": ["8.8", "8.6"],
+- "7": ["8.8", "8.6"],
++ "7.9": ["8.6", "8.10", "8.8"],
++ "7": ["8.6", "8.10", "8.8"],
+ "8.6": ["9.0"],
+ "8.8": ["9.2"],
+- "8": ["9.2", "9.0"]
++ "8.10": ["9.4"],
++ "8": ["9.0", "9.4", "9.2"]
+ }
+ }
+diff --git a/repos/system_upgrade/common/libraries/config/version.py b/repos/system_upgrade/common/libraries/config/version.py
+index 0f1e5874..12598960 100644
+--- a/repos/system_upgrade/common/libraries/config/version.py
++++ b/repos/system_upgrade/common/libraries/config/version.py
+@@ -16,7 +16,7 @@ OP_MAP = {
+ _SUPPORTED_VERSIONS = {
+ # Note: 'rhel-alt' is detected when on 'rhel' with kernel 4.x
+ '7': {'rhel': ['7.9'], 'rhel-alt': [], 'rhel-saphana': ['7.9']},
+- '8': {'rhel': ['8.6', '8.8', '8.9'], 'rhel-saphana': ['8.6', '8.8']},
++ '8': {'rhel': ['8.6', '8.8', '8.9', '8.10'], 'rhel-saphana': ['8.6', '8.8', '8.10']},
+ }
+
+
+--
+2.41.0
+
diff --git a/0036-Copy-dnf.conf-to-target-userspace-and-allow-a-custom.patch b/0036-Copy-dnf.conf-to-target-userspace-and-allow-a-custom.patch
new file mode 100644
index 0000000..9ffe854
--- /dev/null
+++ b/0036-Copy-dnf.conf-to-target-userspace-and-allow-a-custom.patch
@@ -0,0 +1,275 @@
+From 5bd7bdf5e9c81ec306e567a147dc270adfd27da2 Mon Sep 17 00:00:00 2001
+From: Matej Matuska
+Date: Tue, 14 Nov 2023 09:51:41 +0100
+Subject: [PATCH 36/38] Copy dnf.conf to target userspace and allow a custom
+ one
+
+This change allows working around the fact that source and target
+`dnf.conf` files might be incompatible. For example, some of the proxy
+configuration options differ between RHEL7 and RHEL8.
+
+Target system compatible configuration can be specified in
+/etc/leapp/files/dnf.conf. If this file is present it is copied into
+the target userspace and also applied to the target system. If it
+doesn't exist, the `/etc/dnf/dnf.conf` from the source system will be
+copied instead.
+
+Errors that could be caused by incompatible/incorrect proxy
+configuration now contain a hint with a remediation describing the
+steps mentioned above.
+
+* pstodulk@redhat.com: Updated text in the error msg.
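+
+For illustration only (the proxy URL below is a hypothetical value, not
+a recommendation), a minimal target-compatible configuration placed in
+/etc/leapp/files/dnf.conf could look like:
+
+    [main]
+    gpgcheck=1
+    proxy=http://proxy.example.com:3128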
+ +Jira: OAMG-6544 +--- + .../common/actors/applycustomdnfconf/actor.py | 19 ++++++++++++++ + .../libraries/applycustomdnfconf.py | 15 +++++++++++ + .../tests/test_applycustomdnfconf.py | 23 ++++++++++++++++ + .../copydnfconfintotargetuserspace/actor.py | 24 +++++++++++++++++ + .../copydnfconfintotargetuserspace.py | 19 ++++++++++++++ + .../tests/test_dnfconfuserspacecopy.py | 26 +++++++++++++++++++ + .../libraries/userspacegen.py | 18 ++++++++++--- + .../common/libraries/dnfplugin.py | 24 ++++++++++++++++- + 8 files changed, 163 insertions(+), 5 deletions(-) + create mode 100644 repos/system_upgrade/common/actors/applycustomdnfconf/actor.py + create mode 100644 repos/system_upgrade/common/actors/applycustomdnfconf/libraries/applycustomdnfconf.py + create mode 100644 repos/system_upgrade/common/actors/applycustomdnfconf/tests/test_applycustomdnfconf.py + create mode 100644 repos/system_upgrade/common/actors/copydnfconfintotargetuserspace/actor.py + create mode 100644 repos/system_upgrade/common/actors/copydnfconfintotargetuserspace/libraries/copydnfconfintotargetuserspace.py + create mode 100644 repos/system_upgrade/common/actors/copydnfconfintotargetuserspace/tests/test_dnfconfuserspacecopy.py + +diff --git a/repos/system_upgrade/common/actors/applycustomdnfconf/actor.py b/repos/system_upgrade/common/actors/applycustomdnfconf/actor.py +new file mode 100644 +index 00000000..d7c7fe87 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/applycustomdnfconf/actor.py +@@ -0,0 +1,19 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import applycustomdnfconf ++from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag ++ ++ ++class ApplyCustomDNFConf(Actor): ++ """ ++ Move /etc/leapp/files/dnf.conf to /etc/dnf/dnf.conf if it exists ++ ++ An actor in FactsPhase copies this file to the target userspace if present. ++ In such case we also want to use the file on the target system. 
++ """ ++ name = "apply_custom_dnf_conf" ++ consumes = () ++ produces = () ++ tags = (ApplicationsPhaseTag, IPUWorkflowTag) ++ ++ def process(self): ++ applycustomdnfconf.process() +diff --git a/repos/system_upgrade/common/actors/applycustomdnfconf/libraries/applycustomdnfconf.py b/repos/system_upgrade/common/actors/applycustomdnfconf/libraries/applycustomdnfconf.py +new file mode 100644 +index 00000000..2eabd678 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/applycustomdnfconf/libraries/applycustomdnfconf.py +@@ -0,0 +1,15 @@ ++import os ++ ++from leapp.libraries.stdlib import api, CalledProcessError, run ++ ++CUSTOM_DNF_CONF_PATH = "/etc/leapp/files/dnf.conf" ++ ++ ++def process(): ++ if os.path.exists(CUSTOM_DNF_CONF_PATH): ++ try: ++ run(["mv", CUSTOM_DNF_CONF_PATH, "/etc/dnf/dnf.conf"]) ++ except (CalledProcessError, OSError) as e: ++ api.current_logger().debug( ++ "Failed to move /etc/leapp/files/dnf.conf to /etc/dnf/dnf.conf: {}".format(e) ++ ) +diff --git a/repos/system_upgrade/common/actors/applycustomdnfconf/tests/test_applycustomdnfconf.py b/repos/system_upgrade/common/actors/applycustomdnfconf/tests/test_applycustomdnfconf.py +new file mode 100644 +index 00000000..6dbc4291 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/applycustomdnfconf/tests/test_applycustomdnfconf.py +@@ -0,0 +1,23 @@ ++import os ++ ++import pytest ++ ++from leapp.libraries.actor import applycustomdnfconf ++ ++ ++@pytest.mark.parametrize( ++ "exists,should_move", ++ [(False, False), (True, True)], ++) ++def test_copy_correct_dnf_conf(monkeypatch, exists, should_move): ++ monkeypatch.setattr(os.path, "exists", lambda _: exists) ++ ++ run_called = [False] ++ ++ def mocked_run(_): ++ run_called[0] = True ++ ++ monkeypatch.setattr(applycustomdnfconf, 'run', mocked_run) ++ ++ applycustomdnfconf.process() ++ assert run_called[0] == should_move +diff --git a/repos/system_upgrade/common/actors/copydnfconfintotargetuserspace/actor.py b/repos/system_upgrade/common/actors/copydnfconfintotargetuserspace/actor.py +new file mode 100644 +index 00000000..46ce1934 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/copydnfconfintotargetuserspace/actor.py +@@ -0,0 +1,24 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import copydnfconfintotargetuserspace ++from leapp.models import TargetUserSpacePreupgradeTasks ++from leapp.tags import FactsPhaseTag, IPUWorkflowTag ++ ++ ++class CopyDNFConfIntoTargetUserspace(Actor): ++ """ ++ Copy dnf.conf into target userspace ++ ++ Copies /etc/leapp/files/dnf.conf to target userspace. If it isn't available ++ /etc/dnf/dnf.conf is copied instead. This allows specifying a different ++ config for the target userspace, which might be required if the source ++ system configuration file isn't compatible with the target one. One such ++ example is incompatible proxy configuration between RHEL7 and RHEL8 DNF ++ versions. 
++ """ ++ name = "copy_dnf_conf_into_target_userspace" ++ consumes = () ++ produces = (TargetUserSpacePreupgradeTasks,) ++ tags = (FactsPhaseTag, IPUWorkflowTag) ++ ++ def process(self): ++ copydnfconfintotargetuserspace.process() +diff --git a/repos/system_upgrade/common/actors/copydnfconfintotargetuserspace/libraries/copydnfconfintotargetuserspace.py b/repos/system_upgrade/common/actors/copydnfconfintotargetuserspace/libraries/copydnfconfintotargetuserspace.py +new file mode 100644 +index 00000000..4e74acdb +--- /dev/null ++++ b/repos/system_upgrade/common/actors/copydnfconfintotargetuserspace/libraries/copydnfconfintotargetuserspace.py +@@ -0,0 +1,19 @@ ++import os ++ ++from leapp.libraries.stdlib import api ++from leapp.models import CopyFile, TargetUserSpacePreupgradeTasks ++ ++ ++def process(): ++ src = "/etc/dnf/dnf.conf" ++ if os.path.exists("/etc/leapp/files/dnf.conf"): ++ src = "/etc/leapp/files/dnf.conf" ++ ++ api.current_logger().debug( ++ "Copying dnf.conf at {} to the target userspace".format(src) ++ ) ++ api.produce( ++ TargetUserSpacePreupgradeTasks( ++ copy_files=[CopyFile(src=src, dst="/etc/dnf/dnf.conf")] ++ ) ++ ) +diff --git a/repos/system_upgrade/common/actors/copydnfconfintotargetuserspace/tests/test_dnfconfuserspacecopy.py b/repos/system_upgrade/common/actors/copydnfconfintotargetuserspace/tests/test_dnfconfuserspacecopy.py +new file mode 100644 +index 00000000..6c99925e +--- /dev/null ++++ b/repos/system_upgrade/common/actors/copydnfconfintotargetuserspace/tests/test_dnfconfuserspacecopy.py +@@ -0,0 +1,26 @@ ++import os ++ ++import pytest ++ ++from leapp.libraries.actor import copydnfconfintotargetuserspace ++from leapp.libraries.common.testutils import logger_mocked, produce_mocked ++ ++ ++@pytest.mark.parametrize( ++ "userspace_conf_exists,expected", ++ [(False, "/etc/dnf/dnf.conf"), (True, "/etc/leapp/files/dnf.conf")], ++) ++def test_copy_correct_dnf_conf(monkeypatch, userspace_conf_exists, expected): ++ monkeypatch.setattr(os.path, "exists", lambda _: userspace_conf_exists) ++ ++ mocked_produce = produce_mocked() ++ monkeypatch.setattr(copydnfconfintotargetuserspace.api, 'produce', mocked_produce) ++ monkeypatch.setattr(copydnfconfintotargetuserspace.api, 'current_logger', logger_mocked()) ++ ++ copydnfconfintotargetuserspace.process() ++ ++ assert mocked_produce.called == 1 ++ assert len(mocked_produce.model_instances) == 1 ++ assert len(mocked_produce.model_instances[0].copy_files) == 1 ++ assert mocked_produce.model_instances[0].copy_files[0].src == expected ++ assert mocked_produce.model_instances[0].copy_files[0].dst == "/etc/dnf/dnf.conf" +diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py +index 050ad7fe..e015a741 100644 +--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py ++++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py +@@ -269,15 +269,25 @@ def prepare_target_userspace(context, userspace_dir, enabled_repos, packages): + # failed since leapp does not support updates behind proxy yet. 
+ for manager_info in api.consume(PkgManagerInfo):
+ if manager_info.configured_proxies:
+- details['details'] = ("DNF failed to install userspace packages, likely due to the proxy "
+- "configuration detected in the YUM/DNF configuration file.")
++ details['details'] = (
++ "DNF failed to install userspace packages, likely due to the proxy "
++ "configuration detected in the YUM/DNF configuration file. "
++ "Make sure the proxy is properly configured in /etc/dnf/dnf.conf. "
++ "It's also possible the proxy settings in the DNF configuration file are "
++ "incompatible with the target system. A compatible configuration can be "
++ "placed in /etc/leapp/files/dnf.conf which, if present, will be used during "
++ "the upgrade instead of /etc/dnf/dnf.conf. "
++ "In that case the configuration will also be applied to the target system."
++ )
+
+ # Similarly if a proxy was set specifically for one of the repositories.
+ for repo_facts in api.consume(RepositoriesFacts):
+ for repo_file in repo_facts.repositories:
+ if any(repo_data.proxy and repo_data.enabled for repo_data in repo_file.data):
+- details['details'] = ("DNF failed to install userspace packages, likely due to the proxy "
+- "configuration detected in a repository configuration file.")
++ details['details'] = (
++ "DNF failed to install userspace packages, likely due to the proxy "
++ "configuration detected in a repository configuration file."
++ )
+
+ raise StopActorExecutionError(message=message, details=details)
+
+diff --git a/repos/system_upgrade/common/libraries/dnfplugin.py b/repos/system_upgrade/common/libraries/dnfplugin.py
+index 26810e94..d3ec5901 100644
+--- a/repos/system_upgrade/common/libraries/dnfplugin.py
++++ b/repos/system_upgrade/common/libraries/dnfplugin.py
+@@ -178,8 +178,30 @@ def _handle_transaction_err_msg(stage, xfs_info, err, is_container=False):
+ return # not needed actually as the above function raises error, but for visibility
+ NO_SPACE_STR = 'more space needed on the'
+ message = 'DNF execution failed with non zero exit code.'
+- details = {'STDOUT': err.stdout, 'STDERR': err.stderr}
+ if NO_SPACE_STR not in err.stderr:
++ # if there was a problem reaching repos and a proxy is configured in DNF/YUM configs, the
++ # proxy is likely the problem.
++ # NOTE(mmatuska): We can't consistently detect there was a problem reaching some repos,
++ # because it isn't clear what all the possible DNF error messages we can encounter are,
++ # such as: "Failed to synchronize cache for repo ..." or "Errors during downloading
++ # metadata for repository" or "No more mirrors to try - All mirrors were already tried
++ # without success"
++ # NOTE(mmatuska): We could check PkgManagerInfo to detect if proxy is indeed configured,
++ # however it would be pretty ugly to pass it all the way down here
++ proxy_hint = (
++ "If there was a problem reaching remote content (see stderr output) and a proxy is "
++ "configured in the YUM/DNF configuration file, the proxy configuration is likely "
++ "causing this error. "
++ "Make sure the proxy is properly configured in /etc/dnf/dnf.conf. "
++ "It's also possible the proxy settings in the DNF configuration file are "
++ "incompatible with the target system. A compatible configuration can be "
++ "placed in /etc/leapp/files/dnf.conf which, if present, will be used during "
++ "some parts of the upgrade instead of the original /etc/dnf/dnf.conf. "
++ "In that case the configuration will also be applied to the target system. "
++ "Note that /etc/dnf/dnf.conf still needs to be configured correctly "
++ "for your current system to pass the early phases of the upgrade process."
++ )
++ details = {'STDOUT': err.stdout, 'STDERR': err.stderr, 'hint': proxy_hint}
+ raise StopActorExecutionError(message=message, details=details)
+
+ # Disk Requirements:
+--
+2.41.0
+
diff --git a/0037-adjustlocalrepos-suppress-unwanted-deprecation-repor.patch b/0037-adjustlocalrepos-suppress-unwanted-deprecation-repor.patch
new file mode 100644
index 0000000..35c67b7
--- /dev/null
+++ b/0037-adjustlocalrepos-suppress-unwanted-deprecation-repor.patch
@@ -0,0 +1,36 @@
+From 51fd0cc817aa9efea24d62e735fdc47133b1622b Mon Sep 17 00:00:00 2001
+From: Petr Stodulka
+Date: Thu, 16 Nov 2023 18:30:53 +0100
+Subject: [PATCH 37/38] adjustlocalrepos: suppress unwanted deprecation report
+
+---
+ repos/system_upgrade/common/actors/adjustlocalrepos/actor.py | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/repos/system_upgrade/common/actors/adjustlocalrepos/actor.py b/repos/system_upgrade/common/actors/adjustlocalrepos/actor.py
+index 064e7f3e..0d0cc1d0 100644
+--- a/repos/system_upgrade/common/actors/adjustlocalrepos/actor.py
++++ b/repos/system_upgrade/common/actors/adjustlocalrepos/actor.py
+@@ -9,8 +9,10 @@ from leapp.models import (
+ UsedTargetRepositories
+ )
+ from leapp.tags import IPUWorkflowTag, TargetTransactionChecksPhaseTag
++from leapp.utils.deprecation import suppress_deprecation
+
+
++@suppress_deprecation(TMPTargetRepositoriesFacts)
+ class AdjustLocalRepos(Actor):
+ """
+ Adjust local repositories to the target user-space container.
+@@ -25,7 +27,7 @@ class AdjustLocalRepos(Actor):
+ name = 'adjust_local_repos'
+ consumes = (TargetOSInstallationImage,
+ TargetUserSpaceInfo,
+- TMPTargetRepositoriesFacts,
++ TMPTargetRepositoriesFacts, # deprecated
+ UsedTargetRepositories)
+ produces = ()
+ tags = (IPUWorkflowTag, TargetTransactionChecksPhaseTag)
+--
+2.41.0
+
diff --git a/0038-add-detection-for-custom-libraries-registered-by-ld..patch b/0038-add-detection-for-custom-libraries-registered-by-ld..patch
new file mode 100644
index 0000000..8ad02dd
--- /dev/null
+++ b/0038-add-detection-for-custom-libraries-registered-by-ld..patch
@@ -0,0 +1,616 @@
+From 7dabc85a0ab5595bd4c7b232c78f14d04eed40fc Mon Sep 17 00:00:00 2001
+From: PeterMocary
+Date: Tue, 22 Aug 2023 17:03:48 +0200
+Subject: [PATCH 38/38] add detection for custom libraries registered by
+ ld.so.conf
+
+The in-place upgrade process does not support custom libraries
+and also does not handle customized configuration of the dynamic linker.
+In such a case it can happen (and it happens) that the upgrade could
+break in critical phases when linked libraries disappear or are not
+compatible with the new system.
+
+We cannot decide whether or not such a custom configuration affects
+the upgrade negatively, so let's detect any customisations
+or unexpected configurations related to the dynamic linker and in such
+a case generate a high severity report, informing the user about the
+possible impact on the upgrade process.
+
+Currently it's detected:
+ * modified default LD configuration: /etc/ld.so.conf
+ * drop-in configuration files under /etc/ld.so.conf.d/ that are
+ not owned by any RHEL RPMs
+ * envars: LD_LIBRARY_PATH, LD_PRELOAD
+
+Jira ref.: OAMG-4460 / RHEL-11958
+BZ ref.: BZ 1927700
+---
+ .../checkdynamiclinkerconfiguration/actor.py | 22 +++
+ .../checkdynamiclinkerconfiguration.py | 79 ++++++++
+ .../test_checkdynamiclinkerconfiguration.py | 65 +++++++
+ .../scandynamiclinkerconfiguration/actor.py | 23 +++
+ .../scandynamiclinkerconfiguration.py | 117 +++++++++++
+ .../test_scandynamiclinkerconfiguration.py | 181 ++++++++++++++++++
+ .../common/models/dynamiclinker.py | 41 ++++
+ 7 files changed, 528 insertions(+)
+ create mode 100644 repos/system_upgrade/common/actors/checkdynamiclinkerconfiguration/actor.py
+ create mode 100644 repos/system_upgrade/common/actors/checkdynamiclinkerconfiguration/libraries/checkdynamiclinkerconfiguration.py
+ create mode 100644 repos/system_upgrade/common/actors/checkdynamiclinkerconfiguration/tests/test_checkdynamiclinkerconfiguration.py
+ create mode 100644 repos/system_upgrade/common/actors/scandynamiclinkerconfiguration/actor.py
+ create mode 100644 repos/system_upgrade/common/actors/scandynamiclinkerconfiguration/libraries/scandynamiclinkerconfiguration.py
+ create mode 100644 repos/system_upgrade/common/actors/scandynamiclinkerconfiguration/tests/test_scandynamiclinkerconfiguration.py
+ create mode 100644 repos/system_upgrade/common/models/dynamiclinker.py
+
+diff --git a/repos/system_upgrade/common/actors/checkdynamiclinkerconfiguration/actor.py b/repos/system_upgrade/common/actors/checkdynamiclinkerconfiguration/actor.py
+new file mode 100644
+index 00000000..6671eef4
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/checkdynamiclinkerconfiguration/actor.py
+@@ -0,0 +1,22 @@
++from leapp.actors import Actor
++from leapp.libraries.actor.checkdynamiclinkerconfiguration import check_dynamic_linker_configuration
++from leapp.models import DynamicLinkerConfiguration, Report
++from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
++
++
++class CheckDynamicLinkerConfiguration(Actor):
++ """
++ Check for customization of dynamic linker configuration.
++
++ The in-place upgrade could potentially be impacted in a negative way due
++ to the customization of dynamic linker configuration by the user. This actor creates a high
++ severity report upon detecting such customization.
++ """ ++ ++ name = 'check_dynamic_linker_configuration' ++ consumes = (DynamicLinkerConfiguration,) ++ produces = (Report,) ++ tags = (ChecksPhaseTag, IPUWorkflowTag) ++ ++ def process(self): ++ check_dynamic_linker_configuration() +diff --git a/repos/system_upgrade/common/actors/checkdynamiclinkerconfiguration/libraries/checkdynamiclinkerconfiguration.py b/repos/system_upgrade/common/actors/checkdynamiclinkerconfiguration/libraries/checkdynamiclinkerconfiguration.py +new file mode 100644 +index 00000000..9ead892e +--- /dev/null ++++ b/repos/system_upgrade/common/actors/checkdynamiclinkerconfiguration/libraries/checkdynamiclinkerconfiguration.py +@@ -0,0 +1,79 @@ ++from leapp import reporting ++from leapp.libraries.stdlib import api ++from leapp.models import DynamicLinkerConfiguration ++ ++LD_SO_CONF_DIR = '/etc/ld.so.conf.d' ++LD_SO_CONF_MAIN = '/etc/ld.so.conf' ++LD_LIBRARY_PATH_VAR = 'LD_LIBRARY_PATH' ++LD_PRELOAD_VAR = 'LD_PRELOAD' ++FMT_LIST_SEPARATOR_1 = '\n- ' ++FMT_LIST_SEPARATOR_2 = '\n - ' ++ ++ ++def _report_custom_dynamic_linker_configuration(summary): ++ reporting.create_report([ ++ reporting.Title( ++ 'Detected customized configuration for dynamic linker.' ++ ), ++ reporting.Summary(summary), ++ reporting.Remediation(hint=('Remove or revert the custom dynamic linker configurations and apply the changes ' ++ 'using the ldconfig command. In case of possible active software collections we ' ++ 'suggest disabling them persistently.')), ++ reporting.RelatedResource('file', '/etc/ld.so.conf'), ++ reporting.RelatedResource('directory', '/etc/ld.so.conf.d'), ++ reporting.Severity(reporting.Severity.HIGH), ++ reporting.Groups([reporting.Groups.OS_FACTS]), ++ ]) ++ ++ ++def check_dynamic_linker_configuration(): ++ configuration = next(api.consume(DynamicLinkerConfiguration), None) ++ if not configuration: ++ return ++ ++ custom_configurations = '' ++ if configuration.main_config.modified: ++ custom_configurations += ( ++ '{}The {} file has unexpected contents:{}{}' ++ .format(FMT_LIST_SEPARATOR_1, LD_SO_CONF_MAIN, ++ FMT_LIST_SEPARATOR_2, FMT_LIST_SEPARATOR_2.join(configuration.main_config.modified_lines)) ++ ) ++ ++ custom_configs = [] ++ for config in configuration.included_configs: ++ if config.modified: ++ custom_configs.append(config.path) ++ ++ if custom_configs: ++ custom_configurations += ( ++ '{}The following drop in config files were marked as custom:{}{}' ++ .format(FMT_LIST_SEPARATOR_1, FMT_LIST_SEPARATOR_2, FMT_LIST_SEPARATOR_2.join(custom_configs)) ++ ) ++ ++ if configuration.used_variables: ++ custom_configurations += ( ++ '{}The following variables contain unexpected dynamic linker configuration:{}{}' ++ .format(FMT_LIST_SEPARATOR_1, FMT_LIST_SEPARATOR_2, ++ FMT_LIST_SEPARATOR_2.join(configuration.used_variables)) ++ ) ++ ++ if custom_configurations: ++ summary = ( ++ 'Custom configurations to the dynamic linker could potentially impact ' ++ 'the upgrade in a negative way. The custom configuration includes ' ++ 'modifications to {main_conf}, custom or modified drop in config ' ++ 'files in the {conf_dir} directory and additional entries in the ' ++ '{ldlib_envar} or {ldpre_envar} variables. These modifications ' ++ 'configure the dynamic linker to use different libraries that might ' ++ 'not be provided by Red Hat products or might not be present during ' ++ 'the whole upgrade process. 
The following custom configurations ' ++ 'were detected by leapp:{cust_configs}' ++ .format( ++ main_conf=LD_SO_CONF_MAIN, ++ conf_dir=LD_SO_CONF_DIR, ++ ldlib_envar=LD_LIBRARY_PATH_VAR, ++ ldpre_envar=LD_PRELOAD_VAR, ++ cust_configs=custom_configurations ++ ) ++ ) ++ _report_custom_dynamic_linker_configuration(summary) +diff --git a/repos/system_upgrade/common/actors/checkdynamiclinkerconfiguration/tests/test_checkdynamiclinkerconfiguration.py b/repos/system_upgrade/common/actors/checkdynamiclinkerconfiguration/tests/test_checkdynamiclinkerconfiguration.py +new file mode 100644 +index 00000000..d640f0c5 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/checkdynamiclinkerconfiguration/tests/test_checkdynamiclinkerconfiguration.py +@@ -0,0 +1,65 @@ ++import pytest ++ ++from leapp import reporting ++from leapp.libraries.actor.checkdynamiclinkerconfiguration import ( ++ check_dynamic_linker_configuration, ++ LD_LIBRARY_PATH_VAR, ++ LD_PRELOAD_VAR ++) ++from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked ++from leapp.libraries.stdlib import api ++from leapp.models import DynamicLinkerConfiguration, LDConfigFile, MainLDConfigFile ++ ++INCLUDED_CONFIG_PATHS = ['/etc/ld.so.conf.d/dyninst-x86_64.conf', ++ '/etc/ld.so.conf.d/mariadb-x86_64.conf', ++ '/custom/path/custom1.conf'] ++ ++ ++@pytest.mark.parametrize(('included_configs_modifications', 'used_variables', 'modified_lines'), ++ [ ++ ([False, False, False], [], []), ++ ([True, True, True], [], []), ++ ([False, False, False], [LD_LIBRARY_PATH_VAR], []), ++ ([False, False, False], [], ['modified line 1', 'midified line 2']), ++ ([True, False, True], [LD_LIBRARY_PATH_VAR, LD_PRELOAD_VAR], ['modified line']), ++ ]) ++def test_check_ld_so_configuration(monkeypatch, included_configs_modifications, used_variables, modified_lines): ++ assert len(INCLUDED_CONFIG_PATHS) == len(included_configs_modifications) ++ ++ main_config = MainLDConfigFile(path="/etc/ld.so.conf", modified=any(modified_lines), modified_lines=modified_lines) ++ included_configs = [] ++ for path, modified in zip(INCLUDED_CONFIG_PATHS, included_configs_modifications): ++ included_configs.append(LDConfigFile(path=path, modified=modified)) ++ ++ configuration = DynamicLinkerConfiguration(main_config=main_config, ++ included_configs=included_configs, ++ used_variables=used_variables) ++ ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[configuration])) ++ monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) ++ ++ check_dynamic_linker_configuration() ++ ++ report_expected = any(included_configs_modifications) or modified_lines or used_variables ++ if not report_expected: ++ assert reporting.create_report.called == 0 ++ return ++ ++ assert reporting.create_report.called == 1 ++ assert 'configuration for dynamic linker' in reporting.create_report.reports[0]['title'] ++ summary = reporting.create_report.reports[0]['summary'] ++ ++ if any(included_configs_modifications): ++ assert 'The following drop in config files were marked as custom:' in summary ++ for config, modified in zip(INCLUDED_CONFIG_PATHS, included_configs_modifications): ++ assert modified == (config in summary) ++ ++ if modified_lines: ++ assert 'The /etc/ld.so.conf file has unexpected contents' in summary ++ for line in modified_lines: ++ assert line in summary ++ ++ if used_variables: ++ assert 'The following variables contain unexpected dynamic linker configuration:' in summary ++ for var in used_variables: ++ assert '- {}'.format(var) in summary 
+diff --git a/repos/system_upgrade/common/actors/scandynamiclinkerconfiguration/actor.py b/repos/system_upgrade/common/actors/scandynamiclinkerconfiguration/actor.py +new file mode 100644 +index 00000000..11283cd0 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/scandynamiclinkerconfiguration/actor.py +@@ -0,0 +1,23 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor.scandynamiclinkerconfiguration import scan_dynamic_linker_configuration ++from leapp.models import DynamicLinkerConfiguration, InstalledRedHatSignedRPM ++from leapp.tags import FactsPhaseTag, IPUWorkflowTag ++ ++ ++class ScanDynamicLinkerConfiguration(Actor): ++ """ ++ Scan the dynamic linker configuration and find modifications. ++ ++ The dynamic linker configuration files can be used to replace standard libraries ++ with different custom libraries. The in-place upgrade does not support customization ++ of this configuration by user. This actor produces information about detected ++ modifications. ++ """ ++ ++ name = 'scan_dynamic_linker_configuration' ++ consumes = (InstalledRedHatSignedRPM,) ++ produces = (DynamicLinkerConfiguration,) ++ tags = (FactsPhaseTag, IPUWorkflowTag) ++ ++ def process(self): ++ scan_dynamic_linker_configuration() +diff --git a/repos/system_upgrade/common/actors/scandynamiclinkerconfiguration/libraries/scandynamiclinkerconfiguration.py b/repos/system_upgrade/common/actors/scandynamiclinkerconfiguration/libraries/scandynamiclinkerconfiguration.py +new file mode 100644 +index 00000000..1a6ab6a2 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/scandynamiclinkerconfiguration/libraries/scandynamiclinkerconfiguration.py +@@ -0,0 +1,117 @@ ++import glob ++import os ++ ++from leapp.libraries.common.rpms import has_package ++from leapp.libraries.stdlib import api, CalledProcessError, run ++from leapp.models import DynamicLinkerConfiguration, InstalledRedHatSignedRPM, LDConfigFile, MainLDConfigFile ++ ++LD_SO_CONF_DIR = '/etc/ld.so.conf.d' ++LD_SO_CONF_MAIN = '/etc/ld.so.conf' ++LD_SO_CONF_DEFAULT_INCLUDE = 'ld.so.conf.d/*.conf' ++LD_SO_CONF_COMMENT_PREFIX = '#' ++LD_LIBRARY_PATH_VAR = 'LD_LIBRARY_PATH' ++LD_PRELOAD_VAR = 'LD_PRELOAD' ++ ++ ++def _read_file(file_path): ++ with open(file_path, 'r') as fd: ++ return fd.readlines() ++ ++ ++def _is_modified(config_path): ++ """ Decide if the configuration file was modified based on the package it belongs to. 
""" ++ result = run(['rpm', '-Vf', config_path], checked=False) ++ if not result['exit_code']: ++ return False ++ modification_flags = result['stdout'].split(' ', 1)[0] ++ # The file is considered modified only when the checksum does not match ++ return '5' in modification_flags ++ ++ ++def _is_included_config_custom(config_path): ++ if not os.path.isfile(config_path): ++ return False ++ ++ # Check if the config file has any lines that have an effect on dynamic linker configuration ++ has_effective_line = False ++ for line in _read_file(config_path): ++ line = line.strip() ++ if line and not line.startswith(LD_SO_CONF_COMMENT_PREFIX): ++ has_effective_line = True ++ break ++ ++ if not has_effective_line: ++ return False ++ ++ is_custom = False ++ try: ++ package_name = run(['rpm', '-qf', '--queryformat', '%{NAME}', config_path])['stdout'] ++ is_custom = not has_package(InstalledRedHatSignedRPM, package_name) or _is_modified(config_path) ++ except CalledProcessError: ++ is_custom = True ++ ++ return is_custom ++ ++ ++def _parse_main_config(): ++ """ ++ Extracts included configs from the main dynamic linker configuration file (/etc/ld.so.conf) ++ along with lines that are likely custom. The lines considered custom are simply those that are ++ not includes. ++ ++ :returns: tuple containing all the included files and lines considered custom ++ :rtype: tuple(list, list) ++ """ ++ config = _read_file(LD_SO_CONF_MAIN) ++ ++ included_configs = [] ++ other_lines = [] ++ for line in config: ++ line = line.strip() ++ if line.startswith('include'): ++ cfg_glob = line.split(' ', 1)[1].strip() ++ cfg_glob = os.path.join('/etc', cfg_glob) if not os.path.isabs(cfg_glob) else cfg_glob ++ included_configs.append(cfg_glob) ++ elif line and not line.startswith(LD_SO_CONF_COMMENT_PREFIX): ++ other_lines.append(line) ++ ++ return included_configs, other_lines ++ ++ ++def scan_dynamic_linker_configuration(): ++ included_configs, other_lines = _parse_main_config() ++ ++ is_default_include_present = '/etc/' + LD_SO_CONF_DEFAULT_INCLUDE in included_configs ++ if not is_default_include_present: ++ api.current_logger().debug('The default include "{}" is not present in ' ++ 'the {} file.'.format(LD_SO_CONF_DEFAULT_INCLUDE, LD_SO_CONF_MAIN)) ++ ++ if is_default_include_present and len(included_configs) != 1: ++ # The additional included configs will most likely be created manually by the user ++ # and therefore will get flagged as custom in the next part of this function ++ api.current_logger().debug('The default include "{}" is not the only include in ' ++ 'the {} file.'.format(LD_SO_CONF_DEFAULT_INCLUDE, LD_SO_CONF_MAIN)) ++ ++ main_config_file = MainLDConfigFile(path=LD_SO_CONF_MAIN, modified=any(other_lines), modified_lines=other_lines) ++ ++ # Expand the config paths from globs and ensure uniqueness of resulting paths ++ config_paths = set() ++ for cfg_glob in included_configs: ++ for cfg in glob.glob(cfg_glob): ++ config_paths.add(cfg) ++ ++ included_config_files = [] ++ for config_path in config_paths: ++ config_file = LDConfigFile(path=config_path, modified=_is_included_config_custom(config_path)) ++ included_config_files.append(config_file) ++ ++ # Check if dynamic linker variables used for specifying custom libraries are set ++ variables = [LD_LIBRARY_PATH_VAR, LD_PRELOAD_VAR] ++ used_variables = [var for var in variables if os.getenv(var, None)] ++ ++ configuration = DynamicLinkerConfiguration(main_config=main_config_file, ++ included_configs=included_config_files, ++ used_variables=used_variables) ++ ++ if 
other_lines or any([config.modified for config in included_config_files]) or used_variables: ++ api.produce(configuration) +diff --git a/repos/system_upgrade/common/actors/scandynamiclinkerconfiguration/tests/test_scandynamiclinkerconfiguration.py b/repos/system_upgrade/common/actors/scandynamiclinkerconfiguration/tests/test_scandynamiclinkerconfiguration.py +new file mode 100644 +index 00000000..21144951 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/scandynamiclinkerconfiguration/tests/test_scandynamiclinkerconfiguration.py +@@ -0,0 +1,181 @@ ++import glob ++import os ++ ++import pytest ++ ++from leapp import reporting ++from leapp.libraries.actor import scandynamiclinkerconfiguration ++from leapp.libraries.common.testutils import produce_mocked ++from leapp.libraries.stdlib import api, CalledProcessError ++from leapp.models import InstalledRedHatSignedRPM ++ ++INCLUDED_CONFIGS_GLOB_DICT_1 = {'/etc/ld.so.conf.d/*.conf': ['/etc/ld.so.conf.d/dyninst-x86_64.conf', ++ '/etc/ld.so.conf.d/mariadb-x86_64.conf', ++ '/etc/ld.so.conf.d/bind-export-x86_64.conf']} ++ ++INCLUDED_CONFIGS_GLOB_DICT_2 = {'/etc/ld.so.conf.d/*.conf': ['/etc/ld.so.conf.d/dyninst-x86_64.conf', ++ '/etc/ld.so.conf.d/mariadb-x86_64.conf', ++ '/etc/ld.so.conf.d/bind-export-x86_64.conf', ++ '/etc/ld.so.conf.d/custom1.conf', ++ '/etc/ld.so.conf.d/custom2.conf']} ++ ++INCLUDED_CONFIGS_GLOB_DICT_3 = {'/etc/ld.so.conf.d/*.conf': ['/etc/ld.so.conf.d/dyninst-x86_64.conf', ++ '/etc/ld.so.conf.d/custom1.conf', ++ '/etc/ld.so.conf.d/mariadb-x86_64.conf', ++ '/etc/ld.so.conf.d/bind-export-x86_64.conf', ++ '/etc/ld.so.conf.d/custom2.conf'], ++ '/custom/path/*.conf': ['/custom/path/custom1.conf', ++ '/custom/path/custom2.conf']} ++ ++ ++@pytest.mark.parametrize(('included_configs_glob_dict', 'other_lines', 'custom_configs', 'used_variables'), ++ [ ++ (INCLUDED_CONFIGS_GLOB_DICT_1, [], [], []), ++ (INCLUDED_CONFIGS_GLOB_DICT_1, ['/custom/path.lib'], [], []), ++ (INCLUDED_CONFIGS_GLOB_DICT_1, [], [], ['LD_LIBRARY_PATH']), ++ (INCLUDED_CONFIGS_GLOB_DICT_2, [], ['/etc/ld.so.conf.d/custom1.conf', ++ '/etc/ld.so.conf.d/custom2.conf'], []), ++ (INCLUDED_CONFIGS_GLOB_DICT_3, ['/custom/path.lib'], ['/etc/ld.so.conf.d/custom1.conf', ++ '/etc/ld.so.conf.d/custom2.conf' ++ '/custom/path/custom1.conf', ++ '/custom/path/custom2.conf'], []), ++ ]) ++def test_scan_dynamic_linker_configuration(monkeypatch, included_configs_glob_dict, other_lines, ++ custom_configs, used_variables): ++ monkeypatch.setattr(scandynamiclinkerconfiguration, '_parse_main_config', ++ lambda: (included_configs_glob_dict.keys(), other_lines)) ++ monkeypatch.setattr(glob, 'glob', lambda glob: included_configs_glob_dict[glob]) ++ monkeypatch.setattr(scandynamiclinkerconfiguration, '_is_included_config_custom', ++ lambda config: config in custom_configs) ++ monkeypatch.setattr(api, 'produce', produce_mocked()) ++ ++ for var in used_variables: ++ monkeypatch.setenv(var, '/some/path') ++ ++ scandynamiclinkerconfiguration.scan_dynamic_linker_configuration() ++ ++ produce_expected = custom_configs or other_lines or used_variables ++ if not produce_expected: ++ assert not api.produce.called ++ return ++ ++ assert api.produce.called == 1 ++ ++ configuration = api.produce.model_instances[0] ++ ++ all_configs = [] ++ for configs in included_configs_glob_dict.values(): ++ all_configs += configs ++ ++ assert len(all_configs) == len(configuration.included_configs) ++ for config in configuration.included_configs: ++ if config.path in custom_configs: ++ assert config.modified ++ ++ 
assert configuration.main_config.path == scandynamiclinkerconfiguration.LD_SO_CONF_MAIN ++ if other_lines: ++ assert configuration.main_config.modified ++ assert configuration.main_config.modified_lines == other_lines ++ ++ if used_variables: ++ assert configuration.used_variables == used_variables ++ ++ ++@pytest.mark.parametrize(('config_contents', 'included_config_paths', 'other_lines'), ++ [ ++ (['include ld.so.conf.d/*.conf\n'], ++ ['/etc/ld.so.conf.d/*.conf'], []), ++ (['include ld.so.conf.d/*.conf\n', '\n', '/custom/path.lib\n', '#comment'], ++ ['/etc/ld.so.conf.d/*.conf'], ['/custom/path.lib']), ++ (['include ld.so.conf.d/*.conf\n', 'include /custom/path.conf\n'], ++ ['/etc/ld.so.conf.d/*.conf', '/custom/path.conf'], []), ++ (['include ld.so.conf.d/*.conf\n', '#include /custom/path.conf\n', '#/custom/path.conf\n'], ++ ['/etc/ld.so.conf.d/*.conf'], []), ++ ([' \n'], ++ [], []) ++ ]) ++def test_parse_main_config(monkeypatch, config_contents, included_config_paths, other_lines): ++ def mocked_read_file(path): ++ assert path == scandynamiclinkerconfiguration.LD_SO_CONF_MAIN ++ return config_contents ++ ++ monkeypatch.setattr(scandynamiclinkerconfiguration, '_read_file', mocked_read_file) ++ ++ _included_config_paths, _other_lines = scandynamiclinkerconfiguration._parse_main_config() ++ ++ assert _included_config_paths == included_config_paths ++ assert _other_lines == other_lines ++ ++ ++@pytest.mark.parametrize(('config_path', 'run_result', 'is_modified'), ++ [ ++ ('/etc/ld.so.conf.d/dyninst-x86_64.conf', ++ '.......T. c /etc/ld.so.conf.d/dyninst-x86_64.conf', False), ++ ('/etc/ld.so.conf.d/dyninst-x86_64.conf', ++ 'S.5....T. c /etc/ld.so.conf.d/dyninst-x86_64.conf', True), ++ ('/etc/ld.so.conf.d/kernel-3.10.0-1160.el7.x86_64.conf', ++ '', False) ++ ]) ++def test_is_modified(monkeypatch, config_path, run_result, is_modified): ++ def mocked_run(command, checked): ++ assert config_path in command ++ assert checked is False ++ exit_code = 1 if run_result else 0 ++ return {'stdout': run_result, 'exit_code': exit_code} ++ ++ monkeypatch.setattr(scandynamiclinkerconfiguration, 'run', mocked_run) ++ ++ _is_modified = scandynamiclinkerconfiguration._is_modified(config_path) ++ assert _is_modified == is_modified ++ ++ ++@pytest.mark.parametrize(('config_path', ++ 'config_contents', 'run_result', ++ 'is_installed_rh_signed_package', 'is_modified', 'has_effective_lines'), ++ [ ++ ('/etc/ld.so.conf.d/dyninst-x86_64.conf', ++ ['/usr/lib64/dyninst\n'], 'dyninst', ++ True, False, True), # RH sighend package without modification - Not custom ++ ('/etc/ld.so.conf.d/dyninst-x86_64.conf', ++ ['/usr/lib64/my_dyninst\n'], 'dyninst', ++ True, True, True), # Was modified by user - Custom ++ ('/etc/custom/custom.conf', ++ ['/usr/lib64/custom'], 'custom', ++ False, None, True), # Third-party package - Custom ++ ('/etc/custom/custom.conf', ++ ['#/usr/lib64/custom\n'], 'custom', ++ False, None, False), # Third-party package without effective lines - Not custom ++ ('/etc/ld.so.conf.d/somelib.conf', ++ ['/usr/lib64/somelib\n'], CalledProcessError, ++ None, None, True), # User created configuration file - Custom ++ ('/etc/ld.so.conf.d/somelib.conf', ++ ['#/usr/lib64/somelib\n'], CalledProcessError, ++ None, None, False) # User created configuration file without effective lines - Not custom ++ ]) ++def test_is_included_config_custom(monkeypatch, config_path, config_contents, run_result, ++ is_installed_rh_signed_package, is_modified, has_effective_lines): ++ def mocked_run(command): ++ assert config_path in command 
++ if run_result and not isinstance(run_result, str): ++ raise CalledProcessError("message", command, "result") ++ return {'stdout': run_result} ++ ++ def mocked_has_package(model, package_name): ++ assert model is InstalledRedHatSignedRPM ++ assert package_name == run_result ++ return is_installed_rh_signed_package ++ ++ def mocked_read_file(path): ++ assert path == config_path ++ return config_contents ++ ++ monkeypatch.setattr(scandynamiclinkerconfiguration, 'run', mocked_run) ++ monkeypatch.setattr(scandynamiclinkerconfiguration, 'has_package', mocked_has_package) ++ monkeypatch.setattr(scandynamiclinkerconfiguration, '_read_file', mocked_read_file) ++ monkeypatch.setattr(scandynamiclinkerconfiguration, '_is_modified', lambda *_: is_modified) ++ monkeypatch.setattr(os.path, 'isfile', lambda _: True) ++ ++ result = scandynamiclinkerconfiguration._is_included_config_custom(config_path) ++ is_custom = not isinstance(run_result, str) or not is_installed_rh_signed_package or is_modified ++ is_custom &= has_effective_lines ++ assert result == is_custom +diff --git a/repos/system_upgrade/common/models/dynamiclinker.py b/repos/system_upgrade/common/models/dynamiclinker.py +new file mode 100644 +index 00000000..4dc107f4 +--- /dev/null ++++ b/repos/system_upgrade/common/models/dynamiclinker.py +@@ -0,0 +1,41 @@ ++from leapp.models import fields, Model ++from leapp.topics import SystemFactsTopic ++ ++ ++class LDConfigFile(Model): ++ """ ++ Represents a config file related to dynamic linker configuration ++ """ ++ topic = SystemFactsTopic ++ ++ path = fields.String() ++ """ Absolute path to the configuration file """ ++ ++ modified = fields.Boolean() ++ """ If True the file is considered custom and will trigger a report """ ++ ++ ++class MainLDConfigFile(LDConfigFile): ++ """ ++ Represents the main configuration file of the dynamic linker /etc/ld.so.conf ++ """ ++ topic = SystemFactsTopic ++ ++ modified_lines = fields.List(fields.String(), default=[]) ++ """ Lines that are considered custom, generally those that are not includes of other configs """ ++ ++ ++class DynamicLinkerConfiguration(Model): ++ """ ++ Facts about configuration of dynamic linker ++ """ ++ topic = SystemFactsTopic ++ ++ main_config = fields.Model(MainLDConfigFile) ++ """ The main configuration file of dynamic linker (/etc/ld.so.conf) """ ++ ++ included_configs = fields.List(fields.Model(LDConfigFile)) ++ """ All the configs that are included by the main configuration file """ ++ ++ used_variables = fields.List(fields.String(), default=[]) ++ """ Environment variables that are currently used to modify dynamic linker configuration """ +-- +2.41.0 + diff --git a/0039-Fix-several-typos-and-Makefile-help.patch b/0039-Fix-several-typos-and-Makefile-help.patch new file mode 100644 index 0000000..817d880 --- /dev/null +++ b/0039-Fix-several-typos-and-Makefile-help.patch @@ -0,0 +1,60 @@ +From c81731f04c479fd9212458054d9ba21daa8e4780 Mon Sep 17 00:00:00 2001 +From: Jakub Jelen +Date: Mon, 26 Jun 2023 16:29:45 +0200 +Subject: [PATCH 39/41] Fix several typos and Makefile help + +- CheckSystemdServicesTasks: Fix typo in the phase name in comment +- utils: fix typo in comment +- Makefile: Fix example in help to actually work + +Squashed by Petr Stodulka + +Signed-off-by: Jakub Jelen +--- + Makefile | 2 +- + .../common/actors/systemd/checksystemdservicetasks/actor.py | 2 +- + repos/system_upgrade/common/libraries/utils.py | 2 +- + 3 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/Makefile b/Makefile +index e3c40e01..b504a854 
100644
+--- a/Makefile
++++ b/Makefile
+@@ -155,7 +155,7 @@ help:
+ @echo " PR=7 SUFFIX='my_additional_suffix' make "
+ @echo " MR=6 COPR_CONFIG='path/to/the/config/copr/file' make "
+ @echo " ACTOR= TEST_LIBS=y make test"
+- @echo " BUILD_CONTAINER=el7 make build_container"
++ @echo " BUILD_CONTAINER=rhel7 make build_container"
+ @echo " TEST_CONTAINER=f34 make test_container"
+ @echo " CONTAINER_TOOL=docker TEST_CONTAINER=rhel7 make test_container_no_lint"
+ @echo ""
+diff --git a/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/actor.py b/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/actor.py
+index 547a13df..272ebc1f 100644
+--- a/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/actor.py
++++ b/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/actor.py
+@@ -14,7 +14,7 @@ class CheckSystemdServicesTasks(Actor):
+     - enabled and disabled. This actor inhibits upgrade in such cases.
+ 
+     Note: We expect that SystemdServicesTasks could be produced even after the
+-    TargetTransactionChecksPhase (e.g. during the ApplicationPhase). The
++    TargetTransactionChecksPhase (e.g. during the ApplicationsPhase). The
+     purpose of this actor is to report collisions in case we can already detect
+     them. In case of conflicts caused by messages produced later we just log
+     the collisions and the services will end up disabled.
+diff --git a/repos/system_upgrade/common/libraries/utils.py b/repos/system_upgrade/common/libraries/utils.py
+index cd3ad1a6..38b9bb1a 100644
+--- a/repos/system_upgrade/common/libraries/utils.py
++++ b/repos/system_upgrade/common/libraries/utils.py
+@@ -14,7 +14,7 @@ def parse_config(cfg=None, strict=True):
+     """
+     Applies a workaround to parse a config file using py3 AND py2
+ 
+-    ConfigParser has a new def to read strings/iles in Py3, making
++    ConfigParser has a new def to read strings/files in Py3, making
+     the old ones (Py2) obsoletes, these function was created to use the
+     ConfigParser on Py2 and Py3
+ 
+--
+2.41.0
+
diff --git a/0040-Move-code-handling-GPG-keys-to-separate-library.patch b/0040-Move-code-handling-GPG-keys-to-separate-library.patch
new file mode 100644
index 0000000..d80ea9c
--- /dev/null
+++ b/0040-Move-code-handling-GPG-keys-to-separate-library.patch
@@ -0,0 +1,1381 @@
+From 747a9e442fce1886274038341936dfaa3939d352 Mon Sep 17 00:00:00 2001
+From: Jakub Jelen
+Date: Tue, 4 Jul 2023 16:14:04 +0200
+Subject: [PATCH 40/41] Move code handling GPG keys to separate library
+
+This decouples the GPG key handling, and some duplicated code, from the
+MissingGpgKeysInhibitor actor into a separate library that will be
+usable from more actors.
+
+A new actor, TrustedGpgKeysScanner, is created, which handles
+reading the source RPM DB and trusted keys directory and produces a new
+model describing what keys are supposed to be trusted on the target
+system.
+
+This also removes the code duplication for detecting the --nogpgcheck
+option and for defining the directory where to look for the GPG keys.
+
+Petr Stodulka updates:
+
+* updated docstrings for public functions in the shared library
+  We want them documented better in comparison to functions in private
+  (actor's) libraries as they could be used by everyone.
+
+* some functions are renamed:
+  * read_gpg_fp_from_file -> get_gpg_fp_from_file
+  * the_nogpgcheck_option_used -> is_nogpgcheck_set
+  The related code has been updated.
+ +* use the gpg library in the shared dnfplugin library + +* make some unit-tests conditional so we know the results are always + valid (skip if distro ID is not rhel or centos) + +* update tests and improve the test coverage + +Signed-off-by: Jakub Jelen +--- + .../actors/missinggpgkeysinhibitor/actor.py | 4 +- + .../libraries/missinggpgkey.py | 153 ++-------------- + .../tests/component_test_missinggpgkey.py | 109 ++++-------- + .../tests/unit_test_missinggpgkey.py | 168 +----------------- + .../libraries/userspacegen.py | 21 +-- + .../actors/trustedgpgkeysscanner/actor.py | 21 +++ + .../libraries/trustedgpgkeys.py | 38 ++++ + .../tests/test_trustedgpgkeys.py | 87 +++++++++ + .../common/libraries/dnfplugin.py | 9 +- + repos/system_upgrade/common/libraries/gpg.py | 137 ++++++++++++++ + .../common/libraries/tests/test_gpg.py | 147 +++++++++++++++ + .../common/models/trustedgpgkeys.py | 19 ++ + 12 files changed, 506 insertions(+), 407 deletions(-) + create mode 100644 repos/system_upgrade/common/actors/trustedgpgkeysscanner/actor.py + create mode 100644 repos/system_upgrade/common/actors/trustedgpgkeysscanner/libraries/trustedgpgkeys.py + create mode 100644 repos/system_upgrade/common/actors/trustedgpgkeysscanner/tests/test_trustedgpgkeys.py + create mode 100644 repos/system_upgrade/common/libraries/gpg.py + create mode 100644 repos/system_upgrade/common/libraries/tests/test_gpg.py + create mode 100644 repos/system_upgrade/common/models/trustedgpgkeys.py + +diff --git a/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/actor.py b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/actor.py +index 6f836a5b..faa96452 100644 +--- a/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/actor.py ++++ b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/actor.py +@@ -2,9 +2,9 @@ from leapp.actors import Actor + from leapp.libraries.actor import missinggpgkey + from leapp.models import ( + DNFWorkaround, +- InstalledRPM, + TargetUserSpaceInfo, + TMPTargetRepositoriesFacts, ++ TrustedGpgKeys, + UsedTargetRepositories + ) + from leapp.reporting import Report +@@ -28,7 +28,7 @@ class MissingGpgKeysInhibitor(Actor): + + name = 'missing_gpg_keys_inhibitor' + consumes = ( +- InstalledRPM, ++ TrustedGpgKeys, + TMPTargetRepositoriesFacts, + TargetUserSpaceInfo, + UsedTargetRepositories, +diff --git a/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py +index 1880986d..9a806ca2 100644 +--- a/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py ++++ b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py +@@ -8,113 +8,21 @@ from six.moves import urllib + + from leapp import reporting + from leapp.exceptions import StopActorExecutionError +-from leapp.libraries.common import config +-from leapp.libraries.common.config.version import get_source_major_version, get_target_major_version +-from leapp.libraries.stdlib import api, run ++from leapp.libraries.common.config.version import get_target_major_version ++from leapp.libraries.common.gpg import get_gpg_fp_from_file, get_path_to_gpg_certs, is_nogpgcheck_set ++from leapp.libraries.stdlib import api + from leapp.models import ( + DNFWorkaround, +- InstalledRPM, + TargetUserSpaceInfo, + TMPTargetRepositoriesFacts, ++ TrustedGpgKeys, + UsedTargetRepositories + ) + from leapp.utils.deprecation import suppress_deprecation + +-GPG_CERTS_FOLDER = 
'rpm-gpg' + FMT_LIST_SEPARATOR = '\n - ' + + +-def _gpg_show_keys(key_path): +- """ +- Show keys in given file in version-agnostic manner +- +- This runs gpg --show-keys (EL8) or gpg --with-fingerprints (EL7) +- to verify the given file exists, is readable and contains valid +- OpenPGP key data, which is printed in parsable format (--with-colons). +- """ +- try: +- cmd = ['gpg2'] +- # RHEL7 gnupg requires different switches to get the same output +- if get_source_major_version() == '7': +- cmd.append('--with-fingerprint') +- else: +- cmd.append('--show-keys') +- cmd += ['--with-colons', key_path] +- # TODO: discussed, most likely the checked=False will be dropped +- # and error will be handled in other functions +- return run(cmd, split=True, checked=False) +- except OSError as err: +- # NOTE: this is hypothetic; gnupg2 has to be installed on RHEL 7+ +- error = 'Failed to read fingerprint from GPG key {}: {}'.format(key_path, str(err)) +- api.current_logger().error(error) +- return {} +- +- +-def _parse_fp_from_gpg(output): +- """ +- Parse the output of gpg --show-keys --with-colons. +- +- Return list of 8 characters fingerprints per each gpgkey for the given +- output from stdlib.run() or None if some error occurred. Either the +- command return non-zero exit code, the file does not exists, its not +- readable or does not contain any openpgp data. +- """ +- if not output or output['exit_code']: +- return [] +- +- # we are interested in the lines of the output starting with "pub:" +- # the colons are used for separating the fields in output like this +- # pub:-:4096:1:999F7CBF38AB71F4:1612983048:::-:::escESC::::::23::0: +- # ^--------------^ this is the fingerprint we need +- # ^------^ but RPM version is just the last 8 chars lowercase +- # Also multiple gpg keys can be stored in the file, so go through all "pub" +- # lines +- gpg_fps = [] +- for line in output['stdout']: +- if not line or not line.startswith('pub:'): +- continue +- parts = line.split(':') +- if len(parts) >= 4 and len(parts[4]) == 16: +- gpg_fps.append(parts[4][8:].lower()) +- else: +- api.current_logger().warning( +- 'Cannot parse the gpg2 output. Line: "{}"' +- .format(line) +- ) +- +- return gpg_fps +- +- +-def _read_gpg_fp_from_file(key_path): +- """ +- Returns the list of public key fingerprints from the given file +- +- Logs warning in case no OpenPGP data found in the given file or it is not +- readable for some reason. +- """ +- res = _gpg_show_keys(key_path) +- fp = _parse_fp_from_gpg(res) +- if not fp: +- error = 'Unable to read OpenPGP keys from {}: {}'.format(key_path, res['stderr']) +- api.current_logger().error(error) +- return fp +- +- +-def _get_path_to_gpg_certs(): +- """ +- Get path to the directory with trusted target gpg keys in leapp tree +- """ +- # XXX This is copy&paste from TargetUserspaceCreator actor. +- # Potential changes need to happen in both places to keep them in sync. 
+- target_major_version = get_target_major_version() +- target_product_type = config.get_product_type('target') +- certs_dir = target_major_version +- # only beta is special in regards to the GPG signing keys +- if target_product_type == 'beta': +- certs_dir = '{}beta'.format(target_major_version) +- return os.path.join(api.get_common_folder_path(GPG_CERTS_FOLDER), certs_dir) +- +- + def _expand_vars(path): + """ + Expand variables like $releasever and $basearch to the target system version +@@ -152,38 +60,6 @@ def _get_abs_file_path(target_userspace, file_url): + return os.path.join('/', file_path) + + +-def _pubkeys_from_rpms(installed_rpms): +- """ +- Return the list of fingerprints of GPG keys in RPM DB +- +- This function returns short 8 characters fingerprints of trusted GPG keys +- "installed" in the source OS RPM database. These look like normal packages +- named "gpg-pubkey" and the fingerprint is present in the version field. +- """ +- return [pkg.version for pkg in installed_rpms.items if pkg.name == 'gpg-pubkey'] +- +- +-def _get_pubkeys(installed_rpms): +- """ +- Get pubkeys from installed rpms and the trusted directory +- """ +- pubkeys = _pubkeys_from_rpms(installed_rpms) +- certs_path = _get_path_to_gpg_certs() +- for certname in os.listdir(certs_path): +- key_file = os.path.join(certs_path, certname) +- fps = _read_gpg_fp_from_file(key_file) +- if fps: +- pubkeys += fps +- # TODO: what about else: ? +- # The warning is now logged in _read_gpg_fp_from_file. We can raise +- # the priority of the message or convert it to report though. +- return pubkeys +- +- +-def _the_nogpgcheck_option_used(): +- return config.get_env('LEAPP_NOGPGCHECK', False) == '1' +- +- + def _consume_data(): + try: + used_target_repos = next(api.consume(UsedTargetRepositories)).repos +@@ -199,10 +75,10 @@ def _consume_data(): + 'Could not check for valid GPG keys', details={'details': 'No TMPTargetRepositoriesFacts facts'} + ) + try: +- installed_rpms = next(api.consume(InstalledRPM)) ++ trusted_gpg_keys = next(api.consume(TrustedGpgKeys)) + except StopIteration: + raise StopActorExecutionError( +- 'Could not check for valid GPG keys', details={'details': 'No InstalledRPM facts'} ++ 'Could not check for valid GPG keys', details={'details': 'No TrustedGpgKeys facts'} + ) + try: + target_userspace = next(api.consume(TargetUserSpaceInfo)) +@@ -211,7 +87,7 @@ def _consume_data(): + 'Could not check for valid GPG keys', details={'details': 'No TargetUserSpaceInfo facts'} + ) + +- return used_target_repos, target_repos, installed_rpms, target_userspace ++ return used_target_repos, target_repos, trusted_gpg_keys, target_userspace + + + def _get_repo_gpgkey_urls(repo): +@@ -274,7 +150,7 @@ def _report(title, summary, keys, inhibitor=False): + ' prior the upgrade.' + ' If you want to proceed the in-place upgrade without checking any RPM' + ' signatures, execute leapp with the `--nogpgcheck` option.' +- .format(_get_path_to_gpg_certs()) ++ .format(get_path_to_gpg_certs()) + ) + groups = [reporting.Groups.REPOSITORY] + if inhibitor: +@@ -306,7 +182,7 @@ def _report_missing_keys(keys): + summary = ( + 'Some of the target repositories require GPG keys that are not installed' + ' in the current RPM DB or are not stored in the {trust_dir} directory.' 
+- .format(trust_dir=_get_path_to_gpg_certs()) ++ .format(trust_dir=get_path_to_gpg_certs()) + ) + _report('Detected unknown GPG keys for target system repositories', summary, keys, True) + +@@ -383,7 +259,7 @@ def register_dnfworkaround(): + api.produce(DNFWorkaround( + display_name='import trusted gpg keys to RPM DB', + script_path=api.current_actor().get_common_tool_path('importrpmgpgkeys'), +- script_args=[_get_path_to_gpg_certs()], ++ script_args=[get_path_to_gpg_certs()], + )) + + +@@ -396,11 +272,11 @@ def process(): + them from model TMPTargetRepositoriesFacts. + """ + # when the user decided to ignore gpg signatures on the packages, we can ignore these checks altogether +- if _the_nogpgcheck_option_used(): ++ if is_nogpgcheck_set(): + api.current_logger().warning('The --nogpgcheck option is used: skipping all related checks.') + return + +- used_target_repos, target_repos, installed_rpms, target_userspace = _consume_data() ++ used_target_repos, target_repos, trusted_gpg_keys, target_userspace = _consume_data() + + target_repo_id_to_repositories_facts_map = { + repo.repoid: repo +@@ -415,8 +291,7 @@ def process(): + invalid_keys = list() + repos_missing_keys = list() + +- # These are used only for getting the installed gpg-pubkey "packages" +- pubkeys = _get_pubkeys(installed_rpms) ++ pubkeys = [key.fingerprint for key in trusted_gpg_keys.items] + processed_gpgkey_urls = set() + tmpdir = None + for repoid in used_target_repos: +@@ -454,7 +329,7 @@ def process(): + api.current_logger().error( + 'Skipping unknown protocol for gpgkey {}'.format(gpgkey_url)) + continue +- fps = _read_gpg_fp_from_file(key_file) ++ fps = get_gpg_fp_from_file(key_file) + if not fps: + invalid_keys.append(gpgkey_url) + api.current_logger().warning( +diff --git a/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/tests/component_test_missinggpgkey.py b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/tests/component_test_missinggpgkey.py +index 7da13cec..6d3fa0b2 100644 +--- a/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/tests/component_test_missinggpgkey.py ++++ b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/tests/component_test_missinggpgkey.py +@@ -3,12 +3,13 @@ from six.moves.urllib.error import URLError + + from leapp import reporting + from leapp.exceptions import StopActorExecutionError +-from leapp.libraries.actor.missinggpgkey import _pubkeys_from_rpms, process ++from leapp.libraries.actor.missinggpgkey import process ++from leapp.libraries.common.gpg import get_pubkeys_from_rpms + from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked, logger_mocked, produce_mocked + from leapp.libraries.stdlib import api + from leapp.models import ( + DNFWorkaround, +- InstalledRPM, ++ GpgKey, + Report, + RepositoriesFacts, + RepositoryData, +@@ -16,6 +17,7 @@ from leapp.models import ( + RPM, + TargetUserSpaceInfo, + TMPTargetRepositoriesFacts, ++ TrustedGpgKeys, + UsedTargetRepositories, + UsedTargetRepository + ) +@@ -26,59 +28,21 @@ from leapp.utils.deprecation import suppress_deprecation + # whole process as I was initially advised not to use these component tests. + + +-def _get_test_installedrpm_no_my_key(): ++def _get_test_gpgkeys_missing(): + """ +- Valid RPM packages missing the key we are looking for (epel9) ++ Return list of Trusted GPG keys without the epel9 key we look for + """ + return [ +- RPM( +- name='rpm', +- version='4.16.1.3', +- release='17.el9', +- epoch='0', +- packager='Red Hat, Inc. 
', +- arch='x86_64', +- pgpsig='RSA/SHA256, Mon 08 Aug 2022 09:10:15 AM UTC, Key ID 199e2f91fd431d51', +- repository='BaseOS', +- ), +- RPM( +- name='gpg-pubkey', +- version='fd431d51', +- release='4ae0493b', +- epoch='0', +- packager='Red Hat, Inc. (release key 2) ', +- arch='noarch', +- pgpsig='' +- ), +- RPM( +- name='gpg-pubkey', +- version='5a6340b3', +- release='6229229e', +- epoch='0', +- packager='Red Hat, Inc. (auxiliary key 3) ', +- arch='noarch', +- pgpsig='' +- ), ++ GpgKey(fingerprint='fd431d51', rpmdb=True), ++ GpgKey(fingerprint='5a6340b3', rpmdb=True), + ] + + +-def _get_test_installedrpm(): ++def _get_test_gpgkeys(): + """ +- All test RPMS packages ++ Return all the Trusted GPG keys for a test + """ +- return InstalledRPM( +- items=[ +- RPM( +- name='gpg-pubkey', +- version='3228467c', +- release='613798eb', +- epoch='0', +- packager='Fedora (epel9) ', +- arch='noarch', +- pgpsig='' +- ), +- ] + _get_test_installedrpm_no_my_key(), +- ) ++ return TrustedGpgKeys(items=[GpgKey(fingerprint='3228467c', rpmdb=True)] + _get_test_gpgkeys_missing()) + + + def _get_test_targuserspaceinfo(path='/'): +@@ -189,7 +153,7 @@ def test_perform_nogpgcheck(monkeypatch): + monkeypatch.setattr(api, 'current_actor', CurrentActorMocked( + envars={'LEAPP_NOGPGCHECK': '1'}, + msgs=[ +- _get_test_installedrpm(), ++ _get_test_gpgkeys(), + _get_test_usedtargetrepositories(), + _get_test_tmptargetrepositoriesfacts(), + ], +@@ -206,13 +170,13 @@ def test_perform_nogpgcheck(monkeypatch): + + @pytest.mark.parametrize('msgs', [ + [], +- [_get_test_installedrpm], ++ [_get_test_gpgkeys], + [_get_test_usedtargetrepositories], + [_get_test_tmptargetrepositoriesfacts], + # These are just incomplete lists of required facts +- [_get_test_installedrpm(), _get_test_usedtargetrepositories()], ++ [_get_test_gpgkeys(), _get_test_usedtargetrepositories()], + [_get_test_usedtargetrepositories(), _get_test_tmptargetrepositoriesfacts()], +- [_get_test_installedrpm(), _get_test_tmptargetrepositoriesfacts()], ++ [_get_test_gpgkeys(), _get_test_tmptargetrepositoriesfacts()], + ]) + def test_perform_missing_facts(monkeypatch, msgs): + """ +@@ -238,7 +202,7 @@ def test_perform_missing_facts(monkeypatch, msgs): + @suppress_deprecation(TMPTargetRepositoriesFacts) + def _get_test_tmptargetrepositoriesfacts_partial(): + return [ +- _get_test_installedrpm(), ++ _get_test_gpgkeys(), + _get_test_usedtargetrepositories(), + TMPTargetRepositoriesFacts( + repositories=[ +@@ -298,7 +262,7 @@ def _get_pubkeys_mocked(installed_rpms): + """ + This skips getting fps from files in container for simplification + """ +- return _pubkeys_from_rpms(installed_rpms) ++ return get_pubkeys_from_rpms(installed_rpms) + + + def test_perform_missing_some_repo_facts(monkeypatch): +@@ -314,7 +278,7 @@ def test_perform_missing_some_repo_facts(monkeypatch): + monkeypatch.setattr(api, 'produce', produce_mocked()) + monkeypatch.setattr(api, 'current_logger', logger_mocked()) + monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) +- monkeypatch.setattr('leapp.libraries.actor.missinggpgkey._gpg_show_keys', _gpg_show_keys_mocked) ++ monkeypatch.setattr('leapp.libraries.common.gpg._gpg_show_keys', _gpg_show_keys_mocked) + + with pytest.raises(StopActorExecutionError): + process() +@@ -326,7 +290,7 @@ def test_perform_missing_some_repo_facts(monkeypatch): + def _get_test_tmptargetrepositoriesfacts_https_unused(): + return [ + _get_test_targuserspaceinfo(), +- _get_test_installedrpm(), ++ _get_test_gpgkeys(), + _get_test_usedtargetrepositories(), + 
TMPTargetRepositoriesFacts( + repositories=[ +@@ -362,8 +326,7 @@ def test_perform_https_gpgkey_unused(monkeypatch): + monkeypatch.setattr(api, 'produce', produce_mocked()) + monkeypatch.setattr(api, 'current_logger', logger_mocked()) + monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) +- monkeypatch.setattr('leapp.libraries.actor.missinggpgkey._gpg_show_keys', _gpg_show_keys_mocked) +- monkeypatch.setattr('leapp.libraries.actor.missinggpgkey._get_pubkeys', _get_pubkeys_mocked) ++ monkeypatch.setattr('leapp.libraries.common.gpg._gpg_show_keys', _gpg_show_keys_mocked) + + process() + assert not api.current_logger.warnmsg +@@ -376,7 +339,7 @@ def test_perform_https_gpgkey_unused(monkeypatch): + def get_test_tmptargetrepositoriesfacts_https(): + return ( + _get_test_targuserspaceinfo(), +- _get_test_installedrpm(), ++ _get_test_gpgkeys(), + UsedTargetRepositories( + repos=_get_test_usedtargetrepositories_list() + [ + UsedTargetRepository( +@@ -409,7 +372,7 @@ def get_test_tmptargetrepositoriesfacts_https(): + def get_test_tmptargetrepositoriesfacts_ftp(): + return ( + _get_test_targuserspaceinfo(), +- _get_test_installedrpm(), ++ _get_test_gpgkeys(), + UsedTargetRepositories( + repos=_get_test_usedtargetrepositories_list() + [ + UsedTargetRepository( +@@ -454,8 +417,7 @@ def test_perform_https_gpgkey(monkeypatch): + monkeypatch.setattr(api, 'produce', produce_mocked()) + monkeypatch.setattr(api, 'current_logger', logger_mocked()) + monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) +- monkeypatch.setattr('leapp.libraries.actor.missinggpgkey._gpg_show_keys', _gpg_show_keys_mocked) +- monkeypatch.setattr('leapp.libraries.actor.missinggpgkey._get_pubkeys', _get_pubkeys_mocked) ++ monkeypatch.setattr('leapp.libraries.common.gpg._gpg_show_keys', _gpg_show_keys_mocked) + monkeypatch.setattr('six.moves.urllib.request.urlretrieve', _urlretrive_mocked) + + process() +@@ -482,8 +444,7 @@ def test_perform_https_gpgkey_urlerror(monkeypatch): + monkeypatch.setattr(api, 'produce', produce_mocked()) + monkeypatch.setattr(api, 'current_logger', logger_mocked()) + monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) +- monkeypatch.setattr('leapp.libraries.actor.missinggpgkey._gpg_show_keys', _gpg_show_keys_mocked) +- monkeypatch.setattr('leapp.libraries.actor.missinggpgkey._get_pubkeys', _get_pubkeys_mocked) ++ monkeypatch.setattr('leapp.libraries.common.gpg._gpg_show_keys', _gpg_show_keys_mocked) + monkeypatch.setattr('six.moves.urllib.request.urlretrieve', _urlretrive_mocked_urlerror) + + process() +@@ -508,8 +469,7 @@ def test_perform_ftp_gpgkey(monkeypatch): + monkeypatch.setattr(api, 'produce', produce_mocked()) + monkeypatch.setattr(api, 'current_logger', logger_mocked()) + monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) +- monkeypatch.setattr('leapp.libraries.actor.missinggpgkey._gpg_show_keys', _gpg_show_keys_mocked) +- monkeypatch.setattr('leapp.libraries.actor.missinggpgkey._get_pubkeys', _get_pubkeys_mocked) ++ monkeypatch.setattr('leapp.libraries.common.gpg._gpg_show_keys', _gpg_show_keys_mocked) + + process() + assert len(api.current_logger.errmsg) == 1 +@@ -525,7 +485,7 @@ def test_perform_ftp_gpgkey(monkeypatch): + def get_test_data_missing_key(): + return [ + _get_test_targuserspaceinfo(), +- InstalledRPM(items=_get_test_installedrpm_no_my_key()), ++ TrustedGpgKeys(items=_get_test_gpgkeys_missing()), + _get_test_usedtargetrepositories(), + _get_test_tmptargetrepositoriesfacts(), + ] +@@ -543,8 +503,7 @@ def 
test_perform_report(monkeypatch): + monkeypatch.setattr(api, 'produce', produce_mocked()) + monkeypatch.setattr(api, 'current_logger', logger_mocked()) + monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) +- monkeypatch.setattr('leapp.libraries.actor.missinggpgkey._gpg_show_keys', _gpg_show_keys_mocked) +- monkeypatch.setattr('leapp.libraries.actor.missinggpgkey._get_pubkeys', _get_pubkeys_mocked) ++ monkeypatch.setattr('leapp.libraries.common.gpg._gpg_show_keys', _gpg_show_keys_mocked) + + process() + assert not api.current_logger.warnmsg +@@ -559,7 +518,7 @@ def test_perform_report(monkeypatch): + def get_test_data_no_gpg_data(): + return [ + _get_test_targuserspaceinfo(), +- _get_test_installedrpm(), ++ _get_test_gpgkeys(), + _get_test_usedtargetrepositories(), + _get_test_tmptargetrepositoriesfacts(), + ] +@@ -593,12 +552,11 @@ def test_perform_invalid_key(monkeypatch): + monkeypatch.setattr(api, 'produce', produce_mocked()) + monkeypatch.setattr(api, 'current_logger', logger_mocked()) + monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) +- monkeypatch.setattr('leapp.libraries.actor.missinggpgkey._gpg_show_keys', _gpg_show_keys_mocked_my_empty) +- monkeypatch.setattr('leapp.libraries.actor.missinggpgkey._get_pubkeys', _get_pubkeys_mocked) ++ monkeypatch.setattr('leapp.libraries.common.gpg._gpg_show_keys', _gpg_show_keys_mocked_my_empty) + + process() +- assert len(api.current_logger.warnmsg) == 1 +- assert 'Cannot get any gpg key from the file' in api.current_logger.warnmsg[0] ++ assert len(api.current_logger.warnmsg) == 2, api.current_logger.warnmsg ++ assert 'Cannot get any gpg key from the file' in api.current_logger.warnmsg[1] + assert api.produce.called == 1 + assert isinstance(api.produce.model_instances[0], DNFWorkaround) + assert reporting.create_report.called == 1 +@@ -610,7 +568,7 @@ def test_perform_invalid_key(monkeypatch): + def get_test_data_gpgcheck_without_gpgkey(): + return [ + _get_test_targuserspaceinfo(), +- _get_test_installedrpm(), ++ _get_test_gpgkeys(), + UsedTargetRepositories( + repos=_get_test_usedtargetrepositories_list() + [ + UsedTargetRepository( +@@ -651,8 +609,7 @@ def test_perform_gpgcheck_without_gpgkey(monkeypatch): + monkeypatch.setattr(api, 'produce', produce_mocked()) + monkeypatch.setattr(api, 'current_logger', logger_mocked()) + monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) +- monkeypatch.setattr('leapp.libraries.actor.missinggpgkey._gpg_show_keys', _gpg_show_keys_mocked) +- monkeypatch.setattr('leapp.libraries.actor.missinggpgkey._get_pubkeys', _get_pubkeys_mocked) ++ monkeypatch.setattr('leapp.libraries.common.gpg._gpg_show_keys', _gpg_show_keys_mocked) + + process() + assert len(api.current_logger.warnmsg) == 1 +diff --git a/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/tests/unit_test_missinggpgkey.py b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/tests/unit_test_missinggpgkey.py +index 68e4cdfe..8cd00531 100644 +--- a/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/tests/unit_test_missinggpgkey.py ++++ b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/tests/unit_test_missinggpgkey.py +@@ -6,134 +6,12 @@ import tempfile + import distro + import pytest + +-from leapp.libraries.actor.missinggpgkey import ( +- _expand_vars, +- _get_abs_file_path, +- _get_path_to_gpg_certs, +- _get_pubkeys, +- _get_repo_gpgkey_urls, +- _gpg_show_keys, +- _parse_fp_from_gpg, +- _pubkeys_from_rpms +-) ++from leapp.libraries.actor.missinggpgkey 
import _expand_vars, _get_abs_file_path, _get_repo_gpgkey_urls + from leapp.libraries.common.testutils import CurrentActorMocked + from leapp.libraries.stdlib import api + from leapp.models import InstalledRPM, RepositoryData, RPM, TargetUserSpaceInfo + + +-def is_rhel7(): +- return int(distro.major_version()) < 8 +- +- +-def test_gpg_show_keys(current_actor_context, monkeypatch): +- src = '7.9' if is_rhel7() else '8.6' +- current_actor = CurrentActorMocked(src_ver=src) +- monkeypatch.setattr(api, 'current_actor', current_actor) +- +- # python2 compatibility :/ +- dirpath = tempfile.mkdtemp() +- +- # using GNUPGHOME env should avoid gnupg modifying the system +- os.environ['GNUPGHOME'] = dirpath +- +- try: +- # non-existing file +- non_existent_path = os.path.join(dirpath, 'nonexistent') +- res = _gpg_show_keys(non_existent_path) +- if is_rhel7(): +- err_msg = "gpg: can't open `{}'".format(non_existent_path) +- else: +- err_msg = "gpg: can't open '{}': No such file or directory\n".format(non_existent_path) +- assert not res['stdout'] +- assert err_msg in res['stderr'] +- assert res['exit_code'] == 2 +- +- fp = _parse_fp_from_gpg(res) +- assert fp == [] +- +- # no gpg data found +- no_key_path = os.path.join(dirpath, "no_key") +- with open(no_key_path, "w") as f: +- f.write('test') +- +- res = _gpg_show_keys(no_key_path) +- if is_rhel7(): +- err_msg = ('gpg: no valid OpenPGP data found.\n' +- 'gpg: processing message failed: Unknown system error\n') +- else: +- err_msg = 'gpg: no valid OpenPGP data found.\n' +- assert not res['stdout'] +- assert res['stderr'] == err_msg +- assert res['exit_code'] == 2 +- +- fp = _parse_fp_from_gpg(res) +- assert fp == [] +- +- # with some test data now -- rhel9 release key +- # rhel9_key_path = os.path.join(api.get_common_folder_path('rpm-gpg'), '9') +- cur_dir = os.path.dirname(os.path.abspath(__file__)) +- rhel9_key_path = os.path.join(cur_dir, '..', '..', '..', 'files', 'rpm-gpg', '9', +- 'RPM-GPG-KEY-redhat-release') +- res = _gpg_show_keys(rhel9_key_path) +- finally: +- shutil.rmtree(dirpath) +- +- if is_rhel7(): +- assert len(res['stdout']) == 4 +- assert res['stdout'][0] == ('pub:-:4096:1:199E2F91FD431D51:1256212795:::-:' +- 'Red Hat, Inc. (release key 2) :') +- assert res['stdout'][1] == 'fpr:::::::::567E347AD0044ADE55BA8A5F199E2F91FD431D51:' +- assert res['stdout'][2] == ('pub:-:4096:1:5054E4A45A6340B3:1646863006:::-:' +- 'Red Hat, Inc. (auxiliary key 3) :') +- assert res['stdout'][3] == 'fpr:::::::::7E4624258C406535D56D6F135054E4A45A6340B3:' +- else: +- assert len(res['stdout']) == 6 +- assert res['stdout'][0] == 'pub:-:4096:1:199E2F91FD431D51:1256212795:::-:::scSC::::::23::0:' +- assert res['stdout'][1] == 'fpr:::::::::567E347AD0044ADE55BA8A5F199E2F91FD431D51:' +- assert res['stdout'][2] == ('uid:-::::1256212795::DC1CAEC7997B3575101BB0FCAAC6191792660D8F::' +- 'Red Hat, Inc. (release key 2) ::::::::::0:') +- assert res['stdout'][3] == 'pub:-:4096:1:5054E4A45A6340B3:1646863006:::-:::scSC::::::23::0:' +- assert res['stdout'][4] == 'fpr:::::::::7E4624258C406535D56D6F135054E4A45A6340B3:' +- assert res['stdout'][5] == ('uid:-::::1646863006::DA7F68E3872D6E7BDCE05225E7EB5F3ACDD9699F::' +- 'Red Hat, Inc. 
(auxiliary key 3) ::::::::::0:') +- +- err = '{}/trustdb.gpg: trustdb created'.format(dirpath) +- assert err in res['stderr'] +- assert res['exit_code'] == 0 +- +- # now, parse the output too +- fp = _parse_fp_from_gpg(res) +- assert fp == ['fd431d51', '5a6340b3'] +- +- +-@pytest.mark.parametrize('res, exp', [ +- ({'exit_code': 2, 'stdout': '', 'stderr': ''}, []), +- ({'exit_code': 2, 'stdout': '', 'stderr': 'bash: gpg2: command not found...'}, []), +- ({'exit_code': 0, 'stdout': 'Some other output', 'stderr': ''}, []), +- ({'exit_code': 0, 'stdout': ['Some other output', 'other line'], 'stderr': ''}, []), +- ({'exit_code': 0, 'stdout': ['pub:-:4096:1:199E2F91FD431D:'], 'stderr': ''}, []), +- ({'exit_code': 0, 'stdout': ['pub:-:4096:1:5054E4A45A6340B3:1..'], 'stderr': ''}, ['5a6340b3']), +-]) +-def test_parse_fp_from_gpg(res, exp): +- fp = _parse_fp_from_gpg(res) +- assert fp == exp +- +- +-@pytest.mark.parametrize('target, product_type, exp', [ +- ('8.6', 'beta', '../../files/rpm-gpg/8beta'), +- ('8.8', 'htb', '../../files/rpm-gpg/8'), +- ('9.0', 'beta', '../../files/rpm-gpg/9beta'), +- ('9.2', 'ga', '../../files/rpm-gpg/9'), +-]) +-def test_get_path_to_gpg_certs(current_actor_context, monkeypatch, target, product_type, exp): +- current_actor = CurrentActorMocked(dst_ver=target, +- envars={'LEAPP_DEVEL_TARGET_PRODUCT_TYPE': product_type}) +- monkeypatch.setattr(api, 'current_actor', current_actor) +- +- p = _get_path_to_gpg_certs() +- assert p == exp +- +- + @pytest.mark.parametrize('data, exp', [ + ('bare string', 'bare string'), + ('with dollar$$$', 'with dollar$$$'), +@@ -148,50 +26,6 @@ def test_expand_vars(monkeypatch, data, exp): + assert res == exp + + +-def _get_test_installed_rmps(): +- return InstalledRPM( +- items=[ +- RPM(name='gpg-pubkey', +- version='9570ff31', +- release='5e3006fb', +- epoch='0', +- packager='Fedora (33) ', +- arch='noarch', +- pgpsig=''), +- RPM(name='rpm', +- version='4.17.1', +- release='3.fc35', +- epoch='0', +- packager='Fedora Project', +- arch='x86_64', +- pgpsig='RSA/SHA256, Tue 02 Aug 2022 03:12:43 PM CEST, Key ID db4639719867c58f'), +- ], +- ) +- +- +-def test_pubkeys_from_rpms(): +- installed_rpm = _get_test_installed_rmps() +- assert _pubkeys_from_rpms(installed_rpm) == ['9570ff31'] +- +- +-# @pytest.mark.parametrize('target, product_type, exp', [ +-# ('8.6', 'beta', ['F21541EB']), +-# ('8.8', 'htb', ['FD431D51', 'D4082792']), # ga +-# ('9.0', 'beta', ['F21541EB']), +-# ('9.2', 'ga', ['FD431D51', '5A6340B3']), +-# ]) +-# Def test_get_pubkeys(current_actor_context, monkeypatch, target, product_type, exp): +-# current_actor = CurrentActorMocked(dst_ver=target, +-# envars={'LEAPP_DEVEL_TARGET_PRODUCT_TYPE': product_type}) +-# monkeypatch.setattr(api, 'current_actor', current_actor) +-# installed_rpm = _get_test_installed_rmps() +-# +-# p = _get_pubkeys(installed_rpm) +-# assert '9570ff31' in p +-# for x in exp: +-# assert x in p +- +- + @pytest.mark.parametrize('repo, exp', [ + (RepositoryData(repoid='dummy', name='name'), None), + (RepositoryData(repoid='dummy', name='name', additional_fields='{}'), None), +diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py +index e015a741..d605ba0e 100644 +--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py ++++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py +@@ -9,6 +9,7 @@ from leapp.libraries.actor import 
constants + from leapp.libraries.common import dnfplugin, mounting, overlaygen, repofileutils, rhsm, utils + from leapp.libraries.common.config import get_env, get_product_type + from leapp.libraries.common.config.version import get_target_major_version ++from leapp.libraries.common.gpg import get_path_to_gpg_certs, is_nogpgcheck_set + from leapp.libraries.stdlib import api, CalledProcessError, config, run + from leapp.models import RequiredTargetUserspacePackages # deprecated + from leapp.models import TMPTargetRepositoriesFacts # deprecated all the time +@@ -54,7 +55,6 @@ from leapp.utils.deprecation import suppress_deprecation + # Issue: #486 + + PROD_CERTS_FOLDER = 'prod-certs' +-GPG_CERTS_FOLDER = 'rpm-gpg' + PERSISTENT_PACKAGE_CACHE_DIR = '/var/lib/leapp/persistent_package_cache' + DEDICATED_LEAPP_PART_URL = 'https://access.redhat.com/solutions/7011704' + +@@ -143,21 +143,8 @@ def _backup_to_persistent_package_cache(userspace_dir): + shutil.move(src_cache, PERSISTENT_PACKAGE_CACHE_DIR) + + +-def _the_nogpgcheck_option_used(): +- return get_env('LEAPP_NOGPGCHECK', False) == '1' +- +- +-def _get_path_to_gpg_certs(target_major_version): +- target_product_type = get_product_type('target') +- certs_dir = target_major_version +- # only beta is special in regards to the GPG signing keys +- if target_product_type == 'beta': +- certs_dir = '{}beta'.format(target_major_version) +- return os.path.join(api.get_common_folder_path(GPG_CERTS_FOLDER), certs_dir) +- +- + def _import_gpg_keys(context, install_root_dir, target_major_version): +- certs_path = _get_path_to_gpg_certs(target_major_version) ++ certs_path = get_path_to_gpg_certs() + # Import the RHEL X+1 GPG key to be able to verify the installation of initial packages + try: + # Import also any other keys provided by the customer in the same directory +@@ -234,13 +221,13 @@ def prepare_target_userspace(context, userspace_dir, enabled_repos, packages): + install_root_dir = '/el{}target'.format(target_major_version) + with mounting.BindMount(source=userspace_dir, target=os.path.join(context.base_dir, install_root_dir.lstrip('/'))): + _restore_persistent_package_cache(userspace_dir) +- if not _the_nogpgcheck_option_used(): ++ if not is_nogpgcheck_set(): + _import_gpg_keys(context, install_root_dir, target_major_version) + + repos_opt = [['--enablerepo', repo] for repo in enabled_repos] + repos_opt = list(itertools.chain(*repos_opt)) + cmd = ['dnf', 'install', '-y'] +- if _the_nogpgcheck_option_used(): ++ if is_nogpgcheck_set(): + cmd.append('--nogpgcheck') + cmd += [ + '--setopt=module_platform_id=platform:el{}'.format(target_major_version), +diff --git a/repos/system_upgrade/common/actors/trustedgpgkeysscanner/actor.py b/repos/system_upgrade/common/actors/trustedgpgkeysscanner/actor.py +new file mode 100644 +index 00000000..46e8f9ec +--- /dev/null ++++ b/repos/system_upgrade/common/actors/trustedgpgkeysscanner/actor.py +@@ -0,0 +1,21 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import trustedgpgkeys ++from leapp.models import InstalledRPM, TrustedGpgKeys ++from leapp.tags import FactsPhaseTag, IPUWorkflowTag ++ ++ ++class TrustedGpgKeysScanner(Actor): ++ """ ++ Scan for trusted GPG keys. ++ ++ These include keys readily available in the source RPM DB, keys for N+1 ++ Red Hat release and custom keys stored in the trusted directory. 
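++
++    The produced message might look like this (an illustrative sketch;
++    the fingerprint value is just an example):
++        TrustedGpgKeys(items=[GpgKey(fingerprint='fd431d51', rpmdb=True)])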
++    """
++
++    name = 'trusted_gpg_keys_scanner'
++    consumes = (InstalledRPM,)
++    produces = (TrustedGpgKeys,)
++    tags = (IPUWorkflowTag, FactsPhaseTag)
++
++    def process(self):
++        trustedgpgkeys.process()
+diff --git a/repos/system_upgrade/common/actors/trustedgpgkeysscanner/libraries/trustedgpgkeys.py b/repos/system_upgrade/common/actors/trustedgpgkeysscanner/libraries/trustedgpgkeys.py
+new file mode 100644
+index 00000000..6377f767
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/trustedgpgkeysscanner/libraries/trustedgpgkeys.py
+@@ -0,0 +1,38 @@
++import os
++
++from leapp.exceptions import StopActorExecutionError
++from leapp.libraries.common.gpg import get_gpg_fp_from_file, get_path_to_gpg_certs, get_pubkeys_from_rpms
++from leapp.libraries.stdlib import api
++from leapp.models import GpgKey, InstalledRPM, TrustedGpgKeys
++
++
++def _get_pubkeys(installed_rpms):
++    """
++    Get pubkeys from installed rpms and the trusted directory
++    """
++    pubkeys = get_pubkeys_from_rpms(installed_rpms)
++    db_pubkeys = [key.fingerprint for key in pubkeys]
++    certs_path = get_path_to_gpg_certs()
++    for certname in os.listdir(certs_path):
++        key_file = os.path.join(certs_path, certname)
++        fps = get_gpg_fp_from_file(key_file)
++        for fp in fps:
++            if fp not in db_pubkeys:
++                pubkeys.append(GpgKey(fingerprint=fp, rpmdb=False, filename=key_file))
++                db_pubkeys.append(fp)
++    return pubkeys
++
++
++def process():
++    """
++    Process keys in RPM DB and the ones in trusted directory to produce a list of trusted keys
++    """
++
++    try:
++        installed_rpms = next(api.consume(InstalledRPM))
++    except StopIteration:
++        raise StopActorExecutionError(
++            'Could not check for valid GPG keys', details={'details': 'No InstalledRPM facts'}
++        )
++    pubkeys = _get_pubkeys(installed_rpms)
++    api.produce(TrustedGpgKeys(items=pubkeys))
+diff --git a/repos/system_upgrade/common/actors/trustedgpgkeysscanner/tests/test_trustedgpgkeys.py b/repos/system_upgrade/common/actors/trustedgpgkeysscanner/tests/test_trustedgpgkeys.py
+new file mode 100644
+index 00000000..0d98aad7
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/trustedgpgkeysscanner/tests/test_trustedgpgkeys.py
+@@ -0,0 +1,87 @@
++import os
++
++from leapp import reporting
++from leapp.libraries.actor import trustedgpgkeys
++from leapp.libraries.common.gpg import get_pubkeys_from_rpms
++from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked, logger_mocked, produce_mocked
++from leapp.libraries.stdlib import api
++from leapp.models import GpgKey, InstalledRPM, RPM, TrustedGpgKeys
++
++
++def _get_test_installed_rmps(fps):
++    # adding at least one rpm that is not gpg-pubkey
++    rpms = [RPM(
++        name='rpm',
++        version='4.17.1',
++        release='3.fc35',
++        epoch='0',
++        packager='Fedora Project',
++        arch='x86_64',
++        pgpsig='RSA/SHA256, Tue 02 Aug 2022 03:12:43 PM CEST, Key ID db4639719867c58f'
++    )]
++    for fp in fps:
++        rpms.append(RPM(
++            name='gpg-pubkey',
++            version=fp,
++            release='5e3006fb',
++            epoch='0',
++            packager='Fedora (33) ',
++            arch='noarch',
++            pgpsig=''
++        ))
++    return InstalledRPM(items=rpms)
++
++
++class MockedGetGpgFromFile(object):
++    def __init__(self, file_fps_tuples):
++        # e.g.
file_fps_tuple = [('/mydir/myfile', ['0000ff31', '0000ff32'])] ++ self._data = {} ++ for fname, fps in file_fps_tuples: ++ self._data[fname] = fps ++ ++ def get_files(self): ++ return self._data.keys() # noqa: W1655; pylint: disable=dict-keys-not-iterating ++ ++ def __call__(self, fname): ++ return self._data.get(fname, []) ++ ++ ++def test_get_pubkeys(monkeypatch): ++ """ ++ Very basic test of _get_pubkeys function ++ """ ++ rpm_fps = ['9570ff31', '99900000'] ++ file_fps = ['0000ff31', '0000ff32'] ++ installed_rpms = _get_test_installed_rmps(rpm_fps) ++ mocked_gpg_files = MockedGetGpgFromFile([('/mydir/myfile', ['0000ff31', '0000ff32'])]) ++ ++ def _mocked_listdir(dummy): ++ return [os.path.basename(i) for i in mocked_gpg_files.get_files()] ++ ++ monkeypatch.setattr(trustedgpgkeys.os, 'listdir', _mocked_listdir) ++ monkeypatch.setattr(trustedgpgkeys, 'get_path_to_gpg_certs', lambda: '/mydir/') ++ monkeypatch.setattr(trustedgpgkeys, 'get_gpg_fp_from_file', mocked_gpg_files) ++ ++ pubkeys = trustedgpgkeys._get_pubkeys(installed_rpms) ++ assert len(pubkeys) == len(rpm_fps + file_fps) ++ assert set(rpm_fps) == {pkey.fingerprint for pkey in pubkeys if pkey.rpmdb} ++ assert set(file_fps) == {pkey.fingerprint for pkey in pubkeys if not pkey.rpmdb} ++ assert list({pkey.filename for pkey in pubkeys if not pkey.rpmdb})[0] == '/mydir/myfile' ++ ++ ++def test_process(monkeypatch): ++ """ ++ Executes the "main" function ++ """ ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked( ++ msgs=[_get_test_installed_rmps(['9570ff31'])]) ++ ) ++ monkeypatch.setattr(api, 'produce', produce_mocked()) ++ monkeypatch.setattr(api, 'current_logger', logger_mocked()) ++ monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) ++ monkeypatch.setattr(trustedgpgkeys, '_get_pubkeys', get_pubkeys_from_rpms) ++ ++ trustedgpgkeys.process() ++ assert api.produce.called == 1 ++ assert isinstance(api.produce.model_instances[0], TrustedGpgKeys) ++ assert reporting.create_report.called == 0 +diff --git a/repos/system_upgrade/common/libraries/dnfplugin.py b/repos/system_upgrade/common/libraries/dnfplugin.py +index d3ec5901..fbd58246 100644 +--- a/repos/system_upgrade/common/libraries/dnfplugin.py ++++ b/repos/system_upgrade/common/libraries/dnfplugin.py +@@ -9,6 +9,7 @@ from leapp.exceptions import StopActorExecutionError + from leapp.libraries.common import dnfconfig, guards, mounting, overlaygen, rhsm, utils + from leapp.libraries.common.config import get_env + from leapp.libraries.common.config.version import get_target_major_version, get_target_version ++from leapp.libraries.common.gpg import is_nogpgcheck_set + from leapp.libraries.stdlib import api, CalledProcessError, config + from leapp.models import DNFWorkaround + +@@ -77,10 +78,6 @@ def _rebuild_rpm_db(context, root=None): + context.call(cmd) + + +-def _the_nogpgcheck_option_used(): +- return get_env('LEAPP_NOGPGCHECK', '0') == '1' +- +- + def build_plugin_data(target_repoids, debug, test, tasks, on_aws): + """ + Generates a dictionary with the DNF plugin data. 
+@@ -100,7 +97,7 @@ def build_plugin_data(target_repoids, debug, test, tasks, on_aws): + 'debugsolver': debug, + 'disable_repos': True, + 'enable_repos': target_repoids, +- 'gpgcheck': not _the_nogpgcheck_option_used(), ++ 'gpgcheck': not is_nogpgcheck_set(), + 'platform_id': 'platform:el{}'.format(get_target_major_version()), + 'releasever': get_target_version(), + 'installroot': '/installroot', +@@ -367,7 +364,7 @@ def install_initramdisk_requirements(packages, target_userspace_info, used_repos + 'dnf', + 'install', + '-y'] +- if _the_nogpgcheck_option_used(): ++ if is_nogpgcheck_set(): + cmd.append('--nogpgcheck') + cmd += [ + '--setopt=module_platform_id=platform:el{}'.format(get_target_major_version()), +diff --git a/repos/system_upgrade/common/libraries/gpg.py b/repos/system_upgrade/common/libraries/gpg.py +new file mode 100644 +index 00000000..a8071329 +--- /dev/null ++++ b/repos/system_upgrade/common/libraries/gpg.py +@@ -0,0 +1,137 @@ ++import os ++ ++from leapp.libraries.common import config ++from leapp.libraries.common.config.version import get_source_major_version, get_target_major_version ++from leapp.libraries.stdlib import api, run ++from leapp.models import GpgKey ++ ++GPG_CERTS_FOLDER = 'rpm-gpg' ++ ++ ++def get_pubkeys_from_rpms(installed_rpms): ++ """ ++ Return the list of fingerprints of GPG keys in RPM DB ++ ++ This function returns short 8 characters fingerprints of trusted GPG keys ++ "installed" in the source OS RPM database. These look like normal packages ++ named "gpg-pubkey" and the fingerprint is present in the version field. ++ ++ :param installed_rpms: List of installed RPMs ++ :type installed_rpms: list(leapp.models.RPM) ++ :return: list of GPG keys from RPM DB ++ :rtype: list(leapp.models.GpgKey) ++ """ ++ return [GpgKey(fingerprint=pkg.version, rpmdb=True) for pkg in installed_rpms.items if pkg.name == 'gpg-pubkey'] ++ ++ ++def _gpg_show_keys(key_path): ++ """ ++ Show keys in given file in version-agnostic manner ++ ++ This runs gpg --show-keys (EL8) or gpg --with-fingerprints (EL7) ++ to verify the given file exists, is readable and contains valid ++ OpenPGP key data, which is printed in parsable format (--with-colons). ++ """ ++ try: ++ cmd = ['gpg2'] ++ # RHEL7 gnupg requires different switches to get the same output ++ if get_source_major_version() == '7': ++ cmd.append('--with-fingerprint') ++ else: ++ cmd.append('--show-keys') ++ cmd += ['--with-colons', key_path] ++ # TODO: discussed, most likely the checked=False will be dropped ++ # and error will be handled in other functions ++ return run(cmd, split=True, checked=False) ++ except OSError as err: ++ # NOTE: this is hypothetic; gnupg2 has to be installed on RHEL 7+ ++ error = 'Failed to read fingerprint from GPG key {}: {}'.format(key_path, str(err)) ++ api.current_logger().error(error) ++ return {} ++ ++ ++def _parse_fp_from_gpg(output): ++ """ ++ Parse the output of gpg --show-keys --with-colons. ++ ++ Return list of 8 characters fingerprints per each gpgkey for the given ++ output from stdlib.run() or None if some error occurred. Either the ++ command return non-zero exit code, the file does not exists, its not ++ readable or does not contain any openpgp data. 
++ """ ++ if not output or output['exit_code']: ++ return [] ++ ++ # we are interested in the lines of the output starting with "pub:" ++ # the colons are used for separating the fields in output like this ++ # pub:-:4096:1:999F7CBF38AB71F4:1612983048:::-:::escESC::::::23::0: ++ # ^--------------^ this is the fingerprint we need ++ # ^------^ but RPM version is just the last 8 chars lowercase ++ # Also multiple gpg keys can be stored in the file, so go through all "pub" ++ # lines ++ gpg_fps = [] ++ for line in output['stdout']: ++ if not line or not line.startswith('pub:'): ++ continue ++ parts = line.split(':') ++ if len(parts) >= 4 and len(parts[4]) == 16: ++ gpg_fps.append(parts[4][8:].lower()) ++ else: ++ api.current_logger().warning( ++ 'Cannot parse the gpg2 output. Line: "{}"' ++ .format(line) ++ ) ++ ++ return gpg_fps ++ ++ ++def get_gpg_fp_from_file(key_path): ++ """ ++ Return the list of public key fingerprints from the given file ++ ++ Log warning in case no OpenPGP data found in the given file or it is not ++ readable for some reason. ++ ++ :param key_path: Path to the file with GPG key(s) ++ :type key_path: str ++ :return: List of public key fingerprints from the given file ++ :rtype: list(str) ++ """ ++ res = _gpg_show_keys(key_path) ++ fp = _parse_fp_from_gpg(res) ++ if not fp: ++ error_msg = 'Unable to read OpenPGP keys from {}: {}'.format(key_path, res['stderr']) ++ api.current_logger().warning(error_msg) ++ return fp ++ ++ ++def get_path_to_gpg_certs(): ++ """ ++ Get path to the directory with trusted target gpg keys in the common leapp repository. ++ ++ GPG keys stored under this directory are considered as trusted and are ++ installed during the upgrade process. ++ ++ :return: Path to the directory with GPG keys stored under the common leapp repository. ++ :rtype: str ++ """ ++ target_major_version = get_target_major_version() ++ target_product_type = config.get_product_type('target') ++ certs_dir = target_major_version ++ # only beta is special in regards to the GPG signing keys ++ if target_product_type == 'beta': ++ certs_dir = '{}beta'.format(target_major_version) ++ return os.path.join(api.get_common_folder_path(GPG_CERTS_FOLDER), certs_dir) ++ ++ ++def is_nogpgcheck_set(): ++ """ ++ Return True if the GPG check should be skipped. ++ ++ The GPG check is skipped if leapp is executed with LEAPP_NOGPGCHECK=1 ++ or with the --nogpgcheck CLI option. In both cases, actors will see ++ LEAPP_NOGPGCHECK is '1'. 
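++
++    A typical caller checks it like this (an illustrative sketch; the same
++    pattern is used in the dnfplugin library updated by this patch):
++
++        if is_nogpgcheck_set():
++            cmd.append('--nogpgcheck')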
++ ++ :rtype: bool ++ """ ++ return config.get_env('LEAPP_NOGPGCHECK', False) == '1' +diff --git a/repos/system_upgrade/common/libraries/tests/test_gpg.py b/repos/system_upgrade/common/libraries/tests/test_gpg.py +new file mode 100644 +index 00000000..7cf37fa2 +--- /dev/null ++++ b/repos/system_upgrade/common/libraries/tests/test_gpg.py +@@ -0,0 +1,147 @@ ++import os ++import shutil ++import tempfile ++ ++import distro ++import pytest ++ ++from leapp.libraries.common import gpg ++from leapp.libraries.common.testutils import CurrentActorMocked ++from leapp.libraries.stdlib import api ++from leapp.models import GpgKey, InstalledRPM, RPM ++ ++ ++@pytest.mark.parametrize('target, product_type, exp', [ ++ ('8.6', 'beta', '../../files/rpm-gpg/8beta'), ++ ('8.8', 'htb', '../../files/rpm-gpg/8'), ++ ('9.0', 'beta', '../../files/rpm-gpg/9beta'), ++ ('9.2', 'ga', '../../files/rpm-gpg/9'), ++]) ++def test_get_path_to_gpg_certs(monkeypatch, target, product_type, exp): ++ current_actor = CurrentActorMocked(dst_ver=target, ++ envars={'LEAPP_DEVEL_TARGET_PRODUCT_TYPE': product_type}) ++ monkeypatch.setattr(api, 'current_actor', current_actor) ++ ++ p = gpg.get_path_to_gpg_certs() ++ assert p == exp ++ ++ ++def is_rhel7(): ++ return int(distro.major_version()) < 8 ++ ++ ++@pytest.mark.skipif(distro.id() not in ("rhel", "centos"), reason="Requires RHEL or CentOS for valid results.") ++def test_gpg_show_keys(loaded_leapp_repository, monkeypatch): ++ src = '7.9' if is_rhel7() else '8.6' ++ current_actor = CurrentActorMocked(src_ver=src) ++ monkeypatch.setattr(api, 'current_actor', current_actor) ++ ++ # python2 compatibility :/ ++ dirpath = tempfile.mkdtemp() ++ ++ # using GNUPGHOME env should avoid gnupg modifying the system ++ os.environ['GNUPGHOME'] = dirpath ++ ++ try: ++ # non-existing file ++ non_existent_path = os.path.join(dirpath, 'nonexistent') ++ res = gpg._gpg_show_keys(non_existent_path) ++ if is_rhel7(): ++ err_msg = "gpg: can't open `{}'".format(non_existent_path) ++ else: ++ err_msg = "gpg: can't open '{}': No such file or directory\n".format(non_existent_path) ++ assert not res['stdout'] ++ assert err_msg in res['stderr'] ++ assert res['exit_code'] == 2 ++ ++ fp = gpg._parse_fp_from_gpg(res) ++ assert fp == [] ++ ++ # no gpg data found ++ no_key_path = os.path.join(dirpath, "no_key") ++ with open(no_key_path, "w") as f: ++ f.write('test') ++ ++ res = gpg._gpg_show_keys(no_key_path) ++ if is_rhel7(): ++ err_msg = ('gpg: no valid OpenPGP data found.\n' ++ 'gpg: processing message failed: Unknown system error\n') ++ else: ++ err_msg = 'gpg: no valid OpenPGP data found.\n' ++ assert not res['stdout'] ++ assert res['stderr'] == err_msg ++ assert res['exit_code'] == 2 ++ ++ fp = gpg._parse_fp_from_gpg(res) ++ assert fp == [] ++ ++ # with some test data now -- rhel9 release key ++ # rhel9_key_path = os.path.join(api.get_common_folder_path('rpm-gpg'), '9') ++ cur_dir = os.path.dirname(os.path.abspath(__file__)) ++ rhel9_key_path = os.path.join(cur_dir, '..', '..', 'files', 'rpm-gpg', '9', ++ 'RPM-GPG-KEY-redhat-release') ++ res = gpg._gpg_show_keys(rhel9_key_path) ++ finally: ++ shutil.rmtree(dirpath) ++ ++ if is_rhel7(): ++ assert len(res['stdout']) == 4 ++ assert res['stdout'][0] == ('pub:-:4096:1:199E2F91FD431D51:1256212795:::-:' ++ 'Red Hat, Inc. (release key 2) :') ++ assert res['stdout'][1] == 'fpr:::::::::567E347AD0044ADE55BA8A5F199E2F91FD431D51:' ++ assert res['stdout'][2] == ('pub:-:4096:1:5054E4A45A6340B3:1646863006:::-:' ++ 'Red Hat, Inc. 
(auxiliary key 3) :') ++ assert res['stdout'][3] == 'fpr:::::::::7E4624258C406535D56D6F135054E4A45A6340B3:' ++ else: ++ assert len(res['stdout']) == 6 ++ assert res['stdout'][0] == 'pub:-:4096:1:199E2F91FD431D51:1256212795:::-:::scSC::::::23::0:' ++ assert res['stdout'][1] == 'fpr:::::::::567E347AD0044ADE55BA8A5F199E2F91FD431D51:' ++ assert res['stdout'][2] == ('uid:-::::1256212795::DC1CAEC7997B3575101BB0FCAAC6191792660D8F::' ++ 'Red Hat, Inc. (release key 2) ::::::::::0:') ++ assert res['stdout'][3] == 'pub:-:4096:1:5054E4A45A6340B3:1646863006:::-:::scSC::::::23::0:' ++ assert res['stdout'][4] == 'fpr:::::::::7E4624258C406535D56D6F135054E4A45A6340B3:' ++ assert res['stdout'][5] == ('uid:-::::1646863006::DA7F68E3872D6E7BDCE05225E7EB5F3ACDD9699F::' ++ 'Red Hat, Inc. (auxiliary key 3) ::::::::::0:') ++ ++ err = '{}/trustdb.gpg: trustdb created'.format(dirpath) ++ assert err in res['stderr'] ++ assert res['exit_code'] == 0 ++ ++ # now, parse the output too ++ fp = gpg._parse_fp_from_gpg(res) ++ assert fp == ['fd431d51', '5a6340b3'] ++ ++ ++@pytest.mark.parametrize('res, exp', [ ++ ({'exit_code': 2, 'stdout': '', 'stderr': ''}, []), ++ ({'exit_code': 2, 'stdout': '', 'stderr': 'bash: gpg2: command not found...'}, []), ++ ({'exit_code': 0, 'stdout': 'Some other output', 'stderr': ''}, []), ++ ({'exit_code': 0, 'stdout': ['Some other output', 'other line'], 'stderr': ''}, []), ++ ({'exit_code': 0, 'stdout': ['pub:-:4096:1:199E2F91FD431D:'], 'stderr': ''}, []), ++ ({'exit_code': 0, 'stdout': ['pub:-:4096:1:5054E4A45A6340B3:1..'], 'stderr': ''}, ['5a6340b3']), ++]) ++def test_parse_fp_from_gpg(res, exp): ++ fp = gpg._parse_fp_from_gpg(res) ++ assert fp == exp ++ ++ ++def test_pubkeys_from_rpms(): ++ installed_rpms = InstalledRPM( ++ items=[ ++ RPM(name='gpg-pubkey', ++ version='9570ff31', ++ release='5e3006fb', ++ epoch='0', ++ packager='Fedora (33) ', ++ arch='noarch', ++ pgpsig=''), ++ RPM(name='rpm', ++ version='4.17.1', ++ release='3.fc35', ++ epoch='0', ++ packager='Fedora Project', ++ arch='x86_64', ++ pgpsig='RSA/SHA256, Tue 02 Aug 2022 03:12:43 PM CEST, Key ID db4639719867c58f'), ++ ], ++ ) ++ assert gpg.get_pubkeys_from_rpms(installed_rpms) == [GpgKey(fingerprint='9570ff31', rpmdb=True)] +diff --git a/repos/system_upgrade/common/models/trustedgpgkeys.py b/repos/system_upgrade/common/models/trustedgpgkeys.py +new file mode 100644 +index 00000000..c397bea7 +--- /dev/null ++++ b/repos/system_upgrade/common/models/trustedgpgkeys.py +@@ -0,0 +1,19 @@ ++from leapp.models import fields, Model ++from leapp.topics import SystemFactsTopic ++ ++ ++class GpgKey(Model): ++ """ ++ GPG Public key ++ ++ It is represented by a record in the RPM DB or by a file in directory with trusted keys (or both). 
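++
++    An illustrative instance (example values only):
++        GpgKey(fingerprint='fd431d51', rpmdb=True, filename=None)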
++    """
++    topic = SystemFactsTopic
++    fingerprint = fields.String()
++    rpmdb = fields.Boolean()
++    filename = fields.Nullable(fields.String())
++
++
++class TrustedGpgKeys(Model):
++    topic = SystemFactsTopic
++    items = fields.List(fields.Model(GpgKey), default=[])
+--
+2.41.0
+
diff --git a/0041-Check-no-new-unexpected-keys-were-installed-during-t.patch b/0041-Check-no-new-unexpected-keys-were-installed-during-t.patch
new file mode 100644
index 0000000..c4a059d
--- /dev/null
+++ b/0041-Check-no-new-unexpected-keys-were-installed-during-t.patch
@@ -0,0 +1,184 @@
+From 930758e269111190f1e5689e75d552d896adab67 Mon Sep 17 00:00:00 2001
+From: Jakub Jelen
+Date: Tue, 4 Jul 2023 18:22:49 +0200
+Subject: [PATCH 41/41] Check no new unexpected keys were installed during the
+ upgrade
+
+Petr Stodulka:
+
+* some refactoring
+* added error logging
+* replaced the hard error stop with a post-upgrade report
+  We do not want to interrupt the upgrade process after the
+  DNF transaction execution
+
+Signed-off-by: Jakub Jelen
+---
+ .../common/actors/gpgpubkeycheck/actor.py | 23 ++++
+ .../libraries/gpgpubkeycheck.py | 124 ++++++++++++++++++
+ 2 files changed, 147 insertions(+)
+ create mode 100644 repos/system_upgrade/common/actors/gpgpubkeycheck/actor.py
+ create mode 100644 repos/system_upgrade/common/actors/gpgpubkeycheck/libraries/gpgpubkeycheck.py
+
+diff --git a/repos/system_upgrade/common/actors/gpgpubkeycheck/actor.py b/repos/system_upgrade/common/actors/gpgpubkeycheck/actor.py
+new file mode 100644
+index 00000000..3d11de38
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/gpgpubkeycheck/actor.py
+@@ -0,0 +1,23 @@
++from leapp.actors import Actor
++from leapp.libraries.actor import gpgpubkeycheck
++from leapp.models import TrustedGpgKeys
++from leapp.reporting import Report
++from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag
++
++
++class GpgPubkeyCheck(Actor):
++    """
++    Checks no unexpected GPG keys were installed during the upgrade.
++
++    This is mostly a sanity check: it should not detect anything
++    unless something went very wrong, regardless of whether the gpgcheck
++    was used (the default) or not (with the --nogpgcheck option).
++    """
++
++    name = 'gpg_pubkey_check'
++    consumes = (TrustedGpgKeys,)
++    produces = (Report,)
++    tags = (IPUWorkflowTag, ApplicationsPhaseTag,)
++
++    def process(self):
++        gpgpubkeycheck.process()
+diff --git a/repos/system_upgrade/common/actors/gpgpubkeycheck/libraries/gpgpubkeycheck.py b/repos/system_upgrade/common/actors/gpgpubkeycheck/libraries/gpgpubkeycheck.py
+new file mode 100644
+index 00000000..387c6cef
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/gpgpubkeycheck/libraries/gpgpubkeycheck.py
+@@ -0,0 +1,124 @@
++from leapp import reporting
++from leapp.libraries.common.gpg import is_nogpgcheck_set
++from leapp.libraries.common.rpms import get_installed_rpms
++from leapp.libraries.stdlib import api
++from leapp.models import TrustedGpgKeys
++
++FMT_LIST_SEPARATOR = '\n    - '
++
++
++def _get_installed_fps_tuple():
++    """
++    Return a list of (fingerprint, packager) tuples for the gpg-pubkey packages in the RPM DB.
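++
++    For example (illustrative values only):
++        [('fd431d51', 'Red Hat, Inc. (release key 2)')]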
++    """
++    installed_fps_tuple = []
++    rpms = get_installed_rpms()
++    for rpm in rpms:
++        rpm = rpm.strip()
++        if not rpm:
++            continue
++        try:
++            # NOTE: pgpsig is (none) for 'gpg-pubkey' entries
++            name, version, dummy_release, dummy_epoch, packager, dummy_arch, dummy_pgpsig = rpm.split('|')
++        except ValueError as e:
++            # NOTE: this is a seatbelt; if it happens, seeing a long list of errors
++            # will let us know early that we really missed something
++            api.current_logger().error('Cannot perform the check of installed GPG keys after the upgrade.')
++            api.current_logger().error('Cannot parse rpm output: {}'.format(e))
++            continue
++        if name != 'gpg-pubkey':
++            continue
++        installed_fps_tuple.append((version, packager))
++    return installed_fps_tuple
++
++
++def _report_cannot_check_keys(installed_fps):
++    # NOTE: in this case, it's expected there will always be some GPG keys present
++    summary = (
++        'Cannot perform the check of GPG keys installed in the RPM DB'
++        ' due to missing facts (TrustedGpgKeys) supposed to be generated'
++        ' at the start of the upgrade process on the original system.'
++        ' Unexpected installed GPG keys could be e.g. a mark of'
++        ' a malicious attempt to hijack the upgrade process.'
++        ' The list of all GPG keys in the RPM DB:{sep}{key_list}'
++        .format(
++            sep=FMT_LIST_SEPARATOR,
++            key_list=FMT_LIST_SEPARATOR.join(installed_fps)
++        )
++    )
++    hint = (
++        'Verify the installed GPG keys are expected.'
++    )
++    groups = [
++        reporting.Groups.POST,
++        reporting.Groups.REPOSITORY,
++        reporting.Groups.SECURITY
++    ]
++    reporting.create_report([
++        reporting.Title('Cannot perform the check of installed GPG keys after the upgrade.'),
++        reporting.Summary(summary),
++        reporting.Severity(reporting.Severity.HIGH),
++        reporting.Groups(groups),
++        reporting.Remediation(hint=hint),
++    ])
++
++
++def _report_unexpected_keys(unexpected_fps):
++    summary = (
++        'The system contains unexpected GPG keys after the upgrade.'
++        ' This can be caused e.g. by a manual intervention'
++        ' or by a malicious attempt to hijack the upgrade process.'
++        ' The unexpected keys are the following:'
++        ' {sep}{key_list}'
++        .format(
++            sep=FMT_LIST_SEPARATOR,
++            key_list=FMT_LIST_SEPARATOR.join(unexpected_fps)
++        )
++    )
++    hint = (
++        'Verify the installed GPG keys are expected.'
++    )
++    groups = [
++        reporting.Groups.POST,
++        reporting.Groups.REPOSITORY,
++        reporting.Groups.SECURITY
++    ]
++    reporting.create_report([
++        reporting.Title('Detected unexpected GPG keys after the upgrade.'),
++        reporting.Summary(summary),
++        reporting.Severity(reporting.Severity.HIGH),
++        reporting.Groups(groups),
++        reporting.Remediation(hint=hint),
++    ])
++
++
++def process():
++    """
++    Verify the system does not have any unexpected GPG keys installed.
++
++    If the --no-gpgcheck option is used, this is skipped as we cannot
++    guarantee that what was installed came from a trusted source.
++    """
++
++    if is_nogpgcheck_set():
++        api.current_logger().warning('The --nogpgcheck option is used: Skipping the check of installed GPG keys.')
++        return
++
++    installed_fps_tuple = _get_installed_fps_tuple()
++
++    try:
++        trusted_gpg_keys = next(api.consume(TrustedGpgKeys))
++    except StopIteration:
++        # unexpected (bug) situation; keeping it as a seatbelt for the security aspect
++        installed_fps = ['{fp}: {packager}'.format(fp=fp, packager=packager) for fp, packager in installed_fps_tuple]
++        _report_cannot_check_keys(installed_fps)
++        return
++
++    trusted_fps = [key.fingerprint for key in trusted_gpg_keys.items]
++    unexpected_fps = []
++    for fp, packager in installed_fps_tuple:
++        if fp not in trusted_fps:
++            unexpected_fps.append('{fp}: {packager}'.format(fp=fp, packager=packager))
++
++    if unexpected_fps:
++        _report_unexpected_keys(unexpected_fps)
+--
+2.41.0
+
diff --git a/leapp-repository.spec b/leapp-repository.spec
index 8ba4345..b8cce79 100644
--- a/leapp-repository.spec
+++ b/leapp-repository.spec
@@ -42,7 +42,7 @@ py2_byte_compile "%1" "%2"}
 Name: leapp-repository
 Version: 0.19.0
-Release: 4%{?dist}
+Release: 5%{?dist}
 Summary: Repositories for leapp
 
 License: ASL 2.0
@@ -55,8 +55,47 @@ BuildArch: noarch
 
 ### PATCHES HERE
 # Patch0001: filename.patch
-Patch0001: 0001-RHSM-Adjust-the-switch-to-container-mode-for-new-RHS.patch
-Patch0002: 0002-Do-not-create-dangling-symlinks-for-containerized-RH.patch
+Patch0001: 0001-Further-narrow-down-packit-tests.patch
+Patch0002: 0002-Bring-back-uefi_test.patch
+Patch0003: 0003-Add-7.9-8.9-and-8.9-9.3-upgrade-paths.patch
+Patch0004: 0004-Split-tier1-tests-into-default-on-push-and-on-demand.patch
+Patch0005: 0005-Add-labels-to-all-tests.patch
+Patch0006: 0006-Refactor-using-YAML-anchors.patch
+Patch0007: 0007-Add-kernel-rt-tests-and-switch-to-sanity-for-default.patch
+Patch0008: 0008-Minor-label-enhancements.patch
+Patch0009: 0009-Update-pr-welcome-message.patch
+Patch0010: 0010-Address-ddiblik-s-review-comments.patch
+Patch0011: 0011-Address-mmoran-s-review-comments.patch
+Patch0012: 0012-Add-isccfg-library-manual-running-mode.patch
+Patch0013: 0013-Avoid-warnings-on-python2.patch
+Patch0014: 0014-makefile-add-dev_test_no_lint-target.patch
+Patch0015: 0015-Fix-the-issue-of-going-out-of-bounds-in-the-isccfg-p.patch
+Patch0016: 0016-make-pylint-and-spellcheck-happy-again.patch
+Patch0017: 0017-Remove-TUV-from-supported-target-channels.patch
+Patch0018: 0018-Transition-systemd-service-states-during-upgrade.patch
+Patch0019: 0019-Remove-obsoleted-enablersyncdservice-actor.patch
+Patch0020: 0020-default-to-NO_RHSM-mode-when-subscription-manager-is.patch
+Patch0021: 0021-call-correct-mkdir-when-trying-to-create-etc-rhsm-fa.patch
+Patch0022: 0022-RHSM-Adjust-the-switch-to-container-mode-for-new-RHS.patch
+Patch0023: 0023-load-all-substitutions-from-etc.patch
+Patch0024: 0024-Do-not-create-dangling-symlinks-for-containerized-RH.patch
+Patch0025: 0025-be-less-strict-when-figuring-out-major-version-in-in.patch
+Patch0026: 0026-rhui-bootstrap-target-rhui-clients-in-scratch-contai.patch
+Patch0027: 0027-add-backward-compatibility-for-leapp-rhui-aws-azure-.patch
+Patch0028: 0028-checknfs-do-not-check-systemd-mounts.patch
+Patch0029: 0029-Switch-from-plan-name-regex-to-filter-by-tags.patch
+Patch0030: 0030-Bring-back-reference-to-oamg-leapp-tests-repo.patch
+Patch0031: 0031-add-the-posibility-to-upgrade-with-a-local-repositor.patch
+Patch0032: 0032-Fix-certificate-symlink-handling.patch
+Patch0033: 0033-Add-prod-certs-and-upgrade-paths-for-8.10-9.4.patch
+Patch0034: 0034-pylint-ignore-too-many-lines.patch
+Patch0035: 0035-Update-upgrade-paths-Add-8.10-9.4.patch
+Patch0036: 0036-Copy-dnf.conf-to-target-userspace-and-allow-a-custom.patch
+Patch0037: 0037-adjustlocalrepos-suppress-unwanted-deprecation-repor.patch
+Patch0038: 0038-add-detection-for-custom-libraries-registered-by-ld..patch
+Patch0039: 0039-Fix-several-typos-and-Makefile-help.patch
+Patch0040: 0040-Move-code-handling-GPG-keys-to-separate-library.patch
+Patch0041: 0041-Check-no-new-unexpected-keys-were-installed-during-t.patch
 
 
 %description
@@ -201,6 +240,45 @@ Requires: python3-gobject-base
 # %%patch0001 -p1
 %patch0001 -p1
 %patch0002 -p1
+%patch0003 -p1
+%patch0004 -p1
+%patch0005 -p1
+%patch0006 -p1
+%patch0007 -p1
+%patch0008 -p1
+%patch0009 -p1
+%patch0010 -p1
+%patch0011 -p1
+%patch0012 -p1
+%patch0013 -p1
+%patch0014 -p1
+%patch0015 -p1
+%patch0016 -p1
+%patch0017 -p1
+%patch0018 -p1
+%patch0019 -p1
+%patch0020 -p1
+%patch0021 -p1
+%patch0022 -p1
+%patch0023 -p1
+%patch0024 -p1
+%patch0025 -p1
+%patch0026 -p1
+%patch0027 -p1
+%patch0028 -p1
+%patch0029 -p1
+%patch0030 -p1
+%patch0031 -p1
+%patch0032 -p1
+%patch0033 -p1
+%patch0034 -p1
+%patch0035 -p1
+%patch0036 -p1
+%patch0037 -p1
+%patch0038 -p1
+%patch0039 -p1
+%patch0040 -p1
+%patch0041 -p1
 
 
 %build
@@ -278,6 +356,23 @@ done;
 # no files here
 %changelog
+* Thu Nov 16 2023 Petr Stodulka - 0.19.0-5
+- Enable the new upgrade path for RHEL 8.10 -> RHEL 9.4 (including RHEL with SAP HANA)
+- Introduce generic transition of systemd service states during the IPU
+- Introduce the possibility to upgrade with local repositories
+- Improve the upgrade possibilities when a proxy is configured in the DNF configuration file
+- Fix handling of symlinks under /etc/pki when managing certificates
+- Fix the upgrade with custom HTTPS repositories
+- Default to the NO_RHSM mode when subscription-manager is not installed
+- Detect customized configuration of the dynamic linker
+- Drop the invalid `tuv` target channel for the --channel option
+- Fix the issue of going out of bounds in the isccfg parser
+- Fix a traceback when saving the rhsm facts results and the /etc/rhsm/facts directory doesn’t exist yet
+- Load all rpm repository substitutions that dnf knows about, not just "releasever"
+- Simplify handling of upgrades on systems using RHUI, reducing the maintenance burden for cloud providers
+- Detect whether unexpected RPM GPG keys were installed during the RPM transaction
+- Resolves: RHEL-16729
+
 * Thu Nov 02 2023 Petr Stodulka - 0.19.0-4
 - Fix the upgrade for systems without subscription-manager package
 - Resolves: RHEL-14901
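
Note: the essence of the gpgpubkeycheck actor added by patch 0041 is a plain set comparison: collect the fingerprints of all gpg-pubkey entries recorded in the RPM DB after the upgrade and flag any that are not among the TrustedGpgKeys facts gathered on the source system. The following standalone Python sketch illustrates that logic under simplified assumptions: hard-coded pipe-separated rpm lines stand in for get_installed_rpms(), printing stands in for leapp reports, and the 'deadbeef' key is a made-up example, not real data.

# Minimal standalone sketch of the unexpected-GPG-key check from patch 0041.
# Assumptions: input lines mimic the pipe-separated rpm output that
# _get_installed_fps_tuple() parses; no leapp runtime is involved.

def get_installed_fps(rpm_lines):
    """Collect (fingerprint, packager) tuples for gpg-pubkey entries."""
    installed = []
    for line in rpm_lines:
        line = line.strip()
        if not line:
            continue
        try:
            name, version, _release, _epoch, packager, _arch, _pgpsig = line.split('|')
        except ValueError:
            # the real actor logs an error here instead of silently skipping
            continue
        if name == 'gpg-pubkey':
            # for gpg-pubkey entries, the rpm "version" is the key fingerprint
            installed.append((version, packager))
    return installed


def find_unexpected_fps(installed, trusted_fps):
    """Return 'fingerprint: packager' strings for keys not in the trusted set."""
    return ['{0}: {1}'.format(fp, packager)
            for fp, packager in installed if fp not in trusted_fps]


if __name__ == '__main__':
    rpm_lines = [
        'gpg-pubkey|fd431d51|519400e3|0|Red Hat, Inc. (release key 2)|noarch|(none)',
        'gpg-pubkey|deadbeef|00000000|0|Unknown packager|noarch|(none)',  # hypothetical rogue key
        'bash|5.1.8|6.el9|0|Red Hat, Inc.|x86_64|RSA/SHA256, ...',
    ]
    trusted = ['fd431d51']  # fingerprints taken from the TrustedGpgKeys facts
    print(find_unexpected_fps(get_installed_fps(rpm_lines), trusted))
    # -> ['deadbeef: Unknown packager']

As the commit message notes, the actor runs in the ApplicationsPhase, after the DNF transaction has already been executed, which is why a mismatch produces a high-severity post-upgrade report rather than a hard stop.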