From 97becf699449f0d59746927586d40848c94ef5e5 Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Tue, 13 Dec 2022 06:13:56 +0000 Subject: [PATCH] import leapp-repository-0.17.0-5.el8 --- .gitignore | 2 +- .leapp-repository.metadata | 2 +- ...e-isort-check-for-deprecated-imports.patch | 131 ++ ...tHub-actions-workflow-for-spell-chec.patch | 1875 +++++++++++++++++ ...0007-Mini-updateds-in-the-spec-files.patch | 39 + ...-only-faiulres-and-undetermined-dev.patch} | 29 +- ...hecking-and-setting-systemd-services.patch | 383 ++++ ...tp-Replace-reports-with-log-messages.patch | 92 + ...-more-specific-exception-from-ntp2ch.patch | 28 + ...-Don-t-raise-StopActorExecutionError.patch | 92 + .../0013-Make-shellcheck-happy-again.patch | 32 + .../0014-actor-firewalld-support-0.8.z.patch | 231 ++ ...kgmanager-detect-proxy-configuration.patch | 257 +++ ...configscanner-actor-into-the-scanpkg.patch | 380 ++++ ...lowzonedrifting-Fix-the-remediation-.patch | 30 + ...apps-consider-RHUI-client-as-signed.patch} | 4 +- ...ps-handle-EUS-SAP-Apps-content-on-R.patch} | 5 +- ...=> 0020-checksaphana-Move-to-common.patch} | 4 +- ...st-for-el7toel8-and-el8toel9-requir.patch} | 7 +- ...t-enables-device_cio_free.service-on.patch | 105 + ...-actor-handling-the-IPU-with-ZFCP-s3.patch | 240 +++ ...cfg-bind-mount-dev-boot-into-the-use.patch | 118 ++ ...ide-common-information-about-systemd.patch | 1247 +++++++++++ ...ble-disable-reenable-_unit-functions.patch | 227 ++ ...broken-or-incorrect-systemd-symlinks.patch | 253 +++ ...stemd-symlinks-broken-before-the-upg.patch | 271 +++ ...icestasks-update-docstrings-extend-t.patch | 87 + ...g-a-target-RHEL-installation-ISO-ima.patch | 1515 +++++++++++++ ...1-Add-prod-certs-for-8.8-9.2-Beta-GA.patch | 702 ++++++ ...-Introduce-new-upgrade-paths-8.8-9.2.patch | 52 + ...mplement-get_common_tool_path-method.patch | 46 + ...reator-improve-copy-of-etc-pki-rpm-g.patch | 91 + ...ound-extend-the-model-by-script_args.patch | 75 + 
...duce-theimportrpmgpgkeys-tool-script.patch | 57 + ...during-IPU-add-nogpgcheck-CLI-option.patch | 1814 ++++++++++++++++ ...-missinggpgkey-polish-the-report-msg.patch | 68 + SPECS/leapp-repository.spec | 130 +- 37 files changed, 10670 insertions(+), 51 deletions(-) create mode 100644 SOURCES/0005-Disable-isort-check-for-deprecated-imports.patch create mode 100644 SOURCES/0006-Add-codespell-GitHub-actions-workflow-for-spell-chec.patch create mode 100644 SOURCES/0007-Mini-updateds-in-the-spec-files.patch rename SOURCES/{0001-CheckVDO-Ask-user-only-faiulres-and-undetermined-dev.patch => 0008-CheckVDO-Ask-user-only-faiulres-and-undetermined-dev.patch} (97%) create mode 100644 SOURCES/0009-Add-actors-for-checking-and-setting-systemd-services.patch create mode 100644 SOURCES/0010-migratentp-Replace-reports-with-log-messages.patch create mode 100644 SOURCES/0011-migratentp-Catch-more-specific-exception-from-ntp2ch.patch create mode 100644 SOURCES/0012-migratentp-Don-t-raise-StopActorExecutionError.patch create mode 100644 SOURCES/0013-Make-shellcheck-happy-again.patch create mode 100644 SOURCES/0014-actor-firewalld-support-0.8.z.patch create mode 100644 SOURCES/0015-Scanpkgmanager-detect-proxy-configuration.patch create mode 100644 SOURCES/0016-Merge-of-the-yumconfigscanner-actor-into-the-scanpkg.patch create mode 100644 SOURCES/0017-firewalldcheckallowzonedrifting-Fix-the-remediation-.patch rename SOURCES/{0005-rhui-azure-sap-apps-consider-RHUI-client-as-signed.patch => 0018-rhui-azure-sap-apps-consider-RHUI-client-as-signed.patch} (93%) rename SOURCES/{0006-rhui-azure-sap-apps-handle-EUS-SAP-Apps-content-on-R.patch => 0019-rhui-azure-sap-apps-handle-EUS-SAP-Apps-content-on-R.patch} (96%) rename SOURCES/{0007-checksaphana-Move-to-common.patch => 0020-checksaphana-Move-to-common.patch} (96%) rename SOURCES/{0008-checksaphana-Adjust-for-el7toel8-and-el8toel9-requir.patch => 0021-checksaphana-Adjust-for-el7toel8-and-el8toel9-requir.patch} (98%) create mode 100644 
SOURCES/0022-Add-an-actor-that-enables-device_cio_free.service-on.patch create mode 100644 SOURCES/0023-Add-the-scanzfcp-actor-handling-the-IPU-with-ZFCP-s3.patch create mode 100644 SOURCES/0024-ziplconverttoblscfg-bind-mount-dev-boot-into-the-use.patch create mode 100644 SOURCES/0025-Provide-common-information-about-systemd.patch create mode 100644 SOURCES/0026-systemd-Move-enable-disable-reenable-_unit-functions.patch create mode 100644 SOURCES/0027-Fix-broken-or-incorrect-systemd-symlinks.patch create mode 100644 SOURCES/0028-Add-check-for-systemd-symlinks-broken-before-the-upg.patch create mode 100644 SOURCES/0029-checksystemdservicestasks-update-docstrings-extend-t.patch create mode 100644 SOURCES/0030-Support-IPU-using-a-target-RHEL-installation-ISO-ima.patch create mode 100644 SOURCES/0031-Add-prod-certs-for-8.8-9.2-Beta-GA.patch create mode 100644 SOURCES/0032-Introduce-new-upgrade-paths-8.8-9.2.patch create mode 100644 SOURCES/0033-testutils-Implement-get_common_tool_path-method.patch create mode 100644 SOURCES/0034-targetuserspacecreator-improve-copy-of-etc-pki-rpm-g.patch create mode 100644 SOURCES/0035-DNFWorkaround-extend-the-model-by-script_args.patch create mode 100644 SOURCES/0036-Introduce-theimportrpmgpgkeys-tool-script.patch create mode 100644 SOURCES/0037-Enable-gpgcheck-during-IPU-add-nogpgcheck-CLI-option.patch create mode 100644 SOURCES/0038-missinggpgkey-polish-the-report-msg.patch diff --git a/.gitignore b/.gitignore index 3dc1fcd..56d0307 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,2 @@ -SOURCES/deps-pkgs-7.tar.gz +SOURCES/deps-pkgs-8.tar.gz SOURCES/leapp-repository-0.17.0.tar.gz diff --git a/.leapp-repository.metadata b/.leapp-repository.metadata index 9f0e332..05432a4 100644 --- a/.leapp-repository.metadata +++ b/.leapp-repository.metadata @@ -1,2 +1,2 @@ -4886551d9ee2259cdfbd8d64a02d0ab9a381ba3d SOURCES/deps-pkgs-7.tar.gz +4f7f6009adfe92d390e09beab710805fb0077c25 SOURCES/deps-pkgs-8.tar.gz 
cbb3e6025c6567507d3bc317731b4c2f0a0eb872 SOURCES/leapp-repository-0.17.0.tar.gz diff --git a/SOURCES/0005-Disable-isort-check-for-deprecated-imports.patch b/SOURCES/0005-Disable-isort-check-for-deprecated-imports.patch new file mode 100644 index 0000000..4ccc4d2 --- /dev/null +++ b/SOURCES/0005-Disable-isort-check-for-deprecated-imports.patch @@ -0,0 +1,131 @@ +From c5451ffb9a5c964552afd9d419855ea23b764ad7 Mon Sep 17 00:00:00 2001 +From: Inessa Vasilevskaya +Date: Wed, 24 Aug 2022 12:17:44 +0200 +Subject: [PATCH 05/32] Disable isort check for deprecated imports + +Although isort works fine most of the time, the way it handles +multiline imports with inline comments is not acceptable +to everyone in the team. +So before we implement some solution we are 146% happy about +it was decided to leave those imports just as they have been for +ages. This patch mutes isort import check for deprecated imports. +--- + .../common/actors/commonleappdracutmodules/actor.py | 7 ++++--- + .../actors/commonleappdracutmodules/libraries/modscan.py | 5 +++-- + .../tests/test_modscan_commonleappdracutmodules.py | 5 +++-- + .../tests/test_targetinitramfsgenerator.py | 5 +++-- + .../tests/unit_test_upgradeinitramfsgenerator.py | 5 +++-- + 5 files changed, 16 insertions(+), 11 deletions(-) + +diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/actor.py b/repos/system_upgrade/common/actors/commonleappdracutmodules/actor.py +index 950b6e88..aae42bbb 100644 +--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/actor.py ++++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/actor.py +@@ -1,13 +1,14 @@ + from leapp.actors import Actor + from leapp.libraries.actor import modscan +-from leapp.models import ( ++from leapp.tags import FactsPhaseTag, IPUWorkflowTag ++from leapp.utils.deprecation import suppress_deprecation ++ ++from leapp.models import ( # isort:skip + RequiredUpgradeInitramPackages, # deprecated + UpgradeDracutModule, # deprecated + 
TargetUserSpaceUpgradeTasks, + UpgradeInitramfsTasks + ) +-from leapp.tags import FactsPhaseTag, IPUWorkflowTag +-from leapp.utils.deprecation import suppress_deprecation + + + @suppress_deprecation(RequiredUpgradeInitramPackages, UpgradeDracutModule) +diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/libraries/modscan.py b/repos/system_upgrade/common/actors/commonleappdracutmodules/libraries/modscan.py +index a089c4c1..275b2c63 100644 +--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/libraries/modscan.py ++++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/libraries/modscan.py +@@ -3,14 +3,15 @@ import re + + from leapp.libraries.common.config import architecture, version + from leapp.libraries.stdlib import api +-from leapp.models import ( ++from leapp.utils.deprecation import suppress_deprecation ++ ++from leapp.models import ( # isort:skip + RequiredUpgradeInitramPackages, # deprecated + UpgradeDracutModule, # deprecated + DracutModule, + TargetUserSpaceUpgradeTasks, + UpgradeInitramfsTasks + ) +-from leapp.utils.deprecation import suppress_deprecation + + _REQUIRED_PACKAGES = [ + 'binutils', +diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/tests/test_modscan_commonleappdracutmodules.py b/repos/system_upgrade/common/actors/commonleappdracutmodules/tests/test_modscan_commonleappdracutmodules.py +index 307e927c..9c52b51f 100644 +--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/tests/test_modscan_commonleappdracutmodules.py ++++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/tests/test_modscan_commonleappdracutmodules.py +@@ -8,13 +8,14 @@ from leapp.libraries.actor import modscan + from leapp.libraries.common.config import architecture + from leapp.libraries.common.testutils import CurrentActorMocked + from leapp.libraries.stdlib import api +-from leapp.models import ( ++from leapp.utils.deprecation import suppress_deprecation ++ ++from leapp.models 
import ( # isort:skip + RequiredUpgradeInitramPackages, # deprecated + UpgradeDracutModule, # deprecated + TargetUserSpaceUpgradeTasks, + UpgradeInitramfsTasks + ) +-from leapp.utils.deprecation import suppress_deprecation + + + def _files_get_folder_path(name): +diff --git a/repos/system_upgrade/common/actors/initramfs/targetinitramfsgenerator/tests/test_targetinitramfsgenerator.py b/repos/system_upgrade/common/actors/initramfs/targetinitramfsgenerator/tests/test_targetinitramfsgenerator.py +index 98fe92c6..f5930b9b 100644 +--- a/repos/system_upgrade/common/actors/initramfs/targetinitramfsgenerator/tests/test_targetinitramfsgenerator.py ++++ b/repos/system_upgrade/common/actors/initramfs/targetinitramfsgenerator/tests/test_targetinitramfsgenerator.py +@@ -4,13 +4,14 @@ from leapp.exceptions import StopActorExecutionError + from leapp.libraries.actor import targetinitramfsgenerator + from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked + from leapp.libraries.stdlib import api, CalledProcessError +-from leapp.models import ( ++from leapp.utils.deprecation import suppress_deprecation ++ ++from leapp.models import ( # isort:skip + InitrdIncludes, # deprecated + DracutModule, + InstalledTargetKernelVersion, + TargetInitramfsTasks + ) +-from leapp.utils.deprecation import suppress_deprecation + + FILES = ['/file1', '/file2', '/dir/ect/ory/file3', '/file4', '/file5'] + MODULES = [ +diff --git a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/tests/unit_test_upgradeinitramfsgenerator.py b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/tests/unit_test_upgradeinitramfsgenerator.py +index b54aaa1f..2b401e52 100644 +--- a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/tests/unit_test_upgradeinitramfsgenerator.py ++++ b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/tests/unit_test_upgradeinitramfsgenerator.py +@@ -7,7 +7,9 @@ from leapp.exceptions import 
StopActorExecutionError + from leapp.libraries.actor import upgradeinitramfsgenerator + from leapp.libraries.common.config import architecture + from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked, produce_mocked +-from leapp.models import ( ++from leapp.utils.deprecation import suppress_deprecation ++ ++from leapp.models import ( # isort:skip + RequiredUpgradeInitramPackages, # deprecated + UpgradeDracutModule, # deprecated + BootContent, +@@ -16,7 +18,6 @@ from leapp.models import ( + TargetUserSpaceUpgradeTasks, + UpgradeInitramfsTasks, + ) +-from leapp.utils.deprecation import suppress_deprecation + + CUR_DIR = os.path.dirname(os.path.abspath(__file__)) + PKGS = ['pkg{}'.format(c) for c in 'ABCDEFGHIJ'] +-- +2.38.1 + diff --git a/SOURCES/0006-Add-codespell-GitHub-actions-workflow-for-spell-chec.patch b/SOURCES/0006-Add-codespell-GitHub-actions-workflow-for-spell-chec.patch new file mode 100644 index 0000000..670c810 --- /dev/null +++ b/SOURCES/0006-Add-codespell-GitHub-actions-workflow-for-spell-chec.patch @@ -0,0 +1,1875 @@ +From c3f32c8cd95011b9fb6b4c8a9ee27736ba551ae2 Mon Sep 17 00:00:00 2001 +From: Matej Matuska +Date: Tue, 23 Aug 2022 11:04:55 +0200 +Subject: [PATCH 06/32] Add codespell GitHub actions workflow for spell + checking code + +Use [codespell](https://github.com/codespell-project/actions-codespell) +for spell checking code in PRs and pushes to master. + +All existing typos are fixed or ignored in the following way: +- Files without any words to check are skipped, as they would just + create false positives. +- Words like `ro` and `fo` are ignored since they are pretty common in + the codebase and we don't want to trouble contributors with excluding + every line containing these from spell checking. It's up to + maintainers to spot these false negatives. 
+--- + .github/workflows/codespell.yml | 23 ++++++++++++++++++ + .github/workflows/pr-welcome-msg.yml | 2 +- + .github/workflows/reuse-copr-build.yml | 2 +- + Makefile | 2 +- + commands/command_utils.py | 2 +- + packaging/leapp-repository.spec | 8 +++---- + .../baculacheck/tests/test_baculacheck.py | 4 ++-- + .../biosdevname/libraries/biosdevname.py | 2 +- + .../biosdevname/tests/test_biosdevname.py | 2 +- + .../libraries/checkbootavailspace.py | 2 +- + .../tests/test_checkyumpluginsenabled.py | 2 +- + .../actors/cloud/checkhybridimage/actor.py | 2 +- + .../grubenvtofile/tests/test_grubenvtofile.py | 2 +- + .../dracut/85sys-upgrade-redhat/do-upgrade.sh | 4 ++-- + .../dracut/85sys-upgrade-redhat/mount_usr.sh | 2 +- + .../files/leapp_resume.service | 2 +- + .../tests/test_enablerhsmtargetrepos.py | 5 ++-- + .../tests/test_targetinitramfsgenerator.py | 4 ++-- + .../libraries/upgradeinitramfsgenerator.py | 2 +- + .../unit_test_upgradeinitramfsgenerator.py | 2 +- + .../tests/test_ipuworkflowconfig.py | 2 +- + .../libraries/checkinstalledkernels.py | 2 +- + .../libraries/readopensshconfig.py | 12 +++++----- + .../opensshpermitrootlogincheck/actor.py | 6 ++--- + .../actors/persistentnetnamesconfig/actor.py | 2 +- + .../libraries/persistentnetnamesconfig.py | 2 +- + .../libraries/peseventsscanner_repomap.py | 4 ++-- + .../tests/test_scanfilesfortargetuserspace.py | 4 ++-- + .../scansaphana/libraries/scansaphana.py | 2 +- + .../scansubscriptionmanagerinfo/actor.py | 2 +- + .../selinux/selinuxapplycustom/actor.py | 2 +- + .../libraries/selinuxcontentscanner.py | 2 +- + .../common/actors/setuptargetrepos/actor.py | 2 +- + .../libraries/setuptargetrepos_repomap.py | 4 ++-- + .../tests/test_repomapping.py | 8 +++---- + .../systemfacts/libraries/systemfacts.py | 4 ++-- + .../libraries/userspacegen.py | 8 +++---- + .../tests/unit_test_targetuserspacecreator.py | 2 +- + .../actors/unsupportedupgradecheck/actor.py | 2 +- + .../tests/test_updategrubcore.py | 2 +- + 
.../common/files/rhel_upgrade.py | 2 +- + .../common/libraries/config/__init__.py | 2 +- + .../common/libraries/dnfconfig.py | 2 +- + .../common/libraries/dnfplugin.py | 4 ++-- + .../system_upgrade/common/libraries/guards.py | 2 +- + .../common/libraries/mounting.py | 2 +- + .../common/libraries/overlaygen.py | 2 +- + .../common/libraries/tests/test_grub.py | 2 +- + .../common/libraries/tests/test_rhsm.py | 2 +- + .../system_upgrade/common/libraries/utils.py | 6 ++--- + repos/system_upgrade/common/models/cpuinfo.py | 2 +- + .../common/models/dnfplugintask.py | 2 +- + .../system_upgrade/common/models/initramfs.py | 4 ++-- + .../common/models/installeddesktopsfacts.py | 2 +- + repos/system_upgrade/common/models/module.py | 2 +- + .../common/models/opensshconfig.py | 2 +- + .../common/models/targetuserspace.py | 2 +- + .../common/workflows/inplace_upgrade.py | 4 ++-- + .../el7toel8/actors/bindupdate/actor.py | 2 +- + .../actors/bindupdate/libraries/updates.py | 2 +- + .../actors/checkremovedpammodules/actor.py | 4 ++-- + .../checksaphana/libraries/checksaphana.py | 2 +- + .../actors/cupscheck/libraries/cupscheck.py | 4 ++-- + .../grubdevname/tests/test_grubdevname.py | 2 +- + .../actors/multipathconfread/actor.py | 2 +- + .../actors/multipathconfupdate/actor.py | 2 +- + .../tests/test_postgresqlcheck.py | 4 ++-- + .../quaggatofrr/libraries/quaggatofrr.py | 2 +- + .../actors/quaggatofrr/tests/files/daemons | 2 +- + .../tests/test_unit_quaggatofrr.py | 2 +- + .../actors/satellite_upgrade_facts/actor.py | 2 +- + .../sctpconfigread/libraries/sctplib.py | 2 +- + .../libraries/spamassassinconfigcheck.py | 2 +- + .../test_library_spamassassinconfigcheck.py | 2 +- + .../actors/spamassassinconfigread/actor.py | 2 +- + .../libraries/spamassassinconfigread.py | 2 +- + .../test_lib_spamd_spamassassinconfigread.py | 24 +++++++++---------- + ...test_lib_spamd_spamassassinconfigupdate.py | 2 +- + .../libraries/tcpwrapperscheck.py | 6 ++--- + .../libraries/ziplcheckbootentries.py | 4 
++-- + .../actors/ziplconverttoblscfg/actor.py | 2 +- + .../el7toel8/libraries/isccfg.py | 6 ++--- + .../el7toel8/libraries/vsftpdutils.py | 4 ++-- + .../el7toel8/models/spamassassinfacts.py | 2 +- + .../actors/checkblsgrubcfgonppc64/actor.py | 2 +- + .../el8toel9/actors/checkvdo/actor.py | 2 +- + .../actors/checkvdo/libraries/checkvdo.py | 2 +- + .../actors/dotnet/tests/test_dotnet.py | 2 +- + .../mariadbcheck/tests/test_mariadbcheck.py | 4 ++-- + .../actors/multipathconfread/actor.py | 2 +- + .../actors/nischeck/libraries/nischeck.py | 2 +- + .../actors/nischeck/tests/test_nischeck.py | 6 ++--- + .../actors/nisscanner/libraries/nisscan.py | 2 +- + .../actors/nisscanner/tests/test_nisscan.py | 6 ++--- + .../libraries/opensshdropindirectory.py | 2 +- + .../opensshdropindirectorycheck/actor.py | 2 +- + .../libraries/add_provider.py | 2 +- + .../tests/test_postgresqlcheck.py | 4 ++-- + .../actors/pythonthreetmpworkaround/actor.py | 2 +- + .../targetuserspacecryptopolicies.py | 2 +- + .../actors/vdoconversionscanner/actor.py | 2 +- + .../el8toel9/models/opensslconfig.py | 2 +- + utils/ibdmp-decode | 2 +- + 103 files changed, 181 insertions(+), 157 deletions(-) + create mode 100644 .github/workflows/codespell.yml + +diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml +new file mode 100644 +index 00000000..e7532d98 +--- /dev/null ++++ b/.github/workflows/codespell.yml +@@ -0,0 +1,23 @@ ++name: Codespell ++ ++on: ++ push: ++ branches: ++ - master ++ pull_request: ++ branches: ++ - master ++ ++jobs: ++ codespell: ++ name: Check for spelling errors ++ runs-on: ubuntu-latest ++ ++ steps: ++ - uses: actions/checkout@v3 ++ - uses: codespell-project/actions-codespell@master ++ with: ++ ignore_words_list: ro,fo ++ skip: "./repos/system_upgrade/common/actors/storagescanner/tests/files/mounts,\ ++ ./repos/system_upgrade/el7toel8/actors/networkmanagerreadconfig/tests/files/nm_cfg_file_error,\ ++ 
./repos/system_upgrade/common/actors/scancpu/tests/files/lscpu_s390x" +diff --git a/.github/workflows/pr-welcome-msg.yml b/.github/workflows/pr-welcome-msg.yml +index c4435578..7ae2fa4e 100644 +--- a/.github/workflows/pr-welcome-msg.yml ++++ b/.github/workflows/pr-welcome-msg.yml +@@ -19,7 +19,7 @@ jobs: + issue-number: ${{ github.event.pull_request.number }} + body: | + ## **Thank you for contributing to the Leapp project!** +- Please note that every PR needs to comply with the [Leapp Guidelines](https://leapp.readthedocs.io/en/latest/contributing.html#) and must pass all tests in order to be mergable. ++ Please note that every PR needs to comply with the [Leapp Guidelines](https://leapp.readthedocs.io/en/latest/contributing.html#) and must pass all tests in order to be mergeable. + If you want to request a review or rebuild a package in copr, you can use following commands as a comment: + - **review please** to notify leapp developers of review request + - **/packit copr-build** to submit a public copr build using packit +diff --git a/.github/workflows/reuse-copr-build.yml b/.github/workflows/reuse-copr-build.yml +index 08d78024..477d3f40 100644 +--- a/.github/workflows/reuse-copr-build.yml ++++ b/.github/workflows/reuse-copr-build.yml +@@ -35,7 +35,7 @@ jobs: + echo "::set-output name=pr_nr::${PR_URL##*/}" + + - name: Checkout +- # TODO: The correct way to checkout would be to use simmilar approach as in get_commit_by_timestamp function of ++ # TODO: The correct way to checkout would be to use similar approach as in get_commit_by_timestamp function of + # the github gluetool module (i.e. do not use HEAD but the last commit before comment). 
+ id: checkout + uses: actions/checkout@v2 +diff --git a/Makefile b/Makefile +index 5650973c..e8d9f170 100644 +--- a/Makefile ++++ b/Makefile +@@ -60,7 +60,7 @@ endif + # someone will call copr_build without additional parameters + MASTER_BRANCH=master + +-# In case the PR or MR is defined or in case build is not comming from the ++# In case the PR or MR is defined or in case build is not coming from the + # MATER_BRANCH branch, N_REL=0; (so build is not update of the approved + # upstream solution). For upstream builds N_REL=100; + N_REL=`_NR=$${PR:+0}; if test "$${_NR:-100}" == "100"; then _NR=$${MR:+0}; fi; git rev-parse --abbrev-ref HEAD | grep -qE "^($(MASTER_BRANCH)|stable)$$" || _NR=0; echo $${_NR:-100}` +diff --git a/commands/command_utils.py b/commands/command_utils.py +index da62c50d..451324ee 100644 +--- a/commands/command_utils.py ++++ b/commands/command_utils.py +@@ -43,7 +43,7 @@ def get_major_version(version): + + def detect_sap_hana(): + """ +- Detect SAP HANA based on existance of /hana/shared/*/exe/linuxx86_64/hdb/sapcontrol ++ Detect SAP HANA based on existence of /hana/shared/*/exe/linuxx86_64/hdb/sapcontrol + """ + if os.path.exists(HANA_BASE_PATH): + for entry in os.listdir(HANA_BASE_PATH): +diff --git a/packaging/leapp-repository.spec b/packaging/leapp-repository.spec +index 5411fbb2..c59f8acd 100644 +--- a/packaging/leapp-repository.spec ++++ b/packaging/leapp-repository.spec +@@ -70,7 +70,7 @@ Requires: python2-leapp + Obsoletes: leapp-repository-data <= 0.6.1 + Provides: leapp-repository-data <= 0.6.1 + +-# Former leapp subpackage that is part of the sos package since HEL 7.8 ++# Former leapp subpackage that is part of the sos package since RHEL 7.8 + Obsoletes: leapp-repository-sos-plugin <= 0.9.0 + + # Set the conflict to be sure this RPM is not upgraded automatically to +@@ -90,7 +90,7 @@ Conflicts: leapp-upgrade-el7toel8 + + %endif + +-# IMPORTANT: everytime the requirements are changed, increment number by one ++# IMPORTANT: every 
time the requirements are changed, increment number by one + # - same for Provides in deps subpackage + Requires: leapp-repository-dependencies = %{leapp_repo_deps} + +@@ -124,7 +124,7 @@ Leapp repositories for the in-place upgrade to the next major version + of the Red Hat Enterprise Linux system. + + +-# This metapackage should contain all RPM dependencies exluding deps on *leapp* ++# This metapackage should contain all RPM dependencies excluding deps on *leapp* + # RPMs. This metapackage will be automatically replaced during the upgrade + # to satisfy dependencies with RPMs from target system. + %package -n %{lpr_name}-deps +@@ -133,7 +133,7 @@ Summary: Meta-package with system dependencies of %{lpr_name} package + # The package has been renamed, so let's obsoletes the old one + Obsoletes: leapp-repository-deps < 0.14.0-%{release} + +-# IMPORTANT: everytime the requirements are changed, increment number by one ++# IMPORTANT: every time the requirements are changed, increment number by one + # - same for Requires in main package + Provides: leapp-repository-dependencies = %{leapp_repo_deps} + ################################################## +diff --git a/repos/system_upgrade/common/actors/baculacheck/tests/test_baculacheck.py b/repos/system_upgrade/common/actors/baculacheck/tests/test_baculacheck.py +index ec4e7f81..6f81d5f7 100644 +--- a/repos/system_upgrade/common/actors/baculacheck/tests/test_baculacheck.py ++++ b/repos/system_upgrade/common/actors/baculacheck/tests/test_baculacheck.py +@@ -35,7 +35,7 @@ def test_actor_execution(monkeypatch, has_director): + Parametrized helper function for test_actor_* functions. + + First generate list of RPM models based on set arguments. Then, run +- the actor feeded with our RPM list. Finally, assert Reports ++ the actor fed with our RPM list. Finally, assert Reports + according to set arguments. 
+ + Parameters: +@@ -54,7 +54,7 @@ def test_actor_execution(monkeypatch, has_director): + monkeypatch.setattr(api, 'current_actor', curr_actor_mocked) + monkeypatch.setattr(reporting, "create_report", create_report_mocked()) + +- # Executed actor feeded with out fake RPMs ++ # Executed actor fed with out fake RPMs + report_installed_packages(_context=api) + + if has_director: +diff --git a/repos/system_upgrade/common/actors/biosdevname/libraries/biosdevname.py b/repos/system_upgrade/common/actors/biosdevname/libraries/biosdevname.py +index 5d44c58a..a6b4a242 100644 +--- a/repos/system_upgrade/common/actors/biosdevname/libraries/biosdevname.py ++++ b/repos/system_upgrade/common/actors/biosdevname/libraries/biosdevname.py +@@ -38,7 +38,7 @@ def all_interfaces_biosdevname(interfaces): + + def enable_biosdevname(): + api.current_logger().info( +- "Biosdevname naming scheme in use, explicitely enabling biosdevname on the target RHEL system" ++ "Biosdevname naming scheme in use, explicitly enabling biosdevname on the target RHEL system" + ) + api.produce(KernelCmdlineArg(**{'key': 'biosdevname', 'value': '1'})) + +diff --git a/repos/system_upgrade/common/actors/biosdevname/tests/test_biosdevname.py b/repos/system_upgrade/common/actors/biosdevname/tests/test_biosdevname.py +index 05a38ac6..c60aa7a4 100644 +--- a/repos/system_upgrade/common/actors/biosdevname/tests/test_biosdevname.py ++++ b/repos/system_upgrade/common/actors/biosdevname/tests/test_biosdevname.py +@@ -112,7 +112,7 @@ def test_enable_biosdevname(monkeypatch): + + biosdevname.enable_biosdevname() + assert ( +- "Biosdevname naming scheme in use, explicitely enabling biosdevname on the target RHEL system" ++ "Biosdevname naming scheme in use, explicitly enabling biosdevname on the target RHEL system" + in api.current_logger.infomsg + ) + assert result[0].key == "biosdevname" +diff --git a/repos/system_upgrade/common/actors/checkbootavailspace/libraries/checkbootavailspace.py 
b/repos/system_upgrade/common/actors/checkbootavailspace/libraries/checkbootavailspace.py +index 9e174484..7380f335 100644 +--- a/repos/system_upgrade/common/actors/checkbootavailspace/libraries/checkbootavailspace.py ++++ b/repos/system_upgrade/common/actors/checkbootavailspace/libraries/checkbootavailspace.py +@@ -29,7 +29,7 @@ def inhibit_upgrade(avail_bytes): + reporting.create_report([ + reporting.Title('Not enough space on /boot'), + reporting.Summary( +- '/boot needs additional {0} MiB to be able to accomodate the upgrade initramfs and new kernel.'.format( ++ '/boot needs additional {0} MiB to be able to accommodate the upgrade initramfs and new kernel.'.format( + additional_mib_needed) + ), + reporting.Severity(reporting.Severity.HIGH), +diff --git a/repos/system_upgrade/common/actors/checkyumpluginsenabled/tests/test_checkyumpluginsenabled.py b/repos/system_upgrade/common/actors/checkyumpluginsenabled/tests/test_checkyumpluginsenabled.py +index fa4462bd..896d31d5 100644 +--- a/repos/system_upgrade/common/actors/checkyumpluginsenabled/tests/test_checkyumpluginsenabled.py ++++ b/repos/system_upgrade/common/actors/checkyumpluginsenabled/tests/test_checkyumpluginsenabled.py +@@ -47,7 +47,7 @@ def test_report_when_missing_required_plugins(monkeypatch): + + check_required_yum_plugins_enabled(yum_config) + +- assert actor_reports.called, 'Report wasn\'t created when required a plugin is missing.' ++ assert actor_reports.called, "Report wasn't created when required a plugin is missing." + + fail_description = 'The missing required plugin is not mentioned in the report.' 
+ assert 'subscription-manager' in actor_reports.report_fields['summary'], fail_description +diff --git a/repos/system_upgrade/common/actors/cloud/checkhybridimage/actor.py b/repos/system_upgrade/common/actors/cloud/checkhybridimage/actor.py +index 54a2d331..47f5fdd8 100644 +--- a/repos/system_upgrade/common/actors/cloud/checkhybridimage/actor.py ++++ b/repos/system_upgrade/common/actors/cloud/checkhybridimage/actor.py +@@ -9,7 +9,7 @@ class CheckHybridImage(Actor): + Check if the system is using Azure hybrid image. + + These images have a default relative symlink to EFI +- partion even when booted using BIOS and in such cases ++ partition even when booted using BIOS and in such cases + GRUB is not able find "grubenv" to get the kernel cmdline + options and fails to boot after upgrade`. + """ +diff --git a/repos/system_upgrade/common/actors/cloud/grubenvtofile/tests/test_grubenvtofile.py b/repos/system_upgrade/common/actors/cloud/grubenvtofile/tests/test_grubenvtofile.py +index a8710691..807f5efa 100644 +--- a/repos/system_upgrade/common/actors/cloud/grubenvtofile/tests/test_grubenvtofile.py ++++ b/repos/system_upgrade/common/actors/cloud/grubenvtofile/tests/test_grubenvtofile.py +@@ -8,7 +8,7 @@ from leapp.models import HybridImage + + def raise_call_error(args=None): + raise CalledProcessError( +- message='A Leapp Command Error occured.', ++ message='A Leapp Command Error occurred.', + command=args, + result={'signal': None, 'exit_code': 1, 'pid': 0, 'stdout': 'fake', 'stderr': 'fake'} + ) +diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh +index 17d67315..1f39a6b2 100755 +--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh ++++ 
b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh +@@ -47,7 +47,7 @@ export NSPAWN_OPTS="$NSPAWN_OPTS --keep-unit --register=no --timezone=off --reso + + + # +-# Temp for collecting and preparing tarbal ++# Temp for collecting and preparing tarball + # + LEAPP_DEBUG_TMP="/tmp/leapp-debug-root" + +@@ -135,7 +135,7 @@ ibdmp() { + # 3. decorate each chunk with prefix `N:` where + # N is number of given chunk. + # +- # 4. Finally print all lines (pre-pended "header" ++ # 4. Finally print all lines (prepended "header" + # line and all chunks) several times, where + # every iteration should be prefixed by + # `_ibdmp:I/TTL|` and suffixed by `|`. +diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/mount_usr.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/mount_usr.sh +index 04ded4a3..3c52652f 100755 +--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/mount_usr.sh ++++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/mount_usr.sh +@@ -75,7 +75,7 @@ mount_usr() + } + + if [ -f "${NEWROOT}/etc/fstab" ]; then +- # Incase we have the LVM command available try make it activate all partitions ++ # In case we have the LVM command available try make it activate all partitions + if command -v lvm 2>/dev/null 1>/dev/null; then + lvm vgchange -a y + fi +diff --git a/repos/system_upgrade/common/actors/createresumeservice/files/leapp_resume.service b/repos/system_upgrade/common/actors/createresumeservice/files/leapp_resume.service +index 79cfa0be..39ac6112 100644 +--- a/repos/system_upgrade/common/actors/createresumeservice/files/leapp_resume.service ++++ b/repos/system_upgrade/common/actors/createresumeservice/files/leapp_resume.service +@@ -8,7 +8,7 @@ Wants=network-online.target + + [Service] + Type=oneshot +-# FIXME: this 
is temporary workround for Python3 ++# FIXME: this is temporary workaround for Python3 + ExecStart=/root/tmp_leapp_py3/leapp3 upgrade --resume + StandardOutput=journal+console + # FIXME: this shouldn't be needed, but Satellite upgrade runs installer, and that's slow +diff --git a/repos/system_upgrade/common/actors/enablerhsmtargetrepos/tests/test_enablerhsmtargetrepos.py b/repos/system_upgrade/common/actors/enablerhsmtargetrepos/tests/test_enablerhsmtargetrepos.py +index bccbbd80..12d53d26 100644 +--- a/repos/system_upgrade/common/actors/enablerhsmtargetrepos/tests/test_enablerhsmtargetrepos.py ++++ b/repos/system_upgrade/common/actors/enablerhsmtargetrepos/tests/test_enablerhsmtargetrepos.py +@@ -27,7 +27,7 @@ def not_isolated_actions(raise_err=False): + + def raise_call_error(args=None): + raise CalledProcessError( +- message='A Leapp Command Error occured.', ++ message='A Leapp Command Error occurred.', + command=args, + result={'signal': None, 'exit_code': 1, 'pid': 0, 'stdout': 'fake', 'stderr': 'fake'} + ) +@@ -61,7 +61,8 @@ def test_setrelease_submgr_throwing_error(monkeypatch): + monkeypatch.setattr(mounting, 'NotIsolatedActions', klass) + monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(dst_ver='8.0', envars={'LEAPP_NO_RHSM': '0'})) + monkeypatch.setattr(config, 'get_product_type', lambda dummy: 'ga') +- # free the set_release funtion from the @_rhsm_retry decorator which would otherwise cause 25 sec delay of the test ++ # free the set_release function from the @_rhsm_retry decorator ++ # which would otherwise cause 25 sec delay of the test + if sys.version_info.major < 3: + monkeypatch.setattr(rhsm, 'set_release', + rhsm.set_release.func_closure[0].cell_contents.func_closure[0].cell_contents) +diff --git a/repos/system_upgrade/common/actors/initramfs/targetinitramfsgenerator/tests/test_targetinitramfsgenerator.py b/repos/system_upgrade/common/actors/initramfs/targetinitramfsgenerator/tests/test_targetinitramfsgenerator.py +index 
f5930b9b..8403a431 100644 +--- a/repos/system_upgrade/common/actors/initramfs/targetinitramfsgenerator/tests/test_targetinitramfsgenerator.py ++++ b/repos/system_upgrade/common/actors/initramfs/targetinitramfsgenerator/tests/test_targetinitramfsgenerator.py +@@ -13,7 +13,7 @@ from leapp.models import ( # isort:skip + TargetInitramfsTasks + ) + +-FILES = ['/file1', '/file2', '/dir/ect/ory/file3', '/file4', '/file5'] ++FILES = ['/file1', '/file2', '/dir/subdir/subsubdir/file3', '/file4', '/file5'] + MODULES = [ + ('moduleA', None), + ('moduleB', None), +@@ -26,7 +26,7 @@ NO_INCLUDE_MSG = 'No additional files or modules required to add into the target + + def raise_call_error(args=None): + raise CalledProcessError( +- message='A Leapp Command Error occured.', ++ message='A Leapp Command Error occurred.', + command=args, + result={'signal': None, 'exit_code': 1, 'pid': 0, 'stdout': 'fake', 'stderr': 'fake'}) + +diff --git a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/libraries/upgradeinitramfsgenerator.py b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/libraries/upgradeinitramfsgenerator.py +index 8e59d5f3..991ace0e 100644 +--- a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/libraries/upgradeinitramfsgenerator.py ++++ b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/libraries/upgradeinitramfsgenerator.py +@@ -78,7 +78,7 @@ def _install_initram_deps(packages): + used_repos=used_repos) + + +-# duplicate of _copy_files fro userspacegen.py ++# duplicate of _copy_files from userspacegen.py + def _copy_files(context, files): + """ + Copy the files/dirs from the host to the `context` userspace +diff --git a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/tests/unit_test_upgradeinitramfsgenerator.py b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/tests/unit_test_upgradeinitramfsgenerator.py +index 2b401e52..13939df1 100644 +--- 
a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/tests/unit_test_upgradeinitramfsgenerator.py ++++ b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/tests/unit_test_upgradeinitramfsgenerator.py +@@ -261,7 +261,7 @@ def test_generate_initram_disk(monkeypatch, input_msgs, modules): + + # TODO(pstodulk): this test is not created properly, as context.call check + # is skipped completely. Testing will more convenient with fixed #376 +- # similar fo the files... ++ # similar to the files... + + + def test_copy_dracut_modules_rmtree_ignore(monkeypatch): +diff --git a/repos/system_upgrade/common/actors/ipuworkflowconfig/tests/test_ipuworkflowconfig.py b/repos/system_upgrade/common/actors/ipuworkflowconfig/tests/test_ipuworkflowconfig.py +index 12e9bb45..a5e4d03b 100644 +--- a/repos/system_upgrade/common/actors/ipuworkflowconfig/tests/test_ipuworkflowconfig.py ++++ b/repos/system_upgrade/common/actors/ipuworkflowconfig/tests/test_ipuworkflowconfig.py +@@ -24,7 +24,7 @@ def _clean_leapp_envs(monkeypatch): + + def _raise_call_error(*args): + raise CalledProcessError( +- message='A Leapp Command Error occured.', ++ message='A Leapp Command Error occurred.', + command=args, + result={'signal': None, 'exit_code': 1, 'pid': 0, 'stdout': 'fake', 'stderr': 'fake'} + ) +diff --git a/repos/system_upgrade/common/actors/kernel/checkinstalledkernels/libraries/checkinstalledkernels.py b/repos/system_upgrade/common/actors/kernel/checkinstalledkernels/libraries/checkinstalledkernels.py +index 7d6de89d..7dc5b2f2 100644 +--- a/repos/system_upgrade/common/actors/kernel/checkinstalledkernels/libraries/checkinstalledkernels.py ++++ b/repos/system_upgrade/common/actors/kernel/checkinstalledkernels/libraries/checkinstalledkernels.py +@@ -113,7 +113,7 @@ def process(): + raise StopActorExecutionError('Cannot find any installed kernel signed by Red Hat.') + + if len(pkgs) > 1 and architecture.matches_architecture(architecture.ARCH_S390X): +- # It's 
temporary solution, so no need to try automatize everything. ++ # It's temporary solution, so no need to try automate everything. + title = 'Multiple kernels installed' + summary = ('The upgrade process does not handle well the case when multiple kernels' + ' are installed on s390x. There is a severe risk of the bootloader configuration' +diff --git a/repos/system_upgrade/common/actors/opensshconfigscanner/libraries/readopensshconfig.py b/repos/system_upgrade/common/actors/opensshconfigscanner/libraries/readopensshconfig.py +index ba786025..e6cb9fcc 100644 +--- a/repos/system_upgrade/common/actors/opensshconfigscanner/libraries/readopensshconfig.py ++++ b/repos/system_upgrade/common/actors/opensshconfigscanner/libraries/readopensshconfig.py +@@ -43,33 +43,33 @@ def parse_config(config): + ret.permit_root_login.append(v) + + elif el[0].lower() == 'useprivilegeseparation': +- # Record only first occurence, which is effective ++ # Record only first occurrence, which is effective + if not ret.use_privilege_separation: + ret.use_privilege_separation = value + + elif el[0].lower() == 'protocol': +- # Record only first occurence, which is effective ++ # Record only first occurrence, which is effective + if not ret.protocol: + ret.protocol = value + + elif el[0].lower() == 'ciphers': +- # Record only first occurence, which is effective ++ # Record only first occurrence, which is effective + if not ret.ciphers: + ret.ciphers = value + + elif el[0].lower() == 'macs': +- # Record only first occurence, which is effective ++ # Record only first occurrence, which is effective + if not ret.macs: + ret.macs = value + + elif el[0].lower() == 'subsystem': +- # Record only first occurence, which is effective ++ # Record only first occurrence, which is effective + if el[1].lower() == 'sftp' and len(el) > 2 and not ret.subsystem_sftp: + # here we need to record all remaining items as command and arguments + ret.subsystem_sftp = ' '.join(el[2:]) + + elif el[0].lower() in 
DEPRECATED_DIRECTIVES: +- # Filter out duplicit occurences of the same deprecated directive ++ # Filter out duplicit occurrences of the same deprecated directive + if el[0].lower() not in ret.deprecated_directives: + # Use the directive in the form as found in config for user convenience + ret.deprecated_directives.append(el[0]) +diff --git a/repos/system_upgrade/common/actors/opensshpermitrootlogincheck/actor.py b/repos/system_upgrade/common/actors/opensshpermitrootlogincheck/actor.py +index 52553aaf..2ac4ec8f 100644 +--- a/repos/system_upgrade/common/actors/opensshpermitrootlogincheck/actor.py ++++ b/repos/system_upgrade/common/actors/opensshpermitrootlogincheck/actor.py +@@ -97,14 +97,14 @@ class OpenSshPermitRootLoginCheck(Actor): + 'OpenSSH is configured to deny root logins in match ' + 'blocks, but not explicitly enabled in global or ' + '"Match all" context. This update changes the ' +- 'default to disable root logins using paswords ' +- 'so your server migth get inaccessible.' ++ 'default to disable root logins using passwords ' ++ 'so your server might get inaccessible.' + ), + reporting.Severity(reporting.Severity.HIGH), + reporting.Groups(COMMON_REPORT_TAGS), + reporting.Remediation( + hint='Consider using different user for administrative ' +- 'logins or make sure your configration file ' ++ 'logins or make sure your configuration file ' + 'contains the line "PermitRootLogin yes" ' + 'in global context if desired.' 
+ ), +diff --git a/repos/system_upgrade/common/actors/persistentnetnamesconfig/actor.py b/repos/system_upgrade/common/actors/persistentnetnamesconfig/actor.py +index 31cad35c..2689d837 100644 +--- a/repos/system_upgrade/common/actors/persistentnetnamesconfig/actor.py ++++ b/repos/system_upgrade/common/actors/persistentnetnamesconfig/actor.py +@@ -17,7 +17,7 @@ class PersistentNetNamesConfig(Actor): + Generate udev persistent network naming configuration + + This actor generates systemd-udevd link files for each physical ethernet interface present on RHEL-7 +- in case we notice that interace name differs on RHEL-8. Link file configuration will assign RHEL-7 version of ++ in case we notice that interface name differs on RHEL-8. Link file configuration will assign RHEL-7 version of + a name. Actors produces list of interfaces which changed name between RHEL-7 and RHEL-8. + """ + +diff --git a/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py b/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py +index 6b3d6619..dc5196ea 100644 +--- a/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py ++++ b/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py +@@ -100,7 +100,7 @@ def process(): + + api.produce(RenamedInterfaces(renamed=renamed_interfaces)) + api.produce(InitrdIncludes(files=initrd_files)) +- # TODO: cover actor by tests in future. I am skipping writting of tests ++ # TODO: cover actor by tests in future. I am skipping writing of tests + # now as some refactoring and bugfixing related to this actor + # is planned already. 
+ api.produce(TargetInitramfsTasks(include_files=initrd_files)) +diff --git a/repos/system_upgrade/common/actors/peseventsscanner/libraries/peseventsscanner_repomap.py b/repos/system_upgrade/common/actors/peseventsscanner/libraries/peseventsscanner_repomap.py +index 567e8475..95147782 100644 +--- a/repos/system_upgrade/common/actors/peseventsscanner/libraries/peseventsscanner_repomap.py ++++ b/repos/system_upgrade/common/actors/peseventsscanner/libraries/peseventsscanner_repomap.py +@@ -155,7 +155,7 @@ class RepoMapDataHandler(object): + def get_source_pesid_repos(self, pesid): + """ + Return the list of PESIDRepositoryEntry objects for a specified PES ID +- mathing the source OS major version. ++ matching the source OS major version. + + :param pesid: The PES ID for which to retrieve PESIDRepositoryEntries. + :type pesid: str +@@ -168,7 +168,7 @@ class RepoMapDataHandler(object): + def get_target_pesid_repos(self, pesid): + """ + Return the list of PESIDRepositoryEntry objects for a specified PES ID +- mathing the target OS major version. ++ matching the target OS major version. + + :param pesid: The PES ID for which to retrieve PESIDRepositoryEntries. + :type pesid: str +diff --git a/repos/system_upgrade/common/actors/scanfilesfortargetuserspace/tests/test_scanfilesfortargetuserspace.py b/repos/system_upgrade/common/actors/scanfilesfortargetuserspace/tests/test_scanfilesfortargetuserspace.py +index dce0f534..afe1a443 100644 +--- a/repos/system_upgrade/common/actors/scanfilesfortargetuserspace/tests/test_scanfilesfortargetuserspace.py ++++ b/repos/system_upgrade/common/actors/scanfilesfortargetuserspace/tests/test_scanfilesfortargetuserspace.py +@@ -58,7 +58,7 @@ def test_etc_hosts_present(monkeypatch, isfile_default_config): + + preupgrade_task_msg = actor_produces.model_instances[0] + +- fail_msg = 'Didn\'t indentify any files to copy into target userspace (at least /etc/hosts shoud be).' 
++ fail_msg = 'Didn\'t identify any files to copy into target userspace (at least /etc/hosts should be).' + assert preupgrade_task_msg.copy_files, fail_msg + + should_copy_hostsfile = do_files_to_copy_contain_entry_with_src(preupgrade_task_msg.copy_files, '/etc/hosts') +@@ -70,7 +70,7 @@ def test_etc_hosts_present(monkeypatch, isfile_default_config): + + def test_etc_hosts_missing(monkeypatch, isfile_default_config): + """Tests whether /etc/hosts is not identified as "to be copied" into target userspace when it is missing.""" +- isfile_default_config['/etc/hosts'] = False # The file is not present or is a directory (-> shoud not be copied) ++ isfile_default_config['/etc/hosts'] = False # The file is not present or is a directory (-> should not be copied) + mocked_isfile = make_mocked_isfile(isfile_default_config) + actor_produces = produce_mocked() + +diff --git a/repos/system_upgrade/common/actors/scansaphana/libraries/scansaphana.py b/repos/system_upgrade/common/actors/scansaphana/libraries/scansaphana.py +index 1eec7a66..04195b57 100644 +--- a/repos/system_upgrade/common/actors/scansaphana/libraries/scansaphana.py ++++ b/repos/system_upgrade/common/actors/scansaphana/libraries/scansaphana.py +@@ -113,7 +113,7 @@ def get_instance_status(instance_number, sapcontrol_path, admin_name): + # GetProcessList has some oddities, like returning non zero exit codes with special meanings. + # Exit code 3 = All processes are running correctly + # Exit code 4 = All processes stopped +- # Other exit codes aren't handled at this time and it's assumed that SAP HANA is possibly in some unusal ++ # Other exit codes aren't handled at this time and it's assumed that SAP HANA is possibly in some unusual + # state. Such as starting/stopping but also that it is in some kind of failure state. 
+ output = run([ + 'sudo', '-u', admin_name, sapcontrol_path, '-nr', instance_number, '-function', 'GetProcessList'], +diff --git a/repos/system_upgrade/common/actors/scansubscriptionmanagerinfo/actor.py b/repos/system_upgrade/common/actors/scansubscriptionmanagerinfo/actor.py +index eb190085..50a7cd0f 100644 +--- a/repos/system_upgrade/common/actors/scansubscriptionmanagerinfo/actor.py ++++ b/repos/system_upgrade/common/actors/scansubscriptionmanagerinfo/actor.py +@@ -9,7 +9,7 @@ class ScanSubscriptionManagerInfo(Actor): + Scans the current system for subscription manager information + + Retrieves information about enabled and available repositories, attached SKUs, product certificates and release +- from the current system without modfying it. ++ from the current system without modifying it. + """ + + name = 'scan_subscription_manager_info' +diff --git a/repos/system_upgrade/common/actors/selinux/selinuxapplycustom/actor.py b/repos/system_upgrade/common/actors/selinux/selinuxapplycustom/actor.py +index 17e8cb58..52502e96 100644 +--- a/repos/system_upgrade/common/actors/selinux/selinuxapplycustom/actor.py ++++ b/repos/system_upgrade/common/actors/selinux/selinuxapplycustom/actor.py +@@ -17,7 +17,7 @@ class SELinuxApplyCustom(Actor): + Re-apply SELinux customizations from the original RHEL installation + + Re-apply SELinux policy customizations (custom policy modules and changes +- introduced by semanage). Any changes (due to incompatiblity with ++ introduced by semanage). Any changes (due to incompatibility with + SELinux policy in the upgraded system) are reported to user. 
+ """ + name = 'selinuxapplycustom' +diff --git a/repos/system_upgrade/common/actors/selinux/selinuxcontentscanner/libraries/selinuxcontentscanner.py b/repos/system_upgrade/common/actors/selinux/selinuxcontentscanner/libraries/selinuxcontentscanner.py +index ee162091..8f5e31ab 100644 +--- a/repos/system_upgrade/common/actors/selinux/selinuxcontentscanner/libraries/selinuxcontentscanner.py ++++ b/repos/system_upgrade/common/actors/selinux/selinuxcontentscanner/libraries/selinuxcontentscanner.py +@@ -125,7 +125,7 @@ def get_selinux_modules(): + return ([], [], []) + + for (name, priority) in modules: +- # Udica templates should not be transfered, we only need a list of their ++ # Udica templates should not be transferred, we only need a list of their + # names and priorities so that we can reinstall their latest verisions + if name in UDICA_TEMPLATES: + template_list.append( +diff --git a/repos/system_upgrade/common/actors/setuptargetrepos/actor.py b/repos/system_upgrade/common/actors/setuptargetrepos/actor.py +index 00de0739..47724f0d 100644 +--- a/repos/system_upgrade/common/actors/setuptargetrepos/actor.py ++++ b/repos/system_upgrade/common/actors/setuptargetrepos/actor.py +@@ -19,7 +19,7 @@ class SetupTargetRepos(Actor): + Produces list of repositories that should be available to be used by Upgrade process. + + Based on current set of Red Hat Enterprise Linux repositories, produces the list of target +- repositories. Additionaly process request to use custom repositories during the upgrade ++ repositories. Additionally process request to use custom repositories during the upgrade + transaction. 
+ """ + +diff --git a/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos_repomap.py b/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos_repomap.py +index 567e8475..95147782 100644 +--- a/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos_repomap.py ++++ b/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos_repomap.py +@@ -155,7 +155,7 @@ class RepoMapDataHandler(object): + def get_source_pesid_repos(self, pesid): + """ + Return the list of PESIDRepositoryEntry objects for a specified PES ID +- mathing the source OS major version. ++ matching the source OS major version. + + :param pesid: The PES ID for which to retrieve PESIDRepositoryEntries. + :type pesid: str +@@ -168,7 +168,7 @@ class RepoMapDataHandler(object): + def get_target_pesid_repos(self, pesid): + """ + Return the list of PESIDRepositoryEntry objects for a specified PES ID +- mathing the target OS major version. ++ matching the target OS major version. + + :param pesid: The PES ID for which to retrieve PESIDRepositoryEntries. + :type pesid: str +diff --git a/repos/system_upgrade/common/actors/setuptargetrepos/tests/test_repomapping.py b/repos/system_upgrade/common/actors/setuptargetrepos/tests/test_repomapping.py +index 19c41e19..4bda9397 100644 +--- a/repos/system_upgrade/common/actors/setuptargetrepos/tests/test_repomapping.py ++++ b/repos/system_upgrade/common/actors/setuptargetrepos/tests/test_repomapping.py +@@ -185,7 +185,7 @@ def test_get_target_pesid_repos(monkeypatch, repomap_data_for_pesid_repo_retriev + assert actual_pesid_repo in expected_pesid_repos, fail_description + + fail_description = ( +- 'The get_target_pesid_repos method doesn\'t take into account the taget system version correctly.' ++ 'The get_target_pesid_repos method doesn\'t take into account the target system version correctly.' 
+ ) + monkeypatch.setattr(api, 'current_actor', + CurrentActorMocked(arch='x86_64', src_ver='9.4', dst_ver='10.0')) +@@ -244,7 +244,7 @@ def test_find_repository_target_equivalent_fallback_to_default(monkeypatch, + """ + Test for the RepoMapDataHandler._find_repository_target_equivalent method. + +- Verifies that the method will find a target equivalent with matchin some of the fallback ++ Verifies that the method will find a target equivalent with matching some of the fallback + channels if a target equivalent that matches the source pesid repository completely is not + available in the repository mapping data. + """ +@@ -591,7 +591,7 @@ def test_find_repository_equivalent_with_priority_channel(monkeypatch): + + assert handler.prio_channel == 'eus' + +- fail_description = '_find_repository_target_equivalent does not correcly respect preferred channel.' ++ fail_description = '_find_repository_target_equivalent does not correctly respect preferred channel.' + expected_target_equivalent = repositories_mapping.repositories[2] + actual_target_equivalent = handler._find_repository_target_equivalent(repositories_mapping.repositories[0], + 'pesid2') +@@ -624,7 +624,7 @@ def test_get_expected_target_pesid_repos_with_priority_channel_set(monkeypatch): + handler.set_default_channels(['tuv', 'ga']) + target_repoids = handler.get_expected_target_pesid_repos(['pesid1-repoid-ga']) + +- fail_description = 'get_expected_target_peid_repos does not correcly respect preferred channel.' ++ fail_description = 'get_expected_target_peid_repos does not correctly respect preferred channel.' 
+ assert {'pesid2': repositories_mapping.repositories[2], + 'pesid3': repositories_mapping.repositories[4]} == target_repoids, fail_description + +diff --git a/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py b/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py +index 26e654ae..e34cb86b 100644 +--- a/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py ++++ b/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py +@@ -113,7 +113,7 @@ def _get_active_kernel_modules(logger): + + signature_string = None + if signature: +- # Remove whitspace from the signature string ++ # Remove whitespace from the signature string + signature_string = re.sub(r"\s+", "", signature, flags=re.UNICODE) + + # Since we're using the `/sys` VFS we need to use `os.listdir()` to get +@@ -274,7 +274,7 @@ def _default_grub_info(): + reporting.Title('File "{}" does not exist!'.format(default_grb_fpath)), + reporting.Summary( + 'Leapp detected "{}" does not exist. The file is essential for the in-place upgrade ' +- 'to finish successfully. This scenario might have occured if the system was already ' ++ 'to finish successfully. This scenario might have occurred if the system was already ' + 'upgraded from RHEL 6. Please re-create the file manually.'.format(default_grb_fpath) + ), + reporting.Severity(reporting.Severity.HIGH), +diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py +index ee1aa8fb..c39af66f 100644 +--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py ++++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py +@@ -30,7 +30,7 @@ from leapp.utils.deprecation import suppress_deprecation + # # (0.) consume process input data + # # 1. 
prepare the first container, to be able to obtain repositories for the + # # target system (this is extra neededwhen rhsm is used, but not reason to +-# # do such thing only when rhsm is used. Be persistant here ++# # do such thing only when rhsm is used. Be persistent here + # # 2. gather target repositories that should AND can be used + # # - basically here is the main thing that is PITA; I started + # # the refactoring but realized that it needs much more changes because +@@ -60,7 +60,7 @@ def _check_deprecated_rhsm_skip(): + if get_env('LEAPP_DEVEL_SKIP_RHSM', '0') == '1': + api.current_logger().warning( + 'The LEAPP_DEVEL_SKIP_RHSM has been deprecated. Use' +- ' LEAPP_NO_RHSM istead or use the --no-rhsm option for' ++ ' LEAPP_NO_RHSM instead or use the --no-rhsm option for' + ' leapp. as well custom repofile has not been defined.' + ' Please read documentation about new "skip rhsm" solution.' + ) +@@ -510,7 +510,7 @@ def gather_target_repositories(context, indata): + else: + # TODO: We shall report that the RHEL repos that we deem necessary for + # the upgrade are not available; but currently it would just print bunch of +- # data everytime as we maps EUS and other repositories as well. But these ++ # data every time as we maps EUS and other repositories as well. But these + # do not have to be necessary available on the target system in the time + # of the upgrade. Let's skip it for now until it's clear how we will deal + # with it. +@@ -582,7 +582,7 @@ def _install_custom_repofiles(context, custom_repofiles): + """ + Install the required custom repository files into the container. + +- The repostory files are copied from the host into the /etc/yum.repos.d ++ The repository files are copied from the host into the /etc/yum.repos.d + directory into the container. 
+ + :param context: the container where the repofiles should be copied +diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py +index 425f3062..276175a1 100644 +--- a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py ++++ b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py +@@ -373,5 +373,5 @@ def test_perform_ok(monkeypatch): + assert userspacegen.api.produce.called == 3 + assert isinstance(userspacegen.api.produce.model_instances[0], models.TMPTargetRepositoriesFacts) + assert userspacegen.api.produce.model_instances[1] == msg_target_repos +- # this one is full of contants, so it's safe to check just the instance ++ # this one is full of constants, so it's safe to check just the instance + assert isinstance(userspacegen.api.produce.model_instances[2], models.TargetUserSpaceInfo) +diff --git a/repos/system_upgrade/common/actors/unsupportedupgradecheck/actor.py b/repos/system_upgrade/common/actors/unsupportedupgradecheck/actor.py +index fd1b1ef6..e8b3499a 100644 +--- a/repos/system_upgrade/common/actors/unsupportedupgradecheck/actor.py ++++ b/repos/system_upgrade/common/actors/unsupportedupgradecheck/actor.py +@@ -6,7 +6,7 @@ from leapp.tags import ChecksPhaseTag, IPUWorkflowTag + + class UnsupportedUpgradeCheck(Actor): + """ +- Checks enviroment variables and produces a warning report if the upgrade is unsupported. ++ Checks environment variables and produces a warning report if the upgrade is unsupported. + + Upgrade is unsupported if any LEAPP_DEVEL_* variable is used or an experimental actor is enabled. + This can be overridden by setting the variable LEAPP_UNSUPPORTED (at user's own risk). 
+diff --git a/repos/system_upgrade/common/actors/updategrubcore/tests/test_updategrubcore.py b/repos/system_upgrade/common/actors/updategrubcore/tests/test_updategrubcore.py +index afeff4a4..c72a5f4a 100644 +--- a/repos/system_upgrade/common/actors/updategrubcore/tests/test_updategrubcore.py ++++ b/repos/system_upgrade/common/actors/updategrubcore/tests/test_updategrubcore.py +@@ -14,7 +14,7 @@ UPDATE_FAILED_TITLE = 'GRUB core update failed' + + def raise_call_error(args=None): + raise CalledProcessError( +- message='A Leapp Command Error occured.', ++ message='A Leapp Command Error occurred.', + command=args, + result={'signal': None, 'exit_code': 1, 'pid': 0, 'stdout': 'fake', 'stderr': 'fake'} + ) +diff --git a/repos/system_upgrade/common/files/rhel_upgrade.py b/repos/system_upgrade/common/files/rhel_upgrade.py +index 6d6ad752..62a92989 100644 +--- a/repos/system_upgrade/common/files/rhel_upgrade.py ++++ b/repos/system_upgrade/common/files/rhel_upgrade.py +@@ -143,7 +143,7 @@ class RhelUpgradeCommand(dnf.cli.Command): + # folder in "/var/cache/dnf" as it has different digest calculated based on already substituted + # placeholder. + # E.g +- # "https://rhui3.REGION.aws.ce.redhat.com" becames "https://rhui3.eu-central-1.aws.ce.redhat.com" ++ # "https://rhui3.REGION.aws.ce.redhat.com" becomes "https://rhui3.eu-central-1.aws.ce.redhat.com" + # + # region should be same for all repos so we are fine to collect it from + # the last one +diff --git a/repos/system_upgrade/common/libraries/config/__init__.py b/repos/system_upgrade/common/libraries/config/__init__.py +index 8835a568..5c8f2efb 100644 +--- a/repos/system_upgrade/common/libraries/config/__init__.py ++++ b/repos/system_upgrade/common/libraries/config/__init__.py +@@ -63,7 +63,7 @@ def get_target_product_channel(default='ga'): + - Using the environment variable LEAPP_DEVEL_TARGET_PRODUCT_TYPE (devel variable with higher priority than + any other way of specifying target channel). 
+ - Using the environment variable LEAPP_TARGET_PRODUCT_CHANNEL +- - Using the '--channel' option when runnning leapp preupgrade/upgrade ++ - Using the '--channel' option when running leapp preupgrade/upgrade + + :param default: Value to be returned if no target product type has been specified when running leapp. + :type default: str +diff --git a/repos/system_upgrade/common/libraries/dnfconfig.py b/repos/system_upgrade/common/libraries/dnfconfig.py +index 49bf8009..64d6c204 100644 +--- a/repos/system_upgrade/common/libraries/dnfconfig.py ++++ b/repos/system_upgrade/common/libraries/dnfconfig.py +@@ -12,7 +12,7 @@ def get_leapp_packages(): + installed. + + The snactor RPM doesn't have to be installed, but if so, we have to take +- care about that too as well to preven broken dnf transaction. ++ care about that too as well to prevent broken dnf transaction. + """ + # TODO: should we set the seatbelt and exclude leapp RPMs from the target + # system too? +diff --git a/repos/system_upgrade/common/libraries/dnfplugin.py b/repos/system_upgrade/common/libraries/dnfplugin.py +index 4010e9f3..56b703d5 100644 +--- a/repos/system_upgrade/common/libraries/dnfplugin.py ++++ b/repos/system_upgrade/common/libraries/dnfplugin.py +@@ -244,7 +244,7 @@ def apply_workarounds(context=None): + context.call(['/bin/bash', '-c', workaround.script_path]) + except (OSError, CalledProcessError) as e: + raise StopActorExecutionError( +- message=('Failed to exceute script to apply transaction workaround {display_name}.' ++ message=('Failed to execute script to apply transaction workaround {display_name}.' 
+ ' Message: {error}'.format(error=str(e), display_name=workaround.display_name)) + ) + +@@ -336,7 +336,7 @@ def perform_transaction_install(target_userspace_info, storage_info, used_repos, + ) + + # we have to ensure the leapp packages will stay untouched even after the +- # upgrade is fully finished (it cannot be done before the upgarde ++ # upgrade is fully finished (it cannot be done before the upgrade + # on the host as the config-manager plugin is available since rhel-8) + dnfconfig.exclude_leapp_rpms(mounting.NotIsolatedActions(base_dir='/')) + +diff --git a/repos/system_upgrade/common/libraries/guards.py b/repos/system_upgrade/common/libraries/guards.py +index 763483aa..c8001817 100644 +--- a/repos/system_upgrade/common/libraries/guards.py ++++ b/repos/system_upgrade/common/libraries/guards.py +@@ -38,7 +38,7 @@ def connection_guard(url='https://example.com'): + return None + except URLError as e: + cause = '''Failed to open url '{url}' with error: {error}'''.format(url=url, error=e) +- return ('There was probably a problem with internet conection ({cause}).' ++ return ('There was probably a problem with internet connection ({cause}).' + ' Check your connection and try again.'.format(cause=cause)) + return closure + +diff --git a/repos/system_upgrade/common/libraries/mounting.py b/repos/system_upgrade/common/libraries/mounting.py +index d12344c2..f272d8c7 100644 +--- a/repos/system_upgrade/common/libraries/mounting.py ++++ b/repos/system_upgrade/common/libraries/mounting.py +@@ -10,7 +10,7 @@ from leapp.libraries.stdlib import api, CalledProcessError, run + + # Using ALWAYS_BIND will crash the upgrade process if the file does not exist. + # Consider instead adding an entry to the ScanFilesToCopyIntoTargetSystem actor that +-# conditionaly (only if it exists) creates CopyFile message to the TargetUserspaceCreator. ++# conditionally (only if it exists) creates CopyFile message to the TargetUserspaceCreator. 
+ ALWAYS_BIND = [] + + ErrorData = namedtuple('ErrorData', ['summary', 'details']) +diff --git a/repos/system_upgrade/common/libraries/overlaygen.py b/repos/system_upgrade/common/libraries/overlaygen.py +index 43695c7d..b544f88c 100644 +--- a/repos/system_upgrade/common/libraries/overlaygen.py ++++ b/repos/system_upgrade/common/libraries/overlaygen.py +@@ -55,7 +55,7 @@ def _prepare_required_mounts(scratch_dir, mounts_dir, mount_points, xfs_info): + space_needed = _overlay_disk_size() * len(xfs_info.mountpoints_without_ftype) + disk_images_directory = os.path.join(scratch_dir, 'diskimages') + +- # Ensure we cleanup old disk images before we check for space contraints. ++ # Ensure we cleanup old disk images before we check for space constraints. + run(['rm', '-rf', disk_images_directory]) + _create_diskimages_dir(scratch_dir, disk_images_directory) + _ensure_enough_diskimage_space(space_needed, scratch_dir) +diff --git a/repos/system_upgrade/common/libraries/tests/test_grub.py b/repos/system_upgrade/common/libraries/tests/test_grub.py +index 1775790e..ba086854 100644 +--- a/repos/system_upgrade/common/libraries/tests/test_grub.py ++++ b/repos/system_upgrade/common/libraries/tests/test_grub.py +@@ -19,7 +19,7 @@ CUR_DIR = os.path.dirname(os.path.abspath(__file__)) + + def raise_call_error(args=None): + raise CalledProcessError( +- message='A Leapp Command Error occured.', ++ message='A Leapp Command Error occurred.', + command=args, + result={'signal': None, 'exit_code': 1, 'pid': 0, 'stdout': 'fake', 'stderr': 'fake'} + ) +diff --git a/repos/system_upgrade/common/libraries/tests/test_rhsm.py b/repos/system_upgrade/common/libraries/tests/test_rhsm.py +index 193bbcc8..a6dbea96 100644 +--- a/repos/system_upgrade/common/libraries/tests/test_rhsm.py ++++ b/repos/system_upgrade/common/libraries/tests/test_rhsm.py +@@ -244,7 +244,7 @@ def test_get_release(monkeypatch, actor_mocked, context_mocked): + + def test_get_release_with_release_not_set(monkeypatch, actor_mocked, 
context_mocked): + """Tests whether the library does not retrieve release information when the release is not set.""" +- # Test whether no realease is detected correctly too ++ # Test whether no release is detected correctly too + context_mocked.add_mocked_command_call_with_stdout(CMD_RHSM_RELEASE, 'Release not set') + + release = rhsm.get_release(context_mocked) +diff --git a/repos/system_upgrade/common/libraries/utils.py b/repos/system_upgrade/common/libraries/utils.py +index 6793de63..943a6e0b 100644 +--- a/repos/system_upgrade/common/libraries/utils.py ++++ b/repos/system_upgrade/common/libraries/utils.py +@@ -49,7 +49,7 @@ def makedirs(path, mode=0o777, exists_ok=True): + + @deprecated(since='2022-02-03', message=( + 'The "apply_yum_workaround" function has been deprecated, use "DNFWorkaround" ' +- 'message as used in the successing "RegisterYumAdjustment" actor.' ++ 'message as used in the successive "RegisterYumAdjustment" actor.' + ) + ) + def apply_yum_workaround(context=None): +@@ -69,7 +69,7 @@ def apply_yum_workaround(context=None): + context.call(cmd) + except OSError as e: + raise StopActorExecutionError( +- message='Failed to exceute script to apply yum adjustment. Message: {}'.format(str(e)) ++ message='Failed to execute script to apply yum adjustment. Message: {}'.format(str(e)) + ) + except CalledProcessError as e: + raise StopActorExecutionError( +@@ -160,7 +160,7 @@ def clean_guard(cleanup_function): + try: + cleanup_function(*args, **kwargs) + except Exception: # pylint: disable=broad-except +- # Broad exception handler to handle all cases however, swallowed, to avoid loosing the original ++ # Broad exception handler to handle all cases however, swallowed, to avoid losing the original + # error. Logging for debuggability. 
+ api.current_logger().warning('Caught and swallowed an exception during cleanup.', exc_info=True) + raise # rethrow original exception +diff --git a/repos/system_upgrade/common/models/cpuinfo.py b/repos/system_upgrade/common/models/cpuinfo.py +index 71f58b24..e3e52838 100644 +--- a/repos/system_upgrade/common/models/cpuinfo.py ++++ b/repos/system_upgrade/common/models/cpuinfo.py +@@ -8,7 +8,7 @@ class CPUInfo(Model): + + The model currently doesn't represent all information about cpus could + provide on the machine. Just part of them, in case any other attributes +- will be neded, the model can be extended. ++ will be needed, the model can be extended. + + The provided info is aggregated - like from lscpu command. Expecting all + CPUs are same on the machine (at least for now). +diff --git a/repos/system_upgrade/common/models/dnfplugintask.py b/repos/system_upgrade/common/models/dnfplugintask.py +index 873e5d7d..74c084fd 100644 +--- a/repos/system_upgrade/common/models/dnfplugintask.py ++++ b/repos/system_upgrade/common/models/dnfplugintask.py +@@ -4,7 +4,7 @@ from leapp.topics import SystemInfoTopic + + class DNFPluginTask(Model): + """ +- Represents information what should DNF do with a specifiec DNF plugin ++ Represents information what should DNF do with a specific DNF plugin + in various stages. + + Currently, it's possible just to disable specified DNF plugins. 
+diff --git a/repos/system_upgrade/common/models/initramfs.py b/repos/system_upgrade/common/models/initramfs.py +index 6c6bb999..a5d1416e 100644 +--- a/repos/system_upgrade/common/models/initramfs.py ++++ b/repos/system_upgrade/common/models/initramfs.py +@@ -63,7 +63,7 @@ class UpgradeInitramfsTasks(Model): + + include_files = fields.List(fields.String(), default=[]) + """ +- List of files (cannonical filesystem paths) to include in the initramfs ++ List of files (canonical filesystem paths) to include in the initramfs + """ + + include_dracut_modules = fields.List(fields.Model(DracutModule), default=[]) +@@ -91,7 +91,7 @@ class TargetInitramfsTasks(UpgradeInitramfsTasks): + @deprecated(since='2021-10-10', message='Replaced by TargetInitramfsTasks.') + class InitrdIncludes(Model): + """ +- List of files (cannonical filesystem paths) to include in RHEL-8 initramfs ++ List of files (canonical filesystem paths) to include in RHEL-8 initramfs + """ + topic = SystemInfoTopic + +diff --git a/repos/system_upgrade/common/models/installeddesktopsfacts.py b/repos/system_upgrade/common/models/installeddesktopsfacts.py +index 2dfc6c1c..87b0ca9f 100644 +--- a/repos/system_upgrade/common/models/installeddesktopsfacts.py ++++ b/repos/system_upgrade/common/models/installeddesktopsfacts.py +@@ -4,7 +4,7 @@ from leapp.topics import SystemFactsTopic + + class InstalledDesktopsFacts(Model): + """ +- The model includes fact about installe ++ The model includes fact about installed + """ + topic = SystemFactsTopic + gnome_installed = fields.Boolean(default=False) +diff --git a/repos/system_upgrade/common/models/module.py b/repos/system_upgrade/common/models/module.py +index 781a9b30..688c43cf 100644 +--- a/repos/system_upgrade/common/models/module.py ++++ b/repos/system_upgrade/common/models/module.py +@@ -4,7 +4,7 @@ from leapp.topics import SystemFactsTopic + + class Module(Model): + """ +- A single DNF module indentified by its name and stream. 
++ A single DNF module identified by its name and stream. + """ + topic = SystemFactsTopic + name = fields.String() +diff --git a/repos/system_upgrade/common/models/opensshconfig.py b/repos/system_upgrade/common/models/opensshconfig.py +index e94c6881..f4dc3261 100644 +--- a/repos/system_upgrade/common/models/opensshconfig.py ++++ b/repos/system_upgrade/common/models/opensshconfig.py +@@ -9,7 +9,7 @@ class OpenSshPermitRootLogin(Model): + 'forced-commands-only', 'no']) + """ Value of a PermitRootLogin directive. """ + in_match = fields.Nullable(fields.List(fields.String())) +- """ Criteria of Match blocks the PermitRootLogin directive occured in, if any. """ ++ """ Criteria of Match blocks the PermitRootLogin directive occurred in, if any. """ + + + class OpenSshConfig(Model): +diff --git a/repos/system_upgrade/common/models/targetuserspace.py b/repos/system_upgrade/common/models/targetuserspace.py +index d6d03bab..4b5d4bd7 100644 +--- a/repos/system_upgrade/common/models/targetuserspace.py ++++ b/repos/system_upgrade/common/models/targetuserspace.py +@@ -54,7 +54,7 @@ class CopyFile(Model): + + src = fields.String() + """ +- Cannonical path to the file (on the host) that should be copied ++ Canonical path to the file (on the host) that should be copied + """ + + dst = fields.Nullable(fields.String()) +diff --git a/repos/system_upgrade/common/workflows/inplace_upgrade.py b/repos/system_upgrade/common/workflows/inplace_upgrade.py +index eb2313a4..d4871aa3 100644 +--- a/repos/system_upgrade/common/workflows/inplace_upgrade.py ++++ b/repos/system_upgrade/common/workflows/inplace_upgrade.py +@@ -50,7 +50,7 @@ class IPUWorkflow(Workflow): + Get information about target system. Analogy of FactsCollectionPhase for target system. + + Here we can collect information what repositories are available on target system, +- what is expected calculation of target transaction (what will be instaled, removed, ... 
++ what is expected calculation of target transaction (what will be installed, removed, ... + """ + + name = 'TargetTransactionFactsCollection' +@@ -176,7 +176,7 @@ class IPUWorkflow(Workflow): + + class ApplicationsPhase(Phase): + """ +- Perform the neccessary steps to finish upgrade of applications provided by Red Hat. ++ Perform the necessary steps to finish upgrade of applications provided by Red Hat. + + This may include moving/renaming of configuration files, modifying configuration of applications to be able + to run correctly and with as similar behaviour to the original as possible. +diff --git a/repos/system_upgrade/el7toel8/actors/bindupdate/actor.py b/repos/system_upgrade/el7toel8/actors/bindupdate/actor.py +index 6e94b8c8..d612ffb9 100644 +--- a/repos/system_upgrade/el7toel8/actors/bindupdate/actor.py ++++ b/repos/system_upgrade/el7toel8/actors/bindupdate/actor.py +@@ -7,7 +7,7 @@ from leapp.tags import IPUWorkflowTag, PreparationPhaseTag + + class BindUpdate(Actor): + """ +- Actor parsing facts found in configuration and modifing configuration. ++ Actor parsing facts found in configuration and modifying configuration. + """ + + name = 'bind_update' +diff --git a/repos/system_upgrade/el7toel8/actors/bindupdate/libraries/updates.py b/repos/system_upgrade/el7toel8/actors/bindupdate/libraries/updates.py +index 9d7b9a36..aa0aeeb8 100644 +--- a/repos/system_upgrade/el7toel8/actors/bindupdate/libraries/updates.py ++++ b/repos/system_upgrade/el7toel8/actors/bindupdate/libraries/updates.py +@@ -44,7 +44,7 @@ def update_section(parser, section): + + + def update_config(parser, cfg): +- """Modify contents of file accoriding to rules. ++ """Modify contents of file according to rules. 
+ + :type cfg: ConfigFile + :returns str: Modified config contents +diff --git a/repos/system_upgrade/el7toel8/actors/checkremovedpammodules/actor.py b/repos/system_upgrade/el7toel8/actors/checkremovedpammodules/actor.py +index 9572d694..503f6149 100644 +--- a/repos/system_upgrade/el7toel8/actors/checkremovedpammodules/actor.py ++++ b/repos/system_upgrade/el7toel8/actors/checkremovedpammodules/actor.py +@@ -12,7 +12,7 @@ class CheckRemovedPamModules(Actor): + Check for modules that are not available in RHEL 8 anymore + + At this moment, we check only for pam_tally2. Few more modules +- are alredy covered in RemoveOldPAMModulesApply actor ++ are already covered in RemoveOldPAMModulesApply actor + """ + + name = 'removed_pam_modules' +@@ -30,7 +30,7 @@ class CheckRemovedPamModules(Actor): + 'Could not check pam configuration', details={'details': 'No PamConfiguration facts found.'} + ) + +- # This list contain tupples of removed modules and their recommended replacements ++ # This list contain tuples of removed modules and their recommended replacements + removed_modules = [ + ('pam_tally2', 'pam_faillock'), + ] +diff --git a/repos/system_upgrade/el7toel8/actors/checksaphana/libraries/checksaphana.py b/repos/system_upgrade/el7toel8/actors/checksaphana/libraries/checksaphana.py +index b028b5af..e540ccd1 100644 +--- a/repos/system_upgrade/el7toel8/actors/checksaphana/libraries/checksaphana.py ++++ b/repos/system_upgrade/el7toel8/actors/checksaphana/libraries/checksaphana.py +@@ -133,7 +133,7 @@ def _sp_rev_patchlevel_check(instance): + + + def _fullfills_hana_min_version(instance): +- """ Performs a check whether the version of SAP HANA fullfills the minimal requirements for the target RHEL """ ++ """ Performs a check whether the version of SAP HANA fulfills the minimal requirements for the target RHEL """ + return _major_version_check(instance) and _sp_rev_patchlevel_check(instance) + + +diff --git 
a/repos/system_upgrade/el7toel8/actors/cupscheck/libraries/cupscheck.py b/repos/system_upgrade/el7toel8/actors/cupscheck/libraries/cupscheck.py +index 424503a0..0f990959 100644 +--- a/repos/system_upgrade/el7toel8/actors/cupscheck/libraries/cupscheck.py ++++ b/repos/system_upgrade/el7toel8/actors/cupscheck/libraries/cupscheck.py +@@ -135,10 +135,10 @@ def check_certkey_directives(facts, report_func): + :param obj facts: model object containing info about CUPS configuration + :param func report_func: creates report + """ +- title = ('ServerKey/ServerCertificate directives are substitued ' ++ title = ('ServerKey/ServerCertificate directives are substituted ' + 'by ServerKeychain directive') + summary = ( +- 'The directives were substitued by ServerKeychain directive, ' ++ 'The directives were substituted by ServerKeychain directive, ' + 'which now takes a directory as value (/etc/cups/ssl is default). ' + 'The previous directives took a file as value. ' + 'The migration script will copy the files specified in ' +diff --git a/repos/system_upgrade/el7toel8/actors/grubdevname/tests/test_grubdevname.py b/repos/system_upgrade/el7toel8/actors/grubdevname/tests/test_grubdevname.py +index 07d2c31a..f26a135d 100644 +--- a/repos/system_upgrade/el7toel8/actors/grubdevname/tests/test_grubdevname.py ++++ b/repos/system_upgrade/el7toel8/actors/grubdevname/tests/test_grubdevname.py +@@ -20,7 +20,7 @@ CUR_DIR = os.path.dirname(os.path.abspath(__file__)) + + def raise_call_error(args=None): + raise CalledProcessError( +- message='A Leapp Command Error occured.', ++ message='A Leapp Command Error occurred.', + command=args, + result={'signal': None, 'exit_code': 1, 'pid': 0, 'stdout': 'fake', 'stderr': 'fake'} + ) +diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfread/actor.py b/repos/system_upgrade/el7toel8/actors/multipathconfread/actor.py +index 586c2c7a..da85414b 100644 +--- a/repos/system_upgrade/el7toel8/actors/multipathconfread/actor.py ++++ 
b/repos/system_upgrade/el7toel8/actors/multipathconfread/actor.py +@@ -6,7 +6,7 @@ from leapp.tags import FactsPhaseTag, IPUWorkflowTag + + class MultipathConfRead(Actor): + """ +- Read multipath configuration files and extract the necessary informaton ++ Read multipath configuration files and extract the necessary information + + Related files: + - /etc/multipath.conf +diff --git a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/actor.py b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/actor.py +index fd20f909..221285e1 100644 +--- a/repos/system_upgrade/el7toel8/actors/multipathconfupdate/actor.py ++++ b/repos/system_upgrade/el7toel8/actors/multipathconfupdate/actor.py +@@ -11,7 +11,7 @@ class MultipathConfUpdate(Actor): + 1. commenting out lines for options that no longer exist, or whose value + is no longer current in RHEL-8 + 2. Migrating any options in an devices section with all_devs to an +- overrides setions ++ overrides sections + 3. Rename options that have changed names + """ + +diff --git a/repos/system_upgrade/el7toel8/actors/postgresqlcheck/tests/test_postgresqlcheck.py b/repos/system_upgrade/el7toel8/actors/postgresqlcheck/tests/test_postgresqlcheck.py +index 348f1bd1..658d3e4c 100644 +--- a/repos/system_upgrade/el7toel8/actors/postgresqlcheck/tests/test_postgresqlcheck.py ++++ b/repos/system_upgrade/el7toel8/actors/postgresqlcheck/tests/test_postgresqlcheck.py +@@ -36,7 +36,7 @@ def test_actor_execution(monkeypatch, has_server, has_contrib): + Parametrized helper function for test_actor_* functions. + + First generate list of RPM models based on set arguments. Then, run +- the actor feeded with our RPM list. Finally, assert Reports ++ the actor fed with our RPM list. Finally, assert Reports + according to set arguments. 
+ + Parameters: +@@ -59,7 +59,7 @@ def test_actor_execution(monkeypatch, has_server, has_contrib): + monkeypatch.setattr(api, 'current_actor', curr_actor_mocked) + monkeypatch.setattr(reporting, "create_report", create_report_mocked()) + +- # Executed actor feeded with out fake RPMs ++ # Executed actor fed with out fake RPMs + report_installed_packages(_context=api) + + if has_server and has_contrib: +diff --git a/repos/system_upgrade/el7toel8/actors/quaggatofrr/libraries/quaggatofrr.py b/repos/system_upgrade/el7toel8/actors/quaggatofrr/libraries/quaggatofrr.py +index d05c6032..07bccf95 100644 +--- a/repos/system_upgrade/el7toel8/actors/quaggatofrr/libraries/quaggatofrr.py ++++ b/repos/system_upgrade/el7toel8/actors/quaggatofrr/libraries/quaggatofrr.py +@@ -6,7 +6,7 @@ from leapp.libraries.common.config import version + from leapp.libraries.stdlib import api, CalledProcessError, run + + DAEMON_FILE = '/etc/frr/daemons' +-# if this file sitll exists after the removal of quagga, it has been modified ++# if this file still exists after the removal of quagga, it has been modified + CONFIG_FILE = '/etc/sysconfig/quagga.rpmsave' + QUAGGA_CONF_FILES = '/etc/quagga/' + FRR_CONF_FILES = '/etc/frr/' +diff --git a/repos/system_upgrade/el7toel8/actors/quaggatofrr/tests/files/daemons b/repos/system_upgrade/el7toel8/actors/quaggatofrr/tests/files/daemons +index 9159e49d..6b5ccd4a 100644 +--- a/repos/system_upgrade/el7toel8/actors/quaggatofrr/tests/files/daemons ++++ b/repos/system_upgrade/el7toel8/actors/quaggatofrr/tests/files/daemons +@@ -76,7 +76,7 @@ fabricd_options=("-A 127.0.0.1") + # If the vtysh_enable is yes, then the unified config is read + # and applied if it exists. If no unified frr.conf exists + # then the per-daemon .conf files are used) +-# If vtysh_enable is no or non-existant, the frr.conf is ignored. ++# If vtysh_enable is no or non-existent, the frr.conf is ignored. 
+ # it is highly suggested to have this set to yes + vtysh_enable=yes + +diff --git a/repos/system_upgrade/el7toel8/actors/quaggatofrr/tests/test_unit_quaggatofrr.py b/repos/system_upgrade/el7toel8/actors/quaggatofrr/tests/test_unit_quaggatofrr.py +index 48b46dce..503dbfbc 100644 +--- a/repos/system_upgrade/el7toel8/actors/quaggatofrr/tests/test_unit_quaggatofrr.py ++++ b/repos/system_upgrade/el7toel8/actors/quaggatofrr/tests/test_unit_quaggatofrr.py +@@ -92,7 +92,7 @@ class MockedOpen(object): + Get list of MockedFilePointer objects with the specified fname. + + if the mode is set (expected 'r', 'rw', 'w' ..) discovered files are +- additionaly filtered to match the same mode (same string). ++ additionally filtered to match the same mode (same string). + """ + fnames = [i for i in self._open_called if i.fname == fname] + return fnames if not mode else [i for i in fnames if i.mode == mode] +diff --git a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/actor.py b/repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/actor.py +index ea2e340c..2bbceb5d 100644 +--- a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/actor.py ++++ b/repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/actor.py +@@ -80,7 +80,7 @@ class SatelliteUpgradeFacts(Actor): + Handle migration of the PostgreSQL legacy-actions files. + RPM cannot handle replacement of directories by symlinks by default + without the %pretrans scriptlet. As PostgreSQL package is packaged wrong, +- we have to workround that by migration of the PostgreSQL files ++ we have to workaround that by migration of the PostgreSQL files + before the rpm transaction is processed. 
+ """ + self.produce( +diff --git a/repos/system_upgrade/el7toel8/actors/sctpconfigread/libraries/sctplib.py b/repos/system_upgrade/el7toel8/actors/sctpconfigread/libraries/sctplib.py +index 0db90508..cc002166 100644 +--- a/repos/system_upgrade/el7toel8/actors/sctpconfigread/libraries/sctplib.py ++++ b/repos/system_upgrade/el7toel8/actors/sctpconfigread/libraries/sctplib.py +@@ -88,7 +88,7 @@ def was_sctp_used(): + + def is_sctp_wanted(): + """ +- Decision making funtion that decides based on the current or past usage of SCTP, the SCTP module is wanted ++ Decision making function that decides based on the current or past usage of SCTP, the SCTP module is wanted + on the new system. + + :return: True if SCTP seems to be in use or has been recently used. +diff --git a/repos/system_upgrade/el7toel8/actors/spamassassinconfigcheck/libraries/spamassassinconfigcheck.py b/repos/system_upgrade/el7toel8/actors/spamassassinconfigcheck/libraries/spamassassinconfigcheck.py +index 1399b7b6..3a4cf186 100644 +--- a/repos/system_upgrade/el7toel8/actors/spamassassinconfigcheck/libraries/spamassassinconfigcheck.py ++++ b/repos/system_upgrade/el7toel8/actors/spamassassinconfigcheck/libraries/spamassassinconfigcheck.py +@@ -72,7 +72,7 @@ def _check_spamd_config_service_type(facts, report_func): + title = 'The type of the spamassassin systemd service has changed' + summary_generic = 'The type of spamassassin.service has been changed from "forking" to "simple".' + if facts.service_overriden: +- summary_detail = 'However, the service appears to be overriden; no migration action will occur.' ++ summary_detail = 'However, the service appears to be overridden; no migration action will occur.' + resource = reporting.RelatedResource('file', SPAMASSASSIN_SERVICE_OVERRIDE) + else: + summary_detail = 'The spamassassin sysconfig file will be updated.' 
+diff --git a/repos/system_upgrade/el7toel8/actors/spamassassinconfigcheck/tests/test_library_spamassassinconfigcheck.py b/repos/system_upgrade/el7toel8/actors/spamassassinconfigcheck/tests/test_library_spamassassinconfigcheck.py +index 97562c3e..a54dae21 100644 +--- a/repos/system_upgrade/el7toel8/actors/spamassassinconfigcheck/tests/test_library_spamassassinconfigcheck.py ++++ b/repos/system_upgrade/el7toel8/actors/spamassassinconfigcheck/tests/test_library_spamassassinconfigcheck.py +@@ -119,7 +119,7 @@ def test_check_spamd_config_service_type_service_overriden(): + report_fields = report_func.report_fields + assert 'type of the spamassassin systemd service' in report_fields['title'] + assert 'The type of spamassassin.service' in report_fields['summary'] +- assert 'overriden' in report_fields['summary'] ++ assert 'overridden' in report_fields['summary'] + assert report_fields['severity'] == 'medium' + + +diff --git a/repos/system_upgrade/el7toel8/actors/spamassassinconfigread/actor.py b/repos/system_upgrade/el7toel8/actors/spamassassinconfigread/actor.py +index 832cdde6..6b88968d 100644 +--- a/repos/system_upgrade/el7toel8/actors/spamassassinconfigread/actor.py ++++ b/repos/system_upgrade/el7toel8/actors/spamassassinconfigread/actor.py +@@ -11,7 +11,7 @@ class SpamassassinConfigRead(Actor): + """ + Reads spamc configuration (/etc/mail/spamassassin/spamc.conf), the + spamassassin sysconfig file (/etc/sysconfig/spamassassin) and checks +- whether the spamassassin service has been overriden. Produces ++ whether the spamassassin service has been overridden. Produces + SpamassassinFacts containing the extracted information. 
+ """ + +diff --git a/repos/system_upgrade/el7toel8/actors/spamassassinconfigread/libraries/spamassassinconfigread.py b/repos/system_upgrade/el7toel8/actors/spamassassinconfigread/libraries/spamassassinconfigread.py +index 6cb86d4c..fde24f16 100644 +--- a/repos/system_upgrade/el7toel8/actors/spamassassinconfigread/libraries/spamassassinconfigread.py ++++ b/repos/system_upgrade/el7toel8/actors/spamassassinconfigread/libraries/spamassassinconfigread.py +@@ -17,7 +17,7 @@ def is_processable(): + def get_spamassassin_facts(read_func, listdir): + """ + Reads the spamc configuration file, the spamassassin sysconfig file and checks +- whether the spamassassin service is overriden. Returns SpamassassinFacts. ++ whether the spamassassin service is overridden. Returns SpamassassinFacts. + """ + spamc_ssl_argument = spamassassinconfigread_spamc.get_spamc_ssl_argument(read_func) + service_overriden = spamassassinconfigread_spamd.spamassassin_service_overriden(listdir) +diff --git a/repos/system_upgrade/el7toel8/actors/spamassassinconfigread/tests/test_lib_spamd_spamassassinconfigread.py b/repos/system_upgrade/el7toel8/actors/spamassassinconfigread/tests/test_lib_spamd_spamassassinconfigread.py +index a3b1f94f..8c2a9179 100644 +--- a/repos/system_upgrade/el7toel8/actors/spamassassinconfigread/tests/test_lib_spamd_spamassassinconfigread.py ++++ b/repos/system_upgrade/el7toel8/actors/spamassassinconfigread/tests/test_lib_spamd_spamassassinconfigread.py +@@ -45,32 +45,32 @@ class MockListDir(object): + + def test_spamassassin_service_overriden(): + listdir = MockListDir(path='/etc/systemd/system', file_names=['spamassassin.service']) +- overriden = spamassassinconfigread_spamd.spamassassin_service_overriden(listdir.listdir) +- assert overriden is True ++ overridden = spamassassinconfigread_spamd.spamassassin_service_overriden(listdir.listdir) ++ assert overridden is True + + listdir = MockListDir(path='/etc/systemd/system', + file_names=['foo.service', 'spamassassin.service', 
'bar.service']) +- overriden = spamassassinconfigread_spamd.spamassassin_service_overriden(listdir.listdir) +- assert overriden is True ++ overridden = spamassassinconfigread_spamd.spamassassin_service_overriden(listdir.listdir) ++ assert overridden is True + assert not listdir.error + + + def test_spamassassin_service_overriden_nonexistent(): + listdir = MockListDir(path='/etc/systemd/system', file_names=[]) +- overriden = spamassassinconfigread_spamd.spamassassin_service_overriden(listdir.listdir) +- assert overriden is False ++ overridden = spamassassinconfigread_spamd.spamassassin_service_overriden(listdir.listdir) ++ assert overridden is False + + listdir = MockListDir(path='/etc/systemd/system', + file_names=['foo.service', 'bar.service']) +- overriden = spamassassinconfigread_spamd.spamassassin_service_overriden(listdir.listdir) +- assert overriden is False ++ overridden = spamassassinconfigread_spamd.spamassassin_service_overriden(listdir.listdir) ++ assert overridden is False + assert not listdir.error + + + def test_spamassassin_service_overriden_nonexistent_dir(): + listdir = MockListDir(to_raise=make_OSError(errno.ENOENT)) +- overriden = spamassassinconfigread_spamd.spamassassin_service_overriden(listdir.listdir) +- assert overriden is False ++ overridden = spamassassinconfigread_spamd.spamassassin_service_overriden(listdir.listdir) ++ assert overridden is False + + + def test_spamassassin_service_overriden_nonexistent_inaccessible(): +@@ -78,8 +78,8 @@ def test_spamassassin_service_overriden_nonexistent_inaccessible(): + # so that the SpamassassinConfigUpdate actor doesn't make changes to + # /etc/sysconfig/spamassassin that may not be justified. 
+ listdir = MockListDir(to_raise=make_OSError(errno.EACCES)) +- overriden = spamassassinconfigread_spamd.spamassassin_service_overriden(listdir.listdir) +- assert overriden is True ++ overridden = spamassassinconfigread_spamd.spamassassin_service_overriden(listdir.listdir) ++ assert overridden is True + + + def test_parse_ssl_version_sslv3(): +diff --git a/repos/system_upgrade/el7toel8/actors/spamassassinconfigupdate/tests/test_lib_spamd_spamassassinconfigupdate.py b/repos/system_upgrade/el7toel8/actors/spamassassinconfigupdate/tests/test_lib_spamd_spamassassinconfigupdate.py +index f8e14756..9acc4109 100644 +--- a/repos/system_upgrade/el7toel8/actors/spamassassinconfigupdate/tests/test_lib_spamd_spamassassinconfigupdate.py ++++ b/repos/system_upgrade/el7toel8/actors/spamassassinconfigupdate/tests/test_lib_spamd_spamassassinconfigupdate.py +@@ -239,7 +239,7 @@ def test_rewrite_spamd_config(): + + + def test_rewrite_spamd_config_service_overriden(): +- # If the service is overriden, the service type (simple/forking) remains ++ # If the service is overridden, the service type (simple/forking) remains + # the same after upgrade. So we must not remove the -d option. 
+ facts = SpamassassinFacts(spamd_ssl_version='sslv3', service_overriden=True) + content = '# Options to spamd\n' \ +diff --git a/repos/system_upgrade/el7toel8/actors/tcpwrapperscheck/libraries/tcpwrapperscheck.py b/repos/system_upgrade/el7toel8/actors/tcpwrapperscheck/libraries/tcpwrapperscheck.py +index 5d98c428..5c3e93bb 100644 +--- a/repos/system_upgrade/el7toel8/actors/tcpwrapperscheck/libraries/tcpwrapperscheck.py ++++ b/repos/system_upgrade/el7toel8/actors/tcpwrapperscheck/libraries/tcpwrapperscheck.py +@@ -8,17 +8,17 @@ def config_affects_daemons(tcp_wrappers_facts, packages_list, daemons): + + :param tcp_wrappers_facts: Facts provided by the TcpWrappersFacts + :param packages_list: List of packages provided by InstalledRedHatSignedRPM +- :param deamons: List of packages and keywords affecting daemons in this format: ++ :param daemons: List of packages and keywords affecting daemons in this format: + [{"package-name", ["daemon1", "daemon2", ...], ...}] + """ + found_packages = set() + + for (package, keywords) in daemons: +- # We do not care for particular deamon if the providing package is not installed ++ # We do not care for particular daemon if the providing package is not installed + if package not in packages_list: + continue + +- # Every package can have several deamons or deamons reacting to several keywords ++ # Every package can have several daemons or daemons reacting to several keywords + for daemon in keywords: + # Is this daemon/keyword affected by the current configuration? 
+ if not config_applies_to_daemon(tcp_wrappers_facts, daemon): +diff --git a/repos/system_upgrade/el7toel8/actors/ziplcheckbootentries/libraries/ziplcheckbootentries.py b/repos/system_upgrade/el7toel8/actors/ziplcheckbootentries/libraries/ziplcheckbootentries.py +index c9f93b79..757af6c8 100644 +--- a/repos/system_upgrade/el7toel8/actors/ziplcheckbootentries/libraries/ziplcheckbootentries.py ++++ b/repos/system_upgrade/el7toel8/actors/ziplcheckbootentries/libraries/ziplcheckbootentries.py +@@ -58,7 +58,7 @@ def extract_kernel_version(kernel_img_path): + """ + Extracts the kernel version out of the given image path. + +- The extraction logic is designed to closely mimick the logic Zipl configuration to BLS ++ The extraction logic is designed to closely mimic the logic Zipl configuration to BLS + conversion script works, so that it is possible to identify the possible issues with kernel + images. + +@@ -67,7 +67,7 @@ def extract_kernel_version(kernel_img_path): + :rtype: str + """ + +- # Mimick bash substitution used in the conversion script, see: ++ # Mimic bash substitution used in the conversion script, see: + # https://github.com/ibm-s390-linux/s390-tools/blob/b5604850ab66f862850568a37404faa647b5c098/scripts/zipl-switch-to-blscfg#L168 + if 'vmlinuz-' in kernel_img_path: + fragments = kernel_img_path.rsplit('/vmlinuz-', 1) +diff --git a/repos/system_upgrade/el7toel8/actors/ziplconverttoblscfg/actor.py b/repos/system_upgrade/el7toel8/actors/ziplconverttoblscfg/actor.py +index dab0ae6c..e80c335d 100644 +--- a/repos/system_upgrade/el7toel8/actors/ziplconverttoblscfg/actor.py ++++ b/repos/system_upgrade/el7toel8/actors/ziplconverttoblscfg/actor.py +@@ -73,5 +73,5 @@ class ZiplConvertToBLSCFG(Actor): + # NOTE: Basically, just removal of the /boot/loader dir content inside + # could be enough, but we cannot remove /boot/loader because of boom + # - - if we remove it, we will remove the snapshot as well +- # - - on the other hand, we should't keep it there if zipl ++ # - 
- on the other hand, we shouldn't keep it there if zipl + # - - has not been converted to BLS +diff --git a/repos/system_upgrade/el7toel8/libraries/isccfg.py b/repos/system_upgrade/el7toel8/libraries/isccfg.py +index 0a3f63fd..dff9bf24 100644 +--- a/repos/system_upgrade/el7toel8/libraries/isccfg.py ++++ b/repos/system_upgrade/el7toel8/libraries/isccfg.py +@@ -51,7 +51,7 @@ class ConfigFile(object): + + + class MockConfig(ConfigFile): +- """Configuration file with contens defined on constructor. ++ """Configuration file with contents defined on constructor. + + Intended for testing the library. + """ +@@ -501,7 +501,7 @@ class IscConfigParser(object): + choose the first one. + + The function would be confusing in case of brackets, but content between +- brackets is not evaulated as new tokens. ++ brackets is not evaluated as new tokens. + E.g.: + + "find { me };" : 5 +@@ -630,7 +630,7 @@ class IscConfigParser(object): + :param index: start searching from the index + :param end_index: stop searching at the end_index or end of the string + +- Funtion is not recursive. Searched key has to be in the current scope. ++ Function is not recursive. Searched key has to be in the current scope. + Attention: + + In case that input string contains data outside of section by mistake, +diff --git a/repos/system_upgrade/el7toel8/libraries/vsftpdutils.py b/repos/system_upgrade/el7toel8/libraries/vsftpdutils.py +index c2d3b005..776c5b2d 100644 +--- a/repos/system_upgrade/el7toel8/libraries/vsftpdutils.py ++++ b/repos/system_upgrade/el7toel8/libraries/vsftpdutils.py +@@ -25,7 +25,7 @@ def get_config_contents(path, read_func=read_file): + + Try to read a vsftpd configuration file, log a warning if an error happens. + :param path: File path +- :param read_func: Function to use to read the file. This is meant to be overriden in tests. ++ :param read_func: Function to use to read the file. This is meant to be overridden in tests. 
+ :return: File contents or None, if the file could not be read + """ + try: +@@ -40,7 +40,7 @@ def get_default_config_hash(read_func=read_file): + """ + Read the default vsftpd configuration file (/etc/vsftpd/vsftpd.conf) and return its hash. + +- :param read_func: Function to use to read the file. This is meant to be overriden in tests. ++ :param read_func: Function to use to read the file. This is meant to be overridden in tests. + :return SHA1 hash of the configuration file, or None if the file could not be read. + """ + content = get_config_contents(VSFTPD_DEFAULT_CONFIG_PATH, read_func=read_func) +diff --git a/repos/system_upgrade/el7toel8/models/spamassassinfacts.py b/repos/system_upgrade/el7toel8/models/spamassassinfacts.py +index 6262295e..c0755aed 100644 +--- a/repos/system_upgrade/el7toel8/models/spamassassinfacts.py ++++ b/repos/system_upgrade/el7toel8/models/spamassassinfacts.py +@@ -19,5 +19,5 @@ class SpamassassinFacts(Model): + + service_overriden = fields.Boolean() + """ +- True if spamassassin.service is overriden, else False. ++ True if spamassassin.service is overridden, else False. + """ +diff --git a/repos/system_upgrade/el8toel9/actors/checkblsgrubcfgonppc64/actor.py b/repos/system_upgrade/el8toel9/actors/checkblsgrubcfgonppc64/actor.py +index d14e5aca..748ecd28 100644 +--- a/repos/system_upgrade/el8toel9/actors/checkblsgrubcfgonppc64/actor.py ++++ b/repos/system_upgrade/el8toel9/actors/checkblsgrubcfgonppc64/actor.py +@@ -10,7 +10,7 @@ class CheckBlsGrubOnPpc64(Actor): + + After a ppc64 system is upgraded from RHEL 8 to RHEL 9 and + GRUB config on RHEL 8 is not yet BLS aware, the system boots +- into el8 kernel because the config is not successfuly migrated by ++ into el8 kernel because the config is not successfully migrated by + GRUB during the upgrade process. 
+ + IMPORTANT NOTE: The later fix which is based on the outcome of this +diff --git a/repos/system_upgrade/el8toel9/actors/checkvdo/actor.py b/repos/system_upgrade/el8toel9/actors/checkvdo/actor.py +index c28b3a98..4158253a 100644 +--- a/repos/system_upgrade/el8toel9/actors/checkvdo/actor.py ++++ b/repos/system_upgrade/el8toel9/actors/checkvdo/actor.py +@@ -12,7 +12,7 @@ class CheckVdo(Actor): + + `Background` + ============ +- In RHEL 9.0 the indepdent VDO management software, `vdo manager`, is ++ In RHEL 9.0 the independent VDO management software, `vdo manager`, is + superseded by LVM management. Existing VDOs must be converted to LVM-based + management *before* upgrading to RHEL 9.0. + +diff --git a/repos/system_upgrade/el8toel9/actors/checkvdo/libraries/checkvdo.py b/repos/system_upgrade/el8toel9/actors/checkvdo/libraries/checkvdo.py +index 9ba5c70c..135a279d 100644 +--- a/repos/system_upgrade/el8toel9/actors/checkvdo/libraries/checkvdo.py ++++ b/repos/system_upgrade/el8toel9/actors/checkvdo/libraries/checkvdo.py +@@ -40,7 +40,7 @@ def _process_post_conversion_vdos(vdos): + 'device format level; however, the expected LVM management ' + 'portion of the conversion did not take place. This ' + 'indicates that an exceptional condition (for example, a ' +- 'system crash) likely occured during the conversion ' ++ 'system crash) likely occurred during the conversion ' + 'process. 
The LVM portion of the conversion must be ' + 'performed in order for upgrade to proceed.')) + +diff --git a/repos/system_upgrade/el8toel9/actors/dotnet/tests/test_dotnet.py b/repos/system_upgrade/el8toel9/actors/dotnet/tests/test_dotnet.py +index 744a4e0b..93fae569 100644 +--- a/repos/system_upgrade/el8toel9/actors/dotnet/tests/test_dotnet.py ++++ b/repos/system_upgrade/el8toel9/actors/dotnet/tests/test_dotnet.py +@@ -33,7 +33,7 @@ def test_actor_execution(monkeypatch, current_actor_context, unsupported_version + for version in unsupported_versions: + rpms += [_generate_rpm_with_name(f'dotnet-runtime-{version}')] + +- # Executed actor feeded with fake RPMs ++ # Executed actor fed with fake RPMs + current_actor_context.feed(InstalledRedHatSignedRPM(items=rpms)) + current_actor_context.run() + +diff --git a/repos/system_upgrade/el8toel9/actors/mariadbcheck/tests/test_mariadbcheck.py b/repos/system_upgrade/el8toel9/actors/mariadbcheck/tests/test_mariadbcheck.py +index e91345f2..15e70ede 100644 +--- a/repos/system_upgrade/el8toel9/actors/mariadbcheck/tests/test_mariadbcheck.py ++++ b/repos/system_upgrade/el8toel9/actors/mariadbcheck/tests/test_mariadbcheck.py +@@ -35,7 +35,7 @@ def test_actor_execution(monkeypatch, has_server): + Parametrized helper function for test_actor_* functions. + + First generate list of RPM models based on set arguments. Then, run +- the actor feeded with our RPM list. Finally, assert Reports ++ the actor fed with our RPM list. Finally, assert Reports + according to set arguments. 
+ + Parameters: +@@ -54,7 +54,7 @@ def test_actor_execution(monkeypatch, has_server): + monkeypatch.setattr(api, 'current_actor', curr_actor_mocked) + monkeypatch.setattr(reporting, "create_report", create_report_mocked()) + +- # Executed actor feeded with fake RPMs ++ # Executed actor fed with fake RPMs + report_installed_packages(_context=api) + + if has_server: +diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfread/actor.py b/repos/system_upgrade/el8toel9/actors/multipathconfread/actor.py +index 92184c70..ec558cbe 100644 +--- a/repos/system_upgrade/el8toel9/actors/multipathconfread/actor.py ++++ b/repos/system_upgrade/el8toel9/actors/multipathconfread/actor.py +@@ -6,7 +6,7 @@ from leapp.tags import FactsPhaseTag, IPUWorkflowTag + + class MultipathConfRead8to9(Actor): + """ +- Read multipath configuration files and extract the necessary informaton ++ Read multipath configuration files and extract the necessary information + + Related files: + - /etc/multipath.conf +diff --git a/repos/system_upgrade/el8toel9/actors/nischeck/libraries/nischeck.py b/repos/system_upgrade/el8toel9/actors/nischeck/libraries/nischeck.py +index 6bd15991..7e3d9a80 100644 +--- a/repos/system_upgrade/el8toel9/actors/nischeck/libraries/nischeck.py ++++ b/repos/system_upgrade/el8toel9/actors/nischeck/libraries/nischeck.py +@@ -23,7 +23,7 @@ def report_nis(): + Create the report if any of NIS packages (RH signed) + is installed and configured. + +- Should notify user about present NIS compnent package ++ Should notify user about present NIS component package + installation, warn them about discontinuation, and + redirect them to online documentation for possible + alternatives. 
+diff --git a/repos/system_upgrade/el8toel9/actors/nischeck/tests/test_nischeck.py b/repos/system_upgrade/el8toel9/actors/nischeck/tests/test_nischeck.py +index 7ebde0ac..7bdfed02 100644 +--- a/repos/system_upgrade/el8toel9/actors/nischeck/tests/test_nischeck.py ++++ b/repos/system_upgrade/el8toel9/actors/nischeck/tests/test_nischeck.py +@@ -31,11 +31,11 @@ def test_actor_nis(monkeypatch, pkgs_installed, pkgs_configured): + Parametrized helper function for test_actor_* functions. + + First generate list of RPM models based on set arguments. Then, run +- the actor feeded with our RPM list and mocked functions. Finally, assert ++ the actor fed with our RPM list and mocked functions. Finally, assert + Reports according to set arguments. + + Parameters: +- pkgs_installed (touple): installed pkgs ++ pkgs_installed (tuple): installed pkgs + fill_conf_file (bool): not default ypbind config file + fill_ypserv_dir (bool): not default ypserv dir content + """ +@@ -55,7 +55,7 @@ def test_actor_nis(monkeypatch, pkgs_installed, pkgs_configured): + monkeypatch.setattr(api, 'current_actor', curr_actor_mocked) + monkeypatch.setattr(reporting, "create_report", create_report_mocked()) + +- # Executed actor feeded with out fake msgs ++ # Executed actor fed with out fake msgs + nischeck.report_nis() + + # Iterate through installed packages +diff --git a/repos/system_upgrade/el8toel9/actors/nisscanner/libraries/nisscan.py b/repos/system_upgrade/el8toel9/actors/nisscanner/libraries/nisscan.py +index 541d4037..9910f748 100644 +--- a/repos/system_upgrade/el8toel9/actors/nisscanner/libraries/nisscan.py ++++ b/repos/system_upgrade/el8toel9/actors/nisscanner/libraries/nisscan.py +@@ -26,7 +26,7 @@ class NISScanLibrary: + lines = [line.strip() for line in f.readlines() if line.strip()] + + for line in lines: +- # Cheks for any valid configuration entry ++ # Checks for any valid configuration entry + if not line.startswith('#'): + return True + return False +diff --git 
a/repos/system_upgrade/el8toel9/actors/nisscanner/tests/test_nisscan.py b/repos/system_upgrade/el8toel9/actors/nisscanner/tests/test_nisscan.py +index 8f463641..ed000ce0 100644 +--- a/repos/system_upgrade/el8toel9/actors/nisscanner/tests/test_nisscan.py ++++ b/repos/system_upgrade/el8toel9/actors/nisscanner/tests/test_nisscan.py +@@ -32,11 +32,11 @@ def test_actor_nisscan(monkeypatch, pkgs_installed, fill_conf_file, fill_ypserv_ + """ + Parametrized helper function for test_actor_* functions. + +- Run the actor feeded with our mocked functions and assert ++ Run the actor fed with our mocked functions and assert + produced messages according to set arguments. + + Parameters: +- pkgs_installed (touple): installed pkgs ++ pkgs_installed (tuple): installed pkgs + fill_conf_file (bool): not default ypbind config file + fill_ypserv_dir (bool): not default ypserv dir content + """ +@@ -64,7 +64,7 @@ def test_actor_nisscan(monkeypatch, pkgs_installed, fill_conf_file, fill_ypserv_ + monkeypatch.setattr(nisscan.os.path, 'isfile', lambda dummy: mocked_isfile) + monkeypatch.setattr(nisscan.os.path, 'isdir', lambda dummy: mocked_isdir) + +- # Executed actor feeded with mocked functions ++ # Executed actor fed with mocked functions + nisscan.NISScanLibrary().process() + + # Filter NIS pkgs +diff --git a/repos/system_upgrade/el8toel9/actors/opensshdropindirectory/libraries/opensshdropindirectory.py b/repos/system_upgrade/el8toel9/actors/opensshdropindirectory/libraries/opensshdropindirectory.py +index d55eee1c..3b4b24f8 100644 +--- a/repos/system_upgrade/el8toel9/actors/opensshdropindirectory/libraries/opensshdropindirectory.py ++++ b/repos/system_upgrade/el8toel9/actors/opensshdropindirectory/libraries/opensshdropindirectory.py +@@ -13,7 +13,7 @@ INCLUDE_BLOCK = ''.join(('# Added by leapp during upgrade from RHEL8 to RHEL9\n' + + def prepend_string_if_not_present(f, content, check_string): + """ +- This reads the open file descriptor and checks for presense of the 
`check_string`. ++ This reads the open file descriptor and checks for presence of the `check_string`. + If not present, the `content` is prepended to the original content of the file and + result is written. + Note, that this requires opened file for both reading and writing, for example with: +diff --git a/repos/system_upgrade/el8toel9/actors/opensshdropindirectorycheck/actor.py b/repos/system_upgrade/el8toel9/actors/opensshdropindirectorycheck/actor.py +index b1b445bc..19f0e44d 100644 +--- a/repos/system_upgrade/el8toel9/actors/opensshdropindirectorycheck/actor.py ++++ b/repos/system_upgrade/el8toel9/actors/opensshdropindirectorycheck/actor.py +@@ -47,7 +47,7 @@ class OpenSshDropInDirectoryCheck(Actor): + reporting.RelatedResource('file', '/etc/ssh/sshd_config') + ] + reporting.create_report([ +- reporting.Title('The upgrade will prepend the Incude directive to OpenSSH sshd_config'), ++ reporting.Title('The upgrade will prepend the Include directive to OpenSSH sshd_config'), + reporting.Summary( + 'OpenSSH server configuration needs to be modified to contain Include directive ' + 'for the RHEL9 to work properly and integrate with the other parts of the OS. 
' +diff --git a/repos/system_upgrade/el8toel9/actors/opensslproviders/libraries/add_provider.py b/repos/system_upgrade/el8toel9/actors/opensslproviders/libraries/add_provider.py +index fb287ce4..91462f18 100644 +--- a/repos/system_upgrade/el8toel9/actors/opensslproviders/libraries/add_provider.py ++++ b/repos/system_upgrade/el8toel9/actors/opensslproviders/libraries/add_provider.py +@@ -71,7 +71,7 @@ def _append(lines, add, comment=None): + + def _modify_file(f, fail_on_error=True): + """ +- Modify the openssl configuration file to accomodate el8toel9 changes ++ Modify the openssl configuration file to accommodate el8toel9 changes + """ + lines = f.readlines() + lines = _replace(lines, r"openssl_conf\s*=\s*default_modules", +diff --git a/repos/system_upgrade/el8toel9/actors/postgresqlcheck/tests/test_postgresqlcheck.py b/repos/system_upgrade/el8toel9/actors/postgresqlcheck/tests/test_postgresqlcheck.py +index 41d3a30b..7b6f4384 100644 +--- a/repos/system_upgrade/el8toel9/actors/postgresqlcheck/tests/test_postgresqlcheck.py ++++ b/repos/system_upgrade/el8toel9/actors/postgresqlcheck/tests/test_postgresqlcheck.py +@@ -35,7 +35,7 @@ def test_actor_execution(monkeypatch, has_server): + Parametrized helper function for test_actor_* functions. + + First generate list of RPM models based on set arguments. Then, run +- the actor feeded with our RPM list. Finally, assert Reports ++ the actor fed with our RPM list. Finally, assert Reports + according to set arguments. 
+ + Parameters: +@@ -54,7 +54,7 @@ def test_actor_execution(monkeypatch, has_server): + monkeypatch.setattr(api, 'current_actor', curr_actor_mocked) + monkeypatch.setattr(reporting, "create_report", create_report_mocked()) + +- # Executed actor feeded with out fake RPMs ++ # Executed actor fed with out fake RPMs + report_installed_packages(_context=api) + + if has_server: +diff --git a/repos/system_upgrade/el8toel9/actors/pythonthreetmpworkaround/actor.py b/repos/system_upgrade/el8toel9/actors/pythonthreetmpworkaround/actor.py +index cb04a268..b3737380 100644 +--- a/repos/system_upgrade/el8toel9/actors/pythonthreetmpworkaround/actor.py ++++ b/repos/system_upgrade/el8toel9/actors/pythonthreetmpworkaround/actor.py +@@ -13,7 +13,7 @@ class PythonThreeTmpWorkaround(Actor): + + During the RPM upgrade the /usr/bin/python3 is removed because of problem + in alternatives. The fix requires new builds of python36 on RHEL8, python3 +- on RHEL 9 ans alternatives on both systems. Once the internal repositories ++ on RHEL 9 and alternatives on both systems. Once the internal repositories + are updated, we can drop this. If the /usr/bin/python3 file exists, + do nothing. 
+ """ +diff --git a/repos/system_upgrade/el8toel9/actors/targetuserspacecryptopolicies/libraries/targetuserspacecryptopolicies.py b/repos/system_upgrade/el8toel9/actors/targetuserspacecryptopolicies/libraries/targetuserspacecryptopolicies.py +index 93eea5b0..ddb7ad33 100644 +--- a/repos/system_upgrade/el8toel9/actors/targetuserspacecryptopolicies/libraries/targetuserspacecryptopolicies.py ++++ b/repos/system_upgrade/el8toel9/actors/targetuserspacecryptopolicies/libraries/targetuserspacecryptopolicies.py +@@ -41,7 +41,7 @@ def _set_crypto_policy(context, current_policy): + def process(): + target_userspace_info = next(api.consume(TargetUserSpaceInfo), None) + if not target_userspace_info: +- # nothing to do - an error occured in previous actors and upgrade will be inhibited ++ # nothing to do - an error occurred in previous actors and upgrade will be inhibited + api.current_logger().error('Missing the TargetUserSpaceInfo message. Probably it has not been created before.') + return + cpi = next(api.consume(CryptoPolicyInfo), None) +diff --git a/repos/system_upgrade/el8toel9/actors/vdoconversionscanner/actor.py b/repos/system_upgrade/el8toel9/actors/vdoconversionscanner/actor.py +index 3061e206..035299e3 100644 +--- a/repos/system_upgrade/el8toel9/actors/vdoconversionscanner/actor.py ++++ b/repos/system_upgrade/el8toel9/actors/vdoconversionscanner/actor.py +@@ -10,7 +10,7 @@ class VdoConversionScanner(Actor): + + A VdoConversionInfo message containing the data will be produced. + +- In RHEL 9.0 the indepdent VDO management software, `vdo manager`, is ++ In RHEL 9.0 the independent VDO management software, `vdo manager`, is + superseded by LVM management. Existing VDOs must be converted to LVM-based + management *before* upgrading to RHEL 9.0. 
+ +diff --git a/repos/system_upgrade/el8toel9/models/opensslconfig.py b/repos/system_upgrade/el8toel9/models/opensslconfig.py +index 94fcbcbd..831256d2 100644 +--- a/repos/system_upgrade/el8toel9/models/opensslconfig.py ++++ b/repos/system_upgrade/el8toel9/models/opensslconfig.py +@@ -58,7 +58,7 @@ class OpenSslConfig(Model): + + It is used to load default TLS policy in RHEL8, but controls loading of all + providers in RHEL9 so it needs to be adjusted for upgrade. This is listed +- befor any block. ++ before any block. + """ + + blocks = fields.List(fields.Model(OpenSslConfigBlock)) +diff --git a/utils/ibdmp-decode b/utils/ibdmp-decode +index 74a8dd2a..1386835e 100755 +--- a/utils/ibdmp-decode ++++ b/utils/ibdmp-decode +@@ -16,7 +16,7 @@ def USAGE(): + lines = [ + "usage: %s path/to/console.log path/to/target.tar.xz" % self, + "", +- "Decode debug tarball emited by leapp's initramfs in-band", ++ "Decode debug tarball emitted by leapp's initramfs in-band", + "console debugger, ibdmp().", + ] + sys.stderr.writelines('%s\n' % l for l in lines) +-- +2.38.1 + diff --git a/SOURCES/0007-Mini-updateds-in-the-spec-files.patch b/SOURCES/0007-Mini-updateds-in-the-spec-files.patch new file mode 100644 index 0000000..fca1782 --- /dev/null +++ b/SOURCES/0007-Mini-updateds-in-the-spec-files.patch @@ -0,0 +1,39 @@ +From 25adde3fe09d200a3f8bc42af1ebcf07b179fb85 Mon Sep 17 00:00:00 2001 +From: Petr Stodulka +Date: Thu, 1 Sep 2022 11:19:18 +0200 +Subject: [PATCH 07/32] Mini updateds in the spec files + +To synchronize better with the downstream specfile, making lives +of people again a little bit easier. +--- + packaging/leapp-repository.spec | 7 +++++++ + 1 file changed, 7 insertions(+) + +diff --git a/packaging/leapp-repository.spec b/packaging/leapp-repository.spec +index c59f8acd..89750927 100644 +--- a/packaging/leapp-repository.spec ++++ b/packaging/leapp-repository.spec +@@ -53,6 +53,10 @@ Source1: deps-pkgs.tar.gz + # NOTE: Our packages must be noarch. 
Do no drop this in any way. + BuildArch: noarch + ++### PATCHES HERE ++# Patch0001: filename.patch ++ ++ + %description + %{summary} + +@@ -182,6 +186,9 @@ Requires: dracut + %setup -n %{name}-%{version} + %setup -q -n %{name}-%{version} -D -T -a 1 + ++# APPLY PATCHES HERE ++# %%patch0001 -p1 ++ + + %build + %if 0%{?rhel} == 7 +-- +2.38.1 + diff --git a/SOURCES/0001-CheckVDO-Ask-user-only-faiulres-and-undetermined-dev.patch b/SOURCES/0008-CheckVDO-Ask-user-only-faiulres-and-undetermined-dev.patch similarity index 97% rename from SOURCES/0001-CheckVDO-Ask-user-only-faiulres-and-undetermined-dev.patch rename to SOURCES/0008-CheckVDO-Ask-user-only-faiulres-and-undetermined-dev.patch index b4d4e68..ccedaa3 100644 --- a/SOURCES/0001-CheckVDO-Ask-user-only-faiulres-and-undetermined-dev.patch +++ b/SOURCES/0008-CheckVDO-Ask-user-only-faiulres-and-undetermined-dev.patch @@ -1,8 +1,8 @@ -From 505963d51e3989a7d907861dd870133c670ccb78 Mon Sep 17 00:00:00 2001 +From 921a3f5ae0fa75ef04eb56857b5f07275e39c112 Mon Sep 17 00:00:00 2001 From: Joe Shimkus Date: Wed, 24 Aug 2022 13:30:19 -0400 -Subject: [PATCH] CheckVDO: Ask user only faiulres and undetermined devices (+ - report update) +Subject: [PATCH 08/32] CheckVDO: Ask user only faiulres and undetermined + devices (+ report update) The previous solution made possible to skip the VDO check answering the user question (confirming no vdo devices are present) if the @@ -43,24 +43,15 @@ to other "unstable" block devices. 
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2096159 Jira: OAMG-7025 --- - .../el8toel9/actors/checkvdo/actor.py | 96 +++++++---- + .../el8toel9/actors/checkvdo/actor.py | 94 +++++++---- .../actors/checkvdo/libraries/checkvdo.py | 155 ++++++++++-------- .../checkvdo/tests/unit_test_checkvdo.py | 44 +++-- - 3 files changed, 184 insertions(+), 111 deletions(-) + 3 files changed, 183 insertions(+), 110 deletions(-) diff --git a/repos/system_upgrade/el8toel9/actors/checkvdo/actor.py b/repos/system_upgrade/el8toel9/actors/checkvdo/actor.py -index c28b3a9..d43bac0 100644 +index 4158253a..d43bac0b 100644 --- a/repos/system_upgrade/el8toel9/actors/checkvdo/actor.py +++ b/repos/system_upgrade/el8toel9/actors/checkvdo/actor.py -@@ -12,7 +12,7 @@ class CheckVdo(Actor): - - `Background` - ============ -- In RHEL 9.0 the indepdent VDO management software, `vdo manager`, is -+ In RHEL 9.0 the independent VDO management software, `vdo manager`, is - superseded by LVM management. Existing VDOs must be converted to LVM-based - management *before* upgrading to RHEL 9.0. - @@ -32,12 +32,24 @@ class CheckVdo(Actor): If the VdoConversionInfo model indicates unexpected errors occurred during scanning CheckVdo will produce appropriate inhibitory reports. @@ -175,7 +166,7 @@ index c28b3a9..d43bac0 100644 def process(self): for conversion_info in self.consume(VdoConversionInfo): diff --git a/repos/system_upgrade/el8toel9/actors/checkvdo/libraries/checkvdo.py b/repos/system_upgrade/el8toel9/actors/checkvdo/libraries/checkvdo.py -index 9ba5c70..3b161c9 100644 +index 135a279d..3b161c9b 100644 --- a/repos/system_upgrade/el8toel9/actors/checkvdo/libraries/checkvdo.py +++ b/repos/system_upgrade/el8toel9/actors/checkvdo/libraries/checkvdo.py @@ -1,10 +1,35 @@ @@ -244,7 +235,7 @@ index 9ba5c70..3b161c9 100644 - 'device format level; however, the expected LVM management ' - 'portion of the conversion did not take place. 
This ' - 'indicates that an exceptional condition (for example, a ' -- 'system crash) likely occured during the conversion ' +- 'system crash) likely occurred during the conversion ' - 'process. The LVM portion of the conversion must be ' - 'performed in order for upgrade to proceed.')) + summary = ( @@ -397,7 +388,7 @@ index 9ba5c70..3b161c9 100644 + if detected_under_dev or detected_failed_check: + _report_skip_check() diff --git a/repos/system_upgrade/el8toel9/actors/checkvdo/tests/unit_test_checkvdo.py b/repos/system_upgrade/el8toel9/actors/checkvdo/tests/unit_test_checkvdo.py -index e0ac39d..865e036 100644 +index e0ac39d0..865e036f 100644 --- a/repos/system_upgrade/el8toel9/actors/checkvdo/tests/unit_test_checkvdo.py +++ b/repos/system_upgrade/el8toel9/actors/checkvdo/tests/unit_test_checkvdo.py @@ -13,14 +13,16 @@ from leapp.models import ( @@ -558,5 +549,5 @@ index e0ac39d..865e036 100644 - 'User has opted to inhibit upgrade') + 'User has asserted all VDO devices on the system have been successfully converted') -- -2.37.2 +2.38.1 diff --git a/SOURCES/0009-Add-actors-for-checking-and-setting-systemd-services.patch b/SOURCES/0009-Add-actors-for-checking-and-setting-systemd-services.patch new file mode 100644 index 0000000..d8e869d --- /dev/null +++ b/SOURCES/0009-Add-actors-for-checking-and-setting-systemd-services.patch @@ -0,0 +1,383 @@ +From d2d7999744e97776eda664592ac0cc7ec5747b99 Mon Sep 17 00:00:00 2001 +From: Matej Matuska +Date: Thu, 8 Sep 2022 16:27:10 +0200 +Subject: [PATCH 09/32] Add actors for checking and setting systemd services + states + +Introduces a new `set_systemd_services_state` actor, which +enables/disables systemd services according to received +`SystemdServicesTasks` messages and a `check_systemd_services_tasks` +actor which checks tasks in the `TargetTransactionCheckPhase` and +inhibits upgrade if there are conflicts. + +Actors are in a new directory `systemd`. 
+--- + .../systemd/checksystemdservicetasks/actor.py | 30 +++++++ + .../libraries/checksystemdservicetasks.py | 36 ++++++++ + .../tests/test_checksystemdservicestasks.py | 88 +++++++++++++++++++ + .../systemd/setsystemdservicesstates/actor.py | 18 ++++ + .../libraries/setsystemdservicesstate.py | 31 +++++++ + .../tests/test_setsystemdservicesstate.py | 83 +++++++++++++++++ + .../common/models/systemdservices.py | 22 +++++ + 7 files changed, 308 insertions(+) + create mode 100644 repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/actor.py + create mode 100644 repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/libraries/checksystemdservicetasks.py + create mode 100644 repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/tests/test_checksystemdservicestasks.py + create mode 100644 repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/actor.py + create mode 100644 repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/libraries/setsystemdservicesstate.py + create mode 100644 repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/tests/test_setsystemdservicesstate.py + create mode 100644 repos/system_upgrade/common/models/systemdservices.py + +diff --git a/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/actor.py b/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/actor.py +new file mode 100644 +index 00000000..2df995ee +--- /dev/null ++++ b/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/actor.py +@@ -0,0 +1,30 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import checksystemdservicetasks ++from leapp.models import SystemdServicesTasks ++from leapp.reporting import Report ++from leapp.tags import IPUWorkflowTag, TargetTransactionChecksPhaseTag ++ ++ ++class CheckSystemdServicesTasks(Actor): ++ """ ++ Inhibits upgrade if SystemdServicesTasks tasks are in conflict ++ ++ There is possibility, that 
SystemdServicesTasks messages with conflicting ++ requested service states could be produced. For example a service is ++ requested to be both enabled and disabled. This actor inhibits upgrade in ++ such cases. ++ ++ Note: We expect that SystemdServicesTasks could be produced even after the ++ TargetTransactionChecksPhase (e.g. during the ApplicationPhase). The ++ purpose of this actor is to report collisions in case we can already detect ++ them. In case of conflicts caused by produced messages later we just log ++ the collisions and the services will end up disabled. ++ """ ++ ++ name = 'check_systemd_services_tasks' ++ consumes = (SystemdServicesTasks,) ++ produces = (Report,) ++ tags = (TargetTransactionChecksPhaseTag, IPUWorkflowTag) ++ ++ def process(self): ++ checksystemdservicetasks.check_conflicts() +diff --git a/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/libraries/checksystemdservicetasks.py b/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/libraries/checksystemdservicetasks.py +new file mode 100644 +index 00000000..75833e4f +--- /dev/null ++++ b/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/libraries/checksystemdservicetasks.py +@@ -0,0 +1,36 @@ ++from leapp import reporting ++from leapp.libraries.stdlib import api ++from leapp.models import SystemdServicesTasks ++ ++FMT_LIST_SEPARATOR = '\n - ' ++ ++ ++def _printable_conflicts(conflicts): ++ return FMT_LIST_SEPARATOR + FMT_LIST_SEPARATOR.join(sorted(conflicts)) ++ ++ ++def _inhibit_upgrade_with_conflicts(conflicts): ++ summary = ( ++ 'The requested states for systemd services on the target system are in conflict.' 
++ ' The following systemd services were requested to be both enabled and disabled on the target system: {}' ++ ) ++ report = [ ++ reporting.Title('Conflicting requirements of systemd service states'), ++ reporting.Summary(summary.format(_printable_conflicts(conflicts))), ++ reporting.Severity(reporting.Severity.HIGH), ++ reporting.Groups([reporting.Groups.SANITY]), ++ reporting.Groups([reporting.Groups.INHIBITOR]), ++ ] ++ reporting.create_report(report) ++ ++ ++def check_conflicts(): ++ services_to_enable = set() ++ services_to_disable = set() ++ for task in api.consume(SystemdServicesTasks): ++ services_to_enable.update(task.to_enable) ++ services_to_disable.update(task.to_disable) ++ ++ conflicts = services_to_enable.intersection(services_to_disable) ++ if conflicts: ++ _inhibit_upgrade_with_conflicts(conflicts) +diff --git a/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/tests/test_checksystemdservicestasks.py b/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/tests/test_checksystemdservicestasks.py +new file mode 100644 +index 00000000..36ded92f +--- /dev/null ++++ b/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/tests/test_checksystemdservicestasks.py +@@ -0,0 +1,88 @@ ++import pytest ++ ++from leapp import reporting ++from leapp.libraries.actor import checksystemdservicetasks ++from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked ++from leapp.libraries.stdlib import api ++from leapp.models import SystemdServicesTasks ++ ++ ++@pytest.mark.parametrize( ++ ('tasks', 'should_inhibit'), ++ [ ++ ( ++ [SystemdServicesTasks(to_enable=['hello.service'], to_disable=['hello.service'])], ++ True ++ ), ++ ( ++ [SystemdServicesTasks(to_enable=['hello.service', 'world.service'], ++ to_disable=['hello.service'])], ++ True ++ ), ++ ( ++ [ ++ SystemdServicesTasks(to_enable=['hello.service']), ++ SystemdServicesTasks(to_disable=['hello.service']) ++ ], ++ True ++ ), ++ ( ++ 
[SystemdServicesTasks(to_enable=['hello.service'], to_disable=['world.service'])], ++ False ++ ), ++ ( ++ [ ++ SystemdServicesTasks(to_enable=['hello.service']), ++ SystemdServicesTasks(to_disable=['world.service']) ++ ], ++ False ++ ), ++ ( ++ [ ++ SystemdServicesTasks(to_enable=['hello.service', 'world.service']), ++ SystemdServicesTasks(to_disable=['world.service', 'httpd.service']) ++ ], ++ True ++ ), ++ ] ++) ++def test_conflicts_detected(monkeypatch, tasks, should_inhibit): ++ ++ created_reports = create_report_mocked() ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=tasks)) ++ monkeypatch.setattr(reporting, 'create_report', created_reports) ++ ++ checksystemdservicetasks.check_conflicts() ++ ++ assert bool(created_reports.called) == should_inhibit ++ ++ ++@pytest.mark.parametrize( ++ ('tasks', 'expected_reported'), ++ [ ++ ( ++ [SystemdServicesTasks(to_enable=['world.service', 'httpd.service', 'hello.service'], ++ to_disable=['hello.service', 'world.service', 'test.service'])], ++ ['world.service', 'hello.service'] ++ ), ++ ( ++ [ ++ SystemdServicesTasks(to_enable=['hello.service', 'httpd.service'], ++ to_disable=['world.service']), ++ SystemdServicesTasks(to_enable=['world.service', 'httpd.service'], ++ to_disable=['hello.service', 'test.service']) ++ ], ++ ['world.service', 'hello.service'] ++ ), ++ ] ++) ++def test_coflict_reported(monkeypatch, tasks, expected_reported): ++ ++ created_reports = create_report_mocked() ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=tasks)) ++ monkeypatch.setattr(reporting, 'create_report', created_reports) ++ ++ checksystemdservicetasks.check_conflicts() ++ ++ report_summary = reporting.create_report.report_fields['summary'] ++ assert all(service in report_summary for service in expected_reported) +diff --git a/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/actor.py b/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/actor.py +new file mode 
100644 +index 00000000..1709091e +--- /dev/null ++++ b/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/actor.py +@@ -0,0 +1,18 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import setsystemdservicesstate ++from leapp.models import SystemdServicesTasks ++from leapp.tags import FinalizationPhaseTag, IPUWorkflowTag ++ ++ ++class SetSystemdServicesState(Actor): ++ """ ++ According to input messages sets systemd services states on the target system ++ """ ++ ++ name = 'set_systemd_services_state' ++ consumes = (SystemdServicesTasks,) ++ produces = () ++ tags = (FinalizationPhaseTag, IPUWorkflowTag) ++ ++ def process(self): ++ setsystemdservicesstate.process() +diff --git a/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/libraries/setsystemdservicesstate.py b/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/libraries/setsystemdservicesstate.py +new file mode 100644 +index 00000000..01272438 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/libraries/setsystemdservicesstate.py +@@ -0,0 +1,31 @@ ++from leapp.libraries.stdlib import api, CalledProcessError, run ++from leapp.models import SystemdServicesTasks ++ ++ ++def _try_set_service_state(command, service): ++ try: ++ # it is possible to call this on multiple units at once, ++ # but failing to enable one service would cause others to not enable as well ++ run(['systemctl', command, service]) ++ except CalledProcessError as err: ++ api.current_logger().error('Failed to {} systemd unit "{}". 
Message: {}'.format(command, service, str(err))) ++ # TODO(mmatuska) produce post-upgrade report ++ ++ ++def process(): ++ services_to_enable = set() ++ services_to_disable = set() ++ for task in api.consume(SystemdServicesTasks): ++ services_to_enable.update(task.to_enable) ++ services_to_disable.update(task.to_disable) ++ ++ intersection = services_to_enable.intersection(services_to_disable) ++ for service in intersection: ++ msg = 'Attempted to both enable and disable systemd service "{}", service will be disabled.'.format(service) ++ api.current_logger().error(msg) ++ ++ for service in services_to_enable: ++ _try_set_service_state('enable', service) ++ ++ for service in services_to_disable: ++ _try_set_service_state('disable', service) +diff --git a/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/tests/test_setsystemdservicesstate.py b/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/tests/test_setsystemdservicesstate.py +new file mode 100644 +index 00000000..dd153329 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/tests/test_setsystemdservicesstate.py +@@ -0,0 +1,83 @@ ++import pytest ++ ++from leapp.libraries import stdlib ++from leapp.libraries.actor import setsystemdservicesstate ++from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked ++from leapp.libraries.stdlib import api, CalledProcessError ++from leapp.models import SystemdServicesTasks ++ ++ ++class MockedRun(object): ++ def __init__(self): ++ self.commands = [] ++ ++ def __call__(self, cmd, *args, **kwargs): ++ self.commands.append(cmd) ++ return {} ++ ++ ++@pytest.mark.parametrize( ++ ('msgs', 'expected_calls'), ++ [ ++ ( ++ [SystemdServicesTasks(to_enable=['hello.service'], ++ to_disable=['getty.service'])], ++ [['systemctl', 'enable', 'hello.service'], ['systemctl', 'disable', 'getty.service']] ++ ), ++ ( ++ [SystemdServicesTasks(to_disable=['getty.service'])], ++ [['systemctl', 'disable', 
'getty.service']] ++ ), ++ ( ++ [SystemdServicesTasks(to_enable=['hello.service'])], ++ [['systemctl', 'enable', 'hello.service']] ++ ), ++ ( ++ [SystemdServicesTasks()], ++ [] ++ ), ++ ] ++) ++def test_process(monkeypatch, msgs, expected_calls): ++ mocked_run = MockedRun() ++ monkeypatch.setattr(setsystemdservicesstate, 'run', mocked_run) ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs)) ++ ++ setsystemdservicesstate.process() ++ ++ assert mocked_run.commands == expected_calls ++ ++ ++def test_process_invalid(monkeypatch): ++ ++ def mocked_run(cmd, *args, **kwargs): ++ if cmd == ['systemctl', 'enable', 'invalid.service']: ++ message = 'Command {0} failed with exit code {1}.'.format(str(cmd), 1) ++ raise CalledProcessError(message, cmd, 1) ++ ++ msgs = [SystemdServicesTasks(to_enable=['invalid.service'])] ++ ++ monkeypatch.setattr(setsystemdservicesstate, 'run', mocked_run) ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs)) ++ monkeypatch.setattr(api, 'current_logger', logger_mocked()) ++ ++ setsystemdservicesstate.process() ++ ++ expect_msg = ("Failed to enable systemd unit \"invalid.service\". 
Message:" ++ " Command ['systemctl', 'enable', 'invalid.service'] failed with exit code 1.") ++ assert expect_msg in api.current_logger.errmsg ++ ++ ++def test_enable_disable_conflict_logged(monkeypatch): ++ msgs = [SystemdServicesTasks(to_enable=['hello.service'], ++ to_disable=['hello.service'])] ++ mocked_run = MockedRun() ++ monkeypatch.setattr(setsystemdservicesstate, 'run', mocked_run) ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs)) ++ monkeypatch.setattr(api, 'current_logger', logger_mocked()) ++ ++ setsystemdservicesstate.process() ++ ++ expect_msg = ('Attempted to both enable and disable systemd service "hello.service",' ++ ' service will be disabled.') ++ assert expect_msg in api.current_logger.errmsg +diff --git a/repos/system_upgrade/common/models/systemdservices.py b/repos/system_upgrade/common/models/systemdservices.py +new file mode 100644 +index 00000000..6c7d4a1d +--- /dev/null ++++ b/repos/system_upgrade/common/models/systemdservices.py +@@ -0,0 +1,22 @@ ++from leapp.models import fields, Model ++from leapp.topics import SystemInfoTopic ++ ++ ++class SystemdServicesTasks(Model): ++ topic = SystemInfoTopic ++ ++ to_enable = fields.List(fields.String(), default=[]) ++ """ ++ List of systemd services to enable on the target system ++ ++ Masked services will not be enabled. Attempting to enable a masked service ++ will be evaluated by systemctl as usually. The error will be logged and the ++ upgrade process will continue. 
++ """ ++ to_disable = fields.List(fields.String(), default=[]) ++ """ ++ List of systemd services to disable on the target system ++ """ ++ ++ # Note: possible extension in case of requirement (currently not implemented): ++ # to_unmask = fields.List(fields.String(), default=[]) +-- +2.38.1 + diff --git a/SOURCES/0010-migratentp-Replace-reports-with-log-messages.patch b/SOURCES/0010-migratentp-Replace-reports-with-log-messages.patch new file mode 100644 index 0000000..11360ac --- /dev/null +++ b/SOURCES/0010-migratentp-Replace-reports-with-log-messages.patch @@ -0,0 +1,92 @@ +From 004e7f3515cc2daa1a7ca72f7c8f5becb945ff17 Mon Sep 17 00:00:00 2001 +From: Miroslav Lichvar +Date: Mon, 19 Sep 2022 15:16:46 +0200 +Subject: [PATCH 10/32] migratentp: Replace reports with log messages + +Reports are supposed to contain different information. +--- + .../actors/migratentp/libraries/migratentp.py | 27 ++++--------------- + .../migratentp/tests/unit_test_migratentp.py | 10 ------- + 2 files changed, 5 insertions(+), 32 deletions(-) + +diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/libraries/migratentp.py b/repos/system_upgrade/el7toel8/actors/migratentp/libraries/migratentp.py +index deeaaccd..a0ad634b 100644 +--- a/repos/system_upgrade/el7toel8/actors/migratentp/libraries/migratentp.py ++++ b/repos/system_upgrade/el7toel8/actors/migratentp/libraries/migratentp.py +@@ -2,11 +2,8 @@ import base64 + import io + import tarfile + +-from leapp import reporting + from leapp.exceptions import StopActorExecutionError +-from leapp.libraries.stdlib import CalledProcessError, run +- +-COMMON_REPORT_TAGS = [reporting.Groups.SERVICES, reporting.Groups.TIME_MANAGEMENT] ++from leapp.libraries.stdlib import api, CalledProcessError, run + + + def extract_tgz64(s): +@@ -82,21 +79,7 @@ def migrate_ntp(migrate_services, config_tgz64): + + ignored_lines = ntp2chrony('/', ntp_conf, step_tickers) + +- config_resources = [reporting.RelatedResource('file', mc) for mc in migrate_configs + 
[ntp_conf]] +- package_resources = [reporting.RelatedResource('package', p) for p in ['ntpd', 'chrony']] +- +- if not ignored_lines: +- reporting.create_report([ +- reporting.Title('{} configuration migrated to chrony'.format(' and '.join(migrate_configs))), +- reporting.Summary('ntp2chrony executed successfully'), +- reporting.Severity(reporting.Severity.INFO), +- reporting.Groups(COMMON_REPORT_TAGS) +- ] + config_resources + package_resources) +- +- else: +- reporting.create_report([ +- reporting.Title('{} configuration partially migrated to chrony'.format(' and '.join(migrate_configs))), +- reporting.Summary('Some lines in /etc/ntp.conf were ignored in migration (check /etc/chrony.conf)'), +- reporting.Severity(reporting.Severity.MEDIUM), +- reporting.Groups(COMMON_REPORT_TAGS) +- ] + config_resources + package_resources) ++ api.current_logger().info('Configuration files migrated to chrony: {}'.format(' '.join(migrate_configs))) ++ if ignored_lines: ++ api.current_logger().warning('Some lines in /etc/ntp.conf were ignored in migration' ++ ' (check /etc/chrony.conf)') +diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/tests/unit_test_migratentp.py b/repos/system_upgrade/el7toel8/actors/migratentp/tests/unit_test_migratentp.py +index 6ce4bb5b..fafff5e7 100644 +--- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/unit_test_migratentp.py ++++ b/repos/system_upgrade/el7toel8/actors/migratentp/tests/unit_test_migratentp.py +@@ -55,7 +55,6 @@ def test_migration(monkeypatch): + (['ntp-wait'], ['chrony-wait'], 0), + (['ntpd', 'ntpdate', 'ntp-wait'], ['chronyd', 'chronyd', 'chrony-wait'], 1), + ]: +- monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) + monkeypatch.setattr(migratentp, 'extract_tgz64', extract_tgz64_mocked()) + monkeypatch.setattr(migratentp, 'enable_service', enable_service_mocked()) + monkeypatch.setattr(migratentp, 'write_file', write_file_mocked()) +@@ -64,14 +63,6 @@ def test_migration(monkeypatch): + 
migratentp.migrate_ntp(ntp_services, 'abcdef') + + if ntp_services: +- assert reporting.create_report.called == 1 +- if ignored_lines > 0: +- assert 'configuration partially migrated to chrony' in \ +- reporting.create_report.report_fields['title'] +- else: +- assert 'configuration migrated to chrony' in \ +- reporting.create_report.report_fields['title'] +- + assert migratentp.extract_tgz64.called == 1 + assert migratentp.extract_tgz64.s == 'abcdef' + assert migratentp.enable_service.called == len(chrony_services) +@@ -86,7 +77,6 @@ def test_migration(monkeypatch): + '/etc/ntp.conf' if 'ntpd' in ntp_services else '/etc/ntp.conf.nosources', + '/etc/ntp/step-tickers' if 'ntpdate' in ntp_services else '') + else: +- assert reporting.create_report.called == 0 + assert migratentp.extract_tgz64.called == 0 + assert migratentp.enable_service.called == 0 + assert migratentp.write_file.called == 0 +-- +2.38.1 + diff --git a/SOURCES/0011-migratentp-Catch-more-specific-exception-from-ntp2ch.patch b/SOURCES/0011-migratentp-Catch-more-specific-exception-from-ntp2ch.patch new file mode 100644 index 0000000..2a1eef3 --- /dev/null +++ b/SOURCES/0011-migratentp-Catch-more-specific-exception-from-ntp2ch.patch @@ -0,0 +1,28 @@ +From 83dbc935d1ac32cbfeca7ba52da6bb4bbb965879 Mon Sep 17 00:00:00 2001 +From: Miroslav Lichvar +Date: Mon, 19 Sep 2022 15:35:43 +0200 +Subject: [PATCH 11/32] migratentp: Catch more specific exception from + ntp2chrony + +Catch OSError instead of Exception from ntp2chrony to avoid pylint +errors. 
+--- + .../el7toel8/actors/migratentp/libraries/migratentp.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/libraries/migratentp.py b/repos/system_upgrade/el7toel8/actors/migratentp/libraries/migratentp.py +index a0ad634b..1bc59448 100644 +--- a/repos/system_upgrade/el7toel8/actors/migratentp/libraries/migratentp.py ++++ b/repos/system_upgrade/el7toel8/actors/migratentp/libraries/migratentp.py +@@ -33,7 +33,7 @@ def ntp2chrony(root, ntp_conf, step_tickers): + ntp_configuration = ntp2chrony.NtpConfiguration(root, ntp_conf, step_tickers) + ntp_configuration.write_chrony_configuration('/etc/chrony.conf', '/etc/chrony.keys', + False, True) +- except Exception as e: ++ except OSError as e: + raise StopActorExecutionError('ntp2chrony failed: {}'.format(e)) + + # Return ignored lines from ntp.conf, except 'disable monitor' from +-- +2.38.1 + diff --git a/SOURCES/0012-migratentp-Don-t-raise-StopActorExecutionError.patch b/SOURCES/0012-migratentp-Don-t-raise-StopActorExecutionError.patch new file mode 100644 index 0000000..aa7389d --- /dev/null +++ b/SOURCES/0012-migratentp-Don-t-raise-StopActorExecutionError.patch @@ -0,0 +1,92 @@ +From 02dca0a6b721c89d125c521c7da5e85b89d136f7 Mon Sep 17 00:00:00 2001 +From: Miroslav Lichvar +Date: Wed, 14 Sep 2022 14:55:10 +0200 +Subject: [PATCH 12/32] migratentp: Don't raise StopActorExecutionError + +When a service cannot be enabled (e.g. due to masking) or when +ntp2chrony fails, log an error message instead of failing the migration. 
+ +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2089514 +--- + .../actors/migratentp/libraries/migratentp.py | 22 ++++++++++--------- + .../migratentp/tests/unit_test_migratentp.py | 2 +- + 2 files changed, 13 insertions(+), 11 deletions(-) + +diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/libraries/migratentp.py b/repos/system_upgrade/el7toel8/actors/migratentp/libraries/migratentp.py +index 1bc59448..306ce09e 100644 +--- a/repos/system_upgrade/el7toel8/actors/migratentp/libraries/migratentp.py ++++ b/repos/system_upgrade/el7toel8/actors/migratentp/libraries/migratentp.py +@@ -2,7 +2,6 @@ import base64 + import io + import tarfile + +-from leapp.exceptions import StopActorExecutionError + from leapp.libraries.stdlib import api, CalledProcessError, run + + +@@ -17,7 +16,7 @@ def enable_service(name): + try: + run(['systemctl', 'enable', '{}.service'.format(name)]) + except CalledProcessError: +- raise StopActorExecutionError('Could not enable {} service'.format(name)) ++ api.current_logger().error('Could not enable {} service'.format(name)) + + + def write_file(name, content): +@@ -34,11 +33,12 @@ def ntp2chrony(root, ntp_conf, step_tickers): + ntp_configuration.write_chrony_configuration('/etc/chrony.conf', '/etc/chrony.keys', + False, True) + except OSError as e: +- raise StopActorExecutionError('ntp2chrony failed: {}'.format(e)) ++ api.current_logger().error('ntp2chrony failed: {}'.format(e)) ++ return False, set() + + # Return ignored lines from ntp.conf, except 'disable monitor' from + # the default ntp.conf +- return set(ntp_configuration.ignored_lines) - set(['disable monitor']) ++ return True, set(ntp_configuration.ignored_lines) - set(['disable monitor']) + + + def migrate_ntp(migrate_services, config_tgz64): +@@ -61,7 +61,8 @@ def migrate_ntp(migrate_services, config_tgz64): + migrate_configs = [] + for service in migrate_services: + if service not in service_map: +- raise StopActorExecutionError('Unknown service 
{}'.format(service)) ++ api.current_logger().error('Unknown service {}'.format(service)) ++ continue + enable_service(service_map[service][0]) + if service_map[service][1]: + migrate_configs.append(service) +@@ -77,9 +78,10 @@ def migrate_ntp(migrate_services, config_tgz64): + + step_tickers = '/etc/ntp/step-tickers' if 'ntpdate' in migrate_configs else '' + +- ignored_lines = ntp2chrony('/', ntp_conf, step_tickers) ++ conf_migrated, ignored_lines = ntp2chrony('/', ntp_conf, step_tickers) + +- api.current_logger().info('Configuration files migrated to chrony: {}'.format(' '.join(migrate_configs))) +- if ignored_lines: +- api.current_logger().warning('Some lines in /etc/ntp.conf were ignored in migration' +- ' (check /etc/chrony.conf)') ++ if conf_migrated: ++ api.current_logger().info('Configuration files migrated to chrony: {}'.format(' '.join(migrate_configs))) ++ if ignored_lines: ++ api.current_logger().warning('Some lines in /etc/ntp.conf were ignored in migration' ++ ' (check /etc/chrony.conf)') +diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/tests/unit_test_migratentp.py b/repos/system_upgrade/el7toel8/actors/migratentp/tests/unit_test_migratentp.py +index fafff5e7..5350029c 100644 +--- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/unit_test_migratentp.py ++++ b/repos/system_upgrade/el7toel8/actors/migratentp/tests/unit_test_migratentp.py +@@ -44,7 +44,7 @@ class ntp2chrony_mocked(object): + def __call__(self, *args): + self.called += 1 + self.args = args +- return self.ignored_lines * ['a line'] ++ return True, self.ignored_lines * ['a line'] + + + def test_migration(monkeypatch): +-- +2.38.1 + diff --git a/SOURCES/0013-Make-shellcheck-happy-again.patch b/SOURCES/0013-Make-shellcheck-happy-again.patch new file mode 100644 index 0000000..519ae15 --- /dev/null +++ b/SOURCES/0013-Make-shellcheck-happy-again.patch @@ -0,0 +1,32 @@ +From 7d915f9ce861f999d6fc559e7a466a32c7e4aec9 Mon Sep 17 00:00:00 2001 +From: Petr Stodulka +Date: Fri, 9 
Sep 2022 16:44:27 +0200 +Subject: [PATCH 13/32] Make shellcheck happy again + +Fixing: +``` +85sys-upgrade-redhat/do-upgrade.sh:236:37: warning[SC2166]: Prefer [ p ] && [ q ] as [ p -a q ] is not well defined. +``` + +It's not a real issue as we do not care about the order of the +evaluation, but making shellcheck happy. +--- + .../files/dracut/85sys-upgrade-redhat/do-upgrade.sh | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh +index 1f39a6b2..ff491316 100755 +--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh ++++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh +@@ -233,7 +233,7 @@ do_upgrade() { + # on aarch64 systems during el8 to el9 upgrades the swap is broken due to change in page size (64K to 4k) + # adjust the page size before booting into the new system, as it is possible the swap is necessary for to boot + # `arch` command is not available in the dracut shell, using uname -m instead +- [ "$(uname -m)" = "aarch64" -a "$RHEL_OS_MAJOR_RELEASE" = "9" ] && { ++ [ "$(uname -m)" = "aarch64" ] && [ "$RHEL_OS_MAJOR_RELEASE" = "9" ] && { + cp -aS ".leapp_bp" $NEWROOT/etc/fstab /etc/fstab + # swapon internally uses mkswap and both swapon and mkswap aren't available in dracut shell + # as a workaround we can use the one from $NEWROOT in $NEWROOT/usr/sbin +-- +2.38.1 + diff --git a/SOURCES/0014-actor-firewalld-support-0.8.z.patch b/SOURCES/0014-actor-firewalld-support-0.8.z.patch new file mode 100644 index 0000000..590dda0 --- /dev/null +++ b/SOURCES/0014-actor-firewalld-support-0.8.z.patch @@ -0,0 +1,231 @@ +From c109704cb2139dbdba371b83e2f55aad8fb1f9ed Mon Sep 17 00:00:00 2001 +From: Eric Garver +Date: Wed, 
31 Aug 2022 14:24:42 -0400 +Subject: [PATCH 14/32] actor: firewalld: support 0.8.z + +Prior to this change the actor only supported firewalld-0.9.z and later. + +Relevant differences between 0.9.z and 0.8.z: + +- Policies don't exist (new in 0.9.0) +- Zones use a tuple based API + +Fixes: rhbz2101909 +--- + ...private_firewalldcollectusedobjectnames.py | 31 +++++- + ...it_test_firewalldcollectusedobjectnames.py | 105 +++++++++++++++++- + 2 files changed, 129 insertions(+), 7 deletions(-) + +diff --git a/repos/system_upgrade/el8toel9/actors/firewalldcollectusedobjectnames/libraries/private_firewalldcollectusedobjectnames.py b/repos/system_upgrade/el8toel9/actors/firewalldcollectusedobjectnames/libraries/private_firewalldcollectusedobjectnames.py +index 93e4c6a2..d93b980b 100644 +--- a/repos/system_upgrade/el8toel9/actors/firewalldcollectusedobjectnames/libraries/private_firewalldcollectusedobjectnames.py ++++ b/repos/system_upgrade/el8toel9/actors/firewalldcollectusedobjectnames/libraries/private_firewalldcollectusedobjectnames.py +@@ -14,6 +14,13 @@ def is_zone_in_use(conf): + return False + + ++def is_zone_in_use_tuple(conf): ++ conf_dict = {'interfaces': conf[10], ++ 'sources': conf[11]} ++ ++ return is_zone_in_use(conf_dict) ++ ++ + def is_policy_in_use(conf, used_zones): + # A policy is in use if both ingress_zones and egress_zones contain at + # least one of following: an active zone, 'ANY', 'HOST'. 
+@@ -49,6 +56,18 @@ def get_used_services(conf, isZone): + return used_services + + ++def get_used_services_tuple(conf, isZone): ++ if not isZone: ++ return set() ++ ++ conf_dict = {'services': conf[5], ++ 'interfaces': conf[10], ++ 'sources': conf[11], ++ 'rules_str': conf[12]} ++ ++ return get_used_services(conf_dict, isZone) ++ ++ + def read_config(): + try: + fw = Firewall(offline=True) +@@ -65,12 +84,12 @@ def read_config(): + used_zones = set([fw.get_default_zone()]) + for zone in fw.config.get_zones(): + obj = fw.config.get_zone(zone) +- conf = fw.config.get_zone_config_dict(obj) +- if is_zone_in_use(conf): ++ conf = fw.config.get_zone_config(obj) ++ if is_zone_in_use_tuple(conf): + used_zones.add(zone) + + used_policies = [] +- for policy in fw.config.get_policy_objects(): ++ for policy in fw.config.get_policy_objects() if hasattr(fw.config, "get_policy_objects") else []: + obj = fw.config.get_policy_object(policy) + conf = fw.config.get_policy_object_config_dict(obj) + if is_policy_in_use(conf, used_zones): +@@ -79,9 +98,9 @@ def read_config(): + used_services = set() + for zone in fw.config.get_zones(): + obj = fw.config.get_zone(zone) +- conf = fw.config.get_zone_config_dict(obj) +- used_services.update(get_used_services(conf, True)) +- for policy in fw.config.get_policy_objects(): ++ conf = fw.config.get_zone_config(obj) ++ used_services.update(get_used_services_tuple(conf, True)) ++ for policy in fw.config.get_policy_objects() if hasattr(fw.config, "get_policy_objects") else []: + obj = fw.config.get_policy_object(policy) + conf = fw.config.get_policy_object_config_dict(obj) + used_services.update(get_used_services(conf, False)) +diff --git a/repos/system_upgrade/el8toel9/actors/firewalldcollectusedobjectnames/tests/unit_test_firewalldcollectusedobjectnames.py b/repos/system_upgrade/el8toel9/actors/firewalldcollectusedobjectnames/tests/unit_test_firewalldcollectusedobjectnames.py +index 6e1511eb..9d2cfb47 100644 +--- 
a/repos/system_upgrade/el8toel9/actors/firewalldcollectusedobjectnames/tests/unit_test_firewalldcollectusedobjectnames.py ++++ b/repos/system_upgrade/el8toel9/actors/firewalldcollectusedobjectnames/tests/unit_test_firewalldcollectusedobjectnames.py +@@ -1,7 +1,9 @@ + from leapp.libraries.actor.private_firewalldcollectusedobjectnames import ( + get_used_services, ++ get_used_services_tuple, + is_policy_in_use, +- is_zone_in_use ++ is_zone_in_use, ++ is_zone_in_use_tuple + ) + + +@@ -20,6 +22,35 @@ def test_is_zone_in_use(): + assert is_zone_in_use(conf) + + ++def test_is_zone_in_use_tuple(): ++ conf = (None, None, None, None, None, ++ ['tftp-client'], # conf[5], services ++ None, None, None, None, ++ ['dummy0'], # conf[10], interfaces ++ [], # conf[11], sources ++ [], # conf[12], rules_str ++ None, None, None) ++ assert is_zone_in_use_tuple(conf) ++ ++ conf = (None, None, None, None, None, ++ ['tftp-client'], # conf[5], services ++ None, None, None, None, ++ [], # conf[10], interfaces ++ ['10.1.2.0/24'], # conf[11], sources ++ [], # conf[12], rules_str ++ None, None, None) ++ assert is_zone_in_use_tuple(conf) ++ ++ conf = (None, None, None, None, None, ++ ['tftp-client'], # conf[5], services ++ None, None, None, None, ++ ['dummy0'], # conf[10], interfaces ++ ['fd00::/8'], # conf[11], sources ++ [], # conf[12], rules_str ++ None, None, None) ++ assert is_zone_in_use_tuple(conf) ++ ++ + def test_is_zone_in_use_negative(): + conf = {'interfaces': [], + 'services': ['tftp-client']} +@@ -33,6 +64,17 @@ def test_is_zone_in_use_negative(): + assert not is_zone_in_use(conf) + + ++def test_is_zone_in_use_tuple_negative(): ++ conf = (None, None, None, None, None, ++ ['tftp-client'], # conf[5], services ++ None, None, None, None, ++ [], # conf[10], interfaces ++ [], # conf[11], sources ++ [], # conf[12], rules_str ++ None, None, None) ++ assert not is_zone_in_use_tuple(conf) ++ ++ + def test_is_policy_in_use(): + conf = {'ingress_zones': ['HOST'], + 'egress_zones': ['public'], 
+@@ -88,6 +130,35 @@ def test_get_used_services_zone(): + assert 'tftp-client' in get_used_services(conf, True) + + ++def test_get_used_services_tuple_zone(): ++ conf = (None, None, None, None, None, ++ ['tftp-client'], # conf[5], services ++ None, None, None, None, ++ ['dummy0'], # conf[10], interfaces ++ [], # conf[11], sources ++ [], # conf[12], rules_str ++ None, None, None) ++ assert 'tftp-client' in get_used_services_tuple(conf, True) ++ ++ conf = (None, None, None, None, None, ++ [], # conf[5], services ++ None, None, None, None, ++ [], # conf[10], interfaces ++ ['10.1.2.0/24'], # conf[11], sources ++ ['rule family="ipv4" source address="10.1.1.0/24" service name="tftp-client" reject'], ++ None, None, None) ++ assert 'tftp-client' in get_used_services_tuple(conf, True) ++ ++ conf = (None, None, None, None, None, ++ [], # conf[5], services ++ None, None, None, None, ++ ['dummy0'], # conf[10], interfaces ++ ['fd00::/8'], # conf[11], sources ++ ['rule service name="ssh" accept', 'rule service name="tftp-client" accept'], # conf[12], rules_str ++ None, None, None) ++ assert 'tftp-client' in get_used_services_tuple(conf, True) ++ ++ + def test_get_used_services_zone_negative(): + conf = {'interfaces': ['dummy0'], + 'services': ['https']} +@@ -105,6 +176,38 @@ def test_get_used_services_zone_negative(): + assert 'tftp-client' not in get_used_services(conf, True) + + ++def test_get_used_services_tuple_zone_negative(): ++ conf = (None, None, None, None, None, ++ ['https'], # conf[5], services ++ None, None, None, None, ++ ['dummy0'], # conf[10], interfaces ++ [], # conf[11], sources ++ [], # conf[12], rules_str ++ None, None, None) ++ assert 'tftp-client' not in get_used_services_tuple(conf, True) ++ ++ conf = {'sources': ['10.1.2.0/24'], ++ 'rules_str': ['rule family="ipv4" source address="10.1.1.0/24" service name="ssh" reject'], ++ 'services': ['https']} ++ conf = (None, None, None, None, None, ++ ['https'], # conf[5], services ++ None, None, None, None, ++ [], # 
conf[10], interfaces ++ ['10.1.2.0/24'], # conf[11], sources ++ ['rule family="ipv4" source address="10.1.1.0/24" service name="ssh" reject'], # conf[12], rules_str ++ None, None, None) ++ assert 'tftp-client' not in get_used_services_tuple(conf, True) ++ ++ conf = (None, None, None, None, None, ++ [], # conf[5], services ++ None, None, None, None, ++ ['dummy0'], # conf[10], interfaces ++ ['fd00::/8'], # conf[11], sources ++ ['rule service name="ssh" accept', 'rule service name="http" accept'], # conf[12], rules_str ++ None, None, None) ++ assert 'tftp-client' not in get_used_services_tuple(conf, True) ++ ++ + def test_get_used_services_policy(): + conf = {'services': ['tftp-client']} + assert 'tftp-client' in get_used_services(conf, False) +-- +2.38.1 + diff --git a/SOURCES/0015-Scanpkgmanager-detect-proxy-configuration.patch b/SOURCES/0015-Scanpkgmanager-detect-proxy-configuration.patch new file mode 100644 index 0000000..dbecc31 --- /dev/null +++ b/SOURCES/0015-Scanpkgmanager-detect-proxy-configuration.patch @@ -0,0 +1,257 @@ +From 876e93f233c41aa6c1742ed874ac167f0ddc4dbb Mon Sep 17 00:00:00 2001 +From: PeterMocary +Date: Fri, 24 Jun 2022 15:23:30 +0200 +Subject: [PATCH 15/32] Scanpkgmanager: detect proxy configuration + +This new information enables targetuserspacecreator actor to inform user why the package installation might have failed +--- + .../libraries/scanpkgmanager.py | 53 ++++++++++++++++++- + .../tests/test_scanpkgmanager.py | 49 +++++++++++++++++ + .../actors/targetuserspacecreator/actor.py | 4 ++ + .../libraries/userspacegen.py | 24 +++++++-- + .../common/models/packagemanagerinfo.py | 5 ++ + .../common/models/repositoriesfacts.py | 1 + + 6 files changed, 131 insertions(+), 5 deletions(-) + +diff --git a/repos/system_upgrade/common/actors/scanpkgmanager/libraries/scanpkgmanager.py b/repos/system_upgrade/common/actors/scanpkgmanager/libraries/scanpkgmanager.py +index 6f6a79d2..7c97fb1a 100644 +--- 
a/repos/system_upgrade/common/actors/scanpkgmanager/libraries/scanpkgmanager.py ++++ b/repos/system_upgrade/common/actors/scanpkgmanager/libraries/scanpkgmanager.py +@@ -1,9 +1,13 @@ + import os ++import re + + from leapp.libraries.common.config.version import get_source_major_version + from leapp.libraries.stdlib import api + from leapp.models import PkgManagerInfo + ++YUM_CONFIG_PATH = '/etc/yum.conf' ++DNF_CONFIG_PATH = '/etc/dnf/dnf.conf' ++ + + def _get_releasever_path(): + default_manager = 'yum' if get_source_major_version() == '7' else 'dnf' +@@ -28,5 +32,52 @@ def get_etc_releasever(): + return releasever + + ++def _get_config_contents(config_path): ++ if os.path.isfile(config_path): ++ with open(config_path, 'r') as config: ++ return config.read() ++ return '' ++ ++ ++def _get_proxy_if_set(manager_config_path): ++ """ ++ Get proxy address from specified package manager config. ++ ++ :param manager_config_path: path to a package manager config ++ :returns: proxy address or None when not set ++ :rtype: String ++ """ ++ ++ config = _get_config_contents(manager_config_path) ++ ++ for line in config.split('\n'): ++ if re.match('^proxy[ \t]*=', line): ++ proxy_address = line.split('=', 1)[1] ++ return proxy_address.strip() ++ ++ return None ++ ++ ++def get_configured_proxies(): ++ """ ++ Get a list of proxies used in dnf and yum configuration files. 
++ ++ :returns: sorted list of unique proxies ++ :rtype: List ++ """ ++ ++ configured_proxies = set() ++ for config_path in (DNF_CONFIG_PATH, YUM_CONFIG_PATH): ++ proxy = _get_proxy_if_set(config_path) ++ if proxy: ++ configured_proxies.add(proxy) ++ ++ return sorted(configured_proxies) ++ ++ + def process(): +- api.produce(PkgManagerInfo(etc_releasever=get_etc_releasever())) ++ pkg_manager_info = PkgManagerInfo() ++ pkg_manager_info.etc_releasever = get_etc_releasever() ++ pkg_manager_info.configured_proxies = get_configured_proxies() ++ ++ api.produce(pkg_manager_info) +diff --git a/repos/system_upgrade/common/actors/scanpkgmanager/tests/test_scanpkgmanager.py b/repos/system_upgrade/common/actors/scanpkgmanager/tests/test_scanpkgmanager.py +index 3be6fa2f..e78b532f 100644 +--- a/repos/system_upgrade/common/actors/scanpkgmanager/tests/test_scanpkgmanager.py ++++ b/repos/system_upgrade/common/actors/scanpkgmanager/tests/test_scanpkgmanager.py +@@ -9,6 +9,9 @@ from leapp.libraries.common.testutils import CurrentActorMocked, produce_mocked + from leapp.libraries.stdlib import api + + CUR_DIR = os.path.dirname(os.path.abspath(__file__)) ++PROXY_ADDRESS = 'https://192.168.121.123:3128' ++YUM_CONFIG_PATH = '/etc/yum.conf' ++DNF_CONFIG_PATH = '/etc/dnf/dnf.conf' + + + def mock_releasever_exists(overrides): +@@ -36,6 +39,8 @@ def test_get_etcreleasever(monkeypatch, etcrelease_exists): + monkeypatch.setattr(scanpkgmanager.api, 'produce', produce_mocked()) + monkeypatch.setattr(scanpkgmanager.api, 'current_actor', CurrentActorMocked()) + monkeypatch.setattr(scanpkgmanager, '_get_releasever_path', mocked_get_releasever_path) ++ monkeypatch.setattr(scanpkgmanager, '_get_proxy_if_set', lambda x: None) ++ monkeypatch.setattr(pluginscanner, 'scan_enabled_package_manager_plugins', lambda: []) + + scanpkgmanager.process() + +@@ -44,3 +49,47 @@ def test_get_etcreleasever(monkeypatch, etcrelease_exists): + assert api.produce.model_instances[0].etc_releasever + else: + assert not 
api.produce.model_instances[0].etc_releasever ++ ++ ++@pytest.mark.parametrize('proxy_set', [True, False]) ++def test_get_proxy_if_set(monkeypatch, proxy_set): ++ ++ config_path = '/path/to/config.conf' ++ config_contents = '[main]\n' ++ if proxy_set: ++ config_contents += 'proxy = \t{} '.format(PROXY_ADDRESS) ++ ++ def mocked_get_config_contents(path): ++ assert path == config_path ++ return config_contents ++ ++ monkeypatch.setattr(scanpkgmanager, '_get_config_contents', mocked_get_config_contents) ++ ++ proxy = scanpkgmanager._get_proxy_if_set(config_path) ++ ++ if proxy_set: ++ assert proxy == PROXY_ADDRESS ++ ++ assert proxy_set == bool(proxy) ++ ++ ++@pytest.mark.parametrize( ++ ('proxy_set_in_dnf_config', 'proxy_set_in_yum_config', 'expected_output'), ++ [ ++ (True, True, [PROXY_ADDRESS]), ++ (True, False, [PROXY_ADDRESS]), ++ (False, False, []) ++ ] ++) ++def test_get_configured_proxies(monkeypatch, proxy_set_in_dnf_config, proxy_set_in_yum_config, expected_output): ++ ++ def mocked_get_proxy_if_set(path): ++ proxy = PROXY_ADDRESS if proxy_set_in_yum_config else None ++ if path == DNF_CONFIG_PATH: ++ proxy = PROXY_ADDRESS if proxy_set_in_dnf_config else None ++ return proxy ++ ++ monkeypatch.setattr(scanpkgmanager, '_get_proxy_if_set', mocked_get_proxy_if_set) ++ ++ configured_proxies = scanpkgmanager.get_configured_proxies() ++ assert configured_proxies == expected_output +diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/actor.py b/repos/system_upgrade/common/actors/targetuserspacecreator/actor.py +index 7e5c7db7..04fb2e8b 100644 +--- a/repos/system_upgrade/common/actors/targetuserspacecreator/actor.py ++++ b/repos/system_upgrade/common/actors/targetuserspacecreator/actor.py +@@ -5,7 +5,9 @@ from leapp.models import RequiredTargetUserspacePackages # deprecated + from leapp.models import TMPTargetRepositoriesFacts # deprecated all the time + from leapp.models import ( + CustomTargetRepositoryFile, ++ PkgManagerInfo, + Report, ++ 
RepositoriesFacts, + RepositoriesMapping, + RHSMInfo, + RHUIInfo, +@@ -36,12 +38,14 @@ class TargetUserspaceCreator(Actor): + CustomTargetRepositoryFile, + RHSMInfo, + RHUIInfo, ++ RepositoriesFacts, + RepositoriesMapping, + RequiredTargetUserspacePackages, + StorageInfo, + TargetRepositories, + TargetUserSpacePreupgradeTasks, + XFSPresence, ++ PkgManagerInfo, + ) + produces = (TargetUserSpaceInfo, UsedTargetRepositories, Report, TMPTargetRepositoriesFacts,) + tags = (IPUWorkflowTag, TargetTransactionFactsPhaseTag) +diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py +index c39af66f..00acacd9 100644 +--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py ++++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py +@@ -12,6 +12,8 @@ from leapp.models import RequiredTargetUserspacePackages # deprecated + from leapp.models import TMPTargetRepositoriesFacts # deprecated + from leapp.models import ( + CustomTargetRepositoryFile, ++ PkgManagerInfo, ++ RepositoriesFacts, + RHSMInfo, + RHUIInfo, + StorageInfo, +@@ -166,10 +168,24 @@ def prepare_target_userspace(context, userspace_dir, enabled_repos, packages): + try: + context.call(cmd, callback_raw=utils.logging_handler) + except CalledProcessError as exc: +- raise StopActorExecutionError( +- message='Unable to install RHEL {} userspace packages.'.format(target_major_version), +- details={'details': str(exc), 'stderr': exc.stderr} +- ) ++ message = 'Unable to install RHEL {} userspace packages.'.format(target_major_version) ++ details = {'details': str(exc), 'stderr': exc.stderr} ++ ++ # If a proxy was set in dnf config, it should be the reason why dnf ++ # failed since leapp does not support updates behind proxy yet. 
++ for manager_info in api.consume(PkgManagerInfo): ++ if manager_info.configured_proxies: ++ details['details'] = ("DNF failed to install userspace packages, likely due to the proxy " ++ "configuration detected in the YUM/DNF configuration file.") ++ ++ # Similarly if a proxy was set specifically for one of the repositories. ++ for repo_facts in api.consume(RepositoriesFacts): ++ for repo_file in repo_facts.repositories: ++ if any(repo_data.proxy and repo_data.enabled for repo_data in repo_file.data): ++ details['details'] = ("DNF failed to install userspace packages, likely due to the proxy " ++ "configuration detected in a repository configuration file.") ++ ++ raise StopActorExecutionError(message=message, details=details) + + + def _get_all_rhui_pkgs(): +diff --git a/repos/system_upgrade/common/models/packagemanagerinfo.py b/repos/system_upgrade/common/models/packagemanagerinfo.py +index ba6391c3..aa450978 100644 +--- a/repos/system_upgrade/common/models/packagemanagerinfo.py ++++ b/repos/system_upgrade/common/models/packagemanagerinfo.py +@@ -17,3 +17,8 @@ class PkgManagerInfo(Model): + In case the value is empty string, it means the file exists but it is empty. In such a case the + original configuration is obviously broken. + """ ++ ++ configured_proxies = fields.List(fields.String(), default=[]) ++ """ ++ A sorted list of proxies present in yum and dnf configuration files. 
++ """ +diff --git a/repos/system_upgrade/common/models/repositoriesfacts.py b/repos/system_upgrade/common/models/repositoriesfacts.py +index 722c579f..cd2124fc 100644 +--- a/repos/system_upgrade/common/models/repositoriesfacts.py ++++ b/repos/system_upgrade/common/models/repositoriesfacts.py +@@ -13,6 +13,7 @@ class RepositoryData(Model): + mirrorlist = fields.Nullable(fields.String()) + enabled = fields.Boolean(default=True) + additional_fields = fields.Nullable(fields.String()) ++ proxy = fields.Nullable(fields.String()) + + + class RepositoryFile(Model): +-- +2.38.1 + diff --git a/SOURCES/0016-Merge-of-the-yumconfigscanner-actor-into-the-scanpkg.patch b/SOURCES/0016-Merge-of-the-yumconfigscanner-actor-into-the-scanpkg.patch new file mode 100644 index 0000000..77dcd1c --- /dev/null +++ b/SOURCES/0016-Merge-of-the-yumconfigscanner-actor-into-the-scanpkg.patch @@ -0,0 +1,380 @@ +From b4c3de448324a35da8b92905c04cc169430cf4a0 Mon Sep 17 00:00:00 2001 +From: PeterMocary +Date: Sun, 26 Jun 2022 13:56:24 +0200 +Subject: [PATCH 16/32] Merge of the yumconfigscanner actor into the + scanpkgmanager actor + +--- + .../actors/checkyumpluginsenabled/actor.py | 8 ++-- + .../libraries/checkyumpluginsenabled.py | 6 +-- + .../tests/test_checkyumpluginsenabled.py | 6 +-- + .../libraries/pluginscanner.py} | 48 +++++++------------ + .../libraries/scanpkgmanager.py | 6 ++- + .../tests/test_pluginscanner.py} | 26 +++++----- + .../tests/test_scanpkgmanager.py | 2 +- + .../common/actors/yumconfigscanner/actor.py | 18 ------- + .../common/models/packagemanagerinfo.py | 2 + + .../system_upgrade/common/models/yumconfig.py | 8 ---- + 10 files changed, 48 insertions(+), 82 deletions(-) + rename repos/system_upgrade/common/actors/{yumconfigscanner/libraries/yumconfigscanner.py => scanpkgmanager/libraries/pluginscanner.py} (56%) + rename repos/system_upgrade/common/actors/{yumconfigscanner/tests/test_yumconfigscanner.py => scanpkgmanager/tests/test_pluginscanner.py} (74%) + delete mode 100644 
repos/system_upgrade/common/actors/yumconfigscanner/actor.py + delete mode 100644 repos/system_upgrade/common/models/yumconfig.py + +diff --git a/repos/system_upgrade/common/actors/checkyumpluginsenabled/actor.py b/repos/system_upgrade/common/actors/checkyumpluginsenabled/actor.py +index c6872fa7..fbc2f8bc 100644 +--- a/repos/system_upgrade/common/actors/checkyumpluginsenabled/actor.py ++++ b/repos/system_upgrade/common/actors/checkyumpluginsenabled/actor.py +@@ -1,6 +1,6 @@ + from leapp.actors import Actor + from leapp.libraries.actor.checkyumpluginsenabled import check_required_yum_plugins_enabled +-from leapp.models import YumConfig ++from leapp.models import PkgManagerInfo + from leapp.reporting import Report + from leapp.tags import ChecksPhaseTag, IPUWorkflowTag + +@@ -11,10 +11,10 @@ class CheckYumPluginsEnabled(Actor): + """ + + name = 'check_yum_plugins_enabled' +- consumes = (YumConfig,) ++ consumes = (PkgManagerInfo,) + produces = (Report,) + tags = (ChecksPhaseTag, IPUWorkflowTag) + + def process(self): +- yum_config = next(self.consume(YumConfig)) +- check_required_yum_plugins_enabled(yum_config) ++ pkg_manager_info = next(self.consume(PkgManagerInfo)) ++ check_required_yum_plugins_enabled(pkg_manager_info) +diff --git a/repos/system_upgrade/common/actors/checkyumpluginsenabled/libraries/checkyumpluginsenabled.py b/repos/system_upgrade/common/actors/checkyumpluginsenabled/libraries/checkyumpluginsenabled.py +index 7c7398df..48f38d0a 100644 +--- a/repos/system_upgrade/common/actors/checkyumpluginsenabled/libraries/checkyumpluginsenabled.py ++++ b/repos/system_upgrade/common/actors/checkyumpluginsenabled/libraries/checkyumpluginsenabled.py +@@ -10,16 +10,16 @@ REQUIRED_YUM_PLUGINS = {'subscription-manager', 'product-id'} + FMT_LIST_SEPARATOR = '\n - ' + + +-def check_required_yum_plugins_enabled(yum_config): ++def check_required_yum_plugins_enabled(pkg_manager_info): + """ + Checks whether the yum plugins required by the IPU are enabled. 
+ + If they are not enabled, a report is produced informing the user about it. + +- :param yum_config: YumConfig ++ :param pkg_manager_info: PkgManagerInfo + """ + +- missing_required_plugins = REQUIRED_YUM_PLUGINS - set(yum_config.enabled_plugins) ++ missing_required_plugins = REQUIRED_YUM_PLUGINS - set(pkg_manager_info.enabled_plugins) + + if skip_rhsm(): + missing_required_plugins -= {'subscription-manager', 'product-id'} +diff --git a/repos/system_upgrade/common/actors/checkyumpluginsenabled/tests/test_checkyumpluginsenabled.py b/repos/system_upgrade/common/actors/checkyumpluginsenabled/tests/test_checkyumpluginsenabled.py +index 896d31d5..9bf9a3ba 100644 +--- a/repos/system_upgrade/common/actors/checkyumpluginsenabled/tests/test_checkyumpluginsenabled.py ++++ b/repos/system_upgrade/common/actors/checkyumpluginsenabled/tests/test_checkyumpluginsenabled.py +@@ -4,7 +4,7 @@ from leapp import reporting + from leapp.libraries.actor.checkyumpluginsenabled import check_required_yum_plugins_enabled + from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked + from leapp.libraries.stdlib import api +-from leapp.models import YumConfig ++from leapp.models import PkgManagerInfo + from leapp.utils.report import is_inhibitor + + +@@ -38,7 +38,7 @@ def test__create_report_mocked(monkeypatch): + + def test_report_when_missing_required_plugins(monkeypatch): + """Test whether a report entry is created when any of the required YUM plugins are missing.""" +- yum_config = YumConfig(enabled_plugins=['product-id', 'some-user-plugin']) ++ yum_config = PkgManagerInfo(enabled_plugins=['product-id', 'some-user-plugin']) + + actor_reports = create_report_mocked() + +@@ -62,7 +62,7 @@ def test_nothing_is_reported_when_rhsm_disabled(monkeypatch): + monkeypatch.setattr(api, 'current_actor', actor_mocked) + monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) + +- yum_config = YumConfig(enabled_plugins=[]) ++ yum_config = 
PkgManagerInfo(enabled_plugins=[]) + check_required_yum_plugins_enabled(yum_config) + + assert not reporting.create_report.called, 'Report was created even if LEAPP_NO_RHSM was set' +diff --git a/repos/system_upgrade/common/actors/yumconfigscanner/libraries/yumconfigscanner.py b/repos/system_upgrade/common/actors/scanpkgmanager/libraries/pluginscanner.py +similarity index 56% +rename from repos/system_upgrade/common/actors/yumconfigscanner/libraries/yumconfigscanner.py +rename to repos/system_upgrade/common/actors/scanpkgmanager/libraries/pluginscanner.py +index 0b7d5fe6..7bb03996 100644 +--- a/repos/system_upgrade/common/actors/yumconfigscanner/libraries/yumconfigscanner.py ++++ b/repos/system_upgrade/common/actors/scanpkgmanager/libraries/pluginscanner.py +@@ -1,26 +1,25 @@ + import re + + from leapp.libraries.common.config.version import get_source_major_version +-from leapp.libraries.stdlib import api, run +-from leapp.models import YumConfig ++from leapp.libraries.stdlib import run + + # When the output spans multiple lines, each of the lines after the first one + # start with a ' : ' +-YUM_LOADED_PLUGINS_NEXT_LINE_START = ' +: ' ++LOADED_PLUGINS_NEXT_LINE_START = ' +: ' + + +-def _parse_loaded_plugins(yum_output): ++def _parse_loaded_plugins(package_manager_output): + """ +- Retrieves a list of plugins that are being loaded when calling yum. ++ Retrieves a list of plugins that are being loaded when calling dnf/yum. + +- :param dict yum_output: The result of running the yum command. ++ :param dict package_manager_output: The result of running the package manager command. + :rtype: list +- :returns: A list of plugins that are being loaded when calling yum. ++ :returns: A list of plugins that are being loaded by the package manager. 
+ """ +- # YUM might break the information about loaded plugins into multiple lines, ++ # Package manager might break the information about loaded plugins into multiple lines, + # we need to concaternate the list ourselves + loaded_plugins_str = '' +- for line in yum_output['stdout']: ++ for line in package_manager_output['stdout']: + if line.startswith('Loaded plugins:'): + # We have found the first line that contains the plugins + plugins_on_this_line = line[16:] # Remove the `Loaded plugins: ` part +@@ -32,7 +31,7 @@ def _parse_loaded_plugins(yum_output): + continue + + if loaded_plugins_str: +- if re.match(YUM_LOADED_PLUGINS_NEXT_LINE_START, line): ++ if re.match(LOADED_PLUGINS_NEXT_LINE_START, line): + # The list of plugins continues on this line + plugins_on_this_line = line.lstrip(' :') # Remove the leading spaces and semicolon + +@@ -49,39 +48,28 @@ def _parse_loaded_plugins(yum_output): + return loaded_plugins_str.split(', ') + + +-def scan_enabled_yum_plugins(): ++def scan_enabled_package_manager_plugins(): + """ +- Runs the `yum` command and parses its output for enabled/loaded plugins. ++ Runs package manager (yum/dnf) command and parses its output for enabled/loaded plugins. + + :return: A list of enabled plugins. + :rtype: List + """ + +- # We rely on yum itself to report what plugins are used when it is invoked. +- # An alternative approach would be to check /usr/lib/yum-plugins/ (install +- # path for yum plugins) and parse corresponding configurations from +- # /etc/yum/pluginconf.d/ ++ # We rely on package manager itself to report what plugins are used when it is invoked. ++ # An alternative approach would be to check the install path for package manager plugins ++ # and parse corresponding plugin configuration files. + + if get_source_major_version() == '7': + # in case of yum, set debuglevel=2 to be sure the output is always + # same. 
The format of data is different for various debuglevels +- yum_cmd = ['yum', '--setopt=debuglevel=2'] ++ cmd = ['yum', '--setopt=debuglevel=2'] + else: + # the verbose mode in dnf always set particular debuglevel, so the + # output is not affected by the default debug level set on the + # system +- yum_cmd = ['dnf', '-v'] # On RHEL8 we need to supply an extra switch ++ cmd = ['dnf', '-v'] # On RHEL8 we need to supply an extra switch + +- yum_output = run(yum_cmd, split=True, checked=False) # The yum command will certainly fail (does not matter). ++ pkg_manager_output = run(cmd, split=True, checked=False) # The command will certainly fail (does not matter). + +- return _parse_loaded_plugins(yum_output) +- +- +-def scan_yum_config(): +- """ +- Scans the YUM configuration and produces :class:`YumConfig` message with the information found. +- """ +- config = YumConfig() +- config.enabled_plugins = scan_enabled_yum_plugins() +- +- api.produce(config) ++ return _parse_loaded_plugins(pkg_manager_output) +diff --git a/repos/system_upgrade/common/actors/scanpkgmanager/libraries/scanpkgmanager.py b/repos/system_upgrade/common/actors/scanpkgmanager/libraries/scanpkgmanager.py +index 7c97fb1a..bf7ec0be 100644 +--- a/repos/system_upgrade/common/actors/scanpkgmanager/libraries/scanpkgmanager.py ++++ b/repos/system_upgrade/common/actors/scanpkgmanager/libraries/scanpkgmanager.py +@@ -1,6 +1,7 @@ + import os + import re + ++from leapp.libraries.actor import pluginscanner + from leapp.libraries.common.config.version import get_source_major_version + from leapp.libraries.stdlib import api + from leapp.models import PkgManagerInfo +@@ -43,9 +44,9 @@ def _get_proxy_if_set(manager_config_path): + """ + Get proxy address from specified package manager config. 
+ +- :param manager_config_path: path to a package manager config ++ :param str manager_config_path: path to a package manager config + :returns: proxy address or None when not set +- :rtype: String ++ :rtype: str + """ + + config = _get_config_contents(manager_config_path) +@@ -79,5 +80,6 @@ def process(): + pkg_manager_info = PkgManagerInfo() + pkg_manager_info.etc_releasever = get_etc_releasever() + pkg_manager_info.configured_proxies = get_configured_proxies() ++ pkg_manager_info.enabled_plugins = pluginscanner.scan_enabled_package_manager_plugins() + + api.produce(pkg_manager_info) +diff --git a/repos/system_upgrade/common/actors/yumconfigscanner/tests/test_yumconfigscanner.py b/repos/system_upgrade/common/actors/scanpkgmanager/tests/test_pluginscanner.py +similarity index 74% +rename from repos/system_upgrade/common/actors/yumconfigscanner/tests/test_yumconfigscanner.py +rename to repos/system_upgrade/common/actors/scanpkgmanager/tests/test_pluginscanner.py +index 8406ef00..f0260e54 100644 +--- a/repos/system_upgrade/common/actors/yumconfigscanner/tests/test_yumconfigscanner.py ++++ b/repos/system_upgrade/common/actors/scanpkgmanager/tests/test_pluginscanner.py +@@ -1,6 +1,6 @@ + import pytest + +-from leapp.libraries.actor import yumconfigscanner ++from leapp.libraries.actor import pluginscanner + + CMD_YUM_OUTPUT = '''Loaded plugins: langpacks, my plugin, subscription-manager, product-id + Usage: yum [options] COMMAND +@@ -16,23 +16,23 @@ Usage: yum [options] COMMAND + + + def assert_plugins_identified_as_enabled(expected_plugins, identified_plugins): +- fail_description = 'Failed to parse a plugin from the yum output.' ++ fail_description = 'Failed to parse a plugin from the package manager output.' 
+ for expected_enabled_plugin in expected_plugins: + assert expected_enabled_plugin in identified_plugins, fail_description + + + @pytest.mark.parametrize( +- ('source_major_version', 'yum_command'), ++ ('source_major_version', 'command'), + [ + ('7', ['yum', '--setopt=debuglevel=2']), + ('8', ['dnf', '-v']), + ] + ) +-def test_scan_enabled_plugins(monkeypatch, source_major_version, yum_command): +- """Tests whether the enabled plugins are correctly retrieved from the yum output.""" ++def test_scan_enabled_plugins(monkeypatch, source_major_version, command): ++ """Tests whether the enabled plugins are correctly retrieved from the package manager output.""" + + def run_mocked(cmd, **kwargs): +- if cmd == yum_command: ++ if cmd == command: + return { + 'stdout': CMD_YUM_OUTPUT.split('\n'), + 'stderr': 'You need to give some command', +@@ -45,10 +45,10 @@ def test_scan_enabled_plugins(monkeypatch, source_major_version, yum_command): + + # The library imports `run` all the way into its namespace (from ...stdlib import run), + # we must overwrite it there then: +- monkeypatch.setattr(yumconfigscanner, 'run', run_mocked) +- monkeypatch.setattr(yumconfigscanner, 'get_source_major_version', get_source_major_version_mocked) ++ monkeypatch.setattr(pluginscanner, 'run', run_mocked) ++ monkeypatch.setattr(pluginscanner, 'get_source_major_version', get_source_major_version_mocked) + +- enabled_plugins = yumconfigscanner.scan_enabled_yum_plugins() ++ enabled_plugins = pluginscanner.scan_enabled_package_manager_plugins() + assert_plugins_identified_as_enabled( + ['langpacks', 'my plugin', 'subscription-manager', 'product-id'], + enabled_plugins +@@ -63,7 +63,7 @@ def test_scan_enabled_plugins(monkeypatch, source_major_version, yum_command): + (CMD_YUM_OUTPUT_MULTILINE_BREAK_ON_WHITESPACE,) + ]) + def test_yum_loaded_plugins_multiline_output(yum_output, monkeypatch): +- """Tests whether the library correctly handles yum plugins getting reported on multiple lines.""" ++ """Tests 
whether the library correctly handles plugins getting reported on multiple lines.""" + def run_mocked(cmd, **kwargs): + return { + 'stdout': yum_output.split('\n'), +@@ -71,10 +71,10 @@ def test_yum_loaded_plugins_multiline_output(yum_output, monkeypatch): + 'exit_code': 1 + } + +- monkeypatch.setattr(yumconfigscanner, 'run', run_mocked) +- monkeypatch.setattr(yumconfigscanner, 'get_source_major_version', lambda: '7') ++ monkeypatch.setattr(pluginscanner, 'run', run_mocked) ++ monkeypatch.setattr(pluginscanner, 'get_source_major_version', lambda: '7') + +- enabled_plugins = yumconfigscanner.scan_enabled_yum_plugins() ++ enabled_plugins = pluginscanner.scan_enabled_package_manager_plugins() + + assert len(enabled_plugins) == 4, 'Identified more yum plugins than available in the mocked yum output.' + assert_plugins_identified_as_enabled( +diff --git a/repos/system_upgrade/common/actors/scanpkgmanager/tests/test_scanpkgmanager.py b/repos/system_upgrade/common/actors/scanpkgmanager/tests/test_scanpkgmanager.py +index e78b532f..75c5c5ba 100644 +--- a/repos/system_upgrade/common/actors/scanpkgmanager/tests/test_scanpkgmanager.py ++++ b/repos/system_upgrade/common/actors/scanpkgmanager/tests/test_scanpkgmanager.py +@@ -3,7 +3,7 @@ import os + import pytest + + from leapp.libraries import stdlib +-from leapp.libraries.actor import scanpkgmanager ++from leapp.libraries.actor import pluginscanner, scanpkgmanager + from leapp.libraries.common import testutils + from leapp.libraries.common.testutils import CurrentActorMocked, produce_mocked + from leapp.libraries.stdlib import api +diff --git a/repos/system_upgrade/common/actors/yumconfigscanner/actor.py b/repos/system_upgrade/common/actors/yumconfigscanner/actor.py +deleted file mode 100644 +index 95aee415..00000000 +--- a/repos/system_upgrade/common/actors/yumconfigscanner/actor.py ++++ /dev/null +@@ -1,18 +0,0 @@ +-from leapp.actors import Actor +-from leapp.libraries.actor.yumconfigscanner import scan_yum_config +-from 
leapp.models import YumConfig +-from leapp.tags import ChecksPhaseTag, IPUWorkflowTag +- +- +-class YumConfigScanner(Actor): +- """ +- Scans the configuration of the YUM package manager. +- """ +- +- name = 'yum_config_scanner' +- consumes = () +- produces = (YumConfig,) +- tags = (IPUWorkflowTag, ChecksPhaseTag) +- +- def process(self): +- scan_yum_config() +diff --git a/repos/system_upgrade/common/models/packagemanagerinfo.py b/repos/system_upgrade/common/models/packagemanagerinfo.py +index aa450978..bf969338 100644 +--- a/repos/system_upgrade/common/models/packagemanagerinfo.py ++++ b/repos/system_upgrade/common/models/packagemanagerinfo.py +@@ -22,3 +22,5 @@ class PkgManagerInfo(Model): + """ + A sorted list of proxies present in yum and dnf configuration files. + """ ++ ++ enabled_plugins = fields.List(fields.String(), default=[]) +diff --git a/repos/system_upgrade/common/models/yumconfig.py b/repos/system_upgrade/common/models/yumconfig.py +deleted file mode 100644 +index 506ce47e..00000000 +--- a/repos/system_upgrade/common/models/yumconfig.py ++++ /dev/null +@@ -1,8 +0,0 @@ +-from leapp.models import fields, Model +-from leapp.topics import SystemFactsTopic +- +- +-class YumConfig(Model): +- topic = SystemFactsTopic +- +- enabled_plugins = fields.List(fields.String(), default=[]) +-- +2.38.1 + diff --git a/SOURCES/0017-firewalldcheckallowzonedrifting-Fix-the-remediation-.patch b/SOURCES/0017-firewalldcheckallowzonedrifting-Fix-the-remediation-.patch new file mode 100644 index 0000000..ba4a2cb --- /dev/null +++ b/SOURCES/0017-firewalldcheckallowzonedrifting-Fix-the-remediation-.patch @@ -0,0 +1,30 @@ +From 279ebc96c45ab597f9c26903f5b36e2e57ced6fe Mon Sep 17 00:00:00 2001 +From: Petr Stodulka +Date: Fri, 30 Sep 2022 11:17:38 +0200 +Subject: [PATCH 17/32] firewalldcheckallowzonedrifting: Fix the remediation + cmd + +The remediation cmd was incorrect as the cmd is written as string +instead of list, the fix: + ['cmd param param'] -> ['cmd', 'paramm', 'param'] + 
+JIRA: OAMG-7694 +--- + .../el8toel9/actors/firewalldcheckallowzonedrifting/actor.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/repos/system_upgrade/el8toel9/actors/firewalldcheckallowzonedrifting/actor.py b/repos/system_upgrade/el8toel9/actors/firewalldcheckallowzonedrifting/actor.py +index 1f2767f5..b7eb5806 100644 +--- a/repos/system_upgrade/el8toel9/actors/firewalldcheckallowzonedrifting/actor.py ++++ b/repos/system_upgrade/el8toel9/actors/firewalldcheckallowzonedrifting/actor.py +@@ -46,6 +46,6 @@ class FirewalldCheckAllowZoneDrifting(Actor): + title='Changes in firewalld related to Zone Drifting'), + reporting.Remediation( + hint='Set AllowZoneDrifting=no in /etc/firewalld/firewalld.conf', +- commands=[['sed -i "s/^AllowZoneDrifting=.*/AllowZoneDrifting=no/" ' ++ commands=[['sed', '-i', 's/^AllowZoneDrifting=.*/AllowZoneDrifting=no/', + '/etc/firewalld/firewalld.conf']]), + ]) +-- +2.38.1 + diff --git a/SOURCES/0005-rhui-azure-sap-apps-consider-RHUI-client-as-signed.patch b/SOURCES/0018-rhui-azure-sap-apps-consider-RHUI-client-as-signed.patch similarity index 93% rename from SOURCES/0005-rhui-azure-sap-apps-consider-RHUI-client-as-signed.patch rename to SOURCES/0018-rhui-azure-sap-apps-consider-RHUI-client-as-signed.patch index 0d0e5d3..db65308 100644 --- a/SOURCES/0005-rhui-azure-sap-apps-consider-RHUI-client-as-signed.patch +++ b/SOURCES/0018-rhui-azure-sap-apps-consider-RHUI-client-as-signed.patch @@ -1,7 +1,7 @@ From 1c6388139695aefb02daa7b5cb13e628f03eab43 Mon Sep 17 00:00:00 2001 From: Michal Hecko Date: Mon, 17 Oct 2022 12:59:22 +0200 -Subject: [PATCH] rhui(azure-sap-apps): consider RHUI client as signed +Subject: [PATCH 18/32] rhui(azure-sap-apps): consider RHUI client as signed --- .../common/actors/redhatsignedrpmscanner/actor.py | 2 +- @@ -21,5 +21,5 @@ index dd6db7c9..647805cd 100644 rhui.RHUI_CLOUD_MAP[upg_path].get(flavour, {}).get('src_pkg') for flavour in whitelisted_cloud_flavours } -- -2.37.3 +2.38.1 diff --git 
a/SOURCES/0006-rhui-azure-sap-apps-handle-EUS-SAP-Apps-content-on-R.patch b/SOURCES/0019-rhui-azure-sap-apps-handle-EUS-SAP-Apps-content-on-R.patch similarity index 96% rename from SOURCES/0006-rhui-azure-sap-apps-handle-EUS-SAP-Apps-content-on-R.patch rename to SOURCES/0019-rhui-azure-sap-apps-handle-EUS-SAP-Apps-content-on-R.patch index 014782b..906b57e 100644 --- a/SOURCES/0006-rhui-azure-sap-apps-handle-EUS-SAP-Apps-content-on-R.patch +++ b/SOURCES/0019-rhui-azure-sap-apps-handle-EUS-SAP-Apps-content-on-R.patch @@ -1,7 +1,8 @@ From a2f35c0aa4e00936e58c17a94d4f1507a3287c72 Mon Sep 17 00:00:00 2001 From: Michal Hecko Date: Mon, 17 Oct 2022 12:59:22 +0200 -Subject: [PATCH] rhui(azure-sap-apps): handle EUS SAP Apps content on RHEL8+ +Subject: [PATCH 19/32] rhui(azure-sap-apps): handle EUS SAP Apps content on + RHEL8+ --- .../common/actors/cloud/checkrhui/actor.py | 9 ++++++++- @@ -37,5 +38,5 @@ index 822c7535..a56bb1e1 100644 self.produce(RHUIInfo(provider=provider)) self.produce(RequiredTargetUserspacePackages(packages=[info['target_pkg']])) -- -2.37.3 +2.38.1 diff --git a/SOURCES/0007-checksaphana-Move-to-common.patch b/SOURCES/0020-checksaphana-Move-to-common.patch similarity index 96% rename from SOURCES/0007-checksaphana-Move-to-common.patch rename to SOURCES/0020-checksaphana-Move-to-common.patch index 108548a..1b3857e 100644 --- a/SOURCES/0007-checksaphana-Move-to-common.patch +++ b/SOURCES/0020-checksaphana-Move-to-common.patch @@ -1,7 +1,7 @@ From a06e248faa3b336c09ee6137eee54a1a0256162b Mon Sep 17 00:00:00 2001 From: Vinzenz Feenstra Date: Wed, 19 Oct 2022 21:05:00 +0200 -Subject: [PATCH] checksaphana: Move to common +Subject: [PATCH 20/32] checksaphana: Move to common We need to start handling also el8 to el9 upgrades now. 
@@ -28,5 +28,5 @@ similarity index 100% rename from repos/system_upgrade/el7toel8/actors/checksaphana/tests/test_checksaphana.py rename to repos/system_upgrade/common/actors/checksaphana/tests/test_checksaphana.py -- -2.37.3 +2.38.1 diff --git a/SOURCES/0008-checksaphana-Adjust-for-el7toel8-and-el8toel9-requir.patch b/SOURCES/0021-checksaphana-Adjust-for-el7toel8-and-el8toel9-requir.patch similarity index 98% rename from SOURCES/0008-checksaphana-Adjust-for-el7toel8-and-el8toel9-requir.patch rename to SOURCES/0021-checksaphana-Adjust-for-el7toel8-and-el8toel9-requir.patch index 2864e50..20a06d9 100644 --- a/SOURCES/0008-checksaphana-Adjust-for-el7toel8-and-el8toel9-requir.patch +++ b/SOURCES/0021-checksaphana-Adjust-for-el7toel8-and-el8toel9-requir.patch @@ -1,7 +1,8 @@ From b716765e638156c9a5cb21a474d1203b695acf8d Mon Sep 17 00:00:00 2001 From: Vinzenz Feenstra Date: Wed, 19 Oct 2022 21:42:14 +0200 -Subject: [PATCH] checksaphana: Adjust for el7toel8 and el8toel9 requirements +Subject: [PATCH 21/32] checksaphana: Adjust for el7toel8 and el8toel9 + requirements Previously only upgrade from el7toel8 were supported for SAP Hana. 
This patch will introduce the adjustments necessary to allow the @@ -104,7 +105,7 @@ index e540ccd1..564d86b8 100644 @@ -134,7 +154,13 @@ def _sp_rev_patchlevel_check(instance): def _fullfills_hana_min_version(instance): - """ Performs a check whether the version of SAP HANA fullfills the minimal requirements for the target RHEL """ + """ Performs a check whether the version of SAP HANA fulfills the minimal requirements for the target RHEL """ - return _major_version_check(instance) and _sp_rev_patchlevel_check(instance) + if version.get_target_major_version() == '8': + patchlevels = SAP_HANA_RHEL86_REQUIRED_PATCH_LEVELS @@ -272,5 +273,5 @@ index 3f1d4230..6f61d0bf 100644 monkeypatch.setattr(checksaphana.api, 'consume', _consume_mock_sap_hana_info( v1names=v1names, v2names=v2names, v2lownames=v2lownames, running=True)) -- -2.37.3 +2.38.1 diff --git a/SOURCES/0022-Add-an-actor-that-enables-device_cio_free.service-on.patch b/SOURCES/0022-Add-an-actor-that-enables-device_cio_free.service-on.patch new file mode 100644 index 0000000..b854790 --- /dev/null +++ b/SOURCES/0022-Add-an-actor-that-enables-device_cio_free.service-on.patch @@ -0,0 +1,105 @@ +From 6ec9f0adf2e9197e68a1919c1f69344fb0534eca Mon Sep 17 00:00:00 2001 +From: Matej Matuska +Date: Wed, 19 Oct 2022 14:15:41 +0200 +Subject: [PATCH 22/32] Add an actor that enables device_cio_free.service on + s390x + +After an IPU the device_cio_free.service systemd service (exclusive to +s390x) is disabled even though the vendor preset is set to disable. The +new actor instruct to enable the service during the IPU. + +The service is used to enable devices not explicitly enabled on kernel +command line. 
+ +Jira ref.: OAMG-6302 +--- + .../enableddeviceciofreeservices390/actor.py | 21 ++++++++++++ + .../libraries/enabledeviceciofreeservice.py | 8 +++++ + .../tests/test_enableddeviceciofreeservice.py | 32 +++++++++++++++++++ + 3 files changed, 61 insertions(+) + create mode 100644 repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/actor.py + create mode 100644 repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/libraries/enabledeviceciofreeservice.py + create mode 100644 repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/tests/test_enableddeviceciofreeservice.py + +diff --git a/repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/actor.py b/repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/actor.py +new file mode 100644 +index 00000000..4928710e +--- /dev/null ++++ b/repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/actor.py +@@ -0,0 +1,21 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import enabledeviceciofreeservice ++from leapp.models import SystemdServicesTasks ++from leapp.tags import ChecksPhaseTag, IPUWorkflowTag ++ ++ ++class EnableDeviceCioFreeService(Actor): ++ """ ++ Enables device_cio_free.service systemd service on s390x ++ ++ After an upgrade this service ends up disabled even though it's vendor preset is set to enabled. ++ The service is used to enable devices which are not explicitly enabled on the kernel command line. 
++ """ ++ ++ name = 'enable_device_cio_free_service' ++ consumes = () ++ produces = (SystemdServicesTasks,) ++ tags = (ChecksPhaseTag, IPUWorkflowTag) ++ ++ def process(self): ++ enabledeviceciofreeservice.process() +diff --git a/repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/libraries/enabledeviceciofreeservice.py b/repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/libraries/enabledeviceciofreeservice.py +new file mode 100644 +index 00000000..97e36f10 +--- /dev/null ++++ b/repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/libraries/enabledeviceciofreeservice.py +@@ -0,0 +1,8 @@ ++from leapp.libraries.common.config import architecture ++from leapp.libraries.stdlib import api ++from leapp.models import SystemdServicesTasks ++ ++ ++def process(): ++ if architecture.matches_architecture(architecture.ARCH_S390X): ++ api.produce(SystemdServicesTasks(to_enable=['device_cio_free.service'])) +diff --git a/repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/tests/test_enableddeviceciofreeservice.py b/repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/tests/test_enableddeviceciofreeservice.py +new file mode 100644 +index 00000000..42527595 +--- /dev/null ++++ b/repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/tests/test_enableddeviceciofreeservice.py +@@ -0,0 +1,32 @@ ++import pytest ++ ++from leapp.libraries.actor import enabledeviceciofreeservice ++from leapp.libraries.common.config import architecture ++from leapp.libraries.common.testutils import CurrentActorMocked, produce_mocked ++from leapp.libraries.stdlib import api ++from leapp.models import SystemdServicesTasks ++ ++ ++def test_task_produced_on_s390(monkeypatch): ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch=architecture.ARCH_S390X)) ++ monkeypatch.setattr(api, "produce", produce_mocked()) ++ ++ enabledeviceciofreeservice.process() ++ ++ assert api.produce.called ++ assert 
isinstance(api.produce.model_instances[0], SystemdServicesTasks) ++ assert api.produce.model_instances[0].to_enable == ['device_cio_free.service'] ++ ++ ++@pytest.mark.parametrize('arch', [ ++ architecture.ARCH_X86_64, ++ architecture.ARCH_ARM64, ++ architecture.ARCH_PPC64LE, ++]) ++def test_task_not_produced_on_non_s390(monkeypatch, arch): ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch=arch)) ++ monkeypatch.setattr(api, "produce", produce_mocked()) ++ ++ enabledeviceciofreeservice.process() ++ ++ assert not api.produce.called +-- +2.38.1 + diff --git a/SOURCES/0023-Add-the-scanzfcp-actor-handling-the-IPU-with-ZFCP-s3.patch b/SOURCES/0023-Add-the-scanzfcp-actor-handling-the-IPU-with-ZFCP-s3.patch new file mode 100644 index 0000000..a0cab2e --- /dev/null +++ b/SOURCES/0023-Add-the-scanzfcp-actor-handling-the-IPU-with-ZFCP-s3.patch @@ -0,0 +1,240 @@ +From 3b5f7416d5f680cbeb777ba4ba33a4bd4787d6f6 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Renaud=20M=C3=A9trich?= +Date: Mon, 7 Nov 2022 09:26:45 +0100 +Subject: [PATCH 23/32] Add the scanzfcp actor handling the IPU with ZFCP + (s390x) +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +When having systems configured with ZFCP instead of DASD, the disks +are not seen while rebooting because `/etc/zfcp.conf` is missing +in the initramfs. + +When the file exists, it's copied inside the userspace container +and installed in the upgrade initramfs, producing + TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks +messages. 
+ +pstodulk: updated unit-tests in the scanzfcp and scandasd actors + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2140563 + +Signed-off-by: Renaud Métrich +--- + .../actors/scandasd/libraries/scandasd.py | 4 +- + .../scandasd/tests/unit_test_scandasd.py | 23 +++++--- + .../common/actors/scanzfcp/actor.py | 24 ++++++++ + .../actors/scanzfcp/libraries/scanzfcp.py | 25 ++++++++ + .../scanzfcp/tests/unit_test_scanzfcp.py | 59 +++++++++++++++++++ + 5 files changed, 124 insertions(+), 11 deletions(-) + create mode 100644 repos/system_upgrade/common/actors/scanzfcp/actor.py + create mode 100644 repos/system_upgrade/common/actors/scanzfcp/libraries/scanzfcp.py + create mode 100644 repos/system_upgrade/common/actors/scanzfcp/tests/unit_test_scanzfcp.py + +diff --git a/repos/system_upgrade/common/actors/scandasd/libraries/scandasd.py b/repos/system_upgrade/common/actors/scandasd/libraries/scandasd.py +index 3e1cba66..ff3104d4 100644 +--- a/repos/system_upgrade/common/actors/scandasd/libraries/scandasd.py ++++ b/repos/system_upgrade/common/actors/scandasd/libraries/scandasd.py +@@ -18,8 +18,8 @@ def process(): + copy_files = [CopyFile(src=DASD_CONF)] + api.produce(UpgradeInitramfsTasks(include_files=[DASD_CONF])) + else: +- api.current_logger().warning( +- "The {} file has not been discovered. DASD not used?" ++ api.current_logger().info( ++ "The {} file has not been discovered. DASD not used." 
+ .format(DASD_CONF) + ) + api.produce(TargetUserSpaceUpgradeTasks(copy_files=copy_files, install_rpms=['s390utils-core'])) +diff --git a/repos/system_upgrade/common/actors/scandasd/tests/unit_test_scandasd.py b/repos/system_upgrade/common/actors/scandasd/tests/unit_test_scandasd.py +index e4eea10c..af8f951b 100644 +--- a/repos/system_upgrade/common/actors/scandasd/tests/unit_test_scandasd.py ++++ b/repos/system_upgrade/common/actors/scandasd/tests/unit_test_scandasd.py +@@ -3,18 +3,18 @@ import os + import pytest + + from leapp.libraries.actor import scandasd +-from leapp.libraries.common.config.architecture import ARCH_S390X +-from leapp.libraries.common.testutils import logger_mocked, produce_mocked ++from leapp.libraries.common.config import architecture ++from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked, produce_mocked + from leapp.models import CopyFile, TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks + + + def test_dasd_exists(monkeypatch): +- monkeypatch.setattr(scandasd.architecture, 'matches_architecture', lambda dummy: True) ++ monkeypatch.setattr(scandasd.api, 'current_actor', CurrentActorMocked(arch=architecture.ARCH_S390X)) + monkeypatch.setattr(scandasd.api, 'current_logger', logger_mocked()) + monkeypatch.setattr(scandasd.api, 'produce', produce_mocked()) + monkeypatch.setattr(os.path, 'isfile', lambda dummy: True) + scandasd.process() +- assert not scandasd.api.current_logger.warnmsg ++ assert not scandasd.api.current_logger.infomsg + assert scandasd.api.produce.called == 2 + tusut_flag = False + uit_flag = False +@@ -30,12 +30,12 @@ def test_dasd_exists(monkeypatch): + + + def test_dasd_not_found(monkeypatch): +- monkeypatch.setattr(scandasd.architecture, 'matches_architecture', lambda dummy: True) ++ monkeypatch.setattr(scandasd.api, 'current_actor', CurrentActorMocked(arch=architecture.ARCH_S390X)) + monkeypatch.setattr(scandasd.api, 'current_logger', logger_mocked()) + monkeypatch.setattr(os.path, 'isfile', 
lambda dummy: False) + monkeypatch.setattr(scandasd.api, 'produce', produce_mocked()) + scandasd.process() +- assert scandasd.api.current_logger.warnmsg ++ assert scandasd.api.current_logger.infomsg + assert scandasd.api.produce.called == 1 + assert len(scandasd.api.produce.model_instances) == 1 + assert isinstance(scandasd.api.produce.model_instances[0], TargetUserSpaceUpgradeTasks) +@@ -44,11 +44,16 @@ def test_dasd_not_found(monkeypatch): + + + @pytest.mark.parametrize('isfile', [True, False]) +-def test_non_ibmz_arch(monkeypatch, isfile): +- monkeypatch.setattr(scandasd.architecture, 'matches_architecture', lambda dummy: False) ++@pytest.mark.parametrize('arch', [ ++ architecture.ARCH_X86_64, ++ architecture.ARCH_ARM64, ++ architecture.ARCH_PPC64LE, ++]) ++def test_non_ibmz_arch(monkeypatch, isfile, arch): ++ monkeypatch.setattr(scandasd.api, 'current_actor', CurrentActorMocked(arch=arch)) + monkeypatch.setattr(scandasd.api, 'current_logger', logger_mocked()) + monkeypatch.setattr(scandasd.api, 'produce', produce_mocked()) + monkeypatch.setattr(os.path, 'isfile', lambda dummy: isfile) + scandasd.process() +- assert not scandasd.api.current_logger.warnmsg ++ assert not scandasd.api.current_logger.infomsg + assert not scandasd.api.produce.called +diff --git a/repos/system_upgrade/common/actors/scanzfcp/actor.py b/repos/system_upgrade/common/actors/scanzfcp/actor.py +new file mode 100644 +index 00000000..9817fdc8 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/scanzfcp/actor.py +@@ -0,0 +1,24 @@ ++ ++from leapp.actors import Actor ++from leapp.libraries.actor import scanzfcp ++from leapp.models import TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks ++from leapp.tags import FactsPhaseTag, IPUWorkflowTag ++ ++ ++class ScanZFCP(Actor): ++ """ ++ In case of s390x architecture, check whether ZFCP is used. ++ ++ The current check is based just on existence of the /etc/zfcp.conf file. 
++ If it exists, produce UpgradeInitramfsTasks msg to ensure the file ++ is available inside the target userspace to be able to generate the ++ upgrade init ramdisk correctly. ++ """ ++ ++ name = 'scanzfcp' ++ consumes = () ++ produces = (TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks) ++ tags = (IPUWorkflowTag, FactsPhaseTag) ++ ++ def process(self): ++ scanzfcp.process() +diff --git a/repos/system_upgrade/common/actors/scanzfcp/libraries/scanzfcp.py b/repos/system_upgrade/common/actors/scanzfcp/libraries/scanzfcp.py +new file mode 100644 +index 00000000..72f83f8f +--- /dev/null ++++ b/repos/system_upgrade/common/actors/scanzfcp/libraries/scanzfcp.py +@@ -0,0 +1,25 @@ ++import os ++ ++from leapp.libraries.common.config import architecture ++from leapp.libraries.stdlib import api ++from leapp.models import CopyFile, TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks ++ ++ZFCP_CONF = '/etc/zfcp.conf' ++ ++ ++def process(): ++ if not architecture.matches_architecture(architecture.ARCH_S390X): ++ return ++ copy_files = [] ++ if os.path.isfile(ZFCP_CONF): ++ # the file has to be copied into the targetuserspace container first, ++ # then it can be included into the initramfs ==> both messages are ++ # needed to be produced ++ copy_files = [CopyFile(src=ZFCP_CONF)] ++ api.produce(UpgradeInitramfsTasks(include_files=[ZFCP_CONF])) ++ else: ++ api.current_logger().info( ++ "The {} file has not been discovered. ZFCP not used." 
++ .format(ZFCP_CONF) ++ ) ++ api.produce(TargetUserSpaceUpgradeTasks(copy_files=copy_files, install_rpms=['s390utils-core'])) +diff --git a/repos/system_upgrade/common/actors/scanzfcp/tests/unit_test_scanzfcp.py b/repos/system_upgrade/common/actors/scanzfcp/tests/unit_test_scanzfcp.py +new file mode 100644 +index 00000000..1b1f840c +--- /dev/null ++++ b/repos/system_upgrade/common/actors/scanzfcp/tests/unit_test_scanzfcp.py +@@ -0,0 +1,59 @@ ++import os ++ ++import pytest ++ ++from leapp.libraries.actor import scanzfcp ++from leapp.libraries.common.config import architecture ++from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked, produce_mocked ++from leapp.models import CopyFile, TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks ++ ++ ++def test_zfcp_exists(monkeypatch): ++ monkeypatch.setattr(scanzfcp.api, 'current_actor', CurrentActorMocked(arch=architecture.ARCH_S390X)) ++ monkeypatch.setattr(scanzfcp.api, 'current_logger', logger_mocked()) ++ monkeypatch.setattr(scanzfcp.api, 'produce', produce_mocked()) ++ monkeypatch.setattr(os.path, 'isfile', lambda dummy: True) ++ scanzfcp.process() ++ assert not scanzfcp.api.current_logger.infomsg ++ assert scanzfcp.api.produce.called == 2 ++ tusut_flag = False ++ uit_flag = False ++ for msg in scanzfcp.api.produce.model_instances: ++ if isinstance(msg, TargetUserSpaceUpgradeTasks): ++ assert [CopyFile(src=scanzfcp.ZFCP_CONF)] == msg.copy_files ++ assert msg.install_rpms == ['s390utils-core'] ++ tusut_flag = True ++ elif isinstance(msg, UpgradeInitramfsTasks): ++ assert [scanzfcp.ZFCP_CONF] == msg.include_files ++ uit_flag = True ++ assert tusut_flag and uit_flag ++ ++ ++def test_zfcp_not_found(monkeypatch): ++ monkeypatch.setattr(scanzfcp.api, 'current_actor', CurrentActorMocked(arch=architecture.ARCH_S390X)) ++ monkeypatch.setattr(scanzfcp.api, 'current_logger', logger_mocked()) ++ monkeypatch.setattr(scanzfcp.os.path, 'isfile', lambda dummy: False) ++ monkeypatch.setattr(scanzfcp.api, 
'produce', produce_mocked()) ++ scanzfcp.process() ++ assert scanzfcp.api.current_logger.infomsg ++ assert scanzfcp.api.produce.called == 1 ++ assert len(scanzfcp.api.produce.model_instances) == 1 ++ assert isinstance(scanzfcp.api.produce.model_instances[0], TargetUserSpaceUpgradeTasks) ++ assert scanzfcp.api.produce.model_instances[0].install_rpms == ['s390utils-core'] ++ assert not scanzfcp.api.produce.model_instances[0].copy_files ++ ++ ++@pytest.mark.parametrize('isfile', [True, False]) ++@pytest.mark.parametrize('arch', [ ++ architecture.ARCH_X86_64, ++ architecture.ARCH_ARM64, ++ architecture.ARCH_PPC64LE, ++]) ++def test_non_ibmz_arch(monkeypatch, isfile, arch): ++ monkeypatch.setattr(scanzfcp.api, 'current_actor', CurrentActorMocked(arch=arch)) ++ monkeypatch.setattr(scanzfcp.api, 'current_logger', logger_mocked()) ++ monkeypatch.setattr(scanzfcp.api, 'produce', produce_mocked()) ++ monkeypatch.setattr(os.path, 'isfile', lambda dummy: isfile) ++ scanzfcp.process() ++ assert not scanzfcp.api.current_logger.infomsg ++ assert not scanzfcp.api.produce.called +-- +2.38.1 + diff --git a/SOURCES/0024-ziplconverttoblscfg-bind-mount-dev-boot-into-the-use.patch b/SOURCES/0024-ziplconverttoblscfg-bind-mount-dev-boot-into-the-use.patch new file mode 100644 index 0000000..7165f9b --- /dev/null +++ b/SOURCES/0024-ziplconverttoblscfg-bind-mount-dev-boot-into-the-use.patch @@ -0,0 +1,118 @@ +From a6445b391a01bf17d3ad8229ca1185b10479f467 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Renaud=20M=C3=A9trich?= +Date: Mon, 7 Nov 2022 09:33:32 +0100 +Subject: [PATCH 24/32] ziplconverttoblscfg: bind mount /dev & /boot into the + userspace container (s390x) +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +The conversion of ZIPL to BLS on IBM Z machines failed when + a) the machine was configured using ZFCP instead of DASD + b) /boot was not on a separate partition + +In case a), the zipl-switch-to-blscfg script failed as the /dev has +not 
been propagated to into the el8userspace container. Regarding +that, the /dev did not contain all required devices needed for the +correct conversion. + +With this fix, the /dev is bindmounted into the el8userspace container +using the (systemd-nspawn) `--bind` option. The direct bind mounting +via `leapp.libraries.common.mounting.BindMount` cannot be used in this +case as it blocks the correct start of the container. + +In case b), the content of /boot has been removed during the upgrade +due to problems when using BindMount on normal directory (that is not +mountpoint). This has been possibly resolved by this commit also, +as the /boot has been propagated using the --bind (sysmd-nspawn) +option as well. (Untested) + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2140563 +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1901440 + +Signed-off-by: Renaud Métrich +--- + .../actors/ziplconverttoblscfg/actor.py | 66 +++++++++---------- + 1 file changed, 33 insertions(+), 33 deletions(-) + +diff --git a/repos/system_upgrade/el7toel8/actors/ziplconverttoblscfg/actor.py b/repos/system_upgrade/el7toel8/actors/ziplconverttoblscfg/actor.py +index e80c335d..441c538b 100644 +--- a/repos/system_upgrade/el7toel8/actors/ziplconverttoblscfg/actor.py ++++ b/repos/system_upgrade/el7toel8/actors/ziplconverttoblscfg/actor.py +@@ -38,40 +38,40 @@ class ZiplConvertToBLSCFG(Actor): + # replace the original boot directory inside the container by the host one + # - as we cannot use zipl* pointing anywhere else than default directory + # - no, --bls-directory is not solution +- with mounting.BindMount(source='/boot', target=os.path.join(userspace.path, 'boot')): ++ # also make sure device nodes are available (requirement for zipl-switch-to-blscfg) ++ binds = ['/boot', '/dev'] ++ with mounting.NspawnActions(base_dir=userspace.path, binds=binds) as context: + userspace_zipl_conf = os.path.join(userspace.path, 'etc', 'zipl.conf') + if os.path.exists(userspace_zipl_conf): + 
os.remove(userspace_zipl_conf) +- with mounting.NullMount(target=userspace.path) as userspace: +- with userspace.nspawn() as context: +- context.copy_to('/etc/zipl.conf', '/etc/zipl.conf') +- # zipl needs this one as well +- context.copy_to('/etc/machine-id', '/etc/machine-id') +- try: +- context.call(['/usr/sbin/zipl-switch-to-blscfg']) +- if filecmp.cmp('/etc/zipl.conf', userspace_zipl_conf): +- # When the files are same, zipl failed - see the switch script +- raise OSError('Failed to convert the ZIPL configuration to BLS.') +- context.copy_from('/etc/zipl.conf', '/etc/zipl.conf') +- except OSError as e: +- self.log.error('Could not call zipl-switch-to-blscfg command.', +- exc_info=True) +- raise StopActorExecutionError( +- message='Failed to execute zipl-switch-to-blscfg.', +- details={'details': str(e)} +- ) +- except CalledProcessError as e: +- self.log.error('zipl-switch-to-blscfg execution failed,', +- exc_info=True) +- raise StopActorExecutionError( +- message='zipl-switch-to-blscfg execution failed with non zero exit code.', +- details={'details': str(e), 'stdout': e.stdout, 'stderr': e.stderr} +- ) ++ context.copy_to('/etc/zipl.conf', '/etc/zipl.conf') ++ # zipl needs this one as well ++ context.copy_to('/etc/machine-id', '/etc/machine-id') ++ try: ++ context.call(['/usr/sbin/zipl-switch-to-blscfg']) ++ if filecmp.cmp('/etc/zipl.conf', userspace_zipl_conf): ++ # When the files are same, zipl failed - see the switch script ++ raise OSError('Failed to convert the ZIPL configuration to BLS.') ++ context.copy_from('/etc/zipl.conf', '/etc/zipl.conf') ++ except OSError as e: ++ self.log.error('Could not call zipl-switch-to-blscfg command.', ++ exc_info=True) ++ raise StopActorExecutionError( ++ message='Failed to execute zipl-switch-to-blscfg.', ++ details={'details': str(e)} ++ ) ++ except CalledProcessError as e: ++ self.log.error('zipl-switch-to-blscfg execution failed,', ++ exc_info=True) ++ raise StopActorExecutionError( ++ message='zipl-switch-to-blscfg 
execution failed with non zero exit code.', ++ details={'details': str(e), 'stdout': e.stdout, 'stderr': e.stderr} ++ ) + +- # FIXME: we do not want to continue anymore, but we should clean +- # better. +- # NOTE: Basically, just removal of the /boot/loader dir content inside +- # could be enough, but we cannot remove /boot/loader because of boom +- # - - if we remove it, we will remove the snapshot as well +- # - - on the other hand, we shouldn't keep it there if zipl +- # - - has not been converted to BLS ++ # FIXME: we do not want to continue anymore, but we should clean ++ # better. ++ # NOTE: Basically, just removal of the /boot/loader dir content inside ++ # could be enough, but we cannot remove /boot/loader because of boom ++ # - - if we remove it, we will remove the snapshot as well ++ # - - on the other hand, we shouldn't keep it there if zipl ++ # - - has not been converted to BLS +-- +2.38.1 + diff --git a/SOURCES/0025-Provide-common-information-about-systemd.patch b/SOURCES/0025-Provide-common-information-about-systemd.patch new file mode 100644 index 0000000..6c8154b --- /dev/null +++ b/SOURCES/0025-Provide-common-information-about-systemd.patch @@ -0,0 +1,1247 @@ +From fac07e2aeb59092871fbd9807e718c6f6da1193d Mon Sep 17 00:00:00 2001 +From: Matej Matuska +Date: Fri, 26 Aug 2022 15:33:44 +0200 +Subject: [PATCH 25/32] Provide common information about systemd + +Introduced new actors providing information about systemd services, +preset files, and broken symlinks. Both for the source and the +target system. All related actors are under + repos/system_upgrade/common/actors/systemd/ +directory. Also it's introduced the systemd shared library +providing basic functionality for the gathering of systemd data. +Currently all functions from the library that are not called by +actors directly are marked as private. Note that provided functions +could raise exceptions. The handling of exceptions is expected to be +done inside actors. 
See docstrings for the set of possible exceptions. + +In case of the source system, all data is valid during the upgrade +and in case the required data cannot be obtained, the upgrade +is interrupted. + +However in case of data about the target system we speak about +a snapshot, how the system looks like in the moment after the upgrade +rpm transaction (data is collected during the Application phase). +The data can be used for the basic overview of the system configuration +after the upgrade transaction and also does not have to be necessary +produced! In case of an error a particular *Target* msg is not +produced, to make clear we could not collect correctly the required +data. But the upgrade is not interrupted in this phase and the errors +are logged only. + +Systemd symlinks (under /etc/systemd/system/) prior the upgrade are +reported during the preupgrade, so administrator could fix them +prior the upgrade. + +It's expected to create post-upgrade reports in future, but currently +skipping this topic until the post-upgrade reports are defined by +the leapp framework. 
+ +Introduced models (messages): + * SystemdBrokenSymlinksSource + * SystemdServicesInfoSource + * SystemdServicesPresetInfoSource + + * SystemdBrokenSymlinksTarget + * SystemdServicesInfoTarget + * SystemdServicesPresetInfoTarget +--- + .../libraries/checksystemdservicetasks.py | 10 +- + .../actors/systemd/scansystemdsource/actor.py | 25 ++ + .../libraries/scansystemdsource.py | 45 +++ + .../tests/test_scansystemdsource.py | 100 +++++++ + .../actors/systemd/scansystemdtarget/actor.py | 28 ++ + .../libraries/scansystemdtarget.py | 37 +++ + .../tests/test_scansystemdtarget.py | 110 ++++++++ + .../common/libraries/systemd.py | 216 ++++++++++++++ + .../common/libraries/tests/00-test.preset | 10 + + .../common/libraries/tests/01-test.preset | 4 + + .../common/libraries/tests/05-invalid.preset | 8 + + .../common/libraries/tests/test_systemd.py | 263 ++++++++++++++++++ + .../tests/test_systemd_files/abc.service | 0 + .../tests/test_systemd_files/example.service | 0 + .../tests/test_systemd_files/example.socket | 0 + .../tests/test_systemd_files/extra.service | 0 + .../test_systemd_files/globbed-one.service | 0 + .../test_systemd_files/globbed-two.service | 0 + .../test_systemd_files/template2@.service | 0 + .../test_systemd_files/template@.service | 0 + repos/system_upgrade/common/models/systemd.py | 155 +++++++++++ + .../common/models/systemdservices.py | 22 -- + 22 files changed, 1005 insertions(+), 28 deletions(-) + create mode 100644 repos/system_upgrade/common/actors/systemd/scansystemdsource/actor.py + create mode 100644 repos/system_upgrade/common/actors/systemd/scansystemdsource/libraries/scansystemdsource.py + create mode 100644 repos/system_upgrade/common/actors/systemd/scansystemdsource/tests/test_scansystemdsource.py + create mode 100644 repos/system_upgrade/common/actors/systemd/scansystemdtarget/actor.py + create mode 100644 repos/system_upgrade/common/actors/systemd/scansystemdtarget/libraries/scansystemdtarget.py + create mode 100644 
repos/system_upgrade/common/actors/systemd/scansystemdtarget/tests/test_scansystemdtarget.py + create mode 100644 repos/system_upgrade/common/libraries/systemd.py + create mode 100644 repos/system_upgrade/common/libraries/tests/00-test.preset + create mode 100644 repos/system_upgrade/common/libraries/tests/01-test.preset + create mode 100644 repos/system_upgrade/common/libraries/tests/05-invalid.preset + create mode 100644 repos/system_upgrade/common/libraries/tests/test_systemd.py + create mode 100644 repos/system_upgrade/common/libraries/tests/test_systemd_files/abc.service + create mode 100644 repos/system_upgrade/common/libraries/tests/test_systemd_files/example.service + create mode 100644 repos/system_upgrade/common/libraries/tests/test_systemd_files/example.socket + create mode 100644 repos/system_upgrade/common/libraries/tests/test_systemd_files/extra.service + create mode 100644 repos/system_upgrade/common/libraries/tests/test_systemd_files/globbed-one.service + create mode 100644 repos/system_upgrade/common/libraries/tests/test_systemd_files/globbed-two.service + create mode 100644 repos/system_upgrade/common/libraries/tests/test_systemd_files/template2@.service + create mode 100644 repos/system_upgrade/common/libraries/tests/test_systemd_files/template@.service + create mode 100644 repos/system_upgrade/common/models/systemd.py + delete mode 100644 repos/system_upgrade/common/models/systemdservices.py + +diff --git a/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/libraries/checksystemdservicetasks.py b/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/libraries/checksystemdservicetasks.py +index 75833e4f..4d1bcda7 100644 +--- a/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/libraries/checksystemdservicetasks.py ++++ b/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/libraries/checksystemdservicetasks.py +@@ -5,18 +5,16 @@ from leapp.models import SystemdServicesTasks + 
FMT_LIST_SEPARATOR = '\n - ' + + +-def _printable_conflicts(conflicts): +- return FMT_LIST_SEPARATOR + FMT_LIST_SEPARATOR.join(sorted(conflicts)) +- +- + def _inhibit_upgrade_with_conflicts(conflicts): + summary = ( + 'The requested states for systemd services on the target system are in conflict.' +- ' The following systemd services were requested to be both enabled and disabled on the target system: {}' ++ ' The following systemd services were requested to be both enabled and' ++ ' disabled on the target system:{}{}' ++ .format(FMT_LIST_SEPARATOR, FMT_LIST_SEPARATOR.join(sorted(conflicts))) + ) + report = [ + reporting.Title('Conflicting requirements of systemd service states'), +- reporting.Summary(summary.format(_printable_conflicts(conflicts))), ++ reporting.Summary(summary), + reporting.Severity(reporting.Severity.HIGH), + reporting.Groups([reporting.Groups.SANITY]), + reporting.Groups([reporting.Groups.INHIBITOR]), +diff --git a/repos/system_upgrade/common/actors/systemd/scansystemdsource/actor.py b/repos/system_upgrade/common/actors/systemd/scansystemdsource/actor.py +new file mode 100644 +index 00000000..04a504b9 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/systemd/scansystemdsource/actor.py +@@ -0,0 +1,25 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import scansystemdsource ++from leapp.models import SystemdBrokenSymlinksSource, SystemdServicesInfoSource, SystemdServicesPresetInfoSource ++from leapp.tags import FactsPhaseTag, IPUWorkflowTag ++ ++ ++class ScanSystemdSource(Actor): ++ """ ++ Provides info about systemd on the source system ++ ++ The provided info includes information about: ++ - vendor presets of services ++ - systemd service files, including their state ++ - broken systemd symlinks ++ ++ There is an analogous actor :class:`ScanSystemdTarget` for target system. 
++ """ ++ ++ name = 'scan_systemd_source' ++ consumes = () ++ produces = (SystemdBrokenSymlinksSource, SystemdServicesInfoSource, SystemdServicesPresetInfoSource) ++ tags = (IPUWorkflowTag, FactsPhaseTag) ++ ++ def process(self): ++ scansystemdsource.scan() +diff --git a/repos/system_upgrade/common/actors/systemd/scansystemdsource/libraries/scansystemdsource.py b/repos/system_upgrade/common/actors/systemd/scansystemdsource/libraries/scansystemdsource.py +new file mode 100644 +index 00000000..f6d9599c +--- /dev/null ++++ b/repos/system_upgrade/common/actors/systemd/scansystemdsource/libraries/scansystemdsource.py +@@ -0,0 +1,45 @@ ++from leapp.exceptions import StopActorExecutionError ++from leapp.libraries.common import systemd ++from leapp.libraries.stdlib import api, CalledProcessError ++from leapp.models import SystemdBrokenSymlinksSource, SystemdServicesInfoSource, SystemdServicesPresetInfoSource ++ ++ ++def scan(): ++ try: ++ broken_symlinks = systemd.get_broken_symlinks() ++ except (OSError, CalledProcessError) as err: ++ details = {'details': str(err)} ++ if isinstance(err, CalledProcessError): ++ details['stderr'] = err.stderr ++ raise StopActorExecutionError( ++ message='Cannot scan the system to list possible broken systemd symlinks.', ++ details=details ++ ) ++ ++ try: ++ services_files = systemd.get_service_files() ++ except CalledProcessError as err: ++ raise StopActorExecutionError( ++ message='Cannot obtain the list of systemd service unit files.', ++ details={'details': str(err), 'stderr': err.stderr} ++ ) ++ ++ try: ++ presets = systemd.get_system_service_preset_files(services_files, ignore_invalid_entries=False) ++ except (OSError, CalledProcessError) as err: ++ details = {'details': str(err)} ++ if isinstance(err, CalledProcessError): ++ details['stderr'] = err.stderr ++ raise StopActorExecutionError( ++ message='Cannot obtain the list of systemd preset files.', ++ details=details ++ ) ++ except ValueError as err: ++ raise 
StopActorExecutionError( ++ message='Discovered an invalid systemd preset file.', ++ details={'details': str(err)} ++ ) ++ ++ api.produce(SystemdBrokenSymlinksSource(broken_symlinks=broken_symlinks)) ++ api.produce(SystemdServicesInfoSource(service_files=services_files)) ++ api.produce(SystemdServicesPresetInfoSource(presets=presets)) +diff --git a/repos/system_upgrade/common/actors/systemd/scansystemdsource/tests/test_scansystemdsource.py b/repos/system_upgrade/common/actors/systemd/scansystemdsource/tests/test_scansystemdsource.py +new file mode 100644 +index 00000000..7b95a2df +--- /dev/null ++++ b/repos/system_upgrade/common/actors/systemd/scansystemdsource/tests/test_scansystemdsource.py +@@ -0,0 +1,100 @@ ++import pytest ++ ++from leapp.exceptions import StopActorExecutionError ++from leapp.libraries.actor import scansystemdsource ++from leapp.libraries.common import systemd ++from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked, produce_mocked ++from leapp.libraries.stdlib import api, CalledProcessError ++from leapp.models import ( ++ SystemdServiceFile, ++ SystemdServicePreset, ++ SystemdServicesInfoSource, ++ SystemdServicesPresetInfoSource ++) ++ ++_BROKEN_SYMLINKS = [ ++ "/etc/systemd/system/multi-user.target.wants/vdo.service", ++ "/etc/systemd/system/multi-user.target.wants/rngd.service" ++] ++ ++_SERVICE_FILES = [ ++ SystemdServiceFile(name='getty@.service', state='enabled'), ++ SystemdServiceFile(name='vdo.service', state='disabled') ++] ++ ++_PRESETS = [ ++ SystemdServicePreset(service='getty@.service', state='enable'), ++ SystemdServicePreset(service='vdo.service', state='disable'), ++] ++ ++ ++@pytest.mark.parametrize( ++ ('broken_symlinks', 'files', 'presets'), ++ ( ++ (_BROKEN_SYMLINKS, _SERVICE_FILES, _PRESETS), ++ ([], [], []) ++ ) ++) ++def test_message_produced(monkeypatch, broken_symlinks, files, presets): ++ ++ def get_broken_symlinks_mocked(): ++ return broken_symlinks ++ ++ def get_service_files_mocked(): 
++ return files ++ ++ def get_system_service_preset_files_mocked(service_files, ignore_invalid_entries): ++ return presets ++ ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked()) ++ monkeypatch.setattr(api, 'produce', produce_mocked()) ++ monkeypatch.setattr(systemd, 'get_broken_symlinks', get_broken_symlinks_mocked) ++ monkeypatch.setattr(systemd, 'get_service_files', get_service_files_mocked) ++ monkeypatch.setattr(systemd, 'get_system_service_preset_files', get_system_service_preset_files_mocked) ++ ++ scansystemdsource.scan() ++ ++ assert api.produce.called ++ assert api.produce.model_instances[0].broken_symlinks == broken_symlinks ++ assert api.produce.model_instances[1].service_files == files ++ assert api.produce.model_instances[2].presets == presets ++ ++ ++_CALL_PROC_ERR = CalledProcessError( ++ message='BooCalled', ++ command=['find'], ++ result={ ++ 'stdout': 'stdout', ++ 'stderr': 'stderr', ++ 'exit_code': 1, ++ 'signal': 1, ++ 'pid': 1, ++ } ++) ++ ++ ++class GetOrRaise(object): ++ def __init__(self, value): ++ self.value = value ++ ++ def __call__(self, *dummyArgs, **dummy): ++ if isinstance(self.value, list): ++ return self.value ++ raise self.value ++ ++ ++@pytest.mark.parametrize('symlinks', [OSError('Boo'), _CALL_PROC_ERR, []]) ++@pytest.mark.parametrize('files', [_CALL_PROC_ERR, []]) ++@pytest.mark.parametrize('presets', [OSError('Boo'), _CALL_PROC_ERR, ValueError('Hamster'), []]) ++def test_exception_handling(monkeypatch, symlinks, files, presets): ++ if symlinks == files == presets == []: ++ # covered by test above ++ return ++ ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked()) ++ monkeypatch.setattr(api, 'produce', produce_mocked()) ++ monkeypatch.setattr(systemd, 'get_broken_symlinks', GetOrRaise(symlinks)) ++ monkeypatch.setattr(systemd, 'get_service_files', GetOrRaise(files)) ++ monkeypatch.setattr(systemd, 'get_system_service_preset_files', GetOrRaise(presets)) ++ with pytest.raises(StopActorExecutionError): 
++ scansystemdsource.scan() +diff --git a/repos/system_upgrade/common/actors/systemd/scansystemdtarget/actor.py b/repos/system_upgrade/common/actors/systemd/scansystemdtarget/actor.py +new file mode 100644 +index 00000000..185b30ac +--- /dev/null ++++ b/repos/system_upgrade/common/actors/systemd/scansystemdtarget/actor.py +@@ -0,0 +1,28 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import scansystemdtarget ++from leapp.models import SystemdBrokenSymlinksTarget, SystemdServicesInfoTarget, SystemdServicesPresetInfoTarget ++from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag ++ ++ ++class ScanSystemdTarget(Actor): ++ """ ++ Provides info about systemd on the source system ++ ++ The provided info includes information about: ++ - vendor presets of services ++ - systemd service files, including their state ++ - broken systemd symlinks ++ ++ There is an analogous actor :class:`ScanSystemdSource` for source system ++ ++ The actor ignore errors (errors are logged, but do not stop the upgrade). ++ If some data cannot be obtained, particular message is not produced. ++ Actors are expected to check whether the data is available. 
++ """ ++ name = 'scan_systemd_target' ++ consumes = () ++ produces = (SystemdBrokenSymlinksTarget, SystemdServicesInfoTarget, SystemdServicesPresetInfoTarget) ++ tags = (IPUWorkflowTag, ApplicationsPhaseTag) ++ ++ def process(self): ++ scansystemdtarget.scan() +diff --git a/repos/system_upgrade/common/actors/systemd/scansystemdtarget/libraries/scansystemdtarget.py b/repos/system_upgrade/common/actors/systemd/scansystemdtarget/libraries/scansystemdtarget.py +new file mode 100644 +index 00000000..9c922c93 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/systemd/scansystemdtarget/libraries/scansystemdtarget.py +@@ -0,0 +1,37 @@ ++from leapp.libraries.common import systemd ++from leapp.libraries.stdlib import api, CalledProcessError ++from leapp.models import SystemdBrokenSymlinksTarget, SystemdServicesInfoTarget, SystemdServicesPresetInfoTarget ++ ++ ++def scan_broken_symlinks(): ++ try: ++ broken_symlinks = systemd.get_broken_symlinks() ++ except (OSError, CalledProcessError): ++ return ++ api.produce(SystemdBrokenSymlinksTarget(broken_symlinks=broken_symlinks)) ++ ++ ++def scan_service_files(): ++ try: ++ services_files = systemd.get_service_files() ++ except CalledProcessError: ++ return None ++ api.produce(SystemdServicesInfoTarget(service_files=services_files)) ++ return services_files ++ ++ ++def scan_preset_files(services_files): ++ if services_files is None: ++ return ++ try: ++ presets = systemd.get_system_service_preset_files(services_files, ignore_invalid_entries=True) ++ except (OSError, CalledProcessError): ++ return ++ api.produce(SystemdServicesPresetInfoTarget(presets=presets)) ++ ++ ++def scan(): ++ # Errors are logged inside the systemd library, no need to log them here again. 
++ scan_broken_symlinks() ++ services_files = scan_service_files() ++ scan_preset_files(services_files) +diff --git a/repos/system_upgrade/common/actors/systemd/scansystemdtarget/tests/test_scansystemdtarget.py b/repos/system_upgrade/common/actors/systemd/scansystemdtarget/tests/test_scansystemdtarget.py +new file mode 100644 +index 00000000..227ba61a +--- /dev/null ++++ b/repos/system_upgrade/common/actors/systemd/scansystemdtarget/tests/test_scansystemdtarget.py +@@ -0,0 +1,110 @@ ++import pytest ++ ++from leapp.libraries.actor import scansystemdtarget ++from leapp.libraries.common import systemd ++from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked, produce_mocked ++from leapp.libraries.stdlib import api, CalledProcessError ++from leapp.models import ( ++ SystemdBrokenSymlinksTarget, ++ SystemdServiceFile, ++ SystemdServicePreset, ++ SystemdServicesInfoTarget, ++ SystemdServicesPresetInfoTarget ++) ++ ++_BROKEN_SYMLINKS = [ ++ "/etc/systemd/system/multi-user.target.wants/vdo.service", ++ "/etc/systemd/system/multi-user.target.wants/rngd.service" ++] ++ ++_SERVICE_FILES = [ ++ SystemdServiceFile(name='getty@.service', state='enabled'), ++ SystemdServiceFile(name='vdo.service', state='disabled') ++] ++ ++_PRESETS = [ ++ SystemdServicePreset(service='getty@.service', state='enable'), ++ SystemdServicePreset(service='vdo.service', state='disable'), ++] ++ ++ ++@pytest.mark.parametrize( ++ ('broken_symlinks', 'files', 'presets'), ++ ( ++ (_BROKEN_SYMLINKS, _SERVICE_FILES, _PRESETS), ++ ([], [], []) ++ ) ++) ++def test_message_produced(monkeypatch, broken_symlinks, files, presets): ++ ++ def scan_broken_symlinks_mocked(): ++ return broken_symlinks ++ ++ def get_service_files_mocked(): ++ return files ++ ++ def get_system_service_preset_files_mocked(service_files, ignore_invalid_entries): ++ return presets ++ ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked()) ++ monkeypatch.setattr(api, 'produce', produce_mocked()) ++ 
monkeypatch.setattr(systemd, 'get_broken_symlinks', scan_broken_symlinks_mocked) ++ monkeypatch.setattr(systemd, 'get_service_files', get_service_files_mocked) ++ monkeypatch.setattr(systemd, 'get_system_service_preset_files', get_system_service_preset_files_mocked) ++ ++ scansystemdtarget.scan() ++ ++ assert api.produce.called ++ assert api.produce.model_instances[0].broken_symlinks == broken_symlinks ++ assert api.produce.model_instances[1].service_files == files ++ assert api.produce.model_instances[2].presets == presets ++ ++ ++_CALL_PROC_ERR = CalledProcessError( ++ message='BooCalled', ++ command=['find'], ++ result={ ++ 'stdout': 'stdout', ++ 'stderr': 'stderr', ++ 'exit_code': 1, ++ 'signal': 1, ++ 'pid': 1, ++ } ++) ++ ++ ++class GetOrRaise(object): ++ def __init__(self, value): ++ self.value = value ++ ++ def __call__(self, *dummyArgs, **dummy): ++ if isinstance(self.value, list): ++ return self.value ++ raise self.value ++ ++ ++@pytest.mark.parametrize('symlinks', [OSError('Boo'), _CALL_PROC_ERR, []]) ++@pytest.mark.parametrize('files', [_CALL_PROC_ERR, []]) ++@pytest.mark.parametrize('presets', [OSError('Boo'), _CALL_PROC_ERR, []]) ++def test_exception_handling(monkeypatch, symlinks, files, presets): ++ ++ def check_msg(input_data, msg_type, msgs, is_msg_expected): ++ for msg in msgs.model_instances: ++ if isinstance(msg, msg_type): ++ return is_msg_expected ++ return not is_msg_expected ++ ++ if symlinks == files == presets == []: ++ # covered by test above ++ return ++ ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked()) ++ monkeypatch.setattr(api, 'produce', produce_mocked()) ++ monkeypatch.setattr(systemd, 'get_broken_symlinks', GetOrRaise(symlinks)) ++ monkeypatch.setattr(systemd, 'get_service_files', GetOrRaise(files)) ++ monkeypatch.setattr(systemd, 'get_system_service_preset_files', GetOrRaise(presets)) ++ scansystemdtarget.scan() ++ assert check_msg(symlinks, SystemdBrokenSymlinksTarget, api.produce, isinstance(symlinks, list)) ++ 
assert check_msg(files, SystemdServicesInfoTarget, api.produce, isinstance(files, list)) ++ is_msg_expected = isinstance(files, list) and isinstance(presets, list) ++ assert check_msg(presets, SystemdServicesPresetInfoTarget, api.produce, is_msg_expected) +diff --git a/repos/system_upgrade/common/libraries/systemd.py b/repos/system_upgrade/common/libraries/systemd.py +new file mode 100644 +index 00000000..bbf71af7 +--- /dev/null ++++ b/repos/system_upgrade/common/libraries/systemd.py +@@ -0,0 +1,216 @@ ++import fnmatch ++import os ++ ++from leapp.libraries.stdlib import api, CalledProcessError, run ++from leapp.models import SystemdServiceFile, SystemdServicePreset ++ ++SYSTEMD_SYMLINKS_DIR = '/etc/systemd/system/' ++ ++_SYSTEMCTL_CMD_OPTIONS = ['--type=service', '--all', '--plain', '--no-legend'] ++_USR_PRESETS_PATH = '/usr/lib/systemd/system-preset/' ++_ETC_PRESETS_PATH = '/etc/systemd/system-preset/' ++ ++SYSTEMD_SYSTEM_LOAD_PATH = [ ++ '/etc/systemd/system', ++ '/usr/lib/systemd/system' ++] ++ ++ ++def get_broken_symlinks(): ++ """ ++ Get broken systemd symlinks on the system ++ ++ :return: List of broken systemd symlinks ++ :rtype: list[str] ++ :raises: CalledProcessError: if the `find` command fails ++ :raises: OSError: if the find utility is not found ++ """ ++ try: ++ return run(['find', SYSTEMD_SYMLINKS_DIR, '-xtype', 'l'], split=True)['stdout'] ++ except (OSError, CalledProcessError): ++ api.current_logger().error('Cannot obtain the list of broken systemd symlinks.') ++ raise ++ ++ ++def get_service_files(): ++ """ ++ Get list of unit files of systemd services on the system ++ ++ The list includes template units. 
++ ++ :return: List of service unit files with states ++ :rtype: list[SystemdServiceFile] ++ :raises: CalledProcessError: in case of failure of `systemctl` command ++ """ ++ services_files = [] ++ try: ++ cmd = ['systemctl', 'list-unit-files'] + _SYSTEMCTL_CMD_OPTIONS ++ service_units_data = run(cmd, split=True)['stdout'] ++ except CalledProcessError as err: ++ api.current_logger().error('Cannot obtain the list of unit files:{}'.format(str(err))) ++ raise ++ ++ for entry in service_units_data: ++ columns = entry.split() ++ services_files.append(SystemdServiceFile(name=columns[0], state=columns[1])) ++ return services_files ++ ++ ++def _join_presets_resolving_overrides(etc_files, usr_files): ++ """ ++ Join presets and resolve preset file overrides ++ ++ Preset files in /etc/ override those with the same name in /usr/. ++ If such a file is a symlink to /dev/null, it disables the one in /usr/ instead. ++ ++ :param etc_files: Systemd preset files in /etc/ ++ :param usr_files: Systemd preset files in /usr/ ++ :return: List of preset files in /etc/ and /usr/ with overridden files removed ++ """ ++ for etc_file in etc_files: ++ filename = os.path.basename(etc_file) ++ for usr_file in usr_files: ++ if filename == os.path.basename(usr_file): ++ usr_files.remove(usr_file) ++ if os.path.islink(etc_file) and os.readlink(etc_file) == '/dev/null': ++ etc_files.remove(etc_file) ++ ++ return etc_files + usr_files ++ ++ ++def _search_preset_files(path): ++ """ ++ Search preset files in the given path ++ ++ Presets are searched recursively in the given directory. ++ If path isn't an existing directory, return empty list.
++ ++ :param path: The path to search preset files in ++ :return: List of found preset files ++ :rtype: list[str] ++ :raises: CalledProcessError: if the `find` command fails ++ :raises: OSError: if the find utility is not found ++ """ ++ if os.path.isdir(path): ++ try: ++ return run(['find', path, '-name', '*.preset'], split=True)['stdout'] ++ except (OSError, CalledProcessError) as err: ++ api.current_logger().error('Cannot obtain list of systemd preset files in {}:{}'.format(path, str(err))) ++ raise ++ else: ++ return [] ++ ++ ++def _get_system_preset_files(): ++ """ ++ Get systemd system preset files and remove overriding entries. Entries in /run/systemd/system are ignored. ++ ++ :return: List of system systemd preset files ++ :raises: CalledProcessError: if the `find` command fails ++ :raises: OSError: if the find utility is not found ++ """ ++ etc_files = _search_preset_files(_ETC_PRESETS_PATH) ++ usr_files = _search_preset_files(_USR_PRESETS_PATH) ++ ++ preset_files = _join_presets_resolving_overrides(etc_files, usr_files) ++ preset_files.sort() ++ return preset_files ++ ++ ++def _recursive_glob(pattern, root_dir): ++ for _, _, filenames in os.walk(root_dir): ++ for filename in filenames: ++ if fnmatch.fnmatch(filename, pattern): ++ yield filename ++ ++ ++def _parse_preset_entry(entry, presets, load_path): ++ """ ++ Parse a single entry (line) in a preset file ++ ++ Single entry might set presets on multiple units using globs. 
++ ++ :param entry: The entry to parse ++ :param presets: Dictionary to store the presets into ++ :param load_path: List of paths to look systemd unit files up in ++ """ ++ ++ columns = entry.split() ++ if len(columns) < 2 or columns[0] not in ('enable', 'disable'): ++ raise ValueError('Invalid preset file entry: "{}"'.format(entry)) ++ ++ for path in load_path: ++ # TODO(mmatuska): This currently also globs non unit files, ++ # so the results need to be filtered with something like endswith('.') ++ unit_files = _recursive_glob(columns[1], root_dir=path) ++ ++ for unit_file in unit_files: ++ if '@' in columns[1] and len(columns) > 2: ++ # unit is a template, ++ # if the entry contains instance names after template unit name ++ # the entry only applies to the specified instances, not to the ++ # template itself ++ for instance in columns[2:]: ++ service_name = unit_file[:unit_file.index('@') + 1] + instance + '.service' ++ if service_name not in presets: # first occurrence has priority ++ presets[service_name] = columns[0] ++ ++ elif unit_file not in presets: # first occurrence has priority ++ presets[unit_file] = columns[0] ++ ++ ++def _parse_preset_files(preset_files, load_path, ignore_invalid_entries): ++ """ ++ Parse presets from preset files ++ ++ :param load_path: List of paths to search units at ++ :param ignore_invalid_entries: Whether to ignore invalid entries in preset files or raise an error ++ :return: Dictionary mapping systemd units to their preset state ++ :rtype: dict[str, str] ++ :raises: ValueError: when a preset file has invalid content ++ """ ++ presets = {} ++ ++ for preset in preset_files: ++ with open(preset, 'r') as preset_file: ++ for line in preset_file: ++ stripped = line.strip() ++ if stripped and stripped[0] not in ('#', ';'): # ignore comments ++ try: ++ _parse_preset_entry(stripped, presets, load_path) ++ except ValueError as err: ++ new_msg = 'Invalid preset file {pfile}: {error}'.format(pfile=preset, error=str(err)) ++ if 
ignore_invalid_entries: ++ api.current_logger().warning(new_msg) ++ continue ++ raise ValueError(new_msg) ++ return presets ++ ++ ++def get_system_service_preset_files(service_files, ignore_invalid_entries=False): ++ """ ++ Get system preset files for services ++ ++ Presets for static and transient services are filtered out. ++ ++ :param services_files: List of service unit files ++ :param ignore_invalid_entries: Ignore invalid entries in preset files if True, raise ValueError otherwise ++ :return: List of system systemd services presets ++ :rtype: list[SystemdServicePreset] ++ :raises: CalledProcessError: In case of errors when discovering systemd preset files ++ :raises: OSError: When the `find` command is not available ++ :raises: ValueError: When a preset file has invalid content and ignore_invalid_entries is False ++ """ ++ preset_files = _get_system_preset_files() ++ presets = _parse_preset_files(preset_files, SYSTEMD_SYSTEM_LOAD_PATH, ignore_invalid_entries) ++ ++ preset_models = [] ++ for unit, state in presets.items(): ++ if unit.endswith('.service'): ++ service_file = next(iter([s for s in service_files if s.name == unit]), None) ++ # presets can also be set on instances of template services which don't have a unit file ++ if service_file and service_file.state in ('static', 'transient'): ++ continue ++ preset_models.append(SystemdServicePreset(service=unit, state=state)) ++ ++ return preset_models +diff --git a/repos/system_upgrade/common/libraries/tests/00-test.preset b/repos/system_upgrade/common/libraries/tests/00-test.preset +new file mode 100644 +index 00000000..85e4cb0b +--- /dev/null ++++ b/repos/system_upgrade/common/libraries/tests/00-test.preset +@@ -0,0 +1,10 @@ ++enable example.service ++# first line takes priority ++disable example.service ++ ++# hello, world! 
++disable abc.service ++ ++; another comment format ++disable template@.service ++enable template@.service instance1 instance2 +diff --git a/repos/system_upgrade/common/libraries/tests/01-test.preset b/repos/system_upgrade/common/libraries/tests/01-test.preset +new file mode 100644 +index 00000000..6ef393c4 +--- /dev/null ++++ b/repos/system_upgrade/common/libraries/tests/01-test.preset +@@ -0,0 +1,4 @@ ++disable example.* ++enable globbed*.service ++ ++disable * +diff --git a/repos/system_upgrade/common/libraries/tests/05-invalid.preset b/repos/system_upgrade/common/libraries/tests/05-invalid.preset +new file mode 100644 +index 00000000..9ec39de1 +--- /dev/null ++++ b/repos/system_upgrade/common/libraries/tests/05-invalid.preset +@@ -0,0 +1,8 @@ ++# missing unit or glob ++enable ++; missing enable or disable ++hello.service ++# only enable and disable directives are allowed ++mask hello.service ++ ++disable example.service +diff --git a/repos/system_upgrade/common/libraries/tests/test_systemd.py b/repos/system_upgrade/common/libraries/tests/test_systemd.py +new file mode 100644 +index 00000000..a91fce11 +--- /dev/null ++++ b/repos/system_upgrade/common/libraries/tests/test_systemd.py +@@ -0,0 +1,263 @@ ++import os ++from functools import partial ++ ++import pytest ++ ++from leapp.libraries.common import systemd ++from leapp.libraries.common.testutils import logger_mocked ++from leapp.libraries.stdlib import api ++from leapp.models import SystemdServiceFile, SystemdServicePreset ++ ++CURR_DIR = os.path.dirname(os.path.abspath(__file__)) ++ ++ ++def test_get_service_files(monkeypatch): ++ def run_mocked(cmd, *args, **kwargs): ++ if cmd == ['systemctl', 'list-unit-files'] + systemd._SYSTEMCTL_CMD_OPTIONS: ++ return {'stdout': [ ++ 'auditd.service enabled', ++ 'crond.service enabled ', ++ 'dbus.service static ', ++ 'dnf-makecache.service static ', ++ 'firewalld.service enabled ', ++ 'getty@.service enabled ', ++ 'gssproxy.service disabled', ++ 'kdump.service enabled 
', ++ 'mdmon@.service static ', ++ 'nfs.service disabled', ++ 'polkit.service static ', ++ 'rescue.service static ', ++ 'rngd.service enabled ', ++ 'rsyncd.service disabled', ++ 'rsyncd@.service static ', ++ 'smartd.service enabled ', ++ 'sshd.service enabled ', ++ 'sshd@.service static ', ++ 'wpa_supplicant.service disabled' ++ ]} ++ raise ValueError('Attempted to call unexpected command: {}'.format(cmd)) ++ ++ monkeypatch.setattr(systemd, 'run', run_mocked) ++ service_files = systemd.get_service_files() ++ ++ expected = [ ++ SystemdServiceFile(name='auditd.service', state='enabled'), ++ SystemdServiceFile(name='crond.service', state='enabled'), ++ SystemdServiceFile(name='dbus.service', state='static'), ++ SystemdServiceFile(name='dnf-makecache.service', state='static'), ++ SystemdServiceFile(name='firewalld.service', state='enabled'), ++ SystemdServiceFile(name='getty@.service', state='enabled'), ++ SystemdServiceFile(name='gssproxy.service', state='disabled'), ++ SystemdServiceFile(name='kdump.service', state='enabled'), ++ SystemdServiceFile(name='mdmon@.service', state='static'), ++ SystemdServiceFile(name='nfs.service', state='disabled'), ++ SystemdServiceFile(name='polkit.service', state='static'), ++ SystemdServiceFile(name='rescue.service', state='static'), ++ SystemdServiceFile(name='rngd.service', state='enabled'), ++ SystemdServiceFile(name='rsyncd.service', state='disabled'), ++ SystemdServiceFile(name='rsyncd@.service', state='static'), ++ SystemdServiceFile(name='smartd.service', state='enabled'), ++ SystemdServiceFile(name='sshd.service', state='enabled'), ++ SystemdServiceFile(name='sshd@.service', state='static'), ++ SystemdServiceFile(name='wpa_supplicant.service', state='disabled') ++ ] ++ ++ assert service_files == expected ++ ++ ++def test_preset_files_overrides(): ++ etc_files = [ ++ '/etc/systemd/system-preset/00-abc.preset', ++ '/etc/systemd/system-preset/preset_without_prio.preset' ++ ] ++ usr_files = [ ++ 
'/usr/lib/systemd/system-preset/00-abc.preset', ++ '/usr/lib/systemd/system-preset/99-xyz.preset', ++ '/usr/lib/systemd/system-preset/preset_without_prio.preset' ++ ] ++ ++ expected = [ ++ '/usr/lib/systemd/system-preset/99-xyz.preset', ++ '/etc/systemd/system-preset/00-abc.preset', ++ '/etc/systemd/system-preset/preset_without_prio.preset' ++ ] ++ ++ presets = systemd._join_presets_resolving_overrides(etc_files, usr_files) ++ assert sorted(presets) == sorted(expected) ++ ++ ++def test_preset_files_block_override(monkeypatch): ++ etc_files = [ ++ '/etc/systemd/system-preset/00-abc.preset' ++ ] ++ usr_files = [ ++ '/usr/lib/systemd/system-preset/00-abc.preset', ++ '/usr/lib/systemd/system-preset/99-xyz.preset' ++ ] ++ ++ expected = [ ++ '/usr/lib/systemd/system-preset/99-xyz.preset', ++ ] ++ ++ def islink_mocked(path): ++ return path == '/etc/systemd/system-preset/00-abc.preset' ++ ++ def readlink_mocked(path): ++ if path == '/etc/systemd/system-preset/00-abc.preset': ++ return '/dev/null' ++ raise OSError ++ ++ monkeypatch.setattr(os.path, 'islink', islink_mocked) ++ monkeypatch.setattr(os, 'readlink', readlink_mocked) ++ ++ presets = systemd._join_presets_resolving_overrides(etc_files, usr_files) ++ assert sorted(presets) == sorted(expected) ++ ++ ++TEST_SYSTEMD_LOAD_PATH = [os.path.join(CURR_DIR, 'test_systemd_files/')] ++ ++TESTING_PRESET_FILES = [ ++ os.path.join(CURR_DIR, '00-test.preset'), ++ os.path.join(CURR_DIR, '01-test.preset') ++] ++ ++TESTING_PRESET_WITH_INVALID_ENTRIES = os.path.join(CURR_DIR, '05-invalid.preset') ++ ++_PARSE_PRESET_ENTRIES_TEST_DEFINITION = ( ++ ('enable example.service', {'example.service': 'enable'}), ++ ('disable abc.service', {'abc.service': 'disable'}), ++ ('enable template@.service', {'template@.service': 'enable'}), ++ ('disable template2@.service', {'template2@.service': 'disable'}), ++ ('disable template@.service instance1 instance2', { ++ 'template@instance1.service': 'disable', ++ 'template@instance2.service': 'disable' ++ 
}), ++ ('enable globbed*.service', {'globbed-one.service': 'enable', 'globbed-two.service': 'enable'}), ++ ('enable example.*', {'example.service': 'enable', 'example.socket': 'enable'}), ++ ('disable *', { ++ 'example.service': 'disable', ++ 'abc.service': 'disable', ++ 'template@.service': 'disable', ++ 'template2@.service': 'disable', ++ 'globbed-one.service': 'disable', ++ 'globbed-two.service': 'disable', ++ 'example.socket': 'disable', ++ 'extra.service': 'disable' ++ }) ++) ++ ++ ++@pytest.mark.parametrize('entry,expected', _PARSE_PRESET_ENTRIES_TEST_DEFINITION) ++def test_parse_preset_entry(monkeypatch, entry, expected): ++ presets = {} ++ systemd._parse_preset_entry(entry, presets, TEST_SYSTEMD_LOAD_PATH) ++ assert presets == expected ++ ++ ++@pytest.mark.parametrize( ++ 'entry', ++ [ ++ ('hello.service'), ++ ('mask hello.service'), ++ ('enable'), ++ ] ++) ++def test_parse_preset_entry_invalid(monkeypatch, entry): ++ presets = {} ++ with pytest.raises(ValueError, match=r'^Invalid preset file entry: '): ++ systemd._parse_preset_entry(entry, presets, TEST_SYSTEMD_LOAD_PATH) ++ ++ ++def test_parse_preset_files(monkeypatch): ++ ++ expected = { ++ 'example.service': 'enable', ++ 'example.socket': 'disable', ++ 'abc.service': 'disable', ++ 'template@.service': 'disable', ++ 'template@instance1.service': 'enable', ++ 'template@instance2.service': 'enable', ++ 'globbed-one.service': 'enable', ++ 'globbed-two.service': 'enable', ++ 'extra.service': 'disable', ++ 'template2@.service': 'disable' ++ } ++ ++ presets = systemd._parse_preset_files(TESTING_PRESET_FILES, TEST_SYSTEMD_LOAD_PATH, False) ++ assert presets == expected ++ ++ ++def test_parse_preset_files_invalid(): ++ with pytest.raises(ValueError): ++ systemd._parse_preset_files( ++ [TESTING_PRESET_WITH_INVALID_ENTRIES], TEST_SYSTEMD_LOAD_PATH, ignore_invalid_entries=False ++ ) ++ ++ ++def test_parse_preset_files_ignore_invalid(monkeypatch): ++ monkeypatch.setattr(api, 'current_logger', logger_mocked()) ++ ++ 
invalid_preset_files = [TESTING_PRESET_WITH_INVALID_ENTRIES] ++ presets = systemd._parse_preset_files( ++ invalid_preset_files, TEST_SYSTEMD_LOAD_PATH, ignore_invalid_entries=True ++ ) ++ ++ for entry in ('enable', 'hello.service', 'mask hello.service'): ++ msg = 'Invalid preset file {}: Invalid preset file entry: "{}"'.format(invalid_preset_files[0], entry) ++ assert msg in api.current_logger.warnmsg ++ ++ assert presets == {'example.service': 'disable'} ++ ++ ++def parse_preset_files_mocked(): ++ mocked = partial(systemd._parse_preset_files, load_path=TEST_SYSTEMD_LOAD_PATH) ++ ++ def impl(preset_files, load_path, ignore_invalid_entries): ++ return mocked(preset_files, ignore_invalid_entries=ignore_invalid_entries) ++ return impl ++ ++ ++def test_get_service_preset_files(monkeypatch): ++ ++ def get_system_preset_files_mocked(): ++ return TESTING_PRESET_FILES ++ ++ monkeypatch.setattr(systemd, '_get_system_preset_files', get_system_preset_files_mocked) ++ monkeypatch.setattr(systemd, '_parse_preset_files', parse_preset_files_mocked()) ++ ++ service_files = [ ++ SystemdServiceFile(name='abc.service', state='transient'), ++ SystemdServiceFile(name='example.service', state='static'), ++ SystemdServiceFile(name='example.socket', state='masked'), ++ SystemdServiceFile(name='extra.service', state='disabled'), ++ SystemdServiceFile(name='template2@.service', state='enabled'), ++ SystemdServiceFile(name='template@.service', state='enabled'), ++ ] ++ ++ expected = [ ++ # dont expect example.service since it's static ++ # dont expect abc.service since it's transient ++ SystemdServicePreset(service='template@.service', state='disable'), ++ SystemdServicePreset(service='template@instance1.service', state='enable'), ++ SystemdServicePreset(service='template@instance2.service', state='enable'), ++ SystemdServicePreset(service='globbed-one.service', state='enable'), ++ SystemdServicePreset(service='globbed-two.service', state='enable'), ++ 
SystemdServicePreset(service='extra.service', state='disable'), ++ SystemdServicePreset(service='template2@.service', state='disable') ++ ] ++ ++ presets = systemd.get_system_service_preset_files(service_files, False) ++ assert sorted(presets, key=lambda e: e.service) == sorted(expected, key=lambda e: e.service) ++ ++ ++def test_get_service_preset_files_invalid(monkeypatch): ++ ++ def get_system_preset_files_mocked(): ++ return [TESTING_PRESET_WITH_INVALID_ENTRIES] ++ ++ monkeypatch.setattr(systemd, '_get_system_preset_files', get_system_preset_files_mocked) ++ monkeypatch.setattr(systemd, '_parse_preset_files', parse_preset_files_mocked()) ++ ++ with pytest.raises(ValueError): ++ # doesn't matter what service_files are ++ systemd.get_system_service_preset_files([], ignore_invalid_entries=False) +diff --git a/repos/system_upgrade/common/libraries/tests/test_systemd_files/abc.service b/repos/system_upgrade/common/libraries/tests/test_systemd_files/abc.service +new file mode 100644 +index 00000000..e69de29b +diff --git a/repos/system_upgrade/common/libraries/tests/test_systemd_files/example.service b/repos/system_upgrade/common/libraries/tests/test_systemd_files/example.service +new file mode 100644 +index 00000000..e69de29b +diff --git a/repos/system_upgrade/common/libraries/tests/test_systemd_files/example.socket b/repos/system_upgrade/common/libraries/tests/test_systemd_files/example.socket +new file mode 100644 +index 00000000..e69de29b +diff --git a/repos/system_upgrade/common/libraries/tests/test_systemd_files/extra.service b/repos/system_upgrade/common/libraries/tests/test_systemd_files/extra.service +new file mode 100644 +index 00000000..e69de29b +diff --git a/repos/system_upgrade/common/libraries/tests/test_systemd_files/globbed-one.service b/repos/system_upgrade/common/libraries/tests/test_systemd_files/globbed-one.service +new file mode 100644 +index 00000000..e69de29b +diff --git 
a/repos/system_upgrade/common/libraries/tests/test_systemd_files/globbed-two.service b/repos/system_upgrade/common/libraries/tests/test_systemd_files/globbed-two.service +new file mode 100644 +index 00000000..e69de29b +diff --git a/repos/system_upgrade/common/libraries/tests/test_systemd_files/template2@.service b/repos/system_upgrade/common/libraries/tests/test_systemd_files/template2@.service +new file mode 100644 +index 00000000..e69de29b +diff --git a/repos/system_upgrade/common/libraries/tests/test_systemd_files/template@.service b/repos/system_upgrade/common/libraries/tests/test_systemd_files/template@.service +new file mode 100644 +index 00000000..e69de29b +diff --git a/repos/system_upgrade/common/models/systemd.py b/repos/system_upgrade/common/models/systemd.py +new file mode 100644 +index 00000000..f66ae5dd +--- /dev/null ++++ b/repos/system_upgrade/common/models/systemd.py +@@ -0,0 +1,155 @@ ++from leapp.models import fields, Model ++from leapp.topics import SystemInfoTopic ++ ++ ++class SystemdBrokenSymlinksSource(Model): ++ """ ++ Information about broken systemd symlinks on the source system ++ """ ++ ++ topic = SystemInfoTopic ++ broken_symlinks = fields.List(fields.String(), default=[]) ++ """ ++ List of broken systemd symlinks on the source system ++ ++ The values are absolute paths of the broken symlinks. ++ """ ++ ++ ++class SystemdBrokenSymlinksTarget(SystemdBrokenSymlinksSource): ++ """ ++ Analogy to :class:`SystemdBrokenSymlinksSource`, but for the target system ++ """ ++ ++ ++class SystemdServicesTasks(Model): ++ """ ++ Influence the systemd services of the target system ++ ++ E.g. it could be specified explicitly whether some services should ++ be enabled or disabled after the in-place upgrade - follow descriptions ++ of particular tasks for details. ++ ++ In case of conflicting tasks (e.g. 
the A service should be enabled and ++ disabled in the same time): ++ a) If conflicting tasks are detected during check phases, ++ the upgrade is inhibited with the proper report. ++ b) If conflicting tasks are detected during the final evaluation, ++ error logs are created and such services will be disabled. ++ """ ++ topic = SystemInfoTopic ++ ++ to_enable = fields.List(fields.String(), default=[]) ++ """ ++ List of systemd services to enable on the target system ++ ++ Masked services will not be enabled. Attempting to enable a masked service ++ will be evaluated by systemctl as usually. The error will be logged and the ++ upgrade process will continue. ++ """ ++ ++ to_disable = fields.List(fields.String(), default=[]) ++ """ ++ List of systemd services to disable on the target system ++ """ ++ ++ # NOTE: possible extension in case of requirement (currently not implemented): ++ # to_unmask = fields.List(fields.String(), default=[]) ++ ++ ++class SystemdServiceFile(Model): ++ """ ++ Information about single systemd service unit file ++ ++ This model is not expected to be produced nor consumed by actors directly. ++ See the :class:`SystemdServicesInfoSource` and :class:`SystemdServicesPresetInfoTarget` ++ for more info. 
++ """ ++ topic = SystemInfoTopic ++ ++ name = fields.String() ++ """ ++ Name of the service unit file ++ """ ++ ++ state = fields.StringEnum([ ++ 'alias', ++ 'bad', ++ 'disabled', ++ 'enabled', ++ 'enabled-runtime', ++ 'generated', ++ 'indirect', ++ 'linked', ++ 'linked-runtime', ++ 'masked', ++ 'masked-runtime', ++ 'static', ++ 'transient', ++ ]) ++ """ ++ The state of the service unit file ++ """ ++ ++ ++class SystemdServicesInfoSource(Model): ++ """ ++ Information about systemd services on the source system ++ """ ++ topic = SystemInfoTopic ++ ++ service_files = fields.List(fields.Model(SystemdServiceFile), default=[]) ++ """ ++ List of all installed systemd service unit files ++ ++ Instances of service template unit files don't have a unit file ++ and therefore aren't included, but their template files are. ++ Generated service unit files are also included. ++ """ ++ ++ ++class SystemdServicesInfoTarget(SystemdServicesInfoSource): ++ """ ++ Analogy to :class:`SystemdServicesInfoSource`, but for the target system ++ ++ This information is taken after the RPM Upgrade and might become ++ invalid if there are actors calling systemctl enable/disable directly later ++ in the upgrade process. Therefore it is recommended to use ++ :class:`SystemdServicesTasks` to alter the state of units in the ++ FinalizationPhase. 
++ """ ++ ++ ++class SystemdServicePreset(Model): ++ """ ++ Information about a preset for systemd service ++ """ ++ ++ topic = SystemInfoTopic ++ service = fields.String() ++ """ ++ Name of the service, with the .service suffix ++ """ ++ ++ state = fields.StringEnum(['disable', 'enable']) ++ """ ++ The state set by a preset file ++ """ ++ ++ ++class SystemdServicesPresetInfoSource(Model): ++ """ ++ Information about presets for systemd services ++ """ ++ topic = SystemInfoTopic ++ ++ presets = fields.List(fields.Model(SystemdServicePreset), default=[]) ++ """ ++ List of all service presets ++ """ ++ ++ ++class SystemdServicesPresetInfoTarget(SystemdServicesPresetInfoSource): ++ """ ++ Analogy to :class:`SystemdServicesPresetInfoSource` but for the target system ++ """ +diff --git a/repos/system_upgrade/common/models/systemdservices.py b/repos/system_upgrade/common/models/systemdservices.py +deleted file mode 100644 +index 6c7d4a1d..00000000 +--- a/repos/system_upgrade/common/models/systemdservices.py ++++ /dev/null +@@ -1,22 +0,0 @@ +-from leapp.models import fields, Model +-from leapp.topics import SystemInfoTopic +- +- +-class SystemdServicesTasks(Model): +- topic = SystemInfoTopic +- +- to_enable = fields.List(fields.String(), default=[]) +- """ +- List of systemd services to enable on the target system +- +- Masked services will not be enabled. Attempting to enable a masked service +- will be evaluated by systemctl as usually. The error will be logged and the +- upgrade process will continue. 
+- """ +- to_disable = fields.List(fields.String(), default=[]) +- """ +- List of systemd services to disable on the target system +- """ +- +- # Note: possible extension in case of requirement (currently not implemented): +- # to_unmask = fields.List(fields.String(), default=[]) +-- +2.38.1 + diff --git a/SOURCES/0026-systemd-Move-enable-disable-reenable-_unit-functions.patch b/SOURCES/0026-systemd-Move-enable-disable-reenable-_unit-functions.patch new file mode 100644 index 0000000..19a6a05 --- /dev/null +++ b/SOURCES/0026-systemd-Move-enable-disable-reenable-_unit-functions.patch @@ -0,0 +1,227 @@ +From dc43277d4cab1f218a2b5d7e7743a1d2423c8c77 Mon Sep 17 00:00:00 2001 +From: Matej Matuska +Date: Wed, 16 Nov 2022 14:01:45 +0100 +Subject: [PATCH 26/32] systemd: Move (enable|disable|reenable)_unit functions + to the shared library + +The functions are used to enable, disable, or re-enable the given +systemd unit. Originaly they were part of setsystemdservicesstate +actor, however we have realized they are needed in other actors too +in rare cases. 
+--- + .../libraries/setsystemdservicesstate.py | 25 +++++----- + .../tests/test_setsystemdservicesstate.py | 48 +++++++++++------- + .../common/libraries/systemd.py | 50 +++++++++++++++++++ + 3 files changed, 93 insertions(+), 30 deletions(-) + +diff --git a/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/libraries/setsystemdservicesstate.py b/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/libraries/setsystemdservicesstate.py +index 01272438..641605db 100644 +--- a/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/libraries/setsystemdservicesstate.py ++++ b/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/libraries/setsystemdservicesstate.py +@@ -1,17 +1,8 @@ +-from leapp.libraries.stdlib import api, CalledProcessError, run ++from leapp.libraries.common import systemd ++from leapp.libraries.stdlib import api, CalledProcessError + from leapp.models import SystemdServicesTasks + + +-def _try_set_service_state(command, service): +- try: +- # it is possible to call this on multiple units at once, +- # but failing to enable one service would cause others to not enable as well +- run(['systemctl', command, service]) +- except CalledProcessError as err: +- api.current_logger().error('Failed to {} systemd unit "{}". 
Message: {}'.format(command, service, str(err))) +- # TODO(mmatuska) produce post-upgrade report +- +- + def process(): + services_to_enable = set() + services_to_disable = set() +@@ -25,7 +16,15 @@ def process(): + api.current_logger().error(msg) + + for service in services_to_enable: +- _try_set_service_state('enable', service) ++ try: ++ systemd.enable_unit(service) ++ except CalledProcessError: ++ # TODO(mmatuska) produce post-upgrade report ++ pass + + for service in services_to_disable: +- _try_set_service_state('disable', service) ++ try: ++ systemd.disable_unit(service) ++ except CalledProcessError: ++ # TODO(mmatuska) produce post-upgrade report ++ pass +diff --git a/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/tests/test_setsystemdservicesstate.py b/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/tests/test_setsystemdservicesstate.py +index dd153329..14d07537 100644 +--- a/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/tests/test_setsystemdservicesstate.py ++++ b/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/tests/test_setsystemdservicesstate.py +@@ -2,50 +2,60 @@ import pytest + + from leapp.libraries import stdlib + from leapp.libraries.actor import setsystemdservicesstate ++from leapp.libraries.common import systemd + from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked + from leapp.libraries.stdlib import api, CalledProcessError + from leapp.models import SystemdServicesTasks + + +-class MockedRun(object): ++class MockedSystemdCmd(object): + def __init__(self): +- self.commands = [] ++ self.units = [] + +- def __call__(self, cmd, *args, **kwargs): +- self.commands.append(cmd) ++ def __call__(self, unit, *args, **kwargs): ++ self.units.append(unit) + return {} + + + @pytest.mark.parametrize( +- ('msgs', 'expected_calls'), ++ ('msgs', 'expect_enable_units', 'expect_disable_units'), + [ + ( + [SystemdServicesTasks(to_enable=['hello.service'], + 
to_disable=['getty.service'])], +- [['systemctl', 'enable', 'hello.service'], ['systemctl', 'disable', 'getty.service']] ++ ['hello.service'], ++ ['getty.service'] + ), + ( + [SystemdServicesTasks(to_disable=['getty.service'])], +- [['systemctl', 'disable', 'getty.service']] ++ [], ++ ['getty.service'] + ), + ( + [SystemdServicesTasks(to_enable=['hello.service'])], +- [['systemctl', 'enable', 'hello.service']] ++ ['hello.service'], ++ [] + ), + ( + [SystemdServicesTasks()], ++ [], + [] + ), + ] + ) +-def test_process(monkeypatch, msgs, expected_calls): +- mocked_run = MockedRun() +- monkeypatch.setattr(setsystemdservicesstate, 'run', mocked_run) ++def test_process(monkeypatch, msgs, expect_enable_units, expect_disable_units): ++ mocked_enable = MockedSystemdCmd() ++ monkeypatch.setattr(systemd, 'enable_unit', mocked_enable) ++ ++ mocked_disable = MockedSystemdCmd() ++ monkeypatch.setattr(systemd, 'disable_unit', mocked_disable) ++ + monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs)) + + setsystemdservicesstate.process() + +- assert mocked_run.commands == expected_calls ++ assert mocked_enable.units == expect_enable_units ++ assert mocked_disable.units == expect_disable_units + + + def test_process_invalid(monkeypatch): +@@ -57,7 +67,7 @@ def test_process_invalid(monkeypatch): + + msgs = [SystemdServicesTasks(to_enable=['invalid.service'])] + +- monkeypatch.setattr(setsystemdservicesstate, 'run', mocked_run) ++ monkeypatch.setattr(systemd, 'run', mocked_run) + monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs)) + monkeypatch.setattr(api, 'current_logger', logger_mocked()) + +@@ -69,10 +79,14 @@ def test_process_invalid(monkeypatch): + + + def test_enable_disable_conflict_logged(monkeypatch): +- msgs = [SystemdServicesTasks(to_enable=['hello.service'], +- to_disable=['hello.service'])] +- mocked_run = MockedRun() +- monkeypatch.setattr(setsystemdservicesstate, 'run', mocked_run) ++ msgs = 
[SystemdServicesTasks(to_enable=['hello.service'], to_disable=['hello.service'])] ++ ++ mocked_enable = MockedSystemdCmd() ++ monkeypatch.setattr(systemd, 'enable_unit', mocked_enable) ++ ++ mocked_disable = MockedSystemdCmd() ++ monkeypatch.setattr(systemd, 'disable_unit', mocked_disable) ++ + monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs)) + monkeypatch.setattr(api, 'current_logger', logger_mocked()) + +diff --git a/repos/system_upgrade/common/libraries/systemd.py b/repos/system_upgrade/common/libraries/systemd.py +index bbf71af7..c709f233 100644 +--- a/repos/system_upgrade/common/libraries/systemd.py ++++ b/repos/system_upgrade/common/libraries/systemd.py +@@ -32,6 +32,56 @@ def get_broken_symlinks(): + raise + + ++def _try_call_unit_command(command, unit): ++ try: ++ # it is possible to call this on multiple units at once, ++ # but failing to enable one service would cause others to not enable as well ++ run(['systemctl', command, unit]) ++ except CalledProcessError as err: ++ msg = 'Failed to {} systemd unit "{}". Message: {}'.format(command, unit, str(err)) ++ api.current_logger().error(msg) ++ raise err ++ ++ ++def enable_unit(unit): ++ """ ++ Enable a systemd unit ++ ++ It is strongly recommended to produce SystemdServicesTasks message instead, ++ unless it is absolutely necessary to handle failure yourself. ++ ++ :param unit: The systemd unit to enable ++ :raises CalledProcessError: In case of failure ++ """ ++ _try_call_unit_command('enable', unit) ++ ++ ++def disable_unit(unit): ++ """ ++ Disable a systemd unit ++ ++ It is strongly recommended to produce SystemdServicesTasks message instead, ++ unless it is absolutely necessary to handle failure yourself. 
++ ++ :param unit: The systemd unit to disable ++ :raises CalledProcessError: In case of failure ++ """ ++ _try_call_unit_command('disable', unit) ++ ++ ++def reenable_unit(unit): ++ """ ++ Re-enable a systemd unit ++ ++ It is strongly recommended to produce SystemdServicesTasks message, unless it ++ is absolutely necessary to handle failure yourself. ++ ++ :param unit: The systemd unit to re-enable ++ :raises CalledProcessError: In case of failure ++ """ ++ _try_call_unit_command('reenable', unit) ++ ++ + def get_service_files(): + """ + Get list of unit files of systemd services on the system +-- +2.38.1 + diff --git a/SOURCES/0027-Fix-broken-or-incorrect-systemd-symlinks.patch b/SOURCES/0027-Fix-broken-or-incorrect-systemd-symlinks.patch new file mode 100644 index 0000000..f112d34 --- /dev/null +++ b/SOURCES/0027-Fix-broken-or-incorrect-systemd-symlinks.patch @@ -0,0 +1,253 @@ +From 7a61c281946ffa0436da8f8837074f17e2103361 Mon Sep 17 00:00:00 2001 +From: Matej Matuska +Date: Wed, 16 Nov 2022 14:11:39 +0100 +Subject: [PATCH 27/32] Fix broken or incorrect systemd symlinks + +Introduce repairsystemdsymlinks actor. +During the in-place upgrade process, it usually happens that some +symlinks become incorrect - symlinks are broken, or they are defined +in a wrong directory (e.g. when they are supposed to be defined in a +different systemd target). This has various reasons, but usually it's +caused by missing rpm scriptlets in particular rpms. + +This change corrects only systemd symlinks are (newly) broken during +the in-place upgrade. Symlinks that have been already broken before +the in-place upgrade are ignored. + +Symlinks are handled in the following fashion, if the symlink points to: + - a removed unit, such a symlink is deleted + - a unit whose installation has been changed (e.g. 
changed WantedBy), + such symlinks are fixed (re-enabled using systemctl) + +JIRA: + OAMG-5342 + OAMG-5344 + OAMG-6519 (possibly related) + OAMG-7755 + +Bugzillas: + https://bugzilla.redhat.com/show_bug.cgi?id=1988457 + https://bugzilla.redhat.com/show_bug.cgi?id=1988449 + https://bugzilla.redhat.com/show_bug.cgi?id=2055117 (possibly fixed) +--- + .../systemd/repairsystemdsymlinks/actor.py | 25 +++++ + .../libraries/repairsystemdsymlinks.py | 76 ++++++++++++++++ + .../tests/test_repairsystemdsymlinks.py | 91 +++++++++++++++++++ + 3 files changed, 192 insertions(+) + create mode 100644 repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/actor.py + create mode 100644 repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/libraries/repairsystemdsymlinks.py + create mode 100644 repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/tests/test_repairsystemdsymlinks.py + +diff --git a/repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/actor.py b/repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/actor.py +new file mode 100644 +index 00000000..29134373 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/actor.py +@@ -0,0 +1,25 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import repairsystemdsymlinks ++from leapp.models import SystemdBrokenSymlinksSource, SystemdBrokenSymlinksTarget, SystemdServicesInfoSource ++from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag ++ ++ ++class RepairSystemdSymlinks(Actor): ++ """ ++ Fix broken or incorrect systemd symlinks ++ ++ Symlinks are handled in the following fashion, if the symlink points to: ++ - a removed unit, such a symlink is deleted ++ - a unit whose installation has been changed (e.g. changed WantedBy), ++ such symlinks are fixed (re-enabled using systemctl) ++ ++ Symlinks that have been already broken before the in-place upgrade are ignored. 
++ """ ++ ++ name = 'repair_systemd_symlinks' ++ consumes = (SystemdBrokenSymlinksSource, SystemdBrokenSymlinksTarget, SystemdServicesInfoSource) ++ produces = () ++ tags = (ApplicationsPhaseTag, IPUWorkflowTag) ++ ++ def process(self): ++ repairsystemdsymlinks.process() +diff --git a/repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/libraries/repairsystemdsymlinks.py b/repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/libraries/repairsystemdsymlinks.py +new file mode 100644 +index 00000000..884b001e +--- /dev/null ++++ b/repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/libraries/repairsystemdsymlinks.py +@@ -0,0 +1,76 @@ ++import os ++ ++from leapp.exceptions import StopActorExecutionError ++from leapp.libraries.common import systemd ++from leapp.libraries.common.config.version import get_target_major_version ++from leapp.libraries.stdlib import api, CalledProcessError, run ++from leapp.models import SystemdBrokenSymlinksSource, SystemdBrokenSymlinksTarget, SystemdServicesInfoSource ++ ++_INSTALLATION_CHANGED_EL8 = ['rngd.service', 'sysstat.service'] ++_INSTALLATION_CHANGED_EL9 = [] ++ ++ ++def _get_installation_changed_units(): ++ version = get_target_major_version() ++ if version == '8': ++ return _INSTALLATION_CHANGED_EL8 ++ if version == '9': ++ return _INSTALLATION_CHANGED_EL9 ++ ++ return [] ++ ++ ++def _service_enabled_source(service_info, name): ++ service_file = next((s for s in service_info.service_files if s.name == name), None) ++ return service_file and service_file.state == 'enabled' ++ ++ ++def _is_unit_enabled(unit): ++ try: ++ ret = run(['systemctl', 'is-enabled', unit], split=True)['stdout'] ++ return ret and ret[0] == 'enabled' ++ except (OSError, CalledProcessError): ++ return False ++ ++ ++def _handle_newly_broken_symlinks(symlinks, service_info): ++ for symlink in symlinks: ++ unit = os.path.basename(symlink) ++ try: ++ if not _is_unit_enabled(unit): ++ # removes the broken symlink ++ 
systemd.disable_unit(unit) ++ elif _service_enabled_source(service_info, unit) and _is_unit_enabled(unit): ++ # removes the old symlinks and creates the new ones ++ systemd.reenable_unit(unit) ++ except CalledProcessError: ++ # TODO(mmatuska): Produce post-upgrade report: failed to handle broken symlink (and suggest a fix?) ++ pass ++ ++ ++def _handle_bad_symlinks(service_files): ++ install_changed_units = _get_installation_changed_units() ++ potentially_bad = [s for s in service_files if s.name in install_changed_units] ++ ++ for unit_file in potentially_bad: ++ if unit_file.state == 'enabled' and _is_unit_enabled(unit_file.name): ++ systemd.reenable_unit(unit_file.name) ++ ++ ++def process(): ++ service_info_source = next(api.consume(SystemdServicesInfoSource), None) ++ if not service_info_source: ++ raise StopActorExecutionError("Expected SystemdServicesInfoSource message, but got None") ++ ++ source_info = next(api.consume(SystemdBrokenSymlinksSource), None) ++ target_info = next(api.consume(SystemdBrokenSymlinksTarget), None) ++ ++ if source_info and target_info: ++ newly_broken = [] ++ newly_broken = [s for s in target_info.broken_symlinks if s not in source_info.broken_symlinks] ++ if not newly_broken: ++ return ++ ++ _handle_newly_broken_symlinks(newly_broken, service_info_source) ++ ++ _handle_bad_symlinks(service_info_source.service_files) +diff --git a/repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/tests/test_repairsystemdsymlinks.py b/repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/tests/test_repairsystemdsymlinks.py +new file mode 100644 +index 00000000..2394df5e +--- /dev/null ++++ b/repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/tests/test_repairsystemdsymlinks.py +@@ -0,0 +1,91 @@ ++from leapp.libraries.actor import repairsystemdsymlinks ++from leapp.libraries.common import systemd ++from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked ++from leapp.libraries.stdlib import 
api, CalledProcessError, run ++from leapp.models import ( ++ SystemdBrokenSymlinksSource, ++ SystemdBrokenSymlinksTarget, ++ SystemdServiceFile, ++ SystemdServicesInfoSource ++) ++ ++ ++class MockedSystemdCmd(object): ++ def __init__(self): ++ self.units = [] ++ ++ def __call__(self, unit, *args, **kwargs): ++ self.units.append(unit) ++ return {} ++ ++ ++def test_bad_symslinks(monkeypatch): ++ service_files = [ ++ SystemdServiceFile(name='rngd.service', state='enabled'), ++ SystemdServiceFile(name='sysstat.service', state='disabled'), ++ SystemdServiceFile(name='hello.service', state='enabled'), ++ SystemdServiceFile(name='world.service', state='disabled'), ++ ] ++ ++ def is_unit_enabled_mocked(unit): ++ return True ++ ++ monkeypatch.setattr(repairsystemdsymlinks, '_is_unit_enabled', is_unit_enabled_mocked) ++ ++ reenable_mocked = MockedSystemdCmd() ++ monkeypatch.setattr(systemd, 'reenable_unit', reenable_mocked) ++ ++ service_info = SystemdServicesInfoSource(service_files=service_files) ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[service_info])) ++ ++ repairsystemdsymlinks._handle_bad_symlinks(service_info.service_files) ++ ++ assert reenable_mocked.units == ['rngd.service'] ++ ++ ++def test_handle_newly_broken_symlink(monkeypatch): ++ ++ symlinks = [ ++ '/etc/systemd/system/default.target.wants/systemd-readahead-replay.service', ++ '/etc/systemd/system/multi-user.target.wants/vdo.service', ++ '/etc/systemd/system/multi-user.target.wants/hello.service', ++ '/etc/systemd/system/multi-user.target.wants/world.service', ++ '/etc/systemd/system/multi-user.target.wants/foo.service', ++ '/etc/systemd/system/multi-user.target.wants/bar.service', ++ ] ++ ++ def is_unit_enabled_mocked(unit): ++ return unit in ('hello.service', 'foo.service') ++ ++ expect_disabled = [ ++ 'systemd-readahead-replay.service', ++ 'vdo.service', ++ 'world.service', ++ 'bar.service', ++ ] ++ ++ expect_reenabled = [ ++ 'hello.service', ++ ] ++ ++ 
monkeypatch.setattr(repairsystemdsymlinks, '_is_unit_enabled', is_unit_enabled_mocked) ++ ++ reenable_mocked = MockedSystemdCmd() ++ monkeypatch.setattr(systemd, 'reenable_unit', reenable_mocked) ++ ++ disable_mocked = MockedSystemdCmd() ++ monkeypatch.setattr(systemd, 'disable_unit', disable_mocked) ++ ++ service_files = [ ++ SystemdServiceFile(name='systemd-readahead-replay.service', state='enabled'), ++ SystemdServiceFile(name='vdo.service', state='disabled'), ++ SystemdServiceFile(name='hello.service', state='enabled'), ++ SystemdServiceFile(name='world.service', state='disabled'), ++ SystemdServiceFile(name='foo.service', state='disabled'), ++ SystemdServiceFile(name='bar.service', state='enabled'), ++ ] ++ service_info = SystemdServicesInfoSource(service_files=service_files) ++ repairsystemdsymlinks._handle_newly_broken_symlinks(symlinks, service_info) ++ ++ assert reenable_mocked.units == expect_reenabled ++ assert disable_mocked.units == expect_disabled +-- +2.38.1 + diff --git a/SOURCES/0028-Add-check-for-systemd-symlinks-broken-before-the-upg.patch b/SOURCES/0028-Add-check-for-systemd-symlinks-broken-before-the-upg.patch new file mode 100644 index 0000000..43c2eee --- /dev/null +++ b/SOURCES/0028-Add-check-for-systemd-symlinks-broken-before-the-upg.patch @@ -0,0 +1,271 @@ +From 2713d60a99b60a352b89374dec89f6faa683861d Mon Sep 17 00:00:00 2001 +From: Matej Matuska +Date: Wed, 16 Nov 2022 14:19:36 +0100 +Subject: [PATCH 28/32] Add check for systemd symlinks broken before the + upgrade + +Broken systemd symlinks are not treated during the in-place upgrade +if they are broken prior the leapp execution. This could lead in +unwanted behaviour on the upgraded system, but it does not have to +- so we do not inhibit the upgrade when such symlinks are detected. + +Also, such symlinks could have been created by previous in-place +upgrade, when an automatical fixing of broken symlinks have not been +implemented yet. 
By this actor we inform people about such issues, +so they can fix it prior the upgrade. + +Co-authored-by: Petr Stodulka +--- + .../checksystemdbrokensymlinks/actor.py | 29 +++++ + .../libraries/checksystemdbrokensymlinks.py | 106 ++++++++++++++++++ + .../tests/test_checksystemdbrokensymlinks.py | 89 +++++++++++++++ + 3 files changed, 224 insertions(+) + create mode 100644 repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/actor.py + create mode 100644 repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/libraries/checksystemdbrokensymlinks.py + create mode 100644 repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/tests/test_checksystemdbrokensymlinks.py + +diff --git a/repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/actor.py b/repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/actor.py +new file mode 100644 +index 00000000..257e8c33 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/actor.py +@@ -0,0 +1,29 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import checksystemdbrokensymlinks ++from leapp.models import SystemdBrokenSymlinksSource, SystemdServicesInfoSource ++from leapp.reporting import Report ++from leapp.tags import ChecksPhaseTag, IPUWorkflowTag ++ ++ ++class CheckSystemdBrokenSymlinks(Actor): ++ """ ++ Check whether some systemd symlinks are broken ++ ++ If some systemd symlinks are broken, report them but do not inhibit the ++ upgrade. The symlinks broken already before the upgrade will not be ++ handled by the upgrade process anyhow. 
Two different reports are created: ++ - symlinks which have the same filename as an existing enabled systemd ++ service (the symlink doesn't point to an existing unit file, but the ++ service is enabled) ++ - broken symlinks which names do not correspond with any existing systemd ++ unit file (typically when the service is removed but not disabled ++ correctly) ++ """ ++ ++ name = 'check_systemd_broken_symlinks' ++ consumes = (SystemdBrokenSymlinksSource, SystemdServicesInfoSource) ++ produces = (Report,) ++ tags = (ChecksPhaseTag, IPUWorkflowTag) ++ ++ def process(self): ++ checksystemdbrokensymlinks.process() +diff --git a/repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/libraries/checksystemdbrokensymlinks.py b/repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/libraries/checksystemdbrokensymlinks.py +new file mode 100644 +index 00000000..23addf72 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/libraries/checksystemdbrokensymlinks.py +@@ -0,0 +1,106 @@ ++import os ++ ++from leapp import reporting ++from leapp.exceptions import StopActorExecutionError ++from leapp.libraries.stdlib import api ++from leapp.models import SystemdBrokenSymlinksSource, SystemdServicesInfoSource ++ ++FMT_LIST_SEPARATOR = '\n - ' ++ ++ ++def _report_broken_symlinks(symlinks): ++ summary = ( ++ 'Leapp detected broken systemd symlinks on the system that do not' ++ ' correspond to any installed systemd unit.' ++ ' This typically happens when the original systemd unit file has been' ++ ' removed (e.g. an rpm removal) or renamed and the system configration' ++ ' has not been properly modified.' ++ ' These symlinks will not be handled during the in-place upgrade' ++ ' as they are already broken.' 
++ ' The list of detected broken systemd symlinks:{}{}' ++ .format(FMT_LIST_SEPARATOR, FMT_LIST_SEPARATOR.join(sorted(symlinks))) ++ ) ++ ++ command = ['/usr/bin/rm'] + symlinks ++ ++ hint = ( ++ 'Remove the invalid symlinks before the upgrade.' ++ ) ++ ++ reporting.create_report([ ++ reporting.Title( ++ 'Detected broken systemd symlinks for non-existing services' ++ ), ++ reporting.Summary(summary), ++ reporting.Remediation(hint=hint, commands=[command]), ++ reporting.Severity(reporting.Severity.LOW), ++ reporting.Tags([reporting.Tags.FILESYSTEM]), ++ ]) ++ ++ ++def _report_enabled_services_broken_symlinks(symlinks): ++ summary = ( ++ 'Leapp detected broken systemd symlinks on the system that correspond' ++ ' to existing systemd units, but on different paths. This could lead' ++ ' in future to unexpected behaviour. Also, these symlinks will not be' ++ ' handled during the in-place upgrade as they are already broken.' ++ ' The list of detected broken symlinks:{}{}' ++ .format(FMT_LIST_SEPARATOR, FMT_LIST_SEPARATOR.join(sorted(symlinks))) ++ ) ++ ++ hint = ( ++ 'Fix the broken symlinks before the upgrade or remove them. For this' ++ ' purpose, you can re-enable or disable the related systemd services' ++ ' using the systemctl tool.' ++ ) ++ ++ reporting.create_report([ ++ reporting.Title( ++ 'Detected broken systemd symlinks for existing services' ++ ), ++ reporting.Summary(summary), ++ reporting.Remediation(hint=hint), ++ reporting.Severity(reporting.Severity.MEDIUM), ++ reporting.Tags([reporting.Tags.FILESYSTEM]), ++ ]) ++ ++ ++def _is_enabled(unit, service_files): ++ # FIXME(pstodulk): currently our msgs contain only information about systemd ++ # services. If the unit (broken symlink) refers to timers, etc. They will ++ # be treated now as disabled (read: symlink is broken and there is not ++ # a corresponding unit-file on the system). Considering it for now as ++ # minor issue that will be resolved in future. 
++ # NOTE: One of possible solution is to put the information about enabled broken ++ # symlinks to the msg, so it can be just consumed. ++ for service_file in service_files: ++ if service_file.name == unit: ++ return service_file.state == 'enabled' ++ return False ++ ++ ++def process(): ++ broken_symlinks_info = next(api.consume(SystemdBrokenSymlinksSource), None) ++ if not broken_symlinks_info: ++ # nothing to do ++ return ++ services = next(api.consume(SystemdServicesInfoSource), None) ++ if not services: ++ # This is just a seatbelt. It's not expected this msg will be missing. ++ # Skipping tests. ++ raise StopActorExecutionError('Missing SystemdServicesInfoSource message.') ++ ++ enabled_to_report = [] ++ to_report = [] ++ for broken_symlink in broken_symlinks_info.broken_symlinks: ++ unit = os.path.basename(broken_symlink) ++ if _is_enabled(unit, services.service_files): ++ enabled_to_report.append(broken_symlink) ++ else: ++ to_report.append(broken_symlink) ++ ++ if enabled_to_report: ++ _report_enabled_services_broken_symlinks(enabled_to_report) ++ ++ if to_report: ++ _report_broken_symlinks(to_report) +diff --git a/repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/tests/test_checksystemdbrokensymlinks.py b/repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/tests/test_checksystemdbrokensymlinks.py +new file mode 100644 +index 00000000..2364f7a5 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/tests/test_checksystemdbrokensymlinks.py +@@ -0,0 +1,89 @@ ++import pytest ++ ++from leapp import reporting ++from leapp.libraries.actor import checksystemdbrokensymlinks ++from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked ++from leapp.libraries.stdlib import api ++from leapp.models import SystemdBrokenSymlinksSource, SystemdServiceFile, SystemdServicesInfoSource ++ ++ ++def test_report_broken_symlinks(monkeypatch): ++ ++ symlinks = [ ++ 
'/etc/systemd/system/multi-user.target.wants/hello.service', ++ '/etc/systemd/system/multi-user.target.wants/world.service', ++ ] ++ ++ created_reports = create_report_mocked() ++ monkeypatch.setattr(reporting, 'create_report', created_reports) ++ ++ checksystemdbrokensymlinks._report_broken_symlinks(symlinks) ++ ++ assert created_reports.called ++ assert all([s in created_reports.report_fields['summary'] for s in symlinks]) ++ ++ ++def test_report_enabled_services_broken_symlinks(monkeypatch): ++ symlinks = [ ++ '/etc/systemd/system/multi-user.target.wants/foo.service', ++ '/etc/systemd/system/multi-user.target.wants/bar.service', ++ ] ++ ++ created_reports = create_report_mocked() ++ monkeypatch.setattr(reporting, 'create_report', created_reports) ++ ++ checksystemdbrokensymlinks._report_enabled_services_broken_symlinks(symlinks) ++ ++ assert created_reports.called ++ assert all([s in created_reports.report_fields['summary'] for s in symlinks]) ++ ++ ++class ReportBrokenSymlinks(object): ++ def __init__(self): ++ self.symlinks = [] ++ ++ def __call__(self, unit, *args, **kwargs): ++ self.symlinks.append(unit) ++ return {} ++ ++ ++def test_broken_symlinks_reported(monkeypatch): ++ broken_symlinks = SystemdBrokenSymlinksSource(broken_symlinks=[ ++ '/etc/systemd/system/multi-user.target.wants/foo.service', ++ '/etc/systemd/system/multi-user.target.wants/bar.service', ++ '/etc/systemd/system/multi-user.target.wants/hello.service', ++ '/etc/systemd/system/multi-user.target.wants/world.service', ++ ]) ++ systemd_services = SystemdServicesInfoSource(service_files=[ ++ SystemdServiceFile(name='foo.service', state='enabled'), ++ SystemdServiceFile(name='bar.service', state='enabled'), ++ SystemdServiceFile(name='hello.service', state='disabled'), ++ ]) ++ broken = [] ++ enabled_broken = [] ++ ++ def _report_broken_symlinks_mocked(symlinks): ++ broken.extend(symlinks) ++ ++ def _report_enabled_services_broken_symlinks_mocked(symlinks): ++ enabled_broken.extend(symlinks) ++ 
++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[broken_symlinks, systemd_services])) ++ monkeypatch.setattr(checksystemdbrokensymlinks, '_report_broken_symlinks', _report_broken_symlinks_mocked) ++ monkeypatch.setattr( ++ checksystemdbrokensymlinks, ++ '_report_enabled_services_broken_symlinks', ++ _report_enabled_services_broken_symlinks_mocked ++ ) ++ ++ checksystemdbrokensymlinks.process() ++ ++ assert broken == [ ++ '/etc/systemd/system/multi-user.target.wants/hello.service', ++ '/etc/systemd/system/multi-user.target.wants/world.service', ++ ] ++ ++ assert enabled_broken == [ ++ '/etc/systemd/system/multi-user.target.wants/foo.service', ++ '/etc/systemd/system/multi-user.target.wants/bar.service', ++ ] +-- +2.38.1 + diff --git a/SOURCES/0029-checksystemdservicestasks-update-docstrings-extend-t.patch b/SOURCES/0029-checksystemdservicestasks-update-docstrings-extend-t.patch new file mode 100644 index 0000000..7fdce37 --- /dev/null +++ b/SOURCES/0029-checksystemdservicestasks-update-docstrings-extend-t.patch @@ -0,0 +1,87 @@ +From 4e2767e0eab5fe99b9e99dfea8a8425a1297574b Mon Sep 17 00:00:00 2001 +From: Petr Stodulka +Date: Wed, 16 Nov 2022 14:10:48 +0100 +Subject: [PATCH 29/32] checksystemdservicestasks: update docstrings + extend + tests + +--- + .../systemd/checksystemdservicetasks/actor.py | 11 +++++------ + .../tests/test_checksystemdservicestasks.py | 16 +++++++++++++++- + 2 files changed, 20 insertions(+), 7 deletions(-) + +diff --git a/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/actor.py b/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/actor.py +index 2df995ee..547a13df 100644 +--- a/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/actor.py ++++ b/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/actor.py +@@ -7,17 +7,16 @@ from leapp.tags import IPUWorkflowTag, TargetTransactionChecksPhaseTag + + class CheckSystemdServicesTasks(Actor): + """ +- Inhibits 
upgrade if SystemdServicesTasks tasks are in conflict ++ Inhibit the upgrade if SystemdServicesTasks tasks are in conflict + +- There is possibility, that SystemdServicesTasks messages with conflicting +- requested service states could be produced. For example a service is +- requested to be both enabled and disabled. This actor inhibits upgrade in +- such cases. ++ SystemdServicesTasks messages with conflicting requested service states ++ could be produced. For example a service could be requested to be both ++ - enabled and disabled. This actor inhibits upgrade in such cases. + + Note: We expect that SystemdServicesTasks could be produced even after the + TargetTransactionChecksPhase (e.g. during the ApplicationPhase). The + purpose of this actor is to report collisions in case we can already detect +- them. In case of conflicts caused by produced messages later we just log ++ them. In case of conflicts caused by messages produced later we just log + the collisions and the services will end up disabled. 
+ """ + +diff --git a/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/tests/test_checksystemdservicestasks.py b/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/tests/test_checksystemdservicestasks.py +index 36ded92f..88c278d6 100644 +--- a/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/tests/test_checksystemdservicestasks.py ++++ b/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/tests/test_checksystemdservicestasks.py +@@ -5,6 +5,7 @@ from leapp.libraries.actor import checksystemdservicetasks + from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked + from leapp.libraries.stdlib import api + from leapp.models import SystemdServicesTasks ++from leapp.utils.report import is_inhibitor + + + @pytest.mark.parametrize( +@@ -44,6 +45,18 @@ from leapp.models import SystemdServicesTasks + ], + True + ), ++ ( ++ [ ++ SystemdServicesTasks(to_enable=['hello.service']), ++ SystemdServicesTasks(to_disable=['world.service']), ++ SystemdServicesTasks(to_enable=['hello.service', 'kitty.service']) ++ ], ++ False ++ ), ++ ( ++ [], ++ False ++ ) + ] + ) + def test_conflicts_detected(monkeypatch, tasks, should_inhibit): +@@ -55,6 +68,7 @@ def test_conflicts_detected(monkeypatch, tasks, should_inhibit): + checksystemdservicetasks.check_conflicts() + + assert bool(created_reports.called) == should_inhibit ++ assert is_inhibitor(created_reports.report_fields) == should_inhibit + + + @pytest.mark.parametrize( +@@ -84,5 +98,5 @@ def test_coflict_reported(monkeypatch, tasks, expected_reported): + + checksystemdservicetasks.check_conflicts() + +- report_summary = reporting.create_report.report_fields['summary'] ++ report_summary = created_reports.report_fields['summary'] + assert all(service in report_summary for service in expected_reported) +-- +2.38.1 + diff --git a/SOURCES/0030-Support-IPU-using-a-target-RHEL-installation-ISO-ima.patch 
b/SOURCES/0030-Support-IPU-using-a-target-RHEL-installation-ISO-ima.patch new file mode 100644 index 0000000..af172d2 --- /dev/null +++ b/SOURCES/0030-Support-IPU-using-a-target-RHEL-installation-ISO-ima.patch @@ -0,0 +1,1515 @@ +From 515a6a7b22c0848bacde96cee66449435b3340d6 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Michal=20He=C4=8Dko?= +Date: Wed, 16 Nov 2022 18:15:00 +0100 +Subject: [PATCH 30/32] Support IPU using a target RHEL installation ISO image + (#979) + +Introduced an option to use an ISO file as a target RHEL version content source + +With the current enhancement, it's possible to IPU using RHEL ISO image. +For that case it's introduced the --iso CLI option: + leapp upgrade --iso PATH_TO_RHEL_ISO + +The ISO must be stored on local partition (removable and network media are +not allowed). + +Packaging: +* Requires cpio +* Bump leapp-repository-dependencies to 8 + +New models: + TargetOSInstallationImage +--- + Makefile | 2 +- + commands/preupgrade/__init__.py | 1 + + commands/upgrade/__init__.py | 1 + + commands/upgrade/util.py | 7 + + packaging/leapp-el7toel8-deps.spec | 6 +- + packaging/leapp-repository.spec | 6 +- + .../common/actors/checktargetiso/actor.py | 18 ++ + .../libraries/check_target_iso.py | 182 +++++++++++++++ + .../tests/test_check_target_iso.py | 168 +++++++++++++ + .../common/actors/createisorepofile/actor.py | 18 ++ + .../libraries/create_iso_repofile.py | 36 +++ + .../common/actors/dnfdryrun/actor.py | 6 +- + .../common/actors/dnfpackagedownload/actor.py | 6 +- + .../actors/dnftransactioncheck/actor.py | 5 +- + .../actors/initramfs/mounttargetiso/actor.py | 16 ++ + .../libraries/mount_target_iso.py | 27 +++ + .../upgradeinitramfsgenerator/actor.py | 2 + + .../libraries/upgradeinitramfsgenerator.py | 8 +- + .../common/actors/localreposinhibit/actor.py | 59 +++-- + .../tests/test_unit_localreposinhibit.py | 9 + + .../common/actors/scantargetiso/actor.py | 16 ++ + .../libraries/scan_target_os_iso.py | 96 ++++++++ + 
.../tests/test_scan_target_iso.py | 220 ++++++++++++++++++ + .../actors/targetuserspacecreator/actor.py | 4 +- + .../libraries/userspacegen.py | 30 +-- + .../tests/unit_test_targetuserspacecreator.py | 1 + + .../common/libraries/dnfplugin.py | 47 +++- + .../common/libraries/mounting.py | 20 ++ + .../common/models/upgradeiso.py | 14 ++ + 29 files changed, 977 insertions(+), 54 deletions(-) + create mode 100644 repos/system_upgrade/common/actors/checktargetiso/actor.py + create mode 100644 repos/system_upgrade/common/actors/checktargetiso/libraries/check_target_iso.py + create mode 100644 repos/system_upgrade/common/actors/checktargetiso/tests/test_check_target_iso.py + create mode 100644 repos/system_upgrade/common/actors/createisorepofile/actor.py + create mode 100644 repos/system_upgrade/common/actors/createisorepofile/libraries/create_iso_repofile.py + create mode 100644 repos/system_upgrade/common/actors/initramfs/mounttargetiso/actor.py + create mode 100644 repos/system_upgrade/common/actors/initramfs/mounttargetiso/libraries/mount_target_iso.py + create mode 100644 repos/system_upgrade/common/actors/scantargetiso/actor.py + create mode 100644 repos/system_upgrade/common/actors/scantargetiso/libraries/scan_target_os_iso.py + create mode 100644 repos/system_upgrade/common/actors/scantargetiso/tests/test_scan_target_iso.py + create mode 100644 repos/system_upgrade/common/models/upgradeiso.py + +diff --git a/Makefile b/Makefile +index e8d9f170..7342d4bf 100644 +--- a/Makefile ++++ b/Makefile +@@ -448,7 +448,7 @@ clean_containers: + + fast_lint: + @. 
$(VENVNAME)/bin/activate; \ +- FILES_TO_LINT="$$(git diff --name-only $(MASTER_BRANCH)| grep '\.py$$')"; \ ++ FILES_TO_LINT="$$(git diff --name-only $(MASTER_BRANCH) --diff-filter AMR | grep '\.py$$')"; \ + if [[ -n "$$FILES_TO_LINT" ]]; then \ + pylint -j 0 $$FILES_TO_LINT && \ + flake8 $$FILES_TO_LINT; \ +diff --git a/commands/preupgrade/__init__.py b/commands/preupgrade/__init__.py +index be2c7be8..d612fbb1 100644 +--- a/commands/preupgrade/__init__.py ++++ b/commands/preupgrade/__init__.py +@@ -24,6 +24,7 @@ from leapp.utils.output import beautify_actor_exception, report_errors, report_i + help='Set preferred channel for the IPU target.', + choices=['ga', 'tuv', 'e4s', 'eus', 'aus'], + value_type=str.lower) # This allows the choices to be case insensitive ++@command_opt('iso', help='Use provided target RHEL installation image to perform the in-place upgrade.') + @command_opt('target', choices=command_utils.get_supported_target_versions(), + help='Specify RHEL version to upgrade to for {} detected upgrade flavour'.format( + command_utils.get_upgrade_flavour())) +diff --git a/commands/upgrade/__init__.py b/commands/upgrade/__init__.py +index 39bfd525..005538ed 100644 +--- a/commands/upgrade/__init__.py ++++ b/commands/upgrade/__init__.py +@@ -30,6 +30,7 @@ from leapp.utils.output import beautify_actor_exception, report_errors, report_i + help='Set preferred channel for the IPU target.', + choices=['ga', 'tuv', 'e4s', 'eus', 'aus'], + value_type=str.lower) # This allows the choices to be case insensitive ++@command_opt('iso', help='Use provided target RHEL installation image to perform the in-place upgrade.') + @command_opt('target', choices=command_utils.get_supported_target_versions(), + help='Specify RHEL version to upgrade to for {} detected upgrade flavour'.format( + command_utils.get_upgrade_flavour())) +diff --git a/commands/upgrade/util.py b/commands/upgrade/util.py +index ce0b5433..aa433786 100644 +--- a/commands/upgrade/util.py ++++ 
b/commands/upgrade/util.py +@@ -199,6 +199,13 @@ def prepare_configuration(args): + if args.channel: + os.environ['LEAPP_TARGET_PRODUCT_CHANNEL'] = args.channel + ++ if args.iso: ++ os.environ['LEAPP_TARGET_ISO'] = args.iso ++ target_iso_path = os.environ.get('LEAPP_TARGET_ISO') ++ if target_iso_path: ++ # Make sure we convert rel paths into abs ones while we know what CWD is ++ os.environ['LEAPP_TARGET_ISO'] = os.path.abspath(target_iso_path) ++ + # Check upgrade path and fail early if it's unsupported + target_version, flavor = command_utils.vet_upgrade_path(args) + os.environ['LEAPP_UPGRADE_PATH_TARGET_RELEASE'] = target_version +diff --git a/packaging/leapp-el7toel8-deps.spec b/packaging/leapp-el7toel8-deps.spec +index cdfa7f98..822b6f63 100644 +--- a/packaging/leapp-el7toel8-deps.spec ++++ b/packaging/leapp-el7toel8-deps.spec +@@ -9,7 +9,7 @@ + %endif + + +-%define leapp_repo_deps 7 ++%define leapp_repo_deps 8 + %define leapp_framework_deps 5 + + # NOTE: the Version contains the %{rhel} macro just for the convenience to +@@ -61,6 +61,10 @@ Requires: dnf-command(config-manager) + # sure + Requires: dracut + ++# Used to determine RHEL version of a given target RHEL installation image - ++# uncompressing redhat-release package from the ISO. ++Requires: cpio ++ + # just to be sure that /etc/modprobe.d is present + Requires: kmod + +diff --git a/packaging/leapp-repository.spec b/packaging/leapp-repository.spec +index 89750927..0ffba71c 100644 +--- a/packaging/leapp-repository.spec ++++ b/packaging/leapp-repository.spec +@@ -2,7 +2,7 @@ + %global repositorydir %{leapp_datadir}/repositories + %global custom_repositorydir %{leapp_datadir}/custom-repositories + +-%define leapp_repo_deps 7 ++%define leapp_repo_deps 8 + + %if 0%{?rhel} == 7 + %define leapp_python_sitelib %{python2_sitelib} +@@ -106,6 +106,10 @@ Requires: leapp-framework >= 3.1, leapp-framework < 4 + # tool to be installed as well. 
+ Requires: leapp + ++# Used to determine RHEL version of a given target RHEL installation image - ++# uncompressing redhat-release package from the ISO. ++Requires: cpio ++ + # The leapp-repository rpm is renamed to %%{lpr_name} + Obsoletes: leapp-repository < 0.14.0-%{release} + Provides: leapp-repository = %{version}-%{release} +diff --git a/repos/system_upgrade/common/actors/checktargetiso/actor.py b/repos/system_upgrade/common/actors/checktargetiso/actor.py +new file mode 100644 +index 00000000..4d602de8 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/checktargetiso/actor.py +@@ -0,0 +1,18 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import check_target_iso ++from leapp.models import Report, StorageInfo, TargetOSInstallationImage ++from leapp.tags import ChecksPhaseTag, IPUWorkflowTag ++ ++ ++class CheckTargetISO(Actor): ++ """ ++ Check that the provided target ISO is a valid ISO image and is located on a persistent partition. ++ """ ++ ++ name = 'check_target_iso' ++ consumes = (StorageInfo, TargetOSInstallationImage,) ++ produces = (Report,) ++ tags = (IPUWorkflowTag, ChecksPhaseTag) ++ ++ def process(self): ++ check_target_iso.perform_target_iso_checks() +diff --git a/repos/system_upgrade/common/actors/checktargetiso/libraries/check_target_iso.py b/repos/system_upgrade/common/actors/checktargetiso/libraries/check_target_iso.py +new file mode 100644 +index 00000000..b5b66901 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/checktargetiso/libraries/check_target_iso.py +@@ -0,0 +1,182 @@ ++import os ++ ++from leapp import reporting ++from leapp.exceptions import StopActorExecutionError ++from leapp.libraries.common.config import version ++from leapp.libraries.stdlib import api, CalledProcessError, run ++from leapp.models import StorageInfo, TargetOSInstallationImage ++ ++ ++def inhibit_if_not_valid_iso_file(iso): ++ inhibit_title = None ++ target_os = 'RHEL {}'.format(version.get_target_major_version()) ++ if not 
os.path.exists(iso.path): ++ inhibit_title = 'Provided {target_os} installation ISO does not exists.'.format(target_os=target_os) ++ inhibit_summary_tpl = 'The supplied {target_os} ISO path \'{iso_path}\' does not point to an existing file.' ++ inhibit_summary = inhibit_summary_tpl.format(target_os=target_os, iso_path=iso.path) ++ else: ++ try: ++ # TODO(mhecko): Figure out whether we will keep this since the scan actor is mounting the ISO anyway ++ file_cmd_output = run(['file', '--mime', iso.path]) ++ if 'application/x-iso9660-image' not in file_cmd_output['stdout']: ++ inhibit_title = 'Provided {target_os} installation image is not a valid ISO.'.format( ++ target_os=target_os) ++ summary_tpl = ('The provided {target_os} installation image path \'{iso_path}\'' ++ 'does not point to a valid ISO image.') ++ inhibit_summary = summary_tpl.format(target_os=target_os, iso_path=iso.path) ++ ++ except CalledProcessError as err: ++ raise StopActorExecutionError(message='Failed to check whether {0} is an ISO file.'.format(iso.path), ++ details={'details': '{}'.format(err)}) ++ if inhibit_title: ++ remediation_hint = ('Check whether the supplied target OS installation path points to a valid' ++ '{target_os} ISO image.'.format(target_os=target_os)) ++ ++ reporting.create_report([ ++ reporting.Title(inhibit_title), ++ reporting.Summary(inhibit_summary), ++ reporting.Remediation(hint=remediation_hint), ++ reporting.Severity(reporting.Severity.MEDIUM), ++ reporting.Groups([reporting.Groups.INHIBITOR]), ++ reporting.Groups([reporting.Groups.REPOSITORY]), ++ ]) ++ return True ++ return False ++ ++ ++def inhibit_if_failed_to_mount_iso(iso): ++ if iso.was_mounted_successfully: ++ return False ++ ++ target_os = 'RHEL {0}'.format(version.get_target_major_version()) ++ title = 'Failed to mount the provided {target_os} installation image.' ++ summary = 'The provided {target_os} installation image {iso_path} could not be mounted.' 
++ hint = 'Verify that the provided ISO is a valid {target_os} installation image' ++ reporting.create_report([ ++ reporting.Title(title.format(target_os=target_os)), ++ reporting.Summary(summary.format(target_os=target_os, iso_path=iso.path)), ++ reporting.Remediation(hint=hint.format(target_os=target_os)), ++ reporting.Severity(reporting.Severity.MEDIUM), ++ reporting.Groups([reporting.Groups.INHIBITOR]), ++ reporting.Groups([reporting.Groups.REPOSITORY]), ++ ]) ++ return True ++ ++ ++def inhibit_if_wrong_iso_rhel_version(iso): ++ # If the major version could not be determined, the iso.rhel_version will be an empty string ++ if not iso.rhel_version: ++ reporting.create_report([ ++ reporting.Title( ++ 'Failed to determine RHEL version provided by the supplied installation image.'), ++ reporting.Summary( ++ 'Could not determine what RHEL version does the supplied installation image' ++ ' located at {iso_path} provide.'.format(iso_path=iso.path) ++ ), ++ reporting.Remediation(hint='Check that the supplied image is a valid RHEL installation image.'), ++ reporting.Severity(reporting.Severity.MEDIUM), ++ reporting.Groups([reporting.Groups.INHIBITOR]), ++ reporting.Groups([reporting.Groups.REPOSITORY]), ++ ]) ++ return ++ ++ iso_rhel_major_version = iso.rhel_version.split('.')[0] ++ req_major_ver = version.get_target_major_version() ++ if iso_rhel_major_version != req_major_ver: ++ summary = ('The provided RHEL installation image provides RHEL {iso_rhel_ver}, however, a RHEL ' ++ '{required_rhel_ver} image is required for the upgrade.') ++ ++ reporting.create_report([ ++ reporting.Title('The provided installation image provides invalid RHEL version.'), ++ reporting.Summary(summary.format(iso_rhel_ver=iso.rhel_version, required_rhel_ver=req_major_ver)), ++ reporting.Remediation(hint='Check that the supplied image is a valid RHEL installation image.'), ++ reporting.Severity(reporting.Severity.MEDIUM), ++ reporting.Groups([reporting.Groups.INHIBITOR]), ++ 
reporting.Groups([reporting.Groups.REPOSITORY]), ++ ]) ++ ++ ++def inhibit_if_iso_not_located_on_persistent_partition(iso): ++ # Check whether the filesystem that on which the ISO resides is mounted in a persistent fashion ++ storage_info = next(api.consume(StorageInfo), None) ++ if not storage_info: ++ raise StopActorExecutionError('Actor did not receive any StorageInfo message.') ++ ++ # Assumes that the path has been already checked for validity, e.g., the ISO path points to a file ++ iso_mountpoint = iso.path ++ while not os.path.ismount(iso_mountpoint): # Guaranteed to terminate because we must reach / eventually ++ iso_mountpoint = os.path.dirname(iso_mountpoint) ++ ++ is_iso_on_persistent_partition = False ++ for fstab_entry in storage_info.fstab: ++ if fstab_entry.fs_file == iso_mountpoint: ++ is_iso_on_persistent_partition = True ++ break ++ ++ if not is_iso_on_persistent_partition: ++ target_ver = version.get_target_major_version() ++ title = 'The RHEL {target_ver} installation image is not located on a persistently mounted partition' ++ summary = ('The provided RHEL {target_ver} installation image {iso_path} is located' ++ ' on a partition without an entry in /etc/fstab, causing the partition ' ++ ' to be persistently mounted.') ++ hint = ('Move the installation image to a partition that is persistently mounted, or create an /etc/fstab' ++ ' entry for the partition on which the installation image is located.') ++ ++ reporting.create_report([ ++ reporting.Title(title.format(target_ver=target_ver)), ++ reporting.Summary(summary.format(target_ver=target_ver, iso_path=iso.path)), ++ reporting.Remediation(hint=hint), ++ reporting.RelatedResource('file', '/etc/fstab'), ++ reporting.Severity(reporting.Severity.MEDIUM), ++ reporting.Groups([reporting.Groups.INHIBITOR]), ++ reporting.Groups([reporting.Groups.REPOSITORY]), ++ ]) ++ ++ ++def inihibit_if_iso_does_not_contain_basic_repositories(iso): ++ missing_basic_repoids = {'BaseOS', 'AppStream'} ++ ++ for 
custom_repo in iso.repositories: ++ missing_basic_repoids.remove(custom_repo.repoid) ++ if not missing_basic_repoids: ++ break ++ ++ if missing_basic_repoids: ++ target_ver = version.get_target_major_version() ++ ++ title = 'Provided RHEL {target_ver} installation ISO is missing fundamental repositories.' ++ summary = ('The supplied RHEL {target_ver} installation ISO {iso_path} does not contain ' ++ '{missing_repos} repositor{suffix}') ++ hint = 'Check whether the supplied ISO is a valid RHEL {target_ver} installation image.' ++ ++ reporting.create_report([ ++ reporting.Title(title.format(target_ver=target_ver)), ++ reporting.Summary(summary.format(target_ver=target_ver, ++ iso_path=iso.path, ++ missing_repos=','.join(missing_basic_repoids), ++ suffix=('y' if len(missing_basic_repoids) == 1 else 'ies'))), ++ reporting.Remediation(hint=hint.format(target_ver=target_ver)), ++ reporting.Severity(reporting.Severity.MEDIUM), ++ reporting.Groups([reporting.Groups.INHIBITOR]), ++ reporting.Groups([reporting.Groups.REPOSITORY]), ++ ]) ++ ++ ++def perform_target_iso_checks(): ++ requested_target_iso_msg_iter = api.consume(TargetOSInstallationImage) ++ target_iso = next(requested_target_iso_msg_iter, None) ++ ++ if not target_iso: ++ return ++ ++ if next(requested_target_iso_msg_iter, None): ++ api.current_logger().warn('Received multiple msgs with target ISO to use.') ++ ++ # Cascade the inhibiting conditions so that we do not spam the user with inhibitors ++ is_iso_invalid = inhibit_if_not_valid_iso_file(target_iso) ++ if not is_iso_invalid: ++ failed_to_mount_iso = inhibit_if_failed_to_mount_iso(target_iso) ++ if not failed_to_mount_iso: ++ inhibit_if_wrong_iso_rhel_version(target_iso) ++ inhibit_if_iso_not_located_on_persistent_partition(target_iso) ++ inihibit_if_iso_does_not_contain_basic_repositories(target_iso) +diff --git a/repos/system_upgrade/common/actors/checktargetiso/tests/test_check_target_iso.py 
b/repos/system_upgrade/common/actors/checktargetiso/tests/test_check_target_iso.py +new file mode 100644 +index 00000000..d819bc34 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/checktargetiso/tests/test_check_target_iso.py +@@ -0,0 +1,168 @@ ++import os ++ ++import pytest ++ ++from leapp import reporting ++from leapp.libraries.actor import check_target_iso ++from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked ++from leapp.libraries.stdlib import api ++from leapp.models import CustomTargetRepository, FstabEntry, StorageInfo, TargetOSInstallationImage ++from leapp.utils.report import is_inhibitor ++ ++ ++@pytest.mark.parametrize('mount_successful', (True, False)) ++def test_inhibit_on_iso_mount_failure(monkeypatch, mount_successful): ++ create_report_mock = create_report_mocked() ++ monkeypatch.setattr(reporting, 'create_report', create_report_mock) ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked()) ++ ++ target_iso_msg = TargetOSInstallationImage(path='', ++ mountpoint='', ++ repositories=[], ++ was_mounted_successfully=mount_successful) ++ ++ check_target_iso.inhibit_if_failed_to_mount_iso(target_iso_msg) ++ ++ expected_report_count = 0 if mount_successful else 1 ++ assert create_report_mock.called == expected_report_count ++ if not mount_successful: ++ assert is_inhibitor(create_report_mock.reports[0]) ++ ++ ++@pytest.mark.parametrize(('detected_iso_rhel_ver', 'required_target_ver', 'should_inhibit'), ++ (('8.6', '8.6', False), ('7.9', '8.6', True), ('8.5', '8.6', False), ('', '8.6', True))) ++def test_inhibit_on_detected_rhel_version(monkeypatch, detected_iso_rhel_ver, required_target_ver, should_inhibit): ++ create_report_mock = create_report_mocked() ++ monkeypatch.setattr(reporting, 'create_report', create_report_mock) ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(dst_ver=required_target_ver)) ++ ++ target_iso_msg = TargetOSInstallationImage(path='', ++ mountpoint='', ++ 
repositories=[], ++ rhel_version=detected_iso_rhel_ver, ++ was_mounted_successfully=True) ++ ++ check_target_iso.inhibit_if_wrong_iso_rhel_version(target_iso_msg) ++ ++ expected_report_count = 1 if should_inhibit else 0 ++ assert create_report_mock.called == expected_report_count ++ if should_inhibit: ++ assert is_inhibitor(create_report_mock.reports[0]) ++ ++ ++@pytest.mark.parametrize(('iso_repoids', 'should_inhibit'), ++ ((('BaseOS', 'AppStream'), False), (('BaseOS',), True), (('AppStream',), True), ((), True))) ++def test_inhibit_on_invalid_rhel_version(monkeypatch, iso_repoids, should_inhibit): ++ create_report_mock = create_report_mocked() ++ monkeypatch.setattr(reporting, 'create_report', create_report_mock) ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked()) ++ ++ iso_repositories = [CustomTargetRepository(repoid=repoid, baseurl='', name='') for repoid in iso_repoids] ++ ++ target_iso_msg = TargetOSInstallationImage(path='', ++ mountpoint='', ++ repositories=iso_repositories, ++ was_mounted_successfully=True) ++ ++ check_target_iso.inihibit_if_iso_does_not_contain_basic_repositories(target_iso_msg) ++ ++ expected_report_count = 1 if should_inhibit else 0 ++ assert create_report_mock.called == expected_report_count ++ if should_inhibit: ++ assert is_inhibitor(create_report_mock.reports[0]) ++ ++ ++def test_inhibit_on_nonexistent_iso(monkeypatch): ++ iso_path = '/nonexistent/iso' ++ create_report_mock = create_report_mocked() ++ monkeypatch.setattr(reporting, 'create_report', create_report_mock) ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked()) ++ ++ def mocked_os_path_exists(path): ++ assert path == iso_path, 'The actor should check only the path to ISO for existence.' 
++ return False ++ ++ monkeypatch.setattr(os.path, 'exists', mocked_os_path_exists) ++ ++ target_iso_msg = TargetOSInstallationImage(path=iso_path, ++ mountpoint='', ++ repositories=[], ++ was_mounted_successfully=True) ++ ++ check_target_iso.inhibit_if_not_valid_iso_file(target_iso_msg) ++ ++ assert create_report_mock.called == 1 ++ assert is_inhibitor(create_report_mock.reports[0]) ++ ++ ++@pytest.mark.parametrize(('filetype', 'should_inhibit'), ++ (('{path}: text/plain; charset=us-ascii', True), ++ ('{path}: application/x-iso9660-image; charset=binary', False))) ++def test_inhibit_on_path_not_pointing_to_iso(monkeypatch, filetype, should_inhibit): ++ iso_path = '/path/not-an-iso' ++ create_report_mock = create_report_mocked() ++ monkeypatch.setattr(reporting, 'create_report', create_report_mock) ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked()) ++ ++ def mocked_os_path_exists(path): ++ assert path == iso_path, 'The actor should check only the path to ISO for existence.' ++ return True ++ ++ def mocked_run(cmd, *args, **kwargs): ++ assert cmd[0] == 'file', 'The actor should only use `file` cmd when checking for file type.' 
++ return {'stdout': filetype.format(path=iso_path)} ++ ++ monkeypatch.setattr(os.path, 'exists', mocked_os_path_exists) ++ monkeypatch.setattr(check_target_iso, 'run', mocked_run) ++ ++ target_iso_msg = TargetOSInstallationImage(path=iso_path, mountpoint='', repositories=[]) ++ ++ check_target_iso.inhibit_if_not_valid_iso_file(target_iso_msg) ++ ++ if should_inhibit: ++ assert create_report_mock.called == 1 ++ assert is_inhibitor(create_report_mock.reports[0]) ++ else: ++ assert create_report_mock.called == 0 ++ ++ ++@pytest.mark.parametrize('is_persistently_mounted', (False, True)) ++def test_inhibition_when_iso_not_on_persistent_partition(monkeypatch, is_persistently_mounted): ++ path_mountpoint = '/d0/d1' ++ iso_path = '/d0/d1/d2/d3/iso' ++ create_report_mock = create_report_mocked() ++ monkeypatch.setattr(reporting, 'create_report', create_report_mock) ++ ++ def os_path_ismount_mocked(path): ++ if path == path_mountpoint: ++ return True ++ if path == '/': # / Should be a mountpoint on every system ++ return True ++ return False ++ ++ monkeypatch.setattr(os.path, 'ismount', os_path_ismount_mocked) ++ ++ fstab_mountpoint = path_mountpoint if is_persistently_mounted else '/some/other/mountpoint' ++ fstab_entry = FstabEntry(fs_spec='/dev/sta2', fs_file=fstab_mountpoint, ++ fs_vfstype='', fs_mntops='', fs_freq='', fs_passno='') ++ storage_info_msg = StorageInfo(fstab=[fstab_entry]) ++ ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[storage_info_msg])) ++ ++ target_iso_msg = TargetOSInstallationImage(path=iso_path, mountpoint='', repositories=[]) ++ check_target_iso.inhibit_if_iso_not_located_on_persistent_partition(target_iso_msg) ++ ++ if is_persistently_mounted: ++ assert not create_report_mock.called ++ else: ++ assert create_report_mock.called == 1 ++ assert is_inhibitor(create_report_mock.reports[0]) ++ ++ ++def test_actor_does_not_perform_when_iso_not_used(monkeypatch): ++ monkeypatch.setattr(reporting, 'create_report', 
create_report_mocked()) ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked()) ++ ++ check_target_iso.perform_target_iso_checks() ++ ++ assert not reporting.create_report.called +diff --git a/repos/system_upgrade/common/actors/createisorepofile/actor.py b/repos/system_upgrade/common/actors/createisorepofile/actor.py +new file mode 100644 +index 00000000..5c4fa760 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/createisorepofile/actor.py +@@ -0,0 +1,18 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import create_iso_repofile ++from leapp.models import CustomTargetRepositoryFile, TargetOSInstallationImage ++from leapp.tags import IPUWorkflowTag, TargetTransactionFactsPhaseTag ++ ++ ++class CreateISORepofile(Actor): ++ """ ++ Create custom repofile containing information about repositories found in target OS installation ISO, if used. ++ """ ++ ++ name = 'create_iso_repofile' ++ consumes = (TargetOSInstallationImage,) ++ produces = (CustomTargetRepositoryFile,) ++ tags = (IPUWorkflowTag, TargetTransactionFactsPhaseTag) ++ ++ def process(self): ++ create_iso_repofile.produce_repofile_if_iso_used() +diff --git a/repos/system_upgrade/common/actors/createisorepofile/libraries/create_iso_repofile.py b/repos/system_upgrade/common/actors/createisorepofile/libraries/create_iso_repofile.py +new file mode 100644 +index 00000000..b4470b68 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/createisorepofile/libraries/create_iso_repofile.py +@@ -0,0 +1,36 @@ ++import os ++ ++from leapp.libraries.common.config.version import get_target_major_version ++from leapp.libraries.stdlib import api ++from leapp.models import CustomTargetRepositoryFile, TargetOSInstallationImage ++ ++ ++def produce_repofile_if_iso_used(): ++ target_iso_msgs_iter = api.consume(TargetOSInstallationImage) ++ target_iso = next(target_iso_msgs_iter, None) ++ ++ if not target_iso: ++ return ++ ++ if next(target_iso_msgs_iter, None): ++ 
api.current_logger().warn('Received multiple TargetISInstallationImage messages, using the first one') ++ ++ # Mounting was successful, create a repofile to copy into target userspace ++ repofile_entry_template = ('[{repoid}]\n' ++ 'name={reponame}\n' ++ 'baseurl={baseurl}\n' ++ 'enabled=0\n' ++ 'gpgcheck=0\n') ++ ++ repofile_content = '' ++ for repo in target_iso.repositories: ++ repofile_content += repofile_entry_template.format(repoid=repo.repoid, ++ reponame=repo.repoid, ++ baseurl=repo.baseurl) ++ ++ target_os_path_prefix = 'el{target_major_ver}'.format(target_major_ver=get_target_major_version()) ++ iso_repofile_path = os.path.join('/var/lib/leapp/', '{}_iso.repo'.format(target_os_path_prefix)) ++ with open(iso_repofile_path, 'w') as iso_repofile: ++ iso_repofile.write(repofile_content) ++ ++ api.produce(CustomTargetRepositoryFile(file=iso_repofile_path)) +diff --git a/repos/system_upgrade/common/actors/dnfdryrun/actor.py b/repos/system_upgrade/common/actors/dnfdryrun/actor.py +index 7cfce25f..bc3267b4 100644 +--- a/repos/system_upgrade/common/actors/dnfdryrun/actor.py ++++ b/repos/system_upgrade/common/actors/dnfdryrun/actor.py +@@ -7,6 +7,7 @@ from leapp.models import ( + FilteredRpmTransactionTasks, + RHUIInfo, + StorageInfo, ++ TargetOSInstallationImage, + TargetUserSpaceInfo, + TransactionDryRun, + UsedTargetRepositories, +@@ -31,6 +32,7 @@ class DnfDryRun(Actor): + FilteredRpmTransactionTasks, + RHUIInfo, + StorageInfo, ++ TargetOSInstallationImage, + TargetUserSpaceInfo, + UsedTargetRepositories, + XFSPresence, +@@ -46,10 +48,12 @@ class DnfDryRun(Actor): + tasks = next(self.consume(FilteredRpmTransactionTasks), FilteredRpmTransactionTasks()) + target_userspace_info = next(self.consume(TargetUserSpaceInfo), None) + rhui_info = next(self.consume(RHUIInfo), None) ++ target_iso = next(self.consume(TargetOSInstallationImage), None) + on_aws = bool(rhui_info and rhui_info.provider == 'aws') + + dnfplugin.perform_dry_run( + tasks=tasks, 
used_repos=used_repos, target_userspace_info=target_userspace_info, +- xfs_info=xfs_info, storage_info=storage_info, plugin_info=plugin_info, on_aws=on_aws ++ xfs_info=xfs_info, storage_info=storage_info, plugin_info=plugin_info, on_aws=on_aws, ++ target_iso=target_iso, + ) + self.produce(TransactionDryRun()) +diff --git a/repos/system_upgrade/common/actors/dnfpackagedownload/actor.py b/repos/system_upgrade/common/actors/dnfpackagedownload/actor.py +index f27045c3..b54f5627 100644 +--- a/repos/system_upgrade/common/actors/dnfpackagedownload/actor.py ++++ b/repos/system_upgrade/common/actors/dnfpackagedownload/actor.py +@@ -6,6 +6,7 @@ from leapp.models import ( + FilteredRpmTransactionTasks, + RHUIInfo, + StorageInfo, ++ TargetOSInstallationImage, + TargetUserSpaceInfo, + UsedTargetRepositories, + XFSPresence +@@ -28,6 +29,7 @@ class DnfPackageDownload(Actor): + FilteredRpmTransactionTasks, + RHUIInfo, + StorageInfo, ++ TargetOSInstallationImage, + TargetUserSpaceInfo, + UsedTargetRepositories, + XFSPresence, +@@ -45,8 +47,10 @@ class DnfPackageDownload(Actor): + rhui_info = next(self.consume(RHUIInfo), None) + # there are several "variants" related to the *AWS* provider (aws, aws-sap) + on_aws = bool(rhui_info and rhui_info.provider.startswith('aws')) ++ target_iso = next(self.consume(TargetOSInstallationImage), None) + + dnfplugin.perform_rpm_download( + tasks=tasks, used_repos=used_repos, target_userspace_info=target_userspace_info, +- xfs_info=xfs_info, storage_info=storage_info, plugin_info=plugin_info, on_aws=on_aws ++ xfs_info=xfs_info, storage_info=storage_info, plugin_info=plugin_info, on_aws=on_aws, ++ target_iso=target_iso + ) +diff --git a/repos/system_upgrade/common/actors/dnftransactioncheck/actor.py b/repos/system_upgrade/common/actors/dnftransactioncheck/actor.py +index f741b77b..b545d1ce 100644 +--- a/repos/system_upgrade/common/actors/dnftransactioncheck/actor.py ++++ b/repos/system_upgrade/common/actors/dnftransactioncheck/actor.py +@@ -5,6 +5,7 
@@ from leapp.models import ( + DNFWorkaround, + FilteredRpmTransactionTasks, + StorageInfo, ++ TargetOSInstallationImage, + TargetUserSpaceInfo, + UsedTargetRepositories, + XFSPresence +@@ -23,6 +24,7 @@ class DnfTransactionCheck(Actor): + DNFWorkaround, + FilteredRpmTransactionTasks, + StorageInfo, ++ TargetOSInstallationImage, + TargetUserSpaceInfo, + UsedTargetRepositories, + XFSPresence, +@@ -37,9 +39,10 @@ class DnfTransactionCheck(Actor): + plugin_info = list(self.consume(DNFPluginTask)) + tasks = next(self.consume(FilteredRpmTransactionTasks), FilteredRpmTransactionTasks()) + target_userspace_info = next(self.consume(TargetUserSpaceInfo), None) ++ target_iso = next(self.consume(TargetOSInstallationImage), None) + + if target_userspace_info: + dnfplugin.perform_transaction_check( + tasks=tasks, used_repos=used_repos, target_userspace_info=target_userspace_info, +- xfs_info=xfs_info, storage_info=storage_info, plugin_info=plugin_info ++ xfs_info=xfs_info, storage_info=storage_info, plugin_info=plugin_info, target_iso=target_iso + ) +diff --git a/repos/system_upgrade/common/actors/initramfs/mounttargetiso/actor.py b/repos/system_upgrade/common/actors/initramfs/mounttargetiso/actor.py +new file mode 100644 +index 00000000..950b2694 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/initramfs/mounttargetiso/actor.py +@@ -0,0 +1,16 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import mount_target_iso ++from leapp.models import TargetOSInstallationImage, TargetUserSpaceInfo ++from leapp.tags import IPUWorkflowTag, PreparationPhaseTag ++ ++ ++class MountTargetISO(Actor): ++ """Mounts target OS ISO in order to install upgrade packages from it.""" ++ ++ name = 'mount_target_iso' ++ consumes = (TargetUserSpaceInfo, TargetOSInstallationImage,) ++ produces = () ++ tags = (PreparationPhaseTag, IPUWorkflowTag) ++ ++ def process(self): ++ mount_target_iso.mount_target_iso() +diff --git 
a/repos/system_upgrade/common/actors/initramfs/mounttargetiso/libraries/mount_target_iso.py b/repos/system_upgrade/common/actors/initramfs/mounttargetiso/libraries/mount_target_iso.py +new file mode 100644 +index 00000000..7cc45234 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/initramfs/mounttargetiso/libraries/mount_target_iso.py +@@ -0,0 +1,27 @@ ++import os ++ ++from leapp.exceptions import StopActorExecutionError ++from leapp.libraries.stdlib import api, CalledProcessError, run ++from leapp.models import TargetOSInstallationImage, TargetUserSpaceInfo ++ ++ ++def mount_target_iso(): ++ target_os_iso = next(api.consume(TargetOSInstallationImage), None) ++ target_userspace_info = next(api.consume(TargetUserSpaceInfo), None) ++ ++ if not target_os_iso: ++ return ++ ++ mountpoint = os.path.join(target_userspace_info.path, target_os_iso.mountpoint[1:]) ++ if not os.path.exists(mountpoint): ++ # The target userspace container exists, however, the mountpoint has been removed during cleanup. ++ os.makedirs(mountpoint) ++ try: ++ run(['mount', target_os_iso.path, mountpoint]) ++ except CalledProcessError as err: ++ # Unlikely, since we are checking that the ISO is mountable and located on a persistent partition. This would ++ # likely mean that either the fstab entry for the partition points uses a different device that the one that ++ # was mounted during pre-reboot, or the fstab has been tampered with before rebooting. Either way, there is ++ # nothing at this point how we can recover. ++ msg = 'Failed to mount the target RHEL ISO file containing RPMs to install during the upgrade.' 
++ raise StopActorExecutionError(message=msg, details={'details': '{0}'.format(err)}) +diff --git a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/actor.py b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/actor.py +index 31e3c61e..dc97172a 100644 +--- a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/actor.py ++++ b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/actor.py +@@ -4,6 +4,7 @@ from leapp.models import RequiredUpgradeInitramPackages # deprecated + from leapp.models import UpgradeDracutModule # deprecated + from leapp.models import ( + BootContent, ++ TargetOSInstallationImage, + TargetUserSpaceInfo, + TargetUserSpaceUpgradeTasks, + UpgradeInitramfsTasks, +@@ -27,6 +28,7 @@ class UpgradeInitramfsGenerator(Actor): + name = 'upgrade_initramfs_generator' + consumes = ( + RequiredUpgradeInitramPackages, # deprecated ++ TargetOSInstallationImage, + TargetUserSpaceInfo, + TargetUserSpaceUpgradeTasks, + UpgradeDracutModule, # deprecated +diff --git a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/libraries/upgradeinitramfsgenerator.py b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/libraries/upgradeinitramfsgenerator.py +index 991ace0e..f6539b25 100644 +--- a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/libraries/upgradeinitramfsgenerator.py ++++ b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/libraries/upgradeinitramfsgenerator.py +@@ -9,6 +9,7 @@ from leapp.models import RequiredUpgradeInitramPackages # deprecated + from leapp.models import UpgradeDracutModule # deprecated + from leapp.models import ( + BootContent, ++ TargetOSInstallationImage, + TargetUserSpaceInfo, + TargetUserSpaceUpgradeTasks, + UpgradeInitramfsTasks, +@@ -200,7 +201,8 @@ def copy_boot_files(context): + + def process(): + userspace_info = next(api.consume(TargetUserSpaceInfo), None) +- ++ 
target_iso = next(api.consume(TargetOSInstallationImage), None) + with mounting.NspawnActions(base_dir=userspace_info.path) as context: +- prepare_userspace_for_initram(context) +- generate_initram_disk(context) ++ with mounting.mount_upgrade_iso_to_root_dir(userspace_info.path, target_iso): ++ prepare_userspace_for_initram(context) ++ generate_initram_disk(context) +diff --git a/repos/system_upgrade/common/actors/localreposinhibit/actor.py b/repos/system_upgrade/common/actors/localreposinhibit/actor.py +index bff65f2d..edf58792 100644 +--- a/repos/system_upgrade/common/actors/localreposinhibit/actor.py ++++ b/repos/system_upgrade/common/actors/localreposinhibit/actor.py +@@ -1,6 +1,6 @@ + from leapp import reporting + from leapp.actors import Actor +-from leapp.models import TMPTargetRepositoriesFacts, UsedTargetRepositories ++from leapp.models import TargetOSInstallationImage, TMPTargetRepositoriesFacts, UsedTargetRepositories + from leapp.reporting import Report + from leapp.tags import IPUWorkflowTag, TargetTransactionChecksPhaseTag + from leapp.utils.deprecation import suppress_deprecation +@@ -13,41 +13,58 @@ class LocalReposInhibit(Actor): + name = "local_repos_inhibit" + consumes = ( + UsedTargetRepositories, ++ TargetOSInstallationImage, + TMPTargetRepositoriesFacts, + ) + produces = (Report,) + tags = (IPUWorkflowTag, TargetTransactionChecksPhaseTag) + +- def file_baseurl_in_use(self): +- """Check if any of target repos is local. ++ def collect_target_repoids_with_local_url(self, used_target_repos, target_repos_facts, target_iso): ++ """Collects all repoids that have a local (file://) URL. + + UsedTargetRepositories doesn't contain baseurl attribute. So gathering + them from model TMPTargetRepositoriesFacts. 
+ """ +- used_target_repos = next(self.consume(UsedTargetRepositories)).repos +- target_repos = next(self.consume(TMPTargetRepositoriesFacts)).repositories +- target_repo_id_to_url_map = { +- repo.repoid: repo.mirrorlist or repo.metalink or repo.baseurl or "" +- for repofile in target_repos +- for repo in repofile.data +- } +- return any( +- target_repo_id_to_url_map[repo.repoid].startswith("file:") +- for repo in used_target_repos +- ) ++ used_target_repoids = set(repo.repoid for repo in used_target_repos.repos) ++ iso_repoids = set(iso_repo.repoid for iso_repo in target_iso.repositories) if target_iso else set() ++ ++ target_repofile_data = (repofile.data for repofile in target_repos_facts.repositories) ++ ++ local_repoids = [] ++ for repo_data in target_repofile_data: ++ for target_repo in repo_data: ++ # Check only in repositories that are used and are not provided by the upgrade ISO, if any ++ if target_repo.repoid not in used_target_repoids or target_repo.repoid in iso_repoids: ++ continue ++ ++ # Repo fields potentially containing local URLs have different importance, check based on their prio ++ url_field_to_check = target_repo.mirrorlist or target_repo.metalink or target_repo.baseurl or '' ++ ++ if url_field_to_check.startswith("file://"): ++ local_repoids.append(target_repo.repoid) ++ return local_repoids + + def process(self): +- if not all(next(self.consume(model), None) for model in self.consumes): ++ used_target_repos = next(self.consume(UsedTargetRepositories), None) ++ target_repos_facts = next(self.consume(TMPTargetRepositoriesFacts), None) ++ target_iso = next(self.consume(TargetOSInstallationImage), None) ++ ++ if not used_target_repos or not target_repos_facts: + return +- if self.file_baseurl_in_use(): +- warn_msg = ( +- "Local repository found (baseurl starts with file:///). " +- "Currently leapp does not support this option." 
+- ) ++ ++ local_repoids = self.collect_target_repoids_with_local_url(used_target_repos, target_repos_facts, target_iso) ++ if local_repoids: ++ suffix, verb = ("y", "has") if len(local_repoids) == 1 else ("ies", "have") ++ local_repoids_str = ", ".join(local_repoids) ++ ++ warn_msg = ("The following local repositor{suffix} {verb} been found: {local_repoids} " ++ "(their baseurl starts with file:///). Currently leapp does not support this option.") ++ warn_msg = warn_msg.format(suffix=suffix, verb=verb, local_repoids=local_repoids_str) + self.log.warning(warn_msg) ++ + reporting.create_report( + [ +- reporting.Title("Local repository detected"), ++ reporting.Title("Local repositor{suffix} detected".format(suffix=suffix)), + reporting.Summary(warn_msg), + reporting.Severity(reporting.Severity.HIGH), + reporting.Groups([reporting.Groups.REPOSITORY]), +diff --git a/repos/system_upgrade/common/actors/localreposinhibit/tests/test_unit_localreposinhibit.py b/repos/system_upgrade/common/actors/localreposinhibit/tests/test_unit_localreposinhibit.py +index 70156751..64a79e80 100644 +--- a/repos/system_upgrade/common/actors/localreposinhibit/tests/test_unit_localreposinhibit.py ++++ b/repos/system_upgrade/common/actors/localreposinhibit/tests/test_unit_localreposinhibit.py +@@ -3,6 +3,7 @@ import pytest + from leapp.models import ( + RepositoryData, + RepositoryFile, ++ TargetOSInstallationImage, + TMPTargetRepositoriesFacts, + UsedTargetRepositories, + UsedTargetRepository +@@ -70,3 +71,11 @@ def test_unit_localreposinhibit(current_actor_context, baseurl, mirrorlist, meta + ) + current_actor_context.run() + assert len(current_actor_context.messages()) == exp_msgs_len ++ ++ ++def test_upgrade_not_inhibited_if_iso_used(current_actor_context): ++ repofile = RepositoryFile(file="path/to/some/file", ++ data=[RepositoryData(name="BASEOS", baseurl="file:///path", repoid="BASEOS")]) ++ current_actor_context.feed(TMPTargetRepositoriesFacts(repositories=[repofile])) ++ 
current_actor_context.feed(UsedTargetRepositories(repos=[UsedTargetRepository(repoid="BASEOS")])) ++ current_actor_context.feed(TargetOSInstallationImage(path='', mountpoint='', repositories=[])) +diff --git a/repos/system_upgrade/common/actors/scantargetiso/actor.py b/repos/system_upgrade/common/actors/scantargetiso/actor.py +new file mode 100644 +index 00000000..88b1b8f5 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/scantargetiso/actor.py +@@ -0,0 +1,16 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import scan_target_os_iso ++from leapp.models import CustomTargetRepository, TargetOSInstallationImage ++from leapp.tags import FactsPhaseTag, IPUWorkflowTag ++ ++ ++class ScanTargetISO(Actor): ++ """Scans the provided target OS ISO image to use as a content source for the IPU, if any.""" ++ ++ name = 'scan_target_os_image' ++ consumes = () ++ produces = (CustomTargetRepository, TargetOSInstallationImage,) ++ tags = (IPUWorkflowTag, FactsPhaseTag) ++ ++ def process(self): ++ scan_target_os_iso.inform_ipu_about_request_to_use_target_iso() +diff --git a/repos/system_upgrade/common/actors/scantargetiso/libraries/scan_target_os_iso.py b/repos/system_upgrade/common/actors/scantargetiso/libraries/scan_target_os_iso.py +new file mode 100644 +index 00000000..281389cf +--- /dev/null ++++ b/repos/system_upgrade/common/actors/scantargetiso/libraries/scan_target_os_iso.py +@@ -0,0 +1,96 @@ ++import os ++ ++import leapp.libraries.common.config as ipu_config ++from leapp.libraries.common.mounting import LoopMount, MountError ++from leapp.libraries.stdlib import api, CalledProcessError, run ++from leapp.models import CustomTargetRepository, TargetOSInstallationImage ++ ++ ++def determine_rhel_version_from_iso_mountpoint(iso_mountpoint): ++ baseos_packages = os.path.join(iso_mountpoint, 'BaseOS/Packages') ++ if os.path.isdir(baseos_packages): ++ def is_rh_release_pkg(pkg_name): ++ return pkg_name.startswith('redhat-release') and 'eula' not in pkg_name ++ 
++ redhat_release_pkgs = [pkg for pkg in os.listdir(baseos_packages) if is_rh_release_pkg(pkg)] ++ ++ if not redhat_release_pkgs: ++ return '' # We did not determine anything ++ ++ if len(redhat_release_pkgs) > 1: ++ api.current_logger().warn('Multiple packages with name redhat-release* found when ' ++ 'determining RHEL version of the supplied installation ISO.') ++ ++ redhat_release_pkg = redhat_release_pkgs[0] ++ ++ determined_rhel_ver = '' ++ try: ++ rh_release_pkg_path = os.path.join(baseos_packages, redhat_release_pkg) ++ # rpm2cpio is provided by rpm; cpio is a dependency of yum (rhel7) and a dependency of dracut which is ++ # a dependency for leapp (rhel8+) ++ cpio_archive = run(['rpm2cpio', rh_release_pkg_path]) ++ etc_rh_release_contents = run(['cpio', '--extract', '--to-stdout', './etc/redhat-release'], ++ stdin=cpio_archive['stdout']) ++ ++ # 'Red Hat Enterprise Linux Server release 7.9 (Maipo)' -> ['Red Hat...', '7.9 (Maipo'] ++ product_release_fragments = etc_rh_release_contents['stdout'].split('release') ++ if len(product_release_fragments) != 2: ++ return '' # Unlikely. 
Either way we failed to parse the release ++ ++ if not product_release_fragments[0].startswith('Red Hat'): ++ return '' ++ ++ determined_rhel_ver = product_release_fragments[1].strip().split(' ', 1)[0] # Remove release name (Maipo) ++ return determined_rhel_ver ++ except CalledProcessError: ++ return '' ++ return '' ++ ++ ++def inform_ipu_about_request_to_use_target_iso(): ++ target_iso_path = ipu_config.get_env('LEAPP_TARGET_ISO') ++ if not target_iso_path: ++ return ++ ++ iso_mountpoint = '/iso' ++ ++ if not os.path.exists(target_iso_path): ++ # If the path does not exists, do not attempt to mount it and let the upgrade be inhibited by the check actor ++ api.produce(TargetOSInstallationImage(path=target_iso_path, ++ repositories=[], ++ mountpoint=iso_mountpoint, ++ was_mounted_successfully=False)) ++ return ++ ++ # Mount the given ISO, extract the available repositories and determine provided RHEL version ++ iso_scan_mountpoint = '/var/lib/leapp/iso_scan_mountpoint' ++ try: ++ with LoopMount(source=target_iso_path, target=iso_scan_mountpoint): ++ required_repositories = ('BaseOS', 'AppStream') ++ ++ # Check what required repositories are present in the root of the ISO ++ iso_contents = os.listdir(iso_scan_mountpoint) ++ present_repositories = [req_repo for req_repo in required_repositories if req_repo in iso_contents] ++ ++ # Create custom repository information about the repositories found in the root of the ISO ++ iso_repos = [] ++ for repo_dir in present_repositories: ++ baseurl = 'file://' + os.path.join(iso_mountpoint, repo_dir) ++ iso_repo = CustomTargetRepository(name=repo_dir, baseurl=baseurl, repoid=repo_dir) ++ api.produce(iso_repo) ++ iso_repos.append(iso_repo) ++ ++ rhel_version = determine_rhel_version_from_iso_mountpoint(iso_scan_mountpoint) ++ ++ api.produce(TargetOSInstallationImage(path=target_iso_path, ++ repositories=iso_repos, ++ mountpoint=iso_mountpoint, ++ rhel_version=rhel_version, ++ was_mounted_successfully=True)) ++ except MountError: 
++ # Do not analyze the situation any further as ISO checks will be done by another actor ++ iso_mountpoint = '/iso' ++ api.produce(TargetOSInstallationImage(path=target_iso_path, ++ repositories=[], ++ mountpoint=iso_mountpoint, ++ was_mounted_successfully=False)) +diff --git a/repos/system_upgrade/common/actors/scantargetiso/tests/test_scan_target_iso.py b/repos/system_upgrade/common/actors/scantargetiso/tests/test_scan_target_iso.py +new file mode 100644 +index 00000000..4dd0a125 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/scantargetiso/tests/test_scan_target_iso.py +@@ -0,0 +1,220 @@ ++import contextlib ++import os ++from functools import partial ++ ++import pytest ++ ++from leapp.libraries.actor import scan_target_os_iso ++from leapp.libraries.common.mounting import MountError ++from leapp.libraries.common.testutils import CurrentActorMocked, produce_mocked ++from leapp.libraries.stdlib import api, CalledProcessError ++from leapp.models import CustomTargetRepository, TargetOSInstallationImage ++ ++ ++def fail_if_called(fail_reason, *args, **kwargs): ++ assert False, fail_reason ++ ++ ++def test_determine_rhel_version_determination_unexpected_iso_structure_or_invalid_mountpoint(monkeypatch): ++ iso_mountpoint = '/some/mountpoint' ++ ++ run_mocked = partial(fail_if_called, ++ 'No commands should be called when mounted ISO mountpoint has unexpected structure.') ++ monkeypatch.setattr(scan_target_os_iso, 'run', run_mocked) ++ ++ def isdir_mocked(path): ++ assert path == '/some/mountpoint/BaseOS/Packages', 'Only the contents of BaseOS/Packages should be examined.' 
++ return False ++ ++ monkeypatch.setattr(os.path, 'isdir', isdir_mocked) ++ ++ determined_version = scan_target_os_iso.determine_rhel_version_from_iso_mountpoint(iso_mountpoint) ++ assert not determined_version ++ ++ ++def test_determine_rhel_version_valid_iso(monkeypatch): ++ iso_mountpoint = '/some/mountpoint' ++ ++ def isdir_mocked(path): ++ return True ++ ++ def listdir_mocked(path): ++ assert path == '/some/mountpoint/BaseOS/Packages', 'Only the contents of BaseOS/Packages should be examined.' ++ return ['xz-5.2.4-4.el8_6.x86_64.rpm', ++ 'libmodman-2.0.1-17.el8.i686.rpm', ++ 'redhat-release-8.7-0.3.el8.x86_64.rpm', ++ 'redhat-release-eula-8.7-0.3.el8.x86_64.rpm'] ++ ++ def run_mocked(cmd, *args, **kwargs): ++ rpm2cpio_output = 'rpm2cpio_output' ++ if cmd[0] == 'rpm2cpio': ++ assert cmd == ['rpm2cpio', '/some/mountpoint/BaseOS/Packages/redhat-release-8.7-0.3.el8.x86_64.rpm'] ++ return {'stdout': rpm2cpio_output} ++ if cmd[0] == 'cpio': ++ assert cmd == ['cpio', '--extract', '--to-stdout', './etc/redhat-release'] ++ assert kwargs['stdin'] == rpm2cpio_output ++ return {'stdout': 'Red Hat Enterprise Linux Server release 7.9 (Maipo)'} ++ raise ValueError('Unexpected command has been called.') ++ ++ monkeypatch.setattr(os.path, 'isdir', isdir_mocked) ++ monkeypatch.setattr(os, 'listdir', listdir_mocked) ++ monkeypatch.setattr(scan_target_os_iso, 'run', run_mocked) ++ ++ determined_version = scan_target_os_iso.determine_rhel_version_from_iso_mountpoint(iso_mountpoint) ++ assert determined_version == '7.9' ++ ++ ++def test_determine_rhel_version_valid_iso_no_rh_release(monkeypatch): ++ iso_mountpoint = '/some/mountpoint' ++ ++ def isdir_mocked(path): ++ return True ++ ++ def listdir_mocked(path): ++ assert path == '/some/mountpoint/BaseOS/Packages', 'Only the contents of BaseOS/Packages should be examined.' 
++ return ['xz-5.2.4-4.el8_6.x86_64.rpm', ++ 'libmodman-2.0.1-17.el8.i686.rpm', ++ 'redhat-release-eula-8.7-0.3.el8.x86_64.rpm'] ++ ++ run_mocked = partial(fail_if_called, 'No command should be called if the redhat-release package is not present.') ++ ++ monkeypatch.setattr(os.path, 'isdir', isdir_mocked) ++ monkeypatch.setattr(os, 'listdir', listdir_mocked) ++ monkeypatch.setattr(scan_target_os_iso, 'run', run_mocked) ++ ++ determined_version = scan_target_os_iso.determine_rhel_version_from_iso_mountpoint(iso_mountpoint) ++ assert determined_version == '' ++ ++ ++def test_determine_rhel_version_rpm_extract_fails(monkeypatch): ++ iso_mountpoint = '/some/mountpoint' ++ ++ def isdir_mocked(path): ++ return True ++ ++ def listdir_mocked(path): ++ assert path == '/some/mountpoint/BaseOS/Packages', 'Only the contents of BaseOS/Packages should be examined.' ++ return ['redhat-release-8.7-0.3.el8.x86_64.rpm'] ++ ++ def run_mocked(cmd, *args, **kwargs): ++ raise CalledProcessError(message='Ooops.', command=cmd, result=2) ++ ++ monkeypatch.setattr(os.path, 'isdir', isdir_mocked) ++ monkeypatch.setattr(os, 'listdir', listdir_mocked) ++ monkeypatch.setattr(scan_target_os_iso, 'run', run_mocked) ++ ++ determined_version = scan_target_os_iso.determine_rhel_version_from_iso_mountpoint(iso_mountpoint) ++ assert determined_version == '' ++ ++ ++@pytest.mark.parametrize('etc_rh_release_contents', ('', ++ 'Red Hat Enterprise Linux Server', ++ 'Fedora release 35 (Thirty Five)')) ++def test_determine_rhel_version_unexpected_etc_rh_release_contents(monkeypatch, etc_rh_release_contents): ++ iso_mountpoint = '/some/mountpoint' ++ ++ def isdir_mocked(path): ++ return True ++ ++ def listdir_mocked(path): ++ assert path == '/some/mountpoint/BaseOS/Packages', 'Only the contents of BaseOS/Packages should be examined.' 
++ return ['redhat-release-8.7-0.3.el8.x86_64.rpm'] ++ ++ def run_mocked(cmd, *args, **kwargs): ++ if cmd[0] == 'rpm2cpio': ++ return {'stdout': 'rpm2cpio_output'} ++ if cmd[0] == 'cpio': ++ return {'stdout': etc_rh_release_contents} ++ raise ValueError('Actor called an unexpected command: {0}'.format(cmd)) ++ ++ monkeypatch.setattr(os.path, 'isdir', isdir_mocked) ++ monkeypatch.setattr(os, 'listdir', listdir_mocked) ++ monkeypatch.setattr(scan_target_os_iso, 'run', run_mocked) ++ ++ determined_version = scan_target_os_iso.determine_rhel_version_from_iso_mountpoint(iso_mountpoint) ++ assert determined_version == '' ++ ++ ++@pytest.mark.parametrize('iso_envar_set', (True, False)) ++def test_iso_detection_with_no_iso(monkeypatch, iso_envar_set): ++ envars = {'LEAPP_TARGET_ISO': '/target_iso'} if iso_envar_set else {} ++ mocked_actor = CurrentActorMocked(envars=envars) ++ monkeypatch.setattr(api, 'current_actor', mocked_actor) ++ monkeypatch.setattr(api, 'produce', produce_mocked()) ++ ++ scan_target_os_iso.inform_ipu_about_request_to_use_target_iso() ++ assert bool(api.produce.called) == iso_envar_set ++ ++ ++def test_iso_mounting_failed(monkeypatch): ++ envars = {'LEAPP_TARGET_ISO': '/target_iso'} ++ mocked_actor = CurrentActorMocked(envars=envars) ++ monkeypatch.setattr(api, 'current_actor', mocked_actor) ++ monkeypatch.setattr(api, 'produce', produce_mocked()) ++ ++ def raise_mount_error_when_called(): ++ raise MountError('MountError') ++ ++ monkeypatch.setattr(scan_target_os_iso, 'LoopMount', raise_mount_error_when_called) ++ ++ scan_target_os_iso.inform_ipu_about_request_to_use_target_iso() ++ assert api.produce.called ++ ++ assert len(api.produce.model_instances) == 1 ++ assert not api.produce.model_instances[0].was_mounted_successfully ++ ++ ++@pytest.mark.parametrize(('repodirs_in_iso', 'expected_repoids'), ++ (((), ()), ++ (('BaseOS',), ('BaseOS',)), ++ (('BaseOS', 'AppStream'), ('BaseOS', 'AppStream')), ++ (('BaseOS', 'AppStream', 'UnknownRepo'), ('BaseOS', 
'AppStream')))) ++def test_iso_repository_detection(monkeypatch, repodirs_in_iso, expected_repoids): ++ iso_path = '/target_iso' ++ envars = {'LEAPP_TARGET_ISO': iso_path} ++ mocked_actor = CurrentActorMocked(envars=envars) ++ ++ @contextlib.contextmanager ++ def always_successful_loop_mount(*args, **kwargs): ++ yield ++ ++ def mocked_os_path_exits(path): ++ if path == iso_path: ++ return True ++ raise ValueError('Only the ISO path should be probed for existence.') ++ ++ def mocked_os_listdir(path): ++ # Add some extra files as an ISO will always have some extra files in / as the ones parametrizing this test ++ return list(repodirs_in_iso + ('eula.txt', 'grub', 'imgs')) ++ ++ monkeypatch.setattr(api, 'current_actor', mocked_actor) ++ monkeypatch.setattr(api, 'produce', produce_mocked()) ++ monkeypatch.setattr(scan_target_os_iso, 'LoopMount', always_successful_loop_mount) ++ monkeypatch.setattr(os.path, 'exists', mocked_os_path_exits) ++ monkeypatch.setattr(os, 'listdir', mocked_os_listdir) ++ monkeypatch.setattr(scan_target_os_iso, 'determine_rhel_version_from_iso_mountpoint', lambda iso_mountpoint: '7.9') ++ ++ scan_target_os_iso.inform_ipu_about_request_to_use_target_iso() ++ ++ produced_msgs = api.produce.model_instances ++ assert len(produced_msgs) == 1 + len(expected_repoids) ++ ++ produced_custom_repo_msgs = [] ++ target_iso_msg = None ++ for produced_msg in produced_msgs: ++ if isinstance(produced_msg, CustomTargetRepository): ++ produced_custom_repo_msgs.append(produced_msg) ++ else: ++ assert not target_iso_msg, 'Actor is expected to produce only one TargetOSInstallationImage msg' ++ target_iso = produced_msg ++ ++ # Do not explicitly instantiate model instances of what we expect the model instance to look like. Instead check ++ # for expected structural properties, leaving the actor implementation flexibility (e.g. 
choice of the mountpoint) ++ iso_mountpoint = target_iso.mountpoint ++ ++ assert target_iso.was_mounted_successfully ++ assert target_iso.rhel_version == '7.9' ++ ++ expected_repos = {(repoid, 'file://' + os.path.join(iso_mountpoint, repoid)) for repoid in expected_repoids} ++ actual_repos = {(repo.repoid, repo.baseurl) for repo in produced_custom_repo_msgs} ++ assert expected_repos == actual_repos +diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/actor.py b/repos/system_upgrade/common/actors/targetuserspacecreator/actor.py +index 04fb2e8b..b1225230 100644 +--- a/repos/system_upgrade/common/actors/targetuserspacecreator/actor.py ++++ b/repos/system_upgrade/common/actors/targetuserspacecreator/actor.py +@@ -2,7 +2,7 @@ from leapp.actors import Actor + from leapp.libraries.actor import userspacegen + from leapp.libraries.common.config import get_env, version + from leapp.models import RequiredTargetUserspacePackages # deprecated +-from leapp.models import TMPTargetRepositoriesFacts # deprecated all the time ++from leapp.models import TMPTargetRepositoriesFacts # deprecated + from leapp.models import ( + CustomTargetRepositoryFile, + PkgManagerInfo, +@@ -12,6 +12,7 @@ from leapp.models import ( + RHSMInfo, + RHUIInfo, + StorageInfo, ++ TargetOSInstallationImage, + TargetRepositories, + TargetUserSpaceInfo, + TargetUserSpacePreupgradeTasks, +@@ -42,6 +43,7 @@ class TargetUserspaceCreator(Actor): + RepositoriesMapping, + RequiredTargetUserspacePackages, + StorageInfo, ++ TargetOSInstallationImage, + TargetRepositories, + TargetUserSpacePreupgradeTasks, + XFSPresence, +diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py +index 00acacd9..5a6a80f2 100644 +--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py ++++ 
b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py +@@ -9,7 +9,7 @@ from leapp.libraries.common.config import get_env, get_product_type + from leapp.libraries.common.config.version import get_target_major_version + from leapp.libraries.stdlib import api, CalledProcessError, config, run + from leapp.models import RequiredTargetUserspacePackages # deprecated +-from leapp.models import TMPTargetRepositoriesFacts # deprecated ++from leapp.models import TMPTargetRepositoriesFacts # deprecated all the time + from leapp.models import ( + CustomTargetRepositoryFile, + PkgManagerInfo, +@@ -17,6 +17,7 @@ from leapp.models import ( + RHSMInfo, + RHUIInfo, + StorageInfo, ++ TargetOSInstallationImage, + TargetRepositories, + TargetUserSpaceInfo, + TargetUserSpacePreupgradeTasks, +@@ -686,15 +687,18 @@ def perform(): + storage_info=indata.storage_info, + xfs_info=indata.xfs_info) as overlay: + with overlay.nspawn() as context: +- target_repoids = _gather_target_repositories(context, indata, prod_cert_path) +- _create_target_userspace(context, indata.packages, indata.files, target_repoids) +- # TODO: this is tmp solution as proper one needs significant refactoring +- target_repo_facts = repofileutils.get_parsed_repofiles(context) +- api.produce(TMPTargetRepositoriesFacts(repositories=target_repo_facts)) +- # ## TODO ends here +- api.produce(UsedTargetRepositories( +- repos=[UsedTargetRepository(repoid=repo) for repo in target_repoids])) +- api.produce(TargetUserSpaceInfo( +- path=_get_target_userspace(), +- scratch=constants.SCRATCH_DIR, +- mounts=constants.MOUNTS_DIR)) ++ # Mount the ISO into the scratch container ++ target_iso = next(api.consume(TargetOSInstallationImage), None) ++ with mounting.mount_upgrade_iso_to_root_dir(overlay.target, target_iso): ++ target_repoids = _gather_target_repositories(context, indata, prod_cert_path) ++ _create_target_userspace(context, indata.packages, indata.files, target_repoids) ++ # TODO: this is tmp 
solution as proper one needs significant refactoring ++ target_repo_facts = repofileutils.get_parsed_repofiles(context) ++ api.produce(TMPTargetRepositoriesFacts(repositories=target_repo_facts)) ++ # ## TODO ends here ++ api.produce(UsedTargetRepositories( ++ repos=[UsedTargetRepository(repoid=repo) for repo in target_repoids])) ++ api.produce(TargetUserSpaceInfo( ++ path=_get_target_userspace(), ++ scratch=constants.SCRATCH_DIR, ++ mounts=constants.MOUNTS_DIR)) +diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py +index 276175a1..5f544471 100644 +--- a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py ++++ b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py +@@ -27,6 +27,7 @@ def adjust_cwd(): + class MockedMountingBase(object): + def __init__(self, **dummy_kwargs): + self.called_copytree_from = [] ++ self.target = '' + + def copytree_from(self, src, dst): + self.called_copytree_from.append((src, dst)) +diff --git a/repos/system_upgrade/common/libraries/dnfplugin.py b/repos/system_upgrade/common/libraries/dnfplugin.py +index 56b703d5..0a546637 100644 +--- a/repos/system_upgrade/common/libraries/dnfplugin.py ++++ b/repos/system_upgrade/common/libraries/dnfplugin.py +@@ -342,22 +342,29 @@ def perform_transaction_install(target_userspace_info, storage_info, used_repos, + + + @contextlib.contextmanager +-def _prepare_perform(used_repos, target_userspace_info, xfs_info, storage_info): ++def _prepare_perform(used_repos, target_userspace_info, xfs_info, storage_info, target_iso=None): + with _prepare_transaction(used_repos=used_repos, + target_userspace_info=target_userspace_info + ) as (context, target_repoids, userspace_info): + with overlaygen.create_source_overlay(mounts_dir=userspace_info.mounts, 
scratch_dir=userspace_info.scratch, + xfs_info=xfs_info, storage_info=storage_info, + mount_target=os.path.join(context.base_dir, 'installroot')) as overlay: +- yield context, overlay, target_repoids ++ with mounting.mount_upgrade_iso_to_root_dir(target_userspace_info.path, target_iso): ++ yield context, overlay, target_repoids + + +-def perform_transaction_check(target_userspace_info, used_repos, tasks, xfs_info, storage_info, plugin_info): ++def perform_transaction_check(target_userspace_info, ++ used_repos, ++ tasks, ++ xfs_info, ++ storage_info, ++ plugin_info, ++ target_iso=None): + """ + Perform DNF transaction check using our plugin + """ + with _prepare_perform(used_repos=used_repos, target_userspace_info=target_userspace_info, xfs_info=xfs_info, +- storage_info=storage_info) as (context, overlay, target_repoids): ++ storage_info=storage_info, target_iso=target_iso) as (context, overlay, target_repoids): + apply_workarounds(overlay.nspawn()) + dnfconfig.exclude_leapp_rpms(context) + _transaction( +@@ -365,12 +372,22 @@ def perform_transaction_check(target_userspace_info, used_repos, tasks, xfs_info + ) + + +-def perform_rpm_download(target_userspace_info, used_repos, tasks, xfs_info, storage_info, plugin_info, on_aws=False): ++def perform_rpm_download(target_userspace_info, ++ used_repos, ++ tasks, ++ xfs_info, ++ storage_info, ++ plugin_info, ++ target_iso=None, ++ on_aws=False): + """ + Perform RPM download including the transaction test using dnf with our plugin + """ +- with _prepare_perform(used_repos=used_repos, target_userspace_info=target_userspace_info, xfs_info=xfs_info, +- storage_info=storage_info) as (context, overlay, target_repoids): ++ with _prepare_perform(used_repos=used_repos, ++ target_userspace_info=target_userspace_info, ++ xfs_info=xfs_info, ++ storage_info=storage_info, ++ target_iso=target_iso) as (context, overlay, target_repoids): + apply_workarounds(overlay.nspawn()) + dnfconfig.exclude_leapp_rpms(context) + _transaction( +@@ 
-379,12 +396,22 @@ def perform_rpm_download(target_userspace_info, used_repos, tasks, xfs_info, sto + ) + + +-def perform_dry_run(target_userspace_info, used_repos, tasks, xfs_info, storage_info, plugin_info, on_aws=False): ++def perform_dry_run(target_userspace_info, ++ used_repos, ++ tasks, ++ xfs_info, ++ storage_info, ++ plugin_info, ++ target_iso=None, ++ on_aws=False): + """ + Perform the dnf transaction test / dry-run using only cached data. + """ +- with _prepare_perform(used_repos=used_repos, target_userspace_info=target_userspace_info, xfs_info=xfs_info, +- storage_info=storage_info) as (context, overlay, target_repoids): ++ with _prepare_perform(used_repos=used_repos, ++ target_userspace_info=target_userspace_info, ++ xfs_info=xfs_info, ++ storage_info=storage_info, ++ target_iso=target_iso) as (context, overlay, target_repoids): + apply_workarounds(overlay.nspawn()) + _transaction( + context=context, stage='dry-run', target_repoids=target_repoids, plugin_info=plugin_info, tasks=tasks, +diff --git a/repos/system_upgrade/common/libraries/mounting.py b/repos/system_upgrade/common/libraries/mounting.py +index f272d8c7..fd079048 100644 +--- a/repos/system_upgrade/common/libraries/mounting.py ++++ b/repos/system_upgrade/common/libraries/mounting.py +@@ -422,3 +422,23 @@ class OverlayMount(MountingBase): + '-t', 'overlay', 'overlay2', + '-o', 'lowerdir={},upperdir={},workdir={}'.format(self.source, self._upper_dir, self._work_dir) + ] ++ ++ ++def mount_upgrade_iso_to_root_dir(root_dir, target_iso): ++ """ ++ Context manager mounting the target RHEL ISO into the system root residing at `root_dir`. ++ ++ If the `target_iso` is None no action is performed. ++ ++ :param root_dir: Path to a directory containing a system root. ++ :type root_dir: str ++ :param target_iso: Description of the ISO to be mounted. 
++ :type target_iso: Optional[TargetOSInstallationImage] ++ :rtype: Optional[LoopMount] ++ """ ++ if not target_iso: ++ return NullMount(root_dir) ++ ++ mountpoint = target_iso.mountpoint[1:] # Strip the leading / from the absolute mountpoint ++ mountpoint_in_root_dir = os.path.join(root_dir, mountpoint) ++ return LoopMount(source=target_iso.path, target=mountpoint_in_root_dir) +diff --git a/repos/system_upgrade/common/models/upgradeiso.py b/repos/system_upgrade/common/models/upgradeiso.py +new file mode 100644 +index 00000000..da612bec +--- /dev/null ++++ b/repos/system_upgrade/common/models/upgradeiso.py +@@ -0,0 +1,14 @@ ++from leapp.models import CustomTargetRepository, fields, Model ++from leapp.topics import SystemFactsTopic ++ ++ ++class TargetOSInstallationImage(Model): ++ """ ++ An installation image of a target OS requested to be the source of target OS packages. ++ """ ++ topic = SystemFactsTopic ++ path = fields.String() ++ mountpoint = fields.String() ++ repositories = fields.List(fields.Model(CustomTargetRepository)) ++ rhel_version = fields.String(default='') ++ was_mounted_successfully = fields.Boolean(default=False) +-- +2.38.1 + diff --git a/SOURCES/0031-Add-prod-certs-for-8.8-9.2-Beta-GA.patch b/SOURCES/0031-Add-prod-certs-for-8.8-9.2-Beta-GA.patch new file mode 100644 index 0000000..ce98242 --- /dev/null +++ b/SOURCES/0031-Add-prod-certs-for-8.8-9.2-Beta-GA.patch @@ -0,0 +1,702 @@ +From 4dac9dc9d8f7c48626ea78d2d3bf128efdcb610d Mon Sep 17 00:00:00 2001 +From: Petr Stodulka +Date: Wed, 16 Nov 2022 20:09:47 +0100 +Subject: [PATCH 31/32] Add prod certs for 8.8 & 9.2 (Beta + GA) + +Signed-off-by: Petr Stodulka +--- + .../common/files/prod-certs/8.8/279.pem | 35 ++++++++++++++++++ + .../common/files/prod-certs/8.8/362.pem | 36 +++++++++++++++++++ + .../common/files/prod-certs/8.8/363.pem | 35 ++++++++++++++++++ + .../common/files/prod-certs/8.8/419.pem | 35 ++++++++++++++++++ + .../common/files/prod-certs/8.8/433.pem | 35 ++++++++++++++++++ + 
.../common/files/prod-certs/8.8/479.pem | 35 ++++++++++++++++++ + .../common/files/prod-certs/8.8/486.pem | 35 ++++++++++++++++++ + .../common/files/prod-certs/8.8/72.pem | 35 ++++++++++++++++++ + .../common/files/prod-certs/9.2/279.pem | 35 ++++++++++++++++++ + .../common/files/prod-certs/9.2/362.pem | 36 +++++++++++++++++++ + .../common/files/prod-certs/9.2/363.pem | 35 ++++++++++++++++++ + .../common/files/prod-certs/9.2/419.pem | 35 ++++++++++++++++++ + .../common/files/prod-certs/9.2/433.pem | 35 ++++++++++++++++++ + .../common/files/prod-certs/9.2/479.pem | 35 ++++++++++++++++++ + .../common/files/prod-certs/9.2/486.pem | 35 ++++++++++++++++++ + .../common/files/prod-certs/9.2/72.pem | 35 ++++++++++++++++++ + 16 files changed, 562 insertions(+) + create mode 100644 repos/system_upgrade/common/files/prod-certs/8.8/279.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/8.8/362.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/8.8/363.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/8.8/419.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/8.8/433.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/8.8/479.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/8.8/486.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/8.8/72.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/9.2/279.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/9.2/362.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/9.2/363.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/9.2/419.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/9.2/433.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/9.2/479.pem + create mode 100644 repos/system_upgrade/common/files/prod-certs/9.2/486.pem + create mode 100644 
repos/system_upgrade/common/files/prod-certs/9.2/72.pem + +diff --git a/repos/system_upgrade/common/files/prod-certs/8.8/279.pem b/repos/system_upgrade/common/files/prod-certs/8.8/279.pem +new file mode 100644 +index 00000000..8ca3cea1 +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/8.8/279.pem +@@ -0,0 +1,35 @@ ++-----BEGIN CERTIFICATE----- ++MIIGJTCCBA2gAwIBAgIJALDxRLt/tVMfMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxMjA2NTM1MFoXDTQyMDcw ++NzA2NTM1MFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFtlYzg2NTc3 ++MC01NGViLTQ5NjEtYmJjMC1iZWVhOWI2ZGYyNjZdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBrjCBqzAJBgNVHRMEAjAAMEMGDCsGAQQBkggJAYIXAQQzDDFSZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIFBvd2VyLCBsaXR0bGUgZW5kaWFuMBUGDCsG ++AQQBkggJAYIXAgQFDAM4LjgwGQYMKwYBBAGSCAkBghcDBAkMB3BwYzY0bGUwJwYM ++KwYBBAGSCAkBghcEBBcMFXJoZWwtOCxyaGVsLTgtcHBjNjRsZTANBgkqhkiG9w0B ++AQsFAAOCAgEARI585ue/LavAlcpIOCiwxmNv/djPG3XFU0bPoTym/gycwppJPh3Z ++2wfXQMumgmp6C07yui1ybbVIzftwBMU46z+VGqYyFAvFGXLdYndQ0EJpyZkov5F+ 
++zd6XQlrzIrJu9G9k/bwWXld+7mIBgmWTPjv+TA4wlya9r6NSMW/xSxVm5Xm9SThy ++rvwN8ElK2+BjmyEVByNWphoagNQnKg1hkWsajNGlTKM1x+w1of941uDdBaXbyKVE ++JbYX5klal0DnqqYt8Fgj4vCDMJ635yhnwHgo5MginZZMQFZutHS8NjV2wMvYx1yY ++oLhPo6fA572tTRAEGbZ8HnlU9FrBwP938fvFDHy3hQuRUjrE5qtE+sWnwnmVMgNB ++oMUBy5hZN35VX/s0yQ25CXUqrVof1H2ZmLmRNX+c9Du/vZ2R4cjJpPu+9om4a848 ++Dt4IKfaScsVISErkVvOYH7RCB0o/y3vzahou8fA3lL3Mu4D4Vlyv59Xajsnuwbu/ ++5+3OYZ87h50NlbOLbV0668NztVzRppZ9aoZGVFyhcDdFc5y0uG2schWHSYOIJgJp ++8L3M9PL0FgdyEHAZD2Jyu8l+lhc+oIc41JXjW0GZhCZ9Uvw7x3apurdHk9IU5Ord ++9IugAJ1qN7veRstmb4rCVS8c/gxR24wCRGcDD3eIgvBwmgdFi09DLTA= ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/8.8/362.pem b/repos/system_upgrade/common/files/prod-certs/8.8/362.pem +new file mode 100644 +index 00000000..502e9d16 +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/8.8/362.pem +@@ -0,0 +1,36 @@ ++-----BEGIN CERTIFICATE----- ++MIIGNDCCBBygAwIBAgIJALDxRLt/tVM1MA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxMjA2NTYyNFoXDTQyMDcw ++NzA2NTYyNFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFtiOTdkODkx ++NC1jNjJhLTRhNDAtOTFiZi1hZjdlNTM3MmVjOGVdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v 
++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBvTCBujAJBgNVHRMEAjAAMEgGDCsGAQQBkggJAYJqAQQ4DDZSZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIFBvd2VyLCBsaXR0bGUgZW5kaWFuIEJldGEw ++GgYMKwYBBAGSCAkBgmoCBAoMCDguOCBCZXRhMBkGDCsGAQQBkggJAYJqAwQJDAdw ++cGM2NGxlMCwGDCsGAQQBkggJAYJqBAQcDBpyaGVsLTgscmhlbC04LWJldGEtcHBj ++NjRsZTANBgkqhkiG9w0BAQsFAAOCAgEAcQBzf0ndflW+503jCYyZS1enHucWjgIi ++EqtX4s1mkLuLXyiR7LcSNq56jyRjztyab2ydA77/C/iWaDzXEEXqlO+rrHBfw4u+ ++aJ3Pp0p8mYC+srWMO0wuVeRJeBkbDkXzoGmm/DkzMjGnTZB9/O0hkQ3+dnHLbf8I ++IC9lWen7Rcn+pSp2v8jz7zpZ3qrfBb2Q62KuPL6xwCfw+CVrl+PuChjz373i12CH ++9F7XG/RtVI1B+9qh4yLtTB13hPaAzIkGW3yTA+NOBoVexxZSka7ZfJFFXpmnI7Ot ++4NGi3L6aTGYGRNsHaDX1JsVd4vXC4LFca7YeKBW2aIGjt5ZSThE1tfIgXCgEm7uS ++UUB5lQiQ/9H67Vl8r4/LsUItdD9NmRdpTWT3NB8vbenqLL7QG91ra3uMR4vA9l0j ++Ei7v0WGWjpeiQbbRjzMkRgQKmeW/3M41ShUW4MNg9sFObw6eZqMJnH1BV9N/1e1k ++CpP6drmYE8qb8rVc66FIFS77JB6xmeLRK5Bq4yAtyA7PsM7r4RytgmVpVa4zoMEi ++JSboaBN9IMawvA7m4B/+fQZAy86pD168eOTBoP8G4RswFSLZCeIohFgorG0VEmEx ++CcJDxa9+ud/xFJfJQ9ILHJXYj8+SCO73LUQ1D0z9MPtKqDEk/7Rl+b6EziBzmDyO ++xYae2xpfO4E= ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/8.8/363.pem b/repos/system_upgrade/common/files/prod-certs/8.8/363.pem +new file mode 100644 +index 00000000..54e14706 +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/8.8/363.pem +@@ -0,0 +1,35 @@ ++-----BEGIN CERTIFICATE----- ++MIIGJjCCBA6gAwIBAgIJALDxRLt/tVM0MA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxMjA2NTYxOVoXDTQyMDcw ++NzA2NTYxOVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs4NDk1OTc3 ++Yi0yZDU1LTQwZDItOWZjOC0yOTI5ZjJlZWZiNDRdMIICIjANBgkqhkiG9w0BAQEF 
++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBrzCBrDAJBgNVHRMEAjAAMDoGDCsGAQQBkggJAYJrAQQqDChSZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIEFSTSA2NCBCZXRhMBoGDCsGAQQBkggJAYJr ++AgQKDAg4LjggQmV0YTAZBgwrBgEEAZIICQGCawMECQwHYWFyY2g2NDAsBgwrBgEE ++AZIICQGCawQEHAwacmhlbC04LHJoZWwtOC1iZXRhLWFhcmNoNjQwDQYJKoZIhvcN ++AQELBQADggIBAMEjuJ3qX1Ggyt5axDU3Ub+YJy+JJoBPk/nxpoDWBmZLmGAhW5pa ++sjP6xL/1CLcdWe4bFDbZDdtbXEPStZ0WraNmO0nQgUJFFx7RJ1hd5CUzCi8j3uGh ++M9+YDrr4MbQJSO0Wc55m23E6V9Lug6cA/rCzBWzwxD1W1K7q26CAiWT5l0qBZJmI ++VozYzqTk941GYipukb7vbScDFFafoNMyysEYQckRKRhhIZrr0z3p9ZdFgKFVvy4b ++rYX4/W5MdsAetlzTBrsfxazSOYw/97mnPxDCIjEue2V1A1z5D5HlHotQcbq4OXff ++3aHVbhsYbLbGUhULo/HfBxA1tFSJ9QpsEDu+yvP0032non7xEDB4IvypZ0ay2qK7 ++ArrSFGAyUIVrdIopspPxRikPfc+DcmPflO9vePRTT95tK0O6iObFM9azNmphp2e9 ++9Bzz1A2CjctjA7z4MIP6lPVGbWhD53qRbJs3bkMjqDDCUdE+vEnCuLdronlMlzQ1 ++KVGvRgnKNrAI9ORY24bz/AsGTseZp9jN4IKKnj0ZSq+SjZih/eMP1lNFHjQda/9/ ++gUoeAz3oAd1KQe011R81rS/HnL4QTRqkQiMeEahrx8q0xFwgk3wsk8voFGTBGyEO ++qnVIkzgrzXSQvM3neGlnBVkLzYS2okgFtJzglqAvUUqqfj34J3d91TWF ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/8.8/419.pem b/repos/system_upgrade/common/files/prod-certs/8.8/419.pem +new file mode 100644 +index 00000000..fd9944a9 +--- /dev/null ++++ 
b/repos/system_upgrade/common/files/prod-certs/8.8/419.pem +@@ -0,0 +1,35 @@ ++-----BEGIN CERTIFICATE----- ++MIIGFzCCA/+gAwIBAgIJALDxRLt/tVMeMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxMjA2NTM0NloXDTQyMDcw ++NzA2NTM0NlowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFtkODg3ZTU0 ++NC0wMDBkLTQ2MTYtODk3Zi1kYmIzMDg1MzM4ODVdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBoDCBnTAJBgNVHRMEAjAAMDUGDCsGAQQBkggJAYMjAQQlDCNSZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIEFSTSA2NDAVBgwrBgEEAZIICQGDIwIEBQwD ++OC44MBkGDCsGAQQBkggJAYMjAwQJDAdhYXJjaDY0MCcGDCsGAQQBkggJAYMjBAQX ++DBVyaGVsLTgscmhlbC04LWFhcmNoNjQwDQYJKoZIhvcNAQELBQADggIBAFoEXLlm ++Vxi4qkcugC++o4LrGD8l1pGWL6J7JQ7cFpiCIMtmh0EXx8Tc4511u9SqzIR6uaSI ++D23jUfndGTGsfqYW/jclr5ayoN8IKL7Km18Wc9sb2DErZ98wDDlkIq1s9Wl5TthE ++Eq1Ae81rCnK2R85IUQa7IIB26aSnSwV3DNd1nYPLewzgN8rpF21wKqcN6HowIzbg ++U06sdKCuBA/fnnk/3SInambZAl/eqtHMgmQjgNHzt+qFhno0JqhllevXYn7Gx3Pu ++qJ9UMCTLZM4OEjnNfA0f1VX1CUzf1Fz5ukvChxX4cx2pKNl8q6w+R+2A3fcSkvv2 ++BHMDI00F0M22AEeZQE2ECG4/s8G2dRu2Dzp1kmBH26pSs9FTB3fTPXW2kyXPpOT/ 
++jv2x1jFsza0GXoMJ7t7JEV5Mx9wcC3pguxEnJeCBqejoHTcG1xuWxFhlXmkNuiyD ++/Try5lCEmOvQYyE4FrJGezkpWBab5m2426hByTnpuHYvDsqAPDjUY0HoFUtxwqwA ++kVxUQzf3GxXu5FoFq36BxiWG7e0d4OJzwMK5DI00r/rs2tUlLCfNozDdbN5rBMlR ++1RIrGctY4LDfgr8sXXEK+54nAP11me16/Z4odkQbkv+WZ9z5i4573wei88kTg49X ++Dn64lKrB2B5dKq7vjemcDO3qFp0RAyc2PGUc ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/8.8/433.pem b/repos/system_upgrade/common/files/prod-certs/8.8/433.pem +new file mode 100644 +index 00000000..1c6772ca +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/8.8/433.pem +@@ -0,0 +1,35 @@ ++-----BEGIN CERTIFICATE----- ++MIIGKTCCBBGgAwIBAgIJALDxRLt/tVM2MA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxMjA2NTYyOVoXDTQyMDcw ++NzA2NTYyOVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs1YjllMDEy ++Yy1jM2ZkLTQ0MTgtYWY0OC01Y2FkNWE4YTBjMjBdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBsjCBrzAJBgNVHRMEAjAAMEEGDCsGAQQBkggJAYMxAQQxDC9SZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIElCTSB6IFN5c3RlbXMgQmV0YTAaBgwrBgEE 
++AZIICQGDMQIECgwIOC44IEJldGEwFwYMKwYBBAGSCAkBgzEDBAcMBXMzOTB4MCoG ++DCsGAQQBkggJAYMxBAQaDBhyaGVsLTgscmhlbC04LWJldGEtczM5MHgwDQYJKoZI ++hvcNAQELBQADggIBAEcUjx4IcWFemLJqpxFJm7nP9j/4ZqTjEa9Q7oDHNOOWM1NG ++HL9wJe/5Y/TCUGJvf4JiIUPNnfkaXXZDKqR7mbpLyb83BSAhgCBekdXvb/n+5QKI ++AAYyliEPtWkAIh0aP/nLYDEZ9aJoKIoDs9tp7uAQ/1fGPqN5lIvr7CO7HjIo7xrm ++5S4C3b+DlXp3GB74kb89r1XM3/1cmFmVz8js5KGg7JOVBUqxKQsjF7y8OGgGONiy ++xfkDFIvX+vyNli6xiXpsRH+CkSRckioTOsV8WawA0Ae89QNTVdN7xNXSugrIXSRd ++fyULDx9v+jihJuEyzMYbpvj3fmenrpcbckACsCHWGtRlvdAgYcF0TrFYsYthd2Gc ++wpR/XLn2SRu0Hx5ZbfqqhrJo765wYRPfTMVLilCPiw71d7DP0m6hrNzxX/Sp8K4e ++w/RxKaC5p/aV27dGSe83udnAXA2IgjfaJz6adnD36YfWUYIRVEg/tX2nlpDROz7Y ++saVj5Lq6wzFdt6mIVIQ6A4lM1zldHNyDv69gVDOlOgtklO94z41eJkPu5MbDG2fG ++xlVRgjiAsERNvHEXfnVb0iz/b2ymmM7HIVDowlIVhyJBkNKUW1invXOvf+AGZzQf ++LS4Db1q+P7HJZnrQf1EzgDKjTm8Kdv2CqKXpBnhDsXUXZZPbNl4txG4yIGHI ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/8.8/479.pem b/repos/system_upgrade/common/files/prod-certs/8.8/479.pem +new file mode 100644 +index 00000000..2ecca847 +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/8.8/479.pem +@@ -0,0 +1,35 @@ ++-----BEGIN CERTIFICATE----- ++MIIGFTCCA/2gAwIBAgIJALDxRLt/tVMhMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxMjA2NTQwMFoXDTQyMDcw ++NzA2NTQwMFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFswOWI2ZGRm ++MC03ODFkLTRjMjctYjZkZi0xMWQ2MmE5YmJkMDFdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I 
++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBnjCBmzAJBgNVHRMEAjAAMDUGDCsGAQQBkggJAYNfAQQlDCNSZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIHg4Nl82NDAVBgwrBgEEAZIICQGDXwIEBQwD ++OC44MBgGDCsGAQQBkggJAYNfAwQIDAZ4ODZfNjQwJgYMKwYBBAGSCAkBg18EBBYM ++FHJoZWwtOCxyaGVsLTgteDg2XzY0MA0GCSqGSIb3DQEBCwUAA4ICAQBhvgRzUrOb ++VRVPq2cG/Sto2KzpuIjauYhvuYLCNItw3cOqKaUy5ThOeYkLpddyzPjjlb/jQZi2 ++dUybQjF3lrRpfgMmvRuapvhXWsCQuwq63JjqUaRNuPRrjxlwUqX2ibQSw0ZpPhlj ++vw3usTbLb04zd+RLb9e897tVMxWEWcyfyakMAI2/zV4UXhCZiFoaIA1EQqIyZIhK ++ukCnMYt9m/1KwK9yNYd6yzwYxqDe1aK4Z7J57G0FBpr57JXbZrE1KHpWQawusnFB ++t+2gGTxVOyDIrMuszV93GrrzPTyn+BasVS6UMwpUPQDOFJB9y7AKNSFkhZPUZRPW ++pmJUB4+Z5KGS+Of+g0Sp1huMnCvmEre1mP3pJTBhXmut1X1r/JJI041e46qnE7KO ++wHOz/cimduPgp2Sthc0OY3jZyZU1ibrFld9QFW1vVz7jO0j28T+JInzq+ji4NHdm ++0rGnAxp6S3L6HQBqiliO62ehyG3PnK2UvQyAz3sTnT7qL6qeOvvBSQvJqyQeF33y ++a85oEvAX3air6KuIVJTlXsS4E5EyTKYaX/5BqmrfzZ94ImcnO+5OF0SMOirCG3ik ++uWRGS9+I+0p+I7G9FjDduy8Cm1MYwEC8yB2/CFGEKgsMjXEyMkXMX4hzndnwE1G7 ++edrVZJxTtwuyDtMvE6jeBziapQXSDNv/2A== ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/8.8/486.pem b/repos/system_upgrade/common/files/prod-certs/8.8/486.pem +new file mode 100644 +index 00000000..c5108d61 +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/8.8/486.pem +@@ -0,0 +1,35 @@ ++-----BEGIN CERTIFICATE----- ++MIIGJDCCBAygAwIBAgIJALDxRLt/tVM3MA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk 
++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxMjA2NTYzM1oXDTQyMDcw ++NzA2NTYzM1owRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs3ZmU5MDgy ++Mi00NzFiLTRmNDctOGZmNC1jYzVkMGE0MjFmZjJdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBrTCBqjAJBgNVHRMEAjAAMDoGDCsGAQQBkggJAYNmAQQqDChSZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIHg4Nl82NCBCZXRhMBoGDCsGAQQBkggJAYNm ++AgQKDAg4LjggQmV0YTAYBgwrBgEEAZIICQGDZgMECAwGeDg2XzY0MCsGDCsGAQQB ++kggJAYNmBAQbDBlyaGVsLTgscmhlbC04LWJldGEteDg2XzY0MA0GCSqGSIb3DQEB ++CwUAA4ICAQChnxZRwBX1DK/dONKHIsXkbpKdP4xzTF79tt6o6oueR313wGEeC+uS ++SRdbK8HiNC+J8hpgUz3g2RMmoxE7lObm2gkpEtOh7b6dOTOSL+LrmUhm8Ly5Ts4X ++ExY4I5pctcTXx8PaODIPQjpHIrFSqKYtxT9y0z43StUSmM310sg45H+qSM1ilepe ++WTIcDjLldUPNiaHDvu8wqE77khPnoVaP9dZUO7dNkhPkCR0ECN4Q1YrJhUZro9M0 ++/pQ5700ev5Sw48Iu8iBW/h6wjpuD8cEFA4eYxRE0T8nVSvPILqK1mt8arGFP8Vch ++d6VIyv503eRwVbq9LQE8WOpC+c53ZmJYe/L5OlJU6oRlTK1ePEKZUaLsPfwHnVXC ++2e7IynDmkG2D2PE2J3br8bIVSmxCoxCp7mH2nwKJGE4EVquTnBfdwS3uCzfHX3p8 ++5LGNS460tdymPZF8y4TeL+BAKZYg+l6mLx79ob044OCxsQQbcLY8v50XsTiRpGqH ++ZPLLzastYROQWvI5OhzhXE88HN0CLKCTNPlUeFmFwOw/FYWKjQtwcceuNMmMjeAe ++IZ5MrMyPf0x+MSmlIaPONn5uHmeMp7yvazdgTAkvIsBwq2cuqqFk7xfnqk0iX3zd 
++kE4mKzWMJ6Fa3C+yOroNEIJz+AAiD3mgPln7CleKtXRKrvVkyxKa0g== ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/8.8/72.pem b/repos/system_upgrade/common/files/prod-certs/8.8/72.pem +new file mode 100644 +index 00000000..703d0ad7 +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/8.8/72.pem +@@ -0,0 +1,35 @@ ++-----BEGIN CERTIFICATE----- ++MIIGFjCCA/6gAwIBAgIJALDxRLt/tVMgMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxMjA2NTM1NVoXDTQyMDcw ++NzA2NTM1NVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs4YWFkYmY2 ++OS0xYTA1LTRjOGYtYTc5MS04YWRlOGZiNThkMzRdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBnzCBnDAJBgNVHRMEAjAAMDsGCysGAQQBkggJAUgBBCwMKlJlZCBIYXQg ++RW50ZXJwcmlzZSBMaW51eCBmb3IgSUJNIHogU3lzdGVtczAUBgsrBgEEAZIICQFI ++AgQFDAM4LjgwFgYLKwYBBAGSCAkBSAMEBwwFczM5MHgwJAYLKwYBBAGSCAkBSAQE ++FQwTcmhlbC04LHJoZWwtOC1zMzkweDANBgkqhkiG9w0BAQsFAAOCAgEAbNQpBfvJ ++GequSRt4hkr4qSqM3TOsVkr6/DpM2CVHsIF6irb5sJaHjwNomw0C6ecE76j9Rm2f ++dK/TCo6vPdSvAcATwyfXBiPvRc+bT4oETBf7FqqMRwPRf35ftBL/4J1JVb/d2rFF 
++hO/cu4sLTItSwlnvSuOqMDqmCpa4OfMPdTj16v7iJEkN1gMEIbi7uQdZiusO7En5 ++s/w4Dreok+Q98jAKrHHuCoIKAfxMKB+1YPDN6FYfVqMbngnX8X+G4ysED5OWa47b ++qLMh1+VDKBbNmDAYx7PMEDjG3Hb4S6g+Uc5d6MxPccXwKoJTbA6vSuTTVvPL5ex5 ++s1NPW50W39oPyV9818qHSmFt4RN+3dxXquBNPePKMugXU/77XKo4zeYE+zGucEph ++HaYbmfDNWp74ZG4qf6wTi91NlkkNiaihLbD17ez3AkWH9qXP37RzJ289eIcu42i5 ++uDc82NKakJc4hR5h92Psc7J602gcOl2d23syFrvpMmPqVSjLYMir3ImpwIe7Pn3i ++hgywwGB1QPEnoSc3dPk8FmmFST/ULaU/Ktlc0PwxpirbLO3OTQR3/y4zqxSATWMJ ++Qs4L0ouTwzVJ633+mu+4xIO3wzvtNXHI5Q1mw78D3Xzx5B3Qu7QOkPiNQOKkmKcg ++rzKkOicYZ2gIk0hWdcb7gCJMS1h+8x6FPnE= ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/9.2/279.pem b/repos/system_upgrade/common/files/prod-certs/9.2/279.pem +new file mode 100644 +index 00000000..8bd078f3 +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/9.2/279.pem +@@ -0,0 +1,35 @@ ++-----BEGIN CERTIFICATE----- ++MIIGJTCCBA2gAwIBAgIJALDxRLt/tU8JMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxOTEyNDA0N1oXDTQyMDcx ++NDEyNDA0N1owRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs3ZTA5MmI3 ++My1hYmYzLTQ5N2QtYWI4Yi03MDg1NWE0OTVjMGNdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v 
++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBrjCBqzAJBgNVHRMEAjAAMEMGDCsGAQQBkggJAYIXAQQzDDFSZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIFBvd2VyLCBsaXR0bGUgZW5kaWFuMBUGDCsG ++AQQBkggJAYIXAgQFDAM5LjIwGQYMKwYBBAGSCAkBghcDBAkMB3BwYzY0bGUwJwYM ++KwYBBAGSCAkBghcEBBcMFXJoZWwtOSxyaGVsLTktcHBjNjRsZTANBgkqhkiG9w0B ++AQsFAAOCAgEAAQNrWf/OVdfqx1Ln9b+8EI812sNt+e3TbIT9Qs/jFQ0FeQGZcYsA ++yBkB2uE9D6eBdTfteSk9LGazf7FYsvCCgv+V938qm1cfM+Y6UoUY6kE965w0DLgJ ++Cre/yRP8k3VzBTl5luLt9QX2x3jY/jVGdBKm1L3fJdfgSQQLEikJK+z/Eweiwmfq ++1lB/G9kIDNof3Jmm+TEBI9ToMg1zZBbmAudZGKp/jyDTo0Hnnfbr+TaPAYR8LD8A ++lQNWs2WqKakTLdqm3zKqKBTm0L35KEmBLNK6Gu+43CjBjXd5IGctumUJ7Bklgxm2 ++JqFT14jERJrE/YLTmu2JcMz/VzbleRQ5jtl/RmKEnUD3GgyaMujtVu2TOMxB0i8v ++Ovi7Okdf3/VA83T9noW9EYbYFdq+o00oyAxFqQPASYRLVPsyX86OUe5tXo+s1w3D ++fG7sPRP7fvAjWLL+u+BT9V9GppxF1OHbdBitKY/7KocbejkEpTAHVF2y4SJ96aDg ++BXIsf7J78hpyAYdEhbL79djygH5iZloGapJzKHVSQ55Smaj6uIj5RkEAZTjdPmIE ++PGqv74eMswYI6K/B2eHwZmuFaTtgrHfAtgl4jKEnc3qaaaDRpaXAjM25FiZavcC4 ++1pr59D/wDv+kRzRK9Qy3iuyDsboeYnU30qPdrry5SCx4qsi80VxSRMM= ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/9.2/362.pem b/repos/system_upgrade/common/files/prod-certs/9.2/362.pem +new file mode 100644 +index 00000000..e783c625 +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/9.2/362.pem +@@ -0,0 +1,36 @@ ++-----BEGIN CERTIFICATE----- ++MIIGNDCCBBygAwIBAgIJALDxRLt/tU8fMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxOTEyNDQ0OVoXDTQyMDcx ++NDEyNDQ0OVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs0Y2YzNmI4 ++OC0xM2QyLTQyZWYtYWM2NS1iYWQ1ZTc0ODc2ZWFdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk 
++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBvTCBujAJBgNVHRMEAjAAMEgGDCsGAQQBkggJAYJqAQQ4DDZSZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIFBvd2VyLCBsaXR0bGUgZW5kaWFuIEJldGEw ++GgYMKwYBBAGSCAkBgmoCBAoMCDkuMiBCZXRhMBkGDCsGAQQBkggJAYJqAwQJDAdw ++cGM2NGxlMCwGDCsGAQQBkggJAYJqBAQcDBpyaGVsLTkscmhlbC05LWJldGEtcHBj ++NjRsZTANBgkqhkiG9w0BAQsFAAOCAgEArjsodDEcCbXin1wyhdjpSQhZEmgtO9hX ++myaAAdOaWWrOKV6rSLEL2EhmeT/zCOPdmoErKHQrcdKutr6z9Bw06K1qiFwnfd/5 ++SJJtkNBNJFtpTGDZHDG6GSbRg7hA9YbrqSoX6c5UYDX6VcUv9gNXlTIxyIT86kCV ++i4QcS9hH7HvTTtfCnO7W2j47w3sGqt/mLYQWSa2ZzMzbGpBty1tLO5lux9+HVH9z ++aRiiKCHrGXBbo6PiHjcl/Ikxc3rJRLWwI3q5tegC+MjyC2tmQdc1hhXKwZj51EMt ++B+s4bLYv3WmVlcaheN6//aHz+cO6xw6OBVgUt62xBG4XprT7tbTVY1bS7+pQZm0C ++y3eUZxkfofb5k7mJqGxebNGuXZWS1yJuaPc4AGyYvnqskKE6bsJbET71zS2qZnSU ++MqYjVJ0LdoSFgNsgebbG63GovYFJYB/4cFGk2l+21D5bOXTb4CbJmEgBsVzoRXuH ++/YeJSZ++h2Y78hjxFMXeztM5TaN2d/FPm41jN9fDeCwN0XZAhVLtvrizobEj/rZF ++fF3om6ETcg7cRn7l00zsQGZeAjMDYXjQprcj074ER2Oz+6/nGuOlgBXgn76jm/2E ++oomPas/YcyxOrG1V4oZAzyedOCuU+51iJK3qJXMYG/a4X8TXv5sKu/DpfLpIbaze ++oRQ+8ay5+ys= ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/9.2/363.pem b/repos/system_upgrade/common/files/prod-certs/9.2/363.pem +new file mode 100644 +index 00000000..2afb74db +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/9.2/363.pem +@@ -0,0 +1,35 @@ 
++-----BEGIN CERTIFICATE----- ++MIIGJjCCBA6gAwIBAgIJALDxRLt/tU8eMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxOTEyNDQ0NVoXDTQyMDcx ++NDEyNDQ0NVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFtmYjE2MTNh ++OS04YjcyLTRiOTUtOGE0Yy0zNmNiZTVmMjg2MGNdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBrzCBrDAJBgNVHRMEAjAAMDoGDCsGAQQBkggJAYJrAQQqDChSZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIEFSTSA2NCBCZXRhMBoGDCsGAQQBkggJAYJr ++AgQKDAg5LjIgQmV0YTAZBgwrBgEEAZIICQGCawMECQwHYWFyY2g2NDAsBgwrBgEE ++AZIICQGCawQEHAwacmhlbC05LHJoZWwtOS1iZXRhLWFhcmNoNjQwDQYJKoZIhvcN ++AQELBQADggIBAK9GawETqhJTkT0VUEQt9Kn4s92TRaEMB4/X5pWDOG4BBQu5T3LV ++9xKelt6eVXPLvjytY3EgCZxm5xc+1zE/Gf9yypgH0vtNFqOr+/U9fn6YOfAwvDqo ++2ozNAmA11m5TKi57IGhQJGTaxJdKdOk3NEuwMcD1TfQqDtqMF27OnWdO501URJJW ++e52b0NraoeF6OicDKLgxc31fv457CcwT3k/GyAgmR31PXWkoySiB+Np/xf0uJQvf ++2iI1V4iqfcygMqniJsjEi2IMcLXBxs9DdFRPDMeVkmO3JKXCFjV+sHZB9LbsRh1o ++LTnAnEvfWx1nWUc3t9lwS54HlSKfOyPt/c+tPiXCHa19p+Z8gqk7KyztTMB1JeIE ++0HdjFfwino66rcEshfBEe3mq3ohY4Yq79PACKmbVVqYanBiRAvoR7j7cZROvEmGJ 
++pq9qUZ91w4OwDx5G/IIUZVafGkVAiLACK3ACala4CQZmB/UKSihwnPiWXj7sdnYz ++CjEyk/z9q2zaFvB/H3fQdol0Vy66eQ+DPRO+eMnppCvG6SI5nah0ZJSnfmR+26Mc ++IeR2KzRoN1kwVMzMh3qOpSaneDOQTQONzzzmeOqVQohRbz9cfYZor99l8/LLXce6 ++sH9LlaFP3aHoB5cdGyirTsB8Z65x/1y/4UrqdwdfO0o+DZH8kkhJ9roH ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/9.2/419.pem b/repos/system_upgrade/common/files/prod-certs/9.2/419.pem +new file mode 100644 +index 00000000..f35743dc +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/9.2/419.pem +@@ -0,0 +1,35 @@ ++-----BEGIN CERTIFICATE----- ++MIIGFzCCA/+gAwIBAgIJALDxRLt/tU8IMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxOTEyNDA0MloXDTQyMDcx ++NDEyNDA0MlowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFthYzI5ZTA3 ++Ni1mY2ViLTRhMTEtYjM3Yi03M2YxOGFiOTAzMmRdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBoDCBnTAJBgNVHRMEAjAAMDUGDCsGAQQBkggJAYMjAQQlDCNSZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIEFSTSA2NDAVBgwrBgEEAZIICQGDIwIEBQwD ++OS4yMBkGDCsGAQQBkggJAYMjAwQJDAdhYXJjaDY0MCcGDCsGAQQBkggJAYMjBAQX 
++DBVyaGVsLTkscmhlbC05LWFhcmNoNjQwDQYJKoZIhvcNAQELBQADggIBAGxyb6Sk ++QPbMUsdNVwMo5lL7yR/O8JsKfMgwnXgp4szymjgCRdYKAmk/TeceuHnM+1YxxyN2 ++n11Oy67Vlcchpy5Vo9m1GjSk3oQ0biyJgSgMEoHdWPCwFYDTABMK5U/4Df7wBw/q ++4TvnaX5EhYO4nQo7Pc0A4eFOvyeKv6lTw0Rv5WNHFCMZSQLdPSpGLHZYMF0lyl/p ++yAQHpSkDFaB1mMvQLu9r7FbeRm2M8eyaRp1Ok4Ypxr2yXoBUQm3YPCpBBIwnqyD5 ++trnpYkjncxe9q2DSRpYgRLEmu+2Qm5WbrJ0zZKYcs/jZbaH5mrWvNCLy5u3h442V ++vHEX+ITDyuB0507ORxOpyt+k2+JenEcYNg7aHn/fUnsWjutGfEY4aDIVOnZxAf31 ++DLDJXPH4/jjO9dd/4fKykgLP8OUq5x+VXAtufpyDUyYVqXnIXwfUPN0NSl8gtUKJ ++ruHJ7gNyYqdopMquuOWb/Mew2DnwXFA9b3goYBbdaCzkt7k9Zdafzz6Mu1NnxUkf ++tMyJOmPBCZSDHRilTA/dA+8Lvj+2H6q7aEFzLv1saAoktxB/fggpBJm3jRs4dy3T ++xbcWnF++VANF6LQ+5bI8dxX6/FC5/zjJd1oEoiIS7dcFUZ0uf6x5aBuzjB+c2G0C ++MnR4x3OKYQl6cy3pFJkQNgLoAHXVRsNOmVe6 ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/9.2/433.pem b/repos/system_upgrade/common/files/prod-certs/9.2/433.pem +new file mode 100644 +index 00000000..8af44fae +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/9.2/433.pem +@@ -0,0 +1,35 @@ ++-----BEGIN CERTIFICATE----- ++MIIGKTCCBBGgAwIBAgIJALDxRLt/tU8gMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxOTEyNDQ1NFoXDTQyMDcx ++NDEyNDQ1NFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs1Y2E3YWM5 ++Ny0yMmZhLTRmZDUtODU3My04NTc1YjAxOWQ5N2RdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo 
++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBsjCBrzAJBgNVHRMEAjAAMEEGDCsGAQQBkggJAYMxAQQxDC9SZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIElCTSB6IFN5c3RlbXMgQmV0YTAaBgwrBgEE ++AZIICQGDMQIECgwIOS4yIEJldGEwFwYMKwYBBAGSCAkBgzEDBAcMBXMzOTB4MCoG ++DCsGAQQBkggJAYMxBAQaDBhyaGVsLTkscmhlbC05LWJldGEtczM5MHgwDQYJKoZI ++hvcNAQELBQADggIBAM/RY5sRACnyRmPKq0fGBuApNJU/m8q116Ls6FSpgZiz5xa5 ++qUaWW2UHn/oFdXd7A3kaLL/9VbrFVfuC/wiz+te0EqHy2NPwlGgKmbVjFZn4PcoG ++YzTopv5bwr90WONkLt7jDbhls8ZbGgPY6qUDA2TbtvHPDNPIM9ukoin9BrurksUS ++XJ9UsV3jHV9yye/u6nM5FZmc9E0IagoS/asd0B3Y3egkbCn5bcfyYvV2Y8cn5/gg ++SucFU1KIwxLOs+J61RfaFh5O/22ZJtPG/7zMYXPk/Luas0YZUEiVFjc4BWQRmM94 ++dF142BpwOX9L5LBMtMhuB0sWpov7wlQamFiP2ZtsVLQgoqFKW3MXHZNy3f1FQM10 ++ei9lglw7qrhoeKj7UtedL4zJREtr4fhG3TzLhDqa8GvIEr+JAPtg2eRslO6uu67e ++RdE2AIYY6HWKQ5FcEfkCdW/hFFeVr0MjvBgQCYJlO8fmHxgOAQSKjjAzyRVAcjTk ++x+8v69ucZ3uMZb6oFUZH+p67XuduCm3sQCFk+Ilscr/8E/MNB4x0bPCIXLK6T3aQ ++9JKBxofBKtTSzyxEFEXqYLYJyQrAKXVpOgOrAMmeLHwA3IoikVG1x6/GwVuYTBUA ++B0lW/aO8mL0caQyebnE4fpYef5GzrtvOt2rGB54N/3AipD5dOW/AeYP/Wcj0 ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/9.2/479.pem b/repos/system_upgrade/common/files/prod-certs/9.2/479.pem +new file mode 100644 +index 00000000..7ed95967 +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/9.2/479.pem +@@ -0,0 +1,35 @@ ++-----BEGIN CERTIFICATE----- ++MIIGFTCCA/2gAwIBAgIJALDxRLt/tU8LMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ 
++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxOTEyNDA1NloXDTQyMDcx ++NDEyNDA1NlowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs0ZmU2ODU0 ++NC0yYjYwLTRiOGYtODdhYS02MzkxNWJkNGMyMjhdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBnjCBmzAJBgNVHRMEAjAAMDUGDCsGAQQBkggJAYNfAQQlDCNSZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIHg4Nl82NDAVBgwrBgEEAZIICQGDXwIEBQwD ++OS4yMBgGDCsGAQQBkggJAYNfAwQIDAZ4ODZfNjQwJgYMKwYBBAGSCAkBg18EBBYM ++FHJoZWwtOSxyaGVsLTkteDg2XzY0MA0GCSqGSIb3DQEBCwUAA4ICAQA0Sgnj5BjL ++2p4U7R/TOMhkP/7Tm4AkdmMvhkUb7c0tZhY3jJaJJt2U9IBTd8sN5Z/mb3Zr03dQ ++8gOb5mpfMGVrwoMjgDhZniRJ6/0yPKrgiRbGijHS6mXkU4dkzh6N/HyBjpQUuOaK ++5isXArEx7kv3k0Hun2DPdw8oBhXgH7x0TL3K3Yz+VXiX6Tcn4tlMTTBuR8NngP57 ++V9xmtLncR8rSdNr8j7cxAoXGaSPlE4K0cTGz87gAja6702CVk8ueB8bU68S47ZEK ++xLDcj1iWiVjYiZSFO7gWFavrlitEE+yW8c6oLVVXKfA8TxrJ1VuSTqU+fOojx5sM ++qtNqeMPLzz80M6dNrfuOJ+FHuwXu6Ytj8u/u24ds12TU7NCV9YLyfB2NDhueALtr ++/6OKlANU4DdxdL3947KGnnQZLpEpDpvsgOUBFGOivNIbHt0QXpV9tnMwsWx6tQ82 ++exnin3PJBkR2rg5/xv9ZXNb4WdYA3FwLsyej9gM7S4rFgMZzr7n2S5Dd8v9kRYHl ++JGUdY3LsY+SfxyYNalJirt3JxeIuLg0QZIXQP0BwBX92zZb+Zw4MxI1AcJvxsGkf ++7vGqTnIlPPER+IdK6SNeF3yJ4FQb6U1WMAyw0yqFPm4s7asaV/aULZu6+p13NlKZ ++r331U/otUJX8S2irN9kUt/oKdV/MVlgsFg== ++-----END CERTIFICATE----- +diff --git 
a/repos/system_upgrade/common/files/prod-certs/9.2/486.pem b/repos/system_upgrade/common/files/prod-certs/9.2/486.pem +new file mode 100644 +index 00000000..c786ea82 +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/9.2/486.pem +@@ -0,0 +1,35 @@ ++-----BEGIN CERTIFICATE----- ++MIIGJDCCBAygAwIBAgIJALDxRLt/tU8hMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxOTEyNDQ1OFoXDTQyMDcx ++NDEyNDQ1OFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFsyMzg4MDQx ++Yy1iYWMxLTRmZGEtYWJjZS0zNWNkMGY5MzQxMDRdMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ ++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBrTCBqjAJBgNVHRMEAjAAMDoGDCsGAQQBkggJAYNmAQQqDChSZWQgSGF0 ++IEVudGVycHJpc2UgTGludXggZm9yIHg4Nl82NCBCZXRhMBoGDCsGAQQBkggJAYNm ++AgQKDAg5LjIgQmV0YTAYBgwrBgEEAZIICQGDZgMECAwGeDg2XzY0MCsGDCsGAQQB ++kggJAYNmBAQbDBlyaGVsLTkscmhlbC05LWJldGEteDg2XzY0MA0GCSqGSIb3DQEB ++CwUAA4ICAQAHqIuoFbUAfhRFzLGeuTaJVidWk7nbmwyGKOHBDHannogHXSxJM5nt ++Ct5vFqG7uC0UE0JgUPz/q2o6iFhro3slfvWHA1sW83XN+yiaTtDIQl8Y7O7n4wJ1 ++NXH7mRC/L+58P1/HJ3gEaBdBfKiHte6J8FPonuRkfJrREiPgo+B9zNf0BEjl6xqr 
++7SgfJZMO257Lkg3/Tl4amZ8M/cm/P/Z+kprfvUDsJzBQJ1z7qhriUuXFJfS799mG +++UV/wO0ZtdhGaHAXR28/MmtearogcM9rhp9DfdqmKdhktIcoHBuDXLUxnwUhX+W3 ++AJTNf7YwyYUKEHzhPLJH8v0JH8N/Cfd2PQHrQ1zni0D3BXTygHrbDEWZDm+3jSOF ++joyEIFHlWIb7eF67a7x/7iiS2op07E0Ka3h3SYHy/l+WvqPg8O28Zz3U6o1dCtBT ++odDtz9FVcGJ1MhMZ3F71XvM+TNEASJW1aK0bRoJMUXZ1krtHWUCsZuea3X5JAOey ++CycnOcUkvu8tzIOmgaqPmeolG/tKdlEY90Sc8XLw/KWsW0tfqqU9weppoZnCqPyp ++8YQiUEumjpGOtZUJRvootlBN9CQH8ilCOl1c4CsGdcmnXwnC0Z8gYzM+HhcqYenD ++Y+O3lNd3WsLoQrGfj2dMYWnKFOLKJovaYpOXiQOW6ghpM5bWdqVIuQ== ++-----END CERTIFICATE----- +diff --git a/repos/system_upgrade/common/files/prod-certs/9.2/72.pem b/repos/system_upgrade/common/files/prod-certs/9.2/72.pem +new file mode 100644 +index 00000000..dabf8506 +--- /dev/null ++++ b/repos/system_upgrade/common/files/prod-certs/9.2/72.pem +@@ -0,0 +1,35 @@ ++-----BEGIN CERTIFICATE----- ++MIIGFjCCA/6gAwIBAgIJALDxRLt/tU8KMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD ++VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI ++YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk ++IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ ++ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxOTEyNDA1MVoXDTQyMDcx ++NDEyNDA1MVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs1YWUwNTdk ++ZC1kMWI3LTQ4NzEtYTA5MS0wYzY4MzcxMTkyZDldMIICIjANBgkqhkiG9w0BAQEF ++AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk ++sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x ++8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB ++RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I ++5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa ++xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo ++QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI ++yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl ++1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v ++5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ 
++ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C ++AwEAAaOBnzCBnDAJBgNVHRMEAjAAMDsGCysGAQQBkggJAUgBBCwMKlJlZCBIYXQg ++RW50ZXJwcmlzZSBMaW51eCBmb3IgSUJNIHogU3lzdGVtczAUBgsrBgEEAZIICQFI ++AgQFDAM5LjIwFgYLKwYBBAGSCAkBSAMEBwwFczM5MHgwJAYLKwYBBAGSCAkBSAQE ++FQwTcmhlbC05LHJoZWwtOS1zMzkweDANBgkqhkiG9w0BAQsFAAOCAgEApFHsXGnC ++mGFM6yMkJYDGxYGDdsOY0xl0IMT6m2bvMRlbcykLhOL/CxwjZsS/mGPeHG4Q44+e ++pq+xMh3013klRN9iZoKFHSBTuXHDxzjjEPYR414O7FehNB82f3GlkLv6z57WeAxw ++wAqPvFcsIACzVEDOvSWQzn5aDEJURHT2caax/Psm+NT5nBneueySIOe5FDZmpgDJ ++7xqnUCaniM8RN4YlNQLm8V5wM9akiIhp/60Pq4bqSvlN23vOQ/QOTUtGyGlBtsGs ++LVhR0ssaTKlHbA+1xntZkEjNI229PcFFYeWXw5Fn/18l/ulfGCmbOMuRfDpC15Wl ++dLGETkpUVcflhJOloYcaPi+6RSXEMqyMSgLfN0k1IDJdV2Gh0Ok+HUYlxgPZ07+Q ++OW2jky9+tC2kLDh424J1sZUB+M/ONGJGHwXBHsIqMcbhVzDpGpHkQoMt6jDWw+li ++mHmwmSqKGxH/uhnVepSH6iJi4pF16YhrteW4wjtmrFFp7RsvxggqfHL8IgZSZ/Es ++pvTqSygWCU6hHoHdQdIrVph1VYSpvNyaEsopj+4F8oHSzC+sXQ+4iJ++CpCFised ++pG34sx+vFi/kcRnYnd8z20dbSVeH2j2+WSaYiV53mxUdA/Hp9XEn2u7p8WWIcv79 ++21f+YSbmvDuP6xg5D/l9lg1q6FljH6NcpBE= ++-----END CERTIFICATE----- +-- +2.38.1 + diff --git a/SOURCES/0032-Introduce-new-upgrade-paths-8.8-9.2.patch b/SOURCES/0032-Introduce-new-upgrade-paths-8.8-9.2.patch new file mode 100644 index 0000000..18e1c52 --- /dev/null +++ b/SOURCES/0032-Introduce-new-upgrade-paths-8.8-9.2.patch @@ -0,0 +1,52 @@ +From 7aab954d2ed9fcdd67ceb4c6a783fafbd6021c8a Mon Sep 17 00:00:00 2001 +From: Petr Stodulka +Date: Wed, 16 Nov 2022 20:19:34 +0100 +Subject: [PATCH 32/37] Introduce new upgrade paths 8.8/9.2 + +The new enabled upgrade paths are: + RHEL 7.9 -> 8.8, 8.6 (default: 8.8) + RHEL 8.8 -> 9.2 + +Keeping some previous upgrade paths still opened, but expecting +dropping them before the release. 
+--- + repos/system_upgrade/common/files/upgrade_paths.json | 7 ++++--- + repos/system_upgrade/common/libraries/config/version.py | 2 +- + 2 files changed, 5 insertions(+), 4 deletions(-) + +diff --git a/repos/system_upgrade/common/files/upgrade_paths.json b/repos/system_upgrade/common/files/upgrade_paths.json +index 11d52423..c48d916f 100644 +--- a/repos/system_upgrade/common/files/upgrade_paths.json ++++ b/repos/system_upgrade/common/files/upgrade_paths.json +@@ -1,11 +1,12 @@ + { + "default": { + "7.6": ["8.4", "8.6"], +- "7.9": ["8.4", "8.6"], ++ "7.9": ["8.6", "8.8"], + "8.6": ["9.0"], + "8.7": ["9.0"], +- "7": ["8.4", "8.6"], +- "8": ["9.0"] ++ "8.8": ["9.2"], ++ "7": ["8.6", "8.8"], ++ "8": ["9.2"] + }, + "saphana": { + "7.9": ["8.2", "8.6"], +diff --git a/repos/system_upgrade/common/libraries/config/version.py b/repos/system_upgrade/common/libraries/config/version.py +index e148932a..7104bdc5 100644 +--- a/repos/system_upgrade/common/libraries/config/version.py ++++ b/repos/system_upgrade/common/libraries/config/version.py +@@ -14,7 +14,7 @@ OP_MAP = { + _SUPPORTED_VERSIONS = { + # Note: 'rhel-alt' is detected when on 'rhel' with kernel 4.x + '7': {'rhel': ['7.9'], 'rhel-alt': ['7.6'], 'rhel-saphana': ['7.9']}, +- '8': {'rhel': ['8.6', '8.7'], 'rhel-saphana': ['8.6']}, ++ '8': {'rhel': ['8.6', '8.7', '8.8'], 'rhel-saphana': ['8.6']}, + } + + +-- +2.38.1 + diff --git a/SOURCES/0033-testutils-Implement-get_common_tool_path-method.patch b/SOURCES/0033-testutils-Implement-get_common_tool_path-method.patch new file mode 100644 index 0000000..43e8feb --- /dev/null +++ b/SOURCES/0033-testutils-Implement-get_common_tool_path-method.patch @@ -0,0 +1,46 @@ +From d6ddc8e6250bf3c07633a84b81e8f4b66c23c0e5 Mon Sep 17 00:00:00 2001 +From: Petr Stodulka +Date: Fri, 25 Nov 2022 17:11:26 +0100 +Subject: [PATCH 33/37] testutils: Implement get_common_tool_path method + +--- + repos/system_upgrade/common/libraries/testutils.py | 8 +++++--- + 1 file changed, 5 insertions(+), 3 
deletions(-) + +diff --git a/repos/system_upgrade/common/libraries/testutils.py b/repos/system_upgrade/common/libraries/testutils.py +index fc20aa3b..c538af1a 100644 +--- a/repos/system_upgrade/common/libraries/testutils.py ++++ b/repos/system_upgrade/common/libraries/testutils.py +@@ -75,7 +75,9 @@ class CurrentActorMocked(object): # pylint:disable=R0904 + release = namedtuple('OS_release', ['release_id', 'version_id'])(release_id, src_ver) + + self._common_folder = '../../files' ++ self._common_tools_folder = '../../tools' + self._actor_folder = 'files' ++ self._actor_tools_folder = 'tools' + self.configuration = namedtuple( + 'configuration', ['architecture', 'kernel', 'leapp_env_vars', 'os_release', 'version', 'flavour'] + )(arch, kernel, envarsList, release, version, flavour) +@@ -87,6 +89,9 @@ class CurrentActorMocked(object): # pylint:disable=R0904 + def get_common_folder_path(self, folder): + return os.path.join(self._common_folder, folder) + ++ def get_common_tool_path(self, name): ++ return os.path.join(self._common_tools_folder, name) ++ + def consume(self, model): + return iter(filter( # pylint:disable=W0110,W1639 + lambda msg: isinstance(msg, model), self._msgs +@@ -149,9 +154,6 @@ class CurrentActorMocked(object): # pylint:disable=R0904 + def get_tool_path(self, name): + raise NotImplementedError + +- def get_common_tool_path(self, name): +- raise NotImplementedError +- + def get_actor_tool_path(self, name): + raise NotImplementedError + +-- +2.38.1 + diff --git a/SOURCES/0034-targetuserspacecreator-improve-copy-of-etc-pki-rpm-g.patch b/SOURCES/0034-targetuserspacecreator-improve-copy-of-etc-pki-rpm-g.patch new file mode 100644 index 0000000..b0deead --- /dev/null +++ b/SOURCES/0034-targetuserspacecreator-improve-copy-of-etc-pki-rpm-g.patch @@ -0,0 +1,91 @@ +From f1c00a3823751d3fccaba3c98be86eba2b16930c Mon Sep 17 00:00:00 2001 +From: Petr Stodulka +Date: Sat, 26 Nov 2022 12:27:46 +0100 +Subject: [PATCH 34/37] targetuserspacecreator: improve copy of 
/etc/pki + (rpm-gpg) + +The original solution copied /etc/pki from the host into the +target userspace container if the upgrade has been performed with +RHSM, which causes several negative impacts: + +a) certificates are missing inside the container when upgrading + without RHSM (still issue) + - Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2040706 +b) the target OS certificates are replaced by the original OS + certificates when upgrading with RHSM (partially fixed) + +This commit partially fixes the case b), so we preserve target +certificates inside the container only from the /etc/pki/rpm-gpg +directory when upgrading with RHSM. If files or directories with +the same name exists inside, prefered are those from the target OS. + +For the full fix of this case. The full fix should preserve +all certificates owned by packages inside the container, and only +"new files" from the host should be applied. This is also prerequisite +to be able to fix the case a). + +To be able to fix the case a) we would need to make this behaviour +unconditional (not dependent on the use of RHSM). Which most likely +should resolve the bug 2040706. Which needs the full fix of the case +b) first, as described above. The unconditional copy of /etc/pki +currently breaks upgrades on systems using RHUI (at least on +Azure for IPU 8 -> 9, other clouds could be affected also). +So postponing the sollution to a followup PR. 
+--- + .../libraries/userspacegen.py | 32 +++++++++++++++++-- + 1 file changed, 30 insertions(+), 2 deletions(-) + +diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py +index 5a6a80f2..0415f0fe 100644 +--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py ++++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py +@@ -235,6 +235,33 @@ def _get_files_owned_by_rpms(context, dirpath, pkgs=None): + return files_owned_by_rpms + + ++def _copy_certificates(context, target_userspace): ++ """ ++ Copy the needed cetificates into the container, but preserve original ones ++ ++ Some certificates are already installed in the container and those are ++ default certificates for the target OS. We know we should preserve at ++ least certificates located at rpm-gpg directory. So preserve these for ++ now at least. 
++ """ ++ target_pki = os.path.join(target_userspace, 'etc', 'pki') ++ backup_pki = os.path.join(target_userspace, 'etc', 'pki.backup') ++ ++ # FIXME(pstodulk): search for all files owned by RPMs inside the container ++ # before the mv, and all such files restore ++ # - this is requirement to not break IPU with RHUI when making the copy ++ # of certificates unconditional ++ run(['mv', target_pki, backup_pki]) ++ context.copytree_from('/etc/pki', target_pki) ++ ++ # TODO(pstodulk): restore the files owned by rpms instead of the code below ++ for fname in os.listdir(os.path.join(backup_pki, 'rpm-gpg')): ++ src_path = os.path.join(backup_pki, 'rpm-gpg', fname) ++ dst_path = os.path.join(target_pki, 'rpm-gpg', fname) ++ run(['rm', '-rf', dst_path]) ++ run(['cp', '-a', src_path, dst_path]) ++ ++ + def _prep_repository_access(context, target_userspace): + """ + Prepare repository access by copying all relevant certificates and configuration files to the userspace +@@ -243,9 +270,10 @@ def _prep_repository_access(context, target_userspace): + target_yum_repos_d = os.path.join(target_etc, 'yum.repos.d') + backup_yum_repos_d = os.path.join(target_etc, 'yum.repos.d.backup') + if not rhsm.skip_rhsm(): +- run(['rm', '-rf', os.path.join(target_etc, 'pki')]) ++ # TODO: make the _copy_certificates unconditional. keeping it conditional ++ # due to issues causing on RHUI ++ _copy_certificates(context, target_userspace) + run(['rm', '-rf', os.path.join(target_etc, 'rhsm')]) +- context.copytree_from('/etc/pki', os.path.join(target_etc, 'pki')) + context.copytree_from('/etc/rhsm', os.path.join(target_etc, 'rhsm')) + # NOTE: we cannot just remove the original target yum.repos.d dir + # as e.g. 
in case of RHUI a special RHUI repofiles are installed by a pkg +-- +2.38.1 + diff --git a/SOURCES/0035-DNFWorkaround-extend-the-model-by-script_args.patch b/SOURCES/0035-DNFWorkaround-extend-the-model-by-script_args.patch new file mode 100644 index 0000000..8fb757e --- /dev/null +++ b/SOURCES/0035-DNFWorkaround-extend-the-model-by-script_args.patch @@ -0,0 +1,75 @@ +From 9628970bf0d5a7db6553c57b55f4623c91330228 Mon Sep 17 00:00:00 2001 +From: Petr Stodulka +Date: Thu, 24 Nov 2022 12:48:51 +0100 +Subject: [PATCH 35/37] DNFWorkaround: extend the model by script_args + +The original model provided a possibility to execute a script +that will handle problems before the DNF / RPM transaction, +in correct contexts (overlay, host system, ..) before any use +of the upgrade dnf plugin. + +But current solution provided only the script_path field, which +suggests it should contain only the path to the script. The executed +command (inside a context) looked like this: + bash -c script_path +However we have realized we need to be able to execute a script +with additional arguments. Regarding that, introducing +the script_args field. SO the final command looks like this: + bash -c 'script_path arg1 arg2..' +when script_args are specified. The default is set to an empty +list. 
+--- + .../common/libraries/dnfplugin.py | 9 ++++++++- + .../common/models/dnfworkaround.py | 18 ++++++++++++++++-- + 2 files changed, 24 insertions(+), 3 deletions(-) + +diff --git a/repos/system_upgrade/common/libraries/dnfplugin.py b/repos/system_upgrade/common/libraries/dnfplugin.py +index 0a546637..0ef9ea9b 100644 +--- a/repos/system_upgrade/common/libraries/dnfplugin.py ++++ b/repos/system_upgrade/common/libraries/dnfplugin.py +@@ -241,7 +241,14 @@ def apply_workarounds(context=None): + for workaround in api.consume(DNFWorkaround): + try: + api.show_message('Applying transaction workaround - {}'.format(workaround.display_name)) +- context.call(['/bin/bash', '-c', workaround.script_path]) ++ if workaround.script_args: ++ cmd_str = '{script} {args}'.format( ++ script=workaround.script_path, ++ args=' '.join(workaround.script_args) ++ ) ++ else: ++ cmd_str = workaround.script_path ++ context.call(['/bin/bash', '-c', cmd_str]) + except (OSError, CalledProcessError) as e: + raise StopActorExecutionError( + message=('Failed to execute script to apply transaction workaround {display_name}.' +diff --git a/repos/system_upgrade/common/models/dnfworkaround.py b/repos/system_upgrade/common/models/dnfworkaround.py +index c921c5fc..4a813dcd 100644 +--- a/repos/system_upgrade/common/models/dnfworkaround.py ++++ b/repos/system_upgrade/common/models/dnfworkaround.py +@@ -15,6 +15,20 @@ class DNFWorkaround(Model): + topic = SystemInfoTopic + + script_path = fields.String() +- """ Absolute path to a bash script to execute """ ++ """ ++ Absolute path to a bash script to execute ++ """ ++ ++ script_args = fields.List(fields.String(), default=[]) ++ """ ++ Arguments with which the script should be executed ++ ++ In case that an argument contains a whitespace or an escapable character, ++ the argument must be already treated correctly. e.g. 
++ `script_args = ['-i', 'my\\ string'] ++ """ ++ + display_name = fields.String() +- """ Name to display for this script when executed """ ++ """ ++ Name to display for this script when executed ++ """ +-- +2.38.1 + diff --git a/SOURCES/0036-Introduce-theimportrpmgpgkeys-tool-script.patch b/SOURCES/0036-Introduce-theimportrpmgpgkeys-tool-script.patch new file mode 100644 index 0000000..b3a55b0 --- /dev/null +++ b/SOURCES/0036-Introduce-theimportrpmgpgkeys-tool-script.patch @@ -0,0 +1,57 @@ +From 2277012bc6aab1f473eda8070b48d75487a41bb7 Mon Sep 17 00:00:00 2001 +From: Petr Stodulka +Date: Thu, 24 Nov 2022 17:57:12 +0100 +Subject: [PATCH 36/37] Introduce theimportrpmgpgkeys tool script + +The script can be used to import gpg keys from a particular file +or files inside a directory. Expected to be executed like: + importrpmgpgkey +--- + .../common/tools/importrpmgpgkeys | 35 +++++++++++++++++++ + 1 file changed, 35 insertions(+) + create mode 100755 repos/system_upgrade/common/tools/importrpmgpgkeys + +diff --git a/repos/system_upgrade/common/tools/importrpmgpgkeys b/repos/system_upgrade/common/tools/importrpmgpgkeys +new file mode 100755 +index 00000000..79e5c580 +--- /dev/null ++++ b/repos/system_upgrade/common/tools/importrpmgpgkeys +@@ -0,0 +1,35 @@ ++#!/usr/bin/bash -ef ++ ++log_error() { ++ echo >&2 "Error: $1" ++} ++ ++log_info() { ++ echo >&2 "Info: $1" ++} ++ ++if [ "$#" -eq 0 ]; then ++ log_error "Missing the required path to the directory with trusted GPG keys." ++ exit 1 ++elif [ "$#" -ge 2 ]; then ++ log_error "Expected only one argument, received $#. Possibly unescaped whitespaces? '$*'" ++ exit 1 ++fi ++ ++if [ ! -e "$1" ]; then ++ log_error "The $1 directory does not exist." 
++ exit 1 ++fi ++ ++error_flag=0 ++IFS=$'\n' ++# shellcheck disable=SC2044 ++for key_file in $(find -L "$1" -type f); do ++ log_info "Importing GPG keys from: $key_file" ++ rpm --import "$key_file" || { ++ error_flag=2 ++ log_error "Unable to import GPG keys from: $key_file" ++ } ++done ++ ++exit $error_flag +-- +2.38.1 + diff --git a/SOURCES/0037-Enable-gpgcheck-during-IPU-add-nogpgcheck-CLI-option.patch b/SOURCES/0037-Enable-gpgcheck-during-IPU-add-nogpgcheck-CLI-option.patch new file mode 100644 index 0000000..58fd586 --- /dev/null +++ b/SOURCES/0037-Enable-gpgcheck-during-IPU-add-nogpgcheck-CLI-option.patch @@ -0,0 +1,1814 @@ +From 9ed71946b763e1b1e3049ebd55a0d61eba42015e Mon Sep 17 00:00:00 2001 +From: Jakub Jelen +Date: Wed, 15 Jun 2022 21:49:22 +0200 +Subject: [PATCH 37/37] Enable gpgcheck during IPU (+ add --nogpgcheck CLI + option) + +Previously the gpgcheck=0 has been enforced during the IPU as we have +not have any reasonable solution doing upgrade with allowed gpgcheck. +Previously such upgrades automatically imported any gpgkeys without +any possible check whether the actual keys are valid or not, which +could lead to the automatical import of compromised/spurious gpg keys +and user would not know about that. So using the original solution, +user was asked for the import of new keys when installing additional +content from new repositories (if keys have been different from those +used in the original system). + +To do the upgrade in the original way (without gpgcheck), execute +leapp with the `--nogpgcheck` option, or specify `LEAPP_NOGPGCHECK=1` +(envar). In such a case, all actions described below are skipped. + +The current solution enables the GPG check by default but also could +require additional actions from user to be able to upgrade. The goal +is to ensure that no additional GPG keys are imported by DNF +automatically during any action (partially resolved, read bellow). 
+To be able to achive this, we are importing gpg keys automatically +from a *trusted directory* before any dnf transaction is executed, so: + a) into the rpmdb of the target userspace container, before the + actual installation of the target userspace container + b) into overlayed rpmdb when calculating/testing the upgrade + transaction + c) into the system rpmdb right before the execution of the DNF + upgrade transaction + +The case a) is handled directly in the target_userspace_creator actor. +The other cases are handled via DNFWorkaround msg, using the +`importrpmgpgkeys` tool script. + +The *trusted directory* is in this case located under +`files/rpm-gpg/`, where the directory name is major release of the +target system in case of production releases, in other cases it has +the *beta* suffix. So e.g.: + files/rpm-gpg/8 + files/rpm-gpg/8beta +That's because production and beta repositories have different gpg +keys and it is not wanted to mix production and beta keys. Beta +repositories are used only when the target production type is beta: + LEAPP_DEVEL_TARGET_PRODUCT_TYPE=beta + +Introducing the missinggpgkeysinhibitor actor that checks gpg keys +based on `gpgkey` specified in repofiles per each used target +repository which does not explicitly specify `gpgcheck=0`. +Such a key is compared with the currently installed gpg keys in the +host rpmdb and keys inside the *trusted directory*. If the key +is not stored in any of those places, the upgrade is inhibited with +the corresponding report. User can resolve the problem installing the +missing gpg keys or storing them to the trusted directory. + +Currently supported protocols for the gpg keys are + file:/// + http:// + https:// +If a key cannot be obtained (including use of an unsupported protocol, +e.g. ftp://) the actor prompt a log, but does not generate a report +about that (so the upgrade can continue, which could later lead into +a failure during the download of packages - one of TODOs). 
+ +This is not the final commit for this feature and additional work +is expected before the new release is introduced. Regarding that, +see the code for new TODO / FIXME notes that are put into the code. + +Summary of some TODOs planned to address in followup PR: + - add checks that DNF does not import additional GPG keys during + any action + - report GPG keys that could not be checked, informing user about + possible consequences - the report should not inhibit the upgrade + - possibly introduce fallback for getting file:///... gpg keys + as currently they are obtained from the target userspace container + but if not present, the host system should be possibly checked: + - Note that if the file has been created manually (custom repo file) + most likely the gpgkey will be stored only on the host system + - and in such a case the file would need to be copied from the + host system into the container. + +Signed-off-by: Jakub Jelen +--- + commands/preupgrade/__init__.py | 1 + + commands/rerun/__init__.py | 1 + + commands/upgrade/__init__.py | 1 + + commands/upgrade/util.py | 3 + + .../actors/missinggpgkeysinhibitor/actor.py | 40 ++ + .../libraries/missinggpgkey.py | 368 ++++++++++++ + .../tests/component_test_missinggpgkey.py | 522 ++++++++++++++++++ + .../tests/unit_test_missinggpgkey.py | 209 +++++++ + .../libraries/userspacegen.py | 62 ++- + .../rpm-gpg/8/RPM-GPG-KEY-redhat-release | 89 +++ + .../rpm-gpg/8beta/RPM-GPG-KEY-redhat-beta | 29 + + .../rpm-gpg/9/RPM-GPG-KEY-redhat-release | 66 +++ + .../rpm-gpg/9beta/RPM-GPG-KEY-redhat-beta | 29 + + .../common/libraries/dnfplugin.py | 13 +- + .../common/libraries/tests/test_dnfplugin.py | 21 +- + .../common/models/targetrepositories.py | 34 ++ + 16 files changed, 1455 insertions(+), 33 deletions(-) + create mode 100644 repos/system_upgrade/common/actors/missinggpgkeysinhibitor/actor.py + create mode 100644 repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py + create mode 100644 
repos/system_upgrade/common/actors/missinggpgkeysinhibitor/tests/component_test_missinggpgkey.py + create mode 100644 repos/system_upgrade/common/actors/missinggpgkeysinhibitor/tests/unit_test_missinggpgkey.py + create mode 100644 repos/system_upgrade/common/files/rpm-gpg/8/RPM-GPG-KEY-redhat-release + create mode 100644 repos/system_upgrade/common/files/rpm-gpg/8beta/RPM-GPG-KEY-redhat-beta + create mode 100644 repos/system_upgrade/common/files/rpm-gpg/9/RPM-GPG-KEY-redhat-release + create mode 100644 repos/system_upgrade/common/files/rpm-gpg/9beta/RPM-GPG-KEY-redhat-beta + +diff --git a/commands/preupgrade/__init__.py b/commands/preupgrade/__init__.py +index d612fbb1..a1577a63 100644 +--- a/commands/preupgrade/__init__.py ++++ b/commands/preupgrade/__init__.py +@@ -30,6 +30,7 @@ from leapp.utils.output import beautify_actor_exception, report_errors, report_i + command_utils.get_upgrade_flavour())) + @command_opt('report-schema', help='Specify report schema version for leapp-report.json', + choices=['1.0.0', '1.1.0', '1.2.0'], default=get_config().get('report', 'schema')) ++@command_opt('nogpgcheck', is_flag=True, help='Disable RPM GPG checks. 
Same as yum/dnf --nogpgcheck option.') + @breadcrumbs.produces_breadcrumbs + def preupgrade(args, breadcrumbs): + util.disable_database_sync() +diff --git a/commands/rerun/__init__.py b/commands/rerun/__init__.py +index 57149571..a06dd266 100644 +--- a/commands/rerun/__init__.py ++++ b/commands/rerun/__init__.py +@@ -68,6 +68,7 @@ def rerun(args): + verbose=args.verbose, + reboot=False, + no_rhsm=False, ++ nogpgcheck=False, + channel=None, + report_schema='1.1.0', + whitelist_experimental=[], +diff --git a/commands/upgrade/__init__.py b/commands/upgrade/__init__.py +index 005538ed..8b257fa9 100644 +--- a/commands/upgrade/__init__.py ++++ b/commands/upgrade/__init__.py +@@ -36,6 +36,7 @@ from leapp.utils.output import beautify_actor_exception, report_errors, report_i + command_utils.get_upgrade_flavour())) + @command_opt('report-schema', help='Specify report schema version for leapp-report.json', + choices=['1.0.0', '1.1.0', '1.2.0'], default=get_config().get('report', 'schema')) ++@command_opt('nogpgcheck', is_flag=True, help='Disable RPM GPG checks. 
Same as yum/dnf --nogpgcheck option.') + @breadcrumbs.produces_breadcrumbs + def upgrade(args, breadcrumbs): + skip_phases_until = None +diff --git a/commands/upgrade/util.py b/commands/upgrade/util.py +index aa433786..6055c65b 100644 +--- a/commands/upgrade/util.py ++++ b/commands/upgrade/util.py +@@ -206,6 +206,9 @@ def prepare_configuration(args): + # Make sure we convert rel paths into abs ones while we know what CWD is + os.environ['LEAPP_TARGET_ISO'] = os.path.abspath(target_iso_path) + ++ if args.nogpgcheck: ++ os.environ['LEAPP_NOGPGCHECK'] = '1' ++ + # Check upgrade path and fail early if it's unsupported + target_version, flavor = command_utils.vet_upgrade_path(args) + os.environ['LEAPP_UPGRADE_PATH_TARGET_RELEASE'] = target_version +diff --git a/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/actor.py b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/actor.py +new file mode 100644 +index 00000000..6f836a5b +--- /dev/null ++++ b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/actor.py +@@ -0,0 +1,40 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import missinggpgkey ++from leapp.models import ( ++ DNFWorkaround, ++ InstalledRPM, ++ TargetUserSpaceInfo, ++ TMPTargetRepositoriesFacts, ++ UsedTargetRepositories ++) ++from leapp.reporting import Report ++from leapp.tags import IPUWorkflowTag, TargetTransactionChecksPhaseTag ++ ++ ++class MissingGpgKeysInhibitor(Actor): ++ """ ++ Check if all used target repositories have signing gpg keys ++ imported in the existing RPM DB or they are planned to be imported ++ ++ Right now, we can not check the package signatures yet, but we can do some ++ best effort estimation based on the gpgkey option in the repofile ++ and content of the existing rpm db. ++ ++ Also register the DNFWorkaround to import trusted gpg keys - files provided ++ inside the GPG_CERTS_FOLDER directory. ++ ++ In case that leapp is executed with --nogpgcheck, all actions are skipped. 
++ """ ++ ++ name = 'missing_gpg_keys_inhibitor' ++ consumes = ( ++ InstalledRPM, ++ TMPTargetRepositoriesFacts, ++ TargetUserSpaceInfo, ++ UsedTargetRepositories, ++ ) ++ produces = (DNFWorkaround, Report,) ++ tags = (IPUWorkflowTag, TargetTransactionChecksPhaseTag,) ++ ++ def process(self): ++ missinggpgkey.process() +diff --git a/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py +new file mode 100644 +index 00000000..b8b28df2 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py +@@ -0,0 +1,368 @@ ++import json ++import os ++import re ++import shutil ++import tempfile ++ ++from six.moves import urllib ++ ++from leapp import reporting ++from leapp.exceptions import StopActorExecutionError ++from leapp.libraries.common import config ++from leapp.libraries.common.config.version import get_source_major_version, get_target_major_version ++from leapp.libraries.stdlib import api, run ++from leapp.models import ( ++ DNFWorkaround, ++ InstalledRPM, ++ TargetUserSpaceInfo, ++ TMPTargetRepositoriesFacts, ++ UsedTargetRepositories ++) ++from leapp.utils.deprecation import suppress_deprecation ++ ++GPG_CERTS_FOLDER = 'rpm-gpg' ++ ++ ++def _gpg_show_keys(key_path): ++ """ ++ Show keys in given file in version-agnostic manner ++ ++ This runs gpg --show-keys (EL8) or gpg --with-fingerprints (EL7) ++ to verify the given file exists, is readable and contains valid ++ OpenPGP key data, which is printed in parsable format (--with-colons). 
++ """ ++ try: ++ cmd = ['gpg2'] ++ # RHEL7 gnupg requires different switches to get the same output ++ if get_source_major_version() == '7': ++ cmd.append('--with-fingerprint') ++ else: ++ cmd.append('--show-keys') ++ cmd += ['--with-colons', key_path] ++ # TODO: discussed, most likely the checked=False will be dropped ++ # and error will be handled in other functions ++ return run(cmd, split=True, checked=False) ++ except OSError as err: ++ # NOTE: this is hypothetic; gnupg2 has to be installed on RHEL 7+ ++ error = 'Failed to read fingerprint from GPG key {}: {}'.format(key_path, str(err)) ++ api.current_logger().error(error) ++ return {} ++ ++ ++def _parse_fp_from_gpg(output): ++ """ ++ Parse the output of gpg --show-keys --with-colons. ++ ++ Return list of 8 characters fingerprints per each gpgkey for the given ++ output from stdlib.run() or None if some error occurred. Either the ++ command return non-zero exit code, the file does not exists, its not ++ readable or does not contain any openpgp data. ++ """ ++ if not output or output['exit_code']: ++ return [] ++ ++ # we are interested in the lines of the output starting with "pub:" ++ # the colons are used for separating the fields in output like this ++ # pub:-:4096:1:999F7CBF38AB71F4:1612983048:::-:::escESC::::::23::0: ++ # ^--------------^ this is the fingerprint we need ++ # ^------^ but RPM version is just the last 8 chars lowercase ++ # Also multiple gpg keys can be stored in the file, so go through all "pub" ++ # lines ++ gpg_fps = [] ++ for line in output['stdout']: ++ if not line or not line.startswith('pub:'): ++ continue ++ parts = line.split(':') ++ if len(parts) >= 4 and len(parts[4]) == 16: ++ gpg_fps.append(parts[4][8:].lower()) ++ else: ++ api.current_logger().warning( ++ 'Cannot parse the gpg2 output. 
Line: "{}"' ++ .format(line) ++ ) ++ ++ return gpg_fps ++ ++ ++def _read_gpg_fp_from_file(key_path): ++ """ ++ Returns the list of public key fingerprints from the given file ++ ++ Logs warning in case no OpenPGP data found in the given file or it is not ++ readable for some reason. ++ """ ++ res = _gpg_show_keys(key_path) ++ fp = _parse_fp_from_gpg(res) ++ if not fp: ++ error = 'Unable to read OpenPGP keys from {}: {}'.format(key_path, res['stderr']) ++ api.current_logger().error(error) ++ return fp ++ ++ ++def _get_path_to_gpg_certs(): ++ """ ++ Get path to the directory with trusted target gpg keys in leapp tree ++ """ ++ # XXX This is copy&paste from TargetUserspaceCreator actor. ++ # Potential changes need to happen in both places to keep them in sync. ++ target_major_version = get_target_major_version() ++ target_product_type = config.get_product_type('target') ++ certs_dir = target_major_version ++ # only beta is special in regards to the GPG signing keys ++ if target_product_type == 'beta': ++ certs_dir = '{}beta'.format(target_major_version) ++ return os.path.join(api.get_common_folder_path(GPG_CERTS_FOLDER), certs_dir) ++ ++ ++def _expand_vars(path): ++ """ ++ Expand variables like $releasever and $basearch to the target system version ++ """ ++ r = path.replace('$releasever', get_target_major_version()) ++ r = r.replace('$basearch', api.current_actor().configuration.architecture) ++ return r ++ ++ ++def _get_abs_file_path(target_userspace, file_url): ++ """ ++ Return the absolute path for file_url if starts with file:/// ++ ++ If the file_url starts with 'file:///', return its absolute path to ++ the target userspace container, as such a file is supposed to be located ++ on the target system. ++ ++ For all other cases, return the originally obtained value. ++ """ ++ # TODO(pstodulk): @Jakuje: are we sure the file will be inside the ++ # target userspace container? 
What if it's a file locally stored by user ++ # and the repository is defined like that as well? Possibly it's just ++ # a corner corner case. I guess it does not have a high prio tbh, but want ++ # to be sure. ++ if not isinstance(target_userspace, TargetUserSpaceInfo): ++ # not need to cover this by tests, it's seatbelt ++ raise ValueError('target_userspace must by TargetUserSpaceInfo object') ++ ++ prefix = 'file:///' ++ if not file_url.startswith(prefix): ++ return file_url ++ return os.path.join(target_userspace.path, file_url[len(prefix):]) ++ ++ ++def _pubkeys_from_rpms(installed_rpms): ++ """ ++ Return the list of fingerprints of GPG keys in RPM DB ++ ++ This function returns short 8 characters fingerprints of trusted GPG keys ++ "installed" in the source OS RPM database. These look like normal packages ++ named "gpg-pubkey" and the fingerprint is present in the version field. ++ """ ++ return [pkg.version for pkg in installed_rpms.items if pkg.name == 'gpg-pubkey'] ++ ++ ++def _get_pubkeys(installed_rpms): ++ """ ++ Get pubkeys from installed rpms and the trusted directory ++ """ ++ pubkeys = _pubkeys_from_rpms(installed_rpms) ++ certs_path = _get_path_to_gpg_certs() ++ for certname in os.listdir(certs_path): ++ key_file = os.path.join(certs_path, certname) ++ fps = _read_gpg_fp_from_file(key_file) ++ if fps: ++ pubkeys += fps ++ # TODO: what about else: ? ++ # The warning is now logged in _read_gpg_fp_from_file. We can raise ++ # the priority of the message or convert it to report though. 
++ return pubkeys ++ ++ ++def _the_nogpgcheck_option_used(): ++ return config.get_env('LEAPP_NOGPGCHECK', False) == '1' ++ ++ ++def _consume_data(): ++ try: ++ used_target_repos = next(api.consume(UsedTargetRepositories)).repos ++ except StopIteration: ++ raise StopActorExecutionError( ++ 'Could not check for valid GPG keys', details={'details': 'No UsedTargetRepositories facts'} ++ ) ++ ++ try: ++ target_repos = next(api.consume(TMPTargetRepositoriesFacts)).repositories ++ except StopIteration: ++ raise StopActorExecutionError( ++ 'Could not check for valid GPG keys', details={'details': 'No TMPTargetRepositoriesFacts facts'} ++ ) ++ try: ++ installed_rpms = next(api.consume(InstalledRPM)) ++ except StopIteration: ++ raise StopActorExecutionError( ++ 'Could not check for valid GPG keys', details={'details': 'No InstalledRPM facts'} ++ ) ++ try: ++ target_userspace = next(api.consume(TargetUserSpaceInfo)) ++ except StopIteration: ++ raise StopActorExecutionError( ++ 'Could not check for valid GPG keys', details={'details': 'No TargetUserSpaceInfo facts'} ++ ) ++ ++ return used_target_repos, target_repos, installed_rpms, target_userspace ++ ++ ++def _get_repo_gpgkey_urls(repo): ++ """ ++ Return the list of repository gpgkeys that should be checked ++ ++ If the gpgcheck is disabled for the repo or gpgkey is not specified, ++ return an empty list. ++ ++ Returned gpgkeys are URLs with already expanded variables ++ (e.g. $releasever) as gpgkey can contain list of URLs separated by comma ++ or whitespaces. ++ """ ++ ++ repo_additional = json.loads(repo.additional_fields) ++ ++ # TODO does the case matter here? 
++ if 'gpgcheck' in repo_additional and repo_additional['gpgcheck'] in ('0', 'False', 'no'): ++ # NOTE: https://dnf.readthedocs.io/en/latest/conf_ref.html#boolean-label ++ # nothing to do with repos with enforced gpgcheck=0 ++ return [] ++ ++ if 'gpgkey' not in repo_additional: ++ # This means rpm will bail out at some time if the key is not present ++ # but we will not know if the needed key is present or not before we will have ++ # the packages at least downloaded ++ # TODO(pstodulk): possibly we should return None if gpgcheck is disabled ++ # and empty list when gpgkey is missing? So we could evaluate that better ++ # outside. ++ api.current_logger().warning( ++ 'The gpgcheck for the {} repository is enabled' ++ ' but gpgkey is not specified. Cannot be checked.' ++ .format(repo.repoid) ++ ) ++ return [] ++ ++ return re.findall(r'[^,\s]+', _expand_vars(repo_additional['gpgkey'])) ++ ++ ++def _report_missing_keys(missing_keys): ++ # TODO(pstodulk): polish the report, use FMT_LIST_SEPARATOR ++ # the list of keys should be mentioned in the summary ++ summary = ( ++ "Some of the target repositories require GPG keys that are missing from the current" ++ " RPM DB. Leapp will not be able to verify packages from these repositories during the upgrade process." ++ ) ++ hint = ( ++ "Please, review the following list and import the GPG keys before " ++ "continuing the upgrade:\n * {}".format('\n * '.join(missing_keys)) ++ ) ++ reporting.create_report( ++ [ ++ reporting.Title("Missing GPG key from target system repository"), ++ reporting.Summary(summary), ++ reporting.Severity(reporting.Severity.HIGH), ++ reporting.Groups([reporting.Groups.REPOSITORY, reporting.Groups.INHIBITOR]), ++ reporting.Remediation(hint=hint), ++ # TODO(pstodulk): @Jakuje: let's sync about it ++ # TODO update external documentation ? 
++ # reporting.ExternalLink( ++ # title=( ++ # "Customizing your Red Hat Enterprise Linux " ++ # "in-place upgrade" ++ # ), ++ # url=( ++ # "https://access.redhat.com/articles/4977891/" ++ # "#repos-known-issues" ++ # ), ++ # ), ++ ] ++ ) ++ ++ ++def register_dnfworkaround(): ++ api.produce(DNFWorkaround( ++ display_name='import trusted gpg keys to RPM DB', ++ script_path=api.current_actor().get_common_tool_path('importrpmgpgkeys'), ++ script_args=[_get_path_to_gpg_certs()], ++ )) ++ ++ ++@suppress_deprecation(TMPTargetRepositoriesFacts) ++def process(): ++ """ ++ Process the repositories and find missing signing keys ++ ++ UsedTargetRepositories doesn't contain baseurl attribute. So gathering ++ them from model TMPTargetRepositoriesFacts. ++ """ ++ # when the user decided to ignore gpg signatures on the packages, we can ignore these checks altogether ++ if _the_nogpgcheck_option_used(): ++ api.current_logger().warning('The --nogpgcheck option is used: skipping all related checks.') ++ return ++ ++ used_target_repos, target_repos, installed_rpms, target_userspace = _consume_data() ++ ++ target_repo_id_to_repositories_facts_map = { ++ repo.repoid: repo ++ for repofile in target_repos ++ for repo in repofile.data ++ } ++ ++ # These are used only for getting the installed gpg-pubkey "packages" ++ pubkeys = _get_pubkeys(installed_rpms) ++ missing_keys = list() ++ processed_gpgkey_urls = set() ++ tmpdir = None ++ for repoid in used_target_repos: ++ if repoid.repoid not in target_repo_id_to_repositories_facts_map: ++ api.current_logger().warning('The target repository {} metadata not available'.format(repoid.repoid)) ++ continue ++ ++ repo = target_repo_id_to_repositories_facts_map[repoid.repoid] ++ for gpgkey_url in _get_repo_gpgkey_urls(repo): ++ if gpgkey_url in processed_gpgkey_urls: ++ continue ++ processed_gpgkey_urls.add(gpgkey_url) ++ ++ if gpgkey_url.startswith('file:///'): ++ key_file = _get_abs_file_path(target_userspace, gpgkey_url) ++ elif 
gpgkey_url.startswith('http://') or gpgkey_url.startswith('https://'): ++ # delay creating temporary directory until we need it ++ tmpdir = tempfile.mkdtemp() if tmpdir is None else tmpdir ++ # FIXME: what to do with dummy? it's fd, that should be closed also ++ dummy, tmp_file = tempfile.mkstemp(dir=tmpdir) ++ try: ++ urllib.request.urlretrieve(gpgkey_url, tmp_file) ++ key_file = tmp_file ++ except urllib.error.URLError as err: ++ # TODO(pstodulk): create report for the repoids which cannot be checked? ++ # (no inhibitor) ++ api.current_logger().warning( ++ 'Failed to download the gpgkey {}: {}'.format(gpgkey_url, str(err))) ++ continue ++ else: ++ # TODO: report? ++ api.current_logger().error( ++ 'Skipping unknown protocol for gpgkey {}'.format(gpgkey_url)) ++ continue ++ fps = _read_gpg_fp_from_file(key_file) ++ if not fps: ++ # TODO: for now. I think it should be treated better ++ api.current_logger().warning( ++ "Cannot get any gpg key from the file: {}".format(gpgkey_url) ++ ) ++ continue ++ for fp in fps: ++ if fp not in pubkeys and gpgkey_url not in missing_keys: ++ missing_keys.append(_get_abs_file_path(target_userspace, gpgkey_url)) ++ ++ if tmpdir: ++ # clean up temporary directory with downloaded gpg keys ++ shutil.rmtree(tmpdir) ++ ++ if missing_keys: ++ _report_missing_keys(missing_keys) ++ ++ register_dnfworkaround() +diff --git a/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/tests/component_test_missinggpgkey.py b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/tests/component_test_missinggpgkey.py +new file mode 100644 +index 00000000..5af5f026 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/tests/component_test_missinggpgkey.py +@@ -0,0 +1,522 @@ ++import pytest ++from six.moves.urllib.error import URLError ++ ++from leapp import reporting ++from leapp.exceptions import StopActorExecutionError ++from leapp.libraries.actor.missinggpgkey import _pubkeys_from_rpms, process ++from 
leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked, logger_mocked, produce_mocked ++from leapp.libraries.stdlib import api ++from leapp.models import ( ++ InstalledRPM, ++ Report, ++ RepositoriesFacts, ++ RepositoryData, ++ RepositoryFile, ++ RPM, ++ TargetUserSpaceInfo, ++ TMPTargetRepositoriesFacts, ++ UsedTargetRepositories, ++ UsedTargetRepository ++) ++from leapp.utils.deprecation import suppress_deprecation ++ ++# Note, that this is not a real component test as described in the documentation, ++# but basically unit test calling the "main" function process() to simulate the ++# whole process as I was initially advised not to use these component tests. ++ ++ ++def _get_test_installedrpm_no_my_key(): ++ return [ ++ RPM( ++ name='rpm', ++ version='4.16.1.3', ++ release='17.el9', ++ epoch='0', ++ packager='Red Hat, Inc. ', ++ arch='x86_64', ++ pgpsig='RSA/SHA256, Mon 08 Aug 2022 09:10:15 AM UTC, Key ID 199e2f91fd431d51', ++ repository='BaseOS', ++ ), ++ RPM( ++ name='gpg-pubkey', ++ version='fd431d51', ++ release='4ae0493b', ++ epoch='0', ++ packager='Red Hat, Inc. (release key 2) ', ++ arch='noarch', ++ pgpsig='' ++ ), ++ RPM( ++ name='gpg-pubkey', ++ version='5a6340b3', ++ release='6229229e', ++ epoch='0', ++ packager='Red Hat, Inc. 
(auxiliary key 3) ', ++ arch='noarch', ++ pgpsig='' ++ ), ++ ] ++ ++ ++def _get_test_installedrpm(): ++ return InstalledRPM( ++ items=[ ++ RPM( ++ name='gpg-pubkey', ++ version='3228467c', ++ release='613798eb', ++ epoch='0', ++ packager='edora (epel9) ', ++ arch='noarch', ++ pgpsig='' ++ ), ++ ] + _get_test_installedrpm_no_my_key(), ++ ) ++ ++ ++def _get_test_targuserspaceinfo(path='nopath'): ++ return TargetUserSpaceInfo( ++ path=path, ++ scratch='', ++ mounts='', ++ ) ++ ++ ++def _get_test_usedtargetrepositories_list(): ++ return [ ++ UsedTargetRepository( ++ repoid='BaseOS', ++ ), ++ UsedTargetRepository( ++ repoid='AppStream', ++ ), ++ UsedTargetRepository( ++ repoid='MyAnotherRepo', ++ ), ++ ] ++ ++ ++def _get_test_usedtargetrepositories(): ++ return UsedTargetRepositories( ++ repos=_get_test_usedtargetrepositories_list() ++ ) ++ ++ ++def _get_test_target_repofile(): ++ return RepositoryFile( ++ file='/etc/yum.repos.d/target_rhel.repo', ++ data=[ ++ RepositoryData( ++ repoid='BaseOS', ++ name="RHEL BaseOS repository", ++ baseurl="/whatever/", ++ enabled=True, ++ additional_fields='{"gpgkey":"file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release"}' ++ ), ++ RepositoryData( ++ repoid='AppStream', ++ name="RHEL AppStream repository", ++ baseurl="/whatever/", ++ enabled=True, ++ additional_fields='{"gpgkey":"file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release"}' ++ ), ++ ], ++ ) ++ ++ ++def _get_test_target_repofile_additional(): ++ return RepositoryFile( ++ file='/etc/yum.repos.d/my_target_rhel.repo', ++ data=[ ++ RepositoryData( ++ repoid='MyRepo', ++ name="My repository", ++ baseurl="/whatever/", ++ enabled=False, ++ ), ++ RepositoryData( ++ repoid='MyAnotherRepo', ++ name="My another repository", ++ baseurl="/whatever/", ++ enabled=True, ++ additional_fields='{"gpgkey":"file:///etc/pki/rpm-gpg/RPM-GPG-KEY-my-release"}' ++ ), ++ ], ++ ) ++ ++ ++@suppress_deprecation(TMPTargetRepositoriesFacts) ++def _get_test_tmptargetrepositoriesfacts(): ++ return 
TMPTargetRepositoriesFacts( ++ repositories=[ ++ _get_test_target_repofile(), ++ _get_test_target_repofile_additional(), ++ ], ++ ) ++ ++ ++def test_perform_nogpgcheck(monkeypatch): ++ """ ++ Executes the "main" function with the --nogpgcheck commandline switch ++ ++ This test should skip any checks and just log a message that no checks were executed ++ """ ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked( ++ envars={'LEAPP_NOGPGCHECK': '1'}, ++ msgs=[ ++ _get_test_installedrpm(), ++ _get_test_usedtargetrepositories(), ++ _get_test_tmptargetrepositoriesfacts(), ++ ], ++ )) ++ monkeypatch.setattr(api, 'produce', produce_mocked()) ++ monkeypatch.setattr(api, 'current_logger', logger_mocked()) ++ ++ process() ++ ++ assert api.produce.called == 0 ++ assert len(api.current_logger.warnmsg) == 1 ++ assert '--nogpgcheck option is used' in api.current_logger.warnmsg[0] ++ ++ ++@pytest.mark.parametrize('msgs', [ ++ [], ++ [_get_test_installedrpm], ++ [_get_test_usedtargetrepositories], ++ [_get_test_tmptargetrepositoriesfacts], ++ # These are just incomplete lists of required facts ++ [_get_test_installedrpm(), _get_test_usedtargetrepositories()], ++ [_get_test_usedtargetrepositories(), _get_test_tmptargetrepositoriesfacts()], ++ [_get_test_installedrpm(), _get_test_tmptargetrepositoriesfacts()], ++]) ++def test_perform_missing_facts(monkeypatch, msgs): ++ """ ++ Executes the "main" function with missing required facts ++ ++ The missing facts (either RPM information, Target Repositories or their facts) cause ++ the StopActorExecutionError excepction. But this should be rare as the required facts ++ are clearly defined in the actor interface. 
++ """ ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs)) ++ monkeypatch.setattr(api, 'produce', produce_mocked()) ++ monkeypatch.setattr(api, 'current_logger', logger_mocked()) ++ # TODO: the gpg call should be mocked ++ ++ with pytest.raises(StopActorExecutionError): ++ process() ++ # nothing produced ++ assert api.produce.called == 0 ++ # not skipped by --nogpgcheck ++ assert not api.current_logger.warnmsg ++ ++ ++@suppress_deprecation(TMPTargetRepositoriesFacts) ++def _get_test_tmptargetrepositoriesfacts_partial(): ++ return [ ++ _get_test_installedrpm(), ++ _get_test_usedtargetrepositories(), ++ TMPTargetRepositoriesFacts( ++ repositories=[ ++ _get_test_target_repofile(), ++ # missing MyAnotherRepo ++ ] ++ ) ++ ] ++ ++ ++def _gpg_show_keys_mocked(key_path): ++ """ ++ Get faked output from gpg reading keys. ++ ++ This is needed to get away from dependency on the filesystem ++ """ ++ if key_path == '/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release': ++ return { ++ 'stdout': [ ++ 'pub:-:4096:1:199E2F91FD431D51:1256212795:::-:::scSC::::::23::0:', ++ 'fpr:::::::::567E347AD0044ADE55BA8A5F199E2F91FD431D51:', ++ ('uid:-::::1256212795::DC1CAEC7997B3575101BB0FCAAC6191792660D8F::' ++ 'Red Hat, Inc. (release key 2) ::::::::::0:'), ++ 'pub:-:4096:1:5054E4A45A6340B3:1646863006:::-:::scSC::::::23::0:', ++ 'fpr:::::::::7E4624258C406535D56D6F135054E4A45A6340B3:', ++ ('uid:-::::1646863006::DA7F68E3872D6E7BDCE05225E7EB5F3ACDD9699F::' ++ 'Red Hat, Inc. 
(auxiliary key 3) ::::::::::0:'), ++ ], ++ 'stderr': (), ++ 'exit_code': 0, ++ } ++ if key_path == '/etc/pki/rpm-gpg/RPM-GPG-KEY-my-release': # actually epel9 key ++ return { ++ 'stdout': [ ++ 'pub:-:4096:1:8A3872BF3228467C:1631033579:::-:::escESC::::::23::0:', ++ 'fpr:::::::::FF8AD1344597106ECE813B918A3872BF3228467C:', ++ ('uid:-::::1631033579::3EED52B2BDE50880047DB883C87B0FCAE458D111::' ++ 'Fedora (epel9) ::::::::::0:'), ++ ], ++ 'stderr': (), ++ 'exit_code': 0, ++ } ++ ++ return { ++ 'stdout': [ ++ 'pub:-:4096:1:F55AD3FB5323552A:1628617948:::-:::escESC::::::23::0:', ++ 'fpr:::::::::ACB5EE4E831C74BB7C168D27F55AD3FB5323552A:', ++ ('uid:-::::1628617948::4830BB019772421B89ABD0BBE245B89C73BF053F::' ++ 'Fedora (37) ::::::::::0:'), ++ ], ++ 'stderr': (), ++ 'exit_code': 0, ++ } ++ ++ ++def _get_pubkeys_mocked(installed_rpms): ++ """ ++ This skips getting fps from files in container for simplification ++ """ ++ return _pubkeys_from_rpms(installed_rpms) ++ ++ ++def test_perform_missing_some_repo_facts(monkeypatch): ++ """ ++ Executes the "main" function with missing repositories facts ++ ++ This is misalignment in the provided facts UsedTargetRepositories and TMPTargetRepositoriesFacts, ++ where we miss some metadata that are required by the first message. 
++ """ ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked( ++ msgs=_get_test_tmptargetrepositoriesfacts_partial()) ++ ) ++ monkeypatch.setattr(api, 'produce', produce_mocked()) ++ monkeypatch.setattr(api, 'current_logger', logger_mocked()) ++ monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) ++ monkeypatch.setattr('leapp.libraries.actor.missinggpgkey._gpg_show_keys', _gpg_show_keys_mocked) ++ ++ with pytest.raises(StopActorExecutionError): ++ process() ++ assert api.produce.called == 0 ++ assert reporting.create_report.called == 0 ++ ++ ++@suppress_deprecation(TMPTargetRepositoriesFacts) ++def _get_test_tmptargetrepositoriesfacts_https_unused(): ++ return [ ++ _get_test_targuserspaceinfo(), ++ _get_test_installedrpm(), ++ _get_test_usedtargetrepositories(), ++ TMPTargetRepositoriesFacts( ++ repositories=[ ++ _get_test_target_repofile(), ++ _get_test_target_repofile_additional(), ++ RepositoryFile( ++ file='/etc/yum.repos.d/internet.repo', ++ data=[ ++ RepositoryData( ++ repoid='ExternalRepo', ++ name="External repository", ++ baseurl="/whatever/path", ++ enabled=True, ++ additional_fields='{"gpgkey":"https://example.com/rpm-gpg/key.gpg"}', ++ ), ++ ], ++ ) ++ ], ++ ), ++ ] ++ ++ ++def test_perform_https_gpgkey_unused(monkeypatch): ++ """ ++ Executes the "main" function with repositories providing keys over internet ++ ++ The external repository is not listed in UsedTargetRepositories so the repository ++ is not checked and we should not get any error here. 
++ """ ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked( ++ msgs=_get_test_tmptargetrepositoriesfacts_https_unused() ++ )) ++ monkeypatch.setattr(api, 'produce', produce_mocked()) ++ monkeypatch.setattr(api, 'current_logger', logger_mocked()) ++ monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) ++ monkeypatch.setattr('leapp.libraries.actor.missinggpgkey._gpg_show_keys', _gpg_show_keys_mocked) ++ monkeypatch.setattr('leapp.libraries.actor.missinggpgkey._get_pubkeys', _get_pubkeys_mocked) ++ ++ process() ++ assert not api.current_logger.warnmsg ++ # This is the DNFWorkaround ++ assert api.produce.called == 1 ++ assert reporting.create_report.called == 1 ++ ++ ++@suppress_deprecation(TMPTargetRepositoriesFacts) ++def get_test_tmptargetrepositoriesfacts_https(): ++ return ( ++ _get_test_targuserspaceinfo(), ++ _get_test_installedrpm(), ++ UsedTargetRepositories( ++ repos=_get_test_usedtargetrepositories_list() + [ ++ UsedTargetRepository( ++ repoid='ExternalRepo', ++ ), ++ ] ++ ), ++ TMPTargetRepositoriesFacts( ++ repositories=[ ++ _get_test_target_repofile(), ++ _get_test_target_repofile_additional(), ++ RepositoryFile( ++ file='/etc/yum.repos.d/internet.repo', ++ data=[ ++ RepositoryData( ++ repoid='ExternalRepo', ++ name="External repository", ++ baseurl="/whatever/path", ++ enabled=True, ++ additional_fields='{"gpgkey":"https://example.com/rpm-gpg/key.gpg"}', ++ ), ++ ], ++ ) ++ ], ++ ), ++ ) ++ ++ ++@suppress_deprecation(TMPTargetRepositoriesFacts) ++def get_test_tmptargetrepositoriesfacts_ftp(): ++ return ( ++ _get_test_targuserspaceinfo(), ++ _get_test_installedrpm(), ++ UsedTargetRepositories( ++ repos=_get_test_usedtargetrepositories_list() + [ ++ UsedTargetRepository( ++ repoid='ExternalRepo', ++ ), ++ ] ++ ), ++ TMPTargetRepositoriesFacts( ++ repositories=[ ++ _get_test_target_repofile(), ++ _get_test_target_repofile_additional(), ++ RepositoryFile( ++ file='/etc/yum.repos.d/internet.repo', ++ data=[ ++ RepositoryData( ++ 
repoid='ExternalRepo', ++ name="External repository", ++ baseurl="/whatever/path", ++ enabled=True, ++ additional_fields='{"gpgkey":"ftp://example.com/rpm-gpg/key.gpg"}', ++ ), ++ ], ++ ) ++ ], ++ ), ++ ) ++ ++ ++def _urlretrive_mocked(url, filename=None, reporthook=None, data=None): ++ return filename ++ ++ ++def test_perform_https_gpgkey(monkeypatch): ++ """ ++ Executes the "main" function with repositories providing keys over internet ++ ++ This produces an report. ++ """ ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked( ++ msgs=get_test_tmptargetrepositoriesfacts_https()) ++ ) ++ monkeypatch.setattr(api, 'produce', produce_mocked()) ++ monkeypatch.setattr(api, 'current_logger', logger_mocked()) ++ monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) ++ monkeypatch.setattr('leapp.libraries.actor.missinggpgkey._gpg_show_keys', _gpg_show_keys_mocked) ++ monkeypatch.setattr('leapp.libraries.actor.missinggpgkey._get_pubkeys', _get_pubkeys_mocked) ++ monkeypatch.setattr('six.moves.urllib.request.urlretrieve', _urlretrive_mocked) ++ ++ process() ++ # This is the DNFWorkaround ++ assert api.produce.called == 1 ++ assert reporting.create_report.called == 1 ++ ++ ++def _urlretrive_mocked_urlerror(url, filename=None, reporthook=None, data=None): ++ raise URLError('error') ++ ++ ++def test_perform_https_gpgkey_urlerror(monkeypatch): ++ """ ++ Executes the "main" function with repositories providing keys over internet ++ ++ This results in warning message printed. Other than that, no report is still produced. 
++ """ ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked( ++ msgs=get_test_tmptargetrepositoriesfacts_https()) ++ ) ++ monkeypatch.setattr(api, 'produce', produce_mocked()) ++ monkeypatch.setattr(api, 'current_logger', logger_mocked()) ++ monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) ++ monkeypatch.setattr('leapp.libraries.actor.missinggpgkey._gpg_show_keys', _gpg_show_keys_mocked) ++ monkeypatch.setattr('leapp.libraries.actor.missinggpgkey._get_pubkeys', _get_pubkeys_mocked) ++ monkeypatch.setattr('six.moves.urllib.request.urlretrieve', _urlretrive_mocked_urlerror) ++ ++ process() ++ assert len(api.current_logger.warnmsg) == 1 ++ assert 'Failed to download the gpgkey https://example.com/rpm-gpg/key.gpg:' in api.current_logger.warnmsg[0] ++ # This is the DNFWorkaround ++ assert api.produce.called == 1 ++ assert reporting.create_report.called == 1 ++ ++ ++def test_perform_ftp_gpgkey(monkeypatch): ++ """ ++ Executes the "main" function with repositories providing keys over internet ++ ++ This results in error message printed. Other than that, no report is still produced. 
++ """ ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked( ++ msgs=get_test_tmptargetrepositoriesfacts_ftp()) ++ ) ++ monkeypatch.setattr(api, 'produce', produce_mocked()) ++ monkeypatch.setattr(api, 'current_logger', logger_mocked()) ++ monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) ++ monkeypatch.setattr('leapp.libraries.actor.missinggpgkey._gpg_show_keys', _gpg_show_keys_mocked) ++ monkeypatch.setattr('leapp.libraries.actor.missinggpgkey._get_pubkeys', _get_pubkeys_mocked) ++ ++ process() ++ assert len(api.current_logger.errmsg) == 1 ++ assert 'Skipping unknown protocol for gpgkey ftp://example.com/rpm-gpg/key.gpg' in api.current_logger.errmsg[0] ++ # This is the DNFWorkaround ++ assert api.produce.called == 1 ++ assert reporting.create_report.called == 1 ++ ++ ++@suppress_deprecation(TMPTargetRepositoriesFacts) ++def get_test_data_missing_key(): ++ return [ ++ _get_test_targuserspaceinfo(), ++ InstalledRPM(items=_get_test_installedrpm_no_my_key()), ++ _get_test_usedtargetrepositories(), ++ _get_test_tmptargetrepositoriesfacts(), ++ ] ++ ++ ++def test_perform_report(monkeypatch): ++ """ ++ Executes the "main" function with missing keys ++ ++ This should result in report outlining what key mentioned in target repositories is missing. 
++ """ ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked( ++ msgs=get_test_data_missing_key()) ++ ) ++ monkeypatch.setattr(api, 'produce', produce_mocked()) ++ monkeypatch.setattr(api, 'current_logger', logger_mocked()) ++ monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) ++ monkeypatch.setattr('leapp.libraries.actor.missinggpgkey._gpg_show_keys', _gpg_show_keys_mocked) ++ monkeypatch.setattr('leapp.libraries.actor.missinggpgkey._get_pubkeys', _get_pubkeys_mocked) ++ ++ process() ++ assert not api.current_logger.warnmsg ++ # This is the DNFWorkaround ++ assert api.produce.called == 1 ++ assert reporting.create_report.called == 1 +diff --git a/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/tests/unit_test_missinggpgkey.py b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/tests/unit_test_missinggpgkey.py +new file mode 100644 +index 00000000..8a46f97b +--- /dev/null ++++ b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/tests/unit_test_missinggpgkey.py +@@ -0,0 +1,209 @@ ++import os ++import shutil ++import sys ++import tempfile ++ ++import distro ++import pytest ++ ++from leapp.libraries.actor.missinggpgkey import ( ++ _expand_vars, ++ _get_path_to_gpg_certs, ++ _get_pubkeys, ++ _get_repo_gpgkey_urls, ++ _gpg_show_keys, ++ _parse_fp_from_gpg, ++ _pubkeys_from_rpms ++) ++from leapp.libraries.common.testutils import CurrentActorMocked ++from leapp.libraries.stdlib import api ++from leapp.models import InstalledRPM, RepositoryData, RPM ++ ++ ++def is_rhel7(): ++ return int(distro.major_version()) < 8 ++ ++ ++def test_gpg_show_keys(current_actor_context, monkeypatch): ++ src = '7.9' if is_rhel7() else '8.6' ++ current_actor = CurrentActorMocked(src_ver=src) ++ monkeypatch.setattr(api, 'current_actor', current_actor) ++ ++ # python2 compatibility :/ ++ dirpath = tempfile.mkdtemp() ++ ++ # using GNUPGHOME env should avoid gnupg modifying the system ++ os.environ['GNUPGHOME'] = dirpath ++ ++ try: ++ # 
non-existing file ++ non_existent_path = os.path.join(dirpath, 'nonexistent') ++ res = _gpg_show_keys(non_existent_path) ++ if is_rhel7(): ++ err_msg = "gpg: can't open `{}'".format(non_existent_path) ++ else: ++ err_msg = "gpg: can't open '{}': No such file or directory\n".format(non_existent_path) ++ assert not res['stdout'] ++ assert err_msg in res['stderr'] ++ assert res['exit_code'] == 2 ++ ++ fp = _parse_fp_from_gpg(res) ++ assert fp == [] ++ ++ # no gpg data found ++ no_key_path = os.path.join(dirpath, "no_key") ++ with open(no_key_path, "w") as f: ++ f.write('test') ++ ++ res = _gpg_show_keys(no_key_path) ++ if is_rhel7(): ++ err_msg = ('gpg: no valid OpenPGP data found.\n' ++ 'gpg: processing message failed: Unknown system error\n') ++ else: ++ err_msg = 'gpg: no valid OpenPGP data found.\n' ++ assert not res['stdout'] ++ assert res['stderr'] == err_msg ++ assert res['exit_code'] == 2 ++ ++ fp = _parse_fp_from_gpg(res) ++ assert fp == [] ++ ++ # with some test data now -- rhel9 release key ++ # rhel9_key_path = os.path.join(api.get_common_folder_path('rpm-gpg'), '9') ++ cur_dir = os.path.dirname(os.path.abspath(__file__)) ++ rhel9_key_path = os.path.join(cur_dir, '..', '..', '..', 'files', 'rpm-gpg', '9', ++ 'RPM-GPG-KEY-redhat-release') ++ res = _gpg_show_keys(rhel9_key_path) ++ finally: ++ shutil.rmtree(dirpath) ++ ++ if is_rhel7(): ++ assert len(res['stdout']) == 4 ++ assert res['stdout'][0] == ('pub:-:4096:1:199E2F91FD431D51:1256212795:::-:' ++ 'Red Hat, Inc. (release key 2) :') ++ assert res['stdout'][1] == 'fpr:::::::::567E347AD0044ADE55BA8A5F199E2F91FD431D51:' ++ assert res['stdout'][2] == ('pub:-:4096:1:5054E4A45A6340B3:1646863006:::-:' ++ 'Red Hat, Inc. 
(auxiliary key 3) :') ++ assert res['stdout'][3] == 'fpr:::::::::7E4624258C406535D56D6F135054E4A45A6340B3:' ++ else: ++ assert len(res['stdout']) == 6 ++ assert res['stdout'][0] == 'pub:-:4096:1:199E2F91FD431D51:1256212795:::-:::scSC::::::23::0:' ++ assert res['stdout'][1] == 'fpr:::::::::567E347AD0044ADE55BA8A5F199E2F91FD431D51:' ++ assert res['stdout'][2] == ('uid:-::::1256212795::DC1CAEC7997B3575101BB0FCAAC6191792660D8F::' ++ 'Red Hat, Inc. (release key 2) ::::::::::0:') ++ assert res['stdout'][3] == 'pub:-:4096:1:5054E4A45A6340B3:1646863006:::-:::scSC::::::23::0:' ++ assert res['stdout'][4] == 'fpr:::::::::7E4624258C406535D56D6F135054E4A45A6340B3:' ++ assert res['stdout'][5] == ('uid:-::::1646863006::DA7F68E3872D6E7BDCE05225E7EB5F3ACDD9699F::' ++ 'Red Hat, Inc. (auxiliary key 3) ::::::::::0:') ++ ++ err = '{}/trustdb.gpg: trustdb created'.format(dirpath) ++ assert err in res['stderr'] ++ assert res['exit_code'] == 0 ++ ++ # now, parse the output too ++ fp = _parse_fp_from_gpg(res) ++ assert fp == ['fd431d51', '5a6340b3'] ++ ++ ++@pytest.mark.parametrize('res, exp', [ ++ ({'exit_code': 2, 'stdout': '', 'stderr': ''}, []), ++ ({'exit_code': 2, 'stdout': '', 'stderr': 'bash: gpg2: command not found...'}, []), ++ ({'exit_code': 0, 'stdout': 'Some other output', 'stderr': ''}, []), ++ ({'exit_code': 0, 'stdout': ['Some other output', 'other line'], 'stderr': ''}, []), ++ ({'exit_code': 0, 'stdout': ['pub:-:4096:1:199E2F91FD431D:'], 'stderr': ''}, []), ++ ({'exit_code': 0, 'stdout': ['pub:-:4096:1:5054E4A45A6340B3:1..'], 'stderr': ''}, ['5a6340b3']), ++]) ++def test_parse_fp_from_gpg(res, exp): ++ fp = _parse_fp_from_gpg(res) ++ assert fp == exp ++ ++ ++@pytest.mark.parametrize('target, product_type, exp', [ ++ ('8.6', 'beta', '../../files/rpm-gpg/8beta'), ++ ('8.8', 'htb', '../../files/rpm-gpg/8'), ++ ('9.0', 'beta', '../../files/rpm-gpg/9beta'), ++ ('9.2', 'ga', '../../files/rpm-gpg/9'), ++]) ++def test_get_path_to_gpg_certs(current_actor_context, monkeypatch, 
target, product_type, exp): ++ current_actor = CurrentActorMocked(dst_ver=target, ++ envars={'LEAPP_DEVEL_TARGET_PRODUCT_TYPE': product_type}) ++ monkeypatch.setattr(api, 'current_actor', current_actor) ++ ++ p = _get_path_to_gpg_certs() ++ assert p == exp ++ ++ ++@pytest.mark.parametrize('data, exp', [ ++ ('bare string', 'bare string'), ++ ('with dollar$$$', 'with dollar$$$'), ++ ('path/with/$basearch/something', 'path/with/x86_64/something'), ++ ('path/with/$releasever/something', 'path/with/9/something'), ++ ('path/with/$releasever/$basearch', 'path/with/9/x86_64'), ++ ('path/with/$releasever/$basearch', 'path/with/9/x86_64'), ++]) ++def test_expand_vars(monkeypatch, data, exp): ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(dst_ver='9.1')) # x86_64 arch is default ++ res = _expand_vars(data) ++ assert res == exp ++ ++ ++def _get_test_installed_rmps(): ++ return InstalledRPM( ++ items=[ ++ RPM(name='gpg-pubkey', ++ version='9570ff31', ++ release='5e3006fb', ++ epoch='0', ++ packager='Fedora (33) ', ++ arch='noarch', ++ pgpsig=''), ++ RPM(name='rpm', ++ version='4.17.1', ++ release='3.fc35', ++ epoch='0', ++ packager='Fedora Project', ++ arch='x86_64', ++ pgpsig='RSA/SHA256, Tue 02 Aug 2022 03:12:43 PM CEST, Key ID db4639719867c58f'), ++ ], ++ ) ++ ++ ++def test_pubkeys_from_rpms(): ++ installed_rpm = _get_test_installed_rmps() ++ assert _pubkeys_from_rpms(installed_rpm) == ['9570ff31'] ++ ++ ++# @pytest.mark.parametrize('target, product_type, exp', [ ++# ('8.6', 'beta', ['F21541EB']), ++# ('8.8', 'htb', ['FD431D51', 'D4082792']), # ga ++# ('9.0', 'beta', ['F21541EB']), ++# ('9.2', 'ga', ['FD431D51', '5A6340B3']), ++# ]) ++# Def test_get_pubkeys(current_actor_context, monkeypatch, target, product_type, exp): ++# current_actor = CurrentActorMocked(dst_ver=target, ++# envars={'LEAPP_DEVEL_TARGET_PRODUCT_TYPE': product_type}) ++# monkeypatch.setattr(api, 'current_actor', current_actor) ++# installed_rpm = _get_test_installed_rmps() ++# ++# p = 
_get_pubkeys(installed_rpm) ++# assert '9570ff31' in p ++# for x in exp: ++# assert x in p ++ ++ ++@pytest.mark.parametrize('repo, exp', [ ++ (RepositoryData(repoid='dummy', name='name', additional_fields='{"gpgcheck":0}'), []), ++ (RepositoryData(repoid='dummy', name='name', additional_fields='{"gpgcheck":"no"}'), []), ++ (RepositoryData(repoid='dummy', name='name', additional_fields='{"gpgcheck":"False"}'), []), ++ (RepositoryData(repoid='dummy', name='name', additional_fields='{"gpgkey":"dummy"}'), ["dummy"]), ++ (RepositoryData(repoid='dummy', name='name', additional_fields='{"gpgkey":"dummy, another"}'), ++ ["dummy", "another"]), ++ (RepositoryData(repoid='dummy', name='name', additional_fields='{"gpgkey":"dummy\\nanother"}'), ++ ["dummy", "another"]), ++ (RepositoryData(repoid='dummy', name='name', additional_fields='{"gpgkey":"$releasever"}'), ++ ["9"]), ++]) ++def test_get_repo_gpgkey_urls(monkeypatch, repo, exp): ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(dst_ver='9.1')) ++ keys = _get_repo_gpgkey_urls(repo) ++ assert keys == exp +diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py +index 0415f0fe..f2391ee8 100644 +--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py ++++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py +@@ -52,6 +52,7 @@ from leapp.utils.deprecation import suppress_deprecation + # Issue: #486 + + PROD_CERTS_FOLDER = 'prod-certs' ++GPG_CERTS_FOLDER = 'rpm-gpg' + PERSISTENT_PACKAGE_CACHE_DIR = '/var/lib/leapp/persistent_package_cache' + + +@@ -136,32 +137,65 @@ def _backup_to_persistent_package_cache(userspace_dir): + target_context.copytree_from('/var/cache/dnf', PERSISTENT_PACKAGE_CACHE_DIR) + + ++def _the_nogpgcheck_option_used(): ++ return get_env('LEAPP_NOGPGCHECK', False) == '1' ++ ++ ++def 
_get_path_to_gpg_certs(target_major_version): ++ target_product_type = get_product_type('target') ++ certs_dir = target_major_version ++ # only beta is special in regards to the GPG signing keys ++ if target_product_type == 'beta': ++ certs_dir = '{}beta'.format(target_major_version) ++ return os.path.join(api.get_common_folder_path(GPG_CERTS_FOLDER), certs_dir) ++ ++ ++def _import_gpg_keys(context, install_root_dir, target_major_version): ++ certs_path = _get_path_to_gpg_certs(target_major_version) ++ # Import the RHEL X+1 GPG key to be able to verify the installation of initial packages ++ try: ++ # Import also any other keys provided by the customer in the same directory ++ for certname in os.listdir(certs_path): ++ cmd = ['rpm', '--root', install_root_dir, '--import', os.path.join(certs_path, certname)] ++ context.call(cmd, callback_raw=utils.logging_handler) ++ except CalledProcessError as exc: ++ raise StopActorExecutionError( ++ message=( ++ 'Unable to import GPG certificates to install RHEL {} userspace packages.' ++ .format(target_major_version) ++ ), ++ details={'details': str(exc), 'stderr': exc.stderr} ++ ) ++ ++ + def prepare_target_userspace(context, userspace_dir, enabled_repos, packages): + """ + Implement the creation of the target userspace. 
+ """ + _backup_to_persistent_package_cache(userspace_dir) + +- target_major_version = get_target_major_version() + run(['rm', '-rf', userspace_dir]) + _create_target_userspace_directories(userspace_dir) +- with mounting.BindMount( +- source=userspace_dir, target=os.path.join(context.base_dir, 'el{}target'.format(target_major_version)) +- ): ++ ++ target_major_version = get_target_major_version() ++ install_root_dir = '/el{}target'.format(target_major_version) ++ with mounting.BindMount(source=userspace_dir, target=os.path.join(context.base_dir, install_root_dir.lstrip('/'))): + _restore_persistent_package_cache(userspace_dir) ++ if not _the_nogpgcheck_option_used(): ++ _import_gpg_keys(context, install_root_dir, target_major_version) + + repos_opt = [['--enablerepo', repo] for repo in enabled_repos] + repos_opt = list(itertools.chain(*repos_opt)) +- cmd = ['dnf', +- 'install', +- '-y', +- '--nogpgcheck', +- '--setopt=module_platform_id=platform:el{}'.format(target_major_version), +- '--setopt=keepcache=1', +- '--releasever', api.current_actor().configuration.version.target, +- '--installroot', '/el{}target'.format(target_major_version), +- '--disablerepo', '*' +- ] + repos_opt + packages ++ cmd = ['dnf', 'install', '-y'] ++ if _the_nogpgcheck_option_used(): ++ cmd.append('--nogpgcheck') ++ cmd += [ ++ '--setopt=module_platform_id=platform:el{}'.format(target_major_version), ++ '--setopt=keepcache=1', ++ '--releasever', api.current_actor().configuration.version.target, ++ '--installroot', install_root_dir, ++ '--disablerepo', '*' ++ ] + repos_opt + packages + if config.is_verbose(): + cmd.append('-v') + if rhsm.skip_rhsm(): +diff --git a/repos/system_upgrade/common/files/rpm-gpg/8/RPM-GPG-KEY-redhat-release b/repos/system_upgrade/common/files/rpm-gpg/8/RPM-GPG-KEY-redhat-release +new file mode 100644 +index 00000000..6744de9e +--- /dev/null ++++ b/repos/system_upgrade/common/files/rpm-gpg/8/RPM-GPG-KEY-redhat-release +@@ -0,0 +1,89 @@ ++The following public key can 
be used to verify RPM packages built and ++signed by Red Hat, Inc. This key is used for packages in Red Hat ++products shipped after November 2009, and for all updates to those ++products. ++ ++Questions about this key should be sent to security@redhat.com. ++ ++pub 4096R/FD431D51 2009-10-22 Red Hat, Inc. (release key 2) ++ ++-----BEGIN PGP PUBLIC KEY BLOCK----- ++Version: GnuPG v1.2.6 (GNU/Linux) ++ ++mQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF ++0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF ++0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c ++u7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh ++XGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H ++5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW ++9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj ++/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1 ++PcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY ++HVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF ++buhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB ++tDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0 ++LmNvbT6JAjYEEwECACAFAkrgSTsCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAK ++CRAZni+R/UMdUWzpD/9s5SFR/ZF3yjY5VLUFLMXIKUztNN3oc45fyLdTI3+UClKC ++2tEruzYjqNHhqAEXa2sN1fMrsuKec61Ll2NfvJjkLKDvgVIh7kM7aslNYVOP6BTf ++C/JJ7/ufz3UZmyViH/WDl+AYdgk3JqCIO5w5ryrC9IyBzYv2m0HqYbWfphY3uHw5 ++un3ndLJcu8+BGP5F+ONQEGl+DRH58Il9Jp3HwbRa7dvkPgEhfFR+1hI+Btta2C7E ++0/2NKzCxZw7Lx3PBRcU92YKyaEihfy/aQKZCAuyfKiMvsmzs+4poIX7I9NQCJpyE ++IGfINoZ7VxqHwRn/d5mw2MZTJjbzSf+Um9YJyA0iEEyD6qjriWQRbuxpQXmlAJbh ++8okZ4gbVFv1F8MzK+4R8VvWJ0XxgtikSo72fHjwha7MAjqFnOq6eo6fEC/75g3NL ++Ght5VdpGuHk0vbdENHMC8wS99e5qXGNDued3hlTavDMlEAHl34q2H9nakTGRF5Ki ++JUfNh3DVRGhg8cMIti21njiRh7gyFI2OccATY7bBSr79JhuNwelHuxLrCFpY7V25 ++OFktl15jZJaMxuQBqYdBgSay2G0U6D1+7VsWufpzd/Abx1/c3oi9ZaJvW22kAggq 
++dzdA27UUYjWvx42w9menJwh/0jeQcTecIUd0d0rFcw/c1pvgMMl/Q73yzKgKYw== ++=zbHE ++-----END PGP PUBLIC KEY BLOCK----- ++-----BEGIN PGP PUBLIC KEY BLOCK----- ++ ++mQINBFsy23UBEACUKSphFEIEvNpy68VeW4Dt6qv+mU6am9a2AAl10JANLj1oqWX+ ++oYk3en1S6cVe2qehSL5DGVa3HMUZkP3dtbD4SgzXzxPodebPcr4+0QNWigkUisri ++XGL5SCEcOP30zDhZvg+4mpO2jMi7Kc1DLPzBBkgppcX91wa0L1pQzBcvYMPyV/Dh ++KbQHR75WdkP6OA2JXdfC94nxYq+2e0iPqC1hCP3Elh+YnSkOkrawDPmoB1g4+ft/ ++xsiVGVy/W0ekXmgvYEHt6si6Y8NwXgnTMqxeSXQ9YUgVIbTpsxHQKGy76T5lMlWX ++4LCOmEVomBJg1SqF6yi9Vu8TeNThaDqT4/DddYInd0OO69s0kGIXalVgGYiW2HOD ++x2q5R1VGCoJxXomz+EbOXY+HpKPOHAjU0DB9MxbU3S248LQ69nIB5uxysy0PSco1 ++sdZ8sxRNQ9Dw6on0Nowx5m6Thefzs5iK3dnPGBqHTT43DHbnWc2scjQFG+eZhe98 ++Ell/kb6vpBoY4bG9/wCG9qu7jj9Z+BceCNKeHllbezVLCU/Hswivr7h2dnaEFvPD ++O4GqiWiwOF06XaBMVgxA8p2HRw0KtXqOpZk+o+sUvdPjsBw42BB96A1yFX4jgFNA ++PyZYnEUdP6OOv9HSjnl7k/iEkvHq/jGYMMojixlvXpGXhnt5jNyc4GSUJQARAQAB ++tDNSZWQgSGF0LCBJbmMuIChhdXhpbGlhcnkga2V5KSA8c2VjdXJpdHlAcmVkaGF0 ++LmNvbT6JAjkEEwECACMFAlsy23UCGwMHCwkIBwMCAQYVCAIJCgsEFgIDAQIeAQIX ++gAAKCRD3b2bD1AgnknqOD/9fB2ASuG2aJIiap4kK58R+RmOVM4qgclAnaG57+vjI ++nKvyfV3NH/keplGNRxwqHekfPCqvkpABwhdGEXIE8ILqnPewIMr6PZNZWNJynZ9i ++eSMzVuCG7jDoGyQ5/6B0f6xeBtTeBDiRl7+Alehet1twuGL1BJUYG0QuLgcEzkaE ++/gkuumeVcazLzz7L12D22nMk66GxmgXfqS5zcbqOAuZwaA6VgSEgFdV2X2JU79zS ++BQJXv7NKc+nDXFG7M7EHjY3Rma3HXkDbkT8bzh9tJV7Z7TlpT829pStWQyoxKCVq ++sEX8WsSapTKA3P9YkYCwLShgZu4HKRFvHMaIasSIZWzLu+RZH/4yyHOhj0QB7XMY ++eHQ6fGSbtJ+K6SrpHOOsKQNAJ0hVbSrnA1cr5+2SDfel1RfYt0W9FA6DoH/S5gAR ++dzT1u44QVwwp3U+eFpHphFy//uzxNMtCjjdkpzhYYhOCLNkDrlRPb+bcoL/6ePSr ++016PA7eEnuC305YU1Ml2WcCn7wQV8x90o33klJmEkWtXh3X39vYtI4nCPIvZn1eP ++Vy+F+wWt4vN2b8oOdlzc2paOembbCo2B+Wapv5Y9peBvlbsDSgqtJABfK8KQq/jK ++Yl3h5elIa1I3uNfczeHOnf1enLOUOlq630yeM/yHizz99G1g+z/guMh5+x/OHraW ++iLkCDQRbMtt1ARAA1lNsWklhS9LoBdolTVtg65FfdFJr47pzKRGYIoGLbcJ155ND ++G+P8UrM06E/ah06EEWuvu2YyyYAz1iYGsCwHAXtbEJh+1tF0iOVx2vnZPgtIGE9V ++P95V5ZvWvB3bdke1z8HadDA+/Ve7fbwXXLa/z9QhSQgsJ8NS8KYnDDjI4EvQtv0i 
++PVLY8+u8z6VyiV9RJyn8UEZEJdbFDF9AZAT8103w8SEo/cvIoUbVKZLGcXdAIjCa ++y04u6jsrMp9UGHZX7+srT+9YHDzQixei4IdmxUcqtiNR2/bFHpHCu1pzYjXj968D ++8Ng2txBXDgs16BF/9l++GWKz2dOSH0jdS6sFJ/Dmg7oYnJ2xKSJEmcnV8Z0M1n4w ++XR1t/KeKZe3aR+RXCAEVC5dQ3GbRW2+WboJ6ldgFcVcOv6iOSWP9TrLzFPOpCsIr ++nHE+cMBmPHq3dUm7KeYXQ6wWWmtXlw6widf7cBcGFeELpuU9klzqdKze8qo2oMkf ++rfxIq8zdciPxZXb/75dGWs6dLHQmDpo4MdQVskw5vvwHicMpUpGpxkX7X1XAfdQf ++yIHLGT4ZXuMLIMUPdzJE0Vwt/RtJrZ+feLSv/+0CkkpGHORYroGwIBrJ2RikgcV2 ++bc98V/27Kz2ngUCEwnmlhIcrY4IGAAZzUAl0GLHSevPbAREu4fDW4Y+ztOsAEQEA ++AYkCHwQYAQIACQUCWzLbdQIbDAAKCRD3b2bD1AgnkusfD/9U4sPtZfMw6cII167A ++XRZOO195G7oiAnBUw5AW6EK0SAHVZcuW0LMMXnGe9f4UsEUgCNwo5mvLWPxzKqFq ++6/G3kEZVFwZ0qrlLoJPeHNbOcfkeZ9NgD/OhzQmdylM0IwGM9DMrm2YS4EVsmm2b ++53qKIfIyysp1yAGcTnBwBbZ85osNBl2KRDIPhMs0bnmGB7IAvwlSb+xm6vWKECkO ++lwQDO5Kg8YZ8+Z3pn/oS688t/fPXvWLZYUqwR63oWfIaPJI7Ahv2jJmgw1ofL81r ++2CE3T/OydtUeGLzqWJAB8sbUgT3ug0cjtxsHuroQBSYBND3XDb/EQh5GeVVnGKKH ++gESLFAoweoNjDSXrlIu1gFjCDHF4CqBRmNYKrNQjLmhCrSfwkytXESJwlLzFKY8P ++K1yZyTpDC9YK0G7qgrk7EHmH9JAZTQ5V65pp0vR9KvqTU5ewkQDIljD2f3FIqo2B ++SKNCQE+N6NjWaTeNlU75m+yZocKObSPg0zS8FAuSJetNtzXA7ouqk34OoIMQj4gq ++Unh/i1FcZAd4U6Dtr9aRZ6PeLlm6MJ/h582L6fJLNEu136UWDtJj5eBYEzX13l+d ++SC4PEHx7ZZRwQKptl9NkinLZGJztg175paUu8C34sAv+SQnM20c0pdOXAq9GKKhi ++vt61kpkXoRGxjTlc6h+69aidSg== ++=ls8J ++-----END PGP PUBLIC KEY BLOCK----- +diff --git a/repos/system_upgrade/common/files/rpm-gpg/8beta/RPM-GPG-KEY-redhat-beta b/repos/system_upgrade/common/files/rpm-gpg/8beta/RPM-GPG-KEY-redhat-beta +new file mode 100644 +index 00000000..1efd1509 +--- /dev/null ++++ b/repos/system_upgrade/common/files/rpm-gpg/8beta/RPM-GPG-KEY-redhat-beta +@@ -0,0 +1,29 @@ ++-----BEGIN PGP PUBLIC KEY BLOCK----- ++Version: GnuPG v1.2.6 (GNU/Linux) ++ ++mQINBEmkAzABEAC2/c7bP1lHQ3XScxbIk0LQWe1YOiibQBRLwf8Si5PktgtuPibT ++kKpZjw8p4D+fM7jD1WUzUE0X7tXg2l/eUlMM4dw6XJAQ1AmEOtlwSg7rrMtTvM0A ++BEtI7Km6fC6sU6RtBMdcqD1cH/6dbsfh8muznVA7UlX+PRBHVzdWzj6y8h84dBjo 
++gzcbYu9Hezqgj/lLzicqsSZPz9UdXiRTRAIhp8V30BD8uRaaa0KDDnD6IzJv3D9P ++xQWbFM4Z12GN9LyeZqmD7bpKzZmXG/3drvfXVisXaXp3M07t3NlBa3Dt8NFIKZ0D ++FRXBz5bvzxRVmdH6DtkDWXDPOt+Wdm1rZrCOrySFpBZQRpHw12eo1M1lirANIov7 ++Z+V1Qh/aBxj5EUu32u9ZpjAPPNtQF6F/KjaoHHHmEQAuj4DLex4LY646Hv1rcv2i ++QFuCdvLKQGSiFBrfZH0j/IX3/0JXQlZzb3MuMFPxLXGAoAV9UP/Sw/WTmAuTzFVm ++G13UYFeMwrToOiqcX2VcK0aC1FCcTP2z4JW3PsWvU8rUDRUYfoXovc7eg4Vn5wHt ++0NBYsNhYiAAf320AUIHzQZYi38JgVwuJfFu43tJZE4Vig++RQq6tsEx9Ftz3EwRR ++fJ9z9mEvEiieZm+vbOvMvIuimFVPSCmLH+bI649K8eZlVRWsx3EXCVb0nQARAQAB ++tDBSZWQgSGF0LCBJbmMuIChiZXRhIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0LmNv ++bT6JAjYEEwECACAFAkpSM+cCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRCT ++ioDK8hVB6/9tEAC0+KmzeKceXQ/GTUoU6jy9vtkFCFrmv+c7ol4XpdTt0QhqBOwy ++6m2mKWwmm8KfYfy0cADQ4y/EcoXl7FtFBwYmkCuEQGXhTDn9DvVjhooIq59LEMBQ ++OW879RwwzRIZ8ebbjMUjDPF5MfPQqP2LBu9N4KvXlZp4voykwuuaJ+cbsKZR6pZ6 ++0RQKPHKP+NgUFC0fff7XY9cuOZZWFAeKRhLN2K7bnRHKxp+kELWb6R9ZfrYwZjWc ++MIPbTd1khE53L4NTfpWfAnJRtkPSDOKEGVlVLtLq4HEAxQt07kbslqISRWyXER3u ++QOJj64D1ZiIMz6t6uZ424VE4ry9rBR0Jz55cMMx5O/ni9x3xzFUgH8Su2yM0r3jE ++Rf24+tbOaPf7tebyx4OKe+JW95hNVstWUDyGbs6K9qGfI/pICuO1nMMFTo6GqzQ6 ++DwLZvJ9QdXo7ujEtySZnfu42aycaQ9ZLC2DOCQCUBY350Hx6FLW3O546TAvpTfk0 ++B6x+DV7mJQH7MGmRXQsE7TLBJKjq28Cn4tVp04PmybQyTxZdGA/8zY6pPl6xyVMH ++V68hSBKEVT/rlouOHuxfdmZva1DhVvUC6Xj7+iTMTVJUAq/4Uyn31P1OJmA2a0PT ++CAqWkbJSgKFccsjPoTbLyxhuMSNkEZFHvlZrSK9vnPzmfiRH0Orx3wYpMQ== ++=21pb ++-----END PGP PUBLIC KEY BLOCK----- +diff --git a/repos/system_upgrade/common/files/rpm-gpg/9/RPM-GPG-KEY-redhat-release b/repos/system_upgrade/common/files/rpm-gpg/9/RPM-GPG-KEY-redhat-release +new file mode 100644 +index 00000000..afd9e05a +--- /dev/null ++++ b/repos/system_upgrade/common/files/rpm-gpg/9/RPM-GPG-KEY-redhat-release +@@ -0,0 +1,66 @@ ++The following public key can be used to verify RPM packages built and ++signed by Red Hat, Inc. This key is used for packages in Red Hat ++products shipped after November 2009, and for all updates to those ++products. 
++ ++Questions about this key should be sent to security@redhat.com. ++ ++pub 4096R/FD431D51 2009-10-22 Red Hat, Inc. (release key 2) ++ ++-----BEGIN PGP PUBLIC KEY BLOCK----- ++ ++mQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF ++0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF ++0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c ++u7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh ++XGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H ++5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW ++9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj ++/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1 ++PcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY ++HVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF ++buhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB ++tDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0 ++LmNvbT6JAjYEEwECACAFAkrgSTsCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAK ++CRAZni+R/UMdUWzpD/9s5SFR/ZF3yjY5VLUFLMXIKUztNN3oc45fyLdTI3+UClKC ++2tEruzYjqNHhqAEXa2sN1fMrsuKec61Ll2NfvJjkLKDvgVIh7kM7aslNYVOP6BTf ++C/JJ7/ufz3UZmyViH/WDl+AYdgk3JqCIO5w5ryrC9IyBzYv2m0HqYbWfphY3uHw5 ++un3ndLJcu8+BGP5F+ONQEGl+DRH58Il9Jp3HwbRa7dvkPgEhfFR+1hI+Btta2C7E ++0/2NKzCxZw7Lx3PBRcU92YKyaEihfy/aQKZCAuyfKiMvsmzs+4poIX7I9NQCJpyE ++IGfINoZ7VxqHwRn/d5mw2MZTJjbzSf+Um9YJyA0iEEyD6qjriWQRbuxpQXmlAJbh ++8okZ4gbVFv1F8MzK+4R8VvWJ0XxgtikSo72fHjwha7MAjqFnOq6eo6fEC/75g3NL ++Ght5VdpGuHk0vbdENHMC8wS99e5qXGNDued3hlTavDMlEAHl34q2H9nakTGRF5Ki ++JUfNh3DVRGhg8cMIti21njiRh7gyFI2OccATY7bBSr79JhuNwelHuxLrCFpY7V25 ++OFktl15jZJaMxuQBqYdBgSay2G0U6D1+7VsWufpzd/Abx1/c3oi9ZaJvW22kAggq ++dzdA27UUYjWvx42w9menJwh/0jeQcTecIUd0d0rFcw/c1pvgMMl/Q73yzKgKYw== ++=zbHE ++-----END PGP PUBLIC KEY BLOCK----- ++-----BEGIN PGP PUBLIC KEY BLOCK----- ++ ++mQINBGIpIp4BEAC/o5e1WzLIsS6/JOQCs4XYATYTcf6B6ALzcP05G0W3uRpUQSrL 
++FRKNrU8ZCelm/B+XSh2ljJNeklp2WLxYENDOsftDXGoyLr2hEkI5OyK267IHhFNJ ++g+BN+T5Cjh4ZiiWij6o9F7x2ZpxISE9M4iI80rwSv1KOnGSw5j2zD2EwoMjTVyVE ++/t3s5XJxnDclB7ZqL+cgjv0mWUY/4+b/OoRTkhq7b8QILuZp75Y64pkrndgakm1T ++8mAGXV02mEzpNj9DyAJdUqa11PIhMJMxxHOGHJ8CcHZ2NJL2e7yJf4orTj+cMhP5 ++LzJcVlaXnQYu8Zkqa0V6J1Qdj8ZXL72QsmyicRYXAtK9Jm5pvBHuYU2m6Ja7dBEB ++Vkhe7lTKhAjkZC5ErPmANNS9kPdtXCOpwN1lOnmD2m04hks3kpH9OTX7RkTFUSws ++eARAfRID6RLfi59B9lmAbekecnsMIFMx7qR7ZKyQb3GOuZwNYOaYFevuxusSwCHv ++4FtLDIhk+Fge+EbPdEva+VLJeMOb02gC4V/cX/oFoPkxM1A5LHjkuAM+aFLAiIRd ++Np/tAPWk1k6yc+FqkcDqOttbP4ciiXb9JPtmzTCbJD8lgH0rGp8ufyMXC9x7/dqX ++TjsiGzyvlMnrkKB4GL4DqRFl8LAR02A3846DD8CAcaxoXggL2bJCU2rgUQARAQAB ++tDVSZWQgSGF0LCBJbmMuIChhdXhpbGlhcnkga2V5IDMpIDxzZWN1cml0eUByZWRo ++YXQuY29tPokCUgQTAQgAPBYhBH5GJCWMQGU11W1vE1BU5KRaY0CzBQJiKSKeAhsD ++BQsJCAcCAyICAQYVCgkICwIEFgIDAQIeBwIXgAAKCRBQVOSkWmNAsyBfEACuTN/X ++YR+QyzeRw0pXcTvMqzNE4DKKr97hSQEwZH1/v1PEPs5O3psuVUm2iam7bqYwG+ry ++EskAgMHi8AJmY0lioQD5/LTSLTrM8UyQnU3g17DHau1NHIFTGyaW4a7xviU4C2+k ++c6X0u1CPHI1U4Q8prpNcfLsldaNYlsVZtUtYSHKPAUcswXWliW7QYjZ5tMSbu8jR ++OMOc3mZuf0fcVFNu8+XSpN7qLhRNcPv+FCNmk/wkaQfH4Pv+jVsOgHqkV3aLqJeN ++kNUnpyEKYkNqo7mNfNVWOcl+Z1KKKwSkIi3vg8maC7rODsy6IX+Y96M93sqYDQom ++aaWue2gvw6thEoH4SaCrCL78mj2YFpeg1Oew4QwVcBnt68KOPfL9YyoOicNs4Vuu ++fb/vjU2ONPZAeepIKA8QxCETiryCcP43daqThvIgdbUIiWne3gae6eSj0EuUPoYe ++H5g2Lw0qdwbHIOxqp2kvN96Ii7s1DK3VyhMt/GSPCxRnDRJ8oQKJ2W/I1IT5VtiU ++zMjjq5JcYzRPzHDxfVzT9CLeU/0XQ+2OOUAiZKZ0dzSyyVn8xbpviT7iadvjlQX3 ++CINaPB+d2Kxa6uFWh+ZYOLLAgZ9B8NKutUHpXN66YSfe79xFBSFWKkJ8cSIMk13/ ++Ifs7ApKlKCCRDpwoDqx/sjIaj1cpOfLHYjnefg== ++=UZd/ ++-----END PGP PUBLIC KEY BLOCK----- +diff --git a/repos/system_upgrade/common/files/rpm-gpg/9beta/RPM-GPG-KEY-redhat-beta b/repos/system_upgrade/common/files/rpm-gpg/9beta/RPM-GPG-KEY-redhat-beta +new file mode 100644 +index 00000000..1efd1509 +--- /dev/null ++++ b/repos/system_upgrade/common/files/rpm-gpg/9beta/RPM-GPG-KEY-redhat-beta +@@ -0,0 +1,29 @@ ++-----BEGIN PGP PUBLIC KEY BLOCK----- ++Version: 
GnuPG v1.2.6 (GNU/Linux) ++ ++mQINBEmkAzABEAC2/c7bP1lHQ3XScxbIk0LQWe1YOiibQBRLwf8Si5PktgtuPibT ++kKpZjw8p4D+fM7jD1WUzUE0X7tXg2l/eUlMM4dw6XJAQ1AmEOtlwSg7rrMtTvM0A ++BEtI7Km6fC6sU6RtBMdcqD1cH/6dbsfh8muznVA7UlX+PRBHVzdWzj6y8h84dBjo ++gzcbYu9Hezqgj/lLzicqsSZPz9UdXiRTRAIhp8V30BD8uRaaa0KDDnD6IzJv3D9P ++xQWbFM4Z12GN9LyeZqmD7bpKzZmXG/3drvfXVisXaXp3M07t3NlBa3Dt8NFIKZ0D ++FRXBz5bvzxRVmdH6DtkDWXDPOt+Wdm1rZrCOrySFpBZQRpHw12eo1M1lirANIov7 ++Z+V1Qh/aBxj5EUu32u9ZpjAPPNtQF6F/KjaoHHHmEQAuj4DLex4LY646Hv1rcv2i ++QFuCdvLKQGSiFBrfZH0j/IX3/0JXQlZzb3MuMFPxLXGAoAV9UP/Sw/WTmAuTzFVm ++G13UYFeMwrToOiqcX2VcK0aC1FCcTP2z4JW3PsWvU8rUDRUYfoXovc7eg4Vn5wHt ++0NBYsNhYiAAf320AUIHzQZYi38JgVwuJfFu43tJZE4Vig++RQq6tsEx9Ftz3EwRR ++fJ9z9mEvEiieZm+vbOvMvIuimFVPSCmLH+bI649K8eZlVRWsx3EXCVb0nQARAQAB ++tDBSZWQgSGF0LCBJbmMuIChiZXRhIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0LmNv ++bT6JAjYEEwECACAFAkpSM+cCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRCT ++ioDK8hVB6/9tEAC0+KmzeKceXQ/GTUoU6jy9vtkFCFrmv+c7ol4XpdTt0QhqBOwy ++6m2mKWwmm8KfYfy0cADQ4y/EcoXl7FtFBwYmkCuEQGXhTDn9DvVjhooIq59LEMBQ ++OW879RwwzRIZ8ebbjMUjDPF5MfPQqP2LBu9N4KvXlZp4voykwuuaJ+cbsKZR6pZ6 ++0RQKPHKP+NgUFC0fff7XY9cuOZZWFAeKRhLN2K7bnRHKxp+kELWb6R9ZfrYwZjWc ++MIPbTd1khE53L4NTfpWfAnJRtkPSDOKEGVlVLtLq4HEAxQt07kbslqISRWyXER3u ++QOJj64D1ZiIMz6t6uZ424VE4ry9rBR0Jz55cMMx5O/ni9x3xzFUgH8Su2yM0r3jE ++Rf24+tbOaPf7tebyx4OKe+JW95hNVstWUDyGbs6K9qGfI/pICuO1nMMFTo6GqzQ6 ++DwLZvJ9QdXo7ujEtySZnfu42aycaQ9ZLC2DOCQCUBY350Hx6FLW3O546TAvpTfk0 ++B6x+DV7mJQH7MGmRXQsE7TLBJKjq28Cn4tVp04PmybQyTxZdGA/8zY6pPl6xyVMH ++V68hSBKEVT/rlouOHuxfdmZva1DhVvUC6Xj7+iTMTVJUAq/4Uyn31P1OJmA2a0PT ++CAqWkbJSgKFccsjPoTbLyxhuMSNkEZFHvlZrSK9vnPzmfiRH0Orx3wYpMQ== ++=21pb ++-----END PGP PUBLIC KEY BLOCK----- +diff --git a/repos/system_upgrade/common/libraries/dnfplugin.py b/repos/system_upgrade/common/libraries/dnfplugin.py +index 0ef9ea9b..7a15abc4 100644 +--- a/repos/system_upgrade/common/libraries/dnfplugin.py ++++ b/repos/system_upgrade/common/libraries/dnfplugin.py +@@ -6,6 +6,7 @@ import shutil + + from 
leapp.exceptions import StopActorExecutionError + from leapp.libraries.common import dnfconfig, guards, mounting, overlaygen, rhsm, utils ++from leapp.libraries.common.config import get_env + from leapp.libraries.common.config.version import get_target_major_version, get_target_version + from leapp.libraries.stdlib import api, CalledProcessError, config + from leapp.models import DNFWorkaround +@@ -74,6 +75,10 @@ def _rebuild_rpm_db(context, root=None): + context.call(cmd) + + ++def _the_nogpgcheck_option_used(): ++ return get_env('LEAPP_NOGPGCHECK', '0') == '1' ++ ++ + def build_plugin_data(target_repoids, debug, test, tasks, on_aws): + """ + Generates a dictionary with the DNF plugin data. +@@ -93,7 +98,7 @@ def build_plugin_data(target_repoids, debug, test, tasks, on_aws): + 'debugsolver': debug, + 'disable_repos': True, + 'enable_repos': target_repoids, +- 'gpgcheck': False, ++ 'gpgcheck': not _the_nogpgcheck_option_used(), + 'platform_id': 'platform:el{}'.format(get_target_major_version()), + 'releasever': get_target_version(), + 'installroot': '/installroot', +@@ -269,8 +274,10 @@ def install_initramdisk_requirements(packages, target_userspace_info, used_repos + cmd = [ + 'dnf', + 'install', +- '-y', +- '--nogpgcheck', ++ '-y'] ++ if _the_nogpgcheck_option_used(): ++ cmd.append('--nogpgcheck') ++ cmd += [ + '--setopt=module_platform_id=platform:el{}'.format(get_target_major_version()), + '--setopt=keepcache=1', + '--releasever', api.current_actor().configuration.version.target, +diff --git a/repos/system_upgrade/common/libraries/tests/test_dnfplugin.py b/repos/system_upgrade/common/libraries/tests/test_dnfplugin.py +index 3d0b908f..1ca95945 100644 +--- a/repos/system_upgrade/common/libraries/tests/test_dnfplugin.py ++++ b/repos/system_upgrade/common/libraries/tests/test_dnfplugin.py +@@ -5,6 +5,8 @@ import pytest + import leapp.models + from leapp.libraries.common import dnfplugin + from leapp.libraries.common.config.version import get_major_version ++from 
leapp.libraries.common.testutils import CurrentActorMocked ++from leapp.libraries.stdlib import api + from leapp.models.fields import Boolean + from leapp.topics import Topic + +@@ -61,7 +63,7 @@ class DATADnfPluginDataDnfConf(leapp.models.Model): + debugsolver = fields.Boolean() + disable_repos = BooleanEnum(choices=[True]) + enable_repos = fields.List(fields.StringEnum(choices=TEST_ENABLE_REPOS_CHOICES)) +- gpgcheck = BooleanEnum(choices=[False]) ++ gpgcheck = fields.Boolean() + platform_id = fields.StringEnum(choices=['platform:el8', 'platform:el9']) + releasever = fields.String() + installroot = fields.StringEnum(choices=['/installroot']) +@@ -94,16 +96,6 @@ del leapp.models.DATADnfPluginDataRHUIAWS + del leapp.models.DATADnfPluginData + + +-def _mocked_get_target_major_version(version): +- def impl(): +- return version +- return impl +- +- +-def _mocked_api_get_file_path(name): +- return 'some/random/file/path/{}'.format(name) +- +- + _CONFIG_BUILD_TEST_DEFINITION = ( + # Parameter, Input Data, Expected Fields with data + ('debug', False, ('dnf_conf', 'debugsolver'), False), +@@ -131,9 +123,7 @@ def test_build_plugin_data_variations( + expected_value, + ): + used_target_major_version = get_major_version(used_target_version) +- monkeypatch.setattr(dnfplugin, 'get_target_version', _mocked_get_target_major_version(used_target_version)) +- monkeypatch.setattr(dnfplugin, 'get_target_major_version', +- _mocked_get_target_major_version(used_target_major_version)) ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(dst_ver=used_target_version)) + inputs = { + 'target_repoids': ['BASEOS', 'APPSTREAM'], + 'debug': True, +@@ -161,8 +151,7 @@ def test_build_plugin_data_variations( + + + def test_build_plugin_data(monkeypatch): +- monkeypatch.setattr(dnfplugin, 'get_target_version', _mocked_get_target_major_version('8.4')) +- monkeypatch.setattr(dnfplugin, 'get_target_major_version', _mocked_get_target_major_version('8')) ++ monkeypatch.setattr(api, 
'current_actor', CurrentActorMocked(dst_ver='8.4')) + # Use leapp to validate format and data + created = DATADnfPluginData.create( + dnfplugin.build_plugin_data( +diff --git a/repos/system_upgrade/common/models/targetrepositories.py b/repos/system_upgrade/common/models/targetrepositories.py +index a5a245f1..02c6c5e5 100644 +--- a/repos/system_upgrade/common/models/targetrepositories.py ++++ b/repos/system_upgrade/common/models/targetrepositories.py +@@ -22,14 +22,48 @@ class CustomTargetRepository(TargetRepositoryBase): + + + class TargetRepositories(Model): ++ """ ++ Repositories supposed to be used during the IPU process ++ ++ The list of the actually used repositories could be just subset ++ of these repositoies. In case of `custom_repositories`, all such repositories ++ must be available otherwise the upgrade is inhibited. But in case of ++ `rhel_repos`, only BaseOS and Appstream repos are required now. If others ++ are missing, upgrade can still continue. ++ """ + topic = TransactionTopic + rhel_repos = fields.List(fields.Model(RHELTargetRepository)) ++ """ ++ Expected target YUM RHEL repositories provided via RHSM ++ ++ These repositories are stored inside /etc/yum.repos.d/redhat.repo and ++ are expected to be used based on the provided repositories mapping. ++ """ ++ + custom_repos = fields.List(fields.Model(CustomTargetRepository), default=[]) ++ """ ++ Custom YUM repositories required to be used for the IPU ++ ++ Usually contains third-party or custom repositories specified by user ++ to be used for the IPU. But can contain also RHEL repositories. Difference ++ is that these repositories are not mapped automatically but are explicitly ++ required by user or by an additional product via actors. ++ """ + + + class UsedTargetRepositories(Model): ++ """ ++ Repositories that are used for the IPU process ++ ++ This is the source of truth about the repositories used during the upgrade. 
++ Once specified, it is used for all actions related to the upgrade rpm ++ transaction itself. ++ """ + topic = TransactionTopic + repos = fields.List(fields.Model(UsedTargetRepository)) ++ """ ++ The list of the used target repositories. ++ """ + + + class CustomTargetRepositoryFile(Model): +-- +2.38.1 + diff --git a/SOURCES/0038-missinggpgkey-polish-the-report-msg.patch b/SOURCES/0038-missinggpgkey-polish-the-report-msg.patch new file mode 100644 index 0000000..9ac6357 --- /dev/null +++ b/SOURCES/0038-missinggpgkey-polish-the-report-msg.patch @@ -0,0 +1,68 @@ +From 56da8453683c529c62823aedda2d3b81d1a55a0f Mon Sep 17 00:00:00 2001 +From: Petr Stodulka +Date: Tue, 29 Nov 2022 22:18:50 +0100 +Subject: [PATCH] missinggpgkey: polish the report msg + +--- + .../libraries/missinggpgkey.py | 26 ++++++++++++++----- + 1 file changed, 20 insertions(+), 6 deletions(-) + +diff --git a/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py +index b8b28df2..7f038ee0 100644 +--- a/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py ++++ b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py +@@ -21,6 +21,7 @@ from leapp.models import ( + from leapp.utils.deprecation import suppress_deprecation + + GPG_CERTS_FOLDER = 'rpm-gpg' ++FMT_LIST_SEPARATOR = '\n - ' + + + def _gpg_show_keys(key_path): +@@ -251,16 +252,29 @@ def _report_missing_keys(missing_keys): + # TODO(pstodulk): polish the report, use FMT_LIST_SEPARATOR + # the list of keys should be mentioned in the summary + summary = ( +- "Some of the target repositories require GPG keys that are missing from the current" +- " RPM DB. Leapp will not be able to verify packages from these repositories during the upgrade process." 
++ 'Some of the target repositories require GPG keys that are not installed' ++ ' in the current RPM DB or are not stored in the {trust_dir} directory.' ++ ' Leapp is not able to guarantee validity of such gpg keys and manual' ++ ' review is required, so any spurious keys are not imported in the system' ++ ' during the in-place upgrade.' ++ ' The following additional gpg keys are required to be imported during' ++ ' the upgrade:{sep}{key_list}' ++ .format( ++ trust_dir=_get_path_to_gpg_certs(), ++ sep=FMT_LIST_SEPARATOR, ++ key_list=FMT_LIST_SEPARATOR.join(missing_keys) ++ ) + ) + hint = ( +- "Please, review the following list and import the GPG keys before " +- "continuing the upgrade:\n * {}".format('\n * '.join(missing_keys)) ++ 'Check the listed GPG keys they are valid and import them into the' ++ ' host RPM DB or store them inside the {} directory prior the upgrade.' ++ ' If you want to proceed the in-place upgrade without checking any RPM' ++ ' signatures, execute leapp with the `--nogpgcheck` option.' ++ .format(_get_path_to_gpg_certs()) + ) + reporting.create_report( + [ +- reporting.Title("Missing GPG key from target system repository"), ++ reporting.Title('Detected unknown GPG keys for target system repositories'), + reporting.Summary(summary), + reporting.Severity(reporting.Severity.HIGH), + reporting.Groups([reporting.Groups.REPOSITORY, reporting.Groups.INHIBITOR]), +@@ -351,7 +365,7 @@ def process(): + if not fps: + # TODO: for now. 
I think it should be treated better + api.current_logger().warning( +- "Cannot get any gpg key from the file: {}".format(gpgkey_url) ++ 'Cannot get any gpg key from the file: {}'.format(gpgkey_url) + ) + continue + for fp in fps: +-- +2.38.1 + diff --git a/SPECS/leapp-repository.spec b/SPECS/leapp-repository.spec index 3280841..ffb41b7 100644 --- a/SPECS/leapp-repository.spec +++ b/SPECS/leapp-repository.spec @@ -2,7 +2,7 @@ %global repositorydir %{leapp_datadir}/repositories %global custom_repositorydir %{leapp_datadir}/custom-repositories -%define leapp_repo_deps 7 +%define leapp_repo_deps 8 %if 0%{?rhel} == 7 %define leapp_python_sitelib %{python2_sitelib} @@ -42,25 +42,59 @@ py2_byte_compile "%1" "%2"} Name: leapp-repository Version: 0.17.0 -Release: 1%{?dist}.2 +Release: 5%{?dist} Summary: Repositories for leapp License: ASL 2.0 URL: https://oamg.github.io/leapp/ Source0: https://github.com/oamg/%{name}/archive/v%{version}.tar.gz#/%{name}-%{version}.tar.gz -Source1: deps-pkgs-7.tar.gz +Source1: deps-pkgs-8.tar.gz # NOTE: Our packages must be noarch. Do no drop this in any way. BuildArch: noarch ### PATCHES HERE # Patch0001: filename.patch -Patch0001: 0001-CheckVDO-Ask-user-only-faiulres-and-undetermined-dev.patch + +## DO NOT REMOVE THIS PATCH UNLESS THE RUBYGEM-IRB ISSUE IS RESOLVED IN ACTORS! 
+# See: https://bugzilla.redhat.com/show_bug.cgi?id=2030627 Patch0004: 0004-Enforce-the-removal-of-rubygem-irb-do-not-install-it.patch -Patch0005: 0005-rhui-azure-sap-apps-consider-RHUI-client-as-signed.patch -Patch0006: 0006-rhui-azure-sap-apps-handle-EUS-SAP-Apps-content-on-R.patch -Patch0007: 0007-checksaphana-Move-to-common.patch -Patch0008: 0008-checksaphana-Adjust-for-el7toel8-and-el8toel9-requir.patch + +# TMP patches - remove them when rebase +Patch0005: 0005-Disable-isort-check-for-deprecated-imports.patch +Patch0006: 0006-Add-codespell-GitHub-actions-workflow-for-spell-chec.patch +Patch0007: 0007-Mini-updateds-in-the-spec-files.patch +Patch0008: 0008-CheckVDO-Ask-user-only-faiulres-and-undetermined-dev.patch +Patch0009: 0009-Add-actors-for-checking-and-setting-systemd-services.patch +Patch0010: 0010-migratentp-Replace-reports-with-log-messages.patch +Patch0011: 0011-migratentp-Catch-more-specific-exception-from-ntp2ch.patch +Patch0012: 0012-migratentp-Don-t-raise-StopActorExecutionError.patch +Patch0013: 0013-Make-shellcheck-happy-again.patch +Patch0014: 0014-actor-firewalld-support-0.8.z.patch +Patch0015: 0015-Scanpkgmanager-detect-proxy-configuration.patch +Patch0016: 0016-Merge-of-the-yumconfigscanner-actor-into-the-scanpkg.patch +Patch0017: 0017-firewalldcheckallowzonedrifting-Fix-the-remediation-.patch +Patch0018: 0018-rhui-azure-sap-apps-consider-RHUI-client-as-signed.patch +Patch0019: 0019-rhui-azure-sap-apps-handle-EUS-SAP-Apps-content-on-R.patch +Patch0020: 0020-checksaphana-Move-to-common.patch +Patch0021: 0021-checksaphana-Adjust-for-el7toel8-and-el8toel9-requir.patch +Patch0022: 0022-Add-an-actor-that-enables-device_cio_free.service-on.patch +Patch0023: 0023-Add-the-scanzfcp-actor-handling-the-IPU-with-ZFCP-s3.patch +Patch0024: 0024-ziplconverttoblscfg-bind-mount-dev-boot-into-the-use.patch +Patch0025: 0025-Provide-common-information-about-systemd.patch +Patch0026: 0026-systemd-Move-enable-disable-reenable-_unit-functions.patch +Patch0027: 
0027-Fix-broken-or-incorrect-systemd-symlinks.patch +Patch0028: 0028-Add-check-for-systemd-symlinks-broken-before-the-upg.patch +Patch0029: 0029-checksystemdservicestasks-update-docstrings-extend-t.patch +Patch0030: 0030-Support-IPU-using-a-target-RHEL-installation-ISO-ima.patch +Patch0031: 0031-Add-prod-certs-for-8.8-9.2-Beta-GA.patch +Patch0032: 0032-Introduce-new-upgrade-paths-8.8-9.2.patch +Patch0033: 0033-testutils-Implement-get_common_tool_path-method.patch +Patch0034: 0034-targetuserspacecreator-improve-copy-of-etc-pki-rpm-g.patch +Patch0035: 0035-DNFWorkaround-extend-the-model-by-script_args.patch +Patch0036: 0036-Introduce-theimportrpmgpgkeys-tool-script.patch +Patch0037: 0037-Enable-gpgcheck-during-IPU-add-nogpgcheck-CLI-option.patch +Patch0038: 0038-missinggpgkey-polish-the-report-msg.patch %description @@ -100,7 +134,7 @@ Conflicts: leapp-upgrade-el7toel8 %endif -# IMPORTANT: everytime the requirements are changed, increment number by one +# IMPORTANT: every time the requirements are changed, increment number by one # - same for Provides in deps subpackage Requires: leapp-repository-dependencies = %{leapp_repo_deps} @@ -112,6 +146,10 @@ Requires: leapp-framework >= 3.1 # tool to be installed as well. Requires: leapp +# Used to determine RHEL version of a given target RHEL installation image - +# uncompressing redhat-release package from the ISO. +Requires: cpio + # The leapp-repository rpm is renamed to %%{lpr_name} Obsoletes: leapp-repository < 0.14.0-5 Provides: leapp-repository = %{version}-%{release} @@ -134,7 +172,7 @@ Leapp repositories for the in-place upgrade to the next major version of the Red Hat Enterprise Linux system. -# This metapackage should contain all RPM dependencies exluding deps on *leapp* +# This metapackage should contain all RPM dependencies excluding deps on *leapp* # RPMs. This metapackage will be automatically replaced during the upgrade # to satisfy dependencies with RPMs from target system. 
%package -n %{lpr_name}-deps @@ -143,7 +181,7 @@ Summary: Meta-package with system dependencies of %{lpr_name} package # The package has been renamed, so let's obsoletes the old one Obsoletes: leapp-repository-deps < 0.14.0-5 -# IMPORTANT: everytime the requirements are changed, increment number by one +# IMPORTANT: every time the requirements are changed, increment number by one # - same for Requires in main package Provides: leapp-repository-dependencies = %{leapp_repo_deps} ################################################## @@ -194,12 +232,42 @@ Requires: dracut # APPLY PATCHES HERE # %%patch0001 -p1 -%patch0001 -p1 %patch0004 -p1 + %patch0005 -p1 %patch0006 -p1 %patch0007 -p1 %patch0008 -p1 +%patch0009 -p1 +%patch0010 -p1 +%patch0011 -p1 +%patch0012 -p1 +%patch0013 -p1 +%patch0014 -p1 +%patch0015 -p1 +%patch0016 -p1 +%patch0017 -p1 +%patch0018 -p1 +%patch0019 -p1 +%patch0020 -p1 +%patch0021 -p1 +%patch0022 -p1 +%patch0023 -p1 +%patch0024 -p1 +%patch0025 -p1 +%patch0026 -p1 +%patch0027 -p1 +%patch0028 -p1 +%patch0029 -p1 +%patch0030 -p1 +%patch0031 -p1 +%patch0032 -p1 +%patch0033 -p1 +%patch0034 -p1 +%patch0035 -p1 +%patch0036 -p1 +%patch0037 -p1 +%patch0038 -p1 %build @@ -237,6 +305,7 @@ rm -rf %{buildroot}%{repositorydir}/common/actors/testactor find %{buildroot}%{repositorydir}/common -name "test.py" -delete rm -rf `find %{buildroot}%{repositorydir} -name "tests" -type d` find %{buildroot}%{repositorydir} -name "Makefile" -delete +find %{buildroot} -name "*.py.orig" -delete for DIRECTORY in $(find %{buildroot}%{repositorydir}/ -mindepth 1 -maxdepth 1 -type d); do @@ -274,12 +343,34 @@ done; # no files here %changelog -* Thu Oct 20 2022 Petr Stodulka - 0.17.0-1.2 -- Add checks for the in-place upgrades of RHEL for SAP -- RHUI: Fix the in-place upgrade on Azure for RHEL SAP Applications -- Resolves: rhbz#2125284 +* Wed Nov 30 2022 Petr Stodulka - 0.17.0-5 +- Check RPM signatures during the upgrade (first part) +- introduced the --nogpgcheck option to do the 
upgrade in the original way +- Resolves: rhbz#2143372 -* Thu Sep 08 2022 Petr Stodulka - 0.17.0-1.1 +* Wed Nov 16 2022 Petr Stodulka - 0.17.0-4 +- The new upgrade path for RHEL 8.8 -> 9.2 +- Require cpio +- Bump leapp-repositori-dependencies to 8 +- Fix systemd symlinks that become incorrect during the IPU +- Introduced an option to use an ISO file as a target RHEL version content source +- Provide common information about systemd services +- Introduced possibility to specify what systemd services should be enabled/disabled on the upgraded system +- Detect and report systemd symlinks that are broken before the upgrade +- Resolves: rhbz#2143372 + +* Wed Sep 07 2022 Petr Stodulka - 0.17.0-3 +- Adding back instruction to not install rubygem-irb during the in-place upgrade + to prevent conflict between files +- Resolves: rhbz#2090995 + +* Wed Sep 07 2022 Petr Stodulka - 0.17.0-2 +- Update VDO checks to enable user to decide the system state on check failures + and undetermined block devices +- The VDO dialog and related VDO reports have been properly updated +- Resolves: rhbz#2096159 + +* Wed Aug 24 2022 Petr Stodulka - 0.17.0-1 - Rebase to v0.17.0 - Support upgrade path RHEL 8.7 -> 9.0 and RHEL SAP 8.6 -> 9.0 - Provide and require leapp-repository-dependencies 7 @@ -320,10 +411,7 @@ done; - Skip comment lines when parsing the GRUB configuration file - Stop propagating the “debug” and ”enforcing=0” kernel cmdline options into the target kernel cmdline options - Mass refactoring to be compatible with leapp v0.15.0 -- Update VDO checks to enable user to decide the system state on check failures - and undetermined block devices -- The VDO dialog and related VDO reports have been properly updated -- Resolves: rhbz#2125284 +- Resolves: rhbz#2090995, rhbz#2040470, rhbz#2092005, rhbz#2093220, rhbz#2095704, rhbz#2096159, rhbz#2100108, rhbz#2100110, rhbz#2103282, rhbz#2106904, rhbz#2110627 * Wed Apr 27 2022 Petr Stodulka - 0.16.0-6 - Skip comments in /etc/default/grub during 
the parsing