From 51f8cea591d6a96b796d8b1cd1e909615fad99d6 Mon Sep 17 00:00:00 2001 From: eabdullin Date: Mon, 21 Jul 2025 11:47:55 +0000 Subject: [PATCH] Import from CS git --- SOURCES/0044-Remove-obsolete-workflows.patch | 267 ++++ .../0045-README-IRC-GitHub-discussions.patch | 24 + ...ues-in-hybrid-azure-during-upgrades-.patch | 538 ++++++++ ...7-Restructure-hybrid-image-detection.patch | 1090 +++++++++++++++++ ...p-repository-contribution-guidelines.patch | 61 + ...-the-DNF-config-by-module.py-library.patch | 46 + ..._gpgcheck-parameter-from-plugin-data.patch | 34 + ...pdate-link-to-contrib-guidelines-139.patch | 37 + ...g-ownership-of-files-in-.-directory-.patch | 30 + ...hecking-ownership-of-files-in-.-dire.patch | 35 + SOURCES/0054-Add-test.patch | 100 ++ SOURCES/0055-fixup-Add-test.patch | 25 + SOURCES/0056-Add-test-for-non-recursive.patch | 110 ++ ...sql-Add-MySQL-actor-with-recommendat.patch | 176 +++ ...058-Fix-target-version-format-checks.patch | 176 +++ ...rhui-add-rhel10-targets-for-upgrades.patch | 67 + ...n-do-not-repolist-source-debug-repos.patch | 35 + ...i-do-not-boostrap-client-on-AWS-9-10.patch | 46 + ...ibaba-client-repositories-to-repomap.patch | 456 +++++++ ...arget-client-s-GPG-key-to-mandatory-.patch | 36 + ...port-rhel9-upgrade-to-rhel10-using-r.patch | 77 ++ ...on-systems-with-cgroupsv1-on-9to10-1.patch | 203 +++ SPECS/leapp-repository.spec | 60 +- 23 files changed, 3728 insertions(+), 1 deletion(-) create mode 100644 SOURCES/0044-Remove-obsolete-workflows.patch create mode 100644 SOURCES/0045-README-IRC-GitHub-discussions.patch create mode 100644 SOURCES/0046-Resolve-boot-issues-in-hybrid-azure-during-upgrades-.patch create mode 100644 SOURCES/0047-Restructure-hybrid-image-detection.patch create mode 100644 SOURCES/0048-Point-to-leapp-repository-contribution-guidelines.patch create mode 100644 SOURCES/0049-Read-the-DNF-config-by-module.py-library.patch create mode 100644 SOURCES/0050-Disable-localpkg_gpgcheck-parameter-from-plugin-data.patch create mode 100644 SOURCES/0051-PR-welcome-msg-update-link-to-contrib-guidelines-139.patch create mode 100644 SOURCES/0052-Fix-skip-checking-ownership-of-files-in-.-directory-.patch create mode 100644 SOURCES/0053-fixup-Fix-skip-checking-ownership-of-files-in-.-dire.patch create mode 100644 SOURCES/0054-Add-test.patch create mode 100644 SOURCES/0055-fixup-Add-test.patch create mode 100644 SOURCES/0056-Add-test-for-non-recursive.patch create mode 100644 SOURCES/0057-el8to9-actors-mysql-Add-MySQL-actor-with-recommendat.patch create mode 100644 SOURCES/0058-Fix-target-version-format-checks.patch create mode 100644 SOURCES/0060-libs-rhui-add-rhel10-targets-for-upgrades.patch create mode 100644 SOURCES/0061-userspacegen-do-not-repolist-source-debug-repos.patch create mode 100644 SOURCES/0062-actor-checkrhui-do-not-boostrap-client-on-AWS-9-10.patch create mode 100644 SOURCES/0063-add-azure-aws-alibaba-client-repositories-to-repomap.patch create mode 100644 SOURCES/0064-rhui-azure-add-target-client-s-GPG-key-to-mandatory-.patch create mode 100644 SOURCES/0065-feat-alibaba-support-rhel9-upgrade-to-rhel10-using-r.patch create mode 100644 SOURCES/0066-Inhibit-upgrade-on-systems-with-cgroupsv1-on-9to10-1.patch diff --git a/SOURCES/0044-Remove-obsolete-workflows.patch b/SOURCES/0044-Remove-obsolete-workflows.patch new file mode 100644 index 0000000..5c17530 --- /dev/null +++ b/SOURCES/0044-Remove-obsolete-workflows.patch @@ -0,0 +1,267 @@ +From c7ea9fc29989a37071bf6355828328910b6b5e1d Mon Sep 17 00:00:00 2001 +From: Daniel Diblik +Date: Tue, 17 Jun 
2025 15:21:15 +0200 +Subject: [PATCH 44/66] Remove obsolete workflows + +* tmt-tests.yml and reuse-copr-build.yml were replaced by the Packit + workflows ages ago +* removed the obsolete workflows + +Signed-off-by: Daniel Diblik +--- + .github/workflows/reuse-copr-build.yml | 163 ------------------------- + .github/workflows/tmt-tests.yml | 72 ----------- + 2 files changed, 235 deletions(-) + delete mode 100644 .github/workflows/reuse-copr-build.yml + delete mode 100644 .github/workflows/tmt-tests.yml + +diff --git a/.github/workflows/reuse-copr-build.yml b/.github/workflows/reuse-copr-build.yml +deleted file mode 100644 +index a772fb64..00000000 +--- a/.github/workflows/reuse-copr-build.yml ++++ /dev/null +@@ -1,163 +0,0 @@ +-name: reuse-copr-build@TF +- +-on: +- workflow_call: +- secrets: +- FEDORA_COPR_LOGIN: +- required: true +- FEDORA_COPR_TOKEN: +- required: true +- outputs: +- artifacts: +- description: "A string with test artifacts to install in tft test env" +- value: ${{ jobs.reusable_workflow_copr_build_job.outputs.artifacts }} +- +-jobs: +- reusable_workflow_copr_build_job: +- # This job only runs for '/rerun' pull request comments by owner, member, or collaborator of the repo/organization. +- name: Build copr builds for tft tests +- runs-on: ubuntu-24.04 +- outputs: +- artifacts: ${{ steps.gen_artifacts.outputs.artifacts }} +- if: | +- github.event.issue.pull_request +- && startsWith(github.event.comment.body, '/rerun') +- && contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association) +- steps: +- - name: Update repository +- id: repo_update +- run: sudo apt-get update +- +- - name: Install necessary deps +- id: deps_install +- run: sudo apt-get install -y libkrb5-dev +- +- - name: Get pull request number +- id: pr_nr +- run: | +- PR_URL="${{ github.event.comment.issue_url }}" +- echo "::set-output name=pr_nr::${PR_URL##*/}" +- +- - name: Checkout +- # TODO: The correct way to checkout would be to use similar approach as in get_commit_by_timestamp function of +- # the github gluetool module (i.e. do not use HEAD but the last commit before comment). +- id: checkout +- uses: actions/checkout@v4 +- with: +- ref: "refs/pull/${{ steps.pr_nr.outputs.pr_nr }}/head" +- +- - name: Get ref and sha +- id: ref_sha +- run: | +- echo "::set-output name=sha::$(git rev-parse --short HEAD)" +- echo "::set-output name=ref::refs/pull/${{ steps.pr_nr.outputs.pr_nr }}/head" +- +- - name: Trigger copr build +- id: copr_build +- env: +- COPR_CONFIG: "copr_fedora.conf" +- COPR_CHROOT: "epel-8-x86_64" +- COPR_REPO: "@oamg/leapp" +- run: | +- cat << EOF > $COPR_CONFIG +- [copr-cli] +- login = ${{ secrets.FEDORA_COPR_LOGIN }} +- username = oamgbot +- token = ${{ secrets.FEDORA_COPR_TOKEN }} +- copr_url = https://copr.fedorainfracloud.org +- # expiration date: 2030-07-04 +- EOF +- +- pip install copr-cli +- PR=${{ steps.pr_nr.outputs.pr_nr }} COPR_CONFIG=$COPR_CONFIG COPR_REPO="$COPR_REPO" COPR_CHROOT=$COPR_CHROOT make copr_build | tee copr.log +- +- COPR_URL=$(grep -Po 'https://copr.fedorainfracloud.org/coprs/build/\d+' copr.log) +- echo "::set-output name=copr_url::${COPR_URL}" +- echo "::set-output name=copr_id::${COPR_URL##*/}" +- +- - name: Add comment with copr build url +- # TODO: Create comment when copr build fails. 
+- id: link_copr +- uses: actions/github-script@v7 +- with: +- script: | +- github.issues.createComment({ +- issue_number: context.issue.number, +- owner: context.repo.owner, +- repo: context.repo.repo, +- body: 'Copr build succeeded: ${{ steps.copr_build.outputs.copr_url }}' +- }) +- +- - name: Get dependent leapp pr number from rerun comment +- uses: actions-ecosystem/action-regex-match@v2 +- id: leapp_pr_regex_match +- with: +- text: ${{ github.event.comment.body }} +- regex: '^/(rerun|rerun-sst)\s+([0-9]+)\s*$' +- +- - name: If leapp_pr was specified in the comment - trigger copr build +- # TODO: XXX FIXME This should schedule copr build for leapp but for now it will be just setting an env var +- id: leapp_pr +- if: ${{ steps.leapp_pr_regex_match.outputs.match != '' }} +- run: | +- echo "::set-output name=leapp_pr::${{ steps.leapp_pr_regex_match.outputs.group2 }}" +- +- - name: Checkout leapp +- id: checkout_leapp +- if: ${{ steps.leapp_pr_regex_match.outputs.match != '' }} +- uses: actions/checkout@v4 +- with: +- repository: "oamg/leapp" +- ref: "refs/pull/${{ steps.leapp_pr.outputs.leapp_pr }}/head" +- +- - name: Get ref and sha for leapp +- id: ref_sha_leapp +- if: ${{ steps.leapp_pr_regex_match.outputs.match != '' }} +- run: | +- echo "::set-output name=sha::$(git rev-parse --short HEAD)" +- echo "::set-output name=ref::refs/pull/${{ steps.leapp_pr.outputs.leapp_pr }}/head" +- +- - name: Trigger copr build for leapp +- id: copr_build_leapp +- if: ${{ steps.leapp_pr_regex_match.outputs.match != '' }} +- env: +- COPR_CONFIG: "copr_fedora.conf" +- COPR_CHROOT: "epel-8-x86_64" +- COPR_REPO: "@oamg/leapp" +- run: | +- cat << EOF > $COPR_CONFIG +- [copr-cli] +- login = ${{ secrets.FEDORA_COPR_LOGIN }} +- username = oamgbot +- token = ${{ secrets.FEDORA_COPR_TOKEN }} +- copr_url = https://copr.fedorainfracloud.org +- # expiration date: 2030-07-04 +- EOF +- +- pip install copr-cli +- PR=${{ steps.leapp_pr.outputs.leapp_pr }} COPR_CONFIG=$COPR_CONFIG COPR_REPO="$COPR_REPO" COPR_CHROOT=$COPR_CHROOT make copr_build | tee copr.log +- +- COPR_URL=$(grep -Po 'https://copr.fedorainfracloud.org/coprs/build/\d+' copr.log) +- echo "::set-output name=copr_url::${COPR_URL}" +- echo "::set-output name=copr_id::${COPR_URL##*/}" +- +- - name: Add comment with copr build url for leapp +- # TODO: Create comment when copr build fails. 
+- id: link_copr_leapp +- if: ${{ steps.leapp_pr_regex_match.outputs.match != '' }} +- uses: actions/github-script@v7 +- with: +- script: | +- github.issues.createComment({ +- issue_number: context.issue.number, +- owner: context.repo.owner, +- repo: context.repo.repo, +- body: 'Copr build succeeded: ${{ steps.copr_build_leapp.outputs.copr_url }}' +- }) +- +- - name: Generate artifacts output +- id: gen_artifacts +- env: +- ARTIFACTS: ${{ steps.leapp_pr_regex_match.outputs.match != '' && format('{0};{1}', steps.copr_build_leapp.outputs.copr_id, steps.copr_build.outputs.copr_id) || steps.copr_build.outputs.copr_id }} +- run: | +- echo "::set-output name=artifacts::${{ env.ARTIFACTS }}" +diff --git a/.github/workflows/tmt-tests.yml b/.github/workflows/tmt-tests.yml +deleted file mode 100644 +index c9f76ef7..00000000 +--- a/.github/workflows/tmt-tests.yml ++++ /dev/null +@@ -1,72 +0,0 @@ +-name: tmt@TF +- +-on: +- issue_comment: +- types: +- - created +- +-jobs: +- call_workflow_copr_build: +- uses: ./.github/workflows/reuse-copr-build.yml +- secrets: inherit +- +- call_workflow_tests_86to90_integration: +- needs: call_workflow_copr_build +- uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@main +- secrets: inherit +- with: +- copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }} +- tmt_plan_regex: "^(?!.*max_sst)(.*tier1)" +- variables: 'SOURCE_RELEASE=8.6;TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms;LEAPPDATA_BRANCH=upstream' +- pull_request_status_name: "8.6to9.0" +- if: | +- github.event.issue.pull_request +- && ! startsWith(github.event.comment.body, '/rerun-sst') +- && contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association) +- +- call_workflow_tests_88to92_integration: +- needs: call_workflow_copr_build +- uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@main +- secrets: inherit +- with: +- copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }} +- tmt_plan_regex: "^(?!.*max_sst)(.*tier1)" +- variables: 'SOURCE_RELEASE=8.8;TARGET_RELEASE=9.2;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-rpms,rhel-8-for-x86_64-baseos-rpms;LEAPPDATA_BRANCH=upstream' +- compose: "RHEL-8.8.0-Nightly" +- pull_request_status_name: "8.8to9.2" +- tmt_context: "distro=rhel-8.8" +- if: | +- github.event.issue.pull_request +- && ! 
startsWith(github.event.comment.body, '/rerun-sst') +- && contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association) +- +- call_workflow_tests_86to90_sst: +- needs: call_workflow_copr_build +- uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@main +- secrets: inherit +- with: +- copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }} +- tmt_plan_regex: "^(?!.*tier[2-3].*)(.*max_sst.*)" +- variables: 'SOURCE_RELEASE=8.6;TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms;LEAPPDATA_BRANCH=upstream' +- pull_request_status_name: "8to9-sst" +- update_pull_request_status: 'false' +- if: | +- github.event.issue.pull_request +- && startsWith(github.event.comment.body, '/rerun-sst') +- && contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association) +- +- call_workflow_tests_86to90_aws: +- needs: call_workflow_copr_build +- uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@main +- secrets: inherit +- with: +- copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }} +- tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*7to8)(.*e2e)" +- compose: "RHEL-8.6-rhui" +- environment_settings: '{"provisioning": {"post_install_script": "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys"}}' +- pull_request_status_name: "8to9-aws-e2e" +- variables: 'SOURCE_RELEASE=8.6;TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms;RHUI=aws;LEAPPDATA_BRANCH=upstream' +- if: | +- github.event.issue.pull_request +- && ! startsWith(github.event.comment.body, '/rerun-sst') +- && contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association) +-- +2.50.1 + diff --git a/SOURCES/0045-README-IRC-GitHub-discussions.patch b/SOURCES/0045-README-IRC-GitHub-discussions.patch new file mode 100644 index 0000000..3f6ada8 --- /dev/null +++ b/SOURCES/0045-README-IRC-GitHub-discussions.patch @@ -0,0 +1,24 @@ +From 7c6bb64e550ccfefe4001e79fef44b7d69446c36 Mon Sep 17 00:00:00 2001 +From: Michal Bocek +Date: Thu, 19 Jun 2025 12:46:50 +0200 +Subject: [PATCH 45/66] README: IRC -> GitHub discussions + +The Leapp team is not available on IRC anymore. We advise the public to +talk to us on GitHub instead. +--- + README.md | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/README.md b/README.md +index 5ec5f723..6b45b4b7 100644 +--- a/README.md ++++ b/README.md +@@ -29,4 +29,4 @@ Then you may attach only the `leapp-logs.tgz` file. + We’ll gladly answer your questions and lead you to through any troubles with the + actor development. + +-You can reach us at IRC: `#leapp` on Libera.Chat. ++You can reach us in the [discussions sections of our GitHub repository](https://github.com/oamg/leapp-repository/discussions). +-- +2.50.1 + diff --git a/SOURCES/0046-Resolve-boot-issues-in-hybrid-azure-during-upgrades-.patch b/SOURCES/0046-Resolve-boot-issues-in-hybrid-azure-during-upgrades-.patch new file mode 100644 index 0000000..919031d --- /dev/null +++ b/SOURCES/0046-Resolve-boot-issues-in-hybrid-azure-during-upgrades-.patch @@ -0,0 +1,538 @@ +From 9966eb19daca97c18d798080c62af5638c1e0eab Mon Sep 17 00:00:00 2001 +From: David Kubek +Date: Tue, 20 Aug 2024 12:57:42 +0200 +Subject: [PATCH 46/66] Resolve boot issues in hybrid azure during upgrades + from RHEL 7 > 8 > 9. 
+ +This commit addresses the issue where the `/boot/grub2/grub.cfg` file is +overwritten during the upgrade process by an old RHEL7 configuration +leftover on the system, causing the system to fail to boot. + +The problem occurs on hybrid Azure images, which support both UEFI and +Legacy systems and have both `grub-pc` and `grub-efi` packages installed. +It is caused by one of the scriplets in `grub-efi` which overwrites the old +configuration. + +If old configuration is detected, this actor regenerates the grub +configuration using `grub2-mkconfig -o /boot/grub2/grub.cfg` after +installing rpms to ensure the correct boot configuration is in place. + +The fix is applied specifically to Azure hybrid cloud systems. + +JIRA: RHEL-38255 +--- + .../cloud/ensurevalidgrubcfghybrid/actor.py | 34 +++ + .../libraries/ensurevalidgrubcfghybrid.py | 66 ++++++ + .../tests/files/invalid_grub.cfg | 51 +++++ + .../tests/files/valid_grub.cfg | 195 ++++++++++++++++++ + .../tests/test_ensurevalidgrubcfghybrid.py | 124 +++++++++++ + 5 files changed, 470 insertions(+) + create mode 100644 repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/actor.py + create mode 100644 repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/libraries/ensurevalidgrubcfghybrid.py + create mode 100644 repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/tests/files/invalid_grub.cfg + create mode 100644 repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/tests/files/valid_grub.cfg + create mode 100644 repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/tests/test_ensurevalidgrubcfghybrid.py + +diff --git a/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/actor.py b/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/actor.py +new file mode 100644 +index 00000000..68de0433 +--- /dev/null ++++ b/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/actor.py +@@ -0,0 +1,34 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import ensurevalidgrubcfghybrid ++from leapp.models import HybridImage ++from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag ++ ++ ++class EnsureValidGrubcfgHybrid(Actor): ++ """ ++ Resolve boot failures in Azure Gen1 VMs during upgrades from RHEL 7 to RHEL 8 to RHEL 9. ++ ++ This actor addresses the issue where the `/boot/grub2/grub.cfg` file is ++ overwritten during the upgrade process by an old RHEL7 configuration ++ leftover on the system, causing the system to fail to boot. ++ ++ The problem occurs on hybrid Azure images, which support both UEFI and ++ Legacy systems and have both `grub-pc` and `grub-efi` packages installed. ++ It is caused by one of the scriplets in `grub-efi` which overwrites the old ++ configuration. ++ ++ If old configuration is detected, this actor regenerates the grub ++ configuration using `grub2-mkconfig -o /boot/grub2/grub.cfg` after ++ installing rpms to ensure the correct boot configuration is in place. ++ ++ The fix is applied specifically to Azure hybrid cloud systems. 
++ ++ """ ++ ++ name = 'ensure_valid_grubcfg_hybrid' ++ consumes = (HybridImage,) ++ produces = () ++ tags = (ApplicationsPhaseTag, IPUWorkflowTag) ++ ++ def process(self): ++ ensurevalidgrubcfghybrid.process() +diff --git a/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/libraries/ensurevalidgrubcfghybrid.py b/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/libraries/ensurevalidgrubcfghybrid.py +new file mode 100644 +index 00000000..127eccfc +--- /dev/null ++++ b/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/libraries/ensurevalidgrubcfghybrid.py +@@ -0,0 +1,66 @@ ++import re ++ ++from leapp.exceptions import StopActorExecutionError ++from leapp.libraries.common.config.architecture import ARCH_ACCEPTED ++from leapp.libraries.stdlib import api, CalledProcessError, run ++from leapp.models import HybridImage ++ ++GRUB_CFG_PATH = '/boot/grub2/grub.cfg' ++ ++MATCH_ARCH = r'({})'.format('|'.join(ARCH_ACCEPTED)) ++MATCH_RHEL7_KERNEL_VERSION = r"\d+\.\d+\.\d+-\d+(\.\d+)*\.el7\.{}".format(MATCH_ARCH) ++MATCH_RHEL7_KERNEL_DEFINITION = r"vmlinuz-{}".format(MATCH_RHEL7_KERNEL_VERSION) ++ ++ ++def process(): ++ if not _is_hybrid_image(): ++ api.current_logger().info('System is not a hybrid image. Skipping.') ++ return ++ ++ grubcfg = _read_grubcfg() ++ if _is_grubcfg_invalid(grubcfg): ++ _run_grub2_mkconfig() ++ ++ ++def _is_hybrid_image(): ++ return next(api.consume(HybridImage), None) is not None ++ ++ ++def _read_grubcfg(): ++ api.current_logger().debug('Reading {}:'.format(GRUB_CFG_PATH)) ++ with open(GRUB_CFG_PATH, 'r') as fin: ++ grubcfg = fin.read() ++ ++ api.current_logger().debug(grubcfg) ++ return grubcfg ++ ++ ++def _is_grubcfg_invalid(grubcfg): ++ return _contains_rhel7_kernel_definition(grubcfg) ++ ++ ++def _contains_rhel7_kernel_definition(grubcfg): ++ api.current_logger().debug("Looking for RHEL7 kernel version ...") ++ ++ match = re.search(MATCH_RHEL7_KERNEL_DEFINITION, grubcfg) ++ ++ api.current_logger().debug( ++ "Matched: {}".format(match.group() if match else "[NO MATCH]") ++ ) ++ ++ return match is not None ++ ++ ++def _run_grub2_mkconfig(): ++ api.current_logger().info("Regenerating {}".format(GRUB_CFG_PATH)) ++ ++ try: ++ run([ ++ 'grub2-mkconfig', ++ '-o', ++ GRUB_CFG_PATH ++ ]) ++ except CalledProcessError as err: ++ msg = 'Could not regenerate {}: {}'.format(GRUB_CFG_PATH, str(err)) ++ api.current_logger().error(msg) ++ raise StopActorExecutionError(msg) +diff --git a/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/tests/files/invalid_grub.cfg b/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/tests/files/invalid_grub.cfg +new file mode 100644 +index 00000000..58f55c53 +--- /dev/null ++++ b/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/tests/files/invalid_grub.cfg +@@ -0,0 +1,51 @@ ++ ++# Created by osbuild ++ ++set timeout=10 ++ ++# load the grubenv file ++load_env ++ ++# selection of the next boot entry ++if [ "${next_entry}" ] ; then ++ set default="${next_entry}" ++ set next_entry= ++ save_env next_entry ++ set boot_once=true ++else ++ set default="${saved_entry}" ++fi ++ ++if [ "${prev_saved_entry}" ]; then ++ set saved_entry="${prev_saved_entry}" ++ save_env saved_entry ++ set prev_saved_entry= ++ save_env prev_saved_entry ++ set boot_once=true ++fi ++ ++function savedefault { ++ if [ -z "${boot_once}" ]; then ++ saved_entry="${chosen}" ++ save_env saved_entry ++ fi ++} ++ ++serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1 
++terminal_input serial console ++terminal_output serial console ++ ++menuentry 'Red Hat Enterprise Linux Server (3.10.0-1160.119.1.el7.x86_64) 7.9 (Maipo)' --class red --class gnu-linux --class gnu --class os --unrestricted --id 'gnulinux-3.10.0-1160.99.1.el7.x86_64-advanced-76a22bf4-f153-4541-b6c7-0332c0dfaeac' { ++ insmod all_video ++ set gfxpayload=keep ++ search --no-floppy --set=root --fs-uuid 61779359-8d11-49ba-bc9d-8d038ee4b108 ++ linuxefi /vmlinuz-3.10.0-1160.119.1.el7.x86_64 root=UUID=d3c9a2bd-7ffb-4113-9b8f-234c13b18274 ro crashkernel=auto console=tty1 console=ttyS0 earlyprintk=ttyS0 rootdelay=300 scsi_mod.use_blk_mq=y LANG=en_US.UTF-8 ++ initrdefi /initramfs-3.10.0-1160.119.1.el7.x86_64.img ++} ++menuentry 'Red Hat Enterprise Linux (3.10.0-1160.99.1.el7.x86_64) 7.9 (Maipo)' --class red --class gnu-linux --class gnu --class os --unrestricted --id 'gnulinux-3.10.0-1160.99.1.el7.x86_64-advanced-76a22bf4-f153-4541-b6c7-0332c0dfaeac' { ++ insmod all_video ++ set gfxpayload=keep ++ search --no-floppy --set=root --fs-uuid 61779359-8d11-49ba-bc9d-8d038ee4b108 ++ linuxefi /vmlinuz-3.10.0-1160.99.1.el7.x86_64 root=UUID=d3c9a2bd-7ffb-4113-9b8f-234c13b18274 ro crashkernel=auto console=tty1 console=ttyS0 earlyprintk=ttyS0 rootdelay=300 scsi_mod.use_blk_mq=y ++ initrdefi /initramfs-3.10.0-1160.99.1.el7.x86_64.img ++} +diff --git a/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/tests/files/valid_grub.cfg b/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/tests/files/valid_grub.cfg +new file mode 100644 +index 00000000..8192665e +--- /dev/null ++++ b/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/tests/files/valid_grub.cfg +@@ -0,0 +1,195 @@ ++# ++# DO NOT EDIT THIS FILE ++# ++# It is automatically generated by grub2-mkconfig using templates ++# from /etc/grub.d and settings from /etc/default/grub ++# ++ ++### BEGIN /etc/grub.d/00_header ### ++set pager=1 ++ ++if [ -f ${config_directory}/grubenv ]; then ++ load_env -f ${config_directory}/grubenv ++elif [ -s $prefix/grubenv ]; then ++ load_env ++fi ++if [ "${next_entry}" ] ; then ++ set default="${next_entry}" ++ set next_entry= ++ save_env next_entry ++ set boot_once=true ++else ++ set default="${saved_entry}" ++fi ++ ++if [ x"${feature_menuentry_id}" = xy ]; then ++ menuentry_id_option="--id" ++else ++ menuentry_id_option="" ++fi ++ ++export menuentry_id_option ++ ++if [ "${prev_saved_entry}" ]; then ++ set saved_entry="${prev_saved_entry}" ++ save_env saved_entry ++ set prev_saved_entry= ++ save_env prev_saved_entry ++ set boot_once=true ++fi ++ ++function savedefault { ++ if [ -z "${boot_once}" ]; then ++ saved_entry="${chosen}" ++ save_env saved_entry ++ fi ++} ++ ++function load_video { ++ if [ x$feature_all_video_module = xy ]; then ++ insmod all_video ++ else ++ insmod efi_gop ++ insmod efi_uga ++ insmod ieee1275_fb ++ insmod vbe ++ insmod vga ++ insmod video_bochs ++ insmod video_cirrus ++ fi ++} ++ ++serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1 ++terminal_input serial console ++terminal_output serial console ++if [ x$feature_timeout_style = xy ] ; then ++ set timeout_style=countdown ++ set timeout=10 ++# Fallback hidden-timeout code in case the timeout_style feature is ++# unavailable. 
++elif sleep --interruptible 10 ; then ++ set timeout=0 ++fi ++### END /etc/grub.d/00_header ### ++ ++### BEGIN /etc/grub.d/00_tuned ### ++set tuned_params="" ++set tuned_initrd="" ++### END /etc/grub.d/00_tuned ### ++ ++### BEGIN /etc/grub.d/01_users ### ++if [ -f ${prefix}/user.cfg ]; then ++ source ${prefix}/user.cfg ++ if [ -n "${GRUB2_PASSWORD}" ]; then ++ set superusers="root" ++ export superusers ++ password_pbkdf2 root ${GRUB2_PASSWORD} ++ fi ++fi ++### END /etc/grub.d/01_users ### ++ ++### BEGIN /etc/grub.d/08_fallback_counting ### ++insmod increment ++# Check if boot_counter exists and boot_success=0 to activate this behaviour. ++if [ -n "${boot_counter}" -a "${boot_success}" = "0" ]; then ++ # if countdown has ended, choose to boot rollback deployment, ++ # i.e. default=1 on OSTree-based systems. ++ if [ "${boot_counter}" = "0" -o "${boot_counter}" = "-1" ]; then ++ set default=1 ++ set boot_counter=-1 ++ # otherwise decrement boot_counter ++ else ++ decrement boot_counter ++ fi ++ save_env boot_counter ++fi ++### END /etc/grub.d/08_fallback_counting ### ++ ++### BEGIN /etc/grub.d/10_linux ### ++insmod part_gpt ++insmod xfs ++set root='hd0,gpt2' ++if [ x$feature_platform_search_hint = xy ]; then ++ search --no-floppy --fs-uuid --set=root --hint-bios=hd0,gpt2 --hint-efi=hd0,gpt2 --hint-baremetal=ahci0,gpt2 61779359-8d11-49ba-bc9d-8d038ee4b108 ++else ++ search --no-floppy --fs-uuid --set=root 61779359-8d11-49ba-bc9d-8d038ee4b108 ++fi ++insmod part_gpt ++insmod xfs ++set boot='hd0,gpt2' ++if [ x$feature_platform_search_hint = xy ]; then ++ search --no-floppy --fs-uuid --set=boot --hint-bios=hd0,gpt2 --hint-efi=hd0,gpt2 --hint-baremetal=ahci0,gpt2 61779359-8d11-49ba-bc9d-8d038ee4b108 ++else ++ search --no-floppy --fs-uuid --set=boot 61779359-8d11-49ba-bc9d-8d038ee4b108 ++fi ++ ++# This section was generated by a script. Do not modify the generated file - all changes ++# will be lost the next time file is regenerated. Instead edit the BootLoaderSpec files. ++# ++# The blscfg command parses the BootLoaderSpec files stored in /boot/loader/entries and ++# populates the boot menu. Please refer to the Boot Loader Specification documentation ++# for the files format: https://www.freedesktop.org/wiki/Specifications/BootLoaderSpec/. ++ ++# The kernelopts variable should be defined in the grubenv file. But to ensure that menu ++# entries populated from BootLoaderSpec files that use this variable work correctly even ++# without a grubenv file, define a fallback kernelopts variable if this has not been set. ++# ++# The kernelopts variable in the grubenv file can be modified using the grubby tool or by ++# executing the grub2-mkconfig tool. For the latter, the values of the GRUB_CMDLINE_LINUX ++# and GRUB_CMDLINE_LINUX_DEFAULT options from /etc/default/grub file are used to set both ++# the kernelopts variable in the grubenv file and the fallback kernelopts variable. 
++if [ -z "${kernelopts}" ]; then ++ set kernelopts="root=/dev/mapper/rootvg-rootlv ro ro crashkernel=auto console=tty1 console=ttyS0 earlyprintk=ttyS0 rootdelay=300 scsi_mod.use_blk_mq=y " ++fi ++ ++insmod blscfg ++blscfg ++### END /etc/grub.d/10_linux ### ++ ++### BEGIN /etc/grub.d/10_reset_boot_success ### ++# Hiding the menu is ok if last boot was ok or if this is a first boot attempt to boot the entry ++if [ "${boot_success}" = "1" -o "${boot_indeterminate}" = "1" ]; then ++ set menu_hide_ok=1 ++else ++ set menu_hide_ok=0 ++fi ++# Reset boot_indeterminate after a successful boot ++if [ "${boot_success}" = "1" ] ; then ++ set boot_indeterminate=0 ++# Avoid boot_indeterminate causing the menu to be hidden more then once ++elif [ "${boot_indeterminate}" = "1" ]; then ++ set boot_indeterminate=2 ++fi ++# Reset boot_success for current boot ++set boot_success=0 ++save_env boot_success boot_indeterminate ++### END /etc/grub.d/10_reset_boot_success ### ++ ++### BEGIN /etc/grub.d/12_menu_auto_hide ### ++### END /etc/grub.d/12_menu_auto_hide ### ++ ++### BEGIN /etc/grub.d/20_linux_xen ### ++### END /etc/grub.d/20_linux_xen ### ++ ++### BEGIN /etc/grub.d/20_ppc_terminfo ### ++### END /etc/grub.d/20_ppc_terminfo ### ++ ++### BEGIN /etc/grub.d/30_os-prober ### ++### END /etc/grub.d/30_os-prober ### ++ ++### BEGIN /etc/grub.d/30_uefi-firmware ### ++### END /etc/grub.d/30_uefi-firmware ### ++ ++### BEGIN /etc/grub.d/40_custom ### ++# This file provides an easy way to add custom menu entries. Simply type the ++# menu entries you want to add after this comment. Be careful not to change ++# the 'exec tail' line above. ++### END /etc/grub.d/40_custom ### ++ ++### BEGIN /etc/grub.d/41_custom ### ++if [ -f ${config_directory}/custom.cfg ]; then ++ source ${config_directory}/custom.cfg ++elif [ -z "${config_directory}" -a -f $prefix/custom.cfg ]; then ++ source $prefix/custom.cfg; ++fi ++### END /etc/grub.d/41_custom ### +diff --git a/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/tests/test_ensurevalidgrubcfghybrid.py b/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/tests/test_ensurevalidgrubcfghybrid.py +new file mode 100644 +index 00000000..c0fb0a0d +--- /dev/null ++++ b/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/tests/test_ensurevalidgrubcfghybrid.py +@@ -0,0 +1,124 @@ ++import os ++ ++import pytest ++ ++from leapp.exceptions import StopActorExecutionError ++from leapp.libraries.actor import ensurevalidgrubcfghybrid ++from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked ++from leapp.libraries.stdlib import api, CalledProcessError ++from leapp.models import HybridImage ++ ++CUR_DIR = os.path.dirname(os.path.abspath(__file__)) ++ ++ ++def raise_call_error(args=None): ++ raise CalledProcessError( ++ message='A Leapp Command Error occurred.', ++ command=args, ++ result={'signal': None, 'exit_code': 1, 'pid': 0, 'stdout': 'fake', 'stderr': 'fake'} ++ ) ++ ++ ++class run_mocked(object): ++ def __init__(self, raise_err=False): ++ self.called = 0 ++ self.args = [] ++ self.raise_err = raise_err ++ ++ def __call__(self, *args): ++ self.called += 1 ++ self.args.append(args) ++ if self.raise_err: ++ raise_call_error(args) ++ ++ ++def test_not_hybrid_image(monkeypatch): ++ """ ++ Skip when system is not a hybrid. 
++ """ ++ ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[])) ++ monkeypatch.setattr(api, 'current_logger', logger_mocked()) ++ monkeypatch.setattr(ensurevalidgrubcfghybrid, 'run', run_mocked(raise_err=False)) ++ ++ ensurevalidgrubcfghybrid.process() ++ ++ assert api.current_logger.infomsg[0].startswith('System is not a hybrid image') ++ assert ensurevalidgrubcfghybrid.run.called == 0 ++ ++ ++@pytest.mark.parametrize("is_invalid", [True, False]) ++def test_is_grubcfg_valid(monkeypatch, is_invalid): ++ ++ grubcfg_filename = ('invalid' if is_invalid else 'valid') + '_grub.cfg' ++ grubcfg_filepath = os.path.join(CUR_DIR, 'files', grubcfg_filename) ++ with open(grubcfg_filepath, 'r') as fin: ++ grubcfg = fin.read() ++ ++ assert ensurevalidgrubcfghybrid._is_grubcfg_invalid(grubcfg) == is_invalid ++ ++ ++def test_valid_grubcfg(monkeypatch): ++ """ ++ Test valid configuration does not trigger grub2-mkconfig ++ """ ++ ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[HybridImage()])) ++ monkeypatch.setattr(api, 'current_logger', logger_mocked()) ++ monkeypatch.setattr(ensurevalidgrubcfghybrid, 'run', run_mocked(raise_err=False)) ++ ++ grubcfg_filepath = os.path.join(CUR_DIR, 'files', 'valid_grub.cfg') ++ with open(grubcfg_filepath, 'r') as fin: ++ grubcfg = fin.read() ++ ++ monkeypatch.setattr(ensurevalidgrubcfghybrid, '_read_grubcfg', lambda: grubcfg) ++ ++ ensurevalidgrubcfghybrid.process() ++ ++ assert ensurevalidgrubcfghybrid.run.called == 0 ++ ++ ++def test_invalid_grubcfg(monkeypatch): ++ """ ++ Test invalid configuration triggers grub2-mkconfig ++ """ ++ ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[HybridImage()])) ++ monkeypatch.setattr(api, 'current_logger', logger_mocked()) ++ monkeypatch.setattr(ensurevalidgrubcfghybrid, 'run', run_mocked(raise_err=False)) ++ ++ grubcfg_filepath = os.path.join(CUR_DIR, 'files', 'invalid_grub.cfg') ++ with open(grubcfg_filepath, 'r') as fin: ++ grubcfg = fin.read() ++ ++ monkeypatch.setattr(ensurevalidgrubcfghybrid, '_read_grubcfg', lambda: grubcfg) ++ ++ ensurevalidgrubcfghybrid.process() ++ ++ assert ensurevalidgrubcfghybrid.run.called == 1 ++ assert any(msg.startswith('Regenerating') for msg in api.current_logger.infomsg) ++ ++ ++def test_run_error(monkeypatch): ++ """ ++ Test invalid configuration triggers grub2-mkconfig ++ """ ++ ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[HybridImage()])) ++ monkeypatch.setattr(api, 'current_logger', logger_mocked()) ++ monkeypatch.setattr(ensurevalidgrubcfghybrid, 'run', run_mocked(raise_err=True)) ++ ++ grubcfg_filepath = os.path.join(CUR_DIR, 'files', 'invalid_grub.cfg') ++ with open(grubcfg_filepath, 'r') as fin: ++ grubcfg = fin.read() ++ ++ monkeypatch.setattr(ensurevalidgrubcfghybrid, '_read_grubcfg', lambda: grubcfg) ++ ++ with pytest.raises(StopActorExecutionError): ++ ensurevalidgrubcfghybrid.process() ++ ++ assert ensurevalidgrubcfghybrid.run.called == 1 ++ assert any( ++ msg.startswith('Could not regenerate') ++ for msg in api.current_logger.err ++ ) +-- +2.50.1 + diff --git a/SOURCES/0047-Restructure-hybrid-image-detection.patch b/SOURCES/0047-Restructure-hybrid-image-detection.patch new file mode 100644 index 0000000..4cb5df1 --- /dev/null +++ b/SOURCES/0047-Restructure-hybrid-image-detection.patch @@ -0,0 +1,1090 @@ +From 1964c6990c3c7b822f6aca732b0742969c67025e Mon Sep 17 00:00:00 2001 +From: David Kubek +Date: Thu, 22 Aug 2024 11:54:37 +0200 +Subject: [PATCH 47/66] Restructure hybrid image detection + 
+Previously, detection of the Azure hybrid image was tightly coupled with
+the process of converting the grubenv symlink to a regular file. Since
+other issues related to hybrid images exist, it is worth separating
+these two concepts.
+
+This commit modifies the ScanHybridImage actor so that it produces a
+message when WALinuxAgent is detected, or when we are booted in BIOS,
+the ESP partition is mounted, and we are running on Hyper-V (a sign of
+a hybrid image).
+
+The new CheckGrubenvToFile actor is responsible for detecting a grubenv
+symlink on hybrid images and tasks ConvertGrubenvToFile, which is later
+responsible for the actual conversion.
+---
+ .../actors/cloud/checkgrubenvtofile/actor.py  | 34 +++++
+ .../libraries/checkgrubenvtofile.py           | 44 +++++++
+ .../tests/test_checkgrubenvtofile.py          | 35 +++++
+ .../actors/cloud/checkhybridimage/actor.py    | 24 ----
+ .../libraries/checkhybridimage.py             | 65 ---------
+ .../tests/test_checkhybridimage.py            | 82 ------------
+ .../cloud/convertgrubenvtofile/actor.py       | 21 +++
+ .../libraries/convertgrubenvtofile.py}        |  8 ++
+ .../tests/test_convertgrubenvtofile.py        | 51 +++++++
+ .../actors/cloud/grubenvtofile/actor.py       | 28 ----
+ .../grubenvtofile/tests/test_grubenvtofile.py | 43 ------
+ .../actors/cloud/scanhybridimage/actor.py     | 19 +++
+ .../libraries/scanhybridimage.py              | 102 ++++++++++++++
+ .../tests/test_scanhybridimage.py             | 124 ++++++++++++++++++
+ repos/system_upgrade/common/models/grubenv.py | 11 +-
+ .../common/models/hybridimage.py              | 12 ++
+ .../cloud/checkvalidgrubcfghybrid/actor.py    | 32 +++++
+ .../libraries/checkvalidgrubcfghybrid.py      | 30 +++++
+ .../tests/test_checkvalidgrubcfghybrid.py     | 25 ++++
+ .../cloud/ensurevalidgrubcfghybrid/actor.py   | 18 +--
+ .../libraries/ensurevalidgrubcfghybrid.py     |  4 +-
+ .../tests/test_ensurevalidgrubcfghybrid.py    |  8 +-
+ 22 files changed, 555 insertions(+), 265 deletions(-)
+ create mode 100644 repos/system_upgrade/common/actors/cloud/checkgrubenvtofile/actor.py
+ create mode 100644 repos/system_upgrade/common/actors/cloud/checkgrubenvtofile/libraries/checkgrubenvtofile.py
+ create mode 100644 repos/system_upgrade/common/actors/cloud/checkgrubenvtofile/tests/test_checkgrubenvtofile.py
+ delete mode 100644 repos/system_upgrade/common/actors/cloud/checkhybridimage/actor.py
+ delete mode 100644 repos/system_upgrade/common/actors/cloud/checkhybridimage/libraries/checkhybridimage.py
+ delete mode 100644 repos/system_upgrade/common/actors/cloud/checkhybridimage/tests/test_checkhybridimage.py
+ create mode 100644 repos/system_upgrade/common/actors/cloud/convertgrubenvtofile/actor.py
+ rename repos/system_upgrade/common/actors/cloud/{grubenvtofile/libraries/grubenvtofile.py => convertgrubenvtofile/libraries/convertgrubenvtofile.py} (79%)
+ create mode 100644 repos/system_upgrade/common/actors/cloud/convertgrubenvtofile/tests/test_convertgrubenvtofile.py
+ delete mode 100644 repos/system_upgrade/common/actors/cloud/grubenvtofile/actor.py
+ delete mode 100644 repos/system_upgrade/common/actors/cloud/grubenvtofile/tests/test_grubenvtofile.py
+ create mode 100644 repos/system_upgrade/common/actors/cloud/scanhybridimage/actor.py
+ create mode 100644 repos/system_upgrade/common/actors/cloud/scanhybridimage/libraries/scanhybridimage.py
+ create mode 100644 repos/system_upgrade/common/actors/cloud/scanhybridimage/tests/test_scanhybridimage.py
+ create mode 100644 repos/system_upgrade/common/models/hybridimage.py
+ create mode 100644 repos/system_upgrade/el8toel9/actors/cloud/checkvalidgrubcfghybrid/actor.py
+ create mode 100644 
repos/system_upgrade/el8toel9/actors/cloud/checkvalidgrubcfghybrid/libraries/checkvalidgrubcfghybrid.py + create mode 100644 repos/system_upgrade/el8toel9/actors/cloud/checkvalidgrubcfghybrid/tests/test_checkvalidgrubcfghybrid.py + +diff --git a/repos/system_upgrade/common/actors/cloud/checkgrubenvtofile/actor.py b/repos/system_upgrade/common/actors/cloud/checkgrubenvtofile/actor.py +new file mode 100644 +index 00000000..62ff7644 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/cloud/checkgrubenvtofile/actor.py +@@ -0,0 +1,34 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import checkgrubenvtofile ++from leapp.models import ConvertGrubenvTask, FirmwareFacts, HybridImageAzure ++from leapp.reporting import Report ++from leapp.tags import ChecksPhaseTag, IPUWorkflowTag ++ ++ ++class CheckGrubenvToFile(Actor): ++ """ ++ Check whether grubenv is a symlink on Azure hybrid images using BIOS. ++ ++ Azure images provided by Red Hat aim for hybrid (BIOS/EFI) functionality, ++ however, currently GRUB is not able to see the "grubenv" file if it is a ++ symlink to a different partition (default on EFI with grub2-efi pkg ++ installed) and fails on BIOS systems. ++ ++ These images have a default relative symlink to EFI partition even when ++ booted using BIOS and in such cases GRUB is not able to find "grubenv" and ++ fails to get the kernel cmdline options resulting in system failing to boot ++ after upgrade. ++ ++ The symlink needs to be converted to a normal file with the content of ++ grubenv on the EFI partition in case the system is using BIOS and running ++ on the Azure cloud. This action is reported in the preupgrade phase. ++ ++ """ ++ ++ name = 'check_grubenv_to_file' ++ consumes = (FirmwareFacts, HybridImageAzure,) ++ produces = (ConvertGrubenvTask, Report) ++ tags = (ChecksPhaseTag, IPUWorkflowTag) ++ ++ def process(self): ++ checkgrubenvtofile.process() +diff --git a/repos/system_upgrade/common/actors/cloud/checkgrubenvtofile/libraries/checkgrubenvtofile.py b/repos/system_upgrade/common/actors/cloud/checkgrubenvtofile/libraries/checkgrubenvtofile.py +new file mode 100644 +index 00000000..a4c5ee1c +--- /dev/null ++++ b/repos/system_upgrade/common/actors/cloud/checkgrubenvtofile/libraries/checkgrubenvtofile.py +@@ -0,0 +1,44 @@ ++from leapp import reporting ++from leapp.libraries.stdlib import api ++from leapp.models import ConvertGrubenvTask, FirmwareFacts, HybridImageAzure ++ ++ ++def process(): ++ hybrid_image = next(api.consume(HybridImageAzure), None) ++ ++ if not hybrid_image: ++ return ++ ++ if not is_bios() or not hybrid_image.grubenv_is_symlink_to_efi: ++ return ++ ++ reporting.create_report([ ++ reporting.Title( ++ 'Azure hybrid (BIOS/EFI) image detected. "grubenv" symlink will be converted to a regular file' ++ ), ++ reporting.Summary( ++ 'Leapp detected the system is running on Azure cloud, booted using BIOS and ' ++ 'the "/boot/grub2/grubenv" file is a symlink to "../efi/EFI/redhat/grubenv". In case of such a ' ++ 'hybrid image scenario GRUB is not able to locate "grubenv" as it is a symlink to different ' ++ 'partition and fails to boot. If the system needs to be run in EFI mode later, please re-create ' ++ 'the relative symlink again.' 
++ ), ++ reporting.Severity(reporting.Severity.HIGH), ++ reporting.Groups([ ++ reporting.Groups.PUBLIC_CLOUD, ++ reporting.Groups.BOOT ++ ]), ++ reporting.RelatedResource('file', '/boot/grub2/grubenv'), ++ reporting.RelatedResource('file', '/boot/efi/EFI/redhat/grubenv'), ++ ]) ++ ++ api.produce(ConvertGrubenvTask()) ++ ++ ++def is_bios(): ++ """ ++ Check whether system is booted into BIOS ++ """ ++ ++ ff = next(api.consume(FirmwareFacts), None) ++ return ff and ff.firmware == 'bios' +diff --git a/repos/system_upgrade/common/actors/cloud/checkgrubenvtofile/tests/test_checkgrubenvtofile.py b/repos/system_upgrade/common/actors/cloud/checkgrubenvtofile/tests/test_checkgrubenvtofile.py +new file mode 100644 +index 00000000..a5a203fd +--- /dev/null ++++ b/repos/system_upgrade/common/actors/cloud/checkgrubenvtofile/tests/test_checkgrubenvtofile.py +@@ -0,0 +1,35 @@ ++import pytest ++ ++from leapp import reporting ++from leapp.libraries.actor import checkgrubenvtofile ++from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked, produce_mocked ++from leapp.libraries.stdlib import api ++from leapp.models import FirmwareFacts, HybridImageAzure ++ ++BIOS_FIRMWARE = FirmwareFacts(firmware='bios') ++EFI_FIRMWARE = FirmwareFacts(firmware='efi') ++ ++ ++@pytest.mark.parametrize('is_hybrid', [True, False]) ++@pytest.mark.parametrize('is_bios', [True, False]) ++@pytest.mark.parametrize('is_symlink', [True, False]) ++def test_check_grubenv_to_file(monkeypatch, tmpdir, is_hybrid, is_bios, is_symlink): ++ ++ should_report = all([is_hybrid, is_bios, is_symlink]) ++ ++ monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) ++ ++ firmware = BIOS_FIRMWARE if is_bios else EFI_FIRMWARE ++ msgs = [firmware] + ([HybridImageAzure(grubenv_is_symlink_to_efi=is_symlink)] if is_hybrid else []) ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch='x86_64', msgs=msgs)) ++ monkeypatch.setattr(api, "produce", produce_mocked()) ++ ++ checkgrubenvtofile.process() ++ ++ if should_report: ++ assert reporting.create_report.called == 1 ++ assert 'hybrid' in reporting.create_report.report_fields['title'] ++ assert api.produce.called == 1 ++ else: ++ assert reporting.create_report.called == 0 ++ assert api.produce.called == 0 +diff --git a/repos/system_upgrade/common/actors/cloud/checkhybridimage/actor.py b/repos/system_upgrade/common/actors/cloud/checkhybridimage/actor.py +deleted file mode 100644 +index 3cd2d864..00000000 +--- a/repos/system_upgrade/common/actors/cloud/checkhybridimage/actor.py ++++ /dev/null +@@ -1,24 +0,0 @@ +-from leapp.actors import Actor +-from leapp.libraries.actor.checkhybridimage import check_hybrid_image +-from leapp.models import FirmwareFacts, HybridImage, InstalledRPM +-from leapp.reporting import Report +-from leapp.tags import ChecksPhaseTag, IPUWorkflowTag +- +- +-class CheckHybridImage(Actor): +- """ +- Check if the system is using Azure hybrid image. +- +- These images have a default relative symlink to EFI +- partition even when booted using BIOS and in such cases +- GRUB is not able find "grubenv" to get the kernel cmdline +- options and fails to boot after upgrade`. 
+- """ +- +- name = 'checkhybridimage' +- consumes = (InstalledRPM, FirmwareFacts) +- produces = (HybridImage, Report) +- tags = (ChecksPhaseTag, IPUWorkflowTag) +- +- def process(self): +- check_hybrid_image() +diff --git a/repos/system_upgrade/common/actors/cloud/checkhybridimage/libraries/checkhybridimage.py b/repos/system_upgrade/common/actors/cloud/checkhybridimage/libraries/checkhybridimage.py +deleted file mode 100644 +index a4eb6fa1..00000000 +--- a/repos/system_upgrade/common/actors/cloud/checkhybridimage/libraries/checkhybridimage.py ++++ /dev/null +@@ -1,65 +0,0 @@ +-import os +- +-from leapp import reporting +-from leapp.libraries.common import rhui +-from leapp.libraries.common.config.version import get_source_major_version +-from leapp.libraries.common.rpms import has_package +-from leapp.libraries.stdlib import api +-from leapp.models import FirmwareFacts, HybridImage, InstalledRPM +- +-BIOS_PATH = '/boot/grub2/grubenv' +-EFI_PATH = '/boot/efi/EFI/redhat/grubenv' +- +- +-def is_grubenv_symlink_to_efi(): +- """ +- Check whether '/boot/grub2/grubenv' is a relative symlink to +- '/boot/efi/EFI/redhat/grubenv'. +- """ +- return os.path.islink(BIOS_PATH) and os.path.realpath(BIOS_PATH) == os.path.realpath(EFI_PATH) +- +- +-def is_azure_agent_installed(): +- """Check whether 'WALinuxAgent' package is installed.""" +- src_ver_major = get_source_major_version() +- +- family = rhui.RHUIFamily(rhui.RHUIProvider.AZURE) +- azure_setups = rhui.RHUI_SETUPS.get(family, []) +- +- agent_pkg = None +- for setup in azure_setups: +- setup_major_ver = str(setup.os_version[0]) +- if setup_major_ver == src_ver_major: +- agent_pkg = setup.extra_info.get('agent_pkg') +- break +- +- if not agent_pkg: +- return False +- +- return has_package(InstalledRPM, agent_pkg) +- +- +-def is_bios(): +- """Check whether system is booted into BIOS""" +- ff = next(api.consume(FirmwareFacts), None) +- return ff and ff.firmware == 'bios' +- +- +-def check_hybrid_image(): +- """Check whether the system is using Azure hybrid image.""" +- if all([is_grubenv_symlink_to_efi(), is_azure_agent_installed(), is_bios()]): +- api.produce(HybridImage(detected=True)) +- reporting.create_report([ +- reporting.Title( +- 'Azure hybrid (BIOS/EFI) image detected. "grubenv" symlink will be converted to a regular file' +- ), +- reporting.Summary( +- 'Leapp detected the system is running on Azure cloud, booted using BIOS and ' +- 'the "/boot/grub2/grubenv" file is a symlink to "../efi/EFI/redhat/grubenv". In case of such a ' +- 'hybrid image scenario GRUB is not able to locate "grubenv" as it is a symlink to different ' +- 'partition and fails to boot. If the system needs to be run in EFI mode later, please re-create ' +- 'the relative symlink again.' 
+- ), +- reporting.Severity(reporting.Severity.HIGH), +- reporting.Groups([reporting.Groups.PUBLIC_CLOUD]), +- ]) +diff --git a/repos/system_upgrade/common/actors/cloud/checkhybridimage/tests/test_checkhybridimage.py b/repos/system_upgrade/common/actors/cloud/checkhybridimage/tests/test_checkhybridimage.py +deleted file mode 100644 +index 16fbb44c..00000000 +--- a/repos/system_upgrade/common/actors/cloud/checkhybridimage/tests/test_checkhybridimage.py ++++ /dev/null +@@ -1,82 +0,0 @@ +-import pytest +- +-from leapp import reporting +-from leapp.libraries.actor import checkhybridimage +-from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked, produce_mocked +-from leapp.libraries.stdlib import api +-from leapp.models import FirmwareFacts, InstalledRPM, RPM +-from leapp.reporting import Report +- +-RH_PACKAGER = 'Red Hat, Inc. ' +-WA_AGENT_RPM = RPM( +- name='WALinuxAgent', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', +- pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51' +-) +-NO_AGENT_RPM = RPM( +- name='NoAgent', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', +- pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51' +-) +- +-INSTALLED_AGENT = InstalledRPM(items=[WA_AGENT_RPM]) +-NOT_INSTALLED_AGENT = InstalledRPM(items=[NO_AGENT_RPM]) +- +-BIOS_FIRMWARE = FirmwareFacts(firmware='bios') +-EFI_FIRMWARE = FirmwareFacts(firmware='efi') +- +-BIOS_PATH = '/boot/grub2/grubenv' +-EFI_PATH = '/boot/efi/EFI/redhat/grubenv' +- +- +-def test_hybrid_image(monkeypatch, tmpdir): +- grubenv_efi = tmpdir.join('grubenv_efi') +- grubenv_efi.write('grubenv') +- +- grubenv_boot = tmpdir.join('grubenv_boot') +- grubenv_boot.mksymlinkto('grubenv_efi') +- +- monkeypatch.setattr(checkhybridimage, 'BIOS_PATH', grubenv_boot.strpath) +- monkeypatch.setattr(checkhybridimage, 'EFI_PATH', grubenv_efi.strpath) +- monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) +- monkeypatch.setattr( +- api, 'current_actor', CurrentActorMocked(arch='x86_64', msgs=[BIOS_FIRMWARE, INSTALLED_AGENT]) +- ) +- monkeypatch.setattr(api, "produce", produce_mocked()) +- +- checkhybridimage.check_hybrid_image() +- assert reporting.create_report.called == 1 +- assert 'hybrid' in reporting.create_report.report_fields['title'] +- assert api.produce.called == 1 +- +- +-@pytest.mark.parametrize('is_symlink, realpath_match, is_bios, agent_installed', [ +- (False, True, True, True), +- (True, False, True, True), +- (True, True, False, True), +- (True, True, True, False), +-]) +-def test_no_hybrid_image(monkeypatch, is_symlink, realpath_match, is_bios, agent_installed, tmpdir): +- grubenv_efi = tmpdir.join('grubenv_efi') +- grubenv_efi.write('grubenv') +- grubenv_efi_false = tmpdir.join('grubenv_efi_false') +- grubenv_efi.write('nope') +- grubenv_boot = tmpdir.join('grubenv_boot') +- +- grubenv_target = grubenv_efi if realpath_match else grubenv_efi_false +- +- if is_symlink: +- grubenv_boot.mksymlinkto(grubenv_target) +- +- firmw = BIOS_FIRMWARE if is_bios else EFI_FIRMWARE +- inst_rpms = INSTALLED_AGENT if agent_installed else NOT_INSTALLED_AGENT +- +- monkeypatch.setattr(checkhybridimage, 'BIOS_PATH', grubenv_boot.strpath) +- monkeypatch.setattr(checkhybridimage, 'EFI_PATH', grubenv_efi.strpath) +- monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) +- monkeypatch.setattr( +- api, 'current_actor', CurrentActorMocked(arch='x86_64', msgs=[firmw, inst_rpms]) +- ) +- 
monkeypatch.setattr(api, "produce", produce_mocked()) +- +- checkhybridimage.check_hybrid_image() +- assert not reporting.create_report.called +- assert not api.produce.called +diff --git a/repos/system_upgrade/common/actors/cloud/convertgrubenvtofile/actor.py b/repos/system_upgrade/common/actors/cloud/convertgrubenvtofile/actor.py +new file mode 100644 +index 00000000..68ef54bb +--- /dev/null ++++ b/repos/system_upgrade/common/actors/cloud/convertgrubenvtofile/actor.py +@@ -0,0 +1,21 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import convertgrubenvtofile ++from leapp.models import ConvertGrubenvTask ++from leapp.tags import FinalizationPhaseTag, IPUWorkflowTag ++ ++ ++class ConvertGrubenvToFile(Actor): ++ """ ++ Convert "grubenv" symlink to a regular file on Azure hybrid images using BIOS. ++ ++ For more information see CheckGrubenvToFile actor. ++ ++ """ ++ ++ name = 'convert_grubenv_to_file' ++ consumes = (ConvertGrubenvTask,) ++ produces = () ++ tags = (FinalizationPhaseTag, IPUWorkflowTag) ++ ++ def process(self): ++ convertgrubenvtofile.process() +diff --git a/repos/system_upgrade/common/actors/cloud/grubenvtofile/libraries/grubenvtofile.py b/repos/system_upgrade/common/actors/cloud/convertgrubenvtofile/libraries/convertgrubenvtofile.py +similarity index 79% +rename from repos/system_upgrade/common/actors/cloud/grubenvtofile/libraries/grubenvtofile.py +rename to repos/system_upgrade/common/actors/cloud/convertgrubenvtofile/libraries/convertgrubenvtofile.py +index 4d699ec3..1803c6c7 100644 +--- a/repos/system_upgrade/common/actors/cloud/grubenvtofile/libraries/grubenvtofile.py ++++ b/repos/system_upgrade/common/actors/cloud/convertgrubenvtofile/libraries/convertgrubenvtofile.py +@@ -1,9 +1,17 @@ + from leapp.libraries.stdlib import api, CalledProcessError, run ++from leapp.models import ConvertGrubenvTask + + BIOS_PATH = '/boot/grub2/grubenv' + EFI_PATH = '/boot/efi/EFI/redhat/grubenv' + + ++def process(): ++ convert_grubenv_task = next(api.consume(ConvertGrubenvTask), None) ++ ++ if convert_grubenv_task: ++ grubenv_to_file() ++ ++ + def grubenv_to_file(): + try: + run(['unlink', BIOS_PATH]) +diff --git a/repos/system_upgrade/common/actors/cloud/convertgrubenvtofile/tests/test_convertgrubenvtofile.py b/repos/system_upgrade/common/actors/cloud/convertgrubenvtofile/tests/test_convertgrubenvtofile.py +new file mode 100644 +index 00000000..c4534bd6 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/cloud/convertgrubenvtofile/tests/test_convertgrubenvtofile.py +@@ -0,0 +1,51 @@ ++import pytest ++ ++from leapp.libraries.actor import convertgrubenvtofile ++from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked ++from leapp.libraries.stdlib import api, CalledProcessError ++from leapp.models import ConvertGrubenvTask ++ ++ ++def raise_call_error(args=None): ++ raise CalledProcessError( ++ message='A Leapp Command Error occurred.', ++ command=args, ++ result={'signal': None, 'exit_code': 1, 'pid': 0, 'stdout': 'fake', 'stderr': 'fake'} ++ ) ++ ++ ++class run_mocked(object): ++ def __init__(self, raise_err=False): ++ self.called = 0 ++ self.args = [] ++ self.raise_err = raise_err ++ ++ def __call__(self, *args): ++ self.called += 1 ++ self.args.append(args) ++ if self.raise_err: ++ raise_call_error(args) ++ ++ ++def test_grubenv_to_file(monkeypatch): ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch='x86_64', msgs=[ConvertGrubenvTask()])) ++ monkeypatch.setattr(convertgrubenvtofile, 'run', run_mocked(raise_err=False)) ++ 
convertgrubenvtofile.process() ++ assert convertgrubenvtofile.run.called == 2 ++ ++ ++def test_no_grubenv_to_file(monkeypatch): ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch='x86_64', msgs=[])) ++ monkeypatch.setattr(convertgrubenvtofile, 'run', run_mocked(raise_err=False)) ++ convertgrubenvtofile.process() ++ assert convertgrubenvtofile.run.called == 0 ++ ++ ++def test_fail_grubenv_to_file(monkeypatch): ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch='x86_64', msgs=[ConvertGrubenvTask()])) ++ monkeypatch.setattr(convertgrubenvtofile, 'run', run_mocked(raise_err=True)) ++ monkeypatch.setattr(api, 'current_logger', logger_mocked()) ++ convertgrubenvtofile.grubenv_to_file() ++ ++ assert convertgrubenvtofile.run.called == 1 ++ assert api.current_logger.warnmsg[0].startswith('Could not unlink') +diff --git a/repos/system_upgrade/common/actors/cloud/grubenvtofile/actor.py b/repos/system_upgrade/common/actors/cloud/grubenvtofile/actor.py +deleted file mode 100644 +index fc94219c..00000000 +--- a/repos/system_upgrade/common/actors/cloud/grubenvtofile/actor.py ++++ /dev/null +@@ -1,28 +0,0 @@ +-from leapp.actors import Actor +-from leapp.libraries.actor.grubenvtofile import grubenv_to_file +-from leapp.models import HybridImage +-from leapp.tags import FinalizationPhaseTag, IPUWorkflowTag +- +- +-class GrubenvToFile(Actor): +- """ +- Convert "grubenv" symlink to a regular file on Azure hybrid images using BIOS. +- +- Azure images provided by Red Hat aim for hybrid (BIOS/EFI) functionality, +- however, currently GRUB is not able to see the "grubenv" file if it is a symlink +- to a different partition (default on EFI with grub2-efi pkg installed) and +- fails on BIOS systems. This actor converts the symlink to the normal file +- with the content of grubenv on the EFI partition in case the system is using BIOS +- and running on the Azure cloud. This action is reported in the preupgrade phase. 
+- """ +- +- name = 'grubenvtofile' +- consumes = (HybridImage,) +- produces = () +- tags = (FinalizationPhaseTag, IPUWorkflowTag) +- +- def process(self): +- grubenv_msg = next(self.consume(HybridImage), None) +- +- if grubenv_msg and grubenv_msg.detected: +- grubenv_to_file() +diff --git a/repos/system_upgrade/common/actors/cloud/grubenvtofile/tests/test_grubenvtofile.py b/repos/system_upgrade/common/actors/cloud/grubenvtofile/tests/test_grubenvtofile.py +deleted file mode 100644 +index 807f5efa..00000000 +--- a/repos/system_upgrade/common/actors/cloud/grubenvtofile/tests/test_grubenvtofile.py ++++ /dev/null +@@ -1,43 +0,0 @@ +-import pytest +- +-from leapp.libraries.actor import grubenvtofile +-from leapp.libraries.common.testutils import logger_mocked +-from leapp.libraries.stdlib import api, CalledProcessError +-from leapp.models import HybridImage +- +- +-def raise_call_error(args=None): +- raise CalledProcessError( +- message='A Leapp Command Error occurred.', +- command=args, +- result={'signal': None, 'exit_code': 1, 'pid': 0, 'stdout': 'fake', 'stderr': 'fake'} +- ) +- +- +-class run_mocked(object): +- def __init__(self, raise_err=False): +- self.called = 0 +- self.args = [] +- self.raise_err = raise_err +- +- def __call__(self, *args): +- self.called += 1 +- self.args.append(args) +- if self.raise_err: +- raise_call_error(args) +- +- +-def test_grubenv_to_file(monkeypatch): +- monkeypatch.setattr(api, 'consume', lambda x: iter([HybridImage()])) +- monkeypatch.setattr(grubenvtofile, 'run', run_mocked()) +- grubenvtofile.grubenv_to_file() +- assert grubenvtofile.run.called == 2 +- +- +-def test_fail_grubenv_to_file(monkeypatch): +- monkeypatch.setattr(api, 'consume', lambda x: iter([HybridImage()])) +- monkeypatch.setattr(grubenvtofile, 'run', run_mocked(raise_err=True)) +- monkeypatch.setattr(api, 'current_logger', logger_mocked()) +- grubenvtofile.grubenv_to_file() +- assert grubenvtofile.run.called == 1 +- assert api.current_logger.warnmsg[0].startswith('Could not unlink') +diff --git a/repos/system_upgrade/common/actors/cloud/scanhybridimage/actor.py b/repos/system_upgrade/common/actors/cloud/scanhybridimage/actor.py +new file mode 100644 +index 00000000..b1848141 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/cloud/scanhybridimage/actor.py +@@ -0,0 +1,19 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor.scanhybridimage import scan_hybrid_image ++from leapp.models import FirmwareFacts, HybridImageAzure, InstalledRPM ++from leapp.reporting import Report ++from leapp.tags import FactsPhaseTag, IPUWorkflowTag ++ ++ ++class ScanHybridImageAzure(Actor): ++ """ ++ Check if the system is using Azure hybrid image. 
++ """ ++ ++ name = 'scan_hybrid_image_azure' ++ consumes = (InstalledRPM, FirmwareFacts) ++ produces = (HybridImageAzure, Report) ++ tags = (FactsPhaseTag, IPUWorkflowTag) ++ ++ def process(self): ++ scan_hybrid_image() +diff --git a/repos/system_upgrade/common/actors/cloud/scanhybridimage/libraries/scanhybridimage.py b/repos/system_upgrade/common/actors/cloud/scanhybridimage/libraries/scanhybridimage.py +new file mode 100644 +index 00000000..a37ab415 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/cloud/scanhybridimage/libraries/scanhybridimage.py +@@ -0,0 +1,102 @@ ++import os ++ ++from leapp.libraries.common import rhui ++from leapp.libraries.common.config.version import get_source_major_version ++from leapp.libraries.common.rpms import has_package ++from leapp.libraries.stdlib import api, CalledProcessError, run ++from leapp.models import FirmwareFacts, HybridImageAzure, InstalledRPM ++ ++EFI_MOUNTPOINT = '/boot/efi/' ++AZURE_HYPERVISOR_ID = 'microsoft' ++ ++GRUBENV_BIOS_PATH = '/boot/grub2/grubenv' ++GRUBENV_EFI_PATH = '/boot/efi/EFI/redhat/grubenv' ++ ++ ++def scan_hybrid_image(): ++ """ ++ Check whether the system is using Azure hybrid image. ++ """ ++ ++ hybrid_image_condition_1 = is_azure_agent_installed() and is_bios() ++ hybrid_image_condition_2 = has_efi_partition() and is_bios() and is_running_on_azure_hypervisor() ++ ++ if any([hybrid_image_condition_1, hybrid_image_condition_2]): ++ api.produce( ++ HybridImageAzure( ++ grubenv_is_symlink_to_efi=is_grubenv_symlink_to_efi() ++ ) ++ ) ++ ++ ++def is_azure_agent_installed(): ++ """ ++ Check whether 'WALinuxAgent' package is installed. ++ """ ++ ++ src_ver_major = get_source_major_version() ++ ++ family = rhui.RHUIFamily(rhui.RHUIProvider.AZURE) ++ azure_setups = rhui.RHUI_SETUPS.get(family, []) ++ ++ agent_pkg = None ++ for setup in azure_setups: ++ setup_major_ver = str(setup.os_version[0]) ++ if setup_major_ver == src_ver_major: ++ agent_pkg = setup.extra_info.get('agent_pkg') ++ break ++ ++ if not agent_pkg: ++ return False ++ ++ return has_package(InstalledRPM, agent_pkg) ++ ++ ++def has_efi_partition(): ++ """ ++ Check whether ESP partition exists and is mounted. ++ """ ++ ++ return os.path.exists(EFI_MOUNTPOINT) and os.path.ismount(EFI_MOUNTPOINT) ++ ++ ++def is_bios(): ++ """ ++ Check whether system is booted into BIOS ++ """ ++ ++ ff = next(api.consume(FirmwareFacts), None) ++ return ff and ff.firmware == 'bios' ++ ++ ++def is_running_on_azure_hypervisor(): ++ """ ++ Check if system is running on Azure hypervisor (Hyper-V) ++ """ ++ ++ return detect_virt() == AZURE_HYPERVISOR_ID ++ ++ ++def detect_virt(): ++ """ ++ Detect execution in a virtualized environment ++ """ ++ ++ try: ++ result = run(['systemd-detect-virt']) ++ except CalledProcessError as e: ++ api.current_logger().warning('Unable to detect virtualization environment! Error: {}'.format(e)) ++ return '' ++ ++ return result['stdout'] ++ ++ ++def is_grubenv_symlink_to_efi(): ++ """ ++ Check whether '/boot/grub2/grubenv' is a relative symlink to '/boot/efi/EFI/redhat/grubenv'. 
++ """ ++ ++ is_symlink = os.path.islink(GRUBENV_BIOS_PATH) ++ realpaths_match = os.path.realpath(GRUBENV_BIOS_PATH) == os.path.realpath(GRUBENV_EFI_PATH) ++ ++ return is_symlink and realpaths_match +diff --git a/repos/system_upgrade/common/actors/cloud/scanhybridimage/tests/test_scanhybridimage.py b/repos/system_upgrade/common/actors/cloud/scanhybridimage/tests/test_scanhybridimage.py +new file mode 100644 +index 00000000..a0f6fd4c +--- /dev/null ++++ b/repos/system_upgrade/common/actors/cloud/scanhybridimage/tests/test_scanhybridimage.py +@@ -0,0 +1,124 @@ ++import os ++ ++import pytest ++ ++from leapp import reporting ++from leapp.libraries.actor import scanhybridimage ++from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked, logger_mocked, produce_mocked ++from leapp.libraries.stdlib import api, CalledProcessError ++from leapp.models import FirmwareFacts, HybridImageAzure, InstalledRPM, RPM ++ ++RH_PACKAGER = 'Red Hat, Inc. ' ++WA_AGENT_RPM = RPM( ++ name='WALinuxAgent', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', ++ pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51' ++) ++NO_AGENT_RPM = RPM( ++ name='NoAgent', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch', ++ pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51' ++) ++ ++INSTALLED_AGENT = InstalledRPM(items=[WA_AGENT_RPM]) ++NOT_INSTALLED_AGENT = InstalledRPM(items=[NO_AGENT_RPM]) ++ ++BIOS_FIRMWARE = FirmwareFacts(firmware='bios') ++EFI_FIRMWARE = FirmwareFacts(firmware='efi') ++ ++BIOS_PATH = '/boot/grub2/grubenv' ++EFI_PATH = '/boot/efi/EFI/redhat/grubenv' ++ ++ ++def raise_call_error(args=None): ++ raise CalledProcessError( ++ message='A Leapp Command Error occurred.', ++ command=args, ++ result={'signal': None, 'exit_code': 1, 'pid': 0, 'stdout': 'fake', 'stderr': 'fake'} ++ ) ++ ++ ++class run_mocked(object): ++ def __init__(self, hypervisor='', raise_err=False): ++ self.hypervisor = hypervisor ++ self.called = 0 ++ self.args = [] ++ self.raise_err = raise_err ++ ++ def __call__(self, *args): # pylint: disable=inconsistent-return-statements ++ self.called += 1 ++ self.args.append(args) ++ ++ if self.raise_err: ++ raise_call_error(args) ++ ++ if args[0] == ['systemd-detect-virt']: ++ return {'stdout': self.hypervisor} ++ ++ raise AttributeError("Unexpected command supplied!") ++ ++ ++@pytest.mark.parametrize('hypervisor, expected', [('none', False), ('microsoft', True)]) ++def test_is_running_on_azure_hypervisor(monkeypatch, hypervisor, expected): ++ monkeypatch.setattr(scanhybridimage, 'run', run_mocked(hypervisor)) ++ ++ assert scanhybridimage.is_running_on_azure_hypervisor() == expected ++ ++ ++def test_is_running_on_azure_hypervisor_error(monkeypatch): ++ monkeypatch.setattr(scanhybridimage, 'run', run_mocked('microsoft', raise_err=True)) ++ monkeypatch.setattr(api, 'current_logger', logger_mocked()) ++ ++ result = scanhybridimage.is_running_on_azure_hypervisor() ++ ++ assert result is False ++ assert any('Unable to detect' in msg for msg in api.current_logger.warnmsg) ++ ++ ++@pytest.mark.parametrize('is_symlink', [True, False]) ++@pytest.mark.parametrize('realpath_match', [True, False]) ++def test_is_grubenv_symlink_to_efi(monkeypatch, is_symlink, realpath_match): ++ grubenv_efi_false = '/other/grub/grubenv' ++ ++ monkeypatch.setattr(scanhybridimage, 'GRUBENV_BIOS_PATH', BIOS_PATH) ++ monkeypatch.setattr(scanhybridimage, 'GRUBENV_EFI_PATH', EFI_PATH) ++ ++ 
monkeypatch.setattr(os.path, 'islink', lambda path: is_symlink) ++ ++ def mocked_realpath(path): ++ if realpath_match: ++ return EFI_PATH ++ ++ return grubenv_efi_false if path == EFI_PATH else EFI_PATH ++ ++ monkeypatch.setattr(os.path, 'realpath', mocked_realpath) ++ ++ result = scanhybridimage.is_grubenv_symlink_to_efi() ++ ++ assert result == (is_symlink and realpath_match) ++ ++ ++@pytest.mark.parametrize('is_bios', [True, False]) ++@pytest.mark.parametrize('has_efi_partition', [True, False]) ++@pytest.mark.parametrize('agent_installed', [True, False]) ++@pytest.mark.parametrize('is_microsoft', [True, False]) ++@pytest.mark.parametrize('is_symlink', [True, False]) ++def test_hybrid_image(monkeypatch, tmpdir, is_bios, has_efi_partition, agent_installed, is_microsoft, is_symlink): ++ should_produce = (is_microsoft and is_bios and has_efi_partition) or (agent_installed and is_bios) ++ ++ monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) ++ msgs = [ ++ BIOS_FIRMWARE if is_bios else EFI_FIRMWARE, ++ INSTALLED_AGENT if agent_installed else NOT_INSTALLED_AGENT ++ ] ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch='x86_64', msgs=msgs)) ++ monkeypatch.setattr(api, 'produce', produce_mocked()) ++ monkeypatch.setattr(scanhybridimage, 'has_efi_partition', lambda: has_efi_partition) ++ monkeypatch.setattr(scanhybridimage, 'is_running_on_azure_hypervisor', lambda: is_microsoft) ++ monkeypatch.setattr(scanhybridimage, 'is_grubenv_symlink_to_efi', lambda: is_symlink) ++ ++ scanhybridimage.scan_hybrid_image() ++ ++ if should_produce: ++ assert api.produce.called == 1 ++ assert HybridImageAzure(grubenv_is_symlink_to_efi=is_symlink) in api.produce.model_instances ++ else: ++ assert not api.produce.called +diff --git a/repos/system_upgrade/common/models/grubenv.py b/repos/system_upgrade/common/models/grubenv.py +index be541131..c7f339f1 100644 +--- a/repos/system_upgrade/common/models/grubenv.py ++++ b/repos/system_upgrade/common/models/grubenv.py +@@ -1,12 +1,11 @@ +-from leapp.models import fields, Model ++from leapp.models import Model + from leapp.topics import SystemFactsTopic + + +-class HybridImage(Model): ++class ConvertGrubenvTask(Model): + """ +- Model used for instructing Leapp to convert "grubenv" symlink +- into a regular file in case of hybrid (BIOS/EFI) images using BIOS +- on Azure. ++ Model used for instructing Leapp to convert "grubenv" symlink into a ++ regular file. + """ ++ + topic = SystemFactsTopic +- detected = fields.Boolean(default=False) +diff --git a/repos/system_upgrade/common/models/hybridimage.py b/repos/system_upgrade/common/models/hybridimage.py +new file mode 100644 +index 00000000..6cf860ef +--- /dev/null ++++ b/repos/system_upgrade/common/models/hybridimage.py +@@ -0,0 +1,12 @@ ++from leapp.models import fields, Model ++from leapp.topics import SystemFactsTopic ++ ++ ++class HybridImageAzure(Model): ++ """ ++ Model used to signify that the system is using a hybrid (BIOS/EFI) images ++ using BIOS on Azure. 
++ """ ++ ++ topic = SystemFactsTopic ++ grubenv_is_symlink_to_efi = fields.Boolean(default=False) +diff --git a/repos/system_upgrade/el8toel9/actors/cloud/checkvalidgrubcfghybrid/actor.py b/repos/system_upgrade/el8toel9/actors/cloud/checkvalidgrubcfghybrid/actor.py +new file mode 100644 +index 00000000..14668e42 +--- /dev/null ++++ b/repos/system_upgrade/el8toel9/actors/cloud/checkvalidgrubcfghybrid/actor.py +@@ -0,0 +1,32 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import checkvalidgrubcfghybrid ++from leapp.models import FirmwareFacts, HybridImageAzure ++from leapp.reporting import Report ++from leapp.tags import ChecksPhaseTag, IPUWorkflowTag ++ ++ ++class CheckValidGrubConfigHybrid(Actor): ++ """ ++ Check potential for boot failures in Azure Gen1 VMs due to invalid grubcfg ++ ++ This actor addresses the issue where the `/boot/grub2/grub.cfg` file is ++ overwritten during the upgrade process by an old RHEL7 configuration ++ leftover on the system, causing the system to fail to boot. ++ ++ The problem occurs on hybrid Azure images, which support both UEFI and ++ Legacy systems. The issue is caused by one of the scriplets in `grub-efi` ++ which overwrites during the upgrade current configuration in ++ `/boot/grub2/grub.cfg` by an old configuration from ++ `/boot/efi/EFI/redhat/grub.cfg`. ++ ++ The issue is detected specifically to Azure hybrid cloud systems. ++ ++ """ ++ ++ name = 'check_valid_grubcfg_hybrid' ++ consumes = (FirmwareFacts, HybridImageAzure,) ++ produces = (Report,) ++ tags = (ChecksPhaseTag, IPUWorkflowTag) ++ ++ def process(self): ++ checkvalidgrubcfghybrid.process() +diff --git a/repos/system_upgrade/el8toel9/actors/cloud/checkvalidgrubcfghybrid/libraries/checkvalidgrubcfghybrid.py b/repos/system_upgrade/el8toel9/actors/cloud/checkvalidgrubcfghybrid/libraries/checkvalidgrubcfghybrid.py +new file mode 100644 +index 00000000..374772f5 +--- /dev/null ++++ b/repos/system_upgrade/el8toel9/actors/cloud/checkvalidgrubcfghybrid/libraries/checkvalidgrubcfghybrid.py +@@ -0,0 +1,30 @@ ++from leapp import reporting ++from leapp.libraries.stdlib import api ++from leapp.models import HybridImageAzure ++ ++ ++def process(): ++ hybrid_image = next(api.consume(HybridImageAzure), None) ++ ++ if hybrid_image: ++ reporting.create_report([ ++ reporting.Title( ++ 'Azure hybrid (BIOS/EFI) image detected. The GRUB configuration might be regenerated.' ++ ), ++ reporting.Summary( ++ 'Leapp detected that the system is running on Azure cloud and is booted using BIOS. ' ++ 'While upgrading from older systems (i.e. RHEL 7) on such systems' ++ 'it is possible that the system might end up with invalid GRUB configuration, ' ++ 'as `/boot/grub2/grub.cfg` might be overwritten by an old configuration from ' ++ '`/boot/efi/EFI/redhat/grub.cfg`, which might cause the system to fail to boot. ' ++ ++ 'Please ensure that the system is able to boot with both of these ' ++ 'configurations. 
If an invalid configuration is detected during upgrade, ' ++ 'it will be regenerated automatically using `grub2-mkconfig.`' ++ ), ++ reporting.Severity(reporting.Severity.HIGH), ++ reporting.Groups([ ++ reporting.Groups.PUBLIC_CLOUD, ++ reporting.Groups.BOOT ++ ]), ++ ]) +diff --git a/repos/system_upgrade/el8toel9/actors/cloud/checkvalidgrubcfghybrid/tests/test_checkvalidgrubcfghybrid.py b/repos/system_upgrade/el8toel9/actors/cloud/checkvalidgrubcfghybrid/tests/test_checkvalidgrubcfghybrid.py +new file mode 100644 +index 00000000..3fd9a53c +--- /dev/null ++++ b/repos/system_upgrade/el8toel9/actors/cloud/checkvalidgrubcfghybrid/tests/test_checkvalidgrubcfghybrid.py +@@ -0,0 +1,25 @@ ++import pytest ++ ++from leapp import reporting ++from leapp.libraries.actor import checkvalidgrubcfghybrid ++from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked, produce_mocked ++from leapp.libraries.stdlib import api ++from leapp.models import HybridImageAzure ++ ++ ++@pytest.mark.parametrize('is_hybrid', [True, False]) ++def test_check_invalid_grubcfg_hybrid(monkeypatch, is_hybrid): ++ ++ monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) ++ ++ msgs = [HybridImageAzure()] if is_hybrid else [] ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch='x86_64', msgs=msgs)) ++ monkeypatch.setattr(api, "produce", produce_mocked()) ++ ++ checkvalidgrubcfghybrid.process() ++ ++ if is_hybrid: ++ assert reporting.create_report.called == 1 ++ assert 'regenerated' in reporting.create_report.report_fields['title'] ++ else: ++ assert reporting.create_report.called == 0 +diff --git a/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/actor.py b/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/actor.py +index 68de0433..a350c7a0 100644 +--- a/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/actor.py ++++ b/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/actor.py +@@ -1,6 +1,6 @@ + from leapp.actors import Actor + from leapp.libraries.actor import ensurevalidgrubcfghybrid +-from leapp.models import HybridImage ++from leapp.models import HybridImageAzure + from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag + + +@@ -8,25 +8,21 @@ class EnsureValidGrubcfgHybrid(Actor): + """ + Resolve boot failures in Azure Gen1 VMs during upgrades from RHEL 7 to RHEL 8 to RHEL 9. + +- This actor addresses the issue where the `/boot/grub2/grub.cfg` file is +- overwritten during the upgrade process by an old RHEL7 configuration +- leftover on the system, causing the system to fail to boot. +- +- The problem occurs on hybrid Azure images, which support both UEFI and +- Legacy systems and have both `grub-pc` and `grub-efi` packages installed. +- It is caused by one of the scriplets in `grub-efi` which overwrites the old +- configuration. +- + If old configuration is detected, this actor regenerates the grub + configuration using `grub2-mkconfig -o /boot/grub2/grub.cfg` after + installing rpms to ensure the correct boot configuration is in place. + ++ Old configuration is detected by looking for a menuentry corresponding to a ++ kernel from RHEL 7 which should not be present on RHEL 8 systems. ++ + The fix is applied specifically to Azure hybrid cloud systems. + ++ See also CheckValidGrubConfigHybrid actor. 
++ + """ + + name = 'ensure_valid_grubcfg_hybrid' +- consumes = (HybridImage,) ++ consumes = (HybridImageAzure,) + produces = () + tags = (ApplicationsPhaseTag, IPUWorkflowTag) + +diff --git a/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/libraries/ensurevalidgrubcfghybrid.py b/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/libraries/ensurevalidgrubcfghybrid.py +index 127eccfc..f94cf67b 100644 +--- a/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/libraries/ensurevalidgrubcfghybrid.py ++++ b/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/libraries/ensurevalidgrubcfghybrid.py +@@ -3,7 +3,7 @@ import re + from leapp.exceptions import StopActorExecutionError + from leapp.libraries.common.config.architecture import ARCH_ACCEPTED + from leapp.libraries.stdlib import api, CalledProcessError, run +-from leapp.models import HybridImage ++from leapp.models import HybridImageAzure + + GRUB_CFG_PATH = '/boot/grub2/grub.cfg' + +@@ -23,7 +23,7 @@ def process(): + + + def _is_hybrid_image(): +- return next(api.consume(HybridImage), None) is not None ++ return next(api.consume(HybridImageAzure), None) is not None + + + def _read_grubcfg(): +diff --git a/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/tests/test_ensurevalidgrubcfghybrid.py b/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/tests/test_ensurevalidgrubcfghybrid.py +index c0fb0a0d..3ba46cb5 100644 +--- a/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/tests/test_ensurevalidgrubcfghybrid.py ++++ b/repos/system_upgrade/el8toel9/actors/cloud/ensurevalidgrubcfghybrid/tests/test_ensurevalidgrubcfghybrid.py +@@ -6,7 +6,7 @@ from leapp.exceptions import StopActorExecutionError + from leapp.libraries.actor import ensurevalidgrubcfghybrid + from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked + from leapp.libraries.stdlib import api, CalledProcessError +-from leapp.models import HybridImage ++from leapp.models import HybridImageAzure + + CUR_DIR = os.path.dirname(os.path.abspath(__file__)) + +@@ -63,7 +63,7 @@ def test_valid_grubcfg(monkeypatch): + Test valid configuration does not trigger grub2-mkconfig + """ + +- monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[HybridImage()])) ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[HybridImageAzure()])) + monkeypatch.setattr(api, 'current_logger', logger_mocked()) + monkeypatch.setattr(ensurevalidgrubcfghybrid, 'run', run_mocked(raise_err=False)) + +@@ -83,7 +83,7 @@ def test_invalid_grubcfg(monkeypatch): + Test invalid configuration triggers grub2-mkconfig + """ + +- monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[HybridImage()])) ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[HybridImageAzure()])) + monkeypatch.setattr(api, 'current_logger', logger_mocked()) + monkeypatch.setattr(ensurevalidgrubcfghybrid, 'run', run_mocked(raise_err=False)) + +@@ -104,7 +104,7 @@ def test_run_error(monkeypatch): + Test invalid configuration triggers grub2-mkconfig + """ + +- monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[HybridImage()])) ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[HybridImageAzure()])) + monkeypatch.setattr(api, 'current_logger', logger_mocked()) + monkeypatch.setattr(ensurevalidgrubcfghybrid, 'run', run_mocked(raise_err=True)) + +-- +2.50.1 + diff --git 
a/SOURCES/0048-Point-to-leapp-repository-contribution-guidelines.patch b/SOURCES/0048-Point-to-leapp-repository-contribution-guidelines.patch new file mode 100644 index 0000000..9328d59 --- /dev/null +++ b/SOURCES/0048-Point-to-leapp-repository-contribution-guidelines.patch @@ -0,0 +1,61 @@ +From ba074a40e4297c90cbb8e5e522ec2a154fa2b9b0 Mon Sep 17 00:00:00 2001 +From: Michal Bocek +Date: Tue, 1 Jul 2025 20:38:20 +0200 +Subject: [PATCH 48/66] Point to leapp-repository contribution guidelines + +Instead of the leapp framework contribution guidelines. + +Also update the leapp-repository contribution guidelines to point to the +new place of the leapp framework Python coding guidelines (moved under +https://github.com/oamg/leapp/commit/123f2700dc0a354d4357ce325ff61fcb2f53e33b). +--- + CONTRIBUTING.md | 2 +- + README.md | 6 +++--- + docs/source/contrib-and-devel-guidelines.md | 2 +- + 3 files changed, 5 insertions(+), 5 deletions(-) + +diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md +index d4cb2046..7315b693 100644 +--- a/CONTRIBUTING.md ++++ b/CONTRIBUTING.md +@@ -1 +1 @@ +-See the [Contribution guidelines](https://leapp.readthedocs.io/en/latest/contributing.html) ++See the [contribution guidelines](https://leapp-repository.readthedocs.io/latest/contrib-and-devel-guidelines.html). +diff --git a/README.md b/README.md +index 6b45b4b7..43da589e 100644 +--- a/README.md ++++ b/README.md +@@ -1,6 +1,6 @@ +-**Before doing anything, please read the upstream [documentation](https://leapp-repository.readthedocs.io/).** ++**Before doing anything, please read the [leapp-repository documentation](https://leapp-repository.readthedocs.io/).** + +-Also, you could find useufl to read [Leapp framework documentation](https://leapp.readthedocs.io/). ++Also, you could find the [Leapp framework documentation](https://leapp.readthedocs.io/) useful to read. + + --- + +@@ -17,7 +17,7 @@ Also, you could find useufl to read [Leapp framework documentation](https://leap + - *All files in /var/log/leapp* + - */var/lib/leapp/leapp.db* + - *journalctl* +- - If you want, you can optionally send anything else would you like to provide (e.g. storage info) ++ - If you want, you can optionally send any other relevant information (e.g. storage, network) + + **For your convenience you can pack all logs with this command:** + +diff --git a/docs/source/contrib-and-devel-guidelines.md b/docs/source/contrib-and-devel-guidelines.md +index 66bef9b1..f2edf8b7 100644 +--- a/docs/source/contrib-and-devel-guidelines.md ++++ b/docs/source/contrib-and-devel-guidelines.md +@@ -1,7 +1,7 @@ + # Contribution and development guidelines + ## Code guidelines + +-Your code should follow the [Python Coding Guidelines](https://leapp.readthedocs.io/en/latest/python-coding-guidelines.html) used for the leapp project. On top of these rules follow instructions ++Your code should follow the [Python Coding Guidelines](https://leapp.readthedocs.io/en/latest/contributing.html#follow-python-coding-guidelines) used for the leapp project. On top of these rules follow instructions + below. 
+ + ### Retrieving information about the source system should be separated from its use +-- +2.50.1 + diff --git a/SOURCES/0049-Read-the-DNF-config-by-module.py-library.patch b/SOURCES/0049-Read-the-DNF-config-by-module.py-library.patch new file mode 100644 index 0000000..046d56c --- /dev/null +++ b/SOURCES/0049-Read-the-DNF-config-by-module.py-library.patch @@ -0,0 +1,46 @@ +From be6d23241e6fbe0c42a4f6a2df48efd6f999ed71 Mon Sep 17 00:00:00 2001 +From: karolinku +Date: Thu, 3 Jul 2025 13:12:32 +0200 +Subject: [PATCH 49/66] Read the DNF config by module.py library + +The DNF configuration has not been loaded when trying to get +information about available module streams (library module.py). +This causes a traceback e.g. on systems which must access DNF +repositories via a proxy. +This patch introduces loading the DNF configuration before trying +to access remote resources. + +Jira: RHEL-39095 +--- + .../common/actors/rpmscanner/libraries/rpmscanner.py | 2 ++ + repos/system_upgrade/common/libraries/module.py | 1 + + 2 files changed, 3 insertions(+) + +diff --git a/repos/system_upgrade/common/actors/rpmscanner/libraries/rpmscanner.py b/repos/system_upgrade/common/actors/rpmscanner/libraries/rpmscanner.py +index dbe56191..74c4b101 100644 +--- a/repos/system_upgrade/common/actors/rpmscanner/libraries/rpmscanner.py ++++ b/repos/system_upgrade/common/actors/rpmscanner/libraries/rpmscanner.py +@@ -25,6 +25,8 @@ except ImportError: + + def _get_package_repository_data_yum(): + yum_base = yum.YumBase() ++ # DNF configuration is not loaded here, since no impact for operations ++ # done by the actor is observed here + pkg_repos = {} + + try: +diff --git a/repos/system_upgrade/common/libraries/module.py b/repos/system_upgrade/common/libraries/module.py +index 7d4e8aa4..db725e71 100644 +--- a/repos/system_upgrade/common/libraries/module.py ++++ b/repos/system_upgrade/common/libraries/module.py +@@ -38,6 +38,7 @@ def _create_or_get_dnf_base(base=None): + conf.substitutions.update_from_etc('/') + + base = dnf.Base(conf=conf) ++ base.conf.read() + base.init_plugins() + base.read_all_repos() + # configure plugins after the repositories are loaded +-- +2.50.1 + diff --git a/SOURCES/0050-Disable-localpkg_gpgcheck-parameter-from-plugin-data.patch b/SOURCES/0050-Disable-localpkg_gpgcheck-parameter-from-plugin-data.patch new file mode 100644 index 0000000..5171e2e --- /dev/null +++ b/SOURCES/0050-Disable-localpkg_gpgcheck-parameter-from-plugin-data.patch @@ -0,0 +1,34 @@ +From a4d10adb2f432ef09898228c0e2d00288bbcc412 Mon Sep 17 00:00:00 2001 +From: karolinku +Date: Mon, 7 Jul 2025 14:52:34 +0200 +Subject: [PATCH 50/66] Disable localpkg_gpgcheck parameter from plugin data + +This commit is setting the localpkg_gpgcheck DNF option within the +rhel-upgrade plugin to 0 (disabled). +The upgrade process has been blocked with "Error: GPG check FAILED" +when dnf has been configured to apply gpg check also on local packages +(localpkg_gpgcheck=1). That's because the bundled leapp*-deps meta +packages, which are managing Leapp and leapp-repository dependencies +during the transition to the new system, are not signed by the Red +Hat key. Therefore, this option needs to be disabled. 
+ +Jira: RHEL-47472 +--- + repos/system_upgrade/common/files/rhel_upgrade.py | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/repos/system_upgrade/common/files/rhel_upgrade.py b/repos/system_upgrade/common/files/rhel_upgrade.py +index 34f7b8f9..4f76a61d 100644 +--- a/repos/system_upgrade/common/files/rhel_upgrade.py ++++ b/repos/system_upgrade/common/files/rhel_upgrade.py +@@ -116,6 +116,7 @@ class RhelUpgradeCommand(dnf.cli.Command): + self.base.conf.best = self.plugin_data['dnf_conf']['best'] + self.base.conf.assumeyes = True + self.base.conf.gpgcheck = self.plugin_data['dnf_conf']['gpgcheck'] ++ self.base.conf.localpkg_gpgcheck = False + self.base.conf.debug_solver = self.plugin_data['dnf_conf']['debugsolver'] + self.base.conf.module_platform_id = self.plugin_data['dnf_conf']['platform_id'] + installroot = self.plugin_data['dnf_conf'].get('installroot') +-- +2.50.1 + diff --git a/SOURCES/0051-PR-welcome-msg-update-link-to-contrib-guidelines-139.patch b/SOURCES/0051-PR-welcome-msg-update-link-to-contrib-guidelines-139.patch new file mode 100644 index 0000000..4cee709 --- /dev/null +++ b/SOURCES/0051-PR-welcome-msg-update-link-to-contrib-guidelines-139.patch @@ -0,0 +1,37 @@ +From c3bef4d9e89d109aee226f64e54e2eed088e591b Mon Sep 17 00:00:00 2001 +From: Michal Bocek +Date: Tue, 8 Jul 2025 14:09:30 +0200 +Subject: [PATCH 51/66] PR welcome msg: update link to contrib guidelines + (#1399) + +Following the https://github.com/oamg/leapp-repository/pull/1394 where +the README was updated to point to the leapp-repository guidelines, this +change updates the link to the guidelines in the PR welcome message. +--- + .github/workflows/pr-welcome-msg.yml | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/.github/workflows/pr-welcome-msg.yml b/.github/workflows/pr-welcome-msg.yml +index 0102c41f..f056fb79 100644 +--- a/.github/workflows/pr-welcome-msg.yml ++++ b/.github/workflows/pr-welcome-msg.yml +@@ -19,7 +19,7 @@ jobs: + issue-number: ${{ github.event.pull_request.number }} + body: | + ## **Thank you for contributing to the Leapp project!** +- Please note that every PR needs to comply with the [Leapp Guidelines](https://leapp.readthedocs.io/en/latest/contributing.html#) and must pass all tests in order to be mergeable. ++ Please note that every PR needs to comply with the [leapp-repository contribution and development guidelines](https://leapp-repository.readthedocs.io/latest/contrib-and-devel-guidelines.html) and must pass all tests in order to be mergeable. + If you want to request a review or rebuild a package in copr, you can use following commands as a comment: + - **`review please @oamg/developers`** to notify leapp developers of the review request + - **`/packit copr-build`** to submit a public copr build using packit +@@ -39,6 +39,6 @@ jobs: + + See other labels for particular jobs defined in the `.packit.yaml` file. + +- Please [open ticket](https://url.corp.redhat.com/oamg-ci-issue) in case you experience technical problem with the CI. (RH internal only) ++ Please [open ticket](https://red.ht/rhel-upgrades-ci-issue) in case you experience technical problem with the CI. (RH internal only) + + **Note:** In case there are problems with tests not being triggered automatically on new PR/commit or pending for a long time, please contact leapp-infra. 
+-- +2.50.1 + diff --git a/SOURCES/0052-Fix-skip-checking-ownership-of-files-in-.-directory-.patch b/SOURCES/0052-Fix-skip-checking-ownership-of-files-in-.-directory-.patch new file mode 100644 index 0000000..084153a --- /dev/null +++ b/SOURCES/0052-Fix-skip-checking-ownership-of-files-in-.-directory-.patch @@ -0,0 +1,30 @@ +From 55c885ce7fbf5fd237047d4f1d8e66a043e3a1b3 Mon Sep 17 00:00:00 2001 +From: Matej Matuska +Date: Fri, 11 Jul 2025 14:32:57 +0200 +Subject: [PATCH 52/66] Fix: skip checking ownership of files in + .../directory-hash/ dir + +This check has been reintroduced in 87013d25b5aa3, however the "root" +directory during traversal is: +/var/lib/leapp/el10userspace/etc/pki/ca-trust/extracted/pem/directory-hash. +The skip condition looks for '/directory-hash/' which is false. +--- + .../actors/targetuserspacecreator/libraries/userspacegen.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py +index 9fc96a52..cbad91fe 100644 +--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py ++++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py +@@ -311,7 +311,7 @@ def _get_files_owned_by_rpms(context, dirpath, pkgs=None, recursive=False): + searchdir = context.full_path(dirpath) + if recursive: + for root, _, files in os.walk(searchdir): +- if '/directory-hash/' in root: ++ if '/directory-hash' in root: + # tl;dr; for the performance improvement + # The directory has been relatively recently added to ca-certificates + # rpm on EL 9+ systems and the content does not seem to be important +-- +2.50.1 + diff --git a/SOURCES/0053-fixup-Fix-skip-checking-ownership-of-files-in-.-dire.patch b/SOURCES/0053-fixup-Fix-skip-checking-ownership-of-files-in-.-dire.patch new file mode 100644 index 0000000..9876fd2 --- /dev/null +++ b/SOURCES/0053-fixup-Fix-skip-checking-ownership-of-files-in-.-dire.patch @@ -0,0 +1,35 @@ +From acaed7edcea66f2cef9aabc148a431825f91479e Mon Sep 17 00:00:00 2001 +From: Matej Matuska +Date: Fri, 11 Jul 2025 18:17:51 +0200 +Subject: [PATCH 53/66] fixup! Fix: skip checking ownership of files in + .../directory-hash/ dir + +--- + .../actors/targetuserspacecreator/libraries/userspacegen.py | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py +index cbad91fe..699f1517 100644 +--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py ++++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py +@@ -294,6 +294,8 @@ def _get_files_owned_by_rpms(context, dirpath, pkgs=None, recursive=False): + """ + Return the list of file names inside dirpath owned by RPMs. + ++ The returned paths are relative to the dirpath. ++ + This is important e.g. in case of RHUI which installs specific repo files + in the yum.repos.d directory. 
+ +@@ -334,7 +336,7 @@ def _get_files_owned_by_rpms(context, dirpath, pkgs=None, recursive=False): + api.current_logger().debug('SKIP the {} file: not owned by any rpm'.format(fname)) + continue + if pkgs and not [pkg for pkg in pkgs if pkg in result['stdout']]: +- api.current_logger().debug('SKIP the {} file: not owned by any searched rpm:'.format(fname)) ++ api.current_logger().debug('SKIP the {} file: not owned by any searched rpm'.format(fname)) + continue + api.current_logger().debug('Found the file owned by an rpm: {}.'.format(fname)) + files_owned_by_rpms.append(fname) +-- +2.50.1 + diff --git a/SOURCES/0054-Add-test.patch b/SOURCES/0054-Add-test.patch new file mode 100644 index 0000000..f874936 --- /dev/null +++ b/SOURCES/0054-Add-test.patch @@ -0,0 +1,100 @@ +From 6df858daa7c678d3e951bef3fdf077a0a5b49d36 Mon Sep 17 00:00:00 2001 +From: Matej Matuska +Date: Fri, 11 Jul 2025 18:18:07 +0200 +Subject: [PATCH 54/66] Add test + +--- + .../tests/unit_test_targetuserspacecreator.py | 73 +++++++++++++++++++ + 1 file changed, 73 insertions(+) + +diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py +index 69ed7040..219cb7c3 100644 +--- a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py ++++ b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py +@@ -13,6 +13,7 @@ from leapp.libraries.actor import userspacegen + from leapp.libraries.common import overlaygen, repofileutils, rhsm + from leapp.libraries.common.config import architecture + from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked, produce_mocked ++from leapp.libraries.stdlib import api, CalledProcessError + from leapp.utils.deprecation import suppress_deprecation + + if sys.version_info < (2, 8): +@@ -1225,3 +1226,75 @@ def test_perform_ok(monkeypatch): + assert userspacegen.api.produce.model_instances[1] == msg_target_repos + # this one is full of constants, so it's safe to check just the instance + assert isinstance(userspacegen.api.produce.model_instances[2], models.TargetUserSpaceInfo) ++ ++ ++def test__get_files_owned_by_rpms(monkeypatch): ++ # this is not necessarily accurate, but close enoguh ++ fake_walk = [ ++ ("/base/dir/etc/pki", ["ca-trust", "tls", "rpm-gpg"], []), ++ ("/base/dir/etc/pki/ca-trust", ["extracted", "source"], []), ++ ("/base/dir/etc/pki/ca-trust/extracted", ["openssl", "java"], []), ++ ("/base/dir/etc/pki/ca-trust/extracted/openssl", [], ["ca-bundle.trust.crt"]), ++ ("/base/dir/etc/pki/ca-trust/extracted/java", [], ["cacerts"]), ++ ++ ("/base/dir/etc/pki/ca-trust/source", ["anchors", "directory-hash"], []), ++ ("/base/dir/etc/pki/ca-trust/source/anchors", [], ["my-ca.crt"]), ++ ("/base/dir/etc/pki/ca-trust/extracted/pem/directory-hash", [], [ ++ "5931b5bc.0", "a94d09e5.0" ++ ]), ++ ("/base/dir/etc/pki/tls", ["certs", "private"], []), ++ ("/base/dir/etc/pki/tls/certs", [], ["server.crt", "ca-bundle.crt"]), ++ ("/base/dir/etc/pki/tls/private", [], ["server.key"]), ++ ("/base/dir/etc/pki/rpm-gpg", [], [ ++ "RPM-GPG-KEY-1", ++ "RPM-GPG-KEY-2", ++ ]), ++ ] ++ monkeypatch.setattr(os, 'walk', lambda _: fake_walk) ++ logger = logger_mocked() ++ monkeypatch.setattr(api, 'current_logger', logger) ++ ++ class _MockContext(): ++ ++ def __init__(self, owned): ++ self.base_dir = '/base/dir' ++ # list of files owned, no base_dir prefixed ++ 
self.owned = owned ++ ++ def full_path(self, path): ++ return os.path.join(self.base_dir, os.path.abspath(path).lstrip('/')) ++ ++ def call(self, cmd): ++ assert len(cmd) == 3 and cmd[0] == 'rpm' and cmd[1] == '-qf' ++ if cmd[2] in self.owned: ++ return {'exit_code': 0} ++ raise CalledProcessError("Command failed with exit code 1", cmd, 1) ++ ++ search_dir = '/etc/pki' ++ owned = [ ++ 'tls/certs/ca-bundle.crt', ++ 'ca-trust/extracted/openssl/ca-bundle.trust.crt', ++ 'rpm-gpg/RPM-GPG-KEY-1', ++ 'rpm-gpg/RPM-GPG-KEY-2', ++ 'ca-trust/extracted/pem/directory-hash/a94d09e5.0', ++ 'ca-trust/extracted/pem/directory-hash/a94d09e5.0', ++ ] ++ # the rpm -qf call happens with the full path ++ owned_fullpath = [os.path.join(search_dir, f) for f in owned] ++ context = _MockContext(owned_fullpath) ++ ++ out = userspacegen._get_files_owned_by_rpms(context, '/etc/pki', recursive=True) ++ ++ # any directory-hash directory should be skipped ++ assert sorted(owned[0:4]) == sorted(out) ++ ++ def has_dbgmsg(substr): ++ return any([substr in log for log in logger.dbgmsg]) ++ ++ # test a few ++ assert has_dbgmsg( ++ "SKIP files in the /base/dir/etc/pki/ca-trust/extracted/pem/directory-hash directory:" ++ " Not important for the IPU.", ++ ) ++ assert has_dbgmsg('SKIP the tls/certs/server.crt file: not owned by any rpm') ++ assert has_dbgmsg('Found the file owned by an rpm: rpm-gpg/RPM-GPG-KEY-2.') +-- +2.50.1 + diff --git a/SOURCES/0055-fixup-Add-test.patch b/SOURCES/0055-fixup-Add-test.patch new file mode 100644 index 0000000..18c9f6b --- /dev/null +++ b/SOURCES/0055-fixup-Add-test.patch @@ -0,0 +1,25 @@ +From b9d7e965ee1e45f93e8250c0eda1e5b4968ecc34 Mon Sep 17 00:00:00 2001 +From: Matej Matuska +Date: Fri, 11 Jul 2025 18:25:50 +0200 +Subject: [PATCH 55/66] fixup! Add test + +--- + .../tests/unit_test_targetuserspacecreator.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py +index 219cb7c3..15db2570 100644 +--- a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py ++++ b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py +@@ -1229,7 +1229,7 @@ def test_perform_ok(monkeypatch): + + + def test__get_files_owned_by_rpms(monkeypatch): +- # this is not necessarily accurate, but close enoguh ++ # this is not necessarily accurate, but close enough + fake_walk = [ + ("/base/dir/etc/pki", ["ca-trust", "tls", "rpm-gpg"], []), + ("/base/dir/etc/pki/ca-trust", ["extracted", "source"], []), +-- +2.50.1 + diff --git a/SOURCES/0056-Add-test-for-non-recursive.patch b/SOURCES/0056-Add-test-for-non-recursive.patch new file mode 100644 index 0000000..bed1100 --- /dev/null +++ b/SOURCES/0056-Add-test-for-non-recursive.patch @@ -0,0 +1,110 @@ +From 803aebd0edffb088181f99d0a4024231a7be942c Mon Sep 17 00:00:00 2001 +From: Matej Matuska +Date: Mon, 14 Jul 2025 11:15:15 +0200 +Subject: [PATCH 56/66] Add test for non-recursive + +--- + .../tests/unit_test_targetuserspacecreator.py | 67 +++++++++++++------ + 1 file changed, 47 insertions(+), 20 deletions(-) + +diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py +index 15db2570..267c064e 100644 +--- 
a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py ++++ b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py +@@ -1228,7 +1228,45 @@ def test_perform_ok(monkeypatch): + assert isinstance(userspacegen.api.produce.model_instances[2], models.TargetUserSpaceInfo) + + ++class _MockContext(): ++ ++ def __init__(self, base_dir, owned_by_rpms): ++ self.base_dir = base_dir ++ # list of files owned, no base_dir prefixed ++ self.owned_by_rpms = owned_by_rpms ++ ++ def full_path(self, path): ++ return os.path.join(self.base_dir, os.path.abspath(path).lstrip('/')) ++ ++ def call(self, cmd): ++ assert len(cmd) == 3 and cmd[0] == 'rpm' and cmd[1] == '-qf' ++ if cmd[2] in self.owned_by_rpms: ++ return {'exit_code': 0} ++ raise CalledProcessError("Command failed with exit code 1", cmd, 1) ++ ++ + def test__get_files_owned_by_rpms(monkeypatch): ++ ++ def listdir_mocked(path): ++ assert path == '/base/dir/some/path' ++ return ['fileA', 'fileB.txt', 'test.log', 'script.sh'] ++ ++ monkeypatch.setattr(os, 'listdir', listdir_mocked) ++ logger = logger_mocked() ++ monkeypatch.setattr(api, 'current_logger', logger) ++ ++ search_dir = '/some/path' ++ # output doesn't include full paths ++ owned = ['fileA', 'script.sh'] ++ # but the rpm -qf call happens with the full path ++ owned_fullpath = [os.path.join(search_dir, f) for f in owned] ++ context = _MockContext('/base/dir', owned_fullpath) ++ ++ out = userspacegen._get_files_owned_by_rpms(context, '/some/path', recursive=False) ++ assert sorted(owned) == sorted(out) ++ ++ ++def test__get_files_owned_by_rpms_recursive(monkeypatch): + # this is not necessarily accurate, but close enough + fake_walk = [ + ("/base/dir/etc/pki", ["ca-trust", "tls", "rpm-gpg"], []), +@@ -1250,27 +1288,17 @@ def test__get_files_owned_by_rpms(monkeypatch): + "RPM-GPG-KEY-2", + ]), + ] +- monkeypatch.setattr(os, 'walk', lambda _: fake_walk) +- logger = logger_mocked() +- monkeypatch.setattr(api, 'current_logger', logger) + +- class _MockContext(): ++ def walk_mocked(path): ++ assert path == '/base/dir/etc/pki' ++ return fake_walk + +- def __init__(self, owned): +- self.base_dir = '/base/dir' +- # list of files owned, no base_dir prefixed +- self.owned = owned +- +- def full_path(self, path): +- return os.path.join(self.base_dir, os.path.abspath(path).lstrip('/')) +- +- def call(self, cmd): +- assert len(cmd) == 3 and cmd[0] == 'rpm' and cmd[1] == '-qf' +- if cmd[2] in self.owned: +- return {'exit_code': 0} +- raise CalledProcessError("Command failed with exit code 1", cmd, 1) ++ monkeypatch.setattr(os, 'walk', walk_mocked) ++ logger = logger_mocked() ++ monkeypatch.setattr(api, 'current_logger', logger) + + search_dir = '/etc/pki' ++ # output doesn't include full paths + owned = [ + 'tls/certs/ca-bundle.crt', + 'ca-trust/extracted/openssl/ca-bundle.trust.crt', +@@ -1281,10 +1309,9 @@ def test__get_files_owned_by_rpms(monkeypatch): + ] + # the rpm -qf call happens with the full path + owned_fullpath = [os.path.join(search_dir, f) for f in owned] +- context = _MockContext(owned_fullpath) +- +- out = userspacegen._get_files_owned_by_rpms(context, '/etc/pki', recursive=True) ++ context = _MockContext('/base/dir', owned_fullpath) + ++ out = userspacegen._get_files_owned_by_rpms(context, search_dir, recursive=True) + # any directory-hash directory should be skipped + assert sorted(owned[0:4]) == sorted(out) + +-- +2.50.1 + diff --git 
a/SOURCES/0057-el8to9-actors-mysql-Add-MySQL-actor-with-recommendat.patch b/SOURCES/0057-el8to9-actors-mysql-Add-MySQL-actor-with-recommendat.patch new file mode 100644 index 0000000..05954d3 --- /dev/null +++ b/SOURCES/0057-el8to9-actors-mysql-Add-MySQL-actor-with-recommendat.patch @@ -0,0 +1,176 @@ +From 7c5d7f711d92fffac5567fd4b31bd6df4d24f1f9 Mon Sep 17 00:00:00 2001 +From: Ales Nezbeda +Date: Wed, 16 Jul 2025 11:58:47 +0200 +Subject: [PATCH 57/66] el8to9: actors: mysql: Add MySQL actor with + recommendations (#1335) + +Introduce a new mysql_check actor which checks for presence of the +mysql_server package and reports related recommendations for upgrade. + +Jira: RHEL-5459 +--- + .../el8toel9/actors/mysqlcheck/actor.py | 20 ++++++ + .../actors/mysqlcheck/libraries/mysqlcheck.py | 51 +++++++++++++++ + .../mysqlcheck/tests/test_mysqlcheck.py | 65 +++++++++++++++++++ + 3 files changed, 136 insertions(+) + create mode 100644 repos/system_upgrade/el8toel9/actors/mysqlcheck/actor.py + create mode 100644 repos/system_upgrade/el8toel9/actors/mysqlcheck/libraries/mysqlcheck.py + create mode 100644 repos/system_upgrade/el8toel9/actors/mysqlcheck/tests/test_mysqlcheck.py + +diff --git a/repos/system_upgrade/el8toel9/actors/mysqlcheck/actor.py b/repos/system_upgrade/el8toel9/actors/mysqlcheck/actor.py +new file mode 100644 +index 00000000..d675d75c +--- /dev/null ++++ b/repos/system_upgrade/el8toel9/actors/mysqlcheck/actor.py +@@ -0,0 +1,20 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor.mysqlcheck import process ++from leapp.models import DistributionSignedRPM, Report ++from leapp.tags import ChecksPhaseTag, IPUWorkflowTag ++ ++ ++class MySQLCheck(Actor): ++ """ ++ Actor checking for presence of MySQL installation. ++ ++ Provides user with information related to upgrading systems ++ with MySQL installed. ++ """ ++ name = 'mysql_check' ++ consumes = (DistributionSignedRPM,) ++ produces = (Report,) ++ tags = (ChecksPhaseTag, IPUWorkflowTag) ++ ++ def process(self): ++ process() +diff --git a/repos/system_upgrade/el8toel9/actors/mysqlcheck/libraries/mysqlcheck.py b/repos/system_upgrade/el8toel9/actors/mysqlcheck/libraries/mysqlcheck.py +new file mode 100644 +index 00000000..b446d9c4 +--- /dev/null ++++ b/repos/system_upgrade/el8toel9/actors/mysqlcheck/libraries/mysqlcheck.py +@@ -0,0 +1,51 @@ ++from leapp import reporting ++from leapp.libraries.common.rpms import has_package ++from leapp.models import DistributionSignedRPM ++ ++ ++def _report_server_installed(): ++ """ ++ Create report on mysql-server package installation detection. ++ ++ Should remind user about present MySQL server package ++ installation, warn them about necessary additional steps, and ++ redirect them to online documentation for the upgrade process. ++ """ ++ reporting.create_report([ ++ reporting.Title('Further action to upgrade MySQL might be needed'), ++ reporting.Summary( ++ 'The MySQL server component will be reinstalled during the upgrade with a RHEL 9' ++ ' version. Since RHEL 9 includes the same MySQL version 8.0 by default, no action' ++ ' should be required and there should not be any compatibility issues. However,' ++ ' it is still advisable to follow the documentation on this topic for up to date' ++ ' recommendations.' ++ ' Keep in mind that MySQL 8.0, which is the default in RHEL 9, will reach the end' ++ ' of \'Extended Support\' in April 2026. As such it is advisable to upgrade to' ++ ' MySQL version 8.4, which is provided via a module. 
MySQL 8.4 is also the' ++ ' default version for RHEL 10, therefore having MySQL 8.4 on the RHEL 9 system' ++ ' will make a future upgrade process to RHEL 10 smoother.' ++ ), ++ reporting.Severity(reporting.Severity.MEDIUM), ++ reporting.Groups([reporting.Groups.SERVICES]), ++ reporting.ExternalLink(title='Migrating MySQL databases from RHEL 8 to RHEL 9', ++ url='https://access.redhat.com/articles/7099753'), ++ reporting.RelatedResource('package', 'mysql-server'), ++ reporting.Remediation(hint=( ++ 'Dump or backup your data before proceeding with the upgrade ' ++ 'and consult attached article ' ++ '\'Migrating MySQL databases from RHEL 8 to RHEL 9\' ' ++ 'with up to date recommended steps before and after the upgrade.' ++ )), ++ ]) ++ ++ ++def process(): ++ """ ++ Create reports according to detected MySQL packages. ++ ++ Create the report if the mysql-server rpm (RH signed) is installed. ++ """ ++ has_server = has_package(DistributionSignedRPM, 'mysql-server') ++ ++ if has_server: ++ _report_server_installed() +diff --git a/repos/system_upgrade/el8toel9/actors/mysqlcheck/tests/test_mysqlcheck.py b/repos/system_upgrade/el8toel9/actors/mysqlcheck/tests/test_mysqlcheck.py +new file mode 100644 +index 00000000..385f4dfd +--- /dev/null ++++ b/repos/system_upgrade/el8toel9/actors/mysqlcheck/tests/test_mysqlcheck.py +@@ -0,0 +1,65 @@ ++import pytest ++ ++from leapp import reporting ++from leapp.libraries.actor.mysqlcheck import process ++from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked ++from leapp.libraries.stdlib import api ++from leapp.models import DistributionSignedRPM, RPM ++ ++ ++def _generate_rpm_with_name(name): ++ """ ++ Generate new RPM model item with given name. ++ ++ Parameters: ++ name (str): rpm name ++ ++ Returns: ++ rpm (RPM): new RPM object with name parameter set ++ """ ++ return RPM(name=name, ++ version='0.1', ++ release='1.sm01', ++ epoch='1', ++ pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51', ++ packager='Red Hat, Inc. ', ++ arch='noarch') ++ ++ ++@pytest.mark.parametrize('has_server', [ ++ (True), # with server ++ (False), # without server ++]) ++def test_actor_execution(monkeypatch, has_server): ++ """ ++ Parametrized helper function for test_actor_* functions. ++ ++ First generate list of RPM models based on set arguments. Then, run ++ the actor fed with our RPM list. Finally, assert Reports ++ according to set arguments. 
++ ++ Parameters: ++ has_server (bool): mysql-server installed ++ """ ++ ++ # Couple of random packages ++ rpms = [_generate_rpm_with_name('sed'), ++ _generate_rpm_with_name('htop')] ++ ++ if has_server: ++ # Add mysql-server ++ rpms += [_generate_rpm_with_name('mysql-server')] ++ ++ curr_actor_mocked = CurrentActorMocked(msgs=[DistributionSignedRPM(items=rpms)]) ++ monkeypatch.setattr(api, 'current_actor', curr_actor_mocked) ++ monkeypatch.setattr(reporting, "create_report", create_report_mocked()) ++ ++ # Executed actor fed with fake RPMs ++ process() ++ ++ if has_server: ++ # Assert for mysql-server package installed ++ assert reporting.create_report.called == 1 ++ else: ++ # Assert for no mysql packages installed ++ assert not reporting.create_report.called +-- +2.50.1 + diff --git a/SOURCES/0058-Fix-target-version-format-checks.patch b/SOURCES/0058-Fix-target-version-format-checks.patch new file mode 100644 index 0000000..4a0e2f5 --- /dev/null +++ b/SOURCES/0058-Fix-target-version-format-checks.patch @@ -0,0 +1,176 @@ +From ce71a44ffd1965330813c15056b5e58e50217108 Mon Sep 17 00:00:00 2001 +From: Matej Matuska +Date: Wed, 2 Jul 2025 16:36:55 +0200 +Subject: [PATCH 58/66] Fix target version format checks + +Currently only the format of a version specified by +LEAPP_DEVEL_TARGET_RELEASE is checked, when specified via --target +cmdline argument it isn't, which is a bug. + +This patch fixes the bug, by enabling the check for --target too. + +NOTE: these are only format checks, the state of support is already +checked later in the upgrade process by the checktargetversion actor. + +Jira: RHEL-96238 +--- + commands/command_utils.py | 28 +++++++++------ + commands/tests/test_upgrade_paths.py | 54 +++++++++++++++++++--------- + commands/upgrade/util.py | 4 +-- + 3 files changed, 57 insertions(+), 29 deletions(-) + +diff --git a/commands/command_utils.py b/commands/command_utils.py +index 155bacad..e6ba6ba4 100644 +--- a/commands/command_utils.py ++++ b/commands/command_utils.py +@@ -59,7 +59,9 @@ def assert_version_format(version_str, desired_format, version_kind): + :raises: CommandError + """ + if not re.match(desired_format.regex, version_str): +- error_str = 'Unexpected format of target version: {0}. The required format is \'{1}\'.' ++ error_str = ( ++ 'Unexpected format of target version: {0}. The required format is \'{1}\'.' ++ ) + raise CommandError(error_str.format(version_str, desired_format.human_readable)) + + +@@ -182,26 +184,32 @@ def get_target_version(flavour): + return target_versions[-1] if target_versions else None + + +-def vet_upgrade_path(args): ++def get_target_release(args): + """ +- Make sure the user requested upgrade_path is a supported one. +- If LEAPP_DEVEL_TARGET_RELEASE is set then it's value is not vetted against upgrade_paths_map but used as is. ++ Return the user selected target release or choose one from config. ++ ++ A target release can be specified, ordered by priority, by the ++ LEAPP_DEVEL_TARGET_RELEASE or args.target (--target cmdline arg) or in the ++ config file. ++ ++ NOTE: when specified via the env var or cmdline arg, the version isn't ++ checked against supported versions, this is done later by an actor in the ++ upgrade process. 
+ + :return: `tuple` (target_release, flavor) + """ + flavor = get_upgrade_flavour() + env_version_override = os.getenv('LEAPP_DEVEL_TARGET_RELEASE') + +- if env_version_override: ++ target_ver = env_version_override or args.target ++ if target_ver: + os_release_contents = _retrieve_os_release_contents() + distro_id = os_release_contents.get('ID', '') + expected_version_format = _DISTRO_VERSION_FORMATS.get(distro_id, VersionFormats.MAJOR_MINOR).value +- assert_version_format(env_version_override, expected_version_format, _VersionKind.TARGET) +- +- return (env_version_override, flavor) ++ assert_version_format(target_ver, expected_version_format, _VersionKind.TARGET) ++ return (target_ver, flavor) + +- target_release = args.target or get_target_version(flavor) +- return (target_release, flavor) ++ return (get_target_version(flavor), flavor) + + + def set_resource_limits(): +diff --git a/commands/tests/test_upgrade_paths.py b/commands/tests/test_upgrade_paths.py +index c2cb09aa..89b5eb71 100644 +--- a/commands/tests/test_upgrade_paths.py ++++ b/commands/tests/test_upgrade_paths.py +@@ -1,3 +1,4 @@ ++import os + import resource + + import mock +@@ -29,34 +30,53 @@ def test_get_target_version(mock_open, monkeypatch): + assert command_utils.get_target_version('default') == '9.0' + + +-@mock.patch("leapp.cli.commands.command_utils.get_upgrade_paths_config", +- return_value={"default": {"7.9": ["8.4"], "8.6": ["9.0"], "7": ["8.4"], "8": ["9.0"]}}) +-def test_vet_upgrade_path(mock_open, monkeypatch): ++@mock.patch( ++ "leapp.cli.commands.command_utils.get_upgrade_paths_config", ++ return_value={ ++ "default": { ++ "7.9": ["8.4"], ++ "8.6": ["9.0", "9.2"], ++ "7": ["8.4"], ++ "8": ["9.0", "9.2"], ++ } ++ }, ++) ++def test_get_target_release(mock_open, monkeypatch): # do not remove mock_open + monkeypatch.setattr(command_utils, 'get_os_release_version_id', lambda x: '8.6') + + # make sure env var LEAPP_DEVEL_TARGET_RELEASE takes precedence +- # when env var set to a bad version - abort the upgrade +- args = mock.Mock(target='9.0') +- monkeypatch.setenv('LEAPP_DEVEL_TARGET_RELEASE', '1.2badsemver') +- with pytest.raises(CommandError) as err: +- command_utils.vet_upgrade_path(args) +- assert 'Unexpected format of target version' in err +- # MAJOR.MINOR.PATCH is considered as bad version, only MAJOR.MINOR is accepted + args = mock.Mock(target='9.0') ++ monkeypatch.setenv('LEAPP_DEVEL_TARGET_RELEASE', '9.2') ++ print(os.getenv('LEAPP_DEVEL_TARGET_RELEASE')) ++ assert command_utils.get_target_release(args) == ('9.2', 'default') ++ ++ # when env var set to a bad version, expect an error + monkeypatch.setenv('LEAPP_DEVEL_TARGET_RELEASE', '9.0.0') + with pytest.raises(CommandError) as err: +- command_utils.vet_upgrade_path(args) ++ command_utils.get_target_release(args) + assert 'Unexpected format of target version' in err ++ + # when env var set to a version not in upgrade_paths map - go on and use it ++ # this is checked by an actor in the IPU + monkeypatch.setenv('LEAPP_DEVEL_TARGET_RELEASE', '1.2') +- assert command_utils.vet_upgrade_path(args) == ('1.2', 'default') +- # no env var set, --target is set to proper version ++ assert command_utils.get_target_release(args) == ('1.2', 'default') ++ ++ # no env var set, --target is set to proper version - use it ++ args = mock.Mock(target='9.0') + monkeypatch.delenv('LEAPP_DEVEL_TARGET_RELEASE', raising=False) +- assert command_utils.vet_upgrade_path(args) == ('9.0', 'default') +- # env var is set to proper version, --target is set to a bad one - use env var 
and go on with the upgrade ++ assert command_utils.get_target_release(args) == ('9.0', 'default') ++ ++ # --target set with incorrectly formatted version, env var not set, fail ++ args = mock.Mock(target='9.0a') ++ with pytest.raises(CommandError) as err: ++ command_utils.get_target_release(args) ++ assert 'Unexpected format of target version' in err ++ ++ # env var is set to proper version, --target set to a bad one: ++ # env var has priority, use it and go on with the upgrade + monkeypatch.setenv('LEAPP_DEVEL_TARGET_RELEASE', '9.0') +- args = mock.Mock(target='1.2') +- assert command_utils.vet_upgrade_path(args) == ('9.0', 'default') ++ args = mock.Mock(target='9.0.0') ++ assert command_utils.get_target_release(args) == ('9.0', 'default') + + + def _mock_getrlimit_factory(nofile_limits=(1024, 4096), fsize_limits=(1024, 4096)): +diff --git a/commands/upgrade/util.py b/commands/upgrade/util.py +index b54b0b34..7d5b563e 100644 +--- a/commands/upgrade/util.py ++++ b/commands/upgrade/util.py +@@ -253,8 +253,8 @@ def prepare_configuration(args): + if args.nogpgcheck: + os.environ['LEAPP_NOGPGCHECK'] = '1' + +- # Check upgrade path and fail early if it's unsupported +- target_version, flavor = command_utils.vet_upgrade_path(args) ++ # Check upgrade path and fail early if it's invalid ++ target_version, flavor = command_utils.get_target_release(args) + os.environ['LEAPP_UPGRADE_PATH_TARGET_RELEASE'] = target_version + os.environ['LEAPP_UPGRADE_PATH_FLAVOUR'] = flavor + +-- +2.50.1 + diff --git a/SOURCES/0060-libs-rhui-add-rhel10-targets-for-upgrades.patch b/SOURCES/0060-libs-rhui-add-rhel10-targets-for-upgrades.patch new file mode 100644 index 0000000..a0aa606 --- /dev/null +++ b/SOURCES/0060-libs-rhui-add-rhel10-targets-for-upgrades.patch @@ -0,0 +1,67 @@ +From b8d942afba061255b69fa5c41259d418fb000ad8 Mon Sep 17 00:00:00 2001 +From: Michal Hecko +Date: Sun, 15 Jun 2025 17:11:28 +0200 +Subject: [PATCH 60/66] libs(rhui): add rhel10 targets for upgrades + +--- + repos/system_upgrade/common/libraries/rhui.py | 30 +++++++++++++++++++ + 1 file changed, 30 insertions(+) + +diff --git a/repos/system_upgrade/common/libraries/rhui.py b/repos/system_upgrade/common/libraries/rhui.py +index 30de0275..e1ab7c67 100644 +--- a/repos/system_upgrade/common/libraries/rhui.py ++++ b/repos/system_upgrade/common/libraries/rhui.py +@@ -158,6 +158,17 @@ RHUI_SETUPS = { + ('cdn.redhat.com-chain.crt', RHUI_PKI_DIR), + ('content-rhel9.crt', RHUI_PKI_PRODUCT_DIR) + ], os_version='9'), ++ mk_rhui_setup(clients={'rh-amazon-rhui-client'}, leapp_pkg='leapp-rhui-aws', ++ mandatory_files=[ ++ ('rhui-client-config-server-10.crt', RHUI_PKI_PRODUCT_DIR), ++ ('rhui-client-config-server-10.key', RHUI_PKI_DIR), ++ ('leapp-aws.repo', YUM_REPOS_PATH) ++ ], ++ optional_files=[ ++ ('content-rhel10.key', RHUI_PKI_DIR), ++ ('cdn.redhat.com-chain.crt', RHUI_PKI_DIR), ++ ('content-rhel10.crt', RHUI_PKI_PRODUCT_DIR) ++ ], os_version='10'), + ], + RHUIFamily(RHUIProvider.AWS, arch=arch.ARCH_ARM64, client_files_folder='aws'): [ + mk_rhui_setup(clients={'rh-amazon-rhui-client-arm'}, optional_files=[], os_version='7', arch=arch.ARCH_ARM64), +@@ -185,6 +196,17 @@ RHUI_SETUPS = { + ('cdn.redhat.com-chain.crt', RHUI_PKI_DIR), + ('content-rhel9.crt', RHUI_PKI_PRODUCT_DIR) + ], os_version='9', arch=arch.ARCH_ARM64), ++ mk_rhui_setup(clients={'rh-amazon-rhui-client'}, leapp_pkg='leapp-rhui-aws', ++ mandatory_files=[ ++ ('rhui-client-config-server-10.crt', RHUI_PKI_PRODUCT_DIR), ++ ('rhui-client-config-server-10.key', RHUI_PKI_DIR), ++ ('leapp-aws.repo', 
YUM_REPOS_PATH) ++ ], ++ optional_files=[ ++ ('content-rhel10.key', RHUI_PKI_DIR), ++ ('cdn.redhat.com-chain.crt', RHUI_PKI_DIR), ++ ('content-rhel10.crt', RHUI_PKI_PRODUCT_DIR) ++ ], os_version='10'), + ], + RHUIFamily(RHUIProvider.AWS, variant=RHUIVariant.SAP, client_files_folder='aws-sap-e4s'): [ + mk_rhui_setup(clients={'rh-amazon-rhui-client-sap-bundle'}, optional_files=[], os_version='7', +@@ -250,6 +272,14 @@ RHUI_SETUPS = { + ], + extra_info={'agent_pkg': 'WALinuxAgent'}, + os_version='9'), ++ mk_rhui_setup(clients={'rhui-azure-rhel10'}, leapp_pkg='leapp-rhui-azure', ++ mandatory_files=[('leapp-azure.repo', YUM_REPOS_PATH)], ++ optional_files=[ ++ ('key.pem', RHUI_PKI_DIR), ++ ('content.crt', RHUI_PKI_PRODUCT_DIR) ++ ], ++ extra_info={'agent_pkg': 'WALinuxAgent'}, ++ os_version='10'), + ], + RHUIFamily(RHUIProvider.AZURE, variant=RHUIVariant.SAP_APPS, client_files_folder='azure-sap-apps'): [ + mk_rhui_setup(clients={'rhui-azure-rhel7-base-sap-apps'}, os_version='7', content_channel=ContentChannel.EUS), +-- +2.50.1 + diff --git a/SOURCES/0061-userspacegen-do-not-repolist-source-debug-repos.patch b/SOURCES/0061-userspacegen-do-not-repolist-source-debug-repos.patch new file mode 100644 index 0000000..fa4521e --- /dev/null +++ b/SOURCES/0061-userspacegen-do-not-repolist-source-debug-repos.patch @@ -0,0 +1,35 @@ +From 7f9df48b0040499c5de3441f56d1f1ed77667526 Mon Sep 17 00:00:00 2001 +From: Michal Hecko +Date: Sun, 15 Jun 2025 23:39:17 +0200 +Subject: [PATCH 61/66] userspacegen: do not repolist source & debug repos + +Ignore source/debug repositories when running dnf repolist to check what +repositories are available for the upgrade. Should prevent needless +crashes when the source/debug repositories are malformed, e.g., when the +maintainer forgot to sync their content. 
+--- + .../targetuserspacecreator/libraries/userspacegen.py | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py +index 699f1517..9ec4ecac 100644 +--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py ++++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py +@@ -929,7 +929,13 @@ def _get_rh_available_repoids(context, indata): + os.rename(foreign_repofile, '{0}.back'.format(foreign_repofile)) + + try: +- dnf_cmd = ['dnf', 'repolist', '--releasever', target_ver, '-v', '--enablerepo', '*'] ++ dnf_cmd = [ ++ 'dnf', 'repolist', ++ '--releasever', target_ver, '-v', ++ '--enablerepo', '*', ++ '--disablerepo', '*-source-*', ++ '--disablerepo', '*-debug-*', ++ ] + repolist_result = context.call(dnf_cmd)['stdout'] + repoid_lines = [line for line in repolist_result.split('\n') if line.startswith('Repo-id')] + rhui_repoids = {extract_repoid_from_line(line) for line in repoid_lines} +-- +2.50.1 + diff --git a/SOURCES/0062-actor-checkrhui-do-not-boostrap-client-on-AWS-9-10.patch b/SOURCES/0062-actor-checkrhui-do-not-boostrap-client-on-AWS-9-10.patch new file mode 100644 index 0000000..7f65e94 --- /dev/null +++ b/SOURCES/0062-actor-checkrhui-do-not-boostrap-client-on-AWS-9-10.patch @@ -0,0 +1,46 @@ +From 24714605538d2e197b0d7a5012528129439b4764 Mon Sep 17 00:00:00 2001 +From: Michal Hecko +Date: Tue, 17 Jun 2025 11:18:33 +0200 +Subject: [PATCH 62/66] actor(checkrhui): do not boostrap client on AWS 9>10 + +The current implementation of the checkrhui actor and its modification +of rhui-associated target userspace preparation instructions contains +too coarse conditions on what should happen on specific upgrade paths. +In other words, changes originally intended only for 8>9 are applied +also during 9>10. This patch fixes this issue -- when performing 9>10 on +AWS, we do not bootstrap the RHUI client, as it has too many +dependencies. Instead, the leapp-rhui-aws package contains all +repositories, certs and keys that grant repository access. 
+ +Jira-ref: RHEL-64910 +--- + .../actors/cloud/checkrhui/libraries/checkrhui.py | 12 +++++++++--- + 1 file changed, 9 insertions(+), 3 deletions(-) + +diff --git a/repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py b/repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py +index 64e36e08..ea154173 100644 +--- a/repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py ++++ b/repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py +@@ -254,10 +254,16 @@ def customize_rhui_setup_for_aws(rhui_family, setup_info): + # The leapp-rhui-aws will provide all necessary files to access entire RHEL8 content + setup_info.bootstrap_target_client = False + return ++ if target_version == '9': ++ amazon_plugin_copy_task = CopyFile(src='/usr/lib/python3.9/site-packages/dnf-plugins/amazon-id.py', ++ dst='/usr/lib/python3.6/site-packages/dnf-plugins/') ++ setup_info.postinstall_tasks.files_to_copy.append(amazon_plugin_copy_task) ++ return + +- amazon_plugin_copy_task = CopyFile(src='/usr/lib/python3.9/site-packages/dnf-plugins/amazon-id.py', +- dst='/usr/lib/python3.6/site-packages/dnf-plugins/') +- setup_info.postinstall_tasks.files_to_copy.append(amazon_plugin_copy_task) ++ # For 9>10 and higher we give up trying to do client swapping since the client has too many dependencies ++ # from target system's repositories. Our leapp-rhui-aws package will carry all of the repos provided ++ # by the client. ++ setup_info.bootstrap_target_client = False + + + def produce_rhui_info_to_setup_target(rhui_family, source_setup_desc, target_setup_desc): +-- +2.50.1 + diff --git a/SOURCES/0063-add-azure-aws-alibaba-client-repositories-to-repomap.patch b/SOURCES/0063-add-azure-aws-alibaba-client-repositories-to-repomap.patch new file mode 100644 index 0000000..46a0dce --- /dev/null +++ b/SOURCES/0063-add-azure-aws-alibaba-client-repositories-to-repomap.patch @@ -0,0 +1,456 @@ +From d0f6c44acfdd8938c7062deae6a16613af2bb4d1 Mon Sep 17 00:00:00 2001 +From: Michal Hecko +Date: Wed, 2 Jul 2025 22:26:23 +0200 +Subject: [PATCH 63/66] add azure, aws, alibaba client repositories to + repomapping + +Co-Authored-By: jinkangkang.jkk <1547182170@qq.com> +--- + etc/leapp/files/repomap.json | 293 ++++++++++++++++++++++++++++++++++- + 1 file changed, 292 insertions(+), 1 deletion(-) + +diff --git a/etc/leapp/files/repomap.json b/etc/leapp/files/repomap.json +index fc0c42f1..0cd5601a 100644 +--- a/etc/leapp/files/repomap.json ++++ b/etc/leapp/files/repomap.json +@@ -1,5 +1,5 @@ + { +- "datetime": "202505201636Z", ++ "datetime": "202507171303Z", + "version_format": "1.3.0", + "provided_data_streams": [ + "4.0" +@@ -294,6 +294,24 @@ + "target": [ + "rhel10-HighAvailability" + ] ++ }, ++ { ++ "source": "rhel9-rhui-client-config-server-9", ++ "target": [ ++ "rhel10-rhui-client-config-server-10" ++ ] ++ }, ++ { ++ "source": "rhel9-rhui-microsoft-azure-rhel9", ++ "target": [ ++ "rhel10-rhui-microsoft-azure-rhel10" ++ ] ++ }, ++ { ++ "source": "rhel9-rhui-custom-client-at-alibaba", ++ "target": [ ++ "rhel10-rhui-custom-client-at-alibaba" ++ ] + } + ] + } +@@ -343,6 +361,15 @@ + "distro": "rhel", + "rhui": "aws" + }, ++ { ++ "major_version": "10", ++ "repoid": "rhel-10-baseos-rhui-rpms", ++ "arch": "aarch64", ++ "channel": "ga", ++ "repo_type": "rpm", ++ "distro": "rhel", ++ "rhui": "azure" ++ }, + { + "major_version": "10", + "repoid": "rhel-10-baseos-rhui-rpms", +@@ -352,6 +379,15 @@ + "distro": "rhel", + "rhui": "aws" + }, ++ { ++ "major_version": "10", ++ "repoid": 
"rhel-10-baseos-rhui-rpms", ++ "arch": "x86_64", ++ "channel": "ga", ++ "repo_type": "rpm", ++ "distro": "rhel", ++ "rhui": "azure" ++ }, + { + "major_version": "10", + "repoid": "rhel-10-for-aarch64-baseos-beta-rpms", +@@ -456,6 +492,15 @@ + "repo_type": "rpm", + "distro": "rhel" + }, ++ { ++ "major_version": "10", ++ "repoid": "rhel-10-for-x86_64-baseos-e4s-rhui-rpms", ++ "arch": "x86_64", ++ "channel": "e4s", ++ "repo_type": "rpm", ++ "distro": "rhel", ++ "rhui": "aws" ++ }, + { + "major_version": "10", + "repoid": "rhel-10-for-x86_64-baseos-e4s-rpms", +@@ -479,6 +524,24 @@ + "channel": "ga", + "repo_type": "rpm", + "distro": "rhel" ++ }, ++ { ++ "major_version": "10", ++ "repoid": "rhui-rhel-10-for-aarch64-baseos-rhui-rpms", ++ "arch": "aarch64", ++ "channel": "ga", ++ "repo_type": "rpm", ++ "distro": "rhel", ++ "rhui": "alibaba" ++ }, ++ { ++ "major_version": "10", ++ "repoid": "rhui-rhel-10-for-x86_64-baseos-rhui-rpms", ++ "arch": "x86_64", ++ "channel": "ga", ++ "repo_type": "rpm", ++ "distro": "rhel", ++ "rhui": "alibaba" + } + ] + }, +@@ -526,6 +589,15 @@ + "distro": "rhel", + "rhui": "aws" + }, ++ { ++ "major_version": "10", ++ "repoid": "rhel-10-appstream-rhui-rpms", ++ "arch": "aarch64", ++ "channel": "ga", ++ "repo_type": "rpm", ++ "distro": "rhel", ++ "rhui": "azure" ++ }, + { + "major_version": "10", + "repoid": "rhel-10-appstream-rhui-rpms", +@@ -535,6 +607,15 @@ + "distro": "rhel", + "rhui": "aws" + }, ++ { ++ "major_version": "10", ++ "repoid": "rhel-10-appstream-rhui-rpms", ++ "arch": "x86_64", ++ "channel": "ga", ++ "repo_type": "rpm", ++ "distro": "rhel", ++ "rhui": "azure" ++ }, + { + "major_version": "10", + "repoid": "rhel-10-for-aarch64-appstream-beta-rpms", +@@ -639,6 +720,15 @@ + "repo_type": "rpm", + "distro": "rhel" + }, ++ { ++ "major_version": "10", ++ "repoid": "rhel-10-for-x86_64-appstream-e4s-rhui-rpms", ++ "arch": "x86_64", ++ "channel": "e4s", ++ "repo_type": "rpm", ++ "distro": "rhel", ++ "rhui": "aws" ++ }, + { + "major_version": "10", + "repoid": "rhel-10-for-x86_64-appstream-e4s-rpms", +@@ -662,6 +752,24 @@ + "channel": "ga", + "repo_type": "rpm", + "distro": "rhel" ++ }, ++ { ++ "major_version": "10", ++ "repoid": "rhui-rhel-10-for-aarch64-appstream-rhui-rpms", ++ "arch": "aarch64", ++ "channel": "ga", ++ "repo_type": "rpm", ++ "distro": "rhel", ++ "rhui": "alibaba" ++ }, ++ { ++ "major_version": "10", ++ "repoid": "rhui-rhel-10-for-x86_64-appstream-rhui-rpms", ++ "arch": "x86_64", ++ "channel": "ga", ++ "repo_type": "rpm", ++ "distro": "rhel", ++ "rhui": "alibaba" + } + ] + }, +@@ -741,6 +849,15 @@ + "distro": "rhel", + "rhui": "aws" + }, ++ { ++ "major_version": "10", ++ "repoid": "codeready-builder-for-rhel-10-rhui-rpms", ++ "arch": "aarch64", ++ "channel": "ga", ++ "repo_type": "rpm", ++ "distro": "rhel", ++ "rhui": "azure" ++ }, + { + "major_version": "10", + "repoid": "codeready-builder-for-rhel-10-rhui-rpms", +@@ -750,6 +867,15 @@ + "distro": "rhel", + "rhui": "aws" + }, ++ { ++ "major_version": "10", ++ "repoid": "codeready-builder-for-rhel-10-rhui-rpms", ++ "arch": "x86_64", ++ "channel": "ga", ++ "repo_type": "rpm", ++ "distro": "rhel", ++ "rhui": "azure" ++ }, + { + "major_version": "10", + "repoid": "codeready-builder-for-rhel-10-s390x-eus-rpms", +@@ -813,6 +939,24 @@ + "channel": "ga", + "repo_type": "rpm", + "distro": "centos" ++ }, ++ { ++ "major_version": "10", ++ "repoid": "rhui-codeready-builder-for-rhel-10-aarch64-rhui-rpms", ++ "arch": "aarch64", ++ "channel": "ga", ++ "repo_type": "rpm", ++ "distro": "rhel", ++ "rhui": "alibaba" 
++ }, ++ { ++ "major_version": "10", ++ "repoid": "rhui-codeready-builder-for-rhel-10-x86_64-rhui-rpms", ++ "arch": "x86_64", ++ "channel": "ga", ++ "repo_type": "rpm", ++ "distro": "rhel", ++ "rhui": "alibaba" + } + ] + }, +@@ -923,6 +1067,33 @@ + "repo_type": "rpm", + "distro": "rhel", + "rhui": "aws" ++ }, ++ { ++ "major_version": "10", ++ "repoid": "rhel-10-supplementary-rhui-rpms", ++ "arch": "x86_64", ++ "channel": "ga", ++ "repo_type": "rpm", ++ "distro": "rhel", ++ "rhui": "azure" ++ }, ++ { ++ "major_version": "10", ++ "repoid": "rhui-rhel-10-for-aarch64-supplementary-rhui-rpms", ++ "arch": "aarch64", ++ "channel": "ga", ++ "repo_type": "rpm", ++ "distro": "rhel", ++ "rhui": "alibaba" ++ }, ++ { ++ "major_version": "10", ++ "repoid": "rhui-rhel-10-for-x86_64-supplementary-rhui-rpms", ++ "arch": "x86_64", ++ "channel": "ga", ++ "repo_type": "rpm", ++ "distro": "rhel", ++ "rhui": "alibaba" + } + ] + }, +@@ -1006,6 +1177,14 @@ + "repo_type": "rpm", + "distro": "rhel" + }, ++ { ++ "major_version": "10", ++ "repoid": "rhel-10-for-aarch64-nfv-e4s-rpms", ++ "arch": "aarch64", ++ "channel": "e4s", ++ "repo_type": "rpm", ++ "distro": "rhel" ++ }, + { + "major_version": "10", + "repoid": "rhel-10-for-aarch64-nfv-rpms", +@@ -1115,6 +1294,15 @@ + "repo_type": "rpm", + "distro": "rhel" + }, ++ { ++ "major_version": "10", ++ "repoid": "rhel-10-for-x86_64-sap-netweaver-e4s-rhui-rpms", ++ "arch": "x86_64", ++ "channel": "e4s", ++ "repo_type": "rpm", ++ "distro": "rhel", ++ "rhui": "aws" ++ }, + { + "major_version": "10", + "repoid": "rhel-10-for-x86_64-sap-netweaver-e4s-rpms", +@@ -1160,6 +1348,15 @@ + "repo_type": "rpm", + "distro": "rhel" + }, ++ { ++ "major_version": "10", ++ "repoid": "rhel-10-for-x86_64-sap-solutions-e4s-rhui-rpms", ++ "arch": "x86_64", ++ "channel": "e4s", ++ "repo_type": "rpm", ++ "distro": "rhel", ++ "rhui": "aws" ++ }, + { + "major_version": "10", + "repoid": "rhel-10-for-x86_64-sap-solutions-e4s-rpms", +@@ -1317,6 +1514,15 @@ + "repo_type": "rpm", + "distro": "rhel" + }, ++ { ++ "major_version": "10", ++ "repoid": "rhel-10-for-x86_64-highavailability-e4s-rhui-rpms", ++ "arch": "x86_64", ++ "channel": "e4s", ++ "repo_type": "rpm", ++ "distro": "rhel", ++ "rhui": "aws" ++ }, + { + "major_version": "10", + "repoid": "rhel-10-for-x86_64-highavailability-e4s-rpms", +@@ -1340,6 +1546,75 @@ + "channel": "ga", + "repo_type": "rpm", + "distro": "rhel" ++ }, ++ { ++ "major_version": "10", ++ "repoid": "rhui-rhel-10-for-x86_64-highavailability-rhui-rpms", ++ "arch": "x86_64", ++ "channel": "ga", ++ "repo_type": "rpm", ++ "distro": "rhel", ++ "rhui": "alibaba" ++ } ++ ] ++ }, ++ { ++ "pesid": "rhel10-rhui-microsoft-azure-rhel10", ++ "entries": [ ++ { ++ "major_version": "10", ++ "repoid": "rhui-microsoft-azure-rhel10", ++ "arch": "x86_64", ++ "channel": "ga", ++ "repo_type": "rpm", ++ "distro": "rhel", ++ "rhui": "azure" ++ } ++ ] ++ }, ++ { ++ "pesid": "rhel10-rhui-client-config-server-10", ++ "entries": [ ++ { ++ "major_version": "10", ++ "repoid": "rhui-client-config-server-10", ++ "arch": "aarch64", ++ "channel": "ga", ++ "repo_type": "rpm", ++ "distro": "rhel", ++ "rhui": "aws" ++ }, ++ { ++ "major_version": "10", ++ "repoid": "rhui-client-config-server-10", ++ "arch": "x86_64", ++ "channel": "ga", ++ "repo_type": "rpm", ++ "distro": "rhel", ++ "rhui": "aws" ++ } ++ ] ++ }, ++ { ++ "pesid": "rhel10-rhui-custom-client-at-alibaba", ++ "entries": [ ++ { ++ "major_version": "10", ++ "repoid": "rhui-custom-rhui_client_at_alibaba-rhel-10", ++ "arch": "aarch64", ++ "channel": "ga", 
++ "repo_type": "rpm", ++ "distro": "rhel", ++ "rhui": "alibaba" ++ }, ++ { ++ "major_version": "10", ++ "repoid": "rhui-custom-rhui_client_at_alibaba-rhel-10", ++ "arch": "x86_64", ++ "channel": "ga", ++ "repo_type": "rpm", ++ "distro": "rhel", ++ "rhui": "alibaba" + } + ] + }, +@@ -5228,6 +5503,14 @@ + "repo_type": "rpm", + "distro": "rhel" + }, ++ { ++ "major_version": "9", ++ "repoid": "rhel-9-for-aarch64-nfv-e4s-rpms", ++ "arch": "aarch64", ++ "channel": "e4s", ++ "repo_type": "rpm", ++ "distro": "rhel" ++ }, + { + "major_version": "9", + "repoid": "rhel-9-for-aarch64-nfv-rpms", +@@ -5594,6 +5877,14 @@ + "repo_type": "rpm", + "distro": "rhel" + }, ++ { ++ "major_version": "9", ++ "repoid": "rhel-9-for-x86_64-highavailability-aus-rpms", ++ "arch": "x86_64", ++ "channel": "aus", ++ "repo_type": "rpm", ++ "distro": "rhel" ++ }, + { + "major_version": "9", + "repoid": "rhel-9-for-x86_64-highavailability-beta-rpms", +-- +2.50.1 + diff --git a/SOURCES/0064-rhui-azure-add-target-client-s-GPG-key-to-mandatory-.patch b/SOURCES/0064-rhui-azure-add-target-client-s-GPG-key-to-mandatory-.patch new file mode 100644 index 0000000..5e4e2e1 --- /dev/null +++ b/SOURCES/0064-rhui-azure-add-target-client-s-GPG-key-to-mandatory-.patch @@ -0,0 +1,36 @@ +From 08196d4533c51b9f9d7d45d90ca0bad8b0ef697d Mon Sep 17 00:00:00 2001 +From: Michal Hecko +Date: Fri, 4 Jul 2025 14:05:48 +0200 +Subject: [PATCH 64/66] rhui(azure): add target client's GPG key to mandatory + files + +The RHEL10's Azure RHUI client is signed using a different GPG +key than RHEL9 client. Therefore, we need to have the key available +when we try to bootstrap the RHEL10 client during the upgrade. + +Jira-ref: RHEL-64911 +--- + repos/system_upgrade/common/libraries/rhui.py | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/repos/system_upgrade/common/libraries/rhui.py b/repos/system_upgrade/common/libraries/rhui.py +index e1ab7c67..1e3b5359 100644 +--- a/repos/system_upgrade/common/libraries/rhui.py ++++ b/repos/system_upgrade/common/libraries/rhui.py +@@ -273,7 +273,12 @@ RHUI_SETUPS = { + extra_info={'agent_pkg': 'WALinuxAgent'}, + os_version='9'), + mk_rhui_setup(clients={'rhui-azure-rhel10'}, leapp_pkg='leapp-rhui-azure', +- mandatory_files=[('leapp-azure.repo', YUM_REPOS_PATH)], ++ mandatory_files=[ ++ ('leapp-azure.repo', YUM_REPOS_PATH), ++ # We need to have the new GPG key ready when we will be bootstrapping ++ # target rhui client. 
++ ('RPM-GPG-KEY-microsoft-azure-release-new', '/etc/pki/rpm-gpg/') ++ ], + optional_files=[ + ('key.pem', RHUI_PKI_DIR), + ('content.crt', RHUI_PKI_PRODUCT_DIR) +-- +2.50.1 + diff --git a/SOURCES/0065-feat-alibaba-support-rhel9-upgrade-to-rhel10-using-r.patch b/SOURCES/0065-feat-alibaba-support-rhel9-upgrade-to-rhel10-using-r.patch new file mode 100644 index 0000000..f9c49f3 --- /dev/null +++ b/SOURCES/0065-feat-alibaba-support-rhel9-upgrade-to-rhel10-using-r.patch @@ -0,0 +1,77 @@ +From 7468b10c32367d11a098c4f10390c18ec55720e1 Mon Sep 17 00:00:00 2001 +From: "jinkangkang.jkk" +Date: Mon, 16 Jun 2025 20:26:45 +0800 +Subject: [PATCH 65/66] feat(alibaba): support rhel9 upgrade to rhel10 using + rhui on alibaba cloud + +--- + repos/system_upgrade/common/libraries/rhui.py | 32 +++++++++++++++++-- + 1 file changed, 30 insertions(+), 2 deletions(-) + +diff --git a/repos/system_upgrade/common/libraries/rhui.py b/repos/system_upgrade/common/libraries/rhui.py +index 1e3b5359..b3225d5f 100644 +--- a/repos/system_upgrade/common/libraries/rhui.py ++++ b/repos/system_upgrade/common/libraries/rhui.py +@@ -383,6 +383,13 @@ RHUI_SETUPS = { + ('content.crt', RHUI_PKI_PRODUCT_DIR) + ], + os_version='9'), ++ mk_rhui_setup(clients={'aliyun_rhui_rhel10'}, leapp_pkg='leapp-rhui-alibaba', ++ mandatory_files=[('leapp-alibaba.repo', YUM_REPOS_PATH)], ++ optional_files=[ ++ ('key.pem', RHUI_PKI_DIR), ++ ('content.crt', RHUI_PKI_PRODUCT_DIR) ++ ], ++ os_version='10'), + ], + RHUIFamily(RHUIProvider.ALIBABA, arch=arch.ARCH_ARM64, client_files_folder='alibaba'): [ + mk_rhui_setup(clients={'aliyun_rhui_rhel8'}, leapp_pkg='leapp-rhui-alibaba', +@@ -399,6 +406,13 @@ RHUI_SETUPS = { + ('content.crt', RHUI_PKI_PRODUCT_DIR) + ], + os_version='9'), ++ mk_rhui_setup(clients={'aliyun_rhui_rhel10'}, leapp_pkg='leapp-rhui-alibaba', ++ mandatory_files=[('leapp-alibaba.repo', YUM_REPOS_PATH)], ++ optional_files=[ ++ ('key.pem', RHUI_PKI_DIR), ++ ('content.crt', RHUI_PKI_PRODUCT_DIR) ++ ], ++ os_version='10'), + ] + } + +@@ -601,15 +615,29 @@ RHUI_CLOUD_MAP = { + ], + }, + }, ++ '9to10': { ++ 'alibaba': { ++ 'src_pkg': 'aliyun_rhui_rhel9', ++ 'target_pkg': 'aliyun_rhui_rhel10', ++ 'leapp_pkg': 'leapp-rhui-alibaba', ++ 'leapp_pkg_repo': 'leapp-alibaba.repo', ++ 'files_map': [ ++ ('content.crt', RHUI_PKI_PRODUCT_DIR), ++ ('key.pem', RHUI_PKI_DIR), ++ ('leapp-alibaba.repo', YUM_REPOS_PATH) ++ ], ++ }, ++ } + } + + +-# TODO(mmatuska) deprecate or adjust for 9to10? 
+ def get_upg_path(): + """ + Get upgrade path in specific string format + """ +- return '7to8' if get_target_major_version() == '8' else '8to9' ++ source_major_version = get_source_major_version() ++ target_major_version = get_target_major_version() ++ return '{0}to{1}'.format(source_major_version, target_major_version) + + + @deprecated(since='2023-07-27', message='This functionality has been replaced with the RHUIInfo message.') +-- +2.50.1 + diff --git a/SOURCES/0066-Inhibit-upgrade-on-systems-with-cgroupsv1-on-9to10-1.patch b/SOURCES/0066-Inhibit-upgrade-on-systems-with-cgroupsv1-on-9to10-1.patch new file mode 100644 index 0000000..3be3da4 --- /dev/null +++ b/SOURCES/0066-Inhibit-upgrade-on-systems-with-cgroupsv1-on-9to10-1.patch @@ -0,0 +1,203 @@ +From 3356c045fe2982f3a26b26b46258398b490a6d67 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Matej=20Matu=C5=A1ka?= +Date: Thu, 17 Jul 2025 14:33:55 +0200 +Subject: [PATCH 66/66] Inhibit upgrade on systems with cgroupsv1 on 9to10 + (#1392) + +* Inhibit upgrade on systems with cgroupsv1 on 9to10 + +cgroups-v1 were deprecated in RHEL 9 and are unsupported in RHEL 10. The +option to switch to cgroups-v1 is ignored. Users have to migrate to +cgroups-v2 in order to upgrade. + +Both legacy and hybrid modes are unsupported. Only unified hierarchy +(cgroups-v2) is supported. More info on how the modes are switched is +at: +https://www.freedesktop.org/software/systemd/man/247/systemd.html#systemd.unified_cgroup_hierarchy +and +https://www.freedesktop.org/software/systemd/man/247/systemd.html#systemd.legacy_systemd_cgroup_controller. + +Jira: RHEL-81212 +--- + .../actors/inhibitcgroupsv1/actor.py | 23 ++++++ + .../libraries/inhibitcgroupsv1.py | 56 ++++++++++++++ + .../tests/test_inhibitcgroupsv1.py | 74 +++++++++++++++++++ + 3 files changed, 153 insertions(+) + create mode 100644 repos/system_upgrade/el9toel10/actors/inhibitcgroupsv1/actor.py + create mode 100644 repos/system_upgrade/el9toel10/actors/inhibitcgroupsv1/libraries/inhibitcgroupsv1.py + create mode 100644 repos/system_upgrade/el9toel10/actors/inhibitcgroupsv1/tests/test_inhibitcgroupsv1.py + +diff --git a/repos/system_upgrade/el9toel10/actors/inhibitcgroupsv1/actor.py b/repos/system_upgrade/el9toel10/actors/inhibitcgroupsv1/actor.py +new file mode 100644 +index 00000000..7a3e4be4 +--- /dev/null ++++ b/repos/system_upgrade/el9toel10/actors/inhibitcgroupsv1/actor.py +@@ -0,0 +1,23 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import inhibitcgroupsv1 ++from leapp.models import KernelCmdline ++from leapp.reporting import Report ++from leapp.tags import ChecksPhaseTag, IPUWorkflowTag ++ ++ ++class InhibitCgroupsv1(Actor): ++ """ ++ Inhibit upgrade if cgroups-v1 are enabled ++ ++ Support for cgroups-v1 was deprecated in RHEL 9 and removed in RHEL 10. ++ Both legacy and hybrid modes are unsupported, only the unified cgroup ++ hierarchy (cgroups-v2) is supported. 
++ """ ++ ++ name = "inhibit_cgroupsv1" ++ consumes = (KernelCmdline,) ++ produces = (Report,) ++ tags = (ChecksPhaseTag, IPUWorkflowTag) ++ ++ def process(self): ++ inhibitcgroupsv1.process() +diff --git a/repos/system_upgrade/el9toel10/actors/inhibitcgroupsv1/libraries/inhibitcgroupsv1.py b/repos/system_upgrade/el9toel10/actors/inhibitcgroupsv1/libraries/inhibitcgroupsv1.py +new file mode 100644 +index 00000000..6c891f22 +--- /dev/null ++++ b/repos/system_upgrade/el9toel10/actors/inhibitcgroupsv1/libraries/inhibitcgroupsv1.py +@@ -0,0 +1,56 @@ ++from leapp import reporting ++from leapp.exceptions import StopActorExecutionError ++from leapp.libraries.stdlib import api ++from leapp.models import KernelCmdline ++ ++ ++def process(): ++ kernel_cmdline = next(api.consume(KernelCmdline), None) ++ if not kernel_cmdline: ++ # really unlikely ++ raise StopActorExecutionError("Did not receive any KernelCmdline messages.") ++ ++ unified_hierarchy = True # default since RHEL 9 ++ legacy_controller_present = False ++ for param in kernel_cmdline.parameters: ++ if param.key == "systemd.unified_cgroup_hierarchy": ++ if param.value is not None and param.value.lower() in ("0", "false", "no"): ++ unified_hierarchy = False ++ if param.key == "systemd.legacy_systemd_cgroup_controller": ++ # no matter the value, it should be removed ++ # it has no effect when unified hierarchy is enabled ++ legacy_controller_present = True ++ ++ if unified_hierarchy: ++ api.current_logger().debug("cgroups-v2 already in use, nothing to do, skipping.") ++ return ++ ++ remediation_cmd_args = ["systemd.unified_cgroup_hierarchy"] ++ if legacy_controller_present: ++ remediation_cmd_args.append('systemd.legacy_systemd_cgroup_controller') ++ ++ summary = ( ++ "Leapp detected cgroups-v1 is enabled on the system." ++ " The support of cgroups-v1 was deprecated in RHEL 9 and is removed in RHEL 10." ++ " Software requiring cgroups-v1 might not work correctly or at all on RHEL 10." 
++ ) ++ reporting.create_report( ++ [ ++ reporting.Title("cgroups-v1 enabled on the system"), ++ reporting.Summary(summary), ++ reporting.Severity(reporting.Severity.HIGH), ++ reporting.Groups([reporting.Groups.INHIBITOR, reporting.Groups.KERNEL]), ++ reporting.RelatedResource("package", "systemd"), ++ reporting.Remediation( ++ hint="Make sure no third party software requires cgroups-v1 and switch to cgroups-v2.", ++ # remove the args from commandline, the defaults are the desired values ++ commands=[ ++ [ ++ "grubby", ++ "--update-kernel=ALL", ++ '--remove-args="{}"'.format(",".join(remediation_cmd_args)), ++ ], ++ ], ++ ), ++ ] ++ ) +diff --git a/repos/system_upgrade/el9toel10/actors/inhibitcgroupsv1/tests/test_inhibitcgroupsv1.py b/repos/system_upgrade/el9toel10/actors/inhibitcgroupsv1/tests/test_inhibitcgroupsv1.py +new file mode 100644 +index 00000000..9b3ec96f +--- /dev/null ++++ b/repos/system_upgrade/el9toel10/actors/inhibitcgroupsv1/tests/test_inhibitcgroupsv1.py +@@ -0,0 +1,74 @@ ++import pytest ++ ++from leapp import reporting ++from leapp.libraries.actor import inhibitcgroupsv1 ++from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked ++from leapp.libraries.stdlib import api ++from leapp.models import KernelCmdline, KernelCmdlineArg ++ ++ ++@pytest.mark.parametrize( ++ "cmdline_params", ( ++ ([KernelCmdlineArg(key="systemd.unified_cgroup_hierarchy", value="0")]), ++ ([KernelCmdlineArg(key="systemd.unified_cgroup_hierarchy", value="false")]), ++ ([KernelCmdlineArg(key="systemd.unified_cgroup_hierarchy", value="False")]), ++ ([KernelCmdlineArg(key="systemd.unified_cgroup_hierarchy", value="no")]), ++ ( ++ [ ++ KernelCmdlineArg(key="systemd.unified_cgroup_hierarchy", value="0"), ++ KernelCmdlineArg(key="systemd.legacy_systemd_cgroup_controller", value="0"), ++ ] ++ ), ( ++ [ ++ KernelCmdlineArg(key="systemd.unified_cgroup_hierarchy", value="0"), ++ KernelCmdlineArg(key="systemd.legacy_systemd_cgroup_controller", value="1"), ++ ] ++ ) ++ ) ++) ++def test_inhibit_should_inhibit(monkeypatch, cmdline_params): ++ curr_actor_mocked = CurrentActorMocked(msgs=[KernelCmdline(parameters=cmdline_params)]) ++ monkeypatch.setattr(api, "current_actor", curr_actor_mocked) ++ monkeypatch.setattr(reporting, "create_report", create_report_mocked()) ++ ++ inhibitcgroupsv1.process() ++ ++ assert reporting.create_report.called == 1 ++ report = reporting.create_report.reports[0] ++ assert "cgroups-v1" in report["title"] ++ assert reporting.Groups.INHIBITOR in report["groups"] ++ ++ command = [r for r in report["detail"]["remediations"] if r["type"] == "command"][0] ++ assert "systemd.unified_cgroup_hierarchy" in command['context'][2] ++ if len(cmdline_params) == 2: ++ assert "systemd.legacy_systemd_cgroup_controller" in command['context'][2] ++ ++ ++@pytest.mark.parametrize( ++ "cmdline_params", ( ++ ([KernelCmdlineArg(key="systemd.unified_cgroup_hierarchy", value="1")]), ++ ([KernelCmdlineArg(key="systemd.unified_cgroup_hierarchy", value="true")]), ++ ([KernelCmdlineArg(key="systemd.unified_cgroup_hierarchy", value="True")]), ++ ([KernelCmdlineArg(key="systemd.unified_cgroup_hierarchy", value="yes")]), ++ ([KernelCmdlineArg(key="systemd.unified_cgroup_hierarchy", value=None)]), ++ ( ++ [ ++ KernelCmdlineArg(key="systemd.unified_cgroup_hierarchy", value="1"), ++ KernelCmdlineArg(key="systemd.legacy_systemd_cgroup_controller", value="1"), ++ ] ++ ), ( ++ [ ++ KernelCmdlineArg(key="systemd.unified_cgroup_hierarchy", value="1"), ++ 
KernelCmdlineArg(key="systemd.legacy_systemd_cgroup_controller", value="0"), ++ ] ++ ), ++ ) ++) ++def test_inhibit_should_not_inhibit(monkeypatch, cmdline_params): ++ curr_actor_mocked = CurrentActorMocked(msgs=[KernelCmdline(parameters=cmdline_params)]) ++ monkeypatch.setattr(api, "current_actor", curr_actor_mocked) ++ monkeypatch.setattr(reporting, "create_report", create_report_mocked()) ++ ++ inhibitcgroupsv1.process() ++ ++ assert not reporting.create_report.called +-- +2.50.1 + diff --git a/SPECS/leapp-repository.spec b/SPECS/leapp-repository.spec index 0fd1e11..dfc0700 100644 --- a/SPECS/leapp-repository.spec +++ b/SPECS/leapp-repository.spec @@ -52,7 +52,7 @@ py2_byte_compile "%1" "%2"} Name: leapp-repository Version: 0.22.0 -Release: 4%{?dist} +Release: 5%{?dist} Summary: Repositories for leapp License: ASL 2.0 @@ -112,6 +112,33 @@ Patch0041: 0041-repomap-Process-repositories-based-on-distro.patch Patch0042: 0042-Update-the-upgrade-data-files-stream-4.0.patch Patch0043: 0043-unit-tests-suppress-deprecation-warning-for-is_rhel_.patch +# CTC2 Candidate1 +Patch0044: 0044-Remove-obsolete-workflows.patch +Patch0045: 0045-README-IRC-GitHub-discussions.patch +Patch0046: 0046-Resolve-boot-issues-in-hybrid-azure-during-upgrades-.patch +Patch0047: 0047-Restructure-hybrid-image-detection.patch +Patch0048: 0048-Point-to-leapp-repository-contribution-guidelines.patch +Patch0049: 0049-Read-the-DNF-config-by-module.py-library.patch +Patch0050: 0050-Disable-localpkg_gpgcheck-parameter-from-plugin-data.patch +Patch0051: 0051-PR-welcome-msg-update-link-to-contrib-guidelines-139.patch +Patch0052: 0052-Fix-skip-checking-ownership-of-files-in-.-directory-.patch +Patch0053: 0053-fixup-Fix-skip-checking-ownership-of-files-in-.-dire.patch +Patch0054: 0054-Add-test.patch +Patch0055: 0055-fixup-Add-test.patch +Patch0056: 0056-Add-test-for-non-recursive.patch +Patch0057: 0057-el8to9-actors-mysql-Add-MySQL-actor-with-recommendat.patch +Patch0058: 0058-Fix-target-version-format-checks.patch +# do not apply this patch, due to error in centpkg tool +# it doesn't contain any functional change +#Patch0059: 0059-Drop-el7toel8-leapp-repository.patch +Patch0060: 0060-libs-rhui-add-rhel10-targets-for-upgrades.patch +Patch0061: 0061-userspacegen-do-not-repolist-source-debug-repos.patch +Patch0062: 0062-actor-checkrhui-do-not-boostrap-client-on-AWS-9-10.patch +Patch0063: 0063-add-azure-aws-alibaba-client-repositories-to-repomap.patch +Patch0064: 0064-rhui-azure-add-target-client-s-GPG-key-to-mandatory-.patch +Patch0065: 0065-feat-alibaba-support-rhel9-upgrade-to-rhel10-using-r.patch +Patch0066: 0066-Inhibit-upgrade-on-systems-with-cgroupsv1-on-9to10-1.patch + %description %{summary} @@ -321,6 +348,28 @@ Requires: libdb-utils %patch -P 0041 -p1 %patch -P 0042 -p1 %patch -P 0043 -p1 +%patch -P 0044 -p1 +%patch -P 0045 -p1 +%patch -P 0046 -p1 +%patch -P 0047 -p1 +%patch -P 0048 -p1 +%patch -P 0049 -p1 +%patch -P 0050 -p1 +%patch -P 0051 -p1 +%patch -P 0052 -p1 +%patch -P 0053 -p1 +%patch -P 0054 -p1 +%patch -P 0055 -p1 +%patch -P 0056 -p1 +%patch -P 0057 -p1 +%patch -P 0058 -p1 +%patch -P 0060 -p1 +%patch -P 0061 -p1 +%patch -P 0062 -p1 +%patch -P 0063 -p1 +%patch -P 0064 -p1 +%patch -P 0065 -p1 +%patch -P 0066 -p1 %build @@ -402,6 +451,15 @@ done; # no files here %changelog +* Fri Jul 18 2025 Karolina Kula - 0.22.0-5 +- Fix broken bootloader on Azure hybrid images for systems previously upgraded from RHEL 7 +- Load DNF configuration correctly when using DNF libraries +- Disable localpkg_gpgcheck during the upgrade if 
set, to allow installation of the bundled leapp and leapp-repository dependency packages +- Add actor with recommendations for upgrade of MySQL +- The HybridImage model has been replaced by ConvertGrubenvTask +- Check the input format of the target version properly +- Resolves: RHEL-5459, RHEL-38255, RHEL-39095, RHEL-47472, RHEL-96238 + * Thu Jun 05 2025 Karolina Kula - 0.22.0-4 - Fix parsing of the kernel cmdline - Require leapp data with provided_data_streams 4.0+