From ef02e9e53b23a28a56124d5507fa34ad5d7be4c1 Mon Sep 17 00:00:00 2001
From: eabdullin
Date: Mon, 25 Nov 2024 09:10:29 +0000
Subject: [PATCH] Import from CS git

---
 .gitignore | 4 +-
 .leapp-repository.metadata | 4 +-
 ...-ARM-RHEL8-and-RHEL9-setup-entries-1.patch | 44 +
 ...o-not-bootstrap-target-client-on-aws.patch | 251 -
 ...l-versions-to-be-defined-for-obsolet.patch | 41 +
 SOURCES/0003-Add-RHEL-10.0-prod-certs.patch | 226 +
 ...ry-except-when-loading-obsoleted-key.patch | 100 +
 ...eferences-from-master-branch-to-main.patch | 283 +
 ...-ReadOfKernelArgsError-fix-the-error.patch | 43 +
 ...ule-too-many-positional-arguments-co.patch | 44 +
 ...-pam_userdb-migrate-backend-database.patch | 534 +
 ...entos.org-with-vault.centos.org-Cent.patch | 31 +
 ...econfig-Add-Report-to-produces-tuple.patch | 35 +
 ...fig-Use-args-from-first-entry-when-m.patch | 204 +
 ...refactor-to-handle-possible-future-r.patch | 216 +
 ...ck_microarch-add-rhel10-requirements.patch | 133 +
 ...ing-files-under-.-directory-hash-dir.patch | 44 +
 ...rlay-cap-the-max-size-of-disk-images.patch | 66 +
 ...rror-when-ModelViolationError-occurs.patch | 168 +
 ...7-InhibitWhenLuks-simplify-the-logic.patch | 56 +
 ...nner-Add-parent-device-name-to-lsblk.patch | 271 +
 ...ner-Add-LUKS-dump-scanner-and-models.patch | 1030 +
 ...allow-upgrades-for-LUKS2-bound-to-Cl.patch | 455 +
 ...e-inhibitwhenluks-actor-to-checkluks.patch | 57 +
 ...eing-blocked-by-resource-limitations.patch | 172 +
 ...possibility-to-use-net.naming-scheme.patch | 675 +
 ...e-feature-for-being-used-outside-8-9.patch | 26 +
 ...dition-on-when-net-naming-is-emitted.patch | 28 +
 ...devpartitionlayout-Skip-warning-msgs.patch | 56 +
 ...r-ARM-Upgrades-from-RHEL8-to-RHEL9.5.patch | 1756 +
 ...tor-to-handle-symlink-directory-with.patch | 115 +
 ...tor-docstring-for-the-el8-el9-rubyge.patch | 40 +
 SOURCES/0030-data-update-data-files.patch | 37814 ++++++++++++++++
 ...e-leapp-framework-6.x-update-leapp-d.patch | 56 +
 ...2-spec-create-etc-leapp-actor_conf.d.patch | 48 +
 ...pec-drop-.gitkeep-files-from-the-RPM.patch | 31 +
 .../0034-cli-load-actor-configuration.patch | 95 +
 ...-common-introduce-RHUI-configuration.patch | 157 +
 ...6-check_rhui-read-RHUI-configuration.patch | 457 +
 ...37-testutils-add-support-for-configs.patch | 53 +
 ...i-remove-repofiles-only-if-now-owned.patch | 70 +
 ...e-IPU-for-EL-9.6-and-drop-EL-8.8-9.2.patch | 857 +
 ...rop-the-etc-leapp-actor_confid.d-dir.patch | 29 +
 ...-net-naming-scheme-enable-by-default.patch | 74 +
 SPECS/leapp-repository.spec | 221 +-
 45 files changed, 46894 insertions(+), 276 deletions(-)
 create mode 100644 SOURCES/0001-rhui-alibaba-add-ARM-RHEL8-and-RHEL9-setup-entries-1.patch
 delete mode 100644 SOURCES/0001-rhui-do-not-bootstrap-target-client-on-aws.patch
 create mode 100644 SOURCES/0002-don-t-require-all-versions-to-be-defined-for-obsolet.patch
 create mode 100644 SOURCES/0003-Add-RHEL-10.0-prod-certs.patch
 create mode 100644 SOURCES/0004-properly-scope-try-except-when-loading-obsoleted-key.patch
 create mode 100644 SOURCES/0005-Update-references-from-master-branch-to-main.patch
 create mode 100644 SOURCES/0006-ReadOfKernelArgsError-fix-the-error.patch
 create mode 100644 SOURCES/0007-pylint-exclude-rule-too-many-positional-arguments-co.patch
 create mode 100644 SOURCES/0008-pam_userdb-migrate-backend-database.patch
 create mode 100644 SOURCES/0009-Replace-mirror.centos.org-with-vault.centos.org-Cent.patch
 create mode 100644 SOURCES/0010-kernelcmdlineconfig-Add-Report-to-produces-tuple.patch
 create mode 100644 SOURCES/0011-kernelcmdlineconfig-Use-args-from-first-entry-when-m.patch
 create mode 100644 SOURCES/0012-check_microarch-refactor-to-handle-possible-future-r.patch
 create mode 100644 SOURCES/0013-check_microarch-add-rhel10-requirements.patch
 create mode 100644 SOURCES/0014-Skip-checking-files-under-.-directory-hash-dir.patch
 create mode 100644 SOURCES/0015-lib-overlay-cap-the-max-size-of-disk-images.patch
 create mode 100644 SOURCES/0016-Raise-proper-error-when-ModelViolationError-occurs.patch
 create mode 100644 SOURCES/0017-InhibitWhenLuks-simplify-the-logic.patch
 create mode 100644 SOURCES/0018-StorageScanner-Add-parent-device-name-to-lsblk.patch
 create mode 100644 SOURCES/0019-LuksScanner-Add-LUKS-dump-scanner-and-models.patch
 create mode 100644 SOURCES/0020-InhibitWhenLuks-allow-upgrades-for-LUKS2-bound-to-Cl.patch
 create mode 100644 SOURCES/0021-Rename-inhibitwhenluks-actor-to-checkluks.patch
 create mode 100644 SOURCES/0022-Fix-IPU-being-blocked-by-resource-limitations.patch
 create mode 100644 SOURCES/0023-feature-add-possibility-to-use-net.naming-scheme.patch
 create mode 100644 SOURCES/0024-prevent-the-feature-for-being-used-outside-8-9.patch
 create mode 100644 SOURCES/0025-fix-condition-on-when-net-naming-is-emitted.patch
 create mode 100644 SOURCES/0026-scangrubdevpartitionlayout-Skip-warning-msgs.patch
 create mode 100644 SOURCES/0027-Workaround-for-ARM-Upgrades-from-RHEL8-to-RHEL9.5.patch
 create mode 100644 SOURCES/0028-Add-el9toel10-actor-to-handle-symlink-directory-with.patch
 create mode 100644 SOURCES/0029-Expand-on-the-actor-docstring-for-the-el8-el9-rubyge.patch
 create mode 100644 SOURCES/0030-data-update-data-files.patch
 create mode 100644 SOURCES/0031-Packaging-Require-leapp-framework-6.x-update-leapp-d.patch
 create mode 100644 SOURCES/0032-spec-create-etc-leapp-actor_conf.d.patch
 create mode 100644 SOURCES/0033-spec-drop-.gitkeep-files-from-the-RPM.patch
 create mode 100644 SOURCES/0034-cli-load-actor-configuration.patch
 create mode 100644 SOURCES/0035-configs-common-introduce-RHUI-configuration.patch
 create mode 100644 SOURCES/0036-check_rhui-read-RHUI-configuration.patch
 create mode 100644 SOURCES/0037-testutils-add-support-for-configs.patch
 create mode 100644 SOURCES/0038-userspacegen-rhui-remove-repofiles-only-if-now-owned.patch
 create mode 100644 SOURCES/0039-Enable-IPU-for-EL-9.6-and-drop-EL-8.8-9.2.patch
 create mode 100644 SOURCES/0040-spec-drop-the-etc-leapp-actor_confid.d-dir.patch
 create mode 100644 SOURCES/0041-feat-net-naming-scheme-enable-by-default.patch

diff --git a/.gitignore b/.gitignore
index d8cdd6f..584a96b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,2 @@
-SOURCES/deps-pkgs-10.tar.gz
-SOURCES/leapp-repository-0.20.0.tar.gz
+SOURCES/deps-pkgs-11.tar.gz
+SOURCES/leapp-repository-0.21.0.tar.gz
diff --git a/.leapp-repository.metadata b/.leapp-repository.metadata
index c93f55a..c20b327 100644
--- a/.leapp-repository.metadata
+++ b/.leapp-repository.metadata
@@ -1,2 +1,2 @@
-d520ada12294e4dd8837c81f92d4c184ab403d51 SOURCES/deps-pkgs-10.tar.gz
-185bbb040dba48e1ea2d6c627133af594378afd4 SOURCES/leapp-repository-0.20.0.tar.gz
+8b3fe3a7b52d2e144d374623aa5b0b0add7ab0c7 SOURCES/deps-pkgs-11.tar.gz
+9327be3720ccb3f7b285d2199463d7df0c38dfae SOURCES/leapp-repository-0.21.0.tar.gz
diff --git a/SOURCES/0001-rhui-alibaba-add-ARM-RHEL8-and-RHEL9-setup-entries-1.patch b/SOURCES/0001-rhui-alibaba-add-ARM-RHEL8-and-RHEL9-setup-entries-1.patch
new file mode 100644
index 0000000..bbd0ed2
--- /dev/null
+++ b/SOURCES/0001-rhui-alibaba-add-ARM-RHEL8-and-RHEL9-setup-entries-1.patch
@@ -0,0 +1,44 @@
+From fbc38d4ad1d828e0553579e3719c0e4ed4a2a6bd Mon Sep 17 00:00:00 2001
+From: jinkangkang <1547182170@qq.com>
+Date: Mon, 19 Aug 2024 18:46:08 +0800
+Subject: [PATCH 01/40] rhui(alibaba): add ARM RHEL8 and RHEL9 setup entries
+ (#1277)
+
+Since leapp's RHUI mechanism filters setups based on the architecture of the source system,
+it was not possible to upgrade of ARM-based RHEL systems on Alibaba cloud as there
+were no ARM entries in RHUI_SETUPS. This patch adds these entries, making it possible
+for EL 8 -> 9 upgrades of ARM systems on Alibaba cloud.
+---
+ repos/system_upgrade/common/libraries/rhui.py | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+diff --git a/repos/system_upgrade/common/libraries/rhui.py b/repos/system_upgrade/common/libraries/rhui.py
+index 51694ac2..30de0275 100644
+--- a/repos/system_upgrade/common/libraries/rhui.py
++++ b/repos/system_upgrade/common/libraries/rhui.py
+@@ -348,6 +348,22 @@ RHUI_SETUPS = {
+                           ('content.crt', RHUI_PKI_PRODUCT_DIR)
+                       ],
+                       os_version='9'),
++    ],
++    RHUIFamily(RHUIProvider.ALIBABA, arch=arch.ARCH_ARM64, client_files_folder='alibaba'): [
++        mk_rhui_setup(clients={'aliyun_rhui_rhel8'}, leapp_pkg='leapp-rhui-alibaba',
++                      mandatory_files=[('leapp-alibaba.repo', YUM_REPOS_PATH)],
++                      optional_files=[
++                          ('key.pem', RHUI_PKI_DIR),
++                          ('content.crt', RHUI_PKI_PRODUCT_DIR)
++                      ],
++                      os_version='8'),
++        mk_rhui_setup(clients={'aliyun_rhui_rhel9'}, leapp_pkg='leapp-rhui-alibaba',
++                      mandatory_files=[('leapp-alibaba.repo', YUM_REPOS_PATH)],
++                      optional_files=[
++                          ('key.pem', RHUI_PKI_DIR),
++                          ('content.crt', RHUI_PKI_PRODUCT_DIR)
++                      ],
++                      os_version='9'),
+     ]
+ }
+ 
+-- 
+2.47.0
+
diff --git a/SOURCES/0001-rhui-do-not-bootstrap-target-client-on-aws.patch b/SOURCES/0001-rhui-do-not-bootstrap-target-client-on-aws.patch
deleted file mode 100644
index dbe1f57..0000000
--- a/SOURCES/0001-rhui-do-not-bootstrap-target-client-on-aws.patch
+++ /dev/null
@@ -1,251 +0,0 @@
-From 921c06892f7550a3a8e2b3fe941c6272bdacf88d Mon Sep 17 00:00:00 2001
-From: mhecko
-Date: Thu, 15 Feb 2024 09:56:27 +0100
-Subject: [PATCH] rhui: do not bootstrap target client on aws
-
-Bootstrapping target RHUI client now requires installing the entire
-RHEL8 RPM stack. Threfore, do not try installing target client
-and instead rely only on the files from our leapp-rhui-aws package.
----
- .../cloud/checkrhui/libraries/checkrhui.py | 6 +-
- .../libraries/userspacegen.py | 104 ++++++++++++++----
- .../system_upgrade/common/models/rhuiinfo.py | 7 ++
- 3 files changed, 92 insertions(+), 25 deletions(-)
-
-diff --git a/repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py b/repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py
-index 84ab40e3..e1c158c7 100644
---- a/repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py
-+++ b/repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py
-@@ -142,7 +142,11 @@ def customize_rhui_setup_for_aws(rhui_family, setup_info):
- 
-     target_version = version.get_target_major_version()
-     if target_version == '8':
--        return  # The rhel8 plugin is packed into leapp-rhui-aws as we need python2 compatible client
-+        # RHEL8 rh-amazon-rhui-client depends on amazon-libdnf-plugin that depends
-+        # essentially on the entire RHEL8 RPM stack, so we cannot just swap the clients
-+        # The leapp-rhui-aws will provide all necessary files to access entire RHEL8 content
-+        setup_info.bootstrap_target_client = False
-+        return
- 
-     amazon_plugin_copy_task = CopyFile(src='/usr/lib/python3.9/site-packages/dnf-plugins/amazon-id.py',
-                                        dst='/usr/lib/python3.6/site-packages/dnf-plugins/')
-diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
-index d917bfd5..d60bc75f 100644
---- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
-+++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
-@@ -853,9 +853,9 @@ def _get_rhui_available_repoids(context, cloud_repo):
-     return set(repoids)
- 
- 
--def get_copy_location_from_copy_in_task(context, copy_task):
-+def get_copy_location_from_copy_in_task(context_basepath, copy_task):
-     basename = os.path.basename(copy_task.src)
--    dest_in_container = context.full_path(copy_task.dst)
-+    dest_in_container = os.path.join(context_basepath, copy_task.dst)
-     if os.path.isdir(dest_in_container):
-         return os.path.join(copy_task.dst, basename)
-     return copy_task.dst
-@@ -871,7 +871,10 @@ def _get_rh_available_repoids(context, indata):
- 
-     # If we are upgrading a RHUI system, check what repositories are provided by the (already installed) target clients
-     if indata and indata.rhui_info:
--        files_provided_by_clients = _query_rpm_for_pkg_files(context, indata.rhui_info.target_client_pkg_names)
-+        setup_info = indata.rhui_info.target_client_setup_info
-+        target_content_access_files = set()
-+        if setup_info.bootstrap_target_client:
-+            target_content_access_files = _query_rpm_for_pkg_files(context, indata.rhui_info.target_client_pkg_names)
- 
-         def is_repofile(path):
-             return os.path.dirname(path) == '/etc/yum.repos.d' and os.path.basename(path).endswith('.repo')
-@@ -884,24 +887,33 @@ def _get_rh_available_repoids(context, indata):
- 
-         yum_repos_d = context.full_path('/etc/yum.repos.d')
-         all_repofiles = {os.path.join(yum_repos_d, path) for path in os.listdir(yum_repos_d) if path.endswith('.repo')}
--        client_repofiles = {context.full_path(path) for path in files_provided_by_clients if is_repofile(path)}
-+        api.current_logger().debug('(RHUI Setup) All available repofiles: {0}'.format(' '.join(all_repofiles)))
-+
-+        target_access_repofiles = {
-+            context.full_path(path) for path in target_content_access_files if is_repofile(path)
-+        }
- 
-         # Exclude repofiles used to setup the target rhui access as on some platforms the repos provided by
-         # the client are not sufficient to install the client into target userspace (GCP)
-         rhui_setup_repofile_tasks = [task for task in setup_tasks if task.src.endswith('repo')]
-         rhui_setup_repofiles = (
--            get_copy_location_from_copy_in_task(context, copy_task) for copy_task in rhui_setup_repofile_tasks
-+            get_copy_location_from_copy_in_task(context.base_dir, copy) for copy in rhui_setup_repofile_tasks
-         )
-         rhui_setup_repofiles = {context.full_path(repofile) for repofile in rhui_setup_repofiles}
- 
--        foreign_repofiles = all_repofiles - client_repofiles - rhui_setup_repofiles
-+        foreign_repofiles = all_repofiles - target_access_repofiles - rhui_setup_repofiles
-+
-+        api.current_logger().debug(
-+            'The following repofiles are considered as unknown to'
-+            ' the target RHUI content setup and will be ignored: {0}'.format(' '.join(foreign_repofiles))
-+        )
- 
-         # Rename non-client repofiles so they will not be recognized when running dnf repolist
-         for foreign_repofile in foreign_repofiles:
-             os.rename(foreign_repofile, '{0}.back'.format(foreign_repofile))
- 
-         try:
--            dnf_cmd = ['dnf', 'repolist', '--releasever', target_ver, '-v']
-+            dnf_cmd = ['dnf', 'repolist', '--releasever', target_ver, '-v', '--enablerepo', '*']
-             repolist_result = context.call(dnf_cmd)['stdout']
-             repoid_lines = [line for line in repolist_result.split('\n') if line.startswith('Repo-id')]
-             rhui_repoids = {extract_repoid_from_line(line) for line in repoid_lines}
-@@ -919,6 +931,9 @@
-         for foreign_repofile in foreign_repofiles:
-             os.rename('{0}.back'.format(foreign_repofile), foreign_repofile)
- 
-+    api.current_logger().debug(
-+        'The following repofiles are considered as provided by RedHat: {0}'.format(' '.join(rh_repoids))
-+    )
-     return rh_repoids
- 
- 
-@@ -1086,7 +1101,7 @@ def _get_target_userspace():
-     return constants.TARGET_USERSPACE.format(get_target_major_version())
- 
- 
--def _create_target_userspace(context, packages, files, target_repoids):
-+def _create_target_userspace(context, indata, packages, files, target_repoids):
-     """Create the target userspace."""
-     target_path = _get_target_userspace()
-     prepare_target_userspace(context, target_path, target_repoids, list(packages))
-@@ -1096,12 +1111,57 @@ def _create_target_userspace(context, indata, packages, files, target_repoids):
-         _copy_files(target_context, files)
-     dnfplugin.install(_get_target_userspace())
- 
-+    # If we used only repofiles from leapp-rhui- then remove these as they provide
-+    # duplicit definitions as the target clients already installed in the target container
-+    if indata.rhui_info:
-+        api.current_logger().debug(
-+            'Target container should have access to content. '
-+            'Removing repofiles from leapp-rhui- from the target..'
-+        )
-+        setup_info = indata.rhui_info.target_client_setup_info
-+        if not setup_info.bootstrap_target_client:
-+            target_userspace_path = _get_target_userspace()
-+            for copy in setup_info.preinstall_tasks.files_to_copy_into_overlay:
-+                dst_in_container = get_copy_location_from_copy_in_task(target_userspace_path, copy)
-+                dst_in_container = dst_in_container.strip('/')
-+                dst_in_host = os.path.join(target_userspace_path, dst_in_container)
-+                if os.path.isfile(dst_in_host) and dst_in_host.endswith('.repo'):
-+                    api.current_logger().debug('Removing repofile: {0}'.format(dst_in_host))
-+                    os.remove(dst_in_host)
-+
-     # and do not forget to set the rhsm into the container mode again
-     with mounting.NspawnActions(_get_target_userspace()) as target_context:
-         rhsm.set_container_mode(target_context)
- 
- 
--def install_target_rhui_client_if_needed(context, indata):
-+def _apply_rhui_access_preinstall_tasks(context, rhui_setup_info):
-+    if rhui_setup_info.preinstall_tasks:
-+        api.current_logger().debug('Applying RHUI preinstall tasks.')
-+        preinstall_tasks = rhui_setup_info.preinstall_tasks
-+
-+        for file_to_remove in preinstall_tasks.files_to_remove:
-+            api.current_logger().debug('Removing {0} from the scratch container.'.format(file_to_remove))
-+            context.remove(file_to_remove)
-+
-+        for copy_info in preinstall_tasks.files_to_copy_into_overlay:
-+            api.current_logger().debug(
-+                'Copying {0} in {1} into the scratch container.'.format(copy_info.src, copy_info.dst)
-+            )
-+            context.makedirs(os.path.dirname(copy_info.dst), exists_ok=True)
-+            context.copy_to(copy_info.src, copy_info.dst)
-+
-+
-+def _apply_rhui_access_postinstall_tasks(context, rhui_setup_info):
-+    if rhui_setup_info.postinstall_tasks:
-+        api.current_logger().debug('Applying RHUI postinstall tasks.')
-+        for copy_info in rhui_setup_info.postinstall_tasks.files_to_copy:
-+            context.makedirs(os.path.dirname(copy_info.dst), exists_ok=True)
-+            debug_msg = 'Copying {0} to {1} (inside the scratch container).'
-+            api.current_logger().debug(debug_msg.format(copy_info.src, copy_info.dst))
-+            context.call(['cp', copy_info.src, copy_info.dst])
-+
-+
-+def setup_target_rhui_access_if_needed(context, indata):
-     if not indata.rhui_info:
-         return
- 
-@@ -1110,15 +1170,14 @@ def setup_target_rhui_access_if_needed(context, indata):
-     _create_target_userspace_directories(userspace_dir)
- 
-     setup_info = indata.rhui_info.target_client_setup_info
--    if setup_info.preinstall_tasks:
--        preinstall_tasks = setup_info.preinstall_tasks
-+    _apply_rhui_access_preinstall_tasks(context, setup_info)
- 
--        for file_to_remove in preinstall_tasks.files_to_remove:
--            context.remove(file_to_remove)
--
--        for copy_info in preinstall_tasks.files_to_copy_into_overlay:
--            context.makedirs(os.path.dirname(copy_info.dst), exists_ok=True)
--            context.copy_to(copy_info.src, copy_info.dst)
-+    if not setup_info.bootstrap_target_client:
-+        # Installation of the target RHUI client is not possible and we bundle all necessary
-+        # files into the leapp-rhui- packages.
-+        api.current_logger().debug('Bootstrapping target RHUI client is disabled, leapp will rely '
-+                                   'only on files budled in leapp-rhui- package.')
-+        return
- 
-     cmd = ['dnf', '-y']
- 
-@@ -1149,16 +1208,13 @@
- 
-     context.call(cmd, callback_raw=utils.logging_handler, stdin='\n'.join(dnf_transaction_steps))
- 
--    if setup_info.postinstall_tasks:
--        for copy_info in setup_info.postinstall_tasks.files_to_copy:
--            context.makedirs(os.path.dirname(copy_info.dst), exists_ok=True)
--            context.call(['cp', copy_info.src, copy_info.dst])
-+    _apply_rhui_access_postinstall_tasks(context, setup_info)
- 
-     # Do a cleanup so there are not duplicit repoids
-     files_owned_by_clients = _query_rpm_for_pkg_files(context, indata.rhui_info.target_client_pkg_names)
- 
-     for copy_task in setup_info.preinstall_tasks.files_to_copy_into_overlay:
--        dest = get_copy_location_from_copy_in_task(context, copy_task)
-+        dest = get_copy_location_from_copy_in_task(context.base_dir, copy_task)
-         can_be_cleaned_up = copy_task.src not in setup_info.files_supporting_client_operation
-         if dest not in files_owned_by_clients and can_be_cleaned_up:
-             context.remove(dest)
-@@ -1184,10 +1240,10 @@ def perform():
-         target_iso = next(api.consume(TargetOSInstallationImage), None)
-         with mounting.mount_upgrade_iso_to_root_dir(overlay.target, target_iso):
- 
--            install_target_rhui_client_if_needed(context, indata)
-+            setup_target_rhui_access_if_needed(context, indata)
- 
-             target_repoids = _gather_target_repositories(context, indata, prod_cert_path)
--            _create_target_userspace(context, indata.packages, indata.files, target_repoids)
-+            _create_target_userspace(context, indata, indata.packages, indata.files, target_repoids)
-             # TODO: this is tmp solution as proper one needs significant refactoring
-             target_repo_facts = repofileutils.get_parsed_repofiles(context)
-             api.produce(TMPTargetRepositoriesFacts(repositories=target_repo_facts))
-diff --git a/repos/system_upgrade/common/models/rhuiinfo.py b/repos/system_upgrade/common/models/rhuiinfo.py
-index 3eaa4826..0a2e45af 100644
---- a/repos/system_upgrade/common/models/rhuiinfo.py
-+++ b/repos/system_upgrade/common/models/rhuiinfo.py
-@@ -36,6 +36,13 @@ class TargetRHUISetupInfo(Model):
-     files_supporting_client_operation = fields.List(fields.String(), default=[])
-     """A subset of files copied in preinstall tasks that should not be cleaned up."""
- 
-+    bootstrap_target_client = fields.Boolean(default=True)
-+    """
-+    Swap the current RHUI client for the target one to facilitate access to the target content.
-+
-+    When False, only files from the leapp-rhui- will be used to access target content.
-+    """
-+
- 
- class RHUIInfo(Model):
-     """
--- 
-2.43.0
-
diff --git a/SOURCES/0002-don-t-require-all-versions-to-be-defined-for-obsolet.patch b/SOURCES/0002-don-t-require-all-versions-to-be-defined-for-obsolet.patch
new file mode 100644
index 0000000..286cab5
--- /dev/null
+++ b/SOURCES/0002-don-t-require-all-versions-to-be-defined-for-obsolet.patch
@@ -0,0 +1,41 @@
+From 7e0fb44bb673893d0409903f6a441d0eb2829d22 Mon Sep 17 00:00:00 2001
+From: Evgeni Golov
+Date: Tue, 20 Aug 2024 15:11:02 +0200
+Subject: [PATCH 02/40] don't require all versions to be defined for obsoleted
+ keys
+
+in releases where we do not have any obsoleted keys, we still had to
+define an entry (with an empty list), as otherwise the code would fail
+
+instead, we can catch the KeyError and carry on as nothing happened
+---
+ .../libraries/removeobsoleterpmgpgkeys.py | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py b/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py
+index 6e84c2e9..bda7efa3 100644
+--- a/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py
++++ b/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py
+@@ -12,11 +12,14 @@ def _get_obsolete_keys():
+     distribution = api.current_actor().configuration.os_release.release_id
+     obsoleted_keys_map = get_distribution_data(distribution).get('obsoleted-keys', {})
+     keys = []
+-    for version in range(7, int(get_target_major_version()) + 1):
+-        for key in obsoleted_keys_map[str(version)]:
+-            name, version, release = key.rsplit("-", 2)
+-            if has_package(InstalledRPM, name, version=version, release=release):
+-                keys.append(key)
++    try:
++        for version in range(7, int(get_target_major_version()) + 1):
++            for key in obsoleted_keys_map[str(version)]:
++                name, version, release = key.rsplit("-", 2)
++                if has_package(InstalledRPM, name, version=version, release=release):
++                    keys.append(key)
++    except KeyError:
++        pass
+ 
+     return keys
+ 
+-- 
+2.47.0
+
diff --git a/SOURCES/0003-Add-RHEL-10.0-prod-certs.patch b/SOURCES/0003-Add-RHEL-10.0-prod-certs.patch
new file mode 100644
index 0000000..77c31cd
--- /dev/null
+++ b/SOURCES/0003-Add-RHEL-10.0-prod-certs.patch
@@ -0,0 +1,226 @@
+From 9f2f1726d8a5bdd12309a3a3111984f1666b903f Mon Sep 17 00:00:00 2001
+From: Matej Matuska
+Date: Thu, 22 Aug 2024 15:52:19 +0200
+Subject: [PATCH 03/40] Add RHEL 10.0 prod-certs
+
+Previously we temporarily used the RHEL 9 x86_64 prod cert for others
+archs it was missing completely.
+
+Jira: OAMG-11138
+---
+ .../common/files/prod-certs/10.0/279.pem | 37 ++++++++++
+ .../common/files/prod-certs/10.0/419.pem | 37 ++++++++++
+ .../common/files/prod-certs/10.0/479.pem | 68 ++++++++++---------
+ .../common/files/prod-certs/10.0/72.pem | 37 ++++++++++
+ 4 files changed, 146 insertions(+), 33 deletions(-)
+ create mode 100644 repos/system_upgrade/common/files/prod-certs/10.0/279.pem
+ create mode 100644 repos/system_upgrade/common/files/prod-certs/10.0/419.pem
+ create mode 100644 repos/system_upgrade/common/files/prod-certs/10.0/72.pem
+
+diff --git a/repos/system_upgrade/common/files/prod-certs/10.0/279.pem b/repos/system_upgrade/common/files/prod-certs/10.0/279.pem
+new file mode 100644
+index 00000000..f62340fc
+--- /dev/null
++++ b/repos/system_upgrade/common/files/prod-certs/10.0/279.pem
+@@ -0,0 +1,37 @@
++-----BEGIN CERTIFICATE-----
++MIIGczCCBFugAwIBAgIUfZodBQY+YRSlyRRiFX1dx4vQ5y4wDQYJKoZIhvcNAQEL
++BQAwga4xCzAJBgNVBAYTAlVTMRcwFQYDVQQIDA5Ob3J0aCBDYXJvbGluYTEWMBQG
++A1UECgwNUmVkIEhhdCwgSW5jLjEYMBYGA1UECwwPUmVkIEhhdCBOZXR3b3JrMS4w
++LAYDVQQDDCVSZWQgSGF0IEVudGl0bGVtZW50IFByb2R1Y3QgQXV0aG9yaXR5MSQw
++IgYJKoZIhvcNAQkBFhVjYS1zdXBwb3J0QHJlZGhhdC5jb20wHhcNMjQwODE1MDYx
++NjQ5WhcNNDQwODE1MDYxNjQ5WjBEMUIwQAYDVQQDDDlSZWQgSGF0IFByb2R1Y3Qg
++SUQgWzA0YTU4NDFkLTVlNmUtNDU1Yy1hZWYwLTdhOTQ0NTBiNjg3Nl0wggIiMA0G
++CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDGP0nTjP4TN3LHVTfeQV+0u/Se01LU
++FJ66GhksOGzXzKSx6kbuFde0eHYIwV8tmZOMDIv2LVezHKRClVB1dMalQXfcLaoF
++AcHmCViz353vzXHynybzMXFs9xbzZMglduBbcStWHy+TmoJsbVwIAAdv4NYyrQQD
++LLVuX8mACCFg0YFG8ok5tN0Kt2liHTYpSoEuRI9ke+joNQkU3fsxcOlV5Cr1W2pG
++OkosvC4R9dvRjsjnEQ6tHeRhs5oEBZW3eZhnW3Qv8p9jaNU51TlYXLIH0+Fsx0uL
++XETzTWP4YmvBwtrGaq+PhRogJHNw8BM/zrNUzUEFBr6WKWRFB6zkfKNnNkOIZi52
++deFuqYuj+fRy5ehAFVWOHNFMzHvUSKJqGaLD5TW8aqQeFA3FvXce03WVwCFQIOvH
++F4y+sCNh1aliWkjJbc2yw9a3VhQeJ0wFIAngpy0h/3V3IT3dpK2XHAL9CfIWxk6Z
++wSwHNUKfP0aZYyXX/pfMFLXINSoHKSXHRMsf7P+wr0D47atkDLWYHIJjBXG9s5mG
++eobEC5OghL4DzW/mEKOwKI5JxUH5yKXfRgG7RwfzlFnQgs2Qd0p2sstZbjCOmEra
++cGfaDaLf7O1/6dAQPalCpn+uG5bv2NzIJmX2Rep7XA50XQLBqHg3r/cvMhcQQrIQ
++nE2pDC01zYhUTwIDAQABo4HxMIHuMAkGA1UdEwQCMAAwQwYMKwYBBAGSCAkBghcB
++BDMMMVJlZCBIYXQgRW50ZXJwcmlzZSBMaW51eCBmb3IgUG93ZXIsIGxpdHRsZSBl
++bmRpYW4wFgYMKwYBBAGSCAkBghcCBAYMBDEwLjAwGQYMKwYBBAGSCAkBghcDBAkM
++B3BwYzY0bGUwKQYMKwYBBAGSCAkBghcEBBkMF3JoZWwtMTAscmhlbC0xMC1wcGM2
++NGxlMB0GA1UdDgQWBBRh6iC1NXyvZ2Q6/2sI5hB40M0flTAfBgNVHSMEGDAWgBSW
++/bscQED/QIStsh8LJsHDam/WfDANBgkqhkiG9w0BAQsFAAOCAgEAv6ySsgygc2z2
++kQJeu9sdvBNFKe+gEtXbPu6+rZKPPosW3cggMJCnsZgki3nUogovz0Z3MPkbmRz+
++GJwVjiVBnfUQLoORSDYwqYZB4WRoqszW/dytd7/64IehvD/JZo3Oa8BNYRSG/Ukh
++7iUIT8ryFIH1DTUIersVObINN2gk3hC2JJXoTfNqIYG+4OAEUE7/F4CptRAGbgH/
++4/9vfe2KNXvPMoWvILpXpD5w8t9Xh0Wl97N1W7+FLVRwQHAQ2/yBTu/sY27FvVSl
++0o+SBSvjTKIi+9QslRpi0QCVza5WxHTiO8nzYgzFjfMkt6lzK74puf3VJavpqkQ9
++dVfyp36A3Fh6vDsiNxhsfKrp8z2JnKA3vdslsH7cOHCIFYHXiqeaP654t4oGeESD
++EPfS6PpXSyi47Kd/qjA2srgpXNQl2yMd0ih6NoHaoSYXFfb4LX6cWFGcT/AWZsaC
++xv2pN9J0KhF2loLp8SK19FESc0rJShkAacTcxeYjuDYbvLtJi4Z5aWWVU421rMSs
++X9IdiWa4WL70ZaDK5cP54S4zZNsVDKniUzNXwPltDCpqefy8ka4o5QlWNreBrXXW
++6cy8I6L2om7xZ5hAZ3CB7nUZe9QE/LXnHqK3cQetvd5Q2LMnp6gVtgQ4a+7vD9xz
++ExLtbBZjvGJFudimMmOxvn/J5+GMmm4=
++-----END CERTIFICATE-----
+diff --git a/repos/system_upgrade/common/files/prod-certs/10.0/419.pem b/repos/system_upgrade/common/files/prod-certs/10.0/419.pem
+new file mode 100644
+index 00000000..08cb5b02
+--- /dev/null
++++ b/repos/system_upgrade/common/files/prod-certs/10.0/419.pem
+@@ -0,0 +1,37 @@
++-----BEGIN CERTIFICATE-----
++MIIGZTCCBE2gAwIBAgIUWARL99TkK+hxtTJkE5icdHXLfY0wDQYJKoZIhvcNAQEL
++BQAwga4xCzAJBgNVBAYTAlVTMRcwFQYDVQQIDA5Ob3J0aCBDYXJvbGluYTEWMBQG
++A1UECgwNUmVkIEhhdCwgSW5jLjEYMBYGA1UECwwPUmVkIEhhdCBOZXR3b3JrMS4w
++LAYDVQQDDCVSZWQgSGF0IEVudGl0bGVtZW50IFByb2R1Y3QgQXV0aG9yaXR5MSQw
++IgYJKoZIhvcNAQkBFhVjYS1zdXBwb3J0QHJlZGhhdC5jb20wHhcNMjQwODE1MDYx
++NjQ5WhcNNDQwODE1MDYxNjQ5WjBEMUIwQAYDVQQDDDlSZWQgSGF0IFByb2R1Y3Qg
++SUQgW2Y3ZWFmNGU2LTYwZGYtNDMyNC04N2I0LTdhNGUzZGVkZmViNV0wggIiMA0G
++CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDGP0nTjP4TN3LHVTfeQV+0u/Se01LU
++FJ66GhksOGzXzKSx6kbuFde0eHYIwV8tmZOMDIv2LVezHKRClVB1dMalQXfcLaoF
++AcHmCViz353vzXHynybzMXFs9xbzZMglduBbcStWHy+TmoJsbVwIAAdv4NYyrQQD
++LLVuX8mACCFg0YFG8ok5tN0Kt2liHTYpSoEuRI9ke+joNQkU3fsxcOlV5Cr1W2pG
++OkosvC4R9dvRjsjnEQ6tHeRhs5oEBZW3eZhnW3Qv8p9jaNU51TlYXLIH0+Fsx0uL
++XETzTWP4YmvBwtrGaq+PhRogJHNw8BM/zrNUzUEFBr6WKWRFB6zkfKNnNkOIZi52
++deFuqYuj+fRy5ehAFVWOHNFMzHvUSKJqGaLD5TW8aqQeFA3FvXce03WVwCFQIOvH
++F4y+sCNh1aliWkjJbc2yw9a3VhQeJ0wFIAngpy0h/3V3IT3dpK2XHAL9CfIWxk6Z
++wSwHNUKfP0aZYyXX/pfMFLXINSoHKSXHRMsf7P+wr0D47atkDLWYHIJjBXG9s5mG
++eobEC5OghL4DzW/mEKOwKI5JxUH5yKXfRgG7RwfzlFnQgs2Qd0p2sstZbjCOmEra
++cGfaDaLf7O1/6dAQPalCpn+uG5bv2NzIJmX2Rep7XA50XQLBqHg3r/cvMhcQQrIQ
++nE2pDC01zYhUTwIDAQABo4HjMIHgMAkGA1UdEwQCMAAwNQYMKwYBBAGSCAkBgyMB
++BCUMI1JlZCBIYXQgRW50ZXJwcmlzZSBMaW51eCBmb3IgQVJNIDY0MBYGDCsGAQQB
++kggJAYMjAgQGDAQxMC4wMBkGDCsGAQQBkggJAYMjAwQJDAdhYXJjaDY0MCkGDCsG
++AQQBkggJAYMjBAQZDBdyaGVsLTEwLHJoZWwtMTAtYWFyY2g2NDAdBgNVHQ4EFgQU
++YeogtTV8r2dkOv9rCOYQeNDNH5UwHwYDVR0jBBgwFoAUlv27HEBA/0CErbIfCybB
++w2pv1nwwDQYJKoZIhvcNAQELBQADggIBAIpdcHN7RN18pg5ELfc55Sj58ivL5N25
++19KprqbM7aVum32abw7/Qksfs6maGQpU6Hh/UqhJlGQ2bN48jZ/kdMKor4agSQ/T
++iwr3b8RBJFPVCuqQJXIe4g3iRbHfnIjGxgoMgv36j58PENoEnpPtR7ZtHMyqQ2SO
++m1WRQhY5tJ4Fk/Zkx/trxlNvmsTAjNRa530kqG4TfiMVvWNaVdxHsjMv0lXLJRXx
++KT6+iHt2QBs2No5O8cjlXr/CzfGrB5TlBNrsHqhO0Llmw28KpcWGYGdexKdIHrDG
++A/K0Pr21yRstUWN39jz/tdEqt1q8T7/it3oM976keQmFAxBa/CpyEG5Y6FKw9+F0
++LtkAyI3XGHK7LbCOE67s7u0/BfgQvww1FqztVnVZ4sXlagj/IuYPJBhfGDe/6tik
++laqP8FtR6xJdSra2YQMBc0kZb0Sv1uy7pGofNSvLM5L76XqiwKoDVo/eAcl60OWY
++rF86pEDLGDmdJBLJKX2/77pzpQpZ9Yvc4vWwoZrP4gRKBuWF28aLH0OsWzdsfdMG
++9+DrcO/58slMbWng1ZzOQyEjp7x1kto5sa5m2q8LMo06ETYT8ps5A0hyltBz1yAt
++JEBS4Y14YlF6Px67aTak07MNo7AaaphuD47D2Sy3pwHa+vOx4nv/G33+G0iOm3Lr
++zVAjwlfLIUB9
++-----END CERTIFICATE-----
+diff --git a/repos/system_upgrade/common/files/prod-certs/10.0/479.pem b/repos/system_upgrade/common/files/prod-certs/10.0/479.pem
+index 1ea1cd3d..d89f6188 100644
+--- a/repos/system_upgrade/common/files/prod-certs/10.0/479.pem
++++ b/repos/system_upgrade/common/files/prod-certs/10.0/479.pem
+@@ -1,35 +1,37 @@
+ -----BEGIN CERTIFICATE-----
+-MIIGFTCCA/2gAwIBAgIJALDxRLt/tVDQMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+-VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+-YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+-IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+-ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIzMDcxOTE2MzQwOFoXDTQzMDcx
+-OTE2MzQwOFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFsxZDg0ZDQ5
+-Ny1jZmNmLTQxNjEtOTM0YS0zNzk2MDU4M2ZmZGZdMIICIjANBgkqhkiG9w0BAQEF
+-AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+-sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+-8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+-RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+-5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+-xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+-QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+-yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+-1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+-5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+-ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+-AwEAAaOBnjCBmzAJBgNVHRMEAjAAMDUGDCsGAQQBkggJAYNfAQQlDCNSZWQgSGF0
+-IEVudGVycHJpc2UgTGludXggZm9yIHg4Nl82NDAVBgwrBgEEAZIICQGDXwIEBQwD
+-OS40MBgGDCsGAQQBkggJAYNfAwQIDAZ4ODZfNjQwJgYMKwYBBAGSCAkBg18EBBYM
+-FHJoZWwtOSxyaGVsLTkteDg2XzY0MA0GCSqGSIb3DQEBCwUAA4ICAQCGUDPFBrLs
+-sK/RITJothRhKhKNX3zu9TWRG0WKxszCx/y7c4yEfH1TV/yd7BNB2RubaoayWz8E
+-TQjcRW8BnVu9JrlbdpWJm4eN+dOOpcESPilLnkz4Tr0WYDsT1/jk/uiorK4h21S0
+-EwMicuSuEmm0OUEX0zj2X/IyveFRtpJpH/JktznCkvexysc1JRzqMCbal8GipRX9
+-Xf7Oko6QiaUpu5GDLN2OXhizYHdR2f3l+Sn2cScsbi3fSVv+DLsnaz6J0kZ4U8q3
+-lYk/ZYifJjG+/7cv3e+usixpmK/qYlpOvunUDnqOkDfUs4/4bZjH8e8CdqJk4YvU
+-RRtLr7muXEJsaqF7lxAViXnKxT/z/+1kOgN/+Oyzjs4QDsk2HQpWHFgNYSSG9Mmz
+-PUS8tk2T0j5sN55X7QRRl5c0oqrBU5XaWyL26QcfONYcR8dBaKawjxg8CI9KzsYY
+-sb2jjS+fBkB1OI2c6z4OZRd+0N6FQ6gq++KiXOLFvi/QSFNi9Veb56c5tR2l6fBk
+-0pSH06Gg2s0aQg20NdMIr+HaYsVdJRsE1FgQ2tlfFx9rGkcqhgwV3Za/abgtRb2o
+-YVwps28DLm41DXf5DnXK+BXFHrtR/3YAZtga+R7OL/RvcF0kc2kudlxqd/8Y33uL
+-nqnoATy31FTW4J4rEfanJTQgTpatZmbaLQ==
++MIIGYzCCBEugAwIBAgIUL5D34AcwqLAbqlUcxntHUCtEVxQwDQYJKoZIhvcNAQEL
++BQAwga4xCzAJBgNVBAYTAlVTMRcwFQYDVQQIDA5Ob3J0aCBDYXJvbGluYTEWMBQG
++A1UECgwNUmVkIEhhdCwgSW5jLjEYMBYGA1UECwwPUmVkIEhhdCBOZXR3b3JrMS4w
++LAYDVQQDDCVSZWQgSGF0IEVudGl0bGVtZW50IFByb2R1Y3QgQXV0aG9yaXR5MSQw
++IgYJKoZIhvcNAQkBFhVjYS1zdXBwb3J0QHJlZGhhdC5jb20wHhcNMjQwODE1MDYx
++NjQ5WhcNNDQwODE1MDYxNjQ5WjBEMUIwQAYDVQQDDDlSZWQgSGF0IFByb2R1Y3Qg
++SUQgWzk5NDZhMmY5LTI4NDMtNDJhOS1iNzhlLTIzM2E5ODIwYjVhZV0wggIiMA0G
++CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDGP0nTjP4TN3LHVTfeQV+0u/Se01LU
++FJ66GhksOGzXzKSx6kbuFde0eHYIwV8tmZOMDIv2LVezHKRClVB1dMalQXfcLaoF
++AcHmCViz353vzXHynybzMXFs9xbzZMglduBbcStWHy+TmoJsbVwIAAdv4NYyrQQD
++LLVuX8mACCFg0YFG8ok5tN0Kt2liHTYpSoEuRI9ke+joNQkU3fsxcOlV5Cr1W2pG
++OkosvC4R9dvRjsjnEQ6tHeRhs5oEBZW3eZhnW3Qv8p9jaNU51TlYXLIH0+Fsx0uL
++XETzTWP4YmvBwtrGaq+PhRogJHNw8BM/zrNUzUEFBr6WKWRFB6zkfKNnNkOIZi52
++deFuqYuj+fRy5ehAFVWOHNFMzHvUSKJqGaLD5TW8aqQeFA3FvXce03WVwCFQIOvH
++F4y+sCNh1aliWkjJbc2yw9a3VhQeJ0wFIAngpy0h/3V3IT3dpK2XHAL9CfIWxk6Z
++wSwHNUKfP0aZYyXX/pfMFLXINSoHKSXHRMsf7P+wr0D47atkDLWYHIJjBXG9s5mG
++eobEC5OghL4DzW/mEKOwKI5JxUH5yKXfRgG7RwfzlFnQgs2Qd0p2sstZbjCOmEra
++cGfaDaLf7O1/6dAQPalCpn+uG5bv2NzIJmX2Rep7XA50XQLBqHg3r/cvMhcQQrIQ
++nE2pDC01zYhUTwIDAQABo4HhMIHeMAkGA1UdEwQCMAAwNQYMKwYBBAGSCAkBg18B
++BCUMI1JlZCBIYXQgRW50ZXJwcmlzZSBMaW51eCBmb3IgeDg2XzY0MBYGDCsGAQQB
++kggJAYNfAgQGDAQxMC4wMBgGDCsGAQQBkggJAYNfAwQIDAZ4ODZfNjQwKAYMKwYB
++BAGSCAkBg18EBBgMFnJoZWwtMTAscmhlbC0xMC14ODZfNjQwHQYDVR0OBBYEFGHq
++ILU1fK9nZDr/awjmEHjQzR+VMB8GA1UdIwQYMBaAFJb9uxxAQP9AhK2yHwsmwcNq
++b9Z8MA0GCSqGSIb3DQEBCwUAA4ICAQAa+c2/Usg6JToULhYTdLhf15Hk6xxdlwT7
++zZlnZLbuAKtaDqP1NiSiX0Z/lMJzFfW0B/zyWLy8uiXLYmF5V28f8yWK0Nksx2v7
++I7u6ZZN2dKDQZKsEoP0g3ptvVRWn9h5otS7yPkOK4Dzj04yJqOSGP9bp6OHEhm1S
++x4ErITkN/3MXOf9vT+I6wydVKsw4fdlWgVjmBd90bzVTnv4dWtJio+le+9ad9RSf
++M3aD5ufiELeRKMp6ExnC/cnoWtuH+b4BJ37TQ3Kpn3fDtbrzVvQH/dpqZ7P33yqg
++PnBEXOiLimDnnmDJ9ImQ1pVTrKJMxaj1Mk6onERe36n/iAsj+BwZvBiv7UaLPMnW
++nJGg+LQ4iUZrGWYD4N9Ou++nvsR8dCWRhXSuXensfli3lL/W0P62yzfYCyqOYeL1
++msDcCmBEWJUtAaeAbASUIVx02JWPPmMSUqWs8xOecQjzoGuCQg4JM/UfsZzxepw0
++bs9YSUVw8J9R2d4kuze65qDTMRg+cK2LX1xg1KkR/UWZOGxHHJAfwGWdPwSkiOPQ
++MVJ7LJjvozebHWSuiSxk+GWWr+NdxIJrFRGbivXyAkmqMRrPe1VLVxWwCdyud9o8
++b2WbFgrNS2jOnHwldtM2ZAhrF5W4ckvVL7hLp2JoQnJfCcWson9NK6Y2M4bNwQnC
++ihxphLzOAw==
+ -----END CERTIFICATE-----
+diff --git a/repos/system_upgrade/common/files/prod-certs/10.0/72.pem b/repos/system_upgrade/common/files/prod-certs/10.0/72.pem
+new file mode 100644
+index 00000000..e0274f9c
+--- /dev/null
++++ b/repos/system_upgrade/common/files/prod-certs/10.0/72.pem
+@@ -0,0 +1,37 @@
++-----BEGIN CERTIFICATE-----
++MIIGZDCCBEygAwIBAgIUSTvcD4Wsduixh8PFmwk6aI0KTEcwDQYJKoZIhvcNAQEL
++BQAwga4xCzAJBgNVBAYTAlVTMRcwFQYDVQQIDA5Ob3J0aCBDYXJvbGluYTEWMBQG
++A1UECgwNUmVkIEhhdCwgSW5jLjEYMBYGA1UECwwPUmVkIEhhdCBOZXR3b3JrMS4w
++LAYDVQQDDCVSZWQgSGF0IEVudGl0bGVtZW50IFByb2R1Y3QgQXV0aG9yaXR5MSQw
++IgYJKoZIhvcNAQkBFhVjYS1zdXBwb3J0QHJlZGhhdC5jb20wHhcNMjQwODE1MDYx
++NjQ5WhcNNDQwODE1MDYxNjQ5WjBEMUIwQAYDVQQDDDlSZWQgSGF0IFByb2R1Y3Qg
++SUQgW2VjN2EwZDQyLTgzNjItNDg2YS04ZjcyLTc3YThiOWU2MjM0YV0wggIiMA0G
++CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDGP0nTjP4TN3LHVTfeQV+0u/Se01LU
++FJ66GhksOGzXzKSx6kbuFde0eHYIwV8tmZOMDIv2LVezHKRClVB1dMalQXfcLaoF
++AcHmCViz353vzXHynybzMXFs9xbzZMglduBbcStWHy+TmoJsbVwIAAdv4NYyrQQD
++LLVuX8mACCFg0YFG8ok5tN0Kt2liHTYpSoEuRI9ke+joNQkU3fsxcOlV5Cr1W2pG
++OkosvC4R9dvRjsjnEQ6tHeRhs5oEBZW3eZhnW3Qv8p9jaNU51TlYXLIH0+Fsx0uL
++XETzTWP4YmvBwtrGaq+PhRogJHNw8BM/zrNUzUEFBr6WKWRFB6zkfKNnNkOIZi52
++deFuqYuj+fRy5ehAFVWOHNFMzHvUSKJqGaLD5TW8aqQeFA3FvXce03WVwCFQIOvH
++F4y+sCNh1aliWkjJbc2yw9a3VhQeJ0wFIAngpy0h/3V3IT3dpK2XHAL9CfIWxk6Z
++wSwHNUKfP0aZYyXX/pfMFLXINSoHKSXHRMsf7P+wr0D47atkDLWYHIJjBXG9s5mG
++eobEC5OghL4DzW/mEKOwKI5JxUH5yKXfRgG7RwfzlFnQgs2Qd0p2sstZbjCOmEra
++cGfaDaLf7O1/6dAQPalCpn+uG5bv2NzIJmX2Rep7XA50XQLBqHg3r/cvMhcQQrIQ
++nE2pDC01zYhUTwIDAQABo4HiMIHfMAkGA1UdEwQCMAAwOwYLKwYBBAGSCAkBSAEE
++LAwqUmVkIEhhdCBFbnRlcnByaXNlIExpbnV4IGZvciBJQk0geiBTeXN0ZW1zMBUG
++CysGAQQBkggJAUgCBAYMBDEwLjAwFgYLKwYBBAGSCAkBSAMEBwwFczM5MHgwJgYL
++KwYBBAGSCAkBSAQEFwwVcmhlbC0xMCxyaGVsLTEwLXMzOTB4MB0GA1UdDgQWBBRh
++6iC1NXyvZ2Q6/2sI5hB40M0flTAfBgNVHSMEGDAWgBSW/bscQED/QIStsh8LJsHD
++am/WfDANBgkqhkiG9w0BAQsFAAOCAgEAsj4qPVsDkFrfuVDn8JCJ7tIH5WhaOzL6
++3GBsQIKGd8a1WscPfSpr/phNSBPWFyvV2b+0HzblYzBZbx6ExykTDLh5L01nPM0s
+++hqPxZgF/kcTbLWmAanl32R9+Gs2P2JN1CaCclXgM4USEagBWYeMhJSmQR3bOnSe
++Jjm3tjvhnbIQd6xgPpTjrqZ35z1BW0P0qQFdBbB0k+MfPkhYKEr+Vfn0rU8vk4UP
++F9sY9HkZLqIBxlXeTUerNZvHSuOy2KgoS4l25/QwUutHnnSGZZpARiU1XYNcynVL
++r5COHlb6TYkeRhSAm6RVM4XPYoFgN6cbhY1orwFC2/0i30EnsTMB6ctnLKCf7qgM
++GDG2W7ct0m6koA7s2TGmgp33DPw9adX7qgIV0OjLzBYJ1fyVv3sYlOKRuyDz0l+N
++u6Rnv1ecNUspWn+5ogBbdgwU6yah6oo/fJIWm62U38UGH5ic+/7sBnga8q5sDI90
+++h+nlTIAnD0ICzjEDASiLlYft+hQ9pOt/rgEIrPeKTe+fbefUIXJ5h343E51POnY
++uZRXcirc33QL/PgBRce1taIXjsRD+FSJM0tx/vf8H9j0rzSAxDoXJNsdq4/32scy
++6Zk2fgtm80xxIzju84jXVUrSBRMpWD9I+FZId4IE7tQhwKNi1b7DdNeaQLfaoq8U
++1PEea/tQDSA=
++-----END CERTIFICATE-----
+-- 
+2.47.0
+
diff --git a/SOURCES/0004-properly-scope-try-except-when-loading-obsoleted-key.patch b/SOURCES/0004-properly-scope-try-except-when-loading-obsoleted-key.patch
new file mode 100644
index 0000000..e8d1cbf
--- /dev/null
+++ b/SOURCES/0004-properly-scope-try-except-when-loading-obsoleted-key.patch
@@ -0,0 +1,100 @@
+From bf302fc794957a88bc4785f4dd2505b8d71012e0 Mon Sep 17 00:00:00 2001
+From: Evgeni Golov
+Date: Wed, 21 Aug 2024 07:52:02 +0200
+Subject: [PATCH 04/40] properly scope try/except when loading obsoleted keys
+
+We want to load all possible keys, even *after* a KeyError happenend
+
+Fixes: 7e0fb44bb673893d0409903f6a441d0eb2829d22
+---
+ .../libraries/removeobsoleterpmgpgkeys.py | 8 +--
+ .../tests/test_removeobsoleterpmgpgkeys.py | 50 +++++++++++++++++++
+ 2 files changed, 54 insertions(+), 4 deletions(-)
+
+diff --git a/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py b/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py
+index bda7efa3..198c4368 100644
+--- a/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py
++++ b/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py
+@@ -12,14 +12,14 @@ def _get_obsolete_keys():
+     distribution = api.current_actor().configuration.os_release.release_id
+     obsoleted_keys_map = get_distribution_data(distribution).get('obsoleted-keys', {})
+     keys = []
+-    try:
+-        for version in range(7, int(get_target_major_version()) + 1):
++    for version in range(7, int(get_target_major_version()) + 1):
++        try:
+             for key in obsoleted_keys_map[str(version)]:
+                 name, version, release = key.rsplit("-", 2)
+                 if has_package(InstalledRPM, name, version=version, release=release):
+                     keys.append(key)
+-    except KeyError:
+-        pass
++        except KeyError:
++            pass
+ 
+     return keys
+ 
+diff --git a/repos/system_upgrade/common/actors/removeobsoletegpgkeys/tests/test_removeobsoleterpmgpgkeys.py b/repos/system_upgrade/common/actors/removeobsoletegpgkeys/tests/test_removeobsoleterpmgpgkeys.py
+index 4d9a0e84..b78174cc 100644
+--- a/repos/system_upgrade/common/actors/removeobsoletegpgkeys/tests/test_removeobsoleterpmgpgkeys.py
++++ b/repos/system_upgrade/common/actors/removeobsoletegpgkeys/tests/test_removeobsoleterpmgpgkeys.py
+@@ -76,6 +76,56 @@ def test_get_obsolete_keys(monkeypatch, version, expected):
+     assert set(keys) == set(expected)
+ 
+ 
++@pytest.mark.parametrize(
++    "version, obsoleted_keys, expected",
++    [
++        (10, None, []),
++        (10, {}, []),
++        (10, {"8": ["gpg-pubkey-888-abc"], "10": ["gpg-pubkey-10-10"]}, ["gpg-pubkey-888-abc", "gpg-pubkey-10-10"]),
++        (9, {"8": ["gpg-pubkey-888-abc"], "9": ["gpg-pubkey-999-def"]}, ["gpg-pubkey-999-def", "gpg-pubkey-888-abc"]),
++        (8, {"8": ["gpg-pubkey-888-abc"], "9": ["gpg-pubkey-999-def"]}, ["gpg-pubkey-888-abc"])
++    ]
++)
++def test_get_obsolete_keys_incomplete_data(monkeypatch, version, obsoleted_keys, expected):
++    def get_target_major_version_mocked():
++        return version
++
++    def get_distribution_data_mocked(_distro):
++        if obsoleted_keys is None:
++            return {}
++        return {'obsoleted-keys': obsoleted_keys}
++
++    def has_package_mocked(*args, **kwargs):
++        return True
++
++    monkeypatch.setattr(
++        removeobsoleterpmgpgkeys,
++        "get_target_major_version",
++        get_target_major_version_mocked,
++    )
++
++    monkeypatch.setattr(
++        removeobsoleterpmgpgkeys,
++        "get_distribution_data",
++        get_distribution_data_mocked,
++    )
++
++    monkeypatch.setattr(
++        removeobsoleterpmgpgkeys,
++        "has_package",
++        has_package_mocked,
++    )
++
++    monkeypatch.setattr(
++        api,
++        "current_actor",
++        CurrentActorMocked(),
++    )
++
++    keys = removeobsoleterpmgpgkeys._get_obsolete_keys()
++    assert set(keys) == set(expected)
++
++
+ @pytest.mark.parametrize(
+     "keys, should_register",
+     [
+-- 
+2.47.0
+
diff --git a/SOURCES/0005-Update-references-from-master-branch-to-main.patch b/SOURCES/0005-Update-references-from-master-branch-to-main.patch
new file mode 100644
index 0000000..27b3c6b
--- /dev/null
+++ b/SOURCES/0005-Update-references-from-master-branch-to-main.patch
@@ -0,0 +1,283 @@
+From 9d49f4675c2b7b18ba7b344bb0032a5538782560 Mon Sep 17 00:00:00 2001
+From: Vojtech Sokol
+Date: Mon, 2 Sep 2024 17:21:36 +0200
+Subject: [PATCH 05/40] Update references from master branch to main
+
+Focus was on making the CI and GitHub actions work after the default
+branch was switched from master to main.
+
+See: OAMG-4907
+---
+ .github/workflows/codespell.yml | 4 ++--
+ .github/workflows/differential-shellcheck.yml | 4 ++--
+ .github/workflows/pr-welcome-msg.yml | 2 +-
+ .github/workflows/tmt-tests.yml | 16 ++++++++--------
+ .github/workflows/unit-tests.yml | 12 ++++++------
+ .packit.yaml | 10 +++++-----
+ Makefile | 14 +++++++-------
+ 7 files changed, 31 insertions(+), 31 deletions(-)
+
+diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml
+index 673cef17..1195d8d1 100644
+--- a/.github/workflows/codespell.yml
++++ b/.github/workflows/codespell.yml
+@@ -3,10 +3,10 @@ name: Codespell
+ on:
+   push:
+     branches:
+-      - master
++      - main
+   pull_request:
+     branches:
+-      - master
++      - main
+ 
+ jobs:
+   codespell:
+diff --git a/.github/workflows/differential-shellcheck.yml b/.github/workflows/differential-shellcheck.yml
+index f1ed5f6a..e1bafb93 100644
+--- a/.github/workflows/differential-shellcheck.yml
++++ b/.github/workflows/differential-shellcheck.yml
+@@ -4,7 +4,7 @@
+ name: Differential ShellCheck
+ on:
+   pull_request:
+-    branches: [master]
++    branches: [main]
+ 
+ permissions:
+   contents: read
+@@ -17,7 +17,7 @@ jobs:
+       security-events: write
+       pull-requests: write
+ 
+-    steps: 
++    steps:
+       - name: Repository checkout
+         uses: actions/checkout@v4
+         with:
+diff --git a/.github/workflows/pr-welcome-msg.yml b/.github/workflows/pr-welcome-msg.yml
+index ff9414d2..0102c41f 100644
+--- a/.github/workflows/pr-welcome-msg.yml
++++ b/.github/workflows/pr-welcome-msg.yml
+@@ -28,7 +28,7 @@ jobs:
+           However, here are additional useful commands for packit:
+           - **`/packit test`** to re-run manually the default tests
+           - **`/packit retest-failed`** to re-run failed tests manually
+-          - **`/packit test oamg/leapp#42`** to run tests with leapp builds for the leapp PR#42 (default is latest upstream - master - build)
++          - **`/packit test oamg/leapp#42`** to run tests with leapp builds for the leapp PR#42 (default is latest upstream - main - build)
+ 
+           Note that first time contributors cannot run tests automatically - they need to be started by a reviewer.
+ 
+diff --git a/.github/workflows/tmt-tests.yml b/.github/workflows/tmt-tests.yml
+index 7e9fd706..1fa00e60 100644
+--- a/.github/workflows/tmt-tests.yml
++++ b/.github/workflows/tmt-tests.yml
+@@ -12,7 +12,7 @@ jobs:
+ 
+   call_workflow_tests_79to88_integration:
+     needs: call_workflow_copr_build
+-    uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@master
++    uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@main
+     secrets: inherit
+     with:
+       copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
+@@ -26,7 +26,7 @@ jobs:
+ 
+   call_workflow_tests_79to86_integration:
+     needs: call_workflow_copr_build
+-    uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@master
++    uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@main
+     secrets: inherit
+     with:
+       copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
+@@ -40,7 +40,7 @@ jobs:
+ 
+   call_workflow_tests_79to88_sst:
+     needs: call_workflow_copr_build
+-    uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@master
++    uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@main
+     secrets: inherit
+     with:
+       copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
+@@ -55,7 +55,7 @@ jobs:
+ 
+   call_workflow_tests_7to8_aws:
+     needs: call_workflow_copr_build
+-    uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@master
++    uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@main
+     secrets: inherit
+     with:
+       copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
+@@ -71,7 +71,7 @@ jobs:
+ 
+   call_workflow_tests_86to90_integration:
+     needs: call_workflow_copr_build
+-    uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@master
++    uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@main
+     secrets: inherit
+     with:
+       copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
+@@ -85,7 +85,7 @@ jobs:
+ 
+   call_workflow_tests_88to92_integration:
+     needs: call_workflow_copr_build
+-    uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@master
++    uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@main
+     secrets: inherit
+     with:
+       copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
+@@ -101,7 +101,7 @@ jobs:
+ 
+   call_workflow_tests_86to90_sst:
+     needs: call_workflow_copr_build
+-    uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@master
++    uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@main
+     secrets: inherit
+     with:
+       copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
+@@ -116,7 +116,7 @@ jobs:
+ 
+   call_workflow_tests_86to90_aws:
+     needs: call_workflow_copr_build
+-    uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@master
++    uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@main
+     secrets: inherit
+     with:
+       copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
+diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml
+index 2a05106e..42b72b8d 100644
+--- a/.github/workflows/unit-tests.yml
++++ b/.github/workflows/unit-tests.yml
+@@ -2,10 +2,10 @@ name: Unit Tests
+ on:
+   push:
+     branches:
+-      - master
++      - main
+   pull_request:
+     branches:
+-      - master
++      - main
+ 
+ jobs:
+   test:
+@@ -74,10 +74,10 @@ jobs:
+           # NOTE(ivasilev) fetch-depth 0 is critical here as leapp deps discovery depends on specific substring in
+           # commit message and default 1 option will get us just merge commit which has an unrelevant message.
+           fetch-depth: '0'
+-      # NOTE(ivasilev) master -> origin/master is used for leapp deps discovery in Makefile via git log master..HEAD
+-      - name: Set master to origin/master
+-        if: github.ref != 'refs/heads/master'
++      # NOTE(ivasilev) main -> origin/main is used for leapp deps discovery in Makefile via git log main..HEAD
++      - name: Set main to origin/main
++        if: github.ref != 'refs/heads/main'
+         run: |
+-          git branch -f master origin/master
++          git branch -f main origin/main
+       - name: ${{matrix.scenarios.name}}
+         run: script -e -c /bin/bash -c 'TERM=xterm podman build --security-opt=seccomp=unconfined -t leapp-tests -f utils/container-tests/Containerfile.${{matrix.scenarios.container}} utils/container-tests && PYTHON_VENV=${{matrix.scenarios.python}} REPOSITORIES=${{matrix.scenarios.repos}} podman run --security-opt=seccomp=unconfined --rm -ti -v ${PWD}:/payload --env=PYTHON_VENV --env=REPOSITORIES leapp-tests'
+diff --git a/.packit.yaml b/.packit.yaml
+index d91a47e5..fbfd0eea 100644
+--- a/.packit.yaml
++++ b/.packit.yaml
+@@ -22,7 +22,7 @@ actions:
+   fix-spec-file:
+   - bash -c "sed -i -r \"0,/Release:/ s/Release:(\s*)\S*/Release:\1${PACKIT_RPMSPEC_RELEASE}%{?dist}/\" packaging/leapp-repository.spec"
+   post-upstream-clone:
+-  # builds from PRs should have lower NVR than those from master branch
++  # builds from PRs should have lower NVR than those from main branch
+   - bash -c "sed -i \"s/1%{?dist}/0%{?dist}/g\" packaging/leapp-repository.spec"
+ 
+ jobs:
+@@ -44,12 +44,12 @@ jobs:
+       fix-spec-file:
+       - bash -c "sed -i -r \"0,/Release:/ s/Release:(\s*)\S*/Release:\1${PACKIT_RPMSPEC_RELEASE}%{?dist}/\" packaging/leapp-repository.spec"
+       post-upstream-clone:
+-      # builds from PRs should have lower NVR than those from master branch
++      # builds from PRs should have lower NVR than those from main branch
+       - bash -c "sed -i \"s/1%{?dist}/0%{?dist}/g\" packaging/leapp-repository.spec"
+ - job: copr_build
+   trigger: commit
+   metadata:
+-    branch: master
++    branch: main
+     owner: "@oamg"
+     project: leapp
+     targets:
+@@ -65,7 +65,7 @@ jobs:
+       fix-spec-file:
+       - bash -c "sed -i -r \"0,/Release:/ s/Release:(\s*)\S*/Release:\1${PACKIT_RPMSPEC_RELEASE}%{?dist}/\" packaging/leapp-repository.spec"
+       post-upstream-clone:
+-      # builds from master branch should start with 100 release, to have high priority
++      # builds from main branch should start with 100 release, to have high priority
+       - bash -c "sed -i \"s/1%{?dist}/100%{?dist}/g\" packaging/leapp-repository.spec"
+ - job: copr_build
+   trigger: release
+@@ -85,7 +85,7 @@ jobs:
+       fix-spec-file:
+       - bash -c "sed -i -r \"0,/Release:/ s/Release:(\s*)\S*/Release:\1${PACKIT_RPMSPEC_RELEASE}%{?dist}/\" packaging/leapp-repository.spec"
+       post-upstream-clone:
+-      # builds from master branch should start with 100 release, to have high priority
++      # builds from main branch should start with 100 release, to have high priority
+       - bash -c "sed -i \"s/1%{?dist}/100%{?dist}/g\" packaging/leapp-repository.spec"
+ 
+ 
+diff --git a/Makefile b/Makefile
+index 5b2bc4d2..8aeef77d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -64,7 +64,7 @@ endif
+ 
+ # just to reduce number of unwanted builds mark as the upstream one when
+ # someone will call copr_build without additional parameters
+-MASTER_BRANCH=master
++MASTER_BRANCH=main
+ 
+ # In case the PR or MR is defined or in case build is not coming from the
+ # MATER_BRANCH branch, N_REL=0; (so build is not update of the approved
+@@ -76,10 +76,10 @@ SHORT_SHA=`git rev-parse --short HEAD`
+ BRANCH=`git rev-parse --abbrev-ref HEAD | tr -- '-/' '_'`
+ 
+ # The dependent framework PR connection will be taken from the top commit's depends-on message.
+-REQ_LEAPP_PR=$(shell git log master..HEAD | grep -m1 -iE '^[[:space:]]*Depends-On:[[:space:]]*.*[[:digit:]]+[[:space:]]*$$' | grep -Eo '*[[:digit:]]*')
++REQ_LEAPP_PR=$(shell git log main..HEAD | grep -m1 -iE '^[[:space:]]*Depends-On:[[:space:]]*.*[[:digit:]]+[[:space:]]*$$' | grep -Eo '*[[:digit:]]*')
+ # NOTE(ivasilev) In case of travis relying on top commit is a no go as a top commit will be a merge commit.
+ ifdef CI
+-  REQ_LEAPP_PR=$(shell git log master..HEAD | grep -m1 -iE '^[[:space:]]*Depends-On:[[:space:]]*.*[[:digit:]]+[[:space:]]*$$' | grep -Eo '[[:digit:]]*')
++  REQ_LEAPP_PR=$(shell git log main..HEAD | grep -m1 -iE '^[[:space:]]*Depends-On:[[:space:]]*.*[[:digit:]]+[[:space:]]*$$' | grep -Eo '[[:digit:]]*')
+ endif
+ 
+ # In case anyone would like to add any other suffix, just make it possible
+@@ -92,8 +92,8 @@ REQUEST=`if test -n "$$PR"; then echo ".PR$${PR}"; elif test -n "$$MR"; then ech
+ # Examples:
+ # 0.201810080027Z.4078402.packaging.PR2
+ # 0.201810080027Z.4078402.packaging
+-# 0.201810080027Z.4078402.master.MR2
+-# 1.201810080027Z.4078402.master
++# 0.201810080027Z.4078402.main.MR2
++# 1.201810080027Z.4078402.main
+ RELEASE="$(N_REL).$(TIMESTAMP).$(SHORT_SHA).$(BRANCH)$(REQUEST)$(_SUFFIX)"
+ 
+ all: help
+@@ -302,7 +302,7 @@ install-deps:
+ 	pip install --upgrade setuptools; \
+ 	pip install --upgrade -r requirements.txt; \
+ 	./utils/install_commands.sh $(_PYTHON_VENV); \
+-	# In case the top commit Depends-On some yet unmerged framework patch - override master leapp with the proper version
++	# In case the top commit Depends-On some yet unmerged framework patch - override main leapp with the proper version
+ 	if [[ ! -z "$(REQ_LEAPP_PR)" ]] ; then \
+ 		echo "Leapp-repository depends on the yet unmerged pr of the framework #$(REQ_LEAPP_PR), installing it.." && \
+ 		$(VENVNAME)/bin/pip install -I "git+https://github.com/oamg/leapp.git@refs/pull/$(REQ_LEAPP_PR)/head"; \
+@@ -332,7 +332,7 @@ install-deps-fedora:
+ 	pip install --upgrade setuptools; \
+ 	pip install --upgrade -r requirements.txt; \
+ 	./utils/install_commands.sh $(_PYTHON_VENV); \
+-	# In case the top commit Depends-On some yet unmerged framework patch - override master leapp with the proper version
++	# In case the top commit Depends-On some yet unmerged framework patch - override main leapp with the proper version
+ 	if [[ ! -z "$(REQ_LEAPP_PR)" ]] ; then \
+ 		echo "Leapp-repository depends on the yet unmerged pr of the framework #$(REQ_LEAPP_PR), installing it.." && \
+ 		$(VENVNAME)/bin/pip install -I "git+https://github.com/oamg/leapp.git@refs/pull/$(REQ_LEAPP_PR)/head"; \
+-- 
+2.47.0
+
diff --git a/SOURCES/0006-ReadOfKernelArgsError-fix-the-error.patch b/SOURCES/0006-ReadOfKernelArgsError-fix-the-error.patch
new file mode 100644
index 0000000..2042f81
--- /dev/null
+++ b/SOURCES/0006-ReadOfKernelArgsError-fix-the-error.patch
@@ -0,0 +1,43 @@
+From 41e32e3aa6394b8397bef9b797892d9fa119d608 Mon Sep 17 00:00:00 2001
+From: Yuriy Kohut
+Date: Thu, 29 Aug 2024 12:36:23 +0300
+Subject: [PATCH 06/40] ReadOfKernelArgsError: fix the error: - AttributeError:
+ module 'leapp.reporting' has no attribute 'Hints'
+
+---
+ .../kernelcmdlineconfig/libraries/kernelcmdlineconfig.py | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py b/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py
+index 238a8aa6..6b261c3b 100644
+--- a/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py
++++ b/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py
+@@ -175,14 +175,14 @@ def entrypoint(configs=None):
+         api.current_logger().error(str(e))
+ 
+         if use_cmdline_file():
+-            report_hint = reporting.Hints(
++            report_hint = (
+                 'After the system has been rebooted into the new version of RHEL, you'
+                 ' should take the kernel cmdline arguments from /proc/cmdline (Everything'
+                 ' except the BOOT_IMAGE entry and initrd entries) and copy them into'
+                 ' /etc/kernel/cmdline before installing any new kernels.'
+             )
+         else:
+-            report_hint = reporting.Hints(
++            report_hint = (
+                 'After the system has been rebooted into the new version of RHEL, you'
+                 ' should take the kernel cmdline arguments from /proc/cmdline (Everything'
+                 ' except the BOOT_IMAGE entry and initrd entries) and then use the'
+@@ -204,7 +204,7 @@ def entrypoint(configs=None):
+             ' not able to set the arguments as the default for kernels installed in'
+             ' the future.'
+         ),
+-        report_hint,
++        reporting.Remediation(hint=report_hint),
+         reporting.Severity(reporting.Severity.HIGH),
+         reporting.Groups([
+             reporting.Groups.BOOT,
+-- 
+2.47.0
+
diff --git a/SOURCES/0007-pylint-exclude-rule-too-many-positional-arguments-co.patch b/SOURCES/0007-pylint-exclude-rule-too-many-positional-arguments-co.patch
new file mode 100644
index 0000000..2136a1e
--- /dev/null
+++ b/SOURCES/0007-pylint-exclude-rule-too-many-positional-arguments-co.patch
@@ -0,0 +1,44 @@
+From 88e13fb0545e0d42df2777538a0c6921bab91e33 Mon Sep 17 00:00:00 2001
+From: Petr Stodulka
+Date: Fri, 27 Sep 2024 14:53:01 +0200
+Subject: [PATCH 07/40] pylint: exclude rule: too-many-positional-arguments
+ (code: R0917)
+
+New version of Pylint have the rule for checking of positional
+arguments - complaining when more than 4 positional arguments exists.
+We do not want to refactor the code to make it happy and the default
+value cannot be set right now - that's planned for future Pylint
+versions at this moment. So excluding this rule.
+
+For more info:
+ * https://pylint.readthedocs.io/en/latest/user_guide/messages/refactor/too-many-positional-arguments.html
+---
+ .pylintrc | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/.pylintrc b/.pylintrc
+index f78c1c3f..5d75df40 100644
+--- a/.pylintrc
++++ b/.pylintrc
+@@ -41,6 +41,8 @@ disable=
+     consider-using-from-import,
+     use-list-literal,
+     use-dict-literal,
++    too-many-lines, # we do not want to take care about that one
++    too-many-positional-arguments, # we cannot set yet max-possitional-arguments unfortunately
+     # new for python3 version of pylint
+     useless-object-inheritance,
+     consider-using-set-comprehension, # pylint3 force to use comprehension in place we don't want (py2 doesnt have these options, for inline skip)
+@@ -57,8 +59,7 @@ disable=
+     redundant-u-string-prefix, # still have py2 to support
+     logging-format-interpolation,
+     logging-not-lazy,
+-    use-yield-from, # yield from cannot be used until we require python 3.3 or greater
+-    too-many-lines # we do not want to take care about that one
++    use-yield-from # yield from cannot be used until we require python 3.3 or greater
+ 
+ [FORMAT]
+ # Maximum number of characters on a single line.
+-- 
+2.47.0
+
diff --git a/SOURCES/0008-pam_userdb-migrate-backend-database.patch b/SOURCES/0008-pam_userdb-migrate-backend-database.patch
new file mode 100644
index 0000000..0ef43d9
--- /dev/null
+++ b/SOURCES/0008-pam_userdb-migrate-backend-database.patch
@@ -0,0 +1,534 @@
+From 658700d6424e852917b62c190dd23cbb3026b67d Mon Sep 17 00:00:00 2001
+From: Iker Pedrosa
+Date: Mon, 5 Aug 2024 15:15:44 +0200
+Subject: [PATCH 08/40] pam_userdb: migrate backend database
+
+pam_userdb module changed its backend database technology from lidb to
+gdbm for RHEL10. This requires a set of leapp actors to perform the
+database migration automatically when upgrading to RHEL10:
+
+* ScanPamUserDB takes care of scanning the PAM service folder to detect
+  whether pam_userdb is used and the location of the database in use.
+  This information is stored in a model.
+
+* CheckPamUserDB checks the databases reported by ScanPamUserDB and
+  prints a report about them.
+
+* ConvertPamUserDB checks the databases reported by ScanPamUserDB and
+  converts them to GDBM format.
+
+* RemoveOldPamUserDB checks the databases reported by ScanPamUserDB and
+  removes them.
+
+All these actors include unit-tests.
+
+Finally, there's also a spec file change to add `libdb-utils` dependency
+as it is required to convert pam_userdb databases from BerkeleyDB to
+GDBM.
+ +Signed-off-by: Iker Pedrosa +--- + packaging/leapp-repository.spec | 6 +++ + .../actors/pamuserdb/checkpamuserdb/actor.py | 18 ++++++++ + .../libraries/checkpamuserdb.py | 28 ++++++++++++ + .../tests/test_checkpamuserdb.py | 43 +++++++++++++++++++ + .../pamuserdb/convertpamuserdb/actor.py | 18 ++++++++ + .../libraries/convertpamuserdb.py | 27 ++++++++++++ + .../tests/test_convertpamuserdb.py | 39 +++++++++++++++++ + .../pamuserdb/removeoldpamuserdb/actor.py | 18 ++++++++ + .../libraries/removeoldpamuserdb.py | 25 +++++++++++ + .../tests/test_removeoldpamuserdb.py | 38 ++++++++++++++++ + .../actors/pamuserdb/scanpamuserdb/actor.py | 18 ++++++++ + .../scanpamuserdb/libraries/scanpamuserdb.py | 29 +++++++++++++ + .../tests/files/pam_userdb_basic | 1 + + .../tests/files/pam_userdb_complete | 9 ++++ + .../tests/files/pam_userdb_missing | 1 + + .../scanpamuserdb/tests/test_scanpamuserdb.py | 27 ++++++++++++ + .../el9toel10/models/pamuserdblocation.py | 14 ++++++ + 17 files changed, 359 insertions(+) + create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/actor.py + create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/libraries/checkpamuserdb.py + create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/tests/test_checkpamuserdb.py + create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/actor.py + create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/libraries/convertpamuserdb.py + create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/tests/test_convertpamuserdb.py + create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/actor.py + create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/libraries/removeoldpamuserdb.py + create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/tests/test_removeoldpamuserdb.py + create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/actor.py + create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/libraries/scanpamuserdb.py + create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_basic + create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_complete + create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_missing + create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/test_scanpamuserdb.py + create mode 100644 repos/system_upgrade/el9toel10/models/pamuserdblocation.py + +diff --git a/packaging/leapp-repository.spec b/packaging/leapp-repository.spec +index 146afc45..0d63ba02 100644 +--- a/packaging/leapp-repository.spec ++++ b/packaging/leapp-repository.spec +@@ -211,6 +211,12 @@ Requires: dracut + Requires: NetworkManager-libnm + Requires: python3-gobject-base + ++%endif ++ ++%if 0%{?rhel} && 0%{?rhel} == 9 ++############# RHEL 9 dependencies (when the source system is RHEL 9) ########## ++# Required to convert pam_userdb database from BerkeleyDB to GDBM ++Requires: libdb-utils + %endif + ################################################## + # end requirement +diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/actor.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/actor.py +new file mode 100644 +index 00000000..8fada645 +--- /dev/null 
++++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/actor.py +@@ -0,0 +1,18 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import checkpamuserdb ++from leapp.models import PamUserDbLocation, Report ++from leapp.tags import ChecksPhaseTag, IPUWorkflowTag ++ ++ ++class CheckPamUserDb(Actor): ++ """ ++ Create report with the location of pam_userdb databases ++ """ ++ ++ name = 'check_pam_user_db' ++ consumes = (PamUserDbLocation,) ++ produces = (Report,) ++ tags = (ChecksPhaseTag, IPUWorkflowTag) ++ ++ def process(self): ++ checkpamuserdb.process() +diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/libraries/checkpamuserdb.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/libraries/checkpamuserdb.py +new file mode 100644 +index 00000000..05cc71a9 +--- /dev/null ++++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/libraries/checkpamuserdb.py +@@ -0,0 +1,28 @@ ++from leapp import reporting ++from leapp.exceptions import StopActorExecutionError ++from leapp.libraries.stdlib import api ++from leapp.models import PamUserDbLocation ++ ++FMT_LIST_SEPARATOR = "\n - " ++ ++ ++def process(): ++ msg = next(api.consume(PamUserDbLocation), None) ++ if not msg: ++ raise StopActorExecutionError('Expected PamUserDbLocation, but got None') ++ ++ if msg.locations: ++ reporting.create_report([ ++ reporting.Title('pam_userdb databases will be converted to GDBM'), ++ reporting.Summary( ++ 'On RHEL 10, GDMB is used by pam_userdb as it\'s backend database,' ++ ' replacing BerkeleyDB. Existing pam_userdb databases will be' ++ ' converted to GDBM. The following databases will be converted:' ++ '{sep}{locations}'.format(sep=FMT_LIST_SEPARATOR, locations=FMT_LIST_SEPARATOR.join(msg.locations))), ++ reporting.Severity(reporting.Severity.INFO), ++ reporting.Groups([reporting.Groups.SECURITY, reporting.Groups.AUTHENTICATION]) ++ ]) ++ else: ++ api.current_logger().debug( ++ 'No pam_userdb databases were located, thus nothing will be converted' ++ ) +diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/tests/test_checkpamuserdb.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/tests/test_checkpamuserdb.py +new file mode 100644 +index 00000000..2e11106b +--- /dev/null ++++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/tests/test_checkpamuserdb.py +@@ -0,0 +1,43 @@ ++import pytest ++ ++from leapp import reporting ++from leapp.exceptions import StopActorExecutionError ++from leapp.libraries.actor import checkpamuserdb ++from leapp.libraries.common.testutils import create_report_mocked, logger_mocked ++from leapp.libraries.stdlib import api ++from leapp.models import PamUserDbLocation ++ ++ ++def test_process_no_msg(monkeypatch): ++ def consume_mocked(*args, **kwargs): ++ yield None ++ ++ monkeypatch.setattr(api, 'consume', consume_mocked) ++ ++ with pytest.raises(StopActorExecutionError): ++ checkpamuserdb.process() ++ ++ ++def test_process_no_location(monkeypatch): ++ def consume_mocked(*args, **kwargs): ++ yield PamUserDbLocation(locations=[]) ++ ++ monkeypatch.setattr(api, 'current_logger', logger_mocked()) ++ monkeypatch.setattr(api, 'consume', consume_mocked) ++ ++ checkpamuserdb.process() ++ assert ( ++ 'No pam_userdb databases were located, thus nothing will be converted' ++ in api.current_logger.dbgmsg ++ ) ++ ++ ++def test_process_locations(monkeypatch): ++ def consume_mocked(*args, **kwargs): ++ yield PamUserDbLocation(locations=['/tmp/db1', '/tmp/db2']) ++ ++ 
monkeypatch.setattr(reporting, "create_report", create_report_mocked()) ++ monkeypatch.setattr(api, 'consume', consume_mocked) ++ ++ checkpamuserdb.process() ++ assert reporting.create_report.called == 1 +diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/actor.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/actor.py +new file mode 100644 +index 00000000..5f8525b6 +--- /dev/null ++++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/actor.py +@@ -0,0 +1,18 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import convertpamuserdb ++from leapp.models import PamUserDbLocation ++from leapp.tags import IPUWorkflowTag, PreparationPhaseTag ++ ++ ++class ConvertPamUserDb(Actor): ++ """ ++ Convert the pam_userdb databases to GDBM ++ """ ++ ++ name = 'convert_pam_user_db' ++ consumes = (PamUserDbLocation,) ++ produces = () ++ tags = (PreparationPhaseTag, IPUWorkflowTag) ++ ++ def process(self): ++ convertpamuserdb.process() +diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/libraries/convertpamuserdb.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/libraries/convertpamuserdb.py +new file mode 100644 +index 00000000..e55b4102 +--- /dev/null ++++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/libraries/convertpamuserdb.py +@@ -0,0 +1,27 @@ ++from leapp.exceptions import StopActorExecutionError ++from leapp.libraries.stdlib import api, CalledProcessError, run ++from leapp.models import PamUserDbLocation ++ ++ ++def _convert_db(db_path): ++ cmd = ['db_converter', '--src', f'{db_path}.db', '--dest', f'{db_path}.gdbm'] ++ try: ++ run(cmd) ++ except (CalledProcessError, OSError) as e: ++ # As the db_converter does not remove the original DB after conversion or upon failure, ++ # interrupt the upgrade, keeping the original DBs. ++ # If all DBs are successfully converted, the leftover DBs are removed in the removeoldpamuserdb actor. 
++ raise StopActorExecutionError( ++ 'Cannot convert pam_userdb database.', ++ details={'details': '{}: {}'.format(str(e), e.stderr)} ++ ) ++ ++ ++def process(): ++ msg = next(api.consume(PamUserDbLocation), None) ++ if not msg: ++ raise StopActorExecutionError('Expected PamUserDbLocation, but got None') ++ ++ if msg.locations: ++ for location in msg.locations: ++ _convert_db(location) +diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/tests/test_convertpamuserdb.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/tests/test_convertpamuserdb.py +new file mode 100644 +index 00000000..46505492 +--- /dev/null ++++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/tests/test_convertpamuserdb.py +@@ -0,0 +1,39 @@ ++import os ++ ++import pytest ++ ++from leapp.exceptions import StopActorExecutionError ++from leapp.libraries.actor import convertpamuserdb ++from leapp.libraries.common.testutils import logger_mocked ++from leapp.libraries.stdlib import api, CalledProcessError ++ ++CUR_DIR = os.path.dirname(os.path.abspath(__file__)) ++ ++ ++def test_convert_db_success(monkeypatch): ++ location = os.path.join(CUR_DIR, '/files/db1') ++ ++ def run_mocked(cmd, **kwargs): ++ assert cmd == ['db_converter', '--src', f'{location}.db', '--dest', f'{location}.gdbm'] ++ ++ monkeypatch.setattr(api, 'current_logger', logger_mocked()) ++ monkeypatch.setattr(convertpamuserdb, 'run', run_mocked) ++ convertpamuserdb._convert_db(location) ++ assert len(api.current_logger.errmsg) == 0 ++ ++ ++def test_convert_db_failure(monkeypatch): ++ location = os.path.join(CUR_DIR, '/files/db1') ++ ++ def run_mocked(cmd, **kwargs): ++ raise CalledProcessError( ++ message='A Leapp Command Error occurred.', ++ command=cmd, ++ result={'exit_code': 1} ++ ) ++ ++ monkeypatch.setattr(api, 'current_logger', logger_mocked()) ++ monkeypatch.setattr(convertpamuserdb, 'run', run_mocked) ++ with pytest.raises(StopActorExecutionError) as err: ++ convertpamuserdb._convert_db(location) ++ assert str(err.value) == 'Cannot convert pam_userdb database.' 
+diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/actor.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/actor.py +new file mode 100644 +index 00000000..39a00855 +--- /dev/null ++++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/actor.py +@@ -0,0 +1,18 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import removeoldpamuserdb ++from leapp.models import PamUserDbLocation ++from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag ++ ++ ++class RemoveOldPamUserDb(Actor): ++ """ ++ Remove old pam_userdb databases ++ """ ++ ++ name = 'remove_old_pam_user_db' ++ consumes = (PamUserDbLocation,) ++ produces = () ++ tags = (ApplicationsPhaseTag, IPUWorkflowTag) ++ ++ def process(self): ++ removeoldpamuserdb.process() +diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/libraries/removeoldpamuserdb.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/libraries/removeoldpamuserdb.py +new file mode 100644 +index 00000000..5fc4cb4d +--- /dev/null ++++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/libraries/removeoldpamuserdb.py +@@ -0,0 +1,25 @@ ++from leapp.exceptions import StopActorExecutionError ++from leapp.libraries.stdlib import api, CalledProcessError, run ++from leapp.models import PamUserDbLocation ++ ++ ++def _remove_db(db_path): ++ cmd = ['rm', '-f', f'{db_path}.db'] ++ try: ++ run(cmd) ++ except (CalledProcessError, OSError) as e: ++ api.current_logger().error( ++ 'Failed to remove {}.db: {}'.format( ++ db_path, e ++ ) ++ ) ++ ++ ++def process(): ++ msg = next(api.consume(PamUserDbLocation), None) ++ if not msg: ++ raise StopActorExecutionError('Expected PamUserDbLocation, but got None') ++ ++ if msg.locations: ++ for location in msg.locations: ++ _remove_db(location) +diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/tests/test_removeoldpamuserdb.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/tests/test_removeoldpamuserdb.py +new file mode 100644 +index 00000000..2c1d5c75 +--- /dev/null ++++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/tests/test_removeoldpamuserdb.py +@@ -0,0 +1,38 @@ ++import os ++ ++from leapp.libraries.actor import removeoldpamuserdb ++from leapp.libraries.common.testutils import logger_mocked ++from leapp.libraries.stdlib import api, CalledProcessError ++ ++CUR_DIR = os.path.dirname(os.path.abspath(__file__)) ++ ++ ++def test_remove_db_success(monkeypatch): ++ location = os.path.join(CUR_DIR, '/files/db1') ++ ++ def run_mocked(cmd, **kwargs): ++ assert cmd == ['rm', '-f', f'{location}.db'] ++ ++ monkeypatch.setattr(api, 'current_logger', logger_mocked()) ++ monkeypatch.setattr(removeoldpamuserdb, 'run', run_mocked) ++ removeoldpamuserdb._remove_db(location) ++ assert len(api.current_logger.errmsg) == 0 ++ ++ ++def test_remove_db_failure(monkeypatch): ++ location = os.path.join(CUR_DIR, '/files/db1') ++ ++ def run_mocked(cmd, **kwargs): ++ raise CalledProcessError( ++ message='A Leapp Command Error occurred.', ++ command=cmd, ++ result={'exit_code': 1} ++ ) ++ ++ monkeypatch.setattr(api, 'current_logger', logger_mocked()) ++ monkeypatch.setattr(removeoldpamuserdb, 'run', run_mocked) ++ removeoldpamuserdb._remove_db(location) ++ assert ( ++ 'Failed to remove /files/db1.db' ++ not in api.current_logger.errmsg ++ ) +diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/actor.py 
b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/actor.py +new file mode 100644 +index 00000000..b6b35f1a +--- /dev/null ++++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/actor.py +@@ -0,0 +1,18 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import scanpamuserdb ++from leapp.models import PamUserDbLocation ++from leapp.tags import FactsPhaseTag, IPUWorkflowTag ++ ++ ++class ScanPamUserDb(Actor): ++ """ ++ Scan the PAM service folder for the location of pam_userdb databases ++ """ ++ ++ name = 'scan_pam_user_db' ++ consumes = () ++ produces = (PamUserDbLocation,) ++ tags = (FactsPhaseTag, IPUWorkflowTag) ++ ++ def process(self): ++ self.produce(scanpamuserdb.parse_pam_config_folder('/etc/pam.d/')) +diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/libraries/scanpamuserdb.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/libraries/scanpamuserdb.py +new file mode 100644 +index 00000000..0f668c02 +--- /dev/null ++++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/libraries/scanpamuserdb.py +@@ -0,0 +1,29 @@ ++import os ++import re ++ ++from leapp.models import PamUserDbLocation ++ ++ ++def _parse_pam_config_file(conf_file): ++ with open(conf_file, 'r') as file: ++ for line in file: ++ if 'pam_userdb' in line: ++ match = re.search(r'db=(\S+)', line) ++ if match: ++ return match.group(1) ++ ++ return None ++ ++ ++def parse_pam_config_folder(conf_folder): ++ locations = set() ++ ++ for file_name in os.listdir(conf_folder): ++ file_path = os.path.join(conf_folder, file_name) ++ ++ if os.path.isfile(file_path): ++ location = _parse_pam_config_file(file_path) ++ if location is not None: ++ locations.add(location) ++ ++ return PamUserDbLocation(locations=list(locations)) +diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_basic b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_basic +new file mode 100644 +index 00000000..f115147b +--- /dev/null ++++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_basic +@@ -0,0 +1 @@ ++auth required pam_userdb.so db=/tmp/db1 +diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_complete b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_complete +new file mode 100644 +index 00000000..84e40b48 +--- /dev/null ++++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_complete +@@ -0,0 +1,9 @@ ++auth required pam_env.so ++auth required pam_faildelay.so delay=2000000 ++auth sufficient pam_fprintd.so ++auth [default=1 ignore=ignore success=ok] pam_usertype.so isregular ++auth [default=1 ignore=ignore success=ok] pam_localuser.so ++auth required pam_userdb.so db=/tmp/db2 ++auth [default=1 ignore=ignore success=ok] pam_usertype.so isregular ++auth sufficient pam_sss.so forward_pass ++auth required pam_deny.so +diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_missing b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_missing +new file mode 100644 +index 00000000..764947fc +--- /dev/null ++++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_missing +@@ -0,0 +1 @@ ++auth sufficient pam_unix.so nullok +diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/test_scanpamuserdb.py 
b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/test_scanpamuserdb.py +new file mode 100644 +index 00000000..3b752d87 +--- /dev/null ++++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/test_scanpamuserdb.py +@@ -0,0 +1,27 @@ ++import os ++ ++import pytest ++ ++from leapp.libraries.actor import scanpamuserdb ++ ++CUR_DIR = os.path.dirname(os.path.abspath(__file__)) ++ ++ ++@pytest.mark.parametrize( ++ "inp,exp_out", ++ [ ++ ("files/pam_userdb_missing", None), ++ ("files/pam_userdb_basic", "/tmp/db1"), ++ ("files/pam_userdb_complete", "/tmp/db2"), ++ ], ++) ++def test_parse_pam_config_file(inp, exp_out): ++ file = scanpamuserdb._parse_pam_config_file(os.path.join(CUR_DIR, inp)) ++ assert file == exp_out ++ ++ ++def test_parse_pam_config_folder(): ++ msg = scanpamuserdb.parse_pam_config_folder(os.path.join(CUR_DIR, "files/")) ++ assert len(msg.locations) == 2 ++ assert "/tmp/db1" in msg.locations ++ assert "/tmp/db2" in msg.locations +diff --git a/repos/system_upgrade/el9toel10/models/pamuserdblocation.py b/repos/system_upgrade/el9toel10/models/pamuserdblocation.py +new file mode 100644 +index 00000000..d15b2041 +--- /dev/null ++++ b/repos/system_upgrade/el9toel10/models/pamuserdblocation.py +@@ -0,0 +1,14 @@ ++from leapp.models import fields, Model ++from leapp.topics import SystemInfoTopic ++ ++ ++class PamUserDbLocation(Model): ++ """ ++ Provides a list of all database files for pam_userdb ++ """ ++ topic = SystemInfoTopic ++ ++ locations = fields.List(fields.String(), default=[]) ++ """ ++ The list with the full path to the database files. ++ """ +-- +2.47.0 + diff --git a/SOURCES/0009-Replace-mirror.centos.org-with-vault.centos.org-Cent.patch b/SOURCES/0009-Replace-mirror.centos.org-with-vault.centos.org-Cent.patch new file mode 100644 index 0000000..4b4012f --- /dev/null +++ b/SOURCES/0009-Replace-mirror.centos.org-with-vault.centos.org-Cent.patch @@ -0,0 +1,31 @@ +From d6e57eec3ded2887008055442ba906a92c572a01 Mon Sep 17 00:00:00 2001 +From: Matej Matuska +Date: Thu, 10 Oct 2024 14:03:36 +0200 +Subject: [PATCH 09/40] Replace mirror.centos.org with vault.centos.org Centos + 7 Containerfile + +As mirror.centos.org is dead, replace mirrorlist with baseurl pointing +to vault.centos.org in utils/container-builds/Containerfile.centos7. 
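+
+For illustration, a (shortened, assumed) CentOS-Base.repo stanza changes
+from:
+
+    [base]
+    mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os
+    #baseurl=http://mirror.centos.org/centos/$releasever/os/$basearch/
+
+to:
+
+    [base]
+    #mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os
+    baseurl=http://vault.centos.org/centos/$releasever/os/$basearch/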
+---
+ utils/container-builds/Containerfile.centos7 | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/utils/container-builds/Containerfile.centos7 b/utils/container-builds/Containerfile.centos7
+index 70ac3df1..af00eddb 100644
+--- a/utils/container-builds/Containerfile.centos7
++++ b/utils/container-builds/Containerfile.centos7
+@@ -2,6 +2,11 @@ FROM centos:7
+ 
+ VOLUME /repo
+ 
++# mirror.centos.org is dead, comment out mirrorlist and set baseurl to vault.centos.org
++RUN sed -i s/mirror.centos.org/vault.centos.org/ /etc/yum.repos.d/CentOS-*.repo
++RUN sed -i s/^#\s*baseurl=http/baseurl=http/ /etc/yum.repos.d/CentOS-*.repo
++RUN sed -i s/^mirrorlist=http/#mirrorlist=http/ /etc/yum.repos.d/CentOS-*.repo
++
+ RUN yum update -y && \
+     yum install -y rpm-build python-devel make git
+
+--
+2.47.0
+
diff --git a/SOURCES/0010-kernelcmdlineconfig-Add-Report-to-produces-tuple.patch b/SOURCES/0010-kernelcmdlineconfig-Add-Report-to-produces-tuple.patch
new file mode 100644
index 0000000..94b0260
--- /dev/null
+++ b/SOURCES/0010-kernelcmdlineconfig-Add-Report-to-produces-tuple.patch
@@ -0,0 +1,35 @@
+From b997e4eeb835809d1fbfd1a0b9a6114c133bf0b4 Mon Sep 17 00:00:00 2001
+From: Matej Matuska
+Date: Thu, 10 Oct 2024 15:28:48 +0200
+Subject: [PATCH 10/40] kernelcmdlineconfig: Add Report to produces tuple
+
+The missing `leapp.reporting.Report` class is added to the
+kernelcmdlineconfig actor `produces` tuple.
+---
+ .../system_upgrade/common/actors/kernelcmdlineconfig/actor.py | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/repos/system_upgrade/common/actors/kernelcmdlineconfig/actor.py b/repos/system_upgrade/common/actors/kernelcmdlineconfig/actor.py
+index b44fd835..3585a14e 100644
+--- a/repos/system_upgrade/common/actors/kernelcmdlineconfig/actor.py
++++ b/repos/system_upgrade/common/actors/kernelcmdlineconfig/actor.py
+@@ -4,6 +4,7 @@ from leapp.actors import Actor
+ from leapp.exceptions import StopActorExecutionError
+ from leapp.libraries.actor import kernelcmdlineconfig
+ from leapp.models import FirmwareFacts, InstalledTargetKernelInfo, KernelCmdlineArg, TargetKernelCmdlineArgTasks
++from leapp.reporting import Report
+ from leapp.tags import FinalizationPhaseTag, IPUWorkflowTag
+ 
+ 
+@@ -14,7 +15,7 @@ class KernelCmdlineConfig(Actor):
+ 
+     name = 'kernelcmdlineconfig'
+     consumes = (KernelCmdlineArg, InstalledTargetKernelInfo, FirmwareFacts, TargetKernelCmdlineArgTasks)
+-    produces = ()
++    produces = (Report,)
+     tags = (FinalizationPhaseTag, IPUWorkflowTag)
+ 
+     def process(self):
+--
+2.47.0
+
diff --git a/SOURCES/0011-kernelcmdlineconfig-Use-args-from-first-entry-when-m.patch b/SOURCES/0011-kernelcmdlineconfig-Use-args-from-first-entry-when-m.patch
new file mode 100644
index 0000000..3c15aa0
--- /dev/null
+++ b/SOURCES/0011-kernelcmdlineconfig-Use-args-from-first-entry-when-m.patch
@@ -0,0 +1,204 @@
+From c2c96affa7b20c82969419ce49b65cbf646a0c32 Mon Sep 17 00:00:00 2001
+From: Matej Matuska
+Date: Fri, 18 Oct 2024 12:43:19 +0200
+Subject: [PATCH 11/40] kernelcmdlineconfig: Use args from first entry when
+ multiple entries are listed
+
+Instead of erroring out when grubby lists multiple entries for the
+default kernel, always use the `args=` and `root=` from the first one and
+create a post-upgrade report. The report instructs the user to ensure
+those are the correct ones or to correct them.
+
+This can happen, for example, if MAKEDEBUG=yes is set in
+/etc/sysconfig/kernel.
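+
+For illustration, duplicated default-kernel entries in `grubby --info`
+output look like this (values invented):
+
+    index=0
+    kernel="/boot/vmlinuz-4.18.0-425.3.1.el8.x86_64"
+    args="ro crashkernel=auto"
+    root="/dev/mapper/rhel-root"
+    index=1
+    kernel="/boot/vmlinuz-4.18.0-425.3.1.el8.x86_64"
+    args="ro crashkernel=auto debug"
+    root="/dev/mapper/rhel-root"
+
+The code now takes args/root from the first entry and reports the
+situation instead of raising ReadOfKernelArgsError.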
+ +Jira: RHEL-46911 +--- + .../libraries/kernelcmdlineconfig.py | 79 ++++++++++++++++--- + .../tests/test_kernelcmdlineconfig.py | 48 ++++++++++- + 2 files changed, 116 insertions(+), 11 deletions(-) + +diff --git a/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py b/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py +index 6b261c3b..19c50f3c 100644 +--- a/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py ++++ b/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py +@@ -109,10 +109,55 @@ def _extract_grubby_value(record): + return matches.group(2) + + ++def report_multple_entries_for_default_kernel(): ++ if use_cmdline_file(): ++ report_hint = ( ++ 'After the system has been rebooted into the new version of RHEL,' ++ ' check that configured default kernel cmdline arguments in /etc/kernel/cmdline ' ++ ' are correct. In case that different arguments are expected, update the file as needed.' ++ ) ++ else: ++ report_hint = ( ++ 'After the system has been rebooted into the new version of RHEL,' ++ ' check that configured default kernel cmdline arguments are set as expected, using' ++ ' the `grub2-editenv list` command. ' ++ ' If different default arguments are expected, update them using grub2-editenv.\n' ++ ' For example, consider that current booted kernel has correct kernel cmdline' ++ ' arguments and /proc/cmdline contains:\n\n' ++ ' BOOT_IMAGE=(hd0,msdos1)/vmlinuz-4.18.0-425.3.1.el8.x86_64' ++ ' root=/dev/mapper/rhel_ibm--root ro console=tty0' ++ ' console=ttyS0,115200 rd_NO_PLYMOUTH\n\n' ++ ' then run the following grub2-editenv command:\n\n' ++ ' # grub2-editenv - set "kernelopts=root=/dev/mapper/rhel_ibm--root' ++ ' ro console=tty0 console=ttyS0,115200 rd_NO_PLYMOUTH"' ++ ) ++ ++ reporting.create_report([ ++ reporting.Title('Ensure that expected default kernel cmdline arguments are set'), ++ reporting.Summary( ++ 'During the upgrade we needed to modify the kernel command line arguments.' ++ ' However, multiple bootloader entries with different arguments were found for the default' ++ ' kernel (perhaps MAKEDEBUG=yes is set in /etc/sysconfig/kernel).' ++ ' Leapp used the arguments from the first found entry of the target kernel' ++ ' and set it as the new default kernel cmdline arguments for kernels installed in the future.' ++ ), ++ reporting.Remediation(hint=report_hint), ++ reporting.Severity(reporting.Severity.HIGH), ++ reporting.Groups([ ++ reporting.Groups.BOOT, ++ reporting.Groups.KERNEL, ++ reporting.Groups.POST, ++ ]), ++ reporting.RelatedResource('file', '/etc/kernel/cmdline'), ++ ]) ++ ++ + def retrieve_args_for_default_kernel(kernel_info): + # Copy the args for the default kernel to all kernels. 
+ kernel_args = None + kernel_root = None ++ detected_multiple_entries = False ++ + cmd = ['grubby', '--info', kernel_info.kernel_img_path] + output = stdlib.run(cmd, split=False) + for record in output['stdout'].splitlines(): +@@ -122,19 +167,30 @@ def retrieve_args_for_default_kernel(kernel_info): + temp_kernel_args = _extract_grubby_value(record) + + if kernel_args: +- api.current_logger().warning('Grubby output is malformed:' +- ' `args=` is listed more than once.') + if kernel_args != temp_kernel_args: +- raise ReadOfKernelArgsError('Grubby listed `args=` multiple' +- ' times with different values.') +- kernel_args = _extract_grubby_value(record) ++ api.current_logger().warning( ++ 'Grubby output listed `args=` multiple times with different values,' ++ ' continuing with the first result' ++ ) ++ detected_multiple_entries = True ++ else: ++ api.current_logger().warning('Grubby output listed `args=` more than once') ++ else: ++ kernel_args = temp_kernel_args + elif record.startswith('root='): +- api.current_logger().warning('Grubby output is malformed:' +- ' `root=` is listed more than once.') ++ temp_kernel_root = _extract_grubby_value(record) ++ + if kernel_root: +- raise ReadOfKernelArgsError('Grubby listed `root=` multiple' +- ' times with different values') +- kernel_root = _extract_grubby_value(record) ++ if kernel_root != temp_kernel_root: ++ api.current_logger().warning( ++ 'Grubby output listed `root=` multiple times with different values,' ++ ' continuing with the first result' ++ ) ++ detected_multiple_entries = True ++ else: ++ api.current_logger().warning('Grubby output listed `root=` more than once') ++ else: ++ kernel_root = temp_kernel_root + + if not kernel_args or not kernel_root: + raise ReadOfKernelArgsError( +@@ -142,6 +198,9 @@ def retrieve_args_for_default_kernel(kernel_info): + ' kernels: root={}, args={}'.format(kernel_root, kernel_args) + ) + ++ if detected_multiple_entries: ++ report_multple_entries_for_default_kernel() ++ + return kernel_root, kernel_args + + +diff --git a/repos/system_upgrade/common/actors/kernelcmdlineconfig/tests/test_kernelcmdlineconfig.py b/repos/system_upgrade/common/actors/kernelcmdlineconfig/tests/test_kernelcmdlineconfig.py +index ffe4b046..e5759a7b 100644 +--- a/repos/system_upgrade/common/actors/kernelcmdlineconfig/tests/test_kernelcmdlineconfig.py ++++ b/repos/system_upgrade/common/actors/kernelcmdlineconfig/tests/test_kernelcmdlineconfig.py +@@ -4,11 +4,12 @@ from collections import namedtuple + + import pytest + ++from leapp import reporting + from leapp.exceptions import StopActorExecutionError + from leapp.libraries import stdlib + from leapp.libraries.actor import kernelcmdlineconfig + from leapp.libraries.common.config import architecture +-from leapp.libraries.common.testutils import CurrentActorMocked ++from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked + from leapp.libraries.stdlib import api + from leapp.models import InstalledTargetKernelInfo, KernelCmdlineArg, TargetKernelCmdlineArgTasks + +@@ -183,6 +184,51 @@ def test_kernelcmdline_config_no_version(monkeypatch): + assert not mocked_run.commands + + ++SECOND_KERNEL_ARGS = ( ++ 'ro rootflags=subvol=root' ++ ' resume=/dev/mapper/luks-2c0df999-81ec-4a35-a1f9-b93afee8c6ad' ++ ' rd.luks.uuid=luks-90a6412f-c588-46ca-9118-5aca35943d25' ++ ' rd.luks.uuid=luks-2c0df999-81ec-4a35-a1f9-b93afee8c6ad' ++) ++SECOND_KERNEL_ROOT = 'UUID=1aa15850-2685-418d-95a6-f7266a2de83b' ++ ++ ++@pytest.mark.parametrize( ++ 'second_grubby_output', ++ ( ++ 
TEMPLATE_GRUBBY_INFO_OUTPUT.format(SECOND_KERNEL_ARGS, SECOND_KERNEL_ROOT), ++ TEMPLATE_GRUBBY_INFO_OUTPUT.format(SAMPLE_KERNEL_ARGS, SECOND_KERNEL_ROOT), ++ TEMPLATE_GRUBBY_INFO_OUTPUT.format(SECOND_KERNEL_ARGS, SAMPLE_KERNEL_ROOT), ++ ) ++) ++def test_kernelcmdline_config_mutiple_args(monkeypatch, second_grubby_output): ++ kernel_img_path = '/boot/vmlinuz-X' ++ kernel_info = InstalledTargetKernelInfo(pkg_nevra=TARGET_KERNEL_NEVRA, ++ uname_r='', ++ kernel_img_path=kernel_img_path, ++ initramfs_path='/boot/initramfs-X') ++ ++ # For this test, we need to check we get the proper report if grubby --info ++ # outputs multiple different `root=` or `args=` ++ # and that the first ones are used ++ grubby_info_output = "\n".join((SAMPLE_GRUBBY_INFO_OUTPUT, second_grubby_output)) ++ ++ mocked_run = MockedRun( ++ outputs={" ".join(("grubby", "--info", kernel_img_path)): grubby_info_output, ++ } ++ ) ++ monkeypatch.setattr(stdlib, 'run', mocked_run) ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked()) ++ monkeypatch.setattr(reporting, "create_report", create_report_mocked()) ++ ++ root, args = kernelcmdlineconfig.retrieve_args_for_default_kernel(kernel_info) ++ assert root == SAMPLE_KERNEL_ROOT ++ assert args == SAMPLE_KERNEL_ARGS ++ assert reporting.create_report.called == 1 ++ expected_title = 'Ensure that expected default kernel cmdline arguments are set' ++ assert expected_title in reporting.create_report.report_fields['title'] ++ ++ + def test_kernelcmdline_config_malformed_args(monkeypatch): + kernel_img_path = '/boot/vmlinuz-X' + kernel_info = InstalledTargetKernelInfo(pkg_nevra=TARGET_KERNEL_NEVRA, +-- +2.47.0 + diff --git a/SOURCES/0012-check_microarch-refactor-to-handle-possible-future-r.patch b/SOURCES/0012-check_microarch-refactor-to-handle-possible-future-r.patch new file mode 100644 index 0000000..dbcfa40 --- /dev/null +++ b/SOURCES/0012-check_microarch-refactor-to-handle-possible-future-r.patch @@ -0,0 +1,216 @@ +From 053137c50d1b060f9e6e6e45d82196b1045391b7 Mon Sep 17 00:00:00 2001 +From: mhecko +Date: Thu, 4 Apr 2024 14:22:48 +0200 +Subject: [PATCH 12/40] check_microarch: refactor to handle possible future + reqs + +--- + .../actors/checkmicroarchitecture/actor.py | 0 + .../libraries/checkmicroarchitecture.py | 73 +++++++++++++++++++ + .../tests/test_checkmicroarchitecture.py | 21 ++++-- + .../libraries/checkmicroarchitecture.py | 46 ------------ + 4 files changed, 87 insertions(+), 53 deletions(-) + rename repos/system_upgrade/{el8toel9 => common}/actors/checkmicroarchitecture/actor.py (100%) + create mode 100644 repos/system_upgrade/common/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py + rename repos/system_upgrade/{el8toel9 => common}/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py (79%) + delete mode 100644 repos/system_upgrade/el8toel9/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py + +diff --git a/repos/system_upgrade/el8toel9/actors/checkmicroarchitecture/actor.py b/repos/system_upgrade/common/actors/checkmicroarchitecture/actor.py +similarity index 100% +rename from repos/system_upgrade/el8toel9/actors/checkmicroarchitecture/actor.py +rename to repos/system_upgrade/common/actors/checkmicroarchitecture/actor.py +diff --git a/repos/system_upgrade/common/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py b/repos/system_upgrade/common/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py +new file mode 100644 +index 00000000..cc617203 +--- /dev/null ++++ 
b/repos/system_upgrade/common/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py +@@ -0,0 +1,73 @@ ++from collections import namedtuple ++ ++from leapp import reporting ++from leapp.libraries.common.config.architecture import ARCH_X86_64, matches_architecture ++from leapp.libraries.common.config.version import get_target_major_version ++from leapp.libraries.stdlib import api ++from leapp.models import CPUInfo ++ ++X86_64_BASELINE_FLAGS = ['cmov', 'cx8', 'fpu', 'fxsr', 'mmx', 'syscall', 'sse', 'sse2'] ++X86_64_V2_FLAGS = ['cx16', 'lahf_lm', 'popcnt', 'pni', 'sse4_1', 'sse4_2', 'ssse3'] ++ ++MicroarchInfo = namedtuple('MicroarchInfo', ('required_flags', 'extra_report_fields', 'microarch_ver')) ++ ++ ++def _inhibit_upgrade(missing_flags, target_rhel, microarch_ver, extra_report_fields=None): ++ title = 'Current x86-64 microarchitecture is unsupported in {0}'.format(target_rhel) ++ summary = ('{0} has a higher CPU requirement than older versions, it now requires a CPU ' ++ 'compatible with {1} instruction set or higher.\n\n' ++ 'Missings flags detected are: {2}\n'.format(target_rhel, microarch_ver, ', '.join(missing_flags))) ++ ++ report_fields = [ ++ reporting.Title(title), ++ reporting.Summary(summary), ++ reporting.Severity(reporting.Severity.HIGH), ++ reporting.Groups([reporting.Groups.INHIBITOR]), ++ reporting.Groups([reporting.Groups.SANITY]), ++ reporting.Remediation(hint=('If case of using virtualization, virtualization platforms often allow ' ++ 'configuring a minimum denominator CPU model for compatibility when migrating ' ++ 'between different CPU models. Ensure that minimum requirements are not below ' ++ 'that of {0}\n').format(target_rhel)), ++ ] ++ ++ if extra_report_fields: ++ report_fields += extra_report_fields ++ ++ reporting.create_report(report_fields) ++ ++ ++def process(): ++ """ ++ Check whether the processor matches the required microarchitecture. ++ """ ++ ++ if not matches_architecture(ARCH_X86_64): ++ api.current_logger().info('Architecture not x86-64. 
Skipping microarchitecture test.') ++ return ++ ++ cpuinfo = next(api.consume(CPUInfo)) ++ ++ rhel9_microarch_article = reporting.ExternalLink( ++ title='Building Red Hat Enterprise Linux 9 for the x86-64-v2 microarchitecture level', ++ url='https://red.ht/rhel-9-intel-microarchitectures' ++ ) ++ ++ rhel_major_to_microarch_reqs = { ++ '9': MicroarchInfo(microarch_ver='x86-64-v2', ++ required_flags=(X86_64_BASELINE_FLAGS + X86_64_V2_FLAGS), ++ extra_report_fields=[rhel9_microarch_article]), ++ } ++ ++ microarch_info = rhel_major_to_microarch_reqs.get(get_target_major_version()) ++ if not microarch_info: ++ api.current_logger().info('No known microarchitecture requirements are known for target RHEL%s.', ++ get_target_major_version()) ++ return ++ ++ missing_flags = [flag for flag in microarch_info.required_flags if flag not in cpuinfo.flags] ++ api.current_logger().debug('Required flags missing: %s', missing_flags) ++ if missing_flags: ++ _inhibit_upgrade(missing_flags, ++ 'RHEL{0}'.format(get_target_major_version()), ++ microarch_info.microarch_ver, ++ extra_report_fields=microarch_info.extra_report_fields) +diff --git a/repos/system_upgrade/el8toel9/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py b/repos/system_upgrade/common/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py +similarity index 79% +rename from repos/system_upgrade/el8toel9/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py +rename to repos/system_upgrade/common/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py +index b7c850d9..b0624f2b 100644 +--- a/repos/system_upgrade/el8toel9/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py ++++ b/repos/system_upgrade/common/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py +@@ -25,7 +25,13 @@ def test_not_x86_64_passes(monkeypatch, arch): + assert not reporting.create_report.called + + +-def test_valid_microarchitecture(monkeypatch): ++@pytest.mark.parametrize( ++ ('target_ver', 'cpu_flags'), ++ [ ++ ('9.0', checkmicroarchitecture.X86_64_BASELINE_FLAGS + checkmicroarchitecture.X86_64_V2_FLAGS) ++ ] ++) ++def test_valid_microarchitecture(monkeypatch, target_ver, cpu_flags): + """ + Test no report is generated on a valid microarchitecture + """ +@@ -33,9 +39,8 @@ def test_valid_microarchitecture(monkeypatch): + monkeypatch.setattr(reporting, "create_report", create_report_mocked()) + monkeypatch.setattr(api, 'current_logger', logger_mocked()) + +- required_flags = checkmicroarchitecture.X86_64_BASELINE_FLAGS + checkmicroarchitecture.X86_64_V2_FLAGS +- monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch=ARCH_X86_64, +- msgs=[CPUInfo(flags=required_flags)])) ++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch=ARCH_X86_64, dst_ver=target_ver, ++ msgs=[CPUInfo(flags=cpu_flags)])) + + checkmicroarchitecture.process() + +@@ -43,14 +48,16 @@ def test_valid_microarchitecture(monkeypatch): + assert not reporting.create_report.called + + +-def test_invalid_microarchitecture(monkeypatch): ++@pytest.mark.parametrize('target_ver', ['9.0']) ++def test_invalid_microarchitecture(monkeypatch, target_ver): + """ + Test report is generated on x86-64 architecture with invalid microarchitecture and the upgrade is inhibited + """ + + monkeypatch.setattr(reporting, "create_report", create_report_mocked()) + monkeypatch.setattr(api, 'current_logger', logger_mocked()) +- monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch=ARCH_X86_64, msgs=[CPUInfo()])) ++ 
monkeypatch.setattr(api, 'current_actor', ++ CurrentActorMocked(arch=ARCH_X86_64, msgs=[CPUInfo()], dst_ver=target_ver)) + + checkmicroarchitecture.process() + +@@ -60,6 +67,6 @@ def test_invalid_microarchitecture(monkeypatch): + assert 'Architecture not x86-64. Skipping microarchitecture test.' not in api.current_logger().infomsg + assert reporting.create_report.called == 1 + assert 'microarchitecture is unsupported' in produced_title +- assert 'RHEL9 has a higher CPU requirement' in produced_summary ++ assert 'has a higher CPU requirement' in produced_summary + assert reporting.create_report.report_fields['severity'] == reporting.Severity.HIGH + assert is_inhibitor(reporting.create_report.report_fields) +diff --git a/repos/system_upgrade/el8toel9/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py b/repos/system_upgrade/el8toel9/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py +deleted file mode 100644 +index 9c083d7e..00000000 +--- a/repos/system_upgrade/el8toel9/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py ++++ /dev/null +@@ -1,46 +0,0 @@ +-from leapp import reporting +-from leapp.libraries.common.config.architecture import ARCH_X86_64, matches_architecture +-from leapp.libraries.stdlib import api +-from leapp.models import CPUInfo +- +-X86_64_BASELINE_FLAGS = ['cmov', 'cx8', 'fpu', 'fxsr', 'mmx', 'syscall', 'sse', 'sse2'] +-X86_64_V2_FLAGS = ['cx16', 'lahf_lm', 'popcnt', 'pni', 'sse4_1', 'sse4_2', 'ssse3'] +- +- +-def _inhibit_upgrade(missing_flags): +- title = 'Current x86-64 microarchitecture is unsupported in RHEL9' +- summary = ('RHEL9 has a higher CPU requirement than older versions, it now requires a CPU ' +- 'compatible with x86-64-v2 instruction set or higher.\n\n' +- 'Missings flags detected are: {}\n'.format(', '.join(missing_flags))) +- +- reporting.create_report([ +- reporting.Title(title), +- reporting.Summary(summary), +- reporting.ExternalLink(title='Building Red Hat Enterprise Linux 9 for the x86-64-v2 microarchitecture level', +- url='https://red.ht/rhel-9-intel-microarchitectures'), +- reporting.Severity(reporting.Severity.HIGH), +- reporting.Groups([reporting.Groups.INHIBITOR]), +- reporting.Groups([reporting.Groups.SANITY]), +- reporting.Remediation(hint=('If case of using virtualization, virtualization platforms often allow ' +- 'configuring a minimum denominator CPU model for compatibility when migrating ' +- 'between different CPU models. Ensure that minimum requirements are not below ' +- 'that of RHEL9\n')), +- ]) +- +- +-def process(): +- """ +- Check whether the processor matches the required microarchitecture. +- """ +- +- if not matches_architecture(ARCH_X86_64): +- api.current_logger().info('Architecture not x86-64. 
Skipping microarchitecture test.') +- return +- +- cpuinfo = next(api.consume(CPUInfo)) +- +- required_flags = X86_64_BASELINE_FLAGS + X86_64_V2_FLAGS +- missing_flags = [flag for flag in required_flags if flag not in cpuinfo.flags] +- api.current_logger().debug('Required flags missing: %s', missing_flags) +- if missing_flags: +- _inhibit_upgrade(missing_flags) +-- +2.47.0 + diff --git a/SOURCES/0013-check_microarch-add-rhel10-requirements.patch b/SOURCES/0013-check_microarch-add-rhel10-requirements.patch new file mode 100644 index 0000000..b2cce6c --- /dev/null +++ b/SOURCES/0013-check_microarch-add-rhel10-requirements.patch @@ -0,0 +1,133 @@ +From d3ebc990ba65801fbed2aaf1dce8329698667d1c Mon Sep 17 00:00:00 2001 +From: Michal Hecko +Date: Wed, 28 Aug 2024 12:18:40 +0200 +Subject: [PATCH 13/40] check_microarch: add rhel10 requirements + +--- + .../actors/checkmicroarchitecture/actor.py | 13 ++++++++++-- + .../libraries/checkmicroarchitecture.py | 8 +++++-- + .../tests/test_checkmicroarchitecture.py | 21 ++++++++++++++----- + 3 files changed, 33 insertions(+), 9 deletions(-) + +diff --git a/repos/system_upgrade/common/actors/checkmicroarchitecture/actor.py b/repos/system_upgrade/common/actors/checkmicroarchitecture/actor.py +index 98ffea80..bb342f2f 100644 +--- a/repos/system_upgrade/common/actors/checkmicroarchitecture/actor.py ++++ b/repos/system_upgrade/common/actors/checkmicroarchitecture/actor.py +@@ -17,7 +17,8 @@ class CheckMicroarchitecture(Actor): + levels. + + RHEL9 has a higher CPU requirement than older versions, it now requires a +- CPU compatible with ``x86-64-v2`` instruction set or higher. ++ CPU compatible with ``x86-64-v2`` instruction set or higher. Similarly, ++ RHEL10 requires at least ``x86-64-v3`` instruction set. + + .. table:: Required CPU features by microarchitecure level with a + corresponding flag as shown by ``lscpu``. +@@ -43,7 +44,15 @@ class CheckMicroarchitecture(Actor): + | | SSE4_2 | sse4_2 | + | | SSSE3 | ssse3 | + +------------+-------------+--------------------+ +- | ... 
| | | ++ | x86-64-v3 | AVX | avx | ++ | | AVX2 | avx2 | ++ | | BMI1 | bmi1 | ++ | | BMI2 | bmi2 | ++ | | F16C | f16c | ++ | | FMA | fma | ++ | | LZCNT | abm | ++ | | MOVBE | movbe | ++ | | OSXSAVE | xsave | + +------------+-------------+--------------------+ + + Note: To get the corresponding flag for the CPU feature consult the file +diff --git a/repos/system_upgrade/common/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py b/repos/system_upgrade/common/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py +index cc617203..94e85e3e 100644 +--- a/repos/system_upgrade/common/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py ++++ b/repos/system_upgrade/common/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py +@@ -8,6 +8,7 @@ from leapp.models import CPUInfo + + X86_64_BASELINE_FLAGS = ['cmov', 'cx8', 'fpu', 'fxsr', 'mmx', 'syscall', 'sse', 'sse2'] + X86_64_V2_FLAGS = ['cx16', 'lahf_lm', 'popcnt', 'pni', 'sse4_1', 'sse4_2', 'ssse3'] ++X86_64_V3_FLAGS = ['avx2', 'bmi1', 'bmi2', 'f16c', 'fma', 'abm', 'movbe', 'xsave'] + + MicroarchInfo = namedtuple('MicroarchInfo', ('required_flags', 'extra_report_fields', 'microarch_ver')) + +@@ -16,7 +17,7 @@ def _inhibit_upgrade(missing_flags, target_rhel, microarch_ver, extra_report_fie + title = 'Current x86-64 microarchitecture is unsupported in {0}'.format(target_rhel) + summary = ('{0} has a higher CPU requirement than older versions, it now requires a CPU ' + 'compatible with {1} instruction set or higher.\n\n' +- 'Missings flags detected are: {2}\n'.format(target_rhel, microarch_ver, ', '.join(missing_flags))) ++ 'Missings flags detected are: {2}\n').format(target_rhel, microarch_ver, ', '.join(missing_flags)) + + report_fields = [ + reporting.Title(title), +@@ -24,7 +25,7 @@ def _inhibit_upgrade(missing_flags, target_rhel, microarch_ver, extra_report_fie + reporting.Severity(reporting.Severity.HIGH), + reporting.Groups([reporting.Groups.INHIBITOR]), + reporting.Groups([reporting.Groups.SANITY]), +- reporting.Remediation(hint=('If case of using virtualization, virtualization platforms often allow ' ++ reporting.Remediation(hint=('If a case of using virtualization, virtualization platforms often allow ' + 'configuring a minimum denominator CPU model for compatibility when migrating ' + 'between different CPU models. 
Ensure that minimum requirements are not below ' + 'that of {0}\n').format(target_rhel)), +@@ -56,6 +57,9 @@ def process(): + '9': MicroarchInfo(microarch_ver='x86-64-v2', + required_flags=(X86_64_BASELINE_FLAGS + X86_64_V2_FLAGS), + extra_report_fields=[rhel9_microarch_article]), ++ '10': MicroarchInfo(microarch_ver='x86-64-v3', ++ required_flags=(X86_64_BASELINE_FLAGS + X86_64_V2_FLAGS + X86_64_V3_FLAGS), ++ extra_report_fields=[]), + } + + microarch_info = rhel_major_to_microarch_reqs.get(get_target_major_version()) +diff --git a/repos/system_upgrade/common/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py b/repos/system_upgrade/common/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py +index b0624f2b..eeca8be0 100644 +--- a/repos/system_upgrade/common/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py ++++ b/repos/system_upgrade/common/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py +@@ -25,10 +25,15 @@ def test_not_x86_64_passes(monkeypatch, arch): + assert not reporting.create_report.called + + ++ENTIRE_V2_FLAG_SET = checkmicroarchitecture.X86_64_BASELINE_FLAGS + checkmicroarchitecture.X86_64_V2_FLAGS ++ENTIRE_V3_FLAG_SET = ENTIRE_V2_FLAG_SET + checkmicroarchitecture.X86_64_V3_FLAGS ++ ++ + @pytest.mark.parametrize( + ('target_ver', 'cpu_flags'), + [ +- ('9.0', checkmicroarchitecture.X86_64_BASELINE_FLAGS + checkmicroarchitecture.X86_64_V2_FLAGS) ++ ('9.0', ENTIRE_V2_FLAG_SET), ++ ('10.0', ENTIRE_V3_FLAG_SET) + ] + ) + def test_valid_microarchitecture(monkeypatch, target_ver, cpu_flags): +@@ -48,16 +53,22 @@ def test_valid_microarchitecture(monkeypatch, target_ver, cpu_flags): + assert not reporting.create_report.called + + +-@pytest.mark.parametrize('target_ver', ['9.0']) +-def test_invalid_microarchitecture(monkeypatch, target_ver): ++@pytest.mark.parametrize( ++ ('target_ver', 'cpu_flags'), ++ ( ++ ('9.0', checkmicroarchitecture.X86_64_BASELINE_FLAGS), ++ ('10.0', ENTIRE_V2_FLAG_SET), ++ ) ++) ++def test_invalid_microarchitecture(monkeypatch, target_ver, cpu_flags): + """ + Test report is generated on x86-64 architecture with invalid microarchitecture and the upgrade is inhibited + """ +- ++ cpu_info = CPUInfo(flags=cpu_flags) + monkeypatch.setattr(reporting, "create_report", create_report_mocked()) + monkeypatch.setattr(api, 'current_logger', logger_mocked()) + monkeypatch.setattr(api, 'current_actor', +- CurrentActorMocked(arch=ARCH_X86_64, msgs=[CPUInfo()], dst_ver=target_ver)) ++ CurrentActorMocked(arch=ARCH_X86_64, msgs=[cpu_info], dst_ver=target_ver)) + + checkmicroarchitecture.process() + +-- +2.47.0 + diff --git a/SOURCES/0014-Skip-checking-files-under-.-directory-hash-dir.patch b/SOURCES/0014-Skip-checking-files-under-.-directory-hash-dir.patch new file mode 100644 index 0000000..599c247 --- /dev/null +++ b/SOURCES/0014-Skip-checking-files-under-.-directory-hash-dir.patch @@ -0,0 +1,44 @@ +From a14793892bafaad0802844cbb56be3be3220eb47 Mon Sep 17 00:00:00 2001 +From: Petr Stodulka +Date: Wed, 25 Sep 2024 17:29:02 +0200 +Subject: [PATCH 14/40] Skip checking files under .../directory-hash/ dir + +* The main reason for this change is to improve performance and +reduce flood of logs for the content that does not seem to be important +to check for the upgrade process. + +The directory has been relatively recently added to ca-certificates +rpm on EL 9+ systems mostly to improve performance of OpenSSL and +the content does not seem to be important for the IPU process. 
+The high number of files takes too much time to evaluate and causes
+a flood of logs that are not important.
+
+This is an updated variant of the solution we originally dropped: 60f500e59bb92
+---
+ .../targetuserspacecreator/libraries/userspacegen.py | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
+index cd2d7d6e..d7698056 100644
+--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
++++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
+@@ -311,6 +311,16 @@ def _get_files_owned_by_rpms(context, dirpath, pkgs=None, recursive=False):
+     searchdir = context.full_path(dirpath)
+     if recursive:
+         for root, _, files in os.walk(searchdir):
++            if '/directory-hash/' in root:
++                # tl;dr; for the performance improvement
++                # The directory has been relatively recently added to ca-certificates
++                # rpm on EL 9+ systems and the content does not seem to be important
++                # for the IPU process. Also, it contains a high number of files and
++                # their processing floods the output and slows down IPU.
++                # So skipping it entirely.
++                # This is an updated variant of the solution we originally dropped: 60f500e59bb92
++                api.current_logger().debug('SKIP files in the {} directory: Not important for the IPU.'.format(root))
++                continue
+             for filename in files:
+                 relpath = os.path.relpath(os.path.join(root, filename), searchdir)
+                 file_list.append(relpath)
+--
+2.47.0
+
diff --git a/SOURCES/0015-lib-overlay-cap-the-max-size-of-disk-images.patch b/SOURCES/0015-lib-overlay-cap-the-max-size-of-disk-images.patch
new file mode 100644
index 0000000..738a4bd
--- /dev/null
+++ b/SOURCES/0015-lib-overlay-cap-the-max-size-of-disk-images.patch
@@ -0,0 +1,66 @@
+From cef2825778eb63f95e13cf48b1683bc98c32c21b Mon Sep 17 00:00:00 2001
+From: Michal Hecko
+Date: Fri, 25 Oct 2024 16:33:38 +0200
+Subject: [PATCH 15/40] lib(overlay): cap the max size of disk images
+
+On systems with large disks (e.g. 16TB) with lots of free space, leapp
+might attempt to create files larger than the max file size of the
+underlying FS. Attempting to create such large files causes leapp
+to crash. This patch caps the max image size to 1TB, based on empirical
+evidence that more free space is not needed for the upgrade RPM
+transaction.
+
+Jira-ref: RHEL-57064
+---
+ .../common/libraries/overlaygen.py | 28 +++++++++++++++++++
+ 1 file changed, 28 insertions(+)
+
+diff --git a/repos/system_upgrade/common/libraries/overlaygen.py b/repos/system_upgrade/common/libraries/overlaygen.py
+index c1ac9ad3..867e3559 100644
+--- a/repos/system_upgrade/common/libraries/overlaygen.py
++++ b/repos/system_upgrade/common/libraries/overlaygen.py
+@@ -68,6 +68,27 @@ or close to that size, stay always with this minimal protected size defined by
+ this constant.
+ """
+ 
++_MAX_DISK_IMAGE_SIZE_MB = 2**20  # 1*TB
++"""
++Maximum size of the created (sparse) images.
++
++Defaults to 1TB. If a disk with capacity larger than _MAX_DISK_IMAGE_SIZE_MB
++is mounted on the system, the corresponding image used to store overlay
++modifications will be capped to _MAX_DISK_IMAGE_SIZE_MB.
++
++Engineering rationale:
++    This constant was introduced to prevent leapp from creating files that are
++    virtually larger than the maximum file size supported by the file system.
++    E.g. 
if the source system hosts /var/lib/leapp on EXT4, then we cannot ++ create a file larger than 16TB. ++ We create these "disk images" to be able to verify the system has enough ++ disk space to perform the RPM upgrade transaction. From our experience, ++ we are not aware of any system which could have installed so much content ++ by RPMs that we would need 1TB of the free space on a single FS. Therefore, ++ we consider this value as safe while preventing us from exceeding FS ++ limits. ++""" ++ + + MountPoints = namedtuple('MountPoints', ['fs_file', 'fs_vfstype']) + +@@ -287,6 +308,13 @@ def _prepare_required_mounts(scratch_dir, mounts_dir, storage_info, scratch_rese + disk_size = _get_fspace(mountpoint, convert_to_mibs=True, coefficient=0.95) + if mountpoint == scratch_mp: + disk_size = scratch_disk_size ++ ++ if disk_size > _MAX_DISK_IMAGE_SIZE_MB: ++ msg = ('Image for overlayfs corresponding to the disk mounted at %s would ideally have %d MB, ' ++ 'but we truncate it to %d MB to avoid bumping to max file limits.') ++ api.current_logger().info(msg, mountpoint, disk_size, _MAX_DISK_IMAGE_SIZE_MB) ++ disk_size = _MAX_DISK_IMAGE_SIZE_MB ++ + image = _create_mount_disk_image(disk_images_directory, mountpoint, disk_size) + result[mountpoint] = mounting.LoopMount( + source=image, +-- +2.47.0 + diff --git a/SOURCES/0016-Raise-proper-error-when-ModelViolationError-occurs.patch b/SOURCES/0016-Raise-proper-error-when-ModelViolationError-occurs.patch new file mode 100644 index 0000000..8008b54 --- /dev/null +++ b/SOURCES/0016-Raise-proper-error-when-ModelViolationError-occurs.patch @@ -0,0 +1,168 @@ +From ec078243771f8ef43853bd242175a612fe84f95b Mon Sep 17 00:00:00 2001 +From: tomasfratrik +Date: Wed, 17 Jul 2024 12:12:50 +0200 +Subject: [PATCH 16/40] Raise proper error when ModelViolationError occurs + +This error occurs when repo file has invalid definition, specifically +when the 'name' entry of the config files is invalid. Also add tests. + +Jira: RHEL-19249 +--- + .../systemfacts/libraries/systemfacts.py | 13 ++++++++- + .../systemfacts/tests/test_systemfacts.py | 24 ++++++++++++++++- + .../common/libraries/repofileutils.py | 17 +++++++++++- + .../libraries/tests/test_repofileutils.py | 27 +++++++++++++++++++ + 4 files changed, 78 insertions(+), 3 deletions(-) + +diff --git a/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py b/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py +index d1eeb28c..f16cea1d 100644 +--- a/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py ++++ b/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py +@@ -217,7 +217,18 @@ def get_sysctls_status(): + + def get_repositories_status(): + """ Get a basic information about YUM repositories installed in the system """ +- return RepositoriesFacts(repositories=repofileutils.get_parsed_repofiles()) ++ try: ++ return RepositoriesFacts(repositories=repofileutils.get_parsed_repofiles()) ++ except repofileutils.InvalidRepoDefinition as e: ++ raise StopActorExecutionError( ++ message=str(e), ++ details={ ++ 'hint': 'For more directions on how to resolve the issue, see: {url}.' 
++ .format( ++ url='https://access.redhat.com/solutions/6969001' ++ ) ++ } ++ ) + + + def get_selinux_status(): +diff --git a/repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts.py b/repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts.py +index badf174c..5831b979 100644 +--- a/repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts.py ++++ b/repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts.py +@@ -3,7 +3,16 @@ import pwd + + import pytest + +-from leapp.libraries.actor.systemfacts import _get_system_groups, _get_system_users, anyendswith, anyhasprefix, aslist ++from leapp.exceptions import StopActorExecutionError ++from leapp.libraries.actor.systemfacts import ( ++ _get_system_groups, ++ _get_system_users, ++ anyendswith, ++ anyhasprefix, ++ aslist, ++ get_repositories_status ++) ++from leapp.libraries.common import repofileutils + from leapp.libraries.common.testutils import logger_mocked + from leapp.libraries.stdlib import api + from leapp.snactor.fixture import current_actor_libraries +@@ -116,3 +125,16 @@ def test_get_system_groups(monkeypatch, etc_group_names, skipped_group_names): + assert group_name not in api.current_logger().dbgmsg[0] + else: + assert not api.current_logger().dbgmsg ++ ++ ++def test_failed_parsed_repofiles(monkeypatch): ++ def _raise_invalidrepo_error(): ++ raise repofileutils.InvalidRepoDefinition(msg='mocked error', ++ repofile='/etc/yum.repos.d/mock.repo', ++ repoid='mocked repoid') ++ ++ monkeypatch.setattr(repofileutils, 'get_parsed_repofiles', _raise_invalidrepo_error) ++ monkeypatch.setattr(api, 'current_logger', logger_mocked()) ++ ++ with pytest.raises(StopActorExecutionError): ++ get_repositories_status() +diff --git a/repos/system_upgrade/common/libraries/repofileutils.py b/repos/system_upgrade/common/libraries/repofileutils.py +index a563be52..cab3c42b 100644 +--- a/repos/system_upgrade/common/libraries/repofileutils.py ++++ b/repos/system_upgrade/common/libraries/repofileutils.py +@@ -11,6 +11,16 @@ except ImportError: + api.current_logger().warning('repofileutils.py: failed to import dnf') + + ++class InvalidRepoDefinition(Exception): ++ """Raised when a repository definition is invalid.""" ++ def __init__(self, msg, repofile, repoid): ++ message = 'Invalid repository definition: {repoid} in: {repofile}: {msg}'.format( ++ repoid=repoid, repofile=repofile, msg=msg) ++ super(InvalidRepoDefinition, self).__init__(message) ++ self.repofile = repofile ++ self.repoid = repoid ++ ++ + def _parse_repository(repoid, repo_data): + def asbool(x): + return x == '1' +@@ -33,12 +43,17 @@ def parse_repofile(repofile): + :param repofile: Path to the repo file + :type repofile: str + :rtype: RepositoryFile ++ :raises InvalidRepoDefinition: If the repository definition is invalid, ++ this can for example occur if 'name' field in repository is missing or it is invalid. 
+ """ + data = [] + with open(repofile, mode='r') as fp: + cp = utils.parse_config(fp, strict=False) + for repoid in cp.sections(): +- data.append(_parse_repository(repoid, dict(cp.items(repoid)))) ++ try: ++ data.append(_parse_repository(repoid, dict(cp.items(repoid)))) ++ except fields.ModelViolationError as e: ++ raise InvalidRepoDefinition(e, repofile=repofile, repoid=repoid) + return RepositoryFile(file=repofile, data=data) + + +diff --git a/repos/system_upgrade/common/libraries/tests/test_repofileutils.py b/repos/system_upgrade/common/libraries/tests/test_repofileutils.py +index 51cc1c11..42c7e49e 100644 +--- a/repos/system_upgrade/common/libraries/tests/test_repofileutils.py ++++ b/repos/system_upgrade/common/libraries/tests/test_repofileutils.py +@@ -1,7 +1,10 @@ + import json + import os + ++import pytest ++ + from leapp.libraries.common import repofileutils ++from leapp.models.fields import ModelViolationError + + CUR_DIR = os.path.dirname(os.path.abspath(__file__)) + +@@ -12,6 +15,30 @@ def test_invert_dict(): + assert inv_dict == {'a': [1], 'b': [1, 2]} + + ++@pytest.mark.parametrize( ++ ('repoid', 'data'), ++ ( ++ ('missing-name', {'baseurl': 'http://example.com', 'enabled': '1', 'gpgcheck': '1'}), ++ (None, {'name': 'name', 'baseurl': 'http://example.com', 'enabled': '1', 'gpgcheck': '1'}), ++ ('name-none', {'name': None, 'baseurl': 'http://example.com', 'enabled': '1', 'gpgcheck': '1'}), ++ ('baseurl-true', {'name': 'valid', 'baseurl': True, 'enabled': '1', 'gpgcheck': '1'}), ++ ) ++) ++def test__parse_repository_missing_name(repoid, data): ++ with pytest.raises(ModelViolationError): ++ repofileutils._parse_repository(repoid, data) ++ ++ ++def test_parse_repofile_error(monkeypatch): ++ def _parse_repository_mocked(*args, **kwargs): ++ raise ModelViolationError('') ++ ++ monkeypatch.setattr(repofileutils, '_parse_repository', _parse_repository_mocked) ++ ++ with pytest.raises(repofileutils.InvalidRepoDefinition): ++ repofileutils.parse_repofile(os.path.join(CUR_DIR, 'sample_repos.txt')) ++ ++ + def test_parse_repofile(): + repofile = repofileutils.parse_repofile(os.path.join(CUR_DIR, 'sample_repos.txt')) + +-- +2.47.0 + diff --git a/SOURCES/0017-InhibitWhenLuks-simplify-the-logic.patch b/SOURCES/0017-InhibitWhenLuks-simplify-the-logic.patch new file mode 100644 index 0000000..5a91114 --- /dev/null +++ b/SOURCES/0017-InhibitWhenLuks-simplify-the-logic.patch @@ -0,0 +1,56 @@ +From f84c6f808a821d3ccd09a4a8278cef9c09984a28 Mon Sep 17 00:00:00 2001 +From: Daniel Zatovic +Date: Wed, 3 Apr 2024 23:25:06 +0200 +Subject: [PATCH 17/40] InhibitWhenLuks: simplify the logic + +--- + .../common/actors/inhibitwhenluks/actor.py | 35 +++++++------------ + 1 file changed, 13 insertions(+), 22 deletions(-) + +diff --git a/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py b/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py +index d3ff2d2e..40b845b0 100644 +--- a/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py ++++ b/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py +@@ -24,26 +24,17 @@ class InhibitWhenLuks(Actor): + ceph_info = next(self.consume(CephInfo)) + if ceph_info: + ceph_vol = ceph_info.encrypted_volumes[:] +- for storage_info in self.consume(StorageInfo): +- for blk in storage_info.lsblk: +- if blk.tp == 'crypt' and blk.name not in ceph_vol: +- create_report([ +- reporting.Title('LUKS encrypted partition detected'), +- reporting.Summary('Upgrading system with encrypted partitions is not supported'), +- 
reporting.Severity(reporting.Severity.HIGH),
+-                            reporting.Groups([reporting.Groups.BOOT, reporting.Groups.ENCRYPTION]),
+-                            reporting.Groups([reporting.Groups.INHIBITOR]),
+-                        ])
+-                        break
+         except StopIteration:
+-            for storage_info in self.consume(StorageInfo):
+-                for blk in storage_info.lsblk:
+-                    if blk.tp == 'crypt':
+-                        create_report([
+-                            reporting.Title('LUKS encrypted partition detected'),
+-                            reporting.Summary('Upgrading system with encrypted partitions is not supported'),
+-                            reporting.Severity(reporting.Severity.HIGH),
+-                            reporting.Groups([reporting.Groups.BOOT, reporting.Groups.ENCRYPTION]),
+-                            reporting.Groups([reporting.Groups.INHIBITOR]),
+-                        ])
+-                        break
++            pass
++
++        for storage_info in self.consume(StorageInfo):
++            for blk in storage_info.lsblk:
++                if blk.tp == 'crypt' and blk.name not in ceph_vol:
++                    create_report([
++                        reporting.Title('LUKS encrypted partition detected'),
++                        reporting.Summary('Upgrading system with encrypted partitions is not supported'),
++                        reporting.Severity(reporting.Severity.HIGH),
++                        reporting.Groups([reporting.Groups.BOOT, reporting.Groups.ENCRYPTION]),
++                        reporting.Groups([reporting.Groups.INHIBITOR]),
++                    ])
++                    break
+--
+2.47.0
+
diff --git a/SOURCES/0018-StorageScanner-Add-parent-device-name-to-lsblk.patch b/SOURCES/0018-StorageScanner-Add-parent-device-name-to-lsblk.patch
new file mode 100644
index 0000000..ff85cb8
--- /dev/null
+++ b/SOURCES/0018-StorageScanner-Add-parent-device-name-to-lsblk.patch
@@ -0,0 +1,271 @@
+From 03fc6743b8916f23f6a213e3f0fc3020ee141b96 Mon Sep 17 00:00:00 2001
+From: Daniel Zatovic
+Date: Wed, 3 Apr 2024 23:42:45 +0200
+Subject: [PATCH 18/40] StorageScanner: Add parent device name to lsblk
+
+Modify the StorageInfo model to include the path and name of the parent
+device. Use StorageScanner to collect this information.
+
+Moreover, fix the lsblk test: there should be full device paths in the
+"lsblk -pbnr" output (just names were used in the original test).
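+
+For illustration only (hypothetical devices), a line of the new
+"lsblk -pbnr --output NAME,MAJ:MIN,RM,SIZE,RO,TYPE,MOUNTPOINT,PKNAME"
+output looks like:
+
+    /dev/mapper/luks-123 253:0 0 42949672960 0 crypt  /dev/nvme0n1p1
+
+where the trailing PKNAME column is the parent device path (empty for
+whole disks).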
+--- + .../tests/test_inhibitwhenluks.py | 12 +-- + .../libraries/storagescanner.py | 29 +++++-- + .../tests/unit_test_storagescanner.py | 78 +++++++++++++++---- + .../common/models/storageinfo.py | 2 + + .../tests/unit_test_vdoconversionscanner.py | 4 +- + 5 files changed, 95 insertions(+), 30 deletions(-) + +diff --git a/repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py b/repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py +index fee50f9d..405a3429 100644 +--- a/repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py ++++ b/repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py +@@ -5,8 +5,8 @@ from leapp.utils.report import is_inhibitor + + + def test_actor_with_luks(current_actor_context): +- with_luks = [LsblkEntry(name='luks-132', kname='kname1', maj_min='253:0', rm='0', +- size='10G', bsize=10*(1 << 39), ro='0', tp='crypt', mountpoint='')] ++ with_luks = [LsblkEntry(name='luks-132', kname='kname1', maj_min='253:0', rm='0', size='10G', bsize=10*(1 << 39), ++ ro='0', tp='crypt', mountpoint='', parent_name='', parent_path='')] + + current_actor_context.feed(StorageInfo(lsblk=with_luks)) + current_actor_context.run() +@@ -16,8 +16,8 @@ def test_actor_with_luks(current_actor_context): + + + def test_actor_with_luks_ceph_only(current_actor_context): +- with_luks = [LsblkEntry(name='luks-132', kname='kname1', maj_min='253:0', rm='0', +- size='10G', bsize=10*(1 << 39), ro='0', tp='crypt', mountpoint='')] ++ with_luks = [LsblkEntry(name='luks-132', kname='kname1', maj_min='253:0', rm='0', size='10G', bsize=10*(1 << 39), ++ ro='0', tp='crypt', mountpoint='', parent_name='', parent_path='')] + ceph_volume = ['luks-132'] + current_actor_context.feed(StorageInfo(lsblk=with_luks)) + current_actor_context.feed(CephInfo(encrypted_volumes=ceph_volume)) +@@ -26,8 +26,8 @@ def test_actor_with_luks_ceph_only(current_actor_context): + + + def test_actor_without_luks(current_actor_context): +- without_luks = [LsblkEntry(name='sda1', kname='sda1', maj_min='8:0', rm='0', +- size='10G', bsize=10*(1 << 39), ro='0', tp='part', mountpoint='/boot')] ++ without_luks = [LsblkEntry(name='sda1', kname='sda1', maj_min='8:0', rm='0', size='10G', bsize=10*(1 << 39), ++ ro='0', tp='part', mountpoint='/boot', parent_name='', parent_path='')] + + current_actor_context.feed(StorageInfo(lsblk=without_luks)) + current_actor_context.run() +diff --git a/repos/system_upgrade/common/actors/storagescanner/libraries/storagescanner.py b/repos/system_upgrade/common/actors/storagescanner/libraries/storagescanner.py +index f15f0d87..cad6bd32 100644 +--- a/repos/system_upgrade/common/actors/storagescanner/libraries/storagescanner.py ++++ b/repos/system_upgrade/common/actors/storagescanner/libraries/storagescanner.py +@@ -164,18 +164,31 @@ def _get_mount_info(path): + ) + + ++def _get_lsblk_info_for_devpath(dev_path): ++ lsblk_cmd = ['lsblk', '-nr', '--output', 'NAME,KNAME,SIZE', dev_path] ++ lsblk_info_for_devpath = next(_get_cmd_output(lsblk_cmd, ' ', 3), None) ++ ++ return lsblk_info_for_devpath ++ ++ + @aslist + def _get_lsblk_info(): + """ Collect storage info from lsblk command """ +- cmd = ['lsblk', '-pbnr', '--output', 'NAME,MAJ:MIN,RM,SIZE,RO,TYPE,MOUNTPOINT'] +- for entry in _get_cmd_output(cmd, ' ', 7): +- dev_path, maj_min, rm, bsize, ro, tp, mountpoint = entry +- lsblk_cmd = ['lsblk', '-nr', '--output', 'NAME,KNAME,SIZE', dev_path] +- lsblk_info_for_devpath = next(_get_cmd_output(lsblk_cmd, ' ', 3), None) ++ cmd = 
['lsblk', '-pbnr', '--output', 'NAME,MAJ:MIN,RM,SIZE,RO,TYPE,MOUNTPOINT,PKNAME'] ++ for entry in _get_cmd_output(cmd, ' ', 8): ++ dev_path, maj_min, rm, bsize, ro, tp, mountpoint, parent_path = entry ++ ++ lsblk_info_for_devpath = _get_lsblk_info_for_devpath(dev_path) + if not lsblk_info_for_devpath: + return +- + name, kname, size = lsblk_info_for_devpath ++ ++ parent_name = "" ++ if parent_path: ++ parent_info = _get_lsblk_info_for_devpath(parent_path) ++ if parent_info: ++ parent_name, _, _ = parent_info ++ + yield LsblkEntry( + name=name, + kname=kname, +@@ -185,7 +198,9 @@ def _get_lsblk_info(): + bsize=int(bsize), + ro=ro, + tp=tp, +- mountpoint=mountpoint) ++ mountpoint=mountpoint, ++ parent_name=parent_name, ++ parent_path=parent_path) + + + @aslist +diff --git a/repos/system_upgrade/common/actors/storagescanner/tests/unit_test_storagescanner.py b/repos/system_upgrade/common/actors/storagescanner/tests/unit_test_storagescanner.py +index 4dc11ea4..456e40ec 100644 +--- a/repos/system_upgrade/common/actors/storagescanner/tests/unit_test_storagescanner.py ++++ b/repos/system_upgrade/common/actors/storagescanner/tests/unit_test_storagescanner.py +@@ -255,13 +255,18 @@ def test_get_lsblk_info(monkeypatch): + bytes_per_gb = 1 << 30 + + def get_cmd_output_mocked(cmd, delim, expected_len): +- if cmd == ['lsblk', '-pbnr', '--output', 'NAME,MAJ:MIN,RM,SIZE,RO,TYPE,MOUNTPOINT']: ++ if cmd == ['lsblk', '-pbnr', '--output', 'NAME,MAJ:MIN,RM,SIZE,RO,TYPE,MOUNTPOINT,PKNAME']: + output_lines_split_on_whitespace = [ +- ['vda', '252:0', '0', str(40 * bytes_per_gb), '0', 'disk', ''], +- ['vda1', '252:1', '0', str(1 * bytes_per_gb), '0', 'part', '/boot'], +- ['vda2', '252:2', '0', str(39 * bytes_per_gb), '0', 'part', ''], +- ['rhel_ibm--p8--kvm--03--guest--02-root', '253:0', '0', str(38 * bytes_per_gb), '0', 'lvm', '/'], +- ['rhel_ibm--p8--kvm--03--guest--02-swap', '253:1', '0', str(1 * bytes_per_gb), '0', 'lvm', '[SWAP]'] ++ ['/dev/vda', '252:0', '0', str(40 * bytes_per_gb), '0', 'disk', '', ''], ++ ['/dev/vda1', '252:1', '0', str(1 * bytes_per_gb), '0', 'part', '/boot', ''], ++ ['/dev/vda2', '252:2', '0', str(39 * bytes_per_gb), '0', 'part', '', ''], ++ ['/dev/mapper/rhel_ibm--p8--kvm--03--guest--02-root', '253:0', '0', str(38 * bytes_per_gb), '0', 'lvm', ++ '/', ''], ++ ['/dev/mapper/rhel_ibm--p8--kvm--03--guest--02-swap', '253:1', '0', str(1 * bytes_per_gb), '0', 'lvm', ++ '[SWAP]', ''], ++ ['/dev/mapper/luks-01b60fff-a2a8-4c03-893f-056bfc3f06f6', '254:0', '0', str(38 * bytes_per_gb), '0', ++ 'crypt', '', '/dev/nvme0n1p1'], ++ ['/dev/nvme0n1p1', '259:1', '0', str(39 * bytes_per_gb), '0', 'part', '', '/dev/nvme0n1'], + ] + for output_line_parts in output_lines_split_on_whitespace: + yield output_line_parts +@@ -269,11 +274,17 @@ def test_get_lsblk_info(monkeypatch): + # We cannot have the output in a list, since the command is called per device. Therefore, we have to map + # each device path to its output. 
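+        # For example, a (hypothetical) call with
+        # cmd == ['lsblk', '-nr', '--output', 'NAME,KNAME,SIZE', '/dev/vda']
+        # takes dev_path from cmd[4] and is answered from the mapping below
+        # as ['vda', 'vda', '40G'].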
+ output_lines_split_on_whitespace_per_device = { +- 'vda': ['vda', 'vda', '40G'], +- 'vda1': ['vda1', 'vda1', '1G'], +- 'vda2': ['vda2', 'vda2', '39G'], +- 'rhel_ibm--p8--kvm--03--guest--02-root': ['rhel_ibm--p8--kvm--03--guest--02-root', 'kname1', '38G'], +- 'rhel_ibm--p8--kvm--03--guest--02-swap': ['rhel_ibm--p8--kvm--03--guest--02-swap', 'kname2', '1G'] ++ '/dev/vda': ['vda', 'vda', '40G'], ++ '/dev/vda1': ['vda1', 'vda1', '1G'], ++ '/dev/vda2': ['vda2', 'vda2', '39G'], ++ '/dev/mapper/rhel_ibm--p8--kvm--03--guest--02-root': ++ ['rhel_ibm--p8--kvm--03--guest--02-root', 'kname1', '38G'], ++ '/dev/mapper/rhel_ibm--p8--kvm--03--guest--02-swap': ++ ['rhel_ibm--p8--kvm--03--guest--02-swap', 'kname2', '1G'], ++ '/dev/mapper/luks-01b60fff-a2a8-4c03-893f-056bfc3f06f6': ++ ['luks-01b60fff-a2a8-4c03-893f-056bfc3f06f6', 'dm-0', '38G'], ++ '/dev/nvme0n1p1': ['nvme0n1p1', 'nvme0n1p1', '39G'], ++ '/dev/nvme0n1': ['nvme0n1', 'nvme0n1', '40G'], + } + dev_path = cmd[4] + if dev_path not in output_lines_split_on_whitespace_per_device: +@@ -294,7 +305,9 @@ def test_get_lsblk_info(monkeypatch): + bsize=40 * bytes_per_gb, + ro='0', + tp='disk', +- mountpoint=''), ++ mountpoint='', ++ parent_name='', ++ parent_path=''), + LsblkEntry( + name='vda1', + kname='vda1', +@@ -304,7 +317,9 @@ def test_get_lsblk_info(monkeypatch): + bsize=1 * bytes_per_gb, + ro='0', + tp='part', +- mountpoint='/boot'), ++ mountpoint='/boot', ++ parent_name='', ++ parent_path=''), + LsblkEntry( + name='vda2', + kname='vda2', +@@ -314,7 +329,9 @@ def test_get_lsblk_info(monkeypatch): + bsize=39 * bytes_per_gb, + ro='0', + tp='part', +- mountpoint=''), ++ mountpoint='', ++ parent_name='', ++ parent_path=''), + LsblkEntry( + name='rhel_ibm--p8--kvm--03--guest--02-root', + kname='kname1', +@@ -324,7 +341,9 @@ def test_get_lsblk_info(monkeypatch): + bsize=38 * bytes_per_gb, + ro='0', + tp='lvm', +- mountpoint='/'), ++ mountpoint='/', ++ parent_name='', ++ parent_path=''), + LsblkEntry( + name='rhel_ibm--p8--kvm--03--guest--02-swap', + kname='kname2', +@@ -334,7 +353,34 @@ def test_get_lsblk_info(monkeypatch): + bsize=1 * bytes_per_gb, + ro='0', + tp='lvm', +- mountpoint='[SWAP]')] ++ mountpoint='[SWAP]', ++ parent_name='', ++ parent_path=''), ++ LsblkEntry( ++ name='luks-01b60fff-a2a8-4c03-893f-056bfc3f06f6', ++ kname='dm-0', ++ maj_min='254:0', ++ rm='0', ++ size='38G', ++ bsize=38 * bytes_per_gb, ++ ro='0', ++ tp='crypt', ++ mountpoint='', ++ parent_name='nvme0n1p1', ++ parent_path='/dev/nvme0n1p1'), ++ LsblkEntry( ++ name='nvme0n1p1', ++ kname='nvme0n1p1', ++ maj_min='259:1', ++ rm='0', ++ size='39G', ++ bsize=39 * bytes_per_gb, ++ ro='0', ++ tp='part', ++ mountpoint='', ++ parent_name='nvme0n1', ++ parent_path='/dev/nvme0n1'), ++ ] + + actual = storagescanner._get_lsblk_info() + assert expected == actual +diff --git a/repos/system_upgrade/common/models/storageinfo.py b/repos/system_upgrade/common/models/storageinfo.py +index 5bb9caac..71e7459d 100644 +--- a/repos/system_upgrade/common/models/storageinfo.py ++++ b/repos/system_upgrade/common/models/storageinfo.py +@@ -43,6 +43,8 @@ class LsblkEntry(Model): + ro = fields.String() + tp = fields.String() + mountpoint = fields.String() ++ parent_name = fields.String() ++ parent_path = fields.String() + + + class PvsEntry(Model): +diff --git a/repos/system_upgrade/el8toel9/actors/vdoconversionscanner/tests/unit_test_vdoconversionscanner.py b/repos/system_upgrade/el8toel9/actors/vdoconversionscanner/tests/unit_test_vdoconversionscanner.py +index 0745c91d..4d6ef0dc 100644 +--- 
a/repos/system_upgrade/el8toel9/actors/vdoconversionscanner/tests/unit_test_vdoconversionscanner.py
++++ b/repos/system_upgrade/el8toel9/actors/vdoconversionscanner/tests/unit_test_vdoconversionscanner.py
+@@ -26,7 +26,9 @@ def _lsblk_entry(prefix, number, types, size='128G', bsize=2 ** 37):
+         bsize=bsize,
+         ro='0',
+         tp=types[random.randint(0, len(types) - 1)],
+-        mountpoint='')
++        mountpoint='',
++        parent_name='',
++        parent_path='')
+ 
+ 
+ @aslist
+--
+2.47.0
+
diff --git a/SOURCES/0019-LuksScanner-Add-LUKS-dump-scanner-and-models.patch b/SOURCES/0019-LuksScanner-Add-LUKS-dump-scanner-and-models.patch
new file mode 100644
index 0000000..3eaa137
--- /dev/null
+++ b/SOURCES/0019-LuksScanner-Add-LUKS-dump-scanner-and-models.patch
@@ -0,0 +1,1030 @@
+From 266c2495b144aa13d96f72c276d7b94638e3a6b7 Mon Sep 17 00:00:00 2001
+From: Daniel Zatovic
+Date: Tue, 16 Apr 2024 17:04:41 +0200
+Subject: [PATCH 19/40] LuksScanner: Add LUKS dump scanner and models
+
+Add a LuksScanner actor that runs 'cryptsetup luksDump' for all 'crypt'
+devices from the lsblk output. The output is then parsed and filled into
+LuksDump and LuksToken models.
+
+The LuksDump model contains information about the LUKS version, device UUID,
+corresponding device path, name of the backing device (which contains
+the LUKS header) and a list of LuksToken models.
+
+The LuksToken model represents a token associated with the given LUKS
+device. It contains the token ID, the ID of the associated keyslot and
+the token type. If the token type is "clevis", we use the "clevis luks list"
+command to determine the clevis-specific subtype and append it to the token
+name. E.g. if there is a "clevis" token and "clevis luks list" returns
+"tpm2", the token type will be "clevis-tpm2".
+---
+ .../common/actors/luksscanner/actor.py | 23 ++
+ .../luksscanner/libraries/luksdump_parser.py | 199 ++++++++++++++++++
+ .../luksscanner/libraries/luksscanner.py | 125 +++++++++++
+ .../tests/files/luksDump_luks1.txt | 27 +++
+ .../tests/files/luksDump_nvme0n1p3_luks1.txt | 27 +++
+ .../tests/files/luksDump_nvme0n1p3_luks2.txt | 43 ++++
+ .../files/luksDump_nvme0n1p3_luks2_tokens.txt | 119 +++++++++++
+ .../luksscanner/tests/test_luksdump_parser.py | 147 +++++++++++++
+ .../luksscanner/tests/test_luksscaner.py | 142 +++++++++++++
+ .../system_upgrade/common/models/luksdump.py | 73 +++++++
+ 10 files changed, 925 insertions(+)
+ create mode 100644 repos/system_upgrade/common/actors/luksscanner/actor.py
+ create mode 100755 repos/system_upgrade/common/actors/luksscanner/libraries/luksdump_parser.py
+ create mode 100644 repos/system_upgrade/common/actors/luksscanner/libraries/luksscanner.py
+ create mode 100644 repos/system_upgrade/common/actors/luksscanner/tests/files/luksDump_luks1.txt
+ create mode 100644 repos/system_upgrade/common/actors/luksscanner/tests/files/luksDump_nvme0n1p3_luks1.txt
+ create mode 100644 repos/system_upgrade/common/actors/luksscanner/tests/files/luksDump_nvme0n1p3_luks2.txt
+ create mode 100644 repos/system_upgrade/common/actors/luksscanner/tests/files/luksDump_nvme0n1p3_luks2_tokens.txt
+ create mode 100644 repos/system_upgrade/common/actors/luksscanner/tests/test_luksdump_parser.py
+ create mode 100644 repos/system_upgrade/common/actors/luksscanner/tests/test_luksscaner.py
+ create mode 100644 repos/system_upgrade/common/models/luksdump.py
+
+diff --git a/repos/system_upgrade/common/actors/luksscanner/actor.py b/repos/system_upgrade/common/actors/luksscanner/actor.py
+new file mode 100644
+index 00000000..a163374b
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/luksscanner/actor.py
+@@ -0,0 +1,23 @@
++from leapp.actors import Actor
++from leapp.libraries.actor import luksscanner
++from leapp.models import LuksDumps, StorageInfo
++from leapp.reporting import Report
++from leapp.tags import FactsPhaseTag, IPUWorkflowTag
++
++
++class LuksScanner(Actor):
++    """
++    Provides data about active LUKS devices.
++
++    Scans all block devices of 'crypt' type and attempts to run 'cryptsetup luksDump' on them.
++    For every 'crypt' device a LuksDump model is produced. Furthermore, if there is any LUKS token
++    of type clevis, the concrete subtype is determined using 'clevis luks list'.
++    """
++
++    name = 'luks_scanner'
++    consumes = (StorageInfo,)
++    produces = (Report, LuksDumps)
++    tags = (IPUWorkflowTag, FactsPhaseTag)
++
++    def process(self):
++        self.produce(luksscanner.get_luks_dumps_model())
+diff --git a/repos/system_upgrade/common/actors/luksscanner/libraries/luksdump_parser.py b/repos/system_upgrade/common/actors/luksscanner/libraries/luksdump_parser.py
+new file mode 100755
+index 00000000..44113d0e
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/luksscanner/libraries/luksdump_parser.py
+@@ -0,0 +1,199 @@
++class LuksDumpParser(object):
++    """
++    Class for parsing "cryptsetup luksDump" output. Given a list of lines, it
++    generates a dictionary representing the dump.
++    """
++
++    class Node(object):
++        """
++        Helper class; every line is represented as a node. The node depth is
++        based on the indentation of the line. A dictionary is produced after
++        all lines are inserted.
++        """
++
++        def __init__(self, indented_line):
++            self.children = []
++            self.level = len(indented_line) - len(indented_line.lstrip())
++            self.text = indented_line.strip()
++
++        def add_children(self, nodes):
++            # NOTE(pstodulk): nodes is expected to be a non-empty list; an empty
++            # list here would be an error. So keeping the hard crash for now, as
++            # an empty list is only hypothetical at this point and would most
++            # likely end in an error anyway if it were ever discovered.
++            childlevel = nodes[0].level
++            while nodes:
++                node = nodes.pop(0)
++                if node.level == childlevel:  # add node as a child
++                    self.children.append(node)
++                elif node.level > childlevel:  # add nodes as grandchildren of the last child
++                    nodes.insert(0, node)
++                    self.children[-1].add_children(nodes)
++                elif node.level <= self.level:  # this node is a sibling, no more children
++                    nodes.insert(0, node)
++                    return
++
++        def as_dict(self):
++            if len(self.children) > 1:
++                children = [node.as_dict() for node in self.children]
++
++                return {self.text: LuksDumpParser._merge_list(children)}
++            if len(self.children) == 1:
++                return {self.text: self.children[0].as_dict()}
++            return self.text
++
++    @staticmethod
++    def _count_type(elem_list, elem_type):
++        """ Count the number of items of elem_type inside the elem_list """
++        return sum(isinstance(x, elem_type) for x in elem_list)
++
++    @staticmethod
++    def _merge_list(elem_list):
++        """
++        Given a list of elements, merge them into a single element. If all
++        elements are strings, concatenate them into a single string. When all
++        the elements are dictionaries, merge them into a single dictionary
++        containing the keys/values from all of the dictionaries.
++        """
++
++        dict_count = LuksDumpParser._count_type(elem_list, dict)
++        str_count = LuksDumpParser._count_type(elem_list, str)
++
++        result = elem_list
++        if dict_count == len(elem_list):
++            result = {}
++            for element in elem_list:
++                result.update(element)
++        elif str_count == len(elem_list):
++            result = "".join(elem_list)
++
++        return result
++
++    @staticmethod
++    def _find_single_str(elem_list):
++        """ If the list contains exactly one string, return it; otherwise return None. """
++
++        result = None
++
++        for elem in elem_list:
++            if isinstance(elem, str):
++                if result is not None:
++                    # more than one string in the list
++                    return None
++                result = elem
++
++        return result
++
++    @staticmethod
++    def _fixup_type(elem_list, type_string):
++        single_string = LuksDumpParser._find_single_str(elem_list)
++
++        if single_string is not None:
++            elem_list.remove(single_string)
++            elem_list.append({type_string: single_string})
++
++    @staticmethod
++    def _fixup_section(section, type_string):
++        for key, value in section.items():
++            LuksDumpParser._fixup_type(value, type_string)
++            section[key] = LuksDumpParser._merge_list(section[key])
++
++    @staticmethod
++    def _fixup_dict(parsed_dict):
++        """ Various fixups of the parsed dictionary """
++
++        if "Version" not in parsed_dict:
++            return
++        if parsed_dict["Version"] == "1":
++            for i in range(8):
++                keyslot = "Key Slot {}".format(i)
++
++                if keyslot not in parsed_dict:
++                    continue
++
++                if parsed_dict[keyslot] in ["ENABLED", "DISABLED"]:
++                    parsed_dict[keyslot] = {"enabled": parsed_dict[keyslot] == "ENABLED"}
++
++                if not isinstance(parsed_dict[keyslot], list):
++                    continue
++
++                enabled = None
++                if "ENABLED" in parsed_dict[keyslot]:
++                    enabled = True
++                    parsed_dict[keyslot].remove("ENABLED")
++                if "DISABLED" in parsed_dict[keyslot]:
++                    enabled = False
++                    parsed_dict[keyslot].remove("DISABLED")
++                parsed_dict[keyslot] = LuksDumpParser._merge_list(parsed_dict[keyslot])
++                if enabled is not None:
++                    parsed_dict[keyslot]["enabled"] = enabled
++        elif parsed_dict["Version"] == "2":
++            for section in ["Keyslots", "Digests", "Data segments", "Tokens"]:
++                if section in parsed_dict:
++                    LuksDumpParser._fixup_section(parsed_dict[section], "type")
++
++    @staticmethod
++    def _fixup_dump(dump):
++        """
++        Replace tabs with spaces; for lines containing a colon, move the text
++        after the colon onto a new line, indented to match the following line.
++        """
++
++        dump = [line.replace("\t", " "*8).replace("\n", "") for line in dump]
++        newdump = []
++
++        for i, line in enumerate(dump):
++            if not line.strip():
++                continue
++
++            if ':' in line:
++                first_half = line.split(":")[0]
++                second_half = ":".join(line.split(":")[1:]).lstrip()
++
++                current_level = len(line) - len(line.lstrip())
++                if i+1 < len(dump):
++                    next_level = len(dump[i+1]) - len(dump[i+1].lstrip())
++                else:
++                    next_level = current_level
++
++                if next_level > current_level:
++                    second_half = " " * next_level + second_half
++                else:
++                    second_half = " " * (current_level + 8) + second_half
++
++                newdump.append(first_half)
++                if second_half.strip():
++                    newdump.append(second_half)
++            else:
++                newdump.append(line)
++
++        return newdump
++
++    @staticmethod
++    def parse(dump):
++        """
++        Parse the output of the "cryptsetup luksDump" command into a dictionary.
++
++        :param dump: List of output lines of luksDump
++        :returns: Parsed dictionary
++        """
++
++        root = LuksDumpParser.Node('root')
++
++        nodes = []
++        for line in LuksDumpParser._fixup_dump(dump):
++            nodes.append(LuksDumpParser.Node(line))
++
++        root.add_children(nodes)
++        root = root.as_dict()['root']
++
++        if isinstance(root, list):
++            result = {}
++            for child in root:
++                if isinstance(child, str):
++                    child = {child: {}}
++                result.update(child)
++            root = result
++
++        LuksDumpParser._fixup_dict(root)
++        return root
+diff --git a/repos/system_upgrade/common/actors/luksscanner/libraries/luksscanner.py b/repos/system_upgrade/common/actors/luksscanner/libraries/luksscanner.py
+new file mode 100644
+index 00000000..1c7822a5
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/luksscanner/libraries/luksscanner.py
+@@ -0,0 +1,125 @@
++import functools
++
++from leapp.exceptions import StopActorExecutionError
++from leapp.libraries import stdlib
++from leapp.libraries.actor.luksdump_parser import LuksDumpParser
++from leapp.libraries.stdlib import api
++from leapp.models import LuksDump, LuksDumps, LuksToken, StorageInfo
++
++
++def aslist(f):
++    """ Decorator used to convert generator to list """
++    @functools.wraps(f)
++    def inner(*args, **kwargs):
++        return list(f(*args, **kwargs))
++    return inner
++
++
++def _get_clevis_type(device_path, keyslot):
++    """
++    Assuming the device is initialized using clevis, determine the type of
++    the clevis token associated with the specified keyslot.
++    """
++    try:
++        result = stdlib.run(["clevis", "luks", "list", "-d", device_path, "-s", str(keyslot)])
++    except OSError:
++        message = ('A LUKS drive with a clevis token was discovered, but the '
++                   'clevis package is not installed. The clevis command is required '
++                   'to determine the clevis token type.')
++        details = {'hint': 'Use dnf to install the "clevis-luks" package.'}
++        raise StopActorExecutionError(message=message, details=details)
++    except stdlib.CalledProcessError as e:
++        api.current_logger().debug("clevis list command failed with an error code: {}".format(e.exit_code))
++
++        message = ('The "clevis luks list" command failed. This '
++                   'might be because the clevis-luks package is '
++                   'missing on your system.')
++        details = {'hint': 'Use dnf to install the "clevis-luks" package.'}
++        raise StopActorExecutionError(message=message, details=details)
++
++    line = result["stdout"].split()
++    if len(line) != 3:
++        raise StopActorExecutionError(
++            'Invalid "clevis list" output detected'
++        )
++
++    return "clevis-{}".format(line[1])
++
++
++@aslist
++def _get_tokens(device_path, luksdump_dict):
++    """ Given a parsed LUKS dump, produce a list of tokens """
++    if "Version" not in luksdump_dict or luksdump_dict["Version"] != '2':
++        return
++    if "Tokens" not in luksdump_dict:
++        raise StopActorExecutionError(
++            'No tokens in cryptsetup luksDump output'
++        )
++
++    for token_id in luksdump_dict["Tokens"]:
++        token = luksdump_dict["Tokens"][token_id]
++
++        if "Keyslot" not in token or "type" not in token:
++            raise StopActorExecutionError(
++                'Token specification does not contain keyslot or type',
++            )
++        keyslot = int(token["Keyslot"])
++        token_type = token["type"]
++
++        if token_type == "clevis":
++            token_type = _get_clevis_type(device_path, keyslot)
++
++        yield LuksToken(
++            token_id=int(token_id),
++            keyslot=keyslot,
++            token_type=token_type
++        )
++
++
++def get_luks_dump_by_device(device_path, device_name):
++    """ Determine info about a LUKS device using the cryptsetup and clevis commands """
++
++    try:
++        result = stdlib.run(['cryptsetup', 'luksDump', device_path])
++        luksdump_dict = LuksDumpParser.parse(result["stdout"].splitlines())
++
++        version = int(luksdump_dict["Version"]) if "Version" in luksdump_dict else None
++        uuid = luksdump_dict["UUID"] if "UUID" in luksdump_dict else None
++        if version is None or uuid is None:
++            api.current_logger().error(
++                'Failed to detect UUID or version from the output of the "cryptsetup luksDump {}" command'.format(device_path)
++            )
++            raise StopActorExecutionError(
++                'Failed to detect UUID or version from the output of the "cryptsetup luksDump {}" command'.format(device_path)
++            )
++
++        return LuksDump(
++            version=version,
++            uuid=uuid,
++            device_path=device_path,
++            device_name=device_name,
++            tokens=_get_tokens(device_path, luksdump_dict)
++        )
++
++    except (OSError, stdlib.CalledProcessError) as ex:
++        api.current_logger().error(
++            'Failed to execute "cryptsetup luksDump" command: {}'.format(ex)
++        )
++        raise StopActorExecutionError(
++            'Failed to execute "cryptsetup luksDump {}" command'.format(device_path),
++            details={'details': str(ex)}
++        )
++
++
++@aslist
++def get_luks_dumps():
++    """ Collect info about every active LUKS device """
++
++    for storage_info in api.consume(StorageInfo):
++        for blk in storage_info.lsblk:
++            if blk.tp == 'crypt' and blk.parent_path:
++                yield get_luks_dump_by_device(blk.parent_path, blk.parent_name)
++
++
++def get_luks_dumps_model():
++    return LuksDumps(dumps=get_luks_dumps())
+diff --git a/repos/system_upgrade/common/actors/luksscanner/tests/files/luksDump_luks1.txt b/repos/system_upgrade/common/actors/luksscanner/tests/files/luksDump_luks1.txt
+new file mode 100644
+index 00000000..e22cc8ce
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/luksscanner/tests/files/luksDump_luks1.txt
+@@ -0,0 +1,27 @@
++LUKS header information for /dev/loop10
++
++Version: 1
++Cipher name: aes
++Cipher mode: xts-plain64
++Hash spec: sha256
++Payload offset: 4096
++MK bits: 512
++MK digest: fb ec 6b 31 ae e4 49 03 3e ad 43 22 02 cf a8 78 ad 3c d2 a8
++MK salt: 17 57 4e 2f ed 0b 5c 62 d5 de 54 f5 7f ab 60 68
++ 71 d8 72 06 64 6c 81 05 39 55 3f 55 32 56 d9 da
++MK
iterations: 114573 ++UUID: 90242257-d00a-4019-aba6-03083f89404b ++ ++Key Slot 0: ENABLED ++ Iterations: 1879168 ++ Salt: fc 77 48 72 bd 31 ca 83 23 80 5a 5e b9 5b de bb ++ 55 ac d5 a9 3b 96 ad a5 82 bc 11 68 ba f8 87 56 ++ Key material offset: 8 ++ AF stripes: 4000 ++Key Slot 1: DISABLED ++Key Slot 2: DISABLED ++Key Slot 3: DISABLED ++Key Slot 4: DISABLED ++Key Slot 5: DISABLED ++Key Slot 6: DISABLED ++Key Slot 7: DISABLED +diff --git a/repos/system_upgrade/common/actors/luksscanner/tests/files/luksDump_nvme0n1p3_luks1.txt b/repos/system_upgrade/common/actors/luksscanner/tests/files/luksDump_nvme0n1p3_luks1.txt +new file mode 100644 +index 00000000..e22cc8ce +--- /dev/null ++++ b/repos/system_upgrade/common/actors/luksscanner/tests/files/luksDump_nvme0n1p3_luks1.txt +@@ -0,0 +1,27 @@ ++LUKS header information for /dev/loop10 ++ ++Version: 1 ++Cipher name: aes ++Cipher mode: xts-plain64 ++Hash spec: sha256 ++Payload offset: 4096 ++MK bits: 512 ++MK digest: fb ec 6b 31 ae e4 49 03 3e ad 43 22 02 cf a8 78 ad 3c d2 a8 ++MK salt: 17 57 4e 2f ed 0b 5c 62 d5 de 54 f5 7f ab 60 68 ++ 71 d8 72 06 64 6c 81 05 39 55 3f 55 32 56 d9 da ++MK iterations: 114573 ++UUID: 90242257-d00a-4019-aba6-03083f89404b ++ ++Key Slot 0: ENABLED ++ Iterations: 1879168 ++ Salt: fc 77 48 72 bd 31 ca 83 23 80 5a 5e b9 5b de bb ++ 55 ac d5 a9 3b 96 ad a5 82 bc 11 68 ba f8 87 56 ++ Key material offset: 8 ++ AF stripes: 4000 ++Key Slot 1: DISABLED ++Key Slot 2: DISABLED ++Key Slot 3: DISABLED ++Key Slot 4: DISABLED ++Key Slot 5: DISABLED ++Key Slot 6: DISABLED ++Key Slot 7: DISABLED +diff --git a/repos/system_upgrade/common/actors/luksscanner/tests/files/luksDump_nvme0n1p3_luks2.txt b/repos/system_upgrade/common/actors/luksscanner/tests/files/luksDump_nvme0n1p3_luks2.txt +new file mode 100644 +index 00000000..407261f4 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/luksscanner/tests/files/luksDump_nvme0n1p3_luks2.txt +@@ -0,0 +1,43 @@ ++LUKS header information ++Version: 2 ++Epoch: 3 ++Metadata area: 16384 [bytes] ++Keyslots area: 16744448 [bytes] ++UUID: dfd8db30-2b65-4be9-8cae-65f5fac4a06f ++Label: (no label) ++Subsystem: (no subsystem) ++Flags: (no flags) ++ ++Data segments: ++ 0: crypt ++ offset: 16777216 [bytes] ++ length: (whole device) ++ cipher: aes-xts-plain64 ++ sector: 512 [bytes] ++ ++Keyslots: ++ 0: luks2 ++ Key: 512 bits ++ Priority: normal ++ Cipher: aes-xts-plain64 ++ Cipher key: 512 bits ++ PBKDF: argon2id ++ Time cost: 7 ++ Memory: 1048576 ++ Threads: 4 ++ Salt: 1d d5 97 97 dd 45 e2 d7 2b a7 0b fa c4 7f b3 f4 ++ ef 4e 5f 95 e0 ba fd 7a 7e 36 02 69 f8 44 96 d8 ++ AF stripes: 4000 ++ AF hash: sha256 ++ Area offset:32768 [bytes] ++ Area length:258048 [bytes] ++ Digest ID: 0 ++Tokens: ++Digests: ++ 0: pbkdf2 ++ Hash: sha256 ++ Iterations: 99750 ++ Salt: 10 1d a1 21 8b 93 dc bb f1 ab 2b 1b 89 8e 3d c4 ++ 18 07 51 08 ef f5 95 da 9f 85 fa d7 de c9 c4 96 ++ Digest: 4f 27 4c 19 ae 72 b1 75 ef 53 c0 6d ff db 7f fe ++ f1 67 d0 c3 67 03 0c 14 3a 6f 6a 1a 87 a8 6f 32 +diff --git a/repos/system_upgrade/common/actors/luksscanner/tests/files/luksDump_nvme0n1p3_luks2_tokens.txt b/repos/system_upgrade/common/actors/luksscanner/tests/files/luksDump_nvme0n1p3_luks2_tokens.txt +new file mode 100644 +index 00000000..c2a7464c +--- /dev/null ++++ b/repos/system_upgrade/common/actors/luksscanner/tests/files/luksDump_nvme0n1p3_luks2_tokens.txt +@@ -0,0 +1,119 @@ ++LUKS header information ++Version: 2 ++Epoch: 9 ++Metadata area: 16384 [bytes] ++Keyslots area: 16744448 [bytes] ++UUID: 6b929b85-b01e-4aa3-8ad2-a05decae6e3d 
++Label: (no label) ++Subsystem: (no subsystem) ++Flags: (no flags) ++ ++Data segments: ++ 0: crypt ++ offset: 16777216 [bytes] ++ length: (whole device) ++ cipher: aes-xts-plain64 ++ sector: 512 [bytes] ++ ++Keyslots: ++ 0: luks2 ++ Key: 512 bits ++ Priority: normal ++ Cipher: aes-xts-plain64 ++ Cipher key: 512 bits ++ PBKDF: argon2id ++ Time cost: 7 ++ Memory: 1048576 ++ Threads: 4 ++ Salt: de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd ++ de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd ++ AF stripes: 4000 ++ AF hash: sha256 ++ Area offset:32768 [bytes] ++ Area length:258048 [bytes] ++ Digest ID: 0 ++ 1: luks2 ++ Key: 512 bits ++ Priority: normal ++ Cipher: aes-xts-plain64 ++ Cipher key: 512 bits ++ PBKDF: pbkdf2 ++ Hash: sha256 ++ Iterations: 1000 ++ Salt: de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd ++ de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd ++ AF stripes: 4000 ++ AF hash: sha256 ++ Area offset:290816 [bytes] ++ Area length:258048 [bytes] ++ Digest ID: 0 ++ 2: luks2 ++ Key: 512 bits ++ Priority: normal ++ Cipher: aes-xts-plain64 ++ Cipher key: 512 bits ++ PBKDF: pbkdf2 ++ Hash: sha256 ++ Iterations: 1000 ++ Salt: de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd ++ de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd ++ AF stripes: 4000 ++ AF hash: sha256 ++ Area offset:548864 [bytes] ++ Area length:258048 [bytes] ++ Digest ID: 0 ++ 3: luks2 ++ Key: 512 bits ++ Priority: normal ++ Cipher: aes-xts-plain64 ++ Cipher key: 512 bits ++ PBKDF: pbkdf2 ++ Hash: sha512 ++ Iterations: 1000 ++ Salt: de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd ++ de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd ++ AF stripes: 4000 ++ AF hash: sha512 ++ Area offset:806912 [bytes] ++ Area length:258048 [bytes] ++ Digest ID: 0 ++Tokens: ++ 0: clevis ++ Keyslot: 1 ++ 1: clevis ++ Keyslot: 2 ++ 2: systemd-tpm2 ++ tpm2-hash-pcrs: 7 ++ tpm2-pcr-bank: sha256 ++ tpm2-pubkey: ++ (null) ++ tpm2-pubkey-pcrs: n/a ++ tpm2-primary-alg: ecc ++ tpm2-blob: de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd ++ de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd ++ de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd ++ de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd ++ de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd ++ de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd ++ de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd ++ de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd ++ de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd ++ de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd ++ de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd ++ de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd ++ de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd ++ de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd ++ tpm2-policy-hash: ++ de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd ++ de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd ++ tpm2-pin: false ++ tpm2-salt: false ++ Keyslot: 3 ++Digests: ++ 0: pbkdf2 ++ Hash: sha256 ++ Iterations: 117448 ++ Salt: de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd ++ de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd ++ Digest: de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd ++ de a1 b9 7f 03 cb b4 89 e2 52 20 fc e4 24 65 cd +diff --git a/repos/system_upgrade/common/actors/luksscanner/tests/test_luksdump_parser.py b/repos/system_upgrade/common/actors/luksscanner/tests/test_luksdump_parser.py +new file mode 100644 +index 00000000..4b190149 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/luksscanner/tests/test_luksdump_parser.py +@@ -0,0 +1,147 @@ ++import os ++ ++from leapp.libraries.actor.luksdump_parser import LuksDumpParser ++from 
leapp.snactor.fixture import current_actor_context ++ ++CUR_DIR = os.path.dirname(os.path.abspath(__file__)) ++ ++ ++def test_luksdump_parser_luks1(current_actor_context): ++ f = open(os.path.join(CUR_DIR, 'files/luksDump_nvme0n1p3_luks1.txt')) ++ parsed_dict = LuksDumpParser.parse(f.readlines()) ++ ++ assert parsed_dict["Version"] == "1" ++ assert parsed_dict["Cipher name"] == "aes" ++ assert parsed_dict["Cipher mode"] == "xts-plain64" ++ assert parsed_dict["Hash spec"] == "sha256" ++ assert parsed_dict["Payload offset"] == "4096" ++ assert parsed_dict["MK bits"] == "512" ++ assert parsed_dict["MK digest"].replace(" ", "") == "fbec6b31aee449033ead432202cfa878ad3cd2a8" ++ assert parsed_dict["MK salt"].replace(" ", "") == "17574e2fed0b5c62d5de54f57fab6068"\ ++ "71d87206646c810539553f553256d9da" ++ assert parsed_dict["MK iterations"] == "114573" ++ assert parsed_dict["UUID"] == "90242257-d00a-4019-aba6-03083f89404b" ++ ++ assert parsed_dict["Key Slot 0"]["enabled"] ++ assert parsed_dict["Key Slot 0"]["Iterations"] == "1879168" ++ assert parsed_dict["Key Slot 0"]["Salt"].replace(" ", "") == "fc774872bd31ca8323805a5eb95bdebb" \ ++ "55acd5a93b96ada582bc1168baf88756" ++ assert parsed_dict["Key Slot 0"]["Key material offset"] == "8" ++ assert parsed_dict["Key Slot 0"]["AF stripes"] == "4000" ++ ++ assert not parsed_dict["Key Slot 1"]["enabled"] ++ assert not parsed_dict["Key Slot 2"]["enabled"] ++ assert not parsed_dict["Key Slot 3"]["enabled"] ++ assert not parsed_dict["Key Slot 4"]["enabled"] ++ assert not parsed_dict["Key Slot 5"]["enabled"] ++ assert not parsed_dict["Key Slot 6"]["enabled"] ++ assert not parsed_dict["Key Slot 7"]["enabled"] ++ ++ ++def test_luksdump_parser_luks2_tokens(current_actor_context): ++ f = open(os.path.join(CUR_DIR, 'files/luksDump_nvme0n1p3_luks2_tokens.txt')) ++ parsed_dict = LuksDumpParser.parse(f.readlines()) ++ ++ assert parsed_dict["Version"] == "2" ++ assert parsed_dict["Epoch"] == "9" ++ assert parsed_dict["Metadata area"] == "16384 [bytes]" ++ assert parsed_dict["Keyslots area"] == "16744448 [bytes]" ++ assert parsed_dict["UUID"] == "6b929b85-b01e-4aa3-8ad2-a05decae6e3d" ++ assert parsed_dict["Label"] == "(no label)" ++ assert parsed_dict["Subsystem"] == "(no subsystem)" ++ assert parsed_dict["Flags"] == "(no flags)" ++ ++ assert len(parsed_dict["Data segments"]) == 1 ++ assert parsed_dict["Data segments"]["0"]["type"] == "crypt" ++ assert parsed_dict["Data segments"]["0"]["offset"] == "16777216 [bytes]" ++ assert parsed_dict["Data segments"]["0"]["length"] == "(whole device)" ++ assert parsed_dict["Data segments"]["0"]["cipher"] == "aes-xts-plain64" ++ assert parsed_dict["Data segments"]["0"]["sector"] == "512 [bytes]" ++ ++ assert len(parsed_dict["Keyslots"]) == 4 ++ assert parsed_dict["Keyslots"]["0"]["type"] == "luks2" ++ assert parsed_dict["Keyslots"]["0"]["Key"] == "512 bits" ++ assert parsed_dict["Keyslots"]["0"]["Priority"] == "normal" ++ assert parsed_dict["Keyslots"]["0"]["Cipher"] == "aes-xts-plain64" ++ assert parsed_dict["Keyslots"]["0"]["Cipher key"] == "512 bits" ++ assert parsed_dict["Keyslots"]["0"]["PBKDF"] == "argon2id" ++ assert parsed_dict["Keyslots"]["0"]["Time cost"] == "7" ++ assert parsed_dict["Keyslots"]["0"]["Memory"] == "1048576" ++ assert parsed_dict["Keyslots"]["0"]["Threads"] == "4" ++ assert parsed_dict["Keyslots"]["0"]["Salt"].replace(" ", "") == 2*"dea1b97f03cbb489e25220fce42465cd" ++ assert parsed_dict["Keyslots"]["0"]["AF stripes"] == "4000" ++ assert parsed_dict["Keyslots"]["0"]["AF hash"] == "sha256" ++ assert 
parsed_dict["Keyslots"]["0"]["Area offset"] == "32768 [bytes]" ++ assert parsed_dict["Keyslots"]["0"]["Area length"] == "258048 [bytes]" ++ assert parsed_dict["Keyslots"]["0"]["Digest ID"] == "0" ++ ++ assert parsed_dict["Keyslots"]["1"]["type"] == "luks2" ++ assert parsed_dict["Keyslots"]["1"]["Key"] == "512 bits" ++ assert parsed_dict["Keyslots"]["1"]["Priority"] == "normal" ++ assert parsed_dict["Keyslots"]["1"]["Cipher"] == "aes-xts-plain64" ++ assert parsed_dict["Keyslots"]["1"]["Cipher key"] == "512 bits" ++ assert parsed_dict["Keyslots"]["1"]["PBKDF"] == "pbkdf2" ++ assert parsed_dict["Keyslots"]["1"]["Hash"] == "sha256" ++ assert parsed_dict["Keyslots"]["1"]["Iterations"] == "1000" ++ assert parsed_dict["Keyslots"]["1"]["Salt"].replace(" ", "") == 2*"dea1b97f03cbb489e25220fce42465cd" ++ assert parsed_dict["Keyslots"]["1"]["AF stripes"] == "4000" ++ assert parsed_dict["Keyslots"]["1"]["AF hash"] == "sha256" ++ assert parsed_dict["Keyslots"]["1"]["Area offset"] == "290816 [bytes]" ++ assert parsed_dict["Keyslots"]["1"]["Area length"] == "258048 [bytes]" ++ assert parsed_dict["Keyslots"]["1"]["Digest ID"] == "0" ++ ++ assert parsed_dict["Keyslots"]["2"]["type"] == "luks2" ++ assert parsed_dict["Keyslots"]["2"]["Key"] == "512 bits" ++ assert parsed_dict["Keyslots"]["2"]["Priority"] == "normal" ++ assert parsed_dict["Keyslots"]["2"]["Cipher"] == "aes-xts-plain64" ++ assert parsed_dict["Keyslots"]["2"]["Cipher key"] == "512 bits" ++ assert parsed_dict["Keyslots"]["2"]["PBKDF"] == "pbkdf2" ++ assert parsed_dict["Keyslots"]["2"]["Hash"] == "sha256" ++ assert parsed_dict["Keyslots"]["2"]["Iterations"] == "1000" ++ assert parsed_dict["Keyslots"]["2"]["Salt"].replace(" ", "") == 2*"dea1b97f03cbb489e25220fce42465cd" ++ assert parsed_dict["Keyslots"]["2"]["AF stripes"] == "4000" ++ assert parsed_dict["Keyslots"]["2"]["AF hash"] == "sha256" ++ assert parsed_dict["Keyslots"]["2"]["Area offset"] == "548864 [bytes]" ++ assert parsed_dict["Keyslots"]["2"]["Area length"] == "258048 [bytes]" ++ assert parsed_dict["Keyslots"]["2"]["Digest ID"] == "0" ++ ++ assert parsed_dict["Keyslots"]["3"]["type"] == "luks2" ++ assert parsed_dict["Keyslots"]["3"]["Key"] == "512 bits" ++ assert parsed_dict["Keyslots"]["3"]["Priority"] == "normal" ++ assert parsed_dict["Keyslots"]["3"]["Cipher"] == "aes-xts-plain64" ++ assert parsed_dict["Keyslots"]["3"]["Cipher key"] == "512 bits" ++ assert parsed_dict["Keyslots"]["3"]["PBKDF"] == "pbkdf2" ++ assert parsed_dict["Keyslots"]["3"]["Hash"] == "sha512" ++ assert parsed_dict["Keyslots"]["3"]["Iterations"] == "1000" ++ assert parsed_dict["Keyslots"]["3"]["Salt"].replace(" ", "") == 2*"dea1b97f03cbb489e25220fce42465cd" ++ assert parsed_dict["Keyslots"]["3"]["AF stripes"] == "4000" ++ assert parsed_dict["Keyslots"]["3"]["AF hash"] == "sha512" ++ assert parsed_dict["Keyslots"]["3"]["Area offset"] == "806912 [bytes]" ++ assert parsed_dict["Keyslots"]["3"]["Area length"] == "258048 [bytes]" ++ assert parsed_dict["Keyslots"]["3"]["Digest ID"] == "0" ++ ++ assert len(parsed_dict["Tokens"]) == 3 ++ assert parsed_dict["Tokens"]["0"]["type"] == "clevis" ++ assert parsed_dict["Tokens"]["0"]["Keyslot"] == "1" ++ ++ assert parsed_dict["Tokens"]["1"]["type"] == "clevis" ++ assert parsed_dict["Tokens"]["1"]["Keyslot"] == "2" ++ ++ assert parsed_dict["Tokens"]["2"]["type"] == "systemd-tpm2" ++ assert parsed_dict["Tokens"]["2"]["Keyslot"] == "3" ++ assert parsed_dict["Tokens"]["2"]["tpm2-hash-pcrs"] == "7" ++ assert parsed_dict["Tokens"]["2"]["tpm2-pcr-bank"] == "sha256" ++ assert 
parsed_dict["Tokens"]["2"]["tpm2-pubkey"] == "(null)"
++    assert parsed_dict["Tokens"]["2"]["tpm2-pubkey-pcrs"] == "n/a"
++    assert parsed_dict["Tokens"]["2"]["tpm2-primary-alg"] == "ecc"
++    assert parsed_dict["Tokens"]["2"]["tpm2-blob"].replace(" ", "") == 14*"dea1b97f03cbb489e25220fce42465cd"
++    assert parsed_dict["Tokens"]["2"]["tpm2-policy-hash"].replace(" ", "") == 2*"dea1b97f03cbb489e25220fce42465cd"
++    assert parsed_dict["Tokens"]["2"]["tpm2-pin"] == "false"
++    assert parsed_dict["Tokens"]["2"]["tpm2-salt"] == "false"
++
++    assert len(parsed_dict["Digests"]) == 1
++    assert parsed_dict["Digests"]["0"]["type"] == "pbkdf2"
++    assert parsed_dict["Digests"]["0"]["Hash"] == "sha256"
++    assert parsed_dict["Digests"]["0"]["Iterations"] == "117448"
++    assert parsed_dict["Digests"]["0"]["Salt"].replace(" ", "") == 2*"dea1b97f03cbb489e25220fce42465cd"
++    assert parsed_dict["Digests"]["0"]["Digest"].replace(" ", "") == 2*"dea1b97f03cbb489e25220fce42465cd"
+diff --git a/repos/system_upgrade/common/actors/luksscanner/tests/test_luksscaner.py b/repos/system_upgrade/common/actors/luksscanner/tests/test_luksscaner.py
+new file mode 100644
+index 00000000..22eb0946
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/luksscanner/tests/test_luksscaner.py
+@@ -0,0 +1,142 @@
++import os
++
++import pytest
++
++from leapp.libraries.stdlib import api
++from leapp.models import LsblkEntry, LuksDumps, StorageInfo
++from leapp.snactor.fixture import current_actor_context
++
++CUR_DIR = os.path.dirname(os.path.abspath(__file__))
++
++TOKENS_ASSERT = {
++    0: {
++        "keyslot": 1,
++        "token_type": "clevis-tpm2"
++    },
++    1: {
++        "keyslot": 2,
++        "token_type": "clevis-tang"
++    },
++    2: {
++        "keyslot": 3,
++        "token_type": "systemd-tpm2"
++    },
++}
++
++CLEVIS_KEYSLOTS = {
++    1: 'tpm2 \'{"hash":"sha256","key":"rsa","pcr_bank":"sha256","pcr_ids":"0,1,7"}\'',
++    2: 'tang \'{"url":"http://localhost"}\''
++}
++
++
++class MockedRun(object):
++    """Simple mock class for leapp.libraries.stdlib.run."""
++
++    def __init__(self, variant, clevis_keyslots):
++        """Store the variant and the clevis keyslot mapping used to
++        produce the mocked per-device command outputs.
++        """
++        self.logger = api.current_logger()
++
++        self.commands = []
++        self.variant = variant
++        self.clevis_keyslots = clevis_keyslots
++
++    def __call__(self, cmd, *args, **kwargs):
++        self.commands.append(cmd)
++
++        if len(cmd) == 3 and cmd[:2] == ['cryptsetup', 'luksDump']:
++            dev_path = cmd[2]
++
++            # We cannot have the output in a list, since the command is called per device. Therefore, we have to map
++            # each device path to its output.
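++            # For example, with variant 'luks2_tokens', the (mocked) call for
++            # '/dev/nvme0n1p3' is answered from files/luksDump_nvme0n1p3_luks2_tokens.txt.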
++ output_files_per_device = { ++ '/dev/nvme0n1p3': 'luksDump_nvme0n1p3{}.txt'.format(("_" + self.variant) if self.variant else "") ++ } ++ ++ if dev_path not in output_files_per_device: ++ raise ValueError( ++ 'Attempting to call "cryptsetup luksDump" on an unexpected device: {}'.format(dev_path) ++ ) ++ with open(os.path.join(CUR_DIR, 'files/{}'.format(output_files_per_device[dev_path]))) as f: ++ return {"stdout": f.read()} ++ elif len(cmd) >= 3 and cmd[:3] == ['clevis', 'luks', 'list']: ++ dev_path = None ++ keyslot = None ++ ++ device_flag = False ++ keyslot_flag = False ++ for element in cmd: ++ if device_flag: ++ dev_path = element ++ elif keyslot_flag: ++ keyslot = element ++ ++ device_flag = element == "-d" ++ keyslot_flag = element == "-s" ++ ++ if dev_path is None or keyslot is None: ++ raise ValueError('Attempting to call "clevis luks list" without specifying keyslot or device') ++ if dev_path is None or keyslot is None or dev_path != "/dev/nvme0n1p3": ++ raise ValueError('Attempting to call "clevis luks list" on invalid device') ++ ++ keyslot = int(keyslot) ++ ++ if keyslot in self.clevis_keyslots: ++ return {"stdout": "{}: {}".format(keyslot, self.clevis_keyslots[keyslot])} ++ ++ return {} ++ ++ ++@pytest.mark.parametrize( ++ ("variant", "luks_version", "uuid", "tokens_assert"), ++ [ ++ ('luks1', 1, '90242257-d00a-4019-aba6-03083f89404b', {}), ++ ('luks2', 2, 'dfd8db30-2b65-4be9-8cae-65f5fac4a06f', {}), ++ ('luks2_tokens', 2, '6b929b85-b01e-4aa3-8ad2-a05decae6e3d', TOKENS_ASSERT), ++ ] ++) ++def test_actor_with_luks(monkeypatch, current_actor_context, variant, luks_version, uuid, tokens_assert): ++ mocked_run = MockedRun(variant, CLEVIS_KEYSLOTS) ++ monkeypatch.setattr('leapp.libraries.stdlib.run', mocked_run) ++ ++ with_luks = [ ++ LsblkEntry( ++ name='/dev/nvme0n1', kname='/dev/nvme0n1', maj_min='259:0', rm='0', size='10G', bsize=10*(1 << 39), ++ ro='0', tp='disk', parent_name='', parent_path='', mountpoint='' ++ ), ++ LsblkEntry( ++ name='/dev/nvme0n1p3', kname='/dev/nvme0n1p3', maj_min='259:3', rm='0', size='10G', bsize=10*(1 << 39), ++ ro='0', tp='part', parent_name='nvme0n1', parent_path='/dev/nvme0n1', mountpoint='' ++ ), ++ LsblkEntry( ++ name='/dev/mapper/tst1', kname='/dev/dm-0', maj_min='253:0', rm='0', size='9G', bsize=9*(1 << 39), ro='0', ++ tp='crypt', parent_name='nvme0n1p3', parent_path='/dev/nvme0n1p3', mountpoint='' ++ ), ++ # PKNAME is not set, so this crypt device will be ignored ++ LsblkEntry( ++ name='/dev/mapper/tst2', kname='/dev/dm-1', maj_min='253:0', rm='0', size='9G', bsize=9*(1 << 39), ro='0', ++ tp='crypt', parent_name='', parent_path='', mountpoint='' ++ ) ++ ] ++ ++ current_actor_context.feed(StorageInfo(lsblk=with_luks)) ++ current_actor_context.run() ++ ++ luks_dumps = current_actor_context.consume(LuksDumps) ++ assert len(luks_dumps) == 1 ++ assert len(luks_dumps[0].dumps) == 1 ++ luks_dump = luks_dumps[0].dumps[0] ++ ++ assert luks_dump.version == luks_version ++ assert luks_dump.uuid == uuid ++ assert luks_dump.device_name == "nvme0n1p3" ++ assert luks_dump.device_path == "/dev/nvme0n1p3" ++ assert len(luks_dump.tokens) == len(tokens_assert) ++ ++ for token in luks_dump.tokens: ++ assert token.token_id in tokens_assert ++ assert token.keyslot == tokens_assert[token.token_id]["keyslot"] ++ assert token.token_type == tokens_assert[token.token_id]["token_type"] +diff --git a/repos/system_upgrade/common/models/luksdump.py b/repos/system_upgrade/common/models/luksdump.py +new file mode 100644 +index 00000000..83b56ef8 +--- /dev/null ++++ 
b/repos/system_upgrade/common/models/luksdump.py
+@@ -0,0 +1,73 @@
++from leapp.models import fields, Model
++from leapp.topics import SystemInfoTopic
++
++
++class LuksToken(Model):
++    """
++    Represents a single token associated with the LUKS device.
++
++    Note this model is supposed to be used just as part of the LuksDump msg.
++    """
++    topic = SystemInfoTopic
++
++    token_id = fields.Integer()
++    """
++    Token ID (as seen in the luksDump)
++    """
++
++    keyslot = fields.Integer()
++    """
++    ID of the associated keyslot
++    """
++
++    token_type = fields.String()
++    """
++    Type of the token. For the "clevis" type the concrete subtype (determined using
++    clevis luks list) is appended, e.g. clevis-tpm2, clevis-tang, ...
++    """
++
++
++class LuksDump(Model):
++    """
++    Information about a single LUKS-encrypted device.
++
++    Note this model is supposed to be used as a part of the LuksDumps msg.
++    """
++    topic = SystemInfoTopic
++
++    version = fields.Integer()
++    """
++    LUKS version
++    """
++
++    uuid = fields.String()
++    """
++    UUID of the LUKS device
++    """
++
++    device_path = fields.String()
++    """
++    Full path to the backing device
++    """
++
++    device_name = fields.String()
++    """
++    Device name of the backing device
++    """
++
++    tokens = fields.List(fields.Model(LuksToken), default=[])
++    """
++    List of LUKS2 tokens
++    """
++
++
++class LuksDumps(Model):
++    """
++    Information about all LUKS-encrypted devices on the system.
++    """
++    topic = SystemInfoTopic
++
++    dumps = fields.List(fields.Model(LuksDump))
++    """
++    List of LuksDump models representing all the encrypted devices on the system.
++    """
+--
+2.47.0
+
diff --git a/SOURCES/0020-InhibitWhenLuks-allow-upgrades-for-LUKS2-bound-to-Cl.patch b/SOURCES/0020-InhibitWhenLuks-allow-upgrades-for-LUKS2-bound-to-Cl.patch
new file mode 100644
index 0000000..ea860ca
--- /dev/null
+++ b/SOURCES/0020-InhibitWhenLuks-allow-upgrades-for-LUKS2-bound-to-Cl.patch
@@ -0,0 +1,455 @@
+From ad241f701b39a81d132105f1a301f2f5546f498a Mon Sep 17 00:00:00 2001
+From: Daniel Zatovic
+Date: Tue, 6 Aug 2024 17:26:58 +0200
+Subject: [PATCH 20/40] InhibitWhenLuks: allow upgrades for LUKS2 bound to
+ Clevis TPM2 token
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+So far, upgrades with encrypted drives were not supported. Encrypted
+drives require interactively typing unlock passphrases, which is not
+suitable for automatic upgrades using Leapp. We add a feature where
+systems whose drives are all configured with an automatic unlock method
+can be upgraded.
+
+Currently, we only support drives configured with a Clevis/TPM2 token,
+because networking is not configured during the Leapp upgrade
+(excluding NBDE).
+
+We consume the LuksDumps message to decide whether the upgrade process
+should be inhibited. If there is at least one LUKS2 device without a
+Clevis TPM2 binding, we inhibit the upgrade because we cannot tell
+whether the device is part of a more complex storage stack, and a
+failure to unlock the device might cause boot problems.
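+
+A minimal sketch of the decision, for illustration only (the helper
+name is made up; the actual logic lives in the new inhibitwhenluks
+library below):
+
+    def _upgrade_possible(dump, ceph_volumes):
+        # Ceph-managed volumes are handled separately and do not inhibit.
+        if dump.device_name in ceph_volumes:
+            return True
+        # Only LUKS2 devices bound to a Clevis TPM2 token can be
+        # unlocked automatically during the upgrade.
+        return dump.version == 2 and any(
+            token.token_type == 'clevis-tpm2' for token in dump.tokens)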
+
+Co-authored-by: Petr Stodůlka
+---
+ .../common/actors/inhibitwhenluks/actor.py | 38 ++--
+ .../libraries/inhibitwhenluks.py | 164 +++++++++++++++++
+ .../tests/test_inhibitwhenluks.py | 169 ++++++++++++++++--
+ 3 files changed, 329 insertions(+), 42 deletions(-)
+ create mode 100644 repos/system_upgrade/common/actors/inhibitwhenluks/libraries/inhibitwhenluks.py
+
+diff --git a/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py b/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py
+index 40b845b0..65607167 100644
+--- a/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py
++++ b/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py
+@@ -1,40 +1,24 @@
+-from leapp import reporting
+ from leapp.actors import Actor
+-from leapp.models import CephInfo, StorageInfo
+-from leapp.reporting import create_report, Report
++from leapp.libraries.actor.inhibitwhenluks import check_invalid_luks_devices
++from leapp.models import CephInfo, LuksDumps, StorageInfo, TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks
++from leapp.reporting import Report
+ from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
+ 
+ 
+ class InhibitWhenLuks(Actor):
+     """
+-    Check if any encrypted partitions is in use. If yes, inhibit the upgrade process.
++    Check if any encrypted partitions are in use and whether they are supported for the upgrade.
+ 
+-    Upgrading system with encrypted partition is not supported.
++    Upgrading an EL7 system with encrypted partitions is not supported (except for Ceph OSDs).
++    For EL8+ it is OK if the discovered encrypted storage in use has the LUKS2 format
++    and is bound to a clevis-tpm2 token (so it can be automatically unlocked
++    during the process).
+     """
+ 
+     name = 'check_luks_and_inhibit'
+-    consumes = (StorageInfo, CephInfo)
+-    produces = (Report,)
++    consumes = (CephInfo, LuksDumps, StorageInfo)
++    produces = (Report, TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks)
+     tags = (ChecksPhaseTag, IPUWorkflowTag)
+ 
+     def process(self):
+-        # If encrypted Ceph volumes present, check if there are more encrypted disk in lsblk than Ceph vol
+-        ceph_vol = []
+-        try:
+-            ceph_info = next(self.consume(CephInfo))
+-            if ceph_info:
+-                ceph_vol = ceph_info.encrypted_volumes[:]
+-        except StopIteration:
+-            pass
+-
+-        for storage_info in self.consume(StorageInfo):
+-            for blk in storage_info.lsblk:
+-                if blk.tp == 'crypt' and blk.name not in ceph_vol:
+-                    create_report([
+-                        reporting.Title('LUKS encrypted partition detected'),
+-                        reporting.Summary('Upgrading system with encrypted partitions is not supported'),
+-                        reporting.Severity(reporting.Severity.HIGH),
+-                        reporting.Groups([reporting.Groups.BOOT, reporting.Groups.ENCRYPTION]),
+-                        reporting.Groups([reporting.Groups.INHIBITOR]),
+-                    ])
+-                    break
++        check_invalid_luks_devices()
+diff --git a/repos/system_upgrade/common/actors/inhibitwhenluks/libraries/inhibitwhenluks.py b/repos/system_upgrade/common/actors/inhibitwhenluks/libraries/inhibitwhenluks.py
+new file mode 100644
+index 00000000..57a94e9d
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/inhibitwhenluks/libraries/inhibitwhenluks.py
+@@ -0,0 +1,164 @@
++from leapp import reporting
++from leapp.libraries.common.config.version import get_source_major_version
++from leapp.libraries.stdlib import api
++from leapp.models import (
++    CephInfo,
++    DracutModule,
++    LuksDumps,
++    StorageInfo,
++    TargetUserSpaceUpgradeTasks,
++    UpgradeInitramfsTasks
++)
++from leapp.reporting import create_report
++
++# https://red.ht/clevis-tpm2-luks-auto-unlock-rhel8
++# https://red.ht/clevis-tpm2-luks-auto-unlock-rhel9
https://red.ht/clevis-tpm2-luks-auto-unlock-rhel9 ++# https://red.ht/convert-to-luks2-rhel8 ++# https://red.ht/convert-to-luks2-rhel9 ++CLEVIS_DOC_URL_FMT = 'https://red.ht/clevis-tpm2-luks-auto-unlock-rhel{}' ++LUKS2_CONVERT_DOC_URL_FMT = 'https://red.ht/convert-to-luks2-rhel{}' ++ ++FMT_LIST_SEPARATOR = '\n - ' ++ ++ ++def _formatted_list_output(input_list, sep=FMT_LIST_SEPARATOR): ++ return ['{}{}'.format(sep, item) for item in input_list] ++ ++ ++def _at_least_one_tpm_token(luks_dump): ++ return any([token.token_type == "clevis-tpm2" for token in luks_dump.tokens]) ++ ++ ++def _get_ceph_volumes(): ++ ceph_info = next(api.consume(CephInfo), None) ++ return ceph_info.encrypted_volumes[:] if ceph_info else [] ++ ++ ++def apply_obsoleted_check_ipu_7_8(): ++ ceph_vol = _get_ceph_volumes() ++ for storage_info in api.consume(StorageInfo): ++ for blk in storage_info.lsblk: ++ if blk.tp == 'crypt' and blk.name not in ceph_vol: ++ create_report([ ++ reporting.Title('LUKS encrypted partition detected'), ++ reporting.Summary('Upgrading system with encrypted partitions is not supported'), ++ reporting.Severity(reporting.Severity.HIGH), ++ reporting.Groups([reporting.Groups.BOOT, reporting.Groups.ENCRYPTION]), ++ reporting.Groups([reporting.Groups.INHIBITOR]), ++ ]) ++ break ++ ++ ++def report_inhibitor(luks1_partitions, no_tpm2_partitions): ++ source_major_version = get_source_major_version() ++ clevis_doc_url = CLEVIS_DOC_URL_FMT.format(source_major_version) ++ luks2_convert_doc_url = LUKS2_CONVERT_DOC_URL_FMT.format(source_major_version) ++ summary = ( ++ 'We have detected LUKS encrypted volumes that do not meet current' ++ ' criteria to be able to proceed with the in-place upgrade process.' ++ ' Right now the upgrade process requires the encrypted storage to be' ++ ' in LUKS2 format configured with Clevis TPM 2.0.' ++ ) ++ ++ report_hints = [] ++ ++ if luks1_partitions: ++ ++ summary += ( ++ '\n\nSince RHEL 8 the default format for LUKS encryption is LUKS2.' ++ ' Although the old LUKS1 format is still supported on RHEL systems,' ++ ' it has some limitations in comparison to LUKS2.' ++ ' Only the LUKS2 format is supported for upgrades.' ++ ' The following LUKS1 partitions have been discovered on your system:{}' ++ .format(''.join(_formatted_list_output(luks1_partitions))) ++ ) ++ report_hints.append(reporting.Remediation( ++ hint=( ++ 'Convert your LUKS1 encrypted devices to LUKS2 and bind them to TPM2 using clevis.' ++ ' If this is not possible in your case, consider a clean installation' ++ ' of the target RHEL system instead.' ++ ) ++ )) ++ report_hints.append(reporting.ExternalLink( ++ url=luks2_convert_doc_url, ++ title='LUKS versions in RHEL: Conversion' ++ )) ++ ++ if no_tpm2_partitions: ++ summary += ( ++ '\n\nCurrently we require the process to be non-interactive and' ++ ' offline. For this reason we require automatic unlock of' ++ ' encrypted devices during the upgrade process.' ++ ' Currently we support automatic unlocking during the upgrade only' ++ ' for volumes bound to a Clevis TPM2 token.' ++ ' The following LUKS2 devices without Clevis TPM2 token' ++ ' have been discovered on your system: {}' ++ .format(''.join(_formatted_list_output(no_tpm2_partitions))) ++ ) ++ ++ report_hints.append(reporting.Remediation( ++ hint=( ++ 'Add Clevis TPM2 binding to LUKS devices.' ++ ' If some LUKS devices still use the old LUKS1 format, convert' ++ ' them to LUKS2 prior to binding.' 
++ ) ++ )) ++ report_hints.append(reporting.ExternalLink( ++ url=clevis_doc_url, ++ title='Configuring manual enrollment of LUKS-encrypted volumes by using a TPM 2.0 policy' ++ ) ++ ) ++ create_report([ ++ reporting.Title('Detected LUKS devices unsuitable for in-place upgrade.'), ++ reporting.Summary(summary), ++ reporting.Severity(reporting.Severity.HIGH), ++ reporting.Groups([reporting.Groups.BOOT, reporting.Groups.ENCRYPTION]), ++ reporting.Groups([reporting.Groups.INHIBITOR]), ++ ] + report_hints) ++ ++ ++def check_invalid_luks_devices(): ++ if get_source_major_version() == '7': ++ # NOTE: keeping unchanged behaviour for IPU 7 -> 8 ++ apply_obsoleted_check_ipu_7_8() ++ return ++ ++ luks_dumps = next(api.consume(LuksDumps), None) ++ if not luks_dumps: ++ api.current_logger().debug('No LUKS volumes detected. Skipping.') ++ return ++ ++ luks1_partitions = [] ++ no_tpm2_partitions = [] ++ ceph_vol = _get_ceph_volumes() ++ for luks_dump in luks_dumps.dumps: ++ # if the device is managed by ceph, don't inhibit ++ if luks_dump.device_name in ceph_vol: ++ api.current_logger().debug('Skipping LUKS CEPH volume: {}'.format(luks_dump.device_name)) ++ continue ++ ++ if luks_dump.version == 1: ++ luks1_partitions.append(luks_dump.device_name) ++ elif luks_dump.version == 2 and not _at_least_one_tpm_token(luks_dump): ++ no_tpm2_partitions.append(luks_dump.device_name) ++ ++ if luks1_partitions or no_tpm2_partitions: ++ report_inhibitor(luks1_partitions, no_tpm2_partitions) ++ else: ++ required_crypt_rpms = [ ++ 'clevis', ++ 'clevis-dracut', ++ 'clevis-systemd', ++ 'clevis-udisks2', ++ 'clevis-luks', ++ 'cryptsetup', ++ 'tpm2-tss', ++ 'tpm2-tools', ++ 'tpm2-abrmd' ++ ] ++ api.produce(TargetUserSpaceUpgradeTasks(install_rpms=required_crypt_rpms)) ++ api.produce(UpgradeInitramfsTasks(include_dracut_modules=[ ++ DracutModule(name='clevis'), ++ DracutModule(name='clevis-pin-tpm2') ++ ]) ++ ) +diff --git a/repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py b/repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py +index 405a3429..d559b54c 100644 +--- a/repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py ++++ b/repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py +@@ -1,34 +1,173 @@ +-from leapp.models import CephInfo, LsblkEntry, StorageInfo ++""" ++Unit tests for inhibitwhenluks actor ++ ++Skip isort as it's kind of broken when mixing grid import and one line imports ++ ++isort:skip_file ++""" ++ ++from leapp.libraries.common.config import version ++from leapp.models import ( ++ CephInfo, ++ LsblkEntry, ++ LuksDump, ++ LuksDumps, ++ LuksToken, ++ StorageInfo, ++ TargetUserSpaceUpgradeTasks, ++ UpgradeInitramfsTasks ++) + from leapp.reporting import Report + from leapp.snactor.fixture import current_actor_context + from leapp.utils.report import is_inhibitor + ++_REPORT_TITLE_UNSUITABLE = 'Detected LUKS devices unsuitable for in-place upgrade.' 
+ +-def test_actor_with_luks(current_actor_context): +- with_luks = [LsblkEntry(name='luks-132', kname='kname1', maj_min='253:0', rm='0', size='10G', bsize=10*(1 << 39), +- ro='0', tp='crypt', mountpoint='', parent_name='', parent_path='')] + +- current_actor_context.feed(StorageInfo(lsblk=with_luks)) ++def test_actor_with_luks1_notpm(monkeypatch, current_actor_context): ++ monkeypatch.setattr(version, 'get_source_major_version', lambda: '8') ++ luks_dump = LuksDump( ++ version=1, ++ uuid='dd09e6d4-b595-4f1c-80b8-fd47540e6464', ++ device_path='/dev/sda', ++ device_name='sda') ++ current_actor_context.feed(LuksDumps(dumps=[luks_dump])) ++ current_actor_context.feed(CephInfo(encrypted_volumes=[])) + current_actor_context.run() + assert current_actor_context.consume(Report) + report_fields = current_actor_context.consume(Report)[0].report + assert is_inhibitor(report_fields) ++ assert not current_actor_context.consume(TargetUserSpaceUpgradeTasks) ++ assert not current_actor_context.consume(UpgradeInitramfsTasks) + ++ assert report_fields['title'] == _REPORT_TITLE_UNSUITABLE ++ assert 'LUKS1 partitions have been discovered' in report_fields['summary'] ++ assert luks_dump.device_name in report_fields['summary'] + +-def test_actor_with_luks_ceph_only(current_actor_context): +- with_luks = [LsblkEntry(name='luks-132', kname='kname1', maj_min='253:0', rm='0', size='10G', bsize=10*(1 << 39), +- ro='0', tp='crypt', mountpoint='', parent_name='', parent_path='')] +- ceph_volume = ['luks-132'] +- current_actor_context.feed(StorageInfo(lsblk=with_luks)) +- current_actor_context.feed(CephInfo(encrypted_volumes=ceph_volume)) ++ ++def test_actor_with_luks2_notpm(monkeypatch, current_actor_context): ++ monkeypatch.setattr(version, 'get_source_major_version', lambda: '8') ++ luks_dump = LuksDump( ++ version=2, ++ uuid='27b57c75-9adf-4744-ab04-9eb99726a301', ++ device_path='/dev/sda', ++ device_name='sda') ++ current_actor_context.feed(LuksDumps(dumps=[luks_dump])) ++ current_actor_context.feed(CephInfo(encrypted_volumes=[])) ++ current_actor_context.run() ++ assert current_actor_context.consume(Report) ++ report_fields = current_actor_context.consume(Report)[0].report ++ assert is_inhibitor(report_fields) ++ assert not current_actor_context.consume(TargetUserSpaceUpgradeTasks) ++ assert not current_actor_context.consume(UpgradeInitramfsTasks) ++ ++ assert report_fields['title'] == _REPORT_TITLE_UNSUITABLE ++ assert 'LUKS2 devices without Clevis TPM2 token' in report_fields['summary'] ++ assert luks_dump.device_name in report_fields['summary'] ++ ++ ++def test_actor_with_luks2_invalid_token(monkeypatch, current_actor_context): ++ monkeypatch.setattr(version, 'get_source_major_version', lambda: '8') ++ luks_dump = LuksDump( ++ version=2, ++ uuid='dc1dbe37-6644-4094-9839-8fc5dcbec0c6', ++ device_path='/dev/sda', ++ device_name='sda', ++ tokens=[LuksToken(token_id=0, keyslot=1, token_type='clevis')]) ++ current_actor_context.feed(LuksDumps(dumps=[luks_dump])) ++ current_actor_context.feed(CephInfo(encrypted_volumes=[])) ++ current_actor_context.run() ++ assert current_actor_context.consume(Report) ++ report_fields = current_actor_context.consume(Report)[0].report ++ assert is_inhibitor(report_fields) ++ ++ assert report_fields['title'] == _REPORT_TITLE_UNSUITABLE ++ assert 'LUKS2 devices without Clevis TPM2 token' in report_fields['summary'] ++ assert luks_dump.device_name in report_fields['summary'] ++ assert not current_actor_context.consume(TargetUserSpaceUpgradeTasks) ++ assert not 
current_actor_context.consume(UpgradeInitramfsTasks) ++ ++ ++def test_actor_with_luks2_clevis_tpm_token(monkeypatch, current_actor_context): ++ monkeypatch.setattr(version, 'get_source_major_version', lambda: '8') ++ luks_dump = LuksDump( ++ version=2, ++ uuid='83050bd9-61c6-4ff0-846f-bfd3ac9bfc67', ++ device_path='/dev/sda', ++ device_name='sda', ++ tokens=[LuksToken(token_id=0, keyslot=1, token_type='clevis-tpm2')]) ++ current_actor_context.feed(LuksDumps(dumps=[luks_dump])) ++ current_actor_context.feed(CephInfo(encrypted_volumes=[])) + current_actor_context.run() + assert not current_actor_context.consume(Report) + ++ upgrade_tasks = current_actor_context.consume(TargetUserSpaceUpgradeTasks) ++ assert len(upgrade_tasks) == 1 ++ assert set(upgrade_tasks[0].install_rpms) == set([ ++ 'clevis', ++ 'clevis-dracut', ++ 'clevis-systemd', ++ 'clevis-udisks2', ++ 'clevis-luks', ++ 'cryptsetup', ++ 'tpm2-tss', ++ 'tpm2-tools', ++ 'tpm2-abrmd' ++ ]) ++ assert current_actor_context.consume(UpgradeInitramfsTasks) + +-def test_actor_without_luks(current_actor_context): +- without_luks = [LsblkEntry(name='sda1', kname='sda1', maj_min='8:0', rm='0', size='10G', bsize=10*(1 << 39), +- ro='0', tp='part', mountpoint='/boot', parent_name='', parent_path='')] + +- current_actor_context.feed(StorageInfo(lsblk=without_luks)) ++def test_actor_with_luks2_ceph(monkeypatch, current_actor_context): ++ monkeypatch.setattr(version, 'get_source_major_version', lambda: '8') ++ ceph_volume = ['sda'] ++ current_actor_context.feed(CephInfo(encrypted_volumes=ceph_volume)) ++ luks_dump = LuksDump( ++ version=2, ++ uuid='0edb8c11-1a04-4abd-a12d-93433ee7b8d8', ++ device_path='/dev/sda', ++ device_name='sda', ++ tokens=[LuksToken(token_id=0, keyslot=1, token_type='clevis')]) ++ current_actor_context.feed(LuksDumps(dumps=[luks_dump])) + current_actor_context.run() + assert not current_actor_context.consume(Report) ++ ++ # make sure we don't needlessly include clevis packages, when there is no clevis token ++ assert not current_actor_context.consume(TargetUserSpaceUpgradeTasks) ++ ++ ++LSBLK_ENTRY = LsblkEntry( ++ name="luks-whatever", ++ kname="dm-0", ++ maj_min="252:1", ++ rm="0", ++ size="1G", ++ bsize=1073741824, ++ ro="0", ++ tp="crypt", ++ mountpoint="/", ++ parent_name="", ++ parent_path="" ++) ++ ++ ++def test_inhibitor_on_el7(monkeypatch, current_actor_context): ++ # NOTE(pstodulk): consider it good enough as el7 stuff is going to be removed ++ # soon. 
++ monkeypatch.setattr(version, 'get_source_major_version', lambda: '7') ++ ++ luks_dump = LuksDump( ++ version=2, ++ uuid='83050bd9-61c6-4ff0-846f-bfd3ac9bfc67', ++ device_path='/dev/sda', ++ device_name='sda', ++ tokens=[LuksToken(token_id=0, keyslot=1, token_type='clevis-tpm2')]) ++ current_actor_context.feed(LuksDumps(dumps=[luks_dump])) ++ current_actor_context.feed(CephInfo(encrypted_volumes=[])) ++ ++ current_actor_context.feed(StorageInfo(lsblk=[LSBLK_ENTRY])) ++ current_actor_context.run() ++ assert current_actor_context.consume(Report) ++ ++ report_fields = current_actor_context.consume(Report)[0].report ++ assert is_inhibitor(report_fields) ++ assert report_fields['title'] == 'LUKS encrypted partition detected' +-- +2.47.0 + diff --git a/SOURCES/0021-Rename-inhibitwhenluks-actor-to-checkluks.patch b/SOURCES/0021-Rename-inhibitwhenluks-actor-to-checkluks.patch new file mode 100644 index 0000000..6d5e039 --- /dev/null +++ b/SOURCES/0021-Rename-inhibitwhenluks-actor-to-checkluks.patch @@ -0,0 +1,57 @@ +From 8e5fe75e4ee76eb62eb51001c28f1f1443f0a563 Mon Sep 17 00:00:00 2001 +From: Petr Stodulka +Date: Fri, 18 Oct 2024 07:13:42 +0200 +Subject: [PATCH 21/40] Rename inhibitwhenluks actor to checkluks + +The actor nowadays does more than just inhibiting the upgrade when +LUKS is detected. Let's rename it to reflect its current behaviour. +--- + .../common/actors/{inhibitwhenluks => checkluks}/actor.py | 6 +++--- + .../inhibitwhenluks.py => checkluks/libraries/checkluks.py} | 0 + .../tests/test_checkluks.py} | 0 + 3 files changed, 3 insertions(+), 3 deletions(-) + rename repos/system_upgrade/common/actors/{inhibitwhenluks => checkluks}/actor.py (85%) + rename repos/system_upgrade/common/actors/{inhibitwhenluks/libraries/inhibitwhenluks.py => checkluks/libraries/checkluks.py} (100%) + rename repos/system_upgrade/common/actors/{inhibitwhenluks/tests/test_inhibitwhenluks.py => checkluks/tests/test_checkluks.py} (100%) + +diff --git a/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py b/repos/system_upgrade/common/actors/checkluks/actor.py +similarity index 85% +rename from repos/system_upgrade/common/actors/inhibitwhenluks/actor.py +rename to repos/system_upgrade/common/actors/checkluks/actor.py +index 65607167..607fd040 100644 +--- a/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py ++++ b/repos/system_upgrade/common/actors/checkluks/actor.py +@@ -1,11 +1,11 @@ + from leapp.actors import Actor +-from leapp.libraries.actor.inhibitwhenluks import check_invalid_luks_devices ++from leapp.libraries.actor.checkluks import check_invalid_luks_devices + from leapp.models import CephInfo, LuksDumps, StorageInfo, TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks + from leapp.reporting import Report + from leapp.tags import ChecksPhaseTag, IPUWorkflowTag + + +-class InhibitWhenLuks(Actor): ++class CheckLuks(Actor): + """ + Check if any encrypted partitions are in use and whether they are supported for the upgrade. + +@@ -15,7 +15,7 @@ class InhibitWhenLuks(Actor): + during the process). + """ + +- name = 'check_luks_and_inhibit' ++ name = 'check_luks' + consumes = (CephInfo, LuksDumps, StorageInfo) + produces = (Report, TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks) + tags = (ChecksPhaseTag, IPUWorkflowTag) +diff --git a/repos/system_upgrade/common/actors/inhibitwhenluks/libraries/inhibitwhenluks.py b/repos/system_upgrade/common/actors/checkluks/libraries/checkluks.py +similarity index 100% +rename from repos/system_upgrade/common/actors/inhibitwhenluks/libraries/inhibitwhenluks.py +rename to repos/system_upgrade/common/actors/checkluks/libraries/checkluks.py +diff --git a/repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py b/repos/system_upgrade/common/actors/checkluks/tests/test_checkluks.py +similarity index 100% +rename from repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py +rename to repos/system_upgrade/common/actors/checkluks/tests/test_checkluks.py +-- +2.47.0 + diff --git a/SOURCES/0022-Fix-IPU-being-blocked-by-resource-limitations.patch b/SOURCES/0022-Fix-IPU-being-blocked-by-resource-limitations.patch new file mode 100644 index 0000000..eba154d --- /dev/null +++ b/SOURCES/0022-Fix-IPU-being-blocked-by-resource-limitations.patch @@ -0,0 +1,172 @@ +From 5e6d176ab685f2e85ac1aea9533b04d46f25e9b7 Mon Sep 17 00:00:00 2001 +From: tomasfratrik +Date: Tue, 18 Jun 2024 10:22:35 +0200 +Subject: [PATCH 22/40] Fix IPU being blocked by resource limitations + +The first resource limit is the maximum number of open file descriptors; +the second one is the limit on the maximum writable file size. Unit tests +are added as well. + +Resolves: RHEL-26459 and RHEL-16881 +--- + commands/command_utils.py | 38 ++++++++++++++++++ + commands/preupgrade/__init__.py | 2 + + commands/tests/test_upgrade_paths.py | 60 ++++++++++++++++++++++++++++ + commands/upgrade/__init__.py | 3 ++ + 4 files changed, 103 insertions(+) + +diff --git a/commands/command_utils.py b/commands/command_utils.py +index 4f6f99eb..2810a542 100644 +--- a/commands/command_utils.py ++++ b/commands/command_utils.py +@@ -1,6 +1,7 @@ + import json + import os + import re ++import resource + + from leapp.exceptions import CommandError + from leapp.utils import path +@@ -140,3 +141,40 @@ def vet_upgrade_path(args): + flavor=flavor, + choices=','.join(supported_target_versions))) + return (target_release, flavor) ++ ++ ++def set_resource_limits(): ++ """ ++ Set resource limits for the maximum number of open file descriptors and the maximum writable file size. ++ ++ :raises: `CommandError` if the resource limits cannot be set ++ """ ++ ++ def set_resource_limit(resource_type, soft, hard): ++ rtype_string = ( ++ 'open file descriptors' if resource_type == resource.RLIMIT_NOFILE ++ else 'writable file size' if resource_type == resource.RLIMIT_FSIZE ++ else 'unknown resource' ++ ) ++ try: ++ resource.setrlimit(resource_type, (soft, hard)) ++ except ValueError as err: ++ raise CommandError( ++ 'Failure occurred while attempting to set soft limit higher than the hard limit. ' ++ 'Resource type: {}, error: {}'.format(rtype_string, err) ++ ) ++ except OSError as err: ++ raise CommandError( ++ 'Failed to set resource limit. 
Resource type: {}, error: {}'.format(rtype_string, err) ++ ) ++ ++ soft_nofile, _ = resource.getrlimit(resource.RLIMIT_NOFILE) ++ soft_fsize, _ = resource.getrlimit(resource.RLIMIT_FSIZE) ++ nofile_limit = 1024*16 ++ fsize_limit = resource.RLIM_INFINITY ++ ++ if soft_nofile < nofile_limit: ++ set_resource_limit(resource.RLIMIT_NOFILE, nofile_limit, nofile_limit) ++ ++ if soft_fsize != fsize_limit: ++ set_resource_limit(resource.RLIMIT_FSIZE, fsize_limit, fsize_limit) +diff --git a/commands/preupgrade/__init__.py b/commands/preupgrade/__init__.py +index 5a89069f..a9fa40e0 100644 +--- a/commands/preupgrade/__init__.py ++++ b/commands/preupgrade/__init__.py +@@ -59,6 +59,8 @@ def preupgrade(args, breadcrumbs): + except LeappError as exc: + raise CommandError(exc.message) + ++ command_utils.set_resource_limits() ++ + workflow = repositories.lookup_workflow('IPUWorkflow')() + util.warn_if_unsupported(configuration) + util.process_whitelist_experimental(repositories, workflow, configuration, logger) +diff --git a/commands/tests/test_upgrade_paths.py b/commands/tests/test_upgrade_paths.py +index 53f081a5..f1312f66 100644 +--- a/commands/tests/test_upgrade_paths.py ++++ b/commands/tests/test_upgrade_paths.py +@@ -1,3 +1,5 @@ ++import resource ++ + import mock + import pytest + +@@ -50,3 +52,61 @@ def test_vet_upgrade_path(mock_open, monkeypatch): + monkeypatch.setenv('LEAPP_DEVEL_TARGET_RELEASE', '9.0') + args = mock.Mock(target='1.2') + assert command_utils.vet_upgrade_path(args) == ('9.0', 'default') ++ ++ ++def _mock_getrlimit_factory(nofile_limits=(1024, 4096), fsize_limits=(1024, 4096)): ++ """ ++ Factory function to create a mock `getrlimit` function with configurable return values. ++ The default param values are lower than the expected values. ++ ++ :param nofile_limits: Tuple representing (soft, hard) limits for `RLIMIT_NOFILE` ++ :param fsize_limits: Tuple representing (soft, hard) limits for `RLIMIT_FSIZE` ++ :return: A mock `getrlimit` function ++ """ ++ def mock_getrlimit(resource_type): ++ if resource_type == resource.RLIMIT_NOFILE: ++ return nofile_limits ++ if resource_type == resource.RLIMIT_FSIZE: ++ return fsize_limits ++ return (0, 0) ++ ++ return mock_getrlimit ++ ++ ++@pytest.mark.parametrize("nofile_limits, fsize_limits, expected_calls", [ ++ # Case where both limits need to be increased ++ ((1024, 4096), (1024, 4096), [ ++ (resource.RLIMIT_NOFILE, (1024*16, 1024*16)), ++ (resource.RLIMIT_FSIZE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY)) ++ ]), ++ # Case where neither limit needs to be changed ++ ((1024*16, 1024*16), (resource.RLIM_INFINITY, resource.RLIM_INFINITY), []) ++]) ++def test_set_resource_limits_increase(monkeypatch, nofile_limits, fsize_limits, expected_calls): ++ setrlimit_called = [] ++ ++ def mock_setrlimit(resource_type, limits): ++ setrlimit_called.append((resource_type, limits)) ++ ++ monkeypatch.setattr(resource, "getrlimit", _mock_getrlimit_factory(nofile_limits, fsize_limits)) ++ monkeypatch.setattr(resource, "setrlimit", mock_setrlimit) ++ ++ command_utils.set_resource_limits() ++ ++ assert setrlimit_called == expected_calls ++ ++ ++@pytest.mark.parametrize("errortype, expected_message", [ ++ (OSError, "Failed to set resource limit"), ++ (ValueError, "Failure occurred while attempting to set soft limit higher than the hard limit") ++]) ++def test_set_resource_limits_exceptions(monkeypatch, errortype, expected_message): ++ monkeypatch.setattr(resource, "getrlimit", _mock_getrlimit_factory()) ++ ++ def mock_setrlimit(*args, **kwargs): ++ raise 
errortype("mocked error") ++ ++ monkeypatch.setattr(resource, "setrlimit", mock_setrlimit) ++ ++ with pytest.raises(CommandError, match=expected_message): ++ command_utils.set_resource_limits() +diff --git a/commands/upgrade/__init__.py b/commands/upgrade/__init__.py +index 1e15b59c..c7487fde 100644 +--- a/commands/upgrade/__init__.py ++++ b/commands/upgrade/__init__.py +@@ -89,6 +89,9 @@ def upgrade(args, breadcrumbs): + repositories = util.load_repositories() + except LeappError as exc: + raise CommandError(exc.message) ++ ++ command_utils.set_resource_limits() ++ + workflow = repositories.lookup_workflow('IPUWorkflow')(auto_reboot=args.reboot) + util.process_whitelist_experimental(repositories, workflow, configuration, logger) + util.warn_if_unsupported(configuration) +-- +2.47.0 + diff --git a/SOURCES/0023-feature-add-possibility-to-use-net.naming-scheme.patch b/SOURCES/0023-feature-add-possibility-to-use-net.naming-scheme.patch new file mode 100644 index 0000000..e68f22b --- /dev/null +++ b/SOURCES/0023-feature-add-possibility-to-use-net.naming-scheme.patch @@ -0,0 +1,675 @@ +From e1bdf2c02dd193cdd7a2da95e2a3cfa5e6e1e8b3 Mon Sep 17 00:00:00 2001 +From: mhecko +Date: Mon, 29 Apr 2024 11:16:46 +0200 +Subject: [PATCH 23/40] feature: add possibility to use net.naming-scheme + +Leapp writes .link files to prevent interfaces being renamed +after booting into the post-upgrade system. This patch adds a less +error-prone approach that uses the net.naming-scheme kernel param. +The naming-scheme tells udev what hardware properties to use +when composing a device name. Moreover, the possible values of this +parameter are coarse-grained "profiles" that tell udev to +behave as it did on, e.g., RHEL 8.0. + +The functionality is enabled by setting the LEAPP_USE_NET_NAMING_SCHEMES +environment variable to 1. If the feature is enabled, the .link +file generation is disabled. A kernel parameter `net.naming-scheme=<scheme>` +is added to the upgrade boot entry and the post-upgrade entry. +The value of the parameter will be `rhel-<source_major>.0`. Note +that the source minor version is *not used*: using the source minor +version instead of 0 would cause the device names to change slightly, +so we use 0. Moreover, an extra RPM named `rhel-net-naming-sysattrs` +is installed to the target system and the target userspace container. +The RPM provides definitions of the "profiles" for net.naming-scheme. + +The feature is available only for 8>9 and higher. When upgrading 7>8 +with LEAPP_USE_NET_NAMING_SCHEMES=1, the value is ignored. + +Add a possibility to use the net.naming-scheme cmdline argument +to keep network interface names immutable during the upgrade. +The feature can be used only for 8>9 upgrades and higher. +To enable the feature, use LEAPP_USE_NET_NAMING_SCHEMES=1. 
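For illustration, the kernel argument emitted for an 8>9 upgrade would be composed roughly as follows (a sketch of the logic introduced below; the source major version '8' is assumed here instead of being queried via get_source_major_version()):

# Sketch: compose the net.naming-scheme kernel cmdline argument.
source_major_version = '8'  # assumed; the actor obtains this from leapp's version library
naming_scheme = 'rhel-{0}.0'.format(source_major_version)  # the minor version is deliberately pinned to 0
print('net.naming-scheme={0}'.format(naming_scheme))  # -> net.naming-scheme=rhel-8.0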
+ +Jira-ref: RHEL-23473 +--- + .../actors/addupgradebootentry/actor.py | 10 +- + .../libraries/addupgradebootentry.py | 78 ++++++++++----- + .../tests/unit_test_addupgradebootentry.py | 47 ++++----- + .../actors/kernelcmdlineconfig/actor.py | 16 +++- + .../libraries/kernelcmdlineconfig.py | 12 ++- + .../libraries/persistentnetnamesconfig.py | 5 +- + .../common/models/kernelcmdlineargs.py | 21 ++++ + .../actors/emit_net_naming_scheme/actor.py | 28 ++++++ + .../libraries/emit_net_naming.py | 63 ++++++++++++ + .../tests/test_emit_net_naming_scheme.py | 95 +++++++++++++++++++ + 10 files changed, 318 insertions(+), 57 deletions(-) + create mode 100644 repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/actor.py + create mode 100644 repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py + create mode 100644 repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/tests/test_emit_net_naming_scheme.py + +diff --git a/repos/system_upgrade/common/actors/addupgradebootentry/actor.py b/repos/system_upgrade/common/actors/addupgradebootentry/actor.py +index f400ebf8..e4ecf39e 100644 +--- a/repos/system_upgrade/common/actors/addupgradebootentry/actor.py ++++ b/repos/system_upgrade/common/actors/addupgradebootentry/actor.py +@@ -8,11 +8,13 @@ from leapp.models import ( + FirmwareFacts, + GrubConfigError, + KernelCmdline, ++ LateTargetKernelCmdlineArgTasks, + LiveImagePreparationInfo, + LiveModeArtifacts, + LiveModeConfig, + TargetKernelCmdlineArgTasks, +- TransactionDryRun ++ TransactionDryRun, ++ UpgradeKernelCmdlineArgTasks + ) + from leapp.tags import InterimPreparationPhaseTag, IPUWorkflowTag + +@@ -33,9 +35,11 @@ class AddUpgradeBootEntry(Actor): + LiveModeArtifacts, + LiveModeConfig, + KernelCmdline, +- TransactionDryRun ++ TransactionDryRun, ++ TargetKernelCmdlineArgTasks, ++ UpgradeKernelCmdlineArgTasks + ) +- produces = (TargetKernelCmdlineArgTasks,) ++ produces = (LateTargetKernelCmdlineArgTasks,) + tags = (IPUWorkflowTag, InterimPreparationPhaseTag) + + def process(self): +diff --git a/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py b/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py +index 553ffc35..b236e39b 100644 +--- a/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py ++++ b/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py +@@ -9,14 +9,16 @@ from leapp.models import ( + BootContent, + KernelCmdline, + KernelCmdlineArg, ++ LateTargetKernelCmdlineArgTasks, + LiveImagePreparationInfo, + LiveModeArtifacts, + LiveModeConfig, +- TargetKernelCmdlineArgTasks ++ TargetKernelCmdlineArgTasks, ++ UpgradeKernelCmdlineArgTasks + ) + + +-def collect_boot_args(livemode_enabled): ++def collect_upgrade_kernel_args(livemode_enabled): + args = { + 'enforcing': '0', + 'rd.plymouth': '0', +@@ -34,7 +36,10 @@ def collect_boot_args(livemode_enabled): + livemode_args = construct_cmdline_args_for_livemode() + args.update(livemode_args) + +- return args ++ upgrade_kernel_args = collect_set_of_kernel_args_from_msgs(UpgradeKernelCmdlineArgTasks, 'to_add') ++ args.update(upgrade_kernel_args) ++ ++ return set(args.items()) + + + def collect_undesired_args(livemode_enabled): +@@ -43,11 +48,11 @@ def collect_undesired_args(livemode_enabled): + args = dict(zip(('ro', 'rhgb', 'quiet'), itertools.repeat(None))) + args['rd.lvm.lv'] = _get_rdlvm_arg_values() + +- return args ++ return set(args.items()) + + +-def 
format_grubby_args_from_args_dict(args_dict): +- """ Format the given args dictionary in a form required by grubby's --args. """ ++def format_grubby_args_from_args_set(args_dict): ++ """ Format the given args set in a form required by grubby's --args. """ + + def fmt_single_arg(arg_pair): + key, value = arg_pair +@@ -65,7 +70,7 @@ def format_grubby_args_from_args_dict(args_dict): + else: + yield (key, value) # Just a single (key, value) pair + +- arg_sequence = itertools.chain(*(flatten_arguments(arg_pair) for arg_pair in args_dict.items())) ++ arg_sequence = itertools.chain(*(flatten_arguments(arg_pair) for arg_pair in args_dict)) + + # Sorting should be fine as only values can be None, but we cannot have a (key, None) and (key, value) in + # the dictionary at the same time. +@@ -78,7 +83,7 @@ def format_grubby_args_from_args_dict(args_dict): + def figure_out_commands_needed_to_add_entry(kernel_path, initramfs_path, args_to_add, args_to_remove): + boot_entry_modification_commands = [] + +- args_to_add_str = format_grubby_args_from_args_dict(args_to_add) ++ args_to_add_str = format_grubby_args_from_args_set(args_to_add) + + create_entry_cmd = [ + '/usr/sbin/grubby', +@@ -93,7 +98,7 @@ def figure_out_commands_needed_to_add_entry(kernel_path, initramfs_path, args_to + + # We need to update root= param separately, since we cannot do it during --add-kernel with --copy-default. + # This is likely a bug in grubby. +- root_param_value = args_to_add.get('root', None) ++ root_param_value = dict(args_to_add).get('root', None) + if root_param_value: + enforce_root_param_for_the_entry_cmd = [ + '/usr/sbin/grubby', +@@ -103,7 +108,7 @@ def figure_out_commands_needed_to_add_entry(kernel_path, initramfs_path, args_to + boot_entry_modification_commands.append(enforce_root_param_for_the_entry_cmd) + + if args_to_remove: +- args_to_remove_str = format_grubby_args_from_args_dict(args_to_remove) ++ args_to_remove_str = format_grubby_args_from_args_set(args_to_remove) + remove_undesired_args_cmd = [ + '/usr/sbin/grubby', + '--update-kernel', kernel_path, +@@ -113,18 +118,55 @@ def figure_out_commands_needed_to_add_entry(kernel_path, initramfs_path, args_to + return boot_entry_modification_commands + + ++def collect_set_of_kernel_args_from_msgs(msg_type, arg_list_field_name): ++ cmdline_modification_msgs = api.consume(msg_type) ++ lists_of_args_to_add = (getattr(msg, arg_list_field_name, []) for msg in cmdline_modification_msgs) ++ args = itertools.chain(*lists_of_args_to_add) ++ return set((arg.key, arg.value) for arg in args) ++ ++ ++def emit_removal_of_args_meant_only_for_upgrade_kernel(added_upgrade_kernel_args): ++ """ ++ Emit message requesting removal of upgrade kernel args that should not be on the target kernel. ++ ++ Target kernel args are created by copying the args of the booted (upgrade) kernel. Therefore, ++ we need to explicitly modify the target kernel cmdline, removing what should not have been copied. ++ """ ++ target_args_to_add = collect_set_of_kernel_args_from_msgs(TargetKernelCmdlineArgTasks, 'to_add') ++ actual_kernel_args = collect_set_of_kernel_args_from_msgs(KernelCmdline, 'parameters') ++ ++ # actual_kernel_args should not be changed during upgrade, unless explicitly removed by ++ # TargetKernelCmdlineArgTasks.to_remove, but that is handled by some other upgrade component. We just want ++ # to make sure we remove what was not on the source system and that we don't overwrite args to be added to target. 
++ args_not_present_on_target_kernel = added_upgrade_kernel_args - actual_kernel_args - target_args_to_add ++ ++ # We remove only what we've added and what will not be already removed by someone else. ++ args_to_remove = [KernelCmdlineArg(key=arg[0], value=arg[1]) for arg in args_not_present_on_target_kernel] ++ ++ if args_to_remove: ++ msg = ('The following upgrade kernel args were added, but they should not be present ' ++ 'on the target cmdline: `%s`, requesting removal.') ++ api.current_logger().info(msg, args_not_present_on_target_kernel) ++ args_sorted = sorted(args_to_remove, key=lambda arg: arg.key) ++ api.produce(LateTargetKernelCmdlineArgTasks(to_remove=args_sorted)) ++ ++ + def add_boot_entry(configs=None): + kernel_dst_path, initram_dst_path = get_boot_file_paths() ++ + _remove_old_upgrade_boot_entry(kernel_dst_path, configs=configs) + + livemode_enabled = next(api.consume(LiveImagePreparationInfo), None) is not None + +- cmdline_args = collect_boot_args(livemode_enabled) ++ # We have to keep the desired and unwanted args separate and modify the cmdline in two separate grubby calls. Merging ++ # these sets and trying to execute only a single command would leave the unwanted cmdline args present if they ++ # are present on the original system. ++ added_cmdline_args = collect_upgrade_kernel_args(livemode_enabled) + undesired_cmdline_args = collect_undesired_args(livemode_enabled) + + commands_to_run = figure_out_commands_needed_to_add_entry(kernel_dst_path, + initram_dst_path, +- args_to_add=cmdline_args, ++ args_to_add=added_cmdline_args, + args_to_remove=undesired_cmdline_args) + + def run_commands_adding_entry(extra_command_suffix=None): +@@ -146,16 +188,8 @@ def add_boot_entry(configs=None): + # See https://bugzilla.redhat.com/show_bug.cgi?id=1764306 + run(['/usr/sbin/zipl']) + +- if 'debug' in cmdline_args: +- # The kernelopts for target kernel are generated based on the cmdline used in the upgrade initramfs, +- # therefore, if we enabled debug above, and the original system did not have the debug kernelopt, we +- # need to explicitly remove it from the target os boot entry. +- # NOTE(mhecko): This will also unconditionally remove debug kernelopt if the source system used it. +- api.produce(TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='debug')])) +- +- # NOTE(mmatuska): This will remove the option even if the source system had it set. +- # However enforcing=0 shouldn't be set persistently anyway. 
+- api.produce(TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='enforcing', value='0')])) ++ effective_upgrade_kernel_args = added_cmdline_args - undesired_cmdline_args ++ emit_removal_of_args_meant_only_for_upgrade_kernel(effective_upgrade_kernel_args) + + except CalledProcessError as e: + raise StopActorExecutionError( +diff --git a/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py b/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py +index c4f5232b..2f58ba9e 100644 +--- a/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py ++++ b/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py +@@ -12,6 +12,7 @@ from leapp.models import ( + BootContent, + KernelCmdline, + KernelCmdlineArg, ++ LateTargetKernelCmdlineArgTasks, + LiveModeArtifacts, + LiveModeConfig, + TargetKernelCmdlineArgTasks +@@ -82,8 +83,10 @@ def test_add_boot_entry(monkeypatch, run_args, arch): + assert addupgradebootentry.run.args[0] == run_args.args_remove + assert addupgradebootentry.run.args[1] == run_args.args_add + assert api.produce.model_instances == [ +- TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='debug')]), +- TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='enforcing', value='0')]) ++ LateTargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='debug'), ++ KernelCmdlineArg(key='enforcing', value='0'), ++ KernelCmdlineArg(key='plymouth.enable', value='0'), ++ KernelCmdlineArg(key='rd.plymouth', value='0')]) + ] + + if run_args.args_zipl: +@@ -103,16 +106,16 @@ def test_debug_kernelopt_removal_task_production(monkeypatch, is_leapp_invoked_w + CurrentActorMocked(envars={'LEAPP_DEBUG': str(int(is_leapp_invoked_with_debug))})) + + addupgradebootentry.add_boot_entry() ++ assert len(api.produce.model_instances) == 1 + +- expected_produced_messages = [] +- if is_leapp_invoked_with_debug: +- expected_produced_messages = [TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='debug')])] +- +- expected_produced_messages.append( +- TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='enforcing', value='0')]) +- ) ++ produced_msg = api.produce.model_instances[0] ++ assert isinstance(produced_msg, LateTargetKernelCmdlineArgTasks) + +- assert api.produce.model_instances == expected_produced_messages ++ debug_kernel_cmline_arg = KernelCmdlineArg(key='debug') ++ if is_leapp_invoked_with_debug: ++ assert debug_kernel_cmline_arg in produced_msg.to_remove ++ else: ++ assert debug_kernel_cmline_arg not in produced_msg.to_remove + + + def test_add_boot_entry_configs(monkeypatch): +@@ -132,8 +135,10 @@ def test_add_boot_entry_configs(monkeypatch): + assert addupgradebootentry.run.args[2] == run_args_add + ['-c', CONFIGS[0]] + assert addupgradebootentry.run.args[3] == run_args_add + ['-c', CONFIGS[1]] + assert api.produce.model_instances == [ +- TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='debug')]), +- TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='enforcing', value='0')]), ++ LateTargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='debug'), ++ KernelCmdlineArg(key='enforcing', value='0'), ++ KernelCmdlineArg(key='plymouth.enable', value='0'), ++ KernelCmdlineArg(key='rd.plymouth', value='0')]) + ] + + +@@ -183,7 +188,7 @@ def test_fix_grub_config_error(monkeypatch, error_type, test_file_name): + (False, False), + ) + ) +-def test_collect_boot_args(monkeypatch, 
is_debug_enabled, network_enablement_type): ++def test_collect_upgrade_kernel_args(monkeypatch, is_debug_enabled, network_enablement_type): + env_vars = {'LEAPP_DEBUG': str(int(is_debug_enabled))} + if network_enablement_type: + env_vars['LEAPP_DEVEL_INITRAM_NETWORK'] = network_enablement_type +@@ -192,7 +197,8 @@ def test_collect_boot_args(monkeypatch, is_debug_enabled, network_enablement_typ + monkeypatch.setattr(addupgradebootentry, 'construct_cmdline_args_for_livemode', + lambda *args: {'livemodearg': 'value'}) + +- args = addupgradebootentry.collect_boot_args(livemode_enabled=True) ++ arg_set = addupgradebootentry.collect_upgrade_kernel_args(livemode_enabled=True) ++ args = dict(arg_set) + + assert args['enforcing'] == '0' + assert args['rd.plymouth'] == '0' +@@ -320,16 +326,3 @@ def test_get_device_uuid(monkeypatch): + uuid = addupgradebootentry._get_device_uuid(path) + + assert uuid == 'MY_UUID1' +- +- +-@pytest.mark.parametrize( +- ('args', 'expected_result'), +- ( +- ([('argA', 'val'), ('argB', 'valB'), ('argC', None), ], 'argA=val argB=valB argC'), +- ([('argA', ('val1', 'val2'))], 'argA=val1 argA=val2') +- ) +-) +-def test_format_grubby_args_from_args_dict(args, expected_result): +- actual_result = addupgradebootentry.format_grubby_args_from_args_dict(dict(args)) +- +- assert actual_result == expected_result +diff --git a/repos/system_upgrade/common/actors/kernelcmdlineconfig/actor.py b/repos/system_upgrade/common/actors/kernelcmdlineconfig/actor.py +index 3585a14e..6d5f39dd 100644 +--- a/repos/system_upgrade/common/actors/kernelcmdlineconfig/actor.py ++++ b/repos/system_upgrade/common/actors/kernelcmdlineconfig/actor.py +@@ -3,7 +3,13 @@ import os + from leapp.actors import Actor + from leapp.exceptions import StopActorExecutionError + from leapp.libraries.actor import kernelcmdlineconfig +-from leapp.models import FirmwareFacts, InstalledTargetKernelInfo, KernelCmdlineArg, TargetKernelCmdlineArgTasks ++from leapp.models import ( ++ FirmwareFacts, ++ InstalledTargetKernelInfo, ++ KernelCmdlineArg, ++ LateTargetKernelCmdlineArgTasks, ++ TargetKernelCmdlineArgTasks ++) + from leapp.reporting import Report + from leapp.tags import FinalizationPhaseTag, IPUWorkflowTag + +@@ -14,7 +20,13 @@ class KernelCmdlineConfig(Actor): + """ + + name = 'kernelcmdlineconfig' +- consumes = (KernelCmdlineArg, InstalledTargetKernelInfo, FirmwareFacts, TargetKernelCmdlineArgTasks) ++ consumes = ( ++ KernelCmdlineArg, ++ InstalledTargetKernelInfo, ++ FirmwareFacts, ++ LateTargetKernelCmdlineArgTasks, ++ TargetKernelCmdlineArgTasks ++ ) + produces = (Report,) + tags = (FinalizationPhaseTag, IPUWorkflowTag) + +diff --git a/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py b/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py +index 19c50f3c..98b8b95b 100644 +--- a/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py ++++ b/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py +@@ -1,3 +1,4 @@ ++import itertools + import re + + from leapp import reporting +@@ -5,7 +6,12 @@ from leapp.exceptions import StopActorExecutionError + from leapp.libraries import stdlib + from leapp.libraries.common.config import architecture, version + from leapp.libraries.stdlib import api +-from leapp.models import InstalledTargetKernelInfo, KernelCmdlineArg, TargetKernelCmdlineArgTasks ++from leapp.models import ( ++ InstalledTargetKernelInfo, ++ KernelCmdlineArg, ++ 
LateTargetKernelCmdlineArgTasks, ++ TargetKernelCmdlineArgTasks ++) + + KERNEL_CMDLINE_FILE = "/etc/kernel/cmdline" + +@@ -71,7 +77,9 @@ def retrieve_arguments_to_modify(): + kernelargs_msgs_to_add = list(api.consume(KernelCmdlineArg)) + kernelargs_msgs_to_remove = [] + +- for target_kernel_arg_task in api.consume(TargetKernelCmdlineArgTasks): ++ modification_msgs = itertools.chain(api.consume(TargetKernelCmdlineArgTasks), ++ api.consume(LateTargetKernelCmdlineArgTasks)) ++ for target_kernel_arg_task in modification_msgs: + kernelargs_msgs_to_add.extend(target_kernel_arg_task.to_add) + kernelargs_msgs_to_remove.extend(target_kernel_arg_task.to_remove) + +diff --git a/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py b/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py +index dc5196ea..2f12742a 100644 +--- a/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py ++++ b/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py +@@ -2,7 +2,7 @@ import errno + import os + import re + +-from leapp.libraries.common.config import get_env ++from leapp.libraries.common.config import get_env, version + from leapp.libraries.stdlib import api + from leapp.models import ( + InitrdIncludes, +@@ -39,6 +39,9 @@ def generate_link_file(interface): + + @suppress_deprecation(InitrdIncludes) + def process(): ++ if get_env('LEAPP_USE_NET_NAMING_SCHEMES', '0') == '1' and version.get_target_major_version() != '8': ++ api.current_logger().info('Skipping generation of .link files renaming NICs as LEAPP_USE_NET_NAMING_SCHEMES=1') ++ return + + if get_env('LEAPP_NO_NETWORK_RENAMING', '0') == '1': + api.current_logger().info( +diff --git a/repos/system_upgrade/common/models/kernelcmdlineargs.py b/repos/system_upgrade/common/models/kernelcmdlineargs.py +index e3568a0a..fafd2853 100644 +--- a/repos/system_upgrade/common/models/kernelcmdlineargs.py ++++ b/repos/system_upgrade/common/models/kernelcmdlineargs.py +@@ -24,6 +24,27 @@ class TargetKernelCmdlineArgTasks(Model): + to_remove = fields.List(fields.Model(KernelCmdlineArg), default=[]) + + ++class LateTargetKernelCmdlineArgTasks(Model): ++ """ ++ Desired modifications of the target kernel args produced later in the upgrade process. ++ ++ Defined to prevent loops in the actor dependency graph. ++ """ ++ topic = SystemInfoTopic ++ ++ to_add = fields.List(fields.Model(KernelCmdlineArg), default=[]) ++ to_remove = fields.List(fields.Model(KernelCmdlineArg), default=[]) ++ ++ ++class UpgradeKernelCmdlineArgTasks(Model): ++ """ ++ Modifications of the upgrade kernel cmdline. 
++ """ ++ topic = SystemInfoTopic ++ ++ to_add = fields.List(fields.Model(KernelCmdlineArg), default=[]) ++ ++ + class KernelCmdline(Model): + """ + Kernel command line parameters the system was booted with +diff --git a/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/actor.py b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/actor.py +new file mode 100644 +index 00000000..769fe20b +--- /dev/null ++++ b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/actor.py +@@ -0,0 +1,28 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import emit_net_naming as emit_net_naming_lib ++from leapp.models import ( ++ KernelCmdline, ++ RpmTransactionTasks, ++ TargetKernelCmdlineArgTasks, ++ TargetUserSpaceUpgradeTasks, ++ UpgradeKernelCmdlineArgTasks ++) ++from leapp.tags import ChecksPhaseTag, IPUWorkflowTag ++ ++ ++class EmitNetNamingScheme(Actor): ++ """ ++ Emit necessary modifications of the upgrade environment and target command line to use net.naming-scheme. ++ """ ++ name = 'emit_net_naming_scheme' ++ consumes = (KernelCmdline,) ++ produces = ( ++ RpmTransactionTasks, ++ TargetKernelCmdlineArgTasks, ++ TargetUserSpaceUpgradeTasks, ++ UpgradeKernelCmdlineArgTasks, ++ ) ++ tags = (ChecksPhaseTag, IPUWorkflowTag) ++ ++ def process(self): ++ emit_net_naming_lib.emit_msgs_to_use_net_naming_schemes() +diff --git a/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py +new file mode 100644 +index 00000000..65abdd4d +--- /dev/null ++++ b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py +@@ -0,0 +1,63 @@ ++from leapp.exceptions import StopActorExecutionError ++from leapp.libraries.common.config import get_env, version ++from leapp.libraries.stdlib import api ++from leapp.models import ( ++ KernelCmdline, ++ KernelCmdlineArg, ++ RpmTransactionTasks, ++ TargetKernelCmdlineArgTasks, ++ TargetUserSpaceUpgradeTasks, ++ UpgradeKernelCmdlineArgTasks ++) ++ ++NET_NAMING_SYSATTRS_RPM_NAME = 'rhel-net-naming-sysattrs' ++ ++ ++def is_net_scheme_compatible_with_current_cmdline(): ++ kernel_cmdline = next(api.consume(KernelCmdline), None) ++ if not kernel_cmdline: ++ # Super unlikely ++ raise StopActorExecutionError('Did not receive any KernelCmdline messages.') ++ ++ allows_predictable_names = True ++ already_has_a_net_naming_scheme = False ++ for param in kernel_cmdline.parameters: ++ if param.key == 'net.ifnames': ++ if param.value == '0': ++ allows_predictable_names = False ++ elif param.value == '1': ++ allows_predictable_names = True ++ if param.key == 'net.naming-scheme': ++ # We assume that the kernel cmdline does not contain invalid entries, namely, ++ # that the net.naming-scheme refers to a valid scheme. ++ already_has_a_net_naming_scheme = True ++ ++ is_compatible = allows_predictable_names and not already_has_a_net_naming_scheme ++ ++ msg = ('Should net.naming-scheme be added to kernel cmdline: %s. 
' ++ 'Reason: allows_predictable_names=%s, already_has_a_net_naming_scheme=%s') ++ api.current_logger().info(msg, 'yes' if is_compatible else 'no', ++ allows_predictable_names, ++ already_has_a_net_naming_scheme) ++ ++ return is_compatible ++ ++ ++def emit_msgs_to_use_net_naming_schemes(): ++ if get_env('LEAPP_USE_NET_NAMING_SCHEMES', '0') != '1' and version.get_target_major_version() != '8': ++ return ++ ++ # The package should be installed regardless of whether we will modify the cmdline - ++ # if the cmdline already contains net.naming-scheme, then the package will be useful ++ # both in the upgrade environment and on the target system. ++ pkgs_to_install = [NET_NAMING_SYSATTRS_RPM_NAME] ++ api.produce(TargetUserSpaceUpgradeTasks(install_rpms=pkgs_to_install)) ++ api.produce(RpmTransactionTasks(to_install=pkgs_to_install)) ++ ++ if not is_net_scheme_compatible_with_current_cmdline(): ++ return ++ ++ naming_scheme = 'rhel-{0}.0'.format(version.get_source_major_version()) ++ cmdline_args = [KernelCmdlineArg(key='net.naming-scheme', value=naming_scheme)] ++ api.produce(UpgradeKernelCmdlineArgTasks(to_add=cmdline_args)) ++ api.produce(TargetKernelCmdlineArgTasks(to_add=cmdline_args)) +diff --git a/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/tests/test_emit_net_naming_scheme.py b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/tests/test_emit_net_naming_scheme.py +new file mode 100644 +index 00000000..7a5eeba5 +--- /dev/null ++++ b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/tests/test_emit_net_naming_scheme.py +@@ -0,0 +1,95 @@ ++import pytest ++ ++from leapp.libraries.actor import emit_net_naming as emit_net_naming_lib ++from leapp.libraries.common.testutils import CurrentActorMocked, produce_mocked ++from leapp.libraries.stdlib import api ++from leapp.models import ( ++ KernelCmdline, ++ KernelCmdlineArg, ++ RpmTransactionTasks, ++ TargetKernelCmdlineArgTasks, ++ TargetUserSpaceUpgradeTasks, ++ UpgradeKernelCmdlineArgTasks ++) ++ ++ ++@pytest.mark.parametrize( ++ ('kernel_args', 'should_be_compatible'), ++ [ ++ ([KernelCmdlineArg(key='net.naming-scheme', value='rhel-8.10')], False), ++ ([KernelCmdlineArg(key='net.ifnames', value='1')], True), ++ ([KernelCmdlineArg(key='net.ifnames', value='0')], False), ++ ( ++ [ ++ KernelCmdlineArg(key='net.naming-scheme', value='rhel-8.10'), ++ KernelCmdlineArg(key='net.ifname', value='0'), ++ KernelCmdlineArg(key='root', value='/dev/vda1') ++ ], ++ False ++ ), ++ ([KernelCmdlineArg(key='root', value='/dev/vda1')], True), ++ ] ++) ++def test_is_net_scheme_compatible_with_current_cmdline(monkeypatch, kernel_args, should_be_compatible): ++ kernel_cmdline = KernelCmdline(parameters=kernel_args) ++ ++ def mocked_consume(msg_type): ++ yield {KernelCmdline: kernel_cmdline}[msg_type] ++ ++ monkeypatch.setattr(api, 'consume', mocked_consume) ++ ++ assert emit_net_naming_lib.is_net_scheme_compatible_with_current_cmdline() == should_be_compatible, \ ++ [(arg.key, arg.value) for arg in kernel_cmdline.parameters] ++ ++ ++@pytest.mark.parametrize( ++ ('is_net_scheme_enabled', 'is_current_cmdline_compatible'), ++ [ ++ (True, True), ++ (True, False), ++ (False, True) ++ ] ++) ++def test_emit_msgs_to_use_net_naming_schemes(monkeypatch, is_net_scheme_enabled, is_current_cmdline_compatible): ++ envvar_value = '1' if is_net_scheme_enabled else '0' ++ ++ mocked_actor = CurrentActorMocked(src_ver='8.10', ++ dst_ver='9.5', ++ envars={'LEAPP_USE_NET_NAMING_SCHEMES': envvar_value}) ++ monkeypatch.setattr(api, 'current_actor', 
mocked_actor) ++ ++ monkeypatch.setattr(api, 'produce', produce_mocked()) ++ monkeypatch.setattr(emit_net_naming_lib, ++ 'is_net_scheme_compatible_with_current_cmdline', ++ lambda: is_current_cmdline_compatible) ++ ++ emit_net_naming_lib.emit_msgs_to_use_net_naming_schemes() ++ ++ def ensure_one_msg_of_type_produced(produced_messages, msg_type): ++ msgs = (msg for msg in produced_messages if isinstance(msg, msg_type)) ++ msg = next(msgs) ++ assert not next(msgs, None), 'More than one message of type {type} produced'.format(type=msg_type) ++ return msg ++ ++ produced_messages = api.produce.model_instances ++ if is_net_scheme_enabled: ++ userspace_tasks = ensure_one_msg_of_type_produced(produced_messages, TargetUserSpaceUpgradeTasks) ++ assert userspace_tasks.install_rpms == [emit_net_naming_lib.NET_NAMING_SYSATTRS_RPM_NAME] ++ ++ rpm_tasks = ensure_one_msg_of_type_produced(produced_messages, RpmTransactionTasks) ++ assert rpm_tasks.to_install == [emit_net_naming_lib.NET_NAMING_SYSATTRS_RPM_NAME] ++ else: ++ assert not api.produce.called ++ return ++ ++ upgrade_cmdline_mods = (msg for msg in produced_messages if isinstance(msg, UpgradeKernelCmdlineArgTasks)) ++ target_cmdline_mods = (msg for msg in produced_messages if isinstance(msg, TargetKernelCmdlineArgTasks)) ++ ++ if is_current_cmdline_compatible: ++ # We should emit cmdline modifications - both UpgradeKernelCmdlineArgTasks and TargetKernelCmdlineArgTasks ++ # should be produced ++ assert next(upgrade_cmdline_mods, None) ++ assert next(target_cmdline_mods, None) ++ else: ++ assert not next(upgrade_cmdline_mods, None) ++ assert not next(target_cmdline_mods, None) +-- +2.47.0 + diff --git a/SOURCES/0024-prevent-the-feature-for-being-used-outside-8-9.patch b/SOURCES/0024-prevent-the-feature-for-being-used-outside-8-9.patch new file mode 100644 index 0000000..02eb5fe --- /dev/null +++ b/SOURCES/0024-prevent-the-feature-for-being-used-outside-8-9.patch @@ -0,0 +1,26 @@ +From b4b535454b74c05682ecf0d3059decbd2c9530e0 Mon Sep 17 00:00:00 2001 +From: Michal Hecko +Date: Wed, 6 Nov 2024 22:23:37 +0100 +Subject: [PATCH 24/40] prevent the feature for being used outside 8>9 + +--- + .../libraries/persistentnetnamesconfig.py | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py b/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py +index 2f12742a..b2c7f5ff 100644 +--- a/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py ++++ b/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py +@@ -39,7 +39,8 @@ def generate_link_file(interface): + + @suppress_deprecation(InitrdIncludes) + def process(): +- if get_env('LEAPP_USE_NET_NAMING_SCHEMES', '0') == '1' and version.get_target_major_version() != '8': ++ if get_env('LEAPP_USE_NET_NAMING_SCHEMES', '0') == '1' and version.get_target_major_version() == '9': ++ # We can use this only for 8>9, for now + api.current_logger().info('Skipping generation of .link files renaming NICs as LEAPP_USE_NET_NAMING_SCHEMES=1') + return + +-- +2.47.0 + diff --git a/SOURCES/0025-fix-condition-on-when-net-naming-is-emitted.patch b/SOURCES/0025-fix-condition-on-when-net-naming-is-emitted.patch new file mode 100644 index 0000000..d5d92a0 --- /dev/null +++ b/SOURCES/0025-fix-condition-on-when-net-naming-is-emitted.patch @@ -0,0 +1,28 @@ +From e43a8922e06d72212e8e2a8b51747c668147182c Mon 
Sep 17 00:00:00 2001 +From: Michal Hecko +Date: Wed, 6 Nov 2024 22:26:01 +0100 +Subject: [PATCH 25/40] fix condition on when net naming is emitted + +--- + .../emit_net_naming_scheme/libraries/emit_net_naming.py | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py +index 65abdd4d..726bb459 100644 +--- a/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py ++++ b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py +@@ -44,7 +44,10 @@ def is_net_scheme_compatible_with_current_cmdline(): + + + def emit_msgs_to_use_net_naming_schemes(): +- if get_env('LEAPP_USE_NET_NAMING_SCHEMES', '0') != '1' and version.get_target_major_version() != '8': ++ is_env_var_set = get_env('LEAPP_USE_NET_NAMING_SCHEMES', '0') == '1' ++ is_upgrade_8to9 = version.get_target_major_version() == '9' ++ is_net_naming_enabled_and_permitted = is_env_var_set and is_upgrade_8to9 ++ if not is_net_naming_enabled_and_permitted: + return + + # The package should be installed regardless of whether we will modify the cmdline - +-- +2.47.0 + diff --git a/SOURCES/0026-scangrubdevpartitionlayout-Skip-warning-msgs.patch b/SOURCES/0026-scangrubdevpartitionlayout-Skip-warning-msgs.patch new file mode 100644 index 0000000..cc0a99c --- /dev/null +++ b/SOURCES/0026-scangrubdevpartitionlayout-Skip-warning-msgs.patch @@ -0,0 +1,56 @@ +From 0bf07d1546ccdc6d4a9e6f4936a98b4d6ca27789 Mon Sep 17 00:00:00 2001 +From: Petr Stodulka +Date: Tue, 12 Nov 2024 09:10:50 +0100 +Subject: [PATCH 26/40] scangrubdevpartitionlayout: Skip warning msgs + +The fdisk output can contain warning msgs when a partition is not +aligned on a physical sector boundary, like: + Partition 4 does not start on physical sector boundary. +We know that in the case of MBR the line we expect to parse always +starts with a canonical path, so let's skip all lines which do +not start with '/'. + +jira: https://issues.redhat.com/browse/RHEL-50947 +--- + .../libraries/scan_layout.py | 10 ++++++++++ + .../tests/test_scan_partition_layout.py | 3 +++ + 2 files changed, 13 insertions(+) + +diff --git a/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/libraries/scan_layout.py b/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/libraries/scan_layout.py +index 83d02656..7f4a2a59 100644 +--- a/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/libraries/scan_layout.py ++++ b/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/libraries/scan_layout.py +@@ -68,6 +68,16 @@ def get_partition_layout(device): + + partitions = [] + for partition_line in table_iter: ++ if not partition_line.startswith('/'): ++ # the output can contain a warning msg when a partition is not aligned ++ # on a physical sector boundary, like: ++ # ~~~ ++ # Partition 4 does not start on physical sector boundary. ++ # ~~~ ++ # We know that in the case of MBR the line we expect to parse always ++ # starts with a canonical path. So let's use this condition. 
++            # See https://issues.redhat.com/browse/RHEL-50947
++            continue
+         # Fields: Device Boot Start End Sectors Size Id Type
+         # The line looks like: `/dev/vda1 * 2048 2099199 2097152 1G 83 Linux`
+         part_info = split_on_space_segments(partition_line)
+diff --git a/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/tests/test_scan_partition_layout.py b/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/tests/test_scan_partition_layout.py
+index 743ca71f..9c32e16f 100644
+--- a/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/tests/test_scan_partition_layout.py
++++ b/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/tests/test_scan_partition_layout.py
+@@ -49,6 +49,9 @@ def test_get_partition_layout(monkeypatch, devices, fs):
+             part_line = '{0} * {1} 2099199 1048576 83 {2}'.format(part.name, part.start_offset, fs)
+             fdisk_output.append(part_line)
+ 
++        # add a problematic warning msg to test:
++        # https://issues.redhat.com/browse/RHEL-50947
++        fdisk_output.append('Partition 3 does not start on physical sector boundary.')
+         device_to_fdisk_output[device.name] = fdisk_output
+ 
+     def mocked_run(cmd, *args, **kwargs):
+-- 
+2.47.0
+
diff --git a/SOURCES/0027-Workaround-for-ARM-Upgrades-from-RHEL8-to-RHEL9.5.patch b/SOURCES/0027-Workaround-for-ARM-Upgrades-from-RHEL8-to-RHEL9.5.patch
new file mode 100644
index 0000000..91dde99
--- /dev/null
+++ b/SOURCES/0027-Workaround-for-ARM-Upgrades-from-RHEL8-to-RHEL9.5.patch
@@ -0,0 +1,1756 @@
+From abcf7a5d209d4f9fc054d39cf6866b2809fe382b Mon Sep 17 00:00:00 2001
+From: David Kubek
+Date: Wed, 24 Jul 2024 21:59:53 +0200
+Subject: [PATCH 27/40] Workaround for ARM Upgrades from RHEL8 to RHEL9.5+
+
+Address an issue with ARM system upgrades from RHEL 8 to RHEL 9.5+
+caused by GRUB bootloader incompatibility with newer kernels. When
+attempting to load the RHEL 9.5+ kernel using the RHEL 8 bootloader,
+the upgrade process halts due to a boot crash.
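+
+The new grub.py code below parses `efibootmgr -v` output and translates
+between canonical ESP paths and the backslash-separated form used by
+UEFI. A minimal standalone sketch of that path translation (mirroring
+the canonical_path_to_efi_format and _efi_path_to_canonical helpers
+introduced by this patch):
+
+    import os
+
+    EFI_MOUNTPOINT = '/boot/efi/'
+
+    def canonical_path_to_efi_format(canonical_path):
+        # /boot/efi/EFI/redhat/shimx64.efi -> \EFI\redhat\shimx64.efi
+        return canonical_path.replace(EFI_MOUNTPOINT[:-1], '').replace('/', '\\')
+
+    def efi_path_to_canonical(efi_path):
+        # \EFI\redhat\shimx64.efi -> /boot/efi/EFI/redhat/shimx64.efi
+        return os.path.join(EFI_MOUNTPOINT, efi_path.replace('\\', '/').lstrip('/'))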
+
+JIRA: 41193
+---
+ repos/system_upgrade/common/libraries/grub.py | 323 ++++++++++++++++--
+ .../common/libraries/tests/test_grub.py | 244 ++++++++++++-
+ repos/system_upgrade/common/models/efiinfo.py | 27 ++
+ .../addarmbootloaderworkaround/actor.py | 59 ++++
+ .../libraries/addupgradebootloader.py | 185 ++++++++++
+ .../tests/test_addarmbootloaderworkaround.py | 312 +++++++++++++++++
+ .../actors/checkarmbootloader/actor.py | 16 +-
+ .../libraries/checkarmbootloader.py | 44 +--
+ .../tests/test_checkarmbootloader.py | 36 +-
+ .../actors/removeupgradeefientry/actor.py | 26 ++
+ .../libraries/removeupgradeefientry.py | 100 ++++++
+ .../tests/test_removeupgradeefientry.py | 105 ++++++
+ .../el8toel9/models/upgradeefientry.py | 14 +
+ 13 files changed, 1399 insertions(+), 92 deletions(-)
+ create mode 100644 repos/system_upgrade/common/models/efiinfo.py
+ create mode 100644 repos/system_upgrade/el8toel9/actors/addarmbootloaderworkaround/actor.py
+ create mode 100644 repos/system_upgrade/el8toel9/actors/addarmbootloaderworkaround/libraries/addupgradebootloader.py
+ create mode 100644 repos/system_upgrade/el8toel9/actors/addarmbootloaderworkaround/tests/test_addarmbootloaderworkaround.py
+ create mode 100644 repos/system_upgrade/el8toel9/actors/removeupgradeefientry/actor.py
+ create mode 100644 repos/system_upgrade/el8toel9/actors/removeupgradeefientry/libraries/removeupgradeefientry.py
+ create mode 100644 repos/system_upgrade/el8toel9/actors/removeupgradeefientry/tests/test_removeupgradeefientry.py
+ create mode 100644 repos/system_upgrade/el8toel9/models/upgradeefientry.py
+
+diff --git a/repos/system_upgrade/common/libraries/grub.py b/repos/system_upgrade/common/libraries/grub.py
+index 3c80556e..cd960ea4 100644
+--- a/repos/system_upgrade/common/libraries/grub.py
++++ b/repos/system_upgrade/common/libraries/grub.py
+@@ -1,10 +1,204 @@
+ import os
++import re
+ 
+ from leapp.exceptions import StopActorExecution
+ from leapp.libraries.common import mdraid
+ from leapp.libraries.stdlib import api, CalledProcessError, run
+ from leapp.utils.deprecation import deprecated
+ 
++EFI_MOUNTPOINT = '/boot/efi/'
++"""The path to the required mountpoint for ESP."""
++
++GRUB2_BIOS_ENTRYPOINT = '/boot/grub2'
++"""The entrypoint path of the BIOS GRUB2"""
++
++GRUB2_BIOS_ENV_FILE = os.path.join(GRUB2_BIOS_ENTRYPOINT, 'grubenv')
++"""The path to the env file for GRUB2 in BIOS"""
++
++
++def canonical_path_to_efi_format(canonical_path):
++    r"""Transform the canonical path to the UEFI format.
++
++    e.g. /boot/efi/EFI/redhat/shimx64.efi -> \EFI\redhat\shimx64.efi
++    (just a single backslash; so the string needs to be put into apostrophes
++    when used for the /usr/sbin/efibootmgr cmd)
++
++    The path has to start with /boot/efi, otherwise the path is invalid for UEFI.
++    """
++
++    # We want to keep the last "/" of the EFI_MOUNTPOINT
++    return canonical_path.replace(EFI_MOUNTPOINT[:-1], "").replace("/", "\\")
++
++
++class EFIBootLoaderEntry(object):
++    """
++    Representation of a UEFI boot loader entry.
++    """
++    # pylint: disable=eq-without-hash
++
++    def __init__(self, boot_number, label, active, efi_bin_source):
++        self.boot_number = boot_number
++        """Expected string, e.g. '0001'. """
++
++        self.label = label
++        """Label of the UEFI entry. E.g. 'Redhat'"""
++
++        self.active = active
++        """True when the UEFI entry is active (an asterisk is present next to the boot number)"""
++
++        self.efi_bin_source = efi_bin_source
++        """Source of the UEFI binary.
++
++        It could contain various values, e.g.:
++            FvVol(7cb8bdc9-f8eb-4f34-aaea-3ee4af6516a1)/FvFile(462caa21-7614-4503-836e-8ab6f4662331)
++            HD(1,GPT,28c77f6b-3cd0-4b22-985f-c99903835d79,0x800,0x12c000)/File(\\EFI\\redhat\\shimx64.efi)
++            PciRoot(0x0)/Pci(0x2,0x3)/Pci(0x0,0x0)N.....YM....R,Y.
++        """
++
++    def __eq__(self, other):
++        return all(
++            [
++                self.boot_number == other.boot_number,
++                self.label == other.label,
++                self.active == other.active,
++                self.efi_bin_source == other.efi_bin_source,
++            ]
++        )
++
++    def __ne__(self, other):
++        return not self.__eq__(other)
++
++    def __repr__(self):
++        return 'EFIBootLoaderEntry({boot_number}, {label}, {active}, {efi_bin_source})'.format(
++            boot_number=repr(self.boot_number),
++            label=repr(self.label),
++            active=repr(self.active),
++            efi_bin_source=repr(self.efi_bin_source)
++        )
++
++    def is_referring_to_file(self):
++        """Return True when the boot source is a file.
++
++        Some sources could refer e.g. to PXE boot. Return true if the source
++        refers to a file ("ends with /File(...path...)")
++
++        Does not matter whether the file exists or not.
++        """
++        return '/File(\\' in self.efi_bin_source
++
++    @staticmethod
++    def _efi_path_to_canonical(efi_path):
++        return os.path.join(EFI_MOUNTPOINT, efi_path.replace("\\", "/").lstrip("/"))
++
++    def get_canonical_path(self):
++        """Return expected canonical path for the referred UEFI bin or None.
++
++        Return None in case the entry is not referring to any UEFI bin
++        (e.g. when it refers to a PXE boot).
++        """
++        if not self.is_referring_to_file():
++            return None
++        match = re.search(r'/File\((?P<path>\\.*)\)$', self.efi_bin_source)
++        return EFIBootLoaderEntry._efi_path_to_canonical(match.groups('path')[0])
++
++
++class EFIBootInfo(object):
++    """
++    Data about the current UEFI boot configuration.
++
++    Raise StopActorExecution when:
++        - unable to obtain info about the UEFI configuration.
++        - BIOS is detected.
++        - ESP is not mounted where expected.
++    """
++
++    def __init__(self):
++        if not is_efi():
++            raise StopActorExecution('Unable to collect data about UEFI on a BIOS system.')
++        try:
++            result = run(['/usr/sbin/efibootmgr', '-v'])
++        except CalledProcessError:
++            raise StopActorExecution('Unable to get information about UEFI boot entries.')
++
++        bootmgr_output = result['stdout']
++
++        self.current_bootnum = None
++        """The boot number (str) of the current boot."""
++        self.next_bootnum = None
++        """The boot number (str) of the next boot."""
++        self.boot_order = tuple()
++        """The tuple of the UEFI boot loader entries in the boot order."""
++        self.entries = {}
++        """The UEFI boot loader entries {'boot_number': EFIBootLoader}"""
++
++        self._parse_efi_boot_entries(bootmgr_output)
++        self._parse_current_bootnum(bootmgr_output)
++        self._parse_next_bootnum(bootmgr_output)
++        self._parse_boot_order(bootmgr_output)
++        self._print_loaded_info()
++
++    def _parse_efi_boot_entries(self, bootmgr_output):
++        """
++        Return dict of UEFI boot loader entries: {"<boot_number>": EFIBootLoader}
++        """
++
++        self.entries = {}
++        regexp_entry = re.compile(
++            r"^Boot(?P<bootnum>[a-zA-Z0-9]+)(?P<active>\*?)\s*(?P