Compare commits


No commits in common. "c8" and "c8" have entirely different histories.
c8 ... c8

113 changed files with 46741 additions and 23957 deletions

.gitignore vendored
View File

@@ -1,2 +1,2 @@
SOURCES/deps-pkgs-13.tar.gz
SOURCES/leapp-repository-0.23.0.tar.gz
SOURCES/deps-pkgs-11.tar.gz
SOURCES/leapp-repository-0.21.0.tar.gz

View File

@@ -1,2 +1,2 @@
3590b33b4a79ebe62f5cfa0eeca7efb41d526498 SOURCES/deps-pkgs-13.tar.gz
b5b541cc0c0372ee476f0ab6073a62e67290d031 SOURCES/leapp-repository-0.23.0.tar.gz
8b3fe3a7b52d2e144d374623aa5b0b0add7ab0c7 SOURCES/deps-pkgs-11.tar.gz
9327be3720ccb3f7b285d2199463d7df0c38dfae SOURCES/leapp-repository-0.21.0.tar.gz

View File

@@ -1,333 +0,0 @@
From dcf53c28ea9c3fdd03277abcdeb1d124660f7f8e Mon Sep 17 00:00:00 2001
From: karolinku <kkula@redhat.com>
Date: Tue, 19 Aug 2025 09:48:11 +0200
Subject: [PATCH 01/55] Add upgrade inhibitor for custom DNF pluginpath
configuration
Implements detection and inhibition of the upgrade when DNF
pluginpath is configured in /etc/dnf/dnf.conf:
- Add DnfPluginPathDetected model to communicate detection results
- Add ScanDnfPluginPath actor (FactsPhase) to scan DNF configuration
- Add CheckDnfPluginPath actor (ChecksPhase) to create inhibitor report
- Add related unit tests
The location of DNF plugins is not constant between system releases,
which can cause issues with the upgrade, so the user should remove
this option or comment it out.
Jira: RHEL-69601
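
For orientation, the detection itself is just a configparser lookup in the [main] section of /etc/dnf/dnf.conf. A minimal standalone sketch of that idea (condensed from the scanner library added below; the patch itself imports six.moves.configparser for Python 2 compatibility):

import configparser  # the actual patch uses six.moves.configparser instead
import os

DNF_CONFIG_PATH = '/etc/dnf/dnf.conf'

def is_pluginpath_set(config_path=DNF_CONFIG_PATH):
    # A missing config file means there is nothing to inhibit on.
    if not os.path.isfile(config_path):
        return False
    parser = configparser.ConfigParser()
    try:
        parser.read(config_path)
        # Any pluginpath value in the [main] section triggers the inhibitor.
        return parser.has_option('main', 'pluginpath')
    except (configparser.Error, IOError):
        return False

print(is_pluginpath_set())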
---
.../common/actors/checkdnfpluginpath/actor.py | 22 ++++++++
.../libraries/checkdnfpluginpath.py | 35 ++++++++++++
.../tests/test_checkdnfpluginpath.py | 34 ++++++++++++
.../common/actors/scandnfpluginpath/actor.py | 21 ++++++++
.../libraries/scandnfpluginpath.py | 30 +++++++++++
.../files/dnf_config_incorrect_pluginpath | 7 +++
.../tests/files/dnf_config_no_pluginpath | 6 +++
.../tests/files/dnf_config_with_pluginpath | 7 +++
.../tests/test_scandnfpluginpath.py | 53 +++++++++++++++++++
.../common/models/dnfpluginpathdetected.py | 14 +++++
10 files changed, 229 insertions(+)
create mode 100644 repos/system_upgrade/common/actors/checkdnfpluginpath/actor.py
create mode 100644 repos/system_upgrade/common/actors/checkdnfpluginpath/libraries/checkdnfpluginpath.py
create mode 100644 repos/system_upgrade/common/actors/checkdnfpluginpath/tests/test_checkdnfpluginpath.py
create mode 100644 repos/system_upgrade/common/actors/scandnfpluginpath/actor.py
create mode 100644 repos/system_upgrade/common/actors/scandnfpluginpath/libraries/scandnfpluginpath.py
create mode 100644 repos/system_upgrade/common/actors/scandnfpluginpath/tests/files/dnf_config_incorrect_pluginpath
create mode 100644 repos/system_upgrade/common/actors/scandnfpluginpath/tests/files/dnf_config_no_pluginpath
create mode 100644 repos/system_upgrade/common/actors/scandnfpluginpath/tests/files/dnf_config_with_pluginpath
create mode 100644 repos/system_upgrade/common/actors/scandnfpluginpath/tests/test_scandnfpluginpath.py
create mode 100644 repos/system_upgrade/common/models/dnfpluginpathdetected.py
diff --git a/repos/system_upgrade/common/actors/checkdnfpluginpath/actor.py b/repos/system_upgrade/common/actors/checkdnfpluginpath/actor.py
new file mode 100644
index 00000000..34055886
--- /dev/null
+++ b/repos/system_upgrade/common/actors/checkdnfpluginpath/actor.py
@@ -0,0 +1,22 @@
+from leapp.actors import Actor
+from leapp.libraries.actor.checkdnfpluginpath import perform_check
+from leapp.models import DnfPluginPathDetected
+from leapp.reporting import Report
+from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
+
+
+class CheckDnfPluginPath(Actor):
+ """
+ Inhibits the upgrade if a custom DNF plugin path is configured.
+
+ This actor checks whether the pluginpath option is configured in /etc/dnf/dnf.conf and produces a report if it is.
+ If the option is detected with any value, the upgrade is inhibited.
+ """
+
+ name = 'check_dnf_pluginpath'
+ consumes = (DnfPluginPathDetected,)
+ produces = (Report,)
+ tags = (ChecksPhaseTag, IPUWorkflowTag)
+
+ def process(self):
+ perform_check()
diff --git a/repos/system_upgrade/common/actors/checkdnfpluginpath/libraries/checkdnfpluginpath.py b/repos/system_upgrade/common/actors/checkdnfpluginpath/libraries/checkdnfpluginpath.py
new file mode 100644
index 00000000..ce705361
--- /dev/null
+++ b/repos/system_upgrade/common/actors/checkdnfpluginpath/libraries/checkdnfpluginpath.py
@@ -0,0 +1,35 @@
+from leapp import reporting
+from leapp.libraries.stdlib import api
+from leapp.models import DnfPluginPathDetected
+
+DNF_CONFIG_PATH = '/etc/dnf/dnf.conf'
+
+
+def check_dnf_pluginpath(dnf_pluginpath_detected):
+ """Create an inhibitor when pluginpath is detected in DNF configuration."""
+ if not dnf_pluginpath_detected.is_pluginpath_detected:
+ return
+ reporting.create_report([
+ reporting.Title('Detected specified pluginpath in DNF configuration.'),
+ reporting.Summary(
+ 'The "pluginpath" option is set in the {} file. The path to DNF plugins differs between '
+ 'system major releases due to different versions of Python. '
+ 'This breaks the in-place upgrade when the path is defined explicitly, as DNF plugins '
+ 'are stored in a different path on the new system.'
+ .format(DNF_CONFIG_PATH)
+ ),
+ reporting.Remediation(
+ hint='Remove or comment out the pluginpath option in the DNF '
+ 'configuration file to be able to upgrade the system',
+ commands=[['sed', '-i', '\'s/^pluginpath[[:space:]]*=/#pluginpath=/\'', DNF_CONFIG_PATH]],
+ ),
+ reporting.Severity(reporting.Severity.HIGH),
+ reporting.Groups([reporting.Groups.INHIBITOR]),
+ reporting.RelatedResource('file', DNF_CONFIG_PATH),
+ ])
+
+
+def perform_check():
+ dnf_pluginpath_detected = next(api.consume(DnfPluginPathDetected), None)
+ if dnf_pluginpath_detected:
+ check_dnf_pluginpath(dnf_pluginpath_detected)
diff --git a/repos/system_upgrade/common/actors/checkdnfpluginpath/tests/test_checkdnfpluginpath.py b/repos/system_upgrade/common/actors/checkdnfpluginpath/tests/test_checkdnfpluginpath.py
new file mode 100644
index 00000000..7dd8bbf2
--- /dev/null
+++ b/repos/system_upgrade/common/actors/checkdnfpluginpath/tests/test_checkdnfpluginpath.py
@@ -0,0 +1,34 @@
+import pytest
+
+from leapp import reporting
+from leapp.libraries.actor.checkdnfpluginpath import check_dnf_pluginpath, perform_check
+from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked
+from leapp.libraries.stdlib import api
+from leapp.models import DnfPluginPathDetected
+from leapp.utils.report import is_inhibitor
+
+
+@pytest.mark.parametrize('is_detected', [False, True])
+def test_check_dnf_pluginpath(monkeypatch, is_detected):
+ actor_reports = create_report_mocked()
+ msg = DnfPluginPathDetected(is_pluginpath_detected=is_detected)
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[msg]))
+ monkeypatch.setattr(reporting, 'create_report', actor_reports)
+
+ perform_check()
+
+ assert bool(actor_reports.called) == is_detected
+
+ if is_detected:
+ assert is_inhibitor(actor_reports.report_fields)
+
+
+def test_perform_check_no_message_available(monkeypatch):
+ """Test perform_check when no DnfPluginPathDetected message is available."""
+ actor_reports = create_report_mocked()
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked())
+ monkeypatch.setattr(reporting, 'create_report', actor_reports)
+
+ perform_check()
+
+ assert not actor_reports.called
diff --git a/repos/system_upgrade/common/actors/scandnfpluginpath/actor.py b/repos/system_upgrade/common/actors/scandnfpluginpath/actor.py
new file mode 100644
index 00000000..e43a691e
--- /dev/null
+++ b/repos/system_upgrade/common/actors/scandnfpluginpath/actor.py
@@ -0,0 +1,21 @@
+from leapp.actors import Actor
+from leapp.libraries.actor.scandnfpluginpath import scan_dnf_pluginpath
+from leapp.models import DnfPluginPathDetected
+from leapp.tags import FactsPhaseTag, IPUWorkflowTag
+
+
+class ScanDnfPluginPath(Actor):
+ """
+ Scans DNF configuration for custom pluginpath option.
+
+ This actor collects information about whether the pluginpath option is configured in the DNF configuration
+ and produces a DnfPluginPathDetected message containing the information.
+ """
+
+ name = 'scan_dnf_pluginpath'
+ consumes = ()
+ produces = (DnfPluginPathDetected,)
+ tags = (FactsPhaseTag, IPUWorkflowTag)
+
+ def process(self):
+ scan_dnf_pluginpath()
diff --git a/repos/system_upgrade/common/actors/scandnfpluginpath/libraries/scandnfpluginpath.py b/repos/system_upgrade/common/actors/scandnfpluginpath/libraries/scandnfpluginpath.py
new file mode 100644
index 00000000..818f7700
--- /dev/null
+++ b/repos/system_upgrade/common/actors/scandnfpluginpath/libraries/scandnfpluginpath.py
@@ -0,0 +1,30 @@
+import os
+
+from six.moves import configparser
+
+from leapp.libraries.stdlib import api
+from leapp.models import DnfPluginPathDetected
+
+DNF_CONFIG_PATH = '/etc/dnf/dnf.conf'
+
+
+def _is_pluginpath_set(config_path):
+ """Check if pluginpath option is set in DNF configuration file."""
+ if not os.path.isfile(config_path):
+ api.current_logger().warning('The %s file is missing.', config_path)
+ return False
+
+ parser = configparser.ConfigParser()
+
+ try:
+ parser.read(config_path)
+ return parser.has_option('main', 'pluginpath')
+ except (configparser.Error, IOError) as e:
+ api.current_logger().warning('The DNF config file %s couldn\'t be parsed: %s', config_path, e)
+ return False
+
+
+def scan_dnf_pluginpath():
+ """Scan DNF configuration and produce DnfPluginPathDetected message."""
+ is_detected = _is_pluginpath_set(DNF_CONFIG_PATH)
+ api.produce(DnfPluginPathDetected(is_pluginpath_detected=is_detected))
diff --git a/repos/system_upgrade/common/actors/scandnfpluginpath/tests/files/dnf_config_incorrect_pluginpath b/repos/system_upgrade/common/actors/scandnfpluginpath/tests/files/dnf_config_incorrect_pluginpath
new file mode 100644
index 00000000..aa29db09
--- /dev/null
+++ b/repos/system_upgrade/common/actors/scandnfpluginpath/tests/files/dnf_config_incorrect_pluginpath
@@ -0,0 +1,7 @@
+[main]
+gpgcheck=1
+installonly_limit=3
+clean_requirements_on_remove=True
+best=True
+skip_if_unavailable=False
+pluginpathincorrect=/usr/lib/python3.6/site-packages/dnf-plugins
diff --git a/repos/system_upgrade/common/actors/scandnfpluginpath/tests/files/dnf_config_no_pluginpath b/repos/system_upgrade/common/actors/scandnfpluginpath/tests/files/dnf_config_no_pluginpath
new file mode 100644
index 00000000..3d08d075
--- /dev/null
+++ b/repos/system_upgrade/common/actors/scandnfpluginpath/tests/files/dnf_config_no_pluginpath
@@ -0,0 +1,6 @@
+[main]
+gpgcheck=1
+installonly_limit=3
+clean_requirements_on_remove=True
+best=True
+skip_if_unavailable=False
diff --git a/repos/system_upgrade/common/actors/scandnfpluginpath/tests/files/dnf_config_with_pluginpath b/repos/system_upgrade/common/actors/scandnfpluginpath/tests/files/dnf_config_with_pluginpath
new file mode 100644
index 00000000..09a81e64
--- /dev/null
+++ b/repos/system_upgrade/common/actors/scandnfpluginpath/tests/files/dnf_config_with_pluginpath
@@ -0,0 +1,7 @@
+[main]
+gpgcheck=1
+installonly_limit=3
+clean_requirements_on_remove=True
+best=True
+skip_if_unavailable=False
+pluginpath=/usr/lib/python3.6/site-packages/dnf-plugins
diff --git a/repos/system_upgrade/common/actors/scandnfpluginpath/tests/test_scandnfpluginpath.py b/repos/system_upgrade/common/actors/scandnfpluginpath/tests/test_scandnfpluginpath.py
new file mode 100644
index 00000000..fefb9d3f
--- /dev/null
+++ b/repos/system_upgrade/common/actors/scandnfpluginpath/tests/test_scandnfpluginpath.py
@@ -0,0 +1,53 @@
+import os
+
+import pytest
+
+from leapp.libraries.actor import scandnfpluginpath
+from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked, produce_mocked
+from leapp.libraries.stdlib import api
+from leapp.models import DnfPluginPathDetected
+
+
+@pytest.mark.parametrize('is_detected', [False, True])
+def test_scan_detects_pluginpath(monkeypatch, is_detected):
+ mocked_producer = produce_mocked()
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked())
+ monkeypatch.setattr(api, 'produce', mocked_producer)
+
+ monkeypatch.setattr(scandnfpluginpath, '_is_pluginpath_set',
+ lambda path: is_detected)
+
+ scandnfpluginpath.scan_dnf_pluginpath()
+
+ assert mocked_producer.called == 1
+ assert mocked_producer.model_instances[0].is_pluginpath_detected is is_detected
+
+
+@pytest.mark.parametrize(('config_file', 'result'), [
+ ('files/dnf_config_no_pluginpath', False),
+ ('files/dnf_config_with_pluginpath', True),
+ ('files/dnf_config_incorrect_pluginpath', False),
+ ('files/not_existing_file.conf', False)
+])
+def test_is_pluginpath_set(config_file, result):
+ CUR_DIR = os.path.dirname(os.path.abspath(__file__))
+
+ assert scandnfpluginpath._is_pluginpath_set(os.path.join(CUR_DIR, config_file)) == result
+
+
+def test_scan_no_config_file(monkeypatch):
+ mocked_producer = produce_mocked()
+ logger = logger_mocked()
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked())
+ monkeypatch.setattr(api, 'produce', mocked_producer)
+ monkeypatch.setattr(api, 'current_logger', lambda: logger)
+
+ filename = 'files/not_existing_file.conf'
+ monkeypatch.setattr(scandnfpluginpath, 'DNF_CONFIG_PATH', filename)
+ scandnfpluginpath.scan_dnf_pluginpath()
+
+ assert mocked_producer.called == 1
+ assert mocked_producer.model_instances[0].is_pluginpath_detected is False
+
+ assert 'The %s file is missing.' in logger.warnmsg
+ assert filename in logger.warnmsg
diff --git a/repos/system_upgrade/common/models/dnfpluginpathdetected.py b/repos/system_upgrade/common/models/dnfpluginpathdetected.py
new file mode 100644
index 00000000..c5474857
--- /dev/null
+++ b/repos/system_upgrade/common/models/dnfpluginpathdetected.py
@@ -0,0 +1,14 @@
+from leapp.models import fields, Model
+from leapp.topics import SystemInfoTopic
+
+
+class DnfPluginPathDetected(Model):
+ """
+ This model contains information about whether DNF pluginpath option is configured in /etc/dnf/dnf.conf.
+ """
+ topic = SystemInfoTopic
+
+ is_pluginpath_detected = fields.Boolean()
+ """
+ True if pluginpath option is found in /etc/dnf/dnf.conf, False otherwise.
+ """
--
2.51.1

View File

@@ -0,0 +1,44 @@
From fbc38d4ad1d828e0553579e3719c0e4ed4a2a6bd Mon Sep 17 00:00:00 2001
From: jinkangkang <1547182170@qq.com>
Date: Mon, 19 Aug 2024 18:46:08 +0800
Subject: [PATCH 01/40] rhui(alibaba): add ARM RHEL8 and RHEL9 setup entries
(#1277)
Since leapp's RHUI mechanism filters setups based on the architecture of the source system,
it was not possible to upgrade ARM-based RHEL systems on Alibaba cloud, as there
were no ARM entries in RHUI_SETUPS. This patch adds these entries, making
EL 8 -> 9 upgrades of ARM systems on Alibaba cloud possible.
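
Conceptually, the setup selection behaves like a dictionary lookup keyed by cloud provider and source architecture; the following simplified sketch (made-up structures, not the real RHUIFamily/mk_rhui_setup objects from rhui.py) shows why a missing aarch64 entry leaves no upgrade path:

# Simplified stand-in for RHUI_SETUPS: keys combine provider and source arch.
RHUI_SETUPS = {
    ('alibaba', 'x86_64'): ['aliyun_rhui_rhel8', 'aliyun_rhui_rhel9'],
    # before this patch there was no ('alibaba', 'aarch64') key at all
}

def setups_for(provider, source_arch):
    # leapp only considers setups whose family matches the source system's
    # architecture, so an absent key means the upgrade path simply does not exist.
    return RHUI_SETUPS.get((provider, source_arch), [])

print(setups_for('alibaba', 'aarch64'))  # [] before the patch -> upgrade blocked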
---
repos/system_upgrade/common/libraries/rhui.py | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/repos/system_upgrade/common/libraries/rhui.py b/repos/system_upgrade/common/libraries/rhui.py
index 51694ac2..30de0275 100644
--- a/repos/system_upgrade/common/libraries/rhui.py
+++ b/repos/system_upgrade/common/libraries/rhui.py
@@ -348,6 +348,22 @@ RHUI_SETUPS = {
('content.crt', RHUI_PKI_PRODUCT_DIR)
],
os_version='9'),
+ ],
+ RHUIFamily(RHUIProvider.ALIBABA, arch=arch.ARCH_ARM64, client_files_folder='alibaba'): [
+ mk_rhui_setup(clients={'aliyun_rhui_rhel8'}, leapp_pkg='leapp-rhui-alibaba',
+ mandatory_files=[('leapp-alibaba.repo', YUM_REPOS_PATH)],
+ optional_files=[
+ ('key.pem', RHUI_PKI_DIR),
+ ('content.crt', RHUI_PKI_PRODUCT_DIR)
+ ],
+ os_version='8'),
+ mk_rhui_setup(clients={'aliyun_rhui_rhel9'}, leapp_pkg='leapp-rhui-alibaba',
+ mandatory_files=[('leapp-alibaba.repo', YUM_REPOS_PATH)],
+ optional_files=[
+ ('key.pem', RHUI_PKI_DIR),
+ ('content.crt', RHUI_PKI_PRODUCT_DIR)
+ ],
+ os_version='9'),
]
}
--
2.47.0

View File

@@ -1,245 +0,0 @@
From 004cec5bd33025412df84f07590b6c5452d70ab6 Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Wed, 17 Apr 2024 09:58:00 +0200
Subject: [PATCH 02/55] Upgrade dracut module: Update /usr mounting solution
Originally we had implemented our own mount_usr.sh script, which
took care of mounting /usr when it is present on a separate
partition / mountpoint. It also took care of LVM activation.
However, it has been problematic in various cases (e.g. when a device
needed more time for initialisation, such as when connected using FC).
Let's instead use existing system solutions, starting
the upgrade.target after initrd-fs.target (instead of just
basic.target). IOW, let's get as close to the standard booting
procedure as possible where storage is concerned.
Note that booting is still broken in this commit and needs
additional changes made in follow-up commits. But due to the complexity
of the solution, we keep this separated.
jira: RHEL-3344, RHEL-35446
---
.../85sys-upgrade-redhat/module-setup.sh | 1 -
.../dracut/85sys-upgrade-redhat/mount_usr.sh | 148 ------------------
.../initrd-cleanup-override.conf | 3 +
.../dracut/90sys-upgrade/module-setup.sh | 11 ++
.../files/dracut/90sys-upgrade/upgrade.target | 4 +-
5 files changed, 16 insertions(+), 151 deletions(-)
delete mode 100755 repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/mount_usr.sh
create mode 100644 repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/initrd-cleanup-override.conf
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/module-setup.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/module-setup.sh
index d73060cb..45f98148 100755
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/module-setup.sh
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/module-setup.sh
@@ -102,7 +102,6 @@ install() {
inst_binary grep
# script to actually run the upgrader binary
- inst_hook upgrade 49 "$_moddir/mount_usr.sh"
inst_hook upgrade 50 "$_moddir/do-upgrade.sh"
#NOTE: some clean up?.. ideally, everything should be inside the leapp*
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/mount_usr.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/mount_usr.sh
deleted file mode 100755
index 9366ac13..00000000
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/mount_usr.sh
+++ /dev/null
@@ -1,148 +0,0 @@
-#!/bin/sh
-# -*- mode: shell-script; indent-tabs-mode: nil; sh-basic-offset: 4; -*-
-# ex: ts=8 sw=4 sts=4 et filetype=sh
-
-type info >/dev/null 2>&1 || . /lib/dracut-lib.sh
-
-export NEWROOT=${NEWROOT:-"/sysroot"}
-
-filtersubvol() {
- _oldifs="$IFS"
- IFS=","
- set "$@"
- IFS="$_oldifs"
- while [ $# -gt 0 ]; do
- case $1 in
- subvol=*) :;;
- *) printf '%s' "${1}," ;;
- esac
- shift
- done
-}
-
-mount_usr()
-{
- #
- # mount_usr [true | false]
- # Expected a "true" value for the last attempt to mount /usr. On the last
- # attempt, in case of failure drop to shell.
- #
- # Return 0 when everything is all right
- # In case of failure and /usr has been detected:
- # return 2 when $1 is "true" (drop to shell invoked)
- # (note: possibly it's nonsense, but to be sure..)
- # return 1 otherwise
- #
- _last_attempt="$1"
- # check, if we have to mount the /usr filesystem
- while read -r _dev _mp _fs _opts _freq _passno; do
- [ "${_dev%%#*}" != "$_dev" ] && continue
- if [ "$_mp" = "/usr" ]; then
- case "$_dev" in
- LABEL=*)
- _dev="$(echo "$_dev" | sed 's,/,\\x2f,g')"
- _dev="/dev/disk/by-label/${_dev#LABEL=}"
- ;;
- UUID=*)
- _dev="${_dev#block:}"
- _dev="/dev/disk/by-uuid/${_dev#UUID=}"
- ;;
- esac
-
- # shellcheck disable=SC2154 # Variable root is assigned by dracut
- _root_dev=${root#block:}
-
- if strstr "$_opts" "subvol=" && \
- [ "$(stat -c '%D:%i' "$_root_dev")" = "$(stat -c '%D:%i' "$_dev")" ] && \
- [ -n "$rflags" ]; then
- # for btrfs subvolumes we have to mount /usr with the same rflags
- rflags=$(filtersubvol "$rflags")
- rflags=${rflags%%,}
- _opts="${_opts:+${_opts},}${rflags}"
- elif getargbool 0 ro; then
- # if "ro" is specified, we want /usr to be mounted read-only
- _opts="${_opts:+${_opts},}ro"
- elif getargbool 0 rw; then
- # if "rw" is specified, we want /usr to be mounted read-write
- _opts="${_opts:+${_opts},}rw"
- fi
- echo "$_dev ${NEWROOT}${_mp} $_fs ${_opts} $_freq $_passno"
- _usr_found="1"
- break
- fi
- done < "${NEWROOT}/etc/fstab" >> /etc/fstab
-
- if [ "$_usr_found" = "" ]; then
- # nothing to do
- return 0
- fi
-
- info "Mounting /usr with -o $_opts"
- mount "${NEWROOT}/usr" 2>&1 | vinfo
- mount -o remount,rw "${NEWROOT}/usr"
-
- if ismounted "${NEWROOT}/usr"; then
- # success!!
- return 0
- fi
-
- if [ "$_last_attempt" = "true" ]; then
- warn "Mounting /usr to ${NEWROOT}/usr failed"
- warn "*** Dropping you to a shell; the system will continue"
- warn "*** when you leave the shell."
- action_on_fail
- return 2
- fi
-
- return 1
-}
-
-
-try_to_mount_usr() {
- _last_attempt="$1"
- if [ ! -f "${NEWROOT}/etc/fstab" ]; then
- warn "File ${NEWROOT}/etc/fstab doesn't exist."
- return 1
- fi
-
- # In case we have the LVM command available try make it activate all partitions
- if command -v lvm 2>/dev/null 1>/dev/null; then
- lvm vgchange --sysinit -a y || {
- warn "Detected problem when tried to activate LVM VG."
- if [ "$_last_attempt" != "true" ]; then
- # this is not last execution, retry
- return 1
- fi
- # NOTE(pstodulk):
- # last execution, so call mount_usr anyway
- # I am not 100% about lvm vgchange exit codes and I am aware of
- # possible warnings, in this last run, let's keep it on mount_usr
- # anyway..
- }
- fi
-
- mount_usr "$1"
-}
-
-_sleep_timeout=15
-_last_attempt="false"
-for i in 0 1 2 3 4 5 6 7 8 9 10 11; do
- info "Storage initialisation: Attempt $i of 11. Wait $_sleep_timeout seconds."
- sleep $_sleep_timeout
- if [ $i -eq 11 ]; then
- _last_attempt="true"
- fi
- try_to_mount_usr "$_last_attempt" && break
-
- # something is wrong. In some cases, storage needs more time for the
- # initialisation - especially in case of SAN.
-
- if [ "$_last_attempt" = "true" ]; then
- warn "The last attempt to initialize storage has not been successful."
- warn "Unknown state of the storage. It is possible that upgrade will be stopped."
- break
- fi
-
- warn "Failed attempt to initialize the storage. Retry..."
-done
-
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/initrd-cleanup-override.conf b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/initrd-cleanup-override.conf
new file mode 100644
index 00000000..d24e0ef0
--- /dev/null
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/initrd-cleanup-override.conf
@@ -0,0 +1,3 @@
+[Service]
+ExecStart=
+ExecStart=-/usr/bin/true
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/module-setup.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/module-setup.sh
index 06479fb5..30ae57b3 100755
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/module-setup.sh
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/module-setup.sh
@@ -54,6 +54,17 @@ install() {
ln -sf "../${s}.service" "$upgrade_wantsdir"
done
+ # Setup modified initrd-cleanup.service in the upgrade initramfs to enable
+ # storage initialisation using systemd-fstab-generator. We want to run the
+ # initrd-parse-etc.service but this one triggers also the initrd-cleanup.service
+ # which triggers the switch-root and isolate actions that basically kill
+ # the original upgrade service when used.
+ # The initrd-parse-etc.service has different content across RHEL systems,
+ # so we override initrd-cleanup.service instead, as we do not need
+ # that one for the upgrade process.
+ mkdir -p "${unitdir}/initrd-cleanup.service.d"
+ inst_simple "${_moddir}/initrd-cleanup-override.conf" "${unitdir}/initrd-cleanup.service.d/initrd-cleanup-override.conf"
+
# just try : set another services into the wantsdir
# sysroot.mount \
# dracut-mount \
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/upgrade.target b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/upgrade.target
index 366b5cab..d2bf7313 100644
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/upgrade.target
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/upgrade.target
@@ -2,7 +2,7 @@
Description=System Upgrade
Documentation=man:upgrade.target(7)
# ##sysinit.target sockets.target initrd-root-fs.target initrd-root-device.target initrd-fs.target
-Wants=initrd-root-fs.target initrd-root-device.target initrd-fs.target initrd-usr-fs.target
+Wants=initrd-root-fs.target initrd-root-device.target initrd-fs.target initrd-usr-fs.target initrd-parse-etc.service
Requires=basic.target sysroot.mount
-After=basic.target sysroot.mount
+After=basic.target sysroot.mount initrd-fs.target
AllowIsolate=yes
--
2.51.1

View File

@@ -0,0 +1,41 @@
From 7e0fb44bb673893d0409903f6a441d0eb2829d22 Mon Sep 17 00:00:00 2001
From: Evgeni Golov <evgeni@golov.de>
Date: Tue, 20 Aug 2024 15:11:02 +0200
Subject: [PATCH 02/40] don't require all versions to be defined for obsoleted
keys
In releases where we do not have any obsoleted keys, we still had to
define an entry (with an empty list), as otherwise the code would fail.
Instead, we can catch the KeyError and carry on as if nothing happened.
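
A toy illustration of the change (the key map below is made up, not the real distribution data): iterating over a release that has no entry raises KeyError, which the patched code now catches instead of failing:

# Made-up data; only release '7' defines obsoleted keys here.
obsoleted_keys_map = {'7': ['gpg-pubkey-2fa658e0-45700c69']}

keys = []
try:
    for version in range(7, 9):
        for key in obsoleted_keys_map[str(version)]:  # raises KeyError for '8'
            keys.append(key)
except KeyError:
    # A release without obsoleted keys no longer aborts the actor; the loop
    # stops and whatever was collected so far is kept.
    pass
print(keys)  # ['gpg-pubkey-2fa658e0-45700c69']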
---
.../libraries/removeobsoleterpmgpgkeys.py | 13 ++++++++-----
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py b/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py
index 6e84c2e9..bda7efa3 100644
--- a/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py
+++ b/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py
@@ -12,11 +12,14 @@ def _get_obsolete_keys():
distribution = api.current_actor().configuration.os_release.release_id
obsoleted_keys_map = get_distribution_data(distribution).get('obsoleted-keys', {})
keys = []
- for version in range(7, int(get_target_major_version()) + 1):
- for key in obsoleted_keys_map[str(version)]:
- name, version, release = key.rsplit("-", 2)
- if has_package(InstalledRPM, name, version=version, release=release):
- keys.append(key)
+ try:
+ for version in range(7, int(get_target_major_version()) + 1):
+ for key in obsoleted_keys_map[str(version)]:
+ name, version, release = key.rsplit("-", 2)
+ if has_package(InstalledRPM, name, version=version, release=release):
+ keys.append(key)
+ except KeyError:
+ pass
return keys
--
2.47.0

View File

@@ -0,0 +1,226 @@
From 9f2f1726d8a5bdd12309a3a3111984f1666b903f Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Thu, 22 Aug 2024 15:52:19 +0200
Subject: [PATCH 03/40] Add RHEL 10.0 prod-certs
Previously we temporarily used the RHEL 9 x86_64 prod cert; for other
archs it was missing completely.
Jira: OAMG-11138
---
.../common/files/prod-certs/10.0/279.pem | 37 ++++++++++
.../common/files/prod-certs/10.0/419.pem | 37 ++++++++++
.../common/files/prod-certs/10.0/479.pem | 68 ++++++++++---------
.../common/files/prod-certs/10.0/72.pem | 37 ++++++++++
4 files changed, 146 insertions(+), 33 deletions(-)
create mode 100644 repos/system_upgrade/common/files/prod-certs/10.0/279.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/10.0/419.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/10.0/72.pem
diff --git a/repos/system_upgrade/common/files/prod-certs/10.0/279.pem b/repos/system_upgrade/common/files/prod-certs/10.0/279.pem
new file mode 100644
index 00000000..f62340fc
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/10.0/279.pem
@@ -0,0 +1,37 @@
+-----BEGIN CERTIFICATE-----
+MIIGczCCBFugAwIBAgIUfZodBQY+YRSlyRRiFX1dx4vQ5y4wDQYJKoZIhvcNAQEL
+BQAwga4xCzAJBgNVBAYTAlVTMRcwFQYDVQQIDA5Ob3J0aCBDYXJvbGluYTEWMBQG
+A1UECgwNUmVkIEhhdCwgSW5jLjEYMBYGA1UECwwPUmVkIEhhdCBOZXR3b3JrMS4w
+LAYDVQQDDCVSZWQgSGF0IEVudGl0bGVtZW50IFByb2R1Y3QgQXV0aG9yaXR5MSQw
+IgYJKoZIhvcNAQkBFhVjYS1zdXBwb3J0QHJlZGhhdC5jb20wHhcNMjQwODE1MDYx
+NjQ5WhcNNDQwODE1MDYxNjQ5WjBEMUIwQAYDVQQDDDlSZWQgSGF0IFByb2R1Y3Qg
+SUQgWzA0YTU4NDFkLTVlNmUtNDU1Yy1hZWYwLTdhOTQ0NTBiNjg3Nl0wggIiMA0G
+CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDGP0nTjP4TN3LHVTfeQV+0u/Se01LU
+FJ66GhksOGzXzKSx6kbuFde0eHYIwV8tmZOMDIv2LVezHKRClVB1dMalQXfcLaoF
+AcHmCViz353vzXHynybzMXFs9xbzZMglduBbcStWHy+TmoJsbVwIAAdv4NYyrQQD
+LLVuX8mACCFg0YFG8ok5tN0Kt2liHTYpSoEuRI9ke+joNQkU3fsxcOlV5Cr1W2pG
+OkosvC4R9dvRjsjnEQ6tHeRhs5oEBZW3eZhnW3Qv8p9jaNU51TlYXLIH0+Fsx0uL
+XETzTWP4YmvBwtrGaq+PhRogJHNw8BM/zrNUzUEFBr6WKWRFB6zkfKNnNkOIZi52
+deFuqYuj+fRy5ehAFVWOHNFMzHvUSKJqGaLD5TW8aqQeFA3FvXce03WVwCFQIOvH
+F4y+sCNh1aliWkjJbc2yw9a3VhQeJ0wFIAngpy0h/3V3IT3dpK2XHAL9CfIWxk6Z
+wSwHNUKfP0aZYyXX/pfMFLXINSoHKSXHRMsf7P+wr0D47atkDLWYHIJjBXG9s5mG
+eobEC5OghL4DzW/mEKOwKI5JxUH5yKXfRgG7RwfzlFnQgs2Qd0p2sstZbjCOmEra
+cGfaDaLf7O1/6dAQPalCpn+uG5bv2NzIJmX2Rep7XA50XQLBqHg3r/cvMhcQQrIQ
+nE2pDC01zYhUTwIDAQABo4HxMIHuMAkGA1UdEwQCMAAwQwYMKwYBBAGSCAkBghcB
+BDMMMVJlZCBIYXQgRW50ZXJwcmlzZSBMaW51eCBmb3IgUG93ZXIsIGxpdHRsZSBl
+bmRpYW4wFgYMKwYBBAGSCAkBghcCBAYMBDEwLjAwGQYMKwYBBAGSCAkBghcDBAkM
+B3BwYzY0bGUwKQYMKwYBBAGSCAkBghcEBBkMF3JoZWwtMTAscmhlbC0xMC1wcGM2
+NGxlMB0GA1UdDgQWBBRh6iC1NXyvZ2Q6/2sI5hB40M0flTAfBgNVHSMEGDAWgBSW
+/bscQED/QIStsh8LJsHDam/WfDANBgkqhkiG9w0BAQsFAAOCAgEAv6ySsgygc2z2
+kQJeu9sdvBNFKe+gEtXbPu6+rZKPPosW3cggMJCnsZgki3nUogovz0Z3MPkbmRz+
+GJwVjiVBnfUQLoORSDYwqYZB4WRoqszW/dytd7/64IehvD/JZo3Oa8BNYRSG/Ukh
+7iUIT8ryFIH1DTUIersVObINN2gk3hC2JJXoTfNqIYG+4OAEUE7/F4CptRAGbgH/
+4/9vfe2KNXvPMoWvILpXpD5w8t9Xh0Wl97N1W7+FLVRwQHAQ2/yBTu/sY27FvVSl
+0o+SBSvjTKIi+9QslRpi0QCVza5WxHTiO8nzYgzFjfMkt6lzK74puf3VJavpqkQ9
+dVfyp36A3Fh6vDsiNxhsfKrp8z2JnKA3vdslsH7cOHCIFYHXiqeaP654t4oGeESD
+EPfS6PpXSyi47Kd/qjA2srgpXNQl2yMd0ih6NoHaoSYXFfb4LX6cWFGcT/AWZsaC
+xv2pN9J0KhF2loLp8SK19FESc0rJShkAacTcxeYjuDYbvLtJi4Z5aWWVU421rMSs
+X9IdiWa4WL70ZaDK5cP54S4zZNsVDKniUzNXwPltDCpqefy8ka4o5QlWNreBrXXW
+6cy8I6L2om7xZ5hAZ3CB7nUZe9QE/LXnHqK3cQetvd5Q2LMnp6gVtgQ4a+7vD9xz
+ExLtbBZjvGJFudimMmOxvn/J5+GMmm4=
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/10.0/419.pem b/repos/system_upgrade/common/files/prod-certs/10.0/419.pem
new file mode 100644
index 00000000..08cb5b02
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/10.0/419.pem
@@ -0,0 +1,37 @@
+-----BEGIN CERTIFICATE-----
+MIIGZTCCBE2gAwIBAgIUWARL99TkK+hxtTJkE5icdHXLfY0wDQYJKoZIhvcNAQEL
+BQAwga4xCzAJBgNVBAYTAlVTMRcwFQYDVQQIDA5Ob3J0aCBDYXJvbGluYTEWMBQG
+A1UECgwNUmVkIEhhdCwgSW5jLjEYMBYGA1UECwwPUmVkIEhhdCBOZXR3b3JrMS4w
+LAYDVQQDDCVSZWQgSGF0IEVudGl0bGVtZW50IFByb2R1Y3QgQXV0aG9yaXR5MSQw
+IgYJKoZIhvcNAQkBFhVjYS1zdXBwb3J0QHJlZGhhdC5jb20wHhcNMjQwODE1MDYx
+NjQ5WhcNNDQwODE1MDYxNjQ5WjBEMUIwQAYDVQQDDDlSZWQgSGF0IFByb2R1Y3Qg
+SUQgW2Y3ZWFmNGU2LTYwZGYtNDMyNC04N2I0LTdhNGUzZGVkZmViNV0wggIiMA0G
+CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDGP0nTjP4TN3LHVTfeQV+0u/Se01LU
+FJ66GhksOGzXzKSx6kbuFde0eHYIwV8tmZOMDIv2LVezHKRClVB1dMalQXfcLaoF
+AcHmCViz353vzXHynybzMXFs9xbzZMglduBbcStWHy+TmoJsbVwIAAdv4NYyrQQD
+LLVuX8mACCFg0YFG8ok5tN0Kt2liHTYpSoEuRI9ke+joNQkU3fsxcOlV5Cr1W2pG
+OkosvC4R9dvRjsjnEQ6tHeRhs5oEBZW3eZhnW3Qv8p9jaNU51TlYXLIH0+Fsx0uL
+XETzTWP4YmvBwtrGaq+PhRogJHNw8BM/zrNUzUEFBr6WKWRFB6zkfKNnNkOIZi52
+deFuqYuj+fRy5ehAFVWOHNFMzHvUSKJqGaLD5TW8aqQeFA3FvXce03WVwCFQIOvH
+F4y+sCNh1aliWkjJbc2yw9a3VhQeJ0wFIAngpy0h/3V3IT3dpK2XHAL9CfIWxk6Z
+wSwHNUKfP0aZYyXX/pfMFLXINSoHKSXHRMsf7P+wr0D47atkDLWYHIJjBXG9s5mG
+eobEC5OghL4DzW/mEKOwKI5JxUH5yKXfRgG7RwfzlFnQgs2Qd0p2sstZbjCOmEra
+cGfaDaLf7O1/6dAQPalCpn+uG5bv2NzIJmX2Rep7XA50XQLBqHg3r/cvMhcQQrIQ
+nE2pDC01zYhUTwIDAQABo4HjMIHgMAkGA1UdEwQCMAAwNQYMKwYBBAGSCAkBgyMB
+BCUMI1JlZCBIYXQgRW50ZXJwcmlzZSBMaW51eCBmb3IgQVJNIDY0MBYGDCsGAQQB
+kggJAYMjAgQGDAQxMC4wMBkGDCsGAQQBkggJAYMjAwQJDAdhYXJjaDY0MCkGDCsG
+AQQBkggJAYMjBAQZDBdyaGVsLTEwLHJoZWwtMTAtYWFyY2g2NDAdBgNVHQ4EFgQU
+YeogtTV8r2dkOv9rCOYQeNDNH5UwHwYDVR0jBBgwFoAUlv27HEBA/0CErbIfCybB
+w2pv1nwwDQYJKoZIhvcNAQELBQADggIBAIpdcHN7RN18pg5ELfc55Sj58ivL5N25
+19KprqbM7aVum32abw7/Qksfs6maGQpU6Hh/UqhJlGQ2bN48jZ/kdMKor4agSQ/T
+iwr3b8RBJFPVCuqQJXIe4g3iRbHfnIjGxgoMgv36j58PENoEnpPtR7ZtHMyqQ2SO
+m1WRQhY5tJ4Fk/Zkx/trxlNvmsTAjNRa530kqG4TfiMVvWNaVdxHsjMv0lXLJRXx
+KT6+iHt2QBs2No5O8cjlXr/CzfGrB5TlBNrsHqhO0Llmw28KpcWGYGdexKdIHrDG
+A/K0Pr21yRstUWN39jz/tdEqt1q8T7/it3oM976keQmFAxBa/CpyEG5Y6FKw9+F0
+LtkAyI3XGHK7LbCOE67s7u0/BfgQvww1FqztVnVZ4sXlagj/IuYPJBhfGDe/6tik
+laqP8FtR6xJdSra2YQMBc0kZb0Sv1uy7pGofNSvLM5L76XqiwKoDVo/eAcl60OWY
+rF86pEDLGDmdJBLJKX2/77pzpQpZ9Yvc4vWwoZrP4gRKBuWF28aLH0OsWzdsfdMG
+9+DrcO/58slMbWng1ZzOQyEjp7x1kto5sa5m2q8LMo06ETYT8ps5A0hyltBz1yAt
+JEBS4Y14YlF6Px67aTak07MNo7AaaphuD47D2Sy3pwHa+vOx4nv/G33+G0iOm3Lr
+zVAjwlfLIUB9
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/10.0/479.pem b/repos/system_upgrade/common/files/prod-certs/10.0/479.pem
index 1ea1cd3d..d89f6188 100644
--- a/repos/system_upgrade/common/files/prod-certs/10.0/479.pem
+++ b/repos/system_upgrade/common/files/prod-certs/10.0/479.pem
@@ -1,35 +1,37 @@
-----BEGIN CERTIFICATE-----
-MIIGFTCCA/2gAwIBAgIJALDxRLt/tVDQMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
-VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
-YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
-IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
-ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIzMDcxOTE2MzQwOFoXDTQzMDcx
-OTE2MzQwOFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFsxZDg0ZDQ5
-Ny1jZmNmLTQxNjEtOTM0YS0zNzk2MDU4M2ZmZGZdMIICIjANBgkqhkiG9w0BAQEF
-AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
-sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
-8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
-RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
-5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
-xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
-QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
-yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
-1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
-5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
-ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
-AwEAAaOBnjCBmzAJBgNVHRMEAjAAMDUGDCsGAQQBkggJAYNfAQQlDCNSZWQgSGF0
-IEVudGVycHJpc2UgTGludXggZm9yIHg4Nl82NDAVBgwrBgEEAZIICQGDXwIEBQwD
-OS40MBgGDCsGAQQBkggJAYNfAwQIDAZ4ODZfNjQwJgYMKwYBBAGSCAkBg18EBBYM
-FHJoZWwtOSxyaGVsLTkteDg2XzY0MA0GCSqGSIb3DQEBCwUAA4ICAQCGUDPFBrLs
-sK/RITJothRhKhKNX3zu9TWRG0WKxszCx/y7c4yEfH1TV/yd7BNB2RubaoayWz8E
-TQjcRW8BnVu9JrlbdpWJm4eN+dOOpcESPilLnkz4Tr0WYDsT1/jk/uiorK4h21S0
-EwMicuSuEmm0OUEX0zj2X/IyveFRtpJpH/JktznCkvexysc1JRzqMCbal8GipRX9
-Xf7Oko6QiaUpu5GDLN2OXhizYHdR2f3l+Sn2cScsbi3fSVv+DLsnaz6J0kZ4U8q3
-lYk/ZYifJjG+/7cv3e+usixpmK/qYlpOvunUDnqOkDfUs4/4bZjH8e8CdqJk4YvU
-RRtLr7muXEJsaqF7lxAViXnKxT/z/+1kOgN/+Oyzjs4QDsk2HQpWHFgNYSSG9Mmz
-PUS8tk2T0j5sN55X7QRRl5c0oqrBU5XaWyL26QcfONYcR8dBaKawjxg8CI9KzsYY
-sb2jjS+fBkB1OI2c6z4OZRd+0N6FQ6gq++KiXOLFvi/QSFNi9Veb56c5tR2l6fBk
-0pSH06Gg2s0aQg20NdMIr+HaYsVdJRsE1FgQ2tlfFx9rGkcqhgwV3Za/abgtRb2o
-YVwps28DLm41DXf5DnXK+BXFHrtR/3YAZtga+R7OL/RvcF0kc2kudlxqd/8Y33uL
-nqnoATy31FTW4J4rEfanJTQgTpatZmbaLQ==
+MIIGYzCCBEugAwIBAgIUL5D34AcwqLAbqlUcxntHUCtEVxQwDQYJKoZIhvcNAQEL
+BQAwga4xCzAJBgNVBAYTAlVTMRcwFQYDVQQIDA5Ob3J0aCBDYXJvbGluYTEWMBQG
+A1UECgwNUmVkIEhhdCwgSW5jLjEYMBYGA1UECwwPUmVkIEhhdCBOZXR3b3JrMS4w
+LAYDVQQDDCVSZWQgSGF0IEVudGl0bGVtZW50IFByb2R1Y3QgQXV0aG9yaXR5MSQw
+IgYJKoZIhvcNAQkBFhVjYS1zdXBwb3J0QHJlZGhhdC5jb20wHhcNMjQwODE1MDYx
+NjQ5WhcNNDQwODE1MDYxNjQ5WjBEMUIwQAYDVQQDDDlSZWQgSGF0IFByb2R1Y3Qg
+SUQgWzk5NDZhMmY5LTI4NDMtNDJhOS1iNzhlLTIzM2E5ODIwYjVhZV0wggIiMA0G
+CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDGP0nTjP4TN3LHVTfeQV+0u/Se01LU
+FJ66GhksOGzXzKSx6kbuFde0eHYIwV8tmZOMDIv2LVezHKRClVB1dMalQXfcLaoF
+AcHmCViz353vzXHynybzMXFs9xbzZMglduBbcStWHy+TmoJsbVwIAAdv4NYyrQQD
+LLVuX8mACCFg0YFG8ok5tN0Kt2liHTYpSoEuRI9ke+joNQkU3fsxcOlV5Cr1W2pG
+OkosvC4R9dvRjsjnEQ6tHeRhs5oEBZW3eZhnW3Qv8p9jaNU51TlYXLIH0+Fsx0uL
+XETzTWP4YmvBwtrGaq+PhRogJHNw8BM/zrNUzUEFBr6WKWRFB6zkfKNnNkOIZi52
+deFuqYuj+fRy5ehAFVWOHNFMzHvUSKJqGaLD5TW8aqQeFA3FvXce03WVwCFQIOvH
+F4y+sCNh1aliWkjJbc2yw9a3VhQeJ0wFIAngpy0h/3V3IT3dpK2XHAL9CfIWxk6Z
+wSwHNUKfP0aZYyXX/pfMFLXINSoHKSXHRMsf7P+wr0D47atkDLWYHIJjBXG9s5mG
+eobEC5OghL4DzW/mEKOwKI5JxUH5yKXfRgG7RwfzlFnQgs2Qd0p2sstZbjCOmEra
+cGfaDaLf7O1/6dAQPalCpn+uG5bv2NzIJmX2Rep7XA50XQLBqHg3r/cvMhcQQrIQ
+nE2pDC01zYhUTwIDAQABo4HhMIHeMAkGA1UdEwQCMAAwNQYMKwYBBAGSCAkBg18B
+BCUMI1JlZCBIYXQgRW50ZXJwcmlzZSBMaW51eCBmb3IgeDg2XzY0MBYGDCsGAQQB
+kggJAYNfAgQGDAQxMC4wMBgGDCsGAQQBkggJAYNfAwQIDAZ4ODZfNjQwKAYMKwYB
+BAGSCAkBg18EBBgMFnJoZWwtMTAscmhlbC0xMC14ODZfNjQwHQYDVR0OBBYEFGHq
+ILU1fK9nZDr/awjmEHjQzR+VMB8GA1UdIwQYMBaAFJb9uxxAQP9AhK2yHwsmwcNq
+b9Z8MA0GCSqGSIb3DQEBCwUAA4ICAQAa+c2/Usg6JToULhYTdLhf15Hk6xxdlwT7
+zZlnZLbuAKtaDqP1NiSiX0Z/lMJzFfW0B/zyWLy8uiXLYmF5V28f8yWK0Nksx2v7
+I7u6ZZN2dKDQZKsEoP0g3ptvVRWn9h5otS7yPkOK4Dzj04yJqOSGP9bp6OHEhm1S
+x4ErITkN/3MXOf9vT+I6wydVKsw4fdlWgVjmBd90bzVTnv4dWtJio+le+9ad9RSf
+M3aD5ufiELeRKMp6ExnC/cnoWtuH+b4BJ37TQ3Kpn3fDtbrzVvQH/dpqZ7P33yqg
+PnBEXOiLimDnnmDJ9ImQ1pVTrKJMxaj1Mk6onERe36n/iAsj+BwZvBiv7UaLPMnW
+nJGg+LQ4iUZrGWYD4N9Ou++nvsR8dCWRhXSuXensfli3lL/W0P62yzfYCyqOYeL1
+msDcCmBEWJUtAaeAbASUIVx02JWPPmMSUqWs8xOecQjzoGuCQg4JM/UfsZzxepw0
+bs9YSUVw8J9R2d4kuze65qDTMRg+cK2LX1xg1KkR/UWZOGxHHJAfwGWdPwSkiOPQ
+MVJ7LJjvozebHWSuiSxk+GWWr+NdxIJrFRGbivXyAkmqMRrPe1VLVxWwCdyud9o8
+b2WbFgrNS2jOnHwldtM2ZAhrF5W4ckvVL7hLp2JoQnJfCcWson9NK6Y2M4bNwQnC
+ihxphLzOAw==
-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/10.0/72.pem b/repos/system_upgrade/common/files/prod-certs/10.0/72.pem
new file mode 100644
index 00000000..e0274f9c
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/10.0/72.pem
@@ -0,0 +1,37 @@
+-----BEGIN CERTIFICATE-----
+MIIGZDCCBEygAwIBAgIUSTvcD4Wsduixh8PFmwk6aI0KTEcwDQYJKoZIhvcNAQEL
+BQAwga4xCzAJBgNVBAYTAlVTMRcwFQYDVQQIDA5Ob3J0aCBDYXJvbGluYTEWMBQG
+A1UECgwNUmVkIEhhdCwgSW5jLjEYMBYGA1UECwwPUmVkIEhhdCBOZXR3b3JrMS4w
+LAYDVQQDDCVSZWQgSGF0IEVudGl0bGVtZW50IFByb2R1Y3QgQXV0aG9yaXR5MSQw
+IgYJKoZIhvcNAQkBFhVjYS1zdXBwb3J0QHJlZGhhdC5jb20wHhcNMjQwODE1MDYx
+NjQ5WhcNNDQwODE1MDYxNjQ5WjBEMUIwQAYDVQQDDDlSZWQgSGF0IFByb2R1Y3Qg
+SUQgW2VjN2EwZDQyLTgzNjItNDg2YS04ZjcyLTc3YThiOWU2MjM0YV0wggIiMA0G
+CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDGP0nTjP4TN3LHVTfeQV+0u/Se01LU
+FJ66GhksOGzXzKSx6kbuFde0eHYIwV8tmZOMDIv2LVezHKRClVB1dMalQXfcLaoF
+AcHmCViz353vzXHynybzMXFs9xbzZMglduBbcStWHy+TmoJsbVwIAAdv4NYyrQQD
+LLVuX8mACCFg0YFG8ok5tN0Kt2liHTYpSoEuRI9ke+joNQkU3fsxcOlV5Cr1W2pG
+OkosvC4R9dvRjsjnEQ6tHeRhs5oEBZW3eZhnW3Qv8p9jaNU51TlYXLIH0+Fsx0uL
+XETzTWP4YmvBwtrGaq+PhRogJHNw8BM/zrNUzUEFBr6WKWRFB6zkfKNnNkOIZi52
+deFuqYuj+fRy5ehAFVWOHNFMzHvUSKJqGaLD5TW8aqQeFA3FvXce03WVwCFQIOvH
+F4y+sCNh1aliWkjJbc2yw9a3VhQeJ0wFIAngpy0h/3V3IT3dpK2XHAL9CfIWxk6Z
+wSwHNUKfP0aZYyXX/pfMFLXINSoHKSXHRMsf7P+wr0D47atkDLWYHIJjBXG9s5mG
+eobEC5OghL4DzW/mEKOwKI5JxUH5yKXfRgG7RwfzlFnQgs2Qd0p2sstZbjCOmEra
+cGfaDaLf7O1/6dAQPalCpn+uG5bv2NzIJmX2Rep7XA50XQLBqHg3r/cvMhcQQrIQ
+nE2pDC01zYhUTwIDAQABo4HiMIHfMAkGA1UdEwQCMAAwOwYLKwYBBAGSCAkBSAEE
+LAwqUmVkIEhhdCBFbnRlcnByaXNlIExpbnV4IGZvciBJQk0geiBTeXN0ZW1zMBUG
+CysGAQQBkggJAUgCBAYMBDEwLjAwFgYLKwYBBAGSCAkBSAMEBwwFczM5MHgwJgYL
+KwYBBAGSCAkBSAQEFwwVcmhlbC0xMCxyaGVsLTEwLXMzOTB4MB0GA1UdDgQWBBRh
+6iC1NXyvZ2Q6/2sI5hB40M0flTAfBgNVHSMEGDAWgBSW/bscQED/QIStsh8LJsHD
+am/WfDANBgkqhkiG9w0BAQsFAAOCAgEAsj4qPVsDkFrfuVDn8JCJ7tIH5WhaOzL6
+3GBsQIKGd8a1WscPfSpr/phNSBPWFyvV2b+0HzblYzBZbx6ExykTDLh5L01nPM0s
++hqPxZgF/kcTbLWmAanl32R9+Gs2P2JN1CaCclXgM4USEagBWYeMhJSmQR3bOnSe
+Jjm3tjvhnbIQd6xgPpTjrqZ35z1BW0P0qQFdBbB0k+MfPkhYKEr+Vfn0rU8vk4UP
+F9sY9HkZLqIBxlXeTUerNZvHSuOy2KgoS4l25/QwUutHnnSGZZpARiU1XYNcynVL
+r5COHlb6TYkeRhSAm6RVM4XPYoFgN6cbhY1orwFC2/0i30EnsTMB6ctnLKCf7qgM
+GDG2W7ct0m6koA7s2TGmgp33DPw9adX7qgIV0OjLzBYJ1fyVv3sYlOKRuyDz0l+N
+u6Rnv1ecNUspWn+5ogBbdgwU6yah6oo/fJIWm62U38UGH5ic+/7sBnga8q5sDI90
++h+nlTIAnD0ICzjEDASiLlYft+hQ9pOt/rgEIrPeKTe+fbefUIXJ5h343E51POnY
+uZRXcirc33QL/PgBRce1taIXjsRD+FSJM0tx/vf8H9j0rzSAxDoXJNsdq4/32scy
+6Zk2fgtm80xxIzju84jXVUrSBRMpWD9I+FZId4IE7tQhwKNi1b7DdNeaQLfaoq8U
+1PEea/tQDSA=
+-----END CERTIFICATE-----
--
2.47.0

View File

@@ -1,132 +0,0 @@
From 04a2ec2574da233a41d32f70eab780b6c305ff31 Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Thu, 19 Dec 2024 10:33:24 +0100
Subject: [PATCH 03/55] feat(lvm_autoactivation): add lvm autoactivation
Add an LVM autoactivation mechanism to the upgrade initramfs. The core
of the mechanism is a special udev rule that is triggered
when a new device is detected. The rule then calls two LVM binaries
(which are also included in the upgrade initramfs) to activate
the volume groups and logical volumes.
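
Condensed from the library added below, the mechanism amounts to asking the initramfs generator to include the udev rule and the two LVM binaries, but only when lvm2 is installed; a standalone sketch with plain data structures instead of the real leapp messages:

# Files the upgrade initramfs needs for LVM autoactivation (paths taken from
# the patch below; the real actor wraps them in an UpgradeInitramfsTasks message).
LVM_AUTOACTIVATION_FILES = [
    '/usr/sbin/pvscan',
    '/usr/sbin/vgchange',
    '/usr/lib/udev/rules.d/69-dm-lvm.rules',  # rule that triggers pvscan/vgchange
]

def lvm_autoactivation_files(installed_rpms):
    # Only request the files when lvm2 is present on the source system.
    return LVM_AUTOACTIVATION_FILES if 'lvm2' in installed_rpms else []

print(lvm_autoactivation_files({'lvm2', 'systemd'}))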
---
.../enable_lvm_autoactivation/actor.py | 21 ++++++++
.../libraries/enable_lvm_autoactivation.py | 21 ++++++++
.../test_lvm_autoactivation_enablement.py | 50 +++++++++++++++++++
3 files changed, 92 insertions(+)
create mode 100644 repos/system_upgrade/common/actors/initramfs/enable_lvm_autoactivation/actor.py
create mode 100644 repos/system_upgrade/common/actors/initramfs/enable_lvm_autoactivation/libraries/enable_lvm_autoactivation.py
create mode 100644 repos/system_upgrade/common/actors/initramfs/enable_lvm_autoactivation/tests/test_lvm_autoactivation_enablement.py
diff --git a/repos/system_upgrade/common/actors/initramfs/enable_lvm_autoactivation/actor.py b/repos/system_upgrade/common/actors/initramfs/enable_lvm_autoactivation/actor.py
new file mode 100644
index 00000000..aba60645
--- /dev/null
+++ b/repos/system_upgrade/common/actors/initramfs/enable_lvm_autoactivation/actor.py
@@ -0,0 +1,21 @@
+from leapp.actors import Actor
+from leapp.libraries.actor import enable_lvm_autoactivation as enable_lvm_autoactivation_lib
+from leapp.models import DistributionSignedRPM, UpgradeInitramfsTasks
+from leapp.tags import FactsPhaseTag, IPUWorkflowTag
+
+
+class EnableLVMAutoactivation(Actor):
+ """
+ Enable LVM autoactivation in upgrade initramfs.
+
+ Produce instructions for upgrade initramfs generation that will result in LVM
+ autoactivation in the initramfs.
+ """
+
+ name = 'enable_lvm_autoactivation'
+ consumes = (DistributionSignedRPM,)
+ produces = (UpgradeInitramfsTasks, )
+ tags = (FactsPhaseTag, IPUWorkflowTag)
+
+ def process(self):
+ enable_lvm_autoactivation_lib.emit_lvm_autoactivation_instructions()
diff --git a/repos/system_upgrade/common/actors/initramfs/enable_lvm_autoactivation/libraries/enable_lvm_autoactivation.py b/repos/system_upgrade/common/actors/initramfs/enable_lvm_autoactivation/libraries/enable_lvm_autoactivation.py
new file mode 100644
index 00000000..e312277b
--- /dev/null
+++ b/repos/system_upgrade/common/actors/initramfs/enable_lvm_autoactivation/libraries/enable_lvm_autoactivation.py
@@ -0,0 +1,21 @@
+from leapp.libraries.common.rpms import has_package
+from leapp.libraries.stdlib import api
+from leapp.models import DistributionSignedRPM, UpgradeInitramfsTasks
+
+
+def emit_lvm_autoactivation_instructions():
+ if not has_package(DistributionSignedRPM, 'lvm2'):
+ api.current_logger().debug(
+ 'Upgrade initramfs will not autoenable LVM devices - `lvm2` RPM is not installed.'
+ )
+ return
+
+ # the 69-dm-lvm.rules trigger pvscan and vgchange when LVM device is detected
+ files_to_include = [
+ '/usr/sbin/pvscan',
+ '/usr/sbin/vgchange',
+ '/usr/lib/udev/rules.d/69-dm-lvm.rules'
+ ]
+ lvm_autoactivation_instructions = UpgradeInitramfsTasks(include_files=files_to_include)
+
+ api.produce(lvm_autoactivation_instructions)
diff --git a/repos/system_upgrade/common/actors/initramfs/enable_lvm_autoactivation/tests/test_lvm_autoactivation_enablement.py b/repos/system_upgrade/common/actors/initramfs/enable_lvm_autoactivation/tests/test_lvm_autoactivation_enablement.py
new file mode 100644
index 00000000..c5150aea
--- /dev/null
+++ b/repos/system_upgrade/common/actors/initramfs/enable_lvm_autoactivation/tests/test_lvm_autoactivation_enablement.py
@@ -0,0 +1,50 @@
+from leapp.libraries.actor import enable_lvm_autoactivation
+from leapp.libraries.common.testutils import CurrentActorMocked, produce_mocked
+from leapp.libraries.stdlib import api
+from leapp.models import DistributionSignedRPM, RPM, UpgradeInitramfsTasks
+
+
+def test_emit_lvm_autoactivation_instructions_produces_correct_message(monkeypatch):
+ """Test that emit_lvm_autoactivation_instructions produces UpgradeInitramfsTasks with correct files."""
+ lvm_package = RPM(
+ name='lvm2',
+ version='2',
+ release='1',
+ epoch='1',
+ packager='',
+ arch='x86_64',
+ pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51'
+ )
+
+ msgs = [
+ DistributionSignedRPM(items=[lvm_package])
+ ]
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs))
+ monkeypatch.setattr(api, 'produce', produce_mocked())
+
+ enable_lvm_autoactivation.emit_lvm_autoactivation_instructions()
+
+ assert api.produce.called == 1
+
+ produced_msg = api.produce.model_instances[0]
+
+ assert isinstance(produced_msg, UpgradeInitramfsTasks)
+
+ expected_files = [
+ '/usr/sbin/pvscan',
+ '/usr/sbin/vgchange',
+ '/usr/lib/udev/rules.d/69-dm-lvm.rules'
+ ]
+ assert produced_msg.include_files == expected_files
+
+
+def test_no_action_if_lvm_rpm_missing(monkeypatch):
+ msgs = [
+ DistributionSignedRPM(items=[])
+ ]
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs))
+ monkeypatch.setattr(api, 'produce', produce_mocked())
+
+ enable_lvm_autoactivation.emit_lvm_autoactivation_instructions()
+
+ assert api.produce.called == 0
--
2.51.1

View File

@@ -1,658 +0,0 @@
From 47fce173e75408d9a7a26225d389161caf72e244 Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Sun, 31 Aug 2025 23:49:57 +0200
Subject: [PATCH 04/55] feat(mount_unit_gen): generate mount units for the
upgrade initramfs
Run systemd-fstab-generator to produce mount units that correspond to
the content of the source system's fstab. The generated mount units are then
modified to mount each /target into /sysroot/target, reflecting that the root
of the source system is mounted as /sysroot. These mount units are made
dependencies of local-fs.target and will, therefore, be triggered by
systemd before the upgrade.
Assisted-by: Cursor (Claude Sonnet 4)
Jira-ref: RHEL-35446
@pstodulk:
Updated the code to also cover other systemd targets that can be
handled by systemd-fstab-generator, and to cover the situation when
a directory with a systemd target (requires, wants) already exists.
Tests have been updated.
Note that there are still possible issues hidden in the generated
mount unit files, as at this moment we update just the `Where` clause
and do not touch anything else (Before, After,
RequiresMountsFor, ...). But we are keeping that for future development and
testing. The call for `mount -a` is still present; we expect follow-up
PRs at this point.
Co-authored-by: Petr Stodulka <pstodulk@redhat.com>
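
The central transformation is small: every generated unit's Where= destination gets a /sysroot prefix and the unit is renamed so that its name still encodes the mount path. A self-contained sketch of that idea (string handling only, without the file I/O, symlink fixing, and copying done by the library below):

import os

def sysroot_prefix_unit(unit_name, unit_lines):
    # systemd derives the mount path from the unit name, so 'home.mount'
    # becomes 'sysroot-home.mount' once it mounts under /sysroot/home.
    new_name = 'sysroot-{}'.format(unit_name)
    new_lines = []
    for line in unit_lines:
        if line.startswith('Where='):
            _, destination = line.split('=', 1)
            line = 'Where={}'.format(os.path.join('/sysroot', destination.lstrip('/')))
        new_lines.append(line)
    return new_name, new_lines

print(sysroot_prefix_unit('home.mount', ['[Mount]', 'What=/dev/vda2', 'Where=/home']))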
---
.../initramfs/mount_units_generator/actor.py | 22 ++
.../libraries/mount_unit_generator.py | 307 ++++++++++++++++++
.../tests/test_mount_unit_generation.py | 269 +++++++++++++++
3 files changed, 598 insertions(+)
create mode 100644 repos/system_upgrade/common/actors/initramfs/mount_units_generator/actor.py
create mode 100644 repos/system_upgrade/common/actors/initramfs/mount_units_generator/libraries/mount_unit_generator.py
create mode 100644 repos/system_upgrade/common/actors/initramfs/mount_units_generator/tests/test_mount_unit_generation.py
diff --git a/repos/system_upgrade/common/actors/initramfs/mount_units_generator/actor.py b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/actor.py
new file mode 100644
index 00000000..5fe25515
--- /dev/null
+++ b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/actor.py
@@ -0,0 +1,22 @@
+from leapp.actors import Actor
+from leapp.libraries.actor import mount_unit_generator as mount_unit_generator_lib
+from leapp.models import TargetUserSpaceInfo, UpgradeInitramfsTasks
+from leapp.tags import InterimPreparationPhaseTag, IPUWorkflowTag
+
+
+class MountUnitGenerator(Actor):
+ """
+ Sets up storage initialization using systemd's mount units in the upgrade container.
+ """
+
+ name = 'mount_unit_generator'
+ consumes = (
+ TargetUserSpaceInfo,
+ )
+ produces = (
+ UpgradeInitramfsTasks,
+ )
+ tags = (IPUWorkflowTag, InterimPreparationPhaseTag)
+
+ def process(self):
+ mount_unit_generator_lib.setup_storage_initialization()
diff --git a/repos/system_upgrade/common/actors/initramfs/mount_units_generator/libraries/mount_unit_generator.py b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/libraries/mount_unit_generator.py
new file mode 100644
index 00000000..e1060559
--- /dev/null
+++ b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/libraries/mount_unit_generator.py
@@ -0,0 +1,307 @@
+import os
+import shutil
+import tempfile
+
+from leapp.exceptions import StopActorExecutionError
+from leapp.libraries.common import mounting
+from leapp.libraries.stdlib import api, CalledProcessError, run
+from leapp.models import TargetUserSpaceInfo, UpgradeInitramfsTasks
+
+
+def run_systemd_fstab_generator(output_directory):
+ api.current_logger().debug(
+ 'Generating mount units for the source system into {}'.format(output_directory)
+ )
+
+ try:
+ generator_cmd = [
+ '/usr/lib/systemd/system-generators/systemd-fstab-generator',
+ output_directory,
+ output_directory,
+ output_directory
+ ]
+ run(generator_cmd)
+ except CalledProcessError as error:
+ api.current_logger().error(
+ 'Failed to generate mount units using systemd-fstab-generator. Error: {}'.format(error)
+ )
+ details = {'details': str(error)}
+ raise StopActorExecutionError(
+ 'Failed to generate mount units using systemd-fstab-generator',
+ details
+ )
+
+ api.current_logger().debug(
+ 'Mount units successfully generated into {}'.format(output_directory)
+ )
+
+
+def _read_unit_file_lines(unit_file_path): # Encapsulate IO for tests
+ with open(unit_file_path) as unit_file:
+ return unit_file.readlines()
+
+
+def _write_unit_file_lines(unit_file_path, lines): # Encapsulate IO for tests
+ with open(unit_file_path, 'w') as unit_file:
+ unit_file.write('\n'.join(lines) + '\n')
+
+
+def _delete_file(file_path):
+ os.unlink(file_path)
+
+
+def _prefix_mount_unit_with_sysroot(mount_unit_path, new_unit_destination):
+ """
+ Prefix the mount target with /sysroot as expected in the upgrade initramfs.
+
+ A new mount unit file is written to new_unit_destination.
+ """
+ # NOTE(pstodulk): Note that right now we update just the 'Where' key, however
+ # what about RequiresMountsFor, .. there could be some hidden dragons.
+ # In case of issues, investigate these values in generated unit files.
+ api.current_logger().debug(
+ 'Prefixing {}\'s mount target with /sysroot. Output will be written to {}'.format(
+ mount_unit_path,
+ new_unit_destination
+ )
+ )
+ unit_lines = _read_unit_file_lines(mount_unit_path)
+
+ output_lines = []
+ for line in unit_lines:
+ line = line.strip()
+ if not line.startswith('Where='):
+ output_lines.append(line)
+ continue
+
+ _, destination = line.split('=', 1)
+ new_destination = os.path.join('/sysroot', destination.lstrip('/'))
+
+ output_lines.append('Where={}'.format(new_destination))
+
+ _write_unit_file_lines(new_unit_destination, output_lines)
+
+ api.current_logger().debug(
+ 'Done. Modified mount unit successfully written to {}'.format(new_unit_destination)
+ )
+
+
+def prefix_all_mount_units_with_sysroot(dir_containing_units):
+ for unit_file_path in os.listdir(dir_containing_units):
+ # systemd requires mount path to be in the unit name
+ modified_unit_destination = 'sysroot-{}'.format(unit_file_path)
+ modified_unit_destination = os.path.join(dir_containing_units, modified_unit_destination)
+
+ unit_file_path = os.path.join(dir_containing_units, unit_file_path)
+
+ if not unit_file_path.endswith('.mount'):
+ api.current_logger().debug(
+ 'Skipping {} when prefixing mount units with /sysroot - not a mount unit.'.format(
+ unit_file_path
+ )
+ )
+ continue
+
+ _prefix_mount_unit_with_sysroot(unit_file_path, modified_unit_destination)
+
+ _delete_file(unit_file_path)
+ api.current_logger().debug('Original mount unit {} removed.'.format(unit_file_path))
+
+
+def _fix_symlinks_in_dir(dir_containing_mount_units, target_dir):
+ """
+ Fix broken symlinks in given target_dir due to us modifying (renaming) the mount units.
+
+ The target_dir contains symlinks to the (mount) units that are required
+ in order for the local-fs.target to be reached. However, we renamed these units to reflect
+ that we have changed their mount destinations by prefixing the mount destination with /sysroot.
+ Hence, we regenerate the symlinks.
+ """
+
+ target_dir_path = os.path.join(dir_containing_mount_units, target_dir)
+ if not os.path.exists(target_dir_path):
+ api.current_logger().debug(
+ 'The {} directory does not exist. Skipping'
+ .format(target_dir)
+ )
+ return
+
+ api.current_logger().debug(
+ 'Removing the old {} directory from {}.'
+ .format(target_dir, dir_containing_mount_units)
+ )
+
+ shutil.rmtree(target_dir_path)
+ os.mkdir(target_dir_path)
+
+ api.current_logger().debug('Populating {} with new symlinks.'.format(target_dir))
+
+ for unit_file in os.listdir(dir_containing_mount_units):
+ if not unit_file.endswith('.mount'):
+ continue
+
+ place_fastlink_at = os.path.join(target_dir_path, unit_file)
+ fastlink_points_to = os.path.join('../', unit_file)
+ try:
+ run(['ln', '-s', fastlink_points_to, place_fastlink_at])
+
+ api.current_logger().debug(
+ 'Dependency on {} created.'.format(unit_file)
+ )
+ except CalledProcessError as err:
+ err_descr = (
+ 'Failed to create required unit dependencies under {} for the upgrade initramfs.'
+ .format(target_dir)
+ )
+ details = {'details': str(err)}
+ raise StopActorExecutionError(err_descr, details=details)
+
+
+def fix_symlinks_in_targets(dir_containing_mount_units):
+ """
+ Fix broken symlinks in *.target.* directories caused by earlier modified mount units.
+
+ Generated mount unit files are part of one of the systemd targets (list below),
+ which means that a symlink from a systemd target exists for each of
+ them. Based on this, systemd knows when (local or remote file systems)
+ they must (".requires" suffix) or could (".wants" suffix) be mounted.
+ See man 5 systemd.mount for more details on how mount units are split into
+ these targets.
+
+ The list of possible target directories where these mount units could end:
+ * local-fs.target.requires
+ * local-fs.target.wants
+ * local-fs-pre.target.requires
+ * local-fs-pre.target.wants
+ * remote-fs.target.requires
+ * remote-fs.target.wants
+ * remote-fs-pre.target.requires
+ * remote-fs-pre.target.wants
+ Most likely, unit files are not generated for "*pre*" targets, but let's be
+ sure. A longer list does not cause any issues in this code.
+
+ In most cases, "local-fs.target.requires" is the only important directory
+ for us during the upgrade. But in some (sometimes common) cases we will
+ need some of the others as well.
+
+ These directories do not necessarily have to exist if there are no mount
+ unit files that could be put there. But most likely "local-fs.target.requires"
+ will always exist.
+ """
+ dir_list = [
+ 'local-fs.target.requires',
+ 'local-fs.target.wants',
+ 'local-fs-pre.target.requires',
+ 'local-fs-pre.target.wants',
+ 'remote-fs.target.requires',
+ 'remote-fs.target.wants',
+ 'remote-fs-pre.target.requires',
+ 'remote-fs-pre.target.wants',
+ ]
+ for tdir in dir_list:
+ _fix_symlinks_in_dir(dir_containing_mount_units, tdir)
+
+
+def copy_units_into_system_location(upgrade_container_ctx, dir_with_our_mount_units):
+ """
+ Copy units and their .wants/.requires directories into the target userspace container.
+
+ :return: A list of files in the target userspace that were created by copying.
+ :rtype: list[str]
+ """
+ dest_inside_container = '/usr/lib/systemd/system'
+
+ api.current_logger().debug(
+ 'Copying generated mount units for upgrade from {} to {}'.format(
+ dir_with_our_mount_units,
+ upgrade_container_ctx.full_path(dest_inside_container)
+ )
+ )
+
+ copied_files = []
+ prefix_len_to_drop = len(upgrade_container_ctx.base_dir)
+
+ # We cannot rely on mounting library when copying into container
+ # as we want to control what happens to symlinks and
+ # shutil.copytree in Python3.6 fails if dst directory exists already
+ # - which happens in some cases when copying these files.
+ for root, dummy_dirs, files in os.walk(dir_with_our_mount_units):
+ rel_path = os.path.relpath(root, dir_with_our_mount_units)
+ if rel_path == '.':
+ rel_path = ''
+ dst_dir = os.path.join(upgrade_container_ctx.full_path(dest_inside_container), rel_path)
+ os.makedirs(dst_dir, mode=0o755, exist_ok=True)
+
+ for file in files:
+ src_file = os.path.join(root, file)
+ dst_file = os.path.join(dst_dir, file)
+ api.current_logger().debug(
+ 'Copying mount unit file {} to {}'.format(src_file, dst_file)
+ )
+ if os.path.islink(dst_file):
+ # If the destination file already exists and is a symlink, the copy
+ # would fail, and we want to overwrite it instead.
+ # NOTE(pstodulk): You could think that this cannot happen, but it
+ # possibly could in the future, so let's rather be careful and
+ # handle it. If the dst file exists, we want to overwrite it
+ # for sure.
+ _delete_file(dst_file)
+ shutil.copy2(src_file, dst_file, follow_symlinks=False)
+ copied_files.append(dst_file[prefix_len_to_drop:])
+
+ return copied_files
+
+
+def remove_units_for_targets_that_are_already_mounted_by_dracut(dir_with_our_mount_units):
+ """
+ Remove mount units for mount targets that are already mounted by dracut.
+
+ Namely, remove mount units:
+ '-.mount' (mounts /)
+ 'usr.mount' (mounts /usr)
+ """
+
+ # NOTE: remount-fs.service creates dependency cycles that are nondeterministically broken
+ # by systemd, causing unpredictable failures. The service is supposed to remount root
+ # and /usr, reapplying mount options from /etc/fstab. However, the fstab file present in
+ # the initramfs is not the fstab from the source system, and, therefore, it is pointless
+ # to require the service. It would make sense after we switched root during normal boot
+ # process.
+ already_mounted_units = [
+ '-.mount',
+ 'usr.mount',
+ 'local-fs.target.wants/systemd-remount-fs.service'
+ ]
+
+ for unit in already_mounted_units:
+ unit_location = os.path.join(dir_with_our_mount_units, unit)
+
+ if not os.path.exists(unit_location):
+ api.current_logger().debug('The {} unit does not exist, no need to remove it.'.format(unit))
+ continue
+
+ _delete_file(unit_location)
+
+
+def request_units_inclusion_in_initramfs(files_to_include):
+ api.current_logger().debug('Including the following files into initramfs: {}'.format(files_to_include))
+
+ additional_files = [
+ '/usr/sbin/swapon' # If the system has swap, we have also generated a swap unit to activate it
+ ]
+
+ tasks = UpgradeInitramfsTasks(include_files=files_to_include + additional_files)
+ api.produce(tasks)
+
+
+def setup_storage_initialization():
+ userspace_info = next(api.consume(TargetUserSpaceInfo), None)
+
+ with mounting.NspawnActions(base_dir=userspace_info.path) as upgrade_container_ctx:
+ with tempfile.TemporaryDirectory(dir='/var/lib/leapp/', prefix='tmp_systemd_fstab_') as workspace_path:
+ run_systemd_fstab_generator(workspace_path)
+ remove_units_for_targets_that_are_already_mounted_by_dracut(workspace_path)
+ prefix_all_mount_units_with_sysroot(workspace_path)
+ fix_symlinks_in_targets(workspace_path)
+ mount_unit_files = copy_units_into_system_location(upgrade_container_ctx, workspace_path)
+ request_units_inclusion_in_initramfs(mount_unit_files)
diff --git a/repos/system_upgrade/common/actors/initramfs/mount_units_generator/tests/test_mount_unit_generation.py b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/tests/test_mount_unit_generation.py
new file mode 100644
index 00000000..b814f6ce
--- /dev/null
+++ b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/tests/test_mount_unit_generation.py
@@ -0,0 +1,269 @@
+import os
+import shutil
+
+import pytest
+
+from leapp.exceptions import StopActorExecutionError
+from leapp.libraries.actor import mount_unit_generator
+from leapp.libraries.common.testutils import logger_mocked
+from leapp.libraries.stdlib import api, CalledProcessError
+from leapp.models import TargetUserSpaceInfo, UpgradeInitramfsTasks
+
+
+def test_run_systemd_fstab_generator_successful_generation(monkeypatch):
+ """Test successful mount unit generation."""
+
+ output_dir = '/tmp/test_output'
+ expected_cmd = [
+ '/usr/lib/systemd/system-generators/systemd-fstab-generator',
+ output_dir,
+ output_dir,
+ output_dir
+ ]
+
+ def mock_run(command):
+ assert command == expected_cmd
+
+ return {
+ "stdout": "",
+ "stderr": "",
+ "exit_code": 0,
+ }
+
+ monkeypatch.setattr(mount_unit_generator, 'run', mock_run)
+ mount_unit_generator.run_systemd_fstab_generator(output_dir)
+
+
+def test_run_systemd_fstab_generator_failure(monkeypatch):
+ """Test handling of systemd-fstab-generator failure."""
+ output_dir = '/tmp/test_output'
+ expected_cmd = [
+ '/usr/lib/systemd/system-generators/systemd-fstab-generator',
+ output_dir,
+ output_dir,
+ output_dir
+ ]
+
+ def mock_run(command):
+ assert command == expected_cmd
+ raise CalledProcessError(message='Generator failed', command=['test'], result={'exit_code': 1})
+
+ monkeypatch.setattr(mount_unit_generator, 'run', mock_run)
+ monkeypatch.setattr(api, 'current_logger', logger_mocked())
+
+ with pytest.raises(StopActorExecutionError):
+ mount_unit_generator.run_systemd_fstab_generator(output_dir)
+
+
+def test_prefix_mount_unit_with_sysroot(monkeypatch):
+ """Test prefixing a single mount unit with /sysroot."""
+ monkeypatch.setattr(api, 'current_logger', logger_mocked())
+
+ input_content = [
+ "[Unit]\n",
+ "Description=Test Mount\n",
+ "[Mount]\n",
+ "Where=/home\n",
+ "What=/dev/sda1\n"
+ ]
+
+ expected_output_lines = [
+ "[Unit]",
+ "Description=Test Mount",
+ "[Mount]",
+ "Where=/sysroot/home",
+ "What=/dev/sda1"
+ ]
+
+ def mock_read_unit_file_lines(unit_file_path):
+ return input_content
+
+ def mock_write_unit_file_lines(unit_file_path, lines):
+ assert unit_file_path == '/test/output.mount'
+ assert lines == expected_output_lines
+
+ monkeypatch.setattr(mount_unit_generator, '_read_unit_file_lines', mock_read_unit_file_lines)
+ monkeypatch.setattr(mount_unit_generator, '_write_unit_file_lines', mock_write_unit_file_lines)
+
+ mount_unit_generator._prefix_mount_unit_with_sysroot(
+ '/test/input.mount',
+ '/test/output.mount'
+ )
+
+
+def test_prefix_all_mount_units_with_sysroot(monkeypatch):
+ """Test prefixing all mount units in a directory."""
+
+ expected_changes = {
+ '/test/dir/home.mount': {
+ 'new_unit_destination': '/test/dir/sysroot-home.mount',
+ 'should_be_deleted': True,
+ 'deleted': False,
+ },
+ '/test/dir/var.mount': {
+ 'new_unit_destination': '/test/dir/sysroot-var.mount',
+ 'should_be_deleted': True,
+ 'deleted': False,
+ },
+ '/test/dir/not-a-mount.service': {
+ 'new_unit_destination': None,
+ 'should_be_deleted': False,
+ 'deleted': False,
+ }
+ }
+
+ def mock_listdir(dir_path):
+ return ['home.mount', 'var.mount', 'not-a-mount.service']
+
+ def mock_delete_file(file_path):
+ assert file_path in expected_changes
+ expected_changes[file_path]['deleted'] = True
+
+ def mock_prefix(unit_file_path, new_unit_destination):
+ assert expected_changes[unit_file_path]['new_unit_destination'] == new_unit_destination
+
+ monkeypatch.setattr('os.listdir', mock_listdir)
+ monkeypatch.setattr(mount_unit_generator, '_delete_file', mock_delete_file)
+ monkeypatch.setattr(mount_unit_generator, '_prefix_mount_unit_with_sysroot', mock_prefix)
+
+ mount_unit_generator.prefix_all_mount_units_with_sysroot('/test/dir')
+
+ for original_mount_unit_location in expected_changes:
+ should_be_deleted = expected_changes[original_mount_unit_location]['should_be_deleted']
+ was_deleted = expected_changes[original_mount_unit_location]['deleted']
+ assert should_be_deleted == was_deleted
+
+
+@pytest.mark.parametrize('dirname', (
+ 'local-fs.target.requires',
+ 'local-fs.target.wants',
+ 'local-fs-pre.target.requires',
+ 'local-fs-pre.target.wants',
+ 'remote-fs.target.requires',
+ 'remote-fs.target.wants',
+ 'remote-fs-pre.target.requires',
+ 'remote-fs-pre.target.wants',
+))
+def test_fix_symlinks_in_dir(monkeypatch, dirname):
+ """Test fixing local-fs.target.requires symlinks."""
+
+ DIR_PATH = os.path.join('/test/dir/', dirname)
+
+ def mock_rmtree(dir_path):
+ assert dir_path == DIR_PATH
+
+ def mock_mkdir(dir_path):
+ assert dir_path == DIR_PATH
+
+ def mock_listdir(dir_path):
+ return ['sysroot-home.mount', 'sysroot-var.mount', 'not-a-mount.service']
+
+ def mock_os_path_exist(dir_path):
+ assert dir_path == DIR_PATH
+ return dir_path == DIR_PATH
+
+ expected_calls = [
+ ['ln', '-s', '../sysroot-home.mount', os.path.join(DIR_PATH, 'sysroot-home.mount')],
+ ['ln', '-s', '../sysroot-var.mount', os.path.join(DIR_PATH, 'sysroot-var.mount')]
+ ]
+ call_count = 0
+
+ def mock_run(command):
+ nonlocal call_count
+ assert command in expected_calls
+ call_count += 1
+ return {
+ "stdout": "",
+ "stderr": "",
+ "exit_code": 0,
+ }
+
+ monkeypatch.setattr('shutil.rmtree', mock_rmtree)
+ monkeypatch.setattr('os.mkdir', mock_mkdir)
+ monkeypatch.setattr('os.listdir', mock_listdir)
+ monkeypatch.setattr('os.path.exists', mock_os_path_exist)
+ monkeypatch.setattr(mount_unit_generator, 'run', mock_run)
+
+ mount_unit_generator._fix_symlinks_in_dir('/test/dir', dirname)
+
+
+# Test the copy_units_into_system_location function
+def test_copy_units_mixed_content(monkeypatch):
+ """Test copying units with mixed files and directories."""
+
+ def mock_walk(dir_path):
+ tuples_to_yield = [
+ ('/source/dir', ['local-fs.target.requires'], ['unit1.mount', 'unit2.mount']),
+ ('/source/dir/local-fs.target.requires', [], ['unit1.mount', 'unit2.mount']),
+ ]
+ for i in tuples_to_yield:
+ yield i
+
+ def mock_isdir(path):
+ return 'local-fs.target.requires' in path
+
+ def _make_couple(sub_path):
+ return (
+ os.path.join('/source/dir/', sub_path),
+ os.path.join('/container/usr/lib/systemd/system/', sub_path)
+ )
+
+ def mock_copy2(src, dst, follow_symlinks=True):
+ valid_combinations = [
+ _make_couple('unit1.mount'),
+ _make_couple('unit2.mount'),
+ _make_couple('local-fs.target.requires/unit1.mount'),
+ _make_couple('local-fs.target.requires/unit2.mount'),
+ ]
+ assert not follow_symlinks
+ assert (src, dst) in valid_combinations
+
+ def mock_islink(file_path):
+ return file_path == '/container/usr/lib/systemd/system/local-fs.target.requires/unit2.mount'
+
+ class MockedDeleteFile:
+ def __init__(self):
+ self.removal_called = False
+
+ def __call__(self, file_path):
+ assert file_path == '/container/usr/lib/systemd/system/local-fs.target.requires/unit2.mount'
+ self.removal_called = True
+
+ def mock_makedirs(dst_dir, mode=0o777, exist_ok=False):
+ assert exist_ok
+ assert mode == 0o755
+
+ allowed_paths = [
+ '/container/usr/lib/systemd/system',
+ '/container/usr/lib/systemd/system/local-fs.target.requires'
+ ]
+ assert dst_dir.rstrip('/') in allowed_paths
+
+ monkeypatch.setattr(os, 'walk', mock_walk)
+ monkeypatch.setattr(os, 'makedirs', mock_makedirs)
+ monkeypatch.setattr(os.path, 'isdir', mock_isdir)
+ monkeypatch.setattr(os.path, 'islink', mock_islink)
+ monkeypatch.setattr(mount_unit_generator, '_delete_file', MockedDeleteFile())
+ monkeypatch.setattr(shutil, 'copy2', mock_copy2)
+
+ class MockedContainerContext:
+ def __init__(self):
+ self.base_dir = '/container'
+
+ def full_path(self, path):
+ return os.path.join('/container', path.lstrip('/'))
+
+ mock_container = MockedContainerContext()
+
+ files = mount_unit_generator.copy_units_into_system_location(
+ mock_container, '/source/dir'
+ )
+
+ expected_files = [
+ '/usr/lib/systemd/system/unit1.mount',
+ '/usr/lib/systemd/system/unit2.mount',
+ '/usr/lib/systemd/system/local-fs.target.requires/unit1.mount',
+ '/usr/lib/systemd/system/local-fs.target.requires/unit2.mount',
+ ]
+ assert sorted(files) == sorted(expected_files)
+ assert mount_unit_generator._delete_file.removal_called
--
2.51.1
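
For orientation, the actor above rewrites every generated mount unit so that it
mounts under /sysroot inside the upgrade initramfs: the unit tests expect
"Where=/home" to become "Where=/sysroot/home" and home.mount to be renamed to
sysroot-home.mount. A minimal standalone sketch of that line transformation,
assuming a hypothetical helper name (the actual actor implements this in
_prefix_mount_unit_with_sysroot together with file I/O and unit renaming):

    import os

    def prefix_where_with_sysroot(line):
        # Only the Where= key needs the /sysroot prefix; all other lines stay as-is.
        if line.startswith('Where='):
            return 'Where=' + os.path.join('/sysroot', line[len('Where='):].lstrip('/'))
        return line

    unit_lines = ['[Unit]', 'Description=Test Mount', '[Mount]', 'Where=/home', 'What=/dev/sda1']
    print([prefix_where_with_sysroot(line) for line in unit_lines])
    # ['[Unit]', 'Description=Test Mount', '[Mount]', 'Where=/sysroot/home', 'What=/dev/sda1']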

View File

@ -0,0 +1,100 @@
From bf302fc794957a88bc4785f4dd2505b8d71012e0 Mon Sep 17 00:00:00 2001
From: Evgeni Golov <evgeni@golov.de>
Date: Wed, 21 Aug 2024 07:52:02 +0200
Subject: [PATCH 04/40] properly scope try/except when loading obsoleted keys
We want to load all possible keys, even *after* a KeyError happened
Fixes: 7e0fb44bb673893d0409903f6a441d0eb2829d22
---
.../libraries/removeobsoleterpmgpgkeys.py | 8 +--
.../tests/test_removeobsoleterpmgpgkeys.py | 50 +++++++++++++++++++
2 files changed, 54 insertions(+), 4 deletions(-)
diff --git a/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py b/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py
index bda7efa3..198c4368 100644
--- a/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py
+++ b/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py
@@ -12,14 +12,14 @@ def _get_obsolete_keys():
distribution = api.current_actor().configuration.os_release.release_id
obsoleted_keys_map = get_distribution_data(distribution).get('obsoleted-keys', {})
keys = []
- try:
- for version in range(7, int(get_target_major_version()) + 1):
+ for version in range(7, int(get_target_major_version()) + 1):
+ try:
for key in obsoleted_keys_map[str(version)]:
name, version, release = key.rsplit("-", 2)
if has_package(InstalledRPM, name, version=version, release=release):
keys.append(key)
- except KeyError:
- pass
+ except KeyError:
+ pass
return keys
diff --git a/repos/system_upgrade/common/actors/removeobsoletegpgkeys/tests/test_removeobsoleterpmgpgkeys.py b/repos/system_upgrade/common/actors/removeobsoletegpgkeys/tests/test_removeobsoleterpmgpgkeys.py
index 4d9a0e84..b78174cc 100644
--- a/repos/system_upgrade/common/actors/removeobsoletegpgkeys/tests/test_removeobsoleterpmgpgkeys.py
+++ b/repos/system_upgrade/common/actors/removeobsoletegpgkeys/tests/test_removeobsoleterpmgpgkeys.py
@@ -76,6 +76,56 @@ def test_get_obsolete_keys(monkeypatch, version, expected):
assert set(keys) == set(expected)
+@pytest.mark.parametrize(
+ "version, obsoleted_keys, expected",
+ [
+ (10, None, []),
+ (10, {}, []),
+ (10, {"8": ["gpg-pubkey-888-abc"], "10": ["gpg-pubkey-10-10"]}, ["gpg-pubkey-888-abc", "gpg-pubkey-10-10"]),
+ (9, {"8": ["gpg-pubkey-888-abc"], "9": ["gpg-pubkey-999-def"]}, ["gpg-pubkey-999-def", "gpg-pubkey-888-abc"]),
+ (8, {"8": ["gpg-pubkey-888-abc"], "9": ["gpg-pubkey-999-def"]}, ["gpg-pubkey-888-abc"])
+ ]
+)
+def test_get_obsolete_keys_incomplete_data(monkeypatch, version, obsoleted_keys, expected):
+ def get_target_major_version_mocked():
+ return version
+
+ def get_distribution_data_mocked(_distro):
+ if obsoleted_keys is None:
+ return {}
+ return {'obsoleted-keys': obsoleted_keys}
+
+ def has_package_mocked(*args, **kwargs):
+ return True
+
+ monkeypatch.setattr(
+ removeobsoleterpmgpgkeys,
+ "get_target_major_version",
+ get_target_major_version_mocked,
+ )
+
+ monkeypatch.setattr(
+ removeobsoleterpmgpgkeys,
+ "get_distribution_data",
+ get_distribution_data_mocked,
+ )
+
+ monkeypatch.setattr(
+ removeobsoleterpmgpgkeys,
+ "has_package",
+ has_package_mocked,
+ )
+
+ monkeypatch.setattr(
+ api,
+ "current_actor",
+ CurrentActorMocked(),
+ )
+
+ keys = removeobsoleterpmgpgkeys._get_obsolete_keys()
+ assert set(keys) == set(expected)
+
+
@pytest.mark.parametrize(
"keys, should_register",
[
--
2.47.0
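
The fix above matters because the per-version lookup can legitimately miss some
versions in the obsoleted-keys map: with the try/except moved inside the loop, a
KeyError for one missing version only skips that iteration instead of aborting
the whole collection. A minimal standalone illustration of the difference (toy
data, not the actor code):

    obsoleted_keys_map = {'7': ['gpg-pubkey-777-old'], '9': ['gpg-pubkey-999-def']}  # no '8' entry

    keys = []
    for version in range(7, 10):
        try:
            keys.extend(obsoleted_keys_map[str(version)])
        except KeyError:
            # A missing version only skips this iteration; later versions are still processed.
            pass

    print(keys)  # ['gpg-pubkey-777-old', 'gpg-pubkey-999-def']
    # With a single try/except wrapping the whole loop, the KeyError raised for '8'
    # would end the loop and the keys for '9' would never be collected.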

View File

@ -1,101 +0,0 @@
From e26359877b1d90d1f95a424216a00e711c72c923 Mon Sep 17 00:00:00 2001
From: karolinku <kkula@redhat.com>
Date: Tue, 9 Sep 2025 13:56:37 +0200
Subject: [PATCH 05/55] Prevent sssdupdate actor from raising errors
A potentially raised error (StopActorExecutionError) is replaced with
warning logs in the SSSD update file processing function. This
prevents the upgrade from failing when accessing non-critical files.
Also fix minor formatting nitpicks.
Jira: RHEL-108992
---
.../sssd/sssdchecks/libraries/sssdchecks.py | 4 ++--
.../sssd/sssdfacts/libraries/sssdfacts.py | 5 ++++-
.../sssd/sssdupdate/libraries/sssdupdate.py | 18 +++++++-----------
3 files changed, 13 insertions(+), 14 deletions(-)
diff --git a/repos/system_upgrade/el9toel10/actors/sssd/sssdchecks/libraries/sssdchecks.py b/repos/system_upgrade/el9toel10/actors/sssd/sssdchecks/libraries/sssdchecks.py
index 0a86fa7b..cb95026c 100644
--- a/repos/system_upgrade/el9toel10/actors/sssd/sssdchecks/libraries/sssdchecks.py
+++ b/repos/system_upgrade/el9toel10/actors/sssd/sssdchecks/libraries/sssdchecks.py
@@ -15,8 +15,8 @@ def check_config(model):
'SSSD\'s sss_ssh_knownhostsproxy tool is replaced by the more '
'reliable sss_ssh_knownhosts tool. SSH\'s configuration will be updated '
'to reflect this by updating every mention of sss_ssh_knownhostsproxy by '
- 'the corresponding mention of sss_ssh_knownhosts, even those commented out.\n'
- 'SSSD\'s ssh service will be enabled if not already done.\n'
+ 'the corresponding mention of sss_ssh_knownhosts, even those commented out. '
+ 'SSSD\'s ssh service will be enabled if not already done.\n\n'
'The following files will be updated:{}{}'.format(
FMT_LIST_SEPARATOR,
FMT_LIST_SEPARATOR.join(model.sssd_config_files + model.ssh_config_files)
diff --git a/repos/system_upgrade/el9toel10/actors/sssd/sssdfacts/libraries/sssdfacts.py b/repos/system_upgrade/el9toel10/actors/sssd/sssdfacts/libraries/sssdfacts.py
index 0ae9d93f..7d343229 100644
--- a/repos/system_upgrade/el9toel10/actors/sssd/sssdfacts/libraries/sssdfacts.py
+++ b/repos/system_upgrade/el9toel10/actors/sssd/sssdfacts/libraries/sssdfacts.py
@@ -19,7 +19,10 @@ def _does_file_contain_expression(file_path, expression):
)
return False
except OSError as e:
- raise StopActorExecutionError('Could not open file ' + file_path, details={'details': str(e)})
+ raise StopActorExecutionError(
+ 'Could not open configuration file',
+ details={'details': 'Coudn\'t open {} file with error: {}.'.format(file_path, str(e))}
+ )
def _look_for_files(expression: str, path_list: list[str]) -> list[str]:
diff --git a/repos/system_upgrade/el9toel10/actors/sssd/sssdupdate/libraries/sssdupdate.py b/repos/system_upgrade/el9toel10/actors/sssd/sssdupdate/libraries/sssdupdate.py
index 6d745ead..5b96bcc6 100644
--- a/repos/system_upgrade/el9toel10/actors/sssd/sssdupdate/libraries/sssdupdate.py
+++ b/repos/system_upgrade/el9toel10/actors/sssd/sssdupdate/libraries/sssdupdate.py
@@ -1,7 +1,7 @@
import os
import re
-from leapp.exceptions import StopActorExecutionError
+from leapp.libraries.stdlib import api
def _process_knownhosts(line: str) -> str:
@@ -29,30 +29,26 @@ def _process_enable_svc(line: str) -> str:
def _update_file(filename, process_function):
- newname = filename + '.new'
- oldname = filename + '.old'
+ newname = '{}.leappnew'.format(filename)
+ oldname = '{}.leappsave'.format(filename)
try:
- with open(filename, 'r') as input_file, open(newname, 'x') as output_file:
+ with open(filename, 'r') as input_file, open(newname, 'w') as output_file:
istat = os.fstat(input_file.fileno())
os.fchmod(output_file.fileno(), istat.st_mode)
for line in input_file:
try:
output_file.write(process_function(line))
except OSError as e:
- raise StopActorExecutionError('Failed to write to {}'.format(newname),
- details={'details': str(e)})
+ api.current_logger().warning('Failed to write to {}: {}'.format(newname, str(e)))
- except FileExistsError as e:
- raise StopActorExecutionError('Temporary file already exists: {}'.format(newname),
- details={'details': str(e)})
except OSError as e:
try:
os.unlink(newname)
except FileNotFoundError:
pass
- raise StopActorExecutionError('Failed to access the required files', details={'details': str(e)})
+ api.current_logger().error('Failed to access the required files: {}'.format(str(e)))
- # Let's make sure the old configuration is preserverd if something goes wrong
+ # Let's make sure the old configuration is preserved if something goes wrong
os.replace(filename, oldname)
os.replace(newname, filename)
os.unlink(oldname)
--
2.51.1

View File

@ -0,0 +1,283 @@
From 9d49f4675c2b7b18ba7b344bb0032a5538782560 Mon Sep 17 00:00:00 2001
From: Vojtech Sokol <vsokol@redhat.com>
Date: Mon, 2 Sep 2024 17:21:36 +0200
Subject: [PATCH 05/40] Update references from master branch to main
Focus was on making the CI and GitHub actions work after the default
branch was switched from master to main.
See: OAMG-4907
---
.github/workflows/codespell.yml | 4 ++--
.github/workflows/differential-shellcheck.yml | 4 ++--
.github/workflows/pr-welcome-msg.yml | 2 +-
.github/workflows/tmt-tests.yml | 16 ++++++++--------
.github/workflows/unit-tests.yml | 12 ++++++------
.packit.yaml | 10 +++++-----
Makefile | 14 +++++++-------
7 files changed, 31 insertions(+), 31 deletions(-)
diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml
index 673cef17..1195d8d1 100644
--- a/.github/workflows/codespell.yml
+++ b/.github/workflows/codespell.yml
@@ -3,10 +3,10 @@ name: Codespell
on:
push:
branches:
- - master
+ - main
pull_request:
branches:
- - master
+ - main
jobs:
codespell:
diff --git a/.github/workflows/differential-shellcheck.yml b/.github/workflows/differential-shellcheck.yml
index f1ed5f6a..e1bafb93 100644
--- a/.github/workflows/differential-shellcheck.yml
+++ b/.github/workflows/differential-shellcheck.yml
@@ -4,7 +4,7 @@
name: Differential ShellCheck
on:
pull_request:
- branches: [master]
+ branches: [main]
permissions:
contents: read
@@ -17,7 +17,7 @@ jobs:
security-events: write
pull-requests: write
- steps:
+ steps:
- name: Repository checkout
uses: actions/checkout@v4
with:
diff --git a/.github/workflows/pr-welcome-msg.yml b/.github/workflows/pr-welcome-msg.yml
index ff9414d2..0102c41f 100644
--- a/.github/workflows/pr-welcome-msg.yml
+++ b/.github/workflows/pr-welcome-msg.yml
@@ -28,7 +28,7 @@ jobs:
However, here are additional useful commands for packit:
- **`/packit test`** to re-run manually the default tests
- **`/packit retest-failed`** to re-run failed tests manually
- - **`/packit test oamg/leapp#42`** to run tests with leapp builds for the leapp PR#42 (default is latest upstream - master - build)
+ - **`/packit test oamg/leapp#42`** to run tests with leapp builds for the leapp PR#42 (default is latest upstream - main - build)
Note that first time contributors cannot run tests automatically - they need to be started by a reviewer.
diff --git a/.github/workflows/tmt-tests.yml b/.github/workflows/tmt-tests.yml
index 7e9fd706..1fa00e60 100644
--- a/.github/workflows/tmt-tests.yml
+++ b/.github/workflows/tmt-tests.yml
@@ -12,7 +12,7 @@ jobs:
call_workflow_tests_79to88_integration:
needs: call_workflow_copr_build
- uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@master
+ uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@main
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
@@ -26,7 +26,7 @@ jobs:
call_workflow_tests_79to86_integration:
needs: call_workflow_copr_build
- uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@master
+ uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@main
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
@@ -40,7 +40,7 @@ jobs:
call_workflow_tests_79to88_sst:
needs: call_workflow_copr_build
- uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@master
+ uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@main
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
@@ -55,7 +55,7 @@ jobs:
call_workflow_tests_7to8_aws:
needs: call_workflow_copr_build
- uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@master
+ uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@main
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
@@ -71,7 +71,7 @@ jobs:
call_workflow_tests_86to90_integration:
needs: call_workflow_copr_build
- uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@master
+ uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@main
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
@@ -85,7 +85,7 @@ jobs:
call_workflow_tests_88to92_integration:
needs: call_workflow_copr_build
- uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@master
+ uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@main
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
@@ -101,7 +101,7 @@ jobs:
call_workflow_tests_86to90_sst:
needs: call_workflow_copr_build
- uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@master
+ uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@main
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
@@ -116,7 +116,7 @@ jobs:
call_workflow_tests_86to90_aws:
needs: call_workflow_copr_build
- uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@master
+ uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@main
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml
index 2a05106e..42b72b8d 100644
--- a/.github/workflows/unit-tests.yml
+++ b/.github/workflows/unit-tests.yml
@@ -2,10 +2,10 @@ name: Unit Tests
on:
push:
branches:
- - master
+ - main
pull_request:
branches:
- - master
+ - main
jobs:
test:
@@ -74,10 +74,10 @@ jobs:
# NOTE(ivasilev) fetch-depth 0 is critical here as leapp deps discovery depends on specific substring in
# commit message and default 1 option will get us just merge commit which has an unrelevant message.
fetch-depth: '0'
- # NOTE(ivasilev) master -> origin/master is used for leapp deps discovery in Makefile via git log master..HEAD
- - name: Set master to origin/master
- if: github.ref != 'refs/heads/master'
+ # NOTE(ivasilev) main -> origin/main is used for leapp deps discovery in Makefile via git log main..HEAD
+ - name: Set main to origin/main
+ if: github.ref != 'refs/heads/main'
run: |
- git branch -f master origin/master
+ git branch -f main origin/main
- name: ${{matrix.scenarios.name}}
run: script -e -c /bin/bash -c 'TERM=xterm podman build --security-opt=seccomp=unconfined -t leapp-tests -f utils/container-tests/Containerfile.${{matrix.scenarios.container}} utils/container-tests && PYTHON_VENV=${{matrix.scenarios.python}} REPOSITORIES=${{matrix.scenarios.repos}} podman run --security-opt=seccomp=unconfined --rm -ti -v ${PWD}:/payload --env=PYTHON_VENV --env=REPOSITORIES leapp-tests'
diff --git a/.packit.yaml b/.packit.yaml
index d91a47e5..fbfd0eea 100644
--- a/.packit.yaml
+++ b/.packit.yaml
@@ -22,7 +22,7 @@ actions:
fix-spec-file:
- bash -c "sed -i -r \"0,/Release:/ s/Release:(\s*)\S*/Release:\1${PACKIT_RPMSPEC_RELEASE}%{?dist}/\" packaging/leapp-repository.spec"
post-upstream-clone:
- # builds from PRs should have lower NVR than those from master branch
+ # builds from PRs should have lower NVR than those from main branch
- bash -c "sed -i \"s/1%{?dist}/0%{?dist}/g\" packaging/leapp-repository.spec"
jobs:
@@ -44,12 +44,12 @@ jobs:
fix-spec-file:
- bash -c "sed -i -r \"0,/Release:/ s/Release:(\s*)\S*/Release:\1${PACKIT_RPMSPEC_RELEASE}%{?dist}/\" packaging/leapp-repository.spec"
post-upstream-clone:
- # builds from PRs should have lower NVR than those from master branch
+ # builds from PRs should have lower NVR than those from main branch
- bash -c "sed -i \"s/1%{?dist}/0%{?dist}/g\" packaging/leapp-repository.spec"
- job: copr_build
trigger: commit
metadata:
- branch: master
+ branch: main
owner: "@oamg"
project: leapp
targets:
@@ -65,7 +65,7 @@ jobs:
fix-spec-file:
- bash -c "sed -i -r \"0,/Release:/ s/Release:(\s*)\S*/Release:\1${PACKIT_RPMSPEC_RELEASE}%{?dist}/\" packaging/leapp-repository.spec"
post-upstream-clone:
- # builds from master branch should start with 100 release, to have high priority
+ # builds from main branch should start with 100 release, to have high priority
- bash -c "sed -i \"s/1%{?dist}/100%{?dist}/g\" packaging/leapp-repository.spec"
- job: copr_build
trigger: release
@@ -85,7 +85,7 @@ jobs:
fix-spec-file:
- bash -c "sed -i -r \"0,/Release:/ s/Release:(\s*)\S*/Release:\1${PACKIT_RPMSPEC_RELEASE}%{?dist}/\" packaging/leapp-repository.spec"
post-upstream-clone:
- # builds from master branch should start with 100 release, to have high priority
+ # builds from main branch should start with 100 release, to have high priority
- bash -c "sed -i \"s/1%{?dist}/100%{?dist}/g\" packaging/leapp-repository.spec"
diff --git a/Makefile b/Makefile
index 5b2bc4d2..8aeef77d 100644
--- a/Makefile
+++ b/Makefile
@@ -64,7 +64,7 @@ endif
# just to reduce number of unwanted builds mark as the upstream one when
# someone will call copr_build without additional parameters
-MASTER_BRANCH=master
+MASTER_BRANCH=main
# In case the PR or MR is defined or in case build is not coming from the
# MATER_BRANCH branch, N_REL=0; (so build is not update of the approved
@@ -76,10 +76,10 @@ SHORT_SHA=`git rev-parse --short HEAD`
BRANCH=`git rev-parse --abbrev-ref HEAD | tr -- '-/' '_'`
# The dependent framework PR connection will be taken from the top commit's depends-on message.
-REQ_LEAPP_PR=$(shell git log master..HEAD | grep -m1 -iE '^[[:space:]]*Depends-On:[[:space:]]*.*[[:digit:]]+[[:space:]]*$$' | grep -Eo '*[[:digit:]]*')
+REQ_LEAPP_PR=$(shell git log main..HEAD | grep -m1 -iE '^[[:space:]]*Depends-On:[[:space:]]*.*[[:digit:]]+[[:space:]]*$$' | grep -Eo '*[[:digit:]]*')
# NOTE(ivasilev) In case of travis relying on top commit is a no go as a top commit will be a merge commit.
ifdef CI
- REQ_LEAPP_PR=$(shell git log master..HEAD | grep -m1 -iE '^[[:space:]]*Depends-On:[[:space:]]*.*[[:digit:]]+[[:space:]]*$$' | grep -Eo '[[:digit:]]*')
+ REQ_LEAPP_PR=$(shell git log main..HEAD | grep -m1 -iE '^[[:space:]]*Depends-On:[[:space:]]*.*[[:digit:]]+[[:space:]]*$$' | grep -Eo '[[:digit:]]*')
endif
# In case anyone would like to add any other suffix, just make it possible
@@ -92,8 +92,8 @@ REQUEST=`if test -n "$$PR"; then echo ".PR$${PR}"; elif test -n "$$MR"; then ech
# Examples:
# 0.201810080027Z.4078402.packaging.PR2
# 0.201810080027Z.4078402.packaging
-# 0.201810080027Z.4078402.master.MR2
-# 1.201810080027Z.4078402.master
+# 0.201810080027Z.4078402.main.MR2
+# 1.201810080027Z.4078402.main
RELEASE="$(N_REL).$(TIMESTAMP).$(SHORT_SHA).$(BRANCH)$(REQUEST)$(_SUFFIX)"
all: help
@@ -302,7 +302,7 @@ install-deps:
pip install --upgrade setuptools; \
pip install --upgrade -r requirements.txt; \
./utils/install_commands.sh $(_PYTHON_VENV); \
- # In case the top commit Depends-On some yet unmerged framework patch - override master leapp with the proper version
+ # In case the top commit Depends-On some yet unmerged framework patch - override main leapp with the proper version
if [[ ! -z "$(REQ_LEAPP_PR)" ]] ; then \
echo "Leapp-repository depends on the yet unmerged pr of the framework #$(REQ_LEAPP_PR), installing it.." && \
$(VENVNAME)/bin/pip install -I "git+https://github.com/oamg/leapp.git@refs/pull/$(REQ_LEAPP_PR)/head"; \
@@ -332,7 +332,7 @@ install-deps-fedora:
pip install --upgrade setuptools; \
pip install --upgrade -r requirements.txt; \
./utils/install_commands.sh $(_PYTHON_VENV); \
- # In case the top commit Depends-On some yet unmerged framework patch - override master leapp with the proper version
+ # In case the top commit Depends-On some yet unmerged framework patch - override main leapp with the proper version
if [[ ! -z "$(REQ_LEAPP_PR)" ]] ; then \
echo "Leapp-repository depends on the yet unmerged pr of the framework #$(REQ_LEAPP_PR), installing it.." && \
$(VENVNAME)/bin/pip install -I "git+https://github.com/oamg/leapp.git@refs/pull/$(REQ_LEAPP_PR)/head"; \
--
2.47.0

View File

@ -0,0 +1,43 @@
From 41e32e3aa6394b8397bef9b797892d9fa119d608 Mon Sep 17 00:00:00 2001
From: Yuriy Kohut <yura.kohut@gmail.com>
Date: Thu, 29 Aug 2024 12:36:23 +0300
Subject: [PATCH 06/40] ReadOfKernelArgsError: fix the error: - AttributeError:
module 'leapp.reporting' has no attribute 'Hints'
---
.../kernelcmdlineconfig/libraries/kernelcmdlineconfig.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py b/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py
index 238a8aa6..6b261c3b 100644
--- a/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py
+++ b/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py
@@ -175,14 +175,14 @@ def entrypoint(configs=None):
api.current_logger().error(str(e))
if use_cmdline_file():
- report_hint = reporting.Hints(
+ report_hint = (
'After the system has been rebooted into the new version of RHEL, you'
' should take the kernel cmdline arguments from /proc/cmdline (Everything'
' except the BOOT_IMAGE entry and initrd entries) and copy them into'
' /etc/kernel/cmdline before installing any new kernels.'
)
else:
- report_hint = reporting.Hints(
+ report_hint = (
'After the system has been rebooted into the new version of RHEL, you'
' should take the kernel cmdline arguments from /proc/cmdline (Everything'
' except the BOOT_IMAGE entry and initrd entries) and then use the'
@@ -204,7 +204,7 @@ def entrypoint(configs=None):
' not able to set the arguments as the default for kernels installed in'
' the future.'
),
- report_hint,
+ reporting.Remediation(hint=report_hint),
reporting.Severity(reporting.Severity.HIGH),
reporting.Groups([
reporting.Groups.BOOT,
--
2.47.0

View File

@ -1,132 +0,0 @@
From 23d8f69509e692ceaa3dacc0de927349ec056189 Mon Sep 17 00:00:00 2001
From: Tomas Fratrik <tfratrik@redhat.com>
Date: Tue, 19 Aug 2025 09:19:18 +0200
Subject: [PATCH 06/55] Update our test container to Fedora 42
The distro.linux_distribution call in the createresumeservice test is
replaced with distro.id(). distro.linux_distribution has been deprecated,
and in the Fedora 42 containerized tests with Python 3.13 it does not work
correctly, so the createresumeservice tests do not get skipped.
Jira: RHELMISC-13271
---
Makefile | 19 +++++++++----------
.../tests/test_createresumeservice.py | 2 +-
.../{Containerfile.f34 => Containerfile.f42} | 4 ++--
3 files changed, 12 insertions(+), 13 deletions(-)
rename utils/container-tests/{Containerfile.f34 => Containerfile.f42} (84%)
diff --git a/Makefile b/Makefile
index 81b16376..e0fc7e00 100644
--- a/Makefile
+++ b/Makefile
@@ -165,7 +165,7 @@ help:
@echo " MR=6 COPR_CONFIG='path/to/the/config/copr/file' make <target>"
@echo " ACTOR=<actor> TEST_LIBS=y make test"
@echo " BUILD_CONTAINER=rhel8 make build_container"
- @echo " TEST_CONTAINER=f34 make test_container"
+ @echo " TEST_CONTAINER=f42 make test_container"
@echo " CONTAINER_TOOL=docker TEST_CONTAINER=rhel8 make test_container_no_lint"
@echo ""
@@ -379,7 +379,6 @@ test_no_lint:
done; \
$(_PYTHON_VENV) -m pytest $(REPORT_ARG) $(TEST_PATHS) $(LIBRARY_PATH) $(PYTEST_ARGS)
-
test: lint test_no_lint
# container images act like a cache so that dependencies can only be downloaded once
@@ -416,7 +415,7 @@ lint_container:
@_TEST_CONT_TARGET="lint" $(MAKE) test_container
lint_container_all:
- @for container in "f34" "rhel8" "rhel9"; do \
+ @for container in "f42" "rhel8" "rhel9"; do \
TEST_CONTAINER=$$container $(MAKE) lint_container || exit 1; \
done
@@ -426,9 +425,9 @@ lint_container_all:
# because e.g RHEL8 to RHEL9 IPU must work on python3.6 and python3.9.
test_container:
@case $(_TEST_CONTAINER) in \
- f34) \
- export CONT_FILE="utils/container-tests/Containerfile.f34"; \
- export _VENV="python3.9"; \
+ f42) \
+ export CONT_FILE="utils/container-tests/Containerfile.f42"; \
+ export _VENV="python3.13"; \
;; \
rhel8) \
export CONT_FILE="utils/container-tests/Containerfile.rhel8"; \
@@ -439,7 +438,7 @@ test_container:
export _VENV="python3.9"; \
;; \
*) \
- echo "Error: Available containers are: f34, rhel8, rhel9"; exit 1; \
+ echo "Error: Available containers are: f42, rhel8, rhel9"; exit 1; \
;; \
esac; \
export TEST_IMAGE="leapp-repo-tests-$(_TEST_CONTAINER)"; \
@@ -471,7 +470,7 @@ test_container:
exit $$res
test_container_all:
- @for container in "f34" "rhel8" "rhel9"; do \
+ @for container in "f42" "rhel8" "rhel9"; do \
TEST_CONTAINER=$$container $(MAKE) test_container || exit 1; \
done
@@ -479,13 +478,13 @@ test_container_no_lint:
@_TEST_CONT_TARGET="test_no_lint" $(MAKE) test_container
test_container_all_no_lint:
- @for container in "f34" "rhel8" "rhel9"; do \
+ @for container in "f42" "rhel8" "rhel9"; do \
TEST_CONTAINER=$$container $(MAKE) test_container_no_lint || exit 1; \
done
# clean all testing and building containers and their images
clean_containers:
- @for i in "leapp-repo-tests-f34" "leapp-repo-tests-rhel8" \
+ @for i in "leapp-repo-tests-f42" "leapp-repo-tests-rhel8" \
"leapp-repo-tests-rhel9" "leapp-repo-build-el8"; do \
$(_CONTAINER_TOOL) kill "$$i-cont" || :; \
$(_CONTAINER_TOOL) rm "$$i-cont" || :; \
diff --git a/repos/system_upgrade/common/actors/createresumeservice/tests/test_createresumeservice.py b/repos/system_upgrade/common/actors/createresumeservice/tests/test_createresumeservice.py
index 5302cdd2..c1cefc37 100644
--- a/repos/system_upgrade/common/actors/createresumeservice/tests/test_createresumeservice.py
+++ b/repos/system_upgrade/common/actors/createresumeservice/tests/test_createresumeservice.py
@@ -6,7 +6,7 @@ import pytest
@pytest.mark.skipif(os.getuid() != 0, reason='User is not a root')
@pytest.mark.skipif(
- distro.linux_distribution()[0] == 'Fedora',
+ distro.id() == 'fedora',
reason='default.target.wants does not exists on Fedora distro',
)
def test_create_resume_service(current_actor_context):
diff --git a/utils/container-tests/Containerfile.f34 b/utils/container-tests/Containerfile.f42
similarity index 84%
rename from utils/container-tests/Containerfile.f34
rename to utils/container-tests/Containerfile.f42
index a9346635..46f0f63a 100644
--- a/utils/container-tests/Containerfile.f34
+++ b/utils/container-tests/Containerfile.f42
@@ -1,11 +1,11 @@
-FROM fedora:34
+FROM fedora:42
VOLUME /repo
RUN dnf update -y && \
dnf install -y findutils make rsync python3-gobject-base NetworkManager-libnm
-ENV PYTHON_VENV python3.9
+ENV PYTHON_VENV python3.13
COPY . /repocopy
--
2.51.1

View File

@ -1,46 +0,0 @@
From db8d1f3fcc155b94b07d89d90eb82cd2a52d5cf9 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Wed, 20 Aug 2025 15:41:34 +0200
Subject: [PATCH 07/55] networkmanagerconnectionscanner: Skip test on Python !=
3.6
The test_nm_conn test fails on at least Python >= 3.9 (not sure which
version exactly).
Let's skip the test on Python != 3.6, as it never runs on a different
version - the actor is in the el8toel9 repo and is in the FactsPhase.
---
.../unit_test_networkmanagerconnectionscanner.py | 12 +++++++++++-
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/tests/unit_test_networkmanagerconnectionscanner.py b/repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/tests/unit_test_networkmanagerconnectionscanner.py
index 46af07c1..7558b307 100644
--- a/repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/tests/unit_test_networkmanagerconnectionscanner.py
+++ b/repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/tests/unit_test_networkmanagerconnectionscanner.py
@@ -1,4 +1,5 @@
import errno
+import sys
import textwrap
import pytest
@@ -57,7 +58,16 @@ def test_no_conf(monkeypatch):
assert not api.produce.called
-@pytest.mark.skipif(not nmconnscanner.libnm_available, reason="NetworkManager g-ir not installed")
+@pytest.mark.skipif(
+ sys.version_info.major != 3 or sys.version_info.minor != 6,
+ # On Python > 3.6 the GLib and NM libraries apparently behave differently and
+ # the test fails. Let's skip it since the actor is only ever run with
+ # Python3.6 (el8toel9 repo and FactsPhase)
+ reason="Only runs on Python 3.6",
+)
+@pytest.mark.skipif(
+ not nmconnscanner.libnm_available, reason="NetworkManager g-ir not installed"
+)
def test_nm_conn(monkeypatch):
"""
Check a basic keyfile
--
2.51.1

View File

@ -0,0 +1,44 @@
From 88e13fb0545e0d42df2777538a0c6921bab91e33 Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Fri, 27 Sep 2024 14:53:01 +0200
Subject: [PATCH 07/40] pylint: exclude rule: too-many-positional-arguments
(code: R0917)
The new version of Pylint has a rule for checking positional
arguments - it complains when more than 4 positional arguments exist.
We do not want to refactor the code to make it happy, and the default
value cannot be changed right now - that is planned for future Pylint
versions. So we are excluding this rule.
For more info:
* https://pylint.readthedocs.io/en/latest/user_guide/messages/refactor/too-many-positional-arguments.html
---
.pylintrc | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/.pylintrc b/.pylintrc
index f78c1c3f..5d75df40 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -41,6 +41,8 @@ disable=
consider-using-from-import,
use-list-literal,
use-dict-literal,
+ too-many-lines, # we do not want to take care about that one
+ too-many-positional-arguments, # unfortunately we cannot yet set max-positional-arguments
# new for python3 version of pylint
useless-object-inheritance,
consider-using-set-comprehension, # pylint3 force to use comprehension in place we don't want (py2 doesnt have these options, for inline skip)
@@ -57,8 +59,7 @@ disable=
redundant-u-string-prefix, # still have py2 to support
logging-format-interpolation,
logging-not-lazy,
- use-yield-from, # yield from cannot be used until we require python 3.3 or greater
- too-many-lines # we do not want to take care about that one
+ use-yield-from # yield from cannot be used until we require python 3.3 or greater
[FORMAT]
# Maximum number of characters on a single line.
--
2.47.0

View File

@ -1,132 +0,0 @@
From c4c36a2abd0c83b021a20a450e66b8b1fe7be2da Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Wed, 27 Aug 2025 20:02:46 +0200
Subject: [PATCH 08/55] Remove unused (rh)el7 Containerfiles
---
utils/container-builds/Containerfile.centos7 | 15 -----------
utils/container-tests/Containerfile.rhel7 | 24 ------------------
utils/container-tests/Containerfile.ubi7 | 25 -------------------
utils/container-tests/Containerfile.ubi7-lint | 25 -------------------
4 files changed, 89 deletions(-)
delete mode 100644 utils/container-builds/Containerfile.centos7
delete mode 100644 utils/container-tests/Containerfile.rhel7
delete mode 100644 utils/container-tests/Containerfile.ubi7
delete mode 100644 utils/container-tests/Containerfile.ubi7-lint
diff --git a/utils/container-builds/Containerfile.centos7 b/utils/container-builds/Containerfile.centos7
deleted file mode 100644
index af00eddb..00000000
--- a/utils/container-builds/Containerfile.centos7
+++ /dev/null
@@ -1,15 +0,0 @@
-FROM centos:7
-
-VOLUME /repo
-
-# mirror.centos.org is dead, comment out mirrorlist and set baseurl to vault.centos.org
-RUN sed -i s/mirror.centos.org/vault.centos.org/ /etc/yum.repos.d/CentOS-*.repo
-RUN sed -i s/^#\s*baseurl=http/baseurl=http/ /etc/yum.repos.d/CentOS-*.repo
-RUN sed -i s/^mirrorlist=http/#mirrorlist=http/ /etc/yum.repos.d/CentOS-*.repo
-
-RUN yum update -y && \
- yum install -y rpm-build python-devel make git
-
-WORKDIR /repo
-ENV DIST_VERSION 7
-ENTRYPOINT make _build_local
diff --git a/utils/container-tests/Containerfile.rhel7 b/utils/container-tests/Containerfile.rhel7
deleted file mode 100644
index 0a0c384a..00000000
--- a/utils/container-tests/Containerfile.rhel7
+++ /dev/null
@@ -1,24 +0,0 @@
-FROM registry.access.redhat.com/ubi7/ubi:7.9
-
-VOLUME /repo
-
-RUN yum update -y && \
- yum install -y python-virtualenv python-setuptools make git rsync
-
-# see ./Containerfile.ubi7 for explanation
-RUN yum -y install python27-python-pip && \
- scl enable python27 -- pip install -U --target /usr/lib/python2.7/site-packages/ pip==20.3.0 && \
- python -m pip install --ignore-installed pip==20.3.4 ipaddress virtualenv
-
-ENV PYTHON_VENV python2.7
-
-COPY . /repocopy
-
-WORKDIR /repocopy
-
-RUN rm -rf tut*
-
-RUN make clean && make install-deps
-
-WORKDIR /
-
diff --git a/utils/container-tests/Containerfile.ubi7 b/utils/container-tests/Containerfile.ubi7
deleted file mode 100644
index 44625a76..00000000
--- a/utils/container-tests/Containerfile.ubi7
+++ /dev/null
@@ -1,25 +0,0 @@
-FROM registry.access.redhat.com/ubi7/ubi:7.9
-
-VOLUME /payload
-
-RUN yum update -y && \
- yum install python-virtualenv python-setuptools make git -y
-
-# NOTE(ivasilev,pstodulk) We need at least pip v10.0.1, however centos:7
-# provides just v8.1.2 (via EPEL). So do this: install epel repos -> install
-# python2-pip -> use pip to update to specific pip version we require. period
-# NOTE(pstodulk) I see we take care about pip for py3 inside the Makefile,
-# however I am afraid of additional possible troubles in future because of the
-# archaic pip3 version (v9.0.1). As we want to run tests for Py2 and Py3 in ci
-# always anyway, let's put py3 installation here as well..
-# Dropped Python3 as it is now added in its own container on RHEL8
-
-# This is some trickery: We install python27-python-pip from the scl, use the scl to bootstrap the python
-# module of pip version 20.3.0 and then make it update to 20.3.4 resulting the 'pip' command to be available.
-# The --target approach doesn't add it, but at least we now have pip 20.3.4 installed ;-)
-RUN yum -y install python27-python-pip && \
- scl enable python27 -- pip install -U --target /usr/lib/python2.7/site-packages/ pip==20.3.0 && \
- python -m pip install --ignore-installed pip==20.3.4 ipaddress virtualenv
-
-WORKDIR /payload
-ENTRYPOINT make install-deps && make test_no_lint
diff --git a/utils/container-tests/Containerfile.ubi7-lint b/utils/container-tests/Containerfile.ubi7-lint
deleted file mode 100644
index ed548985..00000000
--- a/utils/container-tests/Containerfile.ubi7-lint
+++ /dev/null
@@ -1,25 +0,0 @@
-FROM registry.access.redhat.com/ubi7/ubi:7.9
-
-VOLUME /payload
-
-RUN yum update -y && \
- yum install python-virtualenv python-setuptools make git -y
-
-# NOTE(ivasilev,pstodulk) We need at least pip v10.0.1, however centos:7
-# provides just v8.1.2 (via EPEL). So do this: install epel repos -> install
-# python2-pip -> use pip to update to specific pip version we require. period
-# NOTE(pstodulk) I see we take care about pip for py3 inside the Makefile,
-# however I am afraid of additional possible troubles in future because of the
-# archaic pip3 version (v9.0.1). As we want to run tests for Py2 and Py3 in ci
-# always anyway, let's put py3 installation here as well..
-# Dropped Python3 as it is now added in its own container on RHEL8
-
-# This is some trickery: We install python27-python-pip from the scl, use the scl to bootstrap the python
-# module of pip version 20.3.0 and then make it update to 20.3.4 resulting the 'pip' command to be available.
-# The --target approach doesn't add it, but at least we now have pip 20.3.4 installed ;-)
-RUN yum -y install python27-python-pip && \
- scl enable python27 -- pip install -U --target /usr/lib/python2.7/site-packages/ pip==20.3.0 && \
- python -m pip install --ignore-installed pip==20.3.4 ipaddress virtualenv
-
-WORKDIR /payload
-ENTRYPOINT make install-deps && make lint
--
2.51.1

View File

@ -0,0 +1,534 @@
From 658700d6424e852917b62c190dd23cbb3026b67d Mon Sep 17 00:00:00 2001
From: Iker Pedrosa <ipedrosa@redhat.com>
Date: Mon, 5 Aug 2024 15:15:44 +0200
Subject: [PATCH 08/40] pam_userdb: migrate backend database
pam_userdb module changed its backend database technology from libdb to
gdbm for RHEL10. This requires a set of leapp actors to perform the
database migration automatically when upgrading to RHEL10:
* ScanPamUserDB takes care of scanning the PAM service folder to detect
whether pam_userdb is used and the location of the database in use.
This information is stored in a model.
* CheckPamUserDB checks the databases reported by ScanPamUserDB and
prints a report about them.
* ConvertPamUserDB checks the databases reported by ScanPamUserDB and
converts them to GDBM format.
* RemoveOldPamUserDB checks the databases reported by ScanPamUserDB and
removes them.
All these actors include unit-tests.
Finally, there's also a spec file change to add `libdb-utils` dependency
as it is required to convert pam_userdb databases from BerkeleyDB to
GDBM.
Signed-off-by: Iker Pedrosa <ipedrosa@redhat.com>
---
packaging/leapp-repository.spec | 6 +++
.../actors/pamuserdb/checkpamuserdb/actor.py | 18 ++++++++
.../libraries/checkpamuserdb.py | 28 ++++++++++++
.../tests/test_checkpamuserdb.py | 43 +++++++++++++++++++
.../pamuserdb/convertpamuserdb/actor.py | 18 ++++++++
.../libraries/convertpamuserdb.py | 27 ++++++++++++
.../tests/test_convertpamuserdb.py | 39 +++++++++++++++++
.../pamuserdb/removeoldpamuserdb/actor.py | 18 ++++++++
.../libraries/removeoldpamuserdb.py | 25 +++++++++++
.../tests/test_removeoldpamuserdb.py | 38 ++++++++++++++++
.../actors/pamuserdb/scanpamuserdb/actor.py | 18 ++++++++
.../scanpamuserdb/libraries/scanpamuserdb.py | 29 +++++++++++++
.../tests/files/pam_userdb_basic | 1 +
.../tests/files/pam_userdb_complete | 9 ++++
.../tests/files/pam_userdb_missing | 1 +
.../scanpamuserdb/tests/test_scanpamuserdb.py | 27 ++++++++++++
.../el9toel10/models/pamuserdblocation.py | 14 ++++++
17 files changed, 359 insertions(+)
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/actor.py
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/libraries/checkpamuserdb.py
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/tests/test_checkpamuserdb.py
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/actor.py
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/libraries/convertpamuserdb.py
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/tests/test_convertpamuserdb.py
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/actor.py
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/libraries/removeoldpamuserdb.py
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/tests/test_removeoldpamuserdb.py
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/actor.py
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/libraries/scanpamuserdb.py
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_basic
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_complete
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_missing
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/test_scanpamuserdb.py
create mode 100644 repos/system_upgrade/el9toel10/models/pamuserdblocation.py
diff --git a/packaging/leapp-repository.spec b/packaging/leapp-repository.spec
index 146afc45..0d63ba02 100644
--- a/packaging/leapp-repository.spec
+++ b/packaging/leapp-repository.spec
@@ -211,6 +211,12 @@ Requires: dracut
Requires: NetworkManager-libnm
Requires: python3-gobject-base
+%endif
+
+%if 0%{?rhel} && 0%{?rhel} == 9
+############# RHEL 9 dependencies (when the source system is RHEL 9) ##########
+# Required to convert pam_userdb database from BerkeleyDB to GDBM
+Requires: libdb-utils
%endif
##################################################
# end requirement
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/actor.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/actor.py
new file mode 100644
index 00000000..8fada645
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/actor.py
@@ -0,0 +1,18 @@
+from leapp.actors import Actor
+from leapp.libraries.actor import checkpamuserdb
+from leapp.models import PamUserDbLocation, Report
+from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
+
+
+class CheckPamUserDb(Actor):
+ """
+ Create report with the location of pam_userdb databases
+ """
+
+ name = 'check_pam_user_db'
+ consumes = (PamUserDbLocation,)
+ produces = (Report,)
+ tags = (ChecksPhaseTag, IPUWorkflowTag)
+
+ def process(self):
+ checkpamuserdb.process()
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/libraries/checkpamuserdb.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/libraries/checkpamuserdb.py
new file mode 100644
index 00000000..05cc71a9
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/libraries/checkpamuserdb.py
@@ -0,0 +1,28 @@
+from leapp import reporting
+from leapp.exceptions import StopActorExecutionError
+from leapp.libraries.stdlib import api
+from leapp.models import PamUserDbLocation
+
+FMT_LIST_SEPARATOR = "\n - "
+
+
+def process():
+ msg = next(api.consume(PamUserDbLocation), None)
+ if not msg:
+ raise StopActorExecutionError('Expected PamUserDbLocation, but got None')
+
+ if msg.locations:
+ reporting.create_report([
+ reporting.Title('pam_userdb databases will be converted to GDBM'),
+ reporting.Summary(
+ 'On RHEL 10, GDBM is used by pam_userdb as its backend database,'
+ ' replacing BerkeleyDB. Existing pam_userdb databases will be'
+ ' converted to GDBM. The following databases will be converted:'
+ '{sep}{locations}'.format(sep=FMT_LIST_SEPARATOR, locations=FMT_LIST_SEPARATOR.join(msg.locations))),
+ reporting.Severity(reporting.Severity.INFO),
+ reporting.Groups([reporting.Groups.SECURITY, reporting.Groups.AUTHENTICATION])
+ ])
+ else:
+ api.current_logger().debug(
+ 'No pam_userdb databases were located, thus nothing will be converted'
+ )
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/tests/test_checkpamuserdb.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/tests/test_checkpamuserdb.py
new file mode 100644
index 00000000..2e11106b
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/tests/test_checkpamuserdb.py
@@ -0,0 +1,43 @@
+import pytest
+
+from leapp import reporting
+from leapp.exceptions import StopActorExecutionError
+from leapp.libraries.actor import checkpamuserdb
+from leapp.libraries.common.testutils import create_report_mocked, logger_mocked
+from leapp.libraries.stdlib import api
+from leapp.models import PamUserDbLocation
+
+
+def test_process_no_msg(monkeypatch):
+ def consume_mocked(*args, **kwargs):
+ yield None
+
+ monkeypatch.setattr(api, 'consume', consume_mocked)
+
+ with pytest.raises(StopActorExecutionError):
+ checkpamuserdb.process()
+
+
+def test_process_no_location(monkeypatch):
+ def consume_mocked(*args, **kwargs):
+ yield PamUserDbLocation(locations=[])
+
+ monkeypatch.setattr(api, 'current_logger', logger_mocked())
+ monkeypatch.setattr(api, 'consume', consume_mocked)
+
+ checkpamuserdb.process()
+ assert (
+ 'No pam_userdb databases were located, thus nothing will be converted'
+ in api.current_logger.dbgmsg
+ )
+
+
+def test_process_locations(monkeypatch):
+ def consume_mocked(*args, **kwargs):
+ yield PamUserDbLocation(locations=['/tmp/db1', '/tmp/db2'])
+
+ monkeypatch.setattr(reporting, "create_report", create_report_mocked())
+ monkeypatch.setattr(api, 'consume', consume_mocked)
+
+ checkpamuserdb.process()
+ assert reporting.create_report.called == 1
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/actor.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/actor.py
new file mode 100644
index 00000000..5f8525b6
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/actor.py
@@ -0,0 +1,18 @@
+from leapp.actors import Actor
+from leapp.libraries.actor import convertpamuserdb
+from leapp.models import PamUserDbLocation
+from leapp.tags import IPUWorkflowTag, PreparationPhaseTag
+
+
+class ConvertPamUserDb(Actor):
+ """
+ Convert the pam_userdb databases to GDBM
+ """
+
+ name = 'convert_pam_user_db'
+ consumes = (PamUserDbLocation,)
+ produces = ()
+ tags = (PreparationPhaseTag, IPUWorkflowTag)
+
+ def process(self):
+ convertpamuserdb.process()
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/libraries/convertpamuserdb.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/libraries/convertpamuserdb.py
new file mode 100644
index 00000000..e55b4102
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/libraries/convertpamuserdb.py
@@ -0,0 +1,27 @@
+from leapp.exceptions import StopActorExecutionError
+from leapp.libraries.stdlib import api, CalledProcessError, run
+from leapp.models import PamUserDbLocation
+
+
+def _convert_db(db_path):
+ cmd = ['db_converter', '--src', f'{db_path}.db', '--dest', f'{db_path}.gdbm']
+ try:
+ run(cmd)
+ except (CalledProcessError, OSError) as e:
+ # As the db_converter does not remove the original DB after conversion or upon failure,
+ # interrupt the upgrade, keeping the original DBs.
+ # If all DBs are successfully converted, the leftover DBs are removed in the removeoldpamuserdb actor.
+ raise StopActorExecutionError(
+ 'Cannot convert pam_userdb database.',
+ details={'details': '{}: {}'.format(str(e), e.stderr)}
+ )
+
+
+def process():
+ msg = next(api.consume(PamUserDbLocation), None)
+ if not msg:
+ raise StopActorExecutionError('Expected PamUserDbLocation, but got None')
+
+ if msg.locations:
+ for location in msg.locations:
+ _convert_db(location)
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/tests/test_convertpamuserdb.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/tests/test_convertpamuserdb.py
new file mode 100644
index 00000000..46505492
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/tests/test_convertpamuserdb.py
@@ -0,0 +1,39 @@
+import os
+
+import pytest
+
+from leapp.exceptions import StopActorExecutionError
+from leapp.libraries.actor import convertpamuserdb
+from leapp.libraries.common.testutils import logger_mocked
+from leapp.libraries.stdlib import api, CalledProcessError
+
+CUR_DIR = os.path.dirname(os.path.abspath(__file__))
+
+
+def test_convert_db_success(monkeypatch):
+ location = os.path.join(CUR_DIR, '/files/db1')
+
+ def run_mocked(cmd, **kwargs):
+ assert cmd == ['db_converter', '--src', f'{location}.db', '--dest', f'{location}.gdbm']
+
+ monkeypatch.setattr(api, 'current_logger', logger_mocked())
+ monkeypatch.setattr(convertpamuserdb, 'run', run_mocked)
+ convertpamuserdb._convert_db(location)
+ assert len(api.current_logger.errmsg) == 0
+
+
+def test_convert_db_failure(monkeypatch):
+ location = os.path.join(CUR_DIR, '/files/db1')
+
+ def run_mocked(cmd, **kwargs):
+ raise CalledProcessError(
+ message='A Leapp Command Error occurred.',
+ command=cmd,
+ result={'exit_code': 1}
+ )
+
+ monkeypatch.setattr(api, 'current_logger', logger_mocked())
+ monkeypatch.setattr(convertpamuserdb, 'run', run_mocked)
+ with pytest.raises(StopActorExecutionError) as err:
+ convertpamuserdb._convert_db(location)
+ assert str(err.value) == 'Cannot convert pam_userdb database.'
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/actor.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/actor.py
new file mode 100644
index 00000000..39a00855
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/actor.py
@@ -0,0 +1,18 @@
+from leapp.actors import Actor
+from leapp.libraries.actor import removeoldpamuserdb
+from leapp.models import PamUserDbLocation
+from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag
+
+
+class RemoveOldPamUserDb(Actor):
+ """
+ Remove old pam_userdb databases
+ """
+
+ name = 'remove_old_pam_user_db'
+ consumes = (PamUserDbLocation,)
+ produces = ()
+ tags = (ApplicationsPhaseTag, IPUWorkflowTag)
+
+ def process(self):
+ removeoldpamuserdb.process()
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/libraries/removeoldpamuserdb.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/libraries/removeoldpamuserdb.py
new file mode 100644
index 00000000..5fc4cb4d
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/libraries/removeoldpamuserdb.py
@@ -0,0 +1,25 @@
+from leapp.exceptions import StopActorExecutionError
+from leapp.libraries.stdlib import api, CalledProcessError, run
+from leapp.models import PamUserDbLocation
+
+
+def _remove_db(db_path):
+ cmd = ['rm', '-f', f'{db_path}.db']
+ try:
+ run(cmd)
+ except (CalledProcessError, OSError) as e:
+ api.current_logger().error(
+ 'Failed to remove {}.db: {}'.format(
+ db_path, e
+ )
+ )
+
+
+def process():
+ msg = next(api.consume(PamUserDbLocation), None)
+ if not msg:
+ raise StopActorExecutionError('Expected PamUserDbLocation, but got None')
+
+ if msg.locations:
+ for location in msg.locations:
+ _remove_db(location)
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/tests/test_removeoldpamuserdb.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/tests/test_removeoldpamuserdb.py
new file mode 100644
index 00000000..2c1d5c75
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/tests/test_removeoldpamuserdb.py
@@ -0,0 +1,38 @@
+import os
+
+from leapp.libraries.actor import removeoldpamuserdb
+from leapp.libraries.common.testutils import logger_mocked
+from leapp.libraries.stdlib import api, CalledProcessError
+
+CUR_DIR = os.path.dirname(os.path.abspath(__file__))
+
+
+def test_remove_db_success(monkeypatch):
+ location = os.path.join(CUR_DIR, '/files/db1')
+
+ def run_mocked(cmd, **kwargs):
+ assert cmd == ['rm', '-f', f'{location}.db']
+
+ monkeypatch.setattr(api, 'current_logger', logger_mocked())
+ monkeypatch.setattr(removeoldpamuserdb, 'run', run_mocked)
+ removeoldpamuserdb._remove_db(location)
+ assert len(api.current_logger.errmsg) == 0
+
+
+def test_remove_db_failure(monkeypatch):
+ location = os.path.join(CUR_DIR, '/files/db1')
+
+ def run_mocked(cmd, **kwargs):
+ raise CalledProcessError(
+ message='A Leapp Command Error occurred.',
+ command=cmd,
+ result={'exit_code': 1}
+ )
+
+ monkeypatch.setattr(api, 'current_logger', logger_mocked())
+ monkeypatch.setattr(removeoldpamuserdb, 'run', run_mocked)
+ removeoldpamuserdb._remove_db(location)
+ assert (
+ 'Failed to remove /files/db1.db'
+ not in api.current_logger.errmsg
+ )
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/actor.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/actor.py
new file mode 100644
index 00000000..b6b35f1a
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/actor.py
@@ -0,0 +1,18 @@
+from leapp.actors import Actor
+from leapp.libraries.actor import scanpamuserdb
+from leapp.models import PamUserDbLocation
+from leapp.tags import FactsPhaseTag, IPUWorkflowTag
+
+
+class ScanPamUserDb(Actor):
+ """
+ Scan the PAM service folder for the location of pam_userdb databases
+ """
+
+ name = 'scan_pam_user_db'
+ consumes = ()
+ produces = (PamUserDbLocation,)
+ tags = (FactsPhaseTag, IPUWorkflowTag)
+
+ def process(self):
+ self.produce(scanpamuserdb.parse_pam_config_folder('/etc/pam.d/'))
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/libraries/scanpamuserdb.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/libraries/scanpamuserdb.py
new file mode 100644
index 00000000..0f668c02
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/libraries/scanpamuserdb.py
@@ -0,0 +1,29 @@
+import os
+import re
+
+from leapp.models import PamUserDbLocation
+
+
+def _parse_pam_config_file(conf_file):
+ with open(conf_file, 'r') as file:
+ for line in file:
+ if 'pam_userdb' in line:
+ match = re.search(r'db=(\S+)', line)
+ if match:
+ return match.group(1)
+
+ return None
+
+
+def parse_pam_config_folder(conf_folder):
+ locations = set()
+
+ for file_name in os.listdir(conf_folder):
+ file_path = os.path.join(conf_folder, file_name)
+
+ if os.path.isfile(file_path):
+ location = _parse_pam_config_file(file_path)
+ if location is not None:
+ locations.add(location)
+
+ return PamUserDbLocation(locations=list(locations))
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_basic b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_basic
new file mode 100644
index 00000000..f115147b
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_basic
@@ -0,0 +1 @@
+auth required pam_userdb.so db=/tmp/db1
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_complete b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_complete
new file mode 100644
index 00000000..84e40b48
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_complete
@@ -0,0 +1,9 @@
+auth required pam_env.so
+auth required pam_faildelay.so delay=2000000
+auth sufficient pam_fprintd.so
+auth [default=1 ignore=ignore success=ok] pam_usertype.so isregular
+auth [default=1 ignore=ignore success=ok] pam_localuser.so
+auth required pam_userdb.so db=/tmp/db2
+auth [default=1 ignore=ignore success=ok] pam_usertype.so isregular
+auth sufficient pam_sss.so forward_pass
+auth required pam_deny.so
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_missing b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_missing
new file mode 100644
index 00000000..764947fc
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_missing
@@ -0,0 +1 @@
+auth sufficient pam_unix.so nullok
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/test_scanpamuserdb.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/test_scanpamuserdb.py
new file mode 100644
index 00000000..3b752d87
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/test_scanpamuserdb.py
@@ -0,0 +1,27 @@
+import os
+
+import pytest
+
+from leapp.libraries.actor import scanpamuserdb
+
+CUR_DIR = os.path.dirname(os.path.abspath(__file__))
+
+
+@pytest.mark.parametrize(
+ "inp,exp_out",
+ [
+ ("files/pam_userdb_missing", None),
+ ("files/pam_userdb_basic", "/tmp/db1"),
+ ("files/pam_userdb_complete", "/tmp/db2"),
+ ],
+)
+def test_parse_pam_config_file(inp, exp_out):
+ file = scanpamuserdb._parse_pam_config_file(os.path.join(CUR_DIR, inp))
+ assert file == exp_out
+
+
+def test_parse_pam_config_folder():
+ msg = scanpamuserdb.parse_pam_config_folder(os.path.join(CUR_DIR, "files/"))
+ assert len(msg.locations) == 2
+ assert "/tmp/db1" in msg.locations
+ assert "/tmp/db2" in msg.locations
diff --git a/repos/system_upgrade/el9toel10/models/pamuserdblocation.py b/repos/system_upgrade/el9toel10/models/pamuserdblocation.py
new file mode 100644
index 00000000..d15b2041
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/models/pamuserdblocation.py
@@ -0,0 +1,14 @@
+from leapp.models import fields, Model
+from leapp.topics import SystemInfoTopic
+
+
+class PamUserDbLocation(Model):
+ """
+ Provides a list of all database files for pam_userdb
+ """
+ topic = SystemInfoTopic
+
+ locations = fields.List(fields.String(), default=[])
+ """
+ The list with the full path to the database files.
+ """
--
2.47.0
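
For illustration, a minimal standalone sketch of the db= extraction performed by the scanpamuserdb library in the patch above; the sample PAM line (and its crypt option) is hypothetical, only the db= pattern comes from the patch:

    import re

    # Hypothetical pam_userdb line from a file under /etc/pam.d/
    line = 'auth required pam_userdb.so db=/tmp/db1 crypt=crypt'

    # Same pattern the scanner uses to pull out the database path
    match = re.search(r'db=(\S+)', line)
    if match:
        print(match.group(1))  # -> /tmp/db1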

View File

@ -1,239 +0,0 @@
From 29c7619f0368384dd2e266610098a1f8d7a13813 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Wed, 27 Aug 2025 20:04:28 +0200
Subject: [PATCH 09/55] Rename Containerfiles to consistent names
The container files (both those used in CI and those used locally via the
Makefile), for both build and test, use various base containers - CentOS, UBIs.
This patch unifies the naming scheme to Containerfile.elX so that the user
does not have to remember which one is used by which command and at
which version.
The containerfiles used by GitHub Actions are moved to a separate
folder to avoid name clashes.
Also, the --security-opt=seccomp=unconfined option is removed from the
GitHub Actions test workflow. It is not clear why it was set in the
first place, but it no longer seems to be needed.
---
.github/workflows/unit-tests.yml | 24 ++++++++------
Makefile | 31 +++++++++----------
.../{Containerfile.ubi8 => Containerfile.el8} | 0
.../{Containerfile.ubi9 => Containerfile.el9} | 0
...{Containerfile.rhel8 => Containerfile.el8} | 0
...{Containerfile.rhel9 => Containerfile.el9} | 0
.../Containerfile.el8} | 0
.../Containerfile.el8-lint} | 0
.../Containerfile.el9} | 0
.../Containerfile.el9-lint} | 0
10 files changed, 30 insertions(+), 25 deletions(-)
rename utils/container-builds/{Containerfile.ubi8 => Containerfile.el8} (100%)
rename utils/container-builds/{Containerfile.ubi9 => Containerfile.el9} (100%)
rename utils/container-tests/{Containerfile.rhel8 => Containerfile.el8} (100%)
rename utils/container-tests/{Containerfile.rhel9 => Containerfile.el9} (100%)
rename utils/container-tests/{Containerfile.ubi8 => ci/Containerfile.el8} (100%)
rename utils/container-tests/{Containerfile.ubi8-lint => ci/Containerfile.el8-lint} (100%)
rename utils/container-tests/{Containerfile.ubi9 => ci/Containerfile.el9} (100%)
rename utils/container-tests/{Containerfile.ubi9-lint => ci/Containerfile.el9-lint} (100%)
diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml
index ed82e0e5..cfcec437 100644
--- a/.github/workflows/unit-tests.yml
+++ b/.github/workflows/unit-tests.yml
@@ -19,36 +19,36 @@ jobs:
- name: 'Unit tests (python:3.12; repos:el9toel10,common)'
python: python3.12
repos: 'el9toel10,common'
- container: ubi9
+ container: el9
- name: 'Linters (python:3.12; repos:el9toel10,common)'
python: python3.12
repos: 'el9toel10,common'
- container: ubi9-lint
+ container: el9-lint
- name: 'Unit tests (python:3.9; repos:el9toel10,common)'
python: python3.9
repos: 'el9toel10,common'
- container: ubi9
+ container: el9
- name: 'Linters (python:3.9; repos:el9toel10,common)'
python: python3.9
repos: 'el9toel10,common'
- container: ubi9-lint
+ container: el9-lint
# 8to9
- name: 'Unit tests (python:3.9; repos:el8toel9,common)'
python: python3.9
repos: 'el8toel9,common'
- container: ubi9
+ container: el9
- name: 'Linters (python:3.9; repos:el8toel9,common)'
python: python3.9
repos: 'el8toel9,common'
- container: ubi9-lint
+ container: el9-lint
- name: 'Unit tests (python:3.6; repos:el8toel9,common)'
python: python3.6
repos: 'el8toel9,common'
- container: ubi8
+ container: el8
- name: 'Linters (python:3.6; repos:el8toel9,common)'
python: python3.6
repos: 'el8toel9,common'
- container: ubi8-lint
+ container: el8-lint
steps:
- name: Checkout code
@@ -63,4 +63,10 @@ jobs:
run: |
git branch -f main origin/main
- name: ${{matrix.scenarios.name}}
- run: script -e -c /bin/bash -c 'TERM=xterm podman build --security-opt=seccomp=unconfined -t leapp-tests -f utils/container-tests/Containerfile.${{matrix.scenarios.container}} utils/container-tests && PYTHON_VENV=${{matrix.scenarios.python}} REPOSITORIES=${{matrix.scenarios.repos}} podman run --security-opt=seccomp=unconfined --rm -ti -v ${PWD}:/payload --env=PYTHON_VENV --env=REPOSITORIES leapp-tests'
+ run: |
+ script -e -c /bin/bash -c \
+ 'TERM=xterm \
+ podman build -t leapp-tests -f utils/container-tests/ci/Containerfile.${{matrix.scenarios.container}} . && \
+ PYTHON_VENV=${{matrix.scenarios.python}} \
+ REPOSITORIES=${{matrix.scenarios.repos}} \
+ podman run --rm -ti -v ${PWD}:/payload --env=PYTHON_VENV --env=REPOSITORIES leapp-tests'
diff --git a/Makefile b/Makefile
index e0fc7e00..754c2c63 100644
--- a/Makefile
+++ b/Makefile
@@ -51,7 +51,7 @@ _COPR_CONFIG=$${COPR_CONFIG:-~/.config/copr_rh_oamg.conf}
_CONTAINER_TOOL=$${CONTAINER_TOOL:-podman}
# container to run tests in
-_TEST_CONTAINER=$${TEST_CONTAINER:-rhel8}
+_TEST_CONTAINER=$${TEST_CONTAINER:-el8}
# In case just specific CHROOTs should be used for the COPR build, you can
# set the multiple CHROOTs separated by comma in the COPR_CHROOT envar, e.g.
@@ -129,7 +129,7 @@ help:
@echo " test lint source code and run tests"
@echo " test_no_lint run tests without linting the source code"
@echo " test_container run lint and tests in container"
- @echo " - default container is 'rhel8'"
+ @echo " - default container is 'el8'"
@echo " - can be changed by setting TEST_CONTAINER env"
@echo " test_container_all run lint and tests in all available containers"
@echo " test_container_no_lint run tests without linting in container, see test_container"
@@ -164,9 +164,9 @@ help:
@echo " PR=7 SUFFIX='my_additional_suffix' make <target>"
@echo " MR=6 COPR_CONFIG='path/to/the/config/copr/file' make <target>"
@echo " ACTOR=<actor> TEST_LIBS=y make test"
- @echo " BUILD_CONTAINER=rhel8 make build_container"
+ @echo " BUILD_CONTAINER=el8 make build_container"
@echo " TEST_CONTAINER=f42 make test_container"
- @echo " CONTAINER_TOOL=docker TEST_CONTAINER=rhel8 make test_container_no_lint"
+ @echo " CONTAINER_TOOL=docker TEST_CONTAINER=el8 make test_container_no_lint"
@echo ""
clean:
@@ -252,10 +252,10 @@ build_container:
echo "--- Build RPM ${PKGNAME}-${VERSION}-${RELEASE}.el$(DIST_VERSION).rpm in container ---";
case "$(BUILD_CONTAINER)" in \
el8) \
- CONT_FILE="utils/container-builds/Containerfile.ubi8"; \
+ CONT_FILE="utils/container-builds/Containerfile.el8"; \
;; \
el9) \
- CONT_FILE="utils/container-builds/Containerfile.ubi9"; \
+ CONT_FILE="utils/container-builds/Containerfile.el9"; \
;; \
"") \
echo "BUILD_CONTAINER must be set"; \
@@ -415,7 +415,7 @@ lint_container:
@_TEST_CONT_TARGET="lint" $(MAKE) test_container
lint_container_all:
- @for container in "f42" "rhel8" "rhel9"; do \
+ @for container in f42 el{8,9}; do \
TEST_CONTAINER=$$container $(MAKE) lint_container || exit 1; \
done
@@ -429,16 +429,16 @@ test_container:
export CONT_FILE="utils/container-tests/Containerfile.f42"; \
export _VENV="python3.13"; \
;; \
- rhel8) \
- export CONT_FILE="utils/container-tests/Containerfile.rhel8"; \
+ el8) \
+ export CONT_FILE="utils/container-tests/Containerfile.el8"; \
export _VENV="python3.6"; \
;; \
- rhel9) \
- export CONT_FILE="utils/container-tests/Containerfile.rhel9"; \
+ el9) \
+ export CONT_FILE="utils/container-tests/Containerfile.el9"; \
export _VENV="python3.9"; \
;; \
*) \
- echo "Error: Available containers are: f42, rhel8, rhel9"; exit 1; \
+ echo "Error: Available containers are: f42, el8, el9"; exit 1; \
;; \
esac; \
export TEST_IMAGE="leapp-repo-tests-$(_TEST_CONTAINER)"; \
@@ -470,7 +470,7 @@ test_container:
exit $$res
test_container_all:
- @for container in "f42" "rhel8" "rhel9"; do \
+ @for container in "f42" "el8" "el9"; do \
TEST_CONTAINER=$$container $(MAKE) test_container || exit 1; \
done
@@ -478,14 +478,13 @@ test_container_no_lint:
@_TEST_CONT_TARGET="test_no_lint" $(MAKE) test_container
test_container_all_no_lint:
- @for container in "f42" "rhel8" "rhel9"; do \
+ @for container in f42 el{8,9}; do \
TEST_CONTAINER=$$container $(MAKE) test_container_no_lint || exit 1; \
done
# clean all testing and building containers and their images
clean_containers:
- @for i in "leapp-repo-tests-f42" "leapp-repo-tests-rhel8" \
- "leapp-repo-tests-rhel9" "leapp-repo-build-el8"; do \
+ @for i in leapp-repo-tests-f42 leapp-repo-tests-el{8,9} leapp-repo-build-el{8,9}; do \
$(_CONTAINER_TOOL) kill "$$i-cont" || :; \
$(_CONTAINER_TOOL) rm "$$i-cont" || :; \
$(_CONTAINER_TOOL) rmi "$$i" || :; \
diff --git a/utils/container-builds/Containerfile.ubi8 b/utils/container-builds/Containerfile.el8
similarity index 100%
rename from utils/container-builds/Containerfile.ubi8
rename to utils/container-builds/Containerfile.el8
diff --git a/utils/container-builds/Containerfile.ubi9 b/utils/container-builds/Containerfile.el9
similarity index 100%
rename from utils/container-builds/Containerfile.ubi9
rename to utils/container-builds/Containerfile.el9
diff --git a/utils/container-tests/Containerfile.rhel8 b/utils/container-tests/Containerfile.el8
similarity index 100%
rename from utils/container-tests/Containerfile.rhel8
rename to utils/container-tests/Containerfile.el8
diff --git a/utils/container-tests/Containerfile.rhel9 b/utils/container-tests/Containerfile.el9
similarity index 100%
rename from utils/container-tests/Containerfile.rhel9
rename to utils/container-tests/Containerfile.el9
diff --git a/utils/container-tests/Containerfile.ubi8 b/utils/container-tests/ci/Containerfile.el8
similarity index 100%
rename from utils/container-tests/Containerfile.ubi8
rename to utils/container-tests/ci/Containerfile.el8
diff --git a/utils/container-tests/Containerfile.ubi8-lint b/utils/container-tests/ci/Containerfile.el8-lint
similarity index 100%
rename from utils/container-tests/Containerfile.ubi8-lint
rename to utils/container-tests/ci/Containerfile.el8-lint
diff --git a/utils/container-tests/Containerfile.ubi9 b/utils/container-tests/ci/Containerfile.el9
similarity index 100%
rename from utils/container-tests/Containerfile.ubi9
rename to utils/container-tests/ci/Containerfile.el9
diff --git a/utils/container-tests/Containerfile.ubi9-lint b/utils/container-tests/ci/Containerfile.el9-lint
similarity index 100%
rename from utils/container-tests/Containerfile.ubi9-lint
rename to utils/container-tests/ci/Containerfile.el9-lint
--
2.51.1

View File

@ -0,0 +1,31 @@
From d6e57eec3ded2887008055442ba906a92c572a01 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Thu, 10 Oct 2024 14:03:36 +0200
Subject: [PATCH 09/40] Replace mirror.centos.org with vault.centos.org in CentOS
7 Containerfile
As mirror.centos.org is dead, replace mirrorlist with baseurl pointing
to vault.centos.org in utils/container-builds/Containerfile.centos7.
---
utils/container-builds/Containerfile.centos7 | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/utils/container-builds/Containerfile.centos7 b/utils/container-builds/Containerfile.centos7
index 70ac3df1..af00eddb 100644
--- a/utils/container-builds/Containerfile.centos7
+++ b/utils/container-builds/Containerfile.centos7
@@ -2,6 +2,11 @@ FROM centos:7
VOLUME /repo
+# mirror.centos.org is dead, comment out mirrorlist and set baseurl to vault.centos.org
+RUN sed -i s/mirror.centos.org/vault.centos.org/ /etc/yum.repos.d/CentOS-*.repo
+RUN sed -i s/^#\s*baseurl=http/baseurl=http/ /etc/yum.repos.d/CentOS-*.repo
+RUN sed -i s/^mirrorlist=http/#mirrorlist=http/ /etc/yum.repos.d/CentOS-*.repo
+
RUN yum update -y && \
yum install -y rpm-build python-devel make git
--
2.47.0

View File

@ -1,80 +0,0 @@
From 17767238d038d36341181e1702b73681ae432439 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Wed, 20 Aug 2025 14:12:13 +0200
Subject: [PATCH 10/55] Replace ubi:8 containers with centos:8
The networkmanagerconnectionscanner depends on NetworkManager-libnm and
python3-gobject. These are not available in the UBI 8 repos, so the
related tests are skipped.
This patch changes the base container from UBI 8 to CentOS 8, which provides
the packages, so the tests are no longer skipped.
---
commands/tests/test_upgrade_paths.py | 5 +++++
utils/container-tests/Containerfile.el8 | 10 ++++++++--
utils/container-tests/ci/Containerfile.el8 | 10 ++++++++--
3 files changed, 21 insertions(+), 4 deletions(-)
diff --git a/commands/tests/test_upgrade_paths.py b/commands/tests/test_upgrade_paths.py
index 89b5eb71..9bdf5792 100644
--- a/commands/tests/test_upgrade_paths.py
+++ b/commands/tests/test_upgrade_paths.py
@@ -42,6 +42,11 @@ def test_get_target_version(mock_open, monkeypatch):
},
)
def test_get_target_release(mock_open, monkeypatch): # do not remove mock_open
+ # Make it look like it's RHEL even on centos, because that's what the test
+ # assumes.
+ # Otherwise the test, when ran on Centos, fails because it works
+ # with MAJOR.MINOR version format while Centos uses MAJOR format.
+ monkeypatch.setattr(command_utils, 'get_distro_id', lambda: 'rhel')
monkeypatch.setattr(command_utils, 'get_os_release_version_id', lambda x: '8.6')
# make sure env var LEAPP_DEVEL_TARGET_RELEASE takes precedence
diff --git a/utils/container-tests/Containerfile.el8 b/utils/container-tests/Containerfile.el8
index 6f21839b..b92e8742 100644
--- a/utils/container-tests/Containerfile.el8
+++ b/utils/container-tests/Containerfile.el8
@@ -1,9 +1,15 @@
-FROM registry.access.redhat.com/ubi8/ubi:latest
+FROM centos:8
+
+RUN sed -i s/mirror.centos.org/vault.centos.org/ /etc/yum.repos.d/CentOS-*.repo
+RUN sed -i s/^#\s*baseurl=http/baseurl=http/ /etc/yum.repos.d/CentOS-*.repo
+RUN sed -i s/^mirrorlist=http/#mirrorlist=http/ /etc/yum.repos.d/CentOS-*.repo
VOLUME /repo
RUN dnf update -y && \
- dnf install -y python3-virtualenv python3-setuptools python3-pip make git rsync
+ dnf install -y git make rsync \
+ python3-virtualenv python3-setuptools python3-pip \
+ python3-gobject NetworkManager-libnm
ENV PYTHON_VENV python3.6
diff --git a/utils/container-tests/ci/Containerfile.el8 b/utils/container-tests/ci/Containerfile.el8
index 4da60c18..4a19092e 100644
--- a/utils/container-tests/ci/Containerfile.el8
+++ b/utils/container-tests/ci/Containerfile.el8
@@ -1,9 +1,15 @@
-FROM registry.access.redhat.com/ubi8/ubi:latest
+FROM centos:8
+
+RUN sed -i s/mirror.centos.org/vault.centos.org/ /etc/yum.repos.d/CentOS-*.repo
+RUN sed -i s/^#\s*baseurl=http/baseurl=http/ /etc/yum.repos.d/CentOS-*.repo
+RUN sed -i s/^mirrorlist=http/#mirrorlist=http/ /etc/yum.repos.d/CentOS-*.repo
VOLUME /payload
RUN dnf update -y && \
- dnf install python3-virtualenv python3-setuptools python3-pip make git -y
+ dnf install -y make git \
+ python3-virtualenv python3-setuptools python3-pip \
+ python3-gobject NetworkManager-libnm
WORKDIR /payload
ENTRYPOINT make install-deps && make test_no_lint
--
2.51.1

View File

@ -0,0 +1,35 @@
From b997e4eeb835809d1fbfd1a0b9a6114c133bf0b4 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Thu, 10 Oct 2024 15:28:48 +0200
Subject: [PATCH 10/40] kernelcmdlineconfig: Add Report to produces tuple
The missing `leapp.reporting.Report` class is added to the
kernelcmdlineconfig actor's `produces` tuple.
---
.../system_upgrade/common/actors/kernelcmdlineconfig/actor.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/repos/system_upgrade/common/actors/kernelcmdlineconfig/actor.py b/repos/system_upgrade/common/actors/kernelcmdlineconfig/actor.py
index b44fd835..3585a14e 100644
--- a/repos/system_upgrade/common/actors/kernelcmdlineconfig/actor.py
+++ b/repos/system_upgrade/common/actors/kernelcmdlineconfig/actor.py
@@ -4,6 +4,7 @@ from leapp.actors import Actor
from leapp.exceptions import StopActorExecutionError
from leapp.libraries.actor import kernelcmdlineconfig
from leapp.models import FirmwareFacts, InstalledTargetKernelInfo, KernelCmdlineArg, TargetKernelCmdlineArgTasks
+from leapp.reporting import Report
from leapp.tags import FinalizationPhaseTag, IPUWorkflowTag
@@ -14,7 +15,7 @@ class KernelCmdlineConfig(Actor):
name = 'kernelcmdlineconfig'
consumes = (KernelCmdlineArg, InstalledTargetKernelInfo, FirmwareFacts, TargetKernelCmdlineArgTasks)
- produces = ()
+ produces = (Report,)
tags = (FinalizationPhaseTag, IPUWorkflowTag)
def process(self):
--
2.47.0

View File

@ -1,53 +0,0 @@
From c4f3ace1ebb909dc53796e16959a2459a15d9d74 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 9 Oct 2025 13:30:16 +0000
Subject: [PATCH 11/55] chore(deps): update actions/checkout action to v5
---
.github/workflows/codespell.yml | 2 +-
.github/workflows/differential-shellcheck.yml | 2 +-
.github/workflows/unit-tests.yml | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml
index 3e595e32..4b07e4b3 100644
--- a/.github/workflows/codespell.yml
+++ b/.github/workflows/codespell.yml
@@ -14,7 +14,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
- uses: codespell-project/actions-codespell@v2
with:
ignore_words_list: ro,fo,couldn,repositor,zeor,bootup
diff --git a/.github/workflows/differential-shellcheck.yml b/.github/workflows/differential-shellcheck.yml
index e1bafb93..6c81713c 100644
--- a/.github/workflows/differential-shellcheck.yml
+++ b/.github/workflows/differential-shellcheck.yml
@@ -19,7 +19,7 @@ jobs:
steps:
- name: Repository checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
with:
fetch-depth: 0
diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml
index cfcec437..d1b8fb2a 100644
--- a/.github/workflows/unit-tests.yml
+++ b/.github/workflows/unit-tests.yml
@@ -52,7 +52,7 @@ jobs:
steps:
- name: Checkout code
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
with:
# NOTE(ivasilev) fetch-depth 0 is critical here as leapp deps discovery depends on specific substring in
# commit message and default 1 option will get us just merge commit which has an unrelevant message.
--
2.51.1

View File

@ -0,0 +1,204 @@
From c2c96affa7b20c82969419ce49b65cbf646a0c32 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Fri, 18 Oct 2024 12:43:19 +0200
Subject: [PATCH 11/40] kernelcmdlineconfig: Use args from first entry when
multiple entries are listed
Instead of erroring out when grubby lists multiple entries for the
default kernel, always use the `args=` and `root=` from the first one and create
a post-upgrade report. The report instructs the user to verify that those are
the correct values, or to correct them.
This can happen, for example, if MAKEDEBUG=yes is set in
/etc/sysconfig/kernel.
Jira: RHEL-46911
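
As an illustration of the behavior described above (a simplified sketch, not the actor's actual code, which also logs warnings and creates the report): keep only the first args=/root= values when `grubby --info <kernel>` reports several entries for the same default kernel. The sample grubby output below is hypothetical.

    def first_args_and_root(grubby_stdout):
        kernel_args = kernel_root = None
        for record in grubby_stdout.splitlines():
            if record.startswith('args=') and kernel_args is None:
                kernel_args = record.split('=', 1)[1].strip('"')
            elif record.startswith('root=') and kernel_root is None:
                kernel_root = record.split('=', 1)[1].strip('"')
        return kernel_args, kernel_root

    # Hypothetical output with two entries; only the first values are used.
    sample = 'args="ro crashkernel=auto"\nroot="/dev/mapper/rhel-root"\n' \
             'args="ro debug"\nroot="/dev/mapper/rhel-root"'
    print(first_args_and_root(sample))
    # ('ro crashkernel=auto', '/dev/mapper/rhel-root')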
---
.../libraries/kernelcmdlineconfig.py | 79 ++++++++++++++++---
.../tests/test_kernelcmdlineconfig.py | 48 ++++++++++-
2 files changed, 116 insertions(+), 11 deletions(-)
diff --git a/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py b/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py
index 6b261c3b..19c50f3c 100644
--- a/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py
+++ b/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py
@@ -109,10 +109,55 @@ def _extract_grubby_value(record):
return matches.group(2)
+def report_multple_entries_for_default_kernel():
+ if use_cmdline_file():
+ report_hint = (
+ 'After the system has been rebooted into the new version of RHEL,'
+ ' check that configured default kernel cmdline arguments in /etc/kernel/cmdline '
+ ' are correct. In case that different arguments are expected, update the file as needed.'
+ )
+ else:
+ report_hint = (
+ 'After the system has been rebooted into the new version of RHEL,'
+ ' check that configured default kernel cmdline arguments are set as expected, using'
+ ' the `grub2-editenv list` command. '
+ ' If different default arguments are expected, update them using grub2-editenv.\n'
+ ' For example, consider that current booted kernel has correct kernel cmdline'
+ ' arguments and /proc/cmdline contains:\n\n'
+ ' BOOT_IMAGE=(hd0,msdos1)/vmlinuz-4.18.0-425.3.1.el8.x86_64'
+ ' root=/dev/mapper/rhel_ibm--root ro console=tty0'
+ ' console=ttyS0,115200 rd_NO_PLYMOUTH\n\n'
+ ' then run the following grub2-editenv command:\n\n'
+ ' # grub2-editenv - set "kernelopts=root=/dev/mapper/rhel_ibm--root'
+ ' ro console=tty0 console=ttyS0,115200 rd_NO_PLYMOUTH"'
+ )
+
+ reporting.create_report([
+ reporting.Title('Ensure that expected default kernel cmdline arguments are set'),
+ reporting.Summary(
+ 'During the upgrade we needed to modify the kernel command line arguments.'
+ ' However, multiple bootloader entries with different arguments were found for the default'
+ ' kernel (perhaps MAKEDEBUG=yes is set in /etc/sysconfig/kernel).'
+ ' Leapp used the arguments from the first found entry of the target kernel'
+ ' and set it as the new default kernel cmdline arguments for kernels installed in the future.'
+ ),
+ reporting.Remediation(hint=report_hint),
+ reporting.Severity(reporting.Severity.HIGH),
+ reporting.Groups([
+ reporting.Groups.BOOT,
+ reporting.Groups.KERNEL,
+ reporting.Groups.POST,
+ ]),
+ reporting.RelatedResource('file', '/etc/kernel/cmdline'),
+ ])
+
+
def retrieve_args_for_default_kernel(kernel_info):
# Copy the args for the default kernel to all kernels.
kernel_args = None
kernel_root = None
+ detected_multiple_entries = False
+
cmd = ['grubby', '--info', kernel_info.kernel_img_path]
output = stdlib.run(cmd, split=False)
for record in output['stdout'].splitlines():
@@ -122,19 +167,30 @@ def retrieve_args_for_default_kernel(kernel_info):
temp_kernel_args = _extract_grubby_value(record)
if kernel_args:
- api.current_logger().warning('Grubby output is malformed:'
- ' `args=` is listed more than once.')
if kernel_args != temp_kernel_args:
- raise ReadOfKernelArgsError('Grubby listed `args=` multiple'
- ' times with different values.')
- kernel_args = _extract_grubby_value(record)
+ api.current_logger().warning(
+ 'Grubby output listed `args=` multiple times with different values,'
+ ' continuing with the first result'
+ )
+ detected_multiple_entries = True
+ else:
+ api.current_logger().warning('Grubby output listed `args=` more than once')
+ else:
+ kernel_args = temp_kernel_args
elif record.startswith('root='):
- api.current_logger().warning('Grubby output is malformed:'
- ' `root=` is listed more than once.')
+ temp_kernel_root = _extract_grubby_value(record)
+
if kernel_root:
- raise ReadOfKernelArgsError('Grubby listed `root=` multiple'
- ' times with different values')
- kernel_root = _extract_grubby_value(record)
+ if kernel_root != temp_kernel_root:
+ api.current_logger().warning(
+ 'Grubby output listed `root=` multiple times with different values,'
+ ' continuing with the first result'
+ )
+ detected_multiple_entries = True
+ else:
+ api.current_logger().warning('Grubby output listed `root=` more than once')
+ else:
+ kernel_root = temp_kernel_root
if not kernel_args or not kernel_root:
raise ReadOfKernelArgsError(
@@ -142,6 +198,9 @@ def retrieve_args_for_default_kernel(kernel_info):
' kernels: root={}, args={}'.format(kernel_root, kernel_args)
)
+ if detected_multiple_entries:
+ report_multple_entries_for_default_kernel()
+
return kernel_root, kernel_args
diff --git a/repos/system_upgrade/common/actors/kernelcmdlineconfig/tests/test_kernelcmdlineconfig.py b/repos/system_upgrade/common/actors/kernelcmdlineconfig/tests/test_kernelcmdlineconfig.py
index ffe4b046..e5759a7b 100644
--- a/repos/system_upgrade/common/actors/kernelcmdlineconfig/tests/test_kernelcmdlineconfig.py
+++ b/repos/system_upgrade/common/actors/kernelcmdlineconfig/tests/test_kernelcmdlineconfig.py
@@ -4,11 +4,12 @@ from collections import namedtuple
import pytest
+from leapp import reporting
from leapp.exceptions import StopActorExecutionError
from leapp.libraries import stdlib
from leapp.libraries.actor import kernelcmdlineconfig
from leapp.libraries.common.config import architecture
-from leapp.libraries.common.testutils import CurrentActorMocked
+from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked
from leapp.libraries.stdlib import api
from leapp.models import InstalledTargetKernelInfo, KernelCmdlineArg, TargetKernelCmdlineArgTasks
@@ -183,6 +184,51 @@ def test_kernelcmdline_config_no_version(monkeypatch):
assert not mocked_run.commands
+SECOND_KERNEL_ARGS = (
+ 'ro rootflags=subvol=root'
+ ' resume=/dev/mapper/luks-2c0df999-81ec-4a35-a1f9-b93afee8c6ad'
+ ' rd.luks.uuid=luks-90a6412f-c588-46ca-9118-5aca35943d25'
+ ' rd.luks.uuid=luks-2c0df999-81ec-4a35-a1f9-b93afee8c6ad'
+)
+SECOND_KERNEL_ROOT = 'UUID=1aa15850-2685-418d-95a6-f7266a2de83b'
+
+
+@pytest.mark.parametrize(
+ 'second_grubby_output',
+ (
+ TEMPLATE_GRUBBY_INFO_OUTPUT.format(SECOND_KERNEL_ARGS, SECOND_KERNEL_ROOT),
+ TEMPLATE_GRUBBY_INFO_OUTPUT.format(SAMPLE_KERNEL_ARGS, SECOND_KERNEL_ROOT),
+ TEMPLATE_GRUBBY_INFO_OUTPUT.format(SECOND_KERNEL_ARGS, SAMPLE_KERNEL_ROOT),
+ )
+)
+def test_kernelcmdline_config_mutiple_args(monkeypatch, second_grubby_output):
+ kernel_img_path = '/boot/vmlinuz-X'
+ kernel_info = InstalledTargetKernelInfo(pkg_nevra=TARGET_KERNEL_NEVRA,
+ uname_r='',
+ kernel_img_path=kernel_img_path,
+ initramfs_path='/boot/initramfs-X')
+
+ # For this test, we need to check we get the proper report if grubby --info
+ # outputs multiple different `root=` or `args=`
+ # and that the first ones are used
+ grubby_info_output = "\n".join((SAMPLE_GRUBBY_INFO_OUTPUT, second_grubby_output))
+
+ mocked_run = MockedRun(
+ outputs={" ".join(("grubby", "--info", kernel_img_path)): grubby_info_output,
+ }
+ )
+ monkeypatch.setattr(stdlib, 'run', mocked_run)
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked())
+ monkeypatch.setattr(reporting, "create_report", create_report_mocked())
+
+ root, args = kernelcmdlineconfig.retrieve_args_for_default_kernel(kernel_info)
+ assert root == SAMPLE_KERNEL_ROOT
+ assert args == SAMPLE_KERNEL_ARGS
+ assert reporting.create_report.called == 1
+ expected_title = 'Ensure that expected default kernel cmdline arguments are set'
+ assert expected_title in reporting.create_report.report_fields['title']
+
+
def test_kernelcmdline_config_malformed_args(monkeypatch):
kernel_img_path = '/boot/vmlinuz-X'
kernel_info = InstalledTargetKernelInfo(pkg_nevra=TARGET_KERNEL_NEVRA,
--
2.47.0

View File

@ -1,46 +0,0 @@
From 601c26f795a7b2f6cb553c656112235d17137b8f Mon Sep 17 00:00:00 2001
From: karolinku <kkula@redhat.com>
Date: Tue, 16 Sep 2025 15:58:29 +0200
Subject: [PATCH 12/55] LiveMode: Add /etc/crypttab file to the target
userspace container
When upgrading with LiveMode, the auto-unlock of encrypted devices
fails because the /etc/crypttab configuration file is not present inside
the squashfs, causing the boot process to fail.
This change copies the /etc/crypttab file to the target userspace container
during the upgrade process, allowing encrypted devices to be properly
unlocked.
Jira: RHEL-90098
---
.../common/actors/checkluks/libraries/checkluks.py | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/repos/system_upgrade/common/actors/checkluks/libraries/checkluks.py b/repos/system_upgrade/common/actors/checkluks/libraries/checkluks.py
index 57a94e9d..aac171a7 100644
--- a/repos/system_upgrade/common/actors/checkluks/libraries/checkluks.py
+++ b/repos/system_upgrade/common/actors/checkluks/libraries/checkluks.py
@@ -3,6 +3,7 @@ from leapp.libraries.common.config.version import get_source_major_version
from leapp.libraries.stdlib import api
from leapp.models import (
CephInfo,
+ CopyFile,
DracutModule,
LuksDumps,
StorageInfo,
@@ -156,7 +157,10 @@ def check_invalid_luks_devices():
'tpm2-tools',
'tpm2-abrmd'
]
- api.produce(TargetUserSpaceUpgradeTasks(install_rpms=required_crypt_rpms))
+ api.produce(TargetUserSpaceUpgradeTasks(
+ copy_files=[CopyFile(src="/etc/crypttab")],
+ install_rpms=required_crypt_rpms)
+ )
api.produce(UpgradeInitramfsTasks(include_dracut_modules=[
DracutModule(name='clevis'),
DracutModule(name='clevis-pin-tpm2')
--
2.51.1

View File

@ -0,0 +1,216 @@
From 053137c50d1b060f9e6e6e45d82196b1045391b7 Mon Sep 17 00:00:00 2001
From: mhecko <mhecko@redhat.com>
Date: Thu, 4 Apr 2024 14:22:48 +0200
Subject: [PATCH 12/40] check_microarch: refactor to handle possible future
reqs
---
.../actors/checkmicroarchitecture/actor.py | 0
.../libraries/checkmicroarchitecture.py | 73 +++++++++++++++++++
.../tests/test_checkmicroarchitecture.py | 21 ++++--
.../libraries/checkmicroarchitecture.py | 46 ------------
4 files changed, 87 insertions(+), 53 deletions(-)
rename repos/system_upgrade/{el8toel9 => common}/actors/checkmicroarchitecture/actor.py (100%)
create mode 100644 repos/system_upgrade/common/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py
rename repos/system_upgrade/{el8toel9 => common}/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py (79%)
delete mode 100644 repos/system_upgrade/el8toel9/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py
diff --git a/repos/system_upgrade/el8toel9/actors/checkmicroarchitecture/actor.py b/repos/system_upgrade/common/actors/checkmicroarchitecture/actor.py
similarity index 100%
rename from repos/system_upgrade/el8toel9/actors/checkmicroarchitecture/actor.py
rename to repos/system_upgrade/common/actors/checkmicroarchitecture/actor.py
diff --git a/repos/system_upgrade/common/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py b/repos/system_upgrade/common/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py
new file mode 100644
index 00000000..cc617203
--- /dev/null
+++ b/repos/system_upgrade/common/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py
@@ -0,0 +1,73 @@
+from collections import namedtuple
+
+from leapp import reporting
+from leapp.libraries.common.config.architecture import ARCH_X86_64, matches_architecture
+from leapp.libraries.common.config.version import get_target_major_version
+from leapp.libraries.stdlib import api
+from leapp.models import CPUInfo
+
+X86_64_BASELINE_FLAGS = ['cmov', 'cx8', 'fpu', 'fxsr', 'mmx', 'syscall', 'sse', 'sse2']
+X86_64_V2_FLAGS = ['cx16', 'lahf_lm', 'popcnt', 'pni', 'sse4_1', 'sse4_2', 'ssse3']
+
+MicroarchInfo = namedtuple('MicroarchInfo', ('required_flags', 'extra_report_fields', 'microarch_ver'))
+
+
+def _inhibit_upgrade(missing_flags, target_rhel, microarch_ver, extra_report_fields=None):
+ title = 'Current x86-64 microarchitecture is unsupported in {0}'.format(target_rhel)
+ summary = ('{0} has a higher CPU requirement than older versions, it now requires a CPU '
+ 'compatible with {1} instruction set or higher.\n\n'
+ 'Missings flags detected are: {2}\n'.format(target_rhel, microarch_ver, ', '.join(missing_flags)))
+
+ report_fields = [
+ reporting.Title(title),
+ reporting.Summary(summary),
+ reporting.Severity(reporting.Severity.HIGH),
+ reporting.Groups([reporting.Groups.INHIBITOR]),
+ reporting.Groups([reporting.Groups.SANITY]),
+ reporting.Remediation(hint=('If case of using virtualization, virtualization platforms often allow '
+ 'configuring a minimum denominator CPU model for compatibility when migrating '
+ 'between different CPU models. Ensure that minimum requirements are not below '
+ 'that of {0}\n').format(target_rhel)),
+ ]
+
+ if extra_report_fields:
+ report_fields += extra_report_fields
+
+ reporting.create_report(report_fields)
+
+
+def process():
+ """
+ Check whether the processor matches the required microarchitecture.
+ """
+
+ if not matches_architecture(ARCH_X86_64):
+ api.current_logger().info('Architecture not x86-64. Skipping microarchitecture test.')
+ return
+
+ cpuinfo = next(api.consume(CPUInfo))
+
+ rhel9_microarch_article = reporting.ExternalLink(
+ title='Building Red Hat Enterprise Linux 9 for the x86-64-v2 microarchitecture level',
+ url='https://red.ht/rhel-9-intel-microarchitectures'
+ )
+
+ rhel_major_to_microarch_reqs = {
+ '9': MicroarchInfo(microarch_ver='x86-64-v2',
+ required_flags=(X86_64_BASELINE_FLAGS + X86_64_V2_FLAGS),
+ extra_report_fields=[rhel9_microarch_article]),
+ }
+
+ microarch_info = rhel_major_to_microarch_reqs.get(get_target_major_version())
+ if not microarch_info:
+ api.current_logger().info('No known microarchitecture requirements are known for target RHEL%s.',
+ get_target_major_version())
+ return
+
+ missing_flags = [flag for flag in microarch_info.required_flags if flag not in cpuinfo.flags]
+ api.current_logger().debug('Required flags missing: %s', missing_flags)
+ if missing_flags:
+ _inhibit_upgrade(missing_flags,
+ 'RHEL{0}'.format(get_target_major_version()),
+ microarch_info.microarch_ver,
+ extra_report_fields=microarch_info.extra_report_fields)
diff --git a/repos/system_upgrade/el8toel9/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py b/repos/system_upgrade/common/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py
similarity index 79%
rename from repos/system_upgrade/el8toel9/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py
rename to repos/system_upgrade/common/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py
index b7c850d9..b0624f2b 100644
--- a/repos/system_upgrade/el8toel9/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py
+++ b/repos/system_upgrade/common/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py
@@ -25,7 +25,13 @@ def test_not_x86_64_passes(monkeypatch, arch):
assert not reporting.create_report.called
-def test_valid_microarchitecture(monkeypatch):
+@pytest.mark.parametrize(
+ ('target_ver', 'cpu_flags'),
+ [
+ ('9.0', checkmicroarchitecture.X86_64_BASELINE_FLAGS + checkmicroarchitecture.X86_64_V2_FLAGS)
+ ]
+)
+def test_valid_microarchitecture(monkeypatch, target_ver, cpu_flags):
"""
Test no report is generated on a valid microarchitecture
"""
@@ -33,9 +39,8 @@ def test_valid_microarchitecture(monkeypatch):
monkeypatch.setattr(reporting, "create_report", create_report_mocked())
monkeypatch.setattr(api, 'current_logger', logger_mocked())
- required_flags = checkmicroarchitecture.X86_64_BASELINE_FLAGS + checkmicroarchitecture.X86_64_V2_FLAGS
- monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch=ARCH_X86_64,
- msgs=[CPUInfo(flags=required_flags)]))
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch=ARCH_X86_64, dst_ver=target_ver,
+ msgs=[CPUInfo(flags=cpu_flags)]))
checkmicroarchitecture.process()
@@ -43,14 +48,16 @@ def test_valid_microarchitecture(monkeypatch):
assert not reporting.create_report.called
-def test_invalid_microarchitecture(monkeypatch):
+@pytest.mark.parametrize('target_ver', ['9.0'])
+def test_invalid_microarchitecture(monkeypatch, target_ver):
"""
Test report is generated on x86-64 architecture with invalid microarchitecture and the upgrade is inhibited
"""
monkeypatch.setattr(reporting, "create_report", create_report_mocked())
monkeypatch.setattr(api, 'current_logger', logger_mocked())
- monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch=ARCH_X86_64, msgs=[CPUInfo()]))
+ monkeypatch.setattr(api, 'current_actor',
+ CurrentActorMocked(arch=ARCH_X86_64, msgs=[CPUInfo()], dst_ver=target_ver))
checkmicroarchitecture.process()
@@ -60,6 +67,6 @@ def test_invalid_microarchitecture(monkeypatch):
assert 'Architecture not x86-64. Skipping microarchitecture test.' not in api.current_logger().infomsg
assert reporting.create_report.called == 1
assert 'microarchitecture is unsupported' in produced_title
- assert 'RHEL9 has a higher CPU requirement' in produced_summary
+ assert 'has a higher CPU requirement' in produced_summary
assert reporting.create_report.report_fields['severity'] == reporting.Severity.HIGH
assert is_inhibitor(reporting.create_report.report_fields)
diff --git a/repos/system_upgrade/el8toel9/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py b/repos/system_upgrade/el8toel9/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py
deleted file mode 100644
index 9c083d7e..00000000
--- a/repos/system_upgrade/el8toel9/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py
+++ /dev/null
@@ -1,46 +0,0 @@
-from leapp import reporting
-from leapp.libraries.common.config.architecture import ARCH_X86_64, matches_architecture
-from leapp.libraries.stdlib import api
-from leapp.models import CPUInfo
-
-X86_64_BASELINE_FLAGS = ['cmov', 'cx8', 'fpu', 'fxsr', 'mmx', 'syscall', 'sse', 'sse2']
-X86_64_V2_FLAGS = ['cx16', 'lahf_lm', 'popcnt', 'pni', 'sse4_1', 'sse4_2', 'ssse3']
-
-
-def _inhibit_upgrade(missing_flags):
- title = 'Current x86-64 microarchitecture is unsupported in RHEL9'
- summary = ('RHEL9 has a higher CPU requirement than older versions, it now requires a CPU '
- 'compatible with x86-64-v2 instruction set or higher.\n\n'
- 'Missings flags detected are: {}\n'.format(', '.join(missing_flags)))
-
- reporting.create_report([
- reporting.Title(title),
- reporting.Summary(summary),
- reporting.ExternalLink(title='Building Red Hat Enterprise Linux 9 for the x86-64-v2 microarchitecture level',
- url='https://red.ht/rhel-9-intel-microarchitectures'),
- reporting.Severity(reporting.Severity.HIGH),
- reporting.Groups([reporting.Groups.INHIBITOR]),
- reporting.Groups([reporting.Groups.SANITY]),
- reporting.Remediation(hint=('If case of using virtualization, virtualization platforms often allow '
- 'configuring a minimum denominator CPU model for compatibility when migrating '
- 'between different CPU models. Ensure that minimum requirements are not below '
- 'that of RHEL9\n')),
- ])
-
-
-def process():
- """
- Check whether the processor matches the required microarchitecture.
- """
-
- if not matches_architecture(ARCH_X86_64):
- api.current_logger().info('Architecture not x86-64. Skipping microarchitecture test.')
- return
-
- cpuinfo = next(api.consume(CPUInfo))
-
- required_flags = X86_64_BASELINE_FLAGS + X86_64_V2_FLAGS
- missing_flags = [flag for flag in required_flags if flag not in cpuinfo.flags]
- api.current_logger().debug('Required flags missing: %s', missing_flags)
- if missing_flags:
- _inhibit_upgrade(missing_flags)
--
2.47.0

View File

@ -0,0 +1,133 @@
From d3ebc990ba65801fbed2aaf1dce8329698667d1c Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Wed, 28 Aug 2024 12:18:40 +0200
Subject: [PATCH 13/40] check_microarch: add rhel10 requirements
---
.../actors/checkmicroarchitecture/actor.py | 13 ++++++++++--
.../libraries/checkmicroarchitecture.py | 8 +++++--
.../tests/test_checkmicroarchitecture.py | 21 ++++++++++++++-----
3 files changed, 33 insertions(+), 9 deletions(-)
diff --git a/repos/system_upgrade/common/actors/checkmicroarchitecture/actor.py b/repos/system_upgrade/common/actors/checkmicroarchitecture/actor.py
index 98ffea80..bb342f2f 100644
--- a/repos/system_upgrade/common/actors/checkmicroarchitecture/actor.py
+++ b/repos/system_upgrade/common/actors/checkmicroarchitecture/actor.py
@@ -17,7 +17,8 @@ class CheckMicroarchitecture(Actor):
levels.
RHEL9 has a higher CPU requirement than older versions, it now requires a
- CPU compatible with ``x86-64-v2`` instruction set or higher.
+ CPU compatible with ``x86-64-v2`` instruction set or higher. Similarly,
+ RHEL10 requires at least ``x86-64-v3`` instruction set.
.. table:: Required CPU features by microarchitecure level with a
corresponding flag as shown by ``lscpu``.
@@ -43,7 +44,15 @@ class CheckMicroarchitecture(Actor):
| | SSE4_2 | sse4_2 |
| | SSSE3 | ssse3 |
+------------+-------------+--------------------+
- | ... | | |
+ | x86-64-v3 | AVX | avx |
+ | | AVX2 | avx2 |
+ | | BMI1 | bmi1 |
+ | | BMI2 | bmi2 |
+ | | F16C | f16c |
+ | | FMA | fma |
+ | | LZCNT | abm |
+ | | MOVBE | movbe |
+ | | OSXSAVE | xsave |
+------------+-------------+--------------------+
Note: To get the corresponding flag for the CPU feature consult the file
diff --git a/repos/system_upgrade/common/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py b/repos/system_upgrade/common/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py
index cc617203..94e85e3e 100644
--- a/repos/system_upgrade/common/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py
+++ b/repos/system_upgrade/common/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py
@@ -8,6 +8,7 @@ from leapp.models import CPUInfo
X86_64_BASELINE_FLAGS = ['cmov', 'cx8', 'fpu', 'fxsr', 'mmx', 'syscall', 'sse', 'sse2']
X86_64_V2_FLAGS = ['cx16', 'lahf_lm', 'popcnt', 'pni', 'sse4_1', 'sse4_2', 'ssse3']
+X86_64_V3_FLAGS = ['avx2', 'bmi1', 'bmi2', 'f16c', 'fma', 'abm', 'movbe', 'xsave']
MicroarchInfo = namedtuple('MicroarchInfo', ('required_flags', 'extra_report_fields', 'microarch_ver'))
@@ -16,7 +17,7 @@ def _inhibit_upgrade(missing_flags, target_rhel, microarch_ver, extra_report_fie
title = 'Current x86-64 microarchitecture is unsupported in {0}'.format(target_rhel)
summary = ('{0} has a higher CPU requirement than older versions, it now requires a CPU '
'compatible with {1} instruction set or higher.\n\n'
- 'Missings flags detected are: {2}\n'.format(target_rhel, microarch_ver, ', '.join(missing_flags)))
+ 'Missings flags detected are: {2}\n').format(target_rhel, microarch_ver, ', '.join(missing_flags))
report_fields = [
reporting.Title(title),
@@ -24,7 +25,7 @@ def _inhibit_upgrade(missing_flags, target_rhel, microarch_ver, extra_report_fie
reporting.Severity(reporting.Severity.HIGH),
reporting.Groups([reporting.Groups.INHIBITOR]),
reporting.Groups([reporting.Groups.SANITY]),
- reporting.Remediation(hint=('If case of using virtualization, virtualization platforms often allow '
+ reporting.Remediation(hint=('If a case of using virtualization, virtualization platforms often allow '
'configuring a minimum denominator CPU model for compatibility when migrating '
'between different CPU models. Ensure that minimum requirements are not below '
'that of {0}\n').format(target_rhel)),
@@ -56,6 +57,9 @@ def process():
'9': MicroarchInfo(microarch_ver='x86-64-v2',
required_flags=(X86_64_BASELINE_FLAGS + X86_64_V2_FLAGS),
extra_report_fields=[rhel9_microarch_article]),
+ '10': MicroarchInfo(microarch_ver='x86-64-v3',
+ required_flags=(X86_64_BASELINE_FLAGS + X86_64_V2_FLAGS + X86_64_V3_FLAGS),
+ extra_report_fields=[]),
}
microarch_info = rhel_major_to_microarch_reqs.get(get_target_major_version())
diff --git a/repos/system_upgrade/common/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py b/repos/system_upgrade/common/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py
index b0624f2b..eeca8be0 100644
--- a/repos/system_upgrade/common/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py
+++ b/repos/system_upgrade/common/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py
@@ -25,10 +25,15 @@ def test_not_x86_64_passes(monkeypatch, arch):
assert not reporting.create_report.called
+ENTIRE_V2_FLAG_SET = checkmicroarchitecture.X86_64_BASELINE_FLAGS + checkmicroarchitecture.X86_64_V2_FLAGS
+ENTIRE_V3_FLAG_SET = ENTIRE_V2_FLAG_SET + checkmicroarchitecture.X86_64_V3_FLAGS
+
+
@pytest.mark.parametrize(
('target_ver', 'cpu_flags'),
[
- ('9.0', checkmicroarchitecture.X86_64_BASELINE_FLAGS + checkmicroarchitecture.X86_64_V2_FLAGS)
+ ('9.0', ENTIRE_V2_FLAG_SET),
+ ('10.0', ENTIRE_V3_FLAG_SET)
]
)
def test_valid_microarchitecture(monkeypatch, target_ver, cpu_flags):
@@ -48,16 +53,22 @@ def test_valid_microarchitecture(monkeypatch, target_ver, cpu_flags):
assert not reporting.create_report.called
-@pytest.mark.parametrize('target_ver', ['9.0'])
-def test_invalid_microarchitecture(monkeypatch, target_ver):
+@pytest.mark.parametrize(
+ ('target_ver', 'cpu_flags'),
+ (
+ ('9.0', checkmicroarchitecture.X86_64_BASELINE_FLAGS),
+ ('10.0', ENTIRE_V2_FLAG_SET),
+ )
+)
+def test_invalid_microarchitecture(monkeypatch, target_ver, cpu_flags):
"""
Test report is generated on x86-64 architecture with invalid microarchitecture and the upgrade is inhibited
"""
-
+ cpu_info = CPUInfo(flags=cpu_flags)
monkeypatch.setattr(reporting, "create_report", create_report_mocked())
monkeypatch.setattr(api, 'current_logger', logger_mocked())
monkeypatch.setattr(api, 'current_actor',
- CurrentActorMocked(arch=ARCH_X86_64, msgs=[CPUInfo()], dst_ver=target_ver))
+ CurrentActorMocked(arch=ARCH_X86_64, msgs=[cpu_info], dst_ver=target_ver))
checkmicroarchitecture.process()
--
2.47.0
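
As an aside, a standalone sketch of checking the local CPU against the x86-64-v3 flag set listed in the two check_microarch patches above. This is not leapp code (leapp consumes CPUInfo facts collected elsewhere); reading /proc/cpuinfo directly is just for illustration and is Linux-only.

    X86_64_BASELINE_FLAGS = ['cmov', 'cx8', 'fpu', 'fxsr', 'mmx', 'syscall', 'sse', 'sse2']
    X86_64_V2_FLAGS = ['cx16', 'lahf_lm', 'popcnt', 'pni', 'sse4_1', 'sse4_2', 'ssse3']
    X86_64_V3_FLAGS = ['avx2', 'bmi1', 'bmi2', 'f16c', 'fma', 'abm', 'movbe', 'xsave']

    def missing_v3_flags():
        with open('/proc/cpuinfo') as f:
            for line in f:
                if line.startswith('flags'):
                    cpu_flags = set(line.split(':', 1)[1].split())
                    break
            else:
                return None  # no flags line found (e.g. non-x86 system)
        required = X86_64_BASELINE_FLAGS + X86_64_V2_FLAGS + X86_64_V3_FLAGS
        return [flag for flag in required if flag not in cpu_flags]

    print(missing_v3_flags())  # [] means the CPU meets the x86-64-v3 requirement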

View File

@ -1,50 +0,0 @@
From ae048a890ddd2169f3f46d9fbd1545fd65670e16 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Mon, 6 Oct 2025 15:56:16 +0200
Subject: [PATCH 13/55] livemode: Include /etc/crypttab in upgrade initramfs
The /etc/crypttab file is sometimes not picked up automatically by
dracut, so this change includes it unconditionally. This is required for
auto-unlocking encrypted devices in the upgrade environment.
The upgradeinitramfsgenerator is modified to process the include_files
from UpgradeInitramfsTasks when upgrading in livemode.
Jira: RHEL-90098
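
For illustration, a standalone sketch of the argument expansion used by the generator in the diff below: every included file becomes its own '--install <file>' pair on the dracut command line, and every dracut module its own '--add <module>' pair. The destination path here is an illustrative example, not the real one.

    import itertools

    include_files = ['/etc/crypttab']            # from UpgradeInitramfsTasks.include_files
    dracut_modules = ['clevis', 'clevis-pin-tpm2']

    cmd = ['dracut', '-f', '/tmp/upgrade.img']   # illustrative destination
    cmd.extend(itertools.chain(*(('--install', f) for f in include_files)))
    cmd.extend(itertools.chain(*(('--add', m) for m in dracut_modules)))
    print(cmd)
    # ['dracut', '-f', '/tmp/upgrade.img', '--install', '/etc/crypttab',
    #  '--add', 'clevis', '--add', 'clevis-pin-tpm2']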
---
.../common/actors/checkluks/libraries/checkluks.py | 4 +++-
.../libraries/upgradeinitramfsgenerator.py | 3 +++
2 files changed, 6 insertions(+), 1 deletion(-)
diff --git a/repos/system_upgrade/common/actors/checkluks/libraries/checkluks.py b/repos/system_upgrade/common/actors/checkluks/libraries/checkluks.py
index aac171a7..d52b9e73 100644
--- a/repos/system_upgrade/common/actors/checkluks/libraries/checkluks.py
+++ b/repos/system_upgrade/common/actors/checkluks/libraries/checkluks.py
@@ -161,7 +161,9 @@ def check_invalid_luks_devices():
copy_files=[CopyFile(src="/etc/crypttab")],
install_rpms=required_crypt_rpms)
)
- api.produce(UpgradeInitramfsTasks(include_dracut_modules=[
+ api.produce(UpgradeInitramfsTasks(
+ include_files=['/etc/crypttab'],
+ include_dracut_modules=[
DracutModule(name='clevis'),
DracutModule(name='clevis-pin-tpm2')
])
diff --git a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/libraries/upgradeinitramfsgenerator.py b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/libraries/upgradeinitramfsgenerator.py
index 02c3fd9d..3ad92167 100644
--- a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/libraries/upgradeinitramfsgenerator.py
+++ b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/libraries/upgradeinitramfsgenerator.py
@@ -436,6 +436,9 @@ def _generate_livemode_initramfs(context, userspace_initramfs_dest, target_kerne
'--lvmconf', '--mdadmconf',
'--kver', target_kernel_ver, '-f', userspace_initramfs_dest]
+ # Add included files
+ cmd.extend(itertools.chain(*(('--install', file) for file in initramfs_includes.files)))
+
# Add dracut modules
cmd.extend(itertools.chain(*(('--add', module) for module in dracut_modules)))
--
2.51.1

View File

@ -0,0 +1,44 @@
From a14793892bafaad0802844cbb56be3be3220eb47 Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Wed, 25 Sep 2024 17:29:02 +0200
Subject: [PATCH 14/40] Skip checking files under .../directory-hash/ dir
* The main reason for this change is to improve performance and
reduce the flood of logs for content that does not seem to be important
to check for the upgrade process.
The directory has been added relatively recently to the ca-certificates
rpm on EL 9+ systems, mostly to improve the performance of OpenSSL, and
its content does not seem to be important for the IPU process.
The high number of files takes too much time to evaluate and causes
a flood of logs that are not important.
This is an updated version of the solution we originally dropped: 60f500e59bb92
---
.../targetuserspacecreator/libraries/userspacegen.py | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
index cd2d7d6e..d7698056 100644
--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
+++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
@@ -311,6 +311,16 @@ def _get_files_owned_by_rpms(context, dirpath, pkgs=None, recursive=False):
searchdir = context.full_path(dirpath)
if recursive:
for root, _, files in os.walk(searchdir):
+ if '/directory-hash/' in root:
+ # tl;dr; for the performance improvement
+ # The directory has been relatively recently added to ca-certificates
+ # rpm on EL 9+ systems and the content does not seem to be important
+ # for the IPU process. Also, it contains high number of files and
+ # their processing floods the output and slows down IPU.
+ # So skipping it entirely.
+ # This is an updated version of the solution we originally dropped: 60f500e59bb92
+ api.current_logger().debug('SKIP files in the {} directory: Not important for the IPU.'.format(root))
+ continue
for filename in files:
relpath = os.path.relpath(os.path.join(root, filename), searchdir)
file_list.append(relpath)
--
2.47.0
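
As a rough sketch of the skip introduced above, detached from the real userspacegen code, the walk below ignores everything under a directory-hash/ subtree while collecting paths relative to the search directory (the path in the usage line is only an example):

    import os

    def list_relative_files(searchdir):
        file_list = []
        for root, _, files in os.walk(searchdir):
            if '/directory-hash/' in root:
                # Skip the ca-certificates directory-hash content entirely;
                # it is large and not needed for the upgrade checks.
                continue
            for filename in files:
                file_list.append(os.path.relpath(os.path.join(root, filename), searchdir))
        return file_list

    print(list_relative_files('/etc/pki/ca-trust/extracted'))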

View File

@ -1,25 +0,0 @@
From 8248f6afd54bedcfe9d9d639fda3760669360dbe Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Mon, 13 Oct 2025 13:39:14 +0200
Subject: [PATCH 14/55] overlaygen: Fix not enough arguments for format string
---
repos/system_upgrade/common/libraries/overlaygen.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/repos/system_upgrade/common/libraries/overlaygen.py b/repos/system_upgrade/common/libraries/overlaygen.py
index 867e3559..a048af2b 100644
--- a/repos/system_upgrade/common/libraries/overlaygen.py
+++ b/repos/system_upgrade/common/libraries/overlaygen.py
@@ -710,7 +710,7 @@ def _create_mount_disk_image_old(disk_images_directory, path):
try:
utils.call_with_oserror_handled(cmd=['/sbin/mkfs.ext4', '-F', diskimage_path])
except CalledProcessError as e:
- api.current_logger().error('Failed to create ext4 filesystem in %s', exc_info=True)
+ api.current_logger().error('Failed to create ext4 filesystem in %s', diskimage_path, exc_info=True)
raise StopActorExecutionError(
message=str(e)
)
--
2.51.1
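
The one-line fix above addresses a '%s' placeholder that had no matching argument. A small self-contained sketch of the corrected lazy-formatting pattern, using the plain logging module with a made-up path and error:

    import logging

    logger = logging.getLogger(__name__)
    diskimage_path = '/var/lib/leapp/scratch/diskimages/root_'  # illustrative value only

    try:
        raise OSError('mkfs.ext4 failed')  # stand-in for the real CalledProcessError
    except OSError:
        # The path is now passed as an argument so '%s' has something to format;
        # exc_info=True attaches the active exception traceback to the log record.
        logger.error('Failed to create ext4 filesystem in %s', diskimage_path, exc_info=True)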

View File

@ -1,444 +0,0 @@
From 546d18f64deabe8440ac6d4ee707d7a5b69415db Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Mon, 15 Sep 2025 11:09:59 +0200
Subject: [PATCH 15/55] Generalize TargetRepositories
The RHELTargetRepository model is deprecated and replaced by the
DistroTargetRepository model. The TargetRepositories model is updated
accordingly.
TargetRepositories.rhel_repos is now filled only on RHEL.
---
.../libraries/checktargetrepos.py | 4 ++-
.../tests/test_checktargetrepos.py | 32 +++++++++++++------
.../cloud/checkrhui/libraries/checkrhui.py | 5 ++-
.../tests/component_test_checkrhui.py | 1 +
.../libraries/setuptargetrepos.py | 29 +++++++++++------
.../libraries/setuptargetrepos_repomap.py | 4 +--
.../tests/test_setuptargetrepos.py | 22 ++++++++++---
.../libraries/userspacegen.py | 2 ++
.../tests/unit_test_targetuserspacecreator.py | 7 ++++
.../common/models/targetrepositories.py | 32 +++++++++++++++++--
10 files changed, 107 insertions(+), 31 deletions(-)
diff --git a/repos/system_upgrade/common/actors/checktargetrepos/libraries/checktargetrepos.py b/repos/system_upgrade/common/actors/checktargetrepos/libraries/checktargetrepos.py
index c286ed4f..141cf8e4 100644
--- a/repos/system_upgrade/common/actors/checktargetrepos/libraries/checktargetrepos.py
+++ b/repos/system_upgrade/common/actors/checktargetrepos/libraries/checktargetrepos.py
@@ -2,12 +2,14 @@ from leapp import reporting
from leapp.libraries.common import config, rhsm
from leapp.libraries.common.config.version import get_target_major_version
from leapp.libraries.stdlib import api
-from leapp.models import CustomTargetRepositoryFile, RHUIInfo, TargetRepositories
+from leapp.models import CustomTargetRepositoryFile, RHELTargetRepository, RHUIInfo, TargetRepositories
+from leapp.utils.deprecation import suppress_deprecation
# TODO: we need to provide this path in a shared library
CUSTOM_REPO_PATH = '/etc/leapp/files/leapp_upgrade_repositories.repo'
+@suppress_deprecation(RHELTargetRepository) # member of TargetRepositories
def _any_custom_repo_defined():
for tr in api.consume(TargetRepositories):
if tr.custom_repos:
diff --git a/repos/system_upgrade/common/actors/checktargetrepos/tests/test_checktargetrepos.py b/repos/system_upgrade/common/actors/checktargetrepos/tests/test_checktargetrepos.py
index c1ca8cd1..ea93ce7e 100644
--- a/repos/system_upgrade/common/actors/checktargetrepos/tests/test_checktargetrepos.py
+++ b/repos/system_upgrade/common/actors/checktargetrepos/tests/test_checktargetrepos.py
@@ -8,12 +8,11 @@ from leapp.libraries.stdlib import api
from leapp.models import (
CustomTargetRepository,
CustomTargetRepositoryFile,
- EnvVar,
- Report,
- RepositoryData,
+ DistroTargetRepository,
RHELTargetRepository,
TargetRepositories
)
+from leapp.utils.deprecation import suppress_deprecation
from leapp.utils.report import is_inhibitor
@@ -32,11 +31,21 @@ class MockedConsume(object):
return iter([msg for msg in self._msgs if isinstance(msg, model)])
-_RHEL_REPOS = [
- RHELTargetRepository(repoid='repo1'),
- RHELTargetRepository(repoid='repo2'),
- RHELTargetRepository(repoid='repo3'),
- RHELTargetRepository(repoid='repo4'),
+@suppress_deprecation(RHELTargetRepository)
+def _test_rhel_repos():
+ return [
+ RHELTargetRepository(repoid='repo1'),
+ RHELTargetRepository(repoid='repo2'),
+ RHELTargetRepository(repoid='repo3'),
+ RHELTargetRepository(repoid='repo4'),
+ ]
+
+
+_DISTRO_REPOS = [
+ DistroTargetRepository(repoid='repo1'),
+ DistroTargetRepository(repoid='repo2'),
+ DistroTargetRepository(repoid='repo3'),
+ DistroTargetRepository(repoid='repo4'),
]
_CUSTOM_REPOS = [
@@ -46,8 +55,10 @@ _CUSTOM_REPOS = [
CustomTargetRepository(repoid='repo4', name='repo4name', baseurl=None, enabled=True),
]
-_TARGET_REPOS_CUSTOM = TargetRepositories(rhel_repos=_RHEL_REPOS, custom_repos=_CUSTOM_REPOS)
-_TARGET_REPOS_NO_CUSTOM = TargetRepositories(rhel_repos=_RHEL_REPOS)
+_TARGET_REPOS_CUSTOM = TargetRepositories(
+ rhel_repos=_test_rhel_repos(), distro_repos=_DISTRO_REPOS, custom_repos=_CUSTOM_REPOS
+)
+_TARGET_REPOS_NO_CUSTOM = TargetRepositories(rhel_repos=_test_rhel_repos(), distro_repos=_DISTRO_REPOS)
_CUSTOM_TARGET_REPOFILE = CustomTargetRepositoryFile(file='/etc/leapp/files/leapp_upgrade_repositories.repo')
@@ -55,6 +66,7 @@ def test_checktargetrepos_rhsm(monkeypatch):
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
monkeypatch.setattr(rhsm, 'skip_rhsm', lambda: False)
monkeypatch.setattr(api, 'consume', MockedConsume())
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked())
monkeypatch.setattr(checktargetrepos, 'get_target_major_version', lambda: '8')
checktargetrepos.process()
assert reporting.create_report.called == 0
diff --git a/repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py b/repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py
index ea154173..5dcdd967 100644
--- a/repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py
+++ b/repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py
@@ -22,6 +22,7 @@ from leapp.models import (
CustomTargetRepository,
DNFPluginTask,
InstalledRPM,
+ RHELTargetRepository,
RHUIInfo,
RpmTransactionTasks,
TargetRepositories,
@@ -30,6 +31,7 @@ from leapp.models import (
TargetRHUISetupInfo,
TargetUserSpacePreupgradeTasks
)
+from leapp.utils.deprecation import suppress_deprecation
MatchingSetup = namedtuple('MatchingSetup', ['family', 'description'])
@@ -370,11 +372,12 @@ def emit_rhui_setup_tasks_based_on_config(rhui_config_dict):
api.produce(rhui_info)
+@suppress_deprecation(RHELTargetRepository) # member of TargetRepositories
def request_configured_repos_to_be_enabled(rhui_config):
config_repos_to_enable = rhui_config[RhuiTargetRepositoriesToUse.name]
custom_repos = [CustomTargetRepository(repoid=repoid) for repoid in config_repos_to_enable]
if custom_repos:
- target_repos = TargetRepositories(custom_repos=custom_repos, rhel_repos=[])
+ target_repos = TargetRepositories(custom_repos=custom_repos, rhel_repos=[], distro_repos=[])
api.produce(target_repos)
diff --git a/repos/system_upgrade/common/actors/cloud/checkrhui/tests/component_test_checkrhui.py b/repos/system_upgrade/common/actors/cloud/checkrhui/tests/component_test_checkrhui.py
index 3ac9c1b8..02ca352e 100644
--- a/repos/system_upgrade/common/actors/cloud/checkrhui/tests/component_test_checkrhui.py
+++ b/repos/system_upgrade/common/actors/cloud/checkrhui/tests/component_test_checkrhui.py
@@ -468,6 +468,7 @@ def test_request_configured_repos_to_be_enabled(monkeypatch):
target_repos = api.produce.model_instances[0]
assert isinstance(target_repos, TargetRepositories)
+ assert not target_repos.distro_repos
assert not target_repos.rhel_repos
custom_repoids = sorted(custom_repo_model.repoid for custom_repo_model in target_repos.custom_repos)
diff --git a/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py b/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py
index a6073aa3..9e5b1334 100644
--- a/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py
+++ b/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py
@@ -1,9 +1,10 @@
-
from leapp.libraries.actor import setuptargetrepos_repomap
+from leapp.libraries.common.config import get_distro_id
from leapp.libraries.common.config.version import get_source_major_version, get_source_version, get_target_version
from leapp.libraries.stdlib import api
from leapp.models import (
CustomTargetRepository,
+ DistroTargetRepository,
InstalledRPM,
RepositoriesBlacklisted,
RepositoriesFacts,
@@ -15,6 +16,7 @@ from leapp.models import (
TargetRepositories,
UsedRepositories
)
+from leapp.utils.deprecation import suppress_deprecation
RHUI_CLIENT_REPOIDS_RHEL88_TO_RHEL810 = {
'rhui-microsoft-azure-rhel8-sapapps': 'rhui-microsoft-azure-rhel8-base-sap-apps',
@@ -80,6 +82,7 @@ def _get_mapped_repoids(repomap, src_repoids):
return mapped_repoids
+@suppress_deprecation(RHELTargetRepository)
def process():
# Load relevant data from messages
used_repoids_dict = _get_used_repo_dict()
@@ -103,10 +106,11 @@ def process():
# installed packages that have mapping to prevent missing repositories that are disabled during the upgrade, but
# can be used to upgrade installed packages.
repoids_to_map = enabled_repoids.union(repoids_from_installed_packages_with_mapping)
+ is_rhel = get_distro_id() == 'rhel'
# RHEL8.10 use a different repoid for client repository, but the repomapping mechanism cannot distinguish these
# as it does not use minor versions. Therefore, we have to hardcode these changes.
- if get_source_version() == '8.10':
+ if is_rhel and get_source_version() == '8.10':
for rhel88_rhui_client_repoid, rhel810_rhui_client_repoid in RHUI_CLIENT_REPOIDS_RHEL88_TO_RHEL810.items():
if rhel810_rhui_client_repoid in repoids_to_map:
# Replace RHEL8.10 rhui client repoids with RHEL8.8 repoids,
@@ -119,9 +123,9 @@ def process():
default_channels = setuptargetrepos_repomap.get_default_repository_channels(repomap, repoids_to_map)
repomap.set_default_channels(default_channels)
- # Get target RHEL repoids based on the repomap
+ # Get target distro repoids based on the repomap
expected_repos = repomap.get_expected_target_pesid_repos(repoids_to_map)
- target_rhel_repoids = set()
+ target_distro_repoids = set()
for target_pesid, target_pesidrepo in expected_repos.items():
if not target_pesidrepo:
# NOTE this could happen only for enabled repositories part of the set,
@@ -139,7 +143,7 @@ def process():
if target_pesidrepo.repoid in excluded_repoids:
api.current_logger().debug('Skipping the {} repo (excluded).'.format(target_pesidrepo.repoid))
continue
- target_rhel_repoids.add(target_pesidrepo.repoid)
+ target_distro_repoids.add(target_pesidrepo.repoid)
# FIXME: this could possibly result into a try to enable multiple repositories
# from the same family (pesid). But unless we have a bug in previous actors,
@@ -151,7 +155,7 @@ def process():
if repo in excluded_repoids:
api.current_logger().debug('Skipping the {} repo from setup task (excluded).'.format(repo))
continue
- target_rhel_repoids.add(repo)
+ target_distro_repoids.add(repo)
# On 8.10, some RHUI setups have different names than the one computed by repomapping.
# Although such situation could be avoided (having another client repo when a single
@@ -159,12 +163,16 @@ def process():
# solution.
if get_target_version() == '8.10':
for pre_810_repoid, post_810_repoid in RHUI_CLIENT_REPOIDS_RHEL88_TO_RHEL810.items():
- if pre_810_repoid in target_rhel_repoids:
- target_rhel_repoids.remove(pre_810_repoid)
- target_rhel_repoids.add(post_810_repoid)
+ if pre_810_repoid in target_distro_repoids:
+ target_distro_repoids.remove(pre_810_repoid)
+ target_distro_repoids.add(post_810_repoid)
# create the final lists and sort them (for easier testing)
- rhel_repos = [RHELTargetRepository(repoid=repoid) for repoid in sorted(target_rhel_repoids)]
+ if is_rhel:
+ rhel_repos = [RHELTargetRepository(repoid=repoid) for repoid in sorted(target_distro_repoids)]
+ else:
+ rhel_repos = []
+ distro_repos = [DistroTargetRepository(repoid=repoid) for repoid in sorted(target_distro_repoids)]
custom_repos = [repo for repo in custom_repos if repo.repoid not in excluded_repoids]
custom_repos = sorted(custom_repos, key=lambda x: x.repoid)
@@ -179,5 +187,6 @@ def process():
api.produce(TargetRepositories(
rhel_repos=rhel_repos,
+ distro_repos=distro_repos,
custom_repos=custom_repos,
))
diff --git a/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos_repomap.py b/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos_repomap.py
index 37be03f1..343ee2ea 100644
--- a/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos_repomap.py
+++ b/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos_repomap.py
@@ -1,4 +1,4 @@
-from leapp.libraries.common.config import get_target_product_channel
+from leapp.libraries.common.config import get_distro_id, get_target_product_channel
from leapp.libraries.common.config.version import get_source_major_version, get_target_major_version
from leapp.libraries.stdlib import api
@@ -44,7 +44,7 @@ class RepoMapDataHandler(object):
# ideal for work, but there is not any significant impact..
self.repositories = repo_map.repositories
self.mapping = repo_map.mapping
- self.distro = distro or api.current_actor().configuration.os_release.release_id
+ self.distro = distro or get_distro_id()
# FIXME(pstodulk): what about default_channel -> fallback_channel
# hardcoded always as ga? instead of list of channels..
# it'd be possibly confusing naming now...
diff --git a/repos/system_upgrade/common/actors/setuptargetrepos/tests/test_setuptargetrepos.py b/repos/system_upgrade/common/actors/setuptargetrepos/tests/test_setuptargetrepos.py
index 1f898e8f..e4a30f7f 100644
--- a/repos/system_upgrade/common/actors/setuptargetrepos/tests/test_setuptargetrepos.py
+++ b/repos/system_upgrade/common/actors/setuptargetrepos/tests/test_setuptargetrepos.py
@@ -198,11 +198,23 @@ def test_repos_mapping_for_distro(monkeypatch, distro_id):
setuptargetrepos.process()
assert api.produce.called
+ distro_repos = api.produce.model_instances[0].distro_repos
rhel_repos = api.produce.model_instances[0].rhel_repos
- assert len(rhel_repos) == 3
+ assert len(distro_repos) == 3
+
+ produced_distro_repoids = {repo.repoid for repo in distro_repos}
produced_rhel_repoids = {repo.repoid for repo in rhel_repos}
- expected_rhel_repoids = {'{0}-8-for-x86_64-baseos-htb-rpms'.format(distro_id),
- '{0}-8-for-x86_64-appstream-htb-rpms'.format(distro_id),
- '{0}-8-for-x86_64-satellite-extras-rpms'.format(distro_id)}
- assert produced_rhel_repoids == expected_rhel_repoids
+
+ expected_repoids = {
+ "{0}-8-for-x86_64-baseos-htb-rpms".format(distro_id),
+ "{0}-8-for-x86_64-appstream-htb-rpms".format(distro_id),
+ "{0}-8-for-x86_64-satellite-extras-rpms".format(distro_id),
+ }
+
+ assert produced_distro_repoids == expected_repoids
+ if distro_id == 'rhel':
+ assert len(rhel_repos) == 3
+ assert produced_rhel_repoids == expected_repoids
+ else:
+ assert len(rhel_repos) == 0
diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
index 55877d05..407cb0b7 100644
--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
+++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
@@ -17,6 +17,7 @@ from leapp.models import (
CustomTargetRepositoryFile,
PkgManagerInfo,
RepositoriesFacts,
+ RHELTargetRepository,
RHSMInfo,
RHUIInfo,
StorageInfo,
@@ -967,6 +968,7 @@ def _get_rh_available_repoids(context, indata):
return rh_repoids
+@suppress_deprecation(RHELTargetRepository) # member of TargetRepositories
def gather_target_repositories(context, indata):
"""
Get available required target repositories and inhibit or raise error if basic checks do not pass.
diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py
index 7853a7ad..f05e6bc2 100644
--- a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py
+++ b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py
@@ -1072,6 +1072,7 @@ def test_consume_data(monkeypatch, raised, no_rhsm, testdata):
@pytest.mark.skip(reason="Currently not implemented in the actor. It's TODO.")
+@suppress_deprecation(models.RHELTargetRepository)
def test_gather_target_repositories(monkeypatch):
monkeypatch.setattr(userspacegen.api, 'current_actor', CurrentActorMocked())
# The available RHSM repos
@@ -1104,6 +1105,7 @@ def test_gather_target_repositories_none_available(monkeypatch):
assert inhibitors[0].get('title', '') == 'Cannot find required basic RHEL target repositories.'
+@suppress_deprecation(models.RHELTargetRepository)
def test_gather_target_repositories_rhui(monkeypatch):
indata = testInData(
@@ -1122,6 +1124,10 @@ def test_gather_target_repositories_rhui(monkeypatch):
rhel_repos=[
models.RHELTargetRepository(repoid='rhui-1'),
models.RHELTargetRepository(repoid='rhui-2')
+ ],
+ distro_repos=[
+ models.DistroTargetRepository(repoid='rhui-1'),
+ models.DistroTargetRepository(repoid='rhui-2')
]
)
])
@@ -1130,6 +1136,7 @@ def test_gather_target_repositories_rhui(monkeypatch):
assert target_repoids == set(['rhui-1', 'rhui-2'])
+@suppress_deprecation(models.RHELTargetRepository)
def test_gather_target_repositories_baseos_appstream_not_available(monkeypatch):
# If the repos that Leapp identifies as required for the upgrade (based on the repo mapping and PES data) are not
# available, an exception shall be raised
diff --git a/repos/system_upgrade/common/models/targetrepositories.py b/repos/system_upgrade/common/models/targetrepositories.py
index 02c6c5e5..e1a0b646 100644
--- a/repos/system_upgrade/common/models/targetrepositories.py
+++ b/repos/system_upgrade/common/models/targetrepositories.py
@@ -1,4 +1,5 @@
from leapp.models import fields, Model
+from leapp.reporting import deprecated
from leapp.topics import TransactionTopic
@@ -11,10 +12,18 @@ class UsedTargetRepository(TargetRepositoryBase):
pass
+@deprecated(
+ since="2025-07-23",
+ message="This model is deprecated, use DistroTargetRepository instead.",
+)
class RHELTargetRepository(TargetRepositoryBase):
pass
+class DistroTargetRepository(TargetRepositoryBase):
+ pass
+
+
class CustomTargetRepository(TargetRepositoryBase):
name = fields.Nullable(fields.String())
baseurl = fields.Nullable(fields.String())
@@ -26,20 +35,39 @@ class TargetRepositories(Model):
Repositories supposed to be used during the IPU process
The list of the actually used repositories could be just subset
- of these repositoies. In case of `custom_repositories`, all such repositories
+ of these repositories. In case of `custom_repositories`, all such repositories
must be available otherwise the upgrade is inhibited. But in case of
- `rhel_repos`, only BaseOS and Appstream repos are required now. If others
+ `distro_repos`, only BaseOS and Appstream repos are required now. If others
are missing, upgrade can still continue.
+
+ Note: `rhel_repos` are deprecated, use `distro_repos` instead.
"""
topic = TransactionTopic
+
+ # DEPRECATED: this has been superseded by distro_repos
rhel_repos = fields.List(fields.Model(RHELTargetRepository))
"""
Expected target YUM RHEL repositories provided via RHSM
+ DEPRECATED - use distro_repos instead.
+
These repositories are stored inside /etc/yum.repos.d/redhat.repo and
are expected to be used based on the provided repositories mapping.
"""
+ distro_repos = fields.List(fields.Model(DistroTargetRepository))
+ """
+ Expected target DNF repositories provided by the distribution.
+
+ On RHEL these are the repositories provided via RHSM.
+ These repositories are stored inside /etc/yum.repos.d/redhat.repo and
+ are expected to be used based on the provided repositories mapping.
+
+ On other distributions, such as CentOS Stream, these are repositories
+ in /etc/yum.repos.d/ that are provided by the distribution and are expected
+ to be used based on the provided repositories mapping.
+ """
+
custom_repos = fields.List(fields.Model(CustomTargetRepository), default=[])
"""
Custom YUM repositories required to be used for the IPU
--
2.51.1
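
Framework details aside, the core decision introduced in setuptargetrepos is: always fill the new distro_repos list, and keep filling the deprecated rhel_repos only on RHEL. A leapp-free sketch of that selection, with hypothetical repoids:

    def build_repo_lists(distro_id, target_distro_repoids):
        # distro_repos is the new, distribution-agnostic list.
        distro_repos = sorted(target_distro_repoids)
        # rhel_repos is kept only for backward compatibility and only on RHEL.
        rhel_repos = sorted(target_distro_repoids) if distro_id == 'rhel' else []
        return distro_repos, rhel_repos

    print(build_repo_lists('centos', {'baseos-example', 'appstream-example'}))
    # (['appstream-example', 'baseos-example'], [])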

View File

@ -0,0 +1,66 @@
From cef2825778eb63f95e13cf48b1683bc98c32c21b Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Fri, 25 Oct 2024 16:33:38 +0200
Subject: [PATCH 15/40] lib(overlay): cap the max size of disk images
On systems with large disks (e.g. 16TB) with lots of free space, leapp
might attempt to create files larger than the max file size of the
underlying FS. Attempting to create such large files causes leapp
to crash. This patch caps the max image size to 1TB, based on empirical
evidence that more free space is not needed for the upgrade RPM
transaction.
Jira-ref: RHEL-57064
---
.../common/libraries/overlaygen.py | 28 +++++++++++++++++++
1 file changed, 28 insertions(+)
diff --git a/repos/system_upgrade/common/libraries/overlaygen.py b/repos/system_upgrade/common/libraries/overlaygen.py
index c1ac9ad3..867e3559 100644
--- a/repos/system_upgrade/common/libraries/overlaygen.py
+++ b/repos/system_upgrade/common/libraries/overlaygen.py
@@ -68,6 +68,27 @@ or close to that size, stay always with this minimal protected size defined by
this constant.
"""
+_MAX_DISK_IMAGE_SIZE_MB = 2**20 # 1*TB
+"""
+Maximum size of the created (sparse) images.
+
+Defaults to 1TB. If a disk with capacity larger than _MAX_DISK_IMAGE_SIZE_MB
+is mounted on the system, the corresponding image used to store overlay
+modifications will be capped to _MAX_DISK_IMAGE_SIZE_MB.
+
+Engineering rationale:
+ This constant was introduced to prevent leapp from creating files that are
+ virtually larger than the maximum file size supported by the file system.
+ E.g. if the source system hosts /var/lib/leapp on EXT4, then we cannot
+ create a file larger than 16TB.
+ We create these "disk images" to be able to verify the system has enough
+ disk space to perform the RPM upgrade transaction. From our experience,
+ we are not aware of any system which could have installed so much content
+ by RPMs that we would need 1TB of the free space on a single FS. Therefore,
+ we consider this value as safe while preventing us from exceeding FS
+ limits.
+"""
+
MountPoints = namedtuple('MountPoints', ['fs_file', 'fs_vfstype'])
@@ -287,6 +308,13 @@ def _prepare_required_mounts(scratch_dir, mounts_dir, storage_info, scratch_rese
disk_size = _get_fspace(mountpoint, convert_to_mibs=True, coefficient=0.95)
if mountpoint == scratch_mp:
disk_size = scratch_disk_size
+
+ if disk_size > _MAX_DISK_IMAGE_SIZE_MB:
+ msg = ('Image for overlayfs corresponding to the disk mounted at %s would ideally have %d MB, '
+ 'but we truncate it to %d MB to avoid bumping to max file limits.')
+ api.current_logger().info(msg, mountpoint, disk_size, _MAX_DISK_IMAGE_SIZE_MB)
+ disk_size = _MAX_DISK_IMAGE_SIZE_MB
+
image = _create_mount_disk_image(disk_images_directory, mountpoint, disk_size)
result[mountpoint] = mounting.LoopMount(
source=image,
--
2.47.0
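
A minimal sketch of the capping logic added above, detached from overlaygen (the mountpoint and size are made up; the constant mirrors the one introduced in the patch):

    MAX_DISK_IMAGE_SIZE_MB = 2**20  # 1 TB expressed in MB

    def capped_disk_size(mountpoint, disk_size_mb, logger=None):
        if disk_size_mb > MAX_DISK_IMAGE_SIZE_MB:
            if logger:
                logger.info(
                    'Image for overlayfs corresponding to the disk mounted at %s would ideally '
                    'have %d MB, but it is truncated to %d MB to avoid bumping into max file limits.',
                    mountpoint, disk_size_mb, MAX_DISK_IMAGE_SIZE_MB)
            return MAX_DISK_IMAGE_SIZE_MB
        return disk_size_mb

    print(capped_disk_size('/home', 16 * 2**20))  # a 16 TB filesystem is capped to 1048576 MB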

View File

@ -0,0 +1,168 @@
From ec078243771f8ef43853bd242175a612fe84f95b Mon Sep 17 00:00:00 2001
From: tomasfratrik <tomasfratrik8@gmail.com>
Date: Wed, 17 Jul 2024 12:12:50 +0200
Subject: [PATCH 16/40] Raise proper error when ModelViolationError occurs
This error occurs when a repo file has an invalid definition, specifically
when the 'name' entry of the config file is invalid. Also add tests.
Jira: RHEL-19249
---
.../systemfacts/libraries/systemfacts.py | 13 ++++++++-
.../systemfacts/tests/test_systemfacts.py | 24 ++++++++++++++++-
.../common/libraries/repofileutils.py | 17 +++++++++++-
.../libraries/tests/test_repofileutils.py | 27 +++++++++++++++++++
4 files changed, 78 insertions(+), 3 deletions(-)
diff --git a/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py b/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py
index d1eeb28c..f16cea1d 100644
--- a/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py
+++ b/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py
@@ -217,7 +217,18 @@ def get_sysctls_status():
def get_repositories_status():
""" Get a basic information about YUM repositories installed in the system """
- return RepositoriesFacts(repositories=repofileutils.get_parsed_repofiles())
+ try:
+ return RepositoriesFacts(repositories=repofileutils.get_parsed_repofiles())
+ except repofileutils.InvalidRepoDefinition as e:
+ raise StopActorExecutionError(
+ message=str(e),
+ details={
+ 'hint': 'For more directions on how to resolve the issue, see: {url}.'
+ .format(
+ url='https://access.redhat.com/solutions/6969001'
+ )
+ }
+ )
def get_selinux_status():
diff --git a/repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts.py b/repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts.py
index badf174c..5831b979 100644
--- a/repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts.py
+++ b/repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts.py
@@ -3,7 +3,16 @@ import pwd
import pytest
-from leapp.libraries.actor.systemfacts import _get_system_groups, _get_system_users, anyendswith, anyhasprefix, aslist
+from leapp.exceptions import StopActorExecutionError
+from leapp.libraries.actor.systemfacts import (
+ _get_system_groups,
+ _get_system_users,
+ anyendswith,
+ anyhasprefix,
+ aslist,
+ get_repositories_status
+)
+from leapp.libraries.common import repofileutils
from leapp.libraries.common.testutils import logger_mocked
from leapp.libraries.stdlib import api
from leapp.snactor.fixture import current_actor_libraries
@@ -116,3 +125,16 @@ def test_get_system_groups(monkeypatch, etc_group_names, skipped_group_names):
assert group_name not in api.current_logger().dbgmsg[0]
else:
assert not api.current_logger().dbgmsg
+
+
+def test_failed_parsed_repofiles(monkeypatch):
+ def _raise_invalidrepo_error():
+ raise repofileutils.InvalidRepoDefinition(msg='mocked error',
+ repofile='/etc/yum.repos.d/mock.repo',
+ repoid='mocked repoid')
+
+ monkeypatch.setattr(repofileutils, 'get_parsed_repofiles', _raise_invalidrepo_error)
+ monkeypatch.setattr(api, 'current_logger', logger_mocked())
+
+ with pytest.raises(StopActorExecutionError):
+ get_repositories_status()
diff --git a/repos/system_upgrade/common/libraries/repofileutils.py b/repos/system_upgrade/common/libraries/repofileutils.py
index a563be52..cab3c42b 100644
--- a/repos/system_upgrade/common/libraries/repofileutils.py
+++ b/repos/system_upgrade/common/libraries/repofileutils.py
@@ -11,6 +11,16 @@ except ImportError:
api.current_logger().warning('repofileutils.py: failed to import dnf')
+class InvalidRepoDefinition(Exception):
+ """Raised when a repository definition is invalid."""
+ def __init__(self, msg, repofile, repoid):
+ message = 'Invalid repository definition: {repoid} in: {repofile}: {msg}'.format(
+ repoid=repoid, repofile=repofile, msg=msg)
+ super(InvalidRepoDefinition, self).__init__(message)
+ self.repofile = repofile
+ self.repoid = repoid
+
+
def _parse_repository(repoid, repo_data):
def asbool(x):
return x == '1'
@@ -33,12 +43,17 @@ def parse_repofile(repofile):
:param repofile: Path to the repo file
:type repofile: str
:rtype: RepositoryFile
+ :raises InvalidRepoDefinition: If the repository definition is invalid,
+ this can occur, for example, if the 'name' field in a repository is missing or invalid.
"""
data = []
with open(repofile, mode='r') as fp:
cp = utils.parse_config(fp, strict=False)
for repoid in cp.sections():
- data.append(_parse_repository(repoid, dict(cp.items(repoid))))
+ try:
+ data.append(_parse_repository(repoid, dict(cp.items(repoid))))
+ except fields.ModelViolationError as e:
+ raise InvalidRepoDefinition(e, repofile=repofile, repoid=repoid)
return RepositoryFile(file=repofile, data=data)
diff --git a/repos/system_upgrade/common/libraries/tests/test_repofileutils.py b/repos/system_upgrade/common/libraries/tests/test_repofileutils.py
index 51cc1c11..42c7e49e 100644
--- a/repos/system_upgrade/common/libraries/tests/test_repofileutils.py
+++ b/repos/system_upgrade/common/libraries/tests/test_repofileutils.py
@@ -1,7 +1,10 @@
import json
import os
+import pytest
+
from leapp.libraries.common import repofileutils
+from leapp.models.fields import ModelViolationError
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -12,6 +15,30 @@ def test_invert_dict():
assert inv_dict == {'a': [1], 'b': [1, 2]}
+@pytest.mark.parametrize(
+ ('repoid', 'data'),
+ (
+ ('missing-name', {'baseurl': 'http://example.com', 'enabled': '1', 'gpgcheck': '1'}),
+ (None, {'name': 'name', 'baseurl': 'http://example.com', 'enabled': '1', 'gpgcheck': '1'}),
+ ('name-none', {'name': None, 'baseurl': 'http://example.com', 'enabled': '1', 'gpgcheck': '1'}),
+ ('baseurl-true', {'name': 'valid', 'baseurl': True, 'enabled': '1', 'gpgcheck': '1'}),
+ )
+)
+def test__parse_repository_missing_name(repoid, data):
+ with pytest.raises(ModelViolationError):
+ repofileutils._parse_repository(repoid, data)
+
+
+def test_parse_repofile_error(monkeypatch):
+ def _parse_repository_mocked(*args, **kwargs):
+ raise ModelViolationError('')
+
+ monkeypatch.setattr(repofileutils, '_parse_repository', _parse_repository_mocked)
+
+ with pytest.raises(repofileutils.InvalidRepoDefinition):
+ repofileutils.parse_repofile(os.path.join(CUR_DIR, 'sample_repos.txt'))
+
+
def test_parse_repofile():
repofile = repofileutils.parse_repofile(os.path.join(CUR_DIR, 'sample_repos.txt'))
--
2.47.0
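
Stripped of the leapp specifics, the error-wrapping pattern added above can be sketched as follows; ParseError and parse_repository are stand-ins for fields.ModelViolationError and _parse_repository:

    class ParseError(Exception):
        # stand-in for fields.ModelViolationError
        pass

    class InvalidRepoDefinition(Exception):
        """Raised when a repository definition is invalid."""
        def __init__(self, msg, repofile, repoid):
            message = 'Invalid repository definition: {repoid} in: {repofile}: {msg}'.format(
                repoid=repoid, repofile=repofile, msg=msg)
            super(InvalidRepoDefinition, self).__init__(message)
            self.repofile = repofile
            self.repoid = repoid

    def parse_repository(repoid, repo_data):
        # stand-in for _parse_repository; a missing 'name' makes the definition invalid
        if 'name' not in repo_data:
            raise ParseError("missing 'name'")
        return repo_data

    def parse_repofile(repofile, sections):
        data = []
        for repoid, repo_data in sections.items():
            try:
                data.append(parse_repository(repoid, repo_data))
            except ParseError as e:
                # Re-raise with enough context to tell which file and repoid are broken.
                raise InvalidRepoDefinition(e, repofile=repofile, repoid=repoid)
        return data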

View File

@ -1,65 +0,0 @@
From e61718d44e0175bcd28c8a5ee44dc46880d74482 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Tue, 5 Aug 2025 12:20:23 +0200
Subject: [PATCH 16/55] checktargetrepos: Skip if not RHEL
Skip the target repos check on non-RHEL distros. On non-RHEL distros,
there is no subscription-manager. The base repositories (BaseOS,
AppStream, ...) should always be present.
This is checked by the seatbelts in the target userspace creator.
---
.../system_upgrade/common/actors/checktargetrepos/actor.py | 4 +++-
.../actors/checktargetrepos/libraries/checktargetrepos.py | 7 ++++---
.../actors/checktargetrepos/tests/test_checktargetrepos.py | 2 --
3 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/repos/system_upgrade/common/actors/checktargetrepos/actor.py b/repos/system_upgrade/common/actors/checktargetrepos/actor.py
index d61fb685..a5bdde10 100644
--- a/repos/system_upgrade/common/actors/checktargetrepos/actor.py
+++ b/repos/system_upgrade/common/actors/checktargetrepos/actor.py
@@ -6,7 +6,9 @@ from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
class Checktargetrepos(Actor):
"""
- Check whether target yum repositories are specified.
+ Check whether target dnf repositories are specified on RHEL.
+
+ NOTE: this actor does nothing on distros other than RHEL.
RHSM | RHUI | ER | CTR | CTRF || result
-----+------+----+-----+------++-------
diff --git a/repos/system_upgrade/common/actors/checktargetrepos/libraries/checktargetrepos.py b/repos/system_upgrade/common/actors/checktargetrepos/libraries/checktargetrepos.py
index 141cf8e4..ea21e1de 100644
--- a/repos/system_upgrade/common/actors/checktargetrepos/libraries/checktargetrepos.py
+++ b/repos/system_upgrade/common/actors/checktargetrepos/libraries/checktargetrepos.py
@@ -40,9 +40,10 @@ def process():
rhui_info = next(api.consume(RHUIInfo), None)
- if not rhsm.skip_rhsm() or rhui_info:
- # getting RH repositories through RHSM or RHUI; resolved by seatbelts
- # implemented in other actors
+ if config.get_distro_id() != 'rhel' or (not rhsm.skip_rhsm() or rhui_info):
+ # RHEL: getting RH repositories through RHSM or RHUI;
+ # resolved by seatbelts in other actors
+ # other: distro repos provided by the distro directly, seatbelts elsewhere
return
# rhsm skipped; take your seatbelts please
diff --git a/repos/system_upgrade/common/actors/checktargetrepos/tests/test_checktargetrepos.py b/repos/system_upgrade/common/actors/checktargetrepos/tests/test_checktargetrepos.py
index ea93ce7e..e055b3a6 100644
--- a/repos/system_upgrade/common/actors/checktargetrepos/tests/test_checktargetrepos.py
+++ b/repos/system_upgrade/common/actors/checktargetrepos/tests/test_checktargetrepos.py
@@ -65,9 +65,7 @@ _CUSTOM_TARGET_REPOFILE = CustomTargetRepositoryFile(file='/etc/leapp/files/leap
def test_checktargetrepos_rhsm(monkeypatch):
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
monkeypatch.setattr(rhsm, 'skip_rhsm', lambda: False)
- monkeypatch.setattr(api, 'consume', MockedConsume())
monkeypatch.setattr(api, 'current_actor', CurrentActorMocked())
- monkeypatch.setattr(checktargetrepos, 'get_target_major_version', lambda: '8')
checktargetrepos.process()
assert reporting.create_report.called == 0
--
2.51.1
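
A condensed sketch of the new early-return condition, with get_distro_id(), skip_rhsm() and the RHUIInfo message reduced to plain parameters for illustration:

    def should_skip_target_repo_check(distro_id, skip_rhsm, rhui_info):
        # Non-RHEL distros ship their base repos directly, and RHEL systems using
        # RHSM or RHUI are covered by seatbelts in other actors, so only the
        # 'RHEL without RHSM/RHUI' case still needs this check.
        return distro_id != 'rhel' or (not skip_rhsm or rhui_info is not None)

    print(should_skip_target_repo_check('centos', True, None))  # True  -> skip the check
    print(should_skip_target_repo_check('rhel', True, None))    # False -> perform the check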

View File

@ -0,0 +1,56 @@
From f84c6f808a821d3ccd09a4a8278cef9c09984a28 Mon Sep 17 00:00:00 2001
From: Daniel Zatovic <daniel.zatovic@gmail.com>
Date: Wed, 3 Apr 2024 23:25:06 +0200
Subject: [PATCH 17/40] InhibitWhenLuks: simplify the logic
---
.../common/actors/inhibitwhenluks/actor.py | 35 +++++++------------
1 file changed, 13 insertions(+), 22 deletions(-)
diff --git a/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py b/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py
index d3ff2d2e..40b845b0 100644
--- a/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py
+++ b/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py
@@ -24,26 +24,17 @@ class InhibitWhenLuks(Actor):
ceph_info = next(self.consume(CephInfo))
if ceph_info:
ceph_vol = ceph_info.encrypted_volumes[:]
- for storage_info in self.consume(StorageInfo):
- for blk in storage_info.lsblk:
- if blk.tp == 'crypt' and blk.name not in ceph_vol:
- create_report([
- reporting.Title('LUKS encrypted partition detected'),
- reporting.Summary('Upgrading system with encrypted partitions is not supported'),
- reporting.Severity(reporting.Severity.HIGH),
- reporting.Groups([reporting.Groups.BOOT, reporting.Groups.ENCRYPTION]),
- reporting.Groups([reporting.Groups.INHIBITOR]),
- ])
- break
except StopIteration:
- for storage_info in self.consume(StorageInfo):
- for blk in storage_info.lsblk:
- if blk.tp == 'crypt':
- create_report([
- reporting.Title('LUKS encrypted partition detected'),
- reporting.Summary('Upgrading system with encrypted partitions is not supported'),
- reporting.Severity(reporting.Severity.HIGH),
- reporting.Groups([reporting.Groups.BOOT, reporting.Groups.ENCRYPTION]),
- reporting.Groups([reporting.Groups.INHIBITOR]),
- ])
- break
+ pass
+
+ for storage_info in self.consume(StorageInfo):
+ for blk in storage_info.lsblk:
+ if blk.tp == 'crypt' and blk.name not in ceph_vol:
+ create_report([
+ reporting.Title('LUKS encrypted partition detected'),
+ reporting.Summary('Upgrading system with encrypted partitions is not supported'),
+ reporting.Severity(reporting.Severity.HIGH),
+ reporting.Groups([reporting.Groups.BOOT, reporting.Groups.ENCRYPTION]),
+ reporting.Groups([reporting.Groups.INHIBITOR]),
+ ])
+ break
--
2.47.0
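
The simplification above works because an empty ceph_vol list makes the single loop behave exactly like the old no-CephInfo branch. A leapp-free sketch of that control flow, with (name, type) tuples standing in for the lsblk entries:

    def encrypted_non_ceph_devices(lsblk_entries, ceph_volumes=None):
        # Without Ceph information the exclusion list is simply empty,
        # so every 'crypt' device is reported.
        ceph_vol = list(ceph_volumes) if ceph_volumes else []
        return [name for name, tp in lsblk_entries if tp == 'crypt' and name not in ceph_vol]

    entries = [('luks-root', 'crypt'), ('sda1', 'part'), ('ceph-osd0', 'crypt')]
    print(encrypted_non_ceph_devices(entries, ceph_volumes=['ceph-osd0']))  # ['luks-root']
    print(encrypted_non_ceph_devices(entries))                              # ['luks-root', 'ceph-osd0']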

View File

@ -1,820 +0,0 @@
From 32d9c40ffc7ea8d08e2b85881579ede1fdaedb32 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Thu, 7 Aug 2025 13:40:33 +0200
Subject: [PATCH 17/55] userspacegen: Add repo gathering for non-RHEL distros
The _get_rh_available_repoids() function is replaced by the
new get_distro_repoids() function from the distro library. This function
works with all the "supported" distros.
The idea is the same as for RHEL: scan well-known distro-provided
repofiles for repositories.
For RHEL, at least for now, the existing
rhsm.get_rhsm_available_repoids() function is still used.
These changes together enable the use of repomapping on distros other
than RHEL, as before this change the --enablerepo option had to be used
to specify target repos and they were treated as custom repos.
Also, the unused _get_rhui_available_repoids() function is removed.
Jira: RHEL-107212
Move get_distro_repoids() to the distro library
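
The scanning idea described here can be sketched without the leapp plumbing: read a fixed set of well-known repofiles and collect the repoids (section names) they define. The paths below are illustrative only; the actual per-distro mapping lives in _DISTRO_REPOFILES_MAP further down in this patch:

    import os
    try:
        import configparser                    # Python 3
    except ImportError:
        import ConfigParser as configparser    # Python 2

    # Hypothetical selection of well-known repofiles for a single distro/version.
    WELL_KNOWN_REPOFILES = ['/etc/yum.repos.d/centos.repo', '/etc/yum.repos.d/centos-addons.repo']

    def get_distro_repoids(root='/'):
        repoids = set()
        for repofile in WELL_KNOWN_REPOFILES:
            path = os.path.join(root, repofile.lstrip('/'))
            if not os.path.isfile(path):
                continue
            cp = configparser.ConfigParser()
            cp.read(path)
            # Every section in a dnf/yum repofile is a repoid.
            repoids.update(cp.sections())
        return sorted(repoids)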
---
.../libraries/userspacegen.py | 228 ++++++++++--------
.../tests/unit_test_targetuserspacecreator.py | 56 ++++-
.../system_upgrade/common/libraries/distro.py | 192 +++++++++++++++
.../common/libraries/tests/test_distro.py | 154 ++++++++++++
4 files changed, 524 insertions(+), 106 deletions(-)
create mode 100644 repos/system_upgrade/common/libraries/tests/test_distro.py
diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
index 407cb0b7..26fec2d9 100644
--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
+++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
@@ -6,7 +6,7 @@ import shutil
from leapp import reporting
from leapp.exceptions import StopActorExecution, StopActorExecutionError
from leapp.libraries.actor import constants
-from leapp.libraries.common import dnfplugin, mounting, overlaygen, repofileutils, rhsm, utils
+from leapp.libraries.common import distro, dnfplugin, mounting, overlaygen, repofileutils, rhsm, utils
from leapp.libraries.common.config import get_distro_id, get_env, get_product_type
from leapp.libraries.common.config.version import get_target_major_version
from leapp.libraries.common.gpg import get_path_to_gpg_certs, is_nogpgcheck_set
@@ -58,6 +58,7 @@ from leapp.utils.deprecation import suppress_deprecation
PROD_CERTS_FOLDER = 'prod-certs'
PERSISTENT_PACKAGE_CACHE_DIR = '/var/lib/leapp/persistent_package_cache'
DEDICATED_LEAPP_PART_URL = 'https://access.redhat.com/solutions/7011704'
+FMT_LIST_SEPARATOR = '\n - '
def _check_deprecated_rhsm_skip():
@@ -778,7 +779,7 @@ def _inhibit_on_duplicate_repos(repofiles):
list_separator_fmt = '\n - '
api.current_logger().warning(
'The following repoids are defined multiple times:{0}{1}'
- .format(list_separator_fmt, list_separator_fmt.join(duplicates))
+ .format(list_separator_fmt, list_separator_fmt.join(sorted(duplicates)))
)
reporting.create_report([
@@ -786,7 +787,7 @@ def _inhibit_on_duplicate_repos(repofiles):
reporting.Summary(
'The following repositories are defined multiple times inside the'
' "upgrade" container:{0}{1}'
- .format(list_separator_fmt, list_separator_fmt.join(duplicates))
+ .format(list_separator_fmt, list_separator_fmt.join(sorted(duplicates)))
),
reporting.Severity(reporting.Severity.MEDIUM),
reporting.Groups([reporting.Groups.REPOSITORY]),
@@ -815,21 +816,19 @@ def _get_all_available_repoids(context):
return set(repoids)
-def _get_rhsm_available_repoids(context):
- target_major_version = get_target_major_version()
+def _inhibit_if_no_base_repos(distro_repoids):
# FIXME: check that required repo IDs (baseos, appstream)
# + or check that all required RHEL repo IDs are available.
- if rhsm.skip_rhsm():
- return set()
- # Get the RHSM repos available in the target RHEL container
- # TODO: very similar thing should happens for all other repofiles in container
- #
- repoids = rhsm.get_available_repo_ids(context)
+
+ target_major_version = get_target_major_version()
# NOTE(ivasilev) For the moment at least AppStream and BaseOS repos are required. While we are still
# contemplating on what can be a generic solution to checking this, let's introduce a minimal check for
# at-least-one-appstream and at-least-one-baseos among present repoids
- if not repoids or all("baseos" not in ri for ri in repoids) or all("appstream" not in ri for ri in repoids):
+ no_baseos = all("baseos" not in ri for ri in distro_repoids)
+ no_appstream = all("appstream" not in ri for ri in distro_repoids)
+ if no_baseos or no_appstream:
reporting.create_report([
+ # TODO: Make the report distro agnostic
reporting.Title('Cannot find required basic RHEL target repositories.'),
reporting.Summary(
'This can happen when a repository ID was entered incorrectly either while using the --enablerepo'
@@ -861,21 +860,6 @@ def _get_rhsm_available_repoids(context):
title='Preparing for the upgrade')
])
raise StopActorExecution()
- return set(repoids)
-
-
-def _get_rhui_available_repoids(context, cloud_repo):
- repofiles = repofileutils.get_parsed_repofiles(context)
-
- # TODO: same refactoring as Issue #486?
- _inhibit_on_duplicate_repos(repofiles)
- repoids = []
- for rfile in repofiles:
- if rfile.file == cloud_repo and rfile.data:
- repoids = [repo.repoid for repo in rfile.data]
- repoids.sort()
- break
- return set(repoids)
def get_copy_location_from_copy_in_task(context_basepath, copy_task):
@@ -886,86 +870,106 @@ def get_copy_location_from_copy_in_task(context_basepath, copy_task):
return copy_task.dst
-def _get_rh_available_repoids(context, indata):
+def _get_rhui_available_repoids(context, rhui_info):
"""
- RH repositories are provided either by RHSM or are stored in the expected repo file provided by
- RHUI special packages (every cloud provider has itw own rpm).
+ Get repoids provided by the RHUI target clients
+
+ :rtype: set[str]
"""
+ # If we are upgrading a RHUI system, check what repositories are provided by the (already installed) target clients
+ setup_info = rhui_info.target_client_setup_info
+ target_content_access_files = set()
+ if setup_info.bootstrap_target_client:
+ target_content_access_files = _query_rpm_for_pkg_files(context, rhui_info.target_client_pkg_names)
- rh_repoids = _get_rhsm_available_repoids(context)
+ def is_repofile(path):
+ return os.path.dirname(path) == '/etc/yum.repos.d' and os.path.basename(path).endswith('.repo')
- # If we are upgrading a RHUI system, check what repositories are provided by the (already installed) target clients
- if indata and indata.rhui_info:
- setup_info = indata.rhui_info.target_client_setup_info
- target_content_access_files = set()
- if setup_info.bootstrap_target_client:
- target_content_access_files = _query_rpm_for_pkg_files(context, indata.rhui_info.target_client_pkg_names)
+ def extract_repoid_from_line(line):
+ return line.split(':', 1)[1].strip()
- def is_repofile(path):
- return os.path.dirname(path) == '/etc/yum.repos.d' and os.path.basename(path).endswith('.repo')
+ target_ver = api.current_actor().configuration.version.target
+ setup_tasks = rhui_info.target_client_setup_info.preinstall_tasks.files_to_copy_into_overlay
- def extract_repoid_from_line(line):
- return line.split(':', 1)[1].strip()
+ yum_repos_d = context.full_path('/etc/yum.repos.d')
+ all_repofiles = {os.path.join(yum_repos_d, path) for path in os.listdir(yum_repos_d) if path.endswith('.repo')}
+ api.current_logger().debug('(RHUI Setup) All available repofiles: {0}'.format(' '.join(all_repofiles)))
- target_ver = api.current_actor().configuration.version.target
- setup_tasks = indata.rhui_info.target_client_setup_info.preinstall_tasks.files_to_copy_into_overlay
+ target_access_repofiles = {
+ context.full_path(path) for path in target_content_access_files if is_repofile(path)
+ }
- yum_repos_d = context.full_path('/etc/yum.repos.d')
- all_repofiles = {os.path.join(yum_repos_d, path) for path in os.listdir(yum_repos_d) if path.endswith('.repo')}
- api.current_logger().debug('(RHUI Setup) All available repofiles: {0}'.format(' '.join(all_repofiles)))
+ # Exclude repofiles used to setup the target rhui access as on some platforms the repos provided by
+ # the client are not sufficient to install the client into target userspace (GCP)
+ rhui_setup_repofile_tasks = [task for task in setup_tasks if task.src.endswith('repo')]
+ rhui_setup_repofiles = (
+ get_copy_location_from_copy_in_task(context.base_dir, copy) for copy in rhui_setup_repofile_tasks
+ )
+ rhui_setup_repofiles = {context.full_path(repofile) for repofile in rhui_setup_repofiles}
- target_access_repofiles = {
- context.full_path(path) for path in target_content_access_files if is_repofile(path)
- }
+ foreign_repofiles = all_repofiles - target_access_repofiles - rhui_setup_repofiles
- # Exclude repofiles used to setup the target rhui access as on some platforms the repos provided by
- # the client are not sufficient to install the client into target userspace (GCP)
- rhui_setup_repofile_tasks = [task for task in setup_tasks if task.src.endswith('repo')]
- rhui_setup_repofiles = (
- get_copy_location_from_copy_in_task(context.base_dir, copy) for copy in rhui_setup_repofile_tasks
- )
- rhui_setup_repofiles = {context.full_path(repofile) for repofile in rhui_setup_repofiles}
+ api.current_logger().debug(
+ 'The following repofiles are considered as unknown to'
+ ' the target RHUI content setup and will be ignored: {0}'.format(' '.join(foreign_repofiles))
+ )
- foreign_repofiles = all_repofiles - target_access_repofiles - rhui_setup_repofiles
+ # Rename non-client repofiles so they will not be recognized when running dnf repolist
+ for foreign_repofile in foreign_repofiles:
+ os.rename(foreign_repofile, '{0}.back'.format(foreign_repofile))
- api.current_logger().debug(
- 'The following repofiles are considered as unknown to'
- ' the target RHUI content setup and will be ignored: {0}'.format(' '.join(foreign_repofiles))
+ rhui_repoids = set()
+ try:
+ dnf_cmd = [
+ 'dnf', 'repolist',
+ '--releasever', target_ver, '-v',
+ '--enablerepo', '*',
+ '--disablerepo', '*-source-*',
+ '--disablerepo', '*-debug-*',
+ ]
+ repolist_result = context.call(dnf_cmd)['stdout']
+ repoid_lines = [line for line in repolist_result.split('\n') if line.startswith('Repo-id')]
+ rhui_repoids.update({extract_repoid_from_line(line) for line in repoid_lines})
+
+ except CalledProcessError as err:
+ details = {'err': err.stderr, 'details': str(err)}
+ raise StopActorExecutionError(
+ message='Failed to retrieve repoids provided by target RHUI clients.',
+ details=details
)
- # Rename non-client repofiles so they will not be recognized when running dnf repolist
+ finally:
+ # Revert the renaming of non-client repofiles
for foreign_repofile in foreign_repofiles:
- os.rename(foreign_repofile, '{0}.back'.format(foreign_repofile))
+ os.rename('{0}.back'.format(foreign_repofile), foreign_repofile)
- try:
- dnf_cmd = [
- 'dnf', 'repolist',
- '--releasever', target_ver, '-v',
- '--enablerepo', '*',
- '--disablerepo', '*-source-*',
- '--disablerepo', '*-debug-*',
- ]
- repolist_result = context.call(dnf_cmd)['stdout']
- repoid_lines = [line for line in repolist_result.split('\n') if line.startswith('Repo-id')]
- rhui_repoids = {extract_repoid_from_line(line) for line in repoid_lines}
- rh_repoids.update(rhui_repoids)
-
- except CalledProcessError as err:
- details = {'err': err.stderr, 'details': str(err)}
- raise StopActorExecutionError(
- message='Failed to retrieve repoids provided by target RHUI clients.',
- details=details
- )
+ return rhui_repoids
- finally:
- # Revert the renaming of non-client repofiles
- for foreign_repofile in foreign_repofiles:
- os.rename('{0}.back'.format(foreign_repofile), foreign_repofile)
- api.current_logger().debug(
- 'The following repofiles are considered as provided by RedHat: {0}'.format(' '.join(rh_repoids))
- )
- return rh_repoids
+def _get_distro_available_repoids(context, indata):
+ """
+ Get repoids provided by the distribution
+
+ On RHEL: RH repositories are provided either by RHSM or are stored in the
+ expected repo file provided by RHUI special packages (every cloud
+ provider has its own rpm).
+ On other: Repositories are provided in specific repofiles (e.g. centos.repo
+ and centos-addons.repo on CS)
+
+ :return: A set of repoids provided by distribution
+ :rtype: set[str]
+ """
+ distro_repoids = distro.get_target_distro_repoids(context)
+ distro_id = get_distro_id()
+ rhel_and_rhsm = distro_id == 'rhel' and not rhsm.skip_rhsm()
+ if distro_id != 'rhel' or rhel_and_rhsm:
+ _inhibit_if_no_base_repos(distro_repoids)
+
+ if indata and indata.rhui_info:
+ rhui_repoids = _get_rhui_available_repoids(context, indata.rhui_info)
+ distro_repoids.extend(rhui_repoids)
+
+ return set(distro_repoids)
@suppress_deprecation(RHELTargetRepository) # member of TargetRepositories
@@ -986,17 +990,31 @@ def gather_target_repositories(context, indata):
:param context: An instance of a mounting.IsolatedActions class
:type context: mounting.IsolatedActions class
:return: List of target system repoids
- :rtype: List(string)
+ :rtype: set[str]
"""
- rh_available_repoids = _get_rh_available_repoids(context, indata)
- all_available_repoids = _get_all_available_repoids(context)
- target_repoids = []
- missing_custom_repoids = []
+ distro_repoids = _get_distro_available_repoids(context, indata)
+ if distro_repoids:
+ api.current_logger().info(
+ "The following repoids are considered as provided by the '{}' distribution:{}{}".format(
+ get_distro_id(),
+ FMT_LIST_SEPARATOR,
+ FMT_LIST_SEPARATOR.join(sorted(distro_repoids)),
+ )
+ )
+ else:
+ api.current_logger().warning(
+ "No repoids provided by the {} distribution have been discovered".format(get_distro_id())
+ )
+
+ all_repoids = _get_all_available_repoids(context)
+
+ target_repoids = set()
+ missing_custom_repoids = set()
for target_repo in api.consume(TargetRepositories):
- for rhel_repo in target_repo.rhel_repos:
- if rhel_repo.repoid in rh_available_repoids:
- target_repoids.append(rhel_repo.repoid)
+ for distro_repo in target_repo.distro_repos:
+ if distro_repo.repoid in distro_repoids:
+ target_repoids.add(distro_repo.repoid)
else:
# TODO: We shall report that the RHEL repos that we deem necessary for
# the upgrade are not available; but currently it would just print bunch of
@@ -1005,12 +1023,16 @@ def gather_target_repositories(context, indata):
# of the upgrade. Let's skip it for now until it's clear how we will deal
# with it.
pass
+
for custom_repo in target_repo.custom_repos:
- if custom_repo.repoid in all_available_repoids:
- target_repoids.append(custom_repo.repoid)
+ if custom_repo.repoid in all_repoids:
+ target_repoids.add(custom_repo.repoid)
else:
- missing_custom_repoids.append(custom_repo.repoid)
- api.current_logger().debug("Gathered target repositories: {}".format(', '.join(target_repoids)))
+ missing_custom_repoids.add(custom_repo.repoid)
+ api.current_logger().debug(
+ "Gathered target repositories: {}".format(", ".join(sorted(target_repoids)))
+ )
+
if not target_repoids:
target_major_version = get_target_major_version()
reporting.create_report([
@@ -1056,7 +1078,7 @@ def gather_target_repositories(context, indata):
' while using the --enablerepo option of leapp, or in a third party actor that produces a'
' CustomTargetRepositoryMessage.\n'
'The following repositories IDs could not be found in the target configuration:\n'
- '- {}\n'.format('\n- '.join(missing_custom_repoids))
+ '- {}\n'.format('\n- '.join(sorted(missing_custom_repoids)))
),
reporting.Groups([reporting.Groups.REPOSITORY]),
reporting.Groups([reporting.Groups.INHIBITOR]),
@@ -1073,7 +1095,7 @@ def gather_target_repositories(context, indata):
))
])
raise StopActorExecution()
- return set(target_repoids)
+ return target_repoids
def _install_custom_repofiles(context, custom_repofiles):
diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py
index f05e6bc2..2ae194d7 100644
--- a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py
+++ b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py
@@ -11,9 +11,9 @@ import pytest
from leapp import models, reporting
from leapp.exceptions import StopActorExecution, StopActorExecutionError
from leapp.libraries.actor import userspacegen
-from leapp.libraries.common import overlaygen, repofileutils, rhsm
+from leapp.libraries.common import distro, overlaygen, repofileutils, rhsm
from leapp.libraries.common.config import architecture
-from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked, produce_mocked
+from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked, logger_mocked, produce_mocked
from leapp.libraries.stdlib import api, CalledProcessError
from leapp.utils.deprecation import suppress_deprecation
@@ -1115,7 +1115,9 @@ def test_gather_target_repositories_rhui(monkeypatch):
monkeypatch.setattr(userspacegen.api, 'current_actor', CurrentActorMocked())
monkeypatch.setattr(userspacegen, '_get_all_available_repoids', lambda x: [])
monkeypatch.setattr(
- userspacegen, '_get_rh_available_repoids', lambda x, y: ['rhui-1', 'rhui-2', 'rhui-3']
+ userspacegen,
+ "_get_distro_available_repoids",
+ lambda dummy_context, dummy_indata: {"rhui-1", "rhui-2", "rhui-3"},
)
monkeypatch.setattr(rhsm, 'skip_rhsm', lambda: True)
monkeypatch.setattr(
@@ -1195,6 +1197,54 @@ def test_gather_target_repositories_baseos_appstream_not_available(monkeypatch):
assert inhibitors[0].get('title', '') == 'Cannot find required basic RHEL target repositories.'
+def test__get_distro_available_repoids_norhsm_norhui(monkeypatch):
+ """
+ Empty set should be returned when on rhel and skip_rhsm == True.
+ """
+ monkeypatch.setattr(
+ userspacegen.api, "current_actor", CurrentActorMocked(release_id="rhel")
+ )
+ monkeypatch.setattr(userspacegen.api.current_actor(), 'produce', produce_mocked())
+
+ monkeypatch.setattr(rhsm, 'skip_rhsm', lambda: True)
+ monkeypatch.setattr(distro, 'get_target_distro_repoids', lambda ctx: [])
+
+ indata = testInData(_PACKAGES_MSGS, None, None, _XFS_MSG, _STORAGEINFO_MSG, None)
+ # NOTE: context is not used without rhsm, for simplicity setting to None
+ repoids = userspacegen._get_distro_available_repoids(None, indata)
+ assert repoids == set()
+
+
+@pytest.mark.parametrize(
+ "distro_id,skip_rhsm", [("rhel", False), ("centos", True), ("almalinux", True)]
+)
+def test__get_distro_available_repoids_nobaserepos_inhibit(
+ monkeypatch, distro_id, skip_rhsm
+):
+ """
+ Test that get_distro_available repoids reports and raises if there are no base repos.
+ """
+ monkeypatch.setattr(
+ userspacegen.api, "current_actor", CurrentActorMocked(release_id=distro_id)
+ )
+ monkeypatch.setattr(userspacegen.api.current_actor(), 'produce', produce_mocked())
+ monkeypatch.setattr(reporting, "create_report", create_report_mocked())
+
+ monkeypatch.setattr(rhsm, 'skip_rhsm', lambda: skip_rhsm)
+ monkeypatch.setattr(distro, 'get_target_distro_repoids', lambda ctx: [])
+
+ indata = testInData(_PACKAGES_MSGS, None, None, _XFS_MSG, _STORAGEINFO_MSG, None)
+ with pytest.raises(StopActorExecution):
+ # NOTE: context is not used without rhsm, for simplicity setting to None
+ userspacegen._get_distro_available_repoids(None, indata)
+
+ # TODO adjust the asserts when the report is made distro agnostic
+ assert reporting.create_report.called == 1
+ report = reporting.create_report.reports[0]
+ assert "Cannot find required basic RHEL target repositories" in report["title"]
+ assert reporting.Groups.INHIBITOR in report["groups"]
+
+
def mocked_consume_data():
packages = {'dnf', 'dnf-command(config-manager)', 'pkgA', 'pkgB'}
rhsm_info = _RHSMINFO_MSG
diff --git a/repos/system_upgrade/common/libraries/distro.py b/repos/system_upgrade/common/libraries/distro.py
index 2ed5eacd..d6a2381a 100644
--- a/repos/system_upgrade/common/libraries/distro.py
+++ b/repos/system_upgrade/common/libraries/distro.py
@@ -2,6 +2,10 @@ import json
import os
from leapp.exceptions import StopActorExecutionError
+from leapp.libraries.common import repofileutils, rhsm
+from leapp.libraries.common.config import get_distro_id
+from leapp.libraries.common.config.architecture import ARCH_ACCEPTED, ARCH_X86_64
+from leapp.libraries.common.config.version import get_target_major_version
from leapp.libraries.stdlib import api
@@ -16,3 +20,191 @@ def get_distribution_data(distribution):
raise StopActorExecutionError(
'Cannot find distribution signature configuration.',
details={'Problem': 'Distribution {} was not found in {}.'.format(distribution, distributions_path)})
+
+
+# distro -> major_version -> repofile -> tuple of architectures where it's present
+_DISTRO_REPOFILES_MAP = {
+ 'rhel': {
+ '8': {'/etc/yum.repos.d/redhat.repo': ARCH_ACCEPTED},
+ '9': {'/etc/yum.repos.d/redhat.repo': ARCH_ACCEPTED},
+ '10': {'/etc/yum.repos.d/redhat.repo': ARCH_ACCEPTED},
+ },
+ 'centos': {
+ '8': {
+ # TODO is this true on all archs?
+ 'CentOS-Linux-AppStream.repo': ARCH_ACCEPTED,
+ 'CentOS-Linux-BaseOS.repo': ARCH_ACCEPTED,
+ 'CentOS-Linux-ContinuousRelease.repo': ARCH_ACCEPTED,
+ 'CentOS-Linux-Debuginfo.repo': ARCH_ACCEPTED,
+ 'CentOS-Linux-Devel.repo': ARCH_ACCEPTED,
+ 'CentOS-Linux-Extras.repo': ARCH_ACCEPTED,
+ 'CentOS-Linux-FastTrack.repo': ARCH_ACCEPTED,
+ 'CentOS-Linux-HighAvailability.repo': ARCH_ACCEPTED,
+ 'CentOS-Linux-Media.repo': ARCH_ACCEPTED,
+ 'CentOS-Linux-Plus.repo': ARCH_ACCEPTED,
+ 'CentOS-Linux-PowerTools.repo': ARCH_ACCEPTED,
+ 'CentOS-Linux-Sources.repo': ARCH_ACCEPTED,
+ },
+ '9': {
+ '/etc/yum.repos.d/centos.repo': ARCH_ACCEPTED,
+ '/etc/yum.repos.d/centos-addons.repo': ARCH_ACCEPTED,
+ },
+ '10': {
+ '/etc/yum.repos.d/centos.repo': ARCH_ACCEPTED,
+ '/etc/yum.repos.d/centos-addons.repo': ARCH_ACCEPTED,
+ },
+ },
+ 'almalinux': {
+ '8': {
+ # TODO is this true on all archs?
+ '/etc/yum.repos.d/almalinux-ha.repo': ARCH_ACCEPTED,
+ '/etc/yum.repos.d/almalinux-nfv.repo': ARCH_ACCEPTED,
+ '/etc/yum.repos.d/almalinux-plus.repo': ARCH_ACCEPTED,
+ '/etc/yum.repos.d/almalinux-powertools.repo': ARCH_ACCEPTED,
+ '/etc/yum.repos.d/almalinux-resilientstorage.repo': ARCH_ACCEPTED,
+ '/etc/yum.repos.d/almalinux-rt.repo': ARCH_ACCEPTED,
+ '/etc/yum.repos.d/almalinux-sap.repo': ARCH_ACCEPTED,
+ '/etc/yum.repos.d/almalinux-saphana.repo': ARCH_ACCEPTED,
+ '/etc/yum.repos.d/almalinux.repo': ARCH_ACCEPTED,
+ },
+ '9': {
+ '/etc/yum.repos.d/almalinux-appstream.repo': ARCH_ACCEPTED,
+ '/etc/yum.repos.d/almalinux-baseos.repo': ARCH_ACCEPTED,
+ '/etc/yum.repos.d/almalinux-crb.repo': ARCH_ACCEPTED,
+ '/etc/yum.repos.d/almalinux-extras.repo': ARCH_ACCEPTED,
+ '/etc/yum.repos.d/almalinux-highavailability.repo': ARCH_ACCEPTED,
+ '/etc/yum.repos.d/almalinux-plus.repo': ARCH_ACCEPTED,
+ '/etc/yum.repos.d/almalinux-resilientstorage.repo': ARCH_ACCEPTED,
+ '/etc/yum.repos.d/almalinux-sap.repo': ARCH_ACCEPTED,
+ '/etc/yum.repos.d/almalinux-saphana.repo': ARCH_ACCEPTED,
+ # RT and NFV are only on x86_64 on almalinux 9
+ '/etc/yum.repos.d/almalinux-nfv.repo': (ARCH_X86_64,),
+ '/etc/yum.repos.d/almalinux-rt.repo': (ARCH_X86_64,),
+ },
+ '10': {
+ # no resilientstorage on 10
+ '/etc/yum.repos.d/almalinux-appstream.repo': ARCH_ACCEPTED,
+ '/etc/yum.repos.d/almalinux-baseos.repo': ARCH_ACCEPTED,
+ '/etc/yum.repos.d/almalinux-crb.repo': ARCH_ACCEPTED,
+ '/etc/yum.repos.d/almalinux-extras.repo': ARCH_ACCEPTED,
+ '/etc/yum.repos.d/almalinux-highavailability.repo': ARCH_ACCEPTED,
+ '/etc/yum.repos.d/almalinux-plus.repo': ARCH_ACCEPTED,
+ '/etc/yum.repos.d/almalinux-sap.repo': ARCH_ACCEPTED,
+ '/etc/yum.repos.d/almalinux-saphana.repo': ARCH_ACCEPTED,
+ # RT and NFV are only on x86_64 on almalinux 10
+ '/etc/yum.repos.d/almalinux-nfv.repo': (ARCH_X86_64,),
+ '/etc/yum.repos.d/almalinux-rt.repo': (ARCH_X86_64,),
+ },
+ },
+}
+
+
+def _get_distro_repofiles(distro, major_version, arch):
+ """
+ Get distribution provided repofiles.
+
+ Note that this does not perform any validation, the caller must check
+ whether the files exist.
+
+ :param distro: The distribution to get repofiles for.
+ :type distro: str
+ :param major_version: The major version to get repofiles for.
+ :type major_version: str
+ :param arch: The architecture to get repofiles for.
+ :type arch: str
+ :return: A list of paths to repofiles provided by distribution
+ :rtype: list[str] or None if no repofiles are mapped for the arguments
+ """
+
+ distro_repofiles = _DISTRO_REPOFILES_MAP.get(distro)
+ if not distro_repofiles:
+ return None
+
+ version_repofiles = distro_repofiles.get(major_version, {})
+ if not version_repofiles:
+ return None
+
+ return [repofile for repofile, archs in version_repofiles.items() if arch in archs]
+
+
+def get_target_distro_repoids(context):
+ """
+ Get repoids defined in distro provided repofiles
+
+ See the generic :func:`_get_distro_repoids` for more details.
+
+ :param context: An instance of mounting.IsolatedActions class
+ :type context: mounting.IsolatedActions
+ :return: Repoids of distribution provided repositories
+ :rtype: list[str]
+ """
+
+ return get_distro_repoids(
+ context,
+ get_distro_id(),
+ get_target_major_version(),
+ api.current_actor().configuration.architecture
+ )
+
+
+def get_distro_repoids(context, distro, major_version, arch):
+ """
+ Get repoids defined in distro provided repofiles
+
+ On RHEL with RHSM this delegates to rhsm.get_available_repo_ids.
+
+ Repofiles installed by RHUI client packages are not covered by this
+ function.
+
+ :param context: An instance of mounting.IsolatedActions class
+ :type context: mounting.IsolatedActions
+ :param distro: The distro whose repoids to return
+ :type distro: str
+ :param major_version: The major version to get distro repoids for.
+ :type major_version: str
+ :param arch: The architecture to get distro repoids for.
+ :type arch: str
+ :return: Repoids of distribution provided repositories
+ :rtype: list[str]
+ """
+
+ if distro == 'rhel':
+ if rhsm.skip_rhsm():
+ return []
+ # Kept this todo here from the original code from
+ # userspacegen._get_rh_available_repoids:
+ # Get the RHSM repos available in the target RHEL container
+ # TODO: very similar thing should happens for all other repofiles in container
+ return rhsm.get_available_repo_ids(context)
+
+ repofiles = repofileutils.get_parsed_repofiles(context)
+ distro_repofiles = _get_distro_repofiles(distro, major_version, arch)
+ if not distro_repofiles:
+ # TODO: a different way of signaling an error would be preferred (e.g. returning None),
+ # but since rhsm.get_available_repo_ids also raises StopActorExecutionError,
+ # let's make it easier for the caller for now and use it too
+ raise StopActorExecutionError(
+ "No known distro provided repofiles mapped",
+ details={
+ "details": "distro: {}, major version: {}, architecture: {}".format(
+ distro, major_version, arch
+ )
+ },
+ )
+
+ distro_repoids = []
+ for rfile in repofiles:
+ if rfile.file in distro_repofiles:
+
+ if not os.path.exists(context.full_path(rfile.file)):
+ api.current_logger().debug(
+ "Expected distribution provided repofile does not exists: {}".format(
+ rfile
+ )
+ )
+ continue
+
+ if rfile.data:
+ distro_repoids.extend([repo.repoid for repo in rfile.data])
+
+ return sorted(distro_repoids)
diff --git a/repos/system_upgrade/common/libraries/tests/test_distro.py b/repos/system_upgrade/common/libraries/tests/test_distro.py
new file mode 100644
index 00000000..3a8f174f
--- /dev/null
+++ b/repos/system_upgrade/common/libraries/tests/test_distro.py
@@ -0,0 +1,154 @@
+import os
+
+import pytest
+
+from leapp.actors import StopActorExecutionError
+from leapp.libraries.common import distro, repofileutils, rhsm
+from leapp.libraries.common.config.architecture import ARCH_ACCEPTED, ARCH_ARM64, ARCH_PPC64LE, ARCH_S390X, ARCH_X86_64
+from leapp.libraries.common.distro import _get_distro_repofiles, get_distro_repoids
+from leapp.libraries.common.testutils import CurrentActorMocked
+from leapp.libraries.stdlib import api
+from leapp.models import RepositoryData, RepositoryFile
+
+_RHEL_REPOFILES = ['/etc/yum.repos.d/redhat.repo']
+_CENTOS_REPOFILES = [
+ "/etc/yum.repos.d/centos.repo", "/etc/yum.repos.d/centos-addons.repo"
+]
+
+
+def test_get_distro_repofiles(monkeypatch):
+ """
+ Test the functionality, not the data.
+ """
+ test_map = {
+ 'distro1': {
+ '8': {
+ 'repofile1': ARCH_ACCEPTED,
+ 'repofile2': [ARCH_X86_64],
+ },
+ '9': {
+ 'repofile3': ARCH_ACCEPTED,
+ },
+ },
+ 'distro2': {
+ '8': {},
+ '9': {
+ 'repofile2': [ARCH_X86_64],
+ 'repofile3': [ARCH_ARM64, ARCH_S390X, ARCH_PPC64LE],
+ },
+ },
+ }
+ monkeypatch.setattr(distro, '_DISTRO_REPOFILES_MAP', test_map)
+
+ # mix of all and specific arch
+ repofiles = _get_distro_repofiles('distro1', '8', ARCH_X86_64)
+ assert repofiles == ['repofile1', 'repofile2']
+
+ # match all but not x86_64
+ repofiles = _get_distro_repofiles('distro1', '8', ARCH_ARM64)
+ assert repofiles == ['repofile1']
+
+ repofiles = _get_distro_repofiles('distro2', '9', ARCH_X86_64)
+ assert repofiles == ['repofile2']
+ repofiles = _get_distro_repofiles('distro2', '9', ARCH_ARM64)
+ assert repofiles == ['repofile3']
+ repofiles = _get_distro_repofiles('distro2', '9', ARCH_S390X)
+ assert repofiles == ['repofile3']
+ repofiles = _get_distro_repofiles('distro2', '9', ARCH_PPC64LE)
+ assert repofiles == ['repofile3']
+
+ # version not mapped
+ repofiles = _get_distro_repofiles('distro2', '8', ARCH_X86_64)
+ assert repofiles is None
+
+ # distro not mapped
+ repofiles = _get_distro_repofiles('distro42', '8', ARCH_X86_64)
+ assert repofiles is None
+
+
+def _make_repo(repoid):
+ return RepositoryData(repoid=repoid, name='name {}'.format(repoid))
+
+
+def _make_repofile(rfile, data=None):
+ if data is None:
+ data = [_make_repo("{}-{}".format(rfile.split("/")[-1], i)) for i in range(3)]
+ return RepositoryFile(file=rfile, data=data)
+
+
+def _make_repofiles(rfiles):
+ return [_make_repofile(rfile) for rfile in rfiles]
+
+
+@pytest.mark.parametrize('other_rfiles', [
+ [],
+ [_make_repofile("foo")],
+ _make_repofiles(["foo", "bar"]),
+])
+@pytest.mark.parametrize(
+ "distro_id,skip_rhsm,distro_rfiles",
+ [
+ ("rhel", True, []),
+ ("rhel", True, _make_repofiles(_RHEL_REPOFILES)),
+ ("rhel", False, _make_repofiles(_RHEL_REPOFILES)),
+ ("centos", True, []),
+ ("centos", True, _make_repofiles(_CENTOS_REPOFILES)),
+ ]
+)
+def test_get_distro_repoids(
+ monkeypatch, distro_id, skip_rhsm, distro_rfiles, other_rfiles
+):
+ """
+ Tests that the correct repoids are returned
+
+ This is a little ugly because on RHEL the get_distro_repoids function still
+ delegates to rhsm.get_available_repo_ids and also has different behavior
+ with skip_rhsm
+ """
+ current_actor = CurrentActorMocked(release_id=distro_id if distro_id else 'rhel')
+ monkeypatch.setattr(api, 'current_actor', current_actor)
+ monkeypatch.setattr(rhsm, 'skip_rhsm', lambda: skip_rhsm)
+
+ repofiles = other_rfiles
+ if distro_rfiles:
+ repofiles.extend(distro_rfiles)
+ monkeypatch.setattr(repofileutils, 'get_parsed_repofiles', lambda x: repofiles)
+
+ distro_repoids = []
+ for rfile in distro_rfiles:
+ distro_repoids.extend([repo.repoid for repo in rfile.data] if rfile else [])
+ distro_repoids.sort()
+
+ monkeypatch.setattr(rhsm, 'get_available_repo_ids', lambda _: distro_repoids)
+ monkeypatch.setattr(os.path, 'exists', lambda f: f in _CENTOS_REPOFILES)
+
+ class MockedContext:
+ def full_path(self, path):
+ return path
+
+ repoids = get_distro_repoids(MockedContext(), distro_id, '9', 'x86_64')
+
+ if distro_id == 'rhel' and skip_rhsm:
+ assert repoids == []
+ else:
+ assert sorted(repoids) == distro_repoids
+
+
+@pytest.mark.parametrize('other_rfiles', [
+ [],
+ [_make_repofile("foo")],
+ _make_repofiles(["foo", "bar"]),
+])
+def test_get_distro_repoids_no_distro_repofiles(monkeypatch, other_rfiles):
+ """
+ Test that exception is thrown when there are no known distro provided repofiles.
+ """
+
+ def mocked_get_distro_repofiles(*args):
+ return []
+
+ monkeypatch.setattr(distro, '_get_distro_repofiles', mocked_get_distro_repofiles)
+ monkeypatch.setattr(repofileutils, "get_parsed_repofiles", lambda x: other_rfiles)
+
+ with pytest.raises(StopActorExecutionError):
+ get_distro_repoids(None, 'somedistro', '8', 'x86_64')
--
2.51.1
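For orientation, the _DISTRO_REPOFILES_MAP introduced above is a plain nested dict keyed by distro, major version, and repofile path, with the value listing the architectures where that file ships, and _get_distro_repofiles() is just a filtered lookup over it. A minimal self-contained sketch of that lookup, with illustrative data rather than the real map:

# Minimal sketch of the distro -> version -> repofile -> arch lookup; the map
# content below is illustrative only, not the actual upstream data.
ARCH_X86_64 = 'x86_64'
ARCH_ACCEPTED = ('x86_64', 'aarch64', 'ppc64le', 's390x')

DISTRO_REPOFILES_MAP = {
    'almalinux': {
        '9': {
            '/etc/yum.repos.d/almalinux-baseos.repo': ARCH_ACCEPTED,
            '/etc/yum.repos.d/almalinux-rt.repo': (ARCH_X86_64,),
        },
    },
}


def get_distro_repofiles(distro, major_version, arch):
    """Return repofile paths mapped for the distro/version/arch, or None if unmapped."""
    version_map = DISTRO_REPOFILES_MAP.get(distro, {}).get(major_version)
    if not version_map:
        return None
    return [path for path, archs in version_map.items() if arch in archs]


print(get_distro_repofiles('almalinux', '9', 'aarch64'))
# ['/etc/yum.repos.d/almalinux-baseos.repo'] - the RT repofile is x86_64 only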

View File

@ -0,0 +1,271 @@
From 03fc6743b8916f23f6a213e3f0fc3020ee141b96 Mon Sep 17 00:00:00 2001
From: Daniel Zatovic <daniel.zatovic@gmail.com>
Date: Wed, 3 Apr 2024 23:42:45 +0200
Subject: [PATCH 18/40] StorageScanner: Add parent device name to lsblk
Modify the StorageInfo model to include path and name of the parent
device. Use StorageScanner to collect this information.
Moreover, fix the lsblk test: there should be a full device path in the
"lsblk -pbnr" output (just names were used in the original test).
---
.../tests/test_inhibitwhenluks.py | 12 +--
.../libraries/storagescanner.py | 29 +++++--
.../tests/unit_test_storagescanner.py | 78 +++++++++++++++----
.../common/models/storageinfo.py | 2 +
.../tests/unit_test_vdoconversionscanner.py | 4 +-
5 files changed, 95 insertions(+), 30 deletions(-)
diff --git a/repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py b/repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py
index fee50f9d..405a3429 100644
--- a/repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py
+++ b/repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py
@@ -5,8 +5,8 @@ from leapp.utils.report import is_inhibitor
def test_actor_with_luks(current_actor_context):
- with_luks = [LsblkEntry(name='luks-132', kname='kname1', maj_min='253:0', rm='0',
- size='10G', bsize=10*(1 << 39), ro='0', tp='crypt', mountpoint='')]
+ with_luks = [LsblkEntry(name='luks-132', kname='kname1', maj_min='253:0', rm='0', size='10G', bsize=10*(1 << 39),
+ ro='0', tp='crypt', mountpoint='', parent_name='', parent_path='')]
current_actor_context.feed(StorageInfo(lsblk=with_luks))
current_actor_context.run()
@@ -16,8 +16,8 @@ def test_actor_with_luks(current_actor_context):
def test_actor_with_luks_ceph_only(current_actor_context):
- with_luks = [LsblkEntry(name='luks-132', kname='kname1', maj_min='253:0', rm='0',
- size='10G', bsize=10*(1 << 39), ro='0', tp='crypt', mountpoint='')]
+ with_luks = [LsblkEntry(name='luks-132', kname='kname1', maj_min='253:0', rm='0', size='10G', bsize=10*(1 << 39),
+ ro='0', tp='crypt', mountpoint='', parent_name='', parent_path='')]
ceph_volume = ['luks-132']
current_actor_context.feed(StorageInfo(lsblk=with_luks))
current_actor_context.feed(CephInfo(encrypted_volumes=ceph_volume))
@@ -26,8 +26,8 @@ def test_actor_with_luks_ceph_only(current_actor_context):
def test_actor_without_luks(current_actor_context):
- without_luks = [LsblkEntry(name='sda1', kname='sda1', maj_min='8:0', rm='0',
- size='10G', bsize=10*(1 << 39), ro='0', tp='part', mountpoint='/boot')]
+ without_luks = [LsblkEntry(name='sda1', kname='sda1', maj_min='8:0', rm='0', size='10G', bsize=10*(1 << 39),
+ ro='0', tp='part', mountpoint='/boot', parent_name='', parent_path='')]
current_actor_context.feed(StorageInfo(lsblk=without_luks))
current_actor_context.run()
diff --git a/repos/system_upgrade/common/actors/storagescanner/libraries/storagescanner.py b/repos/system_upgrade/common/actors/storagescanner/libraries/storagescanner.py
index f15f0d87..cad6bd32 100644
--- a/repos/system_upgrade/common/actors/storagescanner/libraries/storagescanner.py
+++ b/repos/system_upgrade/common/actors/storagescanner/libraries/storagescanner.py
@@ -164,18 +164,31 @@ def _get_mount_info(path):
)
+def _get_lsblk_info_for_devpath(dev_path):
+ lsblk_cmd = ['lsblk', '-nr', '--output', 'NAME,KNAME,SIZE', dev_path]
+ lsblk_info_for_devpath = next(_get_cmd_output(lsblk_cmd, ' ', 3), None)
+
+ return lsblk_info_for_devpath
+
+
@aslist
def _get_lsblk_info():
""" Collect storage info from lsblk command """
- cmd = ['lsblk', '-pbnr', '--output', 'NAME,MAJ:MIN,RM,SIZE,RO,TYPE,MOUNTPOINT']
- for entry in _get_cmd_output(cmd, ' ', 7):
- dev_path, maj_min, rm, bsize, ro, tp, mountpoint = entry
- lsblk_cmd = ['lsblk', '-nr', '--output', 'NAME,KNAME,SIZE', dev_path]
- lsblk_info_for_devpath = next(_get_cmd_output(lsblk_cmd, ' ', 3), None)
+ cmd = ['lsblk', '-pbnr', '--output', 'NAME,MAJ:MIN,RM,SIZE,RO,TYPE,MOUNTPOINT,PKNAME']
+ for entry in _get_cmd_output(cmd, ' ', 8):
+ dev_path, maj_min, rm, bsize, ro, tp, mountpoint, parent_path = entry
+
+ lsblk_info_for_devpath = _get_lsblk_info_for_devpath(dev_path)
if not lsblk_info_for_devpath:
return
-
name, kname, size = lsblk_info_for_devpath
+
+ parent_name = ""
+ if parent_path:
+ parent_info = _get_lsblk_info_for_devpath(parent_path)
+ if parent_info:
+ parent_name, _, _ = parent_info
+
yield LsblkEntry(
name=name,
kname=kname,
@@ -185,7 +198,9 @@ def _get_lsblk_info():
bsize=int(bsize),
ro=ro,
tp=tp,
- mountpoint=mountpoint)
+ mountpoint=mountpoint,
+ parent_name=parent_name,
+ parent_path=parent_path)
@aslist
diff --git a/repos/system_upgrade/common/actors/storagescanner/tests/unit_test_storagescanner.py b/repos/system_upgrade/common/actors/storagescanner/tests/unit_test_storagescanner.py
index 4dc11ea4..456e40ec 100644
--- a/repos/system_upgrade/common/actors/storagescanner/tests/unit_test_storagescanner.py
+++ b/repos/system_upgrade/common/actors/storagescanner/tests/unit_test_storagescanner.py
@@ -255,13 +255,18 @@ def test_get_lsblk_info(monkeypatch):
bytes_per_gb = 1 << 30
def get_cmd_output_mocked(cmd, delim, expected_len):
- if cmd == ['lsblk', '-pbnr', '--output', 'NAME,MAJ:MIN,RM,SIZE,RO,TYPE,MOUNTPOINT']:
+ if cmd == ['lsblk', '-pbnr', '--output', 'NAME,MAJ:MIN,RM,SIZE,RO,TYPE,MOUNTPOINT,PKNAME']:
output_lines_split_on_whitespace = [
- ['vda', '252:0', '0', str(40 * bytes_per_gb), '0', 'disk', ''],
- ['vda1', '252:1', '0', str(1 * bytes_per_gb), '0', 'part', '/boot'],
- ['vda2', '252:2', '0', str(39 * bytes_per_gb), '0', 'part', ''],
- ['rhel_ibm--p8--kvm--03--guest--02-root', '253:0', '0', str(38 * bytes_per_gb), '0', 'lvm', '/'],
- ['rhel_ibm--p8--kvm--03--guest--02-swap', '253:1', '0', str(1 * bytes_per_gb), '0', 'lvm', '[SWAP]']
+ ['/dev/vda', '252:0', '0', str(40 * bytes_per_gb), '0', 'disk', '', ''],
+ ['/dev/vda1', '252:1', '0', str(1 * bytes_per_gb), '0', 'part', '/boot', ''],
+ ['/dev/vda2', '252:2', '0', str(39 * bytes_per_gb), '0', 'part', '', ''],
+ ['/dev/mapper/rhel_ibm--p8--kvm--03--guest--02-root', '253:0', '0', str(38 * bytes_per_gb), '0', 'lvm',
+ '/', ''],
+ ['/dev/mapper/rhel_ibm--p8--kvm--03--guest--02-swap', '253:1', '0', str(1 * bytes_per_gb), '0', 'lvm',
+ '[SWAP]', ''],
+ ['/dev/mapper/luks-01b60fff-a2a8-4c03-893f-056bfc3f06f6', '254:0', '0', str(38 * bytes_per_gb), '0',
+ 'crypt', '', '/dev/nvme0n1p1'],
+ ['/dev/nvme0n1p1', '259:1', '0', str(39 * bytes_per_gb), '0', 'part', '', '/dev/nvme0n1'],
]
for output_line_parts in output_lines_split_on_whitespace:
yield output_line_parts
@@ -269,11 +274,17 @@ def test_get_lsblk_info(monkeypatch):
# We cannot have the output in a list, since the command is called per device. Therefore, we have to map
# each device path to its output.
output_lines_split_on_whitespace_per_device = {
- 'vda': ['vda', 'vda', '40G'],
- 'vda1': ['vda1', 'vda1', '1G'],
- 'vda2': ['vda2', 'vda2', '39G'],
- 'rhel_ibm--p8--kvm--03--guest--02-root': ['rhel_ibm--p8--kvm--03--guest--02-root', 'kname1', '38G'],
- 'rhel_ibm--p8--kvm--03--guest--02-swap': ['rhel_ibm--p8--kvm--03--guest--02-swap', 'kname2', '1G']
+ '/dev/vda': ['vda', 'vda', '40G'],
+ '/dev/vda1': ['vda1', 'vda1', '1G'],
+ '/dev/vda2': ['vda2', 'vda2', '39G'],
+ '/dev/mapper/rhel_ibm--p8--kvm--03--guest--02-root':
+ ['rhel_ibm--p8--kvm--03--guest--02-root', 'kname1', '38G'],
+ '/dev/mapper/rhel_ibm--p8--kvm--03--guest--02-swap':
+ ['rhel_ibm--p8--kvm--03--guest--02-swap', 'kname2', '1G'],
+ '/dev/mapper/luks-01b60fff-a2a8-4c03-893f-056bfc3f06f6':
+ ['luks-01b60fff-a2a8-4c03-893f-056bfc3f06f6', 'dm-0', '38G'],
+ '/dev/nvme0n1p1': ['nvme0n1p1', 'nvme0n1p1', '39G'],
+ '/dev/nvme0n1': ['nvme0n1', 'nvme0n1', '40G'],
}
dev_path = cmd[4]
if dev_path not in output_lines_split_on_whitespace_per_device:
@@ -294,7 +305,9 @@ def test_get_lsblk_info(monkeypatch):
bsize=40 * bytes_per_gb,
ro='0',
tp='disk',
- mountpoint=''),
+ mountpoint='',
+ parent_name='',
+ parent_path=''),
LsblkEntry(
name='vda1',
kname='vda1',
@@ -304,7 +317,9 @@ def test_get_lsblk_info(monkeypatch):
bsize=1 * bytes_per_gb,
ro='0',
tp='part',
- mountpoint='/boot'),
+ mountpoint='/boot',
+ parent_name='',
+ parent_path=''),
LsblkEntry(
name='vda2',
kname='vda2',
@@ -314,7 +329,9 @@ def test_get_lsblk_info(monkeypatch):
bsize=39 * bytes_per_gb,
ro='0',
tp='part',
- mountpoint=''),
+ mountpoint='',
+ parent_name='',
+ parent_path=''),
LsblkEntry(
name='rhel_ibm--p8--kvm--03--guest--02-root',
kname='kname1',
@@ -324,7 +341,9 @@ def test_get_lsblk_info(monkeypatch):
bsize=38 * bytes_per_gb,
ro='0',
tp='lvm',
- mountpoint='/'),
+ mountpoint='/',
+ parent_name='',
+ parent_path=''),
LsblkEntry(
name='rhel_ibm--p8--kvm--03--guest--02-swap',
kname='kname2',
@@ -334,7 +353,34 @@ def test_get_lsblk_info(monkeypatch):
bsize=1 * bytes_per_gb,
ro='0',
tp='lvm',
- mountpoint='[SWAP]')]
+ mountpoint='[SWAP]',
+ parent_name='',
+ parent_path=''),
+ LsblkEntry(
+ name='luks-01b60fff-a2a8-4c03-893f-056bfc3f06f6',
+ kname='dm-0',
+ maj_min='254:0',
+ rm='0',
+ size='38G',
+ bsize=38 * bytes_per_gb,
+ ro='0',
+ tp='crypt',
+ mountpoint='',
+ parent_name='nvme0n1p1',
+ parent_path='/dev/nvme0n1p1'),
+ LsblkEntry(
+ name='nvme0n1p1',
+ kname='nvme0n1p1',
+ maj_min='259:1',
+ rm='0',
+ size='39G',
+ bsize=39 * bytes_per_gb,
+ ro='0',
+ tp='part',
+ mountpoint='',
+ parent_name='nvme0n1',
+ parent_path='/dev/nvme0n1'),
+ ]
actual = storagescanner._get_lsblk_info()
assert expected == actual
diff --git a/repos/system_upgrade/common/models/storageinfo.py b/repos/system_upgrade/common/models/storageinfo.py
index 5bb9caac..71e7459d 100644
--- a/repos/system_upgrade/common/models/storageinfo.py
+++ b/repos/system_upgrade/common/models/storageinfo.py
@@ -43,6 +43,8 @@ class LsblkEntry(Model):
ro = fields.String()
tp = fields.String()
mountpoint = fields.String()
+ parent_name = fields.String()
+ parent_path = fields.String()
class PvsEntry(Model):
diff --git a/repos/system_upgrade/el8toel9/actors/vdoconversionscanner/tests/unit_test_vdoconversionscanner.py b/repos/system_upgrade/el8toel9/actors/vdoconversionscanner/tests/unit_test_vdoconversionscanner.py
index 0745c91d..4d6ef0dc 100644
--- a/repos/system_upgrade/el8toel9/actors/vdoconversionscanner/tests/unit_test_vdoconversionscanner.py
+++ b/repos/system_upgrade/el8toel9/actors/vdoconversionscanner/tests/unit_test_vdoconversionscanner.py
@@ -26,7 +26,9 @@ def _lsblk_entry(prefix, number, types, size='128G', bsize=2 ** 37):
bsize=bsize,
ro='0',
tp=types[random.randint(0, len(types) - 1)],
- mountpoint='')
+ mountpoint='',
+ parent_name='',
+ parent_path='')
@aslist
--
2.47.0
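The PKNAME column added to the lsblk invocation above is what carries the parent device path for each block device. A rough standalone sketch of reading the same 8-column output (assuming the same "lsblk -pbnr" invocation; this is not the actor's actual helper):

import subprocess
from collections import namedtuple

# Hypothetical container mirroring the columns requested from lsblk.
Entry = namedtuple('Entry', 'name maj_min rm bsize ro tp mountpoint parent_path')


def scan_lsblk():
    # -p: full device paths, -b: sizes in bytes, -n: no header, -r: raw (space separated)
    out = subprocess.check_output(
        ['lsblk', '-pbnr', '--output', 'NAME,MAJ:MIN,RM,SIZE,RO,TYPE,MOUNTPOINT,PKNAME'],
        universal_newlines=True)
    entries = []
    for line in out.splitlines():
        parts = line.split(' ')
        if len(parts) != 8:
            continue  # skip anything that does not have all eight columns
        entries.append(Entry(*parts))
    return entries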

View File

@ -1,82 +0,0 @@
From ae9a953dc111c0b14a8b86b3f0aee26cea1f08b4 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Mon, 1 Sep 2025 23:43:53 +0200
Subject: [PATCH 18/55] lib/distro: Add tests for existing
get_distribution_data() function
---
.../common/libraries/tests/test_distro.py | 47 ++++++++++++++++++-
1 file changed, 46 insertions(+), 1 deletion(-)
diff --git a/repos/system_upgrade/common/libraries/tests/test_distro.py b/repos/system_upgrade/common/libraries/tests/test_distro.py
index 3a8f174f..8e866455 100644
--- a/repos/system_upgrade/common/libraries/tests/test_distro.py
+++ b/repos/system_upgrade/common/libraries/tests/test_distro.py
@@ -1,3 +1,4 @@
+import json
import os
import pytest
@@ -5,7 +6,7 @@ import pytest
from leapp.actors import StopActorExecutionError
from leapp.libraries.common import distro, repofileutils, rhsm
from leapp.libraries.common.config.architecture import ARCH_ACCEPTED, ARCH_ARM64, ARCH_PPC64LE, ARCH_S390X, ARCH_X86_64
-from leapp.libraries.common.distro import _get_distro_repofiles, get_distro_repoids
+from leapp.libraries.common.distro import _get_distro_repofiles, get_distribution_data, get_distro_repoids
from leapp.libraries.common.testutils import CurrentActorMocked
from leapp.libraries.stdlib import api
from leapp.models import RepositoryData, RepositoryFile
@@ -15,6 +16,50 @@ _CENTOS_REPOFILES = [
"/etc/yum.repos.d/centos.repo", "/etc/yum.repos.d/centos-addons.repo"
]
+_CUR_DIR = os.path.dirname(os.path.abspath(__file__))
+
+
+@pytest.mark.parametrize('distro', ['rhel', 'centos'])
+def test_get_distribution_data(monkeypatch, distro):
+ common_path = os.path.join(_CUR_DIR, "../../files/", 'distro')
+ monkeypatch.setattr(
+ api,
+ "get_common_folder_path",
+ lambda folder: common_path
+ )
+ data_path = os.path.join(common_path, distro, "gpg-signatures.json")
+
+ def exists_mocked(path):
+ assert path == data_path
+ return True
+
+ monkeypatch.setattr(os.path, 'exists', exists_mocked)
+ ret = get_distribution_data(distro)
+
+ with open(data_path) as fp:
+ assert ret == json.load(fp)
+
+
+@pytest.mark.parametrize('distro', ['rhel', 'centos'])
+def test_get_distribution_data_not_exists(monkeypatch, distro):
+ common_path = os.path.join(_CUR_DIR, "../../files/", 'distro')
+ monkeypatch.setattr(
+ api,
+ "get_common_folder_path",
+ lambda folder: common_path
+ )
+ data_path = os.path.join(common_path, distro, "gpg-signatures.json")
+
+ def exists_mocked(path):
+ assert path == data_path
+ return False
+
+ monkeypatch.setattr(os.path, 'exists', exists_mocked)
+
+ with pytest.raises(StopActorExecutionError) as err:
+ get_distribution_data(distro)
+ assert 'Cannot find distribution signature configuration.' in err
+
def test_get_distro_repofiles(monkeypatch):
"""
--
2.51.1
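The two tests above pin down the contract of get_distribution_data(): it reads distro/<distribution>/gpg-signatures.json from the repository's common files folder and raises StopActorExecutionError when that file is missing. A simplified sketch of that behaviour (the default folder path and the exception class here are stand-ins, not the leapp ones):

import json
import os


class StopActorExecutionError(Exception):
    """Stand-in for leapp.exceptions.StopActorExecutionError."""


def get_distribution_data(distribution, common_folder='files'):
    distributions_path = os.path.join(common_folder, 'distro')
    distribution_config = os.path.join(distributions_path, distribution, 'gpg-signatures.json')
    if os.path.exists(distribution_config):
        with open(distribution_config) as distro_config_file:
            return json.load(distro_config_file)
    raise StopActorExecutionError(
        'Cannot find distribution signature configuration for {}.'.format(distribution))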

View File

@ -1,152 +0,0 @@
From e749dbc430099ac0d0cb06fb9dff4ec458d359b3 Mon Sep 17 00:00:00 2001
From: Daniel Diblik <ddiblik@redhat.com>
Date: Fri, 10 Oct 2025 16:51:34 +0200
Subject: [PATCH 19/55] Disable RHSM tests
Signed-off-by: Daniel Diblik <ddiblik@redhat.com>
---
.packit.yaml | 30 +++++++++++++++---------------
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/.packit.yaml b/.packit.yaml
index 607dff93..3d1cd7ff 100644
--- a/.packit.yaml
+++ b/.packit.yaml
@@ -155,7 +155,7 @@ jobs:
tf_extra_params:
test:
tmt:
- plan_filter: 'tag:8to9 & tag:tier0 & enabled:true'
+ plan_filter: 'tag:8to9 & tag:tier0 & enabled:true & tag:-rhsm'
environments:
- tmt:
context:
@@ -182,7 +182,7 @@ jobs:
tf_extra_params:
test:
tmt:
- plan_filter: 'tag:8to9 & tag:partitioning & enabled:true'
+ plan_filter: 'tag:8to9 & tag:partitioning & enabled:true & tag:-rhsm'
environments:
- tmt:
context:
@@ -209,7 +209,7 @@ jobs:
tf_extra_params:
test:
tmt:
- plan_filter: 'tag:8to9 & tag:kernel-rt & enabled:true'
+ plan_filter: 'tag:8to9 & tag:kernel-rt & enabled:true & tag:-rhsm'
environments:
- tmt:
context:
@@ -232,7 +232,7 @@ jobs:
tf_extra_params:
test:
tmt:
- plan_filter: 'tag:8to9 & tag:tier0 & enabled:true'
+ plan_filter: 'tag:8to9 & tag:tier0 & enabled:true & tag:-rhsm'
environments:
- tmt:
context:
@@ -258,7 +258,7 @@ jobs:
tf_extra_params:
test:
tmt:
- plan_filter: 'tag:8to9 & tag:partitioning & enabled:true'
+ plan_filter: 'tag:8to9 & tag:partitioning & enabled:true & tag:-rhsm'
environments:
- tmt:
context:
@@ -284,7 +284,7 @@ jobs:
tf_extra_params:
test:
tmt:
- plan_filter: 'tag:8to9 & tag:kernel-rt & enabled:true'
+ plan_filter: 'tag:8to9 & tag:kernel-rt & enabled:true & tag:-rhsm'
environments:
- tmt:
context:
@@ -306,7 +306,7 @@ jobs:
tf_extra_params:
test:
tmt:
- plan_filter: 'tag:8to9 & tag:tier0 & enabled:true'
+ plan_filter: 'tag:8to9 & tag:tier0 & enabled:true & tag:-rhsm'
environments:
- tmt:
context:
@@ -332,7 +332,7 @@ jobs:
tf_extra_params:
test:
tmt:
- plan_filter: 'tag:8to9 & tag:partitioning & enabled:true'
+ plan_filter: 'tag:8to9 & tag:partitioning & enabled:true & tag:-rhsm'
environments:
- tmt:
context:
@@ -358,7 +358,7 @@ jobs:
tf_extra_params:
test:
tmt:
- plan_filter: 'tag:8to9 & tag:kernel-rt & enabled:true'
+ plan_filter: 'tag:8to9 & tag:kernel-rt & enabled:true & tag:-rhsm'
environments:
- tmt:
context:
@@ -434,7 +434,7 @@ jobs:
tf_extra_params:
test:
tmt:
- plan_filter: 'tag:9to10 & tag:tier0 & enabled:true'
+ plan_filter: 'tag:9to10 & tag:tier0 & enabled:true & tag:-rhsm'
environments:
- tmt:
context:
@@ -463,7 +463,7 @@ jobs:
tf_extra_params:
test:
tmt:
- plan_filter: 'tag:8to9 & tag:partitioning & enabled:true'
+ plan_filter: 'tag:8to9 & tag:partitioning & enabled:true & tag:-rhsm'
environments:
- tmt:
context:
@@ -489,7 +489,7 @@ jobs:
tf_extra_params:
test:
tmt:
- plan_filter: 'tag:8to9 & tag:kernel-rt & enabled:true'
+ plan_filter: 'tag:8to9 & tag:kernel-rt & enabled:true & tag:-rhsm'
environments:
- tmt:
context:
@@ -514,7 +514,7 @@ jobs:
tf_extra_params:
test:
tmt:
- plan_filter: 'tag:9to10 & tag:tier0 & enabled:true'
+ plan_filter: 'tag:9to10 & tag:tier0 & enabled:true & tag:-rhsm'
environments:
- tmt:
context:
@@ -543,7 +543,7 @@ jobs:
tf_extra_params:
test:
tmt:
- plan_filter: 'tag:8to9 & tag:partitioning & enabled:true'
+ plan_filter: 'tag:8to9 & tag:partitioning & enabled:true & tag:-rhsm'
environments:
- tmt:
context:
@@ -572,7 +572,7 @@ jobs:
tf_extra_params:
test:
tmt:
- plan_filter: 'tag:8to9 & tag:kernel-rt & enabled:true'
+ plan_filter: 'tag:8to9 & tag:kernel-rt & enabled:true & tag:-rhsm'
environments:
- tmt:
context:
--
2.51.1

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,455 @@
From ad241f701b39a81d132105f1a301f2f5546f498a Mon Sep 17 00:00:00 2001
From: Daniel Zatovic <daniel.zatovic@gmail.com>
Date: Tue, 6 Aug 2024 17:26:58 +0200
Subject: [PATCH 20/40] InhibitWhenLuks: allow upgrades for LUKS2 bound to
Clevis TPM2 token
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
So far, upgrades with encrypted drives were not supported. Encrypted
drives require interactively typing unlock passphrases, which is not
suitable for automatic upgrades using Leapp. We add a feature where
systems with all drives configured with an automatic unlock method can
be upgraded.
Currently, we only support drives configured with a Clevis/TPM2 token,
because networking is not configured during the Leapp upgrade (which
rules out NBDE).
We consume the LuksDumps message to decide whether the upgrade process
should be inhibited. If there is at least one LUKS2 device without a
Clevis TPM2 binding, we inhibit the upgrade because we cannot tell
whether the device is part of a more complex storage stack, and a
failure to unlock the device might cause boot problems.
Co-authored-by: Petr Stodůlka <pstodulk@redhat.com>
---
.../common/actors/inhibitwhenluks/actor.py | 38 ++--
.../libraries/inhibitwhenluks.py | 164 +++++++++++++++++
.../tests/test_inhibitwhenluks.py | 169 ++++++++++++++++--
3 files changed, 329 insertions(+), 42 deletions(-)
create mode 100644 repos/system_upgrade/common/actors/inhibitwhenluks/libraries/inhibitwhenluks.py
diff --git a/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py b/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py
index 40b845b0..65607167 100644
--- a/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py
+++ b/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py
@@ -1,40 +1,24 @@
-from leapp import reporting
from leapp.actors import Actor
-from leapp.models import CephInfo, StorageInfo
-from leapp.reporting import create_report, Report
+from leapp.libraries.actor.inhibitwhenluks import check_invalid_luks_devices
+from leapp.models import CephInfo, LuksDumps, StorageInfo, TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks
+from leapp.reporting import Report
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
class InhibitWhenLuks(Actor):
"""
- Check if any encrypted partitions is in use. If yes, inhibit the upgrade process.
+ Check if any encrypted partitions are in use and whether they are supported for the upgrade.
- Upgrading system with encrypted partition is not supported.
+ Upgrading an EL7 system with encrypted partitions is not supported (except Ceph OSDs).
+ For EL8+ it's OK if the discovered encrypted storage in use has the LUKS2 format
+ and is bound to a clevis-tpm2 token (so it can be automatically unlocked
+ during the process).
"""
name = 'check_luks_and_inhibit'
- consumes = (StorageInfo, CephInfo)
- produces = (Report,)
+ consumes = (CephInfo, LuksDumps, StorageInfo)
+ produces = (Report, TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks)
tags = (ChecksPhaseTag, IPUWorkflowTag)
def process(self):
- # If encrypted Ceph volumes present, check if there are more encrypted disk in lsblk than Ceph vol
- ceph_vol = []
- try:
- ceph_info = next(self.consume(CephInfo))
- if ceph_info:
- ceph_vol = ceph_info.encrypted_volumes[:]
- except StopIteration:
- pass
-
- for storage_info in self.consume(StorageInfo):
- for blk in storage_info.lsblk:
- if blk.tp == 'crypt' and blk.name not in ceph_vol:
- create_report([
- reporting.Title('LUKS encrypted partition detected'),
- reporting.Summary('Upgrading system with encrypted partitions is not supported'),
- reporting.Severity(reporting.Severity.HIGH),
- reporting.Groups([reporting.Groups.BOOT, reporting.Groups.ENCRYPTION]),
- reporting.Groups([reporting.Groups.INHIBITOR]),
- ])
- break
+ check_invalid_luks_devices()
diff --git a/repos/system_upgrade/common/actors/inhibitwhenluks/libraries/inhibitwhenluks.py b/repos/system_upgrade/common/actors/inhibitwhenluks/libraries/inhibitwhenluks.py
new file mode 100644
index 00000000..57a94e9d
--- /dev/null
+++ b/repos/system_upgrade/common/actors/inhibitwhenluks/libraries/inhibitwhenluks.py
@@ -0,0 +1,164 @@
+from leapp import reporting
+from leapp.libraries.common.config.version import get_source_major_version
+from leapp.libraries.stdlib import api
+from leapp.models import (
+ CephInfo,
+ DracutModule,
+ LuksDumps,
+ StorageInfo,
+ TargetUserSpaceUpgradeTasks,
+ UpgradeInitramfsTasks
+)
+from leapp.reporting import create_report
+
+# https://red.ht/clevis-tpm2-luks-auto-unlock-rhel8
+# https://red.ht/clevis-tpm2-luks-auto-unlock-rhel9
+# https://red.ht/convert-to-luks2-rhel8
+# https://red.ht/convert-to-luks2-rhel9
+CLEVIS_DOC_URL_FMT = 'https://red.ht/clevis-tpm2-luks-auto-unlock-rhel{}'
+LUKS2_CONVERT_DOC_URL_FMT = 'https://red.ht/convert-to-luks2-rhel{}'
+
+FMT_LIST_SEPARATOR = '\n - '
+
+
+def _formatted_list_output(input_list, sep=FMT_LIST_SEPARATOR):
+ return ['{}{}'.format(sep, item) for item in input_list]
+
+
+def _at_least_one_tpm_token(luks_dump):
+ return any([token.token_type == "clevis-tpm2" for token in luks_dump.tokens])
+
+
+def _get_ceph_volumes():
+ ceph_info = next(api.consume(CephInfo), None)
+ return ceph_info.encrypted_volumes[:] if ceph_info else []
+
+
+def apply_obsoleted_check_ipu_7_8():
+ ceph_vol = _get_ceph_volumes()
+ for storage_info in api.consume(StorageInfo):
+ for blk in storage_info.lsblk:
+ if blk.tp == 'crypt' and blk.name not in ceph_vol:
+ create_report([
+ reporting.Title('LUKS encrypted partition detected'),
+ reporting.Summary('Upgrading system with encrypted partitions is not supported'),
+ reporting.Severity(reporting.Severity.HIGH),
+ reporting.Groups([reporting.Groups.BOOT, reporting.Groups.ENCRYPTION]),
+ reporting.Groups([reporting.Groups.INHIBITOR]),
+ ])
+ break
+
+
+def report_inhibitor(luks1_partitions, no_tpm2_partitions):
+ source_major_version = get_source_major_version()
+ clevis_doc_url = CLEVIS_DOC_URL_FMT.format(source_major_version)
+ luks2_convert_doc_url = LUKS2_CONVERT_DOC_URL_FMT.format(source_major_version)
+ summary = (
+ 'We have detected LUKS encrypted volumes that do not meet current'
+ ' criteria to proceed with the in-place upgrade process.'
+ ' Right now the upgrade process requires encrypted storage to be'
+ ' in LUKS2 format configured with Clevis TPM 2.0.'
+ )
+
+ report_hints = []
+
+ if luks1_partitions:
+
+ summary += (
+ '\n\nSince RHEL 8 the default format for LUKS encryption is LUKS2.'
+ ' Although the old LUKS1 format is still supported on RHEL systems,'
+ ' it has some limitations in comparison to LUKS2.'
+ ' Only the LUKS2 format is supported for upgrades.'
+ ' The following LUKS1 partitions have been discovered on your system:{}'
+ .format(''.join(_formatted_list_output(luks1_partitions)))
+ )
+ report_hints.append(reporting.Remediation(
+ hint=(
+ 'Convert your LUKS1 encrypted devices to LUKS2 and bind it to TPM2 using clevis.'
+ ' If this is not possible in your case consider clean installation'
+ ' of the target RHEL system instead.'
+ )
+ ))
+ report_hints.append(reporting.ExternalLink(
+ url=luks2_convert_doc_url,
+ title='LUKS versions in RHEL: Conversion'
+ ))
+
+ if no_tpm2_partitions:
+ summary += (
+ '\n\nCurrently we require the process to be non-interactive and'
+ ' offline. For this reason we require automatic unlock of'
+ ' encrypted devices during the upgrade process.'
+ ' Currently we support automatic unlocking during the upgrade only'
+ ' for volumes bound to Clevis TPM2 token.'
+ ' The following LUKS2 devices without Clevis TPM2 token'
+ ' have been discovered on your system: {}'
+ .format(''.join(_formatted_list_output(no_tpm2_partitions)))
+ )
+
+ report_hints.append(reporting.Remediation(
+ hint=(
+ 'Add Clevis TPM2 binding to LUKS devices.'
+ ' If some LUKS devices still use the old LUKS1 format, convert'
+ ' them to LUKS2 prior to binding.'
+ )
+ ))
+ report_hints.append(reporting.ExternalLink(
+ url=clevis_doc_url,
+ title='Configuring manual enrollment of LUKS-encrypted volumes by using a TPM 2.0 policy'
+ )
+ )
+ create_report([
+ reporting.Title('Detected LUKS devices unsuitable for in-place upgrade.'),
+ reporting.Summary(summary),
+ reporting.Severity(reporting.Severity.HIGH),
+ reporting.Groups([reporting.Groups.BOOT, reporting.Groups.ENCRYPTION]),
+ reporting.Groups([reporting.Groups.INHIBITOR]),
+ ] + report_hints)
+
+
+def check_invalid_luks_devices():
+ if get_source_major_version() == '7':
+ # NOTE: keeping unchanged behaviour for IPU 7 -> 8
+ apply_obsoleted_check_ipu_7_8()
+ return
+
+ luks_dumps = next(api.consume(LuksDumps), None)
+ if not luks_dumps:
+ api.current_logger().debug('No LUKS volumes detected. Skipping.')
+ return
+
+ luks1_partitions = []
+ no_tpm2_partitions = []
+ ceph_vol = _get_ceph_volumes()
+ for luks_dump in luks_dumps.dumps:
+ # if the device is managed by ceph, don't inhibit
+ if luks_dump.device_name in ceph_vol:
+ api.current_logger().debug('Skipping LUKS CEPH volume: {}'.format(luks_dump.device_name))
+ continue
+
+ if luks_dump.version == 1:
+ luks1_partitions.append(luks_dump.device_name)
+ elif luks_dump.version == 2 and not _at_least_one_tpm_token(luks_dump):
+ no_tpm2_partitions.append(luks_dump.device_name)
+
+ if luks1_partitions or no_tpm2_partitions:
+ report_inhibitor(luks1_partitions, no_tpm2_partitions)
+ else:
+ required_crypt_rpms = [
+ 'clevis',
+ 'clevis-dracut',
+ 'clevis-systemd',
+ 'clevis-udisks2',
+ 'clevis-luks',
+ 'cryptsetup',
+ 'tpm2-tss',
+ 'tpm2-tools',
+ 'tpm2-abrmd'
+ ]
+ api.produce(TargetUserSpaceUpgradeTasks(install_rpms=required_crypt_rpms))
+ api.produce(UpgradeInitramfsTasks(include_dracut_modules=[
+ DracutModule(name='clevis'),
+ DracutModule(name='clevis-pin-tpm2')
+ ])
+ )
diff --git a/repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py b/repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py
index 405a3429..d559b54c 100644
--- a/repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py
+++ b/repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py
@@ -1,34 +1,173 @@
-from leapp.models import CephInfo, LsblkEntry, StorageInfo
+"""
+Unit tests for inhibitwhenluks actor
+
+Skip isort as it's kind of broken when mixing grid import and one line imports
+
+isort:skip_file
+"""
+
+from leapp.libraries.common.config import version
+from leapp.models import (
+ CephInfo,
+ LsblkEntry,
+ LuksDump,
+ LuksDumps,
+ LuksToken,
+ StorageInfo,
+ TargetUserSpaceUpgradeTasks,
+ UpgradeInitramfsTasks
+)
from leapp.reporting import Report
from leapp.snactor.fixture import current_actor_context
from leapp.utils.report import is_inhibitor
+_REPORT_TITLE_UNSUITABLE = 'Detected LUKS devices unsuitable for in-place upgrade.'
-def test_actor_with_luks(current_actor_context):
- with_luks = [LsblkEntry(name='luks-132', kname='kname1', maj_min='253:0', rm='0', size='10G', bsize=10*(1 << 39),
- ro='0', tp='crypt', mountpoint='', parent_name='', parent_path='')]
- current_actor_context.feed(StorageInfo(lsblk=with_luks))
+def test_actor_with_luks1_notpm(monkeypatch, current_actor_context):
+ monkeypatch.setattr(version, 'get_source_major_version', lambda: '8')
+ luks_dump = LuksDump(
+ version=1,
+ uuid='dd09e6d4-b595-4f1c-80b8-fd47540e6464',
+ device_path='/dev/sda',
+ device_name='sda')
+ current_actor_context.feed(LuksDumps(dumps=[luks_dump]))
+ current_actor_context.feed(CephInfo(encrypted_volumes=[]))
current_actor_context.run()
assert current_actor_context.consume(Report)
report_fields = current_actor_context.consume(Report)[0].report
assert is_inhibitor(report_fields)
+ assert not current_actor_context.consume(TargetUserSpaceUpgradeTasks)
+ assert not current_actor_context.consume(UpgradeInitramfsTasks)
+ assert report_fields['title'] == _REPORT_TITLE_UNSUITABLE
+ assert 'LUKS1 partitions have been discovered' in report_fields['summary']
+ assert luks_dump.device_name in report_fields['summary']
-def test_actor_with_luks_ceph_only(current_actor_context):
- with_luks = [LsblkEntry(name='luks-132', kname='kname1', maj_min='253:0', rm='0', size='10G', bsize=10*(1 << 39),
- ro='0', tp='crypt', mountpoint='', parent_name='', parent_path='')]
- ceph_volume = ['luks-132']
- current_actor_context.feed(StorageInfo(lsblk=with_luks))
- current_actor_context.feed(CephInfo(encrypted_volumes=ceph_volume))
+
+def test_actor_with_luks2_notpm(monkeypatch, current_actor_context):
+ monkeypatch.setattr(version, 'get_source_major_version', lambda: '8')
+ luks_dump = LuksDump(
+ version=2,
+ uuid='27b57c75-9adf-4744-ab04-9eb99726a301',
+ device_path='/dev/sda',
+ device_name='sda')
+ current_actor_context.feed(LuksDumps(dumps=[luks_dump]))
+ current_actor_context.feed(CephInfo(encrypted_volumes=[]))
+ current_actor_context.run()
+ assert current_actor_context.consume(Report)
+ report_fields = current_actor_context.consume(Report)[0].report
+ assert is_inhibitor(report_fields)
+ assert not current_actor_context.consume(TargetUserSpaceUpgradeTasks)
+ assert not current_actor_context.consume(UpgradeInitramfsTasks)
+
+ assert report_fields['title'] == _REPORT_TITLE_UNSUITABLE
+ assert 'LUKS2 devices without Clevis TPM2 token' in report_fields['summary']
+ assert luks_dump.device_name in report_fields['summary']
+
+
+def test_actor_with_luks2_invalid_token(monkeypatch, current_actor_context):
+ monkeypatch.setattr(version, 'get_source_major_version', lambda: '8')
+ luks_dump = LuksDump(
+ version=2,
+ uuid='dc1dbe37-6644-4094-9839-8fc5dcbec0c6',
+ device_path='/dev/sda',
+ device_name='sda',
+ tokens=[LuksToken(token_id=0, keyslot=1, token_type='clevis')])
+ current_actor_context.feed(LuksDumps(dumps=[luks_dump]))
+ current_actor_context.feed(CephInfo(encrypted_volumes=[]))
+ current_actor_context.run()
+ assert current_actor_context.consume(Report)
+ report_fields = current_actor_context.consume(Report)[0].report
+ assert is_inhibitor(report_fields)
+
+ assert report_fields['title'] == _REPORT_TITLE_UNSUITABLE
+ assert 'LUKS2 devices without Clevis TPM2 token' in report_fields['summary']
+ assert luks_dump.device_name in report_fields['summary']
+ assert not current_actor_context.consume(TargetUserSpaceUpgradeTasks)
+ assert not current_actor_context.consume(UpgradeInitramfsTasks)
+
+
+def test_actor_with_luks2_clevis_tpm_token(monkeypatch, current_actor_context):
+ monkeypatch.setattr(version, 'get_source_major_version', lambda: '8')
+ luks_dump = LuksDump(
+ version=2,
+ uuid='83050bd9-61c6-4ff0-846f-bfd3ac9bfc67',
+ device_path='/dev/sda',
+ device_name='sda',
+ tokens=[LuksToken(token_id=0, keyslot=1, token_type='clevis-tpm2')])
+ current_actor_context.feed(LuksDumps(dumps=[luks_dump]))
+ current_actor_context.feed(CephInfo(encrypted_volumes=[]))
current_actor_context.run()
assert not current_actor_context.consume(Report)
+ upgrade_tasks = current_actor_context.consume(TargetUserSpaceUpgradeTasks)
+ assert len(upgrade_tasks) == 1
+ assert set(upgrade_tasks[0].install_rpms) == set([
+ 'clevis',
+ 'clevis-dracut',
+ 'clevis-systemd',
+ 'clevis-udisks2',
+ 'clevis-luks',
+ 'cryptsetup',
+ 'tpm2-tss',
+ 'tpm2-tools',
+ 'tpm2-abrmd'
+ ])
+ assert current_actor_context.consume(UpgradeInitramfsTasks)
-def test_actor_without_luks(current_actor_context):
- without_luks = [LsblkEntry(name='sda1', kname='sda1', maj_min='8:0', rm='0', size='10G', bsize=10*(1 << 39),
- ro='0', tp='part', mountpoint='/boot', parent_name='', parent_path='')]
- current_actor_context.feed(StorageInfo(lsblk=without_luks))
+def test_actor_with_luks2_ceph(monkeypatch, current_actor_context):
+ monkeypatch.setattr(version, 'get_source_major_version', lambda: '8')
+ ceph_volume = ['sda']
+ current_actor_context.feed(CephInfo(encrypted_volumes=ceph_volume))
+ luks_dump = LuksDump(
+ version=2,
+ uuid='0edb8c11-1a04-4abd-a12d-93433ee7b8d8',
+ device_path='/dev/sda',
+ device_name='sda',
+ tokens=[LuksToken(token_id=0, keyslot=1, token_type='clevis')])
+ current_actor_context.feed(LuksDumps(dumps=[luks_dump]))
current_actor_context.run()
assert not current_actor_context.consume(Report)
+
+ # make sure we don't needlessly include clevis packages, when there is no clevis token
+ assert not current_actor_context.consume(TargetUserSpaceUpgradeTasks)
+
+
+LSBLK_ENTRY = LsblkEntry(
+ name="luks-whatever",
+ kname="dm-0",
+ maj_min="252:1",
+ rm="0",
+ size="1G",
+ bsize=1073741824,
+ ro="0",
+ tp="crypt",
+ mountpoint="/",
+ parent_name="",
+ parent_path=""
+)
+
+
+def test_inhibitor_on_el7(monkeypatch, current_actor_context):
+ # NOTE(pstodulk): consider it good enough as el7 stuff is going to be removed
+ # soon.
+ monkeypatch.setattr(version, 'get_source_major_version', lambda: '7')
+
+ luks_dump = LuksDump(
+ version=2,
+ uuid='83050bd9-61c6-4ff0-846f-bfd3ac9bfc67',
+ device_path='/dev/sda',
+ device_name='sda',
+ tokens=[LuksToken(token_id=0, keyslot=1, token_type='clevis-tpm2')])
+ current_actor_context.feed(LuksDumps(dumps=[luks_dump]))
+ current_actor_context.feed(CephInfo(encrypted_volumes=[]))
+
+ current_actor_context.feed(StorageInfo(lsblk=[LSBLK_ENTRY]))
+ current_actor_context.run()
+ assert current_actor_context.consume(Report)
+
+ report_fields = current_actor_context.consume(Report)[0].report
+ assert is_inhibitor(report_fields)
+ assert report_fields['title'] == 'LUKS encrypted partition detected'
--
2.47.0
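At its core the new check is a classification of the consumed LUKS dumps: LUKS1 devices and LUKS2 devices without a clevis-tpm2 token inhibit the upgrade, Ceph-managed volumes are skipped, and a clean result instead pulls the clevis/tpm2 tooling into the upgrade initramfs. A condensed sketch of that decision using plain objects instead of leapp models:

def classify_luks_dumps(dumps, ceph_volumes):
    """Split LUKS dumps into the two groups that inhibit the upgrade.

    Each dump is expected to expose .device_name, .version and .tokens
    (tokens having .token_type); ceph_volumes lists device names to skip.
    """
    luks1, no_tpm2 = [], []
    for dump in dumps:
        if dump.device_name in ceph_volumes:
            continue  # Ceph-managed volumes never inhibit
        if dump.version == 1:
            luks1.append(dump.device_name)
        elif dump.version == 2 and not any(
                token.token_type == 'clevis-tpm2' for token in dump.tokens):
            no_tpm2.append(dump.device_name)
    return luks1, no_tpm2

# An inhibitor report is created when either list is non-empty; otherwise the
# clevis/tpm2 packages and dracut modules are requested for the upgrade initramfs.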

View File

@ -1,462 +0,0 @@
From 6a38e8a3373bdc41a04538a090531ba0ccf8fa96 Mon Sep 17 00:00:00 2001
From: karolinku <kkula@redhat.com>
Date: Tue, 14 Oct 2025 15:11:02 +0200
Subject: [PATCH 20/55] Update packit config
Refactor labels and introduce 8.10 -> 9.6 AWS tests.
---
.packit.yaml | 285 ++++++++++++++++++++++++++-------------------------
1 file changed, 143 insertions(+), 142 deletions(-)
diff --git a/.packit.yaml b/.packit.yaml
index 3d1cd7ff..720d07a7 100644
--- a/.packit.yaml
+++ b/.packit.yaml
@@ -104,6 +104,8 @@ jobs:
# is the last RHEL 8 release and all new future tests will start from this
# one release.
+# This job is never triggered - we define abstract anchors that are reused in jobs that 'inherit'
+# them and have actionable triggers.
- &sanity-abstract-8to9
job: tests
trigger: ignore
@@ -116,6 +118,47 @@ jobs:
epel-8-x86_64:
distros: [RHEL-8.10.0-Nightly]
identifier: sanity-abstract-8to9
+ tf_extra_params:
+ test:
+ tmt:
+ plan_filter: 'tag:8to9'
+ environments:
+ - &tmt-env-settings-810to94
+ tmt:
+ context: &tmt-context-810to94
+ distro: "rhel-8.10"
+ distro_target: "rhel-9.4"
+ settings:
+ provisioning:
+ tags:
+ BusinessUnit: sst_upgrades@leapp_upstream_test
+ - &tmt-env-settings-810to96
+ tmt:
+ context: &tmt-context-810to96
+ distro: "rhel-8.10"
+ distro_target: "rhel-9.6"
+ settings:
+ provisioning:
+ tags:
+ BusinessUnit: sst_upgrades@leapp_upstream_test
+ - &tmt-env-settings-810to97
+ tmt:
+ context: &tmt-context-810to97
+ distro: "rhel-8.10"
+ distro_target: "rhel-9.7"
+ settings:
+ provisioning:
+ tags:
+ BusinessUnit: sst_upgrades@leapp_upstream_test
+ - &tmt-env-settings-810to98
+ tmt:
+ context: &tmt-context-810to98
+ distro: "rhel-8.10"
+ distro_target: "rhel-9.8"
+ settings:
+ provisioning:
+ tags:
+ BusinessUnit: sst_upgrades@leapp_upstream_test
- &sanity-abstract-8to9-aws
<<: *sanity-abstract-8to9
@@ -147,7 +190,10 @@ jobs:
# ######################### Individual tests ########################### #
# ###################################################################### #
-# Tests: 8.10 -> 9.4
+# ###################################################################### #
+# ############################# 8.10 > 9.4 ############################# #
+# ###################################################################### #
+
- &sanity-810to94
<<: *sanity-abstract-8to9
trigger: pull_request
@@ -157,15 +203,8 @@ jobs:
tmt:
plan_filter: 'tag:8to9 & tag:tier0 & enabled:true & tag:-rhsm'
environments:
- - tmt:
- context:
- distro: "rhel-8.10"
- distro_target: "rhel-9.4"
- settings:
- provisioning:
- tags:
- BusinessUnit: sst_upgrades@leapp_upstream_test
- env:
+ - *tmt-env-settings-810to94
+ env: &env-810to94
SOURCE_RELEASE: "8.10"
TARGET_RELEASE: "9.4"
LEAPP_TARGET_PRODUCT_CHANNEL: "EUS"
@@ -184,18 +223,9 @@ jobs:
tmt:
plan_filter: 'tag:8to9 & tag:partitioning & enabled:true & tag:-rhsm'
environments:
- - tmt:
- context:
- distro: "rhel-8.10"
- distro_target: "rhel-9.4"
- settings:
- provisioning:
- tags:
- BusinessUnit: sst_upgrades@leapp_upstream_test
+ - *tmt-env-settings-810to94
env:
- SOURCE_RELEASE: "8.10"
- TARGET_RELEASE: "9.4"
- LEAPP_TARGET_PRODUCT_CHANNEL: "EUS"
+ <<: *env-810to94
# On-demand kernel-rt tests
- &kernel-rt-810to94
@@ -212,19 +242,19 @@ jobs:
plan_filter: 'tag:8to9 & tag:kernel-rt & enabled:true & tag:-rhsm'
environments:
- tmt:
- context:
- distro: "rhel-8.10"
- distro_target: "rhel-9.4"
+ context: *tmt-context-810to94
settings:
provisioning:
tags:
BusinessUnit: sst_upgrades@leapp_upstream_test
env:
- SOURCE_RELEASE: "8.10"
- TARGET_RELEASE: "9.4"
- LEAPP_TARGET_PRODUCT_CHANNEL: "EUS"
+ <<: *env-810to94
+
+
+# ###################################################################### #
+# ############################# 8.10 > 9.6 ############################# #
+# ###################################################################### #
-# Tests: 8.10 -> 9.6
- &sanity-810to96
<<: *sanity-abstract-8to9
trigger: pull_request
@@ -234,15 +264,8 @@ jobs:
tmt:
plan_filter: 'tag:8to9 & tag:tier0 & enabled:true & tag:-rhsm'
environments:
- - tmt:
- context:
- distro: "rhel-8.10"
- distro_target: "rhel-9.6"
- settings:
- provisioning:
- tags:
- BusinessUnit: sst_upgrades@leapp_upstream_test
- env:
+ - *tmt-env-settings-810to96
+ env: &env-810to96
SOURCE_RELEASE: "8.10"
TARGET_RELEASE: "9.6"
@@ -260,17 +283,9 @@ jobs:
tmt:
plan_filter: 'tag:8to9 & tag:partitioning & enabled:true & tag:-rhsm'
environments:
- - tmt:
- context:
- distro: "rhel-8.10"
- distro_target: "rhel-9.6"
- settings:
- provisioning:
- tags:
- BusinessUnit: sst_upgrades@leapp_upstream_test
+ - *tmt-env-settings-810to96
env:
- SOURCE_RELEASE: "8.10"
- TARGET_RELEASE: "9.6"
+ <<: *env-810to96
# On-demand kernel-rt tests
- &kernel-rt-810to96
@@ -285,20 +300,37 @@ jobs:
test:
tmt:
plan_filter: 'tag:8to9 & tag:kernel-rt & enabled:true & tag:-rhsm'
+ environments:
+ - *tmt-env-settings-810to96
+ env:
+ <<: *env-810to96
+
+- &sanity-810to96-aws
+ <<: *sanity-abstract-8to9-aws
+ trigger: pull_request
+ targets:
+ epel-8-x86_64:
+ distros: [RHEL-8.10-rhui]
+ identifier: sanity-8.10to9.6-aws
+ tf_extra_params:
+ test:
+ tmt:
+ plan_filter: 'tag:8to9 & tag:rhui-aws-tier0 & enabled:true & tag:-rhsm'
environments:
- tmt:
- context:
- distro: "rhel-8.10"
- distro_target: "rhel-9.6"
+ context: *tmt-context-810to96
settings:
provisioning:
tags:
BusinessUnit: sst_upgrades@leapp_upstream_test
env:
- SOURCE_RELEASE: "8.10"
- TARGET_RELEASE: "9.6"
+ <<: *env-810to96
+
+
+# ###################################################################### #
+# ############################# 8.10 > 9.7 ############################# #
+# ###################################################################### #
-# Tests: 8.10 -> 9.7
- &sanity-810to97
<<: *sanity-abstract-8to9
trigger: pull_request
@@ -308,15 +340,8 @@ jobs:
tmt:
plan_filter: 'tag:8to9 & tag:tier0 & enabled:true & tag:-rhsm'
environments:
- - tmt:
- context:
- distro: "rhel-8.10"
- distro_target: "rhel-9.7"
- settings:
- provisioning:
- tags:
- BusinessUnit: sst_upgrades@leapp_upstream_test
- env:
+ - *tmt-env-settings-810to97
+ env: &env-810to97
SOURCE_RELEASE: "8.10"
TARGET_RELEASE: "9.7"
@@ -334,17 +359,9 @@ jobs:
tmt:
plan_filter: 'tag:8to9 & tag:partitioning & enabled:true & tag:-rhsm'
environments:
- - tmt:
- context:
- distro: "rhel-8.10"
- distro_target: "rhel-9.7"
- settings:
- provisioning:
- tags:
- BusinessUnit: sst_upgrades@leapp_upstream_test
+ - *tmt-env-settings-810to97
env:
- SOURCE_RELEASE: "8.10"
- TARGET_RELEASE: "9.7"
+ <<: *env-810to97
# On-demand kernel-rt tests
- &kernel-rt-810to97
@@ -360,17 +377,9 @@ jobs:
tmt:
plan_filter: 'tag:8to9 & tag:kernel-rt & enabled:true & tag:-rhsm'
environments:
- - tmt:
- context:
- distro: "rhel-8.10"
- distro_target: "rhel-9.7"
- settings:
- provisioning:
- tags:
- BusinessUnit: sst_upgrades@leapp_upstream_test
+ - *tmt-env-settings-810to97
env:
- SOURCE_RELEASE: "8.10"
- TARGET_RELEASE: "9.7"
+ <<: *env-810to97
# ###################################################################### #
# ############################## 9 TO 10 ################################ #
@@ -392,6 +401,38 @@ jobs:
epel-9-x86_64:
distros: [RHEL-9.6.0-Nightly]
identifier: sanity-abstract-9to10
+ tf_extra_params:
+ test:
+ tmt:
+ plan_filter: 'tag:9to10'
+ environments:
+ - &tmt-env-settings-96to100
+ tmt:
+ context: &tmt-context-96to100
+ distro: "rhel-9.6"
+ distro_target: "rhel-10.0"
+ settings:
+ provisioning:
+ tags:
+ BusinessUnit: sst_upgrades@leapp_upstream_test
+ - &tmt-env-settings-97to101
+ tmt:
+ context: &tmt-context-97to101
+ distro: "rhel-9.7"
+ distro_target: "rhel-10.1"
+ settings:
+ provisioning:
+ tags:
+ BusinessUnit: sst_upgrades@leapp_upstream_test
+ - &tmt-env-settings-98to102
+ tmt:
+ context: &tmt-context-98to102
+ distro: "rhel-9.8"
+ distro_target: "rhel-10.2"
+ settings:
+ provisioning:
+ tags:
+ BusinessUnit: sst_upgrades@leapp_upstream_test
- &sanity-abstract-9to10-aws
<<: *sanity-abstract-9to10
@@ -423,7 +464,10 @@ jobs:
# ######################### Individual tests ########################### #
# ###################################################################### #
-# Tests: 9.6 -> 10.0
+# ###################################################################### #
+# ############################# 9.6 > 10.0 ############################# #
+# ###################################################################### #
+
- &sanity-96to100
<<: *sanity-abstract-9to10
trigger: pull_request
@@ -436,15 +480,8 @@ jobs:
tmt:
plan_filter: 'tag:9to10 & tag:tier0 & enabled:true & tag:-rhsm'
environments:
- - tmt:
- context:
- distro: "rhel-9.6"
- distro_target: "rhel-10.0"
- settings:
- provisioning:
- tags:
- BusinessUnit: sst_upgrades@leapp_upstream_test
- env:
+ - *tmt-env-settings-96to100
+ env: &env-96to100
SOURCE_RELEASE: "9.6"
TARGET_RELEASE: "10.0"
@@ -465,17 +502,9 @@ jobs:
tmt:
plan_filter: 'tag:8to9 & tag:partitioning & enabled:true & tag:-rhsm'
environments:
- - tmt:
- context:
- distro: "rhel-9.6"
- distro_target: "rhel-10.0"
- settings:
- provisioning:
- tags:
- BusinessUnit: sst_upgrades@leapp_upstream_test
+ - *tmt-env-settings-96to100
env:
- SOURCE_RELEASE: "9.6"
- TARGET_RELEASE: "10.0"
+ <<: *env-96to100
# On-demand kernel-rt tests
- &kernel-rt-96to100
@@ -491,19 +520,14 @@ jobs:
tmt:
plan_filter: 'tag:8to9 & tag:kernel-rt & enabled:true & tag:-rhsm'
environments:
- - tmt:
- context:
- distro: "rhel-9.6"
- distro_target: "rhel-10.0"
- settings:
- provisioning:
- tags:
- BusinessUnit: sst_upgrades@leapp_upstream_test
+ - *tmt-env-settings-96to100
env:
- SOURCE_RELEASE: "9.6"
- TARGET_RELEASE: "10.0"
+ <<: *env-96to100
+
+# ###################################################################### #
+# ############################# 9.7 > 10.1 ############################# #
+# ###################################################################### #
-# Tests: 9.7 -> 10.1
- &sanity-97to101
<<: *sanity-abstract-9to10
trigger: pull_request
@@ -516,15 +540,8 @@ jobs:
tmt:
plan_filter: 'tag:9to10 & tag:tier0 & enabled:true & tag:-rhsm'
environments:
- - tmt:
- context:
- distro: "rhel-9.7"
- distro_target: "rhel-10.1"
- settings:
- provisioning:
- tags:
- BusinessUnit: sst_upgrades@leapp_upstream_test
- env:
+ - *tmt-env-settings-97to101
+ env: &env-97to101
SOURCE_RELEASE: "9.7"
TARGET_RELEASE: "10.1"
@@ -545,17 +562,9 @@ jobs:
tmt:
plan_filter: 'tag:8to9 & tag:partitioning & enabled:true & tag:-rhsm'
environments:
- - tmt:
- context:
- distro: "rhel-9.7"
- distro_target: "rhel-10.1"
- settings:
- provisioning:
- tags:
- BusinessUnit: sst_upgrades@leapp_upstream_test
+ - *tmt-env-settings-97to101
env:
- SOURCE_RELEASE: "9.7"
- TARGET_RELEASE: "10.1"
+ <<: *env-97to101
# On-demand kernel-rt tests
- &kernel-rt-97to101
@@ -574,14 +583,6 @@ jobs:
tmt:
plan_filter: 'tag:8to9 & tag:kernel-rt & enabled:true & tag:-rhsm'
environments:
- - tmt:
- context:
- distro: "rhel-9.7"
- distro_target: "rhel-10.1"
- settings:
- provisioning:
- tags:
- BusinessUnit: sst_upgrades@leapp_upstream_test
+ - *tmt-env-settings-97to101
env:
- SOURCE_RELEASE: "9.7"
- TARGET_RELEASE: "10.1"
+ <<: *env-97to101
--
2.51.1
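
The .packit.yaml refactor above replaces repeated tmt environment blocks with YAML anchors (&name), aliases (*name) and merge keys (<<:). As a rough, illustrative sketch (not part of any patch; assumes PyYAML is available and the job names are invented for demonstration), the following shows that a job reusing the anchored data loads to the same values as one spelling them out:

import yaml

doc = """
jobs:
  - environments:
      - &tmt-env-settings-810to97
        tmt:
          context: {distro: "rhel-8.10", distro_target: "rhel-9.7"}
        settings:
          provisioning:
            tags: {BusinessUnit: sst_upgrades@leapp_upstream_test}
    env: &env-810to97
      SOURCE_RELEASE: "8.10"
      TARGET_RELEASE: "9.7"
  - environments:
      - *tmt-env-settings-810to97   # alias reuses the anchored mapping as-is
    env:
      <<: *env-810to97              # merge key copies SOURCE/TARGET_RELEASE
"""

jobs = yaml.safe_load(doc)["jobs"]
# Both jobs end up with identical environment settings and env values.
assert jobs[0]["environments"] == jobs[1]["environments"]
assert jobs[0]["env"] == jobs[1]["env"]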

View File

@ -0,0 +1,57 @@
From 8e5fe75e4ee76eb62eb51001c28f1f1443f0a563 Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Fri, 18 Oct 2024 07:13:42 +0200
Subject: [PATCH 21/40] Rename inhibitwhenluks actor to checkluks
The actor nowadays does more than just inhibit the upgrade when
LUKS is detected. Let's rename it to reflect its current behaviour.
---
.../common/actors/{inhibitwhenluks => checkluks}/actor.py | 6 +++---
.../inhibitwhenluks.py => checkluks/libraries/checkluks.py} | 0
.../tests/test_checkluks.py} | 0
3 files changed, 3 insertions(+), 3 deletions(-)
rename repos/system_upgrade/common/actors/{inhibitwhenluks => checkluks}/actor.py (85%)
rename repos/system_upgrade/common/actors/{inhibitwhenluks/libraries/inhibitwhenluks.py => checkluks/libraries/checkluks.py} (100%)
rename repos/system_upgrade/common/actors/{inhibitwhenluks/tests/test_inhibitwhenluks.py => checkluks/tests/test_checkluks.py} (100%)
diff --git a/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py b/repos/system_upgrade/common/actors/checkluks/actor.py
similarity index 85%
rename from repos/system_upgrade/common/actors/inhibitwhenluks/actor.py
rename to repos/system_upgrade/common/actors/checkluks/actor.py
index 65607167..607fd040 100644
--- a/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py
+++ b/repos/system_upgrade/common/actors/checkluks/actor.py
@@ -1,11 +1,11 @@
from leapp.actors import Actor
-from leapp.libraries.actor.inhibitwhenluks import check_invalid_luks_devices
+from leapp.libraries.actor.checkluks import check_invalid_luks_devices
from leapp.models import CephInfo, LuksDumps, StorageInfo, TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks
from leapp.reporting import Report
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
-class InhibitWhenLuks(Actor):
+class CheckLuks(Actor):
"""
Check if any encrypted partitions are in use and whether they are supported for the upgrade.
@@ -15,7 +15,7 @@ class InhibitWhenLuks(Actor):
during the process).
"""
- name = 'check_luks_and_inhibit'
+ name = 'check_luks'
consumes = (CephInfo, LuksDumps, StorageInfo)
produces = (Report, TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks)
tags = (ChecksPhaseTag, IPUWorkflowTag)
diff --git a/repos/system_upgrade/common/actors/inhibitwhenluks/libraries/inhibitwhenluks.py b/repos/system_upgrade/common/actors/checkluks/libraries/checkluks.py
similarity index 100%
rename from repos/system_upgrade/common/actors/inhibitwhenluks/libraries/inhibitwhenluks.py
rename to repos/system_upgrade/common/actors/checkluks/libraries/checkluks.py
diff --git a/repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py b/repos/system_upgrade/common/actors/checkluks/tests/test_checkluks.py
similarity index 100%
rename from repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py
rename to repos/system_upgrade/common/actors/checkluks/tests/test_checkluks.py
--
2.47.0

View File

@ -1,880 +0,0 @@
From 06afa61c2508f18937244787440c709c5ee0a285 Mon Sep 17 00:00:00 2001
From: karolinku <kkula@redhat.com>
Date: Tue, 14 Oct 2025 15:56:11 +0200
Subject: [PATCH 21/55] Update upgrade paths: 8.10 -> 9.8 -> 10.2 with
certificates
Jira: RHEL-108025
---
.packit.yaml | 119 ++++++++++++++++++
.../common/files/prod-certs/10.2/279.pem | 35 ++++++
.../common/files/prod-certs/10.2/362.pem | 36 ++++++
.../common/files/prod-certs/10.2/363.pem | 35 ++++++
.../common/files/prod-certs/10.2/419.pem | 35 ++++++
.../common/files/prod-certs/10.2/433.pem | 35 ++++++
.../common/files/prod-certs/10.2/479.pem | 35 ++++++
.../common/files/prod-certs/10.2/486.pem | 35 ++++++
.../common/files/prod-certs/10.2/72.pem | 35 ++++++
.../common/files/prod-certs/9.8/279.pem | 35 ++++++
.../common/files/prod-certs/9.8/362.pem | 36 ++++++
.../common/files/prod-certs/9.8/363.pem | 35 ++++++
.../common/files/prod-certs/9.8/419.pem | 35 ++++++
.../common/files/prod-certs/9.8/433.pem | 35 ++++++
.../common/files/prod-certs/9.8/479.pem | 35 ++++++
.../common/files/prod-certs/9.8/486.pem | 35 ++++++
.../common/files/prod-certs/9.8/72.pem | 35 ++++++
.../common/files/upgrade_paths.json | 10 +-
18 files changed, 687 insertions(+), 4 deletions(-)
create mode 100644 repos/system_upgrade/common/files/prod-certs/10.2/279.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/10.2/362.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/10.2/363.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/10.2/419.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/10.2/433.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/10.2/479.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/10.2/486.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/10.2/72.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.8/279.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.8/362.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.8/363.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.8/419.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.8/433.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.8/479.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.8/486.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.8/72.pem
diff --git a/.packit.yaml b/.packit.yaml
index 720d07a7..0c3f682a 100644
--- a/.packit.yaml
+++ b/.packit.yaml
@@ -381,6 +381,60 @@ jobs:
env:
<<: *env-810to97
+# ###################################################################### #
+# ############################# 8.10 > 9.8 ############################# #
+# ###################################################################### #
+
+- &sanity-810to98
+ <<: *sanity-abstract-8to9
+ trigger: pull_request
+ identifier: sanity-8.10to9.8
+ tf_extra_params:
+ test:
+ tmt:
+ plan_filter: 'tag:8to9 & tag:tier0 & enabled:true'
+ environments:
+ - *tmt-env-settings-810to98
+ env: &env-810to98
+ SOURCE_RELEASE: "8.10"
+ TARGET_RELEASE: "9.8"
+
+# On-demand minimal beaker tests
+- &beaker-minimal-810to98
+ <<: *beaker-minimal-8to9-abstract-ondemand
+ trigger: pull_request
+ labels:
+ - beaker-minimal
+ - beaker-minimal-8.10to9.8
+ - 8.10to9.8
+ identifier: sanity-8.10to9.8-beaker-minimal-ondemand
+ tf_extra_params:
+ test:
+ tmt:
+ plan_filter: 'tag:8to9 & tag:partitioning & enabled:true'
+ environments:
+ - *tmt-env-settings-810to98
+ env:
+ <<: *env-810to98
+
+# On-demand kernel-rt tests
+- &kernel-rt-810to98
+ <<: *kernel-rt-abstract-8to9-ondemand
+ trigger: pull_request
+ labels:
+ - kernel-rt
+ - kernel-rt-8.10to9.8
+ - 8.10to9.8
+ identifier: sanity-8.10to9.8-kernel-rt-ondemand
+ tf_extra_params:
+ test:
+ tmt:
+ plan_filter: 'tag:8to9 & tag:kernel-rt & enabled:true'
+ environments:
+ - *tmt-env-settings-810to98
+ env:
+ <<: *env-810to98
+
# ###################################################################### #
# ############################## 9 TO 10 ################################ #
# ###################################################################### #
@@ -586,3 +640,68 @@ jobs:
- *tmt-env-settings-97to101
env:
<<: *env-97to101
+
+
+# ###################################################################### #
+# ############################# 9.8 > 10.2 ############################# #
+# ###################################################################### #
+
+- &sanity-98to102
+ <<: *sanity-abstract-9to10
+ trigger: pull_request
+ identifier: sanity-9.8to10.2
+ targets:
+ epel-9-x86_64:
+ distros: [RHEL-9.8.0-Nightly]
+ tf_extra_params:
+ test:
+ tmt:
+ plan_filter: 'tag:9to10 & tag:tier0 & enabled:true & tag:-rhsm'
+ environments:
+ - *tmt-env-settings-98to102
+ env: &env-98to102
+ SOURCE_RELEASE: "9.8"
+ TARGET_RELEASE: "10.2"
+
+# On-demand minimal beaker tests
+- &beaker-minimal-98to102
+ <<: *beaker-minimal-9to10-abstract-ondemand
+ trigger: pull_request
+ labels:
+ - beaker-minimal
+ - beaker-minimal-9.8to10.2
+ - 9.8to10.2
+ identifier: sanity-9.8to10.2-beaker-minimal-ondemand
+ targets:
+ epel-9-x86_64:
+ distros: [RHEL-9.8-Nightly]
+ tf_extra_params:
+ test:
+ tmt:
+ plan_filter: 'tag:8to9 & tag:partitioning & enabled:true & tag:-rhsm'
+ environments:
+ - *tmt-env-settings-98to102
+ env:
+ <<: *env-98to102
+
+# On-demand kernel-rt tests
+- &kernel-rt-98to102
+ <<: *kernel-rt-abstract-9to10-ondemand
+ trigger: pull_request
+ labels:
+ - kernel-rt
+ - kernel-rt-9.8to10.2
+ - 9.8to10.2
+ identifier: sanity-9.8to10.2-kernel-rt-ondemand
+ targets:
+ epel-9-x86_64:
+ distros: [RHEL-9.8-Nightly]
+ tf_extra_params:
+ test:
+ tmt:
+ plan_filter: 'tag:8to9 & tag:kernel-rt & enabled:true & tag:-rhsm'
+ environments:
+ - *tmt-env-settings-98to102
+ env:
+ <<: *env-98to102
+
diff --git a/repos/system_upgrade/common/files/prod-certs/10.2/279.pem b/repos/system_upgrade/common/files/prod-certs/10.2/279.pem
new file mode 100644
index 00000000..76336f82
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/10.2/279.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGKDCCBBCgAwIBAgIJALDxRLt/tVBkMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI1MDcwODExMjQ0NloXDTQ1MDcw
+ODExMjQ0NlowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFsyOWJlMDI0
+My03NGU1LTRiNDctYjEwNy1iZjhkNjRjYmNjNDhdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBsTCBrjAJBgNVHRMEAjAAMEMGDCsGAQQBkggJAYIXAQQzDDFSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIFBvd2VyLCBsaXR0bGUgZW5kaWFuMBYGDCsG
+AQQBkggJAYIXAgQGDAQxMC4yMBkGDCsGAQQBkggJAYIXAwQJDAdwcGM2NGxlMCkG
+DCsGAQQBkggJAYIXBAQZDBdyaGVsLTEwLHJoZWwtMTAtcHBjNjRsZTANBgkqhkiG
+9w0BAQsFAAOCAgEAGouNVN3DdCE2cqv3lKMNC3jit5mHi7QQt7fqN/KX4fQfArb6
+IQ9M0GaGJM8W9rj+9s+Q9LOFjys24Pcdb9qbQWpfwvn9FY60uQw3TIXAerJaVb98
+doxrFHjVptm0/VX2xnOa/dY97dmMT4Amwe5+y4RYlMEsYqY8dpJkVuKNdGtCg+Uf
+f9hb6XjDqRevADgskHNprXrjF65Ib3a92qJRfttnVUfqqeDkTPntIPbau9hZwLeR
+oMl8pn4kMIYLz1IolSAC8yBFe9sLxllGu8qIFqH4Efzx8BOtHkPUH/VqtgvUej+j
+boJ0EEpwYjvYbz00mZmJHFNkUheW6cDUPWmMoTzYibPzRTrBcAIfvybpeuPjFGfl
+gYZa/DpEG68hlEnSxB4TNpVCx9qfiqXvNcukmeX3Jr7DS1uC2ePBFDQKewx6WdAa
+bAmuANmBUB+NX1WMuNTfxxIzxfIoShaChiFRVjsRTkLo1ZPuMkvXOXYfyfW1PKQN
+PXHEdY9wprn8ZY2qhMwmE1sDdndNpSxB3boI9FQBUVDzbSG6KwbPfSdmrte+Wdrh
+QCIGU+0x7ulF68yOkMkz1spPNgrTXt0efaCSWqUK0nqv1s1Gh2Q6iJaE0yETpSG7
+hFeHpENftckpmuKcJM0v/uBBeIX7X8plrL7Fkm4ND/e61tEiDwvnhxGhtBE=
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/10.2/362.pem b/repos/system_upgrade/common/files/prod-certs/10.2/362.pem
new file mode 100644
index 00000000..ebeb065c
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/10.2/362.pem
@@ -0,0 +1,36 @@
+-----BEGIN CERTIFICATE-----
+MIIGNzCCBB+gAwIBAgIJALDxRLt/tVBTMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI1MDcwODExMjQzMVoXDTQ1MDcw
+ODExMjQzMVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFthMmU1N2Ix
+MS03ZDBiLTRiNGYtOGE5ZC03MmRkNGM2NDA2NzJdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBwDCBvTAJBgNVHRMEAjAAMEgGDCsGAQQBkggJAYJqAQQ4DDZSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIFBvd2VyLCBsaXR0bGUgZW5kaWFuIEJldGEw
+GwYMKwYBBAGSCAkBgmoCBAsMCTEwLjIgQmV0YTAZBgwrBgEEAZIICQGCagMECQwH
+cHBjNjRsZTAuBgwrBgEEAZIICQGCagQEHgwccmhlbC0xMCxyaGVsLTEwLWJldGEt
+cHBjNjRsZTANBgkqhkiG9w0BAQsFAAOCAgEAgQC6qqfeW79hSj2S+OfBT5ccNNZa
+8blVYtVJU5iTfhX29VQTdGus/ROWGqfgDe8MMOCJyt0eNeqGO70KRJsT3pGeeKcM
+UbDfdqzopgD7+/6IL1c1fw/8HijOorW+CMAzeOBdnjMwRjhZxcDlFSqxNCWtngnp
+XlDMIlUR3m0rlBwzNfUMk7KYPUESmyEiBWMSKmqRDeiUg3BSP6Ci0x3Ufnf0xTBv
+VPVKO/h3ta3+pAYzeFy/ageJ/sR9tLRZQZXzvxYvIY+8/EehafPJCHDHH3uCTpdZ
+JAeXDLf2QcOBZnl8uONdev+KaE1JFRCRmqwhliUsARv/t24CY+UBoEzzaj/py2bR
+RQqfE5WI1JSdj6HoQ6YHbtR6SF+UedfvMQoSF4zPiXAPNebiIiLkc1rtb/ycUi1f
+bUjkRfgRqlDwUcgfHrKhSDp5/XhjgxVXiESNcDe2ltKvVr09qAaPBarLolWeIXkN
+n2csdFxyiDZIhk6tFL8lUtpmXWpeEn/iBPwaiBIYoBnIbaqN4OZngwfi2QtTdl+s
+9iCuYgbGQiEZnV3g7HLsYXrAagPuJxXs0FMYJZ8x6biREgUQATwTzZMQ8vWRMmYY
+kteQBaOCDzNpb8OUgbPxgncl9kgr4NIBn+5oGeMitb+I1XvWqoCFsA7Uii6oygdk
+iE+YZEA6e/4057M=
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/10.2/363.pem b/repos/system_upgrade/common/files/prod-certs/10.2/363.pem
new file mode 100644
index 00000000..865fbda6
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/10.2/363.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGKTCCBBGgAwIBAgIJALDxRLt/tVBSMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI1MDcwODExMjQzMVoXDTQ1MDcw
+ODExMjQzMVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs0NjJhNDRj
+ZC1jNWUzLTQzZGItYTExNy0zZjA5ZGU1ZDRmMzNdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBsjCBrzAJBgNVHRMEAjAAMDoGDCsGAQQBkggJAYJrAQQqDChSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIEFSTSA2NCBCZXRhMBsGDCsGAQQBkggJAYJr
+AgQLDAkxMC4yIEJldGEwGQYMKwYBBAGSCAkBgmsDBAkMB2FhcmNoNjQwLgYMKwYB
+BAGSCAkBgmsEBB4MHHJoZWwtMTAscmhlbC0xMC1iZXRhLWFhcmNoNjQwDQYJKoZI
+hvcNAQELBQADggIBAC/KEEZ85rdWnL/CK9q3uT/d4reNZc1WD5oWYcpj+J31u4sw
+pjAvmq/eA6DmzqGjhfEGhwu5MDbVg77OAPCcfm7qqGSDcnjqnO3ZogDjyzat1WS5
+J2uuRcPbF6DIk/LkgIc/FgvSFG8Vc93hM+P56wTzTbnPYSRyJq3BBm8ZjSiFO5jq
+V9WOganzxsVKzifTK8RoSdWLyB0JpvL/LZKa4G97ahUctYVilhJBHCgd+uT6/IVn
+ppETnw4xo6SXg0+O+fC1P+90+GZrWWzeHeHnEgmZ8B+RTDQbx/KHQHU4UhqU5qnT
+6VngqL1453IxmlxVxwKlkwzV4SYrQnmEZPvugMhlenbx0T9pJvwg/xvWYJJTGjUy
+1l9p0LtyUHmFJxtbq50++oooUdDtQ6RDD5jtxnvWMF5PFLYGxf6gXFFCJVSgwonP
+BtqoBH2PWp8/nwumAOquzks41m+bqzaMALhp0GUGTKKTITrM4gsLVHqKh2WTCOPs
+s6mdXOyVma/o5Jri8Ec12/HGyIRlQQleb6vcC68PK3X088LZi/zENi2Bq31W5Hip
+R03YxVzmjZA3kJsA8Vim4zaG7e6puLGuXmQLawN7oScBFlvVLvZD2ycZsYLOesCz
+VSxJkmqDMb6To9RRbSmN0csPFKWNkdD8D5iBei4IaGWXyOB3GGJJ2ME/Qv65
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/10.2/419.pem b/repos/system_upgrade/common/files/prod-certs/10.2/419.pem
new file mode 100644
index 00000000..42986ccc
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/10.2/419.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGGjCCBAKgAwIBAgIJALDxRLt/tVBjMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI1MDcwODExMjQ0NloXDTQ1MDcw
+ODExMjQ0NlowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs0MjIwNzhj
+OS1mY2MzLTQwMWQtOGM2Yi0yZGUwNWRmZGEyN2NdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBozCBoDAJBgNVHRMEAjAAMDUGDCsGAQQBkggJAYMjAQQlDCNSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIEFSTSA2NDAWBgwrBgEEAZIICQGDIwIEBgwE
+MTAuMjAZBgwrBgEEAZIICQGDIwMECQwHYWFyY2g2NDApBgwrBgEEAZIICQGDIwQE
+GQwXcmhlbC0xMCxyaGVsLTEwLWFhcmNoNjQwDQYJKoZIhvcNAQELBQADggIBAAvn
+gY6rJIku70uFkod9Xc45wNgPNMkim2p+dCOQXl5GG7rSLqBp/e6MhBOu+VfvXddF
+zEndO84pb7Evc3PjjtnBietvvcmcoJjMTrGF2oKcJWJP+x/SKMzN2qPPoQu4QoZj
+OTuaemuHLCkA9cnvRj2qxW9ZpINmr6H72jCHPoYGWD8Omupnctyt3/uu/MG7KT4y
+8B5hXLmFeuF1vgOkKnoqjZRgZ86xsJ4dig/vLWkAKdsWPlRlV0SICwgVALqFmTge
+Hgrz0A6F2BM7f0vYNFUTRv0qQwHR7EA/jEHCQByNc73cvDtHZFyODTqvEBoLFVOw
+2fad9K5EID1GKj9U1NGYAlAvEpbrgs2Xd2ugFyN5mtbSLon+VeXm5q9fB/Ca0j7z
+vvfdoKsd89R822m2Y+HB0eei63zGE6Ykr4aaTQNjQyTu5K8pUNG/y5UGWIpSM1IR
+YqOsdJvCyavBlQ98K7OfL9yqOiZFXB9VkmXPPiT1ljNgpYzK63ZWidjXkpG2I7g1
+YoCIT0JE5xX6x2U5Ia79OFug/g9SwQn6izVYrLCgqqNqeld0WokeFBPnyZkXSYt1
+pzY4HAjXjaDGbF1O4SmoCTtagB2vNmi1wUPazizA5SESifVcYfPeaWRk10PJT9MR
+p3EFR/BSg/hvmehuGSEfRNFV8g9Deo3EN1LHEhTY
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/10.2/433.pem b/repos/system_upgrade/common/files/prod-certs/10.2/433.pem
new file mode 100644
index 00000000..932dbf7a
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/10.2/433.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGLDCCBBSgAwIBAgIJALDxRLt/tVBUMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI1MDcwODExMjQzMVoXDTQ1MDcw
+ODExMjQzMVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFsxNzhhMzJi
+NC0xZWNjLTRmZDEtOTA2NS0wMGZkMjQzZDEzYzBdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBtTCBsjAJBgNVHRMEAjAAMEEGDCsGAQQBkggJAYMxAQQxDC9SZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIElCTSB6IFN5c3RlbXMgQmV0YTAbBgwrBgEE
+AZIICQGDMQIECwwJMTAuMiBCZXRhMBcGDCsGAQQBkggJAYMxAwQHDAVzMzkweDAs
+BgwrBgEEAZIICQGDMQQEHAwacmhlbC0xMCxyaGVsLTEwLWJldGEtczM5MHgwDQYJ
+KoZIhvcNAQELBQADggIBAAUwQwSc0A1Q5SiC7N5xSS1ZegZQT1hER7SRDs5p6drK
+Riayu5bER7kpQnJc/ww1/iTmHHH/H180pSP+EZEPqCLumqYmf1vW60SAR4BMklyh
+QuYqVkJCxA7uloA59cLZcPnEu+xHLfnhSQdTIXhi1uLK960mEIiexCT8xMkQ5E5A
+ZUajyEhdLp4ca8K+nUWzSzYQBpGYpkiQtniLZ/i4kzaYTfHpFGJNQQCrPlB2lMCa
+vZKseaPlFzExXfq5MJ5IX1lc2RNqeaf22p49Bia6CgVLMagsFnAr909zZ9NAaZWV
+kYqjLVMJ5EY25OJS21So0fI//lOsRVBxlfqOS7v9hYBnuLhPuiIiHEaNcQyNBI/7
+DgT5xCmL8IDzvsBJLZ/AqolO1fo5lSVOZ5PCbwIZj7bBZJwf8gTSUu2cuhbN2Gxi
+s7R2QFVco+AAPcuoWOISG4cKwX4wDUR+rHqQMCKJM6mQGlnB2OXBwZX1fYo7k82d
+b7BygRhEML6INaweUe2Do7v8phz6TXM2lFJCQYnja2lO6GxSlaXgRNb4Rnc6ty79
+O5S6K2g3uEc4Uc8F7echBFAudl9KQqu9il9cb3f0fI+kYX2j9ib4isdF8qIusZVp
+F191fHyl1Y6pp4eWKA48uO8Op8uO320UIX8HQnNGi74eEOvCqvZtfKZE5+Za/YT+
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/10.2/479.pem b/repos/system_upgrade/common/files/prod-certs/10.2/479.pem
new file mode 100644
index 00000000..2c4b8db2
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/10.2/479.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGGDCCBACgAwIBAgIJALDxRLt/tVBmMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI1MDcwODExMjQ0NloXDTQ1MDcw
+ODExMjQ0NlowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs5OTUxYjVm
+NC0yZTE4LTQ1OGEtYTc4ZC05NGNkZDhkN2I1ZWVdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBoTCBnjAJBgNVHRMEAjAAMDUGDCsGAQQBkggJAYNfAQQlDCNSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIHg4Nl82NDAWBgwrBgEEAZIICQGDXwIEBgwE
+MTAuMjAYBgwrBgEEAZIICQGDXwMECAwGeDg2XzY0MCgGDCsGAQQBkggJAYNfBAQY
+DBZyaGVsLTEwLHJoZWwtMTAteDg2XzY0MA0GCSqGSIb3DQEBCwUAA4ICAQDUoyHm
+MM07NncLVGO9xO6qa3cxy8NyJO95LScKCAzlx3dHrgW+v1CcxZEzMsV6Rtpch0kH
+De61DOCn50JgQ6s7e6Cxk6nMovcxYMi5q4JxYoIlCFTbeRRGdTQv8SmndmSd7rh2
+6mDUWoDxbw1fpeNxiWAOq8IQXrcmrEnVIpOQP4Fc+yNw/Sdsqz23o9VBlP0yBJ4W
+a6zGCwRzcisLsNOc+8mRtuirG11Zqm07V0xt2YVXlV13Wu/Dy0qKW49tPJD8WceO
+hCC/alSRh1s4YV50gVlA0IRyyezAwU/0Al+lMKfMeqqedg81QGMBiy6qzDjXllcK
+XfKYsWC2egkofpvxb5jVU0EXdl0kE+RGQfK3fVq09YwNim41n9qgJTlA1vIBrq8o
+1NMwyrbQdfndyGZLSpzWxLHpYUCe2lJomgJTNvrA6+xTnlpfEPOn2zDUxJ7CSfoQ
+ZkPhdO4UsrvJOPLt5oY5R5Q6tXLVR7xL24WeUw5FXtzFMibOaE3kT9ib0o8zluMS
+ly290tfnl8Wq7fgjFT8mt0NIH/rXC4COBw87EjLbhxUCbEHnbJiOj+JT2QRxKjWg
+9icCBbU5TEY0V8rC+vx54JCcx8NGaJDDKDmv6tgEOA0u9YEpGw44fk6RxqeNaysW
+glkF2dUoSBDKWSqiroYrjEgaFWvdSaalOSJQuA==
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/10.2/486.pem b/repos/system_upgrade/common/files/prod-certs/10.2/486.pem
new file mode 100644
index 00000000..181b7a98
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/10.2/486.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGJzCCBA+gAwIBAgIJALDxRLt/tVBVMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI1MDcwODExMjQzMVoXDTQ1MDcw
+ODExMjQzMVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs2OWQ5ZGY5
+Yy1mMGFmLTRjY2UtYTRhMi0zZDA4MDM1YjJmYjFdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBsDCBrTAJBgNVHRMEAjAAMDoGDCsGAQQBkggJAYNmAQQqDChSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIHg4Nl82NCBCZXRhMBsGDCsGAQQBkggJAYNm
+AgQLDAkxMC4yIEJldGEwGAYMKwYBBAGSCAkBg2YDBAgMBng4Nl82NDAtBgwrBgEE
+AZIICQGDZgQEHQwbcmhlbC0xMCxyaGVsLTEwLWJldGEteDg2XzY0MA0GCSqGSIb3
+DQEBCwUAA4ICAQA00Q5BALEU5y1CJwF7ru1xNujrtjZvwOdXeNl+esZIqlZQqITP
+Rr6Xww0/mcMcvqEHu/PlJ2xyWC8VYrhZ+/LC6EtTbPEKSDEAHE914MU934pC02tP
+QE+a7BKsHPGhh4SyvMrZ0vWoxnwcug5g8V5uXNOQYSgnOAHdNQxMeMh8LCHO76np
+fjWL7en5dUMWHOB9W1kyZO87f2WBGhFrTyNnFTcg99G/MNMkMD5rLc+Qg8GhY1Zt
+8+AN4c5HprFI1cUz8/4osj2ZBW1xxH+mcps2oy3L8UNFceiAdewVpTmwlBN0HEUk
+3+NB64+QXLf13EowJnAunJrVms+bQbB1Y2zOL1ymiCLF6iQu4mIdEP2yqzk7lowa
+RmuxEOI/S279n+YtilUuWKoeaLcGqPd0rPS5B01M049+KXW0Vv/6OOakA0rltB76
++RBeE4UTnPCOIBfyVCHdoCTDFaI5GavVZGTr1bLQR9FdIRzQs+nx3VUYf6o2ZHOW
+R1I794GHADaLwNfD5b5oo1XwIkuDxcvrF5kFlhnI3X9cVFDhk6uvMTzKEHPsdoYY
+Oe2PdTNfyaiAZs5RzE7If+DAK1zCHrO3GHN4tRyQEwG5p/1F91iw2/Kj67zosH38
+Wvm4FSL0ENRPIIUt+p0zT4FBPXOr4YwQGBn0PuaIob5mymAdbUI6Q3CHqA==
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/10.2/72.pem b/repos/system_upgrade/common/files/prod-certs/10.2/72.pem
new file mode 100644
index 00000000..3d15c146
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/10.2/72.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGGTCCBAGgAwIBAgIJALDxRLt/tVBlMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI1MDcwODExMjQ0NloXDTQ1MDcw
+ODExMjQ0NlowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFtmM2M4ZTQ0
+OC1lYmY4LTQxN2MtOTI5My01ZmE5NjU2YTI4YjJdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBojCBnzAJBgNVHRMEAjAAMDsGCysGAQQBkggJAUgBBCwMKlJlZCBIYXQg
+RW50ZXJwcmlzZSBMaW51eCBmb3IgSUJNIHogU3lzdGVtczAVBgsrBgEEAZIICQFI
+AgQGDAQxMC4yMBYGCysGAQQBkggJAUgDBAcMBXMzOTB4MCYGCysGAQQBkggJAUgE
+BBcMFXJoZWwtMTAscmhlbC0xMC1zMzkweDANBgkqhkiG9w0BAQsFAAOCAgEAMT/B
+VjMEkJIvwAShBe9XhWj1lNvd58VQNaet8eMebCOy2CN32v50zquH9QgQH4Sf/aBm
+X8HfQWl23zAQApCjMr2Sawmjmy05Oj7UGghkbANDvwHV2VKg1nOIoy4oaRvQj86m
+Hn7g0t4Iz1/kTCioZkRgj1PULeDKa7u/GKiYgpc1HVjxUUwJsC2JQwjZ1CwRsNPc
+AV6sDLveJn0doggYrxbC/+9oGYSxxUrkvaPzMmuvHa5F50NHuwgcNTL47uVkglIV
++GBQaBaOq9c/8yWbqLVVDbXu1JD6zgzGj6BYiziJEpU7cqYfCOF9qPIYTD9AnZLx
+43LHz33E6dRRCD9yTuMQEHE3uUoFi/G+yQvf/paSddE5FBX2d35jPSKk5um/x30g
+EiFhQKSuHqWIz/cfucwFBQJRHIPj/yN93RqE9u+uJQrSk8KorEg3fVTumBT6bTYh
+QprOvJBrV6UZg7oHnUC9byiyHzHRHktHv2HOPGbywbIZd0TM5R0KWaEQEVg0OAJG
+KgwEeuiEufQZGq29EZTEtyDpDIP9wNiC4pBHe9B1UpE6EdzfoZWlJb6wbUMRtTqw
+RS1ijNAFzvYy2Yuz0/aRi163qek95YwoXeeZn2QbDN+YgFjJZq6pHjNxYTyDthos
+uWfveDk3xJRFp+Ja5WbgEK9FxzdFz34OZKFlre4=
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.8/279.pem b/repos/system_upgrade/common/files/prod-certs/9.8/279.pem
new file mode 100644
index 00000000..8757b9b0
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.8/279.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGJTCCBA2gAwIBAgIJALDxRLt/tVAaMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI1MDcwODExMjI0MloXDTQ1MDcw
+ODExMjI0MlowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFtkZDJiN2Jk
+OS01NmJkLTQ3YzctOWQxOS1jNWYyNmE5YWQwZTJdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBrjCBqzAJBgNVHRMEAjAAMEMGDCsGAQQBkggJAYIXAQQzDDFSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIFBvd2VyLCBsaXR0bGUgZW5kaWFuMBUGDCsG
+AQQBkggJAYIXAgQFDAM5LjgwGQYMKwYBBAGSCAkBghcDBAkMB3BwYzY0bGUwJwYM
+KwYBBAGSCAkBghcEBBcMFXJoZWwtOSxyaGVsLTktcHBjNjRsZTANBgkqhkiG9w0B
+AQsFAAOCAgEAEzlRfJCOY32tUEuMY4Z/cBUoJjM3/IoKY+7tA1xjzNWzL0hPWXcP
+Ifb2iYXG7rINxRkGk56PiFYReGjxlkhkUWRiciAZc6oHfbpw3IyeWnCgSHJXQqTc
+kUEWcUG0CJOlE9OBegvqK+PSydRx2JYhaAFza3zMvIYrdtmOvqhP3/GvuN+nnI4G
+F7GgJkOyalbaSTOWlH2+wxuXeAnlEtUTytRFBEsBywuyi6NIpBL6Oj+QoBFQdCOE
+Ot2Q3v0N4Q5+aiu5UsYPHs97NV8DPkuA0I2qDZr9j/PgxwftbMt14QHG+G9LW3Cz
+DSRIXeKfXGo0GbR7E4ZZBLpp/3LMmH5w/K13skoGtnfWC5x/yoHFRPGmSb1Rrzx2
+kre8EMrXrFFZn4hXu/huQwLTxpg8Hn5pPzDphEksTKQxLeUF0lRj5b3NtqJbQ4he
+NDBAA9cgpifdfaFO8Ax/zppiUeoEizAyst4FFGMDC5u4EFPNQJLjh6vc/2rvP1bk
+KwH2FRxd/jyCcu6bEF4Fv/O/dpddkYtmSPQs3DLX9g9N30uOdOp9TM3W9lt/HFQE
+VpqG7mXTu+f4hx5LFqJXR1pSLzCjVPl03sVi05rjD0Tjkt//pRybpzf/66wMQ1wE
+LWoT869L+7EiL5aSPE3dX7D6IsNzqHvIPKuFAO8T2ZXdiwidAlpXlyA=
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.8/362.pem b/repos/system_upgrade/common/files/prod-certs/9.8/362.pem
new file mode 100644
index 00000000..cb1b7c00
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.8/362.pem
@@ -0,0 +1,36 @@
+-----BEGIN CERTIFICATE-----
+MIIGNDCCBBygAwIBAgIJALDxRLt/tVAGMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI1MDcwODExMjIxOVoXDTQ1MDcw
+ODExMjIxOVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFswMGIwYzc0
+MS0xMDQyLTRiZGUtOTYyYy1kZjRjOGVlMmNiNjBdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBvTCBujAJBgNVHRMEAjAAMEgGDCsGAQQBkggJAYJqAQQ4DDZSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIFBvd2VyLCBsaXR0bGUgZW5kaWFuIEJldGEw
+GgYMKwYBBAGSCAkBgmoCBAoMCDkuOCBCZXRhMBkGDCsGAQQBkggJAYJqAwQJDAdw
+cGM2NGxlMCwGDCsGAQQBkggJAYJqBAQcDBpyaGVsLTkscmhlbC05LWJldGEtcHBj
+NjRsZTANBgkqhkiG9w0BAQsFAAOCAgEAvtSvgFTBFe30H/WQcgvDjJe2ucROmr6B
+AW3OF3hvamcwciLMjzMgVyf4dwRDCsKL0q9cRmFXlMR0H36iNQnYkZU1p/sWfCIB
+HtPDPlSr3miELB6FTvod/L4zn+CqbjgN2D3wJJKVfldbQzOTV3kEFed96yB8exTV
+ObdCIzyadhtULog9mtUCe+8IxG8oDzpjAaaYfwkyq6tY3VzbvRS76292yFVQe6rG
+wc9kxhwCfprnvzH7+dTlbMJlvk7PQB7xH1CvSmrIf7C5tfLf/BrsygFtqnq8KLTx
+v644hMGkOvMBdEw5Ry3jMPAlmL+Eyc5751XkN3b5yujXA+T71t1/F0i99DM8XTO8
+WovLAH4KjX+gvHugdsEQs0ujRpxPDgkv9/RFWs0kkBgzhUlFqOGBsi3HyGoqq770
+/e4Fvnj/XxHzs4G3FgiyGnsKLOaKm7eFTwhePsscIckGr/6oq7U0VQF1xOc77I7n
+uPFdSXso5TUUO2UVhqmeq71hhj000wpw4vKQ71rEfgTtMiC7Et93hpk4y4iwuk9w
+mDGTksyr50QNgS9ZNWGLu2JejT3s9RcjROEJ6VOWJxorDWxEY/LXl683FtRXPEM2
+UjHyhx8twhxbIlcD3a8S0R4BfcWCLvhtpdnmOtFGACYMaYd9TAdOG/AZoc/jBOpy
+s2OKIQwKXPY=
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.8/363.pem b/repos/system_upgrade/common/files/prod-certs/9.8/363.pem
new file mode 100644
index 00000000..fa09ec7c
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.8/363.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGJjCCBA6gAwIBAgIJALDxRLt/tVAFMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI1MDcwODExMjIxOVoXDTQ1MDcw
+ODExMjIxOVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs0MTFmZDc4
+NC00ZTc4LTQ5YWQtOTNiOC0zNjc2OWY0ZDFlZTVdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBrzCBrDAJBgNVHRMEAjAAMDoGDCsGAQQBkggJAYJrAQQqDChSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIEFSTSA2NCBCZXRhMBoGDCsGAQQBkggJAYJr
+AgQKDAg5LjggQmV0YTAZBgwrBgEEAZIICQGCawMECQwHYWFyY2g2NDAsBgwrBgEE
+AZIICQGCawQEHAwacmhlbC05LHJoZWwtOS1iZXRhLWFhcmNoNjQwDQYJKoZIhvcN
+AQELBQADggIBAFzGv13LCdYf3XkOt3TjwVwY2ldFVMxf2Sx/uNjLFG4I1mhYwZZ9
+0Pyz7J771yMxyhyKb8rc8XMAYxi8lOKfOpp1PpPRVC+NtKo2pdrbZhWy2qKomfyL
+S6jN/hEgg7P6LHGEnvT1Bm9e+BoED3gmOVAmupL4xKv2eRxgXuwuPHrvE6oo63SB
+xtrYIo/pmYgVFgl/d7X5vXqerF4pwLR2DwtK6O84DSyVRf35ghNET09GYm6G+URQ
+eGWi1/h0YCpS9LCXOOOv/J4MM8zr+NLbDyJWxmaG83/zvAQhX65bzJ0bBtb0avJ0
+cgos6LBCDxt+kmipnAMqz5Cb+HVifgdBz1ep3EcoxHwmwBDpHewq0zNtPgMyjzhi
+uwB0inlcCk7JKdjdO36H7RdUYvrM7WEDUKAXtMgOXxr3o6h9v9jZKTfbk5Af91/D
+epoMULy0sErnEuzHAq9sdh3HTmDTHsMNcUpxwC+93VGaCGGrbyM2yQtdLg7dhHQK
+7d9Z9BJEzKReIy+R354M1jQsLGLQ3B8uY476dmP0G0Q01m86rsJ/gjxa8vrJpafO
+t1Up9YexwbVtEtKG7koCz4fwxPv2cauGncuUTdyHJDoS5FpPLMlaWXAfwD0Udbiv
+gZke/PD+39I+UPrxtM+XIXGoJPeZdM5Kv0+3/suvKHGqtkFa8YiK2EHA
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.8/419.pem b/repos/system_upgrade/common/files/prod-certs/9.8/419.pem
new file mode 100644
index 00000000..9ad33fd1
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.8/419.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGFzCCA/+gAwIBAgIJALDxRLt/tVAZMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI1MDcwODExMjI0MloXDTQ1MDcw
+ODExMjI0MlowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFtiOWY4OWYx
+OC0xNjAzLTRlZDUtYTFmOS0xN2YyMmEwNDdlODNdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBoDCBnTAJBgNVHRMEAjAAMDUGDCsGAQQBkggJAYMjAQQlDCNSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIEFSTSA2NDAVBgwrBgEEAZIICQGDIwIEBQwD
+OS44MBkGDCsGAQQBkggJAYMjAwQJDAdhYXJjaDY0MCcGDCsGAQQBkggJAYMjBAQX
+DBVyaGVsLTkscmhlbC05LWFhcmNoNjQwDQYJKoZIhvcNAQELBQADggIBAFu1LxAU
+wPUs7/e6mFAzT/JIzq1fQ6kMHxbrf9nS+g9pO0G0z7Imwj85Rk7qOpJpEjKaLVDy
+OJ9fXV20TNidKDvjZuL+F209NLam64MEpy7q40LgOaDYsr9tvGfJJFAQRKRszbrC
+/CGj7l/q09ym1FB3LXgpn4vHR8jD7LloGFEWq19UpRLe+L+1Frh16apy03WNYTnk
+JLALo274I3+kgO69sEemXZF+WD/O+4ySugY/ImbrIlrY1SAeAWTd/kudLMLVLYnN
+JlmB7OPUGE2ZAR0aOTvTeoDBZPz1EGItbJg2hlx4vrhrnGG9kKu+/cDOOAJ7+bgx
+fgc64NOoLTSc+9QIgKKhDt5jShXHfFjpwWbJ08/U29bTZmntcRO0h6F0WBS3ptgW
+hocfN2nDN7pPvivnrUUo+kRY7jKE57im3+mznHHw97em6YCREuvc/NwLIxi4LSiU
+cJgOQ3ltljrFSMKlv4p6evMxlX/QOwgeE+hf/QYjCODoHe/66h5bnKkLGnFdPHxk
+6btQfVePn8UpMUO64OgIcPuGyAEXu1m9N/PFL3S5JUVmfjF9COhmZQEW1x5HBF/u
+mAfwI79++xKH1nmVsgDUjm5HMVZ3qj0y3miAKtC3Ses9Ym6JawpvPSld3xFGF5Mc
+BiYQsv12swSrLy3IzdvJViXRFUFE3dWuVdG1
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.8/433.pem b/repos/system_upgrade/common/files/prod-certs/9.8/433.pem
new file mode 100644
index 00000000..eaf93d90
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.8/433.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGKTCCBBGgAwIBAgIJALDxRLt/tVAHMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI1MDcwODExMjIxOVoXDTQ1MDcw
+ODExMjIxOVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFsyYjk5MzMx
+OC0xZjFiLTRlY2UtODJiMS0wODEzYmFjOGFkNGJdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBsjCBrzAJBgNVHRMEAjAAMEEGDCsGAQQBkggJAYMxAQQxDC9SZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIElCTSB6IFN5c3RlbXMgQmV0YTAaBgwrBgEE
+AZIICQGDMQIECgwIOS44IEJldGEwFwYMKwYBBAGSCAkBgzEDBAcMBXMzOTB4MCoG
+DCsGAQQBkggJAYMxBAQaDBhyaGVsLTkscmhlbC05LWJldGEtczM5MHgwDQYJKoZI
+hvcNAQELBQADggIBACjKAgygB5V2mVD3S5snrHjFTpP2x1ODX1GdSgwrXFZ/ehom
+hf1SI9zIsmm+s96ns/Gc5BDrqS2TSil083OqG5YZ98ap+NkQPI/XqIhMRSw2z+QK
+p1i7e/Si6VZyiitnutCrbX/b1RzWCSOAfIU2m41iptvT7HATw0y/6CQpQNrhZ3wR
+TubEIEypmxO5stJt4CO/bqkU3vX+U3FdQdSJWJn3qpvErJ4qNFdwl8zX9WGoaueh
+gNbYrz2EWARVbvedp+ylB1VNdpYXQ+LUI/KwHI4Sxizgg16+IxcFoKJVCYNOH7Wh
+IoMZc7eW91oAzm57yS36RF/Z50S1x8JHHg2hgev+2czDG9dgRTsLvvAXqsnrUHuD
+lRPMDjgaSooUWJmKwIXQ7yJDAPHoxZAXWtMEc1kNLZGEPVDQbT73j4eDOxzZDZrr
+agWGoWJ3kuY9AVvv/RTi6z5VWs7ySJER7RxQcGhH8TctysW7gIMjHfgnTGN2bW5U
+mV5Ds+/i9AiA9/V+rWWsv8riz+MfEa23/J/EvOdBBCd5MuzsqkXn2gde8WP3cjes
+sgqUKQzOy7Rqr5LHT1IQl5SkyYr1QV1InghJ8dh7BjRLvWUaw0uqPRvxX1c6K1l/
+NFsCie9RwuhdE8OBwHuBjB28k3Zs9SPaVzYRe70qwi0epbCrhwcGOkTNfCcz
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.8/479.pem b/repos/system_upgrade/common/files/prod-certs/9.8/479.pem
new file mode 100644
index 00000000..a0ff7061
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.8/479.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGFTCCA/2gAwIBAgIJALDxRLt/tVAcMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI1MDcwODExMjI0MloXDTQ1MDcw
+ODExMjI0MlowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs4ZmM5YTM4
+Ni0wYzkzLTQ0MjctYTlhOC1kMTdkMzAwZGJmZDZdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBnjCBmzAJBgNVHRMEAjAAMDUGDCsGAQQBkggJAYNfAQQlDCNSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIHg4Nl82NDAVBgwrBgEEAZIICQGDXwIEBQwD
+OS44MBgGDCsGAQQBkggJAYNfAwQIDAZ4ODZfNjQwJgYMKwYBBAGSCAkBg18EBBYM
+FHJoZWwtOSxyaGVsLTkteDg2XzY0MA0GCSqGSIb3DQEBCwUAA4ICAQC6vt8EEay9
+CjdRGHmseXosuo03ub+bUt61uYVpf15IoVUV+7XT6ZHff8cPZbKBjoRbuWNILvR2
+rCdl11bm3prCxfLNJh5u8hqNXv+iIB4k4qhCSrhPFQEf3HNJma2J67U/8Mt7oM4B
+RqpZ1CCw9VTHQSB+iraKzE+BFr9kNlQfZu75Clsgv5dZaT1WK5hKiuQy8kc2CBKy
+CuiL6i0PK2tzNtNH4ON/tMU3AM+edIiUFV6C376kewwO/omArY6FYmJVcPLKWh3h
+TSUt81CmaHmyW+XKJ2pM3f2hfHdq1Lf7lInjgw5Rolyhm/Xqrrj8j19SrUSru/tw
+WcmLMhhEyU2/jwfipbbzB9AC3tIXZjKv8539e4omsBmHwHQno1NAjq0+alGxr9pK
+AZywsuMhiGyznbYdIANGZyMUN3sULIsG649UcEsmzM5q9g1TVyuJH9m+OJSK2PGk
+UnorgDlGs1AiJhsqZuW8zxzy3nfQmniO/o/6wZbqlKiyLjQY7Fxa4Rb0hXbBJkZ7
+TkHkjlAObUEkcjg0jUHb8sFRQ7hXx+Tk4tGk549crSZCCg951SITV5By9bAxm7fu
+DHGXgY7tOwHII51sfBfryuvIKs+JmzF9Evzssf3kLBSXylyS6pr/8dKN6sF7Pw4M
+Fe/gvJ3J/pARSVP41wR6tI0zYvqkO/ULQg==
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.8/486.pem b/repos/system_upgrade/common/files/prod-certs/9.8/486.pem
new file mode 100644
index 00000000..84461ed8
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.8/486.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGJDCCBAygAwIBAgIJALDxRLt/tVAIMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI1MDcwODExMjIxOVoXDTQ1MDcw
+ODExMjIxOVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFtkZjUwNWIw
+ZS02Y2E4LTRkODQtOTY0Mi0wNGRlYTg5NjY0MzNdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBrTCBqjAJBgNVHRMEAjAAMDoGDCsGAQQBkggJAYNmAQQqDChSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIHg4Nl82NCBCZXRhMBoGDCsGAQQBkggJAYNm
+AgQKDAg5LjggQmV0YTAYBgwrBgEEAZIICQGDZgMECAwGeDg2XzY0MCsGDCsGAQQB
+kggJAYNmBAQbDBlyaGVsLTkscmhlbC05LWJldGEteDg2XzY0MA0GCSqGSIb3DQEB
+CwUAA4ICAQCNeDKnDjuvHBE0Fa0cMAFM96OWn3b3wTtAx7i9BCaCe4NsRTjEMJTw
+jO4KwkfzCQWpOJuFHnOKmVZBXVZZ8aOLCfYTyQZ6nZ1UsiekL7l9sdIFCHrbgKkL
+zBFe+8cVtUMwb9f7fORPp960ImtYpvt8ERihHf2LvIBUI+BGRz/D/3NW1ehVggGI
+0VCe270sgLSl3y8lR5BrNXtKbigeI4mNMasndf/TDMFeCk5OH4FJ+DyiY0zma2IT
+x0PwQmEeI4my1KTmQSUDgIOmHtKbq1tCR2wMIh/ju/HOrvVeOnzEsBgQTiTh92qJ
+U7/++7Ayqw/6PfPCd+gOMqIPS1+Aef7w54I+zWyYeHQcXXCxcffPwO7sZV3zQJyh
+clfLJv13Oe6G5mB7ZCH+tB4LdaVBURz9G0MkLwXGfTWfnU5N61Kne8hjOriSBWP4
+2FZEP+2BQ/1Z7aIssbQKegdRvvMd/KqJjIeiFtrz9AVSodEUZgJlxiZ9KDSysG18
+hmZcPuk2mc9nwWQ9gHZWzatGs+uONS92QqFvXxlY7TWMDIdlscubcjV/bbDHm69P
++pqGilb3zJz8msBwFpdO+h4l8eUMMMsLzdUdH499q/enZrH3VSdmNtWtoVm9R7rp
+khFJ4DdORE9/P5lfqAObt8KNO72BQ2/KcK0FZ1lLxKWG/4dZ5oAdGw==
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.8/72.pem b/repos/system_upgrade/common/files/prod-certs/9.8/72.pem
new file mode 100644
index 00000000..724e0a62
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.8/72.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGFjCCA/6gAwIBAgIJALDxRLt/tVAbMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI1MDcwODExMjI0MloXDTQ1MDcw
+ODExMjI0MlowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFtlYjMyYzY1
+Ny00OGY0LTRiZjUtYmY3Yy1mYjMwNWU1YjgyMDFdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBnzCBnDAJBgNVHRMEAjAAMDsGCysGAQQBkggJAUgBBCwMKlJlZCBIYXQg
+RW50ZXJwcmlzZSBMaW51eCBmb3IgSUJNIHogU3lzdGVtczAUBgsrBgEEAZIICQFI
+AgQFDAM5LjgwFgYLKwYBBAGSCAkBSAMEBwwFczM5MHgwJAYLKwYBBAGSCAkBSAQE
+FQwTcmhlbC05LHJoZWwtOS1zMzkweDANBgkqhkiG9w0BAQsFAAOCAgEADCKqieee
+Hvj06J4U23K/Wr5zn+d6AtA2vfhpicAYh5jzYqLAJHmB9T5Ql6pFqJ9lMLI2EGSg
+jhLD+lzDP9A2vk+rFWK0BEGnqlPrQtM5atTBeihRVRci1ymspPBrLwu+Zu3jromg
+I14r86EZwSXpPZLaNUsOjOi4Euc50Q3wsUJGvXCpoU4SgnnAIER3lq9HSNFDZkmp
+AjW+VHAhPIOTujm9PhCFIn5bB0jsygHHYyqV7KvQSmxoPTaLMxFpva+Xy0QNKlwg
+NXKw/JYAHX1yaskeZviqwZzhKpnvycyEgWF9f7cBD6O8Adxx9qkqXqer7YsQ/wgR
+cHjGCAKbV2OTIgyQEDie1gdPLdSUPzrbzJ9C1I85tSJH3ujdACiGG/aHPtspLb3Z
+M6265fbXDbXOqjFuP/njDUqal3WgUgw34w4Xi2JLCcqLvHLQhTmZSKiD0SJbRDL1
+smcle/yKhTc4+7zJqQV8faR9LVEAkaLzjG3ZRiTUDq4RASr9tN/A0AfXqggG9nGL
+06m6QcXRxHM0OVgLHLksKsj3rG3VX0v3aQm353GW1sxxX0hqFnoOnGWA410GUG9S
+rg897hshyti1pn045uhhFjbpxYRKu/JY9VNNyRW0KqL1hyz4TY7OQxJxGDAPX7uJ
+7NGSWW9EsYMZNMxEee6br9lWVwGWnc8DWhA=
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/upgrade_paths.json b/repos/system_upgrade/common/files/upgrade_paths.json
index 22e0fd7d..61bb73c0 100644
--- a/repos/system_upgrade/common/files/upgrade_paths.json
+++ b/repos/system_upgrade/common/files/upgrade_paths.json
@@ -2,9 +2,10 @@
"rhel": {
"default": {
"7.9": ["8.10"],
- "8.10": ["9.4", "9.6", "9.7"],
+ "8.10": ["9.4", "9.6", "9.7", "9.8"],
"9.6": ["10.0"],
"9.7": ["10.1"],
+ "9.8": ["10.2"],
"7": ["8.10"],
"8": ["9.4", "9.6"],
"9": ["10.0"]
@@ -15,6 +16,7 @@
"8.10": ["9.6", "9.4"],
"8": ["9.6", "9.4"],
"9.6": ["10.0"],
+ "9.8": ["10.2"],
"9": ["10.0"]
}
},
@@ -25,13 +27,13 @@
},
"_virtual_versions": {
"8": "8.10",
- "9": "9.7",
- "10": "10.1"
+ "9": "9.8",
+ "10": "10.2"
}
},
"almalinux": {
"default": {
- "8.10": ["9.0", "9.1", "9.2", "9.3", "9.4", "9.5", "9.6", "9.7"],
+ "8.10": ["9.0", "9.1", "9.2", "9.3", "9.4", "9.5", "9.6", "9.7","9.8"],
"9.7": ["10.0", "10.1"]
}
}
--
2.51.1
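
The patch above extends repos/system_upgrade/common/files/upgrade_paths.json with the new 8.10 -> 9.8 and 9.8 -> 10.2 paths. As a minimal sketch of how such a distro/flavour/source-version mapping can be queried (illustrative only; the function, file handling and major-version fallback are assumptions, not leapp's actual lookup code):

import json

def supported_targets(path, distro, flavour, source_version):
    """Return the list of target versions allowed for the given source."""
    with open(path) as f:
        mapping = json.load(f)[distro][flavour]
    # Prefer an exact "major.minor" match, then fall back to the bare major.
    major = source_version.split(".")[0]
    return mapping.get(source_version, mapping.get(major, []))

# With the data shown in the diff above, one would expect e.g.:
#   supported_targets("upgrade_paths.json", "rhel", "default", "9.8") == ["10.2"]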

View File

@ -1,48 +0,0 @@
From b41d5b386f3369cf714cff9f3277863f6f601bc1 Mon Sep 17 00:00:00 2001
From: denli <denli@redhat.com>
Date: Mon, 6 Oct 2025 12:25:07 -0400
Subject: [PATCH 22/55] Add kpatch actor to el9toel10
---
.../actors/kernel/checkkpatch/actor.py | 29 +++++++++++++++++++
1 file changed, 29 insertions(+)
create mode 100644 repos/system_upgrade/el9toel10/actors/kernel/checkkpatch/actor.py
diff --git a/repos/system_upgrade/el9toel10/actors/kernel/checkkpatch/actor.py b/repos/system_upgrade/el9toel10/actors/kernel/checkkpatch/actor.py
new file mode 100644
index 00000000..e7f6179c
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/kernel/checkkpatch/actor.py
@@ -0,0 +1,29 @@
+from leapp.actors import Actor
+from leapp.libraries.common.rpms import has_package
+from leapp.libraries.stdlib import api
+from leapp.models import CopyFile, DistributionSignedRPM, TargetUserSpacePreupgradeTasks
+from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
+
+PLUGIN_PKGNAME = "kpatch-dnf"
+CONFIG_PATH = "/etc/dnf/plugins/kpatch.conf"
+
+
+class CheckKpatch(Actor):
+ """
+ Carry over kpatch-dnf and its config into the container
+
+ Check if the kpatch-dnf plugin is installed and, if it is, install it and copy
+ over the config file so that the plugin can make a decision on whether any
+ kpatch-patch packages need to be installed during the in-place upgrade.
+ """
+
+ name = 'check_kpatch'
+ consumes = (DistributionSignedRPM,)
+ produces = (TargetUserSpacePreupgradeTasks,)
+ tags = (IPUWorkflowTag, ChecksPhaseTag)
+
+ def process(self):
+ if has_package(DistributionSignedRPM, PLUGIN_PKGNAME):
+ api.produce(TargetUserSpacePreupgradeTasks(
+ install_rpms=[PLUGIN_PKGNAME],
+ copy_files=[CopyFile(src=CONFIG_PATH)]))
--
2.51.1

View File

@ -0,0 +1,172 @@
From 5e6d176ab685f2e85ac1aea9533b04d46f25e9b7 Mon Sep 17 00:00:00 2001
From: tomasfratrik <tomasfratrik8@gmail.com>
Date: Tue, 18 Jun 2024 10:22:35 +0200
Subject: [PATCH 22/40] Fix IPU being blocked by resource limitations
The first resource limit is the maximum number of open file descriptors,
the second one the maximum writable file size. Unit tests are added as
well.
Resolves: RHEL-26459 and RHEL-16881
---
commands/command_utils.py | 38 ++++++++++++++++++
commands/preupgrade/__init__.py | 2 +
commands/tests/test_upgrade_paths.py | 60 ++++++++++++++++++++++++++++
commands/upgrade/__init__.py | 3 ++
4 files changed, 103 insertions(+)
diff --git a/commands/command_utils.py b/commands/command_utils.py
index 4f6f99eb..2810a542 100644
--- a/commands/command_utils.py
+++ b/commands/command_utils.py
@@ -1,6 +1,7 @@
import json
import os
import re
+import resource
from leapp.exceptions import CommandError
from leapp.utils import path
@@ -140,3 +141,40 @@ def vet_upgrade_path(args):
flavor=flavor,
choices=','.join(supported_target_versions)))
return (target_release, flavor)
+
+
+def set_resource_limits():
+ """
+ Set resource limits for the maximum number of open file descriptors and the maximum writable file size.
+
+ :raises: `CommandError` if the resource limits cannot be set
+ """
+
+ def set_resource_limit(resource_type, soft, hard):
+ rtype_string = (
+ 'open file descriptors' if resource_type == resource.RLIMIT_NOFILE
+ else 'writable file size' if resource_type == resource.RLIMIT_FSIZE
+ else 'unknown resource'
+ )
+ try:
+ resource.setrlimit(resource_type, (soft, hard))
+ except ValueError as err:
+ raise CommandError(
+ 'Failure occurred while attempting to set soft limit higher than the hard limit. '
+ 'Resource type: {}, error: {}'.format(rtype_string, err)
+ )
+ except OSError as err:
+ raise CommandError(
+ 'Failed to set resource limit. Resource type: {}, error: {}'.format(rtype_string, err)
+ )
+
+ soft_nofile, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
+ soft_fsize, _ = resource.getrlimit(resource.RLIMIT_FSIZE)
+ nofile_limit = 1024*16
+ fsize_limit = resource.RLIM_INFINITY
+
+ if soft_nofile < nofile_limit:
+ set_resource_limit(resource.RLIMIT_NOFILE, nofile_limit, nofile_limit)
+
+ if soft_fsize != fsize_limit:
+ set_resource_limit(resource.RLIMIT_FSIZE, fsize_limit, fsize_limit)
diff --git a/commands/preupgrade/__init__.py b/commands/preupgrade/__init__.py
index 5a89069f..a9fa40e0 100644
--- a/commands/preupgrade/__init__.py
+++ b/commands/preupgrade/__init__.py
@@ -59,6 +59,8 @@ def preupgrade(args, breadcrumbs):
except LeappError as exc:
raise CommandError(exc.message)
+ command_utils.set_resource_limits()
+
workflow = repositories.lookup_workflow('IPUWorkflow')()
util.warn_if_unsupported(configuration)
util.process_whitelist_experimental(repositories, workflow, configuration, logger)
diff --git a/commands/tests/test_upgrade_paths.py b/commands/tests/test_upgrade_paths.py
index 53f081a5..f1312f66 100644
--- a/commands/tests/test_upgrade_paths.py
+++ b/commands/tests/test_upgrade_paths.py
@@ -1,3 +1,5 @@
+import resource
+
import mock
import pytest
@@ -50,3 +52,61 @@ def test_vet_upgrade_path(mock_open, monkeypatch):
monkeypatch.setenv('LEAPP_DEVEL_TARGET_RELEASE', '9.0')
args = mock.Mock(target='1.2')
assert command_utils.vet_upgrade_path(args) == ('9.0', 'default')
+
+
+def _mock_getrlimit_factory(nofile_limits=(1024, 4096), fsize_limits=(1024, 4096)):
+ """
+ Factory function to create a mock `getrlimit` function with configurable return values.
+ The default param values are lower than the expected values.
+
+ :param nofile_limits: Tuple representing (soft, hard) limits for `RLIMIT_NOFILE`
+ :param fsize_limits: Tuple representing (soft, hard) limits for `RLIMIT_FSIZE`
+ :return: A mock `getrlimit` function
+ """
+ def mock_getrlimit(resource_type):
+ if resource_type == resource.RLIMIT_NOFILE:
+ return nofile_limits
+ if resource_type == resource.RLIMIT_FSIZE:
+ return fsize_limits
+ return (0, 0)
+
+ return mock_getrlimit
+
+
+@pytest.mark.parametrize("nofile_limits, fsize_limits, expected_calls", [
+ # Case where both limits need to be increased
+ ((1024, 4096), (1024, 4096), [
+ (resource.RLIMIT_NOFILE, (1024*16, 1024*16)),
+ (resource.RLIMIT_FSIZE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY))
+ ]),
+ # Case where neither limit needs to be changed
+ ((1024*16, 1024*16), (resource.RLIM_INFINITY, resource.RLIM_INFINITY), [])
+])
+def test_set_resource_limits_increase(monkeypatch, nofile_limits, fsize_limits, expected_calls):
+ setrlimit_called = []
+
+ def mock_setrlimit(resource_type, limits):
+ setrlimit_called.append((resource_type, limits))
+
+ monkeypatch.setattr(resource, "getrlimit", _mock_getrlimit_factory(nofile_limits, fsize_limits))
+ monkeypatch.setattr(resource, "setrlimit", mock_setrlimit)
+
+ command_utils.set_resource_limits()
+
+ assert setrlimit_called == expected_calls
+
+
+@pytest.mark.parametrize("errortype, expected_message", [
+ (OSError, "Failed to set resource limit"),
+ (ValueError, "Failure occurred while attempting to set soft limit higher than the hard limit")
+])
+def test_set_resource_limits_exceptions(monkeypatch, errortype, expected_message):
+ monkeypatch.setattr(resource, "getrlimit", _mock_getrlimit_factory())
+
+ def mock_setrlimit(*args, **kwargs):
+ raise errortype("mocked error")
+
+ monkeypatch.setattr(resource, "setrlimit", mock_setrlimit)
+
+ with pytest.raises(CommandError, match=expected_message):
+ command_utils.set_resource_limits()
diff --git a/commands/upgrade/__init__.py b/commands/upgrade/__init__.py
index 1e15b59c..c7487fde 100644
--- a/commands/upgrade/__init__.py
+++ b/commands/upgrade/__init__.py
@@ -89,6 +89,9 @@ def upgrade(args, breadcrumbs):
repositories = util.load_repositories()
except LeappError as exc:
raise CommandError(exc.message)
+
+ command_utils.set_resource_limits()
+
workflow = repositories.lookup_workflow('IPUWorkflow')(auto_reboot=args.reboot)
util.process_whitelist_experimental(repositories, workflow, configuration, logger)
util.warn_if_unsupported(configuration)
--
2.47.0

View File

@ -0,0 +1,675 @@
From e1bdf2c02dd193cdd7a2da95e2a3cfa5e6e1e8b3 Mon Sep 17 00:00:00 2001
From: mhecko <mhecko@redhat.com>
Date: Mon, 29 Apr 2024 11:16:46 +0200
Subject: [PATCH 23/40] feature: add possibility to use net.naming-scheme
Leapp writes .link files to prevent interfaces from being renamed
after booting into the post-upgrade system. This patch adds a less
error-prone approach that uses the net.naming-scheme kernel parameter.
The naming scheme tells udev what hardware properties to use
when composing a device name. Moreover, the possible values of this
parameter are coarse-grained "profiles" that tell udev to
behave as it did on a given release, e.g. RHEL 8.0.
The functionality is enabled by setting the LEAPP_USE_NET_NAMING_SCHEME
environment variable to 1. If the feature is enabled, the .link
file generation is disabled. A kernel parameter `net.naming-scheme=`
is added to the upgrade boot entry and the post-upgrade entry.
The value of the parameter will be `rhel-<source_major>.0`. Note
that the source minor version is *not used*: using the real minor
version instead of 0 causes the device names to change slightly,
so we use 0. Moreover, an extra RPM named `rhel-net-naming-sysattrs`
is installed into the target system and the target userspace container.
The RPM provides the definitions of the "profiles" for net.naming-scheme.
The feature is available only for 8>9 upgrades and higher. Attempting
a 7>8 upgrade with LEAPP_USE_NET_NAMING_SCHEME=1 simply ignores
the value of LEAPP_USE_NET_NAMING_SCHEME.
Add the possibility to use the net.naming-scheme cmdline argument
to make network interface names immutable during the upgrade.
The feature can be used only for 8>9 upgrades and higher.
To enable the feature, use LEAPP_USE_NET_NAMING_SCHEME=1.
Jira-ref: RHEL-23473
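A small sketch of how the scheme value is composed for the boot entries
(assuming an 8.10 source system; this mirrors the library added below and is
not a verbatim excerpt):

    # The minor version is deliberately fixed to 0 (see the note above).
    source_major_version = '8'  # assumed example value
    naming_scheme = 'rhel-{0}.0'.format(source_major_version)
    cmdline_arg = ('net.naming-scheme', naming_scheme)  # ends up as net.naming-scheme=rhel-8.0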
---
.../actors/addupgradebootentry/actor.py | 10 +-
.../libraries/addupgradebootentry.py | 78 ++++++++++-----
.../tests/unit_test_addupgradebootentry.py | 47 ++++-----
.../actors/kernelcmdlineconfig/actor.py | 16 +++-
.../libraries/kernelcmdlineconfig.py | 12 ++-
.../libraries/persistentnetnamesconfig.py | 5 +-
.../common/models/kernelcmdlineargs.py | 21 ++++
.../actors/emit_net_naming_scheme/actor.py | 28 ++++++
.../libraries/emit_net_naming.py | 63 ++++++++++++
.../tests/test_emit_net_naming_scheme.py | 95 +++++++++++++++++++
10 files changed, 318 insertions(+), 57 deletions(-)
create mode 100644 repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/actor.py
create mode 100644 repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py
create mode 100644 repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/tests/test_emit_net_naming_scheme.py
diff --git a/repos/system_upgrade/common/actors/addupgradebootentry/actor.py b/repos/system_upgrade/common/actors/addupgradebootentry/actor.py
index f400ebf8..e4ecf39e 100644
--- a/repos/system_upgrade/common/actors/addupgradebootentry/actor.py
+++ b/repos/system_upgrade/common/actors/addupgradebootentry/actor.py
@@ -8,11 +8,13 @@ from leapp.models import (
FirmwareFacts,
GrubConfigError,
KernelCmdline,
+ LateTargetKernelCmdlineArgTasks,
LiveImagePreparationInfo,
LiveModeArtifacts,
LiveModeConfig,
TargetKernelCmdlineArgTasks,
- TransactionDryRun
+ TransactionDryRun,
+ UpgradeKernelCmdlineArgTasks
)
from leapp.tags import InterimPreparationPhaseTag, IPUWorkflowTag
@@ -33,9 +35,11 @@ class AddUpgradeBootEntry(Actor):
LiveModeArtifacts,
LiveModeConfig,
KernelCmdline,
- TransactionDryRun
+ TransactionDryRun,
+ TargetKernelCmdlineArgTasks,
+ UpgradeKernelCmdlineArgTasks
)
- produces = (TargetKernelCmdlineArgTasks,)
+ produces = (LateTargetKernelCmdlineArgTasks,)
tags = (IPUWorkflowTag, InterimPreparationPhaseTag)
def process(self):
diff --git a/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py b/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py
index 553ffc35..b236e39b 100644
--- a/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py
+++ b/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py
@@ -9,14 +9,16 @@ from leapp.models import (
BootContent,
KernelCmdline,
KernelCmdlineArg,
+ LateTargetKernelCmdlineArgTasks,
LiveImagePreparationInfo,
LiveModeArtifacts,
LiveModeConfig,
- TargetKernelCmdlineArgTasks
+ TargetKernelCmdlineArgTasks,
+ UpgradeKernelCmdlineArgTasks
)
-def collect_boot_args(livemode_enabled):
+def collect_upgrade_kernel_args(livemode_enabled):
args = {
'enforcing': '0',
'rd.plymouth': '0',
@@ -34,7 +36,10 @@ def collect_boot_args(livemode_enabled):
livemode_args = construct_cmdline_args_for_livemode()
args.update(livemode_args)
- return args
+ upgrade_kernel_args = collect_set_of_kernel_args_from_msgs(UpgradeKernelCmdlineArgTasks, 'to_add')
+ args.update(upgrade_kernel_args)
+
+ return set(args.items())
def collect_undesired_args(livemode_enabled):
@@ -43,11 +48,11 @@ def collect_undesired_args(livemode_enabled):
args = dict(zip(('ro', 'rhgb', 'quiet'), itertools.repeat(None)))
args['rd.lvm.lv'] = _get_rdlvm_arg_values()
- return args
+ return set(args.items())
-def format_grubby_args_from_args_dict(args_dict):
- """ Format the given args dictionary in a form required by grubby's --args. """
+def format_grubby_args_from_args_set(args_dict):
+ """ Format the given args set in a form required by grubby's --args. """
def fmt_single_arg(arg_pair):
key, value = arg_pair
@@ -65,7 +70,7 @@ def format_grubby_args_from_args_dict(args_dict):
else:
yield (key, value) # Just a single (key, value) pair
- arg_sequence = itertools.chain(*(flatten_arguments(arg_pair) for arg_pair in args_dict.items()))
+ arg_sequence = itertools.chain(*(flatten_arguments(arg_pair) for arg_pair in args_dict))
# Sorting should be fine as only values can be None, but we cannot have a (key, None) and (key, value) in
# the dictionary at the same time.
@@ -78,7 +83,7 @@ def format_grubby_args_from_args_dict(args_dict):
def figure_out_commands_needed_to_add_entry(kernel_path, initramfs_path, args_to_add, args_to_remove):
boot_entry_modification_commands = []
- args_to_add_str = format_grubby_args_from_args_dict(args_to_add)
+ args_to_add_str = format_grubby_args_from_args_set(args_to_add)
create_entry_cmd = [
'/usr/sbin/grubby',
@@ -93,7 +98,7 @@ def figure_out_commands_needed_to_add_entry(kernel_path, initramfs_path, args_to
# We need to update root= param separately, since we cannot do it during --add-kernel with --copy-default.
# This is likely a bug in grubby.
- root_param_value = args_to_add.get('root', None)
+ root_param_value = dict(args_to_add).get('root', None)
if root_param_value:
enforce_root_param_for_the_entry_cmd = [
'/usr/sbin/grubby',
@@ -103,7 +108,7 @@ def figure_out_commands_needed_to_add_entry(kernel_path, initramfs_path, args_to
boot_entry_modification_commands.append(enforce_root_param_for_the_entry_cmd)
if args_to_remove:
- args_to_remove_str = format_grubby_args_from_args_dict(args_to_remove)
+ args_to_remove_str = format_grubby_args_from_args_set(args_to_remove)
remove_undesired_args_cmd = [
'/usr/sbin/grubby',
'--update-kernel', kernel_path,
@@ -113,18 +118,55 @@ def figure_out_commands_needed_to_add_entry(kernel_path, initramfs_path, args_to
return boot_entry_modification_commands
+def collect_set_of_kernel_args_from_msgs(msg_type, arg_list_field_name):
+ cmdline_modification_msgs = api.consume(msg_type)
+ lists_of_args_to_add = (getattr(msg, arg_list_field_name, []) for msg in cmdline_modification_msgs)
+ args = itertools.chain(*lists_of_args_to_add)
+ return set((arg.key, arg.value) for arg in args)
+
+
+def emit_removal_of_args_meant_only_for_upgrade_kernel(added_upgrade_kernel_args):
+ """
+ Emit message requesting removal of upgrade kernel args that should not be on the target kernel.
+
+ Target kernel args are created by copying the args of the booted (upgrade) kernel. Therefore,
+ we need to explicitly modify the target kernel cmdline, removing what should not have been copied.
+ """
+ target_args_to_add = collect_set_of_kernel_args_from_msgs(TargetKernelCmdlineArgTasks, 'to_add')
+ actual_kernel_args = collect_set_of_kernel_args_from_msgs(KernelCmdline, 'parameters')
+
+ # actual_kernel_args should not be changed during upgrade, unless explicitly removed by
+ # TargetKernelCmdlineArgTasks.to_remove, but that is handled by some other upgrade component. We just want
+ # to make sure we remove what was not on the source system and that we don't overwrite args to be added to target.
+ args_not_present_on_target_kernel = added_upgrade_kernel_args - actual_kernel_args - target_args_to_add
+
+ # We remove only what we've added and what will not be already removed by someone else.
+ args_to_remove = [KernelCmdlineArg(key=arg[0], value=arg[1]) for arg in args_not_present_on_target_kernel]
+
+ if args_to_remove:
+ msg = ('Following upgrade kernel args were added, but they should not be present '
+ 'on target cmdline: `%s`, requesting removal.')
+ api.current_logger().info(msg, args_not_present_on_target_kernel)
+ args_sorted = sorted(args_to_remove, key=lambda arg: arg.key)
+ api.produce(LateTargetKernelCmdlineArgTasks(to_remove=args_sorted))
+
+
def add_boot_entry(configs=None):
kernel_dst_path, initram_dst_path = get_boot_file_paths()
+
_remove_old_upgrade_boot_entry(kernel_dst_path, configs=configs)
livemode_enabled = next(api.consume(LiveImagePreparationInfo), None) is not None
- cmdline_args = collect_boot_args(livemode_enabled)
+ # We have to keep the desired and unwanted args separate and modify cmline in two separate grubby calls. Merging
+ # these sets and trying to execute only a single command would leave the unwanted cmdline args present if they
+ # are present on the original system.
+ added_cmdline_args = collect_upgrade_kernel_args(livemode_enabled)
undesired_cmdline_args = collect_undesired_args(livemode_enabled)
commands_to_run = figure_out_commands_needed_to_add_entry(kernel_dst_path,
initram_dst_path,
- args_to_add=cmdline_args,
+ args_to_add=added_cmdline_args,
args_to_remove=undesired_cmdline_args)
def run_commands_adding_entry(extra_command_suffix=None):
@@ -146,16 +188,8 @@ def add_boot_entry(configs=None):
# See https://bugzilla.redhat.com/show_bug.cgi?id=1764306
run(['/usr/sbin/zipl'])
- if 'debug' in cmdline_args:
- # The kernelopts for target kernel are generated based on the cmdline used in the upgrade initramfs,
- # therefore, if we enabled debug above, and the original system did not have the debug kernelopt, we
- # need to explicitly remove it from the target os boot entry.
- # NOTE(mhecko): This will also unconditionally remove debug kernelopt if the source system used it.
- api.produce(TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='debug')]))
-
- # NOTE(mmatuska): This will remove the option even if the source system had it set.
- # However enforcing=0 shouldn't be set persistently anyway.
- api.produce(TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='enforcing', value='0')]))
+ effective_upgrade_kernel_args = added_cmdline_args - undesired_cmdline_args
+ emit_removal_of_args_meant_only_for_upgrade_kernel(effective_upgrade_kernel_args)
except CalledProcessError as e:
raise StopActorExecutionError(
diff --git a/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py b/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py
index c4f5232b..2f58ba9e 100644
--- a/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py
+++ b/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py
@@ -12,6 +12,7 @@ from leapp.models import (
BootContent,
KernelCmdline,
KernelCmdlineArg,
+ LateTargetKernelCmdlineArgTasks,
LiveModeArtifacts,
LiveModeConfig,
TargetKernelCmdlineArgTasks
@@ -82,8 +83,10 @@ def test_add_boot_entry(monkeypatch, run_args, arch):
assert addupgradebootentry.run.args[0] == run_args.args_remove
assert addupgradebootentry.run.args[1] == run_args.args_add
assert api.produce.model_instances == [
- TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='debug')]),
- TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='enforcing', value='0')])
+ LateTargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='debug'),
+ KernelCmdlineArg(key='enforcing', value='0'),
+ KernelCmdlineArg(key='plymouth.enable', value='0'),
+ KernelCmdlineArg(key='rd.plymouth', value='0')])
]
if run_args.args_zipl:
@@ -103,16 +106,16 @@ def test_debug_kernelopt_removal_task_production(monkeypatch, is_leapp_invoked_w
CurrentActorMocked(envars={'LEAPP_DEBUG': str(int(is_leapp_invoked_with_debug))}))
addupgradebootentry.add_boot_entry()
+ assert len(api.produce.model_instances) == 1
- expected_produced_messages = []
- if is_leapp_invoked_with_debug:
- expected_produced_messages = [TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='debug')])]
-
- expected_produced_messages.append(
- TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='enforcing', value='0')])
- )
+ produced_msg = api.produce.model_instances[0]
+ assert isinstance(produced_msg, LateTargetKernelCmdlineArgTasks)
- assert api.produce.model_instances == expected_produced_messages
+ debug_kernel_cmline_arg = KernelCmdlineArg(key='debug')
+ if is_leapp_invoked_with_debug:
+ assert debug_kernel_cmline_arg in produced_msg.to_remove
+ else:
+ assert debug_kernel_cmline_arg not in produced_msg.to_remove
def test_add_boot_entry_configs(monkeypatch):
@@ -132,8 +135,10 @@ def test_add_boot_entry_configs(monkeypatch):
assert addupgradebootentry.run.args[2] == run_args_add + ['-c', CONFIGS[0]]
assert addupgradebootentry.run.args[3] == run_args_add + ['-c', CONFIGS[1]]
assert api.produce.model_instances == [
- TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='debug')]),
- TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='enforcing', value='0')]),
+ LateTargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='debug'),
+ KernelCmdlineArg(key='enforcing', value='0'),
+ KernelCmdlineArg(key='plymouth.enable', value='0'),
+ KernelCmdlineArg(key='rd.plymouth', value='0')])
]
@@ -183,7 +188,7 @@ def test_fix_grub_config_error(monkeypatch, error_type, test_file_name):
(False, False),
)
)
-def test_collect_boot_args(monkeypatch, is_debug_enabled, network_enablement_type):
+def test_collect_upgrade_kernel_args(monkeypatch, is_debug_enabled, network_enablement_type):
env_vars = {'LEAPP_DEBUG': str(int(is_debug_enabled))}
if network_enablement_type:
env_vars['LEAPP_DEVEL_INITRAM_NETWORK'] = network_enablement_type
@@ -192,7 +197,8 @@ def test_collect_boot_args(monkeypatch, is_debug_enabled, network_enablement_typ
monkeypatch.setattr(addupgradebootentry, 'construct_cmdline_args_for_livemode',
lambda *args: {'livemodearg': 'value'})
- args = addupgradebootentry.collect_boot_args(livemode_enabled=True)
+ arg_set = addupgradebootentry.collect_upgrade_kernel_args(livemode_enabled=True)
+ args = dict(arg_set)
assert args['enforcing'] == '0'
assert args['rd.plymouth'] == '0'
@@ -320,16 +326,3 @@ def test_get_device_uuid(monkeypatch):
uuid = addupgradebootentry._get_device_uuid(path)
assert uuid == 'MY_UUID1'
-
-
-@pytest.mark.parametrize(
- ('args', 'expected_result'),
- (
- ([('argA', 'val'), ('argB', 'valB'), ('argC', None), ], 'argA=val argB=valB argC'),
- ([('argA', ('val1', 'val2'))], 'argA=val1 argA=val2')
- )
-)
-def test_format_grubby_args_from_args_dict(args, expected_result):
- actual_result = addupgradebootentry.format_grubby_args_from_args_dict(dict(args))
-
- assert actual_result == expected_result
diff --git a/repos/system_upgrade/common/actors/kernelcmdlineconfig/actor.py b/repos/system_upgrade/common/actors/kernelcmdlineconfig/actor.py
index 3585a14e..6d5f39dd 100644
--- a/repos/system_upgrade/common/actors/kernelcmdlineconfig/actor.py
+++ b/repos/system_upgrade/common/actors/kernelcmdlineconfig/actor.py
@@ -3,7 +3,13 @@ import os
from leapp.actors import Actor
from leapp.exceptions import StopActorExecutionError
from leapp.libraries.actor import kernelcmdlineconfig
-from leapp.models import FirmwareFacts, InstalledTargetKernelInfo, KernelCmdlineArg, TargetKernelCmdlineArgTasks
+from leapp.models import (
+ FirmwareFacts,
+ InstalledTargetKernelInfo,
+ KernelCmdlineArg,
+ LateTargetKernelCmdlineArgTasks,
+ TargetKernelCmdlineArgTasks
+)
from leapp.reporting import Report
from leapp.tags import FinalizationPhaseTag, IPUWorkflowTag
@@ -14,7 +20,13 @@ class KernelCmdlineConfig(Actor):
"""
name = 'kernelcmdlineconfig'
- consumes = (KernelCmdlineArg, InstalledTargetKernelInfo, FirmwareFacts, TargetKernelCmdlineArgTasks)
+ consumes = (
+ KernelCmdlineArg,
+ InstalledTargetKernelInfo,
+ FirmwareFacts,
+ LateTargetKernelCmdlineArgTasks,
+ TargetKernelCmdlineArgTasks
+ )
produces = (Report,)
tags = (FinalizationPhaseTag, IPUWorkflowTag)
diff --git a/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py b/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py
index 19c50f3c..98b8b95b 100644
--- a/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py
+++ b/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py
@@ -1,3 +1,4 @@
+import itertools
import re
from leapp import reporting
@@ -5,7 +6,12 @@ from leapp.exceptions import StopActorExecutionError
from leapp.libraries import stdlib
from leapp.libraries.common.config import architecture, version
from leapp.libraries.stdlib import api
-from leapp.models import InstalledTargetKernelInfo, KernelCmdlineArg, TargetKernelCmdlineArgTasks
+from leapp.models import (
+ InstalledTargetKernelInfo,
+ KernelCmdlineArg,
+ LateTargetKernelCmdlineArgTasks,
+ TargetKernelCmdlineArgTasks
+)
KERNEL_CMDLINE_FILE = "/etc/kernel/cmdline"
@@ -71,7 +77,9 @@ def retrieve_arguments_to_modify():
kernelargs_msgs_to_add = list(api.consume(KernelCmdlineArg))
kernelargs_msgs_to_remove = []
- for target_kernel_arg_task in api.consume(TargetKernelCmdlineArgTasks):
+ modification_msgs = itertools.chain(api.consume(TargetKernelCmdlineArgTasks),
+ api.consume(LateTargetKernelCmdlineArgTasks))
+ for target_kernel_arg_task in modification_msgs:
kernelargs_msgs_to_add.extend(target_kernel_arg_task.to_add)
kernelargs_msgs_to_remove.extend(target_kernel_arg_task.to_remove)
diff --git a/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py b/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py
index dc5196ea..2f12742a 100644
--- a/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py
+++ b/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py
@@ -2,7 +2,7 @@ import errno
import os
import re
-from leapp.libraries.common.config import get_env
+from leapp.libraries.common.config import get_env, version
from leapp.libraries.stdlib import api
from leapp.models import (
InitrdIncludes,
@@ -39,6 +39,9 @@ def generate_link_file(interface):
@suppress_deprecation(InitrdIncludes)
def process():
+ if get_env('LEAPP_USE_NET_NAMING_SCHEMES', '0') == '1' and version.get_target_major_version() != '8':
+ api.current_logger().info('Skipping generation of .link files renaming NICs as LEAPP_USE_NET_NAMING_SCHEMES=1')
+ return
if get_env('LEAPP_NO_NETWORK_RENAMING', '0') == '1':
api.current_logger().info(
diff --git a/repos/system_upgrade/common/models/kernelcmdlineargs.py b/repos/system_upgrade/common/models/kernelcmdlineargs.py
index e3568a0a..fafd2853 100644
--- a/repos/system_upgrade/common/models/kernelcmdlineargs.py
+++ b/repos/system_upgrade/common/models/kernelcmdlineargs.py
@@ -24,6 +24,27 @@ class TargetKernelCmdlineArgTasks(Model):
to_remove = fields.List(fields.Model(KernelCmdlineArg), default=[])
+class LateTargetKernelCmdlineArgTasks(Model):
+ """
+ Desired modifications of the target kernel args produced later in the upgrade process.
+
+ Defined to prevent loops in the actor dependency graph.
+ """
+ topic = SystemInfoTopic
+
+ to_add = fields.List(fields.Model(KernelCmdlineArg), default=[])
+ to_remove = fields.List(fields.Model(KernelCmdlineArg), default=[])
+
+
+class UpgradeKernelCmdlineArgTasks(Model):
+ """
+ Modifications of the upgrade kernel cmdline.
+ """
+ topic = SystemInfoTopic
+
+ to_add = fields.List(fields.Model(KernelCmdlineArg), default=[])
+
+
class KernelCmdline(Model):
"""
Kernel command line parameters the system was booted with
diff --git a/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/actor.py b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/actor.py
new file mode 100644
index 00000000..769fe20b
--- /dev/null
+++ b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/actor.py
@@ -0,0 +1,28 @@
+from leapp.actors import Actor
+from leapp.libraries.actor import emit_net_naming as emit_net_naming_lib
+from leapp.models import (
+ KernelCmdline,
+ RpmTransactionTasks,
+ TargetKernelCmdlineArgTasks,
+ TargetUserSpaceUpgradeTasks,
+ UpgradeKernelCmdlineArgTasks
+)
+from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
+
+
+class EmitNetNamingScheme(Actor):
+ """
+ Emit necessary modifications of the upgrade environment and target command line to use net.naming-scheme.
+ """
+ name = 'emit_net_naming_scheme'
+ consumes = (KernelCmdline,)
+ produces = (
+ RpmTransactionTasks,
+ TargetKernelCmdlineArgTasks,
+ TargetUserSpaceUpgradeTasks,
+ UpgradeKernelCmdlineArgTasks,
+ )
+ tags = (ChecksPhaseTag, IPUWorkflowTag)
+
+ def process(self):
+ emit_net_naming_lib.emit_msgs_to_use_net_naming_schemes()
diff --git a/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py
new file mode 100644
index 00000000..65abdd4d
--- /dev/null
+++ b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py
@@ -0,0 +1,63 @@
+from leapp.exceptions import StopActorExecutionError
+from leapp.libraries.common.config import get_env, version
+from leapp.libraries.stdlib import api
+from leapp.models import (
+ KernelCmdline,
+ KernelCmdlineArg,
+ RpmTransactionTasks,
+ TargetKernelCmdlineArgTasks,
+ TargetUserSpaceUpgradeTasks,
+ UpgradeKernelCmdlineArgTasks
+)
+
+NET_NAMING_SYSATTRS_RPM_NAME = 'rhel-net-naming-sysattrs'
+
+
+def is_net_scheme_compatible_with_current_cmdline():
+ kernel_cmdline = next(api.consume(KernelCmdline), None)
+ if not kernel_cmdline:
+ # Super unlikely
+ raise StopActorExecutionError('Did not receive any KernelCmdline messages.')
+
+ allows_predictable_names = True
+ already_has_a_net_naming_scheme = False
+ for param in kernel_cmdline.parameters:
+ if param.key == 'net.ifnames':
+ if param.value == '0':
+ allows_predictable_names = False
+ elif param.value == '1':
+ allows_predictable_names = True
+ if param.key == 'net.naming-scheme':
+ # We assume that the kernel cmdline does not contain invalid entries, namely,
+ # that the net.naming-scheme refers to a valid scheme.
+ already_has_a_net_naming_scheme = True
+
+ is_compatible = allows_predictable_names and not already_has_a_net_naming_scheme
+
+ msg = ('Should net.naming-scheme be added to kernel cmdline: %s. '
+ 'Reason: allows_predictable_names=%s, already_has_a_net_naming_scheme=%s')
+ api.current_logger().info(msg, 'yes' if is_compatible else 'no',
+ allows_predictable_names,
+ already_has_a_net_naming_scheme)
+
+ return is_compatible
+
+
+def emit_msgs_to_use_net_naming_schemes():
+ if get_env('LEAPP_USE_NET_NAMING_SCHEMES', '0') != '1' and version.get_target_major_version() != '8':
+ return
+
+ # The package should be installed regardless of whether we will modify the cmdline -
+ # if the cmdline already contains net.naming-scheme, then the package will be useful
+ # in both, the upgrade environment and on the target system.
+ pkgs_to_install = [NET_NAMING_SYSATTRS_RPM_NAME]
+ api.produce(TargetUserSpaceUpgradeTasks(install_rpms=pkgs_to_install))
+ api.produce(RpmTransactionTasks(to_install=pkgs_to_install))
+
+ if not is_net_scheme_compatible_with_current_cmdline():
+ return
+
+ naming_scheme = 'rhel-{0}.0'.format(version.get_source_major_version())
+ cmdline_args = [KernelCmdlineArg(key='net.naming-scheme', value=naming_scheme)]
+ api.produce(UpgradeKernelCmdlineArgTasks(to_add=cmdline_args))
+ api.produce(TargetKernelCmdlineArgTasks(to_add=cmdline_args))
diff --git a/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/tests/test_emit_net_naming_scheme.py b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/tests/test_emit_net_naming_scheme.py
new file mode 100644
index 00000000..7a5eeba5
--- /dev/null
+++ b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/tests/test_emit_net_naming_scheme.py
@@ -0,0 +1,95 @@
+import pytest
+
+from leapp.libraries.actor import emit_net_naming as emit_net_naming_lib
+from leapp.libraries.common.testutils import CurrentActorMocked, produce_mocked
+from leapp.libraries.stdlib import api
+from leapp.models import (
+ KernelCmdline,
+ KernelCmdlineArg,
+ RpmTransactionTasks,
+ TargetKernelCmdlineArgTasks,
+ TargetUserSpaceUpgradeTasks,
+ UpgradeKernelCmdlineArgTasks
+)
+
+
+@pytest.mark.parametrize(
+ ('kernel_args', 'should_be_compatible'),
+ [
+ ([KernelCmdlineArg(key='net.naming-scheme', value='rhel-8.10')], False),
+ ([KernelCmdlineArg(key='net.ifnames', value='1')], True),
+ ([KernelCmdlineArg(key='net.ifnames', value='0')], False),
+ (
+ [
+ KernelCmdlineArg(key='net.naming-scheme', value='rhel-8.10'),
+ KernelCmdlineArg(key='net.ifname', value='0'),
+ KernelCmdlineArg(key='root', value='/dev/vda1')
+ ],
+ False
+ ),
+ ([KernelCmdlineArg(key='root', value='/dev/vda1')], True),
+ ]
+)
+def test_is_net_scheme_compatible_with_current_cmdline(monkeypatch, kernel_args, should_be_compatible):
+ kernel_cmdline = KernelCmdline(parameters=kernel_args)
+
+ def mocked_consume(msg_type):
+ yield {KernelCmdline: kernel_cmdline}[msg_type]
+
+ monkeypatch.setattr(api, 'consume', mocked_consume)
+
+ assert emit_net_naming_lib.is_net_scheme_compatible_with_current_cmdline() == should_be_compatible, \
+ [(arg.key, arg.value) for arg in kernel_cmdline.parameters]
+
+
+@pytest.mark.parametrize(
+ ('is_net_scheme_enabled', 'is_current_cmdline_compatible'),
+ [
+ (True, True),
+ (True, False),
+ (False, True)
+ ]
+)
+def test_emit_msgs_to_use_net_naming_schemes(monkeypatch, is_net_scheme_enabled, is_current_cmdline_compatible):
+ envvar_value = '1' if is_net_scheme_enabled else '0'
+
+ mocked_actor = CurrentActorMocked(src_ver='8.10',
+ dst_ver='9.5',
+ envars={'LEAPP_USE_NET_NAMING_SCHEMES': envvar_value})
+ monkeypatch.setattr(api, 'current_actor', mocked_actor)
+
+ monkeypatch.setattr(api, 'produce', produce_mocked())
+ monkeypatch.setattr(emit_net_naming_lib,
+ 'is_net_scheme_compatible_with_current_cmdline',
+ lambda: is_current_cmdline_compatible)
+
+ emit_net_naming_lib.emit_msgs_to_use_net_naming_schemes()
+
+ def ensure_one_msg_of_type_produced(produced_messages, msg_type):
+ msgs = (msg for msg in produced_messages if isinstance(msg, msg_type))
+ msg = next(msgs)
+ assert not next(msgs, None), 'More than one message of type {type} produced'.format(type=type)
+ return msg
+
+ produced_messages = api.produce.model_instances
+ if is_net_scheme_enabled:
+ userspace_tasks = ensure_one_msg_of_type_produced(produced_messages, TargetUserSpaceUpgradeTasks)
+ assert userspace_tasks.install_rpms == [emit_net_naming_lib.NET_NAMING_SYSATTRS_RPM_NAME]
+
+ rpm_tasks = ensure_one_msg_of_type_produced(produced_messages, RpmTransactionTasks)
+ assert rpm_tasks.to_install == [emit_net_naming_lib.NET_NAMING_SYSATTRS_RPM_NAME]
+ else:
+ assert not api.produce.called
+ return
+
+ upgrade_cmdline_mods = (msg for msg in produced_messages if isinstance(msg, UpgradeKernelCmdlineArgTasks))
+ target_cmdline_mods = (msg for msg in produced_messages if isinstance(msg, TargetKernelCmdlineArgTasks))
+
+ if is_current_cmdline_compatible:
+ # We should emit cmdline modifications - both UpgradeKernelCmdlineArgTasks and TargetKernelCmdlineArgTasks
+ # should be produced
+ assert next(upgrade_cmdline_mods, None)
+ assert next(target_cmdline_mods, None)
+ else:
+ assert not next(upgrade_cmdline_mods, None)
+ assert not next(target_cmdline_mods, None)
--
2.47.0

View File

@ -1,54 +0,0 @@
From 0cf9d8adb12b40f4cdcd423e6c55c11e0fbacff5 Mon Sep 17 00:00:00 2001
From: denli <denli@redhat.com>
Date: Tue, 7 Oct 2025 08:09:58 -0400
Subject: [PATCH 23/55] move kpatch actor to common repo
---
.../actors/kernel/checkkpatch/actor.py | 0
.../actors/kernel/checkkpatch/actor.py | 29 -------------------
2 files changed, 29 deletions(-)
rename repos/system_upgrade/{el8toel9 => common}/actors/kernel/checkkpatch/actor.py (100%)
delete mode 100644 repos/system_upgrade/el9toel10/actors/kernel/checkkpatch/actor.py
diff --git a/repos/system_upgrade/el8toel9/actors/kernel/checkkpatch/actor.py b/repos/system_upgrade/common/actors/kernel/checkkpatch/actor.py
similarity index 100%
rename from repos/system_upgrade/el8toel9/actors/kernel/checkkpatch/actor.py
rename to repos/system_upgrade/common/actors/kernel/checkkpatch/actor.py
diff --git a/repos/system_upgrade/el9toel10/actors/kernel/checkkpatch/actor.py b/repos/system_upgrade/el9toel10/actors/kernel/checkkpatch/actor.py
deleted file mode 100644
index e7f6179c..00000000
--- a/repos/system_upgrade/el9toel10/actors/kernel/checkkpatch/actor.py
+++ /dev/null
@@ -1,29 +0,0 @@
-from leapp.actors import Actor
-from leapp.libraries.common.rpms import has_package
-from leapp.libraries.stdlib import api
-from leapp.models import CopyFile, DistributionSignedRPM, TargetUserSpacePreupgradeTasks
-from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
-
-PLUGIN_PKGNAME = "kpatch-dnf"
-CONFIG_PATH = "/etc/dnf/plugins/kpatch.conf"
-
-
-class CheckKpatch(Actor):
- """
- Carry over kpatch-dnf and it's config into the container
-
- Check is kpatch-dnf plugin is installed and if it is, install it and copy
- over the config file so that the plugin can make a decision on whether any
- kpatch-patch packages need to be installed during in-place upgrade.
- """
-
- name = 'check_kpatch'
- consumes = (DistributionSignedRPM,)
- produces = (TargetUserSpacePreupgradeTasks,)
- tags = (IPUWorkflowTag, ChecksPhaseTag)
-
- def process(self):
- if has_package(DistributionSignedRPM, PLUGIN_PKGNAME):
- api.produce(TargetUserSpacePreupgradeTasks(
- install_rpms=[PLUGIN_PKGNAME],
- copy_files=[CopyFile(src=CONFIG_PATH)]))
--
2.51.1

View File

@ -1,41 +0,0 @@
From 3128ca5df81b4c7591af189c9e2ae02f96c88fb4 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Fri, 17 Oct 2025 09:00:20 +0200
Subject: [PATCH 24/55] Makefile: Do not copy unnecessary files to test
containers
In addition to tut/, the docs/, packaging/ and .git/ directories and all
the __pycache__ directories (and .pyc files) are excluded. This makes for
a significant speedup in container setup (output from rsync with -v added)
as a lot less needs to be copied:
Without this patch:
sent 2,405,488 bytes received 19,754 bytes 194,019.36 bytes/sec
total size is 165,333,162 speedup is 68.17
With this patch:
sent 551,179 bytes received 4,067 bytes 100,953.82 bytes/sec
total size is 23,280,513 speedup is 41.93
Some other small files and directories are still unnecessarily copied,
but those don't really affect the copied size that much.
---
Makefile | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Makefile b/Makefile
index 754c2c63..1bfbc3ac 100644
--- a/Makefile
+++ b/Makefile
@@ -447,7 +447,7 @@ test_container:
export _CONT_NAME="leapp-repo-tests-$(_TEST_CONTAINER)-cont"; \
$(_CONTAINER_TOOL) ps -q -f name=$$_CONT_NAME && { $(_CONTAINER_TOOL) kill $$_CONT_NAME; $(_CONTAINER_TOOL) rm $$_CONT_NAME; }; \
$(_CONTAINER_TOOL) run -di --name $$_CONT_NAME -v "$$PWD":/repo:Z -e PYTHON_VENV=$$_VENV $$TEST_IMAGE && \
- $(_CONTAINER_TOOL) exec $$_CONT_NAME rsync -aur --delete --exclude "tut*" /repo/ /repocopy && \
+ $(_CONTAINER_TOOL) exec $$_CONT_NAME rsync -aur --delete --exclude 'tut/' --exclude 'docs/' --exclude '**/__pycache__/' --exclude 'packaging/' --exclude '.git/' /repo/ /repocopy && \
export res=0; \
case $$_VENV in \
python3.6) \
--
2.51.1

View File

@ -0,0 +1,26 @@
From b4b535454b74c05682ecf0d3059decbd2c9530e0 Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Wed, 6 Nov 2024 22:23:37 +0100
Subject: [PATCH 24/40] prevent the feature from being used outside 8>9
---
.../libraries/persistentnetnamesconfig.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py b/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py
index 2f12742a..b2c7f5ff 100644
--- a/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py
+++ b/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py
@@ -39,7 +39,8 @@ def generate_link_file(interface):
@suppress_deprecation(InitrdIncludes)
def process():
- if get_env('LEAPP_USE_NET_NAMING_SCHEMES', '0') == '1' and version.get_target_major_version() != '8':
+ if get_env('LEAPP_USE_NET_NAMING_SCHEMES', '0') == '1' and version.get_target_major_version() == '9':
+ # We can use this only for 8>9, for now
api.current_logger().info('Skipping generation of .link files renaming NICs as LEAPP_USE_NET_NAMING_SCHEMES=1')
return
--
2.47.0

View File

@ -1,42 +0,0 @@
From 7d9ae2c0adcef2eac7cb09fd9acf74f9a6011d64 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Fri, 17 Oct 2025 09:08:02 +0200
Subject: [PATCH 25/55] Makefile: Properly copy commands dir to test containers
During a containerized test run, the commands/ directory is not used
when running the commands tests. Instead, the commands are imported from
tut/lib/python3.X/site-packages/leapp/cli/commands/ where python3.X is
the Python used in the tut/ virtualenv.
When changes break the commands tests, the tests still pass because the
files in the directory mentioned above are not being updated. Currently
this can be fixed only by rebuilding the container, which takes a lot of
time.
This patch adds copying of the commands/ dir to
tut/lib/python3.X/site-packages/leapp/cli/commands/.
This helped while working on #1438 because I caught the failing tests
only after opening the PR. The CI containerized tests are always
created from scratch, so it works there.
---
Makefile | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/Makefile b/Makefile
index 1bfbc3ac..9774a475 100644
--- a/Makefile
+++ b/Makefile
@@ -447,7 +447,8 @@ test_container:
export _CONT_NAME="leapp-repo-tests-$(_TEST_CONTAINER)-cont"; \
$(_CONTAINER_TOOL) ps -q -f name=$$_CONT_NAME && { $(_CONTAINER_TOOL) kill $$_CONT_NAME; $(_CONTAINER_TOOL) rm $$_CONT_NAME; }; \
$(_CONTAINER_TOOL) run -di --name $$_CONT_NAME -v "$$PWD":/repo:Z -e PYTHON_VENV=$$_VENV $$TEST_IMAGE && \
- $(_CONTAINER_TOOL) exec $$_CONT_NAME rsync -aur --delete --exclude 'tut/' --exclude 'docs/' --exclude '**/__pycache__/' --exclude 'packaging/' --exclude '.git/' /repo/ /repocopy && \
+ $(_CONTAINER_TOOL) exec $$_CONT_NAME rsync -aur --delete --exclude 'tut/' --exclude 'docs/' --exclude '**/__pycache__/' --exclude 'packaging/' --exclude '.git/' --exclude 'commands/' /repo/ /repocopy && \
+ $(_CONTAINER_TOOL) exec $$_CONT_NAME rsync -aur --delete --exclude '**/__pycache__/' /repo/commands/ /repocopy/tut/lib/$$_VENV/site-packages/leapp/cli/commands/ && \
export res=0; \
case $$_VENV in \
python3.6) \
--
2.51.1

View File

@ -0,0 +1,28 @@
From e43a8922e06d72212e8e2a8b51747c668147182c Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Wed, 6 Nov 2024 22:26:01 +0100
Subject: [PATCH 25/40] fix condition on when net naming is emitted
---
.../emit_net_naming_scheme/libraries/emit_net_naming.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py
index 65abdd4d..726bb459 100644
--- a/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py
+++ b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py
@@ -44,7 +44,10 @@ def is_net_scheme_compatible_with_current_cmdline():
def emit_msgs_to_use_net_naming_schemes():
- if get_env('LEAPP_USE_NET_NAMING_SCHEMES', '0') != '1' and version.get_target_major_version() != '8':
+ is_env_var_set = get_env('LEAPP_USE_NET_NAMING_SCHEMES', '0') == '1'
+ is_upgrade_8to9 = version.get_target_major_version() == '9'
+ is_net_naming_enabled_and_permitted = is_env_var_set and is_upgrade_8to9
+ if not is_net_naming_enabled_and_permitted:
return
# The package should be installed regardless of whether we will modify the cmdline -
--
2.47.0

View File

@ -1,42 +0,0 @@
From 1ba6d4301602c8a253ba92263fd829e385463182 Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Fri, 17 Oct 2025 15:19:03 +0200
Subject: [PATCH 26/55] livemode: update obsoleted log msg about config
The original livemode configuration was located in
/etc/leapp/files/leapp-livemode.ini.
However, this file has been replaced by the new configuration for actors,
represented by a variable filename under /etc/leapp/actor_conf.d/.
I decided to keep the info log, but dropped the information about the
file path.
---
.../livemode_config_scanner/libraries/scan_livemode_config.py | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/repos/system_upgrade/common/actors/livemode/livemode_config_scanner/libraries/scan_livemode_config.py b/repos/system_upgrade/common/actors/livemode/livemode_config_scanner/libraries/scan_livemode_config.py
index 26fd9d09..7d72204c 100644
--- a/repos/system_upgrade/common/actors/livemode/livemode_config_scanner/libraries/scan_livemode_config.py
+++ b/repos/system_upgrade/common/actors/livemode/livemode_config_scanner/libraries/scan_livemode_config.py
@@ -5,7 +5,6 @@ from leapp.libraries.common.rpms import has_package
from leapp.libraries.stdlib import api
from leapp.models import InstalledRPM, LiveModeConfig
-LIVEMODE_CONFIG_LOCATION = '/etc/leapp/files/devel-livemode.ini'
DEFAULT_SQUASHFS_PATH = '/var/lib/leapp/live-upgrade.img'
@@ -39,8 +38,7 @@ def scan_config_and_emit_message():
if not should_scan_config():
return
- api.current_logger().info('Loading livemode config from %s', LIVEMODE_CONFIG_LOCATION)
-
+ api.current_logger().info('Loading the livemode configuration.')
config = api.current_actor().config[livemode_config_lib.LIVEMODE_CONFIG_SECTION]
# Mapping from model field names to configuration fields - because we might have
--
2.51.1

View File

@ -0,0 +1,56 @@
From 0bf07d1546ccdc6d4a9e6f4936a98b4d6ca27789 Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Tue, 12 Nov 2024 09:10:50 +0100
Subject: [PATCH 26/40] scangrubdevpartitionlayout: Skip warning msgs
The fdisk output can contain warning messages when a partition is not
aligned on a physical sector boundary, like:
Partition 4 does not start on physical sector boundary.
We know that in the case of MBR the line we expect to parse always
starts with a canonical path. So let's skip all lines that do not
start with '/'.
jira: https://issues.redhat.com/browse/RHEL-50947
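A rough sketch of the filtering idea (illustrative, with made-up sample
lines; the actual parsing lives in scan_layout.py below):

    fdisk_output = [
        '/dev/vda1 * 2048 2099199 2097152 1G 83 Linux',
        'Partition 4 does not start on physical sector boundary.',
    ]
    # Keep only lines that describe partitions; MBR partition lines always
    # start with the canonical device path.
    partition_lines = [line for line in fdisk_output if line.startswith('/')]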
---
.../libraries/scan_layout.py | 10 ++++++++++
.../tests/test_scan_partition_layout.py | 3 +++
2 files changed, 13 insertions(+)
diff --git a/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/libraries/scan_layout.py b/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/libraries/scan_layout.py
index 83d02656..7f4a2a59 100644
--- a/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/libraries/scan_layout.py
+++ b/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/libraries/scan_layout.py
@@ -68,6 +68,16 @@ def get_partition_layout(device):
partitions = []
for partition_line in table_iter:
+ if not partition_line.startswith('/'):
+ # the output can contain warning msg when a partition is not aligned
+ # on physical sector boundary, like:
+ # ~~~
+ # Partition 4 does not start on physical sector boundary.
+ # ~~~
+ # We know that in case of MBR the line we expect to parse always
+ # starts with canonical path. So let's use this condition.
+ # See https://issues.redhat.com/browse/RHEL-50947
+ continue
# Fields: Device Boot Start End Sectors Size Id Type
# The line looks like: `/dev/vda1 * 2048 2099199 2097152 1G 83 Linux`
part_info = split_on_space_segments(partition_line)
diff --git a/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/tests/test_scan_partition_layout.py b/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/tests/test_scan_partition_layout.py
index 743ca71f..9c32e16f 100644
--- a/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/tests/test_scan_partition_layout.py
+++ b/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/tests/test_scan_partition_layout.py
@@ -49,6 +49,9 @@ def test_get_partition_layout(monkeypatch, devices, fs):
part_line = '{0} * {1} 2099199 1048576 83 {2}'.format(part.name, part.start_offset, fs)
fdisk_output.append(part_line)
+ # add a problematic warning msg to test:
+ # https://issues.redhat.com/browse/RHEL-50947
+ fdisk_output.append('Partition 3 does not start on physical sector boundary.')
device_to_fdisk_output[device.name] = fdisk_output
def mocked_run(cmd, *args, **kwargs):
--
2.47.0

File diff suppressed because it is too large

View File

@ -1,147 +0,0 @@
From 37409349656c12efd4033e0cb5a3c25d10e6630d Mon Sep 17 00:00:00 2001
From: Mark Huth <mhuth@redhat.com>
Date: Mon, 15 Sep 2025 16:29:02 +1000
Subject: [PATCH 27/55] chore(RHINENG-19596): Rebrand Insights to Lightspeed
---
commands/preupgrade/__init__.py | 2 +-
commands/upgrade/__init__.py | 2 +-
docs/source/configuring-ipu/envars.md | 2 +-
.../common/actors/checkinsightsautoregister/actor.py | 2 +-
.../libraries/checkinsightsautoregister.py | 5 +++--
.../common/actors/insightsautoregister/actor.py | 2 +-
.../insightsautoregister/libraries/insightsautoregister.py | 6 +++---
.../insightsautoregister/tests/test_insightsautoregister.py | 2 +-
8 files changed, 12 insertions(+), 11 deletions(-)
diff --git a/commands/preupgrade/__init__.py b/commands/preupgrade/__init__.py
index 6443bd8a..f24e779a 100644
--- a/commands/preupgrade/__init__.py
+++ b/commands/preupgrade/__init__.py
@@ -26,7 +26,7 @@ from leapp.utils.output import beautify_actor_exception, report_errors, report_i
help='Use only custom repositories and skip actions with Red Hat Subscription Manager.'
' This only has effect on Red Hat Enterprise Linux systems.'
)
-@command_opt('no-insights-register', is_flag=True, help='Do not register into Red Hat Insights')
+@command_opt('no-insights-register', is_flag=True, help='Do not register into Red Hat Lightspeed')
@command_opt('no-rhsm-facts', is_flag=True, help='Do not store migration information using Red Hat '
'Subscription Manager. Automatically implied by --no-rhsm.')
@command_opt('enablerepo', action='append', metavar='<repoid>',
diff --git a/commands/upgrade/__init__.py b/commands/upgrade/__init__.py
index 36be0719..c5900c0d 100644
--- a/commands/upgrade/__init__.py
+++ b/commands/upgrade/__init__.py
@@ -32,7 +32,7 @@ from leapp.utils.output import beautify_actor_exception, report_errors, report_i
help='Use only custom repositories and skip actions with Red Hat Subscription Manager.'
' This only has effect on Red Hat Enterprise Linux systems.'
)
-@command_opt('no-insights-register', is_flag=True, help='Do not register into Red Hat Insights')
+@command_opt('no-insights-register', is_flag=True, help='Do not register into Red Hat Lightspeed')
@command_opt('no-rhsm-facts', is_flag=True, help='Do not store migration information using Red Hat '
'Subscription Manager. Automatically implied by --no-rhsm.')
@command_opt('enablerepo', action='append', metavar='<repoid>',
diff --git a/docs/source/configuring-ipu/envars.md b/docs/source/configuring-ipu/envars.md
index a042ba4a..09634df2 100644
--- a/docs/source/configuring-ipu/envars.md
+++ b/docs/source/configuring-ipu/envars.md
@@ -21,7 +21,7 @@ Overrides the automatically detected storage device with GRUB core (e.g. /dev/sd
Set to 1 to disable RPM GPG checks (same as yum/dnf nogpgckeck option). Its equivalent to the --nogpgcheck leapp option.
#### LEAPP_NO_INSIGHTS_REGISTER
-If set to `1`, Leapp does not register the system into Red Hat Insights automatically. Its equivalent to the --no-insights-register leapp option.
+If set to `1`, Leapp does not register the system into Red Hat Lightspeed automatically. Its equivalent to the --no-insights-register leapp option.
#### LEAPP_NO_NETWORK_RENAMING
If set to `1`, the actor responsible to handle NICs names ends without doing anything. The actor usually creates UDEV rules to preserve original NICs in case they are changed. However, in some cases its not wanted and it leads in malfunction network configuration (e.g. in case the bonding is configured on the system). Its expected that NICs have to be handled manually if needed.
diff --git a/repos/system_upgrade/common/actors/checkinsightsautoregister/actor.py b/repos/system_upgrade/common/actors/checkinsightsautoregister/actor.py
index 70b3b670..52108566 100644
--- a/repos/system_upgrade/common/actors/checkinsightsautoregister/actor.py
+++ b/repos/system_upgrade/common/actors/checkinsightsautoregister/actor.py
@@ -7,7 +7,7 @@ from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
class CheckInsightsAutoregister(Actor):
"""
- Checks if system can be automatically registered into Red Hat Insights
+ Checks if system can be automatically registered into Red Hat Lightspeed
The registration is skipped if NO_INSIGHTS_REGISTER=1 environment variable
is set, the --no-insights-register command line argument present. if the
diff --git a/repos/system_upgrade/common/actors/checkinsightsautoregister/libraries/checkinsightsautoregister.py b/repos/system_upgrade/common/actors/checkinsightsautoregister/libraries/checkinsightsautoregister.py
index 762f3c08..8e26485b 100644
--- a/repos/system_upgrade/common/actors/checkinsightsautoregister/libraries/checkinsightsautoregister.py
+++ b/repos/system_upgrade/common/actors/checkinsightsautoregister/libraries/checkinsightsautoregister.py
@@ -24,9 +24,9 @@ def _ensure_package(package):
def _report_registration_info(installing_client):
pkg_msg = " The '{}' package required for the registration will be installed during the upgrade."
- title = "Automatic registration into Red Hat Insights"
+ title = "Automatic registration into Red Hat Lightspeed"
summary = (
- "After the upgrade, this system will be automatically registered into Red Hat Insights."
+ "After the upgrade, this system will be automatically registered into Red Hat Lightspeed."
"{}"
" To skip the automatic registration, use the '--no-insights-register' command line option or"
" set the LEAPP_NO_INSIGHTS_REGISTER environment variable."
@@ -38,6 +38,7 @@ def _report_registration_info(installing_client):
reporting.Summary(summary),
reporting.Severity(reporting.Severity.INFO),
reporting.Groups([reporting.Groups.SERVICES]),
+ reporting.Key('693963253195f418526f045b6d630a1f4c7a193d'),
]
)
diff --git a/repos/system_upgrade/common/actors/insightsautoregister/actor.py b/repos/system_upgrade/common/actors/insightsautoregister/actor.py
index a81b434c..56615390 100644
--- a/repos/system_upgrade/common/actors/insightsautoregister/actor.py
+++ b/repos/system_upgrade/common/actors/insightsautoregister/actor.py
@@ -7,7 +7,7 @@ from leapp.tags import FirstBootPhaseTag, IPUWorkflowTag
class InsightsAutoregister(Actor):
"""
- Automatically registers system into Red Hat Insights
+ Automatically registers system into Red Hat Lightspeed
The registration is skipped if NO_INSIGHTS_REGISTER=1 environment variable
is set, the --no-insights-register command line argument present or the
diff --git a/repos/system_upgrade/common/actors/insightsautoregister/libraries/insightsautoregister.py b/repos/system_upgrade/common/actors/insightsautoregister/libraries/insightsautoregister.py
index 2134a8bb..bd113a1f 100644
--- a/repos/system_upgrade/common/actors/insightsautoregister/libraries/insightsautoregister.py
+++ b/repos/system_upgrade/common/actors/insightsautoregister/libraries/insightsautoregister.py
@@ -6,18 +6,18 @@ from leapp.libraries.stdlib import api, CalledProcessError, run
def _insights_register():
try:
run(['insights-client', '--register'])
- api.current_logger().info('Automatically registered into Red Hat Insights')
+ api.current_logger().info('Automatically registered into Red Hat Lightspeed')
except (CalledProcessError) as err:
# TODO(mmatuska) produce post-upgrade report?
api.current_logger().error(
- 'Automatic registration into Red Hat Insights failed: {}'.format(err)
+ 'Automatic registration into Red Hat Lightspeed failed: {}'.format(err)
)
def process():
if rhsm.skip_rhsm() or get_env('LEAPP_NO_INSIGHTS_REGISTER', '0') == '1':
api.current_logger().debug(
- 'Skipping registration into Insights due to --no-insights-register'
+ 'Skipping registration into Red Hat Lightspeed due to --no-insights-register'
' or LEAPP_NO_INSIGHTS_REGISTER=1 set'
)
return
diff --git a/repos/system_upgrade/common/actors/insightsautoregister/tests/test_insightsautoregister.py b/repos/system_upgrade/common/actors/insightsautoregister/tests/test_insightsautoregister.py
index 0a039455..d5e6ba20 100644
--- a/repos/system_upgrade/common/actors/insightsautoregister/tests/test_insightsautoregister.py
+++ b/repos/system_upgrade/common/actors/insightsautoregister/tests/test_insightsautoregister.py
@@ -41,7 +41,7 @@ def test_insights_register_success_logged(monkeypatch):
def run_mocked(cmd, **kwargs):
return {
- 'stdout': 'Successfully registered into Insights',
+ 'stdout': 'Successfully registered into Red Hat Lightspeed',
'stderr': '',
'exit_code': 0
}
--
2.51.1

View File

@ -0,0 +1,115 @@
From 866a4b9f163c3aec31736ac0ce25f564fe016cb4 Mon Sep 17 00:00:00 2001
From: Jarek Prokop <jprokop@redhat.com>
Date: Tue, 5 Nov 2024 10:15:28 +0100
Subject: [PATCH 28/40] Add el9toel10 actor to handle symlink -> directory with
ruby IRB.
The `/usr/share/ruby/irb` path is a symlink in RHEL 9,
but a regular directory in RHEL 10.
This puts us back in line with RHEL 8 and Fedora in terms of the
path's file type regarding the rubygem-irb package.
Since this was not handled on the RPM level, handle it in an actor again.
This was copied and adjusted from the same-named el8->el9 actor.
We do not care about the validity or target of the symlink; we just
remove it to allow DNF to create the correct directory on upgrade.
Without this workaround, the upgrade fails in the transaction test with
reports of file conflicts on the directory path.
Users should not expect to ever retain anything in this directory.
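An illustrative Python equivalent of the workaround registered below (the
real tool is the handlerubyirbsymlink bash script, not this snippet):

    import os

    IRB_PATH = '/usr/share/ruby/irb'

    # Unlink the path only when it is a symlink, so DNF/RPM can create the
    # real directory during the upgrade transaction.
    if os.path.islink(IRB_PATH):
        os.unlink(IRB_PATH)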
---
.../actors/registerrubyirbadjustment/actor.py | 31 +++++++++++++++++++
.../test_register_ruby_irb_adjustments.py | 11 +++++++
.../el9toel10/tools/handlerubyirbsymlink | 22 +++++++++++++
3 files changed, 64 insertions(+)
create mode 100644 repos/system_upgrade/el9toel10/actors/registerrubyirbadjustment/actor.py
create mode 100644 repos/system_upgrade/el9toel10/actors/registerrubyirbadjustment/tests/test_register_ruby_irb_adjustments.py
create mode 100755 repos/system_upgrade/el9toel10/tools/handlerubyirbsymlink
diff --git a/repos/system_upgrade/el9toel10/actors/registerrubyirbadjustment/actor.py b/repos/system_upgrade/el9toel10/actors/registerrubyirbadjustment/actor.py
new file mode 100644
index 00000000..4fbec7ff
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/registerrubyirbadjustment/actor.py
@@ -0,0 +1,31 @@
+from leapp.actors import Actor
+from leapp.models import DNFWorkaround
+from leapp.tags import FactsPhaseTag, IPUWorkflowTag
+
+
+class RegisterRubyIRBAdjustment(Actor):
+ """
+ Register a workaround to allow rubygem-irb's symlink -> directory conversion.
+
+ The /usr/share/ruby/irb has been moved from a symlink to a directory
+ in RHEL 10 and this conversion was not handled on the RPM level.
+ This leads to DNF reporting package file conflicts when a major upgrade
+ is attempted and rubygem-irb is installed.
+
+ Register "handlerubyirbsymlink" script that removes the symlink prior
+ to DNF upgrade and allows it to create the expected directory in place of
+ the removed symlink.
+ """
+
+ name = 'register_ruby_irb_adjustment'
+ consumes = ()
+ produces = (DNFWorkaround,)
+ tags = (IPUWorkflowTag, FactsPhaseTag)
+
+ def process(self):
+ self.produce(
+ DNFWorkaround(
+ display_name='IRB directory fix',
+ script_path=self.get_tool_path('handlerubyirbsymlink'),
+ )
+ )
diff --git a/repos/system_upgrade/el9toel10/actors/registerrubyirbadjustment/tests/test_register_ruby_irb_adjustments.py b/repos/system_upgrade/el9toel10/actors/registerrubyirbadjustment/tests/test_register_ruby_irb_adjustments.py
new file mode 100644
index 00000000..fc341646
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/registerrubyirbadjustment/tests/test_register_ruby_irb_adjustments.py
@@ -0,0 +1,11 @@
+import os.path
+
+from leapp.models import DNFWorkaround
+
+
+def test_register_ruby_irb_adjustments(current_actor_context):
+ current_actor_context.run()
+ assert len(current_actor_context.consume(DNFWorkaround)) == 1
+ assert current_actor_context.consume(DNFWorkaround)[0].display_name == 'IRB directory fix'
+ assert os.path.basename(current_actor_context.consume(DNFWorkaround)[0].script_path) == 'handlerubyirbsymlink'
+ assert os.path.exists(current_actor_context.consume(DNFWorkaround)[0].script_path)
diff --git a/repos/system_upgrade/el9toel10/tools/handlerubyirbsymlink b/repos/system_upgrade/el9toel10/tools/handlerubyirbsymlink
new file mode 100755
index 00000000..e9ac40fe
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/tools/handlerubyirbsymlink
@@ -0,0 +1,22 @@
+#!/usr/bin/bash -e
+
+# just in case of hidden files.. not sure why would someone do that, it's more
+# like forgotten cache file possibility, but rather do that..
+shopt -s dotglob
+
+handle_dir() {
+ # Check that $1 is a symlink then unlink it so that RPM
+ # can freely create the directory.
+ if [ ! -L "$1" ]; then
+ return
+ fi
+
+ # There is no configuration or anything that the user should ever customize
+ # and expect to retain.
+ unlink "$1"
+
+ return 0
+}
+
+
+handle_dir /usr/share/ruby/irb
--
2.47.0


@ -1,131 +0,0 @@
From ddefdee20a97d9b5e08502e4348d92212a702cc7 Mon Sep 17 00:00:00 2001
From: Lukas Bezdicka <lbezdick@redhat.com>
Date: Fri, 16 Feb 2024 14:58:02 +0100
Subject: [PATCH 28/55] [ceph][luks] Fix ceph cephvolumescan for cephadm
For cephadm the containers are named ceph-<hash>-osd... while
ceph-ansible still uses the ceph-osd-...
Another issue is that OSDs can contain multiple volumes, so filtering
only the first one is wrong; we need to check each volume for
encryption.
Resolves: rhbz#2264543
Fixes: https://issues.redhat.com/browse/RHEL-25838
---
.../libraries/cephvolumescan.py | 5 +-
.../tests/test_cephvolumescan.py | 50 +++++++++++++++++--
2 files changed, 50 insertions(+), 5 deletions(-)
diff --git a/repos/system_upgrade/common/actors/cephvolumescan/libraries/cephvolumescan.py b/repos/system_upgrade/common/actors/cephvolumescan/libraries/cephvolumescan.py
index b2364104..a9bff005 100644
--- a/repos/system_upgrade/common/actors/cephvolumescan/libraries/cephvolumescan.py
+++ b/repos/system_upgrade/common/actors/cephvolumescan/libraries/cephvolumescan.py
@@ -8,7 +8,7 @@ from leapp.libraries.stdlib import api, CalledProcessError, run
from leapp.models import InstalledRPM
CEPH_CONF = "/etc/ceph/ceph.conf"
-CONTAINER = "ceph-osd"
+CONTAINER = "ceph-.*osd"
def select_osd_container(engine):
@@ -63,7 +63,8 @@ def encrypted_osds_list():
output = get_ceph_lvm_list()
if output is not None:
try:
- result = [output[key][0]['lv_uuid'] for key in output if output[key][0]['tags']['ceph.encrypted']]
+ for key in output:
+ result.extend([element['lv_uuid'] for element in output[key] if element['tags']['ceph.encrypted']])
except KeyError:
# TODO: possibly raise a report item with a medium risk factor
# TODO: possibly create list of problematic osds, extend the cephinfo
diff --git a/repos/system_upgrade/common/actors/cephvolumescan/tests/test_cephvolumescan.py b/repos/system_upgrade/common/actors/cephvolumescan/tests/test_cephvolumescan.py
index f3811c45..168b8fc2 100644
--- a/repos/system_upgrade/common/actors/cephvolumescan/tests/test_cephvolumescan.py
+++ b/repos/system_upgrade/common/actors/cephvolumescan/tests/test_cephvolumescan.py
@@ -8,6 +8,8 @@ from leapp.reporting import Report
CONT_PS_COMMAND_OUTPUT = {
"stdout":
"""CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+ b5a3d8ef25b9 undercloud-0.ctlplane.redhat.local:8787/rh-osbs/rhceph:5 "-n osd.8 -f --set..." \
+ 2 hours ago Up 2 hours ago ceph-bea1a933-0846-4aaa-8223-62cb8cb2873c-osd-8
50d96fe72019 registry.redhat.io/rhceph/rhceph-4-rhel8:latest "/opt/ceph-contain..." \
2 weeks ago Up 2 weeks ceph-osd-0
f93c17b49c40 registry.redhat.io/rhceph/rhceph-4-rhel8:latest "/opt/ceph-contain..." \
@@ -41,6 +43,32 @@ CEPH_VOLUME_OUTPUT = {
"type":"block",
"vg_name":"ceph-a696c40d-6b1d-448d-a40e-fadca22b64bc"
}
+ ],
+ "8":[
+ {
+ "devices": [
+ "/dev/nvme0n1"
+ ],
+ "lv_name": "osd-db-b04857a0-a2a2-40c3-a490-cbe1f892a76c",
+ "lv_uuid": "zcvGix-drzz-JwzP-6ktU-Od6W-N5jL-kxRFa3",
+ "tags":{
+ "ceph.encrypted":"1"
+ },
+ "type": "db",
+ "vg_name": "ceph-b78309b3-bd80-4399-87a3-ac647b216b63"
+ },
+ {
+ "devices": [
+ "/dev/sdb"
+ ],
+ "lv_name": "osd-block-477c303f-5eaf-4be8-b5cc-f6073eb345bf",
+ "lv_uuid": "Mz1dep-D715-Wxh1-zUuS-0cOA-mKXE-UxaEM3",
+ "tags":{
+ "ceph.encrypted":"1"
+ },
+ "type": "block",
+ "vg_name": "ceph-e3e0345b-8be1-40a7-955a-378ba967f954"
+ }
]
}"""
}
@@ -51,7 +79,19 @@ CEPH_LVM_LIST = {
'lv_uuid': 'Tyc0TH-RDxr-ebAF-9mWF-Kh5R-YnvJ-cEcGVn',
'tags': {'ceph.encrypted': '1'},
'type': 'block',
- 'vg_name': 'ceph-a696c40d-6b1d-448d-a40e-fadca22b64bc'}]
+ 'vg_name': 'ceph-a696c40d-6b1d-448d-a40e-fadca22b64bc'}],
+ '8': [{'devices': ['/dev/nvme0n1'],
+ 'lv_name': 'osd-db-b04857a0-a2a2-40c3-a490-cbe1f892a76c',
+ 'lv_uuid': 'zcvGix-drzz-JwzP-6ktU-Od6W-N5jL-kxRFa3',
+ 'tags': {'ceph.encrypted': '1'},
+ 'type': 'db',
+ 'vg_name': 'ceph-b78309b3-bd80-4399-87a3-ac647b216b63'},
+ {'devices': ['/dev/sdb'],
+ 'lv_name': 'osd-block-477c303f-5eaf-4be8-b5cc-f6073eb345bf',
+ 'lv_uuid': 'Mz1dep-D715-Wxh1-zUuS-0cOA-mKXE-UxaEM3',
+ 'tags': {'ceph.encrypted': '1'},
+ 'type': 'block',
+ 'vg_name': 'ceph-e3e0345b-8be1-40a7-955a-378ba967f954'}]
}
@@ -60,7 +100,7 @@ def test_select_osd_container(m_run):
m_run.return_value = CONT_PS_COMMAND_OUTPUT
- assert cephvolumescan.select_osd_container('docker') == "ceph-osd-0"
+ assert cephvolumescan.select_osd_container('docker') == "ceph-bea1a933-0846-4aaa-8223-62cb8cb2873c-osd-8"
@patch('leapp.libraries.actor.cephvolumescan.has_package')
@@ -82,4 +122,8 @@ def test_encrypted_osds_list(m_get_ceph_lvm_list, m_isfile):
m_get_ceph_lvm_list.return_value = CEPH_LVM_LIST
m_isfile.return_value = True
- assert cephvolumescan.encrypted_osds_list() == ['Tyc0TH-RDxr-ebAF-9mWF-Kh5R-YnvJ-cEcGVn']
+ assert cephvolumescan.encrypted_osds_list() == [
+ 'Tyc0TH-RDxr-ebAF-9mWF-Kh5R-YnvJ-cEcGVn',
+ 'zcvGix-drzz-JwzP-6ktU-Od6W-N5jL-kxRFa3',
+ 'Mz1dep-D715-Wxh1-zUuS-0cOA-mKXE-UxaEM3'
+ ]
--
2.51.1
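
An illustrative sketch of the filtering change above (standalone Python; the
data shape is taken from the CEPH_LVM_LIST test fixture, values are
placeholders): every volume of each OSD must be inspected, not just the first.

    # hypothetical example mirroring the fixed encrypted_osds_list() logic
    output = {
        '8': [
            {'lv_uuid': 'uuid-db', 'tags': {'ceph.encrypted': '1'}},
            {'lv_uuid': 'uuid-block', 'tags': {'ceph.encrypted': '1'}},
        ],
    }
    result = []
    for key in output:
        result.extend(
            element['lv_uuid']
            for element in output[key]
            if element['tags'].get('ceph.encrypted')
        )
    # result now holds both UUIDs; the old code returned at most one per OSD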


@ -0,0 +1,40 @@
From 81a3297516fbbd120b0fb870de36f1a1b290dd21 Mon Sep 17 00:00:00 2001
From: Jarek Prokop <jprokop@redhat.com>
Date: Wed, 6 Nov 2024 15:21:14 +0100
Subject: [PATCH 29/40] Expand on the actor docstring for the el8->el9
rubygem-irb symlink fix.
In RHEL 10, the directory is a regular directory again.
The two actors are kept separate rather than sharing a common solution.
Expand the el8->el9 actor's docstring to explain its purpose and to
differentiate the two actors from each other.
---
.../actors/registerrubyirbadjustment/actor.py | 11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/repos/system_upgrade/el8toel9/actors/registerrubyirbadjustment/actor.py b/repos/system_upgrade/el8toel9/actors/registerrubyirbadjustment/actor.py
index ac4d1e6f..a33d8831 100644
--- a/repos/system_upgrade/el8toel9/actors/registerrubyirbadjustment/actor.py
+++ b/repos/system_upgrade/el8toel9/actors/registerrubyirbadjustment/actor.py
@@ -5,7 +5,16 @@ from leapp.tags import FactsPhaseTag, IPUWorkflowTag
class RegisterRubyIRBAdjustment(Actor):
"""
- Registers a workaround which will adjust the Ruby IRB directories during the upgrade.
+ Register a workaround to allow rubygem-irb's directory -> symlink conversion.
+
+ The /usr/share/ruby/irb has been moved from a directory to a symlink
+ in RHEL 9 and this conversion was not handled on RPM level.
+ This leads to DNF reporting package file conflicts when a major upgrade
+ is attempted and rubygem-irb (or ruby-irb) is installed.
+
+ Register "handlerubyirbsymlink" script that removes the directory prior
+ to DNF upgrade and allows it to create the expected symlink in place of
+ the removed directory.
"""
name = 'register_ruby_irb_adjustment'
--
2.47.0


@ -1,36 +0,0 @@
From bf1b5f5f537ff163470b29d8bb7ba452901368eb Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Wed, 22 Oct 2025 17:52:52 +0200
Subject: [PATCH 29/55] Makefile: Do copy commands/ dir in containerized tests
In 7d9ae2c0 the commands dir copying was changed to copy the dir to
tut/lib/$$_VENV/site-packages/leapp/cli/commands/ instead of the normal
location in the container.
This fixed the problem that modifications on the host were not reflected
in the testing containers. However, a new problem was introduced:
modifications to tests in the commands directory were no longer
reflected in the testing containers.
This patch fixes that by copying the entire commands directory both to
the normal location and the virtual env (path above).
---
Makefile | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Makefile b/Makefile
index 9774a475..64115006 100644
--- a/Makefile
+++ b/Makefile
@@ -447,7 +447,7 @@ test_container:
export _CONT_NAME="leapp-repo-tests-$(_TEST_CONTAINER)-cont"; \
$(_CONTAINER_TOOL) ps -q -f name=$$_CONT_NAME && { $(_CONTAINER_TOOL) kill $$_CONT_NAME; $(_CONTAINER_TOOL) rm $$_CONT_NAME; }; \
$(_CONTAINER_TOOL) run -di --name $$_CONT_NAME -v "$$PWD":/repo:Z -e PYTHON_VENV=$$_VENV $$TEST_IMAGE && \
- $(_CONTAINER_TOOL) exec $$_CONT_NAME rsync -aur --delete --exclude 'tut/' --exclude 'docs/' --exclude '**/__pycache__/' --exclude 'packaging/' --exclude '.git/' --exclude 'commands/' /repo/ /repocopy && \
+ $(_CONTAINER_TOOL) exec $$_CONT_NAME rsync -aur --delete --exclude 'tut/' --exclude 'docs/' --exclude '**/__pycache__/' --exclude 'packaging/' --exclude '.git/' /repo/ /repocopy && \
$(_CONTAINER_TOOL) exec $$_CONT_NAME rsync -aur --delete --exclude '**/__pycache__/' /repo/commands/ /repocopy/tut/lib/$$_VENV/site-packages/leapp/cli/commands/ && \
export res=0; \
case $$_VENV in \
--
2.51.1

File diff suppressed because it is too large.


@ -1,70 +0,0 @@
From 87f584d8f9b957b9ae0138d6963077d87ccb2067 Mon Sep 17 00:00:00 2001
From: Peter Mocary <pmocary@redhat.com>
Date: Mon, 20 Oct 2025 11:40:04 +0200
Subject: [PATCH 30/55] skip pre-generation of systemd fstab mount units during
LiveMode upgrade
The new storage initialization solution interfered with LiveMode. Since
LiveMode is a different upgrade approach, we now skip pre-generation of
systemd fstab mount units (mount_unit_generator actor) when
upgrading this way.
---
.../actors/initramfs/mount_units_generator/actor.py | 5 ++++-
.../libraries/mount_unit_generator.py | 8 ++++++--
2 files changed, 10 insertions(+), 3 deletions(-)
diff --git a/repos/system_upgrade/common/actors/initramfs/mount_units_generator/actor.py b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/actor.py
index 5fe25515..dd667513 100644
--- a/repos/system_upgrade/common/actors/initramfs/mount_units_generator/actor.py
+++ b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/actor.py
@@ -1,16 +1,19 @@
from leapp.actors import Actor
from leapp.libraries.actor import mount_unit_generator as mount_unit_generator_lib
-from leapp.models import TargetUserSpaceInfo, UpgradeInitramfsTasks
+from leapp.models import LiveModeConfig, TargetUserSpaceInfo, UpgradeInitramfsTasks
from leapp.tags import InterimPreparationPhaseTag, IPUWorkflowTag
class MountUnitGenerator(Actor):
"""
Sets up storage initialization using systemd's mount units in the upgrade container.
+
+ Note that this storage initialization is skipped when the LiveMode is enabled.
"""
name = 'mount_unit_generator'
consumes = (
+ LiveModeConfig,
TargetUserSpaceInfo,
)
produces = (
diff --git a/repos/system_upgrade/common/actors/initramfs/mount_units_generator/libraries/mount_unit_generator.py b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/libraries/mount_unit_generator.py
index e1060559..943bddd4 100644
--- a/repos/system_upgrade/common/actors/initramfs/mount_units_generator/libraries/mount_unit_generator.py
+++ b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/libraries/mount_unit_generator.py
@@ -5,7 +5,7 @@ import tempfile
from leapp.exceptions import StopActorExecutionError
from leapp.libraries.common import mounting
from leapp.libraries.stdlib import api, CalledProcessError, run
-from leapp.models import TargetUserSpaceInfo, UpgradeInitramfsTasks
+from leapp.models import LiveModeConfig, TargetUserSpaceInfo, UpgradeInitramfsTasks
def run_systemd_fstab_generator(output_directory):
@@ -295,8 +295,12 @@ def request_units_inclusion_in_initramfs(files_to_include):
def setup_storage_initialization():
- userspace_info = next(api.consume(TargetUserSpaceInfo), None)
+ livemode_config = next(api.consume(LiveModeConfig), None)
+ if livemode_config and livemode_config.is_enabled:
+ api.current_logger().debug('Pre-generation of systemd fstab mount units skipped: The LiveMode is enabled.')
+ return
+ userspace_info = next(api.consume(TargetUserSpaceInfo), None)
with mounting.NspawnActions(base_dir=userspace_info.path) as upgrade_container_ctx:
with tempfile.TemporaryDirectory(dir='/var/lib/leapp/', prefix='tmp_systemd_fstab_') as workspace_path:
run_systemd_fstab_generator(workspace_path)
--
2.51.1


@ -0,0 +1,56 @@
From 89afbe8cb41f874f32acddc1e1696132f3531677 Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Fri, 8 Nov 2024 17:40:01 +0100
Subject: [PATCH 31/40] Packaging: Require leapp-framework 6.x + update leapp
deps
The leapp actors configuration feature is present since
leapp-framework 6.0. Update the dependencies to ensure the correct
version of the framework is installed on the system.
Also, leapp requirements have been updated - requiring python3-PyYAML
as it requires YAML parser, bumping leapp-framework-dependencies to 6.
Address the change in leapp-deps metapackage to satisfy leapp
dependencies during the upgrade process.
---
packaging/leapp-repository.spec | 2 +-
packaging/other_specs/leapp-el7toel8-deps.spec | 3 ++-
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/packaging/leapp-repository.spec b/packaging/leapp-repository.spec
index 0d63ba02..570d0df2 100644
--- a/packaging/leapp-repository.spec
+++ b/packaging/leapp-repository.spec
@@ -120,7 +120,7 @@ Requires: leapp-repository-dependencies = %{leapp_repo_deps}
# IMPORTANT: this is capability provided by the leapp framework rpm.
# Check that 'version' instead of the real framework rpm version.
-Requires: leapp-framework >= 5.0, leapp-framework < 6
+Requires: leapp-framework >= 6.0, leapp-framework < 7
# Since we provide sub-commands for the leapp utility, we expect the leapp
# tool to be installed as well.
diff --git a/packaging/other_specs/leapp-el7toel8-deps.spec b/packaging/other_specs/leapp-el7toel8-deps.spec
index d9e94faa..2c662a37 100644
--- a/packaging/other_specs/leapp-el7toel8-deps.spec
+++ b/packaging/other_specs/leapp-el7toel8-deps.spec
@@ -14,7 +14,7 @@
%define leapp_repo_deps 10
-%define leapp_framework_deps 5
+%define leapp_framework_deps 6
# NOTE: the Version contains the %{rhel} macro just for the convenience to
# have always upgrade path between newer and older deps packages. So for
@@ -112,6 +112,7 @@ Requires: python3
Requires: python3-six
Requires: python3-setuptools
Requires: python3-requests
+Requires: python3-PyYAML
%description -n %{ldname}
--
2.47.0


@ -1,56 +0,0 @@
From 827e28de7b707f9fc458e1f5fdad9fffd7474abe Mon Sep 17 00:00:00 2001
From: Tomas Fratrik <tfratrik@redhat.com>
Date: Tue, 12 Aug 2025 16:59:01 +0200
Subject: [PATCH 31/55] pylint: enable consider-using-set-comprehension
Fixed occurrences of list comprehensions wrapped in set() by using
set comprehensions directly, removing disables for
consider-using-set-comprehension added for Python 2 compatibility.
Jira: RHELMISC-16038
---
.pylintrc | 1 -
.../checkconsumedassets/tests/test_asset_version_checking.py | 2 +-
.../common/actors/selinux/selinuxapplycustom/actor.py | 4 +---
3 files changed, 2 insertions(+), 5 deletions(-)
diff --git a/.pylintrc b/.pylintrc
index 5d75df40..e54d9a54 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -45,7 +45,6 @@ disable=
too-many-positional-arguments, # we cannot set yet max-possitional-arguments unfortunately
# new for python3 version of pylint
useless-object-inheritance,
- consider-using-set-comprehension, # pylint3 force to use comprehension in place we don't want (py2 doesnt have these options, for inline skip)
unnecessary-pass,
invalid-envvar-default, # pylint3 warnings envvar returns str/none by default
bad-option-value, # python 2 doesn't have import-outside-toplevel, but in some case we need to import outside toplevel
diff --git a/repos/system_upgrade/common/actors/checkconsumedassets/tests/test_asset_version_checking.py b/repos/system_upgrade/common/actors/checkconsumedassets/tests/test_asset_version_checking.py
index 9c324b44..f37dcea4 100644
--- a/repos/system_upgrade/common/actors/checkconsumedassets/tests/test_asset_version_checking.py
+++ b/repos/system_upgrade/common/actors/checkconsumedassets/tests/test_asset_version_checking.py
@@ -44,4 +44,4 @@ def test_make_report_entries_with_unique_urls():
docs_url_to_title_map = {'/path/to/asset1': ['asset1_title1', 'asset1_title2'],
'/path/to/asset2': ['asset2_title']}
report_urls = check_consumed_assets_lib.make_report_entries_with_unique_urls(docs_url_to_title_map)
- assert set([ru.value['url'] for ru in report_urls]) == {'/path/to/asset1', '/path/to/asset2'}
+ assert {ru.value['url'] for ru in report_urls} == {'/path/to/asset1', '/path/to/asset2'}
diff --git a/repos/system_upgrade/common/actors/selinux/selinuxapplycustom/actor.py b/repos/system_upgrade/common/actors/selinux/selinuxapplycustom/actor.py
index 4856f36a..db8fe8ac 100644
--- a/repos/system_upgrade/common/actors/selinux/selinuxapplycustom/actor.py
+++ b/repos/system_upgrade/common/actors/selinux/selinuxapplycustom/actor.py
@@ -40,9 +40,7 @@ class SELinuxApplyCustom(Actor):
return
# get list of policy modules after the upgrade
- installed_modules = set(
- [module[0] for module in selinuxapplycustom.list_selinux_modules()]
- )
+ installed_modules = {module[0] for module in selinuxapplycustom.list_selinux_modules()}
# import custom SElinux modules
for semodules in self.consume(SELinuxModules):
--
2.51.1


@ -1,165 +0,0 @@
From 006517ea2d69d3f0d9e3de2eb67bfb4d32f20551 Mon Sep 17 00:00:00 2001
From: Tomas Fratrik <tfratrik@redhat.com>
Date: Wed, 13 Aug 2025 10:46:10 +0200
Subject: [PATCH 32/55] pylint: enable consider-using-with
Emitted when a resource-allocating assignment or call could be replaced
by a 'with' block. Enabling this warning enforces using 'with' to ensure
resources are properly released even if an exception occurs.
* ifcfgscanner: use StringIO in tests instead of mock_open for iteration support with 'with open'
Jira: RHELMISC-16038
---
.pylintrc | 1 -
.../ifcfgscanner/libraries/ifcfgscanner.py | 27 +++++++++----------
.../tests/unit_test_ifcfgscanner.py | 8 +++---
.../luksscanner/tests/test_luksdump_parser.py | 8 +++---
.../scansaphana/tests/test_scansaphana.py | 6 ++---
.../system_upgrade/common/libraries/guards.py | 2 +-
6 files changed, 25 insertions(+), 27 deletions(-)
diff --git a/.pylintrc b/.pylintrc
index e54d9a54..fd770061 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -51,7 +51,6 @@ disable=
super-with-arguments, # required in python 2
raise-missing-from, # no 'raise from' in python 2
use-a-generator, # cannot be modified because of Python2 support
- consider-using-with, # on bunch spaces we cannot change that...
duplicate-string-formatting-argument, # TMP: will be fixed in close future
consider-using-f-string, # sorry, not gonna happen, still have to support py2
use-dict-literal,
diff --git a/repos/system_upgrade/common/actors/ifcfgscanner/libraries/ifcfgscanner.py b/repos/system_upgrade/common/actors/ifcfgscanner/libraries/ifcfgscanner.py
index 683327b3..f0c8b847 100644
--- a/repos/system_upgrade/common/actors/ifcfgscanner/libraries/ifcfgscanner.py
+++ b/repos/system_upgrade/common/actors/ifcfgscanner/libraries/ifcfgscanner.py
@@ -18,23 +18,22 @@ def process_ifcfg(filename, secrets=False):
return None
properties = []
- for line in open(filename).readlines():
- try:
- (name, value) = line.split("#")[0].strip().split("=")
+ with open(filename) as f:
+ for line in f:
+ try:
+ (name, value) = line.split("#")[0].strip().split("=")
+ except ValueError:
+ # We're not interested in lines that are not
+ # simple assignments. Play it safe.
+ continue
+
if secrets:
value = None
- except ValueError:
- # We're not interested in lines that are not
- # simple assignments. Play it safe.
- continue
-
- # Deal with simple quoting. We don't expand anything, nor do
- # multiline strings or anything of that sort.
- if value is not None and len(value) > 1 and value[0] == value[-1]:
- if value.startswith('"') or value.startswith("'"):
+ elif len(value) > 1 and value[0] in ('"', "'") and value[0] == value[-1]:
+ # Deal with simple quoting. We don't expand anything, nor do
+ # multiline strings or anything of that sort.
value = value[1:-1]
-
- properties.append(IfCfgProperty(name=name, value=value))
+ properties.append(IfCfgProperty(name=name, value=value))
return properties
diff --git a/repos/system_upgrade/common/actors/ifcfgscanner/tests/unit_test_ifcfgscanner.py b/repos/system_upgrade/common/actors/ifcfgscanner/tests/unit_test_ifcfgscanner.py
index d3b4846f..d996de84 100644
--- a/repos/system_upgrade/common/actors/ifcfgscanner/tests/unit_test_ifcfgscanner.py
+++ b/repos/system_upgrade/common/actors/ifcfgscanner/tests/unit_test_ifcfgscanner.py
@@ -1,5 +1,6 @@
import errno
import textwrap
+from io import StringIO
from os.path import basename
import mock
@@ -63,8 +64,7 @@ def test_ifcfg1(monkeypatch):
KEY_TYPE=key
""")
- mock_config = mock.mock_open(read_data=ifcfg_file)
- with mock.patch(_builtins_open, mock_config):
+ with mock.patch(_builtins_open, return_value=StringIO(ifcfg_file)):
monkeypatch.setattr(ifcfgscanner, "listdir", _listdir_ifcfg)
monkeypatch.setattr(ifcfgscanner.path, "exists", _exists_ifcfg)
monkeypatch.setattr(api, "produce", produce_mocked())
@@ -110,8 +110,8 @@ def test_ifcfg_key(monkeypatch):
Report ifcfg secrets from keys- file.
"""
- mock_config = mock.mock_open(read_data="KEY_PASSPHRASE1=Hell0")
- with mock.patch(_builtins_open, mock_config):
+ file_data = "KEY_PASSPHRASE1=Hell0"
+ with mock.patch(_builtins_open, side_effect=lambda *a, **k: StringIO(file_data)):
monkeypatch.setattr(ifcfgscanner, "listdir", _listdir_ifcfg)
monkeypatch.setattr(ifcfgscanner.path, "exists", _exists_keys)
monkeypatch.setattr(api, "produce", produce_mocked())
diff --git a/repos/system_upgrade/common/actors/luksscanner/tests/test_luksdump_parser.py b/repos/system_upgrade/common/actors/luksscanner/tests/test_luksdump_parser.py
index 4b190149..f0482eef 100644
--- a/repos/system_upgrade/common/actors/luksscanner/tests/test_luksdump_parser.py
+++ b/repos/system_upgrade/common/actors/luksscanner/tests/test_luksdump_parser.py
@@ -7,8 +7,8 @@ CUR_DIR = os.path.dirname(os.path.abspath(__file__))
def test_luksdump_parser_luks1(current_actor_context):
- f = open(os.path.join(CUR_DIR, 'files/luksDump_nvme0n1p3_luks1.txt'))
- parsed_dict = LuksDumpParser.parse(f.readlines())
+ with open(os.path.join(CUR_DIR, 'files/luksDump_nvme0n1p3_luks1.txt')) as f:
+ parsed_dict = LuksDumpParser.parse(f.readlines())
assert parsed_dict["Version"] == "1"
assert parsed_dict["Cipher name"] == "aes"
@@ -39,8 +39,8 @@ def test_luksdump_parser_luks1(current_actor_context):
def test_luksdump_parser_luks2_tokens(current_actor_context):
- f = open(os.path.join(CUR_DIR, 'files/luksDump_nvme0n1p3_luks2_tokens.txt'))
- parsed_dict = LuksDumpParser.parse(f.readlines())
+ with open(os.path.join(CUR_DIR, 'files/luksDump_nvme0n1p3_luks2_tokens.txt')) as f:
+ parsed_dict = LuksDumpParser.parse(f.readlines())
assert parsed_dict["Version"] == "2"
assert parsed_dict["Epoch"] == "9"
diff --git a/repos/system_upgrade/common/actors/scansaphana/tests/test_scansaphana.py b/repos/system_upgrade/common/actors/scansaphana/tests/test_scansaphana.py
index 0b55c9fb..38a1cae7 100644
--- a/repos/system_upgrade/common/actors/scansaphana/tests/test_scansaphana.py
+++ b/repos/system_upgrade/common/actors/scansaphana/tests/test_scansaphana.py
@@ -77,9 +77,9 @@ class SubprocessCall(object):
assert args[0][0:3] == ['sudo', '-u', self.admusername]
cmd = args[0][3:]
kwargs.pop('checked', None)
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
- p.wait()
- return {'exit_code': p.returncode, 'stdout': p.stdout.read()}
+ with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p:
+ stdout, stderr = p.communicate()
+ return {'exit_code': p.returncode, 'stdout': stdout.decode('utf-8'), 'stderr': stderr.decode('utf-8')}
def test_scansaphana_get_instance_status(monkeypatch):
diff --git a/repos/system_upgrade/common/libraries/guards.py b/repos/system_upgrade/common/libraries/guards.py
index c8001817..ea2bf4dd 100644
--- a/repos/system_upgrade/common/libraries/guards.py
+++ b/repos/system_upgrade/common/libraries/guards.py
@@ -34,7 +34,7 @@ def guarded_execution(*guards):
def connection_guard(url='https://example.com'):
def closure():
try:
- urlopen(url)
+ urlopen(url) # pylint: disable=consider-using-with
return None
except URLError as e:
cause = '''Failed to open url '{url}' with error: {error}'''.format(url=url, error=e)
--
2.51.1
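
A minimal generic illustration of the pattern this pylint check enforces (not
taken from the repository):

    # flagged by consider-using-with: the handle leaks if an exception is raised
    f = open('ifcfg-eth0')
    lines = f.readlines()
    f.close()

    # preferred: the file is closed even when an exception occurs
    with open('ifcfg-eth0') as f:
        lines = f.readlines()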


@ -0,0 +1,48 @@
From 36b93e4a2504f72e5a371a75a23e7cd2c695b84b Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Sun, 6 Oct 2024 21:01:13 +0200
Subject: [PATCH 32/40] spec: create /etc/leapp/actor_conf.d
Add additional build steps to the specfile that create the actor
configuration directory. The directory is owned by the package, so
it gets removed when the user uninstalls leapp.
Also prepare commented-out lines for the future, when we will want to
include configuration files as part of the RPM.
---
etc/leapp/actor_conf.d/.gitkeep | 0
packaging/leapp-repository.spec | 7 +++++++
2 files changed, 7 insertions(+)
create mode 100644 etc/leapp/actor_conf.d/.gitkeep
diff --git a/etc/leapp/actor_conf.d/.gitkeep b/etc/leapp/actor_conf.d/.gitkeep
new file mode 100644
index 00000000..e69de29b
diff --git a/packaging/leapp-repository.spec b/packaging/leapp-repository.spec
index 570d0df2..828355bf 100644
--- a/packaging/leapp-repository.spec
+++ b/packaging/leapp-repository.spec
@@ -250,6 +250,11 @@ install -m 0755 -d %{buildroot}%{_sysconfdir}/leapp/files/
install -m 0644 etc/leapp/transaction/* %{buildroot}%{_sysconfdir}/leapp/transaction
install -m 0644 etc/leapp/files/* %{buildroot}%{_sysconfdir}/leapp/files
+# Actor configuration dir
+install -m 0755 -d %{buildroot}%{_sysconfdir}/leapp/actor_conf.d/
+# uncomment to install existing configs
+#install -m 0644 etc/leapp/actor_conf.d/* %%{buildroot}%%{_sysconfdir}/leapp/actor_conf.d
+
# install CLI commands for the leapp utility on the expected path
install -m 0755 -d %{buildroot}%{leapp_python_sitelib}/leapp/cli/
cp -r commands %{buildroot}%{leapp_python_sitelib}/leapp/cli/
@@ -295,6 +300,8 @@ done;
%dir %{custom_repositorydir}
%dir %{leapp_python_sitelib}/leapp/cli/commands
%config %{_sysconfdir}/leapp/files/*
+# uncomment to package installed configs
+#%%config %%{_sysconfdir}/leapp/actor_conf.d/*
%{_sysconfdir}/leapp/repos.d/*
%{_sysconfdir}/leapp/transaction/*
%{repositorydir}/*
--
2.47.0


@ -1,41 +0,0 @@
From 21bf23c218966040d4c3104d04ce0bcc39d0fb3d Mon Sep 17 00:00:00 2001
From: Tomas Fratrik <tfratrik@redhat.com>
Date: Wed, 13 Aug 2025 11:36:36 +0200
Subject: [PATCH 33/55] pylint: duplicate-string-formatting-argument
Jira: RHELMISC-16038
---
.pylintrc | 1 -
repos/system_upgrade/common/libraries/fetch.py | 4 ++--
2 files changed, 2 insertions(+), 3 deletions(-)
diff --git a/.pylintrc b/.pylintrc
index fd770061..aaa5d99e 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -51,7 +51,6 @@ disable=
super-with-arguments, # required in python 2
raise-missing-from, # no 'raise from' in python 2
use-a-generator, # cannot be modified because of Python2 support
- duplicate-string-formatting-argument, # TMP: will be fixed in close future
consider-using-f-string, # sorry, not gonna happen, still have to support py2
use-dict-literal,
redundant-u-string-prefix, # still have py2 to support
diff --git a/repos/system_upgrade/common/libraries/fetch.py b/repos/system_upgrade/common/libraries/fetch.py
index 82bf4ff3..baf2c4eb 100644
--- a/repos/system_upgrade/common/libraries/fetch.py
+++ b/repos/system_upgrade/common/libraries/fetch.py
@@ -56,8 +56,8 @@ def _request_data(service_path, cert, proxies, timeout=REQUEST_TIMEOUT):
timeout = (timeout[0], timeout[1] + 10)
if attempt > MAX_ATTEMPTS:
logger.warning(
- 'Attempt {} of {} to get {} failed: {}.'
- .format(MAX_ATTEMPTS, MAX_ATTEMPTS, service_path, etype_msg)
+ 'Attempt {max} of {max} to get {service} failed: {error}.'
+ .format(max=MAX_ATTEMPTS, service=service_path, error=etype_msg)
)
raise
--
2.51.1


@ -0,0 +1,31 @@
From 87db66c863104fea824a4406732cbe233ffee412 Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Wed, 13 Nov 2024 15:05:50 +0100
Subject: [PATCH 33/40] spec: drop .gitkeep files from the RPM
We have several .gitkeep files in the repo as we want to have some
directories present in git however these directories are empty
otherwise. This is common hack to achieve this, but we do not want
to have these files really in the resulting RPMs. So we just remove
them.
---
packaging/leapp-repository.spec | 3 +++
1 file changed, 3 insertions(+)
diff --git a/packaging/leapp-repository.spec b/packaging/leapp-repository.spec
index 828355bf..2bb52505 100644
--- a/packaging/leapp-repository.spec
+++ b/packaging/leapp-repository.spec
@@ -272,6 +272,9 @@ rm -rf %{buildroot}%{repositorydir}/common/actors/testactor
find %{buildroot}%{repositorydir}/common -name "test.py" -delete
rm -rf `find %{buildroot}%{repositorydir} -name "tests" -type d`
find %{buildroot}%{repositorydir} -name "Makefile" -delete
+# .gitkeep file is used to have a directory in the repo. but we do not want these
+# files in the resulting RPM
+find %{buildroot} -name .gitkeep -delete
for DIRECTORY in $(find %{buildroot}%{repositorydir}/ -mindepth 1 -maxdepth 1 -type d);
do
--
2.47.0


@ -0,0 +1,95 @@
From 140a0bbb689814041fa6a03ee2b703e70a20f2f2 Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Sun, 10 Nov 2024 13:54:20 +0100
Subject: [PATCH 34/40] cli: load actor configuration
Load actor configuration when running `leapp upgrade` or `leapp
preupgrade`. The configuration is loaded, saved to leapp's DB,
and remains available to all actors via the framework's global variable.
---
commands/command_utils.py | 32 +++++++++++++++++++++++++++++++-
commands/preupgrade/__init__.py | 3 +++
commands/upgrade/__init__.py | 3 +++
3 files changed, 37 insertions(+), 1 deletion(-)
diff --git a/commands/command_utils.py b/commands/command_utils.py
index 2810a542..190f5f03 100644
--- a/commands/command_utils.py
+++ b/commands/command_utils.py
@@ -1,10 +1,12 @@
+import hashlib
import json
import os
import re
import resource
+from leapp.actors import config as actor_config
from leapp.exceptions import CommandError
-from leapp.utils import path
+from leapp.utils import audit, path
HANA_BASE_PATH = '/hana/shared'
HANA_SAPCONTROL_PATH_X86_64 = 'exe/linuxx86_64/hdb/sapcontrol'
@@ -178,3 +180,31 @@ def set_resource_limits():
if soft_fsize != fsize_limit:
set_resource_limit(resource.RLIMIT_FSIZE, fsize_limit, fsize_limit)
+
+
+def load_actor_configs_and_store_it_in_db(context, repositories, framework_cfg):
+ """
+ Load actor configuration so that actor's can access it and store it into leapp db.
+
+ :param context: Current execution context
+ :param repositories: Discovered repositories
+ :param framework_cfg: Leapp's configuration
+ """
+ # Read the Actor Config and validate it against the schemas saved in the
+ # configuration.
+
+ actor_config_schemas = tuple(actor.config_schemas for actor in repositories.actors)
+ actor_config_schemas = actor_config.normalize_schemas(actor_config_schemas)
+ actor_config_path = framework_cfg.get('actor_config', 'path')
+
+ # Note: actor_config.load() stores the loaded actor config into a global
+ # variable which can then be accessed by functions in that file. Is this
+ # the right way to store that information?
+ actor_cfg = actor_config.load(actor_config_path, actor_config_schemas)
+
+ # Dump the collected configuration, checksum it and store it inside the DB
+ config_text = json.dumps(actor_cfg)
+ config_text_hash = hashlib.sha256(config_text.encode('utf-8')).hexdigest()
+ config_data = audit.ActorConfigData(config=config_text, hash_id=config_text_hash)
+ db_config = audit.ActorConfig(config=config_data, context=context)
+ db_config.store()
diff --git a/commands/preupgrade/__init__.py b/commands/preupgrade/__init__.py
index a9fa40e0..631eca6b 100644
--- a/commands/preupgrade/__init__.py
+++ b/commands/preupgrade/__init__.py
@@ -62,6 +62,9 @@ def preupgrade(args, breadcrumbs):
command_utils.set_resource_limits()
workflow = repositories.lookup_workflow('IPUWorkflow')()
+
+ command_utils.load_actor_configs_and_store_it_in_db(context, repositories, cfg)
+
util.warn_if_unsupported(configuration)
util.process_whitelist_experimental(repositories, workflow, configuration, logger)
with beautify_actor_exception():
diff --git a/commands/upgrade/__init__.py b/commands/upgrade/__init__.py
index c7487fde..3dedd438 100644
--- a/commands/upgrade/__init__.py
+++ b/commands/upgrade/__init__.py
@@ -93,6 +93,9 @@ def upgrade(args, breadcrumbs):
command_utils.set_resource_limits()
workflow = repositories.lookup_workflow('IPUWorkflow')(auto_reboot=args.reboot)
+
+ command_utils.load_actor_configs_and_store_it_in_db(context, repositories, cfg)
+
util.process_whitelist_experimental(repositories, workflow, configuration, logger)
util.warn_if_unsupported(configuration)
with beautify_actor_exception():
--
2.47.0
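
For context, a sketch of how an actor is expected to declare and consume such
configuration once it is loaded (the access pattern matches the check_rhui
changes later in this series; the 'example' section, field and actor names are
made up for illustration):

    from leapp.actors import Actor
    from leapp.actors.config import Config
    from leapp.libraries.stdlib import api
    from leapp.models import fields
    from leapp.tags import ChecksPhaseTag, IPUWorkflowTag


    class ExampleFlag(Config):
        # hypothetical config field, for illustration only
        section = 'example'
        name = 'enable_feature'
        type_ = fields.Boolean()
        default = False
        description = """Example flag demonstrating actor configuration."""


    class ExampleConfigConsumer(Actor):
        """Hypothetical actor reading the configuration loaded by the CLI."""

        name = 'example_config_consumer'
        config_schemas = (ExampleFlag,)  # collected by load_actor_configs_and_store_it_in_db()
        consumes = ()
        produces = ()
        tags = (IPUWorkflowTag, ChecksPhaseTag)

        def process(self):
            # values come from the actor config dir, keyed by section and field name
            flag = api.current_actor().config['example']['enable_feature']
            api.current_logger().info('enable_feature = %s', flag)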


@ -1,25 +0,0 @@
From 9cd95f0fb90a60b650ddc5bd05df6807f0e80a60 Mon Sep 17 00:00:00 2001
From: Tomas Fratrik <tfratrik@redhat.com>
Date: Wed, 13 Aug 2025 13:13:24 +0200
Subject: [PATCH 34/55] pylint: enable use-dict-literal
Jira: RHELMISC-16038
---
.pylintrc | 1 -
1 file changed, 1 deletion(-)
diff --git a/.pylintrc b/.pylintrc
index aaa5d99e..bc051513 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -52,7 +52,6 @@ disable=
raise-missing-from, # no 'raise from' in python 2
use-a-generator, # cannot be modified because of Python2 support
consider-using-f-string, # sorry, not gonna happen, still have to support py2
- use-dict-literal,
redundant-u-string-prefix, # still have py2 to support
logging-format-interpolation,
logging-not-lazy,
--
2.51.1


@ -0,0 +1,157 @@
From f3d38325fb525bca427a2b00e2bfb73b9297c36a Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Sun, 10 Nov 2024 14:35:26 +0100
Subject: [PATCH 35/40] configs(common): introduce RHUI configuration
Introduce a common configuration definition for RHUI-related decisions.
The configuration is atomic in nature - if the user wants to overwrite
leapp's decisions, they must overwrite all of them. Essentially, all
fields of the RHUI_SETUPS cloud map entry can be configured. Almost no
non-empty defaults are provided, as no reasonable defaults can be given:
the setup parameters differ from provider to provider. Therefore, the
default values are empty, so that an actor can later detect whether all
fields of the RHUI config have been filled.
Jira ref: RHEL-56251
---
repos/system_upgrade/common/configs/rhui.py | 127 ++++++++++++++++++++
1 file changed, 127 insertions(+)
create mode 100644 repos/system_upgrade/common/configs/rhui.py
diff --git a/repos/system_upgrade/common/configs/rhui.py b/repos/system_upgrade/common/configs/rhui.py
new file mode 100644
index 00000000..ade9bab9
--- /dev/null
+++ b/repos/system_upgrade/common/configs/rhui.py
@@ -0,0 +1,127 @@
+"""
+Configuration keys for RHUI.
+
+In the case of RHUI in private regions it is usual that the publicly known
+RHUI data is not valid. In such cases it is possible to provide the correct
+expected RHUI data to adjust the in-place upgrade process.
+"""
+
+from leapp.actors.config import Config
+from leapp.models import fields
+
+RHUI_CONFIG_SECTION = 'rhui'
+
+
+# @Note(mhecko): We use to distinguish config instantiated from default values that we should ignore
+# # Maybe we could make all config values None and detect it that way, but then we cannot
+# # give the user an example how the config should look like.
+class RhuiUseConfig(Config):
+ section = RHUI_CONFIG_SECTION
+ name = "use_config"
+ type_ = fields.Boolean()
+ default = False
+ description = """
+ Use values provided in the configuration file to override leapp's decisions.
+ """
+
+
+class RhuiSourcePkgs(Config):
+ section = RHUI_CONFIG_SECTION
+ name = "source_clients"
+ type_ = fields.List(fields.String())
+ default = []
+ description = """
+ The name of the source RHUI client RPMs (to be removed from the system).
+ """
+
+
+class RhuiTargetPkgs(Config):
+ section = RHUI_CONFIG_SECTION
+ name = "target_clients"
+ type_ = fields.List(fields.String())
+ default = []
+ description = """
+ The name of the target RHUI client RPM (to be installed on the system).
+ """
+
+
+class RhuiCloudProvider(Config):
+ section = RHUI_CONFIG_SECTION
+ name = "cloud_provider"
+ type_ = fields.String()
+ default = ""
+ description = """
+ Cloud provider name that should be used internally by leapp.
+
+ Leapp recognizes the following cloud providers:
+ - azure
+ - aws
+ - google
+
+ Cloud provider information is used for triggering some provider-specific modifications. The value also
+ influences how leapp determines target repositories to enable.
+ """
+
+
+# @Note(mhecko): We likely don't need this. We need the variant primarily to grab files from a correct directory
+# in leapp-rhui-<provider> folders.
+class RhuiCloudVariant(Config):
+ section = RHUI_CONFIG_SECTION
+ name = "image_variant"
+ type_ = fields.String()
+ default = "ordinary"
+ description = """
+ RHEL variant of the source system - is the source system SAP-specific image?
+
+ Leapp recognizes the following variants:
+ - ordinary # The source system has not been deployed from a RHEL with SAP image
+ - sap # RHEL SAP images
+ - sap-apps # RHEL SAP Apps images (Azure only)
+ - sap-ha # RHEL HA Apps images (HA only)
+
+ Cloud provider information is used for triggering some provider-specific modifications. The value also
+ influences how leapp determines target repositories to enable.
+
+ Default:
+ "ordinary"
+ """
+
+
+class RhuiUpgradeFiles(Config):
+ section = RHUI_CONFIG_SECTION
+ name = "upgrade_files"
+ type_ = fields.StringMap(fields.String())
+ default = dict()
+ description = """
+ A mapping from source file paths to the destinations where they should be
+ placed in the upgrade container.
+
+ Typically, these files should be provided by leapp-rhui-<PROVIDER> packages.
+
+ These files are needed to facilitate access to target repositories. Typical examples are: repofile(s),
+ certificates and keys.
+ """
+
+
+class RhuiTargetRepositoriesToUse(Config):
+ section = RHUI_CONFIG_SECTION
+ name = "rhui_target_repositories_to_use"
+ type_ = fields.List(fields.String())
+ description = """
+ List of target repositories enabled during the upgrade. Similar to executing leapp with --enablerepo.
+
+ The repositories to be enabled need to be either in the repofiles listed in the `upgrade_files` field,
+ or in repofiles present on the source system.
+ """
+ default = list()
+
+
+all_rhui_cfg = (
+ RhuiTargetPkgs,
+ RhuiUpgradeFiles,
+ RhuiTargetRepositoriesToUse,
+ RhuiCloudProvider,
+ RhuiCloudVariant,
+ RhuiSourcePkgs,
+ RhuiUseConfig
+)
--
2.47.0
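
Once loaded, the 'rhui' section of the actor configuration is a plain mapping
keyed by the field names defined above. The shape below mirrors the fixture
used by the check_rhui tests later in this series (values are placeholders,
not recommendations):

    rhui_config = {
        'use_config': True,
        'source_clients': ['client_source'],
        'target_clients': ['client_target'],
        'cloud_provider': 'aws',
        'image_variant': 'ordinary',
        'upgrade_files': {'/root/file.repo': '/etc/yum.repos.d/'},
        'rhui_target_repositories_to_use': ['repoid_to_use'],
    }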


@ -1,43 +0,0 @@
From d99c059cb0eae4d720a2d48fb39acf6e93bc0b0e Mon Sep 17 00:00:00 2001
From: Tomas Fratrik <tfratrik@redhat.com>
Date: Wed, 13 Aug 2025 13:19:58 +0200
Subject: [PATCH 35/55] pylint: enable redundant-u-string-prefix
Jira: RHELMISC-16038
---
.pylintrc | 1 -
.../common/actors/rootscanner/tests/test_rootscanner.py | 6 +++---
2 files changed, 3 insertions(+), 4 deletions(-)
diff --git a/.pylintrc b/.pylintrc
index bc051513..7d938715 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -52,7 +52,6 @@ disable=
raise-missing-from, # no 'raise from' in python 2
use-a-generator, # cannot be modified because of Python2 support
consider-using-f-string, # sorry, not gonna happen, still have to support py2
- redundant-u-string-prefix, # still have py2 to support
logging-format-interpolation,
logging-not-lazy,
use-yield-from # yield from cannot be used until we require python 3.3 or greater
diff --git a/repos/system_upgrade/common/actors/rootscanner/tests/test_rootscanner.py b/repos/system_upgrade/common/actors/rootscanner/tests/test_rootscanner.py
index 659a3017..07ce5da8 100644
--- a/repos/system_upgrade/common/actors/rootscanner/tests/test_rootscanner.py
+++ b/repos/system_upgrade/common/actors/rootscanner/tests/test_rootscanner.py
@@ -9,9 +9,9 @@ from leapp.libraries.actor.rootscanner import scan_dir
@pytest.mark.parametrize("filename,symlink,count_invalid",
- [(u'a_utf_file'.encode('utf-8'), u"utf8_symlink".encode('utf-8'), 0),
- (u'простофайл'.encode('koi8-r'), u"этонеутф8".encode('koi8-r'), 2),
- (u'a_utf_file'.encode('utf-8'), u"этонеутф8".encode('koi8-r'), 1)])
+ [('a_utf_file'.encode('utf-8'), "utf8_symlink".encode('utf-8'), 0),
+ ('простофайл'.encode('koi8-r'), "этонеутф8".encode('koi8-r'), 2),
+ ('a_utf_file'.encode('utf-8'), "этонеутф8".encode('koi8-r'), 1)])
def test_invalid_symlinks(filename, symlink, count_invalid):
# Let's create a directory with both valid utf-8 and non-utf symlinks
# NOTE(ivasilev) As this has to run for python2 as well can't use the nice tempfile.TemporaryDirectory way
--
2.51.1


@ -0,0 +1,457 @@
From a03e8e5d10c1d6f3cdae216fafa0d7f0d0896494 Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Sun, 10 Nov 2024 14:36:07 +0100
Subject: [PATCH 36/40] check_rhui: read RHUI configuration
Extend the check_rhui actor to read user-provided RHUI configuration.
If the provided configuration values say that the user wants to
overrwrite leapp's decisions, then the patch checks whether all values
are provided. If so, corresponding RHUIInfo message is produced. The
only implemented safe-guards are those that prevent the user from
accidentaly specifying a non-existing file to be copied into the
scrach container during us preparing to download target userspace
content. If the user provides only some of the configuration values
the upgrade is terminated early with an error, providing quick feedback
about misconfiguration. The patch has been designed to allow development
of upgrades on previously unknown clouds (clouds without an entry in
RHUI_SETUPS).
Jira ref: RHEL-56251
---
.../common/actors/cloud/checkrhui/actor.py | 4 +
.../cloud/checkrhui/libraries/checkrhui.py | 102 +++++++++-
.../tests/component_test_checkrhui.py | 178 ++++++++++++++++--
3 files changed, 265 insertions(+), 19 deletions(-)
diff --git a/repos/system_upgrade/common/actors/cloud/checkrhui/actor.py b/repos/system_upgrade/common/actors/cloud/checkrhui/actor.py
index 593e73e5..933ffcb3 100644
--- a/repos/system_upgrade/common/actors/cloud/checkrhui/actor.py
+++ b/repos/system_upgrade/common/actors/cloud/checkrhui/actor.py
@@ -1,4 +1,5 @@
from leapp.actors import Actor
+from leapp.configs.common.rhui import all_rhui_cfg
from leapp.libraries.actor import checkrhui as checkrhui_lib
from leapp.models import (
CopyFile,
@@ -8,6 +9,7 @@ from leapp.models import (
RequiredTargetUserspacePackages,
RHUIInfo,
RpmTransactionTasks,
+ TargetRepositories,
TargetUserSpacePreupgradeTasks
)
from leapp.reporting import Report
@@ -21,6 +23,7 @@ class CheckRHUI(Actor):
"""
name = 'checkrhui'
+ config_schemas = all_rhui_cfg
consumes = (InstalledRPM,)
produces = (
KernelCmdlineArg,
@@ -28,6 +31,7 @@ class CheckRHUI(Actor):
RequiredTargetUserspacePackages,
Report, DNFPluginTask,
RpmTransactionTasks,
+ TargetRepositories,
TargetUserSpacePreupgradeTasks,
CopyFile,
)
diff --git a/repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py b/repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py
index 3b217917..64e36e08 100644
--- a/repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py
+++ b/repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py
@@ -2,17 +2,29 @@ import itertools
import os
from collections import namedtuple
+import leapp.configs.common.rhui as rhui_config_lib
from leapp import reporting
+from leapp.configs.common.rhui import ( # Import all config fields so we are not using their name attributes directly
+ RhuiCloudProvider,
+ RhuiCloudVariant,
+ RhuiSourcePkgs,
+ RhuiTargetPkgs,
+ RhuiTargetRepositoriesToUse,
+ RhuiUpgradeFiles,
+ RhuiUseConfig
+)
from leapp.exceptions import StopActorExecutionError
from leapp.libraries.common import rhsm, rhui
from leapp.libraries.common.config import version
from leapp.libraries.stdlib import api
from leapp.models import (
CopyFile,
+ CustomTargetRepository,
DNFPluginTask,
InstalledRPM,
RHUIInfo,
RpmTransactionTasks,
+ TargetRepositories,
TargetRHUIPostInstallTasks,
TargetRHUIPreInstallTasks,
TargetRHUISetupInfo,
@@ -291,11 +303,11 @@ def produce_rhui_info_to_setup_target(rhui_family, source_setup_desc, target_set
api.produce(rhui_info)
-def produce_rpms_to_install_into_target(source_setup, target_setup):
- to_install = sorted(target_setup.clients - source_setup.clients)
- to_remove = sorted(source_setup.clients - target_setup.clients)
+def produce_rpms_to_install_into_target(source_clients, target_clients):
+ to_install = sorted(target_clients - source_clients)
+ to_remove = sorted(source_clients - target_clients)
- api.produce(TargetUserSpacePreupgradeTasks(install_rpms=sorted(target_setup.clients)))
+ api.produce(TargetUserSpacePreupgradeTasks(install_rpms=sorted(target_clients)))
if to_install or to_remove:
api.produce(RpmTransactionTasks(to_install=to_install, to_remove=to_remove))
@@ -316,7 +328,85 @@ def inform_about_upgrade_with_rhui_without_no_rhsm():
return False
+def emit_rhui_setup_tasks_based_on_config(rhui_config_dict):
+ config_upgrade_files = rhui_config_dict[RhuiUpgradeFiles.name]
+
+ nonexisting_files_to_copy = []
+ for source_path in config_upgrade_files:
+ if not os.path.exists(source_path):
+ nonexisting_files_to_copy.append(source_path)
+
+ if nonexisting_files_to_copy:
+ details_lines = ['The following files were not found:']
+ # Use .format and put backticks around paths so that weird unicode spaces will be easily seen
+ details_lines.extend(' - `{0}`'.format(path) for path in nonexisting_files_to_copy)
+ details = '\n'.join(details_lines)
+
+ reason = 'RHUI config lists nonexisting files in its `{0}` field.'.format(RhuiUpgradeFiles.name)
+ raise StopActorExecutionError(reason, details={'details': details})
+
+ files_to_copy_into_overlay = [CopyFile(src=key, dst=value) for key, value in config_upgrade_files.items()]
+ preinstall_tasks = TargetRHUIPreInstallTasks(files_to_copy_into_overlay=files_to_copy_into_overlay)
+
+ target_client_setup_info = TargetRHUISetupInfo(
+ preinstall_tasks=preinstall_tasks,
+ postinstall_tasks=TargetRHUIPostInstallTasks(),
+ bootstrap_target_client=False, # We don't need to install the client into overlay - user provided all files
+ )
+
+ rhui_info = RHUIInfo(
+ provider=rhui_config_dict[RhuiCloudProvider.name],
+ variant=rhui_config_dict[RhuiCloudVariant.name],
+ src_client_pkg_names=rhui_config_dict[RhuiSourcePkgs.name],
+ target_client_pkg_names=rhui_config_dict[RhuiTargetPkgs.name],
+ target_client_setup_info=target_client_setup_info
+ )
+ api.produce(rhui_info)
+
+
+def request_configured_repos_to_be_enabled(rhui_config):
+ config_repos_to_enable = rhui_config[RhuiTargetRepositoriesToUse.name]
+ custom_repos = [CustomTargetRepository(repoid=repoid) for repoid in config_repos_to_enable]
+ if custom_repos:
+ target_repos = TargetRepositories(custom_repos=custom_repos, rhel_repos=[])
+ api.produce(target_repos)
+
+
+def stop_with_err_if_config_missing_fields(config):
+ required_fields = [
+ RhuiTargetRepositoriesToUse,
+ RhuiCloudProvider,
+ # RhuiCloudVariant, <- this is not required
+ RhuiSourcePkgs,
+ RhuiTargetPkgs,
+ RhuiUpgradeFiles,
+ ]
+
+ missing_fields = tuple(field for field in required_fields if not config[field.name])
+ if missing_fields:
+ field_names = (field.name for field in missing_fields)
+ missing_fields_str = ', '.join(field_names)
+ details = 'The following required RHUI config fields are missing or they are set to an empty value: {}'
+ details = details.format(missing_fields_str)
+ raise StopActorExecutionError('Provided RHUI config is missing values for required fields.',
+ details={'details': details})
+
+
def process():
+ rhui_config = api.current_actor().config[rhui_config_lib.RHUI_CONFIG_SECTION]
+
+ if rhui_config[RhuiUseConfig.name]:
+ api.current_logger().info('Skipping RHUI upgrade auto-configuration - using provided config instead.')
+ stop_with_err_if_config_missing_fields(rhui_config)
+ emit_rhui_setup_tasks_based_on_config(rhui_config)
+
+ src_clients = set(rhui_config[RhuiSourcePkgs.name])
+ target_clients = set(rhui_config[RhuiTargetPkgs.name])
+ produce_rpms_to_install_into_target(src_clients, target_clients)
+
+ request_configured_repos_to_be_enabled(rhui_config)
+ return
+
installed_rpm = itertools.chain(*[installed_rpm_msg.items for installed_rpm_msg in api.consume(InstalledRPM)])
installed_pkgs = {rpm.name for rpm in installed_rpm}
@@ -342,7 +432,9 @@ def process():
# Instruction on how to access the target content
produce_rhui_info_to_setup_target(src_rhui_setup.family, src_rhui_setup.description, target_setup_desc)
- produce_rpms_to_install_into_target(src_rhui_setup.description, target_setup_desc)
+ source_clients = src_rhui_setup.description.clients
+ target_clients = target_setup_desc.clients
+ produce_rpms_to_install_into_target(source_clients, target_clients)
if src_rhui_setup.family.provider == rhui.RHUIProvider.AWS:
# We have to disable Amazon-id plugin in the initramdisk phase as there is no network
diff --git a/repos/system_upgrade/common/actors/cloud/checkrhui/tests/component_test_checkrhui.py b/repos/system_upgrade/common/actors/cloud/checkrhui/tests/component_test_checkrhui.py
index 27e70eea..3ac9c1b8 100644
--- a/repos/system_upgrade/common/actors/cloud/checkrhui/tests/component_test_checkrhui.py
+++ b/repos/system_upgrade/common/actors/cloud/checkrhui/tests/component_test_checkrhui.py
@@ -1,30 +1,43 @@
-from collections import namedtuple
+import itertools
+import os
+from collections import defaultdict
from enum import Enum
import pytest
from leapp import reporting
+from leapp.configs.common.rhui import (
+ all_rhui_cfg,
+ RhuiCloudProvider,
+ RhuiCloudVariant,
+ RhuiSourcePkgs,
+ RhuiTargetPkgs,
+ RhuiTargetRepositoriesToUse,
+ RhuiUpgradeFiles,
+ RhuiUseConfig
+)
from leapp.exceptions import StopActorExecutionError
from leapp.libraries.actor import checkrhui as checkrhui_lib
from leapp.libraries.common import rhsm, rhui
-from leapp.libraries.common.config import mock_configs, version
from leapp.libraries.common.rhui import mk_rhui_setup, RHUIFamily
-from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked, produce_mocked
+from leapp.libraries.common.testutils import (
+ _make_default_config,
+ create_report_mocked,
+ CurrentActorMocked,
+ produce_mocked
+)
from leapp.libraries.stdlib import api
from leapp.models import (
- CopyFile,
InstalledRPM,
- RequiredTargetUserspacePackages,
RHUIInfo,
RPM,
RpmTransactionTasks,
+ TargetRepositories,
TargetRHUIPostInstallTasks,
TargetRHUIPreInstallTasks,
TargetRHUISetupInfo,
TargetUserSpacePreupgradeTasks
)
-from leapp.reporting import Report
-from leapp.snactor.fixture import current_actor_context
RH_PACKAGER = 'Red Hat, Inc. <http://bugzilla.redhat.com/bugzilla>'
@@ -95,7 +108,8 @@ def mk_cloud_map(variants):
]
)
def test_determine_rhui_src_variant(monkeypatch, extra_pkgs, rhui_setups, expected_result):
- monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(src_ver='7.9'))
+ actor = CurrentActorMocked(src_ver='7.9', config=_make_default_config(all_rhui_cfg))
+ monkeypatch.setattr(api, 'current_actor', actor)
installed_pkgs = {'zip', 'zsh', 'bash', 'grubby'}.union(set(extra_pkgs))
if expected_result and not isinstance(expected_result, RHUIFamily): # An exception
@@ -167,7 +181,8 @@ def test_google_specific_customization(provider, should_mutate):
)
def test_aws_specific_customization(monkeypatch, rhui_family, target_major, should_mutate):
dst_ver = '{major}.0'.format(major=target_major)
- monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(dst_ver=dst_ver))
+ actor = CurrentActorMocked(dst_ver=dst_ver, config=_make_default_config(all_rhui_cfg))
+ monkeypatch.setattr(api, 'current_actor', actor)
setup_info = mk_setup_info()
checkrhui_lib.customize_rhui_setup_for_aws(rhui_family, setup_info)
@@ -215,12 +230,12 @@ def produce_rhui_info_to_setup_target(monkeypatch):
def test_produce_rpms_to_install_into_target(monkeypatch):
- source_rhui_setup = mk_rhui_setup(clients={'src_pkg'}, leapp_pkg='leapp_pkg')
- target_rhui_setup = mk_rhui_setup(clients={'target_pkg'}, leapp_pkg='leapp_pkg')
+ source_clients = {'src_pkg'}
+ target_clients = {'target_pkg'}
monkeypatch.setattr(api, 'produce', produce_mocked())
- checkrhui_lib.produce_rpms_to_install_into_target(source_rhui_setup, target_rhui_setup)
+ checkrhui_lib.produce_rpms_to_install_into_target(source_clients, target_clients)
assert len(api.produce.model_instances) == 2
userspace_tasks, target_rpm_tasks = api.produce.model_instances[0], api.produce.model_instances[1]
@@ -276,7 +291,8 @@ def test_process(monkeypatch, extra_installed_pkgs, skip_rhsm, expected_action):
installed_rpms = InstalledRPM(items=installed_pkgs)
monkeypatch.setattr(api, 'produce', produce_mocked())
- monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(src_ver='7.9', msgs=[installed_rpms]))
+ actor = CurrentActorMocked(src_ver='7.9', msgs=[installed_rpms], config=_make_default_config(all_rhui_cfg))
+ monkeypatch.setattr(api, 'current_actor', actor)
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
monkeypatch.setattr(rhsm, 'skip_rhsm', lambda: skip_rhsm)
monkeypatch.setattr(rhui, 'RHUI_SETUPS', known_setups)
@@ -315,7 +331,8 @@ def test_unknown_target_rhui_setup(monkeypatch, is_target_setup_known):
installed_rpms = InstalledRPM(items=installed_pkgs)
monkeypatch.setattr(api, 'produce', produce_mocked())
- monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(src_ver='7.9', msgs=[installed_rpms]))
+ actor = CurrentActorMocked(src_ver='7.9', msgs=[installed_rpms], config=_make_default_config(all_rhui_cfg))
+ monkeypatch.setattr(api, 'current_actor', actor)
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
monkeypatch.setattr(rhsm, 'skip_rhsm', lambda: True)
monkeypatch.setattr(rhui, 'RHUI_SETUPS', known_setups)
@@ -374,3 +391,136 @@ def test_select_chronologically_closest(monkeypatch, setups, desired_minor, expe
setup = setups[0]
assert setup == expected_setup
+
+
+def test_config_overwrites_everything(monkeypatch):
+ rhui_config = {
+ RhuiUseConfig.name: True,
+ RhuiSourcePkgs.name: ['client_source'],
+ RhuiTargetPkgs.name: ['client_target'],
+ RhuiCloudProvider.name: 'aws',
+ RhuiUpgradeFiles.name: {
+ '/root/file.repo': '/etc/yum.repos.d/'
+ },
+ RhuiTargetRepositoriesToUse.name: [
+ 'repoid_to_use'
+ ]
+ }
+ all_config = {'rhui': rhui_config}
+
+ actor = CurrentActorMocked(config=all_config)
+ monkeypatch.setattr(api, 'current_actor', actor)
+
+ function_calls = defaultdict(int)
+
+ def mk_function_probe(fn_name):
+ def probe(*args, **kwargs):
+ function_calls[fn_name] += 1
+ return probe
+
+ monkeypatch.setattr(checkrhui_lib,
+ 'emit_rhui_setup_tasks_based_on_config',
+ mk_function_probe('emit_rhui_setup_tasks_based_on_config'))
+ monkeypatch.setattr(checkrhui_lib,
+ 'stop_with_err_if_config_missing_fields',
+ mk_function_probe('stop_with_err_if_config_missing_fields'))
+ monkeypatch.setattr(checkrhui_lib,
+ 'produce_rpms_to_install_into_target',
+ mk_function_probe('produce_rpms_to_install_into_target'))
+ monkeypatch.setattr(checkrhui_lib,
+ 'request_configured_repos_to_be_enabled',
+ mk_function_probe('request_configured_repos_to_be_enabled'))
+
+ checkrhui_lib.process()
+
+ expected_function_calls = {
+ 'emit_rhui_setup_tasks_based_on_config': 1,
+ 'stop_with_err_if_config_missing_fields': 1,
+ 'produce_rpms_to_install_into_target': 1,
+ 'request_configured_repos_to_be_enabled': 1,
+ }
+
+ assert function_calls == expected_function_calls
+
+
+def test_request_configured_repos_to_be_enabled(monkeypatch):
+ monkeypatch.setattr(api, 'produce', produce_mocked())
+
+ rhui_config = {
+ RhuiUseConfig.name: True,
+ RhuiSourcePkgs.name: ['client_source'],
+ RhuiTargetPkgs.name: ['client_target'],
+ RhuiCloudProvider.name: 'aws',
+ RhuiUpgradeFiles.name: {
+ '/root/file.repo': '/etc/yum.repos.d/'
+ },
+ RhuiTargetRepositoriesToUse.name: [
+ 'repoid1',
+ 'repoid2',
+ 'repoid3',
+ ]
+ }
+
+ checkrhui_lib.request_configured_repos_to_be_enabled(rhui_config)
+
+ assert api.produce.called
+ assert len(api.produce.model_instances) == 1
+
+ target_repos = api.produce.model_instances[0]
+ assert isinstance(target_repos, TargetRepositories)
+ assert not target_repos.rhel_repos
+
+ custom_repoids = sorted(custom_repo_model.repoid for custom_repo_model in target_repos.custom_repos)
+ assert custom_repoids == ['repoid1', 'repoid2', 'repoid3']
+
+
+@pytest.mark.parametrize(
+ ('upgrade_files', 'existing_files'),
+ (
+ (['/root/a', '/root/b'], ['/root/a', '/root/b']),
+ (['/root/a', '/root/b'], ['/root/b']),
+ (['/root/a', '/root/b'], []),
+ )
+)
+def test_missing_files_in_config(monkeypatch, upgrade_files, existing_files):
+ upgrade_files_map = dict((source_path, '/tmp/dummy') for source_path in upgrade_files)
+
+ rhui_config = {
+ RhuiUseConfig.name: True,
+ RhuiSourcePkgs.name: ['client_source'],
+ RhuiTargetPkgs.name: ['client_target'],
+ RhuiCloudProvider.name: 'aws',
+ RhuiCloudVariant.name: 'ordinary',
+ RhuiUpgradeFiles.name: upgrade_files_map,
+ RhuiTargetRepositoriesToUse.name: [
+ 'repoid_to_use'
+ ]
+ }
+
+ monkeypatch.setattr(os.path, 'exists', lambda path: path in existing_files)
+ monkeypatch.setattr(api, 'produce', produce_mocked())
+
+ should_error = (len(upgrade_files) != len(existing_files))
+ if should_error:
+ with pytest.raises(StopActorExecutionError):
+ checkrhui_lib.emit_rhui_setup_tasks_based_on_config(rhui_config)
+ else:
+ checkrhui_lib.emit_rhui_setup_tasks_based_on_config(rhui_config)
+ assert api.produce.called
+ assert len(api.produce.model_instances) == 1
+
+ rhui_info = api.produce.model_instances[0]
+ assert isinstance(rhui_info, RHUIInfo)
+ assert rhui_info.provider == 'aws'
+ assert rhui_info.variant == 'ordinary'
+ assert rhui_info.src_client_pkg_names == ['client_source']
+ assert rhui_info.target_client_pkg_names == ['client_target']
+
+ setup_info = rhui_info.target_client_setup_info
+ assert not setup_info.bootstrap_target_client
+
+ _copies_to_perform = setup_info.preinstall_tasks.files_to_copy_into_overlay
+ copies_to_perform = sorted((copy.src, copy.dst) for copy in _copies_to_perform)
+ expected_copies = sorted(zip(upgrade_files, itertools.repeat('/tmp/dummy')))
+
+ assert copies_to_perform == expected_copies
--
2.47.0
View File
@ -1,110 +0,0 @@
From 078ba51a5851e388abe1357a552b981cba1acca9 Mon Sep 17 00:00:00 2001
From: Tomas Fratrik <tfratrik@redhat.com>
Date: Wed, 13 Aug 2025 13:43:04 +0200
Subject: [PATCH 36/55] pylint: enable logging-not-lazy
Jira: RHELMISC-16038
---
.pylintrc | 1 -
.../rpmtransactionconfigtaskscollector.py | 5 +++--
.../storagescanner/libraries/storagescanner.py | 8 ++++++--
.../common/libraries/persistentnetnames.py | 2 +-
.../libraries/multipathconfread.py | 14 +++++++++-----
5 files changed, 19 insertions(+), 11 deletions(-)
diff --git a/.pylintrc b/.pylintrc
index 7d938715..f7f4b25d 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -53,7 +53,6 @@ disable=
use-a-generator, # cannot be modified because of Python2 support
consider-using-f-string, # sorry, not gonna happen, still have to support py2
logging-format-interpolation,
- logging-not-lazy,
use-yield-from # yield from cannot be used until we require python 3.3 or greater
[FORMAT]
diff --git a/repos/system_upgrade/common/actors/rpmtransactionconfigtaskscollector/libraries/rpmtransactionconfigtaskscollector.py b/repos/system_upgrade/common/actors/rpmtransactionconfigtaskscollector/libraries/rpmtransactionconfigtaskscollector.py
index 43ac1fc4..84895f83 100644
--- a/repos/system_upgrade/common/actors/rpmtransactionconfigtaskscollector/libraries/rpmtransactionconfigtaskscollector.py
+++ b/repos/system_upgrade/common/actors/rpmtransactionconfigtaskscollector/libraries/rpmtransactionconfigtaskscollector.py
@@ -29,8 +29,9 @@ def load_tasks(base_dir, logger):
filtered = set(to_install) - set(to_install_filtered)
if filtered:
api.current_logger().debug(
- 'The following packages from "to_install" file will be ignored as they are already installed:'
- '\n- ' + '\n- '.join(filtered))
+ 'The following packages from "to_install" file will be ignored as they are already installed:\n- %s',
+ '\n- '.join(filtered)
+ )
return RpmTransactionTasks(
to_install=to_install_filtered,
diff --git a/repos/system_upgrade/common/actors/storagescanner/libraries/storagescanner.py b/repos/system_upgrade/common/actors/storagescanner/libraries/storagescanner.py
index cae38731..e2d869da 100644
--- a/repos/system_upgrade/common/actors/storagescanner/libraries/storagescanner.py
+++ b/repos/system_upgrade/common/actors/storagescanner/libraries/storagescanner.py
@@ -35,7 +35,7 @@ def _is_file_readable(path):
def _get_cmd_output(cmd, delim, expected_len):
""" Verify if command exists and return output """
if not any(os.access(os.path.join(path, cmd[0]), os.X_OK) for path in os.environ['PATH'].split(os.pathsep)):
- api.current_logger().warning("'%s': command not found" % cmd[0])
+ api.current_logger().warning("'%s': command not found", cmd[0])
return
try:
@@ -45,7 +45,11 @@ def _get_cmd_output(cmd, delim, expected_len):
output = subprocess.check_output(cmd, env={'LVM_SUPPRESS_FD_WARNINGS': '1', 'PATH': os.environ['PATH']})
except subprocess.CalledProcessError as e:
- api.current_logger().debug("Command '%s' return non-zero exit status: %s" % (" ".join(cmd), e.returncode))
+ api.current_logger().debug(
+ "Command '%s' returned non-zero exit status: %s",
+ " ".join(cmd),
+ e.returncode
+ )
return
if bytes is not str:
diff --git a/repos/system_upgrade/common/libraries/persistentnetnames.py b/repos/system_upgrade/common/libraries/persistentnetnames.py
index 8769712c..7fdf7eaa 100644
--- a/repos/system_upgrade/common/libraries/persistentnetnames.py
+++ b/repos/system_upgrade/common/libraries/persistentnetnames.py
@@ -50,7 +50,7 @@ def interfaces():
except Exception as e: # pylint: disable=broad-except
# FIXME(msekleta): We should probably handle errors more granularly
# Maybe we should inhibit upgrade process at this point
- api.current_logger().warning('Failed to gather information about network interface: ' + str(e))
+ api.current_logger().warning('Failed to gather information about network interface: %s', e)
continue
yield Interface(**attrs)
diff --git a/repos/system_upgrade/el8toel9/actors/multipathconfread/libraries/multipathconfread.py b/repos/system_upgrade/el8toel9/actors/multipathconfread/libraries/multipathconfread.py
index e5b3f06c..5b1cef50 100644
--- a/repos/system_upgrade/el8toel9/actors/multipathconfread/libraries/multipathconfread.py
+++ b/repos/system_upgrade/el8toel9/actors/multipathconfread/libraries/multipathconfread.py
@@ -68,12 +68,16 @@ def _parse_config_dir(config_dir):
res.append(conf)
except OSError as e:
if e.errno == errno.ENOENT:
- api.current_logger().debug('Multipath conf directory ' +
- '"{}" doesn\'t exist'.format(config_dir))
+ api.current_logger().debug(
+ 'Multipath conf directory "%s" doesn\'t exist',
+ config_dir
+ )
else:
- api.current_logger().warning('Failed to read multipath config ' +
- 'directory ' +
- '"{}": {}'.format(config_dir, e))
+ api.current_logger().warning(
+ 'Failed to read multipath config directory "%s": %s',
+ config_dir,
+ e
+ )
return res
--
2.51.1
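
For readers unfamiliar with the lazy-logging idiom the patch above enforces, here is a minimal standalone sketch (plain `logging`, not leapp's `api.current_logger()`): with the lazy form, the `%s` interpolation is deferred until a handler actually emits the record, instead of the message being built eagerly with `%` or concatenation.

```python
import logging

logging.basicConfig(level=logging.INFO)   # DEBUG records will be filtered out
log = logging.getLogger('demo')

cmd = ['lsblk', '-nr', '--output', 'NAME']
returncode = 32

# Eager: '%' builds the full message string before logging even checks the level.
log.debug("Command '%s' returned non-zero exit status: %s" % (' '.join(cmd), returncode))

# Lazy (the style the patch switches to): the template and arguments are stored
# on the record, and interpolation only happens if a handler actually emits it.
log.debug("Command '%s' returned non-zero exit status: %s", ' '.join(cmd), returncode)
```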
View File
@ -1,87 +0,0 @@
From 1104e25977b728a7059fc1ef4613ef55d1e0a9d7 Mon Sep 17 00:00:00 2001
From: Tomas Fratrik <tfratrik@redhat.com>
Date: Wed, 13 Aug 2025 13:51:34 +0200
Subject: [PATCH 37/55] pylint: enable use-yield-from
Jira: RHELMISC-16038
---
.pylintrc | 3 +--
.../mount_units_generator/tests/test_mount_unit_generation.py | 3 +--
.../actors/storagescanner/tests/unit_test_storagescanner.py | 3 +--
.../tests/unit_test_targetuserspacecreator.py | 3 +--
repos/system_upgrade/common/libraries/config/version.py | 3 +--
5 files changed, 5 insertions(+), 10 deletions(-)
diff --git a/.pylintrc b/.pylintrc
index f7f4b25d..15a69461 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -52,8 +52,7 @@ disable=
raise-missing-from, # no 'raise from' in python 2
use-a-generator, # cannot be modified because of Python2 support
consider-using-f-string, # sorry, not gonna happen, still have to support py2
- logging-format-interpolation,
- use-yield-from # yield from cannot be used until we require python 3.3 or greater
+ logging-format-interpolation
[FORMAT]
# Maximum number of characters on a single line.
diff --git a/repos/system_upgrade/common/actors/initramfs/mount_units_generator/tests/test_mount_unit_generation.py b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/tests/test_mount_unit_generation.py
index b814f6ce..9d75a31d 100644
--- a/repos/system_upgrade/common/actors/initramfs/mount_units_generator/tests/test_mount_unit_generation.py
+++ b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/tests/test_mount_unit_generation.py
@@ -196,8 +196,7 @@ def test_copy_units_mixed_content(monkeypatch):
('/source/dir', ['local-fs.target.requires'], ['unit1.mount', 'unit2.mount']),
('/source/dir/local-fs.target.requires', [], ['unit1.mount', 'unit2.mount']),
]
- for i in tuples_to_yield:
- yield i
+ yield from tuples_to_yield
def mock_isdir(path):
return 'local-fs.target.requires' in path
diff --git a/repos/system_upgrade/common/actors/storagescanner/tests/unit_test_storagescanner.py b/repos/system_upgrade/common/actors/storagescanner/tests/unit_test_storagescanner.py
index 456e40ec..3c7fcbd6 100644
--- a/repos/system_upgrade/common/actors/storagescanner/tests/unit_test_storagescanner.py
+++ b/repos/system_upgrade/common/actors/storagescanner/tests/unit_test_storagescanner.py
@@ -268,8 +268,7 @@ def test_get_lsblk_info(monkeypatch):
'crypt', '', '/dev/nvme0n1p1'],
['/dev/nvme0n1p1', '259:1', '0', str(39 * bytes_per_gb), '0', 'part', '', '/dev/nvme0n1'],
]
- for output_line_parts in output_lines_split_on_whitespace:
- yield output_line_parts
+ yield from output_lines_split_on_whitespace
elif len(cmd) == 5 and cmd[:4] == ['lsblk', '-nr', '--output', 'NAME,KNAME,SIZE']:
# We cannot have the output in a list, since the command is called per device. Therefore, we have to map
# each device path to its output.
diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py
index 2ae194d7..e78c3ac7 100644
--- a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py
+++ b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py
@@ -95,8 +95,7 @@ def traverse_structure(structure, root=Path('/')):
filepath = root / filename
if isinstance(links_to, dict):
- for pair in traverse_structure(links_to, filepath):
- yield pair
+ yield from traverse_structure(links_to, root=filepath)
else:
yield (filepath, links_to)
diff --git a/repos/system_upgrade/common/libraries/config/version.py b/repos/system_upgrade/common/libraries/config/version.py
index 00ce3ec8..5efa932d 100644
--- a/repos/system_upgrade/common/libraries/config/version.py
+++ b/repos/system_upgrade/common/libraries/config/version.py
@@ -106,8 +106,7 @@ class _SupportedVersionsDict(dict):
def __iter__(self):
self._feed_supported_versions()
- for d in self.data:
- yield d
+ yield from self.data
def __repr__(self):
self._feed_supported_versions()
--
2.51.1
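
As a quick illustration of the refactor the patch above applies (standard library only): pylint's `use-yield-from` flags a loop whose body is a bare `yield` of the loop variable, which PEP 380's `yield from` expresses directly.

```python
def iter_lines_loop(output_lines):
    # The pattern pylint's use-yield-from flags: a loop whose body is a bare yield.
    for line in output_lines:
        yield line


def iter_lines_delegating(output_lines):
    # Equivalent PEP 380 delegation, available since Python 3.3.
    yield from output_lines


lines = ['/dev/nvme0n1 disk', '/dev/nvme0n1p1 part']
assert list(iter_lines_loop(lines)) == list(iter_lines_delegating(lines))
```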
View File
@ -0,0 +1,53 @@
From a206a7f02c68f50ab50c9f547669d3a4178c4bd2 Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Wed, 16 Oct 2024 17:38:36 +0200
Subject: [PATCH 37/40] testutils: add support for configs
Extend the CurrentActorMocked class to accept a `config` value,
allowing developers to mock actors that rely on configuration.
A library function `_make_default_config` is also introduced,
making it possible to instantiate default configs from config schemas.
---
repos/system_upgrade/common/libraries/testutils.py | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/repos/system_upgrade/common/libraries/testutils.py b/repos/system_upgrade/common/libraries/testutils.py
index c538af1a..afeb360a 100644
--- a/repos/system_upgrade/common/libraries/testutils.py
+++ b/repos/system_upgrade/common/libraries/testutils.py
@@ -4,6 +4,7 @@ import os
from collections import namedtuple
from leapp import reporting
+from leapp.actors.config import _normalize_config, normalize_schemas
from leapp.libraries.common.config import architecture
from leapp.models import EnvVar
from leapp.utils.deprecation import deprecated
@@ -67,9 +68,15 @@ class logger_mocked(object):
return self
+def _make_default_config(actor_config_schema):
+ """ Make a config dict populated with default values. """
+ merged_schema = normalize_schemas((actor_config_schema, ))
+ return _normalize_config({}, merged_schema) # Will fill default values during normalization
+
+
class CurrentActorMocked(object): # pylint:disable=R0904
def __init__(self, arch=architecture.ARCH_X86_64, envars=None, kernel='3.10.0-957.43.1.el7.x86_64',
- release_id='rhel', src_ver='7.8', dst_ver='8.1', msgs=None, flavour='default'):
+ release_id='rhel', src_ver='7.8', dst_ver='8.1', msgs=None, flavour='default', config=None):
envarsList = [EnvVar(name=k, value=v) for k, v in envars.items()] if envars else []
version = namedtuple('Version', ['source', 'target'])(src_ver, dst_ver)
release = namedtuple('OS_release', ['release_id', 'version_id'])(release_id, src_ver)
@@ -82,6 +89,7 @@ class CurrentActorMocked(object): # pylint:disable=R0904
'configuration', ['architecture', 'kernel', 'leapp_env_vars', 'os_release', 'version', 'flavour']
)(arch, kernel, envarsList, release, version, flavour)
self._msgs = msgs or []
+ self.config = {} if config is None else config
def __call__(self):
return self
--
2.47.0
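
A minimal sketch of how a unit test might use the new `config` parameter. This is illustrative rather than part of the patch: the config dict contents below are placeholders, and real tests would normally build the dict with `_make_default_config` from the actor's config schema, as the checkrhui tests above do.

```python
from leapp.libraries.common import testutils
from leapp.libraries.stdlib import api


def test_actor_sees_mocked_config(monkeypatch):
    # Placeholder config; real tests usually produce it via
    # testutils._make_default_config(<actor config schema>).
    config = {'example_section': {'example_field': 'value'}}

    actor = testutils.CurrentActorMocked(src_ver='8.10', dst_ver='9.6', config=config)
    monkeypatch.setattr(api, 'current_actor', actor)

    # Library code under test can now read the mocked configuration.
    assert api.current_actor().config == config
```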
File diff suppressed because it is too large.

View File
@ -0,0 +1,70 @@
From 0147bc268607e5931ebca95e3253087ec71a3c66 Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Sun, 20 Oct 2024 16:08:49 +0200
Subject: [PATCH 38/40] userspacegen(rhui): remove repofiles only if now owned
by an RPM
We copy files into the target userspace when setting up target
repository content. If such a file has the same name as one of the
files installed by the target RHUI client during the early phases of
the target userspace setup process, we would delete it during cleanup.
Therefore, if we copy a repofile named /etc/yum.repos.d/X.repo and the
target client also owns a file /etc/yum.repos.d/X.repo, we would remove
it, making the container lose access to the target content.
This patch prevents us from blindly deleting files, keeping any file
that is owned by some RPM (usually the target RHUI client).
---
.../libraries/userspacegen.py | 30 ++++++++++++++-----
1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
index d7698056..12736ab7 100644
--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
+++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
@@ -1120,6 +1120,27 @@ def _get_target_userspace():
return constants.TARGET_USERSPACE.format(get_target_major_version())
+def _remove_injected_repofiles_from_our_rhui_packages(target_userspace_ctx, rhui_setup_info):
+ target_userspace_path = _get_target_userspace()
+ for copy in rhui_setup_info.preinstall_tasks.files_to_copy_into_overlay:
+ dst_in_container = get_copy_location_from_copy_in_task(target_userspace_path, copy)
+ dst_in_container = dst_in_container.strip('/')
+ dst_in_host = os.path.join(target_userspace_path, dst_in_container)
+
+ if os.path.isfile(dst_in_host) and dst_in_host.endswith('.repo'):
+ # The repofile might have been replaced by a new one provided by the RHUI client if names collide
+ # Performance: Do the query here and not earlier, because we would be running rpm needlessly
+ try:
+ path_with_root = '/' + dst_in_container
+ target_userspace_ctx.call(['rpm', '-q', '--whatprovides', path_with_root])
+ api.current_logger().debug('Repofile {0} kept as it is owned by some RPM.'.format(dst_in_host))
+ except CalledProcessError:
+ # rpm exits with 1 if the file is not owned by any RPM. We might be catching all kinds of other
+ # problems here, but still better than always removing repofiles.
+ api.current_logger().debug('Removing repofile - not owned by any RPM: {0}'.format(dst_in_host))
+ os.remove(dst_in_host)
+
+
def _create_target_userspace(context, indata, packages, files, target_repoids):
"""Create the target userspace."""
target_path = _get_target_userspace()
@@ -1139,14 +1160,7 @@ def _create_target_userspace(context, indata, packages, files, target_repoids):
)
setup_info = indata.rhui_info.target_client_setup_info
if not setup_info.bootstrap_target_client:
- target_userspace_path = _get_target_userspace()
- for copy in setup_info.preinstall_tasks.files_to_copy_into_overlay:
- dst_in_container = get_copy_location_from_copy_in_task(target_userspace_path, copy)
- dst_in_container = dst_in_container.strip('/')
- dst_in_host = os.path.join(target_userspace_path, dst_in_container)
- if os.path.isfile(dst_in_host) and dst_in_host.endswith('.repo'):
- api.current_logger().debug('Removing repofile: {0}'.format(dst_in_host))
- os.remove(dst_in_host)
+ _remove_injected_repofiles_from_our_rhui_packages(context, setup_info)
# and do not forget to set the rhsm into the container mode again
with mounting.NspawnActions(_get_target_userspace()) as target_context:
--
2.47.0
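
The core of the patch above is an RPM-ownership test. Here is a rough standalone sketch of the same check, using plain `subprocess` rather than the leapp container context, with a hypothetical repofile path:

```python
import subprocess


def is_owned_by_rpm(path):
    """Return True when some installed RPM owns the given path.

    `rpm -q --whatprovides PATH` exits with 0 if an owning package exists
    and non-zero otherwise, which is the same check the patch performs
    inside the target userspace container.
    """
    result = subprocess.run(
        ['rpm', '-q', '--whatprovides', path],
        stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=False,
    )
    return result.returncode == 0


# Hypothetical repofile copied into the target userspace:
repofile = '/etc/yum.repos.d/example.repo'
if not is_owned_by_rpm(repofile):
    print('would remove', repofile)
```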
View File
@ -0,0 +1,857 @@
From c2f2895bb570a75eb2aaa7b84a2bcd9dcd537b0e Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Thu, 14 Nov 2024 14:24:15 +0100
Subject: [PATCH 39/40] Enable IPU for EL 9.6 (and drop EL 8.8/9.2)
* Add product certificates for RHEL 9.6
* Introduce upgrade path 8.10 -> 9.6
* Drop IPUs related to EL 8.8 and 9.2
 * These will not be supported in this release.
 * Keeping IPU 8.10 -> 9.5 for now, as it is a fresh release,
 so it is still valuable for us to run tests there. We will drop it
 later during this lifecycle (CTC-2?).
* Drop EL 8.8 from the list of supported versions
* Update tests in packit
* Note that tests for 9.6 could be failing for a while until
composes are created.
jira: RHEL-67621
---
.packit.yaml | 257 +++++-------------
.../common/files/prod-certs/9.6/279.pem | 37 +++
.../common/files/prod-certs/9.6/362.pem | 37 +++
.../common/files/prod-certs/9.6/363.pem | 37 +++
.../common/files/prod-certs/9.6/419.pem | 36 +++
.../common/files/prod-certs/9.6/433.pem | 37 +++
.../common/files/prod-certs/9.6/479.pem | 36 +++
.../common/files/prod-certs/9.6/486.pem | 37 +++
.../common/files/prod-certs/9.6/72.pem | 36 +++
.../common/files/upgrade_paths.json | 18 +-
.../common/libraries/config/version.py | 2 +-
11 files changed, 363 insertions(+), 207 deletions(-)
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.6/279.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.6/362.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.6/363.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.6/419.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.6/433.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.6/479.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.6/486.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.6/72.pem
diff --git a/.packit.yaml b/.packit.yaml
index fbfd0eea..48c3cbbb 100644
--- a/.packit.yaml
+++ b/.packit.yaml
@@ -145,104 +145,6 @@ jobs:
# ######################### Individual tests ########################### #
# ###################################################################### #
-# Tests: 7.9 -> 8.8
-- &sanity-79to88-aws
- <<: *sanity-abstract-7to8-aws
- trigger: pull_request
- identifier: sanity-7.9to8.8-aws
- tf_extra_params:
- test:
- tmt:
- plan_filter: 'tag:7to8 & tag:upgrade_happy_path & enabled:true'
- environments:
- - tmt:
- context:
- distro: "rhel-7.9"
- distro_target: "rhel-8.8"
- settings:
- provisioning:
- post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys"
- tags:
- BusinessUnit: sst_upgrades@leapp_upstream_test
- env:
- SOURCE_RELEASE: "7.9"
- TARGET_RELEASE: "8.8"
- RHUI: "aws"
- LEAPPDATA_BRANCH: "upstream"
- LEAPP_NO_RHSM: "1"
- USE_CUSTOM_REPOS: rhui
-
-- &sanity-79to88
- <<: *sanity-abstract-7to8
- trigger: pull_request
- identifier: sanity-7.9to8.8
- tf_extra_params:
- test:
- tmt:
- plan_filter: 'tag:7to8 & tag:sanity & enabled:true'
- environments:
- - tmt:
- context:
- distro: "rhel-7.9"
- distro_target: "rhel-8.8"
- settings:
- provisioning:
- tags:
- BusinessUnit: sst_upgrades@leapp_upstream_test
- env:
- SOURCE_RELEASE: "7.9"
- TARGET_RELEASE: "8.8"
-
-- &beaker-minimal-79to88
- <<: *beaker-minimal-7to8-abstract-ondemand
- trigger: pull_request
- labels:
- - beaker-minimal
- - beaker-minimal-7.9to8.8
- - 7.9to8.8
- identifier: sanity-7.9to8.8-beaker-minimal-ondemand
- tf_extra_params:
- test:
- tmt:
- plan_filter: 'tag:7to8 & tag:partitioning & enabled:true'
- environments:
- - tmt:
- context:
- distro: "rhel-7.9"
- distro_target: "rhel-8.8"
- settings:
- provisioning:
- tags:
- BusinessUnit: sst_upgrades@leapp_upstream_test
- env:
- SOURCE_RELEASE: "7.9"
- TARGET_RELEASE: "8.8"
-
-- &kernel-rt-79to88
- <<: *kernel-rt-abstract-7to8-ondemand
- trigger: pull_request
- labels:
- - kernel-rt
- - kernel-rt-7.9to8.8
- - 7.9to8.8
- identifier: sanity-7.9to8.8-kernel-rt-ondemand
- tf_extra_params:
- test:
- tmt:
- plan_filter: 'tag:7to8 & tag:kernel-rt & enabled:true'
- environments:
- - tmt:
- context:
- distro: "rhel-7.9"
- distro_target: "rhel-8.8"
- settings:
- provisioning:
- tags:
- BusinessUnit: sst_upgrades@leapp_upstream_test
- env:
- SOURCE_RELEASE: "7.9"
- TARGET_RELEASE: "8.8"
-
# Tests: 7.9 -> 8.10
- &sanity-79to810
<<: *sanity-abstract-7to8
@@ -397,14 +299,11 @@ jobs:
# ######################### Individual tests ########################### #
# ###################################################################### #
-# Tests: 8.8 -> 9.2
-- &sanity-88to92
+# Tests: 8.10 -> 9.4
+- &sanity-810to94
<<: *sanity-abstract-8to9
trigger: pull_request
- targets:
- epel-8-x86_64:
- distros: [RHEL-8.8.0-Nightly]
- identifier: sanity-8.8to9.2
+ identifier: sanity-8.10to9.4
tf_extra_params:
test:
tmt:
@@ -412,108 +311,74 @@ jobs:
environments:
- tmt:
context:
- distro: "rhel-8.8"
- distro_target: "rhel-9.2"
- settings:
- provisioning:
- tags:
- BusinessUnit: sst_upgrades@leapp_upstream_test
- env:
- SOURCE_RELEASE: "8.8"
- TARGET_RELEASE: "9.2"
- RHSM_REPOS_EUS: "eus"
-
-- &sanity-88to92-aws
- <<: *sanity-abstract-8to9-aws
- trigger: pull_request
- targets:
- epel-8-x86_64:
- distros: [RHEL-8.8-rhui]
- identifier: sanity-8.8to9.2-aws
- tf_extra_params:
- test:
- tmt:
- plan_filter: 'tag:8to9 & tag:rhui-tier[0] & enabled:true'
- environments:
- - tmt:
- context:
- distro: "rhel-8.8"
- distro_target: "rhel-9.2"
+ distro: "rhel-8.10"
+ distro_target: "rhel-9.4"
settings:
provisioning:
- post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys"
tags:
BusinessUnit: sst_upgrades@leapp_upstream_test
env:
- SOURCE_RELEASE: "8.8"
- TARGET_RELEASE: "9.2"
- RHUI_HYPERSCALER: aws
+ SOURCE_RELEASE: "8.10"
+ TARGET_RELEASE: "9.4"
-- &beaker-minimal-88to92
+# On-demand minimal beaker tests
+- &beaker-minimal-810to94
<<: *beaker-minimal-8to9-abstract-ondemand
trigger: pull_request
labels:
- beaker-minimal
- - beaker-minimal-8.8to9.2
- - 8.8to9.2
- targets:
- epel-8-x86_64:
- distros: [RHEL-8.8.0-Nightly]
- identifier: sanity-8.8to9.2-beaker-minimal-ondemand
+ - beaker-minimal-8.10to9.4
+ - 8.10to9.4
+ identifier: sanity-8.10to9.4-beaker-minimal-ondemand
tf_extra_params:
test:
tmt:
- plan_filter: 'tag:8to9 &tag:partitioning & enabled:true'
+ plan_filter: 'tag:8to9 & tag:partitioning & enabled:true'
environments:
- tmt:
context:
- distro: "rhel-8.8"
- distro_target: "rhel-9.2"
+ distro: "rhel-8.10"
+ distro_target: "rhel-9.4"
settings:
provisioning:
- post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys"
tags:
BusinessUnit: sst_upgrades@leapp_upstream_test
env:
- SOURCE_RELEASE: "8.8"
- TARGET_RELEASE: "9.2"
- RHSM_REPOS_EUS: "eus"
+ SOURCE_RELEASE: "8.10"
+ TARGET_RELEASE: "9.4"
-- &kernel-rt-88to92
+# On-demand kernel-rt tests
+- &kernel-rt-810to94
<<: *kernel-rt-abstract-8to9-ondemand
trigger: pull_request
labels:
- kernel-rt
- - kernel-rt-8.8to9.2
- - 8.8to9.2
- identifier: sanity-8.8to9.2-kernel-rt-ondemand
- targets:
- epel-8-x86_64:
- distros: [RHEL-8.8.0-Nightly]
+ - kernel-rt-8.10to9.4
+ - 8.10to9.4
+ identifier: sanity-8.10to9.4-kernel-rt-ondemand
tf_extra_params:
test:
tmt:
- plan_filter: 'tag:8to9 & tag:kernel-rt & enabled:true'
+ plan_filter: 'tag:8to9 & tag:kernel-rt & enabled:true'
environments:
- tmt:
context:
- distro: "rhel-8.8"
- distro_target: "rhel-9.2"
+ distro: "rhel-8.10"
+ distro_target: "rhel-9.4"
settings:
provisioning:
tags:
BusinessUnit: sst_upgrades@leapp_upstream_test
env:
- SOURCE_RELEASE: "8.8"
- TARGET_RELEASE: "9.2"
- RHSM_REPOS_EUS: "eus"
+ SOURCE_RELEASE: "8.10"
+ TARGET_RELEASE: "9.4"
-# Tests: 8.10 -> 9.4
-- &sanity-810to94
+# Tests: 8.10 -> 9.5
+- &sanity-810to95
<<: *sanity-abstract-8to9
trigger: pull_request
- identifier: sanity-8.10to9.4
+ identifier: sanity-8.10to9.5
tf_extra_params:
test:
tmt:
@@ -522,24 +387,24 @@ jobs:
- tmt:
context:
distro: "rhel-8.10"
- distro_target: "rhel-9.4"
+ distro_target: "rhel-9.5"
settings:
provisioning:
tags:
BusinessUnit: sst_upgrades@leapp_upstream_test
env:
SOURCE_RELEASE: "8.10"
- TARGET_RELEASE: "9.4"
+ TARGET_RELEASE: "9.5"
# On-demand minimal beaker tests
-- &beaker-minimal-810to94
+- &beaker-minimal-810to95
<<: *beaker-minimal-8to9-abstract-ondemand
trigger: pull_request
labels:
- beaker-minimal
- - beaker-minimal-8.10to9.4
- - 8.10to9.4
- identifier: sanity-8.10to9.4-beaker-minimal-ondemand
+ - beaker-minimal-8.10to9.5
+ - 8.10to9.5
+ identifier: sanity-8.10to9.5-beaker-minimal-ondemand
tf_extra_params:
test:
tmt:
@@ -548,24 +413,24 @@ jobs:
- tmt:
context:
distro: "rhel-8.10"
- distro_target: "rhel-9.4"
+ distro_target: "rhel-9.5"
settings:
provisioning:
tags:
BusinessUnit: sst_upgrades@leapp_upstream_test
env:
SOURCE_RELEASE: "8.10"
- TARGET_RELEASE: "9.4"
+ TARGET_RELEASE: "9.5"
# On-demand kernel-rt tests
-- &kernel-rt-810to94
+- &kernel-rt-810to95
<<: *kernel-rt-abstract-8to9-ondemand
trigger: pull_request
labels:
- kernel-rt
- - kernel-rt-8.10to9.4
- - 8.10to9.4
- identifier: sanity-8.10to9.4-kernel-rt-ondemand
+ - kernel-rt-8.10to9.5
+ - 8.10to9.5
+ identifier: sanity-8.10to9.5-kernel-rt-ondemand
tf_extra_params:
test:
tmt:
@@ -574,21 +439,21 @@ jobs:
- tmt:
context:
distro: "rhel-8.10"
- distro_target: "rhel-9.4"
+ distro_target: "rhel-9.5"
settings:
provisioning:
tags:
BusinessUnit: sst_upgrades@leapp_upstream_test
env:
SOURCE_RELEASE: "8.10"
- TARGET_RELEASE: "9.4"
+ TARGET_RELEASE: "9.5"
-# Tests: 8.10 -> 9.5
-- &sanity-810to95
+# Tests: 8.10 -> 9.6
+- &sanity-810to96
<<: *sanity-abstract-8to9
trigger: pull_request
- identifier: sanity-8.10to9.5
+ identifier: sanity-8.10to9.6
tf_extra_params:
test:
tmt:
@@ -597,24 +462,24 @@ jobs:
- tmt:
context:
distro: "rhel-8.10"
- distro_target: "rhel-9.5"
+ distro_target: "rhel-9.6"
settings:
provisioning:
tags:
BusinessUnit: sst_upgrades@leapp_upstream_test
env:
SOURCE_RELEASE: "8.10"
- TARGET_RELEASE: "9.5"
+ TARGET_RELEASE: "9.6"
# On-demand minimal beaker tests
-- &beaker-minimal-810to95
+- &beaker-minimal-810to96
<<: *beaker-minimal-8to9-abstract-ondemand
trigger: pull_request
labels:
- beaker-minimal
- - beaker-minimal-8.10to9.5
- - 8.10to9.5
- identifier: sanity-8.10to9.5-beaker-minimal-ondemand
+ - beaker-minimal-8.10to9.6
+ - 8.10to9.6
+ identifier: sanity-8.10to9.6-beaker-minimal-ondemand
tf_extra_params:
test:
tmt:
@@ -623,24 +488,24 @@ jobs:
- tmt:
context:
distro: "rhel-8.10"
- distro_target: "rhel-9.5"
+ distro_target: "rhel-9.6"
settings:
provisioning:
tags:
BusinessUnit: sst_upgrades@leapp_upstream_test
env:
SOURCE_RELEASE: "8.10"
- TARGET_RELEASE: "9.5"
+ TARGET_RELEASE: "9.6"
# On-demand kernel-rt tests
-- &kernel-rt-810to95
+- &kernel-rt-810to96
<<: *kernel-rt-abstract-8to9-ondemand
trigger: pull_request
labels:
- kernel-rt
- - kernel-rt-8.10to9.5
- - 8.10to9.5
- identifier: sanity-8.10to9.5-kernel-rt-ondemand
+ - kernel-rt-8.10to9.6
+ - 8.10to9.6
+ identifier: sanity-8.10to9.6-kernel-rt-ondemand
tf_extra_params:
test:
tmt:
@@ -649,11 +514,11 @@ jobs:
- tmt:
context:
distro: "rhel-8.10"
- distro_target: "rhel-9.5"
+ distro_target: "rhel-9.6"
settings:
provisioning:
tags:
BusinessUnit: sst_upgrades@leapp_upstream_test
env:
SOURCE_RELEASE: "8.10"
- TARGET_RELEASE: "9.5"
+ TARGET_RELEASE: "9.6"
diff --git a/repos/system_upgrade/common/files/prod-certs/9.6/279.pem b/repos/system_upgrade/common/files/prod-certs/9.6/279.pem
new file mode 100644
index 00000000..a9ef267b
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.6/279.pem
@@ -0,0 +1,37 @@
+-----BEGIN CERTIFICATE-----
+MIIGZTCCBE2gAwIBAgIJALDxRLt/tVEiMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI0MDgxMjE5MDIwNVoXDTQ0MDgx
+MjE5MDIwNVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs1MzUwNTA4
+OC05ZTk5LTQyODItYmE4OS1mMjhhNjAwZWNhZWFdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOB7jCB6zAJBgNVHRMEAjAAMEMGDCsGAQQBkggJAYIXAQQzDDFSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIFBvd2VyLCBsaXR0bGUgZW5kaWFuMBUGDCsG
+AQQBkggJAYIXAgQFDAM5LjYwGQYMKwYBBAGSCAkBghcDBAkMB3BwYzY0bGUwJwYM
+KwYBBAGSCAkBghcEBBcMFXJoZWwtOSxyaGVsLTktcHBjNjRsZTAdBgNVHQ4EFgQU
+YeogtTV8r2dkOv9rCOYQeNDNH5UwHwYDVR0jBBgwFoAUlv27HEBA/0CErbIfCybB
+w2pv1nwwDQYJKoZIhvcNAQELBQADggIBACRmyYbMhmuV+w4E+Hlonlt0mooB6EF6
+h/xknuBRw/saSL+7sLfbItaxWH5euxDc/5XvII2t0Jjl+GDnAjI75xrTuN3gT88Z
+9wd1kvDVqt46GI6VKVH1SujJoJpGenfhTVwenATZwdq260RgYgM3Zv1d3I4Lu/GY
+65T//j0/8tBmgqMc6BRvIrDa1wtVUbEwH3b/jwZoeitps1hKIH9yKZV79HZ7WVdb
+otDtsAk7VKZGRjGdvYsfWZrjmyyyc5wX2AemzpnhSm1kkGvOAjSMsJ0QcrSu/5vj
+AAK64J1tDA93WKsAqDnK7tUOx6qwICllbgVmKWl/02JH8ELs/sJnsWBEigfdZmTh
+/3Q8DPNni7scYkJ5Vs0tL8pke29C1bgAYjoBiQgf/ffNunTOWgdkdFHbd9I3+aLh
+pO7qqkndEkl85xkQJrZWO35NvPD4NAwnsDrIP0oJg5mYNTB11C5SlHhllT/Iu374
+8afWtoHaB50vsqM2dtvh/UsCyGynWYc93TLsU6a4gBl19D7VAx0fiOwdD+CyweUp
+xcos6MIIuFAFUIVpD+/3w499Lw9m5dcfApl6HCyQgAoafXCJjM/aXsSsSWr2d9TF
+c6S/uA2egh4fUv8zYnlZKUvCTu8kn4Lv439wW0xyIEB/sD/gXk9e8H9KkUuKDExx
+yTSjzqnPM82N
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.6/362.pem b/repos/system_upgrade/common/files/prod-certs/9.6/362.pem
new file mode 100644
index 00000000..d7c1a6be
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.6/362.pem
@@ -0,0 +1,37 @@
+-----BEGIN CERTIFICATE-----
+MIIGdDCCBFygAwIBAgIJALDxRLt/tVE4MA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI0MDgxMjE5MDIyNFoXDTQ0MDgx
+MjE5MDIyNFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs2MjBmNzkx
+MC0xNDk5LTRmMzAtYTk3NS1hYWFiOGQyMWE1NmNdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOB/TCB+jAJBgNVHRMEAjAAMEgGDCsGAQQBkggJAYJqAQQ4DDZSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIFBvd2VyLCBsaXR0bGUgZW5kaWFuIEJldGEw
+GgYMKwYBBAGSCAkBgmoCBAoMCDkuNiBCZXRhMBkGDCsGAQQBkggJAYJqAwQJDAdw
+cGM2NGxlMCwGDCsGAQQBkggJAYJqBAQcDBpyaGVsLTkscmhlbC05LWJldGEtcHBj
+NjRsZTAdBgNVHQ4EFgQUYeogtTV8r2dkOv9rCOYQeNDNH5UwHwYDVR0jBBgwFoAU
+lv27HEBA/0CErbIfCybBw2pv1nwwDQYJKoZIhvcNAQELBQADggIBADzaMxiligMU
+vVUxGdlKgceVXcZR6TC/nACDxyRFm7JGKYC0Ncxum2RWQ10mMD1HM1xa0NVz3BLO
+a+VrZ3MGTpKuWQgN0TKIzjykxxfJMip8AVYx6UvQ4SxxZWFIVPuC0XYfYc2pOV5A
+OcO63O+R7QVvLpZ3q7tX3uAXCfWWvJkoJ+MzKCl3lEmeKAcaikcums+aOd/JwTSo
+bt5ExLgC4J1cvevH+IBCUbmN1r+xrkHNiNWjys0MIo1JsPmi1A1kDeORXPN4xXvH
+x69z9SuHrUd2iFXpMfezqZsmiaa/FP6UOKwpDyEqZGE+/aT/RBza9BeYX74vDpFI
+h0vMtx3lHE+PGh7a6kfXV2GL4IP7w5MbdZQIJ/ZS4oT/zG3E2wRnGD4+oQ3Bm/TV
+Or0IHnafxXYXgsQ6bsMsZN7BRZ8VfaEdM3IVRqVyPVWzo0kYkHZcnVQpabmCWPjc
+NUwMJDni3LfjxKreHLDQBEkwX4XoZnSq/xMHO6ppe0sZ2XgAOsw/B92ekTTEdoKZ
+dEQBkqv2FRUbMoILnNVWJp4yGMOPcTl7hrlcJjKRvKs1hKWkQKN6g4YDHCglkVfH
+ltDGkolsUYFvoygoi8VCCDfz7whn6pXmzlpk1VkzE+V1R88Tf5ygrSNWETOZMU/B
+5P07jdNriEBCZaCPq7T8odOt1cKZpVdg
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.6/363.pem b/repos/system_upgrade/common/files/prod-certs/9.6/363.pem
new file mode 100644
index 00000000..f75b478d
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.6/363.pem
@@ -0,0 +1,37 @@
+-----BEGIN CERTIFICATE-----
+MIIGZjCCBE6gAwIBAgIJALDxRLt/tVE3MA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI0MDgxMjE5MDIyNFoXDTQ0MDgx
+MjE5MDIyNFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs1MDE5NmU1
+ZC1lNDgzLTQwNDAtYjcwYS03NDg5NDliZTRjZmFdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOB7zCB7DAJBgNVHRMEAjAAMDoGDCsGAQQBkggJAYJrAQQqDChSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIEFSTSA2NCBCZXRhMBoGDCsGAQQBkggJAYJr
+AgQKDAg5LjYgQmV0YTAZBgwrBgEEAZIICQGCawMECQwHYWFyY2g2NDAsBgwrBgEE
+AZIICQGCawQEHAwacmhlbC05LHJoZWwtOS1iZXRhLWFhcmNoNjQwHQYDVR0OBBYE
+FGHqILU1fK9nZDr/awjmEHjQzR+VMB8GA1UdIwQYMBaAFJb9uxxAQP9AhK2yHwsm
+wcNqb9Z8MA0GCSqGSIb3DQEBCwUAA4ICAQBiaXwTsDt1Kz79ZJ3TnNDuX3IntXuS
+DxIGAhpgJ+ynaSULh8xL6pq5L6EtYnVzpO6T+j2ADbJlLkIRV0fMD6MMZo4YQtHH
+NofoNgJoYI4uXcCKYS2vIUw+0Br7qx8BPTb5jP+VRl9LU8W299nYOTp+vY7GQ0Ny
+hT66G+FJfo5CqHZpMTGgJbpjoP3DMpXZcARBnjQ0LhvjvcalGmPP4//tcPNwft6r
+ei8fxBvpmCXDS9/vXwiEf6jEidqq1Q6bCdL20Y1ZPY13oUEYFqrf8PhexlV1yoD4
+F4gEbVHPQ4yvH3D6xIAFE4959+H+dgMfXqn9gkUvnTMdyfzcUYGLTAib3zb4eW/J
+anzwfBAcssBzjU1v/txWMRlZI1GJFNtboAixnRksj1epE848J3bjtiw3R/Z5grFn
+dieJwjfM4AEDrpRmA5tDnv5z73k1djJbacL7fTIyTuSnDbjH2J5PtCAvWTLYq/kP
+h8E3sJ9zXP2nJMBRgQiZJY98bPKLT63ngRScI+CZs1fLvaoCq0o+qkcfnDEja3aH
+TQYXHVZblA4TYnD8Vh8gKwCt8+1WF5C9BGcMmKvozuuIaIJgT21V+DLzfTESpZz7
+lcPKk/3dBFtFGOdA4SQ4o/dxItJ0Eay1SlOI9xL9KgTNqv6ftA+9kxZ0MSPwO7eG
+b5Am4gNTK734uQ==
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.6/419.pem b/repos/system_upgrade/common/files/prod-certs/9.6/419.pem
new file mode 100644
index 00000000..e2d3ee5b
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.6/419.pem
@@ -0,0 +1,36 @@
+-----BEGIN CERTIFICATE-----
+MIIGVzCCBD+gAwIBAgIJALDxRLt/tVEhMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI0MDgxMjE5MDIwNVoXDTQ0MDgx
+MjE5MDIwNVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFtkN2E2ZDhi
+Mi0yZjMzLTRhYzMtYmM5Ni1mMjU5MTNmZTQxNWNdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOB4DCB3TAJBgNVHRMEAjAAMDUGDCsGAQQBkggJAYMjAQQlDCNSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIEFSTSA2NDAVBgwrBgEEAZIICQGDIwIEBQwD
+OS42MBkGDCsGAQQBkggJAYMjAwQJDAdhYXJjaDY0MCcGDCsGAQQBkggJAYMjBAQX
+DBVyaGVsLTkscmhlbC05LWFhcmNoNjQwHQYDVR0OBBYEFGHqILU1fK9nZDr/awjm
+EHjQzR+VMB8GA1UdIwQYMBaAFJb9uxxAQP9AhK2yHwsmwcNqb9Z8MA0GCSqGSIb3
+DQEBCwUAA4ICAQCJqWcTJezGVGxsNvFkbsrvbHhJBuBMeDZZuOLaXaQVyfNwYRS2
+2k/oUhhQQMfiDiaLkz7yz0Zw5clC/K5G6Sg9+nWDA57lsZuNV5CnSBYOJf2jY2fK
+ue/1M75Y4fJAKtBxpvkFaIaKyMQ/0VC67OFYtbBZEOuwIpQh9aPFHnrh2WnpcUvJ
+B93O0fsRjHK30E7jF8ncNmhevMLvVlxH0JjfbvcU3dGG964K41tFiozshvnAGFce
+kFzxVVYQL3ZKycqonwFr3BbzgKwx5EXUFBg/ax694aijeeVA6yuQXWJvV42IjUeW
+vn+dvRrHh2fv4MXuyc+oljbXaEZE7m9gtWBtUEBHqWoQz6rQ25uZylnK+SDWE5bt
+xM+1qGUSf90VvyFO3fu1qeVVr0LbnMAgO9YnJjLRQax0mgj3tZTRvM72W4hfBy36
+ndYnJE2le5xYWVl1Hd29dil70cokj5hN8nQI9eStfcOvs9Vw2ngIL/H3+QTRS/NO
+l7MHQXbriLAaHavED6B50dEfw8pQXybEju4Rs+nDgm5hdE7FjbVflVQejSjyHIMd
+AQnwrDSMPRezCJFHQeB0t7oaHpAHECc2zBpvcvy7qCN2Z08h6jdzfrp15UDkHEcy
+Qa9dtYRUthI3pjGGu7WTPwX9y0veot3EZRnEzeIprIsHcMKfmkMg4HRJ3A==
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.6/433.pem b/repos/system_upgrade/common/files/prod-certs/9.6/433.pem
new file mode 100644
index 00000000..ac588c1c
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.6/433.pem
@@ -0,0 +1,37 @@
+-----BEGIN CERTIFICATE-----
+MIIGaTCCBFGgAwIBAgIJALDxRLt/tVE5MA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI0MDgxMjE5MDIyNFoXDTQ0MDgx
+MjE5MDIyNFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs4NzcwNzQ4
+MS02MGEwLTQwYTUtYWVhMi0xNjNmODUyMzI3ZTFdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOB8jCB7zAJBgNVHRMEAjAAMEEGDCsGAQQBkggJAYMxAQQxDC9SZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIElCTSB6IFN5c3RlbXMgQmV0YTAaBgwrBgEE
+AZIICQGDMQIECgwIOS42IEJldGEwFwYMKwYBBAGSCAkBgzEDBAcMBXMzOTB4MCoG
+DCsGAQQBkggJAYMxBAQaDBhyaGVsLTkscmhlbC05LWJldGEtczM5MHgwHQYDVR0O
+BBYEFGHqILU1fK9nZDr/awjmEHjQzR+VMB8GA1UdIwQYMBaAFJb9uxxAQP9AhK2y
+HwsmwcNqb9Z8MA0GCSqGSIb3DQEBCwUAA4ICAQC57eNKMpTQuIEEoYXkhD0oYkgD
+RzDFyqKZgyK0IdOy6t0d9GcMY/nI/uYQltUC+HWBUJWYkHc84xjfP3ITfuHWP8KP
+3qdXLPwTDcNVUGtLgXIfEz4FEM4OVwfM2X0jIcLfkDmZzffWjHgBpAUfZM6fBvXl
+soPJ+s4/vIUFNbVtcJh9iw4glt/GFBOX/bNPV9kniAAYuyabW43X7GxfREJY18Db
++Fv7c+z2eM4fQFpLkSEZwsNN68G4OHDC7tWsYtCRocipWGs6lN5MBNXC0q90ds5O
+kOLRfHKOLFqbZnBNdgSOlsf+ENH3exUhoDvZE0gnAVALABVv6PCtsHn2rPLonsrB
+l9ZKqCVVDpQMDXmZC79XKB0nVrNQ7qYorCVnYqnTAkuvw4BuXpKASaSCDSRWLQN0
+H89phUM64VnyPD5pBTw+YJURDm8cwD5e6HaXhKzG1ca9PWL+RVxedB4Rl2VG00fE
+QUBbHZktH+H1P3MtqALB7IUav4IuBgdF27W55GExCgshRuyV6/VHmYiD+L52XxCH
+71mdWTp6JR1/hMYKPLhc5/ESBoMpqMXa4UWIOtMWiafWaDS4Cib+uyIIzCgqW8ee
+t+yQtCs7MUUd6t87XP7/TTQJe6e0JsyDnME9br0E4g57Y8cXjOILGg/ihqBFOGt1
+vhbX7w/YRjVpwJhi9w==
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.6/479.pem b/repos/system_upgrade/common/files/prod-certs/9.6/479.pem
new file mode 100644
index 00000000..c2bac3ee
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.6/479.pem
@@ -0,0 +1,36 @@
+-----BEGIN CERTIFICATE-----
+MIIGVTCCBD2gAwIBAgIJALDxRLt/tVEkMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI0MDgxMjE5MDIwNVoXDTQ0MDgx
+MjE5MDIwNVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFtjZWRlZTRi
+My0xOGFhLTQwMzMtYjE3OS01OTkwMjk2OGFkZjhdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOB3jCB2zAJBgNVHRMEAjAAMDUGDCsGAQQBkggJAYNfAQQlDCNSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIHg4Nl82NDAVBgwrBgEEAZIICQGDXwIEBQwD
+OS42MBgGDCsGAQQBkggJAYNfAwQIDAZ4ODZfNjQwJgYMKwYBBAGSCAkBg18EBBYM
+FHJoZWwtOSxyaGVsLTkteDg2XzY0MB0GA1UdDgQWBBRh6iC1NXyvZ2Q6/2sI5hB4
+0M0flTAfBgNVHSMEGDAWgBSW/bscQED/QIStsh8LJsHDam/WfDANBgkqhkiG9w0B
+AQsFAAOCAgEADoQWjROe9jPuYIB5cW7URXgDPVK3cpGnlKxEINdXT+dL7N2qNijy
+BcV0+SCHmswZ+F7OTozyGzGbJCrSHZrvF2lp2L8YddvkIFsWqrPkseU/0/oog5Qf
+ULA5WzV12u0Ra/DWinhUq6NZWLAt/FvJ7+WHPdJ7B0WsiA751l7crvfKfen93Xzb
+0eakHrotcPi9YH/Jez8xjs4Wc3ra/7CbLqpsHuWzgzwJabiuLaf5PK95VVedzQIx
+lT+N6JydFIkXkofQJwTptPTh9lDbZDe33/dg5zX3l9CAQK7JYZKYoUzLirM2LO7s
+TGejW1mKGB+O23bQBGRkLoD4kbY17UMCFcKD7mZSO6laoOBOk8NYUxTDjT4e3cUB
+dHw5YVrj+BSHzgOGpc1KrmuBiOWZrZf4iaFuz4Kr88tL6TT6IH5UmfP3fuvvMyXs
+OWqTAfr/CPeJjLhjmbEagkS0kpgkyXodY8sq2Ph5vpn0o1QYNfy6KRtD/m6YaF7G
+SDkWEY5li338SROIFV6X8lKEzHMfQZzhqQWoJWQlFuAdByKrxz8x1RJZTkIT82h6
+uM/GO3v5xT5UXXa2x1X0JtS9rPGdnmAKQLJJz07s+2WCRqCFuBxJsV+aWCRLsab4
+jpo1NG0RH0KorjvBBMLx8bVSbl4YFJdOcomlRVrsC2iMUwl+PH5Ah4g=
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.6/486.pem b/repos/system_upgrade/common/files/prod-certs/9.6/486.pem
new file mode 100644
index 00000000..e130d5dc
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.6/486.pem
@@ -0,0 +1,37 @@
+-----BEGIN CERTIFICATE-----
+MIIGZDCCBEygAwIBAgIJALDxRLt/tVE6MA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI0MDgxMjE5MDIyNFoXDTQ0MDgx
+MjE5MDIyNFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFsyMTM1ODk1
+Yi1mMDRiLTRlNjUtOWYzMC04MmRlYmQ0Njc0NjNdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOB7TCB6jAJBgNVHRMEAjAAMDoGDCsGAQQBkggJAYNmAQQqDChSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIHg4Nl82NCBCZXRhMBoGDCsGAQQBkggJAYNm
+AgQKDAg5LjYgQmV0YTAYBgwrBgEEAZIICQGDZgMECAwGeDg2XzY0MCsGDCsGAQQB
+kggJAYNmBAQbDBlyaGVsLTkscmhlbC05LWJldGEteDg2XzY0MB0GA1UdDgQWBBRh
+6iC1NXyvZ2Q6/2sI5hB40M0flTAfBgNVHSMEGDAWgBSW/bscQED/QIStsh8LJsHD
+am/WfDANBgkqhkiG9w0BAQsFAAOCAgEAHhaEBX5fhB2zweFT0SuLB3OB11aE3Tjy
+q0dNxm8t3d5glgtratmAkPD+6Ct0gkdoGJ8GcBsFVzzM2ig236YOy8dCPVWBzLtd
+Oni5DpjSqnMX6yq4PuSViF1w+9pCKPJqzQK/u/F0njkwdu0mAwc1fkiCR0B6oB7s
+m1rHhuyC4PkAj5RYQ6+M4MpGfce0HSpUCzlnAlHYgjvmT3qCUvlEYLPg4/Z+wihZ
+1xdhhhoLNi43IdfmFQlTSNZqTwLB780qzHzi+UYgWg7wflTn8m1LAOlad5HWJFnE
+y6JnX+c+vfzvxFBSZABKJsZY/YKIAV14g42XL8zhIpJHtdYnUaveo1M90UAvSECP
+RAnPUIKWM1VYKfa2PpEC2/157KOQ4y7BUrAUlqs1qh8FoGCZYHMRmgYqHoycIvw+
+gs1gH77O9EyOMMjwyQqBUnzylJfhjkEgINDIGbPEiQpI33TBniw5yMRZ74XWOoi3
+rOIiaYxHBDpJ25LwbZsJOQUPmIKBTOpLK9N4IK7UvA7O8HCEEJz2+VLVf2svaoU1
+fd7MUYh9aCjEocKRQknxScJLVBXcFRy0I+tfVQwkcLqWCOrp3qpNmYwhC+C0vYtR
+/LZ58vf60+m+mKUmEJWF6X7QGFZptsc0ERme6sE1E41iNAIq3BsBMU/hQIVP50k4
+T3KefQomWk4=
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.6/72.pem b/repos/system_upgrade/common/files/prod-certs/9.6/72.pem
new file mode 100644
index 00000000..35927fbc
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.6/72.pem
@@ -0,0 +1,36 @@
+-----BEGIN CERTIFICATE-----
+MIIGVjCCBD6gAwIBAgIJALDxRLt/tVEjMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI0MDgxMjE5MDIwNVoXDTQ0MDgx
+MjE5MDIwNVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs0ZDhmOTky
+Yy04NDBjLTQ4MzYtODVkOS0zYWI5YjA1ZjViY2FdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOB3zCB3DAJBgNVHRMEAjAAMDsGCysGAQQBkggJAUgBBCwMKlJlZCBIYXQg
+RW50ZXJwcmlzZSBMaW51eCBmb3IgSUJNIHogU3lzdGVtczAUBgsrBgEEAZIICQFI
+AgQFDAM5LjYwFgYLKwYBBAGSCAkBSAMEBwwFczM5MHgwJAYLKwYBBAGSCAkBSAQE
+FQwTcmhlbC05LHJoZWwtOS1zMzkweDAdBgNVHQ4EFgQUYeogtTV8r2dkOv9rCOYQ
+eNDNH5UwHwYDVR0jBBgwFoAUlv27HEBA/0CErbIfCybBw2pv1nwwDQYJKoZIhvcN
+AQELBQADggIBANOzzfUKjlJsgJWUryjKfzPYISkCZXauHqBcST4N1HP1GA8tmMXi
+bgh14+l7ZO8EloFvEGANsX2ffMfauuJx2NV6ks07NHWuM7W9kghDe5ZccrJCz88E
+1zdvyWae5oSvTwfnvR/b63duOhs88u7NCQN2+n+pmJA0dPWbGTaIp3n4kJg8YKnd
+O8Nct2doNS+1rrLpRmVKQy/E7fAXQzt1Bxqs2hORqbgffiSE9a+4akitY97GXRBm
+nOO2DkyEW0xPtdy3zDvL7o7b1B0gdMOwqEolgGuDFsrfD+7ofpwOWjS+83gF6hMP
+5YVD3sugu6xzCx6y7Yl/BfX4qvvT4YHtYob5rQA/t7JY4u4ryadkUxQLMEccMsyS
+pKZQ8KFC5ZNJVK/ievkcBCsBlulbRftVJGF3TA2Hl2aBuMhGdUR5y/Q89WHUzeV6
+U6AVzyEsvIJguswvKvFAyHwNuViCfFCkjNkJolvd/g03OSy1A7piQaU20QyltWmx
+FILCR/DBUbCWIzKTfkLr93TbV2b1AH9uRW1SAGrftuevVXrNemWIwq1x/VgjDm3o
+nk637pnEfZZzX8T2gO5z5yjlP0PR4s7hKkmp3TmAeG9015pFxPnD3AMI261srQ+c
+KZBdIc5UseQo/4KvRKZ1CzxPh0WjJCzc/C/TKzIlEdELq/rnKGuqHKB9
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/upgrade_paths.json b/repos/system_upgrade/common/files/upgrade_paths.json
index 5399f148..cc9dcdb5 100644
--- a/repos/system_upgrade/common/files/upgrade_paths.json
+++ b/repos/system_upgrade/common/files/upgrade_paths.json
@@ -1,19 +1,17 @@
{
"default": {
- "7.9": ["8.8", "8.10"],
- "8.8": ["9.2"],
- "8.10": ["9.4", "9.5"],
+ "7.9": ["8.10"],
+ "8.10": ["9.4", "9.5", "9.6"],
"9.6": ["10.0"],
- "7": ["8.8", "8.10"],
- "8": ["9.2", "9.4", "9.5"],
+ "7": ["8.10"],
+ "8": ["9.4", "9.5", "9.6"],
"9": ["10.0"]
},
"saphana": {
- "7.9": ["8.10", "8.8"],
- "7": ["8.10", "8.8"],
- "8.8": ["9.2"],
- "8.10": ["9.4"],
- "8": ["9.4", "9.2"],
+ "7.9": ["8.10"],
+ "7": ["8.10"],
+ "8.10": ["9.6", "9.4"],
+ "8": ["9.6", "9.4"],
"9.6": ["10.0"],
"9": ["10.0"]
}
diff --git a/repos/system_upgrade/common/libraries/config/version.py b/repos/system_upgrade/common/libraries/config/version.py
index 152d9112..d710a647 100644
--- a/repos/system_upgrade/common/libraries/config/version.py
+++ b/repos/system_upgrade/common/libraries/config/version.py
@@ -18,7 +18,7 @@ OP_MAP = {
_SUPPORTED_VERSIONS = {
# Note: 'rhel-alt' is detected when on 'rhel' with kernel 4.x
'7': {'rhel': ['7.9'], 'rhel-alt': [], 'rhel-saphana': ['7.9']},
- '8': {'rhel': ['8.8', '8.10'], 'rhel-saphana': ['8.8', '8.10']},
+ '8': {'rhel': ['8.10'], 'rhel-saphana': ['8.10']},
'9': {'rhel': ['9.4', '9.5', '9.6'], 'rhel-saphana': ['9.4', '9.6']},
}
--
2.47.0
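
To see the effect of the new upgrade path data, a small sketch that reads the modified `upgrade_paths.json` from a leapp-repository checkout (the relative path below is an assumption about where the script is run from):

```python
import json

# Data file changed by the patch above, relative to a leapp-repository checkout.
UPGRADE_PATHS = 'repos/system_upgrade/common/files/upgrade_paths.json'

with open(UPGRADE_PATHS) as f:
    paths = json.load(f)

# After this patch a default (non-SAP-HANA) 8.10 system can target 9.4, 9.5 or 9.6,
# while 8.8 is no longer an accepted source version.
print(paths['default'].get('8.10', []))   # ['9.4', '9.5', '9.6']
print(paths['default'].get('8.8', []))    # [] - dropped by this patch
```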
View File
@ -1,127 +0,0 @@
From e530472760f0df186531bf3d17323ee082c7fba8 Mon Sep 17 00:00:00 2001
From: Tomas Fratrik <tfratrik@redhat.com>
Date: Mon, 18 Aug 2025 13:12:24 +0200
Subject: [PATCH 39/55] pylint: enable invalid-envvar-default
Jira: RHELMISC-16038
---
.pylintrc | 1 -
.../removeresumeservice/tests/test_removeresumeservice.py | 2 +-
.../tests/test_scheduleselinuxrelabeling.py | 4 ++--
.../tests/component_test_selinuxapplycustom.py | 2 +-
.../tests/component_test_selinuxcontentscanner.py | 2 +-
.../selinuxprepare/tests/component_test_selinuxprepare.py | 2 +-
.../setpermissiveselinux/tests/test_setpermissiveselinux.py | 4 ++--
7 files changed, 8 insertions(+), 9 deletions(-)
diff --git a/.pylintrc b/.pylintrc
index d98ab151..7a373e3d 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -45,7 +45,6 @@ disable=
too-many-positional-arguments, # we cannot set yet max-possitional-arguments unfortunately
# new for python3 version of pylint
unnecessary-pass,
- invalid-envvar-default, # pylint3 warnings envvar returns str/none by default
bad-option-value, # python 2 doesn't have import-outside-toplevel, but in some case we need to import outside toplevel
super-with-arguments, # required in python 2
raise-missing-from, # no 'raise from' in python 2
diff --git a/repos/system_upgrade/common/actors/removeresumeservice/tests/test_removeresumeservice.py b/repos/system_upgrade/common/actors/removeresumeservice/tests/test_removeresumeservice.py
index ea803856..d59ef346 100644
--- a/repos/system_upgrade/common/actors/removeresumeservice/tests/test_removeresumeservice.py
+++ b/repos/system_upgrade/common/actors/removeresumeservice/tests/test_removeresumeservice.py
@@ -11,7 +11,7 @@ import pytest
'under the root user.',
)
# TODO make the test not destructive
-@pytest.mark.skipif(os.getenv("DESTRUCTIVE_TESTING", False) in [False, "0"],
+@pytest.mark.skipif(os.getenv("DESTRUCTIVE_TESTING", "0").lower() in ["false", "0"],
reason='Test disabled by default because it would modify the system')
def test_remove_resume_service(current_actor_context):
service_name = 'leapp_resume.service'
diff --git a/repos/system_upgrade/common/actors/scheduleselinuxrelabeling/tests/test_scheduleselinuxrelabeling.py b/repos/system_upgrade/common/actors/scheduleselinuxrelabeling/tests/test_scheduleselinuxrelabeling.py
index 595b9985..8603bd97 100644
--- a/repos/system_upgrade/common/actors/scheduleselinuxrelabeling/tests/test_scheduleselinuxrelabeling.py
+++ b/repos/system_upgrade/common/actors/scheduleselinuxrelabeling/tests/test_scheduleselinuxrelabeling.py
@@ -9,7 +9,7 @@ from leapp.snactor.fixture import current_actor_context
@pytest.mark.skipif(
- os.getenv("DESTRUCTIVE_TESTING", False) in [False, "0"],
+ os.getenv("DESTRUCTIVE_TESTING", "0").lower() in ["false", "0"],
reason='Test disabled by default because it would modify the system',
)
def test_schedule_no_relabel(current_actor_context):
@@ -19,7 +19,7 @@ def test_schedule_no_relabel(current_actor_context):
@pytest.mark.skipif(
- os.getenv("DESTRUCTIVE_TESTING", False) in [False, "0"],
+ os.getenv("DESTRUCTIVE_TESTING", "0").lower() in ["false", "0"],
reason='Test disabled by default because it would modify the system',
)
def test_schedule_relabel(current_actor_context):
diff --git a/repos/system_upgrade/common/actors/selinux/selinuxapplycustom/tests/component_test_selinuxapplycustom.py b/repos/system_upgrade/common/actors/selinux/selinuxapplycustom/tests/component_test_selinuxapplycustom.py
index 8a4665c1..aab18e58 100644
--- a/repos/system_upgrade/common/actors/selinux/selinuxapplycustom/tests/component_test_selinuxapplycustom.py
+++ b/repos/system_upgrade/common/actors/selinux/selinuxapplycustom/tests/component_test_selinuxapplycustom.py
@@ -72,7 +72,7 @@ def destructive_selinux_env():
"Failed to remove SELinux customizations after testing")
-@pytest.mark.skipif(os.getenv("DESTRUCTIVE_TESTING", False) in [False, "0"],
+@pytest.mark.skipif(os.getenv("DESTRUCTIVE_TESTING", "0").lower() in ["false", "0"],
reason='Test disabled by default because it would modify the system')
def test_SELinuxApplyCustom(current_actor_context, destructive_selinux_teardown):
diff --git a/repos/system_upgrade/common/actors/selinux/selinuxcontentscanner/tests/component_test_selinuxcontentscanner.py b/repos/system_upgrade/common/actors/selinux/selinuxcontentscanner/tests/component_test_selinuxcontentscanner.py
index faa2e1b0..802e038a 100644
--- a/repos/system_upgrade/common/actors/selinux/selinuxcontentscanner/tests/component_test_selinuxcontentscanner.py
+++ b/repos/system_upgrade/common/actors/selinux/selinuxcontentscanner/tests/component_test_selinuxcontentscanner.py
@@ -76,7 +76,7 @@ def find_semanage_rule(rules, rule):
return next((r for r in rules if all(word in r for word in rule)), None)
-@pytest.mark.skipif(os.getenv("DESTRUCTIVE_TESTING", False) in [False, "0"],
+@pytest.mark.skipif(os.getenv("DESTRUCTIVE_TESTING", "false") in ["False", "false", "0"],
reason='Test disabled by default because it would modify the system')
def test_SELinuxContentScanner(current_actor_context, destructive_selinux_env):
diff --git a/repos/system_upgrade/common/actors/selinux/selinuxprepare/tests/component_test_selinuxprepare.py b/repos/system_upgrade/common/actors/selinux/selinuxprepare/tests/component_test_selinuxprepare.py
index bad1baa2..d124675a 100644
--- a/repos/system_upgrade/common/actors/selinux/selinuxprepare/tests/component_test_selinuxprepare.py
+++ b/repos/system_upgrade/common/actors/selinux/selinuxprepare/tests/component_test_selinuxprepare.py
@@ -76,7 +76,7 @@ def destructive_selinux_env():
_run_cmd(semodule_command)
-@pytest.mark.skipif(os.getenv('DESTRUCTIVE_TESTING', False) in [False, '0'],
+@pytest.mark.skipif(os.getenv('DESTRUCTIVE_TESTING', '0').lower() in ['false', '0'],
reason='Test disabled by default because it would modify the system')
def test_SELinuxPrepare(current_actor_context, semodule_lfull_initial, semanage_export_initial,
destructive_selinux_env):
diff --git a/repos/system_upgrade/common/actors/setpermissiveselinux/tests/test_setpermissiveselinux.py b/repos/system_upgrade/common/actors/setpermissiveselinux/tests/test_setpermissiveselinux.py
index efa4e550..9acdf39a 100644
--- a/repos/system_upgrade/common/actors/setpermissiveselinux/tests/test_setpermissiveselinux.py
+++ b/repos/system_upgrade/common/actors/setpermissiveselinux/tests/test_setpermissiveselinux.py
@@ -6,7 +6,7 @@ from leapp.models import SelinuxPermissiveDecision
@pytest.mark.skipif(
- os.getenv("DESTRUCTIVE_TESTING", False) in [False, "0"],
+ os.getenv("DESTRUCTIVE_TESTING", "0").lower() in ["0", "false"],
reason='Test disabled by default because it would modify the system')
def check_permissive_in_conf():
""" Check if we have set permissive in SElinux conf file """
@@ -19,7 +19,7 @@ def check_permissive_in_conf():
@pytest.mark.skipif(
- os.getenv("DESTRUCTIVE_TESTING", False) in [False, "0"],
+ os.getenv("DESTRUCTIVE_TESTING", "false").lower() in ["0", "false"],
reason='Test disabled by default because it would modify the system')
def test_set_selinux_permissive(current_actor_context):
current_actor_context.feed(SelinuxPermissiveDecision(set_permissive=True))
--
2.51.1
View File
@ -1,613 +0,0 @@
From 81b24a657037ceffc3959abb4231a19352ca9a82 Mon Sep 17 00:00:00 2001
From: Tomas Fratrik <tfratrik@redhat.com>
Date: Mon, 18 Aug 2025 14:42:15 +0200
Subject: [PATCH 40/55] pylint: enable bad-option-value
This pylint warning triggers when you try to disable a pylint check that is unknown or obsolete. Enabling this rule caused such warnings to appear, so the corresponding disables needed to be removed:
* no-absolute-import -> Only relevant for Python 2. Python 3 uses absolute imports by default.
* no-init -> Obsolete: __init__ method checks have changed, this option no longer exists.
* bad-continuation -> Superseded by modern pylint formatting checks.
* no-self-use -> Checks whether a method could be a function.
* relative-import -> Only applies to Python 2; Python 3 handles relative imports differently.
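For illustration, a hedged sketch (not part of the diffs below) of comments the re-enabled check now evaluates:

    # pylint: disable=no-absolute-import   <- unknown to a Python 3 pylint, reported as bad-option-value
    # pylint: disable=unused-import        <- still a valid check name, accepted as before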
Jira: RHELMISC-16038
---
.pylintrc | 8 +----
commands/upgrade/breadcrumbs.py | 9 +++--
.../checkmemory/libraries/checkmemory.py | 4 +--
.../tests/test_enablerhsmtargetrepos.py | 3 +-
.../tests/test_mount_unit_generation.py | 3 +-
.../libraries/upgradeinitramfsgenerator.py | 2 +-
.../unit_test_upgradeinitramfsgenerator.py | 2 +-
.../tests/test_kernelcmdlineconfig.py | 4 +--
.../opensshpermitrootlogincheck/actor.py | 6 ++--
.../actors/persistentnetnamesdisable/actor.py | 6 ++--
.../libraries/scankernel.py | 2 +-
.../tests/unit_test_targetuserspacecreator.py | 3 +-
.../tests/test_trustedgpgkeys.py | 2 +-
.../common/files/rhel_upgrade.py | 9 +++--
.../common/libraries/dnfplugin.py | 3 +-
repos/system_upgrade/common/libraries/grub.py | 4 +--
.../common/libraries/mounting.py | 3 +-
.../common/libraries/overlaygen.py | 35 ++++++++++++-------
.../common/libraries/tests/test_distro.py | 3 +-
.../common/libraries/tests/test_grub.py | 2 +-
.../common/libraries/tests/test_rhsm.py | 6 ++--
.../common/libraries/testutils.py | 2 +-
.../checkvdo/tests/unit_test_checkvdo.py | 6 ++--
.../actors/nisscanner/libraries/nisscan.py | 6 ++--
.../libraries/opensslconfigcheck.py | 3 +-
25 files changed, 81 insertions(+), 55 deletions(-)
diff --git a/.pylintrc b/.pylintrc
index 7a373e3d..0cba1129 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -9,23 +9,19 @@ disable=
raising-bad-type,
redundant-keyword-arg, # it's one or the other, this one is not so bad at all
# "W" Warnings for stylistic problems or minor programming issues
- no-absolute-import,
arguments-differ,
cell-var-from-loop,
fixme,
lost-exception,
- no-init,
pointless-string-statement,
protected-access,
redefined-outer-name,
- relative-import,
undefined-loop-variable,
unsubscriptable-object,
unused-argument,
unused-import,
unspecified-encoding,
# "C" Coding convention violations
- bad-continuation,
missing-docstring,
wrong-import-order,
use-maxsplit-arg,
@@ -33,7 +29,6 @@ disable=
consider-using-enumerate,
# "R" Refactor recommendations
duplicate-code,
- no-self-use,
too-few-public-methods,
too-many-branches,
too-many-locals,
@@ -42,10 +37,9 @@ disable=
use-list-literal,
use-dict-literal,
too-many-lines, # we do not want to take care about that one
- too-many-positional-arguments, # we cannot set yet max-possitional-arguments unfortunately
+ too-many-positional-arguments,
# new for python3 version of pylint
unnecessary-pass,
- bad-option-value, # python 2 doesn't have import-outside-toplevel, but in some case we need to import outside toplevel
super-with-arguments, # required in python 2
raise-missing-from, # no 'raise from' in python 2
use-a-generator, # cannot be modified because of Python2 support
diff --git a/commands/upgrade/breadcrumbs.py b/commands/upgrade/breadcrumbs.py
index 1a90c143..95a551c3 100644
--- a/commands/upgrade/breadcrumbs.py
+++ b/commands/upgrade/breadcrumbs.py
@@ -80,7 +80,8 @@ class _BreadCrumbs:
# even though it shouldn't though, just ignore it
pass
- def _commit_rhsm_facts(self):
+ @staticmethod
+ def _commit_rhsm_facts():
if runs_in_container():
return
cmd = ['/usr/sbin/subscription-manager', 'facts', '--update']
@@ -122,7 +123,8 @@ class _BreadCrumbs:
except OSError:
sys.stderr.write('WARNING: Could not write to /etc/migration-results\n')
- def _get_packages(self):
+ @staticmethod
+ def _get_packages():
cmd = ['/bin/bash', '-c', 'rpm -qa --queryformat="%{nevra} %{SIGPGP:pgpsig}\n" | grep -Ee "leapp|snactor"']
res = _call(cmd, lambda x, y: None, lambda x, y: None)
if res.get('exit_code', None) == 0:
@@ -131,7 +133,8 @@ class _BreadCrumbs:
for t in [line.strip().split(' ', 1) for line in res['stdout'].split('\n') if line.strip()]]
return []
- def _verify_leapp_pkgs(self):
+ @staticmethod
+ def _verify_leapp_pkgs():
if not os.environ.get('LEAPP_IPU_IN_PROGRESS'):
return []
upg_path = os.environ.get('LEAPP_IPU_IN_PROGRESS').split('to')
diff --git a/repos/system_upgrade/common/actors/checkmemory/libraries/checkmemory.py b/repos/system_upgrade/common/actors/checkmemory/libraries/checkmemory.py
index 808c9662..040b404b 100644
--- a/repos/system_upgrade/common/actors/checkmemory/libraries/checkmemory.py
+++ b/repos/system_upgrade/common/actors/checkmemory/libraries/checkmemory.py
@@ -34,8 +34,8 @@ def process():
if minimum_req_error:
title = 'Minimum memory requirements for RHEL {} are not met'.format(version.get_target_major_version())
summary = 'Memory detected: {} MiB, required: {} MiB'.format(
- int(minimum_req_error['detected'] / 1024), # noqa: W1619; pylint: disable=old-division
- int(minimum_req_error['minimal_req'] / 1024), # noqa: W1619; pylint: disable=old-division
+ int(minimum_req_error['detected'] / 1024),
+ int(minimum_req_error['minimal_req'] / 1024),
)
reporting.create_report([
reporting.Title(title),
diff --git a/repos/system_upgrade/common/actors/enablerhsmtargetrepos/tests/test_enablerhsmtargetrepos.py b/repos/system_upgrade/common/actors/enablerhsmtargetrepos/tests/test_enablerhsmtargetrepos.py
index f7b3f34a..dba38fff 100644
--- a/repos/system_upgrade/common/actors/enablerhsmtargetrepos/tests/test_enablerhsmtargetrepos.py
+++ b/repos/system_upgrade/common/actors/enablerhsmtargetrepos/tests/test_enablerhsmtargetrepos.py
@@ -17,7 +17,8 @@ def not_isolated_actions(raise_err=False):
def __init__(self, base_dir=None):
pass
- def call(self, cmd, **kwargs):
+ @staticmethod
+ def call(cmd, **kwargs):
commands_called.append((cmd, kwargs))
if raise_err:
raise_call_error()
diff --git a/repos/system_upgrade/common/actors/initramfs/mount_units_generator/tests/test_mount_unit_generation.py b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/tests/test_mount_unit_generation.py
index 9d75a31d..8849ada9 100644
--- a/repos/system_upgrade/common/actors/initramfs/mount_units_generator/tests/test_mount_unit_generation.py
+++ b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/tests/test_mount_unit_generation.py
@@ -249,7 +249,8 @@ def test_copy_units_mixed_content(monkeypatch):
def __init__(self):
self.base_dir = '/container'
- def full_path(self, path):
+ @staticmethod
+ def full_path(path):
return os.path.join('/container', path.lstrip('/'))
mock_container = MockedContainerContext()
diff --git a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/libraries/upgradeinitramfsgenerator.py b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/libraries/upgradeinitramfsgenerator.py
index 3ad92167..f7e4a8af 100644
--- a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/libraries/upgradeinitramfsgenerator.py
+++ b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/libraries/upgradeinitramfsgenerator.py
@@ -271,7 +271,7 @@ def _get_fspace(path, convert_to_mibs=False, coefficient=1):
coefficient = min(coefficient, 1)
fspace_bytes = int(stat.f_frsize * stat.f_bavail * coefficient)
if convert_to_mibs:
- return int(fspace_bytes / 1024 / 1024) # noqa: W1619; pylint: disable=old-division
+ return int(fspace_bytes / 1024 / 1024)
return fspace_bytes
diff --git a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/tests/unit_test_upgradeinitramfsgenerator.py b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/tests/unit_test_upgradeinitramfsgenerator.py
index 185cd4f0..b96bf79f 100644
--- a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/tests/unit_test_upgradeinitramfsgenerator.py
+++ b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/tests/unit_test_upgradeinitramfsgenerator.py
@@ -257,7 +257,7 @@ class MockedGetFspace:
def __call__(self, dummy_path, convert_to_mibs=False):
if not convert_to_mibs:
return self.space
- return int(self.space / 1024 / 1024) # noqa: W1619; pylint: disable=old-division
+ return int(self.space / 1024 / 1024)
@pytest.mark.parametrize('input_msgs,dracut_modules,kernel_modules', [
diff --git a/repos/system_upgrade/common/actors/kernelcmdlineconfig/tests/test_kernelcmdlineconfig.py b/repos/system_upgrade/common/actors/kernelcmdlineconfig/tests/test_kernelcmdlineconfig.py
index b7e51833..5b35bcd3 100644
--- a/repos/system_upgrade/common/actors/kernelcmdlineconfig/tests/test_kernelcmdlineconfig.py
+++ b/repos/system_upgrade/common/actors/kernelcmdlineconfig/tests/test_kernelcmdlineconfig.py
@@ -15,7 +15,7 @@ from leapp.models import InstalledTargetKernelInfo, KernelCmdlineArg, TargetKern
TARGET_KERNEL_NEVRA = 'kernel-core-1.2.3-4.x86_64.el8.x64_64'
-# pylint: disable=E501
+# pylint: disable=line-too-long
SAMPLE_KERNEL_ARGS = ('ro rootflags=subvol=root'
' resume=/dev/mapper/luks-2c0df999-81ec-4a35-a1f9-b93afee8c6ad'
' rd.luks.uuid=luks-90a6412f-c588-46ca-9118-5aca35943d25'
@@ -31,7 +31,7 @@ title="Fedora Linux (6.5.13-100.fc37.x86_64) 37 (Thirty Seven)"
id="a3018267cdd8451db7c77bb3e5b1403d-6.5.13-100.fc37.x86_64"
""" # noqa: E501
SAMPLE_GRUBBY_INFO_OUTPUT = TEMPLATE_GRUBBY_INFO_OUTPUT.format(SAMPLE_KERNEL_ARGS, SAMPLE_KERNEL_ROOT)
-# pylint: enable=E501
+# pylint: enable=line-too-long
class MockedRun:
diff --git a/repos/system_upgrade/common/actors/opensshpermitrootlogincheck/actor.py b/repos/system_upgrade/common/actors/opensshpermitrootlogincheck/actor.py
index 9c1a421c..98d329ab 100644
--- a/repos/system_upgrade/common/actors/opensshpermitrootlogincheck/actor.py
+++ b/repos/system_upgrade/common/actors/opensshpermitrootlogincheck/actor.py
@@ -55,7 +55,8 @@ class OpenSshPermitRootLoginCheck(Actor):
else:
api.current_logger().warning('Unknown source major version: {}'.format(get_source_major_version()))
- def process7to8(self, config):
+ @staticmethod
+ def process7to8(config):
# when the config was not modified, we can pass this check and let the
# rpm handle the configuration file update
if not config.modified:
@@ -112,7 +113,8 @@ class OpenSshPermitRootLoginCheck(Actor):
reporting.Groups([reporting.Groups.INHIBITOR])
] + COMMON_RESOURCES)
- def process8to9(self, config):
+ @staticmethod
+ def process8to9(config):
# RHEL8 default sshd configuration file is not modified: It will get replaced by rpm and
# root will no longer be able to connect through ssh. This will probably result in many
# false positives so it will have to be waived a lot
diff --git a/repos/system_upgrade/common/actors/persistentnetnamesdisable/actor.py b/repos/system_upgrade/common/actors/persistentnetnamesdisable/actor.py
index 1add3588..b0182982 100644
--- a/repos/system_upgrade/common/actors/persistentnetnamesdisable/actor.py
+++ b/repos/system_upgrade/common/actors/persistentnetnamesdisable/actor.py
@@ -18,7 +18,8 @@ class PersistentNetNamesDisable(Actor):
produces = (KernelCmdlineArg, Report)
tags = (FactsPhaseTag, IPUWorkflowTag)
- def ethX_count(self, interfaces):
+ @staticmethod
+ def ethX_count(interfaces):
ethX = re.compile('eth[0-9]+')
count = 0
@@ -27,7 +28,8 @@ class PersistentNetNamesDisable(Actor):
count = count + 1
return count
- def single_eth0(self, interfaces):
+ @staticmethod
+ def single_eth0(interfaces):
return len(interfaces) == 1 and interfaces[0].name == 'eth0'
def disable_persistent_naming(self):
diff --git a/repos/system_upgrade/common/actors/scaninstalledtargetkernelversion/libraries/scankernel.py b/repos/system_upgrade/common/actors/scaninstalledtargetkernelversion/libraries/scankernel.py
index c1cc69ee..35683cca 100644
--- a/repos/system_upgrade/common/actors/scaninstalledtargetkernelversion/libraries/scankernel.py
+++ b/repos/system_upgrade/common/actors/scaninstalledtargetkernelversion/libraries/scankernel.py
@@ -70,7 +70,7 @@ def get_boot_files_provided_by_kernel_pkg(kernel_nevra):
@suppress_deprecation(InstalledTargetKernelVersion)
def process():
- # pylint: disable=no-else-return - false positive
+ # pylint: disable=no-else-return # false positive
# TODO: should we take care about stuff of kernel-rt and kernel in the same
# time when both are present? or just one? currently, handle only one
# of these during the upgrade. kernel-rt has higher prio when original sys
diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py
index 1e5b87b0..bb17d89a 100644
--- a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py
+++ b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py
@@ -50,7 +50,8 @@ class MockedMountingBase:
def __call__(self, **dummy_kwarg):
yield self
- def call(self, *args, **kwargs):
+ @staticmethod
+ def call(*args, **kwargs):
return {'stdout': ''}
def nspawn(self):
diff --git a/repos/system_upgrade/common/actors/trustedgpgkeysscanner/tests/test_trustedgpgkeys.py b/repos/system_upgrade/common/actors/trustedgpgkeysscanner/tests/test_trustedgpgkeys.py
index 7497c2a9..b8229d00 100644
--- a/repos/system_upgrade/common/actors/trustedgpgkeysscanner/tests/test_trustedgpgkeys.py
+++ b/repos/system_upgrade/common/actors/trustedgpgkeysscanner/tests/test_trustedgpgkeys.py
@@ -40,7 +40,7 @@ class MockedGetGpgFromFile:
self._data[fname] = fps
def get_files(self):
- return self._data.keys() # noqa: W1655; pylint: disable=dict-keys-not-iterating
+ return self._data.keys()
def __call__(self, fname):
return self._data.get(fname, [])
diff --git a/repos/system_upgrade/common/files/rhel_upgrade.py b/repos/system_upgrade/common/files/rhel_upgrade.py
index 4f76a61d..a5d7045b 100644
--- a/repos/system_upgrade/common/files/rhel_upgrade.py
+++ b/repos/system_upgrade/common/files/rhel_upgrade.py
@@ -49,7 +49,8 @@ class RhelUpgradeCommand(dnf.cli.Command):
metavar="[%s]" % "|".join(CMDS))
parser.add_argument('filename')
- def _process_entities(self, entities, op, entity_name):
+ @staticmethod
+ def _process_entities(entities, op, entity_name):
"""
Adds list of packages for given operation to the transaction
"""
@@ -73,7 +74,8 @@ class RhelUpgradeCommand(dnf.cli.Command):
with open(self.opts.filename, 'w+') as fo:
json.dump(self.plugin_data, fo, sort_keys=True, indent=2)
- def _read_aws_region(self, repo):
+ @staticmethod
+ def _read_aws_region(repo):
region = None
if repo.baseurl:
# baseurl is tuple (changed by Amazon-id plugin)
@@ -86,7 +88,8 @@ class RhelUpgradeCommand(dnf.cli.Command):
sys.exit(1)
return region
- def _fix_rhui_url(self, repo, region):
+ @staticmethod
+ def _fix_rhui_url(repo, region):
if repo.baseurl:
repo.baseurl = tuple(
url.replace('REGION', region, 1) for url in repo.baseurl
diff --git a/repos/system_upgrade/common/libraries/dnfplugin.py b/repos/system_upgrade/common/libraries/dnfplugin.py
index 4f0c3a99..1af52dc5 100644
--- a/repos/system_upgrade/common/libraries/dnfplugin.py
+++ b/repos/system_upgrade/common/libraries/dnfplugin.py
@@ -461,9 +461,10 @@ def perform_transaction_install(target_userspace_info, storage_info, used_repos,
@contextlib.contextmanager
def _prepare_perform(used_repos, target_userspace_info, xfs_info, storage_info, target_iso=None):
- # noqa: W0135; pylint: disable=contextmanager-generator-missing-cleanup
+ # noqa: W0135; pylint: disable=bad-option-value,contextmanager-generator-missing-cleanup
# NOTE(pstodulk): the pylint check is not valid in this case - finally is covered
# implicitly
+ # noqa: W0135
reserve_space = overlaygen.get_recommended_leapp_free_space(target_userspace_info.path)
with _prepare_transaction(used_repos=used_repos,
target_userspace_info=target_userspace_info
diff --git a/repos/system_upgrade/common/libraries/grub.py b/repos/system_upgrade/common/libraries/grub.py
index 71432371..77679d01 100644
--- a/repos/system_upgrade/common/libraries/grub.py
+++ b/repos/system_upgrade/common/libraries/grub.py
@@ -34,7 +34,6 @@ class EFIBootLoaderEntry:
"""
Representation of an UEFI boot loader entry.
"""
- # pylint: disable=eq-without-hash
def __init__(self, boot_number, label, active, efi_bin_source):
self.boot_number = boot_number
@@ -163,7 +162,8 @@ class EFIBootInfo:
# it's not expected that no entry exists
raise StopActorExecution('UEFI: Unable to detect any UEFI bootloader entry.')
- def _parse_key_value(self, bootmgr_output, key):
+ @staticmethod
+ def _parse_key_value(bootmgr_output, key):
# e.g.: <key>: <value>
for line in bootmgr_output.splitlines():
if line.startswith(key + ':'):
diff --git a/repos/system_upgrade/common/libraries/mounting.py b/repos/system_upgrade/common/libraries/mounting.py
index 4e99e31e..ae3885cf 100644
--- a/repos/system_upgrade/common/libraries/mounting.py
+++ b/repos/system_upgrade/common/libraries/mounting.py
@@ -66,7 +66,8 @@ class IsolationType:
""" Release the isolation context """
pass
- def make_command(self, cmd):
+ @staticmethod
+ def make_command(cmd):
""" Transform the given command to the isolated environment """
return cmd
diff --git a/repos/system_upgrade/common/libraries/overlaygen.py b/repos/system_upgrade/common/libraries/overlaygen.py
index a048af2b..83dc33b8 100644
--- a/repos/system_upgrade/common/libraries/overlaygen.py
+++ b/repos/system_upgrade/common/libraries/overlaygen.py
@@ -185,7 +185,7 @@ def _get_fspace(path, convert_to_mibs=False, coefficient=1):
coefficient = min(coefficient, 1)
fspace_bytes = int(stat.f_frsize * stat.f_bavail * coefficient)
if convert_to_mibs:
- return int(fspace_bytes / 1024 / 1024) # noqa: W1619; pylint: disable=old-division
+ return int(fspace_bytes / 1024 / 1024)
return fspace_bytes
@@ -325,7 +325,7 @@ def _prepare_required_mounts(scratch_dir, mounts_dir, storage_info, scratch_rese
@contextlib.contextmanager
def _build_overlay_mount(root_mount, mounts):
- # noqa: W0135; pylint: disable=contextmanager-generator-missing-cleanup
+ # noqa: W0135; pylint: disable=bad-option-value,contextmanager-generator-missing-cleanup
# NOTE(pstodulk): the pylint check is not valid in this case - finally is covered
# implicitly
if not root_mount:
@@ -480,8 +480,8 @@ def _create_mount_disk_image(disk_images_directory, path, disk_size):
# NOTE(pstodulk): In case the formatting params are modified,
# the minimal required size could be different
api.current_logger().warning(
- 'The apparent size for the disk image representing {path}'
- ' is too small ({disk_size} MiBs) for a formatting. Setting 130 MiBs instead.'
+ 'The apparent size for the disk image representing {path} '
+ 'is too small ({disk_size} MiBs) for a formatting. Setting 130 MiBs instead.'
.format(path=path, disk_size=disk_size)
)
disk_size = 130
@@ -489,12 +489,11 @@ def _create_mount_disk_image(disk_images_directory, path, disk_size):
cmd = [
'/bin/dd',
'if=/dev/zero', 'of={}'.format(diskimage_path),
- 'bs=1M', 'count=0', 'seek={}'.format(disk_size)
+ 'bs=1M', 'count=0', 'seek={}'.format(disk_size),
]
hint = (
'Please ensure that there is enough diskspace on the partition hosting'
- 'the {} directory.'
- .format(disk_images_directory)
+ 'the {} directory.'.format(disk_images_directory)
)
api.current_logger().debug('Attempting to create disk image at %s', diskimage_path)
@@ -540,7 +539,9 @@ def _create_mounts_dir(scratch_dir, mounts_dir):
utils.makedirs(mounts_dir)
api.current_logger().debug('Done creating mount directories.')
except OSError:
- api.current_logger().error('Failed to create mounting directories %s', mounts_dir, exc_info=True)
+ api.current_logger().error(
+ 'Failed to create mounting directories %s', mounts_dir, exc_info=True
+ )
# This is an attempt for giving the user a chance to resolve it on their own
raise StopActorExecutionError(
@@ -556,17 +557,25 @@ def _mount_dnf_cache(overlay_target):
"""
Convenience context manager to ensure bind mounted /var/cache/dnf and removal of the mount.
"""
- # noqa: W0135; pylint: disable=contextmanager-generator-missing-cleanup
+ # noqa: W0135; pylint: disable=bad-option-value,contextmanager-generator-missing-cleanup
# NOTE(pstodulk): the pylint check is not valid in this case - finally is covered
# implicitly
with mounting.BindMount(
- source='/var/cache/dnf',
- target=os.path.join(overlay_target, 'var', 'cache', 'dnf')) as cache_mount:
+ source='/var/cache/dnf',
+ target=os.path.join(overlay_target, 'var', 'cache', 'dnf'),
+ ) as cache_mount:
yield cache_mount
@contextlib.contextmanager
-def create_source_overlay(mounts_dir, scratch_dir, xfs_info, storage_info, mount_target=None, scratch_reserve=0):
+def create_source_overlay(
+ mounts_dir,
+ scratch_dir,
+ xfs_info,
+ storage_info,
+ mount_target=None,
+ scratch_reserve=0,
+):
"""
Context manager that prepares the source system overlay and yields the mount.
@@ -610,7 +619,7 @@ def create_source_overlay(mounts_dir, scratch_dir, xfs_info, storage_info, mount
:type scratch_reserve: Optional[int]
:rtype: mounting.BindMount or mounting.NullMount
"""
- # noqa: W0135; pylint: disable=contextmanager-generator-missing-cleanup
+ # noqa: W0135; pylint: disable=bad-option-value,contextmanager-generator-missing-cleanup
# NOTE(pstodulk): the pylint check is not valid in this case - finally is covered
# implicitly
api.current_logger().debug('Creating source overlay in {scratch_dir} with mounts in {mounts_dir}'.format(
diff --git a/repos/system_upgrade/common/libraries/tests/test_distro.py b/repos/system_upgrade/common/libraries/tests/test_distro.py
index 8e866455..13e782e6 100644
--- a/repos/system_upgrade/common/libraries/tests/test_distro.py
+++ b/repos/system_upgrade/common/libraries/tests/test_distro.py
@@ -168,7 +168,8 @@ def test_get_distro_repoids(
monkeypatch.setattr(os.path, 'exists', lambda f: f in _CENTOS_REPOFILES)
class MockedContext:
- def full_path(self, path):
+ @staticmethod
+ def full_path(path):
return path
repoids = get_distro_repoids(MockedContext(), distro_id, '9', 'x86_64')
diff --git a/repos/system_upgrade/common/libraries/tests/test_grub.py b/repos/system_upgrade/common/libraries/tests/test_grub.py
index d6f428bb..08dc6895 100644
--- a/repos/system_upgrade/common/libraries/tests/test_grub.py
+++ b/repos/system_upgrade/common/libraries/tests/test_grub.py
@@ -23,7 +23,7 @@ INVALID_DD = b'Nothing to see here!'
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
-# pylint: disable=E501
+# pylint: disable=line-too-long
# flake8: noqa: E501
EFIBOOTMGR_OUTPUT = r"""
BootCurrent: 0006
diff --git a/repos/system_upgrade/common/libraries/tests/test_rhsm.py b/repos/system_upgrade/common/libraries/tests/test_rhsm.py
index 84a1bd5e..b118da29 100644
--- a/repos/system_upgrade/common/libraries/tests/test_rhsm.py
+++ b/repos/system_upgrade/common/libraries/tests/test_rhsm.py
@@ -73,7 +73,8 @@ class IsolatedActionsMocked:
# A map from called commands to their mocked output
self.mocked_command_call_outputs = dict()
- def is_isolated(self):
+ @staticmethod
+ def is_isolated():
return True
def call(self, cmd, *args, **dummy_kwargs):
@@ -93,7 +94,8 @@ class IsolatedActionsMocked:
'exit_code': exit_code
}
- def full_path(self, path):
+ @staticmethod
+ def full_path(path):
return path
def remove(self, path):
diff --git a/repos/system_upgrade/common/libraries/testutils.py b/repos/system_upgrade/common/libraries/testutils.py
index 328a7ede..e84cc03a 100644
--- a/repos/system_upgrade/common/libraries/testutils.py
+++ b/repos/system_upgrade/common/libraries/testutils.py
@@ -120,7 +120,7 @@ class CurrentActorMocked: # pylint:disable=R0904
return os.path.join(self._common_tools_folder, name)
def consume(self, model):
- return iter(filter( # pylint:disable=W0110,W1639
+ return iter(filter(
lambda msg: isinstance(msg, model), self._msgs
))
diff --git a/repos/system_upgrade/el8toel9/actors/checkvdo/tests/unit_test_checkvdo.py b/repos/system_upgrade/el8toel9/actors/checkvdo/tests/unit_test_checkvdo.py
index 865e036f..d7cfb4fb 100644
--- a/repos/system_upgrade/el8toel9/actors/checkvdo/tests/unit_test_checkvdo.py
+++ b/repos/system_upgrade/el8toel9/actors/checkvdo/tests/unit_test_checkvdo.py
@@ -15,13 +15,15 @@ from leapp.utils.report import is_inhibitor
# Mock actor base for CheckVdo tests.
class MockedActorCheckVdo(CurrentActorMocked):
- def get_vdo_answer(self):
+ @staticmethod
+ def get_vdo_answer():
return False
# Mock actor for all_vdo_converted dialog response.
class MockedActorAllVdoConvertedTrue(MockedActorCheckVdo):
- def get_vdo_answer(self):
+ @staticmethod
+ def get_vdo_answer():
return True
diff --git a/repos/system_upgrade/el8toel9/actors/nisscanner/libraries/nisscan.py b/repos/system_upgrade/el8toel9/actors/nisscanner/libraries/nisscan.py
index 9910f748..ae51c69d 100644
--- a/repos/system_upgrade/el8toel9/actors/nisscanner/libraries/nisscan.py
+++ b/repos/system_upgrade/el8toel9/actors/nisscanner/libraries/nisscan.py
@@ -14,7 +14,8 @@ class NISScanLibrary:
Helper library for NISScan actor.
"""
- def client_has_non_default_configuration(self):
+ @staticmethod
+ def client_has_non_default_configuration():
"""
Check for any significant ypbind configuration lines in .conf file.
"""
@@ -31,7 +32,8 @@ class NISScanLibrary:
return True
return False
- def server_has_non_default_configuration(self):
+ @staticmethod
+ def server_has_non_default_configuration():
"""
Check for any additional (not default) files in ypserv DIR.
"""
diff --git a/repos/system_upgrade/el8toel9/actors/opensslconfigcheck/libraries/opensslconfigcheck.py b/repos/system_upgrade/el8toel9/actors/opensslconfigcheck/libraries/opensslconfigcheck.py
index f36a62e1..07c1b22f 100644
--- a/repos/system_upgrade/el8toel9/actors/opensslconfigcheck/libraries/opensslconfigcheck.py
+++ b/repos/system_upgrade/el8toel9/actors/opensslconfigcheck/libraries/opensslconfigcheck.py
@@ -115,7 +115,8 @@ def _openssl_reachable_key(config, key, value=None):
return False
-# pylint: disable=too-many-return-statements -- could not simplify more
+# pylint: disable=too-many-return-statements
+# could not simplify more
def _openssl_reachable_path(config, path, value=None):
"""
Check if the given path is reachable in OpenSSL configuration
--
2.51.1
View File
@ -0,0 +1,29 @@
From f50e3474a619ed338c2514933303320d986e6ffe Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Thu, 14 Nov 2024 16:26:55 +0100
Subject: [PATCH 40/40] spec: drop the /etc/leapp/actor_confid.d dir
The directory should be provided by the framework. leapp-repository
should only provide content inside it, if any is present.
---
packaging/leapp-repository.spec | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/packaging/leapp-repository.spec b/packaging/leapp-repository.spec
index 2bb52505..6676d907 100644
--- a/packaging/leapp-repository.spec
+++ b/packaging/leapp-repository.spec
@@ -250,9 +250,7 @@ install -m 0755 -d %{buildroot}%{_sysconfdir}/leapp/files/
install -m 0644 etc/leapp/transaction/* %{buildroot}%{_sysconfdir}/leapp/transaction
install -m 0644 etc/leapp/files/* %{buildroot}%{_sysconfdir}/leapp/files
-# Actor configuration dir
-install -m 0755 -d %{buildroot}%{_sysconfdir}/leapp/actor_conf.d/
-# uncomment to install existing configs
+# uncomment to install existing configs if any exists
#install -m 0644 etc/leapp/actor_conf.d/* %%{buildroot}%%{_sysconfdir}/leapp/actor_conf.d
# install CLI commands for the leapp utility on the expected path
--
2.47.0
View File
@ -0,0 +1,74 @@
From 3c3421a0f155fe3bdfaee74c5345e86874684a09 Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Tue, 19 Nov 2024 10:56:50 +0100
Subject: [PATCH] feat(net-naming-scheme): enable by default
This commit enables the use of net.naming-scheme for 8>9 upgrades by
default. The previously used environment variable
LEAPP_USE_NET_NAMING_SCHEMES is replaced with
LEAPP_DISABLE_NET_NAMING_SCHEMES, which has inverse semantics.
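Roughly, the detection flips from opt-in to opt-out (a sketch based on the hunks below; get_env is leapp's environment helper):

    # before: feature only active when explicitly requested
    enabled = get_env('LEAPP_USE_NET_NAMING_SCHEMES', '0') == '1'
    # after: feature active unless explicitly disabled
    enabled = get_env('LEAPP_DISABLE_NET_NAMING_SCHEMES', '0') != '1'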
---
.../libraries/persistentnetnamesconfig.py | 11 ++++++++---
.../libraries/emit_net_naming.py | 4 ++--
.../tests/test_emit_net_naming_scheme.py | 4 ++--
3 files changed, 12 insertions(+), 7 deletions(-)
diff --git a/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py b/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py
index b2c7f5ff..c90d13f2 100644
--- a/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py
+++ b/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py
@@ -39,9 +39,14 @@ def generate_link_file(interface):
@suppress_deprecation(InitrdIncludes)
def process():
- if get_env('LEAPP_USE_NET_NAMING_SCHEMES', '0') == '1' and version.get_target_major_version() == '9':
- # We can use this only for 8>9, for now
- api.current_logger().info('Skipping generation of .link files renaming NICs as LEAPP_USE_NET_NAMING_SCHEMES=1')
+ are_net_schemes_enabled = get_env('LEAPP_DISABLE_NET_NAMING_SCHEMES', '0') != '1'
+ is_upgrade_8to9 = version.get_target_major_version() == '9'
+
+ if are_net_schemes_enabled and is_upgrade_8to9:
+ # For 8>9 we are using net.naming_scheme kernel arg by default - do not generate link files
+ msg = ('Skipping generation of .link files renaming NICs as net.naming-scheme '
+ '{LEAPP_DISABLE_NET_NAMING_SCHEMES != 1} is enabled and upgrade is 8>9')
+ api.current_logger().info(msg)
return
if get_env('LEAPP_NO_NETWORK_RENAMING', '0') == '1':
diff --git a/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py
index 726bb459..bab62a56 100644
--- a/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py
+++ b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py
@@ -44,9 +44,9 @@ def is_net_scheme_compatible_with_current_cmdline():
def emit_msgs_to_use_net_naming_schemes():
- is_env_var_set = get_env('LEAPP_USE_NET_NAMING_SCHEMES', '0') == '1'
+ is_feature_enabled = get_env('LEAPP_DISABLE_NET_NAMING_SCHEMES', '0') != '1'
is_upgrade_8to9 = version.get_target_major_version() == '9'
- is_net_naming_enabled_and_permitted = is_env_var_set and is_upgrade_8to9
+ is_net_naming_enabled_and_permitted = is_feature_enabled and is_upgrade_8to9
if not is_net_naming_enabled_and_permitted:
return
diff --git a/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/tests/test_emit_net_naming_scheme.py b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/tests/test_emit_net_naming_scheme.py
index 7a5eeba5..acf72241 100644
--- a/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/tests/test_emit_net_naming_scheme.py
+++ b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/tests/test_emit_net_naming_scheme.py
@@ -51,11 +51,11 @@ def test_is_net_scheme_compatible_with_current_cmdline(monkeypatch, kernel_args,
]
)
def test_emit_msgs_to_use_net_naming_schemes(monkeypatch, is_net_scheme_enabled, is_current_cmdline_compatible):
- envvar_value = '1' if is_net_scheme_enabled else '0'
+ envvar_value = '0' if is_net_scheme_enabled else '1'
mocked_actor = CurrentActorMocked(src_ver='8.10',
dst_ver='9.5',
- envars={'LEAPP_USE_NET_NAMING_SCHEMES': envvar_value})
+ envars={'LEAPP_DISABLE_NET_NAMING_SCHEMES': envvar_value})
monkeypatch.setattr(api, 'current_actor', mocked_actor)
monkeypatch.setattr(api, 'produce', produce_mocked())
--
2.47.0
View File
@ -1,182 +0,0 @@
From 2abc41bb019a0ebef73e48f2a50990d8e5038e51 Mon Sep 17 00:00:00 2001
From: Tomas Fratrik <tfratrik@redhat.com>
Date: Mon, 18 Aug 2025 15:16:36 +0200
Subject: [PATCH 41/55] pylint: enable super-with-arguments
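In practice this means dropping the explicit class/instance arguments from super() calls, for example (taken from the rhel_upgrade.py hunk below):

    # Python 2 compatible form, now flagged by super-with-arguments
    super(RhelUpgradeCommand, self).__init__(cli)
    # Python 3 form
    super().__init__(cli)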
Jira: RHELMISC-16038
---
.pylintrc | 1 -
.../unit_test_applytransactionworkarounds.py | 2 +-
.../common/files/rhel_upgrade.py | 4 +--
.../common/libraries/mounting.py | 28 +++++++++++--------
.../common/libraries/repofileutils.py | 2 +-
5 files changed, 20 insertions(+), 17 deletions(-)
diff --git a/.pylintrc b/.pylintrc
index 0cba1129..4cfc49e0 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -40,7 +40,6 @@ disable=
too-many-positional-arguments,
# new for python3 version of pylint
unnecessary-pass,
- super-with-arguments, # required in python 2
raise-missing-from, # no 'raise from' in python 2
use-a-generator, # cannot be modified because of Python2 support
consider-using-f-string, # sorry, not gonna happen, still have to support py2
diff --git a/repos/system_upgrade/common/actors/applytransactionworkarounds/tests/unit_test_applytransactionworkarounds.py b/repos/system_upgrade/common/actors/applytransactionworkarounds/tests/unit_test_applytransactionworkarounds.py
index 369514fc..96b8094f 100644
--- a/repos/system_upgrade/common/actors/applytransactionworkarounds/tests/unit_test_applytransactionworkarounds.py
+++ b/repos/system_upgrade/common/actors/applytransactionworkarounds/tests/unit_test_applytransactionworkarounds.py
@@ -7,7 +7,7 @@ from leapp.models import DNFWorkaround
class ShowMessageCurrentActorMocked(CurrentActorMocked):
def __init__(self, *args, **kwargs):
- super(ShowMessageCurrentActorMocked, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
self._show_messages = []
@property
diff --git a/repos/system_upgrade/common/files/rhel_upgrade.py b/repos/system_upgrade/common/files/rhel_upgrade.py
index a5d7045b..63910fe0 100644
--- a/repos/system_upgrade/common/files/rhel_upgrade.py
+++ b/repos/system_upgrade/common/files/rhel_upgrade.py
@@ -40,7 +40,7 @@ class RhelUpgradeCommand(dnf.cli.Command):
summary = 'Plugin for upgrading to the next RHEL major release'
def __init__(self, cli):
- super(RhelUpgradeCommand, self).__init__(cli)
+ super().__init__(cli)
self.plugin_data = {}
@staticmethod
@@ -225,6 +225,6 @@ class RhelUpgradePlugin(dnf.Plugin):
name = 'rhel-upgrade'
def __init__(self, base, cli):
- super(RhelUpgradePlugin, self).__init__(base, cli)
+ super().__init__(base, cli)
if cli:
cli.register_command(RhelUpgradeCommand)
diff --git a/repos/system_upgrade/common/libraries/mounting.py b/repos/system_upgrade/common/libraries/mounting.py
index ae3885cf..279d31dc 100644
--- a/repos/system_upgrade/common/libraries/mounting.py
+++ b/repos/system_upgrade/common/libraries/mounting.py
@@ -46,7 +46,7 @@ class MountError(Exception):
""" Exception that is thrown when a mount related operation failed """
def __init__(self, message, details):
- super(MountError, self).__init__(message)
+ super().__init__(message)
self.details = details
@@ -75,7 +75,7 @@ class IsolationType:
""" systemd-nspawn implementation """
def __init__(self, target, binds=(), env_vars=None):
- super(IsolationType.NSPAWN, self).__init__(target=target)
+ super().__init__(target=target)
self.binds = list(binds) + ALWAYS_BIND
self.env_vars = env_vars or get_all_envs()
@@ -98,7 +98,7 @@ class IsolationType:
""" chroot implementation """
def __init__(self, target):
- super(IsolationType.CHROOT, self).__init__(target)
+ super().__init__(target)
self.context = None
def create(self):
@@ -262,14 +262,14 @@ class ChrootActions(IsolatedActions):
""" Isolation with chroot """
def __init__(self, base_dir):
- super(ChrootActions, self).__init__(base_dir=base_dir, implementation=IsolationType.CHROOT)
+ super().__init__(base_dir=base_dir, implementation=IsolationType.CHROOT)
class NspawnActions(IsolatedActions):
""" Isolation with systemd-nspawn """
def __init__(self, base_dir, binds=(), env_vars=None):
- super(NspawnActions, self).__init__(
+ super().__init__(
base_dir=base_dir, implementation=IsolationType.NSPAWN, binds=binds, env_vars=env_vars)
@@ -278,7 +278,7 @@ class NotIsolatedActions(IsolatedActions):
_isolated = False
def __init__(self, base_dir):
- super(NotIsolatedActions, self).__init__(base_dir=base_dir, implementation=IsolationType.NONE)
+ super().__init__(base_dir=base_dir, implementation=IsolationType.NONE)
class MountConfig:
@@ -375,7 +375,7 @@ class NullMount(MountingBase):
""" This is basically a NoOp for compatibility with other mount operations, in case a mount is optional """
def __init__(self, target, config=MountConfig.AttachOnly):
- super(NullMount, self).__init__(source=target, target=target, mode=MountingMode.NONE, config=config)
+ super().__init__(source=target, target=target, mode=MountingMode.NONE, config=config)
def __enter__(self):
return self
@@ -388,21 +388,21 @@ class LoopMount(MountingBase):
""" Performs loop mounts """
def __init__(self, source, target, config=MountConfig.Mount):
- super(LoopMount, self).__init__(source=source, target=target, mode=MountingMode.LOOP, config=config)
+ super().__init__(source=source, target=target, mode=MountingMode.LOOP, config=config)
class BindMount(MountingBase):
""" Performs bind mounts """
def __init__(self, source, target, config=MountConfig.Mount):
- super(BindMount, self).__init__(source=source, target=target, mode=MountingMode.BIND, config=config)
+ super().__init__(source=source, target=target, mode=MountingMode.BIND, config=config)
class TypedMount(MountingBase):
""" Performs a typed mounts """
def __init__(self, fstype, source, target, config=MountConfig.Mount):
- super(TypedMount, self).__init__(source=source, target=target, mode=MountingMode.FSTYPE, config=config)
+ super().__init__(source=source, target=target, mode=MountingMode.FSTYPE, config=config)
self.fstype = fstype
def _mount_options(self):
@@ -416,8 +416,12 @@ class OverlayMount(MountingBase):
""" Performs an overlayfs mount """
def __init__(self, name, source, workdir, config=MountConfig.Mount):
- super(OverlayMount, self).__init__(source=source, target=os.path.join(workdir, name),
- mode=MountingMode.OVERLAY, config=config)
+ super().__init__(
+ source=source,
+ target=os.path.join(workdir, name),
+ mode=MountingMode.OVERLAY,
+ config=config
+ )
self._upper_dir = os.path.join(workdir, 'upper')
self._work_dir = os.path.join(workdir, 'work')
self.additional_directories = (self._upper_dir, self._work_dir)
diff --git a/repos/system_upgrade/common/libraries/repofileutils.py b/repos/system_upgrade/common/libraries/repofileutils.py
index cab3c42b..376473a4 100644
--- a/repos/system_upgrade/common/libraries/repofileutils.py
+++ b/repos/system_upgrade/common/libraries/repofileutils.py
@@ -16,7 +16,7 @@ class InvalidRepoDefinition(Exception):
def __init__(self, msg, repofile, repoid):
message = 'Invalid repository definition: {repoid} in: {repofile}: {msg}'.format(
repoid=repoid, repofile=repofile, msg=msg)
- super(InvalidRepoDefinition, self).__init__(message)
+ super().__init__(message)
self.repofile = repofile
self.repoid = repoid
--
2.51.1
View File
@ -1,203 +0,0 @@
From 215f307eeb1362e420ac08f528f9f11d6c1c974d Mon Sep 17 00:00:00 2001
From: Tomas Fratrik <tfratrik@redhat.com>
Date: Mon, 8 Sep 2025 08:10:13 +0200
Subject: [PATCH 42/55] pylint: enable use-a-generator
Passing a full list or set comprehension to functions like any(), all(), max(),
min(), or sum() is unnecessary. Enabling this warning enforces using generator
expressions instead of full list/set comprehensions in these cases.
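For example (mirroring the checkluks hunk below), the list comprehension builds an intermediate list only for any() to consume it; the generator expression avoids that:

    any([token.token_type == "clevis-tpm2" for token in luks_dump.tokens])  # flagged: use-a-generator
    any(token.token_type == "clevis-tpm2" for token in luks_dump.tokens)    # preferred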
Jira: RHELMISC-16038
---
.pylintrc | 1 -
.../common/actors/checkluks/libraries/checkluks.py | 2 +-
.../actors/checksaphana/tests/test_checksaphana.py | 7 +++++--
.../tests/test_ddddload.py | 2 +-
.../tests/test_persistentnetnamesconfig.py | 2 +-
.../libraries/scandynamiclinkerconfiguration.py | 2 +-
.../tests/test_checksystemdbrokensymlinks.py | 4 ++--
.../tests/test_transitionsystemdservicesstates.py | 6 ++----
.../tests/unit_test_targetuserspacecreator.py | 4 ++--
.../actors/rocecheck/tests/unit_test_rocecheck.py | 10 ++++++++--
10 files changed, 23 insertions(+), 17 deletions(-)
diff --git a/.pylintrc b/.pylintrc
index 4cfc49e0..a82f8818 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -41,7 +41,6 @@ disable=
# new for python3 version of pylint
unnecessary-pass,
raise-missing-from, # no 'raise from' in python 2
- use-a-generator, # cannot be modified because of Python2 support
consider-using-f-string, # sorry, not gonna happen, still have to support py2
logging-format-interpolation
diff --git a/repos/system_upgrade/common/actors/checkluks/libraries/checkluks.py b/repos/system_upgrade/common/actors/checkluks/libraries/checkluks.py
index d52b9e73..84e8e61f 100644
--- a/repos/system_upgrade/common/actors/checkluks/libraries/checkluks.py
+++ b/repos/system_upgrade/common/actors/checkluks/libraries/checkluks.py
@@ -27,7 +27,7 @@ def _formatted_list_output(input_list, sep=FMT_LIST_SEPARATOR):
def _at_least_one_tpm_token(luks_dump):
- return any([token.token_type == "clevis-tpm2" for token in luks_dump.tokens])
+ return any(token.token_type == "clevis-tpm2" for token in luks_dump.tokens)
def _get_ceph_volumes():
diff --git a/repos/system_upgrade/common/actors/checksaphana/tests/test_checksaphana.py b/repos/system_upgrade/common/actors/checksaphana/tests/test_checksaphana.py
index 29e9c930..8ec8d17f 100644
--- a/repos/system_upgrade/common/actors/checksaphana/tests/test_checksaphana.py
+++ b/repos/system_upgrade/common/actors/checksaphana/tests/test_checksaphana.py
@@ -284,7 +284,7 @@ def test_checksaphana_perform_check(monkeypatch):
# Expected 3 reports due to v1names + v2lownames + running
assert len(reports) == 3
# Verifies that all expected title patterns are within the reports and not just coincidentally 3
- assert all([any([pattern(report) for report in reports]) for pattern in EXPECTED_TITLE_PATTERNS.values()])
+ assert all(any(pattern(report) for report in reports) for pattern in EXPECTED_TITLE_PATTERNS.values())
list_clear(reports)
monkeypatch.setattr(checksaphana.api, 'consume', _consume_mock_sap_hana_info(
@@ -294,4 +294,7 @@ def test_checksaphana_perform_check(monkeypatch):
# Expected 2 reports due to v1names + v2lownames
assert len(reports) == 2
# Verifies that all expected title patterns are within the reports and not just coincidentally 2
- assert all([any([EXPECTED_TITLE_PATTERNS[pattern](report) for report in reports]) for pattern in ['v1', 'low']])
+ assert all(
+ any(EXPECTED_TITLE_PATTERNS[pattern](report) for report in reports)
+ for pattern in ['v1', 'low']
+ )
diff --git a/repos/system_upgrade/common/actors/loaddevicedriverdeprecationdata/tests/test_ddddload.py b/repos/system_upgrade/common/actors/loaddevicedriverdeprecationdata/tests/test_ddddload.py
index c3386745..1f3473b6 100644
--- a/repos/system_upgrade/common/actors/loaddevicedriverdeprecationdata/tests/test_ddddload.py
+++ b/repos/system_upgrade/common/actors/loaddevicedriverdeprecationdata/tests/test_ddddload.py
@@ -60,7 +60,7 @@ def test_filtered_load(monkeypatch):
assert produced
assert len(produced[0].entries) == 3
- assert not any([e.device_type == 'unsupported' for e in produced[0].entries])
+ assert not any(e.device_type == 'unsupported' for e in produced[0].entries)
@pytest.mark.parametrize('data', (
diff --git a/repos/system_upgrade/common/actors/persistentnetnamesconfig/tests/test_persistentnetnamesconfig.py b/repos/system_upgrade/common/actors/persistentnetnamesconfig/tests/test_persistentnetnamesconfig.py
index 5ad52c43..ee199ae4 100644
--- a/repos/system_upgrade/common/actors/persistentnetnamesconfig/tests/test_persistentnetnamesconfig.py
+++ b/repos/system_upgrade/common/actors/persistentnetnamesconfig/tests/test_persistentnetnamesconfig.py
@@ -177,7 +177,7 @@ def test_bz_1899455_crash_iface(monkeypatch, adjust_cwd):
for prod_models in [RenamedInterfaces, InitrdIncludes, TargetInitramfsTasks]:
any(isinstance(i, prod_models) for i in persistentnetnamesconfig.api.produce.model_instances)
- assert any(['Some network devices' in x for x in persistentnetnamesconfig.api.current_logger.warnmsg])
+ assert any('Some network devices' in x for x in persistentnetnamesconfig.api.current_logger.warnmsg)
def test_no_network_renaming(monkeypatch):
diff --git a/repos/system_upgrade/common/actors/scandynamiclinkerconfiguration/libraries/scandynamiclinkerconfiguration.py b/repos/system_upgrade/common/actors/scandynamiclinkerconfiguration/libraries/scandynamiclinkerconfiguration.py
index 8d3b473e..73b0c84e 100644
--- a/repos/system_upgrade/common/actors/scandynamiclinkerconfiguration/libraries/scandynamiclinkerconfiguration.py
+++ b/repos/system_upgrade/common/actors/scandynamiclinkerconfiguration/libraries/scandynamiclinkerconfiguration.py
@@ -113,5 +113,5 @@ def scan_dynamic_linker_configuration():
included_configs=included_config_files,
used_variables=used_variables)
- if other_lines or any([config.modified for config in included_config_files]) or used_variables:
+ if other_lines or any(config.modified for config in included_config_files) or used_variables:
api.produce(configuration)
diff --git a/repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/tests/test_checksystemdbrokensymlinks.py b/repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/tests/test_checksystemdbrokensymlinks.py
index bcc33f13..a4c0a657 100644
--- a/repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/tests/test_checksystemdbrokensymlinks.py
+++ b/repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/tests/test_checksystemdbrokensymlinks.py
@@ -20,7 +20,7 @@ def test_report_broken_symlinks(monkeypatch):
checksystemdbrokensymlinks._report_broken_symlinks(symlinks)
assert created_reports.called
- assert all([s in created_reports.report_fields['summary'] for s in symlinks])
+ assert all(s in created_reports.report_fields['summary'] for s in symlinks)
def test_report_enabled_services_broken_symlinks(monkeypatch):
@@ -35,7 +35,7 @@ def test_report_enabled_services_broken_symlinks(monkeypatch):
checksystemdbrokensymlinks._report_enabled_services_broken_symlinks(symlinks)
assert created_reports.called
- assert all([s in created_reports.report_fields['summary'] for s in symlinks])
+ assert all(s in created_reports.report_fields['summary'] for s in symlinks)
class ReportBrokenSymlinks:
diff --git a/repos/system_upgrade/common/actors/systemd/transitionsystemdservicesstates/tests/test_transitionsystemdservicesstates.py b/repos/system_upgrade/common/actors/systemd/transitionsystemdservicesstates/tests/test_transitionsystemdservicesstates.py
index 6964a65b..488b37d4 100644
--- a/repos/system_upgrade/common/actors/systemd/transitionsystemdservicesstates/tests/test_transitionsystemdservicesstates.py
+++ b/repos/system_upgrade/common/actors/systemd/transitionsystemdservicesstates/tests/test_transitionsystemdservicesstates.py
@@ -205,9 +205,7 @@ def test_report_kept_enabled(monkeypatch, tasks, expect_extended_summary):
assert created_reports.called
if expect_extended_summary:
assert extended_summary_str in created_reports.report_fields["summary"]
- assert all(
- [s in created_reports.report_fields["summary"] for s in tasks.to_enable]
- )
+ all(s in created_reports.report_fields['summary'] for s in tasks.to_enable)
else:
assert extended_summary_str not in created_reports.report_fields["summary"]
@@ -238,7 +236,7 @@ def test_report_newly_enabled(monkeypatch):
transitionsystemdservicesstates._report_newly_enabled(newly_enabled)
assert created_reports.called
- assert all([s in created_reports.report_fields["summary"] for s in newly_enabled])
+ assert all(s in created_reports.report_fields["summary"] for s in newly_enabled)
@pytest.mark.parametrize(
diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py
index bb17d89a..0bb64f6f 100644
--- a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py
+++ b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py
@@ -1068,7 +1068,7 @@ def test_consume_data(monkeypatch, raised, no_rhsm, testdata):
assert raised[1] in err.value.message
else:
assert userspacegen.api.current_logger.warnmsg
- assert any([raised[1] in x for x in userspacegen.api.current_logger.warnmsg])
+ assert any(raised[1] in x for x in userspacegen.api.current_logger.warnmsg)
@pytest.mark.skip(reason="Currently not implemented in the actor. It's TODO.")
@@ -1390,7 +1390,7 @@ def test__get_files_owned_by_rpms_recursive(monkeypatch):
assert sorted(owned[0:4]) == sorted(out)
def has_dbgmsg(substr):
- return any([substr in log for log in logger.dbgmsg])
+ return any(substr in log for log in logger.dbgmsg)
# test a few
assert has_dbgmsg(
diff --git a/repos/system_upgrade/el8toel9/actors/rocecheck/tests/unit_test_rocecheck.py b/repos/system_upgrade/el8toel9/actors/rocecheck/tests/unit_test_rocecheck.py
index a36cc8ed..b5511d17 100644
--- a/repos/system_upgrade/el8toel9/actors/rocecheck/tests/unit_test_rocecheck.py
+++ b/repos/system_upgrade/el8toel9/actors/rocecheck/tests/unit_test_rocecheck.py
@@ -91,7 +91,10 @@ def test_roce_old_rhel(monkeypatch, msgs, version):
monkeypatch.setattr(reporting, "create_report", create_report_mocked())
rocecheck.process()
assert reporting.create_report.called
- assert any(['version of RHEL' in report['title'] for report in reporting.create_report.reports])
+ assert any(
+ 'version of RHEL' in report['title']
+ for report in reporting.create_report.reports
+ )
# NOTE: what about the situation when net.naming-scheme is configured multiple times???
@@ -113,4 +116,7 @@ def test_roce_wrong_configuration(monkeypatch, msgs, version):
monkeypatch.setattr(reporting, "create_report", create_report_mocked())
rocecheck.process()
assert reporting.create_report.called
- assert any(['RoCE configuration' in report['title'] for report in reporting.create_report.reports])
+ assert any(
+ 'RoCE configuration' in report['title']
+ for report in reporting.create_report.reports
+ )
--
2.51.1
View File
@ -1,484 +0,0 @@
From adac307afff48ebbf4255edbd620ed29cbc9429e Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Thu, 21 Aug 2025 13:12:19 +0200
Subject: [PATCH 43/55] Set CurrentActorMocked src/dst version to 8->9 values
Currently, the default upgrade path is 7.8->8.1, which has been dropped
from the repo. Several tests make assumptions about these defaults, and in
some cases code paths for other general upgrade paths
(8->9, 9->10) are left untested, e.g. modify_userspace_for_livemode.
This patch changes the default path to 8.10->9.6.
Updated tests:
- checkrhui: assume 8->9 upgrade path
- setuptargetrepos: Update tests to assume 8->9 upg path.
- repairsystemdsymlinks: use 8->9 upgrade path instead of 7->8:
We need to mock some "installation changed" service as there is
*currently* none.
- checkmemory: assume 8->9 upgrade path
- peseventsscanner: assume 8->9 upgrade path
- lib/config: assume correct upgrade path
- persistentnetnamesconfig: use 8->9 upgrade path instead of 7->8
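A minimal sketch of the effect on tests (hypothetical usage; CurrentActorMocked and monkeypatch appear throughout the hunks below):

    # the mocked actor now defaults to src_ver='8.10', dst_ver='9.6' (an 8->9 path)
    monkeypatch.setattr(api, 'current_actor', CurrentActorMocked())
    # tests that need a specific path keep pinning it explicitly
    monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(src_ver='8.10', dst_ver='9.5'))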
Jira: RHELMISC-16538
---
.../checkmemory/tests/test_checkmemory.py | 2 +-
.../tests/component_test_checkrhui.py | 20 +++---
.../libraries/persistentnetnamesconfig.py | 24 +++----
.../tests/test_persistentnetnamesconfig.py | 16 ++++-
.../tests/test_pes_event_scanner.py | 14 ++--
.../tests/test_setuptargetrepos.py | 68 +++++++++----------
.../libraries/repairsystemdsymlinks.py | 1 -
.../tests/test_repairsystemdsymlinks.py | 29 +++++---
.../libraries/config/tests/test_version.py | 2 +-
.../common/libraries/testutils.py | 2 +-
10 files changed, 98 insertions(+), 80 deletions(-)
diff --git a/repos/system_upgrade/common/actors/checkmemory/tests/test_checkmemory.py b/repos/system_upgrade/common/actors/checkmemory/tests/test_checkmemory.py
index a0bac0a9..79158dc6 100644
--- a/repos/system_upgrade/common/actors/checkmemory/tests/test_checkmemory.py
+++ b/repos/system_upgrade/common/actors/checkmemory/tests/test_checkmemory.py
@@ -21,7 +21,7 @@ def test_check_memory_high(monkeypatch):
def test_report(monkeypatch):
- title_msg = 'Minimum memory requirements for RHEL 8 are not met'
+ title_msg = 'Minimum memory requirements for RHEL 9 are not met'
monkeypatch.setattr(api, 'current_actor', CurrentActorMocked())
monkeypatch.setattr(api, 'consume', lambda x: iter([MemoryInfo(mem_total=129)]))
monkeypatch.setattr(reporting, "create_report", create_report_mocked())
diff --git a/repos/system_upgrade/common/actors/cloud/checkrhui/tests/component_test_checkrhui.py b/repos/system_upgrade/common/actors/cloud/checkrhui/tests/component_test_checkrhui.py
index 02ca352e..2e6f279e 100644
--- a/repos/system_upgrade/common/actors/cloud/checkrhui/tests/component_test_checkrhui.py
+++ b/repos/system_upgrade/common/actors/cloud/checkrhui/tests/component_test_checkrhui.py
@@ -279,10 +279,14 @@ class ExpectedAction(Enum):
)
def test_process(monkeypatch, extra_installed_pkgs, skip_rhsm, expected_action):
known_setups = {
- RHUIFamily('rhui-variant'): [
- mk_rhui_setup(clients={'src_pkg'}, os_version='7'),
- mk_rhui_setup(clients={'target_pkg'}, os_version='8', leapp_pkg='leapp_pkg',
- mandatory_files=[('file1', '/etc'), ('file2', '/var')]),
+ RHUIFamily("rhui-variant"): [
+ mk_rhui_setup(clients={"src_pkg"}, os_version="8"),
+ mk_rhui_setup(
+ clients={"target_pkg"},
+ os_version="9",
+ leapp_pkg="leapp_pkg",
+ mandatory_files=[("file1", "/etc"), ("file2", "/var")],
+ ),
]
}
@@ -291,7 +295,7 @@ def test_process(monkeypatch, extra_installed_pkgs, skip_rhsm, expected_action):
installed_rpms = InstalledRPM(items=installed_pkgs)
monkeypatch.setattr(api, 'produce', produce_mocked())
- actor = CurrentActorMocked(src_ver='7.9', msgs=[installed_rpms], config=_make_default_config(all_rhui_cfg))
+ actor = CurrentActorMocked(msgs=[installed_rpms], config=_make_default_config(all_rhui_cfg))
monkeypatch.setattr(api, 'current_actor', actor)
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
monkeypatch.setattr(rhsm, 'skip_rhsm', lambda: skip_rhsm)
@@ -318,12 +322,12 @@ def test_unknown_target_rhui_setup(monkeypatch, is_target_setup_known):
rhui_family = RHUIFamily('rhui-variant')
known_setups = {
rhui_family: [
- mk_rhui_setup(clients={'src_pkg'}, os_version='7'),
+ mk_rhui_setup(clients={'src_pkg'}, os_version='8'),
]
}
if is_target_setup_known:
- target_setup = mk_rhui_setup(clients={'target_pkg'}, os_version='8', leapp_pkg='leapp_pkg')
+ target_setup = mk_rhui_setup(clients={'target_pkg'}, os_version='9', leapp_pkg='leapp_pkg')
known_setups[rhui_family].append(target_setup)
installed_pkgs = {'zip', 'kernel-core', 'python', 'src_pkg', 'leapp_pkg'}
@@ -331,7 +335,7 @@ def test_unknown_target_rhui_setup(monkeypatch, is_target_setup_known):
installed_rpms = InstalledRPM(items=installed_pkgs)
monkeypatch.setattr(api, 'produce', produce_mocked())
- actor = CurrentActorMocked(src_ver='7.9', msgs=[installed_rpms], config=_make_default_config(all_rhui_cfg))
+ actor = CurrentActorMocked(msgs=[installed_rpms], config=_make_default_config(all_rhui_cfg))
monkeypatch.setattr(api, 'current_actor', actor)
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
monkeypatch.setattr(rhsm, 'skip_rhsm', lambda: True)
diff --git a/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py b/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py
index c90d13f2..189cd4d0 100644
--- a/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py
+++ b/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py
@@ -55,21 +55,21 @@ def process():
)
return
- rhel7_ifaces = next(api.consume(PersistentNetNamesFacts)).interfaces
- rhel8_ifaces = next(api.consume(PersistentNetNamesFactsInitramfs)).interfaces
+ source_ifaces = next(api.consume(PersistentNetNamesFacts)).interfaces
+ target_ifaces = next(api.consume(PersistentNetNamesFactsInitramfs)).interfaces
- rhel7_ifaces_map = {iface.mac: iface for iface in rhel7_ifaces}
- rhel8_ifaces_map = {iface.mac: iface for iface in rhel8_ifaces}
+ source_ifaces_map = {iface.mac: iface for iface in source_ifaces}
+ target_ifaces_map = {iface.mac: iface for iface in target_ifaces}
initrd_files = []
missing_ifaces = []
renamed_interfaces = []
- if rhel7_ifaces != rhel8_ifaces:
- for iface in rhel7_ifaces:
- rhel7_name = rhel7_ifaces_map[iface.mac].name
+ if source_ifaces != target_ifaces:
+ for iface in source_ifaces:
+ source_name = source_ifaces_map[iface.mac].name
try:
- rhel8_name = rhel8_ifaces_map[iface.mac].name
+ target_name = target_ifaces_map[iface.mac].name
except KeyError:
missing_ifaces.append(iface)
api.current_logger().warning(
@@ -80,13 +80,13 @@ def process():
)
continue
- if rhel7_name != rhel8_name and get_env('LEAPP_NO_NETWORK_RENAMING', '0') != '1':
- api.current_logger().warning('Detected interface rename {} -> {}.'.format(rhel7_name, rhel8_name))
+ if source_name != target_name and get_env('LEAPP_NO_NETWORK_RENAMING', '0') != '1':
+ api.current_logger().warning('Detected interface rename {} -> {}.'.format(source_name, target_name))
if re.search('eth[0-9]+', iface.name) is not None:
api.current_logger().warning('Interface named using eth prefix, refusing to generate link file')
- renamed_interfaces.append(RenamedInterface(**{'rhel7_name': rhel7_name,
- 'rhel8_name': rhel8_name}))
+ renamed_interfaces.append(RenamedInterface(**{'rhel7_name': source_name,
+ 'rhel8_name': target_name}))
continue
initrd_files.append(generate_link_file(iface))
diff --git a/repos/system_upgrade/common/actors/persistentnetnamesconfig/tests/test_persistentnetnamesconfig.py b/repos/system_upgrade/common/actors/persistentnetnamesconfig/tests/test_persistentnetnamesconfig.py
index ee199ae4..c584c7ea 100644
--- a/repos/system_upgrade/common/actors/persistentnetnamesconfig/tests/test_persistentnetnamesconfig.py
+++ b/repos/system_upgrade/common/actors/persistentnetnamesconfig/tests/test_persistentnetnamesconfig.py
@@ -12,7 +12,6 @@ from leapp.models import (
PCIAddress,
PersistentNetNamesFacts,
PersistentNetNamesFactsInitramfs,
- RenamedInterface,
RenamedInterfaces,
TargetInitramfsTasks
)
@@ -170,7 +169,12 @@ def test_bz_1899455_crash_iface(monkeypatch, adjust_cwd):
PersistentNetNamesFactsInitramfs.create(json_msgs["PersistentNetNamesFactsInitramfs"]),
]
monkeypatch.setattr(persistentnetnamesconfig, 'generate_link_file', generate_link_file_mocked)
- monkeypatch.setattr(persistentnetnamesconfig.api, 'current_actor', CurrentActorMocked(msgs=msgs))
+ monkeypatch.setattr(
+ persistentnetnamesconfig.api,
+ "current_actor",
+ # without this the actor exits early
+ CurrentActorMocked(msgs=msgs, envars={"LEAPP_DISABLE_NET_NAMING_SCHEMES": "1"}),
+ )
monkeypatch.setattr(persistentnetnamesconfig.api, 'current_logger', logger_mocked())
monkeypatch.setattr(persistentnetnamesconfig.api, 'produce', produce_mocked())
persistentnetnamesconfig.process()
@@ -194,7 +198,13 @@ def test_no_network_renaming(monkeypatch):
msgs = [PersistentNetNamesFacts(interfaces=interfaces)]
interfaces[0].name = 'changedinterfacename0'
msgs.append(PersistentNetNamesFactsInitramfs(interfaces=interfaces))
- mocked_actor = CurrentActorMocked(msgs=msgs, envars={'LEAPP_NO_NETWORK_RENAMING': '1'})
+ mocked_actor = CurrentActorMocked(
+ msgs=msgs,
+ envars={
+ "LEAPP_DISABLE_NET_NAMING_SCHEMES": "1",
+ "LEAPP_NO_NETWORK_RENAMING": "1",
+ },
+ )
monkeypatch.setattr(persistentnetnamesconfig.api, 'current_actor', mocked_actor)
monkeypatch.setattr(persistentnetnamesconfig.api, 'current_logger', logger_mocked())
monkeypatch.setattr(persistentnetnamesconfig.api, 'produce', produce_mocked())
diff --git a/repos/system_upgrade/common/actors/peseventsscanner/tests/test_pes_event_scanner.py b/repos/system_upgrade/common/actors/peseventsscanner/tests/test_pes_event_scanner.py
index 09a1e82d..f67f3840 100644
--- a/repos/system_upgrade/common/actors/peseventsscanner/tests/test_pes_event_scanner.py
+++ b/repos/system_upgrade/common/actors/peseventsscanner/tests/test_pes_event_scanner.py
@@ -325,18 +325,18 @@ def test_blacklisted_repoid_is_not_produced(monkeypatch):
Test that upgrade with a package that would be from a blacklisted repository on the target system does not remove
the package as it was already installed, however, the blacklisted repoid should not be produced.
"""
- installed_pkgs = {Package('pkg-a', 'blacklisted-rhel7', None), Package('pkg-b', 'repoid-rhel7', None)}
+ installed_pkgs = {Package('pkg-a', 'blacklisted-rhel8', None), Package('pkg-b', 'repoid-rhel8', None)}
events = [
- Event(1, Action.MOVED, {Package('pkg-b', 'repoid-rhel7', None)}, {Package('pkg-b', 'repoid-rhel8', None)},
- (8, 0), (8, 1), []),
- Event(2, Action.MOVED, {Package('pkg-a', 'repoid-rhel7', None)}, {Package('pkg-a', 'blacklisted-rhel8', None)},
- (8, 0), (8, 1), []),
+ Event(1, Action.MOVED, {Package('pkg-b', 'repoid-rhel8', None)}, {Package('pkg-b', 'repoid-rhel9', None)},
+ (9, 0), (9, 1), []),
+ Event(2, Action.MOVED, {Package('pkg-a', 'repoid-rhel8', None)}, {Package('pkg-a', 'blacklisted-rhel9', None)},
+ (9, 0), (9, 1), []),
]
monkeypatch.setattr(pes_events_scanner, 'get_installed_pkgs', lambda: installed_pkgs)
monkeypatch.setattr(pes_events_scanner, 'get_pes_events', lambda folder, filename: events)
monkeypatch.setattr(pes_events_scanner, 'apply_transaction_configuration', lambda pkgs, transaction_cfg: pkgs)
- monkeypatch.setattr(pes_events_scanner, 'get_blacklisted_repoids', lambda: {'blacklisted-rhel8'})
+ monkeypatch.setattr(pes_events_scanner, 'get_blacklisted_repoids', lambda: {'blacklisted-rhel9'})
monkeypatch.setattr(pes_events_scanner, 'replace_pesids_with_repoids_in_packages',
lambda pkgs, src_pkgs_repoids: pkgs)
@@ -357,7 +357,7 @@ def test_blacklisted_repoid_is_not_produced(monkeypatch):
repo_setup_tasks = [msg for msg in api.produce.model_instances if isinstance(msg, RepositoriesSetupTasks)]
assert len(repo_setup_tasks) == 1
- assert repo_setup_tasks[0].to_enable == ['repoid-rhel8']
+ assert repo_setup_tasks[0].to_enable == ['repoid-rhel9']
@pytest.mark.parametrize(
diff --git a/repos/system_upgrade/common/actors/setuptargetrepos/tests/test_setuptargetrepos.py b/repos/system_upgrade/common/actors/setuptargetrepos/tests/test_setuptargetrepos.py
index e4a30f7f..ce7f01c0 100644
--- a/repos/system_upgrade/common/actors/setuptargetrepos/tests/test_setuptargetrepos.py
+++ b/repos/system_upgrade/common/actors/setuptargetrepos/tests/test_setuptargetrepos.py
@@ -1,6 +1,5 @@
import pytest
-from leapp.libraries import stdlib
from leapp.libraries.actor import setuptargetrepos
from leapp.libraries.common.testutils import CurrentActorMocked, produce_mocked
from leapp.libraries.stdlib import api
@@ -15,8 +14,7 @@ from leapp.models import (
RepositoriesSetupTasks,
RepositoryData,
RepositoryFile,
- RPM,
- TargetRepositories
+ RPM
)
RH_PACKAGER = 'Red Hat, Inc. <http://bugzilla.redhat.com/bugzilla>'
@@ -108,27 +106,27 @@ def test_repos_mapping_for_distro(monkeypatch, distro_id):
the RepositoriesMapping information for a specific distro.
"""
repos_data = [
- RepositoryData(repoid='{}-7-server-rpms'.format(distro_id), name='{} 7 Server'.format(distro_id)),
- RepositoryData(repoid='{}-7-blacklisted-rpms'.format(distro_id), name='{} 7 Blacklisted'.format(distro_id))]
+ RepositoryData(repoid='{}-8-server-rpms'.format(distro_id), name='{} 8 Server'.format(distro_id)),
+ RepositoryData(repoid='{}-8-blacklisted-rpms'.format(distro_id), name='{} 8 Blacklisted'.format(distro_id))]
repos_files = [RepositoryFile(file='/etc/yum.repos.d/redhat.repo', data=repos_data)]
facts = RepositoriesFacts(repositories=repos_files)
installed_rpms = InstalledRPM(
- items=[mock_package('foreman', '{}-7-for-x86_64-satellite-extras-rpms'.format(distro_id)),
- mock_package('foreman-proxy', 'nosuch-{}-7-for-x86_64-satellite-extras-rpms'.format(distro_id))])
+ items=[mock_package('foreman', '{}-8-for-x86_64-satellite-extras-rpms'.format(distro_id)),
+ mock_package('foreman-proxy', 'nosuch-{}-8-for-x86_64-satellite-extras-rpms'.format(distro_id))])
repomap = RepositoriesMapping(
- mapping=[RepoMapEntry(source='{0}7-base'.format(distro_id),
- target=['{0}8-baseos'.format(distro_id),
- '{0}8-appstream'.format(distro_id),
- '{0}8-blacklist'.format(distro_id)]),
- RepoMapEntry(source='{0}7-satellite-extras'.format(distro_id),
- target=['{0}8-satellite-extras'.format(distro_id)])],
+ mapping=[RepoMapEntry(source='{0}8-base'.format(distro_id),
+ target=['{0}9-baseos'.format(distro_id),
+ '{0}9-appstream'.format(distro_id),
+ '{0}9-blacklist'.format(distro_id)]),
+ RepoMapEntry(source='{0}8-satellite-extras'.format(distro_id),
+ target=['{0}9-satellite-extras'.format(distro_id)])],
repositories=[
PESIDRepositoryEntry(
- pesid='{0}7-base'.format(distro_id),
- repoid='{0}-7-server-rpms'.format(distro_id),
- major_version='7',
+ pesid='{0}8-base'.format(distro_id),
+ repoid='{0}-8-server-rpms'.format(distro_id),
+ major_version='8',
arch='x86_64',
repo_type='rpm',
channel='ga',
@@ -136,9 +134,9 @@ def test_repos_mapping_for_distro(monkeypatch, distro_id):
distro=distro_id,
),
PESIDRepositoryEntry(
- pesid='{0}8-baseos'.format(distro_id),
- repoid='{0}-8-for-x86_64-baseos-htb-rpms'.format(distro_id),
- major_version='8',
+ pesid='{0}9-baseos'.format(distro_id),
+ repoid='{0}-9-for-x86_64-baseos-htb-rpms'.format(distro_id),
+ major_version='9',
arch='x86_64',
repo_type='rpm',
channel='ga',
@@ -146,9 +144,9 @@ def test_repos_mapping_for_distro(monkeypatch, distro_id):
distro=distro_id,
),
PESIDRepositoryEntry(
- pesid='{0}8-appstream'.format(distro_id),
- repoid='{0}-8-for-x86_64-appstream-htb-rpms'.format(distro_id),
- major_version='8',
+ pesid='{0}9-appstream'.format(distro_id),
+ repoid='{0}-9-for-x86_64-appstream-htb-rpms'.format(distro_id),
+ major_version='9',
arch='x86_64',
repo_type='rpm',
channel='ga',
@@ -156,9 +154,9 @@ def test_repos_mapping_for_distro(monkeypatch, distro_id):
distro=distro_id,
),
PESIDRepositoryEntry(
- pesid='{0}8-blacklist'.format(distro_id),
- repoid='{0}-8-blacklisted-rpms'.format(distro_id),
- major_version='8',
+ pesid='{0}9-blacklist'.format(distro_id),
+ repoid='{0}-9-blacklisted-rpms'.format(distro_id),
+ major_version='9',
arch='x86_64',
repo_type='rpm',
channel='ga',
@@ -166,9 +164,9 @@ def test_repos_mapping_for_distro(monkeypatch, distro_id):
distro=distro_id,
),
PESIDRepositoryEntry(
- pesid='{0}7-satellite-extras'.format(distro_id),
- repoid='{0}-7-for-x86_64-satellite-extras-rpms'.format(distro_id),
- major_version='7',
+ pesid='{0}8-satellite-extras'.format(distro_id),
+ repoid='{0}-8-for-x86_64-satellite-extras-rpms'.format(distro_id),
+ major_version='8',
arch='x86_64',
repo_type='rpm',
channel='ga',
@@ -176,9 +174,9 @@ def test_repos_mapping_for_distro(monkeypatch, distro_id):
distro=distro_id,
),
PESIDRepositoryEntry(
- pesid='{0}8-satellite-extras'.format(distro_id),
- repoid='{0}-8-for-x86_64-satellite-extras-rpms'.format(distro_id),
- major_version='8',
+ pesid='{0}9-satellite-extras'.format(distro_id),
+ repoid='{0}-9-for-x86_64-satellite-extras-rpms'.format(distro_id),
+ major_version='9',
arch='x86_64',
repo_type='rpm',
channel='ga',
@@ -188,7 +186,7 @@ def test_repos_mapping_for_distro(monkeypatch, distro_id):
]
)
- repos_blacklisted = RepositoriesBlacklisted(repoids=['{}-8-blacklisted-rpms'.format(distro_id)])
+ repos_blacklisted = RepositoriesBlacklisted(repoids=['{}-9-blacklisted-rpms'.format(distro_id)])
msgs = [facts, repomap, repos_blacklisted, installed_rpms]
@@ -207,9 +205,9 @@ def test_repos_mapping_for_distro(monkeypatch, distro_id):
produced_rhel_repoids = {repo.repoid for repo in rhel_repos}
expected_repoids = {
- "{0}-8-for-x86_64-baseos-htb-rpms".format(distro_id),
- "{0}-8-for-x86_64-appstream-htb-rpms".format(distro_id),
- "{0}-8-for-x86_64-satellite-extras-rpms".format(distro_id),
+ "{0}-9-for-x86_64-baseos-htb-rpms".format(distro_id),
+ "{0}-9-for-x86_64-appstream-htb-rpms".format(distro_id),
+ "{0}-9-for-x86_64-satellite-extras-rpms".format(distro_id),
}
assert produced_distro_repoids == expected_repoids
diff --git a/repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/libraries/repairsystemdsymlinks.py b/repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/libraries/repairsystemdsymlinks.py
index 3fcf4aa6..a8e801b9 100644
--- a/repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/libraries/repairsystemdsymlinks.py
+++ b/repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/libraries/repairsystemdsymlinks.py
@@ -7,7 +7,6 @@ from leapp.libraries.stdlib import api, CalledProcessError, run
from leapp.models import SystemdBrokenSymlinksSource, SystemdBrokenSymlinksTarget, SystemdServicesInfoSource
_INSTALLATION_CHANGED = {
- '8': ['rngd.service', 'sysstat.service'],
'9': [],
'10': [],
}
diff --git a/repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/tests/test_repairsystemdsymlinks.py b/repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/tests/test_repairsystemdsymlinks.py
index 5771fc6c..d52abdfa 100644
--- a/repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/tests/test_repairsystemdsymlinks.py
+++ b/repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/tests/test_repairsystemdsymlinks.py
@@ -1,13 +1,8 @@
from leapp.libraries.actor import repairsystemdsymlinks
from leapp.libraries.common import systemd
-from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked
-from leapp.libraries.stdlib import api, CalledProcessError, run
-from leapp.models import (
- SystemdBrokenSymlinksSource,
- SystemdBrokenSymlinksTarget,
- SystemdServiceFile,
- SystemdServicesInfoSource
-)
+from leapp.libraries.common.testutils import CurrentActorMocked
+from leapp.libraries.stdlib import api
+from leapp.models import SystemdServiceFile, SystemdServicesInfoSource
class MockedSystemdCmd:
@@ -20,8 +15,16 @@ class MockedSystemdCmd:
def test_bad_symslinks(monkeypatch):
+ # there are no _INSTALLATION_CHANGED services on RHEL 8 and RHEL 9, but it's
+ # possible such a service will be discovered and added in the future, as it
+ # was on RHEL 7, so let's cover such a case
+ monkeypatch.setitem(
+ repairsystemdsymlinks._INSTALLATION_CHANGED,
+ "9", ["some.service"],
+ )
+
service_files = [
- SystemdServiceFile(name='rngd.service', state='enabled'),
+ SystemdServiceFile(name='some.service', state='enabled'),
SystemdServiceFile(name='sysstat.service', state='disabled'),
SystemdServiceFile(name='hello.service', state='enabled'),
SystemdServiceFile(name='world.service', state='disabled'),
@@ -36,11 +39,15 @@ def test_bad_symslinks(monkeypatch):
monkeypatch.setattr(systemd, 'reenable_unit', reenable_mocked)
service_info = SystemdServicesInfoSource(service_files=service_files)
- monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[service_info]))
+ monkeypatch.setattr(
+ api,
+ "current_actor",
+ CurrentActorMocked(src_ver="8.10", dst_ver="9.6", msgs=[service_info]),
+ )
repairsystemdsymlinks._handle_bad_symlinks(service_info.service_files)
- assert reenable_mocked.units == ['rngd.service']
+ assert reenable_mocked.units == ['some.service']
def test_handle_newly_broken_symlink(monkeypatch):
diff --git a/repos/system_upgrade/common/libraries/config/tests/test_version.py b/repos/system_upgrade/common/libraries/config/tests/test_version.py
index d51f8098..f36dbc5f 100644
--- a/repos/system_upgrade/common/libraries/config/tests/test_version.py
+++ b/repos/system_upgrade/common/libraries/config/tests/test_version.py
@@ -94,7 +94,7 @@ def test_matches_source_version(monkeypatch, result, version_list):
(False, ['8.2', '8.0']),
])
def test_matches_target_version(monkeypatch, result, version_list):
- monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(src_ver='7.6'))
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(src_ver='7.6', dst_ver='8.1'))
assert version.matches_target_version(*version_list) == result
diff --git a/repos/system_upgrade/common/libraries/testutils.py b/repos/system_upgrade/common/libraries/testutils.py
index e84cc03a..107ad8a7 100644
--- a/repos/system_upgrade/common/libraries/testutils.py
+++ b/repos/system_upgrade/common/libraries/testutils.py
@@ -80,7 +80,7 @@ def _make_default_config(actor_config_schema):
class CurrentActorMocked: # pylint:disable=R0904
def __init__(self, arch=architecture.ARCH_X86_64, envars=None, # pylint:disable=R0913
kernel='3.10.0-957.43.1.el7.x86_64',
- release_id='rhel', src_ver='7.8', dst_ver='8.1', msgs=None, flavour='default', config=None,
+ release_id='rhel', src_ver='8.10', dst_ver='9.6', msgs=None, flavour='default', config=None,
virtual_source_version=None, virtual_target_version=None,
supported_upgrade_paths=None):
"""
--
2.51.1

View File

@ -1,303 +0,0 @@
From ad9b2ae552b6c36d78a445dbbcfcc179afd1d839 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Thu, 21 Aug 2025 14:23:55 +0200
Subject: [PATCH 44/55] scancpu: Remove non-JSON ("txt") lscpu output support
This was only used on RHEL 7.
- Remove txt test files
- Remove empty_field test - IIRC it's not possible to have an empty
field in JSON lscpu output; the value would be null (see sketch below)
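For context, a minimal sketch of what consuming the JSON output looks like (illustrative only, not the actor's exact code; the real _parse_lscpu_output also handles parse errors and logging):

    import json
    import subprocess

    def parse_lscpu_json():
        # `lscpu -J` prints {"lscpu": [{"field": "...", "data": "..."}, ...]};
        # empty values come back as null instead of empty strings.
        raw = subprocess.check_output(['lscpu', '-J']).decode('utf-8')
        entries = json.loads(raw).get('lscpu', [])
        return {e['field']: e['data'] for e in entries if e.get('data') is not None}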
---
.../actors/scancpu/libraries/scancpu.py | 11 ++---
.../scancpu/tests/files/{json => }/invalid | 0
.../tests/files/{json => }/lscpu_aarch64 | 0
.../tests/files/{json => }/lscpu_ppc64le | 0
.../tests/files/{json => }/lscpu_s390x | 0
.../tests/files/{json => }/lscpu_x86_64 | 0
.../scancpu/tests/files/txt/lscpu_aarch64 | 25 -----------
.../scancpu/tests/files/txt/lscpu_empty_field | 4 --
.../scancpu/tests/files/txt/lscpu_ppc64le | 15 -------
.../scancpu/tests/files/txt/lscpu_s390x | 26 ------------
.../scancpu/tests/files/txt/lscpu_x86_64 | 36 ----------------
.../actors/scancpu/tests/test_scancpu.py | 42 ++-----------------
12 files changed, 7 insertions(+), 152 deletions(-)
rename repos/system_upgrade/common/actors/scancpu/tests/files/{json => }/invalid (100%)
rename repos/system_upgrade/common/actors/scancpu/tests/files/{json => }/lscpu_aarch64 (100%)
rename repos/system_upgrade/common/actors/scancpu/tests/files/{json => }/lscpu_ppc64le (100%)
rename repos/system_upgrade/common/actors/scancpu/tests/files/{json => }/lscpu_s390x (100%)
rename repos/system_upgrade/common/actors/scancpu/tests/files/{json => }/lscpu_x86_64 (100%)
delete mode 100644 repos/system_upgrade/common/actors/scancpu/tests/files/txt/lscpu_aarch64
delete mode 100644 repos/system_upgrade/common/actors/scancpu/tests/files/txt/lscpu_empty_field
delete mode 100644 repos/system_upgrade/common/actors/scancpu/tests/files/txt/lscpu_ppc64le
delete mode 100644 repos/system_upgrade/common/actors/scancpu/tests/files/txt/lscpu_s390x
delete mode 100644 repos/system_upgrade/common/actors/scancpu/tests/files/txt/lscpu_x86_64
diff --git a/repos/system_upgrade/common/actors/scancpu/libraries/scancpu.py b/repos/system_upgrade/common/actors/scancpu/libraries/scancpu.py
index db3f92d4..ecc23349 100644
--- a/repos/system_upgrade/common/actors/scancpu/libraries/scancpu.py
+++ b/repos/system_upgrade/common/actors/scancpu/libraries/scancpu.py
@@ -2,17 +2,15 @@ import json
import re
from leapp.libraries.common.config import architecture
-from leapp.libraries.common.config.version import get_source_major_version
from leapp.libraries.stdlib import api, CalledProcessError, run
from leapp.models import CPUInfo, DetectedDeviceOrDriver, DeviceDriverDeprecationData
-LSCPU_NAME_VALUE = re.compile(r'^(?P<name>[^:]+):[^\S\n]+(?P<value>.+)\n?', flags=re.MULTILINE)
PPC64LE_MODEL = re.compile(r'\d+\.\d+ \(pvr (?P<family>[0-9a-fA-F]+) 0*[0-9a-fA-F]+\)')
-def _get_lscpu_output(output_json=False):
+def _get_lscpu_output():
try:
- result = run(['lscpu'] + (['-J'] if output_json else []))
+ result = run(['lscpu', '-J'])
return result.get('stdout', '')
except (OSError, CalledProcessError):
api.current_logger().debug('Executing `lscpu` failed', exc_info=True)
@@ -20,10 +18,7 @@ def _get_lscpu_output(output_json=False):
def _parse_lscpu_output():
- if get_source_major_version() == '7':
- return dict(LSCPU_NAME_VALUE.findall(_get_lscpu_output()))
-
- lscpu = _get_lscpu_output(output_json=True)
+ lscpu = _get_lscpu_output()
try:
parsed_json = json.loads(lscpu)
# The json contains one entry "lscpu" which is a list of dictionaries
diff --git a/repos/system_upgrade/common/actors/scancpu/tests/files/json/invalid b/repos/system_upgrade/common/actors/scancpu/tests/files/invalid
similarity index 100%
rename from repos/system_upgrade/common/actors/scancpu/tests/files/json/invalid
rename to repos/system_upgrade/common/actors/scancpu/tests/files/invalid
diff --git a/repos/system_upgrade/common/actors/scancpu/tests/files/json/lscpu_aarch64 b/repos/system_upgrade/common/actors/scancpu/tests/files/lscpu_aarch64
similarity index 100%
rename from repos/system_upgrade/common/actors/scancpu/tests/files/json/lscpu_aarch64
rename to repos/system_upgrade/common/actors/scancpu/tests/files/lscpu_aarch64
diff --git a/repos/system_upgrade/common/actors/scancpu/tests/files/json/lscpu_ppc64le b/repos/system_upgrade/common/actors/scancpu/tests/files/lscpu_ppc64le
similarity index 100%
rename from repos/system_upgrade/common/actors/scancpu/tests/files/json/lscpu_ppc64le
rename to repos/system_upgrade/common/actors/scancpu/tests/files/lscpu_ppc64le
diff --git a/repos/system_upgrade/common/actors/scancpu/tests/files/json/lscpu_s390x b/repos/system_upgrade/common/actors/scancpu/tests/files/lscpu_s390x
similarity index 100%
rename from repos/system_upgrade/common/actors/scancpu/tests/files/json/lscpu_s390x
rename to repos/system_upgrade/common/actors/scancpu/tests/files/lscpu_s390x
diff --git a/repos/system_upgrade/common/actors/scancpu/tests/files/json/lscpu_x86_64 b/repos/system_upgrade/common/actors/scancpu/tests/files/lscpu_x86_64
similarity index 100%
rename from repos/system_upgrade/common/actors/scancpu/tests/files/json/lscpu_x86_64
rename to repos/system_upgrade/common/actors/scancpu/tests/files/lscpu_x86_64
diff --git a/repos/system_upgrade/common/actors/scancpu/tests/files/txt/lscpu_aarch64 b/repos/system_upgrade/common/actors/scancpu/tests/files/txt/lscpu_aarch64
deleted file mode 100644
index 3b9619ef..00000000
--- a/repos/system_upgrade/common/actors/scancpu/tests/files/txt/lscpu_aarch64
+++ /dev/null
@@ -1,25 +0,0 @@
-Architecture: aarch64
-Byte Order: Little Endian
-CPU(s): 160
-On-line CPU(s) list: 0-159
-Thread(s) per core: 1
-Core(s) per socket: 80
-Socket(s): 2
-NUMA node(s): 4
-Vendor ID: ARM
-BIOS Vendor ID: Ampere(R)
-Model: 1
-Model name: Neoverse-N1
-BIOS Model name: Ampere(R) Altra(R) Processor
-Stepping: r3p1
-CPU max MHz: 3000.0000
-CPU min MHz: 1000.0000
-BogoMIPS: 50.00
-L1d cache: 64K
-L1i cache: 64K
-L2 cache: 1024K
-NUMA node0 CPU(s): 0-79
-NUMA node1 CPU(s): 80-159
-NUMA node2 CPU(s):
-NUMA node3 CPU(s):
-Flags: fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp asimdhp cpuid asimdrdm lrcpc dcpop asimddp ssbs
diff --git a/repos/system_upgrade/common/actors/scancpu/tests/files/txt/lscpu_empty_field b/repos/system_upgrade/common/actors/scancpu/tests/files/txt/lscpu_empty_field
deleted file mode 100644
index f830b7fe..00000000
--- a/repos/system_upgrade/common/actors/scancpu/tests/files/txt/lscpu_empty_field
+++ /dev/null
@@ -1,4 +0,0 @@
-Empyt 1:
-Empyt 2:
-Empyt 3:
-Flags: flag
diff --git a/repos/system_upgrade/common/actors/scancpu/tests/files/txt/lscpu_ppc64le b/repos/system_upgrade/common/actors/scancpu/tests/files/txt/lscpu_ppc64le
deleted file mode 100644
index 07d2ed65..00000000
--- a/repos/system_upgrade/common/actors/scancpu/tests/files/txt/lscpu_ppc64le
+++ /dev/null
@@ -1,15 +0,0 @@
-Architecture: ppc64le
-Byte Order: Little Endian
-CPU(s): 8
-On-line CPU(s) list: 0-7
-Thread(s) per core: 1
-Core(s) per socket: 1
-Socket(s): 8
-NUMA node(s): 1
-Model: 2.1 (pvr 004b 0201)
-Model name: POWER8E (raw), altivec supported
-Hypervisor vendor: KVM
-Virtualization type: para
-L1d cache: 64K
-L1i cache: 32K
-NUMA node0 CPU(s): 0-7
diff --git a/repos/system_upgrade/common/actors/scancpu/tests/files/txt/lscpu_s390x b/repos/system_upgrade/common/actors/scancpu/tests/files/txt/lscpu_s390x
deleted file mode 100644
index 2c0de9f9..00000000
--- a/repos/system_upgrade/common/actors/scancpu/tests/files/txt/lscpu_s390x
+++ /dev/null
@@ -1,26 +0,0 @@
-Architecture: s390x
-CPU op-mode(s): 32-bit, 64-bit
-Byte Order: Big Endian
-CPU(s): 4
-On-line CPU(s) list: 0-3
-Thread(s) per core: 1
-Core(s) per socket: 1
-Socket(s) per book: 1
-Book(s) per drawer: 1
-Drawer(s): 4
-NUMA node(s): 1
-Vendor ID: IBM/S390
-Machine type: 3931
-CPU dynamic MHz: 5200
-CPU static MHz: 5200
-BogoMIPS: 3331.00
-Hypervisor: KVM/Linux
-Hypervisor vendor: KVM
-Virtualization type: full
-Dispatching mode: horizontal
-L1d cache: 128K
-L1i cache: 128K
-L2 cache: 32768K
-L3 cache: 262144K
-NUMA node0 CPU(s): 0-3
-Flags: esan3 zarch stfle msa ldisp eimm dfp edat etf3eh highgprs te vx vxd vxe gs vxe2 vxp sort dflt vxp2 nnpa sie
diff --git a/repos/system_upgrade/common/actors/scancpu/tests/files/txt/lscpu_x86_64 b/repos/system_upgrade/common/actors/scancpu/tests/files/txt/lscpu_x86_64
deleted file mode 100644
index a1dc1035..00000000
--- a/repos/system_upgrade/common/actors/scancpu/tests/files/txt/lscpu_x86_64
+++ /dev/null
@@ -1,36 +0,0 @@
-Architecture: x86_64
-CPU op-mode(s): 32-bit, 64-bit
-Address sizes: 46 bits physical, 48 bits virtual
-Byte Order: Little Endian
-CPU(s): 48
-On-line CPU(s) list: 0-47
-Vendor ID: GenuineIntel
-Model name: Intel(R) Xeon(R) CPU E5-2670 v3 @ 2.30GHz
-CPU family: 6
-Model: 63
-Thread(s) per core: 2
-Core(s) per socket: 12
-Socket(s): 2
-Stepping: 2
-CPU(s) scaling MHz: 44%
-CPU max MHz: 3100.0000
-CPU min MHz: 1200.0000
-BogoMIPS: 4599.83
-Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid cqm xsaveopt cqm_llc cqm_occup_llc dtherm ida arat pln pts md_clear flush_l1d
-Virtualization: VT-x
-L1d cache: 768 KiB (24 instances)
-L1i cache: 768 KiB (24 instances)
-L2 cache: 6 MiB (24 instances)
-L3 cache: 60 MiB (2 instances)
-NUMA node(s): 2
-NUMA node0 CPU(s): 0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46
-NUMA node1 CPU(s): 1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31,33,35,37,39,41,43,45,47
-Vulnerability Itlb multihit: KVM: Mitigation: VMX disabled
-Vulnerability L1tf: Mitigation; PTE Inversion; VMX conditional cache flushes, SMT vulnerable
-Vulnerability Mds: Mitigation; Clear CPU buffers; SMT vulnerable
-Vulnerability Meltdown: Mitigation; PTI
-Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp
-Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
-Vulnerability Spectre v2: Mitigation; Full generic retpoline, IBPB conditional, IBRS_FW, STIBP conditional, RSB filling
-Vulnerability Srbds: Not affected
-Vulnerability Tsx async abort: Not affected
diff --git a/repos/system_upgrade/common/actors/scancpu/tests/test_scancpu.py b/repos/system_upgrade/common/actors/scancpu/tests/test_scancpu.py
index be0802ba..3605ebe7 100644
--- a/repos/system_upgrade/common/actors/scancpu/tests/test_scancpu.py
+++ b/repos/system_upgrade/common/actors/scancpu/tests/test_scancpu.py
@@ -61,29 +61,20 @@ class mocked_get_cpuinfo:
def __init__(self, filename):
self.filename = filename
- def __call__(self, output_json=False):
+ def __call__(self):
"""
Return lines of the self.filename test file located in the files directory.
Those files contain /proc/cpuinfo content from several machines.
"""
-
- filename = self.filename
- if output_json:
- filename = os.path.join('json', filename)
- else:
- filename = os.path.join('txt', filename)
- filename = os.path.join(CUR_DIR, 'files', filename)
+ filename = os.path.join(CUR_DIR, 'files', self.filename)
with open(filename, 'r') as fp:
return '\n'.join(fp.read().splitlines())
@pytest.mark.parametrize("arch", ARCH_SUPPORTED)
-@pytest.mark.parametrize("version", ['7', '8'])
-def test_scancpu(monkeypatch, arch, version):
-
- monkeypatch.setattr('leapp.libraries.actor.scancpu.get_source_major_version', lambda: version)
+def test_scancpu(monkeypatch, arch):
mocked_cpuinfo = mocked_get_cpuinfo('lscpu_' + arch)
monkeypatch.setattr(scancpu, '_get_lscpu_output', mocked_cpuinfo)
@@ -106,34 +97,9 @@ def test_scancpu(monkeypatch, arch, version):
assert expected == produced
-def test_lscpu_with_empty_field(monkeypatch):
-
- def mocked_cpuinfo(*args, **kwargs):
- return mocked_get_cpuinfo('lscpu_empty_field')(output_json=False)
-
- monkeypatch.setattr(scancpu, '_get_lscpu_output', mocked_cpuinfo)
- monkeypatch.setattr(api, 'produce', produce_mocked())
- current_actor = CurrentActorMocked()
- monkeypatch.setattr(api, 'current_actor', current_actor)
-
- scancpu.process()
-
- expected = CPUInfo(machine_type=None, flags=['flag'])
- produced = api.produce.model_instances[0]
-
- assert api.produce.called == 1
-
- assert expected.machine_type == produced.machine_type
- assert sorted(expected.flags) == sorted(produced.flags)
-
-
def test_parse_invalid_json(monkeypatch):
- monkeypatch.setattr('leapp.libraries.actor.scancpu.get_source_major_version', lambda: '8')
-
- def mocked_cpuinfo(*args, **kwargs):
- return mocked_get_cpuinfo('invalid')(output_json=True)
-
+ mocked_cpuinfo = mocked_get_cpuinfo('invalid')
monkeypatch.setattr(scancpu, '_get_lscpu_output', mocked_cpuinfo)
monkeypatch.setattr(api, 'produce', produce_mocked())
monkeypatch.setattr(api, 'current_logger', logger_mocked())
--
2.51.1

View File

@ -1,51 +0,0 @@
From 44c6b10a1813bfa019fb8ee2ec08a619e325ba08 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Thu, 21 Aug 2025 14:34:37 +0200
Subject: [PATCH 45/55] modify_userspace_for_livemode: Remove RHEL7
crypto-policies workaround
---
.../libraries/prepareliveimage.py | 13 -------------
.../tests/test_livemode_userspace_modifications.py | 2 --
2 files changed, 15 deletions(-)
diff --git a/repos/system_upgrade/common/actors/livemode/modify_userspace_for_livemode/libraries/prepareliveimage.py b/repos/system_upgrade/common/actors/livemode/modify_userspace_for_livemode/libraries/prepareliveimage.py
index 686c4cd6..116c463d 100644
--- a/repos/system_upgrade/common/actors/livemode/modify_userspace_for_livemode/libraries/prepareliveimage.py
+++ b/repos/system_upgrade/common/actors/livemode/modify_userspace_for_livemode/libraries/prepareliveimage.py
@@ -381,19 +381,6 @@ def setup_sshd(context, authorized_keys):
error
)
- # @Todo(mhecko): This is hazardous. I guess we are setting this so that we can use weaker SSH keys from RHEL7,
- # # but this way we change crypto settings system-wise (could be a problem for FIPS). Instead, we
- # # should check whether the keys will be OK on RHEL8, and inform the user otherwise.
- if get_target_major_version() == '8': # set to LEGACY for 7>8 only
- try:
- with context.open('/etc/crypto-policies/config', 'w+') as f:
- f.write('LEGACY\n')
- except OSError as error:
- api.current_logger().warning('Cannot set crypto policy to LEGACY')
- details = {'details': 'Failed to set crypto-policies to LEGACY due to the error: {0}'.format(error)}
- raise StopActorExecutionError('Failed to set up livemode SSHD', details=details)
-
-
# stolen from upgradeinitramfsgenerator.py
def _get_target_kernel_version(context):
"""
diff --git a/repos/system_upgrade/common/actors/livemode/modify_userspace_for_livemode/tests/test_livemode_userspace_modifications.py b/repos/system_upgrade/common/actors/livemode/modify_userspace_for_livemode/tests/test_livemode_userspace_modifications.py
index e890f45a..b046d8c7 100644
--- a/repos/system_upgrade/common/actors/livemode/modify_userspace_for_livemode/tests/test_livemode_userspace_modifications.py
+++ b/repos/system_upgrade/common/actors/livemode/modify_userspace_for_livemode/tests/test_livemode_userspace_modifications.py
@@ -296,8 +296,6 @@ def test_setup_sshd(monkeypatch):
Action(type_=ActionType.SYMLINK,
args=('/usr/lib/systemd/system/sshd.service',
'/USERSPACE/etc/systemd/system/multi-user.target.wants/sshd.service')),
- Action(type_=ActionType.OPEN, args=('/USERSPACE/etc/crypto-policies/config',)),
- Action(type_=ActionType.WRITE, args=('LEGACY\n',)),
]
error = assert_execution_trace_subsumes_other(actual_trace, expected_trace)
--
2.51.1

View File

@ -1,61 +0,0 @@
From 4bede4b415f3e561399bf5c4ebed659c1aa4948d Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Thu, 21 Aug 2025 14:37:41 +0200
Subject: [PATCH 46/55] modify_userspace_for_livemode: Remove unused code in
modify_userspace_for_livemode
---
.../libraries/prepareliveimage.py | 30 +------------------
1 file changed, 1 insertion(+), 29 deletions(-)
diff --git a/repos/system_upgrade/common/actors/livemode/modify_userspace_for_livemode/libraries/prepareliveimage.py b/repos/system_upgrade/common/actors/livemode/modify_userspace_for_livemode/libraries/prepareliveimage.py
index 116c463d..2587bf89 100644
--- a/repos/system_upgrade/common/actors/livemode/modify_userspace_for_livemode/libraries/prepareliveimage.py
+++ b/repos/system_upgrade/common/actors/livemode/modify_userspace_for_livemode/libraries/prepareliveimage.py
@@ -6,7 +6,7 @@ import os.path
from leapp.exceptions import StopActorExecutionError
from leapp.libraries.common import mounting
from leapp.libraries.common.config.version import get_target_major_version
-from leapp.libraries.stdlib import api, CalledProcessError
+from leapp.libraries.stdlib import api
from leapp.models import LiveImagePreparationInfo
LEAPP_UPGRADE_SERVICE_FILE = 'upgrade.service'
@@ -381,34 +381,6 @@ def setup_sshd(context, authorized_keys):
error
)
-# stolen from upgradeinitramfsgenerator.py
-def _get_target_kernel_version(context):
- """
- Get the version of the most recent kernel version within the container.
- """
- try:
- results = context.call(['rpm', '-qa', 'kernel-core'], split=True)['stdout']
-
- except CalledProcessError as error:
- problem = 'Could not query the target userspace kernel version through rpm. Full error: {0}'.format(error)
- raise StopActorExecutionError(
- 'Cannot get the version of the installed kernel.',
- details={'Problem': problem})
-
- if len(results) > 1:
- raise StopActorExecutionError(
- 'Cannot detect the version of the target userspace kernel.',
- details={'Problem': 'Detected unexpectedly multiple kernels inside target userspace container.'})
- if not results:
- raise StopActorExecutionError(
- 'Cannot detect the version of the target userspace kernel.',
- details={'Problem': 'An rpm query for the available kernels did not produce any results.'})
-
- kernel_version = '-'.join(results[0].rsplit("-", 2)[-2:])
- api.current_logger().debug('Detected kernel version inside container: {}.'.format(kernel_version))
-
- return kernel_version
-
def fakerootfs():
"""
--
2.51.1

View File

@ -1,170 +0,0 @@
From 000c5f6558e788842fbf95d2fdb0014d18285c15 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Thu, 21 Aug 2025 15:10:58 +0200
Subject: [PATCH 47/55] repositoriesblacklist: Remove the rhel7-optional
unsupported PESID
Update tests to assume 8->9 upgrade path.
---
.../libraries/repositoriesblacklist.py | 4 +-
.../tests/test_repositoriesblacklist.py | 48 ++++++++++---------
2 files changed, 27 insertions(+), 25 deletions(-)
diff --git a/repos/system_upgrade/common/actors/repositoriesblacklist/libraries/repositoriesblacklist.py b/repos/system_upgrade/common/actors/repositoriesblacklist/libraries/repositoriesblacklist.py
index e22fbee0..5059f619 100644
--- a/repos/system_upgrade/common/actors/repositoriesblacklist/libraries/repositoriesblacklist.py
+++ b/repos/system_upgrade/common/actors/repositoriesblacklist/libraries/repositoriesblacklist.py
@@ -6,7 +6,6 @@ from leapp.models import CustomTargetRepository, RepositoriesBlacklisted, Reposi
# {OS_MAJOR_VERSION: PESID}
UNSUPPORTED_PESIDS = {
- "7": "rhel7-optional",
"8": "rhel8-CRB",
"9": "rhel9-CRB",
"10": "rhel10-CRB"
@@ -28,9 +27,8 @@ def _report_using_unsupported_repos(repos):
def _report_excluded_repos(repos):
- optional_repository_name = 'optional' if get_source_major_version() == '7' else 'CRB'
api.current_logger().info(
- "The {0} repository is not enabled. Excluding {1} from the upgrade".format(optional_repository_name, repos)
+ "The CRB repository is not enabled. Excluding {} from the upgrade".format(repos)
)
report = [
diff --git a/repos/system_upgrade/common/actors/repositoriesblacklist/tests/test_repositoriesblacklist.py b/repos/system_upgrade/common/actors/repositoriesblacklist/tests/test_repositoriesblacklist.py
index c4f9a36e..945007c6 100644
--- a/repos/system_upgrade/common/actors/repositoriesblacklist/tests/test_repositoriesblacklist.py
+++ b/repos/system_upgrade/common/actors/repositoriesblacklist/tests/test_repositoriesblacklist.py
@@ -20,8 +20,8 @@ from leapp.models import (
def repofacts_opts_disabled():
repos_data = [
RepositoryData(
- repoid="rhel-7-server-optional-rpms",
- name="RHEL 7 Server",
+ repoid="codeready-builder-for-rhel-8-x86_64-rpms",
+ name="RHEL 8 CRB",
enabled=False,
)
]
@@ -32,11 +32,11 @@ def repofacts_opts_disabled():
@pytest.fixture
-def rhel7_optional_pesidrepo():
+def rhel8_crb_pesidrepo():
return PESIDRepositoryEntry(
- pesid='rhel7-optional',
- major_version='7',
- repoid='rhel-7-server-optional-rpms',
+ pesid='rhel8-CRB',
+ major_version='8',
+ repoid='codeready-builder-for-rhel-8-x86_64-rpms',
rhui='',
arch='x86_64',
channel='ga',
@@ -46,11 +46,11 @@ def rhel7_optional_pesidrepo():
@pytest.fixture
-def rhel8_crb_pesidrepo():
+def rhel9_crb_pesidrepo():
return PESIDRepositoryEntry(
- pesid='rhel8-CRB',
- major_version='8',
- repoid='codeready-builder-for-rhel-8-x86_64-rpms',
+ pesid='rhel9-CRB',
+ major_version='9',
+ repoid='codeready-builder-for-rhel-9-x86_64-rpms',
rhui='',
arch='x86_64',
channel='ga',
@@ -60,10 +60,10 @@ def rhel8_crb_pesidrepo():
@pytest.fixture
-def repomap_opts_only(rhel7_optional_pesidrepo, rhel8_crb_pesidrepo):
+def repomap_opts_only(rhel8_crb_pesidrepo, rhel9_crb_pesidrepo):
return RepositoriesMapping(
- mapping=[RepoMapEntry(source='rhel7-optional', target=['rhel8-CRB'])],
- repositories=[rhel7_optional_pesidrepo, rhel8_crb_pesidrepo]
+ mapping=[RepoMapEntry(source='rhel8-CRB', target=['rhel9-CRB'])],
+ repositories=[rhel8_crb_pesidrepo, rhel9_crb_pesidrepo]
)
@@ -75,8 +75,8 @@ def test_all_target_optionals_blacklisted_when_no_optional_on_source(monkeypatch
repos_data = [
RepositoryData(
- repoid="rhel-7-server-rpms",
- name="RHEL 7 Server",
+ repoid="rhel-8-server-rpms",
+ name="RHEL 8 Server",
enabled=True,
)
]
@@ -92,7 +92,7 @@ def test_all_target_optionals_blacklisted_when_no_optional_on_source(monkeypatch
repositoriesblacklist.process()
assert api.produce.called
- assert 'codeready-builder-for-rhel-8-x86_64-rpms' in api.produce.model_instances[0].repoids
+ assert 'codeready-builder-for-rhel-9-x86_64-rpms' in api.produce.model_instances[0].repoids
def test_with_no_mapping_for_optional_repos(monkeypatch, repomap_opts_only, repofacts_opts_disabled):
@@ -115,7 +115,11 @@ def test_blacklist_produced_when_optional_repo_disabled(monkeypatch, repofacts_o
Tests whether a correct blacklist is generated when there is disabled optional repo on the system.
"""
- monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[repofacts_opts_disabled, repomap_opts_only]))
+ monkeypatch.setattr(
+ api,
+ "current_actor",
+ CurrentActorMocked(msgs=[repofacts_opts_disabled, repomap_opts_only]),
+ )
monkeypatch.setattr(api, "produce", produce_mocked())
monkeypatch.setattr(reporting, "create_report", produce_mocked())
@@ -123,7 +127,7 @@ def test_blacklist_produced_when_optional_repo_disabled(monkeypatch, repofacts_o
assert api.produce.model_instances, 'A blacklist should get generated.'
- expected_blacklisted_repoid = 'codeready-builder-for-rhel-8-x86_64-rpms'
+ expected_blacklisted_repoid = 'codeready-builder-for-rhel-9-x86_64-rpms'
err_msg = 'Blacklist does not contain expected repoid.'
assert expected_blacklisted_repoid in api.produce.model_instances[0].repoids, err_msg
@@ -166,8 +170,8 @@ def test_repositoriesblacklist_not_empty(monkeypatch, repofacts_opts_disabled, r
def test_repositoriesblacklist_empty(monkeypatch, repofacts_opts_disabled, repomap_opts_only):
"""
- Tests whether nothing is produced if there are some disabled optional repos, but an empty blacklist is determined
- from the repo mapping data.
+ Tests whether nothing is produced if there are some disabled optional
+ repos, but an empty blacklist is determined from the repo mapping data.
"""
msgs_to_feed = [repofacts_opts_disabled, repomap_opts_only]
@@ -177,7 +181,7 @@ def test_repositoriesblacklist_empty(monkeypatch, repofacts_opts_disabled, repom
repositoriesblacklist,
"_get_repoids_to_exclude",
lambda dummy_mapping: set()
- ) # pylint:disable=W0108
+ )
monkeypatch.setattr(api, "produce", produce_mocked())
repositoriesblacklist.process()
@@ -187,7 +191,7 @@ def test_repositoriesblacklist_empty(monkeypatch, repofacts_opts_disabled, repom
@pytest.mark.parametrize(
("enabled_repo", "exp_report_title", "message_produced"),
[
- ("codeready-builder-for-rhel-8-x86_64-rpms", "Using repository not supported by Red Hat", False),
+ ("codeready-builder-for-rhel-9-x86_64-rpms", "Using repository not supported by Red Hat", False),
("some_other_enabled_repo", "Excluded target system repositories", True),
(None, "Excluded target system repositories", True),
],
--
2.51.1

View File

@ -1,209 +0,0 @@
From 0a203330cee4fba6a28c65f1c6e0e450cc45771e Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Thu, 30 Oct 2025 11:52:39 +0100
Subject: [PATCH 48/55] mount_unit_gen: bind mount /sysroot/boot to /boot
Our changes towards using systemd-fstab-generator in the upgrade
initramfs mean that we now mount almost all partitions, including
/boot (the actual mount target is /sysroot/boot), early in the boot
process. When upgrading with FIPS, the dracut fips module tries to mount
the device holding the boot partition to check the integrity of the
kernel; however, this fails because the boot block device is already
mounted by us. This patch therefore introduces a static unit that
bind-mounts What=/sysroot/boot to Where=/boot, making the contents of
/boot available to the fips module. The bind-mounting service is
introduced only if the source system has /boot on a separate partition.
This is determined by checking whether anything should be mounted at
/boot according to fstab.
Jira-ref: RHEL-123886
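As a rough illustration of the decision described above (the actor itself consumes the StorageInfo message instead of reading fstab directly, see does_system_have_separate_boot_partition in the diff below):

    def has_separate_boot_partition(fstab_path='/etc/fstab'):
        # /boot is considered separate when some fstab entry mounts there;
        # only then is the bundled boot.mount bind-mount unit injected.
        with open(fstab_path) as fstab:
            for line in fstab:
                fields = line.split()
                if len(fields) >= 2 and not fields[0].startswith('#') and fields[1] == '/boot':
                    return True
        return False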
---
.../initramfs/mount_units_generator/actor.py | 3 +-
.../files/bundled_units/boot.mount | 11 ++++
.../libraries/mount_unit_generator.py | 38 ++++++++++-
.../tests/test_mount_unit_generation.py | 63 ++++++++++++++++++-
4 files changed, 111 insertions(+), 4 deletions(-)
create mode 100644 repos/system_upgrade/common/actors/initramfs/mount_units_generator/files/bundled_units/boot.mount
diff --git a/repos/system_upgrade/common/actors/initramfs/mount_units_generator/actor.py b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/actor.py
index dd667513..23c618b6 100644
--- a/repos/system_upgrade/common/actors/initramfs/mount_units_generator/actor.py
+++ b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/actor.py
@@ -1,6 +1,6 @@
from leapp.actors import Actor
from leapp.libraries.actor import mount_unit_generator as mount_unit_generator_lib
-from leapp.models import LiveModeConfig, TargetUserSpaceInfo, UpgradeInitramfsTasks
+from leapp.models import LiveModeConfig, StorageInfo, TargetUserSpaceInfo, UpgradeInitramfsTasks
from leapp.tags import InterimPreparationPhaseTag, IPUWorkflowTag
@@ -15,6 +15,7 @@ class MountUnitGenerator(Actor):
consumes = (
LiveModeConfig,
TargetUserSpaceInfo,
+ StorageInfo,
)
produces = (
UpgradeInitramfsTasks,
diff --git a/repos/system_upgrade/common/actors/initramfs/mount_units_generator/files/bundled_units/boot.mount b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/files/bundled_units/boot.mount
new file mode 100644
index 00000000..869c5e4c
--- /dev/null
+++ b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/files/bundled_units/boot.mount
@@ -0,0 +1,11 @@
+[Unit]
+DefaultDependencies=no
+Before=local-fs.target
+After=sysroot-boot.target
+Requires=sysroot-boot.target
+
+[Mount]
+What=/sysroot/boot
+Where=/boot
+Type=none
+Options=bind
diff --git a/repos/system_upgrade/common/actors/initramfs/mount_units_generator/libraries/mount_unit_generator.py b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/libraries/mount_unit_generator.py
index 943bddd4..e3070986 100644
--- a/repos/system_upgrade/common/actors/initramfs/mount_units_generator/libraries/mount_unit_generator.py
+++ b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/libraries/mount_unit_generator.py
@@ -5,7 +5,9 @@ import tempfile
from leapp.exceptions import StopActorExecutionError
from leapp.libraries.common import mounting
from leapp.libraries.stdlib import api, CalledProcessError, run
-from leapp.models import LiveModeConfig, TargetUserSpaceInfo, UpgradeInitramfsTasks
+from leapp.models import LiveModeConfig, StorageInfo, TargetUserSpaceInfo, UpgradeInitramfsTasks
+
+BIND_MOUNT_SYSROOT_BOOT_UNIT = 'boot.mount'
def run_systemd_fstab_generator(output_directory):
@@ -294,6 +296,39 @@ def request_units_inclusion_in_initramfs(files_to_include):
api.produce(tasks)
+def does_system_have_separate_boot_partition():
+ storage_info = next(api.consume(StorageInfo), None)
+ if not storage_info:
+ err_msg = 'Actor did not receive required information about system storage (StorageInfo)'
+ raise StopActorExecutionError(err_msg)
+
+ for fstab_entry in storage_info.fstab:
+ if fstab_entry.fs_file == '/boot':
+ return True
+
+ return False
+
+
+def inject_bundled_units(workspace):
+ """
+ Copy static units that are bundled within this actor into the workspace.
+ """
+ bundled_units_dir = api.get_actor_folder_path('bundled_units')
+ for unit in os.listdir(bundled_units_dir):
+ if unit == BIND_MOUNT_SYSROOT_BOOT_UNIT:
+ has_separate_boot = does_system_have_separate_boot_partition()
+ if not has_separate_boot:
+ # We perform bind-mounting because of dracut's fips module.
+ # When /boot is not a separate partition, we don't need to bind mount it --
+ # the fips module itself will create a symlink.
+ continue
+
+ unit_path = os.path.join(bundled_units_dir, unit)
+ unit_dst = os.path.join(workspace, unit)
+ api.current_logger().debug('Copying static unit bundled within leapp {} to {}'.format(unit, unit_dst))
+ shutil.copyfile(unit_path, unit_dst)
+
+
def setup_storage_initialization():
livemode_config = next(api.consume(LiveModeConfig), None)
if livemode_config and livemode_config.is_enabled:
@@ -306,6 +341,7 @@ def setup_storage_initialization():
run_systemd_fstab_generator(workspace_path)
remove_units_for_targets_that_are_already_mounted_by_dracut(workspace_path)
prefix_all_mount_units_with_sysroot(workspace_path)
+ inject_bundled_units(workspace_path)
fix_symlinks_in_targets(workspace_path)
mount_unit_files = copy_units_into_system_location(upgrade_container_ctx, workspace_path)
request_units_inclusion_in_initramfs(mount_unit_files)
diff --git a/repos/system_upgrade/common/actors/initramfs/mount_units_generator/tests/test_mount_unit_generation.py b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/tests/test_mount_unit_generation.py
index 8849ada9..eb90a75d 100644
--- a/repos/system_upgrade/common/actors/initramfs/mount_units_generator/tests/test_mount_unit_generation.py
+++ b/repos/system_upgrade/common/actors/initramfs/mount_units_generator/tests/test_mount_unit_generation.py
@@ -5,9 +5,9 @@ import pytest
from leapp.exceptions import StopActorExecutionError
from leapp.libraries.actor import mount_unit_generator
-from leapp.libraries.common.testutils import logger_mocked
+from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked
from leapp.libraries.stdlib import api, CalledProcessError
-from leapp.models import TargetUserSpaceInfo, UpgradeInitramfsTasks
+from leapp.models import FstabEntry, StorageInfo, TargetUserSpaceInfo, UpgradeInitramfsTasks
def test_run_systemd_fstab_generator_successful_generation(monkeypatch):
@@ -267,3 +267,62 @@ def test_copy_units_mixed_content(monkeypatch):
]
assert sorted(files) == sorted(expected_files)
assert mount_unit_generator._delete_file.removal_called
+
+
+class CurrentActorMockedWithActorFolder(CurrentActorMocked):
+ def __init__(self, actor_folder_path, *args, **kwargs):
+ self.actor_folder_path = actor_folder_path
+ super().__init__(*args, **kwargs)
+
+ def get_actor_folder_path(self, subfolder):
+ return os.path.join(self.actor_folder_path, subfolder)
+
+
+@pytest.mark.parametrize('has_separate_boot', (True, False))
+def test_injection_of_sysroot_boot_bindmount_unit(monkeypatch, has_separate_boot):
+ fstab_entries = [
+ FstabEntry(fs_spec='UUID=123', fs_file='/root', fs_vfstype='xfs',
+ fs_mntops='defaults', fs_freq='0', fs_passno='0')
+ ]
+
+ if has_separate_boot:
+ boot_fstab_entry = FstabEntry(fs_spec='UUID=123', fs_file='/boot', fs_vfstype='xfs',
+ fs_mntops='defaults', fs_freq='0', fs_passno='0')
+ fstab_entries.append(boot_fstab_entry)
+
+ storage_info = StorageInfo(fstab=fstab_entries)
+
+ actor_mock = CurrentActorMockedWithActorFolder(actor_folder_path='/actor', msgs=[storage_info])
+ monkeypatch.setattr(api, 'current_actor', actor_mock)
+
+ workspace_path = '/workspace'
+ was_copyfile_for_sysroot_boot_called = False
+
+ def copyfile_mocked(source, dest, *args, **kwargs):
+ if not os.path.basename(source) == mount_unit_generator.BIND_MOUNT_SYSROOT_BOOT_UNIT:
+ return
+
+ assert has_separate_boot
+ assert dest == os.path.join(workspace_path, mount_unit_generator.BIND_MOUNT_SYSROOT_BOOT_UNIT)
+
+ nonlocal was_copyfile_for_sysroot_boot_called
+ was_copyfile_for_sysroot_boot_called = True
+
+ monkeypatch.setattr(shutil, 'copyfile', copyfile_mocked)
+
+ def listdir_mocked(path):
+ assert path == actor_mock.get_actor_folder_path('bundled_units')
+ return [
+ mount_unit_generator.BIND_MOUNT_SYSROOT_BOOT_UNIT,
+ 'other.mount'
+ ]
+
+ monkeypatch.setattr(os, 'listdir', listdir_mocked)
+ monkeypatch.setattr(mount_unit_generator,
+ 'does_system_have_separate_boot_partition',
+ lambda: has_separate_boot)
+
+ mount_unit_generator.inject_bundled_units(workspace_path)
+
+ if has_separate_boot:
+ assert was_copyfile_for_sysroot_boot_called
--
2.51.1

View File

@ -1,40 +0,0 @@
From 9b06998b2077d0007da818059d5c6e244ac55948 Mon Sep 17 00:00:00 2001
From: Peter Mocary <pmocary@redhat.com>
Date: Mon, 10 Nov 2025 19:31:25 +0100
Subject: [PATCH 49/55] fix parsing of dnf config dump
This patch fixes handling of empty lines during parsing of output from
`dnf config-manager --dump` command.
Jira: RHEL-120328
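Before this change, a blank line in the dump caused _strip_split() to raise ValueError, and the assignment placed after the try/except then stored the key/value pair left over from the previous iteration (or failed outright on the first line). A minimal sketch of the corrected logic (illustrative; the real code uses _strip_split and logs a warning for unparsable lines):

    def parse_dump(lines):
        data = {}
        for line in lines:
            if not line.strip():
                continue              # skip blank lines entirely
            try:
                key, val = line.split('=', 1)
            except ValueError:
                continue              # unparsable line, nothing to store
            data[key.strip()] = val.strip()   # store only when the split succeeded
        return data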
---
repos/system_upgrade/common/libraries/dnfconfig.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/repos/system_upgrade/common/libraries/dnfconfig.py b/repos/system_upgrade/common/libraries/dnfconfig.py
index 4b5afeb5..9f1902b6 100644
--- a/repos/system_upgrade/common/libraries/dnfconfig.py
+++ b/repos/system_upgrade/common/libraries/dnfconfig.py
@@ -43,8 +43,11 @@ def _get_main_dump(context, disable_plugins):
output_data = {}
for line in data[main_start:]:
+ if not line.strip():
+ continue
try:
key, val = _strip_split(line, '=', 1)
+ output_data[key] = val
except ValueError:
# This is not expected to happen, but call it a seatbelt in case
# the dnf dump implementation will change and we will miss it
@@ -54,7 +57,6 @@ def _get_main_dump(context, disable_plugins):
api.current_logger().warning(
'Cannot parse the dnf dump correctly, line: {}'.format(line))
pass
- output_data[key] = val
return output_data
--
2.51.1

View File

@ -1,609 +0,0 @@
From 428c46051619a570b08189677bb27eedf69c2a9e Mon Sep 17 00:00:00 2001
From: karolinku <kkula@redhat.com>
Date: Fri, 17 Oct 2025 16:06:15 +0200
Subject: [PATCH 50/55] Add detection for third-party target Python modules
Introduce actors to detect the presence of third-party
Python modules installed for the target Python. Those modules could
interfere with the upgrade process or cause issues after rebooting
into the target system.
Scanner (scanthirdpartytargetpythonmodules):
- Identifies the target Python interpreter
- Queries the target Python's sys.path to determine where it searches
for modules
- Recursively scans these directories for Python files (.py, .so, .pyc)
- Cross-references found files against the RPM database to determine
ownership and categorize them
Checker (checkthirdpartytargetpythonmodules) creates a high severity
report to inform users about the findings; it presents the full list
in the logs and a shortened version in the report.
Jira: RHEL-71882
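A rough standalone sketch of the scanner's approach (names and paths here are illustrative assumptions; the actual implementation lives in scanthirdpartytargetpythonmodules.py and also categorizes files owned by non-distribution RPMs):

    import os
    import subprocess

    def target_python_module_dirs(target_python):
        # Ask the target interpreter where it searches for modules.
        code = 'import sys; print("\\n".join(p for p in sys.path if p))'
        out = subprocess.check_output([target_python, '-c', code])
        return out.decode('utf-8').splitlines()

    def is_rpm_owned(path):
        # `rpm -qf` exits non-zero for files not owned by any package.
        result = subprocess.run(['rpm', '-qf', path],
                                stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        return result.returncode == 0

    def find_unowned_modules(target_python='/usr/bin/python3'):
        unowned = []
        for directory in target_python_module_dirs(target_python):
            for root, _, files in os.walk(directory):
                for name in files:
                    if name.endswith(('.py', '.so', '.pyc')):
                        path = os.path.join(root, name)
                        if not is_rpm_owned(path):
                            unowned.append(path)
        return unowned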
---
.../actor.py | 21 ++
.../checkthirdpartytargetpythonmodules.py | 74 +++++++
...check_third_party_target_python_modules.py | 46 +++++
.../actor.py | 19 ++
.../scanthirdpartytargetpythonmodules.py | 193 ++++++++++++++++++
..._scan_third_party_target_python_modules.py | 136 ++++++++++++
.../models/thirdpartytagetpythonmodules.py | 25 +++
requirements.txt | 1 +
8 files changed, 515 insertions(+)
create mode 100644 repos/system_upgrade/common/actors/checkthirdpartytargetpythonmodules/actor.py
create mode 100644 repos/system_upgrade/common/actors/checkthirdpartytargetpythonmodules/libraries/checkthirdpartytargetpythonmodules.py
create mode 100644 repos/system_upgrade/common/actors/checkthirdpartytargetpythonmodules/tests/test_check_third_party_target_python_modules.py
create mode 100644 repos/system_upgrade/common/actors/scanthirdpartytargetpythonmodules/actor.py
create mode 100644 repos/system_upgrade/common/actors/scanthirdpartytargetpythonmodules/libraries/scanthirdpartytargetpythonmodules.py
create mode 100644 repos/system_upgrade/common/actors/scanthirdpartytargetpythonmodules/tests/test_scan_third_party_target_python_modules.py
create mode 100644 repos/system_upgrade/common/models/thirdpartytagetpythonmodules.py
diff --git a/repos/system_upgrade/common/actors/checkthirdpartytargetpythonmodules/actor.py b/repos/system_upgrade/common/actors/checkthirdpartytargetpythonmodules/actor.py
new file mode 100644
index 00000000..e1868819
--- /dev/null
+++ b/repos/system_upgrade/common/actors/checkthirdpartytargetpythonmodules/actor.py
@@ -0,0 +1,21 @@
+from leapp.actors import Actor
+from leapp.libraries.actor.checkthirdpartytargetpythonmodules import perform_check
+from leapp.models import ThirdPartyTargetPythonModules
+from leapp.reporting import Report
+from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
+
+
+class CheckThirdPartyTargetPythonModules(Actor):
+ """
+ Produces a report if any third-party target Python modules are detected on the source system.
+
+ If such modules are detected, a high risk report is produced.
+ """
+
+ name = 'check_third_party_target_python_modules'
+ consumes = (ThirdPartyTargetPythonModules,)
+ produces = (Report,)
+ tags = (ChecksPhaseTag, IPUWorkflowTag)
+
+ def process(self):
+ perform_check()
diff --git a/repos/system_upgrade/common/actors/checkthirdpartytargetpythonmodules/libraries/checkthirdpartytargetpythonmodules.py b/repos/system_upgrade/common/actors/checkthirdpartytargetpythonmodules/libraries/checkthirdpartytargetpythonmodules.py
new file mode 100644
index 00000000..7ed34738
--- /dev/null
+++ b/repos/system_upgrade/common/actors/checkthirdpartytargetpythonmodules/libraries/checkthirdpartytargetpythonmodules.py
@@ -0,0 +1,74 @@
+from leapp import reporting
+from leapp.libraries.stdlib import api
+from leapp.models import ThirdPartyTargetPythonModules
+
+FMT_LIST_SEPARATOR = '\n - '
+MAX_REPORTED_ITEMS = 30
+
+
+def _formatted_list_output_with_max_items(input_list, sep=FMT_LIST_SEPARATOR, max_items=MAX_REPORTED_ITEMS):
+ if not input_list:
+ return ''
+
+ total_count = len(input_list)
+ items_to_show = input_list[:max_items]
+ formatted = ['{}{}'.format(sep, item) for item in items_to_show]
+
+ if total_count > max_items:
+ formatted.append('{}... and {} more'.format(sep, total_count - max_items))
+
+ return ''.join(formatted)
+
+
+def check_third_party_target_python_modules(third_party_target_python_modules):
+ """Create an inhibitor when third-party Python modules are detected."""
+ target_python_version = third_party_target_python_modules.target_python.split('python')[1]
+ third_party_rpms = third_party_target_python_modules.third_party_rpm_names
+ third_party_modules = third_party_target_python_modules.third_party_modules
+
+ summary = (
+ 'Third-party target Python modules may interfere with '
+ 'the upgrade process or cause unexpected behavior after the upgrade.'
+ )
+
+ if third_party_rpms:
+ summary = (
+ '{pre}\n\nNon-distribution RPM packages detected:{rpmlist}'
+ .format(
+ pre=summary,
+ rpmlist=_formatted_list_output_with_max_items(third_party_rpms))
+ )
+
+ if third_party_modules:
+ summary = (
+ '{pre}\n\nNon-distribution modules detected (list can be incomplete):{modulelist}'
+ .format(
+ pre=summary,
+ modulelist=_formatted_list_output_with_max_items(third_party_modules))
+ )
+
+ reporting.create_report([
+ reporting.Title('Detected third-party Python modules for the target Python version'),
+ reporting.Summary(summary),
+ reporting.Remediation(
+ hint='Remove third-party target Python {} packages before attempting the upgrade or ensure '
+ 'that those modules are not interfering with distribution-provided modules.'
+ .format(target_python_version),
+ ),
+ reporting.Severity(reporting.Severity.HIGH)
+ ])
+
+
+def perform_check():
+ """Perform the check for third-party Python modules."""
+ third_party_target_python_modules_msg = next(api.consume(
+ ThirdPartyTargetPythonModules),
+ None,
+ )
+
+ if not third_party_target_python_modules_msg:
+ return
+
+ if (third_party_target_python_modules_msg.third_party_rpm_names or
+ third_party_target_python_modules_msg.third_party_modules):
+ check_third_party_target_python_modules(third_party_target_python_modules_msg)
diff --git a/repos/system_upgrade/common/actors/checkthirdpartytargetpythonmodules/tests/test_check_third_party_target_python_modules.py b/repos/system_upgrade/common/actors/checkthirdpartytargetpythonmodules/tests/test_check_third_party_target_python_modules.py
new file mode 100644
index 00000000..2a87d195
--- /dev/null
+++ b/repos/system_upgrade/common/actors/checkthirdpartytargetpythonmodules/tests/test_check_third_party_target_python_modules.py
@@ -0,0 +1,46 @@
+import pytest
+
+from leapp import reporting
+from leapp.libraries.actor import checkthirdpartytargetpythonmodules
+from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked
+from leapp.libraries.stdlib import api
+from leapp.models import ThirdPartyTargetPythonModules
+
+
+def test_perform_check_no_message_available(monkeypatch):
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[]))
+ monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
+
+ checkthirdpartytargetpythonmodules.perform_check()
+
+ assert not reporting.create_report.called
+
+
+def test_perform_check_empty_lists(monkeypatch):
+ msg = ThirdPartyTargetPythonModules(
+ target_python='python3.9',
+ third_party_modules=[],
+ third_party_rpm_names=[]
+ )
+
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[msg]))
+ monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
+
+ checkthirdpartytargetpythonmodules.perform_check()
+
+ assert not reporting.create_report.called
+
+
+def test_perform_check_with_third_party_modules(monkeypatch):
+ msg = ThirdPartyTargetPythonModules(
+ target_python='python3.9',
+ third_party_modules=['third_party_module'],
+ third_party_rpm_names=['third_party_rpm']
+ )
+
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[msg]))
+ monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
+
+ checkthirdpartytargetpythonmodules.perform_check()
+
+ assert reporting.create_report.called
diff --git a/repos/system_upgrade/common/actors/scanthirdpartytargetpythonmodules/actor.py b/repos/system_upgrade/common/actors/scanthirdpartytargetpythonmodules/actor.py
new file mode 100644
index 00000000..2c0d1973
--- /dev/null
+++ b/repos/system_upgrade/common/actors/scanthirdpartytargetpythonmodules/actor.py
@@ -0,0 +1,19 @@
+from leapp.actors import Actor
+from leapp.libraries.actor import scanthirdpartytargetpythonmodules
+from leapp.models import DistributionSignedRPM, ThirdPartyTargetPythonModules
+from leapp.tags import FactsPhaseTag, IPUWorkflowTag
+
+
+class ScanThirdPartyTargetPythonModules(Actor):
+ """
+ Detect third-party target Python modules and RPMs on the source system.
+
+ """
+
+ name = 'scan_third_party_target_python_modules'
+ consumes = (DistributionSignedRPM,)
+ produces = (ThirdPartyTargetPythonModules,)
+ tags = (FactsPhaseTag, IPUWorkflowTag)
+
+ def process(self):
+ scanthirdpartytargetpythonmodules.process()
diff --git a/repos/system_upgrade/common/actors/scanthirdpartytargetpythonmodules/libraries/scanthirdpartytargetpythonmodules.py b/repos/system_upgrade/common/actors/scanthirdpartytargetpythonmodules/libraries/scanthirdpartytargetpythonmodules.py
new file mode 100644
index 00000000..1329c50f
--- /dev/null
+++ b/repos/system_upgrade/common/actors/scanthirdpartytargetpythonmodules/libraries/scanthirdpartytargetpythonmodules.py
@@ -0,0 +1,193 @@
+import json
+import os
+from collections import defaultdict
+from pathlib import Path
+
+import rpm
+
+from leapp.libraries.common.config.version import get_target_major_version
+from leapp.libraries.common.rpms import has_package
+from leapp.libraries.stdlib import api, run
+from leapp.models import DistributionSignedRPM, ThirdPartyTargetPythonModules
+
+PYTHON_EXTENSIONS = (".py", ".so", ".pyc")
+FMT_LIST_SEPARATOR = '\n - '
+
+
+def _formatted_list_output(input_list, sep=FMT_LIST_SEPARATOR):
+ return ['{}{}'.format(sep, item) for item in input_list]
+
+
+def get_python_sys_paths(python_interpreter):
+ """Get sys.path from the specified Python interpreter."""
+
+ result = run([python_interpreter, '-c', 'import sys, json; print(json.dumps(sys.path))'])['stdout']
+ raw_paths = json.loads(result)
+ paths = [Path(raw_path).resolve() for raw_path in raw_paths]
+ return paths
+
+
+def get_python_binary_for_rhel(rhel_version):
+ """
+ Maps RHEL major version to the appropriate Python binary.
+ """
+
+ version_map = {
+ '9': 'python3.9',
+ '10': 'python3.12',
+ }
+ return version_map.get(rhel_version)
+
+
+def is_target_python_present(target_python):
+ """
+ Checks if the target Python interpreter is available on the system.
+ """
+
+ result = run(['command', '-v', target_python], checked=False)
+ return not result['exit_code']
+
+
+def identify_files_of_pypackages(syspaths):
+ ts = rpm.TransactionSet()
+ # add a trailing slash by calling os.path.join(..., '')
+ roots = tuple(os.path.join(str(path), "") for path in syspaths)
+ file_to_pkg = {}
+
+ # Iterate over all installed packages
+ for header in ts.dbMatch():
+ pkg = header['name']
+ files = header['filenames']
+ for filename in files:
+ if filename and filename.endswith(PYTHON_EXTENSIONS) and filename.startswith(roots):
+ file_to_pkg[filename] = pkg
+ return file_to_pkg
+
+
+def find_python_related(root):
+ # recursively search for all files matching the given extension
+ for pattern in PYTHON_EXTENSIONS:
+ yield from root.rglob("*" + pattern)
+
+
+def _should_skip_file(file):
+ # pyc files are importable, but not if they are in __pycache__
+ return file.name.endswith(".pyc") and file.parent.name == "__pycache__"
+
+
+def scan_python_files(system_paths, rpm_files):
+ """
+ Scan system paths for Python files and categorize them by ownership.
+
+ :param system_paths: List of paths to scan for Python files
+ :param rpm_files: Dictionary mapping file paths to RPM package names
+ :return: Tuple of (rpms_to_check, third_party_unowned_files) where:
+ - rpms_to_check is a dict mapping RPM names to list of their files
+ - third_party_unowned_files is a list of files not owned by any RPM
+ """
+ rpms_to_check = defaultdict(list)
+ third_party_unowned_files = []
+
+ for path in system_paths:
+ if not path.is_dir():
+ continue
+ for file in find_python_related(path):
+ if _should_skip_file(file):
+ continue
+
+ file_path = str(file)
+ owner = rpm_files.get(file_path)
+ if owner:
+ rpms_to_check[owner].append(file_path)
+ else:
+ third_party_unowned_files.append(file_path)
+
+ return rpms_to_check, third_party_unowned_files
+
+
+def identify_unsigned_rpms(rpms_to_check):
+ """
+ Identify which RPMs are third-party (not signed by the distribution).
+
+ :param rpms_to_check: Dictionary mapping RPM names to list of their files
+ :return: Tuple of (third_party_rpms, third_party_files) where:
+ - third_party_rpms is a list of third-party RPM package names
+ - third_party_files is a list of files from third-party RPMs
+ """
+ third_party_rpms = []
+ third_party_files = []
+
+ for rpm_name, files in rpms_to_check.items():
+ if not has_package(DistributionSignedRPM, rpm_name):
+ third_party_rpms.append(rpm_name)
+ api.current_logger().warning(
+ 'Found Python files from non-distribution RPM package: {}'.format(rpm_name)
+ )
+ third_party_files.extend(files)
+
+ return third_party_rpms, third_party_files
+
+
+def process():
+ """
+ Main function to scan the source system for third-party Python modules/RPMs for the target Python version.
+
+ This function:
+ 1. Validates the target RHEL version and Python interpreter
+ 2. Scans system paths for Python files
+ 3. Identifies third-party RPMs and modules
+ 4. Produces a message if any third-party modules/RPMs are detected
+ """
+ target_version = get_target_major_version()
+ target_python = get_python_binary_for_rhel(target_version)
+
+ if not target_python:
+ api.current_logger().info(
+ "RHEL version {} is not supported for third-party Python modules scanning, "
+ "skipping check.".format(target_version)
+ )
+ return
+
+ if not is_target_python_present(target_python):
+ api.current_logger().info(
+ "Target Python interpreter {} is not installed on the source system, "
+ "skipping check of 3rd party python modules.".format(target_python)
+ )
+ return
+ system_paths = get_python_sys_paths(target_python)
+ rpm_files = identify_files_of_pypackages(system_paths[1:])
+
+ rpms_to_check, third_party_unowned_files = scan_python_files(system_paths[1:], rpm_files)
+
+ third_party_rpms, third_party_rpm_files = identify_unsigned_rpms(rpms_to_check)
+
+ # Combine all third-party files (unowned + from third-party RPMs)
+ all_third_party_files = third_party_unowned_files + third_party_rpm_files
+
+ if third_party_rpms or all_third_party_files:
+ api.current_logger().warning(
+ 'Found {} third-party RPM package(s) and {} third-party Python file(s) '
+ 'for target Python {}'.format(
+ len(third_party_rpms), len(all_third_party_files), target_python
+ )
+ )
+
+ if third_party_rpms:
+ api.current_logger().info(
+ 'Complete list of third-party RPM packages:{}'.format(
+ ''.join(_formatted_list_output(third_party_rpms))
+ )
+ )
+
+ if all_third_party_files:
+ api.current_logger().info(
+ 'Complete list of third-party Python modules:{}'.format(
+ ''.join(_formatted_list_output(all_third_party_files))
+ )
+ )
+
+ api.produce(ThirdPartyTargetPythonModules(
+ target_python=target_python,
+ third_party_modules=all_third_party_files,
+ third_party_rpm_names=third_party_rpms
+ ))
diff --git a/repos/system_upgrade/common/actors/scanthirdpartytargetpythonmodules/tests/test_scan_third_party_target_python_modules.py b/repos/system_upgrade/common/actors/scanthirdpartytargetpythonmodules/tests/test_scan_third_party_target_python_modules.py
new file mode 100644
index 00000000..796185ae
--- /dev/null
+++ b/repos/system_upgrade/common/actors/scanthirdpartytargetpythonmodules/tests/test_scan_third_party_target_python_modules.py
@@ -0,0 +1,136 @@
+from collections import defaultdict, namedtuple
+from pathlib import Path
+
+import pytest
+
+from leapp.libraries.actor import scanthirdpartytargetpythonmodules
+from leapp.libraries.common.testutils import logger_mocked
+from leapp.libraries.stdlib import api
+from leapp.models import DistributionSignedRPM
+
+Parent = namedtuple('Parent', ['name'])
+MockFile = namedtuple('MockFile', ['name', 'parent', 'path'])
+
+
+def _mock_file_str(self):
+ return self.path
+
+
+MockFile.__str__ = _mock_file_str
+
+
+@pytest.mark.parametrize('rhel_version,expected_python', [
+ ('9', 'python3.9'),
+ ('10', 'python3.12'),
+ ('8', None),
+ ('7', None),
+ ('', None),
+ ('invalid', None),
+ (None, None),
+])
+def test_get_python_binary_for_rhel(rhel_version, expected_python):
+ assert scanthirdpartytargetpythonmodules.get_python_binary_for_rhel(rhel_version) == expected_python
+
+
+@pytest.mark.parametrize('file_name,parent_name,should_skip', [
+ ('module.pyc', '__pycache__', True),
+ ('module.pyc', 'site-packages', False),
+ ('module.py', '__pycache__', False),
+ ('module.so', '__pycache__', False),
+ ('module.py', 'site-packages', False),
+ ('module.so', 'site-packages', False),
+])
+def test_should_skip_file(file_name, parent_name, should_skip):
+ mock_file = MockFile(name=file_name, parent=Parent(name=parent_name), path='/dummy/path')
+ assert scanthirdpartytargetpythonmodules._should_skip_file(mock_file) is should_skip
+
+
+def test_scan_python_files(monkeypatch):
+ system_paths = [Path('/usr/lib/python3.9/site-packages')]
+ rpm_files = {
+ '/usr/lib/python3.9/site-packages/rpm_module.py': 'rpm-package',
+ '/usr/lib/python3.9/site-packages/another.py': 'another-rpm',
+ }
+
+ def mock_is_dir(self):
+ return True
+
+ def mock_find_python_related(root):
+ files = [
+ MockFile('rpm_module.py', Parent('site-packages'), '/usr/lib/python3.9/site-packages/rpm_module.py'),
+ MockFile('unowned.py', Parent('site-packages'), '/usr/lib/python3.9/site-packages/unowned.py'),
+ MockFile('another.py', Parent('site-packages'), '/usr/lib/python3.9/site-packages/another.py'),
+ ]
+ return iter(files)
+
+ monkeypatch.setattr(Path, 'is_dir', mock_is_dir)
+ monkeypatch.setattr(scanthirdpartytargetpythonmodules, 'find_python_related', mock_find_python_related)
+
+ rpms_to_check, unowned = scanthirdpartytargetpythonmodules.scan_python_files(system_paths, rpm_files)
+
+ assert 'rpm-package' in rpms_to_check
+ assert 'another-rpm' in rpms_to_check
+ assert '/usr/lib/python3.9/site-packages/unowned.py' in unowned
+ assert len(unowned) == 1
+
+
+@pytest.mark.parametrize('path_exists,mock_files', [
+ (False, None),
+ (True, [MockFile('module.pyc', Parent('__pycache__'), '/usr/lib/python3.9/site-packages/__pycache__/module.pyc')]),
+])
+def test_scan_python_files_filtering(monkeypatch, path_exists, mock_files):
+ system_paths = [Path('/usr/lib/python3.9/site-packages')]
+ rpm_files = {}
+
+ def mock_is_dir(self):
+ return path_exists
+
+ monkeypatch.setattr(Path, 'is_dir', mock_is_dir)
+
+ if mock_files is not None:
+ def mock_find_python_related(root):
+ return iter(mock_files)
+ monkeypatch.setattr(scanthirdpartytargetpythonmodules, 'find_python_related', mock_find_python_related)
+
+ rpms_to_check, unowned = scanthirdpartytargetpythonmodules.scan_python_files(system_paths, rpm_files)
+
+ assert len(rpms_to_check) == 0
+ assert len(unowned) == 0
+
+
+@pytest.mark.parametrize('is_signed,expected_rpm_count,expected_file_count', [
+ (False, 1, 2),
+ (True, 0, 0),
+])
+def test_identify_unsigned_rpms(monkeypatch, is_signed, expected_rpm_count, expected_file_count):
+ rpms_to_check = defaultdict(list)
+ package_name = 'test-package'
+ rpms_to_check[package_name] = [
+ '/path/to/file1.py',
+ '/path/to/file2.py',
+ ]
+
+ def mock_has_package(model, pkg_name):
+ return is_signed
+
+ monkeypatch.setattr(scanthirdpartytargetpythonmodules, 'has_package', mock_has_package)
+ monkeypatch.setattr(api, 'current_logger', logger_mocked())
+
+ third_party_rpms, third_party_files = scanthirdpartytargetpythonmodules.identify_unsigned_rpms(rpms_to_check)
+
+ assert len(third_party_rpms) == expected_rpm_count
+ assert len(third_party_files) == expected_file_count
+
+ if not is_signed:
+ assert package_name in third_party_rpms
+ assert '/path/to/file1.py' in third_party_files
+ assert '/path/to/file2.py' in third_party_files
+
+
+def test_identify_unsigned_rpms_empty_input():
+ rpms_to_check = defaultdict(list)
+
+ third_party_rpms, third_party_files = scanthirdpartytargetpythonmodules.identify_unsigned_rpms(rpms_to_check)
+
+ assert len(third_party_rpms) == 0
+ assert len(third_party_files) == 0
diff --git a/repos/system_upgrade/common/models/thirdpartytagetpythonmodules.py b/repos/system_upgrade/common/models/thirdpartytagetpythonmodules.py
new file mode 100644
index 00000000..105e9f2c
--- /dev/null
+++ b/repos/system_upgrade/common/models/thirdpartytagetpythonmodules.py
@@ -0,0 +1,25 @@
+from leapp.models import fields, Model
+from leapp.topics import SystemInfoTopic
+
+
+class ThirdPartyTargetPythonModules(Model):
+ """
+ Information about third-party target Python modules found on the system.
+
+ """
+ topic = SystemInfoTopic
+
+ target_python = fields.String()
+ """
+ Target system Python version.
+ """
+
+ third_party_modules = fields.List(fields.String(), default=[])
+ """
+ List of third-party target Python modules found on the source system. Empty list if no modules found.
+ """
+
+ third_party_rpm_names = fields.List(fields.String(), default=[])
+ """
+ List of third-party RPMs found on the source system. Empty list if no RPMs found.
+ """
diff --git a/requirements.txt b/requirements.txt
index a1bb4725..3c79b23d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -14,3 +14,4 @@ git+https://github.com/oamg/leapp
requests
# pinning a py27 troublemaking transitive dependency
lazy-object-proxy==1.5.2; python_version < '3'
+rpm
--
2.51.1

File diff suppressed because it is too large

View File

@ -1,39 +0,0 @@
From d9fe5528d6e92702e2188ee28f2620275d032d53 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Mon, 11 Aug 2025 14:32:16 +0200
Subject: [PATCH 52/55] Makefile: Make sanity-check respect $(REPOSITORIES)
Since b6e84f7, the sanity check runs in each repository and doesn't
respect $(REPOSITORIES). This breaks the sanity check when it's run with
a version of Python that is incompatible with the code in a particular
repository.
For example, with python3.6 and the el9toel10 repo, Python crashes with the
following error because python3.6 doesn't understand the type hint:
File "/payload/repos/system_upgrade/el9toel10/actors/mysql/scanmysql/libraries/scanmysql.py", line 35, in <module>
def _check_incompatible_config() -> set[str]:
TypeError: 'type' object is not subscriptable
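As a standalone illustration (not part of the patch), the incompatibility comes down to built-in
generics like set[str] being subscriptable only on Python 3.9 and later, while the typing-module
spelling also works on python3.6. The function names below are made up for the example:

from typing import Set

def check_incompatible_config_compat() -> Set[str]:  # evaluates fine on Python 3.6+
    return set()

# Annotations are evaluated at function definition time on these Python versions,
# so merely importing a module containing the following definition fails on 3.6:
#
#     def check_incompatible_config() -> set[str]:
#         ...
#     TypeError: 'type' object is not subscriptable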
---
Makefile | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/Makefile b/Makefile
index 64115006..b1d807e1 100644
--- a/Makefile
+++ b/Makefile
@@ -373,9 +373,9 @@ lint_fix:
test_no_lint:
@. $(VENVNAME)/bin/activate; \
snactor repo find --path repos/; \
- for dir in repos/system_upgrade/*/; do \
- echo "Running sanity-check in $$dir"; \
- (cd $$dir && snactor workflow sanity-check ipu); \
+ for dir in $$(echo $(REPOSITORIES) | tr "," " "); do \
+ echo "Running sanity-check in $(_SYSUPG_REPOS)/$$dir"; \
+ (cd $(_SYSUPG_REPOS)/$$dir && snactor workflow sanity-check ipu); \
done; \
$(_PYTHON_VENV) -m pytest $(REPORT_ARG) $(TEST_PATHS) $(LIBRARY_PATH) $(PYTEST_ARGS)
--
2.51.1

View File

@ -1,178 +0,0 @@
From 60a1fa88d3a8c93063eb67b9604ac030ed7f1c3a Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Mon, 11 Aug 2025 14:32:40 +0200
Subject: [PATCH 53/55] Makefile: Partially fix ACTOR tests and enable them in
containers
The ACTOR variable for running tests of a single actor has long been
unusable; git bisect led me to dc3abf6 as the commit where
it was (first?) broken due to the same model module being defined in
both the el7toel8 and el8toel9 repos.
This patch unfortunately doesn't fix that, but works around it by
enabling the utils/actor_path.py script to search only in specific
repositories. These are passed from the Makefile via the REPOSITORIES
variable.
As the Makefile rules for containerized tests already use repositories,
the ACTOR variable is just passed along.
NOTE: the code in actor_path.py is ugly and uses private APIs; however,
that's nothing new :).
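To make the workaround concrete, here is a condensed, hypothetical sketch of the
repository-restricted lookup the patch adds to utils/actor_path.py, using the same framework
calls that appear in the diff below (the function name is invented for the example):

import os

from leapp.repository.manager import RepositoryManager
from leapp.repository.scan import _resolve_repository_links, scan_repo


def find_actor_paths(actor_name, repositories, base='repos/system_upgrade'):
    # Scan only the requested repositories, e.g. ['common', 'el8toel9'],
    # instead of everything under repos/, sidestepping the name collisions.
    manager = RepositoryManager()
    for repo in repositories:
        manager.add_repo(scan_repo(os.path.join(base, repo)))
    _resolve_repository_links(manager=manager, include_locals=True)
    manager.load()
    return [actor.full_path for actor in manager._lookup_actors(actor_name)]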
---
Makefile | 44 +++++++++++++++++++++++++++++++-------------
utils/actor_path.py | 35 ++++++++++++++++++++++++++---------
2 files changed, 57 insertions(+), 22 deletions(-)
diff --git a/Makefile b/Makefile
index b1d807e1..59671f06 100644
--- a/Makefile
+++ b/Makefile
@@ -12,24 +12,29 @@ REPOS_PATH=repos
_SYSUPG_REPOS="$(REPOS_PATH)/system_upgrade"
LIBRARY_PATH=
REPORT_ARG=
-REPOSITORIES ?= $(shell ls $(_SYSUPG_REPOS) | xargs echo | tr " " ",")
-SYSUPG_TEST_PATHS=$(shell echo $(REPOSITORIES) | sed -r "s|(,\\|^)| $(_SYSUPG_REPOS)/|g")
-TEST_PATHS:=commands repos/common $(SYSUPG_TEST_PATHS)
-
-# Several commands can take arbitrary user supplied arguments from environment
-# variables as well:
-PYTEST_ARGS ?=
-PYLINT_ARGS ?=
-FLAKE8_ARGS ?=
# python version to run test with
_PYTHON_VENV=$${PYTHON_VENV:-python3.6}
ifdef ACTOR
- TEST_PATHS=`$(_PYTHON_VENV) utils/actor_path.py $(ACTOR)`
+ # If REPOSITORIES is set, the utils/actor_path.py script searches for the
+ # actor only in the specified repositories.
+ # if REPOSITORIES is not set i.e. it's empty, all repositories are searched
+ # - this is broken due to name collisions in repositories (FIXME)
+ TEST_PATHS=$(shell . $(VENVNAME)/bin/activate && $(_PYTHON_VENV) utils/actor_path.py $(ACTOR) $(REPOSITORIES))
APPROX_TEST_PATHS=$(shell $(_PYTHON_VENV) utils/find_actors.py -C repos $(ACTOR)) # Dev only
+else
+ REPOSITORIES ?= $(shell ls $(_SYSUPG_REPOS) | xargs echo | tr " " ",")
+ SYSUPG_TEST_PATHS=$(shell echo $(REPOSITORIES) | sed -r "s|(,\\|^)| $(_SYSUPG_REPOS)/|g")
+ TEST_PATHS:=commands repos/common $(SYSUPG_TEST_PATHS)
endif
+# Several commands can take arbitrary user supplied arguments from environment
+# variables as well:
+PYTEST_ARGS ?=
+PYLINT_ARGS ?=
+FLAKE8_ARGS ?=
+
ifeq ($(TEST_LIBS),y)
LIBRARY_PATH=`python utils/library_path.py`
endif
@@ -371,12 +376,24 @@ lint_fix:
echo "--- isort inplace fixing done. ---;"
test_no_lint:
- @. $(VENVNAME)/bin/activate; \
+ @if [ -z "$(REPOSITORIES)" -a -n "$(ACTOR)" ]; then \
+ printf "\033[0;31mWARNING\033[0m: Running tests with ACTOR without"; \
+ printf " specifying REPOSITORIES is currently broken.\n" 2>&1; \
+ printf " Specify REPOSITORIES with only one elXtoelY repository"; \
+ printf " (e.g. REPOSITORIES=common,el8toel9).\n" 2>&1; \
+ exit 1; \
+ fi
+
+ @echo "============= snactor sanity-check ipu ===============" 2>&1
+ . $(VENVNAME)/bin/activate; \
snactor repo find --path repos/; \
for dir in $$(echo $(REPOSITORIES) | tr "," " "); do \
echo "Running sanity-check in $(_SYSUPG_REPOS)/$$dir"; \
(cd $(_SYSUPG_REPOS)/$$dir && snactor workflow sanity-check ipu); \
- done; \
+ done
+
+ @echo "==================== unit tests ======================" 2>&1
+ . $(VENVNAME)/bin/activate; \
$(_PYTHON_VENV) -m pytest $(REPORT_ARG) $(TEST_PATHS) $(LIBRARY_PATH) $(PYTEST_ARGS)
test: lint test_no_lint
@@ -407,7 +424,7 @@ _test_container_ipu:
;; \
esac && \
$(_CONTAINER_TOOL) exec -w /repocopy $$_CONT_NAME make clean && \
- $(_CONTAINER_TOOL) exec -w /repocopy -e REPOSITORIES $$_CONT_NAME make $${_TEST_CONT_TARGET:-test}
+ $(_CONTAINER_TOOL) exec -w /repocopy -e ACTOR -e REPOSITORIES $$_CONT_NAME make $${_TEST_CONT_TARGET:-test}
# Runs lint in a container
@@ -449,6 +466,7 @@ test_container:
$(_CONTAINER_TOOL) run -di --name $$_CONT_NAME -v "$$PWD":/repo:Z -e PYTHON_VENV=$$_VENV $$TEST_IMAGE && \
$(_CONTAINER_TOOL) exec $$_CONT_NAME rsync -aur --delete --exclude 'tut/' --exclude 'docs/' --exclude '**/__pycache__/' --exclude 'packaging/' --exclude '.git/' /repo/ /repocopy && \
$(_CONTAINER_TOOL) exec $$_CONT_NAME rsync -aur --delete --exclude '**/__pycache__/' /repo/commands/ /repocopy/tut/lib/$$_VENV/site-packages/leapp/cli/commands/ && \
+ $(_CONTAINER_TOOL) exec -w /repocopy $$_CONT_NAME bash -c '. $(VENVNAME)/bin/activate && snactor repo find --path repos' && \
export res=0; \
case $$_VENV in \
python3.6) \
diff --git a/utils/actor_path.py b/utils/actor_path.py
index 5c53a16a..3c61ce79 100755
--- a/utils/actor_path.py
+++ b/utils/actor_path.py
@@ -1,7 +1,10 @@
import logging
+import os
import sys
-from leapp.repository.scan import find_and_scan_repositories
+from leapp.repository.manager import RepositoryManager
+from leapp.repository.scan import _resolve_repository_links, find_and_scan_repositories, scan_repo
+
def err_exit():
# We want to be sure that `make test` (test_no_lint) will stop when expected
@@ -10,22 +13,36 @@ def err_exit():
sys.stdout.write('ERROR:__read_error_messages_above_this_one_on_stderr__')
sys.exit(1)
+
def main():
logging.basicConfig(level=logging.INFO, filename='/dev/null')
logger = logging.getLogger('run_pytest.py')
BASE_REPO = 'repos'
- repos = find_and_scan_repositories(BASE_REPO, include_locals=True)
- repos.load()
- if len(sys.argv) > 1:
- actors = repos._lookup_actors(sys.argv[1])
- if not actors:
- sys.stderr.write('ERROR: No actor found for search "{}"\n'.format(sys.argv[1]))
- err_exit()
- print(' '.join([actor.full_path for actor in actors]))
+ SYSUPG_REPO = os.path.join(BASE_REPO, 'system_upgrade')
+
+ if len(sys.argv) == 2:
+ manager = find_and_scan_repositories(BASE_REPO, include_locals=True)
+ manager.load()
+ elif len(sys.argv) == 3:
+ repos = sys.argv[2].split(',')
+ # TODO: it would be nicer to have some function in the framework for
+ # the scanning and resolving done below
+ manager = RepositoryManager()
+ for repo in repos:
+ manager.add_repo(scan_repo(os.path.join(SYSUPG_REPO, repo)))
+ _resolve_repository_links(manager=manager, include_locals=True)
+ manager.load()
else:
sys.stderr.write('ERROR: Missing commandline argument\n')
+ sys.stderr.write('Usage: actor_path.py <actor_name> [repositories]\n')
+ err_exit()
+
+ actors = manager._lookup_actors(sys.argv[1])
+ if not actors:
+ sys.stderr.write('ERROR: No actor found for search "{}"\n'.format(sys.argv[1]))
err_exit()
+ print(' '.join([actor.full_path for actor in actors]))
if __name__ == '__main__':
--
2.51.1

View File

@ -1,62 +0,0 @@
From 4e182e84fc8ebd499dfc9f9e2caf4ae7dd63fb60 Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Fri, 25 Jul 2025 16:27:42 +0200
Subject: [PATCH 54/55] make: error uniformly when ACTOR without REPOSITORIES
is used
Using ACTOR without REPOSITORIES leads to a deadlock during actor
discovery (likely due to the 'multipathconfcheck' actor). This patch
adds a new make target that prevents the use of ACTOR without
REPOSITORIES.
---
Makefile | 22 ++++++++++++----------
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/Makefile b/Makefile
index 59671f06..039b3e9e 100644
--- a/Makefile
+++ b/Makefile
@@ -339,7 +339,7 @@ install-deps-fedora:
$(VENVNAME)/bin/pip install -I "git+https://github.com/oamg/leapp.git@refs/pull/$(REQ_LEAPP_PR)/head"; \
fi
-lint:
+lint: _warn_misssing_repos_if_using_actor
. $(VENVNAME)/bin/activate; \
echo "--- Linting ... ---" && \
SEARCH_PATH="$(TEST_PATHS)" && \
@@ -375,14 +375,7 @@ lint_fix:
git diff $(MASTER_BRANCH) --name-only --diff-filter AMR | grep -v "^docs/" | xargs isort && \
echo "--- isort inplace fixing done. ---;"
-test_no_lint:
- @if [ -z "$(REPOSITORIES)" -a -n "$(ACTOR)" ]; then \
- printf "\033[0;31mWARNING\033[0m: Running tests with ACTOR without"; \
- printf " specifying REPOSITORIES is currently broken.\n" 2>&1; \
- printf " Specify REPOSITORIES with only one elXtoelY repository"; \
- printf " (e.g. REPOSITORIES=common,el8toel9).\n" 2>&1; \
- exit 1; \
- fi
+test_no_lint: _warn_misssing_repos_if_using_actor
@echo "============= snactor sanity-check ipu ===============" 2>&1
. $(VENVNAME)/bin/activate; \
@@ -538,5 +531,14 @@ dashboard_data:
$(_PYTHON_VENV) ../../../utils/dashboard-json-dump.py > ../../../discover.json; \
popd
-.PHONY: help build clean prepare source srpm copr_build _build_local build_container print_release register install-deps install-deps-fedora lint test_no_lint test dashboard_data fast_lint
+_warn_misssing_repos_if_using_actor:
+ @if [ -z "$(REPOSITORIES)" -a -n "$(ACTOR)" ]; then \
+ printf "\033[0;31mERROR\033[0m: Running linters/tests with ACTOR without"; \
+ printf " specifying REPOSITORIES is currently broken.\n" 2>&1; \
+ printf " Specify REPOSITORIES with only one elXtoelY repository"; \
+ printf " (e.g. REPOSITORIES=common,el8toel9).\n" 2>&1; \
+ exit 1; \
+ fi
+
+.PHONY: help build clean prepare source srpm copr_build _build_local build_container print_release register install-deps install-deps-fedora lint test_no_lint test dashboard_data fast_lint _warn_missing_repos_if_using_actor
.PHONY: test_container test_container_no_lint test_container_all test_container_all_no_lint clean_containers _build_container_image _test_container_ipu dev_test_no_lint
--
2.51.1

View File

@ -1,56 +0,0 @@
From b7f862249e2227d2c5f3f6e33d74f8d2a2367a11 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Thu, 16 Oct 2025 19:28:22 +0200
Subject: [PATCH 55/55] Makefile: Skip tests with ACTOR when it's not present
in REPOSITORIES
---
Makefile | 17 +++++++++++++----
1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/Makefile b/Makefile
index 039b3e9e..0db240a9 100644
--- a/Makefile
+++ b/Makefile
@@ -21,7 +21,7 @@ ifdef ACTOR
# actor only in the specified repositories.
# if REPOSITORIES is not set i.e. it's empty, all repositories are searched
# - this is broken due to name collisions in repositories (FIXME)
- TEST_PATHS=$(shell . $(VENVNAME)/bin/activate && $(_PYTHON_VENV) utils/actor_path.py $(ACTOR) $(REPOSITORIES))
+ TEST_PATHS = $(shell . $(VENVNAME)/bin/activate && $(_PYTHON_VENV) utils/actor_path.py $(ACTOR) $(REPOSITORIES))
APPROX_TEST_PATHS=$(shell $(_PYTHON_VENV) utils/find_actors.py -C repos $(ACTOR)) # Dev only
else
REPOSITORIES ?= $(shell ls $(_SYSUPG_REPOS) | xargs echo | tr " " ",")
@@ -376,7 +376,6 @@ lint_fix:
echo "--- isort inplace fixing done. ---;"
test_no_lint: _warn_misssing_repos_if_using_actor
-
@echo "============= snactor sanity-check ipu ===============" 2>&1
. $(VENVNAME)/bin/activate; \
snactor repo find --path repos/; \
@@ -385,9 +384,19 @@ test_no_lint: _warn_misssing_repos_if_using_actor
(cd $(_SYSUPG_REPOS)/$$dir && snactor workflow sanity-check ipu); \
done
- @echo "==================== unit tests ======================" 2>&1
+ @echo "==================== unit tests ======================" 2>&1;
+# the below commands need to be one shell invocation for the early exit to work;
+# note: need to store the paths in a separate var here, as it's lazily
+# evaluated on each use :), using ?= for the assignment does not help for
+# some reason
+ @paths="$(TEST_PATHS)"; \
+ if [[ $$(echo "$$paths" | grep 'ERROR:') && -n "$(ACTOR)" ]]; then \
+ echo Failed to find the '$(ACTOR)' actor in the '$(REPOSITORIES)' repositories: $$paths; \
+ printf "\033[0;33mSkipping unit tests, could not find the '$(ACTOR)' actor in $(REPOSITORIES) repositories\033[0m\n"; \
+ exit 0; \
+ fi; \
. $(VENVNAME)/bin/activate; \
- $(_PYTHON_VENV) -m pytest $(REPORT_ARG) $(TEST_PATHS) $(LIBRARY_PATH) $(PYTEST_ARGS)
+ $(_PYTHON_VENV) -m pytest $(REPORT_ARG) $$paths $(LIBRARY_PATH) $(PYTEST_ARGS)
test: lint test_no_lint
--
2.51.1

View File

@ -1,37 +0,0 @@
From b2dab41ff9d9492a5e121bbce729ea07a5d41330 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Mon, 29 Sep 2025 21:19:18 +0200
Subject: [PATCH 56/69] Add LEAPP_DEVEL_TARGET_OS devel environment var
Allow setting the target OS for development purposes. Internally, the
LEAPP_TARGET_OS envar (which cannot be set by the user) is set, similarly
to how this works for other arguments.
Note that this patch only adds the variable and stores it into
LEAPP_TARGET_OS; otherwise it is unhandled.
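A minimal sketch of the resulting behaviour, assuming the values shown; the helper below is
hypothetical and simply mirrors the two changes in the hunk (in the real code, get_distro_id()
comes from command_utils):

import os


def resolve_target_os(get_distro_id=lambda: 'rhel'):
    # The developer override wins; otherwise fall back to the source distro id.
    target_os = os.getenv('LEAPP_DEVEL_TARGET_OS', get_distro_id())
    os.environ['LEAPP_TARGET_OS'] = target_os
    # RHSM is forced off when the target OS is not RHEL (previously this was
    # decided from the source distro).
    if target_os != 'rhel':
        os.environ['LEAPP_NO_RHSM'] = '1'
    return target_os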
---
commands/upgrade/util.py | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/commands/upgrade/util.py b/commands/upgrade/util.py
index dadfe7de..1c88eab8 100644
--- a/commands/upgrade/util.py
+++ b/commands/upgrade/util.py
@@ -221,9 +221,13 @@ def prepare_configuration(args):
if args.enable_experimental_feature:
os.environ['LEAPP_EXPERIMENTAL'] = '1'
+ os.environ["LEAPP_TARGET_OS"] = os.getenv(
+ "LEAPP_DEVEL_TARGET_OS", command_utils.get_distro_id()
+ )
+
os.environ['LEAPP_UNSUPPORTED'] = '0' if os.getenv('LEAPP_UNSUPPORTED', '0') == '0' else '1'
# force no rhsm on non-rhel systems, regardless of whether the binary is there
- if args.no_rhsm or command_utils.get_distro_id() != 'rhel':
+ if args.no_rhsm or os.environ['LEAPP_TARGET_OS'] != 'rhel':
os.environ['LEAPP_NO_RHSM'] = '1'
elif not os.path.exists('/usr/sbin/subscription-manager'):
os.environ['LEAPP_NO_RHSM'] = '1'
--
2.51.1

View File

@ -1,195 +0,0 @@
From ff0f962147fc3af6cfd78f6251ddaa8cf94705c6 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Thu, 18 Sep 2025 19:44:57 +0200
Subject: [PATCH 57/69] commands: Get target version based on target distro
Jira: RHEL-110563
---
commands/command_utils.py | 48 ++++++++++++++-------
commands/tests/test_upgrade_paths.py | 63 +++++++++++++++++++++-------
2 files changed, 80 insertions(+), 31 deletions(-)
diff --git a/commands/command_utils.py b/commands/command_utils.py
index 647e7b44..31125da5 100644
--- a/commands/command_utils.py
+++ b/commands/command_utils.py
@@ -62,9 +62,9 @@ def assert_version_format(version_str, desired_format, version_kind):
"""
if not re.match(desired_format.regex, version_str):
error_str = (
- 'Unexpected format of target version: {0}. The required format is \'{1}\'.'
- )
- raise CommandError(error_str.format(version_str, desired_format.human_readable))
+ "Unexpected format of {} version: {}. The required format is '{}'."
+ ).format(version_kind.value, version_str, desired_format.human_readable)
+ raise CommandError(error_str)
def get_major_version_from_a_valid_version(version):
@@ -165,7 +165,17 @@ def get_target_versions_from_config(src_version_id, distro, flavor):
return upgrade_paths_map.get(distro, {}).get(flavor, {}).get(src_version_id, [])
-def get_supported_target_versions(flavour=get_upgrade_flavour()):
+def get_virtual_version_from_config(src_version_id, distro):
+ """
+ Retrieve the virtual version for the given version from upgrade_paths_map.
+
+ :return: The virtual version or None if no match.
+ """
+ upgrade_paths_map = get_upgrade_paths_config()
+ return upgrade_paths_map.get(distro, {}).get('_virtual_versions', {}).get(src_version_id)
+
+
+def get_supported_target_versions(target_distro, flavour=get_upgrade_flavour()):
"""
Return a list of supported target versions for the given `flavour` of upgrade.
The default value for `flavour` is `default`.
@@ -173,26 +183,30 @@ def get_supported_target_versions(flavour=get_upgrade_flavour()):
os_release_contents = _retrieve_os_release_contents()
current_version_id = os_release_contents.get('VERSION_ID', '')
- distro_id = os_release_contents.get('ID', '')
+ source_distro = os_release_contents.get('ID', '')
# We want to guarantee our actors that if they see 'centos'/'rhel'/...
# then they will always see expected version format
- expected_version_format = _DISTRO_VERSION_FORMATS.get(distro_id, VersionFormats.MAJOR_MINOR).value
- assert_version_format(current_version_id, expected_version_format, _VersionKind.SOURCE)
+ expected_version_format = _DISTRO_VERSION_FORMATS.get(source_distro, VersionFormats.MAJOR_MINOR)
+ assert_version_format(current_version_id, expected_version_format.value, _VersionKind.SOURCE)
+ if source_distro == 'centos' and target_distro != 'centos':
+ # when upconverting from centos, we need to lookup by virtual version
+ current_version_id = get_virtual_version_from_config(current_version_id, source_distro)
- target_versions = get_target_versions_from_config(current_version_id, distro_id, flavour)
+ target_versions = get_target_versions_from_config(current_version_id, target_distro, flavour)
if not target_versions:
# If we cannot find a particular major.minor version in the map,
# we fallback to pick a target version just based on a major version.
- # This can happen for example when testing not yet released versions
+ # This can happen for example when testing not yet released versions.
+ # But also removes the need to handle virtual versions on X->centos upgrades.
major_version = get_major_version_from_a_valid_version(current_version_id)
- target_versions = get_target_versions_from_config(major_version, distro_id, flavour)
+ target_versions = get_target_versions_from_config(major_version, target_distro, flavour)
return target_versions
-def get_target_version(flavour):
- target_versions = get_supported_target_versions(flavour)
+def get_target_version(flavour, target_distro):
+ target_versions = get_supported_target_versions(target_distro, flavour)
return target_versions[-1] if target_versions else None
@@ -214,13 +228,15 @@ def get_target_release(args):
env_version_override = os.getenv('LEAPP_DEVEL_TARGET_RELEASE')
target_ver = env_version_override or args.target
+ target_distro_id = os.getenv('LEAPP_TARGET_OS')
if target_ver:
- distro_id = get_distro_id()
- expected_version_format = _DISTRO_VERSION_FORMATS.get(distro_id, VersionFormats.MAJOR_MINOR).value
- assert_version_format(target_ver, expected_version_format, _VersionKind.TARGET)
+ expected_version_format = _DISTRO_VERSION_FORMATS.get(
+ target_distro_id, VersionFormats.MAJOR_MINOR
+ )
+ assert_version_format(target_ver, expected_version_format.value, _VersionKind.TARGET)
return (target_ver, flavor)
- return (get_target_version(flavor), flavor)
+ return (get_target_version(flavor, target_distro_id), flavor)
def set_resource_limits():
diff --git a/commands/tests/test_upgrade_paths.py b/commands/tests/test_upgrade_paths.py
index 9bdf5792..95e6519a 100644
--- a/commands/tests/test_upgrade_paths.py
+++ b/commands/tests/test_upgrade_paths.py
@@ -8,26 +8,54 @@ from leapp.cli.commands import command_utils
from leapp.exceptions import CommandError
-@mock.patch("leapp.cli.commands.command_utils.get_upgrade_paths_config",
- return_value={'rhel': {"default": {"7.9": ["8.4"], "8.6": ["9.0"], "7": ["8.4"], "8": ["9.0"]}}})
+@mock.patch(
+ "leapp.cli.commands.command_utils.get_upgrade_paths_config",
+ return_value={
+ "rhel": {
+ "default": {"7.9": ["8.4"], "8.6": ["9.0"], "8.7": ["9.1"], "7": ["8.4"], "8": ["9.0"]}
+ },
+ "centos": {
+ "default": {"8": ["9"], "9": ["10"]},
+ "_virtual_versions": {"8": "8.7", "9": "9.8", "10": "10.2"},
+ },
+ "alma": {
+ "default": {"7.9": ["8.4"], "8.6": ["9.0"], "8.7": ["9.1"]}
+ },
+ },
+)
def test_get_target_version(mock_open, monkeypatch):
- etc_os_release_contents = {'ID': 'rhel', 'VERSION_ID': '8.6'}
- monkeypatch.setattr(command_utils, '_retrieve_os_release_contents',
- lambda *args, **kwargs: etc_os_release_contents)
- assert command_utils.get_target_version('default') == '9.0'
+ def set_etc_osrelease(distro_id, version_id):
+ etc_os_release_contents = {"ID": distro_id, "VERSION_ID": version_id}
+ monkeypatch.setattr(
+ command_utils,
+ "_retrieve_os_release_contents",
+ lambda *args, **kwargs: etc_os_release_contents,
+ )
+
+ set_etc_osrelease('rhel', '8.6')
+ assert command_utils.get_target_version('default', 'rhel') == '9.0'
+
+ # the envar should not affect this function
monkeypatch.setenv('LEAPP_DEVEL_TARGET_RELEASE', '')
- etc_os_release_contents = {'ID': 'rhel', 'VERSION_ID': '8.6'}
- monkeypatch.setattr(command_utils, '_retrieve_os_release_contents',
- lambda *args, **kwargs: etc_os_release_contents)
- assert command_utils.get_target_version('default') == '9.0'
+ assert command_utils.get_target_version('default', 'rhel') == '9.0'
+ # unsupported path, matches because of the major version fallback
monkeypatch.delenv('LEAPP_DEVEL_TARGET_RELEASE', raising=True)
- # unsupported path
- etc_os_release_contents = {'ID': 'rhel', 'VERSION_ID': '8.5'}
- monkeypatch.setattr(command_utils, '_retrieve_os_release_contents',
- lambda *args, **kwargs: etc_os_release_contents)
- assert command_utils.get_target_version('default') == '9.0'
+ set_etc_osrelease('rhel', '8.5')
+ assert command_utils.get_target_version('default', 'rhel') == '9.0'
+
+ # centos->centos
+ set_etc_osrelease('centos', '9')
+ assert command_utils.get_target_version('default', 'centos') == '10'
+
+ # centos->rhel, lookup based on virtual versions
+ set_etc_osrelease('centos', '8')
+ assert command_utils.get_target_version('default', 'rhel') == '9.1'
+
+ # rhel->centos, reverse virtual versions lookup
+ set_etc_osrelease('rhel', '8.6')
+ assert command_utils.get_target_version('default', 'centos') == '9'
@mock.patch(
@@ -42,6 +70,11 @@ def test_get_target_version(mock_open, monkeypatch):
},
)
def test_get_target_release(mock_open, monkeypatch): # do not remove mock_open
+ # NOTE Not testing with other distros, the tested function is mainly about
+ # handling of the CLI option, envar and format checking, the real target
+ # release retrieval is handled in get_target_version which is tested with
+ # different source/target distro combinations elsewhere.
+
# Make it look like it's RHEL even on centos, because that's what the test
# assumes.
# Otherwise the test, when ran on Centos, fails because it works
--
2.51.1

Some files were not shown because too many files have changed in this diff