import leapp-repository-0.18.0-1.el8

This commit is contained in:
CentOS Sources 2023-03-02 06:10:32 +00:00 committed by Stepan Oksanichenko
parent 14f115ce08
commit 282f0a5e76
74 changed files with 52 additions and 16236 deletions

2
.gitignore vendored
View File

@ -1,2 +1,2 @@
SOURCES/deps-pkgs-9.tar.gz
SOURCES/leapp-repository-0.17.0.tar.gz
SOURCES/leapp-repository-0.18.0.tar.gz

View File

@ -1,2 +1,2 @@
02499ccd70d4a8e6ce9ad29bd286a317d5e0b57b SOURCES/deps-pkgs-9.tar.gz
cbb3e6025c6567507d3bc317731b4c2f0a0eb872 SOURCES/leapp-repository-0.17.0.tar.gz
e69a6a7de3073175a4cc529cc47633a06e648a12 SOURCES/leapp-repository-0.18.0.tar.gz

View File

@ -1,131 +0,0 @@
From c5451ffb9a5c964552afd9d419855ea23b764ad7 Mon Sep 17 00:00:00 2001
From: Inessa Vasilevskaya <ivasilev@redhat.com>
Date: Wed, 24 Aug 2022 12:17:44 +0200
Subject: [PATCH 05/32] Disable isort check for deprecated imports
Although isort works fine most of the time, the way it handles
multiline imports with inline comments is not acceptable
to everyone in the team.
So before we implement some solution we are 146% happy about
it was decided to leave those imports just as they have been for
ages. This patch mutes isort import check for deprecated imports.
---
.../common/actors/commonleappdracutmodules/actor.py | 7 ++++---
.../actors/commonleappdracutmodules/libraries/modscan.py | 5 +++--
.../tests/test_modscan_commonleappdracutmodules.py | 5 +++--
.../tests/test_targetinitramfsgenerator.py | 5 +++--
.../tests/unit_test_upgradeinitramfsgenerator.py | 5 +++--
5 files changed, 16 insertions(+), 11 deletions(-)
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/actor.py b/repos/system_upgrade/common/actors/commonleappdracutmodules/actor.py
index 950b6e88..aae42bbb 100644
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/actor.py
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/actor.py
@@ -1,13 +1,14 @@
from leapp.actors import Actor
from leapp.libraries.actor import modscan
-from leapp.models import (
+from leapp.tags import FactsPhaseTag, IPUWorkflowTag
+from leapp.utils.deprecation import suppress_deprecation
+
+from leapp.models import ( # isort:skip
RequiredUpgradeInitramPackages, # deprecated
UpgradeDracutModule, # deprecated
TargetUserSpaceUpgradeTasks,
UpgradeInitramfsTasks
)
-from leapp.tags import FactsPhaseTag, IPUWorkflowTag
-from leapp.utils.deprecation import suppress_deprecation
@suppress_deprecation(RequiredUpgradeInitramPackages, UpgradeDracutModule)
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/libraries/modscan.py b/repos/system_upgrade/common/actors/commonleappdracutmodules/libraries/modscan.py
index a089c4c1..275b2c63 100644
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/libraries/modscan.py
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/libraries/modscan.py
@@ -3,14 +3,15 @@ import re
from leapp.libraries.common.config import architecture, version
from leapp.libraries.stdlib import api
-from leapp.models import (
+from leapp.utils.deprecation import suppress_deprecation
+
+from leapp.models import ( # isort:skip
RequiredUpgradeInitramPackages, # deprecated
UpgradeDracutModule, # deprecated
DracutModule,
TargetUserSpaceUpgradeTasks,
UpgradeInitramfsTasks
)
-from leapp.utils.deprecation import suppress_deprecation
_REQUIRED_PACKAGES = [
'binutils',
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/tests/test_modscan_commonleappdracutmodules.py b/repos/system_upgrade/common/actors/commonleappdracutmodules/tests/test_modscan_commonleappdracutmodules.py
index 307e927c..9c52b51f 100644
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/tests/test_modscan_commonleappdracutmodules.py
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/tests/test_modscan_commonleappdracutmodules.py
@@ -8,13 +8,14 @@ from leapp.libraries.actor import modscan
from leapp.libraries.common.config import architecture
from leapp.libraries.common.testutils import CurrentActorMocked
from leapp.libraries.stdlib import api
-from leapp.models import (
+from leapp.utils.deprecation import suppress_deprecation
+
+from leapp.models import ( # isort:skip
RequiredUpgradeInitramPackages, # deprecated
UpgradeDracutModule, # deprecated
TargetUserSpaceUpgradeTasks,
UpgradeInitramfsTasks
)
-from leapp.utils.deprecation import suppress_deprecation
def _files_get_folder_path(name):
diff --git a/repos/system_upgrade/common/actors/initramfs/targetinitramfsgenerator/tests/test_targetinitramfsgenerator.py b/repos/system_upgrade/common/actors/initramfs/targetinitramfsgenerator/tests/test_targetinitramfsgenerator.py
index 98fe92c6..f5930b9b 100644
--- a/repos/system_upgrade/common/actors/initramfs/targetinitramfsgenerator/tests/test_targetinitramfsgenerator.py
+++ b/repos/system_upgrade/common/actors/initramfs/targetinitramfsgenerator/tests/test_targetinitramfsgenerator.py
@@ -4,13 +4,14 @@ from leapp.exceptions import StopActorExecutionError
from leapp.libraries.actor import targetinitramfsgenerator
from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked
from leapp.libraries.stdlib import api, CalledProcessError
-from leapp.models import (
+from leapp.utils.deprecation import suppress_deprecation
+
+from leapp.models import ( # isort:skip
InitrdIncludes, # deprecated
DracutModule,
InstalledTargetKernelVersion,
TargetInitramfsTasks
)
-from leapp.utils.deprecation import suppress_deprecation
FILES = ['/file1', '/file2', '/dir/ect/ory/file3', '/file4', '/file5']
MODULES = [
diff --git a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/tests/unit_test_upgradeinitramfsgenerator.py b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/tests/unit_test_upgradeinitramfsgenerator.py
index b54aaa1f..2b401e52 100644
--- a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/tests/unit_test_upgradeinitramfsgenerator.py
+++ b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/tests/unit_test_upgradeinitramfsgenerator.py
@@ -7,7 +7,9 @@ from leapp.exceptions import StopActorExecutionError
from leapp.libraries.actor import upgradeinitramfsgenerator
from leapp.libraries.common.config import architecture
from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked, produce_mocked
-from leapp.models import (
+from leapp.utils.deprecation import suppress_deprecation
+
+from leapp.models import ( # isort:skip
RequiredUpgradeInitramPackages, # deprecated
UpgradeDracutModule, # deprecated
BootContent,
@@ -16,7 +18,6 @@ from leapp.models import (
TargetUserSpaceUpgradeTasks,
UpgradeInitramfsTasks,
)
-from leapp.utils.deprecation import suppress_deprecation
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PKGS = ['pkg{}'.format(c) for c in 'ABCDEFGHIJ']
--
2.38.1

View File

@ -1,39 +0,0 @@
From 25adde3fe09d200a3f8bc42af1ebcf07b179fb85 Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Thu, 1 Sep 2022 11:19:18 +0200
Subject: [PATCH 07/32] Mini updates in the spec files
To synchronize better with the downstream specfile, making lives
of people again a little bit easier.
---
packaging/leapp-repository.spec | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/packaging/leapp-repository.spec b/packaging/leapp-repository.spec
index c59f8acd..89750927 100644
--- a/packaging/leapp-repository.spec
+++ b/packaging/leapp-repository.spec
@@ -53,6 +53,10 @@ Source1: deps-pkgs.tar.gz
# NOTE: Our packages must be noarch. Do no drop this in any way.
BuildArch: noarch
+### PATCHES HERE
+# Patch0001: filename.patch
+
+
%description
%{summary}
@@ -182,6 +186,9 @@ Requires: dracut
%setup -n %{name}-%{version}
%setup -q -n %{name}-%{version} -D -T -a 1
+# APPLY PATCHES HERE
+# %%patch0001 -p1
+
%build
%if 0%{?rhel} == 7
--
2.38.1

View File

@ -1,553 +0,0 @@
From 921a3f5ae0fa75ef04eb56857b5f07275e39c112 Mon Sep 17 00:00:00 2001
From: Joe Shimkus <joe@shimkus.com>
Date: Wed, 24 Aug 2022 13:30:19 -0400
Subject: [PATCH 08/32] CheckVDO: Ask user only failures and undetermined
devices (+ report update)
The previous solution made it possible to skip the VDO check by answering
the user question (confirming no vdo devices are present) if the
vdo package is not installed (as the scan of the system could not
be performed). However as part of the bug 2096159 it was discovered
that some systems have very dynamic storage which could dissapear
in the very moment the check by the vdo tool is performed which lead
to the reported inhibitor. We have discovered that this could be real
blocker of the upgrade on such systems as it's pretty simple to hit
at least 1 of N devices to raise such an issue. (*)
To make the upgrade possible on such systems, the dialog has been
updated to be able to skip any problematic VDO checks:
- undetermined block devices
- failures during the vdo scan of a block device
In such a case, user must confirm that no VDO device non-managed
by LVM is present. The dialog is asking now for the `confirm` key
from user instead of `all_vdo_converted`. If any non-LVM managed VDO
devices are discovered, the upgrade is inhibited despite the answer
(this is supposed to happen only when user's answer is not right so
we are ok about that behaviour).
Also reports are updated, as previously it could happen that several
reports with the same title appear during one run of leapp, but each
of them has a different meaning. Set individual titles to all
reports. Also summaries or reports have been updated.
(*) This also includes situations when discovered list of devices
is not complete as some block devices could be loaded after the
initial scan of block devices (StorageInfo msg) is created. Which
means that such devices will not be checked at all as they will not
be known to other actors. We consider this ok as when a system with
dynamic storage is present, usually many of block devices are
redundant. So usually user will have to answer the dialog anyway due
to other "unstable" block devices.
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2096159
Jira: OAMG-7025
---
.../el8toel9/actors/checkvdo/actor.py | 94 +++++++----
.../actors/checkvdo/libraries/checkvdo.py | 155 ++++++++++--------
.../checkvdo/tests/unit_test_checkvdo.py | 44 +++--
3 files changed, 183 insertions(+), 110 deletions(-)
diff --git a/repos/system_upgrade/el8toel9/actors/checkvdo/actor.py b/repos/system_upgrade/el8toel9/actors/checkvdo/actor.py
index 4158253a..d43bac0b 100644
--- a/repos/system_upgrade/el8toel9/actors/checkvdo/actor.py
+++ b/repos/system_upgrade/el8toel9/actors/checkvdo/actor.py
@@ -32,12 +32,24 @@ class CheckVdo(Actor):
If the VdoConversionInfo model indicates unexpected errors occurred during
scanning CheckVdo will produce appropriate inhibitory reports.
- Lastly, if the VdoConversionInfo model indicates conditions exist where VDO
- devices could exist but the necessary software to check was not installed
- on the system CheckVdo will present a dialog to the user. This dialog will
- ask the user to either install the required software if the user knows or
- is unsure that VDO devices exist or to approve the continuation of the
- upgrade if the user is certain that no VDO devices exist.
+ If the VdoConversionInfo model indicates conditions exist where VDO devices
+ could exist but the necessary software to check was not installed on the
+ system CheckVdo will present a dialog to the user. This dialog will ask the
+ user to either install the required software if the user knows or is unsure
+ that VDO devices exist or to approve the continuation of the upgrade if the
+ user is certain that either there are no VDO devices present or that all
+ VDO devices have been successfully converted.
+
+ To maximize safety CheckVdo operates against all block devices which
+ match the criteria for potential VDO devices. Given the dynamic nature
+ of device presence within a system some devices which may have been present
+ during leapp discovery may not be present when CheckVdo runs. As CheckVdo
+ defaults to producing inhibitory reports if a device cannot be checked
+ (for any reason) this dynamism may be problematic. To prevent CheckVdo
+ producing an inhibitory report for devices which are dynamically no longer
+ present within the system the user may answer the previously mentioned
+ dialog in the affirmative when the user knows that all VDO devices have
+ been converted. This will circumvent checks of block devices.
"""
name = 'check_vdo'
@@ -50,37 +62,55 @@ class CheckVdo(Actor):
reason='Confirmation',
components=(
BooleanComponent(
- key='no_vdo_devices',
- label='Are there no VDO devices on the system?',
- description='Enter True if there are no VDO devices on '
- 'the system and False continue the upgrade. '
- 'If the system has no VDO devices, then it '
- 'is safe to continue the upgrade. If there '
- 'are VDO devices they must all be converted '
- 'to LVM management before the upgrade can '
- 'proceed.',
- reason='Based on installed packages it is possible that '
- 'VDO devices exist on the system. All VDO devices '
- 'must be converted to being managed by LVM before '
- 'the upgrade occurs. Because the \'vdo\' package '
- 'is not installed, Leapp cannot determine whether '
- 'any VDO devices exist that have not yet been '
- 'converted. If the devices are not converted and '
- 'the upgrade proceeds the data on unconverted VDO '
- 'devices will be inaccessible. If you have any '
- 'doubts you should choose to install the \'vdo\' '
- 'package and re-run the upgrade process to check '
- 'for unconverted VDO devices. If you are certain '
- 'that the system has no VDO devices or that all '
- 'VDO devices have been converted to LVM management '
- 'you may opt to allow the upgrade to proceed.'
+ key='confirm',
+ label='Are all VDO devices, if any, successfully converted to LVM management?',
+ description='Enter True if no VDO devices are present '
+ 'on the system or all VDO devices on the system '
+ 'have been successfully converted to LVM '
+ 'management. '
+ 'Entering True will circumvent check of failures '
+ 'and undetermined devices. '
+ 'Recognized VDO devices that have not been '
+ 'converted to LVM management can still block '
+ 'the upgrade despite the answer.'
+ 'All VDO devices must be converted to LVM '
+ 'management before upgrading.',
+ reason='To maximize safety all block devices on a system '
+ 'that meet the criteria as possible VDO devices '
+ 'are checked to verify that, if VDOs, they have '
+ 'been converted to LVM management. '
+ 'If the devices are not converted and the upgrade '
+ 'proceeds the data on unconverted VDO devices will '
+ 'be inaccessible. '
+ 'In order to perform checking the \'vdo\' package '
+ 'must be installed. '
+ 'If the \'vdo\' package is not installed and there '
+ 'are any doubts the \'vdo\' package should be '
+ 'installed and the upgrade process re-run to check '
+ 'for unconverted VDO devices. '
+ 'If the check of any device fails for any reason '
+ 'an upgrade inhibiting report is generated. '
+ 'This may be problematic if devices are '
+ 'dynamically removed from the system subsequent to '
+ 'having been identified during device discovery. '
+ 'If it is certain that all VDO devices have been '
+ 'successfully converted to LVM management this '
+ 'dialog may be answered in the affirmative which '
+ 'will circumvent block device checking.'
),
)
),
)
+ _asked_answer = False
+ _vdo_answer = None
- def get_no_vdo_devices_response(self):
- return self.get_answers(self.dialogs[0]).get('no_vdo_devices')
+ def get_vdo_answer(self):
+ if not self._asked_answer:
+ self._asked_answer = True
+ # calling this multiple times could lead to possible issues
+ # or at least in redundant reports
+ self._vdo_answer = self.get_answers(self.dialogs[0]).get('confirm')
+ return self._vdo_answer
def process(self):
for conversion_info in self.consume(VdoConversionInfo):
diff --git a/repos/system_upgrade/el8toel9/actors/checkvdo/libraries/checkvdo.py b/repos/system_upgrade/el8toel9/actors/checkvdo/libraries/checkvdo.py
index 135a279d..3b161c9b 100644
--- a/repos/system_upgrade/el8toel9/actors/checkvdo/libraries/checkvdo.py
+++ b/repos/system_upgrade/el8toel9/actors/checkvdo/libraries/checkvdo.py
@@ -1,10 +1,35 @@
from leapp import reporting
from leapp.libraries.stdlib import api
-_report_title = reporting.Title('VDO devices migration to LVM management')
+def _report_skip_check():
+ if not api.current_actor().get_vdo_answer():
+ return
+
+ summary = ('User has asserted all VDO devices on the system have been '
+ 'successfully converted to LVM management or no VDO '
+ 'devices are present.')
+ reporting.create_report([
+ reporting.Title('Skipping the VDO check of block devices'),
+ reporting.Summary(summary),
+ reporting.Severity(reporting.Severity.INFO),
+ reporting.Groups([reporting.Groups.SERVICES, reporting.Groups.DRIVERS]),
+ ])
+
+
+def _process_failed_check_devices(conversion_info):
+ # Post-conversion VDOs that were not successfully checked for having
+ # completed the migration to LVM management.
+ # Return True if failed checks detected
+ devices = [x for x in conversion_info.post_conversion if (not x.complete) and x.check_failed]
+ devices += [x for x in conversion_info.undetermined_conversion if x.check_failed]
+ if not devices:
+ return False
+
+ if api.current_actor().get_vdo_answer():
+ # User asserted all possible VDO should be already converted - skip
+ return True
-def _create_unexpected_resuilt_report(devices):
names = [x.name for x in devices]
multiple = len(names) > 1
summary = ['Unexpected result checking device{0}'.format('s' if multiple else '')]
@@ -16,13 +41,14 @@ def _create_unexpected_resuilt_report(devices):
'and re-run the upgrade.'))
reporting.create_report([
- _report_title,
+ reporting.Title('Checking VDO conversion to LVM management of block devices failed'),
reporting.Summary(summary),
reporting.Severity(reporting.Severity.HIGH),
reporting.Groups([reporting.Groups.SERVICES, reporting.Groups.DRIVERS]),
reporting.Remediation(hint=remedy_hint),
reporting.Groups([reporting.Groups.INHIBITOR])
])
+ return True
def _process_post_conversion_vdos(vdos):
@@ -32,23 +58,28 @@ def _process_post_conversion_vdos(vdos):
if post_conversion:
devices = [x.name for x in post_conversion]
multiple = len(devices) > 1
- summary = ''.join(('VDO device{0} \'{1}\' '.format('s' if multiple else '',
- ', '.join(devices)),
- 'did not complete migration to LVM management. ',
- 'The named device{0} '.format('s' if multiple else ''),
- '{0} successfully converted at the '.format('were' if multiple else 'was'),
- 'device format level; however, the expected LVM management '
- 'portion of the conversion did not take place. This '
- 'indicates that an exceptional condition (for example, a '
- 'system crash) likely occurred during the conversion '
- 'process. The LVM portion of the conversion must be '
- 'performed in order for upgrade to proceed.'))
+ summary = (
+ 'VDO device{s_suffix} \'{devices_str}\' '
+ 'did not complete migration to LVM management. '
+ 'The named device{s_suffix} {was_were} successfully converted '
+ 'at the device format level; however, the expected LVM management '
+ 'portion of the conversion did not take place. This indicates '
+ 'that an exceptional condition (for example, a system crash) '
+ 'likely occurred during the conversion process. The LVM portion '
+ 'of the conversion must be performed in order for upgrade '
+ 'to proceed.'
+ .format(
+ s_suffix='s' if multiple else '',
+ devices_str=', '.join(devices),
+ was_were='were' if multiple else 'was',
+ )
+ )
remedy_hint = ('Consult the VDO to LVM conversion process '
'documentation for how to complete the conversion.')
reporting.create_report([
- _report_title,
+ reporting.Title('Detected VDO devices that have not finished the conversion to LVM management.'),
reporting.Summary(summary),
reporting.Severity(reporting.Severity.HIGH),
reporting.Groups([reporting.Groups.SERVICES, reporting.Groups.DRIVERS]),
@@ -56,33 +87,32 @@ def _process_post_conversion_vdos(vdos):
reporting.Groups([reporting.Groups.INHIBITOR])
])
- # Post-conversion VDOs that were not successfully checked for having
- # completed the migration to LVM management.
- post_conversion = [x for x in vdos if (not x.complete) and x.check_failed]
- if post_conversion:
- _create_unexpected_resuilt_report(post_conversion)
-
def _process_pre_conversion_vdos(vdos):
# Pre-conversion VDOs generate an inhibiting report.
if vdos:
devices = [x.name for x in vdos]
multiple = len(devices) > 1
- summary = ''.join(('VDO device{0} \'{1}\' require{2} '.format('s' if multiple else '',
- ', '.join(devices),
- '' if multiple else 's'),
- 'migration to LVM management.'
- 'After performing the upgrade VDO devices can only be '
- 'managed via LVM. Any VDO device not currently managed '
- 'by LVM must be converted to LVM management before '
- 'upgrading. The data on any VDO device not converted to '
- 'LVM management will be inaccessible after upgrading.'))
+ summary = (
+ 'VDO device{s_suffix} \'{devices_str}\' require{s_suffix_verb} '
+ 'migration to LVM management.'
+ 'After performing the upgrade VDO devices can only be '
+ 'managed via LVM. Any VDO device not currently managed '
+ 'by LVM must be converted to LVM management before '
+ 'upgrading. The data on any VDO device not converted to '
+ 'LVM management will be inaccessible after upgrading.'
+ .format(
+ s_suffix='s' if multiple else '',
+ s_suffix_verb='' if multiple else 's',
+ devices_str=', '.join(devices),
+ )
+ )
remedy_hint = ('Consult the VDO to LVM conversion process '
'documentation for how to perform the conversion.')
reporting.create_report([
- _report_title,
+ reporting.Title('Detected VDO devices not managed by LVM'),
reporting.Summary(summary),
reporting.Severity(reporting.Severity.HIGH),
reporting.Groups([reporting.Groups.SERVICES, reporting.Groups.DRIVERS]),
@@ -104,43 +134,40 @@ def _process_undetermined_conversion_devices(devices):
# A device can only end up as undetermined either via a check that failed
# or if it was not checked. If the info for the device indicates that it
# did not have a check failure that means it was not checked.
-
- checked = [x for x in devices if x.check_failed]
- if checked:
- _create_unexpected_resuilt_report(checked)
+ # Return True if failed checks detected
unchecked = [x for x in devices if not x.check_failed]
- if unchecked:
- no_vdo_devices = api.current_actor().get_no_vdo_devices_response()
- if no_vdo_devices:
- summary = ('User has asserted there are no VDO devices on the '
- 'system in need of conversion to LVM management.')
-
- reporting.create_report([
- _report_title,
- reporting.Summary(summary),
- reporting.Severity(reporting.Severity.INFO),
- reporting.Groups([reporting.Groups.SERVICES, reporting.Groups.DRIVERS]),
- reporting.Groups([])
- ])
- elif no_vdo_devices is False:
- summary = ('User has opted to inhibit upgrade in regard to '
- 'potential VDO devices requiring conversion to LVM '
- 'management.')
- remedy_hint = ('Install the \'vdo\' package and re-run upgrade to '
- 'check for VDO devices requiring conversion.')
-
- reporting.create_report([
- _report_title,
- reporting.Summary(summary),
- reporting.Severity(reporting.Severity.HIGH),
- reporting.Groups([reporting.Groups.SERVICES, reporting.Groups.DRIVERS]),
- reporting.Remediation(hint=remedy_hint),
- reporting.Groups([reporting.Groups.INHIBITOR])
- ])
+ if not unchecked:
+ return False
+
+ if api.current_actor().get_vdo_answer():
+ # User asserted no VDO devices are present
+ return True
+
+ summary = (
+ 'The check of block devices could not be performed as the \'vdo\' '
+ 'package is not installed. All VDO devices must be converted to '
+ 'LVM management prior to the upgrade to prevent the loss of data.')
+ remedy_hint = ('Install the \'vdo\' package and re-run upgrade to '
+ 'check for VDO devices requiring conversion or confirm '
+ 'that all VDO devices, if any, are managed by LVM.')
+
+ reporting.create_report([
+ reporting.Title('Cannot perform the VDO check of block devices'),
+ reporting.Summary(summary),
+ reporting.Severity(reporting.Severity.HIGH),
+ reporting.Groups([reporting.Groups.SERVICES, reporting.Groups.DRIVERS]),
+ reporting.Remediation(hint=remedy_hint),
+ reporting.Groups([reporting.Groups.INHIBITOR])
+ ])
+ return True
def check_vdo(conversion_info):
_process_pre_conversion_vdos(conversion_info.pre_conversion)
_process_post_conversion_vdos(conversion_info.post_conversion)
- _process_undetermined_conversion_devices(conversion_info.undetermined_conversion)
+
+ detected_under_dev = _process_undetermined_conversion_devices(conversion_info.undetermined_conversion)
+ detected_failed_check = _process_failed_check_devices(conversion_info)
+ if detected_under_dev or detected_failed_check:
+ _report_skip_check()
diff --git a/repos/system_upgrade/el8toel9/actors/checkvdo/tests/unit_test_checkvdo.py b/repos/system_upgrade/el8toel9/actors/checkvdo/tests/unit_test_checkvdo.py
index e0ac39d0..865e036f 100644
--- a/repos/system_upgrade/el8toel9/actors/checkvdo/tests/unit_test_checkvdo.py
+++ b/repos/system_upgrade/el8toel9/actors/checkvdo/tests/unit_test_checkvdo.py
@@ -13,14 +13,16 @@ from leapp.models import (
from leapp.utils.report import is_inhibitor
-class MockedActorNoVdoDevices(CurrentActorMocked):
- def get_no_vdo_devices_response(self):
- return True
+# Mock actor base for CheckVdo tests.
+class MockedActorCheckVdo(CurrentActorMocked):
+ def get_vdo_answer(self):
+ return False
-class MockedActorSomeVdoDevices(CurrentActorMocked):
- def get_no_vdo_devices_response(self):
- return False
+# Mock actor for all_vdo_converted dialog response.
+class MockedActorAllVdoConvertedTrue(MockedActorCheckVdo):
+ def get_vdo_answer(self):
+ return True
def aslist(f):
@@ -66,6 +68,7 @@ def _undetermined_conversion_vdos(count=0, failing=False, start_char='a'):
# No VDOs tests.
def test_no_vdos(monkeypatch):
+ monkeypatch.setattr(api, 'current_actor', MockedActorCheckVdo())
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
checkvdo.check_vdo(
VdoConversionInfo(post_conversion=_post_conversion_vdos(),
@@ -76,6 +79,7 @@ def test_no_vdos(monkeypatch):
# Concurrent pre- and post-conversion tests.
def test_both_conversion_vdo_incomplete(monkeypatch):
+ monkeypatch.setattr(api, 'current_actor', MockedActorCheckVdo())
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
post_count = 7
checkvdo.check_vdo(
@@ -89,6 +93,7 @@ def test_both_conversion_vdo_incomplete(monkeypatch):
# Post-conversion tests.
def test_post_conversion_multiple_vdo_incomplete(monkeypatch):
+ monkeypatch.setattr(api, 'current_actor', MockedActorCheckVdo())
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
checkvdo.check_vdo(
VdoConversionInfo(post_conversion=_post_conversion_vdos(7, 5),
@@ -100,6 +105,7 @@ def test_post_conversion_multiple_vdo_incomplete(monkeypatch):
def test_post_conversion_multiple_vdo_complete(monkeypatch):
+ monkeypatch.setattr(api, 'current_actor', MockedActorCheckVdo())
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
checkvdo.check_vdo(
VdoConversionInfo(post_conversion=_post_conversion_vdos(7, 7),
@@ -109,6 +115,7 @@ def test_post_conversion_multiple_vdo_complete(monkeypatch):
def test_post_conversion_single_vdo_incomplete(monkeypatch):
+ monkeypatch.setattr(api, 'current_actor', MockedActorCheckVdo())
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
checkvdo.check_vdo(
VdoConversionInfo(post_conversion=_post_conversion_vdos(1),
@@ -121,6 +128,7 @@ def test_post_conversion_single_vdo_incomplete(monkeypatch):
def test_post_conversion_single_check_failing(monkeypatch):
+ monkeypatch.setattr(api, 'current_actor', MockedActorCheckVdo())
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
checkvdo.check_vdo(
VdoConversionInfo(post_conversion=_post_conversion_vdos(2, complete=1, failing=1),
@@ -135,6 +143,7 @@ def test_post_conversion_single_check_failing(monkeypatch):
def test_post_conversion_multiple_check_failing(monkeypatch):
+ monkeypatch.setattr(api, 'current_actor', MockedActorCheckVdo())
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
checkvdo.check_vdo(
VdoConversionInfo(post_conversion=_post_conversion_vdos(7, complete=4, failing=3),
@@ -147,6 +156,7 @@ def test_post_conversion_multiple_check_failing(monkeypatch):
def test_post_conversion_incomplete_and_check_failing(monkeypatch):
+ monkeypatch.setattr(api, 'current_actor', MockedActorCheckVdo())
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
checkvdo.check_vdo(
VdoConversionInfo(post_conversion=_post_conversion_vdos(2, failing=1),
@@ -158,6 +168,7 @@ def test_post_conversion_incomplete_and_check_failing(monkeypatch):
# Pre-conversion tests.
def test_pre_conversion_multiple_vdo_incomplete(monkeypatch):
+ monkeypatch.setattr(api, 'current_actor', MockedActorCheckVdo())
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
checkvdo.check_vdo(
VdoConversionInfo(post_conversion=_post_conversion_vdos(),
@@ -169,6 +180,7 @@ def test_pre_conversion_multiple_vdo_incomplete(monkeypatch):
def test_pre_conversion_single_vdo_incomplete(monkeypatch):
+ monkeypatch.setattr(api, 'current_actor', MockedActorCheckVdo())
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
checkvdo.check_vdo(
VdoConversionInfo(post_conversion=_post_conversion_vdos(),
@@ -182,6 +194,7 @@ def test_pre_conversion_single_vdo_incomplete(monkeypatch):
# Undetermined tests.
def test_undetermined_single_check_failing(monkeypatch):
+ monkeypatch.setattr(api, 'current_actor', MockedActorCheckVdo())
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
checkvdo.check_vdo(
VdoConversionInfo(post_conversion=_post_conversion_vdos(),
@@ -196,6 +209,7 @@ def test_undetermined_single_check_failing(monkeypatch):
def test_undetermined_multiple_check_failing(monkeypatch):
+ monkeypatch.setattr(api, 'current_actor', MockedActorCheckVdo())
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
checkvdo.check_vdo(
VdoConversionInfo(post_conversion=_post_conversion_vdos(),
@@ -207,27 +221,29 @@ def test_undetermined_multiple_check_failing(monkeypatch):
'Unexpected result checking devices')
-def test_undetermined_multiple_no_check_no_vdos(monkeypatch):
- monkeypatch.setattr(api, 'current_actor', MockedActorNoVdoDevices())
+def test_undetermined_multiple_no_check(monkeypatch):
+ monkeypatch.setattr(api, 'current_actor', MockedActorCheckVdo())
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
checkvdo.check_vdo(
VdoConversionInfo(post_conversion=_post_conversion_vdos(),
pre_conversion=_pre_conversion_vdos(),
undetermined_conversion=_undetermined_conversion_vdos(3)))
assert reporting.create_report.called == 1
- assert not is_inhibitor(reporting.create_report.report_fields)
+ assert is_inhibitor(reporting.create_report.report_fields)
assert reporting.create_report.report_fields['summary'].startswith(
- 'User has asserted there are no VDO devices')
+ 'The check of block devices could not be performed as the \'vdo\' '
+ 'package is not installed.')
-def test_undetermined_multiple_no_check_some_vdos(monkeypatch):
- monkeypatch.setattr(api, 'current_actor', MockedActorSomeVdoDevices())
+# all_vdo_converted test.
+def test_all_vdo_converted_true(monkeypatch):
+ monkeypatch.setattr(api, 'current_actor', MockedActorAllVdoConvertedTrue())
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
checkvdo.check_vdo(
VdoConversionInfo(post_conversion=_post_conversion_vdos(),
pre_conversion=_pre_conversion_vdos(),
undetermined_conversion=_undetermined_conversion_vdos(3)))
assert reporting.create_report.called == 1
- assert is_inhibitor(reporting.create_report.report_fields)
+ assert not is_inhibitor(reporting.create_report.report_fields)
assert reporting.create_report.report_fields['summary'].startswith(
- 'User has opted to inhibit upgrade')
+ 'User has asserted all VDO devices on the system have been successfully converted')
--
2.38.1

View File

@ -1,383 +0,0 @@
From d2d7999744e97776eda664592ac0cc7ec5747b99 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Thu, 8 Sep 2022 16:27:10 +0200
Subject: [PATCH 09/32] Add actors for checking and setting systemd services
states
Introduces a new `set_systemd_services_state` actor, which
enables/disables systemd services according to received
`SystemdServicesTasks` messages and a `check_systemd_services_tasks`
actor which checks tasks in the `TargetTransactionCheckPhase` and
inhibits upgrade if there are conflicts.
Actors are in a new directory `systemd`.
---
.../systemd/checksystemdservicetasks/actor.py | 30 +++++++
.../libraries/checksystemdservicetasks.py | 36 ++++++++
.../tests/test_checksystemdservicestasks.py | 88 +++++++++++++++++++
.../systemd/setsystemdservicesstates/actor.py | 18 ++++
.../libraries/setsystemdservicesstate.py | 31 +++++++
.../tests/test_setsystemdservicesstate.py | 83 +++++++++++++++++
.../common/models/systemdservices.py | 22 +++++
7 files changed, 308 insertions(+)
create mode 100644 repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/actor.py
create mode 100644 repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/libraries/checksystemdservicetasks.py
create mode 100644 repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/tests/test_checksystemdservicestasks.py
create mode 100644 repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/actor.py
create mode 100644 repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/libraries/setsystemdservicesstate.py
create mode 100644 repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/tests/test_setsystemdservicesstate.py
create mode 100644 repos/system_upgrade/common/models/systemdservices.py
diff --git a/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/actor.py b/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/actor.py
new file mode 100644
index 00000000..2df995ee
--- /dev/null
+++ b/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/actor.py
@@ -0,0 +1,30 @@
+from leapp.actors import Actor
+from leapp.libraries.actor import checksystemdservicetasks
+from leapp.models import SystemdServicesTasks
+from leapp.reporting import Report
+from leapp.tags import IPUWorkflowTag, TargetTransactionChecksPhaseTag
+
+
+class CheckSystemdServicesTasks(Actor):
+ """
+ Inhibits upgrade if SystemdServicesTasks tasks are in conflict
+
+ There is possibility, that SystemdServicesTasks messages with conflicting
+ requested service states could be produced. For example a service is
+ requested to be both enabled and disabled. This actor inhibits upgrade in
+ such cases.
+
+ Note: We expect that SystemdServicesTasks could be produced even after the
+ TargetTransactionChecksPhase (e.g. during the ApplicationPhase). The
+ purpose of this actor is to report collisions in case we can already detect
+ them. In case of conflicts caused by produced messages later we just log
+ the collisions and the services will end up disabled.
+ """
+
+ name = 'check_systemd_services_tasks'
+ consumes = (SystemdServicesTasks,)
+ produces = (Report,)
+ tags = (TargetTransactionChecksPhaseTag, IPUWorkflowTag)
+
+ def process(self):
+ checksystemdservicetasks.check_conflicts()
diff --git a/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/libraries/checksystemdservicetasks.py b/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/libraries/checksystemdservicetasks.py
new file mode 100644
index 00000000..75833e4f
--- /dev/null
+++ b/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/libraries/checksystemdservicetasks.py
@@ -0,0 +1,36 @@
+from leapp import reporting
+from leapp.libraries.stdlib import api
+from leapp.models import SystemdServicesTasks
+
+FMT_LIST_SEPARATOR = '\n - '
+
+
+def _printable_conflicts(conflicts):
+ return FMT_LIST_SEPARATOR + FMT_LIST_SEPARATOR.join(sorted(conflicts))
+
+
+def _inhibit_upgrade_with_conflicts(conflicts):
+ summary = (
+ 'The requested states for systemd services on the target system are in conflict.'
+ ' The following systemd services were requested to be both enabled and disabled on the target system: {}'
+ )
+ report = [
+ reporting.Title('Conflicting requirements of systemd service states'),
+ reporting.Summary(summary.format(_printable_conflicts(conflicts))),
+ reporting.Severity(reporting.Severity.HIGH),
+ reporting.Groups([reporting.Groups.SANITY]),
+ reporting.Groups([reporting.Groups.INHIBITOR]),
+ ]
+ reporting.create_report(report)
+
+
+def check_conflicts():
+ services_to_enable = set()
+ services_to_disable = set()
+ for task in api.consume(SystemdServicesTasks):
+ services_to_enable.update(task.to_enable)
+ services_to_disable.update(task.to_disable)
+
+ conflicts = services_to_enable.intersection(services_to_disable)
+ if conflicts:
+ _inhibit_upgrade_with_conflicts(conflicts)
diff --git a/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/tests/test_checksystemdservicestasks.py b/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/tests/test_checksystemdservicestasks.py
new file mode 100644
index 00000000..36ded92f
--- /dev/null
+++ b/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/tests/test_checksystemdservicestasks.py
@@ -0,0 +1,88 @@
+import pytest
+
+from leapp import reporting
+from leapp.libraries.actor import checksystemdservicetasks
+from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked
+from leapp.libraries.stdlib import api
+from leapp.models import SystemdServicesTasks
+
+
+@pytest.mark.parametrize(
+ ('tasks', 'should_inhibit'),
+ [
+ (
+ [SystemdServicesTasks(to_enable=['hello.service'], to_disable=['hello.service'])],
+ True
+ ),
+ (
+ [SystemdServicesTasks(to_enable=['hello.service', 'world.service'],
+ to_disable=['hello.service'])],
+ True
+ ),
+ (
+ [
+ SystemdServicesTasks(to_enable=['hello.service']),
+ SystemdServicesTasks(to_disable=['hello.service'])
+ ],
+ True
+ ),
+ (
+ [SystemdServicesTasks(to_enable=['hello.service'], to_disable=['world.service'])],
+ False
+ ),
+ (
+ [
+ SystemdServicesTasks(to_enable=['hello.service']),
+ SystemdServicesTasks(to_disable=['world.service'])
+ ],
+ False
+ ),
+ (
+ [
+ SystemdServicesTasks(to_enable=['hello.service', 'world.service']),
+ SystemdServicesTasks(to_disable=['world.service', 'httpd.service'])
+ ],
+ True
+ ),
+ ]
+)
+def test_conflicts_detected(monkeypatch, tasks, should_inhibit):
+
+ created_reports = create_report_mocked()
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=tasks))
+ monkeypatch.setattr(reporting, 'create_report', created_reports)
+
+ checksystemdservicetasks.check_conflicts()
+
+ assert bool(created_reports.called) == should_inhibit
+
+
+@pytest.mark.parametrize(
+ ('tasks', 'expected_reported'),
+ [
+ (
+ [SystemdServicesTasks(to_enable=['world.service', 'httpd.service', 'hello.service'],
+ to_disable=['hello.service', 'world.service', 'test.service'])],
+ ['world.service', 'hello.service']
+ ),
+ (
+ [
+ SystemdServicesTasks(to_enable=['hello.service', 'httpd.service'],
+ to_disable=['world.service']),
+ SystemdServicesTasks(to_enable=['world.service', 'httpd.service'],
+ to_disable=['hello.service', 'test.service'])
+ ],
+ ['world.service', 'hello.service']
+ ),
+ ]
+)
+def test_coflict_reported(monkeypatch, tasks, expected_reported):
+
+ created_reports = create_report_mocked()
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=tasks))
+ monkeypatch.setattr(reporting, 'create_report', created_reports)
+
+ checksystemdservicetasks.check_conflicts()
+
+ report_summary = reporting.create_report.report_fields['summary']
+ assert all(service in report_summary for service in expected_reported)
diff --git a/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/actor.py b/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/actor.py
new file mode 100644
index 00000000..1709091e
--- /dev/null
+++ b/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/actor.py
@@ -0,0 +1,18 @@
+from leapp.actors import Actor
+from leapp.libraries.actor import setsystemdservicesstate
+from leapp.models import SystemdServicesTasks
+from leapp.tags import FinalizationPhaseTag, IPUWorkflowTag
+
+
+class SetSystemdServicesState(Actor):
+ """
+ According to input messages sets systemd services states on the target system
+ """
+
+ name = 'set_systemd_services_state'
+ consumes = (SystemdServicesTasks,)
+ produces = ()
+ tags = (FinalizationPhaseTag, IPUWorkflowTag)
+
+ def process(self):
+ setsystemdservicesstate.process()
diff --git a/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/libraries/setsystemdservicesstate.py b/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/libraries/setsystemdservicesstate.py
new file mode 100644
index 00000000..01272438
--- /dev/null
+++ b/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/libraries/setsystemdservicesstate.py
@@ -0,0 +1,31 @@
+from leapp.libraries.stdlib import api, CalledProcessError, run
+from leapp.models import SystemdServicesTasks
+
+
+def _try_set_service_state(command, service):
+ try:
+ # it is possible to call this on multiple units at once,
+ # but failing to enable one service would cause others to not enable as well
+ run(['systemctl', command, service])
+ except CalledProcessError as err:
+ api.current_logger().error('Failed to {} systemd unit "{}". Message: {}'.format(command, service, str(err)))
+ # TODO(mmatuska) produce post-upgrade report
+
+
+def process():
+ services_to_enable = set()
+ services_to_disable = set()
+ for task in api.consume(SystemdServicesTasks):
+ services_to_enable.update(task.to_enable)
+ services_to_disable.update(task.to_disable)
+
+ intersection = services_to_enable.intersection(services_to_disable)
+ for service in intersection:
+ msg = 'Attempted to both enable and disable systemd service "{}", service will be disabled.'.format(service)
+ api.current_logger().error(msg)
+
+ for service in services_to_enable:
+ _try_set_service_state('enable', service)
+
+ for service in services_to_disable:
+ _try_set_service_state('disable', service)
diff --git a/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/tests/test_setsystemdservicesstate.py b/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/tests/test_setsystemdservicesstate.py
new file mode 100644
index 00000000..dd153329
--- /dev/null
+++ b/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/tests/test_setsystemdservicesstate.py
@@ -0,0 +1,83 @@
+import pytest
+
+from leapp.libraries import stdlib
+from leapp.libraries.actor import setsystemdservicesstate
+from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked
+from leapp.libraries.stdlib import api, CalledProcessError
+from leapp.models import SystemdServicesTasks
+
+
+class MockedRun(object):
+ def __init__(self):
+ self.commands = []
+
+ def __call__(self, cmd, *args, **kwargs):
+ self.commands.append(cmd)
+ return {}
+
+
+@pytest.mark.parametrize(
+ ('msgs', 'expected_calls'),
+ [
+ (
+ [SystemdServicesTasks(to_enable=['hello.service'],
+ to_disable=['getty.service'])],
+ [['systemctl', 'enable', 'hello.service'], ['systemctl', 'disable', 'getty.service']]
+ ),
+ (
+ [SystemdServicesTasks(to_disable=['getty.service'])],
+ [['systemctl', 'disable', 'getty.service']]
+ ),
+ (
+ [SystemdServicesTasks(to_enable=['hello.service'])],
+ [['systemctl', 'enable', 'hello.service']]
+ ),
+ (
+ [SystemdServicesTasks()],
+ []
+ ),
+ ]
+)
+def test_process(monkeypatch, msgs, expected_calls):
+ mocked_run = MockedRun()
+ monkeypatch.setattr(setsystemdservicesstate, 'run', mocked_run)
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs))
+
+ setsystemdservicesstate.process()
+
+ assert mocked_run.commands == expected_calls
+
+
+def test_process_invalid(monkeypatch):
+
+ def mocked_run(cmd, *args, **kwargs):
+ if cmd == ['systemctl', 'enable', 'invalid.service']:
+ message = 'Command {0} failed with exit code {1}.'.format(str(cmd), 1)
+ raise CalledProcessError(message, cmd, 1)
+
+ msgs = [SystemdServicesTasks(to_enable=['invalid.service'])]
+
+ monkeypatch.setattr(setsystemdservicesstate, 'run', mocked_run)
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs))
+ monkeypatch.setattr(api, 'current_logger', logger_mocked())
+
+ setsystemdservicesstate.process()
+
+ expect_msg = ("Failed to enable systemd unit \"invalid.service\". Message:"
+ " Command ['systemctl', 'enable', 'invalid.service'] failed with exit code 1.")
+ assert expect_msg in api.current_logger.errmsg
+
+
+def test_enable_disable_conflict_logged(monkeypatch):
+ msgs = [SystemdServicesTasks(to_enable=['hello.service'],
+ to_disable=['hello.service'])]
+ mocked_run = MockedRun()
+ monkeypatch.setattr(setsystemdservicesstate, 'run', mocked_run)
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs))
+ monkeypatch.setattr(api, 'current_logger', logger_mocked())
+
+ setsystemdservicesstate.process()
+
+ expect_msg = ('Attempted to both enable and disable systemd service "hello.service",'
+ ' service will be disabled.')
+ assert expect_msg in api.current_logger.errmsg
diff --git a/repos/system_upgrade/common/models/systemdservices.py b/repos/system_upgrade/common/models/systemdservices.py
new file mode 100644
index 00000000..6c7d4a1d
--- /dev/null
+++ b/repos/system_upgrade/common/models/systemdservices.py
@@ -0,0 +1,22 @@
+from leapp.models import fields, Model
+from leapp.topics import SystemInfoTopic
+
+
+class SystemdServicesTasks(Model):
+ topic = SystemInfoTopic
+
+ to_enable = fields.List(fields.String(), default=[])
+ """
+ List of systemd services to enable on the target system
+
+ Masked services will not be enabled. Attempting to enable a masked service
+ will be evaluated by systemctl as usually. The error will be logged and the
+ upgrade process will continue.
+ """
+ to_disable = fields.List(fields.String(), default=[])
+ """
+ List of systemd services to disable on the target system
+ """
+
+ # Note: possible extension in case of requirement (currently not implemented):
+ # to_unmask = fields.List(fields.String(), default=[])
--
2.38.1

View File

@ -1,92 +0,0 @@
From 004e7f3515cc2daa1a7ca72f7c8f5becb945ff17 Mon Sep 17 00:00:00 2001
From: Miroslav Lichvar <mlichvar@redhat.com>
Date: Mon, 19 Sep 2022 15:16:46 +0200
Subject: [PATCH 10/32] migratentp: Replace reports with log messages
Reports are supposed to contain different information.
---
.../actors/migratentp/libraries/migratentp.py | 27 ++++---------------
.../migratentp/tests/unit_test_migratentp.py | 10 -------
2 files changed, 5 insertions(+), 32 deletions(-)
diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/libraries/migratentp.py b/repos/system_upgrade/el7toel8/actors/migratentp/libraries/migratentp.py
index deeaaccd..a0ad634b 100644
--- a/repos/system_upgrade/el7toel8/actors/migratentp/libraries/migratentp.py
+++ b/repos/system_upgrade/el7toel8/actors/migratentp/libraries/migratentp.py
@@ -2,11 +2,8 @@ import base64
import io
import tarfile
-from leapp import reporting
from leapp.exceptions import StopActorExecutionError
-from leapp.libraries.stdlib import CalledProcessError, run
-
-COMMON_REPORT_TAGS = [reporting.Groups.SERVICES, reporting.Groups.TIME_MANAGEMENT]
+from leapp.libraries.stdlib import api, CalledProcessError, run
def extract_tgz64(s):
@@ -82,21 +79,7 @@ def migrate_ntp(migrate_services, config_tgz64):
ignored_lines = ntp2chrony('/', ntp_conf, step_tickers)
- config_resources = [reporting.RelatedResource('file', mc) for mc in migrate_configs + [ntp_conf]]
- package_resources = [reporting.RelatedResource('package', p) for p in ['ntpd', 'chrony']]
-
- if not ignored_lines:
- reporting.create_report([
- reporting.Title('{} configuration migrated to chrony'.format(' and '.join(migrate_configs))),
- reporting.Summary('ntp2chrony executed successfully'),
- reporting.Severity(reporting.Severity.INFO),
- reporting.Groups(COMMON_REPORT_TAGS)
- ] + config_resources + package_resources)
-
- else:
- reporting.create_report([
- reporting.Title('{} configuration partially migrated to chrony'.format(' and '.join(migrate_configs))),
- reporting.Summary('Some lines in /etc/ntp.conf were ignored in migration (check /etc/chrony.conf)'),
- reporting.Severity(reporting.Severity.MEDIUM),
- reporting.Groups(COMMON_REPORT_TAGS)
- ] + config_resources + package_resources)
+ api.current_logger().info('Configuration files migrated to chrony: {}'.format(' '.join(migrate_configs)))
+ if ignored_lines:
+ api.current_logger().warning('Some lines in /etc/ntp.conf were ignored in migration'
+ ' (check /etc/chrony.conf)')
diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/tests/unit_test_migratentp.py b/repos/system_upgrade/el7toel8/actors/migratentp/tests/unit_test_migratentp.py
index 6ce4bb5b..fafff5e7 100644
--- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/unit_test_migratentp.py
+++ b/repos/system_upgrade/el7toel8/actors/migratentp/tests/unit_test_migratentp.py
@@ -55,7 +55,6 @@ def test_migration(monkeypatch):
(['ntp-wait'], ['chrony-wait'], 0),
(['ntpd', 'ntpdate', 'ntp-wait'], ['chronyd', 'chronyd', 'chrony-wait'], 1),
]:
- monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
monkeypatch.setattr(migratentp, 'extract_tgz64', extract_tgz64_mocked())
monkeypatch.setattr(migratentp, 'enable_service', enable_service_mocked())
monkeypatch.setattr(migratentp, 'write_file', write_file_mocked())
@@ -64,14 +63,6 @@ def test_migration(monkeypatch):
migratentp.migrate_ntp(ntp_services, 'abcdef')
if ntp_services:
- assert reporting.create_report.called == 1
- if ignored_lines > 0:
- assert 'configuration partially migrated to chrony' in \
- reporting.create_report.report_fields['title']
- else:
- assert 'configuration migrated to chrony' in \
- reporting.create_report.report_fields['title']
-
assert migratentp.extract_tgz64.called == 1
assert migratentp.extract_tgz64.s == 'abcdef'
assert migratentp.enable_service.called == len(chrony_services)
@@ -86,7 +77,6 @@ def test_migration(monkeypatch):
'/etc/ntp.conf' if 'ntpd' in ntp_services else '/etc/ntp.conf.nosources',
'/etc/ntp/step-tickers' if 'ntpdate' in ntp_services else '')
else:
- assert reporting.create_report.called == 0
assert migratentp.extract_tgz64.called == 0
assert migratentp.enable_service.called == 0
assert migratentp.write_file.called == 0
--
2.38.1

View File

@ -1,28 +0,0 @@
From 83dbc935d1ac32cbfeca7ba52da6bb4bbb965879 Mon Sep 17 00:00:00 2001
From: Miroslav Lichvar <mlichvar@redhat.com>
Date: Mon, 19 Sep 2022 15:35:43 +0200
Subject: [PATCH 11/32] migratentp: Catch more specific exception from
ntp2chrony
Catch OSError instead of Exception from ntp2chrony to avoid pylint
errors.
---
.../el7toel8/actors/migratentp/libraries/migratentp.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/libraries/migratentp.py b/repos/system_upgrade/el7toel8/actors/migratentp/libraries/migratentp.py
index a0ad634b..1bc59448 100644
--- a/repos/system_upgrade/el7toel8/actors/migratentp/libraries/migratentp.py
+++ b/repos/system_upgrade/el7toel8/actors/migratentp/libraries/migratentp.py
@@ -33,7 +33,7 @@ def ntp2chrony(root, ntp_conf, step_tickers):
ntp_configuration = ntp2chrony.NtpConfiguration(root, ntp_conf, step_tickers)
ntp_configuration.write_chrony_configuration('/etc/chrony.conf', '/etc/chrony.keys',
False, True)
- except Exception as e:
+ except OSError as e:
raise StopActorExecutionError('ntp2chrony failed: {}'.format(e))
# Return ignored lines from ntp.conf, except 'disable monitor' from
--
2.38.1

View File

@ -1,92 +0,0 @@
From 02dca0a6b721c89d125c521c7da5e85b89d136f7 Mon Sep 17 00:00:00 2001
From: Miroslav Lichvar <mlichvar@redhat.com>
Date: Wed, 14 Sep 2022 14:55:10 +0200
Subject: [PATCH 12/32] migratentp: Don't raise StopActorExecutionError
When a service cannot be enabled (e.g. due to masking) or when
ntp2chrony fails, log an error message instead of failing the migration.
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2089514
---
.../actors/migratentp/libraries/migratentp.py | 22 ++++++++++---------
.../migratentp/tests/unit_test_migratentp.py | 2 +-
2 files changed, 13 insertions(+), 11 deletions(-)
diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/libraries/migratentp.py b/repos/system_upgrade/el7toel8/actors/migratentp/libraries/migratentp.py
index 1bc59448..306ce09e 100644
--- a/repos/system_upgrade/el7toel8/actors/migratentp/libraries/migratentp.py
+++ b/repos/system_upgrade/el7toel8/actors/migratentp/libraries/migratentp.py
@@ -2,7 +2,6 @@ import base64
import io
import tarfile
-from leapp.exceptions import StopActorExecutionError
from leapp.libraries.stdlib import api, CalledProcessError, run
@@ -17,7 +16,7 @@ def enable_service(name):
try:
run(['systemctl', 'enable', '{}.service'.format(name)])
except CalledProcessError:
- raise StopActorExecutionError('Could not enable {} service'.format(name))
+ api.current_logger().error('Could not enable {} service'.format(name))
def write_file(name, content):
@@ -34,11 +33,12 @@ def ntp2chrony(root, ntp_conf, step_tickers):
ntp_configuration.write_chrony_configuration('/etc/chrony.conf', '/etc/chrony.keys',
False, True)
except OSError as e:
- raise StopActorExecutionError('ntp2chrony failed: {}'.format(e))
+ api.current_logger().error('ntp2chrony failed: {}'.format(e))
+ return False, set()
# Return ignored lines from ntp.conf, except 'disable monitor' from
# the default ntp.conf
- return set(ntp_configuration.ignored_lines) - set(['disable monitor'])
+ return True, set(ntp_configuration.ignored_lines) - set(['disable monitor'])
def migrate_ntp(migrate_services, config_tgz64):
@@ -61,7 +61,8 @@ def migrate_ntp(migrate_services, config_tgz64):
migrate_configs = []
for service in migrate_services:
if service not in service_map:
- raise StopActorExecutionError('Unknown service {}'.format(service))
+ api.current_logger().error('Unknown service {}'.format(service))
+ continue
enable_service(service_map[service][0])
if service_map[service][1]:
migrate_configs.append(service)
@@ -77,9 +78,10 @@ def migrate_ntp(migrate_services, config_tgz64):
step_tickers = '/etc/ntp/step-tickers' if 'ntpdate' in migrate_configs else ''
- ignored_lines = ntp2chrony('/', ntp_conf, step_tickers)
+ conf_migrated, ignored_lines = ntp2chrony('/', ntp_conf, step_tickers)
- api.current_logger().info('Configuration files migrated to chrony: {}'.format(' '.join(migrate_configs)))
- if ignored_lines:
- api.current_logger().warning('Some lines in /etc/ntp.conf were ignored in migration'
- ' (check /etc/chrony.conf)')
+ if conf_migrated:
+ api.current_logger().info('Configuration files migrated to chrony: {}'.format(' '.join(migrate_configs)))
+ if ignored_lines:
+ api.current_logger().warning('Some lines in /etc/ntp.conf were ignored in migration'
+ ' (check /etc/chrony.conf)')
diff --git a/repos/system_upgrade/el7toel8/actors/migratentp/tests/unit_test_migratentp.py b/repos/system_upgrade/el7toel8/actors/migratentp/tests/unit_test_migratentp.py
index fafff5e7..5350029c 100644
--- a/repos/system_upgrade/el7toel8/actors/migratentp/tests/unit_test_migratentp.py
+++ b/repos/system_upgrade/el7toel8/actors/migratentp/tests/unit_test_migratentp.py
@@ -44,7 +44,7 @@ class ntp2chrony_mocked(object):
def __call__(self, *args):
self.called += 1
self.args = args
- return self.ignored_lines * ['a line']
+ return True, self.ignored_lines * ['a line']
def test_migration(monkeypatch):
--
2.38.1

View File

@ -1,32 +0,0 @@
From 7d915f9ce861f999d6fc559e7a466a32c7e4aec9 Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Fri, 9 Sep 2022 16:44:27 +0200
Subject: [PATCH 13/32] Make shellcheck happy again
Fixing:
```
85sys-upgrade-redhat/do-upgrade.sh:236:37: warning[SC2166]: Prefer [ p ] && [ q ] as [ p -a q ] is not well defined.
```
It's not a real issue as we do not care about the order of the
evaluation, but making shellcheck happy.
---
.../files/dracut/85sys-upgrade-redhat/do-upgrade.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
index 1f39a6b2..ff491316 100755
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
@@ -233,7 +233,7 @@ do_upgrade() {
# on aarch64 systems during el8 to el9 upgrades the swap is broken due to change in page size (64K to 4k)
# adjust the page size before booting into the new system, as it is possible the swap is necessary for to boot
# `arch` command is not available in the dracut shell, using uname -m instead
- [ "$(uname -m)" = "aarch64" -a "$RHEL_OS_MAJOR_RELEASE" = "9" ] && {
+ [ "$(uname -m)" = "aarch64" ] && [ "$RHEL_OS_MAJOR_RELEASE" = "9" ] && {
cp -aS ".leapp_bp" $NEWROOT/etc/fstab /etc/fstab
# swapon internally uses mkswap and both swapon and mkswap aren't available in dracut shell
# as a workaround we can use the one from $NEWROOT in $NEWROOT/usr/sbin
--
2.38.1

View File

@ -1,231 +0,0 @@
From c109704cb2139dbdba371b83e2f55aad8fb1f9ed Mon Sep 17 00:00:00 2001
From: Eric Garver <egarver@redhat.com>
Date: Wed, 31 Aug 2022 14:24:42 -0400
Subject: [PATCH 14/32] actor: firewalld: support 0.8.z
Prior to this change the actor only supported firewalld-0.9.z and later.
Relevant differences between 0.9.z and 0.8.z:
- Policies don't exist (new in 0.9.0)
- Zones use a tuple based API
Fixes: rhbz2101909
---
...private_firewalldcollectusedobjectnames.py | 31 +++++-
...it_test_firewalldcollectusedobjectnames.py | 105 +++++++++++++++++-
2 files changed, 129 insertions(+), 7 deletions(-)
diff --git a/repos/system_upgrade/el8toel9/actors/firewalldcollectusedobjectnames/libraries/private_firewalldcollectusedobjectnames.py b/repos/system_upgrade/el8toel9/actors/firewalldcollectusedobjectnames/libraries/private_firewalldcollectusedobjectnames.py
index 93e4c6a2..d93b980b 100644
--- a/repos/system_upgrade/el8toel9/actors/firewalldcollectusedobjectnames/libraries/private_firewalldcollectusedobjectnames.py
+++ b/repos/system_upgrade/el8toel9/actors/firewalldcollectusedobjectnames/libraries/private_firewalldcollectusedobjectnames.py
@@ -14,6 +14,13 @@ def is_zone_in_use(conf):
return False
+def is_zone_in_use_tuple(conf):
+ conf_dict = {'interfaces': conf[10],
+ 'sources': conf[11]}
+
+ return is_zone_in_use(conf_dict)
+
+
def is_policy_in_use(conf, used_zones):
# A policy is in use if both ingress_zones and egress_zones contain at
# least one of following: an active zone, 'ANY', 'HOST'.
@@ -49,6 +56,18 @@ def get_used_services(conf, isZone):
return used_services
+def get_used_services_tuple(conf, isZone):
+ if not isZone:
+ return set()
+
+ conf_dict = {'services': conf[5],
+ 'interfaces': conf[10],
+ 'sources': conf[11],
+ 'rules_str': conf[12]}
+
+ return get_used_services(conf_dict, isZone)
+
+
def read_config():
try:
fw = Firewall(offline=True)
@@ -65,12 +84,12 @@ def read_config():
used_zones = set([fw.get_default_zone()])
for zone in fw.config.get_zones():
obj = fw.config.get_zone(zone)
- conf = fw.config.get_zone_config_dict(obj)
- if is_zone_in_use(conf):
+ conf = fw.config.get_zone_config(obj)
+ if is_zone_in_use_tuple(conf):
used_zones.add(zone)
used_policies = []
- for policy in fw.config.get_policy_objects():
+ for policy in fw.config.get_policy_objects() if hasattr(fw.config, "get_policy_objects") else []:
obj = fw.config.get_policy_object(policy)
conf = fw.config.get_policy_object_config_dict(obj)
if is_policy_in_use(conf, used_zones):
@@ -79,9 +98,9 @@ def read_config():
used_services = set()
for zone in fw.config.get_zones():
obj = fw.config.get_zone(zone)
- conf = fw.config.get_zone_config_dict(obj)
- used_services.update(get_used_services(conf, True))
- for policy in fw.config.get_policy_objects():
+ conf = fw.config.get_zone_config(obj)
+ used_services.update(get_used_services_tuple(conf, True))
+ for policy in fw.config.get_policy_objects() if hasattr(fw.config, "get_policy_objects") else []:
obj = fw.config.get_policy_object(policy)
conf = fw.config.get_policy_object_config_dict(obj)
used_services.update(get_used_services(conf, False))
diff --git a/repos/system_upgrade/el8toel9/actors/firewalldcollectusedobjectnames/tests/unit_test_firewalldcollectusedobjectnames.py b/repos/system_upgrade/el8toel9/actors/firewalldcollectusedobjectnames/tests/unit_test_firewalldcollectusedobjectnames.py
index 6e1511eb..9d2cfb47 100644
--- a/repos/system_upgrade/el8toel9/actors/firewalldcollectusedobjectnames/tests/unit_test_firewalldcollectusedobjectnames.py
+++ b/repos/system_upgrade/el8toel9/actors/firewalldcollectusedobjectnames/tests/unit_test_firewalldcollectusedobjectnames.py
@@ -1,7 +1,9 @@
from leapp.libraries.actor.private_firewalldcollectusedobjectnames import (
get_used_services,
+ get_used_services_tuple,
is_policy_in_use,
- is_zone_in_use
+ is_zone_in_use,
+ is_zone_in_use_tuple
)
@@ -20,6 +22,35 @@ def test_is_zone_in_use():
assert is_zone_in_use(conf)
+def test_is_zone_in_use_tuple():
+ conf = (None, None, None, None, None,
+ ['tftp-client'], # conf[5], services
+ None, None, None, None,
+ ['dummy0'], # conf[10], interfaces
+ [], # conf[11], sources
+ [], # conf[12], rules_str
+ None, None, None)
+ assert is_zone_in_use_tuple(conf)
+
+ conf = (None, None, None, None, None,
+ ['tftp-client'], # conf[5], services
+ None, None, None, None,
+ [], # conf[10], interfaces
+ ['10.1.2.0/24'], # conf[11], sources
+ [], # conf[12], rules_str
+ None, None, None)
+ assert is_zone_in_use_tuple(conf)
+
+ conf = (None, None, None, None, None,
+ ['tftp-client'], # conf[5], services
+ None, None, None, None,
+ ['dummy0'], # conf[10], interfaces
+ ['fd00::/8'], # conf[11], sources
+ [], # conf[12], rules_str
+ None, None, None)
+ assert is_zone_in_use_tuple(conf)
+
+
def test_is_zone_in_use_negative():
conf = {'interfaces': [],
'services': ['tftp-client']}
@@ -33,6 +64,17 @@ def test_is_zone_in_use_negative():
assert not is_zone_in_use(conf)
+def test_is_zone_in_use_tuple_negative():
+ conf = (None, None, None, None, None,
+ ['tftp-client'], # conf[5], services
+ None, None, None, None,
+ [], # conf[10], interfaces
+ [], # conf[11], sources
+ [], # conf[12], rules_str
+ None, None, None)
+ assert not is_zone_in_use_tuple(conf)
+
+
def test_is_policy_in_use():
conf = {'ingress_zones': ['HOST'],
'egress_zones': ['public'],
@@ -88,6 +130,35 @@ def test_get_used_services_zone():
assert 'tftp-client' in get_used_services(conf, True)
+def test_get_used_services_tuple_zone():
+ conf = (None, None, None, None, None,
+ ['tftp-client'], # conf[5], services
+ None, None, None, None,
+ ['dummy0'], # conf[10], interfaces
+ [], # conf[11], sources
+ [], # conf[12], rules_str
+ None, None, None)
+ assert 'tftp-client' in get_used_services_tuple(conf, True)
+
+ conf = (None, None, None, None, None,
+ [], # conf[5], services
+ None, None, None, None,
+ [], # conf[10], interfaces
+ ['10.1.2.0/24'], # conf[11], sources
+ ['rule family="ipv4" source address="10.1.1.0/24" service name="tftp-client" reject'],
+ None, None, None)
+ assert 'tftp-client' in get_used_services_tuple(conf, True)
+
+ conf = (None, None, None, None, None,
+ [], # conf[5], services
+ None, None, None, None,
+ ['dummy0'], # conf[10], interfaces
+ ['fd00::/8'], # conf[11], sources
+ ['rule service name="ssh" accept', 'rule service name="tftp-client" accept'], # conf[12], rules_str
+ None, None, None)
+ assert 'tftp-client' in get_used_services_tuple(conf, True)
+
+
def test_get_used_services_zone_negative():
conf = {'interfaces': ['dummy0'],
'services': ['https']}
@@ -105,6 +176,38 @@ def test_get_used_services_zone_negative():
assert 'tftp-client' not in get_used_services(conf, True)
+def test_get_used_services_tuple_zone_negative():
+ conf = (None, None, None, None, None,
+ ['https'], # conf[5], services
+ None, None, None, None,
+ ['dummy0'], # conf[10], interfaces
+ [], # conf[11], sources
+ [], # conf[12], rules_str
+ None, None, None)
+ assert 'tftp-client' not in get_used_services_tuple(conf, True)
+
+ conf = {'sources': ['10.1.2.0/24'],
+ 'rules_str': ['rule family="ipv4" source address="10.1.1.0/24" service name="ssh" reject'],
+ 'services': ['https']}
+ conf = (None, None, None, None, None,
+ ['https'], # conf[5], services
+ None, None, None, None,
+ [], # conf[10], interfaces
+ ['10.1.2.0/24'], # conf[11], sources
+ ['rule family="ipv4" source address="10.1.1.0/24" service name="ssh" reject'], # conf[12], rules_str
+ None, None, None)
+ assert 'tftp-client' not in get_used_services_tuple(conf, True)
+
+ conf = (None, None, None, None, None,
+ [], # conf[5], services
+ None, None, None, None,
+ ['dummy0'], # conf[10], interfaces
+ ['fd00::/8'], # conf[11], sources
+ ['rule service name="ssh" accept', 'rule service name="http" accept'], # conf[12], rules_str
+ None, None, None)
+ assert 'tftp-client' not in get_used_services_tuple(conf, True)
+
+
def test_get_used_services_policy():
conf = {'services': ['tftp-client']}
assert 'tftp-client' in get_used_services(conf, False)
--
2.38.1

View File

@ -1,257 +0,0 @@
From 876e93f233c41aa6c1742ed874ac167f0ddc4dbb Mon Sep 17 00:00:00 2001
From: PeterMocary <petermocary@gmail.com>
Date: Fri, 24 Jun 2022 15:23:30 +0200
Subject: [PATCH 15/32] Scanpkgmanager: detect proxy configuration
This new information enables targetuserspacecreator actor to inform user why the package installation might have failed
---
.../libraries/scanpkgmanager.py | 53 ++++++++++++++++++-
.../tests/test_scanpkgmanager.py | 49 +++++++++++++++++
.../actors/targetuserspacecreator/actor.py | 4 ++
.../libraries/userspacegen.py | 24 +++++++--
.../common/models/packagemanagerinfo.py | 5 ++
.../common/models/repositoriesfacts.py | 1 +
6 files changed, 131 insertions(+), 5 deletions(-)
diff --git a/repos/system_upgrade/common/actors/scanpkgmanager/libraries/scanpkgmanager.py b/repos/system_upgrade/common/actors/scanpkgmanager/libraries/scanpkgmanager.py
index 6f6a79d2..7c97fb1a 100644
--- a/repos/system_upgrade/common/actors/scanpkgmanager/libraries/scanpkgmanager.py
+++ b/repos/system_upgrade/common/actors/scanpkgmanager/libraries/scanpkgmanager.py
@@ -1,9 +1,13 @@
import os
+import re
from leapp.libraries.common.config.version import get_source_major_version
from leapp.libraries.stdlib import api
from leapp.models import PkgManagerInfo
+YUM_CONFIG_PATH = '/etc/yum.conf'
+DNF_CONFIG_PATH = '/etc/dnf/dnf.conf'
+
def _get_releasever_path():
default_manager = 'yum' if get_source_major_version() == '7' else 'dnf'
@@ -28,5 +32,52 @@ def get_etc_releasever():
return releasever
+def _get_config_contents(config_path):
+ if os.path.isfile(config_path):
+ with open(config_path, 'r') as config:
+ return config.read()
+ return ''
+
+
+def _get_proxy_if_set(manager_config_path):
+ """
+ Get proxy address from specified package manager config.
+
+ :param manager_config_path: path to a package manager config
+ :returns: proxy address or None when not set
+ :rtype: String
+ """
+
+ config = _get_config_contents(manager_config_path)
+
+ for line in config.split('\n'):
+ if re.match('^proxy[ \t]*=', line):
+ proxy_address = line.split('=', 1)[1]
+ return proxy_address.strip()
+
+ return None
+
+
+def get_configured_proxies():
+ """
+ Get a list of proxies used in dnf and yum configuration files.
+
+ :returns: sorted list of unique proxies
+ :rtype: List
+ """
+
+ configured_proxies = set()
+ for config_path in (DNF_CONFIG_PATH, YUM_CONFIG_PATH):
+ proxy = _get_proxy_if_set(config_path)
+ if proxy:
+ configured_proxies.add(proxy)
+
+ return sorted(configured_proxies)
+
+
def process():
- api.produce(PkgManagerInfo(etc_releasever=get_etc_releasever()))
+ pkg_manager_info = PkgManagerInfo()
+ pkg_manager_info.etc_releasever = get_etc_releasever()
+ pkg_manager_info.configured_proxies = get_configured_proxies()
+
+ api.produce(pkg_manager_info)
diff --git a/repos/system_upgrade/common/actors/scanpkgmanager/tests/test_scanpkgmanager.py b/repos/system_upgrade/common/actors/scanpkgmanager/tests/test_scanpkgmanager.py
index 3be6fa2f..e78b532f 100644
--- a/repos/system_upgrade/common/actors/scanpkgmanager/tests/test_scanpkgmanager.py
+++ b/repos/system_upgrade/common/actors/scanpkgmanager/tests/test_scanpkgmanager.py
@@ -9,6 +9,9 @@ from leapp.libraries.common.testutils import CurrentActorMocked, produce_mocked
from leapp.libraries.stdlib import api
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
+PROXY_ADDRESS = 'https://192.168.121.123:3128'
+YUM_CONFIG_PATH = '/etc/yum.conf'
+DNF_CONFIG_PATH = '/etc/dnf/dnf.conf'
def mock_releasever_exists(overrides):
@@ -36,6 +39,8 @@ def test_get_etcreleasever(monkeypatch, etcrelease_exists):
monkeypatch.setattr(scanpkgmanager.api, 'produce', produce_mocked())
monkeypatch.setattr(scanpkgmanager.api, 'current_actor', CurrentActorMocked())
monkeypatch.setattr(scanpkgmanager, '_get_releasever_path', mocked_get_releasever_path)
+ monkeypatch.setattr(scanpkgmanager, '_get_proxy_if_set', lambda x: None)
+ monkeypatch.setattr(pluginscanner, 'scan_enabled_package_manager_plugins', lambda: [])
scanpkgmanager.process()
@@ -44,3 +49,47 @@ def test_get_etcreleasever(monkeypatch, etcrelease_exists):
assert api.produce.model_instances[0].etc_releasever
else:
assert not api.produce.model_instances[0].etc_releasever
+
+
+@pytest.mark.parametrize('proxy_set', [True, False])
+def test_get_proxy_if_set(monkeypatch, proxy_set):
+
+ config_path = '/path/to/config.conf'
+ config_contents = '[main]\n'
+ if proxy_set:
+ config_contents += 'proxy = \t{} '.format(PROXY_ADDRESS)
+
+ def mocked_get_config_contents(path):
+ assert path == config_path
+ return config_contents
+
+ monkeypatch.setattr(scanpkgmanager, '_get_config_contents', mocked_get_config_contents)
+
+ proxy = scanpkgmanager._get_proxy_if_set(config_path)
+
+ if proxy_set:
+ assert proxy == PROXY_ADDRESS
+
+ assert proxy_set == bool(proxy)
+
+
+@pytest.mark.parametrize(
+ ('proxy_set_in_dnf_config', 'proxy_set_in_yum_config', 'expected_output'),
+ [
+ (True, True, [PROXY_ADDRESS]),
+ (True, False, [PROXY_ADDRESS]),
+ (False, False, [])
+ ]
+)
+def test_get_configured_proxies(monkeypatch, proxy_set_in_dnf_config, proxy_set_in_yum_config, expected_output):
+
+ def mocked_get_proxy_if_set(path):
+ proxy = PROXY_ADDRESS if proxy_set_in_yum_config else None
+ if path == DNF_CONFIG_PATH:
+ proxy = PROXY_ADDRESS if proxy_set_in_dnf_config else None
+ return proxy
+
+ monkeypatch.setattr(scanpkgmanager, '_get_proxy_if_set', mocked_get_proxy_if_set)
+
+ configured_proxies = scanpkgmanager.get_configured_proxies()
+ assert configured_proxies == expected_output
diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/actor.py b/repos/system_upgrade/common/actors/targetuserspacecreator/actor.py
index 7e5c7db7..04fb2e8b 100644
--- a/repos/system_upgrade/common/actors/targetuserspacecreator/actor.py
+++ b/repos/system_upgrade/common/actors/targetuserspacecreator/actor.py
@@ -5,7 +5,9 @@ from leapp.models import RequiredTargetUserspacePackages # deprecated
from leapp.models import TMPTargetRepositoriesFacts # deprecated all the time
from leapp.models import (
CustomTargetRepositoryFile,
+ PkgManagerInfo,
Report,
+ RepositoriesFacts,
RepositoriesMapping,
RHSMInfo,
RHUIInfo,
@@ -36,12 +38,14 @@ class TargetUserspaceCreator(Actor):
CustomTargetRepositoryFile,
RHSMInfo,
RHUIInfo,
+ RepositoriesFacts,
RepositoriesMapping,
RequiredTargetUserspacePackages,
StorageInfo,
TargetRepositories,
TargetUserSpacePreupgradeTasks,
XFSPresence,
+ PkgManagerInfo,
)
produces = (TargetUserSpaceInfo, UsedTargetRepositories, Report, TMPTargetRepositoriesFacts,)
tags = (IPUWorkflowTag, TargetTransactionFactsPhaseTag)
diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
index c39af66f..00acacd9 100644
--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
+++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
@@ -12,6 +12,8 @@ from leapp.models import RequiredTargetUserspacePackages # deprecated
from leapp.models import TMPTargetRepositoriesFacts # deprecated
from leapp.models import (
CustomTargetRepositoryFile,
+ PkgManagerInfo,
+ RepositoriesFacts,
RHSMInfo,
RHUIInfo,
StorageInfo,
@@ -166,10 +168,24 @@ def prepare_target_userspace(context, userspace_dir, enabled_repos, packages):
try:
context.call(cmd, callback_raw=utils.logging_handler)
except CalledProcessError as exc:
- raise StopActorExecutionError(
- message='Unable to install RHEL {} userspace packages.'.format(target_major_version),
- details={'details': str(exc), 'stderr': exc.stderr}
- )
+ message = 'Unable to install RHEL {} userspace packages.'.format(target_major_version)
+ details = {'details': str(exc), 'stderr': exc.stderr}
+
+ # If a proxy was set in dnf config, it should be the reason why dnf
+ # failed since leapp does not support updates behind proxy yet.
+ for manager_info in api.consume(PkgManagerInfo):
+ if manager_info.configured_proxies:
+ details['details'] = ("DNF failed to install userspace packages, likely due to the proxy "
+ "configuration detected in the YUM/DNF configuration file.")
+
+ # Similarly if a proxy was set specifically for one of the repositories.
+ for repo_facts in api.consume(RepositoriesFacts):
+ for repo_file in repo_facts.repositories:
+ if any(repo_data.proxy and repo_data.enabled for repo_data in repo_file.data):
+ details['details'] = ("DNF failed to install userspace packages, likely due to the proxy "
+ "configuration detected in a repository configuration file.")
+
+ raise StopActorExecutionError(message=message, details=details)
def _get_all_rhui_pkgs():
diff --git a/repos/system_upgrade/common/models/packagemanagerinfo.py b/repos/system_upgrade/common/models/packagemanagerinfo.py
index ba6391c3..aa450978 100644
--- a/repos/system_upgrade/common/models/packagemanagerinfo.py
+++ b/repos/system_upgrade/common/models/packagemanagerinfo.py
@@ -17,3 +17,8 @@ class PkgManagerInfo(Model):
In case the value is empty string, it means the file exists but it is empty. In such a case the
original configuration is obviously broken.
"""
+
+ configured_proxies = fields.List(fields.String(), default=[])
+ """
+ A sorted list of proxies present in yum and dnf configuration files.
+ """
diff --git a/repos/system_upgrade/common/models/repositoriesfacts.py b/repos/system_upgrade/common/models/repositoriesfacts.py
index 722c579f..cd2124fc 100644
--- a/repos/system_upgrade/common/models/repositoriesfacts.py
+++ b/repos/system_upgrade/common/models/repositoriesfacts.py
@@ -13,6 +13,7 @@ class RepositoryData(Model):
mirrorlist = fields.Nullable(fields.String())
enabled = fields.Boolean(default=True)
additional_fields = fields.Nullable(fields.String())
+ proxy = fields.Nullable(fields.String())
class RepositoryFile(Model):
--
2.38.1

View File

@ -1,380 +0,0 @@
From b4c3de448324a35da8b92905c04cc169430cf4a0 Mon Sep 17 00:00:00 2001
From: PeterMocary <petermocary@gmail.com>
Date: Sun, 26 Jun 2022 13:56:24 +0200
Subject: [PATCH 16/32] Merge of the yumconfigscanner actor into the
scanpkgmanager actor
---
.../actors/checkyumpluginsenabled/actor.py | 8 ++--
.../libraries/checkyumpluginsenabled.py | 6 +--
.../tests/test_checkyumpluginsenabled.py | 6 +--
.../libraries/pluginscanner.py} | 48 +++++++------------
.../libraries/scanpkgmanager.py | 6 ++-
.../tests/test_pluginscanner.py} | 26 +++++-----
.../tests/test_scanpkgmanager.py | 2 +-
.../common/actors/yumconfigscanner/actor.py | 18 -------
.../common/models/packagemanagerinfo.py | 2 +
.../system_upgrade/common/models/yumconfig.py | 8 ----
10 files changed, 48 insertions(+), 82 deletions(-)
rename repos/system_upgrade/common/actors/{yumconfigscanner/libraries/yumconfigscanner.py => scanpkgmanager/libraries/pluginscanner.py} (56%)
rename repos/system_upgrade/common/actors/{yumconfigscanner/tests/test_yumconfigscanner.py => scanpkgmanager/tests/test_pluginscanner.py} (74%)
delete mode 100644 repos/system_upgrade/common/actors/yumconfigscanner/actor.py
delete mode 100644 repos/system_upgrade/common/models/yumconfig.py
diff --git a/repos/system_upgrade/common/actors/checkyumpluginsenabled/actor.py b/repos/system_upgrade/common/actors/checkyumpluginsenabled/actor.py
index c6872fa7..fbc2f8bc 100644
--- a/repos/system_upgrade/common/actors/checkyumpluginsenabled/actor.py
+++ b/repos/system_upgrade/common/actors/checkyumpluginsenabled/actor.py
@@ -1,6 +1,6 @@
from leapp.actors import Actor
from leapp.libraries.actor.checkyumpluginsenabled import check_required_yum_plugins_enabled
-from leapp.models import YumConfig
+from leapp.models import PkgManagerInfo
from leapp.reporting import Report
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
@@ -11,10 +11,10 @@ class CheckYumPluginsEnabled(Actor):
"""
name = 'check_yum_plugins_enabled'
- consumes = (YumConfig,)
+ consumes = (PkgManagerInfo,)
produces = (Report,)
tags = (ChecksPhaseTag, IPUWorkflowTag)
def process(self):
- yum_config = next(self.consume(YumConfig))
- check_required_yum_plugins_enabled(yum_config)
+ pkg_manager_info = next(self.consume(PkgManagerInfo))
+ check_required_yum_plugins_enabled(pkg_manager_info)
diff --git a/repos/system_upgrade/common/actors/checkyumpluginsenabled/libraries/checkyumpluginsenabled.py b/repos/system_upgrade/common/actors/checkyumpluginsenabled/libraries/checkyumpluginsenabled.py
index 7c7398df..48f38d0a 100644
--- a/repos/system_upgrade/common/actors/checkyumpluginsenabled/libraries/checkyumpluginsenabled.py
+++ b/repos/system_upgrade/common/actors/checkyumpluginsenabled/libraries/checkyumpluginsenabled.py
@@ -10,16 +10,16 @@ REQUIRED_YUM_PLUGINS = {'subscription-manager', 'product-id'}
FMT_LIST_SEPARATOR = '\n - '
-def check_required_yum_plugins_enabled(yum_config):
+def check_required_yum_plugins_enabled(pkg_manager_info):
"""
Checks whether the yum plugins required by the IPU are enabled.
If they are not enabled, a report is produced informing the user about it.
- :param yum_config: YumConfig
+ :param pkg_manager_info: PkgManagerInfo
"""
- missing_required_plugins = REQUIRED_YUM_PLUGINS - set(yum_config.enabled_plugins)
+ missing_required_plugins = REQUIRED_YUM_PLUGINS - set(pkg_manager_info.enabled_plugins)
if skip_rhsm():
missing_required_plugins -= {'subscription-manager', 'product-id'}
diff --git a/repos/system_upgrade/common/actors/checkyumpluginsenabled/tests/test_checkyumpluginsenabled.py b/repos/system_upgrade/common/actors/checkyumpluginsenabled/tests/test_checkyumpluginsenabled.py
index 896d31d5..9bf9a3ba 100644
--- a/repos/system_upgrade/common/actors/checkyumpluginsenabled/tests/test_checkyumpluginsenabled.py
+++ b/repos/system_upgrade/common/actors/checkyumpluginsenabled/tests/test_checkyumpluginsenabled.py
@@ -4,7 +4,7 @@ from leapp import reporting
from leapp.libraries.actor.checkyumpluginsenabled import check_required_yum_plugins_enabled
from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked
from leapp.libraries.stdlib import api
-from leapp.models import YumConfig
+from leapp.models import PkgManagerInfo
from leapp.utils.report import is_inhibitor
@@ -38,7 +38,7 @@ def test__create_report_mocked(monkeypatch):
def test_report_when_missing_required_plugins(monkeypatch):
"""Test whether a report entry is created when any of the required YUM plugins are missing."""
- yum_config = YumConfig(enabled_plugins=['product-id', 'some-user-plugin'])
+ yum_config = PkgManagerInfo(enabled_plugins=['product-id', 'some-user-plugin'])
actor_reports = create_report_mocked()
@@ -62,7 +62,7 @@ def test_nothing_is_reported_when_rhsm_disabled(monkeypatch):
monkeypatch.setattr(api, 'current_actor', actor_mocked)
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
- yum_config = YumConfig(enabled_plugins=[])
+ yum_config = PkgManagerInfo(enabled_plugins=[])
check_required_yum_plugins_enabled(yum_config)
assert not reporting.create_report.called, 'Report was created even if LEAPP_NO_RHSM was set'
diff --git a/repos/system_upgrade/common/actors/yumconfigscanner/libraries/yumconfigscanner.py b/repos/system_upgrade/common/actors/scanpkgmanager/libraries/pluginscanner.py
similarity index 56%
rename from repos/system_upgrade/common/actors/yumconfigscanner/libraries/yumconfigscanner.py
rename to repos/system_upgrade/common/actors/scanpkgmanager/libraries/pluginscanner.py
index 0b7d5fe6..7bb03996 100644
--- a/repos/system_upgrade/common/actors/yumconfigscanner/libraries/yumconfigscanner.py
+++ b/repos/system_upgrade/common/actors/scanpkgmanager/libraries/pluginscanner.py
@@ -1,26 +1,25 @@
import re
from leapp.libraries.common.config.version import get_source_major_version
-from leapp.libraries.stdlib import api, run
-from leapp.models import YumConfig
+from leapp.libraries.stdlib import run
# When the output spans multiple lines, each of the lines after the first one
# start with a ' <SPACES> : '
-YUM_LOADED_PLUGINS_NEXT_LINE_START = ' +: '
+LOADED_PLUGINS_NEXT_LINE_START = ' +: '
-def _parse_loaded_plugins(yum_output):
+def _parse_loaded_plugins(package_manager_output):
"""
- Retrieves a list of plugins that are being loaded when calling yum.
+ Retrieves a list of plugins that are being loaded when calling dnf/yum.
- :param dict yum_output: The result of running the yum command.
+ :param dict package_manager_output: The result of running the package manager command.
:rtype: list
- :returns: A list of plugins that are being loaded when calling yum.
+ :returns: A list of plugins that are being loaded by the package manager.
"""
- # YUM might break the information about loaded plugins into multiple lines,
+ # Package manager might break the information about loaded plugins into multiple lines,
# we need to concaternate the list ourselves
loaded_plugins_str = ''
- for line in yum_output['stdout']:
+ for line in package_manager_output['stdout']:
if line.startswith('Loaded plugins:'):
# We have found the first line that contains the plugins
plugins_on_this_line = line[16:] # Remove the `Loaded plugins: ` part
@@ -32,7 +31,7 @@ def _parse_loaded_plugins(yum_output):
continue
if loaded_plugins_str:
- if re.match(YUM_LOADED_PLUGINS_NEXT_LINE_START, line):
+ if re.match(LOADED_PLUGINS_NEXT_LINE_START, line):
# The list of plugins continues on this line
plugins_on_this_line = line.lstrip(' :') # Remove the leading spaces and semicolon
@@ -49,39 +48,28 @@ def _parse_loaded_plugins(yum_output):
return loaded_plugins_str.split(', ')
-def scan_enabled_yum_plugins():
+def scan_enabled_package_manager_plugins():
"""
- Runs the `yum` command and parses its output for enabled/loaded plugins.
+ Runs package manager (yum/dnf) command and parses its output for enabled/loaded plugins.
:return: A list of enabled plugins.
:rtype: List
"""
- # We rely on yum itself to report what plugins are used when it is invoked.
- # An alternative approach would be to check /usr/lib/yum-plugins/ (install
- # path for yum plugins) and parse corresponding configurations from
- # /etc/yum/pluginconf.d/
+ # We rely on package manager itself to report what plugins are used when it is invoked.
+ # An alternative approach would be to check the install path for package manager plugins
+ # and parse corresponding plugin configuration files.
if get_source_major_version() == '7':
# in case of yum, set debuglevel=2 to be sure the output is always
# same. The format of data is different for various debuglevels
- yum_cmd = ['yum', '--setopt=debuglevel=2']
+ cmd = ['yum', '--setopt=debuglevel=2']
else:
# the verbose mode in dnf always set particular debuglevel, so the
# output is not affected by the default debug level set on the
# system
- yum_cmd = ['dnf', '-v'] # On RHEL8 we need to supply an extra switch
+ cmd = ['dnf', '-v'] # On RHEL8 we need to supply an extra switch
- yum_output = run(yum_cmd, split=True, checked=False) # The yum command will certainly fail (does not matter).
+ pkg_manager_output = run(cmd, split=True, checked=False) # The command will certainly fail (does not matter).
- return _parse_loaded_plugins(yum_output)
-
-
-def scan_yum_config():
- """
- Scans the YUM configuration and produces :class:`YumConfig` message with the information found.
- """
- config = YumConfig()
- config.enabled_plugins = scan_enabled_yum_plugins()
-
- api.produce(config)
+ return _parse_loaded_plugins(pkg_manager_output)
diff --git a/repos/system_upgrade/common/actors/scanpkgmanager/libraries/scanpkgmanager.py b/repos/system_upgrade/common/actors/scanpkgmanager/libraries/scanpkgmanager.py
index 7c97fb1a..bf7ec0be 100644
--- a/repos/system_upgrade/common/actors/scanpkgmanager/libraries/scanpkgmanager.py
+++ b/repos/system_upgrade/common/actors/scanpkgmanager/libraries/scanpkgmanager.py
@@ -1,6 +1,7 @@
import os
import re
+from leapp.libraries.actor import pluginscanner
from leapp.libraries.common.config.version import get_source_major_version
from leapp.libraries.stdlib import api
from leapp.models import PkgManagerInfo
@@ -43,9 +44,9 @@ def _get_proxy_if_set(manager_config_path):
"""
Get proxy address from specified package manager config.
- :param manager_config_path: path to a package manager config
+ :param str manager_config_path: path to a package manager config
:returns: proxy address or None when not set
- :rtype: String
+ :rtype: str
"""
config = _get_config_contents(manager_config_path)
@@ -79,5 +80,6 @@ def process():
pkg_manager_info = PkgManagerInfo()
pkg_manager_info.etc_releasever = get_etc_releasever()
pkg_manager_info.configured_proxies = get_configured_proxies()
+ pkg_manager_info.enabled_plugins = pluginscanner.scan_enabled_package_manager_plugins()
api.produce(pkg_manager_info)
diff --git a/repos/system_upgrade/common/actors/yumconfigscanner/tests/test_yumconfigscanner.py b/repos/system_upgrade/common/actors/scanpkgmanager/tests/test_pluginscanner.py
similarity index 74%
rename from repos/system_upgrade/common/actors/yumconfigscanner/tests/test_yumconfigscanner.py
rename to repos/system_upgrade/common/actors/scanpkgmanager/tests/test_pluginscanner.py
index 8406ef00..f0260e54 100644
--- a/repos/system_upgrade/common/actors/yumconfigscanner/tests/test_yumconfigscanner.py
+++ b/repos/system_upgrade/common/actors/scanpkgmanager/tests/test_pluginscanner.py
@@ -1,6 +1,6 @@
import pytest
-from leapp.libraries.actor import yumconfigscanner
+from leapp.libraries.actor import pluginscanner
CMD_YUM_OUTPUT = '''Loaded plugins: langpacks, my plugin, subscription-manager, product-id
Usage: yum [options] COMMAND
@@ -16,23 +16,23 @@ Usage: yum [options] COMMAND
def assert_plugins_identified_as_enabled(expected_plugins, identified_plugins):
- fail_description = 'Failed to parse a plugin from the yum output.'
+ fail_description = 'Failed to parse a plugin from the package manager output.'
for expected_enabled_plugin in expected_plugins:
assert expected_enabled_plugin in identified_plugins, fail_description
@pytest.mark.parametrize(
- ('source_major_version', 'yum_command'),
+ ('source_major_version', 'command'),
[
('7', ['yum', '--setopt=debuglevel=2']),
('8', ['dnf', '-v']),
]
)
-def test_scan_enabled_plugins(monkeypatch, source_major_version, yum_command):
- """Tests whether the enabled plugins are correctly retrieved from the yum output."""
+def test_scan_enabled_plugins(monkeypatch, source_major_version, command):
+ """Tests whether the enabled plugins are correctly retrieved from the package manager output."""
def run_mocked(cmd, **kwargs):
- if cmd == yum_command:
+ if cmd == command:
return {
'stdout': CMD_YUM_OUTPUT.split('\n'),
'stderr': 'You need to give some command',
@@ -45,10 +45,10 @@ def test_scan_enabled_plugins(monkeypatch, source_major_version, yum_command):
# The library imports `run` all the way into its namespace (from ...stdlib import run),
# we must overwrite it there then:
- monkeypatch.setattr(yumconfigscanner, 'run', run_mocked)
- monkeypatch.setattr(yumconfigscanner, 'get_source_major_version', get_source_major_version_mocked)
+ monkeypatch.setattr(pluginscanner, 'run', run_mocked)
+ monkeypatch.setattr(pluginscanner, 'get_source_major_version', get_source_major_version_mocked)
- enabled_plugins = yumconfigscanner.scan_enabled_yum_plugins()
+ enabled_plugins = pluginscanner.scan_enabled_package_manager_plugins()
assert_plugins_identified_as_enabled(
['langpacks', 'my plugin', 'subscription-manager', 'product-id'],
enabled_plugins
@@ -63,7 +63,7 @@ def test_scan_enabled_plugins(monkeypatch, source_major_version, yum_command):
(CMD_YUM_OUTPUT_MULTILINE_BREAK_ON_WHITESPACE,)
])
def test_yum_loaded_plugins_multiline_output(yum_output, monkeypatch):
- """Tests whether the library correctly handles yum plugins getting reported on multiple lines."""
+ """Tests whether the library correctly handles plugins getting reported on multiple lines."""
def run_mocked(cmd, **kwargs):
return {
'stdout': yum_output.split('\n'),
@@ -71,10 +71,10 @@ def test_yum_loaded_plugins_multiline_output(yum_output, monkeypatch):
'exit_code': 1
}
- monkeypatch.setattr(yumconfigscanner, 'run', run_mocked)
- monkeypatch.setattr(yumconfigscanner, 'get_source_major_version', lambda: '7')
+ monkeypatch.setattr(pluginscanner, 'run', run_mocked)
+ monkeypatch.setattr(pluginscanner, 'get_source_major_version', lambda: '7')
- enabled_plugins = yumconfigscanner.scan_enabled_yum_plugins()
+ enabled_plugins = pluginscanner.scan_enabled_package_manager_plugins()
assert len(enabled_plugins) == 4, 'Identified more yum plugins than available in the mocked yum output.'
assert_plugins_identified_as_enabled(
diff --git a/repos/system_upgrade/common/actors/scanpkgmanager/tests/test_scanpkgmanager.py b/repos/system_upgrade/common/actors/scanpkgmanager/tests/test_scanpkgmanager.py
index e78b532f..75c5c5ba 100644
--- a/repos/system_upgrade/common/actors/scanpkgmanager/tests/test_scanpkgmanager.py
+++ b/repos/system_upgrade/common/actors/scanpkgmanager/tests/test_scanpkgmanager.py
@@ -3,7 +3,7 @@ import os
import pytest
from leapp.libraries import stdlib
-from leapp.libraries.actor import scanpkgmanager
+from leapp.libraries.actor import pluginscanner, scanpkgmanager
from leapp.libraries.common import testutils
from leapp.libraries.common.testutils import CurrentActorMocked, produce_mocked
from leapp.libraries.stdlib import api
diff --git a/repos/system_upgrade/common/actors/yumconfigscanner/actor.py b/repos/system_upgrade/common/actors/yumconfigscanner/actor.py
deleted file mode 100644
index 95aee415..00000000
--- a/repos/system_upgrade/common/actors/yumconfigscanner/actor.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from leapp.actors import Actor
-from leapp.libraries.actor.yumconfigscanner import scan_yum_config
-from leapp.models import YumConfig
-from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
-
-
-class YumConfigScanner(Actor):
- """
- Scans the configuration of the YUM package manager.
- """
-
- name = 'yum_config_scanner'
- consumes = ()
- produces = (YumConfig,)
- tags = (IPUWorkflowTag, ChecksPhaseTag)
-
- def process(self):
- scan_yum_config()
diff --git a/repos/system_upgrade/common/models/packagemanagerinfo.py b/repos/system_upgrade/common/models/packagemanagerinfo.py
index aa450978..bf969338 100644
--- a/repos/system_upgrade/common/models/packagemanagerinfo.py
+++ b/repos/system_upgrade/common/models/packagemanagerinfo.py
@@ -22,3 +22,5 @@ class PkgManagerInfo(Model):
"""
A sorted list of proxies present in yum and dnf configuration files.
"""
+
+ enabled_plugins = fields.List(fields.String(), default=[])
diff --git a/repos/system_upgrade/common/models/yumconfig.py b/repos/system_upgrade/common/models/yumconfig.py
deleted file mode 100644
index 506ce47e..00000000
--- a/repos/system_upgrade/common/models/yumconfig.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from leapp.models import fields, Model
-from leapp.topics import SystemFactsTopic
-
-
-class YumConfig(Model):
- topic = SystemFactsTopic
-
- enabled_plugins = fields.List(fields.String(), default=[])
--
2.38.1

View File

@ -1,30 +0,0 @@
From 279ebc96c45ab597f9c26903f5b36e2e57ced6fe Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Fri, 30 Sep 2022 11:17:38 +0200
Subject: [PATCH 17/32] firewalldcheckallowzonedrifting: Fix the remediation
cmd
The remediation cmd was incorrect as the cmd is written as string
instead of list, the fix:
['cmd param param'] -> ['cmd', 'paramm', 'param']
JIRA: OAMG-7694
---
.../el8toel9/actors/firewalldcheckallowzonedrifting/actor.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/repos/system_upgrade/el8toel9/actors/firewalldcheckallowzonedrifting/actor.py b/repos/system_upgrade/el8toel9/actors/firewalldcheckallowzonedrifting/actor.py
index 1f2767f5..b7eb5806 100644
--- a/repos/system_upgrade/el8toel9/actors/firewalldcheckallowzonedrifting/actor.py
+++ b/repos/system_upgrade/el8toel9/actors/firewalldcheckallowzonedrifting/actor.py
@@ -46,6 +46,6 @@ class FirewalldCheckAllowZoneDrifting(Actor):
title='Changes in firewalld related to Zone Drifting'),
reporting.Remediation(
hint='Set AllowZoneDrifting=no in /etc/firewalld/firewalld.conf',
- commands=[['sed -i "s/^AllowZoneDrifting=.*/AllowZoneDrifting=no/" '
+ commands=[['sed', '-i', 's/^AllowZoneDrifting=.*/AllowZoneDrifting=no/',
'/etc/firewalld/firewalld.conf']]),
])
--
2.38.1

View File

@ -1,25 +0,0 @@
From 1c6388139695aefb02daa7b5cb13e628f03eab43 Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Mon, 17 Oct 2022 12:59:22 +0200
Subject: [PATCH 18/32] rhui(azure-sap-apps): consider RHUI client as signed
---
.../common/actors/redhatsignedrpmscanner/actor.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py b/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py
index dd6db7c9..647805cd 100644
--- a/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py
+++ b/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py
@@ -56,7 +56,7 @@ class RedHatSignedRpmScanner(Actor):
upg_path = rhui.get_upg_path()
# AWS RHUI packages do not have to be whitelisted because they are signed by RedHat
- whitelisted_cloud_flavours = ('azure', 'azure-eus', 'azure-sap', 'google', 'google-sap')
+ whitelisted_cloud_flavours = ('azure', 'azure-eus', 'azure-sap', 'azure-sap-apps', 'google', 'google-sap')
whitelisted_cloud_pkgs = {
rhui.RHUI_CLOUD_MAP[upg_path].get(flavour, {}).get('src_pkg') for flavour in whitelisted_cloud_flavours
}
--
2.38.1

View File

@ -1,42 +0,0 @@
From a2f35c0aa4e00936e58c17a94d4f1507a3287c72 Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Mon, 17 Oct 2022 12:59:22 +0200
Subject: [PATCH 19/32] rhui(azure-sap-apps): handle EUS SAP Apps content on
RHEL8+
---
.../common/actors/cloud/checkrhui/actor.py | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/repos/system_upgrade/common/actors/cloud/checkrhui/actor.py b/repos/system_upgrade/common/actors/cloud/checkrhui/actor.py
index 822c7535..a56bb1e1 100644
--- a/repos/system_upgrade/common/actors/cloud/checkrhui/actor.py
+++ b/repos/system_upgrade/common/actors/cloud/checkrhui/actor.py
@@ -3,6 +3,7 @@ import os
from leapp import reporting
from leapp.actors import Actor
from leapp.libraries.common import rhsm, rhui
+from leapp.libraries.common.config.version import get_source_major_version
from leapp.libraries.common.rpms import has_package
from leapp.libraries.stdlib import api
from leapp.models import (
@@ -105,9 +106,15 @@ class CheckRHUI(Actor):
if info['src_pkg'] != info['target_pkg']:
self.produce(RpmTransactionTasks(to_install=[info['target_pkg']]))
self.produce(RpmTransactionTasks(to_remove=[info['src_pkg']]))
- if provider in ('azure-sap', 'azure-sap-apps'):
+ # Handle azure SAP systems that use two RHUI clients - one for RHEL content, one for SAP content
+ if provider == 'azure-sap':
azure_nonsap_pkg = rhui.RHUI_CLOUD_MAP[upg_path]['azure']['src_pkg']
self.produce(RpmTransactionTasks(to_remove=[azure_nonsap_pkg]))
+ elif provider == 'azure-sap-apps':
+ # SAP Apps systems have EUS content channel from RHEL8+
+ src_rhel_content_type = 'azure' if get_source_major_version() == '7' else 'azure-eus'
+ azure_nonsap_pkg = rhui.RHUI_CLOUD_MAP[upg_path][src_rhel_content_type]['src_pkg']
+ self.produce(RpmTransactionTasks(to_remove=[azure_nonsap_pkg]))
self.produce(RHUIInfo(provider=provider))
self.produce(RequiredTargetUserspacePackages(packages=[info['target_pkg']]))
--
2.38.1

View File

@ -1,32 +0,0 @@
From a06e248faa3b336c09ee6137eee54a1a0256162b Mon Sep 17 00:00:00 2001
From: Vinzenz Feenstra <vfeenstr@redhat.com>
Date: Wed, 19 Oct 2022 21:05:00 +0200
Subject: [PATCH 20/32] checksaphana: Move to common
We need to start handling also el8 to el9 upgrades now.
Signed-off-by: Vinzenz Feenstra <vfeenstr@redhat.com>
---
.../{el7toel8 => common}/actors/checksaphana/actor.py | 0
.../actors/checksaphana/libraries/checksaphana.py | 0
.../actors/checksaphana/tests/test_checksaphana.py | 0
3 files changed, 0 insertions(+), 0 deletions(-)
rename repos/system_upgrade/{el7toel8 => common}/actors/checksaphana/actor.py (100%)
rename repos/system_upgrade/{el7toel8 => common}/actors/checksaphana/libraries/checksaphana.py (100%)
rename repos/system_upgrade/{el7toel8 => common}/actors/checksaphana/tests/test_checksaphana.py (100%)
diff --git a/repos/system_upgrade/el7toel8/actors/checksaphana/actor.py b/repos/system_upgrade/common/actors/checksaphana/actor.py
similarity index 100%
rename from repos/system_upgrade/el7toel8/actors/checksaphana/actor.py
rename to repos/system_upgrade/common/actors/checksaphana/actor.py
diff --git a/repos/system_upgrade/el7toel8/actors/checksaphana/libraries/checksaphana.py b/repos/system_upgrade/common/actors/checksaphana/libraries/checksaphana.py
similarity index 100%
rename from repos/system_upgrade/el7toel8/actors/checksaphana/libraries/checksaphana.py
rename to repos/system_upgrade/common/actors/checksaphana/libraries/checksaphana.py
diff --git a/repos/system_upgrade/el7toel8/actors/checksaphana/tests/test_checksaphana.py b/repos/system_upgrade/common/actors/checksaphana/tests/test_checksaphana.py
similarity index 100%
rename from repos/system_upgrade/el7toel8/actors/checksaphana/tests/test_checksaphana.py
rename to repos/system_upgrade/common/actors/checksaphana/tests/test_checksaphana.py
--
2.38.1

View File

@ -1,277 +0,0 @@
From b716765e638156c9a5cb21a474d1203b695acf8d Mon Sep 17 00:00:00 2001
From: Vinzenz Feenstra <vfeenstr@redhat.com>
Date: Wed, 19 Oct 2022 21:42:14 +0200
Subject: [PATCH 21/32] checksaphana: Adjust for el7toel8 and el8toel9
requirements
Previously only upgrades from el7toel8 were supported for SAP HANA.
This patch will introduce the adjustments necessary to allow the
upgrade of RHEL with SAP Hana installed even on el8toel9.
Signed-off-by: Vinzenz Feenstra <vfeenstr@redhat.com>
---
.../checksaphana/libraries/checksaphana.py | 64 ++++++++++++----
.../checksaphana/tests/test_checksaphana.py | 73 +++++++++++++++++--
2 files changed, 117 insertions(+), 20 deletions(-)
diff --git a/repos/system_upgrade/common/actors/checksaphana/libraries/checksaphana.py b/repos/system_upgrade/common/actors/checksaphana/libraries/checksaphana.py
index e540ccd1..564d86b8 100644
--- a/repos/system_upgrade/common/actors/checksaphana/libraries/checksaphana.py
+++ b/repos/system_upgrade/common/actors/checksaphana/libraries/checksaphana.py
@@ -1,5 +1,5 @@
from leapp import reporting
-from leapp.libraries.common.config import architecture
+from leapp.libraries.common.config import architecture, version
from leapp.libraries.stdlib import api
from leapp.models import SapHanaInfo
@@ -7,8 +7,17 @@ from leapp.models import SapHanaInfo
# Requirement is SAP HANA 2.00 rev 54 which is the minimal supported revision for both RHEL 7.9 and RHEL 8.2
SAP_HANA_MINIMAL_MAJOR_VERSION = 2
-SAP_HANA_RHEL8_REQUIRED_PATCH_LEVELS = ((5, 54, 0),)
-SAP_HANA_MINIMAL_VERSION_STRING = 'HANA 2.0 SPS05 rev 54 or later'
+# RHEL 8.2 target requirements
+SAP_HANA_RHEL82_REQUIRED_PATCH_LEVELS = ((5, 54, 0),)
+SAP_HANA_RHEL82_MINIMAL_VERSION_STRING = 'HANA 2.0 SPS05 rev 54 or later'
+
+# RHEL 8.6 target requirements
+SAP_HANA_RHEL86_REQUIRED_PATCH_LEVELS = ((5, 59, 2),)
+SAP_HANA_RHEL86_MINIMAL_VERSION_STRING = 'HANA 2.0 SPS05 rev 59.02 or later'
+
+# RHEL 9 target requirements
+SAP_HANA_RHEL9_REQUIRED_PATCH_LEVELS = ((5, 59, 4), (6, 63, 0))
+SAP_HANA_RHEL9_MINIMAL_VERSION_STRING = 'HANA 2.0 SPS05 rev 59.04 or later, or SPS06 rev 63 or later'
def _manifest_get(manifest, key, default_value=None):
@@ -56,6 +65,16 @@ def _create_detected_instances_list(details):
return ''
+def _min_ver_string():
+ if version.get_target_major_version() == '8':
+ ver_str = SAP_HANA_RHEL86_MINIMAL_VERSION_STRING
+ if version.matches_target_version('8.2'):
+ ver_str = SAP_HANA_RHEL82_MINIMAL_VERSION_STRING
+ else:
+ ver_str = SAP_HANA_RHEL9_MINIMAL_VERSION_STRING
+ return ver_str
+
+
def version1_check(info):
""" Creates a report for SAP HANA instances running on version 1 """
found = {}
@@ -64,6 +83,7 @@ def version1_check(info):
_add_hana_details(found, instance)
if found:
+ min_ver_string = _min_ver_string()
detected = _create_detected_instances_list(found)
reporting.create_report([
reporting.Title('Found SAP HANA 1 which is not supported with the target version of RHEL'),
@@ -75,7 +95,7 @@ def version1_check(info):
reporting.Severity(reporting.Severity.HIGH),
reporting.RemediationHint((
'In order to upgrade RHEL, you will have to upgrade your SAP HANA 1.0 software to '
- '{supported}.'.format(supported=SAP_HANA_MINIMAL_VERSION_STRING))),
+ '{supported}.'.format(supported=min_ver_string))),
reporting.ExternalLink(url='https://launchpad.support.sap.com/#/notes/2235581',
title='SAP HANA: Supported Operating Systems'),
reporting.Groups([reporting.Groups.SANITY]),
@@ -100,11 +120,11 @@ def _major_version_check(instance):
return False
-def _sp_rev_patchlevel_check(instance):
+def _sp_rev_patchlevel_check(instance, patchlevels):
""" Checks whether this SP, REV & PatchLevel are eligible """
number = _manifest_get(instance.manifest, 'rev-number', '000')
if len(number) > 2 and number.isdigit():
- required_sp_levels = [r[0] for r in SAP_HANA_RHEL8_REQUIRED_PATCH_LEVELS]
+ required_sp_levels = [r[0] for r in patchlevels]
lowest_sp = min(required_sp_levels)
highest_sp = max(required_sp_levels)
sp = int(number[0:2].lstrip('0') or '0')
@@ -114,7 +134,7 @@ def _sp_rev_patchlevel_check(instance):
if sp > highest_sp:
# Less than minimal required SP
return True
- for requirements in SAP_HANA_RHEL8_REQUIRED_PATCH_LEVELS:
+ for requirements in patchlevels:
req_sp, req_rev, req_pl = requirements
if sp == req_sp:
rev = int(number.lstrip('0') or '0')
@@ -134,7 +154,13 @@ def _sp_rev_patchlevel_check(instance):
def _fullfills_hana_min_version(instance):
""" Performs a check whether the version of SAP HANA fulfills the minimal requirements for the target RHEL """
- return _major_version_check(instance) and _sp_rev_patchlevel_check(instance)
+ if version.get_target_major_version() == '8':
+ patchlevels = SAP_HANA_RHEL86_REQUIRED_PATCH_LEVELS
+ if version.matches_target_version('8.2'):
+ patchlevels = SAP_HANA_RHEL82_REQUIRED_PATCH_LEVELS
+ else:
+ patchlevels = SAP_HANA_RHEL9_REQUIRED_PATCH_LEVELS
+ return _major_version_check(instance) and _sp_rev_patchlevel_check(instance, patchlevels)
def version2_check(info):
@@ -147,17 +173,18 @@ def version2_check(info):
_add_hana_details(found, instance)
if found:
+ min_ver_string = _min_ver_string()
detected = _create_detected_instances_list(found)
reporting.create_report([
- reporting.Title('SAP HANA needs to be updated before upgrade'),
+ reporting.Title('SAP HANA needs to be updated before the RHEL upgrade'),
reporting.Summary(
('A newer version of SAP HANA is required in order continue with the upgrade.'
' {min_hana_version} is required for the target version of RHEL.\n\n'
- 'The following SAP HANA instances have been detected to be running with a lower version'
+ 'The following SAP HANA instances have been detected to be installed with a lower version'
' than required on the target system:\n'
- '{detected}').format(detected=detected, min_hana_version=SAP_HANA_MINIMAL_VERSION_STRING)
+ '{detected}').format(detected=detected, min_hana_version=min_ver_string)
),
- reporting.RemediationHint('Update SAP HANA at least to {}'.format(SAP_HANA_MINIMAL_VERSION_STRING)),
+ reporting.RemediationHint('Update SAP HANA at least to {}'.format(min_ver_string)),
reporting.ExternalLink(url='https://launchpad.support.sap.com/#/notes/2235581',
title='SAP HANA: Supported Operating Systems'),
reporting.Severity(reporting.Severity.HIGH),
@@ -170,6 +197,15 @@ def version2_check(info):
def platform_check():
""" Creates an inhibitor report in case the system is not running on x86_64 """
if not architecture.matches_architecture(architecture.ARCH_X86_64):
+ if version.get_target_major_version() == '8':
+ elink = reporting.ExternalLink(
+ url='https://access.redhat.com/solutions/5533441',
+ title='How do I upgrade from Red Hat Enterprise Linux 7 to Red Hat Enterprise Linux 8 with SAP HANA')
+ else:
+ elink = reporting.ExternalLink(
+ url='https://access.redhat.com/solutions/6980855',
+ title='How to in-place upgrade SAP environments from RHEL 8 to RHEL 9')
+
reporting.create_report([
reporting.Title('SAP HANA upgrades are only supported on X86_64 systems'),
reporting.Summary(
@@ -180,9 +216,7 @@ def platform_check():
reporting.Groups([reporting.Groups.SANITY]),
reporting.Groups([reporting.Groups.INHIBITOR]),
reporting.Audience('sysadmin'),
- reporting.ExternalLink(
- url='https://access.redhat.com/solutions/5533441',
- title='How do I upgrade from Red Hat Enterprise Linux 7 to Red Hat Enterprise Linux 8 with SAP HANA')
+ elink,
])
return False
diff --git a/repos/system_upgrade/common/actors/checksaphana/tests/test_checksaphana.py b/repos/system_upgrade/common/actors/checksaphana/tests/test_checksaphana.py
index 3f1d4230..6f61d0bf 100644
--- a/repos/system_upgrade/common/actors/checksaphana/tests/test_checksaphana.py
+++ b/repos/system_upgrade/common/actors/checksaphana/tests/test_checksaphana.py
@@ -2,7 +2,7 @@ import pytest
from leapp.libraries.actor import checksaphana
from leapp.libraries.common import testutils
-from leapp.libraries.stdlib import run
+from leapp.libraries.common.config import version
from leapp.models import SapHanaManifestEntry
SAPHANA1_MANIFEST = '''comptype: HDB
@@ -77,7 +77,7 @@ def _report_has_pattern(report, pattern):
EXPECTED_TITLE_PATTERNS = {
'running': lambda report: _report_has_pattern(report, 'running SAP HANA'),
'v1': lambda report: _report_has_pattern(report, 'Found SAP HANA 1'),
- 'low': lambda report: _report_has_pattern(report, 'SAP HANA needs to be updated before upgrade'),
+ 'low': lambda report: _report_has_pattern(report, 'SAP HANA needs to be updated before the RHEL upgrade'),
}
@@ -180,8 +180,69 @@ class MockSAPHanaVersionInstance(object):
(2, 49, 0, True),
)
)
-def test_checksaphana__fullfills_hana_min_version(monkeypatch, major, rev, patchlevel, result):
- monkeypatch.setattr(checksaphana, 'SAP_HANA_RHEL8_REQUIRED_PATCH_LEVELS', ((4, 48, 2), (5, 52, 0)))
+def test_checksaphana__fullfills_rhel82_hana_min_version(monkeypatch, major, rev, patchlevel, result):
+ monkeypatch.setattr(version, 'get_target_major_version', lambda: '8')
+ monkeypatch.setattr(version, 'get_target_version', lambda: '8.2')
+ monkeypatch.setattr(checksaphana, 'SAP_HANA_RHEL82_REQUIRED_PATCH_LEVELS', ((4, 48, 2), (5, 52, 0)))
+ assert checksaphana._fullfills_hana_min_version(
+ MockSAPHanaVersionInstance(
+ major=major,
+ rev=rev,
+ patchlevel=patchlevel,
+ )
+ ) == result
+
+
+@pytest.mark.parametrize(
+ 'major,rev,patchlevel,result', (
+ (2, 52, 0, True),
+ (2, 52, 1, True),
+ (2, 52, 2, True),
+ (2, 53, 0, True),
+ (2, 60, 0, True),
+ (2, 48, 2, True),
+ (2, 48, 1, False),
+ (2, 48, 0, False),
+ (2, 38, 2, False),
+ (2, 49, 0, True),
+ )
+)
+def test_checksaphana__fullfills_rhel86_hana_min_version(monkeypatch, major, rev, patchlevel, result):
+ monkeypatch.setattr(version, 'get_target_major_version', lambda: '8')
+ monkeypatch.setattr(version, 'get_target_version', lambda: '8.6')
+ monkeypatch.setattr(checksaphana, 'SAP_HANA_RHEL86_REQUIRED_PATCH_LEVELS', ((4, 48, 2), (5, 52, 0)))
+ assert checksaphana._fullfills_hana_min_version(
+ MockSAPHanaVersionInstance(
+ major=major,
+ rev=rev,
+ patchlevel=patchlevel,
+ )
+ ) == result
+
+
+@pytest.mark.parametrize(
+ 'major,rev,patchlevel,result', (
+ (2, 59, 4, True),
+ (2, 59, 5, True),
+ (2, 59, 6, True),
+ (2, 60, 0, False),
+ (2, 61, 0, False),
+ (2, 62, 0, False),
+ (2, 63, 2, True),
+ (2, 48, 1, False),
+ (2, 48, 0, False),
+ (2, 59, 0, False),
+ (2, 59, 1, False),
+ (2, 59, 2, False),
+ (2, 59, 3, False),
+ (2, 38, 2, False),
+ (2, 64, 0, True),
+ )
+)
+def test_checksaphana__fullfills_hana_rhel9_min_version(monkeypatch, major, rev, patchlevel, result):
+ monkeypatch.setattr(version, 'get_target_major_version', lambda: '9')
+ monkeypatch.setattr(version, 'get_target_version', lambda: '9.0')
+ monkeypatch.setattr(checksaphana, 'SAP_HANA_RHEL9_REQUIRED_PATCH_LEVELS', ((5, 59, 4), (6, 63, 0)))
assert checksaphana._fullfills_hana_min_version(
MockSAPHanaVersionInstance(
major=major,
@@ -196,7 +257,9 @@ def test_checksaphana_perform_check(monkeypatch):
v2names = ('JKL', 'MNO', 'PQR', 'STU')
v2lownames = ('VWX', 'YZA')
reports = []
- monkeypatch.setattr(checksaphana, 'SAP_HANA_RHEL8_REQUIRED_PATCH_LEVELS', ((4, 48, 2), (5, 52, 0)))
+ monkeypatch.setattr(checksaphana, 'SAP_HANA_RHEL86_REQUIRED_PATCH_LEVELS', ((4, 48, 2), (5, 52, 0)))
+ monkeypatch.setattr(version, 'get_target_major_version', lambda: '8')
+ monkeypatch.setattr(version, 'get_target_version', lambda: '8.6')
monkeypatch.setattr(checksaphana.reporting, 'create_report', _report_collector(reports))
monkeypatch.setattr(checksaphana.api, 'consume', _consume_mock_sap_hana_info(
v1names=v1names, v2names=v2names, v2lownames=v2lownames, running=True))
--
2.38.1

View File

@ -1,105 +0,0 @@
From 6ec9f0adf2e9197e68a1919c1f69344fb0534eca Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Wed, 19 Oct 2022 14:15:41 +0200
Subject: [PATCH 22/32] Add an actor that enables device_cio_free.service on
s390x
After an IPU the device_cio_free.service systemd service (exclusive to
s390x) is disabled even though the vendor preset is set to disable. The
new actor instruct to enable the service during the IPU.
The service is used to enable devices not explicitly enabled on kernel
command line.
Jira ref.: OAMG-6302
---
.../enableddeviceciofreeservices390/actor.py | 21 ++++++++++++
.../libraries/enabledeviceciofreeservice.py | 8 +++++
.../tests/test_enableddeviceciofreeservice.py | 32 +++++++++++++++++++
3 files changed, 61 insertions(+)
create mode 100644 repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/actor.py
create mode 100644 repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/libraries/enabledeviceciofreeservice.py
create mode 100644 repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/tests/test_enableddeviceciofreeservice.py
diff --git a/repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/actor.py b/repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/actor.py
new file mode 100644
index 00000000..4928710e
--- /dev/null
+++ b/repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/actor.py
@@ -0,0 +1,21 @@
+from leapp.actors import Actor
+from leapp.libraries.actor import enabledeviceciofreeservice
+from leapp.models import SystemdServicesTasks
+from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
+
+
+class EnableDeviceCioFreeService(Actor):
+ """
+ Enables device_cio_free.service systemd service on s390x
+
+ After an upgrade this service ends up disabled even though it's vendor preset is set to enabled.
+ The service is used to enable devices which are not explicitly enabled on the kernel command line.
+ """
+
+ name = 'enable_device_cio_free_service'
+ consumes = ()
+ produces = (SystemdServicesTasks,)
+ tags = (ChecksPhaseTag, IPUWorkflowTag)
+
+ def process(self):
+ enabledeviceciofreeservice.process()
diff --git a/repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/libraries/enabledeviceciofreeservice.py b/repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/libraries/enabledeviceciofreeservice.py
new file mode 100644
index 00000000..97e36f10
--- /dev/null
+++ b/repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/libraries/enabledeviceciofreeservice.py
@@ -0,0 +1,8 @@
+from leapp.libraries.common.config import architecture
+from leapp.libraries.stdlib import api
+from leapp.models import SystemdServicesTasks
+
+
+def process():
+ if architecture.matches_architecture(architecture.ARCH_S390X):
+ api.produce(SystemdServicesTasks(to_enable=['device_cio_free.service']))
diff --git a/repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/tests/test_enableddeviceciofreeservice.py b/repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/tests/test_enableddeviceciofreeservice.py
new file mode 100644
index 00000000..42527595
--- /dev/null
+++ b/repos/system_upgrade/el7toel8/actors/enableddeviceciofreeservices390/tests/test_enableddeviceciofreeservice.py
@@ -0,0 +1,32 @@
+import pytest
+
+from leapp.libraries.actor import enabledeviceciofreeservice
+from leapp.libraries.common.config import architecture
+from leapp.libraries.common.testutils import CurrentActorMocked, produce_mocked
+from leapp.libraries.stdlib import api
+from leapp.models import SystemdServicesTasks
+
+
+def test_task_produced_on_s390(monkeypatch):
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch=architecture.ARCH_S390X))
+ monkeypatch.setattr(api, "produce", produce_mocked())
+
+ enabledeviceciofreeservice.process()
+
+ assert api.produce.called
+ assert isinstance(api.produce.model_instances[0], SystemdServicesTasks)
+ assert api.produce.model_instances[0].to_enable == ['device_cio_free.service']
+
+
+@pytest.mark.parametrize('arch', [
+ architecture.ARCH_X86_64,
+ architecture.ARCH_ARM64,
+ architecture.ARCH_PPC64LE,
+])
+def test_task_not_produced_on_non_s390(monkeypatch, arch):
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch=arch))
+ monkeypatch.setattr(api, "produce", produce_mocked())
+
+ enabledeviceciofreeservice.process()
+
+ assert not api.produce.called
--
2.38.1

View File

@ -1,240 +0,0 @@
From 3b5f7416d5f680cbeb777ba4ba33a4bd4787d6f6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Renaud=20M=C3=A9trich?= <rmetrich@redhat.com>
Date: Mon, 7 Nov 2022 09:26:45 +0100
Subject: [PATCH 23/32] Add the scanzfcp actor handling the IPU with ZFCP
(s390x)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
When having systems configured with ZFCP instead of DASD, the disks
are not seen while rebooting because `/etc/zfcp.conf` is missing
in the initramfs.
When the file exists, it's copied inside the userspace container
and installed in the upgrade initramfs, producing
TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks
messages.
pstodulk: updated unit-tests in the scanzfcp and scandasd actors
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2140563
Signed-off-by: Renaud Métrich <rmetrich@redhat.com>
---
.../actors/scandasd/libraries/scandasd.py | 4 +-
.../scandasd/tests/unit_test_scandasd.py | 23 +++++---
.../common/actors/scanzfcp/actor.py | 24 ++++++++
.../actors/scanzfcp/libraries/scanzfcp.py | 25 ++++++++
.../scanzfcp/tests/unit_test_scanzfcp.py | 59 +++++++++++++++++++
5 files changed, 124 insertions(+), 11 deletions(-)
create mode 100644 repos/system_upgrade/common/actors/scanzfcp/actor.py
create mode 100644 repos/system_upgrade/common/actors/scanzfcp/libraries/scanzfcp.py
create mode 100644 repos/system_upgrade/common/actors/scanzfcp/tests/unit_test_scanzfcp.py
diff --git a/repos/system_upgrade/common/actors/scandasd/libraries/scandasd.py b/repos/system_upgrade/common/actors/scandasd/libraries/scandasd.py
index 3e1cba66..ff3104d4 100644
--- a/repos/system_upgrade/common/actors/scandasd/libraries/scandasd.py
+++ b/repos/system_upgrade/common/actors/scandasd/libraries/scandasd.py
@@ -18,8 +18,8 @@ def process():
copy_files = [CopyFile(src=DASD_CONF)]
api.produce(UpgradeInitramfsTasks(include_files=[DASD_CONF]))
else:
- api.current_logger().warning(
- "The {} file has not been discovered. DASD not used?"
+ api.current_logger().info(
+ "The {} file has not been discovered. DASD not used."
.format(DASD_CONF)
)
api.produce(TargetUserSpaceUpgradeTasks(copy_files=copy_files, install_rpms=['s390utils-core']))
diff --git a/repos/system_upgrade/common/actors/scandasd/tests/unit_test_scandasd.py b/repos/system_upgrade/common/actors/scandasd/tests/unit_test_scandasd.py
index e4eea10c..af8f951b 100644
--- a/repos/system_upgrade/common/actors/scandasd/tests/unit_test_scandasd.py
+++ b/repos/system_upgrade/common/actors/scandasd/tests/unit_test_scandasd.py
@@ -3,18 +3,18 @@ import os
import pytest
from leapp.libraries.actor import scandasd
-from leapp.libraries.common.config.architecture import ARCH_S390X
-from leapp.libraries.common.testutils import logger_mocked, produce_mocked
+from leapp.libraries.common.config import architecture
+from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked, produce_mocked
from leapp.models import CopyFile, TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks
def test_dasd_exists(monkeypatch):
- monkeypatch.setattr(scandasd.architecture, 'matches_architecture', lambda dummy: True)
+ monkeypatch.setattr(scandasd.api, 'current_actor', CurrentActorMocked(arch=architecture.ARCH_S390X))
monkeypatch.setattr(scandasd.api, 'current_logger', logger_mocked())
monkeypatch.setattr(scandasd.api, 'produce', produce_mocked())
monkeypatch.setattr(os.path, 'isfile', lambda dummy: True)
scandasd.process()
- assert not scandasd.api.current_logger.warnmsg
+ assert not scandasd.api.current_logger.infomsg
assert scandasd.api.produce.called == 2
tusut_flag = False
uit_flag = False
@@ -30,12 +30,12 @@ def test_dasd_exists(monkeypatch):
def test_dasd_not_found(monkeypatch):
- monkeypatch.setattr(scandasd.architecture, 'matches_architecture', lambda dummy: True)
+ monkeypatch.setattr(scandasd.api, 'current_actor', CurrentActorMocked(arch=architecture.ARCH_S390X))
monkeypatch.setattr(scandasd.api, 'current_logger', logger_mocked())
monkeypatch.setattr(os.path, 'isfile', lambda dummy: False)
monkeypatch.setattr(scandasd.api, 'produce', produce_mocked())
scandasd.process()
- assert scandasd.api.current_logger.warnmsg
+ assert scandasd.api.current_logger.infomsg
assert scandasd.api.produce.called == 1
assert len(scandasd.api.produce.model_instances) == 1
assert isinstance(scandasd.api.produce.model_instances[0], TargetUserSpaceUpgradeTasks)
@@ -44,11 +44,16 @@ def test_dasd_not_found(monkeypatch):
@pytest.mark.parametrize('isfile', [True, False])
-def test_non_ibmz_arch(monkeypatch, isfile):
- monkeypatch.setattr(scandasd.architecture, 'matches_architecture', lambda dummy: False)
+@pytest.mark.parametrize('arch', [
+ architecture.ARCH_X86_64,
+ architecture.ARCH_ARM64,
+ architecture.ARCH_PPC64LE,
+])
+def test_non_ibmz_arch(monkeypatch, isfile, arch):
+ monkeypatch.setattr(scandasd.api, 'current_actor', CurrentActorMocked(arch=arch))
monkeypatch.setattr(scandasd.api, 'current_logger', logger_mocked())
monkeypatch.setattr(scandasd.api, 'produce', produce_mocked())
monkeypatch.setattr(os.path, 'isfile', lambda dummy: isfile)
scandasd.process()
- assert not scandasd.api.current_logger.warnmsg
+ assert not scandasd.api.current_logger.infomsg
assert not scandasd.api.produce.called
diff --git a/repos/system_upgrade/common/actors/scanzfcp/actor.py b/repos/system_upgrade/common/actors/scanzfcp/actor.py
new file mode 100644
index 00000000..9817fdc8
--- /dev/null
+++ b/repos/system_upgrade/common/actors/scanzfcp/actor.py
@@ -0,0 +1,24 @@
+
+from leapp.actors import Actor
+from leapp.libraries.actor import scanzfcp
+from leapp.models import TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks
+from leapp.tags import FactsPhaseTag, IPUWorkflowTag
+
+
+class ScanZFCP(Actor):
+ """
+ In case of s390x architecture, check whether ZFCP is used.
+
+ The current check is based just on existence of the /etc/zfcp.conf file.
+ If it exists, produce UpgradeInitramfsTasks msg to ensure the file
+ is available inside the target userspace to be able to generate the
+ upgrade init ramdisk correctly.
+ """
+
+ name = 'scanzfcp'
+ consumes = ()
+ produces = (TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks)
+ tags = (IPUWorkflowTag, FactsPhaseTag)
+
+ def process(self):
+ scanzfcp.process()
diff --git a/repos/system_upgrade/common/actors/scanzfcp/libraries/scanzfcp.py b/repos/system_upgrade/common/actors/scanzfcp/libraries/scanzfcp.py
new file mode 100644
index 00000000..72f83f8f
--- /dev/null
+++ b/repos/system_upgrade/common/actors/scanzfcp/libraries/scanzfcp.py
@@ -0,0 +1,25 @@
+import os
+
+from leapp.libraries.common.config import architecture
+from leapp.libraries.stdlib import api
+from leapp.models import CopyFile, TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks
+
+ZFCP_CONF = '/etc/zfcp.conf'
+
+
+def process():
+ if not architecture.matches_architecture(architecture.ARCH_S390X):
+ return
+ copy_files = []
+ if os.path.isfile(ZFCP_CONF):
+ # the file has to be copied into the targetuserspace container first,
+ # then it can be included into the initramfs ==> both messages are
+ # needed to be produced
+ copy_files = [CopyFile(src=ZFCP_CONF)]
+ api.produce(UpgradeInitramfsTasks(include_files=[ZFCP_CONF]))
+ else:
+ api.current_logger().info(
+ "The {} file has not been discovered. ZFCP not used."
+ .format(ZFCP_CONF)
+ )
+ api.produce(TargetUserSpaceUpgradeTasks(copy_files=copy_files, install_rpms=['s390utils-core']))
diff --git a/repos/system_upgrade/common/actors/scanzfcp/tests/unit_test_scanzfcp.py b/repos/system_upgrade/common/actors/scanzfcp/tests/unit_test_scanzfcp.py
new file mode 100644
index 00000000..1b1f840c
--- /dev/null
+++ b/repos/system_upgrade/common/actors/scanzfcp/tests/unit_test_scanzfcp.py
@@ -0,0 +1,59 @@
+import os
+
+import pytest
+
+from leapp.libraries.actor import scanzfcp
+from leapp.libraries.common.config import architecture
+from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked, produce_mocked
+from leapp.models import CopyFile, TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks
+
+
+def test_zfcp_exists(monkeypatch):
+ monkeypatch.setattr(scanzfcp.api, 'current_actor', CurrentActorMocked(arch=architecture.ARCH_S390X))
+ monkeypatch.setattr(scanzfcp.api, 'current_logger', logger_mocked())
+ monkeypatch.setattr(scanzfcp.api, 'produce', produce_mocked())
+ monkeypatch.setattr(os.path, 'isfile', lambda dummy: True)
+ scanzfcp.process()
+ assert not scanzfcp.api.current_logger.infomsg
+ assert scanzfcp.api.produce.called == 2
+ tusut_flag = False
+ uit_flag = False
+ for msg in scanzfcp.api.produce.model_instances:
+ if isinstance(msg, TargetUserSpaceUpgradeTasks):
+ assert [CopyFile(src=scanzfcp.ZFCP_CONF)] == msg.copy_files
+ assert msg.install_rpms == ['s390utils-core']
+ tusut_flag = True
+ elif isinstance(msg, UpgradeInitramfsTasks):
+ assert [scanzfcp.ZFCP_CONF] == msg.include_files
+ uit_flag = True
+ assert tusut_flag and uit_flag
+
+
+def test_zfcp_not_found(monkeypatch):
+ monkeypatch.setattr(scanzfcp.api, 'current_actor', CurrentActorMocked(arch=architecture.ARCH_S390X))
+ monkeypatch.setattr(scanzfcp.api, 'current_logger', logger_mocked())
+ monkeypatch.setattr(scanzfcp.os.path, 'isfile', lambda dummy: False)
+ monkeypatch.setattr(scanzfcp.api, 'produce', produce_mocked())
+ scanzfcp.process()
+ assert scanzfcp.api.current_logger.infomsg
+ assert scanzfcp.api.produce.called == 1
+ assert len(scanzfcp.api.produce.model_instances) == 1
+ assert isinstance(scanzfcp.api.produce.model_instances[0], TargetUserSpaceUpgradeTasks)
+ assert scanzfcp.api.produce.model_instances[0].install_rpms == ['s390utils-core']
+ assert not scanzfcp.api.produce.model_instances[0].copy_files
+
+
+@pytest.mark.parametrize('isfile', [True, False])
+@pytest.mark.parametrize('arch', [
+ architecture.ARCH_X86_64,
+ architecture.ARCH_ARM64,
+ architecture.ARCH_PPC64LE,
+])
+def test_non_ibmz_arch(monkeypatch, isfile, arch):
+ monkeypatch.setattr(scanzfcp.api, 'current_actor', CurrentActorMocked(arch=arch))
+ monkeypatch.setattr(scanzfcp.api, 'current_logger', logger_mocked())
+ monkeypatch.setattr(scanzfcp.api, 'produce', produce_mocked())
+ monkeypatch.setattr(os.path, 'isfile', lambda dummy: isfile)
+ scanzfcp.process()
+ assert not scanzfcp.api.current_logger.infomsg
+ assert not scanzfcp.api.produce.called
--
2.38.1

View File

@ -1,118 +0,0 @@
From a6445b391a01bf17d3ad8229ca1185b10479f467 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Renaud=20M=C3=A9trich?= <rmetrich@redhat.com>
Date: Mon, 7 Nov 2022 09:33:32 +0100
Subject: [PATCH 24/32] ziplconverttoblscfg: bind mount /dev & /boot into the
userspace container (s390x)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
The conversion of ZIPL to BLS on IBM Z machines failed when
a) the machine was configured using ZFCP instead of DASD
b) /boot was not on a separate partition
In case a), the zipl-switch-to-blscfg script failed as the /dev has
not been propagated to into the el8userspace container. Regarding
that, the /dev did not contain all required devices needed for the
correct conversion.
With this fix, the /dev is bindmounted into the el8userspace container
using the (systemd-nspawn) `--bind` option. The direct bind mounting
via `leapp.libraries.common.mounting.BindMount` cannot be used in this
case as it blocks the correct start of the container.
In case b), the content of /boot has been removed during the upgrade
due to problems when using BindMount on a normal directory (that is not
a mountpoint). This has been possibly resolved by this commit also,
as the /boot has been propagated using the --bind (sysmd-nspawn)
option as well. (Untested)
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2140563
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1901440
Signed-off-by: Renaud Métrich <rmetrich@redhat.com>
---
.../actors/ziplconverttoblscfg/actor.py | 66 +++++++++----------
1 file changed, 33 insertions(+), 33 deletions(-)
diff --git a/repos/system_upgrade/el7toel8/actors/ziplconverttoblscfg/actor.py b/repos/system_upgrade/el7toel8/actors/ziplconverttoblscfg/actor.py
index e80c335d..441c538b 100644
--- a/repos/system_upgrade/el7toel8/actors/ziplconverttoblscfg/actor.py
+++ b/repos/system_upgrade/el7toel8/actors/ziplconverttoblscfg/actor.py
@@ -38,40 +38,40 @@ class ZiplConvertToBLSCFG(Actor):
# replace the original boot directory inside the container by the host one
# - as we cannot use zipl* pointing anywhere else than default directory
# - no, --bls-directory is not solution
- with mounting.BindMount(source='/boot', target=os.path.join(userspace.path, 'boot')):
+ # also make sure device nodes are available (requirement for zipl-switch-to-blscfg)
+ binds = ['/boot', '/dev']
+ with mounting.NspawnActions(base_dir=userspace.path, binds=binds) as context:
userspace_zipl_conf = os.path.join(userspace.path, 'etc', 'zipl.conf')
if os.path.exists(userspace_zipl_conf):
os.remove(userspace_zipl_conf)
- with mounting.NullMount(target=userspace.path) as userspace:
- with userspace.nspawn() as context:
- context.copy_to('/etc/zipl.conf', '/etc/zipl.conf')
- # zipl needs this one as well
- context.copy_to('/etc/machine-id', '/etc/machine-id')
- try:
- context.call(['/usr/sbin/zipl-switch-to-blscfg'])
- if filecmp.cmp('/etc/zipl.conf', userspace_zipl_conf):
- # When the files are same, zipl failed - see the switch script
- raise OSError('Failed to convert the ZIPL configuration to BLS.')
- context.copy_from('/etc/zipl.conf', '/etc/zipl.conf')
- except OSError as e:
- self.log.error('Could not call zipl-switch-to-blscfg command.',
- exc_info=True)
- raise StopActorExecutionError(
- message='Failed to execute zipl-switch-to-blscfg.',
- details={'details': str(e)}
- )
- except CalledProcessError as e:
- self.log.error('zipl-switch-to-blscfg execution failed,',
- exc_info=True)
- raise StopActorExecutionError(
- message='zipl-switch-to-blscfg execution failed with non zero exit code.',
- details={'details': str(e), 'stdout': e.stdout, 'stderr': e.stderr}
- )
+ context.copy_to('/etc/zipl.conf', '/etc/zipl.conf')
+ # zipl needs this one as well
+ context.copy_to('/etc/machine-id', '/etc/machine-id')
+ try:
+ context.call(['/usr/sbin/zipl-switch-to-blscfg'])
+ if filecmp.cmp('/etc/zipl.conf', userspace_zipl_conf):
+ # When the files are same, zipl failed - see the switch script
+ raise OSError('Failed to convert the ZIPL configuration to BLS.')
+ context.copy_from('/etc/zipl.conf', '/etc/zipl.conf')
+ except OSError as e:
+ self.log.error('Could not call zipl-switch-to-blscfg command.',
+ exc_info=True)
+ raise StopActorExecutionError(
+ message='Failed to execute zipl-switch-to-blscfg.',
+ details={'details': str(e)}
+ )
+ except CalledProcessError as e:
+ self.log.error('zipl-switch-to-blscfg execution failed,',
+ exc_info=True)
+ raise StopActorExecutionError(
+ message='zipl-switch-to-blscfg execution failed with non zero exit code.',
+ details={'details': str(e), 'stdout': e.stdout, 'stderr': e.stderr}
+ )
- # FIXME: we do not want to continue anymore, but we should clean
- # better.
- # NOTE: Basically, just removal of the /boot/loader dir content inside
- # could be enough, but we cannot remove /boot/loader because of boom
- # - - if we remove it, we will remove the snapshot as well
- # - - on the other hand, we shouldn't keep it there if zipl
- # - - has not been converted to BLS
+ # FIXME: we do not want to continue anymore, but we should clean
+ # better.
+ # NOTE: Basically, just removal of the /boot/loader dir content inside
+ # could be enough, but we cannot remove /boot/loader because of boom
+ # - - if we remove it, we will remove the snapshot as well
+ # - - on the other hand, we shouldn't keep it there if zipl
+ # - - has not been converted to BLS
--
2.38.1

File diff suppressed because it is too large Load Diff

View File

@ -1,227 +0,0 @@
From dc43277d4cab1f218a2b5d7e7743a1d2423c8c77 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Wed, 16 Nov 2022 14:01:45 +0100
Subject: [PATCH 26/32] systemd: Move (enable|disable|reenable)_unit functions
to the shared library
The functions are used to enable, disable, or re-enable the given
systemd unit. Originally they were part of the setsystemdservicesstate
actor, however we have realized they are needed in other actors too
in rare cases.
---
.../libraries/setsystemdservicesstate.py | 25 +++++-----
.../tests/test_setsystemdservicesstate.py | 48 +++++++++++-------
.../common/libraries/systemd.py | 50 +++++++++++++++++++
3 files changed, 93 insertions(+), 30 deletions(-)
diff --git a/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/libraries/setsystemdservicesstate.py b/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/libraries/setsystemdservicesstate.py
index 01272438..641605db 100644
--- a/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/libraries/setsystemdservicesstate.py
+++ b/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/libraries/setsystemdservicesstate.py
@@ -1,17 +1,8 @@
-from leapp.libraries.stdlib import api, CalledProcessError, run
+from leapp.libraries.common import systemd
+from leapp.libraries.stdlib import api, CalledProcessError
from leapp.models import SystemdServicesTasks
-def _try_set_service_state(command, service):
- try:
- # it is possible to call this on multiple units at once,
- # but failing to enable one service would cause others to not enable as well
- run(['systemctl', command, service])
- except CalledProcessError as err:
- api.current_logger().error('Failed to {} systemd unit "{}". Message: {}'.format(command, service, str(err)))
- # TODO(mmatuska) produce post-upgrade report
-
-
def process():
services_to_enable = set()
services_to_disable = set()
@@ -25,7 +16,15 @@ def process():
api.current_logger().error(msg)
for service in services_to_enable:
- _try_set_service_state('enable', service)
+ try:
+ systemd.enable_unit(service)
+ except CalledProcessError:
+ # TODO(mmatuska) produce post-upgrade report
+ pass
for service in services_to_disable:
- _try_set_service_state('disable', service)
+ try:
+ systemd.disable_unit(service)
+ except CalledProcessError:
+ # TODO(mmatuska) produce post-upgrade report
+ pass
diff --git a/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/tests/test_setsystemdservicesstate.py b/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/tests/test_setsystemdservicesstate.py
index dd153329..14d07537 100644
--- a/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/tests/test_setsystemdservicesstate.py
+++ b/repos/system_upgrade/common/actors/systemd/setsystemdservicesstates/tests/test_setsystemdservicesstate.py
@@ -2,50 +2,60 @@ import pytest
from leapp.libraries import stdlib
from leapp.libraries.actor import setsystemdservicesstate
+from leapp.libraries.common import systemd
from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked
from leapp.libraries.stdlib import api, CalledProcessError
from leapp.models import SystemdServicesTasks
-class MockedRun(object):
+class MockedSystemdCmd(object):
def __init__(self):
- self.commands = []
+ self.units = []
- def __call__(self, cmd, *args, **kwargs):
- self.commands.append(cmd)
+ def __call__(self, unit, *args, **kwargs):
+ self.units.append(unit)
return {}
@pytest.mark.parametrize(
- ('msgs', 'expected_calls'),
+ ('msgs', 'expect_enable_units', 'expect_disable_units'),
[
(
[SystemdServicesTasks(to_enable=['hello.service'],
to_disable=['getty.service'])],
- [['systemctl', 'enable', 'hello.service'], ['systemctl', 'disable', 'getty.service']]
+ ['hello.service'],
+ ['getty.service']
),
(
[SystemdServicesTasks(to_disable=['getty.service'])],
- [['systemctl', 'disable', 'getty.service']]
+ [],
+ ['getty.service']
),
(
[SystemdServicesTasks(to_enable=['hello.service'])],
- [['systemctl', 'enable', 'hello.service']]
+ ['hello.service'],
+ []
),
(
[SystemdServicesTasks()],
+ [],
[]
),
]
)
-def test_process(monkeypatch, msgs, expected_calls):
- mocked_run = MockedRun()
- monkeypatch.setattr(setsystemdservicesstate, 'run', mocked_run)
+def test_process(monkeypatch, msgs, expect_enable_units, expect_disable_units):
+ mocked_enable = MockedSystemdCmd()
+ monkeypatch.setattr(systemd, 'enable_unit', mocked_enable)
+
+ mocked_disable = MockedSystemdCmd()
+ monkeypatch.setattr(systemd, 'disable_unit', mocked_disable)
+
monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs))
setsystemdservicesstate.process()
- assert mocked_run.commands == expected_calls
+ assert mocked_enable.units == expect_enable_units
+ assert mocked_disable.units == expect_disable_units
def test_process_invalid(monkeypatch):
@@ -57,7 +67,7 @@ def test_process_invalid(monkeypatch):
msgs = [SystemdServicesTasks(to_enable=['invalid.service'])]
- monkeypatch.setattr(setsystemdservicesstate, 'run', mocked_run)
+ monkeypatch.setattr(systemd, 'run', mocked_run)
monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs))
monkeypatch.setattr(api, 'current_logger', logger_mocked())
@@ -69,10 +79,14 @@ def test_process_invalid(monkeypatch):
def test_enable_disable_conflict_logged(monkeypatch):
- msgs = [SystemdServicesTasks(to_enable=['hello.service'],
- to_disable=['hello.service'])]
- mocked_run = MockedRun()
- monkeypatch.setattr(setsystemdservicesstate, 'run', mocked_run)
+ msgs = [SystemdServicesTasks(to_enable=['hello.service'], to_disable=['hello.service'])]
+
+ mocked_enable = MockedSystemdCmd()
+ monkeypatch.setattr(systemd, 'enable_unit', mocked_enable)
+
+ mocked_disable = MockedSystemdCmd()
+ monkeypatch.setattr(systemd, 'disable_unit', mocked_disable)
+
monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs))
monkeypatch.setattr(api, 'current_logger', logger_mocked())
diff --git a/repos/system_upgrade/common/libraries/systemd.py b/repos/system_upgrade/common/libraries/systemd.py
index bbf71af7..c709f233 100644
--- a/repos/system_upgrade/common/libraries/systemd.py
+++ b/repos/system_upgrade/common/libraries/systemd.py
@@ -32,6 +32,56 @@ def get_broken_symlinks():
raise
+def _try_call_unit_command(command, unit):
+ try:
+ # it is possible to call this on multiple units at once,
+ # but failing to enable one service would cause others to not enable as well
+ run(['systemctl', command, unit])
+ except CalledProcessError as err:
+ msg = 'Failed to {} systemd unit "{}". Message: {}'.format(command, unit, str(err))
+ api.current_logger().error(msg)
+ raise err
+
+
+def enable_unit(unit):
+ """
+ Enable a systemd unit
+
+ It is strongly recommended to produce SystemdServicesTasks message instead,
+ unless it is absolutely necessary to handle failure yourself.
+
+ :param unit: The systemd unit to enable
+ :raises CalledProcessError: In case of failure
+ """
+ _try_call_unit_command('enable', unit)
+
+
+def disable_unit(unit):
+ """
+ Disable a systemd unit
+
+ It is strongly recommended to produce SystemdServicesTasks message instead,
+ unless it is absolutely necessary to handle failure yourself.
+
+ :param unit: The systemd unit to disable
+ :raises CalledProcessError: In case of failure
+ """
+ _try_call_unit_command('disable', unit)
+
+
+def reenable_unit(unit):
+ """
+ Re-enable a systemd unit
+
+ It is strongly recommended to produce SystemdServicesTasks message, unless it
+ is absolutely necessary to handle failure yourself.
+
+ :param unit: The systemd unit to re-enable
+ :raises CalledProcessError: In case of failure
+ """
+ _try_call_unit_command('reenable', unit)
+
+
def get_service_files():
"""
Get list of unit files of systemd services on the system
--
2.38.1

View File

@ -1,253 +0,0 @@
From 7a61c281946ffa0436da8f8837074f17e2103361 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Wed, 16 Nov 2022 14:11:39 +0100
Subject: [PATCH 27/32] Fix broken or incorrect systemd symlinks
Introduce repairsystemdsymlinks actor.
During the in-place upgrade process, it usually happens that some
symlinks become incorrect - symlinks are broken, or they are defined
in a wrong directory (e.g. when they are supposed to be defined in a
different systemd target). This has various reasons, but usually it's
caused by missing rpm scriptlets in particular rpms.
This change corrects only systemd symlinks that are (newly) broken during
the in-place upgrade. Symlinks that have been already broken before
the in-place upgrade are ignored.
Symlinks are handled in the following fashion, if the symlink points to:
- a removed unit, such a symlink is deleted
- a unit whose installation has been changed (e.g. changed WantedBy),
such symlinks are fixed (re-enabled using systemctl)
JIRA:
OAMG-5342
OAMG-5344
OAMG-6519 (possibly related)
OAMG-7755
Bugzillas:
https://bugzilla.redhat.com/show_bug.cgi?id=1988457
https://bugzilla.redhat.com/show_bug.cgi?id=1988449
https://bugzilla.redhat.com/show_bug.cgi?id=2055117 (possibly fixed)
---
.../systemd/repairsystemdsymlinks/actor.py | 25 +++++
.../libraries/repairsystemdsymlinks.py | 76 ++++++++++++++++
.../tests/test_repairsystemdsymlinks.py | 91 +++++++++++++++++++
3 files changed, 192 insertions(+)
create mode 100644 repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/actor.py
create mode 100644 repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/libraries/repairsystemdsymlinks.py
create mode 100644 repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/tests/test_repairsystemdsymlinks.py
diff --git a/repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/actor.py b/repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/actor.py
new file mode 100644
index 00000000..29134373
--- /dev/null
+++ b/repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/actor.py
@@ -0,0 +1,25 @@
+from leapp.actors import Actor
+from leapp.libraries.actor import repairsystemdsymlinks
+from leapp.models import SystemdBrokenSymlinksSource, SystemdBrokenSymlinksTarget, SystemdServicesInfoSource
+from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag
+
+
+class RepairSystemdSymlinks(Actor):
+ """
+ Fix broken or incorrect systemd symlinks
+
+ Symlinks are handled in the following fashion, if the symlink points to:
+ - a removed unit, such a symlink is deleted
+ - a unit whose installation has been changed (e.g. changed WantedBy),
+ such symlinks are fixed (re-enabled using systemctl)
+
+ Symlinks that have been already broken before the in-place upgrade are ignored.
+ """
+
+ name = 'repair_systemd_symlinks'
+ consumes = (SystemdBrokenSymlinksSource, SystemdBrokenSymlinksTarget, SystemdServicesInfoSource)
+ produces = ()
+ tags = (ApplicationsPhaseTag, IPUWorkflowTag)
+
+ def process(self):
+ repairsystemdsymlinks.process()
diff --git a/repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/libraries/repairsystemdsymlinks.py b/repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/libraries/repairsystemdsymlinks.py
new file mode 100644
index 00000000..884b001e
--- /dev/null
+++ b/repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/libraries/repairsystemdsymlinks.py
@@ -0,0 +1,76 @@
+import os
+
+from leapp.exceptions import StopActorExecutionError
+from leapp.libraries.common import systemd
+from leapp.libraries.common.config.version import get_target_major_version
+from leapp.libraries.stdlib import api, CalledProcessError, run
+from leapp.models import SystemdBrokenSymlinksSource, SystemdBrokenSymlinksTarget, SystemdServicesInfoSource
+
+_INSTALLATION_CHANGED_EL8 = ['rngd.service', 'sysstat.service']
+_INSTALLATION_CHANGED_EL9 = []
+
+
+def _get_installation_changed_units():
+ version = get_target_major_version()
+ if version == '8':
+ return _INSTALLATION_CHANGED_EL8
+ if version == '9':
+ return _INSTALLATION_CHANGED_EL9
+
+ return []
+
+
+def _service_enabled_source(service_info, name):
+ service_file = next((s for s in service_info.service_files if s.name == name), None)
+ return service_file and service_file.state == 'enabled'
+
+
+def _is_unit_enabled(unit):
+ try:
+ ret = run(['systemctl', 'is-enabled', unit], split=True)['stdout']
+ return ret and ret[0] == 'enabled'
+ except (OSError, CalledProcessError):
+ return False
+
+
+def _handle_newly_broken_symlinks(symlinks, service_info):
+ for symlink in symlinks:
+ unit = os.path.basename(symlink)
+ try:
+ if not _is_unit_enabled(unit):
+ # removes the broken symlink
+ systemd.disable_unit(unit)
+ elif _service_enabled_source(service_info, unit) and _is_unit_enabled(unit):
+ # removes the old symlinks and creates the new ones
+ systemd.reenable_unit(unit)
+ except CalledProcessError:
+ # TODO(mmatuska): Produce post-upgrade report: failed to handle broken symlink (and suggest a fix?)
+ pass
+
+
+def _handle_bad_symlinks(service_files):
+ install_changed_units = _get_installation_changed_units()
+ potentially_bad = [s for s in service_files if s.name in install_changed_units]
+
+ for unit_file in potentially_bad:
+ if unit_file.state == 'enabled' and _is_unit_enabled(unit_file.name):
+ systemd.reenable_unit(unit_file.name)
+
+
+def process():
+ service_info_source = next(api.consume(SystemdServicesInfoSource), None)
+ if not service_info_source:
+ raise StopActorExecutionError("Expected SystemdServicesInfoSource message, but got None")
+
+ source_info = next(api.consume(SystemdBrokenSymlinksSource), None)
+ target_info = next(api.consume(SystemdBrokenSymlinksTarget), None)
+
+ if source_info and target_info:
+ newly_broken = []
+ newly_broken = [s for s in target_info.broken_symlinks if s not in source_info.broken_symlinks]
+ if not newly_broken:
+ return
+
+ _handle_newly_broken_symlinks(newly_broken, service_info_source)
+
+ _handle_bad_symlinks(service_info_source.service_files)
diff --git a/repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/tests/test_repairsystemdsymlinks.py b/repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/tests/test_repairsystemdsymlinks.py
new file mode 100644
index 00000000..2394df5e
--- /dev/null
+++ b/repos/system_upgrade/common/actors/systemd/repairsystemdsymlinks/tests/test_repairsystemdsymlinks.py
@@ -0,0 +1,91 @@
+from leapp.libraries.actor import repairsystemdsymlinks
+from leapp.libraries.common import systemd
+from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked
+from leapp.libraries.stdlib import api, CalledProcessError, run
+from leapp.models import (
+ SystemdBrokenSymlinksSource,
+ SystemdBrokenSymlinksTarget,
+ SystemdServiceFile,
+ SystemdServicesInfoSource
+)
+
+
+class MockedSystemdCmd(object):
+ def __init__(self):
+ self.units = []
+
+ def __call__(self, unit, *args, **kwargs):
+ self.units.append(unit)
+ return {}
+
+
+def test_bad_symslinks(monkeypatch):
+ service_files = [
+ SystemdServiceFile(name='rngd.service', state='enabled'),
+ SystemdServiceFile(name='sysstat.service', state='disabled'),
+ SystemdServiceFile(name='hello.service', state='enabled'),
+ SystemdServiceFile(name='world.service', state='disabled'),
+ ]
+
+ def is_unit_enabled_mocked(unit):
+ return True
+
+ monkeypatch.setattr(repairsystemdsymlinks, '_is_unit_enabled', is_unit_enabled_mocked)
+
+ reenable_mocked = MockedSystemdCmd()
+ monkeypatch.setattr(systemd, 'reenable_unit', reenable_mocked)
+
+ service_info = SystemdServicesInfoSource(service_files=service_files)
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[service_info]))
+
+ repairsystemdsymlinks._handle_bad_symlinks(service_info.service_files)
+
+ assert reenable_mocked.units == ['rngd.service']
+
+
+def test_handle_newly_broken_symlink(monkeypatch):
+
+ symlinks = [
+ '/etc/systemd/system/default.target.wants/systemd-readahead-replay.service',
+ '/etc/systemd/system/multi-user.target.wants/vdo.service',
+ '/etc/systemd/system/multi-user.target.wants/hello.service',
+ '/etc/systemd/system/multi-user.target.wants/world.service',
+ '/etc/systemd/system/multi-user.target.wants/foo.service',
+ '/etc/systemd/system/multi-user.target.wants/bar.service',
+ ]
+
+ def is_unit_enabled_mocked(unit):
+ return unit in ('hello.service', 'foo.service')
+
+ expect_disabled = [
+ 'systemd-readahead-replay.service',
+ 'vdo.service',
+ 'world.service',
+ 'bar.service',
+ ]
+
+ expect_reenabled = [
+ 'hello.service',
+ ]
+
+ monkeypatch.setattr(repairsystemdsymlinks, '_is_unit_enabled', is_unit_enabled_mocked)
+
+ reenable_mocked = MockedSystemdCmd()
+ monkeypatch.setattr(systemd, 'reenable_unit', reenable_mocked)
+
+ disable_mocked = MockedSystemdCmd()
+ monkeypatch.setattr(systemd, 'disable_unit', disable_mocked)
+
+ service_files = [
+ SystemdServiceFile(name='systemd-readahead-replay.service', state='enabled'),
+ SystemdServiceFile(name='vdo.service', state='disabled'),
+ SystemdServiceFile(name='hello.service', state='enabled'),
+ SystemdServiceFile(name='world.service', state='disabled'),
+ SystemdServiceFile(name='foo.service', state='disabled'),
+ SystemdServiceFile(name='bar.service', state='enabled'),
+ ]
+ service_info = SystemdServicesInfoSource(service_files=service_files)
+ repairsystemdsymlinks._handle_newly_broken_symlinks(symlinks, service_info)
+
+ assert reenable_mocked.units == expect_reenabled
+ assert disable_mocked.units == expect_disabled
--
2.38.1

View File

@ -1,271 +0,0 @@
From 2713d60a99b60a352b89374dec89f6faa683861d Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Wed, 16 Nov 2022 14:19:36 +0100
Subject: [PATCH 28/32] Add check for systemd symlinks broken before the
upgrade
Broken systemd symlinks are not treated during the in-place upgrade
if they are broken prior to the leapp execution. This could lead to
unwanted behaviour on the upgraded system, but it does not have to
- so we do not inhibit the upgrade when such symlinks are detected.
Also, such symlinks could have been created by a previous in-place
upgrade, when automatic fixing of broken symlinks had not yet been
implemented. By this actor we inform people about such issues,
so they can fix it prior the upgrade.
Co-authored-by: Petr Stodulka <pstodulk@redhat.com>
---
.../checksystemdbrokensymlinks/actor.py | 29 +++++
.../libraries/checksystemdbrokensymlinks.py | 106 ++++++++++++++++++
.../tests/test_checksystemdbrokensymlinks.py | 89 +++++++++++++++
3 files changed, 224 insertions(+)
create mode 100644 repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/actor.py
create mode 100644 repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/libraries/checksystemdbrokensymlinks.py
create mode 100644 repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/tests/test_checksystemdbrokensymlinks.py
diff --git a/repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/actor.py b/repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/actor.py
new file mode 100644
index 00000000..257e8c33
--- /dev/null
+++ b/repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/actor.py
@@ -0,0 +1,29 @@
+from leapp.actors import Actor
+from leapp.libraries.actor import checksystemdbrokensymlinks
+from leapp.models import SystemdBrokenSymlinksSource, SystemdServicesInfoSource
+from leapp.reporting import Report
+from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
+
+
+class CheckSystemdBrokenSymlinks(Actor):
+ """
+ Check whether some systemd symlinks are broken
+
+ If some systemd symlinks are broken, report them but do not inhibit the
+ upgrade. The symlinks broken already before the upgrade will not be
+ handled by the upgrade process anyhow. Two different reports are created:
+ - symlinks which have the same filename as an existing enabled systemd
+ service (the symlink doesn't point to an existing unit file, but the
+ service is enabled)
+ - broken symlinks which names do not correspond with any existing systemd
+ unit file (typically when the service is removed but not disabled
+ correctly)
+ """
+
+ name = 'check_systemd_broken_symlinks'
+ consumes = (SystemdBrokenSymlinksSource, SystemdServicesInfoSource)
+ produces = (Report,)
+ tags = (ChecksPhaseTag, IPUWorkflowTag)
+
+ def process(self):
+ checksystemdbrokensymlinks.process()
diff --git a/repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/libraries/checksystemdbrokensymlinks.py b/repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/libraries/checksystemdbrokensymlinks.py
new file mode 100644
index 00000000..23addf72
--- /dev/null
+++ b/repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/libraries/checksystemdbrokensymlinks.py
@@ -0,0 +1,106 @@
+import os
+
+from leapp import reporting
+from leapp.exceptions import StopActorExecutionError
+from leapp.libraries.stdlib import api
+from leapp.models import SystemdBrokenSymlinksSource, SystemdServicesInfoSource
+
+FMT_LIST_SEPARATOR = '\n - '
+
+
+def _report_broken_symlinks(symlinks):
+ summary = (
+ 'Leapp detected broken systemd symlinks on the system that do not'
+ ' correspond to any installed systemd unit.'
+ ' This typically happens when the original systemd unit file has been'
+ ' removed (e.g. an rpm removal) or renamed and the system configration'
+ ' has not been properly modified.'
+ ' These symlinks will not be handled during the in-place upgrade'
+ ' as they are already broken.'
+ ' The list of detected broken systemd symlinks:{}{}'
+ .format(FMT_LIST_SEPARATOR, FMT_LIST_SEPARATOR.join(sorted(symlinks)))
+ )
+
+ command = ['/usr/bin/rm'] + symlinks
+
+ hint = (
+ 'Remove the invalid symlinks before the upgrade.'
+ )
+
+ reporting.create_report([
+ reporting.Title(
+ 'Detected broken systemd symlinks for non-existing services'
+ ),
+ reporting.Summary(summary),
+ reporting.Remediation(hint=hint, commands=[command]),
+ reporting.Severity(reporting.Severity.LOW),
+ reporting.Tags([reporting.Tags.FILESYSTEM]),
+ ])
+
+
+def _report_enabled_services_broken_symlinks(symlinks):
+ summary = (
+ 'Leapp detected broken systemd symlinks on the system that correspond'
+ ' to existing systemd units, but on different paths. This could lead'
+ ' in future to unexpected behaviour. Also, these symlinks will not be'
+ ' handled during the in-place upgrade as they are already broken.'
+ ' The list of detected broken symlinks:{}{}'
+ .format(FMT_LIST_SEPARATOR, FMT_LIST_SEPARATOR.join(sorted(symlinks)))
+ )
+
+ hint = (
+ 'Fix the broken symlinks before the upgrade or remove them. For this'
+ ' purpose, you can re-enable or disable the related systemd services'
+ ' using the systemctl tool.'
+ )
+
+ reporting.create_report([
+ reporting.Title(
+ 'Detected broken systemd symlinks for existing services'
+ ),
+ reporting.Summary(summary),
+ reporting.Remediation(hint=hint),
+ reporting.Severity(reporting.Severity.MEDIUM),
+ reporting.Tags([reporting.Tags.FILESYSTEM]),
+ ])
+
+
+def _is_enabled(unit, service_files):
+ # FIXME(pstodulk): currently our msgs contain only information about systemd
+ # services. If the unit (broken symlink) refers to timers, etc. They will
+ # be treated now as disabled (read: symlink is broken and there is not
+ # a corresponding unit-file on the system). Considering it for now as
+ # minor issue that will be resolved in future.
+ # NOTE: One of possible solution is to put the information about enabled broken
+ # symlinks to the msg, so it can be just consumed.
+ for service_file in service_files:
+ if service_file.name == unit:
+ return service_file.state == 'enabled'
+ return False
+
+
+def process():
+ broken_symlinks_info = next(api.consume(SystemdBrokenSymlinksSource), None)
+ if not broken_symlinks_info:
+ # nothing to do
+ return
+ services = next(api.consume(SystemdServicesInfoSource), None)
+ if not services:
+ # This is just a seatbelt. It's not expected this msg will be missing.
+ # Skipping tests.
+ raise StopActorExecutionError('Missing SystemdServicesInfoSource message.')
+
+ enabled_to_report = []
+ to_report = []
+ for broken_symlink in broken_symlinks_info.broken_symlinks:
+ unit = os.path.basename(broken_symlink)
+ if _is_enabled(unit, services.service_files):
+ enabled_to_report.append(broken_symlink)
+ else:
+ to_report.append(broken_symlink)
+
+ if enabled_to_report:
+ _report_enabled_services_broken_symlinks(enabled_to_report)
+
+ if to_report:
+ _report_broken_symlinks(to_report)
diff --git a/repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/tests/test_checksystemdbrokensymlinks.py b/repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/tests/test_checksystemdbrokensymlinks.py
new file mode 100644
index 00000000..2364f7a5
--- /dev/null
+++ b/repos/system_upgrade/common/actors/systemd/checksystemdbrokensymlinks/tests/test_checksystemdbrokensymlinks.py
@@ -0,0 +1,89 @@
+import pytest
+
+from leapp import reporting
+from leapp.libraries.actor import checksystemdbrokensymlinks
+from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked
+from leapp.libraries.stdlib import api
+from leapp.models import SystemdBrokenSymlinksSource, SystemdServiceFile, SystemdServicesInfoSource
+
+
+def test_report_broken_symlinks(monkeypatch):
+
+ symlinks = [
+ '/etc/systemd/system/multi-user.target.wants/hello.service',
+ '/etc/systemd/system/multi-user.target.wants/world.service',
+ ]
+
+ created_reports = create_report_mocked()
+ monkeypatch.setattr(reporting, 'create_report', created_reports)
+
+ checksystemdbrokensymlinks._report_broken_symlinks(symlinks)
+
+ assert created_reports.called
+ assert all([s in created_reports.report_fields['summary'] for s in symlinks])
+
+
+def test_report_enabled_services_broken_symlinks(monkeypatch):
+ symlinks = [
+ '/etc/systemd/system/multi-user.target.wants/foo.service',
+ '/etc/systemd/system/multi-user.target.wants/bar.service',
+ ]
+
+ created_reports = create_report_mocked()
+ monkeypatch.setattr(reporting, 'create_report', created_reports)
+
+ checksystemdbrokensymlinks._report_enabled_services_broken_symlinks(symlinks)
+
+ assert created_reports.called
+ assert all([s in created_reports.report_fields['summary'] for s in symlinks])
+
+
+class ReportBrokenSymlinks(object):
+ def __init__(self):
+ self.symlinks = []
+
+ def __call__(self, unit, *args, **kwargs):
+ self.symlinks.append(unit)
+ return {}
+
+
+def test_broken_symlinks_reported(monkeypatch):
+ broken_symlinks = SystemdBrokenSymlinksSource(broken_symlinks=[
+ '/etc/systemd/system/multi-user.target.wants/foo.service',
+ '/etc/systemd/system/multi-user.target.wants/bar.service',
+ '/etc/systemd/system/multi-user.target.wants/hello.service',
+ '/etc/systemd/system/multi-user.target.wants/world.service',
+ ])
+ systemd_services = SystemdServicesInfoSource(service_files=[
+ SystemdServiceFile(name='foo.service', state='enabled'),
+ SystemdServiceFile(name='bar.service', state='enabled'),
+ SystemdServiceFile(name='hello.service', state='disabled'),
+ ])
+ broken = []
+ enabled_broken = []
+
+ def _report_broken_symlinks_mocked(symlinks):
+ broken.extend(symlinks)
+
+ def _report_enabled_services_broken_symlinks_mocked(symlinks):
+ enabled_broken.extend(symlinks)
+
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[broken_symlinks, systemd_services]))
+ monkeypatch.setattr(checksystemdbrokensymlinks, '_report_broken_symlinks', _report_broken_symlinks_mocked)
+ monkeypatch.setattr(
+ checksystemdbrokensymlinks,
+ '_report_enabled_services_broken_symlinks',
+ _report_enabled_services_broken_symlinks_mocked
+ )
+
+ checksystemdbrokensymlinks.process()
+
+ assert broken == [
+ '/etc/systemd/system/multi-user.target.wants/hello.service',
+ '/etc/systemd/system/multi-user.target.wants/world.service',
+ ]
+
+ assert enabled_broken == [
+ '/etc/systemd/system/multi-user.target.wants/foo.service',
+ '/etc/systemd/system/multi-user.target.wants/bar.service',
+ ]
--
2.38.1

View File

@ -1,87 +0,0 @@
From 4e2767e0eab5fe99b9e99dfea8a8425a1297574b Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Wed, 16 Nov 2022 14:10:48 +0100
Subject: [PATCH 29/32] checksystemdservicestasks: update docstrings + extend
tests
---
.../systemd/checksystemdservicetasks/actor.py | 11 +++++------
.../tests/test_checksystemdservicestasks.py | 16 +++++++++++++++-
2 files changed, 20 insertions(+), 7 deletions(-)
diff --git a/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/actor.py b/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/actor.py
index 2df995ee..547a13df 100644
--- a/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/actor.py
+++ b/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/actor.py
@@ -7,17 +7,16 @@ from leapp.tags import IPUWorkflowTag, TargetTransactionChecksPhaseTag
class CheckSystemdServicesTasks(Actor):
"""
- Inhibits upgrade if SystemdServicesTasks tasks are in conflict
+ Inhibit the upgrade if SystemdServicesTasks tasks are in conflict
- There is possibility, that SystemdServicesTasks messages with conflicting
- requested service states could be produced. For example a service is
- requested to be both enabled and disabled. This actor inhibits upgrade in
- such cases.
+ SystemdServicesTasks messages with conflicting requested service states
+ could be produced. For example a service could be requested to be both
+ - enabled and disabled. This actor inhibits upgrade in such cases.
Note: We expect that SystemdServicesTasks could be produced even after the
TargetTransactionChecksPhase (e.g. during the ApplicationPhase). The
purpose of this actor is to report collisions in case we can already detect
- them. In case of conflicts caused by produced messages later we just log
+ them. In case of conflicts caused by messages produced later we just log
the collisions and the services will end up disabled.
"""
diff --git a/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/tests/test_checksystemdservicestasks.py b/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/tests/test_checksystemdservicestasks.py
index 36ded92f..88c278d6 100644
--- a/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/tests/test_checksystemdservicestasks.py
+++ b/repos/system_upgrade/common/actors/systemd/checksystemdservicetasks/tests/test_checksystemdservicestasks.py
@@ -5,6 +5,7 @@ from leapp.libraries.actor import checksystemdservicetasks
from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked
from leapp.libraries.stdlib import api
from leapp.models import SystemdServicesTasks
+from leapp.utils.report import is_inhibitor
@pytest.mark.parametrize(
@@ -44,6 +45,18 @@ from leapp.models import SystemdServicesTasks
],
True
),
+ (
+ [
+ SystemdServicesTasks(to_enable=['hello.service']),
+ SystemdServicesTasks(to_disable=['world.service']),
+ SystemdServicesTasks(to_enable=['hello.service', 'kitty.service'])
+ ],
+ False
+ ),
+ (
+ [],
+ False
+ )
]
)
def test_conflicts_detected(monkeypatch, tasks, should_inhibit):
@@ -55,6 +68,7 @@ def test_conflicts_detected(monkeypatch, tasks, should_inhibit):
checksystemdservicetasks.check_conflicts()
assert bool(created_reports.called) == should_inhibit
+ assert is_inhibitor(created_reports.report_fields) == should_inhibit
@pytest.mark.parametrize(
@@ -84,5 +98,5 @@ def test_coflict_reported(monkeypatch, tasks, expected_reported):
checksystemdservicetasks.check_conflicts()
- report_summary = reporting.create_report.report_fields['summary']
+ report_summary = created_reports.report_fields['summary']
assert all(service in report_summary for service in expected_reported)
--
2.38.1

View File

@ -1,702 +0,0 @@
From 4dac9dc9d8f7c48626ea78d2d3bf128efdcb610d Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Wed, 16 Nov 2022 20:09:47 +0100
Subject: [PATCH 31/32] Add prod certs for 8.8 & 9.2 (Beta + GA)
Signed-off-by: Petr Stodulka <pstodulk@redhat.com>
---
.../common/files/prod-certs/8.8/279.pem | 35 ++++++++++++++++++
.../common/files/prod-certs/8.8/362.pem | 36 +++++++++++++++++++
.../common/files/prod-certs/8.8/363.pem | 35 ++++++++++++++++++
.../common/files/prod-certs/8.8/419.pem | 35 ++++++++++++++++++
.../common/files/prod-certs/8.8/433.pem | 35 ++++++++++++++++++
.../common/files/prod-certs/8.8/479.pem | 35 ++++++++++++++++++
.../common/files/prod-certs/8.8/486.pem | 35 ++++++++++++++++++
.../common/files/prod-certs/8.8/72.pem | 35 ++++++++++++++++++
.../common/files/prod-certs/9.2/279.pem | 35 ++++++++++++++++++
.../common/files/prod-certs/9.2/362.pem | 36 +++++++++++++++++++
.../common/files/prod-certs/9.2/363.pem | 35 ++++++++++++++++++
.../common/files/prod-certs/9.2/419.pem | 35 ++++++++++++++++++
.../common/files/prod-certs/9.2/433.pem | 35 ++++++++++++++++++
.../common/files/prod-certs/9.2/479.pem | 35 ++++++++++++++++++
.../common/files/prod-certs/9.2/486.pem | 35 ++++++++++++++++++
.../common/files/prod-certs/9.2/72.pem | 35 ++++++++++++++++++
16 files changed, 562 insertions(+)
create mode 100644 repos/system_upgrade/common/files/prod-certs/8.8/279.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/8.8/362.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/8.8/363.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/8.8/419.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/8.8/433.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/8.8/479.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/8.8/486.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/8.8/72.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.2/279.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.2/362.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.2/363.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.2/419.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.2/433.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.2/479.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.2/486.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.2/72.pem
diff --git a/repos/system_upgrade/common/files/prod-certs/8.8/279.pem b/repos/system_upgrade/common/files/prod-certs/8.8/279.pem
new file mode 100644
index 00000000..8ca3cea1
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/8.8/279.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGJTCCBA2gAwIBAgIJALDxRLt/tVMfMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxMjA2NTM1MFoXDTQyMDcw
+NzA2NTM1MFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFtlYzg2NTc3
+MC01NGViLTQ5NjEtYmJjMC1iZWVhOWI2ZGYyNjZdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBrjCBqzAJBgNVHRMEAjAAMEMGDCsGAQQBkggJAYIXAQQzDDFSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIFBvd2VyLCBsaXR0bGUgZW5kaWFuMBUGDCsG
+AQQBkggJAYIXAgQFDAM4LjgwGQYMKwYBBAGSCAkBghcDBAkMB3BwYzY0bGUwJwYM
+KwYBBAGSCAkBghcEBBcMFXJoZWwtOCxyaGVsLTgtcHBjNjRsZTANBgkqhkiG9w0B
+AQsFAAOCAgEARI585ue/LavAlcpIOCiwxmNv/djPG3XFU0bPoTym/gycwppJPh3Z
+2wfXQMumgmp6C07yui1ybbVIzftwBMU46z+VGqYyFAvFGXLdYndQ0EJpyZkov5F+
+zd6XQlrzIrJu9G9k/bwWXld+7mIBgmWTPjv+TA4wlya9r6NSMW/xSxVm5Xm9SThy
+rvwN8ElK2+BjmyEVByNWphoagNQnKg1hkWsajNGlTKM1x+w1of941uDdBaXbyKVE
+JbYX5klal0DnqqYt8Fgj4vCDMJ635yhnwHgo5MginZZMQFZutHS8NjV2wMvYx1yY
+oLhPo6fA572tTRAEGbZ8HnlU9FrBwP938fvFDHy3hQuRUjrE5qtE+sWnwnmVMgNB
+oMUBy5hZN35VX/s0yQ25CXUqrVof1H2ZmLmRNX+c9Du/vZ2R4cjJpPu+9om4a848
+Dt4IKfaScsVISErkVvOYH7RCB0o/y3vzahou8fA3lL3Mu4D4Vlyv59Xajsnuwbu/
+5+3OYZ87h50NlbOLbV0668NztVzRppZ9aoZGVFyhcDdFc5y0uG2schWHSYOIJgJp
+8L3M9PL0FgdyEHAZD2Jyu8l+lhc+oIc41JXjW0GZhCZ9Uvw7x3apurdHk9IU5Ord
+9IugAJ1qN7veRstmb4rCVS8c/gxR24wCRGcDD3eIgvBwmgdFi09DLTA=
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/8.8/362.pem b/repos/system_upgrade/common/files/prod-certs/8.8/362.pem
new file mode 100644
index 00000000..502e9d16
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/8.8/362.pem
@@ -0,0 +1,36 @@
+-----BEGIN CERTIFICATE-----
+MIIGNDCCBBygAwIBAgIJALDxRLt/tVM1MA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxMjA2NTYyNFoXDTQyMDcw
+NzA2NTYyNFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFtiOTdkODkx
+NC1jNjJhLTRhNDAtOTFiZi1hZjdlNTM3MmVjOGVdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBvTCBujAJBgNVHRMEAjAAMEgGDCsGAQQBkggJAYJqAQQ4DDZSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIFBvd2VyLCBsaXR0bGUgZW5kaWFuIEJldGEw
+GgYMKwYBBAGSCAkBgmoCBAoMCDguOCBCZXRhMBkGDCsGAQQBkggJAYJqAwQJDAdw
+cGM2NGxlMCwGDCsGAQQBkggJAYJqBAQcDBpyaGVsLTgscmhlbC04LWJldGEtcHBj
+NjRsZTANBgkqhkiG9w0BAQsFAAOCAgEAcQBzf0ndflW+503jCYyZS1enHucWjgIi
+EqtX4s1mkLuLXyiR7LcSNq56jyRjztyab2ydA77/C/iWaDzXEEXqlO+rrHBfw4u+
+aJ3Pp0p8mYC+srWMO0wuVeRJeBkbDkXzoGmm/DkzMjGnTZB9/O0hkQ3+dnHLbf8I
+IC9lWen7Rcn+pSp2v8jz7zpZ3qrfBb2Q62KuPL6xwCfw+CVrl+PuChjz373i12CH
+9F7XG/RtVI1B+9qh4yLtTB13hPaAzIkGW3yTA+NOBoVexxZSka7ZfJFFXpmnI7Ot
+4NGi3L6aTGYGRNsHaDX1JsVd4vXC4LFca7YeKBW2aIGjt5ZSThE1tfIgXCgEm7uS
+UUB5lQiQ/9H67Vl8r4/LsUItdD9NmRdpTWT3NB8vbenqLL7QG91ra3uMR4vA9l0j
+Ei7v0WGWjpeiQbbRjzMkRgQKmeW/3M41ShUW4MNg9sFObw6eZqMJnH1BV9N/1e1k
+CpP6drmYE8qb8rVc66FIFS77JB6xmeLRK5Bq4yAtyA7PsM7r4RytgmVpVa4zoMEi
+JSboaBN9IMawvA7m4B/+fQZAy86pD168eOTBoP8G4RswFSLZCeIohFgorG0VEmEx
+CcJDxa9+ud/xFJfJQ9ILHJXYj8+SCO73LUQ1D0z9MPtKqDEk/7Rl+b6EziBzmDyO
+xYae2xpfO4E=
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/8.8/363.pem b/repos/system_upgrade/common/files/prod-certs/8.8/363.pem
new file mode 100644
index 00000000..54e14706
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/8.8/363.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGJjCCBA6gAwIBAgIJALDxRLt/tVM0MA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxMjA2NTYxOVoXDTQyMDcw
+NzA2NTYxOVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs4NDk1OTc3
+Yi0yZDU1LTQwZDItOWZjOC0yOTI5ZjJlZWZiNDRdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBrzCBrDAJBgNVHRMEAjAAMDoGDCsGAQQBkggJAYJrAQQqDChSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIEFSTSA2NCBCZXRhMBoGDCsGAQQBkggJAYJr
+AgQKDAg4LjggQmV0YTAZBgwrBgEEAZIICQGCawMECQwHYWFyY2g2NDAsBgwrBgEE
+AZIICQGCawQEHAwacmhlbC04LHJoZWwtOC1iZXRhLWFhcmNoNjQwDQYJKoZIhvcN
+AQELBQADggIBAMEjuJ3qX1Ggyt5axDU3Ub+YJy+JJoBPk/nxpoDWBmZLmGAhW5pa
+sjP6xL/1CLcdWe4bFDbZDdtbXEPStZ0WraNmO0nQgUJFFx7RJ1hd5CUzCi8j3uGh
+M9+YDrr4MbQJSO0Wc55m23E6V9Lug6cA/rCzBWzwxD1W1K7q26CAiWT5l0qBZJmI
+VozYzqTk941GYipukb7vbScDFFafoNMyysEYQckRKRhhIZrr0z3p9ZdFgKFVvy4b
+rYX4/W5MdsAetlzTBrsfxazSOYw/97mnPxDCIjEue2V1A1z5D5HlHotQcbq4OXff
+3aHVbhsYbLbGUhULo/HfBxA1tFSJ9QpsEDu+yvP0032non7xEDB4IvypZ0ay2qK7
+ArrSFGAyUIVrdIopspPxRikPfc+DcmPflO9vePRTT95tK0O6iObFM9azNmphp2e9
+9Bzz1A2CjctjA7z4MIP6lPVGbWhD53qRbJs3bkMjqDDCUdE+vEnCuLdronlMlzQ1
+KVGvRgnKNrAI9ORY24bz/AsGTseZp9jN4IKKnj0ZSq+SjZih/eMP1lNFHjQda/9/
+gUoeAz3oAd1KQe011R81rS/HnL4QTRqkQiMeEahrx8q0xFwgk3wsk8voFGTBGyEO
+qnVIkzgrzXSQvM3neGlnBVkLzYS2okgFtJzglqAvUUqqfj34J3d91TWF
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/8.8/419.pem b/repos/system_upgrade/common/files/prod-certs/8.8/419.pem
new file mode 100644
index 00000000..fd9944a9
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/8.8/419.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGFzCCA/+gAwIBAgIJALDxRLt/tVMeMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxMjA2NTM0NloXDTQyMDcw
+NzA2NTM0NlowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFtkODg3ZTU0
+NC0wMDBkLTQ2MTYtODk3Zi1kYmIzMDg1MzM4ODVdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBoDCBnTAJBgNVHRMEAjAAMDUGDCsGAQQBkggJAYMjAQQlDCNSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIEFSTSA2NDAVBgwrBgEEAZIICQGDIwIEBQwD
+OC44MBkGDCsGAQQBkggJAYMjAwQJDAdhYXJjaDY0MCcGDCsGAQQBkggJAYMjBAQX
+DBVyaGVsLTgscmhlbC04LWFhcmNoNjQwDQYJKoZIhvcNAQELBQADggIBAFoEXLlm
+Vxi4qkcugC++o4LrGD8l1pGWL6J7JQ7cFpiCIMtmh0EXx8Tc4511u9SqzIR6uaSI
+D23jUfndGTGsfqYW/jclr5ayoN8IKL7Km18Wc9sb2DErZ98wDDlkIq1s9Wl5TthE
+Eq1Ae81rCnK2R85IUQa7IIB26aSnSwV3DNd1nYPLewzgN8rpF21wKqcN6HowIzbg
+U06sdKCuBA/fnnk/3SInambZAl/eqtHMgmQjgNHzt+qFhno0JqhllevXYn7Gx3Pu
+qJ9UMCTLZM4OEjnNfA0f1VX1CUzf1Fz5ukvChxX4cx2pKNl8q6w+R+2A3fcSkvv2
+BHMDI00F0M22AEeZQE2ECG4/s8G2dRu2Dzp1kmBH26pSs9FTB3fTPXW2kyXPpOT/
+jv2x1jFsza0GXoMJ7t7JEV5Mx9wcC3pguxEnJeCBqejoHTcG1xuWxFhlXmkNuiyD
+/Try5lCEmOvQYyE4FrJGezkpWBab5m2426hByTnpuHYvDsqAPDjUY0HoFUtxwqwA
+kVxUQzf3GxXu5FoFq36BxiWG7e0d4OJzwMK5DI00r/rs2tUlLCfNozDdbN5rBMlR
+1RIrGctY4LDfgr8sXXEK+54nAP11me16/Z4odkQbkv+WZ9z5i4573wei88kTg49X
+Dn64lKrB2B5dKq7vjemcDO3qFp0RAyc2PGUc
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/8.8/433.pem b/repos/system_upgrade/common/files/prod-certs/8.8/433.pem
new file mode 100644
index 00000000..1c6772ca
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/8.8/433.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGKTCCBBGgAwIBAgIJALDxRLt/tVM2MA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxMjA2NTYyOVoXDTQyMDcw
+NzA2NTYyOVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs1YjllMDEy
+Yy1jM2ZkLTQ0MTgtYWY0OC01Y2FkNWE4YTBjMjBdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBsjCBrzAJBgNVHRMEAjAAMEEGDCsGAQQBkggJAYMxAQQxDC9SZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIElCTSB6IFN5c3RlbXMgQmV0YTAaBgwrBgEE
+AZIICQGDMQIECgwIOC44IEJldGEwFwYMKwYBBAGSCAkBgzEDBAcMBXMzOTB4MCoG
+DCsGAQQBkggJAYMxBAQaDBhyaGVsLTgscmhlbC04LWJldGEtczM5MHgwDQYJKoZI
+hvcNAQELBQADggIBAEcUjx4IcWFemLJqpxFJm7nP9j/4ZqTjEa9Q7oDHNOOWM1NG
+HL9wJe/5Y/TCUGJvf4JiIUPNnfkaXXZDKqR7mbpLyb83BSAhgCBekdXvb/n+5QKI
+AAYyliEPtWkAIh0aP/nLYDEZ9aJoKIoDs9tp7uAQ/1fGPqN5lIvr7CO7HjIo7xrm
+5S4C3b+DlXp3GB74kb89r1XM3/1cmFmVz8js5KGg7JOVBUqxKQsjF7y8OGgGONiy
+xfkDFIvX+vyNli6xiXpsRH+CkSRckioTOsV8WawA0Ae89QNTVdN7xNXSugrIXSRd
+fyULDx9v+jihJuEyzMYbpvj3fmenrpcbckACsCHWGtRlvdAgYcF0TrFYsYthd2Gc
+wpR/XLn2SRu0Hx5ZbfqqhrJo765wYRPfTMVLilCPiw71d7DP0m6hrNzxX/Sp8K4e
+w/RxKaC5p/aV27dGSe83udnAXA2IgjfaJz6adnD36YfWUYIRVEg/tX2nlpDROz7Y
+saVj5Lq6wzFdt6mIVIQ6A4lM1zldHNyDv69gVDOlOgtklO94z41eJkPu5MbDG2fG
+xlVRgjiAsERNvHEXfnVb0iz/b2ymmM7HIVDowlIVhyJBkNKUW1invXOvf+AGZzQf
+LS4Db1q+P7HJZnrQf1EzgDKjTm8Kdv2CqKXpBnhDsXUXZZPbNl4txG4yIGHI
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/8.8/479.pem b/repos/system_upgrade/common/files/prod-certs/8.8/479.pem
new file mode 100644
index 00000000..2ecca847
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/8.8/479.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGFTCCA/2gAwIBAgIJALDxRLt/tVMhMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxMjA2NTQwMFoXDTQyMDcw
+NzA2NTQwMFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFswOWI2ZGRm
+MC03ODFkLTRjMjctYjZkZi0xMWQ2MmE5YmJkMDFdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBnjCBmzAJBgNVHRMEAjAAMDUGDCsGAQQBkggJAYNfAQQlDCNSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIHg4Nl82NDAVBgwrBgEEAZIICQGDXwIEBQwD
+OC44MBgGDCsGAQQBkggJAYNfAwQIDAZ4ODZfNjQwJgYMKwYBBAGSCAkBg18EBBYM
+FHJoZWwtOCxyaGVsLTgteDg2XzY0MA0GCSqGSIb3DQEBCwUAA4ICAQBhvgRzUrOb
+VRVPq2cG/Sto2KzpuIjauYhvuYLCNItw3cOqKaUy5ThOeYkLpddyzPjjlb/jQZi2
+dUybQjF3lrRpfgMmvRuapvhXWsCQuwq63JjqUaRNuPRrjxlwUqX2ibQSw0ZpPhlj
+vw3usTbLb04zd+RLb9e897tVMxWEWcyfyakMAI2/zV4UXhCZiFoaIA1EQqIyZIhK
+ukCnMYt9m/1KwK9yNYd6yzwYxqDe1aK4Z7J57G0FBpr57JXbZrE1KHpWQawusnFB
+t+2gGTxVOyDIrMuszV93GrrzPTyn+BasVS6UMwpUPQDOFJB9y7AKNSFkhZPUZRPW
+pmJUB4+Z5KGS+Of+g0Sp1huMnCvmEre1mP3pJTBhXmut1X1r/JJI041e46qnE7KO
+wHOz/cimduPgp2Sthc0OY3jZyZU1ibrFld9QFW1vVz7jO0j28T+JInzq+ji4NHdm
+0rGnAxp6S3L6HQBqiliO62ehyG3PnK2UvQyAz3sTnT7qL6qeOvvBSQvJqyQeF33y
+a85oEvAX3air6KuIVJTlXsS4E5EyTKYaX/5BqmrfzZ94ImcnO+5OF0SMOirCG3ik
+uWRGS9+I+0p+I7G9FjDduy8Cm1MYwEC8yB2/CFGEKgsMjXEyMkXMX4hzndnwE1G7
+edrVZJxTtwuyDtMvE6jeBziapQXSDNv/2A==
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/8.8/486.pem b/repos/system_upgrade/common/files/prod-certs/8.8/486.pem
new file mode 100644
index 00000000..c5108d61
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/8.8/486.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGJDCCBAygAwIBAgIJALDxRLt/tVM3MA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxMjA2NTYzM1oXDTQyMDcw
+NzA2NTYzM1owRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs3ZmU5MDgy
+Mi00NzFiLTRmNDctOGZmNC1jYzVkMGE0MjFmZjJdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBrTCBqjAJBgNVHRMEAjAAMDoGDCsGAQQBkggJAYNmAQQqDChSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIHg4Nl82NCBCZXRhMBoGDCsGAQQBkggJAYNm
+AgQKDAg4LjggQmV0YTAYBgwrBgEEAZIICQGDZgMECAwGeDg2XzY0MCsGDCsGAQQB
+kggJAYNmBAQbDBlyaGVsLTgscmhlbC04LWJldGEteDg2XzY0MA0GCSqGSIb3DQEB
+CwUAA4ICAQChnxZRwBX1DK/dONKHIsXkbpKdP4xzTF79tt6o6oueR313wGEeC+uS
+SRdbK8HiNC+J8hpgUz3g2RMmoxE7lObm2gkpEtOh7b6dOTOSL+LrmUhm8Ly5Ts4X
+ExY4I5pctcTXx8PaODIPQjpHIrFSqKYtxT9y0z43StUSmM310sg45H+qSM1ilepe
+WTIcDjLldUPNiaHDvu8wqE77khPnoVaP9dZUO7dNkhPkCR0ECN4Q1YrJhUZro9M0
+/pQ5700ev5Sw48Iu8iBW/h6wjpuD8cEFA4eYxRE0T8nVSvPILqK1mt8arGFP8Vch
+d6VIyv503eRwVbq9LQE8WOpC+c53ZmJYe/L5OlJU6oRlTK1ePEKZUaLsPfwHnVXC
+2e7IynDmkG2D2PE2J3br8bIVSmxCoxCp7mH2nwKJGE4EVquTnBfdwS3uCzfHX3p8
+5LGNS460tdymPZF8y4TeL+BAKZYg+l6mLx79ob044OCxsQQbcLY8v50XsTiRpGqH
+ZPLLzastYROQWvI5OhzhXE88HN0CLKCTNPlUeFmFwOw/FYWKjQtwcceuNMmMjeAe
+IZ5MrMyPf0x+MSmlIaPONn5uHmeMp7yvazdgTAkvIsBwq2cuqqFk7xfnqk0iX3zd
+kE4mKzWMJ6Fa3C+yOroNEIJz+AAiD3mgPln7CleKtXRKrvVkyxKa0g==
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/8.8/72.pem b/repos/system_upgrade/common/files/prod-certs/8.8/72.pem
new file mode 100644
index 00000000..703d0ad7
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/8.8/72.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGFjCCA/6gAwIBAgIJALDxRLt/tVMgMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxMjA2NTM1NVoXDTQyMDcw
+NzA2NTM1NVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs4YWFkYmY2
+OS0xYTA1LTRjOGYtYTc5MS04YWRlOGZiNThkMzRdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBnzCBnDAJBgNVHRMEAjAAMDsGCysGAQQBkggJAUgBBCwMKlJlZCBIYXQg
+RW50ZXJwcmlzZSBMaW51eCBmb3IgSUJNIHogU3lzdGVtczAUBgsrBgEEAZIICQFI
+AgQFDAM4LjgwFgYLKwYBBAGSCAkBSAMEBwwFczM5MHgwJAYLKwYBBAGSCAkBSAQE
+FQwTcmhlbC04LHJoZWwtOC1zMzkweDANBgkqhkiG9w0BAQsFAAOCAgEAbNQpBfvJ
+GequSRt4hkr4qSqM3TOsVkr6/DpM2CVHsIF6irb5sJaHjwNomw0C6ecE76j9Rm2f
+dK/TCo6vPdSvAcATwyfXBiPvRc+bT4oETBf7FqqMRwPRf35ftBL/4J1JVb/d2rFF
+hO/cu4sLTItSwlnvSuOqMDqmCpa4OfMPdTj16v7iJEkN1gMEIbi7uQdZiusO7En5
+s/w4Dreok+Q98jAKrHHuCoIKAfxMKB+1YPDN6FYfVqMbngnX8X+G4ysED5OWa47b
+qLMh1+VDKBbNmDAYx7PMEDjG3Hb4S6g+Uc5d6MxPccXwKoJTbA6vSuTTVvPL5ex5
+s1NPW50W39oPyV9818qHSmFt4RN+3dxXquBNPePKMugXU/77XKo4zeYE+zGucEph
+HaYbmfDNWp74ZG4qf6wTi91NlkkNiaihLbD17ez3AkWH9qXP37RzJ289eIcu42i5
+uDc82NKakJc4hR5h92Psc7J602gcOl2d23syFrvpMmPqVSjLYMir3ImpwIe7Pn3i
+hgywwGB1QPEnoSc3dPk8FmmFST/ULaU/Ktlc0PwxpirbLO3OTQR3/y4zqxSATWMJ
+Qs4L0ouTwzVJ633+mu+4xIO3wzvtNXHI5Q1mw78D3Xzx5B3Qu7QOkPiNQOKkmKcg
+rzKkOicYZ2gIk0hWdcb7gCJMS1h+8x6FPnE=
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.2/279.pem b/repos/system_upgrade/common/files/prod-certs/9.2/279.pem
new file mode 100644
index 00000000..8bd078f3
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.2/279.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGJTCCBA2gAwIBAgIJALDxRLt/tU8JMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxOTEyNDA0N1oXDTQyMDcx
+NDEyNDA0N1owRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs3ZTA5MmI3
+My1hYmYzLTQ5N2QtYWI4Yi03MDg1NWE0OTVjMGNdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBrjCBqzAJBgNVHRMEAjAAMEMGDCsGAQQBkggJAYIXAQQzDDFSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIFBvd2VyLCBsaXR0bGUgZW5kaWFuMBUGDCsG
+AQQBkggJAYIXAgQFDAM5LjIwGQYMKwYBBAGSCAkBghcDBAkMB3BwYzY0bGUwJwYM
+KwYBBAGSCAkBghcEBBcMFXJoZWwtOSxyaGVsLTktcHBjNjRsZTANBgkqhkiG9w0B
+AQsFAAOCAgEAAQNrWf/OVdfqx1Ln9b+8EI812sNt+e3TbIT9Qs/jFQ0FeQGZcYsA
+yBkB2uE9D6eBdTfteSk9LGazf7FYsvCCgv+V938qm1cfM+Y6UoUY6kE965w0DLgJ
+Cre/yRP8k3VzBTl5luLt9QX2x3jY/jVGdBKm1L3fJdfgSQQLEikJK+z/Eweiwmfq
+1lB/G9kIDNof3Jmm+TEBI9ToMg1zZBbmAudZGKp/jyDTo0Hnnfbr+TaPAYR8LD8A
+lQNWs2WqKakTLdqm3zKqKBTm0L35KEmBLNK6Gu+43CjBjXd5IGctumUJ7Bklgxm2
+JqFT14jERJrE/YLTmu2JcMz/VzbleRQ5jtl/RmKEnUD3GgyaMujtVu2TOMxB0i8v
+Ovi7Okdf3/VA83T9noW9EYbYFdq+o00oyAxFqQPASYRLVPsyX86OUe5tXo+s1w3D
+fG7sPRP7fvAjWLL+u+BT9V9GppxF1OHbdBitKY/7KocbejkEpTAHVF2y4SJ96aDg
+BXIsf7J78hpyAYdEhbL79djygH5iZloGapJzKHVSQ55Smaj6uIj5RkEAZTjdPmIE
+PGqv74eMswYI6K/B2eHwZmuFaTtgrHfAtgl4jKEnc3qaaaDRpaXAjM25FiZavcC4
+1pr59D/wDv+kRzRK9Qy3iuyDsboeYnU30qPdrry5SCx4qsi80VxSRMM=
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.2/362.pem b/repos/system_upgrade/common/files/prod-certs/9.2/362.pem
new file mode 100644
index 00000000..e783c625
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.2/362.pem
@@ -0,0 +1,36 @@
+-----BEGIN CERTIFICATE-----
+MIIGNDCCBBygAwIBAgIJALDxRLt/tU8fMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxOTEyNDQ0OVoXDTQyMDcx
+NDEyNDQ0OVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs0Y2YzNmI4
+OC0xM2QyLTQyZWYtYWM2NS1iYWQ1ZTc0ODc2ZWFdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBvTCBujAJBgNVHRMEAjAAMEgGDCsGAQQBkggJAYJqAQQ4DDZSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIFBvd2VyLCBsaXR0bGUgZW5kaWFuIEJldGEw
+GgYMKwYBBAGSCAkBgmoCBAoMCDkuMiBCZXRhMBkGDCsGAQQBkggJAYJqAwQJDAdw
+cGM2NGxlMCwGDCsGAQQBkggJAYJqBAQcDBpyaGVsLTkscmhlbC05LWJldGEtcHBj
+NjRsZTANBgkqhkiG9w0BAQsFAAOCAgEArjsodDEcCbXin1wyhdjpSQhZEmgtO9hX
+myaAAdOaWWrOKV6rSLEL2EhmeT/zCOPdmoErKHQrcdKutr6z9Bw06K1qiFwnfd/5
+SJJtkNBNJFtpTGDZHDG6GSbRg7hA9YbrqSoX6c5UYDX6VcUv9gNXlTIxyIT86kCV
+i4QcS9hH7HvTTtfCnO7W2j47w3sGqt/mLYQWSa2ZzMzbGpBty1tLO5lux9+HVH9z
+aRiiKCHrGXBbo6PiHjcl/Ikxc3rJRLWwI3q5tegC+MjyC2tmQdc1hhXKwZj51EMt
+B+s4bLYv3WmVlcaheN6//aHz+cO6xw6OBVgUt62xBG4XprT7tbTVY1bS7+pQZm0C
+y3eUZxkfofb5k7mJqGxebNGuXZWS1yJuaPc4AGyYvnqskKE6bsJbET71zS2qZnSU
+MqYjVJ0LdoSFgNsgebbG63GovYFJYB/4cFGk2l+21D5bOXTb4CbJmEgBsVzoRXuH
+/YeJSZ++h2Y78hjxFMXeztM5TaN2d/FPm41jN9fDeCwN0XZAhVLtvrizobEj/rZF
+fF3om6ETcg7cRn7l00zsQGZeAjMDYXjQprcj074ER2Oz+6/nGuOlgBXgn76jm/2E
+oomPas/YcyxOrG1V4oZAzyedOCuU+51iJK3qJXMYG/a4X8TXv5sKu/DpfLpIbaze
+oRQ+8ay5+ys=
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.2/363.pem b/repos/system_upgrade/common/files/prod-certs/9.2/363.pem
new file mode 100644
index 00000000..2afb74db
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.2/363.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGJjCCBA6gAwIBAgIJALDxRLt/tU8eMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxOTEyNDQ0NVoXDTQyMDcx
+NDEyNDQ0NVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFtmYjE2MTNh
+OS04YjcyLTRiOTUtOGE0Yy0zNmNiZTVmMjg2MGNdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBrzCBrDAJBgNVHRMEAjAAMDoGDCsGAQQBkggJAYJrAQQqDChSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIEFSTSA2NCBCZXRhMBoGDCsGAQQBkggJAYJr
+AgQKDAg5LjIgQmV0YTAZBgwrBgEEAZIICQGCawMECQwHYWFyY2g2NDAsBgwrBgEE
+AZIICQGCawQEHAwacmhlbC05LHJoZWwtOS1iZXRhLWFhcmNoNjQwDQYJKoZIhvcN
+AQELBQADggIBAK9GawETqhJTkT0VUEQt9Kn4s92TRaEMB4/X5pWDOG4BBQu5T3LV
+9xKelt6eVXPLvjytY3EgCZxm5xc+1zE/Gf9yypgH0vtNFqOr+/U9fn6YOfAwvDqo
+2ozNAmA11m5TKi57IGhQJGTaxJdKdOk3NEuwMcD1TfQqDtqMF27OnWdO501URJJW
+e52b0NraoeF6OicDKLgxc31fv457CcwT3k/GyAgmR31PXWkoySiB+Np/xf0uJQvf
+2iI1V4iqfcygMqniJsjEi2IMcLXBxs9DdFRPDMeVkmO3JKXCFjV+sHZB9LbsRh1o
+LTnAnEvfWx1nWUc3t9lwS54HlSKfOyPt/c+tPiXCHa19p+Z8gqk7KyztTMB1JeIE
+0HdjFfwino66rcEshfBEe3mq3ohY4Yq79PACKmbVVqYanBiRAvoR7j7cZROvEmGJ
+pq9qUZ91w4OwDx5G/IIUZVafGkVAiLACK3ACala4CQZmB/UKSihwnPiWXj7sdnYz
+CjEyk/z9q2zaFvB/H3fQdol0Vy66eQ+DPRO+eMnppCvG6SI5nah0ZJSnfmR+26Mc
+IeR2KzRoN1kwVMzMh3qOpSaneDOQTQONzzzmeOqVQohRbz9cfYZor99l8/LLXce6
+sH9LlaFP3aHoB5cdGyirTsB8Z65x/1y/4UrqdwdfO0o+DZH8kkhJ9roH
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.2/419.pem b/repos/system_upgrade/common/files/prod-certs/9.2/419.pem
new file mode 100644
index 00000000..f35743dc
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.2/419.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGFzCCA/+gAwIBAgIJALDxRLt/tU8IMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxOTEyNDA0MloXDTQyMDcx
+NDEyNDA0MlowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFthYzI5ZTA3
+Ni1mY2ViLTRhMTEtYjM3Yi03M2YxOGFiOTAzMmRdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBoDCBnTAJBgNVHRMEAjAAMDUGDCsGAQQBkggJAYMjAQQlDCNSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIEFSTSA2NDAVBgwrBgEEAZIICQGDIwIEBQwD
+OS4yMBkGDCsGAQQBkggJAYMjAwQJDAdhYXJjaDY0MCcGDCsGAQQBkggJAYMjBAQX
+DBVyaGVsLTkscmhlbC05LWFhcmNoNjQwDQYJKoZIhvcNAQELBQADggIBAGxyb6Sk
+QPbMUsdNVwMo5lL7yR/O8JsKfMgwnXgp4szymjgCRdYKAmk/TeceuHnM+1YxxyN2
+n11Oy67Vlcchpy5Vo9m1GjSk3oQ0biyJgSgMEoHdWPCwFYDTABMK5U/4Df7wBw/q
+4TvnaX5EhYO4nQo7Pc0A4eFOvyeKv6lTw0Rv5WNHFCMZSQLdPSpGLHZYMF0lyl/p
+yAQHpSkDFaB1mMvQLu9r7FbeRm2M8eyaRp1Ok4Ypxr2yXoBUQm3YPCpBBIwnqyD5
+trnpYkjncxe9q2DSRpYgRLEmu+2Qm5WbrJ0zZKYcs/jZbaH5mrWvNCLy5u3h442V
+vHEX+ITDyuB0507ORxOpyt+k2+JenEcYNg7aHn/fUnsWjutGfEY4aDIVOnZxAf31
+DLDJXPH4/jjO9dd/4fKykgLP8OUq5x+VXAtufpyDUyYVqXnIXwfUPN0NSl8gtUKJ
+ruHJ7gNyYqdopMquuOWb/Mew2DnwXFA9b3goYBbdaCzkt7k9Zdafzz6Mu1NnxUkf
+tMyJOmPBCZSDHRilTA/dA+8Lvj+2H6q7aEFzLv1saAoktxB/fggpBJm3jRs4dy3T
+xbcWnF++VANF6LQ+5bI8dxX6/FC5/zjJd1oEoiIS7dcFUZ0uf6x5aBuzjB+c2G0C
+MnR4x3OKYQl6cy3pFJkQNgLoAHXVRsNOmVe6
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.2/433.pem b/repos/system_upgrade/common/files/prod-certs/9.2/433.pem
new file mode 100644
index 00000000..8af44fae
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.2/433.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGKTCCBBGgAwIBAgIJALDxRLt/tU8gMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxOTEyNDQ1NFoXDTQyMDcx
+NDEyNDQ1NFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs1Y2E3YWM5
+Ny0yMmZhLTRmZDUtODU3My04NTc1YjAxOWQ5N2RdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBsjCBrzAJBgNVHRMEAjAAMEEGDCsGAQQBkggJAYMxAQQxDC9SZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIElCTSB6IFN5c3RlbXMgQmV0YTAaBgwrBgEE
+AZIICQGDMQIECgwIOS4yIEJldGEwFwYMKwYBBAGSCAkBgzEDBAcMBXMzOTB4MCoG
+DCsGAQQBkggJAYMxBAQaDBhyaGVsLTkscmhlbC05LWJldGEtczM5MHgwDQYJKoZI
+hvcNAQELBQADggIBAM/RY5sRACnyRmPKq0fGBuApNJU/m8q116Ls6FSpgZiz5xa5
+qUaWW2UHn/oFdXd7A3kaLL/9VbrFVfuC/wiz+te0EqHy2NPwlGgKmbVjFZn4PcoG
+YzTopv5bwr90WONkLt7jDbhls8ZbGgPY6qUDA2TbtvHPDNPIM9ukoin9BrurksUS
+XJ9UsV3jHV9yye/u6nM5FZmc9E0IagoS/asd0B3Y3egkbCn5bcfyYvV2Y8cn5/gg
+SucFU1KIwxLOs+J61RfaFh5O/22ZJtPG/7zMYXPk/Luas0YZUEiVFjc4BWQRmM94
+dF142BpwOX9L5LBMtMhuB0sWpov7wlQamFiP2ZtsVLQgoqFKW3MXHZNy3f1FQM10
+ei9lglw7qrhoeKj7UtedL4zJREtr4fhG3TzLhDqa8GvIEr+JAPtg2eRslO6uu67e
+RdE2AIYY6HWKQ5FcEfkCdW/hFFeVr0MjvBgQCYJlO8fmHxgOAQSKjjAzyRVAcjTk
+x+8v69ucZ3uMZb6oFUZH+p67XuduCm3sQCFk+Ilscr/8E/MNB4x0bPCIXLK6T3aQ
+9JKBxofBKtTSzyxEFEXqYLYJyQrAKXVpOgOrAMmeLHwA3IoikVG1x6/GwVuYTBUA
+B0lW/aO8mL0caQyebnE4fpYef5GzrtvOt2rGB54N/3AipD5dOW/AeYP/Wcj0
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.2/479.pem b/repos/system_upgrade/common/files/prod-certs/9.2/479.pem
new file mode 100644
index 00000000..7ed95967
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.2/479.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGFTCCA/2gAwIBAgIJALDxRLt/tU8LMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxOTEyNDA1NloXDTQyMDcx
+NDEyNDA1NlowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs0ZmU2ODU0
+NC0yYjYwLTRiOGYtODdhYS02MzkxNWJkNGMyMjhdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBnjCBmzAJBgNVHRMEAjAAMDUGDCsGAQQBkggJAYNfAQQlDCNSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIHg4Nl82NDAVBgwrBgEEAZIICQGDXwIEBQwD
+OS4yMBgGDCsGAQQBkggJAYNfAwQIDAZ4ODZfNjQwJgYMKwYBBAGSCAkBg18EBBYM
+FHJoZWwtOSxyaGVsLTkteDg2XzY0MA0GCSqGSIb3DQEBCwUAA4ICAQA0Sgnj5BjL
+2p4U7R/TOMhkP/7Tm4AkdmMvhkUb7c0tZhY3jJaJJt2U9IBTd8sN5Z/mb3Zr03dQ
+8gOb5mpfMGVrwoMjgDhZniRJ6/0yPKrgiRbGijHS6mXkU4dkzh6N/HyBjpQUuOaK
+5isXArEx7kv3k0Hun2DPdw8oBhXgH7x0TL3K3Yz+VXiX6Tcn4tlMTTBuR8NngP57
+V9xmtLncR8rSdNr8j7cxAoXGaSPlE4K0cTGz87gAja6702CVk8ueB8bU68S47ZEK
+xLDcj1iWiVjYiZSFO7gWFavrlitEE+yW8c6oLVVXKfA8TxrJ1VuSTqU+fOojx5sM
+qtNqeMPLzz80M6dNrfuOJ+FHuwXu6Ytj8u/u24ds12TU7NCV9YLyfB2NDhueALtr
+/6OKlANU4DdxdL3947KGnnQZLpEpDpvsgOUBFGOivNIbHt0QXpV9tnMwsWx6tQ82
+exnin3PJBkR2rg5/xv9ZXNb4WdYA3FwLsyej9gM7S4rFgMZzr7n2S5Dd8v9kRYHl
+JGUdY3LsY+SfxyYNalJirt3JxeIuLg0QZIXQP0BwBX92zZb+Zw4MxI1AcJvxsGkf
+7vGqTnIlPPER+IdK6SNeF3yJ4FQb6U1WMAyw0yqFPm4s7asaV/aULZu6+p13NlKZ
+r331U/otUJX8S2irN9kUt/oKdV/MVlgsFg==
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.2/486.pem b/repos/system_upgrade/common/files/prod-certs/9.2/486.pem
new file mode 100644
index 00000000..c786ea82
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.2/486.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGJDCCBAygAwIBAgIJALDxRLt/tU8hMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxOTEyNDQ1OFoXDTQyMDcx
+NDEyNDQ1OFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFsyMzg4MDQx
+Yy1iYWMxLTRmZGEtYWJjZS0zNWNkMGY5MzQxMDRdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBrTCBqjAJBgNVHRMEAjAAMDoGDCsGAQQBkggJAYNmAQQqDChSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIHg4Nl82NCBCZXRhMBoGDCsGAQQBkggJAYNm
+AgQKDAg5LjIgQmV0YTAYBgwrBgEEAZIICQGDZgMECAwGeDg2XzY0MCsGDCsGAQQB
+kggJAYNmBAQbDBlyaGVsLTkscmhlbC05LWJldGEteDg2XzY0MA0GCSqGSIb3DQEB
+CwUAA4ICAQAHqIuoFbUAfhRFzLGeuTaJVidWk7nbmwyGKOHBDHannogHXSxJM5nt
+Ct5vFqG7uC0UE0JgUPz/q2o6iFhro3slfvWHA1sW83XN+yiaTtDIQl8Y7O7n4wJ1
+NXH7mRC/L+58P1/HJ3gEaBdBfKiHte6J8FPonuRkfJrREiPgo+B9zNf0BEjl6xqr
+7SgfJZMO257Lkg3/Tl4amZ8M/cm/P/Z+kprfvUDsJzBQJ1z7qhriUuXFJfS799mG
++UV/wO0ZtdhGaHAXR28/MmtearogcM9rhp9DfdqmKdhktIcoHBuDXLUxnwUhX+W3
+AJTNf7YwyYUKEHzhPLJH8v0JH8N/Cfd2PQHrQ1zni0D3BXTygHrbDEWZDm+3jSOF
+joyEIFHlWIb7eF67a7x/7iiS2op07E0Ka3h3SYHy/l+WvqPg8O28Zz3U6o1dCtBT
+odDtz9FVcGJ1MhMZ3F71XvM+TNEASJW1aK0bRoJMUXZ1krtHWUCsZuea3X5JAOey
+CycnOcUkvu8tzIOmgaqPmeolG/tKdlEY90Sc8XLw/KWsW0tfqqU9weppoZnCqPyp
+8YQiUEumjpGOtZUJRvootlBN9CQH8ilCOl1c4CsGdcmnXwnC0Z8gYzM+HhcqYenD
+Y+O3lNd3WsLoQrGfj2dMYWnKFOLKJovaYpOXiQOW6ghpM5bWdqVIuQ==
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.2/72.pem b/repos/system_upgrade/common/files/prod-certs/9.2/72.pem
new file mode 100644
index 00000000..dabf8506
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.2/72.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGFjCCA/6gAwIBAgIJALDxRLt/tU8KMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIyMDcxOTEyNDA1MVoXDTQyMDcx
+NDEyNDA1MVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs1YWUwNTdk
+ZC1kMWI3LTQ4NzEtYTA5MS0wYzY4MzcxMTkyZDldMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOBnzCBnDAJBgNVHRMEAjAAMDsGCysGAQQBkggJAUgBBCwMKlJlZCBIYXQg
+RW50ZXJwcmlzZSBMaW51eCBmb3IgSUJNIHogU3lzdGVtczAUBgsrBgEEAZIICQFI
+AgQFDAM5LjIwFgYLKwYBBAGSCAkBSAMEBwwFczM5MHgwJAYLKwYBBAGSCAkBSAQE
+FQwTcmhlbC05LHJoZWwtOS1zMzkweDANBgkqhkiG9w0BAQsFAAOCAgEApFHsXGnC
+mGFM6yMkJYDGxYGDdsOY0xl0IMT6m2bvMRlbcykLhOL/CxwjZsS/mGPeHG4Q44+e
+pq+xMh3013klRN9iZoKFHSBTuXHDxzjjEPYR414O7FehNB82f3GlkLv6z57WeAxw
+wAqPvFcsIACzVEDOvSWQzn5aDEJURHT2caax/Psm+NT5nBneueySIOe5FDZmpgDJ
+7xqnUCaniM8RN4YlNQLm8V5wM9akiIhp/60Pq4bqSvlN23vOQ/QOTUtGyGlBtsGs
+LVhR0ssaTKlHbA+1xntZkEjNI229PcFFYeWXw5Fn/18l/ulfGCmbOMuRfDpC15Wl
+dLGETkpUVcflhJOloYcaPi+6RSXEMqyMSgLfN0k1IDJdV2Gh0Ok+HUYlxgPZ07+Q
+OW2jky9+tC2kLDh424J1sZUB+M/ONGJGHwXBHsIqMcbhVzDpGpHkQoMt6jDWw+li
+mHmwmSqKGxH/uhnVepSH6iJi4pF16YhrteW4wjtmrFFp7RsvxggqfHL8IgZSZ/Es
+pvTqSygWCU6hHoHdQdIrVph1VYSpvNyaEsopj+4F8oHSzC+sXQ+4iJ++CpCFised
+pG34sx+vFi/kcRnYnd8z20dbSVeH2j2+WSaYiV53mxUdA/Hp9XEn2u7p8WWIcv79
+21f+YSbmvDuP6xg5D/l9lg1q6FljH6NcpBE=
+-----END CERTIFICATE-----
--
2.38.1

View File

@ -1,52 +0,0 @@
From 7aab954d2ed9fcdd67ceb4c6a783fafbd6021c8a Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Wed, 16 Nov 2022 20:19:34 +0100
Subject: [PATCH 32/37] Introduce new upgrade paths 8.8/9.2
The new enabled upgrade paths are:
RHEL 7.9 -> 8.8, 8.6 (default: 8.8)
RHEL 8.8 -> 9.2
Keeping some previous upgrade paths still opened, but expecting
dropping them before the release.
---
repos/system_upgrade/common/files/upgrade_paths.json | 7 ++++---
repos/system_upgrade/common/libraries/config/version.py | 2 +-
2 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/repos/system_upgrade/common/files/upgrade_paths.json b/repos/system_upgrade/common/files/upgrade_paths.json
index 11d52423..c48d916f 100644
--- a/repos/system_upgrade/common/files/upgrade_paths.json
+++ b/repos/system_upgrade/common/files/upgrade_paths.json
@@ -1,11 +1,12 @@
{
"default": {
"7.6": ["8.4", "8.6"],
- "7.9": ["8.4", "8.6"],
+ "7.9": ["8.6", "8.8"],
"8.6": ["9.0"],
"8.7": ["9.0"],
- "7": ["8.4", "8.6"],
- "8": ["9.0"]
+ "8.8": ["9.2"],
+ "7": ["8.6", "8.8"],
+ "8": ["9.2"]
},
"saphana": {
"7.9": ["8.2", "8.6"],
diff --git a/repos/system_upgrade/common/libraries/config/version.py b/repos/system_upgrade/common/libraries/config/version.py
index e148932a..7104bdc5 100644
--- a/repos/system_upgrade/common/libraries/config/version.py
+++ b/repos/system_upgrade/common/libraries/config/version.py
@@ -14,7 +14,7 @@ OP_MAP = {
_SUPPORTED_VERSIONS = {
# Note: 'rhel-alt' is detected when on 'rhel' with kernel 4.x
'7': {'rhel': ['7.9'], 'rhel-alt': ['7.6'], 'rhel-saphana': ['7.9']},
- '8': {'rhel': ['8.6', '8.7'], 'rhel-saphana': ['8.6']},
+ '8': {'rhel': ['8.6', '8.7', '8.8'], 'rhel-saphana': ['8.6']},
}
--
2.38.1

View File

@ -1,46 +0,0 @@
From d6ddc8e6250bf3c07633a84b81e8f4b66c23c0e5 Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Fri, 25 Nov 2022 17:11:26 +0100
Subject: [PATCH 33/37] testutils: Implement get_common_tool_path method
---
repos/system_upgrade/common/libraries/testutils.py | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/repos/system_upgrade/common/libraries/testutils.py b/repos/system_upgrade/common/libraries/testutils.py
index fc20aa3b..c538af1a 100644
--- a/repos/system_upgrade/common/libraries/testutils.py
+++ b/repos/system_upgrade/common/libraries/testutils.py
@@ -75,7 +75,9 @@ class CurrentActorMocked(object): # pylint:disable=R0904
release = namedtuple('OS_release', ['release_id', 'version_id'])(release_id, src_ver)
self._common_folder = '../../files'
+ self._common_tools_folder = '../../tools'
self._actor_folder = 'files'
+ self._actor_tools_folder = 'tools'
self.configuration = namedtuple(
'configuration', ['architecture', 'kernel', 'leapp_env_vars', 'os_release', 'version', 'flavour']
)(arch, kernel, envarsList, release, version, flavour)
@@ -87,6 +89,9 @@ class CurrentActorMocked(object): # pylint:disable=R0904
def get_common_folder_path(self, folder):
return os.path.join(self._common_folder, folder)
+ def get_common_tool_path(self, name):
+ return os.path.join(self._common_tools_folder, name)
+
def consume(self, model):
return iter(filter( # pylint:disable=W0110,W1639
lambda msg: isinstance(msg, model), self._msgs
@@ -149,9 +154,6 @@ class CurrentActorMocked(object): # pylint:disable=R0904
def get_tool_path(self, name):
raise NotImplementedError
- def get_common_tool_path(self, name):
- raise NotImplementedError
-
def get_actor_tool_path(self, name):
raise NotImplementedError
--
2.38.1

View File

@ -1,91 +0,0 @@
From f1c00a3823751d3fccaba3c98be86eba2b16930c Mon Sep 17 00:00:00 2001
From: Petr Stodulka <xstodu05@gmail.com>
Date: Sat, 26 Nov 2022 12:27:46 +0100
Subject: [PATCH 34/37] targetuserspacecreator: improve copy of /etc/pki
(rpm-gpg)
The original solution copied /etc/pki from the host into the
target userspace container if the upgrade has been performed with
RHSM, which causes several negative impacts:
a) certificates are missing inside the container when upgrading
without RHSM (still issue)
- Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2040706
b) the target OS certificates are replaced by the original OS
certificates when upgrading with RHSM (partially fixed)
This commit partially fixes the case b), so we preserve target
certificates inside the container only from the /etc/pki/rpm-gpg
directory when upgrading with RHSM. If files or directories with
the same name exists inside, prefered are those from the target OS.
For the full fix of this case. The full fix should preserve
all certificates owned by packages inside the container, and only
"new files" from the host should be applied. This is also prerequisite
to be able to fix the case a).
To be able to fix the case a) we would need to make this behaviour
unconditional (not dependent on the use of RHSM). Which most likely
should resolve the bug 2040706. Which needs the full fix of the case
b) first, as described above. The unconditional copy of /etc/pki
currently breaks upgrades on systems using RHUI (at least on
Azure for IPU 8 -> 9, other clouds could be affected also).
So postponing the sollution to a followup PR.
---
.../libraries/userspacegen.py | 32 +++++++++++++++++--
1 file changed, 30 insertions(+), 2 deletions(-)
diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
index 5a6a80f2..0415f0fe 100644
--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
+++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
@@ -235,6 +235,33 @@ def _get_files_owned_by_rpms(context, dirpath, pkgs=None):
return files_owned_by_rpms
+def _copy_certificates(context, target_userspace):
+ """
+ Copy the needed cetificates into the container, but preserve original ones
+
+ Some certificates are already installed in the container and those are
+ default certificates for the target OS. We know we should preserve at
+ least certificates located at rpm-gpg directory. So preserve these for
+ now at least.
+ """
+ target_pki = os.path.join(target_userspace, 'etc', 'pki')
+ backup_pki = os.path.join(target_userspace, 'etc', 'pki.backup')
+
+ # FIXME(pstodulk): search for all files owned by RPMs inside the container
+ # before the mv, and all such files restore
+ # - this is requirement to not break IPU with RHUI when making the copy
+ # of certificates unconditional
+ run(['mv', target_pki, backup_pki])
+ context.copytree_from('/etc/pki', target_pki)
+
+ # TODO(pstodulk): restore the files owned by rpms instead of the code below
+ for fname in os.listdir(os.path.join(backup_pki, 'rpm-gpg')):
+ src_path = os.path.join(backup_pki, 'rpm-gpg', fname)
+ dst_path = os.path.join(target_pki, 'rpm-gpg', fname)
+ run(['rm', '-rf', dst_path])
+ run(['cp', '-a', src_path, dst_path])
+
+
def _prep_repository_access(context, target_userspace):
"""
Prepare repository access by copying all relevant certificates and configuration files to the userspace
@@ -243,9 +270,10 @@ def _prep_repository_access(context, target_userspace):
target_yum_repos_d = os.path.join(target_etc, 'yum.repos.d')
backup_yum_repos_d = os.path.join(target_etc, 'yum.repos.d.backup')
if not rhsm.skip_rhsm():
- run(['rm', '-rf', os.path.join(target_etc, 'pki')])
+ # TODO: make the _copy_certificates unconditional. keeping it conditional
+ # due to issues causing on RHUI
+ _copy_certificates(context, target_userspace)
run(['rm', '-rf', os.path.join(target_etc, 'rhsm')])
- context.copytree_from('/etc/pki', os.path.join(target_etc, 'pki'))
context.copytree_from('/etc/rhsm', os.path.join(target_etc, 'rhsm'))
# NOTE: we cannot just remove the original target yum.repos.d dir
# as e.g. in case of RHUI a special RHUI repofiles are installed by a pkg
--
2.38.1

View File

@ -1,75 +0,0 @@
From 9628970bf0d5a7db6553c57b55f4623c91330228 Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Thu, 24 Nov 2022 12:48:51 +0100
Subject: [PATCH 35/37] DNFWorkaround: extend the model by script_args
The original model provided a possibility to execute a script
that will handle problems before the DNF / RPM transaction,
in correct contexts (overlay, host system, ..) before any use
of the upgrade dnf plugin.
But current solution provided only the script_path field, which
suggests it should contain only the path to the script. The executed
command (inside a context) looked like this:
bash -c script_path
However we have realized we need to be able to execute a script
with additional arguments. Regarding that, introducing
the script_args field. SO the final command looks like this:
bash -c 'script_path arg1 arg2..'
when script_args are specified. The default is set to an empty
list.
---
.../common/libraries/dnfplugin.py | 9 ++++++++-
.../common/models/dnfworkaround.py | 18 ++++++++++++++++--
2 files changed, 24 insertions(+), 3 deletions(-)
diff --git a/repos/system_upgrade/common/libraries/dnfplugin.py b/repos/system_upgrade/common/libraries/dnfplugin.py
index 0a546637..0ef9ea9b 100644
--- a/repos/system_upgrade/common/libraries/dnfplugin.py
+++ b/repos/system_upgrade/common/libraries/dnfplugin.py
@@ -241,7 +241,14 @@ def apply_workarounds(context=None):
for workaround in api.consume(DNFWorkaround):
try:
api.show_message('Applying transaction workaround - {}'.format(workaround.display_name))
- context.call(['/bin/bash', '-c', workaround.script_path])
+ if workaround.script_args:
+ cmd_str = '{script} {args}'.format(
+ script=workaround.script_path,
+ args=' '.join(workaround.script_args)
+ )
+ else:
+ cmd_str = workaround.script_path
+ context.call(['/bin/bash', '-c', cmd_str])
except (OSError, CalledProcessError) as e:
raise StopActorExecutionError(
message=('Failed to execute script to apply transaction workaround {display_name}.'
diff --git a/repos/system_upgrade/common/models/dnfworkaround.py b/repos/system_upgrade/common/models/dnfworkaround.py
index c921c5fc..4a813dcd 100644
--- a/repos/system_upgrade/common/models/dnfworkaround.py
+++ b/repos/system_upgrade/common/models/dnfworkaround.py
@@ -15,6 +15,20 @@ class DNFWorkaround(Model):
topic = SystemInfoTopic
script_path = fields.String()
- """ Absolute path to a bash script to execute """
+ """
+ Absolute path to a bash script to execute
+ """
+
+ script_args = fields.List(fields.String(), default=[])
+ """
+ Arguments with which the script should be executed
+
+ In case that an argument contains a whitespace or an escapable character,
+ the argument must be already treated correctly. e.g.
+ `script_args = ['-i', 'my\\ string']
+ """
+
display_name = fields.String()
- """ Name to display for this script when executed """
+ """
+ Name to display for this script when executed
+ """
--
2.38.1

View File

@ -1,57 +0,0 @@
From 2277012bc6aab1f473eda8070b48d75487a41bb7 Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Thu, 24 Nov 2022 17:57:12 +0100
Subject: [PATCH 36/37] Introduce theimportrpmgpgkeys tool script
The script can be used to import gpg keys from a particular file
or files inside a directory. Expected to be executed like:
importrpmgpgkey <absolute-path>
---
.../common/tools/importrpmgpgkeys | 35 +++++++++++++++++++
1 file changed, 35 insertions(+)
create mode 100755 repos/system_upgrade/common/tools/importrpmgpgkeys
diff --git a/repos/system_upgrade/common/tools/importrpmgpgkeys b/repos/system_upgrade/common/tools/importrpmgpgkeys
new file mode 100755
index 00000000..79e5c580
--- /dev/null
+++ b/repos/system_upgrade/common/tools/importrpmgpgkeys
@@ -0,0 +1,35 @@
+#!/usr/bin/bash -ef
+
+log_error() {
+ echo >&2 "Error: $1"
+}
+
+log_info() {
+ echo >&2 "Info: $1"
+}
+
+if [ "$#" -eq 0 ]; then
+ log_error "Missing the required path to the directory with trusted GPG keys."
+ exit 1
+elif [ "$#" -ge 2 ]; then
+ log_error "Expected only one argument, received $#. Possibly unescaped whitespaces? '$*'"
+ exit 1
+fi
+
+if [ ! -e "$1" ]; then
+ log_error "The $1 directory does not exist."
+ exit 1
+fi
+
+error_flag=0
+IFS=$'\n'
+# shellcheck disable=SC2044
+for key_file in $(find -L "$1" -type f); do
+ log_info "Importing GPG keys from: $key_file"
+ rpm --import "$key_file" || {
+ error_flag=2
+ log_error "Unable to import GPG keys from: $key_file"
+ }
+done
+
+exit $error_flag
--
2.38.1

View File

@ -1,68 +0,0 @@
From 56da8453683c529c62823aedda2d3b81d1a55a0f Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Tue, 29 Nov 2022 22:18:50 +0100
Subject: [PATCH] missinggpgkey: polish the report msg
---
.../libraries/missinggpgkey.py | 26 ++++++++++++++-----
1 file changed, 20 insertions(+), 6 deletions(-)
diff --git a/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py
index b8b28df2..7f038ee0 100644
--- a/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py
+++ b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py
@@ -21,6 +21,7 @@ from leapp.models import (
from leapp.utils.deprecation import suppress_deprecation
GPG_CERTS_FOLDER = 'rpm-gpg'
+FMT_LIST_SEPARATOR = '\n - '
def _gpg_show_keys(key_path):
@@ -251,16 +252,29 @@ def _report_missing_keys(missing_keys):
# TODO(pstodulk): polish the report, use FMT_LIST_SEPARATOR
# the list of keys should be mentioned in the summary
summary = (
- "Some of the target repositories require GPG keys that are missing from the current"
- " RPM DB. Leapp will not be able to verify packages from these repositories during the upgrade process."
+ 'Some of the target repositories require GPG keys that are not installed'
+ ' in the current RPM DB or are not stored in the {trust_dir} directory.'
+ ' Leapp is not able to guarantee validity of such gpg keys and manual'
+ ' review is required, so any spurious keys are not imported in the system'
+ ' during the in-place upgrade.'
+ ' The following additional gpg keys are required to be imported during'
+ ' the upgrade:{sep}{key_list}'
+ .format(
+ trust_dir=_get_path_to_gpg_certs(),
+ sep=FMT_LIST_SEPARATOR,
+ key_list=FMT_LIST_SEPARATOR.join(missing_keys)
+ )
)
hint = (
- "Please, review the following list and import the GPG keys before "
- "continuing the upgrade:\n * {}".format('\n * '.join(missing_keys))
+ 'Check the listed GPG keys they are valid and import them into the'
+ ' host RPM DB or store them inside the {} directory prior the upgrade.'
+ ' If you want to proceed the in-place upgrade without checking any RPM'
+ ' signatures, execute leapp with the `--nogpgcheck` option.'
+ .format(_get_path_to_gpg_certs())
)
reporting.create_report(
[
- reporting.Title("Missing GPG key from target system repository"),
+ reporting.Title('Detected unknown GPG keys for target system repositories'),
reporting.Summary(summary),
reporting.Severity(reporting.Severity.HIGH),
reporting.Groups([reporting.Groups.REPOSITORY, reporting.Groups.INHIBITOR]),
@@ -351,7 +365,7 @@ def process():
if not fps:
# TODO: for now. I think it should be treated better
api.current_logger().warning(
- "Cannot get any gpg key from the file: {}".format(gpgkey_url)
+ 'Cannot get any gpg key from the file: {}'.format(gpgkey_url)
)
continue
for fp in fps:
--
2.38.1

View File

@ -1,54 +0,0 @@
From b6d5a0790fd09fbb1f7eef6faef738cd50bd40cb Mon Sep 17 00:00:00 2001
From: Sergii Golovatiuk <sgolovat@redhat.com>
Date: Thu, 10 Nov 2022 12:37:40 +0100
Subject: [PATCH 39/63] Fix cephvolume actor
Change cephvolume behavior to return None when ceph-osd container is not
found.
Fixes: rhbz#2141393
---
.../actors/cephvolumescan/libraries/cephvolumescan.py | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/repos/system_upgrade/common/actors/cephvolumescan/libraries/cephvolumescan.py b/repos/system_upgrade/common/actors/cephvolumescan/libraries/cephvolumescan.py
index 7e3d544c..19f49528 100644
--- a/repos/system_upgrade/common/actors/cephvolumescan/libraries/cephvolumescan.py
+++ b/repos/system_upgrade/common/actors/cephvolumescan/libraries/cephvolumescan.py
@@ -12,7 +12,6 @@ CONTAINER = "ceph-osd"
def select_osd_container(engine):
- container_name = ""
try:
output = run([engine, 'ps'])
except CalledProcessError as cpe:
@@ -24,7 +23,7 @@ def select_osd_container(engine):
container_name = line.split()[-1]
if re.match(CONTAINER, container_name):
return container_name
- return container_name
+ return None
def get_ceph_lvm_list():
@@ -35,6 +34,8 @@ def get_ceph_lvm_list():
cmd_ceph_lvm_list = base_cmd
else:
container_name = select_osd_container(container_binary)
+ if container_name is None:
+ return None
cmd_ceph_lvm_list = [container_binary, 'exec', container_name]
cmd_ceph_lvm_list.extend(base_cmd)
try:
@@ -58,5 +59,6 @@ def encrypted_osds_list():
result = []
if os.path.isfile(CEPH_CONF):
output = get_ceph_lvm_list()
- result = [output[key][0]['lv_uuid'] for key in output if output[key][0]['tags']['ceph.encrypted']]
+ if output is not None:
+ result = [output[key][0]['lv_uuid'] for key in output if output[key][0]['tags']['ceph.encrypted']]
return result
--
2.39.0

View File

@ -1,26 +0,0 @@
From 5f5b5251e478e85087ea9ff7186fd58799f7def9 Mon Sep 17 00:00:00 2001
From: mreznik <mreznik@redhat.com>
Date: Tue, 6 Dec 2022 15:34:11 +0100
Subject: [PATCH 40/63] Include also Leapp RHUI special rpms in the whitelist
---
.../common/actors/redhatsignedrpmscanner/actor.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py b/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py
index 647805cd..07962adf 100644
--- a/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py
+++ b/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py
@@ -63,6 +63,9 @@ class RedHatSignedRpmScanner(Actor):
whitelisted_cloud_pkgs.update(
rhui.RHUI_CLOUD_MAP[upg_path].get(flavour, {}).get('target_pkg') for flavour in whitelisted_cloud_flavours
)
+ whitelisted_cloud_pkgs.update(
+ rhui.RHUI_CLOUD_MAP[upg_path].get(flavour, {}).get('leapp_pkg') for flavour in whitelisted_cloud_flavours
+ )
for rpm_pkgs in self.consume(InstalledRPM):
for pkg in rpm_pkgs.items:
--
2.39.0

View File

@ -1,90 +0,0 @@
From 97c9bd4a18d415289bceba91c534433561759aa2 Mon Sep 17 00:00:00 2001
From: Vinzenz Feenstra <vfeenstr@redhat.com>
Date: Mon, 31 Aug 2020 14:54:00 +0200
Subject: [PATCH 41/63] [POC] initram networking
Adding initial basic networking support for the initram phase.
Controlled by the LEAPP_INITRAM_NETWORK environment variable which must
be set to either `scripts` or `network-manager` to choose between the
legacy or NetworkManager based dracut modules.
Recommended to use is the network-manager part at this moment as it will
take care of most of things including default routing etc.
Signed-off-by: Vinzenz Feenstra <vfeenstr@redhat.com>
---
.../libraries/addupgradebootentry.py | 5 +++--
.../dracut/85sys-upgrade-redhat/module-setup.sh | 9 +++++++++
.../files/generate-initram.sh | 13 +++++++++++++
3 files changed, 25 insertions(+), 2 deletions(-)
diff --git a/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py b/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py
index 3836a0d1..ca9802bb 100644
--- a/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py
+++ b/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py
@@ -9,7 +9,8 @@ from leapp.models import BootContent, KernelCmdlineArg, TargetKernelCmdlineArgTa
def add_boot_entry(configs=None):
debug = 'debug' if os.getenv('LEAPP_DEBUG', '0') == '1' else ''
-
+ enable_network = os.getenv('LEAPP_INITRAM_NETWORK') in ('network-manager', 'scripts')
+ ip_arg = ' ip=on' if enable_network else ''
kernel_dst_path, initram_dst_path = get_boot_file_paths()
_remove_old_upgrade_boot_entry(kernel_dst_path, configs=configs)
try:
@@ -20,7 +21,7 @@ def add_boot_entry(configs=None):
'--title', 'RHEL-Upgrade-Initramfs',
'--copy-default',
'--make-default',
- '--args', '{DEBUG} enforcing=0 rd.plymouth=0 plymouth.enable=0'.format(DEBUG=debug)
+ '--args', '{DEBUG}{NET} enforcing=0 rd.plymouth=0 plymouth.enable=0'.format(DEBUG=debug, NET=ip_arg)
]
if configs:
for config in configs:
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/module-setup.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/module-setup.sh
index 18d1d07f..d73060cb 100755
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/module-setup.sh
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/module-setup.sh
@@ -80,6 +80,15 @@ install() {
# Q: Would we hack that in way of copy whole initramfs into the root, mount
# mount it and set envars
+ # Install network configuration triggers
+ if [ -f /etc/leapp-initram-network-manager ]; then
+ dracut_install /etc/leapp-initram-network-manager
+ fi
+
+ if [ -f /etc/leapp-initram-network-scripts ]; then
+ dracut_install /etc/leapp-initram-network-scripts
+ fi
+
# install this one to ensure we are able to sync write
inst_binary sync
# install in-band debugging utilities
diff --git a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/files/generate-initram.sh b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/files/generate-initram.sh
index b3478280..7748aa78 100755
--- a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/files/generate-initram.sh
+++ b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/files/generate-initram.sh
@@ -67,6 +67,19 @@ build() {
DRACUT_MODULES_ADD=$(echo "--add $LEAPP_ADD_DRACUT_MODULES" | sed 's/,/ --add /g')
fi
+ case $LEAPP_INITRAM_NETWORK in
+ network-manager)
+ DRACUT_MODULES_ADD="$DRACUT_MODULES_ADD --add network-manager"
+ touch /etc/leapp-initram-network-manager
+ ;;
+ scripts)
+ DRACUT_MODULES_ADD="$DRACUT_MODULES_ADD --add network";
+ touch /etc/leapp-initram-network-scripts
+ ;;
+ *)
+ ;;
+ esac
+
DRACUT_INSTALL="systemd-nspawn"
if [[ -n "$LEAPP_DRACUT_INSTALL_FILES" ]]; then
DRACUT_INSTALL="$DRACUT_INSTALL $LEAPP_DRACUT_INSTALL_FILES"
--
2.39.0

View File

@ -1,158 +0,0 @@
From f8c96d8a8d2cf8fc1eeac0349aa48fe83567eecb Mon Sep 17 00:00:00 2001
From: Inessa Vasilevskaya <ivasilev@redhat.com>
Date: Mon, 5 Sep 2022 11:56:03 +0200
Subject: [PATCH 42/63] Skip check nfs actor if env var is set
In case LEAPP_INITRAM_NETWORK is set nfs upgrade inhibitors
can be skipped.
---
.../common/actors/checknfs/actor.py | 4 ++
.../actors/checknfs/tests/test_checknfs.py | 61 ++++++++++++++++---
2 files changed, 58 insertions(+), 7 deletions(-)
diff --git a/repos/system_upgrade/common/actors/checknfs/actor.py b/repos/system_upgrade/common/actors/checknfs/actor.py
index f3424504..370ae6b3 100644
--- a/repos/system_upgrade/common/actors/checknfs/actor.py
+++ b/repos/system_upgrade/common/actors/checknfs/actor.py
@@ -1,5 +1,6 @@
from leapp import reporting
from leapp.actors import Actor
+from leapp.libraries.common.config import get_env
from leapp.models import StorageInfo
from leapp.reporting import create_report, Report
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
@@ -18,6 +19,9 @@ class CheckNfs(Actor):
tags = (ChecksPhaseTag, IPUWorkflowTag,)
def process(self):
+ # if network in initramfs is enabled NFS inhibitors are redundant
+ if get_env('LEAPP_INITRAM_NETWORK', None):
+ return
details = "NFS is currently not supported by the inplace upgrade.\n" \
"We have found NFS usage at the following locations:\n"
diff --git a/repos/system_upgrade/common/actors/checknfs/tests/test_checknfs.py b/repos/system_upgrade/common/actors/checknfs/tests/test_checknfs.py
index 7e52440f..a8d18ed1 100644
--- a/repos/system_upgrade/common/actors/checknfs/tests/test_checknfs.py
+++ b/repos/system_upgrade/common/actors/checknfs/tests/test_checknfs.py
@@ -1,5 +1,6 @@
import pytest
+from leapp.libraries.common import config
from leapp.models import FstabEntry, MountEntry, StorageInfo, SystemdMountEntry
from leapp.reporting import Report
from leapp.snactor.fixture import current_actor_context
@@ -7,7 +8,8 @@ from leapp.utils.report import is_inhibitor
@pytest.mark.parametrize('nfs_fstype', ('nfs', 'nfs4'))
-def test_actor_with_systemdmount_entry(current_actor_context, nfs_fstype):
+def test_actor_with_systemdmount_entry(current_actor_context, nfs_fstype, monkeypatch):
+ monkeypatch.setattr(config, 'get_env', lambda x, y: y)
with_systemdmount_entry = [SystemdMountEntry(node="nfs", path="n/a", model="n/a",
wwn="n/a", fs_type=nfs_fstype, label="n/a",
uuid="n/a")]
@@ -17,7 +19,8 @@ def test_actor_with_systemdmount_entry(current_actor_context, nfs_fstype):
assert is_inhibitor(report_fields)
-def test_actor_without_systemdmount_entry(current_actor_context):
+def test_actor_without_systemdmount_entry(current_actor_context, monkeypatch):
+ monkeypatch.setattr(config, 'get_env', lambda x, y: y)
without_systemdmount_entry = [SystemdMountEntry(node="/dev/sda1",
path="pci-0000:00:17.0-ata-2",
model="TOSHIBA_THNSNJ512GDNU_A",
@@ -30,7 +33,8 @@ def test_actor_without_systemdmount_entry(current_actor_context):
@pytest.mark.parametrize('nfs_fstype', ('nfs', 'nfs4'))
-def test_actor_with_fstab_entry(current_actor_context, nfs_fstype):
+def test_actor_with_fstab_entry(current_actor_context, nfs_fstype, monkeypatch):
+ monkeypatch.setattr(config, 'get_env', lambda x, y: y)
with_fstab_entry = [FstabEntry(fs_spec="lithium:/mnt/data", fs_file="/mnt/data",
fs_vfstype=nfs_fstype,
fs_mntops="noauto,noatime,rsize=32768,wsize=32768",
@@ -41,7 +45,8 @@ def test_actor_with_fstab_entry(current_actor_context, nfs_fstype):
assert is_inhibitor(report_fields)
-def test_actor_without_fstab_entry(current_actor_context):
+def test_actor_without_fstab_entry(current_actor_context, monkeypatch):
+ monkeypatch.setattr(config, 'get_env', lambda x, y: y)
without_fstab_entry = [FstabEntry(fs_spec="/dev/mapper/fedora-home", fs_file="/home",
fs_vfstype="ext4",
fs_mntops="defaults,x-systemd.device-timeout=0",
@@ -51,7 +56,8 @@ def test_actor_without_fstab_entry(current_actor_context):
assert not current_actor_context.consume(Report)
-def test_actor_with_nfsd(current_actor_context):
+def test_actor_with_nfsd(current_actor_context, monkeypatch):
+ monkeypatch.setattr(config, 'get_env', lambda x, y: y)
with_nfsd = [MountEntry(name="nfsd", mount="/proc/fs/nfsd", tp="nfsd", options="rw,relatime")]
current_actor_context.feed(StorageInfo(mount=with_nfsd))
current_actor_context.run()
@@ -59,7 +65,8 @@ def test_actor_with_nfsd(current_actor_context):
@pytest.mark.parametrize('nfs_fstype', ('nfs', 'nfs4'))
-def test_actor_with_mount_share(current_actor_context, nfs_fstype):
+def test_actor_with_mount_share(current_actor_context, nfs_fstype, monkeypatch):
+ monkeypatch.setattr(config, 'get_env', lambda x, y: y)
with_mount_share = [MountEntry(name="nfs", mount="/mnt/data", tp=nfs_fstype,
options="rw,nosuid,nodev,relatime,user_id=1000,group_id=1000")]
current_actor_context.feed(StorageInfo(mount=with_mount_share))
@@ -68,9 +75,49 @@ def test_actor_with_mount_share(current_actor_context, nfs_fstype):
assert is_inhibitor(report_fields)
-def test_actor_without_mount_share(current_actor_context):
+def test_actor_without_mount_share(current_actor_context, monkeypatch):
+ monkeypatch.setattr(config, 'get_env', lambda x, y: y)
without_mount_share = [MountEntry(name="tmpfs", mount="/run/snapd/ns", tp="tmpfs",
options="rw,nosuid,nodev,seclabel,mode=755")]
current_actor_context.feed(StorageInfo(mount=without_mount_share))
current_actor_context.run()
assert not current_actor_context.consume(Report)
+
+
+def test_actor_skipped_if_initram_network_enabled(current_actor_context, monkeypatch):
+ """Check that previous inhibitors are not stopping the upgrade in case env var is set"""
+ monkeypatch.setattr(config, 'get_env', lambda x, y: 'network-manager' if x == 'LEAPP_INITRAM_NETWORK' else y)
+ with_mount_share = [MountEntry(name="nfs", mount="/mnt/data", tp='nfs',
+ options="rw,nosuid,nodev,relatime,user_id=1000,group_id=1000")]
+ with_systemdmount_entry = [SystemdMountEntry(node="nfs", path="n/a", model="n/a",
+ wwn="n/a", fs_type='nfs', label="n/a",
+ uuid="n/a")]
+ with_fstab_entry = [FstabEntry(fs_spec="lithium:/mnt/data", fs_file="/mnt/data",
+ fs_vfstype='nfs',
+ fs_mntops="noauto,noatime,rsize=32768,wsize=32768",
+ fs_freq="0", fs_passno="0")]
+ current_actor_context.feed(StorageInfo(mount=with_mount_share,
+ systemdmount=with_systemdmount_entry,
+ fstab=with_fstab_entry))
+ current_actor_context.run()
+ assert not current_actor_context.consume(Report)
+
+
+def test_actor_not_skipped_if_initram_network_empty(current_actor_context, monkeypatch):
+ """Check that previous inhibitors are not stopping the upgrade in case env var is set"""
+ monkeypatch.setattr(config, 'get_env', lambda x, y: '' if x == 'LEAPP_INITRAM_NETWORK' else y)
+ with_mount_share = [MountEntry(name="nfs", mount="/mnt/data", tp='nfs',
+ options="rw,nosuid,nodev,relatime,user_id=1000,group_id=1000")]
+ with_systemdmount_entry = [SystemdMountEntry(node="nfs", path="n/a", model="n/a",
+ wwn="n/a", fs_type='nfs', label="n/a",
+ uuid="n/a")]
+ with_fstab_entry = [FstabEntry(fs_spec="lithium:/mnt/data", fs_file="/mnt/data",
+ fs_vfstype='nfs',
+ fs_mntops="noauto,noatime,rsize=32768,wsize=32768",
+ fs_freq="0", fs_passno="0")]
+ current_actor_context.feed(StorageInfo(mount=with_mount_share,
+ systemdmount=with_systemdmount_entry,
+ fstab=with_fstab_entry))
+ current_actor_context.run()
+ report_fields = current_actor_context.consume(Report)[0].report
+ assert is_inhibitor(report_fields)
--
2.39.0

View File

@ -1,174 +0,0 @@
From f41cdf561c04d6ec58609f0b11b3a813fa0f6143 Mon Sep 17 00:00:00 2001
From: Inessa Vasilevskaya <ivasilev@redhat.com>
Date: Mon, 29 Aug 2022 10:57:32 +0200
Subject: [PATCH 43/63] Apply changes after rebase and do refactor
Changes done to repos/system_upgrade/el7toel8/actors
have been manually applied to repos/system_upgrade/common/actors
Refactoring is mostly about renaming variable to
LEAPP_DEVEL_INITRAM_NETWORK and moving some changes out of dracut
into the UpgradeTasks.
---
.../libraries/addupgradebootentry.py | 4 ++--
.../common/actors/checknfs/actor.py | 2 +-
.../actors/checknfs/tests/test_checknfs.py | 4 ++--
.../dracut/85sys-upgrade-redhat/do-upgrade.sh | 15 +++++++++++++++
.../libraries/modscan.py | 16 ++++++++++++++++
.../files/generate-initram.sh | 13 -------------
6 files changed, 36 insertions(+), 18 deletions(-)
diff --git a/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py b/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py
index ca9802bb..beddafec 100644
--- a/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py
+++ b/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py
@@ -9,8 +9,8 @@ from leapp.models import BootContent, KernelCmdlineArg, TargetKernelCmdlineArgTa
def add_boot_entry(configs=None):
debug = 'debug' if os.getenv('LEAPP_DEBUG', '0') == '1' else ''
- enable_network = os.getenv('LEAPP_INITRAM_NETWORK') in ('network-manager', 'scripts')
- ip_arg = ' ip=on' if enable_network else ''
+ enable_network = os.getenv('LEAPP_DEVEL_INITRAM_NETWORK') in ('network-manager', 'scripts')
+ ip_arg = ' ip=dhcp rd.neednet=1' if enable_network else ''
kernel_dst_path, initram_dst_path = get_boot_file_paths()
_remove_old_upgrade_boot_entry(kernel_dst_path, configs=configs)
try:
diff --git a/repos/system_upgrade/common/actors/checknfs/actor.py b/repos/system_upgrade/common/actors/checknfs/actor.py
index 370ae6b3..40ca834e 100644
--- a/repos/system_upgrade/common/actors/checknfs/actor.py
+++ b/repos/system_upgrade/common/actors/checknfs/actor.py
@@ -20,7 +20,7 @@ class CheckNfs(Actor):
def process(self):
# if network in initramfs is enabled NFS inhibitors are redundant
- if get_env('LEAPP_INITRAM_NETWORK', None):
+ if get_env('LEAPP_DEVEL_INITRAM_NETWORK', None):
return
details = "NFS is currently not supported by the inplace upgrade.\n" \
"We have found NFS usage at the following locations:\n"
diff --git a/repos/system_upgrade/common/actors/checknfs/tests/test_checknfs.py b/repos/system_upgrade/common/actors/checknfs/tests/test_checknfs.py
index a8d18ed1..907dca40 100644
--- a/repos/system_upgrade/common/actors/checknfs/tests/test_checknfs.py
+++ b/repos/system_upgrade/common/actors/checknfs/tests/test_checknfs.py
@@ -86,7 +86,7 @@ def test_actor_without_mount_share(current_actor_context, monkeypatch):
def test_actor_skipped_if_initram_network_enabled(current_actor_context, monkeypatch):
"""Check that previous inhibitors are not stopping the upgrade in case env var is set"""
- monkeypatch.setattr(config, 'get_env', lambda x, y: 'network-manager' if x == 'LEAPP_INITRAM_NETWORK' else y)
+ monkeypatch.setattr(config, 'get_env', lambda x, y: 'network-manager' if x == 'LEAPP_DEVEL_INITRAM_NETWORK' else y)
with_mount_share = [MountEntry(name="nfs", mount="/mnt/data", tp='nfs',
options="rw,nosuid,nodev,relatime,user_id=1000,group_id=1000")]
with_systemdmount_entry = [SystemdMountEntry(node="nfs", path="n/a", model="n/a",
@@ -105,7 +105,7 @@ def test_actor_skipped_if_initram_network_enabled(current_actor_context, monkeyp
def test_actor_not_skipped_if_initram_network_empty(current_actor_context, monkeypatch):
"""Check that previous inhibitors are not stopping the upgrade in case env var is set"""
- monkeypatch.setattr(config, 'get_env', lambda x, y: '' if x == 'LEAPP_INITRAM_NETWORK' else y)
+ monkeypatch.setattr(config, 'get_env', lambda x, y: '' if x == 'LEAPP_DEVEL_INITRAM_NETWORK' else y)
with_mount_share = [MountEntry(name="nfs", mount="/mnt/data", tp='nfs',
options="rw,nosuid,nodev,relatime,user_id=1000,group_id=1000")]
with_systemdmount_entry = [SystemdMountEntry(node="nfs", path="n/a", model="n/a",
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
index ff491316..49c26bc8 100755
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
@@ -194,6 +194,19 @@ ibdmp() {
done
}
+bring_up_network() {
+ if [ -f /etc/leapp-initram-network-manager ]; then
+ # NOTE(ivasilev) Reverting the change to see if it caused the crash
+ . /lib/dracut/hooks/cmdline/99-nm-config.sh
+ . /lib/dracut/hooks/initqueue/settled/99-nm-run.sh
+ fi
+ if [ -f /etc/leapp-initram-network-scripts ]; then
+ for interface in /sys/class/net/*;
+ do
+ ifup ${interface##*/};
+ done;
+ fi
+}
do_upgrade() {
local args="" rv=0
@@ -202,6 +215,8 @@ do_upgrade() {
#getargbool 0 rd.upgrade.verbose && args="$args --verbose"
getargbool 0 rd.upgrade.debug && args="$args --debug"
+ bring_up_network
+
# Force selinux into permissive mode unless booted with 'enforcing=1'.
# FIXME: THIS IS A BIG STUPID HAMMER AND WE SHOULD ACTUALLY SOLVE THE ROOT
# PROBLEMS RATHER THAN JUST PAPERING OVER THE WHOLE THING. But this is what
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/libraries/modscan.py b/repos/system_upgrade/common/actors/commonleappdracutmodules/libraries/modscan.py
index 275b2c63..2b8d78a4 100644
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/libraries/modscan.py
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/libraries/modscan.py
@@ -6,6 +6,7 @@ from leapp.libraries.stdlib import api
from leapp.utils.deprecation import suppress_deprecation
from leapp.models import ( # isort:skip
+ CopyFile,
RequiredUpgradeInitramPackages, # deprecated
UpgradeDracutModule, # deprecated
DracutModule,
@@ -42,6 +43,18 @@ _REQUIRED_PACKAGES = [
]
+def _create_initram_networking_tasks():
+ # include networking-related dracut modules
+ modules_map = {'network-manager': ('network-manager', '/etc/leapp-initram-network-manager'),
+ 'scripts': ('network', '/etc/leapp-initram-network-scripts')}
+ initram_network_chosen = os.getenv('LEAPP_DEVEL_INITRAM_NETWORK', None)
+ if initram_network_chosen in modules_map:
+ module, touch_file = modules_map[initram_network_chosen]
+ yield UpgradeInitramfsTasks(include_dracut_modules=[DracutModule(name=module)])
+ # touch expected file
+ yield TargetUserSpaceUpgradeTasks(copy_files=[CopyFile(src='/dev/null', dst=touch_file)])
+
+
# The decorator is not effective for generators, it has to be used one level
# above
# @suppress_deprecation(UpgradeDracutModule)
@@ -68,6 +81,8 @@ def _create_initram_packages():
required_pkgs = _REQUIRED_PACKAGES[:]
if architecture.matches_architecture(architecture.ARCH_X86_64):
required_pkgs.append('biosdevname')
+ if os.getenv('LEAPP_DEVEL_INITRAM_NETWORK', None) == 'network-manager':
+ required_pkgs.append('NetworkManager')
if version.get_target_major_version() == '9':
required_pkgs += ['policycoreutils', 'rng-tools']
return (
@@ -79,3 +94,4 @@ def _create_initram_packages():
def process():
api.produce(*tuple(_create_dracut_modules()))
api.produce(*_create_initram_packages())
+ api.produce(*tuple(_create_initram_networking_tasks()))
diff --git a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/files/generate-initram.sh b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/files/generate-initram.sh
index 7748aa78..b3478280 100755
--- a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/files/generate-initram.sh
+++ b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/files/generate-initram.sh
@@ -67,19 +67,6 @@ build() {
DRACUT_MODULES_ADD=$(echo "--add $LEAPP_ADD_DRACUT_MODULES" | sed 's/,/ --add /g')
fi
- case $LEAPP_INITRAM_NETWORK in
- network-manager)
- DRACUT_MODULES_ADD="$DRACUT_MODULES_ADD --add network-manager"
- touch /etc/leapp-initram-network-manager
- ;;
- scripts)
- DRACUT_MODULES_ADD="$DRACUT_MODULES_ADD --add network";
- touch /etc/leapp-initram-network-scripts
- ;;
- *)
- ;;
- esac
-
DRACUT_INSTALL="systemd-nspawn"
if [[ -n "$LEAPP_DRACUT_INSTALL_FILES" ]]; then
DRACUT_INSTALL="$DRACUT_INSTALL $LEAPP_DRACUT_INSTALL_FILES"
--
2.39.0

View File

@ -1,91 +0,0 @@
From 032b00255d0127c06c7bd851bc438290766f5cbc Mon Sep 17 00:00:00 2001
From: Inessa Vasilevskaya <ivasilev@redhat.com>
Date: Thu, 10 Nov 2022 12:51:19 +0100
Subject: [PATCH 44/63] Tune tmt tests regexes to align with QE automation
In order for upstream tests and those launched with QE tooling
to produce the comparable set of results it's a wise thing to
do.
---
.github/workflows/tmt-tests.yml | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/.github/workflows/tmt-tests.yml b/.github/workflows/tmt-tests.yml
index 563c6e8c..0b565b8b 100644
--- a/.github/workflows/tmt-tests.yml
+++ b/.github/workflows/tmt-tests.yml
@@ -16,7 +16,7 @@ jobs:
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
+ tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
pull_request_status_name: "7.9to8.4"
call_workflow_tests_79to86_integration:
@@ -25,7 +25,7 @@ jobs:
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
+ tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
variables: 'TARGET_RELEASE=8.6'
pull_request_status_name: "7.9to8.6"
@@ -35,7 +35,7 @@ jobs:
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*c2r)(?!.*sap)(?!.*8to9)(.*morf)"
+ tmt_plan_regex: "^(/plans/morf)(?!.*sap)"
pull_request_status_name: "7.9to8.4-sst"
update_pull_request_status: 'false'
if: |
@@ -49,7 +49,7 @@ jobs:
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*c2r)(?!.*sap)(?!.*8to9)(.*e2e)"
+ tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*8to9)(.*e2e)"
compose: "RHEL-7.9-rhui"
environment_settings: '{"provisioning": {"post_install_script": "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys; echo 42; yum-config-manager --enable rhel-7-server-rhui-optional-rpms"}}'
pull_request_status_name: "7to8-aws-e2e"
@@ -61,7 +61,7 @@ jobs:
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*c2r)(?!.*sap)(?!.*7to8)(?!.*morf)"
+ tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
pull_request_status_name: "8.6to9.0"
call_workflow_tests_87to91_integration:
@@ -70,7 +70,7 @@ jobs:
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*c2r)(?!.*sap)(?!.*7to8)(?!.*morf)"
+ tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*7to8)(?!.*morf)"
variables: "LEAPP_DEVEL_TARGET_PRODUCT_TYPE=beta;RHSM_SKU=RH00069;TARGET_RELEASE=9.1;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-rpms,rhel-8-for-x86_64-baseos-rpms"
compose: "RHEL-8.7.0-Nightly"
pull_request_status_name: "8.7to9.1"
@@ -82,7 +82,7 @@ jobs:
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*c2r)(?!.*sap)(?!.*7to8)(.*morf)"
+ tmt_plan_regex: "^(/plans/morf)(?!.*sap)"
pull_request_status_name: "8to9-sst"
update_pull_request_status: 'false'
if: |
@@ -96,7 +96,7 @@ jobs:
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*c2r)(?!.*sap)(?!.*7to8)(.*e2e)"
+ tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*7to8)(.*e2e)"
compose: "RHEL-8.6-rhui"
environment_settings: '{"provisioning": {"post_install_script": "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys"}}'
pull_request_status_name: "8to9-aws-e2e"
--
2.39.0

View File

@ -1,130 +0,0 @@
From fab859941f31d4809038f571fe308154818f0dd4 Mon Sep 17 00:00:00 2001
From: Inessa Vasilevskaya <ivasilev@redhat.com>
Date: Thu, 10 Nov 2022 12:58:00 +0100
Subject: [PATCH 45/63] Change /rerun-all to /rerun-sst
Change the concept from "rerun all tests + morf ones" to
"rerun just the morf tests".
Welcome message updated as well.
---
.github/workflows/pr-welcome-msg.yml | 4 ++--
.github/workflows/reuse-copr-build.yml | 2 +-
.github/workflows/tmt-tests.yml | 28 ++++++++++++++++++++++++--
3 files changed, 29 insertions(+), 5 deletions(-)
diff --git a/.github/workflows/pr-welcome-msg.yml b/.github/workflows/pr-welcome-msg.yml
index 7ae2fa4e..5fbf9558 100644
--- a/.github/workflows/pr-welcome-msg.yml
+++ b/.github/workflows/pr-welcome-msg.yml
@@ -27,8 +27,8 @@ jobs:
To launch regression testing public members of oamg organization can leave the following comment:
- **/rerun** to schedule basic regression tests using this pr build and leapp\*master\* as artifacts
- **/rerun 42** to schedule basic regression tests using this pr build and leapp\*PR42\* as artifacts
- - **/rerun-all** to schedule all tests (including sst) using this pr build and leapp\*master\* as artifacts
- - **/rerun-all 42** to schedule all tests (including sst) using this pr build and leapp\*PR42\* as artifacts
+ - **/rerun-sst** to schedule sst tests using this pr build and leapp\*master\* as artifacts
+ - **/rerun-sst 42** to schedule sst tests using this pr build and leapp\*PR42\* as artifacts
Please [open ticket](https://url.corp.redhat.com/oamg-ci-issue) in case you experience technical problem with the CI. (RH internal only)
diff --git a/.github/workflows/reuse-copr-build.yml b/.github/workflows/reuse-copr-build.yml
index 477d3f40..43aa98a3 100644
--- a/.github/workflows/reuse-copr-build.yml
+++ b/.github/workflows/reuse-copr-build.yml
@@ -88,7 +88,7 @@ jobs:
id: leapp_pr_regex_match
with:
text: ${{ github.event.comment.body }}
- regex: '^/(rerun|rerun-all)\s+([0-9]+)\s*$'
+ regex: '^/(rerun|rerun-sst)\s+([0-9]+)\s*$'
- name: If leapp_pr was specified in the comment - trigger copr build
# TODO: XXX FIXME This should schedule copr build for leapp but for now it will be just setting an env var
diff --git a/.github/workflows/tmt-tests.yml b/.github/workflows/tmt-tests.yml
index 0b565b8b..fad1d5d0 100644
--- a/.github/workflows/tmt-tests.yml
+++ b/.github/workflows/tmt-tests.yml
@@ -18,6 +18,10 @@ jobs:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
pull_request_status_name: "7.9to8.4"
+ if: |
+ github.event.issue.pull_request
+ && ! startsWith(github.event.comment.body, '/rerun-sst')
+ && contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
call_workflow_tests_79to86_integration:
needs: call_workflow_copr_build
@@ -28,6 +32,10 @@ jobs:
tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
variables: 'TARGET_RELEASE=8.6'
pull_request_status_name: "7.9to8.6"
+ if: |
+ github.event.issue.pull_request
+ && ! startsWith(github.event.comment.body, '/rerun-sst')
+ && contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
call_workflow_tests_79to84_sst:
needs: call_workflow_copr_build
@@ -40,7 +48,7 @@ jobs:
update_pull_request_status: 'false'
if: |
github.event.issue.pull_request
- && startsWith(github.event.comment.body, '/rerun-all')
+ && startsWith(github.event.comment.body, '/rerun-sst')
&& contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
call_workflow_tests_7to8_aws:
@@ -54,6 +62,10 @@ jobs:
environment_settings: '{"provisioning": {"post_install_script": "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys; echo 42; yum-config-manager --enable rhel-7-server-rhui-optional-rpms"}}'
pull_request_status_name: "7to8-aws-e2e"
variables: "RHUI=aws"
+ if: |
+ github.event.issue.pull_request
+ && ! startsWith(github.event.comment.body, '/rerun-sst')
+ && contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
call_workflow_tests_86to90_integration:
needs: call_workflow_copr_build
@@ -63,6 +75,10 @@ jobs:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
pull_request_status_name: "8.6to9.0"
+ if: |
+ github.event.issue.pull_request
+ && ! startsWith(github.event.comment.body, '/rerun-sst')
+ && contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
call_workflow_tests_87to91_integration:
needs: call_workflow_copr_build
@@ -75,6 +91,10 @@ jobs:
compose: "RHEL-8.7.0-Nightly"
pull_request_status_name: "8.7to9.1"
tmt_context: "distro=rhel-8.7"
+ if: |
+ github.event.issue.pull_request
+ && ! startsWith(github.event.comment.body, '/rerun-sst')
+ && contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
call_workflow_tests_8to9_sst:
needs: call_workflow_copr_build
@@ -87,7 +107,7 @@ jobs:
update_pull_request_status: 'false'
if: |
github.event.issue.pull_request
- && startsWith(github.event.comment.body, '/rerun-all')
+ && startsWith(github.event.comment.body, '/rerun-sst')
&& contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
call_workflow_tests_8to9_aws:
@@ -101,3 +121,7 @@ jobs:
environment_settings: '{"provisioning": {"post_install_script": "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys"}}'
pull_request_status_name: "8to9-aws-e2e"
variables: "RHUI=aws"
+ if: |
+ github.event.issue.pull_request
+ && ! startsWith(github.event.comment.body, '/rerun-sst')
+ && contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
--
2.39.0

View File

@ -1,142 +0,0 @@
From 82b43d3b7452812a97ad2f479f8f7ef541e46154 Mon Sep 17 00:00:00 2001
From: Inessa Vasilevskaya <ivasilev@redhat.com>
Date: Mon, 28 Nov 2022 13:16:50 +0100
Subject: [PATCH 46/63] Do not run rhsm tests in upstream
It was decided to limit test runs to tier0/tier1, non-RHSM
tests only.
Also this patch will keep github action parameters in sync with
supported upgrade paths.
---
.github/workflows/tmt-tests.yml | 36 ++++++++++++++++++---------------
1 file changed, 20 insertions(+), 16 deletions(-)
diff --git a/.github/workflows/tmt-tests.yml b/.github/workflows/tmt-tests.yml
index fad1d5d0..c82256c8 100644
--- a/.github/workflows/tmt-tests.yml
+++ b/.github/workflows/tmt-tests.yml
@@ -10,14 +10,15 @@ jobs:
uses: ./.github/workflows/reuse-copr-build.yml
secrets: inherit
- call_workflow_tests_79to84_integration:
+ call_workflow_tests_79to88_integration:
needs: call_workflow_copr_build
uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@master
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
- pull_request_status_name: "7.9to8.4"
+ tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
+ pull_request_status_name: "7.9to8.8"
+ variables: 'TARGET_RELEASE=8.8'
if: |
github.event.issue.pull_request
&& ! startsWith(github.event.comment.body, '/rerun-sst')
@@ -29,7 +30,7 @@ jobs:
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
+ tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
variables: 'TARGET_RELEASE=8.6'
pull_request_status_name: "7.9to8.6"
if: |
@@ -37,15 +38,16 @@ jobs:
&& ! startsWith(github.event.comment.body, '/rerun-sst')
&& contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
- call_workflow_tests_79to84_sst:
+ call_workflow_tests_79to88_sst:
needs: call_workflow_copr_build
uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@master
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
tmt_plan_regex: "^(/plans/morf)(?!.*sap)"
- pull_request_status_name: "7.9to8.4-sst"
+ pull_request_status_name: "7.9to8.8-sst"
update_pull_request_status: 'false'
+ variables: 'TARGET_RELEASE=8.8'
if: |
github.event.issue.pull_request
&& startsWith(github.event.comment.body, '/rerun-sst')
@@ -57,7 +59,7 @@ jobs:
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*8to9)(.*e2e)"
+ tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*8to9)(.*e2e)"
compose: "RHEL-7.9-rhui"
environment_settings: '{"provisioning": {"post_install_script": "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys; echo 42; yum-config-manager --enable rhel-7-server-rhui-optional-rpms"}}'
pull_request_status_name: "7to8-aws-e2e"
@@ -73,36 +75,38 @@ jobs:
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
+ tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*7to8)(?!.*morf)"
+ variables: 'TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms'
pull_request_status_name: "8.6to9.0"
if: |
github.event.issue.pull_request
&& ! startsWith(github.event.comment.body, '/rerun-sst')
&& contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
- call_workflow_tests_87to91_integration:
+ call_workflow_tests_87to90_integration:
needs: call_workflow_copr_build
uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@master
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*7to8)(?!.*morf)"
- variables: "LEAPP_DEVEL_TARGET_PRODUCT_TYPE=beta;RHSM_SKU=RH00069;TARGET_RELEASE=9.1;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-rpms,rhel-8-for-x86_64-baseos-rpms"
+ tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*7to8)(?!.*morf)"
+ variables: 'TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-rpms,rhel-8-for-x86_64-baseos-rpms'
compose: "RHEL-8.7.0-Nightly"
- pull_request_status_name: "8.7to9.1"
+ pull_request_status_name: "8.7to9.0"
tmt_context: "distro=rhel-8.7"
if: |
github.event.issue.pull_request
&& ! startsWith(github.event.comment.body, '/rerun-sst')
&& contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
- call_workflow_tests_8to9_sst:
+ call_workflow_tests_86to90_sst:
needs: call_workflow_copr_build
uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@master
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
tmt_plan_regex: "^(/plans/morf)(?!.*sap)"
+ variables: 'TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms'
pull_request_status_name: "8to9-sst"
update_pull_request_status: 'false'
if: |
@@ -110,17 +114,17 @@ jobs:
&& startsWith(github.event.comment.body, '/rerun-sst')
&& contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
- call_workflow_tests_8to9_aws:
+ call_workflow_tests_86to90_aws:
needs: call_workflow_copr_build
uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@master
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*7to8)(.*e2e)"
+ tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*7to8)(.*e2e)"
compose: "RHEL-8.6-rhui"
environment_settings: '{"provisioning": {"post_install_script": "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys"}}'
pull_request_status_name: "8to9-aws-e2e"
- variables: "RHUI=aws"
+ variables: 'TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms;RHUI=aws'
if: |
github.event.issue.pull_request
&& ! startsWith(github.event.comment.body, '/rerun-sst')
--
2.39.0

View File

@ -1,90 +0,0 @@
From 5e6f37878661c44ced384a3a362c2e3515c3609a Mon Sep 17 00:00:00 2001
From: Inessa Vasilevskaya <ivasilev@redhat.com>
Date: Fri, 16 Dec 2022 11:25:14 +0100
Subject: [PATCH 47/63] Set SOURCE_RELEASE env var
Following a change in tmt-tests this is a necessary addition
for upgrade_plugin to correctly choose an upgrade path.
---
.github/workflows/tmt-tests.yml | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/.github/workflows/tmt-tests.yml b/.github/workflows/tmt-tests.yml
index c82256c8..ecda20ed 100644
--- a/.github/workflows/tmt-tests.yml
+++ b/.github/workflows/tmt-tests.yml
@@ -18,7 +18,7 @@ jobs:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
pull_request_status_name: "7.9to8.8"
- variables: 'TARGET_RELEASE=8.8'
+ variables: 'SOURCE_RELEASE=7.9;TARGET_RELEASE=8.8'
if: |
github.event.issue.pull_request
&& ! startsWith(github.event.comment.body, '/rerun-sst')
@@ -31,7 +31,7 @@ jobs:
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
- variables: 'TARGET_RELEASE=8.6'
+ variables: 'SOURCE_RELEASE=7.9;TARGET_RELEASE=8.6'
pull_request_status_name: "7.9to8.6"
if: |
github.event.issue.pull_request
@@ -47,7 +47,7 @@ jobs:
tmt_plan_regex: "^(/plans/morf)(?!.*sap)"
pull_request_status_name: "7.9to8.8-sst"
update_pull_request_status: 'false'
- variables: 'TARGET_RELEASE=8.8'
+ variables: 'SOURCE_RELEASE=7.9;TARGET_RELEASE=8.8'
if: |
github.event.issue.pull_request
&& startsWith(github.event.comment.body, '/rerun-sst')
@@ -63,7 +63,7 @@ jobs:
compose: "RHEL-7.9-rhui"
environment_settings: '{"provisioning": {"post_install_script": "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys; echo 42; yum-config-manager --enable rhel-7-server-rhui-optional-rpms"}}'
pull_request_status_name: "7to8-aws-e2e"
- variables: "RHUI=aws"
+ variables: "SOURCE_RELEASE=7.9;TARGET_RELEASE=8.6;RHUI=aws"
if: |
github.event.issue.pull_request
&& ! startsWith(github.event.comment.body, '/rerun-sst')
@@ -76,7 +76,7 @@ jobs:
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*7to8)(?!.*morf)"
- variables: 'TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms'
+ variables: 'SOURCE_RELEASE=8.6;TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms'
pull_request_status_name: "8.6to9.0"
if: |
github.event.issue.pull_request
@@ -90,7 +90,7 @@ jobs:
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*7to8)(?!.*morf)"
- variables: 'TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-rpms,rhel-8-for-x86_64-baseos-rpms'
+ variables: 'SOURCE_RELEASE=8.7;TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-rpms,rhel-8-for-x86_64-baseos-rpms'
compose: "RHEL-8.7.0-Nightly"
pull_request_status_name: "8.7to9.0"
tmt_context: "distro=rhel-8.7"
@@ -106,7 +106,7 @@ jobs:
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
tmt_plan_regex: "^(/plans/morf)(?!.*sap)"
- variables: 'TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms'
+ variables: 'SOURCE_RELEASE=8.6;TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms'
pull_request_status_name: "8to9-sst"
update_pull_request_status: 'false'
if: |
@@ -124,7 +124,7 @@ jobs:
compose: "RHEL-8.6-rhui"
environment_settings: '{"provisioning": {"post_install_script": "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys"}}'
pull_request_status_name: "8to9-aws-e2e"
- variables: 'TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms;RHUI=aws'
+ variables: 'SOURCE_RELEASE=8.6;TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms;RHUI=aws'
if: |
github.event.issue.pull_request
&& ! startsWith(github.event.comment.body, '/rerun-sst')
--
2.39.0

View File

@ -1,149 +0,0 @@
From ef0e81dba97c61f7b4c15ebc91468253b758005d Mon Sep 17 00:00:00 2001
From: Tomas Tomecek <ttomecek@redhat.com>
Date: Mon, 7 Mar 2022 16:29:39 +0100
Subject: [PATCH 48/63] Packit: build SRPM in Copr
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
TL;DR:
* specify the list of deps needed to prepare SRPM in the .packit file
* move leapp-el7toel8-deps.spec into other_specs subdir
* update the make commands for the building of packages
Previously we have prepared SRPM "locally" and then uploaded it
to the COPR server for the building. However, since Jan 2023 we are
required to build SRPMs on the COPR server itself. As in this project
we have to manage 2 SPEC files for two buildings:
- leapp-el7toel8-deps.spec
Creating metapackage resolving issues with dependencies
on the target system; created RPMs are archived and are used
as one of sources for building of leapp-repository.
- leapp-repository.spec
The main spec file used for the entire project, which requires
archive with the deps packages.
Currently it's not possible to tell COPR which specific spec file
should be used and if they are in the same directory, COPR fails with
the error message about multiple SPEC files. But COPR is ok having
multiple spec files in a project when they are in separate directories.
To fix that, we are moving the deps spec file into the separate directory.
Also, explicitly set the `_rpmfilename` macro. This is super important as
the COPR build servers are using Mock, which redefines the macro, so packages
are stored inside RPMS directory, instead RPMS/%{ARCH}. The macro must be
defined with double '%'. Using just single %, the macro is expanded when
the specfile is loaded, but it is expected to be expanded during
the build process when particular subpackages (RPMs) are created, so
each RPM has the right name. Using the single %, all RPMs would have the
name of the SRPM - which means effectively that only one RPM per build
would be created. (hopefully the explanation is clear :))
This change was finished by Peťa and Pavel Raiskup. Tomas was "only" the
initial author. For more details, please open the PR:
https://github.com/oamg/leapp-repository/pull/848
🎉🎉🎉🍻
Signed-off-by: Tomas Tomecek <ttomecek@redhat.com>
Co-authored-by: Petr Stodulk <pstodulk@redhat.com>
---
.packit.yaml | 3 +++
Makefile | 22 ++++++++++++++-----
packaging/leapp-repository.spec | 4 ++--
.../leapp-el7toel8-deps.spec | 0
4 files changed, 22 insertions(+), 7 deletions(-)
rename packaging/{ => other_specs}/leapp-el7toel8-deps.spec (100%)
diff --git a/.packit.yaml b/.packit.yaml
index fb407829..f1d59ce1 100644
--- a/.packit.yaml
+++ b/.packit.yaml
@@ -8,6 +8,9 @@ downstream_package_name: leapp-repository
upstream_tag_template: 'v{version}'
merge_pr_in_ci: false
+srpm_build_deps:
+- make
+
# This is just for the build from the CLI - all other builds for jobs use own
# actions
actions:
diff --git a/Makefile b/Makefile
index 7342d4bf..b1489e4f 100644
--- a/Makefile
+++ b/Makefile
@@ -7,7 +7,7 @@ DIST_VERSION ?= 7
PKGNAME=leapp-repository
DEPS_PKGNAME=leapp-el7toel8-deps
VERSION=`grep -m1 "^Version:" packaging/$(PKGNAME).spec | grep -om1 "[0-9].[0-9.]**"`
-DEPS_VERSION=`grep -m1 "^Version:" packaging/$(DEPS_PKGNAME).spec | grep -om1 "[0-9].[0-9.]**"`
+DEPS_VERSION=`grep -m1 "^Version:" packaging/other_specs/$(DEPS_PKGNAME).spec | grep -om1 "[0-9].[0-9.]**"`
REPOS_PATH=repos
_SYSUPG_REPOS="$(REPOS_PATH)/system_upgrade"
LIBRARY_PATH=
@@ -178,7 +178,7 @@ source: prepare
mkdir -p packaging/tmp/
@__TIMESTAMP=$(TIMESTAMP) $(MAKE) _build_subpkg
@__TIMESTAMP=$(TIMESTAMP) $(MAKE) DIST_VERSION=$$(($(DIST_VERSION) + 1)) _build_subpkg
- @tar -czf packaging/sources/deps-pkgs.tar.gz -C packaging/RPMS/noarch `ls packaging/RPMS/noarch | grep -o "[^/]*rpm$$"`
+ @tar -czf packaging/sources/deps-pkgs.tar.gz -C packaging/RPMS/noarch `ls -1 packaging/RPMS/noarch | grep -o "[^/]*rpm$$"`
@rm -f packaging/RPMS/noarch/*.rpm
srpm: source
@@ -195,8 +195,19 @@ srpm: source
_build_subpkg:
@echo "--- Build RPM: $(DEPS_PKGNAME)-$(DEPS_VERSION)-$(RELEASE).. ---"
- @cp packaging/$(DEPS_PKGNAME).spec packaging/$(DEPS_PKGNAME).spec.bak
+ @cp packaging/other_specs/$(DEPS_PKGNAME).spec packaging/$(DEPS_PKGNAME).spec
@sed -i "s/1%{?dist}/$(RELEASE)%{?dist}/g" packaging/$(DEPS_PKGNAME).spec
+ # Let's be explicit about the path to the binary RPMs; Copr builders can override this
+ # IMPORTANT:
+ # Also, explicitly set the _rpmfilename macro. This is super important as
+ # the COPR build servers are using Mock, which redefines the macro, so packages
+ # are stored inside RPMS directory, instead RPMS/%{ARCH}. The macro must be
+ # defined with double '%'. Using just single %, the macro is expanded when
+ # the specfile is loaded, but it is expected to be expanded during
+ # the build process when particular subpackages (RPMs) are created, so
+ # each RPM has the right name. Using the single %, all RPMs would have the
+ # name of the SRPM - which means effectively that only one RPM per build
+ # would be created. (hopefully the explanation is clear :))
@rpmbuild -ba packaging/$(DEPS_PKGNAME).spec \
--define "_sourcedir `pwd`/packaging/sources" \
--define "_srcrpmdir `pwd`/packaging/SRPMS" \
@@ -205,8 +216,9 @@ _build_subpkg:
--define "_rpmdir `pwd`/packaging/RPMS" \
--define "rhel $$(($(DIST_VERSION) + 1))" \
--define "dist .el$$(($(DIST_VERSION) + 1))" \
- --define "el$$(($(DIST_VERSION) + 1)) 1" || FAILED=1
- @mv packaging/$(DEPS_PKGNAME).spec.bak packaging/$(DEPS_PKGNAME).spec
+ --define "el$$(($(DIST_VERSION) + 1)) 1" \
+ --define "_rpmfilename %%{ARCH}/%%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" || FAILED=1
+ @rm -f packaging/$(DEPS_PKGNAME).spec
_build_local: source
@echo "--- Build RPM: $(PKGNAME)-$(VERSION)-$(RELEASE).. ---"
diff --git a/packaging/leapp-repository.spec b/packaging/leapp-repository.spec
index 0ffba71c..044e7275 100644
--- a/packaging/leapp-repository.spec
+++ b/packaging/leapp-repository.spec
@@ -196,9 +196,9 @@ Requires: dracut
%build
%if 0%{?rhel} == 7
-cp -a leapp*deps-el8*rpm repos/system_upgrade/el7toel8/files/bundled-rpms/
+cp -a leapp*deps*el8.noarch.rpm repos/system_upgrade/el7toel8/files/bundled-rpms/
%else
-cp -a leapp*deps-el9*rpm repos/system_upgrade/el8toel9/files/bundled-rpms/
+cp -a leapp*deps*el9.noarch.rpm repos/system_upgrade/el8toel9/files/bundled-rpms/
%endif
diff --git a/packaging/leapp-el7toel8-deps.spec b/packaging/other_specs/leapp-el7toel8-deps.spec
similarity index 100%
rename from packaging/leapp-el7toel8-deps.spec
rename to packaging/other_specs/leapp-el7toel8-deps.spec
--
2.39.0

View File

@ -1,62 +0,0 @@
From 79320da3de243dc19ee934974fc197f4bb3b6403 Mon Sep 17 00:00:00 2001
From: Evgeni Golov <evgeni@golov.de>
Date: Thu, 1 Dec 2022 09:54:21 +0100
Subject: [PATCH 49/63] ensure Satellite metapackages are installed after
upgrade
When upgrading from EL7 to EL8, we theoretically can run into the
situation where the `satellite` and `satellite-capsule` metapackages are
removed during the upgrade due to dependency problems. While we are not
aware of any actual occurences of this problem today, let's play safe
and explicitly add those packages to the `to_install` set.
---
.../actors/satellite_upgrade_facts/actor.py | 2 ++
.../tests/unit_test_satellite_upgrade_facts.py | 16 ++++++++++++++++
2 files changed, 18 insertions(+)
diff --git a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/actor.py b/repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/actor.py
index 2bbceb5d..01e63465 100644
--- a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/actor.py
+++ b/repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/actor.py
@@ -134,9 +134,11 @@ class SatelliteUpgradeFacts(Actor):
if has_package(InstalledRPM, 'satellite'):
repositories_to_enable.append('satellite-6.11-for-rhel-8-x86_64-rpms')
modules_to_enable.append(Module(name='satellite', stream='el8'))
+ to_install.append('satellite')
elif has_package(InstalledRPM, 'satellite-capsule'):
repositories_to_enable.append('satellite-capsule-6.11-for-rhel-8-x86_64-rpms')
modules_to_enable.append(Module(name='satellite-capsule', stream='el8'))
+ to_install.append('satellite-capsule')
self.produce(RpmTransactionTasks(
to_remove=to_remove,
diff --git a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/tests/unit_test_satellite_upgrade_facts.py b/repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/tests/unit_test_satellite_upgrade_facts.py
index 5d338aa1..2fb8a3ba 100644
--- a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/tests/unit_test_satellite_upgrade_facts.py
+++ b/repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/tests/unit_test_satellite_upgrade_facts.py
@@ -102,6 +102,22 @@ def test_enables_satellite_capsule_module(current_actor_context):
assert Module(name='satellite', stream='el8') not in message.modules_to_enable
+def test_installs_satellite_package(current_actor_context):
+ current_actor_context.feed(InstalledRPM(items=[FOREMAN_RPM, SATELLITE_RPM]))
+ current_actor_context.run(config_model=mock_configs.CONFIG)
+ message = current_actor_context.consume(RpmTransactionTasks)[0]
+ assert 'satellite' in message.to_install
+ assert 'satellite-capsule' not in message.to_install
+
+
+def test_installs_satellite_capsule_package(current_actor_context):
+ current_actor_context.feed(InstalledRPM(items=[FOREMAN_PROXY_RPM, SATELLITE_CAPSULE_RPM]))
+ current_actor_context.run(config_model=mock_configs.CONFIG)
+ message = current_actor_context.consume(RpmTransactionTasks)[0]
+ assert 'satellite-capsule' in message.to_install
+ assert 'satellite' not in message.to_install
+
+
def test_detects_local_postgresql(monkeypatch, current_actor_context):
def mock_stat():
orig_stat = os.stat
--
2.39.0

View File

@ -1,42 +0,0 @@
From 9230696fc7c601997e44f1012e859f632ef168ba Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Wed, 30 Nov 2022 12:03:49 +0100
Subject: [PATCH 50/63] Makefile: filter out removed files for linting
Originally actions with `isort` crashed when files have been removed,
especially in case of the `lint_fix` target. This causes crash of CI
when the PR contains e.g. just removal of files without additional
changes. So, filter out removed files from the list of files to be
checked by linters.
---
Makefile | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/Makefile b/Makefile
index b1489e4f..066a5fd2 100644
--- a/Makefile
+++ b/Makefile
@@ -331,9 +331,9 @@ lint:
echo "--- Linting done. ---"; \
fi
- if [[ "`git rev-parse --abbrev-ref HEAD`" != "master" ]] && [[ -n "`git diff $(MASTER_BRANCH) --name-only`" ]]; then \
+ if [[ "`git rev-parse --abbrev-ref HEAD`" != "$(MASTER_BRANCH)" ]] && [[ -n "`git diff $(MASTER_BRANCH) --name-only --diff-filter AMR`" ]]; then \
. $(VENVNAME)/bin/activate; \
- git diff $(MASTER_BRANCH) --name-only | xargs isort -c --diff || \
+ git diff $(MASTER_BRANCH) --name-only --diff-filter AMR | xargs isort -c --diff || \
{ \
echo; \
echo "------------------------------------------------------------------------------"; \
@@ -345,7 +345,7 @@ lint:
lint_fix:
. $(VENVNAME)/bin/activate; \
- git diff $(MASTER_BRANCH) --name-only | xargs isort && \
+ git diff $(MASTER_BRANCH) --name-only --diff-filter AMR | xargs isort && \
echo "--- isort inplace fixing done. ---;"
test_no_lint:
--
2.39.0

View File

@ -1,116 +0,0 @@
From 08756574378232de12ebbdf15801c52bc0090ce6 Mon Sep 17 00:00:00 2001
From: Petr Stodulka <xstodu05@gmail.com>
Date: Fri, 25 Nov 2022 11:19:14 +0100
Subject: [PATCH 51/63] Enable upgrades on s390x when /boot is part of rootfs
Regarding the fix provided in commit a6445b39 we do not need to
inhibit the IPU on the s390x architecture when /boot is not separated
on its own partition, but it's part of the rootfs.
---
.../actors/checknonmountboots390/actor.py | 21 -------------
.../libraries/checknonmountboots390.py | 27 -----------------
.../tests/test_checknonmountboots390.py | 30 -------------------
3 files changed, 78 deletions(-)
delete mode 100644 repos/system_upgrade/common/actors/checknonmountboots390/actor.py
delete mode 100644 repos/system_upgrade/common/actors/checknonmountboots390/libraries/checknonmountboots390.py
delete mode 100644 repos/system_upgrade/common/actors/checknonmountboots390/tests/test_checknonmountboots390.py
diff --git a/repos/system_upgrade/common/actors/checknonmountboots390/actor.py b/repos/system_upgrade/common/actors/checknonmountboots390/actor.py
deleted file mode 100644
index 82dcf30f..00000000
--- a/repos/system_upgrade/common/actors/checknonmountboots390/actor.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from leapp.actors import Actor
-from leapp.libraries.actor import checknonmountboots390
-from leapp.models import Report
-from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
-
-
-class CheckNonMountBootS390(Actor):
- """
- Inhibits on s390 when /boot is NOT on a separate partition.
-
- Due to some problems, if /boot is not on a separate partition, leapp is deleting the content of /boot.
- To avoid this from happening, we are inhibiting the upgrade process until this problem has been solved.
- """
-
- name = 'check_non_mount_boot_s390'
- consumes = ()
- produces = (Report,)
- tags = (ChecksPhaseTag, IPUWorkflowTag)
-
- def process(self):
- checknonmountboots390.perform_check()
diff --git a/repos/system_upgrade/common/actors/checknonmountboots390/libraries/checknonmountboots390.py b/repos/system_upgrade/common/actors/checknonmountboots390/libraries/checknonmountboots390.py
deleted file mode 100644
index bd165603..00000000
--- a/repos/system_upgrade/common/actors/checknonmountboots390/libraries/checknonmountboots390.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import os
-
-from leapp import reporting
-from leapp.libraries.common.config import architecture
-
-
-def perform_check():
- if not architecture.matches_architecture(architecture.ARCH_S390X):
- return
-
- if os.path.ismount('/boot'):
- return
-
- data = [
- reporting.Title('Leapp detected known issue related to /boot on s390x architecture'),
- reporting.Summary((
- 'Due to a bug in the Leapp code, there is a situation when the upgrade process'
- ' removes content of /boot when the directory is not on a separate partition and'
- ' the system is running on S390x architecture. To avoid this from happening, we'
- ' are inhibiting the upgrade process in this release until the issue has been fixed.'
- )),
- reporting.Groups([reporting.Groups.INHIBITOR]),
- reporting.Groups([reporting.Groups.FILESYSTEM, reporting.Groups.UPGRADE_PROCESS, reporting.Groups.BOOT]),
- reporting.Severity(reporting.Severity.HIGH),
- ]
-
- reporting.create_report(data)
diff --git a/repos/system_upgrade/common/actors/checknonmountboots390/tests/test_checknonmountboots390.py b/repos/system_upgrade/common/actors/checknonmountboots390/tests/test_checknonmountboots390.py
deleted file mode 100644
index e6d7ae1d..00000000
--- a/repos/system_upgrade/common/actors/checknonmountboots390/tests/test_checknonmountboots390.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import pytest
-
-from leapp.libraries.actor import checknonmountboots390
-
-
-class CheckNonMountBootS390ReportCreated(Exception):
- pass
-
-
-@pytest.mark.parametrize(
- 'matches_arch,ismount,should_report', (
- (True, True, False),
- (True, False, True),
- (False, True, False),
- (False, False, False),
- )
-)
-def test_checknonmountboots390_perform_check(monkeypatch, matches_arch, ismount, should_report):
- def _create_report(data):
- raise CheckNonMountBootS390ReportCreated()
-
- monkeypatch.setattr(checknonmountboots390.architecture, 'matches_architecture', lambda x: matches_arch)
- monkeypatch.setattr(checknonmountboots390.os.path, 'ismount', lambda x: ismount)
- monkeypatch.setattr(checknonmountboots390.reporting, 'create_report', _create_report)
-
- if should_report:
- with pytest.raises(CheckNonMountBootS390ReportCreated):
- checknonmountboots390.perform_check()
- else:
- checknonmountboots390.perform_check()
--
2.39.0

View File

@ -1,73 +0,0 @@
From aadf694f946ca4821fe2d9aa47eea67dcb270af9 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Wed, 7 Dec 2022 21:50:48 +0100
Subject: [PATCH 52/63] Add leapp debug tools to initramfs
Install a script with debug utilities to the Leapp upgrade dracut
module.
---
.../dracut/90sys-upgrade/leapp_debug_tools.sh | 38 +++++++++++++++++++
.../dracut/90sys-upgrade/module-setup.sh | 2 +
2 files changed, 40 insertions(+)
create mode 100644 repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/leapp_debug_tools.sh
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/leapp_debug_tools.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/leapp_debug_tools.sh
new file mode 100644
index 00000000..91c228ce
--- /dev/null
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/leapp_debug_tools.sh
@@ -0,0 +1,38 @@
+# library containing some useful functions for debugging in initramfs
+
+# mounts the sysroot
+leapp_dbg_mount() {
+ systemctl start sysroot.mount
+ mount -o remount,rw /sysroot
+}
+
+# source programs from $NEWROOT, mount if not mounted
+leapp_dbg_source() {
+ systemctl is-active sysroot.mount --quiet || {
+ echo "sysroot not mounted, mounting...";
+ leapp_dbg_mount || return 1
+ }
+
+ for dir in /bin /sbin; do
+ export PATH="$PATH:${NEWROOT}$dir"
+ done
+
+ export LD_LIBRARY_PATH=/sysroot/lib64
+}
+
+# chroot into $NEWROOT
+leapp_dbg_chroot() {
+ systemctl is-active sysroot.mount --quiet || {
+ echo "sysroot not mounted, mounting...";
+ leapp_dbg_mount || return 1
+ }
+
+ for dir in /sys /run /proc /dev /dev/pts; do
+ mount --bind $dir "$NEWROOT$dir"
+ done || { echo "Failed to mount some directories" || return 1 }
+
+ chroot $NEWROOT sh -c "mount -a; /bin/bash"
+ for dir in /sys /run /proc /dev/pts /dev; do
+ umount $NEWROOT$dir
+ done
+}
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/module-setup.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/module-setup.sh
index d38617db..a9cfffb4 100755
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/module-setup.sh
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/module-setup.sh
@@ -72,6 +72,8 @@ install() {
inst_script "${_moddir}/initrd-system-upgrade-generator" \
"${generatordir}/initrd-system-upgrade-generator"
+ inst_script "${_moddir}/leapp_debug_tools.sh" "/bin/leapp_debug_tools.sh"
+
## upgrade shell service
#sysinit_wantsdir="${_initdir}${unitdir}/sysinit.target.wants"
#mkdir -p "$sysinit_wantsdir"
--
2.39.0

View File

@ -1,80 +0,0 @@
From 0ecb880774a2a74350d055afe7773ae0c31aaab9 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Tue, 20 Dec 2022 12:25:48 +0100
Subject: [PATCH 53/63] Add autosourcing
---
.../files/dracut/90sys-upgrade/.profile | 9 +++++++++
.../files/dracut/90sys-upgrade/.shrc | 4 ++++
.../files/dracut/90sys-upgrade/leapp_debug_tools.sh | 9 ++++++---
.../files/dracut/90sys-upgrade/module-setup.sh | 2 ++
4 files changed, 21 insertions(+), 3 deletions(-)
create mode 100644 repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/.profile
create mode 100644 repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/.shrc
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/.profile b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/.profile
new file mode 100644
index 00000000..c4fe05a7
--- /dev/null
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/.profile
@@ -0,0 +1,9 @@
+#!/bin/sh
+# script read at startup by login shells
+# in the initramfs this is read for example by the emergency shell
+
+# set the environment file, containing shell commands to execute at startup of
+# interactive shells
+if [ -f "$HOME/.shrc" ]; then
+ ENV="$HOME/.shrc"; export ENV
+fi
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/.shrc b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/.shrc
new file mode 100644
index 00000000..5e965f47
--- /dev/null
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/.shrc
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+# shell commands to execute on interactive shell startup
+. leapp_debug_tools.sh
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/leapp_debug_tools.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/leapp_debug_tools.sh
index 91c228ce..5878b75b 100644
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/leapp_debug_tools.sh
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/leapp_debug_tools.sh
@@ -1,3 +1,4 @@
+#!/bin/sh
# library containing some useful functions for debugging in initramfs
# mounts the sysroot
@@ -29,10 +30,12 @@ leapp_dbg_chroot() {
for dir in /sys /run /proc /dev /dev/pts; do
mount --bind $dir "$NEWROOT$dir"
- done || { echo "Failed to mount some directories" || return 1 }
+ done || {
+ echo "Failed to mount some directories" || return 1
+ }
- chroot $NEWROOT sh -c "mount -a; /bin/bash"
+ chroot "$NEWROOT" sh -c "mount -a; /bin/bash"
for dir in /sys /run /proc /dev/pts /dev; do
- umount $NEWROOT$dir
+ umount "$NEWROOT$dir"
done
}
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/module-setup.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/module-setup.sh
index a9cfffb4..06479fb5 100755
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/module-setup.sh
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/module-setup.sh
@@ -73,6 +73,8 @@ install() {
"${generatordir}/initrd-system-upgrade-generator"
inst_script "${_moddir}/leapp_debug_tools.sh" "/bin/leapp_debug_tools.sh"
+ inst_script "${_moddir}/.profile" "/.profile"
+ inst_script "${_moddir}/.shrc" "/.shrc"
## upgrade shell service
#sysinit_wantsdir="${_initdir}${unitdir}/sysinit.target.wants"
--
2.39.0

View File

@ -1,26 +0,0 @@
From c29095d6d334dc57c7eff79c2726ec332098d6e1 Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Mon, 2 Jan 2023 13:29:56 +0100
Subject: [PATCH 54/63] Replace tabs with spaces in the dracut module
---
.../files/dracut/85sys-upgrade-redhat/do-upgrade.sh | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
index 49c26bc8..0763d5b3 100755
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
@@ -196,8 +196,7 @@ ibdmp() {
bring_up_network() {
if [ -f /etc/leapp-initram-network-manager ]; then
- # NOTE(ivasilev) Reverting the change to see if it caused the crash
- . /lib/dracut/hooks/cmdline/99-nm-config.sh
+ . /lib/dracut/hooks/cmdline/99-nm-config.sh
. /lib/dracut/hooks/initqueue/settled/99-nm-run.sh
fi
if [ -f /etc/leapp-initram-network-scripts ]; then
--
2.39.0

View File

@ -1,53 +0,0 @@
From 072cf0bbfcff8223f1b75fa05c621692d64a0af2 Mon Sep 17 00:00:00 2001
From: Jan Macku <jamacku@redhat.com>
Date: Fri, 26 Aug 2022 10:26:23 +0200
Subject: [PATCH 55/63] ci(lint): Add differential-shellcheck GitHub action
It performs differential ShellCheck scans and report results directly in
pull request.
documentation:
https://github.com/redhat-plumbers-in-action/differential-shellcheck
---
.github/workflows/differential-shellcheck.yml | 29 +++++++++++++++++++
1 file changed, 29 insertions(+)
create mode 100644 .github/workflows/differential-shellcheck.yml
diff --git a/.github/workflows/differential-shellcheck.yml b/.github/workflows/differential-shellcheck.yml
new file mode 100644
index 00000000..4af99f8d
--- /dev/null
+++ b/.github/workflows/differential-shellcheck.yml
@@ -0,0 +1,29 @@
+---
+# https://github.com/redhat-plumbers-in-action/differential-shellcheck#readme
+
+name: Differential ShellCheck
+on:
+ pull_request:
+ branches: [master]
+
+permissions:
+ contents: read
+
+jobs:
+ lint:
+ runs-on: ubuntu-latest
+
+ permissions:
+ security-events: write
+ pull-requests: write
+
+ steps:
+ - name: Repository checkout
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+
+ - name: Differential ShellCheck
+ uses: redhat-plumbers-in-action/differential-shellcheck@v3
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
--
2.39.0

View File

@ -1,28 +0,0 @@
From 1859d1811d6331eda8c9684fac47b12ca2e796ae Mon Sep 17 00:00:00 2001
From: Lubomir Rintel <lkundrak@v3.sk>
Date: Thu, 3 Nov 2022 13:53:34 +0100
Subject: [PATCH 56/63] Propagate TEST_PATHS to test_container targets
Allows for quick containerized runs of specific checks.
---
Makefile | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/Makefile b/Makefile
index 066a5fd2..d2201fcf 100644
--- a/Makefile
+++ b/Makefile
@@ -383,8 +383,8 @@ _test_container_ipu:
echo "Only supported TEST_CONT_IPUs are el7toel8, el8toel9"; exit 1; \
;; \
esac && \
- $(_CONTAINER_TOOL) exec -w /repocopy $$_CONT_NAME make clean && \
- $(_CONTAINER_TOOL) exec -w /repocopy -e REPOSITORIES $$_CONT_NAME make $${_TEST_CONT_TARGET:-test}
+ $(_CONTAINER_TOOL) exec -w /repocopy $$_CONT_NAME $(MAKE) clean && \
+ $(_CONTAINER_TOOL) exec -w /repocopy -e REPOSITORIES $$_CONT_NAME $(MAKE) $${_TEST_CONT_TARGET:-test} TEST_PATHS="$(TEST_PATHS)"
# Runs tests in a container
# Builds testing image first if it doesn't exist
--
2.39.0

View File

@ -1,182 +0,0 @@
From 6ada6553eadc08fbbaf69d54129e6d3cc0c214e3 Mon Sep 17 00:00:00 2001
From: PeterMocary <petermocary@gmail.com>
Date: Fri, 26 Aug 2022 15:44:50 +0200
Subject: [PATCH 57/63] Ignore external accounts in /etc/passwd
The /etc/passwd can contain special entries to selectively incorporate entries
from another service source such as NIS or LDAP. These entries don't need to
contain all the fields that are normally present in the /etc/passwd entry and
would cause the upgrade failure in facts phase.
---
.../systemfacts/libraries/systemfacts.py | 48 ++++++++---
.../systemfacts/tests/test_systemfacts.py | 85 ++++++++++++++++++-
2 files changed, 121 insertions(+), 12 deletions(-)
diff --git a/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py b/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py
index e34cb86b..d1eeb28c 100644
--- a/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py
+++ b/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py
@@ -60,13 +60,26 @@ def anyhasprefix(value, prefixes):
@aslist
def _get_system_users():
+ skipped_user_names = []
for p in pwd.getpwall():
- yield User(
- name=p.pw_name,
- uid=p.pw_uid,
- gid=p.pw_gid,
- home=p.pw_dir
- )
+ # The /etc/passwd can contain special entries from another service source such as NIS or LDAP. These entries
+ # start with + or - sign and might not contain all the mandatory fields, thus are skipped along with other
+ # invalid entries for now. The UID and GID fields are always defined by pwd to 0 even when not specifiead in
+ # /etc/passwd.
+ if p.pw_name != '' and not p.pw_name.startswith(('+', '-')) and p.pw_dir:
+ yield User(
+ name=p.pw_name,
+ uid=p.pw_uid,
+ gid=p.pw_gid,
+ home=p.pw_dir
+ )
+ else:
+ skipped_user_names.append(p.pw_name)
+
+ if skipped_user_names:
+ api.current_logger().debug("These users from /etc/passwd that are special entries for service "
+ "like NIS, or don't contain all mandatory fields won't be included "
+ "in UsersFacts: {}".format(skipped_user_names))
def get_system_users_status():
@@ -76,12 +89,25 @@ def get_system_users_status():
@aslist
def _get_system_groups():
+ skipped_group_names = []
for g in grp.getgrall():
- yield Group(
- name=g.gr_name,
- gid=g.gr_gid,
- members=g.gr_mem
- )
+ # The /etc/group can contain special entries from another service source such as NIS or LDAP. These entries
+ # start with + or - sign and might not contain all the mandatory fields, thus are skipped along with other
+ # invalid entries for now. The GID field is always defined by pwd to 0 even when not specifiead in
+ # /etc/group.
+ if g.gr_name != '' and not g.gr_name.startswith(('+', '-')):
+ yield Group(
+ name=g.gr_name,
+ gid=g.gr_gid,
+ members=g.gr_mem
+ )
+ else:
+ skipped_group_names.append(g.gr_name)
+
+ if skipped_group_names:
+ api.current_logger().debug("These groups from /etc/group that are special entries for service "
+ "like NIS, or don't contain all mandatory fields won't be included "
+ "in GroupsFacts: {}".format(skipped_group_names))
def get_system_groups_status():
diff --git a/repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts.py b/repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts.py
index f94003d5..badf174c 100644
--- a/repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts.py
+++ b/repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts.py
@@ -1,4 +1,11 @@
-from leapp.libraries.actor.systemfacts import anyendswith, anyhasprefix, aslist
+import grp
+import pwd
+
+import pytest
+
+from leapp.libraries.actor.systemfacts import _get_system_groups, _get_system_users, anyendswith, anyhasprefix, aslist
+from leapp.libraries.common.testutils import logger_mocked
+from leapp.libraries.stdlib import api
from leapp.snactor.fixture import current_actor_libraries
@@ -33,3 +40,79 @@ def test_aslist(current_actor_libraries):
r = local()
assert isinstance(r, list) and r[0] and r[2] and not r[1]
+
+
+@pytest.mark.parametrize(
+ ('etc_passwd_names', 'etc_passwd_directory', 'skipped_user_names'),
+ [
+ (['root', 'unbound', 'dbus'], '/', []),
+ (['root', '+@scanners', 'dbus', '-@usrc', ''], '/', ['+@scanners', '-@usrc', '']),
+ (['root', '+@scanners', 'dbus'], '', ['root', '+@scanners', 'dbus']),
+ ]
+)
+def test_get_system_users(monkeypatch, etc_passwd_names, etc_passwd_directory, skipped_user_names):
+
+ class MockedPwdEntry(object):
+ def __init__(self, pw_name, pw_uid, pw_gid, pw_dir):
+ self.pw_name = pw_name
+ self.pw_uid = pw_uid
+ self.pw_gid = pw_gid
+ self.pw_dir = pw_dir
+
+ etc_passwd_contents = []
+ for etc_passwd_name in etc_passwd_names:
+ etc_passwd_contents.append(MockedPwdEntry(etc_passwd_name, 0, 0, etc_passwd_directory))
+
+ monkeypatch.setattr(pwd, 'getpwall', lambda: etc_passwd_contents)
+ monkeypatch.setattr(api, 'current_logger', logger_mocked())
+
+ _get_system_users()
+
+ if skipped_user_names:
+ assert len(api.current_logger().dbgmsg) == 1
+
+ for skipped_user_name in skipped_user_names:
+ assert skipped_user_name in api.current_logger().dbgmsg[0]
+
+ for user_name in etc_passwd_names:
+ if user_name not in skipped_user_names:
+ assert user_name not in api.current_logger().dbgmsg[0]
+ else:
+ assert not api.current_logger().dbgmsg
+
+
+@pytest.mark.parametrize(
+ ('etc_group_names', 'skipped_group_names'),
+ [
+ (['cdrom', 'floppy', 'tape'], []),
+ (['cdrom', '+@scanners', 'floppy', '-@usrc', ''], ['+@scanners', '-@usrc', '']),
+ ]
+)
+def test_get_system_groups(monkeypatch, etc_group_names, skipped_group_names):
+
+ class MockedGrpEntry(object):
+ def __init__(self, gr_name, gr_gid, gr_mem):
+ self.gr_name = gr_name
+ self.gr_gid = gr_gid
+ self.gr_mem = gr_mem
+
+ etc_group_contents = []
+ for etc_group_name in etc_group_names:
+ etc_group_contents.append(MockedGrpEntry(etc_group_name, 0, []))
+
+ monkeypatch.setattr(grp, 'getgrall', lambda: etc_group_contents)
+ monkeypatch.setattr(api, 'current_logger', logger_mocked())
+
+ _get_system_groups()
+
+ if skipped_group_names:
+ assert len(api.current_logger().dbgmsg) == 1
+
+ for skipped_group_name in skipped_group_names:
+ assert skipped_group_name in api.current_logger().dbgmsg[0]
+
+ for group_name in etc_group_names:
+ if group_name not in skipped_group_names:
+ assert group_name not in api.current_logger().dbgmsg[0]
+ else:
+ assert not api.current_logger().dbgmsg
--
2.39.0

View File

@ -1,111 +0,0 @@
From 94bfc3e8a4fbe5923b59b828da65ae91babdcb56 Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Mon, 16 Jan 2023 16:01:05 +0100
Subject: [PATCH 58/63] pes_events_scanner: prefilter problematic events and
add back logging
jira: OAMG-8182, OAMG-8221
fixes: bz#2158527
---
.../libraries/pes_events_scanner.py | 60 ++++++++++++++++++-
1 file changed, 59 insertions(+), 1 deletion(-)
diff --git a/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py b/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py
index 96b63280..c254f4c0 100644
--- a/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py
+++ b/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py
@@ -1,4 +1,4 @@
-from collections import namedtuple
+from collections import defaultdict, namedtuple
from functools import partial
from leapp import reporting
@@ -126,6 +126,7 @@ def compute_pkg_changes_between_consequent_releases(source_installed_pkgs,
release,
seen_pkgs,
pkgs_to_demodularize):
+ logger = api.current_logger()
# Start with the installed packages and modify the set according to release events
target_pkgs = set(source_installed_pkgs)
@@ -154,6 +155,12 @@ def compute_pkg_changes_between_consequent_releases(source_installed_pkgs,
# For MERGE to be relevant it is sufficient for only one of its in_pkgs to be installed
if are_all_in_pkgs_present or (event.action == Action.MERGED and is_any_in_pkg_present):
+ removed_pkgs = target_pkgs.intersection(event.in_pkgs)
+ removed_pkgs_str = ', '.join(str(pkg) for pkg in removed_pkgs) if removed_pkgs else '[]'
+ added_pkgs_str = ', '.join(str(pkg) for pkg in event.out_pkgs) if event.out_pkgs else '[]'
+ logger.debug('Applying event %d (%s): replacing packages %s with %s',
+ event.id, event.action, removed_pkgs_str, added_pkgs_str)
+
# In pkgs are present, event can be applied
target_pkgs = target_pkgs.difference(event.in_pkgs)
target_pkgs = target_pkgs.union(event.out_pkgs)
@@ -163,6 +170,55 @@ def compute_pkg_changes_between_consequent_releases(source_installed_pkgs,
return (target_pkgs, pkgs_to_demodularize)
+def remove_undesired_events(events, relevant_to_releases):
+ """
+ Conservatively remove events that needless, or cause problems for the current implementation:
+ - (needless) events with to_release not in relevant releases
+ - (problematic) events with the same from_release and the same in_pkgs
+ """
+
+ logger = api.current_logger()
+ relevant_to_releases = set(relevant_to_releases)
+
+ events_with_same_in_pkgs_and_from_release = defaultdict(list)
+ for event in events:
+ if event.to_release in relevant_to_releases:
+ # NOTE(mhecko): The tuple(sorted(event.in_pkgs))) is ugly, however, the removal of the events with the same
+ # # from_release and in_pkgs is needed only because the current implementation is flawed.
+ # # I would love to rewrite the core algorithm as a "solution to graph reachability problem",
+ # # making the behaviour of PES event scanner purely data driven.
+ events_with_same_in_pkgs_and_from_release[(event.from_release, tuple(sorted(event.in_pkgs)))].append(event)
+
+ cleaned_events = []
+ for from_release_in_pkgs_pair, problematic_events in events_with_same_in_pkgs_and_from_release.items():
+ if len(problematic_events) == 1:
+ cleaned_events.append(problematic_events[0]) # There is no problem
+ continue
+
+ # E.g., one of the problematic events is to=8.6, other one to=8.7, keep only 8.7
+ from_release, dummy_in_pkgs = from_release_in_pkgs_pair
+ max_to_release = max((e.to_release for e in problematic_events))
+ events_with_max_to_release = [event for event in problematic_events if event.to_release == max_to_release]
+
+ if len(events_with_max_to_release) == 1:
+ # If there is a single event with maximal to_release, keep only that
+ kept_event = events_with_max_to_release[0]
+ event_ids = [event.id for event in problematic_events]
+ logger.debug('Events %s have the same in packages and the same from_release %s, keeping %d',
+ event_ids, from_release, kept_event.id)
+ cleaned_events.append(kept_event)
+ continue
+
+ # There are at least 2 events A, B with the same in_release, out_release and in_pkgs. If A is REMOVE and B
+ # performs some conditional mutation (e.g. SPLIT) a race-conflict arises. However, the current
+ # implementation would apply these events as `A(input_state) union B(input_state)`, where the input_state
+ # is kept immutable. Therefore, B will have an effect regardless of whether A is REMOVAL or not.
+ for event in problematic_events:
+ cleaned_events.append(event)
+
+ return cleaned_events
+
+
def compute_packages_on_target_system(source_pkgs, events, releases):
seen_pkgs = set(source_pkgs) # Used to track whether PRESENCE events can be applied
@@ -428,6 +484,8 @@ def process():
# packages of the target system, so we can distinguish what needs to be repomapped
repoids_of_source_pkgs = {pkg.repository for pkg in source_pkgs}
+ events = remove_undesired_events(events, releases)
+
# Apply events - compute what packages should the target system have
target_pkgs, pkgs_to_demodularize = compute_packages_on_target_system(source_pkgs, events, releases)
--
2.39.0

View File

@ -1,188 +0,0 @@
From bab105d15a0f848e341cd1b4ade4e4e7b3ab38aa Mon Sep 17 00:00:00 2001
From: mreznik <mreznik@redhat.com>
Date: Fri, 25 Nov 2022 09:53:53 +0100
Subject: [PATCH 59/63] Enable disabling dnf plugins in the dnfcnfig library
When on AWS, we need to disable the "amazon-id" plugin during the
upgrade stage as we do not have network up and running there yet.
Moreover, even with the network up, we do already have all the data
cached so further communication with its backend could invalidate
the data.
---
.../common/libraries/dnfconfig.py | 26 +++++++++----
.../common/libraries/dnfplugin.py | 38 ++++++++++++++++---
2 files changed, 51 insertions(+), 13 deletions(-)
diff --git a/repos/system_upgrade/common/libraries/dnfconfig.py b/repos/system_upgrade/common/libraries/dnfconfig.py
index 64d6c204..5b8180f0 100644
--- a/repos/system_upgrade/common/libraries/dnfconfig.py
+++ b/repos/system_upgrade/common/libraries/dnfconfig.py
@@ -30,15 +30,21 @@ def _strip_split(data, sep, maxsplit=-1):
return [item.strip() for item in data.split(sep, maxsplit)]
-def _get_main_dump(context):
+def _get_main_dump(context, disable_plugins):
"""
Return the dnf configuration dump of main options for the given context.
Returns the list of lines after the line with "[main]" section
"""
+ cmd = ['dnf', 'config-manager', '--dump']
+
+ if disable_plugins:
+ for plugin in disable_plugins:
+ cmd += ['--disableplugin', plugin]
+
try:
- data = context.call(['dnf', 'config-manager', '--dump'], split=True)['stdout']
+ data = context.call(cmd, split=True)['stdout']
except CalledProcessError as e:
api.current_logger().error('Cannot obtain the dnf configuration')
raise StopActorExecutionError(
@@ -73,18 +79,18 @@ def _get_main_dump(context):
return output_data
-def _get_excluded_pkgs(context):
+def _get_excluded_pkgs(context, disable_plugins):
"""
Return the list of excluded packages for DNF in the given context.
It shouldn't be used on the source system. It is expected this functions
is called only in the target userspace container or on the target system.
"""
- pkgs = _strip_split(_get_main_dump(context).get('exclude', ''), ',')
+ pkgs = _strip_split(_get_main_dump(context, disable_plugins).get('exclude', ''), ',')
return [i for i in pkgs if i]
-def _set_excluded_pkgs(context, pkglist):
+def _set_excluded_pkgs(context, pkglist, disable_plugins):
"""
Configure DNF to exclude packages in the given list
@@ -93,6 +99,10 @@ def _set_excluded_pkgs(context, pkglist):
exclude = 'exclude={}'.format(','.join(pkglist))
cmd = ['dnf', 'config-manager', '--save', '--setopt', exclude]
+ if disable_plugins:
+ for plugin in disable_plugins:
+ cmd += ['--disableplugin', plugin]
+
try:
context.call(cmd)
except CalledProcessError:
@@ -101,7 +111,7 @@ def _set_excluded_pkgs(context, pkglist):
api.current_logger().debug('The DNF configuration has been updated to exclude leapp packages.')
-def exclude_leapp_rpms(context):
+def exclude_leapp_rpms(context, disable_plugins):
"""
Ensure the leapp RPMs are excluded from any DNF transaction.
@@ -112,5 +122,5 @@ def exclude_leapp_rpms(context):
So user will have to drop these packages from the exclude after the
upgrade.
"""
- to_exclude = list(set(_get_excluded_pkgs(context) + get_leapp_packages()))
- _set_excluded_pkgs(context, to_exclude)
+ to_exclude = list(set(_get_excluded_pkgs(context, disable_plugins) + get_leapp_packages()))
+ _set_excluded_pkgs(context, to_exclude, disable_plugins)
diff --git a/repos/system_upgrade/common/libraries/dnfplugin.py b/repos/system_upgrade/common/libraries/dnfplugin.py
index 7a15abc4..7f541c18 100644
--- a/repos/system_upgrade/common/libraries/dnfplugin.py
+++ b/repos/system_upgrade/common/libraries/dnfplugin.py
@@ -299,6 +299,8 @@ def perform_transaction_install(target_userspace_info, storage_info, used_repos,
Performs the actual installation with the DNF rhel-upgrade plugin using the target userspace
"""
+ stage = 'upgrade'
+
# These bind mounts are performed by systemd-nspawn --bind parameters
bind_mounts = [
'/:/installroot',
@@ -337,22 +339,28 @@ def perform_transaction_install(target_userspace_info, storage_info, used_repos,
# communicate with udev
cmd_prefix = ['nsenter', '--ipc=/installroot/proc/1/ns/ipc']
+ disable_plugins = []
+ if plugin_info:
+ for info in plugin_info:
+ if stage in info.disable_in:
+ disable_plugins += [info.name]
+
# we have to ensure the leapp packages will stay untouched
# Note: this is the most probably duplicate action - it should be already
# set like that, however seatbelt is a good thing.
- dnfconfig.exclude_leapp_rpms(context)
+ dnfconfig.exclude_leapp_rpms(context, disable_plugins)
if get_target_major_version() == '9':
_rebuild_rpm_db(context, root='/installroot')
_transaction(
- context=context, stage='upgrade', target_repoids=target_repoids, plugin_info=plugin_info, tasks=tasks,
+ context=context, stage=stage, target_repoids=target_repoids, plugin_info=plugin_info, tasks=tasks,
cmd_prefix=cmd_prefix
)
# we have to ensure the leapp packages will stay untouched even after the
# upgrade is fully finished (it cannot be done before the upgrade
# on the host as the config-manager plugin is available since rhel-8)
- dnfconfig.exclude_leapp_rpms(mounting.NotIsolatedActions(base_dir='/'))
+ dnfconfig.exclude_leapp_rpms(mounting.NotIsolatedActions(base_dir='/'), disable_plugins=disable_plugins)
@contextlib.contextmanager
@@ -377,10 +385,20 @@ def perform_transaction_check(target_userspace_info,
"""
Perform DNF transaction check using our plugin
"""
+
+ stage = 'check'
+
with _prepare_perform(used_repos=used_repos, target_userspace_info=target_userspace_info, xfs_info=xfs_info,
storage_info=storage_info, target_iso=target_iso) as (context, overlay, target_repoids):
apply_workarounds(overlay.nspawn())
- dnfconfig.exclude_leapp_rpms(context)
+
+ disable_plugins = []
+ if plugin_info:
+ for info in plugin_info:
+ if stage in info.disable_in:
+ disable_plugins += [info.name]
+
+ dnfconfig.exclude_leapp_rpms(context, disable_plugins)
_transaction(
context=context, stage='check', target_repoids=target_repoids, plugin_info=plugin_info, tasks=tasks
)
@@ -397,13 +415,23 @@ def perform_rpm_download(target_userspace_info,
"""
Perform RPM download including the transaction test using dnf with our plugin
"""
+
+ stage = 'download'
+
with _prepare_perform(used_repos=used_repos,
target_userspace_info=target_userspace_info,
xfs_info=xfs_info,
storage_info=storage_info,
target_iso=target_iso) as (context, overlay, target_repoids):
+
+ disable_plugins = []
+ if plugin_info:
+ for info in plugin_info:
+ if stage in info.disable_in:
+ disable_plugins += [info.name]
+
apply_workarounds(overlay.nspawn())
- dnfconfig.exclude_leapp_rpms(context)
+ dnfconfig.exclude_leapp_rpms(context, disable_plugins)
_transaction(
context=context, stage='download', target_repoids=target_repoids, plugin_info=plugin_info, tasks=tasks,
test=True, on_aws=on_aws
--
2.39.0

View File

@ -1,164 +0,0 @@
From f5a3d626cf97c193ab1523401827c2a4c89310ea Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Matej=20Matu=C5=A1ka?= <mmatuska@redhat.com>
Date: Fri, 20 Jan 2023 14:03:59 +0100
Subject: [PATCH 60/63] Prevent failed upgrade from restarting in initramfs
(#996)
* Prevent failed upgrade from restarting in initramfs
When the upgrade fails in the initramfs the dracut shell is entered.
Upon exiting the dracut shell, the upgrade.target is restarted which
causes the upgrade.service, which runs the leapp upgrade, to rerun as
well.
This commit fixes that by creating a "flag" file when the upgrade
fails, whose existence is checked before reruning the upgrade and the
upgrade is prevented in such case.
Also, a new removeupgradeartifacts actor is introduced to clean up leftover upgrade artifacts, including the upgrade failed flag file, at the beginning of the upgrade process.
Jira ref.: OAMG-4224
---
.../dracut/85sys-upgrade-redhat/do-upgrade.sh | 20 +++++++++++++
.../actors/removeupgradeartifacts/actor.py | 23 +++++++++++++++
.../libraries/removeupgradeartifacts.py | 17 +++++++++++
.../tests/test_removeupgradeartifacts.py | 28 +++++++++++++++++++
4 files changed, 88 insertions(+)
create mode 100644 repos/system_upgrade/common/actors/removeupgradeartifacts/actor.py
create mode 100644 repos/system_upgrade/common/actors/removeupgradeartifacts/libraries/removeupgradeartifacts.py
create mode 100644 repos/system_upgrade/common/actors/removeupgradeartifacts/tests/test_removeupgradeartifacts.py
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
index 0763d5b3..04540c1d 100755
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
@@ -46,6 +46,8 @@ fi
export NSPAWN_OPTS="$NSPAWN_OPTS --keep-unit --register=no --timezone=off --resolv-conf=off"
+export LEAPP_FAILED_FLAG_FILE="/root/tmp_leapp_py3/.leapp_upgrade_failed"
+
#
# Temp for collecting and preparing tarball
#
@@ -268,6 +270,15 @@ do_upgrade() {
rv=$?
fi
+ if [ "$rv" -ne 0 ]; then
+ # set the upgrade failed flag to prevent the upgrade from running again
+ # when the emergency shell exits and the upgrade.target is restarted
+ local dirname
+ dirname="$("$NEWROOT/bin/dirname" "$NEWROOT$LEAPP_FAILED_FLAG_FILE")"
+ [ -d "$dirname" ] || mkdir "$dirname"
+ "$NEWROOT/bin/touch" "$NEWROOT$LEAPP_FAILED_FLAG_FILE"
+ fi
+
# Dump debug data in case something went wrong
if want_inband_dump "$rv"; then
collect_and_dump_debug_data
@@ -338,6 +349,15 @@ mount -o "remount,rw" "$NEWROOT"
##### do the upgrade #######
(
+ # check if leapp previously failed in the initramfs, if it did return to the emergency shell
+ [ -f "$NEWROOT$LEAPP_FAILED_FLAG_FILE" ] && {
+ echo >&2 "Found file $NEWROOT$LEAPP_FAILED_FLAG_FILE"
+ echo >&2 "Error: Leapp previously failed and cannot continue, returning back to emergency shell"
+ echo >&2 "Please file a support case with $NEWROOT/var/log/leapp/leapp-upgrade.log attached"
+ echo >&2 "To rerun the upgrade upon exiting the dracut shell remove the $NEWROOT$LEAPP_FAILED_FLAG_FILE file"
+ exit 1
+ }
+
[ ! -x "$NEWROOT$LEAPPBIN" ] && {
warn "upgrade binary '$LEAPPBIN' missing!"
exit 1
diff --git a/repos/system_upgrade/common/actors/removeupgradeartifacts/actor.py b/repos/system_upgrade/common/actors/removeupgradeartifacts/actor.py
new file mode 100644
index 00000000..5eb60d27
--- /dev/null
+++ b/repos/system_upgrade/common/actors/removeupgradeartifacts/actor.py
@@ -0,0 +1,23 @@
+from leapp.actors import Actor
+from leapp.libraries.actor import removeupgradeartifacts
+from leapp.tags import InterimPreparationPhaseTag, IPUWorkflowTag
+
+
+class RemoveUpgradeArtifacts(Actor):
+ """
+ Removes artifacts left over by previous leapp runs
+
+ After the upgrade process, there might be some leftover files, which need
+ to be cleaned up before running another upgrade.
+
+ Removed artifacts:
+ - /root/tmp_leapp_py3/ directory (includes ".leapp_upgrade_failed" flag file)
+ """
+
+ name = 'remove_upgrade_artifacts'
+ consumes = ()
+ produces = ()
+ tags = (InterimPreparationPhaseTag, IPUWorkflowTag)
+
+ def process(self):
+ removeupgradeartifacts.process()
diff --git a/repos/system_upgrade/common/actors/removeupgradeartifacts/libraries/removeupgradeartifacts.py b/repos/system_upgrade/common/actors/removeupgradeartifacts/libraries/removeupgradeartifacts.py
new file mode 100644
index 00000000..aa748d9d
--- /dev/null
+++ b/repos/system_upgrade/common/actors/removeupgradeartifacts/libraries/removeupgradeartifacts.py
@@ -0,0 +1,17 @@
+import os
+
+from leapp.libraries.stdlib import api, CalledProcessError, run
+
+UPGRADE_ARTIFACTS_DIR = '/root/tmp_leapp_py3/'
+
+
+def process():
+ if os.path.exists(UPGRADE_ARTIFACTS_DIR):
+ api.current_logger().debug(
+ "Removing leftover upgrade artifacts dir: {} ".format(UPGRADE_ARTIFACTS_DIR))
+
+ try:
+ run(['rm', '-rf', UPGRADE_ARTIFACTS_DIR])
+ except (CalledProcessError, OSError) as e:
+ api.current_logger().debug(
+ 'Failed to remove leftover upgrade artifacts dir: {}'.format(e))
diff --git a/repos/system_upgrade/common/actors/removeupgradeartifacts/tests/test_removeupgradeartifacts.py b/repos/system_upgrade/common/actors/removeupgradeartifacts/tests/test_removeupgradeartifacts.py
new file mode 100644
index 00000000..aee4d7c6
--- /dev/null
+++ b/repos/system_upgrade/common/actors/removeupgradeartifacts/tests/test_removeupgradeartifacts.py
@@ -0,0 +1,28 @@
+import os
+
+import pytest
+
+from leapp.libraries.actor import removeupgradeartifacts
+
+
+@pytest.mark.parametrize(('exists', 'should_remove'), [
+ (True, True),
+ (False, False),
+])
+def test_remove_upgrade_artifacts(monkeypatch, exists, should_remove):
+
+ called = [False]
+
+ def mocked_run(cmd, *args, **kwargs):
+ assert cmd[0] == 'rm'
+ assert cmd[1] == '-rf'
+ assert cmd[2] == removeupgradeartifacts.UPGRADE_ARTIFACTS_DIR
+ called[0] = True
+ return {'exit_code': 0, 'stdout': '', 'stderr': ''}
+
+ monkeypatch.setattr(os.path, 'exists', lambda _: exists)
+ monkeypatch.setattr(removeupgradeartifacts, 'run', mocked_run)
+
+ removeupgradeartifacts.process()
+
+ assert called[0] == should_remove
--
2.39.0

View File

@ -1,137 +0,0 @@
From 00ab521d952d413a095b8b48e5615bedaed41c13 Mon Sep 17 00:00:00 2001
From: Evgeni Golov <evgeni@golov.de>
Date: Thu, 12 Jan 2023 12:37:36 +0100
Subject: [PATCH 61/63] BZ#2142270 - run reindexdb to fix issues due to new
locales in RHEL8
---
.../libraries/satellite_upgrade_check.py | 12 +++++++++---
.../tests/unit_test_satellite_upgrade_check.py | 6 +++++-
.../el7toel8/actors/satellite_upgrader/actor.py | 7 +++++++
.../tests/unit_test_satellite_upgrader.py | 17 +++++++++++++++--
4 files changed, 36 insertions(+), 6 deletions(-)
diff --git a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_check/libraries/satellite_upgrade_check.py b/repos/system_upgrade/el7toel8/actors/satellite_upgrade_check/libraries/satellite_upgrade_check.py
index c33e4f6e..6954dd50 100644
--- a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_check/libraries/satellite_upgrade_check.py
+++ b/repos/system_upgrade/el7toel8/actors/satellite_upgrade_check/libraries/satellite_upgrade_check.py
@@ -23,9 +23,13 @@ def satellite_upgrade_check(facts):
title = "Satellite PostgreSQL data migration"
flags = []
severity = reporting.Severity.MEDIUM
+ reindex_msg = textwrap.dedent("""
+ After the data has been moved to the new location, all databases will require a REINDEX.
+ This will happen automatically during the first boot of the system.
+ """).strip()
if facts.postgresql.same_partition:
- summary = "Your PostgreSQL data will be automatically migrated."
+ migration_msg = "Your PostgreSQL data will be automatically migrated."
else:
scl_psql_path = '/var/opt/rh/rh-postgresql12/lib/pgsql/data/'
if facts.postgresql.space_required > facts.postgresql.space_available:
@@ -36,7 +40,7 @@ def satellite_upgrade_check(facts):
else:
storage_message = """You currently have enough free storage to move the data.
This operation can be performed by the upgrade process."""
- summary = """
+ migration_msg = """
Your PostgreSQL data in {} is currently on a dedicated volume.
PostgreSQL on RHEL8 expects the data to live in /var/lib/pgsql/data.
{}
@@ -44,9 +48,11 @@ def satellite_upgrade_check(facts):
so that the contents of {} are available in /var/lib/pgsql/data.
""".format(scl_psql_path, storage_message, scl_psql_path)
+ summary = "{}\n{}".format(textwrap.dedent(migration_msg).strip(), reindex_msg)
+
reporting.create_report([
reporting.Title(title),
- reporting.Summary(textwrap.dedent(summary).strip()),
+ reporting.Summary(summary),
reporting.Severity(severity),
reporting.Groups([]),
reporting.Groups(flags)
diff --git a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_check/tests/unit_test_satellite_upgrade_check.py b/repos/system_upgrade/el7toel8/actors/satellite_upgrade_check/tests/unit_test_satellite_upgrade_check.py
index 0e1969b7..8b75adf7 100644
--- a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_check/tests/unit_test_satellite_upgrade_check.py
+++ b/repos/system_upgrade/el7toel8/actors/satellite_upgrade_check/tests/unit_test_satellite_upgrade_check.py
@@ -42,9 +42,11 @@ def test_same_disk(monkeypatch):
expected_title = 'Satellite PostgreSQL data migration'
expected_summary = 'Your PostgreSQL data will be automatically migrated.'
+ expected_reindex = 'all databases will require a REINDEX'
assert expected_title == reporting.create_report.report_fields['title']
- assert expected_summary == reporting.create_report.report_fields['summary']
+ assert expected_summary in reporting.create_report.report_fields['summary']
+ assert expected_reindex in reporting.create_report.report_fields['summary']
def test_different_disk_sufficient_storage(monkeypatch):
@@ -58,9 +60,11 @@ def test_different_disk_sufficient_storage(monkeypatch):
expected_title = 'Satellite PostgreSQL data migration'
expected_summary = 'You currently have enough free storage to move the data'
+ expected_reindex = 'all databases will require a REINDEX'
assert expected_title == reporting.create_report.report_fields['title']
assert expected_summary in reporting.create_report.report_fields['summary']
+ assert expected_reindex in reporting.create_report.report_fields['summary']
def test_different_disk_insufficient_storage(monkeypatch):
diff --git a/repos/system_upgrade/el7toel8/actors/satellite_upgrader/actor.py b/repos/system_upgrade/el7toel8/actors/satellite_upgrader/actor.py
index bd1a5d68..b699e6de 100644
--- a/repos/system_upgrade/el7toel8/actors/satellite_upgrader/actor.py
+++ b/repos/system_upgrade/el7toel8/actors/satellite_upgrader/actor.py
@@ -32,3 +32,10 @@ class SatelliteUpgrader(Actor):
api.current_logger().error(
'Could not run the installer, please inspect the logs in /var/log/foreman-installer!'
)
+
+ if facts.postgresql.local_postgresql:
+ api.current_actor().show_message('Re-indexing the database. This can take a while.')
+ try:
+ run(['runuser', '-u', 'postgres', '--', 'reindexdb', '-a'])
+ except (OSError, CalledProcessError) as e:
+ api.current_logger().error('Failed to run `reindexdb`: {}'.format(str(e)))
diff --git a/repos/system_upgrade/el7toel8/actors/satellite_upgrader/tests/unit_test_satellite_upgrader.py b/repos/system_upgrade/el7toel8/actors/satellite_upgrader/tests/unit_test_satellite_upgrader.py
index d62815ca..21dce7f2 100644
--- a/repos/system_upgrade/el7toel8/actors/satellite_upgrader/tests/unit_test_satellite_upgrader.py
+++ b/repos/system_upgrade/el7toel8/actors/satellite_upgrader/tests/unit_test_satellite_upgrader.py
@@ -17,7 +17,8 @@ class MockedRun(object):
def test_run_installer(monkeypatch, current_actor_context):
mocked_run = MockedRun()
monkeypatch.setattr('leapp.libraries.stdlib.run', mocked_run)
- current_actor_context.feed(SatelliteFacts(has_foreman=True, postgresql=SatellitePostgresqlFacts()))
+ current_actor_context.feed(SatelliteFacts(has_foreman=True,
+ postgresql=SatellitePostgresqlFacts(local_postgresql=False)))
current_actor_context.run()
assert mocked_run.commands
assert len(mocked_run.commands) == 1
@@ -28,8 +29,20 @@ def test_run_installer_without_katello(monkeypatch, current_actor_context):
mocked_run = MockedRun()
monkeypatch.setattr('leapp.libraries.stdlib.run', mocked_run)
current_actor_context.feed(SatelliteFacts(has_foreman=True, has_katello_installer=False,
- postgresql=SatellitePostgresqlFacts()))
+ postgresql=SatellitePostgresqlFacts(local_postgresql=False)))
current_actor_context.run()
assert mocked_run.commands
assert len(mocked_run.commands) == 1
assert mocked_run.commands[0] == ['foreman-installer']
+
+
+def test_run_reindexdb(monkeypatch, current_actor_context):
+ mocked_run = MockedRun()
+ monkeypatch.setattr('leapp.libraries.stdlib.run', mocked_run)
+ current_actor_context.feed(SatelliteFacts(has_foreman=True,
+ postgresql=SatellitePostgresqlFacts(local_postgresql=True)))
+ current_actor_context.run()
+ assert mocked_run.commands
+ assert len(mocked_run.commands) == 2
+ assert mocked_run.commands[0] == ['foreman-installer', '--disable-system-checks']
+ assert mocked_run.commands[1] == ['runuser', '-u', 'postgres', '--', 'reindexdb', '-a']
--
2.39.0

View File

@ -1,53 +0,0 @@
From c591be26437f6ad65de1f52fe85839cb0e1fc765 Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Fri, 20 Jan 2023 12:07:42 +0100
Subject: [PATCH 62/63] Improve the hint in peseventsscanner for unknown
repositories
The original msg guided users to open ticket on RHBZ portal, which
has been confusing as the repository is used by other linux
distributions also and they haven't updated the msg properly,
so people has been asking for bugfixes unrelated to RHEL systems
Which could not be fixed by Red Hat (RH) as this is connected
to leapp data, which in case of RH covers only official repositories
and packages provided by RH. Other distributions are expected to provide
the correct leapp data valid for these systems to reflect the content
of the used linux distribution.
To fix this problem, we have decided to update the hint to improve
UX, so they report the problem as they are used for their
distribution. Also the hint has been improved to provide more
instructions what user can do on the system
* change the used (custom) repoid to the official one
* review the planned dnf transaction to see whether there is a problem
regarding the listed packages
* install missing packages after the upgrade manually
---
.../peseventsscanner/libraries/pes_events_scanner.py | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py b/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py
index c254f4c0..b0a87269 100644
--- a/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py
+++ b/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py
@@ -439,8 +439,14 @@ def replace_pesids_with_repoids_in_packages(packages, source_pkgs_repoids):
message='packages may not be installed or upgraded due to repositories unknown to leapp:',
skipped_pkgs=packages_without_known_repoid,
remediation=(
- 'Please file a bug in http://bugzilla.redhat.com/ for leapp-repository component of '
- 'the Red Hat Enterprise Linux product.'
+ 'In case the listed repositories are mirrors of official repositories for RHEL'
+ ' (provided by Red Hat on CDN)'
+ ' and their repositories IDs has been customized, you can change'
+ ' the configuration to use the official IDs instead of fixing the problem.'
+ ' You can also review the projected DNF upgrade transaction result'
+ ' in the logs to see what is going to happen, as this does not necessarily mean'
+ ' that the listed packages will not be upgraded. You can also'
+ ' install any missing packages after the in-place upgrade manually.'
),
)
--
2.39.0

View File

@ -1,118 +0,0 @@
From 35d22f3063acd24ee1e3ba2f2a21c0b17e251bfc Mon Sep 17 00:00:00 2001
From: ina vasilevskaya <ivasilev@redhat.com>
Date: Fri, 20 Jan 2023 17:06:32 +0100
Subject: [PATCH 63/63] Ensure a baseos and appstream repos are available when
upgrade with RHSM (#1001)
Previously we have tested if 2+ rhsm respositories are available.
However, this led to various issues when the repositories provided
via satellite were e.g. *baseos* and *supplementary*. The original
check passed in such a case, but the upgrade transaction failed
due to missing rpms from the missing *appstream* repository.
The current check include the verification that both repositories
are present, searching the *baseos* and *appstream* substrings
in repoids - when speaking about RHSM repositories. If such
repositories are not discovered, the upgrade is inhibit.
The responsibility for custom repositories is kept on user as before.
---
.../libraries/userspacegen.py | 5 +-
.../tests/unit_test_targetuserspacecreator.py | 50 +++++++++++++++----
2 files changed, 45 insertions(+), 10 deletions(-)
diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
index f2391ee8..6335eb5b 100644
--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
+++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
@@ -494,7 +494,10 @@ def _get_rhsm_available_repoids(context):
# TODO: very similar thing should happens for all other repofiles in container
#
repoids = rhsm.get_available_repo_ids(context)
- if not repoids or len(repoids) < 2:
+ # NOTE(ivasilev) For the moment at least AppStream and BaseOS repos are required. While we are still
+ # contemplating on what can be a generic solution to checking this, let's introduce a minimal check for
+ # at-least-one-appstream and at-least-one-baseos among present repoids
+ if not repoids or all("baseos" not in ri for ri in repoids) or all("appstream" not in ri for ri in repoids):
reporting.create_report([
reporting.Title('Cannot find required basic RHEL target repositories.'),
reporting.Summary(
diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py
index 5f544471..a519275e 100644
--- a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py
+++ b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py
@@ -302,17 +302,20 @@ def test_gather_target_repositories_rhui(monkeypatch):
assert target_repoids == set(['rhui-1', 'rhui-2'])
-@pytest.mark.skip(reason="Currently not implemented in the actor. It's TODO.")
-def test_gather_target_repositories_required_not_available(monkeypatch):
+def test_gather_target_repositories_baseos_appstream_not_available(monkeypatch):
# If the repos that Leapp identifies as required for the upgrade (based on the repo mapping and PES data) are not
# available, an exception shall be raised
+ indata = testInData(
+ _PACKAGES_MSGS, _RHSMINFO_MSG, None, _XFS_MSG, _STORAGEINFO_MSG, None
+ )
+ monkeypatch.setattr(rhsm, 'skip_rhsm', lambda: False)
+
mocked_produce = produce_mocked()
monkeypatch.setattr(userspacegen.api, 'current_actor', CurrentActorMocked())
monkeypatch.setattr(userspacegen.api.current_actor(), 'produce', mocked_produce)
# The available RHSM repos
monkeypatch.setattr(rhsm, 'get_available_repo_ids', lambda x: ['repoidA', 'repoidB', 'repoidC'])
- monkeypatch.setattr(rhsm, 'skip_rhsm', lambda: False)
# The required RHEL repos based on the repo mapping and PES data + custom repos required by third party actors
monkeypatch.setattr(userspacegen.api, 'consume', lambda x: iter([models.TargetRepositories(
rhel_repos=[models.RHELTargetRepository(repoid='repoidX'),
@@ -320,12 +323,41 @@ def test_gather_target_repositories_required_not_available(monkeypatch):
custom_repos=[models.CustomTargetRepository(repoid='repoidCustom')])]))
with pytest.raises(StopActorExecution):
- userspacegen.gather_target_repositories(None)
- assert mocked_produce.called
- reports = [m.report for m in mocked_produce.model_instances if isinstance(m, reporting.Report)]
- inhibitors = [m for m in reports if 'INHIBITOR' in m.get('flags', ())]
- assert len(inhibitors) == 1
- assert inhibitors[0].get('title', '') == 'Cannot find required basic RHEL target repositories.'
+ userspacegen.gather_target_repositories(None, indata)
+ assert mocked_produce.called
+ reports = [m.report for m in mocked_produce.model_instances if isinstance(m, reporting.Report)]
+ inhibitors = [m for m in reports if 'inhibitor' in m.get('groups', ())]
+ assert len(inhibitors) == 1
+ assert inhibitors[0].get('title', '') == 'Cannot find required basic RHEL target repositories.'
+ # Now test the case when either of AppStream and BaseOs is not available, upgrade should be inhibited
+ mocked_produce = produce_mocked()
+ monkeypatch.setattr(userspacegen.api, 'current_actor', CurrentActorMocked())
+ monkeypatch.setattr(userspacegen.api.current_actor(), 'produce', mocked_produce)
+ monkeypatch.setattr(rhsm, 'get_available_repo_ids', lambda x: ['repoidA', 'repoidB', 'repoidC-appstream'])
+ monkeypatch.setattr(userspacegen.api, 'consume', lambda x: iter([models.TargetRepositories(
+ rhel_repos=[models.RHELTargetRepository(repoid='repoidC-appstream'),
+ models.RHELTargetRepository(repoid='repoidA')],
+ custom_repos=[models.CustomTargetRepository(repoid='repoidCustom')])]))
+ with pytest.raises(StopActorExecution):
+ userspacegen.gather_target_repositories(None, indata)
+ reports = [m.report for m in mocked_produce.model_instances if isinstance(m, reporting.Report)]
+ inhibitors = [m for m in reports if 'inhibitor' in m.get('groups', ())]
+ assert len(inhibitors) == 1
+ assert inhibitors[0].get('title', '') == 'Cannot find required basic RHEL target repositories.'
+ mocked_produce = produce_mocked()
+ monkeypatch.setattr(userspacegen.api, 'current_actor', CurrentActorMocked())
+ monkeypatch.setattr(userspacegen.api.current_actor(), 'produce', mocked_produce)
+ monkeypatch.setattr(rhsm, 'get_available_repo_ids', lambda x: ['repoidA', 'repoidB', 'repoidC-baseos'])
+ monkeypatch.setattr(userspacegen.api, 'consume', lambda x: iter([models.TargetRepositories(
+ rhel_repos=[models.RHELTargetRepository(repoid='repoidC-baseos'),
+ models.RHELTargetRepository(repoid='repoidA')],
+ custom_repos=[models.CustomTargetRepository(repoid='repoidCustom')])]))
+ with pytest.raises(StopActorExecution):
+ userspacegen.gather_target_repositories(None, indata)
+ reports = [m.report for m in mocked_produce.model_instances if isinstance(m, reporting.Report)]
+ inhibitors = [m for m in reports if 'inhibitor' in m.get('groups', ())]
+ assert len(inhibitors) == 1
+ assert inhibitors[0].get('title', '') == 'Cannot find required basic RHEL target repositories.'
def mocked_consume_data():
--
2.39.0

View File

@ -1,65 +0,0 @@
From e3936ebbd880bc79c2972af1bccc86fae733bf34 Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Tue, 8 Nov 2022 17:44:28 +0100
Subject: [PATCH] Fix the check of memory (RAM) limits
The checkmem actor was incorrect as the limits have been written
in MiBs however the value obtained from /proc/meminfo is in KiBs.
So the actual check has been incorrect.
Also the memory limits have been changed since the creation of the
actor. Updating the values in KiBs based on the current table:
https://access.redhat.com/articles/rhel-limits
The report msg has been updated to print the values in MiB instead
of KiB.
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2139907
---
.../checkmemory/libraries/checkmemory.py | 21 ++++++++++++-------
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/repos/system_upgrade/common/actors/checkmemory/libraries/checkmemory.py b/repos/system_upgrade/common/actors/checkmemory/libraries/checkmemory.py
index ea8bfe69..1045e5c6 100644
--- a/repos/system_upgrade/common/actors/checkmemory/libraries/checkmemory.py
+++ b/repos/system_upgrade/common/actors/checkmemory/libraries/checkmemory.py
@@ -5,10 +5,10 @@ from leapp.libraries.stdlib import api
from leapp.models import MemoryInfo
min_req_memory = {
- architecture.ARCH_X86_64: 1536, # 1.5G
- architecture.ARCH_ARM64: 2048, # 2Gb
- architecture.ARCH_PPC64LE: 2048, # 2Gb
- architecture.ARCH_S390X: 1024 # 1Gb
+ architecture.ARCH_X86_64: 1572864, # 1.5G
+ architecture.ARCH_ARM64: 1572864, # 1.5G
+ architecture.ARCH_PPC64LE: 3145728, # 3G
+ architecture.ARCH_S390X: 1572864, # 1.5G
}
@@ -33,12 +33,17 @@ def process():
if minimum_req_error:
title = 'Minimum memory requirements for RHEL {} are not met'.format(version.get_target_major_version())
- summary = 'Memory detected: {} KiB, required: {} KiB'.format(minimum_req_error['detected'],
- minimum_req_error['minimal_req'])
+ summary = 'Memory detected: {} MiB, required: {} MiB'.format(
+ int(minimum_req_error['detected'] / 1024), # noqa: W1619; pylint: disable=old-division
+ int(minimum_req_error['minimal_req'] / 1024), # noqa: W1619; pylint: disable=old-division
+ )
reporting.create_report([
reporting.Title(title),
reporting.Summary(summary),
reporting.Severity(reporting.Severity.HIGH),
- reporting.Groups([reporting.Groups.SANITY]),
- reporting.Groups([reporting.Groups.INHIBITOR]),
+ reporting.Groups([reporting.Groups.SANITY, reporting.Groups.INHIBITOR]),
+ reporting.ExternalLink(
+ url='https://access.redhat.com/articles/rhel-limits',
+ title='Red Hat Enterprise Linux Technology Capabilities and Limits'
+ ),
])
--
2.39.0

View File

@ -1,64 +0,0 @@
From ded8348f31dfb2838f79c6c14036a42bc508bc93 Mon Sep 17 00:00:00 2001
From: Lubomir Rintel <lkundrak@v3.sk>
Date: Mon, 26 Sep 2022 11:01:04 +0200
Subject: [PATCH 65/75] Add IfCfg model
This represents the legacy network configuration stored in
/etc/sysconfig/network-scripts in form of ifcfg-* files
(along with associated keys-, rules-, routes-, etc. files).
---
repos/system_upgrade/el8toel9/models/ifcfg.py | 42 +++++++++++++++++++
1 file changed, 42 insertions(+)
create mode 100644 repos/system_upgrade/el8toel9/models/ifcfg.py
diff --git a/repos/system_upgrade/el8toel9/models/ifcfg.py b/repos/system_upgrade/el8toel9/models/ifcfg.py
new file mode 100644
index 00000000..b0607fed
--- /dev/null
+++ b/repos/system_upgrade/el8toel9/models/ifcfg.py
@@ -0,0 +1,42 @@
+from leapp.models import fields, Model
+from leapp.topics import SystemInfoTopic
+
+
+class IfCfgProperty(Model):
+ """
+ Key-value pair for ifcfg properties.
+
+ This model is not expected to be used as a message (produced/consumed by actors).
+ It is used from within the IfCfg model.
+ """
+ topic = SystemInfoTopic
+
+ name = fields.String()
+ """ Name of a property """
+ value = fields.Nullable(fields.String())
+ """ Value of a property """
+
+
+class IfCfg(Model):
+ """
+ IfCfg file describing legacy network configuration
+
+ Produced for every ifcfg file loaded from key-value ("sysconfig")
+ format described in nm-settings-ifcfg-rh(5) manual.
+ """
+ topic = SystemInfoTopic
+
+ filename = fields.String()
+ """ Path to file this model was populated from """
+ properties = fields.List(fields.Model(IfCfgProperty), default=[])
+ """ The list of name-value pairs from ifcfg file """
+ secrets = fields.Nullable(fields.List(fields.Model(IfCfgProperty)))
+ """ The list of name-value pairs from keys file """
+ rules = fields.Nullable(fields.List(fields.String()))
+ """ The list of traffic rules for IPv4 """
+ rules6 = fields.Nullable(fields.List(fields.String()))
+ """ The list of traffic rules for IPv6 """
+ routes = fields.Nullable(fields.List(fields.String()))
+ """ The list of routes for IPv4 """
+ routes6 = fields.Nullable(fields.List(fields.String()))
+ """ The list of routes for IPv6 """
--
2.39.0

View File

@ -1,246 +0,0 @@
From cce48a6c1ad138b3217939ccfdb0f271a8492890 Mon Sep 17 00:00:00 2001
From: Lubomir Rintel <lkundrak@v3.sk>
Date: Mon, 26 Sep 2022 10:57:59 +0200
Subject: [PATCH 66/75] Add IfCfgScanner actor
This scans the legacy network configuration in
/etc/sysconfig/network-scripts and produces an IfCfg for each ifcfg-*
file encountered (along with associated keys-, rules-, routes-, etc. files).
---
.../el8toel9/actors/ifcfgscanner/actor.py | 18 +++
.../ifcfgscanner/libraries/ifcfgscanner.py | 67 ++++++++++
.../tests/unit_test_ifcfgscanner.py | 123 ++++++++++++++++++
3 files changed, 208 insertions(+)
create mode 100644 repos/system_upgrade/el8toel9/actors/ifcfgscanner/actor.py
create mode 100644 repos/system_upgrade/el8toel9/actors/ifcfgscanner/libraries/ifcfgscanner.py
create mode 100644 repos/system_upgrade/el8toel9/actors/ifcfgscanner/tests/unit_test_ifcfgscanner.py
diff --git a/repos/system_upgrade/el8toel9/actors/ifcfgscanner/actor.py b/repos/system_upgrade/el8toel9/actors/ifcfgscanner/actor.py
new file mode 100644
index 00000000..dd94986b
--- /dev/null
+++ b/repos/system_upgrade/el8toel9/actors/ifcfgscanner/actor.py
@@ -0,0 +1,18 @@
+from leapp.actors import Actor
+from leapp.libraries.actor import ifcfgscanner
+from leapp.models import IfCfg
+from leapp.tags import FactsPhaseTag, IPUWorkflowTag
+
+
+class IfCfgScanner(Actor):
+ """
+ Scan ifcfg files with legacy network configuration
+ """
+
+ name = "ifcfg_scanner"
+ consumes = ()
+ produces = (IfCfg,)
+ tags = (IPUWorkflowTag, FactsPhaseTag,)
+
+ def process(self):
+ ifcfgscanner.process()
diff --git a/repos/system_upgrade/el8toel9/actors/ifcfgscanner/libraries/ifcfgscanner.py b/repos/system_upgrade/el8toel9/actors/ifcfgscanner/libraries/ifcfgscanner.py
new file mode 100644
index 00000000..cfc385dc
--- /dev/null
+++ b/repos/system_upgrade/el8toel9/actors/ifcfgscanner/libraries/ifcfgscanner.py
@@ -0,0 +1,67 @@
+import errno
+from os import listdir, path
+
+from leapp.libraries.stdlib import api
+from leapp.models import IfCfg, IfCfgProperty
+
+SYSCONFIG_DIR = "/etc/sysconfig/network-scripts"
+
+
+def aux_file(prefix, filename):
+ directory = path.dirname(filename)
+ keys_base = path.basename(filename).replace("ifcfg-", prefix)
+ return path.join(directory, keys_base)
+
+
+def process_ifcfg(filename, secrets=False):
+ if not path.exists(filename):
+ return None
+
+ properties = []
+ for line in open(filename).readlines():
+ try:
+ (name, value) = line.split("#")[0].strip().split("=")
+ if secrets:
+ value = None
+ except ValueError:
+ # We're not interested in lines that are not
+ # simple assignments. Play it safe.
+ continue
+
+ properties.append(IfCfgProperty(name=name, value=value))
+ return properties
+
+
+def process_plain(filename):
+ if not path.exists(filename):
+ return None
+ return open(filename).readlines()
+
+
+def process_file(filename):
+ api.produce(IfCfg(
+ filename=filename,
+ properties=process_ifcfg(filename),
+ secrets=process_ifcfg(aux_file("keys-", filename), secrets=True),
+ rules=process_plain(aux_file("rule-", filename)),
+ rules6=process_plain(aux_file("rule6-", filename)),
+ routes=process_plain(aux_file("route-", filename)),
+ routes6=process_plain(aux_file("route6-", filename)),
+ ))
+
+
+def process_dir(directory):
+ try:
+ keyfiles = listdir(directory)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ return
+ raise
+
+ for f in keyfiles:
+ if f.startswith("ifcfg-"):
+ process_file(path.join(directory, f))
+
+
+def process():
+ process_dir(SYSCONFIG_DIR)
diff --git a/repos/system_upgrade/el8toel9/actors/ifcfgscanner/tests/unit_test_ifcfgscanner.py b/repos/system_upgrade/el8toel9/actors/ifcfgscanner/tests/unit_test_ifcfgscanner.py
new file mode 100644
index 00000000..f5e3056a
--- /dev/null
+++ b/repos/system_upgrade/el8toel9/actors/ifcfgscanner/tests/unit_test_ifcfgscanner.py
@@ -0,0 +1,123 @@
+import errno
+import textwrap
+from os.path import basename
+
+import mock
+import six
+
+from leapp.libraries.actor import ifcfgscanner
+from leapp.libraries.common.testutils import make_OSError, produce_mocked
+from leapp.libraries.stdlib import api
+from leapp.models import IfCfg
+
+_builtins_open = "builtins.open" if six.PY3 else "__builtin__.open"
+
+
+def _listdir_ifcfg(path):
+ if path == ifcfgscanner.SYSCONFIG_DIR:
+ return ["ifcfg-net0"]
+ raise make_OSError(errno.ENOENT)
+
+
+def _listdir_ifcfg2(path):
+ if path == ifcfgscanner.SYSCONFIG_DIR:
+ return ["ifcfg-net0", "ifcfg-net1"]
+ raise make_OSError(errno.ENOENT)
+
+
+def _exists_ifcfg(filename):
+ return basename(filename).startswith("ifcfg-")
+
+
+def _exists_keys(filename):
+ if _exists_ifcfg(filename):
+ return True
+ return basename(filename).startswith("keys-")
+
+
+def test_no_conf(monkeypatch):
+ """
+ No report if there are no ifcfg files.
+ """
+
+ monkeypatch.setattr(ifcfgscanner, "listdir", lambda _: ())
+ monkeypatch.setattr(api, "produce", produce_mocked())
+ ifcfgscanner.process()
+ assert not api.produce.called
+
+
+def test_ifcfg1(monkeypatch):
+ """
+ Parse a single ifcfg file.
+ """
+
+ ifcfg_file = textwrap.dedent("""
+ TYPE=Wireless # Some comment
+ # Another comment
+ ESSID=wep1
+ NAME=wep1
+ MODE=Managed
+ WEP_KEY_FLAGS=ask
+ SECURITYMODE=open
+ DEFAULTKEY=1
+ KEY_TYPE=key
+ """)
+
+ mock_config = mock.mock_open(read_data=ifcfg_file)
+ with mock.patch(_builtins_open, mock_config):
+ monkeypatch.setattr(ifcfgscanner, "listdir", _listdir_ifcfg)
+ monkeypatch.setattr(ifcfgscanner.path, "exists", _exists_ifcfg)
+ monkeypatch.setattr(api, "produce", produce_mocked())
+ ifcfgscanner.process()
+
+ assert api.produce.called == 1
+ assert len(api.produce.model_instances) == 1
+ ifcfg = api.produce.model_instances[0]
+ assert isinstance(ifcfg, IfCfg)
+ assert ifcfg.filename == "/etc/sysconfig/network-scripts/ifcfg-net0"
+ assert ifcfg.secrets is None
+ assert len(ifcfg.properties) == 8
+ assert ifcfg.properties[0].name == "TYPE"
+ assert ifcfg.properties[0].value == "Wireless"
+ assert ifcfg.properties[1].name == "ESSID"
+ assert ifcfg.properties[1].value == "wep1"
+
+
+def test_ifcfg2(monkeypatch):
+ """
+ Parse two ifcfg files.
+ """
+
+ mock_config = mock.mock_open(read_data="TYPE=Ethernet")
+ with mock.patch(_builtins_open, mock_config):
+ monkeypatch.setattr(ifcfgscanner, "listdir", _listdir_ifcfg2)
+ monkeypatch.setattr(ifcfgscanner.path, "exists", _exists_ifcfg)
+ monkeypatch.setattr(api, "produce", produce_mocked())
+ ifcfgscanner.process()
+
+ assert api.produce.called == 2
+ assert len(api.produce.model_instances) == 2
+ ifcfg = api.produce.model_instances[0]
+ assert isinstance(ifcfg, IfCfg)
+
+
+def test_ifcfg_key(monkeypatch):
+ """
+ Report ifcfg secrets from keys- file.
+ """
+
+ mock_config = mock.mock_open(read_data="KEY_PASSPHRASE1=Hell0")
+ with mock.patch(_builtins_open, mock_config):
+ monkeypatch.setattr(ifcfgscanner, "listdir", _listdir_ifcfg)
+ monkeypatch.setattr(ifcfgscanner.path, "exists", _exists_keys)
+ monkeypatch.setattr(api, "produce", produce_mocked())
+ ifcfgscanner.process()
+
+ assert api.produce.called == 1
+ assert len(api.produce.model_instances) == 1
+ ifcfg = api.produce.model_instances[0]
+ assert isinstance(ifcfg, IfCfg)
+ assert ifcfg.filename == "/etc/sysconfig/network-scripts/ifcfg-net0"
+ assert len(ifcfg.secrets) == 1
+ assert ifcfg.secrets[0].name == "KEY_PASSPHRASE1"
+ assert ifcfg.secrets[0].value is None
--
2.39.0

View File

@ -1,68 +0,0 @@
From 2e7a7e40423c2f63d261b1dc088df1d3be04b45d Mon Sep 17 00:00:00 2001
From: Lubomir Rintel <lkundrak@v3.sk>
Date: Mon, 26 Sep 2022 11:03:20 +0200
Subject: [PATCH 67/75] Add NetworkManagerConnection model
This represents the NetworkManager connection profiles in form
of keyfiles at /etc/NetworkManager/system-connections.
---
.../models/networkmanagerconnection.py | 47 +++++++++++++++++++
1 file changed, 47 insertions(+)
create mode 100644 repos/system_upgrade/el8toel9/models/networkmanagerconnection.py
diff --git a/repos/system_upgrade/el8toel9/models/networkmanagerconnection.py b/repos/system_upgrade/el8toel9/models/networkmanagerconnection.py
new file mode 100644
index 00000000..e3456b77
--- /dev/null
+++ b/repos/system_upgrade/el8toel9/models/networkmanagerconnection.py
@@ -0,0 +1,47 @@
+from leapp.models import fields, Model
+from leapp.topics import SystemInfoTopic
+
+
+class NetworkManagerConnectionProperty(Model):
+ """
+ Name-value pair for NetworkManager properties.
+
+ This model is not expected to be used as a message (produced/consumed by actors).
+ It is used within NetworkManagerConnectionSetting of a NetworkManagerConnection.
+ """
+ topic = SystemInfoTopic
+
+ name = fields.String()
+ """ Name of a property """
+ value = fields.String()
+ """ Value of a property """
+
+
+class NetworkManagerConnectionSetting(Model):
+ """
+ NetworkManager setting, composed of a name and a list of name-value pairs.
+
+ This model is not expected to be used as a message (produced/consumed by actors).
+ It is used within NetworkManagerConnection.
+ """
+ topic = SystemInfoTopic
+
+ name = fields.String()
+ """ The NetworkManager setting name """
+ properties = fields.List(fields.Model(NetworkManagerConnectionProperty), default=[])
+ """ The name-value pair for every setting property """
+
+
+class NetworkManagerConnection(Model):
+ """
+ NetworkManager native keyfile connection
+
+ Produced for every connection profile loaded from INI-stile files
+ described in nm-settings-keyfile(5) manual.
+ """
+ topic = SystemInfoTopic
+
+ settings = fields.List(fields.Model(NetworkManagerConnectionSetting), default=[])
+ """ List of NetworkManager settings """
+ filename = fields.String()
+ """ Path to file this model was populated from """
--
2.39.0

View File

@ -1,288 +0,0 @@
From c4dd229113c70a7c402e4488ab0a30e4605e8d60 Mon Sep 17 00:00:00 2001
From: Lubomir Rintel <lkundrak@v3.sk>
Date: Mon, 26 Sep 2022 10:58:31 +0200
Subject: [PATCH 68/75] Add NetworkManagerConnectionScanner actor
This scans the NetworkManager connection profiles in form of keyfiles at
/etc/NetworkManager/system-connections and produces a
NetworkManagerConnection whenever for each one.
This doesn't need the NetworkManager daemon to be actually running,
but needs GObject introspection to be available. The reason for that is
that libnm is used (via Gir) to strip the secrets.
Add requirement for
NetworkManager-libnm
python3-gobject-base
packages. Both are available for all architectures on RHEL 8 and 9.
Currently require them only on RHEL 8 as they are not used in the
code anywhere for RHEL 9 and they seem to be used only for upgrade
RHEL 8 to RHEL 9.
Bump leapp-repository-dependencies to 9
---
packaging/leapp-repository.spec | 7 +-
.../other_specs/leapp-el7toel8-deps.spec | 3 +-
.../networkmanagerconnectionscanner/actor.py | 18 +++
.../networkmanagerconnectionscanner.py | 65 +++++++++++
...it_test_networkmanagerconnectionscanner.py | 105 ++++++++++++++++++
5 files changed, 196 insertions(+), 2 deletions(-)
create mode 100644 repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/actor.py
create mode 100644 repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/libraries/networkmanagerconnectionscanner.py
create mode 100644 repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/tests/unit_test_networkmanagerconnectionscanner.py
diff --git a/packaging/leapp-repository.spec b/packaging/leapp-repository.spec
index 044e7275..8d6376ea 100644
--- a/packaging/leapp-repository.spec
+++ b/packaging/leapp-repository.spec
@@ -2,7 +2,7 @@
%global repositorydir %{leapp_datadir}/repositories
%global custom_repositorydir %{leapp_datadir}/custom-repositories
-%define leapp_repo_deps 8
+%define leapp_repo_deps 9
%if 0%{?rhel} == 7
%define leapp_python_sitelib %{python2_sitelib}
@@ -176,6 +176,11 @@ Requires: kmod
# and missing dracut could be killing situation for us :)
Requires: dracut
+# Required to scan NetworkManagerConnection (e.g. to recognize secrets)
+# NM is requested to be used on RHEL 8+ systems
+Requires: NetworkManager-libnm
+Requires: python3-gobject-base
+
%endif
##################################################
# end requirement
diff --git a/packaging/other_specs/leapp-el7toel8-deps.spec b/packaging/other_specs/leapp-el7toel8-deps.spec
index 822b6f63..4a181ee1 100644
--- a/packaging/other_specs/leapp-el7toel8-deps.spec
+++ b/packaging/other_specs/leapp-el7toel8-deps.spec
@@ -9,7 +9,7 @@
%endif
-%define leapp_repo_deps 8
+%define leapp_repo_deps 9
%define leapp_framework_deps 5
# NOTE: the Version contains the %{rhel} macro just for the convenience to
@@ -68,6 +68,7 @@ Requires: cpio
# just to be sure that /etc/modprobe.d is present
Requires: kmod
+
%description -n %{lrdname}
%{summary}
diff --git a/repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/actor.py b/repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/actor.py
new file mode 100644
index 00000000..6ee66b52
--- /dev/null
+++ b/repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/actor.py
@@ -0,0 +1,18 @@
+from leapp.actors import Actor
+from leapp.libraries.actor import networkmanagerconnectionscanner
+from leapp.models import NetworkManagerConnection
+from leapp.tags import FactsPhaseTag, IPUWorkflowTag
+
+
+class NetworkManagerConnectionScanner(Actor):
+ """
+ Scan NetworkManager connection keyfiles
+ """
+
+ name = "network_manager_connection_scanner"
+ consumes = ()
+ produces = (NetworkManagerConnection,)
+ tags = (IPUWorkflowTag, FactsPhaseTag,)
+
+ def process(self):
+ networkmanagerconnectionscanner.process()
diff --git a/repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/libraries/networkmanagerconnectionscanner.py b/repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/libraries/networkmanagerconnectionscanner.py
new file mode 100644
index 00000000..b148de6b
--- /dev/null
+++ b/repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/libraries/networkmanagerconnectionscanner.py
@@ -0,0 +1,65 @@
+import errno
+import os
+
+from leapp.exceptions import StopActorExecutionError
+from leapp.libraries.common import utils
+from leapp.libraries.stdlib import api
+from leapp.models import NetworkManagerConnection, NetworkManagerConnectionProperty, NetworkManagerConnectionSetting
+
+libnm_available = False
+err_details = None
+try:
+ import gi
+ try:
+ gi.require_version("NM", "1.0")
+ from gi.repository import GLib, NM
+ libnm_available = True
+ except ValueError:
+ err_details = 'NetworkManager-libnm package is not available'
+except ImportError:
+ err_details = 'python3-gobject-base package is not available'
+
+NM_CONN_DIR = "/etc/NetworkManager/system-connections"
+
+
+def process_file(filename):
+ # We're running this through libnm in order to clear the secrets.
+ # We don't know what keys are secret, but libnm does.
+ keyfile = GLib.KeyFile()
+ keyfile.load_from_file(filename, GLib.KeyFileFlags.NONE)
+ con = NM.keyfile_read(keyfile, NM_CONN_DIR, NM.KeyfileHandlerFlags.NONE)
+ con.clear_secrets()
+ keyfile = NM.keyfile_write(con, NM.KeyfileHandlerFlags.NONE)
+ cp = utils.parse_config(keyfile.to_data()[0])
+
+ settings = []
+ for setting_name in cp.sections():
+ properties = []
+ for name, value in cp.items(setting_name, raw=True):
+ properties.append(NetworkManagerConnectionProperty(name=name, value=value))
+ settings.append(
+ NetworkManagerConnectionSetting(name=setting_name, properties=properties)
+ )
+ api.produce(NetworkManagerConnection(filename=filename, settings=settings))
+
+
+def process_dir(directory):
+ try:
+ keyfiles = os.listdir(directory)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ return
+ raise
+
+ for f in keyfiles:
+ process_file(os.path.join(NM_CONN_DIR, f))
+
+
+def process():
+ if libnm_available:
+ process_dir(NM_CONN_DIR)
+ else:
+ raise StopActorExecutionError(
+ message='Failed to read NetworkManager connections',
+ details=err_details
+ )
diff --git a/repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/tests/unit_test_networkmanagerconnectionscanner.py b/repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/tests/unit_test_networkmanagerconnectionscanner.py
new file mode 100644
index 00000000..46af07c1
--- /dev/null
+++ b/repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/tests/unit_test_networkmanagerconnectionscanner.py
@@ -0,0 +1,105 @@
+import errno
+import textwrap
+
+import pytest
+import six
+
+from leapp.libraries.actor import networkmanagerconnectionscanner as nmconnscanner
+from leapp.libraries.common.testutils import make_OSError, produce_mocked
+from leapp.libraries.stdlib import api
+from leapp.models import NetworkManagerConnection
+
+_builtins_open = "builtins.open" if six.PY3 else "__builtin__.open"
+
+
+def _listdir_nm_conn(path):
+ if path == nmconnscanner.NM_CONN_DIR:
+ return ["conn1.nmconnection"]
+ raise make_OSError(errno.ENOENT)
+
+
+def _listdir_nm_conn2(path):
+ if path == nmconnscanner.NM_CONN_DIR:
+ return ["conn1.nmconnection", "conn2.nmconnection"]
+ raise make_OSError(errno.ENOENT)
+
+
+def _load_from_file(keyfile, filename, flags):
+ if filename.endswith(".nmconnection"):
+ return keyfile.load_from_data(textwrap.dedent("""
+ [connection]
+ type=wifi
+ id=conn1
+ uuid=a1bc695d-c548-40e8-9c7f-205a6587135d
+
+ [wifi]
+ mode=infrastructure
+ ssid=wifi
+
+ [wifi-security]
+ auth-alg=open
+ key-mgmt=none
+ wep-key-type=1
+ wep-key0=abcde
+ """), nmconnscanner.GLib.MAXSIZE, flags)
+ raise make_OSError(errno.ENOENT)
+
+
+@pytest.mark.skipif(not nmconnscanner.libnm_available, reason="NetworkManager g-ir not installed")
+def test_no_conf(monkeypatch):
+ """
+ No report if there are no keyfiles
+ """
+
+ monkeypatch.setattr(nmconnscanner.os, "listdir", lambda _: ())
+ monkeypatch.setattr(api, "produce", produce_mocked())
+ nmconnscanner.process()
+ assert not api.produce.called
+
+
+@pytest.mark.skipif(not nmconnscanner.libnm_available, reason="NetworkManager g-ir not installed")
+def test_nm_conn(monkeypatch):
+ """
+ Check a basic keyfile
+ """
+
+ monkeypatch.setattr(nmconnscanner.os, "listdir", _listdir_nm_conn)
+ monkeypatch.setattr(api, "produce", produce_mocked())
+ monkeypatch.setattr(nmconnscanner.GLib.KeyFile, "load_from_file", _load_from_file)
+ nmconnscanner.process()
+
+ assert api.produce.called == 1
+ assert len(api.produce.model_instances) == 1
+ nm_conn = api.produce.model_instances[0]
+ assert isinstance(nm_conn, NetworkManagerConnection)
+ assert nm_conn.filename == "/etc/NetworkManager/system-connections/conn1.nmconnection"
+ assert len(nm_conn.settings) == 3
+ assert nm_conn.settings[0].name == "connection"
+ assert len(nm_conn.settings[0].properties) == 4
+ assert nm_conn.settings[0].properties[0].name == "id"
+ assert nm_conn.settings[0].properties[0].value == "conn1"
+ assert nm_conn.settings[2].name == "wifi-security"
+
+ # It's important that wek-key0 is gone
+ assert len(nm_conn.settings[2].properties) == 3
+ assert nm_conn.settings[2].properties[0].name == "auth-alg"
+ assert nm_conn.settings[2].properties[0].value == "open"
+ assert nm_conn.settings[2].properties[1].name != "wep-key0"
+ assert nm_conn.settings[2].properties[2].name != "wep-key0"
+
+
+@pytest.mark.skipif(not nmconnscanner.libnm_available, reason="NetworkManager g-ir not installed")
+def test_nm_conn2(monkeypatch):
+ """
+ Check a pair of keyfiles
+ """
+
+ monkeypatch.setattr(nmconnscanner.os, "listdir", _listdir_nm_conn2)
+ monkeypatch.setattr(api, "produce", produce_mocked())
+ monkeypatch.setattr(nmconnscanner.GLib.KeyFile, "load_from_file", _load_from_file)
+ nmconnscanner.process()
+
+ assert api.produce.called == 2
+ assert len(api.produce.model_instances) == 2
+ assert api.produce.model_instances[0].filename.endswith("/conn1.nmconnection")
+ assert api.produce.model_instances[1].filename.endswith("/conn2.nmconnection")
--
2.39.0

View File

@ -1,34 +0,0 @@
From 791e42430bf17502419a42a8d3067f3622bb221d Mon Sep 17 00:00:00 2001
From: Lubomir Rintel <lkundrak@v3.sk>
Date: Thu, 3 Nov 2022 13:54:10 +0100
Subject: [PATCH 69/75] Install python3-gobject-base and NetworkManager-libnm
in f34
The NetworkManagerConnectionScanner requires GObject introspection and
libnm to be installed.
The RHEL8 UBI doesn't contain NetworkManager at all and the Python 2
platforms are too old to support GObject introspection.
Let's add the packages to the f34 base so that we test the the scanner
somewhere.
---
utils/container-tests/Containerfile.f34 | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/utils/container-tests/Containerfile.f34 b/utils/container-tests/Containerfile.f34
index a74153e1..a9346635 100644
--- a/utils/container-tests/Containerfile.f34
+++ b/utils/container-tests/Containerfile.f34
@@ -3,7 +3,7 @@ FROM fedora:34
VOLUME /repo
RUN dnf update -y && \
- dnf install -y findutils make rsync
+ dnf install -y findutils make rsync python3-gobject-base NetworkManager-libnm
ENV PYTHON_VENV python3.9
--
2.39.0

View File

@ -1,375 +0,0 @@
From dc7dc4d712c1e32a62701319130f8dd66da5ecc4 Mon Sep 17 00:00:00 2001
From: Lubomir Rintel <lkundrak@v3.sk>
Date: Mon, 26 Sep 2022 11:01:35 +0200
Subject: [PATCH 70/75] Make CheckNetworkDeprecations consume IfCfg and
NetworkManagerConnection
This actor used to scan the NetworkManager keyfiles and icfg files
itself. No more!
---
.../actors/networkdeprecations/actor.py | 7 +-
.../libraries/networkdeprecations.py | 71 +++----
.../tests/unit_test_networkdeprecations.py | 192 ++++++++----------
3 files changed, 111 insertions(+), 159 deletions(-)
diff --git a/repos/system_upgrade/el8toel9/actors/networkdeprecations/actor.py b/repos/system_upgrade/el8toel9/actors/networkdeprecations/actor.py
index 19113e4f..3074a3c7 100644
--- a/repos/system_upgrade/el8toel9/actors/networkdeprecations/actor.py
+++ b/repos/system_upgrade/el8toel9/actors/networkdeprecations/actor.py
@@ -1,7 +1,7 @@
from leapp.actors import Actor
from leapp.libraries.actor import networkdeprecations
-from leapp.models import Report
-from leapp.tags import FactsPhaseTag, IPUWorkflowTag
+from leapp.models import IfCfg, NetworkManagerConnection, Report
+from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
class CheckNetworkDeprecations(Actor):
@@ -16,8 +16,9 @@ class CheckNetworkDeprecations(Actor):
"""
name = "network_deprecations"
+ consumes = (IfCfg, NetworkManagerConnection,)
produces = (Report,)
- tags = (IPUWorkflowTag, FactsPhaseTag,)
+ tags = (ChecksPhaseTag, IPUWorkflowTag,)
def process(self):
networkdeprecations.process()
diff --git a/repos/system_upgrade/el8toel9/actors/networkdeprecations/libraries/networkdeprecations.py b/repos/system_upgrade/el8toel9/actors/networkdeprecations/libraries/networkdeprecations.py
index 2a6a2de9..92dfc51d 100644
--- a/repos/system_upgrade/el8toel9/actors/networkdeprecations/libraries/networkdeprecations.py
+++ b/repos/system_upgrade/el8toel9/actors/networkdeprecations/libraries/networkdeprecations.py
@@ -1,11 +1,6 @@
-import errno
-import os
-
from leapp import reporting
-from leapp.libraries.common import utils
-
-SYSCONFIG_DIR = '/etc/sysconfig/network-scripts'
-NM_CONN_DIR = '/etc/NetworkManager/system-connections'
+from leapp.libraries.stdlib import api
+from leapp.models import IfCfg, NetworkManagerConnection
FMT_LIST_SEPARATOR = '\n - '
@@ -13,56 +8,36 @@ FMT_LIST_SEPARATOR = '\n - '
def process():
wep_files = []
- # Scan NetworkManager native keyfiles
- try:
- keyfiles = os.listdir(NM_CONN_DIR)
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- keyfiles = []
-
- for f in keyfiles:
- path = os.path.join(NM_CONN_DIR, f)
-
- cp = utils.parse_config(open(path, mode='r').read())
-
- if not cp.has_section('wifi-security'):
- continue
+ # Scan NetworkManager native keyfile connections
+ for nmconn in api.consume(NetworkManagerConnection):
+ for setting in nmconn.settings:
+ if not setting.name == 'wifi-security':
+ continue
- key_mgmt = cp.get('wifi-security', 'key-mgmt')
- if key_mgmt in ('none', 'ieee8021x'):
- wep_files.append(path)
+ for prop in setting.properties:
+ if not prop.name == 'key-mgmt':
+ continue
+ if prop.value in ('none', 'ieee8021x'):
+ wep_files.append(nmconn.filename)
# Scan legacy ifcfg files & secrets
- try:
- ifcfgs = os.listdir(SYSCONFIG_DIR)
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- ifcfgs = []
-
- for f in ifcfgs:
- path = os.path.join(SYSCONFIG_DIR, f)
+ for ifcfg in api.consume(IfCfg):
+ props = ifcfg.properties
+ if ifcfg.secrets is not None:
+ props = props + ifcfg.secrets
- if not f.startswith('ifcfg-') and not f.startswith('keys-'):
- continue
-
- for line in open(path).readlines():
- try:
- (key, value) = line.split('#')[0].strip().split('=')
- except ValueError:
- # We're not interested in lines that are not
- # simple assignments. Play it safe.
- continue
+ for prop in props:
+ name = prop.name
+ value = prop.value
# Dynamic WEP
- if key == 'KEY_MGMT' and value.upper() == 'IEEE8021X':
- wep_files.append(path)
+ if name == 'KEY_MGMT' and value.upper() == 'IEEE8021X':
+ wep_files.append(ifcfg.filename)
continue
# Static WEP, possibly with agent-owned secrets
- if key in ('KEY_PASSPHRASE1', 'KEY1', 'WEP_KEY_FLAGS'):
- wep_files.append(path)
+ if name in ('KEY_PASSPHRASE1', 'KEY1', 'WEP_KEY_FLAGS'):
+ wep_files.append(ifcfg.filename)
continue
if wep_files:
diff --git a/repos/system_upgrade/el8toel9/actors/networkdeprecations/tests/unit_test_networkdeprecations.py b/repos/system_upgrade/el8toel9/actors/networkdeprecations/tests/unit_test_networkdeprecations.py
index bd140405..659ab993 100644
--- a/repos/system_upgrade/el8toel9/actors/networkdeprecations/tests/unit_test_networkdeprecations.py
+++ b/repos/system_upgrade/el8toel9/actors/networkdeprecations/tests/unit_test_networkdeprecations.py
@@ -1,148 +1,124 @@
-import errno
-import textwrap
-
-import mock
-import six
-
-from leapp import reporting
-from leapp.libraries.actor import networkdeprecations
-from leapp.libraries.common.testutils import create_report_mocked, make_OSError
-
-
-def _listdir_nm_conn(path):
- if path == networkdeprecations.NM_CONN_DIR:
- return ['connection']
- raise make_OSError(errno.ENOENT)
-
-
-def _listdir_ifcfg(path):
- if path == networkdeprecations.SYSCONFIG_DIR:
- return ['ifcfg-wireless']
- raise make_OSError(errno.ENOENT)
-
-
-def _listdir_keys(path):
- if path == networkdeprecations.SYSCONFIG_DIR:
- return ['keys-wireless']
- raise make_OSError(errno.ENOENT)
-
-
-def test_no_conf(monkeypatch):
+from leapp.models import (
+ IfCfg,
+ IfCfgProperty,
+ NetworkManagerConnection,
+ NetworkManagerConnectionProperty,
+ NetworkManagerConnectionSetting
+)
+from leapp.reporting import Report
+from leapp.utils.report import is_inhibitor
+
+
+def test_no_conf(current_actor_context):
"""
No report if there are no networks.
"""
- monkeypatch.setattr(networkdeprecations.os, 'listdir', lambda _: ())
- monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
- networkdeprecations.process()
- assert not reporting.create_report.called
+ current_actor_context.run()
+ assert not current_actor_context.consume(Report)
-def test_no_wireless(monkeypatch):
+def test_no_wireless(current_actor_context):
"""
No report if there's a keyfile, but it's not for a wireless connection.
"""
- mock_config = mock.mock_open(read_data='[connection]')
- with mock.patch('builtins.open' if six.PY3 else '__builtin__.open', mock_config):
- monkeypatch.setattr(networkdeprecations.os, 'listdir', _listdir_nm_conn)
- monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
- networkdeprecations.process()
- assert not reporting.create_report.called
+ not_wifi_nm_conn = NetworkManagerConnection(filename='/NM/wlan0.nmconn', settings=(
+ NetworkManagerConnectionSetting(name='connection'),
+ ))
+ current_actor_context.feed(not_wifi_nm_conn)
+ current_actor_context.run()
+ assert not current_actor_context.consume(Report)
-def test_keyfile_static_wep(monkeypatch):
+
+def test_keyfile_static_wep(current_actor_context):
"""
Report if there's a static WEP keyfile.
"""
- STATIC_WEP_CONN = textwrap.dedent("""
- [wifi-security]
- auth-alg=open
- key-mgmt=none
- wep-key-type=1
- wep-key0=abcde
- """)
+ static_wep_nm_conn = NetworkManagerConnection(filename='/NM/wlan0.nmconn', settings=(
+ NetworkManagerConnectionSetting(name='wifi-security', properties=(
+ NetworkManagerConnectionProperty(name='auth-alg', value='open'),
+ NetworkManagerConnectionProperty(name='key-mgmt', value='none'),
+ NetworkManagerConnectionProperty(name='wep-key-type', value='1'),
+ )),
+ ))
- mock_config = mock.mock_open(read_data=STATIC_WEP_CONN)
- with mock.patch('builtins.open' if six.PY3 else '__builtin__.open', mock_config):
- monkeypatch.setattr(networkdeprecations.os, 'listdir', _listdir_nm_conn)
- monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
- networkdeprecations.process()
- assert reporting.create_report.called
+ current_actor_context.feed(static_wep_nm_conn)
+ current_actor_context.run()
+ report_fields = current_actor_context.consume(Report)[0].report
+ assert is_inhibitor(report_fields)
-def test_keyfile_dynamic_wep(monkeypatch):
+def test_keyfile_dynamic_wep(current_actor_context):
"""
Report if there's a dynamic WEP keyfile.
"""
- DYNAMIC_WEP_CONN = textwrap.dedent("""
- [wifi-security]
- key-mgmt=ieee8021x
- """)
+ dynamic_wep_conn = NetworkManagerConnection(filename='/NM/wlan0.nmconn', settings=(
+ NetworkManagerConnectionSetting(name='wifi-security', properties=(
+ NetworkManagerConnectionProperty(name='key-mgmt', value='ieee8021x'),
+ )),
+ ))
- mock_config = mock.mock_open(read_data=DYNAMIC_WEP_CONN)
- with mock.patch('builtins.open' if six.PY3 else '__builtin__.open', mock_config):
- monkeypatch.setattr(networkdeprecations.os, 'listdir', _listdir_nm_conn)
- monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
- networkdeprecations.process()
- assert reporting.create_report.called
+ current_actor_context.feed(dynamic_wep_conn)
+ current_actor_context.run()
+ report_fields = current_actor_context.consume(Report)[0].report
+ assert is_inhibitor(report_fields)
-def test_ifcfg_static_wep_ask(monkeypatch):
+def test_ifcfg_static_wep_ask(current_actor_context):
"""
Report if there's a static WEP sysconfig without stored key.
"""
- STATIC_WEP_ASK_KEY_SYSCONFIG = textwrap.dedent("""
- TYPE=Wireless
- ESSID=wep1
- NAME=wep1
- MODE=Managed
- WEP_KEY_FLAGS=ask
- SECURITYMODE=open
- DEFAULTKEY=1
- KEY_TYPE=key
- """)
-
- mock_config = mock.mock_open(read_data=STATIC_WEP_ASK_KEY_SYSCONFIG)
- with mock.patch('builtins.open' if six.PY3 else '__builtin__.open', mock_config):
- monkeypatch.setattr(networkdeprecations.os, 'listdir', _listdir_ifcfg)
- monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
- networkdeprecations.process()
- assert reporting.create_report.called
-
-
-def test_ifcfg_static_wep(monkeypatch):
+ static_wep_ask_key_ifcfg = IfCfg(filename='/NM/ifcfg-wlan0', properties=(
+ IfCfgProperty(name='TYPE', value='Wireless'),
+ IfCfgProperty(name='ESSID', value='wep1'),
+ IfCfgProperty(name='NAME', value='wep1'),
+ IfCfgProperty(name='MODE', value='Managed'),
+ IfCfgProperty(name='WEP_KEY_FLAGS', value='ask'),
+ IfCfgProperty(name='SECURITYMODE', value='open'),
+ IfCfgProperty(name='DEFAULTKEY', value='1'),
+ IfCfgProperty(name='KEY_TYPE', value='key'),
+ ))
+
+ current_actor_context.feed(static_wep_ask_key_ifcfg)
+ current_actor_context.run()
+ report_fields = current_actor_context.consume(Report)[0].report
+ assert is_inhibitor(report_fields)
+
+
+def test_ifcfg_static_wep(current_actor_context):
"""
Report if there's a static WEP sysconfig with a stored passphrase.
"""
- mock_config = mock.mock_open(read_data='KEY_PASSPHRASE1=Hell0')
- with mock.patch('builtins.open' if six.PY3 else '__builtin__.open', mock_config):
- monkeypatch.setattr(networkdeprecations.os, 'listdir', _listdir_keys)
- monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
- networkdeprecations.process()
- assert reporting.create_report.called
+ static_wep_ifcfg = IfCfg(filename='/NM/ifcfg-wlan0', secrets=(
+ IfCfgProperty(name='KEY_PASSPHRASE1', value=None),
+ ))
+
+ current_actor_context.feed(static_wep_ifcfg)
+ current_actor_context.run()
+ report_fields = current_actor_context.consume(Report)[0].report
+ assert is_inhibitor(report_fields)
-def test_ifcfg_dynamic_wep(monkeypatch):
+def test_ifcfg_dynamic_wep(current_actor_context):
"""
Report if there's a dynamic WEP sysconfig.
"""
- DYNAMIC_WEP_SYSCONFIG = textwrap.dedent("""
- ESSID=dynwep1
- MODE=Managed
- KEY_MGMT=IEEE8021X # Dynamic WEP!
- TYPE=Wireless
- NAME=dynwep1
- """)
-
- mock_config = mock.mock_open(read_data=DYNAMIC_WEP_SYSCONFIG)
- with mock.patch('builtins.open' if six.PY3 else '__builtin__.open', mock_config):
- monkeypatch.setattr(networkdeprecations.os, 'listdir', _listdir_ifcfg)
- monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
- networkdeprecations.process()
- assert reporting.create_report.called
+ dynamic_wep_ifcfg = IfCfg(filename='/NM/ifcfg-wlan0', properties=(
+ IfCfgProperty(name='ESSID', value='dynwep1'),
+ IfCfgProperty(name='MODE', value='Managed'),
+ IfCfgProperty(name='KEY_MGMT', value='IEEE8021X'),
+ IfCfgProperty(name='TYPE', value='Wireless'),
+ IfCfgProperty(name='NAME', value='dynwep1'),
+ ))
+
+ current_actor_context.feed(dynamic_wep_ifcfg)
+ current_actor_context.run()
+ report_fields = current_actor_context.consume(Report)[0].report
+ assert is_inhibitor(report_fields)
--
2.39.0

View File

@ -1,386 +0,0 @@
From f2977392208ad6874802bed30af9616853c77c08 Mon Sep 17 00:00:00 2001
From: Lubomir Rintel <lkundrak@v3.sk>
Date: Mon, 26 Sep 2022 12:54:57 +0200
Subject: [PATCH 71/75] Make CheckIfCfg consume IfCfg
This actor used to scan the NetworkManager ifcfg files itself.
Now it uses IfCfg messages, sharing the scanning code with
CheckNetworkDeprecations.
---
.../el8toel9/actors/checkifcfg/actor.py | 8 +-
.../checkifcfg/libraries/checkifcfg_ifcfg.py | 40 +---
.../checkifcfg/tests/unit_test_ifcfg.py | 207 +++++++++---------
3 files changed, 118 insertions(+), 137 deletions(-)
diff --git a/repos/system_upgrade/el8toel9/actors/checkifcfg/actor.py b/repos/system_upgrade/el8toel9/actors/checkifcfg/actor.py
index c6927d96..3ad0b5a0 100644
--- a/repos/system_upgrade/el8toel9/actors/checkifcfg/actor.py
+++ b/repos/system_upgrade/el8toel9/actors/checkifcfg/actor.py
@@ -1,7 +1,7 @@
from leapp.actors import Actor
from leapp.libraries.actor import checkifcfg_ifcfg as ifcfg
-from leapp.models import InstalledRPM, Report, RpmTransactionTasks
-from leapp.tags import FactsPhaseTag, IPUWorkflowTag
+from leapp.models import IfCfg, InstalledRPM, Report, RpmTransactionTasks
+from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
class CheckIfcfg(Actor):
@@ -16,9 +16,9 @@ class CheckIfcfg(Actor):
"""
name = "check_ifcfg"
- consumes = (InstalledRPM,)
+ consumes = (IfCfg, InstalledRPM,)
produces = (Report, RpmTransactionTasks,)
- tags = (IPUWorkflowTag, FactsPhaseTag,)
+ tags = (ChecksPhaseTag, IPUWorkflowTag,)
def process(self):
ifcfg.process()
diff --git a/repos/system_upgrade/el8toel9/actors/checkifcfg/libraries/checkifcfg_ifcfg.py b/repos/system_upgrade/el8toel9/actors/checkifcfg/libraries/checkifcfg_ifcfg.py
index 9a9fe96b..5c843583 100644
--- a/repos/system_upgrade/el8toel9/actors/checkifcfg/libraries/checkifcfg_ifcfg.py
+++ b/repos/system_upgrade/el8toel9/actors/checkifcfg/libraries/checkifcfg_ifcfg.py
@@ -3,13 +3,12 @@ import os
from leapp import reporting
from leapp.libraries.common.rpms import has_package
from leapp.libraries.stdlib import api
-from leapp.models import InstalledRPM, RpmTransactionTasks
+from leapp.models import IfCfg, InstalledRPM, RpmTransactionTasks
FMT_LIST_SEPARATOR = '\n - '
def process():
- SYSCONFIG_DIR = '/etc/sysconfig/network-scripts'
TRUE_VALUES = ['yes', 'true', '1']
TYPE_MAP = {
'ethernet': 'NetworkManager',
@@ -31,48 +30,33 @@ def process():
# we don't do anything.
return
- for f in os.listdir(SYSCONFIG_DIR):
+ for ifcfg in api.consume(IfCfg):
bad_type = False
got_type = None
nm_controlled = True
- path = os.path.join(SYSCONFIG_DIR, f)
-
- if not os.path.isfile(path):
- continue
-
- if f.startswith('rule-') or f.startswith('rule6-'):
+ if ifcfg.rules is not None or ifcfg.rules6 is not None:
if 'NetworkManager-dispatcher-routing-rules' not in rpms_to_install:
rpms_to_install.append('NetworkManager-dispatcher-routing-rules')
continue
- if not f.startswith('ifcfg-'):
+ if os.path.basename(ifcfg.filename) == 'ifcfg-lo':
continue
- if f == 'ifcfg-lo':
- continue
-
- for line in open(path).readlines():
- try:
- (key, value) = line.split('#')[0].strip().split('=')
- except ValueError:
- # We're not interested in lines that are not
- # simple assignments. Play it safe.
- continue
-
- if key in ('TYPE', 'DEVICETYPE'):
+ for prop in ifcfg.properties:
+ if prop.name in ('TYPE', 'DEVICETYPE'):
if got_type is None:
- got_type = value.lower()
- elif got_type != value.lower():
+ got_type = prop.value.lower()
+ elif got_type != prop.value.lower():
bad_type = True
- if key == 'BONDING_MASTER':
+ if prop.name == 'BONDING_MASTER':
if got_type is None:
got_type = 'bond'
elif got_type != 'bond':
bad_type = True
- if key == 'NM_CONTROLLED' and value.lower() not in TRUE_VALUES:
+ if prop.name == 'NM_CONTROLLED' and prop.value.lower() not in TRUE_VALUES:
nm_controlled = False
if got_type in TYPE_MAP:
@@ -84,9 +68,9 @@ def process():
# Don't bother reporting the file for NM_CONTROLLED=no
# if its type is not supportable with NetworkManager anyway
if bad_type is True:
- bad_type_files.append(path)
+ bad_type_files.append(ifcfg.filename)
elif nm_controlled is False:
- not_controlled_files.append(path)
+ not_controlled_files.append(ifcfg.filename)
if bad_type_files:
title = 'Network configuration for unsupported device types detected'
diff --git a/repos/system_upgrade/el8toel9/actors/checkifcfg/tests/unit_test_ifcfg.py b/repos/system_upgrade/el8toel9/actors/checkifcfg/tests/unit_test_ifcfg.py
index 10e2adb1..ddabedf2 100644
--- a/repos/system_upgrade/el8toel9/actors/checkifcfg/tests/unit_test_ifcfg.py
+++ b/repos/system_upgrade/el8toel9/actors/checkifcfg/tests/unit_test_ifcfg.py
@@ -1,147 +1,144 @@
-import mock
-import six
+from leapp.models import IfCfg, IfCfgProperty, InstalledRPM, RPM, RpmTransactionTasks
+from leapp.reporting import Report
+from leapp.utils.report import is_inhibitor
-from leapp import reporting
-from leapp.libraries.actor import checkifcfg_ifcfg as ifcfg
-from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked, produce_mocked
-from leapp.libraries.stdlib import api
-from leapp.models import InstalledRPM, RPM, RpmTransactionTasks
-
-RH_PACKAGER = 'Red Hat, Inc. <http://bugzilla.redhat.com/bugzilla>'
+RH_PACKAGER = "Red Hat, Inc. <http://bugzilla.redhat.com/bugzilla>"
NETWORK_SCRIPTS_RPM = RPM(
- name='network-scripts', version='10.00.17', release='1.el8', epoch='',
- packager=RH_PACKAGER, arch='x86_64',
- pgpsig='RSA/SHA256, Fri 04 Feb 2022 03:32:47 PM CET, Key ID 199e2f91fd431d51'
+ name="network-scripts",
+ version="10.00.17",
+ release="1.el8",
+ epoch="",
+ packager=RH_PACKAGER,
+ arch="x86_64",
+ pgpsig="RSA/SHA256, Fri 04 Feb 2022 03:32:47 PM CET, Key ID 199e2f91fd431d51",
)
NETWORK_MANAGER_RPM = RPM(
- name='NetworkManager', version='1.36.0', release='0.8.el8', epoch='1',
- packager=RH_PACKAGER, arch='x86_64',
- pgpsig='RSA/SHA256, Mon 14 Feb 2022 08:45:37 PM CET, Key ID 199e2f91fd431d51'
-)
-
-INITSCRIPTS_INSTALLED = CurrentActorMocked(
- msgs=[InstalledRPM(items=[NETWORK_SCRIPTS_RPM])]
+ name="NetworkManager",
+ version="1.36.0",
+ release="0.8.el8",
+ epoch="1",
+ packager=RH_PACKAGER,
+ arch="x86_64",
+ pgpsig="RSA/SHA256, Mon 14 Feb 2022 08:45:37 PM CET, Key ID 199e2f91fd431d51",
)
-INITSCRIPTS_AND_NM_INSTALLED = CurrentActorMocked(
- msgs=[InstalledRPM(items=[NETWORK_SCRIPTS_RPM, NETWORK_MANAGER_RPM])]
-)
+INITSCRIPTS_INSTALLED = InstalledRPM(items=[
+ NETWORK_SCRIPTS_RPM
+])
+INITSCRIPTS_AND_NM_INSTALLED = InstalledRPM(items=[
+ NETWORK_SCRIPTS_RPM,
+ NETWORK_MANAGER_RPM
+])
-def test_ifcfg_none(monkeypatch):
+def test_ifcfg_none(current_actor_context):
"""
No report and don't install anything if there are no ifcfg files.
"""
- monkeypatch.setattr(ifcfg.api, 'current_actor', INITSCRIPTS_AND_NM_INSTALLED)
- monkeypatch.setattr(ifcfg.api, "produce", produce_mocked())
- monkeypatch.setattr(ifcfg.os, 'listdir', lambda dummy: ('hello', 'world',))
- monkeypatch.setattr(ifcfg.os.path, 'isfile', lambda dummy: True)
- monkeypatch.setattr(reporting, "create_report", create_report_mocked())
- ifcfg.process()
- assert not reporting.create_report.called
- assert not api.produce.called
+ current_actor_context.feed(INITSCRIPTS_AND_NM_INSTALLED)
+ current_actor_context.run()
+ assert not current_actor_context.consume(Report)
+ assert not current_actor_context.consume(RpmTransactionTasks)
-def test_ifcfg_rule_file(monkeypatch):
+def test_ifcfg_rule_file(current_actor_context):
"""
Install NetworkManager-dispatcher-routing-rules package if there's a
file with ip rules.
"""
- monkeypatch.setattr(ifcfg.api, 'current_actor', INITSCRIPTS_AND_NM_INSTALLED)
- monkeypatch.setattr(ifcfg.api, "produce", produce_mocked())
- monkeypatch.setattr(ifcfg.os, 'listdir', lambda dummy: ('hello', 'world', 'rule-eth0',))
- monkeypatch.setattr(ifcfg.os.path, 'isfile', lambda dummy: True)
- monkeypatch.setattr(reporting, "create_report", create_report_mocked())
- ifcfg.process()
- assert not reporting.create_report.called
- assert api.produce.called
- assert isinstance(api.produce.model_instances[0], RpmTransactionTasks)
- assert api.produce.model_instances[0].to_install == ['NetworkManager-dispatcher-routing-rules']
+ current_actor_context.feed(IfCfg(
+ filename="/NM/ifcfg-eth0",
+ properties=(IfCfgProperty(name="TYPE", value="Ethernet"),),
+ rules=("foo bar baz",),
+ ))
+ current_actor_context.feed(INITSCRIPTS_AND_NM_INSTALLED)
+ current_actor_context.run()
+ assert not current_actor_context.consume(Report)
+ assert len(current_actor_context.consume(RpmTransactionTasks)) == 1
+ rpm_transaction = current_actor_context.consume(RpmTransactionTasks)[0]
+ assert rpm_transaction.to_install == ["NetworkManager-dispatcher-routing-rules"]
-def test_ifcfg_good_type(monkeypatch):
+def test_ifcfg_good_type(current_actor_context):
"""
No report if there's an ifcfg file that would work with NetworkManager.
Make sure NetworkManager itself is installed though.
"""
- mock_config = mock.mock_open(read_data="TYPE=Ethernet")
- with mock.patch("builtins.open" if six.PY3 else "__builtin__.open", mock_config) as mock_ifcfg:
- monkeypatch.setattr(ifcfg.api, 'current_actor', INITSCRIPTS_AND_NM_INSTALLED)
- monkeypatch.setattr(ifcfg.api, "produce", produce_mocked())
- monkeypatch.setattr(ifcfg.os, 'listdir', lambda dummy: ('hello', 'world', 'ifcfg-eth0', 'ifcfg-lo',))
- monkeypatch.setattr(ifcfg.os.path, 'isfile', lambda dummy: True)
- monkeypatch.setattr(reporting, "create_report", create_report_mocked())
- ifcfg.process()
- mock_ifcfg.assert_called_once_with('/etc/sysconfig/network-scripts/ifcfg-eth0')
- assert not reporting.create_report.called
- assert api.produce.called
- assert isinstance(api.produce.model_instances[0], RpmTransactionTasks)
- assert api.produce.model_instances[0].to_install == ['NetworkManager']
-
-
-def test_ifcfg_not_controlled(monkeypatch):
+ current_actor_context.feed(IfCfg(
+ filename="/NM/ifcfg-lo",
+ properties=()
+ ))
+ current_actor_context.feed(IfCfg(
+ filename="/NM/ifcfg-eth0",
+ properties=(IfCfgProperty(name="TYPE", value="Ethernet"),)
+ ))
+ current_actor_context.feed(INITSCRIPTS_AND_NM_INSTALLED)
+ current_actor_context.run()
+ assert not current_actor_context.consume(Report)
+ assert len(current_actor_context.consume(RpmTransactionTasks)) == 1
+ rpm_transaction = current_actor_context.consume(RpmTransactionTasks)[0]
+ assert rpm_transaction.to_install == ["NetworkManager"]
+
+
+def test_ifcfg_not_controlled(current_actor_context):
"""
Report if there's a NM_CONTROLLED=no file.
"""
- mock_config = mock.mock_open(read_data="TYPE=Ethernet\nNM_CONTROLLED=no")
- with mock.patch("builtins.open" if six.PY3 else "__builtin__.open", mock_config) as mock_ifcfg:
- monkeypatch.setattr(ifcfg.api, 'current_actor', INITSCRIPTS_INSTALLED)
- monkeypatch.setattr(ifcfg.api, "produce", produce_mocked())
- monkeypatch.setattr(ifcfg.os, 'listdir', lambda dummy: ('hello', 'world', 'ifcfg-eth0',))
- monkeypatch.setattr(ifcfg.os.path, 'isfile', lambda dummy: True)
- monkeypatch.setattr(reporting, "create_report", create_report_mocked())
- ifcfg.process()
- mock_ifcfg.assert_called_once_with('/etc/sysconfig/network-scripts/ifcfg-eth0')
- assert reporting.create_report.called
- assert 'disabled NetworkManager' in reporting.create_report.report_fields['title']
- assert api.produce.called
-
-
-def test_ifcfg_unknown_type(monkeypatch):
+ current_actor_context.feed(IfCfg(
+ filename="/NM/ifcfg-eth0",
+ properties=(
+ IfCfgProperty(name="TYPE", value="Ethernet"),
+ IfCfgProperty(name="NM_CONTROLLED", value="no"),
+ )
+ ))
+ current_actor_context.feed(INITSCRIPTS_INSTALLED)
+ current_actor_context.run()
+ assert len(current_actor_context.consume(Report)) == 1
+ report_fields = current_actor_context.consume(Report)[0].report
+ assert is_inhibitor(report_fields)
+ assert "disabled NetworkManager" in report_fields['title']
+
+
+def test_ifcfg_unknown_type(current_actor_context):
"""
Report if there's configuration for a type we don't recognize.
"""
- mock_config = mock.mock_open(read_data="TYPE=AvianCarrier")
- with mock.patch("builtins.open" if six.PY3 else "__builtin__.open", mock_config) as mock_ifcfg:
- monkeypatch.setattr(ifcfg.api, 'current_actor', INITSCRIPTS_AND_NM_INSTALLED)
- monkeypatch.setattr(ifcfg.api, "produce", produce_mocked())
- monkeypatch.setattr(ifcfg.os, 'listdir', lambda dummy: ('hello', 'world', 'ifcfg-pigeon0',))
- monkeypatch.setattr(ifcfg.os.path, 'isfile', lambda dummy: True)
- monkeypatch.setattr(reporting, "create_report", create_report_mocked())
- ifcfg.process()
- mock_ifcfg.assert_called_once_with('/etc/sysconfig/network-scripts/ifcfg-pigeon0')
- assert reporting.create_report.called
- assert 'unsupported device types' in reporting.create_report.report_fields['title']
- assert not api.produce.called
-
-
-def test_ifcfg_install_subpackage(monkeypatch):
+ current_actor_context.feed(IfCfg(
+ filename="/NM/ifcfg-pigeon0",
+ properties=(IfCfgProperty(name="TYPE", value="AvianCarrier"),)
+ ))
+ current_actor_context.feed(INITSCRIPTS_AND_NM_INSTALLED)
+ current_actor_context.run()
+ assert len(current_actor_context.consume(Report)) == 1
+ report_fields = current_actor_context.consume(Report)[0].report
+ assert is_inhibitor(report_fields)
+ assert "unsupported device types" in report_fields['title']
+
+
+def test_ifcfg_install_subpackage(current_actor_context):
"""
Install NetworkManager-team if there's a team connection and also
ensure NetworkManager-config-server is installed if NetworkManager
was not there.
"""
- mock_config = mock.mock_open(read_data="TYPE=Team")
- with mock.patch("builtins.open" if six.PY3 else "__builtin__.open", mock_config) as mock_ifcfg:
- monkeypatch.setattr(ifcfg.api, 'current_actor', INITSCRIPTS_INSTALLED)
- monkeypatch.setattr(ifcfg.api, "produce", produce_mocked())
- monkeypatch.setattr(ifcfg.os, 'listdir', lambda dummy: ('ifcfg-team0',))
- monkeypatch.setattr(ifcfg.os.path, 'isfile', lambda dummy: True)
- monkeypatch.setattr(reporting, "create_report", create_report_mocked())
- ifcfg.process()
- mock_ifcfg.assert_called_once_with('/etc/sysconfig/network-scripts/ifcfg-team0')
- assert not reporting.create_report.called
- assert api.produce.called
- assert isinstance(api.produce.model_instances[0], RpmTransactionTasks)
- assert api.produce.model_instances[0].to_install == [
- 'NetworkManager-team',
- 'NetworkManager-config-server'
- ]
+ current_actor_context.feed(IfCfg(
+ filename="/NM/ifcfg-team0",
+ properties=(IfCfgProperty(name="TYPE", value="Team"),)
+ ))
+ current_actor_context.feed(INITSCRIPTS_INSTALLED)
+ current_actor_context.run()
+ assert not current_actor_context.consume(Report)
+ assert len(current_actor_context.consume(RpmTransactionTasks)) == 1
+ rpm_transaction = current_actor_context.consume(RpmTransactionTasks)[0]
+ assert rpm_transaction.to_install == [
+ "NetworkManager-team",
+ "NetworkManager-config-server",
+ ]
--
2.39.0

View File

@ -1,58 +0,0 @@
From aed1a9cafbebcdcfa463b15e52f53a6ac7730f01 Mon Sep 17 00:00:00 2001
From: Lubomir Rintel <lkundrak@v3.sk>
Date: Mon, 26 Sep 2022 11:59:06 +0200
Subject: [PATCH 72/75] Make IfCfgScanner accept simple quoted values
Turns out people actually use those.
https://bugzilla.redhat.com/show_bug.cgi?id=2111691
---
.../actors/ifcfgscanner/libraries/ifcfgscanner.py | 6 ++++++
.../actors/ifcfgscanner/tests/unit_test_ifcfgscanner.py | 8 ++++++--
2 files changed, 12 insertions(+), 2 deletions(-)
diff --git a/repos/system_upgrade/el8toel9/actors/ifcfgscanner/libraries/ifcfgscanner.py b/repos/system_upgrade/el8toel9/actors/ifcfgscanner/libraries/ifcfgscanner.py
index cfc385dc..683327b3 100644
--- a/repos/system_upgrade/el8toel9/actors/ifcfgscanner/libraries/ifcfgscanner.py
+++ b/repos/system_upgrade/el8toel9/actors/ifcfgscanner/libraries/ifcfgscanner.py
@@ -28,6 +28,12 @@ def process_ifcfg(filename, secrets=False):
# simple assignments. Play it safe.
continue
+ # Deal with simple quoting. We don't expand anything, nor do
+ # multiline strings or anything of that sort.
+ if value is not None and len(value) > 1 and value[0] == value[-1]:
+ if value.startswith('"') or value.startswith("'"):
+ value = value[1:-1]
+
properties.append(IfCfgProperty(name=name, value=value))
return properties
diff --git a/repos/system_upgrade/el8toel9/actors/ifcfgscanner/tests/unit_test_ifcfgscanner.py b/repos/system_upgrade/el8toel9/actors/ifcfgscanner/tests/unit_test_ifcfgscanner.py
index f5e3056a..d3b4846f 100644
--- a/repos/system_upgrade/el8toel9/actors/ifcfgscanner/tests/unit_test_ifcfgscanner.py
+++ b/repos/system_upgrade/el8toel9/actors/ifcfgscanner/tests/unit_test_ifcfgscanner.py
@@ -55,8 +55,8 @@ def test_ifcfg1(monkeypatch):
TYPE=Wireless # Some comment
# Another comment
ESSID=wep1
- NAME=wep1
- MODE=Managed
+ NAME="wep1"
+ MODE='Managed' # comment
WEP_KEY_FLAGS=ask
SECURITYMODE=open
DEFAULTKEY=1
@@ -81,6 +81,10 @@ def test_ifcfg1(monkeypatch):
assert ifcfg.properties[0].value == "Wireless"
assert ifcfg.properties[1].name == "ESSID"
assert ifcfg.properties[1].value == "wep1"
+ assert ifcfg.properties[2].name == "NAME"
+ assert ifcfg.properties[2].value == "wep1"
+ assert ifcfg.properties[3].name == "MODE"
+ assert ifcfg.properties[3].value == "Managed"
def test_ifcfg2(monkeypatch):
--
2.39.0

View File

@ -1,168 +0,0 @@
From 00d06d5217848d384e4b70ebf3c5eb5e4f7fa3e6 Mon Sep 17 00:00:00 2001
From: PeterMocary <petermocary@gmail.com>
Date: Thu, 25 Aug 2022 18:04:08 +0200
Subject: [PATCH 73/75] Improve error message when more space is needed for the
upgrade
When there was not enough space, leapp would output misleading error message propagated from dnf. This error message was replaced and includes a solution article.
---
.../actors/dnfupgradetransaction/actor.py | 7 +++-
.../libraries/userspacegen.py | 16 ++++++++
.../common/libraries/dnfplugin.py | 39 +++++++++++++------
3 files changed, 49 insertions(+), 13 deletions(-)
diff --git a/repos/system_upgrade/common/actors/dnfupgradetransaction/actor.py b/repos/system_upgrade/common/actors/dnfupgradetransaction/actor.py
index 296e6201..2e069296 100644
--- a/repos/system_upgrade/common/actors/dnfupgradetransaction/actor.py
+++ b/repos/system_upgrade/common/actors/dnfupgradetransaction/actor.py
@@ -11,7 +11,8 @@ from leapp.models import (
StorageInfo,
TargetUserSpaceInfo,
TransactionCompleted,
- UsedTargetRepositories
+ UsedTargetRepositories,
+ XFSPresence
)
from leapp.tags import IPUWorkflowTag, RPMUpgradePhaseTag
@@ -33,6 +34,7 @@ class DnfUpgradeTransaction(Actor):
StorageInfo,
TargetUserSpaceInfo,
UsedTargetRepositories,
+ XFSPresence
)
produces = (TransactionCompleted,)
tags = (RPMUpgradePhaseTag, IPUWorkflowTag)
@@ -48,10 +50,11 @@ class DnfUpgradeTransaction(Actor):
plugin_info = list(self.consume(DNFPluginTask))
tasks = next(self.consume(FilteredRpmTransactionTasks), FilteredRpmTransactionTasks())
target_userspace_info = next(self.consume(TargetUserSpaceInfo), None)
+ xfs_info = next(self.consume(XFSPresence), XFSPresence())
dnfplugin.perform_transaction_install(
tasks=tasks, used_repos=used_repos, storage_info=storage_info, target_userspace_info=target_userspace_info,
- plugin_info=plugin_info
+ plugin_info=plugin_info, xfs_info=xfs_info
)
self.produce(TransactionCompleted())
userspace = next(self.consume(TargetUserSpaceInfo), None)
diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
index 6335eb5b..3857e2f2 100644
--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
+++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
@@ -206,6 +206,22 @@ def prepare_target_userspace(context, userspace_dir, enabled_repos, packages):
message = 'Unable to install RHEL {} userspace packages.'.format(target_major_version)
details = {'details': str(exc), 'stderr': exc.stderr}
+ xfs_info = next(api.consume(XFSPresence), XFSPresence())
+ if 'more space needed on the' in exc.stderr:
+ # The stderr contains this error summary:
+ # Disk Requirements:
+ # At least <size> more space needed on the <path> filesystem.
+
+ article_section = 'Generic case'
+ if xfs_info.present and xfs_info.without_ftype:
+ article_section = 'XFS ftype=0 case'
+
+ message = ('There is not enough space on the file system hosting /var/lib/leapp directory '
+ 'to extract the packages.')
+ details = {'hint': "Please follow the instructions in the '{}' section of the article at: "
+ "link: https://access.redhat.com/solutions/5057391".format(article_section)}
+ raise StopActorExecutionError(message=message, details=details)
+
# If a proxy was set in dnf config, it should be the reason why dnf
# failed since leapp does not support updates behind proxy yet.
for manager_info in api.consume(PkgManagerInfo):
diff --git a/repos/system_upgrade/common/libraries/dnfplugin.py b/repos/system_upgrade/common/libraries/dnfplugin.py
index 7f541c18..57b25909 100644
--- a/repos/system_upgrade/common/libraries/dnfplugin.py
+++ b/repos/system_upgrade/common/libraries/dnfplugin.py
@@ -146,7 +146,8 @@ def backup_debug_data(context):
api.current_logger().warning('Failed to copy debugdata. Message: {}'.format(str(e)), exc_info=True)
-def _transaction(context, stage, target_repoids, tasks, plugin_info, test=False, cmd_prefix=None, on_aws=False):
+def _transaction(context, stage, target_repoids, tasks, plugin_info, xfs_info,
+ test=False, cmd_prefix=None, on_aws=False):
"""
Perform the actual DNF rpm download via our DNF plugin
"""
@@ -219,10 +220,25 @@ def _transaction(context, stage, target_repoids, tasks, plugin_info, test=False,
)
except CalledProcessError as e:
api.current_logger().error('DNF execution failed: ')
- raise StopActorExecutionError(
- message='DNF execution failed with non zero exit code.\nSTDOUT:\n{stdout}\nSTDERR:\n{stderr}'.format(
- stdout=e.stdout, stderr=e.stderr)
- )
+
+ message = 'DNF execution failed with non zero exit code.'
+ details = {'STDOUT': e.stdout, 'STDERR': e.stderr}
+
+ if 'more space needed on the' in e.stderr:
+ # The stderr contains this error summary:
+ # Disk Requirements:
+ # At least <size> more space needed on the <path> filesystem.
+
+ article_section = 'Generic case'
+ if xfs_info.present and xfs_info.without_ftype:
+ article_section = 'XFS ftype=0 case'
+
+ message = ('There is not enough space on the file system hosting /var/lib/leapp directory '
+ 'to extract the packages.')
+ details = {'hint': "Please follow the instructions in the '{}' section of the article at: "
+ "link: https://access.redhat.com/solutions/5057391".format(article_section)}
+
+ raise StopActorExecutionError(message=message, details=details)
finally:
if stage == 'check':
backup_debug_data(context=context)
@@ -294,7 +310,7 @@ def install_initramdisk_requirements(packages, target_userspace_info, used_repos
context.call(cmd, env=env)
-def perform_transaction_install(target_userspace_info, storage_info, used_repos, tasks, plugin_info):
+def perform_transaction_install(target_userspace_info, storage_info, used_repos, tasks, plugin_info, xfs_info):
"""
Performs the actual installation with the DNF rhel-upgrade plugin using the target userspace
"""
@@ -353,8 +369,8 @@ def perform_transaction_install(target_userspace_info, storage_info, used_repos,
if get_target_major_version() == '9':
_rebuild_rpm_db(context, root='/installroot')
_transaction(
- context=context, stage=stage, target_repoids=target_repoids, plugin_info=plugin_info, tasks=tasks,
- cmd_prefix=cmd_prefix
+ context=context, stage='upgrade', target_repoids=target_repoids, plugin_info=plugin_info,
+ xfs_info=xfs_info, tasks=tasks, cmd_prefix=cmd_prefix
)
# we have to ensure the leapp packages will stay untouched even after the
@@ -400,7 +416,8 @@ def perform_transaction_check(target_userspace_info,
dnfconfig.exclude_leapp_rpms(context, disable_plugins)
_transaction(
- context=context, stage='check', target_repoids=target_repoids, plugin_info=plugin_info, tasks=tasks
+ context=context, stage='check', target_repoids=target_repoids, plugin_info=plugin_info, xfs_info=xfs_info,
+ tasks=tasks
)
@@ -434,7 +451,7 @@ def perform_rpm_download(target_userspace_info,
dnfconfig.exclude_leapp_rpms(context, disable_plugins)
_transaction(
context=context, stage='download', target_repoids=target_repoids, plugin_info=plugin_info, tasks=tasks,
- test=True, on_aws=on_aws
+ test=True, on_aws=on_aws, xfs_info=xfs_info
)
@@ -457,5 +474,5 @@ def perform_dry_run(target_userspace_info,
apply_workarounds(overlay.nspawn())
_transaction(
context=context, stage='dry-run', target_repoids=target_repoids, plugin_info=plugin_info, tasks=tasks,
- test=True, on_aws=on_aws
+ test=True, on_aws=on_aws, xfs_info=xfs_info
)
--
2.39.0

View File

@ -1,46 +0,0 @@
From b5a6b83044fdbc24fd8919cf6935f3a93f4f67e2 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Fri, 27 Jan 2023 12:22:37 +0100
Subject: [PATCH 74/75] Do not create python3 .pyc files
After the in-place upgrade and removal of `leapp` packages, there are
leftover `*cpython.pyc` files in: `/usr/lib/python2.7/site-packages/leapp/`
and `/usr/share/leapp-repository/`.
Let's avoid this by not creating them in the first place.
Jira ref.: OAMG-7641
---
.../files/dracut/85sys-upgrade-redhat/do-upgrade.sh | 2 +-
.../actors/preparepythonworkround/libraries/workaround.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
index 04540c1d..491b85ec 100755
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
@@ -266,7 +266,7 @@ do_upgrade() {
# all FSTAB partitions. As mount was working before, hopefully will
# work now as well. Later this should be probably modified as we will
# need to handle more stuff around storage at all.
- /usr/bin/systemd-nspawn $NSPAWN_OPTS -D "$NEWROOT" /usr/bin/bash -c "mount -a; /usr/bin/python3 $LEAPP3_BIN upgrade --resume $args"
+ /usr/bin/systemd-nspawn $NSPAWN_OPTS -D "$NEWROOT" /usr/bin/bash -c "mount -a; /usr/bin/python3 -B $LEAPP3_BIN upgrade --resume $args"
rv=$?
fi
diff --git a/repos/system_upgrade/common/actors/preparepythonworkround/libraries/workaround.py b/repos/system_upgrade/common/actors/preparepythonworkround/libraries/workaround.py
index de3079ee..255121dd 100644
--- a/repos/system_upgrade/common/actors/preparepythonworkround/libraries/workaround.py
+++ b/repos/system_upgrade/common/actors/preparepythonworkround/libraries/workaround.py
@@ -31,7 +31,7 @@ def apply_python3_workaround():
os.symlink(_get_orig_leapp_path(), leapp_lib_symlink_path)
with open(py3_leapp, 'w') as f:
f_content = [
- '#!/usr/bin/python3',
+ '#!/usr/bin/python3 -B',
'import sys',
'sys.path.append(\'{}\')'.format(LEAPP_HOME),
'',
--
2.39.0

View File

@ -1,247 +0,0 @@
From f7c82a2468c1dae62d3beb94a3b1271b3b396ea5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Peter=20Mo=C4=8D=C3=A1ry?=
<68905580+PeterMocary@users.noreply.github.com>
Date: Fri, 27 Jan 2023 13:53:59 +0100
Subject: [PATCH 75/75] Add mapping based on the installed content (#967)
* Add mapping based on the installed content
Repositories covered by repositories mapping, that are used by installed
RPM packages, are used to evaluate expected target repositories on top
of evaluating the target repositories from enabled repositories. This
covers repositories which might be disabled when upgrading, but should
be used to upgrade installed packages during the upgrade.
* Cover with a unit test
Co-authored-by: Inessa Vasilevskaya <ivasilev@redhat.com>
---
.../common/actors/setuptargetrepos/actor.py | 2 +
.../libraries/setuptargetrepos.py | 57 ++++++++++++-------
.../tests/test_setuptargetrepos.py | 40 +++++++++++--
3 files changed, 75 insertions(+), 24 deletions(-)
diff --git a/repos/system_upgrade/common/actors/setuptargetrepos/actor.py b/repos/system_upgrade/common/actors/setuptargetrepos/actor.py
index 47724f0d..767fa00c 100644
--- a/repos/system_upgrade/common/actors/setuptargetrepos/actor.py
+++ b/repos/system_upgrade/common/actors/setuptargetrepos/actor.py
@@ -2,6 +2,7 @@ from leapp.actors import Actor
from leapp.libraries.actor import setuptargetrepos
from leapp.models import (
CustomTargetRepository,
+ InstalledRPM,
RepositoriesBlacklisted,
RepositoriesFacts,
RepositoriesMapping,
@@ -25,6 +26,7 @@ class SetupTargetRepos(Actor):
name = 'setuptargetrepos'
consumes = (CustomTargetRepository,
+ InstalledRPM,
RepositoriesSetupTasks,
RepositoriesMapping,
RepositoriesFacts,
diff --git a/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py b/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py
index 3f34aedb..4b8405d0 100644
--- a/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py
+++ b/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py
@@ -4,6 +4,7 @@ from leapp.libraries.common.config.version import get_source_major_version
from leapp.libraries.stdlib import api
from leapp.models import (
CustomTargetRepository,
+ InstalledRPM,
RepositoriesBlacklisted,
RepositoriesFacts,
RepositoriesMapping,
@@ -20,7 +21,6 @@ def _get_enabled_repoids():
"""
Collects repoids of all enabled repositories on the source system.
- :param repositories_facts: Iterable of RepositoriesFacts containing info about repositories on the source system.
:returns: Set of all enabled repository IDs present on the source system.
:rtype: Set[str]
"""
@@ -33,6 +33,14 @@ def _get_enabled_repoids():
return enabled_repoids
+def _get_repoids_from_installed_packages():
+ repoids_from_installed_packages = set()
+ for installed_packages in api.consume(InstalledRPM):
+ for rpm_package in installed_packages.items:
+ repoids_from_installed_packages.add(rpm_package.repository)
+ return repoids_from_installed_packages
+
+
def _get_blacklisted_repoids():
repos_blacklisted = set()
for blacklist in api.consume(RepositoriesBlacklisted):
@@ -58,16 +66,6 @@ def _get_used_repo_dict():
return used
-def _setup_repomap_handler(src_repoids):
- repo_mappig_msg = next(api.consume(RepositoriesMapping), RepositoriesMapping())
- rhui_info = next(api.consume(RHUIInfo), RHUIInfo(provider=''))
- repomap = setuptargetrepos_repomap.RepoMapDataHandler(repo_mappig_msg, cloud_provider=rhui_info.provider)
- # TODO(pstodulk): what about skip this completely and keep the default 'ga'..?
- default_channels = setuptargetrepos_repomap.get_default_repository_channels(repomap, src_repoids)
- repomap.set_default_channels(default_channels)
- return repomap
-
-
def _get_mapped_repoids(repomap, src_repoids):
mapped_repoids = set()
src_maj_ver = get_source_major_version()
@@ -78,24 +76,40 @@ def _get_mapped_repoids(repomap, src_repoids):
def process():
- # load all data / messages
+ # Load relevant data from messages
used_repoids_dict = _get_used_repo_dict()
enabled_repoids = _get_enabled_repoids()
excluded_repoids = _get_blacklisted_repoids()
custom_repos = _get_custom_target_repos()
+ repoids_from_installed_packages = _get_repoids_from_installed_packages()
- # TODO(pstodulk): isn't that a potential issue that we map just enabled repos
- # instead of enabled + used repos??
- # initialise basic data
- repomap = _setup_repomap_handler(enabled_repoids)
- mapped_repoids = _get_mapped_repoids(repomap, enabled_repoids)
- skipped_repoids = enabled_repoids & set(used_repoids_dict.keys()) - mapped_repoids
+ # Setup repomap handler
+ repo_mappig_msg = next(api.consume(RepositoriesMapping), RepositoriesMapping())
+ rhui_info = next(api.consume(RHUIInfo), RHUIInfo(provider=''))
+ repomap = setuptargetrepos_repomap.RepoMapDataHandler(repo_mappig_msg, cloud_provider=rhui_info.provider)
+
+ # Filter set of repoids from installed packages so that it contains only repoids with mapping
+ repoids_from_installed_packages_with_mapping = _get_mapped_repoids(repomap, repoids_from_installed_packages)
+
+ # Set of repoid that are going to be mapped to target repoids containing enabled repoids and also repoids from
+ # installed packages that have mapping to prevent missing repositories that are disabled during the upgrade, but
+ # can be used to upgrade installed packages.
+ repoids_to_map = enabled_repoids.union(repoids_from_installed_packages_with_mapping)
- # Now get the info what should be the target RHEL repositories
- expected_repos = repomap.get_expected_target_pesid_repos(enabled_repoids)
+ # Set default repository channels for the repomap
+ # TODO(pstodulk): what about skip this completely and keep the default 'ga'..?
+ default_channels = setuptargetrepos_repomap.get_default_repository_channels(repomap, repoids_to_map)
+ repomap.set_default_channels(default_channels)
+
+ # Get target RHEL repoids based on the repomap
+ expected_repos = repomap.get_expected_target_pesid_repos(repoids_to_map)
target_rhel_repoids = set()
for target_pesid, target_pesidrepo in expected_repos.items():
if not target_pesidrepo:
+ # NOTE this could happen only for enabled repositories part of the set,
+ # since the repositories collected from installed packages already contain
+ # only mappable repoids.
+
# With the original repomap data, this should not happen (this should
# currently point to a problem in our data
# TODO(pstodulk): add report? inhibitor? what should be in the report?
@@ -126,6 +140,9 @@ def process():
custom_repos = [repo for repo in custom_repos if repo.repoid not in excluded_repoids]
custom_repos = sorted(custom_repos, key=lambda x: x.repoid)
+ # produce message about skipped repositories
+ enabled_repoids_with_mapping = _get_mapped_repoids(repomap, enabled_repoids)
+ skipped_repoids = enabled_repoids & set(used_repoids_dict.keys()) - enabled_repoids_with_mapping
if skipped_repoids:
pkgs = set()
for repo in skipped_repoids:
diff --git a/repos/system_upgrade/common/actors/setuptargetrepos/tests/test_setuptargetrepos.py b/repos/system_upgrade/common/actors/setuptargetrepos/tests/test_setuptargetrepos.py
index 7fd626c7..ac7f49ec 100644
--- a/repos/system_upgrade/common/actors/setuptargetrepos/tests/test_setuptargetrepos.py
+++ b/repos/system_upgrade/common/actors/setuptargetrepos/tests/test_setuptargetrepos.py
@@ -6,6 +6,7 @@ from leapp.libraries.common.testutils import CurrentActorMocked, produce_mocked
from leapp.libraries.stdlib import api
from leapp.models import (
CustomTargetRepository,
+ InstalledRPM,
PESIDRepositoryEntry,
RepoMapEntry,
RepositoriesBlacklisted,
@@ -14,9 +15,17 @@ from leapp.models import (
RepositoriesSetupTasks,
RepositoryData,
RepositoryFile,
+ RPM,
TargetRepositories
)
+RH_PACKAGER = 'Red Hat, Inc. <http://bugzilla.redhat.com/bugzilla>'
+
+
+def mock_package(pkg_name, repository=None):
+ return RPM(name=pkg_name, version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch',
+ pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51', repository=repository)
+
def test_minimal_execution(monkeypatch):
"""
@@ -103,9 +112,13 @@ def test_repos_mapping(monkeypatch):
repos_files = [RepositoryFile(file='/etc/yum.repos.d/redhat.repo', data=repos_data)]
facts = RepositoriesFacts(repositories=repos_files)
+ installed_rpms = InstalledRPM(
+ items=[mock_package('foreman', 'rhel-7-for-x86_64-satellite-extras-rpms'),
+ mock_package('foreman-proxy', 'nosuch-rhel-7-for-x86_64-satellite-extras-rpms')])
repomap = RepositoriesMapping(
- mapping=[RepoMapEntry(source='rhel7-base', target=['rhel8-baseos', 'rhel8-appstream', 'rhel8-blacklist'])],
+ mapping=[RepoMapEntry(source='rhel7-base', target=['rhel8-baseos', 'rhel8-appstream', 'rhel8-blacklist']),
+ RepoMapEntry(source='rhel7-satellite-extras', target=['rhel8-satellite-extras'])],
repositories=[
PESIDRepositoryEntry(
pesid='rhel7-base',
@@ -143,12 +156,30 @@ def test_repos_mapping(monkeypatch):
channel='ga',
rhui=''
),
+ PESIDRepositoryEntry(
+ pesid='rhel7-satellite-extras',
+ repoid='rhel-7-for-x86_64-satellite-extras-rpms',
+ major_version='7',
+ arch='x86_64',
+ repo_type='rpm',
+ channel='ga',
+ rhui=''
+ ),
+ PESIDRepositoryEntry(
+ pesid='rhel8-satellite-extras',
+ repoid='rhel-8-for-x86_64-satellite-extras-rpms',
+ major_version='8',
+ arch='x86_64',
+ repo_type='rpm',
+ channel='ga',
+ rhui=''
+ ),
]
)
repos_blacklisted = RepositoriesBlacklisted(repoids=['rhel-8-blacklisted-rpms'])
- msgs = [facts, repomap, repos_blacklisted]
+ msgs = [facts, repomap, repos_blacklisted, installed_rpms]
monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs))
monkeypatch.setattr(api, 'produce', produce_mocked())
@@ -157,8 +188,9 @@ def test_repos_mapping(monkeypatch):
assert api.produce.called
rhel_repos = api.produce.model_instances[0].rhel_repos
- assert len(rhel_repos) == 2
+ assert len(rhel_repos) == 3
produced_rhel_repoids = {repo.repoid for repo in rhel_repos}
- expected_rhel_repoids = {'rhel-8-for-x86_64-baseos-htb-rpms', 'rhel-8-for-x86_64-appstream-htb-rpms'}
+ expected_rhel_repoids = {'rhel-8-for-x86_64-baseos-htb-rpms', 'rhel-8-for-x86_64-appstream-htb-rpms',
+ 'rhel-8-for-x86_64-satellite-extras-rpms'}
assert produced_rhel_repoids == expected_rhel_repoids
--
2.39.0

View File

@ -41,8 +41,8 @@ py2_byte_compile "%1" "%2"}
# RHEL 8+ packages to be consistent with other leapp projects in future.
Name: leapp-repository
Version: 0.17.0
Release: 8%{?dist}
Version: 0.18.0
Release: 1%{?dist}
Summary: Repositories for leapp
License: ASL 2.0
@ -60,79 +60,6 @@ BuildArch: noarch
# See: https://bugzilla.redhat.com/show_bug.cgi?id=2030627
Patch0004: 0004-Enforce-the-removal-of-rubygem-irb-do-not-install-it.patch
# TMP patches - remove them when rebase
Patch0005: 0005-Disable-isort-check-for-deprecated-imports.patch
Patch0006: 0006-Add-codespell-GitHub-actions-workflow-for-spell-chec.patch
Patch0007: 0007-Mini-updateds-in-the-spec-files.patch
Patch0008: 0008-CheckVDO-Ask-user-only-faiulres-and-undetermined-dev.patch
Patch0009: 0009-Add-actors-for-checking-and-setting-systemd-services.patch
Patch0010: 0010-migratentp-Replace-reports-with-log-messages.patch
Patch0011: 0011-migratentp-Catch-more-specific-exception-from-ntp2ch.patch
Patch0012: 0012-migratentp-Don-t-raise-StopActorExecutionError.patch
Patch0013: 0013-Make-shellcheck-happy-again.patch
Patch0014: 0014-actor-firewalld-support-0.8.z.patch
Patch0015: 0015-Scanpkgmanager-detect-proxy-configuration.patch
Patch0016: 0016-Merge-of-the-yumconfigscanner-actor-into-the-scanpkg.patch
Patch0017: 0017-firewalldcheckallowzonedrifting-Fix-the-remediation-.patch
Patch0018: 0018-rhui-azure-sap-apps-consider-RHUI-client-as-signed.patch
Patch0019: 0019-rhui-azure-sap-apps-handle-EUS-SAP-Apps-content-on-R.patch
Patch0020: 0020-checksaphana-Move-to-common.patch
Patch0021: 0021-checksaphana-Adjust-for-el7toel8-and-el8toel9-requir.patch
Patch0022: 0022-Add-an-actor-that-enables-device_cio_free.service-on.patch
Patch0023: 0023-Add-the-scanzfcp-actor-handling-the-IPU-with-ZFCP-s3.patch
Patch0024: 0024-ziplconverttoblscfg-bind-mount-dev-boot-into-the-use.patch
Patch0025: 0025-Provide-common-information-about-systemd.patch
Patch0026: 0026-systemd-Move-enable-disable-reenable-_unit-functions.patch
Patch0027: 0027-Fix-broken-or-incorrect-systemd-symlinks.patch
Patch0028: 0028-Add-check-for-systemd-symlinks-broken-before-the-upg.patch
Patch0029: 0029-checksystemdservicestasks-update-docstrings-extend-t.patch
Patch0030: 0030-Support-IPU-using-a-target-RHEL-installation-ISO-ima.patch
Patch0031: 0031-Add-prod-certs-for-8.8-9.2-Beta-GA.patch
Patch0032: 0032-Introduce-new-upgrade-paths-8.8-9.2.patch
Patch0033: 0033-testutils-Implement-get_common_tool_path-method.patch
Patch0034: 0034-targetuserspacecreator-improve-copy-of-etc-pki-rpm-g.patch
Patch0035: 0035-DNFWorkaround-extend-the-model-by-script_args.patch
Patch0036: 0036-Introduce-theimportrpmgpgkeys-tool-script.patch
Patch0037: 0037-Enable-gpgcheck-during-IPU-add-nogpgcheck-CLI-option.patch
Patch0038: 0038-missinggpgkey-polish-the-report-msg.patch
Patch0039: 0039-Fix-cephvolume-actor.patch
Patch0040: 0040-Include-also-Leapp-RHUI-special-rpms-in-the-whitelis.patch
Patch0041: 0041-POC-initram-networking.patch
Patch0042: 0042-Skip-check-nfs-actor-if-env-var-is-set.patch
Patch0043: 0043-Apply-changes-after-rebase-and-do-refactor.patch
Patch0044: 0044-Tune-tmt-tests-regexes-to-align-with-QE-automation.patch
Patch0045: 0045-Change-rerun-all-to-rerun-sst.patch
Patch0046: 0046-Do-not-run-rhsm-tests-in-upstream.patch
Patch0047: 0047-Set-SOURCE_RELEASE-env-var.patch
Patch0048: 0048-Packit-build-SRPM-in-Copr.patch
Patch0049: 0049-ensure-Satellite-metapackages-are-installed-after-up.patch
Patch0050: 0050-Makefile-filter-out-removed-files-for-linting.patch
Patch0051: 0051-Enable-upgrades-on-s390x-when-boot-is-part-of-rootfs.patch
Patch0052: 0052-Add-leapp-debug-tools-to-initramfs.patch
Patch0053: 0053-Add-autosourcing.patch
Patch0054: 0054-Replace-tabs-with-spaces-in-the-dracut-module.patch
Patch0055: 0055-ci-lint-Add-differential-shellcheck-GitHub-action.patch
Patch0056: 0056-Propagate-TEST_PATHS-to-test_container-targets.patch
Patch0057: 0057-Ignore-external-accounts-in-etc-passwd.patch
Patch0058: 0058-pes_events_scanner-prefilter-problematic-events-and-.patch
Patch0059: 0059-Enable-disabling-dnf-plugins-in-the-dnfcnfig-library.patch
Patch0060: 0060-Prevent-failed-upgrade-from-restarting-in-initramfs-.patch
Patch0061: 0061-BZ-2142270-run-reindexdb-to-fix-issues-due-to-new-lo.patch
Patch0062: 0062-Improve-the-hint-in-peseventsscanner-for-unknown-rep.patch
Patch0063: 0063-Ensure-a-baseos-and-appstream-repos-are-available-wh.patch
Patch0064: 0064-Fix-the-check-of-memory-RAM-limits.patch
Patch0065: 0065-Add-IfCfg-model.patch
Patch0066: 0066-Add-IfCfgScanner-actor.patch
Patch0067: 0067-Add-NetworkManagerConnection-model.patch
Patch0068: 0068-Add-NetworkManagerConnectionScanner-actor.patch
Patch0069: 0069-Install-python3-gobject-base-and-NetworkManager-libn.patch
Patch0070: 0070-Make-CheckNetworkDeprecations-consume-IfCfg-and-Netw.patch
Patch0071: 0071-Make-CheckIfCfg-consume-IfCfg.patch
Patch0072: 0072-Make-IfCfgScanner-accept-simple-quoted-values.patch
Patch0073: 0073-Improve-error-message-when-more-space-is-needed-for-.patch
Patch0074: 0074-Do-not-create-python3-.pyc-files.patch
Patch0075: 0075-Add-mapping-based-on-the-installed-content-967.patch
%description
%{summary}
@ -276,79 +203,6 @@ Requires: python3-gobject-base
# %%patch0001 -p1
%patch0004 -p1
%patch0005 -p1
%patch0006 -p1
%patch0007 -p1
%patch0008 -p1
%patch0009 -p1
%patch0010 -p1
%patch0011 -p1
%patch0012 -p1
%patch0013 -p1
%patch0014 -p1
%patch0015 -p1
%patch0016 -p1
%patch0017 -p1
%patch0018 -p1
%patch0019 -p1
%patch0020 -p1
%patch0021 -p1
%patch0022 -p1
%patch0023 -p1
%patch0024 -p1
%patch0025 -p1
%patch0026 -p1
%patch0027 -p1
%patch0028 -p1
%patch0029 -p1
%patch0030 -p1
%patch0031 -p1
%patch0032 -p1
%patch0033 -p1
%patch0034 -p1
%patch0035 -p1
%patch0036 -p1
%patch0037 -p1
%patch0038 -p1
%patch0039 -p1
%patch0040 -p1
%patch0041 -p1
%patch0042 -p1
%patch0043 -p1
%patch0044 -p1
%patch0045 -p1
%patch0046 -p1
%patch0047 -p1
%patch0048 -p1
%patch0049 -p1
%patch0050 -p1
%patch0051 -p1
%patch0052 -p1
%patch0053 -p1
%patch0054 -p1
%patch0055 -p1
%patch0056 -p1
%patch0057 -p1
%patch0058 -p1
%patch0059 -p1
%patch0060 -p1
%patch0061 -p1
%patch0062 -p1
%patch0063 -p1
%patch0064 -p1
%patch0065 -p1
%patch0066 -p1
%patch0067 -p1
%patch0068 -p1
%patch0069 -p1
%patch0070 -p1
%patch0071 -p1
%patch0072 -p1
%patch0073 -p1
%patch0074 -p1
%patch0075 -p1
%build
%if 0%{?rhel} == 7
@ -423,45 +277,56 @@ done;
# no files here
%changelog
* Fri Jan 27 2023 Petr Stodulka <pstodulk@redhat.com> - 0.17.0-8
- Do not create new *pyc files when running leapp after the DNF upgrade transaction
- Fix scan of ceph volumes on systems without ceph-osd
- Fix the check of memory (RAM) limits and use human readable values in the report
- Improve the error message to guide users when discovered more space is needed
- Map the target repositories also based on the installed content
- Rework the network configuration handling and parse the configuration data properly
- Resolves: rhbz#2139907, rhbz#2111691, rhbz#2127920
* Mon Jan 23 2023 Petr Stodulka <pstodulk@redhat.com> - 0.17.0-7
* Tue Feb 21 2023 Petr Stodulka <pstodulk@redhat.com> - 0.18.0-1
- Rebase to v0.18.0
- Introduce new upgrade path RHEL 8.8 -> 9.2
- Requires cpio
- Requires python3-gobject-base, NetworkManager-libnm
- Bump leapp-repository-dependencies to 9
- Add breadcrumbs results to RHSM facts
- Add leapp RHUI packages to an allowlist to drop confusing reports
- Disable the amazon-id DNF plugin on AWS during the upgrade stage to omit
confusing error messages
- Enable upgrades on s390x when /boot is part of rootfs
- Filter out PES events unrelated to the used upgrade path and handle overlapping events
  (fixes upgrades with quagga installed)
- Fix scan of ceph volumes when ceph-osd container is not found
- Ignore external accounts in /etc/passwd
- Prevent leapp failures caused by re-run of leapp in the upgrade initramfs
after previous failure
- Prevent the upgrade with RHSM when a baseos and an appstream target
repositories are not discovered
- Resolves: rhbz#2143372, rhbz#2141393, rhbz#2139907, rhbz#2129716
* Wed Nov 30 2022 Petr Stodulka <pstodulk@redhat.com> - 0.17.0-5
- Check RPM signatures during the upgrade (first part)
- introduced the --nogpgcheck option to do the upgrade in the original way
- Resolves: rhbz#2143372
* Wed Nov 16 2022 Petr Stodulka <pstodulk@redhat.com> - 0.17.0-4
- The new upgrade path for RHEL 8.8 -> 9.2
- Require cpio
- Bump leapp-repository-dependencies to 8
- Fix systemd symlinks that become incorrect during the IPU
- Introduced an option to use an ISO file as a target RHEL version content source
- Provide common information about systemd services
- Introduced possibility to specify what systemd services should be enabled/disabled on the upgraded system
- Added checks for RHEL SAP IPU 8.6 -> 9.0
- Check RPM signatures during the upgrade
- Check only mounted XFS partitions
- Check the validity and compatibility of used leapp data
- Detect CIFS also when upgrading from RHEL8 to RHEL9 (PR1035)
- Detect RoCE on IBM Z machines and check the configuration is safe for the upgrade
- Detect a proxy configuration in YUM/DNF and adjust an error msg on issues caused by the configuration
- Detect and report systemd symlinks that are broken before the upgrade
- Resolves: rhbz#2143372
- Detect the kernel-core RPM instead of kernel to prevent an error during post-upgrade phases
- Disable the amazon-id DNF plugin on AWS during the upgrade stage to omit confusing error messages
- Do not create new *pyc files when running leapp after the DNF upgrade transaction
- Drop obsoleted upgrade paths
- Enable upgrades of RHEL 8 for SAP HANA to RHEL 9 on ppc64le
- Enable upgrades on s390x when /boot is part of rootfs
- Extend the allow list of RHUI clients by azure-sap-apps to omit confusing report
- Filter out PES events unrelated to the used upgrade path and handle overlapping events
  (fixes upgrades with quagga installed)
- Fix scan of ceph volumes on systems without ceph-osd or when ceph-osd container is not found
- Fix systemd symlinks that become incorrect during the IPU
- Fix the check of memory (RAM) limits and use human readable values in the report
- Fix the kernel detection during initramfs creation for new kernel on RHEL 9.2+
- Fix the upgrade of IBM Z machines configured with ZFCP
- Fix the upgrade on Azure using RHUI for SAP Apps images
- Ignore external accounts in /etc/passwd
- Improve remediation instructions for packages in unknown repositories
- Improve the error message to guide users when discovered more space is needed
- Improve the handling of blocklisted certificates
- Inhibit the upgrade when entries in /etc/fstab cause overshadowing during the upgrade
- Introduced an option to use an ISO file as a target RHEL version content source
- Introduced possibility to specify what systemd services should be enabled/disabled on the upgraded system
- Introduced the --nogpgcheck option to skip checking of RPM signatures
- Map the target repositories also based on the installed content
- Prevent re-run of leapp in the upgrade initramfs in case of previous failure
- Prevent the upgrade with RHSM when Baseos and Appstream target repositories are not discovered
- Provide common information about systemd services
- RHUI (Azure): correctly handle various SAP images
- Register subscribed systems automatically to Red Hat Insights unless --no-insights-register is used
- Remove obsoleted GPG keys provided by RH after the upgrade to prevent errors
- Rework the network configuration handling and parse the configuration data properly
- Set the system release lock after the upgrade also for premium channels
- Small improvements in various reports
- Resolves: rhbz#2088492, rhbz#2111691, rhbz#2127920, rhbz#2129716, rhbz#2139907, rhbz#2141393, rhbz#2143372, rhbz#2155661
* Wed Sep 07 2022 Petr Stodulka <pstodulk@redhat.com> - 0.17.0-3
- Adding back instruction to not install rubygem-irb during the in-place upgrade