import leapp-repository-0.17.0-8.el8

This commit is contained in:
CentOS Sources 2023-02-22 02:12:12 +00:00 committed by root
parent 97becf6994
commit 14f115ce08
40 changed files with 4693 additions and 7 deletions

2
.gitignore vendored
View File

@ -1,2 +1,2 @@
SOURCES/deps-pkgs-8.tar.gz
SOURCES/deps-pkgs-9.tar.gz
SOURCES/leapp-repository-0.17.0.tar.gz

View File

@ -1,2 +1,2 @@
4f7f6009adfe92d390e09beab710805fb0077c25 SOURCES/deps-pkgs-8.tar.gz
02499ccd70d4a8e6ce9ad29bd286a317d5e0b57b SOURCES/deps-pkgs-9.tar.gz
cbb3e6025c6567507d3bc317731b4c2f0a0eb872 SOURCES/leapp-repository-0.17.0.tar.gz

View File

@ -0,0 +1,54 @@
From b6d5a0790fd09fbb1f7eef6faef738cd50bd40cb Mon Sep 17 00:00:00 2001
From: Sergii Golovatiuk <sgolovat@redhat.com>
Date: Thu, 10 Nov 2022 12:37:40 +0100
Subject: [PATCH 39/63] Fix cephvolume actor
Change cephvolume behavior to return None when ceph-osd container is not
found.
Fixes: rhbz#2141393
---
.../actors/cephvolumescan/libraries/cephvolumescan.py | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/repos/system_upgrade/common/actors/cephvolumescan/libraries/cephvolumescan.py b/repos/system_upgrade/common/actors/cephvolumescan/libraries/cephvolumescan.py
index 7e3d544c..19f49528 100644
--- a/repos/system_upgrade/common/actors/cephvolumescan/libraries/cephvolumescan.py
+++ b/repos/system_upgrade/common/actors/cephvolumescan/libraries/cephvolumescan.py
@@ -12,7 +12,6 @@ CONTAINER = "ceph-osd"
def select_osd_container(engine):
- container_name = ""
try:
output = run([engine, 'ps'])
except CalledProcessError as cpe:
@@ -24,7 +23,7 @@ def select_osd_container(engine):
container_name = line.split()[-1]
if re.match(CONTAINER, container_name):
return container_name
- return container_name
+ return None
def get_ceph_lvm_list():
@@ -35,6 +34,8 @@ def get_ceph_lvm_list():
cmd_ceph_lvm_list = base_cmd
else:
container_name = select_osd_container(container_binary)
+ if container_name is None:
+ return None
cmd_ceph_lvm_list = [container_binary, 'exec', container_name]
cmd_ceph_lvm_list.extend(base_cmd)
try:
@@ -58,5 +59,6 @@ def encrypted_osds_list():
result = []
if os.path.isfile(CEPH_CONF):
output = get_ceph_lvm_list()
- result = [output[key][0]['lv_uuid'] for key in output if output[key][0]['tags']['ceph.encrypted']]
+ if output is not None:
+ result = [output[key][0]['lv_uuid'] for key in output if output[key][0]['tags']['ceph.encrypted']]
return result
--
2.39.0

View File

@ -0,0 +1,26 @@
From 5f5b5251e478e85087ea9ff7186fd58799f7def9 Mon Sep 17 00:00:00 2001
From: mreznik <mreznik@redhat.com>
Date: Tue, 6 Dec 2022 15:34:11 +0100
Subject: [PATCH 40/63] Include also Leapp RHUI special rpms in the whitelist
---
.../common/actors/redhatsignedrpmscanner/actor.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py b/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py
index 647805cd..07962adf 100644
--- a/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py
+++ b/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py
@@ -63,6 +63,9 @@ class RedHatSignedRpmScanner(Actor):
whitelisted_cloud_pkgs.update(
rhui.RHUI_CLOUD_MAP[upg_path].get(flavour, {}).get('target_pkg') for flavour in whitelisted_cloud_flavours
)
+ whitelisted_cloud_pkgs.update(
+ rhui.RHUI_CLOUD_MAP[upg_path].get(flavour, {}).get('leapp_pkg') for flavour in whitelisted_cloud_flavours
+ )
for rpm_pkgs in self.consume(InstalledRPM):
for pkg in rpm_pkgs.items:
--
2.39.0

View File

@ -0,0 +1,90 @@
From 97c9bd4a18d415289bceba91c534433561759aa2 Mon Sep 17 00:00:00 2001
From: Vinzenz Feenstra <vfeenstr@redhat.com>
Date: Mon, 31 Aug 2020 14:54:00 +0200
Subject: [PATCH 41/63] [POC] initram networking
Adding initial basic networking support for the initram phase.
Controlled by the LEAPP_INITRAM_NETWORK environment variable which must
be set to either `scripts` or `network-manager` to choose between the
legacy or NetworkManager based dracut modules.
Using the network-manager option is recommended at this moment, as it will
take care of most things, including default routing.
Signed-off-by: Vinzenz Feenstra <vfeenstr@redhat.com>
---
.../libraries/addupgradebootentry.py | 5 +++--
.../dracut/85sys-upgrade-redhat/module-setup.sh | 9 +++++++++
.../files/generate-initram.sh | 13 +++++++++++++
3 files changed, 25 insertions(+), 2 deletions(-)
diff --git a/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py b/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py
index 3836a0d1..ca9802bb 100644
--- a/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py
+++ b/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py
@@ -9,7 +9,8 @@ from leapp.models import BootContent, KernelCmdlineArg, TargetKernelCmdlineArgTa
def add_boot_entry(configs=None):
debug = 'debug' if os.getenv('LEAPP_DEBUG', '0') == '1' else ''
-
+ enable_network = os.getenv('LEAPP_INITRAM_NETWORK') in ('network-manager', 'scripts')
+ ip_arg = ' ip=on' if enable_network else ''
kernel_dst_path, initram_dst_path = get_boot_file_paths()
_remove_old_upgrade_boot_entry(kernel_dst_path, configs=configs)
try:
@@ -20,7 +21,7 @@ def add_boot_entry(configs=None):
'--title', 'RHEL-Upgrade-Initramfs',
'--copy-default',
'--make-default',
- '--args', '{DEBUG} enforcing=0 rd.plymouth=0 plymouth.enable=0'.format(DEBUG=debug)
+ '--args', '{DEBUG}{NET} enforcing=0 rd.plymouth=0 plymouth.enable=0'.format(DEBUG=debug, NET=ip_arg)
]
if configs:
for config in configs:
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/module-setup.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/module-setup.sh
index 18d1d07f..d73060cb 100755
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/module-setup.sh
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/module-setup.sh
@@ -80,6 +80,15 @@ install() {
# Q: Would we hack that in way of copy whole initramfs into the root, mount
# mount it and set envars
+ # Install network configuration triggers
+ if [ -f /etc/leapp-initram-network-manager ]; then
+ dracut_install /etc/leapp-initram-network-manager
+ fi
+
+ if [ -f /etc/leapp-initram-network-scripts ]; then
+ dracut_install /etc/leapp-initram-network-scripts
+ fi
+
# install this one to ensure we are able to sync write
inst_binary sync
# install in-band debugging utilities
diff --git a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/files/generate-initram.sh b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/files/generate-initram.sh
index b3478280..7748aa78 100755
--- a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/files/generate-initram.sh
+++ b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/files/generate-initram.sh
@@ -67,6 +67,19 @@ build() {
DRACUT_MODULES_ADD=$(echo "--add $LEAPP_ADD_DRACUT_MODULES" | sed 's/,/ --add /g')
fi
+ case $LEAPP_INITRAM_NETWORK in
+ network-manager)
+ DRACUT_MODULES_ADD="$DRACUT_MODULES_ADD --add network-manager"
+ touch /etc/leapp-initram-network-manager
+ ;;
+ scripts)
+ DRACUT_MODULES_ADD="$DRACUT_MODULES_ADD --add network";
+ touch /etc/leapp-initram-network-scripts
+ ;;
+ *)
+ ;;
+ esac
+
DRACUT_INSTALL="systemd-nspawn"
if [[ -n "$LEAPP_DRACUT_INSTALL_FILES" ]]; then
DRACUT_INSTALL="$DRACUT_INSTALL $LEAPP_DRACUT_INSTALL_FILES"
--
2.39.0

View File

@ -0,0 +1,158 @@
From f8c96d8a8d2cf8fc1eeac0349aa48fe83567eecb Mon Sep 17 00:00:00 2001
From: Inessa Vasilevskaya <ivasilev@redhat.com>
Date: Mon, 5 Sep 2022 11:56:03 +0200
Subject: [PATCH 42/63] Skip check nfs actor if env var is set
In case LEAPP_INITRAM_NETWORK is set, NFS upgrade inhibitors
can be skipped.
---
.../common/actors/checknfs/actor.py | 4 ++
.../actors/checknfs/tests/test_checknfs.py | 61 ++++++++++++++++---
2 files changed, 58 insertions(+), 7 deletions(-)
diff --git a/repos/system_upgrade/common/actors/checknfs/actor.py b/repos/system_upgrade/common/actors/checknfs/actor.py
index f3424504..370ae6b3 100644
--- a/repos/system_upgrade/common/actors/checknfs/actor.py
+++ b/repos/system_upgrade/common/actors/checknfs/actor.py
@@ -1,5 +1,6 @@
from leapp import reporting
from leapp.actors import Actor
+from leapp.libraries.common.config import get_env
from leapp.models import StorageInfo
from leapp.reporting import create_report, Report
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
@@ -18,6 +19,9 @@ class CheckNfs(Actor):
tags = (ChecksPhaseTag, IPUWorkflowTag,)
def process(self):
+ # if network in initramfs is enabled NFS inhibitors are redundant
+ if get_env('LEAPP_INITRAM_NETWORK', None):
+ return
details = "NFS is currently not supported by the inplace upgrade.\n" \
"We have found NFS usage at the following locations:\n"
diff --git a/repos/system_upgrade/common/actors/checknfs/tests/test_checknfs.py b/repos/system_upgrade/common/actors/checknfs/tests/test_checknfs.py
index 7e52440f..a8d18ed1 100644
--- a/repos/system_upgrade/common/actors/checknfs/tests/test_checknfs.py
+++ b/repos/system_upgrade/common/actors/checknfs/tests/test_checknfs.py
@@ -1,5 +1,6 @@
import pytest
+from leapp.libraries.common import config
from leapp.models import FstabEntry, MountEntry, StorageInfo, SystemdMountEntry
from leapp.reporting import Report
from leapp.snactor.fixture import current_actor_context
@@ -7,7 +8,8 @@ from leapp.utils.report import is_inhibitor
@pytest.mark.parametrize('nfs_fstype', ('nfs', 'nfs4'))
-def test_actor_with_systemdmount_entry(current_actor_context, nfs_fstype):
+def test_actor_with_systemdmount_entry(current_actor_context, nfs_fstype, monkeypatch):
+ monkeypatch.setattr(config, 'get_env', lambda x, y: y)
with_systemdmount_entry = [SystemdMountEntry(node="nfs", path="n/a", model="n/a",
wwn="n/a", fs_type=nfs_fstype, label="n/a",
uuid="n/a")]
@@ -17,7 +19,8 @@ def test_actor_with_systemdmount_entry(current_actor_context, nfs_fstype):
assert is_inhibitor(report_fields)
-def test_actor_without_systemdmount_entry(current_actor_context):
+def test_actor_without_systemdmount_entry(current_actor_context, monkeypatch):
+ monkeypatch.setattr(config, 'get_env', lambda x, y: y)
without_systemdmount_entry = [SystemdMountEntry(node="/dev/sda1",
path="pci-0000:00:17.0-ata-2",
model="TOSHIBA_THNSNJ512GDNU_A",
@@ -30,7 +33,8 @@ def test_actor_without_systemdmount_entry(current_actor_context):
@pytest.mark.parametrize('nfs_fstype', ('nfs', 'nfs4'))
-def test_actor_with_fstab_entry(current_actor_context, nfs_fstype):
+def test_actor_with_fstab_entry(current_actor_context, nfs_fstype, monkeypatch):
+ monkeypatch.setattr(config, 'get_env', lambda x, y: y)
with_fstab_entry = [FstabEntry(fs_spec="lithium:/mnt/data", fs_file="/mnt/data",
fs_vfstype=nfs_fstype,
fs_mntops="noauto,noatime,rsize=32768,wsize=32768",
@@ -41,7 +45,8 @@ def test_actor_with_fstab_entry(current_actor_context, nfs_fstype):
assert is_inhibitor(report_fields)
-def test_actor_without_fstab_entry(current_actor_context):
+def test_actor_without_fstab_entry(current_actor_context, monkeypatch):
+ monkeypatch.setattr(config, 'get_env', lambda x, y: y)
without_fstab_entry = [FstabEntry(fs_spec="/dev/mapper/fedora-home", fs_file="/home",
fs_vfstype="ext4",
fs_mntops="defaults,x-systemd.device-timeout=0",
@@ -51,7 +56,8 @@ def test_actor_without_fstab_entry(current_actor_context):
assert not current_actor_context.consume(Report)
-def test_actor_with_nfsd(current_actor_context):
+def test_actor_with_nfsd(current_actor_context, monkeypatch):
+ monkeypatch.setattr(config, 'get_env', lambda x, y: y)
with_nfsd = [MountEntry(name="nfsd", mount="/proc/fs/nfsd", tp="nfsd", options="rw,relatime")]
current_actor_context.feed(StorageInfo(mount=with_nfsd))
current_actor_context.run()
@@ -59,7 +65,8 @@ def test_actor_with_nfsd(current_actor_context):
@pytest.mark.parametrize('nfs_fstype', ('nfs', 'nfs4'))
-def test_actor_with_mount_share(current_actor_context, nfs_fstype):
+def test_actor_with_mount_share(current_actor_context, nfs_fstype, monkeypatch):
+ monkeypatch.setattr(config, 'get_env', lambda x, y: y)
with_mount_share = [MountEntry(name="nfs", mount="/mnt/data", tp=nfs_fstype,
options="rw,nosuid,nodev,relatime,user_id=1000,group_id=1000")]
current_actor_context.feed(StorageInfo(mount=with_mount_share))
@@ -68,9 +75,49 @@ def test_actor_with_mount_share(current_actor_context, nfs_fstype):
assert is_inhibitor(report_fields)
-def test_actor_without_mount_share(current_actor_context):
+def test_actor_without_mount_share(current_actor_context, monkeypatch):
+ monkeypatch.setattr(config, 'get_env', lambda x, y: y)
without_mount_share = [MountEntry(name="tmpfs", mount="/run/snapd/ns", tp="tmpfs",
options="rw,nosuid,nodev,seclabel,mode=755")]
current_actor_context.feed(StorageInfo(mount=without_mount_share))
current_actor_context.run()
assert not current_actor_context.consume(Report)
+
+
+def test_actor_skipped_if_initram_network_enabled(current_actor_context, monkeypatch):
+ """Check that previous inhibitors are not stopping the upgrade in case env var is set"""
+ monkeypatch.setattr(config, 'get_env', lambda x, y: 'network-manager' if x == 'LEAPP_INITRAM_NETWORK' else y)
+ with_mount_share = [MountEntry(name="nfs", mount="/mnt/data", tp='nfs',
+ options="rw,nosuid,nodev,relatime,user_id=1000,group_id=1000")]
+ with_systemdmount_entry = [SystemdMountEntry(node="nfs", path="n/a", model="n/a",
+ wwn="n/a", fs_type='nfs', label="n/a",
+ uuid="n/a")]
+ with_fstab_entry = [FstabEntry(fs_spec="lithium:/mnt/data", fs_file="/mnt/data",
+ fs_vfstype='nfs',
+ fs_mntops="noauto,noatime,rsize=32768,wsize=32768",
+ fs_freq="0", fs_passno="0")]
+ current_actor_context.feed(StorageInfo(mount=with_mount_share,
+ systemdmount=with_systemdmount_entry,
+ fstab=with_fstab_entry))
+ current_actor_context.run()
+ assert not current_actor_context.consume(Report)
+
+
+def test_actor_not_skipped_if_initram_network_empty(current_actor_context, monkeypatch):
+ """Check that previous inhibitors are not stopping the upgrade in case env var is set"""
+ monkeypatch.setattr(config, 'get_env', lambda x, y: '' if x == 'LEAPP_INITRAM_NETWORK' else y)
+ with_mount_share = [MountEntry(name="nfs", mount="/mnt/data", tp='nfs',
+ options="rw,nosuid,nodev,relatime,user_id=1000,group_id=1000")]
+ with_systemdmount_entry = [SystemdMountEntry(node="nfs", path="n/a", model="n/a",
+ wwn="n/a", fs_type='nfs', label="n/a",
+ uuid="n/a")]
+ with_fstab_entry = [FstabEntry(fs_spec="lithium:/mnt/data", fs_file="/mnt/data",
+ fs_vfstype='nfs',
+ fs_mntops="noauto,noatime,rsize=32768,wsize=32768",
+ fs_freq="0", fs_passno="0")]
+ current_actor_context.feed(StorageInfo(mount=with_mount_share,
+ systemdmount=with_systemdmount_entry,
+ fstab=with_fstab_entry))
+ current_actor_context.run()
+ report_fields = current_actor_context.consume(Report)[0].report
+ assert is_inhibitor(report_fields)
--
2.39.0

View File

@ -0,0 +1,174 @@
From f41cdf561c04d6ec58609f0b11b3a813fa0f6143 Mon Sep 17 00:00:00 2001
From: Inessa Vasilevskaya <ivasilev@redhat.com>
Date: Mon, 29 Aug 2022 10:57:32 +0200
Subject: [PATCH 43/63] Apply changes after rebase and do refactor
Changes done to repos/system_upgrade/el7toel8/actors
have been manually applied to repos/system_upgrade/common/actors
Refactoring is mostly about renaming the variable to
LEAPP_DEVEL_INITRAM_NETWORK and moving some changes out of dracut
into the UpgradeTasks.
---
.../libraries/addupgradebootentry.py | 4 ++--
.../common/actors/checknfs/actor.py | 2 +-
.../actors/checknfs/tests/test_checknfs.py | 4 ++--
.../dracut/85sys-upgrade-redhat/do-upgrade.sh | 15 +++++++++++++++
.../libraries/modscan.py | 16 ++++++++++++++++
.../files/generate-initram.sh | 13 -------------
6 files changed, 36 insertions(+), 18 deletions(-)
diff --git a/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py b/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py
index ca9802bb..beddafec 100644
--- a/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py
+++ b/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py
@@ -9,8 +9,8 @@ from leapp.models import BootContent, KernelCmdlineArg, TargetKernelCmdlineArgTa
def add_boot_entry(configs=None):
debug = 'debug' if os.getenv('LEAPP_DEBUG', '0') == '1' else ''
- enable_network = os.getenv('LEAPP_INITRAM_NETWORK') in ('network-manager', 'scripts')
- ip_arg = ' ip=on' if enable_network else ''
+ enable_network = os.getenv('LEAPP_DEVEL_INITRAM_NETWORK') in ('network-manager', 'scripts')
+ ip_arg = ' ip=dhcp rd.neednet=1' if enable_network else ''
kernel_dst_path, initram_dst_path = get_boot_file_paths()
_remove_old_upgrade_boot_entry(kernel_dst_path, configs=configs)
try:
diff --git a/repos/system_upgrade/common/actors/checknfs/actor.py b/repos/system_upgrade/common/actors/checknfs/actor.py
index 370ae6b3..40ca834e 100644
--- a/repos/system_upgrade/common/actors/checknfs/actor.py
+++ b/repos/system_upgrade/common/actors/checknfs/actor.py
@@ -20,7 +20,7 @@ class CheckNfs(Actor):
def process(self):
# if network in initramfs is enabled NFS inhibitors are redundant
- if get_env('LEAPP_INITRAM_NETWORK', None):
+ if get_env('LEAPP_DEVEL_INITRAM_NETWORK', None):
return
details = "NFS is currently not supported by the inplace upgrade.\n" \
"We have found NFS usage at the following locations:\n"
diff --git a/repos/system_upgrade/common/actors/checknfs/tests/test_checknfs.py b/repos/system_upgrade/common/actors/checknfs/tests/test_checknfs.py
index a8d18ed1..907dca40 100644
--- a/repos/system_upgrade/common/actors/checknfs/tests/test_checknfs.py
+++ b/repos/system_upgrade/common/actors/checknfs/tests/test_checknfs.py
@@ -86,7 +86,7 @@ def test_actor_without_mount_share(current_actor_context, monkeypatch):
def test_actor_skipped_if_initram_network_enabled(current_actor_context, monkeypatch):
"""Check that previous inhibitors are not stopping the upgrade in case env var is set"""
- monkeypatch.setattr(config, 'get_env', lambda x, y: 'network-manager' if x == 'LEAPP_INITRAM_NETWORK' else y)
+ monkeypatch.setattr(config, 'get_env', lambda x, y: 'network-manager' if x == 'LEAPP_DEVEL_INITRAM_NETWORK' else y)
with_mount_share = [MountEntry(name="nfs", mount="/mnt/data", tp='nfs',
options="rw,nosuid,nodev,relatime,user_id=1000,group_id=1000")]
with_systemdmount_entry = [SystemdMountEntry(node="nfs", path="n/a", model="n/a",
@@ -105,7 +105,7 @@ def test_actor_skipped_if_initram_network_enabled(current_actor_context, monkeyp
def test_actor_not_skipped_if_initram_network_empty(current_actor_context, monkeypatch):
"""Check that previous inhibitors are not stopping the upgrade in case env var is set"""
- monkeypatch.setattr(config, 'get_env', lambda x, y: '' if x == 'LEAPP_INITRAM_NETWORK' else y)
+ monkeypatch.setattr(config, 'get_env', lambda x, y: '' if x == 'LEAPP_DEVEL_INITRAM_NETWORK' else y)
with_mount_share = [MountEntry(name="nfs", mount="/mnt/data", tp='nfs',
options="rw,nosuid,nodev,relatime,user_id=1000,group_id=1000")]
with_systemdmount_entry = [SystemdMountEntry(node="nfs", path="n/a", model="n/a",
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
index ff491316..49c26bc8 100755
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
@@ -194,6 +194,19 @@ ibdmp() {
done
}
+bring_up_network() {
+ if [ -f /etc/leapp-initram-network-manager ]; then
+ # NOTE(ivasilev) Reverting the change to see if it caused the crash
+ . /lib/dracut/hooks/cmdline/99-nm-config.sh
+ . /lib/dracut/hooks/initqueue/settled/99-nm-run.sh
+ fi
+ if [ -f /etc/leapp-initram-network-scripts ]; then
+ for interface in /sys/class/net/*;
+ do
+ ifup ${interface##*/};
+ done;
+ fi
+}
do_upgrade() {
local args="" rv=0
@@ -202,6 +215,8 @@ do_upgrade() {
#getargbool 0 rd.upgrade.verbose && args="$args --verbose"
getargbool 0 rd.upgrade.debug && args="$args --debug"
+ bring_up_network
+
# Force selinux into permissive mode unless booted with 'enforcing=1'.
# FIXME: THIS IS A BIG STUPID HAMMER AND WE SHOULD ACTUALLY SOLVE THE ROOT
# PROBLEMS RATHER THAN JUST PAPERING OVER THE WHOLE THING. But this is what
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/libraries/modscan.py b/repos/system_upgrade/common/actors/commonleappdracutmodules/libraries/modscan.py
index 275b2c63..2b8d78a4 100644
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/libraries/modscan.py
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/libraries/modscan.py
@@ -6,6 +6,7 @@ from leapp.libraries.stdlib import api
from leapp.utils.deprecation import suppress_deprecation
from leapp.models import ( # isort:skip
+ CopyFile,
RequiredUpgradeInitramPackages, # deprecated
UpgradeDracutModule, # deprecated
DracutModule,
@@ -42,6 +43,18 @@ _REQUIRED_PACKAGES = [
]
+def _create_initram_networking_tasks():
+ # include networking-related dracut modules
+ modules_map = {'network-manager': ('network-manager', '/etc/leapp-initram-network-manager'),
+ 'scripts': ('network', '/etc/leapp-initram-network-scripts')}
+ initram_network_chosen = os.getenv('LEAPP_DEVEL_INITRAM_NETWORK', None)
+ if initram_network_chosen in modules_map:
+ module, touch_file = modules_map[initram_network_chosen]
+ yield UpgradeInitramfsTasks(include_dracut_modules=[DracutModule(name=module)])
+ # touch expected file
+ yield TargetUserSpaceUpgradeTasks(copy_files=[CopyFile(src='/dev/null', dst=touch_file)])
+
+
# The decorator is not effective for generators, it has to be used one level
# above
# @suppress_deprecation(UpgradeDracutModule)
@@ -68,6 +81,8 @@ def _create_initram_packages():
required_pkgs = _REQUIRED_PACKAGES[:]
if architecture.matches_architecture(architecture.ARCH_X86_64):
required_pkgs.append('biosdevname')
+ if os.getenv('LEAPP_DEVEL_INITRAM_NETWORK', None) == 'network-manager':
+ required_pkgs.append('NetworkManager')
if version.get_target_major_version() == '9':
required_pkgs += ['policycoreutils', 'rng-tools']
return (
@@ -79,3 +94,4 @@ def _create_initram_packages():
def process():
api.produce(*tuple(_create_dracut_modules()))
api.produce(*_create_initram_packages())
+ api.produce(*tuple(_create_initram_networking_tasks()))
diff --git a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/files/generate-initram.sh b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/files/generate-initram.sh
index 7748aa78..b3478280 100755
--- a/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/files/generate-initram.sh
+++ b/repos/system_upgrade/common/actors/initramfs/upgradeinitramfsgenerator/files/generate-initram.sh
@@ -67,19 +67,6 @@ build() {
DRACUT_MODULES_ADD=$(echo "--add $LEAPP_ADD_DRACUT_MODULES" | sed 's/,/ --add /g')
fi
- case $LEAPP_INITRAM_NETWORK in
- network-manager)
- DRACUT_MODULES_ADD="$DRACUT_MODULES_ADD --add network-manager"
- touch /etc/leapp-initram-network-manager
- ;;
- scripts)
- DRACUT_MODULES_ADD="$DRACUT_MODULES_ADD --add network";
- touch /etc/leapp-initram-network-scripts
- ;;
- *)
- ;;
- esac
-
DRACUT_INSTALL="systemd-nspawn"
if [[ -n "$LEAPP_DRACUT_INSTALL_FILES" ]]; then
DRACUT_INSTALL="$DRACUT_INSTALL $LEAPP_DRACUT_INSTALL_FILES"
--
2.39.0

View File

@ -0,0 +1,91 @@
From 032b00255d0127c06c7bd851bc438290766f5cbc Mon Sep 17 00:00:00 2001
From: Inessa Vasilevskaya <ivasilev@redhat.com>
Date: Thu, 10 Nov 2022 12:51:19 +0100
Subject: [PATCH 44/63] Tune tmt tests regexes to align with QE automation
In order for upstream tests and those launched with QE tooling
to produce a comparable set of results, it is a wise thing to
do.
---
.github/workflows/tmt-tests.yml | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/.github/workflows/tmt-tests.yml b/.github/workflows/tmt-tests.yml
index 563c6e8c..0b565b8b 100644
--- a/.github/workflows/tmt-tests.yml
+++ b/.github/workflows/tmt-tests.yml
@@ -16,7 +16,7 @@ jobs:
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
+ tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
pull_request_status_name: "7.9to8.4"
call_workflow_tests_79to86_integration:
@@ -25,7 +25,7 @@ jobs:
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
+ tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
variables: 'TARGET_RELEASE=8.6'
pull_request_status_name: "7.9to8.6"
@@ -35,7 +35,7 @@ jobs:
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*c2r)(?!.*sap)(?!.*8to9)(.*morf)"
+ tmt_plan_regex: "^(/plans/morf)(?!.*sap)"
pull_request_status_name: "7.9to8.4-sst"
update_pull_request_status: 'false'
if: |
@@ -49,7 +49,7 @@ jobs:
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*c2r)(?!.*sap)(?!.*8to9)(.*e2e)"
+ tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*8to9)(.*e2e)"
compose: "RHEL-7.9-rhui"
environment_settings: '{"provisioning": {"post_install_script": "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys; echo 42; yum-config-manager --enable rhel-7-server-rhui-optional-rpms"}}'
pull_request_status_name: "7to8-aws-e2e"
@@ -61,7 +61,7 @@ jobs:
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*c2r)(?!.*sap)(?!.*7to8)(?!.*morf)"
+ tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
pull_request_status_name: "8.6to9.0"
call_workflow_tests_87to91_integration:
@@ -70,7 +70,7 @@ jobs:
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*c2r)(?!.*sap)(?!.*7to8)(?!.*morf)"
+ tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*7to8)(?!.*morf)"
variables: "LEAPP_DEVEL_TARGET_PRODUCT_TYPE=beta;RHSM_SKU=RH00069;TARGET_RELEASE=9.1;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-rpms,rhel-8-for-x86_64-baseos-rpms"
compose: "RHEL-8.7.0-Nightly"
pull_request_status_name: "8.7to9.1"
@@ -82,7 +82,7 @@ jobs:
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*c2r)(?!.*sap)(?!.*7to8)(.*morf)"
+ tmt_plan_regex: "^(/plans/morf)(?!.*sap)"
pull_request_status_name: "8to9-sst"
update_pull_request_status: 'false'
if: |
@@ -96,7 +96,7 @@ jobs:
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*c2r)(?!.*sap)(?!.*7to8)(.*e2e)"
+ tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*7to8)(.*e2e)"
compose: "RHEL-8.6-rhui"
environment_settings: '{"provisioning": {"post_install_script": "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys"}}'
pull_request_status_name: "8to9-aws-e2e"
--
2.39.0

View File

@ -0,0 +1,130 @@
From fab859941f31d4809038f571fe308154818f0dd4 Mon Sep 17 00:00:00 2001
From: Inessa Vasilevskaya <ivasilev@redhat.com>
Date: Thu, 10 Nov 2022 12:58:00 +0100
Subject: [PATCH 45/63] Change /rerun-all to /rerun-sst
Change the concept from "rerun all tests + morf ones" to
"rerun just the morf tests".
Welcome message updated as well.
---
.github/workflows/pr-welcome-msg.yml | 4 ++--
.github/workflows/reuse-copr-build.yml | 2 +-
.github/workflows/tmt-tests.yml | 28 ++++++++++++++++++++++++--
3 files changed, 29 insertions(+), 5 deletions(-)
diff --git a/.github/workflows/pr-welcome-msg.yml b/.github/workflows/pr-welcome-msg.yml
index 7ae2fa4e..5fbf9558 100644
--- a/.github/workflows/pr-welcome-msg.yml
+++ b/.github/workflows/pr-welcome-msg.yml
@@ -27,8 +27,8 @@ jobs:
To launch regression testing public members of oamg organization can leave the following comment:
- **/rerun** to schedule basic regression tests using this pr build and leapp\*master\* as artifacts
- **/rerun 42** to schedule basic regression tests using this pr build and leapp\*PR42\* as artifacts
- - **/rerun-all** to schedule all tests (including sst) using this pr build and leapp\*master\* as artifacts
- - **/rerun-all 42** to schedule all tests (including sst) using this pr build and leapp\*PR42\* as artifacts
+ - **/rerun-sst** to schedule sst tests using this pr build and leapp\*master\* as artifacts
+ - **/rerun-sst 42** to schedule sst tests using this pr build and leapp\*PR42\* as artifacts
Please [open ticket](https://url.corp.redhat.com/oamg-ci-issue) in case you experience technical problem with the CI. (RH internal only)
diff --git a/.github/workflows/reuse-copr-build.yml b/.github/workflows/reuse-copr-build.yml
index 477d3f40..43aa98a3 100644
--- a/.github/workflows/reuse-copr-build.yml
+++ b/.github/workflows/reuse-copr-build.yml
@@ -88,7 +88,7 @@ jobs:
id: leapp_pr_regex_match
with:
text: ${{ github.event.comment.body }}
- regex: '^/(rerun|rerun-all)\s+([0-9]+)\s*$'
+ regex: '^/(rerun|rerun-sst)\s+([0-9]+)\s*$'
- name: If leapp_pr was specified in the comment - trigger copr build
# TODO: XXX FIXME This should schedule copr build for leapp but for now it will be just setting an env var
diff --git a/.github/workflows/tmt-tests.yml b/.github/workflows/tmt-tests.yml
index 0b565b8b..fad1d5d0 100644
--- a/.github/workflows/tmt-tests.yml
+++ b/.github/workflows/tmt-tests.yml
@@ -18,6 +18,10 @@ jobs:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
pull_request_status_name: "7.9to8.4"
+ if: |
+ github.event.issue.pull_request
+ && ! startsWith(github.event.comment.body, '/rerun-sst')
+ && contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
call_workflow_tests_79to86_integration:
needs: call_workflow_copr_build
@@ -28,6 +32,10 @@ jobs:
tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
variables: 'TARGET_RELEASE=8.6'
pull_request_status_name: "7.9to8.6"
+ if: |
+ github.event.issue.pull_request
+ && ! startsWith(github.event.comment.body, '/rerun-sst')
+ && contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
call_workflow_tests_79to84_sst:
needs: call_workflow_copr_build
@@ -40,7 +48,7 @@ jobs:
update_pull_request_status: 'false'
if: |
github.event.issue.pull_request
- && startsWith(github.event.comment.body, '/rerun-all')
+ && startsWith(github.event.comment.body, '/rerun-sst')
&& contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
call_workflow_tests_7to8_aws:
@@ -54,6 +62,10 @@ jobs:
environment_settings: '{"provisioning": {"post_install_script": "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys; echo 42; yum-config-manager --enable rhel-7-server-rhui-optional-rpms"}}'
pull_request_status_name: "7to8-aws-e2e"
variables: "RHUI=aws"
+ if: |
+ github.event.issue.pull_request
+ && ! startsWith(github.event.comment.body, '/rerun-sst')
+ && contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
call_workflow_tests_86to90_integration:
needs: call_workflow_copr_build
@@ -63,6 +75,10 @@ jobs:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
pull_request_status_name: "8.6to9.0"
+ if: |
+ github.event.issue.pull_request
+ && ! startsWith(github.event.comment.body, '/rerun-sst')
+ && contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
call_workflow_tests_87to91_integration:
needs: call_workflow_copr_build
@@ -75,6 +91,10 @@ jobs:
compose: "RHEL-8.7.0-Nightly"
pull_request_status_name: "8.7to9.1"
tmt_context: "distro=rhel-8.7"
+ if: |
+ github.event.issue.pull_request
+ && ! startsWith(github.event.comment.body, '/rerun-sst')
+ && contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
call_workflow_tests_8to9_sst:
needs: call_workflow_copr_build
@@ -87,7 +107,7 @@ jobs:
update_pull_request_status: 'false'
if: |
github.event.issue.pull_request
- && startsWith(github.event.comment.body, '/rerun-all')
+ && startsWith(github.event.comment.body, '/rerun-sst')
&& contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
call_workflow_tests_8to9_aws:
@@ -101,3 +121,7 @@ jobs:
environment_settings: '{"provisioning": {"post_install_script": "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys"}}'
pull_request_status_name: "8to9-aws-e2e"
variables: "RHUI=aws"
+ if: |
+ github.event.issue.pull_request
+ && ! startsWith(github.event.comment.body, '/rerun-sst')
+ && contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
--
2.39.0

View File

@ -0,0 +1,142 @@
From 82b43d3b7452812a97ad2f479f8f7ef541e46154 Mon Sep 17 00:00:00 2001
From: Inessa Vasilevskaya <ivasilev@redhat.com>
Date: Mon, 28 Nov 2022 13:16:50 +0100
Subject: [PATCH 46/63] Do not run rhsm tests in upstream
It was decided to limit test runs to tier0/tier1, non-RHSM
tests only.
Also this patch will keep github action parameters in sync with
supported upgrade paths.
---
.github/workflows/tmt-tests.yml | 36 ++++++++++++++++++---------------
1 file changed, 20 insertions(+), 16 deletions(-)
diff --git a/.github/workflows/tmt-tests.yml b/.github/workflows/tmt-tests.yml
index fad1d5d0..c82256c8 100644
--- a/.github/workflows/tmt-tests.yml
+++ b/.github/workflows/tmt-tests.yml
@@ -10,14 +10,15 @@ jobs:
uses: ./.github/workflows/reuse-copr-build.yml
secrets: inherit
- call_workflow_tests_79to84_integration:
+ call_workflow_tests_79to88_integration:
needs: call_workflow_copr_build
uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@master
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
- pull_request_status_name: "7.9to8.4"
+ tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
+ pull_request_status_name: "7.9to8.8"
+ variables: 'TARGET_RELEASE=8.8'
if: |
github.event.issue.pull_request
&& ! startsWith(github.event.comment.body, '/rerun-sst')
@@ -29,7 +30,7 @@ jobs:
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
+ tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
variables: 'TARGET_RELEASE=8.6'
pull_request_status_name: "7.9to8.6"
if: |
@@ -37,15 +38,16 @@ jobs:
&& ! startsWith(github.event.comment.body, '/rerun-sst')
&& contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
- call_workflow_tests_79to84_sst:
+ call_workflow_tests_79to88_sst:
needs: call_workflow_copr_build
uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@master
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
tmt_plan_regex: "^(/plans/morf)(?!.*sap)"
- pull_request_status_name: "7.9to8.4-sst"
+ pull_request_status_name: "7.9to8.8-sst"
update_pull_request_status: 'false'
+ variables: 'TARGET_RELEASE=8.8'
if: |
github.event.issue.pull_request
&& startsWith(github.event.comment.body, '/rerun-sst')
@@ -57,7 +59,7 @@ jobs:
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*8to9)(.*e2e)"
+ tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*8to9)(.*e2e)"
compose: "RHEL-7.9-rhui"
environment_settings: '{"provisioning": {"post_install_script": "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys; echo 42; yum-config-manager --enable rhel-7-server-rhui-optional-rpms"}}'
pull_request_status_name: "7to8-aws-e2e"
@@ -73,36 +75,38 @@ jobs:
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
+ tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*7to8)(?!.*morf)"
+ variables: 'TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms'
pull_request_status_name: "8.6to9.0"
if: |
github.event.issue.pull_request
&& ! startsWith(github.event.comment.body, '/rerun-sst')
&& contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
- call_workflow_tests_87to91_integration:
+ call_workflow_tests_87to90_integration:
needs: call_workflow_copr_build
uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@master
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*7to8)(?!.*morf)"
- variables: "LEAPP_DEVEL_TARGET_PRODUCT_TYPE=beta;RHSM_SKU=RH00069;TARGET_RELEASE=9.1;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-rpms,rhel-8-for-x86_64-baseos-rpms"
+ tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*7to8)(?!.*morf)"
+ variables: 'TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-rpms,rhel-8-for-x86_64-baseos-rpms'
compose: "RHEL-8.7.0-Nightly"
- pull_request_status_name: "8.7to9.1"
+ pull_request_status_name: "8.7to9.0"
tmt_context: "distro=rhel-8.7"
if: |
github.event.issue.pull_request
&& ! startsWith(github.event.comment.body, '/rerun-sst')
&& contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
- call_workflow_tests_8to9_sst:
+ call_workflow_tests_86to90_sst:
needs: call_workflow_copr_build
uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@master
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
tmt_plan_regex: "^(/plans/morf)(?!.*sap)"
+ variables: 'TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms'
pull_request_status_name: "8to9-sst"
update_pull_request_status: 'false'
if: |
@@ -110,17 +114,17 @@ jobs:
&& startsWith(github.event.comment.body, '/rerun-sst')
&& contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
- call_workflow_tests_8to9_aws:
+ call_workflow_tests_86to90_aws:
needs: call_workflow_copr_build
uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@master
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
- tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*c2r)(?!.*sap)(?!.*7to8)(.*e2e)"
+ tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*7to8)(.*e2e)"
compose: "RHEL-8.6-rhui"
environment_settings: '{"provisioning": {"post_install_script": "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys"}}'
pull_request_status_name: "8to9-aws-e2e"
- variables: "RHUI=aws"
+ variables: 'TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms;RHUI=aws'
if: |
github.event.issue.pull_request
&& ! startsWith(github.event.comment.body, '/rerun-sst')
--
2.39.0

View File

@ -0,0 +1,90 @@
From 5e6f37878661c44ced384a3a362c2e3515c3609a Mon Sep 17 00:00:00 2001
From: Inessa Vasilevskaya <ivasilev@redhat.com>
Date: Fri, 16 Dec 2022 11:25:14 +0100
Subject: [PATCH 47/63] Set SOURCE_RELEASE env var
Following a change in tmt-tests this is a necessary addition
for upgrade_plugin to correctly choose an upgrade path.
---
.github/workflows/tmt-tests.yml | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/.github/workflows/tmt-tests.yml b/.github/workflows/tmt-tests.yml
index c82256c8..ecda20ed 100644
--- a/.github/workflows/tmt-tests.yml
+++ b/.github/workflows/tmt-tests.yml
@@ -18,7 +18,7 @@ jobs:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
pull_request_status_name: "7.9to8.8"
- variables: 'TARGET_RELEASE=8.8'
+ variables: 'SOURCE_RELEASE=7.9;TARGET_RELEASE=8.8'
if: |
github.event.issue.pull_request
&& ! startsWith(github.event.comment.body, '/rerun-sst')
@@ -31,7 +31,7 @@ jobs:
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*morf)"
- variables: 'TARGET_RELEASE=8.6'
+ variables: 'SOURCE_RELEASE=7.9;TARGET_RELEASE=8.6'
pull_request_status_name: "7.9to8.6"
if: |
github.event.issue.pull_request
@@ -47,7 +47,7 @@ jobs:
tmt_plan_regex: "^(/plans/morf)(?!.*sap)"
pull_request_status_name: "7.9to8.8-sst"
update_pull_request_status: 'false'
- variables: 'TARGET_RELEASE=8.8'
+ variables: 'SOURCE_RELEASE=7.9;TARGET_RELEASE=8.8'
if: |
github.event.issue.pull_request
&& startsWith(github.event.comment.body, '/rerun-sst')
@@ -63,7 +63,7 @@ jobs:
compose: "RHEL-7.9-rhui"
environment_settings: '{"provisioning": {"post_install_script": "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys; echo 42; yum-config-manager --enable rhel-7-server-rhui-optional-rpms"}}'
pull_request_status_name: "7to8-aws-e2e"
- variables: "RHUI=aws"
+ variables: "SOURCE_RELEASE=7.9;TARGET_RELEASE=8.6;RHUI=aws"
if: |
github.event.issue.pull_request
&& ! startsWith(github.event.comment.body, '/rerun-sst')
@@ -76,7 +76,7 @@ jobs:
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*7to8)(?!.*morf)"
- variables: 'TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms'
+ variables: 'SOURCE_RELEASE=8.6;TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms'
pull_request_status_name: "8.6to9.0"
if: |
github.event.issue.pull_request
@@ -90,7 +90,7 @@ jobs:
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*7to8)(?!.*morf)"
- variables: 'TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-rpms,rhel-8-for-x86_64-baseos-rpms'
+ variables: 'SOURCE_RELEASE=8.7;TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-rpms,rhel-8-for-x86_64-baseos-rpms'
compose: "RHEL-8.7.0-Nightly"
pull_request_status_name: "8.7to9.0"
tmt_context: "distro=rhel-8.7"
@@ -106,7 +106,7 @@ jobs:
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
tmt_plan_regex: "^(/plans/morf)(?!.*sap)"
- variables: 'TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms'
+ variables: 'SOURCE_RELEASE=8.6;TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms'
pull_request_status_name: "8to9-sst"
update_pull_request_status: 'false'
if: |
@@ -124,7 +124,7 @@ jobs:
compose: "RHEL-8.6-rhui"
environment_settings: '{"provisioning": {"post_install_script": "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys"}}'
pull_request_status_name: "8to9-aws-e2e"
- variables: 'TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms;RHUI=aws'
+ variables: 'SOURCE_RELEASE=8.6;TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms;RHUI=aws'
if: |
github.event.issue.pull_request
&& ! startsWith(github.event.comment.body, '/rerun-sst')
--
2.39.0

View File

@ -0,0 +1,149 @@
From ef0e81dba97c61f7b4c15ebc91468253b758005d Mon Sep 17 00:00:00 2001
From: Tomas Tomecek <ttomecek@redhat.com>
Date: Mon, 7 Mar 2022 16:29:39 +0100
Subject: [PATCH 48/63] Packit: build SRPM in Copr
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
TL;DR:
* specify the list of deps needed to prepare SRPM in the .packit file
* move leapp-el7toel8-deps.spec into other_specs subdir
* update the make commands for the building of packages
Previously we have prepared SRPM "locally" and then uploaded it
to the COPR server for the building. However, since Jan 2023 we are
required to build SRPMs on the COPR server itself. As in this project
we have to manage 2 SPEC files for two buildings:
- leapp-el7toel8-deps.spec
Creating metapackage resolving issues with dependencies
on the target system; created RPMs are archived and are used
as one of sources for building of leapp-repository.
- leapp-repository.spec
The main spec file used for the entire project, which requires
archive with the deps packages.
Currently it's not possible to tell COPR which specific spec file
should be used and if they are in the same directory, COPR fails with
the error message about multiple SPEC files. But COPR is ok having
multiple spec files in a project when they are in separate directories.
To fix that, we are moving the deps spec file into the separate directory.
Also, explicitly set the `_rpmfilename` macro. This is super important as
the COPR build servers are using Mock, which redefines the macro, so packages
are stored inside RPMS directory, instead RPMS/%{ARCH}. The macro must be
defined with double '%'. Using just single %, the macro is expanded when
the specfile is loaded, but it is expected to be expanded during
the build process when particular subpackages (RPMs) are created, so
each RPM has the right name. Using the single %, all RPMs would have the
name of the SRPM - which means effectively that only one RPM per build
would be created. (hopefully the explanation is clear :))
This change was finished by Peťa and Pavel Raiskup. Tomas was "only" the
initial author. For more details, please open the PR:
https://github.com/oamg/leapp-repository/pull/848
🎉🎉🎉🍻
Signed-off-by: Tomas Tomecek <ttomecek@redhat.com>
Co-authored-by: Petr Stodulk <pstodulk@redhat.com>
---
.packit.yaml | 3 +++
Makefile | 22 ++++++++++++++-----
packaging/leapp-repository.spec | 4 ++--
.../leapp-el7toel8-deps.spec | 0
4 files changed, 22 insertions(+), 7 deletions(-)
rename packaging/{ => other_specs}/leapp-el7toel8-deps.spec (100%)
diff --git a/.packit.yaml b/.packit.yaml
index fb407829..f1d59ce1 100644
--- a/.packit.yaml
+++ b/.packit.yaml
@@ -8,6 +8,9 @@ downstream_package_name: leapp-repository
upstream_tag_template: 'v{version}'
merge_pr_in_ci: false
+srpm_build_deps:
+- make
+
# This is just for the build from the CLI - all other builds for jobs use own
# actions
actions:
diff --git a/Makefile b/Makefile
index 7342d4bf..b1489e4f 100644
--- a/Makefile
+++ b/Makefile
@@ -7,7 +7,7 @@ DIST_VERSION ?= 7
PKGNAME=leapp-repository
DEPS_PKGNAME=leapp-el7toel8-deps
VERSION=`grep -m1 "^Version:" packaging/$(PKGNAME).spec | grep -om1 "[0-9].[0-9.]**"`
-DEPS_VERSION=`grep -m1 "^Version:" packaging/$(DEPS_PKGNAME).spec | grep -om1 "[0-9].[0-9.]**"`
+DEPS_VERSION=`grep -m1 "^Version:" packaging/other_specs/$(DEPS_PKGNAME).spec | grep -om1 "[0-9].[0-9.]**"`
REPOS_PATH=repos
_SYSUPG_REPOS="$(REPOS_PATH)/system_upgrade"
LIBRARY_PATH=
@@ -178,7 +178,7 @@ source: prepare
mkdir -p packaging/tmp/
@__TIMESTAMP=$(TIMESTAMP) $(MAKE) _build_subpkg
@__TIMESTAMP=$(TIMESTAMP) $(MAKE) DIST_VERSION=$$(($(DIST_VERSION) + 1)) _build_subpkg
- @tar -czf packaging/sources/deps-pkgs.tar.gz -C packaging/RPMS/noarch `ls packaging/RPMS/noarch | grep -o "[^/]*rpm$$"`
+ @tar -czf packaging/sources/deps-pkgs.tar.gz -C packaging/RPMS/noarch `ls -1 packaging/RPMS/noarch | grep -o "[^/]*rpm$$"`
@rm -f packaging/RPMS/noarch/*.rpm
srpm: source
@@ -195,8 +195,19 @@ srpm: source
_build_subpkg:
@echo "--- Build RPM: $(DEPS_PKGNAME)-$(DEPS_VERSION)-$(RELEASE).. ---"
- @cp packaging/$(DEPS_PKGNAME).spec packaging/$(DEPS_PKGNAME).spec.bak
+ @cp packaging/other_specs/$(DEPS_PKGNAME).spec packaging/$(DEPS_PKGNAME).spec
@sed -i "s/1%{?dist}/$(RELEASE)%{?dist}/g" packaging/$(DEPS_PKGNAME).spec
+ # Let's be explicit about the path to the binary RPMs; Copr builders can override this
+ # IMPORTANT:
+ # Also, explicitly set the _rpmfilename macro. This is super important as
+ # the COPR build servers are using Mock, which redefines the macro, so packages
+ # are stored inside RPMS directory, instead RPMS/%{ARCH}. The macro must be
+ # defined with double '%'. Using just single %, the macro is expanded when
+ # the specfile is loaded, but it is expected to be expanded during
+ # the build process when particular subpackages (RPMs) are created, so
+ # each RPM has the right name. Using the single %, all RPMs would have the
+ # name of the SRPM - which means effectively that only one RPM per build
+ # would be created. (hopefully the explanation is clear :))
@rpmbuild -ba packaging/$(DEPS_PKGNAME).spec \
--define "_sourcedir `pwd`/packaging/sources" \
--define "_srcrpmdir `pwd`/packaging/SRPMS" \
@@ -205,8 +216,9 @@ _build_subpkg:
--define "_rpmdir `pwd`/packaging/RPMS" \
--define "rhel $$(($(DIST_VERSION) + 1))" \
--define "dist .el$$(($(DIST_VERSION) + 1))" \
- --define "el$$(($(DIST_VERSION) + 1)) 1" || FAILED=1
- @mv packaging/$(DEPS_PKGNAME).spec.bak packaging/$(DEPS_PKGNAME).spec
+ --define "el$$(($(DIST_VERSION) + 1)) 1" \
+ --define "_rpmfilename %%{ARCH}/%%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" || FAILED=1
+ @rm -f packaging/$(DEPS_PKGNAME).spec
_build_local: source
@echo "--- Build RPM: $(PKGNAME)-$(VERSION)-$(RELEASE).. ---"
diff --git a/packaging/leapp-repository.spec b/packaging/leapp-repository.spec
index 0ffba71c..044e7275 100644
--- a/packaging/leapp-repository.spec
+++ b/packaging/leapp-repository.spec
@@ -196,9 +196,9 @@ Requires: dracut
%build
%if 0%{?rhel} == 7
-cp -a leapp*deps-el8*rpm repos/system_upgrade/el7toel8/files/bundled-rpms/
+cp -a leapp*deps*el8.noarch.rpm repos/system_upgrade/el7toel8/files/bundled-rpms/
%else
-cp -a leapp*deps-el9*rpm repos/system_upgrade/el8toel9/files/bundled-rpms/
+cp -a leapp*deps*el9.noarch.rpm repos/system_upgrade/el8toel9/files/bundled-rpms/
%endif
diff --git a/packaging/leapp-el7toel8-deps.spec b/packaging/other_specs/leapp-el7toel8-deps.spec
similarity index 100%
rename from packaging/leapp-el7toel8-deps.spec
rename to packaging/other_specs/leapp-el7toel8-deps.spec
--
2.39.0

View File

@ -0,0 +1,62 @@
From 79320da3de243dc19ee934974fc197f4bb3b6403 Mon Sep 17 00:00:00 2001
From: Evgeni Golov <evgeni@golov.de>
Date: Thu, 1 Dec 2022 09:54:21 +0100
Subject: [PATCH 49/63] ensure Satellite metapackages are installed after
upgrade
When upgrading from EL7 to EL8, we theoretically can run into the
situation where the `satellite` and `satellite-capsule` metapackages are
removed during the upgrade due to dependency problems. While we are not
aware of any actual occurences of this problem today, let's play safe
and explicitly add those packages to the `to_install` set.
---
.../actors/satellite_upgrade_facts/actor.py | 2 ++
.../tests/unit_test_satellite_upgrade_facts.py | 16 ++++++++++++++++
2 files changed, 18 insertions(+)
diff --git a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/actor.py b/repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/actor.py
index 2bbceb5d..01e63465 100644
--- a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/actor.py
+++ b/repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/actor.py
@@ -134,9 +134,11 @@ class SatelliteUpgradeFacts(Actor):
if has_package(InstalledRPM, 'satellite'):
repositories_to_enable.append('satellite-6.11-for-rhel-8-x86_64-rpms')
modules_to_enable.append(Module(name='satellite', stream='el8'))
+ to_install.append('satellite')
elif has_package(InstalledRPM, 'satellite-capsule'):
repositories_to_enable.append('satellite-capsule-6.11-for-rhel-8-x86_64-rpms')
modules_to_enable.append(Module(name='satellite-capsule', stream='el8'))
+ to_install.append('satellite-capsule')
self.produce(RpmTransactionTasks(
to_remove=to_remove,
diff --git a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/tests/unit_test_satellite_upgrade_facts.py b/repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/tests/unit_test_satellite_upgrade_facts.py
index 5d338aa1..2fb8a3ba 100644
--- a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/tests/unit_test_satellite_upgrade_facts.py
+++ b/repos/system_upgrade/el7toel8/actors/satellite_upgrade_facts/tests/unit_test_satellite_upgrade_facts.py
@@ -102,6 +102,22 @@ def test_enables_satellite_capsule_module(current_actor_context):
assert Module(name='satellite', stream='el8') not in message.modules_to_enable
+def test_installs_satellite_package(current_actor_context):
+ current_actor_context.feed(InstalledRPM(items=[FOREMAN_RPM, SATELLITE_RPM]))
+ current_actor_context.run(config_model=mock_configs.CONFIG)
+ message = current_actor_context.consume(RpmTransactionTasks)[0]
+ assert 'satellite' in message.to_install
+ assert 'satellite-capsule' not in message.to_install
+
+
+def test_installs_satellite_capsule_package(current_actor_context):
+ current_actor_context.feed(InstalledRPM(items=[FOREMAN_PROXY_RPM, SATELLITE_CAPSULE_RPM]))
+ current_actor_context.run(config_model=mock_configs.CONFIG)
+ message = current_actor_context.consume(RpmTransactionTasks)[0]
+ assert 'satellite-capsule' in message.to_install
+ assert 'satellite' not in message.to_install
+
+
def test_detects_local_postgresql(monkeypatch, current_actor_context):
def mock_stat():
orig_stat = os.stat
--
2.39.0

View File

@ -0,0 +1,42 @@
From 9230696fc7c601997e44f1012e859f632ef168ba Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Wed, 30 Nov 2022 12:03:49 +0100
Subject: [PATCH 50/63] Makefile: filter out removed files for linting
Originally actions with `isort` crashed when files have been removed,
especially in case of the `lint_fix` target. This causes crash of CI
when the PR contains e.g. just removal of files without additional
changes. So, filter out removed files from the list of files to be
checked by linters.
---
Makefile | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/Makefile b/Makefile
index b1489e4f..066a5fd2 100644
--- a/Makefile
+++ b/Makefile
@@ -331,9 +331,9 @@ lint:
echo "--- Linting done. ---"; \
fi
- if [[ "`git rev-parse --abbrev-ref HEAD`" != "master" ]] && [[ -n "`git diff $(MASTER_BRANCH) --name-only`" ]]; then \
+ if [[ "`git rev-parse --abbrev-ref HEAD`" != "$(MASTER_BRANCH)" ]] && [[ -n "`git diff $(MASTER_BRANCH) --name-only --diff-filter AMR`" ]]; then \
. $(VENVNAME)/bin/activate; \
- git diff $(MASTER_BRANCH) --name-only | xargs isort -c --diff || \
+ git diff $(MASTER_BRANCH) --name-only --diff-filter AMR | xargs isort -c --diff || \
{ \
echo; \
echo "------------------------------------------------------------------------------"; \
@@ -345,7 +345,7 @@ lint:
lint_fix:
. $(VENVNAME)/bin/activate; \
- git diff $(MASTER_BRANCH) --name-only | xargs isort && \
+ git diff $(MASTER_BRANCH) --name-only --diff-filter AMR | xargs isort && \
echo "--- isort inplace fixing done. ---;"
test_no_lint:
--
2.39.0

View File

@ -0,0 +1,116 @@
From 08756574378232de12ebbdf15801c52bc0090ce6 Mon Sep 17 00:00:00 2001
From: Petr Stodulka <xstodu05@gmail.com>
Date: Fri, 25 Nov 2022 11:19:14 +0100
Subject: [PATCH 51/63] Enable upgrades on s390x when /boot is part of rootfs
Regarding the fix provided in commit a6445b39 we do not need to
inhibit the IPU on the s390x architecture when /boot is not separated
on its own partition, but it's part of the rootfs.
---
.../actors/checknonmountboots390/actor.py | 21 -------------
.../libraries/checknonmountboots390.py | 27 -----------------
.../tests/test_checknonmountboots390.py | 30 -------------------
3 files changed, 78 deletions(-)
delete mode 100644 repos/system_upgrade/common/actors/checknonmountboots390/actor.py
delete mode 100644 repos/system_upgrade/common/actors/checknonmountboots390/libraries/checknonmountboots390.py
delete mode 100644 repos/system_upgrade/common/actors/checknonmountboots390/tests/test_checknonmountboots390.py
diff --git a/repos/system_upgrade/common/actors/checknonmountboots390/actor.py b/repos/system_upgrade/common/actors/checknonmountboots390/actor.py
deleted file mode 100644
index 82dcf30f..00000000
--- a/repos/system_upgrade/common/actors/checknonmountboots390/actor.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from leapp.actors import Actor
-from leapp.libraries.actor import checknonmountboots390
-from leapp.models import Report
-from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
-
-
-class CheckNonMountBootS390(Actor):
- """
- Inhibits on s390 when /boot is NOT on a separate partition.
-
- Due to some problems, if /boot is not on a separate partition, leapp is deleting the content of /boot.
- To avoid this from happening, we are inhibiting the upgrade process until this problem has been solved.
- """
-
- name = 'check_non_mount_boot_s390'
- consumes = ()
- produces = (Report,)
- tags = (ChecksPhaseTag, IPUWorkflowTag)
-
- def process(self):
- checknonmountboots390.perform_check()
diff --git a/repos/system_upgrade/common/actors/checknonmountboots390/libraries/checknonmountboots390.py b/repos/system_upgrade/common/actors/checknonmountboots390/libraries/checknonmountboots390.py
deleted file mode 100644
index bd165603..00000000
--- a/repos/system_upgrade/common/actors/checknonmountboots390/libraries/checknonmountboots390.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import os
-
-from leapp import reporting
-from leapp.libraries.common.config import architecture
-
-
-def perform_check():
- if not architecture.matches_architecture(architecture.ARCH_S390X):
- return
-
- if os.path.ismount('/boot'):
- return
-
- data = [
- reporting.Title('Leapp detected known issue related to /boot on s390x architecture'),
- reporting.Summary((
- 'Due to a bug in the Leapp code, there is a situation when the upgrade process'
- ' removes content of /boot when the directory is not on a separate partition and'
- ' the system is running on S390x architecture. To avoid this from happening, we'
- ' are inhibiting the upgrade process in this release until the issue has been fixed.'
- )),
- reporting.Groups([reporting.Groups.INHIBITOR]),
- reporting.Groups([reporting.Groups.FILESYSTEM, reporting.Groups.UPGRADE_PROCESS, reporting.Groups.BOOT]),
- reporting.Severity(reporting.Severity.HIGH),
- ]
-
- reporting.create_report(data)
diff --git a/repos/system_upgrade/common/actors/checknonmountboots390/tests/test_checknonmountboots390.py b/repos/system_upgrade/common/actors/checknonmountboots390/tests/test_checknonmountboots390.py
deleted file mode 100644
index e6d7ae1d..00000000
--- a/repos/system_upgrade/common/actors/checknonmountboots390/tests/test_checknonmountboots390.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import pytest
-
-from leapp.libraries.actor import checknonmountboots390
-
-
-class CheckNonMountBootS390ReportCreated(Exception):
- pass
-
-
-@pytest.mark.parametrize(
- 'matches_arch,ismount,should_report', (
- (True, True, False),
- (True, False, True),
- (False, True, False),
- (False, False, False),
- )
-)
-def test_checknonmountboots390_perform_check(monkeypatch, matches_arch, ismount, should_report):
- def _create_report(data):
- raise CheckNonMountBootS390ReportCreated()
-
- monkeypatch.setattr(checknonmountboots390.architecture, 'matches_architecture', lambda x: matches_arch)
- monkeypatch.setattr(checknonmountboots390.os.path, 'ismount', lambda x: ismount)
- monkeypatch.setattr(checknonmountboots390.reporting, 'create_report', _create_report)
-
- if should_report:
- with pytest.raises(CheckNonMountBootS390ReportCreated):
- checknonmountboots390.perform_check()
- else:
- checknonmountboots390.perform_check()
--
2.39.0

View File

@ -0,0 +1,73 @@
From aadf694f946ca4821fe2d9aa47eea67dcb270af9 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Wed, 7 Dec 2022 21:50:48 +0100
Subject: [PATCH 52/63] Add leapp debug tools to initramfs
Install a script with debug utilities to the Leapp upgrade dracut
module.
---
.../dracut/90sys-upgrade/leapp_debug_tools.sh | 38 +++++++++++++++++++
.../dracut/90sys-upgrade/module-setup.sh | 2 +
2 files changed, 40 insertions(+)
create mode 100644 repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/leapp_debug_tools.sh
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/leapp_debug_tools.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/leapp_debug_tools.sh
new file mode 100644
index 00000000..91c228ce
--- /dev/null
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/leapp_debug_tools.sh
@@ -0,0 +1,38 @@
+# library containing some useful functions for debugging in initramfs
+
+# mounts the sysroot
+leapp_dbg_mount() {
+ systemctl start sysroot.mount
+ mount -o remount,rw /sysroot
+}
+
+# source programs from $NEWROOT, mount if not mounted
+leapp_dbg_source() {
+ systemctl is-active sysroot.mount --quiet || {
+ echo "sysroot not mounted, mounting...";
+ leapp_dbg_mount || return 1
+ }
+
+ for dir in /bin /sbin; do
+ export PATH="$PATH:${NEWROOT}$dir"
+ done
+
+ export LD_LIBRARY_PATH=/sysroot/lib64
+}
+
+# chroot into $NEWROOT
+leapp_dbg_chroot() {
+ systemctl is-active sysroot.mount --quiet || {
+ echo "sysroot not mounted, mounting...";
+ leapp_dbg_mount || return 1
+ }
+
+ for dir in /sys /run /proc /dev /dev/pts; do
+ mount --bind $dir "$NEWROOT$dir"
+ done || { echo "Failed to mount some directories" || return 1 }
+
+ chroot $NEWROOT sh -c "mount -a; /bin/bash"
+ for dir in /sys /run /proc /dev/pts /dev; do
+ umount $NEWROOT$dir
+ done
+}
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/module-setup.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/module-setup.sh
index d38617db..a9cfffb4 100755
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/module-setup.sh
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/module-setup.sh
@@ -72,6 +72,8 @@ install() {
inst_script "${_moddir}/initrd-system-upgrade-generator" \
"${generatordir}/initrd-system-upgrade-generator"
+ inst_script "${_moddir}/leapp_debug_tools.sh" "/bin/leapp_debug_tools.sh"
+
## upgrade shell service
#sysinit_wantsdir="${_initdir}${unitdir}/sysinit.target.wants"
#mkdir -p "$sysinit_wantsdir"
--
2.39.0

View File

@ -0,0 +1,80 @@
From 0ecb880774a2a74350d055afe7773ae0c31aaab9 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Tue, 20 Dec 2022 12:25:48 +0100
Subject: [PATCH 53/63] Add autosourcing
---
.../files/dracut/90sys-upgrade/.profile | 9 +++++++++
.../files/dracut/90sys-upgrade/.shrc | 4 ++++
.../files/dracut/90sys-upgrade/leapp_debug_tools.sh | 9 ++++++---
.../files/dracut/90sys-upgrade/module-setup.sh | 2 ++
4 files changed, 21 insertions(+), 3 deletions(-)
create mode 100644 repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/.profile
create mode 100644 repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/.shrc
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/.profile b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/.profile
new file mode 100644
index 00000000..c4fe05a7
--- /dev/null
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/.profile
@@ -0,0 +1,9 @@
+#!/bin/sh
+# script read at startup by login shells
+# in the initramfs this is read for example by the emergency shell
+
+# set the environment file, containing shell commands to execute at startup of
+# interactive shells
+if [ -f "$HOME/.shrc" ]; then
+ ENV="$HOME/.shrc"; export ENV
+fi
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/.shrc b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/.shrc
new file mode 100644
index 00000000..5e965f47
--- /dev/null
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/.shrc
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+# shell commands to execute on interactive shell startup
+. leapp_debug_tools.sh
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/leapp_debug_tools.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/leapp_debug_tools.sh
index 91c228ce..5878b75b 100644
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/leapp_debug_tools.sh
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/leapp_debug_tools.sh
@@ -1,3 +1,4 @@
+#!/bin/sh
# library containing some useful functions for debugging in initramfs
# mounts the sysroot
@@ -29,10 +30,12 @@ leapp_dbg_chroot() {
for dir in /sys /run /proc /dev /dev/pts; do
mount --bind $dir "$NEWROOT$dir"
- done || { echo "Failed to mount some directories" || return 1 }
+ done || {
+ echo "Failed to mount some directories" || return 1
+ }
- chroot $NEWROOT sh -c "mount -a; /bin/bash"
+ chroot "$NEWROOT" sh -c "mount -a; /bin/bash"
for dir in /sys /run /proc /dev/pts /dev; do
- umount $NEWROOT$dir
+ umount "$NEWROOT$dir"
done
}
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/module-setup.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/module-setup.sh
index a9cfffb4..06479fb5 100755
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/module-setup.sh
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/90sys-upgrade/module-setup.sh
@@ -73,6 +73,8 @@ install() {
"${generatordir}/initrd-system-upgrade-generator"
inst_script "${_moddir}/leapp_debug_tools.sh" "/bin/leapp_debug_tools.sh"
+ inst_script "${_moddir}/.profile" "/.profile"
+ inst_script "${_moddir}/.shrc" "/.shrc"
## upgrade shell service
#sysinit_wantsdir="${_initdir}${unitdir}/sysinit.target.wants"
--
2.39.0

View File

@ -0,0 +1,26 @@
From c29095d6d334dc57c7eff79c2726ec332098d6e1 Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Mon, 2 Jan 2023 13:29:56 +0100
Subject: [PATCH 54/63] Replace tabs with spaces in the dracut module
---
.../files/dracut/85sys-upgrade-redhat/do-upgrade.sh | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
index 49c26bc8..0763d5b3 100755
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
@@ -196,8 +196,7 @@ ibdmp() {
bring_up_network() {
if [ -f /etc/leapp-initram-network-manager ]; then
- # NOTE(ivasilev) Reverting the change to see if it caused the crash
- . /lib/dracut/hooks/cmdline/99-nm-config.sh
+ . /lib/dracut/hooks/cmdline/99-nm-config.sh
. /lib/dracut/hooks/initqueue/settled/99-nm-run.sh
fi
if [ -f /etc/leapp-initram-network-scripts ]; then
--
2.39.0

View File

@ -0,0 +1,53 @@
From 072cf0bbfcff8223f1b75fa05c621692d64a0af2 Mon Sep 17 00:00:00 2001
From: Jan Macku <jamacku@redhat.com>
Date: Fri, 26 Aug 2022 10:26:23 +0200
Subject: [PATCH 55/63] ci(lint): Add differential-shellcheck GitHub action
It performs differential ShellCheck scans and reports results directly in
the pull request.
documentation:
https://github.com/redhat-plumbers-in-action/differential-shellcheck
---
.github/workflows/differential-shellcheck.yml | 29 +++++++++++++++++++
1 file changed, 29 insertions(+)
create mode 100644 .github/workflows/differential-shellcheck.yml
diff --git a/.github/workflows/differential-shellcheck.yml b/.github/workflows/differential-shellcheck.yml
new file mode 100644
index 00000000..4af99f8d
--- /dev/null
+++ b/.github/workflows/differential-shellcheck.yml
@@ -0,0 +1,29 @@
+---
+# https://github.com/redhat-plumbers-in-action/differential-shellcheck#readme
+
+name: Differential ShellCheck
+on:
+ pull_request:
+ branches: [master]
+
+permissions:
+ contents: read
+
+jobs:
+ lint:
+ runs-on: ubuntu-latest
+
+ permissions:
+ security-events: write
+ pull-requests: write
+
+ steps:
+ - name: Repository checkout
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+
+ - name: Differential ShellCheck
+ uses: redhat-plumbers-in-action/differential-shellcheck@v3
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
--
2.39.0

View File

@ -0,0 +1,28 @@
From 1859d1811d6331eda8c9684fac47b12ca2e796ae Mon Sep 17 00:00:00 2001
From: Lubomir Rintel <lkundrak@v3.sk>
Date: Thu, 3 Nov 2022 13:53:34 +0100
Subject: [PATCH 56/63] Propagate TEST_PATHS to test_container targets
Allows for quick containerized runs of specific checks.
---
Makefile | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/Makefile b/Makefile
index 066a5fd2..d2201fcf 100644
--- a/Makefile
+++ b/Makefile
@@ -383,8 +383,8 @@ _test_container_ipu:
echo "Only supported TEST_CONT_IPUs are el7toel8, el8toel9"; exit 1; \
;; \
esac && \
- $(_CONTAINER_TOOL) exec -w /repocopy $$_CONT_NAME make clean && \
- $(_CONTAINER_TOOL) exec -w /repocopy -e REPOSITORIES $$_CONT_NAME make $${_TEST_CONT_TARGET:-test}
+ $(_CONTAINER_TOOL) exec -w /repocopy $$_CONT_NAME $(MAKE) clean && \
+ $(_CONTAINER_TOOL) exec -w /repocopy -e REPOSITORIES $$_CONT_NAME $(MAKE) $${_TEST_CONT_TARGET:-test} TEST_PATHS="$(TEST_PATHS)"
# Runs tests in a container
# Builds testing image first if it doesn't exist
--
2.39.0

View File

@ -0,0 +1,182 @@
From 6ada6553eadc08fbbaf69d54129e6d3cc0c214e3 Mon Sep 17 00:00:00 2001
From: PeterMocary <petermocary@gmail.com>
Date: Fri, 26 Aug 2022 15:44:50 +0200
Subject: [PATCH 57/63] Ignore external accounts in /etc/passwd
The /etc/passwd can contain special entries to selectively incorporate entries
from another service source such as NIS or LDAP. These entries don't need to
contain all the fields that are normally present in the /etc/passwd entry and
would cause the upgrade to fail in the facts phase.
---
.../systemfacts/libraries/systemfacts.py | 48 ++++++++---
.../systemfacts/tests/test_systemfacts.py | 85 ++++++++++++++++++-
2 files changed, 121 insertions(+), 12 deletions(-)
diff --git a/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py b/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py
index e34cb86b..d1eeb28c 100644
--- a/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py
+++ b/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py
@@ -60,13 +60,26 @@ def anyhasprefix(value, prefixes):
@aslist
def _get_system_users():
+ skipped_user_names = []
for p in pwd.getpwall():
- yield User(
- name=p.pw_name,
- uid=p.pw_uid,
- gid=p.pw_gid,
- home=p.pw_dir
- )
+ # The /etc/passwd can contain special entries from another service source such as NIS or LDAP. These entries
+ # start with + or - sign and might not contain all the mandatory fields, thus are skipped along with other
+ # invalid entries for now. The UID and GID fields are always defined by pwd to 0 even when not specifiead in
+ # /etc/passwd.
+ if p.pw_name != '' and not p.pw_name.startswith(('+', '-')) and p.pw_dir:
+ yield User(
+ name=p.pw_name,
+ uid=p.pw_uid,
+ gid=p.pw_gid,
+ home=p.pw_dir
+ )
+ else:
+ skipped_user_names.append(p.pw_name)
+
+ if skipped_user_names:
+ api.current_logger().debug("These users from /etc/passwd that are special entries for service "
+ "like NIS, or don't contain all mandatory fields won't be included "
+ "in UsersFacts: {}".format(skipped_user_names))
def get_system_users_status():
@@ -76,12 +89,25 @@ def get_system_users_status():
@aslist
def _get_system_groups():
+ skipped_group_names = []
for g in grp.getgrall():
- yield Group(
- name=g.gr_name,
- gid=g.gr_gid,
- members=g.gr_mem
- )
+ # The /etc/group can contain special entries from another service source such as NIS or LDAP. These entries
+ # start with + or - sign and might not contain all the mandatory fields, thus are skipped along with other
+ # invalid entries for now. The GID field is always defined by pwd to 0 even when not specifiead in
+ # /etc/group.
+ if g.gr_name != '' and not g.gr_name.startswith(('+', '-')):
+ yield Group(
+ name=g.gr_name,
+ gid=g.gr_gid,
+ members=g.gr_mem
+ )
+ else:
+ skipped_group_names.append(g.gr_name)
+
+ if skipped_group_names:
+ api.current_logger().debug("These groups from /etc/group that are special entries for service "
+ "like NIS, or don't contain all mandatory fields won't be included "
+ "in GroupsFacts: {}".format(skipped_group_names))
def get_system_groups_status():
diff --git a/repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts.py b/repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts.py
index f94003d5..badf174c 100644
--- a/repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts.py
+++ b/repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts.py
@@ -1,4 +1,11 @@
-from leapp.libraries.actor.systemfacts import anyendswith, anyhasprefix, aslist
+import grp
+import pwd
+
+import pytest
+
+from leapp.libraries.actor.systemfacts import _get_system_groups, _get_system_users, anyendswith, anyhasprefix, aslist
+from leapp.libraries.common.testutils import logger_mocked
+from leapp.libraries.stdlib import api
from leapp.snactor.fixture import current_actor_libraries
@@ -33,3 +40,79 @@ def test_aslist(current_actor_libraries):
r = local()
assert isinstance(r, list) and r[0] and r[2] and not r[1]
+
+
+@pytest.mark.parametrize(
+ ('etc_passwd_names', 'etc_passwd_directory', 'skipped_user_names'),
+ [
+ (['root', 'unbound', 'dbus'], '/', []),
+ (['root', '+@scanners', 'dbus', '-@usrc', ''], '/', ['+@scanners', '-@usrc', '']),
+ (['root', '+@scanners', 'dbus'], '', ['root', '+@scanners', 'dbus']),
+ ]
+)
+def test_get_system_users(monkeypatch, etc_passwd_names, etc_passwd_directory, skipped_user_names):
+
+ class MockedPwdEntry(object):
+ def __init__(self, pw_name, pw_uid, pw_gid, pw_dir):
+ self.pw_name = pw_name
+ self.pw_uid = pw_uid
+ self.pw_gid = pw_gid
+ self.pw_dir = pw_dir
+
+ etc_passwd_contents = []
+ for etc_passwd_name in etc_passwd_names:
+ etc_passwd_contents.append(MockedPwdEntry(etc_passwd_name, 0, 0, etc_passwd_directory))
+
+ monkeypatch.setattr(pwd, 'getpwall', lambda: etc_passwd_contents)
+ monkeypatch.setattr(api, 'current_logger', logger_mocked())
+
+ _get_system_users()
+
+ if skipped_user_names:
+ assert len(api.current_logger().dbgmsg) == 1
+
+ for skipped_user_name in skipped_user_names:
+ assert skipped_user_name in api.current_logger().dbgmsg[0]
+
+ for user_name in etc_passwd_names:
+ if user_name not in skipped_user_names:
+ assert user_name not in api.current_logger().dbgmsg[0]
+ else:
+ assert not api.current_logger().dbgmsg
+
+
+@pytest.mark.parametrize(
+ ('etc_group_names', 'skipped_group_names'),
+ [
+ (['cdrom', 'floppy', 'tape'], []),
+ (['cdrom', '+@scanners', 'floppy', '-@usrc', ''], ['+@scanners', '-@usrc', '']),
+ ]
+)
+def test_get_system_groups(monkeypatch, etc_group_names, skipped_group_names):
+
+ class MockedGrpEntry(object):
+ def __init__(self, gr_name, gr_gid, gr_mem):
+ self.gr_name = gr_name
+ self.gr_gid = gr_gid
+ self.gr_mem = gr_mem
+
+ etc_group_contents = []
+ for etc_group_name in etc_group_names:
+ etc_group_contents.append(MockedGrpEntry(etc_group_name, 0, []))
+
+ monkeypatch.setattr(grp, 'getgrall', lambda: etc_group_contents)
+ monkeypatch.setattr(api, 'current_logger', logger_mocked())
+
+ _get_system_groups()
+
+ if skipped_group_names:
+ assert len(api.current_logger().dbgmsg) == 1
+
+ for skipped_group_name in skipped_group_names:
+ assert skipped_group_name in api.current_logger().dbgmsg[0]
+
+ for group_name in etc_group_names:
+ if group_name not in skipped_group_names:
+ assert group_name not in api.current_logger().dbgmsg[0]
+ else:
+ assert not api.current_logger().dbgmsg
--
2.39.0

View File

@ -0,0 +1,111 @@
From 94bfc3e8a4fbe5923b59b828da65ae91babdcb56 Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Mon, 16 Jan 2023 16:01:05 +0100
Subject: [PATCH 58/63] pes_events_scanner: prefilter problematic events and
add back logging
jira: OAMG-8182, OAMG-8221
fixes: bz#2158527
---
.../libraries/pes_events_scanner.py | 60 ++++++++++++++++++-
1 file changed, 59 insertions(+), 1 deletion(-)
diff --git a/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py b/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py
index 96b63280..c254f4c0 100644
--- a/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py
+++ b/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py
@@ -1,4 +1,4 @@
-from collections import namedtuple
+from collections import defaultdict, namedtuple
from functools import partial
from leapp import reporting
@@ -126,6 +126,7 @@ def compute_pkg_changes_between_consequent_releases(source_installed_pkgs,
release,
seen_pkgs,
pkgs_to_demodularize):
+ logger = api.current_logger()
# Start with the installed packages and modify the set according to release events
target_pkgs = set(source_installed_pkgs)
@@ -154,6 +155,12 @@ def compute_pkg_changes_between_consequent_releases(source_installed_pkgs,
# For MERGE to be relevant it is sufficient for only one of its in_pkgs to be installed
if are_all_in_pkgs_present or (event.action == Action.MERGED and is_any_in_pkg_present):
+ removed_pkgs = target_pkgs.intersection(event.in_pkgs)
+ removed_pkgs_str = ', '.join(str(pkg) for pkg in removed_pkgs) if removed_pkgs else '[]'
+ added_pkgs_str = ', '.join(str(pkg) for pkg in event.out_pkgs) if event.out_pkgs else '[]'
+ logger.debug('Applying event %d (%s): replacing packages %s with %s',
+ event.id, event.action, removed_pkgs_str, added_pkgs_str)
+
# In pkgs are present, event can be applied
target_pkgs = target_pkgs.difference(event.in_pkgs)
target_pkgs = target_pkgs.union(event.out_pkgs)
@@ -163,6 +170,55 @@ def compute_pkg_changes_between_consequent_releases(source_installed_pkgs,
return (target_pkgs, pkgs_to_demodularize)
+def remove_undesired_events(events, relevant_to_releases):
+ """
+ Conservatively remove events that needless, or cause problems for the current implementation:
+ - (needless) events with to_release not in relevant releases
+ - (problematic) events with the same from_release and the same in_pkgs
+ """
+
+ logger = api.current_logger()
+ relevant_to_releases = set(relevant_to_releases)
+
+ events_with_same_in_pkgs_and_from_release = defaultdict(list)
+ for event in events:
+ if event.to_release in relevant_to_releases:
+ # NOTE(mhecko): The tuple(sorted(event.in_pkgs))) is ugly, however, the removal of the events with the same
+ # # from_release and in_pkgs is needed only because the current implementation is flawed.
+ # # I would love to rewrite the core algorithm as a "solution to graph reachability problem",
+ # # making the behaviour of PES event scanner purely data driven.
+ events_with_same_in_pkgs_and_from_release[(event.from_release, tuple(sorted(event.in_pkgs)))].append(event)
+
+ cleaned_events = []
+ for from_release_in_pkgs_pair, problematic_events in events_with_same_in_pkgs_and_from_release.items():
+ if len(problematic_events) == 1:
+ cleaned_events.append(problematic_events[0]) # There is no problem
+ continue
+
+ # E.g., one of the problematic events is to=8.6, other one to=8.7, keep only 8.7
+ from_release, dummy_in_pkgs = from_release_in_pkgs_pair
+ max_to_release = max((e.to_release for e in problematic_events))
+ events_with_max_to_release = [event for event in problematic_events if event.to_release == max_to_release]
+
+ if len(events_with_max_to_release) == 1:
+ # If there is a single event with maximal to_release, keep only that
+ kept_event = events_with_max_to_release[0]
+ event_ids = [event.id for event in problematic_events]
+ logger.debug('Events %s have the same in packages and the same from_release %s, keeping %d',
+ event_ids, from_release, kept_event.id)
+ cleaned_events.append(kept_event)
+ continue
+
+ # There are at least 2 events A, B with the same in_release, out_release and in_pkgs. If A is REMOVE and B
+ # performs some conditional mutation (e.g. SPLIT) a race-conflict arises. However, the current
+ # implementation would apply these events as `A(input_state) union B(input_state)`, where the input_state
+ # is kept immutable. Therefore, B will have an effect regardless of whether A is REMOVAL or not.
+ for event in problematic_events:
+ cleaned_events.append(event)
+
+ return cleaned_events
+
+
def compute_packages_on_target_system(source_pkgs, events, releases):
seen_pkgs = set(source_pkgs) # Used to track whether PRESENCE events can be applied
@@ -428,6 +484,8 @@ def process():
# packages of the target system, so we can distinguish what needs to be repomapped
repoids_of_source_pkgs = {pkg.repository for pkg in source_pkgs}
+ events = remove_undesired_events(events, releases)
+
# Apply events - compute what packages should the target system have
target_pkgs, pkgs_to_demodularize = compute_packages_on_target_system(source_pkgs, events, releases)
--
2.39.0

View File

@ -0,0 +1,188 @@
From bab105d15a0f848e341cd1b4ade4e4e7b3ab38aa Mon Sep 17 00:00:00 2001
From: mreznik <mreznik@redhat.com>
Date: Fri, 25 Nov 2022 09:53:53 +0100
Subject: [PATCH 59/63] Enable disabling dnf plugins in the dnfconfig library
When on AWS, we need to disable the "amazon-id" plugin during the
upgrade stage as we do not have network up and running there yet.
Moreover, even with the network up, we do already have all the data
cached so further communication with its backend could invalidate
the data.
---
.../common/libraries/dnfconfig.py | 26 +++++++++----
.../common/libraries/dnfplugin.py | 38 ++++++++++++++++---
2 files changed, 51 insertions(+), 13 deletions(-)
diff --git a/repos/system_upgrade/common/libraries/dnfconfig.py b/repos/system_upgrade/common/libraries/dnfconfig.py
index 64d6c204..5b8180f0 100644
--- a/repos/system_upgrade/common/libraries/dnfconfig.py
+++ b/repos/system_upgrade/common/libraries/dnfconfig.py
@@ -30,15 +30,21 @@ def _strip_split(data, sep, maxsplit=-1):
return [item.strip() for item in data.split(sep, maxsplit)]
-def _get_main_dump(context):
+def _get_main_dump(context, disable_plugins):
"""
Return the dnf configuration dump of main options for the given context.
Returns the list of lines after the line with "[main]" section
"""
+ cmd = ['dnf', 'config-manager', '--dump']
+
+ if disable_plugins:
+ for plugin in disable_plugins:
+ cmd += ['--disableplugin', plugin]
+
try:
- data = context.call(['dnf', 'config-manager', '--dump'], split=True)['stdout']
+ data = context.call(cmd, split=True)['stdout']
except CalledProcessError as e:
api.current_logger().error('Cannot obtain the dnf configuration')
raise StopActorExecutionError(
@@ -73,18 +79,18 @@ def _get_main_dump(context):
return output_data
-def _get_excluded_pkgs(context):
+def _get_excluded_pkgs(context, disable_plugins):
"""
Return the list of excluded packages for DNF in the given context.
It shouldn't be used on the source system. It is expected this functions
is called only in the target userspace container or on the target system.
"""
- pkgs = _strip_split(_get_main_dump(context).get('exclude', ''), ',')
+ pkgs = _strip_split(_get_main_dump(context, disable_plugins).get('exclude', ''), ',')
return [i for i in pkgs if i]
-def _set_excluded_pkgs(context, pkglist):
+def _set_excluded_pkgs(context, pkglist, disable_plugins):
"""
Configure DNF to exclude packages in the given list
@@ -93,6 +99,10 @@ def _set_excluded_pkgs(context, pkglist):
exclude = 'exclude={}'.format(','.join(pkglist))
cmd = ['dnf', 'config-manager', '--save', '--setopt', exclude]
+ if disable_plugins:
+ for plugin in disable_plugins:
+ cmd += ['--disableplugin', plugin]
+
try:
context.call(cmd)
except CalledProcessError:
@@ -101,7 +111,7 @@ def _set_excluded_pkgs(context, pkglist):
api.current_logger().debug('The DNF configuration has been updated to exclude leapp packages.')
-def exclude_leapp_rpms(context):
+def exclude_leapp_rpms(context, disable_plugins):
"""
Ensure the leapp RPMs are excluded from any DNF transaction.
@@ -112,5 +122,5 @@ def exclude_leapp_rpms(context):
So user will have to drop these packages from the exclude after the
upgrade.
"""
- to_exclude = list(set(_get_excluded_pkgs(context) + get_leapp_packages()))
- _set_excluded_pkgs(context, to_exclude)
+ to_exclude = list(set(_get_excluded_pkgs(context, disable_plugins) + get_leapp_packages()))
+ _set_excluded_pkgs(context, to_exclude, disable_plugins)
diff --git a/repos/system_upgrade/common/libraries/dnfplugin.py b/repos/system_upgrade/common/libraries/dnfplugin.py
index 7a15abc4..7f541c18 100644
--- a/repos/system_upgrade/common/libraries/dnfplugin.py
+++ b/repos/system_upgrade/common/libraries/dnfplugin.py
@@ -299,6 +299,8 @@ def perform_transaction_install(target_userspace_info, storage_info, used_repos,
Performs the actual installation with the DNF rhel-upgrade plugin using the target userspace
"""
+ stage = 'upgrade'
+
# These bind mounts are performed by systemd-nspawn --bind parameters
bind_mounts = [
'/:/installroot',
@@ -337,22 +339,28 @@ def perform_transaction_install(target_userspace_info, storage_info, used_repos,
# communicate with udev
cmd_prefix = ['nsenter', '--ipc=/installroot/proc/1/ns/ipc']
+ disable_plugins = []
+ if plugin_info:
+ for info in plugin_info:
+ if stage in info.disable_in:
+ disable_plugins += [info.name]
+
# we have to ensure the leapp packages will stay untouched
# Note: this is the most probably duplicate action - it should be already
# set like that, however seatbelt is a good thing.
- dnfconfig.exclude_leapp_rpms(context)
+ dnfconfig.exclude_leapp_rpms(context, disable_plugins)
if get_target_major_version() == '9':
_rebuild_rpm_db(context, root='/installroot')
_transaction(
- context=context, stage='upgrade', target_repoids=target_repoids, plugin_info=plugin_info, tasks=tasks,
+ context=context, stage=stage, target_repoids=target_repoids, plugin_info=plugin_info, tasks=tasks,
cmd_prefix=cmd_prefix
)
# we have to ensure the leapp packages will stay untouched even after the
# upgrade is fully finished (it cannot be done before the upgrade
# on the host as the config-manager plugin is available since rhel-8)
- dnfconfig.exclude_leapp_rpms(mounting.NotIsolatedActions(base_dir='/'))
+ dnfconfig.exclude_leapp_rpms(mounting.NotIsolatedActions(base_dir='/'), disable_plugins=disable_plugins)
@contextlib.contextmanager
@@ -377,10 +385,20 @@ def perform_transaction_check(target_userspace_info,
"""
Perform DNF transaction check using our plugin
"""
+
+ stage = 'check'
+
with _prepare_perform(used_repos=used_repos, target_userspace_info=target_userspace_info, xfs_info=xfs_info,
storage_info=storage_info, target_iso=target_iso) as (context, overlay, target_repoids):
apply_workarounds(overlay.nspawn())
- dnfconfig.exclude_leapp_rpms(context)
+
+ disable_plugins = []
+ if plugin_info:
+ for info in plugin_info:
+ if stage in info.disable_in:
+ disable_plugins += [info.name]
+
+ dnfconfig.exclude_leapp_rpms(context, disable_plugins)
_transaction(
context=context, stage='check', target_repoids=target_repoids, plugin_info=plugin_info, tasks=tasks
)
@@ -397,13 +415,23 @@ def perform_rpm_download(target_userspace_info,
"""
Perform RPM download including the transaction test using dnf with our plugin
"""
+
+ stage = 'download'
+
with _prepare_perform(used_repos=used_repos,
target_userspace_info=target_userspace_info,
xfs_info=xfs_info,
storage_info=storage_info,
target_iso=target_iso) as (context, overlay, target_repoids):
+
+ disable_plugins = []
+ if plugin_info:
+ for info in plugin_info:
+ if stage in info.disable_in:
+ disable_plugins += [info.name]
+
apply_workarounds(overlay.nspawn())
- dnfconfig.exclude_leapp_rpms(context)
+ dnfconfig.exclude_leapp_rpms(context, disable_plugins)
_transaction(
context=context, stage='download', target_repoids=target_repoids, plugin_info=plugin_info, tasks=tasks,
test=True, on_aws=on_aws
--
2.39.0

View File

@ -0,0 +1,164 @@
From f5a3d626cf97c193ab1523401827c2a4c89310ea Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Matej=20Matu=C5=A1ka?= <mmatuska@redhat.com>
Date: Fri, 20 Jan 2023 14:03:59 +0100
Subject: [PATCH 60/63] Prevent failed upgrade from restarting in initramfs
(#996)
* Prevent failed upgrade from restarting in initramfs
When the upgrade fails in the initramfs the dracut shell is entered.
Upon exiting the dracut shell, the upgrade.target is restarted which
causes the upgrade.service, which runs the leapp upgrade, to rerun as
well.
This commit fixes that by creating a "flag" file when the upgrade
fails, whose existence is checked before rerunning the upgrade and the
upgrade is prevented in such case.
Also, a new removeupgradeartifacts actor is introduced to clean up leftover upgrade artifacts, including the upgrade failed flag file, at the beginning of the upgrade process.
Jira ref.: OAMG-4224
---
.../dracut/85sys-upgrade-redhat/do-upgrade.sh | 20 +++++++++++++
.../actors/removeupgradeartifacts/actor.py | 23 +++++++++++++++
.../libraries/removeupgradeartifacts.py | 17 +++++++++++
.../tests/test_removeupgradeartifacts.py | 28 +++++++++++++++++++
4 files changed, 88 insertions(+)
create mode 100644 repos/system_upgrade/common/actors/removeupgradeartifacts/actor.py
create mode 100644 repos/system_upgrade/common/actors/removeupgradeartifacts/libraries/removeupgradeartifacts.py
create mode 100644 repos/system_upgrade/common/actors/removeupgradeartifacts/tests/test_removeupgradeartifacts.py
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
index 0763d5b3..04540c1d 100755
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
@@ -46,6 +46,8 @@ fi
export NSPAWN_OPTS="$NSPAWN_OPTS --keep-unit --register=no --timezone=off --resolv-conf=off"
+export LEAPP_FAILED_FLAG_FILE="/root/tmp_leapp_py3/.leapp_upgrade_failed"
+
#
# Temp for collecting and preparing tarball
#
@@ -268,6 +270,15 @@ do_upgrade() {
rv=$?
fi
+ if [ "$rv" -ne 0 ]; then
+ # set the upgrade failed flag to prevent the upgrade from running again
+ # when the emergency shell exits and the upgrade.target is restarted
+ local dirname
+ dirname="$("$NEWROOT/bin/dirname" "$NEWROOT$LEAPP_FAILED_FLAG_FILE")"
+ [ -d "$dirname" ] || mkdir "$dirname"
+ "$NEWROOT/bin/touch" "$NEWROOT$LEAPP_FAILED_FLAG_FILE"
+ fi
+
# Dump debug data in case something went wrong
if want_inband_dump "$rv"; then
collect_and_dump_debug_data
@@ -338,6 +349,15 @@ mount -o "remount,rw" "$NEWROOT"
##### do the upgrade #######
(
+ # check if leapp previously failed in the initramfs, if it did return to the emergency shell
+ [ -f "$NEWROOT$LEAPP_FAILED_FLAG_FILE" ] && {
+ echo >&2 "Found file $NEWROOT$LEAPP_FAILED_FLAG_FILE"
+ echo >&2 "Error: Leapp previously failed and cannot continue, returning back to emergency shell"
+ echo >&2 "Please file a support case with $NEWROOT/var/log/leapp/leapp-upgrade.log attached"
+ echo >&2 "To rerun the upgrade upon exiting the dracut shell remove the $NEWROOT$LEAPP_FAILED_FLAG_FILE file"
+ exit 1
+ }
+
[ ! -x "$NEWROOT$LEAPPBIN" ] && {
warn "upgrade binary '$LEAPPBIN' missing!"
exit 1
diff --git a/repos/system_upgrade/common/actors/removeupgradeartifacts/actor.py b/repos/system_upgrade/common/actors/removeupgradeartifacts/actor.py
new file mode 100644
index 00000000..5eb60d27
--- /dev/null
+++ b/repos/system_upgrade/common/actors/removeupgradeartifacts/actor.py
@@ -0,0 +1,23 @@
+from leapp.actors import Actor
+from leapp.libraries.actor import removeupgradeartifacts
+from leapp.tags import InterimPreparationPhaseTag, IPUWorkflowTag
+
+
+class RemoveUpgradeArtifacts(Actor):
+ """
+ Removes artifacts left over by previous leapp runs
+
+ After the upgrade process, there might be some leftover files, which need
+ to be cleaned up before running another upgrade.
+
+ Removed artifacts:
+ - /root/tmp_leapp_py3/ directory (includes ".leapp_upgrade_failed" flag file)
+ """
+
+ name = 'remove_upgrade_artifacts'
+ consumes = ()
+ produces = ()
+ tags = (InterimPreparationPhaseTag, IPUWorkflowTag)
+
+ def process(self):
+ removeupgradeartifacts.process()
diff --git a/repos/system_upgrade/common/actors/removeupgradeartifacts/libraries/removeupgradeartifacts.py b/repos/system_upgrade/common/actors/removeupgradeartifacts/libraries/removeupgradeartifacts.py
new file mode 100644
index 00000000..aa748d9d
--- /dev/null
+++ b/repos/system_upgrade/common/actors/removeupgradeartifacts/libraries/removeupgradeartifacts.py
@@ -0,0 +1,17 @@
+import os
+
+from leapp.libraries.stdlib import api, CalledProcessError, run
+
+UPGRADE_ARTIFACTS_DIR = '/root/tmp_leapp_py3/'
+
+
+def process():
+ if os.path.exists(UPGRADE_ARTIFACTS_DIR):
+ api.current_logger().debug(
+ "Removing leftover upgrade artifacts dir: {} ".format(UPGRADE_ARTIFACTS_DIR))
+
+ try:
+ run(['rm', '-rf', UPGRADE_ARTIFACTS_DIR])
+ except (CalledProcessError, OSError) as e:
+ api.current_logger().debug(
+ 'Failed to remove leftover upgrade artifacts dir: {}'.format(e))
diff --git a/repos/system_upgrade/common/actors/removeupgradeartifacts/tests/test_removeupgradeartifacts.py b/repos/system_upgrade/common/actors/removeupgradeartifacts/tests/test_removeupgradeartifacts.py
new file mode 100644
index 00000000..aee4d7c6
--- /dev/null
+++ b/repos/system_upgrade/common/actors/removeupgradeartifacts/tests/test_removeupgradeartifacts.py
@@ -0,0 +1,28 @@
+import os
+
+import pytest
+
+from leapp.libraries.actor import removeupgradeartifacts
+
+
+@pytest.mark.parametrize(('exists', 'should_remove'), [
+ (True, True),
+ (False, False),
+])
+def test_remove_upgrade_artifacts(monkeypatch, exists, should_remove):
+
+ called = [False]
+
+ def mocked_run(cmd, *args, **kwargs):
+ assert cmd[0] == 'rm'
+ assert cmd[1] == '-rf'
+ assert cmd[2] == removeupgradeartifacts.UPGRADE_ARTIFACTS_DIR
+ called[0] = True
+ return {'exit_code': 0, 'stdout': '', 'stderr': ''}
+
+ monkeypatch.setattr(os.path, 'exists', lambda _: exists)
+ monkeypatch.setattr(removeupgradeartifacts, 'run', mocked_run)
+
+ removeupgradeartifacts.process()
+
+ assert called[0] == should_remove
--
2.39.0

View File

@ -0,0 +1,137 @@
From 00ab521d952d413a095b8b48e5615bedaed41c13 Mon Sep 17 00:00:00 2001
From: Evgeni Golov <evgeni@golov.de>
Date: Thu, 12 Jan 2023 12:37:36 +0100
Subject: [PATCH 61/63] BZ#2142270 - run reindexdb to fix issues due to new
locales in RHEL8
---
.../libraries/satellite_upgrade_check.py | 12 +++++++++---
.../tests/unit_test_satellite_upgrade_check.py | 6 +++++-
.../el7toel8/actors/satellite_upgrader/actor.py | 7 +++++++
.../tests/unit_test_satellite_upgrader.py | 17 +++++++++++++++--
4 files changed, 36 insertions(+), 6 deletions(-)
diff --git a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_check/libraries/satellite_upgrade_check.py b/repos/system_upgrade/el7toel8/actors/satellite_upgrade_check/libraries/satellite_upgrade_check.py
index c33e4f6e..6954dd50 100644
--- a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_check/libraries/satellite_upgrade_check.py
+++ b/repos/system_upgrade/el7toel8/actors/satellite_upgrade_check/libraries/satellite_upgrade_check.py
@@ -23,9 +23,13 @@ def satellite_upgrade_check(facts):
title = "Satellite PostgreSQL data migration"
flags = []
severity = reporting.Severity.MEDIUM
+ reindex_msg = textwrap.dedent("""
+ After the data has been moved to the new location, all databases will require a REINDEX.
+ This will happen automatically during the first boot of the system.
+ """).strip()
if facts.postgresql.same_partition:
- summary = "Your PostgreSQL data will be automatically migrated."
+ migration_msg = "Your PostgreSQL data will be automatically migrated."
else:
scl_psql_path = '/var/opt/rh/rh-postgresql12/lib/pgsql/data/'
if facts.postgresql.space_required > facts.postgresql.space_available:
@@ -36,7 +40,7 @@ def satellite_upgrade_check(facts):
else:
storage_message = """You currently have enough free storage to move the data.
This operation can be performed by the upgrade process."""
- summary = """
+ migration_msg = """
Your PostgreSQL data in {} is currently on a dedicated volume.
PostgreSQL on RHEL8 expects the data to live in /var/lib/pgsql/data.
{}
@@ -44,9 +48,11 @@ def satellite_upgrade_check(facts):
so that the contents of {} are available in /var/lib/pgsql/data.
""".format(scl_psql_path, storage_message, scl_psql_path)
+ summary = "{}\n{}".format(textwrap.dedent(migration_msg).strip(), reindex_msg)
+
reporting.create_report([
reporting.Title(title),
- reporting.Summary(textwrap.dedent(summary).strip()),
+ reporting.Summary(summary),
reporting.Severity(severity),
reporting.Groups([]),
reporting.Groups(flags)
diff --git a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_check/tests/unit_test_satellite_upgrade_check.py b/repos/system_upgrade/el7toel8/actors/satellite_upgrade_check/tests/unit_test_satellite_upgrade_check.py
index 0e1969b7..8b75adf7 100644
--- a/repos/system_upgrade/el7toel8/actors/satellite_upgrade_check/tests/unit_test_satellite_upgrade_check.py
+++ b/repos/system_upgrade/el7toel8/actors/satellite_upgrade_check/tests/unit_test_satellite_upgrade_check.py
@@ -42,9 +42,11 @@ def test_same_disk(monkeypatch):
expected_title = 'Satellite PostgreSQL data migration'
expected_summary = 'Your PostgreSQL data will be automatically migrated.'
+ expected_reindex = 'all databases will require a REINDEX'
assert expected_title == reporting.create_report.report_fields['title']
- assert expected_summary == reporting.create_report.report_fields['summary']
+ assert expected_summary in reporting.create_report.report_fields['summary']
+ assert expected_reindex in reporting.create_report.report_fields['summary']
def test_different_disk_sufficient_storage(monkeypatch):
@@ -58,9 +60,11 @@ def test_different_disk_sufficient_storage(monkeypatch):
expected_title = 'Satellite PostgreSQL data migration'
expected_summary = 'You currently have enough free storage to move the data'
+ expected_reindex = 'all databases will require a REINDEX'
assert expected_title == reporting.create_report.report_fields['title']
assert expected_summary in reporting.create_report.report_fields['summary']
+ assert expected_reindex in reporting.create_report.report_fields['summary']
def test_different_disk_insufficient_storage(monkeypatch):
diff --git a/repos/system_upgrade/el7toel8/actors/satellite_upgrader/actor.py b/repos/system_upgrade/el7toel8/actors/satellite_upgrader/actor.py
index bd1a5d68..b699e6de 100644
--- a/repos/system_upgrade/el7toel8/actors/satellite_upgrader/actor.py
+++ b/repos/system_upgrade/el7toel8/actors/satellite_upgrader/actor.py
@@ -32,3 +32,10 @@ class SatelliteUpgrader(Actor):
api.current_logger().error(
'Could not run the installer, please inspect the logs in /var/log/foreman-installer!'
)
+
+ if facts.postgresql.local_postgresql:
+ api.current_actor().show_message('Re-indexing the database. This can take a while.')
+ try:
+ run(['runuser', '-u', 'postgres', '--', 'reindexdb', '-a'])
+ except (OSError, CalledProcessError) as e:
+ api.current_logger().error('Failed to run `reindexdb`: {}'.format(str(e)))
diff --git a/repos/system_upgrade/el7toel8/actors/satellite_upgrader/tests/unit_test_satellite_upgrader.py b/repos/system_upgrade/el7toel8/actors/satellite_upgrader/tests/unit_test_satellite_upgrader.py
index d62815ca..21dce7f2 100644
--- a/repos/system_upgrade/el7toel8/actors/satellite_upgrader/tests/unit_test_satellite_upgrader.py
+++ b/repos/system_upgrade/el7toel8/actors/satellite_upgrader/tests/unit_test_satellite_upgrader.py
@@ -17,7 +17,8 @@ class MockedRun(object):
def test_run_installer(monkeypatch, current_actor_context):
mocked_run = MockedRun()
monkeypatch.setattr('leapp.libraries.stdlib.run', mocked_run)
- current_actor_context.feed(SatelliteFacts(has_foreman=True, postgresql=SatellitePostgresqlFacts()))
+ current_actor_context.feed(SatelliteFacts(has_foreman=True,
+ postgresql=SatellitePostgresqlFacts(local_postgresql=False)))
current_actor_context.run()
assert mocked_run.commands
assert len(mocked_run.commands) == 1
@@ -28,8 +29,20 @@ def test_run_installer_without_katello(monkeypatch, current_actor_context):
mocked_run = MockedRun()
monkeypatch.setattr('leapp.libraries.stdlib.run', mocked_run)
current_actor_context.feed(SatelliteFacts(has_foreman=True, has_katello_installer=False,
- postgresql=SatellitePostgresqlFacts()))
+ postgresql=SatellitePostgresqlFacts(local_postgresql=False)))
current_actor_context.run()
assert mocked_run.commands
assert len(mocked_run.commands) == 1
assert mocked_run.commands[0] == ['foreman-installer']
+
+
+def test_run_reindexdb(monkeypatch, current_actor_context):
+ mocked_run = MockedRun()
+ monkeypatch.setattr('leapp.libraries.stdlib.run', mocked_run)
+ current_actor_context.feed(SatelliteFacts(has_foreman=True,
+ postgresql=SatellitePostgresqlFacts(local_postgresql=True)))
+ current_actor_context.run()
+ assert mocked_run.commands
+ assert len(mocked_run.commands) == 2
+ assert mocked_run.commands[0] == ['foreman-installer', '--disable-system-checks']
+ assert mocked_run.commands[1] == ['runuser', '-u', 'postgres', '--', 'reindexdb', '-a']
--
2.39.0

View File

@ -0,0 +1,53 @@
From c591be26437f6ad65de1f52fe85839cb0e1fc765 Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Fri, 20 Jan 2023 12:07:42 +0100
Subject: [PATCH 62/63] Improve the hint in peseventsscanner for unknown
repositories
The original msg guided users to open ticket on RHBZ portal, which
has been confusing as the repository is used by other linux
distributions also and they haven't updated the msg properly,
so people have been asking for bugfixes unrelated to RHEL systems,
which could not be fixed by Red Hat (RH) as this is connected
to leapp data, which in case of RH covers only official repositories
and packages provided by RH. Other distributions are expected to provide
the correct leapp data valid for these systems to reflect the content
of the used linux distribution.
To fix this problem, we have decided to update the hint to improve
UX, so they report the problem as they are used for their
distribution. Also the hint has been improved to provide more
instructions what user can do on the system
* change the used (custom) repoid to the official one
* review the planned dnf transaction to see whether there is a problem
regarding the listed packages
* install missing packages after the upgrade manually
---
.../peseventsscanner/libraries/pes_events_scanner.py | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py b/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py
index c254f4c0..b0a87269 100644
--- a/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py
+++ b/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py
@@ -439,8 +439,14 @@ def replace_pesids_with_repoids_in_packages(packages, source_pkgs_repoids):
message='packages may not be installed or upgraded due to repositories unknown to leapp:',
skipped_pkgs=packages_without_known_repoid,
remediation=(
- 'Please file a bug in http://bugzilla.redhat.com/ for leapp-repository component of '
- 'the Red Hat Enterprise Linux product.'
+ 'In case the listed repositories are mirrors of official repositories for RHEL'
+ ' (provided by Red Hat on CDN)'
+ ' and their repositories IDs has been customized, you can change'
+ ' the configuration to use the official IDs instead of fixing the problem.'
+ ' You can also review the projected DNF upgrade transaction result'
+ ' in the logs to see what is going to happen, as this does not necessarily mean'
+ ' that the listed packages will not be upgraded. You can also'
+ ' install any missing packages after the in-place upgrade manually.'
),
)
--
2.39.0

View File

@ -0,0 +1,118 @@
From 35d22f3063acd24ee1e3ba2f2a21c0b17e251bfc Mon Sep 17 00:00:00 2001
From: ina vasilevskaya <ivasilev@redhat.com>
Date: Fri, 20 Jan 2023 17:06:32 +0100
Subject: [PATCH 63/63] Ensure a baseos and appstream repos are available when
upgrade with RHSM (#1001)
Previously we have tested if 2+ RHSM repositories are available.
However, this led to various issues when the repositories provided
via satellite were e.g. *baseos* and *supplementary*. The original
check passed in such a case, but the upgrade transaction failed
due to missing rpms from the missing *appstream* repository.
The current check includes the verification that both repositories
are present, searching the *baseos* and *appstream* substrings
in repoids - when speaking about RHSM repositories. If such
repositories are not discovered, the upgrade is inhibited.
The responsibility for custom repositories is kept on user as before.
---
.../libraries/userspacegen.py | 5 +-
.../tests/unit_test_targetuserspacecreator.py | 50 +++++++++++++++----
2 files changed, 45 insertions(+), 10 deletions(-)
diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
index f2391ee8..6335eb5b 100644
--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
+++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
@@ -494,7 +494,10 @@ def _get_rhsm_available_repoids(context):
# TODO: very similar thing should happens for all other repofiles in container
#
repoids = rhsm.get_available_repo_ids(context)
- if not repoids or len(repoids) < 2:
+ # NOTE(ivasilev) For the moment at least AppStream and BaseOS repos are required. While we are still
+ # contemplating on what can be a generic solution to checking this, let's introduce a minimal check for
+ # at-least-one-appstream and at-least-one-baseos among present repoids
+ if not repoids or all("baseos" not in ri for ri in repoids) or all("appstream" not in ri for ri in repoids):
reporting.create_report([
reporting.Title('Cannot find required basic RHEL target repositories.'),
reporting.Summary(
diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py
index 5f544471..a519275e 100644
--- a/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py
+++ b/repos/system_upgrade/common/actors/targetuserspacecreator/tests/unit_test_targetuserspacecreator.py
@@ -302,17 +302,20 @@ def test_gather_target_repositories_rhui(monkeypatch):
assert target_repoids == set(['rhui-1', 'rhui-2'])
-@pytest.mark.skip(reason="Currently not implemented in the actor. It's TODO.")
-def test_gather_target_repositories_required_not_available(monkeypatch):
+def test_gather_target_repositories_baseos_appstream_not_available(monkeypatch):
# If the repos that Leapp identifies as required for the upgrade (based on the repo mapping and PES data) are not
# available, an exception shall be raised
+ indata = testInData(
+ _PACKAGES_MSGS, _RHSMINFO_MSG, None, _XFS_MSG, _STORAGEINFO_MSG, None
+ )
+ monkeypatch.setattr(rhsm, 'skip_rhsm', lambda: False)
+
mocked_produce = produce_mocked()
monkeypatch.setattr(userspacegen.api, 'current_actor', CurrentActorMocked())
monkeypatch.setattr(userspacegen.api.current_actor(), 'produce', mocked_produce)
# The available RHSM repos
monkeypatch.setattr(rhsm, 'get_available_repo_ids', lambda x: ['repoidA', 'repoidB', 'repoidC'])
- monkeypatch.setattr(rhsm, 'skip_rhsm', lambda: False)
# The required RHEL repos based on the repo mapping and PES data + custom repos required by third party actors
monkeypatch.setattr(userspacegen.api, 'consume', lambda x: iter([models.TargetRepositories(
rhel_repos=[models.RHELTargetRepository(repoid='repoidX'),
@@ -320,12 +323,41 @@ def test_gather_target_repositories_required_not_available(monkeypatch):
custom_repos=[models.CustomTargetRepository(repoid='repoidCustom')])]))
with pytest.raises(StopActorExecution):
- userspacegen.gather_target_repositories(None)
- assert mocked_produce.called
- reports = [m.report for m in mocked_produce.model_instances if isinstance(m, reporting.Report)]
- inhibitors = [m for m in reports if 'INHIBITOR' in m.get('flags', ())]
- assert len(inhibitors) == 1
- assert inhibitors[0].get('title', '') == 'Cannot find required basic RHEL target repositories.'
+ userspacegen.gather_target_repositories(None, indata)
+ assert mocked_produce.called
+ reports = [m.report for m in mocked_produce.model_instances if isinstance(m, reporting.Report)]
+ inhibitors = [m for m in reports if 'inhibitor' in m.get('groups', ())]
+ assert len(inhibitors) == 1
+ assert inhibitors[0].get('title', '') == 'Cannot find required basic RHEL target repositories.'
+ # Now test the case when either of AppStream and BaseOs is not available, upgrade should be inhibited
+ mocked_produce = produce_mocked()
+ monkeypatch.setattr(userspacegen.api, 'current_actor', CurrentActorMocked())
+ monkeypatch.setattr(userspacegen.api.current_actor(), 'produce', mocked_produce)
+ monkeypatch.setattr(rhsm, 'get_available_repo_ids', lambda x: ['repoidA', 'repoidB', 'repoidC-appstream'])
+ monkeypatch.setattr(userspacegen.api, 'consume', lambda x: iter([models.TargetRepositories(
+ rhel_repos=[models.RHELTargetRepository(repoid='repoidC-appstream'),
+ models.RHELTargetRepository(repoid='repoidA')],
+ custom_repos=[models.CustomTargetRepository(repoid='repoidCustom')])]))
+ with pytest.raises(StopActorExecution):
+ userspacegen.gather_target_repositories(None, indata)
+ reports = [m.report for m in mocked_produce.model_instances if isinstance(m, reporting.Report)]
+ inhibitors = [m for m in reports if 'inhibitor' in m.get('groups', ())]
+ assert len(inhibitors) == 1
+ assert inhibitors[0].get('title', '') == 'Cannot find required basic RHEL target repositories.'
+ mocked_produce = produce_mocked()
+ monkeypatch.setattr(userspacegen.api, 'current_actor', CurrentActorMocked())
+ monkeypatch.setattr(userspacegen.api.current_actor(), 'produce', mocked_produce)
+ monkeypatch.setattr(rhsm, 'get_available_repo_ids', lambda x: ['repoidA', 'repoidB', 'repoidC-baseos'])
+ monkeypatch.setattr(userspacegen.api, 'consume', lambda x: iter([models.TargetRepositories(
+ rhel_repos=[models.RHELTargetRepository(repoid='repoidC-baseos'),
+ models.RHELTargetRepository(repoid='repoidA')],
+ custom_repos=[models.CustomTargetRepository(repoid='repoidCustom')])]))
+ with pytest.raises(StopActorExecution):
+ userspacegen.gather_target_repositories(None, indata)
+ reports = [m.report for m in mocked_produce.model_instances if isinstance(m, reporting.Report)]
+ inhibitors = [m for m in reports if 'inhibitor' in m.get('groups', ())]
+ assert len(inhibitors) == 1
+ assert inhibitors[0].get('title', '') == 'Cannot find required basic RHEL target repositories.'
def mocked_consume_data():
--
2.39.0

View File

@ -0,0 +1,65 @@
From e3936ebbd880bc79c2972af1bccc86fae733bf34 Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Tue, 8 Nov 2022 17:44:28 +0100
Subject: [PATCH] Fix the check of memory (RAM) limits
The checkmem actor was incorrect as the limits have been written
in MiBs however the value obtained from /proc/meminfo is in KiBs.
So the actual check has been incorrect.
Also the memory limits have been changed since the creation of the
actor. Updating the values in KiBs based on the current table:
https://access.redhat.com/articles/rhel-limits
The report msg has been updated to print the values in MiB instead
of KiB.
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2139907
---
.../checkmemory/libraries/checkmemory.py | 21 ++++++++++++-------
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/repos/system_upgrade/common/actors/checkmemory/libraries/checkmemory.py b/repos/system_upgrade/common/actors/checkmemory/libraries/checkmemory.py
index ea8bfe69..1045e5c6 100644
--- a/repos/system_upgrade/common/actors/checkmemory/libraries/checkmemory.py
+++ b/repos/system_upgrade/common/actors/checkmemory/libraries/checkmemory.py
@@ -5,10 +5,10 @@ from leapp.libraries.stdlib import api
from leapp.models import MemoryInfo
min_req_memory = {
- architecture.ARCH_X86_64: 1536, # 1.5G
- architecture.ARCH_ARM64: 2048, # 2Gb
- architecture.ARCH_PPC64LE: 2048, # 2Gb
- architecture.ARCH_S390X: 1024 # 1Gb
+ architecture.ARCH_X86_64: 1572864, # 1.5G
+ architecture.ARCH_ARM64: 1572864, # 1.5G
+ architecture.ARCH_PPC64LE: 3145728, # 3G
+ architecture.ARCH_S390X: 1572864, # 1.5G
}
@@ -33,12 +33,17 @@ def process():
if minimum_req_error:
title = 'Minimum memory requirements for RHEL {} are not met'.format(version.get_target_major_version())
- summary = 'Memory detected: {} KiB, required: {} KiB'.format(minimum_req_error['detected'],
- minimum_req_error['minimal_req'])
+ summary = 'Memory detected: {} MiB, required: {} MiB'.format(
+ int(minimum_req_error['detected'] / 1024), # noqa: W1619; pylint: disable=old-division
+ int(minimum_req_error['minimal_req'] / 1024), # noqa: W1619; pylint: disable=old-division
+ )
reporting.create_report([
reporting.Title(title),
reporting.Summary(summary),
reporting.Severity(reporting.Severity.HIGH),
- reporting.Groups([reporting.Groups.SANITY]),
- reporting.Groups([reporting.Groups.INHIBITOR]),
+ reporting.Groups([reporting.Groups.SANITY, reporting.Groups.INHIBITOR]),
+ reporting.ExternalLink(
+ url='https://access.redhat.com/articles/rhel-limits',
+ title='Red Hat Enterprise Linux Technology Capabilities and Limits'
+ ),
])
--
2.39.0

View File

@ -0,0 +1,64 @@
From ded8348f31dfb2838f79c6c14036a42bc508bc93 Mon Sep 17 00:00:00 2001
From: Lubomir Rintel <lkundrak@v3.sk>
Date: Mon, 26 Sep 2022 11:01:04 +0200
Subject: [PATCH 65/75] Add IfCfg model
This represents the legacy network configuration stored in
/etc/sysconfig/network-scripts in form of ifcfg-* files
(along with associated keys-, rules-, routes-, etc. files).
---
repos/system_upgrade/el8toel9/models/ifcfg.py | 42 +++++++++++++++++++
1 file changed, 42 insertions(+)
create mode 100644 repos/system_upgrade/el8toel9/models/ifcfg.py
diff --git a/repos/system_upgrade/el8toel9/models/ifcfg.py b/repos/system_upgrade/el8toel9/models/ifcfg.py
new file mode 100644
index 00000000..b0607fed
--- /dev/null
+++ b/repos/system_upgrade/el8toel9/models/ifcfg.py
@@ -0,0 +1,42 @@
+from leapp.models import fields, Model
+from leapp.topics import SystemInfoTopic
+
+
+class IfCfgProperty(Model):
+ """
+ Key-value pair for ifcfg properties.
+
+ This model is not expected to be used as a message (produced/consumed by actors).
+ It is used from within the IfCfg model.
+ """
+ topic = SystemInfoTopic
+
+ name = fields.String()
+ """ Name of a property """
+ value = fields.Nullable(fields.String())
+ """ Value of a property """
+
+
+class IfCfg(Model):
+ """
+ IfCfg file describing legacy network configuration
+
+ Produced for every ifcfg file loaded from key-value ("sysconfig")
+ format described in nm-settings-ifcfg-rh(5) manual.
+ """
+ topic = SystemInfoTopic
+
+ filename = fields.String()
+ """ Path to file this model was populated from """
+ properties = fields.List(fields.Model(IfCfgProperty), default=[])
+ """ The list of name-value pairs from ifcfg file """
+ secrets = fields.Nullable(fields.List(fields.Model(IfCfgProperty)))
+ """ The list of name-value pairs from keys file """
+ rules = fields.Nullable(fields.List(fields.String()))
+ """ The list of traffic rules for IPv4 """
+ rules6 = fields.Nullable(fields.List(fields.String()))
+ """ The list of traffic rules for IPv6 """
+ routes = fields.Nullable(fields.List(fields.String()))
+ """ The list of routes for IPv4 """
+ routes6 = fields.Nullable(fields.List(fields.String()))
+ """ The list of routes for IPv6 """
--
2.39.0

View File

@ -0,0 +1,246 @@
From cce48a6c1ad138b3217939ccfdb0f271a8492890 Mon Sep 17 00:00:00 2001
From: Lubomir Rintel <lkundrak@v3.sk>
Date: Mon, 26 Sep 2022 10:57:59 +0200
Subject: [PATCH 66/75] Add IfCfgScanner actor
This scans the legacy network configuration in
/etc/sysconfig/network-scripts and produces an IfCfg for each ifcfg-*
file encountered (along with associated keys-, rules-, routes-, etc. files).
---
.../el8toel9/actors/ifcfgscanner/actor.py | 18 +++
.../ifcfgscanner/libraries/ifcfgscanner.py | 67 ++++++++++
.../tests/unit_test_ifcfgscanner.py | 123 ++++++++++++++++++
3 files changed, 208 insertions(+)
create mode 100644 repos/system_upgrade/el8toel9/actors/ifcfgscanner/actor.py
create mode 100644 repos/system_upgrade/el8toel9/actors/ifcfgscanner/libraries/ifcfgscanner.py
create mode 100644 repos/system_upgrade/el8toel9/actors/ifcfgscanner/tests/unit_test_ifcfgscanner.py
diff --git a/repos/system_upgrade/el8toel9/actors/ifcfgscanner/actor.py b/repos/system_upgrade/el8toel9/actors/ifcfgscanner/actor.py
new file mode 100644
index 00000000..dd94986b
--- /dev/null
+++ b/repos/system_upgrade/el8toel9/actors/ifcfgscanner/actor.py
@@ -0,0 +1,18 @@
+from leapp.actors import Actor
+from leapp.libraries.actor import ifcfgscanner
+from leapp.models import IfCfg
+from leapp.tags import FactsPhaseTag, IPUWorkflowTag
+
+
+class IfCfgScanner(Actor):
+ """
+ Scan ifcfg files with legacy network configuration
+ """
+
+ name = "ifcfg_scanner"
+ consumes = ()
+ produces = (IfCfg,)
+ tags = (IPUWorkflowTag, FactsPhaseTag,)
+
+ def process(self):
+ ifcfgscanner.process()
diff --git a/repos/system_upgrade/el8toel9/actors/ifcfgscanner/libraries/ifcfgscanner.py b/repos/system_upgrade/el8toel9/actors/ifcfgscanner/libraries/ifcfgscanner.py
new file mode 100644
index 00000000..cfc385dc
--- /dev/null
+++ b/repos/system_upgrade/el8toel9/actors/ifcfgscanner/libraries/ifcfgscanner.py
@@ -0,0 +1,67 @@
+import errno
+from os import listdir, path
+
+from leapp.libraries.stdlib import api
+from leapp.models import IfCfg, IfCfgProperty
+
+SYSCONFIG_DIR = "/etc/sysconfig/network-scripts"
+
+
+def aux_file(prefix, filename):
+ directory = path.dirname(filename)
+ keys_base = path.basename(filename).replace("ifcfg-", prefix)
+ return path.join(directory, keys_base)
+
+
+def process_ifcfg(filename, secrets=False):
+ if not path.exists(filename):
+ return None
+
+ properties = []
+ for line in open(filename).readlines():
+ try:
+ (name, value) = line.split("#")[0].strip().split("=")
+ if secrets:
+ value = None
+ except ValueError:
+ # We're not interested in lines that are not
+ # simple assignments. Play it safe.
+ continue
+
+ properties.append(IfCfgProperty(name=name, value=value))
+ return properties
+
+
+def process_plain(filename):
+ if not path.exists(filename):
+ return None
+ return open(filename).readlines()
+
+
+def process_file(filename):
+ api.produce(IfCfg(
+ filename=filename,
+ properties=process_ifcfg(filename),
+ secrets=process_ifcfg(aux_file("keys-", filename), secrets=True),
+ rules=process_plain(aux_file("rule-", filename)),
+ rules6=process_plain(aux_file("rule6-", filename)),
+ routes=process_plain(aux_file("route-", filename)),
+ routes6=process_plain(aux_file("route6-", filename)),
+ ))
+
+
+def process_dir(directory):
+ try:
+ keyfiles = listdir(directory)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ return
+ raise
+
+ for f in keyfiles:
+ if f.startswith("ifcfg-"):
+ process_file(path.join(directory, f))
+
+
+def process():
+ process_dir(SYSCONFIG_DIR)
diff --git a/repos/system_upgrade/el8toel9/actors/ifcfgscanner/tests/unit_test_ifcfgscanner.py b/repos/system_upgrade/el8toel9/actors/ifcfgscanner/tests/unit_test_ifcfgscanner.py
new file mode 100644
index 00000000..f5e3056a
--- /dev/null
+++ b/repos/system_upgrade/el8toel9/actors/ifcfgscanner/tests/unit_test_ifcfgscanner.py
@@ -0,0 +1,123 @@
+import errno
+import textwrap
+from os.path import basename
+
+import mock
+import six
+
+from leapp.libraries.actor import ifcfgscanner
+from leapp.libraries.common.testutils import make_OSError, produce_mocked
+from leapp.libraries.stdlib import api
+from leapp.models import IfCfg
+
+_builtins_open = "builtins.open" if six.PY3 else "__builtin__.open"
+
+
+def _listdir_ifcfg(path):
+ if path == ifcfgscanner.SYSCONFIG_DIR:
+ return ["ifcfg-net0"]
+ raise make_OSError(errno.ENOENT)
+
+
+def _listdir_ifcfg2(path):
+ if path == ifcfgscanner.SYSCONFIG_DIR:
+ return ["ifcfg-net0", "ifcfg-net1"]
+ raise make_OSError(errno.ENOENT)
+
+
+def _exists_ifcfg(filename):
+ return basename(filename).startswith("ifcfg-")
+
+
+def _exists_keys(filename):
+ if _exists_ifcfg(filename):
+ return True
+ return basename(filename).startswith("keys-")
+
+
+def test_no_conf(monkeypatch):
+ """
+ No report if there are no ifcfg files.
+ """
+
+ monkeypatch.setattr(ifcfgscanner, "listdir", lambda _: ())
+ monkeypatch.setattr(api, "produce", produce_mocked())
+ ifcfgscanner.process()
+ assert not api.produce.called
+
+
+def test_ifcfg1(monkeypatch):
+ """
+ Parse a single ifcfg file.
+ """
+
+ ifcfg_file = textwrap.dedent("""
+ TYPE=Wireless # Some comment
+ # Another comment
+ ESSID=wep1
+ NAME=wep1
+ MODE=Managed
+ WEP_KEY_FLAGS=ask
+ SECURITYMODE=open
+ DEFAULTKEY=1
+ KEY_TYPE=key
+ """)
+
+ mock_config = mock.mock_open(read_data=ifcfg_file)
+ with mock.patch(_builtins_open, mock_config):
+ monkeypatch.setattr(ifcfgscanner, "listdir", _listdir_ifcfg)
+ monkeypatch.setattr(ifcfgscanner.path, "exists", _exists_ifcfg)
+ monkeypatch.setattr(api, "produce", produce_mocked())
+ ifcfgscanner.process()
+
+ assert api.produce.called == 1
+ assert len(api.produce.model_instances) == 1
+ ifcfg = api.produce.model_instances[0]
+ assert isinstance(ifcfg, IfCfg)
+ assert ifcfg.filename == "/etc/sysconfig/network-scripts/ifcfg-net0"
+ assert ifcfg.secrets is None
+ assert len(ifcfg.properties) == 8
+ assert ifcfg.properties[0].name == "TYPE"
+ assert ifcfg.properties[0].value == "Wireless"
+ assert ifcfg.properties[1].name == "ESSID"
+ assert ifcfg.properties[1].value == "wep1"
+
+
+def test_ifcfg2(monkeypatch):
+ """
+ Parse two ifcfg files.
+ """
+
+ mock_config = mock.mock_open(read_data="TYPE=Ethernet")
+ with mock.patch(_builtins_open, mock_config):
+ monkeypatch.setattr(ifcfgscanner, "listdir", _listdir_ifcfg2)
+ monkeypatch.setattr(ifcfgscanner.path, "exists", _exists_ifcfg)
+ monkeypatch.setattr(api, "produce", produce_mocked())
+ ifcfgscanner.process()
+
+ assert api.produce.called == 2
+ assert len(api.produce.model_instances) == 2
+ ifcfg = api.produce.model_instances[0]
+ assert isinstance(ifcfg, IfCfg)
+
+
+def test_ifcfg_key(monkeypatch):
+ """
+ Report ifcfg secrets from keys- file.
+ """
+
+ mock_config = mock.mock_open(read_data="KEY_PASSPHRASE1=Hell0")
+ with mock.patch(_builtins_open, mock_config):
+ monkeypatch.setattr(ifcfgscanner, "listdir", _listdir_ifcfg)
+ monkeypatch.setattr(ifcfgscanner.path, "exists", _exists_keys)
+ monkeypatch.setattr(api, "produce", produce_mocked())
+ ifcfgscanner.process()
+
+ assert api.produce.called == 1
+ assert len(api.produce.model_instances) == 1
+ ifcfg = api.produce.model_instances[0]
+ assert isinstance(ifcfg, IfCfg)
+ assert ifcfg.filename == "/etc/sysconfig/network-scripts/ifcfg-net0"
+ assert len(ifcfg.secrets) == 1
+ assert ifcfg.secrets[0].name == "KEY_PASSPHRASE1"
+ assert ifcfg.secrets[0].value is None
--
2.39.0

View File

@ -0,0 +1,68 @@
From 2e7a7e40423c2f63d261b1dc088df1d3be04b45d Mon Sep 17 00:00:00 2001
From: Lubomir Rintel <lkundrak@v3.sk>
Date: Mon, 26 Sep 2022 11:03:20 +0200
Subject: [PATCH 67/75] Add NetworkManagerConnection model
This represents the NetworkManager connection profiles in form
of keyfiles at /etc/NetworkManager/system-connections.
---
.../models/networkmanagerconnection.py | 47 +++++++++++++++++++
1 file changed, 47 insertions(+)
create mode 100644 repos/system_upgrade/el8toel9/models/networkmanagerconnection.py
diff --git a/repos/system_upgrade/el8toel9/models/networkmanagerconnection.py b/repos/system_upgrade/el8toel9/models/networkmanagerconnection.py
new file mode 100644
index 00000000..e3456b77
--- /dev/null
+++ b/repos/system_upgrade/el8toel9/models/networkmanagerconnection.py
@@ -0,0 +1,47 @@
+from leapp.models import fields, Model
+from leapp.topics import SystemInfoTopic
+
+
+class NetworkManagerConnectionProperty(Model):
+ """
+ Name-value pair for NetworkManager properties.
+
+ This model is not expected to be used as a message (produced/consumed by actors).
+ It is used within NetworkManagerConnectionSetting of a NetworkManagerConnection.
+ """
+ topic = SystemInfoTopic
+
+ name = fields.String()
+ """ Name of a property """
+ value = fields.String()
+ """ Value of a property """
+
+
+class NetworkManagerConnectionSetting(Model):
+ """
+ NetworkManager setting, composed of a name and a list of name-value pairs.
+
+ This model is not expected to be used as a message (produced/consumed by actors).
+ It is used within NetworkManagerConnection.
+ """
+ topic = SystemInfoTopic
+
+ name = fields.String()
+ """ The NetworkManager setting name """
+ properties = fields.List(fields.Model(NetworkManagerConnectionProperty), default=[])
+ """ The name-value pair for every setting property """
+
+
+class NetworkManagerConnection(Model):
+ """
+ NetworkManager native keyfile connection
+
+ Produced for every connection profile loaded from INI-stile files
+ described in nm-settings-keyfile(5) manual.
+ """
+ topic = SystemInfoTopic
+
+ settings = fields.List(fields.Model(NetworkManagerConnectionSetting), default=[])
+ """ List of NetworkManager settings """
+ filename = fields.String()
+ """ Path to file this model was populated from """
--
2.39.0

View File

@ -0,0 +1,288 @@
From c4dd229113c70a7c402e4488ab0a30e4605e8d60 Mon Sep 17 00:00:00 2001
From: Lubomir Rintel <lkundrak@v3.sk>
Date: Mon, 26 Sep 2022 10:58:31 +0200
Subject: [PATCH 68/75] Add NetworkManagerConnectionScanner actor
This scans the NetworkManager connection profiles in form of keyfiles at
/etc/NetworkManager/system-connections and produces a
NetworkManagerConnection message for each one.
This doesn't need the NetworkManager daemon to be actually running,
but needs GObject introspection to be available. The reason for that is
that libnm is used (via Gir) to strip the secrets.
Add requirement for
NetworkManager-libnm
python3-gobject-base
packages. Both are available for all architectures on RHEL 8 and 9.
Currently require them only on RHEL 8 as they are not used in the
code anywhere for RHEL 9 and they seem to be used only for the upgrade
from RHEL 8 to RHEL 9.
Bump leapp-repository-dependencies to 9
---
packaging/leapp-repository.spec | 7 +-
.../other_specs/leapp-el7toel8-deps.spec | 3 +-
.../networkmanagerconnectionscanner/actor.py | 18 +++
.../networkmanagerconnectionscanner.py | 65 +++++++++++
...it_test_networkmanagerconnectionscanner.py | 105 ++++++++++++++++++
5 files changed, 196 insertions(+), 2 deletions(-)
create mode 100644 repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/actor.py
create mode 100644 repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/libraries/networkmanagerconnectionscanner.py
create mode 100644 repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/tests/unit_test_networkmanagerconnectionscanner.py
diff --git a/packaging/leapp-repository.spec b/packaging/leapp-repository.spec
index 044e7275..8d6376ea 100644
--- a/packaging/leapp-repository.spec
+++ b/packaging/leapp-repository.spec
@@ -2,7 +2,7 @@
%global repositorydir %{leapp_datadir}/repositories
%global custom_repositorydir %{leapp_datadir}/custom-repositories
-%define leapp_repo_deps 8
+%define leapp_repo_deps 9
%if 0%{?rhel} == 7
%define leapp_python_sitelib %{python2_sitelib}
@@ -176,6 +176,11 @@ Requires: kmod
# and missing dracut could be killing situation for us :)
Requires: dracut
+# Required to scan NetworkManagerConnection (e.g. to recognize secrets)
+# NM is requested to be used on RHEL 8+ systems
+Requires: NetworkManager-libnm
+Requires: python3-gobject-base
+
%endif
##################################################
# end requirement
diff --git a/packaging/other_specs/leapp-el7toel8-deps.spec b/packaging/other_specs/leapp-el7toel8-deps.spec
index 822b6f63..4a181ee1 100644
--- a/packaging/other_specs/leapp-el7toel8-deps.spec
+++ b/packaging/other_specs/leapp-el7toel8-deps.spec
@@ -9,7 +9,7 @@
%endif
-%define leapp_repo_deps 8
+%define leapp_repo_deps 9
%define leapp_framework_deps 5
# NOTE: the Version contains the %{rhel} macro just for the convenience to
@@ -68,6 +68,7 @@ Requires: cpio
# just to be sure that /etc/modprobe.d is present
Requires: kmod
+
%description -n %{lrdname}
%{summary}
diff --git a/repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/actor.py b/repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/actor.py
new file mode 100644
index 00000000..6ee66b52
--- /dev/null
+++ b/repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/actor.py
@@ -0,0 +1,18 @@
+from leapp.actors import Actor
+from leapp.libraries.actor import networkmanagerconnectionscanner
+from leapp.models import NetworkManagerConnection
+from leapp.tags import FactsPhaseTag, IPUWorkflowTag
+
+
+class NetworkManagerConnectionScanner(Actor):
+ """
+ Scan NetworkManager connection keyfiles
+ """
+
+ name = "network_manager_connection_scanner"
+ consumes = ()
+ produces = (NetworkManagerConnection,)
+ tags = (IPUWorkflowTag, FactsPhaseTag,)
+
+ def process(self):
+ networkmanagerconnectionscanner.process()
diff --git a/repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/libraries/networkmanagerconnectionscanner.py b/repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/libraries/networkmanagerconnectionscanner.py
new file mode 100644
index 00000000..b148de6b
--- /dev/null
+++ b/repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/libraries/networkmanagerconnectionscanner.py
@@ -0,0 +1,65 @@
+import errno
+import os
+
+from leapp.exceptions import StopActorExecutionError
+from leapp.libraries.common import utils
+from leapp.libraries.stdlib import api
+from leapp.models import NetworkManagerConnection, NetworkManagerConnectionProperty, NetworkManagerConnectionSetting
+
+libnm_available = False
+err_details = None
+try:
+ import gi
+ try:
+ gi.require_version("NM", "1.0")
+ from gi.repository import GLib, NM
+ libnm_available = True
+ except ValueError:
+ err_details = 'NetworkManager-libnm package is not available'
+except ImportError:
+ err_details = 'python3-gobject-base package is not available'
+
+NM_CONN_DIR = "/etc/NetworkManager/system-connections"
+
+
+def process_file(filename):
+ # We're running this through libnm in order to clear the secrets.
+ # We don't know what keys are secret, but libnm does.
+ keyfile = GLib.KeyFile()
+ keyfile.load_from_file(filename, GLib.KeyFileFlags.NONE)
+ con = NM.keyfile_read(keyfile, NM_CONN_DIR, NM.KeyfileHandlerFlags.NONE)
+ con.clear_secrets()
+ keyfile = NM.keyfile_write(con, NM.KeyfileHandlerFlags.NONE)
+ cp = utils.parse_config(keyfile.to_data()[0])
+
+ settings = []
+ for setting_name in cp.sections():
+ properties = []
+ for name, value in cp.items(setting_name, raw=True):
+ properties.append(NetworkManagerConnectionProperty(name=name, value=value))
+ settings.append(
+ NetworkManagerConnectionSetting(name=setting_name, properties=properties)
+ )
+ api.produce(NetworkManagerConnection(filename=filename, settings=settings))
+
+
+def process_dir(directory):
+ try:
+ keyfiles = os.listdir(directory)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ return
+ raise
+
+ for f in keyfiles:
+ process_file(os.path.join(NM_CONN_DIR, f))
+
+
+def process():
+ if libnm_available:
+ process_dir(NM_CONN_DIR)
+ else:
+ raise StopActorExecutionError(
+ message='Failed to read NetworkManager connections',
+ details=err_details
+ )
diff --git a/repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/tests/unit_test_networkmanagerconnectionscanner.py b/repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/tests/unit_test_networkmanagerconnectionscanner.py
new file mode 100644
index 00000000..46af07c1
--- /dev/null
+++ b/repos/system_upgrade/el8toel9/actors/networkmanagerconnectionscanner/tests/unit_test_networkmanagerconnectionscanner.py
@@ -0,0 +1,105 @@
+import errno
+import textwrap
+
+import pytest
+import six
+
+from leapp.libraries.actor import networkmanagerconnectionscanner as nmconnscanner
+from leapp.libraries.common.testutils import make_OSError, produce_mocked
+from leapp.libraries.stdlib import api
+from leapp.models import NetworkManagerConnection
+
+_builtins_open = "builtins.open" if six.PY3 else "__builtin__.open"
+
+
+def _listdir_nm_conn(path):
+ if path == nmconnscanner.NM_CONN_DIR:
+ return ["conn1.nmconnection"]
+ raise make_OSError(errno.ENOENT)
+
+
+def _listdir_nm_conn2(path):
+ if path == nmconnscanner.NM_CONN_DIR:
+ return ["conn1.nmconnection", "conn2.nmconnection"]
+ raise make_OSError(errno.ENOENT)
+
+
+def _load_from_file(keyfile, filename, flags):
+ if filename.endswith(".nmconnection"):
+ return keyfile.load_from_data(textwrap.dedent("""
+ [connection]
+ type=wifi
+ id=conn1
+ uuid=a1bc695d-c548-40e8-9c7f-205a6587135d
+
+ [wifi]
+ mode=infrastructure
+ ssid=wifi
+
+ [wifi-security]
+ auth-alg=open
+ key-mgmt=none
+ wep-key-type=1
+ wep-key0=abcde
+ """), nmconnscanner.GLib.MAXSIZE, flags)
+ raise make_OSError(errno.ENOENT)
+
+
+@pytest.mark.skipif(not nmconnscanner.libnm_available, reason="NetworkManager g-ir not installed")
+def test_no_conf(monkeypatch):
+ """
+ No report if there are no keyfiles
+ """
+
+ monkeypatch.setattr(nmconnscanner.os, "listdir", lambda _: ())
+ monkeypatch.setattr(api, "produce", produce_mocked())
+ nmconnscanner.process()
+ assert not api.produce.called
+
+
+@pytest.mark.skipif(not nmconnscanner.libnm_available, reason="NetworkManager g-ir not installed")
+def test_nm_conn(monkeypatch):
+ """
+ Check a basic keyfile
+ """
+
+ monkeypatch.setattr(nmconnscanner.os, "listdir", _listdir_nm_conn)
+ monkeypatch.setattr(api, "produce", produce_mocked())
+ monkeypatch.setattr(nmconnscanner.GLib.KeyFile, "load_from_file", _load_from_file)
+ nmconnscanner.process()
+
+ assert api.produce.called == 1
+ assert len(api.produce.model_instances) == 1
+ nm_conn = api.produce.model_instances[0]
+ assert isinstance(nm_conn, NetworkManagerConnection)
+ assert nm_conn.filename == "/etc/NetworkManager/system-connections/conn1.nmconnection"
+ assert len(nm_conn.settings) == 3
+ assert nm_conn.settings[0].name == "connection"
+ assert len(nm_conn.settings[0].properties) == 4
+ assert nm_conn.settings[0].properties[0].name == "id"
+ assert nm_conn.settings[0].properties[0].value == "conn1"
+ assert nm_conn.settings[2].name == "wifi-security"
+
+ # It's important that wep-key0 is gone
+ assert len(nm_conn.settings[2].properties) == 3
+ assert nm_conn.settings[2].properties[0].name == "auth-alg"
+ assert nm_conn.settings[2].properties[0].value == "open"
+ assert nm_conn.settings[2].properties[1].name != "wep-key0"
+ assert nm_conn.settings[2].properties[2].name != "wep-key0"
+
+
+@pytest.mark.skipif(not nmconnscanner.libnm_available, reason="NetworkManager g-ir not installed")
+def test_nm_conn2(monkeypatch):
+ """
+ Check a pair of keyfiles
+ """
+
+ monkeypatch.setattr(nmconnscanner.os, "listdir", _listdir_nm_conn2)
+ monkeypatch.setattr(api, "produce", produce_mocked())
+ monkeypatch.setattr(nmconnscanner.GLib.KeyFile, "load_from_file", _load_from_file)
+ nmconnscanner.process()
+
+ assert api.produce.called == 2
+ assert len(api.produce.model_instances) == 2
+ assert api.produce.model_instances[0].filename.endswith("/conn1.nmconnection")
+ assert api.produce.model_instances[1].filename.endswith("/conn2.nmconnection")
--
2.39.0

View File

@ -0,0 +1,34 @@
From 791e42430bf17502419a42a8d3067f3622bb221d Mon Sep 17 00:00:00 2001
From: Lubomir Rintel <lkundrak@v3.sk>
Date: Thu, 3 Nov 2022 13:54:10 +0100
Subject: [PATCH 69/75] Install python3-gobject-base and NetworkManager-libnm
in f34
The NetworkManagerConnectionScanner requires GObject introspection and
libnm to be installed.
The RHEL8 UBI doesn't contain NetworkManager at all and the Python 2
platforms are too old to support GObject introspection.
Let's add the packages to the f34 base so that we test the scanner
somewhere.
---
utils/container-tests/Containerfile.f34 | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/utils/container-tests/Containerfile.f34 b/utils/container-tests/Containerfile.f34
index a74153e1..a9346635 100644
--- a/utils/container-tests/Containerfile.f34
+++ b/utils/container-tests/Containerfile.f34
@@ -3,7 +3,7 @@ FROM fedora:34
VOLUME /repo
RUN dnf update -y && \
- dnf install -y findutils make rsync
+ dnf install -y findutils make rsync python3-gobject-base NetworkManager-libnm
ENV PYTHON_VENV python3.9
--
2.39.0

View File

@ -0,0 +1,375 @@
From dc7dc4d712c1e32a62701319130f8dd66da5ecc4 Mon Sep 17 00:00:00 2001
From: Lubomir Rintel <lkundrak@v3.sk>
Date: Mon, 26 Sep 2022 11:01:35 +0200
Subject: [PATCH 70/75] Make CheckNetworkDeprecations consume IfCfg and
NetworkManagerConnection
This actor used to scan the NetworkManager keyfiles and ifcfg files
itself. No more!
---
.../actors/networkdeprecations/actor.py | 7 +-
.../libraries/networkdeprecations.py | 71 +++----
.../tests/unit_test_networkdeprecations.py | 192 ++++++++----------
3 files changed, 111 insertions(+), 159 deletions(-)
diff --git a/repos/system_upgrade/el8toel9/actors/networkdeprecations/actor.py b/repos/system_upgrade/el8toel9/actors/networkdeprecations/actor.py
index 19113e4f..3074a3c7 100644
--- a/repos/system_upgrade/el8toel9/actors/networkdeprecations/actor.py
+++ b/repos/system_upgrade/el8toel9/actors/networkdeprecations/actor.py
@@ -1,7 +1,7 @@
from leapp.actors import Actor
from leapp.libraries.actor import networkdeprecations
-from leapp.models import Report
-from leapp.tags import FactsPhaseTag, IPUWorkflowTag
+from leapp.models import IfCfg, NetworkManagerConnection, Report
+from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
class CheckNetworkDeprecations(Actor):
@@ -16,8 +16,9 @@ class CheckNetworkDeprecations(Actor):
"""
name = "network_deprecations"
+ consumes = (IfCfg, NetworkManagerConnection,)
produces = (Report,)
- tags = (IPUWorkflowTag, FactsPhaseTag,)
+ tags = (ChecksPhaseTag, IPUWorkflowTag,)
def process(self):
networkdeprecations.process()
diff --git a/repos/system_upgrade/el8toel9/actors/networkdeprecations/libraries/networkdeprecations.py b/repos/system_upgrade/el8toel9/actors/networkdeprecations/libraries/networkdeprecations.py
index 2a6a2de9..92dfc51d 100644
--- a/repos/system_upgrade/el8toel9/actors/networkdeprecations/libraries/networkdeprecations.py
+++ b/repos/system_upgrade/el8toel9/actors/networkdeprecations/libraries/networkdeprecations.py
@@ -1,11 +1,6 @@
-import errno
-import os
-
from leapp import reporting
-from leapp.libraries.common import utils
-
-SYSCONFIG_DIR = '/etc/sysconfig/network-scripts'
-NM_CONN_DIR = '/etc/NetworkManager/system-connections'
+from leapp.libraries.stdlib import api
+from leapp.models import IfCfg, NetworkManagerConnection
FMT_LIST_SEPARATOR = '\n - '
@@ -13,56 +8,36 @@ FMT_LIST_SEPARATOR = '\n - '
def process():
wep_files = []
- # Scan NetworkManager native keyfiles
- try:
- keyfiles = os.listdir(NM_CONN_DIR)
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- keyfiles = []
-
- for f in keyfiles:
- path = os.path.join(NM_CONN_DIR, f)
-
- cp = utils.parse_config(open(path, mode='r').read())
-
- if not cp.has_section('wifi-security'):
- continue
+ # Scan NetworkManager native keyfile connections
+ for nmconn in api.consume(NetworkManagerConnection):
+ for setting in nmconn.settings:
+ if not setting.name == 'wifi-security':
+ continue
- key_mgmt = cp.get('wifi-security', 'key-mgmt')
- if key_mgmt in ('none', 'ieee8021x'):
- wep_files.append(path)
+ for prop in setting.properties:
+ if not prop.name == 'key-mgmt':
+ continue
+ if prop.value in ('none', 'ieee8021x'):
+ wep_files.append(nmconn.filename)
# Scan legacy ifcfg files & secrets
- try:
- ifcfgs = os.listdir(SYSCONFIG_DIR)
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- ifcfgs = []
-
- for f in ifcfgs:
- path = os.path.join(SYSCONFIG_DIR, f)
+ for ifcfg in api.consume(IfCfg):
+ props = ifcfg.properties
+ if ifcfg.secrets is not None:
+ props = props + ifcfg.secrets
- if not f.startswith('ifcfg-') and not f.startswith('keys-'):
- continue
-
- for line in open(path).readlines():
- try:
- (key, value) = line.split('#')[0].strip().split('=')
- except ValueError:
- # We're not interested in lines that are not
- # simple assignments. Play it safe.
- continue
+ for prop in props:
+ name = prop.name
+ value = prop.value
# Dynamic WEP
- if key == 'KEY_MGMT' and value.upper() == 'IEEE8021X':
- wep_files.append(path)
+ if name == 'KEY_MGMT' and value.upper() == 'IEEE8021X':
+ wep_files.append(ifcfg.filename)
continue
# Static WEP, possibly with agent-owned secrets
- if key in ('KEY_PASSPHRASE1', 'KEY1', 'WEP_KEY_FLAGS'):
- wep_files.append(path)
+ if name in ('KEY_PASSPHRASE1', 'KEY1', 'WEP_KEY_FLAGS'):
+ wep_files.append(ifcfg.filename)
continue
if wep_files:
diff --git a/repos/system_upgrade/el8toel9/actors/networkdeprecations/tests/unit_test_networkdeprecations.py b/repos/system_upgrade/el8toel9/actors/networkdeprecations/tests/unit_test_networkdeprecations.py
index bd140405..659ab993 100644
--- a/repos/system_upgrade/el8toel9/actors/networkdeprecations/tests/unit_test_networkdeprecations.py
+++ b/repos/system_upgrade/el8toel9/actors/networkdeprecations/tests/unit_test_networkdeprecations.py
@@ -1,148 +1,124 @@
-import errno
-import textwrap
-
-import mock
-import six
-
-from leapp import reporting
-from leapp.libraries.actor import networkdeprecations
-from leapp.libraries.common.testutils import create_report_mocked, make_OSError
-
-
-def _listdir_nm_conn(path):
- if path == networkdeprecations.NM_CONN_DIR:
- return ['connection']
- raise make_OSError(errno.ENOENT)
-
-
-def _listdir_ifcfg(path):
- if path == networkdeprecations.SYSCONFIG_DIR:
- return ['ifcfg-wireless']
- raise make_OSError(errno.ENOENT)
-
-
-def _listdir_keys(path):
- if path == networkdeprecations.SYSCONFIG_DIR:
- return ['keys-wireless']
- raise make_OSError(errno.ENOENT)
-
-
-def test_no_conf(monkeypatch):
+from leapp.models import (
+ IfCfg,
+ IfCfgProperty,
+ NetworkManagerConnection,
+ NetworkManagerConnectionProperty,
+ NetworkManagerConnectionSetting
+)
+from leapp.reporting import Report
+from leapp.utils.report import is_inhibitor
+
+
+def test_no_conf(current_actor_context):
"""
No report if there are no networks.
"""
- monkeypatch.setattr(networkdeprecations.os, 'listdir', lambda _: ())
- monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
- networkdeprecations.process()
- assert not reporting.create_report.called
+ current_actor_context.run()
+ assert not current_actor_context.consume(Report)
-def test_no_wireless(monkeypatch):
+def test_no_wireless(current_actor_context):
"""
No report if there's a keyfile, but it's not for a wireless connection.
"""
- mock_config = mock.mock_open(read_data='[connection]')
- with mock.patch('builtins.open' if six.PY3 else '__builtin__.open', mock_config):
- monkeypatch.setattr(networkdeprecations.os, 'listdir', _listdir_nm_conn)
- monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
- networkdeprecations.process()
- assert not reporting.create_report.called
+ not_wifi_nm_conn = NetworkManagerConnection(filename='/NM/wlan0.nmconn', settings=(
+ NetworkManagerConnectionSetting(name='connection'),
+ ))
+ current_actor_context.feed(not_wifi_nm_conn)
+ current_actor_context.run()
+ assert not current_actor_context.consume(Report)
-def test_keyfile_static_wep(monkeypatch):
+
+def test_keyfile_static_wep(current_actor_context):
"""
Report if there's a static WEP keyfile.
"""
- STATIC_WEP_CONN = textwrap.dedent("""
- [wifi-security]
- auth-alg=open
- key-mgmt=none
- wep-key-type=1
- wep-key0=abcde
- """)
+ static_wep_nm_conn = NetworkManagerConnection(filename='/NM/wlan0.nmconn', settings=(
+ NetworkManagerConnectionSetting(name='wifi-security', properties=(
+ NetworkManagerConnectionProperty(name='auth-alg', value='open'),
+ NetworkManagerConnectionProperty(name='key-mgmt', value='none'),
+ NetworkManagerConnectionProperty(name='wep-key-type', value='1'),
+ )),
+ ))
- mock_config = mock.mock_open(read_data=STATIC_WEP_CONN)
- with mock.patch('builtins.open' if six.PY3 else '__builtin__.open', mock_config):
- monkeypatch.setattr(networkdeprecations.os, 'listdir', _listdir_nm_conn)
- monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
- networkdeprecations.process()
- assert reporting.create_report.called
+ current_actor_context.feed(static_wep_nm_conn)
+ current_actor_context.run()
+ report_fields = current_actor_context.consume(Report)[0].report
+ assert is_inhibitor(report_fields)
-def test_keyfile_dynamic_wep(monkeypatch):
+def test_keyfile_dynamic_wep(current_actor_context):
"""
Report if there's a dynamic WEP keyfile.
"""
- DYNAMIC_WEP_CONN = textwrap.dedent("""
- [wifi-security]
- key-mgmt=ieee8021x
- """)
+ dynamic_wep_conn = NetworkManagerConnection(filename='/NM/wlan0.nmconn', settings=(
+ NetworkManagerConnectionSetting(name='wifi-security', properties=(
+ NetworkManagerConnectionProperty(name='key-mgmt', value='ieee8021x'),
+ )),
+ ))
- mock_config = mock.mock_open(read_data=DYNAMIC_WEP_CONN)
- with mock.patch('builtins.open' if six.PY3 else '__builtin__.open', mock_config):
- monkeypatch.setattr(networkdeprecations.os, 'listdir', _listdir_nm_conn)
- monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
- networkdeprecations.process()
- assert reporting.create_report.called
+ current_actor_context.feed(dynamic_wep_conn)
+ current_actor_context.run()
+ report_fields = current_actor_context.consume(Report)[0].report
+ assert is_inhibitor(report_fields)
-def test_ifcfg_static_wep_ask(monkeypatch):
+def test_ifcfg_static_wep_ask(current_actor_context):
"""
Report if there's a static WEP sysconfig without stored key.
"""
- STATIC_WEP_ASK_KEY_SYSCONFIG = textwrap.dedent("""
- TYPE=Wireless
- ESSID=wep1
- NAME=wep1
- MODE=Managed
- WEP_KEY_FLAGS=ask
- SECURITYMODE=open
- DEFAULTKEY=1
- KEY_TYPE=key
- """)
-
- mock_config = mock.mock_open(read_data=STATIC_WEP_ASK_KEY_SYSCONFIG)
- with mock.patch('builtins.open' if six.PY3 else '__builtin__.open', mock_config):
- monkeypatch.setattr(networkdeprecations.os, 'listdir', _listdir_ifcfg)
- monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
- networkdeprecations.process()
- assert reporting.create_report.called
-
-
-def test_ifcfg_static_wep(monkeypatch):
+ static_wep_ask_key_ifcfg = IfCfg(filename='/NM/ifcfg-wlan0', properties=(
+ IfCfgProperty(name='TYPE', value='Wireless'),
+ IfCfgProperty(name='ESSID', value='wep1'),
+ IfCfgProperty(name='NAME', value='wep1'),
+ IfCfgProperty(name='MODE', value='Managed'),
+ IfCfgProperty(name='WEP_KEY_FLAGS', value='ask'),
+ IfCfgProperty(name='SECURITYMODE', value='open'),
+ IfCfgProperty(name='DEFAULTKEY', value='1'),
+ IfCfgProperty(name='KEY_TYPE', value='key'),
+ ))
+
+ current_actor_context.feed(static_wep_ask_key_ifcfg)
+ current_actor_context.run()
+ report_fields = current_actor_context.consume(Report)[0].report
+ assert is_inhibitor(report_fields)
+
+
+def test_ifcfg_static_wep(current_actor_context):
"""
Report if there's a static WEP sysconfig with a stored passphrase.
"""
- mock_config = mock.mock_open(read_data='KEY_PASSPHRASE1=Hell0')
- with mock.patch('builtins.open' if six.PY3 else '__builtin__.open', mock_config):
- monkeypatch.setattr(networkdeprecations.os, 'listdir', _listdir_keys)
- monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
- networkdeprecations.process()
- assert reporting.create_report.called
+ static_wep_ifcfg = IfCfg(filename='/NM/ifcfg-wlan0', secrets=(
+ IfCfgProperty(name='KEY_PASSPHRASE1', value=None),
+ ))
+
+ current_actor_context.feed(static_wep_ifcfg)
+ current_actor_context.run()
+ report_fields = current_actor_context.consume(Report)[0].report
+ assert is_inhibitor(report_fields)
-def test_ifcfg_dynamic_wep(monkeypatch):
+def test_ifcfg_dynamic_wep(current_actor_context):
"""
Report if there's a dynamic WEP sysconfig.
"""
- DYNAMIC_WEP_SYSCONFIG = textwrap.dedent("""
- ESSID=dynwep1
- MODE=Managed
- KEY_MGMT=IEEE8021X # Dynamic WEP!
- TYPE=Wireless
- NAME=dynwep1
- """)
-
- mock_config = mock.mock_open(read_data=DYNAMIC_WEP_SYSCONFIG)
- with mock.patch('builtins.open' if six.PY3 else '__builtin__.open', mock_config):
- monkeypatch.setattr(networkdeprecations.os, 'listdir', _listdir_ifcfg)
- monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
- networkdeprecations.process()
- assert reporting.create_report.called
+ dynamic_wep_ifcfg = IfCfg(filename='/NM/ifcfg-wlan0', properties=(
+ IfCfgProperty(name='ESSID', value='dynwep1'),
+ IfCfgProperty(name='MODE', value='Managed'),
+ IfCfgProperty(name='KEY_MGMT', value='IEEE8021X'),
+ IfCfgProperty(name='TYPE', value='Wireless'),
+ IfCfgProperty(name='NAME', value='dynwep1'),
+ ))
+
+ current_actor_context.feed(dynamic_wep_ifcfg)
+ current_actor_context.run()
+ report_fields = current_actor_context.consume(Report)[0].report
+ assert is_inhibitor(report_fields)
--
2.39.0

View File

@ -0,0 +1,386 @@
From f2977392208ad6874802bed30af9616853c77c08 Mon Sep 17 00:00:00 2001
From: Lubomir Rintel <lkundrak@v3.sk>
Date: Mon, 26 Sep 2022 12:54:57 +0200
Subject: [PATCH 71/75] Make CheckIfCfg consume IfCfg
This actor used to scan the NetworkManager ifcfg files itself.
Now it uses IfCfg messages, sharing the scanning code with
CheckNetworkDeprecations.
---
.../el8toel9/actors/checkifcfg/actor.py | 8 +-
.../checkifcfg/libraries/checkifcfg_ifcfg.py | 40 +---
.../checkifcfg/tests/unit_test_ifcfg.py | 207 +++++++++---------
3 files changed, 118 insertions(+), 137 deletions(-)
diff --git a/repos/system_upgrade/el8toel9/actors/checkifcfg/actor.py b/repos/system_upgrade/el8toel9/actors/checkifcfg/actor.py
index c6927d96..3ad0b5a0 100644
--- a/repos/system_upgrade/el8toel9/actors/checkifcfg/actor.py
+++ b/repos/system_upgrade/el8toel9/actors/checkifcfg/actor.py
@@ -1,7 +1,7 @@
from leapp.actors import Actor
from leapp.libraries.actor import checkifcfg_ifcfg as ifcfg
-from leapp.models import InstalledRPM, Report, RpmTransactionTasks
-from leapp.tags import FactsPhaseTag, IPUWorkflowTag
+from leapp.models import IfCfg, InstalledRPM, Report, RpmTransactionTasks
+from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
class CheckIfcfg(Actor):
@@ -16,9 +16,9 @@ class CheckIfcfg(Actor):
"""
name = "check_ifcfg"
- consumes = (InstalledRPM,)
+ consumes = (IfCfg, InstalledRPM,)
produces = (Report, RpmTransactionTasks,)
- tags = (IPUWorkflowTag, FactsPhaseTag,)
+ tags = (ChecksPhaseTag, IPUWorkflowTag,)
def process(self):
ifcfg.process()
diff --git a/repos/system_upgrade/el8toel9/actors/checkifcfg/libraries/checkifcfg_ifcfg.py b/repos/system_upgrade/el8toel9/actors/checkifcfg/libraries/checkifcfg_ifcfg.py
index 9a9fe96b..5c843583 100644
--- a/repos/system_upgrade/el8toel9/actors/checkifcfg/libraries/checkifcfg_ifcfg.py
+++ b/repos/system_upgrade/el8toel9/actors/checkifcfg/libraries/checkifcfg_ifcfg.py
@@ -3,13 +3,12 @@ import os
from leapp import reporting
from leapp.libraries.common.rpms import has_package
from leapp.libraries.stdlib import api
-from leapp.models import InstalledRPM, RpmTransactionTasks
+from leapp.models import IfCfg, InstalledRPM, RpmTransactionTasks
FMT_LIST_SEPARATOR = '\n - '
def process():
- SYSCONFIG_DIR = '/etc/sysconfig/network-scripts'
TRUE_VALUES = ['yes', 'true', '1']
TYPE_MAP = {
'ethernet': 'NetworkManager',
@@ -31,48 +30,33 @@ def process():
# we don't do anything.
return
- for f in os.listdir(SYSCONFIG_DIR):
+ for ifcfg in api.consume(IfCfg):
bad_type = False
got_type = None
nm_controlled = True
- path = os.path.join(SYSCONFIG_DIR, f)
-
- if not os.path.isfile(path):
- continue
-
- if f.startswith('rule-') or f.startswith('rule6-'):
+ if ifcfg.rules is not None or ifcfg.rules6 is not None:
if 'NetworkManager-dispatcher-routing-rules' not in rpms_to_install:
rpms_to_install.append('NetworkManager-dispatcher-routing-rules')
continue
- if not f.startswith('ifcfg-'):
+ if os.path.basename(ifcfg.filename) == 'ifcfg-lo':
continue
- if f == 'ifcfg-lo':
- continue
-
- for line in open(path).readlines():
- try:
- (key, value) = line.split('#')[0].strip().split('=')
- except ValueError:
- # We're not interested in lines that are not
- # simple assignments. Play it safe.
- continue
-
- if key in ('TYPE', 'DEVICETYPE'):
+ for prop in ifcfg.properties:
+ if prop.name in ('TYPE', 'DEVICETYPE'):
if got_type is None:
- got_type = value.lower()
- elif got_type != value.lower():
+ got_type = prop.value.lower()
+ elif got_type != prop.value.lower():
bad_type = True
- if key == 'BONDING_MASTER':
+ if prop.name == 'BONDING_MASTER':
if got_type is None:
got_type = 'bond'
elif got_type != 'bond':
bad_type = True
- if key == 'NM_CONTROLLED' and value.lower() not in TRUE_VALUES:
+ if prop.name == 'NM_CONTROLLED' and prop.value.lower() not in TRUE_VALUES:
nm_controlled = False
if got_type in TYPE_MAP:
@@ -84,9 +68,9 @@ def process():
# Don't bother reporting the file for NM_CONTROLLED=no
# if its type is not supportable with NetworkManager anyway
if bad_type is True:
- bad_type_files.append(path)
+ bad_type_files.append(ifcfg.filename)
elif nm_controlled is False:
- not_controlled_files.append(path)
+ not_controlled_files.append(ifcfg.filename)
if bad_type_files:
title = 'Network configuration for unsupported device types detected'
diff --git a/repos/system_upgrade/el8toel9/actors/checkifcfg/tests/unit_test_ifcfg.py b/repos/system_upgrade/el8toel9/actors/checkifcfg/tests/unit_test_ifcfg.py
index 10e2adb1..ddabedf2 100644
--- a/repos/system_upgrade/el8toel9/actors/checkifcfg/tests/unit_test_ifcfg.py
+++ b/repos/system_upgrade/el8toel9/actors/checkifcfg/tests/unit_test_ifcfg.py
@@ -1,147 +1,144 @@
-import mock
-import six
+from leapp.models import IfCfg, IfCfgProperty, InstalledRPM, RPM, RpmTransactionTasks
+from leapp.reporting import Report
+from leapp.utils.report import is_inhibitor
-from leapp import reporting
-from leapp.libraries.actor import checkifcfg_ifcfg as ifcfg
-from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked, produce_mocked
-from leapp.libraries.stdlib import api
-from leapp.models import InstalledRPM, RPM, RpmTransactionTasks
-
-RH_PACKAGER = 'Red Hat, Inc. <http://bugzilla.redhat.com/bugzilla>'
+RH_PACKAGER = "Red Hat, Inc. <http://bugzilla.redhat.com/bugzilla>"
NETWORK_SCRIPTS_RPM = RPM(
- name='network-scripts', version='10.00.17', release='1.el8', epoch='',
- packager=RH_PACKAGER, arch='x86_64',
- pgpsig='RSA/SHA256, Fri 04 Feb 2022 03:32:47 PM CET, Key ID 199e2f91fd431d51'
+ name="network-scripts",
+ version="10.00.17",
+ release="1.el8",
+ epoch="",
+ packager=RH_PACKAGER,
+ arch="x86_64",
+ pgpsig="RSA/SHA256, Fri 04 Feb 2022 03:32:47 PM CET, Key ID 199e2f91fd431d51",
)
NETWORK_MANAGER_RPM = RPM(
- name='NetworkManager', version='1.36.0', release='0.8.el8', epoch='1',
- packager=RH_PACKAGER, arch='x86_64',
- pgpsig='RSA/SHA256, Mon 14 Feb 2022 08:45:37 PM CET, Key ID 199e2f91fd431d51'
-)
-
-INITSCRIPTS_INSTALLED = CurrentActorMocked(
- msgs=[InstalledRPM(items=[NETWORK_SCRIPTS_RPM])]
+ name="NetworkManager",
+ version="1.36.0",
+ release="0.8.el8",
+ epoch="1",
+ packager=RH_PACKAGER,
+ arch="x86_64",
+ pgpsig="RSA/SHA256, Mon 14 Feb 2022 08:45:37 PM CET, Key ID 199e2f91fd431d51",
)
-INITSCRIPTS_AND_NM_INSTALLED = CurrentActorMocked(
- msgs=[InstalledRPM(items=[NETWORK_SCRIPTS_RPM, NETWORK_MANAGER_RPM])]
-)
+INITSCRIPTS_INSTALLED = InstalledRPM(items=[
+ NETWORK_SCRIPTS_RPM
+])
+INITSCRIPTS_AND_NM_INSTALLED = InstalledRPM(items=[
+ NETWORK_SCRIPTS_RPM,
+ NETWORK_MANAGER_RPM
+])
-def test_ifcfg_none(monkeypatch):
+def test_ifcfg_none(current_actor_context):
"""
No report and don't install anything if there are no ifcfg files.
"""
- monkeypatch.setattr(ifcfg.api, 'current_actor', INITSCRIPTS_AND_NM_INSTALLED)
- monkeypatch.setattr(ifcfg.api, "produce", produce_mocked())
- monkeypatch.setattr(ifcfg.os, 'listdir', lambda dummy: ('hello', 'world',))
- monkeypatch.setattr(ifcfg.os.path, 'isfile', lambda dummy: True)
- monkeypatch.setattr(reporting, "create_report", create_report_mocked())
- ifcfg.process()
- assert not reporting.create_report.called
- assert not api.produce.called
+ current_actor_context.feed(INITSCRIPTS_AND_NM_INSTALLED)
+ current_actor_context.run()
+ assert not current_actor_context.consume(Report)
+ assert not current_actor_context.consume(RpmTransactionTasks)
-def test_ifcfg_rule_file(monkeypatch):
+def test_ifcfg_rule_file(current_actor_context):
"""
Install NetworkManager-dispatcher-routing-rules package if there's a
file with ip rules.
"""
- monkeypatch.setattr(ifcfg.api, 'current_actor', INITSCRIPTS_AND_NM_INSTALLED)
- monkeypatch.setattr(ifcfg.api, "produce", produce_mocked())
- monkeypatch.setattr(ifcfg.os, 'listdir', lambda dummy: ('hello', 'world', 'rule-eth0',))
- monkeypatch.setattr(ifcfg.os.path, 'isfile', lambda dummy: True)
- monkeypatch.setattr(reporting, "create_report", create_report_mocked())
- ifcfg.process()
- assert not reporting.create_report.called
- assert api.produce.called
- assert isinstance(api.produce.model_instances[0], RpmTransactionTasks)
- assert api.produce.model_instances[0].to_install == ['NetworkManager-dispatcher-routing-rules']
+ current_actor_context.feed(IfCfg(
+ filename="/NM/ifcfg-eth0",
+ properties=(IfCfgProperty(name="TYPE", value="Ethernet"),),
+ rules=("foo bar baz",),
+ ))
+ current_actor_context.feed(INITSCRIPTS_AND_NM_INSTALLED)
+ current_actor_context.run()
+ assert not current_actor_context.consume(Report)
+ assert len(current_actor_context.consume(RpmTransactionTasks)) == 1
+ rpm_transaction = current_actor_context.consume(RpmTransactionTasks)[0]
+ assert rpm_transaction.to_install == ["NetworkManager-dispatcher-routing-rules"]
-def test_ifcfg_good_type(monkeypatch):
+def test_ifcfg_good_type(current_actor_context):
"""
No report if there's an ifcfg file that would work with NetworkManager.
Make sure NetworkManager itself is installed though.
"""
- mock_config = mock.mock_open(read_data="TYPE=Ethernet")
- with mock.patch("builtins.open" if six.PY3 else "__builtin__.open", mock_config) as mock_ifcfg:
- monkeypatch.setattr(ifcfg.api, 'current_actor', INITSCRIPTS_AND_NM_INSTALLED)
- monkeypatch.setattr(ifcfg.api, "produce", produce_mocked())
- monkeypatch.setattr(ifcfg.os, 'listdir', lambda dummy: ('hello', 'world', 'ifcfg-eth0', 'ifcfg-lo',))
- monkeypatch.setattr(ifcfg.os.path, 'isfile', lambda dummy: True)
- monkeypatch.setattr(reporting, "create_report", create_report_mocked())
- ifcfg.process()
- mock_ifcfg.assert_called_once_with('/etc/sysconfig/network-scripts/ifcfg-eth0')
- assert not reporting.create_report.called
- assert api.produce.called
- assert isinstance(api.produce.model_instances[0], RpmTransactionTasks)
- assert api.produce.model_instances[0].to_install == ['NetworkManager']
-
-
-def test_ifcfg_not_controlled(monkeypatch):
+ current_actor_context.feed(IfCfg(
+ filename="/NM/ifcfg-lo",
+ properties=()
+ ))
+ current_actor_context.feed(IfCfg(
+ filename="/NM/ifcfg-eth0",
+ properties=(IfCfgProperty(name="TYPE", value="Ethernet"),)
+ ))
+ current_actor_context.feed(INITSCRIPTS_AND_NM_INSTALLED)
+ current_actor_context.run()
+ assert not current_actor_context.consume(Report)
+ assert len(current_actor_context.consume(RpmTransactionTasks)) == 1
+ rpm_transaction = current_actor_context.consume(RpmTransactionTasks)[0]
+ assert rpm_transaction.to_install == ["NetworkManager"]
+
+
+def test_ifcfg_not_controlled(current_actor_context):
"""
Report if there's a NM_CONTROLLED=no file.
"""
- mock_config = mock.mock_open(read_data="TYPE=Ethernet\nNM_CONTROLLED=no")
- with mock.patch("builtins.open" if six.PY3 else "__builtin__.open", mock_config) as mock_ifcfg:
- monkeypatch.setattr(ifcfg.api, 'current_actor', INITSCRIPTS_INSTALLED)
- monkeypatch.setattr(ifcfg.api, "produce", produce_mocked())
- monkeypatch.setattr(ifcfg.os, 'listdir', lambda dummy: ('hello', 'world', 'ifcfg-eth0',))
- monkeypatch.setattr(ifcfg.os.path, 'isfile', lambda dummy: True)
- monkeypatch.setattr(reporting, "create_report", create_report_mocked())
- ifcfg.process()
- mock_ifcfg.assert_called_once_with('/etc/sysconfig/network-scripts/ifcfg-eth0')
- assert reporting.create_report.called
- assert 'disabled NetworkManager' in reporting.create_report.report_fields['title']
- assert api.produce.called
-
-
-def test_ifcfg_unknown_type(monkeypatch):
+ current_actor_context.feed(IfCfg(
+ filename="/NM/ifcfg-eth0",
+ properties=(
+ IfCfgProperty(name="TYPE", value="Ethernet"),
+ IfCfgProperty(name="NM_CONTROLLED", value="no"),
+ )
+ ))
+ current_actor_context.feed(INITSCRIPTS_INSTALLED)
+ current_actor_context.run()
+ assert len(current_actor_context.consume(Report)) == 1
+ report_fields = current_actor_context.consume(Report)[0].report
+ assert is_inhibitor(report_fields)
+ assert "disabled NetworkManager" in report_fields['title']
+
+
+def test_ifcfg_unknown_type(current_actor_context):
"""
Report if there's configuration for a type we don't recognize.
"""
- mock_config = mock.mock_open(read_data="TYPE=AvianCarrier")
- with mock.patch("builtins.open" if six.PY3 else "__builtin__.open", mock_config) as mock_ifcfg:
- monkeypatch.setattr(ifcfg.api, 'current_actor', INITSCRIPTS_AND_NM_INSTALLED)
- monkeypatch.setattr(ifcfg.api, "produce", produce_mocked())
- monkeypatch.setattr(ifcfg.os, 'listdir', lambda dummy: ('hello', 'world', 'ifcfg-pigeon0',))
- monkeypatch.setattr(ifcfg.os.path, 'isfile', lambda dummy: True)
- monkeypatch.setattr(reporting, "create_report", create_report_mocked())
- ifcfg.process()
- mock_ifcfg.assert_called_once_with('/etc/sysconfig/network-scripts/ifcfg-pigeon0')
- assert reporting.create_report.called
- assert 'unsupported device types' in reporting.create_report.report_fields['title']
- assert not api.produce.called
-
-
-def test_ifcfg_install_subpackage(monkeypatch):
+ current_actor_context.feed(IfCfg(
+ filename="/NM/ifcfg-pigeon0",
+ properties=(IfCfgProperty(name="TYPE", value="AvianCarrier"),)
+ ))
+ current_actor_context.feed(INITSCRIPTS_AND_NM_INSTALLED)
+ current_actor_context.run()
+ assert len(current_actor_context.consume(Report)) == 1
+ report_fields = current_actor_context.consume(Report)[0].report
+ assert is_inhibitor(report_fields)
+ assert "unsupported device types" in report_fields['title']
+
+
+def test_ifcfg_install_subpackage(current_actor_context):
"""
Install NetworkManager-team if there's a team connection and also
ensure NetworkManager-config-server is installed if NetworkManager
was not there.
"""
- mock_config = mock.mock_open(read_data="TYPE=Team")
- with mock.patch("builtins.open" if six.PY3 else "__builtin__.open", mock_config) as mock_ifcfg:
- monkeypatch.setattr(ifcfg.api, 'current_actor', INITSCRIPTS_INSTALLED)
- monkeypatch.setattr(ifcfg.api, "produce", produce_mocked())
- monkeypatch.setattr(ifcfg.os, 'listdir', lambda dummy: ('ifcfg-team0',))
- monkeypatch.setattr(ifcfg.os.path, 'isfile', lambda dummy: True)
- monkeypatch.setattr(reporting, "create_report", create_report_mocked())
- ifcfg.process()
- mock_ifcfg.assert_called_once_with('/etc/sysconfig/network-scripts/ifcfg-team0')
- assert not reporting.create_report.called
- assert api.produce.called
- assert isinstance(api.produce.model_instances[0], RpmTransactionTasks)
- assert api.produce.model_instances[0].to_install == [
- 'NetworkManager-team',
- 'NetworkManager-config-server'
- ]
+ current_actor_context.feed(IfCfg(
+ filename="/NM/ifcfg-team0",
+ properties=(IfCfgProperty(name="TYPE", value="Team"),)
+ ))
+ current_actor_context.feed(INITSCRIPTS_INSTALLED)
+ current_actor_context.run()
+ assert not current_actor_context.consume(Report)
+ assert len(current_actor_context.consume(RpmTransactionTasks)) == 1
+ rpm_transaction = current_actor_context.consume(RpmTransactionTasks)[0]
+ assert rpm_transaction.to_install == [
+ "NetworkManager-team",
+ "NetworkManager-config-server",
+ ]
--
2.39.0

View File

@ -0,0 +1,58 @@
From aed1a9cafbebcdcfa463b15e52f53a6ac7730f01 Mon Sep 17 00:00:00 2001
From: Lubomir Rintel <lkundrak@v3.sk>
Date: Mon, 26 Sep 2022 11:59:06 +0200
Subject: [PATCH 72/75] Make IfCfgScanner accept simple quoted values
Turns out people actually use those.
https://bugzilla.redhat.com/show_bug.cgi?id=2111691
---
.../actors/ifcfgscanner/libraries/ifcfgscanner.py | 6 ++++++
.../actors/ifcfgscanner/tests/unit_test_ifcfgscanner.py | 8 ++++++--
2 files changed, 12 insertions(+), 2 deletions(-)
diff --git a/repos/system_upgrade/el8toel9/actors/ifcfgscanner/libraries/ifcfgscanner.py b/repos/system_upgrade/el8toel9/actors/ifcfgscanner/libraries/ifcfgscanner.py
index cfc385dc..683327b3 100644
--- a/repos/system_upgrade/el8toel9/actors/ifcfgscanner/libraries/ifcfgscanner.py
+++ b/repos/system_upgrade/el8toel9/actors/ifcfgscanner/libraries/ifcfgscanner.py
@@ -28,6 +28,12 @@ def process_ifcfg(filename, secrets=False):
# simple assignments. Play it safe.
continue
+ # Deal with simple quoting. We don't expand anything, nor do
+ # multiline strings or anything of that sort.
+ if value is not None and len(value) > 1 and value[0] == value[-1]:
+ if value.startswith('"') or value.startswith("'"):
+ value = value[1:-1]
+
properties.append(IfCfgProperty(name=name, value=value))
return properties
diff --git a/repos/system_upgrade/el8toel9/actors/ifcfgscanner/tests/unit_test_ifcfgscanner.py b/repos/system_upgrade/el8toel9/actors/ifcfgscanner/tests/unit_test_ifcfgscanner.py
index f5e3056a..d3b4846f 100644
--- a/repos/system_upgrade/el8toel9/actors/ifcfgscanner/tests/unit_test_ifcfgscanner.py
+++ b/repos/system_upgrade/el8toel9/actors/ifcfgscanner/tests/unit_test_ifcfgscanner.py
@@ -55,8 +55,8 @@ def test_ifcfg1(monkeypatch):
TYPE=Wireless # Some comment
# Another comment
ESSID=wep1
- NAME=wep1
- MODE=Managed
+ NAME="wep1"
+ MODE='Managed' # comment
WEP_KEY_FLAGS=ask
SECURITYMODE=open
DEFAULTKEY=1
@@ -81,6 +81,10 @@ def test_ifcfg1(monkeypatch):
assert ifcfg.properties[0].value == "Wireless"
assert ifcfg.properties[1].name == "ESSID"
assert ifcfg.properties[1].value == "wep1"
+ assert ifcfg.properties[2].name == "NAME"
+ assert ifcfg.properties[2].value == "wep1"
+ assert ifcfg.properties[3].name == "MODE"
+ assert ifcfg.properties[3].value == "Managed"
def test_ifcfg2(monkeypatch):
--
2.39.0

View File

@ -0,0 +1,168 @@
From 00d06d5217848d384e4b70ebf3c5eb5e4f7fa3e6 Mon Sep 17 00:00:00 2001
From: PeterMocary <petermocary@gmail.com>
Date: Thu, 25 Aug 2022 18:04:08 +0200
Subject: [PATCH 73/75] Improve error message when more space is needed for the
upgrade
When there was not enough space, leapp would output a misleading error message propagated from dnf. This error message was replaced with a clearer one that includes a link to a solution article.
---
.../actors/dnfupgradetransaction/actor.py | 7 +++-
.../libraries/userspacegen.py | 16 ++++++++
.../common/libraries/dnfplugin.py | 39 +++++++++++++------
3 files changed, 49 insertions(+), 13 deletions(-)
diff --git a/repos/system_upgrade/common/actors/dnfupgradetransaction/actor.py b/repos/system_upgrade/common/actors/dnfupgradetransaction/actor.py
index 296e6201..2e069296 100644
--- a/repos/system_upgrade/common/actors/dnfupgradetransaction/actor.py
+++ b/repos/system_upgrade/common/actors/dnfupgradetransaction/actor.py
@@ -11,7 +11,8 @@ from leapp.models import (
StorageInfo,
TargetUserSpaceInfo,
TransactionCompleted,
- UsedTargetRepositories
+ UsedTargetRepositories,
+ XFSPresence
)
from leapp.tags import IPUWorkflowTag, RPMUpgradePhaseTag
@@ -33,6 +34,7 @@ class DnfUpgradeTransaction(Actor):
StorageInfo,
TargetUserSpaceInfo,
UsedTargetRepositories,
+ XFSPresence
)
produces = (TransactionCompleted,)
tags = (RPMUpgradePhaseTag, IPUWorkflowTag)
@@ -48,10 +50,11 @@ class DnfUpgradeTransaction(Actor):
plugin_info = list(self.consume(DNFPluginTask))
tasks = next(self.consume(FilteredRpmTransactionTasks), FilteredRpmTransactionTasks())
target_userspace_info = next(self.consume(TargetUserSpaceInfo), None)
+ xfs_info = next(self.consume(XFSPresence), XFSPresence())
dnfplugin.perform_transaction_install(
tasks=tasks, used_repos=used_repos, storage_info=storage_info, target_userspace_info=target_userspace_info,
- plugin_info=plugin_info
+ plugin_info=plugin_info, xfs_info=xfs_info
)
self.produce(TransactionCompleted())
userspace = next(self.consume(TargetUserSpaceInfo), None)
diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
index 6335eb5b..3857e2f2 100644
--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
+++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
@@ -206,6 +206,22 @@ def prepare_target_userspace(context, userspace_dir, enabled_repos, packages):
message = 'Unable to install RHEL {} userspace packages.'.format(target_major_version)
details = {'details': str(exc), 'stderr': exc.stderr}
+ xfs_info = next(api.consume(XFSPresence), XFSPresence())
+ if 'more space needed on the' in exc.stderr:
+ # The stderr contains this error summary:
+ # Disk Requirements:
+ # At least <size> more space needed on the <path> filesystem.
+
+ article_section = 'Generic case'
+ if xfs_info.present and xfs_info.without_ftype:
+ article_section = 'XFS ftype=0 case'
+
+ message = ('There is not enough space on the file system hosting /var/lib/leapp directory '
+ 'to extract the packages.')
+ details = {'hint': "Please follow the instructions in the '{}' section of the article at: "
+ "link: https://access.redhat.com/solutions/5057391".format(article_section)}
+ raise StopActorExecutionError(message=message, details=details)
+
# If a proxy was set in dnf config, it should be the reason why dnf
# failed since leapp does not support updates behind proxy yet.
for manager_info in api.consume(PkgManagerInfo):
diff --git a/repos/system_upgrade/common/libraries/dnfplugin.py b/repos/system_upgrade/common/libraries/dnfplugin.py
index 7f541c18..57b25909 100644
--- a/repos/system_upgrade/common/libraries/dnfplugin.py
+++ b/repos/system_upgrade/common/libraries/dnfplugin.py
@@ -146,7 +146,8 @@ def backup_debug_data(context):
api.current_logger().warning('Failed to copy debugdata. Message: {}'.format(str(e)), exc_info=True)
-def _transaction(context, stage, target_repoids, tasks, plugin_info, test=False, cmd_prefix=None, on_aws=False):
+def _transaction(context, stage, target_repoids, tasks, plugin_info, xfs_info,
+ test=False, cmd_prefix=None, on_aws=False):
"""
Perform the actual DNF rpm download via our DNF plugin
"""
@@ -219,10 +220,25 @@ def _transaction(context, stage, target_repoids, tasks, plugin_info, test=False,
)
except CalledProcessError as e:
api.current_logger().error('DNF execution failed: ')
- raise StopActorExecutionError(
- message='DNF execution failed with non zero exit code.\nSTDOUT:\n{stdout}\nSTDERR:\n{stderr}'.format(
- stdout=e.stdout, stderr=e.stderr)
- )
+
+ message = 'DNF execution failed with non zero exit code.'
+ details = {'STDOUT': e.stdout, 'STDERR': e.stderr}
+
+ if 'more space needed on the' in e.stderr:
+ # The stderr contains this error summary:
+ # Disk Requirements:
+ # At least <size> more space needed on the <path> filesystem.
+
+ article_section = 'Generic case'
+ if xfs_info.present and xfs_info.without_ftype:
+ article_section = 'XFS ftype=0 case'
+
+ message = ('There is not enough space on the file system hosting /var/lib/leapp directory '
+ 'to extract the packages.')
+ details = {'hint': "Please follow the instructions in the '{}' section of the article at: "
+ "link: https://access.redhat.com/solutions/5057391".format(article_section)}
+
+ raise StopActorExecutionError(message=message, details=details)
finally:
if stage == 'check':
backup_debug_data(context=context)
@@ -294,7 +310,7 @@ def install_initramdisk_requirements(packages, target_userspace_info, used_repos
context.call(cmd, env=env)
-def perform_transaction_install(target_userspace_info, storage_info, used_repos, tasks, plugin_info):
+def perform_transaction_install(target_userspace_info, storage_info, used_repos, tasks, plugin_info, xfs_info):
"""
Performs the actual installation with the DNF rhel-upgrade plugin using the target userspace
"""
@@ -353,8 +369,8 @@ def perform_transaction_install(target_userspace_info, storage_info, used_repos,
if get_target_major_version() == '9':
_rebuild_rpm_db(context, root='/installroot')
_transaction(
- context=context, stage=stage, target_repoids=target_repoids, plugin_info=plugin_info, tasks=tasks,
- cmd_prefix=cmd_prefix
+ context=context, stage='upgrade', target_repoids=target_repoids, plugin_info=plugin_info,
+ xfs_info=xfs_info, tasks=tasks, cmd_prefix=cmd_prefix
)
# we have to ensure the leapp packages will stay untouched even after the
@@ -400,7 +416,8 @@ def perform_transaction_check(target_userspace_info,
dnfconfig.exclude_leapp_rpms(context, disable_plugins)
_transaction(
- context=context, stage='check', target_repoids=target_repoids, plugin_info=plugin_info, tasks=tasks
+ context=context, stage='check', target_repoids=target_repoids, plugin_info=plugin_info, xfs_info=xfs_info,
+ tasks=tasks
)
@@ -434,7 +451,7 @@ def perform_rpm_download(target_userspace_info,
dnfconfig.exclude_leapp_rpms(context, disable_plugins)
_transaction(
context=context, stage='download', target_repoids=target_repoids, plugin_info=plugin_info, tasks=tasks,
- test=True, on_aws=on_aws
+ test=True, on_aws=on_aws, xfs_info=xfs_info
)
@@ -457,5 +474,5 @@ def perform_dry_run(target_userspace_info,
apply_workarounds(overlay.nspawn())
_transaction(
context=context, stage='dry-run', target_repoids=target_repoids, plugin_info=plugin_info, tasks=tasks,
- test=True, on_aws=on_aws
+ test=True, on_aws=on_aws, xfs_info=xfs_info
)
--
2.39.0

View File

@ -0,0 +1,46 @@
From b5a6b83044fdbc24fd8919cf6935f3a93f4f67e2 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Fri, 27 Jan 2023 12:22:37 +0100
Subject: [PATCH 74/75] Do not create python3 .pyc files
After the in-place upgrade and removal of `leapp` packages, there are
leftover `*cpython.pyc` files in: `/usr/lib/python2.7/site-packages/leapp/`
and `/usr/share/leapp-repository/`.
Let's avoid this by not creating them in the first place.
Jira ref.: OAMG-7641
---
.../files/dracut/85sys-upgrade-redhat/do-upgrade.sh | 2 +-
.../actors/preparepythonworkround/libraries/workaround.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
index 04540c1d..491b85ec 100755
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
@@ -266,7 +266,7 @@ do_upgrade() {
# all FSTAB partitions. As mount was working before, hopefully will
# work now as well. Later this should be probably modified as we will
# need to handle more stuff around storage at all.
- /usr/bin/systemd-nspawn $NSPAWN_OPTS -D "$NEWROOT" /usr/bin/bash -c "mount -a; /usr/bin/python3 $LEAPP3_BIN upgrade --resume $args"
+ /usr/bin/systemd-nspawn $NSPAWN_OPTS -D "$NEWROOT" /usr/bin/bash -c "mount -a; /usr/bin/python3 -B $LEAPP3_BIN upgrade --resume $args"
rv=$?
fi
diff --git a/repos/system_upgrade/common/actors/preparepythonworkround/libraries/workaround.py b/repos/system_upgrade/common/actors/preparepythonworkround/libraries/workaround.py
index de3079ee..255121dd 100644
--- a/repos/system_upgrade/common/actors/preparepythonworkround/libraries/workaround.py
+++ b/repos/system_upgrade/common/actors/preparepythonworkround/libraries/workaround.py
@@ -31,7 +31,7 @@ def apply_python3_workaround():
os.symlink(_get_orig_leapp_path(), leapp_lib_symlink_path)
with open(py3_leapp, 'w') as f:
f_content = [
- '#!/usr/bin/python3',
+ '#!/usr/bin/python3 -B',
'import sys',
'sys.path.append(\'{}\')'.format(LEAPP_HOME),
'',
--
2.39.0

View File

@ -0,0 +1,247 @@
From f7c82a2468c1dae62d3beb94a3b1271b3b396ea5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Peter=20Mo=C4=8D=C3=A1ry?=
<68905580+PeterMocary@users.noreply.github.com>
Date: Fri, 27 Jan 2023 13:53:59 +0100
Subject: [PATCH 75/75] Add mapping based on the installed content (#967)
* Add mapping based on the installed content
Repositories covered by repositories mapping, that are used by installed
RPM packages, are used to evaluate expected target repositories on top
of evaluating the target repositories from enabled repositories. This
covers repositories which might be disabled when upgrading, but should
be used to upgrade installed packages during the upgrade.
* Cover with a unit test
Co-authored-by: Inessa Vasilevskaya <ivasilev@redhat.com>
---
.../common/actors/setuptargetrepos/actor.py | 2 +
.../libraries/setuptargetrepos.py | 57 ++++++++++++-------
.../tests/test_setuptargetrepos.py | 40 +++++++++++--
3 files changed, 75 insertions(+), 24 deletions(-)
diff --git a/repos/system_upgrade/common/actors/setuptargetrepos/actor.py b/repos/system_upgrade/common/actors/setuptargetrepos/actor.py
index 47724f0d..767fa00c 100644
--- a/repos/system_upgrade/common/actors/setuptargetrepos/actor.py
+++ b/repos/system_upgrade/common/actors/setuptargetrepos/actor.py
@@ -2,6 +2,7 @@ from leapp.actors import Actor
from leapp.libraries.actor import setuptargetrepos
from leapp.models import (
CustomTargetRepository,
+ InstalledRPM,
RepositoriesBlacklisted,
RepositoriesFacts,
RepositoriesMapping,
@@ -25,6 +26,7 @@ class SetupTargetRepos(Actor):
name = 'setuptargetrepos'
consumes = (CustomTargetRepository,
+ InstalledRPM,
RepositoriesSetupTasks,
RepositoriesMapping,
RepositoriesFacts,
diff --git a/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py b/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py
index 3f34aedb..4b8405d0 100644
--- a/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py
+++ b/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py
@@ -4,6 +4,7 @@ from leapp.libraries.common.config.version import get_source_major_version
from leapp.libraries.stdlib import api
from leapp.models import (
CustomTargetRepository,
+ InstalledRPM,
RepositoriesBlacklisted,
RepositoriesFacts,
RepositoriesMapping,
@@ -20,7 +21,6 @@ def _get_enabled_repoids():
"""
Collects repoids of all enabled repositories on the source system.
- :param repositories_facts: Iterable of RepositoriesFacts containing info about repositories on the source system.
:returns: Set of all enabled repository IDs present on the source system.
:rtype: Set[str]
"""
@@ -33,6 +33,14 @@ def _get_enabled_repoids():
return enabled_repoids
+def _get_repoids_from_installed_packages():
+ repoids_from_installed_packages = set()
+ for installed_packages in api.consume(InstalledRPM):
+ for rpm_package in installed_packages.items:
+ repoids_from_installed_packages.add(rpm_package.repository)
+ return repoids_from_installed_packages
+
+
def _get_blacklisted_repoids():
repos_blacklisted = set()
for blacklist in api.consume(RepositoriesBlacklisted):
@@ -58,16 +66,6 @@ def _get_used_repo_dict():
return used
-def _setup_repomap_handler(src_repoids):
- repo_mappig_msg = next(api.consume(RepositoriesMapping), RepositoriesMapping())
- rhui_info = next(api.consume(RHUIInfo), RHUIInfo(provider=''))
- repomap = setuptargetrepos_repomap.RepoMapDataHandler(repo_mappig_msg, cloud_provider=rhui_info.provider)
- # TODO(pstodulk): what about skip this completely and keep the default 'ga'..?
- default_channels = setuptargetrepos_repomap.get_default_repository_channels(repomap, src_repoids)
- repomap.set_default_channels(default_channels)
- return repomap
-
-
def _get_mapped_repoids(repomap, src_repoids):
mapped_repoids = set()
src_maj_ver = get_source_major_version()
@@ -78,24 +76,40 @@ def _get_mapped_repoids(repomap, src_repoids):
def process():
- # load all data / messages
+ # Load relevant data from messages
used_repoids_dict = _get_used_repo_dict()
enabled_repoids = _get_enabled_repoids()
excluded_repoids = _get_blacklisted_repoids()
custom_repos = _get_custom_target_repos()
+ repoids_from_installed_packages = _get_repoids_from_installed_packages()
- # TODO(pstodulk): isn't that a potential issue that we map just enabled repos
- # instead of enabled + used repos??
- # initialise basic data
- repomap = _setup_repomap_handler(enabled_repoids)
- mapped_repoids = _get_mapped_repoids(repomap, enabled_repoids)
- skipped_repoids = enabled_repoids & set(used_repoids_dict.keys()) - mapped_repoids
+ # Setup repomap handler
+ repo_mappig_msg = next(api.consume(RepositoriesMapping), RepositoriesMapping())
+ rhui_info = next(api.consume(RHUIInfo), RHUIInfo(provider=''))
+ repomap = setuptargetrepos_repomap.RepoMapDataHandler(repo_mappig_msg, cloud_provider=rhui_info.provider)
+
+ # Filter set of repoids from installed packages so that it contains only repoids with mapping
+ repoids_from_installed_packages_with_mapping = _get_mapped_repoids(repomap, repoids_from_installed_packages)
+
+ # Set of repoid that are going to be mapped to target repoids containing enabled repoids and also repoids from
+ # installed packages that have mapping to prevent missing repositories that are disabled during the upgrade, but
+ # can be used to upgrade installed packages.
+ repoids_to_map = enabled_repoids.union(repoids_from_installed_packages_with_mapping)
- # Now get the info what should be the target RHEL repositories
- expected_repos = repomap.get_expected_target_pesid_repos(enabled_repoids)
+ # Set default repository channels for the repomap
+ # TODO(pstodulk): what about skip this completely and keep the default 'ga'..?
+ default_channels = setuptargetrepos_repomap.get_default_repository_channels(repomap, repoids_to_map)
+ repomap.set_default_channels(default_channels)
+
+ # Get target RHEL repoids based on the repomap
+ expected_repos = repomap.get_expected_target_pesid_repos(repoids_to_map)
target_rhel_repoids = set()
for target_pesid, target_pesidrepo in expected_repos.items():
if not target_pesidrepo:
+ # NOTE this could happen only for enabled repositories part of the set,
+ # since the repositories collected from installed packages already contain
+ # only mappable repoids.
+
# With the original repomap data, this should not happen (this should
# currently point to a problem in our data
# TODO(pstodulk): add report? inhibitor? what should be in the report?
@@ -126,6 +140,9 @@ def process():
custom_repos = [repo for repo in custom_repos if repo.repoid not in excluded_repoids]
custom_repos = sorted(custom_repos, key=lambda x: x.repoid)
+ # produce message about skipped repositories
+ enabled_repoids_with_mapping = _get_mapped_repoids(repomap, enabled_repoids)
+ skipped_repoids = enabled_repoids & set(used_repoids_dict.keys()) - enabled_repoids_with_mapping
if skipped_repoids:
pkgs = set()
for repo in skipped_repoids:
diff --git a/repos/system_upgrade/common/actors/setuptargetrepos/tests/test_setuptargetrepos.py b/repos/system_upgrade/common/actors/setuptargetrepos/tests/test_setuptargetrepos.py
index 7fd626c7..ac7f49ec 100644
--- a/repos/system_upgrade/common/actors/setuptargetrepos/tests/test_setuptargetrepos.py
+++ b/repos/system_upgrade/common/actors/setuptargetrepos/tests/test_setuptargetrepos.py
@@ -6,6 +6,7 @@ from leapp.libraries.common.testutils import CurrentActorMocked, produce_mocked
from leapp.libraries.stdlib import api
from leapp.models import (
CustomTargetRepository,
+ InstalledRPM,
PESIDRepositoryEntry,
RepoMapEntry,
RepositoriesBlacklisted,
@@ -14,9 +15,17 @@ from leapp.models import (
RepositoriesSetupTasks,
RepositoryData,
RepositoryFile,
+ RPM,
TargetRepositories
)
+RH_PACKAGER = 'Red Hat, Inc. <http://bugzilla.redhat.com/bugzilla>'
+
+
+def mock_package(pkg_name, repository=None):
+ return RPM(name=pkg_name, version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch',
+ pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51', repository=repository)
+
def test_minimal_execution(monkeypatch):
"""
@@ -103,9 +112,13 @@ def test_repos_mapping(monkeypatch):
repos_files = [RepositoryFile(file='/etc/yum.repos.d/redhat.repo', data=repos_data)]
facts = RepositoriesFacts(repositories=repos_files)
+ installed_rpms = InstalledRPM(
+ items=[mock_package('foreman', 'rhel-7-for-x86_64-satellite-extras-rpms'),
+ mock_package('foreman-proxy', 'nosuch-rhel-7-for-x86_64-satellite-extras-rpms')])
repomap = RepositoriesMapping(
- mapping=[RepoMapEntry(source='rhel7-base', target=['rhel8-baseos', 'rhel8-appstream', 'rhel8-blacklist'])],
+ mapping=[RepoMapEntry(source='rhel7-base', target=['rhel8-baseos', 'rhel8-appstream', 'rhel8-blacklist']),
+ RepoMapEntry(source='rhel7-satellite-extras', target=['rhel8-satellite-extras'])],
repositories=[
PESIDRepositoryEntry(
pesid='rhel7-base',
@@ -143,12 +156,30 @@ def test_repos_mapping(monkeypatch):
channel='ga',
rhui=''
),
+ PESIDRepositoryEntry(
+ pesid='rhel7-satellite-extras',
+ repoid='rhel-7-for-x86_64-satellite-extras-rpms',
+ major_version='7',
+ arch='x86_64',
+ repo_type='rpm',
+ channel='ga',
+ rhui=''
+ ),
+ PESIDRepositoryEntry(
+ pesid='rhel8-satellite-extras',
+ repoid='rhel-8-for-x86_64-satellite-extras-rpms',
+ major_version='8',
+ arch='x86_64',
+ repo_type='rpm',
+ channel='ga',
+ rhui=''
+ ),
]
)
repos_blacklisted = RepositoriesBlacklisted(repoids=['rhel-8-blacklisted-rpms'])
- msgs = [facts, repomap, repos_blacklisted]
+ msgs = [facts, repomap, repos_blacklisted, installed_rpms]
monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs))
monkeypatch.setattr(api, 'produce', produce_mocked())
@@ -157,8 +188,9 @@ def test_repos_mapping(monkeypatch):
assert api.produce.called
rhel_repos = api.produce.model_instances[0].rhel_repos
- assert len(rhel_repos) == 2
+ assert len(rhel_repos) == 3
produced_rhel_repoids = {repo.repoid for repo in rhel_repos}
- expected_rhel_repoids = {'rhel-8-for-x86_64-baseos-htb-rpms', 'rhel-8-for-x86_64-appstream-htb-rpms'}
+ expected_rhel_repoids = {'rhel-8-for-x86_64-baseos-htb-rpms', 'rhel-8-for-x86_64-appstream-htb-rpms',
+ 'rhel-8-for-x86_64-satellite-extras-rpms'}
assert produced_rhel_repoids == expected_rhel_repoids
--
2.39.0

View File

@ -2,7 +2,7 @@
%global repositorydir %{leapp_datadir}/repositories
%global custom_repositorydir %{leapp_datadir}/custom-repositories
%define leapp_repo_deps 8
%define leapp_repo_deps 9
%if 0%{?rhel} == 7
%define leapp_python_sitelib %{python2_sitelib}
@ -42,13 +42,13 @@ py2_byte_compile "%1" "%2"}
Name: leapp-repository
Version: 0.17.0
Release: 5%{?dist}
Release: 8%{?dist}
Summary: Repositories for leapp
License: ASL 2.0
URL: https://oamg.github.io/leapp/
Source0: https://github.com/oamg/%{name}/archive/v%{version}.tar.gz#/%{name}-%{version}.tar.gz
Source1: deps-pkgs-8.tar.gz
Source1: deps-pkgs-9.tar.gz
# NOTE: Our packages must be noarch. Do not drop this in any way.
BuildArch: noarch
@ -95,6 +95,43 @@ Patch0035: 0035-DNFWorkaround-extend-the-model-by-script_args.patch
Patch0036: 0036-Introduce-theimportrpmgpgkeys-tool-script.patch
Patch0037: 0037-Enable-gpgcheck-during-IPU-add-nogpgcheck-CLI-option.patch
Patch0038: 0038-missinggpgkey-polish-the-report-msg.patch
Patch0039: 0039-Fix-cephvolume-actor.patch
Patch0040: 0040-Include-also-Leapp-RHUI-special-rpms-in-the-whitelis.patch
Patch0041: 0041-POC-initram-networking.patch
Patch0042: 0042-Skip-check-nfs-actor-if-env-var-is-set.patch
Patch0043: 0043-Apply-changes-after-rebase-and-do-refactor.patch
Patch0044: 0044-Tune-tmt-tests-regexes-to-align-with-QE-automation.patch
Patch0045: 0045-Change-rerun-all-to-rerun-sst.patch
Patch0046: 0046-Do-not-run-rhsm-tests-in-upstream.patch
Patch0047: 0047-Set-SOURCE_RELEASE-env-var.patch
Patch0048: 0048-Packit-build-SRPM-in-Copr.patch
Patch0049: 0049-ensure-Satellite-metapackages-are-installed-after-up.patch
Patch0050: 0050-Makefile-filter-out-removed-files-for-linting.patch
Patch0051: 0051-Enable-upgrades-on-s390x-when-boot-is-part-of-rootfs.patch
Patch0052: 0052-Add-leapp-debug-tools-to-initramfs.patch
Patch0053: 0053-Add-autosourcing.patch
Patch0054: 0054-Replace-tabs-with-spaces-in-the-dracut-module.patch
Patch0055: 0055-ci-lint-Add-differential-shellcheck-GitHub-action.patch
Patch0056: 0056-Propagate-TEST_PATHS-to-test_container-targets.patch
Patch0057: 0057-Ignore-external-accounts-in-etc-passwd.patch
Patch0058: 0058-pes_events_scanner-prefilter-problematic-events-and-.patch
Patch0059: 0059-Enable-disabling-dnf-plugins-in-the-dnfcnfig-library.patch
Patch0060: 0060-Prevent-failed-upgrade-from-restarting-in-initramfs-.patch
Patch0061: 0061-BZ-2142270-run-reindexdb-to-fix-issues-due-to-new-lo.patch
Patch0062: 0062-Improve-the-hint-in-peseventsscanner-for-unknown-rep.patch
Patch0063: 0063-Ensure-a-baseos-and-appstream-repos-are-available-wh.patch
Patch0064: 0064-Fix-the-check-of-memory-RAM-limits.patch
Patch0065: 0065-Add-IfCfg-model.patch
Patch0066: 0066-Add-IfCfgScanner-actor.patch
Patch0067: 0067-Add-NetworkManagerConnection-model.patch
Patch0068: 0068-Add-NetworkManagerConnectionScanner-actor.patch
Patch0069: 0069-Install-python3-gobject-base-and-NetworkManager-libn.patch
Patch0070: 0070-Make-CheckNetworkDeprecations-consume-IfCfg-and-Netw.patch
Patch0071: 0071-Make-CheckIfCfg-consume-IfCfg.patch
Patch0072: 0072-Make-IfCfgScanner-accept-simple-quoted-values.patch
Patch0073: 0073-Improve-error-message-when-more-space-is-needed-for-.patch
Patch0074: 0074-Do-not-create-python3-.pyc-files.patch
Patch0075: 0075-Add-mapping-based-on-the-installed-content-967.patch
%description
@ -216,6 +253,11 @@ Requires: kmod
# and missing dracut could be killing situation for us :)
Requires: dracut
# Required to scan NetworkManagerConnection (e.g. to recognize secrets)
# NM is requested to be used on RHEL 8+ systems
Requires: NetworkManager-libnm
Requires: python3-gobject-base
%endif
##################################################
# end requirement
@ -268,13 +310,51 @@ Requires: dracut
%patch0036 -p1
%patch0037 -p1
%patch0038 -p1
%patch0039 -p1
%patch0040 -p1
%patch0041 -p1
%patch0042 -p1
%patch0043 -p1
%patch0044 -p1
%patch0045 -p1
%patch0046 -p1
%patch0047 -p1
%patch0048 -p1
%patch0049 -p1
%patch0050 -p1
%patch0051 -p1
%patch0052 -p1
%patch0053 -p1
%patch0054 -p1
%patch0055 -p1
%patch0056 -p1
%patch0057 -p1
%patch0058 -p1
%patch0059 -p1
%patch0060 -p1
%patch0061 -p1
%patch0062 -p1
%patch0063 -p1
%patch0064 -p1
%patch0065 -p1
%patch0066 -p1
%patch0067 -p1
%patch0068 -p1
%patch0069 -p1
%patch0070 -p1
%patch0071 -p1
%patch0072 -p1
%patch0073 -p1
%patch0074 -p1
%patch0075 -p1
%build
%if 0%{?rhel} == 7
cp -a leapp*deps-el8*rpm repos/system_upgrade/el7toel8/files/bundled-rpms/
cp -a leapp*deps*el8.noarch.rpm repos/system_upgrade/el7toel8/files/bundled-rpms/
%else
cp -a leapp*deps-el9*rpm repos/system_upgrade/el8toel9/files/bundled-rpms/
cp -a leapp*deps*el9.noarch.rpm repos/system_upgrade/el8toel9/files/bundled-rpms/
%endif
@ -343,6 +423,30 @@ done;
# no files here
%changelog
* Fri Jan 27 2023 Petr Stodulka <pstodulk@redhat.com> - 0.17.0-8
- Do not create new *pyc files when running leapp after the DNF upgrade transaction
- Fix scan of ceph volumes on systems without ceph-osd
- Fix the check of memory (RAM) limits and use human readable values in the report
- Improve the error message to guide users when discovered more space is needed
- Map the target repositories also based on the installed content
- Rework the network configuration handling and parse the configuration data properly
- Resolves: rhbz#2139907, rhbz#2111691, rhbz#2127920
* Mon Jan 23 2023 Petr Stodulka <pstodulk@redhat.com> - 0.17.0-7
- Add leapp RHUI packages to an allowlist to drop confusing reports
- Disable the amazon-id DNF plugin on AWS during the upgrade stage to omit
confusing error messages
- Enable upgrades on s390x when /boot is part of rootfs
- Filter out PES events unrelated for the used upgrade path and handle overlapping event
(fixes upgrades with quagga installed)
- Fix scan of ceph volumes when ceph-osd container is not found
- Ignore external accounts in /etc/passwd
- Prevent leapp failures caused by re-run of leapp in the upgrade initramfs
after previous failure
- Prevent the upgrade with RHSM when a baseos and an appstream target
repositories are not discovered
- Resolves: rhbz#2143372, rhbz#2141393, rhbz#2139907, rhbz#2129716
* Wed Nov 30 2022 Petr Stodulka <pstodulk@redhat.com> - 0.17.0-5
- Check RPM signatures during the upgrade (first part)
- introduced the --nogpgcheck option to do the upgrade in the original way