From 43e51ec564499bd79285d5b84b824deeda6acbe3 Mon Sep 17 00:00:00 2001 From: Yuriy Kohut Date: Fri, 21 Jun 2024 19:17:31 +0300 Subject: [PATCH] Implement Vendors support Bump the package release --- SOURCES/leapp-repository-0.19.0-elevate.patch | 6591 ++++++++++++++++- SPECS/leapp-repository.spec | 5 +- 2 files changed, 6471 insertions(+), 125 deletions(-) diff --git a/SOURCES/leapp-repository-0.19.0-elevate.patch b/SOURCES/leapp-repository-0.19.0-elevate.patch index f0dda33..a58d683 100644 --- a/SOURCES/leapp-repository-0.19.0-elevate.patch +++ b/SOURCES/leapp-repository-0.19.0-elevate.patch @@ -1,31 +1,54 @@ -diff --git a/commands/command_utils.py b/commands/command_utils.py -index 338978dd..af78efdf 100644 ---- a/commands/command_utils.py -+++ b/commands/command_utils.py -@@ -13,7 +13,7 @@ LEAPP_UPGRADE_FLAVOUR_DEFAULT = 'default' - LEAPP_UPGRADE_FLAVOUR_SAP_HANA = 'saphana' - LEAPP_UPGRADE_PATHS = 'upgrade_paths.json' +From b420c259c2362e06115afbd1d7ec5e85994b65e5 Mon Sep 17 00:00:00 2001 +From: Andrew Lukoshko +Date: Mon, 26 Jul 2021 14:33:12 +0300 +Subject: [PATCH 01/36] Add AlmaLinux and CentOS keys + +(cherry picked from commit 0f31a666e94e22c51e6a949dd7450bff92cdc9ea) +--- + .../common/actors/redhatsignedrpmscanner/actor.py | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +diff --git a/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py b/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py +index 1085beee..1c1f39da 100644 +--- a/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py ++++ b/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py +@@ -21,7 +21,9 @@ class RedHatSignedRpmScanner(Actor): + '5326810137017186', + '938a80caf21541eb', + 'fd372689897da07a', +- '45689c882fa658e0'] ++ '45689c882fa658e0', ++ '24c6a8a7f4a80eb5', ++ '51d6647ec21ad6ea'] --VERSION_REGEX = re.compile(r"^([1-9]\d*)\.(\d+)$") -+VERSION_REGEX = re.compile(r"^([1-9]\d*)(\.(\d+))?$") + signed_pkgs = 
InstalledRedHatSignedRPM() + unsigned_pkgs = InstalledUnsignedRPM() +@@ -46,7 +48,7 @@ class RedHatSignedRpmScanner(Actor): + """ + return ( # pylint: disable-msg=consider-using-ternary + pkg.name == 'gpg-pubkey' +- and pkg.packager.startswith('Red Hat, Inc.') ++ and (pkg.packager.startswith('Red Hat, Inc.') or pkg.packager.startswith('CentOS') or pkg.packager.startswith('AlmaLinux')) + or all_signed + ) - - def check_version(version): -diff --git a/commands/upgrade/breadcrumbs.py b/commands/upgrade/breadcrumbs.py -index 16903ee0..46f116fb 100644 ---- a/commands/upgrade/breadcrumbs.py -+++ b/commands/upgrade/breadcrumbs.py -@@ -61,7 +61,7 @@ class _BreadCrumbs(object): - if not os.path.exists('/etc/rhsm'): - # If there's no /etc/rhsm folder just skip it - return -- os.path.mkdir('/etc/rhsm/facts') -+ os.mkdir('/etc/rhsm/facts') - try: - with open('/etc/rhsm/facts/leapp.facts', 'w') as f: - json.dump(_flattened({ +-- +2.43.0 + + +From 656765244ba73eb706ba6d1d2fe29dad5c07b994 Mon Sep 17 00:00:00 2001 +From: Andrew Lukoshko +Date: Mon, 26 Jul 2021 17:30:58 +0300 +Subject: [PATCH 02/36] Change GRUB entry name to AlmaLinux-Upgrade-Initramfs + +(cherry picked from commit 3b0e3da0ba4f9ab77c6992ab0ae80e49093d593e) +--- + .../actors/addupgradebootentry/libraries/addupgradebootentry.py | 2 +- + .../addupgradebootentry/tests/unit_test_addupgradebootentry.py | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + diff --git a/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py b/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py -index 4e1c4204..3474867a 100644 +index 4e1c4204..9b6fb58d 100644 --- a/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py +++ b/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py @@ -18,7 +18,7 @@ def add_boot_entry(configs=None): @@ -33,12 +56,12 @@ index 4e1c4204..3474867a 100644 '--add-kernel', 
'{0}'.format(kernel_dst_path), '--initrd', '{0}'.format(initram_dst_path), - '--title', 'RHEL-Upgrade-Initramfs', -+ '--title', 'ELevate-Upgrade-Initramfs', ++ '--title', 'AlmaLinux-Upgrade-Initramfs', '--copy-default', '--make-default', '--args', '{DEBUG}{NET} enforcing=0 rd.plymouth=0 plymouth.enable=0'.format(DEBUG=debug, NET=ip_arg) diff --git a/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py b/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py -index ddc37e52..a9c40691 100644 +index ddc37e52..84c3ae18 100644 --- a/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py +++ b/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py @@ -42,7 +42,7 @@ run_args_add = [ @@ -46,10 +69,89 @@ index ddc37e52..a9c40691 100644 '--add-kernel', '/abc', '--initrd', '/def', - '--title', 'RHEL-Upgrade-Initramfs', -+ '--title', 'ELevate-Upgrade-Initramfs', ++ '--title', 'AlmaLinux-Upgrade-Initramfs', '--copy-default', '--make-default', '--args', +-- +2.43.0 + + +From 5958b310e3a97ce8e1b2ec3b398c5ad54c378b41 Mon Sep 17 00:00:00 2001 +From: Andrew Lukoshko +Date: Mon, 26 Jul 2021 17:34:55 +0300 +Subject: [PATCH 03/36] Add CentOS 7.9 as supported OS + +(cherry picked from commit 6c7c4b9d55a51e4fad208c883c512ac87eeb288b) +--- + repos/system_upgrade/common/libraries/config/version.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/repos/system_upgrade/common/libraries/config/version.py b/repos/system_upgrade/common/libraries/config/version.py +index 0f1e5874..b4707b33 100644 +--- a/repos/system_upgrade/common/libraries/config/version.py ++++ b/repos/system_upgrade/common/libraries/config/version.py +@@ -15,7 +15,7 @@ OP_MAP = { + + _SUPPORTED_VERSIONS = { + # Note: 'rhel-alt' is detected when on 'rhel' with kernel 4.x +- '7': {'rhel': ['7.9'], 'rhel-alt': [], 'rhel-saphana': ['7.9']}, ++ '7': 
{'rhel': ['7.9'], 'rhel-alt': [], 'rhel-saphana': ['7.9'], 'centos': ['7.9']}, + '8': {'rhel': ['8.6', '8.8', '8.9'], 'rhel-saphana': ['8.6', '8.8']}, + } + +-- +2.43.0 + + +From f95a819d6b3a773eea627f01693ee77fb091be17 Mon Sep 17 00:00:00 2001 +From: Aleksandr Kravchenko +Date: Mon, 26 Jul 2021 17:13:30 +0300 +Subject: [PATCH 04/36] fixed version_id + +(cherry picked from commit 041e84db618561218eee09f0101fa935d3da6321) +--- + .../actors/ipuworkflowconfig/libraries/ipuworkflowconfig.py | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/repos/system_upgrade/common/actors/ipuworkflowconfig/libraries/ipuworkflowconfig.py b/repos/system_upgrade/common/actors/ipuworkflowconfig/libraries/ipuworkflowconfig.py +index 9e213f64..73197e0b 100644 +--- a/repos/system_upgrade/common/actors/ipuworkflowconfig/libraries/ipuworkflowconfig.py ++++ b/repos/system_upgrade/common/actors/ipuworkflowconfig/libraries/ipuworkflowconfig.py +@@ -47,6 +47,7 @@ def get_os_release(path): + :return: `OSRelease` model if the file can be parsed + :raises: `IOError` + """ ++ os_version = '.'.join(platform.dist()[1].split('.')[:2]) + try: + with open(path) as f: + data = dict(l.strip().split('=', 1) for l in f.readlines() if '=' in l) +@@ -55,7 +56,7 @@ def get_os_release(path): + name=data.get('NAME', '').strip('"'), + pretty_name=data.get('PRETTY_NAME', '').strip('"'), + version=data.get('VERSION', '').strip('"'), +- version_id=data.get('VERSION_ID', '').strip('"'), ++ version_id=os_version, + variant=data.get('VARIANT', '').strip('"') or None, + variant_id=data.get('VARIANT_ID', '').strip('"') or None + ) +-- +2.43.0 + + +From 8ea0f2b0d7beb39f50cbcd94c4b186a0700bcc20 Mon Sep 17 00:00:00 2001 +From: Andrew Lukoshko +Date: Fri, 24 Sep 2021 18:13:23 +0300 +Subject: [PATCH 05/36] Fix CentOS major version detection Leapp expects + distribution version to be X.Y but CentOS doesn't have minor version in + /etc/os-release. 
This commit fixes major version detection for CentOS + +(cherry picked from commit 04c78a211f5fc2a86dd4aaa627d4a442b9a83485) +--- + .../files/dracut/85sys-upgrade-redhat/do-upgrade.sh | 3 +-- + .../files/dracut/90sys-upgrade/initrd-system-upgrade-generator | 2 +- + 2 files changed, 2 insertions(+), 3 deletions(-) + diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh index 491b85ec..059cf506 100755 --- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh @@ -81,13 +183,171 @@ index 5cc6fd92..fe81626f 100755 [ -z "$_os_version" ] && { # This should not happen as /etc/initrd-release is supposed to have API # stability, but check is better than broken system. +-- +2.43.0 + + +From 9163f4db4ff424e0cf7dbcffdede6f0f3f3168ad Mon Sep 17 00:00:00 2001 +From: Andrew Lukoshko +Date: Mon, 27 Sep 2021 20:20:33 +0300 +Subject: [PATCH 06/36] Use ELevate name in GRUB entries + +(cherry picked from commit 66fe44ed2c92ee3a0ae0cabe50df9fb5bdbb8d1f) +--- + .../actors/addupgradebootentry/libraries/addupgradebootentry.py | 2 +- + .../addupgradebootentry/tests/unit_test_addupgradebootentry.py | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py b/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py +index 9b6fb58d..3474867a 100644 +--- a/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py ++++ b/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py +@@ -18,7 +18,7 @@ def add_boot_entry(configs=None): + '/usr/sbin/grubby', + '--add-kernel', '{0}'.format(kernel_dst_path), + '--initrd', '{0}'.format(initram_dst_path), +- '--title', 
'AlmaLinux-Upgrade-Initramfs', ++ '--title', 'ELevate-Upgrade-Initramfs', + '--copy-default', + '--make-default', + '--args', '{DEBUG}{NET} enforcing=0 rd.plymouth=0 plymouth.enable=0'.format(DEBUG=debug, NET=ip_arg) +diff --git a/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py b/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py +index 84c3ae18..a9c40691 100644 +--- a/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py ++++ b/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py +@@ -42,7 +42,7 @@ run_args_add = [ + '/usr/sbin/grubby', + '--add-kernel', '/abc', + '--initrd', '/def', +- '--title', 'AlmaLinux-Upgrade-Initramfs', ++ '--title', 'ELevate-Upgrade-Initramfs', + '--copy-default', + '--make-default', + '--args', +-- +2.43.0 + + +From bc407bb2fd2159ee7c12f13a3e7502ac0e528518 Mon Sep 17 00:00:00 2001 +From: Andrew Lukoshko +Date: Mon, 27 Sep 2021 20:28:07 +0300 +Subject: [PATCH 07/36] Disable RHSM + +(cherry picked from commit 3ac3cb65c2668990971cbf528c0d6b6a77e5e1cb) +--- + repos/system_upgrade/common/libraries/rhsm.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/repos/system_upgrade/common/libraries/rhsm.py b/repos/system_upgrade/common/libraries/rhsm.py +index 4a5b0eb0..9fdec233 100644 +--- a/repos/system_upgrade/common/libraries/rhsm.py ++++ b/repos/system_upgrade/common/libraries/rhsm.py +@@ -92,7 +92,7 @@ def _handle_rhsm_exceptions(hint=None): + + def skip_rhsm(): + """Check whether we should skip RHSM related code.""" +- return get_env('LEAPP_NO_RHSM', '0') == '1' ++ return True + + + def with_rhsm(f): +-- +2.43.0 + + +From 96dbd35128163c738e819e632c359c85b907358f Mon Sep 17 00:00:00 2001 +From: Evgeni Golov +Date: Fri, 1 Apr 2022 17:43:27 +0200 +Subject: [PATCH 08/36] accept major-only versions like CentOS has them + +(cherry picked from commit 
b92b7167971c4702543314ba748da079c51bb78e) +--- + commands/command_utils.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/commands/command_utils.py b/commands/command_utils.py +index 338978dd..af78efdf 100644 +--- a/commands/command_utils.py ++++ b/commands/command_utils.py +@@ -13,7 +13,7 @@ LEAPP_UPGRADE_FLAVOUR_DEFAULT = 'default' + LEAPP_UPGRADE_FLAVOUR_SAP_HANA = 'saphana' + LEAPP_UPGRADE_PATHS = 'upgrade_paths.json' + +-VERSION_REGEX = re.compile(r"^([1-9]\d*)\.(\d+)$") ++VERSION_REGEX = re.compile(r"^([1-9]\d*)(\.(\d+))?$") + + + def check_version(version): +-- +2.43.0 + + +From 2316fc1e4d93cc772fa33a06e315fb00fad984d1 Mon Sep 17 00:00:00 2001 +From: Evgeni Golov +Date: Mon, 4 Apr 2022 11:43:29 +0200 +Subject: [PATCH 09/36] accept CentOS SCL gpg key as "RH signed" + +Otherwise packages from SCLs aren't recognized and ignored in certain +actions (like "to_remove" filtering in FilterRpmTransactionTasks). + +(cherry picked from commit 449b5f12d9681a42f4a4178e7e418f7709a19c97) +--- + .../common/actors/redhatsignedrpmscanner/actor.py | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py b/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py +index 1c1f39da..45a4ef60 100644 +--- a/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py ++++ b/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py +@@ -23,7 +23,8 @@ class RedHatSignedRpmScanner(Actor): + 'fd372689897da07a', + '45689c882fa658e0', + '24c6a8a7f4a80eb5', +- '51d6647ec21ad6ea'] ++ '51d6647ec21ad6ea', ++ '4eb84e71f2ee9d55'] + + signed_pkgs = InstalledRedHatSignedRPM() + unsigned_pkgs = InstalledUnsignedRPM() +-- +2.43.0 + + +From 77228cd1fb3538fce931db1c23c922615fef227a Mon Sep 17 00:00:00 2001 +From: Jason Tucker +Date: Mon, 3 Oct 2022 18:20:40 +0000 +Subject: [PATCH 10/36] Modify efibootorderfix to support distro changes (#3) + +* Modify efibootorderfix to 
support distro changes + +almalinux/leapp-repository supports upgrades to different OS +distributions (e.g. CentOS 7 -> Rocky 8); however the +efibootorderfix (finalization) actor only assumes you are +upgrading to the same distro (e.g. Redhat 7 -> Redhat 8). This +causes first boot to fail on UEFI-enabled hardware post-upgrade. +This patch makes the EFI boot entry configuration more flexible +to support distro changes. + +* Add arch detection and get release from system-release + +* Remove release version from the boot entry label + +As per request from @andrewlukoshko. + +* EfiFinalizationFix noop if not has_efibootmgr + +Co-authored-by: Roman Prilipskii +(cherry picked from commit e4ce70da849f09c811ba59bcf857614200d4eaa7) +--- + .../efibootorderfix/finalization/actor.py | 69 +++++++++++++++++-- + 1 file changed, 65 insertions(+), 4 deletions(-) + diff --git a/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py b/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py -index f42909f0..4a2bc8ad 100644 +index f42909f0..caa94e09 100644 --- a/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py +++ b/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py -@@ -1,17 +1,118 @@ +@@ -1,17 +1,78 @@ +import os -+import re + +from leapp.libraries.stdlib import run, api from leapp.actors import Actor @@ -122,9 +382,7 @@ index f42909f0..4a2bc8ad 100644 + 'CentOS Stream': 'centos', + 'Oracle Linux Server': 'redhat', + 'Red Hat Enterprise Linux': 'redhat', -+ 'Rocky Linux': 'rocky', -+ 'Scientific Linux': 'redhat', -+ 'EuroLinux': 'eurolinux', ++ 'Rocky Linux': 'rocky' + } + + efi_shimname_dict = { @@ -132,27 +390,6 @@ index f42909f0..4a2bc8ad 100644 + 'aarch64': 'shimaa64.efi' + } + -+ def devparts(dev): -+ """ -+ NVMe block devices aren't named like SCSI/ATA/etc block devices and must be parsed differently. 
-+ SCSI/ATA/etc devices have a syntax resembling /dev/sdb4 for the 4th partition on the 2nd disk. -+ NVMe devices have a syntax resembling /dev/nvme0n2p4 for the 4th partition on the 2nd disk. -+ """ -+ if '/dev/nvme' in dev: -+ """ -+ NVMe -+ """ -+ part = next(re.finditer(r'p\d+$', dev)).group(0) -+ dev = dev[:-len(part)] -+ part = part[1:] -+ else: -+ """ -+ Non-NVMe (SCSI, ATA, etc) -+ """ -+ part = next(re.finditer(r'\d+$', dev)).group(0) -+ dev = dev[:-len(part)] -+ return [dev, part]; -+ + with open('/etc/system-release', 'r') as sr: + release_line = next(line for line in sr if 'release' in line) + distro = release_line.split(' release ', 1)[0] @@ -184,15 +421,129 @@ index f42909f0..4a2bc8ad 100644 + break + + if is_system_efi and has_shim: -+ efidevlist = [] + with open('/proc/mounts', 'r') as fp: + for line in fp: + if '/boot/efi' in line: ++ efidev = line.split(' ', 1)[0] ++ run(['/sbin/efibootmgr', '-c', '-d', efidev, '-p 1', '-l', bootmgr_path, '-L', efi_bootentry_label]) ++ ++ if not has_grub_cfg: ++ run(['/sbin/grub2-mkconfig', '-o', grub_cfg_path]) +-- +2.43.0 + + +From 146eaf5c2218fc8f942c38bf598d4edb728dca82 Mon Sep 17 00:00:00 2001 +From: Jason Tucker +Date: Thu, 13 Oct 2022 11:58:29 +0000 +Subject: [PATCH 11/36] [efibootorderfix] fix device path for md devices (#13) + +(cherry picked from commit aebbf63e816f05f42c62a2fcc2606b37130016c6) +--- + .../actors/efibootorderfix/finalization/actor.py | 12 ++++++++++-- + 1 file changed, 10 insertions(+), 2 deletions(-) + +diff --git a/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py b/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py +index caa94e09..9f532eb6 100644 +--- a/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py ++++ b/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py +@@ -71,8 +71,16 @@ class EfiFinalizationFix(Actor): + with open('/proc/mounts', 'r') as fp: + for line in fp: + if '/boot/efi' in line: +- 
efidev = line.split(' ', 1)[0] +- run(['/sbin/efibootmgr', '-c', '-d', efidev, '-p 1', '-l', bootmgr_path, '-L', efi_bootentry_label]) + efidevpath = line.split(' ', 1)[0] -+ efidevpart = efidevpath.split('/')[-1] ++ efidev = efidevpath.split('/')[-1] + if os.path.exists('/proc/mdstat'): + with open('/proc/mdstat', 'r') as mds: + for line in mds: ++ if line.startswith(efidev): ++ mddev = line.split(' ')[-1] ++ newefidev = mddev.split('[', 1)[0] ++ efidevpath = efidevpath.replace(efidev, newefidev) ++ run(['/sbin/efibootmgr', '-c', '-d', efidevpath, '-p 1', '-l', bootmgr_path, '-L', efi_bootentry_label]) + + if not has_grub_cfg: + run(['/sbin/grub2-mkconfig', '-o', grub_cfg_path]) +-- +2.43.0 + + +From 3c66a9166b35002fcd9456f0a80a25cfd3b132ef Mon Sep 17 00:00:00 2001 +From: Jason Tucker +Date: Mon, 17 Oct 2022 12:06:20 +0000 +Subject: [PATCH 12/36] [fix] efibootorderfix (#16) + +Previous efibootorderfix versions were incorrectly specifying the +partition number being passed to `efibootmgr`. This change makes that +more dynamic. + +Also, in cases where `/boot/efi` is an actual md mirror, this PR adds +the logic to add EFI boot targets for each of the mirror plexes, instead +of just a single plex of the mirror, so better repliates EFI boot +targets as set up by OS distro installs which automatically make use +of multiple plexes. 
+ +Example output after a successful upgrade - note the duplicate +(mirrored) boot entries for "Rocky Linux": + +``` +BootCurrent: 0005 +Timeout: 3 seconds +BootOrder: 0005,0004,0002,0003 +Boot0002* UEFI: Built-in EFI Shell VenMedia(5023b95c-db26-429b-a648-bd47664c8012)..BO +Boot0003* UEFI: PXE IP4 Cisco 1GigE I350 LOM PciRoot(0x0)/Pci(0x1c,0x0)/Pci(0x0,0x0)/MAC(d4789b4a161c,1)/IPv4(0.0.0.00.0.0.0,0,0)..BO +Boot0004* Rocky Linux HD(3,GPT,3566f194-fa71-4539-9687-2b20f67841ed,0xa00800,0x80000)/File(\EFI\rocky\shimx64.efi) +Boot0005* Rocky Linux HD(3,GPT,ad6e8abc-5728-4435-9b84-1c28cb5a18cf,0xa00800,0x80000)/File(\EFI\rocky\shimx64.efi) +``` + +(cherry picked from commit d7e5f72908432a5c073b79252b900cebc307c313) +--- + .../efibootorderfix/finalization/actor.py | 26 ++++++++++++++----- + 1 file changed, 20 insertions(+), 6 deletions(-) + +diff --git a/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py b/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py +index 9f532eb6..20b21313 100644 +--- a/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py ++++ b/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py +@@ -1,4 +1,5 @@ + import os ++import re + + from leapp.libraries.stdlib import run, api + from leapp.actors import Actor +@@ -37,6 +38,11 @@ class EfiFinalizationFix(Actor): + 'aarch64': 'shimaa64.efi' + } + ++ def devparts(dev): ++ part = next(re.finditer(r'\d+$', dev)).group(0) ++ dev = dev[:-len(part)] ++ return [dev, part]; ++ + with open('/etc/system-release', 'r') as sr: + release_line = next(line for line in sr if 'release' in line) + distro = release_line.split(' release ', 1)[0] +@@ -68,19 +74,27 @@ class EfiFinalizationFix(Actor): + break + + if is_system_efi and has_shim: ++ efidevlist = [] + with open('/proc/mounts', 'r') as fp: + for line in fp: + if '/boot/efi' in line: + efidevpath = line.split(' ', 1)[0] +- efidev = efidevpath.split('/')[-1] ++ efidevpart = 
efidevpath.split('/')[-1] + if os.path.exists('/proc/mdstat'): + with open('/proc/mdstat', 'r') as mds: + for line in mds: +- if line.startswith(efidev): +- mddev = line.split(' ')[-1] +- newefidev = mddev.split('[', 1)[0] +- efidevpath = efidevpath.replace(efidev, newefidev) +- run(['/sbin/efibootmgr', '-c', '-d', efidevpath, '-p 1', '-l', bootmgr_path, '-L', efi_bootentry_label]) + if line.startswith(efidevpart): + mddev = line.split(' ') + for md in mddev: @@ -205,41 +556,121 @@ index f42909f0..4a2bc8ad 100644 + for devpath in efidevlist: + efidev, efipart = devparts(devpath) + run(['/sbin/efibootmgr', '-c', '-d', efidev, '-p', efipart, '-l', bootmgr_path, '-L', efi_bootentry_label]) -+ -+ if not has_grub_cfg: -+ run(['/sbin/grub2-mkconfig', '-o', grub_cfg_path]) -diff --git a/repos/system_upgrade/common/actors/ipuworkflowconfig/libraries/ipuworkflowconfig.py b/repos/system_upgrade/common/actors/ipuworkflowconfig/libraries/ipuworkflowconfig.py -index 9e213f64..52cfe14f 100644 ---- a/repos/system_upgrade/common/actors/ipuworkflowconfig/libraries/ipuworkflowconfig.py -+++ b/repos/system_upgrade/common/actors/ipuworkflowconfig/libraries/ipuworkflowconfig.py -@@ -47,15 +47,20 @@ def get_os_release(path): - :return: `OSRelease` model if the file can be parsed - :raises: `IOError` - """ -+ os_version = '.'.join(platform.dist()[1].split('.')[:2]) - try: - with open(path) as f: - data = dict(l.strip().split('=', 1) for l in f.readlines() if '=' in l) -+ release_id = data.get('ID', '').strip('"') -+ version_id = data.get('VERSION_ID', '').strip('"') -+ if release_id == 'centos' and '.' 
not in os_version: -+ os_version = "{}.999".format(version_id) - return OSRelease( -- release_id=data.get('ID', '').strip('"'), -+ release_id=release_id, - name=data.get('NAME', '').strip('"'), - pretty_name=data.get('PRETTY_NAME', '').strip('"'), - version=data.get('VERSION', '').strip('"'), -- version_id=data.get('VERSION_ID', '').strip('"'), -+ version_id=os_version, - variant=data.get('VARIANT', '').strip('"') or None, - variant_id=data.get('VARIANT_ID', '').strip('"') or None - ) + + if not has_grub_cfg: + run(['/sbin/grub2-mkconfig', '-o', grub_cfg_path]) +-- +2.43.0 + + +From 9077960c41a84ac320be2b7cdf461d184e93a0b1 Mon Sep 17 00:00:00 2001 +From: Yuriy Kohut +Date: Mon, 14 Nov 2022 16:00:47 +0000 +Subject: [PATCH 13/36] Add support of: eurolinux 7.9, ol 7.9, cloudlinux 7.9; + centos 8.5, almalinux 8.6 and 8.7, eurolinux 8.6 and 8.7, ol 8.6 and 8.7, + rocky 8.6 and 8.7. + +(cherry picked from commit 16a1f5e89d43fb01c9cd5f610ad5121d0472fb48) +--- + repos/system_upgrade/common/libraries/config/version.py | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/repos/system_upgrade/common/libraries/config/version.py b/repos/system_upgrade/common/libraries/config/version.py +index b4707b33..a0ef8a81 100644 +--- a/repos/system_upgrade/common/libraries/config/version.py ++++ b/repos/system_upgrade/common/libraries/config/version.py +@@ -15,8 +15,8 @@ OP_MAP = { + + _SUPPORTED_VERSIONS = { + # Note: 'rhel-alt' is detected when on 'rhel' with kernel 4.x +- '7': {'rhel': ['7.9'], 'rhel-alt': [], 'rhel-saphana': ['7.9'], 'centos': ['7.9']}, +- '8': {'rhel': ['8.6', '8.8', '8.9'], 'rhel-saphana': ['8.6', '8.8']}, ++ '7': {'rhel': ['7.9'], 'rhel-alt': [], 'rhel-saphana': ['7.9'], 'centos': ['7.9'], 'eurolinux': ['7.9'], 'ol': ['7.9'], 'cloudlinux': ['7.9']}, ++ '8': {'rhel': ['8.6', '8.8', '8.9'], 'rhel-saphana': ['8.6', '8.8'], 'centos': ['8.5'], 'almalinux': ['8.6', '8.7'], 'eurolinux': ['8.6', '8.7'], 'ol': ['8.6', '8.7'], 'rocky': ['8.6', '8.7']}, 
+ } + + +-- +2.43.0 + + +From d0920806922356800ac67428e871dce01b2a8c90 Mon Sep 17 00:00:00 2001 +From: Andrew Lukoshko +Date: Thu, 8 Dec 2022 19:29:00 +0100 +Subject: [PATCH 14/36] Add Scientific Linux support (#33) + +(cherry picked from commit c847c4a3fdbae7536481720d19384550e5993961) +--- + repos/system_upgrade/common/libraries/config/version.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/repos/system_upgrade/common/libraries/config/version.py b/repos/system_upgrade/common/libraries/config/version.py +index a0ef8a81..e46e165e 100644 +--- a/repos/system_upgrade/common/libraries/config/version.py ++++ b/repos/system_upgrade/common/libraries/config/version.py +@@ -15,7 +15,7 @@ OP_MAP = { + + _SUPPORTED_VERSIONS = { + # Note: 'rhel-alt' is detected when on 'rhel' with kernel 4.x +- '7': {'rhel': ['7.9'], 'rhel-alt': [], 'rhel-saphana': ['7.9'], 'centos': ['7.9'], 'eurolinux': ['7.9'], 'ol': ['7.9'], 'cloudlinux': ['7.9']}, ++ '7': {'rhel': ['7.9'], 'rhel-alt': [], 'rhel-saphana': ['7.9'], 'centos': ['7.9'], 'eurolinux': ['7.9'], 'ol': ['7.9'], 'cloudlinux': ['7.9'], 'scientific': ['7.9']}, + '8': {'rhel': ['8.6', '8.8', '8.9'], 'rhel-saphana': ['8.6', '8.8'], 'centos': ['8.5'], 'almalinux': ['8.6', '8.7'], 'eurolinux': ['8.6', '8.7'], 'ol': ['8.6', '8.7'], 'rocky': ['8.6', '8.7']}, + } + +-- +2.43.0 + + +From 132f49418fe1c93c4bad997a74551e7ec1cf2035 Mon Sep 17 00:00:00 2001 +From: Orion Poplawski +Date: Fri, 9 Dec 2022 02:58:25 -0700 +Subject: [PATCH 15/36] Scientific Linux 7.9 support (#20) + +* Attempt at supporting Scientific Linux 7.9 + +* Scientific Linux uses redhat efi dir + +* Remove duplicate scientific gpg key + +Co-authored-by: Andrew Lukoshko +(cherry picked from commit 65ca7fa85744cf2d203ba7cfe5a4da8b4659674b) +--- + .../common/actors/efibootorderfix/finalization/actor.py | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py 
b/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py +index 20b21313..832f51ab 100644 +--- a/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py ++++ b/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py +@@ -30,7 +30,8 @@ class EfiFinalizationFix(Actor): + 'CentOS Stream': 'centos', + 'Oracle Linux Server': 'redhat', + 'Red Hat Enterprise Linux': 'redhat', +- 'Rocky Linux': 'rocky' ++ 'Rocky Linux': 'rocky', ++ 'Scientific Linux': 'redhat', + } + + efi_shimname_dict = { +-- +2.43.0 + + +From a87825e07e6add1e6d32369a685dc0aeda42318d Mon Sep 17 00:00:00 2001 +From: Yuriy Kohut +Date: Thu, 8 Dec 2022 19:29:00 +0100 +Subject: [PATCH 16/36] Add CentOS Linux, CloudLinux, AlmaLinux, RockyLinux, + Oracle Linux, EuroLinux and Scientific Linux sigs. + +--- + .../actors/redhatsignedrpmscanner/actor.py | 30 +++++++++++++++---- + 1 file changed, 25 insertions(+), 5 deletions(-) + diff --git a/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py b/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py -index 1085beee..8416fd39 100644 +index 45a4ef60..ef8749c5 100644 --- a/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py +++ b/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py -@@ -17,11 +17,28 @@ class RedHatSignedRpmScanner(Actor): +@@ -17,14 +17,28 @@ class RedHatSignedRpmScanner(Actor): tags = (IPUWorkflowTag, FactsPhaseTag) def process(self): @@ -248,16 +679,18 @@ index 1085beee..8416fd39 100644 '5326810137017186', '938a80caf21541eb', 'fd372689897da07a', -- '45689c882fa658e0'] -+ '45689c882fa658e0', + '45689c882fa658e0', +- '24c6a8a7f4a80eb5', +- '51d6647ec21ad6ea', +- '4eb84e71f2ee9d55'] + '24c6a8a7f4a80eb5', # centos + '05b555b38483c65d', + '4eb84e71f2ee9d55', + 'a963bbdbf533f4fa', + '6c7cb6ef305d49d6', ++ '8c55a6628608cb71', # cloudlinux + '51d6647ec21ad6ea', # almalinux + 'd36cb86cb86b3716', -+ '2ae81e8aced7258b', + '15af5dac6d745a60', # rockylinux 
+ '702d426d350d275d', + '72f97b74ec551f03', # ol @@ -270,13 +703,14 @@ index 1085beee..8416fd39 100644 signed_pkgs = InstalledRedHatSignedRPM() unsigned_pkgs = InstalledUnsignedRPM() -@@ -46,7 +63,12 @@ class RedHatSignedRpmScanner(Actor): +@@ -49,7 +63,13 @@ class RedHatSignedRpmScanner(Actor): """ return ( # pylint: disable-msg=consider-using-ternary pkg.name == 'gpg-pubkey' -- and pkg.packager.startswith('Red Hat, Inc.') +- and (pkg.packager.startswith('Red Hat, Inc.') or pkg.packager.startswith('CentOS') or pkg.packager.startswith('AlmaLinux')) + and (pkg.packager.startswith('Red Hat, Inc.') + or pkg.packager.startswith('CentOS') ++ or pkg.packager.startswith('CloudLinux') + or pkg.packager.startswith('AlmaLinux') + or pkg.packager.startswith('infrastructure@rockylinux.org') + or pkg.packager.startswith('EuroLinux') @@ -284,6 +718,4011 @@ index 1085beee..8416fd39 100644 or all_signed ) +-- +2.43.0 + + +From 1fac729266c733af67ee69c2425804865291ea53 Mon Sep 17 00:00:00 2001 +From: Andrew Lukoshko +Date: Thu, 25 May 2023 18:57:10 +0900 +Subject: [PATCH 17/36] Add support for 8.8 (#75) + +(cherry picked from commit 67956b80df2d90a092ccb1d5a9f4a6e949dc93d8) +--- + repos/system_upgrade/common/libraries/config/version.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/repos/system_upgrade/common/libraries/config/version.py b/repos/system_upgrade/common/libraries/config/version.py +index e46e165e..b435cc99 100644 +--- a/repos/system_upgrade/common/libraries/config/version.py ++++ b/repos/system_upgrade/common/libraries/config/version.py +@@ -16,7 +16,7 @@ OP_MAP = { + _SUPPORTED_VERSIONS = { + # Note: 'rhel-alt' is detected when on 'rhel' with kernel 4.x + '7': {'rhel': ['7.9'], 'rhel-alt': [], 'rhel-saphana': ['7.9'], 'centos': ['7.9'], 'eurolinux': ['7.9'], 'ol': ['7.9'], 'cloudlinux': ['7.9'], 'scientific': ['7.9']}, +- '8': {'rhel': ['8.6', '8.8', '8.9'], 'rhel-saphana': ['8.6', '8.8'], 'centos': ['8.5'], 'almalinux': ['8.6', '8.7'], 
'eurolinux': ['8.6', '8.7'], 'ol': ['8.6', '8.7'], 'rocky': ['8.6', '8.7']}, ++ '8': {'rhel': ['8.6', '8.8', '8.9'], 'rhel-saphana': ['8.6', '8.8'], 'centos': ['8.5'], 'almalinux': ['8.6', '8.7', '8.8'], 'eurolinux': ['8.6', '8.7', '8.8'], 'ol': ['8.6', '8.7', '8.8'], 'rocky': ['8.6', '8.7', '8.8']}, + } + + +-- +2.43.0 + + +From bdf87f2cbe298cc15c401fdebcc3f6d348442f3c Mon Sep 17 00:00:00 2001 +From: Andrew Lukoshko +Date: Wed, 22 Nov 2023 20:14:44 +0100 +Subject: [PATCH 18/36] Add support for 8.9 and 8.10 Put back support for RHEL + 8.5 + +(cherry picked from commit d1ae79368692366c47d717566695942a95895d01) +--- + repos/system_upgrade/common/libraries/config/version.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/repos/system_upgrade/common/libraries/config/version.py b/repos/system_upgrade/common/libraries/config/version.py +index b435cc99..b4a5b022 100644 +--- a/repos/system_upgrade/common/libraries/config/version.py ++++ b/repos/system_upgrade/common/libraries/config/version.py +@@ -16,7 +16,7 @@ OP_MAP = { + _SUPPORTED_VERSIONS = { + # Note: 'rhel-alt' is detected when on 'rhel' with kernel 4.x + '7': {'rhel': ['7.9'], 'rhel-alt': [], 'rhel-saphana': ['7.9'], 'centos': ['7.9'], 'eurolinux': ['7.9'], 'ol': ['7.9'], 'cloudlinux': ['7.9'], 'scientific': ['7.9']}, +- '8': {'rhel': ['8.6', '8.8', '8.9'], 'rhel-saphana': ['8.6', '8.8'], 'centos': ['8.5'], 'almalinux': ['8.6', '8.7', '8.8'], 'eurolinux': ['8.6', '8.7', '8.8'], 'ol': ['8.6', '8.7', '8.8'], 'rocky': ['8.6', '8.7', '8.8']}, ++ '8': {'rhel': ['8.5', '8.6', '8.8', '8.9', '8.10'], 'rhel-saphana': ['8.6', '8.8', '8.9', '8.10'], 'centos': ['8.5'], 'almalinux': ['8.6', '8.7', '8.8', '8.9', '8.10'], 'eurolinux': ['8.6', '8.7', '8.8', '8.9', '8.10'], 'ol': ['8.6', '8.7', '8.8', '8.9', '8.10'], 'rocky': ['8.6', '8.7', '8.8', '8.9', '8.10']}, + } + + +-- +2.43.0 + + +From 1362cd2678920382e3d54c22dce21aa4740a93e7 Mon Sep 17 00:00:00 2001 +From: Yuriy Kohut +Date: Fri, 22 Dec 2023 
13:05:30 +0200 +Subject: [PATCH 19/36] Drop CloudLinux related code. + +--- + .../common/actors/redhatsignedrpmscanner/actor.py | 2 -- + repos/system_upgrade/common/libraries/config/version.py | 2 +- + 2 files changed, 1 insertion(+), 3 deletions(-) + +diff --git a/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py b/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py +index ef8749c5..f4f8d2c5 100644 +--- a/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py ++++ b/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py +@@ -27,7 +27,6 @@ class RedHatSignedRpmScanner(Actor): + '4eb84e71f2ee9d55', + 'a963bbdbf533f4fa', + '6c7cb6ef305d49d6', +- '8c55a6628608cb71', # cloudlinux + '51d6647ec21ad6ea', # almalinux + 'd36cb86cb86b3716', + '15af5dac6d745a60', # rockylinux +@@ -65,7 +64,6 @@ class RedHatSignedRpmScanner(Actor): + pkg.name == 'gpg-pubkey' + and (pkg.packager.startswith('Red Hat, Inc.') + or pkg.packager.startswith('CentOS') +- or pkg.packager.startswith('CloudLinux') + or pkg.packager.startswith('AlmaLinux') + or pkg.packager.startswith('infrastructure@rockylinux.org') + or pkg.packager.startswith('EuroLinux') +diff --git a/repos/system_upgrade/common/libraries/config/version.py b/repos/system_upgrade/common/libraries/config/version.py +index b4a5b022..95f0d231 100644 +--- a/repos/system_upgrade/common/libraries/config/version.py ++++ b/repos/system_upgrade/common/libraries/config/version.py +@@ -15,7 +15,7 @@ OP_MAP = { + + _SUPPORTED_VERSIONS = { + # Note: 'rhel-alt' is detected when on 'rhel' with kernel 4.x +- '7': {'rhel': ['7.9'], 'rhel-alt': [], 'rhel-saphana': ['7.9'], 'centos': ['7.9'], 'eurolinux': ['7.9'], 'ol': ['7.9'], 'cloudlinux': ['7.9'], 'scientific': ['7.9']}, ++ '7': {'rhel': ['7.9'], 'rhel-alt': [], 'rhel-saphana': ['7.9'], 'centos': ['7.9'], 'eurolinux': ['7.9'], 'ol': ['7.9'], 'scientific': ['7.9']}, + '8': {'rhel': ['8.5', '8.6', '8.8', '8.9', '8.10'], 'rhel-saphana': ['8.6', 
'8.8', '8.9', '8.10'], 'centos': ['8.5'], 'almalinux': ['8.6', '8.7', '8.8', '8.9', '8.10'], 'eurolinux': ['8.6', '8.7', '8.8', '8.9', '8.10'], 'ol': ['8.6', '8.7', '8.8', '8.9', '8.10'], 'rocky': ['8.6', '8.7', '8.8', '8.9', '8.10']}, + } + +-- +2.43.0 + + +From 8d4bc08af97a5b1bbbaee1f1c94abb9a6fb1f20e Mon Sep 17 00:00:00 2001 +From: Elkhan Mammadli +Date: Mon, 2 Oct 2023 16:23:32 +0400 +Subject: [PATCH 20/36] Initial version of automated ELevation testing + +Signed-off-by: Elkhan Mammadli +(cherry picked from commit 7c71c0f0c024badf52adcb269157bb6c4cdd453b) +--- + ci/.gitignore | 1 + + ci/ansible/ansible.cfg | 4 + + ci/ansible/docker-ce.yaml | 6 + + ci/ansible/minimal.yaml | 6 + + ci/ansible/requirements.yaml | 3 + + ci/ansible/roles/docker-ce/README.md | 43 +++ + ci/ansible/roles/docker-ce/defaults/main.yaml | 3 + + ci/ansible/roles/docker-ce/handlers/main.yaml | 2 + + ci/ansible/roles/docker-ce/meta/main.yaml | 25 ++ + .../docker-ce/tasks/install_docker_el7.yaml | 11 + + .../docker-ce/tasks/install_docker_el8.yaml | 11 + + ci/ansible/roles/docker-ce/tasks/main.yaml | 38 +++ + .../tasks/remove_old_docker_el7.yaml | 15 + + .../tasks/remove_old_docker_el8.yaml | 15 + + ci/ansible/roles/docker-ce/tests/inventory | 2 + + ci/ansible/roles/docker-ce/tests/test.yaml | 5 + + ci/ansible/roles/docker-ce/vars/main.yaml | 2 + + ci/ansible/roles/minimal/README.md | 38 +++ + ci/ansible/roles/minimal/defaults/main.yaml | 2 + + ci/ansible/roles/minimal/handlers/main.yaml | 2 + + ci/ansible/roles/minimal/meta/main.yaml | 23 ++ + .../roles/minimal/tasks/cleanup_el7.yaml | 10 + + .../roles/minimal/tasks/cleanup_el8.yaml | 7 + + ci/ansible/roles/minimal/tasks/main.yaml | 21 ++ + .../roles/minimal/tasks/upgrade_el7.yaml | 8 + + .../roles/minimal/tasks/upgrade_el8.yaml | 8 + + ci/ansible/roles/minimal/tests/inventory | 2 + + ci/ansible/roles/minimal/tests/test.yaml | 5 + + ci/ansible/roles/minimal/vars/main.yaml | 2 + + .../ELevate_el7toel8_Development.jenkinsfile | 256 
++++++++++++++++++ + .../ELevate_el7toel8_Stable.jenkinsfile | 237 ++++++++++++++++ + .../ELevate_el7toel8_Testing.jenkinsfile | 237 ++++++++++++++++ + .../ELevate_el8toel9_Development.jenkinsfile | 204 ++++++++++++++ + .../ELevate_el8toel9_Stable.jenkinsfile | 221 +++++++++++++++ + .../ELevate_el8toel9_Testing.jenkinsfile | 191 +++++++++++++ + ci/scripts/install_elevate_dev.sh | 69 +++++ + ci/tests/tests/conftest.py | 52 ++++ + .../tests/distro/test_osinfo_almalinux_8.py | 43 +++ + .../tests/distro/test_osinfo_almalinux_9.py | 52 ++++ + .../distro/test_osinfo_centosstream_8.py | 23 ++ + .../distro/test_osinfo_centosstream_9.py | 23 ++ + .../tests/distro/test_osinfo_eurolinux_8.py | 23 ++ + .../tests/distro/test_osinfo_eurolinux_9.py | 23 ++ + .../tests/distro/test_osinfo_oraclelinux_8.py | 23 ++ + .../tests/distro/test_osinfo_oraclelinux_9.py | 23 ++ + ci/tests/tests/distro/test_osinfo_rocky_8.py | 23 ++ + ci/tests/tests/distro/test_osinfo_rocky_9.py | 23 ++ + ci/tests/tests/docker/test_docker_ce.py | 26 ++ + ci/vagrant/el7toel8_multi.rb | 39 +++ + ci/vagrant/el7toel8toel9_single.rb | 53 ++++ + ci/vagrant/el8toel9_multi.rb | 45 +++ + 51 files changed, 2229 insertions(+) + create mode 100644 ci/.gitignore + create mode 100644 ci/ansible/ansible.cfg + create mode 100644 ci/ansible/docker-ce.yaml + create mode 100644 ci/ansible/minimal.yaml + create mode 100644 ci/ansible/requirements.yaml + create mode 100644 ci/ansible/roles/docker-ce/README.md + create mode 100644 ci/ansible/roles/docker-ce/defaults/main.yaml + create mode 100644 ci/ansible/roles/docker-ce/handlers/main.yaml + create mode 100644 ci/ansible/roles/docker-ce/meta/main.yaml + create mode 100644 ci/ansible/roles/docker-ce/tasks/install_docker_el7.yaml + create mode 100644 ci/ansible/roles/docker-ce/tasks/install_docker_el8.yaml + create mode 100644 ci/ansible/roles/docker-ce/tasks/main.yaml + create mode 100644 ci/ansible/roles/docker-ce/tasks/remove_old_docker_el7.yaml + create mode 100644 
ci/ansible/roles/docker-ce/tasks/remove_old_docker_el8.yaml + create mode 100644 ci/ansible/roles/docker-ce/tests/inventory + create mode 100644 ci/ansible/roles/docker-ce/tests/test.yaml + create mode 100644 ci/ansible/roles/docker-ce/vars/main.yaml + create mode 100644 ci/ansible/roles/minimal/README.md + create mode 100644 ci/ansible/roles/minimal/defaults/main.yaml + create mode 100644 ci/ansible/roles/minimal/handlers/main.yaml + create mode 100644 ci/ansible/roles/minimal/meta/main.yaml + create mode 100644 ci/ansible/roles/minimal/tasks/cleanup_el7.yaml + create mode 100644 ci/ansible/roles/minimal/tasks/cleanup_el8.yaml + create mode 100644 ci/ansible/roles/minimal/tasks/main.yaml + create mode 100644 ci/ansible/roles/minimal/tasks/upgrade_el7.yaml + create mode 100644 ci/ansible/roles/minimal/tasks/upgrade_el8.yaml + create mode 100644 ci/ansible/roles/minimal/tests/inventory + create mode 100644 ci/ansible/roles/minimal/tests/test.yaml + create mode 100644 ci/ansible/roles/minimal/vars/main.yaml + create mode 100644 ci/jenkins/ELevate_el7toel8_Development.jenkinsfile + create mode 100644 ci/jenkins/ELevate_el7toel8_Stable.jenkinsfile + create mode 100644 ci/jenkins/ELevate_el7toel8_Testing.jenkinsfile + create mode 100644 ci/jenkins/ELevate_el8toel9_Development.jenkinsfile + create mode 100644 ci/jenkins/ELevate_el8toel9_Stable.jenkinsfile + create mode 100644 ci/jenkins/ELevate_el8toel9_Testing.jenkinsfile + create mode 100644 ci/scripts/install_elevate_dev.sh + create mode 100644 ci/tests/tests/conftest.py + create mode 100644 ci/tests/tests/distro/test_osinfo_almalinux_8.py + create mode 100644 ci/tests/tests/distro/test_osinfo_almalinux_9.py + create mode 100644 ci/tests/tests/distro/test_osinfo_centosstream_8.py + create mode 100644 ci/tests/tests/distro/test_osinfo_centosstream_9.py + create mode 100644 ci/tests/tests/distro/test_osinfo_eurolinux_8.py + create mode 100644 ci/tests/tests/distro/test_osinfo_eurolinux_9.py + create mode 100644 
ci/tests/tests/distro/test_osinfo_oraclelinux_8.py + create mode 100644 ci/tests/tests/distro/test_osinfo_oraclelinux_9.py + create mode 100644 ci/tests/tests/distro/test_osinfo_rocky_8.py + create mode 100644 ci/tests/tests/distro/test_osinfo_rocky_9.py + create mode 100644 ci/tests/tests/docker/test_docker_ce.py + create mode 100644 ci/vagrant/el7toel8_multi.rb + create mode 100644 ci/vagrant/el7toel8toel9_single.rb + create mode 100644 ci/vagrant/el8toel9_multi.rb + +diff --git a/ci/.gitignore b/ci/.gitignore +new file mode 100644 +index 00000000..e6f97f0f +--- /dev/null ++++ b/ci/.gitignore +@@ -0,0 +1 @@ ++**/.vagrant +diff --git a/ci/ansible/ansible.cfg b/ci/ansible/ansible.cfg +new file mode 100644 +index 00000000..d5c13036 +--- /dev/null ++++ b/ci/ansible/ansible.cfg +@@ -0,0 +1,4 @@ ++[defaults] ++callbacks_enabled=ansible.posix.profile_tasks ++stdout_callback=community.general.yaml ++pipelining=True +diff --git a/ci/ansible/docker-ce.yaml b/ci/ansible/docker-ce.yaml +new file mode 100644 +index 00000000..bba5f3df +--- /dev/null ++++ b/ci/ansible/docker-ce.yaml +@@ -0,0 +1,6 @@ ++--- ++- name: Docker CE configuration ++ hosts: all ++ become: yes ++ roles: ++ - docker-ce +diff --git a/ci/ansible/minimal.yaml b/ci/ansible/minimal.yaml +new file mode 100644 +index 00000000..517cc81b +--- /dev/null ++++ b/ci/ansible/minimal.yaml +@@ -0,0 +1,6 @@ ++--- ++- name: Minimal configuration ++ hosts: all ++ become: yes ++ roles: ++ - minimal +diff --git a/ci/ansible/requirements.yaml b/ci/ansible/requirements.yaml +new file mode 100644 +index 00000000..13ca0224 +--- /dev/null ++++ b/ci/ansible/requirements.yaml +@@ -0,0 +1,3 @@ ++collections: ++ - name: community.general ++ - name: ansible.posix +diff --git a/ci/ansible/roles/docker-ce/README.md b/ci/ansible/roles/docker-ce/README.md +new file mode 100644 +index 00000000..860444b1 +--- /dev/null ++++ b/ci/ansible/roles/docker-ce/README.md +@@ -0,0 +1,43 @@ ++Docker CE Install and configuration ++========= ++ ++Install 
latest version of Docker CE Engine form upstream repository. Start and enable services after installation. ++ ++Requirements ++------------ ++ ++Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. ++ ++Role Variables ++-------------- ++ ++`docker_ce_repo_checksum` in defaults/main.yaml. SHA512 Checksum of the docker-ce.repo file. ++A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. ++ ++Dependencies ++------------ ++ ++A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 
++ ++Example Playbook ++---------------- ++ ++Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: ++ ++ - hosts: all ++ become: yes ++ roles: ++ - role: docker ++ vars: ++ docker_ce_repo_checksum: sha512:XXXX # You can provide the new checksum if the default one not actual ++ ++ ++License ++------- ++ ++GPL-3.0-or-later ++ ++Author Information ++------------------ ++ ++AlmaLinux OS Foundation +diff --git a/ci/ansible/roles/docker-ce/defaults/main.yaml b/ci/ansible/roles/docker-ce/defaults/main.yaml +new file mode 100644 +index 00000000..d0fd0c09 +--- /dev/null ++++ b/ci/ansible/roles/docker-ce/defaults/main.yaml +@@ -0,0 +1,3 @@ ++--- ++# defaults file for docker-ce ++docker_ce_repo_checksum: sha512:1de0b99cbb427e974144f226451711dc491caef6b1256cb599ff307a687ba2d7dd959a016d4e4cfdd4acbd83423ba1f78fa89db61bab35351e35f1152aedaf5c +diff --git a/ci/ansible/roles/docker-ce/handlers/main.yaml b/ci/ansible/roles/docker-ce/handlers/main.yaml +new file mode 100644 +index 00000000..a7236219 +--- /dev/null ++++ b/ci/ansible/roles/docker-ce/handlers/main.yaml +@@ -0,0 +1,2 @@ ++--- ++# handlers file for docker-ce +diff --git a/ci/ansible/roles/docker-ce/meta/main.yaml b/ci/ansible/roles/docker-ce/meta/main.yaml +new file mode 100644 +index 00000000..aa67ded8 +--- /dev/null ++++ b/ci/ansible/roles/docker-ce/meta/main.yaml +@@ -0,0 +1,25 @@ ++galaxy_info: ++ author: AlmaLinux OS Community ++ description: Install and configure Docker CE Engine ++ company: AlmaLinux OS Foundation ++ ++ license: GPL-3.0-or-later ++ ++ min_ansible_version: 2.11 ++ ++ platforms: ++ - name: EL ++ versions: ++ - 7 ++ - 8 ++ - 9 ++ ++ galaxy_tags: ++ - docker ++ - el7 ++ - el8 ++ - el9 ++ - almalinux ++ ++dependencies: ++ - minimal +diff --git a/ci/ansible/roles/docker-ce/tasks/install_docker_el7.yaml b/ci/ansible/roles/docker-ce/tasks/install_docker_el7.yaml +new file mode 100644 +index 00000000..320477af +--- /dev/null ++++ 
b/ci/ansible/roles/docker-ce/tasks/install_docker_el7.yaml +@@ -0,0 +1,11 @@ ++--- ++# Install Docker ++- name: Install Docker CE Stable ++ ansible.builtin.yum: ++ name: ++ - docker-ce ++ - docker-ce-cli ++ - containerd.io ++ - docker-compose-plugin ++ update_cache: yes ++ state: present +diff --git a/ci/ansible/roles/docker-ce/tasks/install_docker_el8.yaml b/ci/ansible/roles/docker-ce/tasks/install_docker_el8.yaml +new file mode 100644 +index 00000000..d44a202a +--- /dev/null ++++ b/ci/ansible/roles/docker-ce/tasks/install_docker_el8.yaml +@@ -0,0 +1,11 @@ ++--- ++# Install Docker ++- name: Install Docker CE Stable ++ ansible.builtin.dnf: ++ name: ++ - docker-ce ++ - docker-ce-cli ++ - containerd.io ++ - docker-compose-plugin ++ update_cache: yes ++ state: present +diff --git a/ci/ansible/roles/docker-ce/tasks/main.yaml b/ci/ansible/roles/docker-ce/tasks/main.yaml +new file mode 100644 +index 00000000..989af23f +--- /dev/null ++++ b/ci/ansible/roles/docker-ce/tasks/main.yaml +@@ -0,0 +1,38 @@ ++--- ++# tasks file for docker-ce ++- name: Add Docker CE repository ++ ansible.builtin.get_url: ++ url: https://download.docker.com/linux/centos/docker-ce.repo ++ dest: /etc/yum.repos.d/docker-ce.repo ++ checksum: "{{ docker_ce_repo_checksum }}" ++ owner: root ++ group: root ++ mode: '0644' ++ seuser: system_u ++ serole: object_r ++ setype: system_conf_t ++ ++- name: Remove older versions of Docker on EL7 ++ ansible.builtin.include_tasks: remove_old_docker_el7.yaml ++ when: ansible_facts['distribution_major_version'] == '7' ++ ++- name: Remove older versions of Docker on >= EL8 ++ ansible.builtin.include_tasks: remove_old_docker_el8.yaml ++ when: ansible_facts['distribution_major_version'] == '8' ++ ++- name: Install Docker CE Stable on EL7 ++ ansible.builtin.include_tasks: install_docker_el7.yaml ++ when: ansible_facts['distribution_major_version'] == '7' ++ ++- name: Install Docker CE Stable on >= EL8 ++ ansible.builtin.include_tasks: install_docker_el8.yaml ++ when: 
ansible_facts['distribution_major_version'] == '8' ++ ++- name: Start and Enable Docker services ++ ansible.builtin.systemd: ++ name: "{{ item }}" ++ enabled: yes ++ state: started ++ loop: ++ - docker.service ++ - containerd.service +diff --git a/ci/ansible/roles/docker-ce/tasks/remove_old_docker_el7.yaml b/ci/ansible/roles/docker-ce/tasks/remove_old_docker_el7.yaml +new file mode 100644 +index 00000000..db9e0960 +--- /dev/null ++++ b/ci/ansible/roles/docker-ce/tasks/remove_old_docker_el7.yaml +@@ -0,0 +1,15 @@ ++--- ++# Remove older versions of Docker ++- name: Uninstall older versions of Docker ++ ansible.builtin.yum: ++ name: ++ - docker ++ - docker-client ++ - docker-client-latest ++ - docker-common ++ - docker-latest ++ - docker-latest-logrotate ++ - docker-logrotate ++ - docker-engine ++ autoremove: yes ++ state: absent +diff --git a/ci/ansible/roles/docker-ce/tasks/remove_old_docker_el8.yaml b/ci/ansible/roles/docker-ce/tasks/remove_old_docker_el8.yaml +new file mode 100644 +index 00000000..88f860cf +--- /dev/null ++++ b/ci/ansible/roles/docker-ce/tasks/remove_old_docker_el8.yaml +@@ -0,0 +1,15 @@ ++--- ++# Remove older versions of Docker ++- name: Uninstall older versions of Docker ++ ansible.builtin.dnf: ++ name: ++ - docker ++ - docker-client ++ - docker-client-latest ++ - docker-common ++ - docker-latest ++ - docker-latest-logrotate ++ - docker-logrotate ++ - docker-engine ++ autoremove: yes ++ state: absent +diff --git a/ci/ansible/roles/docker-ce/tests/inventory b/ci/ansible/roles/docker-ce/tests/inventory +new file mode 100644 +index 00000000..878877b0 +--- /dev/null ++++ b/ci/ansible/roles/docker-ce/tests/inventory +@@ -0,0 +1,2 @@ ++localhost ++ +diff --git a/ci/ansible/roles/docker-ce/tests/test.yaml b/ci/ansible/roles/docker-ce/tests/test.yaml +new file mode 100644 +index 00000000..789ba96e +--- /dev/null ++++ b/ci/ansible/roles/docker-ce/tests/test.yaml +@@ -0,0 +1,5 @@ ++--- ++- hosts: localhost ++ remote_user: root ++ roles: ++ - docker-ce 
+diff --git a/ci/ansible/roles/docker-ce/vars/main.yaml b/ci/ansible/roles/docker-ce/vars/main.yaml +new file mode 100644 +index 00000000..7ff8a18f +--- /dev/null ++++ b/ci/ansible/roles/docker-ce/vars/main.yaml +@@ -0,0 +1,2 @@ ++--- ++# vars file for docker-ce +diff --git a/ci/ansible/roles/minimal/README.md b/ci/ansible/roles/minimal/README.md +new file mode 100644 +index 00000000..225dd44b +--- /dev/null ++++ b/ci/ansible/roles/minimal/README.md +@@ -0,0 +1,38 @@ ++Role Name ++========= ++ ++A brief description of the role goes here. ++ ++Requirements ++------------ ++ ++Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. ++ ++Role Variables ++-------------- ++ ++A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. ++ ++Dependencies ++------------ ++ ++A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. ++ ++Example Playbook ++---------------- ++ ++Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: ++ ++ - hosts: servers ++ roles: ++ - { role: username.rolename, x: 42 } ++ ++License ++------- ++ ++BSD ++ ++Author Information ++------------------ ++ ++An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
+diff --git a/ci/ansible/roles/minimal/defaults/main.yaml b/ci/ansible/roles/minimal/defaults/main.yaml +new file mode 100644 +index 00000000..4a5a46cd +--- /dev/null ++++ b/ci/ansible/roles/minimal/defaults/main.yaml +@@ -0,0 +1,2 @@ ++--- ++# defaults file for minimal +diff --git a/ci/ansible/roles/minimal/handlers/main.yaml b/ci/ansible/roles/minimal/handlers/main.yaml +new file mode 100644 +index 00000000..89105fec +--- /dev/null ++++ b/ci/ansible/roles/minimal/handlers/main.yaml +@@ -0,0 +1,2 @@ ++--- ++# handlers file for minimal +diff --git a/ci/ansible/roles/minimal/meta/main.yaml b/ci/ansible/roles/minimal/meta/main.yaml +new file mode 100644 +index 00000000..ecc81ab7 +--- /dev/null ++++ b/ci/ansible/roles/minimal/meta/main.yaml +@@ -0,0 +1,23 @@ ++galaxy_info: ++ author: AlmaLinux OS Community ++ description: Minimal configuration for ELevate ++ company: AlmaLinux OS Foundation ++ ++ license: GPL-3.0-or-later ++ ++ min_ansible_version: 2.11 ++ ++ platforms: ++ - name: EL ++ versions: ++ - 7 ++ - 8 ++ - 9 ++ ++ galaxy_tags: ++ - elevate ++ - upgrade ++ - cleanup ++ - el7 ++ - el8 ++ - el9 +diff --git a/ci/ansible/roles/minimal/tasks/cleanup_el7.yaml b/ci/ansible/roles/minimal/tasks/cleanup_el7.yaml +new file mode 100644 +index 00000000..1b4af7c6 +--- /dev/null ++++ b/ci/ansible/roles/minimal/tasks/cleanup_el7.yaml +@@ -0,0 +1,10 @@ ++--- ++# Remove old kernels ++- name: Install the yum-utils ++ ansible.builtin.yum: ++ name: yum-utils ++ state: present ++ update_cache: yes ++ ++- name: Remove the old kernels on EL7 ++ ansible.builtin.command: package-cleanup -y --oldkernels --count=1 +diff --git a/ci/ansible/roles/minimal/tasks/cleanup_el8.yaml b/ci/ansible/roles/minimal/tasks/cleanup_el8.yaml +new file mode 100644 +index 00000000..56aeefd3 +--- /dev/null ++++ b/ci/ansible/roles/minimal/tasks/cleanup_el8.yaml +@@ -0,0 +1,7 @@ ++--- ++# Remove old kernels ++- name: Remove old kernels on EL8 ++ ansible.builtin.command: dnf -y remove --oldinstallonly ++ 
register: removeoldkernels ++ changed_when: removeoldkernels.rc == 0 ++ failed_when: removeoldkernels.rc > 1 +diff --git a/ci/ansible/roles/minimal/tasks/main.yaml b/ci/ansible/roles/minimal/tasks/main.yaml +new file mode 100644 +index 00000000..8c1b35bd +--- /dev/null ++++ b/ci/ansible/roles/minimal/tasks/main.yaml +@@ -0,0 +1,21 @@ ++--- ++# tasks file for minimal ++- name: Upgrade the packages on EL7 ++ ansible.builtin.include_tasks: upgrade_el7.yaml ++ when: ansible_facts['distribution_major_version'] == '7' ++ ++- name: Upgrade the packages on EL8 ++ ansible.builtin.include_tasks: upgrade_el8.yaml ++ when: ansible_facts['distribution_major_version'] == '8' ++ ++- name: Reboot the system ++ ansible.builtin.reboot: ++ when: upgrade_status is changed ++ ++- name: Cleanup the older kernels on EL7 ++ ansible.builtin.include_tasks: cleanup_el7.yaml ++ when: ansible_facts['distribution_major_version'] == '7' ++ ++- name: Cleanup the older kernels on El8 ++ ansible.builtin.include_tasks: cleanup_el8.yaml ++ when: ansible_facts['distribution_major_version'] == '8' +diff --git a/ci/ansible/roles/minimal/tasks/upgrade_el7.yaml b/ci/ansible/roles/minimal/tasks/upgrade_el7.yaml +new file mode 100644 +index 00000000..7648a586 +--- /dev/null ++++ b/ci/ansible/roles/minimal/tasks/upgrade_el7.yaml +@@ -0,0 +1,8 @@ ++--- ++# Upgrade the system ++- name: Upgrade the system ++ ansible.builtin.yum: ++ name: "*" ++ state: latest ++ update_cache: yes ++ register: upgrade_status +diff --git a/ci/ansible/roles/minimal/tasks/upgrade_el8.yaml b/ci/ansible/roles/minimal/tasks/upgrade_el8.yaml +new file mode 100644 +index 00000000..0d4a5d2a +--- /dev/null ++++ b/ci/ansible/roles/minimal/tasks/upgrade_el8.yaml +@@ -0,0 +1,8 @@ ++--- ++# Upgrade the system ++- name: Upgrade the system ++ ansible.builtin.dnf: ++ name: "*" ++ state: latest ++ update_cache: yes ++ register: upgrade_status +diff --git a/ci/ansible/roles/minimal/tests/inventory b/ci/ansible/roles/minimal/tests/inventory +new 
file mode 100644 +index 00000000..878877b0 +--- /dev/null ++++ b/ci/ansible/roles/minimal/tests/inventory +@@ -0,0 +1,2 @@ ++localhost ++ +diff --git a/ci/ansible/roles/minimal/tests/test.yaml b/ci/ansible/roles/minimal/tests/test.yaml +new file mode 100644 +index 00000000..db5c4c17 +--- /dev/null ++++ b/ci/ansible/roles/minimal/tests/test.yaml +@@ -0,0 +1,5 @@ ++--- ++- hosts: localhost ++ remote_user: root ++ roles: ++ - minimal +diff --git a/ci/ansible/roles/minimal/vars/main.yaml b/ci/ansible/roles/minimal/vars/main.yaml +new file mode 100644 +index 00000000..b24df080 +--- /dev/null ++++ b/ci/ansible/roles/minimal/vars/main.yaml +@@ -0,0 +1,2 @@ ++--- ++# vars file for minimal +diff --git a/ci/jenkins/ELevate_el7toel8_Development.jenkinsfile b/ci/jenkins/ELevate_el7toel8_Development.jenkinsfile +new file mode 100644 +index 00000000..d83e1788 +--- /dev/null ++++ b/ci/jenkins/ELevate_el7toel8_Development.jenkinsfile +@@ -0,0 +1,256 @@ ++RETRY = params.RETRY ++TIMEOUT = params.TIMEOUT ++ ++pipeline { ++ agent { ++ label 'x86_64 && bm' ++ } ++ options { ++ timestamps() ++ parallelsAlwaysFailFast() ++ } ++ parameters { ++ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'eurolinux-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a target distro or all for ELevation') ++ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration') ++ string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true) ++ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true) ++ } ++ environment { ++ VAGRANT_NO_COLOR = '1' ++ } ++ stages { ++ stage('Prepare') { ++ steps { ++ sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml', ++ label: 'Install Ansible collections' ++ sh script: 'python3.11 -m venv .venv', ++ label: 'Create Python virtual environment' ++ sh script: '. 
.venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko', ++ label: 'Install Testinfra' ++ sh script: 'git clone https://github.com/AlmaLinux/leapp-data.git --branch devel', ++ label: 'Fetch devel version of leapp data' ++ } ++ } ++ stage('CreateSingleMachine') { ++ when { ++ expression { params.TARGET_DISTRO_FILTER != 'all' } ++ } ++ environment { ++ CONFIG = "${CONF_FILTER}" ++ } ++ steps { ++ script { ++ def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER) ++ ++ sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile', ++ label: 'Generate Vagrantfile' ++ sh script: "vagrant up $targetDistro.vmName", ++ label: 'Create source VM' ++ } ++ } ++ } ++ stage('CreateMultiMachine') { ++ when { ++ expression { params.TARGET_DISTRO_FILTER == 'all' } ++ } ++ environment { ++ CONFIG = "${CONF_FILTER}" ++ } ++ steps { ++ sh script: 'cp ci/vagrant/el7toel8_multi.rb Vagrantfile', ++ label: 'Generate Vagrantfile' ++ sh script: 'vagrant up', ++ label: 'Create source VM' ++ } ++ } ++ stage('ELevationAndTest') { ++ matrix { ++ when { ++ anyOf { ++ expression { params.TARGET_DISTRO_FILTER == 'all' } ++ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO } ++ } ++ } ++ axes { ++ axis { ++ name 'TARGET_DISTRO' ++ values 'almalinux-8', 'centos-stream-8', 'eurolinux-8', 'oraclelinux-8', 'rocky-8' ++ } ++ } ++ stages { ++ stage('ELevate') { ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ script { ++ def targetDistro = targetDistroSpec(TARGET_DISTRO) ++ ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum-config-manager --add-repo https://repo.almalinux.org/elevate/testing/elevate-testing.repo\"", ++ label: 'Add testing repo of ELevate' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y leapp-upgrade\"", ++ label: 'Install testing version of ELevate' ++ sh script: "vagrant upload ci/scripts/install_elevate_dev.sh install_elevate_dev.sh $targetDistro.vmName", ++ label: 'Upload installer 
script to VMs' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo bash install_elevate_dev.sh\"", ++ label: 'Install development version of ELevate', ++ returnStatus: true ++ sh script: "vagrant upload leapp-data/ leapp-data/ --compress $targetDistro.vmName", ++ label: 'Upload devel branch of leapp data' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo mkdir -p /etc/leapp/files/vendors.d\"", ++ label: 'Create directory structrue of leapp data' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo install -t /etc/leapp/files leapp-data/files/${targetDistro.leappData}/*\"", ++ label: 'Install devel version of leapp data' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo install -t /etc/leapp/files/vendors.d leapp-data/vendors.d/*\"", ++ label: 'Install devel version of leapp vendor data' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo mv -f /etc/leapp/files/leapp_upgrade_repositories.repo.el8 /etc/leapp/files/leapp_upgrade_repositories.repo\"", ++ label: 'Configure leapp upgrade repositories for EL7toEL8' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo mv -f /etc/leapp/files/repomap.json.el8 /etc/leapp/files/repomap.json\"", ++ label: 'Configure leapp repository mapping for EL7toEL8' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum -y install tree && sudo tree -ha /etc/leapp\"", ++ label: 'Check if development version of leapp data installed correctly' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"", ++ label: 'Start pre-upgrade check', ++ returnStatus: true ++ sh script: "vagrant ssh $targetDistro.vmName -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"", ++ label: 'Permit ssh as root login' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"", ++ label: 'Answer the leapp question' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"", ++ label: 'Start the Upgrade' 
++ sh script: "vagrant reload $targetDistro.vmName", ++ label: 'Reboot to the ELevate initramfs' ++ sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config", ++ label: 'Generate the ssh-config file' ++ } ++ } ++ } ++ } ++ } ++ stage('Distro Tests') { ++ when { ++ anyOf { ++ expression { params.CONF_FILTER == 'minimal' } ++ expression { params.CONF_FILTER == 'docker-ce' } ++ } ++ } ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ script { ++ def targetDistro = targetDistroSpec(TARGET_DISTRO) ++ ++ sh script: 'rm -f conftest.py pytest.ini', ++ label: 'Delete root conftest.py file' ++ sh script: """ ++ . .venv/bin/activate \ ++ && py.test -v --hosts=${targetDistro.vmName} \ ++ --ssh-config=.vagrant/ssh-config \ ++ --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \ ++ ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py ++ """, ++ label: 'Run the distro specific tests' ++ } ++ } ++ } ++ } ++ } ++ stage('Docker Tests') { ++ when { ++ anyOf { ++ expression { params.CONF_FILTER == 'docker-ce' } ++ } ++ } ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ script { ++ def targetDistro = targetDistroSpec(TARGET_DISTRO) ++ ++ sh script: """ ++ . 
.venv/bin/activate \ ++ && py.test -v --hosts=${targetDistro.vmName} \ ++ --ssh-config=.vagrant/ssh-config \ ++ --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \ ++ ci/tests/tests/docker/test_docker_ce.py ++ """, ++ label: 'Run the docker specific tests' ++ } ++ } ++ } ++ } ++ } ++ } ++ } ++ } ++ } ++ post { ++ success { ++ junit testResults: 'ci/tests/tests/**/**_junit.xml', ++ skipPublishingChecks: true ++ } ++ cleanup { ++ sh script: 'vagrant destroy -f --no-parallel -g', ++ label: 'Destroy VMs' ++ cleanWs() ++ } ++ } ++} ++ ++def targetDistroSpec(distro) { ++ def spec = [:] ++ ++ switch (distro) { ++ case 'almalinux-8': ++ vm = 'almalinux_8' ++ ldata = 'almalinux' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ case 'centos-stream-8': ++ vm = 'centosstream_8' ++ ldata = 'centos' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ case 'eurolinux-8': ++ vm = 'eurolinux_8' ++ ldata = 'eurolinux' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ case 'oraclelinux-8': ++ vm = 'oraclelinux_8' ++ ldata = 'oraclelinux' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ case 'rocky-8': ++ vm = 'rocky_8' ++ ldata = 'rocky' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ default: ++ spec = [ ++ vmName: 'unknown', ++ leappData: 'unknown' ++ ] ++ break ++ } ++ return spec ++} +diff --git a/ci/jenkins/ELevate_el7toel8_Stable.jenkinsfile b/ci/jenkins/ELevate_el7toel8_Stable.jenkinsfile +new file mode 100644 +index 00000000..8a8667ad +--- /dev/null ++++ b/ci/jenkins/ELevate_el7toel8_Stable.jenkinsfile +@@ -0,0 +1,237 @@ ++RETRY = params.RETRY ++TIMEOUT = params.TIMEOUT ++ ++pipeline { ++ agent { ++ label 'x86_64 && bm' ++ } ++ options { ++ timestamps() ++ parallelsAlwaysFailFast() ++ } ++ parameters { ++ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'eurolinux-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a 
target distro or all for ELevation') ++ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration') ++ string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true) ++ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true) ++ } ++ environment { ++ VAGRANT_NO_COLOR = '1' ++ } ++ stages { ++ stage('Prepare') { ++ steps { ++ sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml', ++ label: 'Install Ansible collections' ++ sh script: 'python3.11 -m venv .venv', ++ label: 'Create Python virtual environment' ++ sh script: '. .venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko', ++ label: 'Install Testinfra' ++ } ++ } ++ stage('CreateSingleMachine') { ++ when { ++ expression { params.TARGET_DISTRO_FILTER != 'all' } ++ } ++ environment { ++ CONFIG = "${CONF_FILTER}" ++ } ++ steps { ++ script { ++ def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER) ++ ++ sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile', ++ label: 'Generate Vagrantfile' ++ sh script: "vagrant up $targetDistro.vmName", ++ label: 'Create source VM' ++ } ++ } ++ } ++ stage('CreateMultiMachine') { ++ when { ++ expression { params.TARGET_DISTRO_FILTER == 'all' } ++ } ++ environment { ++ CONFIG = "${CONF_FILTER}" ++ } ++ steps { ++ sh script: 'cp ci/vagrant/el7toel8_multi.rb Vagrantfile', ++ label: 'Generate Vagrantfile' ++ sh script: 'vagrant up', ++ label: 'Create source VM' ++ } ++ } ++ stage('ELevationAndTest') { ++ matrix { ++ when { ++ anyOf { ++ expression { params.TARGET_DISTRO_FILTER == 'all' } ++ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO } ++ } ++ } ++ axes { ++ axis { ++ name 'TARGET_DISTRO' ++ values 'almalinux-8', 'centos-stream-8', 'eurolinux-8', 'oraclelinux-8', 'rocky-8' ++ } ++ } ++ stages { ++ stage('ELevate') { ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ script { ++ def 
targetDistro = targetDistroSpec(TARGET_DISTRO) ++ ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y https://repo.almalinux.org/elevate/elevate-release-latest-el7.noarch.rpm\"", ++ label: 'Install the elevate-release-latest rpm packages for EL7' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y leapp-upgrade\"", ++ label: 'Install the leap rpm package' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y $targetDistro.leappData\"", ++ label: 'Install the LEAP migration data rpm packages' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"", ++ label: 'Start the Pre-Upgrade check', ++ returnStatus: true ++ sh script: "vagrant ssh $targetDistro.vmName -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"", ++ label: 'Permit ssh as root login' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"", ++ label: 'Answer the LEAP question' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"", ++ label: 'Start the Upgrade' ++ sh script: "vagrant reload $targetDistro.vmName", ++ label: 'Reboot to the ELevate initramfs' ++ sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config", ++ label: 'Generate the ssh-config file' ++ } ++ } ++ } ++ } ++ } ++ stage('Distro Tests') { ++ when { ++ anyOf { ++ expression { params.CONF_FILTER == 'minimal' } ++ expression { params.CONF_FILTER == 'docker-ce' } ++ } ++ } ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ script { ++ def targetDistro = targetDistroSpec(TARGET_DISTRO) ++ ++ sh script: 'rm -f conftest.py pytest.ini', ++ label: 'Delete root conftest.py file' ++ sh script: """ ++ . 
.venv/bin/activate \ ++ && py.test -v --hosts=${targetDistro.vmName} \ ++ --ssh-config=.vagrant/ssh-config \ ++ --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \ ++ ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py ++ """, ++ label: 'Run the distro specific tests' ++ } ++ } ++ } ++ } ++ } ++ stage('Docker Tests') { ++ when { ++ anyOf { ++ expression { params.CONF_FILTER == 'docker-ce' } ++ } ++ } ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ script { ++ def targetDistro = targetDistroSpec(TARGET_DISTRO) ++ ++ sh script: """ ++ . .venv/bin/activate \ ++ && py.test -v --hosts=${targetDistro.vmName} \ ++ --ssh-config=.vagrant/ssh-config \ ++ --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \ ++ ci/tests/tests/docker/test_docker_ce.py ++ """, ++ label: 'Run the docker specific tests' ++ } ++ } ++ } ++ } ++ } ++ } ++ } ++ } ++ } ++ post { ++ success { ++ junit testResults: 'ci/tests/tests/**/**_junit.xml', ++ skipPublishingChecks: true ++ } ++ cleanup { ++ sh script: 'vagrant destroy -f --no-parallel -g', ++ label: 'Destroy VMs' ++ cleanWs() ++ } ++ } ++} ++ ++def targetDistroSpec(distro) { ++ def spec = [:] ++ ++ switch (distro) { ++ case 'almalinux-8': ++ vm = 'almalinux_8' ++ ldata = 'leapp-data-almalinux' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ case 'centos-stream-8': ++ vm = 'centosstream_8' ++ ldata = 'leapp-data-centos' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ case 'eurolinux-8': ++ vm = 'eurolinux_8' ++ ldata = 'leapp-data-eurolinux' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ case 'oraclelinux-8': ++ vm = 'oraclelinux_8' ++ ldata = 'leapp-data-oraclelinux' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ case 'rocky-8': ++ vm = 'rocky_8' ++ ldata = 'leapp-data-rocky' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ default: ++ spec = [ ++ vmName: 
'unknown', ++ leappData: 'unknown' ++ ] ++ break ++ } ++ return spec ++} +diff --git a/ci/jenkins/ELevate_el7toel8_Testing.jenkinsfile b/ci/jenkins/ELevate_el7toel8_Testing.jenkinsfile +new file mode 100644 +index 00000000..70d1e6f9 +--- /dev/null ++++ b/ci/jenkins/ELevate_el7toel8_Testing.jenkinsfile +@@ -0,0 +1,237 @@ ++RETRY = params.RETRY ++TIMEOUT = params.TIMEOUT ++ ++pipeline { ++ agent { ++ label 'x86_64 && bm' ++ } ++ options { ++ timestamps() ++ parallelsAlwaysFailFast() ++ } ++ parameters { ++ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'eurolinux-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a target distro or all for ELevation') ++ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration') ++ string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true) ++ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true) ++ } ++ environment { ++ VAGRANT_NO_COLOR = '1' ++ } ++ stages { ++ stage('Prepare') { ++ steps { ++ sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml', ++ label: 'Install Ansible collections' ++ sh script: 'python3.11 -m venv .venv', ++ label: 'Create Python virtual environment' ++ sh script: '. 
.venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko', ++ label: 'Install Testinfra' ++ } ++ } ++ stage('CreateSingleMachine') { ++ when { ++ expression { params.TARGET_DISTRO_FILTER != 'all' } ++ } ++ environment { ++ CONFIG = "${CONF_FILTER}" ++ } ++ steps { ++ script { ++ def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER) ++ ++ sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile', ++ label: 'Generate Vagrantfile' ++ sh script: "vagrant up $targetDistro.vmName", ++ label: 'Create source VM' ++ } ++ } ++ } ++ stage('CreateMultiMachine') { ++ when { ++ expression { params.TARGET_DISTRO_FILTER == 'all' } ++ } ++ environment { ++ CONFIG = "${CONF_FILTER}" ++ } ++ steps { ++ sh script: 'cp ci/vagrant/el7toel8_multi.rb Vagrantfile', ++ label: 'Generate Vagrantfile' ++ sh script: 'vagrant up', ++ label: 'Create source VM' ++ } ++ } ++ stage('ELevationAndTest') { ++ matrix { ++ when { ++ anyOf { ++ expression { params.TARGET_DISTRO_FILTER == 'all' } ++ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO } ++ } ++ } ++ axes { ++ axis { ++ name 'TARGET_DISTRO' ++ values 'almalinux-8', 'centos-stream-8', 'eurolinux-8', 'oraclelinux-8', 'rocky-8' ++ } ++ } ++ stages { ++ stage('ELevate') { ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ script { ++ def targetDistro = targetDistroSpec(TARGET_DISTRO) ++ ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum-config-manager --add-repo https://repo.almalinux.org/elevate/testing/elevate-testing.repo\"", ++ label: 'Install the elevate-release-latest rpm packages for EL7' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y leapp-upgrade\"", ++ label: 'Install the leap rpm package' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y $targetDistro.leappData\"", ++ label: 'Install the LEAP migration data rpm packages' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"", ++ label: 
'Start the Pre-Upgrade check', ++ returnStatus: true ++ sh script: "vagrant ssh $targetDistro.vmName -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"", ++ label: 'Permit ssh as root login' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"", ++ label: 'Answer the LEAP question' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"", ++ label: 'Start the Upgrade' ++ sh script: "vagrant reload $targetDistro.vmName", ++ label: 'Reboot to the ELevate initramfs' ++ sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config", ++ label: 'Generate the ssh-config file' ++ } ++ } ++ } ++ } ++ } ++ stage('Distro Tests') { ++ when { ++ anyOf { ++ expression { params.CONF_FILTER == 'minimal' } ++ expression { params.CONF_FILTER == 'docker-ce' } ++ } ++ } ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ script { ++ def targetDistro = targetDistroSpec(TARGET_DISTRO) ++ ++ sh script: 'rm -f conftest.py pytest.ini', ++ label: 'Delete root conftest.py file' ++ sh script: """ ++ . .venv/bin/activate \ ++ && py.test -v --hosts=${targetDistro.vmName} \ ++ --ssh-config=.vagrant/ssh-config \ ++ --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \ ++ ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py ++ """, ++ label: 'Run the distro specific tests' ++ } ++ } ++ } ++ } ++ } ++ stage('Docker Tests') { ++ when { ++ anyOf { ++ expression { params.CONF_FILTER == 'docker-ce' } ++ } ++ } ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ script { ++ def targetDistro = targetDistroSpec(TARGET_DISTRO) ++ ++ sh script: """ ++ . 
.venv/bin/activate \ ++ && py.test -v --hosts=${targetDistro.vmName} \ ++ --ssh-config=.vagrant/ssh-config \ ++ --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \ ++ ci/tests/tests/docker/test_docker_ce.py ++ """, ++ label: 'Run the docker specific tests' ++ } ++ } ++ } ++ } ++ } ++ } ++ } ++ } ++ } ++ post { ++ success { ++ junit testResults: 'ci/tests/tests/**/**_junit.xml', ++ skipPublishingChecks: true ++ } ++ cleanup { ++ sh script: 'vagrant destroy -f --no-parallel -g', ++ label: 'Destroy VMs' ++ cleanWs() ++ } ++ } ++} ++ ++def targetDistroSpec(distro) { ++ def spec = [:] ++ ++ switch (distro) { ++ case 'almalinux-8': ++ vm = 'almalinux_8' ++ ldata = 'leapp-data-almalinux' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ case 'centos-stream-8': ++ vm = 'centosstream_8' ++ ldata = 'leapp-data-centos' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ case 'eurolinux-8': ++ vm = 'eurolinux_8' ++ ldata = 'leapp-data-eurolinux' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ case 'oraclelinux-8': ++ vm = 'oraclelinux_8' ++ ldata = 'leapp-data-oraclelinux' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ case 'rocky-8': ++ vm = 'rocky_8' ++ ldata = 'leapp-data-rocky' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ default: ++ spec = [ ++ vmName: 'unknown', ++ leappData: 'unknown' ++ ] ++ break ++ } ++ return spec ++} +diff --git a/ci/jenkins/ELevate_el8toel9_Development.jenkinsfile b/ci/jenkins/ELevate_el8toel9_Development.jenkinsfile +new file mode 100644 +index 00000000..7362aafe +--- /dev/null ++++ b/ci/jenkins/ELevate_el8toel9_Development.jenkinsfile +@@ -0,0 +1,204 @@ ++RETRY = params.RETRY ++TIMEOUT = params.TIMEOUT ++ ++pipeline { ++ agent { ++ label params.AGENT ++ } ++ options { ++ timestamps() ++ } ++ parameters { ++ string(name: 'AGENT', defaultValue: 'almalinux-8-vagrant-libvirt-x86_64', description: 'Input label of the Jenkins 
Agent', trim: true) ++ string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true) ++ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true) ++ string(name: 'REPO_URL', defaultValue: 'https://github.com/LKHN/el-test-auto-dev.git', description: 'URL of the pipeline repository', trim: true) ++ string(name: 'REPO_BRANCH', defaultValue: 'main', description: 'Branch of the pipeline repository', trim: true) ++ choice(name: 'SOURCE_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'eurolinux-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a source distro or all for ELevation') ++ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'centos-stream-9', 'eurolinux-9', 'oraclelinux-9', 'rocky-9', 'all'], description: 'Select a target distro or all to ELevation') ++ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration') ++ } ++ stages { ++ stage('Source') { ++ steps { ++ git url: REPO_URL, ++ branch: REPO_BRANCH, ++ credentialsId: 'github-almalinuxautobot' ++ } ++ } ++ stage('Prepare Build and Test enviroment') { ++ steps { ++ sh script: 'cp Vagrantfile.el8toel9 Vagrantfile', ++ label: 'Generate the el8toel9 Vagrantfile' ++ sh script: 'sudo dnf -y install python39-devel python39-wheel', ++ label: 'Install Python 3.9, PIP and Wheel' ++ sh script: 'sudo python3 -m pip install --no-cache-dir --upgrade -r requirements.txt', ++ label: 'Install TestInfra' ++ sh script: 'git clone https://github.com/AlmaLinux/leapp-data.git --branch devel', ++ label: 'Clone the leapp-data git repository' ++ } ++ } ++ stage('ELevation') { ++ matrix { ++ when { ++ allOf { ++ anyOf { ++ expression { params.SOURCE_DISTRO_FILTER == 'all' } ++ expression { params.SOURCE_DISTRO_FILTER == env.SOURCE_DISTRO } ++ } ++ anyOf { ++ expression { params.TARGET_DISTRO_FILTER == 'all' } ++ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO } ++ } ++ } 
++ } ++ axes { ++ axis { ++ name 'SOURCE_DISTRO' ++ values 'almalinux-8', 'centos-stream-8', 'eurolinux-8', 'oraclelinux-8', 'rocky-8' ++ } ++ axis { ++ name 'TARGET_DISTRO' ++ values 'almalinux-9', 'centos-stream-9', 'eurolinux-9', 'oraclelinux-9', 'rocky-9' ++ } ++ } ++ stages { ++ stage('Create and Configure Machines') { ++ environment { ++ CONFIG = "${CONF_FILTER}" ++ } ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ sh script: 'vagrant destroy -f $SOURCE_DISTRO', ++ label: 'Make sure no machine present from the last retry' ++ sh script: 'vagrant up $SOURCE_DISTRO', ++ label: 'Create the source machines' ++ } ++ } ++ } ++ } ++ stage('ELevate to the all target distros') { ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf config-manager --add-repo https://repo.almalinux.org/elevate/testing/elevate-testing.repo\"', ++ label: 'Add the ELevate Testing RPM repository' ++ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf install -y leapp-upgrade\"', ++ label: 'Install the leap rpm package' ++ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo bash /vagrant/scripts/install_elevate_dev.sh\"', ++ label: 'Install Development version of ELevate', ++ returnStatus: true ++ script { ++ def LEAPP_DATA = getLeappDataDistro(TARGET_DISTRO) ++ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo mkdir -p /etc/leapp/files/vendors.d\"", ++ label:'Create the LEAPP directory') ++ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo install -t /etc/leapp/files /vagrant/leapp-data/files/${LEAPP_DATA}/*\"", ++ label:"Install the LEAPP DATA") ++ sh(script:'vagrant ssh $SOURCE_DISTRO -c \"sudo install -t /etc/leapp/files/vendors.d /vagrant/leapp-data/vendors.d/*\"', ++ label:"Install the Vendor DATA") ++ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo mv -f /etc/leapp/files/leapp_upgrade_repositories.repo.el9 /etc/leapp/files/leapp_upgrade_repositories.repo\"", ++ label:'Set LEAPP Repos for 
EL8') ++ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo mv -f /etc/leapp/files/repomap.json.el9 /etc/leapp/files/repomap.json\"", ++ label:'Set LEAPP Repo map for EL8') ++ sh(script:'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf -y install tree && sudo tree -ha /etc/leapp\"', ++ label:"Debug: Data paths") ++ } ++ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp preupgrade\"', ++ label: 'Start the Pre-Upgrade check', ++ returnStatus: true ++ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"', ++ label: 'Permit ssh as root login' ++ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"', ++ label: 'Answer the LEAP question' ++ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp upgrade\"', ++ label: 'Start the Upgrade' ++ sh script: 'vagrant reload $SOURCE_DISTRO', ++ label: 'Reboot to the ELevate initramfs' ++ sh script: 'vagrant ssh-config $SOURCE_DISTRO >> .vagrant/ssh-config', ++ label: 'Generate the ssh-config file' ++ } ++ } ++ } ++ } ++ stage('Distro Tests') { ++ when { ++ anyOf { ++ expression { params.CONF_FILTER == 'minimal'} ++ expression { params.CONF_FILTER == 'docker-ce'} ++ } ++ } ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ sh script: 'py.test -v --hosts=$SOURCE_DISTRO --ssh-config=.vagrant/ssh-config --junit-xml tests/distro/test_osinfo_$SOURCE_DISTRO-junit.xml tests/distro/test_osinfo_$SOURCE_DISTRO.py', ++ label: 'Run the distro specific tests' ++ } ++ } ++ } ++ } ++ stage('Docker Tests') { ++ when { ++ anyOf { ++ expression { params.CONF_FILTER == 'docker-ce'} ++ } ++ } ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ sh script: 'py.test -v --hosts=$SOURCE_DISTRO --ssh-config=.vagrant/ssh-config --junit-xml tests/docker/test_docker_ce_$SOURCE_DISTRO-junit.xml tests/docker/test_docker_ce.py', ++ label: 'Run the distro specific tests' ++ } ++ } ++ } ++ } ++ } ++ } ++ 
} ++ } ++ post { ++ success { ++ junit testResults: '**/tests/**/**-junit.xml', ++ skipPublishingChecks: true ++ } ++ cleanup { ++ sh script: 'vagrant destroy -f', ++ label: 'Destroy All Machines' ++ cleanWs() ++ } ++ } ++} ++ ++/* ++* Common Functions ++*/ ++def getLeappDataDistro(TARGET_DISTRO) { ++ def leapp_data = "" ++ ++ switch(TARGET_DISTRO) { ++ case "almalinux-9": ++ leapp_data = TARGET_DISTRO.substring(0, 9) ++ break ++ ++ case "centos-stream-9": ++ leapp_data = TARGET_DISTRO.substring(0, 6) ++ break ++ ++ case "eurolinux-9": ++ leapp_data = TARGET_DISTRO.substring(0, 9) ++ break ++ ++ case "oraclelinux-9": ++ leapp_data = TARGET_DISTRO.substring(0, 11) ++ break ++ ++ case "rocky-9": ++ leapp_data = TARGET_DISTRO.substring(0, 5) ++ break ++ ++ default: ++ leap_data = "Error: Target Distro Not Supported" ++ break ++ } ++ return leapp_data ++} +diff --git a/ci/jenkins/ELevate_el8toel9_Stable.jenkinsfile b/ci/jenkins/ELevate_el8toel9_Stable.jenkinsfile +new file mode 100644 +index 00000000..d3251fc1 +--- /dev/null ++++ b/ci/jenkins/ELevate_el8toel9_Stable.jenkinsfile +@@ -0,0 +1,221 @@ ++RETRY = params.RETRY ++TIMEOUT = params.TIMEOUT ++ ++pipeline { ++ agent { ++ label 'x86_64 && bm' ++ } ++ options { ++ timestamps() ++ parallelsAlwaysFailFast() ++ } ++ parameters { ++ // choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'centos-stream-9', 'eurolinux-9', 'rocky-9', 'all'], description: 'Select a target distro or all for ELevation') ++ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'rocky-9', 'all'], description: 'Select a target distro or all for ELevation') ++ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration') ++ string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true) ++ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true) ++ } ++ environment { ++ VAGRANT_NO_COLOR = '1' ++ } ++ stages { ++ 
stage('Prepare') { ++ steps { ++ sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml', ++ label: 'Install Ansible collections' ++ sh script: 'python3.11 -m venv .venv', ++ label: 'Create Python virtual environment' ++ sh script: '. .venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko', ++ label: 'Install Testinfra' ++ } ++ } ++ stage('CreateSingleMachine') { ++ when { ++ expression { params.TARGET_DISTRO_FILTER != 'all' } ++ } ++ environment { ++ CONFIG = "${CONF_FILTER}" ++ } ++ steps { ++ script { ++ def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER) ++ ++ sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile', ++ label: 'Generate Vagrantfile' ++ sh script: "vagrant up $targetDistro.vmName", ++ label: 'Create source VM' ++ } ++ } ++ } ++ stage('CreateMultiMachine') { ++ when { ++ expression { params.TARGET_DISTRO_FILTER == 'all' } ++ } ++ environment { ++ CONFIG = "${CONF_FILTER}" ++ } ++ steps { ++ sh script: 'cp ci/vagrant/el8toel9_multi.rb Vagrantfile', ++ label: 'Generate Vagrantfile' ++ sh script: 'vagrant up', ++ label: 'Create source VM' ++ } ++ } ++ stage('ELevationAndTest') { ++ matrix { ++ when { ++ anyOf { ++ expression { params.TARGET_DISTRO_FILTER == 'all' } ++ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO } ++ } ++ } ++ axes { ++ axis { ++ name 'TARGET_DISTRO' ++ // values 'almalinux-9', 'centos-stream-9', 'eurolinux-9', 'rocky-9' ++ values 'almalinux-9', 'rocky-9' ++ } ++ } ++ stages { ++ stage('ELevate') { ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ script { ++ def targetDistro = targetDistroSpec(TARGET_DISTRO) ++ ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo dnf install -y https://repo.almalinux.org/elevate/elevate-release-latest-el8.noarch.rpm\"", ++ label: 'Install the elevate-release-latest rpm packages for EL8' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo dnf install -y leapp-upgrade\"", ++ label: 'Install the leap rpm 
package' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo dnf install -y $targetDistro.leappData\"", ++ label: 'Install the LEAP migration data rpm packages' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"", ++ label: 'Start the Pre-Upgrade check', ++ returnStatus: true ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo sed -i \'s/^AllowZoneDrifting=.*/AllowZoneDrifting=no/\' /etc/firewalld/firewalld.conf\"", ++ label: 'TODO' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section check_vdo.no_vdo_devices=True\"", ++ label: 'TODO' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"", ++ label: 'Start the Upgrade' ++ sh script: "vagrant reload $targetDistro.vmName", ++ label: 'Reboot to the ELevate initramfs' ++ sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config", ++ label: 'Generate the ssh-config file' ++ } ++ } ++ } ++ } ++ } ++ stage('Distro Tests') { ++ when { ++ anyOf { ++ expression { params.CONF_FILTER == 'minimal' } ++ expression { params.CONF_FILTER == 'docker-ce' } ++ } ++ } ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ script { ++ def targetDistro = targetDistroSpec(TARGET_DISTRO) ++ ++ sh script: 'rm -f conftest.py pytest.ini', ++ label: 'Delete root conftest.py file' ++ sh script: """ ++ . .venv/bin/activate \ ++ && py.test -v --hosts=${targetDistro.vmName} \ ++ --ssh-config=.vagrant/ssh-config \ ++ --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \ ++ ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py ++ """, ++ label: 'Run the distro specific tests' ++ } ++ } ++ } ++ } ++ } ++ stage('Docker Tests') { ++ when { ++ anyOf { ++ expression { params.CONF_FILTER == 'docker-ce' } ++ } ++ } ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ script { ++ def targetDistro = targetDistroSpec(TARGET_DISTRO) ++ ++ sh script: """ ++ . 
.venv/bin/activate \ ++ && py.test -v --hosts=${targetDistro.vmName} \ ++ --ssh-config=.vagrant/ssh-config \ ++ --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \ ++ ci/tests/tests/docker/test_docker_ce.py ++ """, ++ label: 'Run the docker specific tests' ++ } ++ } ++ } ++ } ++ } ++ } ++ } ++ } ++ } ++ post { ++ success { ++ junit testResults: 'ci/tests/tests/**/**_junit.xml', ++ skipPublishingChecks: true ++ } ++ cleanup { ++ sh script: 'vagrant destroy -f --no-parallel -g', ++ label: 'Destroy VMs' ++ cleanWs() ++ } ++ } ++} ++ ++def targetDistroSpec(distro) { ++ def spec = [:] ++ ++ switch (distro) { ++ case 'almalinux-9': ++ vm = 'almalinux_9' ++ ldata = 'leapp-data-almalinux' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ case 'eurolinux-9': ++ vm = 'eurolinux_9' ++ ldata = 'leapp-data-eurolinux' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ case 'rocky-9': ++ vm = 'rocky_9' ++ ldata = 'leapp-data-rocky' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ default: ++ spec = [ ++ vmName: 'unknown', ++ leappData: 'unknown' ++ ] ++ break ++ } ++ return spec ++} +diff --git a/ci/jenkins/ELevate_el8toel9_Testing.jenkinsfile b/ci/jenkins/ELevate_el8toel9_Testing.jenkinsfile +new file mode 100644 +index 00000000..af1b9205 +--- /dev/null ++++ b/ci/jenkins/ELevate_el8toel9_Testing.jenkinsfile +@@ -0,0 +1,191 @@ ++RETRY = params.RETRY ++TIMEOUT = params.TIMEOUT ++ ++pipeline { ++ agent { ++ label params.AGENT ++ } ++ options { ++ timestamps() ++ } ++ parameters { ++ string(name: 'AGENT', defaultValue: 'almalinux-8-vagrant-libvirt-x86_64', description: 'Input label of the Jenkins Agent', trim: true) ++ string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true) ++ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true) ++ string(name: 'REPO_URL', defaultValue: 'https://github.com/LKHN/el-test-auto-dev.git', 
description: 'URL of the pipeline repository', trim: true) ++ string(name: 'REPO_BRANCH', defaultValue: 'main', description: 'Branch of the pipeline repository', trim: true) ++ choice(name: 'SOURCE_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'eurolinux-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a source distro or all for ELevation') ++ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'centos-stream-9', 'eurolinux-9', 'oraclelinux-9', 'rocky-9', 'all'], description: 'Select a target distro or all to ELevation') ++ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration') ++ } ++ stages { ++ stage('Source') { ++ steps { ++ git url: REPO_URL, ++ branch: REPO_BRANCH, ++ credentialsId: 'github-almalinuxautobot' ++ } ++ } ++ stage('Prepare Build and Test enviroment') { ++ steps { ++ sh script: 'cp Vagrantfile.el8toel9 Vagrantfile', ++ label: 'Generate the el8toel9 Vagrantfile' ++ sh script: 'sudo dnf -y install python39-devel python39-wheel', ++ label: 'Install Python 3.9, PIP and Wheel' ++ sh script: 'sudo python3 -m pip install --no-cache-dir --upgrade -r requirements.txt', ++ label: 'Install TestInfra' ++ } ++ } ++ stage('ELevation') { ++ matrix { ++ when { ++ allOf { ++ anyOf { ++ expression { params.SOURCE_DISTRO_FILTER == 'all' } ++ expression { params.SOURCE_DISTRO_FILTER == env.SOURCE_DISTRO } ++ } ++ anyOf { ++ expression { params.TARGET_DISTRO_FILTER == 'all' } ++ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO } ++ } ++ } ++ } ++ axes { ++ axis { ++ name 'SOURCE_DISTRO' ++ values 'almalinux-8', 'centos-stream-8', 'eurolinux-8', 'oraclelinux-8', 'rocky-8' ++ } ++ axis { ++ name 'TARGET_DISTRO' ++ values 'almalinux-9', 'centos-stream-9', 'eurolinux-9', 'oraclelinux-9', 'rocky-9' ++ } ++ } ++ stages { ++ stage('Create and Configure Machines') { ++ environment { ++ CONFIG = "${CONF_FILTER}" ++ } ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 
'MINUTES') { ++ sh script: 'vagrant destroy -f $SOURCE_DISTRO', ++ label: 'Make sure no machine present from the last retry' ++ sh script: 'vagrant up $SOURCE_DISTRO', ++ label: 'Create the source machines' ++ } ++ } ++ } ++ } ++ stage('ELevate to the all target distros') { ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf config-manager --add-repo https://repo.almalinux.org/elevate/testing/elevate-testing.repo\"', ++ label: 'Add the ELevate Testing RPM repository' ++ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf -y install leapp-upgrade\"', ++ label: 'Install the leap rpm package' ++ script { ++ def LEAPP_DATA = getLeappDataDistro(TARGET_DISTRO) ++ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo dnf -y install leapp-data-$LEAPP_DATA\"", ++ label:'Install the LEAP migration data rpm packages') ++ sh(script:'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf -y install tree && sudo tree -ha /etc/leapp\"', ++ label:'Debug: Data paths') ++ } ++ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp preupgrade\"', ++ label: 'Start the Pre-Upgrade check', ++ returnStatus: true ++ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"', ++ label: 'Permit ssh as root login' ++ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"', ++ label: 'Answer the LEAP question' ++ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp upgrade\"', ++ label: 'Start the Upgrade' ++ sh script: 'vagrant reload $SOURCE_DISTRO', ++ label: 'Reboot to the ELevate initramfs' ++ sh script: 'vagrant ssh-config $SOURCE_DISTRO >> .vagrant/ssh-config', ++ label: 'Generate the ssh-config file' ++ } ++ } ++ } ++ } ++ stage('Distro Tests') { ++ when { ++ anyOf { ++ expression { params.CONF_FILTER == 'minimal'} ++ expression { params.CONF_FILTER == 'docker-ce'} ++ } ++ } ++ steps { ++ retry(RETRY) { ++ 
timeout(time: TIMEOUT, unit: 'MINUTES') { ++ sh script: 'py.test -v --hosts=$SOURCE_DISTRO --ssh-config=.vagrant/ssh-config --junit-xml tests/distro/test_osinfo_$TARGET_DISTRO-junit.xml tests/distro/test_osinfo_$TARGET_DISTRO.py', ++ label: 'Run the distro specific tests' ++ } ++ } ++ } ++ } ++ stage('Docker Tests') { ++ when { ++ anyOf { ++ expression { params.CONF_FILTER == 'docker-ce'} ++ } ++ } ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ sh script: 'py.test -v --hosts=$SOURCE_DISTRO --ssh-config=.vagrant/ssh-config --junit-xml tests/docker/test_docker_ce_$SOURCE_DISTRO-junit.xml tests/docker/test_docker_ce.py', ++ label: 'Run the distro specific tests' ++ } ++ } ++ } ++ } ++ } ++ } ++ } ++ } ++ post { ++ success { ++ junit testResults: '**/tests/**/**-junit.xml', ++ skipPublishingChecks: true ++ } ++ cleanup { ++ sh script: 'vagrant destroy -f', ++ label: 'Destroy All Machines' ++ cleanWs() ++ } ++ } ++} ++ ++/* ++* Common Functions ++*/ ++def getLeappDataDistro(TARGET_DISTRO) { ++ def leapp_data = "" ++ ++ switch(TARGET_DISTRO) { ++ case "almalinux-9": ++ leapp_data = TARGET_DISTRO.substring(0, 9) ++ break ++ ++ case "centos-stream-9": ++ leapp_data = TARGET_DISTRO.substring(0, 6) ++ break ++ ++ case "eurolinux-9": ++ leapp_data = TARGET_DISTRO.substring(0, 9) ++ break ++ ++ case "oraclelinux-9": ++ leapp_data = TARGET_DISTRO.substring(0, 11) ++ break ++ ++ case "rocky-9": ++ leapp_data = TARGET_DISTRO.substring(0, 5) ++ break ++ ++ default: ++ leap_data = "Error: Target Distro Not Supported" ++ break ++ } ++ return leapp_data ++} +diff --git a/ci/scripts/install_elevate_dev.sh b/ci/scripts/install_elevate_dev.sh +new file mode 100644 +index 00000000..4de5c3f4 +--- /dev/null ++++ b/ci/scripts/install_elevate_dev.sh +@@ -0,0 +1,69 @@ ++#!/usr/bin/env bash ++ ++ ++RHEL_MAJOR_VERSION=$(rpm --eval %rhel) ++WORK_DIR="$HOME" ++NEW_LEAPP_NAME='leapp-repository-almalinux' ++NEW_LEAPP_DIR="$WORK_DIR/$NEW_LEAPP_NAME/" 
++LEAPP_PATH='/usr/share/leapp-repository/repositories/' ++EXCLUDE_PATH=' ++/usr/share/leapp-repository/repositories/system_upgrade/el7toel8/files/bundled-rpms ++/usr/share/leapp-repository/repositories/system_upgrade/el7toel8/files ++/usr/share/leapp-repository/repositories/system_upgrade/el7toel8 ++/usr/share/leapp-repository/repositories/system_upgrade/el8toel9/files/bundled-rpms ++/usr/share/leapp-repository/repositories/system_upgrade/el8toel9/files ++/usr/share/leapp-repository/repositories/system_upgrade/el8toel9 ++/usr/share/leapp-repository/repositories/system_upgrade ++/usr/share/leapp-repository/repositories/ ++' ++ ++ ++echo "RHEL_MAJOR_VERSION=$RHEL_MAJOR_VERSION" ++echo "WORK_DIR=$WORK_DIR" ++echo "EXCLUDED_PATHS=$EXCLUDE_PATH" ++ ++ ++echo 'Remove old files' ++for dir in $(find $LEAPP_PATH -type d); ++do ++ skip=0 ++ for exclude in $(echo $EXCLUDE_PATH); ++ do ++ if [[ $exclude == $dir ]];then ++ skip=1 ++ break ++ fi ++ done ++ if [ $skip -eq 0 ];then ++ rm -rf $dir ++ fi ++done ++ ++echo 'Download new tarball' ++curl -s -L https://github.com/AlmaLinux/leapp-repository/archive/almalinux/leapp-repository-almalinux.tar.gz | tar -xz -C $WORK_DIR/ ++ ++echo 'Deleting files as in spec file' ++rm -rf $NEW_LEAPP_DIR/repos/common/actors/testactor ++find $NEW_LEAPP_DIR/repos/common -name "test.py" -delete ++rm -rf `find $NEW_LEAPP_DIR -name "tests" -type d` ++find $NEW_LEAPP_DIR -name "Makefile" -delete ++if [ $RHEL_MAJOR_VERSION -eq '7' ]; then ++ rm -rf $NEW_LEAPP_DIR/repos/system_upgrade/el8toel9 ++else ++ rm -rf $NEW_LEAPP_DIR/repos/system_upgrade/el7toel8 ++ rm -rf $NEW_LEAPP_DIR/repos/system_upgrade/cloudlinux ++fi ++ ++echo 'Copy new data to system' ++cp -r $NEW_LEAPP_DIR/repos/* $LEAPP_PATH ++ ++for DIRECTORY in $(find $LEAPP_PATH -mindepth 1 -maxdepth 1 -type d); ++do ++ REPOSITORY=$(basename $DIRECTORY) ++ if ! 
[ -e /etc/leapp/repos.d/$REPOSITORY ];then ++ echo "Enabling repository $REPOSITORY" ++ ln -s $LEAPP_PATH/$REPOSITORY /etc/leapp/repos.d/$REPOSITORY ++ fi ++done ++ ++rm -rf $NEW_LEAPP_DIR +diff --git a/ci/tests/tests/conftest.py b/ci/tests/tests/conftest.py +new file mode 100644 +index 00000000..01f9443e +--- /dev/null ++++ b/ci/tests/tests/conftest.py +@@ -0,0 +1,52 @@ ++import pytest ++import re ++ ++ ++@pytest.fixture(scope="module") ++def get_os_release(host): ++ """Get content of the /etc/os-release""" ++ os_release = host.file("/etc/os-release") ++ return os_release ++ ++ ++@pytest.fixture(scope="module") ++def get_redhat_release(host): ++ """Get content of the /etc/redhat-release""" ++ redhat_release = host.file("/etc/redhat-release") ++ return redhat_release ++ ++ ++@pytest.fixture(scope="module") ++def get_kernel_info(host): ++ """Get kernel version and vendor information""" ++ kernel_ver_pattern = re.compile( ++ f".*(^[0-9][0-9]?[0-9]?.[0-9][0-9]?[0-9]?.[0-9][0-9]?[0-9]?).*" ++ ) ++ kernel_ver_output = host.check_output("uname -r") ++ kernel_version = kernel_ver_pattern.match(kernel_ver_output).group(1) ++ ++ with host.sudo(): ++ kernel_vendor = host.check_output( ++ "grep -Ei '(.*kernel signing key|.*CA Server|.*Build)' /proc/keys | sed -E" ++ " 's/ +/:/g' | cut -d ':' -f 9 | uniq" ++ ) ++ kernel_info = (kernel_version, kernel_vendor) ++ return kernel_info ++ ++ ++@pytest.fixture(scope="module", params=["glibc", "systemd", "coreutils", "rpm"]) ++def get_pkg_info(host, request): ++ """Get vendor and version of installed packages""" ++ pkg_name = request.param ++ pkg_vendor = host.check_output( ++ f"rpm -qa --queryformat \"%{{VENDOR}}\n\" {request.param} | sed '$p;d' " ++ ) ++ pkg_version = host.check_output( ++ f'rpm -qa --queryformat "%{{VERSION}}\n" {request.param} | sort -n | sed' ++ " '$p;d'" ++ ) ++ pkg_info = (pkg_name, pkg_vendor, pkg_version) ++ # print(pkg_name) ++ # print(pkg_vendor) ++ # print(pkg_version) ++ return pkg_info +diff --git 
a/ci/tests/tests/distro/test_osinfo_almalinux_8.py b/ci/tests/tests/distro/test_osinfo_almalinux_8.py +new file mode 100644 +index 00000000..c5219b35 +--- /dev/null ++++ b/ci/tests/tests/distro/test_osinfo_almalinux_8.py +@@ -0,0 +1,43 @@ ++import pytest ++ ++ ++@pytest.mark.usefixtures("get_os_release") ++class TestOSRelease: ++ """Test values of NAME, ID and VERSION_ID""" ++ ++ def test_os_rel_name(self, get_os_release): ++ assert get_os_release.contains('NAME="AlmaLinux"') ++ ++ def test_os_rel_id(self, get_os_release): ++ assert get_os_release.contains('ID="almalinux"') ++ ++ def test_os_rel_version_id(self, get_os_release): ++ assert get_os_release.contains('VERSION_ID="8.*"') ++ ++ ++@pytest.mark.usefixtures("get_redhat_release") ++class TestRHRelease: ++ """Test contents of the /etc/redhat-release""" ++ ++ def test_redhat_release(self, get_redhat_release): ++ assert get_redhat_release.contains("AlmaLinux release 8.*") ++ ++ ++@pytest.mark.usefixtures("get_pkg_info") ++class TestPkgInfo: ++ """Test vendor and version of packages""" ++ ++ def test_pkg_vendor(self, get_pkg_info): ++ assert get_pkg_info[1] == "AlmaLinux" ++ ++ def test_pkg_version(self, get_pkg_info): ++ if get_pkg_info[0] == "kernel": ++ assert get_pkg_info[2] == "4.18.0" ++ elif get_pkg_info[0] == "glibc": ++ assert get_pkg_info[2] == "2.28" ++ elif get_pkg_info[0] == "systemd": ++ assert get_pkg_info[2] == "239" ++ elif get_pkg_info[0] == "coreutils": ++ assert get_pkg_info[2] == "8.30" ++ else: ++ assert get_pkg_info[2] == "4.14.3" +diff --git a/ci/tests/tests/distro/test_osinfo_almalinux_9.py b/ci/tests/tests/distro/test_osinfo_almalinux_9.py +new file mode 100644 +index 00000000..1536e52b +--- /dev/null ++++ b/ci/tests/tests/distro/test_osinfo_almalinux_9.py +@@ -0,0 +1,52 @@ ++import pytest ++ ++ ++@pytest.mark.usefixtures("get_os_release") ++class TestOSRelease: ++ """Test values of NAME, ID and VERSION_ID""" ++ ++ def test_os_rel_name(self, get_os_release): ++ assert 
get_os_release.contains('NAME="AlmaLinux"') ++ ++ def test_os_rel_id(self, get_os_release): ++ assert get_os_release.contains('ID="almalinux"') ++ ++ def test_os_rel_version_id(self, get_os_release): ++ assert get_os_release.contains('VERSION_ID="9.*"') ++ ++ ++@pytest.mark.usefixtures("get_redhat_release") ++class TestRHRelease: ++ """Test contents of the /etc/redhat-release""" ++ ++ def test_redhat_release(self, get_redhat_release): ++ assert get_redhat_release.contains("AlmaLinux release 9.*") ++ ++ ++@pytest.mark.usefixtures("get_kernel_info") ++class TestKernelInfo: ++ """Test version and vendor of running kernel""" ++ ++ def test_kernel_version(self, get_kernel_info): ++ assert get_kernel_info[0] == "5.14.0" ++ ++ def test_kernel_vendor(self, get_kernel_info): ++ assert get_kernel_info[1] == "AlmaLinux" ++ ++ ++@pytest.mark.usefixtures("get_pkg_info") ++class TestPkgInfo: ++ """Test vendor and version of packages""" ++ ++ def test_pkg_vendor(self, get_pkg_info): ++ assert get_pkg_info[1] == "AlmaLinux" ++ ++ def test_pkg_version(self, get_pkg_info): ++ if get_pkg_info[0] == "glibc": ++ assert get_pkg_info[2] == "2.34" ++ elif get_pkg_info[0] == "systemd": ++ assert get_pkg_info[2] == "252" ++ elif get_pkg_info[0] == "coreutils": ++ assert get_pkg_info[2] == "8.32" ++ else: ++ assert get_pkg_info[2] == "4.16.1.3" +diff --git a/ci/tests/tests/distro/test_osinfo_centosstream_8.py b/ci/tests/tests/distro/test_osinfo_centosstream_8.py +new file mode 100644 +index 00000000..995ae61e +--- /dev/null ++++ b/ci/tests/tests/distro/test_osinfo_centosstream_8.py +@@ -0,0 +1,23 @@ ++import pytest ++ ++ ++@pytest.mark.usefixtures("get_os_release") ++class TestOSRelease: ++ """Test values of NAME, ID and VERSION_ID""" ++ ++ def test_os_rel_name(self, get_os_release): ++ assert get_os_release.contains('NAME="CentOS Stream"') ++ ++ def test_os_rel_id(self, get_os_release): ++ assert get_os_release.contains('ID="centos"') ++ ++ def test_os_rel_version_id(self, get_os_release): 
++ assert get_os_release.contains('VERSION_ID="8"') ++ ++ ++@pytest.mark.usefixtures("get_redhat_release") ++class TestRHRelease: ++ """Test contents of the /etc/redhat-release""" ++ ++ def test_redhat_release(self, get_redhat_release): ++ assert get_redhat_release.contains("CentOS Stream release 8") +diff --git a/ci/tests/tests/distro/test_osinfo_centosstream_9.py b/ci/tests/tests/distro/test_osinfo_centosstream_9.py +new file mode 100644 +index 00000000..28e47202 +--- /dev/null ++++ b/ci/tests/tests/distro/test_osinfo_centosstream_9.py +@@ -0,0 +1,23 @@ ++import pytest ++ ++ ++@pytest.mark.usefixtures("get_os_release") ++class TestOSRelease: ++ """Test values of NAME, ID and VERSION_ID""" ++ ++ def test_os_rel_name(self, get_os_release): ++ assert get_os_release.contains('NAME="CentOS Stream"') ++ ++ def test_os_rel_id(self, get_os_release): ++ assert get_os_release.contains('ID="centos"') ++ ++ def test_os_rel_version_id(self, get_os_release): ++ assert get_os_release.contains('VERSION_ID="9"') ++ ++ ++@pytest.mark.usefixtures("get_redhat_release") ++class TestRHRelease: ++ """Test contents of the /etc/redhat-release""" ++ ++ def test_redhat_release(self, get_redhat_release): ++ assert get_redhat_release.contains("CentOS Stream release 9") +diff --git a/ci/tests/tests/distro/test_osinfo_eurolinux_8.py b/ci/tests/tests/distro/test_osinfo_eurolinux_8.py +new file mode 100644 +index 00000000..d1bfde55 +--- /dev/null ++++ b/ci/tests/tests/distro/test_osinfo_eurolinux_8.py +@@ -0,0 +1,23 @@ ++import pytest ++ ++ ++@pytest.mark.usefixtures("get_os_release") ++class TestOSRelease: ++ """Test values of NAME, ID and VERSION_ID""" ++ ++ def test_os_rel_name(self, get_os_release): ++ assert get_os_release.contains('NAME="EuroLinux"') ++ ++ def test_os_rel_id(self, get_os_release): ++ assert get_os_release.contains('ID="eurolinux"') ++ ++ def test_os_rel_version_id(self, get_os_release): ++ assert get_os_release.contains('VERSION_ID="8.*"') ++ ++ 
++@pytest.mark.usefixtures("get_redhat_release") ++class TestRHRelease: ++ """Test contents of the /etc/redhat-release""" ++ ++ def test_redhat_release(self, get_redhat_release): ++ assert get_redhat_release.contains("EuroLinux release 8.*") +diff --git a/ci/tests/tests/distro/test_osinfo_eurolinux_9.py b/ci/tests/tests/distro/test_osinfo_eurolinux_9.py +new file mode 100644 +index 00000000..7d749b32 +--- /dev/null ++++ b/ci/tests/tests/distro/test_osinfo_eurolinux_9.py +@@ -0,0 +1,23 @@ ++import pytest ++ ++ ++@pytest.mark.usefixtures("get_os_release") ++class TestOSRelease: ++ """Test values of NAME, ID and VERSION_ID""" ++ ++ def test_os_rel_name(self, get_os_release): ++ assert get_os_release.contains('NAME="EuroLinux"') ++ ++ def test_os_rel_id(self, get_os_release): ++ assert get_os_release.contains('ID="eurolinux"') ++ ++ def test_os_rel_version_id(self, get_os_release): ++ assert get_os_release.contains('VERSION_ID="9.*"') ++ ++ ++@pytest.mark.usefixtures("get_redhat_release") ++class TestRHRelease: ++ """Test contents of the /etc/redhat-release""" ++ ++ def test_redhat_release(self, get_redhat_release): ++ assert get_redhat_release.contains("EuroLinux release 9.*") +diff --git a/ci/tests/tests/distro/test_osinfo_oraclelinux_8.py b/ci/tests/tests/distro/test_osinfo_oraclelinux_8.py +new file mode 100644 +index 00000000..2080fd2f +--- /dev/null ++++ b/ci/tests/tests/distro/test_osinfo_oraclelinux_8.py +@@ -0,0 +1,23 @@ ++import pytest ++ ++ ++@pytest.mark.usefixtures("get_os_release") ++class TestOSRelease: ++ """Test values of NAME, ID and VERSION_ID""" ++ ++ def test_os_rel_name(self, get_os_release): ++ assert get_os_release.contains('NAME="Oracle Linux Server"') ++ ++ def test_os_rel_id(self, get_os_release): ++ assert get_os_release.contains('ID="ol"') ++ ++ def test_os_rel_version_id(self, get_os_release): ++ assert get_os_release.contains('VERSION_ID="8.*"') ++ ++ ++@pytest.mark.usefixtures("get_redhat_release") ++class TestRHRelease: ++ """Test 
contents of the /etc/redhat-release""" ++ ++ def test_redhat_release(self, get_redhat_release): ++ assert get_redhat_release.contains("Red Hat Enterprise Linux release 8.*") +diff --git a/ci/tests/tests/distro/test_osinfo_oraclelinux_9.py b/ci/tests/tests/distro/test_osinfo_oraclelinux_9.py +new file mode 100644 +index 00000000..bd5044bb +--- /dev/null ++++ b/ci/tests/tests/distro/test_osinfo_oraclelinux_9.py +@@ -0,0 +1,23 @@ ++import pytest ++ ++ ++@pytest.mark.usefixtures("get_os_release") ++class TestOSRelease: ++ """Test values of NAME, ID and VERSION_ID""" ++ ++ def test_os_rel_name(self, get_os_release): ++ assert get_os_release.contains('NAME="Oracle Linux Server"') ++ ++ def test_os_rel_id(self, get_os_release): ++ assert get_os_release.contains('ID="ol"') ++ ++ def test_os_rel_version_id(self, get_os_release): ++ assert get_os_release.contains('VERSION_ID="9.*"') ++ ++ ++@pytest.mark.usefixtures("get_redhat_release") ++class TestRHRelease: ++ """Test contents of the /etc/redhat-release""" ++ ++ def test_redhat_release(self, get_redhat_release): ++ assert get_redhat_release.contains("Red Hat Enterprise Linux release 9.*") +diff --git a/ci/tests/tests/distro/test_osinfo_rocky_8.py b/ci/tests/tests/distro/test_osinfo_rocky_8.py +new file mode 100644 +index 00000000..cce5d668 +--- /dev/null ++++ b/ci/tests/tests/distro/test_osinfo_rocky_8.py +@@ -0,0 +1,23 @@ ++import pytest ++ ++ ++@pytest.mark.usefixtures("get_os_release") ++class TestOSRelease: ++ """Test values of NAME, ID and VERSION_ID""" ++ ++ def test_os_rel_name(self, get_os_release): ++ assert get_os_release.contains('NAME="Rocky Linux"') ++ ++ def test_os_rel_id(self, get_os_release): ++ assert get_os_release.contains('ID="rocky"') ++ ++ def test_os_rel_version_id(self, get_os_release): ++ assert get_os_release.contains('VERSION_ID="8.*"') ++ ++ ++@pytest.mark.usefixtures("get_redhat_release") ++class TestRHRelease: ++ """Test contents of the /etc/redhat-release""" ++ ++ def 
test_redhat_release(self, get_redhat_release): ++ assert get_redhat_release.contains("Rocky Linux release 8.*") +diff --git a/ci/tests/tests/distro/test_osinfo_rocky_9.py b/ci/tests/tests/distro/test_osinfo_rocky_9.py +new file mode 100644 +index 00000000..ce8cccdb +--- /dev/null ++++ b/ci/tests/tests/distro/test_osinfo_rocky_9.py +@@ -0,0 +1,23 @@ ++import pytest ++ ++ ++@pytest.mark.usefixtures("get_os_release") ++class TestOSRelease: ++ """Test values of NAME, ID and VERSION_ID""" ++ ++ def test_os_rel_name(self, get_os_release): ++ assert get_os_release.contains('NAME="Rocky Linux"') ++ ++ def test_os_rel_id(self, get_os_release): ++ assert get_os_release.contains('ID="rocky"') ++ ++ def test_os_rel_version_id(self, get_os_release): ++ assert get_os_release.contains('VERSION_ID="9.*"') ++ ++ ++@pytest.mark.usefixtures("get_redhat_release") ++class TestRHRelease: ++ """Test contents of the /etc/redhat-release""" ++ ++ def test_redhat_release(self, get_redhat_release): ++ assert get_redhat_release.contains("Rocky Linux release 9.*") +diff --git a/ci/tests/tests/docker/test_docker_ce.py b/ci/tests/tests/docker/test_docker_ce.py +new file mode 100644 +index 00000000..3c2550c7 +--- /dev/null ++++ b/ci/tests/tests/docker/test_docker_ce.py +@@ -0,0 +1,26 @@ ++import pytest ++ ++ ++class TestDockerServices: ++ """Test docker and containerd services running and enabled""" ++ ++ def test_docker_is_running(self, host): ++ assert host.service("docker.service").is_running ++ ++ def test_containerd_is_running(self, host): ++ assert host.service("containerd.service").is_running ++ ++ def test_docker_is_enabled(self, host): ++ assert host.service("docker.service").is_enabled ++ ++ def test_containerd_is_enabled(self, host): ++ assert host.service("containerd.service").is_enabled ++ ++ ++class TestDockerWorking: ++ """Test docker working with the hello world container""" ++ ++ def test_docker_is_working(self, host): ++ with host.sudo(): ++ cmd = host.run("sudo docker run --rm 
hello-world") ++ assert cmd.succeeded +diff --git a/ci/vagrant/el7toel8_multi.rb b/ci/vagrant/el7toel8_multi.rb +new file mode 100644 +index 00000000..67a9f3b9 +--- /dev/null ++++ b/ci/vagrant/el7toel8_multi.rb +@@ -0,0 +1,39 @@ ++# -*- mode: ruby -*- ++# vi: set ft=ruby : ++ ++configuration = ENV['CONFIG'] ++ ++Vagrant.configure('2') do |config| ++ config.vagrant.plugins = 'vagrant-libvirt' ++ ++ config.vm.synced_folder '.', '/vagrant', disabled: true ++ config.vm.box = 'generic/centos7' ++ ++ config.vm.provider 'libvirt' do |v| ++ v.uri = 'qemu:///system' ++ v.memory = 4096 ++ v.machine_type = 'q35' ++ v.cpu_mode = 'host-passthrough' ++ v.cpus = 2 ++ v.disk_bus = 'scsi' ++ v.disk_driver cache: 'writeback', discard: 'unmap' ++ v.random_hostname = true ++ end ++ ++ target_distros = ['almalinux', 'centosstream', 'eurolinux', 'oraclelinux', 'rocky'] ++ ++ target_distros.each do |target_distro| ++ config.vm.define "#{target_distro}_8" do |machine| ++ machine.vm.hostname = "#{target_distro}-8.test" ++ ++ if target_distro == target_distros[-1] ++ machine.vm.provision 'ansible' do |ansible| ++ ansible.compatibility_mode = '2.0' ++ ansible.limit = 'all' ++ ansible.playbook = "ci/ansible/#{configuration}.yaml" ++ ansible.config_file = 'ci/ansible/ansible.cfg' ++ end ++ end ++ end ++ end ++end +diff --git a/ci/vagrant/el7toel8toel9_single.rb b/ci/vagrant/el7toel8toel9_single.rb +new file mode 100644 +index 00000000..e19bd079 +--- /dev/null ++++ b/ci/vagrant/el7toel8toel9_single.rb +@@ -0,0 +1,53 @@ ++# -*- mode: ruby -*- ++# vi: set ft=ruby : ++ ++configuration = ENV['CONFIG'] ++ ++Vagrant.configure('2') do |config| ++ config.vagrant.plugins = 'vagrant-libvirt' ++ ++ config.vm.synced_folder '.', '/vagrant', disabled: true ++ config.ssh.disable_deprecated_algorithms = true ++ ++ config.vm.provider 'libvirt' do |v| ++ v.uri = 'qemu:///system' ++ v.memory = 4096 ++ v.machine_type = 'q35' ++ v.cpu_mode = 'host-passthrough' ++ v.cpus = 2 ++ v.disk_bus = 'scsi' ++ v.disk_driver 
cache: 'writeback', discard: 'unmap' ++ v.random_hostname = true ++ end ++ ++ # EL7toEL8 ++ target_distros = ['almalinux', 'centosstream', 'eurolinux', 'oraclelinux', 'rocky'] ++ ++ target_distros.each do |target_distro| ++ config.vm.define "#{target_distro}_8" do |machine| ++ machine.vm.box = 'generic/centos7' ++ machine.vm.hostname = "#{target_distro}-8.test" ++ end ++ end ++ ++ # EL8toEL9 ++ target_distros_el9 = { ++ almalinux: 'almalinux/8', ++ # centosstream: 'generic/centos8s', ++ eurolinux: 'eurolinux-vagrant/eurolinux-8', ++ rocky: 'generic/rocky8' ++ } ++ ++ target_distros_el9.each_pair do |vm, box| ++ config.vm.define "#{vm}_9" do |machine| ++ machine.vm.box = "#{box}" ++ machine.vm.hostname = "#{vm}-9.test" ++ end ++ end ++ ++ config.vm.provision 'ansible' do |ansible| ++ ansible.compatibility_mode = '2.0' ++ ansible.playbook = "ci/ansible/#{configuration}.yaml" ++ ansible.config_file = 'ci/ansible/ansible.cfg' ++ end ++end +diff --git a/ci/vagrant/el8toel9_multi.rb b/ci/vagrant/el8toel9_multi.rb +new file mode 100644 +index 00000000..4fcb18c2 +--- /dev/null ++++ b/ci/vagrant/el8toel9_multi.rb +@@ -0,0 +1,45 @@ ++# -*- mode: ruby -*- ++# vi: set ft=ruby : ++ ++configuration = ENV['CONFIG'] ++ ++Vagrant.configure('2') do |config| ++ config.vagrant.plugins = 'vagrant-libvirt' ++ ++ config.vm.synced_folder '.', '/vagrant', disabled: true ++ config.ssh.disable_deprecated_algorithms = true ++ ++ config.vm.provider 'libvirt' do |v| ++ v.uri = 'qemu:///system' ++ v.memory = 4096 ++ v.machine_type = 'q35' ++ v.cpu_mode = 'host-passthrough' ++ v.cpus = 2 ++ v.disk_bus = 'scsi' ++ v.disk_driver cache: 'writeback', discard: 'unmap' ++ v.random_hostname = true ++ end ++ ++ target_distros = { ++ almalinux: 'almalinux/8', ++ # centosstream: 'generic/centos8s', ++ # eurolinux: 'eurolinux-vagrant/eurolinux-8', ++ rocky: 'generic/rocky8' ++ } ++ ++ target_distros.each_pair do |vm, box| ++ config.vm.define "#{vm}_9" do |machine| ++ machine.vm.box = "#{box}" ++ 
machine.vm.hostname = "#{vm}-9.test" ++ ++ if [vm, box] == target_distros.to_a.last ++ machine.vm.provision 'ansible' do |ansible| ++ ansible.compatibility_mode = '2.0' ++ ansible.limit = 'all' ++ ansible.playbook = "ci/ansible/#{configuration}.yaml" ++ ansible.config_file = 'ci/ansible/ansible.cfg' ++ end ++ end ++ end ++ end ++end +-- +2.43.0 + + +From 051385eb12ef5e00453b104de736c7fafd5ed184 Mon Sep 17 00:00:00 2001 +From: eabdullin +Date: Mon, 25 Dec 2023 15:00:32 +0300 +Subject: [PATCH 21/36] Add ability to set custom branch and username + +(cherry picked from commit a74d5f7ac1bd57ac22f281bb5bc6d553b9b92278) +--- + ci/scripts/install_elevate_dev.sh | 40 ++++++++++++++++++++++++++++--- + 1 file changed, 37 insertions(+), 3 deletions(-) + +diff --git a/ci/scripts/install_elevate_dev.sh b/ci/scripts/install_elevate_dev.sh +index 4de5c3f4..4f2b4c06 100644 +--- a/ci/scripts/install_elevate_dev.sh ++++ b/ci/scripts/install_elevate_dev.sh +@@ -1,9 +1,43 @@ + #!/usr/bin/env bash + ++USER='AlmaLinux' ++BRANCH='almalinux' ++ ++show_usage() { ++ echo 'Usage: sync_cloudlinux [OPTION]...' 
++ echo '' ++ echo ' -h, --help show this message and exit' ++ echo ' -u, --user github user name (default: AlmaLinux)' ++ echo ' -b, --branch github branch name (default: almalinux)' ++} ++ ++while [[ $# -gt 0 ]]; do ++ opt="$1" ++ case ${opt} in ++ -h|--help) ++ show_usage ++ exit 0 ++ ;; ++ -u|--user) ++ USER="$2" ++ shift ++ shift ++ ;; ++ -b|--branch) ++ BRANCH="$2" ++ shift ++ shift ++ ;; ++ *) ++ echo -e "Error: unknown option ${opt}" >&2 ++ exit 2 ++ ;; ++ esac ++done + + RHEL_MAJOR_VERSION=$(rpm --eval %rhel) + WORK_DIR="$HOME" +-NEW_LEAPP_NAME='leapp-repository-almalinux' ++NEW_LEAPP_NAME="leapp-repository-$BRANCH" + NEW_LEAPP_DIR="$WORK_DIR/$NEW_LEAPP_NAME/" + LEAPP_PATH='/usr/share/leapp-repository/repositories/' + EXCLUDE_PATH=' +@@ -39,8 +73,8 @@ do + fi + done + +-echo 'Download new tarball' +-curl -s -L https://github.com/AlmaLinux/leapp-repository/archive/almalinux/leapp-repository-almalinux.tar.gz | tar -xz -C $WORK_DIR/ ++echo "Download new tarball from https://github.com/$USER/leapp-repository/archive/$BRANCH/leapp-repository-$BRANCH.tar.gz" ++curl -s -L https://github.com/$USER/leapp-repository/archive/$BRANCH/leapp-repository-$BRANCH.tar.gz | tar -xz -C $WORK_DIR/ + + echo 'Deleting files as in spec file' + rm -rf $NEW_LEAPP_DIR/repos/common/actors/testactor +-- +2.43.0 + + +From d6f17ecc63e3d364cc4b349c789b62e46f4cb9d1 Mon Sep 17 00:00:00 2001 +From: Elkhan Mammadli +Date: Mon, 25 Dec 2023 21:37:12 +0400 +Subject: [PATCH 22/36] Add job parameters for custom git user and branch + +Add those parameters as GIT_USER and GIT_BRANCH +for testing the leapp changes on different +git forks/remotes. 
+ +Signed-off-by: Elkhan Mammadli +(cherry picked from commit 821176fb308d3c45566f3e66ed8ff7f5aa1ae09e) +--- + ci/jenkins/ELevate_el7toel8_Development.jenkinsfile | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/ci/jenkins/ELevate_el7toel8_Development.jenkinsfile b/ci/jenkins/ELevate_el7toel8_Development.jenkinsfile +index d83e1788..f60a74df 100644 +--- a/ci/jenkins/ELevate_el7toel8_Development.jenkinsfile ++++ b/ci/jenkins/ELevate_el7toel8_Development.jenkinsfile +@@ -12,6 +12,8 @@ pipeline { + parameters { + choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'eurolinux-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a target distro or all for ELevation') + choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration') ++ string(name: 'LEAPP_SRC_GIT_USER', defaultValue: 'AlmaLinux', description: 'Input name of Git user of LEAPP source', trim: true) ++ string(name: 'LEAPP_SRC_GIT_BRANCH', defaultValue: 'almalinux', description: 'Input name of Git branch of LEAPP source', trim: true) + string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true) + string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true) + } +@@ -91,7 +93,7 @@ pipeline { + label: 'Install testing version of ELevate' + sh script: "vagrant upload ci/scripts/install_elevate_dev.sh install_elevate_dev.sh $targetDistro.vmName", + label: 'Upload installer script to VMs' +- sh script: "vagrant ssh $targetDistro.vmName -c \"sudo bash install_elevate_dev.sh\"", ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo bash install_elevate_dev.sh -u ${LEAPP_SRC_GIT_USER} -b ${LEAPP_SRC_GIT_BRANCH}\"", + label: 'Install development version of ELevate', + returnStatus: true + sh script: "vagrant upload leapp-data/ leapp-data/ --compress $targetDistro.vmName", +-- +2.43.0 + + +From 698f69c8d7b909e221ebc9f600a99d02de2d1aa1 Mon Sep 17 
00:00:00 2001 +From: eabdullin +Date: Tue, 26 Dec 2023 13:58:25 +0300 +Subject: [PATCH 23/36] Add Dev internal job + +(cherry picked from commit bc36aa2e8d721c618db58eb334cd9fb902c31930) +--- + .../ELevate_el7toel8_Internal_Dev.jenkinsfile | 262 ++++++++++++++++++ + .../ELevate_el8toel9_Internal_Dev.jenkinsfile | 210 ++++++++++++++ + 2 files changed, 472 insertions(+) + create mode 100644 ci/jenkins/ELevate_el7toel8_Internal_Dev.jenkinsfile + create mode 100644 ci/jenkins/ELevate_el8toel9_Internal_Dev.jenkinsfile + +diff --git a/ci/jenkins/ELevate_el7toel8_Internal_Dev.jenkinsfile b/ci/jenkins/ELevate_el7toel8_Internal_Dev.jenkinsfile +new file mode 100644 +index 00000000..168ace8a +--- /dev/null ++++ b/ci/jenkins/ELevate_el7toel8_Internal_Dev.jenkinsfile +@@ -0,0 +1,262 @@ ++RETRY = params.RETRY ++TIMEOUT = params.TIMEOUT ++ ++pipeline { ++ agent { ++ label 'x86_64 && bm' ++ } ++ options { ++ timestamps() ++ parallelsAlwaysFailFast() ++ } ++ parameters { ++ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'eurolinux-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a target distro or all for ELevation') ++ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration') ++ string(name: 'LEAPP_SRC_GIT_USER', defaultValue: 'AlmaLinux', description: 'Input name of Git user of LEAPP source', trim: true) ++ string(name: 'LEAPP_SRC_GIT_BRANCH', defaultValue: 'almalinux', description: 'Input name of Git branch of LEAPP source', trim: true) ++ string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true) ++ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true) ++ } ++ environment { ++ VAGRANT_NO_COLOR = '1' ++ } ++ stages { ++ stage('Prepare') { ++ steps { ++ sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml', ++ label: 'Install Ansible collections' ++ sh script: 'python3.11 -m venv .venv', ++ label: 
'Create Python virtual environment' ++ sh script: '. .venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko', ++ label: 'Install Testinfra' ++ sh script: 'git clone https://github.com/AlmaLinux/leapp-data.git --branch devel', ++ label: 'Fetch devel version of leapp data' ++ } ++ } ++ stage('CreateSingleMachine') { ++ when { ++ expression { params.TARGET_DISTRO_FILTER != 'all' } ++ } ++ environment { ++ CONFIG = "${CONF_FILTER}" ++ } ++ steps { ++ script { ++ def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER) ++ ++ sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile', ++ label: 'Generate Vagrantfile' ++ sh script: "vagrant up $targetDistro.vmName", ++ label: 'Create source VM' ++ } ++ } ++ } ++ stage('CreateMultiMachine') { ++ when { ++ expression { params.TARGET_DISTRO_FILTER == 'all' } ++ } ++ environment { ++ CONFIG = "${CONF_FILTER}" ++ } ++ steps { ++ sh script: 'cp ci/vagrant/el7toel8_multi.rb Vagrantfile', ++ label: 'Generate Vagrantfile' ++ sh script: 'vagrant up', ++ label: 'Create source VM' ++ } ++ } ++ stage('ELevationAndTest') { ++ matrix { ++ when { ++ anyOf { ++ expression { params.TARGET_DISTRO_FILTER == 'all' } ++ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO } ++ } ++ } ++ axes { ++ axis { ++ name 'TARGET_DISTRO' ++ values 'almalinux-8', 'centos-stream-8', 'eurolinux-8', 'oraclelinux-8', 'rocky-8' ++ } ++ } ++ stages { ++ stage('ELevate') { ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ script { ++ def targetDistro = targetDistroSpec(TARGET_DISTRO) ++ ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum-config-manager --add-repo https://repo.almalinux.org/elevate/testing/elevate-testing.repo\"", ++ label: 'Add testing repo of ELevate' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo wget https://build.almalinux.org/pulp/content/copr/eabdullin1-leapp-data-internal-centos7-x86_64-dr/config.repo -O /etc/yum.repos.d/internal-leapp.repo\"", ++ label: 
'Add pulp repository' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo sed -i 's|enabled=1|enabled=1\\npriority=80|' /etc/yum.repos.d/internal-leapp.repo\"", ++ label: 'Set priority for pulp repository' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y leapp-upgrade\"", ++ label: 'Install testing version of ELevate' ++ sh script: "vagrant upload ci/scripts/install_elevate_dev.sh install_elevate_dev.sh $targetDistro.vmName", ++ label: 'Upload installer script to VMs' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo bash install_elevate_dev.sh -u ${LEAPP_SRC_GIT_USER} -b ${LEAPP_SRC_GIT_BRANCH}\"", ++ label: 'Install development version of ELevate', ++ returnStatus: true ++ sh script: "vagrant upload leapp-data/ leapp-data/ --compress $targetDistro.vmName", ++ label: 'Upload devel branch of leapp data' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo mkdir -p /etc/leapp/files/vendors.d\"", ++ label: 'Create directory structrue of leapp data' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo install -t /etc/leapp/files leapp-data/files/${targetDistro.leappData}/*\"", ++ label: 'Install devel version of leapp data' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo install -t /etc/leapp/files/vendors.d leapp-data/vendors.d/*\"", ++ label: 'Install devel version of leapp vendor data' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo mv -f /etc/leapp/files/leapp_upgrade_repositories.repo.el8 /etc/leapp/files/leapp_upgrade_repositories.repo\"", ++ label: 'Configure leapp upgrade repositories for EL7toEL8' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo mv -f /etc/leapp/files/repomap.json.el8 /etc/leapp/files/repomap.json\"", ++ label: 'Configure leapp repository mapping for EL7toEL8' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum -y install tree && sudo tree -ha /etc/leapp\"", ++ label: 'Check if development version of leapp data installed correctly' ++ sh script: "vagrant 
ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"", ++ label: 'Start pre-upgrade check', ++ returnStatus: true ++ sh script: "vagrant ssh $targetDistro.vmName -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"", ++ label: 'Permit ssh as root login' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"", ++ label: 'Answer the leapp question' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"", ++ label: 'Start the Upgrade' ++ sh script: "vagrant reload $targetDistro.vmName", ++ label: 'Reboot to the ELevate initramfs' ++ sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config", ++ label: 'Generate the ssh-config file' ++ } ++ } ++ } ++ } ++ } ++ stage('Distro Tests') { ++ when { ++ anyOf { ++ expression { params.CONF_FILTER == 'minimal' } ++ expression { params.CONF_FILTER == 'docker-ce' } ++ } ++ } ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ script { ++ def targetDistro = targetDistroSpec(TARGET_DISTRO) ++ ++ sh script: 'rm -f conftest.py pytest.ini', ++ label: 'Delete root conftest.py file' ++ sh script: """ ++ . .venv/bin/activate \ ++ && py.test -v --hosts=${targetDistro.vmName} \ ++ --ssh-config=.vagrant/ssh-config \ ++ --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \ ++ ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py ++ """, ++ label: 'Run the distro specific tests' ++ } ++ } ++ } ++ } ++ } ++ stage('Docker Tests') { ++ when { ++ anyOf { ++ expression { params.CONF_FILTER == 'docker-ce' } ++ } ++ } ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ script { ++ def targetDistro = targetDistroSpec(TARGET_DISTRO) ++ ++ sh script: """ ++ . 
.venv/bin/activate \ ++ && py.test -v --hosts=${targetDistro.vmName} \ ++ --ssh-config=.vagrant/ssh-config \ ++ --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \ ++ ci/tests/tests/docker/test_docker_ce.py ++ """, ++ label: 'Run the docker specific tests' ++ } ++ } ++ } ++ } ++ } ++ } ++ } ++ } ++ } ++ post { ++ success { ++ junit testResults: 'ci/tests/tests/**/**_junit.xml', ++ skipPublishingChecks: true ++ } ++ cleanup { ++ sh script: 'vagrant destroy -f --no-parallel -g', ++ label: 'Destroy VMs' ++ cleanWs() ++ } ++ } ++} ++ ++def targetDistroSpec(distro) { ++ def spec = [:] ++ ++ switch (distro) { ++ case 'almalinux-8': ++ vm = 'almalinux_8' ++ ldata = 'almalinux' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ case 'centos-stream-8': ++ vm = 'centosstream_8' ++ ldata = 'centos' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ case 'eurolinux-8': ++ vm = 'eurolinux_8' ++ ldata = 'eurolinux' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ case 'oraclelinux-8': ++ vm = 'oraclelinux_8' ++ ldata = 'oraclelinux' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ case 'rocky-8': ++ vm = 'rocky_8' ++ ldata = 'rocky' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ default: ++ spec = [ ++ vmName: 'unknown', ++ leappData: 'unknown' ++ ] ++ break ++ } ++ return spec ++} +diff --git a/ci/jenkins/ELevate_el8toel9_Internal_Dev.jenkinsfile b/ci/jenkins/ELevate_el8toel9_Internal_Dev.jenkinsfile +new file mode 100644 +index 00000000..2647cc06 +--- /dev/null ++++ b/ci/jenkins/ELevate_el8toel9_Internal_Dev.jenkinsfile +@@ -0,0 +1,210 @@ ++RETRY = params.RETRY ++TIMEOUT = params.TIMEOUT ++ ++pipeline { ++ agent { ++ label params.AGENT ++ } ++ options { ++ timestamps() ++ } ++ parameters { ++ string(name: 'AGENT', defaultValue: 'almalinux-8-vagrant-libvirt-x86_64', description: 'Input label of the Jenkins Agent', trim: true) ++ string(name: 'RETRY', 
defaultValue: '3', description: 'Input count of retry', trim: true) ++ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true) ++ string(name: 'REPO_URL', defaultValue: 'https://github.com/LKHN/el-test-auto-dev.git', description: 'URL of the pipeline repository', trim: true) ++ string(name: 'REPO_BRANCH', defaultValue: 'main', description: 'Branch of the pipeline repository', trim: true) ++ choice(name: 'SOURCE_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'eurolinux-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a source distro or all for ELevation') ++ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'centos-stream-9', 'eurolinux-9', 'oraclelinux-9', 'rocky-9', 'all'], description: 'Select a target distro or all to ELevation') ++ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration') ++ } ++ stages { ++ stage('Source') { ++ steps { ++ git url: REPO_URL, ++ branch: REPO_BRANCH, ++ credentialsId: 'github-almalinuxautobot' ++ } ++ } ++ stage('Prepare Build and Test enviroment') { ++ steps { ++ sh script: 'cp Vagrantfile.el8toel9 Vagrantfile', ++ label: 'Generate the el8toel9 Vagrantfile' ++ sh script: 'sudo dnf -y install python39-devel python39-wheel', ++ label: 'Install Python 3.9, PIP and Wheel' ++ sh script: 'sudo python3 -m pip install --no-cache-dir --upgrade -r requirements.txt', ++ label: 'Install TestInfra' ++ sh script: 'git clone https://github.com/AlmaLinux/leapp-data.git --branch devel', ++ label: 'Clone the leapp-data git repository' ++ } ++ } ++ stage('ELevation') { ++ matrix { ++ when { ++ allOf { ++ anyOf { ++ expression { params.SOURCE_DISTRO_FILTER == 'all' } ++ expression { params.SOURCE_DISTRO_FILTER == env.SOURCE_DISTRO } ++ } ++ anyOf { ++ expression { params.TARGET_DISTRO_FILTER == 'all' } ++ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO } ++ } ++ } ++ } ++ axes { ++ axis { ++ name 
'SOURCE_DISTRO' ++ values 'almalinux-8', 'centos-stream-8', 'eurolinux-8', 'oraclelinux-8', 'rocky-8' ++ } ++ axis { ++ name 'TARGET_DISTRO' ++ values 'almalinux-9', 'centos-stream-9', 'eurolinux-9', 'oraclelinux-9', 'rocky-9' ++ } ++ } ++ stages { ++ stage('Create and Configure Machines') { ++ environment { ++ CONFIG = "${CONF_FILTER}" ++ } ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ sh script: 'vagrant destroy -f $SOURCE_DISTRO', ++ label: 'Make sure no machine present from the last retry' ++ sh script: 'vagrant up $SOURCE_DISTRO', ++ label: 'Create the source machines' ++ } ++ } ++ } ++ } ++ stage('ELevate to the all target distros') { ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf config-manager --add-repo https://repo.almalinux.org/elevate/testing/elevate-testing.repo\"', ++ label: 'Add the ELevate Testing RPM repository' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo dnf install -y wget\"", ++ label: 'Install wget' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo wget https://build.almalinux.org/pulp/content/copr/eabdullin1-leapp-data-internal-almalinux-8-x86_64-dr/config.repo -O /etc/yum.repos.d/internal-leapp.repo\"", ++ label: 'Add pulp repository' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo sed -i 's|enabled=1|enabled=1\\npriority=80|' /etc/yum.repos.d/internal-leapp.repo\"", ++ label: 'Set priority for pulp repository' ++ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf install -y leapp-upgrade\"', ++ label: 'Install the leap rpm package' ++ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo bash /vagrant/scripts/install_elevate_dev.sh\"', ++ label: 'Install Development version of ELevate', ++ returnStatus: true ++ script { ++ def LEAPP_DATA = getLeappDataDistro(TARGET_DISTRO) ++ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo mkdir -p /etc/leapp/files/vendors.d\"", ++ label:'Create the LEAPP directory') 
++ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo install -t /etc/leapp/files /vagrant/leapp-data/files/${LEAPP_DATA}/*\"", ++ label:"Install the LEAPP DATA") ++ sh(script:'vagrant ssh $SOURCE_DISTRO -c \"sudo install -t /etc/leapp/files/vendors.d /vagrant/leapp-data/vendors.d/*\"', ++ label:"Install the Vendor DATA") ++ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo mv -f /etc/leapp/files/leapp_upgrade_repositories.repo.el9 /etc/leapp/files/leapp_upgrade_repositories.repo\"", ++ label:'Set LEAPP Repos for EL8') ++ sh(script:"vagrant ssh $SOURCE_DISTRO -c \"sudo mv -f /etc/leapp/files/repomap.json.el9 /etc/leapp/files/repomap.json\"", ++ label:'Set LEAPP Repo map for EL8') ++ sh(script:'vagrant ssh $SOURCE_DISTRO -c \"sudo dnf -y install tree && sudo tree -ha /etc/leapp\"', ++ label:"Debug: Data paths") ++ } ++ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp preupgrade\"', ++ label: 'Start the Pre-Upgrade check', ++ returnStatus: true ++ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"', ++ label: 'Permit ssh as root login' ++ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"', ++ label: 'Answer the LEAP question' ++ sh script: 'vagrant ssh $SOURCE_DISTRO -c \"sudo leapp upgrade\"', ++ label: 'Start the Upgrade' ++ sh script: 'vagrant reload $SOURCE_DISTRO', ++ label: 'Reboot to the ELevate initramfs' ++ sh script: 'vagrant ssh-config $SOURCE_DISTRO >> .vagrant/ssh-config', ++ label: 'Generate the ssh-config file' ++ } ++ } ++ } ++ } ++ stage('Distro Tests') { ++ when { ++ anyOf { ++ expression { params.CONF_FILTER == 'minimal'} ++ expression { params.CONF_FILTER == 'docker-ce'} ++ } ++ } ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ sh script: 'py.test -v --hosts=$SOURCE_DISTRO --ssh-config=.vagrant/ssh-config --junit-xml tests/distro/test_osinfo_$SOURCE_DISTRO-junit.xml 
tests/distro/test_osinfo_$SOURCE_DISTRO.py', ++ label: 'Run the distro specific tests' ++ } ++ } ++ } ++ } ++ stage('Docker Tests') { ++ when { ++ anyOf { ++ expression { params.CONF_FILTER == 'docker-ce'} ++ } ++ } ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ sh script: 'py.test -v --hosts=$SOURCE_DISTRO --ssh-config=.vagrant/ssh-config --junit-xml tests/docker/test_docker_ce_$SOURCE_DISTRO-junit.xml tests/docker/test_docker_ce.py', ++ label: 'Run the distro specific tests' ++ } ++ } ++ } ++ } ++ } ++ } ++ } ++ } ++ post { ++ success { ++ junit testResults: '**/tests/**/**-junit.xml', ++ skipPublishingChecks: true ++ } ++ cleanup { ++ sh script: 'vagrant destroy -f', ++ label: 'Destroy All Machines' ++ cleanWs() ++ } ++ } ++} ++ ++/* ++* Common Functions ++*/ ++def getLeappDataDistro(TARGET_DISTRO) { ++ def leapp_data = "" ++ ++ switch(TARGET_DISTRO) { ++ case "almalinux-9": ++ leapp_data = TARGET_DISTRO.substring(0, 9) ++ break ++ ++ case "centos-stream-9": ++ leapp_data = TARGET_DISTRO.substring(0, 6) ++ break ++ ++ case "eurolinux-9": ++ leapp_data = TARGET_DISTRO.substring(0, 9) ++ break ++ ++ case "oraclelinux-9": ++ leapp_data = TARGET_DISTRO.substring(0, 11) ++ break ++ ++ case "rocky-9": ++ leapp_data = TARGET_DISTRO.substring(0, 5) ++ break ++ ++ default: ++ leap_data = "Error: Target Distro Not Supported" ++ break ++ } ++ return leapp_data ++} +-- +2.43.0 + + +From e2787314c8e3248903b81dadb29a0d671699e78e Mon Sep 17 00:00:00 2001 +From: eabdullin +Date: Tue, 12 Dec 2023 18:36:22 +0300 +Subject: [PATCH 24/36] Add priority for internal repo + +(cherry picked from commit d38464d040b9821958ca9b49929e292cade63cc5) +--- + ci/vagrant/el7toel8toel9_single.rb | 1 + + ci/vagrant/el8toel9_multi.rb | 2 +- + 2 files changed, 2 insertions(+), 1 deletion(-) + +diff --git a/ci/vagrant/el7toel8toel9_single.rb b/ci/vagrant/el7toel8toel9_single.rb +index e19bd079..0a34edbd 100644 +--- a/ci/vagrant/el7toel8toel9_single.rb ++++ 
b/ci/vagrant/el7toel8toel9_single.rb +@@ -8,6 +8,7 @@ Vagrant.configure('2') do |config| + + config.vm.synced_folder '.', '/vagrant', disabled: true + config.ssh.disable_deprecated_algorithms = true ++ config.vm.boot_timeout = 3600 + + config.vm.provider 'libvirt' do |v| + v.uri = 'qemu:///system' +diff --git a/ci/vagrant/el8toel9_multi.rb b/ci/vagrant/el8toel9_multi.rb +index 4fcb18c2..e7fcde75 100644 +--- a/ci/vagrant/el8toel9_multi.rb ++++ b/ci/vagrant/el8toel9_multi.rb +@@ -23,7 +23,7 @@ Vagrant.configure('2') do |config| + target_distros = { + almalinux: 'almalinux/8', + # centosstream: 'generic/centos8s', +- # eurolinux: 'eurolinux-vagrant/eurolinux-8', ++ eurolinux: 'eurolinux-vagrant/eurolinux-8', + rocky: 'generic/rocky8' + } + +-- +2.43.0 + + +From c7cfb12e0a6d4f2030749178ba38291df9b80077 Mon Sep 17 00:00:00 2001 +From: eabdullin +Date: Tue, 12 Dec 2023 15:41:48 +0300 +Subject: [PATCH 25/36] Add boot_timeout var + +(cherry picked from commit bfafc09b182332c9b2b9a754e221821447be37a6) +--- + ci/vagrant/el7toel8_multi.rb | 1 + + ci/vagrant/el8toel9_multi.rb | 1 + + 2 files changed, 2 insertions(+) + +diff --git a/ci/vagrant/el7toel8_multi.rb b/ci/vagrant/el7toel8_multi.rb +index 67a9f3b9..74116f78 100644 +--- a/ci/vagrant/el7toel8_multi.rb ++++ b/ci/vagrant/el7toel8_multi.rb +@@ -8,6 +8,7 @@ Vagrant.configure('2') do |config| + + config.vm.synced_folder '.', '/vagrant', disabled: true + config.vm.box = 'generic/centos7' ++ config.vm.boot_timeout = 3600 + + config.vm.provider 'libvirt' do |v| + v.uri = 'qemu:///system' +diff --git a/ci/vagrant/el8toel9_multi.rb b/ci/vagrant/el8toel9_multi.rb +index e7fcde75..0e2ba8ab 100644 +--- a/ci/vagrant/el8toel9_multi.rb ++++ b/ci/vagrant/el8toel9_multi.rb +@@ -8,6 +8,7 @@ Vagrant.configure('2') do |config| + + config.vm.synced_folder '.', '/vagrant', disabled: true + config.ssh.disable_deprecated_algorithms = true ++ config.vm.boot_timeout = 3600 + + config.vm.provider 'libvirt' do |v| + v.uri = 'qemu:///system' +-- 
+2.43.0 + + +From d3adefaa9a68e64573fc7a58a34e481e4a7faa4b Mon Sep 17 00:00:00 2001 +From: eabdullin +Date: Mon, 11 Dec 2023 16:54:42 +0300 +Subject: [PATCH 26/36] Add internal repos + +(cherry picked from commit e4e6370e0e80d39de1f9e24df7934e4fad6a842d) +--- + .../ELevate_el7toel8_Internal.jenkinsfile | 239 ++++++++++++++++++ + .../ELevate_el8toel9_Internal.jenkinsfile | 223 ++++++++++++++++ + 2 files changed, 462 insertions(+) + create mode 100644 ci/jenkins/ELevate_el7toel8_Internal.jenkinsfile + create mode 100644 ci/jenkins/ELevate_el8toel9_Internal.jenkinsfile + +diff --git a/ci/jenkins/ELevate_el7toel8_Internal.jenkinsfile b/ci/jenkins/ELevate_el7toel8_Internal.jenkinsfile +new file mode 100644 +index 00000000..0f5ab44d +--- /dev/null ++++ b/ci/jenkins/ELevate_el7toel8_Internal.jenkinsfile +@@ -0,0 +1,239 @@ ++RETRY = params.RETRY ++TIMEOUT = params.TIMEOUT ++ ++pipeline { ++ agent { ++ label 'x86_64 && bm' ++ } ++ options { ++ timestamps() ++ parallelsAlwaysFailFast() ++ } ++ parameters { ++ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-8', 'centos-stream-8', 'eurolinux-8', 'oraclelinux-8', 'rocky-8', 'all'], description: 'Select a target distro or all for ELevation') ++ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration') ++ string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true) ++ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true) ++ } ++ environment { ++ VAGRANT_NO_COLOR = '1' ++ } ++ stages { ++ stage('Prepare') { ++ steps { ++ sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml', ++ label: 'Install Ansible collections' ++ sh script: 'python3.11 -m venv .venv', ++ label: 'Create Python virtual environment' ++ sh script: '. 
.venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko', ++ label: 'Install Testinfra' ++ } ++ } ++ stage('CreateSingleMachine') { ++ when { ++ expression { params.TARGET_DISTRO_FILTER != 'all' } ++ } ++ environment { ++ CONFIG = "${CONF_FILTER}" ++ } ++ steps { ++ script { ++ def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER) ++ ++ sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile', ++ label: 'Generate Vagrantfile' ++ sh script: "vagrant up $targetDistro.vmName", ++ label: 'Create source VM' ++ } ++ } ++ } ++ stage('CreateMultiMachine') { ++ when { ++ expression { params.TARGET_DISTRO_FILTER == 'all' } ++ } ++ environment { ++ CONFIG = "${CONF_FILTER}" ++ } ++ steps { ++ sh script: 'cp ci/vagrant/el7toel8_multi.rb Vagrantfile', ++ label: 'Generate Vagrantfile' ++ sh script: 'vagrant up', ++ label: 'Create source VM' ++ } ++ } ++ stage('ELevationAndTest') { ++ matrix { ++ when { ++ anyOf { ++ expression { params.TARGET_DISTRO_FILTER == 'all' } ++ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO } ++ } ++ } ++ axes { ++ axis { ++ name 'TARGET_DISTRO' ++ values 'almalinux-8', 'centos-stream-8', 'eurolinux-8', 'oraclelinux-8', 'rocky-8' ++ } ++ } ++ stages { ++ stage('ELevate') { ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ script { ++ def targetDistro = targetDistroSpec(TARGET_DISTRO) ++ ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y https://repo.almalinux.org/elevate/elevate-release-latest-el7.noarch.rpm\"", ++ label: 'Install the elevate-release-latest rpm packages for EL7' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"wget https://build.almalinux.org/pulp/content/copr/eabdullin1-leapp-data-internal-almalinux-8-x86_64-dr/config.repo -O /etc/yum.repos.d/internal-leapp.repo\"", ++ label: 'Add pulp repository' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo yum install -y leapp-upgrade\"", ++ label: 'Install the leap rpm package' ++ sh script: 
"vagrant ssh $targetDistro.vmName -c \"sudo yum install -y $targetDistro.leappData\"", ++ label: 'Install the LEAP migration data rpm packages' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"", ++ label: 'Start the Pre-Upgrade check', ++ returnStatus: true ++ sh script: "vagrant ssh $targetDistro.vmName -c \"echo PermitRootLogin yes | sudo tee -a /etc/ssh/sshd_config\"", ++ label: 'Permit ssh as root login' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True\"", ++ label: 'Answer the LEAP question' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"", ++ label: 'Start the Upgrade' ++ sh script: "vagrant reload $targetDistro.vmName", ++ label: 'Reboot to the ELevate initramfs' ++ sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config", ++ label: 'Generate the ssh-config file' ++ } ++ } ++ } ++ } ++ } ++ stage('Distro Tests') { ++ when { ++ anyOf { ++ expression { params.CONF_FILTER == 'minimal' } ++ expression { params.CONF_FILTER == 'docker-ce' } ++ } ++ } ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ script { ++ def targetDistro = targetDistroSpec(TARGET_DISTRO) ++ ++ sh script: 'rm -f conftest.py pytest.ini', ++ label: 'Delete root conftest.py file' ++ sh script: """ ++ . .venv/bin/activate \ ++ && py.test -v --hosts=${targetDistro.vmName} \ ++ --ssh-config=.vagrant/ssh-config \ ++ --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \ ++ ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py ++ """, ++ label: 'Run the distro specific tests' ++ } ++ } ++ } ++ } ++ } ++ stage('Docker Tests') { ++ when { ++ anyOf { ++ expression { params.CONF_FILTER == 'docker-ce' } ++ } ++ } ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ script { ++ def targetDistro = targetDistroSpec(TARGET_DISTRO) ++ ++ sh script: """ ++ . 
.venv/bin/activate \ ++ && py.test -v --hosts=${targetDistro.vmName} \ ++ --ssh-config=.vagrant/ssh-config \ ++ --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \ ++ ci/tests/tests/docker/test_docker_ce.py ++ """, ++ label: 'Run the docker specific tests' ++ } ++ } ++ } ++ } ++ } ++ } ++ } ++ } ++ } ++ post { ++ success { ++ junit testResults: 'ci/tests/tests/**/**_junit.xml', ++ skipPublishingChecks: true ++ } ++ cleanup { ++ sh script: 'vagrant destroy -f --no-parallel -g', ++ label: 'Destroy VMs' ++ cleanWs() ++ } ++ } ++} ++ ++def targetDistroSpec(distro) { ++ def spec = [:] ++ ++ switch (distro) { ++ case 'almalinux-8': ++ vm = 'almalinux_8' ++ ldata = 'leapp-data-almalinux' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ case 'centos-stream-8': ++ vm = 'centosstream_8' ++ ldata = 'leapp-data-centos' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ case 'eurolinux-8': ++ vm = 'eurolinux_8' ++ ldata = 'leapp-data-eurolinux' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ case 'oraclelinux-8': ++ vm = 'oraclelinux_8' ++ ldata = 'leapp-data-oraclelinux' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ case 'rocky-8': ++ vm = 'rocky_8' ++ ldata = 'leapp-data-rocky' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ default: ++ spec = [ ++ vmName: 'unknown', ++ leappData: 'unknown' ++ ] ++ break ++ } ++ return spec ++} +diff --git a/ci/jenkins/ELevate_el8toel9_Internal.jenkinsfile b/ci/jenkins/ELevate_el8toel9_Internal.jenkinsfile +new file mode 100644 +index 00000000..33daa5dd +--- /dev/null ++++ b/ci/jenkins/ELevate_el8toel9_Internal.jenkinsfile +@@ -0,0 +1,223 @@ ++RETRY = params.RETRY ++TIMEOUT = params.TIMEOUT ++ ++pipeline { ++ agent { ++ label 'x86_64 && bm' ++ } ++ options { ++ timestamps() ++ parallelsAlwaysFailFast() ++ } ++ parameters { ++ // choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'centos-stream-9', 
'eurolinux-9', 'rocky-9', 'all'], description: 'Select a target distro or all for ELevation') ++ choice(name: 'TARGET_DISTRO_FILTER', choices: ['almalinux-9', 'rocky-9', 'eurolinux-9', 'all'], description: 'Select a target distro or all for ELevation') ++ choice(name: 'CONF_FILTER', choices: ['minimal', 'docker-ce'], description: 'Select a configuration') ++ string(name: 'RETRY', defaultValue: '3', description: 'Input count of retry', trim: true) ++ string(name: 'TIMEOUT', defaultValue: '60', description: 'Input timeout value in minutes', trim: true) ++ } ++ environment { ++ VAGRANT_NO_COLOR = '1' ++ } ++ stages { ++ stage('Prepare') { ++ steps { ++ sh script: 'ansible-galaxy install -r ci/ansible/requirements.yaml', ++ label: 'Install Ansible collections' ++ sh script: 'python3.11 -m venv .venv', ++ label: 'Create Python virtual environment' ++ sh script: '. .venv/bin/activate && pip install --no-color pip pytest-testinfra paramiko', ++ label: 'Install Testinfra' ++ } ++ } ++ stage('CreateSingleMachine') { ++ when { ++ expression { params.TARGET_DISTRO_FILTER != 'all' } ++ } ++ environment { ++ CONFIG = "${CONF_FILTER}" ++ } ++ steps { ++ script { ++ def targetDistro = targetDistroSpec(TARGET_DISTRO_FILTER) ++ ++ sh script: 'cp ci/vagrant/el7toel8toel9_single.rb Vagrantfile', ++ label: 'Generate Vagrantfile' ++ sh script: "vagrant up $targetDistro.vmName", ++ label: 'Create source VM' ++ } ++ } ++ } ++ stage('CreateMultiMachine') { ++ when { ++ expression { params.TARGET_DISTRO_FILTER == 'all' } ++ } ++ environment { ++ CONFIG = "${CONF_FILTER}" ++ } ++ steps { ++ sh script: 'cp ci/vagrant/el8toel9_multi.rb Vagrantfile', ++ label: 'Generate Vagrantfile' ++ sh script: 'vagrant up', ++ label: 'Create source VM' ++ } ++ } ++ stage('ELevationAndTest') { ++ matrix { ++ when { ++ anyOf { ++ expression { params.TARGET_DISTRO_FILTER == 'all' } ++ expression { params.TARGET_DISTRO_FILTER == env.TARGET_DISTRO } ++ } ++ } ++ axes { ++ axis { ++ name 'TARGET_DISTRO' ++ // 
values 'almalinux-9', 'centos-stream-9', 'eurolinux-9', 'rocky-9' ++ values 'almalinux-9', 'rocky-9', 'eurolinux-9' ++ } ++ } ++ stages { ++ stage('ELevate') { ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ script { ++ def targetDistro = targetDistroSpec(TARGET_DISTRO) ++ ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo dnf install -y https://repo.almalinux.org/elevate/elevate-release-latest-el8.noarch.rpm\"", ++ label: 'Install the elevate-release-latest rpm packages for EL8' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"wget https://build.almalinux.org/pulp/content/copr/eabdullin1-leapp-data-internal-centos7-x86_64-dr/config.repo -O /etc/yum.repos.d/internal-leapp.repo\"", ++ label: 'Add pulp repository' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo dnf install -y leapp-upgrade\"", ++ label: 'Install the leap rpm package' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo dnf install -y $targetDistro.leappData\"", ++ label: 'Install the LEAP migration data rpm packages' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp preupgrade\"", ++ label: 'Start the Pre-Upgrade check', ++ returnStatus: true ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo sed -i \'s/^AllowZoneDrifting=.*/AllowZoneDrifting=no/\' /etc/firewalld/firewalld.conf\"", ++ label: 'TODO' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp answer --section check_vdo.no_vdo_devices=True\"", ++ label: 'TODO' ++ sh script: "vagrant ssh $targetDistro.vmName -c \"sudo leapp upgrade\"", ++ label: 'Start the Upgrade' ++ sh script: "vagrant reload $targetDistro.vmName", ++ label: 'Reboot to the ELevate initramfs' ++ sh script: "vagrant ssh-config $targetDistro.vmName >> .vagrant/ssh-config", ++ label: 'Generate the ssh-config file' ++ } ++ } ++ } ++ } ++ } ++ stage('Distro Tests') { ++ when { ++ anyOf { ++ expression { params.CONF_FILTER == 'minimal' } ++ expression { params.CONF_FILTER == 'docker-ce' } ++ } ++ 
} ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ script { ++ def targetDistro = targetDistroSpec(TARGET_DISTRO) ++ ++ sh script: 'rm -f conftest.py pytest.ini', ++ label: 'Delete root conftest.py file' ++ sh script: """ ++ . .venv/bin/activate \ ++ && py.test -v --hosts=${targetDistro.vmName} \ ++ --ssh-config=.vagrant/ssh-config \ ++ --junit-xml ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}_junit.xml \ ++ ci/tests/tests/distro/test_osinfo_${targetDistro.vmName}.py ++ """, ++ label: 'Run the distro specific tests' ++ } ++ } ++ } ++ } ++ } ++ stage('Docker Tests') { ++ when { ++ anyOf { ++ expression { params.CONF_FILTER == 'docker-ce' } ++ } ++ } ++ steps { ++ retry(RETRY) { ++ timeout(time: TIMEOUT, unit: 'MINUTES') { ++ script { ++ def targetDistro = targetDistroSpec(TARGET_DISTRO) ++ ++ sh script: """ ++ . .venv/bin/activate \ ++ && py.test -v --hosts=${targetDistro.vmName} \ ++ --ssh-config=.vagrant/ssh-config \ ++ --junit-xml ci/tests/tests/docker/test_docker_ce_${targetDistro.vmName}_junit.xml \ ++ ci/tests/tests/docker/test_docker_ce.py ++ """, ++ label: 'Run the docker specific tests' ++ } ++ } ++ } ++ } ++ } ++ } ++ } ++ } ++ } ++ post { ++ success { ++ junit testResults: 'ci/tests/tests/**/**_junit.xml', ++ skipPublishingChecks: true ++ } ++ cleanup { ++ sh script: 'vagrant destroy -f --no-parallel -g', ++ label: 'Destroy VMs' ++ cleanWs() ++ } ++ } ++} ++ ++def targetDistroSpec(distro) { ++ def spec = [:] ++ ++ switch (distro) { ++ case 'almalinux-9': ++ vm = 'almalinux_9' ++ ldata = 'leapp-data-almalinux' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ case 'eurolinux-9': ++ vm = 'eurolinux_9' ++ ldata = 'leapp-data-eurolinux' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ case 'rocky-9': ++ vm = 'rocky_9' ++ ldata = 'leapp-data-rocky' ++ ++ spec = [ ++ vmName: vm, ++ leappData: ldata ++ ] ++ break ++ default: ++ spec = [ ++ vmName: 'unknown', ++ leappData: 'unknown' ++ ] ++ 
break ++ } ++ return spec ++} +-- +2.43.0 + + +From 4c90150b15e71ae0ef4743485f181f6076e7edbe Mon Sep 17 00:00:00 2001 +From: Yuriy Kohut +Date: Tue, 9 Jan 2024 13:03:41 +0200 +Subject: [PATCH 27/36] Add AlmaLinux8 new sig. + +--- + .../system_upgrade/common/actors/redhatsignedrpmscanner/actor.py | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py b/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py +index f4f8d2c5..8416fd39 100644 +--- a/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py ++++ b/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py +@@ -29,6 +29,7 @@ class RedHatSignedRpmScanner(Actor): + '6c7cb6ef305d49d6', + '51d6647ec21ad6ea', # almalinux + 'd36cb86cb86b3716', ++ '2ae81e8aced7258b', + '15af5dac6d745a60', # rockylinux + '702d426d350d275d', + '72f97b74ec551f03', # ol +-- +2.43.0 + + +From f68c91aab845a537348530f0549dc7164fcdf695 Mon Sep 17 00:00:00 2001 +From: Yuriy Kohut +Date: Tue, 9 Jan 2024 13:04:54 +0200 +Subject: [PATCH 28/36] Copy system's certificates if RHSM only. 
+ +--- + .../actors/targetuserspacecreator/libraries/userspacegen.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py +index 9dfa0f14..3dc8e6db 100644 +--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py ++++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py +@@ -400,9 +400,9 @@ def _prep_repository_access(context, target_userspace): + target_yum_repos_d = os.path.join(target_etc, 'yum.repos.d') + backup_yum_repos_d = os.path.join(target_etc, 'yum.repos.d.backup') + +- _copy_certificates(context, target_userspace) + + if not rhsm.skip_rhsm(): ++ _copy_certificates(context, target_userspace) + run(['rm', '-rf', os.path.join(target_etc, 'rhsm')]) + context.copytree_from('/etc/rhsm', os.path.join(target_etc, 'rhsm')) + # NOTE: we cannot just remove the original target yum.repos.d dir +-- +2.43.0 + + +From 51d38a912f6e25dccc3f67bdab156e8a80634b95 Mon Sep 17 00:00:00 2001 +From: Yuriy Kohut +Date: Wed, 10 Jan 2024 10:24:01 +0200 +Subject: [PATCH 29/36] Remove obsolete GPG keys + +--- + .../libraries/removeobsoleterpmgpgkeys.py | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + diff --git a/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py b/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py index 1cc5d64f..11c61e36 100644 --- a/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py @@ -301,32 +4740,101 @@ index 1cc5d64f..11c61e36 100644 } -diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py -index 9dfa0f14..3dc8e6db 100644 ---- 
a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py -+++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py -@@ -400,9 +400,9 @@ def _prep_repository_access(context, target_userspace): - target_yum_repos_d = os.path.join(target_etc, 'yum.repos.d') - backup_yum_repos_d = os.path.join(target_etc, 'yum.repos.d.backup') +-- +2.43.0 + + +From 3e609f9f7519f154bd109948a16f41ad58dd6a70 Mon Sep 17 00:00:00 2001 +From: Yuriy Kohut +Date: Wed, 10 Jan 2024 19:26:16 +0200 +Subject: [PATCH 30/36] Fix creation of /etc/rhsm/facts path. + +--- + commands/upgrade/breadcrumbs.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/commands/upgrade/breadcrumbs.py b/commands/upgrade/breadcrumbs.py +index 16903ee0..46f116fb 100644 +--- a/commands/upgrade/breadcrumbs.py ++++ b/commands/upgrade/breadcrumbs.py +@@ -61,7 +61,7 @@ class _BreadCrumbs(object): + if not os.path.exists('/etc/rhsm'): + # If there's no /etc/rhsm folder just skip it + return +- os.path.mkdir('/etc/rhsm/facts') ++ os.mkdir('/etc/rhsm/facts') + try: + with open('/etc/rhsm/facts/leapp.facts', 'w') as f: + json.dump(_flattened({ +-- +2.43.0 + + +From 67e43451ca810f9673745ddf42ba86f87ab307b0 Mon Sep 17 00:00:00 2001 +From: Yuriy Kohut +Date: Thu, 18 Jan 2024 14:11:03 +0200 +Subject: [PATCH 31/36] Add EuroLinux to the list of distributions, where grub + config should be created in case if EFI. 
+ +--- + .../common/actors/efibootorderfix/finalization/actor.py | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py b/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py +index 832f51ab..f08b7b82 100644 +--- a/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py ++++ b/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py +@@ -32,6 +32,7 @@ class EfiFinalizationFix(Actor): + 'Red Hat Enterprise Linux': 'redhat', + 'Rocky Linux': 'rocky', + 'Scientific Linux': 'redhat', ++ 'EuroLinux': 'eurolinux', + } -- _copy_certificates(context, target_userspace) - - if not rhsm.skip_rhsm(): -+ _copy_certificates(context, target_userspace) - run(['rm', '-rf', os.path.join(target_etc, 'rhsm')]) - context.copytree_from('/etc/rhsm', os.path.join(target_etc, 'rhsm')) - # NOTE: we cannot just remove the original target yum.repos.d dir + efi_shimname_dict = { +-- +2.43.0 + + +From 5d27743b78d41e72fabaadd98e25aeb2ea7d53a9 Mon Sep 17 00:00:00 2001 +From: Yuriy Kohut +Date: Mon, 22 Apr 2024 17:45:59 +0300 +Subject: [PATCH 32/36] Load all substitutions from etc (the way dfn CLI does) + +Convert dot-less CentOS Stream release to X.999. 
Add 'centos' '8.999' version to the list of supported for the in-place upgrade +--- + .../libraries/ipuworkflowconfig.py | 6 ++++- + .../common/libraries/config/version.py | 2 +- + .../system_upgrade/common/libraries/module.py | 23 +++++++++++-------- + 3 files changed, 20 insertions(+), 11 deletions(-) + +diff --git a/repos/system_upgrade/common/actors/ipuworkflowconfig/libraries/ipuworkflowconfig.py b/repos/system_upgrade/common/actors/ipuworkflowconfig/libraries/ipuworkflowconfig.py +index 73197e0b..7abb2b42 100644 +--- a/repos/system_upgrade/common/actors/ipuworkflowconfig/libraries/ipuworkflowconfig.py ++++ b/repos/system_upgrade/common/actors/ipuworkflowconfig/libraries/ipuworkflowconfig.py +@@ -51,8 +51,12 @@ def get_os_release(path): + try: + with open(path) as f: + data = dict(l.strip().split('=', 1) for l in f.readlines() if '=' in l) ++ release_id = data.get('ID', '').strip('"') ++ version_id = data.get('VERSION_ID', '').strip('"') ++ if release_id == 'centos' and '.' not in version_id: ++ os_version = "{}.999".format(version_id) + return OSRelease( +- release_id=data.get('ID', '').strip('"'), ++ release_id=release_id, + name=data.get('NAME', '').strip('"'), + pretty_name=data.get('PRETTY_NAME', '').strip('"'), + version=data.get('VERSION', '').strip('"'), diff --git a/repos/system_upgrade/common/libraries/config/version.py b/repos/system_upgrade/common/libraries/config/version.py -index 0f1e5874..5eb80c9c 100644 +index 95f0d231..5eb80c9c 100644 --- a/repos/system_upgrade/common/libraries/config/version.py +++ b/repos/system_upgrade/common/libraries/config/version.py -@@ -15,8 +15,8 @@ OP_MAP = { - +@@ -16,7 +16,7 @@ OP_MAP = { _SUPPORTED_VERSIONS = { # Note: 'rhel-alt' is detected when on 'rhel' with kernel 4.x -- '7': {'rhel': ['7.9'], 'rhel-alt': [], 'rhel-saphana': ['7.9']}, -- '8': {'rhel': ['8.6', '8.8', '8.9'], 'rhel-saphana': ['8.6', '8.8']}, -+ '7': {'rhel': ['7.9'], 'rhel-alt': [], 'rhel-saphana': ['7.9'], 'centos': ['7.9'], 'eurolinux': 
['7.9'], 'ol': ['7.9'], 'scientific': ['7.9']}, + '7': {'rhel': ['7.9'], 'rhel-alt': [], 'rhel-saphana': ['7.9'], 'centos': ['7.9'], 'eurolinux': ['7.9'], 'ol': ['7.9'], 'scientific': ['7.9']}, +- '8': {'rhel': ['8.5', '8.6', '8.8', '8.9', '8.10'], 'rhel-saphana': ['8.6', '8.8', '8.9', '8.10'], 'centos': ['8.5'], 'almalinux': ['8.6', '8.7', '8.8', '8.9', '8.10'], 'eurolinux': ['8.6', '8.7', '8.8', '8.9', '8.10'], 'ol': ['8.6', '8.7', '8.8', '8.9', '8.10'], 'rocky': ['8.6', '8.7', '8.8', '8.9', '8.10']}, + '8': {'rhel': ['8.5', '8.6', '8.8', '8.9', '8.10'], 'rhel-saphana': ['8.6', '8.8', '8.9', '8.10'], 'centos': ['8.5', '8.999'], 'almalinux': ['8.6', '8.7', '8.8', '8.9', '8.10'], 'eurolinux': ['8.6', '8.7', '8.8', '8.9', '8.10'], 'ol': ['8.6', '8.7', '8.8', '8.9', '8.10'], 'rocky': ['8.6', '8.7', '8.8', '8.9', '8.10']}, } @@ -369,16 +4877,1851 @@ index abde69e7..7d4e8aa4 100644 base = dnf.Base(conf=conf) base.init_plugins() -diff --git a/repos/system_upgrade/common/libraries/rhsm.py b/repos/system_upgrade/common/libraries/rhsm.py -index 4a5b0eb0..9fdec233 100644 ---- a/repos/system_upgrade/common/libraries/rhsm.py -+++ b/repos/system_upgrade/common/libraries/rhsm.py -@@ -92,7 +92,7 @@ def _handle_rhsm_exceptions(hint=None): +-- +2.43.0 + + +From 0a7b515366b131ef13a43f75d9f17f22606fd13d Mon Sep 17 00:00:00 2001 +From: Richard Alloway +Date: Mon, 26 Feb 2024 15:15:34 -0500 +Subject: [PATCH 33/36] Update actor.py to support NVMe device enumeration + +(cherry picked from commit ca804f0c916fb21504a1393b4c8a016df9671aff) +--- + .../efibootorderfix/finalization/actor.py | 20 +++++++++++++++++-- + 1 file changed, 18 insertions(+), 2 deletions(-) + +diff --git a/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py b/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py +index f08b7b82..4a2bc8ad 100644 +--- a/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py ++++ 
b/repos/system_upgrade/common/actors/efibootorderfix/finalization/actor.py +@@ -41,8 +41,24 @@ class EfiFinalizationFix(Actor): + } - def skip_rhsm(): - """Check whether we should skip RHSM related code.""" -- return get_env('LEAPP_NO_RHSM', '0') == '1' -+ return True + def devparts(dev): +- part = next(re.finditer(r'\d+$', dev)).group(0) +- dev = dev[:-len(part)] ++ """ ++ NVMe block devices aren't named like SCSI/ATA/etc block devices and must be parsed differently. ++ SCSI/ATA/etc devices have a syntax resembling /dev/sdb4 for the 4th partition on the 2nd disk. ++ NVMe devices have a syntax resembling /dev/nvme0n2p4 for the 4th partition on the 2nd disk. ++ """ ++ if '/dev/nvme' in dev: ++ """ ++ NVMe ++ """ ++ part = next(re.finditer(r'p\d+$', dev)).group(0) ++ dev = dev[:-len(part)] ++ part = part[1:] ++ else: ++ """ ++ Non-NVMe (SCSI, ATA, etc) ++ """ ++ part = next(re.finditer(r'\d+$', dev)).group(0) ++ dev = dev[:-len(part)] + return [dev, part]; + + with open('/etc/system-release', 'r') as sr: +-- +2.43.0 + + +From 70259236fbd64f4a8b8bdef86012f3677eec3bf0 Mon Sep 17 00:00:00 2001 +From: Yuriy Kohut +Date: Wed, 24 Apr 2024 10:48:42 +0300 +Subject: [PATCH 34/36] Fix dot-less CentOS Stream release determining. 
+ +--- + .../actors/ipuworkflowconfig/libraries/ipuworkflowconfig.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/repos/system_upgrade/common/actors/ipuworkflowconfig/libraries/ipuworkflowconfig.py b/repos/system_upgrade/common/actors/ipuworkflowconfig/libraries/ipuworkflowconfig.py +index 7abb2b42..52cfe14f 100644 +--- a/repos/system_upgrade/common/actors/ipuworkflowconfig/libraries/ipuworkflowconfig.py ++++ b/repos/system_upgrade/common/actors/ipuworkflowconfig/libraries/ipuworkflowconfig.py +@@ -53,7 +53,7 @@ def get_os_release(path): + data = dict(l.strip().split('=', 1) for l in f.readlines() if '=' in l) + release_id = data.get('ID', '').strip('"') + version_id = data.get('VERSION_ID', '').strip('"') +- if release_id == 'centos' and '.' not in version_id: ++ if release_id == 'centos' and '.' not in os_version: + os_version = "{}.999".format(version_id) + return OSRelease( + release_id=release_id, +-- +2.43.0 + + +From 6d4f9ea8f45f8e06489dc0712e21c7d02e4fd1bc Mon Sep 17 00:00:00 2001 +From: Monstrofil +Date: Thu, 20 Jun 2024 19:44:48 +0300 +Subject: [PATCH 35/36] AlmaLinux vendors.d functionaloty rebased on top of + v0.19 (#114) + +* Reimplement the vendors mechanism on top of 0.19.0 + +* Move the RepoMapData class to a common library + +* Add the missing function to the new library + +* Add multiple repomap support to PES Events Scanner + +* Ensure the repositories data is available to the vendors-checking actors + +* Add target directory to the PES fetch function + +* Allow packageinfo to be explicitly empty list in pes files + +Raise errors only when packageinfo is missing, but allow +empty lists which mean that no actions required. 
+ +* Implement package reinstallation + +* Add the missing reinstallation ID + +* TODO: Reinstallation + +* Bring back the package reinstallation + +* Fix type errors in in code + +* Add additional trusted gpg certificates directory + +* Added missing signatures + +--------- + +Co-authored-by: Roman Prilipskii +Co-authored-by: Oleksandr Shyshatskyi +--- + .gitignore | 1 + + etc/leapp/transaction/to_reinstall | 3 + + .../actors/checkenabledvendorrepos/actor.py | 53 +++++++ + .../filterrpmtransactionevents/actor.py | 5 +- + .../libraries/missinggpgkey.py | 41 ++--- + .../common/actors/peseventsscanner/actor.py | 4 +- + .../libraries/pes_event_parsing.py | 4 +- + .../libraries/pes_events_scanner.py | 51 +++++-- + .../actors/redhatsignedrpmscanner/actor.py | 91 +++++++----- + .../libraries/repositoriesmapping.py | 118 +-------------- + .../rpmtransactionconfigtaskscollector.py | 28 +++- + .../actors/scanvendorrepofiles/actor.py | 26 ++++ + .../libraries/scanvendorrepofiles.py | 72 +++++++++ + .../tests/test_scanvendorrepofiles.py | 131 ++++++++++++++++ + .../common/actors/setuptargetrepos/actor.py | 6 +- + .../libraries/setuptargetrepos.py | 59 +++++++- + .../common/actors/systemfacts/actor.py | 2 +- + .../vendorreposignaturescanner/actor.py | 72 +++++++++ + .../actors/vendorrepositoriesmapping/actor.py | 19 +++ + .../libraries/vendorrepositoriesmapping.py | 92 ++++++++++++ + .../common/files/rhel_upgrade.py | 4 + + .../common/libraries/dnfplugin.py | 1 + + .../system_upgrade/common/libraries/fetch.py | 5 +- + .../common/libraries/repomaputils.py | 140 ++++++++++++++++++ + .../common/models/activevendorlist.py | 7 + + .../common/models/repositoriesmap.py | 1 + + .../common/models/rpmtransactiontasks.py | 1 + + .../common/models/targetrepositories.py | 6 + + .../common/models/vendorsignatures.py | 8 + + .../common/models/vendorsourcerepos.py | 12 ++ + .../common/topics/vendortopic.py | 5 + + 31 files changed, 867 insertions(+), 201 deletions(-) + create mode 100644 
etc/leapp/transaction/to_reinstall + create mode 100644 repos/system_upgrade/common/actors/checkenabledvendorrepos/actor.py + create mode 100644 repos/system_upgrade/common/actors/scanvendorrepofiles/actor.py + create mode 100644 repos/system_upgrade/common/actors/scanvendorrepofiles/libraries/scanvendorrepofiles.py + create mode 100644 repos/system_upgrade/common/actors/scanvendorrepofiles/tests/test_scanvendorrepofiles.py + create mode 100644 repos/system_upgrade/common/actors/vendorreposignaturescanner/actor.py + create mode 100644 repos/system_upgrade/common/actors/vendorrepositoriesmapping/actor.py + create mode 100644 repos/system_upgrade/common/actors/vendorrepositoriesmapping/libraries/vendorrepositoriesmapping.py + create mode 100644 repos/system_upgrade/common/libraries/repomaputils.py + create mode 100644 repos/system_upgrade/common/models/activevendorlist.py + create mode 100644 repos/system_upgrade/common/models/vendorsignatures.py + create mode 100644 repos/system_upgrade/common/models/vendorsourcerepos.py + create mode 100644 repos/system_upgrade/common/topics/vendortopic.py + +diff --git a/.gitignore b/.gitignore +index 0bb92d3d..a04c7ded 100644 +--- a/.gitignore ++++ b/.gitignore +@@ -115,6 +115,7 @@ ENV/ + + # visual studio code configuration + .vscode ++*.code-workspace + + # pycharm + .idea +diff --git a/etc/leapp/transaction/to_reinstall b/etc/leapp/transaction/to_reinstall +new file mode 100644 +index 00000000..c6694a8e +--- /dev/null ++++ b/etc/leapp/transaction/to_reinstall +@@ -0,0 +1,3 @@ ++### List of packages (each on new line) to be reinstalled to the upgrade transaction ++### Useful for packages that have identical version strings but contain binary changes between major OS versions ++### Packages that aren't installed will be skipped +diff --git a/repos/system_upgrade/common/actors/checkenabledvendorrepos/actor.py b/repos/system_upgrade/common/actors/checkenabledvendorrepos/actor.py +new file mode 100644 +index 00000000..52f5af9d +--- 
/dev/null ++++ b/repos/system_upgrade/common/actors/checkenabledvendorrepos/actor.py +@@ -0,0 +1,53 @@ ++from leapp.actors import Actor ++from leapp.libraries.stdlib import api ++from leapp.models import ( ++ RepositoriesFacts, ++ VendorSourceRepos, ++ ActiveVendorList, ++) ++from leapp.tags import FactsPhaseTag, IPUWorkflowTag ++ ++ ++class CheckEnabledVendorRepos(Actor): ++ """ ++ Create a list of vendors whose repositories are present on the system and enabled. ++ Only those vendors' configurations (new repositories, PES actions, etc.) ++ will be included in the upgrade process. ++ """ ++ ++ name = "check_enabled_vendor_repos" ++ consumes = (RepositoriesFacts, VendorSourceRepos) ++ produces = (ActiveVendorList) ++ tags = (IPUWorkflowTag, FactsPhaseTag.Before) ++ ++ def process(self): ++ vendor_mapping_data = {} ++ active_vendors = set() ++ ++ # Make a dict for easy mapping of repoid -> corresponding vendor name. ++ for vendor_src_repodata in api.consume(VendorSourceRepos): ++ for vendor_src_repo in vendor_src_repodata.source_repoids: ++ vendor_mapping_data[vendor_src_repo] = vendor_src_repodata.vendor ++ ++ # Is the repo listed in the vendor map as from_repoid present on the system? ++ for repos_facts in api.consume(RepositoriesFacts): ++ for repo_file in repos_facts.repositories: ++ for repo_data in repo_file.data: ++ self.log.debug( ++ "Looking for repository {} in vendor maps".format(repo_data.repoid) ++ ) ++ if repo_data.enabled and repo_data.repoid in vendor_mapping_data: ++ # If the vendor's repository is present in the system and enabled, count the vendor as active. 
++ new_vendor = vendor_mapping_data[repo_data.repoid] ++ self.log.debug( ++ "Repository {} found and enabled, enabling vendor {}".format( ++ repo_data.repoid, new_vendor ++ ) ++ ) ++ active_vendors.add(new_vendor) ++ ++ if active_vendors: ++ self.log.debug("Active vendor list: {}".format(active_vendors)) ++ api.produce(ActiveVendorList(data=list(active_vendors))) ++ else: ++ self.log.info("No active vendors found, vendor list not generated") +diff --git a/repos/system_upgrade/common/actors/filterrpmtransactionevents/actor.py b/repos/system_upgrade/common/actors/filterrpmtransactionevents/actor.py +index e0d89d9f..52f93ef3 100644 +--- a/repos/system_upgrade/common/actors/filterrpmtransactionevents/actor.py ++++ b/repos/system_upgrade/common/actors/filterrpmtransactionevents/actor.py +@@ -32,6 +32,7 @@ class FilterRpmTransactionTasks(Actor): + to_remove = set() + to_keep = set() + to_upgrade = set() ++ to_reinstall = set() + modules_to_enable = {} + modules_to_reset = {} + for event in self.consume(RpmTransactionTasks, PESRpmTransactionTasks): +@@ -39,13 +40,14 @@ class FilterRpmTransactionTasks(Actor): + to_install.update(event.to_install) + to_remove.update(installed_pkgs.intersection(event.to_remove)) + to_keep.update(installed_pkgs.intersection(event.to_keep)) ++ to_reinstall.update(installed_pkgs.intersection(event.to_reinstall)) + modules_to_enable.update({'{}:{}'.format(m.name, m.stream): m for m in event.modules_to_enable}) + modules_to_reset.update({'{}:{}'.format(m.name, m.stream): m for m in event.modules_to_reset}) + + to_remove.difference_update(to_keep) + + # run upgrade for the rest of RH signed pkgs which we do not have rule for +- to_upgrade = installed_pkgs - (to_install | to_remove) ++ to_upgrade = installed_pkgs - (to_install | to_remove | to_reinstall) + + self.produce(FilteredRpmTransactionTasks( + local_rpms=list(local_rpms), +@@ -53,5 +55,6 @@ class FilterRpmTransactionTasks(Actor): + to_remove=list(to_remove), + to_keep=list(to_keep), + 
to_upgrade=list(to_upgrade), ++ to_reinstall=list(to_reinstall), + modules_to_reset=list(modules_to_reset.values()), + modules_to_enable=list(modules_to_enable.values()))) +diff --git a/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py +index 1880986d..d6f64eea 100644 +--- a/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py ++++ b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py +@@ -112,7 +112,10 @@ def _get_path_to_gpg_certs(): + # only beta is special in regards to the GPG signing keys + if target_product_type == 'beta': + certs_dir = '{}beta'.format(target_major_version) +- return os.path.join(api.get_common_folder_path(GPG_CERTS_FOLDER), certs_dir) ++ return [ ++ "/etc/leapp/files/vendors.d/rpm-gpg/", ++ os.path.join(api.get_common_folder_path(GPG_CERTS_FOLDER), certs_dir) ++ ] - def with_rhsm(f): + def _expand_vars(path): +@@ -169,14 +172,15 @@ def _get_pubkeys(installed_rpms): + """ + pubkeys = _pubkeys_from_rpms(installed_rpms) + certs_path = _get_path_to_gpg_certs() +- for certname in os.listdir(certs_path): +- key_file = os.path.join(certs_path, certname) +- fps = _read_gpg_fp_from_file(key_file) +- if fps: +- pubkeys += fps +- # TODO: what about else: ? +- # The warning is now logged in _read_gpg_fp_from_file. We can raise +- # the priority of the message or convert it to report though. ++ for trusted_dir in certs_path: ++ for certname in os.listdir(trusted_dir): ++ key_file = os.path.join(trusted_dir, certname) ++ fps = _read_gpg_fp_from_file(key_file) ++ if fps: ++ pubkeys += fps ++ # TODO: what about else: ? ++ # The warning is now logged in _read_gpg_fp_from_file. We can raise ++ # the priority of the message or convert it to report though. 
+ return pubkeys
+ 
+ 
+@@ -270,11 +274,11 @@ def _report(title, summary, keys, inhibitor=False):
+ )
+ hint = (
+ 'Check the path to the listed GPG keys is correct, the keys are valid and'
+- ' import them into the host RPM DB or store them inside the {} directory'
++ ' import them into the host RPM DB or store them inside one of the {} directories'
+ ' prior the upgrade.'
+ ' If you want to proceed the in-place upgrade without checking any RPM'
+ ' signatures, execute leapp with the `--nogpgcheck` option.'
+- .format(_get_path_to_gpg_certs())
++ .format(','.join(_get_path_to_gpg_certs()))
+ )
+ groups = [reporting.Groups.REPOSITORY]
+ if inhibitor:
+@@ -305,8 +309,8 @@ def _report_missing_keys(keys):
+ summary = (
+ 'Some of the target repositories require GPG keys that are not installed'
+- ' in the current RPM DB or are not stored in the {trust_dir} directory.'
+- .format(trust_dir=_get_path_to_gpg_certs())
++ ' in the current RPM DB or are not stored in the {trust_dir} directories.'
++ .format(trust_dir=','.join(_get_path_to_gpg_certs())) + ) + _report('Detected unknown GPG keys for target system repositories', summary, keys, True) + +@@ -380,11 +384,12 @@ def _report_repos_missing_keys(repos): + + + def register_dnfworkaround(): +- api.produce(DNFWorkaround( +- display_name='import trusted gpg keys to RPM DB', +- script_path=api.current_actor().get_common_tool_path('importrpmgpgkeys'), +- script_args=[_get_path_to_gpg_certs()], +- )) ++ for trust_certs_dir in _get_path_to_gpg_certs(): ++ api.produce(DNFWorkaround( ++ display_name='import trusted gpg keys to RPM DB', ++ script_path=api.current_actor().get_common_tool_path('importrpmgpgkeys'), ++ script_args=[trust_certs_dir], ++ )) + + + @suppress_deprecation(TMPTargetRepositoriesFacts) +diff --git a/repos/system_upgrade/common/actors/peseventsscanner/actor.py b/repos/system_upgrade/common/actors/peseventsscanner/actor.py +index c00c1e0f..7256f380 100644 +--- a/repos/system_upgrade/common/actors/peseventsscanner/actor.py ++++ b/repos/system_upgrade/common/actors/peseventsscanner/actor.py +@@ -10,7 +10,8 @@ from leapp.models import ( + RepositoriesMapping, + RepositoriesSetupTasks, + RHUIInfo, +- RpmTransactionTasks ++ RpmTransactionTasks, ++ ActiveVendorList, + ) + from leapp.reporting import Report + from leapp.tags import FactsPhaseTag, IPUWorkflowTag +@@ -33,6 +34,7 @@ class PesEventsScanner(Actor): + RepositoriesMapping, + RHUIInfo, + RpmTransactionTasks, ++ ActiveVendorList, + ) + produces = (ConsumedDataAsset, PESRpmTransactionTasks, RepositoriesSetupTasks, Report) + tags = (IPUWorkflowTag, FactsPhaseTag) +diff --git a/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_event_parsing.py b/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_event_parsing.py +index 35bcec73..0a5f46e0 100644 +--- a/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_event_parsing.py ++++ 
b/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_event_parsing.py +@@ -58,6 +58,7 @@ class Action(IntEnum): + MERGED = 5 + MOVED = 6 + RENAMED = 7 ++ REINSTALLED = 8 + + + def get_pes_events(pes_json_directory, pes_json_filename): +@@ -69,13 +70,14 @@ def get_pes_events(pes_json_directory, pes_json_filename): + try: + events_data = fetch.load_data_asset(api.current_actor(), + pes_json_filename, ++ asset_directory=pes_json_directory, + asset_fulltext_name='PES events file', + docs_url='', + docs_title='') + if not events_data: + return None + +- if not events_data.get('packageinfo'): ++ if events_data.get('packageinfo') is None: + raise ValueError('Found PES data with invalid structure') + + all_events = list(chain(*[parse_entry(entry) for entry in events_data['packageinfo']])) +diff --git a/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py b/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py +index 01457f2a..5c8ada44 100644 +--- a/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py ++++ b/repos/system_upgrade/common/actors/peseventsscanner/libraries/pes_events_scanner.py +@@ -1,11 +1,13 @@ + from collections import defaultdict, namedtuple + from functools import partial ++import os + + from leapp import reporting + from leapp.exceptions import StopActorExecutionError + from leapp.libraries.actor import peseventsscanner_repomap + from leapp.libraries.actor.pes_event_parsing import Action, get_pes_events, Package + from leapp.libraries.common.config import version ++from leapp.libraries.common.repomaputils import combine_repomap_messages + from leapp.libraries.stdlib import api + from leapp.libraries.stdlib.config import is_verbose + from leapp.models import ( +@@ -19,7 +21,8 @@ from leapp.models import ( + RepositoriesMapping, + RepositoriesSetupTasks, + RHUIInfo, +- RpmTransactionTasks ++ RpmTransactionTasks, ++ ActiveVendorList, + ) + + 
SKIPPED_PKGS_MSG = ( +@@ -30,8 +33,9 @@ SKIPPED_PKGS_MSG = ( + 'for details.\nThe list of these packages:' + ) + ++VENDORS_DIR = "/etc/leapp/files/vendors.d" + +-TransactionConfiguration = namedtuple('TransactionConfiguration', ('to_install', 'to_remove', 'to_keep')) ++TransactionConfiguration = namedtuple('TransactionConfiguration', ('to_install', 'to_remove', 'to_keep', 'to_reinstall')) + + + def get_cloud_provider_name(cloud_provider_variant): +@@ -82,7 +86,7 @@ def get_transaction_configuration(): + These configuration files have higher priority than PES data. + :return: RpmTransactionTasks model instance + """ +- transaction_configuration = TransactionConfiguration(to_install=[], to_remove=[], to_keep=[]) ++ transaction_configuration = TransactionConfiguration(to_install=[], to_remove=[], to_keep=[], to_reinstall=[]) + + _Pkg = partial(Package, repository=None, modulestream=None) + +@@ -90,6 +94,7 @@ def get_transaction_configuration(): + transaction_configuration.to_install.extend(_Pkg(name=pkg_name) for pkg_name in tasks.to_install) + transaction_configuration.to_remove.extend(_Pkg(name=pkg_name) for pkg_name in tasks.to_remove) + transaction_configuration.to_keep.extend(_Pkg(name=pkg_name) for pkg_name in tasks.to_keep) ++ transaction_configuration.to_reinstall.extend(_Pkg(name=pkg_name) for pkg_name in tasks.to_reinstall) + return transaction_configuration + + +@@ -129,6 +134,7 @@ def compute_pkg_changes_between_consequent_releases(source_installed_pkgs, + logger = api.current_logger() + # Start with the installed packages and modify the set according to release events + target_pkgs = set(source_installed_pkgs) ++ pkgs_to_reinstall = set() + + release_events = [e for e in events if e.to_release == release] + +@@ -165,9 +171,12 @@ def compute_pkg_changes_between_consequent_releases(source_installed_pkgs, + target_pkgs = target_pkgs.difference(event.in_pkgs) + target_pkgs = target_pkgs.union(event.out_pkgs) + ++ if (event.action == Action.REINSTALLED and 
is_any_in_pkg_present): ++ pkgs_to_reinstall = pkgs_to_reinstall.union(event.in_pkgs) ++ + pkgs_to_demodularize = pkgs_to_demodularize.difference(event.in_pkgs) + +- return (target_pkgs, pkgs_to_demodularize) ++ return (target_pkgs, pkgs_to_demodularize, pkgs_to_reinstall) + + + def remove_undesired_events(events, relevant_to_releases): +@@ -233,15 +242,17 @@ def compute_packages_on_target_system(source_pkgs, events, releases): + did_processing_cross_major_version = True + pkgs_to_demodularize = {pkg for pkg in target_pkgs if pkg.modulestream} + +- target_pkgs, pkgs_to_demodularize = compute_pkg_changes_between_consequent_releases(target_pkgs, events, +- release, seen_pkgs, +- pkgs_to_demodularize) ++ target_pkgs, pkgs_to_demodularize, pkgs_to_reinstall = compute_pkg_changes_between_consequent_releases( ++ target_pkgs, events, ++ release, seen_pkgs, ++ pkgs_to_demodularize ++ ) + seen_pkgs = seen_pkgs.union(target_pkgs) + + demodularized_pkgs = {Package(pkg.name, pkg.repository, None) for pkg in pkgs_to_demodularize} + demodularized_target_pkgs = target_pkgs.difference(pkgs_to_demodularize).union(demodularized_pkgs) + +- return (demodularized_target_pkgs, pkgs_to_demodularize) ++ return (demodularized_target_pkgs, pkgs_to_demodularize, pkgs_to_reinstall) + + + def compute_rpm_tasks_from_pkg_set_diff(source_pkgs, target_pkgs, pkgs_to_demodularize): +@@ -345,15 +356,13 @@ def get_pesid_to_repoid_map(target_pesids): + :return: Dictionary mapping the target_pesids to their corresponding repoid + """ + +- repositories_map_msgs = api.consume(RepositoriesMapping) +- repositories_map_msg = next(repositories_map_msgs, None) +- if list(repositories_map_msgs): +- api.current_logger().warning('Unexpectedly received more than one RepositoriesMapping message.') +- if not repositories_map_msg: ++ repositories_map_msgs = list(api.consume(RepositoriesMapping)) ++ if not repositories_map_msgs: + raise StopActorExecutionError( + 'Cannot parse RepositoriesMapping data properly', + 
details={'Problem': 'Did not receive a message with mapped repositories'} + ) ++ repositories_map_msg = combine_repomap_messages(repositories_map_msgs) + + rhui_info = next(api.consume(RHUIInfo), RHUIInfo(provider='')) + +@@ -485,6 +494,19 @@ def process(): + if not events: + return + ++ active_vendors = [] ++ for vendor_list in api.consume(ActiveVendorList): ++ active_vendors.extend(vendor_list.data) ++ ++ pes_json_suffix = "_pes.json" ++ if os.path.isdir(VENDORS_DIR): ++ vendor_pesfiles = list(filter(lambda vfile: pes_json_suffix in vfile, os.listdir(VENDORS_DIR))) ++ ++ for pesfile in vendor_pesfiles: ++ if pesfile[:-len(pes_json_suffix)] in active_vendors: ++ vendor_events = get_pes_events(VENDORS_DIR, pesfile) ++ events.extend(vendor_events) ++ + releases = get_relevant_releases(events) + source_pkgs = get_installed_pkgs() + source_pkgs = apply_transaction_configuration(source_pkgs) +@@ -496,7 +518,7 @@ def process(): + events = remove_undesired_events(events, releases) + + # Apply events - compute what packages should the target system have +- target_pkgs, pkgs_to_demodularize = compute_packages_on_target_system(source_pkgs, events, releases) ++ target_pkgs, pkgs_to_demodularize, pkgs_to_reinstall = compute_packages_on_target_system(source_pkgs, events, releases) + + # Packages coming out of the events have PESID as their repository, however, we need real repoid + target_pkgs = replace_pesids_with_repoids_in_packages(target_pkgs, repoids_of_source_pkgs) +@@ -512,4 +534,5 @@ def process(): + # Compare the packages on source system and the computed packages on target system and determine what to install + rpm_tasks = compute_rpm_tasks_from_pkg_set_diff(source_pkgs, target_pkgs, pkgs_to_demodularize) + if rpm_tasks: ++ rpm_tasks.to_reinstall = sorted(pkgs_to_reinstall) + api.produce(rpm_tasks) +diff --git a/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py b/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py +index 
8416fd39..36daf322 100644 +--- a/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py ++++ b/repos/system_upgrade/common/actors/redhatsignedrpmscanner/actor.py +@@ -1,44 +1,67 @@ + from leapp.actors import Actor + from leapp.libraries.common import rhui +-from leapp.models import InstalledRedHatSignedRPM, InstalledRPM, InstalledUnsignedRPM ++from leapp.models import InstalledRedHatSignedRPM, InstalledRPM, InstalledUnsignedRPM, VendorSignatures + from leapp.tags import FactsPhaseTag, IPUWorkflowTag + + ++VENDOR_SIGS = { ++ 'rhel': ['199e2f91fd431d51', ++ '5326810137017186', ++ '938a80caf21541eb', ++ 'fd372689897da07a', ++ '45689c882fa658e0'], ++ 'centos': ['24c6a8a7f4a80eb5', ++ '05b555b38483c65d', ++ '4eb84e71f2ee9d55', ++ 'a963bbdbf533f4fa', ++ '6c7cb6ef305d49d6'], ++ 'cloudlinux': ['8c55a6628608cb71'], ++ 'almalinux': ['51d6647ec21ad6ea', ++ 'd36cb86cb86b3716', ++ '2ae81e8aced7258b'], ++ 'rocky': ['15af5dac6d745a60', ++ '702d426d350d275d'], ++ 'ol': ['72f97b74ec551f03', ++ '82562ea9ad986da3', ++ 'bc4d06a08d8b756f'], ++ 'eurolinux': ['75c333f418cd4a9e', ++ 'b413acad6275f250', ++ 'f7ad3e5a1c9fd080'], ++ 'scientific': ['b0b4183f192a7d7d'] ++} ++ ++VENDOR_PACKAGERS = { ++ "rhel": "Red Hat, Inc.", ++ "centos": "CentOS", ++ "almalinux": "AlmaLinux Packaging Team", ++ "rocky": "infrastructure@rockylinux.org", ++ "eurolinux": "EuroLinux", ++ "scientific": "Scientific Linux", ++} ++ ++ + class RedHatSignedRpmScanner(Actor): +- """Provide data about installed RPM Packages signed by Red Hat. ++ """Provide data about installed RPM Packages signed by vendors. ++ ++ The "Red Hat" in the name of the actor is a historical artifact - the actor ++ is used for all vendors present in the config. + + After filtering the list of installed RPM packages by signature, a message + with relevant data will be produced. 
+ """ + + name = 'red_hat_signed_rpm_scanner' +- consumes = (InstalledRPM,) ++ consumes = (InstalledRPM, VendorSignatures) + produces = (InstalledRedHatSignedRPM, InstalledUnsignedRPM,) + tags = (IPUWorkflowTag, FactsPhaseTag) + + def process(self): +- RH_SIGS = ['199e2f91fd431d51', # rhel +- '5326810137017186', +- '938a80caf21541eb', +- 'fd372689897da07a', +- '45689c882fa658e0', +- '24c6a8a7f4a80eb5', # centos +- '05b555b38483c65d', +- '4eb84e71f2ee9d55', +- 'a963bbdbf533f4fa', +- '6c7cb6ef305d49d6', +- '51d6647ec21ad6ea', # almalinux +- 'd36cb86cb86b3716', +- '2ae81e8aced7258b', +- '15af5dac6d745a60', # rockylinux +- '702d426d350d275d', +- '72f97b74ec551f03', # ol +- '82562ea9ad986da3', +- 'bc4d06a08d8b756f', +- '75c333f418cd4a9e', # eurolinux +- 'b413acad6275f250', +- 'f7ad3e5a1c9fd080', +- 'b0b4183f192a7d7d'] # scientific ++ # Packages from multiple vendors can be installed on the system. ++ # Picking the vendor based on the OS release is not enough. ++ vendor_keys = sum(VENDOR_SIGS.values(), []) ++ ++ for siglist in self.consume(VendorSignatures): ++ vendor_keys.extend(siglist.sigs) + + signed_pkgs = InstalledRedHatSignedRPM() + unsigned_pkgs = InstalledUnsignedRPM() +@@ -52,8 +75,8 @@ class RedHatSignedRpmScanner(Actor): + if env.name == 'LEAPP_DEVEL_RPMS_ALL_SIGNED' and env.value == '1' + ] + +- def has_rhsig(pkg): +- return any(key in pkg.pgpsig for key in RH_SIGS) ++ def has_vendorsig(pkg): ++ return any(key in pkg.pgpsig for key in vendor_keys) + + def is_gpg_pubkey(pkg): + """Check if gpg-pubkey pkg exists or LEAPP_DEVEL_RPMS_ALL_SIGNED=1 +@@ -61,15 +84,9 @@ class RedHatSignedRpmScanner(Actor): + gpg-pubkey is not signed as it would require another package + to verify its signature + """ +- return ( # pylint: disable-msg=consider-using-ternary +- pkg.name == 'gpg-pubkey' +- and (pkg.packager.startswith('Red Hat, Inc.') +- or pkg.packager.startswith('CentOS') +- or pkg.packager.startswith('AlmaLinux') +- or 
pkg.packager.startswith('infrastructure@rockylinux.org') +- or pkg.packager.startswith('EuroLinux') +- or pkg.packager.startswith('Scientific Linux')) +- or all_signed ++ return ( # pylint: disable-msg=consider-using-ternary ++ pkg.name == "gpg-pubkey" ++ or all_signed + ) + + def has_katello_prefix(pkg): +@@ -101,7 +118,7 @@ class RedHatSignedRpmScanner(Actor): + for pkg in rpm_pkgs.items: + if any( + [ +- has_rhsig(pkg), ++ has_vendorsig(pkg), + is_gpg_pubkey(pkg), + has_katello_prefix(pkg), + pkg.name in whitelisted_cloud_pkgs, +diff --git a/repos/system_upgrade/common/actors/repositoriesmapping/libraries/repositoriesmapping.py b/repos/system_upgrade/common/actors/repositoriesmapping/libraries/repositoriesmapping.py +index 416034ac..e010c5b9 100644 +--- a/repos/system_upgrade/common/actors/repositoriesmapping/libraries/repositoriesmapping.py ++++ b/repos/system_upgrade/common/actors/repositoriesmapping/libraries/repositoriesmapping.py +@@ -1,11 +1,11 @@ + import os +-from collections import defaultdict + + from leapp.exceptions import StopActorExecutionError + from leapp.libraries.common.config.version import get_source_major_version, get_target_major_version ++from leapp.libraries.common.repomaputils import RepoMapData + from leapp.libraries.common.fetch import load_data_asset + from leapp.libraries.stdlib import api +-from leapp.models import PESIDRepositoryEntry, RepoMapEntry, RepositoriesMapping ++from leapp.models import RepositoriesMapping + from leapp.models.fields import ModelViolationError + + OLD_REPOMAP_FILE = 'repomap.csv' +@@ -15,120 +15,6 @@ REPOMAP_FILE = 'repomap.json' + """The name of the new repository mapping file.""" + + +-class RepoMapData(object): +- VERSION_FORMAT = '1.2.0' +- +- def __init__(self): +- self.repositories = [] +- self.mapping = {} +- +- def add_repository(self, data, pesid): +- """ +- Add new PESIDRepositoryEntry with given pesid from the provided dictionary. 
+- +- :param data: A dict containing the data of the added repository. The dictionary structure corresponds +- to the repositories entries in the repository mapping JSON schema. +- :type data: Dict[str, str] +- :param pesid: PES id of the repository family that the newly added repository belongs to. +- :type pesid: str +- """ +- self.repositories.append(PESIDRepositoryEntry( +- repoid=data['repoid'], +- channel=data['channel'], +- rhui=data.get('rhui', ''), +- repo_type=data['repo_type'], +- arch=data['arch'], +- major_version=data['major_version'], +- pesid=pesid +- )) +- +- def get_repositories(self, valid_major_versions): +- """ +- Return the list of PESIDRepositoryEntry object matching the specified major versions. +- """ +- return [repo for repo in self.repositories if repo.major_version in valid_major_versions] +- +- def add_mapping(self, source_major_version, target_major_version, source_pesid, target_pesid): +- """ +- Add a new mapping entry that is mapping the source pesid to the destination pesid(s), +- relevant in an IPU from the supplied source major version to the supplied target +- major version. +- +- :param str source_major_version: Specifies the major version of the source system +- for which the added mapping applies. +- :param str target_major_version: Specifies the major version of the target system +- for which the added mapping applies. +- :param str source_pesid: PESID of the source repository. +- :param Union[str|List[str]] target_pesid: A single target PESID or a list of target +- PESIDs of the added mapping. +- """ +- # NOTE: it could be more simple, but I prefer to be sure the input data +- # contains just one map per source PESID. 
+- key = '{}:{}'.format(source_major_version, target_major_version) +- rmap = self.mapping.get(key, defaultdict(set)) +- self.mapping[key] = rmap +- if isinstance(target_pesid, list): +- rmap[source_pesid].update(target_pesid) +- else: +- rmap[source_pesid].add(target_pesid) +- +- def get_mappings(self, src_major_version, dst_major_version): +- """ +- Return the list of RepoMapEntry objects for the specified upgrade path. +- +- IOW, the whole mapping for specified IPU. +- """ +- key = '{}:{}'.format(src_major_version, dst_major_version) +- rmap = self.mapping.get(key, None) +- if not rmap: +- return None +- map_list = [] +- for src_pesid in sorted(rmap.keys()): +- map_list.append(RepoMapEntry(source=src_pesid, target=sorted(rmap[src_pesid]))) +- return map_list +- +- @staticmethod +- def load_from_dict(data): +- if data['version_format'] != RepoMapData.VERSION_FORMAT: +- raise ValueError( +- 'The obtained repomap data has unsupported version of format.' +- ' Get {} required {}' +- .format(data['version_format'], RepoMapData.VERSION_FORMAT) +- ) +- +- repomap = RepoMapData() +- +- # Load reposiories +- existing_pesids = set() +- for repo_family in data['repositories']: +- existing_pesids.add(repo_family['pesid']) +- for repo in repo_family['entries']: +- repomap.add_repository(repo, repo_family['pesid']) +- +- # Load mappings +- for mapping in data['mapping']: +- for entry in mapping['entries']: +- if not isinstance(entry['target'], list): +- raise ValueError( +- 'The target field of a mapping entry is not a list: {}' +- .format(entry) +- ) +- +- for pesid in [entry['source']] + entry['target']: +- if pesid not in existing_pesids: +- raise ValueError( +- 'The {} pesid is not related to any repository.' 
+- .format(pesid) +- ) +- repomap.add_mapping( +- source_major_version=mapping['source_major_version'], +- target_major_version=mapping['target_major_version'], +- source_pesid=entry['source'], +- target_pesid=entry['target'], +- ) +- return repomap +- +- + def _inhibit_upgrade(msg): + rpmname = 'leapp-upgrade-el{}toel{}'.format(get_source_major_version(), get_target_major_version()) + hint = ( +diff --git a/repos/system_upgrade/common/actors/rpmtransactionconfigtaskscollector/libraries/rpmtransactionconfigtaskscollector.py b/repos/system_upgrade/common/actors/rpmtransactionconfigtaskscollector/libraries/rpmtransactionconfigtaskscollector.py +index fb6ae8ff..70f07387 100644 +--- a/repos/system_upgrade/common/actors/rpmtransactionconfigtaskscollector/libraries/rpmtransactionconfigtaskscollector.py ++++ b/repos/system_upgrade/common/actors/rpmtransactionconfigtaskscollector/libraries/rpmtransactionconfigtaskscollector.py +@@ -18,21 +18,37 @@ def load_tasks_file(path, logger): + return [] + + ++def filter_out(installed_rpm_names, to_filter, debug_msg): ++ # These are the packages that aren't installed on the system. ++ filtered_ok = [pkg for pkg in to_filter if pkg not in installed_rpm_names] ++ ++ # And these ones are the ones that are. ++ filtered_out = list(set(to_filter) - set(filtered_ok)) ++ if filtered_out: ++ api.current_logger().debug( ++ debug_msg + ++ '\n- ' + '\n- '.join(filtered_out) ++ ) ++ # We may want to use either of the two sets. 
++ return filtered_ok, filtered_out ++ ++ + def load_tasks(base_dir, logger): + # Loads configuration files to_install, to_keep, and to_remove from the given base directory + rpms = next(api.consume(InstalledRedHatSignedRPM)) + rpm_names = [rpm.name for rpm in rpms.items] ++ + to_install = load_tasks_file(os.path.join(base_dir, 'to_install'), logger) ++ install_debug_msg = 'The following packages from "to_install" file will be ignored as they are already installed:' + # we do not want to put into rpm transaction what is already installed (it will go to "to_upgrade" bucket) +- to_install_filtered = [pkg for pkg in to_install if pkg not in rpm_names] ++ to_install_filtered, _ = filter_out(rpm_names, to_install, install_debug_msg) + +- filtered = set(to_install) - set(to_install_filtered) +- if filtered: +- api.current_logger().debug( +- 'The following packages from "to_install" file will be ignored as they are already installed:' +- '\n- ' + '\n- '.join(filtered)) ++ to_reinstall = load_tasks_file(os.path.join(base_dir, 'to_reinstall'), logger) ++ reinstall_debug_msg = 'The following packages from "to_reinstall" file will be ignored as they are not installed:' ++ _, to_reinstall_filtered = filter_out(rpm_names, to_reinstall, reinstall_debug_msg) + + return RpmTransactionTasks( + to_install=to_install_filtered, ++ to_reinstall=to_reinstall_filtered, + to_keep=load_tasks_file(os.path.join(base_dir, 'to_keep'), logger), + to_remove=load_tasks_file(os.path.join(base_dir, 'to_remove'), logger)) +diff --git a/repos/system_upgrade/common/actors/scanvendorrepofiles/actor.py b/repos/system_upgrade/common/actors/scanvendorrepofiles/actor.py +new file mode 100644 +index 00000000..a5e481cb +--- /dev/null ++++ b/repos/system_upgrade/common/actors/scanvendorrepofiles/actor.py +@@ -0,0 +1,26 @@ ++from leapp.actors import Actor ++from leapp.libraries.actor import scanvendorrepofiles ++from leapp.models import ( ++ CustomTargetRepositoryFile, ++ ActiveVendorList, ++ 
VendorCustomTargetRepositoryList, ++) ++from leapp.tags import FactsPhaseTag, IPUWorkflowTag ++ ++ ++class ScanVendorRepofiles(Actor): ++ """ ++ Load and produce custom repository data from vendor-provided files. ++ Only those vendors whose source system repoids were found on the system will be included. ++ """ ++ ++ name = "scan_vendor_repofiles" ++ consumes = ActiveVendorList ++ produces = ( ++ CustomTargetRepositoryFile, ++ VendorCustomTargetRepositoryList, ++ ) ++ tags = (FactsPhaseTag, IPUWorkflowTag) ++ ++ def process(self): ++ scanvendorrepofiles.process() +diff --git a/repos/system_upgrade/common/actors/scanvendorrepofiles/libraries/scanvendorrepofiles.py b/repos/system_upgrade/common/actors/scanvendorrepofiles/libraries/scanvendorrepofiles.py +new file mode 100644 +index 00000000..84392101 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/scanvendorrepofiles/libraries/scanvendorrepofiles.py +@@ -0,0 +1,72 @@ ++import os ++ ++from leapp.libraries.common import repofileutils ++from leapp.libraries.stdlib import api ++from leapp.models import ( ++ CustomTargetRepository, ++ CustomTargetRepositoryFile, ++ ActiveVendorList, ++ VendorCustomTargetRepositoryList, ++) ++ ++ ++VENDORS_DIR = "/etc/leapp/files/vendors.d/" ++REPOFILE_SUFFIX = ".repo" ++ ++ ++def process(): ++ """ ++ Produce CustomTargetRepository msgs for the vendor repo files inside the ++ . ++ ++ The CustomTargetRepository messages are produced only if a "from" vendor repository ++ listed indide its map matched one of the repositories active on the system. ++ """ ++ if not os.path.isdir(VENDORS_DIR): ++ api.current_logger().debug( ++ "The {} directory doesn't exist. Nothing to do.".format(VENDORS_DIR) ++ ) ++ return ++ ++ for repofile_name in os.listdir(VENDORS_DIR): ++ if not repofile_name.endswith(REPOFILE_SUFFIX): ++ continue ++ # Cut the .repo part to get only the name. 
++ vendor_name = repofile_name[:-5] ++ ++ active_vendors = [] ++ for vendor_list in api.consume(ActiveVendorList): ++ active_vendors.extend(vendor_list.data) ++ ++ api.current_logger().debug("Active vendor list: {}".format(active_vendors)) ++ ++ if vendor_name not in active_vendors: ++ api.current_logger().debug( ++ "Vendor {} not in active list, skipping".format(vendor_name) ++ ) ++ continue ++ ++ full_repo_path = os.path.join(VENDORS_DIR, repofile_name) ++ parsed_repofile = repofileutils.parse_repofile(full_repo_path) ++ api.current_logger().debug( ++ "Vendor {} found in active list, processing file {}".format(vendor_name, repofile_name) ++ ) ++ ++ api.produce(CustomTargetRepositoryFile(file=full_repo_path)) ++ ++ custom_vendor_repos = [ ++ CustomTargetRepository( ++ repoid=repo.repoid, ++ name=repo.name, ++ baseurl=repo.baseurl, ++ enabled=repo.enabled, ++ ) for repo in parsed_repofile.data ++ ] ++ ++ api.produce( ++ VendorCustomTargetRepositoryList(vendor=vendor_name, repos=custom_vendor_repos) ++ ) ++ ++ api.current_logger().info( ++ "The {} directory exists, vendor repositories loaded.".format(VENDORS_DIR) ++ ) +diff --git a/repos/system_upgrade/common/actors/scanvendorrepofiles/tests/test_scanvendorrepofiles.py b/repos/system_upgrade/common/actors/scanvendorrepofiles/tests/test_scanvendorrepofiles.py +new file mode 100644 +index 00000000..cb5c7ab7 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/scanvendorrepofiles/tests/test_scanvendorrepofiles.py +@@ -0,0 +1,131 @@ ++import os ++ ++from leapp.libraries.actor import scancustomrepofile ++from leapp.libraries.common import repofileutils ++from leapp.libraries.common.testutils import produce_mocked ++from leapp.libraries.stdlib import api ++ ++from leapp.models import (CustomTargetRepository, CustomTargetRepositoryFile, ++ RepositoryData, RepositoryFile) ++ ++ ++_REPODATA = [ ++ RepositoryData(repoid="repo1", name="repo1name", baseurl="repo1url", enabled=True), ++ RepositoryData(repoid="repo2", 
name="repo2name", baseurl="repo2url", enabled=False), ++ RepositoryData(repoid="repo3", name="repo3name", enabled=True), ++ RepositoryData(repoid="repo4", name="repo4name", mirrorlist="mirror4list", enabled=True), ++] ++ ++_CUSTOM_REPOS = [ ++ CustomTargetRepository(repoid="repo1", name="repo1name", baseurl="repo1url", enabled=True), ++ CustomTargetRepository(repoid="repo2", name="repo2name", baseurl="repo2url", enabled=False), ++ CustomTargetRepository(repoid="repo3", name="repo3name", baseurl=None, enabled=True), ++ CustomTargetRepository(repoid="repo4", name="repo4name", baseurl=None, enabled=True), ++] ++ ++_CUSTOM_REPO_FILE_MSG = CustomTargetRepositoryFile(file=scancustomrepofile.CUSTOM_REPO_PATH) ++ ++ ++_TESTING_REPODATA = [ ++ RepositoryData(repoid="repo1-stable", name="repo1name", baseurl="repo1url", enabled=True), ++ RepositoryData(repoid="repo2-testing", name="repo2name", baseurl="repo2url", enabled=False), ++ RepositoryData(repoid="repo3-stable", name="repo3name", enabled=False), ++ RepositoryData(repoid="repo4-testing", name="repo4name", mirrorlist="mirror4list", enabled=True), ++] ++ ++_TESTING_CUSTOM_REPOS_STABLE_TARGET = [ ++ CustomTargetRepository(repoid="repo1-stable", name="repo1name", baseurl="repo1url", enabled=True), ++ CustomTargetRepository(repoid="repo2-testing", name="repo2name", baseurl="repo2url", enabled=False), ++ CustomTargetRepository(repoid="repo3-stable", name="repo3name", baseurl=None, enabled=False), ++ CustomTargetRepository(repoid="repo4-testing", name="repo4name", baseurl=None, enabled=True), ++] ++ ++_TESTING_CUSTOM_REPOS_BETA_TARGET = [ ++ CustomTargetRepository(repoid="repo1-stable", name="repo1name", baseurl="repo1url", enabled=True), ++ CustomTargetRepository(repoid="repo2-testing", name="repo2name", baseurl="repo2url", enabled=True), ++ CustomTargetRepository(repoid="repo3-stable", name="repo3name", baseurl=None, enabled=False), ++ CustomTargetRepository(repoid="repo4-testing", name="repo4name", baseurl=None, 
enabled=True), ++] ++ ++_PROCESS_STABLE_TARGET = "stable" ++_PROCESS_BETA_TARGET = "beta" ++ ++ ++class LoggerMocked(object): ++ def __init__(self): ++ self.infomsg = None ++ self.debugmsg = None ++ ++ def info(self, msg): ++ self.infomsg = msg ++ ++ def debug(self, msg): ++ self.debugmsg = msg ++ ++ def __call__(self): ++ return self ++ ++ ++def test_no_repofile(monkeypatch): ++ monkeypatch.setattr(os.path, 'isfile', lambda dummy: False) ++ monkeypatch.setattr(api, 'produce', produce_mocked()) ++ monkeypatch.setattr(api, 'current_logger', LoggerMocked()) ++ scancustomrepofile.process() ++ msg = "The {} file doesn't exist. Nothing to do.".format(scancustomrepofile.CUSTOM_REPO_PATH) ++ assert api.current_logger.debugmsg == msg ++ assert not api.produce.called ++ ++ ++def test_valid_repofile_exists(monkeypatch): ++ def _mocked_parse_repofile(fpath): ++ return RepositoryFile(file=fpath, data=_REPODATA) ++ monkeypatch.setattr(os.path, 'isfile', lambda dummy: True) ++ monkeypatch.setattr(api, 'produce', produce_mocked()) ++ monkeypatch.setattr(repofileutils, 'parse_repofile', _mocked_parse_repofile) ++ monkeypatch.setattr(api, 'current_logger', LoggerMocked()) ++ scancustomrepofile.process() ++ msg = "The {} file exists, custom repositories loaded.".format(scancustomrepofile.CUSTOM_REPO_PATH) ++ assert api.current_logger.infomsg == msg ++ assert api.produce.called == len(_CUSTOM_REPOS) + 1 ++ assert _CUSTOM_REPO_FILE_MSG in api.produce.model_instances ++ for crepo in _CUSTOM_REPOS: ++ assert crepo in api.produce.model_instances ++ ++ ++def test_target_stable_repos(monkeypatch): ++ def _mocked_parse_repofile(fpath): ++ return RepositoryFile(file=fpath, data=_TESTING_REPODATA) ++ monkeypatch.setattr(os.path, 'isfile', lambda dummy: True) ++ monkeypatch.setattr(api, 'produce', produce_mocked()) ++ monkeypatch.setattr(repofileutils, 'parse_repofile', _mocked_parse_repofile) ++ ++ scancustomrepofile.process(_PROCESS_STABLE_TARGET) ++ assert api.produce.called == 
len(_TESTING_CUSTOM_REPOS_STABLE_TARGET) + 1 ++ for crepo in _TESTING_CUSTOM_REPOS_STABLE_TARGET: ++ assert crepo in api.produce.model_instances ++ ++ ++def test_target_beta_repos(monkeypatch): ++ def _mocked_parse_repofile(fpath): ++ return RepositoryFile(file=fpath, data=_TESTING_REPODATA) ++ monkeypatch.setattr(os.path, 'isfile', lambda dummy: True) ++ monkeypatch.setattr(api, 'produce', produce_mocked()) ++ monkeypatch.setattr(repofileutils, 'parse_repofile', _mocked_parse_repofile) ++ ++ scancustomrepofile.process(_PROCESS_BETA_TARGET) ++ assert api.produce.called == len(_TESTING_CUSTOM_REPOS_BETA_TARGET) + 1 ++ for crepo in _TESTING_CUSTOM_REPOS_BETA_TARGET: ++ assert crepo in api.produce.model_instances ++ ++ ++def test_empty_repofile_exists(monkeypatch): ++ def _mocked_parse_repofile(fpath): ++ return RepositoryFile(file=fpath, data=[]) ++ monkeypatch.setattr(os.path, 'isfile', lambda dummy: True) ++ monkeypatch.setattr(api, 'produce', produce_mocked()) ++ monkeypatch.setattr(repofileutils, 'parse_repofile', _mocked_parse_repofile) ++ monkeypatch.setattr(api, 'current_logger', LoggerMocked()) ++ scancustomrepofile.process() ++ msg = "The {} file exists, but is empty. 
Nothing to do.".format(scancustomrepofile.CUSTOM_REPO_PATH) ++ assert api.current_logger.infomsg == msg ++ assert not api.produce.called +diff --git a/repos/system_upgrade/common/actors/setuptargetrepos/actor.py b/repos/system_upgrade/common/actors/setuptargetrepos/actor.py +index 767fa00c..bc1d5bfa 100644 +--- a/repos/system_upgrade/common/actors/setuptargetrepos/actor.py ++++ b/repos/system_upgrade/common/actors/setuptargetrepos/actor.py +@@ -10,7 +10,8 @@ from leapp.models import ( + RHUIInfo, + SkippedRepositories, + TargetRepositories, +- UsedRepositories ++ UsedRepositories, ++ VendorCustomTargetRepositoryList + ) + from leapp.tags import FactsPhaseTag, IPUWorkflowTag + +@@ -32,7 +33,8 @@ class SetupTargetRepos(Actor): + RepositoriesFacts, + RepositoriesBlacklisted, + RHUIInfo, +- UsedRepositories) ++ UsedRepositories, ++ VendorCustomTargetRepositoryList) + produces = (TargetRepositories, SkippedRepositories) + tags = (IPUWorkflowTag, FactsPhaseTag) + +diff --git a/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py b/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py +index 4b8405d0..827eb262 100644 +--- a/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py ++++ b/repos/system_upgrade/common/actors/setuptargetrepos/libraries/setuptargetrepos.py +@@ -1,6 +1,7 @@ + + from leapp.libraries.actor import setuptargetrepos_repomap + from leapp.libraries.common.config.version import get_source_major_version ++from leapp.libraries.common.repomaputils import combine_repomap_messages + from leapp.libraries.stdlib import api + from leapp.models import ( + CustomTargetRepository, +@@ -13,7 +14,8 @@ from leapp.models import ( + RHUIInfo, + SkippedRepositories, + TargetRepositories, +- UsedRepositories ++ UsedRepositories, ++ VendorCustomTargetRepositoryList + ) + + +@@ -75,16 +77,65 @@ def _get_mapped_repoids(repomap, src_repoids): + return mapped_repoids + + ++def 
_get_vendor_custom_repos(enabled_repos, mapping_list): ++ # Look at what source repos from the vendor mapping were enabled. ++ # If any of them are in beta, include vendor's custom repos in the list. ++ # Otherwise skip them. ++ ++ result = [] ++ ++ # Build a dict of vendor mappings for easy lookup. ++ map_dict = {mapping.vendor: mapping for mapping in mapping_list if mapping.vendor} ++ ++ for vendor_repolist in api.consume(VendorCustomTargetRepositoryList): ++ vendor_repomap = map_dict[vendor_repolist.vendor] ++ ++ # Find the beta channel repositories for the vendor. ++ beta_repos = [ ++ x.repoid for x in vendor_repomap.repositories if x.channel == "beta" ++ ] ++ api.current_logger().debug( ++ "Vendor {} beta repos: {}".format(vendor_repolist.vendor, beta_repos) ++ ) ++ ++ # Are any of the beta repos present and enabled on the system? ++ if any(rep in beta_repos for rep in enabled_repos): ++ # If so, use all repos including beta in the upgrade. ++ vendor_repos = vendor_repolist.repos ++ else: ++ # Otherwise filter beta repos out. ++ vendor_repos = [repo for repo in vendor_repolist.repos if repo.repoid not in beta_repos] ++ ++ result.extend([CustomTargetRepository( ++ repoid=repo.repoid, ++ name=repo.name, ++ baseurl=repo.baseurl, ++ enabled=repo.enabled, ++ ) for repo in vendor_repos]) ++ ++ return result ++ ++ + def process(): + # Load relevant data from messages + used_repoids_dict = _get_used_repo_dict() + enabled_repoids = _get_enabled_repoids() + excluded_repoids = _get_blacklisted_repoids() ++ ++ # Remember that we can't just grab one message, each vendor can have its own mapping. 
++ repo_mapping_list = list(api.consume(RepositoriesMapping)) ++ + custom_repos = _get_custom_target_repos() + repoids_from_installed_packages = _get_repoids_from_installed_packages() ++ vendor_repos = _get_vendor_custom_repos(enabled_repoids, repo_mapping_list) ++ custom_repos.extend(vendor_repos) ++ ++ api.current_logger().debug( ++ "Vendor repolist: {}".format([repo.repoid for repo in vendor_repos]) ++ ) + + # Setup repomap handler +- repo_mappig_msg = next(api.consume(RepositoriesMapping), RepositoriesMapping()) ++ repo_mappig_msg = combine_repomap_messages(repo_mapping_list) + rhui_info = next(api.consume(RHUIInfo), RHUIInfo(provider='')) + repomap = setuptargetrepos_repomap.RepoMapDataHandler(repo_mappig_msg, cloud_provider=rhui_info.provider) + +@@ -140,6 +191,10 @@ def process(): + custom_repos = [repo for repo in custom_repos if repo.repoid not in excluded_repoids] + custom_repos = sorted(custom_repos, key=lambda x: x.repoid) + ++ api.current_logger().debug( ++ "Final repolist: {}".format([repo.repoid for repo in custom_repos]) ++ ) ++ + # produce message about skipped repositories + enabled_repoids_with_mapping = _get_mapped_repoids(repomap, enabled_repoids) + skipped_repoids = enabled_repoids & set(used_repoids_dict.keys()) - enabled_repoids_with_mapping +diff --git a/repos/system_upgrade/common/actors/systemfacts/actor.py b/repos/system_upgrade/common/actors/systemfacts/actor.py +index 59b12c87..85d4a09e 100644 +--- a/repos/system_upgrade/common/actors/systemfacts/actor.py ++++ b/repos/system_upgrade/common/actors/systemfacts/actor.py +@@ -47,7 +47,7 @@ class SystemFactsActor(Actor): + GrubCfgBios, + Report + ) +- tags = (IPUWorkflowTag, FactsPhaseTag,) ++ tags = (IPUWorkflowTag, FactsPhaseTag.Before,) + + def process(self): + self.produce(systemfacts.get_sysctls_status()) +diff --git a/repos/system_upgrade/common/actors/vendorreposignaturescanner/actor.py b/repos/system_upgrade/common/actors/vendorreposignaturescanner/actor.py +new file mode 100644 
+index 00000000..dbf86974 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/vendorreposignaturescanner/actor.py +@@ -0,0 +1,72 @@ ++import os ++ ++from leapp.actors import Actor ++from leapp.models import VendorSignatures, ActiveVendorList ++from leapp.tags import FactsPhaseTag, IPUWorkflowTag ++ ++ ++VENDORS_DIR = "/etc/leapp/files/vendors.d/" ++SIGFILE_SUFFIX = ".sigs" ++ ++ ++class VendorRepoSignatureScanner(Actor): ++ """ ++ Produce VendorSignatures messages for the vendor signature files inside the ++ . ++ These messages are used to extend the list of pakcages Leapp will consider ++ signed and will attempt to upgrade. ++ ++ The messages are produced only if a "from" vendor repository ++ listed indide its map matched one of the repositories active on the system. ++ """ ++ ++ name = 'vendor_repo_signature_scanner' ++ consumes = (ActiveVendorList) ++ produces = (VendorSignatures) ++ tags = (IPUWorkflowTag, FactsPhaseTag.Before) ++ ++ def process(self): ++ if not os.path.isdir(VENDORS_DIR): ++ self.log.debug( ++ "The {} directory doesn't exist. Nothing to do.".format(VENDORS_DIR) ++ ) ++ return ++ ++ active_vendors = [] ++ for vendor_list in self.consume(ActiveVendorList): ++ active_vendors.extend(vendor_list.data) ++ ++ self.log.debug( ++ "Active vendor list: {}".format(active_vendors) ++ ) ++ ++ for sigfile_name in os.listdir(VENDORS_DIR): ++ if not sigfile_name.endswith(SIGFILE_SUFFIX): ++ continue ++ # Cut the suffix part to get only the name. 
++ vendor_name = sigfile_name[:-5] ++ ++ if vendor_name not in active_vendors: ++ self.log.debug( ++ "Vendor {} not in active list, skipping".format(vendor_name) ++ ) ++ continue ++ ++ self.log.debug( ++ "Vendor {} found in active list, processing file {}".format(vendor_name, sigfile_name) ++ ) ++ ++ full_sigfile_path = os.path.join(VENDORS_DIR, sigfile_name) ++ with open(full_sigfile_path) as f: ++ signatures = [line for line in f.read().splitlines() if line] ++ ++ self.produce( ++ VendorSignatures( ++ vendor=vendor_name, ++ sigs=signatures, ++ ) ++ ) ++ ++ self.log.info( ++ "The {} directory exists, vendor signatures loaded.".format(VENDORS_DIR) ++ ) +diff --git a/repos/system_upgrade/common/actors/vendorrepositoriesmapping/actor.py b/repos/system_upgrade/common/actors/vendorrepositoriesmapping/actor.py +new file mode 100644 +index 00000000..13256476 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/vendorrepositoriesmapping/actor.py +@@ -0,0 +1,19 @@ ++from leapp.actors import Actor ++# from leapp.libraries.common.repomaputils import scan_vendor_repomaps, VENDOR_REPOMAP_DIR ++from leapp.libraries.actor.vendorrepositoriesmapping import scan_vendor_repomaps ++from leapp.models import VendorSourceRepos, RepositoriesMapping ++from leapp.tags import FactsPhaseTag, IPUWorkflowTag ++ ++ ++class VendorRepositoriesMapping(Actor): ++ """ ++ Scan the vendor repository mapping files and provide the data to other actors. 
++ """ ++ ++ name = "vendor_repositories_mapping" ++ consumes = () ++ produces = (RepositoriesMapping, VendorSourceRepos,) ++ tags = (IPUWorkflowTag, FactsPhaseTag.Before) ++ ++ def process(self): ++ scan_vendor_repomaps() +diff --git a/repos/system_upgrade/common/actors/vendorrepositoriesmapping/libraries/vendorrepositoriesmapping.py b/repos/system_upgrade/common/actors/vendorrepositoriesmapping/libraries/vendorrepositoriesmapping.py +new file mode 100644 +index 00000000..6a41d4e5 +--- /dev/null ++++ b/repos/system_upgrade/common/actors/vendorrepositoriesmapping/libraries/vendorrepositoriesmapping.py +@@ -0,0 +1,92 @@ ++import os ++import json ++ ++from leapp.libraries.common import fetch ++from leapp.libraries.common.config.version import get_target_major_version, get_source_major_version ++from leapp.libraries.common.repomaputils import RepoMapData ++from leapp.libraries.stdlib import api ++from leapp.models import VendorSourceRepos, RepositoriesMapping ++from leapp.models.fields import ModelViolationError ++from leapp.exceptions import StopActorExecutionError ++ ++ ++VENDORS_DIR = "/etc/leapp/files/vendors.d" ++"""The folder containing the vendor repository mapping files.""" ++ ++ ++def inhibit_upgrade(msg): ++ raise StopActorExecutionError( ++ msg, ++ details={'hint': ('Read documentation at the following link for more' ++ ' information about how to retrieve the valid file:' ++ ' https://access.redhat.com/articles/3664871')}) ++ ++ ++def read_repofile(repofile, repodir): ++ try: ++ return json.loads(fetch.read_or_fetch(repofile, directory=repodir, allow_download=False)) ++ except ValueError: ++ # The data does not contain a valid json ++ inhibit_upgrade('The repository mapping file is invalid: file does not contain a valid JSON object.') ++ return None ++ ++ ++def read_repomap_file(repomap_file, read_repofile_func, vendor_name): ++ json_data = read_repofile_func(repomap_file, VENDORS_DIR) ++ try: ++ repomap_data = RepoMapData.load_from_dict(json_data) ++ ++ 
source_major = get_source_major_version() ++ target_major = get_target_major_version() ++ ++ api.produce(VendorSourceRepos( ++ vendor=vendor_name, ++ source_repoids=repomap_data.get_version_repoids(source_major) ++ )) ++ ++ mapping = repomap_data.get_mappings(source_major, target_major) ++ valid_major_versions = [source_major, target_major] ++ ++ api.produce(RepositoriesMapping( ++ mapping=mapping, ++ repositories=repomap_data.get_repositories(valid_major_versions), ++ vendor=vendor_name ++ )) ++ except ModelViolationError as err: ++ err_message = ( ++ 'The repository mapping file is invalid: ' ++ 'the JSON does not match required schema (wrong field type/value): {}. ' ++ 'Ensure that the current upgrade path is correct and is present in the mappings: {} -> {}' ++ .format(err, source_major, target_major) ++ ) ++ inhibit_upgrade(err_message) ++ except KeyError as err: ++ inhibit_upgrade( ++ 'The repository mapping file is invalid: the JSON is missing a required field: {}'.format(err)) ++ except ValueError as err: ++ # The error should contain enough information, so we do not need to clarify it further ++ inhibit_upgrade('The repository mapping file is invalid: {}'.format(err)) ++ ++ ++def scan_vendor_repomaps(read_repofile_func=read_repofile): ++ """ ++ Scan the repository mapping file and produce RepositoriesMapping msg. ++ ++ See the description of the actor for more details. ++ """ ++ ++ map_json_suffix = "_map.json" ++ if os.path.isdir(VENDORS_DIR): ++ vendor_mapfiles = list(filter(lambda vfile: map_json_suffix in vfile, os.listdir(VENDORS_DIR))) ++ ++ for mapfile in vendor_mapfiles: ++ read_repomap_file(mapfile, read_repofile_func, mapfile[:-len(map_json_suffix)]) ++ else: ++ api.current_logger().debug( ++ "The {} directory doesn't exist. 
Nothing to do.".format(VENDORS_DIR) ++ ) ++ # vendor_repomap_collection = scan_vendor_repomaps(VENDOR_REPOMAP_DIR) ++ # if vendor_repomap_collection: ++ # self.produce(vendor_repomap_collection) ++ # for repomap in vendor_repomap_collection.maps: ++ # self.produce(repomap) +diff --git a/repos/system_upgrade/common/files/rhel_upgrade.py b/repos/system_upgrade/common/files/rhel_upgrade.py +index 34f7b8f9..acba532c 100644 +--- a/repos/system_upgrade/common/files/rhel_upgrade.py ++++ b/repos/system_upgrade/common/files/rhel_upgrade.py +@@ -184,6 +184,7 @@ class RhelUpgradeCommand(dnf.cli.Command): + to_install = self.plugin_data['pkgs_info']['to_install'] + to_remove = self.plugin_data['pkgs_info']['to_remove'] + to_upgrade = self.plugin_data['pkgs_info']['to_upgrade'] ++ to_reinstall = self.plugin_data['pkgs_info']['to_reinstall'] + + # Modules to enable + self._process_entities(entities=[available_modules_to_enable], +@@ -196,6 +197,9 @@ class RhelUpgradeCommand(dnf.cli.Command): + self._process_entities(entities=to_install, op=self.base.install, entity_name='Package') + # Packages to be upgraded + self._process_entities(entities=to_upgrade, op=self.base.upgrade, entity_name='Package') ++ # Packages to be reinstalled ++ self._process_entities(entities=to_reinstall, op=self.base.reinstall, entity_name='Package') ++ + self.base.distro_sync() + + if self.opts.tid[0] == 'check': +diff --git a/repos/system_upgrade/common/libraries/dnfplugin.py b/repos/system_upgrade/common/libraries/dnfplugin.py +index ffde211f..8533fb04 100644 +--- a/repos/system_upgrade/common/libraries/dnfplugin.py ++++ b/repos/system_upgrade/common/libraries/dnfplugin.py +@@ -92,6 +92,7 @@ def build_plugin_data(target_repoids, debug, test, tasks, on_aws): + 'to_install': tasks.to_install, + 'to_remove': tasks.to_remove, + 'to_upgrade': tasks.to_upgrade, ++ 'to_reinstall': tasks.to_reinstall, + 'modules_to_enable': ['{}:{}'.format(m.name, m.stream) for m in tasks.modules_to_enable], + }, + 'dnf_conf': 
{ +diff --git a/repos/system_upgrade/common/libraries/fetch.py b/repos/system_upgrade/common/libraries/fetch.py +index 1ca26170..3bfe12e8 100644 +--- a/repos/system_upgrade/common/libraries/fetch.py ++++ b/repos/system_upgrade/common/libraries/fetch.py +@@ -142,7 +142,8 @@ def load_data_asset(actor_requesting_asset, + asset_filename, + asset_fulltext_name, + docs_url, +- docs_title): ++ docs_title, ++ asset_directory="/etc/leapp/files"): + """ + Load the content of the data asset with given asset_filename + +@@ -174,7 +175,7 @@ def load_data_asset(actor_requesting_asset, + + try: + # The asset family ID has the form (major, minor), include only `major` in the URL +- raw_asset_contents = read_or_fetch(asset_filename, data_stream=data_stream_major) ++ raw_asset_contents = read_or_fetch(asset_filename, directory=asset_directory, data_stream=data_stream_major) + asset_contents = json.loads(raw_asset_contents) + except ValueError: + msg = 'The {0} file (at {1}) does not contain a valid JSON object.'.format(asset_fulltext_name, asset_filename) +diff --git a/repos/system_upgrade/common/libraries/repomaputils.py b/repos/system_upgrade/common/libraries/repomaputils.py +new file mode 100644 +index 00000000..86473903 +--- /dev/null ++++ b/repos/system_upgrade/common/libraries/repomaputils.py +@@ -0,0 +1,140 @@ ++from collections import defaultdict ++from leapp.models import PESIDRepositoryEntry, RepoMapEntry, RepositoriesMapping ++ ++class RepoMapData(object): ++ VERSION_FORMAT = '1.2.0' ++ ++ def __init__(self): ++ self.repositories = [] ++ self.mapping = {} ++ ++ def add_repository(self, data, pesid): ++ """ ++ Add new PESIDRepositoryEntry with given pesid from the provided dictionary. ++ ++ :param data: A dict containing the data of the added repository. The dictionary structure corresponds ++ to the repositories entries in the repository mapping JSON schema. 
++ :type data: Dict[str, str] ++ :param pesid: PES id of the repository family that the newly added repository belongs to. ++ :type pesid: str ++ """ ++ self.repositories.append(PESIDRepositoryEntry( ++ repoid=data['repoid'], ++ channel=data['channel'], ++ rhui=data.get('rhui', ''), ++ repo_type=data['repo_type'], ++ arch=data['arch'], ++ major_version=data['major_version'], ++ pesid=pesid ++ )) ++ ++ def get_repositories(self, valid_major_versions): ++ """ ++ Return the list of PESIDRepositoryEntry object matching the specified major versions. ++ """ ++ return [repo for repo in self.repositories if repo.major_version in valid_major_versions] ++ ++ def get_version_repoids(self, major_version): ++ """ ++ Return the list of repository ID strings for repositories matching the specified major version. ++ """ ++ return [repo.repoid for repo in self.repositories if repo.major_version == major_version] ++ ++ def add_mapping(self, source_major_version, target_major_version, source_pesid, target_pesid): ++ """ ++ Add a new mapping entry that is mapping the source pesid to the destination pesid(s), ++ relevant in an IPU from the supplied source major version to the supplied target ++ major version. ++ ++ :param str source_major_version: Specifies the major version of the source system ++ for which the added mapping applies. ++ :param str target_major_version: Specifies the major version of the target system ++ for which the added mapping applies. ++ :param str source_pesid: PESID of the source repository. ++ :param Union[str|List[str]] target_pesid: A single target PESID or a list of target ++ PESIDs of the added mapping. ++ """ ++ # NOTE: it could be more simple, but I prefer to be sure the input data ++ # contains just one map per source PESID. 
++ key = '{}:{}'.format(source_major_version, target_major_version) ++ rmap = self.mapping.get(key, defaultdict(set)) ++ self.mapping[key] = rmap ++ if isinstance(target_pesid, list): ++ rmap[source_pesid].update(target_pesid) ++ else: ++ rmap[source_pesid].add(target_pesid) ++ ++ def get_mappings(self, src_major_version, dst_major_version): ++ """ ++ Return the list of RepoMapEntry objects for the specified upgrade path. ++ ++ IOW, the whole mapping for specified IPU. ++ """ ++ key = '{}:{}'.format(src_major_version, dst_major_version) ++ rmap = self.mapping.get(key, None) ++ if not rmap: ++ return None ++ map_list = [] ++ for src_pesid in sorted(rmap.keys()): ++ map_list.append(RepoMapEntry(source=src_pesid, target=sorted(rmap[src_pesid]))) ++ return map_list ++ ++ @staticmethod ++ def load_from_dict(data): ++ if data['version_format'] != RepoMapData.VERSION_FORMAT: ++ raise ValueError( ++ 'The obtained repomap data has unsupported version of format.' ++ ' Get {} required {}' ++ .format(data['version_format'], RepoMapData.VERSION_FORMAT) ++ ) ++ ++ repomap = RepoMapData() ++ ++ # Load reposiories ++ existing_pesids = set() ++ for repo_family in data['repositories']: ++ existing_pesids.add(repo_family['pesid']) ++ for repo in repo_family['entries']: ++ repomap.add_repository(repo, repo_family['pesid']) ++ ++ # Load mappings ++ for mapping in data['mapping']: ++ for entry in mapping['entries']: ++ if not isinstance(entry['target'], list): ++ raise ValueError( ++ 'The target field of a mapping entry is not a list: {}' ++ .format(entry) ++ ) ++ ++ for pesid in [entry['source']] + entry['target']: ++ if pesid not in existing_pesids: ++ raise ValueError( ++ 'The {} pesid is not related to any repository.' 
++ .format(pesid) ++ ) ++ repomap.add_mapping( ++ source_major_version=mapping['source_major_version'], ++ target_major_version=mapping['target_major_version'], ++ source_pesid=entry['source'], ++ target_pesid=entry['target'], ++ ) ++ return repomap ++ ++def combine_repomap_messages(mapping_list): ++ """ ++ Combine multiple RepositoryMapping messages into one. ++ Needed because we might get more than one message if there are vendors present. ++ """ ++ combined_mapping = [] ++ combined_repositories = [] ++ # Depending on whether there are any vendors present, we might get more than one message. ++ for msg in mapping_list: ++ combined_mapping.extend(msg.mapping) ++ combined_repositories.extend(msg.repositories) ++ ++ combined_repomapping = RepositoriesMapping( ++ mapping=combined_mapping, ++ repositories=combined_repositories ++ ) ++ ++ return combined_repomapping +diff --git a/repos/system_upgrade/common/models/activevendorlist.py b/repos/system_upgrade/common/models/activevendorlist.py +new file mode 100644 +index 00000000..de4056fb +--- /dev/null ++++ b/repos/system_upgrade/common/models/activevendorlist.py +@@ -0,0 +1,7 @@ ++from leapp.models import Model, fields ++from leapp.topics import VendorTopic ++ ++ ++class ActiveVendorList(Model): ++ topic = VendorTopic ++ data = fields.List(fields.String()) +diff --git a/repos/system_upgrade/common/models/repositoriesmap.py b/repos/system_upgrade/common/models/repositoriesmap.py +index 12639e19..da4f7aac 100644 +--- a/repos/system_upgrade/common/models/repositoriesmap.py ++++ b/repos/system_upgrade/common/models/repositoriesmap.py +@@ -91,3 +91,4 @@ class RepositoriesMapping(Model): + + mapping = fields.List(fields.Model(RepoMapEntry), default=[]) + repositories = fields.List(fields.Model(PESIDRepositoryEntry), default=[]) ++ vendor = fields.Nullable(fields.String()) +diff --git a/repos/system_upgrade/common/models/rpmtransactiontasks.py b/repos/system_upgrade/common/models/rpmtransactiontasks.py +index 
7e2870d0..05d4e941 100644 +--- a/repos/system_upgrade/common/models/rpmtransactiontasks.py ++++ b/repos/system_upgrade/common/models/rpmtransactiontasks.py +@@ -10,6 +10,7 @@ class RpmTransactionTasks(Model): + to_keep = fields.List(fields.String(), default=[]) + to_remove = fields.List(fields.String(), default=[]) + to_upgrade = fields.List(fields.String(), default=[]) ++ to_reinstall = fields.List(fields.String(), default=[]) + modules_to_enable = fields.List(fields.Model(Module), default=[]) + modules_to_reset = fields.List(fields.Model(Module), default=[]) + +diff --git a/repos/system_upgrade/common/models/targetrepositories.py b/repos/system_upgrade/common/models/targetrepositories.py +index 02c6c5e5..f9fd4238 100644 +--- a/repos/system_upgrade/common/models/targetrepositories.py ++++ b/repos/system_upgrade/common/models/targetrepositories.py +@@ -21,6 +21,12 @@ class CustomTargetRepository(TargetRepositoryBase): + enabled = fields.Boolean(default=True) + + ++class VendorCustomTargetRepositoryList(Model): ++ topic = TransactionTopic ++ vendor = fields.String() ++ repos = fields.List(fields.Model(CustomTargetRepository)) ++ ++ + class TargetRepositories(Model): + """ + Repositories supposed to be used during the IPU process +diff --git a/repos/system_upgrade/common/models/vendorsignatures.py b/repos/system_upgrade/common/models/vendorsignatures.py +new file mode 100644 +index 00000000..f456aec5 +--- /dev/null ++++ b/repos/system_upgrade/common/models/vendorsignatures.py +@@ -0,0 +1,8 @@ ++from leapp.models import Model, fields ++from leapp.topics import VendorTopic ++ ++ ++class VendorSignatures(Model): ++ topic = VendorTopic ++ vendor = fields.String() ++ sigs = fields.List(fields.String()) +diff --git a/repos/system_upgrade/common/models/vendorsourcerepos.py b/repos/system_upgrade/common/models/vendorsourcerepos.py +new file mode 100644 +index 00000000..b7a219b4 +--- /dev/null ++++ b/repos/system_upgrade/common/models/vendorsourcerepos.py +@@ -0,0 +1,12 @@ 
++from leapp.models import Model, fields ++from leapp.topics import VendorTopic ++ ++ ++class VendorSourceRepos(Model): ++ """ ++ This model contains the data on all source repositories associated with a specific vendor. ++ Its data is used to determine whether the vendor should be included into the upgrade process. ++ """ ++ topic = VendorTopic ++ vendor = fields.String() ++ source_repoids = fields.List(fields.String()) +diff --git a/repos/system_upgrade/common/topics/vendortopic.py b/repos/system_upgrade/common/topics/vendortopic.py +new file mode 100644 +index 00000000..014b7afb +--- /dev/null ++++ b/repos/system_upgrade/common/topics/vendortopic.py +@@ -0,0 +1,5 @@ ++from leapp.topics import Topic ++ ++ ++class VendorTopic(Topic): ++ name = 'vendor_topic' +-- +2.43.0 + + +From 7325b6d5af40f1c7705502a7701a48cf19483101 Mon Sep 17 00:00:00 2001 +From: Yuriy Kohut +Date: Thu, 20 Jun 2024 19:52:32 +0300 +Subject: [PATCH 36/36] CI: (#110) + +- enhance the script, to install leapp-repositoriy from Git, with the GPG keys preserving, and exits if some important commands fail +--- + ci/scripts/install_elevate_dev.sh | 22 ++++++++++++++++++---- + 1 file changed, 18 insertions(+), 4 deletions(-) + +diff --git a/ci/scripts/install_elevate_dev.sh b/ci/scripts/install_elevate_dev.sh +index 4f2b4c06..f9cc2903 100644 +--- a/ci/scripts/install_elevate_dev.sh ++++ b/ci/scripts/install_elevate_dev.sh +@@ -40,6 +40,7 @@ WORK_DIR="$HOME" + NEW_LEAPP_NAME="leapp-repository-$BRANCH" + NEW_LEAPP_DIR="$WORK_DIR/$NEW_LEAPP_NAME/" + LEAPP_PATH='/usr/share/leapp-repository/repositories/' ++LEAPP_GPG_PATH='/etc/leapp/repos.d/system_upgrade/common/files/rpm-gpg' + EXCLUDE_PATH=' + /usr/share/leapp-repository/repositories/system_upgrade/el7toel8/files/bundled-rpms + /usr/share/leapp-repository/repositories/system_upgrade/el7toel8/files +@@ -56,6 +57,11 @@ echo "RHEL_MAJOR_VERSION=$RHEL_MAJOR_VERSION" + echo "WORK_DIR=$WORK_DIR" + echo "EXCLUDED_PATHS=$EXCLUDE_PATH" + ++echo "Preserve GPG keys 
if any" ++for major in 8 9; do ++ test -e ${LEAPP_GPG_PATH}/${major} && mv ${LEAPP_GPG_PATH}/${major} ${WORK_DIR}/ ++done ++ + + echo 'Remove old files' + for dir in $(find $LEAPP_PATH -type d); +@@ -73,8 +79,8 @@ do + fi + done + +-echo "Download new tarball from https://github.com/$USER/leapp-repository/archive/$BRANCH/leapp-repository-$BRANCH.tar.gz" +-curl -s -L https://github.com/$USER/leapp-repository/archive/$BRANCH/leapp-repository-$BRANCH.tar.gz | tar -xz -C $WORK_DIR/ ++echo "Download new tarball from https://github.com/$USER/leapp-repository/archive/$BRANCH/leapp-repository-$BRANCH.tar.gz" ++curl -s -L https://github.com/$USER/leapp-repository/archive/$BRANCH/leapp-repository-$BRANCH.tar.gz | tar -xmz -C $WORK_DIR/ || exit 1 + + echo 'Deleting files as in spec file' + rm -rf $NEW_LEAPP_DIR/repos/common/actors/testactor +@@ -89,15 +95,23 @@ else + fi + + echo 'Copy new data to system' +-cp -r $NEW_LEAPP_DIR/repos/* $LEAPP_PATH ++cp -r $NEW_LEAPP_DIR/repos/* $LEAPP_PATH || exit 1 + + for DIRECTORY in $(find $LEAPP_PATH -mindepth 1 -maxdepth 1 -type d); + do + REPOSITORY=$(basename $DIRECTORY) + if ! 
[ -e /etc/leapp/repos.d/$REPOSITORY ];then + echo "Enabling repository $REPOSITORY" +- ln -s $LEAPP_PATH/$REPOSITORY /etc/leapp/repos.d/$REPOSITORY ++ ln -s $LEAPP_PATH/$REPOSITORY /etc/leapp/repos.d/$REPOSITORY || exit 1 + fi + done + ++echo "Restore GPG keys if any" ++for major in 8 9; do ++ rm -rf ${LEAPP_GPG_PATH}/${major} ++ test -e ${WORK_DIR}/${major} && mv ${WORK_DIR}/${major} ${LEAPP_GPG_PATH}/ ++done ++ + rm -rf $NEW_LEAPP_DIR ++ ++exit 0 +-- +2.43.0 + diff --git a/SPECS/leapp-repository.spec b/SPECS/leapp-repository.spec index 16c8b76..400272c 100644 --- a/SPECS/leapp-repository.spec +++ b/SPECS/leapp-repository.spec @@ -43,7 +43,7 @@ py2_byte_compile "%1" "%2"} Epoch: 1 Name: leapp-repository Version: 0.19.0 -Release: 6%{?dist} +Release: 7%{?dist} Summary: Repositories for leapp License: ASL 2.0 @@ -283,6 +283,9 @@ done; # no files here %changelog +* Fri Jun 21 2024 Yuriy Kohut - 1:0.19.0-7 +- Implement Vendors support + * Wed Apr 24 2024 Yuriy Kohut - 1:0.19.0-6 - Enable CentOS Stream release 8 to 9 elevation - Update actor.py to support NVMe device enumeration