diff --git a/SOURCES/leapp-repository-0.23.0-elevate.patch b/SOURCES/leapp-repository-0.23.0-elevate.patch
index 8aebb6a..f5d22a0 100644
--- a/SOURCES/leapp-repository-0.23.0-elevate.patch
+++ b/SOURCES/leapp-repository-0.23.0-elevate.patch
@@ -63,6 +63,21 @@ index 0bb92d3d..a04c7ded 100644
# pycharm
.idea
+diff --git a/.pylintrc b/.pylintrc
+index a82f8818..bd365788 100644
+--- a/.pylintrc
++++ b/.pylintrc
+@@ -42,7 +42,9 @@ disable=
+ unnecessary-pass,
+ raise-missing-from, # no 'raise from' in python 2
+ consider-using-f-string, # sorry, not gonna happen, still have to support py2
+- logging-format-interpolation
++ logging-format-interpolation,
++# problem between Python 3.6 and 3.8+ pylint
++ useless-option-value
+
+ [FORMAT]
+ # Maximum number of characters on a single line.
diff --git a/ci/.gitignore b/ci/.gitignore
new file mode 100644
index 00000000..e6f97f0f
@@ -3474,6 +3489,27 @@ index 00000000..370758e6
+ end
+ end
+end
+diff --git a/docs/source/libraries-and-api/deprecations-list.md b/docs/source/libraries-and-api/deprecations-list.md
+index e620d70d..817b63c5 100644
+--- a/docs/source/libraries-and-api/deprecations-list.md
++++ b/docs/source/libraries-and-api/deprecations-list.md
+@@ -15,6 +15,16 @@ Only the versions in which a deprecation has been made are listed.
+ ## Next release (till TODO date)
+ - Shared libraries
+ - **`leapp.libraries.common.config.get_distro_id()`** - The function has been replaced by variants for source and target distros - `leapp.libraries.common.config.get_source_distro_id()` and `leapp.libraries.common.config.get_target_distro_id()`.
++ - The following UEFI-related functions and classes have been moved from `leapp.libraries.common.grub` into `leapp.libraries.common.efi`:
++ - **`EFIBootInfo`** - raises `leapp.libraries.common.efi.EFIError` instead of `leapp.exceptions.StopActorExecutionError`
++ - **`EFIBootLoaderEntry`**
++ - **`canonical_path_to_efi_format()`**
++ - **`get_efi_device()`** - raises `leapp.libraries.common.efi.EFIError` instead of `leapp.exceptions.StopActorExecutionError`
++ - **`get_efi_partition()`** - raises `leapp.libraries.common.efi.EFIError` instead of `leapp.exceptions.StopActorExecutionError`
++ - **`is_efi()`**
++ - The following functions related to manipulation of devices and partitions have been moved from `leapp.libraries.common.grub` into `leapp.libraries.common.partitions` (see the illustrative import sketch below):
++ - **`get_device_number()`** - replaced by **`get_partition_number()`**
++ - **`blk_dev_from_partition()`**
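++
++   A minimal, illustrative sketch of the import changes listed above (module locations taken from this list; the call sites are hypothetical):
++
++   ```python
++   # before (deprecated location)
++   from leapp.libraries.common import grub
++   boot_info = grub.EFIBootInfo()   # raised StopActorExecutionError on failure
++
++   # after
++   from leapp.libraries.common import efi, partitions
++   boot_info = efi.EFIBootInfo()    # raises efi.EFIError on failure
++   ```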
+
+ ## v0.23.0 (till March 2026)
+
diff --git a/etc/leapp/files/device_driver_deprecation_data.json b/etc/leapp/files/device_driver_deprecation_data.json
index a9c06956..c38c2840 100644
--- a/etc/leapp/files/device_driver_deprecation_data.json
@@ -6846,15 +6882,1862 @@ index 00000000..52f5af9d
+ api.produce(ActiveVendorList(data=list(active_vendors)))
+ else:
+ self.log.info("No active vendors found, vendor list not generated")
+diff --git a/repos/system_upgrade/common/actors/checknvme/actor.py b/repos/system_upgrade/common/actors/checknvme/actor.py
+new file mode 100644
+index 00000000..dc82c4ad
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/checknvme/actor.py
+@@ -0,0 +1,54 @@
++from leapp.actors import Actor
++from leapp.libraries.actor import checknvme
++from leapp.models import (
++ KernelCmdline,
++ LiveModeConfig,
++ NVMEInfo,
++ StorageInfo,
++ TargetKernelCmdlineArgTasks,
++ TargetUserSpacePreupgradeTasks,
++ TargetUserSpaceUpgradeTasks,
++ UpgradeInitramfsTasks,
++ UpgradeKernelCmdlineArgTasks
++)
++from leapp.reporting import Report
++from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
++
++
++class CheckNVME(Actor):
++ """
++ Check if NVMe devices are used and possibly register additional actions.
++
++ Check whether the system uses NVMe devices. These can be connected using
++ different transport technologies, e.g., PCIe, TCP, FC, etc. Transports
++ handled by the current implementation:
++ * PCIe (no special actions are required)
++ * Fibre Channel (FC)
++
++ When NVMe-FC devices are detected, the following actions are taken:
++ * dracut, dracut-network, nvme-cli, and some other packages are installed into the initramfs
++ * /etc/nvme is copied into the target userspace
++ * the nvmf dracut module is included in the upgrade initramfs
++ * rd.nvmf.discover=fc,auto is added to the upgrade boot entry
++ * nvme_core.multipath is added to the upgrade and target boot entries
++
++ Conditions causing the upgrade to be inhibited:
++ * an NVMe device that is referenced in /etc/fstab uses a transport technology
++ other than PCIe or FC
++ * /etc/nvme/hostnqn or /etc/nvme/hostid is missing while an NVMe-FC device is present
++ * the source system is RHEL 9+ and has native multipath disabled
++ """
++ name = 'check_nvme'
++ consumes = (LiveModeConfig, KernelCmdline, NVMEInfo, StorageInfo)
++ produces = (
++ Report,
++ TargetKernelCmdlineArgTasks,
++ TargetUserSpacePreupgradeTasks,
++ TargetUserSpaceUpgradeTasks,
++ UpgradeInitramfsTasks,
++ UpgradeKernelCmdlineArgTasks
++ )
++ tags = (ChecksPhaseTag, IPUWorkflowTag)
++
++ def process(self):
++ checknvme.process()
+diff --git a/repos/system_upgrade/common/actors/checknvme/libraries/checknvme.py b/repos/system_upgrade/common/actors/checknvme/libraries/checknvme.py
+new file mode 100644
+index 00000000..cce11f43
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/checknvme/libraries/checknvme.py
+@@ -0,0 +1,352 @@
++import os
++from collections import defaultdict
++from typing import List, Set
++
++from leapp import reporting
++from leapp.exceptions import StopActorExecutionError
++from leapp.libraries.common.config.version import get_source_major_version
++from leapp.libraries.stdlib import api
++from leapp.models import (
++ CopyFile,
++ DracutModule,
++ KernelCmdline,
++ KernelCmdlineArg,
++ LiveModeConfig,
++ NVMEDevice,
++ NVMEInfo,
++ StorageInfo,
++ TargetKernelCmdlineArgTasks,
++ TargetUserSpacePreupgradeTasks,
++ TargetUserSpaceUpgradeTasks,
++ UpgradeInitramfsTasks,
++ UpgradeKernelCmdlineArgTasks
++)
++
++FMT_LIST_SEPARATOR = '\n - '
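++# Transport types this actor distinguishes. Devices whose transport is not in
++# SAFE_TRANSPORT_TYPES are treated as unhandled and inhibit the upgrade when
++# they are referenced in /etc/fstab.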
++FABRICS_TRANSPORT_TYPES = ['fc', 'tcp', 'rdma']
++BROKEN_TRANSPORT_TYPES = ['tcp', 'rdma']
++SAFE_TRANSPORT_TYPES = ['pcie', 'fc']
++RQ_RPMS_CONTAINER = [
++ 'iproute',
++ 'jq',
++ 'nvme-cli',
++ 'sed',
++]
++
++# We need these packages early (when setting up the container) as we will be
++# modifying some of their files
++EARLY_CONTAINER_RPMS = [
++ 'dracut',
++ 'dracut-network', # Adds dracut-nvmf module
++]
++
++
++class NVMEDeviceCollection:
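++ """Group detected NVMe devices by their transport type (e.g. 'pcie', 'fc', 'tcp')."""
++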
++ def __init__(self):
++ self.device_by_transport = defaultdict(list)
++
++ def add_device(self, device: NVMEDevice):
++ self.device_by_transport[device.transport].append(device)
++
++ def add_devices(self, devices: List[NVMEDevice]):
++ for device in devices:
++ self.add_device(device)
++
++ def get_devices_by_transport(self, transport: str) -> List[NVMEDevice]:
++ return self.device_by_transport[transport]
++
++ @property
++ def handled_transport_types(self) -> List[str]:
++ return SAFE_TRANSPORT_TYPES
++
++ @property
++ def unhandled_devices(self) -> List[NVMEDevice]:
++ unhandled_devices = []
++ for transport, devices in self.device_by_transport.items():
++ if transport not in self.handled_transport_types:
++ unhandled_devices.extend(devices)
++ return unhandled_devices
++
++ @property
++ def fabrics_devices(self) -> List[NVMEDevice]:
++ fabrics_devices = []
++ for transport in FABRICS_TRANSPORT_TYPES:
++ fabrics_devices.extend(self.device_by_transport[transport])
++
++ return fabrics_devices
++
++
++def _format_list(data, sep=FMT_LIST_SEPARATOR, callback_sort=sorted, limit=0):
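++ """
++ Join items into a single string, each item prefixed with the separator.
++
++ Illustrative example (not from the original code):
++ _format_list(['b', 'a']) == FMT_LIST_SEPARATOR + 'a' + FMT_LIST_SEPARATOR + 'b'
++ """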
++ # NOTE(pstodulk): Teaser O:-> https://issues.redhat.com/browse/RHEL-126447
++
++ def identity(values):
++ return values
++
++ if callback_sort is None:
++ callback_sort = identity
++ res = ['{}{}'.format(sep, item) for item in callback_sort(data)]
++ if limit:
++ return ''.join(res[:limit])
++ return ''.join(res)
++
++
++def is_livemode_enabled() -> bool:
++ livemode_config = next(api.consume(LiveModeConfig), None)
++ if livemode_config and livemode_config.is_enabled:
++ return True
++ return False
++
++
++def get_current_cmdline_arg_value(arg_name: str):
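++ """
++ Return the value of the given kernel cmdline argument, or None when it is not set.
++
++ Raises StopActorExecutionError if no KernelCmdline message is available.
++ """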
++ cmdline = next(api.consume(KernelCmdline), None)
++
++ if not cmdline:
++ raise StopActorExecutionError(
++ 'Failed to obtain message with information about current kernel cmdline'
++ )
++
++ for arg in cmdline.parameters:
++ if arg.key == arg_name:
++ return arg.value
++
++ return None
++
++
++def _report_native_multipath_required():
++ """Report that NVMe native multipath must be enabled on RHEL 9 before the upgrade."""
++ reporting.create_report([
++ reporting.Title('NVMe native multipath must be enabled on the target system'),
++ reporting.Summary(
++ 'The system is booted with "nvme_core.multipath=N" kernel command line argument, '
++ 'disabling native multipath for NVMe devices. However, native multipath '
++ 'is required to be used for NVMe over Fabrics (NVMeoF) on the target system. '
++ 'Therefore, the system setup must be updated to use '
++ 'native multipath before the in-place upgrade.'
++ ),
++ reporting.Remediation(hint=(
++ 'Enable native multipath for NVMe devices following the official '
++ 'documentation and reboot your system - see the attached link.'
++ )),
++ reporting.ExternalLink(
++ url='https://red.ht/rhel-9-enabling-multipathing-on-nvme-devices',
++ title='Enabling native multipathing on NVMe devices.'
++ ),
++ reporting.Severity(reporting.Severity.HIGH),
++ reporting.Groups([reporting.Groups.INHIBITOR, reporting.Groups.FILESYSTEM]),
++ ])
++
++
++def _report_system_should_migrate_to_native_multipath():
++ """
++ Report that since RHEL 9, native NVMe multipath is the recommended multipath solution for NVMe.
++ """
++ reporting.create_report([
++ reporting.Title('Native NVMe multipath is recommended on the target system.'),
++ reporting.Summary(
++ 'In the case that the system is using dm-multipath on NVMe devices, '
++ 'it is recommended to use the native NVMe multipath instead. '
++ 'We recommend updating the system configuration after the in-place '
++ 'upgrade following the official documentation - see the attached link.'
++ ),
++ reporting.ExternalLink(
++ url='https://red.ht/rhel-9-enabling-multipathing-on-nvme-devices',
++ title='Enabling native multipathing on NVMe devices.'
++ ),
++ reporting.Severity(reporting.Severity.INFO),
++ reporting.Groups([reporting.Groups.FILESYSTEM, reporting.Groups.POST]),
++ ])
++
++
++def _report_kernel_cmdline_might_be_modified_unnecessarily():
++ """
++ Report that we introduced nvme_core.multipath=N, which might not be necessary.
++
++ We introduce nvme_core.multipath=N (unconditionally) during 8>9 upgrade. However,
++ the introduction of the argument might not be always necessary, but we currently lack
++ an implementation that would precisely identify when the argument is truly needed.
++ """
++ reporting.create_report([
++ reporting.Title('Native NVMe multipath will be disabled on the target system.'),
++ reporting.Summary(
++ 'To ensure the system\'s storage layout remains consistent during the upgrade, native '
++ 'NVMe multipath will be disabled by adding nvme_core.multipath=N to the default boot entry. '
++ 'If the system does not use multipath, the nvme_core.multipath=N argument should be manually '
++ 'removed from the target system\'s boot entry after the upgrade.'
++ ),
++ reporting.ExternalLink(
++ url='https://red.ht/rhel-9-enabling-multipathing-on-nvme-devices',
++ title='Enabling native multipathing on NVMe devices.'
++ ),
++ reporting.Severity(reporting.Severity.INFO),
++ reporting.Groups([reporting.Groups.FILESYSTEM, reporting.Groups.POST]),
++ ])
++
++
++def _tasks_copy_files_into_container(nvme_device_collection: NVMEDeviceCollection):
++ """
++ Tasks needed to modify the target userspace container and the upgrade initramfs.
++ """
++ # NOTE: prepared for future extension, as it's possible that we will need
++ # to copy more files when starting to look at NVMe-(RDMA|TCP)
++ copy_files = []
++
++ if nvme_device_collection.fabrics_devices:
++ # /etc/nvme/ is required only in case of NVMe-oF (PCIe drives are safe)
++ copy_files.append(CopyFile(src='/etc/nvme/'))
++
++ api.produce(TargetUserSpaceUpgradeTasks(
++ copy_files=copy_files,
++ install_rpms=RQ_RPMS_CONTAINER)
++ )
++
++
++def _tasks_for_kernel_cmdline(nvme_device_collection: NVMEDeviceCollection):
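++ """
++ Produce kernel cmdline argument tasks for the upgrade and target boot entries.
++
++ No tasks are produced when the source system is RHEL 9+ with native multipath
++ disabled; in that case an inhibitor report is created instead.
++ """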
++ upgrade_cmdline_args = []
++ target_cmdline_args = []
++
++ if not is_livemode_enabled():
++ upgrade_cmdline_args.append(KernelCmdlineArg(key='rd.nvmf.discover', value='fc,auto'))
++
++ # The nvme_core.multipath argument is used to disable native multipath for NVMeoF devices.
++ nvme_core_mpath_arg_val = get_current_cmdline_arg_value('nvme_core.multipath')
++
++ # FIXME(pstodulk): handle multi-controller NVMe-PCIe drives WITH multipath used by, e.g., Intel SSD DC P4500.
++ # Essentially, we always append nvme_core.multipath=N to the kernel command line during an 8>9 upgrade. This also
++ # includes basics setups where a simple NVMe drive is attached over PCIe without any multipath capabilities (think
++ # of an ordinary laptops). When the user attempts to later perform a 9>10 upgrade, an inhibitor will be raised with
++ # instructions to remove nvme_core.multipath=N introduced by us during the previous upgrade, which might be
++ # confusing as they might never even heard of multipath. Right now, we just emit a report for the user to remove
++ # nvme_core.multipath=N from the boot entry if multipath is not used. We should improve this behaviour in the
++ # future so that we can precisely target when to introduce the argument.
++
++ if get_source_major_version() == '8':
++ # NOTE: it is generally expected that NVMeoF users always use multipath
++
++ # If the system is already booted with nvme_core.multipath=?, do not change it
++ # The value will be copied from the default boot entry.
++ # On the other hand, on 8>9 we always want to add this, as native multipath was unsupported
++ # on RHEL 8 and therefore should not be needed (hence the value N).
++ if not nvme_core_mpath_arg_val:
++ upgrade_cmdline_args.append(KernelCmdlineArg(key='nvme_core.multipath', value='N'))
++ target_cmdline_args.append(KernelCmdlineArg(key='nvme_core.multipath', value='N'))
++
++ if nvme_core_mpath_arg_val != 'Y':
++ # Print the report only if NVMeoF is detected and
++ _report_system_should_migrate_to_native_multipath()
++ _report_kernel_cmdline_might_be_modified_unnecessarily()
++
++ if get_source_major_version() == '9':
++ # NOTE(pstodulk): Check this always; it does not matter whether we detect
++ # NVMeoF or whether just PCIe is used. In any case, we will require the user
++ # to fix it.
++ if nvme_core_mpath_arg_val == 'N':
++ _report_native_multipath_required()
++ return
++
++ api.produce(UpgradeKernelCmdlineArgTasks(to_add=upgrade_cmdline_args))
++ api.produce(TargetKernelCmdlineArgTasks(to_add=target_cmdline_args))
++
++
++def register_upgrade_tasks(nvme_device_collection: NVMEDeviceCollection):
++ """
++ Register tasks that should happen during IPU to handle NVMe devices
++ successfully.
++
++ Args:
++ nvme_device_collection (NVMEDeviceCollection): collection of detected NVMe devices
++ """
++ _tasks_copy_files_into_container(nvme_device_collection)
++ _tasks_for_kernel_cmdline(nvme_device_collection)
++
++ api.produce(TargetUserSpacePreupgradeTasks(install_rpms=EARLY_CONTAINER_RPMS))
++ api.produce(UpgradeInitramfsTasks(include_dracut_modules=[DracutModule(name='nvmf')]))
++
++
++def report_missing_configs_for_fabrics_devices(nvme_info: NVMEInfo,
++ nvme_device_collection: NVMEDeviceCollection,
++ max_devices_in_report: int = 3) -> None:
++ missing_configs = []
++ if not nvme_info.hostid:
++ missing_configs.append('/etc/nvme/hostid')
++ if not nvme_info.hostnqn:
++ missing_configs.append('/etc/nvme/hostnqn')
++
++ # NOTE(pstodulk): hostid and hostnqn are mandatory for NVMe-oF devices.
++ # That means practically FC, RDMA, TCP. Let's inform the user that the upgrade
++ # is blocked and that they must configure the system properly to be able to
++ # upgrade.
++ if not nvme_device_collection.fabrics_devices or not missing_configs:
++ return # We either have no fabrics devices or we have both hostid and hostnqn
++
++ files_str = ', '.join(missing_configs) if missing_configs else 'required configuration files'
++
++ device_names = [dev.name for dev in nvme_device_collection.fabrics_devices[:max_devices_in_report]]
++ if len(nvme_device_collection.fabrics_devices) > max_devices_in_report:
++ device_names.append('...')
++ device_list_str = ', '.join(device_names)
++
++ reporting.create_report([
++ reporting.Title('Missing NVMe configuration files required for the upgrade'),
++ reporting.Summary(
++ 'The system has NVMe-oF devices detected ({}), but {} are missing. '
++ 'Both /etc/nvme/hostid and /etc/nvme/hostnqn must be present and configured for NVMe-oF usage. '
++ 'Upgrade cannot continue until these files are provided.'.format(device_list_str, files_str)
++ ),
++ reporting.Severity(reporting.Severity.HIGH),
++ reporting.Groups([reporting.Groups.INHIBITOR, reporting.Groups.FILESYSTEM]),
++ reporting.Remediation(
++ hint='Ensure the files /etc/nvme/hostid and /etc/nvme/hostnqn are present and properly configured.'
++ ),
++ ])
++
++
++def get_devices_present_in_fstab() -> Set[str]:
++ storage_info = next(api.consume(StorageInfo), None)
++
++ if not storage_info:
++ raise StopActorExecutionError('Failed to obtain message with information about fstab entries')
++
++ # Call realpath to get the *canonical* path to the device (user might use disk UUIDs, etc. in fstab)
++ return {os.path.realpath(entry.fs_spec) for entry in storage_info.fstab}
++
++
++def check_unhandled_devices_present_in_fstab(nvme_device_collection: NVMEDeviceCollection) -> bool:
++ """Check if any unhandled NVMe devices are present in fstab.
++
++ Args:
++ nvme_device_collection: NVMEDeviceCollection instance
++
++ Returns:
++ True if any unhandled NVMe devices are present in fstab, False otherwise
++ """
++ unhandled_dev_nodes = {os.path.join('/dev', device.name) for device in nvme_device_collection.unhandled_devices}
++ fstab_listed_dev_nodes = set(get_devices_present_in_fstab())
++
++ required_unhandled_dev_nodes = unhandled_dev_nodes.intersection(fstab_listed_dev_nodes)
++ if required_unhandled_dev_nodes:
++ summary = (
++ 'The system has NVMe devices with a transport type that is currently '
++ 'not handled during the upgrade process present in fstab. Problematic devices: {0}'
++ ).format(_format_list(required_unhandled_dev_nodes))
++
++ reporting.create_report([
++ reporting.Title('NVMe devices with unhandled transport type present in fstab'),
++ reporting.Summary(summary),
++ reporting.Severity(reporting.Severity.HIGH),
++ reporting.Groups([reporting.Groups.INHIBITOR, reporting.Groups.FILESYSTEM]),
++ ])
++ return True
++ return False
++
++
++def process():
++ nvmeinfo = next(api.consume(NVMEInfo), None)
++ if not nvmeinfo or not nvmeinfo.devices:
++ return # Nothing to do
++
++ nvme_device_collection = NVMEDeviceCollection()
++ nvme_device_collection.add_devices(nvmeinfo.devices)
++
++ check_unhandled_devices_present_in_fstab(nvme_device_collection)
++ report_missing_configs_for_fabrics_devices(nvmeinfo, nvme_device_collection)
++ register_upgrade_tasks(nvme_device_collection)
+diff --git a/repos/system_upgrade/common/actors/checknvme/tests/test_checknvme.py b/repos/system_upgrade/common/actors/checknvme/tests/test_checknvme.py
+new file mode 100644
+index 00000000..7f18e7b4
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/checknvme/tests/test_checknvme.py
+@@ -0,0 +1,431 @@
++import os
++
++from leapp import reporting
++from leapp.libraries.actor import checknvme
++from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked, produce_mocked
++from leapp.libraries.stdlib import api
++from leapp.models import (
++ FstabEntry,
++ KernelCmdline,
++ NVMEDevice,
++ NVMEInfo,
++ StorageInfo,
++ TargetKernelCmdlineArgTasks,
++ TargetUserSpacePreupgradeTasks,
++ TargetUserSpaceUpgradeTasks,
++ UpgradeInitramfsTasks,
++ UpgradeKernelCmdlineArgTasks
++)
++from leapp.utils.report import is_inhibitor
++
++
++def _make_storage_info(fstab_entries=None):
++ """Helper to create StorageInfo with fstab entries."""
++ if fstab_entries is None:
++ fstab_entries = []
++ return StorageInfo(fstab=fstab_entries)
++
++
++def test_no_nvme_devices(monkeypatch):
++ """Test when no NVMe devices are present."""
++ msgs = [KernelCmdline(parameters=[])]
++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs))
++ monkeypatch.setattr(api, 'produce', produce_mocked())
++
++ checknvme.process()
++
++ # No messages should be produced when no NVMe devices are present
++ assert api.produce.called == 0
++
++
++def test_nvme_pcie_devices_only(monkeypatch):
++ """Test with only NVMe PCIe devices (no FC devices)."""
++ nvme_device = NVMEDevice(
++ sys_class_path='/sys/class/nvme/nvme0',
++ name='nvme0',
++ transport='pcie'
++ )
++ nvme_info = NVMEInfo(
++ devices=[nvme_device],
++ hostid='test-hostid',
++ hostnqn='test-hostnqn'
++ )
++
++ msgs = [KernelCmdline(parameters=[]), nvme_info, _make_storage_info()]
++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs))
++ monkeypatch.setattr(api, 'produce', produce_mocked())
++ monkeypatch.setattr(checknvme, '_report_system_should_migrate_to_native_multipath', lambda: None)
++ monkeypatch.setattr(checknvme, '_report_kernel_cmdline_might_be_modified_unnecessarily', lambda: None)
++
++ checknvme.process()
++
++ def _get_produced_msg(msg_type):
++ """Get a single produced message of the given type."""
++ for msg in api.produce.model_instances:
++ # We cannot use isinstance due to problems with inheritance
++ if type(msg) is msg_type: # pylint: disable=unidiomatic-typecheck
++ return msg
++ return None
++
++ # Check TargetUserSpaceUpgradeTasks - no copy_files for PCIe-only
++ userspace_tasks = _get_produced_msg(TargetUserSpaceUpgradeTasks)
++ assert userspace_tasks.copy_files == []
++ assert set(userspace_tasks.install_rpms) == {'iproute', 'jq', 'nvme-cli', 'sed'}
++
++ # Check TargetUserSpacePreupgradeTasks
++ preupgrade_tasks = _get_produced_msg(TargetUserSpacePreupgradeTasks)
++ assert set(preupgrade_tasks.install_rpms) == {'dracut', 'dracut-network'}
++
++ # Check UpgradeInitramfsTasks
++ initramfs_tasks = _get_produced_msg(UpgradeInitramfsTasks)
++ assert len(initramfs_tasks.include_dracut_modules) == 1
++ assert initramfs_tasks.include_dracut_modules[0].name == 'nvmf'
++
++ # Check UpgradeKernelCmdlineArgTasks
++ upgrade_cmdline_tasks = _get_produced_msg(UpgradeKernelCmdlineArgTasks)
++ upgrade_cmdline_args = {(arg.key, arg.value) for arg in upgrade_cmdline_tasks.to_add}
++ assert ('rd.nvmf.discover', 'fc,auto') in upgrade_cmdline_args
++
++ # Check TargetKernelCmdlineArgTasks
++ target_cmdline_tasks = _get_produced_msg(TargetKernelCmdlineArgTasks)
++ # For PCIe-only, no nvme_core.multipath arg is added (no fabrics devices)
++ target_cmdline_args = {(arg.key, arg.value) for arg in target_cmdline_tasks.to_add}
++ assert target_cmdline_args == set() or ('nvme_core.multipath', 'N') in target_cmdline_args
++
++
++def test_nvme_fc_devices_present(monkeypatch):
++ """Test with NVMe-FC devices present."""
++ nvme_fc_device = NVMEDevice(
++ sys_class_path='/sys/class/nvme/nvme0',
++ name='nvme0',
++ transport='fc'
++ )
++ nvme_info = NVMEInfo(
++ devices=[nvme_fc_device],
++ hostid='test-hostid',
++ hostnqn='test-hostnqn'
++ )
++
++ msgs = [KernelCmdline(parameters=[]), nvme_info, _make_storage_info()]
++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs))
++ monkeypatch.setattr(api, 'produce', produce_mocked())
++ monkeypatch.setattr(checknvme, '_report_system_should_migrate_to_native_multipath', lambda: None)
++ monkeypatch.setattr(checknvme, '_report_kernel_cmdline_might_be_modified_unnecessarily', lambda: None)
++
++ checknvme.process()
++
++ assert api.produce.called == 5
++
++ produced_msgs = api.produce.model_instances
++ assert any(isinstance(msg, TargetUserSpacePreupgradeTasks) for msg in produced_msgs)
++ assert any(isinstance(msg, TargetUserSpaceUpgradeTasks) for msg in produced_msgs)
++ assert any(isinstance(msg, UpgradeInitramfsTasks) for msg in produced_msgs)
++
++ # Check that UpgradeKernelCmdlineArgTasks was produced with correct argument
++ kernel_cmdline_msgs = [msg for msg in produced_msgs if isinstance(msg, UpgradeKernelCmdlineArgTasks)]
++ assert len(kernel_cmdline_msgs) == 1
++
++ cmdline_args = {(c_arg.key, c_arg.value) for c_arg in kernel_cmdline_msgs[0].to_add}
++ expected_cmdline_args = {
++ ('rd.nvmf.discover', 'fc,auto'),
++ ('nvme_core.multipath', 'N')
++ }
++ assert expected_cmdline_args == cmdline_args
++
++
++def test_mixed_nvme_devices(monkeypatch):
++ """Test with mixed NVMe devices (PCIe and FC)."""
++ nvme_pcie_device = NVMEDevice(
++ sys_class_path='/sys/class/nvme/nvme0',
++ name='nvme0',
++ transport='pcie'
++ )
++ nvme_fc_device = NVMEDevice(
++ sys_class_path='/sys/class/nvme/nvme1',
++ name='nvme1',
++ transport='fc'
++ )
++ nvme_info = NVMEInfo(
++ devices=[nvme_pcie_device, nvme_fc_device],
++ hostid='test-hostid',
++ hostnqn='test-hostnqn'
++ )
++
++ msgs = [KernelCmdline(parameters=[]), nvme_info, _make_storage_info()]
++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs))
++ monkeypatch.setattr(api, 'produce', produce_mocked())
++ monkeypatch.setattr(checknvme, '_report_system_should_migrate_to_native_multipath', lambda: None)
++ monkeypatch.setattr(checknvme, '_report_kernel_cmdline_might_be_modified_unnecessarily', lambda: None)
++
++ checknvme.process()
++
++ assert api.produce.called == 5
++
++ produced_msgs = api.produce.model_instances
++
++ # Check that UpgradeKernelCmdlineArgTasks was produced
++ kernel_cmdline_msgs = [msg for msg in produced_msgs if isinstance(msg, UpgradeKernelCmdlineArgTasks)]
++ assert len(kernel_cmdline_msgs) == 1
++
++ cmdline_args = {(c_arg.key, c_arg.value) for c_arg in kernel_cmdline_msgs[0].to_add}
++ expected_cmdline_args = {
++ ('rd.nvmf.discover', 'fc,auto'),
++ ('nvme_core.multipath', 'N')
++ }
++ assert expected_cmdline_args == cmdline_args
++
++
++def test_multiple_nvme_fc_devices(monkeypatch):
++ """Test with multiple NVMe-FC devices."""
++ nvme_fc_device1 = NVMEDevice(
++ sys_class_path='/sys/class/nvme/nvme0',
++ name='nvme0',
++ transport='fc'
++ )
++ nvme_fc_device2 = NVMEDevice(
++ sys_class_path='/sys/class/nvme/nvme1',
++ name='nvme1',
++ transport='fc'
++ )
++ nvme_info = NVMEInfo(
++ devices=[nvme_fc_device1, nvme_fc_device2],
++ hostid='test-hostid',
++ hostnqn='test-hostnqn'
++ )
++
++ msgs = [KernelCmdline(parameters=[]), nvme_info, _make_storage_info()]
++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs))
++ monkeypatch.setattr(api, 'produce', produce_mocked())
++ monkeypatch.setattr(checknvme, '_report_system_should_migrate_to_native_multipath', lambda: None)
++ monkeypatch.setattr(checknvme, '_report_kernel_cmdline_might_be_modified_unnecessarily', lambda: None)
++
++ checknvme.process()
++
++ # Should still produce only one UpgradeKernelCmdlineArgTasks message
++ kernel_cmdline_msgs = [msg for msg in api.produce.model_instances
++ if isinstance(msg, UpgradeKernelCmdlineArgTasks)]
++ assert len(kernel_cmdline_msgs) == 1
++
++ # Should still have only two kernel arguments
++ assert len(kernel_cmdline_msgs[0].to_add) == 2
++
++
++def test_nvme_missing_hostid_hostnqn_creates_inhibitor(monkeypatch):
++ """Test that missing hostid/hostnqn creates an inhibitor report for NVMe-oF devices."""
++ nvme_fc_device = NVMEDevice(
++ sys_class_path='/sys/class/nvme/nvme0',
++ name='nvme0',
++ transport='fc'
++ )
++ # Missing hostid and hostnqn
++ nvme_info = NVMEInfo(
++ devices=[nvme_fc_device],
++ hostid=None,
++ hostnqn=None
++ )
++
++ msgs = [KernelCmdline(parameters=[]), nvme_info, _make_storage_info()]
++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs))
++ monkeypatch.setattr(api, 'produce', produce_mocked())
++ monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
++ monkeypatch.setattr(checknvme, '_report_system_should_migrate_to_native_multipath', lambda: None)
++ monkeypatch.setattr(checknvme, '_report_kernel_cmdline_might_be_modified_unnecessarily', lambda: None)
++
++ checknvme.process()
++
++ # Should create an inhibitor report for missing configs
++ assert reporting.create_report.called == 1
++ assert is_inhibitor(reporting.create_report.report_fields)
++
++
++def test_nvme_device_collection_categorization():
++ """Test NVMEDeviceCollection categorizes devices correctly."""
++ nvme_pcie_device = NVMEDevice(
++ sys_class_path='/sys/class/nvme/nvme0',
++ name='nvme0',
++ transport='pcie'
++ )
++ nvme_fc_device = NVMEDevice(
++ sys_class_path='/sys/class/nvme/nvme1',
++ name='nvme1',
++ transport='fc'
++ )
++ nvme_tcp_device = NVMEDevice(
++ sys_class_path='/sys/class/nvme/nvme2',
++ name='nvme2',
++ transport='tcp'
++ )
++
++ collection = checknvme.NVMEDeviceCollection()
++ collection.add_devices([nvme_pcie_device, nvme_fc_device, nvme_tcp_device])
++
++ assert nvme_pcie_device in collection.get_devices_by_transport('pcie')
++ assert nvme_fc_device in collection.get_devices_by_transport('fc')
++ assert nvme_tcp_device in collection.get_devices_by_transport('tcp')
++
++ # FC and TCP are fabrics devices
++ assert nvme_fc_device in collection.fabrics_devices
++ assert nvme_tcp_device in collection.fabrics_devices
++ assert nvme_pcie_device not in collection.fabrics_devices
++
++ # TCP is unhandled (not in SAFE_TRANSPORT_TYPES)
++ assert nvme_tcp_device in collection.unhandled_devices
++ assert nvme_pcie_device not in collection.unhandled_devices
++ assert nvme_fc_device not in collection.unhandled_devices
++
++
++def test_register_upgrade_tasks_without_fabrics_devices(monkeypatch):
++ """Test register_upgrade_tasks without fabrics devices."""
++ kernel_cmdline_tasks = KernelCmdline(parameters=[])
++ msgs = [kernel_cmdline_tasks]
++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs))
++ monkeypatch.setattr(api, 'produce', produce_mocked())
++ monkeypatch.setattr(checknvme, '_report_system_should_migrate_to_native_multipath', lambda: None)
++ monkeypatch.setattr(checknvme, '_report_kernel_cmdline_might_be_modified_unnecessarily', lambda: None)
++
++ nvme_pcie_device = NVMEDevice(
++ sys_class_path='/sys/class/nvme/nvme0',
++ name='nvme0',
++ transport='pcie'
++ )
++ collection = checknvme.NVMEDeviceCollection()
++ collection.add_device(nvme_pcie_device)
++
++ checknvme.register_upgrade_tasks(collection)
++
++ produced_msgs = api.produce.model_instances
++ expected_msg_types = {
++ TargetUserSpaceUpgradeTasks,
++ TargetUserSpacePreupgradeTasks,
++ UpgradeInitramfsTasks,
++ UpgradeKernelCmdlineArgTasks,
++ TargetKernelCmdlineArgTasks,
++ }
++ assert set(type(msg) for msg in produced_msgs) == expected_msg_types
++
++
++def test_register_upgrade_tasks_with_fabrics_devices(monkeypatch):
++ """Test register_upgrade_tasks with fabrics devices."""
++ nvme_fc_device = NVMEDevice(
++ sys_class_path='/sys/class/nvme/nvme0',
++ name='nvme0',
++ transport='fc'
++ )
++ collection = checknvme.NVMEDeviceCollection()
++ collection.add_device(nvme_fc_device)
++
++ msgs = [KernelCmdline(parameters=[])]
++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs))
++ monkeypatch.setattr(api, 'produce', produce_mocked())
++ monkeypatch.setattr(checknvme, '_report_system_should_migrate_to_native_multipath', lambda: None)
++ monkeypatch.setattr(checknvme, '_report_kernel_cmdline_might_be_modified_unnecessarily', lambda: None)
++
++ checknvme.register_upgrade_tasks(collection)
++
++ produced_msgs = api.produce.model_instances
++ expected_msg_types = {
++ TargetUserSpaceUpgradeTasks,
++ TargetUserSpacePreupgradeTasks,
++ UpgradeInitramfsTasks,
++ UpgradeKernelCmdlineArgTasks,
++ TargetKernelCmdlineArgTasks,
++ }
++ assert set(type(msg) for msg in produced_msgs) == expected_msg_types
++
++ kernel_cmdline_msgs = [msg for msg in produced_msgs if isinstance(msg, UpgradeKernelCmdlineArgTasks)]
++ assert len(kernel_cmdline_msgs) == 1
++
++ cmdline_args = {(c_arg.key, c_arg.value) for c_arg in kernel_cmdline_msgs[0].to_add}
++ expected_cmdline_args = {
++ ('rd.nvmf.discover', 'fc,auto'),
++ ('nvme_core.multipath', 'N')
++ }
++ assert expected_cmdline_args == cmdline_args
++
++
++def test_check_unhandled_devices_not_in_fstab(monkeypatch):
++ """Test that no inhibitor is created when unhandled devices are not in fstab."""
++ nvme_tcp_device = NVMEDevice(
++ sys_class_path='/sys/class/nvme/nvme0',
++ name='nvme0',
++ transport='tcp' # tcp is unhandled
++ )
++ collection = checknvme.NVMEDeviceCollection()
++ collection.add_device(nvme_tcp_device)
++
++ # fstab contains a different device
++ fstab_entries = [
++ FstabEntry(fs_spec='/dev/sda1', fs_file='/', fs_vfstype='ext4',
++ fs_mntops='defaults', fs_freq='1', fs_passno='1')
++ ]
++ storage_info = _make_storage_info(fstab_entries)
++
++ msgs = [storage_info]
++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs))
++ monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
++ monkeypatch.setattr('os.path.realpath', lambda path: path)
++
++ result = checknvme.check_unhandled_devices_present_in_fstab(collection)
++
++ assert result is False
++ assert reporting.create_report.called == 0
++
++
++def test_check_unhandled_devices_in_fstab_creates_inhibitor(monkeypatch):
++ """Test that an inhibitor is created when unhandled devices are in fstab."""
++ nvme_tcp_device = NVMEDevice(
++ sys_class_path='/sys/class/nvme/nvme0',
++ name='nvme0',
++ transport='tcp' # tcp is unhandled
++ )
++ collection = checknvme.NVMEDeviceCollection()
++ collection.add_device(nvme_tcp_device)
++
++ # fstab contains the unhandled device
++ fstab_entries = [
++ FstabEntry(fs_spec='/dev/nvme0', fs_file='/', fs_vfstype='ext4',
++ fs_mntops='defaults', fs_freq='1', fs_passno='1')
++ ]
++ storage_info = _make_storage_info(fstab_entries)
++
++ msgs = [storage_info]
++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs))
++ monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
++ monkeypatch.setattr(os.path, 'realpath', lambda path: path)
++
++ result = checknvme.check_unhandled_devices_present_in_fstab(collection)
++
++ assert result is True
++ assert reporting.create_report.called == 1
++ assert is_inhibitor(reporting.create_report.report_fields)
++
++
++def test_check_unhandled_devices_handled_device_in_fstab_no_inhibitor(monkeypatch):
++ """Test that no inhibitor is created when only handled devices are in fstab."""
++ nvme_pcie_device = NVMEDevice(
++ sys_class_path='/sys/class/nvme/nvme0',
++ name='nvme0',
++ transport='pcie' # pcie is handled
++ )
++ collection = checknvme.NVMEDeviceCollection()
++ collection.add_device(nvme_pcie_device)
++
++ # fstab contains the handled device
++ fstab_entries = [
++ FstabEntry(fs_spec='/dev/nvme0n1p1', fs_file='/', fs_vfstype='ext4',
++ fs_mntops='defaults', fs_freq='1', fs_passno='1')
++ ]
++ storage_info = _make_storage_info(fstab_entries)
++
++ msgs = [storage_info]
++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs))
++ monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
++ monkeypatch.setattr('os.path.realpath', lambda path: path)
++
++ result = checknvme.check_unhandled_devices_present_in_fstab(collection)
++
++ assert result is False
++ assert reporting.create_report.called == 0
+diff --git a/repos/system_upgrade/common/actors/checkpersistentmounts/libraries/checkpersistentmounts.py b/repos/system_upgrade/common/actors/checkpersistentmounts/libraries/checkpersistentmounts.py
+index 2a35f4c5..79b431bb 100644
+--- a/repos/system_upgrade/common/actors/checkpersistentmounts/libraries/checkpersistentmounts.py
++++ b/repos/system_upgrade/common/actors/checkpersistentmounts/libraries/checkpersistentmounts.py
+@@ -31,7 +31,7 @@ def check_mount_is_persistent(storage_info, mountpoint):
+ """Check if mountpoint is mounted in persistent fashion"""
+
+ mount_entry_exists = any(me.mount == mountpoint for me in storage_info.mount)
+- fstab_entry_exists = any(fe.fs_file == mountpoint for fe in storage_info.fstab)
++ fstab_entry_exists = any(fe.fs_file.rstrip('/') == mountpoint for fe in storage_info.fstab)
+
+ if mount_entry_exists and not fstab_entry_exists:
+ inhibit_upgrade_due_non_persistent_mount(mountpoint)
+diff --git a/repos/system_upgrade/common/actors/checkpersistentmounts/tests/test_checkpersistentmounts.py b/repos/system_upgrade/common/actors/checkpersistentmounts/tests/test_checkpersistentmounts.py
+index fd6b3da3..14ce4e97 100644
+--- a/repos/system_upgrade/common/actors/checkpersistentmounts/tests/test_checkpersistentmounts.py
++++ b/repos/system_upgrade/common/actors/checkpersistentmounts/tests/test_checkpersistentmounts.py
+@@ -11,6 +11,9 @@ MOUNT_ENTRY = MountEntry(name='/dev/sdaX', tp='ext4', mount='/var/lib/leapp', op
+ FSTAB_ENTRY = FstabEntry(fs_spec='', fs_file='/var/lib/leapp', fs_vfstype='',
+ fs_mntops='defaults', fs_freq='0', fs_passno='0')
+
++FSTAB_ENTRY_TRAIL_SLASH = FstabEntry(fs_spec='', fs_file='/var/lib/leapp/', fs_vfstype='',
++ fs_mntops='defaults', fs_freq='0', fs_passno='0')
++
+
+ @pytest.mark.parametrize(
+ ('storage_info', 'should_inhibit'),
+@@ -27,6 +30,10 @@ FSTAB_ENTRY = FstabEntry(fs_spec='', fs_file='/var/lib/leapp', fs_vfstype='',
+ StorageInfo(mount=[MOUNT_ENTRY], fstab=[FSTAB_ENTRY]),
+ False
+ ),
++ (
++ StorageInfo(mount=[MOUNT_ENTRY], fstab=[FSTAB_ENTRY_TRAIL_SLASH]),
++ False
++ ),
+ ]
+ )
+ def test_var_lib_leapp_non_persistent_is_detected(monkeypatch, storage_info, should_inhibit):
diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
-index 56a94b5d..46c5d9b6 100755
+index 56a94b5d..758e1dfa 100755
--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
+++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/do-upgrade.sh
+@@ -282,7 +282,7 @@ do_upgrade() {
+ local dirname
+ dirname="$("$NEWROOT/bin/dirname" "$NEWROOT$LEAPP_FAILED_FLAG_FILE")"
+ [ -d "$dirname" ] || mkdir "$dirname"
+-
++
+ echo >&2 "Creating file $NEWROOT$LEAPP_FAILED_FLAG_FILE"
+ echo >&2 "Warning: Leapp upgrade failed and there is an issue blocking the upgrade."
+ echo >&2 "Please file a support case with /var/log/leapp/leapp-upgrade.log attached"
@@ -390,4 +390,3 @@ getarg 'rd.break=leapp-logs' 'rd.upgrade.break=leapp-finish' && {
sync
mount -o "remount,$old_opts" "$NEWROOT"
exit $result
-
+diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/module-setup.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/module-setup.sh
+index 45f98148..3f656d63 100755
+--- a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/module-setup.sh
++++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/module-setup.sh
+@@ -104,6 +104,12 @@ install() {
+ # script to actually run the upgrader binary
+ inst_hook upgrade 50 "$_moddir/do-upgrade.sh"
+
++ # The initqueue check script ensures that all requested devices are present
++ # and can be mounted. The initqueue is normally left once the rootfs (and
++ # possibly /usr) is mounted, but in this case we require the whole fstab to
++ # be mounted under /sysroot. Without the script, the initqueue is left too early.
++ inst_hook initqueue/finished 99 "$moddir/upgrade-mount-wait-check.sh"
++
+ #NOTE: some clean up?.. ideally, everything should be inside the leapp*
+ #NOTE: current *.service is changed so in case we would like to use the
+ # hook, we will have to modify it back
+diff --git a/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/upgrade-mount-wait-check.sh b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/upgrade-mount-wait-check.sh
+new file mode 100755
+index 00000000..02097b01
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/commonleappdracutmodules/files/dracut/85sys-upgrade-redhat/upgrade-mount-wait-check.sh
+@@ -0,0 +1,43 @@
++#!/bin/bash
++
++# shellcheck disable=SC1091 # The file must be always present to boot the system
++type getarg >/dev/null 2>&1 || . /lib/dracut-lib.sh
++
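++# Initqueue 'finished' check: keep the initqueue running until every block
++# device referenced by a What=/dev/... mount unit required by local-fs.target
++# exists (bind mounts are skipped). The script exits non-zero while any such
++# device is still missing, so dracut does not leave the initqueue too early.
++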
++log_debug() {
++ # TODO(pstodulk): The arg is probably not needed
++ getarg 'rd.upgrade.debug' && echo >&2 "Upgrade Initqueue Debug: $1"
++}
++
++
++check_reqs_in_dir() {
++ log_debug "Check resources from: $1"
++ result=0
++ # shellcheck disable=SC2045 # Iterating over ls should be fine (there should be no whitespaces)
++ for fname in $(ls -1 "$1"); do
++ # We grep for What=/dev explicitly to exclude bind mounting units
++ resource_path=$(grep "^What=/dev/" "$1/$fname" | cut -d "=" -f2-)
++ if [ -z "$resource_path" ]; then
++ # Grep found no match, meaning that the unit is mounting something different than a block device
++ continue
++ fi
++
++ grep -E "^Options=.*bind.*" "$1/$fname" &>/dev/null
++ is_bindmount=$?
++ if [ $is_bindmount -eq 0 ]; then
++ # The unit contains Options=...,bind,..., or Options=...,rbind,... so it is a bind mount -> skip
++ continue
++ fi
++
++ if [ ! -e "$resource_path" ]; then
++ log_debug "Waiting for missing resource: '$resource_path'"
++ result=1
++ fi
++ done
++
++ return $result
++}
++
++SYSTEMD_DIR="/usr/lib/systemd/system"
++LOCAL_FS_MOUNT_DIR="$SYSTEMD_DIR/local-fs.target.requires"
++
++check_reqs_in_dir "$LOCAL_FS_MOUNT_DIR"
+diff --git a/repos/system_upgrade/common/actors/convert/securebootinhibit/actor.py b/repos/system_upgrade/common/actors/convert/securebootinhibit/actor.py
+new file mode 100644
+index 00000000..53f41e71
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/convert/securebootinhibit/actor.py
+@@ -0,0 +1,19 @@
++from leapp.actors import Actor
++from leapp.libraries.actor import securebootinhibit
++from leapp.models import FirmwareFacts
++from leapp.reporting import Report
++from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
++
++
++class SecureBootInhibit(Actor):
++ """
++ Inhibit the conversion if Secure Boot is enabled.
++ """
++
++ name = 'secure_boot_inhibit'
++ consumes = (FirmwareFacts,)
++ produces = (Report,)
++ tags = (IPUWorkflowTag, ChecksPhaseTag)
++
++ def process(self):
++ securebootinhibit.process()
+diff --git a/repos/system_upgrade/common/actors/convert/securebootinhibit/libraries/securebootinhibit.py b/repos/system_upgrade/common/actors/convert/securebootinhibit/libraries/securebootinhibit.py
+new file mode 100644
+index 00000000..5edb9fa2
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/convert/securebootinhibit/libraries/securebootinhibit.py
+@@ -0,0 +1,42 @@
++from leapp import reporting
++from leapp.exceptions import StopActorExecutionError
++from leapp.libraries.common.config import is_conversion
++from leapp.libraries.stdlib import api
++from leapp.models import FirmwareFacts
++
++
++def process():
++ if not is_conversion():
++ return
++
++ ff = next(api.consume(FirmwareFacts), None)
++ if not ff:
++ raise StopActorExecutionError(
++ "Could not identify system firmware",
++ details={"details": "Actor did not receive FirmwareFacts message."},
++ )
++
++ if ff.firmware == "efi" and ff.secureboot_enabled:
++ report = [
++ reporting.Title(
++ "Detected enabled Secure Boot when trying to convert the system"
++ ),
++ reporting.Summary(
++ "Conversion to a different Linux distribution is not possible"
++ " when the Secure Boot is enabled. Artifacts of the target"
++ " Linux distribution are signed by keys that are not accepted"
++ " by the source Linux distribution."
++ ),
++ reporting.Severity(reporting.Severity.HIGH),
++ reporting.Groups([reporting.Groups.INHIBITOR, reporting.Groups.BOOT]),
++ # TODO some link
++ reporting.Remediation(
++ hint="Disable Secure Boot to be able to convert the system to"
++ " a different Linux distribution. Then re-enable Secure Boot"
++ " again after the upgrade process is finished successfully."
++ " Check instructions for your current OS, or hypervisor in"
++ " case of virtual machines, for more information how to"
++ " disable Secure Boot."
++ ),
++ ]
++ reporting.create_report(report)
+diff --git a/repos/system_upgrade/common/actors/convert/securebootinhibit/tests/test_securebootinhibit.py b/repos/system_upgrade/common/actors/convert/securebootinhibit/tests/test_securebootinhibit.py
+new file mode 100644
+index 00000000..340e6b16
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/convert/securebootinhibit/tests/test_securebootinhibit.py
+@@ -0,0 +1,58 @@
++import pytest
++
++from leapp import reporting
++from leapp.libraries.actor import securebootinhibit
++from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked
++from leapp.libraries.stdlib import api
++from leapp.models import FirmwareFacts
++
++
++@pytest.mark.parametrize(
++ 'ff,is_conversion,should_inhibit', [
++ # conversion, secureboot enabled = inhibit
++ (
++ FirmwareFacts(firmware='efi', ppc64le_opal=None, secureboot_enabled=True),
++ True,
++ True
++ ),
++ (
++ FirmwareFacts(firmware='efi', ppc64le_opal=None, secureboot_enabled=True),
++ False,
++ False
++ ),
++ # bios is ok
++ (
++ FirmwareFacts(firmware='bios', ppc64le_opal=None, secureboot_enabled=False),
++ False,
++ False
++ ),
++ # bios is ok during conversion too
++ (
++ FirmwareFacts(firmware='bios', ppc64le_opal=None, secureboot_enabled=False),
++ True,
++ False
++ ),
++ (
++ FirmwareFacts(firmware='efi', ppc64le_opal=None, secureboot_enabled=False),
++ True,
++ False
++ ),
++ (
++ FirmwareFacts(firmware='efi', ppc64le_opal=None, secureboot_enabled=False),
++ False,
++ False
++ ),
++ ]
++)
++def test_process(monkeypatch, ff, is_conversion, should_inhibit):
++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[ff]))
++ monkeypatch.setattr(reporting, "create_report", create_report_mocked())
++ monkeypatch.setattr(securebootinhibit, "is_conversion", lambda: is_conversion)
++
++ securebootinhibit.process()
++
++ if should_inhibit:
++ assert reporting.create_report.called == 1
++ assert reporting.Groups.INHIBITOR in reporting.create_report.report_fields['groups']
++ else:
++ assert not reporting.create_report.called
+diff --git a/repos/system_upgrade/common/actors/convert/updateefi/actor.py b/repos/system_upgrade/common/actors/convert/updateefi/actor.py
+new file mode 100644
+index 00000000..4c97ebd7
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/convert/updateefi/actor.py
+@@ -0,0 +1,25 @@
++from leapp.actors import Actor
++from leapp.libraries.actor import updateefi
++from leapp.models import FirmwareFacts
++from leapp.reporting import Report
++from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag
++
++
++class UpdateEfiEntry(Actor):
++ """
++ Update EFI directory and entry during conversion.
++
++ During conversion, removes the leftover source distro EFI directory on the ESP
++ (EFI System Partition) and its EFI boot entry. It also adds a new boot
++ entry for the target distro.
++
++ This actor does nothing when not converting.
++ """
++
++ name = "update_efi"
++ consumes = (FirmwareFacts,)
++ produces = (Report,)
++ tags = (ApplicationsPhaseTag, IPUWorkflowTag)
++
++ def process(self):
++ updateefi.process()
+diff --git a/repos/system_upgrade/common/actors/convert/updateefi/libraries/updateefi.py b/repos/system_upgrade/common/actors/convert/updateefi/libraries/updateefi.py
+new file mode 100644
+index 00000000..1f300125
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/convert/updateefi/libraries/updateefi.py
+@@ -0,0 +1,230 @@
++import errno
++import os
++
++from leapp import reporting
++from leapp.exceptions import StopActorExecutionError
++from leapp.libraries.common import efi
++from leapp.libraries.common.config import architecture, get_source_distro_id, get_target_distro_id, is_conversion
++from leapp.libraries.common.distro import distro_id_to_pretty_name, get_distro_efidir_canon_path
++from leapp.libraries.stdlib import api
++
++
++def _get_target_efi_bin_path():
++ # Sorted by priority.
++ # NOTE: The shim-x64 package providing the shimx64.efi binary can be removed when
++ # not using secure boot; grubx64.efi should always be present (provided by
++ # grub-efi-x64).
++ # WARN: However, it is expected that the shim is installed on the system to comply
++ # with the official guidelines.
++ #
++ # TODO: There are usually 2 more shim* files which appear unused on a fresh system:
++ # - shim.efi - seems like it's the same as shimx64.efi
++ # - shim64-.efi - ???
++ # What about them?
++ efibins_by_arch = {
++ architecture.ARCH_X86_64: ("shimx64.efi", "grubx64.efi"),
++ architecture.ARCH_ARM64: ("shimaa64.efi", "grubaa64.efi"),
++ }
++
++ arch = api.current_actor().configuration.architecture
++ for filename in efibins_by_arch[arch]:
++ efi_dir = get_distro_efidir_canon_path(get_target_distro_id())
++ canon_path = os.path.join(efi_dir, filename)
++ if os.path.exists(canon_path):
++ return efi.canonical_path_to_efi_format(canon_path)
++
++ return None
++
++
++def _add_boot_entry_for_target(efibootinfo):
++ """
++ Create a new UEFI bootloader entry for the target system.
++
++ Return the newly created bootloader entry.
++ """
++ efi_bin_path = _get_target_efi_bin_path()
++ if not efi_bin_path:
++ # this is a fatal error as at least one of the possible EFI binaries
++ # should be present
++ raise efi.EFIError("Unable to detect any UEFI binary file.")
++
++ label = distro_id_to_pretty_name(get_target_distro_id())
++
++ existing_entry = efi.get_boot_entry(efibootinfo, label, efi_bin_path)
++ if existing_entry:
++ api.current_logger().debug(
++ "The '{}' UEFI bootloader entry is already present.".format(label)
++ )
++ return existing_entry
++
++ return efi.add_boot_entry(label, efi_bin_path)
++
++
++def _remove_boot_entry_for_source(efibootinfo):
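++ """
++ Remove the EFI boot entry the source system is currently booted from.
++
++ The boot entries are re-read first; if the entry under the recorded current
++ bootnum has disappeared or changed since the target entry was added, the
++ removal is skipped to avoid deleting an unrelated entry.
++ """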
++ efibootinfo_fresh = efi.EFIBootInfo()
++ source_entry = efibootinfo_fresh.entries.get(efibootinfo.current_bootnum, None)
++
++ if not source_entry:
++ api.current_logger().debug(
++ "The currently booted source distro EFI boot entry has been already"
++ " removed since the target entry has been added, skipping removal."
++ )
++ return
++
++ original_source_entry = efibootinfo.entries[source_entry.boot_number]
++
++ if source_entry != original_source_entry:
++ api.current_logger().debug(
++ "The boot entry with current bootnum has changed since the target"
++ " distro entry has been added, skipping removal."
++ )
++ return
++
++ efi.remove_boot_entry(source_entry.boot_number)
++
++
++def _try_remove_source_efi_dir():
++ """
++ Try to remove the source distro EFI directory
++
++ The directory is not removed if it's not empty, to preserve potential
++ custom files. In such a case a post-upgrade report is produced informing
++ the user to handle the leftover files.
++ """
++ efi_dir_source = get_distro_efidir_canon_path(get_source_distro_id())
++ if not os.path.exists(efi_dir_source):
++ api.current_logger().debug(
++ "Source distro EFI directory at {} does not exist, skipping removal.".format(efi_dir_source)
++ )
++ return
++
++ target_efi_dir = get_distro_efidir_canon_path(get_target_distro_id())
++ if efi_dir_source == target_efi_dir:
++ api.current_logger().debug(
++ "Source and target distros use the same '{}' EFI directory.".format(efi_dir_source)
++ )
++ return
++
++ try:
++ os.rmdir(efi_dir_source)
++ api.current_logger().debug(
++ "Deleted source system EFI directory at {}".format(efi_dir_source)
++ )
++ except FileNotFoundError:
++ api.current_logger().debug(
++ "Couldn't remove the source system EFI directory at {}: the directory no longer exists".format(
++ efi_dir_source
++ )
++ )
++ except OSError as e:
++ if e.errno == errno.ENOTEMPTY:
++ api.current_logger().debug(
++ "Didn't remove the source EFI directory {}, it does not exist".format(
++ efi_dir_source
++ )
++ )
++ summary = (
++ "During the upgrade, the EFI binaries and grub configuration files"
++ f" were migrated from the source OS EFI directory {efi_dir_source}"
++ f" to the target OS EFI directory {target_efi_dir}."
++ f" Leftover files were detected in {target_efi_dir}, review them"
++ " and migrate them manually."
++ )
++ reporting.create_report([
++ reporting.Title("Review leftover files in the source OS EFI directory"),
++ reporting.Summary(summary),
++ reporting.Groups([
++ reporting.Groups.BOOT,
++ reporting.Groups.POST,
++ ]),
++ reporting.Severity(reporting.Severity.LOW),
++ ])
++ else:
++ api.current_logger().error(
++ "Failed to remove the source system EFI directory at {}: {}".format(
++ efi_dir_source, e
++ )
++ )
++ summary = (
++ f"Removal of the source system EFI directory at {efi_dir_source} failed."
++ " Remove the directory manually if present."
++ )
++ reporting.create_report([
++ reporting.Title("Failed to remove source system EFI directory"),
++ reporting.Summary(summary),
++ reporting.Groups([
++ reporting.Groups.BOOT,
++ reporting.Groups.FAILURE,
++ reporting.Groups.POST,
++ ]),
++ reporting.Severity(reporting.Severity.LOW),
++ ])
++
++
++def _replace_boot_entries():
++ try:
++ efibootinfo = efi.EFIBootInfo()
++ target_entry = _add_boot_entry_for_target(efibootinfo)
++ # NOTE: this isn't strictly necessary as UEFI should set the next entry
++ # to be the first in the BootOrder. This is a workaround to make sure
++ # the "efi_finalization_fix" actor doesn't attempt to set BootNext to
++ # the original entry which will be deleted below.
++ efi.set_bootnext(target_entry.boot_number)
++ except efi.EFIError as e:
++ raise StopActorExecutionError(
++ "Failed to add UEFI boot entry for the target system",
++ details={"details": str(e)},
++ )
++
++ # NOTE: Some UEFI implementations, such as OVMF used in qemu, automatically
++ # add entries for EFI directories; such an entry is named after the EFI
++ # directory (so "redhat" on RHEL). However, if the UEFI doesn't add an entry
++ # after we fail to do so, it might render the OS "unbootable".
++ # Let's keep the source entry and directory as a backup in case we can't add
++ # the target entry.
++
++ _try_remove_source_efi_dir()
++
++ try:
++ # doesn't matter if the removal of source EFI dir failed, we don't want
++ # the source entry, we have the new one for target
++ _remove_boot_entry_for_source(efibootinfo)
++ except efi.EFIError as e:
++ api.current_logger().error("Failed to remove source distro EFI boot entry: {}".format(e))
++
++ # This is low severity; some UEFIs will automatically remove an entry
++ # whose EFI binary no longer exists; at least OVMF, used by qemu, does.
++ summary = (
++ "Removal of the source system UEFI boot entry failed."
++ " Check UEFI boot entries and manually remove it if it's still present."
++ )
++ reporting.create_report(
++ [
++ reporting.Title("Failed to remove source system EFI boot entry"),
++ reporting.Summary(summary),
++ reporting.Groups(
++ [
++ reporting.Groups.BOOT,
++ reporting.Groups.FAILURE,
++ reporting.Groups.POST,
++ ]
++ ),
++ reporting.Severity(reporting.Severity.LOW),
++ ]
++ )
++
++
++def process():
++ if not is_conversion():
++ return
++
++ if not architecture.matches_architecture(architecture.ARCH_X86_64, architecture.ARCH_ARM64):
++ return
++
++ if not efi.is_efi():
++ return
++
++ # NOTE no need to check whether we have the efibootmgr binary, the
++ # efi_check_boot actor does
++
++ _replace_boot_entries()
+diff --git a/repos/system_upgrade/common/actors/convert/updateefi/tests/test_updateefi.py b/repos/system_upgrade/common/actors/convert/updateefi/tests/test_updateefi.py
+new file mode 100644
+index 00000000..0ad31cc5
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/convert/updateefi/tests/test_updateefi.py
+@@ -0,0 +1,469 @@
++import copy
++import errno
++import os
++import types
++from unittest import mock
++
++import pytest
++
++from leapp.exceptions import StopActorExecutionError
++from leapp.libraries.actor import updateefi
++from leapp.libraries.common import efi
++from leapp.libraries.common.config import architecture
++from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked, logger_mocked
++from leapp.libraries.stdlib import api
++
++
++@pytest.fixture
++def mock_logger():
++ with mock.patch(
++ "leapp.libraries.stdlib.api.current_logger", new_callable=logger_mocked
++ ) as mock_logger:
++ yield mock_logger
++
++
++@pytest.fixture
++def mock_create_report():
++ with mock.patch(
++ "leapp.reporting.create_report", new_callable=create_report_mocked
++ ) as mock_create_report:
++ yield mock_create_report
++
++
++@pytest.mark.parametrize(
++ "arch, exist, expect",
++ [
++ (architecture.ARCH_X86_64, ["shimx64.efi", "grubx64.efi"], r"\EFI\redhat\shimx64.efi"),
++ (architecture.ARCH_X86_64, ["shimx64.efi"], r"\EFI\redhat\shimx64.efi"),
++ (architecture.ARCH_X86_64, ["grubx64.efi"], r"\EFI\redhat\grubx64.efi"),
++ (architecture.ARCH_X86_64, [], None),
++
++ (architecture.ARCH_ARM64, ["shimaa64.efi", "grubaa64.efi"], r"\EFI\redhat\shimaa64.efi"),
++ (architecture.ARCH_ARM64, ["shimaa64.efi"], r"\EFI\redhat\shimaa64.efi"),
++ (architecture.ARCH_ARM64, ["grubaa64.efi"], r"\EFI\redhat\grubaa64.efi"),
++ (architecture.ARCH_ARM64, [], None),
++ ]
++)
++def test__get_target_efi_bin_path(monkeypatch, arch, exist, expect):
++ # distro is not important, just make it look like conversion
++ curr_actor = CurrentActorMocked(arch=arch, src_distro="centos", dst_distro="rhel")
++ monkeypatch.setattr(api, "current_actor", curr_actor)
++
++ def mock_exists(path):
++ efidir = "/boot/efi/EFI/redhat"
++ return path in [os.path.join(efidir, p) for p in exist]
++
++ monkeypatch.setattr(os.path, "exists", mock_exists)
++
++ actual = updateefi._get_target_efi_bin_path()
++ assert actual == expect
++
++
++TEST_ADD_ENTRY_INPUTS = [
++ ("Red Hat Enterprise Linux", r"\EFI\redhat\shimx64.efi"),
++ ("Red Hat Enterprise Linux", r"\EFI\redhat\grubx64.efi"),
++ ("Centos Stream", r"\EFI\centos\grubx64.efi"),
++]
++
++
++@pytest.mark.parametrize("label, efi_bin_path", TEST_ADD_ENTRY_INPUTS)
++@mock.patch("leapp.libraries.common.efi.get_boot_entry")
++@mock.patch("leapp.libraries.common.efi.add_boot_entry")
++def test__add_boot_entry_for_target(
++ mock_add_boot_entry, mock_get_boot_entry, monkeypatch, label, efi_bin_path
++):
++ # need to mock this but it's unused because distro_id_to_pretty_name is mocked
++ monkeypatch.setattr(api, "current_actor", CurrentActorMocked(dst_distro="whatever"))
++ monkeypatch.setattr(updateefi, "distro_id_to_pretty_name", lambda _distro: label)
++ monkeypatch.setattr(updateefi, "_get_target_efi_bin_path", lambda: efi_bin_path)
++
++ mock_efibootinfo = mock.MagicMock(name="EFIBootInfo_instance")
++ entry = efi.EFIBootLoaderEntry("0003", label, True, efi_bin_path)
++ mock_get_boot_entry.return_value = None
++ mock_add_boot_entry.return_value = entry
++
++ assert entry == updateefi._add_boot_entry_for_target(mock_efibootinfo)
++
++ mock_get_boot_entry.assert_called_once_with(mock_efibootinfo, label, efi_bin_path)
++ mock_add_boot_entry.assert_called_once_with(label, efi_bin_path)
++
++
++@pytest.mark.parametrize("label, efi_bin_path", TEST_ADD_ENTRY_INPUTS)
++@mock.patch("leapp.libraries.common.efi.get_boot_entry")
++@mock.patch("leapp.libraries.common.efi.add_boot_entry")
++def test__add_boot_entry_for_target_already_exists(
++ mock_add_boot_entry, mock_get_boot_entry, monkeypatch, label, efi_bin_path
++):
++ # need to mock this but it's unused because distro_id_to_pretty_name is mocked
++ monkeypatch.setattr(api, "current_actor", CurrentActorMocked(dst_distro="whatever"))
++ monkeypatch.setattr(updateefi, "distro_id_to_pretty_name", lambda _distro: label)
++ monkeypatch.setattr(updateefi, "_get_target_efi_bin_path", lambda: efi_bin_path)
++
++ mock_efibootinfo = mock.MagicMock(name="EFIBootInfo_instance")
++ entry = efi.EFIBootLoaderEntry("0003", label, True, efi_bin_path)
++ mock_get_boot_entry.return_value = entry
++
++ out = updateefi._add_boot_entry_for_target(mock_efibootinfo)
++
++ assert out == entry
++ mock_get_boot_entry.assert_called_once_with(mock_efibootinfo, label, efi_bin_path)
++ mock_add_boot_entry.assert_not_called()
++
++
++def test__add_boot_entry_for_target_no_efi_bin(monkeypatch):
++ monkeypatch.setattr(updateefi, "_get_target_efi_bin_path", lambda: None)
++
++ with pytest.raises(efi.EFIError, match="Unable to detect any UEFI binary file."):
++ mock_efibootinfo = mock.MagicMock(name="EFIBootInfo_instance")
++ updateefi._add_boot_entry_for_target(mock_efibootinfo)
++
++
++class MockEFIBootInfo:
++
++ def __init__(self, entries, current_bootnum=None):
++ # just to have some entries even when we don't need the entries
++ other_entry = efi.EFIBootLoaderEntry(
++ "0001",
++ "UEFI: Built-in EFI Shell",
++ True,
++ "VenMedia(5023b95c-db26-429b-a648-bd47664c8012)..BO",
++ )
++ entries = entries + [other_entry]
++
++ self.boot_order = tuple(entry.boot_number for entry in entries)
++ self.current_bootnum = current_bootnum or self.boot_order[0]
++ self.next_bootnum = None
++ self.entries = {entry.boot_number: entry for entry in entries}
++
++
++TEST_SOURCE_ENTRY = efi.EFIBootLoaderEntry(
++ "0002", "Centos Stream", True, r"File(\EFI\centos\shimx64.efi)"
++)
++TEST_TARGET_ENTRY = efi.EFIBootLoaderEntry(
++ "0003", "Red Hat Enterprise Linux", True, r"File(\EFI\redhat\shimx64.efi)"
++)
++
++
++@mock.patch("leapp.libraries.common.efi.remove_boot_entry")
++@mock.patch("leapp.libraries.common.efi.EFIBootInfo")
++def test__remove_boot_entry_for_source(
++ mock_efibootinfo,
++ mock_remove_boot_entry,
++):
++ efibootinfo = MockEFIBootInfo([TEST_SOURCE_ENTRY], current_bootnum="0002")
++ mock_efibootinfo.return_value = MockEFIBootInfo(
++ [TEST_TARGET_ENTRY, TEST_SOURCE_ENTRY], current_bootnum="0002"
++ )
++
++ updateefi._remove_boot_entry_for_source(efibootinfo)
++
++ mock_efibootinfo.assert_called_once()
++ mock_remove_boot_entry.assert_called_once_with("0002")
++
++
++@mock.patch("leapp.libraries.common.efi.remove_boot_entry")
++@mock.patch("leapp.libraries.common.efi.EFIBootInfo")
++def test__remove_boot_entry_for_source_no_longer_exists(
++ mock_efibootinfo, mock_remove_boot_entry, mock_logger
++):
++ efibootinfo = MockEFIBootInfo([TEST_SOURCE_ENTRY], current_bootnum="0002")
++ mock_efibootinfo.return_value = MockEFIBootInfo(
++ [TEST_TARGET_ENTRY], current_bootnum="0002"
++ )
++
++ updateefi._remove_boot_entry_for_source(efibootinfo)
++
++ msg = (
++ "The currently booted source distro EFI boot entry has been already"
++ " removed since the target entry has been added, skipping removal."
++ )
++ assert msg in mock_logger.dbgmsg
++ mock_efibootinfo.assert_called_once()
++ mock_remove_boot_entry.assert_not_called()
++
++
++@mock.patch("leapp.libraries.common.efi.remove_boot_entry")
++@mock.patch("leapp.libraries.common.efi.EFIBootInfo")
++def test__remove_boot_entry_for_source_has_changed(
++ mock_efibootinfo, mock_remove_boot_entry, mock_logger
++):
++ efibootinfo = MockEFIBootInfo([TEST_SOURCE_ENTRY], current_bootnum="0002")
++ modified_source_entry = copy.copy(TEST_SOURCE_ENTRY)
++ modified_source_entry.efi_bin_source = r"File(\EFI\centos\grubx64.efi)"
++ mock_efibootinfo.return_value = MockEFIBootInfo(
++ [TEST_TARGET_ENTRY, modified_source_entry], current_bootnum="0002"
++ )
++
++ updateefi._remove_boot_entry_for_source(efibootinfo)
++
++ msg = (
++ "The boot entry with current bootnum has changed since the target"
++ " distro entry has been added, skipping removal."
++ )
++ assert msg in mock_logger.dbgmsg
++ mock_efibootinfo.assert_called_once()
++ mock_remove_boot_entry.assert_not_called()
++
++
++class TestRemoveSourceEFIDir:
++ SOURCE_EFIDIR = "/boot/efi/EFI/centos"
++ TARGET_EFIDIR = "/boot/efi/EFI/redhat"
++
++ @pytest.fixture(autouse=True)
++ def mock_current_actor(self): # pylint:disable=no-self-use
++ with mock.patch("leapp.libraries.stdlib.api.current_actor") as mock_current_actor:
++ mock_current_actor.return_value = CurrentActorMocked(
++ src_distro="centos", dst_distro="redhat"
++ )
++ yield
++
++ @mock.patch("os.path.exists")
++ @mock.patch("leapp.libraries.actor.updateefi.get_distro_efidir_canon_path")
++ @mock.patch("os.rmdir")
++ def test_success(
++ self, mock_rmdir, mock_efidir_path, mock_exists, mock_logger
++ ):
++ mock_efidir_path.side_effect = [self.SOURCE_EFIDIR, self.TARGET_EFIDIR]
++
++ updateefi._try_remove_source_efi_dir()
++
++ mock_exists.assert_called_once_with(self.SOURCE_EFIDIR)
++ mock_rmdir.assert_called_once_with(self.SOURCE_EFIDIR)
++ msg = f"Deleted source system EFI directory at {self.SOURCE_EFIDIR}"
++ assert msg in mock_logger.dbgmsg
++
++ @mock.patch("os.path.exists")
++ @mock.patch("leapp.libraries.actor.updateefi.get_distro_efidir_canon_path")
++ @mock.patch("os.rmdir")
++ def test__efi_dir_does_not_exist(
++ self, mock_rmdir, mock_efidir_path, mock_exists, mock_logger
++ ):
++ mock_efidir_path.return_value = self.SOURCE_EFIDIR
++ mock_exists.return_value = False
++
++ updateefi._try_remove_source_efi_dir()
++
++ mock_exists.assert_called_once_with(self.SOURCE_EFIDIR)
++ mock_rmdir.assert_not_called()
++ msg = f"Source distro EFI directory at {self.SOURCE_EFIDIR} does not exist, skipping removal."
++ assert msg in mock_logger.dbgmsg
++
++ @mock.patch("os.path.exists")
++ @mock.patch("leapp.libraries.actor.updateefi.get_distro_efidir_canon_path")
++ @mock.patch("os.rmdir")
++ def test_source_efi_dir_same_as_target(
++ self, mock_rmdir, mock_efidir_path, mock_exists, mock_logger
++ ):
++ """
++ Source and target dirs use the same directory
++ """
++ mock_efidir_path.side_effect = [self.TARGET_EFIDIR, self.TARGET_EFIDIR]
++ mock_exists.return_value = True
++
++ updateefi._try_remove_source_efi_dir()
++
++ mock_exists.assert_called_once_with(self.TARGET_EFIDIR)
++ mock_rmdir.assert_not_called()
++ msg = f"Source and target distros use the same '{self.TARGET_EFIDIR}' EFI directory."
++ assert msg in mock_logger.dbgmsg
++
++ @mock.patch("os.path.exists")
++ @mock.patch("leapp.libraries.actor.updateefi.get_distro_efidir_canon_path")
++ @mock.patch("os.rmdir")
++ def test_rmdir_fail(
++ self, mock_rmdir, mock_efidir_path, mock_exists, mock_logger, mock_create_report
++ ):
++ """
++ Test removal failures
++ """
++ mock_efidir_path.side_effect = [self.SOURCE_EFIDIR, self.TARGET_EFIDIR]
++ mock_rmdir.side_effect = OSError
++
++ updateefi._try_remove_source_efi_dir()
++
++ mock_exists.assert_called_once_with(self.SOURCE_EFIDIR)
++ mock_rmdir.assert_called_once_with(self.SOURCE_EFIDIR)
++ msg = f"Failed to remove the source system EFI directory at {self.SOURCE_EFIDIR}"
++ assert msg in mock_logger.errmsg[0]
++ assert mock_create_report.called == 1
++ title = "Failed to remove source system EFI directory"
++ assert mock_create_report.report_fields["title"] == title
++
++ @mock.patch("os.path.exists")
++ @mock.patch("leapp.libraries.actor.updateefi.get_distro_efidir_canon_path")
++ @mock.patch("os.rmdir")
++ def test_dir_no_longer_exists_failed_rmdir(
++ self, mock_rmdir, mock_efidir_path, mock_exists, mock_logger
++ ):
++ mock_efidir_path.side_effect = [self.SOURCE_EFIDIR, self.TARGET_EFIDIR]
++ mock_rmdir.side_effect = FileNotFoundError(
++ 2, "No such file or directory", self.SOURCE_EFIDIR
++ )
++
++ updateefi._try_remove_source_efi_dir()
++
++ mock_exists.assert_called_once_with(self.SOURCE_EFIDIR)
++ mock_rmdir.assert_called_once_with(self.SOURCE_EFIDIR)
++ msg = (
++ "Couldn't remove the source system EFI directory at"
++ f" {self.SOURCE_EFIDIR}: the directory no longer exists"
++ )
++ assert msg in mock_logger.dbgmsg[0]
++
++ @mock.patch("os.path.exists")
++ @mock.patch("leapp.libraries.actor.updateefi.get_distro_efidir_canon_path")
++ @mock.patch("os.rmdir")
++ def test_dir_not_empty(
++ self, mock_rmdir, mock_efidir_path, mock_exists, mock_logger, mock_create_report
++ ):
++ """
++ Test that the directory is not removed if there are any leftover files
++
++ The distro provided files in the efi dir are usually removed during the RPM
++ upgrade transaction (shim and grub own them). If there are any leftover
++ files, such as custom user files, the directory should be preserved and
++ report created.
++ """
++ mock_efidir_path.side_effect = [self.SOURCE_EFIDIR, self.TARGET_EFIDIR]
++ mock_rmdir.side_effect = OSError(
++ errno.ENOTEMPTY, os.strerror(errno.ENOTEMPTY), self.SOURCE_EFIDIR
++ )
++
++ updateefi._try_remove_source_efi_dir()
++
++ mock_rmdir.assert_called_once_with(self.SOURCE_EFIDIR)
++ mock_exists.assert_called_once_with(self.SOURCE_EFIDIR)
++ msg = "Didn't remove the source EFI directory {}, it does not exist".format(
++ self.SOURCE_EFIDIR
++ )
++ assert msg in mock_logger.dbgmsg[0]
++ assert mock_create_report.called == 1
++ title = "Review leftover files in the source OS EFI directory"
++ assert mock_create_report.report_fields["title"] == title
++
++
++@pytest.mark.parametrize(
++ "is_conversion, arch, is_efi, should_skip",
++ [
++ # conversion, is efi
++ (True, architecture.ARCH_X86_64, True, False),
++ (True, architecture.ARCH_ARM64, True, False),
++ (True, architecture.ARCH_PPC64LE, True, True),
++ (True, architecture.ARCH_S390X, True, True),
++ # conversion, not efi
++ (True, architecture.ARCH_X86_64, False, True),
++ (True, architecture.ARCH_ARM64, False, True),
++ (True, architecture.ARCH_PPC64LE, False, True),
++ (True, architecture.ARCH_S390X, False, True),
++ # not conversion, is efi
++ (False, architecture.ARCH_X86_64, True, True),
++ (False, architecture.ARCH_ARM64, True, True),
++ (False, architecture.ARCH_PPC64LE, True, True),
++ (False, architecture.ARCH_S390X, True, True),
++ # not conversion, not efi
++ (False, architecture.ARCH_X86_64, False, True),
++ (False, architecture.ARCH_ARM64, False, True),
++ (False, architecture.ARCH_PPC64LE, False, True),
++ (False, architecture.ARCH_S390X, False, True),
++ ],
++)
++@mock.patch("leapp.libraries.actor.updateefi._replace_boot_entries")
++def test_process_skip(
++ mock_replace_boot_entries, monkeypatch, is_conversion, arch, is_efi, should_skip
++):
++ monkeypatch.setattr(api, "current_actor", CurrentActorMocked(arch=arch))
++ monkeypatch.setattr(updateefi, "is_conversion", lambda: is_conversion)
++ monkeypatch.setattr(efi, "is_efi", lambda: is_efi)
++
++ updateefi.process()
++
++ if should_skip:
++ mock_replace_boot_entries.assert_not_called()
++ else:
++ mock_replace_boot_entries.assert_called_once()
++
++
++class TestReplaceBootEntries:
++
++ @pytest.fixture
++ def mocks(self): # pylint:disable=no-self-use
++ UPDATE_EFI = 'leapp.libraries.actor.updateefi'
++ EFI_LIB = 'leapp.libraries.common.efi'
++ with mock.patch(f'{UPDATE_EFI}._try_remove_source_efi_dir') as remove_source_dir, \
++ mock.patch(f'{UPDATE_EFI}._remove_boot_entry_for_source') as remove_source_entry, \
++ mock.patch(f'{UPDATE_EFI}._add_boot_entry_for_target') as add_target_entry, \
++ mock.patch(f'{EFI_LIB}.set_bootnext') as set_bootnext, \
++ mock.patch(f'{EFI_LIB}.EFIBootInfo') as efibootinfo:
++
++ # default for happy path
++ efibootinfo_obj = mock.MagicMock(name="EFIBootInfo_instance")
++ efibootinfo.return_value = efibootinfo_obj
++
++ entry = mock.MagicMock(name="target_entry")
++ entry.boot_number = "0003"
++ add_target_entry.return_value = entry
++
++ yield types.SimpleNamespace(
++ EFIBootInfo=efibootinfo,
++ set_bootnext=set_bootnext,
++ add_boot_entry_for_target=add_target_entry,
++ try_remove_source_efi_dir=remove_source_dir,
++ remove_boot_entry_for_source=remove_source_entry,
++ logger=mock_logger,
++ )
++
++ def test__fail_remove_source_entry( # pylint:disable=no-self-use
++ self, mocks, mock_logger, mock_create_report
++ ):
++ mocks.remove_boot_entry_for_source.side_effect = efi.EFIError
++
++ updateefi._replace_boot_entries()
++
++ msg = "Failed to remove source distro EFI boot entry"
++ assert msg in mock_logger.errmsg[0]
++
++ assert mock_create_report.called == 1
++ title = "Failed to remove source system EFI boot entry"
++ assert mock_create_report.report_fields["title"] == title
++
++ @pytest.mark.parametrize(
++ "which_fail", ["EFIBootInfo", "add_target", "set_bootnext"]
++ )
++ def test__fail_add_target_entry( # pylint:disable=no-self-use
++ self, mocks, mock_logger, mock_create_report, which_fail
++ ):
++ if which_fail == "EFIBootInfo":
++ mocks.EFIBootInfo.side_effect = efi.EFIError
++ elif which_fail == "add_target":
++ mocks.add_boot_entry_for_target.side_effect = efi.EFIError
++ elif which_fail == "set_bootnext":
++ mocks.set_bootnext.side_effect = efi.EFIError
++
++ with pytest.raises(StopActorExecutionError):
++ updateefi._replace_boot_entries()
++
++ mocks.try_remove_source_efi_dir.assert_not_called()
++ mocks.remove_boot_entry_for_source.assert_not_called()
++ assert not mock_create_report.called
++
++ def test__replace_boot_entries_success( # pylint:disable=no-self-use
++ self, mocks, mock_logger
++ ):
++ """Test that operations are carried out in the right order"""
++ mgr = mock.MagicMock()
++ mgr.attach_mock(mocks.EFIBootInfo, "EFIBootInfo")
++ mgr.attach_mock(mocks.set_bootnext, "set_bootnext")
++ mgr.attach_mock(mocks.add_boot_entry_for_target, "add_target_entry")
++ mgr.attach_mock(mocks.remove_boot_entry_for_source, "remove_source_entry")
++ mgr.attach_mock(mocks.try_remove_source_efi_dir, "remove_source_efidir")
++
++ updateefi._replace_boot_entries()
++
++ expected_sequence = [
++ mock.call.EFIBootInfo(),
++ mock.call.add_target_entry(efi.EFIBootInfo.return_value),
++ mock.call.set_bootnext(mocks.add_boot_entry_for_target.return_value.boot_number),
++ mock.call.remove_source_efidir(),
++ mock.call.remove_source_entry(efi.EFIBootInfo.return_value),
++ ]
++ assert mgr.mock_calls == expected_sequence
diff --git a/repos/system_upgrade/common/actors/distributionsignedrpmscanner/actor.py b/repos/system_upgrade/common/actors/distributionsignedrpmscanner/actor.py
index 003f3fc5..9e7bbf4a 100644
--- a/repos/system_upgrade/common/actors/distributionsignedrpmscanner/actor.py
@@ -7059,6 +8942,217 @@ index 582a5821..18f2c33f 100644
+ to_reinstall=list(to_reinstall),
modules_to_reset=list(modules_to_reset.values()),
modules_to_enable=list(modules_to_enable.values())))
+diff --git a/repos/system_upgrade/common/actors/initramfs/fix_nvmf_initqueue_rules/actor.py b/repos/system_upgrade/common/actors/initramfs/fix_nvmf_initqueue_rules/actor.py
+new file mode 100644
+index 00000000..4a3cd85d
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/initramfs/fix_nvmf_initqueue_rules/actor.py
+@@ -0,0 +1,22 @@
++from leapp.actors import Actor
++from leapp.libraries.actor import fix_nvmf_initqueue_rules as fix_nvmf_initqueue_rules_lib
++from leapp.models import LiveModeConfig, NVMEInfo, TargetUserSpaceInfo, UpgradeInitramfsTasks
++from leapp.tags import InterimPreparationPhaseTag, IPUWorkflowTag
++
++
++class FixNvmfInitqueueRules(Actor):
++ """
++ Replace the nvmf dracut module's initqueue rules with our own version.
++
++ The original 95-nvmf-initqueue.rules file in the nvmf dracut module
++ calls initqueue, which might not be running when the udev event lands.
++ Therefore, we call `nvme connect-all` directly when the udev event is triggered.
++ """
++
++ name = 'fix_nvmf_initqueue_rules'
++ consumes = (LiveModeConfig, NVMEInfo, TargetUserSpaceInfo)
++ produces = (UpgradeInitramfsTasks,)
++ tags = (IPUWorkflowTag, InterimPreparationPhaseTag)
++
++ def process(self):
++ fix_nvmf_initqueue_rules_lib.replace_nvmf_initqueue_rules()
+diff --git a/repos/system_upgrade/common/actors/initramfs/fix_nvmf_initqueue_rules/files/95-nvmf-initqueue.rules b/repos/system_upgrade/common/actors/initramfs/fix_nvmf_initqueue_rules/files/95-nvmf-initqueue.rules
+new file mode 100644
+index 00000000..52a77fef
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/initramfs/fix_nvmf_initqueue_rules/files/95-nvmf-initqueue.rules
+@@ -0,0 +1,7 @@
++#
++# Original nvmf-initqueue rules called initqueue, which might not be running when the udev event lands.
++# Therefore, we call nvme connect-all directly.
++
++ACTION=="change", SUBSYSTEM=="fc", ENV{FC_EVENT}=="nvmediscovery", \
++ ENV{NVMEFC_HOST_TRADDR}=="*", ENV{NVMEFC_TRADDR}=="*", \
++ RUN+="/usr/sbin/nvme connect-all --transport=fc --traddr=$env{NVMEFC_TRADDR} --host-traddr=$env{NVMEFC_HOST_TRADDR}"
+diff --git a/repos/system_upgrade/common/actors/initramfs/fix_nvmf_initqueue_rules/libraries/fix_nvmf_initqueue_rules.py b/repos/system_upgrade/common/actors/initramfs/fix_nvmf_initqueue_rules/libraries/fix_nvmf_initqueue_rules.py
+new file mode 100644
+index 00000000..9fd74ea9
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/initramfs/fix_nvmf_initqueue_rules/libraries/fix_nvmf_initqueue_rules.py
+@@ -0,0 +1,66 @@
++import os
++import shutil
++
++from leapp.exceptions import StopActorExecutionError
++from leapp.libraries.stdlib import api
++from leapp.models import LiveModeConfig, NVMEInfo, TargetUserSpaceInfo, UpgradeInitramfsTasks
++
++NVMF_DRACUT_MODULE_DIR = '/usr/lib/dracut/modules.d/95nvmf'
++NVMF_INITQUEUE_RULES_FILENAME = '95-nvmf-initqueue.rules'
++NVMF_INITQUEUE_RULES_PATH = os.path.join(NVMF_DRACUT_MODULE_DIR, NVMF_INITQUEUE_RULES_FILENAME)
++
++
++def _get_rules_file_path():
++ """
++ Get the path to the fixed 95-nvmf-initqueue.rules file bundled with this actor.
++ """
++ return api.get_actor_file_path(NVMF_INITQUEUE_RULES_FILENAME)
++
++
++def is_livemode_enabled() -> bool:
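++ """Return True when a LiveModeConfig message is present and live mode is enabled."""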
++ livemode_config = next(api.consume(LiveModeConfig), None)
++ if livemode_config and livemode_config.is_enabled:
++ return True
++ return False
++
++
++def replace_nvmf_initqueue_rules():
++ """
++ Replace the nvmf dracut module's initqueue rules in the target userspace.
++ """
++ nvme_info = next(api.consume(NVMEInfo), None)
++ if not nvme_info or not nvme_info.devices:
++ api.current_logger().debug('No NVMe devices detected, skipping nvmf initqueue rules replacement.')
++ return
++
++ if is_livemode_enabled():
++ api.current_logger().debug('LiveMode is enabled. Modifying initqueue stop condition is not required.')
++ return
++
++ userspace_info = next(api.consume(TargetUserSpaceInfo), None)
++ source_rules_path = _get_rules_file_path()
++
++ target_rules_path = os.path.join(userspace_info.path, NVMF_INITQUEUE_RULES_PATH.lstrip('/'))
++ target_dir = os.path.dirname(target_rules_path)
++
++ # Check if the nvmf dracut module directory exists in the target userspace
++ if not os.path.isdir(target_dir):
++ api.current_logger().debug(
++ 'The nvmf dracut module directory {} does not exist in target userspace. '
++ 'Skipping rules replacement.'.format(target_dir)
++ )
++ return
++
++ api.current_logger().info(
++ 'Replacing {} in target userspace with fixed version.'.format(NVMF_INITQUEUE_RULES_PATH)
++ )
++
++ try:
++ shutil.copy2(source_rules_path, target_rules_path)
++ api.current_logger().debug(
++ 'Successfully copied {} to {}'.format(source_rules_path, target_rules_path)
++ )
++ except (IOError, OSError) as e:
++ raise StopActorExecutionError('Failed to copy nvmf initqueue rules to target userspace: {}'.format(e))
++
++ api.produce(UpgradeInitramfsTasks()) # To enforce ordering of actors
+diff --git a/repos/system_upgrade/common/actors/initramfs/fix_nvmf_initqueue_rules/tests/test_fix_nvmf_initqueue_rules.py b/repos/system_upgrade/common/actors/initramfs/fix_nvmf_initqueue_rules/tests/test_fix_nvmf_initqueue_rules.py
+new file mode 100644
+index 00000000..93bc0285
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/initramfs/fix_nvmf_initqueue_rules/tests/test_fix_nvmf_initqueue_rules.py
+@@ -0,0 +1,92 @@
++import os
++import tempfile
++
++from leapp.libraries.actor import fix_nvmf_initqueue_rules
++from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked, produce_mocked
++from leapp.libraries.stdlib import api
++from leapp.models import LiveModeConfig, NVMEDevice, NVMEInfo, TargetUserSpaceInfo, UpgradeInitramfsTasks
++
++
++def test_replace_nvmf_initqueue_rules_no_nvme_devices(monkeypatch):
++ """Test that replacement is skipped when no NVMe devices are detected."""
++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked())
++ monkeypatch.setattr(api, 'current_logger', logger_mocked())
++
++ fix_nvmf_initqueue_rules.replace_nvmf_initqueue_rules()
++
++ assert any('No NVMe devices detected' in msg for msg in api.current_logger.dbgmsg)
++
++
++def test_replace_nvmf_initqueue_rules_livemode_enabled(monkeypatch):
++ """Test that replacement is skipped when no LiveMode is enabled."""
++ livemode_info = LiveModeConfig(
++ is_enabled=True,
++ squashfs_fullpath=''
++ )
++
++ nvme_device = NVMEDevice(
++ sys_class_path='/sys/class/nvme/nvme0',
++ name='nvme0',
++ transport='fc'
++ )
++ nvme_info = NVMEInfo(devices=[nvme_device], hostid='test-hostid', hostnqn='test-hostnqn')
++
++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[livemode_info, nvme_info]))
++ monkeypatch.setattr(api, 'current_logger', logger_mocked())
++
++ fix_nvmf_initqueue_rules.replace_nvmf_initqueue_rules()
++
++ assert any('LiveMode is enabled.' in msg for msg in api.current_logger.dbgmsg)
++
++
++def test_replace_nvmf_initqueue_rules_empty_nvme_devices(monkeypatch):
++ """Test that replacement is skipped when NVMEInfo has no devices."""
++ nvme_info = NVMEInfo(devices=[], hostid=None, hostnqn=None)
++
++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[nvme_info]))
++ monkeypatch.setattr(api, 'current_logger', logger_mocked())
++
++ fix_nvmf_initqueue_rules.replace_nvmf_initqueue_rules()
++
++ assert any('No NVMe devices detected' in msg for msg in api.current_logger.dbgmsg)
++
++
++def test_replace_nvmf_initqueue_rules_success(monkeypatch):
++ """Test successful replacement of nvmf initqueue rules."""
++ with tempfile.TemporaryDirectory(prefix='leapp_test_') as tmpdir:
++ nvmf_dir = os.path.join(tmpdir, 'usr/lib/dracut/modules.d/95nvmf')
++ os.makedirs(nvmf_dir)
++
++ target_rules_path = os.path.join(nvmf_dir, '95-nvmf-initqueue.rules')
++ with open(target_rules_path, 'w') as f:
++ f.write('# original rules')
++
++ source_file = os.path.join(tmpdir, 'source_rules')
++ with open(source_file, 'w') as f:
++ f.write('# fixed rules content')
++
++ nvme_device = NVMEDevice(
++ sys_class_path='/sys/class/nvme/nvme0',
++ name='nvme0',
++ transport='fc'
++ )
++ nvme_info = NVMEInfo(devices=[nvme_device], hostid='test-hostid', hostnqn='test-hostnqn')
++ userspace_info = TargetUserSpaceInfo(path=tmpdir, scratch='/tmp/scratch', mounts='/tmp/mounts')
++
++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[nvme_info, userspace_info]))
++ monkeypatch.setattr(api, 'current_logger', logger_mocked())
++ monkeypatch.setattr(api, 'produce', produce_mocked())
++ monkeypatch.setattr(api, 'get_actor_file_path', lambda x: source_file)
++
++ fix_nvmf_initqueue_rules.replace_nvmf_initqueue_rules()
++
++ # Verify the file was replaced
++ with open(target_rules_path) as f:
++ content = f.read()
++
++ assert content == '# fixed rules content'
++
++ # Verify UpgradeInitramfsTasks was produced
++ assert api.produce.called == 1
++ produced_msg = api.produce.model_instances[0]
++ assert isinstance(produced_msg, UpgradeInitramfsTasks)
diff --git a/repos/system_upgrade/common/actors/livemode/emit_livemode_userspace_requirements/libraries/emit_livemode_userspace_requirements.py b/repos/system_upgrade/common/actors/livemode/emit_livemode_userspace_requirements/libraries/emit_livemode_userspace_requirements.py
index 4ecf682b..80d38cb0 100644
--- a/repos/system_upgrade/common/actors/livemode/emit_livemode_userspace_requirements/libraries/emit_livemode_userspace_requirements.py
@@ -7130,6 +9224,96 @@ index e24aa366..6eb71fee 100644
EnablementTestCase(env_vars={'LEAPP_UNSUPPORTED': '1'},
arch=architecture.ARCH_ARM64, pkgs=tuple(),
result=EnablementResult.RAISE),
+diff --git a/repos/system_upgrade/common/actors/livemode/removeliveimage/libraries/remove_live_image.py b/repos/system_upgrade/common/actors/livemode/removeliveimage/libraries/remove_live_image.py
+index 5bb7e40f..a3718dcf 100644
+--- a/repos/system_upgrade/common/actors/livemode/removeliveimage/libraries/remove_live_image.py
++++ b/repos/system_upgrade/common/actors/livemode/removeliveimage/libraries/remove_live_image.py
+@@ -21,5 +21,11 @@ def remove_live_image():
+
+ try:
+ os.unlink(artifacts.squashfs_path)
++ except FileNotFoundError:
++ api.current_logger().debug(
++ 'The %s file does not exist. Most likely it has been removed before; this usually happens with "leapp rerun".',
++ artifacts.squashfs_path
++ )
++ return
+ except OSError as error:
+- api.current_logger().warning('Failed to remove %s with error: %s', artifacts.squashfs, error)
++ api.current_logger().warning('Failed to remove %s with error: %s', artifacts.squashfs_path, error)
+diff --git a/repos/system_upgrade/common/actors/livemode/removeliveimage/tests/test_remove_live_image.py b/repos/system_upgrade/common/actors/livemode/removeliveimage/tests/test_remove_live_image.py
+index 4d6aa821..21a5fb93 100644
+--- a/repos/system_upgrade/common/actors/livemode/removeliveimage/tests/test_remove_live_image.py
++++ b/repos/system_upgrade/common/actors/livemode/removeliveimage/tests/test_remove_live_image.py
+@@ -1,10 +1,11 @@
++import errno
+ import functools
+ import os
+
+ import pytest
+
+ from leapp.libraries.actor import remove_live_image as remove_live_image_lib
+-from leapp.libraries.common.testutils import CurrentActorMocked
++from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked
+ from leapp.libraries.stdlib import api
+ from leapp.models import LiveModeArtifacts, LiveModeConfig
+
+@@ -22,23 +23,51 @@ _LiveModeConfig = functools.partial(LiveModeConfig, squashfs_fullpath='configure
+ )
+ )
+ def test_remove_live_image(monkeypatch, livemode_config, squashfs_path, should_unlink_be_called):
+- """ Test whether live-mode image (as found in LiveModeArtifacts) is removed. """
+-
++ """
++ Test whether live-mode image (as found in LiveModeArtifacts) is removed.
++ """
+ messages = []
+ if livemode_config:
+ messages.append(livemode_config)
+ if squashfs_path:
+ messages.append(LiveModeArtifacts(squashfs_path=squashfs_path))
+
+- monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=messages))
+-
+ def unlink_mock(path):
+ if should_unlink_be_called:
+ assert path == squashfs_path
+ return
+ assert False # If we should not call unlink and we call it then fail the test
++
+ monkeypatch.setattr(os, 'unlink', unlink_mock)
++ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=messages))
++
++ remove_live_image_lib.remove_live_image()
++
++
++@pytest.mark.parametrize('do_file_exists', (True, False))
++def test_remove_live_image_oserror(monkeypatch, do_file_exists):
++ """
++ Test that errors are properly handled when trying to unlink the file.
++ """
++ messages = [
++ _LiveModeConfig(is_enabled=True),
++ LiveModeArtifacts(squashfs_path='/var/lib/leapp/upgrade.img')
++ ]
++
++ def unlink_mock(dummyPath):
++ if do_file_exists:
++ raise OSError('OSError happened :)')
++ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), '/squashfs')
+
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=messages))
++ monkeypatch.setattr(api, 'current_logger', logger_mocked())
++ monkeypatch.setattr(os, 'unlink', unlink_mock)
+
+ remove_live_image_lib.remove_live_image()
++
++ if do_file_exists:
++ assert api.current_logger.warnmsg
++ assert not api.current_logger.dbgmsg
++ else:
++ assert not api.current_logger.warnmsg
++ assert api.current_logger.dbgmsg
diff --git a/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py b/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py
index 32e4527b..1e595e9a 100644
--- a/repos/system_upgrade/common/actors/missinggpgkeysinhibitor/libraries/missinggpgkey.py
@@ -7745,6 +9929,247 @@ index 772b33e6..95f664ad 100644
+
+ assert 'Failed to parse custom repository definition' in str(exc_info.value)
+ assert scancustomrepofile.CUSTOM_REPO_PATH in exc_info.value.details['hint']
+diff --git a/repos/system_upgrade/common/actors/scangrubdevice/actor.py b/repos/system_upgrade/common/actors/scangrubdevice/actor.py
+index cb6be7ea..b62ac50f 100644
+--- a/repos/system_upgrade/common/actors/scangrubdevice/actor.py
++++ b/repos/system_upgrade/common/actors/scangrubdevice/actor.py
+@@ -1,4 +1,5 @@
+ from leapp.actors import Actor
++from leapp.exceptions import StopActorExecutionError
+ from leapp.libraries.common import grub
+ from leapp.libraries.common.config import architecture
+ from leapp.models import GrubInfo
+@@ -19,7 +20,14 @@ class ScanGrubDeviceName(Actor):
+ if architecture.matches_architecture(architecture.ARCH_S390X):
+ return
+
+- devices = grub.get_grub_devices()
++ try:
++ devices = grub.get_grub_devices()
++ except grub.GRUBDeviceError as err:
++ # TODO(pstodulk): Tests missing
++ raise StopActorExecutionError(
++ message='Cannot detect GRUB devices',
++ details={'details': str(err)}
++ )
+ grub_info = GrubInfo(orig_devices=devices)
+ grub_info.orig_device_name = devices[0] if len(devices) == 1 else None
+ self.produce(grub_info)
+diff --git a/repos/system_upgrade/common/actors/scannvme/actor.py b/repos/system_upgrade/common/actors/scannvme/actor.py
+new file mode 100644
+index 00000000..a4f7aefe
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/scannvme/actor.py
+@@ -0,0 +1,25 @@
++from leapp.actors import Actor
++from leapp.libraries.actor import scannvme
++from leapp.models import NVMEInfo
++from leapp.tags import FactsPhaseTag, IPUWorkflowTag
++
++
++class ScanNVMe(Actor):
++ """
++ Detect existing NVMe devices.
++
++ The detection is performed by checking the content of the /sys/class/nvme/
++ directory, where all NVMe devices should be listed. Additional information
++ is collected from the files present under each specific device.
++
++ Namely, the NVMe transport type and the device name are collected at this
++ moment.
++ """
++
++ name = 'scan_nvme'
++ consumes = ()
++ produces = (NVMEInfo,)
++ tags = (FactsPhaseTag, IPUWorkflowTag)
++
++ def process(self):
++ scannvme.process()
+diff --git a/repos/system_upgrade/common/actors/scannvme/libraries/scannvme.py b/repos/system_upgrade/common/actors/scannvme/libraries/scannvme.py
+new file mode 100644
+index 00000000..ef77171d
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/scannvme/libraries/scannvme.py
+@@ -0,0 +1,88 @@
++import os
++
++from leapp.libraries.common.utils import read_file
++from leapp.libraries.stdlib import api
++from leapp.models import NVMEDevice, NVMEInfo
++
++NVME_CLASS_DIR = '/sys/class/nvme'
++NVME_CONF_DIR = '/etc/nvme'
++NVME_CONF_HOSTID = '/etc/nvme/hostid'
++NVME_CONF_HOSTNQN = '/etc/nvme/hostnqn'
++
++
++class NVMEMissingTransport(Exception):
++ def __init__(self, message):
++ super().__init__(message)
++ self.message = message
++
++
++def _get_transport_type(device_path):
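++ """
++ Return the NVMe transport type read from the device's 'transport' sysfs file.
++
++ Raise NVMEMissingTransport when the file is missing or its content is empty.
++ """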
++ tpath = os.path.join(device_path, 'transport')
++ if not os.path.exists(tpath):
++ raise NVMEMissingTransport(f'The {tpath} file is missing.')
++
++ transport = read_file(tpath).strip()
++ if not transport:
++ raise NVMEMissingTransport('The transport type is not defined.')
++
++ return transport
++
++
++def scan_device(device_name):
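++ """
++ Return an NVMEDevice model for the given device name under /sys/class/nvme.
++
++ Return None when the device path is not a directory or when the transport
++ type cannot be detected.
++ """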
++ device_path = os.path.join(NVME_CLASS_DIR, device_name)
++ if not os.path.isdir(device_path):
++ api.current_logger().warning(
++ 'Cannot scan NVMe device: the following path is not a directory: {0}'.format(device_path)
++ )
++ return None
++
++ try:
++ transport = _get_transport_type(device_path)
++ except NVMEMissingTransport as e:
++ # unexpected; seatbelt - skipping tests
++ api.current_logger().warning(
++ 'Skipping {0} NVMe device: Cannot detect transport type: {1}'.format(device_name, e.message)
++ )
++ return None
++
++ return NVMEDevice(
++ sys_class_path=device_path,
++ name=device_name,
++ transport=transport
++ )
++
++
++def get_hostid(fpath=NVME_CONF_HOSTID):
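++ """Return the stripped content of the NVMe hostid file, or None when the file is missing."""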
++ if not os.path.exists(fpath):
++ api.current_logger().debug('NVMe hostid config file is missing.')
++ return None
++ return read_file(fpath).strip()
++
++
++def get_hostnqn(fpath=NVME_CONF_HOSTNQN):
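++ """Return the stripped content of the NVMe hostnqn file, or None when the file is missing."""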
++ if not os.path.exists(fpath):
++ api.current_logger().debug('NVMe hostnqn config file is missing.')
++ return None
++ return read_file(fpath).strip()
++
++
++def process():
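++ """Scan /sys/class/nvme and produce an NVMEInfo message when NVMe devices are detected."""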
++ if not os.path.isdir(NVME_CLASS_DIR):
++ api.current_logger().debug(
++ 'NVMe is not active: {0} does not exist.'.format(NVME_CLASS_DIR)
++ )
++ return
++
++ devices = [scan_device(device_name) for device_name in os.listdir(NVME_CLASS_DIR)]
++ # drop possible None values from the list
++ devices = [dev for dev in devices if dev is not None]
++ if not devices:
++ # NOTE(pstodulk): This could be suspicious possibly.
++ api.current_logger().warning('No NVMe device detected but NVMe seems active.')
++ return
++
++ api.produce(NVMEInfo(
++ devices=devices,
++ hostnqn=get_hostnqn(),
++ hostid=get_hostid(),
++ ))
+diff --git a/repos/system_upgrade/common/actors/scannvme/tests/test_scannvme.py b/repos/system_upgrade/common/actors/scannvme/tests/test_scannvme.py
+new file mode 100644
+index 00000000..97b3980b
+--- /dev/null
++++ b/repos/system_upgrade/common/actors/scannvme/tests/test_scannvme.py
+@@ -0,0 +1,84 @@
++import pytest
++
++from leapp.libraries.actor import scannvme
++from leapp.models import NVMEDevice
++
++
++def test_get_transport_type_file_missing(monkeypatch):
++ """Test that NVMEMissingTransport is raised when transport file does not exist."""
++ monkeypatch.setattr('os.path.join', lambda *args: '/sys/class/nvme/nvme0/transport')
++ monkeypatch.setattr('os.path.exists', lambda path: False)
++
++ with pytest.raises(scannvme.NVMEMissingTransport):
++ scannvme._get_transport_type('/sys/class/nvme/nvme0')
++
++
++def test_get_transport_type_file_empty(monkeypatch):
++ """Test that NVMEMissingTransport is raised when transport file is empty."""
++ monkeypatch.setattr('os.path.join', lambda *args: '/sys/class/nvme/nvme0/transport')
++ monkeypatch.setattr('os.path.exists', lambda path: True)
++ monkeypatch.setattr(
++ 'leapp.libraries.actor.scannvme.read_file',
++ lambda path: ' \n'
++ )
++
++ with pytest.raises(scannvme.NVMEMissingTransport):
++ scannvme._get_transport_type('/sys/class/nvme/nvme0')
++
++
++@pytest.mark.parametrize('transport_value', ['pcie', 'tcp', 'rdma', 'fc', 'loop'])
++def test_get_transport_type_valid(monkeypatch, transport_value):
++ """Test that transport type is correctly read from the file."""
++ monkeypatch.setattr('os.path.join', lambda *args: '/sys/class/nvme/nvme0/transport')
++ monkeypatch.setattr('os.path.exists', lambda path: True)
++ monkeypatch.setattr(scannvme, 'read_file', lambda path: transport_value + '\n')
++
++ result = scannvme._get_transport_type('/sys/class/nvme/nvme0')
++ assert result == transport_value
++
++
++def test_scan_device_transport_detection_fails(monkeypatch):
++ """Test that None is returned when transport detection fails."""
++ monkeypatch.setattr('os.path.join', lambda *args: '/'.join(args))
++ monkeypatch.setattr('os.path.isdir', lambda path: True)
++ monkeypatch.setattr('os.path.exists', lambda path: False)
++
++ result = scannvme.scan_device('nvme0')
++
++ assert result is None
++
++
++@pytest.mark.parametrize('device_name,transport', [
++ ('nvme0', 'pcie'),
++ ('nvme1', 'tcp'),
++ ('nvme2', 'rdma'),
++])
++def test_scan_device_successful(monkeypatch, device_name, transport):
++ """Test that NVMEDevice is returned for a valid device."""
++ expected_device_path = '/sys/class/nvme/{}'.format(device_name)
++ expected_transport_path = '{}/transport'.format(expected_device_path)
++
++ def mock_isdir(path):
++ assert path == expected_device_path
++ return True
++
++ def mock_exists(path):
++ assert path == expected_transport_path
++ return True
++
++ def mock_read_file(path):
++ assert path == expected_transport_path
++ return transport + '\n'
++
++ monkeypatch.setattr('os.path.join', lambda *args: '/'.join(args))
++ monkeypatch.setattr('os.path.isdir', mock_isdir)
++ monkeypatch.setattr('os.path.exists', mock_exists)
++ monkeypatch.setattr(scannvme, 'read_file', mock_read_file)
++
++ result = scannvme.scan_device(device_name)
++
++ assert result is not None
++ assert isinstance(result, NVMEDevice)
++ assert result.name == device_name
++ assert result.transport == transport
++ assert result.sys_class_path == expected_device_path
diff --git a/repos/system_upgrade/common/actors/scanvendorrepofiles/actor.py b/repos/system_upgrade/common/actors/scanvendorrepofiles/actor.py
new file mode 100644
index 00000000..a5e481cb
@@ -8235,7 +10660,7 @@ index 59b12c87..85d4a09e 100644
def process(self):
self.produce(systemfacts.get_sysctls_status())
diff --git a/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py b/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py
-index f16cea1d..b14e2a09 100644
+index f16cea1d..ba7bdb82 100644
--- a/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py
+++ b/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py
@@ -221,14 +221,13 @@ def get_repositories_status():
@@ -8259,6 +10684,194 @@ index f16cea1d..b14e2a09 100644
def get_selinux_status():
+@@ -295,12 +294,35 @@ def get_firewalls_status():
+ )
+
+
++def _get_secure_boot_state():
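++ """
++ Return True/False when mokutil reports Secure Boot as enabled/disabled.
++
++ Return None when the system does not support Secure Boot and False when
++ mokutil cannot be executed (Secure Boot is then assumed to be disabled).
++ Raise StopActorExecutionError on any other mokutil failure.
++ """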
++ try:
++ stdout = run(['mokutil', '--sb-state'])['stdout']
++ return 'enabled' in stdout
++ except CalledProcessError as e:
++ if "doesn't support Secure Boot" in e.stderr:
++ return None
++
++ raise StopActorExecutionError('Failed to determine SecureBoot state: {}'.format(e))
++ except OSError as e:
++ # shim depends on mokutil, if it's not installed assume SecureBoot is disabled
++ api.current_logger().debug(
++ 'Failed to execute mokutil, assuming SecureBoot is disabled: {}'.format(e)
++ )
++ return False
++
++
+ def get_firmware():
+ firmware = 'efi' if os.path.isdir('/sys/firmware/efi') else 'bios'
++
++ ppc64le_opal = None
+ if architecture.matches_architecture(architecture.ARCH_PPC64LE):
+- ppc64le_opal = bool(os.path.isdir('/sys/firmware/opal/'))
+- return FirmwareFacts(firmware=firmware, ppc64le_opal=ppc64le_opal)
+- return FirmwareFacts(firmware=firmware)
++ ppc64le_opal = os.path.isdir('/sys/firmware/opal/')
++
++ is_secureboot = None
++ if firmware == 'efi':
++ is_secureboot = _get_secure_boot_state()
++
++ return FirmwareFacts(firmware=firmware, ppc64le_opal=ppc64le_opal, secureboot_enabled=is_secureboot)
+
+
+ @aslist
+diff --git a/repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts.py b/repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts.py
+index 16405b15..22ee7b7b 100644
+--- a/repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts.py
++++ b/repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts.py
+@@ -1,20 +1,27 @@
+ import grp
++import os
+ import pwd
++from unittest import mock
+
+ import pytest
+
+ from leapp.exceptions import StopActorExecutionError
++from leapp.libraries.actor import systemfacts
+ from leapp.libraries.actor.systemfacts import (
++ _get_secure_boot_state,
+ _get_system_groups,
+ _get_system_users,
+ anyendswith,
+ anyhasprefix,
+ aslist,
++ get_firmware,
+ get_repositories_status
+ )
+ from leapp.libraries.common import repofileutils
+-from leapp.libraries.common.testutils import logger_mocked
+-from leapp.libraries.stdlib import api
++from leapp.libraries.common.config import architecture
++from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked
++from leapp.libraries.stdlib import api, CalledProcessError
++from leapp.models import FirmwareFacts
+ from leapp.snactor.fixture import current_actor_libraries
+
+
+@@ -138,3 +145,114 @@ def test_failed_parsed_repofiles(monkeypatch):
+
+ with pytest.raises(StopActorExecutionError):
+ get_repositories_status()
++
++
++@pytest.mark.parametrize('is_enabled', (True, False))
++@mock.patch('leapp.libraries.actor.systemfacts.run')
++def test_get_secure_boot_state_ok(mocked_run: mock.MagicMock, is_enabled):
++ mocked_run.return_value = {
++ 'stdout': f'SecureBoot {"enabled" if is_enabled else "disabled"}'
++ }
++
++ out = _get_secure_boot_state()
++
++ assert out == is_enabled
++ mocked_run.assert_called_once_with(['mokutil', '--sb-state'])
++
++
++@mock.patch('leapp.libraries.actor.systemfacts.run')
++def test_get_secure_boot_state_no_mokutil(mocked_run: mock.MagicMock):
++ mocked_run.side_effect = OSError
++
++ out = _get_secure_boot_state()
++
++ assert out is False
++ mocked_run.assert_called_once_with(['mokutil', '--sb-state'])
++
++
++@mock.patch('leapp.libraries.actor.systemfacts.run')
++def test_get_secure_boot_state_not_supported(mocked_run: mock.MagicMock):
++ cmd = ['mokutil', '--sb-state']
++ result = {
++ 'stderr': "This system doesn't support Secure Boot",
++ 'exit_code': 255,
++ }
++ mocked_run.side_effect = CalledProcessError(
++ "Command mokutil --sb-state failed with exit code 255.",
++ cmd,
++ result
++ )
++
++ out = _get_secure_boot_state()
++
++ assert out is None
++ mocked_run.assert_called_once_with(cmd)
++
++
++@mock.patch('leapp.libraries.actor.systemfacts.run')
++def test_get_secure_boot_state_failed(mocked_run: mock.MagicMock):
++ cmd = ['mokutil', '--sb-state']
++ result = {
++ 'stderr': 'EFI variables are not supported on this system',
++ 'exit_code': 1,
++ }
++ mocked_run.side_effect = CalledProcessError(
++ "Command mokutil --sb-state failed with exit code 1.",
++ cmd,
++ result
++ )
++
++ with pytest.raises(
++ StopActorExecutionError,
++ match='Failed to determine SecureBoot state'
++ ):
++ _get_secure_boot_state()
++
++ mocked_run.assert_called_once_with(cmd)
++
++
++def _ff(firmware, ppc64le_opal, is_secureboot):
++ return FirmwareFacts(
++ firmware=firmware,
++ ppc64le_opal=ppc64le_opal,
++ secureboot_enabled=is_secureboot
++ )
++
++
++@pytest.mark.parametrize(
++ "has_sys_efi, has_sys_opal, is_ppc, secboot_state, expect",
++ [
++ # 1. Standard BIOS on x86
++ (False, False, False, None, _ff("bios", None, None)),
++ # 2. EFI on x86 with Secure Boot Enabled
++ (True, False, False, True, _ff("efi", None, True)),
++ # 3. EFI on x86 with Secure Boot Disabled
++ (True, False, False, False, _ff("efi", None, False)),
++ # 4. PPC64LE with OPAL (No EFI)
++ (False, True, True, None, _ff("bios", True, None)),
++ # 5. PPC64LE without OPAL (No EFI)
++ (False, False, True, None, _ff("bios", False, None)),
++ # 6. EFI on PPC64LE with OPAL
++ (True, True, True, True, _ff("efi", True, True)),
++ ]
++)
++def test_get_firmware_logic(
++ has_sys_efi, has_sys_opal, is_ppc, secboot_state, expect
++):
++ with mock.patch('os.path.isdir') as mock_isdir, \
++ mock.patch('leapp.libraries.stdlib.api.current_actor') as mock_curr_actor, \
++ mock.patch('leapp.libraries.actor.systemfacts._get_secure_boot_state') as mock_get_sb_state:
++
++ mock_isdir.side_effect = lambda path: {
++ '/sys/firmware/efi': has_sys_efi,
++ '/sys/firmware/opal/': has_sys_opal
++ }.get(path, False)
++
++ mock_curr_actor.return_value = CurrentActorMocked(
++ arch=architecture.ARCH_PPC64LE if is_ppc else architecture.ARCH_X86_64
++ )
++ mock_get_sb_state.return_value = secboot_state
++
++ result = get_firmware()
++
++ assert result == expect
diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
index c825c731..a428aa98 100644
--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
@@ -8364,6 +10977,44 @@ index 6377f767..4c5420f6 100644
return pubkeys
+diff --git a/repos/system_upgrade/common/actors/updategrubcore/libraries/updategrubcore.py b/repos/system_upgrade/common/actors/updategrubcore/libraries/updategrubcore.py
+index 6a116db4..cc9bf280 100644
+--- a/repos/system_upgrade/common/actors/updategrubcore/libraries/updategrubcore.py
++++ b/repos/system_upgrade/common/actors/updategrubcore/libraries/updategrubcore.py
+@@ -1,4 +1,5 @@
+ from leapp import reporting
++from leapp.exceptions import StopActorExecution
+ from leapp.libraries.common import grub
+ from leapp.libraries.common.config import architecture
+ from leapp.libraries.stdlib import api, CalledProcessError, config, run
+@@ -61,7 +62,11 @@ def process():
+ return
+ ff = next(api.consume(FirmwareFacts), None)
+ if ff and ff.firmware == 'bios':
+- grub_devs = grub.get_grub_devices()
++ try:
++ grub_devs = grub.get_grub_devices()
++ except grub.GRUBDeviceError as err:
++ api.current_logger().warning('Failed to detect GRUB devices: %s', err)
++ raise StopActorExecution()
+ if grub_devs:
+ update_grub_core(grub_devs)
+ else:
+diff --git a/repos/system_upgrade/common/actors/updategrubcore/tests/test_updategrubcore.py b/repos/system_upgrade/common/actors/updategrubcore/tests/test_updategrubcore.py
+index 93816103..2262e326 100644
+--- a/repos/system_upgrade/common/actors/updategrubcore/tests/test_updategrubcore.py
++++ b/repos/system_upgrade/common/actors/updategrubcore/tests/test_updategrubcore.py
+@@ -107,9 +107,7 @@ def test_update_grub_nogrub_system_ibmz(monkeypatch):
+
+ def test_update_grub_nogrub_system(monkeypatch):
+ def get_grub_devices_mocked():
+- # this is not very well documented, but the grub.get_grub_devices function raises a StopActorExecution on error
+- # (whether that's caused by determining root partition or determining the block device a given partition is on
+- raise StopActorExecution()
++ raise grub.GRUBDeviceError()
+
+ monkeypatch.setattr(grub, 'get_grub_devices', get_grub_devices_mocked)
+ monkeypatch.setattr(reporting, 'create_report', testutils.create_report_mocked())
diff --git a/repos/system_upgrade/common/actors/vendorreposignaturescanner/actor.py b/repos/system_upgrade/common/actors/vendorreposignaturescanner/actor.py
new file mode 100644
index 00000000..dbf86974
@@ -8779,11 +11430,41 @@ index 63910fe0..4e8b380d 100644
self.base.distro_sync()
if self.opts.tid[0] == 'check':
+diff --git a/repos/system_upgrade/common/libraries/config/__init__.py b/repos/system_upgrade/common/libraries/config/__init__.py
+index 396c524a..8a2b4e35 100644
+--- a/repos/system_upgrade/common/libraries/config/__init__.py
++++ b/repos/system_upgrade/common/libraries/config/__init__.py
+@@ -141,3 +141,19 @@ def get_target_distro_id():
+ :rtype: str
+ """
+ return api.current_actor().configuration.distro.target
++
++
++def is_conversion():
++ """
++ Return whether a conversion is happening during the upgrade.
++
++ Conversions in means that a target distro different from source distro was
++ specified.
++
++ This is a wrapper which compares source and target distro IDs. This can also
++ be helpful for testing.
++
++ :return: True if converting False otherwise
++ :rtype: bool
++ """
++ return get_source_distro_id() != get_target_distro_id()
diff --git a/repos/system_upgrade/common/libraries/distro.py b/repos/system_upgrade/common/libraries/distro.py
-index 04e553ac..fbf5a1b5 100644
+index 04e553ac..1544def0 100644
--- a/repos/system_upgrade/common/libraries/distro.py
+++ b/repos/system_upgrade/common/libraries/distro.py
-@@ -7,6 +7,7 @@ from leapp.libraries.common.config import get_target_distro_id
+@@ -2,11 +2,12 @@ import json
+ import os
+
+ from leapp.exceptions import StopActorExecutionError
+-from leapp.libraries.common import repofileutils, rhsm
++from leapp.libraries.common import efi, repofileutils, rhsm
+ from leapp.libraries.common.config import get_target_distro_id
from leapp.libraries.common.config.architecture import ARCH_ACCEPTED, ARCH_X86_64
from leapp.libraries.common.config.version import get_target_major_version
from leapp.libraries.stdlib import api
@@ -8840,8 +11521,44 @@ index 04e553ac..fbf5a1b5 100644
distro_repofiles = _get_distro_repofiles(distro, major_version, arch)
if not distro_repofiles:
# TODO: a different way of signaling an error would be preferred (e.g. returning None),
+@@ -208,3 +228,35 @@ def get_distro_repoids(context, distro, major_version, arch):
+ distro_repoids.extend([repo.repoid for repo in rfile.data])
+
+ return sorted(distro_repoids)
++
++
++def distro_id_to_pretty_name(distro_id):
++ """
++ Get pretty name for the given distro id.
++
++ The pretty name is what is found in the NAME field of /etc/os-release.
++ """
++ return {
++ "rhel": "Red Hat Enterprise Linux",
++ "centos": "CentOS Stream",
++ "almalinux": "AlmaLinux",
++ }[distro_id]
++
++
++def get_distro_efidir_canon_path(distro_id):
++ """
++ Get canonical path to the distro EFI directory in the EFI mountpoint.
++
++ NOTE: The path might be incorrect for distros not properly enabled for IPU,
++ when enabling new distros in the codebase, make sure the path is correct.
++ """
++ if distro_id == "rhel":
++ return os.path.join(efi.EFI_MOUNTPOINT, "EFI", "redhat")
++
++ if distro_id == "almalinux":
++ return os.path.join(efi.EFI_MOUNTPOINT, "EFI", "almalinux")
++
++ if distro_id == "centos":
++ return os.path.join(efi.EFI_MOUNTPOINT, "EFI", "centos")
++
++ return os.path.join(efi.EFI_MOUNTPOINT, "EFI", distro_id)
diff --git a/repos/system_upgrade/common/libraries/dnfplugin.py b/repos/system_upgrade/common/libraries/dnfplugin.py
-index 7e1fd497..9e2ba376 100644
+index 7e1fd497..a42af5ca 100644
--- a/repos/system_upgrade/common/libraries/dnfplugin.py
+++ b/repos/system_upgrade/common/libraries/dnfplugin.py
@@ -89,6 +89,7 @@ def build_plugin_data(target_repoids, debug, test, tasks, on_aws):
@@ -8852,6 +11569,430 @@ index 7e1fd497..9e2ba376 100644
'modules_to_enable': sorted(['{}:{}'.format(m.name, m.stream) for m in tasks.modules_to_enable]),
},
'dnf_conf': {
+@@ -270,26 +271,24 @@ def _transaction(context, stage, target_repoids, tasks, plugin_info, xfs_info,
+ # allow handling new RHEL 9 syscalls by systemd-nspawn
+ env = {'SYSTEMD_SECCOMP': '0'}
+
+- # We need to reset modules twice, once before we check, and the second time before we actually perform
+- # the upgrade. Not more often as the modules will be reset already.
+- if stage in ('check', 'upgrade') and tasks.modules_to_reset:
+- # We shall only reset modules that are not going to be enabled
+- # This will make sure it is so
+- modules_to_reset = {(module.name, module.stream) for module in tasks.modules_to_reset}
+- modules_to_enable = {(module.name, module.stream) for module in tasks.modules_to_enable}
+- module_reset_list = [module[0] for module in modules_to_reset - modules_to_enable]
+- # Perform module reset
+- cmd = ['/usr/bin/dnf', 'module', 'reset', '--enabled', ] + module_reset_list
+- cmd += ['--disablerepo', '*', '-y', '--installroot', '/installroot']
+- try:
+- context.call(
+- cmd=cmd_prefix + cmd + common_params,
+- callback_raw=utils.logging_handler,
+- env=env
+- )
+- except (CalledProcessError, OSError):
+- api.current_logger().debug('Failed to reset modules via dnf with an error. Ignoring.',
+- exc_info=True)
++ if tasks.modules_to_reset:
++ # We shall only reset modules that are not going to be enabled
++ # This will make sure it is so
++ modules_to_reset = {(module.name, module.stream) for module in tasks.modules_to_reset}
++ modules_to_enable = {(module.name, module.stream) for module in tasks.modules_to_enable}
++ module_reset_list = [module[0] for module in modules_to_reset - modules_to_enable]
++ # Perform module reset
++ cmd = ['/usr/bin/dnf', 'module', 'reset', '--enabled', ] + module_reset_list
++ cmd += ['--disablerepo', '*', '-y', '--installroot', '/installroot']
++ try:
++ context.call(
++ cmd=cmd_prefix + cmd + common_params,
++ callback_raw=utils.logging_handler,
++ env=env
++ )
++ except (CalledProcessError, OSError):
++ api.current_logger().debug('Failed to reset modules via dnf with an error. Ignoring.',
++ exc_info=True)
+
+ cmd = [
+ '/usr/bin/dnf',
+diff --git a/repos/system_upgrade/common/libraries/efi.py b/repos/system_upgrade/common/libraries/efi.py
+new file mode 100644
+index 00000000..841219ce
+--- /dev/null
++++ b/repos/system_upgrade/common/libraries/efi.py
+@@ -0,0 +1,337 @@
++import os
++import re
++
++from leapp.libraries.common.partitions import _get_partition_for_dir, blk_dev_from_partition, get_partition_number
++from leapp.libraries.stdlib import api, CalledProcessError, run
++
++EFI_MOUNTPOINT = '/boot/efi/'
++"""The path to the required mountpoint for ESP."""
++
++
++class EFIError(Exception):
++ """
++ Exception raised when EFI operation failed.
++ """
++
++
++def canonical_path_to_efi_format(canonical_path):
++ r"""
++ Transform the canonical path to the UEFI format.
++
++ e.g. /boot/efi/EFI/redhat/shimx64.efi -> \EFI\redhat\shimx64.efi
++ (just single backslash; so the string needs to be put into apostrophes
++ when used for /usr/sbin/efibootmgr cmd)
++
++ The path has to start with /boot/efi, otherwise the path is invalid for UEFI.
++ """
++
++ # We want to keep the last "/" of the EFI_MOUNTPOINT
++ return canonical_path.replace(EFI_MOUNTPOINT[:-1], "").replace("/", "\\")
++
++
++class EFIBootLoaderEntry:
++ """
++ Representation of a UEFI boot loader entry.
++ """
++
++ def __init__(self, boot_number, label, active, efi_bin_source):
++ self.boot_number = boot_number
++ """Expected string, e.g. '0001'. """
++
++ self.label = label
++ """Label of the UEFI entry. E.g. 'Redhat'"""
++
++ self.active = active
++ """True when the UEFI entry is active (asterisk is present next to the boot number)"""
++
++ self.efi_bin_source = efi_bin_source
++ """Source of the UEFI binary.
++
++ It could contain various values, e.g.:
++ FvVol(7cb8bdc9-f8eb-4f34-aaea-3ee4af6516a1)/FvFile(462caa21-7614-4503-836e-8ab6f4662331)
++ HD(1,GPT,28c77f6b-3cd0-4b22-985f-c99903835d79,0x800,0x12c000)/File(\\EFI\\redhat\\shimx64.efi)
++ PciRoot(0x0)/Pci(0x2,0x3)/Pci(0x0,0x0)N.....YM....R,Y.
++ """
++
++ def __eq__(self, other):
++ return all(
++ [
++ self.boot_number == other.boot_number,
++ self.label == other.label,
++ self.active == other.active,
++ self.efi_bin_source == other.efi_bin_source,
++ ]
++ )
++
++ def __ne__(self, other):
++ return not self.__eq__(other)
++
++ def __repr__(self):
++ return 'EFIBootLoaderEntry({boot_number}, {label}, {active}, {efi_bin_source})'.format(
++ boot_number=repr(self.boot_number),
++ label=repr(self.label),
++ active=repr(self.active),
++ efi_bin_source=repr(self.efi_bin_source)
++ )
++
++ def is_referring_to_file(self):
++ """Return True when the boot source is a file.
++
++ Some sources could refer e.g. to PXE boot. Return true if the source
++ refers to a file ("ends with /File(...path...)")
++
++ Does not matter whether the file exists or not.
++ """
++ return '/File(\\' in self.efi_bin_source
++
++ @staticmethod
++ def _efi_path_to_canonical(efi_path):
++ return os.path.join(EFI_MOUNTPOINT, efi_path.replace("\\", "/").lstrip("/"))
++
++ def get_canonical_path(self):
++ """Return expected canonical path for the referred UEFI bin or None.
++
++ Return None in case the entry is not referring to any UEFI bin
++ (e.g. when it refers to a PXE boot).
++ """
++ if not self.is_referring_to_file():
++ return None
++ match = re.search(r'/File\((?P<path>\\.*)\)$', self.efi_bin_source)
++ return EFIBootLoaderEntry._efi_path_to_canonical(match.groups('path')[0])
++
++
++class EFIBootInfo:
++ """
++ Data about the current UEFI boot configuration.
++
++ :raises EFIError: when unable to obtain info about the UEFI configuration,
++ BIOS is detected or ESP is not mounted where expected.
++ """
++
++ def __init__(self):
++ if not is_efi():
++ raise EFIError('Unable to collect data about UEFI on a BIOS system.')
++ try:
++ result = run(['/usr/sbin/efibootmgr', '-v'])
++ except CalledProcessError:
++ raise EFIError('Unable to get information about UEFI boot entries.')
++
++ bootmgr_output = result['stdout']
++
++ self.current_bootnum = None
++ """The boot number (str) of the current boot."""
++ self.next_bootnum = None
++ """The boot number (str) of the next boot."""
++ self.boot_order = tuple()
++ """The tuple of the UEFI boot loader entries in the boot order."""
++ self.entries = {}
++ """The UEFI boot loader entries {'boot_number': EFIBootLoaderEntry}"""
++
++ self._parse_efi_boot_entries(bootmgr_output)
++ self._parse_current_bootnum(bootmgr_output)
++ self._parse_next_bootnum(bootmgr_output)
++ self._parse_boot_order(bootmgr_output)
++ self._print_loaded_info()
++
++ def _parse_efi_boot_entries(self, bootmgr_output):
++ """
++ Return dict of UEFI boot loader entries: {"<boot_number>": EFIBootLoaderEntry}
++ """
++
++ self.entries = {}
++ regexp_entry = re.compile(
++ r"^Boot(?P[a-zA-Z0-9]+)(?P\*?)\s*(?P