Import from CS git

(cherry picked from commit ef02e9e53b)
eabdullin 2024-11-25 09:10:29 +00:00 committed by Yuriy Kohut
parent 90a498b4ff
commit b61b49f950
44 changed files with 46781 additions and 16 deletions

.gitignore

@ -1,2 +1,2 @@
-SOURCES/deps-pkgs-10.tar.gz
+SOURCES/deps-pkgs-11.tar.gz
SOURCES/leapp-repository-0.21.0.tar.gz


@ -1,2 +1,2 @@
-d520ada12294e4dd8837c81f92d4c184ab403d51 SOURCES/deps-pkgs-10.tar.gz
+8b3fe3a7b52d2e144d374623aa5b0b0add7ab0c7 SOURCES/deps-pkgs-11.tar.gz
9327be3720ccb3f7b285d2199463d7df0c38dfae SOURCES/leapp-repository-0.21.0.tar.gz


@ -1,7 +1,8 @@
From fbc38d4ad1d828e0553579e3719c0e4ed4a2a6bd Mon Sep 17 00:00:00 2001
From: jinkangkang <1547182170@qq.com>
Date: Mon, 19 Aug 2024 18:46:08 +0800
-Subject: [PATCH] rhui(alibaba): add ARM RHEL8 and RHEL9 setup entries (#1277)
+Subject: [PATCH 01/40] rhui(alibaba): add ARM RHEL8 and RHEL9 setup entries
+ (#1277)
Since leapp's RHUI mechanism filters setups based on the architecture of the source system,
it was not possible to upgrade ARM-based RHEL systems on Alibaba cloud as there
@ -39,5 +40,8 @@ index 51694ac2..30de0275 100644
}
--
-2.45.2
+2.47.0


@ -0,0 +1,41 @@
From 7e0fb44bb673893d0409903f6a441d0eb2829d22 Mon Sep 17 00:00:00 2001
From: Evgeni Golov <evgeni@golov.de>
Date: Tue, 20 Aug 2024 15:11:02 +0200
Subject: [PATCH 02/40] don't require all versions to be defined for obsoleted
keys
In releases where we do not have any obsoleted keys, we still had to
define an entry (with an empty list), as otherwise the code would fail.
Instead, we can catch the KeyError and carry on as if nothing happened.
---
.../libraries/removeobsoleterpmgpgkeys.py | 13 ++++++++-----
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py b/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py
index 6e84c2e9..bda7efa3 100644
--- a/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py
+++ b/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py
@@ -12,11 +12,14 @@ def _get_obsolete_keys():
distribution = api.current_actor().configuration.os_release.release_id
obsoleted_keys_map = get_distribution_data(distribution).get('obsoleted-keys', {})
keys = []
- for version in range(7, int(get_target_major_version()) + 1):
- for key in obsoleted_keys_map[str(version)]:
- name, version, release = key.rsplit("-", 2)
- if has_package(InstalledRPM, name, version=version, release=release):
- keys.append(key)
+ try:
+ for version in range(7, int(get_target_major_version()) + 1):
+ for key in obsoleted_keys_map[str(version)]:
+ name, version, release = key.rsplit("-", 2)
+ if has_package(InstalledRPM, name, version=version, release=release):
+ keys.append(key)
+ except KeyError:
+ pass
return keys
--
2.47.0


@ -0,0 +1,226 @@
From 9f2f1726d8a5bdd12309a3a3111984f1666b903f Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Thu, 22 Aug 2024 15:52:19 +0200
Subject: [PATCH 03/40] Add RHEL 10.0 prod-certs
Previously we temporarily used the RHEL 9 x86_64 prod cert; for other
archs it was missing completely.
Jira: OAMG-11138
---
.../common/files/prod-certs/10.0/279.pem | 37 ++++++++++
.../common/files/prod-certs/10.0/419.pem | 37 ++++++++++
.../common/files/prod-certs/10.0/479.pem | 68 ++++++++++---------
.../common/files/prod-certs/10.0/72.pem | 37 ++++++++++
4 files changed, 146 insertions(+), 33 deletions(-)
create mode 100644 repos/system_upgrade/common/files/prod-certs/10.0/279.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/10.0/419.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/10.0/72.pem
diff --git a/repos/system_upgrade/common/files/prod-certs/10.0/279.pem b/repos/system_upgrade/common/files/prod-certs/10.0/279.pem
new file mode 100644
index 00000000..f62340fc
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/10.0/279.pem
@@ -0,0 +1,37 @@
+-----BEGIN CERTIFICATE-----
+MIIGczCCBFugAwIBAgIUfZodBQY+YRSlyRRiFX1dx4vQ5y4wDQYJKoZIhvcNAQEL
+BQAwga4xCzAJBgNVBAYTAlVTMRcwFQYDVQQIDA5Ob3J0aCBDYXJvbGluYTEWMBQG
+A1UECgwNUmVkIEhhdCwgSW5jLjEYMBYGA1UECwwPUmVkIEhhdCBOZXR3b3JrMS4w
+LAYDVQQDDCVSZWQgSGF0IEVudGl0bGVtZW50IFByb2R1Y3QgQXV0aG9yaXR5MSQw
+IgYJKoZIhvcNAQkBFhVjYS1zdXBwb3J0QHJlZGhhdC5jb20wHhcNMjQwODE1MDYx
+NjQ5WhcNNDQwODE1MDYxNjQ5WjBEMUIwQAYDVQQDDDlSZWQgSGF0IFByb2R1Y3Qg
+SUQgWzA0YTU4NDFkLTVlNmUtNDU1Yy1hZWYwLTdhOTQ0NTBiNjg3Nl0wggIiMA0G
+CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDGP0nTjP4TN3LHVTfeQV+0u/Se01LU
+FJ66GhksOGzXzKSx6kbuFde0eHYIwV8tmZOMDIv2LVezHKRClVB1dMalQXfcLaoF
+AcHmCViz353vzXHynybzMXFs9xbzZMglduBbcStWHy+TmoJsbVwIAAdv4NYyrQQD
+LLVuX8mACCFg0YFG8ok5tN0Kt2liHTYpSoEuRI9ke+joNQkU3fsxcOlV5Cr1W2pG
+OkosvC4R9dvRjsjnEQ6tHeRhs5oEBZW3eZhnW3Qv8p9jaNU51TlYXLIH0+Fsx0uL
+XETzTWP4YmvBwtrGaq+PhRogJHNw8BM/zrNUzUEFBr6WKWRFB6zkfKNnNkOIZi52
+deFuqYuj+fRy5ehAFVWOHNFMzHvUSKJqGaLD5TW8aqQeFA3FvXce03WVwCFQIOvH
+F4y+sCNh1aliWkjJbc2yw9a3VhQeJ0wFIAngpy0h/3V3IT3dpK2XHAL9CfIWxk6Z
+wSwHNUKfP0aZYyXX/pfMFLXINSoHKSXHRMsf7P+wr0D47atkDLWYHIJjBXG9s5mG
+eobEC5OghL4DzW/mEKOwKI5JxUH5yKXfRgG7RwfzlFnQgs2Qd0p2sstZbjCOmEra
+cGfaDaLf7O1/6dAQPalCpn+uG5bv2NzIJmX2Rep7XA50XQLBqHg3r/cvMhcQQrIQ
+nE2pDC01zYhUTwIDAQABo4HxMIHuMAkGA1UdEwQCMAAwQwYMKwYBBAGSCAkBghcB
+BDMMMVJlZCBIYXQgRW50ZXJwcmlzZSBMaW51eCBmb3IgUG93ZXIsIGxpdHRsZSBl
+bmRpYW4wFgYMKwYBBAGSCAkBghcCBAYMBDEwLjAwGQYMKwYBBAGSCAkBghcDBAkM
+B3BwYzY0bGUwKQYMKwYBBAGSCAkBghcEBBkMF3JoZWwtMTAscmhlbC0xMC1wcGM2
+NGxlMB0GA1UdDgQWBBRh6iC1NXyvZ2Q6/2sI5hB40M0flTAfBgNVHSMEGDAWgBSW
+/bscQED/QIStsh8LJsHDam/WfDANBgkqhkiG9w0BAQsFAAOCAgEAv6ySsgygc2z2
+kQJeu9sdvBNFKe+gEtXbPu6+rZKPPosW3cggMJCnsZgki3nUogovz0Z3MPkbmRz+
+GJwVjiVBnfUQLoORSDYwqYZB4WRoqszW/dytd7/64IehvD/JZo3Oa8BNYRSG/Ukh
+7iUIT8ryFIH1DTUIersVObINN2gk3hC2JJXoTfNqIYG+4OAEUE7/F4CptRAGbgH/
+4/9vfe2KNXvPMoWvILpXpD5w8t9Xh0Wl97N1W7+FLVRwQHAQ2/yBTu/sY27FvVSl
+0o+SBSvjTKIi+9QslRpi0QCVza5WxHTiO8nzYgzFjfMkt6lzK74puf3VJavpqkQ9
+dVfyp36A3Fh6vDsiNxhsfKrp8z2JnKA3vdslsH7cOHCIFYHXiqeaP654t4oGeESD
+EPfS6PpXSyi47Kd/qjA2srgpXNQl2yMd0ih6NoHaoSYXFfb4LX6cWFGcT/AWZsaC
+xv2pN9J0KhF2loLp8SK19FESc0rJShkAacTcxeYjuDYbvLtJi4Z5aWWVU421rMSs
+X9IdiWa4WL70ZaDK5cP54S4zZNsVDKniUzNXwPltDCpqefy8ka4o5QlWNreBrXXW
+6cy8I6L2om7xZ5hAZ3CB7nUZe9QE/LXnHqK3cQetvd5Q2LMnp6gVtgQ4a+7vD9xz
+ExLtbBZjvGJFudimMmOxvn/J5+GMmm4=
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/10.0/419.pem b/repos/system_upgrade/common/files/prod-certs/10.0/419.pem
new file mode 100644
index 00000000..08cb5b02
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/10.0/419.pem
@@ -0,0 +1,37 @@
+-----BEGIN CERTIFICATE-----
+MIIGZTCCBE2gAwIBAgIUWARL99TkK+hxtTJkE5icdHXLfY0wDQYJKoZIhvcNAQEL
+BQAwga4xCzAJBgNVBAYTAlVTMRcwFQYDVQQIDA5Ob3J0aCBDYXJvbGluYTEWMBQG
+A1UECgwNUmVkIEhhdCwgSW5jLjEYMBYGA1UECwwPUmVkIEhhdCBOZXR3b3JrMS4w
+LAYDVQQDDCVSZWQgSGF0IEVudGl0bGVtZW50IFByb2R1Y3QgQXV0aG9yaXR5MSQw
+IgYJKoZIhvcNAQkBFhVjYS1zdXBwb3J0QHJlZGhhdC5jb20wHhcNMjQwODE1MDYx
+NjQ5WhcNNDQwODE1MDYxNjQ5WjBEMUIwQAYDVQQDDDlSZWQgSGF0IFByb2R1Y3Qg
+SUQgW2Y3ZWFmNGU2LTYwZGYtNDMyNC04N2I0LTdhNGUzZGVkZmViNV0wggIiMA0G
+CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDGP0nTjP4TN3LHVTfeQV+0u/Se01LU
+FJ66GhksOGzXzKSx6kbuFde0eHYIwV8tmZOMDIv2LVezHKRClVB1dMalQXfcLaoF
+AcHmCViz353vzXHynybzMXFs9xbzZMglduBbcStWHy+TmoJsbVwIAAdv4NYyrQQD
+LLVuX8mACCFg0YFG8ok5tN0Kt2liHTYpSoEuRI9ke+joNQkU3fsxcOlV5Cr1W2pG
+OkosvC4R9dvRjsjnEQ6tHeRhs5oEBZW3eZhnW3Qv8p9jaNU51TlYXLIH0+Fsx0uL
+XETzTWP4YmvBwtrGaq+PhRogJHNw8BM/zrNUzUEFBr6WKWRFB6zkfKNnNkOIZi52
+deFuqYuj+fRy5ehAFVWOHNFMzHvUSKJqGaLD5TW8aqQeFA3FvXce03WVwCFQIOvH
+F4y+sCNh1aliWkjJbc2yw9a3VhQeJ0wFIAngpy0h/3V3IT3dpK2XHAL9CfIWxk6Z
+wSwHNUKfP0aZYyXX/pfMFLXINSoHKSXHRMsf7P+wr0D47atkDLWYHIJjBXG9s5mG
+eobEC5OghL4DzW/mEKOwKI5JxUH5yKXfRgG7RwfzlFnQgs2Qd0p2sstZbjCOmEra
+cGfaDaLf7O1/6dAQPalCpn+uG5bv2NzIJmX2Rep7XA50XQLBqHg3r/cvMhcQQrIQ
+nE2pDC01zYhUTwIDAQABo4HjMIHgMAkGA1UdEwQCMAAwNQYMKwYBBAGSCAkBgyMB
+BCUMI1JlZCBIYXQgRW50ZXJwcmlzZSBMaW51eCBmb3IgQVJNIDY0MBYGDCsGAQQB
+kggJAYMjAgQGDAQxMC4wMBkGDCsGAQQBkggJAYMjAwQJDAdhYXJjaDY0MCkGDCsG
+AQQBkggJAYMjBAQZDBdyaGVsLTEwLHJoZWwtMTAtYWFyY2g2NDAdBgNVHQ4EFgQU
+YeogtTV8r2dkOv9rCOYQeNDNH5UwHwYDVR0jBBgwFoAUlv27HEBA/0CErbIfCybB
+w2pv1nwwDQYJKoZIhvcNAQELBQADggIBAIpdcHN7RN18pg5ELfc55Sj58ivL5N25
+19KprqbM7aVum32abw7/Qksfs6maGQpU6Hh/UqhJlGQ2bN48jZ/kdMKor4agSQ/T
+iwr3b8RBJFPVCuqQJXIe4g3iRbHfnIjGxgoMgv36j58PENoEnpPtR7ZtHMyqQ2SO
+m1WRQhY5tJ4Fk/Zkx/trxlNvmsTAjNRa530kqG4TfiMVvWNaVdxHsjMv0lXLJRXx
+KT6+iHt2QBs2No5O8cjlXr/CzfGrB5TlBNrsHqhO0Llmw28KpcWGYGdexKdIHrDG
+A/K0Pr21yRstUWN39jz/tdEqt1q8T7/it3oM976keQmFAxBa/CpyEG5Y6FKw9+F0
+LtkAyI3XGHK7LbCOE67s7u0/BfgQvww1FqztVnVZ4sXlagj/IuYPJBhfGDe/6tik
+laqP8FtR6xJdSra2YQMBc0kZb0Sv1uy7pGofNSvLM5L76XqiwKoDVo/eAcl60OWY
+rF86pEDLGDmdJBLJKX2/77pzpQpZ9Yvc4vWwoZrP4gRKBuWF28aLH0OsWzdsfdMG
+9+DrcO/58slMbWng1ZzOQyEjp7x1kto5sa5m2q8LMo06ETYT8ps5A0hyltBz1yAt
+JEBS4Y14YlF6Px67aTak07MNo7AaaphuD47D2Sy3pwHa+vOx4nv/G33+G0iOm3Lr
+zVAjwlfLIUB9
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/10.0/479.pem b/repos/system_upgrade/common/files/prod-certs/10.0/479.pem
index 1ea1cd3d..d89f6188 100644
--- a/repos/system_upgrade/common/files/prod-certs/10.0/479.pem
+++ b/repos/system_upgrade/common/files/prod-certs/10.0/479.pem
@@ -1,35 +1,37 @@
-----BEGIN CERTIFICATE-----
-MIIGFTCCA/2gAwIBAgIJALDxRLt/tVDQMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
-VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
-YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
-IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
-ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTIzMDcxOTE2MzQwOFoXDTQzMDcx
-OTE2MzQwOFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFsxZDg0ZDQ5
-Ny1jZmNmLTQxNjEtOTM0YS0zNzk2MDU4M2ZmZGZdMIICIjANBgkqhkiG9w0BAQEF
-AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
-sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
-8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
-RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
-5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
-xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
-QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
-yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
-1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
-5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
-ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
-AwEAAaOBnjCBmzAJBgNVHRMEAjAAMDUGDCsGAQQBkggJAYNfAQQlDCNSZWQgSGF0
-IEVudGVycHJpc2UgTGludXggZm9yIHg4Nl82NDAVBgwrBgEEAZIICQGDXwIEBQwD
-OS40MBgGDCsGAQQBkggJAYNfAwQIDAZ4ODZfNjQwJgYMKwYBBAGSCAkBg18EBBYM
-FHJoZWwtOSxyaGVsLTkteDg2XzY0MA0GCSqGSIb3DQEBCwUAA4ICAQCGUDPFBrLs
-sK/RITJothRhKhKNX3zu9TWRG0WKxszCx/y7c4yEfH1TV/yd7BNB2RubaoayWz8E
-TQjcRW8BnVu9JrlbdpWJm4eN+dOOpcESPilLnkz4Tr0WYDsT1/jk/uiorK4h21S0
-EwMicuSuEmm0OUEX0zj2X/IyveFRtpJpH/JktznCkvexysc1JRzqMCbal8GipRX9
-Xf7Oko6QiaUpu5GDLN2OXhizYHdR2f3l+Sn2cScsbi3fSVv+DLsnaz6J0kZ4U8q3
-lYk/ZYifJjG+/7cv3e+usixpmK/qYlpOvunUDnqOkDfUs4/4bZjH8e8CdqJk4YvU
-RRtLr7muXEJsaqF7lxAViXnKxT/z/+1kOgN/+Oyzjs4QDsk2HQpWHFgNYSSG9Mmz
-PUS8tk2T0j5sN55X7QRRl5c0oqrBU5XaWyL26QcfONYcR8dBaKawjxg8CI9KzsYY
-sb2jjS+fBkB1OI2c6z4OZRd+0N6FQ6gq++KiXOLFvi/QSFNi9Veb56c5tR2l6fBk
-0pSH06Gg2s0aQg20NdMIr+HaYsVdJRsE1FgQ2tlfFx9rGkcqhgwV3Za/abgtRb2o
-YVwps28DLm41DXf5DnXK+BXFHrtR/3YAZtga+R7OL/RvcF0kc2kudlxqd/8Y33uL
-nqnoATy31FTW4J4rEfanJTQgTpatZmbaLQ==
+MIIGYzCCBEugAwIBAgIUL5D34AcwqLAbqlUcxntHUCtEVxQwDQYJKoZIhvcNAQEL
+BQAwga4xCzAJBgNVBAYTAlVTMRcwFQYDVQQIDA5Ob3J0aCBDYXJvbGluYTEWMBQG
+A1UECgwNUmVkIEhhdCwgSW5jLjEYMBYGA1UECwwPUmVkIEhhdCBOZXR3b3JrMS4w
+LAYDVQQDDCVSZWQgSGF0IEVudGl0bGVtZW50IFByb2R1Y3QgQXV0aG9yaXR5MSQw
+IgYJKoZIhvcNAQkBFhVjYS1zdXBwb3J0QHJlZGhhdC5jb20wHhcNMjQwODE1MDYx
+NjQ5WhcNNDQwODE1MDYxNjQ5WjBEMUIwQAYDVQQDDDlSZWQgSGF0IFByb2R1Y3Qg
+SUQgWzk5NDZhMmY5LTI4NDMtNDJhOS1iNzhlLTIzM2E5ODIwYjVhZV0wggIiMA0G
+CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDGP0nTjP4TN3LHVTfeQV+0u/Se01LU
+FJ66GhksOGzXzKSx6kbuFde0eHYIwV8tmZOMDIv2LVezHKRClVB1dMalQXfcLaoF
+AcHmCViz353vzXHynybzMXFs9xbzZMglduBbcStWHy+TmoJsbVwIAAdv4NYyrQQD
+LLVuX8mACCFg0YFG8ok5tN0Kt2liHTYpSoEuRI9ke+joNQkU3fsxcOlV5Cr1W2pG
+OkosvC4R9dvRjsjnEQ6tHeRhs5oEBZW3eZhnW3Qv8p9jaNU51TlYXLIH0+Fsx0uL
+XETzTWP4YmvBwtrGaq+PhRogJHNw8BM/zrNUzUEFBr6WKWRFB6zkfKNnNkOIZi52
+deFuqYuj+fRy5ehAFVWOHNFMzHvUSKJqGaLD5TW8aqQeFA3FvXce03WVwCFQIOvH
+F4y+sCNh1aliWkjJbc2yw9a3VhQeJ0wFIAngpy0h/3V3IT3dpK2XHAL9CfIWxk6Z
+wSwHNUKfP0aZYyXX/pfMFLXINSoHKSXHRMsf7P+wr0D47atkDLWYHIJjBXG9s5mG
+eobEC5OghL4DzW/mEKOwKI5JxUH5yKXfRgG7RwfzlFnQgs2Qd0p2sstZbjCOmEra
+cGfaDaLf7O1/6dAQPalCpn+uG5bv2NzIJmX2Rep7XA50XQLBqHg3r/cvMhcQQrIQ
+nE2pDC01zYhUTwIDAQABo4HhMIHeMAkGA1UdEwQCMAAwNQYMKwYBBAGSCAkBg18B
+BCUMI1JlZCBIYXQgRW50ZXJwcmlzZSBMaW51eCBmb3IgeDg2XzY0MBYGDCsGAQQB
+kggJAYNfAgQGDAQxMC4wMBgGDCsGAQQBkggJAYNfAwQIDAZ4ODZfNjQwKAYMKwYB
+BAGSCAkBg18EBBgMFnJoZWwtMTAscmhlbC0xMC14ODZfNjQwHQYDVR0OBBYEFGHq
+ILU1fK9nZDr/awjmEHjQzR+VMB8GA1UdIwQYMBaAFJb9uxxAQP9AhK2yHwsmwcNq
+b9Z8MA0GCSqGSIb3DQEBCwUAA4ICAQAa+c2/Usg6JToULhYTdLhf15Hk6xxdlwT7
+zZlnZLbuAKtaDqP1NiSiX0Z/lMJzFfW0B/zyWLy8uiXLYmF5V28f8yWK0Nksx2v7
+I7u6ZZN2dKDQZKsEoP0g3ptvVRWn9h5otS7yPkOK4Dzj04yJqOSGP9bp6OHEhm1S
+x4ErITkN/3MXOf9vT+I6wydVKsw4fdlWgVjmBd90bzVTnv4dWtJio+le+9ad9RSf
+M3aD5ufiELeRKMp6ExnC/cnoWtuH+b4BJ37TQ3Kpn3fDtbrzVvQH/dpqZ7P33yqg
+PnBEXOiLimDnnmDJ9ImQ1pVTrKJMxaj1Mk6onERe36n/iAsj+BwZvBiv7UaLPMnW
+nJGg+LQ4iUZrGWYD4N9Ou++nvsR8dCWRhXSuXensfli3lL/W0P62yzfYCyqOYeL1
+msDcCmBEWJUtAaeAbASUIVx02JWPPmMSUqWs8xOecQjzoGuCQg4JM/UfsZzxepw0
+bs9YSUVw8J9R2d4kuze65qDTMRg+cK2LX1xg1KkR/UWZOGxHHJAfwGWdPwSkiOPQ
+MVJ7LJjvozebHWSuiSxk+GWWr+NdxIJrFRGbivXyAkmqMRrPe1VLVxWwCdyud9o8
+b2WbFgrNS2jOnHwldtM2ZAhrF5W4ckvVL7hLp2JoQnJfCcWson9NK6Y2M4bNwQnC
+ihxphLzOAw==
-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/10.0/72.pem b/repos/system_upgrade/common/files/prod-certs/10.0/72.pem
new file mode 100644
index 00000000..e0274f9c
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/10.0/72.pem
@@ -0,0 +1,37 @@
+-----BEGIN CERTIFICATE-----
+MIIGZDCCBEygAwIBAgIUSTvcD4Wsduixh8PFmwk6aI0KTEcwDQYJKoZIhvcNAQEL
+BQAwga4xCzAJBgNVBAYTAlVTMRcwFQYDVQQIDA5Ob3J0aCBDYXJvbGluYTEWMBQG
+A1UECgwNUmVkIEhhdCwgSW5jLjEYMBYGA1UECwwPUmVkIEhhdCBOZXR3b3JrMS4w
+LAYDVQQDDCVSZWQgSGF0IEVudGl0bGVtZW50IFByb2R1Y3QgQXV0aG9yaXR5MSQw
+IgYJKoZIhvcNAQkBFhVjYS1zdXBwb3J0QHJlZGhhdC5jb20wHhcNMjQwODE1MDYx
+NjQ5WhcNNDQwODE1MDYxNjQ5WjBEMUIwQAYDVQQDDDlSZWQgSGF0IFByb2R1Y3Qg
+SUQgW2VjN2EwZDQyLTgzNjItNDg2YS04ZjcyLTc3YThiOWU2MjM0YV0wggIiMA0G
+CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDGP0nTjP4TN3LHVTfeQV+0u/Se01LU
+FJ66GhksOGzXzKSx6kbuFde0eHYIwV8tmZOMDIv2LVezHKRClVB1dMalQXfcLaoF
+AcHmCViz353vzXHynybzMXFs9xbzZMglduBbcStWHy+TmoJsbVwIAAdv4NYyrQQD
+LLVuX8mACCFg0YFG8ok5tN0Kt2liHTYpSoEuRI9ke+joNQkU3fsxcOlV5Cr1W2pG
+OkosvC4R9dvRjsjnEQ6tHeRhs5oEBZW3eZhnW3Qv8p9jaNU51TlYXLIH0+Fsx0uL
+XETzTWP4YmvBwtrGaq+PhRogJHNw8BM/zrNUzUEFBr6WKWRFB6zkfKNnNkOIZi52
+deFuqYuj+fRy5ehAFVWOHNFMzHvUSKJqGaLD5TW8aqQeFA3FvXce03WVwCFQIOvH
+F4y+sCNh1aliWkjJbc2yw9a3VhQeJ0wFIAngpy0h/3V3IT3dpK2XHAL9CfIWxk6Z
+wSwHNUKfP0aZYyXX/pfMFLXINSoHKSXHRMsf7P+wr0D47atkDLWYHIJjBXG9s5mG
+eobEC5OghL4DzW/mEKOwKI5JxUH5yKXfRgG7RwfzlFnQgs2Qd0p2sstZbjCOmEra
+cGfaDaLf7O1/6dAQPalCpn+uG5bv2NzIJmX2Rep7XA50XQLBqHg3r/cvMhcQQrIQ
+nE2pDC01zYhUTwIDAQABo4HiMIHfMAkGA1UdEwQCMAAwOwYLKwYBBAGSCAkBSAEE
+LAwqUmVkIEhhdCBFbnRlcnByaXNlIExpbnV4IGZvciBJQk0geiBTeXN0ZW1zMBUG
+CysGAQQBkggJAUgCBAYMBDEwLjAwFgYLKwYBBAGSCAkBSAMEBwwFczM5MHgwJgYL
+KwYBBAGSCAkBSAQEFwwVcmhlbC0xMCxyaGVsLTEwLXMzOTB4MB0GA1UdDgQWBBRh
+6iC1NXyvZ2Q6/2sI5hB40M0flTAfBgNVHSMEGDAWgBSW/bscQED/QIStsh8LJsHD
+am/WfDANBgkqhkiG9w0BAQsFAAOCAgEAsj4qPVsDkFrfuVDn8JCJ7tIH5WhaOzL6
+3GBsQIKGd8a1WscPfSpr/phNSBPWFyvV2b+0HzblYzBZbx6ExykTDLh5L01nPM0s
++hqPxZgF/kcTbLWmAanl32R9+Gs2P2JN1CaCclXgM4USEagBWYeMhJSmQR3bOnSe
+Jjm3tjvhnbIQd6xgPpTjrqZ35z1BW0P0qQFdBbB0k+MfPkhYKEr+Vfn0rU8vk4UP
+F9sY9HkZLqIBxlXeTUerNZvHSuOy2KgoS4l25/QwUutHnnSGZZpARiU1XYNcynVL
+r5COHlb6TYkeRhSAm6RVM4XPYoFgN6cbhY1orwFC2/0i30EnsTMB6ctnLKCf7qgM
+GDG2W7ct0m6koA7s2TGmgp33DPw9adX7qgIV0OjLzBYJ1fyVv3sYlOKRuyDz0l+N
+u6Rnv1ecNUspWn+5ogBbdgwU6yah6oo/fJIWm62U38UGH5ic+/7sBnga8q5sDI90
++h+nlTIAnD0ICzjEDASiLlYft+hQ9pOt/rgEIrPeKTe+fbefUIXJ5h343E51POnY
+uZRXcirc33QL/PgBRce1taIXjsRD+FSJM0tx/vf8H9j0rzSAxDoXJNsdq4/32scy
+6Zk2fgtm80xxIzju84jXVUrSBRMpWD9I+FZId4IE7tQhwKNi1b7DdNeaQLfaoq8U
+1PEea/tQDSA=
+-----END CERTIFICATE-----
--
2.47.0


@ -0,0 +1,100 @@
From bf302fc794957a88bc4785f4dd2505b8d71012e0 Mon Sep 17 00:00:00 2001
From: Evgeni Golov <evgeni@golov.de>
Date: Wed, 21 Aug 2024 07:52:02 +0200
Subject: [PATCH 04/40] properly scope try/except when loading obsoleted keys
We want to load all possible keys, even *after* a KeyError happened
Fixes: 7e0fb44bb673893d0409903f6a441d0eb2829d22
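A minimal sketch of why the scoping matters, using a hypothetical obsoleted-keys map with no entry for version 8: with the try/except inside the loop, only the missing version is skipped and later versions are still inspected, whereas wrapping the whole loop in a single try/except would stop at the first missing version.

# hypothetical data: RHEL major version -> obsoleted gpg-pubkey packages
obsoleted_keys_map = {"7": ["gpg-pubkey-aaa-111"], "9": ["gpg-pubkey-bbb-222"]}

keys = []
for version in range(7, 9 + 1):
    try:
        # version "8" has no entry; only that iteration is skipped
        keys.extend(obsoleted_keys_map[str(version)])
    except KeyError:
        pass

print(keys)  # ['gpg-pubkey-aaa-111', 'gpg-pubkey-bbb-222']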
---
.../libraries/removeobsoleterpmgpgkeys.py | 8 +--
.../tests/test_removeobsoleterpmgpgkeys.py | 50 +++++++++++++++++++
2 files changed, 54 insertions(+), 4 deletions(-)
diff --git a/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py b/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py
index bda7efa3..198c4368 100644
--- a/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py
+++ b/repos/system_upgrade/common/actors/removeobsoletegpgkeys/libraries/removeobsoleterpmgpgkeys.py
@@ -12,14 +12,14 @@ def _get_obsolete_keys():
distribution = api.current_actor().configuration.os_release.release_id
obsoleted_keys_map = get_distribution_data(distribution).get('obsoleted-keys', {})
keys = []
- try:
- for version in range(7, int(get_target_major_version()) + 1):
+ for version in range(7, int(get_target_major_version()) + 1):
+ try:
for key in obsoleted_keys_map[str(version)]:
name, version, release = key.rsplit("-", 2)
if has_package(InstalledRPM, name, version=version, release=release):
keys.append(key)
- except KeyError:
- pass
+ except KeyError:
+ pass
return keys
diff --git a/repos/system_upgrade/common/actors/removeobsoletegpgkeys/tests/test_removeobsoleterpmgpgkeys.py b/repos/system_upgrade/common/actors/removeobsoletegpgkeys/tests/test_removeobsoleterpmgpgkeys.py
index 4d9a0e84..b78174cc 100644
--- a/repos/system_upgrade/common/actors/removeobsoletegpgkeys/tests/test_removeobsoleterpmgpgkeys.py
+++ b/repos/system_upgrade/common/actors/removeobsoletegpgkeys/tests/test_removeobsoleterpmgpgkeys.py
@@ -76,6 +76,56 @@ def test_get_obsolete_keys(monkeypatch, version, expected):
assert set(keys) == set(expected)
+@pytest.mark.parametrize(
+ "version, obsoleted_keys, expected",
+ [
+ (10, None, []),
+ (10, {}, []),
+ (10, {"8": ["gpg-pubkey-888-abc"], "10": ["gpg-pubkey-10-10"]}, ["gpg-pubkey-888-abc", "gpg-pubkey-10-10"]),
+ (9, {"8": ["gpg-pubkey-888-abc"], "9": ["gpg-pubkey-999-def"]}, ["gpg-pubkey-999-def", "gpg-pubkey-888-abc"]),
+ (8, {"8": ["gpg-pubkey-888-abc"], "9": ["gpg-pubkey-999-def"]}, ["gpg-pubkey-888-abc"])
+ ]
+)
+def test_get_obsolete_keys_incomplete_data(monkeypatch, version, obsoleted_keys, expected):
+ def get_target_major_version_mocked():
+ return version
+
+ def get_distribution_data_mocked(_distro):
+ if obsoleted_keys is None:
+ return {}
+ return {'obsoleted-keys': obsoleted_keys}
+
+ def has_package_mocked(*args, **kwargs):
+ return True
+
+ monkeypatch.setattr(
+ removeobsoleterpmgpgkeys,
+ "get_target_major_version",
+ get_target_major_version_mocked,
+ )
+
+ monkeypatch.setattr(
+ removeobsoleterpmgpgkeys,
+ "get_distribution_data",
+ get_distribution_data_mocked,
+ )
+
+ monkeypatch.setattr(
+ removeobsoleterpmgpgkeys,
+ "has_package",
+ has_package_mocked,
+ )
+
+ monkeypatch.setattr(
+ api,
+ "current_actor",
+ CurrentActorMocked(),
+ )
+
+ keys = removeobsoleterpmgpgkeys._get_obsolete_keys()
+ assert set(keys) == set(expected)
+
+
@pytest.mark.parametrize(
"keys, should_register",
[
--
2.47.0


@ -0,0 +1,283 @@
From 9d49f4675c2b7b18ba7b344bb0032a5538782560 Mon Sep 17 00:00:00 2001
From: Vojtech Sokol <vsokol@redhat.com>
Date: Mon, 2 Sep 2024 17:21:36 +0200
Subject: [PATCH 05/40] Update references from master branch to main
Focus was on making the CI and GitHub actions work after the default
branch was switched from master to main.
See: OAMG-4907
---
.github/workflows/codespell.yml | 4 ++--
.github/workflows/differential-shellcheck.yml | 4 ++--
.github/workflows/pr-welcome-msg.yml | 2 +-
.github/workflows/tmt-tests.yml | 16 ++++++++--------
.github/workflows/unit-tests.yml | 12 ++++++------
.packit.yaml | 10 +++++-----
Makefile | 14 +++++++-------
7 files changed, 31 insertions(+), 31 deletions(-)
diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml
index 673cef17..1195d8d1 100644
--- a/.github/workflows/codespell.yml
+++ b/.github/workflows/codespell.yml
@@ -3,10 +3,10 @@ name: Codespell
on:
push:
branches:
- - master
+ - main
pull_request:
branches:
- - master
+ - main
jobs:
codespell:
diff --git a/.github/workflows/differential-shellcheck.yml b/.github/workflows/differential-shellcheck.yml
index f1ed5f6a..e1bafb93 100644
--- a/.github/workflows/differential-shellcheck.yml
+++ b/.github/workflows/differential-shellcheck.yml
@@ -4,7 +4,7 @@
name: Differential ShellCheck
on:
pull_request:
- branches: [master]
+ branches: [main]
permissions:
contents: read
@@ -17,7 +17,7 @@ jobs:
security-events: write
pull-requests: write
- steps:
+ steps:
- name: Repository checkout
uses: actions/checkout@v4
with:
diff --git a/.github/workflows/pr-welcome-msg.yml b/.github/workflows/pr-welcome-msg.yml
index ff9414d2..0102c41f 100644
--- a/.github/workflows/pr-welcome-msg.yml
+++ b/.github/workflows/pr-welcome-msg.yml
@@ -28,7 +28,7 @@ jobs:
However, here are additional useful commands for packit:
- **`/packit test`** to re-run manually the default tests
- **`/packit retest-failed`** to re-run failed tests manually
- - **`/packit test oamg/leapp#42`** to run tests with leapp builds for the leapp PR#42 (default is latest upstream - master - build)
+ - **`/packit test oamg/leapp#42`** to run tests with leapp builds for the leapp PR#42 (default is latest upstream - main - build)
Note that first time contributors cannot run tests automatically - they need to be started by a reviewer.
diff --git a/.github/workflows/tmt-tests.yml b/.github/workflows/tmt-tests.yml
index 7e9fd706..1fa00e60 100644
--- a/.github/workflows/tmt-tests.yml
+++ b/.github/workflows/tmt-tests.yml
@@ -12,7 +12,7 @@ jobs:
call_workflow_tests_79to88_integration:
needs: call_workflow_copr_build
- uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@master
+ uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@main
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
@@ -26,7 +26,7 @@ jobs:
call_workflow_tests_79to86_integration:
needs: call_workflow_copr_build
- uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@master
+ uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@main
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
@@ -40,7 +40,7 @@ jobs:
call_workflow_tests_79to88_sst:
needs: call_workflow_copr_build
- uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@master
+ uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@main
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
@@ -55,7 +55,7 @@ jobs:
call_workflow_tests_7to8_aws:
needs: call_workflow_copr_build
- uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@master
+ uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@main
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
@@ -71,7 +71,7 @@ jobs:
call_workflow_tests_86to90_integration:
needs: call_workflow_copr_build
- uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@master
+ uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@main
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
@@ -85,7 +85,7 @@ jobs:
call_workflow_tests_88to92_integration:
needs: call_workflow_copr_build
- uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@master
+ uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@main
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
@@ -101,7 +101,7 @@ jobs:
call_workflow_tests_86to90_sst:
needs: call_workflow_copr_build
- uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@master
+ uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@main
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
@@ -116,7 +116,7 @@ jobs:
call_workflow_tests_86to90_aws:
needs: call_workflow_copr_build
- uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@master
+ uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@main
secrets: inherit
with:
copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml
index 2a05106e..42b72b8d 100644
--- a/.github/workflows/unit-tests.yml
+++ b/.github/workflows/unit-tests.yml
@@ -2,10 +2,10 @@ name: Unit Tests
on:
push:
branches:
- - master
+ - main
pull_request:
branches:
- - master
+ - main
jobs:
test:
@@ -74,10 +74,10 @@ jobs:
# NOTE(ivasilev) fetch-depth 0 is critical here as leapp deps discovery depends on specific substring in
# commit message and default 1 option will get us just merge commit which has an unrelevant message.
fetch-depth: '0'
- # NOTE(ivasilev) master -> origin/master is used for leapp deps discovery in Makefile via git log master..HEAD
- - name: Set master to origin/master
- if: github.ref != 'refs/heads/master'
+ # NOTE(ivasilev) main -> origin/main is used for leapp deps discovery in Makefile via git log main..HEAD
+ - name: Set main to origin/main
+ if: github.ref != 'refs/heads/main'
run: |
- git branch -f master origin/master
+ git branch -f main origin/main
- name: ${{matrix.scenarios.name}}
run: script -e -c /bin/bash -c 'TERM=xterm podman build --security-opt=seccomp=unconfined -t leapp-tests -f utils/container-tests/Containerfile.${{matrix.scenarios.container}} utils/container-tests && PYTHON_VENV=${{matrix.scenarios.python}} REPOSITORIES=${{matrix.scenarios.repos}} podman run --security-opt=seccomp=unconfined --rm -ti -v ${PWD}:/payload --env=PYTHON_VENV --env=REPOSITORIES leapp-tests'
diff --git a/.packit.yaml b/.packit.yaml
index d91a47e5..fbfd0eea 100644
--- a/.packit.yaml
+++ b/.packit.yaml
@@ -22,7 +22,7 @@ actions:
fix-spec-file:
- bash -c "sed -i -r \"0,/Release:/ s/Release:(\s*)\S*/Release:\1${PACKIT_RPMSPEC_RELEASE}%{?dist}/\" packaging/leapp-repository.spec"
post-upstream-clone:
- # builds from PRs should have lower NVR than those from master branch
+ # builds from PRs should have lower NVR than those from main branch
- bash -c "sed -i \"s/1%{?dist}/0%{?dist}/g\" packaging/leapp-repository.spec"
jobs:
@@ -44,12 +44,12 @@ jobs:
fix-spec-file:
- bash -c "sed -i -r \"0,/Release:/ s/Release:(\s*)\S*/Release:\1${PACKIT_RPMSPEC_RELEASE}%{?dist}/\" packaging/leapp-repository.spec"
post-upstream-clone:
- # builds from PRs should have lower NVR than those from master branch
+ # builds from PRs should have lower NVR than those from main branch
- bash -c "sed -i \"s/1%{?dist}/0%{?dist}/g\" packaging/leapp-repository.spec"
- job: copr_build
trigger: commit
metadata:
- branch: master
+ branch: main
owner: "@oamg"
project: leapp
targets:
@@ -65,7 +65,7 @@ jobs:
fix-spec-file:
- bash -c "sed -i -r \"0,/Release:/ s/Release:(\s*)\S*/Release:\1${PACKIT_RPMSPEC_RELEASE}%{?dist}/\" packaging/leapp-repository.spec"
post-upstream-clone:
- # builds from master branch should start with 100 release, to have high priority
+ # builds from main branch should start with 100 release, to have high priority
- bash -c "sed -i \"s/1%{?dist}/100%{?dist}/g\" packaging/leapp-repository.spec"
- job: copr_build
trigger: release
@@ -85,7 +85,7 @@ jobs:
fix-spec-file:
- bash -c "sed -i -r \"0,/Release:/ s/Release:(\s*)\S*/Release:\1${PACKIT_RPMSPEC_RELEASE}%{?dist}/\" packaging/leapp-repository.spec"
post-upstream-clone:
- # builds from master branch should start with 100 release, to have high priority
+ # builds from main branch should start with 100 release, to have high priority
- bash -c "sed -i \"s/1%{?dist}/100%{?dist}/g\" packaging/leapp-repository.spec"
diff --git a/Makefile b/Makefile
index 5b2bc4d2..8aeef77d 100644
--- a/Makefile
+++ b/Makefile
@@ -64,7 +64,7 @@ endif
# just to reduce number of unwanted builds mark as the upstream one when
# someone will call copr_build without additional parameters
-MASTER_BRANCH=master
+MASTER_BRANCH=main
# In case the PR or MR is defined or in case build is not coming from the
# MATER_BRANCH branch, N_REL=0; (so build is not update of the approved
@@ -76,10 +76,10 @@ SHORT_SHA=`git rev-parse --short HEAD`
BRANCH=`git rev-parse --abbrev-ref HEAD | tr -- '-/' '_'`
# The dependent framework PR connection will be taken from the top commit's depends-on message.
-REQ_LEAPP_PR=$(shell git log master..HEAD | grep -m1 -iE '^[[:space:]]*Depends-On:[[:space:]]*.*[[:digit:]]+[[:space:]]*$$' | grep -Eo '*[[:digit:]]*')
+REQ_LEAPP_PR=$(shell git log main..HEAD | grep -m1 -iE '^[[:space:]]*Depends-On:[[:space:]]*.*[[:digit:]]+[[:space:]]*$$' | grep -Eo '*[[:digit:]]*')
# NOTE(ivasilev) In case of travis relying on top commit is a no go as a top commit will be a merge commit.
ifdef CI
- REQ_LEAPP_PR=$(shell git log master..HEAD | grep -m1 -iE '^[[:space:]]*Depends-On:[[:space:]]*.*[[:digit:]]+[[:space:]]*$$' | grep -Eo '[[:digit:]]*')
+ REQ_LEAPP_PR=$(shell git log main..HEAD | grep -m1 -iE '^[[:space:]]*Depends-On:[[:space:]]*.*[[:digit:]]+[[:space:]]*$$' | grep -Eo '[[:digit:]]*')
endif
# In case anyone would like to add any other suffix, just make it possible
@@ -92,8 +92,8 @@ REQUEST=`if test -n "$$PR"; then echo ".PR$${PR}"; elif test -n "$$MR"; then ech
# Examples:
# 0.201810080027Z.4078402.packaging.PR2
# 0.201810080027Z.4078402.packaging
-# 0.201810080027Z.4078402.master.MR2
-# 1.201810080027Z.4078402.master
+# 0.201810080027Z.4078402.main.MR2
+# 1.201810080027Z.4078402.main
RELEASE="$(N_REL).$(TIMESTAMP).$(SHORT_SHA).$(BRANCH)$(REQUEST)$(_SUFFIX)"
all: help
@@ -302,7 +302,7 @@ install-deps:
pip install --upgrade setuptools; \
pip install --upgrade -r requirements.txt; \
./utils/install_commands.sh $(_PYTHON_VENV); \
- # In case the top commit Depends-On some yet unmerged framework patch - override master leapp with the proper version
+ # In case the top commit Depends-On some yet unmerged framework patch - override main leapp with the proper version
if [[ ! -z "$(REQ_LEAPP_PR)" ]] ; then \
echo "Leapp-repository depends on the yet unmerged pr of the framework #$(REQ_LEAPP_PR), installing it.." && \
$(VENVNAME)/bin/pip install -I "git+https://github.com/oamg/leapp.git@refs/pull/$(REQ_LEAPP_PR)/head"; \
@@ -332,7 +332,7 @@ install-deps-fedora:
pip install --upgrade setuptools; \
pip install --upgrade -r requirements.txt; \
./utils/install_commands.sh $(_PYTHON_VENV); \
- # In case the top commit Depends-On some yet unmerged framework patch - override master leapp with the proper version
+ # In case the top commit Depends-On some yet unmerged framework patch - override main leapp with the proper version
if [[ ! -z "$(REQ_LEAPP_PR)" ]] ; then \
echo "Leapp-repository depends on the yet unmerged pr of the framework #$(REQ_LEAPP_PR), installing it.." && \
$(VENVNAME)/bin/pip install -I "git+https://github.com/oamg/leapp.git@refs/pull/$(REQ_LEAPP_PR)/head"; \
--
2.47.0


@ -0,0 +1,43 @@
From 41e32e3aa6394b8397bef9b797892d9fa119d608 Mon Sep 17 00:00:00 2001
From: Yuriy Kohut <yura.kohut@gmail.com>
Date: Thu, 29 Aug 2024 12:36:23 +0300
Subject: [PATCH 06/40] ReadOfKernelArgsError: fix the error: - AttributeError:
module 'leapp.reporting' has no attribute 'Hints'
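For context, a hedged sketch of the corrected pattern, based on the hunk below: the hint is kept as a plain string and wrapped in reporting.Remediation() when the report is created, since reporting.Hints does not exist. The title and summary strings here are illustrative placeholders, not taken from the actor, and the call assumes it runs inside a leapp actor's process().

from leapp import reporting

report_hint = (
    'After the system has been rebooted into the new version of RHEL, you'
    ' should take the kernel cmdline arguments from /proc/cmdline and copy'
    ' them into /etc/kernel/cmdline before installing any new kernels.'
)

reporting.create_report([
    reporting.Title('Kernel cmdline arguments may need manual review'),  # illustrative
    reporting.Summary('Leapp could not persistently set the kernel cmdline arguments.'),  # illustrative
    reporting.Remediation(hint=report_hint),
    reporting.Severity(reporting.Severity.HIGH),
    reporting.Groups([reporting.Groups.BOOT]),
])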
---
.../kernelcmdlineconfig/libraries/kernelcmdlineconfig.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py b/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py
index 238a8aa6..6b261c3b 100644
--- a/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py
+++ b/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py
@@ -175,14 +175,14 @@ def entrypoint(configs=None):
api.current_logger().error(str(e))
if use_cmdline_file():
- report_hint = reporting.Hints(
+ report_hint = (
'After the system has been rebooted into the new version of RHEL, you'
' should take the kernel cmdline arguments from /proc/cmdline (Everything'
' except the BOOT_IMAGE entry and initrd entries) and copy them into'
' /etc/kernel/cmdline before installing any new kernels.'
)
else:
- report_hint = reporting.Hints(
+ report_hint = (
'After the system has been rebooted into the new version of RHEL, you'
' should take the kernel cmdline arguments from /proc/cmdline (Everything'
' except the BOOT_IMAGE entry and initrd entries) and then use the'
@@ -204,7 +204,7 @@ def entrypoint(configs=None):
' not able to set the arguments as the default for kernels installed in'
' the future.'
),
- report_hint,
+ reporting.Remediation(hint=report_hint),
reporting.Severity(reporting.Severity.HIGH),
reporting.Groups([
reporting.Groups.BOOT,
--
2.47.0


@ -0,0 +1,44 @@
From 88e13fb0545e0d42df2777538a0c6921bab91e33 Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Fri, 27 Sep 2024 14:53:01 +0200
Subject: [PATCH 07/40] pylint: exclude rule: too-many-positional-arguments
(code: R0917)
Newer versions of Pylint have a rule for checking positional
arguments, complaining when more than 4 positional arguments exist.
We do not want to refactor the code to make it happy, and the maximum
value cannot be configured yet - that's planned for future Pylint
versions. So we are excluding this rule.
For more info:
* https://pylint.readthedocs.io/en/latest/user_guide/messages/refactor/too-many-positional-arguments.html
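For illustration only (not part of the patch), a signature like the following is the kind of code the new check complains about; with the rule disabled in .pylintrc, such signatures keep passing lint. The function and its parameters are hypothetical.

# hypothetical example of a signature that recent Pylint flags as
# too-many-positional-arguments (R0917)
def build_report(title, summary, severity, groups, remediation, audience):
    return {
        'title': title,
        'summary': summary,
        'severity': severity,
        'groups': groups,
        'remediation': remediation,
        'audience': audience,
    }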
---
.pylintrc | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/.pylintrc b/.pylintrc
index f78c1c3f..5d75df40 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -41,6 +41,8 @@ disable=
consider-using-from-import,
use-list-literal,
use-dict-literal,
+ too-many-lines, # we do not want to take care about that one
+ too-many-positional-arguments, # we cannot set yet max-possitional-arguments unfortunately
# new for python3 version of pylint
useless-object-inheritance,
consider-using-set-comprehension, # pylint3 force to use comprehension in place we don't want (py2 doesnt have these options, for inline skip)
@@ -57,8 +59,7 @@ disable=
redundant-u-string-prefix, # still have py2 to support
logging-format-interpolation,
logging-not-lazy,
- use-yield-from, # yield from cannot be used until we require python 3.3 or greater
- too-many-lines # we do not want to take care about that one
+ use-yield-from # yield from cannot be used until we require python 3.3 or greater
[FORMAT]
# Maximum number of characters on a single line.
--
2.47.0


@ -0,0 +1,534 @@
From 658700d6424e852917b62c190dd23cbb3026b67d Mon Sep 17 00:00:00 2001
From: Iker Pedrosa <ipedrosa@redhat.com>
Date: Mon, 5 Aug 2024 15:15:44 +0200
Subject: [PATCH 08/40] pam_userdb: migrate backend database
The pam_userdb module changed its backend database technology from libdb to
gdbm for RHEL 10. This requires a set of leapp actors to perform the
database migration automatically when upgrading to RHEL 10:
* ScanPamUserDB takes care of scanning the PAM service folder to detect
whether pam_userdb is used and the location of the database in use.
This information is stored in a model.
* CheckPamUserDB checks the databases reported by ScanPamUserDB and
prints a report about them.
* ConvertPamUserDB checks the databases reported by ScanPamUserDB and
converts them to GDBM format.
* RemoveOldPamUserDB checks the databases reported by ScanPamUserDB and
removes them.
All these actors include unit-tests.
Finally, there's also a spec file change to add `libdb-utils` dependency
as it is required to convert pam_userdb databases from BerkeleyDB to
GDBM.
Signed-off-by: Iker Pedrosa <ipedrosa@redhat.com>
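A minimal sketch of the detection step performed by ScanPamUserDB, mirroring the db= regex used in the diff below; the pam.d line is a made-up example.

import re

# hypothetical line from a service file under /etc/pam.d/
line = 'auth required pam_userdb.so db=/var/db/users'

db_path = None
if 'pam_userdb' in line:
    match = re.search(r'db=(\S+)', line)
    if match:
        db_path = match.group(1)

print(db_path)  # /var/db/users (path without the .db suffix)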
---
packaging/leapp-repository.spec | 6 +++
.../actors/pamuserdb/checkpamuserdb/actor.py | 18 ++++++++
.../libraries/checkpamuserdb.py | 28 ++++++++++++
.../tests/test_checkpamuserdb.py | 43 +++++++++++++++++++
.../pamuserdb/convertpamuserdb/actor.py | 18 ++++++++
.../libraries/convertpamuserdb.py | 27 ++++++++++++
.../tests/test_convertpamuserdb.py | 39 +++++++++++++++++
.../pamuserdb/removeoldpamuserdb/actor.py | 18 ++++++++
.../libraries/removeoldpamuserdb.py | 25 +++++++++++
.../tests/test_removeoldpamuserdb.py | 38 ++++++++++++++++
.../actors/pamuserdb/scanpamuserdb/actor.py | 18 ++++++++
.../scanpamuserdb/libraries/scanpamuserdb.py | 29 +++++++++++++
.../tests/files/pam_userdb_basic | 1 +
.../tests/files/pam_userdb_complete | 9 ++++
.../tests/files/pam_userdb_missing | 1 +
.../scanpamuserdb/tests/test_scanpamuserdb.py | 27 ++++++++++++
.../el9toel10/models/pamuserdblocation.py | 14 ++++++
17 files changed, 359 insertions(+)
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/actor.py
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/libraries/checkpamuserdb.py
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/tests/test_checkpamuserdb.py
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/actor.py
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/libraries/convertpamuserdb.py
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/tests/test_convertpamuserdb.py
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/actor.py
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/libraries/removeoldpamuserdb.py
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/tests/test_removeoldpamuserdb.py
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/actor.py
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/libraries/scanpamuserdb.py
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_basic
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_complete
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_missing
create mode 100644 repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/test_scanpamuserdb.py
create mode 100644 repos/system_upgrade/el9toel10/models/pamuserdblocation.py
diff --git a/packaging/leapp-repository.spec b/packaging/leapp-repository.spec
index 146afc45..0d63ba02 100644
--- a/packaging/leapp-repository.spec
+++ b/packaging/leapp-repository.spec
@@ -211,6 +211,12 @@ Requires: dracut
Requires: NetworkManager-libnm
Requires: python3-gobject-base
+%endif
+
+%if 0%{?rhel} && 0%{?rhel} == 9
+############# RHEL 9 dependencies (when the source system is RHEL 9) ##########
+# Required to convert pam_userdb database from BerkeleyDB to GDBM
+Requires: libdb-utils
%endif
##################################################
# end requirement
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/actor.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/actor.py
new file mode 100644
index 00000000..8fada645
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/actor.py
@@ -0,0 +1,18 @@
+from leapp.actors import Actor
+from leapp.libraries.actor import checkpamuserdb
+from leapp.models import PamUserDbLocation, Report
+from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
+
+
+class CheckPamUserDb(Actor):
+ """
+ Create report with the location of pam_userdb databases
+ """
+
+ name = 'check_pam_user_db'
+ consumes = (PamUserDbLocation,)
+ produces = (Report,)
+ tags = (ChecksPhaseTag, IPUWorkflowTag)
+
+ def process(self):
+ checkpamuserdb.process()
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/libraries/checkpamuserdb.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/libraries/checkpamuserdb.py
new file mode 100644
index 00000000..05cc71a9
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/libraries/checkpamuserdb.py
@@ -0,0 +1,28 @@
+from leapp import reporting
+from leapp.exceptions import StopActorExecutionError
+from leapp.libraries.stdlib import api
+from leapp.models import PamUserDbLocation
+
+FMT_LIST_SEPARATOR = "\n - "
+
+
+def process():
+ msg = next(api.consume(PamUserDbLocation), None)
+ if not msg:
+ raise StopActorExecutionError('Expected PamUserDbLocation, but got None')
+
+ if msg.locations:
+ reporting.create_report([
+ reporting.Title('pam_userdb databases will be converted to GDBM'),
+ reporting.Summary(
+ 'On RHEL 10, GDMB is used by pam_userdb as it\'s backend database,'
+ ' replacing BerkeleyDB. Existing pam_userdb databases will be'
+ ' converted to GDBM. The following databases will be converted:'
+ '{sep}{locations}'.format(sep=FMT_LIST_SEPARATOR, locations=FMT_LIST_SEPARATOR.join(msg.locations))),
+ reporting.Severity(reporting.Severity.INFO),
+ reporting.Groups([reporting.Groups.SECURITY, reporting.Groups.AUTHENTICATION])
+ ])
+ else:
+ api.current_logger().debug(
+ 'No pam_userdb databases were located, thus nothing will be converted'
+ )
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/tests/test_checkpamuserdb.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/tests/test_checkpamuserdb.py
new file mode 100644
index 00000000..2e11106b
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/checkpamuserdb/tests/test_checkpamuserdb.py
@@ -0,0 +1,43 @@
+import pytest
+
+from leapp import reporting
+from leapp.exceptions import StopActorExecutionError
+from leapp.libraries.actor import checkpamuserdb
+from leapp.libraries.common.testutils import create_report_mocked, logger_mocked
+from leapp.libraries.stdlib import api
+from leapp.models import PamUserDbLocation
+
+
+def test_process_no_msg(monkeypatch):
+ def consume_mocked(*args, **kwargs):
+ yield None
+
+ monkeypatch.setattr(api, 'consume', consume_mocked)
+
+ with pytest.raises(StopActorExecutionError):
+ checkpamuserdb.process()
+
+
+def test_process_no_location(monkeypatch):
+ def consume_mocked(*args, **kwargs):
+ yield PamUserDbLocation(locations=[])
+
+ monkeypatch.setattr(api, 'current_logger', logger_mocked())
+ monkeypatch.setattr(api, 'consume', consume_mocked)
+
+ checkpamuserdb.process()
+ assert (
+ 'No pam_userdb databases were located, thus nothing will be converted'
+ in api.current_logger.dbgmsg
+ )
+
+
+def test_process_locations(monkeypatch):
+ def consume_mocked(*args, **kwargs):
+ yield PamUserDbLocation(locations=['/tmp/db1', '/tmp/db2'])
+
+ monkeypatch.setattr(reporting, "create_report", create_report_mocked())
+ monkeypatch.setattr(api, 'consume', consume_mocked)
+
+ checkpamuserdb.process()
+ assert reporting.create_report.called == 1
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/actor.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/actor.py
new file mode 100644
index 00000000..5f8525b6
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/actor.py
@@ -0,0 +1,18 @@
+from leapp.actors import Actor
+from leapp.libraries.actor import convertpamuserdb
+from leapp.models import PamUserDbLocation
+from leapp.tags import IPUWorkflowTag, PreparationPhaseTag
+
+
+class ConvertPamUserDb(Actor):
+ """
+ Convert the pam_userdb databases to GDBM
+ """
+
+ name = 'convert_pam_user_db'
+ consumes = (PamUserDbLocation,)
+ produces = ()
+ tags = (PreparationPhaseTag, IPUWorkflowTag)
+
+ def process(self):
+ convertpamuserdb.process()
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/libraries/convertpamuserdb.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/libraries/convertpamuserdb.py
new file mode 100644
index 00000000..e55b4102
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/libraries/convertpamuserdb.py
@@ -0,0 +1,27 @@
+from leapp.exceptions import StopActorExecutionError
+from leapp.libraries.stdlib import api, CalledProcessError, run
+from leapp.models import PamUserDbLocation
+
+
+def _convert_db(db_path):
+ cmd = ['db_converter', '--src', f'{db_path}.db', '--dest', f'{db_path}.gdbm']
+ try:
+ run(cmd)
+ except (CalledProcessError, OSError) as e:
+ # As the db_converter does not remove the original DB after conversion or upon failure,
+ # interrupt the upgrade, keeping the original DBs.
+ # If all DBs are successfully converted, the leftover DBs are removed in the removeoldpamuserdb actor.
+ raise StopActorExecutionError(
+ 'Cannot convert pam_userdb database.',
+ details={'details': '{}: {}'.format(str(e), e.stderr)}
+ )
+
+
+def process():
+ msg = next(api.consume(PamUserDbLocation), None)
+ if not msg:
+ raise StopActorExecutionError('Expected PamUserDbLocation, but got None')
+
+ if msg.locations:
+ for location in msg.locations:
+ _convert_db(location)
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/tests/test_convertpamuserdb.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/tests/test_convertpamuserdb.py
new file mode 100644
index 00000000..46505492
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/convertpamuserdb/tests/test_convertpamuserdb.py
@@ -0,0 +1,39 @@
+import os
+
+import pytest
+
+from leapp.exceptions import StopActorExecutionError
+from leapp.libraries.actor import convertpamuserdb
+from leapp.libraries.common.testutils import logger_mocked
+from leapp.libraries.stdlib import api, CalledProcessError
+
+CUR_DIR = os.path.dirname(os.path.abspath(__file__))
+
+
+def test_convert_db_success(monkeypatch):
+ location = os.path.join(CUR_DIR, '/files/db1')
+
+ def run_mocked(cmd, **kwargs):
+ assert cmd == ['db_converter', '--src', f'{location}.db', '--dest', f'{location}.gdbm']
+
+ monkeypatch.setattr(api, 'current_logger', logger_mocked())
+ monkeypatch.setattr(convertpamuserdb, 'run', run_mocked)
+ convertpamuserdb._convert_db(location)
+ assert len(api.current_logger.errmsg) == 0
+
+
+def test_convert_db_failure(monkeypatch):
+ location = os.path.join(CUR_DIR, '/files/db1')
+
+ def run_mocked(cmd, **kwargs):
+ raise CalledProcessError(
+ message='A Leapp Command Error occurred.',
+ command=cmd,
+ result={'exit_code': 1}
+ )
+
+ monkeypatch.setattr(api, 'current_logger', logger_mocked())
+ monkeypatch.setattr(convertpamuserdb, 'run', run_mocked)
+ with pytest.raises(StopActorExecutionError) as err:
+ convertpamuserdb._convert_db(location)
+ assert str(err.value) == 'Cannot convert pam_userdb database.'
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/actor.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/actor.py
new file mode 100644
index 00000000..39a00855
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/actor.py
@@ -0,0 +1,18 @@
+from leapp.actors import Actor
+from leapp.libraries.actor import removeoldpamuserdb
+from leapp.models import PamUserDbLocation
+from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag
+
+
+class RemoveOldPamUserDb(Actor):
+ """
+ Remove old pam_userdb databases
+ """
+
+ name = 'remove_old_pam_user_db'
+ consumes = (PamUserDbLocation,)
+ produces = ()
+ tags = (ApplicationsPhaseTag, IPUWorkflowTag)
+
+ def process(self):
+ removeoldpamuserdb.process()
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/libraries/removeoldpamuserdb.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/libraries/removeoldpamuserdb.py
new file mode 100644
index 00000000..5fc4cb4d
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/libraries/removeoldpamuserdb.py
@@ -0,0 +1,25 @@
+from leapp.exceptions import StopActorExecutionError
+from leapp.libraries.stdlib import api, CalledProcessError, run
+from leapp.models import PamUserDbLocation
+
+
+def _remove_db(db_path):
+ cmd = ['rm', '-f', f'{db_path}.db']
+ try:
+ run(cmd)
+ except (CalledProcessError, OSError) as e:
+ api.current_logger().error(
+ 'Failed to remove {}.db: {}'.format(
+ db_path, e
+ )
+ )
+
+
+def process():
+ msg = next(api.consume(PamUserDbLocation), None)
+ if not msg:
+ raise StopActorExecutionError('Expected PamUserDbLocation, but got None')
+
+ if msg.locations:
+ for location in msg.locations:
+ _remove_db(location)
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/tests/test_removeoldpamuserdb.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/tests/test_removeoldpamuserdb.py
new file mode 100644
index 00000000..2c1d5c75
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/removeoldpamuserdb/tests/test_removeoldpamuserdb.py
@@ -0,0 +1,38 @@
+import os
+
+from leapp.libraries.actor import removeoldpamuserdb
+from leapp.libraries.common.testutils import logger_mocked
+from leapp.libraries.stdlib import api, CalledProcessError
+
+CUR_DIR = os.path.dirname(os.path.abspath(__file__))
+
+
+def test_remove_db_success(monkeypatch):
+ location = os.path.join(CUR_DIR, '/files/db1')
+
+ def run_mocked(cmd, **kwargs):
+ assert cmd == ['rm', '-f', f'{location}.db']
+
+ monkeypatch.setattr(api, 'current_logger', logger_mocked())
+ monkeypatch.setattr(removeoldpamuserdb, 'run', run_mocked)
+ removeoldpamuserdb._remove_db(location)
+ assert len(api.current_logger.errmsg) == 0
+
+
+def test_remove_db_failure(monkeypatch):
+ location = os.path.join(CUR_DIR, '/files/db1')
+
+ def run_mocked(cmd, **kwargs):
+ raise CalledProcessError(
+ message='A Leapp Command Error occurred.',
+ command=cmd,
+ result={'exit_code': 1}
+ )
+
+ monkeypatch.setattr(api, 'current_logger', logger_mocked())
+ monkeypatch.setattr(removeoldpamuserdb, 'run', run_mocked)
+ removeoldpamuserdb._remove_db(location)
+ assert (
+ 'Failed to remove /files/db1.db'
+ not in api.current_logger.errmsg
+ )
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/actor.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/actor.py
new file mode 100644
index 00000000..b6b35f1a
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/actor.py
@@ -0,0 +1,18 @@
+from leapp.actors import Actor
+from leapp.libraries.actor import scanpamuserdb
+from leapp.models import PamUserDbLocation
+from leapp.tags import FactsPhaseTag, IPUWorkflowTag
+
+
+class ScanPamUserDb(Actor):
+ """
+ Scan the PAM service folder for the location of pam_userdb databases
+ """
+
+ name = 'scan_pam_user_db'
+ consumes = ()
+ produces = (PamUserDbLocation,)
+ tags = (FactsPhaseTag, IPUWorkflowTag)
+
+ def process(self):
+ self.produce(scanpamuserdb.parse_pam_config_folder('/etc/pam.d/'))
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/libraries/scanpamuserdb.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/libraries/scanpamuserdb.py
new file mode 100644
index 00000000..0f668c02
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/libraries/scanpamuserdb.py
@@ -0,0 +1,29 @@
+import os
+import re
+
+from leapp.models import PamUserDbLocation
+
+
+def _parse_pam_config_file(conf_file):
+ with open(conf_file, 'r') as file:
+ for line in file:
+ if 'pam_userdb' in line:
+ match = re.search(r'db=(\S+)', line)
+ if match:
+ return match.group(1)
+
+ return None
+
+
+def parse_pam_config_folder(conf_folder):
+ locations = set()
+
+ for file_name in os.listdir(conf_folder):
+ file_path = os.path.join(conf_folder, file_name)
+
+ if os.path.isfile(file_path):
+ location = _parse_pam_config_file(file_path)
+ if location is not None:
+ locations.add(location)
+
+ return PamUserDbLocation(locations=list(locations))
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_basic b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_basic
new file mode 100644
index 00000000..f115147b
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_basic
@@ -0,0 +1 @@
+auth required pam_userdb.so db=/tmp/db1
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_complete b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_complete
new file mode 100644
index 00000000..84e40b48
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_complete
@@ -0,0 +1,9 @@
+auth required pam_env.so
+auth required pam_faildelay.so delay=2000000
+auth sufficient pam_fprintd.so
+auth [default=1 ignore=ignore success=ok] pam_usertype.so isregular
+auth [default=1 ignore=ignore success=ok] pam_localuser.so
+auth required pam_userdb.so db=/tmp/db2
+auth [default=1 ignore=ignore success=ok] pam_usertype.so isregular
+auth sufficient pam_sss.so forward_pass
+auth required pam_deny.so
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_missing b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_missing
new file mode 100644
index 00000000..764947fc
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/files/pam_userdb_missing
@@ -0,0 +1 @@
+auth sufficient pam_unix.so nullok
diff --git a/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/test_scanpamuserdb.py b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/test_scanpamuserdb.py
new file mode 100644
index 00000000..3b752d87
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/pamuserdb/scanpamuserdb/tests/test_scanpamuserdb.py
@@ -0,0 +1,27 @@
+import os
+
+import pytest
+
+from leapp.libraries.actor import scanpamuserdb
+
+CUR_DIR = os.path.dirname(os.path.abspath(__file__))
+
+
+@pytest.mark.parametrize(
+ "inp,exp_out",
+ [
+ ("files/pam_userdb_missing", None),
+ ("files/pam_userdb_basic", "/tmp/db1"),
+ ("files/pam_userdb_complete", "/tmp/db2"),
+ ],
+)
+def test_parse_pam_config_file(inp, exp_out):
+ file = scanpamuserdb._parse_pam_config_file(os.path.join(CUR_DIR, inp))
+ assert file == exp_out
+
+
+def test_parse_pam_config_folder():
+ msg = scanpamuserdb.parse_pam_config_folder(os.path.join(CUR_DIR, "files/"))
+ assert len(msg.locations) == 2
+ assert "/tmp/db1" in msg.locations
+ assert "/tmp/db2" in msg.locations
diff --git a/repos/system_upgrade/el9toel10/models/pamuserdblocation.py b/repos/system_upgrade/el9toel10/models/pamuserdblocation.py
new file mode 100644
index 00000000..d15b2041
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/models/pamuserdblocation.py
@@ -0,0 +1,14 @@
+from leapp.models import fields, Model
+from leapp.topics import SystemInfoTopic
+
+
+class PamUserDbLocation(Model):
+ """
+ Provides a list of all database files for pam_userdb
+ """
+ topic = SystemInfoTopic
+
+ locations = fields.List(fields.String(), default=[])
+ """
+ The list with the full path to the database files.
+ """
--
2.47.0


@ -0,0 +1,31 @@
From d6e57eec3ded2887008055442ba906a92c572a01 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Thu, 10 Oct 2024 14:03:36 +0200
Subject: [PATCH 09/40] Replace mirror.centos.org with vault.centos.org Centos
7 Containerfile
As mirror.centos.org is dead, replace mirrorlist with baseurl pointing
to vault.centos.org in utils/container-builds/Containerfile.centos7.
---
utils/container-builds/Containerfile.centos7 | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/utils/container-builds/Containerfile.centos7 b/utils/container-builds/Containerfile.centos7
index 70ac3df1..af00eddb 100644
--- a/utils/container-builds/Containerfile.centos7
+++ b/utils/container-builds/Containerfile.centos7
@@ -2,6 +2,11 @@ FROM centos:7
VOLUME /repo
+# mirror.centos.org is dead, comment out mirrorlist and set baseurl to vault.centos.org
+RUN sed -i s/mirror.centos.org/vault.centos.org/ /etc/yum.repos.d/CentOS-*.repo
+RUN sed -i s/^#\s*baseurl=http/baseurl=http/ /etc/yum.repos.d/CentOS-*.repo
+RUN sed -i s/^mirrorlist=http/#mirrorlist=http/ /etc/yum.repos.d/CentOS-*.repo
+
RUN yum update -y && \
yum install -y rpm-build python-devel make git
--
2.47.0

View File

@ -0,0 +1,35 @@
From b997e4eeb835809d1fbfd1a0b9a6114c133bf0b4 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Thu, 10 Oct 2024 15:28:48 +0200
Subject: [PATCH 10/40] kernelcmdlineconfig: Add Report to produces tuple
The missing `leapp.reporting.Report` class is added to the
kernelcmdlineconfig actor's `produces` tuple.
---
.../system_upgrade/common/actors/kernelcmdlineconfig/actor.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/repos/system_upgrade/common/actors/kernelcmdlineconfig/actor.py b/repos/system_upgrade/common/actors/kernelcmdlineconfig/actor.py
index b44fd835..3585a14e 100644
--- a/repos/system_upgrade/common/actors/kernelcmdlineconfig/actor.py
+++ b/repos/system_upgrade/common/actors/kernelcmdlineconfig/actor.py
@@ -4,6 +4,7 @@ from leapp.actors import Actor
from leapp.exceptions import StopActorExecutionError
from leapp.libraries.actor import kernelcmdlineconfig
from leapp.models import FirmwareFacts, InstalledTargetKernelInfo, KernelCmdlineArg, TargetKernelCmdlineArgTasks
+from leapp.reporting import Report
from leapp.tags import FinalizationPhaseTag, IPUWorkflowTag
@@ -14,7 +15,7 @@ class KernelCmdlineConfig(Actor):
name = 'kernelcmdlineconfig'
consumes = (KernelCmdlineArg, InstalledTargetKernelInfo, FirmwareFacts, TargetKernelCmdlineArgTasks)
- produces = ()
+ produces = (Report,)
tags = (FinalizationPhaseTag, IPUWorkflowTag)
def process(self):
--
2.47.0

View File

@ -0,0 +1,204 @@
From c2c96affa7b20c82969419ce49b65cbf646a0c32 Mon Sep 17 00:00:00 2001
From: Matej Matuska <mmatuska@redhat.com>
Date: Fri, 18 Oct 2024 12:43:19 +0200
Subject: [PATCH 11/40] kernelcmdlineconfig: Use args from first entry when
multiple entries are listed
Instead of erroring out when grubby lists multiple entries for the
default kernel, always use the `args=` and `root=` from the first one and create
a post-upgrade report. The report instructs the user to ensure those are the
correct ones or to correct them.
This can happen, for example, if MAKEDEBUG=yes is set in
/etc/sysconfig/kernel.
Jira: RHEL-46911
---
.../libraries/kernelcmdlineconfig.py | 79 ++++++++++++++++---
.../tests/test_kernelcmdlineconfig.py | 48 ++++++++++-
2 files changed, 116 insertions(+), 11 deletions(-)
diff --git a/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py b/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py
index 6b261c3b..19c50f3c 100644
--- a/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py
+++ b/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py
@@ -109,10 +109,55 @@ def _extract_grubby_value(record):
return matches.group(2)
+def report_multple_entries_for_default_kernel():
+ if use_cmdline_file():
+ report_hint = (
+ 'After the system has been rebooted into the new version of RHEL,'
+ ' check that configured default kernel cmdline arguments in /etc/kernel/cmdline '
+ ' are correct. In case that different arguments are expected, update the file as needed.'
+ )
+ else:
+ report_hint = (
+ 'After the system has been rebooted into the new version of RHEL,'
+ ' check that configured default kernel cmdline arguments are set as expected, using'
+ ' the `grub2-editenv list` command. '
+ ' If different default arguments are expected, update them using grub2-editenv.\n'
+ ' For example, consider that current booted kernel has correct kernel cmdline'
+ ' arguments and /proc/cmdline contains:\n\n'
+ ' BOOT_IMAGE=(hd0,msdos1)/vmlinuz-4.18.0-425.3.1.el8.x86_64'
+ ' root=/dev/mapper/rhel_ibm--root ro console=tty0'
+ ' console=ttyS0,115200 rd_NO_PLYMOUTH\n\n'
+ ' then run the following grub2-editenv command:\n\n'
+ ' # grub2-editenv - set "kernelopts=root=/dev/mapper/rhel_ibm--root'
+ ' ro console=tty0 console=ttyS0,115200 rd_NO_PLYMOUTH"'
+ )
+
+ reporting.create_report([
+ reporting.Title('Ensure that expected default kernel cmdline arguments are set'),
+ reporting.Summary(
+ 'During the upgrade we needed to modify the kernel command line arguments.'
+ ' However, multiple bootloader entries with different arguments were found for the default'
+ ' kernel (perhaps MAKEDEBUG=yes is set in /etc/sysconfig/kernel).'
+ ' Leapp used the arguments from the first found entry of the target kernel'
+ ' and set it as the new default kernel cmdline arguments for kernels installed in the future.'
+ ),
+ reporting.Remediation(hint=report_hint),
+ reporting.Severity(reporting.Severity.HIGH),
+ reporting.Groups([
+ reporting.Groups.BOOT,
+ reporting.Groups.KERNEL,
+ reporting.Groups.POST,
+ ]),
+ reporting.RelatedResource('file', '/etc/kernel/cmdline'),
+ ])
+
+
def retrieve_args_for_default_kernel(kernel_info):
# Copy the args for the default kernel to all kernels.
kernel_args = None
kernel_root = None
+ detected_multiple_entries = False
+
cmd = ['grubby', '--info', kernel_info.kernel_img_path]
output = stdlib.run(cmd, split=False)
for record in output['stdout'].splitlines():
@@ -122,19 +167,30 @@ def retrieve_args_for_default_kernel(kernel_info):
temp_kernel_args = _extract_grubby_value(record)
if kernel_args:
- api.current_logger().warning('Grubby output is malformed:'
- ' `args=` is listed more than once.')
if kernel_args != temp_kernel_args:
- raise ReadOfKernelArgsError('Grubby listed `args=` multiple'
- ' times with different values.')
- kernel_args = _extract_grubby_value(record)
+ api.current_logger().warning(
+ 'Grubby output listed `args=` multiple times with different values,'
+ ' continuing with the first result'
+ )
+ detected_multiple_entries = True
+ else:
+ api.current_logger().warning('Grubby output listed `args=` more than once')
+ else:
+ kernel_args = temp_kernel_args
elif record.startswith('root='):
- api.current_logger().warning('Grubby output is malformed:'
- ' `root=` is listed more than once.')
+ temp_kernel_root = _extract_grubby_value(record)
+
if kernel_root:
- raise ReadOfKernelArgsError('Grubby listed `root=` multiple'
- ' times with different values')
- kernel_root = _extract_grubby_value(record)
+ if kernel_root != temp_kernel_root:
+ api.current_logger().warning(
+ 'Grubby output listed `root=` multiple times with different values,'
+ ' continuing with the first result'
+ )
+ detected_multiple_entries = True
+ else:
+ api.current_logger().warning('Grubby output listed `root=` more than once')
+ else:
+ kernel_root = temp_kernel_root
if not kernel_args or not kernel_root:
raise ReadOfKernelArgsError(
@@ -142,6 +198,9 @@ def retrieve_args_for_default_kernel(kernel_info):
' kernels: root={}, args={}'.format(kernel_root, kernel_args)
)
+ if detected_multiple_entries:
+ report_multple_entries_for_default_kernel()
+
return kernel_root, kernel_args
diff --git a/repos/system_upgrade/common/actors/kernelcmdlineconfig/tests/test_kernelcmdlineconfig.py b/repos/system_upgrade/common/actors/kernelcmdlineconfig/tests/test_kernelcmdlineconfig.py
index ffe4b046..e5759a7b 100644
--- a/repos/system_upgrade/common/actors/kernelcmdlineconfig/tests/test_kernelcmdlineconfig.py
+++ b/repos/system_upgrade/common/actors/kernelcmdlineconfig/tests/test_kernelcmdlineconfig.py
@@ -4,11 +4,12 @@ from collections import namedtuple
import pytest
+from leapp import reporting
from leapp.exceptions import StopActorExecutionError
from leapp.libraries import stdlib
from leapp.libraries.actor import kernelcmdlineconfig
from leapp.libraries.common.config import architecture
-from leapp.libraries.common.testutils import CurrentActorMocked
+from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked
from leapp.libraries.stdlib import api
from leapp.models import InstalledTargetKernelInfo, KernelCmdlineArg, TargetKernelCmdlineArgTasks
@@ -183,6 +184,51 @@ def test_kernelcmdline_config_no_version(monkeypatch):
assert not mocked_run.commands
+SECOND_KERNEL_ARGS = (
+ 'ro rootflags=subvol=root'
+ ' resume=/dev/mapper/luks-2c0df999-81ec-4a35-a1f9-b93afee8c6ad'
+ ' rd.luks.uuid=luks-90a6412f-c588-46ca-9118-5aca35943d25'
+ ' rd.luks.uuid=luks-2c0df999-81ec-4a35-a1f9-b93afee8c6ad'
+)
+SECOND_KERNEL_ROOT = 'UUID=1aa15850-2685-418d-95a6-f7266a2de83b'
+
+
+@pytest.mark.parametrize(
+ 'second_grubby_output',
+ (
+ TEMPLATE_GRUBBY_INFO_OUTPUT.format(SECOND_KERNEL_ARGS, SECOND_KERNEL_ROOT),
+ TEMPLATE_GRUBBY_INFO_OUTPUT.format(SAMPLE_KERNEL_ARGS, SECOND_KERNEL_ROOT),
+ TEMPLATE_GRUBBY_INFO_OUTPUT.format(SECOND_KERNEL_ARGS, SAMPLE_KERNEL_ROOT),
+ )
+)
+def test_kernelcmdline_config_mutiple_args(monkeypatch, second_grubby_output):
+ kernel_img_path = '/boot/vmlinuz-X'
+ kernel_info = InstalledTargetKernelInfo(pkg_nevra=TARGET_KERNEL_NEVRA,
+ uname_r='',
+ kernel_img_path=kernel_img_path,
+ initramfs_path='/boot/initramfs-X')
+
+ # For this test, we need to check we get the proper report if grubby --info
+ # outputs multiple different `root=` or `args=`
+ # and that the first ones are used
+ grubby_info_output = "\n".join((SAMPLE_GRUBBY_INFO_OUTPUT, second_grubby_output))
+
+ mocked_run = MockedRun(
+ outputs={" ".join(("grubby", "--info", kernel_img_path)): grubby_info_output,
+ }
+ )
+ monkeypatch.setattr(stdlib, 'run', mocked_run)
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked())
+ monkeypatch.setattr(reporting, "create_report", create_report_mocked())
+
+ root, args = kernelcmdlineconfig.retrieve_args_for_default_kernel(kernel_info)
+ assert root == SAMPLE_KERNEL_ROOT
+ assert args == SAMPLE_KERNEL_ARGS
+ assert reporting.create_report.called == 1
+ expected_title = 'Ensure that expected default kernel cmdline arguments are set'
+ assert expected_title in reporting.create_report.report_fields['title']
+
+
def test_kernelcmdline_config_malformed_args(monkeypatch):
kernel_img_path = '/boot/vmlinuz-X'
kernel_info = InstalledTargetKernelInfo(pkg_nevra=TARGET_KERNEL_NEVRA,
--
2.47.0
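
To make the "first entry wins" behaviour above easier to follow, here is a simplified, self-contained sketch of the parsing idea; the real implementation lives in kernelcmdlineconfig.py and additionally logs warnings and creates the post-upgrade report:

    import re


    def first_root_and_args(grubby_stdout):
        # Keep only the first `root=` and the first `args=` value; repeats are ignored.
        root = args = None
        for line in grubby_stdout.splitlines():
            match = re.match(r'^(root|args)="?(.*?)"?$', line)
            if not match:
                continue
            key, value = match.group(1), match.group(2)
            if key == 'args' and args is None:
                args = value
            elif key == 'root' and root is None:
                root = value
        return root, args

Feeding this two concatenated grubby entries with different args= or root= lines returns the values from the first entry, which is what the new test cases assert.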

View File

@ -0,0 +1,216 @@
From 053137c50d1b060f9e6e6e45d82196b1045391b7 Mon Sep 17 00:00:00 2001
From: mhecko <mhecko@redhat.com>
Date: Thu, 4 Apr 2024 14:22:48 +0200
Subject: [PATCH 12/40] check_microarch: refactor to handle possible future
reqs
---
.../actors/checkmicroarchitecture/actor.py | 0
.../libraries/checkmicroarchitecture.py | 73 +++++++++++++++++++
.../tests/test_checkmicroarchitecture.py | 21 ++++--
.../libraries/checkmicroarchitecture.py | 46 ------------
4 files changed, 87 insertions(+), 53 deletions(-)
rename repos/system_upgrade/{el8toel9 => common}/actors/checkmicroarchitecture/actor.py (100%)
create mode 100644 repos/system_upgrade/common/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py
rename repos/system_upgrade/{el8toel9 => common}/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py (79%)
delete mode 100644 repos/system_upgrade/el8toel9/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py
diff --git a/repos/system_upgrade/el8toel9/actors/checkmicroarchitecture/actor.py b/repos/system_upgrade/common/actors/checkmicroarchitecture/actor.py
similarity index 100%
rename from repos/system_upgrade/el8toel9/actors/checkmicroarchitecture/actor.py
rename to repos/system_upgrade/common/actors/checkmicroarchitecture/actor.py
diff --git a/repos/system_upgrade/common/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py b/repos/system_upgrade/common/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py
new file mode 100644
index 00000000..cc617203
--- /dev/null
+++ b/repos/system_upgrade/common/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py
@@ -0,0 +1,73 @@
+from collections import namedtuple
+
+from leapp import reporting
+from leapp.libraries.common.config.architecture import ARCH_X86_64, matches_architecture
+from leapp.libraries.common.config.version import get_target_major_version
+from leapp.libraries.stdlib import api
+from leapp.models import CPUInfo
+
+X86_64_BASELINE_FLAGS = ['cmov', 'cx8', 'fpu', 'fxsr', 'mmx', 'syscall', 'sse', 'sse2']
+X86_64_V2_FLAGS = ['cx16', 'lahf_lm', 'popcnt', 'pni', 'sse4_1', 'sse4_2', 'ssse3']
+
+MicroarchInfo = namedtuple('MicroarchInfo', ('required_flags', 'extra_report_fields', 'microarch_ver'))
+
+
+def _inhibit_upgrade(missing_flags, target_rhel, microarch_ver, extra_report_fields=None):
+ title = 'Current x86-64 microarchitecture is unsupported in {0}'.format(target_rhel)
+ summary = ('{0} has a higher CPU requirement than older versions, it now requires a CPU '
+ 'compatible with {1} instruction set or higher.\n\n'
+ 'Missings flags detected are: {2}\n'.format(target_rhel, microarch_ver, ', '.join(missing_flags)))
+
+ report_fields = [
+ reporting.Title(title),
+ reporting.Summary(summary),
+ reporting.Severity(reporting.Severity.HIGH),
+ reporting.Groups([reporting.Groups.INHIBITOR]),
+ reporting.Groups([reporting.Groups.SANITY]),
+ reporting.Remediation(hint=('If case of using virtualization, virtualization platforms often allow '
+ 'configuring a minimum denominator CPU model for compatibility when migrating '
+ 'between different CPU models. Ensure that minimum requirements are not below '
+ 'that of {0}\n').format(target_rhel)),
+ ]
+
+ if extra_report_fields:
+ report_fields += extra_report_fields
+
+ reporting.create_report(report_fields)
+
+
+def process():
+ """
+ Check whether the processor matches the required microarchitecture.
+ """
+
+ if not matches_architecture(ARCH_X86_64):
+ api.current_logger().info('Architecture not x86-64. Skipping microarchitecture test.')
+ return
+
+ cpuinfo = next(api.consume(CPUInfo))
+
+ rhel9_microarch_article = reporting.ExternalLink(
+ title='Building Red Hat Enterprise Linux 9 for the x86-64-v2 microarchitecture level',
+ url='https://red.ht/rhel-9-intel-microarchitectures'
+ )
+
+ rhel_major_to_microarch_reqs = {
+ '9': MicroarchInfo(microarch_ver='x86-64-v2',
+ required_flags=(X86_64_BASELINE_FLAGS + X86_64_V2_FLAGS),
+ extra_report_fields=[rhel9_microarch_article]),
+ }
+
+ microarch_info = rhel_major_to_microarch_reqs.get(get_target_major_version())
+ if not microarch_info:
+ api.current_logger().info('No known microarchitecture requirements are known for target RHEL%s.',
+ get_target_major_version())
+ return
+
+ missing_flags = [flag for flag in microarch_info.required_flags if flag not in cpuinfo.flags]
+ api.current_logger().debug('Required flags missing: %s', missing_flags)
+ if missing_flags:
+ _inhibit_upgrade(missing_flags,
+ 'RHEL{0}'.format(get_target_major_version()),
+ microarch_info.microarch_ver,
+ extra_report_fields=microarch_info.extra_report_fields)
diff --git a/repos/system_upgrade/el8toel9/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py b/repos/system_upgrade/common/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py
similarity index 79%
rename from repos/system_upgrade/el8toel9/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py
rename to repos/system_upgrade/common/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py
index b7c850d9..b0624f2b 100644
--- a/repos/system_upgrade/el8toel9/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py
+++ b/repos/system_upgrade/common/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py
@@ -25,7 +25,13 @@ def test_not_x86_64_passes(monkeypatch, arch):
assert not reporting.create_report.called
-def test_valid_microarchitecture(monkeypatch):
+@pytest.mark.parametrize(
+ ('target_ver', 'cpu_flags'),
+ [
+ ('9.0', checkmicroarchitecture.X86_64_BASELINE_FLAGS + checkmicroarchitecture.X86_64_V2_FLAGS)
+ ]
+)
+def test_valid_microarchitecture(monkeypatch, target_ver, cpu_flags):
"""
Test no report is generated on a valid microarchitecture
"""
@@ -33,9 +39,8 @@ def test_valid_microarchitecture(monkeypatch):
monkeypatch.setattr(reporting, "create_report", create_report_mocked())
monkeypatch.setattr(api, 'current_logger', logger_mocked())
- required_flags = checkmicroarchitecture.X86_64_BASELINE_FLAGS + checkmicroarchitecture.X86_64_V2_FLAGS
- monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch=ARCH_X86_64,
- msgs=[CPUInfo(flags=required_flags)]))
+ monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch=ARCH_X86_64, dst_ver=target_ver,
+ msgs=[CPUInfo(flags=cpu_flags)]))
checkmicroarchitecture.process()
@@ -43,14 +48,16 @@ def test_valid_microarchitecture(monkeypatch):
assert not reporting.create_report.called
-def test_invalid_microarchitecture(monkeypatch):
+@pytest.mark.parametrize('target_ver', ['9.0'])
+def test_invalid_microarchitecture(monkeypatch, target_ver):
"""
Test report is generated on x86-64 architecture with invalid microarchitecture and the upgrade is inhibited
"""
monkeypatch.setattr(reporting, "create_report", create_report_mocked())
monkeypatch.setattr(api, 'current_logger', logger_mocked())
- monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch=ARCH_X86_64, msgs=[CPUInfo()]))
+ monkeypatch.setattr(api, 'current_actor',
+ CurrentActorMocked(arch=ARCH_X86_64, msgs=[CPUInfo()], dst_ver=target_ver))
checkmicroarchitecture.process()
@@ -60,6 +67,6 @@ def test_invalid_microarchitecture(monkeypatch):
assert 'Architecture not x86-64. Skipping microarchitecture test.' not in api.current_logger().infomsg
assert reporting.create_report.called == 1
assert 'microarchitecture is unsupported' in produced_title
- assert 'RHEL9 has a higher CPU requirement' in produced_summary
+ assert 'has a higher CPU requirement' in produced_summary
assert reporting.create_report.report_fields['severity'] == reporting.Severity.HIGH
assert is_inhibitor(reporting.create_report.report_fields)
diff --git a/repos/system_upgrade/el8toel9/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py b/repos/system_upgrade/el8toel9/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py
deleted file mode 100644
index 9c083d7e..00000000
--- a/repos/system_upgrade/el8toel9/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py
+++ /dev/null
@@ -1,46 +0,0 @@
-from leapp import reporting
-from leapp.libraries.common.config.architecture import ARCH_X86_64, matches_architecture
-from leapp.libraries.stdlib import api
-from leapp.models import CPUInfo
-
-X86_64_BASELINE_FLAGS = ['cmov', 'cx8', 'fpu', 'fxsr', 'mmx', 'syscall', 'sse', 'sse2']
-X86_64_V2_FLAGS = ['cx16', 'lahf_lm', 'popcnt', 'pni', 'sse4_1', 'sse4_2', 'ssse3']
-
-
-def _inhibit_upgrade(missing_flags):
- title = 'Current x86-64 microarchitecture is unsupported in RHEL9'
- summary = ('RHEL9 has a higher CPU requirement than older versions, it now requires a CPU '
- 'compatible with x86-64-v2 instruction set or higher.\n\n'
- 'Missings flags detected are: {}\n'.format(', '.join(missing_flags)))
-
- reporting.create_report([
- reporting.Title(title),
- reporting.Summary(summary),
- reporting.ExternalLink(title='Building Red Hat Enterprise Linux 9 for the x86-64-v2 microarchitecture level',
- url='https://red.ht/rhel-9-intel-microarchitectures'),
- reporting.Severity(reporting.Severity.HIGH),
- reporting.Groups([reporting.Groups.INHIBITOR]),
- reporting.Groups([reporting.Groups.SANITY]),
- reporting.Remediation(hint=('If case of using virtualization, virtualization platforms often allow '
- 'configuring a minimum denominator CPU model for compatibility when migrating '
- 'between different CPU models. Ensure that minimum requirements are not below '
- 'that of RHEL9\n')),
- ])
-
-
-def process():
- """
- Check whether the processor matches the required microarchitecture.
- """
-
- if not matches_architecture(ARCH_X86_64):
- api.current_logger().info('Architecture not x86-64. Skipping microarchitecture test.')
- return
-
- cpuinfo = next(api.consume(CPUInfo))
-
- required_flags = X86_64_BASELINE_FLAGS + X86_64_V2_FLAGS
- missing_flags = [flag for flag in required_flags if flag not in cpuinfo.flags]
- api.current_logger().debug('Required flags missing: %s', missing_flags)
- if missing_flags:
- _inhibit_upgrade(missing_flags)
--
2.47.0

View File

@ -0,0 +1,133 @@
From d3ebc990ba65801fbed2aaf1dce8329698667d1c Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Wed, 28 Aug 2024 12:18:40 +0200
Subject: [PATCH 13/40] check_microarch: add rhel10 requirements
---
.../actors/checkmicroarchitecture/actor.py | 13 ++++++++++--
.../libraries/checkmicroarchitecture.py | 8 +++++--
.../tests/test_checkmicroarchitecture.py | 21 ++++++++++++++-----
3 files changed, 33 insertions(+), 9 deletions(-)
diff --git a/repos/system_upgrade/common/actors/checkmicroarchitecture/actor.py b/repos/system_upgrade/common/actors/checkmicroarchitecture/actor.py
index 98ffea80..bb342f2f 100644
--- a/repos/system_upgrade/common/actors/checkmicroarchitecture/actor.py
+++ b/repos/system_upgrade/common/actors/checkmicroarchitecture/actor.py
@@ -17,7 +17,8 @@ class CheckMicroarchitecture(Actor):
levels.
RHEL9 has a higher CPU requirement than older versions, it now requires a
- CPU compatible with ``x86-64-v2`` instruction set or higher.
+ CPU compatible with ``x86-64-v2`` instruction set or higher. Similarly,
+ RHEL10 requires at least ``x86-64-v3`` instruction set.
.. table:: Required CPU features by microarchitecure level with a
corresponding flag as shown by ``lscpu``.
@@ -43,7 +44,15 @@ class CheckMicroarchitecture(Actor):
| | SSE4_2 | sse4_2 |
| | SSSE3 | ssse3 |
+------------+-------------+--------------------+
- | ... | | |
+ | x86-64-v3 | AVX | avx |
+ | | AVX2 | avx2 |
+ | | BMI1 | bmi1 |
+ | | BMI2 | bmi2 |
+ | | F16C | f16c |
+ | | FMA | fma |
+ | | LZCNT | abm |
+ | | MOVBE | movbe |
+ | | OSXSAVE | xsave |
+------------+-------------+--------------------+
Note: To get the corresponding flag for the CPU feature consult the file
diff --git a/repos/system_upgrade/common/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py b/repos/system_upgrade/common/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py
index cc617203..94e85e3e 100644
--- a/repos/system_upgrade/common/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py
+++ b/repos/system_upgrade/common/actors/checkmicroarchitecture/libraries/checkmicroarchitecture.py
@@ -8,6 +8,7 @@ from leapp.models import CPUInfo
X86_64_BASELINE_FLAGS = ['cmov', 'cx8', 'fpu', 'fxsr', 'mmx', 'syscall', 'sse', 'sse2']
X86_64_V2_FLAGS = ['cx16', 'lahf_lm', 'popcnt', 'pni', 'sse4_1', 'sse4_2', 'ssse3']
+X86_64_V3_FLAGS = ['avx2', 'bmi1', 'bmi2', 'f16c', 'fma', 'abm', 'movbe', 'xsave']
MicroarchInfo = namedtuple('MicroarchInfo', ('required_flags', 'extra_report_fields', 'microarch_ver'))
@@ -16,7 +17,7 @@ def _inhibit_upgrade(missing_flags, target_rhel, microarch_ver, extra_report_fie
title = 'Current x86-64 microarchitecture is unsupported in {0}'.format(target_rhel)
summary = ('{0} has a higher CPU requirement than older versions, it now requires a CPU '
'compatible with {1} instruction set or higher.\n\n'
- 'Missings flags detected are: {2}\n'.format(target_rhel, microarch_ver, ', '.join(missing_flags)))
+ 'Missings flags detected are: {2}\n').format(target_rhel, microarch_ver, ', '.join(missing_flags))
report_fields = [
reporting.Title(title),
@@ -24,7 +25,7 @@ def _inhibit_upgrade(missing_flags, target_rhel, microarch_ver, extra_report_fie
reporting.Severity(reporting.Severity.HIGH),
reporting.Groups([reporting.Groups.INHIBITOR]),
reporting.Groups([reporting.Groups.SANITY]),
- reporting.Remediation(hint=('If case of using virtualization, virtualization platforms often allow '
+ reporting.Remediation(hint=('If a case of using virtualization, virtualization platforms often allow '
'configuring a minimum denominator CPU model for compatibility when migrating '
'between different CPU models. Ensure that minimum requirements are not below '
'that of {0}\n').format(target_rhel)),
@@ -56,6 +57,9 @@ def process():
'9': MicroarchInfo(microarch_ver='x86-64-v2',
required_flags=(X86_64_BASELINE_FLAGS + X86_64_V2_FLAGS),
extra_report_fields=[rhel9_microarch_article]),
+ '10': MicroarchInfo(microarch_ver='x86-64-v3',
+ required_flags=(X86_64_BASELINE_FLAGS + X86_64_V2_FLAGS + X86_64_V3_FLAGS),
+ extra_report_fields=[]),
}
microarch_info = rhel_major_to_microarch_reqs.get(get_target_major_version())
diff --git a/repos/system_upgrade/common/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py b/repos/system_upgrade/common/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py
index b0624f2b..eeca8be0 100644
--- a/repos/system_upgrade/common/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py
+++ b/repos/system_upgrade/common/actors/checkmicroarchitecture/tests/test_checkmicroarchitecture.py
@@ -25,10 +25,15 @@ def test_not_x86_64_passes(monkeypatch, arch):
assert not reporting.create_report.called
+ENTIRE_V2_FLAG_SET = checkmicroarchitecture.X86_64_BASELINE_FLAGS + checkmicroarchitecture.X86_64_V2_FLAGS
+ENTIRE_V3_FLAG_SET = ENTIRE_V2_FLAG_SET + checkmicroarchitecture.X86_64_V3_FLAGS
+
+
@pytest.mark.parametrize(
('target_ver', 'cpu_flags'),
[
- ('9.0', checkmicroarchitecture.X86_64_BASELINE_FLAGS + checkmicroarchitecture.X86_64_V2_FLAGS)
+ ('9.0', ENTIRE_V2_FLAG_SET),
+ ('10.0', ENTIRE_V3_FLAG_SET)
]
)
def test_valid_microarchitecture(monkeypatch, target_ver, cpu_flags):
@@ -48,16 +53,22 @@ def test_valid_microarchitecture(monkeypatch, target_ver, cpu_flags):
assert not reporting.create_report.called
-@pytest.mark.parametrize('target_ver', ['9.0'])
-def test_invalid_microarchitecture(monkeypatch, target_ver):
+@pytest.mark.parametrize(
+ ('target_ver', 'cpu_flags'),
+ (
+ ('9.0', checkmicroarchitecture.X86_64_BASELINE_FLAGS),
+ ('10.0', ENTIRE_V2_FLAG_SET),
+ )
+)
+def test_invalid_microarchitecture(monkeypatch, target_ver, cpu_flags):
"""
Test report is generated on x86-64 architecture with invalid microarchitecture and the upgrade is inhibited
"""
-
+ cpu_info = CPUInfo(flags=cpu_flags)
monkeypatch.setattr(reporting, "create_report", create_report_mocked())
monkeypatch.setattr(api, 'current_logger', logger_mocked())
monkeypatch.setattr(api, 'current_actor',
- CurrentActorMocked(arch=ARCH_X86_64, msgs=[CPUInfo()], dst_ver=target_ver))
+ CurrentActorMocked(arch=ARCH_X86_64, msgs=[cpu_info], dst_ver=target_ver))
checkmicroarchitecture.process()
--
2.47.0
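
The flag lists used by checkmicroarchitecture correspond to the flags shown by lscpu or listed in /proc/cpuinfo. As a quick, standalone way to check a machine by hand (a hedged sketch, not leapp code; leapp obtains the same flags through the CPUInfo model):

    X86_64_BASELINE_FLAGS = ['cmov', 'cx8', 'fpu', 'fxsr', 'mmx', 'syscall', 'sse', 'sse2']
    X86_64_V2_FLAGS = ['cx16', 'lahf_lm', 'popcnt', 'pni', 'sse4_1', 'sse4_2', 'ssse3']
    X86_64_V3_FLAGS = ['avx2', 'bmi1', 'bmi2', 'f16c', 'fma', 'abm', 'movbe', 'xsave']


    def read_cpu_flags(path='/proc/cpuinfo'):
        # The first 'flags' line lists the CPU feature flags, colon-separated from their label.
        with open(path) as f:
            for line in f:
                if line.startswith('flags'):
                    return set(line.split(':', 1)[1].split())
        return set()


    def missing_flags(required, cpu_flags):
        return [flag for flag in required if flag not in cpu_flags]


    flags = read_cpu_flags()
    # RHEL 9 targets require x86-64-v2, RHEL 10 targets require x86-64-v3:
    print(missing_flags(X86_64_BASELINE_FLAGS + X86_64_V2_FLAGS, flags))
    print(missing_flags(X86_64_BASELINE_FLAGS + X86_64_V2_FLAGS + X86_64_V3_FLAGS, flags))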

View File

@ -0,0 +1,44 @@
From a14793892bafaad0802844cbb56be3be3220eb47 Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Wed, 25 Sep 2024 17:29:02 +0200
Subject: [PATCH 14/40] Skip checking files under .../directory-hash/ dir
* The main reason for this change is to improve performance and
reduce the flood of logs for content that does not seem important
to check during the upgrade process.
The directory has been added relatively recently to the ca-certificates
rpm on EL 9+ systems, mostly to improve the performance of OpenSSL, and
its content does not seem to be important for the IPU process.
The high number of files takes too much time to evaluate and causes
a flood of unimportant log messages.
This is an updated version of the solution we originally dropped: 60f500e59bb92
---
.../targetuserspacecreator/libraries/userspacegen.py | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
index cd2d7d6e..d7698056 100644
--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
+++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
@@ -311,6 +311,16 @@ def _get_files_owned_by_rpms(context, dirpath, pkgs=None, recursive=False):
searchdir = context.full_path(dirpath)
if recursive:
for root, _, files in os.walk(searchdir):
+ if '/directory-hash/' in root:
+ # tl;dr; for the performance improvement
+ # The directory has been relatively recently added to ca-certificates
+ # rpm on EL 9+ systems and the content does not seem to be important
+ # for the IPU process. Also, it contains high number of files and
+ # their processing floods the output and slows down IPU.
+ # So skipping it entirely.
+ # This is updated solution that we drop originally: 60f500e59bb92
+ api.current_logger().debug('SKIP files in the {} directory: Not important for the IPU.'.format(root))
+ continue
for filename in files:
relpath = os.path.relpath(os.path.join(root, filename), searchdir)
file_list.append(relpath)
--
2.47.0
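
The early `continue` skips the files found in any walked directory whose path contains a '/directory-hash/' component. A minimal standalone sketch of the same walk-and-skip pattern (directory names are illustrative):

    import os


    def files_under(searchdir):
        # Walk recursively but skip content below any .../directory-hash/ directory,
        # collecting paths relative to searchdir, as the patched helper does.
        file_list = []
        for root, _, files in os.walk(searchdir):
            if '/directory-hash/' in root:
                continue
            for filename in files:
                file_list.append(os.path.relpath(os.path.join(root, filename), searchdir))
        return file_list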

View File

@ -0,0 +1,66 @@
From cef2825778eb63f95e13cf48b1683bc98c32c21b Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Fri, 25 Oct 2024 16:33:38 +0200
Subject: [PATCH 15/40] lib(overlay): cap the max size of disk images
On systems with large disks (e.g. 16TB) with lots of free space, leapp
might attempt to create files larger than the max file size of the
underlying FS. Attempting to create such large files causes leapp
to crash. This patch caps the max image size to 1TB, based on empirical
evidence that more free space is not needed for the upgrade RPM
transaction.
Jira-ref: RHEL-57064
---
.../common/libraries/overlaygen.py | 28 +++++++++++++++++++
1 file changed, 28 insertions(+)
diff --git a/repos/system_upgrade/common/libraries/overlaygen.py b/repos/system_upgrade/common/libraries/overlaygen.py
index c1ac9ad3..867e3559 100644
--- a/repos/system_upgrade/common/libraries/overlaygen.py
+++ b/repos/system_upgrade/common/libraries/overlaygen.py
@@ -68,6 +68,27 @@ or close to that size, stay always with this minimal protected size defined by
this constant.
"""
+_MAX_DISK_IMAGE_SIZE_MB = 2**20 # 1*TB
+"""
+Maximum size of the created (sparse) images.
+
+Defaults to 1TB. If a disk with capacity larger than _MAX_DISK_IMAGE_SIZE_MB
+is mounted on the system, the corresponding image used to store overlay
+modifications will be capped to _MAX_DISK_IMAGE_SIZE_MB.
+
+Engineering rationale:
+ This constant was introduced to prevent leapp from creating files that are
+ virtually larger than the maximum file size supported by the file system.
+ E.g. if the source system hosts /var/lib/leapp on EXT4, then we cannot
+ create a file larger than 16TB.
+ We create these "disk images" to be able to verify the system has enough
+ disk space to perform the RPM upgrade transaction. From our experience,
+ we are not aware of any system which could have installed so much content
+ by RPMs that we would need 1TB of the free space on a single FS. Therefore,
+ we consider this value as safe while preventing us from exceeding FS
+ limits.
+"""
+
MountPoints = namedtuple('MountPoints', ['fs_file', 'fs_vfstype'])
@@ -287,6 +308,13 @@ def _prepare_required_mounts(scratch_dir, mounts_dir, storage_info, scratch_rese
disk_size = _get_fspace(mountpoint, convert_to_mibs=True, coefficient=0.95)
if mountpoint == scratch_mp:
disk_size = scratch_disk_size
+
+ if disk_size > _MAX_DISK_IMAGE_SIZE_MB:
+ msg = ('Image for overlayfs corresponding to the disk mounted at %s would ideally have %d MB, '
+ 'but we truncate it to %d MB to avoid bumping to max file limits.')
+ api.current_logger().info(msg, mountpoint, disk_size, _MAX_DISK_IMAGE_SIZE_MB)
+ disk_size = _MAX_DISK_IMAGE_SIZE_MB
+
image = _create_mount_disk_image(disk_images_directory, mountpoint, disk_size)
result[mountpoint] = mounting.LoopMount(
source=image,
--
2.47.0
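
The cap itself is simple arithmetic in MiB against the new constant. A small hedged sketch of just that decision (the constant value comes from the patch; the real code also logs the truncation):

    _MAX_DISK_IMAGE_SIZE_MB = 2**20  # 1 TB expressed in MiB


    def capped_image_size_mb(disk_size_mb):
        # Never create a sparse image larger than 1 TB, even when the mounted
        # filesystem is bigger, to stay under per-file size limits of the host FS.
        if disk_size_mb > _MAX_DISK_IMAGE_SIZE_MB:
            return _MAX_DISK_IMAGE_SIZE_MB
        return disk_size_mb

For a 16 TB mount, capped_image_size_mb(16 * 2**20) returns 2**20, i.e. the image is created at 1 TB.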

View File

@ -0,0 +1,168 @@
From ec078243771f8ef43853bd242175a612fe84f95b Mon Sep 17 00:00:00 2001
From: tomasfratrik <tomasfratrik8@gmail.com>
Date: Wed, 17 Jul 2024 12:12:50 +0200
Subject: [PATCH 16/40] Raise proper error when ModelViolationError occurs
This error occurs when a repo file has an invalid definition, specifically
when the 'name' entry of the config file is invalid. Also add tests.
Jira: RHEL-19249
---
.../systemfacts/libraries/systemfacts.py | 13 ++++++++-
.../systemfacts/tests/test_systemfacts.py | 24 ++++++++++++++++-
.../common/libraries/repofileutils.py | 17 +++++++++++-
.../libraries/tests/test_repofileutils.py | 27 +++++++++++++++++++
4 files changed, 78 insertions(+), 3 deletions(-)
diff --git a/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py b/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py
index d1eeb28c..f16cea1d 100644
--- a/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py
+++ b/repos/system_upgrade/common/actors/systemfacts/libraries/systemfacts.py
@@ -217,7 +217,18 @@ def get_sysctls_status():
def get_repositories_status():
""" Get a basic information about YUM repositories installed in the system """
- return RepositoriesFacts(repositories=repofileutils.get_parsed_repofiles())
+ try:
+ return RepositoriesFacts(repositories=repofileutils.get_parsed_repofiles())
+ except repofileutils.InvalidRepoDefinition as e:
+ raise StopActorExecutionError(
+ message=str(e),
+ details={
+ 'hint': 'For more directions on how to resolve the issue, see: {url}.'
+ .format(
+ url='https://access.redhat.com/solutions/6969001'
+ )
+ }
+ )
def get_selinux_status():
diff --git a/repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts.py b/repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts.py
index badf174c..5831b979 100644
--- a/repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts.py
+++ b/repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts.py
@@ -3,7 +3,16 @@ import pwd
import pytest
-from leapp.libraries.actor.systemfacts import _get_system_groups, _get_system_users, anyendswith, anyhasprefix, aslist
+from leapp.exceptions import StopActorExecutionError
+from leapp.libraries.actor.systemfacts import (
+ _get_system_groups,
+ _get_system_users,
+ anyendswith,
+ anyhasprefix,
+ aslist,
+ get_repositories_status
+)
+from leapp.libraries.common import repofileutils
from leapp.libraries.common.testutils import logger_mocked
from leapp.libraries.stdlib import api
from leapp.snactor.fixture import current_actor_libraries
@@ -116,3 +125,16 @@ def test_get_system_groups(monkeypatch, etc_group_names, skipped_group_names):
assert group_name not in api.current_logger().dbgmsg[0]
else:
assert not api.current_logger().dbgmsg
+
+
+def test_failed_parsed_repofiles(monkeypatch):
+ def _raise_invalidrepo_error():
+ raise repofileutils.InvalidRepoDefinition(msg='mocked error',
+ repofile='/etc/yum.repos.d/mock.repo',
+ repoid='mocked repoid')
+
+ monkeypatch.setattr(repofileutils, 'get_parsed_repofiles', _raise_invalidrepo_error)
+ monkeypatch.setattr(api, 'current_logger', logger_mocked())
+
+ with pytest.raises(StopActorExecutionError):
+ get_repositories_status()
diff --git a/repos/system_upgrade/common/libraries/repofileutils.py b/repos/system_upgrade/common/libraries/repofileutils.py
index a563be52..cab3c42b 100644
--- a/repos/system_upgrade/common/libraries/repofileutils.py
+++ b/repos/system_upgrade/common/libraries/repofileutils.py
@@ -11,6 +11,16 @@ except ImportError:
api.current_logger().warning('repofileutils.py: failed to import dnf')
+class InvalidRepoDefinition(Exception):
+ """Raised when a repository definition is invalid."""
+ def __init__(self, msg, repofile, repoid):
+ message = 'Invalid repository definition: {repoid} in: {repofile}: {msg}'.format(
+ repoid=repoid, repofile=repofile, msg=msg)
+ super(InvalidRepoDefinition, self).__init__(message)
+ self.repofile = repofile
+ self.repoid = repoid
+
+
def _parse_repository(repoid, repo_data):
def asbool(x):
return x == '1'
@@ -33,12 +43,17 @@ def parse_repofile(repofile):
:param repofile: Path to the repo file
:type repofile: str
:rtype: RepositoryFile
+ :raises InvalidRepoDefinition: If the repository definition is invalid,
+ this can for example occur if 'name' field in repository is missing or it is invalid.
"""
data = []
with open(repofile, mode='r') as fp:
cp = utils.parse_config(fp, strict=False)
for repoid in cp.sections():
- data.append(_parse_repository(repoid, dict(cp.items(repoid))))
+ try:
+ data.append(_parse_repository(repoid, dict(cp.items(repoid))))
+ except fields.ModelViolationError as e:
+ raise InvalidRepoDefinition(e, repofile=repofile, repoid=repoid)
return RepositoryFile(file=repofile, data=data)
diff --git a/repos/system_upgrade/common/libraries/tests/test_repofileutils.py b/repos/system_upgrade/common/libraries/tests/test_repofileutils.py
index 51cc1c11..42c7e49e 100644
--- a/repos/system_upgrade/common/libraries/tests/test_repofileutils.py
+++ b/repos/system_upgrade/common/libraries/tests/test_repofileutils.py
@@ -1,7 +1,10 @@
import json
import os
+import pytest
+
from leapp.libraries.common import repofileutils
+from leapp.models.fields import ModelViolationError
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -12,6 +15,30 @@ def test_invert_dict():
assert inv_dict == {'a': [1], 'b': [1, 2]}
+@pytest.mark.parametrize(
+ ('repoid', 'data'),
+ (
+ ('missing-name', {'baseurl': 'http://example.com', 'enabled': '1', 'gpgcheck': '1'}),
+ (None, {'name': 'name', 'baseurl': 'http://example.com', 'enabled': '1', 'gpgcheck': '1'}),
+ ('name-none', {'name': None, 'baseurl': 'http://example.com', 'enabled': '1', 'gpgcheck': '1'}),
+ ('baseurl-true', {'name': 'valid', 'baseurl': True, 'enabled': '1', 'gpgcheck': '1'}),
+ )
+)
+def test__parse_repository_missing_name(repoid, data):
+ with pytest.raises(ModelViolationError):
+ repofileutils._parse_repository(repoid, data)
+
+
+def test_parse_repofile_error(monkeypatch):
+ def _parse_repository_mocked(*args, **kwargs):
+ raise ModelViolationError('')
+
+ monkeypatch.setattr(repofileutils, '_parse_repository', _parse_repository_mocked)
+
+ with pytest.raises(repofileutils.InvalidRepoDefinition):
+ repofileutils.parse_repofile(os.path.join(CUR_DIR, 'sample_repos.txt'))
+
+
def test_parse_repofile():
repofile = repofileutils.parse_repofile(os.path.join(CUR_DIR, 'sample_repos.txt'))
--
2.47.0
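
The new exception carries the offending repoid and repo file, so a caller such as systemfacts.get_repositories_status can surface them in a StopActorExecutionError hint. A trimmed, self-contained sketch of that raise-and-catch pattern (the validation and the sample data are fabricated for illustration):

    class InvalidRepoDefinition(Exception):
        """Raised when a repository definition is invalid."""
        def __init__(self, msg, repofile, repoid):
            message = 'Invalid repository definition: {repoid} in: {repofile}: {msg}'.format(
                repoid=repoid, repofile=repofile, msg=msg)
            super(InvalidRepoDefinition, self).__init__(message)
            self.repofile = repofile
            self.repoid = repoid


    def parse_repository(repoid, repo_data, repofile):
        # Stand-in for the real _parse_repository, which raises on fields.ModelViolationError.
        if not repo_data.get('name'):
            raise InvalidRepoDefinition('name is missing', repofile=repofile, repoid=repoid)
        return repo_data


    try:
        parse_repository('broken-repo', {'baseurl': 'http://example.com'}, '/etc/yum.repos.d/mock.repo')
    except InvalidRepoDefinition as exc:
        print(str(exc))  # message includes the repoid and the file, ready for a user-facing hint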

View File

@ -0,0 +1,56 @@
From f84c6f808a821d3ccd09a4a8278cef9c09984a28 Mon Sep 17 00:00:00 2001
From: Daniel Zatovic <daniel.zatovic@gmail.com>
Date: Wed, 3 Apr 2024 23:25:06 +0200
Subject: [PATCH 17/40] InhibitWhenLuks: simplify the logic
---
.../common/actors/inhibitwhenluks/actor.py | 35 +++++++------------
1 file changed, 13 insertions(+), 22 deletions(-)
diff --git a/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py b/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py
index d3ff2d2e..40b845b0 100644
--- a/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py
+++ b/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py
@@ -24,26 +24,17 @@ class InhibitWhenLuks(Actor):
ceph_info = next(self.consume(CephInfo))
if ceph_info:
ceph_vol = ceph_info.encrypted_volumes[:]
- for storage_info in self.consume(StorageInfo):
- for blk in storage_info.lsblk:
- if blk.tp == 'crypt' and blk.name not in ceph_vol:
- create_report([
- reporting.Title('LUKS encrypted partition detected'),
- reporting.Summary('Upgrading system with encrypted partitions is not supported'),
- reporting.Severity(reporting.Severity.HIGH),
- reporting.Groups([reporting.Groups.BOOT, reporting.Groups.ENCRYPTION]),
- reporting.Groups([reporting.Groups.INHIBITOR]),
- ])
- break
except StopIteration:
- for storage_info in self.consume(StorageInfo):
- for blk in storage_info.lsblk:
- if blk.tp == 'crypt':
- create_report([
- reporting.Title('LUKS encrypted partition detected'),
- reporting.Summary('Upgrading system with encrypted partitions is not supported'),
- reporting.Severity(reporting.Severity.HIGH),
- reporting.Groups([reporting.Groups.BOOT, reporting.Groups.ENCRYPTION]),
- reporting.Groups([reporting.Groups.INHIBITOR]),
- ])
- break
+ pass
+
+ for storage_info in self.consume(StorageInfo):
+ for blk in storage_info.lsblk:
+ if blk.tp == 'crypt' and blk.name not in ceph_vol:
+ create_report([
+ reporting.Title('LUKS encrypted partition detected'),
+ reporting.Summary('Upgrading system with encrypted partitions is not supported'),
+ reporting.Severity(reporting.Severity.HIGH),
+ reporting.Groups([reporting.Groups.BOOT, reporting.Groups.ENCRYPTION]),
+ reporting.Groups([reporting.Groups.INHIBITOR]),
+ ])
+ break
--
2.47.0
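
The simplification keeps the try/except StopIteration guard around next(); the equivalent default-value form, which the later LUKS rework adopts in its _get_ceph_volumes helper, looks like this (sketch only):

    from leapp.models import CephInfo


    def get_ceph_volumes(actor):
        # Returns the encrypted Ceph volumes, or an empty list when no CephInfo message exists.
        ceph_info = next(actor.consume(CephInfo), None)
        return ceph_info.encrypted_volumes[:] if ceph_info else []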

View File

@ -0,0 +1,271 @@
From 03fc6743b8916f23f6a213e3f0fc3020ee141b96 Mon Sep 17 00:00:00 2001
From: Daniel Zatovic <daniel.zatovic@gmail.com>
Date: Wed, 3 Apr 2024 23:42:45 +0200
Subject: [PATCH 18/40] StorageScanner: Add parent device name to lsblk
Modify the StorageInfo model to include path and name of the parent
device. Use StorageScanner to collect this information.
Moreover, fix the lsblk test: there should be a full device path in "lsblk
-pbnr" output (just names were used in the original test).
---
.../tests/test_inhibitwhenluks.py | 12 +--
.../libraries/storagescanner.py | 29 +++++--
.../tests/unit_test_storagescanner.py | 78 +++++++++++++++----
.../common/models/storageinfo.py | 2 +
.../tests/unit_test_vdoconversionscanner.py | 4 +-
5 files changed, 95 insertions(+), 30 deletions(-)
diff --git a/repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py b/repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py
index fee50f9d..405a3429 100644
--- a/repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py
+++ b/repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py
@@ -5,8 +5,8 @@ from leapp.utils.report import is_inhibitor
def test_actor_with_luks(current_actor_context):
- with_luks = [LsblkEntry(name='luks-132', kname='kname1', maj_min='253:0', rm='0',
- size='10G', bsize=10*(1 << 39), ro='0', tp='crypt', mountpoint='')]
+ with_luks = [LsblkEntry(name='luks-132', kname='kname1', maj_min='253:0', rm='0', size='10G', bsize=10*(1 << 39),
+ ro='0', tp='crypt', mountpoint='', parent_name='', parent_path='')]
current_actor_context.feed(StorageInfo(lsblk=with_luks))
current_actor_context.run()
@@ -16,8 +16,8 @@ def test_actor_with_luks(current_actor_context):
def test_actor_with_luks_ceph_only(current_actor_context):
- with_luks = [LsblkEntry(name='luks-132', kname='kname1', maj_min='253:0', rm='0',
- size='10G', bsize=10*(1 << 39), ro='0', tp='crypt', mountpoint='')]
+ with_luks = [LsblkEntry(name='luks-132', kname='kname1', maj_min='253:0', rm='0', size='10G', bsize=10*(1 << 39),
+ ro='0', tp='crypt', mountpoint='', parent_name='', parent_path='')]
ceph_volume = ['luks-132']
current_actor_context.feed(StorageInfo(lsblk=with_luks))
current_actor_context.feed(CephInfo(encrypted_volumes=ceph_volume))
@@ -26,8 +26,8 @@ def test_actor_with_luks_ceph_only(current_actor_context):
def test_actor_without_luks(current_actor_context):
- without_luks = [LsblkEntry(name='sda1', kname='sda1', maj_min='8:0', rm='0',
- size='10G', bsize=10*(1 << 39), ro='0', tp='part', mountpoint='/boot')]
+ without_luks = [LsblkEntry(name='sda1', kname='sda1', maj_min='8:0', rm='0', size='10G', bsize=10*(1 << 39),
+ ro='0', tp='part', mountpoint='/boot', parent_name='', parent_path='')]
current_actor_context.feed(StorageInfo(lsblk=without_luks))
current_actor_context.run()
diff --git a/repos/system_upgrade/common/actors/storagescanner/libraries/storagescanner.py b/repos/system_upgrade/common/actors/storagescanner/libraries/storagescanner.py
index f15f0d87..cad6bd32 100644
--- a/repos/system_upgrade/common/actors/storagescanner/libraries/storagescanner.py
+++ b/repos/system_upgrade/common/actors/storagescanner/libraries/storagescanner.py
@@ -164,18 +164,31 @@ def _get_mount_info(path):
)
+def _get_lsblk_info_for_devpath(dev_path):
+ lsblk_cmd = ['lsblk', '-nr', '--output', 'NAME,KNAME,SIZE', dev_path]
+ lsblk_info_for_devpath = next(_get_cmd_output(lsblk_cmd, ' ', 3), None)
+
+ return lsblk_info_for_devpath
+
+
@aslist
def _get_lsblk_info():
""" Collect storage info from lsblk command """
- cmd = ['lsblk', '-pbnr', '--output', 'NAME,MAJ:MIN,RM,SIZE,RO,TYPE,MOUNTPOINT']
- for entry in _get_cmd_output(cmd, ' ', 7):
- dev_path, maj_min, rm, bsize, ro, tp, mountpoint = entry
- lsblk_cmd = ['lsblk', '-nr', '--output', 'NAME,KNAME,SIZE', dev_path]
- lsblk_info_for_devpath = next(_get_cmd_output(lsblk_cmd, ' ', 3), None)
+ cmd = ['lsblk', '-pbnr', '--output', 'NAME,MAJ:MIN,RM,SIZE,RO,TYPE,MOUNTPOINT,PKNAME']
+ for entry in _get_cmd_output(cmd, ' ', 8):
+ dev_path, maj_min, rm, bsize, ro, tp, mountpoint, parent_path = entry
+
+ lsblk_info_for_devpath = _get_lsblk_info_for_devpath(dev_path)
if not lsblk_info_for_devpath:
return
-
name, kname, size = lsblk_info_for_devpath
+
+ parent_name = ""
+ if parent_path:
+ parent_info = _get_lsblk_info_for_devpath(parent_path)
+ if parent_info:
+ parent_name, _, _ = parent_info
+
yield LsblkEntry(
name=name,
kname=kname,
@@ -185,7 +198,9 @@ def _get_lsblk_info():
bsize=int(bsize),
ro=ro,
tp=tp,
- mountpoint=mountpoint)
+ mountpoint=mountpoint,
+ parent_name=parent_name,
+ parent_path=parent_path)
@aslist
diff --git a/repos/system_upgrade/common/actors/storagescanner/tests/unit_test_storagescanner.py b/repos/system_upgrade/common/actors/storagescanner/tests/unit_test_storagescanner.py
index 4dc11ea4..456e40ec 100644
--- a/repos/system_upgrade/common/actors/storagescanner/tests/unit_test_storagescanner.py
+++ b/repos/system_upgrade/common/actors/storagescanner/tests/unit_test_storagescanner.py
@@ -255,13 +255,18 @@ def test_get_lsblk_info(monkeypatch):
bytes_per_gb = 1 << 30
def get_cmd_output_mocked(cmd, delim, expected_len):
- if cmd == ['lsblk', '-pbnr', '--output', 'NAME,MAJ:MIN,RM,SIZE,RO,TYPE,MOUNTPOINT']:
+ if cmd == ['lsblk', '-pbnr', '--output', 'NAME,MAJ:MIN,RM,SIZE,RO,TYPE,MOUNTPOINT,PKNAME']:
output_lines_split_on_whitespace = [
- ['vda', '252:0', '0', str(40 * bytes_per_gb), '0', 'disk', ''],
- ['vda1', '252:1', '0', str(1 * bytes_per_gb), '0', 'part', '/boot'],
- ['vda2', '252:2', '0', str(39 * bytes_per_gb), '0', 'part', ''],
- ['rhel_ibm--p8--kvm--03--guest--02-root', '253:0', '0', str(38 * bytes_per_gb), '0', 'lvm', '/'],
- ['rhel_ibm--p8--kvm--03--guest--02-swap', '253:1', '0', str(1 * bytes_per_gb), '0', 'lvm', '[SWAP]']
+ ['/dev/vda', '252:0', '0', str(40 * bytes_per_gb), '0', 'disk', '', ''],
+ ['/dev/vda1', '252:1', '0', str(1 * bytes_per_gb), '0', 'part', '/boot', ''],
+ ['/dev/vda2', '252:2', '0', str(39 * bytes_per_gb), '0', 'part', '', ''],
+ ['/dev/mapper/rhel_ibm--p8--kvm--03--guest--02-root', '253:0', '0', str(38 * bytes_per_gb), '0', 'lvm',
+ '/', ''],
+ ['/dev/mapper/rhel_ibm--p8--kvm--03--guest--02-swap', '253:1', '0', str(1 * bytes_per_gb), '0', 'lvm',
+ '[SWAP]', ''],
+ ['/dev/mapper/luks-01b60fff-a2a8-4c03-893f-056bfc3f06f6', '254:0', '0', str(38 * bytes_per_gb), '0',
+ 'crypt', '', '/dev/nvme0n1p1'],
+ ['/dev/nvme0n1p1', '259:1', '0', str(39 * bytes_per_gb), '0', 'part', '', '/dev/nvme0n1'],
]
for output_line_parts in output_lines_split_on_whitespace:
yield output_line_parts
@@ -269,11 +274,17 @@ def test_get_lsblk_info(monkeypatch):
# We cannot have the output in a list, since the command is called per device. Therefore, we have to map
# each device path to its output.
output_lines_split_on_whitespace_per_device = {
- 'vda': ['vda', 'vda', '40G'],
- 'vda1': ['vda1', 'vda1', '1G'],
- 'vda2': ['vda2', 'vda2', '39G'],
- 'rhel_ibm--p8--kvm--03--guest--02-root': ['rhel_ibm--p8--kvm--03--guest--02-root', 'kname1', '38G'],
- 'rhel_ibm--p8--kvm--03--guest--02-swap': ['rhel_ibm--p8--kvm--03--guest--02-swap', 'kname2', '1G']
+ '/dev/vda': ['vda', 'vda', '40G'],
+ '/dev/vda1': ['vda1', 'vda1', '1G'],
+ '/dev/vda2': ['vda2', 'vda2', '39G'],
+ '/dev/mapper/rhel_ibm--p8--kvm--03--guest--02-root':
+ ['rhel_ibm--p8--kvm--03--guest--02-root', 'kname1', '38G'],
+ '/dev/mapper/rhel_ibm--p8--kvm--03--guest--02-swap':
+ ['rhel_ibm--p8--kvm--03--guest--02-swap', 'kname2', '1G'],
+ '/dev/mapper/luks-01b60fff-a2a8-4c03-893f-056bfc3f06f6':
+ ['luks-01b60fff-a2a8-4c03-893f-056bfc3f06f6', 'dm-0', '38G'],
+ '/dev/nvme0n1p1': ['nvme0n1p1', 'nvme0n1p1', '39G'],
+ '/dev/nvme0n1': ['nvme0n1', 'nvme0n1', '40G'],
}
dev_path = cmd[4]
if dev_path not in output_lines_split_on_whitespace_per_device:
@@ -294,7 +305,9 @@ def test_get_lsblk_info(monkeypatch):
bsize=40 * bytes_per_gb,
ro='0',
tp='disk',
- mountpoint=''),
+ mountpoint='',
+ parent_name='',
+ parent_path=''),
LsblkEntry(
name='vda1',
kname='vda1',
@@ -304,7 +317,9 @@ def test_get_lsblk_info(monkeypatch):
bsize=1 * bytes_per_gb,
ro='0',
tp='part',
- mountpoint='/boot'),
+ mountpoint='/boot',
+ parent_name='',
+ parent_path=''),
LsblkEntry(
name='vda2',
kname='vda2',
@@ -314,7 +329,9 @@ def test_get_lsblk_info(monkeypatch):
bsize=39 * bytes_per_gb,
ro='0',
tp='part',
- mountpoint=''),
+ mountpoint='',
+ parent_name='',
+ parent_path=''),
LsblkEntry(
name='rhel_ibm--p8--kvm--03--guest--02-root',
kname='kname1',
@@ -324,7 +341,9 @@ def test_get_lsblk_info(monkeypatch):
bsize=38 * bytes_per_gb,
ro='0',
tp='lvm',
- mountpoint='/'),
+ mountpoint='/',
+ parent_name='',
+ parent_path=''),
LsblkEntry(
name='rhel_ibm--p8--kvm--03--guest--02-swap',
kname='kname2',
@@ -334,7 +353,34 @@ def test_get_lsblk_info(monkeypatch):
bsize=1 * bytes_per_gb,
ro='0',
tp='lvm',
- mountpoint='[SWAP]')]
+ mountpoint='[SWAP]',
+ parent_name='',
+ parent_path=''),
+ LsblkEntry(
+ name='luks-01b60fff-a2a8-4c03-893f-056bfc3f06f6',
+ kname='dm-0',
+ maj_min='254:0',
+ rm='0',
+ size='38G',
+ bsize=38 * bytes_per_gb,
+ ro='0',
+ tp='crypt',
+ mountpoint='',
+ parent_name='nvme0n1p1',
+ parent_path='/dev/nvme0n1p1'),
+ LsblkEntry(
+ name='nvme0n1p1',
+ kname='nvme0n1p1',
+ maj_min='259:1',
+ rm='0',
+ size='39G',
+ bsize=39 * bytes_per_gb,
+ ro='0',
+ tp='part',
+ mountpoint='',
+ parent_name='nvme0n1',
+ parent_path='/dev/nvme0n1'),
+ ]
actual = storagescanner._get_lsblk_info()
assert expected == actual
diff --git a/repos/system_upgrade/common/models/storageinfo.py b/repos/system_upgrade/common/models/storageinfo.py
index 5bb9caac..71e7459d 100644
--- a/repos/system_upgrade/common/models/storageinfo.py
+++ b/repos/system_upgrade/common/models/storageinfo.py
@@ -43,6 +43,8 @@ class LsblkEntry(Model):
ro = fields.String()
tp = fields.String()
mountpoint = fields.String()
+ parent_name = fields.String()
+ parent_path = fields.String()
class PvsEntry(Model):
diff --git a/repos/system_upgrade/el8toel9/actors/vdoconversionscanner/tests/unit_test_vdoconversionscanner.py b/repos/system_upgrade/el8toel9/actors/vdoconversionscanner/tests/unit_test_vdoconversionscanner.py
index 0745c91d..4d6ef0dc 100644
--- a/repos/system_upgrade/el8toel9/actors/vdoconversionscanner/tests/unit_test_vdoconversionscanner.py
+++ b/repos/system_upgrade/el8toel9/actors/vdoconversionscanner/tests/unit_test_vdoconversionscanner.py
@@ -26,7 +26,9 @@ def _lsblk_entry(prefix, number, types, size='128G', bsize=2 ** 37):
bsize=bsize,
ro='0',
tp=types[random.randint(0, len(types) - 1)],
- mountpoint='')
+ mountpoint='',
+ parent_name='',
+ parent_path='')
@aslist
--
2.47.0
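
The scanner now reads PKNAME as the eighth column of `lsblk -pbnr` and resolves the parent's display name with a second lsblk call per device. A hedged sketch of just the first parsing step, run against a captured output string instead of the live command (the sample line is made up):

    def parse_lsblk_pbnr(output):
        # Each record: NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT PKNAME (8 fields; the
        # last two may be empty, which with `-r` output shows up as consecutive spaces).
        entries = []
        for line in output.splitlines():
            fields = line.split(' ')
            if len(fields) != 8:
                continue
            dev_path, maj_min, rm, bsize, ro, tp, mountpoint, parent_path = fields
            entries.append({'path': dev_path, 'type': tp, 'parent_path': parent_path})
        return entries


    sample = '/dev/mapper/luks-0123 254:0 0 40894464000 0 crypt  /dev/nvme0n1p1'
    print(parse_lsblk_pbnr(sample))  # parent_path points at the underlying partition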

File diff suppressed because it is too large

View File

@ -0,0 +1,455 @@
From ad241f701b39a81d132105f1a301f2f5546f498a Mon Sep 17 00:00:00 2001
From: Daniel Zatovic <daniel.zatovic@gmail.com>
Date: Tue, 6 Aug 2024 17:26:58 +0200
Subject: [PATCH 20/40] InhibitWhenLuks: allow upgrades for LUKS2 bound to
Clevis TPM2 token
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
So far, upgrades with encrypted drives were not supported. Encrypted
drives require interactively typing unlock passphrases, which is not
suitable for automatic upgrades using Leapp. We add a feature where
systems whose drives are all configured with an automatic unlock method can be
upgraded.
Currently, we only support drives configured with a Clevis/TPM2 token,
because networking is not configured during the Leapp upgrade (excluding
NBDE).
We consume the LuksDumps message to decide whether the upgrade process
should be inhibited. If there is at least one LUKS2 device without a
Clevis TPM2 binding, we inhibit the upgrade because we cannot tell whether
the device is part of a more complex storage stack, and the failure
to unlock the device might cause a boot problem.
Co-authored-by: Petr Stodůlka <pstodulk@redhat.com>
---
.../common/actors/inhibitwhenluks/actor.py | 38 ++--
.../libraries/inhibitwhenluks.py | 164 +++++++++++++++++
.../tests/test_inhibitwhenluks.py | 169 ++++++++++++++++--
3 files changed, 329 insertions(+), 42 deletions(-)
create mode 100644 repos/system_upgrade/common/actors/inhibitwhenluks/libraries/inhibitwhenluks.py
diff --git a/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py b/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py
index 40b845b0..65607167 100644
--- a/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py
+++ b/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py
@@ -1,40 +1,24 @@
-from leapp import reporting
from leapp.actors import Actor
-from leapp.models import CephInfo, StorageInfo
-from leapp.reporting import create_report, Report
+from leapp.libraries.actor.inhibitwhenluks import check_invalid_luks_devices
+from leapp.models import CephInfo, LuksDumps, StorageInfo, TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks
+from leapp.reporting import Report
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
class InhibitWhenLuks(Actor):
"""
- Check if any encrypted partitions is in use. If yes, inhibit the upgrade process.
+ Check if any encrypted partitions are in use and whether they are supported for the upgrade.
- Upgrading system with encrypted partition is not supported.
+ Upgrading EL7 system with encrypted partition is not supported (but ceph OSDs).
+ For EL8+ it's ok if the discovered used encrypted storage has LUKS2 format
+ and it's bounded to clevis-tpm2 token (so it can be automatically unlocked
+ during the process).
"""
name = 'check_luks_and_inhibit'
- consumes = (StorageInfo, CephInfo)
- produces = (Report,)
+ consumes = (CephInfo, LuksDumps, StorageInfo)
+ produces = (Report, TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks)
tags = (ChecksPhaseTag, IPUWorkflowTag)
def process(self):
- # If encrypted Ceph volumes present, check if there are more encrypted disk in lsblk than Ceph vol
- ceph_vol = []
- try:
- ceph_info = next(self.consume(CephInfo))
- if ceph_info:
- ceph_vol = ceph_info.encrypted_volumes[:]
- except StopIteration:
- pass
-
- for storage_info in self.consume(StorageInfo):
- for blk in storage_info.lsblk:
- if blk.tp == 'crypt' and blk.name not in ceph_vol:
- create_report([
- reporting.Title('LUKS encrypted partition detected'),
- reporting.Summary('Upgrading system with encrypted partitions is not supported'),
- reporting.Severity(reporting.Severity.HIGH),
- reporting.Groups([reporting.Groups.BOOT, reporting.Groups.ENCRYPTION]),
- reporting.Groups([reporting.Groups.INHIBITOR]),
- ])
- break
+ check_invalid_luks_devices()
diff --git a/repos/system_upgrade/common/actors/inhibitwhenluks/libraries/inhibitwhenluks.py b/repos/system_upgrade/common/actors/inhibitwhenluks/libraries/inhibitwhenluks.py
new file mode 100644
index 00000000..57a94e9d
--- /dev/null
+++ b/repos/system_upgrade/common/actors/inhibitwhenluks/libraries/inhibitwhenluks.py
@@ -0,0 +1,164 @@
+from leapp import reporting
+from leapp.libraries.common.config.version import get_source_major_version
+from leapp.libraries.stdlib import api
+from leapp.models import (
+ CephInfo,
+ DracutModule,
+ LuksDumps,
+ StorageInfo,
+ TargetUserSpaceUpgradeTasks,
+ UpgradeInitramfsTasks
+)
+from leapp.reporting import create_report
+
+# https://red.ht/clevis-tpm2-luks-auto-unlock-rhel8
+# https://red.ht/clevis-tpm2-luks-auto-unlock-rhel9
+# https://red.ht/convert-to-luks2-rhel8
+# https://red.ht/convert-to-luks2-rhel9
+CLEVIS_DOC_URL_FMT = 'https://red.ht/clevis-tpm2-luks-auto-unlock-rhel{}'
+LUKS2_CONVERT_DOC_URL_FMT = 'https://red.ht/convert-to-luks2-rhel{}'
+
+FMT_LIST_SEPARATOR = '\n - '
+
+
+def _formatted_list_output(input_list, sep=FMT_LIST_SEPARATOR):
+ return ['{}{}'.format(sep, item) for item in input_list]
+
+
+def _at_least_one_tpm_token(luks_dump):
+ return any([token.token_type == "clevis-tpm2" for token in luks_dump.tokens])
+
+
+def _get_ceph_volumes():
+ ceph_info = next(api.consume(CephInfo), None)
+ return ceph_info.encrypted_volumes[:] if ceph_info else []
+
+
+def apply_obsoleted_check_ipu_7_8():
+ ceph_vol = _get_ceph_volumes()
+ for storage_info in api.consume(StorageInfo):
+ for blk in storage_info.lsblk:
+ if blk.tp == 'crypt' and blk.name not in ceph_vol:
+ create_report([
+ reporting.Title('LUKS encrypted partition detected'),
+ reporting.Summary('Upgrading system with encrypted partitions is not supported'),
+ reporting.Severity(reporting.Severity.HIGH),
+ reporting.Groups([reporting.Groups.BOOT, reporting.Groups.ENCRYPTION]),
+ reporting.Groups([reporting.Groups.INHIBITOR]),
+ ])
+ break
+
+
+def report_inhibitor(luks1_partitions, no_tpm2_partitions):
+ source_major_version = get_source_major_version()
+ clevis_doc_url = CLEVIS_DOC_URL_FMT.format(source_major_version)
+ luks2_convert_doc_url = LUKS2_CONVERT_DOC_URL_FMT.format(source_major_version)
+ summary = (
+ 'We have detected LUKS encrypted volumes that do not meet current'
+ ' criteria to be able to proceed the in-place upgrade process.'
+ ' Right now the upgrade process requires for encrypted storage to be'
+ ' in LUKS2 format configured with Clevis TPM 2.0.'
+ )
+
+ report_hints = []
+
+ if luks1_partitions:
+
+ summary += (
+ '\n\nSince RHEL 8 the default format for LUKS encryption is LUKS2.'
+ ' Although the old LUKS1 format is still supported on RHEL systems,'
+ ' it has some limitations in comparison to LUKS2.'
+ ' Only the LUKS2 format is supported for upgrades.'
+ ' The following LUKS1 partitions have been discovered on your system:{}'
+ .format(''.join(_formatted_list_output(luks1_partitions)))
+ )
+ report_hints.append(reporting.Remediation(
+ hint=(
+ 'Convert your LUKS1 encrypted devices to LUKS2 and bind it to TPM2 using clevis.'
+ ' If this is not possible in your case consider clean installation'
+ ' of the target RHEL system instead.'
+ )
+ ))
+ report_hints.append(reporting.ExternalLink(
+ url=luks2_convert_doc_url,
+ title='LUKS versions in RHEL: Conversion'
+ ))
+
+ if no_tpm2_partitions:
+ summary += (
+ '\n\nCurrently we require the process to be non-interactive and'
+ ' offline. For this reason we require automatic unlock of'
+ ' encrypted devices during the upgrade process.'
+ ' Currently we support automatic unlocking during the upgrade only'
+ ' for volumes bound to Clevis TPM2 token.'
+ ' The following LUKS2 devices without Clevis TPM2 token '
+ ' have been discovered on your system: {}'
+ .format(''.join(_formatted_list_output(no_tpm2_partitions)))
+ )
+
+ report_hints.append(reporting.Remediation(
+ hint=(
+ 'Add Clevis TPM2 binding to LUKS devices.'
+ ' If some LUKS devices still use the old LUKS1 format, convert'
+ ' them to LUKS2 prior to binding.'
+ )
+ ))
+ report_hints.append(reporting.ExternalLink(
+ url=clevis_doc_url,
+ title='Configuring manual enrollment of LUKS-encrypted volumes by using a TPM 2.0 policy'
+ )
+ )
+ create_report([
+ reporting.Title('Detected LUKS devices unsuitable for in-place upgrade.'),
+ reporting.Summary(summary),
+ reporting.Severity(reporting.Severity.HIGH),
+ reporting.Groups([reporting.Groups.BOOT, reporting.Groups.ENCRYPTION]),
+ reporting.Groups([reporting.Groups.INHIBITOR]),
+ ] + report_hints)
+
+
+def check_invalid_luks_devices():
+ if get_source_major_version() == '7':
+ # NOTE: keeping unchanged behaviour for IPU 7 -> 8
+ apply_obsoleted_check_ipu_7_8()
+ return
+
+ luks_dumps = next(api.consume(LuksDumps), None)
+ if not luks_dumps:
+ api.current_logger().debug('No LUKS volumes detected. Skipping.')
+ return
+
+ luks1_partitions = []
+ no_tpm2_partitions = []
+ ceph_vol = _get_ceph_volumes()
+ for luks_dump in luks_dumps.dumps:
+ # if the device is managed by ceph, don't inhibit
+ if luks_dump.device_name in ceph_vol:
+ api.current_logger().debug('Skipping LUKS CEPH volume: {}'.format(luks_dump.device_name))
+ continue
+
+ if luks_dump.version == 1:
+ luks1_partitions.append(luks_dump.device_name)
+ elif luks_dump.version == 2 and not _at_least_one_tpm_token(luks_dump):
+ no_tpm2_partitions.append(luks_dump.device_name)
+
+ if luks1_partitions or no_tpm2_partitions:
+ report_inhibitor(luks1_partitions, no_tpm2_partitions)
+ else:
+ required_crypt_rpms = [
+ 'clevis',
+ 'clevis-dracut',
+ 'clevis-systemd',
+ 'clevis-udisks2',
+ 'clevis-luks',
+ 'cryptsetup',
+ 'tpm2-tss',
+ 'tpm2-tools',
+ 'tpm2-abrmd'
+ ]
+ api.produce(TargetUserSpaceUpgradeTasks(install_rpms=required_crypt_rpms))
+ api.produce(UpgradeInitramfsTasks(include_dracut_modules=[
+ DracutModule(name='clevis'),
+ DracutModule(name='clevis-pin-tpm2')
+ ])
+ )
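For orientation, the per-device decision implemented above boils down to the following
standalone sketch (illustrative only, not part of the patch; the helper name is made up):

def classify_luks_device(luks_version, token_types, is_ceph_managed):
    """Return 'skip', 'luks1', 'no-tpm2' or 'ok' for a single LUKS dump."""
    if is_ceph_managed:
        return 'skip'      # Ceph-managed volumes are ignored completely
    if luks_version == 1:
        return 'luks1'     # any LUKS1 device inhibits the upgrade
    if 'clevis-tpm2' not in token_types:
        return 'no-tpm2'   # LUKS2 without a Clevis TPM2 token inhibits as well
    return 'ok'            # LUKS2 bound to Clevis TPM2 lets the upgrade proceed

assert classify_luks_device(2, ['clevis-tpm2'], False) == 'ok'
assert classify_luks_device(2, ['clevis'], False) == 'no-tpm2'
assert classify_luks_device(1, [], False) == 'luks1'

Only when every device ends up 'ok' (or 'skip') does the actor request the clevis/tpm2
packages and dracut modules instead of producing an inhibitor report.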
diff --git a/repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py b/repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py
index 405a3429..d559b54c 100644
--- a/repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py
+++ b/repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py
@@ -1,34 +1,173 @@
-from leapp.models import CephInfo, LsblkEntry, StorageInfo
+"""
+Unit tests for inhibitwhenluks actor
+
+Skip isort as it's kind of broken when mixing grid import and one line imports
+
+isort:skip_file
+"""
+
+from leapp.libraries.common.config import version
+from leapp.models import (
+ CephInfo,
+ LsblkEntry,
+ LuksDump,
+ LuksDumps,
+ LuksToken,
+ StorageInfo,
+ TargetUserSpaceUpgradeTasks,
+ UpgradeInitramfsTasks
+)
from leapp.reporting import Report
from leapp.snactor.fixture import current_actor_context
from leapp.utils.report import is_inhibitor
+_REPORT_TITLE_UNSUITABLE = 'Detected LUKS devices unsuitable for in-place upgrade.'
-def test_actor_with_luks(current_actor_context):
- with_luks = [LsblkEntry(name='luks-132', kname='kname1', maj_min='253:0', rm='0', size='10G', bsize=10*(1 << 39),
- ro='0', tp='crypt', mountpoint='', parent_name='', parent_path='')]
- current_actor_context.feed(StorageInfo(lsblk=with_luks))
+def test_actor_with_luks1_notpm(monkeypatch, current_actor_context):
+ monkeypatch.setattr(version, 'get_source_major_version', lambda: '8')
+ luks_dump = LuksDump(
+ version=1,
+ uuid='dd09e6d4-b595-4f1c-80b8-fd47540e6464',
+ device_path='/dev/sda',
+ device_name='sda')
+ current_actor_context.feed(LuksDumps(dumps=[luks_dump]))
+ current_actor_context.feed(CephInfo(encrypted_volumes=[]))
current_actor_context.run()
assert current_actor_context.consume(Report)
report_fields = current_actor_context.consume(Report)[0].report
assert is_inhibitor(report_fields)
+ assert not current_actor_context.consume(TargetUserSpaceUpgradeTasks)
+ assert not current_actor_context.consume(UpgradeInitramfsTasks)
+ assert report_fields['title'] == _REPORT_TITLE_UNSUITABLE
+ assert 'LUKS1 partitions have been discovered' in report_fields['summary']
+ assert luks_dump.device_name in report_fields['summary']
-def test_actor_with_luks_ceph_only(current_actor_context):
- with_luks = [LsblkEntry(name='luks-132', kname='kname1', maj_min='253:0', rm='0', size='10G', bsize=10*(1 << 39),
- ro='0', tp='crypt', mountpoint='', parent_name='', parent_path='')]
- ceph_volume = ['luks-132']
- current_actor_context.feed(StorageInfo(lsblk=with_luks))
- current_actor_context.feed(CephInfo(encrypted_volumes=ceph_volume))
+
+def test_actor_with_luks2_notpm(monkeypatch, current_actor_context):
+ monkeypatch.setattr(version, 'get_source_major_version', lambda: '8')
+ luks_dump = LuksDump(
+ version=2,
+ uuid='27b57c75-9adf-4744-ab04-9eb99726a301',
+ device_path='/dev/sda',
+ device_name='sda')
+ current_actor_context.feed(LuksDumps(dumps=[luks_dump]))
+ current_actor_context.feed(CephInfo(encrypted_volumes=[]))
+ current_actor_context.run()
+ assert current_actor_context.consume(Report)
+ report_fields = current_actor_context.consume(Report)[0].report
+ assert is_inhibitor(report_fields)
+ assert not current_actor_context.consume(TargetUserSpaceUpgradeTasks)
+ assert not current_actor_context.consume(UpgradeInitramfsTasks)
+
+ assert report_fields['title'] == _REPORT_TITLE_UNSUITABLE
+ assert 'LUKS2 devices without Clevis TPM2 token' in report_fields['summary']
+ assert luks_dump.device_name in report_fields['summary']
+
+
+def test_actor_with_luks2_invalid_token(monkeypatch, current_actor_context):
+ monkeypatch.setattr(version, 'get_source_major_version', lambda: '8')
+ luks_dump = LuksDump(
+ version=2,
+ uuid='dc1dbe37-6644-4094-9839-8fc5dcbec0c6',
+ device_path='/dev/sda',
+ device_name='sda',
+ tokens=[LuksToken(token_id=0, keyslot=1, token_type='clevis')])
+ current_actor_context.feed(LuksDumps(dumps=[luks_dump]))
+ current_actor_context.feed(CephInfo(encrypted_volumes=[]))
+ current_actor_context.run()
+ assert current_actor_context.consume(Report)
+ report_fields = current_actor_context.consume(Report)[0].report
+ assert is_inhibitor(report_fields)
+
+ assert report_fields['title'] == _REPORT_TITLE_UNSUITABLE
+ assert 'LUKS2 devices without Clevis TPM2 token' in report_fields['summary']
+ assert luks_dump.device_name in report_fields['summary']
+ assert not current_actor_context.consume(TargetUserSpaceUpgradeTasks)
+ assert not current_actor_context.consume(UpgradeInitramfsTasks)
+
+
+def test_actor_with_luks2_clevis_tpm_token(monkeypatch, current_actor_context):
+ monkeypatch.setattr(version, 'get_source_major_version', lambda: '8')
+ luks_dump = LuksDump(
+ version=2,
+ uuid='83050bd9-61c6-4ff0-846f-bfd3ac9bfc67',
+ device_path='/dev/sda',
+ device_name='sda',
+ tokens=[LuksToken(token_id=0, keyslot=1, token_type='clevis-tpm2')])
+ current_actor_context.feed(LuksDumps(dumps=[luks_dump]))
+ current_actor_context.feed(CephInfo(encrypted_volumes=[]))
current_actor_context.run()
assert not current_actor_context.consume(Report)
+ upgrade_tasks = current_actor_context.consume(TargetUserSpaceUpgradeTasks)
+ assert len(upgrade_tasks) == 1
+ assert set(upgrade_tasks[0].install_rpms) == set([
+ 'clevis',
+ 'clevis-dracut',
+ 'clevis-systemd',
+ 'clevis-udisks2',
+ 'clevis-luks',
+ 'cryptsetup',
+ 'tpm2-tss',
+ 'tpm2-tools',
+ 'tpm2-abrmd'
+ ])
+ assert current_actor_context.consume(UpgradeInitramfsTasks)
-def test_actor_without_luks(current_actor_context):
- without_luks = [LsblkEntry(name='sda1', kname='sda1', maj_min='8:0', rm='0', size='10G', bsize=10*(1 << 39),
- ro='0', tp='part', mountpoint='/boot', parent_name='', parent_path='')]
- current_actor_context.feed(StorageInfo(lsblk=without_luks))
+def test_actor_with_luks2_ceph(monkeypatch, current_actor_context):
+ monkeypatch.setattr(version, 'get_source_major_version', lambda: '8')
+ ceph_volume = ['sda']
+ current_actor_context.feed(CephInfo(encrypted_volumes=ceph_volume))
+ luks_dump = LuksDump(
+ version=2,
+ uuid='0edb8c11-1a04-4abd-a12d-93433ee7b8d8',
+ device_path='/dev/sda',
+ device_name='sda',
+ tokens=[LuksToken(token_id=0, keyslot=1, token_type='clevis')])
+ current_actor_context.feed(LuksDumps(dumps=[luks_dump]))
current_actor_context.run()
assert not current_actor_context.consume(Report)
+
+ # make sure we don't needlessly include clevis packages, when there is no clevis token
+ assert not current_actor_context.consume(TargetUserSpaceUpgradeTasks)
+
+
+LSBLK_ENTRY = LsblkEntry(
+ name="luks-whatever",
+ kname="dm-0",
+ maj_min="252:1",
+ rm="0",
+ size="1G",
+ bsize=1073741824,
+ ro="0",
+ tp="crypt",
+ mountpoint="/",
+ parent_name="",
+ parent_path=""
+)
+
+
+def test_inhibitor_on_el7(monkeypatch, current_actor_context):
+ # NOTE(pstodulk): consider it good enough as el7 stuff is going to be removed
+ # soon.
+ monkeypatch.setattr(version, 'get_source_major_version', lambda: '7')
+
+ luks_dump = LuksDump(
+ version=2,
+ uuid='83050bd9-61c6-4ff0-846f-bfd3ac9bfc67',
+ device_path='/dev/sda',
+ device_name='sda',
+ tokens=[LuksToken(token_id=0, keyslot=1, token_type='clevis-tpm2')])
+ current_actor_context.feed(LuksDumps(dumps=[luks_dump]))
+ current_actor_context.feed(CephInfo(encrypted_volumes=[]))
+
+ current_actor_context.feed(StorageInfo(lsblk=[LSBLK_ENTRY]))
+ current_actor_context.run()
+ assert current_actor_context.consume(Report)
+
+ report_fields = current_actor_context.consume(Report)[0].report
+ assert is_inhibitor(report_fields)
+ assert report_fields['title'] == 'LUKS encrypted partition detected'
--
2.47.0


@ -0,0 +1,57 @@
From 8e5fe75e4ee76eb62eb51001c28f1f1443f0a563 Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Fri, 18 Oct 2024 07:13:42 +0200
Subject: [PATCH 21/40] Rename inhibitwhenluks actor to checkluks
The actor nowadays does more than just inhibiting the upgrade when
LUKS is detected. Let's rename it to reflect its current behaviour.
---
.../common/actors/{inhibitwhenluks => checkluks}/actor.py | 6 +++---
.../inhibitwhenluks.py => checkluks/libraries/checkluks.py} | 0
.../tests/test_checkluks.py} | 0
3 files changed, 3 insertions(+), 3 deletions(-)
rename repos/system_upgrade/common/actors/{inhibitwhenluks => checkluks}/actor.py (85%)
rename repos/system_upgrade/common/actors/{inhibitwhenluks/libraries/inhibitwhenluks.py => checkluks/libraries/checkluks.py} (100%)
rename repos/system_upgrade/common/actors/{inhibitwhenluks/tests/test_inhibitwhenluks.py => checkluks/tests/test_checkluks.py} (100%)
diff --git a/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py b/repos/system_upgrade/common/actors/checkluks/actor.py
similarity index 85%
rename from repos/system_upgrade/common/actors/inhibitwhenluks/actor.py
rename to repos/system_upgrade/common/actors/checkluks/actor.py
index 65607167..607fd040 100644
--- a/repos/system_upgrade/common/actors/inhibitwhenluks/actor.py
+++ b/repos/system_upgrade/common/actors/checkluks/actor.py
@@ -1,11 +1,11 @@
from leapp.actors import Actor
-from leapp.libraries.actor.inhibitwhenluks import check_invalid_luks_devices
+from leapp.libraries.actor.checkluks import check_invalid_luks_devices
from leapp.models import CephInfo, LuksDumps, StorageInfo, TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks
from leapp.reporting import Report
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
-class InhibitWhenLuks(Actor):
+class CheckLuks(Actor):
"""
Check if any encrypted partitions are in use and whether they are supported for the upgrade.
@@ -15,7 +15,7 @@ class InhibitWhenLuks(Actor):
during the process).
"""
- name = 'check_luks_and_inhibit'
+ name = 'check_luks'
consumes = (CephInfo, LuksDumps, StorageInfo)
produces = (Report, TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks)
tags = (ChecksPhaseTag, IPUWorkflowTag)
diff --git a/repos/system_upgrade/common/actors/inhibitwhenluks/libraries/inhibitwhenluks.py b/repos/system_upgrade/common/actors/checkluks/libraries/checkluks.py
similarity index 100%
rename from repos/system_upgrade/common/actors/inhibitwhenluks/libraries/inhibitwhenluks.py
rename to repos/system_upgrade/common/actors/checkluks/libraries/checkluks.py
diff --git a/repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py b/repos/system_upgrade/common/actors/checkluks/tests/test_checkluks.py
similarity index 100%
rename from repos/system_upgrade/common/actors/inhibitwhenluks/tests/test_inhibitwhenluks.py
rename to repos/system_upgrade/common/actors/checkluks/tests/test_checkluks.py
--
2.47.0


@ -0,0 +1,172 @@
From 5e6d176ab685f2e85ac1aea9533b04d46f25e9b7 Mon Sep 17 00:00:00 2001
From: tomasfratrik <tomasfratrik8@gmail.com>
Date: Tue, 18 Jun 2024 10:22:35 +0200
Subject: [PATCH 22/40] Fix IPU being blocked by resource limitations
The first resource limit is the maximum number of open file descriptors,
the second one the maximum writable file size. Unit tests are added as
well.
Resolves: RHEL-26459 and RHEL-16881
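As background for the change below (not part of the patch), this is roughly how the
soft/hard limit pair that the new set_resource_limits() helper manipulates behaves
with Python's resource module; the values are illustrative:

import resource

# Each limit is a (soft, hard) pair. The soft limit is what the process actually
# hits; it can be raised up to the hard limit without extra privileges.
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
print('open file descriptors: soft={} hard={}'.format(soft, hard))

target = 1024 * 16
if hard != resource.RLIM_INFINITY and hard < target:
    target = hard  # an unprivileged process cannot go above its hard limit
resource.setrlimit(resource.RLIMIT_NOFILE, (target, hard))

# RLIMIT_FSIZE has the same shape; RLIM_INFINITY (-1) means "no limit at all".
print(resource.getrlimit(resource.RLIMIT_FSIZE))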
---
commands/command_utils.py | 38 ++++++++++++++++++
commands/preupgrade/__init__.py | 2 +
commands/tests/test_upgrade_paths.py | 60 ++++++++++++++++++++++++++++
commands/upgrade/__init__.py | 3 ++
4 files changed, 103 insertions(+)
diff --git a/commands/command_utils.py b/commands/command_utils.py
index 4f6f99eb..2810a542 100644
--- a/commands/command_utils.py
+++ b/commands/command_utils.py
@@ -1,6 +1,7 @@
import json
import os
import re
+import resource
from leapp.exceptions import CommandError
from leapp.utils import path
@@ -140,3 +141,40 @@ def vet_upgrade_path(args):
flavor=flavor,
choices=','.join(supported_target_versions)))
return (target_release, flavor)
+
+
+def set_resource_limits():
+ """
+ Set resource limits for the maximum number of open file descriptors and the maximum writable file size.
+
+ :raises: `CommandError` if the resource limits cannot be set
+ """
+
+ def set_resource_limit(resource_type, soft, hard):
+ rtype_string = (
+ 'open file descriptors' if resource_type == resource.RLIMIT_NOFILE
+ else 'writable file size' if resource_type == resource.RLIMIT_FSIZE
+ else 'unknown resource'
+ )
+ try:
+ resource.setrlimit(resource_type, (soft, hard))
+ except ValueError as err:
+ raise CommandError(
+ 'Failure occurred while attempting to set soft limit higher than the hard limit. '
+ 'Resource type: {}, error: {}'.format(rtype_string, err)
+ )
+ except OSError as err:
+ raise CommandError(
+ 'Failed to set resource limit. Resource type: {}, error: {}'.format(rtype_string, err)
+ )
+
+ soft_nofile, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
+ soft_fsize, _ = resource.getrlimit(resource.RLIMIT_FSIZE)
+ nofile_limit = 1024*16
+ fsize_limit = resource.RLIM_INFINITY
+
+ if soft_nofile < nofile_limit:
+ set_resource_limit(resource.RLIMIT_NOFILE, nofile_limit, nofile_limit)
+
+ if soft_fsize != fsize_limit:
+ set_resource_limit(resource.RLIMIT_FSIZE, fsize_limit, fsize_limit)
diff --git a/commands/preupgrade/__init__.py b/commands/preupgrade/__init__.py
index 5a89069f..a9fa40e0 100644
--- a/commands/preupgrade/__init__.py
+++ b/commands/preupgrade/__init__.py
@@ -59,6 +59,8 @@ def preupgrade(args, breadcrumbs):
except LeappError as exc:
raise CommandError(exc.message)
+ command_utils.set_resource_limits()
+
workflow = repositories.lookup_workflow('IPUWorkflow')()
util.warn_if_unsupported(configuration)
util.process_whitelist_experimental(repositories, workflow, configuration, logger)
diff --git a/commands/tests/test_upgrade_paths.py b/commands/tests/test_upgrade_paths.py
index 53f081a5..f1312f66 100644
--- a/commands/tests/test_upgrade_paths.py
+++ b/commands/tests/test_upgrade_paths.py
@@ -1,3 +1,5 @@
+import resource
+
import mock
import pytest
@@ -50,3 +52,61 @@ def test_vet_upgrade_path(mock_open, monkeypatch):
monkeypatch.setenv('LEAPP_DEVEL_TARGET_RELEASE', '9.0')
args = mock.Mock(target='1.2')
assert command_utils.vet_upgrade_path(args) == ('9.0', 'default')
+
+
+def _mock_getrlimit_factory(nofile_limits=(1024, 4096), fsize_limits=(1024, 4096)):
+ """
+ Factory function to create a mock `getrlimit` function with configurable return values.
+ The default param values are lower than the expected values.
+
+ :param nofile_limits: Tuple representing (soft, hard) limits for `RLIMIT_NOFILE`
+ :param fsize_limits: Tuple representing (soft, hard) limits for `RLIMIT_FSIZE`
+ :return: A mock `getrlimit` function
+ """
+ def mock_getrlimit(resource_type):
+ if resource_type == resource.RLIMIT_NOFILE:
+ return nofile_limits
+ if resource_type == resource.RLIMIT_FSIZE:
+ return fsize_limits
+ return (0, 0)
+
+ return mock_getrlimit
+
+
+@pytest.mark.parametrize("nofile_limits, fsize_limits, expected_calls", [
+ # Case where both limits need to be increased
+ ((1024, 4096), (1024, 4096), [
+ (resource.RLIMIT_NOFILE, (1024*16, 1024*16)),
+ (resource.RLIMIT_FSIZE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY))
+ ]),
+ # Case where neither limit needs to be changed
+ ((1024*16, 1024*16), (resource.RLIM_INFINITY, resource.RLIM_INFINITY), [])
+])
+def test_set_resource_limits_increase(monkeypatch, nofile_limits, fsize_limits, expected_calls):
+ setrlimit_called = []
+
+ def mock_setrlimit(resource_type, limits):
+ setrlimit_called.append((resource_type, limits))
+
+ monkeypatch.setattr(resource, "getrlimit", _mock_getrlimit_factory(nofile_limits, fsize_limits))
+ monkeypatch.setattr(resource, "setrlimit", mock_setrlimit)
+
+ command_utils.set_resource_limits()
+
+ assert setrlimit_called == expected_calls
+
+
+@pytest.mark.parametrize("errortype, expected_message", [
+ (OSError, "Failed to set resource limit"),
+ (ValueError, "Failure occurred while attempting to set soft limit higher than the hard limit")
+])
+def test_set_resource_limits_exceptions(monkeypatch, errortype, expected_message):
+ monkeypatch.setattr(resource, "getrlimit", _mock_getrlimit_factory())
+
+ def mock_setrlimit(*args, **kwargs):
+ raise errortype("mocked error")
+
+ monkeypatch.setattr(resource, "setrlimit", mock_setrlimit)
+
+ with pytest.raises(CommandError, match=expected_message):
+ command_utils.set_resource_limits()
diff --git a/commands/upgrade/__init__.py b/commands/upgrade/__init__.py
index 1e15b59c..c7487fde 100644
--- a/commands/upgrade/__init__.py
+++ b/commands/upgrade/__init__.py
@@ -89,6 +89,9 @@ def upgrade(args, breadcrumbs):
repositories = util.load_repositories()
except LeappError as exc:
raise CommandError(exc.message)
+
+ command_utils.set_resource_limits()
+
workflow = repositories.lookup_workflow('IPUWorkflow')(auto_reboot=args.reboot)
util.process_whitelist_experimental(repositories, workflow, configuration, logger)
util.warn_if_unsupported(configuration)
--
2.47.0


@ -0,0 +1,675 @@
From e1bdf2c02dd193cdd7a2da95e2a3cfa5e6e1e8b3 Mon Sep 17 00:00:00 2001
From: mhecko <mhecko@redhat.com>
Date: Mon, 29 Apr 2024 11:16:46 +0200
Subject: [PATCH 23/40] feature: add possibility to use net.naming-scheme
Leapp writes .link files to prevent interfaces from being renamed
after booting into the post-upgrade system. This patch adds a less
error-prone approach that uses the net.naming-scheme kernel param.
The naming scheme tells udev what hardware properties to use
when composing a device name. Moreover, the possible values of this
parameter are coarse-grained "profiles" that tell udev to
behave as it did on RHEL 8.0.
The functionality is enabled by setting the LEAPP_USE_NET_NAMING_SCHEMES
environment variable to 1. If the feature is enabled, the .link
file generation is disabled. A kernel parameter `net.naming-scheme=`
is added to the upgrade boot entry and the post-upgrade entry.
The value of the parameter will be `rhel-<source_major>.0`. Note
that the source minor version is *not used*. Using the source minor
version instead of 0 causes the device names to change slightly,
so we use 0. Moreover, an extra RPM named `rhel-net-naming-sysattrs`
is installed to the target system and the target userspace container.
The RPM provides the definitions of the "profiles" for net.naming-scheme.
The feature is available only for 8>9 upgrades and higher. Attempting to
upgrade 7>8 with LEAPP_USE_NET_NAMING_SCHEMES=1 will simply ignore
the value of the variable.
Add a possibility to use the net.naming-scheme cmdline argument
to make network interface names immutable during the upgrade.
The feature can be used only for 8>9 upgrades and higher.
To enable the feature, set LEAPP_USE_NET_NAMING_SCHEMES=1.
Jira-ref: RHEL-23473
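For illustration only (not part of the patch): the value the feature injects and a
quick, hypothetical way to check a running system for it:

def derive_naming_scheme(source_major_version):
    # e.g. '8' -> 'rhel-8.0'; the minor version is deliberately pinned to 0
    return 'rhel-{}.0'.format(source_major_version)

def cmdline_has_naming_scheme(path='/proc/cmdline'):
    with open(path) as f:
        return any(p.startswith('net.naming-scheme=') for p in f.read().split())

print(derive_naming_scheme('8'))     # rhel-8.0
print(cmdline_has_naming_scheme())   # True once the boot entry carries the parameter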
---
.../actors/addupgradebootentry/actor.py | 10 +-
.../libraries/addupgradebootentry.py | 78 ++++++++++-----
.../tests/unit_test_addupgradebootentry.py | 47 ++++-----
.../actors/kernelcmdlineconfig/actor.py | 16 +++-
.../libraries/kernelcmdlineconfig.py | 12 ++-
.../libraries/persistentnetnamesconfig.py | 5 +-
.../common/models/kernelcmdlineargs.py | 21 ++++
.../actors/emit_net_naming_scheme/actor.py | 28 ++++++
.../libraries/emit_net_naming.py | 63 ++++++++++++
.../tests/test_emit_net_naming_scheme.py | 95 +++++++++++++++++++
10 files changed, 318 insertions(+), 57 deletions(-)
create mode 100644 repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/actor.py
create mode 100644 repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py
create mode 100644 repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/tests/test_emit_net_naming_scheme.py
diff --git a/repos/system_upgrade/common/actors/addupgradebootentry/actor.py b/repos/system_upgrade/common/actors/addupgradebootentry/actor.py
index f400ebf8..e4ecf39e 100644
--- a/repos/system_upgrade/common/actors/addupgradebootentry/actor.py
+++ b/repos/system_upgrade/common/actors/addupgradebootentry/actor.py
@@ -8,11 +8,13 @@ from leapp.models import (
FirmwareFacts,
GrubConfigError,
KernelCmdline,
+ LateTargetKernelCmdlineArgTasks,
LiveImagePreparationInfo,
LiveModeArtifacts,
LiveModeConfig,
TargetKernelCmdlineArgTasks,
- TransactionDryRun
+ TransactionDryRun,
+ UpgradeKernelCmdlineArgTasks
)
from leapp.tags import InterimPreparationPhaseTag, IPUWorkflowTag
@@ -33,9 +35,11 @@ class AddUpgradeBootEntry(Actor):
LiveModeArtifacts,
LiveModeConfig,
KernelCmdline,
- TransactionDryRun
+ TransactionDryRun,
+ TargetKernelCmdlineArgTasks,
+ UpgradeKernelCmdlineArgTasks
)
- produces = (TargetKernelCmdlineArgTasks,)
+ produces = (LateTargetKernelCmdlineArgTasks,)
tags = (IPUWorkflowTag, InterimPreparationPhaseTag)
def process(self):
diff --git a/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py b/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py
index 553ffc35..b236e39b 100644
--- a/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py
+++ b/repos/system_upgrade/common/actors/addupgradebootentry/libraries/addupgradebootentry.py
@@ -9,14 +9,16 @@ from leapp.models import (
BootContent,
KernelCmdline,
KernelCmdlineArg,
+ LateTargetKernelCmdlineArgTasks,
LiveImagePreparationInfo,
LiveModeArtifacts,
LiveModeConfig,
- TargetKernelCmdlineArgTasks
+ TargetKernelCmdlineArgTasks,
+ UpgradeKernelCmdlineArgTasks
)
-def collect_boot_args(livemode_enabled):
+def collect_upgrade_kernel_args(livemode_enabled):
args = {
'enforcing': '0',
'rd.plymouth': '0',
@@ -34,7 +36,10 @@ def collect_boot_args(livemode_enabled):
livemode_args = construct_cmdline_args_for_livemode()
args.update(livemode_args)
- return args
+ upgrade_kernel_args = collect_set_of_kernel_args_from_msgs(UpgradeKernelCmdlineArgTasks, 'to_add')
+ args.update(upgrade_kernel_args)
+
+ return set(args.items())
def collect_undesired_args(livemode_enabled):
@@ -43,11 +48,11 @@ def collect_undesired_args(livemode_enabled):
args = dict(zip(('ro', 'rhgb', 'quiet'), itertools.repeat(None)))
args['rd.lvm.lv'] = _get_rdlvm_arg_values()
- return args
+ return set(args.items())
-def format_grubby_args_from_args_dict(args_dict):
- """ Format the given args dictionary in a form required by grubby's --args. """
+def format_grubby_args_from_args_set(args_dict):
+ """ Format the given args set in a form required by grubby's --args. """
def fmt_single_arg(arg_pair):
key, value = arg_pair
@@ -65,7 +70,7 @@ def format_grubby_args_from_args_dict(args_dict):
else:
yield (key, value) # Just a single (key, value) pair
- arg_sequence = itertools.chain(*(flatten_arguments(arg_pair) for arg_pair in args_dict.items()))
+ arg_sequence = itertools.chain(*(flatten_arguments(arg_pair) for arg_pair in args_dict))
# Sorting should be fine as only values can be None, but we cannot have a (key, None) and (key, value) in
# the dictionary at the same time.
@@ -78,7 +83,7 @@ def format_grubby_args_from_args_dict(args_dict):
def figure_out_commands_needed_to_add_entry(kernel_path, initramfs_path, args_to_add, args_to_remove):
boot_entry_modification_commands = []
- args_to_add_str = format_grubby_args_from_args_dict(args_to_add)
+ args_to_add_str = format_grubby_args_from_args_set(args_to_add)
create_entry_cmd = [
'/usr/sbin/grubby',
@@ -93,7 +98,7 @@ def figure_out_commands_needed_to_add_entry(kernel_path, initramfs_path, args_to
# We need to update root= param separately, since we cannot do it during --add-kernel with --copy-default.
# This is likely a bug in grubby.
- root_param_value = args_to_add.get('root', None)
+ root_param_value = dict(args_to_add).get('root', None)
if root_param_value:
enforce_root_param_for_the_entry_cmd = [
'/usr/sbin/grubby',
@@ -103,7 +108,7 @@ def figure_out_commands_needed_to_add_entry(kernel_path, initramfs_path, args_to
boot_entry_modification_commands.append(enforce_root_param_for_the_entry_cmd)
if args_to_remove:
- args_to_remove_str = format_grubby_args_from_args_dict(args_to_remove)
+ args_to_remove_str = format_grubby_args_from_args_set(args_to_remove)
remove_undesired_args_cmd = [
'/usr/sbin/grubby',
'--update-kernel', kernel_path,
@@ -113,18 +118,55 @@ def figure_out_commands_needed_to_add_entry(kernel_path, initramfs_path, args_to
return boot_entry_modification_commands
+def collect_set_of_kernel_args_from_msgs(msg_type, arg_list_field_name):
+ cmdline_modification_msgs = api.consume(msg_type)
+ lists_of_args_to_add = (getattr(msg, arg_list_field_name, []) for msg in cmdline_modification_msgs)
+ args = itertools.chain(*lists_of_args_to_add)
+ return set((arg.key, arg.value) for arg in args)
+
+
+def emit_removal_of_args_meant_only_for_upgrade_kernel(added_upgrade_kernel_args):
+ """
+ Emit message requesting removal of upgrade kernel args that should not be on the target kernel.
+
+ Target kernel args are created by copying the args of the booted (upgrade) kernel. Therefore,
+ we need to explicitly modify the target kernel cmdline, removing what should not have been copied.
+ """
+ target_args_to_add = collect_set_of_kernel_args_from_msgs(TargetKernelCmdlineArgTasks, 'to_add')
+ actual_kernel_args = collect_set_of_kernel_args_from_msgs(KernelCmdline, 'parameters')
+
+ # actual_kernel_args should not be changed during upgrade, unless explicitly removed by
+ # TargetKernelCmdlineArgTasks.to_remove, but that is handled by some other upgrade component. We just want
+ # to make sure we remove what was not on the source system and that we don't overwrite args to be added to target.
+ args_not_present_on_target_kernel = added_upgrade_kernel_args - actual_kernel_args - target_args_to_add
+
+ # We remove only what we've added and what will not be already removed by someone else.
+ args_to_remove = [KernelCmdlineArg(key=arg[0], value=arg[1]) for arg in args_not_present_on_target_kernel]
+
+ if args_to_remove:
+ msg = ('Following upgrade kernel args were added, but they should not be present '
+ 'on target cmdline: `%s`, requesting removal.')
+ api.current_logger().info(msg, args_not_present_on_target_kernel)
+ args_sorted = sorted(args_to_remove, key=lambda arg: arg.key)
+ api.produce(LateTargetKernelCmdlineArgTasks(to_remove=args_sorted))
+
+
def add_boot_entry(configs=None):
kernel_dst_path, initram_dst_path = get_boot_file_paths()
+
_remove_old_upgrade_boot_entry(kernel_dst_path, configs=configs)
livemode_enabled = next(api.consume(LiveImagePreparationInfo), None) is not None
- cmdline_args = collect_boot_args(livemode_enabled)
+ # We have to keep the desired and unwanted args separate and modify cmdline in two separate grubby calls. Merging
+ # these sets and trying to execute only a single command would leave the unwanted cmdline args present if they
+ # are present on the original system.
+ added_cmdline_args = collect_upgrade_kernel_args(livemode_enabled)
undesired_cmdline_args = collect_undesired_args(livemode_enabled)
commands_to_run = figure_out_commands_needed_to_add_entry(kernel_dst_path,
initram_dst_path,
- args_to_add=cmdline_args,
+ args_to_add=added_cmdline_args,
args_to_remove=undesired_cmdline_args)
def run_commands_adding_entry(extra_command_suffix=None):
@@ -146,16 +188,8 @@ def add_boot_entry(configs=None):
# See https://bugzilla.redhat.com/show_bug.cgi?id=1764306
run(['/usr/sbin/zipl'])
- if 'debug' in cmdline_args:
- # The kernelopts for target kernel are generated based on the cmdline used in the upgrade initramfs,
- # therefore, if we enabled debug above, and the original system did not have the debug kernelopt, we
- # need to explicitly remove it from the target os boot entry.
- # NOTE(mhecko): This will also unconditionally remove debug kernelopt if the source system used it.
- api.produce(TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='debug')]))
-
- # NOTE(mmatuska): This will remove the option even if the source system had it set.
- # However enforcing=0 shouldn't be set persistently anyway.
- api.produce(TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='enforcing', value='0')]))
+ effective_upgrade_kernel_args = added_cmdline_args - undesired_cmdline_args
+ emit_removal_of_args_meant_only_for_upgrade_kernel(effective_upgrade_kernel_args)
except CalledProcessError as e:
raise StopActorExecutionError(
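As a side note, the set-based argument formatting used by the renamed helper behaves
roughly like this standalone sketch (hypothetical code, matching the examples from the
formatting test removed further below):

def format_args(args):
    # None renders as a bare key; a tuple value repeats the key once per value
    parts = []
    for key, value in sorted(args):
        values = value if isinstance(value, tuple) else (value,)
        parts.extend(key if v is None else '{}={}'.format(key, v) for v in values)
    return ' '.join(parts)

print(format_args({('argA', 'val'), ('argB', 'valB'), ('argC', None)}))  # argA=val argB=valB argC
print(format_args({('rd.lvm.lv', ('vg/root', 'vg/swap'))}))             # rd.lvm.lv=vg/root rd.lvm.lv=vg/swap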
diff --git a/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py b/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py
index c4f5232b..2f58ba9e 100644
--- a/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py
+++ b/repos/system_upgrade/common/actors/addupgradebootentry/tests/unit_test_addupgradebootentry.py
@@ -12,6 +12,7 @@ from leapp.models import (
BootContent,
KernelCmdline,
KernelCmdlineArg,
+ LateTargetKernelCmdlineArgTasks,
LiveModeArtifacts,
LiveModeConfig,
TargetKernelCmdlineArgTasks
@@ -82,8 +83,10 @@ def test_add_boot_entry(monkeypatch, run_args, arch):
assert addupgradebootentry.run.args[0] == run_args.args_remove
assert addupgradebootentry.run.args[1] == run_args.args_add
assert api.produce.model_instances == [
- TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='debug')]),
- TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='enforcing', value='0')])
+ LateTargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='debug'),
+ KernelCmdlineArg(key='enforcing', value='0'),
+ KernelCmdlineArg(key='plymouth.enable', value='0'),
+ KernelCmdlineArg(key='rd.plymouth', value='0')])
]
if run_args.args_zipl:
@@ -103,16 +106,16 @@ def test_debug_kernelopt_removal_task_production(monkeypatch, is_leapp_invoked_w
CurrentActorMocked(envars={'LEAPP_DEBUG': str(int(is_leapp_invoked_with_debug))}))
addupgradebootentry.add_boot_entry()
+ assert len(api.produce.model_instances) == 1
- expected_produced_messages = []
- if is_leapp_invoked_with_debug:
- expected_produced_messages = [TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='debug')])]
-
- expected_produced_messages.append(
- TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='enforcing', value='0')])
- )
+ produced_msg = api.produce.model_instances[0]
+ assert isinstance(produced_msg, LateTargetKernelCmdlineArgTasks)
- assert api.produce.model_instances == expected_produced_messages
+ debug_kernel_cmline_arg = KernelCmdlineArg(key='debug')
+ if is_leapp_invoked_with_debug:
+ assert debug_kernel_cmline_arg in produced_msg.to_remove
+ else:
+ assert debug_kernel_cmline_arg not in produced_msg.to_remove
def test_add_boot_entry_configs(monkeypatch):
@@ -132,8 +135,10 @@ def test_add_boot_entry_configs(monkeypatch):
assert addupgradebootentry.run.args[2] == run_args_add + ['-c', CONFIGS[0]]
assert addupgradebootentry.run.args[3] == run_args_add + ['-c', CONFIGS[1]]
assert api.produce.model_instances == [
- TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='debug')]),
- TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='enforcing', value='0')]),
+ LateTargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='debug'),
+ KernelCmdlineArg(key='enforcing', value='0'),
+ KernelCmdlineArg(key='plymouth.enable', value='0'),
+ KernelCmdlineArg(key='rd.plymouth', value='0')])
]
@@ -183,7 +188,7 @@ def test_fix_grub_config_error(monkeypatch, error_type, test_file_name):
(False, False),
)
)
-def test_collect_boot_args(monkeypatch, is_debug_enabled, network_enablement_type):
+def test_collect_upgrade_kernel_args(monkeypatch, is_debug_enabled, network_enablement_type):
env_vars = {'LEAPP_DEBUG': str(int(is_debug_enabled))}
if network_enablement_type:
env_vars['LEAPP_DEVEL_INITRAM_NETWORK'] = network_enablement_type
@@ -192,7 +197,8 @@ def test_collect_boot_args(monkeypatch, is_debug_enabled, network_enablement_typ
monkeypatch.setattr(addupgradebootentry, 'construct_cmdline_args_for_livemode',
lambda *args: {'livemodearg': 'value'})
- args = addupgradebootentry.collect_boot_args(livemode_enabled=True)
+ arg_set = addupgradebootentry.collect_upgrade_kernel_args(livemode_enabled=True)
+ args = dict(arg_set)
assert args['enforcing'] == '0'
assert args['rd.plymouth'] == '0'
@@ -320,16 +326,3 @@ def test_get_device_uuid(monkeypatch):
uuid = addupgradebootentry._get_device_uuid(path)
assert uuid == 'MY_UUID1'
-
-
-@pytest.mark.parametrize(
- ('args', 'expected_result'),
- (
- ([('argA', 'val'), ('argB', 'valB'), ('argC', None), ], 'argA=val argB=valB argC'),
- ([('argA', ('val1', 'val2'))], 'argA=val1 argA=val2')
- )
-)
-def test_format_grubby_args_from_args_dict(args, expected_result):
- actual_result = addupgradebootentry.format_grubby_args_from_args_dict(dict(args))
-
- assert actual_result == expected_result
diff --git a/repos/system_upgrade/common/actors/kernelcmdlineconfig/actor.py b/repos/system_upgrade/common/actors/kernelcmdlineconfig/actor.py
index 3585a14e..6d5f39dd 100644
--- a/repos/system_upgrade/common/actors/kernelcmdlineconfig/actor.py
+++ b/repos/system_upgrade/common/actors/kernelcmdlineconfig/actor.py
@@ -3,7 +3,13 @@ import os
from leapp.actors import Actor
from leapp.exceptions import StopActorExecutionError
from leapp.libraries.actor import kernelcmdlineconfig
-from leapp.models import FirmwareFacts, InstalledTargetKernelInfo, KernelCmdlineArg, TargetKernelCmdlineArgTasks
+from leapp.models import (
+ FirmwareFacts,
+ InstalledTargetKernelInfo,
+ KernelCmdlineArg,
+ LateTargetKernelCmdlineArgTasks,
+ TargetKernelCmdlineArgTasks
+)
from leapp.reporting import Report
from leapp.tags import FinalizationPhaseTag, IPUWorkflowTag
@@ -14,7 +20,13 @@ class KernelCmdlineConfig(Actor):
"""
name = 'kernelcmdlineconfig'
- consumes = (KernelCmdlineArg, InstalledTargetKernelInfo, FirmwareFacts, TargetKernelCmdlineArgTasks)
+ consumes = (
+ KernelCmdlineArg,
+ InstalledTargetKernelInfo,
+ FirmwareFacts,
+ LateTargetKernelCmdlineArgTasks,
+ TargetKernelCmdlineArgTasks
+ )
produces = (Report,)
tags = (FinalizationPhaseTag, IPUWorkflowTag)
diff --git a/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py b/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py
index 19c50f3c..98b8b95b 100644
--- a/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py
+++ b/repos/system_upgrade/common/actors/kernelcmdlineconfig/libraries/kernelcmdlineconfig.py
@@ -1,3 +1,4 @@
+import itertools
import re
from leapp import reporting
@@ -5,7 +6,12 @@ from leapp.exceptions import StopActorExecutionError
from leapp.libraries import stdlib
from leapp.libraries.common.config import architecture, version
from leapp.libraries.stdlib import api
-from leapp.models import InstalledTargetKernelInfo, KernelCmdlineArg, TargetKernelCmdlineArgTasks
+from leapp.models import (
+ InstalledTargetKernelInfo,
+ KernelCmdlineArg,
+ LateTargetKernelCmdlineArgTasks,
+ TargetKernelCmdlineArgTasks
+)
KERNEL_CMDLINE_FILE = "/etc/kernel/cmdline"
@@ -71,7 +77,9 @@ def retrieve_arguments_to_modify():
kernelargs_msgs_to_add = list(api.consume(KernelCmdlineArg))
kernelargs_msgs_to_remove = []
- for target_kernel_arg_task in api.consume(TargetKernelCmdlineArgTasks):
+ modification_msgs = itertools.chain(api.consume(TargetKernelCmdlineArgTasks),
+ api.consume(LateTargetKernelCmdlineArgTasks))
+ for target_kernel_arg_task in modification_msgs:
kernelargs_msgs_to_add.extend(target_kernel_arg_task.to_add)
kernelargs_msgs_to_remove.extend(target_kernel_arg_task.to_remove)
diff --git a/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py b/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py
index dc5196ea..2f12742a 100644
--- a/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py
+++ b/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py
@@ -2,7 +2,7 @@ import errno
import os
import re
-from leapp.libraries.common.config import get_env
+from leapp.libraries.common.config import get_env, version
from leapp.libraries.stdlib import api
from leapp.models import (
InitrdIncludes,
@@ -39,6 +39,9 @@ def generate_link_file(interface):
@suppress_deprecation(InitrdIncludes)
def process():
+ if get_env('LEAPP_USE_NET_NAMING_SCHEMES', '0') == '1' and version.get_target_major_version() != '8':
+ api.current_logger().info('Skipping generation of .link files renaming NICs as LEAPP_USE_NET_NAMING_SCHEMES=1')
+ return
if get_env('LEAPP_NO_NETWORK_RENAMING', '0') == '1':
api.current_logger().info(
diff --git a/repos/system_upgrade/common/models/kernelcmdlineargs.py b/repos/system_upgrade/common/models/kernelcmdlineargs.py
index e3568a0a..fafd2853 100644
--- a/repos/system_upgrade/common/models/kernelcmdlineargs.py
+++ b/repos/system_upgrade/common/models/kernelcmdlineargs.py
@@ -24,6 +24,27 @@ class TargetKernelCmdlineArgTasks(Model):
to_remove = fields.List(fields.Model(KernelCmdlineArg), default=[])
+class LateTargetKernelCmdlineArgTasks(Model):
+ """
+ Desired modifications of the target kernel args produced later in the upgrade process.
+
+ Defined to prevent loops in the actor dependency graph.
+ """
+ topic = SystemInfoTopic
+
+ to_add = fields.List(fields.Model(KernelCmdlineArg), default=[])
+ to_remove = fields.List(fields.Model(KernelCmdlineArg), default=[])
+
+
+class UpgradeKernelCmdlineArgTasks(Model):
+ """
+ Modifications of the upgrade kernel cmdline.
+ """
+ topic = SystemInfoTopic
+
+ to_add = fields.List(fields.Model(KernelCmdlineArg), default=[])
+
+
class KernelCmdline(Model):
"""
Kernel command line parameters the system was booted with
diff --git a/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/actor.py b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/actor.py
new file mode 100644
index 00000000..769fe20b
--- /dev/null
+++ b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/actor.py
@@ -0,0 +1,28 @@
+from leapp.actors import Actor
+from leapp.libraries.actor import emit_net_naming as emit_net_naming_lib
+from leapp.models import (
+ KernelCmdline,
+ RpmTransactionTasks,
+ TargetKernelCmdlineArgTasks,
+ TargetUserSpaceUpgradeTasks,
+ UpgradeKernelCmdlineArgTasks
+)
+from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
+
+
+class EmitNetNamingScheme(Actor):
+ """
+ Emit necessary modifications of the upgrade environment and target command line to use net.naming-scheme.
+ """
+ name = 'emit_net_naming_scheme'
+ consumes = (KernelCmdline,)
+ produces = (
+ RpmTransactionTasks,
+ TargetKernelCmdlineArgTasks,
+ TargetUserSpaceUpgradeTasks,
+ UpgradeKernelCmdlineArgTasks,
+ )
+ tags = (ChecksPhaseTag, IPUWorkflowTag)
+
+ def process(self):
+ emit_net_naming_lib.emit_msgs_to_use_net_naming_schemes()
diff --git a/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py
new file mode 100644
index 00000000..65abdd4d
--- /dev/null
+++ b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py
@@ -0,0 +1,63 @@
+from leapp.exceptions import StopActorExecutionError
+from leapp.libraries.common.config import get_env, version
+from leapp.libraries.stdlib import api
+from leapp.models import (
+ KernelCmdline,
+ KernelCmdlineArg,
+ RpmTransactionTasks,
+ TargetKernelCmdlineArgTasks,
+ TargetUserSpaceUpgradeTasks,
+ UpgradeKernelCmdlineArgTasks
+)
+
+NET_NAMING_SYSATTRS_RPM_NAME = 'rhel-net-naming-sysattrs'
+
+
+def is_net_scheme_compatible_with_current_cmdline():
+ kernel_cmdline = next(api.consume(KernelCmdline), None)
+ if not kernel_cmdline:
+ # Super unlikely
+ raise StopActorExecutionError('Did not receive any KernelCmdline messages.')
+
+ allows_predictable_names = True
+ already_has_a_net_naming_scheme = False
+ for param in kernel_cmdline.parameters:
+ if param.key == 'net.ifnames':
+ if param.value == '0':
+ allows_predictable_names = False
+ elif param.value == '1':
+ allows_predictable_names = True
+ if param.key == 'net.naming-scheme':
+ # We assume that the kernel cmdline does not contain invalid entries, namely,
+ # that the net.naming-scheme refers to a valid scheme.
+ already_has_a_net_naming_scheme = True
+
+ is_compatible = allows_predictable_names and not already_has_a_net_naming_scheme
+
+ msg = ('Should net.naming-scheme be added to kernel cmdline: %s. '
+ 'Reason: allows_predictable_names=%s, already_has_a_net_naming_scheme=%s')
+ api.current_logger().info(msg, 'yes' if is_compatible else 'no',
+ allows_predictable_names,
+ already_has_a_net_naming_scheme)
+
+ return is_compatible
+
+
+def emit_msgs_to_use_net_naming_schemes():
+ if get_env('LEAPP_USE_NET_NAMING_SCHEMES', '0') != '1' and version.get_target_major_version() != '8':
+ return
+
+ # The package should be installed regardless of whether we will modify the cmdline -
+ # if the cmdline already contains net.naming-scheme, then the package will be useful
+ # in both, the upgrade environment and on the target system.
+ pkgs_to_install = [NET_NAMING_SYSATTRS_RPM_NAME]
+ api.produce(TargetUserSpaceUpgradeTasks(install_rpms=pkgs_to_install))
+ api.produce(RpmTransactionTasks(to_install=pkgs_to_install))
+
+ if not is_net_scheme_compatible_with_current_cmdline():
+ return
+
+ naming_scheme = 'rhel-{0}.0'.format(version.get_source_major_version())
+ cmdline_args = [KernelCmdlineArg(key='net.naming-scheme', value=naming_scheme)]
+ api.produce(UpgradeKernelCmdlineArgTasks(to_add=cmdline_args))
+ api.produce(TargetKernelCmdlineArgTasks(to_add=cmdline_args))
diff --git a/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/tests/test_emit_net_naming_scheme.py b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/tests/test_emit_net_naming_scheme.py
new file mode 100644
index 00000000..7a5eeba5
--- /dev/null
+++ b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/tests/test_emit_net_naming_scheme.py
@@ -0,0 +1,95 @@
+import pytest
+
+from leapp.libraries.actor import emit_net_naming as emit_net_naming_lib
+from leapp.libraries.common.testutils import CurrentActorMocked, produce_mocked
+from leapp.libraries.stdlib import api
+from leapp.models import (
+ KernelCmdline,
+ KernelCmdlineArg,
+ RpmTransactionTasks,
+ TargetKernelCmdlineArgTasks,
+ TargetUserSpaceUpgradeTasks,
+ UpgradeKernelCmdlineArgTasks
+)
+
+
+@pytest.mark.parametrize(
+ ('kernel_args', 'should_be_compatible'),
+ [
+ ([KernelCmdlineArg(key='net.naming-scheme', value='rhel-8.10')], False),
+ ([KernelCmdlineArg(key='net.ifnames', value='1')], True),
+ ([KernelCmdlineArg(key='net.ifnames', value='0')], False),
+ (
+ [
+ KernelCmdlineArg(key='net.naming-scheme', value='rhel-8.10'),
+ KernelCmdlineArg(key='net.ifname', value='0'),
+ KernelCmdlineArg(key='root', value='/dev/vda1')
+ ],
+ False
+ ),
+ ([KernelCmdlineArg(key='root', value='/dev/vda1')], True),
+ ]
+)
+def test_is_net_scheme_compatible_with_current_cmdline(monkeypatch, kernel_args, should_be_compatible):
+ kernel_cmdline = KernelCmdline(parameters=kernel_args)
+
+ def mocked_consume(msg_type):
+ yield {KernelCmdline: kernel_cmdline}[msg_type]
+
+ monkeypatch.setattr(api, 'consume', mocked_consume)
+
+ assert emit_net_naming_lib.is_net_scheme_compatible_with_current_cmdline() == should_be_compatible, \
+ [(arg.key, arg.value) for arg in kernel_cmdline.parameters]
+
+
+@pytest.mark.parametrize(
+ ('is_net_scheme_enabled', 'is_current_cmdline_compatible'),
+ [
+ (True, True),
+ (True, False),
+ (False, True)
+ ]
+)
+def test_emit_msgs_to_use_net_naming_schemes(monkeypatch, is_net_scheme_enabled, is_current_cmdline_compatible):
+ envvar_value = '1' if is_net_scheme_enabled else '0'
+
+ mocked_actor = CurrentActorMocked(src_ver='8.10',
+ dst_ver='9.5',
+ envars={'LEAPP_USE_NET_NAMING_SCHEMES': envvar_value})
+ monkeypatch.setattr(api, 'current_actor', mocked_actor)
+
+ monkeypatch.setattr(api, 'produce', produce_mocked())
+ monkeypatch.setattr(emit_net_naming_lib,
+ 'is_net_scheme_compatible_with_current_cmdline',
+ lambda: is_current_cmdline_compatible)
+
+ emit_net_naming_lib.emit_msgs_to_use_net_naming_schemes()
+
+ def ensure_one_msg_of_type_produced(produced_messages, msg_type):
+ msgs = (msg for msg in produced_messages if isinstance(msg, msg_type))
+ msg = next(msgs)
+ assert not next(msgs, None), 'More than one message of type {type} produced'.format(type=type)
+ return msg
+
+ produced_messages = api.produce.model_instances
+ if is_net_scheme_enabled:
+ userspace_tasks = ensure_one_msg_of_type_produced(produced_messages, TargetUserSpaceUpgradeTasks)
+ assert userspace_tasks.install_rpms == [emit_net_naming_lib.NET_NAMING_SYSATTRS_RPM_NAME]
+
+ rpm_tasks = ensure_one_msg_of_type_produced(produced_messages, RpmTransactionTasks)
+ assert rpm_tasks.to_install == [emit_net_naming_lib.NET_NAMING_SYSATTRS_RPM_NAME]
+ else:
+ assert not api.produce.called
+ return
+
+ upgrade_cmdline_mods = (msg for msg in produced_messages if isinstance(msg, UpgradeKernelCmdlineArgTasks))
+ target_cmdline_mods = (msg for msg in produced_messages if isinstance(msg, TargetKernelCmdlineArgTasks))
+
+ if is_current_cmdline_compatible:
+ # We should emit cmdline modifications - both UpgradeKernelCmdlineArgTasks and TargetKernelCmdlineArgTasks
+ # should be produced
+ assert next(upgrade_cmdline_mods, None)
+ assert next(target_cmdline_mods, None)
+ else:
+ assert not next(upgrade_cmdline_mods, None)
+ assert not next(target_cmdline_mods, None)
--
2.47.0


@ -0,0 +1,26 @@
From b4b535454b74c05682ecf0d3059decbd2c9530e0 Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Wed, 6 Nov 2024 22:23:37 +0100
Subject: [PATCH 24/40] prevent the feature from being used outside 8>9
---
.../libraries/persistentnetnamesconfig.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py b/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py
index 2f12742a..b2c7f5ff 100644
--- a/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py
+++ b/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py
@@ -39,7 +39,8 @@ def generate_link_file(interface):
@suppress_deprecation(InitrdIncludes)
def process():
- if get_env('LEAPP_USE_NET_NAMING_SCHEMES', '0') == '1' and version.get_target_major_version() != '8':
+ if get_env('LEAPP_USE_NET_NAMING_SCHEMES', '0') == '1' and version.get_target_major_version() == '9':
+ # We can use this only for 8>9, for now
api.current_logger().info('Skipping generation of .link files renaming NICs as LEAPP_USE_NET_NAMING_SCHEMES=1')
return
--
2.47.0


@ -0,0 +1,28 @@
From e43a8922e06d72212e8e2a8b51747c668147182c Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Wed, 6 Nov 2024 22:26:01 +0100
Subject: [PATCH 25/40] fix condition on when net naming is emitted
---
.../emit_net_naming_scheme/libraries/emit_net_naming.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py
index 65abdd4d..726bb459 100644
--- a/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py
+++ b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py
@@ -44,7 +44,10 @@ def is_net_scheme_compatible_with_current_cmdline():
def emit_msgs_to_use_net_naming_schemes():
- if get_env('LEAPP_USE_NET_NAMING_SCHEMES', '0') != '1' and version.get_target_major_version() != '8':
+ is_env_var_set = get_env('LEAPP_USE_NET_NAMING_SCHEMES', '0') == '1'
+ is_upgrade_8to9 = version.get_target_major_version() == '9'
+ is_net_naming_enabled_and_permitted = is_env_var_set and is_upgrade_8to9
+ if not is_net_naming_enabled_and_permitted:
return
# The package should be installed regardless of whether we will modify the cmdline -
--
2.47.0


@ -0,0 +1,56 @@
From 0bf07d1546ccdc6d4a9e6f4936a98b4d6ca27789 Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Tue, 12 Nov 2024 09:10:50 +0100
Subject: [PATCH 26/40] scangrubdevpartitionlayout: Skip warning msgs
The fdisk output can contain warning messages when a partition is not
aligned on a physical sector boundary, like:
Partition 4 does not start on physical sector boundary.
We know that in the case of MBR the lines we expect to parse always
start with a canonical device path. So let's skip all lines that do
not start with '/'.
jira: https://issues.redhat.com/browse/RHEL-50947
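An illustrative sketch (not the actor code) of what that filter does to a captured
fdisk listing:

FDISK_TABLE = """\
/dev/vda1 * 2048 2099199 2097152 1G 83 Linux
Partition 4 does not start on physical sector boundary.
/dev/vda2 2099200 4194303 2095104 1G 8e Linux LVM
"""

# Keep only real partition rows; warnings never start with a canonical path.
partitions = [line for line in FDISK_TABLE.splitlines() if line.startswith('/')]
print(partitions)   # both /dev/vda rows survive, the warning line is dropped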
---
.../libraries/scan_layout.py | 10 ++++++++++
.../tests/test_scan_partition_layout.py | 3 +++
2 files changed, 13 insertions(+)
diff --git a/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/libraries/scan_layout.py b/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/libraries/scan_layout.py
index 83d02656..7f4a2a59 100644
--- a/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/libraries/scan_layout.py
+++ b/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/libraries/scan_layout.py
@@ -68,6 +68,16 @@ def get_partition_layout(device):
partitions = []
for partition_line in table_iter:
+ if not partition_line.startswith('/'):
+ # the output can contain warning msg when a partition is not aligned
+ # on physical sector boundary, like:
+ # ~~~
+ # Partition 4 does not start on physical sector boundary.
+ # ~~~
+ # We know that in the case of MBR the lines we expect to parse always
+ # start with a canonical path. So let's use this condition.
+ # See https://issues.redhat.com/browse/RHEL-50947
+ continue
# Fields: Device Boot Start End Sectors Size Id Type
# The line looks like: `/dev/vda1 * 2048 2099199 2097152 1G 83 Linux`
part_info = split_on_space_segments(partition_line)
diff --git a/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/tests/test_scan_partition_layout.py b/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/tests/test_scan_partition_layout.py
index 743ca71f..9c32e16f 100644
--- a/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/tests/test_scan_partition_layout.py
+++ b/repos/system_upgrade/el7toel8/actors/scangrubdevpartitionlayout/tests/test_scan_partition_layout.py
@@ -49,6 +49,9 @@ def test_get_partition_layout(monkeypatch, devices, fs):
part_line = '{0} * {1} 2099199 1048576 83 {2}'.format(part.name, part.start_offset, fs)
fdisk_output.append(part_line)
+ # add a problematic warning msg to test:
+ # https://issues.redhat.com/browse/RHEL-50947
+ fdisk_output.append('Partition 3 does not start on physical sector boundary.')
device_to_fdisk_output[device.name] = fdisk_output
def mocked_run(cmd, *args, **kwargs):
--
2.47.0

File diff suppressed because it is too large


@ -0,0 +1,115 @@
From 866a4b9f163c3aec31736ac0ce25f564fe016cb4 Mon Sep 17 00:00:00 2001
From: Jarek Prokop <jprokop@redhat.com>
Date: Tue, 5 Nov 2024 10:15:28 +0100
Subject: [PATCH 28/40] Add el9toel10 actor to handle symlink -> directory with
ruby IRB.
The `/usr/share/ruby/irb` path is a symlink in RHEL 9,
but a regular directory in RHEL 10.
This puts us back in line with RHEL 8 and Fedora in terms of the
path's file type regarding the rubygem-irb package.
Since this was not handled on the RPM level, handle it as an actor again.
This was copied and adjusted from the same-named el8->el9 actor.
We do not care about the validity or target of the symlink; we just
remove it to allow DNF to create the correct directory on upgrade.
Without this workaround, the upgrade fails in the transaction test with
reports of file conflicts on the directory path.
Users should not expect to ever retain anything in this directory.
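A small pre-flight check along these lines (hypothetical, not shipped by leapp) shows
which layout a given system currently has:

import os

IRB_PATH = '/usr/share/ruby/irb'

if os.path.islink(IRB_PATH):
    print('{} is a symlink -> {} (RHEL 9 layout)'.format(IRB_PATH, os.readlink(IRB_PATH)))
elif os.path.isdir(IRB_PATH):
    print('{} is a regular directory (RHEL 8/10 layout)'.format(IRB_PATH))
else:
    print('{} not present (rubygem-irb probably not installed)'.format(IRB_PATH))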
---
.../actors/registerrubyirbadjustment/actor.py | 31 +++++++++++++++++++
.../test_register_ruby_irb_adjustments.py | 11 +++++++
.../el9toel10/tools/handlerubyirbsymlink | 22 +++++++++++++
3 files changed, 64 insertions(+)
create mode 100644 repos/system_upgrade/el9toel10/actors/registerrubyirbadjustment/actor.py
create mode 100644 repos/system_upgrade/el9toel10/actors/registerrubyirbadjustment/tests/test_register_ruby_irb_adjustments.py
create mode 100755 repos/system_upgrade/el9toel10/tools/handlerubyirbsymlink
diff --git a/repos/system_upgrade/el9toel10/actors/registerrubyirbadjustment/actor.py b/repos/system_upgrade/el9toel10/actors/registerrubyirbadjustment/actor.py
new file mode 100644
index 00000000..4fbec7ff
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/registerrubyirbadjustment/actor.py
@@ -0,0 +1,31 @@
+from leapp.actors import Actor
+from leapp.models import DNFWorkaround
+from leapp.tags import FactsPhaseTag, IPUWorkflowTag
+
+
+class RegisterRubyIRBAdjustment(Actor):
+ """
+ Register a workaround to allow rubygem-irb's symlink -> directory conversion.
+
+ The /usr/share/ruby/irb has been moved from a symlink to a directory
+ in RHEL 10 and this conversion was not handled on the RPM level.
+ This leads to DNF reporting package file conflicts when a major upgrade
+ is attempted and rubygem-irb is installed.
+
+ Register "handlerubyirbsymlink" script that removes the symlink prior
+ to DNF upgrade and allows it to create the expected directory in place of
+ the removed symlink.
+ """
+
+ name = 'register_ruby_irb_adjustment'
+ consumes = ()
+ produces = (DNFWorkaround,)
+ tags = (IPUWorkflowTag, FactsPhaseTag)
+
+ def process(self):
+ self.produce(
+ DNFWorkaround(
+ display_name='IRB directory fix',
+ script_path=self.get_tool_path('handlerubyirbsymlink'),
+ )
+ )
diff --git a/repos/system_upgrade/el9toel10/actors/registerrubyirbadjustment/tests/test_register_ruby_irb_adjustments.py b/repos/system_upgrade/el9toel10/actors/registerrubyirbadjustment/tests/test_register_ruby_irb_adjustments.py
new file mode 100644
index 00000000..fc341646
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/actors/registerrubyirbadjustment/tests/test_register_ruby_irb_adjustments.py
@@ -0,0 +1,11 @@
+import os.path
+
+from leapp.models import DNFWorkaround
+
+
+def test_register_ruby_irb_adjustments(current_actor_context):
+ current_actor_context.run()
+ assert len(current_actor_context.consume(DNFWorkaround)) == 1
+ assert current_actor_context.consume(DNFWorkaround)[0].display_name == 'IRB directory fix'
+ assert os.path.basename(current_actor_context.consume(DNFWorkaround)[0].script_path) == 'handlerubyirbsymlink'
+ assert os.path.exists(current_actor_context.consume(DNFWorkaround)[0].script_path)
diff --git a/repos/system_upgrade/el9toel10/tools/handlerubyirbsymlink b/repos/system_upgrade/el9toel10/tools/handlerubyirbsymlink
new file mode 100755
index 00000000..e9ac40fe
--- /dev/null
+++ b/repos/system_upgrade/el9toel10/tools/handlerubyirbsymlink
@@ -0,0 +1,22 @@
+#!/usr/bin/bash -e
+
+# Enable dotglob just in case hidden files are present (not expected, but a
+# forgotten cache file is possible); better to handle them than miss them.
+shopt -s dotglob
+
+handle_dir() {
+ # Check that $1 is a symlink then unlink it so that RPM
+ # can freely create the directory.
+ if [ ! -L "$1" ]; then
+ return
+ fi
+
+ # There is no configuration or anything that the user should ever customize
+ # and expect to retain.
+ unlink "$1"
+
+ return 0
+}
+
+
+handle_dir /usr/share/ruby/irb
--
2.47.0
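
Editor's note: for illustration, the check performed by the handlerubyirbsymlink tool above could be expressed in Python roughly as follows (a sketch only; the shipped workaround is the bash script registered via DNFWorkaround):

import os


def handle_dir(path):
    # Remove the path only when it is a symlink, so that RPM can freely
    # create the real directory during the upgrade transaction.
    if not os.path.islink(path):
        return
    os.unlink(path)


handle_dir('/usr/share/ruby/irb')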

View File

@ -0,0 +1,40 @@
From 81a3297516fbbd120b0fb870de36f1a1b290dd21 Mon Sep 17 00:00:00 2001
From: Jarek Prokop <jprokop@redhat.com>
Date: Wed, 6 Nov 2024 15:21:14 +0100
Subject: [PATCH 29/40] Expand on the actor docstring for the el8->el9
rubygem-irb symlink fix.
In RHEL 10, the path is a regular directory again.
The two actors are kept separate rather than sharing a common solution.
Expand the el8->el9 actor's docstring with its reasoning so the two
actors can be told apart.
---
.../actors/registerrubyirbadjustment/actor.py | 11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/repos/system_upgrade/el8toel9/actors/registerrubyirbadjustment/actor.py b/repos/system_upgrade/el8toel9/actors/registerrubyirbadjustment/actor.py
index ac4d1e6f..a33d8831 100644
--- a/repos/system_upgrade/el8toel9/actors/registerrubyirbadjustment/actor.py
+++ b/repos/system_upgrade/el8toel9/actors/registerrubyirbadjustment/actor.py
@@ -5,7 +5,16 @@ from leapp.tags import FactsPhaseTag, IPUWorkflowTag
class RegisterRubyIRBAdjustment(Actor):
"""
- Registers a workaround which will adjust the Ruby IRB directories during the upgrade.
+ Register a workaround to allow rubygem-irb's directory -> symlink conversion.
+
+ The /usr/share/ruby/irb has been moved from a directory to a symlink
+ in RHEL 9 and this conversion was not handled on RPM level.
+ This leads to DNF reporting package file conflicts when a major upgrade
+ is attempted and rubygem-irb (or ruby-irb) is installed.
+
+ Register "handlerubyirbsymlink" script that removes the directory prior
+ to DNF upgrade and allows it to create the expected symlink in place of
+ the removed directory.
"""
name = 'register_ruby_irb_adjustment'
--
2.47.0

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,56 @@
From 89afbe8cb41f874f32acddc1e1696132f3531677 Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Fri, 8 Nov 2024 17:40:01 +0100
Subject: [PATCH 31/40] Packaging: Require leapp-framework 6.x + update leapp
deps
The leapp actors configuration feature is present since
leapp-framework 6.0. Update the dependencies to ensure the correct
version of the framework is installed on the system.
Also, leapp's own requirements have been updated: python3-PyYAML is now
required (a YAML parser is needed) and leapp-framework-dependencies is
bumped to 6. Reflect this change in the leapp-deps metapackage so that
leapp dependencies are satisfied during the upgrade process.
---
packaging/leapp-repository.spec | 2 +-
packaging/other_specs/leapp-el7toel8-deps.spec | 3 ++-
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/packaging/leapp-repository.spec b/packaging/leapp-repository.spec
index 0d63ba02..570d0df2 100644
--- a/packaging/leapp-repository.spec
+++ b/packaging/leapp-repository.spec
@@ -120,7 +120,7 @@ Requires: leapp-repository-dependencies = %{leapp_repo_deps}
# IMPORTANT: this is capability provided by the leapp framework rpm.
# Check that 'version' instead of the real framework rpm version.
-Requires: leapp-framework >= 5.0, leapp-framework < 6
+Requires: leapp-framework >= 6.0, leapp-framework < 7
# Since we provide sub-commands for the leapp utility, we expect the leapp
# tool to be installed as well.
diff --git a/packaging/other_specs/leapp-el7toel8-deps.spec b/packaging/other_specs/leapp-el7toel8-deps.spec
index d9e94faa..2c662a37 100644
--- a/packaging/other_specs/leapp-el7toel8-deps.spec
+++ b/packaging/other_specs/leapp-el7toel8-deps.spec
@@ -14,7 +14,7 @@
%define leapp_repo_deps 10
-%define leapp_framework_deps 5
+%define leapp_framework_deps 6
# NOTE: the Version contains the %{rhel} macro just for the convenience to
# have always upgrade path between newer and older deps packages. So for
@@ -112,6 +112,7 @@ Requires: python3
Requires: python3-six
Requires: python3-setuptools
Requires: python3-requests
+Requires: python3-PyYAML
%description -n %{ldname}
--
2.47.0

View File

@ -0,0 +1,48 @@
From 36b93e4a2504f72e5a371a75a23e7cd2c695b84b Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Sun, 6 Oct 2024 21:01:13 +0200
Subject: [PATCH 32/40] spec: create /etc/leapp/actor_conf.d
Add additional build steps to the specfile that create the actor
configuration directory. The directory is owned by the package, so
it gets removed when the user uninstalls leapp.
Also prepare some commented-out lines for the future, when we will want
to include configuration files as part of the RPM.
---
etc/leapp/actor_conf.d/.gitkeep | 0
packaging/leapp-repository.spec | 7 +++++++
2 files changed, 7 insertions(+)
create mode 100644 etc/leapp/actor_conf.d/.gitkeep
diff --git a/etc/leapp/actor_conf.d/.gitkeep b/etc/leapp/actor_conf.d/.gitkeep
new file mode 100644
index 00000000..e69de29b
diff --git a/packaging/leapp-repository.spec b/packaging/leapp-repository.spec
index 570d0df2..828355bf 100644
--- a/packaging/leapp-repository.spec
+++ b/packaging/leapp-repository.spec
@@ -250,6 +250,11 @@ install -m 0755 -d %{buildroot}%{_sysconfdir}/leapp/files/
install -m 0644 etc/leapp/transaction/* %{buildroot}%{_sysconfdir}/leapp/transaction
install -m 0644 etc/leapp/files/* %{buildroot}%{_sysconfdir}/leapp/files
+# Actor configuration dir
+install -m 0755 -d %{buildroot}%{_sysconfdir}/leapp/actor_conf.d/
+# uncomment to install existing configs
+#install -m 0644 etc/leapp/actor_conf.d/* %%{buildroot}%%{_sysconfdir}/leapp/actor_conf.d
+
# install CLI commands for the leapp utility on the expected path
install -m 0755 -d %{buildroot}%{leapp_python_sitelib}/leapp/cli/
cp -r commands %{buildroot}%{leapp_python_sitelib}/leapp/cli/
@@ -295,6 +300,8 @@ done;
%dir %{custom_repositorydir}
%dir %{leapp_python_sitelib}/leapp/cli/commands
%config %{_sysconfdir}/leapp/files/*
+# uncomment to package installed configs
+#%%config %%{_sysconfdir}/leapp/actor_conf.d/*
%{_sysconfdir}/leapp/repos.d/*
%{_sysconfdir}/leapp/transaction/*
%{repositorydir}/*
--
2.47.0
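
Editor's note: this patch only creates the directory; the files themselves are expected to be YAML (hence the python3-PyYAML requirement introduced above). As a hedged sketch of the concept, not the framework's actual loader, reading every file from such a directory could look roughly like this:

import os

import yaml  # provided by python3-PyYAML

CONF_DIR = '/etc/leapp/actor_conf.d'


def read_actor_conf_dir(conf_dir=CONF_DIR):
    # Illustrative only: merge all YAML documents found in the directory.
    # The real loading and validation is done by the leapp framework
    # (see the actor configuration patches below).
    merged = {}
    for name in sorted(os.listdir(conf_dir)):
        with open(os.path.join(conf_dir, name)) as handle:
            merged.update(yaml.safe_load(handle) or {})
    return merged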

View File

@ -0,0 +1,31 @@
From 87db66c863104fea824a4406732cbe233ffee412 Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Wed, 13 Nov 2024 15:05:50 +0100
Subject: [PATCH 33/40] spec: drop .gitkeep files from the RPM
We have several .gitkeep files in the repo because we want some
otherwise-empty directories to be present in git. This is a common hack
to achieve that, but we do not want these files to end up in the
resulting RPMs, so we just remove them.
---
packaging/leapp-repository.spec | 3 +++
1 file changed, 3 insertions(+)
diff --git a/packaging/leapp-repository.spec b/packaging/leapp-repository.spec
index 828355bf..2bb52505 100644
--- a/packaging/leapp-repository.spec
+++ b/packaging/leapp-repository.spec
@@ -272,6 +272,9 @@ rm -rf %{buildroot}%{repositorydir}/common/actors/testactor
find %{buildroot}%{repositorydir}/common -name "test.py" -delete
rm -rf `find %{buildroot}%{repositorydir} -name "tests" -type d`
find %{buildroot}%{repositorydir} -name "Makefile" -delete
+# .gitkeep files are used to keep otherwise-empty directories in the repo, but we do not
+# want these files in the resulting RPM
+find %{buildroot} -name .gitkeep -delete
for DIRECTORY in $(find %{buildroot}%{repositorydir}/ -mindepth 1 -maxdepth 1 -type d);
do
--
2.47.0

View File

@ -0,0 +1,95 @@
From 140a0bbb689814041fa6a03ee2b703e70a20f2f2 Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Sun, 10 Nov 2024 13:54:20 +0100
Subject: [PATCH 34/40] cli: load actor configuration
Load actor configuration when running `leapp upgrade` or `leapp
preupgrade`. The configuration is loaded, saved to leapp's DB,
and remains available to all actors via the framework's global variable.
---
commands/command_utils.py | 32 +++++++++++++++++++++++++++++++-
commands/preupgrade/__init__.py | 3 +++
commands/upgrade/__init__.py | 3 +++
3 files changed, 37 insertions(+), 1 deletion(-)
diff --git a/commands/command_utils.py b/commands/command_utils.py
index 2810a542..190f5f03 100644
--- a/commands/command_utils.py
+++ b/commands/command_utils.py
@@ -1,10 +1,12 @@
+import hashlib
import json
import os
import re
import resource
+from leapp.actors import config as actor_config
from leapp.exceptions import CommandError
-from leapp.utils import path
+from leapp.utils import audit, path
HANA_BASE_PATH = '/hana/shared'
HANA_SAPCONTROL_PATH_X86_64 = 'exe/linuxx86_64/hdb/sapcontrol'
@@ -178,3 +180,31 @@ def set_resource_limits():
if soft_fsize != fsize_limit:
set_resource_limit(resource.RLIMIT_FSIZE, fsize_limit, fsize_limit)
+
+
+def load_actor_configs_and_store_it_in_db(context, repositories, framework_cfg):
+ Load actor configuration so that actors can access it, and store it into leapp's db.
+ Load actor configuration so that actor's can access it and store it into leapp db.
+
+ :param context: Current execution context
+ :param repositories: Discovered repositories
+ :param framework_cfg: Leapp's configuration
+ """
+ # Read the Actor Config and validate it against the schemas saved in the
+ # configuration.
+
+ actor_config_schemas = tuple(actor.config_schemas for actor in repositories.actors)
+ actor_config_schemas = actor_config.normalize_schemas(actor_config_schemas)
+ actor_config_path = framework_cfg.get('actor_config', 'path')
+
+ # Note: actor_config.load() stores the loaded actor config into a global
+ # variable which can then be accessed by functions in that file. Is this
+ # the right way to store that information?
+ actor_cfg = actor_config.load(actor_config_path, actor_config_schemas)
+
+ # Dump the collected configuration, checksum it and store it inside the DB
+ config_text = json.dumps(actor_cfg)
+ config_text_hash = hashlib.sha256(config_text.encode('utf-8')).hexdigest()
+ config_data = audit.ActorConfigData(config=config_text, hash_id=config_text_hash)
+ db_config = audit.ActorConfig(config=config_data, context=context)
+ db_config.store()
diff --git a/commands/preupgrade/__init__.py b/commands/preupgrade/__init__.py
index a9fa40e0..631eca6b 100644
--- a/commands/preupgrade/__init__.py
+++ b/commands/preupgrade/__init__.py
@@ -62,6 +62,9 @@ def preupgrade(args, breadcrumbs):
command_utils.set_resource_limits()
workflow = repositories.lookup_workflow('IPUWorkflow')()
+
+ command_utils.load_actor_configs_and_store_it_in_db(context, repositories, cfg)
+
util.warn_if_unsupported(configuration)
util.process_whitelist_experimental(repositories, workflow, configuration, logger)
with beautify_actor_exception():
diff --git a/commands/upgrade/__init__.py b/commands/upgrade/__init__.py
index c7487fde..3dedd438 100644
--- a/commands/upgrade/__init__.py
+++ b/commands/upgrade/__init__.py
@@ -93,6 +93,9 @@ def upgrade(args, breadcrumbs):
command_utils.set_resource_limits()
workflow = repositories.lookup_workflow('IPUWorkflow')(auto_reboot=args.reboot)
+
+ command_utils.load_actor_configs_and_store_it_in_db(context, repositories, cfg)
+
util.process_whitelist_experimental(repositories, workflow, configuration, logger)
util.warn_if_unsupported(configuration)
with beautify_actor_exception():
--
2.47.0
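
Editor's note: on the actor side, the loaded configuration becomes reachable through the current actor. A minimal sketch of a config-aware actor (the actor name is hypothetical, but the schema declaration and access pattern mirror the check_rhui changes in a later patch; the RHUI schemas themselves are introduced in the next patch):

from leapp.actors import Actor
from leapp.configs.common.rhui import all_rhui_cfg, RhuiUseConfig
from leapp.libraries.stdlib import api
from leapp.tags import FactsPhaseTag, IPUWorkflowTag


class ConfigAwareExample(Actor):
    """Hypothetical actor showing how declared config schemas are consumed."""

    name = 'config_aware_example'
    config_schemas = all_rhui_cfg  # schemas the framework validates the config against
    consumes = ()
    produces = ()
    tags = (IPUWorkflowTag, FactsPhaseTag)

    def process(self):
        rhui_section = api.current_actor().config['rhui']
        api.current_logger().info(
            'use_config = {}'.format(rhui_section[RhuiUseConfig.name]))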

View File

@ -0,0 +1,157 @@
From f3d38325fb525bca427a2b00e2bfb73b9297c36a Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Sun, 10 Nov 2024 14:35:26 +0100
Subject: [PATCH 35/40] configs(common): introduce RHUI configuration
Introduce a common configuration definition for RHUI related decisions.
The configuration has an atomic nature - if the user wants to overwrite
leapp's decisions, he/she must overwrite all of them. Essentially, all
fields of the RHUI_SETUPS cloud map entry can be configured. Almost no
non-empty defaults are provided, as no reasonable defaults can be given.
This is because the setup parameters differ from provider to
provider. Therefore, the default values are empty, so that an actor can
later detect whether all fields of the RHUI config have been filled.
Jira ref: RHEL-56251
---
repos/system_upgrade/common/configs/rhui.py | 127 ++++++++++++++++++++
1 file changed, 127 insertions(+)
create mode 100644 repos/system_upgrade/common/configs/rhui.py
diff --git a/repos/system_upgrade/common/configs/rhui.py b/repos/system_upgrade/common/configs/rhui.py
new file mode 100644
index 00000000..ade9bab9
--- /dev/null
+++ b/repos/system_upgrade/common/configs/rhui.py
@@ -0,0 +1,127 @@
+"""
+Configuration keys for RHUI.
+
+In case of RHUI in private regions it is usual that publicly known RHUI data
+is not valid. In such cases it's possible to provide the correct expected
+RHUI data to correct the in-place upgrade process.
+"""
+
+from leapp.actors.config import Config
+from leapp.models import fields
+
+RHUI_CONFIG_SECTION = 'rhui'
+
+
+# @Note(mhecko): We use this field to distinguish a config instantiated purely from default values (which we should ignore).
+# #              Maybe we could make all config values None and detect it that way, but then we cannot
+# #              give the user an example of how the config should look.
+class RhuiUseConfig(Config):
+ section = RHUI_CONFIG_SECTION
+ name = "use_config"
+ type_ = fields.Boolean()
+ default = False
+ description = """
+ Use values provided in the configuration file to override leapp's decisions.
+ """
+
+
+class RhuiSourcePkgs(Config):
+ section = RHUI_CONFIG_SECTION
+ name = "source_clients"
+ type_ = fields.List(fields.String())
+ default = []
+ description = """
+ The name of the source RHUI client RPMs (to be removed from the system).
+ """
+
+
+class RhuiTargetPkgs(Config):
+ section = RHUI_CONFIG_SECTION
+ name = "target_clients"
+ type_ = fields.List(fields.String())
+ default = []
+ description = """
+ The name of the target RHUI client RPM (to be installed on the system).
+ """
+
+
+class RhuiCloudProvider(Config):
+ section = RHUI_CONFIG_SECTION
+ name = "cloud_provider"
+ type_ = fields.String()
+ default = ""
+ description = """
+ Cloud provider name that should be used internally by leapp.
+
+ Leapp recognizes the following cloud providers:
+ - azure
+ - aws
+ - google
+
+ Cloud provider information is used for triggering some provider-specific modifications. The value also
+ influences how leapp determines target repositories to enable.
+ """
+
+
+# @Note(mhecko): We likely don't need this. We need the variant primarily to grab files from a correct directory
+# in leapp-rhui-<provider> folders.
+class RhuiCloudVariant(Config):
+ section = RHUI_CONFIG_SECTION
+ name = "image_variant"
+ type_ = fields.String()
+ default = "ordinary"
+ description = """
+ RHEL variant of the source system - is the source system an SAP-specific image?
+
+ Leapp recognizes the following source system variants:
+ - ordinary # The source system has not been deployed from a RHEL with SAP image
+ - sap # RHEL SAP images
+ - sap-apps # RHEL SAP Apps images (Azure only)
+ - sap-ha # RHEL HA Apps images (HA only)
+
+ Cloud provider information is used for triggering some provider-specific modifications. The value also
+ influences how leapp determines target repositories to enable.
+
+ Default:
+ "ordinary"
+ """
+
+
+class RhuiUpgradeFiles(Config):
+ section = RHUI_CONFIG_SECTION
+ name = "upgrade_files"
+ type_ = fields.StringMap(fields.String())
+ default = dict()
+ description = """
+ A mapping from source file paths to the destination where they should be
+ placed in the upgrade container.
+
+ Typically, these files should be provided by leapp-rhui-<PROVIDER> packages.
+
+ These files are needed to facilitate access to target repositories. Typical examples are: repofile(s),
+ certificates and keys.
+ """
+
+
+class RhuiTargetRepositoriesToUse(Config):
+ section = RHUI_CONFIG_SECTION
+ name = "rhui_target_repositories_to_use"
+ type_ = fields.List(fields.String())
+ description = """
+ List of target repositories enabled during the upgrade. Similar to executing leapp with --enablerepo.
+
+ The repositories to be enabled need to be either in the repofiles listed in the `upgrade_files` field,
+ or in repofiles present on the source system.
+ """
+ default = list()
+
+
+all_rhui_cfg = (
+ RhuiTargetPkgs,
+ RhuiUpgradeFiles,
+ RhuiTargetRepositoriesToUse,
+ RhuiCloudProvider,
+ RhuiCloudVariant,
+ RhuiSourcePkgs,
+ RhuiUseConfig
+)
--
2.47.0
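
Editor's note: because every field above defaults to an empty value, an actor can tell whether the user actually filled the section in. A minimal sketch of that check (the next patch implements the same idea in stop_with_err_if_config_missing_fields):

from leapp.configs.common.rhui import (
    RhuiCloudProvider,
    RhuiSourcePkgs,
    RhuiTargetPkgs,
    RhuiUpgradeFiles
)

REQUIRED_FIELDS = (RhuiCloudProvider, RhuiSourcePkgs, RhuiTargetPkgs, RhuiUpgradeFiles)


def missing_rhui_fields(rhui_section):
    # Empty defaults ("", [], {}) are falsy, so any field still at its
    # default value is reported as not filled in by the user.
    return [field.name for field in REQUIRED_FIELDS if not rhui_section[field.name]]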

View File

@ -0,0 +1,457 @@
From a03e8e5d10c1d6f3cdae216fafa0d7f0d0896494 Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Sun, 10 Nov 2024 14:36:07 +0100
Subject: [PATCH 36/40] check_rhui: read RHUI configuration
Extend the check_rhui actor to read user-provided RHUI configuration.
If the provided configuration values say that the user wants to
overwrite leapp's decisions, the actor checks whether all values
are provided; if so, a corresponding RHUIInfo message is produced. The
only implemented safeguards are those that prevent the user from
accidentally specifying a non-existent file to be copied into the
scratch container while preparing to download target userspace
content. If the user provides only some of the configuration values,
the upgrade is terminated early with an error, providing quick feedback
about the misconfiguration. The patch has been designed to allow development
of upgrades on previously unknown clouds (clouds without an entry in
RHUI_SETUPS).
Jira ref: RHEL-56251
---
.../common/actors/cloud/checkrhui/actor.py | 4 +
.../cloud/checkrhui/libraries/checkrhui.py | 102 +++++++++-
.../tests/component_test_checkrhui.py | 178 ++++++++++++++++--
3 files changed, 265 insertions(+), 19 deletions(-)
diff --git a/repos/system_upgrade/common/actors/cloud/checkrhui/actor.py b/repos/system_upgrade/common/actors/cloud/checkrhui/actor.py
index 593e73e5..933ffcb3 100644
--- a/repos/system_upgrade/common/actors/cloud/checkrhui/actor.py
+++ b/repos/system_upgrade/common/actors/cloud/checkrhui/actor.py
@@ -1,4 +1,5 @@
from leapp.actors import Actor
+from leapp.configs.common.rhui import all_rhui_cfg
from leapp.libraries.actor import checkrhui as checkrhui_lib
from leapp.models import (
CopyFile,
@@ -8,6 +9,7 @@ from leapp.models import (
RequiredTargetUserspacePackages,
RHUIInfo,
RpmTransactionTasks,
+ TargetRepositories,
TargetUserSpacePreupgradeTasks
)
from leapp.reporting import Report
@@ -21,6 +23,7 @@ class CheckRHUI(Actor):
"""
name = 'checkrhui'
+ config_schemas = all_rhui_cfg
consumes = (InstalledRPM,)
produces = (
KernelCmdlineArg,
@@ -28,6 +31,7 @@ class CheckRHUI(Actor):
RequiredTargetUserspacePackages,
Report, DNFPluginTask,
RpmTransactionTasks,
+ TargetRepositories,
TargetUserSpacePreupgradeTasks,
CopyFile,
)
diff --git a/repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py b/repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py
index 3b217917..64e36e08 100644
--- a/repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py
+++ b/repos/system_upgrade/common/actors/cloud/checkrhui/libraries/checkrhui.py
@@ -2,17 +2,29 @@ import itertools
import os
from collections import namedtuple
+import leapp.configs.common.rhui as rhui_config_lib
from leapp import reporting
+from leapp.configs.common.rhui import ( # Import all config fields so we are not using their name attributes directly
+ RhuiCloudProvider,
+ RhuiCloudVariant,
+ RhuiSourcePkgs,
+ RhuiTargetPkgs,
+ RhuiTargetRepositoriesToUse,
+ RhuiUpgradeFiles,
+ RhuiUseConfig
+)
from leapp.exceptions import StopActorExecutionError
from leapp.libraries.common import rhsm, rhui
from leapp.libraries.common.config import version
from leapp.libraries.stdlib import api
from leapp.models import (
CopyFile,
+ CustomTargetRepository,
DNFPluginTask,
InstalledRPM,
RHUIInfo,
RpmTransactionTasks,
+ TargetRepositories,
TargetRHUIPostInstallTasks,
TargetRHUIPreInstallTasks,
TargetRHUISetupInfo,
@@ -291,11 +303,11 @@ def produce_rhui_info_to_setup_target(rhui_family, source_setup_desc, target_set
api.produce(rhui_info)
-def produce_rpms_to_install_into_target(source_setup, target_setup):
- to_install = sorted(target_setup.clients - source_setup.clients)
- to_remove = sorted(source_setup.clients - target_setup.clients)
+def produce_rpms_to_install_into_target(source_clients, target_clients):
+ to_install = sorted(target_clients - source_clients)
+ to_remove = sorted(source_clients - target_clients)
- api.produce(TargetUserSpacePreupgradeTasks(install_rpms=sorted(target_setup.clients)))
+ api.produce(TargetUserSpacePreupgradeTasks(install_rpms=sorted(target_clients)))
if to_install or to_remove:
api.produce(RpmTransactionTasks(to_install=to_install, to_remove=to_remove))
@@ -316,7 +328,85 @@ def inform_about_upgrade_with_rhui_without_no_rhsm():
return False
+def emit_rhui_setup_tasks_based_on_config(rhui_config_dict):
+ config_upgrade_files = rhui_config_dict[RhuiUpgradeFiles.name]
+
+ nonexisting_files_to_copy = []
+ for source_path in config_upgrade_files:
+ if not os.path.exists(source_path):
+ nonexisting_files_to_copy.append(source_path)
+
+ if nonexisting_files_to_copy:
+ details_lines = ['The following files were not found:']
+ # Use .format and put backticks around paths so that weird unicode spaces will be easily seen
+ details_lines.extend(' - `{0}`'.format(path) for path in nonexisting_files_to_copy)
+ details = '\n'.join(details_lines)
+
+ reason = 'RHUI config lists nonexisting files in its `{0}` field.'.format(RhuiUpgradeFiles.name)
+ raise StopActorExecutionError(reason, details={'details': details})
+
+ files_to_copy_into_overlay = [CopyFile(src=key, dst=value) for key, value in config_upgrade_files.items()]
+ preinstall_tasks = TargetRHUIPreInstallTasks(files_to_copy_into_overlay=files_to_copy_into_overlay)
+
+ target_client_setup_info = TargetRHUISetupInfo(
+ preinstall_tasks=preinstall_tasks,
+ postinstall_tasks=TargetRHUIPostInstallTasks(),
+ bootstrap_target_client=False, # We don't need to install the client into overlay - user provided all files
+ )
+
+ rhui_info = RHUIInfo(
+ provider=rhui_config_dict[RhuiCloudProvider.name],
+ variant=rhui_config_dict[RhuiCloudVariant.name],
+ src_client_pkg_names=rhui_config_dict[RhuiSourcePkgs.name],
+ target_client_pkg_names=rhui_config_dict[RhuiTargetPkgs.name],
+ target_client_setup_info=target_client_setup_info
+ )
+ api.produce(rhui_info)
+
+
+def request_configured_repos_to_be_enabled(rhui_config):
+ config_repos_to_enable = rhui_config[RhuiTargetRepositoriesToUse.name]
+ custom_repos = [CustomTargetRepository(repoid=repoid) for repoid in config_repos_to_enable]
+ if custom_repos:
+ target_repos = TargetRepositories(custom_repos=custom_repos, rhel_repos=[])
+ api.produce(target_repos)
+
+
+def stop_with_err_if_config_missing_fields(config):
+ required_fields = [
+ RhuiTargetRepositoriesToUse,
+ RhuiCloudProvider,
+ # RhuiCloudVariant, <- this is not required
+ RhuiSourcePkgs,
+ RhuiTargetPkgs,
+ RhuiUpgradeFiles,
+ ]
+
+ missing_fields = tuple(field for field in required_fields if not config[field.name])
+ if missing_fields:
+ field_names = (field.name for field in missing_fields)
+ missing_fields_str = ', '.join(field_names)
+ details = 'The following required RHUI config fields are missing or they are set to an empty value: {}'
+ details = details.format(missing_fields_str)
+ raise StopActorExecutionError('Provided RHUI config is missing values for required fields.',
+ details={'details': details})
+
+
def process():
+ rhui_config = api.current_actor().config[rhui_config_lib.RHUI_CONFIG_SECTION]
+
+ if rhui_config[RhuiUseConfig.name]:
+ api.current_logger().info('Skipping RHUI upgrade auto-configuration - using provided config instead.')
+ stop_with_err_if_config_missing_fields(rhui_config)
+ emit_rhui_setup_tasks_based_on_config(rhui_config)
+
+ src_clients = set(rhui_config[RhuiSourcePkgs.name])
+ target_clients = set(rhui_config[RhuiTargetPkgs.name])
+ produce_rpms_to_install_into_target(src_clients, target_clients)
+
+ request_configured_repos_to_be_enabled(rhui_config)
+ return
+
installed_rpm = itertools.chain(*[installed_rpm_msg.items for installed_rpm_msg in api.consume(InstalledRPM)])
installed_pkgs = {rpm.name for rpm in installed_rpm}
@@ -342,7 +432,9 @@ def process():
# Instruction on how to access the target content
produce_rhui_info_to_setup_target(src_rhui_setup.family, src_rhui_setup.description, target_setup_desc)
- produce_rpms_to_install_into_target(src_rhui_setup.description, target_setup_desc)
+ source_clients = src_rhui_setup.description.clients
+ target_clients = target_setup_desc.clients
+ produce_rpms_to_install_into_target(source_clients, target_clients)
if src_rhui_setup.family.provider == rhui.RHUIProvider.AWS:
# We have to disable Amazon-id plugin in the initramdisk phase as there is no network
diff --git a/repos/system_upgrade/common/actors/cloud/checkrhui/tests/component_test_checkrhui.py b/repos/system_upgrade/common/actors/cloud/checkrhui/tests/component_test_checkrhui.py
index 27e70eea..3ac9c1b8 100644
--- a/repos/system_upgrade/common/actors/cloud/checkrhui/tests/component_test_checkrhui.py
+++ b/repos/system_upgrade/common/actors/cloud/checkrhui/tests/component_test_checkrhui.py
@@ -1,30 +1,43 @@
-from collections import namedtuple
+import itertools
+import os
+from collections import defaultdict
from enum import Enum
import pytest
from leapp import reporting
+from leapp.configs.common.rhui import (
+ all_rhui_cfg,
+ RhuiCloudProvider,
+ RhuiCloudVariant,
+ RhuiSourcePkgs,
+ RhuiTargetPkgs,
+ RhuiTargetRepositoriesToUse,
+ RhuiUpgradeFiles,
+ RhuiUseConfig
+)
from leapp.exceptions import StopActorExecutionError
from leapp.libraries.actor import checkrhui as checkrhui_lib
from leapp.libraries.common import rhsm, rhui
-from leapp.libraries.common.config import mock_configs, version
from leapp.libraries.common.rhui import mk_rhui_setup, RHUIFamily
-from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked, produce_mocked
+from leapp.libraries.common.testutils import (
+ _make_default_config,
+ create_report_mocked,
+ CurrentActorMocked,
+ produce_mocked
+)
from leapp.libraries.stdlib import api
from leapp.models import (
- CopyFile,
InstalledRPM,
- RequiredTargetUserspacePackages,
RHUIInfo,
RPM,
RpmTransactionTasks,
+ TargetRepositories,
TargetRHUIPostInstallTasks,
TargetRHUIPreInstallTasks,
TargetRHUISetupInfo,
TargetUserSpacePreupgradeTasks
)
-from leapp.reporting import Report
-from leapp.snactor.fixture import current_actor_context
RH_PACKAGER = 'Red Hat, Inc. <http://bugzilla.redhat.com/bugzilla>'
@@ -95,7 +108,8 @@ def mk_cloud_map(variants):
]
)
def test_determine_rhui_src_variant(monkeypatch, extra_pkgs, rhui_setups, expected_result):
- monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(src_ver='7.9'))
+ actor = CurrentActorMocked(src_ver='7.9', config=_make_default_config(all_rhui_cfg))
+ monkeypatch.setattr(api, 'current_actor', actor)
installed_pkgs = {'zip', 'zsh', 'bash', 'grubby'}.union(set(extra_pkgs))
if expected_result and not isinstance(expected_result, RHUIFamily): # An exception
@@ -167,7 +181,8 @@ def test_google_specific_customization(provider, should_mutate):
)
def test_aws_specific_customization(monkeypatch, rhui_family, target_major, should_mutate):
dst_ver = '{major}.0'.format(major=target_major)
- monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(dst_ver=dst_ver))
+ actor = CurrentActorMocked(dst_ver=dst_ver, config=_make_default_config(all_rhui_cfg))
+ monkeypatch.setattr(api, 'current_actor', actor)
setup_info = mk_setup_info()
checkrhui_lib.customize_rhui_setup_for_aws(rhui_family, setup_info)
@@ -215,12 +230,12 @@ def produce_rhui_info_to_setup_target(monkeypatch):
def test_produce_rpms_to_install_into_target(monkeypatch):
- source_rhui_setup = mk_rhui_setup(clients={'src_pkg'}, leapp_pkg='leapp_pkg')
- target_rhui_setup = mk_rhui_setup(clients={'target_pkg'}, leapp_pkg='leapp_pkg')
+ source_clients = {'src_pkg'}
+ target_clients = {'target_pkg'}
monkeypatch.setattr(api, 'produce', produce_mocked())
- checkrhui_lib.produce_rpms_to_install_into_target(source_rhui_setup, target_rhui_setup)
+ checkrhui_lib.produce_rpms_to_install_into_target(source_clients, target_clients)
assert len(api.produce.model_instances) == 2
userspace_tasks, target_rpm_tasks = api.produce.model_instances[0], api.produce.model_instances[1]
@@ -276,7 +291,8 @@ def test_process(monkeypatch, extra_installed_pkgs, skip_rhsm, expected_action):
installed_rpms = InstalledRPM(items=installed_pkgs)
monkeypatch.setattr(api, 'produce', produce_mocked())
- monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(src_ver='7.9', msgs=[installed_rpms]))
+ actor = CurrentActorMocked(src_ver='7.9', msgs=[installed_rpms], config=_make_default_config(all_rhui_cfg))
+ monkeypatch.setattr(api, 'current_actor', actor)
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
monkeypatch.setattr(rhsm, 'skip_rhsm', lambda: skip_rhsm)
monkeypatch.setattr(rhui, 'RHUI_SETUPS', known_setups)
@@ -315,7 +331,8 @@ def test_unknown_target_rhui_setup(monkeypatch, is_target_setup_known):
installed_rpms = InstalledRPM(items=installed_pkgs)
monkeypatch.setattr(api, 'produce', produce_mocked())
- monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(src_ver='7.9', msgs=[installed_rpms]))
+ actor = CurrentActorMocked(src_ver='7.9', msgs=[installed_rpms], config=_make_default_config(all_rhui_cfg))
+ monkeypatch.setattr(api, 'current_actor', actor)
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
monkeypatch.setattr(rhsm, 'skip_rhsm', lambda: True)
monkeypatch.setattr(rhui, 'RHUI_SETUPS', known_setups)
@@ -374,3 +391,136 @@ def test_select_chronologically_closest(monkeypatch, setups, desired_minor, expe
setup = setups[0]
assert setup == expected_setup
+
+
+def test_config_overwrites_everything(monkeypatch):
+ rhui_config = {
+ RhuiUseConfig.name: True,
+ RhuiSourcePkgs.name: ['client_source'],
+ RhuiTargetPkgs.name: ['client_target'],
+ RhuiCloudProvider.name: 'aws',
+ RhuiUpgradeFiles.name: {
+ '/root/file.repo': '/etc/yum.repos.d/'
+ },
+ RhuiTargetRepositoriesToUse.name: [
+ 'repoid_to_use'
+ ]
+ }
+ all_config = {'rhui': rhui_config}
+
+ actor = CurrentActorMocked(config=all_config)
+ monkeypatch.setattr(api, 'current_actor', actor)
+
+ function_calls = defaultdict(int)
+
+ def mk_function_probe(fn_name):
+ def probe(*args, **kwargs):
+ function_calls[fn_name] += 1
+ return probe
+
+ monkeypatch.setattr(checkrhui_lib,
+ 'emit_rhui_setup_tasks_based_on_config',
+ mk_function_probe('emit_rhui_setup_tasks_based_on_config'))
+ monkeypatch.setattr(checkrhui_lib,
+ 'stop_with_err_if_config_missing_fields',
+ mk_function_probe('stop_with_err_if_config_missing_fields'))
+ monkeypatch.setattr(checkrhui_lib,
+ 'produce_rpms_to_install_into_target',
+ mk_function_probe('produce_rpms_to_install_into_target'))
+ monkeypatch.setattr(checkrhui_lib,
+ 'request_configured_repos_to_be_enabled',
+ mk_function_probe('request_configured_repos_to_be_enabled'))
+
+ checkrhui_lib.process()
+
+ expected_function_calls = {
+ 'emit_rhui_setup_tasks_based_on_config': 1,
+ 'stop_with_err_if_config_missing_fields': 1,
+ 'produce_rpms_to_install_into_target': 1,
+ 'request_configured_repos_to_be_enabled': 1,
+ }
+
+ assert function_calls == expected_function_calls
+
+
+def test_request_configured_repos_to_be_enabled(monkeypatch):
+ monkeypatch.setattr(api, 'produce', produce_mocked())
+
+ rhui_config = {
+ RhuiUseConfig.name: True,
+ RhuiSourcePkgs.name: ['client_source'],
+ RhuiTargetPkgs.name: ['client_target'],
+ RhuiCloudProvider.name: 'aws',
+ RhuiUpgradeFiles.name: {
+ '/root/file.repo': '/etc/yum.repos.d/'
+ },
+ RhuiTargetRepositoriesToUse.name: [
+ 'repoid1',
+ 'repoid2',
+ 'repoid3',
+ ]
+ }
+
+ checkrhui_lib.request_configured_repos_to_be_enabled(rhui_config)
+
+ assert api.produce.called
+ assert len(api.produce.model_instances) == 1
+
+ target_repos = api.produce.model_instances[0]
+ assert isinstance(target_repos, TargetRepositories)
+ assert not target_repos.rhel_repos
+
+ custom_repoids = sorted(custom_repo_model.repoid for custom_repo_model in target_repos.custom_repos)
+ assert custom_repoids == ['repoid1', 'repoid2', 'repoid3']
+
+
+@pytest.mark.parametrize(
+ ('upgrade_files', 'existing_files'),
+ (
+ (['/root/a', '/root/b'], ['/root/a', '/root/b']),
+ (['/root/a', '/root/b'], ['/root/b']),
+ (['/root/a', '/root/b'], []),
+ )
+)
+def test_missing_files_in_config(monkeypatch, upgrade_files, existing_files):
+ upgrade_files_map = dict((source_path, '/tmp/dummy') for source_path in upgrade_files)
+
+ rhui_config = {
+ RhuiUseConfig.name: True,
+ RhuiSourcePkgs.name: ['client_source'],
+ RhuiTargetPkgs.name: ['client_target'],
+ RhuiCloudProvider.name: 'aws',
+ RhuiCloudVariant.name: 'ordinary',
+ RhuiUpgradeFiles.name: upgrade_files_map,
+ RhuiTargetRepositoriesToUse.name: [
+ 'repoid_to_use'
+ ]
+ }
+
+ monkeypatch.setattr(os.path, 'exists', lambda path: path in existing_files)
+ monkeypatch.setattr(api, 'produce', produce_mocked())
+
+ should_error = (len(upgrade_files) != len(existing_files))
+ if should_error:
+ with pytest.raises(StopActorExecutionError):
+ checkrhui_lib.emit_rhui_setup_tasks_based_on_config(rhui_config)
+ else:
+ checkrhui_lib.emit_rhui_setup_tasks_based_on_config(rhui_config)
+ assert api.produce.called
+ assert len(api.produce.model_instances) == 1
+
+ rhui_info = api.produce.model_instances[0]
+ assert isinstance(rhui_info, RHUIInfo)
+ assert rhui_info.provider == 'aws'
+ assert rhui_info.variant == 'ordinary'
+ assert rhui_info.src_client_pkg_names == ['client_source']
+ assert rhui_info.target_client_pkg_names == ['client_target']
+
+ setup_info = rhui_info.target_client_setup_info
+ assert not setup_info.bootstrap_target_client
+
+ _copies_to_perform = setup_info.preinstall_tasks.files_to_copy_into_overlay
+ copies_to_perform = sorted((copy.src, copy.dst) for copy in _copies_to_perform)
+ expected_copies = sorted(zip(upgrade_files, itertools.repeat('/tmp/dummy')))
+
+ assert copies_to_perform == expected_copies
--
2.47.0
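
Editor's note: put together, the override is honored only when use_config is true and all required fields are filled; the dictionary the actor then works with resembles the one built in the tests above (all values here are placeholders):

rhui_section = {
    'use_config': True,
    'source_clients': ['source-rhui-client'],        # placeholder package names
    'target_clients': ['target-rhui-client'],
    'cloud_provider': 'aws',
    'image_variant': 'ordinary',
    'upgrade_files': {
        '/root/rhui.repo': '/etc/yum.repos.d/',       # hypothetical source -> destination
    },
    'rhui_target_repositories_to_use': ['example-target-repoid'],
}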

View File

@ -0,0 +1,53 @@
From a206a7f02c68f50ab50c9f547669d3a4178c4bd2 Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Wed, 16 Oct 2024 17:38:36 +0200
Subject: [PATCH 37/40] testutils: add support for configs
Extend the CurrentActorMocked class to accept a `config` value,
allowing developers to mock actors that rely on configuration.
A library function `_make_default_config` is also introduced,
allowing default configs to be instantiated from config schemas.
---
repos/system_upgrade/common/libraries/testutils.py | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/repos/system_upgrade/common/libraries/testutils.py b/repos/system_upgrade/common/libraries/testutils.py
index c538af1a..afeb360a 100644
--- a/repos/system_upgrade/common/libraries/testutils.py
+++ b/repos/system_upgrade/common/libraries/testutils.py
@@ -4,6 +4,7 @@ import os
from collections import namedtuple
from leapp import reporting
+from leapp.actors.config import _normalize_config, normalize_schemas
from leapp.libraries.common.config import architecture
from leapp.models import EnvVar
from leapp.utils.deprecation import deprecated
@@ -67,9 +68,15 @@ class logger_mocked(object):
return self
+def _make_default_config(actor_config_schema):
+ """ Make a config dict populated with default values. """
+ merged_schema = normalize_schemas((actor_config_schema, ))
+ return _normalize_config({}, merged_schema) # Will fill default values during normalization
+
+
class CurrentActorMocked(object): # pylint:disable=R0904
def __init__(self, arch=architecture.ARCH_X86_64, envars=None, kernel='3.10.0-957.43.1.el7.x86_64',
- release_id='rhel', src_ver='7.8', dst_ver='8.1', msgs=None, flavour='default'):
+ release_id='rhel', src_ver='7.8', dst_ver='8.1', msgs=None, flavour='default', config=None):
envarsList = [EnvVar(name=k, value=v) for k, v in envars.items()] if envars else []
version = namedtuple('Version', ['source', 'target'])(src_ver, dst_ver)
release = namedtuple('OS_release', ['release_id', 'version_id'])(release_id, src_ver)
@@ -82,6 +89,7 @@ class CurrentActorMocked(object): # pylint:disable=R0904
'configuration', ['architecture', 'kernel', 'leapp_env_vars', 'os_release', 'version', 'flavour']
)(arch, kernel, envarsList, release, version, flavour)
self._msgs = msgs or []
+ self.config = {} if config is None else config
def __call__(self):
return self
--
2.47.0
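
Editor's note: a test that does not care about specific config values can then rely on the defaults, as the checkrhui tests above already do; for example (test name and versions are placeholders):

from leapp.configs.common.rhui import all_rhui_cfg
from leapp.libraries.common.testutils import _make_default_config, CurrentActorMocked
from leapp.libraries.stdlib import api


def test_something_with_default_config(monkeypatch):
    actor = CurrentActorMocked(src_ver='8.10', dst_ver='9.4',
                               config=_make_default_config(all_rhui_cfg))
    monkeypatch.setattr(api, 'current_actor', actor)
    # ... call library code that reads api.current_actor().config ...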

View File

@ -0,0 +1,70 @@
From 0147bc268607e5931ebca95e3253087ec71a3c66 Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Sun, 20 Oct 2024 16:08:49 +0200
Subject: [PATCH 38/40] userspacegen(rhui): remove repofiles only if not owned
by an RPM
We copy files into the target userspace when setting up target
repository content. If this file is named equally as some of the
files installed by the target RHUI client installed during early
phases of target userspace setup process, we would delete it in
cleanup. Therefore, if we copy a repofile named /etc/yum.repos.d/X.repo
and the target client also owns a file /etc/yum.repos.d/X.repo, we
would remove it, making the container loose access to target content.
This patch prevents us from blindly deleting files, keeping files that
are owned by some RPM (usually that would be the target RHUI client).
---
.../libraries/userspacegen.py | 30 ++++++++++++++-----
1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
index d7698056..12736ab7 100644
--- a/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
+++ b/repos/system_upgrade/common/actors/targetuserspacecreator/libraries/userspacegen.py
@@ -1120,6 +1120,27 @@ def _get_target_userspace():
return constants.TARGET_USERSPACE.format(get_target_major_version())
+def _remove_injected_repofiles_from_our_rhui_packages(target_userspace_ctx, rhui_setup_info):
+ target_userspace_path = _get_target_userspace()
+ for copy in rhui_setup_info.preinstall_tasks.files_to_copy_into_overlay:
+ dst_in_container = get_copy_location_from_copy_in_task(target_userspace_path, copy)
+ dst_in_container = dst_in_container.strip('/')
+ dst_in_host = os.path.join(target_userspace_path, dst_in_container)
+
+ if os.path.isfile(dst_in_host) and dst_in_host.endswith('.repo'):
+ # The repofile might have been replaced by a new one provided by the RHUI client if names collide
+ # Performance: Do the query here and not earlier, because we would be running rpm needlessly
+ try:
+ path_with_root = '/' + dst_in_container
+ target_userspace_ctx.call(['rpm', '-q', '--whatprovides', path_with_root])
+ api.current_logger().debug('Repofile {0} kept as it is owned by some RPM.'.format(dst_in_host))
+ except CalledProcessError:
+ # rpm exits with 1 if the file is not owned by any RPM. We might be catching all kinds of other
+ # problems here, but still better than always removing repofiles.
+ api.current_logger().debug('Removing repofile - not owned by any RPM: {0}'.format(dst_in_host))
+ os.remove(dst_in_host)
+
+
def _create_target_userspace(context, indata, packages, files, target_repoids):
"""Create the target userspace."""
target_path = _get_target_userspace()
@@ -1139,14 +1160,7 @@ def _create_target_userspace(context, indata, packages, files, target_repoids):
)
setup_info = indata.rhui_info.target_client_setup_info
if not setup_info.bootstrap_target_client:
- target_userspace_path = _get_target_userspace()
- for copy in setup_info.preinstall_tasks.files_to_copy_into_overlay:
- dst_in_container = get_copy_location_from_copy_in_task(target_userspace_path, copy)
- dst_in_container = dst_in_container.strip('/')
- dst_in_host = os.path.join(target_userspace_path, dst_in_container)
- if os.path.isfile(dst_in_host) and dst_in_host.endswith('.repo'):
- api.current_logger().debug('Removing repofile: {0}'.format(dst_in_host))
- os.remove(dst_in_host)
+ _remove_injected_repofiles_from_our_rhui_packages(context, setup_info)
# and do not forget to set the rhsm into the container mode again
with mounting.NspawnActions(_get_target_userspace()) as target_context:
--
2.47.0
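
Editor's note: the ownership check boils down to the exit status of rpm; a standalone sketch of the same idea (outside the upgrade container, for illustration only):

import subprocess


def is_owned_by_rpm(path):
    # `rpm -q --whatprovides PATH` exits with 0 when an installed package
    # owns the file and with a non-zero status when nothing provides it.
    result = subprocess.run(
        ['rpm', '-q', '--whatprovides', path],
        stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=False,
    )
    return result.returncode == 0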

View File

@ -0,0 +1,857 @@
From c2f2895bb570a75eb2aaa7b84a2bcd9dcd537b0e Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Thu, 14 Nov 2024 14:24:15 +0100
Subject: [PATCH 39/40] Enable IPU for EL 9.6 (and drop EL 8.8/9.2)
* Add product certificates for RHEL 9.6
* Introduce upgrade path 8.10 -> 9.6
* Drop IPUs related to EL 8.8 and 9.2
* This will not be supported in this release.
* For now, keep the IPU 8.10 -> 9.5 path as it is a fresh release,
so it is still valuable for us to run tests there. We will drop it
later during this lifecycle (CTC-2?).
* Drop EL 8.8 from the list of supported versions
* Update tests in packit
* Note that tests for 9.6 could be failing for a while until
composes are created.
jira: RHEL-67621
---
.packit.yaml | 257 +++++-------------
.../common/files/prod-certs/9.6/279.pem | 37 +++
.../common/files/prod-certs/9.6/362.pem | 37 +++
.../common/files/prod-certs/9.6/363.pem | 37 +++
.../common/files/prod-certs/9.6/419.pem | 36 +++
.../common/files/prod-certs/9.6/433.pem | 37 +++
.../common/files/prod-certs/9.6/479.pem | 36 +++
.../common/files/prod-certs/9.6/486.pem | 37 +++
.../common/files/prod-certs/9.6/72.pem | 36 +++
.../common/files/upgrade_paths.json | 18 +-
.../common/libraries/config/version.py | 2 +-
11 files changed, 363 insertions(+), 207 deletions(-)
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.6/279.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.6/362.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.6/363.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.6/419.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.6/433.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.6/479.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.6/486.pem
create mode 100644 repos/system_upgrade/common/files/prod-certs/9.6/72.pem
diff --git a/.packit.yaml b/.packit.yaml
index fbfd0eea..48c3cbbb 100644
--- a/.packit.yaml
+++ b/.packit.yaml
@@ -145,104 +145,6 @@ jobs:
# ######################### Individual tests ########################### #
# ###################################################################### #
-# Tests: 7.9 -> 8.8
-- &sanity-79to88-aws
- <<: *sanity-abstract-7to8-aws
- trigger: pull_request
- identifier: sanity-7.9to8.8-aws
- tf_extra_params:
- test:
- tmt:
- plan_filter: 'tag:7to8 & tag:upgrade_happy_path & enabled:true'
- environments:
- - tmt:
- context:
- distro: "rhel-7.9"
- distro_target: "rhel-8.8"
- settings:
- provisioning:
- post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys"
- tags:
- BusinessUnit: sst_upgrades@leapp_upstream_test
- env:
- SOURCE_RELEASE: "7.9"
- TARGET_RELEASE: "8.8"
- RHUI: "aws"
- LEAPPDATA_BRANCH: "upstream"
- LEAPP_NO_RHSM: "1"
- USE_CUSTOM_REPOS: rhui
-
-- &sanity-79to88
- <<: *sanity-abstract-7to8
- trigger: pull_request
- identifier: sanity-7.9to8.8
- tf_extra_params:
- test:
- tmt:
- plan_filter: 'tag:7to8 & tag:sanity & enabled:true'
- environments:
- - tmt:
- context:
- distro: "rhel-7.9"
- distro_target: "rhel-8.8"
- settings:
- provisioning:
- tags:
- BusinessUnit: sst_upgrades@leapp_upstream_test
- env:
- SOURCE_RELEASE: "7.9"
- TARGET_RELEASE: "8.8"
-
-- &beaker-minimal-79to88
- <<: *beaker-minimal-7to8-abstract-ondemand
- trigger: pull_request
- labels:
- - beaker-minimal
- - beaker-minimal-7.9to8.8
- - 7.9to8.8
- identifier: sanity-7.9to8.8-beaker-minimal-ondemand
- tf_extra_params:
- test:
- tmt:
- plan_filter: 'tag:7to8 & tag:partitioning & enabled:true'
- environments:
- - tmt:
- context:
- distro: "rhel-7.9"
- distro_target: "rhel-8.8"
- settings:
- provisioning:
- tags:
- BusinessUnit: sst_upgrades@leapp_upstream_test
- env:
- SOURCE_RELEASE: "7.9"
- TARGET_RELEASE: "8.8"
-
-- &kernel-rt-79to88
- <<: *kernel-rt-abstract-7to8-ondemand
- trigger: pull_request
- labels:
- - kernel-rt
- - kernel-rt-7.9to8.8
- - 7.9to8.8
- identifier: sanity-7.9to8.8-kernel-rt-ondemand
- tf_extra_params:
- test:
- tmt:
- plan_filter: 'tag:7to8 & tag:kernel-rt & enabled:true'
- environments:
- - tmt:
- context:
- distro: "rhel-7.9"
- distro_target: "rhel-8.8"
- settings:
- provisioning:
- tags:
- BusinessUnit: sst_upgrades@leapp_upstream_test
- env:
- SOURCE_RELEASE: "7.9"
- TARGET_RELEASE: "8.8"
-
# Tests: 7.9 -> 8.10
- &sanity-79to810
<<: *sanity-abstract-7to8
@@ -397,14 +299,11 @@ jobs:
# ######################### Individual tests ########################### #
# ###################################################################### #
-# Tests: 8.8 -> 9.2
-- &sanity-88to92
+# Tests: 8.10 -> 9.4
+- &sanity-810to94
<<: *sanity-abstract-8to9
trigger: pull_request
- targets:
- epel-8-x86_64:
- distros: [RHEL-8.8.0-Nightly]
- identifier: sanity-8.8to9.2
+ identifier: sanity-8.10to9.4
tf_extra_params:
test:
tmt:
@@ -412,108 +311,74 @@ jobs:
environments:
- tmt:
context:
- distro: "rhel-8.8"
- distro_target: "rhel-9.2"
- settings:
- provisioning:
- tags:
- BusinessUnit: sst_upgrades@leapp_upstream_test
- env:
- SOURCE_RELEASE: "8.8"
- TARGET_RELEASE: "9.2"
- RHSM_REPOS_EUS: "eus"
-
-- &sanity-88to92-aws
- <<: *sanity-abstract-8to9-aws
- trigger: pull_request
- targets:
- epel-8-x86_64:
- distros: [RHEL-8.8-rhui]
- identifier: sanity-8.8to9.2-aws
- tf_extra_params:
- test:
- tmt:
- plan_filter: 'tag:8to9 & tag:rhui-tier[0] & enabled:true'
- environments:
- - tmt:
- context:
- distro: "rhel-8.8"
- distro_target: "rhel-9.2"
+ distro: "rhel-8.10"
+ distro_target: "rhel-9.4"
settings:
provisioning:
- post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys"
tags:
BusinessUnit: sst_upgrades@leapp_upstream_test
env:
- SOURCE_RELEASE: "8.8"
- TARGET_RELEASE: "9.2"
- RHUI_HYPERSCALER: aws
+ SOURCE_RELEASE: "8.10"
+ TARGET_RELEASE: "9.4"
-- &beaker-minimal-88to92
+# On-demand minimal beaker tests
+- &beaker-minimal-810to94
<<: *beaker-minimal-8to9-abstract-ondemand
trigger: pull_request
labels:
- beaker-minimal
- - beaker-minimal-8.8to9.2
- - 8.8to9.2
- targets:
- epel-8-x86_64:
- distros: [RHEL-8.8.0-Nightly]
- identifier: sanity-8.8to9.2-beaker-minimal-ondemand
+ - beaker-minimal-8.10to9.4
+ - 8.10to9.4
+ identifier: sanity-8.10to9.4-beaker-minimal-ondemand
tf_extra_params:
test:
tmt:
- plan_filter: 'tag:8to9 &tag:partitioning & enabled:true'
+ plan_filter: 'tag:8to9 & tag:partitioning & enabled:true'
environments:
- tmt:
context:
- distro: "rhel-8.8"
- distro_target: "rhel-9.2"
+ distro: "rhel-8.10"
+ distro_target: "rhel-9.4"
settings:
provisioning:
- post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys"
tags:
BusinessUnit: sst_upgrades@leapp_upstream_test
env:
- SOURCE_RELEASE: "8.8"
- TARGET_RELEASE: "9.2"
- RHSM_REPOS_EUS: "eus"
+ SOURCE_RELEASE: "8.10"
+ TARGET_RELEASE: "9.4"
-- &kernel-rt-88to92
+# On-demand kernel-rt tests
+- &kernel-rt-810to94
<<: *kernel-rt-abstract-8to9-ondemand
trigger: pull_request
labels:
- kernel-rt
- - kernel-rt-8.8to9.2
- - 8.8to9.2
- identifier: sanity-8.8to9.2-kernel-rt-ondemand
- targets:
- epel-8-x86_64:
- distros: [RHEL-8.8.0-Nightly]
+ - kernel-rt-8.10to9.4
+ - 8.10to9.4
+ identifier: sanity-8.10to9.4-kernel-rt-ondemand
tf_extra_params:
test:
tmt:
- plan_filter: 'tag:8to9 & tag:kernel-rt & enabled:true'
+ plan_filter: 'tag:8to9 & tag:kernel-rt & enabled:true'
environments:
- tmt:
context:
- distro: "rhel-8.8"
- distro_target: "rhel-9.2"
+ distro: "rhel-8.10"
+ distro_target: "rhel-9.4"
settings:
provisioning:
tags:
BusinessUnit: sst_upgrades@leapp_upstream_test
env:
- SOURCE_RELEASE: "8.8"
- TARGET_RELEASE: "9.2"
- RHSM_REPOS_EUS: "eus"
+ SOURCE_RELEASE: "8.10"
+ TARGET_RELEASE: "9.4"
-# Tests: 8.10 -> 9.4
-- &sanity-810to94
+# Tests: 8.10 -> 9.5
+- &sanity-810to95
<<: *sanity-abstract-8to9
trigger: pull_request
- identifier: sanity-8.10to9.4
+ identifier: sanity-8.10to9.5
tf_extra_params:
test:
tmt:
@@ -522,24 +387,24 @@ jobs:
- tmt:
context:
distro: "rhel-8.10"
- distro_target: "rhel-9.4"
+ distro_target: "rhel-9.5"
settings:
provisioning:
tags:
BusinessUnit: sst_upgrades@leapp_upstream_test
env:
SOURCE_RELEASE: "8.10"
- TARGET_RELEASE: "9.4"
+ TARGET_RELEASE: "9.5"
# On-demand minimal beaker tests
-- &beaker-minimal-810to94
+- &beaker-minimal-810to95
<<: *beaker-minimal-8to9-abstract-ondemand
trigger: pull_request
labels:
- beaker-minimal
- - beaker-minimal-8.10to9.4
- - 8.10to9.4
- identifier: sanity-8.10to9.4-beaker-minimal-ondemand
+ - beaker-minimal-8.10to9.5
+ - 8.10to9.5
+ identifier: sanity-8.10to9.5-beaker-minimal-ondemand
tf_extra_params:
test:
tmt:
@@ -548,24 +413,24 @@ jobs:
- tmt:
context:
distro: "rhel-8.10"
- distro_target: "rhel-9.4"
+ distro_target: "rhel-9.5"
settings:
provisioning:
tags:
BusinessUnit: sst_upgrades@leapp_upstream_test
env:
SOURCE_RELEASE: "8.10"
- TARGET_RELEASE: "9.4"
+ TARGET_RELEASE: "9.5"
# On-demand kernel-rt tests
-- &kernel-rt-810to94
+- &kernel-rt-810to95
<<: *kernel-rt-abstract-8to9-ondemand
trigger: pull_request
labels:
- kernel-rt
- - kernel-rt-8.10to9.4
- - 8.10to9.4
- identifier: sanity-8.10to9.4-kernel-rt-ondemand
+ - kernel-rt-8.10to9.5
+ - 8.10to9.5
+ identifier: sanity-8.10to9.5-kernel-rt-ondemand
tf_extra_params:
test:
tmt:
@@ -574,21 +439,21 @@ jobs:
- tmt:
context:
distro: "rhel-8.10"
- distro_target: "rhel-9.4"
+ distro_target: "rhel-9.5"
settings:
provisioning:
tags:
BusinessUnit: sst_upgrades@leapp_upstream_test
env:
SOURCE_RELEASE: "8.10"
- TARGET_RELEASE: "9.4"
+ TARGET_RELEASE: "9.5"
-# Tests: 8.10 -> 9.5
-- &sanity-810to95
+# Tests: 8.10 -> 9.6
+- &sanity-810to96
<<: *sanity-abstract-8to9
trigger: pull_request
- identifier: sanity-8.10to9.5
+ identifier: sanity-8.10to9.6
tf_extra_params:
test:
tmt:
@@ -597,24 +462,24 @@ jobs:
- tmt:
context:
distro: "rhel-8.10"
- distro_target: "rhel-9.5"
+ distro_target: "rhel-9.6"
settings:
provisioning:
tags:
BusinessUnit: sst_upgrades@leapp_upstream_test
env:
SOURCE_RELEASE: "8.10"
- TARGET_RELEASE: "9.5"
+ TARGET_RELEASE: "9.6"
# On-demand minimal beaker tests
-- &beaker-minimal-810to95
+- &beaker-minimal-810to96
<<: *beaker-minimal-8to9-abstract-ondemand
trigger: pull_request
labels:
- beaker-minimal
- - beaker-minimal-8.10to9.5
- - 8.10to9.5
- identifier: sanity-8.10to9.5-beaker-minimal-ondemand
+ - beaker-minimal-8.10to9.6
+ - 8.10to9.6
+ identifier: sanity-8.10to9.6-beaker-minimal-ondemand
tf_extra_params:
test:
tmt:
@@ -623,24 +488,24 @@ jobs:
- tmt:
context:
distro: "rhel-8.10"
- distro_target: "rhel-9.5"
+ distro_target: "rhel-9.6"
settings:
provisioning:
tags:
BusinessUnit: sst_upgrades@leapp_upstream_test
env:
SOURCE_RELEASE: "8.10"
- TARGET_RELEASE: "9.5"
+ TARGET_RELEASE: "9.6"
# On-demand kernel-rt tests
-- &kernel-rt-810to95
+- &kernel-rt-810to96
<<: *kernel-rt-abstract-8to9-ondemand
trigger: pull_request
labels:
- kernel-rt
- - kernel-rt-8.10to9.5
- - 8.10to9.5
- identifier: sanity-8.10to9.5-kernel-rt-ondemand
+ - kernel-rt-8.10to9.6
+ - 8.10to9.6
+ identifier: sanity-8.10to9.6-kernel-rt-ondemand
tf_extra_params:
test:
tmt:
@@ -649,11 +514,11 @@ jobs:
- tmt:
context:
distro: "rhel-8.10"
- distro_target: "rhel-9.5"
+ distro_target: "rhel-9.6"
settings:
provisioning:
tags:
BusinessUnit: sst_upgrades@leapp_upstream_test
env:
SOURCE_RELEASE: "8.10"
- TARGET_RELEASE: "9.5"
+ TARGET_RELEASE: "9.6"
diff --git a/repos/system_upgrade/common/files/prod-certs/9.6/279.pem b/repos/system_upgrade/common/files/prod-certs/9.6/279.pem
new file mode 100644
index 00000000..a9ef267b
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.6/279.pem
@@ -0,0 +1,37 @@
+-----BEGIN CERTIFICATE-----
+MIIGZTCCBE2gAwIBAgIJALDxRLt/tVEiMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI0MDgxMjE5MDIwNVoXDTQ0MDgx
+MjE5MDIwNVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs1MzUwNTA4
+OC05ZTk5LTQyODItYmE4OS1mMjhhNjAwZWNhZWFdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOB7jCB6zAJBgNVHRMEAjAAMEMGDCsGAQQBkggJAYIXAQQzDDFSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIFBvd2VyLCBsaXR0bGUgZW5kaWFuMBUGDCsG
+AQQBkggJAYIXAgQFDAM5LjYwGQYMKwYBBAGSCAkBghcDBAkMB3BwYzY0bGUwJwYM
+KwYBBAGSCAkBghcEBBcMFXJoZWwtOSxyaGVsLTktcHBjNjRsZTAdBgNVHQ4EFgQU
+YeogtTV8r2dkOv9rCOYQeNDNH5UwHwYDVR0jBBgwFoAUlv27HEBA/0CErbIfCybB
+w2pv1nwwDQYJKoZIhvcNAQELBQADggIBACRmyYbMhmuV+w4E+Hlonlt0mooB6EF6
+h/xknuBRw/saSL+7sLfbItaxWH5euxDc/5XvII2t0Jjl+GDnAjI75xrTuN3gT88Z
+9wd1kvDVqt46GI6VKVH1SujJoJpGenfhTVwenATZwdq260RgYgM3Zv1d3I4Lu/GY
+65T//j0/8tBmgqMc6BRvIrDa1wtVUbEwH3b/jwZoeitps1hKIH9yKZV79HZ7WVdb
+otDtsAk7VKZGRjGdvYsfWZrjmyyyc5wX2AemzpnhSm1kkGvOAjSMsJ0QcrSu/5vj
+AAK64J1tDA93WKsAqDnK7tUOx6qwICllbgVmKWl/02JH8ELs/sJnsWBEigfdZmTh
+/3Q8DPNni7scYkJ5Vs0tL8pke29C1bgAYjoBiQgf/ffNunTOWgdkdFHbd9I3+aLh
+pO7qqkndEkl85xkQJrZWO35NvPD4NAwnsDrIP0oJg5mYNTB11C5SlHhllT/Iu374
+8afWtoHaB50vsqM2dtvh/UsCyGynWYc93TLsU6a4gBl19D7VAx0fiOwdD+CyweUp
+xcos6MIIuFAFUIVpD+/3w499Lw9m5dcfApl6HCyQgAoafXCJjM/aXsSsSWr2d9TF
+c6S/uA2egh4fUv8zYnlZKUvCTu8kn4Lv439wW0xyIEB/sD/gXk9e8H9KkUuKDExx
+yTSjzqnPM82N
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.6/362.pem b/repos/system_upgrade/common/files/prod-certs/9.6/362.pem
new file mode 100644
index 00000000..d7c1a6be
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.6/362.pem
@@ -0,0 +1,37 @@
+-----BEGIN CERTIFICATE-----
+MIIGdDCCBFygAwIBAgIJALDxRLt/tVE4MA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI0MDgxMjE5MDIyNFoXDTQ0MDgx
+MjE5MDIyNFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs2MjBmNzkx
+MC0xNDk5LTRmMzAtYTk3NS1hYWFiOGQyMWE1NmNdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOB/TCB+jAJBgNVHRMEAjAAMEgGDCsGAQQBkggJAYJqAQQ4DDZSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIFBvd2VyLCBsaXR0bGUgZW5kaWFuIEJldGEw
+GgYMKwYBBAGSCAkBgmoCBAoMCDkuNiBCZXRhMBkGDCsGAQQBkggJAYJqAwQJDAdw
+cGM2NGxlMCwGDCsGAQQBkggJAYJqBAQcDBpyaGVsLTkscmhlbC05LWJldGEtcHBj
+NjRsZTAdBgNVHQ4EFgQUYeogtTV8r2dkOv9rCOYQeNDNH5UwHwYDVR0jBBgwFoAU
+lv27HEBA/0CErbIfCybBw2pv1nwwDQYJKoZIhvcNAQELBQADggIBADzaMxiligMU
+vVUxGdlKgceVXcZR6TC/nACDxyRFm7JGKYC0Ncxum2RWQ10mMD1HM1xa0NVz3BLO
+a+VrZ3MGTpKuWQgN0TKIzjykxxfJMip8AVYx6UvQ4SxxZWFIVPuC0XYfYc2pOV5A
+OcO63O+R7QVvLpZ3q7tX3uAXCfWWvJkoJ+MzKCl3lEmeKAcaikcums+aOd/JwTSo
+bt5ExLgC4J1cvevH+IBCUbmN1r+xrkHNiNWjys0MIo1JsPmi1A1kDeORXPN4xXvH
+x69z9SuHrUd2iFXpMfezqZsmiaa/FP6UOKwpDyEqZGE+/aT/RBza9BeYX74vDpFI
+h0vMtx3lHE+PGh7a6kfXV2GL4IP7w5MbdZQIJ/ZS4oT/zG3E2wRnGD4+oQ3Bm/TV
+Or0IHnafxXYXgsQ6bsMsZN7BRZ8VfaEdM3IVRqVyPVWzo0kYkHZcnVQpabmCWPjc
+NUwMJDni3LfjxKreHLDQBEkwX4XoZnSq/xMHO6ppe0sZ2XgAOsw/B92ekTTEdoKZ
+dEQBkqv2FRUbMoILnNVWJp4yGMOPcTl7hrlcJjKRvKs1hKWkQKN6g4YDHCglkVfH
+ltDGkolsUYFvoygoi8VCCDfz7whn6pXmzlpk1VkzE+V1R88Tf5ygrSNWETOZMU/B
+5P07jdNriEBCZaCPq7T8odOt1cKZpVdg
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.6/363.pem b/repos/system_upgrade/common/files/prod-certs/9.6/363.pem
new file mode 100644
index 00000000..f75b478d
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.6/363.pem
@@ -0,0 +1,37 @@
+-----BEGIN CERTIFICATE-----
+MIIGZjCCBE6gAwIBAgIJALDxRLt/tVE3MA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI0MDgxMjE5MDIyNFoXDTQ0MDgx
+MjE5MDIyNFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs1MDE5NmU1
+ZC1lNDgzLTQwNDAtYjcwYS03NDg5NDliZTRjZmFdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOB7zCB7DAJBgNVHRMEAjAAMDoGDCsGAQQBkggJAYJrAQQqDChSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIEFSTSA2NCBCZXRhMBoGDCsGAQQBkggJAYJr
+AgQKDAg5LjYgQmV0YTAZBgwrBgEEAZIICQGCawMECQwHYWFyY2g2NDAsBgwrBgEE
+AZIICQGCawQEHAwacmhlbC05LHJoZWwtOS1iZXRhLWFhcmNoNjQwHQYDVR0OBBYE
+FGHqILU1fK9nZDr/awjmEHjQzR+VMB8GA1UdIwQYMBaAFJb9uxxAQP9AhK2yHwsm
+wcNqb9Z8MA0GCSqGSIb3DQEBCwUAA4ICAQBiaXwTsDt1Kz79ZJ3TnNDuX3IntXuS
+DxIGAhpgJ+ynaSULh8xL6pq5L6EtYnVzpO6T+j2ADbJlLkIRV0fMD6MMZo4YQtHH
+NofoNgJoYI4uXcCKYS2vIUw+0Br7qx8BPTb5jP+VRl9LU8W299nYOTp+vY7GQ0Ny
+hT66G+FJfo5CqHZpMTGgJbpjoP3DMpXZcARBnjQ0LhvjvcalGmPP4//tcPNwft6r
+ei8fxBvpmCXDS9/vXwiEf6jEidqq1Q6bCdL20Y1ZPY13oUEYFqrf8PhexlV1yoD4
+F4gEbVHPQ4yvH3D6xIAFE4959+H+dgMfXqn9gkUvnTMdyfzcUYGLTAib3zb4eW/J
+anzwfBAcssBzjU1v/txWMRlZI1GJFNtboAixnRksj1epE848J3bjtiw3R/Z5grFn
+dieJwjfM4AEDrpRmA5tDnv5z73k1djJbacL7fTIyTuSnDbjH2J5PtCAvWTLYq/kP
+h8E3sJ9zXP2nJMBRgQiZJY98bPKLT63ngRScI+CZs1fLvaoCq0o+qkcfnDEja3aH
+TQYXHVZblA4TYnD8Vh8gKwCt8+1WF5C9BGcMmKvozuuIaIJgT21V+DLzfTESpZz7
+lcPKk/3dBFtFGOdA4SQ4o/dxItJ0Eay1SlOI9xL9KgTNqv6ftA+9kxZ0MSPwO7eG
+b5Am4gNTK734uQ==
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.6/419.pem b/repos/system_upgrade/common/files/prod-certs/9.6/419.pem
new file mode 100644
index 00000000..e2d3ee5b
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.6/419.pem
@@ -0,0 +1,36 @@
+-----BEGIN CERTIFICATE-----
+MIIGVzCCBD+gAwIBAgIJALDxRLt/tVEhMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI0MDgxMjE5MDIwNVoXDTQ0MDgx
+MjE5MDIwNVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFtkN2E2ZDhi
+Mi0yZjMzLTRhYzMtYmM5Ni1mMjU5MTNmZTQxNWNdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOB4DCB3TAJBgNVHRMEAjAAMDUGDCsGAQQBkggJAYMjAQQlDCNSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIEFSTSA2NDAVBgwrBgEEAZIICQGDIwIEBQwD
+OS42MBkGDCsGAQQBkggJAYMjAwQJDAdhYXJjaDY0MCcGDCsGAQQBkggJAYMjBAQX
+DBVyaGVsLTkscmhlbC05LWFhcmNoNjQwHQYDVR0OBBYEFGHqILU1fK9nZDr/awjm
+EHjQzR+VMB8GA1UdIwQYMBaAFJb9uxxAQP9AhK2yHwsmwcNqb9Z8MA0GCSqGSIb3
+DQEBCwUAA4ICAQCJqWcTJezGVGxsNvFkbsrvbHhJBuBMeDZZuOLaXaQVyfNwYRS2
+2k/oUhhQQMfiDiaLkz7yz0Zw5clC/K5G6Sg9+nWDA57lsZuNV5CnSBYOJf2jY2fK
+ue/1M75Y4fJAKtBxpvkFaIaKyMQ/0VC67OFYtbBZEOuwIpQh9aPFHnrh2WnpcUvJ
+B93O0fsRjHK30E7jF8ncNmhevMLvVlxH0JjfbvcU3dGG964K41tFiozshvnAGFce
+kFzxVVYQL3ZKycqonwFr3BbzgKwx5EXUFBg/ax694aijeeVA6yuQXWJvV42IjUeW
+vn+dvRrHh2fv4MXuyc+oljbXaEZE7m9gtWBtUEBHqWoQz6rQ25uZylnK+SDWE5bt
+xM+1qGUSf90VvyFO3fu1qeVVr0LbnMAgO9YnJjLRQax0mgj3tZTRvM72W4hfBy36
+ndYnJE2le5xYWVl1Hd29dil70cokj5hN8nQI9eStfcOvs9Vw2ngIL/H3+QTRS/NO
+l7MHQXbriLAaHavED6B50dEfw8pQXybEju4Rs+nDgm5hdE7FjbVflVQejSjyHIMd
+AQnwrDSMPRezCJFHQeB0t7oaHpAHECc2zBpvcvy7qCN2Z08h6jdzfrp15UDkHEcy
+Qa9dtYRUthI3pjGGu7WTPwX9y0veot3EZRnEzeIprIsHcMKfmkMg4HRJ3A==
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.6/433.pem b/repos/system_upgrade/common/files/prod-certs/9.6/433.pem
new file mode 100644
index 00000000..ac588c1c
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.6/433.pem
@@ -0,0 +1,37 @@
+-----BEGIN CERTIFICATE-----
+MIIGaTCCBFGgAwIBAgIJALDxRLt/tVE5MA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI0MDgxMjE5MDIyNFoXDTQ0MDgx
+MjE5MDIyNFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs4NzcwNzQ4
+MS02MGEwLTQwYTUtYWVhMi0xNjNmODUyMzI3ZTFdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOB8jCB7zAJBgNVHRMEAjAAMEEGDCsGAQQBkggJAYMxAQQxDC9SZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIElCTSB6IFN5c3RlbXMgQmV0YTAaBgwrBgEE
+AZIICQGDMQIECgwIOS42IEJldGEwFwYMKwYBBAGSCAkBgzEDBAcMBXMzOTB4MCoG
+DCsGAQQBkggJAYMxBAQaDBhyaGVsLTkscmhlbC05LWJldGEtczM5MHgwHQYDVR0O
+BBYEFGHqILU1fK9nZDr/awjmEHjQzR+VMB8GA1UdIwQYMBaAFJb9uxxAQP9AhK2y
+HwsmwcNqb9Z8MA0GCSqGSIb3DQEBCwUAA4ICAQC57eNKMpTQuIEEoYXkhD0oYkgD
+RzDFyqKZgyK0IdOy6t0d9GcMY/nI/uYQltUC+HWBUJWYkHc84xjfP3ITfuHWP8KP
+3qdXLPwTDcNVUGtLgXIfEz4FEM4OVwfM2X0jIcLfkDmZzffWjHgBpAUfZM6fBvXl
+soPJ+s4/vIUFNbVtcJh9iw4glt/GFBOX/bNPV9kniAAYuyabW43X7GxfREJY18Db
++Fv7c+z2eM4fQFpLkSEZwsNN68G4OHDC7tWsYtCRocipWGs6lN5MBNXC0q90ds5O
+kOLRfHKOLFqbZnBNdgSOlsf+ENH3exUhoDvZE0gnAVALABVv6PCtsHn2rPLonsrB
+l9ZKqCVVDpQMDXmZC79XKB0nVrNQ7qYorCVnYqnTAkuvw4BuXpKASaSCDSRWLQN0
+H89phUM64VnyPD5pBTw+YJURDm8cwD5e6HaXhKzG1ca9PWL+RVxedB4Rl2VG00fE
+QUBbHZktH+H1P3MtqALB7IUav4IuBgdF27W55GExCgshRuyV6/VHmYiD+L52XxCH
+71mdWTp6JR1/hMYKPLhc5/ESBoMpqMXa4UWIOtMWiafWaDS4Cib+uyIIzCgqW8ee
+t+yQtCs7MUUd6t87XP7/TTQJe6e0JsyDnME9br0E4g57Y8cXjOILGg/ihqBFOGt1
+vhbX7w/YRjVpwJhi9w==
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.6/479.pem b/repos/system_upgrade/common/files/prod-certs/9.6/479.pem
new file mode 100644
index 00000000..c2bac3ee
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.6/479.pem
@@ -0,0 +1,36 @@
+-----BEGIN CERTIFICATE-----
+MIIGVTCCBD2gAwIBAgIJALDxRLt/tVEkMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI0MDgxMjE5MDIwNVoXDTQ0MDgx
+MjE5MDIwNVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFtjZWRlZTRi
+My0xOGFhLTQwMzMtYjE3OS01OTkwMjk2OGFkZjhdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOB3jCB2zAJBgNVHRMEAjAAMDUGDCsGAQQBkggJAYNfAQQlDCNSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIHg4Nl82NDAVBgwrBgEEAZIICQGDXwIEBQwD
+OS42MBgGDCsGAQQBkggJAYNfAwQIDAZ4ODZfNjQwJgYMKwYBBAGSCAkBg18EBBYM
+FHJoZWwtOSxyaGVsLTkteDg2XzY0MB0GA1UdDgQWBBRh6iC1NXyvZ2Q6/2sI5hB4
+0M0flTAfBgNVHSMEGDAWgBSW/bscQED/QIStsh8LJsHDam/WfDANBgkqhkiG9w0B
+AQsFAAOCAgEADoQWjROe9jPuYIB5cW7URXgDPVK3cpGnlKxEINdXT+dL7N2qNijy
+BcV0+SCHmswZ+F7OTozyGzGbJCrSHZrvF2lp2L8YddvkIFsWqrPkseU/0/oog5Qf
+ULA5WzV12u0Ra/DWinhUq6NZWLAt/FvJ7+WHPdJ7B0WsiA751l7crvfKfen93Xzb
+0eakHrotcPi9YH/Jez8xjs4Wc3ra/7CbLqpsHuWzgzwJabiuLaf5PK95VVedzQIx
+lT+N6JydFIkXkofQJwTptPTh9lDbZDe33/dg5zX3l9CAQK7JYZKYoUzLirM2LO7s
+TGejW1mKGB+O23bQBGRkLoD4kbY17UMCFcKD7mZSO6laoOBOk8NYUxTDjT4e3cUB
+dHw5YVrj+BSHzgOGpc1KrmuBiOWZrZf4iaFuz4Kr88tL6TT6IH5UmfP3fuvvMyXs
+OWqTAfr/CPeJjLhjmbEagkS0kpgkyXodY8sq2Ph5vpn0o1QYNfy6KRtD/m6YaF7G
+SDkWEY5li338SROIFV6X8lKEzHMfQZzhqQWoJWQlFuAdByKrxz8x1RJZTkIT82h6
+uM/GO3v5xT5UXXa2x1X0JtS9rPGdnmAKQLJJz07s+2WCRqCFuBxJsV+aWCRLsab4
+jpo1NG0RH0KorjvBBMLx8bVSbl4YFJdOcomlRVrsC2iMUwl+PH5Ah4g=
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.6/486.pem b/repos/system_upgrade/common/files/prod-certs/9.6/486.pem
new file mode 100644
index 00000000..e130d5dc
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.6/486.pem
@@ -0,0 +1,37 @@
+-----BEGIN CERTIFICATE-----
+MIIGZDCCBEygAwIBAgIJALDxRLt/tVE6MA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI0MDgxMjE5MDIyNFoXDTQ0MDgx
+MjE5MDIyNFowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFsyMTM1ODk1
+Yi1mMDRiLTRlNjUtOWYzMC04MmRlYmQ0Njc0NjNdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOB7TCB6jAJBgNVHRMEAjAAMDoGDCsGAQQBkggJAYNmAQQqDChSZWQgSGF0
+IEVudGVycHJpc2UgTGludXggZm9yIHg4Nl82NCBCZXRhMBoGDCsGAQQBkggJAYNm
+AgQKDAg5LjYgQmV0YTAYBgwrBgEEAZIICQGDZgMECAwGeDg2XzY0MCsGDCsGAQQB
+kggJAYNmBAQbDBlyaGVsLTkscmhlbC05LWJldGEteDg2XzY0MB0GA1UdDgQWBBRh
+6iC1NXyvZ2Q6/2sI5hB40M0flTAfBgNVHSMEGDAWgBSW/bscQED/QIStsh8LJsHD
+am/WfDANBgkqhkiG9w0BAQsFAAOCAgEAHhaEBX5fhB2zweFT0SuLB3OB11aE3Tjy
+q0dNxm8t3d5glgtratmAkPD+6Ct0gkdoGJ8GcBsFVzzM2ig236YOy8dCPVWBzLtd
+Oni5DpjSqnMX6yq4PuSViF1w+9pCKPJqzQK/u/F0njkwdu0mAwc1fkiCR0B6oB7s
+m1rHhuyC4PkAj5RYQ6+M4MpGfce0HSpUCzlnAlHYgjvmT3qCUvlEYLPg4/Z+wihZ
+1xdhhhoLNi43IdfmFQlTSNZqTwLB780qzHzi+UYgWg7wflTn8m1LAOlad5HWJFnE
+y6JnX+c+vfzvxFBSZABKJsZY/YKIAV14g42XL8zhIpJHtdYnUaveo1M90UAvSECP
+RAnPUIKWM1VYKfa2PpEC2/157KOQ4y7BUrAUlqs1qh8FoGCZYHMRmgYqHoycIvw+
+gs1gH77O9EyOMMjwyQqBUnzylJfhjkEgINDIGbPEiQpI33TBniw5yMRZ74XWOoi3
+rOIiaYxHBDpJ25LwbZsJOQUPmIKBTOpLK9N4IK7UvA7O8HCEEJz2+VLVf2svaoU1
+fd7MUYh9aCjEocKRQknxScJLVBXcFRy0I+tfVQwkcLqWCOrp3qpNmYwhC+C0vYtR
+/LZ58vf60+m+mKUmEJWF6X7QGFZptsc0ERme6sE1E41iNAIq3BsBMU/hQIVP50k4
+T3KefQomWk4=
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/prod-certs/9.6/72.pem b/repos/system_upgrade/common/files/prod-certs/9.6/72.pem
new file mode 100644
index 00000000..35927fbc
--- /dev/null
+++ b/repos/system_upgrade/common/files/prod-certs/9.6/72.pem
@@ -0,0 +1,36 @@
+-----BEGIN CERTIFICATE-----
+MIIGVjCCBD6gAwIBAgIJALDxRLt/tVEjMA0GCSqGSIb3DQEBCwUAMIGuMQswCQYD
+VQQGEwJVUzEXMBUGA1UECAwOTm9ydGggQ2Fyb2xpbmExFjAUBgNVBAoMDVJlZCBI
+YXQsIEluYy4xGDAWBgNVBAsMD1JlZCBIYXQgTmV0d29yazEuMCwGA1UEAwwlUmVk
+IEhhdCBFbnRpdGxlbWVudCBQcm9kdWN0IEF1dGhvcml0eTEkMCIGCSqGSIb3DQEJ
+ARYVY2Etc3VwcG9ydEByZWRoYXQuY29tMB4XDTI0MDgxMjE5MDIwNVoXDTQ0MDgx
+MjE5MDIwNVowRDFCMEAGA1UEAww5UmVkIEhhdCBQcm9kdWN0IElEIFs0ZDhmOTky
+Yy04NDBjLTQ4MzYtODVkOS0zYWI5YjA1ZjViY2FdMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEAxj9J04z+Ezdyx1U33kFftLv0ntNS1BSeuhoZLDhs18yk
+sepG7hXXtHh2CMFfLZmTjAyL9i1XsxykQpVQdXTGpUF33C2qBQHB5glYs9+d781x
+8p8m8zFxbPcW82TIJXbgW3ErVh8vk5qCbG1cCAAHb+DWMq0EAyy1bl/JgAghYNGB
+RvKJObTdCrdpYh02KUqBLkSPZHvo6DUJFN37MXDpVeQq9VtqRjpKLLwuEfXb0Y7I
+5xEOrR3kYbOaBAWVt3mYZ1t0L/KfY2jVOdU5WFyyB9PhbMdLi1xE801j+GJrwcLa
+xmqvj4UaICRzcPATP86zVM1BBQa+lilkRQes5HyjZzZDiGYudnXhbqmLo/n0cuXo
+QBVVjhzRTMx71Eiiahmiw+U1vGqkHhQNxb13HtN1lcAhUCDrxxeMvrAjYdWpYlpI
+yW3NssPWt1YUHidMBSAJ4KctIf91dyE93aStlxwC/QnyFsZOmcEsBzVCnz9GmWMl
+1/6XzBS1yDUqByklx0TLH+z/sK9A+O2rZAy1mByCYwVxvbOZhnqGxAuToIS+A81v
+5hCjsCiOScVB+cil30YBu0cH85RZ0ILNkHdKdrLLWW4wjphK2nBn2g2i3+ztf+nQ
+ED2pQqZ/rhuW79jcyCZl9kXqe1wOdF0Cwah4N6/3LzIXEEKyEJxNqQwtNc2IVE8C
+AwEAAaOB3zCB3DAJBgNVHRMEAjAAMDsGCysGAQQBkggJAUgBBCwMKlJlZCBIYXQg
+RW50ZXJwcmlzZSBMaW51eCBmb3IgSUJNIHogU3lzdGVtczAUBgsrBgEEAZIICQFI
+AgQFDAM5LjYwFgYLKwYBBAGSCAkBSAMEBwwFczM5MHgwJAYLKwYBBAGSCAkBSAQE
+FQwTcmhlbC05LHJoZWwtOS1zMzkweDAdBgNVHQ4EFgQUYeogtTV8r2dkOv9rCOYQ
+eNDNH5UwHwYDVR0jBBgwFoAUlv27HEBA/0CErbIfCybBw2pv1nwwDQYJKoZIhvcN
+AQELBQADggIBANOzzfUKjlJsgJWUryjKfzPYISkCZXauHqBcST4N1HP1GA8tmMXi
+bgh14+l7ZO8EloFvEGANsX2ffMfauuJx2NV6ks07NHWuM7W9kghDe5ZccrJCz88E
+1zdvyWae5oSvTwfnvR/b63duOhs88u7NCQN2+n+pmJA0dPWbGTaIp3n4kJg8YKnd
+O8Nct2doNS+1rrLpRmVKQy/E7fAXQzt1Bxqs2hORqbgffiSE9a+4akitY97GXRBm
+nOO2DkyEW0xPtdy3zDvL7o7b1B0gdMOwqEolgGuDFsrfD+7ofpwOWjS+83gF6hMP
+5YVD3sugu6xzCx6y7Yl/BfX4qvvT4YHtYob5rQA/t7JY4u4ryadkUxQLMEccMsyS
+pKZQ8KFC5ZNJVK/ievkcBCsBlulbRftVJGF3TA2Hl2aBuMhGdUR5y/Q89WHUzeV6
+U6AVzyEsvIJguswvKvFAyHwNuViCfFCkjNkJolvd/g03OSy1A7piQaU20QyltWmx
+FILCR/DBUbCWIzKTfkLr93TbV2b1AH9uRW1SAGrftuevVXrNemWIwq1x/VgjDm3o
+nk637pnEfZZzX8T2gO5z5yjlP0PR4s7hKkmp3TmAeG9015pFxPnD3AMI261srQ+c
+KZBdIc5UseQo/4KvRKZ1CzxPh0WjJCzc/C/TKzIlEdELq/rnKGuqHKB9
+-----END CERTIFICATE-----
diff --git a/repos/system_upgrade/common/files/upgrade_paths.json b/repos/system_upgrade/common/files/upgrade_paths.json
index 5399f148..cc9dcdb5 100644
--- a/repos/system_upgrade/common/files/upgrade_paths.json
+++ b/repos/system_upgrade/common/files/upgrade_paths.json
@@ -1,19 +1,17 @@
{
"default": {
- "7.9": ["8.8", "8.10"],
- "8.8": ["9.2"],
- "8.10": ["9.4", "9.5"],
+ "7.9": ["8.10"],
+ "8.10": ["9.4", "9.5", "9.6"],
"9.6": ["10.0"],
- "7": ["8.8", "8.10"],
- "8": ["9.2", "9.4", "9.5"],
+ "7": ["8.10"],
+ "8": ["9.4", "9.5", "9.6"],
"9": ["10.0"]
},
"saphana": {
- "7.9": ["8.10", "8.8"],
- "7": ["8.10", "8.8"],
- "8.8": ["9.2"],
- "8.10": ["9.4"],
- "8": ["9.4", "9.2"],
+ "7.9": ["8.10"],
+ "7": ["8.10"],
+ "8.10": ["9.6", "9.4"],
+ "8": ["9.6", "9.4"],
"9.6": ["10.0"],
"9": ["10.0"]
}
diff --git a/repos/system_upgrade/common/libraries/config/version.py b/repos/system_upgrade/common/libraries/config/version.py
index 152d9112..d710a647 100644
--- a/repos/system_upgrade/common/libraries/config/version.py
+++ b/repos/system_upgrade/common/libraries/config/version.py
@@ -18,7 +18,7 @@ OP_MAP = {
_SUPPORTED_VERSIONS = {
# Note: 'rhel-alt' is detected when on 'rhel' with kernel 4.x
'7': {'rhel': ['7.9'], 'rhel-alt': [], 'rhel-saphana': ['7.9']},
- '8': {'rhel': ['8.8', '8.10'], 'rhel-saphana': ['8.8', '8.10']},
+ '8': {'rhel': ['8.10'], 'rhel-saphana': ['8.10']},
'9': {'rhel': ['9.4', '9.5', '9.6'], 'rhel-saphana': ['9.4', '9.6']},
}
--
2.47.0
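
Editor's note, for orientation only: the upgrade_paths.json and version.py hunks above trim the supported paths so that 7.9 targets only 8.10, 8.10 targets 9.4/9.5/9.6, and 9.6 targets 10.0, with 8.8 dropped from _SUPPORTED_VERSIONS. A minimal sketch of how such a mapping could be consulted follows; the file path, the flavour parameter and the helper name are illustrative assumptions, not the actual leapp library API.

import json

def possible_targets(source_version, flavour="default",
                     path="repos/system_upgrade/common/files/upgrade_paths.json"):
    # Illustrative helper only, not the real leapp implementation.
    with open(path) as f:
        paths = json.load(f)
    table = paths.get(flavour, {})
    # Try the exact minor version first (e.g. "8.10"), then the major ("8").
    return table.get(source_version) or table.get(source_version.split(".")[0], [])

# With the data shown in the hunk above, possible_targets("8.10") would yield
# ["9.4", "9.5", "9.6"], while possible_targets("8.8") would yield an empty list.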

View File

@ -0,0 +1,29 @@
From f50e3474a619ed338c2514933303320d986e6ffe Mon Sep 17 00:00:00 2001
From: Petr Stodulka <pstodulk@redhat.com>
Date: Thu, 14 Nov 2024 16:26:55 +0100
Subject: [PATCH 40/40] spec: drop the /etc/leapp/actor_confid.d dir
The directory should be provided by the framework. leapp-repository
should only provide content inside it, if any is present.
---
packaging/leapp-repository.spec | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/packaging/leapp-repository.spec b/packaging/leapp-repository.spec
index 2bb52505..6676d907 100644
--- a/packaging/leapp-repository.spec
+++ b/packaging/leapp-repository.spec
@@ -250,9 +250,7 @@ install -m 0755 -d %{buildroot}%{_sysconfdir}/leapp/files/
install -m 0644 etc/leapp/transaction/* %{buildroot}%{_sysconfdir}/leapp/transaction
install -m 0644 etc/leapp/files/* %{buildroot}%{_sysconfdir}/leapp/files
-# Actor configuration dir
-install -m 0755 -d %{buildroot}%{_sysconfdir}/leapp/actor_conf.d/
-# uncomment to install existing configs
+# uncomment to install existing configs if any exists
#install -m 0644 etc/leapp/actor_conf.d/* %%{buildroot}%%{_sysconfdir}/leapp/actor_conf.d
# install CLI commands for the leapp utility on the expected path
--
2.47.0

View File

@ -0,0 +1,74 @@
From 3c3421a0f155fe3bdfaee74c5345e86874684a09 Mon Sep 17 00:00:00 2001
From: Michal Hecko <mhecko@redhat.com>
Date: Tue, 19 Nov 2024 10:56:50 +0100
Subject: [PATCH] feat(net-naming-scheme): enable by default
This commit enables the use of net.naming-scheme for 8>9 upgrades by
default. The previously used environment variable
LEAPP_USE_NET_NAMING_SCHEMES is replaced with
LEAPP_DISABLE_NET_NAMING_SCHEMES, which has inverse semantics.
---
.../libraries/persistentnetnamesconfig.py | 11 ++++++++---
.../libraries/emit_net_naming.py | 4 ++--
.../tests/test_emit_net_naming_scheme.py | 4 ++--
3 files changed, 12 insertions(+), 7 deletions(-)
diff --git a/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py b/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py
index b2c7f5ff..c90d13f2 100644
--- a/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py
+++ b/repos/system_upgrade/common/actors/persistentnetnamesconfig/libraries/persistentnetnamesconfig.py
@@ -39,9 +39,14 @@ def generate_link_file(interface):
@suppress_deprecation(InitrdIncludes)
def process():
- if get_env('LEAPP_USE_NET_NAMING_SCHEMES', '0') == '1' and version.get_target_major_version() == '9':
- # We can use this only for 8>9, for now
- api.current_logger().info('Skipping generation of .link files renaming NICs as LEAPP_USE_NET_NAMING_SCHEMES=1')
+ are_net_schemes_enabled = get_env('LEAPP_DISABLE_NET_NAMING_SCHEMES', '0') != '1'
+ is_upgrade_8to9 = version.get_target_major_version() == '9'
+
+ if are_net_schemes_enabled and is_upgrade_8to9:
+ # For 8>9 we are using net.naming_scheme kernel arg by default - do not generate link files
+ msg = ('Skipping generation of .link files renaming NICs as net.naming-scheme '
+ '{LEAPP_DISABLE_NET_NAMING_SCHEMES != 1} is enabled and upgrade is 8>9')
+ api.current_logger().info(msg)
return
if get_env('LEAPP_NO_NETWORK_RENAMING', '0') == '1':
diff --git a/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py
index 726bb459..bab62a56 100644
--- a/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py
+++ b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/libraries/emit_net_naming.py
@@ -44,9 +44,9 @@ def is_net_scheme_compatible_with_current_cmdline():
def emit_msgs_to_use_net_naming_schemes():
- is_env_var_set = get_env('LEAPP_USE_NET_NAMING_SCHEMES', '0') == '1'
+ is_feature_enabled = get_env('LEAPP_DISABLE_NET_NAMING_SCHEMES', '0') != '1'
is_upgrade_8to9 = version.get_target_major_version() == '9'
- is_net_naming_enabled_and_permitted = is_env_var_set and is_upgrade_8to9
+ is_net_naming_enabled_and_permitted = is_feature_enabled and is_upgrade_8to9
if not is_net_naming_enabled_and_permitted:
return
diff --git a/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/tests/test_emit_net_naming_scheme.py b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/tests/test_emit_net_naming_scheme.py
index 7a5eeba5..acf72241 100644
--- a/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/tests/test_emit_net_naming_scheme.py
+++ b/repos/system_upgrade/el8toel9/actors/emit_net_naming_scheme/tests/test_emit_net_naming_scheme.py
@@ -51,11 +51,11 @@ def test_is_net_scheme_compatible_with_current_cmdline(monkeypatch, kernel_args,
]
)
def test_emit_msgs_to_use_net_naming_schemes(monkeypatch, is_net_scheme_enabled, is_current_cmdline_compatible):
- envvar_value = '1' if is_net_scheme_enabled else '0'
+ envvar_value = '0' if is_net_scheme_enabled else '1'
mocked_actor = CurrentActorMocked(src_ver='8.10',
dst_ver='9.5',
- envars={'LEAPP_USE_NET_NAMING_SCHEMES': envvar_value})
+ envars={'LEAPP_DISABLE_NET_NAMING_SCHEMES': envvar_value})
monkeypatch.setattr(api, 'current_actor', mocked_actor)
monkeypatch.setattr(api, 'produce', produce_mocked())
--
2.47.0
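
Editor's note: the patch above inverts the switch, so net.naming-scheme handling is on by default for 8>9 upgrades and is skipped only when LEAPP_DISABLE_NET_NAMING_SCHEMES=1. A condensed sketch of the resulting gate, assuming an environment lookup with the same semantics as get_env in the diff:

import os

def net_naming_enabled(target_major_version, environ=os.environ):
    # Sketch of the predicate used in the hunks above:
    # the feature applies to 8>9 upgrades unless explicitly disabled.
    disabled = environ.get('LEAPP_DISABLE_NET_NAMING_SCHEMES', '0') == '1'
    return (not disabled) and target_major_version == '9'

# Setting LEAPP_DISABLE_NET_NAMING_SCHEMES=1 in the environment before running
# leapp should fall back to the older .link-file based NIC renaming.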

View File

@ -53,13 +53,13 @@ py2_byte_compile "%1" "%2"}
Epoch: 1
Name: leapp-repository
Version: 0.21.0
Release: 2%{?dist}.elevate.4
Release: 4%{?dist}.elevate.1
Summary: Repositories for leapp
License: ASL 2.0
URL: https://oamg.github.io/leapp/
Source0: https://github.com/oamg/%{name}/archive/v%{version}.tar.gz#/%{name}-%{version}.tar.gz
Source1: deps-pkgs-10.tar.gz
Source1: deps-pkgs-11.tar.gz
# NOTE: Our packages must be noarch. Do no drop this in any way.
BuildArch: noarch
@ -67,7 +67,47 @@ BuildArch: noarch
### PATCHES HERE
# Patch0001: filename.patch
Patch0001: 0001-rhui-alibaba-add-ARM-RHEL8-and-RHEL9-setup-entries-1.patch
Patch0010: leapp-repository-0.21.0-elevate.patch
Patch0002: 0002-don-t-require-all-versions-to-be-defined-for-obsolet.patch
Patch0003: 0003-Add-RHEL-10.0-prod-certs.patch
Patch0004: 0004-properly-scope-try-except-when-loading-obsoleted-key.patch
Patch0005: 0005-Update-references-from-master-branch-to-main.patch
Patch0006: 0006-ReadOfKernelArgsError-fix-the-error.patch
Patch0007: 0007-pylint-exclude-rule-too-many-positional-arguments-co.patch
Patch0008: 0008-pam_userdb-migrate-backend-database.patch
Patch0009: 0009-Replace-mirror.centos.org-with-vault.centos.org-Cent.patch
Patch0010: 0010-kernelcmdlineconfig-Add-Report-to-produces-tuple.patch
Patch0011: 0011-kernelcmdlineconfig-Use-args-from-first-entry-when-m.patch
Patch0012: 0012-check_microarch-refactor-to-handle-possible-future-r.patch
Patch0013: 0013-check_microarch-add-rhel10-requirements.patch
Patch0014: 0014-Skip-checking-files-under-.-directory-hash-dir.patch
Patch0015: 0015-lib-overlay-cap-the-max-size-of-disk-images.patch
Patch0016: 0016-Raise-proper-error-when-ModelViolationError-occurs.patch
Patch0017: 0017-InhibitWhenLuks-simplify-the-logic.patch
Patch0018: 0018-StorageScanner-Add-parent-device-name-to-lsblk.patch
Patch0019: 0019-LuksScanner-Add-LUKS-dump-scanner-and-models.patch
Patch0020: 0020-InhibitWhenLuks-allow-upgrades-for-LUKS2-bound-to-Cl.patch
Patch0021: 0021-Rename-inhibitwhenluks-actor-to-checkluks.patch
Patch0022: 0022-Fix-IPU-being-blocked-by-resource-limitations.patch
Patch0023: 0023-feature-add-possibility-to-use-net.naming-scheme.patch
Patch0024: 0024-prevent-the-feature-for-being-used-outside-8-9.patch
Patch0025: 0025-fix-condition-on-when-net-naming-is-emitted.patch
Patch0026: 0026-scangrubdevpartitionlayout-Skip-warning-msgs.patch
Patch0027: 0027-Workaround-for-ARM-Upgrades-from-RHEL8-to-RHEL9.5.patch
Patch0028: 0028-Add-el9toel10-actor-to-handle-symlink-directory-with.patch
Patch0029: 0029-Expand-on-the-actor-docstring-for-the-el8-el9-rubyge.patch
Patch0030: 0030-data-update-data-files.patch
Patch0031: 0031-Packaging-Require-leapp-framework-6.x-update-leapp-d.patch
Patch0032: 0032-spec-create-etc-leapp-actor_conf.d.patch
Patch0033: 0033-spec-drop-.gitkeep-files-from-the-RPM.patch
Patch0034: 0034-cli-load-actor-configuration.patch
Patch0035: 0035-configs-common-introduce-RHUI-configuration.patch
Patch0036: 0036-check_rhui-read-RHUI-configuration.patch
Patch0037: 0037-testutils-add-support-for-configs.patch
Patch0038: 0038-userspacegen-rhui-remove-repofiles-only-if-now-owned.patch
Patch0039: 0039-Enable-IPU-for-EL-9.6-and-drop-EL-8.8-9.2.patch
Patch0040: 0040-spec-drop-the-etc-leapp-actor_confid.d-dir.patch
Patch0041: 0041-feat-net-naming-scheme-enable-by-default.patch
Patch0100: leapp-repository-0.21.0-elevate.patch
%description
@ -123,7 +163,7 @@ Requires: leapp-repository-dependencies = %{leapp_repo_deps}
# IMPORTANT: this is capability provided by the leapp framework rpm.
# Check that 'version' instead of the real framework rpm version.
Requires: leapp-framework >= 5.0
Requires: leapp-framework >= 6.0
# Since we provide sub-commands for the leapp utility, we expect the leapp
# tool to be installed as well.
@ -214,6 +254,12 @@ Requires: dracut
Requires: NetworkManager-libnm
Requires: python3-gobject-base
%endif
%if 0%{?rhel} && 0%{?rhel} == 9
############# RHEL 9 dependencies (when the source system is RHEL 9) ##########
# Required to convert pam_userdb database from BerkeleyDB to GDBM
Requires: libdb-utils
%endif
##################################################
# end requirement
@ -229,9 +275,49 @@ Requires: python3-gobject-base
%setup -q -n %{name}-%{version} -D -T -a 1
# APPLY PATCHES HERE
# %%patch0001 -p1
%patch0001 -p1
%patch0010 -p1
# %%patch -P 0001 -p1
%patch -P 0001 -p1
%patch -P 0002 -p1
%patch -P 0003 -p1
%patch -P 0004 -p1
%patch -P 0005 -p1
%patch -P 0006 -p1
%patch -P 0007 -p1
%patch -P 0008 -p1
%patch -P 0009 -p1
%patch -P 0010 -p1
%patch -P 0011 -p1
%patch -P 0012 -p1
%patch -P 0013 -p1
%patch -P 0014 -p1
%patch -P 0015 -p1
%patch -P 0016 -p1
%patch -P 0017 -p1
%patch -P 0018 -p1
%patch -P 0019 -p1
%patch -P 0020 -p1
%patch -P 0021 -p1
%patch -P 0022 -p1
%patch -P 0023 -p1
%patch -P 0024 -p1
%patch -P 0025 -p1
%patch -P 0026 -p1
%patch -P 0027 -p1
%patch -P 0028 -p1
%patch -P 0029 -p1
%patch -P 0030 -p1
%patch -P 0031 -p1
%patch -P 0032 -p1
%patch -P 0033 -p1
%patch -P 0034 -p1
%patch -P 0035 -p1
%patch -P 0036 -p1
%patch -P 0037 -p1
%patch -P 0038 -p1
%patch -P 0039 -p1
%patch -P 0040 -p1
%patch -P 0041 -p1
%patch -P 0100 -p1
%build
@ -249,6 +335,9 @@ install -m 0755 -d %{buildroot}%{_sysconfdir}/leapp/files/
install -m 0644 etc/leapp/transaction/* %{buildroot}%{_sysconfdir}/leapp/transaction
install -m 0644 etc/leapp/files/* %{buildroot}%{_sysconfdir}/leapp/files
# uncomment to install existing configs if any exists
#install -m 0644 etc/leapp/actor_conf.d/* %%{buildroot}%%{_sysconfdir}/leapp/actor_conf.d
# install CLI commands for the leapp utility on the expected path
install -m 0755 -d %{buildroot}%{leapp_python_sitelib}/leapp/cli/
cp -r commands %{buildroot}%{leapp_python_sitelib}/leapp/cli/
@ -267,6 +356,9 @@ find %{buildroot}%{repositorydir}/common -name "test.py" -delete
rm -rf `find %{buildroot}%{repositorydir} -name "tests" -type d`
find %{buildroot}%{repositorydir} -name "Makefile" -delete
find %{buildroot} -name "*.py.orig" -delete
# .gitkeep file is used to have a directory in the repo. but we do not want these
# files in the resulting RPM
find %{buildroot} -name .gitkeep -delete
for DIRECTORY in $(find %{buildroot}%{repositorydir}/ -mindepth 1 -maxdepth 1 -type d);
do
@ -294,7 +386,9 @@ done;
%dir %{repositorydir}
%dir %{custom_repositorydir}
%dir %{leapp_python_sitelib}/leapp/cli/commands
%exclude %config %{_sysconfdir}/leapp/files/*
%config %{_sysconfdir}/leapp/files/*
# uncomment to package installed configs
#%%config %%{_sysconfdir}/leapp/actor_conf.d/*
%{_sysconfdir}/leapp/repos.d/*
%{_sysconfdir}/leapp/transaction/*
%{repositorydir}/*
@ -305,10 +399,32 @@ done;
# no files here
%changelog
* Tue Nov 19 2024 Matej Matuska <mmatuska@redhat.com> - 0.21.0-4
- Use net.naming-scheme by default
- Resolves: RHEL-23473
* Tue Nov 19 2024 Yuriy Kohut <ykohut@almalinux.org> - 0.21.0-2.elevate.4
- Update ELevate patch:
- extend update path for 8 and 8.10 with 9.5 target
* Mon Nov 18 2024 Petr Stodulka <pstodulk@redhat.com> - 0.21.0-3
- Introduce upgrade path 8.10 -> 9.6
- Require leapp-framework 6.0+
- Update leapp-deps package to satisfy leapp-framework-dependencies 6
- Add possibility to use net.naming-scheme during the upgrade
- Cap max size of the sparse files to 1TiB for storage with large amount of free space
- Enable upgrade for systems with LUKS bound to Clevis with TPM 2.0 token
- Adjust resource limitations for leapp to be able to perform the upgrade
- Fix problems with the bootloader when upgrading to RHEL 9.6 on ARM
- Fix the report when handling broken parsing of kernel cmdline
- Generate proper error message instead of ModelViolationError when parsing invalid repository definition
- Handle default kernel cmdline when multiple boot entries for the default kernel are defined
- Introduce a possibility to configure leapp actors covering RHUI on clouds
- Skip checking of (PKI) `directory-hash` dir to speedup the upgrade process and clean logs
- Update leapp upgrade data files
- Resolves: RHEL-67621, RHEL-57064, RHEL-56251, RHEL-50686, RHEL-41193
- Resolves: RHEL-34570, RHEL-26459, RHEL-23473, RHEL-16881, RHEL-3294
* Mon Oct 07 2024 Yuriy Kohut <ykohut@almalinux.org> - 0.21.0-2.elevate.3
- Update ELevate patch:
- remove '9.5' target from upgrade path until it is not released
@ -1077,4 +1193,3 @@ done;
* Wed Nov 07 2018 Petr Stodulka <pstodulk@redhat.com> - 0.3-1
- Initial RPM
Resolves: #1636481