Compare commits
396 Commits
4.2.17-1.e
...
master
Author | SHA1 | Date | |
---|---|---|---|
5152dfa764 | |||
b61614969d | |||
38cc2f79a0 | |||
d8b7f9210e | |||
69ec4df8f0 | |||
20841cfd4c | |||
cb53de3c46 | |||
72635cf5c1 | |||
9ce519426d | |||
208c71c194 | |||
71c4e3c178 | |||
1308986569 | |||
|
e05a11f99a | ||
|
cb9dede604 | ||
|
ce2c222dc2 | ||
|
be4fd75a7a | ||
|
33bb0ceceb | ||
|
aef48c0ab4 | ||
|
bd91ef1d10 | ||
|
32d5d32a6e | ||
|
5bcb3f5ac1 | ||
|
78bfbef206 | ||
|
88b6d8ebf5 | ||
|
6223baa2ba | ||
|
9d6226b436 | ||
|
927a0d35ab | ||
|
d81ee0f553 | ||
|
e601345a38 | ||
|
1fe075e7e4 | ||
|
a8fc1b183b | ||
|
8f171b81a1 | ||
|
ee8a56e64d | ||
|
2bf6c216bc | ||
|
99a6dfe8ad | ||
|
c63f9f41b6 | ||
|
ab1960de6d | ||
|
c17b820490 | ||
|
36133b71da | ||
|
50b217145c | ||
|
57f2b428d5 | ||
|
3cdc8d0ba7 | ||
|
07829f2229 | ||
|
bdf06ea038 | ||
|
bcab3431e1 | ||
|
b181b08033 | ||
|
e05b1bcd78 | ||
|
a97488721d | ||
|
4d858ef958 | ||
|
744b00499d | ||
|
583547c6ee | ||
|
f28053eecc | ||
|
a196e9c895 | ||
|
a6f6199910 | ||
|
a3dcec5059 | ||
|
6aa674fbb3 | ||
|
05d9651eba | ||
|
75ab6a14b2 | ||
|
533ea641d8 | ||
|
185a53d56b | ||
|
305deab9ed | ||
|
6af11d5747 | ||
|
58f96531c7 | ||
|
e570aa7726 | ||
|
d8a553163f | ||
|
a9839d8078 | ||
|
dc05d1fbba | ||
|
dc4e8b2fb7 | ||
|
27d055992e | ||
|
34fcd550b6 | ||
|
4c0059e91b | ||
|
bb2e32132e | ||
|
dca3be5861 | ||
|
38ec4ca159 | ||
|
c589ccb56f | ||
|
e413955849 | ||
|
e70e1841c7 | ||
|
fc86e03e44 | ||
|
548441644b | ||
|
ca369df0df | ||
|
67ae4202c4 | ||
|
aba5a7a093 | ||
|
323d1c1eb6 | ||
|
b0964ff555 | ||
|
79bc4e0c3a | ||
|
8772ccca23 | ||
|
3bb34225a9 | ||
|
daea6cabdf | ||
|
35b720e87a | ||
|
5a6ee9f8eb | ||
|
9a64db0485 | ||
|
de7210f69a | ||
|
24418ef74d | ||
f4765fbe3a | |||
|
80b9add9f7 | ||
|
b241545ca6 | ||
|
2e536228ae | ||
|
ff7950b9d1 | ||
|
6971624f83 | ||
|
b7d371d1c3 | ||
bc8c776872 | |||
91d282708e | |||
ccaf31bc87 | |||
5fe0504265 | |||
d79f163685 | |||
793fb23958 | |||
65d0c09e97 | |||
0a9e5df66c | |||
ae527a2e01 | |||
|
4991144a01 | ||
|
68d94ff488 | ||
|
ce45fdc39a | ||
|
b625ccea06 | ||
|
8eccfc5a03 | ||
|
f5a0e06af5 | ||
|
f6f54b56ca | ||
|
fcee346c7c | ||
|
82ec38ad60 | ||
|
c9cbd80569 | ||
|
035fca1e6d | ||
|
0f8cae69b7 | ||
|
f17628dd5f | ||
|
f3485410ad | ||
|
cccfaea14e | ||
|
e2057b75c5 | ||
|
44ea4d4419 | ||
|
d4425f7935 | ||
|
c8118527ea | ||
|
a8ea322907 | ||
|
c4995c8f4b | ||
|
997e372f25 | ||
|
42f1c62528 | ||
|
3fd29d0ee0 | ||
|
c1f2fa5035 | ||
|
85c9e9e776 | ||
|
33012ab31e | ||
|
72ddf65e62 | ||
|
c402ff3d60 | ||
|
8dd344f9ee | ||
|
d07f517a90 | ||
|
48366177cc | ||
|
4cb8671fe4 | ||
|
135bbbfe7e | ||
|
5624829564 | ||
|
5fb4f86312 | ||
|
e891fe7b09 | ||
|
4cd7d39914 | ||
|
5de829d05b | ||
|
2930a1cc54 | ||
|
9c4d3d496d | ||
|
4637fd6697 | ||
|
2ff8132eaf | ||
|
f9190d1fd1 | ||
|
80ad0448ec | ||
|
027380f969 | ||
|
41048f60b7 | ||
|
9f8f6a7956 | ||
|
3d3e4bafdf | ||
|
8fe0257e93 | ||
|
d7b5fd2278 | ||
|
8b49d4ad61 | ||
|
57443cd0aa | ||
|
1d146bb8d5 | ||
|
790091b7d7 | ||
|
28aad3ea40 | ||
|
7373b4dbbf | ||
|
218b11f1b7 | ||
|
bfbe9095d2 | ||
|
eb17182c04 | ||
f91f90cf64 | |||
49931082b2 | |||
8ba8609bda | |||
6f495a8133 | |||
2b4bddbfe0 | |||
032cf725de | |||
8b11bb81af | |||
|
114a73f100 | ||
|
1c3e5dce5e | ||
|
e55abb17f1 | ||
|
e81d78a1d1 | ||
|
68915d04f8 | ||
|
a25bf72fb8 | ||
|
68aee1fa2d | ||
|
6592735aec | ||
|
943fd8e77d | ||
|
004fc4382f | ||
|
596c5c0b7f | ||
|
141d00e941 | ||
|
4b64d20826 | ||
|
0747e967b0 | ||
|
6d58bc2ed8 | ||
|
60a347a4a2 | ||
|
53ed7386f3 | ||
|
ed43f0038e | ||
|
fcc9b4f1ca | ||
|
d32c293bca | ||
|
f0bd1af999 | ||
|
1b4747b915 | ||
|
6aabfc9285 | ||
|
9e014fed6a | ||
|
7ccb1d4849 | ||
|
abec28256d | ||
|
46216b4f17 | ||
|
02b3adbaeb | ||
|
d17e578645 | ||
|
6c1c9d9efd | ||
|
8dd7d8326f | ||
|
d7b173cae5 | ||
|
fa4640f03e | ||
|
d66eb0dea8 | ||
|
d56227ab4a | ||
|
12433157dd | ||
|
623955cb1f | ||
|
4e0d2d14c9 | ||
|
b61e59d676 | ||
|
eb35d7baac | ||
|
54209f3643 | ||
|
80c4536eaa | ||
|
9bb5550d36 | ||
|
364ed6c3af | ||
|
0b965096ee | ||
|
d914626d92 | ||
|
32215d955a | ||
|
d711f8a2d6 | ||
|
bd9d800b52 | ||
|
e03648589d | ||
|
b5fe2e8129 | ||
|
b14e85324c | ||
|
5a19ad2258 | ||
|
9ae49dae5b | ||
|
c82cbfdc32 | ||
|
ee9c9a74e6 | ||
|
ea0f933315 | ||
|
323d31df2b | ||
|
9acd7f5fa4 | ||
|
a2b16eb44f | ||
|
ff946d3f7b | ||
|
ede91bcd03 | ||
|
0fa459eb9e | ||
|
b49ffee06d | ||
|
fce5493f09 | ||
|
479849042f | ||
|
8cd19605bd | ||
|
750499eda1 | ||
|
d999960235 | ||
|
6edece449d | ||
|
dd22d94a9e | ||
|
b157a1825a | ||
|
fd298d4f17 | ||
|
fa967f79b5 | ||
|
57739c238f | ||
|
805a1083a2 | ||
|
57ea640916 | ||
|
c7121f9378 | ||
|
146b88e1e9 | ||
|
8aba2363e2 | ||
|
779793386c | ||
|
603c61a033 | ||
|
11fa342507 | ||
|
13ea8e5834 | ||
|
0abf937b0e | ||
|
778dcfa587 | ||
|
ea8020473d | ||
|
b0b494fff0 | ||
|
19cb013fec | ||
|
b27301641a | ||
|
da336f75f8 | ||
|
960c85efde | ||
|
d7aebfc7f9 | ||
|
ca185aaea8 | ||
|
895b3982d7 | ||
|
c4aa45beab | ||
|
f21ed6f607 | ||
|
cfe6ec3f4e | ||
|
e6c6f74176 | ||
|
8676941655 | ||
|
5f74175c33 | ||
|
1e18e8995d | ||
|
38ea822260 | ||
|
34eb45c7ec | ||
|
7422d1e045 | ||
|
97801e772e | ||
|
dff346eedb | ||
|
de53dd0bbd | ||
|
80957f5205 | ||
|
e8d79e9269 | ||
|
c5cdd498ac | ||
|
e490764985 | ||
|
707a2c8d10 | ||
|
f8c7ad28e4 | ||
|
bebbefe46e | ||
|
d55770898c | ||
|
903ab076ba | ||
|
88121619bc | ||
|
b805ce3d12 | ||
|
0e82663327 | ||
|
ecb1646042 | ||
|
6c280f2c46 | ||
|
aabf8faea0 | ||
|
38810b3f13 | ||
|
330ba9b9c4 | ||
|
52c9816755 | ||
|
32221e8f36 | ||
|
fe986d68b9 | ||
|
42f668d969 | ||
|
894cce6a5a | ||
|
260b3fce8d | ||
|
20c2e59218 | ||
|
5e6248e3e0 | ||
|
f681956cf1 | ||
|
cfb9882269 | ||
|
b652119d54 | ||
|
33d7290d78 | ||
|
9bae86a51e | ||
|
1d654522be | ||
|
80bd254347 | ||
|
94ffa1c5c6 | ||
|
9d02f87c99 | ||
|
7b9e08ab28 | ||
|
e2b3002726 | ||
|
e8305f3978 | ||
|
ac66c3d7f3 | ||
|
eb61c97cdb | ||
|
b03490bf18 | ||
|
ab19043773 | ||
|
204d88a351 | ||
|
8133676270 | ||
|
e42e65783d | ||
|
7475d2a3a9 | ||
|
ac061b2ea8 | ||
|
0530cf2712 | ||
|
9612241396 | ||
|
ba6f7429ee | ||
|
72bcee01be | ||
|
a1ebd234a4 | ||
|
5c26aa9127 | ||
|
195bfbefa4 | ||
|
20dc4beb6b | ||
|
d8d1cc520b | ||
|
904a1c3271 | ||
|
e8ddacd10e | ||
|
b7666ba4a4 | ||
|
3d9335e90e | ||
|
7c3e8d4276 | ||
|
9cd42a2b5e | ||
|
980c7ba8fb | ||
|
66dacb21e0 | ||
|
795bbe31e3 | ||
|
1bb038ca72 | ||
|
efff2c9504 | ||
|
a7c111643d | ||
|
5831d4ae1e | ||
|
3349585d78 | ||
|
5a8df7b69c | ||
|
6afcfef919 | ||
|
2a679dcb81 | ||
|
8a2d0162d9 | ||
|
01a52447bc | ||
|
cf761633f4 | ||
|
446334fb95 | ||
|
56a55db966 | ||
|
a435fd58da | ||
|
edb091b7b1 | ||
|
9a5e901cfe | ||
|
bf28e8d50c | ||
|
7fe32ae758 | ||
|
c27bfe0c59 | ||
|
76d13d0062 | ||
|
da791ed15c | ||
|
00a9861367 | ||
|
e866d22c04 | ||
|
ab1b5b48ec | ||
|
c8091899b2 | ||
|
035b37c566 | ||
|
edb4517e80 | ||
|
535034ef91 | ||
|
2769232b72 | ||
|
b217470464 | ||
|
735bfaa0d6 | ||
|
5b5069175d | ||
|
477dcf37d9 | ||
|
98359654cf | ||
|
64897d7d48 | ||
|
40133074b3 | ||
|
61e90fd7e0 | ||
|
36373479db | ||
|
44f7eff1b7 | ||
|
daa0ca6106 | ||
|
d4ee42ec23 | ||
|
49a5661521 | ||
|
c87fce30ac | ||
|
0f4b0577f7 | ||
|
83458f26c2 | ||
|
39b847094a | ||
|
9ea1098eae | ||
|
f518c1bb7c | ||
|
f470599f6c |
4
.gitignore
vendored
4
.gitignore
vendored
@ -11,5 +11,9 @@ tests/data/repo-krb5-lookaside
|
||||
tests/_composes
|
||||
htmlcov/
|
||||
.coverage
|
||||
.eggs
|
||||
.idea/
|
||||
.tox
|
||||
.venv
|
||||
.kdev4/
|
||||
pungi.kdev4
|
||||
|
41
1715.patch
Normal file
41
1715.patch
Normal file
@ -0,0 +1,41 @@
|
||||
From 432b0bce0401c4bbcd1a958a89305c475a794f26 Mon Sep 17 00:00:00 2001
|
||||
From: Adam Williamson <awilliam@redhat.com>
|
||||
Date: Jan 19 2024 07:25:09 +0000
|
||||
Subject: checks: don't require "repo" in the "ostree" schema
|
||||
|
||||
|
||||
Per @siosm in https://pagure.io/pungi-fedora/pull-request/1227
|
||||
this option "is deprecated and not needed anymore", so Pungi
|
||||
should not be requiring it.
|
||||
|
||||
Merges: https://pagure.io/pungi/pull-request/1714
|
||||
Signed-off-by: Adam Williamson <awilliam@redhat.com>
|
||||
|
||||
---
|
||||
|
||||
diff --git a/pungi/checks.py b/pungi/checks.py
|
||||
index a340f93..db8b297 100644
|
||||
--- a/pungi/checks.py
|
||||
+++ b/pungi/checks.py
|
||||
@@ -1066,7 +1066,6 @@ def make_schema():
|
||||
"required": [
|
||||
"treefile",
|
||||
"config_url",
|
||||
- "repo",
|
||||
"ostree_repo",
|
||||
],
|
||||
"additionalProperties": False,
|
||||
diff --git a/pungi/phases/ostree.py b/pungi/phases/ostree.py
|
||||
index 90578ae..2649cdb 100644
|
||||
--- a/pungi/phases/ostree.py
|
||||
+++ b/pungi/phases/ostree.py
|
||||
@@ -85,7 +85,7 @@ class OSTreeThread(WorkerThread):
|
||||
comps_repo = compose.paths.work.comps_repo(
|
||||
"$basearch", variant=variant, create_dir=False
|
||||
)
|
||||
- repos = shortcuts.force_list(config["repo"]) + self.repos
|
||||
+ repos = shortcuts.force_list(config.get("repo", [])) + self.repos
|
||||
if compose.has_comps:
|
||||
repos.append(translate_path(compose, comps_repo))
|
||||
repos = get_repo_dicts(repos, logger=self.pool)
|
||||
|
@ -2,6 +2,7 @@ include AUTHORS
|
||||
include COPYING
|
||||
include GPL
|
||||
include pungi.spec
|
||||
include setup.cfg
|
||||
include tox.ini
|
||||
include share/*
|
||||
include share/multilib/*
|
||||
|
@ -34,4 +34,6 @@ also moves the artifacts to correct locations.
|
||||
- Documentation: https://docs.pagure.org/pungi/
|
||||
- Upstream GIT: https://pagure.io/pungi/
|
||||
- Issue tracker: https://pagure.io/pungi/issues
|
||||
- Questions can be asked on *#fedora-releng* IRC channel on FreeNode
|
||||
- Questions can be asked in the *#fedora-releng* IRC channel on irc.libera.chat
|
||||
or in the matrix room
|
||||
[`#releng:fedoraproject.org`](https://matrix.to/#/#releng:fedoraproject.org)
|
||||
|
1
TODO
1
TODO
@ -47,7 +47,6 @@ Split Pungi into smaller well-defined tools
|
||||
|
||||
* create install images
|
||||
* lorax
|
||||
* buildinstall
|
||||
|
||||
* create isos
|
||||
* isos
|
||||
|
2
contrib/tmpfiles.d/pungi-clean-cache.conf
Normal file
2
contrib/tmpfiles.d/pungi-clean-cache.conf
Normal file
@ -0,0 +1,2 @@
|
||||
# Clean up pungi cache
|
||||
d /var/cache/pungi/createrepo_c/ - - - 30d
|
173
doc/_static/phases.svg
vendored
173
doc/_static/phases.svg
vendored
@ -1,22 +1,22 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<svg
|
||||
xmlns:dc="http://purl.org/dc/elements/1.1/"
|
||||
xmlns:cc="http://creativecommons.org/ns#"
|
||||
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
|
||||
xmlns:svg="http://www.w3.org/2000/svg"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
|
||||
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
|
||||
width="610.46454"
|
||||
height="301.1662"
|
||||
viewBox="0 0 610.46457 301.1662"
|
||||
height="327.16599"
|
||||
viewBox="0 0 610.46457 327.16599"
|
||||
id="svg2"
|
||||
version="1.1"
|
||||
inkscape:version="1.0.1 (3bc2e813f5, 2020-09-07)"
|
||||
inkscape:version="1.3.2 (091e20e, 2023-11-25)"
|
||||
sodipodi:docname="phases.svg"
|
||||
inkscape:export-filename="/home/lsedlar/repos/pungi/doc/_static/phases.png"
|
||||
inkscape:export-xdpi="90"
|
||||
inkscape:export-ydpi="90">
|
||||
inkscape:export-ydpi="90"
|
||||
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
|
||||
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
xmlns:svg="http://www.w3.org/2000/svg"
|
||||
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
|
||||
xmlns:cc="http://creativecommons.org/ns#"
|
||||
xmlns:dc="http://purl.org/dc/elements/1.1/">
|
||||
<sodipodi:namedview
|
||||
id="base"
|
||||
pagecolor="#ffffff"
|
||||
@ -24,16 +24,16 @@
|
||||
borderopacity="1.0"
|
||||
inkscape:pageopacity="1"
|
||||
inkscape:pageshadow="2"
|
||||
inkscape:zoom="2.1213203"
|
||||
inkscape:cx="276.65806"
|
||||
inkscape:cy="189.24198"
|
||||
inkscape:zoom="1.5"
|
||||
inkscape:cx="268"
|
||||
inkscape:cy="260.66667"
|
||||
inkscape:document-units="px"
|
||||
inkscape:current-layer="layer1"
|
||||
showgrid="false"
|
||||
inkscape:window-width="2560"
|
||||
inkscape:window-height="1376"
|
||||
inkscape:window-width="1920"
|
||||
inkscape:window-height="1027"
|
||||
inkscape:window-x="0"
|
||||
inkscape:window-y="0"
|
||||
inkscape:window-y="25"
|
||||
inkscape:window-maximized="1"
|
||||
units="px"
|
||||
inkscape:document-rotation="0"
|
||||
@ -43,7 +43,10 @@
|
||||
fit-margin-left="7.4"
|
||||
fit-margin-right="7.4"
|
||||
fit-margin-bottom="7.4"
|
||||
lock-margins="true" />
|
||||
lock-margins="true"
|
||||
inkscape:showpageshadow="2"
|
||||
inkscape:pagecheckerboard="0"
|
||||
inkscape:deskcolor="#d1d1d1" />
|
||||
<defs
|
||||
id="defs4">
|
||||
<marker
|
||||
@ -70,7 +73,6 @@
|
||||
<dc:format>image/svg+xml</dc:format>
|
||||
<dc:type
|
||||
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
|
||||
<dc:title></dc:title>
|
||||
</cc:Work>
|
||||
</rdf:RDF>
|
||||
</metadata>
|
||||
@ -103,7 +105,7 @@
|
||||
style="font-size:13.1479px;line-height:1.25">Pkgset</tspan></text>
|
||||
</g>
|
||||
<g
|
||||
transform="translate(58.253953,-80.817124)"
|
||||
transform="translate(56.378954,-80.817124)"
|
||||
id="g3398">
|
||||
<rect
|
||||
y="553.98242"
|
||||
@ -301,25 +303,29 @@
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
<rect
|
||||
transform="matrix(0,1,1,0,0,0)"
|
||||
style="fill:#e9b96e;fill-rule:evenodd;stroke:none;stroke-width:2.65937px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect3338-1"
|
||||
width="185.96895"
|
||||
height="115.80065"
|
||||
x="872.67383"
|
||||
y="486.55563" />
|
||||
<text
|
||||
id="text3384-0"
|
||||
y="969.2854"
|
||||
x="489.56451"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:0%;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
xml:space="preserve"><tspan
|
||||
style="font-size:13.1475px;line-height:1.25"
|
||||
id="tspan3391"
|
||||
sodipodi:role="line"
|
||||
<g
|
||||
id="g2"
|
||||
transform="translate(-1.4062678e-8,9.3749966)">
|
||||
<rect
|
||||
transform="matrix(0,1,1,0,0,0)"
|
||||
style="fill:#e9b96e;fill-rule:evenodd;stroke:none;stroke-width:1.85901px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect3338-1"
|
||||
width="103.12497"
|
||||
height="115.80065"
|
||||
x="863.29883"
|
||||
y="486.55563" />
|
||||
<text
|
||||
id="text3384-0"
|
||||
y="921.73846"
|
||||
x="489.56451"
|
||||
y="969.2854">ImageChecksum</tspan></text>
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:0%;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
xml:space="preserve"><tspan
|
||||
style="font-size:13.1475px;line-height:1.25"
|
||||
id="tspan3391"
|
||||
sodipodi:role="line"
|
||||
x="489.56451"
|
||||
y="921.73846">ImageChecksum</tspan></text>
|
||||
</g>
|
||||
<g
|
||||
transform="translate(-42.209584,-80.817124)"
|
||||
id="g3458">
|
||||
@ -417,16 +423,16 @@
|
||||
id="rect290"
|
||||
width="26.295755"
|
||||
height="224.35098"
|
||||
x="1063.5973"
|
||||
x="1091.7223"
|
||||
y="378.43698"
|
||||
transform="matrix(0,1,1,0,0,0)" />
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:0%;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
x="380.74133"
|
||||
y="1080.3723"
|
||||
y="1106.6223"
|
||||
id="text294"><tspan
|
||||
y="1080.3723"
|
||||
y="1106.6223"
|
||||
x="380.74133"
|
||||
sodipodi:role="line"
|
||||
id="tspan301"
|
||||
@ -454,32 +460,9 @@
|
||||
y="1069.0087"
|
||||
id="tspan3812">ExtraIsos</tspan></text>
|
||||
</g>
|
||||
<g
|
||||
id="g1031"
|
||||
transform="translate(-40.740337,29.23522)">
|
||||
<rect
|
||||
transform="matrix(0,1,1,0,0,0)"
|
||||
style="fill:#5ed4ec;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect206"
|
||||
width="26.295755"
|
||||
height="102.36562"
|
||||
x="1066.8611"
|
||||
y="418.66275" />
|
||||
<text
|
||||
id="text210"
|
||||
y="1084.9105"
|
||||
x="421.51923"
|
||||
style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
xml:space="preserve"><tspan
|
||||
y="1084.9105"
|
||||
x="421.51923"
|
||||
id="tspan208"
|
||||
sodipodi:role="line"
|
||||
style="font-size:13.1479px;line-height:1.25">Repoclosure</tspan></text>
|
||||
</g>
|
||||
<rect
|
||||
y="377.92242"
|
||||
x="1096.0963"
|
||||
x="1122.3463"
|
||||
height="224.24059"
|
||||
width="26.295755"
|
||||
id="rect87"
|
||||
@ -489,17 +472,18 @@
|
||||
xml:space="preserve"
|
||||
style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
x="380.7789"
|
||||
y="1114.1458"
|
||||
y="1140.3958"
|
||||
id="text91"><tspan
|
||||
style="font-size:13.1479px;line-height:1.25"
|
||||
sodipodi:role="line"
|
||||
id="tspan89"
|
||||
x="380.7789"
|
||||
y="1114.1458">Repoclosure</tspan></text>
|
||||
y="1140.3958">Repoclosure</tspan></text>
|
||||
<g
|
||||
id="g206">
|
||||
id="g206"
|
||||
transform="translate(0,-1.8749994)">
|
||||
<rect
|
||||
style="fill:#fcaf3e;fill-rule:evenodd;stroke:none;stroke-width:1.00033px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
style="fill:#fcd9a4;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1.00033px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect290-6"
|
||||
width="26.295755"
|
||||
height="101.91849"
|
||||
@ -516,6 +500,57 @@
|
||||
x="380.23166"
|
||||
sodipodi:role="line"
|
||||
id="tspan301-5"
|
||||
style="font-size:12px;line-height:0">KiwiBuild</tspan></text>
|
||||
</g>
|
||||
<g
|
||||
id="g3">
|
||||
<g
|
||||
id="g1">
|
||||
<g
|
||||
id="g4">
|
||||
<rect
|
||||
transform="matrix(0,1,1,0,0,0)"
|
||||
style="fill:#729fcf;fill-rule:evenodd;stroke:none;stroke-width:1.83502px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect3338-1-3"
|
||||
width="103.12497"
|
||||
height="115.80065"
|
||||
x="983.44263"
|
||||
y="486.55563" />
|
||||
<text
|
||||
id="text3384-0-6"
|
||||
y="1038.8422"
|
||||
x="489.56451"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:0%;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
xml:space="preserve"><tspan
|
||||
style="font-size:13.1475px;line-height:1.25"
|
||||
id="tspan3391-7"
|
||||
sodipodi:role="line"
|
||||
x="489.56451"
|
||||
y="1038.8422">ImageContainer</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
<g
|
||||
id="g206-1"
|
||||
transform="translate(-0.04628921,28.701853)">
|
||||
<rect
|
||||
style="fill:#fcaf3e;fill-rule:evenodd;stroke:none;stroke-width:1.00033px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect290-6-7"
|
||||
width="26.295755"
|
||||
height="101.91849"
|
||||
x="1032.3469"
|
||||
y="377.92731"
|
||||
transform="matrix(0,1,1,0,0,0)" />
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:0%;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
x="380.23166"
|
||||
y="1049.1219"
|
||||
id="text294-7-5"><tspan
|
||||
y="1049.1219"
|
||||
x="380.23166"
|
||||
sodipodi:role="line"
|
||||
id="tspan301-5-5"
|
||||
style="font-size:12px;line-height:0">OSBuild</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
|
Before Width: | Height: | Size: 21 KiB After Width: | Height: | Size: 23 KiB |
142
doc/conf.py
142
doc/conf.py
@ -18,12 +18,12 @@ import os
|
||||
# If extensions (or modules to document with autodoc) are in another directory,
|
||||
# add these directories to sys.path here. If the directory is relative to the
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
#sys.path.insert(0, os.path.abspath('.'))
|
||||
# sys.path.insert(0, os.path.abspath('.'))
|
||||
|
||||
# -- General configuration ------------------------------------------------
|
||||
|
||||
# If your documentation needs a minimal Sphinx version, state it here.
|
||||
#needs_sphinx = '1.0'
|
||||
# needs_sphinx = '1.0'
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
||||
@ -31,207 +31,201 @@ import os
|
||||
extensions = []
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ['_templates']
|
||||
templates_path = ["_templates"]
|
||||
|
||||
# The suffix of source filenames.
|
||||
source_suffix = '.rst'
|
||||
source_suffix = ".rst"
|
||||
|
||||
# The encoding of source files.
|
||||
#source_encoding = 'utf-8-sig'
|
||||
# source_encoding = 'utf-8-sig'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
master_doc = "index"
|
||||
|
||||
# General information about the project.
|
||||
project = u'Pungi'
|
||||
copyright = u'2016, Red Hat, Inc.'
|
||||
project = "Pungi"
|
||||
copyright = "2016, Red Hat, Inc."
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
# built documents.
|
||||
#
|
||||
# The short X.Y version.
|
||||
version = '4.2'
|
||||
version = "4.7"
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = '4.2.7'
|
||||
release = "4.7.0"
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
#language = None
|
||||
# language = None
|
||||
|
||||
# There are two options for replacing |today|: either, you set today to some
|
||||
# non-false value, then it is used:
|
||||
#today = ''
|
||||
# today = ''
|
||||
# Else, today_fmt is used as the format for a strftime call.
|
||||
#today_fmt = '%B %d, %Y'
|
||||
# today_fmt = '%B %d, %Y'
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
exclude_patterns = ['_build']
|
||||
exclude_patterns = ["_build"]
|
||||
|
||||
# The reST default role (used for this markup: `text`) to use for all
|
||||
# documents.
|
||||
#default_role = None
|
||||
# default_role = None
|
||||
|
||||
# If true, '()' will be appended to :func: etc. cross-reference text.
|
||||
#add_function_parentheses = True
|
||||
# add_function_parentheses = True
|
||||
|
||||
# If true, the current module name will be prepended to all description
|
||||
# unit titles (such as .. function::).
|
||||
#add_module_names = True
|
||||
# add_module_names = True
|
||||
|
||||
# If true, sectionauthor and moduleauthor directives will be shown in the
|
||||
# output. They are ignored by default.
|
||||
#show_authors = False
|
||||
# show_authors = False
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
pygments_style = "sphinx"
|
||||
|
||||
# A list of ignored prefixes for module index sorting.
|
||||
#modindex_common_prefix = []
|
||||
# modindex_common_prefix = []
|
||||
|
||||
# If true, keep warnings as "system message" paragraphs in the built documents.
|
||||
#keep_warnings = False
|
||||
# keep_warnings = False
|
||||
|
||||
|
||||
# -- Options for HTML output ----------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
html_theme = 'default'
|
||||
html_theme = "default"
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
#html_theme_options = {}
|
||||
# html_theme_options = {}
|
||||
|
||||
# Add any paths that contain custom themes here, relative to this directory.
|
||||
#html_theme_path = []
|
||||
# html_theme_path = []
|
||||
|
||||
# The name for this set of Sphinx documents. If None, it defaults to
|
||||
# "<project> v<release> documentation".
|
||||
#html_title = None
|
||||
# html_title = None
|
||||
|
||||
# A shorter title for the navigation bar. Default is the same as html_title.
|
||||
#html_short_title = None
|
||||
# html_short_title = None
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top
|
||||
# of the sidebar.
|
||||
#html_logo = None
|
||||
# html_logo = None
|
||||
|
||||
# The name of an image file (within the static path) to use as favicon of the
|
||||
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
|
||||
# pixels large.
|
||||
#html_favicon = None
|
||||
# html_favicon = None
|
||||
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
html_static_path = ['_static']
|
||||
html_static_path = ["_static"]
|
||||
|
||||
# Add any extra paths that contain custom files (such as robots.txt or
|
||||
# .htaccess) here, relative to this directory. These files are copied
|
||||
# directly to the root of the documentation.
|
||||
#html_extra_path = []
|
||||
# html_extra_path = []
|
||||
|
||||
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
|
||||
# using the given strftime format.
|
||||
#html_last_updated_fmt = '%b %d, %Y'
|
||||
# html_last_updated_fmt = '%b %d, %Y'
|
||||
|
||||
# If true, SmartyPants will be used to convert quotes and dashes to
|
||||
# typographically correct entities.
|
||||
#html_use_smartypants = True
|
||||
# html_use_smartypants = True
|
||||
|
||||
# Custom sidebar templates, maps document names to template names.
|
||||
#html_sidebars = {}
|
||||
# html_sidebars = {}
|
||||
|
||||
# Additional templates that should be rendered to pages, maps page names to
|
||||
# template names.
|
||||
#html_additional_pages = {}
|
||||
# html_additional_pages = {}
|
||||
|
||||
# If false, no module index is generated.
|
||||
#html_domain_indices = True
|
||||
# html_domain_indices = True
|
||||
|
||||
# If false, no index is generated.
|
||||
#html_use_index = True
|
||||
# html_use_index = True
|
||||
|
||||
# If true, the index is split into individual pages for each letter.
|
||||
#html_split_index = False
|
||||
# html_split_index = False
|
||||
|
||||
# If true, links to the reST sources are added to the pages.
|
||||
#html_show_sourcelink = True
|
||||
# html_show_sourcelink = True
|
||||
|
||||
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
|
||||
#html_show_sphinx = True
|
||||
# html_show_sphinx = True
|
||||
|
||||
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
|
||||
#html_show_copyright = True
|
||||
# html_show_copyright = True
|
||||
|
||||
# If true, an OpenSearch description file will be output, and all pages will
|
||||
# contain a <link> tag referring to it. The value of this option must be the
|
||||
# base URL from which the finished HTML is served.
|
||||
#html_use_opensearch = ''
|
||||
# html_use_opensearch = ''
|
||||
|
||||
# This is the file name suffix for HTML files (e.g. ".xhtml").
|
||||
#html_file_suffix = None
|
||||
# html_file_suffix = None
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'Pungidoc'
|
||||
htmlhelp_basename = "Pungidoc"
|
||||
|
||||
|
||||
# -- Options for LaTeX output ---------------------------------------------
|
||||
|
||||
latex_elements = {
|
||||
# The paper size ('letterpaper' or 'a4paper').
|
||||
#'papersize': 'letterpaper',
|
||||
|
||||
# The font size ('10pt', '11pt' or '12pt').
|
||||
#'pointsize': '10pt',
|
||||
|
||||
# Additional stuff for the LaTeX preamble.
|
||||
#'preamble': '',
|
||||
# The paper size ('letterpaper' or 'a4paper').
|
||||
#'papersize': 'letterpaper',
|
||||
# The font size ('10pt', '11pt' or '12pt').
|
||||
#'pointsize': '10pt',
|
||||
# Additional stuff for the LaTeX preamble.
|
||||
#'preamble': '',
|
||||
}
|
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples
|
||||
# (source start file, target name, title,
|
||||
# author, documentclass [howto, manual, or own class]).
|
||||
latex_documents = [
|
||||
('index', 'Pungi.tex', u'Pungi Documentation',
|
||||
u'Daniel Mach', 'manual'),
|
||||
("index", "Pungi.tex", "Pungi Documentation", "Daniel Mach", "manual"),
|
||||
]
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top of
|
||||
# the title page.
|
||||
#latex_logo = None
|
||||
# latex_logo = None
|
||||
|
||||
# For "manual" documents, if this is true, then toplevel headings are parts,
|
||||
# not chapters.
|
||||
#latex_use_parts = False
|
||||
# latex_use_parts = False
|
||||
|
||||
# If true, show page references after internal links.
|
||||
#latex_show_pagerefs = False
|
||||
# latex_show_pagerefs = False
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
#latex_show_urls = False
|
||||
# latex_show_urls = False
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
#latex_appendices = []
|
||||
# latex_appendices = []
|
||||
|
||||
# If false, no module index is generated.
|
||||
#latex_domain_indices = True
|
||||
# latex_domain_indices = True
|
||||
|
||||
|
||||
# -- Options for manual page output ---------------------------------------
|
||||
|
||||
# One entry per manual page. List of tuples
|
||||
# (source start file, name, description, authors, manual section).
|
||||
man_pages = [
|
||||
('index', 'pungi', u'Pungi Documentation',
|
||||
[u'Daniel Mach'], 1)
|
||||
]
|
||||
man_pages = [("index", "pungi", "Pungi Documentation", ["Daniel Mach"], 1)]
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
#man_show_urls = False
|
||||
# man_show_urls = False
|
||||
|
||||
|
||||
# -- Options for Texinfo output -------------------------------------------
|
||||
@ -240,19 +234,25 @@ man_pages = [
|
||||
# (source start file, target name, title, author,
|
||||
# dir menu entry, description, category)
|
||||
texinfo_documents = [
|
||||
('index', 'Pungi', u'Pungi Documentation',
|
||||
u'Daniel Mach', 'Pungi', 'One line description of project.',
|
||||
'Miscellaneous'),
|
||||
(
|
||||
"index",
|
||||
"Pungi",
|
||||
"Pungi Documentation",
|
||||
"Daniel Mach",
|
||||
"Pungi",
|
||||
"One line description of project.",
|
||||
"Miscellaneous",
|
||||
),
|
||||
]
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
#texinfo_appendices = []
|
||||
# texinfo_appendices = []
|
||||
|
||||
# If false, no module index is generated.
|
||||
#texinfo_domain_indices = True
|
||||
# texinfo_domain_indices = True
|
||||
|
||||
# How to display URL addresses: 'footnote', 'no', or 'inline'.
|
||||
#texinfo_show_urls = 'footnote'
|
||||
# texinfo_show_urls = 'footnote'
|
||||
|
||||
# If true, do not generate a @detailmenu in the "Top" node's menu.
|
||||
#texinfo_no_detailmenu = False
|
||||
# texinfo_no_detailmenu = False
|
||||
|
@ -182,6 +182,8 @@ Options
|
||||
Please note that when ``dnf`` is used, the build dependencies check is
|
||||
skipped. On Python 3, only ``dnf`` backend is available.
|
||||
|
||||
See also: the ``gather_backend`` setting for Pungi's gather phase.
|
||||
|
||||
**cts_url**
|
||||
(*str*) -- URL to Compose Tracking Service. If defined, Pungi will add
|
||||
the compose to Compose Tracking Service and get the compose ID from it.
|
||||
@ -192,6 +194,17 @@ Options
|
||||
Tracking Service Kerberos authentication. If not defined, the default
|
||||
Kerberos principal is used.
|
||||
|
||||
**cts_oidc_token_url**
|
||||
(*str*) -- URL to the OIDC token endpoint.
|
||||
For example ``https://oidc.example.com/openid-connect/token``.
|
||||
This option can be overridden by the environment variable ``CTS_OIDC_TOKEN_URL``.
|
||||
|
||||
**cts_oidc_client_id**
|
||||
(*str*) -- OIDC client ID.
|
||||
This option can be overridden by the environment variable ``CTS_OIDC_CLIENT_ID``.
|
||||
Note that environment variable ``CTS_OIDC_CLIENT_SECRET`` must be configured with
|
||||
corresponding client secret to authenticate to CTS via OIDC.
|
||||
|
||||
**compose_type**
|
||||
(*str*) -- Allows to set default compose type. Type set via a command-line
|
||||
option overwrites this.
|
||||
@ -279,8 +292,8 @@ There a couple common format specifiers available for both the options:
|
||||
format string. The pattern should not overlap, otherwise it is undefined
|
||||
which one will be used.
|
||||
|
||||
This format will be used for all phases generating images. Currently that
|
||||
means ``createiso``, ``live_images`` and ``buildinstall``.
|
||||
This format will be used for some phases generating images. Currently that
|
||||
means ``createiso``, ``buildinstall`` and ``ostree_installer``.
|
||||
|
||||
Available extra keys are:
|
||||
* ``disc_num``
|
||||
@ -310,7 +323,6 @@ There a couple common format specifiers available for both the options:
|
||||
|
||||
Available keys are:
|
||||
* ``boot`` -- for ``boot.iso`` images created in *buildinstall* phase
|
||||
* ``live`` -- for images created by *live_images* phase
|
||||
* ``dvd`` -- for images created by *createiso* phase
|
||||
* ``ostree`` -- for ostree installer images
|
||||
|
||||
@ -338,48 +350,10 @@ Example
|
||||
|
||||
disc_types = {
|
||||
'boot': 'netinst',
|
||||
'live': 'Live',
|
||||
'dvd': 'DVD',
|
||||
}
|
||||
|
||||
|
||||
Signing
|
||||
=======
|
||||
|
||||
If you want to sign deliverables generated during pungi run like RPM wrapped
|
||||
images, you must provide a few configuration options:
|
||||
|
||||
**signing_command** [optional]
|
||||
(*str*) -- Command that will be run with a koji build as a single
|
||||
argument. This command must not require any user interaction.
|
||||
If you need to pass a password for a signing key to the command,
|
||||
do this via command line option of the command and use string
|
||||
formatting syntax ``%(signing_key_password)s``.
|
||||
(See **signing_key_password_file**).
|
||||
|
||||
**signing_key_id** [optional]
|
||||
(*str*) -- ID of the key that will be used for the signing.
|
||||
This ID will be used when crafting koji paths to signed files
|
||||
(``kojipkgs.fedoraproject.org/packages/NAME/VER/REL/data/signed/KEYID/..``).
|
||||
|
||||
**signing_key_password_file** [optional]
|
||||
(*str*) -- Path to a file with password that will be formatted
|
||||
into **signing_command** string via ``%(signing_key_password)s``
|
||||
string format syntax (if used).
|
||||
Because pungi config is usually stored in git and is part of compose
|
||||
logs we don't want password to be included directly in the config.
|
||||
Note: If ``-`` string is used instead of a filename, then you will be asked
|
||||
for the password interactively right after pungi starts.
|
||||
|
||||
Example
|
||||
-------
|
||||
::
|
||||
|
||||
signing_command = '~/git/releng/scripts/sigulsign_unsigned.py -vv --password=%(signing_key_password)s fedora-24'
|
||||
signing_key_id = '81b46521'
|
||||
signing_key_password_file = '~/password_for_fedora-24_key'
|
||||
|
||||
|
||||
.. _git-urls:
|
||||
|
||||
Git URLs
|
||||
@ -457,6 +431,12 @@ Options
|
||||
cloned files should be split into subdirectories for each architecture of
|
||||
the variant.
|
||||
|
||||
**createrepo_enable_cache** = True
|
||||
(*bool*) -- whether to use ``--cachedir`` option of ``createrepo``. It will
|
||||
cache and reuse checksum values to speed up the createrepo phase.
|
||||
The cache dir is located at ``/var/cache/pungi/createrepo_c/$release_short-$uid``
|
||||
e.g. /var/cache/pungi/createrepo_c/Fedora-1000
|
||||
|
||||
**product_id** = None
|
||||
(:ref:`scm_dict <scm_support>`) -- If specified, it should point to a
|
||||
directory with certificates ``*<variant_uid>-<arch>-*.pem``. Pungi will
|
||||
@ -573,6 +553,16 @@ Options
|
||||
with everything. Set this option to ``False`` to ignore ``noarch`` in
|
||||
``ExclusiveArch`` and always consider only binary architectures.
|
||||
|
||||
**pkgset_inherit_exclusive_arch_to_noarch** = True
|
||||
(*bool*) -- When set to ``True``, the value of ``ExclusiveArch`` or
|
||||
``ExcludeArch`` will be copied from source rpm to all its noarch packages.
|
||||
That will then limit which architectures the noarch packages can be
|
||||
included in.
|
||||
|
||||
By setting this option to ``False`` this step is skipped, and noarch
|
||||
packages will by default land in all architectures. They can still be
|
||||
excluded by listing them in a relevant section of ``filter_packages``.
|
||||
|
||||
**pkgset_allow_reuse** = True
|
||||
(*bool*) -- When set to ``True``, *Pungi* will try to reuse pkgset data
|
||||
from the old composes specified by ``--old-composes``. When enabled, this
|
||||
@ -581,6 +571,18 @@ Options
|
||||
(for example) between composes, then Pungi may not respect those changes
|
||||
in your new compose.
|
||||
|
||||
**signed_packages_retries** = 0
|
||||
(*int*) -- In automated workflows, you might start a compose before Koji
|
||||
has completely written all signed packages to disk. In this case you may
|
||||
want Pungi to wait for the package to appear in Koji's storage. This
|
||||
option controls how many times Pungi will retry looking for the signed
|
||||
copy.
|
||||
|
||||
**signed_packages_wait** = 30
|
||||
(*int*) -- Interval in seconds for how long to wait between attempts to
|
||||
find signed packages. This option only makes sense when
|
||||
``signed_packages_retries`` is set higher than 0.
|
||||
|
||||
|
||||
Example
|
||||
-------
|
||||
@ -601,7 +603,7 @@ Options
|
||||
-------
|
||||
|
||||
**buildinstall_method**
|
||||
(*str*) -- "lorax" (f16+, rhel7+) or "buildinstall" (older releases)
|
||||
(*str*) -- "lorax" (f16+, rhel7+)
|
||||
**lorax_options**
|
||||
(*list*) -- special options passed on to *lorax*.
|
||||
|
||||
@ -652,6 +654,11 @@ Options
|
||||
**buildinstall_allow_reuse** = False
|
||||
(*bool*) -- When set to ``True``, *Pungi* will try to reuse buildinstall
|
||||
results from old compose specified by ``--old-composes``.
|
||||
**buildinstall_packages**
|
||||
(list) – Additional packages to be installed in the runroot environment
|
||||
where lorax will run to create installer. Format: ``[(variant_uid_regex,
|
||||
{arch|*: [package_globs]})]``.
|
||||
|
||||
|
||||
Example
|
||||
-------
|
||||
@ -686,6 +693,13 @@ Example
|
||||
})
|
||||
]
|
||||
|
||||
# Additional packages to be installed in the Koji runroot environment where
|
||||
# lorax will run.
|
||||
buildinstall_packages = [
|
||||
('^Simple$', {
|
||||
'*': ['dummy-package'],
|
||||
})
|
||||
]
|
||||
|
||||
.. note::
|
||||
|
||||
@ -728,7 +742,7 @@ Options
|
||||
(*bool*) -- When set to ``True``, *Pungi* will try to reuse gather results
|
||||
from old compose specified by ``--old-composes``.
|
||||
|
||||
**greedy_method**
|
||||
**greedy_method** = none
|
||||
(*str*) -- This option controls how package requirements are satisfied in
|
||||
case a particular ``Requires`` has multiple candidates.
|
||||
|
||||
@ -749,7 +763,7 @@ Options
|
||||
pulled in.
|
||||
* With ``greedy_method = "all"`` all three packages will be
|
||||
pulled in.
|
||||
* With ``greedy_method = "build" ``pkg-b-provider-1`` and
|
||||
* With ``greedy_method = "build"`` ``pkg-b-provider-1`` and
|
||||
``pkg-b-provider-2`` will be pulled in.
|
||||
|
||||
**gather_backend**
|
||||
@ -763,6 +777,9 @@ Options
|
||||
``python-multilib`` library. Please refer to ``multilib`` option to see the
|
||||
differences.
|
||||
|
||||
See also: the ``repoclosure_backend`` setting for Pungi's repoclosure
|
||||
phase.
|
||||
|
||||
**multilib**
|
||||
(*list*) -- mapping of variant regexes and arches to list of multilib
|
||||
methods
|
||||
@ -787,8 +804,14 @@ Options
|
||||
(*list*) -- additional packages to be included in a variant and
|
||||
architecture; format: ``[(variant_uid_regex, {arch|*: [package_globs]})]``
|
||||
|
||||
In contrast to the ``comps_file`` setting, the ``additional_packages``
|
||||
setting merely adds the list of packages to the compose. When a package
|
||||
is in a comps group, it is visible to users via ``dnf groupinstall`` and
|
||||
Anaconda's Groups selection, but ``additional_packages`` does not affect
|
||||
DNF groups.
|
||||
|
||||
The packages specified here are matched against RPM names, not any other
|
||||
provides in the package not the name of source package. Shell globbing is
|
||||
provides in the package nor the name of source package. Shell globbing is
|
||||
used, so wildcards are possible. The package can be specified as name only
|
||||
or ``name.arch``.
|
||||
|
||||
@ -797,6 +820,21 @@ Options
|
||||
it. If you add a debuginfo package that does not have anything else from
|
||||
the same build included in the compose, the sources will not be pulled in.
|
||||
|
||||
If you list a package in ``additional_packages`` but Pungi cannot find
|
||||
it (for example, it's not available in the Koji tag), Pungi will log a
|
||||
warning in the "work" or "logs" directories and continue without aborting.
|
||||
|
||||
*Example*: This configuration will add all packages in a Koji tag to an
|
||||
"Everything" variant::
|
||||
|
||||
additional_packages = [
|
||||
('^Everything$', {
|
||||
'*': [
|
||||
'*',
|
||||
],
|
||||
})
|
||||
]
|
||||
|
||||
**filter_packages**
|
||||
(*list*) -- packages to be excluded from a variant and architecture;
|
||||
format: ``[(variant_uid_regex, {arch|*: [package_globs]})]``
|
||||
@ -864,10 +902,15 @@ Options
|
||||
comps file can not be found in the package set. When disabled (the
|
||||
default), such cases are still reported as warnings in the log.
|
||||
|
||||
With ``dnf`` gather backend, this option will abort the compose on any
|
||||
missing package no matter if it's listed in comps, ``additional_packages``
|
||||
or prepopulate file.
|
||||
|
||||
**gather_source_mapping**
|
||||
(*str*) -- JSON mapping with initial packages for the compose. The value
|
||||
should be a path to JSON file with following mapping: ``{variant: {arch:
|
||||
{rpm_name: [rpm_arch|None]}}}``.
|
||||
{rpm_name: [rpm_arch|None]}}}``. Relative paths are interpreted relative to
|
||||
the location of main config file.
|
||||
|
||||
**gather_profiler** = False
|
||||
(*bool*) -- When set to ``True`` the gather tool will produce additional
|
||||
@ -1201,7 +1244,7 @@ Options
|
||||
|
||||
Format: ``[(variant_uid_regex, {arch|*: bool})]``
|
||||
|
||||
**create_jigdo** = True
|
||||
**create_jigdo** = False
|
||||
(*bool*) -- controls the creation of jigdo from ISO
|
||||
|
||||
**create_optional_isos** = False
|
||||
@ -1228,6 +1271,11 @@ Options
|
||||
meaning size in bytes, or it can be a string with ``k``, ``M``, ``G``
|
||||
suffix (using multiples of 1024).
|
||||
|
||||
**iso_level**
|
||||
(*int|list*) [optional] -- Set the ISO9660 conformance level. This is
|
||||
either a global single value (a number from 1 to 4), or a variant/arch
|
||||
mapping.
|
||||
|
||||
**split_iso_reserve** = 10MiB
|
||||
(*int|str*) -- how much free space should be left on each disk. The format
|
||||
is the same as for ``iso_size`` option.
|
||||
@ -1281,8 +1329,8 @@ All non-``RC`` milestones from label get appended to the version. For release
|
||||
either label is used or date, type and respin.
|
||||
|
||||
|
||||
Common options for Live Images, Live Media and Image Build
|
||||
==========================================================
|
||||
Common options for Live Media and Image Build
|
||||
=============================================
|
||||
|
||||
All images can have ``ksurl``, ``version``, ``release`` and ``target``
|
||||
specified. Since this can create a lot of duplication, there are global options
|
||||
@ -1298,14 +1346,12 @@ The kickstart URL is configured by these options.
|
||||
* ``global_ksurl`` -- global fallback setting
|
||||
* ``live_media_ksurl``
|
||||
* ``image_build_ksurl``
|
||||
* ``live_images_ksurl``
|
||||
|
||||
Target is specified by these settings.
|
||||
|
||||
* ``global_target`` -- global fallback setting
|
||||
* ``live_media_target``
|
||||
* ``image_build_target``
|
||||
* ``live_images_target``
|
||||
* ``osbuild_target``
|
||||
|
||||
Version is specified by these options. If no version is set, a default value
|
||||
@ -1314,7 +1360,6 @@ will be provided according to :ref:`automatic versioning <auto-version>`.
|
||||
* ``global_version`` -- global fallback setting
|
||||
* ``live_media_version``
|
||||
* ``image_build_version``
|
||||
* ``live_images_version``
|
||||
* ``osbuild_version``
|
||||
|
||||
Release is specified by these options. If set to a magic value to
|
||||
@ -1324,44 +1369,14 @@ to :ref:`automatic versioning <auto-version>`.
|
||||
* ``global_release`` -- global fallback setting
|
||||
* ``live_media_release``
|
||||
* ``image_build_release``
|
||||
* ``live_images_release``
|
||||
* ``osbuild_release``
|
||||
|
||||
Each configuration block can also optionally specify a ``failable`` key. For
|
||||
live images it should have a boolean value. For live media and image build it
|
||||
Each configuration block can also optionally specify a ``failable`` key. It
|
||||
should be a list of strings containing architectures that are optional. If any
|
||||
deliverable fails on an optional architecture, it will not abort the whole
|
||||
compose. If the list contains only ``"*"``, all arches will be substituted.
|
||||
|
||||
|
||||
Live Images Settings
|
||||
====================
|
||||
|
||||
**live_images**
|
||||
(*list*) -- Configuration for the particular image. The elements of the
|
||||
list should be tuples ``(variant_uid_regex, {arch|*: config})``. The config
|
||||
should be a dict with these keys:
|
||||
|
||||
* ``kickstart`` (*str*)
|
||||
* ``ksurl`` (*str*) [optional] -- where to get the kickstart from
|
||||
* ``name`` (*str*)
|
||||
* ``version`` (*str*)
|
||||
* ``target`` (*str*)
|
||||
* ``repo`` (*str|[str]*) -- repos specified by URL or variant UID
|
||||
* ``specfile`` (*str*) -- for images wrapped in RPM
|
||||
* ``scratch`` (*bool*) -- only RPM-wrapped images can use scratch builds,
|
||||
but by default this is turned off
|
||||
* ``type`` (*str*) -- what kind of task to start in Koji. Defaults to
|
||||
``live`` meaning ``koji spin-livecd`` will be used. Alternative option
|
||||
is ``appliance`` corresponding to ``koji spin-appliance``.
|
||||
* ``sign`` (*bool*) -- only RPM-wrapped images can be signed
|
||||
|
||||
**live_images_no_rename**
|
||||
(*bool*) -- When set to ``True``, filenames generated by Koji will be used.
|
||||
When ``False``, filenames will be generated based on ``image_name_format``
|
||||
configuration option.
|
||||
|
||||
|
||||
Live Media Settings
|
||||
===================
|
||||
|
||||
@ -1391,6 +1406,7 @@ Live Media Settings
|
||||
* ``repo`` (*str|[str]*) -- repos specified by URL or variant UID
|
||||
* ``title`` (*str*)
|
||||
* ``install_tree_from`` (*str*) -- variant to take install tree from
|
||||
* ``nomacboot`` (*bool*)
|
||||
|
||||
|
||||
Image Build Settings
|
||||
@ -1516,6 +1532,61 @@ Example
|
||||
}
|
||||
|
||||
|
||||
KiwiBuild Settings
|
||||
==================
|
||||
|
||||
**kiwibuild**
|
||||
(*dict*) -- configuration for building images using kiwi by a Koji plugin.
|
||||
Pungi will trigger a Koji task delegating to kiwi, which will build the image,
|
||||
import it to Koji via content generators.
|
||||
|
||||
Format: ``{variant_uid_regex: [{...}]}``.
|
||||
|
||||
Required keys in the configuration dict:
|
||||
|
||||
* ``kiwi_profile`` -- (*str*) select profile from description file.
|
||||
|
||||
Description scm, description path and target have to be provided too, but
|
||||
instead of specifying them for each image separately, you can use the
|
||||
``kiwibuild_*`` options or ``global_target``.
|
||||
|
||||
Optional keys:
|
||||
|
||||
* ``description_scm`` -- (*str*) scm URL of description kiwi description.
|
||||
* ``description_path`` -- (*str*) path to kiwi description inside the scm
|
||||
repo.
|
||||
* ``repos`` -- additional repos used to install RPMs in the image. The
|
||||
compose repository for the enclosing variant is added automatically.
|
||||
Either variant name or a URL is supported.
|
||||
* ``target`` -- (*str*) which build target to use for the task. If not
|
||||
provided, then either ``kiwibuild_target`` or ``global_target`` is
|
||||
needed.
|
||||
* ``release`` -- (*str*) release of the output image.
|
||||
* ``arches`` -- (*[str]*) List of architectures to build for. If not
|
||||
provided, all variant architectures will be built.
|
||||
* ``failable`` -- (*[str]*) List of architectures for which this
|
||||
deliverable is not release blocking.
|
||||
* ``type`` -- (*str*) override default type from the bundle with this value.
|
||||
* ``type_attr`` -- (*[str]*) override default attributes for the build type
|
||||
from description.
|
||||
* ``bundle_name_format`` -- (*str*) override default bundle format name.
|
||||
|
||||
**kiwibuild_description_scm**
|
||||
(*str*) -- URL for scm containing the description files
|
||||
|
||||
**kiwibuild_description_path**
|
||||
(*str*) -- path to a description file within the description scm
|
||||
|
||||
**kiwibuild_type**
|
||||
(*str*) -- override default type from the bundle with this value.
|
||||
|
||||
**kiwibuild_type_attr**
|
||||
(*[str]*) -- override default attributes for the build type from description.
|
||||
|
||||
**kiwibuild_bundle_name_format**
|
||||
(*str*) -- override default bundle format name.
|
||||
|
||||
|
||||
OSBuild Composer for building images
|
||||
====================================
|
||||
|
||||
@ -1531,7 +1602,9 @@ OSBuild Composer for building images
|
||||
|
||||
* ``name`` -- name of the Koji package
|
||||
* ``distro`` -- image for which distribution should be build TODO examples
|
||||
* ``image_type`` -- a list of image types to build (e.g. ``qcow2``)
|
||||
* ``image_types`` -- a list with a single image type string or just a
|
||||
string representing the image type to build (e.g. ``qcow2``). In any
|
||||
case, only a single image type can be provided as an argument.
|
||||
|
||||
Optional keys:
|
||||
|
||||
@ -1542,11 +1615,76 @@ OSBuild Composer for building images
|
||||
* ``release`` -- release part of the final NVR. If neither this option nor
|
||||
the global ``osbuild_release`` is set, Koji will automatically generate a
|
||||
value.
|
||||
* ``repo`` -- a list of repository URLs from which to consume packages for
|
||||
* ``repo`` -- a list of repositories from which to consume packages for
|
||||
building the image. By default only the variant repository is used.
|
||||
The list items may use one of the following formats:
|
||||
|
||||
* String with just the repository URL.
|
||||
|
||||
* Dictionary with the following keys:
|
||||
|
||||
* ``baseurl`` -- URL of the repository.
|
||||
* ``package_sets`` -- a list of package set names to use for this
|
||||
repository. Package sets are an internal concept of Image Builder
|
||||
and are used in image definitions. If specified, the repository is
|
||||
used by Image Builder only for the pipeline with the same name.
|
||||
For example, specifying the ``build`` package set name will make
|
||||
the repository to be used only for the build environment in which
|
||||
the image will be built. (optional)
|
||||
|
||||
* ``arches`` -- list of architectures for which to build the image. By
|
||||
default, the variant arches are used. This option can only restrict it,
|
||||
not add a new one.
|
||||
* ``manifest_type`` -- the image type that is put into the manifest by
|
||||
pungi. If not supplied then it is autodetected from the Koji output.
|
||||
* ``ostree_url`` -- URL of the repository that's used to fetch the parent
|
||||
commit from.
|
||||
* ``ostree_ref`` -- name of the ostree branch
|
||||
* ``ostree_parent`` -- commit hash or a branch-like reference to the
|
||||
parent commit.
|
||||
* ``customizations`` -- a dictionary with customizations to use for the
|
||||
image build. For the list of supported customizations, see the **hosted**
|
||||
variants in the `Image Builder documentation
|
||||
<https://osbuild.org/docs/user-guide/blueprint-reference#installation-device>`_.
|
||||
* ``upload_options`` -- a dictionary with upload options specific to the
|
||||
target cloud environment. If provided, the image will be uploaded to the
|
||||
cloud environment, in addition to the Koji server. One can't combine
|
||||
arbitrary image types with arbitrary upload options.
|
||||
The dictionary keys differ based on the target cloud environment. The
|
||||
following keys are supported:
|
||||
|
||||
* **AWS EC2 upload options** -- upload to Amazon Web Services.
|
||||
|
||||
* ``region`` -- AWS region to upload the image to
|
||||
* ``share_with_accounts`` -- list of AWS account IDs to share the image
|
||||
with
|
||||
* ``snapshot_name`` -- Snapshot name of the uploaded EC2 image
|
||||
(optional)
|
||||
|
||||
* **AWS S3 upload options** -- upload to Amazon Web Services S3.
|
||||
|
||||
* ``region`` -- AWS region to upload the image to
|
||||
|
||||
* **Azure upload options** -- upload to Microsoft Azure.
|
||||
|
||||
* ``tenant_id`` -- Azure tenant ID to upload the image to
|
||||
* ``subscription_id`` -- Azure subscription ID to upload the image to
|
||||
* ``resource_group`` -- Azure resource group to upload the image to
|
||||
* ``location`` -- Azure location of the resource group (optional)
|
||||
* ``image_name`` -- Image name of the uploaded Azure image (optional)
|
||||
|
||||
* **GCP upload options** -- upload to Google Cloud Platform.
|
||||
|
||||
* ``region`` -- GCP region to upload the image to
|
||||
* ``bucket`` -- GCP bucket to upload the image to (optional)
|
||||
* ``share_with_accounts`` -- list of GCP accounts to share the image
|
||||
with
|
||||
* ``image_name`` -- Image name of the uploaded GCP image (optional)
|
||||
|
||||
* **Container upload options** -- upload to a container registry.
|
||||
|
||||
* ``name`` -- name of the container image (optional)
|
||||
* ``tag`` -- container tag to upload the image to (optional)
|
||||
|
||||
.. note::
|
||||
There is initial support for having this task as failable without aborting
|
||||
@ -1555,6 +1693,56 @@ OSBuild Composer for building images
|
||||
arch.
|
||||
|
||||
|
||||
Image container
|
||||
===============
|
||||
|
||||
This phase supports building containers in OSBS that embed an image created in
|
||||
the same compose. This can be useful for delivering the image to users running
|
||||
in containerized environments.
|
||||
|
||||
Pungi will start a ``buildContainer`` task in Koji with configured source
|
||||
repository. The ``Dockerfile`` can expect that a repo file will be injected
|
||||
into the container that defines a repo named ``image-to-include``, and its
|
||||
``baseurl`` will point to the image to include. It is possible to extract the
|
||||
URL with a command like ``dnf config-manager --dump image-to-include | awk
|
||||
'/baseurl =/{print $3}'``
|
||||
|
||||
**image_container**
|
||||
(*dict*) -- configuration for building containers embedding an image.
|
||||
|
||||
Format: ``{variant_uid_regex: [{...}]}``.
|
||||
|
||||
The inner object will define a single container. These keys are required:
|
||||
|
||||
* ``url``, ``target``, ``git_branch``. See OSBS section for definition of
|
||||
these.
|
||||
* ``image_spec`` -- (*object*) A string mapping of filters used to select
|
||||
the image to embed. All images listed in metadata for the variant will be
|
||||
processed. The keys of this filter are used to select metadata fields for
|
||||
the image, and values are regular expression that need to match the
|
||||
metadata value.
|
||||
|
||||
The filter should match exactly one image.
|
||||
|
||||
|
||||
Example config
|
||||
--------------
|
||||
::
|
||||
|
||||
image_container = {
|
||||
"^Server$": [{
|
||||
"url": "git://example.com/dockerfiles.git?#HEAD",
|
||||
"target": "f24-container-candidate",
|
||||
"git_branch": "f24",
|
||||
"image_spec": {
|
||||
"format": "qcow2",
|
||||
"arch": "x86_64",
|
||||
"path": ".*/guest-image-.*$",
|
||||
}
|
||||
}]
|
||||
}
|
||||
|
||||
|
||||
OSTree Settings
|
||||
===============
|
||||
|
||||
@ -1565,16 +1753,16 @@ another directory. Any new packages in the compose will be added to the
|
||||
repository with a new commit.
|
||||
|
||||
**ostree**
|
||||
(*dict*) -- a mapping of configuration for each. The format should be
|
||||
``{variant_uid_regex: config_dict}``. It is possible to use a list of
|
||||
(*dict*) -- a mapping of configuration for each variant. The format should
|
||||
be ``{variant_uid_regex: config_dict}``. It is possible to use a list of
|
||||
configuration dicts as well.
|
||||
|
||||
The configuration dict for each variant arch pair must have these keys:
|
||||
|
||||
* ``treefile`` -- (*str*) Filename of configuration for ``rpm-ostree``.
|
||||
* ``config_url`` -- (*str*) URL for Git repository with the ``treefile``.
|
||||
* ``repo`` -- (*str|dict|[str|dict]*) repos specified by URL or variant UID
|
||||
or a dict of repo options, ``baseurl`` is required in the dict.
|
||||
* ``repo`` -- (*str|dict|[str|dict]*) repos specified by URL or a dict of
|
||||
repo options, ``baseurl`` is required in the dict.
|
||||
* ``ostree_repo`` -- (*str*) Where to put the ostree repository
|
||||
|
||||
These keys are optional:
|
||||
@ -1594,6 +1782,8 @@ repository with a new commit.
|
||||
* ``force_new_commit`` -- (*bool*) Do not use rpm-ostree's built-in change
|
||||
detection.
|
||||
Defaults to ``False``.
|
||||
* ``unified_core`` -- (*bool*) Use rpm-ostree in unified core mode for composes.
|
||||
Defaults to ``False``.
|
||||
* ``version`` -- (*str*) Version string to be added as versioning metadata.
|
||||
If this option is set to ``!OSTREE_VERSION_FROM_LABEL_DATE_TYPE_RESPIN``,
|
||||
a value will be generated automatically as ``$VERSION.$RELEASE``.
|
||||
@ -1603,6 +1793,8 @@ repository with a new commit.
|
||||
* ``tag_ref`` -- (*bool*, default ``True``) If set to ``False``, a git
|
||||
reference will not be created.
|
||||
* ``ostree_ref`` -- (*str*) To override value ``ref`` from ``treefile``.
|
||||
* ``runroot_packages`` -- (*list*) A list of additional package names to be
|
||||
installed in the runroot environment in Koji.
|
||||
|
||||
Example config
|
||||
--------------
|
||||
@ -1612,13 +1804,11 @@ Example config
|
||||
"^Atomic$": {
|
||||
"treefile": "fedora-atomic-docker-host.json",
|
||||
"config_url": "https://git.fedorahosted.org/git/fedora-atomic.git",
|
||||
"keep_original_sources": True,
|
||||
"repo": [
|
||||
"Server",
|
||||
"http://example.com/repo/x86_64/os",
|
||||
{"baseurl": "Everything"},
|
||||
{"baseurl": "http://example.com/linux/repo", "exclude": "systemd-container"},
|
||||
],
|
||||
"keep_original_sources": True,
|
||||
"ostree_repo": "/mnt/koji/compose/atomic/Rawhide/",
|
||||
"update_summary": True,
|
||||
# Automatically generate a reasonable version
|
||||
@ -1634,6 +1824,79 @@ Example config
|
||||
has the pungi_ostree plugin installed.
|
||||
|
||||
|
||||
OSTree Native Container Settings
|
||||
================================
|
||||
|
||||
The ``ostree_container`` phase of *Pungi* can create an ostree native container
|
||||
image as an OCI archive. This is done by running ``rpm-ostree compose image``
|
||||
in a Koji runroot environment.
|
||||
|
||||
While rpm-ostree can use information from previously built images to improve
|
||||
the split in container layers, we cannot use that functionality until
|
||||
https://github.com/containers/skopeo/pull/2114 is resolved. Each invocation
|
||||
will thus create a new OCI archive image *from scratch*.
|
||||
|
||||
**ostree_container**
|
||||
(*dict*) -- a mapping of configuration for each variant. The format should
|
||||
be ``{variant_uid_regex: config_dict}``. It is possible to use a list of
|
||||
configuration dicts as well.
|
||||
|
||||
The configuration dict for each variant arch pair must have these keys:
|
||||
|
||||
* ``treefile`` -- (*str*) Filename of configuration for ``rpm-ostree``.
|
||||
* ``config_url`` -- (*str*) URL for Git repository with the ``treefile``.
|
||||
|
||||
These keys are optional:
|
||||
|
||||
* ``repo`` -- (*str|dict|[str|dict]*) repos specified by URL or a dict of
|
||||
repo options, ``baseurl`` is required in the dict.
|
||||
* ``keep_original_sources`` -- (*bool*) Keep the existing source repos in
|
||||
the tree config file. If not enabled, all the original source repos will
|
||||
be removed from the tree config file.
|
||||
* ``config_branch`` -- (*str*) Git branch of the repo to use. Defaults to
|
||||
``main``.
|
||||
* ``arches`` -- (*[str]*) List of architectures for which to generate
|
||||
ostree native container images. There will be one task per architecture.
|
||||
By default all architectures in the variant are used.
|
||||
* ``failable`` -- (*[str]*) List of architectures for which this
|
||||
deliverable is not release blocking.
|
||||
* ``version`` -- (*str*) Version string to be added to the OCI archive name.
|
||||
If this option is set to ``!OSTREE_VERSION_FROM_LABEL_DATE_TYPE_RESPIN``,
|
||||
a value will be generated automatically as ``$VERSION.$RELEASE``.
|
||||
If this option is set to ``!VERSION_FROM_VERSION_DATE_RESPIN``,
|
||||
a value will be generated automatically as ``$VERSION.$DATE.$RESPIN``.
|
||||
:ref:`See how those values are created <auto-version>`.
|
||||
* ``tag_ref`` -- (*bool*, default ``True``) If set to ``False``, a git
|
||||
reference will not be created.
|
||||
* ``runroot_packages`` -- (*list*) A list of additional package names to be
|
||||
installed in the runroot environment in Koji.
|
||||
|
||||
Example config
|
||||
--------------
|
||||
::
|
||||
|
||||
ostree_container = {
|
||||
"^Sagano$": {
|
||||
"treefile": "fedora-tier-0-38.yaml",
|
||||
"config_url": "https://gitlab.com/CentOS/cloud/sagano.git",
|
||||
"config_branch": "main",
|
||||
"repo": [
|
||||
"http://example.com/repo/x86_64/os",
|
||||
{"baseurl": "http://example.com/linux/repo", "exclude": "systemd-container"},
|
||||
],
|
||||
# Automatically generate a reasonable version
|
||||
"version": "!OSTREE_VERSION_FROM_LABEL_DATE_TYPE_RESPIN",
|
||||
# Only run this for x86_64 even if Sagano has more arches
|
||||
"arches": ["x86_64"],
|
||||
}
|
||||
}
|
||||
|
||||
**ostree_container_use_koji_plugin** = False
|
||||
(*bool*) -- When set to ``True``, the Koji pungi_ostree task will be
|
||||
used to execute rpm-ostree instead of runroot. Use only if the Koji instance
|
||||
has the pungi_ostree plugin installed.
|
||||
|
||||
|
||||
Ostree Installer Settings
|
||||
=========================
|
||||
|
||||
@ -1675,6 +1938,8 @@ an OSTree repository. This always runs in Koji as a ``runroot`` task.
|
||||
with the optional key:
|
||||
|
||||
* ``extra_runroot_pkgs`` -- (*[str]*)
|
||||
* ``skip_branding`` -- (*bool*) Stops lorax to install packages with branding.
|
||||
Defaults to ``False``.
|
||||
|
||||
**ostree_installer_overwrite** = False
|
||||
(*bool*) -- by default if a variant including OSTree installer also creates
|
||||
@ -1754,24 +2019,34 @@ they are not scratch builds).
|
||||
to create the image will not abort the whole compose.
|
||||
|
||||
The configuration will pass other attributes directly to the Koji task.
|
||||
This includes ``scratch`` and ``priority``.
|
||||
This includes ``scratch`` and ``priority``. See ``koji list-api
|
||||
buildContainer`` for more details about these options.
|
||||
|
||||
A value for ``yum_repourls`` will be created automatically and point at a
|
||||
repository in the current compose. You can add extra repositories with
|
||||
``repo`` key having a list of urls pointing to ``.repo`` files or just
|
||||
variant uid, Pungi will create the .repo file for that variant. ``gpgkey``
|
||||
can be specified to enable gpgcheck in repo files for variants.
|
||||
variant uid, Pungi will create the .repo file for that variant. If
|
||||
specific URL is used in the ``repo``, the ``$COMPOSE_ID`` variable in
|
||||
the ``repo`` string will be replaced with the real compose ID.
|
||||
``gpgkey`` can be specified to enable gpgcheck in repo files for variants.
|
||||
|
||||
**osbs_registries**
|
||||
(*dict*) -- It is possible to configure extra information about where to
|
||||
push the image (unless it is a scratch build). For each finished build,
|
||||
Pungi will try to match NVR against a key in this mapping (using shell-style
|
||||
globbing) and take the corresponding value and collect them across all built
|
||||
images. The data will be saved into ``logs/global/osbs-registries.json`` as
|
||||
a mapping from Koji NVR to the registry data. The same data is also sent to
|
||||
the message bus on ``osbs-request-push`` topic once the compose finishes
|
||||
successfully. Handling the message and performing the actual push is outside
|
||||
of scope for Pungi.
|
||||
(*dict*) -- Use this optional setting to emit ``osbs-request-push``
|
||||
messages for each non-scratch container build. These messages can guide
|
||||
other tools how to push the images to other registries. For example, an
|
||||
external tool might trigger on these messages and copy the images from
|
||||
OSBS's registry to a staging or production registry.
|
||||
|
||||
For each completed container build, Pungi will try to match the NVR against
|
||||
a key in ``osbs_registries`` mapping (using shell-style globbing) and take
|
||||
the corresponding value and collect them across all built images. Pungi
|
||||
will save this data into ``logs/global/osbs-registries.json``, mapping each
|
||||
Koji NVR to the registry data. Pungi will also send this data to the
|
||||
message bus on the ``osbs-request-push`` topic once the compose finishes
|
||||
successfully.
|
||||
|
||||
Pungi simply logs the mapped data and emits the messages. It does not
|
||||
handle the messages or push images. A separate tool must do that.
|
||||
|
||||
|
||||
Example config
|
||||
@ -1972,9 +2247,9 @@ Miscellaneous Settings
|
||||
format string accepting ``%(variant_name)s`` and ``%(arch)s`` placeholders.
|
||||
|
||||
**symlink_isos_to**
|
||||
(*str*) -- If set, the ISO files from ``buildinstall``, ``createiso`` and
|
||||
``live_images`` phases will be put into this destination, and a symlink
|
||||
pointing to this location will be created in actual compose directory.
|
||||
(*str*) -- If set, the ISO files from ``buildinstall`` and ``createiso``
|
||||
phases will be put into this destination, and a symlink pointing to this
|
||||
location will be created in actual compose directory.
|
||||
|
||||
**dogpile_cache_backend**
|
||||
(*str*) -- If set, Pungi will use the configured Dogpile cache backend to
|
||||
|
@ -30,9 +30,17 @@ This is a shortened configuration for Fedora Radhide compose as of 2019-10-14.
|
||||
module_defaults_dir = {
|
||||
'scm': 'git',
|
||||
'repo': 'https://pagure.io/releng/fedora-module-defaults.git',
|
||||
'branch': 'master',
|
||||
'branch': 'main',
|
||||
'dir': '.'
|
||||
}
|
||||
# Optional module obsoletes configuration which is merged
|
||||
# into the module index and gets resolved
|
||||
module_obsoletes_dir = {
|
||||
'scm': 'git',
|
||||
'repo': 'https://pagure.io/releng/fedora-module-defaults.git',
|
||||
'branch': 'main',
|
||||
'dir': 'obsoletes'
|
||||
}
|
||||
|
||||
variants_file='variants-fedora.xml'
|
||||
sigkeys = ['12C944D0']
|
||||
@ -83,7 +91,6 @@ This is a shortened configuration for Fedora Radhide compose as of 2019-10-14.
|
||||
|
||||
# CREATEISO
|
||||
iso_hfs_ppc64le_compatible = False
|
||||
create_jigdo = False
|
||||
|
||||
# BUILDINSTALL
|
||||
buildinstall_method = 'lorax'
|
||||
@ -287,30 +294,6 @@ This is a shortened configuration for Fedora Radhide compose as of 2019-10-14.
|
||||
})
|
||||
]
|
||||
|
||||
live_target = 'f32'
|
||||
live_images_no_rename = True
|
||||
live_images = [
|
||||
('^Workstation$', {
|
||||
'armhfp': {
|
||||
'kickstart': 'fedora-arm-workstation.ks',
|
||||
'name': 'Fedora-Workstation-armhfp',
|
||||
# Again workstation takes packages from Everything.
|
||||
'repo': 'Everything',
|
||||
'type': 'appliance',
|
||||
'failable': True,
|
||||
}
|
||||
}),
|
||||
('^Server$', {
|
||||
# But Server has its own repo.
|
||||
'armhfp': {
|
||||
'kickstart': 'fedora-arm-server.ks',
|
||||
'name': 'Fedora-Server-armhfp',
|
||||
'type': 'appliance',
|
||||
'failable': True,
|
||||
}
|
||||
}),
|
||||
]
|
||||
|
||||
ostree = {
|
||||
"^Silverblue$": {
|
||||
"version": "!OSTREE_VERSION_FROM_LABEL_DATE_TYPE_RESPIN",
|
||||
@ -325,6 +308,8 @@ This is a shortened configuration for Fedora Radhide compose as of 2019-10-14.
|
||||
"tag_ref": False,
|
||||
# Don't use change detection in ostree.
|
||||
"force_new_commit": True,
|
||||
# Use unified core mode for rpm-ostree composes
|
||||
"unified_core": True,
|
||||
# This is the location for the repo where new commit will be
|
||||
# created. Note that this is outside of the compose dir.
|
||||
"ostree_repo": "/mnt/koji/compose/ostree/repo/",
|
||||
@ -334,6 +319,20 @@ This is a shortened configuration for Fedora Radhide compose as of 2019-10-14.
|
||||
}
|
||||
}
|
||||
|
||||
ostree_container = {
|
||||
"^Sagano$": {
|
||||
"treefile": "fedora-tier-0-38.yaml",
|
||||
"config_url": "https://gitlab.com/CentOS/cloud/sagano.git",
|
||||
"config_branch": "main",
|
||||
# Consume packages from Everything
|
||||
"repo": "Everything",
|
||||
# Automatically generate a reasonable version
|
||||
"version": "!OSTREE_VERSION_FROM_LABEL_DATE_TYPE_RESPIN",
|
||||
# Only run this for x86_64 even if Sagano has more arches
|
||||
"arches": ["x86_64"],
|
||||
}
|
||||
}
|
||||
|
||||
ostree_installer = [
|
||||
("^Silverblue$", {
|
||||
"x86_64": {
|
||||
|
@ -19,7 +19,7 @@ Contents:
|
||||
scm_support
|
||||
messaging
|
||||
gathering
|
||||
koji
|
||||
comps
|
||||
contributing
|
||||
testing
|
||||
multi_compose
|
||||
|
105
doc/koji.rst
Normal file
105
doc/koji.rst
Normal file
@ -0,0 +1,105 @@
|
||||
======================
|
||||
Getting data from koji
|
||||
======================
|
||||
|
||||
When Pungi is configured to get packages from a Koji tag, it somehow needs to
|
||||
access the actual RPM files.
|
||||
|
||||
Historically, this required the storage used by Koji to be directly available
|
||||
on the host where Pungi was running. This was usually achieved by using NFS for
|
||||
the Koji volume, and mounting it on the compose host.
|
||||
|
||||
The compose could be created directly on the same volume. In such case the
|
||||
packages would be hardlinked, significantly reducing space consumption.
|
||||
|
||||
The compose could also be created on a different storage, in which case the
|
||||
packages would either need to be copied over or symlinked. Using symlinks
|
||||
requires that anything that accesses the compose (e.g. a download server) would
|
||||
also need to mount the Koji volume in the same location.
|
||||
|
||||
There is also a risk with symlinks that the package in Koji can change (due to
|
||||
being resigned for example), which would invalidate composes linking to it.
|
||||
|
||||
|
||||
Using Koji without direct mount
|
||||
===============================
|
||||
|
||||
It is possible now to run a compose from a Koji tag without direct access to
|
||||
Koji storage.
|
||||
|
||||
Pungi can download the packages over HTTP protocol, store them in a local
|
||||
cache, and consume them from there.
|
||||
|
||||
The local cache has similar structure to what is on the Koji volume.
|
||||
|
||||
When Pungi needs some package, it has a path on Koji volume. It will replace
|
||||
the ``topdir`` with the cache location. If such file exists, it will be used.
|
||||
If it doesn't exist, it will be downloaded from Koji (by replacing the
|
||||
``topdir`` with ``topurl``).
|
||||
|
||||
::
|
||||
|
||||
Koji path /mnt/koji/packages/foo/1/1.fc38/data/signed/abcdef/noarch/foo-1-1.fc38.noarch.rpm
|
||||
Koji URL https://kojipkgs.fedoraproject.org/packages/foo/1/1.fc38/data/signed/abcdef/noarch/foo-1-1.fc38.noarch.rpm
|
||||
Local path /mnt/compose/cache/packages/foo/1/1.fc38/data/signed/abcdef/noarch/foo-1-1.fc38.noarch.rpm
|
||||
|
||||
The packages can be hardlinked from this cache directory.
|
||||
|
||||
|
||||
Cleanup
|
||||
-------
|
||||
|
||||
While the approach above allows each RPM to be downloaded only once, it will
|
||||
eventually result in the Koji volume being mirrored locally. Most of the
|
||||
packages will however no longer be needed.
|
||||
|
||||
There is a script ``pungi-cache-cleanup`` that can help with that. It can find
|
||||
and remove files from the cache that are no longer needed.
|
||||
|
||||
A file is no longer needed if it has a single link (meaning it is only in the
|
||||
cache, not in any compose), and it has mtime older than a given threshold.
|
||||
|
||||
It doesn't make sense to delete files that are hardlinked in an existing
|
||||
compose as it would not save any space anyway.
|
||||
|
||||
The mtime check is meant to preserve files that are downloaded but not actually
|
||||
used in a compose, like a subpackage that is not included in any variant. Every
|
||||
time its existence in the local cache is checked, the mtime is updated.
|
||||
|
||||
|
||||
Race conditions?
|
||||
----------------
|
||||
|
||||
It should be safe to have multiple compose hosts share the same storage volume
|
||||
for generated composes and local cache.
|
||||
|
||||
If a cache file is accessed and it exists, there's no risk of race condition.
|
||||
|
||||
If two composes need the same file at the same time and it is not present yet,
|
||||
one of them will take a lock on it and start downloading. The other will wait
|
||||
until the download is finished.
|
||||
|
||||
The lock is only valid for a set amount of time (5 minutes) to avoid issues
|
||||
where the downloading process is killed in a way that blocks it from releasing
|
||||
the lock.
|
||||
|
||||
If the file is large and the network slow, the limit may not be enough to finish
|
||||
downloading. In that case the second process will steal the lock while the
|
||||
first process is still downloading. This will result in the same file being
|
||||
downloaded twice.
|
||||
|
||||
When the first process finishes the download, it will put the file into the
|
||||
local cache location. When the second process finishes, it will atomically
|
||||
replace it, but since the content is identical, the result will be unchanged.
|
||||
|
||||
If the first compose already managed to hardlink the file before it gets
|
||||
replaced, there will be two copies of the file present locally.
|
||||
|
||||
|
||||
Integrity checking
|
||||
------------------
|
||||
|
||||
There is minimal integrity checking. RPM packages belonging to real builds will
|
||||
be checked to match the checksum provided by Koji hub.
|
||||
|
||||
There is no checking for scratch builds or any images.
|
@ -12,8 +12,9 @@ happened. A JSON-encoded object will be passed to standard input to provide
|
||||
more information about the event. At the very least, the object will contain a
|
||||
``compose_id`` key.
|
||||
|
||||
The script is invoked in compose directory and can read other information
|
||||
there.
|
||||
The notification script inherits working directory from the parent process and it
|
||||
can be called from the same directory ``pungi-koji`` is called from. The working directory
|
||||
is listed at the start of main log.
|
||||
|
||||
Currently these messages are sent:
|
||||
|
||||
|
@ -1,107 +0,0 @@
|
||||
.. _multi_compose:
|
||||
|
||||
Managing compose from multiple parts
|
||||
====================================
|
||||
|
||||
There may be cases where it makes sense to split a big compose into separate
|
||||
parts, but create a compose output that links all output into one familiar
|
||||
structure.
|
||||
|
||||
The `pungi-orchestrate` tool allows that.
|
||||
|
||||
It works with an INI-style configuration file. The ``[general]`` section
|
||||
contains information about identity of the main compose. Other sections define
|
||||
individual parts.
|
||||
|
||||
The parts are scheduled to run in parallel, with the minimal amount of
|
||||
serialization. The final compose directory will contain hard-links to the
|
||||
files.
|
||||
|
||||
|
||||
General settings
|
||||
----------------
|
||||
|
||||
**target**
|
||||
Path to directory where the final compose should be created.
|
||||
**compose_type**
|
||||
Type of compose to make.
|
||||
**release_name**
|
||||
Name of the product for the final compose.
|
||||
**release_short**
|
||||
Short name of the product for the final compose.
|
||||
**release_version**
|
||||
Version of the product for the final compose.
|
||||
**release_type**
|
||||
Type of the product for the final compose.
|
||||
**extra_args**
|
||||
Additional arguments that will be passed to the child Pungi processes.
|
||||
**koji_profile**
|
||||
If specified, a current event will be retrieved from the Koji instance and
|
||||
used for all parts.
|
||||
|
||||
**kerberos**
|
||||
If set to yes, a kerberos ticket will be automatically created at the start.
|
||||
Set keytab and principal as well.
|
||||
**kerberos_keytab**
|
||||
Path to keytab file used to create the kerberos ticket.
|
||||
**kerberos_principal**
|
||||
Kerberos principal for the ticket
|
||||
|
||||
**pre_compose_script**
|
||||
Commands to execute before first part is started. Can contain multiple
|
||||
commands on separate lines.
|
||||
**post_compose_script**
|
||||
Commands to execute after the last part finishes and final status is
|
||||
updated. Can contain multiple commands on separate lines. ::
|
||||
|
||||
post_compose_script =
|
||||
compose-latest-symlink $COMPOSE_PATH
|
||||
custom-post-compose-script.sh
|
||||
|
||||
Multiple environment variables are defined for the scripts:
|
||||
|
||||
* ``COMPOSE_PATH``
|
||||
* ``COMPOSE_ID``
|
||||
* ``COMPOSE_DATE``
|
||||
* ``COMPOSE_TYPE``
|
||||
* ``COMPOSE_RESPIN``
|
||||
* ``COMPOSE_LABEL``
|
||||
* ``RELEASE_ID``
|
||||
* ``RELEASE_NAME``
|
||||
* ``RELEASE_SHORT``
|
||||
* ``RELEASE_VERSION``
|
||||
* ``RELEASE_TYPE``
|
||||
* ``RELEASE_IS_LAYERED`` – ``YES`` for layered products, empty otherwise
|
||||
* ``BASE_PRODUCT_NAME`` – only set for layered products
|
||||
* ``BASE_PRODUCT_SHORT`` – only set for layered products
|
||||
* ``BASE_PRODUCT_VERSION`` – only set for layered products
|
||||
* ``BASE_PRODUCT_TYPE`` – only set for layered products
|
||||
|
||||
**notification_script**
|
||||
Executable name (or path to a script) that will be used to send a message
|
||||
once the compose is finished. In order for a valid URL to be included in the
|
||||
message, at least one part must configure path translation that would apply
|
||||
to location of main compose.
|
||||
|
||||
Only two messages will be sent, one for start and one for finish (either
|
||||
successful or not).
|
||||
|
||||
|
||||
Partial compose settings
|
||||
------------------------
|
||||
|
||||
Each part should have a separate section in the config file.
|
||||
|
||||
It can specify these options:
|
||||
|
||||
**config**
|
||||
Path to configuration file that describes this part. If relative, it is
|
||||
resolved relative to the file with parts configuration.
|
||||
**just_phase**, **skip_phase**
|
||||
Customize which phases should run for this part.
|
||||
**depends_on**
|
||||
A comma separated list of other parts that must be finished before this part
|
||||
starts.
|
||||
**failable**
|
||||
A boolean toggle to mark a part as failable. A failure in such part will
|
||||
mark the final compose as incomplete, but still successful.
|
@ -30,17 +30,14 @@ packages to architectures.
|
||||
Buildinstall
|
||||
------------
|
||||
|
||||
Spawns a bunch of threads, each of which runs either ``lorax`` or
|
||||
``buildinstall`` command (the latter coming from ``anaconda`` package). The
|
||||
Spawns a bunch of threads, each of which runs the ``lorax`` command. The
|
||||
commands create ``boot.iso`` and other boot configuration files. The image is
|
||||
finally linked into the ``compose/`` directory as netinstall media.
|
||||
|
||||
The created images are also needed for creating live media or other images in
|
||||
later phases.
|
||||
|
||||
With ``lorax`` this phase runs one task per variant.arch combination. For
|
||||
``buildinstall`` command there is only one task per architecture and
|
||||
``product.img`` should be used to customize the results.
|
||||
With ``lorax`` this phase runs one task per variant.arch combination.
|
||||
|
||||
Gather
|
||||
------
|
||||
@ -115,16 +112,36 @@ ImageBuild
|
||||
This phase wraps up ``koji image-build``. It also updates the metadata
|
||||
ultimately responsible for ``images.json`` manifest.
|
||||
|
||||
KiwiBuild
|
||||
---------
|
||||
|
||||
Similarly to image build, this phase creates a koji `kiwiBuild` task. In the
|
||||
background it uses Kiwi to create images.
|
||||
|
||||
OSBuild
|
||||
-------
|
||||
|
||||
Similarly to image build, this phase creates a koji `osbuild` task. In the
|
||||
background it uses OSBuild Composer to create images.
|
||||
|
||||
OSBS
|
||||
----
|
||||
|
||||
This phase builds docker base images in `OSBS
|
||||
This phase builds container base images in `OSBS
|
||||
<http://osbs.readthedocs.io/en/latest/index.html>`_.
|
||||
|
||||
The finished images are available in registry provided by OSBS, but not
|
||||
downloaded directly into the compose. There is metadata about the created image
|
||||
in ``compose/metadata/osbs.json``.
|
||||
|
||||
ImageContainer
|
||||
--------------
|
||||
|
||||
This phase builds a container image in OSBS, and stores the metadata in the
|
||||
same file as OSBS phase. The container produced here wraps a different image,
|
||||
created in the ImageBuild or OSBuild phase. It can be useful to deliver a VM image
|
||||
to containerized environments.
|
||||
|
||||
OSTreeInstaller
|
||||
---------------
|
||||
|
||||
|
@ -41,6 +41,14 @@ which can contain following keys.
|
||||
* ``command`` -- defines a shell command to run after Git clone to generate the
|
||||
needed file (for example to run ``make``). Only supported in Git backend.
|
||||
|
||||
* ``options`` -- a dictionary of additional configuration options. These are
|
||||
specific to different backends.
|
||||
|
||||
Currently supported values for Git:
|
||||
|
||||
* ``credential_helper`` -- path to a credential helper used to supply
|
||||
username/password for remotes that require authentication.
|
||||
|
||||
|
||||
Koji examples
|
||||
-------------
|
||||
|
516
pungi.spec
516
pungi.spec
@ -1,26 +1,24 @@
|
||||
%{?python_enable_dependency_generator}
|
||||
|
||||
Name: pungi
|
||||
Version: 4.2.15
|
||||
Release: 1%{?dist}.cloudlinux
|
||||
Version: 4.7.0
|
||||
Release: 6%{?dist}.alma
|
||||
Summary: Distribution compose tool
|
||||
|
||||
License: GPLv2
|
||||
License: GPL-2.0-only
|
||||
URL: https://pagure.io/pungi
|
||||
Source0: %{name}-%{version}.tar.bz2
|
||||
|
||||
BuildRequires: python3-nose
|
||||
BuildRequires: make
|
||||
BuildRequires: python3-pytest
|
||||
BuildRequires: python3-mock
|
||||
BuildRequires: python3-pyfakefs
|
||||
BuildRequires: python3-ddt
|
||||
# replaced by unittest.mock
|
||||
# BuildRequires: python3-mock
|
||||
BuildRequires: python3-devel
|
||||
BuildRequires: python3-setuptools
|
||||
BuildRequires: python3-productmd >= 1.33
|
||||
BuildRequires: python3-kobo-rpmlib >= 0.18.0
|
||||
BuildRequires: createrepo_c
|
||||
BuildRequires: createrepo_c >= 0.20.1
|
||||
BuildRequires: python3-lxml
|
||||
BuildRequires: python3-ddt
|
||||
BuildRequires: python3-kickstart
|
||||
BuildRequires: python3-rpm
|
||||
BuildRequires: python3-dnf
|
||||
@ -33,38 +31,60 @@ BuildRequires: python3-kobo
|
||||
BuildRequires: python3-koji
|
||||
BuildRequires: lorax
|
||||
BuildRequires: python3-PyYAML
|
||||
BuildRequires: libmodulemd >= 2.8.0
|
||||
BuildRequires: python3-libmodulemd >= 2.8.0
|
||||
BuildRequires: python3-gobject
|
||||
BuildRequires: python3-createrepo_c
|
||||
BuildRequires: python3-createrepo_c >= 0.20.1
|
||||
BuildRequires: python3-dogpile-cache
|
||||
BuildRequires: python3-parameterized
|
||||
BuildRequires: python3-flufl-lock
|
||||
BuildRequires: python3-ddt
|
||||
BuildRequires: python3-distro
|
||||
BuildRequires: python3-gobject-base
|
||||
BuildRequires: python3-pgpy
|
||||
BuildRequires: python3-pyfakefs
|
||||
%if %{rhel} == 8
|
||||
BuildRequires: python3-dataclasses
|
||||
%endif
|
||||
|
||||
#deps for doc building
|
||||
BuildRequires: python3-sphinx
|
||||
|
||||
Requires: python3-kobo-rpmlib >= 0.18.0
|
||||
Requires: python3-productmd >= 1.33
|
||||
Requires: python3-kickstart
|
||||
Requires: python3-requests
|
||||
Requires: python3-dataclasses
|
||||
Requires: createrepo_c
|
||||
Requires: createrepo_c >= 0.20.1
|
||||
Requires: koji >= 1.10.1-13
|
||||
Requires: python3-koji-cli-plugins
|
||||
Requires: isomd5sum
|
||||
%if %{rhel} == 8 || %{rhel} == 9
|
||||
Requires: genisoimage
|
||||
%else
|
||||
Recommends: genisoimage
|
||||
%endif
|
||||
Requires: git
|
||||
Requires: python3-dnf
|
||||
Requires: python3-multilib
|
||||
Requires: python3-libcomps
|
||||
Requires: python3-koji
|
||||
Requires: libmodulemd >= 2.8.0
|
||||
Requires: python3-libmodulemd >= 2.8.0
|
||||
Requires: python3-gobject
|
||||
Requires: python3-createrepo_c
|
||||
Requires: python3-createrepo_c >= 0.20.1
|
||||
Requires: python3-PyYAML
|
||||
Requires: python3-gobject-base
|
||||
Requires: python3-productmd >= 1.38
|
||||
Requires: python3-flufl-lock
|
||||
%if %{rhel} == 10
|
||||
Requires: xorriso
|
||||
%else
|
||||
Recommends: xorriso
|
||||
%endif
|
||||
Requires: python3-productmd >= 1.33
|
||||
Requires: lorax
|
||||
Requires: python3-distro
|
||||
Requires: python3-gobject-base
|
||||
Requires: python3-pgpy
|
||||
Requires: python3-requests
|
||||
%if %{rhel} == 8
|
||||
Requires: python3-dataclasses
|
||||
%endif
|
||||
|
||||
# This package is not available on i686, hence we cannot require it
|
||||
# See https://bugzilla.redhat.com/show_bug.cgi?id=1743421
|
||||
@ -80,7 +100,7 @@ A tool to create anaconda based installation trees/isos of a set of rpms.
|
||||
%package utils
|
||||
Summary: Utilities for working with finished composes
|
||||
Requires: pungi = %{version}-%{release}
|
||||
# Requires: python3-fedmsg
|
||||
Requires: python3-fedora-messaging
|
||||
|
||||
%description utils
|
||||
These utilities work with finished composes produced by Pungi. They can be used
|
||||
@ -89,8 +109,8 @@ notification to Fedora Message Bus.
|
||||
|
||||
%package -n python3-%{name}
|
||||
Summary: Python 3 libraries for pungi
|
||||
Requires: python3-attrs
|
||||
Requires: fus
|
||||
Requires: python3-attrs
|
||||
|
||||
%description -n python3-%{name}
|
||||
Python library with code for Pungi. This is not a public library and there are
|
||||
@ -110,21 +130,14 @@ gzip _build/man/pungi.1
|
||||
|
||||
%install
|
||||
%py3_install
|
||||
%{__install} -d %{buildroot}/var/cache/pungi
|
||||
%{__install} -d %{buildroot}/var/cache/pungi/createrepo_c
|
||||
%{__install} -d %{buildroot}%{_mandir}/man1
|
||||
%{__install} -m 0644 doc/_build/man/pungi.1.gz %{buildroot}%{_mandir}/man1
|
||||
|
||||
rm %{buildroot}%{_bindir}/pungi
|
||||
|
||||
# CLOUDLINUX: We don't need fedmsg stuff
|
||||
rm %{buildroot}%{_bindir}/%{name}-fedmsg-notification
|
||||
|
||||
%check
|
||||
python3 -m pytest
|
||||
# master branch part of %check segment. Currently it doesn't work
|
||||
# because of pungi-koji requirement in bash tests
|
||||
#./tests/data/specs/build.sh
|
||||
#cd tests && ./test_compose.sh
|
||||
%pytest
|
||||
|
||||
%files
|
||||
%license COPYING GPL
|
||||
@ -140,7 +153,9 @@ python3 -m pytest
|
||||
%{_bindir}/%{name}-make-ostree
|
||||
%{_mandir}/man1/pungi.1.gz
|
||||
%{_datadir}/pungi
|
||||
/var/cache/pungi
|
||||
%{_localstatedir}/cache/pungi
|
||||
%dir %attr(1777, root, root) %{_localstatedir}/cache/pungi/createrepo_c
|
||||
%{_tmpfilesdir}/pungi-clean-cache.conf
|
||||
|
||||
%files -n python3-%{name}
|
||||
%{python3_sitelib}/%{name}
|
||||
@ -151,15 +166,343 @@ python3 -m pytest
|
||||
%{_bindir}/%{name}-create-unified-isos
|
||||
%{_bindir}/%{name}-config-dump
|
||||
%{_bindir}/%{name}-config-validate
|
||||
# %{_bindir}/%{name}-fedmsg-notification
|
||||
%{_bindir}/%{name}-fedmsg-notification
|
||||
%{_bindir}/%{name}-notification-report-progress
|
||||
%{_bindir}/%{name}-orchestrate
|
||||
%{_bindir}/%{name}-patch-iso
|
||||
%{_bindir}/%{name}-compare-depsolving
|
||||
%{_bindir}/%{name}-wait-for-signed-ostree-handler
|
||||
|
||||
%{_bindir}/%{name}-cache-cleanup
|
||||
|
||||
%changelog
|
||||
* Fri Sep 27 2024 Stepan Oksanichenko <soksanichenko@almalinux.org> - 4.7.0-6
|
||||
- Add x86_64_v2 to a list of exclusive arches if there is any arch with base `x86_64`
|
||||
|
||||
* Mon Sep 16 2024 Eduard Abdullin <eabdullin@almalinux.org> - 4.7.0-5
|
||||
- Add x86_64_v2 to arch list if x86_64 in list
|
||||
|
||||
* Fri Sep 06 2024 Stepan Oksanichenko <soksanichenko@almalinux.org> - 4.7.0-4
|
||||
- Truncate a volume ID to 32 bytes
|
||||
- Add new architecture `x86_64_v2`
|
||||
|
||||
* Thu Sep 05 2024 Stepan Oksanichenko <soksanichenko@almalinux.org> - 4.7.0-2
|
||||
- Use xorriso as recommended package and genisoimage as required for RHEL8/9 and vice versa for RHEL10
|
||||
|
||||
* Thu Aug 22 2024 Lubomír Sedlář <lsedlar@redhat.com> - 4.7.0-1
|
||||
- kiwibuild: Add support for type, type attr and bundle format (lsedlar)
|
||||
- createiso: Block reuse if unsigned packages are allowed (lsedlar)
|
||||
- Allow live_images phase to still be skipped (lsedlar)
|
||||
- createiso: Recompute .treeinfo checksums for images (lsedlar)
|
||||
- Drop support for signing rpm-wrapped artifacts (lsedlar)
|
||||
- Remove live_images.py (LiveImagesPhase) (awilliam)
|
||||
- Clean up requirements (lsedlar)
|
||||
- Update pungi.spec for py3 (hlin)
|
||||
|
||||
* Fri Jul 19 2024 Fedora Release Engineering <releng@fedoraproject.org> - 4.6.3-2
|
||||
- Rebuilt for https://fedoraproject.org/wiki/Fedora_41_Mass_Rebuild
|
||||
|
||||
* Fri Jul 12 2024 Haibo Lin <hlin@redhat.com> - 4.6.3-1
|
||||
- Fix formatting of long line (lsedlar)
|
||||
- unified-isos: Resolve symlinks (lsedlar)
|
||||
- gather: Skip lookaside packages from local lookaside repo (lsedlar)
|
||||
- pkgset: Avoid adding modules to unavailable arches (hlin)
|
||||
- iso: Extract volume id with xorriso if available (lsedlar)
|
||||
- De-duplicate log messages for ostree and ostree_container phases (awilliam)
|
||||
- Handle tracebacks as str or bytes (lsedlar)
|
||||
- ostree/container: add missing --version arg (awilliam)
|
||||
- Block pkgset reuse on module defaults change (lsedlar)
|
||||
- Include task ID in DONE message for OSBS phase (awilliam)
|
||||
- Various phases: consistent format of failure message (awilliam)
|
||||
- Update tests to exercise kiwi specific metadata (lsedlar)
|
||||
- Kiwi: translate virtualbox and azure productmd formats (awilliam)
|
||||
- kiwibuild: Add tests for the basic functionality (lsedlar)
|
||||
- kiwibuild: Remove repos as dicts (lsedlar)
|
||||
- Fix additional image metadata (lsedlar)
|
||||
- Drop kiwibuild_version option (lsedlar)
|
||||
- Update docs with kiwibuild options (lsedlar)
|
||||
- kiwibuild: allow setting description scm and path at phase level (awilliam)
|
||||
- Use latest Fedora for python 3 test environment (lsedlar)
|
||||
- Install unittest2 only on python 2 (lsedlar)
|
||||
- Fix 'failable' handling for kiwibuild phase (awilliam)
|
||||
- image_build: Accept Kiwi extension for Azure VHD images (jeremycline)
|
||||
- image_build: accept Kiwi vagrant image name format (awilliam)
|
||||
|
||||
* Sun Jun 09 2024 Python Maint <python-maint@redhat.com> - 4.6.2-7
|
||||
- Rebuilt for Python 3.13
|
||||
|
||||
* Fri May 31 2024 Lubomír Sedlář <lsedlar@redhat.com> - 4.6.2-6
|
||||
- Rebuild to bump release over f40-infra build
|
||||
|
||||
* Fri May 31 2024 Lubomír Sedlář <lsedlar@redhat.com> - 4.6.2-2
|
||||
- Add dependency on xorriso, fixes rhbz#2278677
|
||||
|
||||
* Tue Apr 30 2024 Lubomír Sedlář <lsedlar@redhat.com> - 4.6.2-1
|
||||
- Phases/osbuild: support passing 'customizations' for image builds (thozza)
|
||||
- dnf: Load filelists for actual solver too (lsedlar)
|
||||
- kiwibuild: Tell Koji which arches are allowed to fail (lsedlar)
|
||||
- kiwibuild: Update documentation with more details (lsedlar)
|
||||
- kiwibuild: Add kiwibuild global options (lsedlar)
|
||||
- kiwibuild: Process images same as image-build (lsedlar)
|
||||
- kiwibuild: Add subvariant configuration (lsedlar)
|
||||
- kiwibuild: Work around missing arch in build data (lsedlar)
|
||||
- Support KiwiBuild (hlin)
|
||||
- ostree/container: Set version in treefile 'automatic-version-prefix' (tim)
|
||||
- dnf: Explicitly load filelists (lsedlar)
|
||||
- Fix buildinstall reuse with pungi_buildinstall plugin (lsedlar)
|
||||
- Fix filters for DNF query (lsedlar)
|
||||
- gather-dnf: Support dotarch in filter_packages (lsedlar)
|
||||
- gather: Support dotarch notation for debuginfo packages (lsedlar)
|
||||
- Correctly set input and fultree_exclude flags for debuginfo (lsedlar)
|
||||
|
||||
* Fri Feb 09 2024 Lubomír Sedlář <lsedlar@redhat.com> - 4.6.1-1
|
||||
- Make python3-mock dependency optional (lsedlar)
|
||||
- Make latest black happy (lsedlar)
|
||||
- Update tox configuration (lsedlar)
|
||||
- Fix scm tests to not use user configuration (lsedlar)
|
||||
- Add workaround for old requests in kojiwrapper (lsedlar)
|
||||
- Use pungi_buildinstall without NFS (lsedlar)
|
||||
- checks: don't require "repo" in the "ostree" schema (awilliam)
|
||||
- ostree_container: Use unique temporary directory (lsedlar)
|
||||
|
||||
* Fri Jan 26 2024 Maxwell G <maxwell@gtmx.me> - 4.6.0-5
|
||||
- Remove python3-mock dependency
|
||||
|
||||
* Fri Jan 26 2024 Fedora Release Engineering <releng@fedoraproject.org> - 4.6.0-4
|
||||
- Rebuilt for https://fedoraproject.org/wiki/Fedora_40_Mass_Rebuild
|
||||
|
||||
* Sun Jan 21 2024 Fedora Release Engineering <releng@fedoraproject.org> - 4.6.0-3
|
||||
- Rebuilt for https://fedoraproject.org/wiki/Fedora_40_Mass_Rebuild
|
||||
|
||||
* Fri Jan 19 2024 Lubomír Sedlář <lsedlar@redhat.com> - 4.6.0-3
|
||||
- Stop requiring repo option in ostree phase
|
||||
|
||||
* Thu Jan 18 2024 Lubomír Sedlář <lsedlar@redhat.com> - 4.6.0-2
|
||||
- ostree_container: Use unique temporary directory
|
||||
|
||||
* Wed Dec 13 2023 Lubomír Sedlář <lsedlar@redhat.com> - 4.6.0-1
|
||||
- Add ostree container to image metadata (lsedlar)
|
||||
- Updates for ostree-container phase (lsedlar)
|
||||
- Add ostree native container support (tim)
|
||||
- Improve autodetection of productmd image type for osbuild images (awilliam)
|
||||
- pkgset: ignore events for modular content tags (lsedlar)
|
||||
- pkgset: Ignore duplicated module builds (lsedlar)
|
||||
- Drop buildinstall method (abisoi)
|
||||
- Add step to send UMB message (lzhuang)
|
||||
- Fix minor Ruff/flake8 warnings (tim)
|
||||
- osbuild: manifest type in config (cmdr)
|
||||
|
||||
* Mon Sep 25 2023 Lubomír Sedlář <lsedlar@redhat.com> - 4.5.0-7
|
||||
- Backport patch for explicit setting of osbuild image type in metadata
|
||||
|
||||
* Mon Nov 21 2023 Stepan Oksanichenko <soksanichenko@almalinux.org> - 4.5.0-3
|
||||
- Method `get_remote_file_content` is object's method now
|
||||
|
||||
* Wed Nov 15 2023 Stepan Oksanichenko <soksanichenko@almalinux.org> - 4.5.0-2
|
||||
- Return empty list if a repo doesn't contain any module
|
||||
|
||||
* Thu Aug 31 2023 Lubomír Sedlář <lsedlar@redhat.com> - 4.5.0-1
|
||||
- kojiwrapper: Stop being smart about local access (lsedlar)
|
||||
- Fix unittest errors (ounsal)
|
||||
- Add integrity checking for builds (lsedlar)
|
||||
- Add script for cleaning up the cache (lsedlar)
|
||||
- Add ability to download images (lsedlar)
|
||||
- Add support for not having koji volume mounted locally (lsedlar)
|
||||
- Remove repository cloning multiple times (abisoi)
|
||||
- Support require_all_comps_packages on DNF backend (lsedlar)
|
||||
- Fix new warnings from flake8 (lsedlar)
|
||||
|
||||
* Tue Jul 25 2023 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.3.7-8
|
||||
- Option `excluded-packages` for script `pungi-gather-rpms`
|
||||
|
||||
* Tue Jul 25 2023 Lubomír Sedlář <lsedlar@redhat.com> - 4.4.1-1
|
||||
- ostree: Add configuration for custom runroot packages (lsedlar)
|
||||
- pkgset: Emit better error for missing modulemd file (lsedlar)
|
||||
- Add support for git-credential-helper (lsedlar)
|
||||
- Support OIDC Client Credentials authentication to CTS (hlin)
|
||||
|
||||
* Fri Jul 21 2023 Fedora Release Engineering <releng@fedoraproject.org> - 4.4.0-4
|
||||
- Rebuilt for https://fedoraproject.org/wiki/Fedora_39_Mass_Rebuild
|
||||
|
||||
* Wed Jul 19 2023 Lubomír Sedlář <lsedlar@redhat.com> - 4.4.0-3
|
||||
- Backport ostree runroot package additions
|
||||
|
||||
* Wed Jul 19 2023 Lubomír Sedlář <lsedlar@redhat.com> - 4.4.0-2
|
||||
- Backport ostree runroot package additions
|
||||
|
||||
* Mon Jun 19 2023 Python Maint <python-maint@redhat.com> - 4.4.0-2
|
||||
- Rebuilt for Python 3.12
|
||||
|
||||
* Wed Jun 07 2023 Lubomír Sedlář <lsedlar@redhat.com> - 4.4.0-1
|
||||
- gather-dnf: Run latest() later (lsedlar)
|
||||
- iso: Support joliet long names (lsedlar)
|
||||
- Drop pungi-orchestrator code (lsedlar)
|
||||
- isos: Ensure proper file ownership and permissions (lsedlar)
|
||||
- gather: Always get latest packages (lsedlar)
|
||||
- Add back compatibility with jsonschema <3.0.0 (lsedlar)
|
||||
- Remove useless debug message (lsedlar)
|
||||
- Remove fedmsg from requirements (lsedlar)
|
||||
- gather: Support dotarch in DNF backend (lsedlar)
|
||||
- Fix compatibility with createrepo_c 0.21.1 (lsedlar)
|
||||
- comps: Apply arch filtering to environment/optionlist (lsedlar)
|
||||
- Add config file for cleaning up cache files (hlin)
|
||||
|
||||
* Wed May 17 2023 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.8-3
|
||||
- Rebuild without fedmsg dependency
|
||||
|
||||
* Wed May 03 2023 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.8-1
|
||||
- Set priority for Fedora messages
|
||||
|
||||
* Thu Apr 13 2023 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.3.7-7
|
||||
- gather-module can find modules through symlinks
|
||||
|
||||
* Thu Apr 13 2023 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.3.7-6
|
||||
- CLI option `--label` can be passed through a Pungi config file
|
||||
|
||||
* Fri Mar 31 2023 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.3.7-4
|
||||
- ALBS-1030: Generate Devel section in packages.json
|
||||
- Also the tool can combine (remove and add) packages in a variant from different sources according to an url's type of source
|
||||
- Some upstream changes to KojiMock part
|
||||
- Skip verifying an RPM signature if sigkeys are empty
|
||||
- ALBS-987: Generate i686 and dev repositories with pungi on building new distr. version automatically
|
||||
- [Generator of packages.json] Replace using CLI by config.yaml
|
||||
- [Gather RPMs] os.path is replaced by Path
|
||||
|
||||
* Thu Mar 30 2023 Haibo Lin <hlin@redhat.com> - 4.3.8-1
|
||||
- createiso: Update possibly changed file on DVD (lsedlar)
|
||||
- pkgset: Stop reuse if configuration changed (lsedlar)
|
||||
- Allow disabling inheriting ExcludeArch to noarch packages (lsedlar)
|
||||
- pkgset: Support extra builds with no tags (lsedlar)
|
||||
- buildinstall: Avoid pointlessly tweaking the boot images (lsedlar)
|
||||
- Prevent to reuse if unsigned packages are allowed (hlin)
|
||||
- Pass parent id/respin id to CTS (lsedlar)
|
||||
- Exclude existing files in boot.iso (hlin)
|
||||
- image-build/osbuild: Pull ISOs into the compose (lsedlar)
|
||||
- Retry 401 error from CTS (lsedlar)
|
||||
- gather: Better detection of debuginfo in lookaside (lsedlar)
|
||||
- Log versions of all installed packages (hlin)
|
||||
- Use authentication for all CTS calls (lsedlar)
|
||||
- Fix black complaints (lsedlar)
|
||||
- Add vhd.gz extension to compressed VHD images (lsedlar)
|
||||
- Add vhd-compressed image type (lsedlar)
|
||||
- Update to work with latest mock (lsedlar)
|
||||
- Default bztar format for sdist command (onosek)
|
||||
|
||||
* Fri Mar 17 2023 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.3.7-3
|
||||
- ALBS-987: Generate i686 repositories with pungi on building new distr. version automatically
|
||||
- KojiMock extracts all modules which are suitable for the variant's arches
|
||||
- An old code is removed or refactored
|
||||
|
||||
* Fri Jan 20 2023 Fedora Release Engineering <releng@fedoraproject.org> - 4.3.7-2
|
||||
- Rebuilt for https://fedoraproject.org/wiki/Fedora_38_Mass_Rebuild
|
||||
|
||||
* Fri Dec 09 2022 Ondřej Nosek <onosek@redhat.com> - 4.3.7-1
|
||||
- osbuild: test passing of rich repos from configuration (lsedlar)
|
||||
- osbuild: support specifying `package_sets` for repos (thozza)
|
||||
- osbuild: don't use `util.get_repo_urls()` (thozza)
|
||||
- osbuild: update schema and config documentation (thozza)
|
||||
- Speed up tests by 30 seconds (lsedlar)
|
||||
- Stop sending compose paths to CTS (lsedlar)
|
||||
- Report errors from CTS (lsedlar)
|
||||
- createiso: Create Joliet tree with xorriso (lsedlar)
|
||||
- init: Filter comps for modular variants with tags (lsedlar)
|
||||
- Retry failed cts requests (hlin)
|
||||
- Ignore existing kerberos ticket for CTS auth (lsedlar)
|
||||
- osbuild: support specifying upload_options (thozza)
|
||||
- osbuild: accept only a single image type in the configuration (thozza)
|
||||
- Add Jenkinsfile for CI (hlin)
|
||||
- profiler: Flush stdout before printing (lsedlar)
|
||||
|
||||
* Sat Nov 12 2022 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.3.6-3
|
||||
- AlmaLinux version. Updates from upstream
|
||||
|
||||
* Mon Nov 07 2022 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.6-2
|
||||
- Stop including comps in modular repos
|
||||
|
||||
* Wed Oct 19 2022 stepan_oksanichenko <soksanichenko@cloudlinux.com> - 4.2.17-1
|
||||
- Replace list of cr.packages by cr.PackageIterator in package JSON generator
|
||||
- Do not lose a module from koji if we have more than one arch (e.g. x86_64 + i686)
|
||||
|
||||
* Fri Aug 26 2022 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.6-1
|
||||
- pkgset: Report better error when module is missing an arch (lsedlar)
|
||||
- osbuild: add support for building ostree artifacts (ondrej)
|
||||
- ostree: Add unified core mode for compose in rpm-ostree (tim)
|
||||
- createiso: Make ISO level more granular (lsedlar)
|
||||
- Create DVDs with xorriso (lsedlar)
|
||||
- Fix compatibility with jsonschema >= 4.0.0 (lsedlar)
|
||||
- Fix black complaint (lsedlar)
|
||||
- doc: fix osbuild's image_types field name (ondrej)
|
||||
- Convert _ssh_run output to str for python3 (hlin)
|
||||
- Print more logs for git_ls_remote (hlin)
|
||||
- Log time taken of each phase (hlin)
|
||||
- Avoid crash when loading pickle file failed (hlin)
|
||||
- extra_isos: Fix detection of changed packages (lsedlar)
|
||||
|
||||
* Thu Aug 11 2022 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.5-8
|
||||
- Backport jsonschema compatibility patch (rhbz#2113607)
|
||||
|
||||
* Mon Jul 25 2022 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.5-7
|
||||
- Update xorriso patch
|
||||
|
||||
* Fri Jul 22 2022 Fedora Release Engineering <releng@fedoraproject.org> - 4.3.5-6
|
||||
- Rebuilt for https://fedoraproject.org/wiki/Fedora_37_Mass_Rebuild
|
||||
|
||||
* Mon Jun 20 2022 Python Maint <python-maint@redhat.com> - 4.3.5-5
|
||||
- Rebuilt for Python 3.11
|
||||
|
||||
* Thu Jun 16 2022 Adam Williamson <awilliam@redhat.com> - 4.3.5-4
|
||||
- Don't try and run isohybrid when using xorriso
|
||||
|
||||
* Wed Jun 15 2022 Python Maint <python-maint@redhat.com> - 4.3.5-3
|
||||
- Rebuilt for Python 3.11
|
||||
|
||||
* Wed Jun 15 2022 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.5-2
|
||||
- Backport patch for building DVDs with xorriso command again
|
||||
|
||||
* Wed Jun 15 2022 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.5-1
|
||||
- Fix module defaults and obsoletes validation (mkulik)
|
||||
- Update the cts_keytab field in order to get the hostname of the server
|
||||
(ounsal)
|
||||
- Add skip_branding to ostree_installer. (lzhuang)
|
||||
- kojiwrapper: Ignore warnings before task id (lsedlar)
|
||||
- Restrict jsonschema version (lsedlar)
|
||||
- Revert "Do not clone the same repository multiple times, re-use already
|
||||
cloned repository" (hlin)
|
||||
- Involve bandit (hlin)
|
||||
|
||||
* Wed Jun 08 2022 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.4-2
|
||||
- Backport patch for building DVDs with xorriso command
|
||||
|
||||
* Wed May 4 2022 stepan_oksanichenko <soksanichenko@cloudlinux.com> - 4.2.16-1
|
||||
- ALBS-334: Make the ability of Pungi to give module_defaults from remote sources
|
||||
|
||||
* Mon Apr 04 2022 Ondřej Nosek <onosek@redhat.com> - 4.3.4-1
|
||||
- kojiwrapper: Add retries to login call (lsedlar)
|
||||
- Variants file in config can contain path (onosek)
|
||||
- nomacboot option for livemedia koji tasks (cobrien)
|
||||
- doc: improve osbs_registries explanation (kdreyer)
|
||||
- osbs: only handle archives of type "image" (kdreyer)
|
||||
- Update the default greedy_method value in doc (ounsal)
|
||||
- Fix the wrong working directory for the progress_notification script (ounsal)
|
||||
- Filter out environment groups unmatch given arch (hlin)
|
||||
- profiler: Respect provided output stream (lsedlar)
|
||||
- modules: Correct a typo in loading obsoletes (ppisar)
|
||||
- Do not clone the same repository multiple times, re-use already cloned
|
||||
repository (ounsal)
|
||||
|
||||
* Fri Feb 04 2022 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.3-3
|
||||
- Backport typo fix
|
||||
|
||||
* Fri Jan 21 2022 Fedora Release Engineering <releng@fedoraproject.org> - 4.3.3-2
|
||||
- Rebuilt for https://fedoraproject.org/wiki/Fedora_36_Mass_Rebuild
|
||||
|
||||
* Fri Jan 14 2022 Haibo Lin <hlin@redhat.com> - 4.3.3-1
|
||||
- hybrid: Explicitly pull in debugsource packages (lsedlar)
|
||||
- Add module obsoletes feature (fvalder)
|
||||
- buildinstall: Add ability to install extra packages in runroot (ounsal)
|
||||
- Ignore osbs/osbuild config when reusing iso images (hlin)
|
||||
- compose: Make sure temporary dirs are world readable (lsedlar)
|
||||
- Pass compose parameter for debugging git issue (hlin)
|
||||
- Generate images.json for extra_isos phase (hlin)
|
||||
- Fix tests for python 2.6 (hlin)
|
||||
|
||||
* Thu Dec 30 2021 stepan_oksanichenko <soksanichenko@cloudlinux.com> - 4.2.15-1
|
||||
- ALBS-97: The scripts `gather_modules` and `generate_packages_json` support LZMA compression
|
||||
@ -168,21 +511,116 @@ python3 -m pytest
|
||||
* Mon Dec 20 2021 stepan_oksanichenko <soksanichenko@cloudlinux.com> - 4.2.14-1
|
||||
- ALBS-66: The generator of packages JSON can process the same packages with different versions
|
||||
|
||||
* Mon Nov 15 2021 Haibo Lin <hlin@redhat.com> - 4.3.2-2
|
||||
- Backport patch for generating images.json
|
||||
|
||||
* Thu Nov 11 2021 Haibo Lin <hlin@redhat.com> - 4.3.2-1
|
||||
- gather: Load JSON mapping relative to config dir (lsedlar)
|
||||
- gather: Stop requiring all variants/arches in JSON (lsedlar)
|
||||
- doc: make dnf "backend" settings easier to discover (kdreyer)
|
||||
- Remove with_jigdo argument (lsedlar)
|
||||
- Check dependencies after config validation (lsedlar)
|
||||
- default "with_jigdo" to False (kdreyer)
|
||||
- Stop trying to validate non-existent metadata (lsedlar)
|
||||
- test images for metadata deserialization error (fdipretre)
|
||||
- repoclosure: Use --forcearch for dnf repoclosure (lsedlar)
|
||||
- extra_isos: Allow reusing old images (lsedlar)
|
||||
- createiso: Allow reusing old images (lsedlar)
|
||||
- Remove default runroot channel (lsedlar)
|
||||
|
||||
* Tue Oct 26 2021 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.1-1
|
||||
- Correct irc network name & add matrix room (dan.cermak)
|
||||
- Add missing mock to osbs tests (lsedlar)
|
||||
- osbs: Reuse images from old compose (hlin)
|
||||
- image_build: Allow reusing old image_build results (hlin)
|
||||
- Allow ISO-Level configuration within the config file (ounsal)
|
||||
- Work around ODCS creating COMPOSE_ID later (lsedlar)
|
||||
- When `cts_url` is configured, use CTS `/repo` API for buildContainer
|
||||
yum_repourls. (jkaluza)
|
||||
- Add COMPOSE_ID into the pungi log file (ounsal)
|
||||
- buildinstall: Add easy way to check if previous result was reused (lsedlar)
|
||||
|
||||
* Mon Oct 04 2021 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.0-2
|
||||
- Backport patch to avoid crash on missing COMPOSE_ID
|
||||
|
||||
* Wed Sep 15 2021 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.0-1
|
||||
- Only build CTS url when configured (lsedlar)
|
||||
- Require requests_kerberos only when needed (lsedlar)
|
||||
- Allow specifying $COMPOSE_ID in the `repo` value for osbs phase. (jkaluza)
|
||||
- Make getting old compose config reusable (lsedlar)
|
||||
- paths: Allow customizing log file extension (lsedlar)
|
||||
- Add authentication for updating the compose URL in CTS. (ounsal)
|
||||
- Fix type detection for osbuild images (lsedlar)
|
||||
- Enable pungi to send compose_url patches to CTS (ounsal)
|
||||
- Use xorriso instead of isoinfo when createiso_use_xorrisofs is enabled
|
||||
(ounsal)
|
||||
- Fix tests for createrepo (drumian)
|
||||
- Formatted files according to flake8 and black feedback (drumian)
|
||||
- Handle the pungi failures to ensure creation of log files (ounsal)
|
||||
- Add createrepo_enable_cache to configuration doc (hlin)
|
||||
- Fix formatting (hlin)
|
||||
- Install missing deps in ci image (hlin)
|
||||
- Use pytest directly incl. support for posargs, e.g.: tox -- -s -vvv
|
||||
tests/path/to/a/single/test_something.py (fvalder)
|
||||
- Supersede ModuleStream loading with ModuleIndex (fvalder)
|
||||
- Better error message than 'KeyError' in pungi (drumian)
|
||||
- Adding multithreading support for pungi/phases/image_checksum.py (jkunstle)
|
||||
- doc: more additional_packages documentation (kdreyer)
|
||||
- doc: fix typo in additional_packages description (kdreyer)
|
||||
- doc: improve signed packages retry docs (kdreyer)
|
||||
- Better error message than 'KeyError' in pungi (drumian)
|
||||
- doc: explain buildContainer API (kdreyer)
|
||||
|
||||
* Wed Aug 04 2021 Haibo Lin <hlin@redhat.com> - 4.2.10-1
|
||||
- Show and log command when using the run_blocking_cmd() method (fdipretre)
|
||||
- Use cachedir when createrepo (hlin)
|
||||
- gather: Add all srpms to variant lookaside repo (lsedlar)
|
||||
- Add task URL to watch task log (hlin)
|
||||
- Log warning when module defined in variants.xml not found (hlin)
|
||||
- pkgset: Compare future events correctly (lsedlar)
|
||||
- util: Strip file:// from local urls (lsedlar)
|
||||
- Clean up temporary yumroot dir (hlin)
|
||||
|
||||
* Fri Jul 23 2021 Fedora Release Engineering <releng@fedoraproject.org> - 4.2.9-3
|
||||
- Rebuilt for https://fedoraproject.org/wiki/Fedora_35_Mass_Rebuild
|
||||
|
||||
* Fri Jun 18 2021 stepan_oksanichenko <soksanichenko@cloudlinux.com> - 4.2.13-1
|
||||
- LNX-326: Add the ability to include any package by mask in packages.json to the generator
|
||||
- LNX-318: Modify build scripts for building CloudLinux OS 8.4
|
||||
|
||||
* Fri Jun 04 2021 Python Maint <python-maint@redhat.com> - 4.2.9-2
|
||||
- Rebuilt for Python 3.10
|
||||
|
||||
* Tue May 25 2021 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.2.12-1
|
||||
- LNX-108: Add multiarch support to pungi
|
||||
|
||||
* Thu Apr 29 2021 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.2.11-1
|
||||
- LNX-311: Add ability to productmd set a main variant while dumping TreeInfo
|
||||
|
||||
* Thu Apr 29 2021 onosek - 4.2.9-1
|
||||
- New upstream release 4.2.9
|
||||
- Fix can't link XDEV using repos as pkgset_sources (romain.forlot)
|
||||
- Updated the deprecated ks argument name (to the current inst.ks) (lveyde)
|
||||
- gather: Adjust reusing with lookaside (hlin)
|
||||
- hybrid: Optimize getting lookaside packages (lsedlar)
|
||||
- gather: Copy old logs when reusing gather result (hlin)
|
||||
- Cancel koji tasks when pungi terminated (hlin)
|
||||
- Add Dockerfile for building testing image (hlin)
|
||||
- image_container: Fix incorrect arch processing (lsedlar)
|
||||
- runroot: Adjust permissions always (hlin)
|
||||
- Format code (hlin)
|
||||
- pkgset: Fix meaning of retries (lsedlar)
|
||||
- pkgset: Store module tag only if module is used (lsedlar)
|
||||
- Store extended traceback for gather errors (lsedlar)
|
||||
|
||||
* Wed Feb 24 2021 Danylo Kuropiatnyk <dkuropiatnyk@cloudlinux.com>, Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.2.10-1
|
||||
- LU-2186 .treeinfo file in AlmaLinux public kickstart repo should contain AppStream variant
|
||||
- LU-2195 Change path to sources and iso when generating repositories
|
||||
- LU-2202: Start unittests during installation or build of pungi
|
||||
|
||||
* Fri Feb 12 2021 Ondrej Nosek <onosek@redhat.com> - 4.2.8-1
|
||||
- New upstream version
|
||||
|
||||
* Thu Feb 11 2021 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.2.9-1
|
||||
- LNX-133: Create a server for building nightly builds of AlmaLinux
|
||||
- LU-2133: Prepare CI for iso builds of CLOSS 8
|
||||
@ -195,6 +633,18 @@ python3 -m pytest
|
||||
- LNX-102: Add tool that collects information about modules
|
||||
- LNX-103 Update .spec file for AlmaLinux
|
||||
|
||||
* Wed Jan 27 2021 Fedora Release Engineering <releng@fedoraproject.org> - 4.2.7-3
|
||||
- Rebuilt for https://fedoraproject.org/wiki/Fedora_34_Mass_Rebuild
|
||||
|
||||
* Fri Jan 22 2021 Lubomír Sedlář <lsedlar@redhat.com> - 4.2.7-2
|
||||
- Backport patch for preserving default attribute in comps
|
||||
|
||||
* Tue Dec 8 09:01:52 CET 2020 Lubomír Sedlář <lsedlar@redhat.com> - 4.2.7-1
|
||||
- New upstream version
|
||||
|
||||
* Thu Nov 05 2020 Lubomír Sedlář <lsedlar@redhat.com> - 4.2.6-1
|
||||
- New upstream release
|
||||
|
||||
* Fri Sep 25 2020 Lubomír Sedlář <lsedlar@redhat.com> - 4.2.5-1
|
||||
- New upstream release
|
||||
|
||||
|
@ -93,6 +93,11 @@ def split_name_arch(name_arch):
|
||||
|
||||
def is_excluded(package, arches, logger=None):
|
||||
"""Check if package is excluded from given architectures."""
|
||||
if any(
|
||||
getBaseArch(exc_arch) == 'x86_64' for exc_arch in package.exclusivearch
|
||||
) and 'x86_64_v2' not in package.exclusivearch:
|
||||
package.exclusivearch.append('x86_64_v2')
|
||||
|
||||
if package.excludearch and set(package.excludearch) & set(arches):
|
||||
if logger:
|
||||
logger.debug(
|
||||
|
@ -34,6 +34,8 @@ arches = {
|
||||
"x86_64": "athlon",
|
||||
"amd64": "x86_64",
|
||||
"ia32e": "x86_64",
|
||||
# x86-64-v2
|
||||
"x86_64_v2": "noarch",
|
||||
# ppc64le
|
||||
"ppc64le": "noarch",
|
||||
# ppc
|
||||
@ -131,8 +133,8 @@ def getArchList(thisarch=None): # pragma: no cover
|
||||
|
||||
|
||||
def _try_read_cpuinfo(): # pragma: no cover
|
||||
""" Try to read /proc/cpuinfo ... if we can't ignore errors (ie. proc not
|
||||
mounted). """
|
||||
"""Try to read /proc/cpuinfo ... if we can't ignore errors (ie. proc not
|
||||
mounted)."""
|
||||
try:
|
||||
with open("/proc/cpuinfo", "r") as f:
|
||||
return f.readlines()
|
||||
@ -141,8 +143,8 @@ def _try_read_cpuinfo(): # pragma: no cover
|
||||
|
||||
|
||||
def _parse_auxv(): # pragma: no cover
|
||||
""" Read /proc/self/auxv and parse it into global dict for easier access
|
||||
later on, very similar to what rpm does. """
|
||||
"""Read /proc/self/auxv and parse it into global dict for easier access
|
||||
later on, very similar to what rpm does."""
|
||||
# In case we can't open and read /proc/self/auxv, just return
|
||||
try:
|
||||
with open("/proc/self/auxv", "rb") as f:
|
||||
@ -326,8 +328,8 @@ def getMultiArchInfo(arch=canonArch): # pragma: no cover
|
||||
|
||||
def getBaseArch(myarch=None): # pragma: no cover
|
||||
"""returns 'base' arch for myarch, if specified, or canonArch if not.
|
||||
base arch is the arch before noarch in the arches dict if myarch is not
|
||||
a key in the multilibArches."""
|
||||
base arch is the arch before noarch in the arches dict if myarch is not
|
||||
a key in the multilibArches."""
|
||||
|
||||
if not myarch:
|
||||
myarch = canonArch
|
||||
|
356
pungi/checks.py
356
pungi/checks.py
@ -53,7 +53,7 @@ from . import util
|
||||
|
||||
|
||||
def is_jigdo_needed(conf):
|
||||
return conf.get("create_jigdo", True)
|
||||
return conf.get("create_jigdo")
|
||||
|
||||
|
||||
def is_isohybrid_needed(conf):
|
||||
@ -75,8 +75,7 @@ def is_isohybrid_needed(conf):
|
||||
|
||||
|
||||
def is_genisoimage_needed(conf):
|
||||
"""This is only needed locally for createiso without runroot.
|
||||
"""
|
||||
"""This is only needed locally for createiso without runroot."""
|
||||
runroot_tag = conf.get("runroot_tag", "")
|
||||
if runroot_tag or conf.get("createiso_use_xorrisofs"):
|
||||
return False
|
||||
@ -94,7 +93,7 @@ def is_xorrisofs_needed(conf):
|
||||
|
||||
|
||||
def is_createrepo_c_needed(conf):
|
||||
return conf.get("createrepo_c", True)
|
||||
return conf.get("createrepo_c")
|
||||
|
||||
|
||||
# The first element in the tuple is package name expected to have the
|
||||
@ -228,10 +227,19 @@ def validate(config, offline=False, schema=None):
|
||||
DefaultValidator = _extend_with_default_and_alias(
|
||||
jsonschema.Draft4Validator, offline=offline
|
||||
)
|
||||
validator = DefaultValidator(
|
||||
schema,
|
||||
{"array": (tuple, list), "regex": six.string_types, "url": six.string_types},
|
||||
)
|
||||
|
||||
if hasattr(jsonschema.Draft4Validator, "TYPE_CHECKER"):
|
||||
# jsonschema >= 3.0 has new interface for checking types
|
||||
validator = DefaultValidator(schema)
|
||||
else:
|
||||
validator = DefaultValidator(
|
||||
schema,
|
||||
{
|
||||
"array": (tuple, list),
|
||||
"regex": six.string_types,
|
||||
"url": six.string_types,
|
||||
},
|
||||
)
|
||||
errors = []
|
||||
warnings = []
|
||||
for error in validator.iter_errors(config):
|
||||
@ -379,6 +387,7 @@ def _extend_with_default_and_alias(validator_class, offline=False):
|
||||
instance[property]["branch"] = resolver(
|
||||
instance[property]["repo"],
|
||||
instance[property].get("branch") or "HEAD",
|
||||
instance[property].get("options"),
|
||||
)
|
||||
|
||||
for error in _hook_errors(properties, instance, schema):
|
||||
@ -446,6 +455,19 @@ def _extend_with_default_and_alias(validator_class, offline=False):
|
||||
context=all_errors,
|
||||
)
|
||||
|
||||
kwargs = {}
|
||||
if hasattr(validator_class, "TYPE_CHECKER"):
|
||||
# jsonschema >= 3
|
||||
def is_array(checker, instance):
|
||||
return isinstance(instance, (tuple, list))
|
||||
|
||||
def is_string_type(checker, instance):
|
||||
return isinstance(instance, six.string_types)
|
||||
|
||||
kwargs["type_checker"] = validator_class.TYPE_CHECKER.redefine_many(
|
||||
{"array": is_array, "regex": is_string_type, "url": is_string_type}
|
||||
)
|
||||
|
||||
return jsonschema.validators.extend(
|
||||
validator_class,
|
||||
{
|
||||
@ -456,6 +478,7 @@ def _extend_with_default_and_alias(validator_class, offline=False):
|
||||
"additionalProperties": _validate_additional_properties,
|
||||
"anyOf": _validate_any_of,
|
||||
},
|
||||
**kwargs
|
||||
)
|
||||
|
||||
|
||||
@ -498,6 +521,13 @@ def make_schema():
|
||||
"file": {"type": "string"},
|
||||
"dir": {"type": "string"},
|
||||
"command": {"type": "string"},
|
||||
"options": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"credential_helper": {"type": "string"},
|
||||
},
|
||||
"additionalProperties": False,
|
||||
},
|
||||
},
|
||||
"additionalProperties": False,
|
||||
},
|
||||
@ -523,27 +553,6 @@ def make_schema():
|
||||
"list_of_strings": {"type": "array", "items": {"type": "string"}},
|
||||
"strings": _one_or_list({"type": "string"}),
|
||||
"optional_string": {"anyOf": [{"type": "string"}, {"type": "null"}]},
|
||||
"live_image_config": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"kickstart": {"type": "string"},
|
||||
"ksurl": {"type": "url"},
|
||||
"name": {"type": "string"},
|
||||
"subvariant": {"type": "string"},
|
||||
"target": {"type": "string"},
|
||||
"version": {"type": "string"},
|
||||
"repo": {"$ref": "#/definitions/repos"},
|
||||
"specfile": {"type": "string"},
|
||||
"scratch": {"type": "boolean"},
|
||||
"type": {"type": "string"},
|
||||
"sign": {"type": "boolean"},
|
||||
"failable": {"type": "boolean"},
|
||||
"release": {"$ref": "#/definitions/optional_string"},
|
||||
},
|
||||
"required": ["kickstart"],
|
||||
"additionalProperties": False,
|
||||
"type": "object",
|
||||
},
|
||||
"osbs_config": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@ -579,6 +588,7 @@ def make_schema():
|
||||
"release_discinfo_description": {"type": "string"},
|
||||
"treeinfo_version": {"type": "string"},
|
||||
"compose_type": {"type": "string", "enum": COMPOSE_TYPES},
|
||||
"label": {"type": "string"},
|
||||
"base_product_name": {"type": "string"},
|
||||
"base_product_short": {"type": "string"},
|
||||
"base_product_version": {"type": "string"},
|
||||
@ -610,7 +620,7 @@ def make_schema():
|
||||
"runroot_ssh_init_template": {"type": "string"},
|
||||
"runroot_ssh_install_packages_template": {"type": "string"},
|
||||
"runroot_ssh_run_template": {"type": "string"},
|
||||
"create_jigdo": {"type": "boolean", "default": True},
|
||||
"create_jigdo": {"type": "boolean", "default": False},
|
||||
"check_deps": {"type": "boolean", "default": True},
|
||||
"require_all_comps_packages": {"type": "boolean", "default": False},
|
||||
"bootable": {
|
||||
@ -654,13 +664,20 @@ def make_schema():
|
||||
"gather_profiler": {"type": "boolean", "default": False},
|
||||
"gather_allow_reuse": {"type": "boolean", "default": False},
|
||||
"pkgset_allow_reuse": {"type": "boolean", "default": True},
|
||||
"pkgset_source": {"type": "string", "enum": ["koji", "repos"]},
|
||||
"createiso_allow_reuse": {"type": "boolean", "default": True},
|
||||
"extraiso_allow_reuse": {"type": "boolean", "default": True},
|
||||
"pkgset_source": {"type": "string", "enum": [
|
||||
"koji",
|
||||
"repos",
|
||||
"kojimock",
|
||||
]},
|
||||
"createrepo_c": {"type": "boolean", "default": True},
|
||||
"createrepo_checksum": {
|
||||
"type": "string",
|
||||
"default": "sha256",
|
||||
"enum": ["sha1", "sha256", "sha512"],
|
||||
},
|
||||
"createrepo_enable_cache": {"type": "boolean", "default": True},
|
||||
"createrepo_use_xz": {"type": "boolean", "default": False},
|
||||
"createrepo_num_threads": {"type": "number", "default": get_num_cpus()},
|
||||
"createrepo_num_workers": {"type": "number", "default": 3},
|
||||
@ -722,6 +739,8 @@ def make_schema():
|
||||
"minItems": 1,
|
||||
"default": [None],
|
||||
},
|
||||
"signed_packages_retries": {"type": "number", "default": 0},
|
||||
"signed_packages_wait": {"type": "number", "default": 30},
|
||||
"variants_file": {"$ref": "#/definitions/str_or_scm_dict"},
|
||||
"comps_file": {"$ref": "#/definitions/str_or_scm_dict"},
|
||||
"comps_filter_environments": {"type": "boolean", "default": True},
|
||||
@ -732,6 +751,7 @@ def make_schema():
|
||||
"patternProperties": {".+": {"$ref": "#/definitions/strings"}},
|
||||
"additionalProperties": False,
|
||||
},
|
||||
"module_obsoletes_dir": {"$ref": "#/definitions/str_or_scm_dict"},
|
||||
"create_optional_isos": {"type": "boolean", "default": False},
|
||||
"symlink_isos_to": {"type": "string"},
|
||||
"dogpile_cache_backend": {"type": "string"},
|
||||
@ -744,6 +764,12 @@ def make_schema():
|
||||
),
|
||||
"createiso_break_hardlinks": {"type": "boolean", "default": False},
|
||||
"createiso_use_xorrisofs": {"type": "boolean", "default": False},
|
||||
"iso_level": {
|
||||
"anyOf": [
|
||||
{"type": "number", "enum": [1, 2, 3, 4]},
|
||||
_variant_arch_mapping({"type": "number", "enum": [1, 2, 3, 4]}),
|
||||
],
|
||||
},
|
||||
"iso_hfs_ppc64le_compatible": {"type": "boolean", "default": True},
|
||||
"multilib": _variant_arch_mapping(
|
||||
{"$ref": "#/definitions/list_of_strings"}
|
||||
@ -771,7 +797,7 @@ def make_schema():
|
||||
"buildinstall_allow_reuse": {"type": "boolean", "default": False},
|
||||
"buildinstall_method": {
|
||||
"type": "string",
|
||||
"enum": ["lorax", "buildinstall"],
|
||||
"enum": ["lorax"],
|
||||
},
|
||||
# In phase `buildinstall` we should add to compose only the
|
||||
# images that will be used only as netinstall
|
||||
@ -785,6 +811,10 @@ def make_schema():
|
||||
"buildinstall_kickstart": {"$ref": "#/definitions/str_or_scm_dict"},
|
||||
"buildinstall_use_guestmount": {"type": "boolean", "default": True},
|
||||
"buildinstall_skip": _variant_arch_mapping({"type": "boolean"}),
|
||||
"buildinstall_packages": {
|
||||
"$ref": "#/definitions/package_mapping",
|
||||
"default": [],
|
||||
},
|
||||
"global_ksurl": {"type": "url"},
|
||||
"global_version": {"type": "string"},
|
||||
"global_target": {"type": "string"},
|
||||
@ -794,8 +824,11 @@ def make_schema():
|
||||
"pdc_insecure": {"deprecated": "Koji is queried instead"},
|
||||
"cts_url": {"type": "string"},
|
||||
"cts_keytab": {"type": "string"},
|
||||
"cts_oidc_token_url": {"type": "url"},
|
||||
"cts_oidc_client_id": {"type": "string"},
|
||||
"koji_profile": {"type": "string"},
|
||||
"koji_event": {"type": "number"},
|
||||
"koji_cache": {"type": "string"},
|
||||
"pkgset_koji_tag": {"$ref": "#/definitions/strings"},
|
||||
"pkgset_koji_builds": {"$ref": "#/definitions/strings"},
|
||||
"pkgset_koji_scratch_tasks": {"$ref": "#/definitions/strings"},
|
||||
@ -813,6 +846,10 @@ def make_schema():
|
||||
"type": "boolean",
|
||||
"default": True,
|
||||
},
|
||||
"pkgset_inherit_exclusive_arch_to_noarch": {
|
||||
"type": "boolean",
|
||||
"default": True,
|
||||
},
|
||||
"pkgset_scratch_modules": {
|
||||
"type": "object",
|
||||
"patternProperties": {
|
||||
@ -825,7 +862,10 @@ def make_schema():
|
||||
"paths_module": {"type": "string"},
|
||||
"skip_phases": {
|
||||
"type": "array",
|
||||
"items": {"type": "string", "enum": PHASES_NAMES + ["productimg"]},
|
||||
"items": {
|
||||
"type": "string",
|
||||
"enum": PHASES_NAMES + ["productimg", "live_images"],
|
||||
},
|
||||
"default": [],
|
||||
},
|
||||
"image_name_format": {
|
||||
@ -859,11 +899,6 @@ def make_schema():
|
||||
},
|
||||
"restricted_volid": {"type": "boolean", "default": False},
|
||||
"volume_id_substitutions": {"type": "object", "default": {}},
|
||||
"live_images_no_rename": {"type": "boolean", "default": False},
|
||||
"live_images_ksurl": {"type": "url"},
|
||||
"live_images_target": {"type": "string"},
|
||||
"live_images_release": {"$ref": "#/definitions/optional_string"},
|
||||
"live_images_version": {"type": "string"},
|
||||
"image_build_ksurl": {"type": "url"},
|
||||
"image_build_target": {"type": "string"},
|
||||
"image_build_release": {"$ref": "#/definitions/optional_string"},
|
||||
@ -896,8 +931,6 @@ def make_schema():
|
||||
"product_id": {"$ref": "#/definitions/str_or_scm_dict"},
|
||||
"product_id_allow_missing": {"type": "boolean", "default": False},
|
||||
"product_id_allow_name_prefix": {"type": "boolean", "default": True},
|
||||
# Deprecated in favour of regular local/phase/global setting.
|
||||
"live_target": {"type": "string"},
|
||||
"tree_arches": {"$ref": "#/definitions/list_of_strings", "default": []},
|
||||
"tree_variants": {"$ref": "#/definitions/list_of_strings", "default": []},
|
||||
"translate_paths": {"$ref": "#/definitions/string_pairs", "default": []},
|
||||
@ -976,6 +1009,7 @@ def make_schema():
|
||||
"arches": {"$ref": "#/definitions/list_of_strings"},
|
||||
"failable": {"$ref": "#/definitions/list_of_strings"},
|
||||
"release": {"$ref": "#/definitions/optional_string"},
|
||||
"nomacboot": {"type": "boolean"},
|
||||
},
|
||||
"required": ["name", "kickstart"],
|
||||
"additionalProperties": False,
|
||||
@ -1009,15 +1043,18 @@ def make_schema():
|
||||
},
|
||||
"update_summary": {"type": "boolean"},
|
||||
"force_new_commit": {"type": "boolean"},
|
||||
"unified_core": {"type": "boolean"},
|
||||
"version": {"type": "string"},
|
||||
"config_branch": {"type": "string"},
|
||||
"tag_ref": {"type": "boolean"},
|
||||
"ostree_ref": {"type": "string"},
|
||||
"runroot_packages": {
|
||||
"$ref": "#/definitions/list_of_strings",
|
||||
},
|
||||
},
|
||||
"required": [
|
||||
"treefile",
|
||||
"config_url",
|
||||
"repo",
|
||||
"ostree_repo",
|
||||
],
|
||||
"additionalProperties": False,
|
||||
@ -1043,6 +1080,7 @@ def make_schema():
|
||||
"failable": {"$ref": "#/definitions/list_of_strings"},
|
||||
"update_summary": {"type": "boolean"},
|
||||
"force_new_commit": {"type": "boolean"},
|
||||
"unified_core": {"type": "boolean"},
|
||||
"version": {"type": "string"},
|
||||
"config_branch": {"type": "string"},
|
||||
"tag_ref": {"type": "boolean"},
|
||||
@ -1054,6 +1092,39 @@ def make_schema():
|
||||
),
|
||||
]
|
||||
},
|
||||
"ostree_container": {
|
||||
"type": "object",
|
||||
"patternProperties": {
|
||||
# Warning: this pattern is a variant uid regex, but the
|
||||
# format does not let us validate it as there is no regular
|
||||
# expression to describe all regular expressions.
|
||||
".+": _one_or_list(
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"treefile": {"type": "string"},
|
||||
"config_url": {"type": "string"},
|
||||
"repo": {"$ref": "#/definitions/repos"},
|
||||
"keep_original_sources": {"type": "boolean"},
|
||||
"config_branch": {"type": "string"},
|
||||
"arches": {"$ref": "#/definitions/list_of_strings"},
|
||||
"failable": {"$ref": "#/definitions/list_of_strings"},
|
||||
"version": {"type": "string"},
|
||||
"tag_ref": {"type": "boolean"},
|
||||
"runroot_packages": {
|
||||
"$ref": "#/definitions/list_of_strings",
|
||||
},
|
||||
},
|
||||
"required": [
|
||||
"treefile",
|
||||
"config_url",
|
||||
],
|
||||
"additionalProperties": False,
|
||||
}
|
||||
),
|
||||
},
|
||||
"additionalProperties": False,
|
||||
},
|
||||
"ostree_installer": _variant_arch_mapping(
|
||||
{
|
||||
"type": "object",
|
||||
@ -1072,16 +1143,16 @@ def make_schema():
|
||||
"template_repo": {"type": "string"},
|
||||
"template_branch": {"type": "string"},
|
||||
"extra_runroot_pkgs": {"$ref": "#/definitions/list_of_strings"},
|
||||
"skip_branding": {"type": "boolean"},
|
||||
},
|
||||
"additionalProperties": False,
|
||||
}
|
||||
),
|
||||
"ostree_use_koji_plugin": {"type": "boolean", "default": False},
|
||||
"ostree_container_use_koji_plugin": {"type": "boolean", "default": False},
|
||||
"ostree_installer_use_koji_plugin": {"type": "boolean", "default": False},
|
||||
"ostree_installer_overwrite": {"type": "boolean", "default": False},
|
||||
"live_images": _variant_arch_mapping(
|
||||
_one_or_list({"$ref": "#/definitions/live_image_config"})
|
||||
),
|
||||
"image_build_allow_reuse": {"type": "boolean", "default": False},
|
||||
"image_build": {
|
||||
"type": "object",
|
||||
"patternProperties": {
|
||||
@ -1131,6 +1202,50 @@ def make_schema():
|
||||
},
|
||||
"additionalProperties": False,
|
||||
},
|
||||
"kiwibuild": {
|
||||
"type": "object",
|
||||
"patternProperties": {
|
||||
# Warning: this pattern is a variant uid regex, but the
|
||||
# format does not let us validate it as there is no regular
|
||||
# expression to describe all regular expressions.
|
||||
".+": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"target": {"type": "string"},
|
||||
"description_scm": {"type": "url"},
|
||||
"description_path": {"type": "string"},
|
||||
"kiwi_profile": {"type": "string"},
|
||||
"release": {"type": "string"},
|
||||
"arches": {"$ref": "#/definitions/list_of_strings"},
|
||||
"repos": {"$ref": "#/definitions/list_of_strings"},
|
||||
"failable": {"$ref": "#/definitions/list_of_strings"},
|
||||
"subvariant": {"type": "string"},
|
||||
"type": {"type": "string"},
|
||||
"type_attr": {"$ref": "#/definitions/list_of_strings"},
|
||||
"bundle_name_format": {"type": "string"},
|
||||
},
|
||||
"required": [
|
||||
# description_scm and description_path
|
||||
# are really required, but as they can
|
||||
# be set at the phase level we cannot
|
||||
# enforce that here
|
||||
"kiwi_profile",
|
||||
],
|
||||
"additionalProperties": False,
|
||||
},
|
||||
}
|
||||
},
|
||||
"additionalProperties": False,
|
||||
},
|
||||
"kiwibuild_description_scm": {"type": "url"},
|
||||
"kiwibuild_description_path": {"type": "string"},
|
||||
"kiwibuild_target": {"type": "string"},
|
||||
"kiwibuild_release": {"$ref": "#/definitions/optional_string"},
|
||||
"kiwibuild_type": {"type": "string"},
|
||||
"kiwibuild_type_attr": {"$ref": "#/definitions/list_of_strings"},
|
||||
"kiwibuild_bundle_name_format": {"type": "string"},
|
||||
"osbuild_target": {"type": "string"},
|
||||
"osbuild_release": {"$ref": "#/definitions/optional_string"},
|
||||
"osbuild_version": {"type": "string"},
|
||||
@ -1149,12 +1264,135 @@ def make_schema():
|
||||
"version": {"type": "string"},
|
||||
"distro": {"type": "string"},
|
||||
"target": {"type": "string"},
|
||||
"image_types": {"$ref": "#/definitions/strings"},
|
||||
# Only a single image_type can be specified
|
||||
# https://github.com/osbuild/koji-osbuild/commit/c7252650814f82281ee57b598cb2ad970b580451
|
||||
# https://github.com/osbuild/koji-osbuild/commit/f21a2de39b145eb94f3d49cb4d8775a33ba56752
|
||||
"image_types": {
|
||||
"oneOf": [
|
||||
{
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
"description": "Deprecated variant",
|
||||
"minItems": 1,
|
||||
"maxItems": 1,
|
||||
},
|
||||
{"type": "string"},
|
||||
]
|
||||
},
|
||||
"arches": {"$ref": "#/definitions/list_of_strings"},
|
||||
"release": {"type": "string"},
|
||||
"repo": {"$ref": "#/definitions/list_of_strings"},
|
||||
"repo": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"oneOf": [
|
||||
{
|
||||
"type": "object",
|
||||
"additionalProperties": False,
|
||||
"required": ["baseurl"],
|
||||
"properties": {
|
||||
"baseurl": {"type": "string"},
|
||||
"package_sets": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{"type": "string"},
|
||||
]
|
||||
},
|
||||
},
|
||||
"failable": {"$ref": "#/definitions/list_of_strings"},
|
||||
"subvariant": {"type": "string"},
|
||||
"ostree_url": {"type": "string"},
|
||||
"ostree_ref": {"type": "string"},
|
||||
"ostree_parent": {"type": "string"},
|
||||
"manifest_type": {"type": "string"},
|
||||
"customizations": {
|
||||
"type": "object",
|
||||
"additionalProperties": True,
|
||||
},
|
||||
"upload_options": {
|
||||
# this should be really 'oneOf', but the minimal
|
||||
# required properties in AWSEC2 and GCP options
|
||||
# overlap.
|
||||
"anyOf": [
|
||||
# AWSEC2UploadOptions
|
||||
{
|
||||
"type": "object",
|
||||
"additionalProperties": False,
|
||||
"required": [
|
||||
"region",
|
||||
"share_with_accounts",
|
||||
],
|
||||
"properties": {
|
||||
"region": {
|
||||
"type": "string",
|
||||
},
|
||||
"snapshot_name": {
|
||||
"type": "string",
|
||||
},
|
||||
"share_with_accounts": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
},
|
||||
},
|
||||
},
|
||||
# AWSS3UploadOptions
|
||||
{
|
||||
"type": "object",
|
||||
"additionalProperties": False,
|
||||
"required": ["region"],
|
||||
"properties": {
|
||||
"region": {"type": "string"}
|
||||
},
|
||||
},
|
||||
# AzureUploadOptions
|
||||
{
|
||||
"type": "object",
|
||||
"additionalProperties": False,
|
||||
"required": [
|
||||
"tenant_id",
|
||||
"subscription_id",
|
||||
"resource_group",
|
||||
],
|
||||
"properties": {
|
||||
"tenant_id": {"type": "string"},
|
||||
"subscription_id": {"type": "string"},
|
||||
"resource_group": {"type": "string"},
|
||||
"location": {"type": "string"},
|
||||
"image_name": {
|
||||
"type": "string",
|
||||
},
|
||||
},
|
||||
},
|
||||
# GCPUploadOptions
|
||||
{
|
||||
"type": "object",
|
||||
"additionalProperties": False,
|
||||
"required": ["region"],
|
||||
"properties": {
|
||||
"region": {"type": "string"},
|
||||
"bucket": {"type": "string"},
|
||||
"image_name": {
|
||||
"type": "string",
|
||||
},
|
||||
"share_with_accounts": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
},
|
||||
},
|
||||
},
|
||||
# ContainerUploadOptions
|
||||
{
|
||||
"type": "object",
|
||||
"additionalProperties": False,
|
||||
"properties": {
|
||||
"name": {"type": "string"},
|
||||
"tag": {"type": "string"},
|
||||
},
|
||||
},
|
||||
]
|
||||
},
|
||||
},
|
||||
"required": ["name", "distro", "image_types"],
|
||||
"additionalProperties": False,
|
||||
@ -1189,9 +1427,6 @@ def make_schema():
|
||||
{"$ref": "#/definitions/strings"}
|
||||
),
|
||||
"lorax_use_koji_plugin": {"type": "boolean", "default": False},
|
||||
"signing_key_id": {"type": "string"},
|
||||
"signing_key_password_file": {"type": "string"},
|
||||
"signing_command": {"type": "string"},
|
||||
"productimg": {
|
||||
"deprecated": "remove it. Productimg phase has been removed"
|
||||
},
|
||||
@ -1203,6 +1438,7 @@ def make_schema():
|
||||
"anyOf": [{"type": "string"}, {"type": "number"}],
|
||||
"default": 10 * 1024 * 1024,
|
||||
},
|
||||
"osbs_allow_reuse": {"type": "boolean", "default": False},
|
||||
"osbs": {
|
||||
"type": "object",
|
||||
"patternProperties": {
|
||||
@ -1221,6 +1457,26 @@ def make_schema():
|
||||
},
|
||||
"additionalProperties": False,
|
||||
},
|
||||
"image_container": {
|
||||
"type": "object",
|
||||
"patternProperties": {
|
||||
".+": _one_or_list(
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"url": {"type": "url"},
|
||||
"target": {"type": "string"},
|
||||
"priority": {"type": "number"},
|
||||
"failable": {"type": "boolean"},
|
||||
"git_branch": {"type": "string"},
|
||||
"image_spec": {"type": "object"},
|
||||
},
|
||||
"required": ["url", "target", "git_branch", "image_spec"],
|
||||
}
|
||||
),
|
||||
},
|
||||
"additionalProperties": False,
|
||||
},
|
||||
"extra_files": _variant_arch_mapping(
|
||||
{
|
||||
"type": "array",
|
||||
@ -1305,7 +1561,6 @@ def get_num_cpus():
|
||||
CONFIG_DEPS = {
|
||||
"buildinstall_method": {
|
||||
"conflicts": (
|
||||
(lambda val: val == "buildinstall", ["lorax_options"]),
|
||||
(lambda val: not val, ["lorax_options", "buildinstall_kickstart"]),
|
||||
),
|
||||
},
|
||||
@ -1325,6 +1580,7 @@ CONFIG_DEPS = {
|
||||
"requires": ((lambda x: x, ["base_product_name", "base_product_short"]),),
|
||||
"conflicts": ((lambda x: not x, ["base_product_name", "base_product_short"]),),
|
||||
},
|
||||
"cts_url": {"requires": ((lambda x: x, ["translate_paths"]),)},
|
||||
"product_id": {"conflicts": [(lambda x: not x, ["product_id_allow_missing"])]},
|
||||
"pkgset_scratch_modules": {"requires": ((lambda x: x, ["mbs_api_url"]),)},
|
||||
"pkgset_source": {
|
||||
|
236
pungi/compose.py
236
pungi/compose.py
@ -17,6 +17,7 @@
|
||||
__all__ = ("Compose",)
|
||||
|
||||
|
||||
import contextlib
|
||||
import errno
|
||||
import logging
|
||||
import os
|
||||
@ -24,8 +25,12 @@ import time
|
||||
import tempfile
|
||||
import shutil
|
||||
import json
|
||||
import socket
|
||||
|
||||
import kobo.log
|
||||
import kobo.tback
|
||||
import requests
|
||||
from requests.exceptions import RequestException
|
||||
from productmd.composeinfo import ComposeInfo
|
||||
from productmd.images import Images
|
||||
from dogpile.cache import make_region
|
||||
@ -34,12 +39,15 @@ from dogpile.cache import make_region
|
||||
from pungi.graph import SimpleAcyclicOrientedGraph
|
||||
from pungi.wrappers.variants import VariantsXmlParser
|
||||
from pungi.paths import Paths
|
||||
from pungi.wrappers.kojiwrapper import KojiDownloadProxy
|
||||
from pungi.wrappers.scm import get_file_from_scm
|
||||
from pungi.util import (
|
||||
makedirs,
|
||||
get_arch_variant_data,
|
||||
get_format_substs,
|
||||
get_variant_data,
|
||||
retry,
|
||||
translate_path_raw,
|
||||
)
|
||||
from pungi.metadata import compose_to_composeinfo
|
||||
|
||||
@ -51,6 +59,101 @@ except ImportError:
|
||||
SUPPORTED_MILESTONES = ["RC", "Update", "SecurityFix"]
|
||||
|
||||
|
||||
def is_status_fatal(status_code):
|
||||
"""Check if status code returned from CTS reports an error that is unlikely
|
||||
to be fixed by retrying. Generally client errors (4XX) are fatal, with the
|
||||
exception of 401 Unauthorized which could be caused by transient network
|
||||
issue between compose host and KDC.
|
||||
"""
|
||||
if status_code == 401:
|
||||
return False
|
||||
return status_code >= 400 and status_code < 500
|
||||
|
||||
|
||||
@retry(wait_on=RequestException)
|
||||
def retry_request(method, url, data=None, json_data=None, auth=None):
|
||||
"""
|
||||
:param str method: Reqest method.
|
||||
:param str url: Target URL.
|
||||
:param dict data: form-urlencoded data to send in the body of the request.
|
||||
:param dict json_data: json data to send in the body of the request.
|
||||
"""
|
||||
request_method = getattr(requests, method)
|
||||
rv = request_method(url, data=data, json=json_data, auth=auth)
|
||||
if is_status_fatal(rv.status_code):
|
||||
try:
|
||||
error = rv.json()
|
||||
except ValueError:
|
||||
error = rv.text
|
||||
raise RuntimeError("%s responded with %d: %s" % (url, rv.status_code, error))
|
||||
rv.raise_for_status()
|
||||
return rv
|
||||
|
||||
|
||||
class BearerAuth(requests.auth.AuthBase):
|
||||
def __init__(self, token):
|
||||
self.token = token
|
||||
|
||||
def __call__(self, r):
|
||||
r.headers["authorization"] = "Bearer " + self.token
|
||||
return r
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def cts_auth(pungi_conf):
|
||||
"""
|
||||
:param dict pungi_conf: dict obj of pungi.json config.
|
||||
"""
|
||||
auth = None
|
||||
token = None
|
||||
cts_keytab = pungi_conf.get("cts_keytab")
|
||||
cts_oidc_token_url = os.environ.get("CTS_OIDC_TOKEN_URL", "") or pungi_conf.get(
|
||||
"cts_oidc_token_url"
|
||||
)
|
||||
|
||||
try:
|
||||
if cts_keytab:
|
||||
# requests-kerberos cannot accept custom keytab, we need to use
|
||||
# environment variable for this. But we need to change environment
|
||||
# only temporarily just for this single requests.post.
|
||||
# So at first backup the current environment and revert to it
|
||||
# after the requests call.
|
||||
from requests_kerberos import HTTPKerberosAuth
|
||||
|
||||
auth = HTTPKerberosAuth()
|
||||
environ_copy = dict(os.environ)
|
||||
if "$HOSTNAME" in cts_keytab:
|
||||
cts_keytab = cts_keytab.replace("$HOSTNAME", socket.gethostname())
|
||||
os.environ["KRB5_CLIENT_KTNAME"] = cts_keytab
|
||||
os.environ["KRB5CCNAME"] = "DIR:%s" % tempfile.mkdtemp()
|
||||
elif cts_oidc_token_url:
|
||||
cts_oidc_client_id = os.environ.get(
|
||||
"CTS_OIDC_CLIENT_ID", ""
|
||||
) or pungi_conf.get("cts_oidc_client_id", "")
|
||||
token = retry_request(
|
||||
"post",
|
||||
cts_oidc_token_url,
|
||||
data={
|
||||
"grant_type": "client_credentials",
|
||||
"client_id": cts_oidc_client_id,
|
||||
"client_secret": os.environ.get("CTS_OIDC_CLIENT_SECRET", ""),
|
||||
},
|
||||
).json()["access_token"]
|
||||
auth = BearerAuth(token)
|
||||
del token
|
||||
|
||||
yield auth
|
||||
except Exception as e:
|
||||
# Avoid leaking client secret in trackback
|
||||
e.show_locals = False
|
||||
raise e
|
||||
finally:
|
||||
if cts_keytab:
|
||||
shutil.rmtree(os.environ["KRB5CCNAME"].split(":", 1)[1])
|
||||
os.environ.clear()
|
||||
os.environ.update(environ_copy)
|
||||
|
||||
|
||||
def get_compose_info(
|
||||
conf,
|
||||
compose_type="production",
|
||||
@ -80,47 +183,25 @@ def get_compose_info(
|
||||
ci.compose.type = compose_type
|
||||
ci.compose.date = compose_date or time.strftime("%Y%m%d", time.localtime())
|
||||
ci.compose.respin = compose_respin or 0
|
||||
ci.compose.id = ci.create_compose_id()
|
||||
|
||||
cts_url = conf.get("cts_url", None)
|
||||
cts_url = conf.get("cts_url")
|
||||
if cts_url:
|
||||
# Import requests and requests-kerberos here so it is not needed
|
||||
# if running without Compose Tracking Service.
|
||||
import requests
|
||||
from requests_kerberos import HTTPKerberosAuth
|
||||
|
||||
# Requests-kerberos cannot accept custom keytab, we need to use
|
||||
# environment variable for this. But we need to change environment
|
||||
# only temporarily just for this single requests.post.
|
||||
# So at first backup the current environment and revert to it
|
||||
# after the requests.post call.
|
||||
cts_keytab = conf.get("cts_keytab", None)
|
||||
if cts_keytab:
|
||||
environ_copy = dict(os.environ)
|
||||
os.environ["KRB5_CLIENT_KTNAME"] = cts_keytab
|
||||
|
||||
try:
|
||||
# Create compose in CTS and get the reserved compose ID.
|
||||
ci.compose.id = ci.create_compose_id()
|
||||
url = os.path.join(cts_url, "api/1/composes/")
|
||||
data = {
|
||||
"compose_info": json.loads(ci.dumps()),
|
||||
"parent_compose_ids": parent_compose_ids,
|
||||
"respin_of": respin_of,
|
||||
}
|
||||
rv = requests.post(url, json=data, auth=HTTPKerberosAuth())
|
||||
rv.raise_for_status()
|
||||
finally:
|
||||
if cts_keytab:
|
||||
os.environ.clear()
|
||||
os.environ.update(environ_copy)
|
||||
# Create compose in CTS and get the reserved compose ID.
|
||||
url = os.path.join(cts_url, "api/1/composes/")
|
||||
data = {
|
||||
"compose_info": json.loads(ci.dumps()),
|
||||
"parent_compose_ids": parent_compose_ids,
|
||||
"respin_of": respin_of,
|
||||
}
|
||||
with cts_auth(conf) as authentication:
|
||||
rv = retry_request("post", url, json_data=data, auth=authentication)
|
||||
|
||||
# Update local ComposeInfo with received ComposeInfo.
|
||||
cts_ci = ComposeInfo()
|
||||
cts_ci.loads(rv.text)
|
||||
ci.compose.respin = cts_ci.compose.respin
|
||||
ci.compose.id = cts_ci.compose.id
|
||||
else:
|
||||
ci.compose.id = ci.create_compose_id()
|
||||
|
||||
return ci
|
||||
|
||||
@ -137,6 +218,23 @@ def write_compose_info(compose_dir, ci):
|
||||
ci.dump(os.path.join(work_dir, "composeinfo-base.json"))
|
||||
|
||||
|
||||
def update_compose_url(compose_id, compose_dir, conf):
|
||||
cts_url = conf.get("cts_url", None)
|
||||
if cts_url:
|
||||
url = os.path.join(cts_url, "api/1/composes", compose_id)
|
||||
tp = conf.get("translate_paths", None)
|
||||
compose_url = translate_path_raw(tp, compose_dir)
|
||||
if compose_url == compose_dir:
|
||||
# We do not have a URL, do not attempt the update.
|
||||
return
|
||||
data = {
|
||||
"action": "set_url",
|
||||
"compose_url": compose_url,
|
||||
}
|
||||
with cts_auth(conf) as authentication:
|
||||
return retry_request("patch", url, json_data=data, auth=authentication)
|
||||
|
||||
|
||||
def get_compose_dir(
|
||||
topdir,
|
||||
conf,
|
||||
@ -145,11 +243,19 @@ def get_compose_dir(
|
||||
compose_respin=None,
|
||||
compose_label=None,
|
||||
already_exists_callbacks=None,
|
||||
parent_compose_ids=None,
|
||||
respin_of=None,
|
||||
):
|
||||
already_exists_callbacks = already_exists_callbacks or []
|
||||
|
||||
ci = get_compose_info(
|
||||
conf, compose_type, compose_date, compose_respin, compose_label
|
||||
conf,
|
||||
compose_type,
|
||||
compose_date,
|
||||
compose_respin,
|
||||
compose_label,
|
||||
parent_compose_ids,
|
||||
respin_of,
|
||||
)
|
||||
|
||||
cts_url = conf.get("cts_url", None)
|
||||
@ -222,6 +328,8 @@ class Compose(kobo.log.LoggingBase):
|
||||
self.koji_event = koji_event or conf.get("koji_event")
|
||||
self.notifier = notifier
|
||||
|
||||
self._old_config = None
|
||||
|
||||
# path definitions
|
||||
self.paths = Paths(self)
|
||||
|
||||
@ -284,6 +392,8 @@ class Compose(kobo.log.LoggingBase):
|
||||
self.im.compose.respin = self.compose_respin
|
||||
self.im.metadata_path = self.paths.compose.metadata()
|
||||
|
||||
self.containers_metadata = {}
|
||||
|
||||
# Stores list of deliverables that failed, but did not abort the
|
||||
# compose.
|
||||
# {deliverable: [(Variant.uid, arch, subvariant)]}
|
||||
@ -300,9 +410,12 @@ class Compose(kobo.log.LoggingBase):
|
||||
else:
|
||||
self.cache_region = make_region().configure("dogpile.cache.null")
|
||||
|
||||
self.koji_downloader = KojiDownloadProxy.from_config(self.conf, self._logger)
|
||||
|
||||
get_compose_info = staticmethod(get_compose_info)
|
||||
write_compose_info = staticmethod(write_compose_info)
|
||||
get_compose_dir = staticmethod(get_compose_dir)
|
||||
update_compose_url = staticmethod(update_compose_url)
|
||||
|
||||
def __getitem__(self, name):
|
||||
return self.variants[name]
|
||||
@ -343,6 +456,10 @@ class Compose(kobo.log.LoggingBase):
|
||||
def has_module_defaults(self):
|
||||
return bool(self.conf.get("module_defaults_dir", False))
|
||||
|
||||
@property
|
||||
def has_module_obsoletes(self):
|
||||
return bool(self.conf.get("module_obsoletes_dir", False))
|
||||
|
||||
@property
|
||||
def config_dir(self):
|
||||
return os.path.dirname(self.conf._open_file or "")
|
||||
@ -370,7 +487,7 @@ class Compose(kobo.log.LoggingBase):
|
||||
)
|
||||
else:
|
||||
file_name = os.path.basename(scm_dict)
|
||||
scm_dict = os.path.join(self.config_dir, os.path.basename(scm_dict))
|
||||
scm_dict = os.path.join(self.config_dir, scm_dict)
|
||||
|
||||
self.log_debug("Writing variants file: %s", variants_file)
|
||||
tmp_dir = self.mkdtemp(prefix="variants_file_")
|
||||
@ -573,7 +690,54 @@ class Compose(kobo.log.LoggingBase):
|
||||
<compose_topdir>/work/{global,<arch>}/tmp[-<variant>]/
|
||||
"""
|
||||
path = os.path.join(self.paths.work.tmp_dir(arch=arch, variant=variant))
|
||||
return tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=path)
|
||||
tmpdir = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=path)
|
||||
os.chmod(tmpdir, 0o755)
|
||||
return tmpdir
|
||||
|
||||
def dump_containers_metadata(self):
|
||||
"""Create a file with container metadata if there are any containers."""
|
||||
if not self.containers_metadata:
|
||||
return
|
||||
with open(self.paths.compose.metadata("osbs.json"), "w") as f:
|
||||
json.dump(
|
||||
self.containers_metadata,
|
||||
f,
|
||||
indent=4,
|
||||
sort_keys=True,
|
||||
separators=(",", ": "),
|
||||
)
|
||||
|
||||
def traceback(self, detail=None, show_locals=True):
|
||||
"""Store an extended traceback. This method should only be called when
|
||||
handling an exception.
|
||||
|
||||
:param str detail: Extra information appended to the filename
|
||||
"""
|
||||
basename = "traceback"
|
||||
if detail:
|
||||
basename += "-" + detail
|
||||
tb_path = self.paths.log.log_file("global", basename)
|
||||
self.log_error("Extended traceback in: %s", tb_path)
|
||||
tback = kobo.tback.Traceback(show_locals=show_locals).get_traceback()
|
||||
# Kobo 0.36.0 returns traceback as str, older versions return bytes
|
||||
with open(tb_path, "wb" if isinstance(tback, bytes) else "w") as f:
|
||||
f.write(tback)
|
||||
|
||||
def load_old_compose_config(self):
|
||||
"""
|
||||
Helper method to load Pungi config dump from old compose.
|
||||
"""
|
||||
if not self._old_config:
|
||||
config_dump_full = self.paths.log.log_file("global", "config-dump")
|
||||
config_dump_full = self.paths.old_compose_path(config_dump_full)
|
||||
if not config_dump_full:
|
||||
return None
|
||||
|
||||
self.log_info("Loading old config file: %s", config_dump_full)
|
||||
with open(config_dump_full, "r") as f:
|
||||
self._old_config = json.load(f)
|
||||
|
||||
return self._old_config
|
||||
|
||||
|
||||
def get_ordered_variant_uids(compose):
|
||||
|
@ -5,16 +5,20 @@ from __future__ import print_function
|
||||
import os
|
||||
import six
|
||||
from collections import namedtuple
|
||||
from kobo.shortcuts import run
|
||||
from six.moves import shlex_quote
|
||||
|
||||
from .wrappers import iso
|
||||
from .wrappers.jigdo import JigdoWrapper
|
||||
|
||||
from .phases.buildinstall import BOOT_CONFIGS, BOOT_IMAGES
|
||||
|
||||
|
||||
CreateIsoOpts = namedtuple(
|
||||
"CreateIsoOpts",
|
||||
[
|
||||
"buildinstall_method",
|
||||
"boot_iso",
|
||||
"arch",
|
||||
"output_dir",
|
||||
"jigdo_dir",
|
||||
@ -25,6 +29,8 @@ CreateIsoOpts = namedtuple(
|
||||
"os_tree",
|
||||
"hfs_compat",
|
||||
"use_xorrisofs",
|
||||
"iso_level",
|
||||
"script_dir",
|
||||
],
|
||||
)
|
||||
CreateIsoOpts.__new__.__defaults__ = (None,) * len(CreateIsoOpts._fields)
|
||||
@ -61,10 +67,6 @@ def make_image(f, opts):
|
||||
os.path.join("$TEMPLATE", "config_files/ppc"),
|
||||
hfs_compat=opts.hfs_compat,
|
||||
)
|
||||
elif opts.buildinstall_method == "buildinstall":
|
||||
mkisofs_kwargs["boot_args"] = iso.get_boot_options(
|
||||
opts.arch, "/usr/lib/anaconda-runtime/boot"
|
||||
)
|
||||
|
||||
# ppc(64) doesn't seem to support utf-8
|
||||
if opts.arch in ("ppc", "ppc64", "ppc64le"):
|
||||
@ -76,6 +78,8 @@ def make_image(f, opts):
|
||||
volid=opts.volid,
|
||||
exclude=["./lost+found"],
|
||||
graft_points=opts.graft_points,
|
||||
use_xorrisofs=opts.use_xorrisofs,
|
||||
iso_level=opts.iso_level,
|
||||
**mkisofs_kwargs
|
||||
)
|
||||
emit(f, cmd)
|
||||
@ -97,7 +101,7 @@ def run_isohybrid(f, opts):
|
||||
|
||||
|
||||
def make_manifest(f, opts):
|
||||
emit(f, iso.get_manifest_cmd(opts.iso_name))
|
||||
emit(f, iso.get_manifest_cmd(opts.iso_name, opts.use_xorrisofs))
|
||||
|
||||
|
||||
def make_jigdo(f, opts):
|
||||
@ -113,6 +117,69 @@ def make_jigdo(f, opts):
|
||||
emit(f, cmd)
|
||||
|
||||
|
||||
def _get_perms(fs_path):
|
||||
"""Compute proper permissions for a file.
|
||||
|
||||
This mimicks what -rational-rock option of genisoimage does. All read bits
|
||||
are set, so that files and directories are globally readable. If any
|
||||
execute bit is set for a file, set them all. No writes are allowed and
|
||||
special bits are erased too.
|
||||
"""
|
||||
statinfo = os.stat(fs_path)
|
||||
perms = 0o444
|
||||
if statinfo.st_mode & 0o111:
|
||||
perms |= 0o111
|
||||
return perms
|
||||
|
||||
|
||||
def write_xorriso_commands(opts):
|
||||
# Create manifest for the boot.iso listing all contents
|
||||
boot_iso_manifest = "%s.manifest" % os.path.join(
|
||||
opts.script_dir, os.path.basename(opts.boot_iso)
|
||||
)
|
||||
run(
|
||||
iso.get_manifest_cmd(
|
||||
opts.boot_iso, opts.use_xorrisofs, output_file=boot_iso_manifest
|
||||
)
|
||||
)
|
||||
# Find which files may have been updated by pungi. This only includes a few
|
||||
# files from tweaking buildinstall and .discinfo metadata. There's no good
|
||||
# way to detect whether the boot config files actually changed, so we may
|
||||
# be updating files in the ISO with the same data.
|
||||
UPDATEABLE_FILES = set(BOOT_IMAGES + BOOT_CONFIGS + [".discinfo"])
|
||||
updated_files = set()
|
||||
excluded_files = set()
|
||||
with open(boot_iso_manifest) as f:
|
||||
for line in f:
|
||||
path = line.lstrip("/").rstrip("\n")
|
||||
if path in UPDATEABLE_FILES:
|
||||
updated_files.add(path)
|
||||
else:
|
||||
excluded_files.add(path)
|
||||
|
||||
script = os.path.join(opts.script_dir, "xorriso-%s.txt" % id(opts))
|
||||
with open(script, "w") as f:
|
||||
for cmd in iso.xorriso_commands(
|
||||
opts.arch, opts.boot_iso, os.path.join(opts.output_dir, opts.iso_name)
|
||||
):
|
||||
emit(f, " ".join(cmd))
|
||||
emit(f, "-volid %s" % opts.volid)
|
||||
|
||||
with open(opts.graft_points) as gp:
|
||||
for line in gp:
|
||||
iso_path, fs_path = line.strip().split("=", 1)
|
||||
if iso_path in excluded_files:
|
||||
continue
|
||||
cmd = "-update" if iso_path in updated_files else "-map"
|
||||
emit(f, "%s %s %s" % (cmd, fs_path, iso_path))
|
||||
emit(f, "-chmod 0%o %s" % (_get_perms(fs_path), iso_path))
|
||||
|
||||
emit(f, "-chown_r 0 /")
|
||||
emit(f, "-chgrp_r 0 /")
|
||||
emit(f, "-end")
|
||||
return script
|
||||
|
||||
|
||||
def write_script(opts, f):
|
||||
if bool(opts.jigdo_dir) != bool(opts.os_tree):
|
||||
raise RuntimeError("jigdo_dir must be used together with os_tree")
|
||||
@ -120,8 +187,14 @@ def write_script(opts, f):
|
||||
emit(f, "#!/bin/bash")
|
||||
emit(f, "set -ex")
|
||||
emit(f, "cd %s" % opts.output_dir)
|
||||
make_image(f, opts)
|
||||
run_isohybrid(f, opts)
|
||||
|
||||
if opts.use_xorrisofs and opts.buildinstall_method:
|
||||
script = write_xorriso_commands(opts)
|
||||
emit(f, "xorriso -dialog on <%s" % script)
|
||||
else:
|
||||
make_image(f, opts)
|
||||
run_isohybrid(f, opts)
|
||||
|
||||
implant_md5(f, opts)
|
||||
make_manifest(f, opts)
|
||||
if opts.jigdo_dir:
|
||||
|
20
pungi/errors.py
Normal file
20
pungi/errors.py
Normal file
@ -0,0 +1,20 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; version 2 of the License.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Library General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, see <https://gnu.org/licenses/>.
|
||||
|
||||
|
||||
class UnsignedPackagesError(RuntimeError):
|
||||
"""Raised when package set fails to find a properly signed copy of an
|
||||
RPM."""
|
||||
|
||||
pass
|
@ -35,7 +35,7 @@ from pungi.wrappers.createrepo import CreaterepoWrapper
|
||||
|
||||
|
||||
class ReentrantYumLock(object):
|
||||
""" A lock that can be acquired multiple times by the same process. """
|
||||
"""A lock that can be acquired multiple times by the same process."""
|
||||
|
||||
def __init__(self, lock, log):
|
||||
self.lock = lock
|
||||
@ -60,7 +60,7 @@ class ReentrantYumLock(object):
|
||||
|
||||
|
||||
def yumlocked(method):
|
||||
""" A locking decorator. """
|
||||
"""A locking decorator."""
|
||||
|
||||
def wrapper(self, *args, **kwargs):
|
||||
with self.yumlock:
|
||||
@ -519,7 +519,7 @@ class Pungi(PungiBase):
|
||||
|
||||
def verifyCachePkg(self, po, path): # Stolen from yum
|
||||
"""check the package checksum vs the cache
|
||||
return True if pkg is good, False if not"""
|
||||
return True if pkg is good, False if not"""
|
||||
|
||||
(csum_type, csum) = po.returnIdSum()
|
||||
|
||||
@ -682,7 +682,7 @@ class Pungi(PungiBase):
|
||||
|
||||
def get_package_deps(self, po):
|
||||
"""Add the dependencies for a given package to the
|
||||
transaction info"""
|
||||
transaction info"""
|
||||
added = set()
|
||||
if po.repoid in self.lookaside_repos:
|
||||
# Don't resolve deps for stuff in lookaside.
|
||||
@ -911,7 +911,7 @@ class Pungi(PungiBase):
|
||||
def getPackagesFromGroup(self, group):
|
||||
"""Get a list of package names from a ksparser group object
|
||||
|
||||
Returns a list of package names"""
|
||||
Returns a list of package names"""
|
||||
|
||||
packages = []
|
||||
|
||||
@ -951,7 +951,7 @@ class Pungi(PungiBase):
|
||||
|
||||
def _addDefaultGroups(self, excludeGroups=None):
|
||||
"""Cycle through the groups and return at list of the ones that ara
|
||||
default."""
|
||||
default."""
|
||||
excludeGroups = excludeGroups or []
|
||||
|
||||
# This is mostly stolen from anaconda.
|
||||
@ -1118,7 +1118,6 @@ class Pungi(PungiBase):
|
||||
self.logger.info("Finished gathering package objects.")
|
||||
|
||||
def gather(self):
|
||||
|
||||
# get package objects according to the input list
|
||||
self.getPackageObjects()
|
||||
if self.is_sources:
|
||||
@ -1217,8 +1216,8 @@ class Pungi(PungiBase):
|
||||
|
||||
def createSourceHashes(self):
|
||||
"""Create two dicts - one that maps binary POs to source POs, and
|
||||
one that maps a single source PO to all binary POs it produces.
|
||||
Requires yum still configured."""
|
||||
one that maps a single source PO to all binary POs it produces.
|
||||
Requires yum still configured."""
|
||||
self.src_by_bin = {}
|
||||
self.bin_by_src = {}
|
||||
self.logger.info("Generating source <-> binary package mappings")
|
||||
@ -1232,8 +1231,8 @@ class Pungi(PungiBase):
|
||||
|
||||
def add_srpms(self, po_list=None):
|
||||
"""Cycle through the list of package objects and
|
||||
find the sourcerpm for them. Requires yum still
|
||||
configured and a list of package objects"""
|
||||
find the sourcerpm for them. Requires yum still
|
||||
configured and a list of package objects"""
|
||||
|
||||
srpms = set()
|
||||
po_list = po_list or self.po_list
|
||||
@ -1275,9 +1274,9 @@ class Pungi(PungiBase):
|
||||
|
||||
def add_fulltree(self, srpm_po_list=None):
|
||||
"""Cycle through all package objects, and add any
|
||||
that correspond to a source rpm that we are including.
|
||||
Requires yum still configured and a list of package
|
||||
objects."""
|
||||
that correspond to a source rpm that we are including.
|
||||
Requires yum still configured and a list of package
|
||||
objects."""
|
||||
|
||||
self.logger.info("Completing package set")
|
||||
|
||||
@ -1357,8 +1356,8 @@ class Pungi(PungiBase):
|
||||
|
||||
def getDebuginfoList(self):
|
||||
"""Cycle through the list of package objects and find
|
||||
debuginfo rpms for them. Requires yum still
|
||||
configured and a list of package objects"""
|
||||
debuginfo rpms for them. Requires yum still
|
||||
configured and a list of package objects"""
|
||||
|
||||
added = set()
|
||||
for po in self.all_pkgs:
|
||||
@ -1398,7 +1397,7 @@ class Pungi(PungiBase):
|
||||
|
||||
def _downloadPackageList(self, polist, relpkgdir):
|
||||
"""Cycle through the list of package objects and
|
||||
download them from their respective repos."""
|
||||
download them from their respective repos."""
|
||||
|
||||
for pkg in sorted(polist):
|
||||
repo = self.ayum.repos.getRepo(pkg.repoid)
|
||||
@ -1533,7 +1532,7 @@ class Pungi(PungiBase):
|
||||
@yumlocked
|
||||
def downloadSRPMs(self):
|
||||
"""Cycle through the list of srpms and
|
||||
find the package objects for them, Then download them."""
|
||||
find the package objects for them, Then download them."""
|
||||
|
||||
# do the downloads
|
||||
self._downloadPackageList(self.srpm_po_list, os.path.join("source", "SRPMS"))
|
||||
@ -1541,7 +1540,7 @@ class Pungi(PungiBase):
|
||||
@yumlocked
|
||||
def downloadDebuginfo(self):
|
||||
"""Cycle through the list of debuginfo rpms and
|
||||
download them."""
|
||||
download them."""
|
||||
|
||||
# do the downloads
|
||||
self._downloadPackageList(
|
||||
@ -1980,7 +1979,7 @@ class Pungi(PungiBase):
|
||||
|
||||
def doGetRelnotes(self):
|
||||
"""Get extra files from packages in the tree to put in the topdir of
|
||||
the tree."""
|
||||
the tree."""
|
||||
docsdir = os.path.join(self.workdir, "docs")
|
||||
relnoterpms = self.config.get("pungi", "relnotepkgs").split()
|
||||
|
||||
|
@ -15,17 +15,21 @@
|
||||
|
||||
|
||||
from enum import Enum
|
||||
from itertools import count
|
||||
from functools import cmp_to_key
|
||||
from itertools import count, groupby
|
||||
import errno
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
|
||||
from kobo.rpmlib import parse_nvra
|
||||
import rpm
|
||||
|
||||
import pungi.common
|
||||
import pungi.dnf_wrapper
|
||||
import pungi.multilib_dnf
|
||||
import pungi.util
|
||||
from pungi import arch_utils
|
||||
from pungi.linker import Linker
|
||||
from pungi.profiler import Profiler
|
||||
from pungi.util import DEBUG_PATTERNS
|
||||
@ -36,6 +40,20 @@ def get_source_name(pkg):
|
||||
return pkg.sourcerpm.rsplit("-", 2)[0]
|
||||
|
||||
|
||||
def filter_dotarch(queue, pattern, **kwargs):
|
||||
"""Filter queue for packages matching the pattern. If pattern matches the
|
||||
dotarch format of <name>.<arch>, it is processed as such. Otherwise it is
|
||||
treated as just a name.
|
||||
"""
|
||||
kwargs["name__glob"] = pattern
|
||||
if "." in pattern:
|
||||
name, arch = pattern.split(".", 1)
|
||||
if arch in arch_utils.arches or arch == "noarch":
|
||||
kwargs["name__glob"] = name
|
||||
kwargs["arch"] = arch
|
||||
return queue.filter(**kwargs).apply()
|
||||
|
||||
|
||||
class GatherOptions(pungi.common.OptionsBase):
|
||||
def __init__(self, **kwargs):
|
||||
super(GatherOptions, self).__init__()
|
||||
@ -245,12 +263,36 @@ class Gather(GatherBase):
|
||||
# from lookaside. This can be achieved by removing any package that is
|
||||
# also in lookaside from the list.
|
||||
lookaside_pkgs = set()
|
||||
for pkg in package_list:
|
||||
if pkg.repoid in self.opts.lookaside_repos:
|
||||
lookaside_pkgs.add("{0.name}-{0.evr}".format(pkg))
|
||||
|
||||
if self.opts.greedy_method == "all":
|
||||
return list(package_list)
|
||||
if self.opts.lookaside_repos:
|
||||
# We will call `latest()` to get the highest version packages only.
|
||||
# However, that is per name and architecture. If a package switches
|
||||
# from arched to noarch or the other way, it is possible that the
|
||||
# package_list contains different versions in main repos and in
|
||||
# lookaside repos.
|
||||
# We need to manually filter the latest version.
|
||||
def vercmp(x, y):
|
||||
return rpm.labelCompare(x[1], y[1])
|
||||
|
||||
# Annotate the packages with their version.
|
||||
versioned_packages = [
|
||||
(pkg, (str(pkg.epoch) or "0", pkg.version, pkg.release))
|
||||
for pkg in package_list
|
||||
]
|
||||
# Sort the packages newest first.
|
||||
sorted_packages = sorted(
|
||||
versioned_packages, key=cmp_to_key(vercmp), reverse=True
|
||||
)
|
||||
# Group packages by version, take the first group and discard the
|
||||
# version info from the tuple.
|
||||
package_list = list(
|
||||
x[0] for x in next(groupby(sorted_packages, key=lambda x: x[1]))[1]
|
||||
)
|
||||
|
||||
# Now we can decide what is used from lookaside.
|
||||
for pkg in package_list:
|
||||
if pkg.repoid in self.opts.lookaside_repos:
|
||||
lookaside_pkgs.add("{0.name}-{0.evr}".format(pkg))
|
||||
|
||||
all_pkgs = []
|
||||
for pkg in package_list:
|
||||
@ -263,16 +305,21 @@ class Gather(GatherBase):
|
||||
|
||||
if not debuginfo:
|
||||
native_pkgs = set(
|
||||
self.q_native_binary_packages.filter(pkg=all_pkgs).apply()
|
||||
self.q_native_binary_packages.filter(pkg=all_pkgs).latest().apply()
|
||||
)
|
||||
multilib_pkgs = set(
|
||||
self.q_multilib_binary_packages.filter(pkg=all_pkgs).apply()
|
||||
self.q_multilib_binary_packages.filter(pkg=all_pkgs).latest().apply()
|
||||
)
|
||||
else:
|
||||
native_pkgs = set(self.q_native_debug_packages.filter(pkg=all_pkgs).apply())
|
||||
multilib_pkgs = set(
|
||||
self.q_multilib_debug_packages.filter(pkg=all_pkgs).apply()
|
||||
native_pkgs = set(
|
||||
self.q_native_debug_packages.filter(pkg=all_pkgs).latest().apply()
|
||||
)
|
||||
multilib_pkgs = set(
|
||||
self.q_multilib_debug_packages.filter(pkg=all_pkgs).latest().apply()
|
||||
)
|
||||
|
||||
if self.opts.greedy_method == "all":
|
||||
return list(native_pkgs | multilib_pkgs)
|
||||
|
||||
result = set()
|
||||
|
||||
@ -392,9 +439,7 @@ class Gather(GatherBase):
|
||||
"""Given an name of a queue (stored as attribute in `self`), exclude
|
||||
all given packages and keep only the latest per package name and arch.
|
||||
"""
|
||||
setattr(
|
||||
self, queue, getattr(self, queue).filter(pkg__neq=exclude).latest().apply()
|
||||
)
|
||||
setattr(self, queue, getattr(self, queue).filter(pkg__neq=exclude).apply())
|
||||
|
||||
@Profiler("Gather._apply_excludes()")
|
||||
def _apply_excludes(self, excludes):
|
||||
@ -420,12 +465,16 @@ class Gather(GatherBase):
|
||||
name__glob=pattern[:-4], reponame__neq=self.opts.lookaside_repos
|
||||
)
|
||||
elif pungi.util.pkg_is_debug(pattern):
|
||||
pkgs = self.q_debug_packages.filter(
|
||||
name__glob=pattern, reponame__neq=self.opts.lookaside_repos
|
||||
pkgs = filter_dotarch(
|
||||
self.q_debug_packages,
|
||||
pattern,
|
||||
reponame__neq=self.opts.lookaside_repos,
|
||||
)
|
||||
else:
|
||||
pkgs = self.q_binary_packages.filter(
|
||||
name__glob=pattern, reponame__neq=self.opts.lookaside_repos
|
||||
pkgs = filter_dotarch(
|
||||
self.q_binary_packages,
|
||||
pattern,
|
||||
reponame__neq=self.opts.lookaside_repos,
|
||||
)
|
||||
|
||||
exclude.update(pkgs)
|
||||
@ -491,21 +540,19 @@ class Gather(GatherBase):
|
||||
name__glob=pattern[:-2]
|
||||
).apply()
|
||||
else:
|
||||
pkgs = self.q_debug_packages.filter(
|
||||
name__glob=pattern
|
||||
).apply()
|
||||
pkgs = filter_dotarch(self.q_debug_packages, pattern)
|
||||
else:
|
||||
if pattern.endswith(".+"):
|
||||
pkgs = self.q_multilib_binary_packages.filter(
|
||||
name__glob=pattern[:-2]
|
||||
).apply()
|
||||
else:
|
||||
pkgs = self.q_binary_packages.filter(
|
||||
name__glob=pattern
|
||||
).apply()
|
||||
pkgs = filter_dotarch(self.q_binary_packages, pattern)
|
||||
|
||||
if not pkgs:
|
||||
self.logger.error("No package matches pattern %s" % pattern)
|
||||
self.logger.error(
|
||||
"Could not find a match for %s in any configured repo", pattern
|
||||
)
|
||||
|
||||
# The pattern could have been a glob. In that case we want to
|
||||
# group the packages by name and get best match in those
|
||||
@ -616,7 +663,6 @@ class Gather(GatherBase):
|
||||
return added
|
||||
|
||||
for pkg in self.result_debug_packages.copy():
|
||||
|
||||
if pkg not in self.finished_add_debug_package_deps:
|
||||
deps = self._get_package_deps(pkg, debuginfo=True)
|
||||
for i, req in deps:
|
||||
@ -784,7 +830,6 @@ class Gather(GatherBase):
|
||||
continue
|
||||
|
||||
debug_pkgs = []
|
||||
pkg_in_lookaside = pkg.repoid in self.opts.lookaside_repos
|
||||
for i in candidates:
|
||||
if pkg.arch != i.arch:
|
||||
continue
|
||||
@ -792,8 +837,14 @@ class Gather(GatherBase):
|
||||
# If it's not debugsource package or does not match name of
|
||||
# the package, we don't want it in.
|
||||
continue
|
||||
if i.repoid in self.opts.lookaside_repos or pkg_in_lookaside:
|
||||
if self.is_from_lookaside(i):
|
||||
self._set_flag(i, PkgFlag.lookaside)
|
||||
srpm_name = i.sourcerpm.rsplit("-", 2)[0]
|
||||
if srpm_name in self.opts.fulltree_excludes:
|
||||
self._set_flag(i, PkgFlag.fulltree_exclude)
|
||||
if PkgFlag.input in self.result_package_flags.get(srpm_name, set()):
|
||||
# If src rpm is marked as input, mark debuginfo as input too
|
||||
self._set_flag(i, PkgFlag.input)
|
||||
if i not in self.result_debug_packages:
|
||||
added.add(i)
|
||||
debug_pkgs.append(i)
|
||||
@ -1029,10 +1080,13 @@ class Gather(GatherBase):
|
||||
|
||||
# Link downloaded package in (or link package from file repo)
|
||||
try:
|
||||
linker.hardlink(pkg.localPkg(), target)
|
||||
except Exception:
|
||||
self.logger.error("Unable to link %s from the yum cache." % pkg.name)
|
||||
raise
|
||||
linker.link(pkg.localPkg(), target)
|
||||
except Exception as ex:
|
||||
if ex.errno == errno.EEXIST:
|
||||
self.logger.warning("Downloaded package exists in %s", target)
|
||||
else:
|
||||
self.logger.error("Unable to link %s from the yum cache.", pkg.name)
|
||||
raise
|
||||
|
||||
def log_count(self, msg, method, *args):
|
||||
"""
|
||||
|
@ -54,8 +54,7 @@ class SimpleAcyclicOrientedGraph(object):
|
||||
return False if node in self._graph else True
|
||||
|
||||
def remove_final_endpoint(self, node):
|
||||
"""
|
||||
"""
|
||||
""""""
|
||||
remove_start_points = []
|
||||
for start, ends in self._graph.items():
|
||||
if node in ends:
|
||||
|
@ -20,8 +20,8 @@ import os
|
||||
SIZE_UNITS = {
|
||||
"b": 1,
|
||||
"k": 1024,
|
||||
"M": 1024 ** 2,
|
||||
"G": 1024 ** 3,
|
||||
"M": 1024**2,
|
||||
"G": 1024**3,
|
||||
}
|
||||
|
||||
|
||||
|
@ -306,11 +306,6 @@ def write_tree_info(compose, arch, variant, timestamp=None, bi=None):
|
||||
if variant.type in ("addon",) or variant.is_empty:
|
||||
return
|
||||
|
||||
compose.log_debug(
|
||||
"on arch '%s' looking at variant '%s' of type '%s'"
|
||||
% (arch, variant, variant.type)
|
||||
)
|
||||
|
||||
if not timestamp:
|
||||
timestamp = int(time.time())
|
||||
else:
|
||||
|
@ -44,6 +44,30 @@ def iter_module_defaults(path):
|
||||
yield module_name, index.get_module(module_name).get_defaults()
|
||||
|
||||
|
||||
def get_module_obsoletes_idx(path, mod_list):
|
||||
"""Given a path to a directory with yaml files, return Index with
|
||||
merged all obsoletes.
|
||||
"""
|
||||
|
||||
merger = Modulemd.ModuleIndexMerger.new()
|
||||
md_idxs = []
|
||||
|
||||
# associate_index does NOT copy it's argument (nor increases a
|
||||
# reference counter on the object). It only stores a pointer.
|
||||
for file in glob.glob(os.path.join(path, "*.yaml")):
|
||||
index = Modulemd.ModuleIndex()
|
||||
index.update_from_file(file, strict=False)
|
||||
mod_name = index.get_module_names()[0]
|
||||
|
||||
if mod_name and (mod_name in mod_list or not mod_list):
|
||||
md_idxs.append(index)
|
||||
merger.associate_index(md_idxs[-1], 0)
|
||||
|
||||
merged_idx = merger.resolve()
|
||||
|
||||
return merged_idx
|
||||
|
||||
|
||||
def collect_module_defaults(
|
||||
defaults_dir, modules_to_load=None, mod_index=None, overrides_dir=None
|
||||
):
|
||||
@ -69,3 +93,26 @@ def collect_module_defaults(
|
||||
mod_index.add_defaults(defaults)
|
||||
|
||||
return mod_index
|
||||
|
||||
|
||||
def collect_module_obsoletes(obsoletes_dir, modules_to_load, mod_index=None):
|
||||
"""Load module obsoletes into index.
|
||||
|
||||
This works in a similar fashion as collect_module_defaults except it
|
||||
merges indexes together instead of adding them during iteration.
|
||||
|
||||
Additionally if modules_to_load is not empty returned Index will include
|
||||
only obsoletes for those modules.
|
||||
"""
|
||||
|
||||
obsoletes_index = get_module_obsoletes_idx(obsoletes_dir, modules_to_load)
|
||||
|
||||
# Merge Obsoletes with Modules Index.
|
||||
if mod_index:
|
||||
merger = Modulemd.ModuleIndexMerger.new()
|
||||
merger.associate_index(mod_index, 0)
|
||||
merger.associate_index(obsoletes_index, 0)
|
||||
merged_idx = merger.resolve()
|
||||
obsoletes_index = merged_idx
|
||||
|
||||
return obsoletes_index
|
||||
|
@ -81,9 +81,6 @@ class PungiNotifier(object):
|
||||
|
||||
self._update_args(kwargs)
|
||||
|
||||
if self.compose:
|
||||
workdir = self.compose.paths.compose.topdir()
|
||||
|
||||
with self.lock:
|
||||
for cmd in self.cmds:
|
||||
self._run_script(cmd, msg, workdir, kwargs)
|
||||
|
@ -19,6 +19,7 @@ import logging
|
||||
|
||||
from .tree import Tree
|
||||
from .installer import Installer
|
||||
from .container import Container
|
||||
|
||||
|
||||
def main(args=None):
|
||||
@ -65,6 +66,48 @@ def main(args=None):
|
||||
action="store_true",
|
||||
help="do not use rpm-ostree's built-in change detection",
|
||||
)
|
||||
treep.add_argument(
|
||||
"--unified-core",
|
||||
action="store_true",
|
||||
help="use unified core mode in rpm-ostree",
|
||||
)
|
||||
|
||||
container = subparser.add_parser(
|
||||
"container", help="Compose OSTree native container"
|
||||
)
|
||||
container.set_defaults(_class=Container, func="run")
|
||||
container.add_argument(
|
||||
"--name",
|
||||
required=True,
|
||||
help="the name of the the OCI archive (required)",
|
||||
)
|
||||
container.add_argument(
|
||||
"--path",
|
||||
required=True,
|
||||
help="where to output the OCI archive (required)",
|
||||
)
|
||||
container.add_argument(
|
||||
"--treefile",
|
||||
metavar="FILE",
|
||||
required=True,
|
||||
help="treefile for rpm-ostree (required)",
|
||||
)
|
||||
container.add_argument(
|
||||
"--log-dir",
|
||||
metavar="DIR",
|
||||
required=True,
|
||||
help="where to log output (required).",
|
||||
)
|
||||
container.add_argument(
|
||||
"--extra-config", metavar="FILE", help="JSON file contains extra configurations"
|
||||
)
|
||||
container.add_argument(
|
||||
"-v",
|
||||
"--version",
|
||||
metavar="VERSION",
|
||||
required=True,
|
||||
help="version identifier (required)",
|
||||
)
|
||||
|
||||
installerp = subparser.add_parser(
|
||||
"installer", help="Create an OSTree installer image"
|
||||
|
86
pungi/ostree/container.py
Normal file
86
pungi/ostree/container.py
Normal file
@ -0,0 +1,86 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; version 2 of the License.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Library General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, see <https://gnu.org/licenses/>.
|
||||
|
||||
|
||||
import os
|
||||
import json
|
||||
import six
|
||||
from six.moves import shlex_quote
|
||||
|
||||
|
||||
from .base import OSTree
|
||||
from .utils import tweak_treeconf
|
||||
|
||||
|
||||
def emit(cmd):
|
||||
"""Print line of shell code into the stream."""
|
||||
if isinstance(cmd, six.string_types):
|
||||
print(cmd)
|
||||
else:
|
||||
print(" ".join([shlex_quote(x) for x in cmd]))
|
||||
|
||||
|
||||
class Container(OSTree):
|
||||
def _make_container(self):
|
||||
"""Compose OSTree Container Native image"""
|
||||
stamp_file = os.path.join(self.logdir, "%s.stamp" % self.name)
|
||||
cmd = [
|
||||
"rpm-ostree",
|
||||
"compose",
|
||||
"image",
|
||||
# Always initialize for now
|
||||
"--initialize",
|
||||
# Touch the file if a new commit was created. This can help us tell
|
||||
# if the commitid file is missing because no commit was created or
|
||||
# because something went wrong.
|
||||
"--touch-if-changed=%s" % stamp_file,
|
||||
self.treefile,
|
||||
]
|
||||
fullpath = os.path.join(self.path, "%s.ociarchive" % self.name)
|
||||
cmd.append(fullpath)
|
||||
|
||||
# Set the umask to be more permissive so directories get group write
|
||||
# permissions. See https://pagure.io/releng/issue/8811#comment-629051
|
||||
emit("umask 0002")
|
||||
emit(cmd)
|
||||
|
||||
def run(self):
|
||||
self.name = self.args.name
|
||||
self.path = self.args.path
|
||||
self.treefile = self.args.treefile
|
||||
self.logdir = self.args.log_dir
|
||||
self.extra_config = self.args.extra_config
|
||||
|
||||
if self.extra_config:
|
||||
self.extra_config = json.load(open(self.extra_config, "r"))
|
||||
repos = self.extra_config.get("repo", [])
|
||||
keep_original_sources = self.extra_config.get(
|
||||
"keep_original_sources", False
|
||||
)
|
||||
else:
|
||||
# missing extra_config mustn't affect tweak_treeconf call
|
||||
repos = []
|
||||
keep_original_sources = True
|
||||
|
||||
update_dict = {"automatic-version-prefix": self.args.version}
|
||||
|
||||
self.treefile = tweak_treeconf(
|
||||
self.treefile,
|
||||
source_repos=repos,
|
||||
keep_original_sources=keep_original_sources,
|
||||
update_dict=update_dict,
|
||||
)
|
||||
|
||||
self._make_container()
|
@ -43,6 +43,9 @@ class Tree(OSTree):
|
||||
# because something went wrong.
|
||||
"--touch-if-changed=%s.stamp" % self.commitid_file,
|
||||
]
|
||||
if self.unified_core:
|
||||
# See https://github.com/coreos/rpm-ostree/issues/729
|
||||
cmd.append("--unified-core")
|
||||
if self.version:
|
||||
# Add versioning metadata
|
||||
cmd.append("--add-metadata-string=version=%s" % self.version)
|
||||
@ -121,6 +124,7 @@ class Tree(OSTree):
|
||||
self.extra_config = self.args.extra_config
|
||||
self.ostree_ref = self.args.ostree_ref
|
||||
self.force_new_commit = self.args.force_new_commit
|
||||
self.unified_core = self.args.unified_core
|
||||
|
||||
if self.extra_config or self.ostree_ref:
|
||||
if self.extra_config:
|
||||
|
@ -103,12 +103,23 @@ class LogPaths(object):
|
||||
makedirs(path)
|
||||
return path
|
||||
|
||||
def log_file(self, arch, log_name, create_dir=True):
|
||||
def koji_tasks_dir(self, create_dir=True):
|
||||
"""
|
||||
Examples:
|
||||
logs/global/koji-tasks
|
||||
"""
|
||||
path = os.path.join(self.topdir(create_dir=create_dir), "koji-tasks")
|
||||
if create_dir:
|
||||
makedirs(path)
|
||||
return path
|
||||
|
||||
def log_file(self, arch, log_name, create_dir=True, ext=None):
|
||||
ext = ext or "log"
|
||||
arch = arch or "global"
|
||||
if log_name.endswith(".log"):
|
||||
log_name = log_name[:-4]
|
||||
return os.path.join(
|
||||
self.topdir(arch, create_dir=create_dir), "%s.%s.log" % (log_name, arch)
|
||||
self.topdir(arch, create_dir=create_dir), "%s.%s.%s" % (log_name, arch, ext)
|
||||
)
|
||||
|
||||
|
||||
@ -498,10 +509,23 @@ class WorkPaths(object):
|
||||
makedirs(path)
|
||||
return path
|
||||
|
||||
def module_obsoletes_dir(self, create_dir=True):
|
||||
"""
|
||||
Example:
|
||||
work/global/module_obsoletes
|
||||
"""
|
||||
path = os.path.join(self.topdir(create_dir=create_dir), "module_obsoletes")
|
||||
if create_dir:
|
||||
makedirs(path)
|
||||
return path
|
||||
|
||||
def pkgset_file_cache(self, pkgset_name):
|
||||
"""
|
||||
Returns the path to file in which the cached version of
|
||||
PackageSetBase.file_cache should be stored.
|
||||
|
||||
Example:
|
||||
work/global/pkgset_f33-compose_file_cache.pickle
|
||||
"""
|
||||
filename = "pkgset_%s_file_cache.pickle" % pkgset_name
|
||||
return os.path.join(self.topdir(arch="global"), filename)
|
||||
|
@ -25,8 +25,9 @@ from .buildinstall import BuildinstallPhase # noqa
|
||||
from .extra_files import ExtraFilesPhase # noqa
|
||||
from .createiso import CreateisoPhase # noqa
|
||||
from .extra_isos import ExtraIsosPhase # noqa
|
||||
from .live_images import LiveImagesPhase # noqa
|
||||
from .image_build import ImageBuildPhase # noqa
|
||||
from .image_container import ImageContainerPhase # noqa
|
||||
from .kiwibuild import KiwiBuildPhase # noqa
|
||||
from .osbuild import OSBuildPhase # noqa
|
||||
from .repoclosure import RepoclosurePhase # noqa
|
||||
from .test import TestPhase # noqa
|
||||
@ -34,6 +35,7 @@ from .image_checksum import ImageChecksumPhase # noqa
|
||||
from .livemedia_phase import LiveMediaPhase # noqa
|
||||
from .ostree import OSTreePhase # noqa
|
||||
from .ostree_installer import OstreeInstallerPhase # noqa
|
||||
from .ostree_container import OSTreeContainerPhase # noqa
|
||||
from .osbs import OSBSPhase # noqa
|
||||
from .phases_metadata import gather_phases_metadata # noqa
|
||||
|
||||
|
@ -14,6 +14,8 @@
|
||||
# along with this program; if not, see <https://gnu.org/licenses/>.
|
||||
|
||||
import logging
|
||||
import math
|
||||
import time
|
||||
|
||||
from pungi import util
|
||||
|
||||
@ -58,6 +60,7 @@ class PhaseBase(object):
|
||||
self.compose.log_warning("[SKIP ] %s" % self.msg)
|
||||
self.finished = True
|
||||
return
|
||||
self._start_time = time.time()
|
||||
self.compose.log_info("[BEGIN] %s" % self.msg)
|
||||
self.compose.notifier.send("phase-start", phase_name=self.name)
|
||||
self.run()
|
||||
@ -108,6 +111,13 @@ class PhaseBase(object):
|
||||
self.pool.stop()
|
||||
self.finished = True
|
||||
self.compose.log_info("[DONE ] %s" % self.msg)
|
||||
|
||||
if hasattr(self, "_start_time"):
|
||||
self.compose.log_info(
|
||||
"PHASE %s took %d seconds"
|
||||
% (self.name.upper(), math.ceil(time.time() - self._start_time))
|
||||
)
|
||||
|
||||
if self.used_patterns is not None:
|
||||
# We only want to report this if the config was actually queried.
|
||||
self.report_unused_patterns()
|
||||
|
@ -31,14 +31,14 @@ from six.moves import shlex_quote
|
||||
from pungi.arch import get_valid_arches
|
||||
from pungi.util import get_volid, get_arch_variant_data
|
||||
from pungi.util import get_file_size, get_mtime, failable, makedirs
|
||||
from pungi.util import copy_all, translate_path, move_all
|
||||
from pungi.util import copy_all, translate_path
|
||||
from pungi.wrappers.lorax import LoraxWrapper
|
||||
from pungi.wrappers import iso
|
||||
from pungi.wrappers.scm import get_file
|
||||
from pungi.wrappers.scm import get_file_from_scm
|
||||
from pungi.wrappers import kojiwrapper
|
||||
from pungi.phases.base import PhaseBase
|
||||
from pungi.runroot import Runroot
|
||||
from pungi.runroot import Runroot, download_and_extract_archive
|
||||
|
||||
|
||||
class BuildinstallPhase(PhaseBase):
|
||||
@ -50,6 +50,9 @@ class BuildinstallPhase(PhaseBase):
|
||||
# A set of (variant_uid, arch) pairs that completed successfully. This
|
||||
# is needed to skip copying files for failed tasks.
|
||||
self.pool.finished_tasks = set()
|
||||
# A set of (variant_uid, arch) pairs that were reused from previous
|
||||
# compose.
|
||||
self.pool.reused_tasks = set()
|
||||
self.buildinstall_method = self.compose.conf.get("buildinstall_method")
|
||||
self.lorax_use_koji_plugin = self.compose.conf.get("lorax_use_koji_plugin")
|
||||
self.used_lorax = self.buildinstall_method == "lorax"
|
||||
@ -141,7 +144,7 @@ class BuildinstallPhase(PhaseBase):
|
||||
)
|
||||
if self.compose.has_comps:
|
||||
comps_repo = self.compose.paths.work.comps_repo(arch, variant)
|
||||
if final_output_dir != output_dir:
|
||||
if final_output_dir != output_dir or self.lorax_use_koji_plugin:
|
||||
comps_repo = translate_path(self.compose, comps_repo)
|
||||
repos.append(comps_repo)
|
||||
|
||||
@ -166,7 +169,6 @@ class BuildinstallPhase(PhaseBase):
|
||||
"rootfs-size": rootfs_size,
|
||||
"dracut-args": dracut_args,
|
||||
"skip_branding": skip_branding,
|
||||
"outputdir": output_dir,
|
||||
"squashfs_only": squashfs_only,
|
||||
"configuration_file": configuration_file,
|
||||
}
|
||||
@ -216,10 +218,6 @@ class BuildinstallPhase(PhaseBase):
|
||||
return repos
|
||||
|
||||
def run(self):
|
||||
lorax = LoraxWrapper()
|
||||
product = self.compose.conf["release_name"]
|
||||
version = self.compose.conf["release_version"]
|
||||
release = self.compose.conf["release_version"]
|
||||
disc_type = self.compose.conf["disc_types"].get("dvd", "dvd")
|
||||
|
||||
# Prepare kickstart file for final images.
|
||||
@ -236,7 +234,7 @@ class BuildinstallPhase(PhaseBase):
|
||||
)
|
||||
makedirs(final_output_dir)
|
||||
repo_baseurls = self.get_repos(arch)
|
||||
if final_output_dir != output_dir:
|
||||
if final_output_dir != output_dir or self.lorax_use_koji_plugin:
|
||||
repo_baseurls = [translate_path(self.compose, r) for r in repo_baseurls]
|
||||
|
||||
if self.buildinstall_method == "lorax":
|
||||
@ -272,29 +270,12 @@ class BuildinstallPhase(PhaseBase):
|
||||
),
|
||||
)
|
||||
)
|
||||
elif self.buildinstall_method == "buildinstall":
|
||||
volid = get_volid(self.compose, arch, disc_type=disc_type)
|
||||
commands.append(
|
||||
(
|
||||
None,
|
||||
lorax.get_buildinstall_cmd(
|
||||
product,
|
||||
version,
|
||||
release,
|
||||
repo_baseurls,
|
||||
output_dir,
|
||||
is_final=self.compose.supported,
|
||||
buildarch=arch,
|
||||
volid=volid,
|
||||
),
|
||||
)
|
||||
)
|
||||
else:
|
||||
raise ValueError(
|
||||
"Unsupported buildinstall method: %s" % self.buildinstall_method
|
||||
)
|
||||
|
||||
for (variant, cmd) in commands:
|
||||
for variant, cmd in commands:
|
||||
self.pool.add(BuildinstallThread(self.pool))
|
||||
self.pool.queue_put(
|
||||
(self.compose, arch, variant, cmd, self.pkgset_phase)
|
||||
@ -312,6 +293,18 @@ class BuildinstallPhase(PhaseBase):
|
||||
in self.pool.finished_tasks
|
||||
)
|
||||
|
||||
def reused(self, variant, arch):
|
||||
"""
|
||||
Check if buildinstall phase reused previous results for given variant
|
||||
and arch. If the phase is skipped, the results will be considered
|
||||
reused as well.
|
||||
"""
|
||||
return (
|
||||
super(BuildinstallPhase, self).skip()
|
||||
or (variant.uid if self.used_lorax else None, arch)
|
||||
in self.pool.reused_tasks
|
||||
)
|
||||
|
||||
|
||||
def get_kickstart_file(compose):
|
||||
scm_dict = compose.conf.get("buildinstall_kickstart")
|
||||
@ -349,9 +342,17 @@ BOOT_CONFIGS = [
|
||||
"EFI/BOOT/BOOTX64.conf",
|
||||
"EFI/BOOT/grub.cfg",
|
||||
]
|
||||
BOOT_IMAGES = [
|
||||
"images/efiboot.img",
|
||||
]
|
||||
|
||||
|
||||
def tweak_configs(path, volid, ks_file, configs=BOOT_CONFIGS, logger=None):
|
||||
"""
|
||||
Put escaped volume ID and possibly kickstart file into the boot
|
||||
configuration files.
|
||||
:returns: list of paths to modified config files
|
||||
"""
|
||||
volid_escaped = volid.replace(" ", r"\x20").replace("\\", "\\\\")
|
||||
volid_escaped_2 = volid_escaped.replace("\\", "\\\\")
|
||||
found_configs = []
|
||||
@ -359,7 +360,6 @@ def tweak_configs(path, volid, ks_file, configs=BOOT_CONFIGS, logger=None):
|
||||
config_path = os.path.join(path, config)
|
||||
if not os.path.exists(config_path):
|
||||
continue
|
||||
found_configs.append(config)
|
||||
|
||||
with open(config_path, "r") as f:
|
||||
data = original_data = f.read()
|
||||
@ -368,7 +368,7 @@ def tweak_configs(path, volid, ks_file, configs=BOOT_CONFIGS, logger=None):
|
||||
# double-escape volid in yaboot.conf
|
||||
new_volid = volid_escaped_2 if "yaboot" in config else volid_escaped
|
||||
|
||||
ks = (" ks=hd:LABEL=%s:/ks.cfg" % new_volid) if ks_file else ""
|
||||
ks = (" inst.ks=hd:LABEL=%s:/ks.cfg" % new_volid) if ks_file else ""
|
||||
|
||||
# pre-f18
|
||||
data = re.sub(r":CDLABEL=[^ \n]*", r":CDLABEL=%s%s" % (new_volid, ks), data)
|
||||
@ -379,8 +379,13 @@ def tweak_configs(path, volid, ks_file, configs=BOOT_CONFIGS, logger=None):
|
||||
with open(config_path, "w") as f:
|
||||
f.write(data)
|
||||
|
||||
if logger and data != original_data:
|
||||
logger.info("Boot config %s changed" % config_path)
|
||||
if data != original_data:
|
||||
found_configs.append(config)
|
||||
if logger:
|
||||
# Generally lorax should create file with correct volume id
|
||||
# already. If we don't have a kickstart, this function should
|
||||
# be a no-op.
|
||||
logger.info("Boot config %s changed" % config_path)
|
||||
|
||||
return found_configs
|
||||
|
||||
@ -419,31 +424,32 @@ def tweak_buildinstall(
|
||||
if kickstart_file and found_configs:
|
||||
shutil.copy2(kickstart_file, os.path.join(dst, "ks.cfg"))
|
||||
|
||||
images = [
|
||||
os.path.join(tmp_dir, "images", "efiboot.img"),
|
||||
]
|
||||
for image in images:
|
||||
if not os.path.isfile(image):
|
||||
continue
|
||||
images = [os.path.join(tmp_dir, img) for img in BOOT_IMAGES]
|
||||
if found_configs:
|
||||
for image in images:
|
||||
if not os.path.isfile(image):
|
||||
continue
|
||||
|
||||
with iso.mount(
|
||||
image,
|
||||
logger=compose._logger,
|
||||
use_guestmount=compose.conf.get("buildinstall_use_guestmount"),
|
||||
) as mount_tmp_dir:
|
||||
for config in BOOT_CONFIGS:
|
||||
config_path = os.path.join(tmp_dir, config)
|
||||
config_in_image = os.path.join(mount_tmp_dir, config)
|
||||
with iso.mount(
|
||||
image,
|
||||
logger=compose._logger,
|
||||
use_guestmount=compose.conf.get("buildinstall_use_guestmount"),
|
||||
) as mount_tmp_dir:
|
||||
for config in found_configs:
|
||||
# Put each modified config file into the image (overwriting the
|
||||
# original).
|
||||
config_path = os.path.join(tmp_dir, config)
|
||||
config_in_image = os.path.join(mount_tmp_dir, config)
|
||||
|
||||
if os.path.isfile(config_in_image):
|
||||
cmd = [
|
||||
"cp",
|
||||
"-v",
|
||||
"--remove-destination",
|
||||
config_path,
|
||||
config_in_image,
|
||||
]
|
||||
run(cmd)
|
||||
if os.path.isfile(config_in_image):
|
||||
cmd = [
|
||||
"cp",
|
||||
"-v",
|
||||
"--remove-destination",
|
||||
config_path,
|
||||
config_in_image,
|
||||
]
|
||||
run(cmd)
|
||||
|
||||
# HACK: make buildinstall files world readable
|
||||
run("chmod -R a+rX %s" % shlex_quote(tmp_dir))
|
||||
@ -515,7 +521,10 @@ def link_boot_iso(compose, arch, variant, can_fail):
|
||||
setattr(img, "can_fail", can_fail)
|
||||
setattr(img, "deliverable", "buildinstall")
|
||||
try:
|
||||
img.volume_id = iso.get_volume_id(new_boot_iso_path)
|
||||
img.volume_id = iso.get_volume_id(
|
||||
new_boot_iso_path,
|
||||
compose.conf.get("createiso_use_xorrisofs"),
|
||||
)
|
||||
except RuntimeError:
|
||||
pass
|
||||
# In this phase we should add to compose only the images that
|
||||
@ -661,9 +670,16 @@ class BuildinstallThread(WorkerThread):
|
||||
return None
|
||||
|
||||
compose.log_info("Loading old BUILDINSTALL phase metadata: %s", old_metadata)
|
||||
with open(old_metadata, "rb") as f:
|
||||
old_result = pickle.load(f)
|
||||
return old_result
|
||||
try:
|
||||
with open(old_metadata, "rb") as f:
|
||||
old_result = pickle.load(f)
|
||||
return old_result
|
||||
except Exception as e:
|
||||
compose.log_debug(
|
||||
"Failed to load old BUILDINSTALL phase metadata %s : %s"
|
||||
% (old_metadata, str(e))
|
||||
)
|
||||
return None
|
||||
|
||||
def _reuse_old_buildinstall_result(self, compose, arch, variant, cmd, pkgset_phase):
|
||||
"""
|
||||
@ -703,8 +719,8 @@ class BuildinstallThread(WorkerThread):
|
||||
# input on RPM level.
|
||||
cmd_copy = copy(cmd)
|
||||
for key in ["outputdir", "sources"]:
|
||||
del cmd_copy[key]
|
||||
del old_metadata["cmd"][key]
|
||||
cmd_copy.pop(key, None)
|
||||
old_metadata["cmd"].pop(key, None)
|
||||
|
||||
# Do not reuse if command line arguments are not the same.
|
||||
if old_metadata["cmd"] != cmd_copy:
|
||||
@ -729,7 +745,7 @@ class BuildinstallThread(WorkerThread):
|
||||
# Ask Koji for all the RPMs in the `runroot_tag` and check that
|
||||
# those installed in the old buildinstall buildroot are still in the
|
||||
# very same versions/releases.
|
||||
koji_wrapper = kojiwrapper.KojiWrapper(compose.conf["koji_profile"])
|
||||
koji_wrapper = kojiwrapper.KojiWrapper(compose)
|
||||
rpms = koji_wrapper.koji_proxy.listTaggedRPMS(
|
||||
compose.conf.get("runroot_tag"), inherit=True, latest=True
|
||||
)[0]
|
||||
@ -799,14 +815,15 @@ class BuildinstallThread(WorkerThread):
|
||||
if buildinstall_method == "lorax":
|
||||
packages += ["lorax"]
|
||||
chown_paths.append(_get_log_dir(compose, variant, arch))
|
||||
elif buildinstall_method == "buildinstall":
|
||||
packages += ["anaconda"]
|
||||
|
||||
packages += get_arch_variant_data(
|
||||
compose.conf, "buildinstall_packages", arch, variant
|
||||
)
|
||||
if self._reuse_old_buildinstall_result(
|
||||
compose, arch, variant, cmd, pkgset_phase
|
||||
):
|
||||
self.copy_files(compose, variant, arch)
|
||||
self.pool.finished_tasks.add((variant.uid if variant else None, arch))
|
||||
self.pool.reused_tasks.add((variant.uid if variant else None, arch))
|
||||
self.pool.log_info("[DONE ] %s" % msg)
|
||||
return
|
||||
|
||||
@ -818,13 +835,13 @@ class BuildinstallThread(WorkerThread):
|
||||
|
||||
# Start the runroot task.
|
||||
runroot = Runroot(compose, phase="buildinstall")
|
||||
task_id = None
|
||||
if buildinstall_method == "lorax" and lorax_use_koji_plugin:
|
||||
runroot.run_pungi_buildinstall(
|
||||
task_id = runroot.run_pungi_buildinstall(
|
||||
cmd,
|
||||
log_file=log_file,
|
||||
arch=arch,
|
||||
packages=packages,
|
||||
mounts=[compose.topdir],
|
||||
weight=compose.conf["runroot_weights"].get("buildinstall"),
|
||||
)
|
||||
else:
|
||||
@ -857,19 +874,17 @@ class BuildinstallThread(WorkerThread):
|
||||
log_dir = os.path.join(output_dir, "logs")
|
||||
copy_all(log_dir, final_log_dir)
|
||||
elif lorax_use_koji_plugin:
|
||||
# If Koji pungi-buildinstall is used, then the buildinstall results are
|
||||
# not stored directly in `output_dir` dir, but in "results" and "logs"
|
||||
# subdirectories. We need to move them to final_output_dir.
|
||||
results_dir = os.path.join(output_dir, "results")
|
||||
move_all(results_dir, final_output_dir, rm_src_dir=True)
|
||||
# If Koji pungi-buildinstall is used, then the buildinstall results
|
||||
# are attached as outputs to the Koji task. Download and unpack
|
||||
# them to the correct location.
|
||||
download_and_extract_archive(
|
||||
compose, task_id, "results.tar.gz", final_output_dir
|
||||
)
|
||||
|
||||
# Get the log_dir into which we should copy the resulting log files.
|
||||
# Download the logs into proper location too.
|
||||
log_fname = "buildinstall-%s-logs/dummy" % variant.uid
|
||||
final_log_dir = os.path.dirname(compose.paths.log.log_file(arch, log_fname))
|
||||
if not os.path.exists(final_log_dir):
|
||||
makedirs(final_log_dir)
|
||||
log_dir = os.path.join(output_dir, "logs")
|
||||
move_all(log_dir, final_log_dir, rm_src_dir=True)
|
||||
download_and_extract_archive(compose, task_id, "logs.tar.gz", final_log_dir)
|
||||
|
||||
rpms = runroot.get_buildroot_rpms()
|
||||
self._write_buildinstall_metadata(
|
||||
|
@ -14,15 +14,17 @@
|
||||
# along with this program; if not, see <https://gnu.org/licenses/>.
|
||||
|
||||
|
||||
import itertools
|
||||
import os
|
||||
import random
|
||||
import shutil
|
||||
import stat
|
||||
import json
|
||||
|
||||
import productmd.treeinfo
|
||||
from productmd.images import Image
|
||||
from kobo.threads import ThreadPool, WorkerThread
|
||||
from kobo.shortcuts import run, relative_path
|
||||
from kobo.shortcuts import run, relative_path, compute_file_checksums
|
||||
from six.moves import shlex_quote
|
||||
|
||||
from pungi.wrappers import iso
|
||||
@ -36,6 +38,7 @@ from pungi.util import (
|
||||
failable,
|
||||
get_file_size,
|
||||
get_mtime,
|
||||
read_json_file,
|
||||
)
|
||||
from pungi.media_split import MediaSplitter, convert_media_size
|
||||
from pungi.compose_metadata.discinfo import read_discinfo, write_discinfo
|
||||
@ -73,6 +76,185 @@ class CreateisoPhase(PhaseLoggerMixin, PhaseBase):
|
||||
return False
|
||||
return bool(self.compose.conf.get("buildinstall_method", ""))
|
||||
|
||||
def _metadata_path(self, variant, arch, disc_num, disc_count):
|
||||
return self.compose.paths.log.log_file(
|
||||
arch,
|
||||
"createiso-%s-%d-%d" % (variant.uid, disc_num, disc_count),
|
||||
ext="json",
|
||||
)
|
||||
|
||||
def save_reuse_metadata(self, cmd, variant, arch, opts):
|
||||
"""Save metadata for future composes to verify if the compose can be reused."""
|
||||
metadata = {
|
||||
"cmd": cmd,
|
||||
"opts": opts._asdict(),
|
||||
}
|
||||
|
||||
metadata_path = self._metadata_path(
|
||||
variant, arch, cmd["disc_num"], cmd["disc_count"]
|
||||
)
|
||||
with open(metadata_path, "w") as f:
|
||||
json.dump(metadata, f, indent=2)
|
||||
return metadata
|
||||
|
||||
def _load_old_metadata(self, cmd, variant, arch):
|
||||
metadata_path = self._metadata_path(
|
||||
variant, arch, cmd["disc_num"], cmd["disc_count"]
|
||||
)
|
||||
old_path = self.compose.paths.old_compose_path(metadata_path)
|
||||
self.logger.info(
|
||||
"Loading old metadata for %s.%s from: %s", variant, arch, old_path
|
||||
)
|
||||
try:
|
||||
return read_json_file(old_path)
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
def perform_reuse(self, cmd, variant, arch, opts, iso_path):
|
||||
"""
|
||||
Copy all related files from old compose to the new one. As a last step
|
||||
add the new image to metadata.
|
||||
"""
|
||||
linker = OldFileLinker(self.logger)
|
||||
old_file_name = os.path.basename(iso_path)
|
||||
current_file_name = os.path.basename(cmd["iso_path"])
|
||||
try:
|
||||
# Hardlink ISO and manifest
|
||||
for suffix in ("", ".manifest"):
|
||||
linker.link(iso_path + suffix, cmd["iso_path"] + suffix)
|
||||
# Copy log files
|
||||
# The log file name includes filename of the image, so we need to
|
||||
# find old file with the old name, and rename it to the new name.
|
||||
log_file = self.compose.paths.log.log_file(
|
||||
arch, "createiso-%s" % current_file_name
|
||||
)
|
||||
old_log_file = self.compose.paths.old_compose_path(
|
||||
self.compose.paths.log.log_file(arch, "createiso-%s" % old_file_name)
|
||||
)
|
||||
linker.link(old_log_file, log_file)
|
||||
# Copy jigdo files
|
||||
if opts.jigdo_dir:
|
||||
old_jigdo_dir = self.compose.paths.old_compose_path(opts.jigdo_dir)
|
||||
for suffix in (".template", ".jigdo"):
|
||||
linker.link(
|
||||
os.path.join(old_jigdo_dir, old_file_name) + suffix,
|
||||
os.path.join(opts.jigdo_dir, current_file_name) + suffix,
|
||||
)
|
||||
except Exception:
|
||||
# A problem happened while linking some file, let's clean up
|
||||
# everything.
|
||||
linker.abort()
|
||||
raise
|
||||
# Add image to manifest
|
||||
add_iso_to_metadata(
|
||||
self.compose,
|
||||
variant,
|
||||
arch,
|
||||
cmd["iso_path"],
|
||||
bootable=cmd["bootable"],
|
||||
disc_num=cmd["disc_num"],
|
||||
disc_count=cmd["disc_count"],
|
||||
)
|
||||
if self.compose.notifier:
|
||||
self.compose.notifier.send(
|
||||
"createiso-imagedone",
|
||||
file=cmd["iso_path"],
|
||||
arch=arch,
|
||||
variant=str(variant),
|
||||
)
|
||||
|
||||
def try_reuse(self, cmd, variant, arch, opts):
|
||||
"""Try to reuse image from previous compose.
|
||||
|
||||
:returns bool: True if reuse was successful, False otherwise
|
||||
"""
|
||||
if not self.compose.conf["createiso_allow_reuse"]:
|
||||
return
|
||||
|
||||
log_msg = "Cannot reuse ISO for %s.%s" % (variant, arch)
|
||||
current_metadata = self.save_reuse_metadata(cmd, variant, arch, opts)
|
||||
|
||||
if opts.buildinstall_method and not self.bi.reused(variant, arch):
|
||||
# If buildinstall phase was not reused for some reason, we can not
|
||||
# reuse any bootable image. If a package change caused rebuild of
|
||||
# boot.iso, we would catch it here too, but there could be a
|
||||
# configuration change in lorax template which would remain
|
||||
# undetected.
|
||||
self.logger.info("%s - boot configuration changed", log_msg)
|
||||
return False
|
||||
|
||||
# Check old compose configuration: extra_files and product_ids can be
|
||||
# reflected on ISO.
|
||||
old_config = self.compose.load_old_compose_config()
|
||||
if not old_config:
|
||||
self.logger.info("%s - no config for old compose", log_msg)
|
||||
return False
|
||||
|
||||
# Disable reuse if unsigned packages are allowed. The older compose
|
||||
# could have unsigned packages, and those may have been signed since
|
||||
# then. We want to regenerate the ISO to have signatures.
|
||||
if None in self.compose.conf["sigkeys"]:
|
||||
self.logger.info("%s - unsigned packages are allowed", log_msg)
|
||||
return False
|
||||
|
||||
# Convert current configuration to JSON and back to encode it similarly
|
||||
# to the old one
|
||||
config = json.loads(json.dumps(self.compose.conf))
|
||||
for opt in self.compose.conf:
|
||||
# Skip a selection of options: these affect what packages can be
|
||||
# included, which we explicitly check later on.
|
||||
config_whitelist = set(
|
||||
[
|
||||
"gather_lookaside_repos",
|
||||
"pkgset_koji_builds",
|
||||
"pkgset_koji_scratch_tasks",
|
||||
"pkgset_koji_module_builds",
|
||||
]
|
||||
)
|
||||
# Skip irrelevant options
|
||||
config_whitelist.update(["osbs", "osbuild"])
|
||||
if opt in config_whitelist:
|
||||
continue
|
||||
|
||||
if old_config.get(opt) != config.get(opt):
|
||||
self.logger.info("%s - option %s differs", log_msg, opt)
|
||||
return False
|
||||
|
||||
old_metadata = self._load_old_metadata(cmd, variant, arch)
|
||||
if not old_metadata:
|
||||
self.logger.info("%s - no old metadata found", log_msg)
|
||||
return False
|
||||
|
||||
# Test if volume ID matches - volid can be generated dynamically based on
|
||||
# other values, and could change even if nothing else is different.
|
||||
if current_metadata["opts"]["volid"] != old_metadata["opts"]["volid"]:
|
||||
self.logger.info("%s - volume ID differs", log_msg)
|
||||
return False
|
||||
|
||||
# Compare packages on the ISO.
|
||||
if compare_packages(
|
||||
old_metadata["opts"]["graft_points"],
|
||||
current_metadata["opts"]["graft_points"],
|
||||
):
|
||||
self.logger.info("%s - packages differ", log_msg)
|
||||
return False
|
||||
|
||||
try:
|
||||
self.perform_reuse(
|
||||
cmd,
|
||||
variant,
|
||||
arch,
|
||||
opts,
|
||||
old_metadata["cmd"]["iso_path"],
|
||||
)
|
||||
return True
|
||||
except Exception as exc:
|
||||
self.compose.log_error(
|
||||
"Error while reusing ISO for %s.%s: %s", variant, arch, exc
|
||||
)
|
||||
self.compose.traceback("createiso-reuse-%s-%s" % (variant, arch))
|
||||
return False
|
||||
|
||||
def run(self):
|
||||
symlink_isos_to = self.compose.conf.get("symlink_isos_to")
|
||||
disc_type = self.compose.conf["disc_types"].get("dvd", "dvd")
|
||||
@ -172,21 +354,29 @@ class CreateisoPhase(PhaseLoggerMixin, PhaseBase):
|
||||
supported=self.compose.supported,
|
||||
hfs_compat=self.compose.conf["iso_hfs_ppc64le_compatible"],
|
||||
use_xorrisofs=self.compose.conf.get("createiso_use_xorrisofs"),
|
||||
iso_level=get_iso_level_config(self.compose, variant, arch),
|
||||
)
|
||||
|
||||
if bootable:
|
||||
opts = opts._replace(
|
||||
buildinstall_method=self.compose.conf["buildinstall_method"]
|
||||
buildinstall_method=self.compose.conf[
|
||||
"buildinstall_method"
|
||||
],
|
||||
boot_iso=os.path.join(os_tree, "images", "boot.iso"),
|
||||
)
|
||||
|
||||
if self.compose.conf["create_jigdo"]:
|
||||
jigdo_dir = self.compose.paths.compose.jigdo_dir(arch, variant)
|
||||
opts = opts._replace(jigdo_dir=jigdo_dir, os_tree=os_tree)
|
||||
|
||||
script_file = os.path.join(
|
||||
self.compose.paths.work.tmp_dir(arch, variant),
|
||||
"createiso-%s.sh" % filename,
|
||||
)
|
||||
# Try to reuse
|
||||
if self.try_reuse(cmd, variant, arch, opts):
|
||||
# Reuse was successful, go to next ISO
|
||||
continue
|
||||
|
||||
script_dir = self.compose.paths.work.tmp_dir(arch, variant)
|
||||
opts = opts._replace(script_dir=script_dir)
|
||||
script_file = os.path.join(script_dir, "createiso-%s.sh" % filename)
|
||||
with open(script_file, "w") as f:
|
||||
createiso.write_script(opts, f)
|
||||
cmd["cmd"] = ["bash", script_file]
|
||||
@ -195,13 +385,43 @@ class CreateisoPhase(PhaseLoggerMixin, PhaseBase):
|
||||
if self.compose.notifier:
|
||||
self.compose.notifier.send("createiso-targets", deliverables=deliverables)
|
||||
|
||||
for (cmd, variant, arch) in commands:
|
||||
for cmd, variant, arch in commands:
|
||||
self.pool.add(CreateIsoThread(self.pool))
|
||||
self.pool.queue_put((self.compose, cmd, variant, arch))
|
||||
|
||||
self.pool.start()
|
||||
|
||||
|
||||
def read_packages(graft_points):
|
||||
"""Read packages that were listed in given graft points file.
|
||||
|
||||
Only files under Packages directory are considered. Particularly this
|
||||
excludes .discinfo, .treeinfo and media.repo as well as repodata and
|
||||
any extra files.
|
||||
|
||||
Extra files are easier to check by configuration (same name doesn't
|
||||
imply same content). Repodata depend entirely on included packages (and
|
||||
possibly product id certificate), but are affected by current time
|
||||
which can change checksum despite data being the same.
|
||||
"""
|
||||
with open(graft_points) as f:
|
||||
return set(
|
||||
line.split("=", 1)[0]
|
||||
for line in f
|
||||
if line.startswith("Packages/") or "/Packages/" in line
|
||||
)
|
||||
|
||||
|
||||
def compare_packages(old_graft_points, new_graft_points):
|
||||
"""Read packages from the two files and compare them.
|
||||
|
||||
:returns bool: True if there are differences, False otherwise
|
||||
"""
|
||||
old_files = read_packages(old_graft_points)
|
||||
new_files = read_packages(new_graft_points)
|
||||
return old_files != new_files
|
||||
|
||||
|
||||
class CreateIsoThread(WorkerThread):
|
||||
def fail(self, compose, cmd, variant, arch):
|
||||
self.pool.log_error("CreateISO failed, removing ISO: %s" % cmd["iso_path"])
|
||||
@ -246,7 +466,14 @@ class CreateIsoThread(WorkerThread):
|
||||
|
||||
try:
|
||||
run_createiso_command(
|
||||
num, compose, bootable, arch, cmd["cmd"], mounts, log_file
|
||||
num,
|
||||
compose,
|
||||
bootable,
|
||||
arch,
|
||||
cmd["cmd"],
|
||||
mounts,
|
||||
log_file,
|
||||
cmd["iso_path"],
|
||||
)
|
||||
except Exception:
|
||||
self.fail(compose, cmd, variant, arch)
|
||||
@ -313,7 +540,10 @@ def add_iso_to_metadata(
|
||||
setattr(img, "can_fail", compose.can_fail(variant, arch, "iso"))
|
||||
setattr(img, "deliverable", "iso")
|
||||
try:
|
||||
img.volume_id = iso.get_volume_id(iso_path)
|
||||
img.volume_id = iso.get_volume_id(
|
||||
iso_path,
|
||||
compose.conf.get("createiso_use_xorrisofs"),
|
||||
)
|
||||
except RuntimeError:
|
||||
pass
|
||||
if arch == "src":
|
||||
@ -325,19 +555,18 @@ def add_iso_to_metadata(
|
||||
|
||||
|
||||
def run_createiso_command(
|
||||
num, compose, bootable, arch, cmd, mounts, log_file, with_jigdo=True
|
||||
num, compose, bootable, arch, cmd, mounts, log_file, iso_path
|
||||
):
|
||||
packages = [
|
||||
"coreutils",
|
||||
"xorriso" if compose.conf.get("createiso_use_xorrisofs") else "genisoimage",
|
||||
"isomd5sum",
|
||||
]
|
||||
if with_jigdo and compose.conf["create_jigdo"]:
|
||||
if compose.conf["create_jigdo"]:
|
||||
packages.append("jigdo")
|
||||
if bootable:
|
||||
extra_packages = {
|
||||
"lorax": ["lorax", "which"],
|
||||
"buildinstall": ["anaconda"],
|
||||
}
|
||||
packages.extend(extra_packages[compose.conf["buildinstall_method"]])
|
||||
|
||||
@ -346,7 +575,7 @@ def run_createiso_command(
|
||||
build_arch = arch
|
||||
if runroot.runroot_method == "koji" and not bootable:
|
||||
runroot_tag = compose.conf["runroot_tag"]
|
||||
koji_wrapper = kojiwrapper.KojiWrapper(compose.conf["koji_profile"])
|
||||
koji_wrapper = kojiwrapper.KojiWrapper(compose)
|
||||
koji_proxy = koji_wrapper.koji_proxy
|
||||
tag_info = koji_proxy.getTag(runroot_tag)
|
||||
if not tag_info:
|
||||
@ -369,6 +598,76 @@ def run_createiso_command(
|
||||
weight=compose.conf["runroot_weights"].get("createiso"),
|
||||
)
|
||||
|
||||
if bootable and compose.conf.get("createiso_use_xorrisofs"):
|
||||
fix_treeinfo_checksums(compose, iso_path, arch)
|
||||
|
||||
|
||||
def fix_treeinfo_checksums(compose, iso_path, arch):
|
||||
"""It is possible for the ISO to contain a .treefile with incorrect
|
||||
checksums. By modifying the ISO (adding files) some of the images may
|
||||
change.
|
||||
|
||||
This function fixes that after the fact by looking for incorrect checksums,
|
||||
recalculating them and updating the .treeinfo file. Since the size of the
|
||||
file doesn't change, this seems to not change any images.
|
||||
"""
|
||||
modified = False
|
||||
with iso.mount(iso_path, compose._logger) as mountpoint:
|
||||
ti = productmd.TreeInfo()
|
||||
ti.load(os.path.join(mountpoint, ".treeinfo"))
|
||||
for image, (type_, expected) in ti.checksums.checksums.items():
|
||||
checksums = compute_file_checksums(os.path.join(mountpoint, image), [type_])
|
||||
actual = checksums[type_]
|
||||
if actual == expected:
|
||||
# Everything fine here, skip to next image.
|
||||
continue
|
||||
|
||||
compose.log_debug("%s: %s: checksum mismatch", iso_path, image)
|
||||
# Update treeinfo with correct checksum
|
||||
ti.checksums.checksums[image] = (type_, actual)
|
||||
modified = True
|
||||
|
||||
if not modified:
|
||||
compose.log_debug("%s: All checksums match, nothing to do.", iso_path)
|
||||
return
|
||||
|
||||
try:
|
||||
tmpdir = compose.mkdtemp(arch, prefix="fix-checksum-")
|
||||
# Write modified .treeinfo
|
||||
ti_path = os.path.join(tmpdir, ".treeinfo")
|
||||
compose.log_debug("Storing modified .treeinfo in %s", ti_path)
|
||||
ti.dump(ti_path)
|
||||
# Write a modified DVD into a temporary path, that is atomically moved
|
||||
# over the original file.
|
||||
fixed_path = os.path.join(tmpdir, "fixed-checksum-dvd.iso")
|
||||
cmd = ["xorriso"]
|
||||
cmd.extend(
|
||||
itertools.chain.from_iterable(
|
||||
iso.xorriso_commands(arch, iso_path, fixed_path)
|
||||
)
|
||||
)
|
||||
cmd.extend(["-map", ti_path, ".treeinfo"])
|
||||
run(
|
||||
cmd,
|
||||
logfile=compose.paths.log.log_file(
|
||||
arch, "checksum-fix_generate_%s" % os.path.basename(iso_path)
|
||||
),
|
||||
)
|
||||
# The modified ISO no longer has implanted MD5, so that needs to be
|
||||
# fixed again.
|
||||
compose.log_debug("Implanting new MD5 to %s", fixed_path)
|
||||
run(
|
||||
iso.get_implantisomd5_cmd(fixed_path, compose.supported),
|
||||
logfile=compose.paths.log.log_file(
|
||||
arch, "checksum-fix_implantisomd5_%s" % os.path.basename(iso_path)
|
||||
),
|
||||
)
|
||||
# All done, move the updated image to the final location.
|
||||
compose.log_debug("Updating %s", iso_path)
|
||||
os.rename(fixed_path, iso_path)
|
||||
finally:
|
||||
shutil.rmtree(tmpdir)
|
||||
|
||||
|
||||
def split_iso(compose, arch, variant, no_split=False, logger=None):
|
||||
"""
|
||||
@ -598,3 +897,36 @@ def create_hardlinks(staging_dir, log_file):
|
||||
"""
|
||||
cmd = ["/usr/sbin/hardlink", "-c", "-vv", staging_dir]
|
||||
run(cmd, logfile=log_file, show_cmd=True)
|
||||
|
||||
|
||||
class OldFileLinker(object):
|
||||
"""
|
||||
A wrapper around os.link that remembers which files were linked and can
|
||||
clean them up.
|
||||
"""
|
||||
|
||||
def __init__(self, logger):
|
||||
self.logger = logger
|
||||
self.linked_files = []
|
||||
|
||||
def link(self, src, dst):
|
||||
self.logger.debug("Hardlinking %s to %s", src, dst)
|
||||
os.link(src, dst)
|
||||
self.linked_files.append(dst)
|
||||
|
||||
def abort(self):
|
||||
"""Clean up all files created by this instance."""
|
||||
for f in self.linked_files:
|
||||
os.unlink(f)
|
||||
|
||||
|
||||
def get_iso_level_config(compose, variant, arch):
|
||||
"""
|
||||
Get configured ISO level for this variant and architecture.
|
||||
"""
|
||||
level = compose.conf.get("iso_level")
|
||||
if isinstance(level, list):
|
||||
level = None
|
||||
for c in get_arch_variant_data(compose.conf, "iso_level", arch, variant):
|
||||
level = c
|
||||
return level
|
||||
|
@ -16,7 +16,6 @@
|
||||
|
||||
__all__ = ("create_variant_repo",)
|
||||
|
||||
|
||||
import copy
|
||||
import errno
|
||||
import glob
|
||||
@ -25,19 +24,22 @@ import shutil
|
||||
import threading
|
||||
import xml.dom.minidom
|
||||
|
||||
from kobo.threads import ThreadPool, WorkerThread
|
||||
from kobo.shortcuts import run, relative_path
|
||||
|
||||
from ..wrappers.scm import get_dir_from_scm
|
||||
from ..wrappers.createrepo import CreaterepoWrapper
|
||||
from .base import PhaseBase
|
||||
from ..util import get_arch_variant_data, temp_dir
|
||||
from ..module_util import Modulemd, collect_module_defaults
|
||||
|
||||
import productmd.rpms
|
||||
import productmd.modules
|
||||
import productmd.rpms
|
||||
from kobo.shortcuts import relative_path, run
|
||||
from kobo.threads import ThreadPool, WorkerThread
|
||||
|
||||
from ..module_util import Modulemd, collect_module_defaults, collect_module_obsoletes
|
||||
from ..util import (
|
||||
get_arch_variant_data,
|
||||
read_single_module_stream_from_file,
|
||||
temp_dir,
|
||||
)
|
||||
from ..wrappers.createrepo import CreaterepoWrapper
|
||||
from ..wrappers.scm import get_dir_from_scm
|
||||
from .base import PhaseBase
|
||||
|
||||
CACHE_TOPDIR = "/var/cache/pungi/createrepo_c/"
|
||||
createrepo_lock = threading.Lock()
|
||||
createrepo_dirs = set()
|
||||
|
||||
@ -79,6 +81,7 @@ class CreaterepoPhase(PhaseBase):
|
||||
get_dir_from_scm(
|
||||
self.compose.conf["createrepo_extra_modulemd"][variant.uid],
|
||||
self.compose.paths.work.tmp_dir(variant=variant, create_dir=False),
|
||||
compose=self.compose,
|
||||
)
|
||||
|
||||
self.pool.queue_put((self.compose, None, variant, "srpm"))
|
||||
@ -188,6 +191,23 @@ def create_variant_repo(
|
||||
comps_path = None
|
||||
if compose.has_comps and pkg_type == "rpm":
|
||||
comps_path = compose.paths.work.comps(arch=arch, variant=variant)
|
||||
|
||||
if compose.conf["createrepo_enable_cache"]:
|
||||
cachedir = os.path.join(
|
||||
CACHE_TOPDIR,
|
||||
"%s-%s" % (compose.conf["release_short"], os.getuid()),
|
||||
)
|
||||
if not os.path.exists(cachedir):
|
||||
try:
|
||||
os.makedirs(cachedir)
|
||||
except Exception as e:
|
||||
compose.log_warning(
|
||||
"Cache disabled because cannot create cache dir %s %s"
|
||||
% (cachedir, str(e))
|
||||
)
|
||||
cachedir = None
|
||||
else:
|
||||
cachedir = None
|
||||
cmd = repo.get_createrepo_cmd(
|
||||
repo_dir,
|
||||
update=True,
|
||||
@ -203,6 +223,7 @@ def create_variant_repo(
|
||||
oldpackagedirs=old_package_dirs,
|
||||
use_xz=compose.conf["createrepo_use_xz"],
|
||||
extra_args=compose.conf["createrepo_extra_args"],
|
||||
cachedir=cachedir,
|
||||
)
|
||||
log_file = compose.paths.log.log_file(
|
||||
arch, "createrepo-%s.%s" % (variant, pkg_type)
|
||||
@ -245,12 +266,15 @@ def create_variant_repo(
|
||||
defaults_dir, module_names, mod_index, overrides_dir=overrides_dir
|
||||
)
|
||||
|
||||
obsoletes_dir = compose.paths.work.module_obsoletes_dir()
|
||||
mod_index = collect_module_obsoletes(obsoletes_dir, module_names, mod_index)
|
||||
|
||||
# Add extra modulemd files
|
||||
if variant.uid in compose.conf.get("createrepo_extra_modulemd", {}):
|
||||
compose.log_debug("Adding extra modulemd for %s.%s", variant.uid, arch)
|
||||
dirname = compose.paths.work.tmp_dir(variant=variant, create_dir=False)
|
||||
for filepath in glob.glob(os.path.join(dirname, arch) + "/*.yaml"):
|
||||
module_stream = Modulemd.ModuleStream.read_file(filepath, strict=True)
|
||||
module_stream = read_single_module_stream_from_file(filepath)
|
||||
if not mod_index.add_module_stream(module_stream):
|
||||
raise RuntimeError(
|
||||
"Failed parsing modulemd data from %s" % filepath
|
||||
@ -343,7 +367,7 @@ def get_productids_from_scm(compose):
|
||||
|
||||
tmp_dir = compose.mkdtemp(prefix="pungi_")
|
||||
try:
|
||||
get_dir_from_scm(product_id, tmp_dir)
|
||||
get_dir_from_scm(product_id, tmp_dir, compose=compose)
|
||||
except OSError as e:
|
||||
if e.errno == errno.ENOENT and product_id_allow_missing:
|
||||
compose.log_warning("No product IDs in %s" % product_id)
|
||||
|
@ -14,6 +14,8 @@
|
||||
# along with this program; if not, see <https://gnu.org/licenses/>.
|
||||
|
||||
import os
|
||||
import hashlib
|
||||
import json
|
||||
|
||||
from kobo.shortcuts import force_list
|
||||
from kobo.threads import ThreadPool, WorkerThread
|
||||
@ -28,8 +30,17 @@ from pungi.phases.createiso import (
|
||||
copy_boot_images,
|
||||
run_createiso_command,
|
||||
load_and_tweak_treeinfo,
|
||||
compare_packages,
|
||||
OldFileLinker,
|
||||
get_iso_level_config,
|
||||
)
|
||||
from pungi.util import (
|
||||
failable,
|
||||
get_format_substs,
|
||||
get_variant_data,
|
||||
get_volid,
|
||||
read_json_file,
|
||||
)
|
||||
from pungi.util import failable, get_format_substs, get_variant_data, get_volid
|
||||
from pungi.wrappers import iso
|
||||
from pungi.wrappers.scm import get_dir_from_scm, get_file_from_scm
|
||||
|
||||
@ -37,9 +48,10 @@ from pungi.wrappers.scm import get_dir_from_scm, get_file_from_scm
|
||||
class ExtraIsosPhase(PhaseLoggerMixin, ConfigGuardedPhase, PhaseBase):
|
||||
name = "extra_isos"
|
||||
|
||||
def __init__(self, compose):
|
||||
def __init__(self, compose, buildinstall_phase):
|
||||
super(ExtraIsosPhase, self).__init__(compose)
|
||||
self.pool = ThreadPool(logger=self.logger)
|
||||
self.bi = buildinstall_phase
|
||||
|
||||
def validate(self):
|
||||
for variant in self.compose.get_variants(types=["variant"]):
|
||||
@ -64,14 +76,18 @@ class ExtraIsosPhase(PhaseLoggerMixin, ConfigGuardedPhase, PhaseBase):
|
||||
for arch in sorted(arches):
|
||||
commands.append((config, variant, arch))
|
||||
|
||||
for (config, variant, arch) in commands:
|
||||
self.pool.add(ExtraIsosThread(self.pool))
|
||||
for config, variant, arch in commands:
|
||||
self.pool.add(ExtraIsosThread(self.pool, self.bi))
|
||||
self.pool.queue_put((self.compose, config, variant, arch))
|
||||
|
||||
self.pool.start()
|
||||
|
||||
|
||||
class ExtraIsosThread(WorkerThread):
|
||||
def __init__(self, pool, buildinstall_phase):
|
||||
super(ExtraIsosThread, self).__init__(pool)
|
||||
self.bi = buildinstall_phase
|
||||
|
||||
def process(self, item, num):
|
||||
self.num = num
|
||||
compose, config, variant, arch = item
|
||||
@ -115,35 +131,43 @@ class ExtraIsosThread(WorkerThread):
|
||||
supported=compose.supported,
|
||||
hfs_compat=compose.conf["iso_hfs_ppc64le_compatible"],
|
||||
use_xorrisofs=compose.conf.get("createiso_use_xorrisofs"),
|
||||
iso_level=get_iso_level_config(compose, variant, arch),
|
||||
)
|
||||
os_tree = compose.paths.compose.os_tree(arch, variant)
|
||||
if compose.conf["create_jigdo"]:
|
||||
jigdo_dir = compose.paths.compose.jigdo_dir(arch, variant)
|
||||
os_tree = compose.paths.compose.os_tree(arch, variant)
|
||||
opts = opts._replace(jigdo_dir=jigdo_dir, os_tree=os_tree)
|
||||
|
||||
if bootable:
|
||||
opts = opts._replace(
|
||||
buildinstall_method=compose.conf["buildinstall_method"]
|
||||
buildinstall_method=compose.conf["buildinstall_method"],
|
||||
boot_iso=os.path.join(os_tree, "images", "boot.iso"),
|
||||
)
|
||||
|
||||
script_file = os.path.join(
|
||||
compose.paths.work.tmp_dir(arch, variant), "extraiso-%s.sh" % filename
|
||||
)
|
||||
with open(script_file, "w") as f:
|
||||
createiso.write_script(opts, f)
|
||||
# Check if it can be reused.
|
||||
hash = hashlib.sha256()
|
||||
hash.update(json.dumps(config, sort_keys=True).encode("utf-8"))
|
||||
config_hash = hash.hexdigest()
|
||||
|
||||
run_createiso_command(
|
||||
self.num,
|
||||
compose,
|
||||
bootable,
|
||||
arch,
|
||||
["bash", script_file],
|
||||
[compose.topdir],
|
||||
log_file=compose.paths.log.log_file(
|
||||
arch, "extraiso-%s" % os.path.basename(iso_path)
|
||||
),
|
||||
with_jigdo=compose.conf["create_jigdo"],
|
||||
)
|
||||
if not self.try_reuse(compose, variant, arch, config_hash, opts):
|
||||
script_dir = compose.paths.work.tmp_dir(arch, variant)
|
||||
opts = opts._replace(script_dir=script_dir)
|
||||
script_file = os.path.join(script_dir, "extraiso-%s.sh" % filename)
|
||||
with open(script_file, "w") as f:
|
||||
createiso.write_script(opts, f)
|
||||
|
||||
run_createiso_command(
|
||||
self.num,
|
||||
compose,
|
||||
bootable,
|
||||
arch,
|
||||
["bash", script_file],
|
||||
[compose.topdir],
|
||||
log_file=compose.paths.log.log_file(
|
||||
arch, "extraiso-%s" % os.path.basename(iso_path)
|
||||
),
|
||||
iso_path=iso_path,
|
||||
)
|
||||
|
||||
img = add_iso_to_metadata(
|
||||
compose,
|
||||
@ -155,8 +179,163 @@ class ExtraIsosThread(WorkerThread):
|
||||
)
|
||||
img._max_size = config.get("max_size")
|
||||
|
||||
save_reuse_metadata(compose, variant, arch, config_hash, opts, iso_path)
|
||||
|
||||
self.pool.log_info("[DONE ] %s" % msg)
|
||||
|
||||
def try_reuse(self, compose, variant, arch, config_hash, opts):
|
||||
# Check explicit config
|
||||
if not compose.conf["extraiso_allow_reuse"]:
|
||||
return
|
||||
|
||||
log_msg = "Cannot reuse ISO for %s.%s" % (variant, arch)
|
||||
|
||||
if opts.buildinstall_method and not self.bi.reused(variant, arch):
|
||||
# If buildinstall phase was not reused for some reason, we can not
|
||||
# reuse any bootable image. If a package change caused rebuild of
|
||||
# boot.iso, we would catch it here too, but there could be a
|
||||
# configuration change in lorax template which would remain
|
||||
# undetected.
|
||||
self.pool.log_info("%s - boot configuration changed", log_msg)
|
||||
return False
|
||||
|
||||
# Check old compose configuration: extra_files and product_ids can be
|
||||
# reflected on ISO.
|
||||
old_config = compose.load_old_compose_config()
|
||||
if not old_config:
|
||||
self.pool.log_info("%s - no config for old compose", log_msg)
|
||||
return False
|
||||
|
||||
# Disable reuse if unsigned packages are allowed. The older compose
|
||||
# could have unsigned packages, and those may have been signed since
|
||||
# then. We want to regenerate the ISO to have signatures.
|
||||
if None in compose.conf["sigkeys"]:
|
||||
self.pool.log_info("%s - unsigned packages are allowed", log_msg)
|
||||
return False
|
||||
|
||||
# Convert current configuration to JSON and back to encode it similarly
|
||||
# to the old one
|
||||
config = json.loads(json.dumps(compose.conf))
|
||||
for opt in compose.conf:
|
||||
# Skip a selection of options: these affect what packages can be
|
||||
# included, which we explicitly check later on.
|
||||
config_whitelist = set(
|
||||
[
|
||||
"gather_lookaside_repos",
|
||||
"pkgset_koji_builds",
|
||||
"pkgset_koji_scratch_tasks",
|
||||
"pkgset_koji_module_builds",
|
||||
]
|
||||
)
|
||||
# Skip irrelevant options
|
||||
config_whitelist.update(["osbs", "osbuild"])
|
||||
if opt in config_whitelist:
|
||||
continue
|
||||
|
||||
if old_config.get(opt) != config.get(opt):
|
||||
self.pool.log_info("%s - option %s differs", log_msg, opt)
|
||||
return False
|
||||
|
||||
old_metadata = load_old_metadata(compose, variant, arch, config_hash)
|
||||
if not old_metadata:
|
||||
self.pool.log_info("%s - no old metadata found", log_msg)
|
||||
return False
|
||||
|
||||
# Test if volume ID matches - volid can be generated dynamically based on
|
||||
# other values, and could change even if nothing else is different.
|
||||
if opts.volid != old_metadata["opts"]["volid"]:
|
||||
self.pool.log_info("%s - volume ID differs", log_msg)
|
||||
return False
|
||||
|
||||
# Compare packages on the ISO.
|
||||
if compare_packages(
|
||||
old_metadata["opts"]["graft_points"],
|
||||
opts.graft_points,
|
||||
):
|
||||
self.pool.log_info("%s - packages differ", log_msg)
|
||||
return False
|
||||
|
||||
try:
|
||||
self.perform_reuse(
|
||||
compose,
|
||||
variant,
|
||||
arch,
|
||||
opts,
|
||||
old_metadata["opts"]["output_dir"],
|
||||
old_metadata["opts"]["iso_name"],
|
||||
)
|
||||
return True
|
||||
except Exception as exc:
|
||||
self.pool.log_error(
|
||||
"Error while reusing ISO for %s.%s: %s", variant, arch, exc
|
||||
)
|
||||
compose.traceback("extraiso-reuse-%s-%s-%s" % (variant, arch, config_hash))
|
||||
return False
|
||||
|
||||
def perform_reuse(self, compose, variant, arch, opts, old_iso_dir, old_file_name):
|
||||
"""
|
||||
Copy all related files from old compose to the new one. As a last step
|
||||
add the new image to metadata.
|
||||
"""
|
||||
linker = OldFileLinker(self.pool._logger)
|
||||
old_iso_path = os.path.join(old_iso_dir, old_file_name)
|
||||
iso_path = os.path.join(opts.output_dir, opts.iso_name)
|
||||
try:
|
||||
# Hardlink ISO and manifest
|
||||
for suffix in ("", ".manifest"):
|
||||
linker.link(old_iso_path + suffix, iso_path + suffix)
|
||||
# Copy log files
|
||||
# The log file name includes filename of the image, so we need to
|
||||
# find old file with the old name, and rename it to the new name.
|
||||
log_file = compose.paths.log.log_file(arch, "extraiso-%s" % opts.iso_name)
|
||||
old_log_file = compose.paths.old_compose_path(
|
||||
compose.paths.log.log_file(arch, "extraiso-%s" % old_file_name)
|
||||
)
|
||||
linker.link(old_log_file, log_file)
|
||||
# Copy jigdo files
|
||||
if opts.jigdo_dir:
|
||||
old_jigdo_dir = compose.paths.old_compose_path(opts.jigdo_dir)
|
||||
for suffix in (".template", ".jigdo"):
|
||||
linker.link(
|
||||
os.path.join(old_jigdo_dir, old_file_name) + suffix,
|
||||
os.path.join(opts.jigdo_dir, opts.iso_name) + suffix,
|
||||
)
|
||||
except Exception:
|
||||
# A problem happened while linking some file, let's clean up
|
||||
# everything.
|
||||
linker.abort()
|
||||
raise
|
||||
|
||||
|
||||
def save_reuse_metadata(compose, variant, arch, config_hash, opts, iso_path):
|
||||
"""
|
||||
Save metadata for possible reuse of this image. The file name is determined
|
||||
from the hash of a configuration snippet for this image. Any change in that
|
||||
configuration in next compose will change the hash and thus reuse will be
|
||||
blocked.
|
||||
"""
|
||||
metadata = {"opts": opts._asdict()}
|
||||
metadata_path = compose.paths.log.log_file(
|
||||
arch,
|
||||
"extraiso-reuse-%s-%s-%s" % (variant.uid, arch, config_hash),
|
||||
ext="json",
|
||||
)
|
||||
with open(metadata_path, "w") as f:
|
||||
json.dump(metadata, f, indent=2)
|
||||
|
||||
|
||||
def load_old_metadata(compose, variant, arch, config_hash):
|
||||
metadata_path = compose.paths.log.log_file(
|
||||
arch,
|
||||
"extraiso-reuse-%s-%s-%s" % (variant.uid, arch, config_hash),
|
||||
ext="json",
|
||||
)
|
||||
old_path = compose.paths.old_compose_path(metadata_path)
|
||||
try:
|
||||
return read_json_file(old_path)
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
def get_extra_files(compose, variant, arch, extra_files):
|
||||
"""Clone the configured files into a directory from where they can be
|
||||
|
@ -14,51 +14,50 @@
|
||||
# along with this program; if not, see <https://gnu.org/licenses/>.
|
||||
|
||||
|
||||
import glob
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import threading
|
||||
import six
|
||||
from six.moves import cPickle as pickle
|
||||
|
||||
from kobo.rpmlib import parse_nvra
|
||||
from kobo.shortcuts import run
|
||||
from productmd.rpms import Rpms
|
||||
from pungi.phases.pkgset.common import get_all_arches
|
||||
from six.moves import cPickle as pickle
|
||||
|
||||
try:
|
||||
from queue import Queue
|
||||
except ImportError:
|
||||
from Queue import Queue
|
||||
|
||||
from pungi.wrappers.scm import get_file_from_scm
|
||||
from .link import link_files
|
||||
from ...wrappers.createrepo import CreaterepoWrapper
|
||||
import pungi.wrappers.kojiwrapper
|
||||
|
||||
from pungi.compose import get_ordered_variant_uids
|
||||
from pungi.arch import get_compatible_arches, split_name_arch
|
||||
from pungi.compose import get_ordered_variant_uids
|
||||
from pungi.module_util import (
|
||||
Modulemd,
|
||||
collect_module_defaults,
|
||||
collect_module_obsoletes,
|
||||
)
|
||||
from pungi.phases.base import PhaseBase
|
||||
from pungi.util import get_arch_data, get_arch_variant_data, get_variant_data, makedirs
|
||||
from pungi.module_util import Modulemd, collect_module_defaults
|
||||
from pungi.phases.createrepo import add_modular_metadata
|
||||
from pungi.util import get_arch_data, get_arch_variant_data, get_variant_data, makedirs
|
||||
from pungi.wrappers.scm import get_file_from_scm
|
||||
|
||||
from ...wrappers.createrepo import CreaterepoWrapper
|
||||
from .link import link_files
|
||||
|
||||
|
||||
def get_gather_source(name):
|
||||
import pungi.phases.gather.sources
|
||||
from .source import GatherSourceContainer
|
||||
|
||||
GatherSourceContainer.register_module(pungi.phases.gather.sources)
|
||||
container = GatherSourceContainer()
|
||||
return container["GatherSource%s" % name]
|
||||
return pungi.phases.gather.sources.ALL_SOURCES[name.lower()]
|
||||
|
||||
|
||||
def get_gather_method(name):
|
||||
import pungi.phases.gather.methods
|
||||
from .method import GatherMethodContainer
|
||||
|
||||
GatherMethodContainer.register_module(pungi.phases.gather.methods)
|
||||
container = GatherMethodContainer()
|
||||
return container["GatherMethod%s" % name]
|
||||
return pungi.phases.gather.methods.ALL_METHODS[name.lower()]
|
||||
|
||||
|
||||
class GatherPhase(PhaseBase):
|
||||
@ -87,17 +86,34 @@ class GatherPhase(PhaseBase):
|
||||
if variant.modules:
|
||||
errors.append("Modular compose requires libmodulemd package.")
|
||||
|
||||
# check whether variants from configuration value
|
||||
# 'variant_as_lookaside' are correct
|
||||
variant_as_lookaside = self.compose.conf.get("variant_as_lookaside", [])
|
||||
all_variants = self.compose.all_variants
|
||||
for (requiring, required) in variant_as_lookaside:
|
||||
|
||||
# check whether variants from configuration value
|
||||
# 'variant_as_lookaside' are correct
|
||||
for requiring, required in variant_as_lookaside:
|
||||
if requiring in all_variants and required not in all_variants:
|
||||
errors.append(
|
||||
"variant_as_lookaside: variant %r doesn't exist but is "
|
||||
"required by %r" % (required, requiring)
|
||||
)
|
||||
|
||||
# check whether variants from configuration value
|
||||
# 'variant_as_lookaside' have same architectures
|
||||
for requiring, required in variant_as_lookaside:
|
||||
if (
|
||||
requiring in all_variants
|
||||
and required in all_variants
|
||||
and not set(all_variants[requiring].arches).issubset(
|
||||
set(all_variants[required].arches)
|
||||
)
|
||||
):
|
||||
errors.append(
|
||||
"variant_as_lookaside: architectures of variant '%s' "
|
||||
"aren't subset of architectures of variant '%s'"
|
||||
% (requiring, required)
|
||||
)
|
||||
|
||||
if errors:
|
||||
raise ValueError("\n".join(errors))
|
||||
|
||||
@ -178,27 +194,19 @@ def load_old_gather_result(compose, arch, variant):
|
||||
return None
|
||||
|
||||
compose.log_info("Loading old GATHER phase results: %s", old_gather_result)
|
||||
with open(old_gather_result, "rb") as f:
|
||||
old_result = pickle.load(f)
|
||||
return old_result
|
||||
|
||||
|
||||
def load_old_compose_config(compose):
|
||||
"""
|
||||
Helper method to load Pungi config dump from old compose.
|
||||
"""
|
||||
config_dump_full = compose.paths.log.log_file("global", "config-dump")
|
||||
config_dump_full = compose.paths.old_compose_path(config_dump_full)
|
||||
if not config_dump_full:
|
||||
try:
|
||||
with open(old_gather_result, "rb") as f:
|
||||
old_result = pickle.load(f)
|
||||
return old_result
|
||||
except Exception as e:
|
||||
compose.log_debug(
|
||||
"Failed to load old GATHER phase results %s : %s"
|
||||
% (old_gather_result, str(e))
|
||||
)
|
||||
return None
|
||||
|
||||
compose.log_info("Loading old config file: %s", config_dump_full)
|
||||
with open(config_dump_full, "r") as f:
|
||||
old_config = json.load(f)
|
||||
return old_config
|
||||
|
||||
|
||||
def reuse_old_gather_packages(compose, arch, variant, package_sets):
|
||||
def reuse_old_gather_packages(compose, arch, variant, package_sets, methods):
|
||||
"""
|
||||
Tries to reuse `gather_packages` result from older compose.
|
||||
|
||||
@ -206,6 +214,7 @@ def reuse_old_gather_packages(compose, arch, variant, package_sets):
|
||||
:param str arch: Architecture to reuse old gather data for.
|
||||
:param str variant: Variant to reuse old gather data for.
|
||||
:param list package_sets: List of package sets to gather packages from.
|
||||
:param str methods: Gather method.
|
||||
:return: Old `gather_packages` result or None if old result cannot be used.
|
||||
"""
|
||||
log_msg = "Cannot reuse old GATHER phase results - %s"
|
||||
@ -218,38 +227,38 @@ def reuse_old_gather_packages(compose, arch, variant, package_sets):
|
||||
compose.log_info(log_msg % "no old gather results.")
|
||||
return
|
||||
|
||||
old_config = load_old_compose_config(compose)
|
||||
old_config = compose.load_old_compose_config()
|
||||
if old_config is None:
|
||||
compose.log_info(log_msg % "no old compose config dump.")
|
||||
return
|
||||
|
||||
# Do not reuse when required variant is not reused.
|
||||
if not hasattr(compose, "_gather_reused_variant_arch"):
|
||||
setattr(compose, "_gather_reused_variant_arch", [])
|
||||
variant_as_lookaside = compose.conf.get("variant_as_lookaside", [])
|
||||
for requiring, required in variant_as_lookaside:
|
||||
if (
|
||||
requiring == variant.uid
|
||||
and (required, arch) not in compose._gather_reused_variant_arch
|
||||
):
|
||||
compose.log_info(
|
||||
log_msg % "variant %s as lookaside is not reused." % required
|
||||
)
|
||||
return
|
||||
|
||||
# Do not reuse if there's external lookaside repo.
|
||||
with open(compose.paths.log.log_file("global", "config-dump"), "r") as f:
|
||||
config_dump = json.load(f)
|
||||
if config_dump.get("gather_lookaside_repos") or old_config.get(
|
||||
"gather_lookaside_repos"
|
||||
):
|
||||
compose.log_info(log_msg % "there's external lookaside repo.")
|
||||
return
|
||||
|
||||
# The dumps/loads is needed to convert all unicode strings to non-unicode ones.
|
||||
config = json.loads(json.dumps(compose.conf))
|
||||
for opt, value in old_config.items():
|
||||
# Gather lookaside repos are updated during the gather phase. Check that
|
||||
# the gather_lookaside_repos except the ones added are the same.
|
||||
if opt == "gather_lookaside_repos" and opt in config:
|
||||
value_to_compare = []
|
||||
# Filter out repourls which starts with `compose.topdir` and also remove
|
||||
# their parent list in case it would be empty.
|
||||
for variant, per_arch_repos in config[opt]:
|
||||
per_arch_repos_to_compare = {}
|
||||
for arch, repourl in per_arch_repos.items():
|
||||
# The gather_lookaside_repos config allows setting multiple repourls
|
||||
# using list, but `_update_config` always uses strings. Therefore we
|
||||
# only try to filter out string_types.
|
||||
if not isinstance(repourl, six.string_types):
|
||||
continue
|
||||
if not repourl.startswith(compose.topdir):
|
||||
per_arch_repos_to_compare[arch] = repourl
|
||||
if per_arch_repos_to_compare:
|
||||
value_to_compare.append([variant, per_arch_repos_to_compare])
|
||||
if value != value_to_compare:
|
||||
compose.log_info(
|
||||
log_msg
|
||||
% ("compose configuration option gather_lookaside_repos changed.")
|
||||
)
|
||||
return
|
||||
if opt == "gather_lookaside_repos":
|
||||
continue
|
||||
|
||||
# Skip checking for frequently changing configuration options which do *not*
|
||||
@ -378,6 +387,30 @@ def reuse_old_gather_packages(compose, arch, variant, package_sets):
|
||||
compose.log_info(log_msg % "some RPMs have been removed.")
|
||||
return
|
||||
|
||||
compose._gather_reused_variant_arch.append((variant.uid, arch))
|
||||
|
||||
# Copy old gather log for debugging
|
||||
try:
|
||||
if methods == "hybrid":
|
||||
log_dir = compose.paths.log.topdir(arch, create_dir=False)
|
||||
old_log_dir = compose.paths.old_compose_path(log_dir)
|
||||
for log_file in glob.glob(
|
||||
os.path.join(old_log_dir, "hybrid-depsolver-%s-iter-*" % variant)
|
||||
):
|
||||
compose.log_info(
|
||||
"Copying old gather log %s to %s" % (log_file, log_dir)
|
||||
)
|
||||
shutil.copy2(log_file, log_dir)
|
||||
else:
|
||||
log_dir = os.path.dirname(
|
||||
compose.paths.work.pungi_log(arch, variant, create_dir=False)
|
||||
)
|
||||
old_log_dir = compose.paths.old_compose_path(log_dir)
|
||||
compose.log_info("Copying old gather log %s to %s" % (old_log_dir, log_dir))
|
||||
shutil.copytree(old_log_dir, log_dir)
|
||||
except Exception as e:
|
||||
compose.log_warning("Copying old gather log failed: %s" % str(e))
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@ -404,7 +437,9 @@ def gather_packages(compose, arch, variant, package_sets, fulltree_excludes=None
|
||||
prepopulate = get_prepopulate_packages(compose, arch, variant)
|
||||
fulltree_excludes = fulltree_excludes or set()
|
||||
|
||||
reused_result = reuse_old_gather_packages(compose, arch, variant, package_sets)
|
||||
reused_result = reuse_old_gather_packages(
|
||||
compose, arch, variant, package_sets, methods
|
||||
)
|
||||
if reused_result:
|
||||
result = reused_result
|
||||
elif methods == "hybrid":
|
||||
@ -434,9 +469,7 @@ def gather_packages(compose, arch, variant, package_sets, fulltree_excludes=None
|
||||
)
|
||||
|
||||
else:
|
||||
|
||||
for source_name in ("module", "comps", "json"):
|
||||
|
||||
packages, groups, filter_packages = get_variant_packages(
|
||||
compose, arch, variant, source_name, package_sets
|
||||
)
|
||||
@ -507,7 +540,8 @@ def write_packages(compose, arch, variant, pkg_map, path_prefix):
|
||||
|
||||
|
||||
def trim_packages(compose, arch, variant, pkg_map, parent_pkgs=None, remove_pkgs=None):
|
||||
"""Remove parent variant's packages from pkg_map <-- it gets modified in this function
|
||||
"""Remove parent variant's packages from pkg_map <-- it gets modified in
|
||||
this function
|
||||
|
||||
There are three cases where changes may happen:
|
||||
|
||||
@ -540,7 +574,6 @@ def trim_packages(compose, arch, variant, pkg_map, parent_pkgs=None, remove_pkgs
|
||||
move_to_parent_pkgs = _mk_pkg_map()
|
||||
removed_pkgs = _mk_pkg_map()
|
||||
for pkg_type, pkgs in pkg_map.items():
|
||||
|
||||
new_pkgs = []
|
||||
for pkg in pkgs:
|
||||
pkg_path = pkg["path"]
|
||||
@ -612,20 +645,45 @@ def _make_lookaside_repo(compose, variant, arch, pkg_map, package_sets=None):
|
||||
compose.paths.work.topdir(arch="global"), "download"
|
||||
)
|
||||
+ "/",
|
||||
"koji": lambda: pungi.wrappers.kojiwrapper.KojiWrapper(
|
||||
compose.conf["koji_profile"]
|
||||
"koji": lambda: compose.conf.get(
|
||||
"koji_cache",
|
||||
pungi.wrappers.kojiwrapper.KojiWrapper(compose).koji_module.config.topdir,
|
||||
).rstrip("/")
|
||||
+ "/",
|
||||
"kojimock": lambda: pungi.wrappers.kojiwrapper.KojiMockWrapper(
|
||||
compose,
|
||||
get_all_arches(compose),
|
||||
).koji_module.config.topdir.rstrip("/")
|
||||
+ "/",
|
||||
}
|
||||
path_prefix = prefixes[compose.conf["pkgset_source"]]()
|
||||
package_list = set()
|
||||
for pkg_arch in pkg_map.keys():
|
||||
try:
|
||||
for pkg_type, packages in pkg_map[pkg_arch][variant.uid].items():
|
||||
# We want all packages for current arch, and SRPMs for any
|
||||
# arch. Ultimately there will only be one source repository, so
|
||||
# we need a union of all SRPMs.
|
||||
if pkg_type == "srpm" or pkg_arch == arch:
|
||||
for pkg in packages:
|
||||
if "lookaside" in pkg.get("flags", []):
|
||||
# We want to ignore lookaside packages, those will
|
||||
# be visible to the depending variants from the
|
||||
# lookaside repo directly.
|
||||
continue
|
||||
pkg = pkg["path"]
|
||||
if path_prefix and pkg.startswith(path_prefix):
|
||||
pkg = pkg[len(path_prefix) :]
|
||||
package_list.add(pkg)
|
||||
except KeyError:
|
||||
raise RuntimeError(
|
||||
"Variant '%s' does not have architecture " "'%s'!" % (variant, pkg_arch)
|
||||
)
|
||||
|
||||
pkglist = compose.paths.work.lookaside_package_list(arch=arch, variant=variant)
|
||||
with open(pkglist, "w") as f:
|
||||
for packages in pkg_map[arch][variant.uid].values():
|
||||
for pkg in packages:
|
||||
pkg = pkg["path"]
|
||||
if path_prefix and pkg.startswith(path_prefix):
|
||||
pkg = pkg[len(path_prefix) :]
|
||||
f.write("%s\n" % pkg)
|
||||
for pkg in sorted(package_list):
|
||||
f.write("%s\n" % pkg)
|
||||
|
||||
cr = CreaterepoWrapper(compose.conf["createrepo_c"])
|
||||
update_metadata = None
|
||||
@ -661,6 +719,8 @@ def _make_lookaside_repo(compose, variant, arch, pkg_map, package_sets=None):
|
||||
collect_module_defaults(
|
||||
defaults_dir, module_names, mod_index, overrides_dir=overrides_dir
|
||||
)
|
||||
obsoletes_dir = compose.paths.work.module_obsoletes_dir()
|
||||
mod_index = collect_module_obsoletes(obsoletes_dir, module_names, mod_index)
|
||||
|
||||
log_file = compose.paths.log.log_file(
|
||||
arch, "lookaside_repo_modules_%s" % (variant.uid)
|
||||
@ -736,6 +796,10 @@ def _gather_variants(
|
||||
try:
|
||||
que.put((arch, gather_packages(*args, **kwargs)))
|
||||
except Exception as exc:
|
||||
compose.log_error(
|
||||
"Error in gathering for %s.%s: %s", variant, arch, exc
|
||||
)
|
||||
compose.traceback("gather-%s-%s" % (variant, arch))
|
||||
errors.put(exc)
|
||||
|
||||
# Run gather_packages() in parallel with multi threads and store
|
||||
|
@ -14,15 +14,6 @@
|
||||
# along with this program; if not, see <https://gnu.org/licenses/>.
|
||||
|
||||
|
||||
import kobo.plugins
|
||||
|
||||
|
||||
class GatherMethodBase(kobo.plugins.Plugin):
|
||||
class GatherMethodBase(object):
|
||||
def __init__(self, compose):
|
||||
self.compose = compose
|
||||
|
||||
|
||||
class GatherMethodContainer(kobo.plugins.PluginContainer):
|
||||
@classmethod
|
||||
def normalize_name(cls, name):
|
||||
return name.lower()
|
||||
|
@ -0,0 +1,24 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; version 2 of the License.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Library General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, see <https://gnu.org/licenses/>.
|
||||
|
||||
from .method_deps import GatherMethodDeps
|
||||
from .method_nodeps import GatherMethodNodeps
|
||||
from .method_hybrid import GatherMethodHybrid
|
||||
|
||||
ALL_METHODS = {
|
||||
"deps": GatherMethodDeps,
|
||||
"nodeps": GatherMethodNodeps,
|
||||
"hybrid": GatherMethodHybrid,
|
||||
}
|
@ -15,6 +15,7 @@
|
||||
|
||||
|
||||
import os
|
||||
import shutil
|
||||
|
||||
from kobo.shortcuts import run
|
||||
from kobo.pkgset import SimpleRpmWrapper, RpmWrapper
|
||||
@ -31,8 +32,6 @@ import pungi.phases.gather.method
|
||||
|
||||
|
||||
class GatherMethodDeps(pungi.phases.gather.method.GatherMethodBase):
|
||||
enabled = True
|
||||
|
||||
def __call__(
|
||||
self,
|
||||
arch,
|
||||
@ -243,8 +242,19 @@ def resolve_deps(compose, arch, variant, source_name=None):
|
||||
)
|
||||
# Use temp working directory directory as workaround for
|
||||
# https://bugzilla.redhat.com/show_bug.cgi?id=795137
|
||||
with temp_dir(prefix="pungi_") as tmp_dir:
|
||||
run(cmd, logfile=pungi_log, show_cmd=True, workdir=tmp_dir, env=os.environ)
|
||||
with temp_dir(prefix="pungi_") as work_dir:
|
||||
run(cmd, logfile=pungi_log, show_cmd=True, workdir=work_dir, env=os.environ)
|
||||
|
||||
# Clean up tmp dir
|
||||
# Workaround for rpm not honoring sgid bit which only appears when yum is used.
|
||||
yumroot_dir = os.path.join(tmp_dir, "work", arch, "yumroot")
|
||||
if os.path.isdir(yumroot_dir):
|
||||
try:
|
||||
shutil.rmtree(yumroot_dir)
|
||||
except Exception as e:
|
||||
compose.log_warning(
|
||||
"Failed to clean up tmp dir: %s %s" % (yumroot_dir, str(e))
|
||||
)
|
||||
|
||||
with open(pungi_log, "r") as f:
|
||||
packages, broken_deps, missing_comps_pkgs = pungi_wrapper.parse_log(f)
|
||||
|
@ -47,9 +47,15 @@ class FakePackage(object):
|
||||
|
||||
@property
|
||||
def files(self):
|
||||
return [
|
||||
os.path.join(dirname, basename) for (_, dirname, basename) in self.pkg.files
|
||||
]
|
||||
paths = []
|
||||
# createrepo_c.Package.files is a tuple, but its length differs across
|
||||
# versions. The constants define index at which the related value is
|
||||
# located.
|
||||
for entry in self.pkg.files:
|
||||
paths.append(
|
||||
os.path.join(entry[cr.FILE_ENTRY_PATH], entry[cr.FILE_ENTRY_NAME])
|
||||
)
|
||||
return paths
|
||||
|
||||
@property
|
||||
def provides(self):
|
||||
@ -60,8 +66,6 @@ class FakePackage(object):
|
||||
|
||||
|
||||
class GatherMethodHybrid(pungi.phases.gather.method.GatherMethodBase):
|
||||
enabled = True
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(GatherMethodHybrid, self).__init__(*args, **kwargs)
|
||||
self.package_maps = {}
|
||||
@ -351,8 +355,11 @@ class GatherMethodHybrid(pungi.phases.gather.method.GatherMethodBase):
|
||||
|
||||
# There are two ways how the debuginfo package can be named. We
|
||||
# want to get them all.
|
||||
for pattern in ["%s-debuginfo", "%s-debugsource"]:
|
||||
debuginfo_name = pattern % pkg.name
|
||||
source_name = kobo.rpmlib.parse_nvra(pkg.rpm_sourcerpm)["name"]
|
||||
for debuginfo_name in [
|
||||
"%s-debuginfo" % pkg.name,
|
||||
"%s-debugsource" % source_name,
|
||||
]:
|
||||
debuginfo = self._get_debuginfo(debuginfo_name, pkg_arch)
|
||||
for dbg in debuginfo:
|
||||
# For each debuginfo package that matches on name and
|
||||
@ -501,6 +508,27 @@ def _make_result(paths):
|
||||
return [{"path": path, "flags": []} for path in sorted(paths)]
|
||||
|
||||
|
||||
def get_repo_packages(path):
|
||||
"""Extract file names of all packages in the given repository."""
|
||||
|
||||
packages = set()
|
||||
|
||||
def callback(pkg):
|
||||
packages.add(os.path.basename(pkg.location_href))
|
||||
|
||||
repomd = os.path.join(path, "repodata/repomd.xml")
|
||||
with as_local_file(repomd) as url_:
|
||||
repomd = cr.Repomd(url_)
|
||||
for rec in repomd.records:
|
||||
if rec.type != "primary":
|
||||
continue
|
||||
record_url = os.path.join(path, rec.location_href)
|
||||
with as_local_file(record_url) as url_:
|
||||
cr.xml_parse_primary(url_, pkgcb=callback, do_files=False)
|
||||
|
||||
return packages
|
||||
|
||||
|
||||
def expand_packages(nevra_to_pkg, lookasides, nvrs, filter_packages):
|
||||
"""For each package add source RPM."""
|
||||
# This will serve as the final result. We collect sets of paths to the
|
||||
@ -511,25 +539,16 @@ def expand_packages(nevra_to_pkg, lookasides, nvrs, filter_packages):
|
||||
|
||||
filters = set(filter_packages)
|
||||
|
||||
# Collect list of all packages in lookaside. These will not be added to the
|
||||
# result. Fus handles this in part: if a package is explicitly mentioned as
|
||||
# input (which can happen with comps group expansion), it will be in the
|
||||
# output even if it's in lookaside.
|
||||
lookaside_packages = set()
|
||||
for repo in lookasides:
|
||||
md = cr.Metadata()
|
||||
md.locate_and_load_xml(repo)
|
||||
for key in md.keys():
|
||||
pkg = md.get(key)
|
||||
url = os.path.join(pkg.location_base or repo, pkg.location_href)
|
||||
# Strip file:// prefix
|
||||
lookaside_packages.add(url[7:])
|
||||
lookaside_packages.update(get_repo_packages(repo))
|
||||
|
||||
for nvr, pkg_arch, flags in nvrs:
|
||||
pkg = nevra_to_pkg["%s.%s" % (nvr, pkg_arch)]
|
||||
if pkg.file_path in lookaside_packages:
|
||||
# Package is in lookaside, don't add it and ignore sources and
|
||||
# debuginfo too.
|
||||
if os.path.basename(pkg.file_path) in lookaside_packages:
|
||||
# Fus can return lookaside package in output if the package is
|
||||
# explicitly listed as input. This can happen during comps
|
||||
# expansion.
|
||||
continue
|
||||
if pkg_is_debug(pkg):
|
||||
debuginfo.add(pkg.file_path)
|
||||
@ -542,7 +561,7 @@ def expand_packages(nevra_to_pkg, lookasides, nvrs, filter_packages):
|
||||
if (srpm.name, "src") in filters:
|
||||
# Filtered package, skipping
|
||||
continue
|
||||
if srpm.file_path not in lookaside_packages:
|
||||
if os.path.basename(srpm.file_path) not in lookaside_packages:
|
||||
srpms.add(srpm.file_path)
|
||||
except KeyError:
|
||||
# Didn't find source RPM.. this should be logged
|
||||
|
@ -28,8 +28,6 @@ from kobo.pkgset import SimpleRpmWrapper, RpmWrapper
|
||||
|
||||
|
||||
class GatherMethodNodeps(pungi.phases.gather.method.GatherMethodBase):
|
||||
enabled = True
|
||||
|
||||
def __call__(self, arch, variant, *args, **kwargs):
|
||||
fname = "gather-nodeps-%s" % variant.uid
|
||||
if self.source_name:
|
||||
|
@ -14,15 +14,6 @@
|
||||
# along with this program; if not, see <https://gnu.org/licenses/>.
|
||||
|
||||
|
||||
import kobo.plugins
|
||||
|
||||
|
||||
class GatherSourceBase(kobo.plugins.Plugin):
|
||||
class GatherSourceBase(object):
|
||||
def __init__(self, compose):
|
||||
self.compose = compose
|
||||
|
||||
|
||||
class GatherSourceContainer(kobo.plugins.PluginContainer):
|
||||
@classmethod
|
||||
def normalize_name(cls, name):
|
||||
return name.lower()
|
||||
|
@ -0,0 +1,26 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; version 2 of the License.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Library General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, see <https://gnu.org/licenses/>.
|
||||
|
||||
from .source_comps import GatherSourceComps
|
||||
from .source_json import GatherSourceJson
|
||||
from .source_module import GatherSourceModule
|
||||
from .source_none import GatherSourceNone
|
||||
|
||||
ALL_SOURCES = {
|
||||
"comps": GatherSourceComps,
|
||||
"json": GatherSourceJson,
|
||||
"module": GatherSourceModule,
|
||||
"none": GatherSourceNone,
|
||||
}
|
@ -30,8 +30,6 @@ import pungi.phases.gather.source
|
||||
|
||||
|
||||
class GatherSourceComps(pungi.phases.gather.source.GatherSourceBase):
|
||||
enabled = True
|
||||
|
||||
def __call__(self, arch, variant):
|
||||
groups = set()
|
||||
if not self.compose.conf.get("comps_file"):
|
||||
|
@ -32,30 +32,31 @@ set([(rpm_name, rpm_arch or None)])
|
||||
|
||||
|
||||
import json
|
||||
import os
|
||||
|
||||
import pungi.phases.gather.source
|
||||
|
||||
|
||||
class GatherSourceJson(pungi.phases.gather.source.GatherSourceBase):
|
||||
enabled = True
|
||||
|
||||
def __call__(self, arch, variant):
|
||||
json_path = self.compose.conf.get("gather_source_mapping")
|
||||
if not json_path:
|
||||
return set(), set()
|
||||
with open(json_path, "r") as f:
|
||||
with open(os.path.join(self.compose.config_dir, json_path), "r") as f:
|
||||
mapping = json.load(f)
|
||||
|
||||
packages = set()
|
||||
if variant is None:
|
||||
# get all packages for all variants
|
||||
for variant_uid in mapping:
|
||||
for pkg_name, pkg_arches in mapping[variant_uid][arch].items():
|
||||
for pkg_name, pkg_arches in mapping[variant_uid].get(arch, {}).items():
|
||||
for pkg_arch in pkg_arches:
|
||||
packages.add((pkg_name, pkg_arch))
|
||||
else:
|
||||
# get packages for a particular variant
|
||||
for pkg_name, pkg_arches in mapping[variant.uid][arch].items():
|
||||
for pkg_name, pkg_arches in (
|
||||
mapping.get(variant.uid, {}).get(arch, {}).items()
|
||||
):
|
||||
for pkg_arch in pkg_arches:
|
||||
packages.add((pkg_name, pkg_arch))
|
||||
return packages, set()
|
||||
|
@ -26,8 +26,6 @@ import pungi.phases.gather.source
|
||||
|
||||
|
||||
class GatherSourceModule(pungi.phases.gather.source.GatherSourceBase):
|
||||
enabled = True
|
||||
|
||||
def __call__(self, arch, variant):
|
||||
groups = set()
|
||||
packages = set()
|
||||
|
@ -29,7 +29,5 @@ import pungi.phases.gather.source
|
||||
|
||||
|
||||
class GatherSourceNone(pungi.phases.gather.source.GatherSourceBase):
|
||||
enabled = True
|
||||
|
||||
def __call__(self, arch, variant):
|
||||
return set(), set()
|
||||
|
@ -1,18 +1,22 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import copy
|
||||
import hashlib
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import time
|
||||
from kobo import shortcuts
|
||||
|
||||
from pungi.util import makedirs, get_mtime, get_file_size, failable, log_failed_task
|
||||
from pungi.util import translate_path, get_repo_urls, version_generator
|
||||
from pungi.util import as_local_file, translate_path, get_repo_urls, version_generator
|
||||
from pungi.phases import base
|
||||
from pungi.linker import Linker
|
||||
from pungi.wrappers.kojiwrapper import KojiWrapper
|
||||
from kobo.threads import ThreadPool, WorkerThread
|
||||
from kobo.shortcuts import force_list
|
||||
from productmd.images import Image
|
||||
from productmd.rpms import Rpms
|
||||
|
||||
|
||||
# This is a mapping from formats to file extensions. The format is what koji
|
||||
@ -21,6 +25,7 @@ from productmd.images import Image
|
||||
# results will be pulled into the compose.
|
||||
EXTENSIONS = {
|
||||
"docker": ["tar.gz", "tar.xz"],
|
||||
"iso": ["iso"],
|
||||
"liveimg-squashfs": ["liveimg.squashfs"],
|
||||
"qcow": ["qcow"],
|
||||
"qcow2": ["qcow2"],
|
||||
@ -35,6 +40,7 @@ EXTENSIONS = {
|
||||
"vdi": ["vdi"],
|
||||
"vmdk": ["vmdk"],
|
||||
"vpc": ["vhd"],
|
||||
"vhd-compressed": ["vhd.gz", "vhd.xz"],
|
||||
"vsphere-ova": ["vsphere.ova"],
|
||||
}
|
||||
|
||||
@ -46,9 +52,10 @@ class ImageBuildPhase(
|
||||
|
||||
name = "image_build"
|
||||
|
||||
def __init__(self, compose):
|
||||
def __init__(self, compose, buildinstall_phase=None):
|
||||
super(ImageBuildPhase, self).__init__(compose)
|
||||
self.pool = ThreadPool(logger=self.logger)
|
||||
self.buildinstall_phase = buildinstall_phase
|
||||
|
||||
def _get_install_tree(self, image_conf, variant):
|
||||
"""
|
||||
@ -117,6 +124,7 @@ class ImageBuildPhase(
|
||||
# prevent problems in next iteration where the original
|
||||
# value is needed.
|
||||
image_conf = copy.deepcopy(image_conf)
|
||||
original_image_conf = copy.deepcopy(image_conf)
|
||||
|
||||
# image_conf is passed to get_image_build_cmd as dict
|
||||
|
||||
@ -167,6 +175,7 @@ class ImageBuildPhase(
|
||||
image_conf["image-build"]["can_fail"] = sorted(can_fail)
|
||||
|
||||
cmd = {
|
||||
"original_image_conf": original_image_conf,
|
||||
"image_conf": image_conf,
|
||||
"conf_file": self.compose.paths.work.image_build_conf(
|
||||
image_conf["image-build"]["variant"],
|
||||
@ -182,7 +191,7 @@ class ImageBuildPhase(
|
||||
"scratch": image_conf["image-build"].pop("scratch", False),
|
||||
}
|
||||
self.pool.add(CreateImageBuildThread(self.pool))
|
||||
self.pool.queue_put((self.compose, cmd))
|
||||
self.pool.queue_put((self.compose, cmd, self.buildinstall_phase))
|
||||
|
||||
self.pool.start()
|
||||
|
||||
@ -192,7 +201,7 @@ class CreateImageBuildThread(WorkerThread):
|
||||
self.pool.log_error("CreateImageBuild failed.")
|
||||
|
||||
def process(self, item, num):
|
||||
compose, cmd = item
|
||||
compose, cmd, buildinstall_phase = item
|
||||
variant = cmd["image_conf"]["image-build"]["variant"]
|
||||
subvariant = cmd["image_conf"]["image-build"].get("subvariant", variant.uid)
|
||||
self.failable_arches = cmd["image_conf"]["image-build"].get("can_fail", "")
|
||||
@ -208,22 +217,54 @@ class CreateImageBuildThread(WorkerThread):
|
||||
subvariant,
|
||||
logger=self.pool._logger,
|
||||
):
|
||||
self.worker(num, compose, variant, subvariant, cmd)
|
||||
self.worker(num, compose, variant, subvariant, cmd, buildinstall_phase)
|
||||
|
||||
def worker(self, num, compose, variant, subvariant, cmd):
|
||||
def worker(self, num, compose, variant, subvariant, cmd, buildinstall_phase):
|
||||
arches = cmd["image_conf"]["image-build"]["arches"]
|
||||
formats = "-".join(cmd["image_conf"]["image-build"]["format"])
|
||||
dash_arches = "-".join(arches)
|
||||
log_file = compose.paths.log.log_file(
|
||||
dash_arches, "imagebuild-%s-%s-%s" % (variant.uid, subvariant, formats)
|
||||
)
|
||||
metadata_file = log_file[:-4] + ".reuse.json"
|
||||
|
||||
external_repo_checksum = {}
|
||||
try:
|
||||
for repo in cmd["original_image_conf"]["image-build"]["repo"]:
|
||||
if repo in compose.all_variants:
|
||||
continue
|
||||
with as_local_file(
|
||||
os.path.join(repo, "repodata/repomd.xml")
|
||||
) as filename:
|
||||
with open(filename, "rb") as f:
|
||||
external_repo_checksum[repo] = hashlib.sha256(
|
||||
f.read()
|
||||
).hexdigest()
|
||||
except Exception as e:
|
||||
external_repo_checksum = None
|
||||
self.pool.log_info(
|
||||
"Can't calculate checksum of repomd.xml of external repo - %s" % str(e)
|
||||
)
|
||||
|
||||
if self._try_to_reuse(
|
||||
compose,
|
||||
variant,
|
||||
subvariant,
|
||||
metadata_file,
|
||||
log_file,
|
||||
cmd,
|
||||
external_repo_checksum,
|
||||
buildinstall_phase,
|
||||
):
|
||||
return
|
||||
|
||||
msg = (
|
||||
"Creating image (formats: %s, arches: %s, variant: %s, subvariant: %s)"
|
||||
% (formats, dash_arches, variant, subvariant)
|
||||
)
|
||||
self.pool.log_info("[BEGIN] %s" % msg)
|
||||
|
||||
koji_wrapper = KojiWrapper(compose.conf["koji_profile"])
|
||||
koji_wrapper = KojiWrapper(compose)
|
||||
|
||||
# writes conf file for koji image-build
|
||||
self.pool.log_info(
|
||||
@ -275,6 +316,22 @@ class CreateImageBuildThread(WorkerThread):
|
||||
)
|
||||
break
|
||||
|
||||
self._link_images(compose, variant, subvariant, cmd, image_infos)
|
||||
self._write_reuse_metadata(
|
||||
compose, metadata_file, cmd, image_infos, external_repo_checksum
|
||||
)
|
||||
|
||||
self.pool.log_info("[DONE ] %s (task id: %s)" % (msg, output["task_id"]))
|
||||
|
||||
def _link_images(self, compose, variant, subvariant, cmd, image_infos):
|
||||
"""Link images to compose and update image manifest.
|
||||
|
||||
:param Compose compose: Current compose.
|
||||
:param Variant variant: Current variant.
|
||||
:param str subvariant:
|
||||
:param dict cmd: Dict of params for image-build.
|
||||
:param dict image_infos: Dict contains image info.
|
||||
"""
|
||||
# The usecase here is that you can run koji image-build with multiple --format
|
||||
# It's ok to do it serialized since we're talking about max 2 images per single
|
||||
# image_build record
|
||||
@ -289,7 +346,9 @@ class CreateImageBuildThread(WorkerThread):
|
||||
# let's not change filename of koji outputs
|
||||
image_dest = os.path.join(image_dir, os.path.basename(image_info["path"]))
|
||||
|
||||
src_file = os.path.realpath(image_info["path"])
|
||||
src_file = compose.koji_downloader.get_file(
|
||||
os.path.realpath(image_info["path"])
|
||||
)
|
||||
linker.link(src_file, image_dest, link_type=cmd["link_type"])
|
||||
|
||||
# Update image manifest
|
||||
@ -308,4 +367,160 @@ class CreateImageBuildThread(WorkerThread):
|
||||
setattr(img, "deliverable", "image-build")
|
||||
compose.im.add(variant=variant.uid, arch=image_info["arch"], image=img)
|
||||
|
||||
self.pool.log_info("[DONE ] %s (task id: %s)" % (msg, output["task_id"]))
|
||||
def _try_to_reuse(
|
||||
self,
|
||||
compose,
|
||||
variant,
|
||||
subvariant,
|
||||
metadata_file,
|
||||
log_file,
|
||||
cmd,
|
||||
external_repo_checksum,
|
||||
buildinstall_phase,
|
||||
):
|
||||
"""Try to reuse images from old compose.
|
||||
|
||||
:param Compose compose: Current compose.
|
||||
:param Variant variant: Current variant.
|
||||
:param str subvariant:
|
||||
:param str metadata_file: Path to reuse metadata file.
|
||||
:param str log_file: Path to log file.
|
||||
:param dict cmd: Dict of params for image-build.
|
||||
:param dict external_repo_checksum: Dict contains checksum of repomd.xml
|
||||
or None if can't get checksum.
|
||||
:param BuildinstallPhase buildinstall_phase: buildinstall phase of
|
||||
current compose.
|
||||
"""
|
||||
log_msg = "Cannot reuse old image_build phase results - %s"
|
||||
if not compose.conf["image_build_allow_reuse"]:
|
||||
self.pool.log_info(
|
||||
log_msg % "reuse of old image_build results is disabled."
|
||||
)
|
||||
return False
|
||||
|
||||
if external_repo_checksum is None:
|
||||
self.pool.log_info(
|
||||
log_msg % "Can't ensure that external repo is not changed."
|
||||
)
|
||||
return False
|
||||
|
||||
old_metadata_file = compose.paths.old_compose_path(metadata_file)
|
||||
if not old_metadata_file:
|
||||
self.pool.log_info(log_msg % "Can't find old reuse metadata file")
|
||||
return False
|
||||
|
||||
try:
|
||||
old_metadata = self._load_reuse_metadata(old_metadata_file)
|
||||
except Exception as e:
|
||||
self.pool.log_info(
|
||||
log_msg % "Can't load old reuse metadata file: %s" % str(e)
|
||||
)
|
||||
return False
|
||||
|
||||
if old_metadata["cmd"]["original_image_conf"] != cmd["original_image_conf"]:
|
||||
self.pool.log_info(log_msg % "image_build config changed")
|
||||
return False
|
||||
|
||||
# Make sure external repo does not change
|
||||
if (
|
||||
old_metadata["external_repo_checksum"] is None
|
||||
or old_metadata["external_repo_checksum"] != external_repo_checksum
|
||||
):
|
||||
self.pool.log_info(log_msg % "External repo may be changed")
|
||||
return False
|
||||
|
||||
# Make sure buildinstall phase is reused
|
||||
for arch in cmd["image_conf"]["image-build"]["arches"]:
|
||||
if buildinstall_phase and not buildinstall_phase.reused(variant, arch):
|
||||
self.pool.log_info(log_msg % "buildinstall phase changed")
|
||||
return False
|
||||
|
||||
# Make sure packages in variant not change
|
||||
rpm_manifest_file = compose.paths.compose.metadata("rpms.json")
|
||||
rpm_manifest = Rpms()
|
||||
rpm_manifest.load(rpm_manifest_file)
|
||||
|
||||
old_rpm_manifest_file = compose.paths.old_compose_path(rpm_manifest_file)
|
||||
old_rpm_manifest = Rpms()
|
||||
old_rpm_manifest.load(old_rpm_manifest_file)
|
||||
|
||||
for repo in cmd["original_image_conf"]["image-build"]["repo"]:
|
||||
if repo not in compose.all_variants:
|
||||
# External repos are checked using other logic.
|
||||
continue
|
||||
for arch in cmd["image_conf"]["image-build"]["arches"]:
|
||||
if (
|
||||
rpm_manifest.rpms[variant.uid][arch]
|
||||
!= old_rpm_manifest.rpms[variant.uid][arch]
|
||||
):
|
||||
self.pool.log_info(
|
||||
log_msg % "Packages in %s.%s changed." % (variant.uid, arch)
|
||||
)
|
||||
return False
|
||||
|
||||
self.pool.log_info(
|
||||
"Reusing images from old compose for variant %s" % variant.uid
|
||||
)
|
||||
try:
|
||||
self._link_images(
|
||||
compose, variant, subvariant, cmd, old_metadata["image_infos"]
|
||||
)
|
||||
except Exception as e:
|
||||
self.pool.log_info(log_msg % "Can't link images %s" % str(e))
|
||||
return False
|
||||
|
||||
old_log_file = compose.paths.old_compose_path(log_file)
|
||||
try:
|
||||
shutil.copy2(old_log_file, log_file)
|
||||
except Exception as e:
|
||||
self.pool.log_info(
|
||||
log_msg % "Can't copy old log_file: %s %s" % (old_log_file, str(e))
|
||||
)
|
||||
return False
|
||||
|
||||
self._write_reuse_metadata(
|
||||
compose,
|
||||
metadata_file,
|
||||
cmd,
|
||||
old_metadata["image_infos"],
|
||||
external_repo_checksum,
|
||||
)
|
||||
|
||||
return True
|
||||
|
||||
def _write_reuse_metadata(
|
||||
self, compose, metadata_file, cmd, image_infos, external_repo_checksum
|
||||
):
|
||||
"""Write metadata file.
|
||||
|
||||
:param Compose compose: Current compose.
|
||||
:param str metadata_file: Path to reuse metadata file.
|
||||
:param dict cmd: Dict of params for image-build.
|
||||
:param dict image_infos: Dict contains image info.
|
||||
:param dict external_repo_checksum: Dict contains checksum of repomd.xml
|
||||
or None if can't get checksum.
|
||||
"""
|
||||
msg = "Writing reuse metadata file: %s" % metadata_file
|
||||
self.pool.log_info(msg)
|
||||
|
||||
cmd_copy = copy.deepcopy(cmd)
|
||||
del cmd_copy["image_conf"]["image-build"]["variant"]
|
||||
|
||||
data = {
|
||||
"cmd": cmd_copy,
|
||||
"image_infos": image_infos,
|
||||
"external_repo_checksum": external_repo_checksum,
|
||||
}
|
||||
try:
|
||||
with open(metadata_file, "w") as f:
|
||||
json.dump(data, f, indent=4)
|
||||
except Exception as e:
|
||||
self.pool.log_info("%s Failed: %s" % (msg, str(e)))
|
||||
|
||||
def _load_reuse_metadata(self, metadata_file):
|
||||
"""Load metadata file.
|
||||
|
||||
:param str metadata_file: Path to reuse metadata file.
|
||||
"""
|
||||
with open(metadata_file, "r") as f:
|
||||
return json.load(f)
|
||||
|
@ -3,6 +3,7 @@
|
||||
import os
|
||||
from kobo import shortcuts
|
||||
from collections import defaultdict
|
||||
import threading
|
||||
|
||||
from .base import PhaseBase
|
||||
from ..util import get_format_substs, get_file_size
|
||||
@ -68,6 +69,7 @@ class ImageChecksumPhase(PhaseBase):
|
||||
|
||||
def run(self):
|
||||
topdir = self.compose.paths.compose.topdir()
|
||||
|
||||
make_checksums(
|
||||
topdir,
|
||||
self.compose.im,
|
||||
@ -87,6 +89,8 @@ def _compute_checksums(
|
||||
checksum_types,
|
||||
base_checksum_name_gen,
|
||||
one_file,
|
||||
results_lock,
|
||||
cache_lock,
|
||||
):
|
||||
for image in images:
|
||||
filename = os.path.basename(image.path)
|
||||
@ -96,14 +100,21 @@ def _compute_checksums(
|
||||
|
||||
filesize = image.size or get_file_size(full_path)
|
||||
|
||||
cache_lock.acquire()
|
||||
if full_path not in cache:
|
||||
cache_lock.release()
|
||||
# Source ISO is listed under each binary architecture. There's no
|
||||
# point in checksumming it twice, so we can just remember the
|
||||
# digest from first run..
|
||||
cache[full_path] = shortcuts.compute_file_checksums(
|
||||
full_path, checksum_types
|
||||
)
|
||||
digests = cache[full_path]
|
||||
checksum_value = shortcuts.compute_file_checksums(full_path, checksum_types)
|
||||
with cache_lock:
|
||||
cache[full_path] = checksum_value
|
||||
else:
|
||||
cache_lock.release()
|
||||
|
||||
with cache_lock:
|
||||
digests = cache[full_path]
|
||||
|
||||
for checksum, digest in digests.items():
|
||||
# Update metadata with the checksum
|
||||
image.add_checksum(None, checksum, digest)
|
||||
@ -112,7 +123,10 @@ def _compute_checksums(
|
||||
checksum_filename = os.path.join(
|
||||
path, "%s.%sSUM" % (filename, checksum.upper())
|
||||
)
|
||||
results[checksum_filename].add((filename, filesize, checksum, digest))
|
||||
with results_lock:
|
||||
results[checksum_filename].add(
|
||||
(filename, filesize, checksum, digest)
|
||||
)
|
||||
|
||||
if one_file:
|
||||
dirname = os.path.basename(path)
|
||||
@ -125,24 +139,42 @@ def _compute_checksums(
|
||||
checksum_filename = "%s%sSUM" % (base_checksum_name, checksum.upper())
|
||||
checksum_path = os.path.join(path, checksum_filename)
|
||||
|
||||
results[checksum_path].add((filename, filesize, checksum, digest))
|
||||
with results_lock:
|
||||
results[checksum_path].add((filename, filesize, checksum, digest))
|
||||
|
||||
|
||||
def make_checksums(topdir, im, checksum_types, one_file, base_checksum_name_gen):
|
||||
results = defaultdict(set)
|
||||
cache = {}
|
||||
threads = []
|
||||
results_lock = threading.Lock() # lock to synchronize access to the results dict.
|
||||
cache_lock = threading.Lock() # lock to synchronize access to the cache dict.
|
||||
|
||||
# create all worker threads
|
||||
for (variant, arch, path), images in get_images(topdir, im).items():
|
||||
_compute_checksums(
|
||||
results,
|
||||
cache,
|
||||
variant,
|
||||
arch,
|
||||
path,
|
||||
images,
|
||||
checksum_types,
|
||||
base_checksum_name_gen,
|
||||
one_file,
|
||||
threads.append(
|
||||
threading.Thread(
|
||||
target=_compute_checksums,
|
||||
args=[
|
||||
results,
|
||||
cache,
|
||||
variant,
|
||||
arch,
|
||||
path,
|
||||
images,
|
||||
checksum_types,
|
||||
base_checksum_name_gen,
|
||||
one_file,
|
||||
results_lock,
|
||||
cache_lock,
|
||||
],
|
||||
)
|
||||
)
|
||||
threads[-1].start()
|
||||
|
||||
# wait for all worker threads to finish
|
||||
for thread in threads:
|
||||
thread.join()
|
||||
|
||||
for file in results:
|
||||
dump_checksums(file, results[file])
|
||||
|
122
pungi/phases/image_container.py
Normal file
122
pungi/phases/image_container.py
Normal file
@ -0,0 +1,122 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import os
|
||||
import re
|
||||
from kobo.threads import ThreadPool, WorkerThread
|
||||
|
||||
from .base import ConfigGuardedPhase, PhaseLoggerMixin
|
||||
from .. import util
|
||||
from ..wrappers import kojiwrapper
|
||||
from ..phases.osbs import add_metadata
|
||||
|
||||
|
||||
class ImageContainerPhase(PhaseLoggerMixin, ConfigGuardedPhase):
|
||||
name = "image_container"
|
||||
|
||||
def __init__(self, compose):
|
||||
super(ImageContainerPhase, self).__init__(compose)
|
||||
self.pool = ThreadPool(logger=self.logger)
|
||||
self.pool.metadata = {}
|
||||
|
||||
def run(self):
|
||||
for variant in self.compose.get_variants():
|
||||
for conf in self.get_config_block(variant):
|
||||
self.pool.add(ImageContainerThread(self.pool))
|
||||
self.pool.queue_put((self.compose, variant, conf))
|
||||
|
||||
self.pool.start()
|
||||
|
||||
|
||||
class ImageContainerThread(WorkerThread):
|
||||
def process(self, item, num):
|
||||
compose, variant, config = item
|
||||
self.num = num
|
||||
with util.failable(
|
||||
compose,
|
||||
bool(config.pop("failable", None)),
|
||||
variant,
|
||||
"*",
|
||||
"osbs",
|
||||
logger=self.pool._logger,
|
||||
):
|
||||
self.worker(compose, variant, config)
|
||||
|
||||
def worker(self, compose, variant, config):
|
||||
msg = "Image container task for variant %s" % variant.uid
|
||||
self.pool.log_info("[BEGIN] %s" % msg)
|
||||
|
||||
source = config.pop("url")
|
||||
target = config.pop("target")
|
||||
priority = config.pop("priority", None)
|
||||
|
||||
config["yum_repourls"] = [
|
||||
self._get_repo(
|
||||
compose,
|
||||
variant,
|
||||
config.get("arch_override", "").split(),
|
||||
config.pop("image_spec"),
|
||||
)
|
||||
]
|
||||
|
||||
# Start task
|
||||
koji = kojiwrapper.KojiWrapper(compose)
|
||||
koji.login()
|
||||
task_id = koji.koji_proxy.buildContainer(
|
||||
source, target, config, priority=priority
|
||||
)
|
||||
|
||||
koji.save_task_id(task_id)
|
||||
|
||||
# Wait for it to finish and capture the output into log file (even
|
||||
# though there is not much there).
|
||||
log_dir = os.path.join(compose.paths.log.topdir(), "image_container")
|
||||
util.makedirs(log_dir)
|
||||
log_file = os.path.join(
|
||||
log_dir, "%s-%s-watch-task.log" % (variant.uid, self.num)
|
||||
)
|
||||
if koji.watch_task(task_id, log_file) != 0:
|
||||
raise RuntimeError(
|
||||
"ImageContainer task failed: %s. See %s for details"
|
||||
% (task_id, log_file)
|
||||
)
|
||||
|
||||
add_metadata(variant, task_id, compose, config.get("scratch", False))
|
||||
|
||||
self.pool.log_info("[DONE ] %s" % msg)
|
||||
|
||||
def _get_repo(self, compose, variant, arches, image_spec):
|
||||
"""
|
||||
Return a repo file that points baseurl to the image specified by
|
||||
image_spec.
|
||||
"""
|
||||
image_paths = set()
|
||||
|
||||
for arch in arches or compose.im.images[variant.uid].keys():
|
||||
for image in compose.im.images[variant.uid].get(arch, []):
|
||||
for key, value in image_spec.items():
|
||||
if not re.match(value, getattr(image, key)):
|
||||
break
|
||||
else:
|
||||
image_paths.add(image.path.replace(arch, "$basearch"))
|
||||
|
||||
if len(image_paths) != 1:
|
||||
raise RuntimeError(
|
||||
"%d images matched specification. Only one was expected."
|
||||
% len(image_paths)
|
||||
)
|
||||
|
||||
image_path = image_paths.pop()
|
||||
absolute_path = os.path.join(compose.paths.compose.topdir(), image_path)
|
||||
|
||||
repo_file = os.path.join(
|
||||
compose.paths.work.tmp_dir(None, variant),
|
||||
"image-container-%s-%s.repo" % (variant, self.num),
|
||||
)
|
||||
with open(repo_file, "w") as f:
|
||||
f.write("[image-to-include]\n")
|
||||
f.write("name=Location of image to embed\n")
|
||||
f.write("baseurl=%s\n" % util.translate_path(compose, absolute_path))
|
||||
f.write("enabled=0\n")
|
||||
f.write("gpgcheck=0\n")
|
||||
|
||||
return util.translate_path(compose, repo_file)
|
@ -16,6 +16,7 @@
|
||||
|
||||
import collections
|
||||
import os
|
||||
import glob
|
||||
import shutil
|
||||
|
||||
from kobo.shortcuts import run
|
||||
@ -72,6 +73,10 @@ class InitPhase(PhaseBase):
|
||||
self.compose.paths.work.module_defaults_dir(create_dir=False)
|
||||
)
|
||||
|
||||
# download module obsoletes
|
||||
if self.compose.has_module_obsoletes:
|
||||
write_module_obsoletes(self.compose)
|
||||
|
||||
# write prepopulate file
|
||||
write_prepopulate_file(self.compose)
|
||||
|
||||
@ -160,12 +165,18 @@ def write_variant_comps(compose, arch, variant):
|
||||
run(cmd)
|
||||
|
||||
comps = CompsWrapper(comps_file)
|
||||
if variant.groups or variant.modules is not None or variant.type != "variant":
|
||||
# Filter groups if the variant has some, or it's a modular variant, or
|
||||
# is not a base variant.
|
||||
# Filter groups if the variant has some, or it's a modular variant, or
|
||||
# is not a base variant.
|
||||
if (
|
||||
variant.groups
|
||||
or variant.modules is not None
|
||||
or variant.modular_koji_tags is not None
|
||||
or variant.type != "variant"
|
||||
):
|
||||
unmatched = comps.filter_groups(variant.groups)
|
||||
for grp in unmatched:
|
||||
compose.log_warning(UNMATCHED_GROUP_MSG % (variant.uid, arch, grp))
|
||||
|
||||
contains_all = not variant.groups and not variant.environments
|
||||
if compose.conf["comps_filter_environments"] and not contains_all:
|
||||
# We only want to filter environments if it's enabled by configuration
|
||||
@ -218,12 +229,33 @@ def write_module_defaults(compose):
|
||||
)
|
||||
|
||||
|
||||
def write_module_obsoletes(compose):
|
||||
scm_dict = compose.conf["module_obsoletes_dir"]
|
||||
if isinstance(scm_dict, dict):
|
||||
if scm_dict["scm"] == "file":
|
||||
scm_dict["dir"] = os.path.join(compose.config_dir, scm_dict["dir"])
|
||||
else:
|
||||
scm_dict = os.path.join(compose.config_dir, scm_dict)
|
||||
|
||||
with temp_dir(prefix="moduleobsoletes_") as tmp_dir:
|
||||
get_dir_from_scm(scm_dict, tmp_dir, compose=compose)
|
||||
compose.log_debug("Writing module obsoletes")
|
||||
shutil.copytree(
|
||||
tmp_dir,
|
||||
compose.paths.work.module_obsoletes_dir(create_dir=False),
|
||||
ignore=shutil.ignore_patterns(".git"),
|
||||
)
|
||||
|
||||
|
||||
def validate_module_defaults(path):
|
||||
"""Make sure there are no conflicting defaults. Each module name can only
|
||||
have one default stream.
|
||||
"""Make sure there are no conflicting defaults and every default can be loaded.
|
||||
Each module name can onlyhave one default stream.
|
||||
|
||||
:param str path: directory with cloned module defaults
|
||||
"""
|
||||
|
||||
defaults_num = len(glob.glob(os.path.join(path, "*.yaml")))
|
||||
|
||||
seen_defaults = collections.defaultdict(set)
|
||||
|
||||
for module_name, defaults in iter_module_defaults(path):
|
||||
@ -242,6 +274,11 @@ def validate_module_defaults(path):
|
||||
"There are duplicated module defaults:\n%s" % "\n".join(errors)
|
||||
)
|
||||
|
||||
# Make sure all defaults are valid otherwise update_from_defaults_directory
|
||||
# will return empty object
|
||||
if defaults_num != len(seen_defaults):
|
||||
raise RuntimeError("Defaults contains not valid default file")
|
||||
|
||||
|
||||
def validate_comps(path):
|
||||
"""Check that there are whitespace issues in comps."""
|
||||
|
229
pungi/phases/kiwibuild.py
Normal file
229
pungi/phases/kiwibuild.py
Normal file
@ -0,0 +1,229 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import os
|
||||
from kobo.threads import ThreadPool, WorkerThread
|
||||
from kobo import shortcuts
|
||||
from productmd.images import Image
|
||||
|
||||
from . import base
|
||||
from .. import util
|
||||
from ..linker import Linker
|
||||
from ..wrappers import kojiwrapper
|
||||
from .image_build import EXTENSIONS
|
||||
|
||||
KIWIEXTENSIONS = [
|
||||
("vhd-compressed", ["vhdfixed.xz"], "vhd.xz"),
|
||||
("vagrant-libvirt", ["vagrant.libvirt.box"], "vagrant-libvirt.box"),
|
||||
("vagrant-virtualbox", ["vagrant.virtualbox.box"], "vagrant-virtualbox.box"),
|
||||
]
|
||||
|
||||
|
||||
class KiwiBuildPhase(
    base.PhaseLoggerMixin, base.ImageConfigMixin, base.ConfigGuardedPhase
):
    """Phase that submits kiwiBuild tasks to Koji, one thread per image."""

    name = "kiwibuild"

    def __init__(self, compose):
        super(KiwiBuildPhase, self).__init__(compose)
        # Worker pool; each queued item is processed by a RunKiwiBuildThread.
        self.pool = ThreadPool(logger=self.logger)

    def _get_arches(self, image_conf, arches):
        """Get an intersection of arches in the config dict and the given ones."""
        if "arches" in image_conf:
            arches = set(image_conf["arches"]) & arches
        return sorted(arches)

    @staticmethod
    def _get_repo_urls(compose, repos, arch="$basearch"):
        """
        Get list of repos with resolved repo URLs. Preserve repos defined
        as dicts.

        :raises RuntimeError: when a repo can not be resolved to a URL.
        """
        resolved_repos = []

        for repo in repos:
            # BUGFIX: keep the original value so the error message can name
            # the repo that failed. Previously ``repo`` was overwritten with
            # the resolved result first, so the message always printed "None".
            repo_url = util.get_repo_url(compose, repo, arch=arch)
            if repo_url is None:
                raise RuntimeError("Failed to resolve repo URL for %s" % repo)
            resolved_repos.append(repo_url)

        return resolved_repos

    def _get_repo(self, image_conf, variant):
        """
        Get a list of repos. First included are those explicitly listed in
        config, followed by repo for current variant if it's not included in
        the list already.
        """
        repos = shortcuts.force_list(image_conf.get("repos", []))

        if not variant.is_empty and variant.uid not in repos:
            repos.append(variant.uid)

        return KiwiBuildPhase._get_repo_urls(self.compose, repos, arch="$arch")

    def run(self):
        """Queue a build thread for every configured image and start the pool."""
        for variant in self.compose.get_variants():
            # Source trees are never built as images.
            arches = {x for x in variant.arches if x != "src"}

            for image_conf in self.get_config_block(variant):
                build_arches = self._get_arches(image_conf, arches)
                if not build_arches:
                    self.log_debug("skip: no arches")
                    continue

                # these properties can be set per-image *or* as e.g.
                # kiwibuild_description_scm or global_release in the config
                generics = {
                    "release": self.get_release(image_conf),
                    "target": self.get_config(image_conf, "target"),
                    "descscm": self.get_config(image_conf, "description_scm"),
                    "descpath": self.get_config(image_conf, "description_path"),
                    "type": self.get_config(image_conf, "type"),
                    "type_attr": self.get_config(image_conf, "type_attr"),
                    "bundle_name_format": self.get_config(
                        image_conf, "bundle_name_format"
                    ),
                }

                repo = self._get_repo(image_conf, variant)

                failable_arches = image_conf.pop("failable", [])
                if failable_arches == ["*"]:
                    # NOTE(review): "failable": ["*"] requires an explicit
                    # "arches" key in the image config, otherwise this raises
                    # KeyError — confirm that is the intended contract.
                    failable_arches = image_conf["arches"]

                self.pool.add(RunKiwiBuildThread(self.pool))
                self.pool.queue_put(
                    (
                        self.compose,
                        variant,
                        image_conf,
                        build_arches,
                        generics,
                        repo,
                        failable_arches,
                    )
                )

        self.pool.start()
|
||||
|
||||
|
||||
class RunKiwiBuildThread(WorkerThread):
    def process(self, item, num):
        """Unpack one queued work item and run it under failable() handling.

        ``item`` is the tuple queued by KiwiBuildPhase.run().
        """
        (compose, variant, config, arches, generics, repo, failable_arches) = item
        self.failable_arches = failable_arches
        # the Koji task as a whole can only fail if *all* arches are failable
        can_task_fail = set(failable_arches).issuperset(set(arches))
        self.num = num
        with util.failable(
            compose,
            can_task_fail,
            variant,
            "*",
            "kiwibuild",
            logger=self.pool._logger,
        ):
            self.worker(compose, variant, config, arches, generics, repo)

    def worker(self, compose, variant, config, arches, generics, repo):
        """Submit a kiwiBuild Koji task, wait for it, link results and record
        the produced images in the compose image manifest."""
        msg = "kiwibuild task for variant %s" % variant.uid
        self.pool.log_info("[BEGIN] %s" % msg)
        koji = kojiwrapper.KojiWrapper(compose)
        koji.login()

        task_id = koji.koji_proxy.kiwiBuild(
            generics["target"],
            arches,
            generics["descscm"],
            generics["descpath"],
            profile=config["kiwi_profile"],
            release=generics["release"],
            repos=repo,
            type=generics["type"],
            type_attr=generics["type_attr"],
            result_bundle_name_format=generics["bundle_name_format"],
            # this ensures the task won't fail if only failable arches fail
            optional_arches=self.failable_arches,
        )

        koji.save_task_id(task_id)

        # Wait for it to finish and capture the output into log file.
        log_dir = os.path.join(compose.paths.log.topdir(), "kiwibuild")
        util.makedirs(log_dir)
        log_file = os.path.join(
            log_dir, "%s-%s-watch-task.log" % (variant.uid, self.num)
        )
        if koji.watch_task(task_id, log_file) != 0:
            raise RuntimeError(
                "kiwiBuild task failed: %s. See %s for details" % (task_id, log_file)
            )

        # Refresh koji session which may have timed out while the task was
        # running. Watching is done via a subprocess, so the session is
        # inactive.
        koji = kojiwrapper.KojiWrapper(compose)

        linker = Linker(logger=self.pool._logger)

        # Process all images in the build. There should be one for each
        # architecture, but we don't verify that.
        paths = koji.get_image_paths(task_id)

        # NOTE(review): the loop variable ``paths`` shadows the dict above;
        # harmless here since the dict is only iterated once, but worth
        # renaming on a future touch.
        for arch, paths in paths.items():
            for path in paths:
                type_, format_ = _find_type_and_format(path)
                if not format_:
                    # Path doesn't match any known type.
                    continue

                # image_dir is absolute path to which the image should be copied.
                # We also need the same path as relative to compose directory for
                # including in the metadata.
                image_dir = compose.paths.compose.image_dir(variant) % {"arch": arch}
                rel_image_dir = compose.paths.compose.image_dir(
                    variant, relative=True
                ) % {"arch": arch}
                util.makedirs(image_dir)

                filename = os.path.basename(path)

                image_dest = os.path.join(image_dir, filename)

                # Download (or reuse a local copy of) the Koji output first.
                src_file = compose.koji_downloader.get_file(path)

                linker.link(src_file, image_dest, link_type=compose.conf["link_type"])

                # Update image manifest
                img = Image(compose.im)

                # Get the manifest type from the config if supplied, otherwise we
                # determine the manifest type based on the koji output
                img.type = type_
                img.format = format_
                img.path = os.path.join(rel_image_dir, filename)
                img.mtime = util.get_mtime(image_dest)
                img.size = util.get_file_size(image_dest)
                img.arch = arch
                img.disc_number = 1  # We don't expect multiple disks
                img.disc_count = 1
                img.bootable = False
                img.subvariant = config.get("subvariant", variant.uid)
                setattr(img, "can_fail", arch in self.failable_arches)
                setattr(img, "deliverable", "kiwibuild")
                compose.im.add(variant=variant.uid, arch=arch, image=img)

        self.pool.log_info("[DONE ] %s (task id: %s)" % (msg, task_id))
|
||||
|
||||
|
||||
def _find_type_and_format(path):
    """Return ``(image type, format)`` for *path* based on its file suffix.

    The shared imagefactory EXTENSIONS mapping is consulted first; there the
    matching suffix doubles as the format. The kiwi-only KIWIEXTENSIONS table
    is tried next, where the format is stored separately. Returns
    ``(None, None)`` when nothing matches.
    """
    for image_type, known_suffixes in EXTENSIONS.items():
        matched = next((s for s in known_suffixes if path.endswith(s)), None)
        if matched is not None:
            return image_type, matched
    # these are our kiwi-exclusive mappings for images whose extensions
    # aren't quite the same as imagefactory
    for image_type, known_suffixes, image_format in KIWIEXTENSIONS:
        if any(path.endswith(s) for s in known_suffixes):
            return image_type, image_format
    return None, None
|
@ -1,406 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; version 2 of the License.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Library General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, see <https://gnu.org/licenses/>.
|
||||
|
||||
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import shutil
|
||||
|
||||
from kobo.threads import ThreadPool, WorkerThread
|
||||
from kobo.shortcuts import run, save_to_file, force_list
|
||||
from productmd.images import Image
|
||||
from six.moves import shlex_quote
|
||||
|
||||
from pungi.wrappers.kojiwrapper import KojiWrapper
|
||||
from pungi.wrappers import iso
|
||||
from pungi.phases import base
|
||||
from pungi.util import makedirs, get_mtime, get_file_size, failable
|
||||
from pungi.util import get_repo_urls
|
||||
|
||||
|
||||
# HACK: define cmp in python3
# Python 3 removed the builtin ``cmp``; recreate it so py2-era callers in this
# module keep working. ``(a > b) - (a < b)`` yields -1, 0 or 1.
if sys.version_info[0] == 3:

    def cmp(a, b):
        return (a > b) - (a < b)
|
||||
|
||||
|
||||
class LiveImagesPhase(
    base.PhaseLoggerMixin, base.ImageConfigMixin, base.ConfigGuardedPhase
):
    """Phase that builds live/appliance images via Koji's createLiveCD-style
    tasks, one CreateLiveImageThread per configured image."""

    name = "live_images"

    def __init__(self, compose):
        super(LiveImagesPhase, self).__init__(compose)
        self.pool = ThreadPool(logger=self.logger)

    def _get_repos(self, arch, variant, data):
        """Return resolved repo URLs: current variant's repo (unless the
        variant is empty) followed by any repos listed in the config item."""
        repos = []
        if not variant.is_empty:
            repos.append(variant.uid)
        repos.extend(force_list(data.get("repo", [])))
        return get_repo_urls(self.compose, repos, arch=arch)

    def run(self):
        """Collect one command dict per configured image, then queue them all
        on the thread pool and start it."""
        symlink_isos_to = self.compose.conf.get("symlink_isos_to")
        commands = []

        for variant in self.compose.all_variants.values():
            for arch in variant.arches + ["src"]:
                for data in self.get_config_block(variant, arch):
                    subvariant = data.get("subvariant", variant.uid)
                    # NOTE: ``type`` shadows the builtin; kept for historical
                    # reasons. Valid values are "live" and "appliance".
                    type = data.get("type", "live")

                    if type == "live":
                        dest_dir = self.compose.paths.compose.iso_dir(
                            arch, variant, symlink_to=symlink_isos_to
                        )
                    elif type == "appliance":
                        dest_dir = self.compose.paths.compose.image_dir(
                            variant, symlink_to=symlink_isos_to
                        )
                        dest_dir = dest_dir % {"arch": arch}
                        makedirs(dest_dir)
                    else:
                        raise RuntimeError("Unknown live image type %s" % type)
                    if not dest_dir:
                        continue

                    cmd = {
                        "name": data.get("name"),
                        "version": self.get_version(data),
                        "release": self.get_release(data),
                        "dest_dir": dest_dir,
                        "build_arch": arch,
                        "ks_file": data["kickstart"],
                        "ksurl": self.get_ksurl(data),
                        # Used for images wrapped in RPM
                        "specfile": data.get("specfile", None),
                        # Scratch (only taken in consideration if specfile
                        # specified) For images wrapped in rpm is scratch
                        # disabled by default For other images is scratch
                        # always on
                        "scratch": data.get("scratch", False),
                        "sign": False,
                        "type": type,
                        "label": "",  # currently not used
                        "subvariant": subvariant,
                        "failable_arches": data.get("failable", []),
                        # First see if live_target is specified, then fall back
                        # to regular setup of local, phase and global setting.
                        "target": self.compose.conf.get("live_target")
                        or self.get_config(data, "target"),
                    }

                    cmd["repos"] = self._get_repos(arch, variant, data)

                    # Signing of the rpm wrapped image
                    if not cmd["scratch"] and data.get("sign"):
                        cmd["sign"] = True

                    cmd["filename"] = self._get_file_name(
                        arch, variant, cmd["name"], cmd["version"]
                    )

                    commands.append((cmd, variant, arch))

        for (cmd, variant, arch) in commands:
            self.pool.add(CreateLiveImageThread(self.pool))
            self.pool.queue_put((self.compose, cmd, variant, arch))

        self.pool.start()

    def _get_file_name(self, arch, variant, name=None, version=None):
        """Build the destination file name for the image, or None when
        renaming of Koji outputs is disabled via ``live_images_no_rename``."""
        if self.compose.conf["live_images_no_rename"]:
            return None

        disc_type = self.compose.conf["disc_types"].get("live", "live")

        # NOTE: ``format`` shadows the builtin; it is a naming template
        # consumed by compose.get_image_name().
        format = (
            "%(compose_id)s-%(variant)s-%(arch)s-%(disc_type)s%(disc_num)s%(suffix)s"
        )
        # Custom name (prefix)
        if name:
            custom_iso_name = name
            if version:
                custom_iso_name += "-%s" % version
            format = (
                custom_iso_name
                + "-%(variant)s-%(arch)s-%(disc_type)s%(disc_num)s%(suffix)s"
            )

        # XXX: hardcoded disc_num
        return self.compose.get_image_name(
            arch, variant, disc_type=disc_type, disc_num=None, format=format
        )
|
||||
|
||||
|
||||
class CreateLiveImageThread(WorkerThread):
    # File suffixes recognized as image outputs of the Koji task.
    EXTS = (".iso", ".raw.xz")

    def process(self, item, num):
        """Unpack one queued work item and run it under failable() handling."""
        compose, cmd, variant, arch = item
        self.failable_arches = cmd.get("failable_arches", [])
        self.can_fail = bool(self.failable_arches)
        with failable(
            compose,
            self.can_fail,
            variant,
            arch,
            "live",
            cmd.get("subvariant"),
            logger=self.pool._logger,
        ):
            self.worker(compose, cmd, variant, arch, num)

    def worker(self, compose, cmd, variant, arch, num):
        """Run one Koji live/appliance image build, copy the outputs into the
        compose tree, optionally sign rpm-wrapped results, and record the
        image in the manifest."""
        self.basename = "%(name)s-%(version)s-%(release)s" % cmd
        log_file = compose.paths.log.log_file(arch, "liveimage-%s" % self.basename)

        subvariant = cmd.pop("subvariant")

        # Default image name used when the config does not supply one.
        imgname = "%s-%s-%s-%s" % (
            compose.ci_base.release.short,
            subvariant,
            "Live" if cmd["type"] == "live" else "Disk",
            arch,
        )

        msg = "Creating ISO (arch: %s, variant: %s): %s" % (
            arch,
            variant,
            self.basename,
        )
        self.pool.log_info("[BEGIN] %s" % msg)

        koji_wrapper = KojiWrapper(compose.conf["koji_profile"])
        # Fall back to the compose id's trailing component as the version.
        _, version = compose.compose_id.rsplit("-", 1)
        name = cmd["name"] or imgname
        version = cmd["version"] or version
        archive = False
        if cmd["specfile"] and not cmd["scratch"]:
            # Non scratch build are allowed only for rpm wrapped images
            archive = True
        koji_cmd = koji_wrapper.get_create_image_cmd(
            name,
            version,
            cmd["target"],
            cmd["build_arch"],
            cmd["ks_file"],
            cmd["repos"],
            image_type=cmd["type"],
            wait=True,
            archive=archive,
            specfile=cmd["specfile"],
            release=cmd["release"],
            ksurl=cmd["ksurl"],
        )

        # avoid race conditions?
        # Kerberos authentication failed:
        # Permission denied in replay cache code (-1765328215)
        time.sleep(num * 3)

        output = koji_wrapper.run_blocking_cmd(koji_cmd, log_file=log_file)
        if output["retcode"] != 0:
            raise RuntimeError(
                "LiveImage task failed: %s. See %s for more details."
                % (output["task_id"], log_file)
            )

        # copy finished image to isos/
        image_path = [
            path
            for path in koji_wrapper.get_image_path(output["task_id"])
            if self._is_image(path)
        ]
        if len(image_path) != 1:
            raise RuntimeError(
                "Got %d images from task %d, expected 1."
                % (len(image_path), output["task_id"])
            )
        image_path = image_path[0]
        filename = cmd.get("filename") or os.path.basename(image_path)
        destination = os.path.join(cmd["dest_dir"], filename)
        shutil.copy2(image_path, destination)

        # copy finished rpm to isos/ (if rpm wrapped ISO was built)
        if cmd["specfile"]:
            rpm_paths = koji_wrapper.get_wrapped_rpm_path(output["task_id"])

            if cmd["sign"]:
                # Sign the rpm wrapped images and get their paths
                self.pool.log_info(
                    "Signing rpm wrapped images in task_id: %s (expected key ID: %s)"
                    % (output["task_id"], compose.conf.get("signing_key_id"))
                )
                signed_rpm_paths = self._sign_image(
                    koji_wrapper, compose, cmd, output["task_id"]
                )
                if signed_rpm_paths:
                    rpm_paths = signed_rpm_paths

            for rpm_path in rpm_paths:
                shutil.copy2(rpm_path, cmd["dest_dir"])

        if cmd["type"] == "live":
            # ISO manifest only makes sense for live images
            self._write_manifest(destination)

        self._add_to_images(
            compose,
            variant,
            subvariant,
            arch,
            cmd["type"],
            self._get_format(image_path),
            destination,
        )

        self.pool.log_info("[DONE ] %s (task id: %s)" % (msg, output["task_id"]))

    def _add_to_images(self, compose, variant, subvariant, arch, type, format, path):
        """Adds the image to images.json"""
        img = Image(compose.im)
        # Appliance images are recorded under the "raw-xz" manifest type.
        img.type = "raw-xz" if type == "appliance" else type
        img.format = format
        img.path = os.path.relpath(path, compose.paths.compose.topdir())
        img.mtime = get_mtime(path)
        img.size = get_file_size(path)
        img.arch = arch
        img.disc_number = 1  # We don't expect multiple disks
        img.disc_count = 1
        img.bootable = True
        img.subvariant = subvariant
        setattr(img, "can_fail", self.can_fail)
        setattr(img, "deliverable", "live")
        compose.im.add(variant=variant.uid, arch=arch, image=img)

    def _is_image(self, path):
        """Return True when the path ends with a recognized image suffix."""
        for ext in self.EXTS:
            if path.endswith(ext):
                return True
        return False

    def _get_format(self, path):
        """Get format based on extension."""
        for ext in self.EXTS:
            if path.endswith(ext):
                return ext[1:]
        raise RuntimeError("Getting format for unknown image %s" % path)

    def _write_manifest(self, iso_path):
        """Generate manifest for ISO at given path.

        :param iso_path: (str) absolute path to the ISO
        """
        dir, filename = os.path.split(iso_path)
        run("cd %s && %s" % (shlex_quote(dir), iso.get_manifest_cmd(filename)))

    def _sign_image(self, koji_wrapper, compose, cmd, koji_task_id):
        """Sign rpm-wrapped images from the given task.

        Returns the list of signed rpm paths, or None when signing is
        skipped (missing config) or the signed files never appeared.
        """
        signing_key_id = compose.conf.get("signing_key_id")
        signing_command = compose.conf.get("signing_command")

        if not signing_key_id:
            self.pool.log_warning(
                "Signing is enabled but signing_key_id is not specified"
            )
            self.pool.log_warning("Signing skipped")
            return None
        if not signing_command:
            self.pool.log_warning(
                "Signing is enabled but signing_command is not specified"
            )
            self.pool.log_warning("Signing skipped")
            return None

        # Prepare signing log file
        signing_log_file = compose.paths.log.log_file(
            cmd["build_arch"], "live_images-signing-%s" % self.basename
        )

        # Sign the rpm wrapped images
        try:
            sign_builds_in_task(
                koji_wrapper,
                koji_task_id,
                signing_command,
                log_file=signing_log_file,
                signing_key_password=compose.conf.get("signing_key_password"),
            )
        except RuntimeError:
            self.pool.log_error(
                "Error while signing rpm wrapped images. See log: %s" % signing_log_file
            )
            raise

        # Get paths to the signed rpms
        signing_key_id = signing_key_id.lower()  # Koji uses lowercase in paths
        rpm_paths = koji_wrapper.get_signed_wrapped_rpms_paths(
            koji_task_id, signing_key_id
        )

        # Wait until files are available
        if wait_paths(rpm_paths, 60 * 15):
            # Files are ready
            return rpm_paths

        # Signed RPMs are not available
        self.pool.log_warning("Signed files are not available: %s" % rpm_paths)
        self.pool.log_warning("Unsigned files will be used")
        return None
|
||||
|
||||
|
||||
def wait_paths(paths, timeout=60):
    """Block until every path in *paths* exists on the filesystem.

    Polls once per second. Returns True once all paths exist, or False when
    *timeout* seconds (a non-negative value) elapse first; a negative timeout
    waits forever.
    """
    start = time.time()
    pending = list(paths)
    while True:
        # Drop every path that has appeared since the last poll.
        pending = [p for p in pending if not os.path.exists(p)]
        if not pending:
            return True
        time.sleep(1)
        if timeout >= 0 and (time.time() - start) > timeout:
            return False
|
||||
|
||||
|
||||
def sign_builds_in_task(
    koji_wrapper, task_id, signing_command, log_file=None, signing_key_password=None
):
    """Sign every build produced by the given Koji task.

    The build NVRs are appended to *signing_command* as quoted arguments; the
    command is logged before the password placeholder is substituted, then
    executed. Does nothing when the task produced no builds.
    """
    build_nvrs = koji_wrapper.get_build_nvrs(task_id)
    if not build_nvrs:
        # Nothing to sign (e.g. a scratch build produces no builds).
        return

    # Append each build as a single-quoted argument to the signing command.
    signing_command += "".join(" '%s'" % build_nvr for build_nvr in build_nvrs)

    # Record the command before the password is filled in, so the secret
    # never ends up in the log.
    if log_file:
        save_to_file(log_file, signing_command, append=True)

    # Substitute the password placeholder, if a password was provided.
    if signing_key_password:
        signing_command = signing_command % {
            "signing_key_password": signing_key_password
        }

    run(signing_command, can_fail=False, show_cmd=False, logfile=log_file)
|
@ -71,6 +71,7 @@ class LiveMediaPhase(PhaseLoggerMixin, ImageConfigMixin, ConfigGuardedPhase):
|
||||
"ksurl": self.get_ksurl(image_conf),
|
||||
"ksversion": image_conf.get("ksversion"),
|
||||
"scratch": image_conf.get("scratch", False),
|
||||
"nomacboot": image_conf.get("nomacboot", False),
|
||||
"release": self.get_release(image_conf),
|
||||
"skip_tag": image_conf.get("skip_tag"),
|
||||
"name": name,
|
||||
@ -140,7 +141,7 @@ class LiveMediaThread(WorkerThread):
|
||||
)
|
||||
self.pool.log_info("[BEGIN] %s" % msg)
|
||||
|
||||
koji_wrapper = KojiWrapper(compose.conf["koji_profile"])
|
||||
koji_wrapper = KojiWrapper(compose)
|
||||
cmd = self._get_cmd(koji_wrapper, config)
|
||||
|
||||
log_file = self._get_log_file(compose, variant, subvariant, config)
|
||||
@ -181,7 +182,9 @@ class LiveMediaThread(WorkerThread):
|
||||
# let's not change filename of koji outputs
|
||||
image_dest = os.path.join(image_dir, os.path.basename(image_info["path"]))
|
||||
|
||||
src_file = os.path.realpath(image_info["path"])
|
||||
src_file = compose.koji_downloader.get_file(
|
||||
os.path.realpath(image_info["path"])
|
||||
)
|
||||
linker.link(src_file, image_dest, link_type=link_type)
|
||||
|
||||
# Update image manifest
|
||||
|
@ -1,24 +1,29 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import copy
|
||||
import fnmatch
|
||||
import json
|
||||
import os
|
||||
from kobo.threads import ThreadPool, WorkerThread
|
||||
from kobo import shortcuts
|
||||
from productmd.rpms import Rpms
|
||||
from six.moves import configparser
|
||||
|
||||
from .base import ConfigGuardedPhase, PhaseLoggerMixin
|
||||
from .. import util
|
||||
from ..wrappers import kojiwrapper
|
||||
from ..wrappers.scm import get_file_from_scm
|
||||
|
||||
|
||||
class OSBSPhase(PhaseLoggerMixin, ConfigGuardedPhase):
|
||||
name = "osbs"
|
||||
|
||||
def __init__(self, compose):
|
||||
def __init__(self, compose, pkgset_phase, buildinstall_phase):
|
||||
super(OSBSPhase, self).__init__(compose)
|
||||
self.pool = ThreadPool(logger=self.logger)
|
||||
self.pool.metadata = {}
|
||||
self.pool.registries = {}
|
||||
self.pool.pkgset_phase = pkgset_phase
|
||||
self.pool.buildinstall_phase = buildinstall_phase
|
||||
|
||||
def run(self):
|
||||
for variant in self.compose.get_variants():
|
||||
@ -28,15 +33,6 @@ class OSBSPhase(PhaseLoggerMixin, ConfigGuardedPhase):
|
||||
|
||||
self.pool.start()
|
||||
|
||||
def dump_metadata(self):
|
||||
"""Create a file with image metadata if the phase actually ran."""
|
||||
if self._skipped:
|
||||
return
|
||||
with open(self.compose.paths.compose.metadata("osbs.json"), "w") as f:
|
||||
json.dump(
|
||||
self.pool.metadata, f, indent=4, sort_keys=True, separators=(",", ": ")
|
||||
)
|
||||
|
||||
def request_push(self):
|
||||
"""Store configuration data about where to push the created images and
|
||||
then send the same data to message bus.
|
||||
@ -87,8 +83,8 @@ class OSBSThread(WorkerThread):
|
||||
def worker(self, compose, variant, config):
|
||||
msg = "OSBS task for variant %s" % variant.uid
|
||||
self.pool.log_info("[BEGIN] %s" % msg)
|
||||
koji = kojiwrapper.KojiWrapper(compose.conf["koji_profile"])
|
||||
koji.login()
|
||||
|
||||
original_config = copy.deepcopy(config)
|
||||
|
||||
# Start task
|
||||
source = config.pop("url")
|
||||
@ -104,86 +100,98 @@ class OSBSThread(WorkerThread):
|
||||
|
||||
config["yum_repourls"] = repos
|
||||
|
||||
task_id = koji.koji_proxy.buildContainer(
|
||||
source, target, config, priority=priority
|
||||
)
|
||||
|
||||
# Wait for it to finish and capture the output into log file (even
|
||||
# though there is not much there).
|
||||
log_dir = os.path.join(compose.paths.log.topdir(), "osbs")
|
||||
util.makedirs(log_dir)
|
||||
log_file = os.path.join(
|
||||
log_dir, "%s-%s-watch-task.log" % (variant.uid, self.num)
|
||||
)
|
||||
reuse_file = log_file[:-4] + ".reuse.json"
|
||||
|
||||
try:
|
||||
image_conf = self._get_image_conf(compose, original_config)
|
||||
except Exception as e:
|
||||
image_conf = None
|
||||
self.pool.log_info(
|
||||
"Can't get image-build.conf for variant: %s source: %s - %s"
|
||||
% (variant.uid, source, str(e))
|
||||
)
|
||||
|
||||
koji = kojiwrapper.KojiWrapper(compose)
|
||||
koji.login()
|
||||
|
||||
task_id = self._try_to_reuse(
|
||||
compose, variant, original_config, image_conf, reuse_file
|
||||
)
|
||||
|
||||
if not task_id:
|
||||
task_id = koji.koji_proxy.buildContainer(
|
||||
source, target, config, priority=priority
|
||||
)
|
||||
|
||||
koji.save_task_id(task_id)
|
||||
|
||||
# Wait for it to finish and capture the output into log file (even
|
||||
# though there is not much there).
|
||||
if koji.watch_task(task_id, log_file) != 0:
|
||||
raise RuntimeError(
|
||||
"OSBS: task %s failed: see %s for details" % (task_id, log_file)
|
||||
"OSBS task failed: %s. See %s for details" % (task_id, log_file)
|
||||
)
|
||||
|
||||
scratch = config.get("scratch", False)
|
||||
nvr = self._add_metadata(variant, task_id, compose, scratch)
|
||||
nvr, archive_ids = add_metadata(variant, task_id, compose, scratch)
|
||||
if nvr:
|
||||
registry = get_registry(compose, nvr, registry)
|
||||
if registry:
|
||||
self.pool.registries[nvr] = registry
|
||||
|
||||
self.pool.log_info("[DONE ] %s" % msg)
|
||||
self._write_reuse_metadata(
|
||||
compose,
|
||||
variant,
|
||||
original_config,
|
||||
image_conf,
|
||||
task_id,
|
||||
archive_ids,
|
||||
reuse_file,
|
||||
)
|
||||
|
||||
def _add_metadata(self, variant, task_id, compose, is_scratch):
|
||||
# Create new Koji session. The task could take so long to finish that
|
||||
# our session will expire. This second session does not need to be
|
||||
# authenticated since it will only do reading operations.
|
||||
koji = kojiwrapper.KojiWrapper(compose.conf["koji_profile"])
|
||||
self.pool.log_info("[DONE ] %s (task id: %s)" % (msg, task_id))
|
||||
|
||||
# Create metadata
|
||||
metadata = {
|
||||
"compose_id": compose.compose_id,
|
||||
"koji_task": task_id,
|
||||
}
|
||||
def _get_image_conf(self, compose, config):
|
||||
"""Get image-build.conf from git repo.
|
||||
|
||||
result = koji.koji_proxy.getTaskResult(task_id)
|
||||
if is_scratch:
|
||||
metadata.update({"repositories": result["repositories"]})
|
||||
# add a fake arch of 'scratch', so we can construct the metadata
|
||||
# in same data structure as real builds.
|
||||
self.pool.metadata.setdefault(variant.uid, {}).setdefault(
|
||||
"scratch", []
|
||||
).append(metadata)
|
||||
return None
|
||||
:param Compose compose: Current compose.
|
||||
:param dict config: One osbs config item of compose.conf["osbs"][$variant]
|
||||
"""
|
||||
tmp_dir = compose.mkdtemp(prefix="osbs_")
|
||||
|
||||
url = config["url"].split("#")
|
||||
if len(url) == 1:
|
||||
url.append(config["git_branch"])
|
||||
|
||||
filename = "image-build.conf"
|
||||
get_file_from_scm(
|
||||
{
|
||||
"scm": "git",
|
||||
"repo": url[0],
|
||||
"branch": url[1],
|
||||
"file": [filename],
|
||||
},
|
||||
tmp_dir,
|
||||
)
|
||||
|
||||
c = configparser.ConfigParser()
|
||||
c.read(os.path.join(tmp_dir, filename))
|
||||
return c
|
||||
|
||||
def _get_ksurl(self, image_conf):
|
||||
"""Get ksurl from image-build.conf"""
|
||||
ksurl = image_conf.get("image-build", "ksurl")
|
||||
|
||||
if ksurl:
|
||||
resolver = util.GitUrlResolver(offline=False)
|
||||
return resolver(ksurl)
|
||||
else:
|
||||
build_id = int(result["koji_builds"][0])
|
||||
buildinfo = koji.koji_proxy.getBuild(build_id)
|
||||
archives = koji.koji_proxy.listArchives(build_id)
|
||||
|
||||
nvr = "%(name)s-%(version)s-%(release)s" % buildinfo
|
||||
|
||||
metadata.update(
|
||||
{
|
||||
"name": buildinfo["name"],
|
||||
"version": buildinfo["version"],
|
||||
"release": buildinfo["release"],
|
||||
"nvr": nvr,
|
||||
"creation_time": buildinfo["creation_time"],
|
||||
}
|
||||
)
|
||||
for archive in archives:
|
||||
data = {
|
||||
"filename": archive["filename"],
|
||||
"size": archive["size"],
|
||||
"checksum": archive["checksum"],
|
||||
}
|
||||
data.update(archive["extra"])
|
||||
data.update(metadata)
|
||||
arch = archive["extra"]["image"]["arch"]
|
||||
self.pool.log_debug(
|
||||
"Created Docker base image %s-%s-%s.%s"
|
||||
% (metadata["name"], metadata["version"], metadata["release"], arch)
|
||||
)
|
||||
self.pool.metadata.setdefault(variant.uid, {}).setdefault(
|
||||
arch, []
|
||||
).append(data)
|
||||
return nvr
|
||||
return None
|
||||
|
||||
def _get_repo(self, compose, repo, gpgkey=None):
|
||||
"""
|
||||
@ -192,7 +200,7 @@ class OSBSThread(WorkerThread):
|
||||
file pointing to that location and return the URL to .repo file.
|
||||
"""
|
||||
if "://" in repo:
|
||||
return repo
|
||||
return repo.replace("$COMPOSE_ID", compose.compose_id)
|
||||
|
||||
if repo.startswith("/"):
|
||||
# The repo is an absolute path on the filesystem
|
||||
@ -211,6 +219,15 @@ class OSBSThread(WorkerThread):
|
||||
raise RuntimeError(
|
||||
"There is no variant %s to get repo from to pass to OSBS." % repo
|
||||
)
|
||||
cts_url = compose.conf.get("cts_url", None)
|
||||
if cts_url:
|
||||
return os.path.join(
|
||||
cts_url,
|
||||
"api/1/composes",
|
||||
compose.compose_id,
|
||||
"repo/?variant=%s" % variant,
|
||||
)
|
||||
|
||||
repo_path = compose.paths.compose.repository(
|
||||
"$basearch", variant, create_dir=False
|
||||
)
|
||||
@ -231,3 +248,209 @@ class OSBSThread(WorkerThread):
|
||||
f.write("gpgkey=%s\n" % gpgkey)
|
||||
|
||||
return util.translate_path(compose, repo_file)
|
||||
|
||||
def _try_to_reuse(self, compose, variant, config, image_conf, reuse_file):
|
||||
"""Try to reuse results of old compose.
|
||||
|
||||
:param Compose compose: Current compose.
|
||||
:param Variant variant: Current variant.
|
||||
:param dict config: One osbs config item of compose.conf["osbs"][$variant]
|
||||
:param ConfigParser image_conf: ConfigParser obj of image-build.conf.
|
||||
:param str reuse_file: Path to reuse metadata file
|
||||
"""
|
||||
log_msg = "Cannot reuse old osbs phase results - %s"
|
||||
|
||||
if not compose.conf["osbs_allow_reuse"]:
|
||||
self.pool.log_info(log_msg % "reuse of old osbs results is disabled.")
|
||||
return False
|
||||
|
||||
old_reuse_file = compose.paths.old_compose_path(reuse_file)
|
||||
if not old_reuse_file:
|
||||
self.pool.log_info(log_msg % "Can't find old reuse metadata file")
|
||||
return False
|
||||
|
||||
try:
|
||||
with open(old_reuse_file) as f:
|
||||
old_reuse_metadata = json.load(f)
|
||||
except Exception as e:
|
||||
self.pool.log_info(
|
||||
log_msg % "Can't load old reuse metadata file: %s" % str(e)
|
||||
)
|
||||
return False
|
||||
|
||||
if old_reuse_metadata["config"] != config:
|
||||
self.pool.log_info(log_msg % "osbs config changed")
|
||||
return False
|
||||
|
||||
if not image_conf:
|
||||
self.pool.log_info(log_msg % "Can't get image-build.conf")
|
||||
return False
|
||||
|
||||
# Make sure ksurl not change
|
||||
try:
|
||||
ksurl = self._get_ksurl(image_conf)
|
||||
except Exception as e:
|
||||
self.pool.log_info(
|
||||
log_msg % "Can't get ksurl from image-build.conf - %s" % str(e)
|
||||
)
|
||||
return False
|
||||
|
||||
if not old_reuse_metadata["ksurl"]:
|
||||
self.pool.log_info(
|
||||
log_msg % "Can't get ksurl from old compose reuse metadata."
|
||||
)
|
||||
return False
|
||||
|
||||
if ksurl != old_reuse_metadata["ksurl"]:
|
||||
self.pool.log_info(log_msg % "ksurl changed")
|
||||
return False
|
||||
|
||||
# Make sure buildinstall phase is reused
|
||||
try:
|
||||
arches = image_conf.get("image-build", "arches").split(",")
|
||||
except Exception as e:
|
||||
self.pool.log_info(
|
||||
log_msg % "Can't get arches from image-build.conf - %s" % str(e)
|
||||
)
|
||||
for arch in arches:
|
||||
if not self.pool.buildinstall_phase.reused(variant, arch):
|
||||
self.pool.log_info(
|
||||
log_msg % "buildinstall phase changed %s.%s" % (variant, arch)
|
||||
)
|
||||
return False
|
||||
|
||||
# Make sure rpms installed in image exists in current compose
|
||||
rpm_manifest_file = compose.paths.compose.metadata("rpms.json")
|
||||
rpm_manifest = Rpms()
|
||||
rpm_manifest.load(rpm_manifest_file)
|
||||
rpms = set()
|
||||
for variant in rpm_manifest.rpms:
|
||||
for arch in rpm_manifest.rpms[variant]:
|
||||
for src in rpm_manifest.rpms[variant][arch]:
|
||||
for nevra in rpm_manifest.rpms[variant][arch][src]:
|
||||
rpms.add(nevra)
|
||||
|
||||
for nevra in old_reuse_metadata["rpmlist"]:
|
||||
if nevra not in rpms:
|
||||
self.pool.log_info(
|
||||
log_msg % "%s does not exist in current compose" % nevra
|
||||
)
|
||||
return False
|
||||
|
||||
self.pool.log_info(
|
||||
"Reusing old OSBS task %d result" % old_reuse_file["task_id"]
|
||||
)
|
||||
return old_reuse_file["task_id"]
|
||||
|
||||
def _write_reuse_metadata(
    self, compose, variant, config, image_conf, task_id, archive_ids, reuse_file
):
    """Write metadata to file for reusing.

    :param Compose compose: Current compose.
    :param Variant variant: Current variant.
    :param dict config: One osbs config item of compose.conf["osbs"][$variant]
    :param ConfigParser image_conf: ConfigParser obj of image-build.conf.
    :param int task_id: Koji task id of osbs task.
    :param list archive_ids: List of koji archive id
    :param str reuse_file: Path to reuse metadata file.
    """
    msg = "Writing reuse metadata file %s" % reuse_file
    compose.log_info(msg)

    # Collect the set of RPMs installed in all archives so a later compose
    # can verify they are all still available before reusing this result.
    koji = kojiwrapper.KojiWrapper(compose)
    nevras = set()
    for archive_id in archive_ids:
        for rpm in koji.koji_proxy.listRPMs(imageID=archive_id):
            if rpm["epoch"]:
                nevras.add(
                    "%s:%s-%s-%s.%s"
                    % (
                        rpm["name"],
                        rpm["epoch"],
                        rpm["version"],
                        rpm["release"],
                        rpm["arch"],
                    )
                )
            else:
                nevras.add("%s.%s" % (rpm["nvr"], rpm["arch"]))

    # ksurl is optional metadata; record None when it can't be determined.
    try:
        ksurl = self._get_ksurl(image_conf)
    except Exception:
        ksurl = None

    # A failed write only disables future reuse, so log and carry on.
    try:
        with open(reuse_file, "w") as f:
            json.dump(
                {
                    "config": config,
                    "ksurl": ksurl,
                    "rpmlist": sorted(nevras),
                    "task_id": task_id,
                },
                f,
                indent=4,
            )
    except Exception as e:
        compose.log_info(msg + " failed - %s" % str(e))
|
||||
|
||||
|
||||
def add_metadata(variant, task_id, compose, is_scratch):
    """Given a task ID, find details about the container and add it to global
    metadata.

    Returns a tuple ``(nvr, archive_ids)``; for scratch builds there is no
    build, so ``(None, [])`` is returned instead.
    """
    # Create new Koji session. The task could take so long to finish that
    # our session will expire. This second session does not need to be
    # authenticated since it will only do reading operations.
    koji = kojiwrapper.KojiWrapper(compose)

    # Base metadata shared by all entries created below.
    metadata = {
        "compose_id": compose.compose_id,
        "koji_task": task_id,
    }

    result = koji.koji_proxy.getTaskResult(task_id)

    if is_scratch:
        metadata.update({"repositories": result["repositories"]})
        # add a fake arch of 'scratch', so we can construct the metadata
        # in same data structure as real builds.
        per_variant = compose.containers_metadata.setdefault(variant.uid, {})
        per_variant.setdefault("scratch", []).append(metadata)
        return None, []

    build_id = int(result["koji_builds"][0])
    buildinfo = koji.koji_proxy.getBuild(build_id)
    archives = koji.koji_proxy.listArchives(build_id, type="image")

    nvr = "%(name)s-%(version)s-%(release)s" % buildinfo

    metadata.update(
        {
            "name": buildinfo["name"],
            "version": buildinfo["version"],
            "release": buildinfo["release"],
            "nvr": nvr,
            "creation_time": buildinfo["creation_time"],
        }
    )

    archive_ids = []
    for archive in archives:
        # Per-archive entry: file details plus Koji "extra" data plus the
        # shared build metadata.
        entry = {
            "filename": archive["filename"],
            "size": archive["size"],
            "checksum": archive["checksum"],
        }
        entry.update(archive["extra"])
        entry.update(metadata)
        arch = archive["extra"]["image"]["arch"]
        compose.log_debug(
            "Created Docker base image %s-%s-%s.%s"
            % (metadata["name"], metadata["version"], metadata["release"], arch)
        )
        per_variant = compose.containers_metadata.setdefault(variant.uid, {})
        per_variant.setdefault(arch, []).append(entry)
        archive_ids.append(archive["id"])
    return nvr, archive_ids
|
||||
|
@ -27,6 +27,35 @@ class OSBuildPhase(
|
||||
arches = set(image_conf["arches"]) & arches
|
||||
return sorted(arches)
|
||||
|
||||
@staticmethod
def _get_repo_urls(compose, repos, arch="$basearch"):
    """
    Get list of repos with resolved repo URLs. Preserve repos defined
    as dicts.

    :param compose: Current compose, used to resolve variant UIDs to URLs.
    :param list repos: Items are either strings (URL or variant UID) or
        dicts with a required "baseurl" key.
    :param str arch: Arch placeholder substituted into the resolved URL.
    :return: List of resolved repos; dict entries keep their other keys.
    :raises RuntimeError: When a dict repo has no "baseurl", or a repo
        cannot be resolved to a URL.
    """
    resolved_repos = []

    for repo in repos:
        if isinstance(repo, dict):
            try:
                url = repo["baseurl"]
            except KeyError:
                raise RuntimeError(
                    "`baseurl` is required in repo dict %s" % str(repo)
                )
            url = util.get_repo_url(compose, url, arch=arch)
            if url is None:
                raise RuntimeError("Failed to resolve repo URL for %s" % str(repo))
            repo["baseurl"] = url
            resolved_repos.append(repo)
        else:
            # Resolve into a separate variable: previously `repo` itself was
            # overwritten before the None check, so the error message always
            # reported "None" instead of the repo that failed to resolve.
            url = util.get_repo_url(compose, repo, arch=arch)
            if url is None:
                raise RuntimeError("Failed to resolve repo URL for %s" % repo)
            resolved_repos.append(url)

    return resolved_repos
|
||||
|
||||
def _get_repo(self, image_conf, variant):
|
||||
"""
|
||||
Get a list of repos. First included are those explicitly listed in
|
||||
@ -38,7 +67,7 @@ class OSBuildPhase(
|
||||
if not variant.is_empty and variant.uid not in repos:
|
||||
repos.append(variant.uid)
|
||||
|
||||
return util.get_repo_urls(self.compose, repos, arch="$arch")
|
||||
return OSBuildPhase._get_repo_urls(self.compose, repos, arch="$arch")
|
||||
|
||||
def run(self):
|
||||
for variant in self.compose.get_variants():
|
||||
@ -96,7 +125,12 @@ class RunOSBuildThread(WorkerThread):
|
||||
self.can_fail = can_fail
|
||||
self.num = num
|
||||
with util.failable(
|
||||
compose, can_fail, variant, "*", "osbuild", logger=self.pool._logger,
|
||||
compose,
|
||||
can_fail,
|
||||
variant,
|
||||
"*",
|
||||
"osbuild",
|
||||
logger=self.pool._logger,
|
||||
):
|
||||
self.worker(
|
||||
compose, variant, config, arches, version, release, target, repo
|
||||
@ -105,11 +139,30 @@ class RunOSBuildThread(WorkerThread):
|
||||
def worker(self, compose, variant, config, arches, version, release, target, repo):
|
||||
msg = "OSBuild task for variant %s" % variant.uid
|
||||
self.pool.log_info("[BEGIN] %s" % msg)
|
||||
koji = kojiwrapper.KojiWrapper(compose.conf["koji_profile"])
|
||||
koji = kojiwrapper.KojiWrapper(compose)
|
||||
koji.login()
|
||||
|
||||
ostree = {}
|
||||
if config.get("ostree_url"):
|
||||
ostree["url"] = config["ostree_url"]
|
||||
if config.get("ostree_ref"):
|
||||
ostree["ref"] = config["ostree_ref"]
|
||||
if config.get("ostree_parent"):
|
||||
ostree["parent"] = config["ostree_parent"]
|
||||
|
||||
# Start task
|
||||
opts = {"repo": repo}
|
||||
if ostree:
|
||||
opts["ostree"] = ostree
|
||||
|
||||
upload_options = config.get("upload_options")
|
||||
if upload_options:
|
||||
opts["upload_options"] = upload_options
|
||||
|
||||
customizations = config.get("customizations")
|
||||
if customizations:
|
||||
opts["customizations"] = customizations
|
||||
|
||||
if release:
|
||||
opts["release"] = release
|
||||
task_id = koji.koji_proxy.osbuildImage(
|
||||
@ -122,6 +175,8 @@ class RunOSBuildThread(WorkerThread):
|
||||
opts=opts,
|
||||
)
|
||||
|
||||
koji.save_task_id(task_id)
|
||||
|
||||
# Wait for it to finish and capture the output into log file.
|
||||
log_dir = os.path.join(compose.paths.log.topdir(), "osbuild")
|
||||
util.makedirs(log_dir)
|
||||
@ -130,13 +185,13 @@ class RunOSBuildThread(WorkerThread):
|
||||
)
|
||||
if koji.watch_task(task_id, log_file) != 0:
|
||||
raise RuntimeError(
|
||||
"OSBuild: task %s failed: see %s for details" % (task_id, log_file)
|
||||
"OSBuild task failed: %s. See %s for details" % (task_id, log_file)
|
||||
)
|
||||
|
||||
# Refresh koji session which may have timed out while the task was
|
||||
# running. Watching is done via a subprocess, so the session is
|
||||
# inactive.
|
||||
koji = kojiwrapper.KojiWrapper(compose.conf["koji_profile"])
|
||||
koji = kojiwrapper.KojiWrapper(compose)
|
||||
|
||||
# Get build id via the task's result json data
|
||||
result = koji.koji_proxy.getTaskResult(task_id)
|
||||
@ -148,7 +203,7 @@ class RunOSBuildThread(WorkerThread):
|
||||
# architecture, but we don't verify that.
|
||||
build_info = koji.koji_proxy.getBuild(build_id)
|
||||
for archive in koji.koji_proxy.listArchives(buildID=build_id):
|
||||
if archive["type_name"] not in config["image_types"]:
|
||||
if archive["type_name"] not in EXTENSIONS:
|
||||
# Ignore values that are not of required types.
|
||||
continue
|
||||
|
||||
@ -161,22 +216,36 @@ class RunOSBuildThread(WorkerThread):
|
||||
# image_dir is absolute path to which the image should be copied.
|
||||
# We also need the same path as relative to compose directory for
|
||||
# including in the metadata.
|
||||
image_dir = compose.paths.compose.image_dir(variant) % {"arch": arch}
|
||||
rel_image_dir = compose.paths.compose.image_dir(variant, relative=True) % {
|
||||
"arch": arch
|
||||
}
|
||||
if archive["type_name"] == "iso":
|
||||
# If the produced image is actually an ISO, it should go to
|
||||
# iso/ subdirectory.
|
||||
image_dir = compose.paths.compose.iso_dir(arch, variant)
|
||||
rel_image_dir = compose.paths.compose.iso_dir(
|
||||
arch, variant, relative=True
|
||||
)
|
||||
else:
|
||||
image_dir = compose.paths.compose.image_dir(variant) % {"arch": arch}
|
||||
rel_image_dir = compose.paths.compose.image_dir(
|
||||
variant, relative=True
|
||||
) % {"arch": arch}
|
||||
util.makedirs(image_dir)
|
||||
|
||||
image_dest = os.path.join(image_dir, archive["filename"])
|
||||
|
||||
src_file = os.path.join(
|
||||
koji.koji_module.pathinfo.imagebuild(build_info), archive["filename"]
|
||||
src_file = compose.koji_downloader.get_file(
|
||||
os.path.join(
|
||||
koji.koji_module.pathinfo.imagebuild(build_info),
|
||||
archive["filename"],
|
||||
),
|
||||
)
|
||||
|
||||
linker.link(src_file, image_dest, link_type=compose.conf["link_type"])
|
||||
|
||||
suffix = archive["filename"].rsplit(".", 1)[-1]
|
||||
if suffix not in EXTENSIONS[archive["type_name"]]:
|
||||
for suffix in EXTENSIONS[archive["type_name"]]:
|
||||
if archive["filename"].endswith(suffix):
|
||||
break
|
||||
else:
|
||||
# No suffix matched.
|
||||
raise RuntimeError(
|
||||
"Failed to generate metadata. Format %s doesn't match type %s"
|
||||
% (suffix, archive["type_name"])
|
||||
@ -184,7 +253,24 @@ class RunOSBuildThread(WorkerThread):
|
||||
|
||||
# Update image manifest
|
||||
img = Image(compose.im)
|
||||
img.type = archive["type_name"]
|
||||
|
||||
# Get the manifest type from the config if supplied, otherwise we
|
||||
# determine the manifest type based on the koji output
|
||||
img.type = config.get("manifest_type")
|
||||
if not img.type:
|
||||
if archive["type_name"] != "iso":
|
||||
img.type = archive["type_name"]
|
||||
else:
|
||||
fn = archive["filename"].lower()
|
||||
if "ostree" in fn:
|
||||
img.type = "dvd-ostree-osbuild"
|
||||
elif "live" in fn:
|
||||
img.type = "live-osbuild"
|
||||
elif "netinst" in fn or "boot" in fn:
|
||||
img.type = "boot"
|
||||
else:
|
||||
img.type = "dvd"
|
||||
|
||||
img.format = suffix
|
||||
img.path = os.path.join(rel_image_dir, archive["filename"])
|
||||
img.mtime = util.get_mtime(image_dest)
|
||||
|
@ -85,7 +85,7 @@ class OSTreeThread(WorkerThread):
|
||||
comps_repo = compose.paths.work.comps_repo(
|
||||
"$basearch", variant=variant, create_dir=False
|
||||
)
|
||||
repos = shortcuts.force_list(config["repo"]) + self.repos
|
||||
repos = shortcuts.force_list(config.get("repo", [])) + self.repos
|
||||
if compose.has_comps:
|
||||
repos.append(translate_path(compose, comps_repo))
|
||||
repos = get_repo_dicts(repos, logger=self.pool)
|
||||
@ -165,9 +165,12 @@ class OSTreeThread(WorkerThread):
|
||||
("update-summary", config.get("update_summary", False)),
|
||||
("ostree-ref", config.get("ostree_ref")),
|
||||
("force-new-commit", config.get("force_new_commit", False)),
|
||||
("unified-core", config.get("unified_core", False)),
|
||||
]
|
||||
)
|
||||
packages = ["pungi", "ostree", "rpm-ostree"]
|
||||
default_packages = ["pungi", "ostree", "rpm-ostree"]
|
||||
additional_packages = config.get("runroot_packages", [])
|
||||
packages = default_packages + additional_packages
|
||||
log_file = os.path.join(self.logdir, "runroot.log")
|
||||
mounts = [compose.topdir, config["ostree_repo"]]
|
||||
runroot = Runroot(compose, phase="ostree")
|
||||
|
190
pungi/phases/ostree_container.py
Normal file
190
pungi/phases/ostree_container.py
Normal file
@ -0,0 +1,190 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import copy
|
||||
import json
|
||||
import os
|
||||
from kobo import shortcuts
|
||||
from kobo.threads import ThreadPool, WorkerThread
|
||||
|
||||
from productmd.images import Image
|
||||
|
||||
from pungi.runroot import Runroot
|
||||
from .base import ConfigGuardedPhase
|
||||
from .. import util
|
||||
from ..util import get_repo_dicts, translate_path
|
||||
from ..wrappers import scm
|
||||
|
||||
|
||||
class OSTreeContainerPhase(ConfigGuardedPhase):
    """Phase that runs the ``ostree_container`` config blocks, building one
    OSTree container per (variant, arch, config) combination via a worker
    thread pool."""

    name = "ostree_container"

    def __init__(self, compose, pkgset_phase=None):
        super(OSTreeContainerPhase, self).__init__(compose)
        self.pool = ThreadPool(logger=self.compose._logger)
        self.pkgset_phase = pkgset_phase

    def get_repos(self):
        """Return translated repo URLs, one per package set."""
        repos = []
        for pkgset in self.pkgset_phase.package_sets:
            repo_path = self.compose.paths.work.pkgset_repo(
                pkgset.name, "$basearch", create_dir=False
            )
            repos.append(translate_path(self.compose, repo_path))
        return repos

    def _enqueue(self, variant, arch, conf):
        # One worker thread per queued item; the pool pairs them up.
        self.pool.add(OSTreeContainerThread(self.pool, self.get_repos()))
        self.pool.queue_put((self.compose, variant, arch, conf))

    def run(self):
        # A dict-valued config block uses the new per-variant layout with an
        # optional "arches" list; anything else follows the legacy layout.
        uses_new_layout = isinstance(self.compose.conf.get(self.name), dict)
        for variant in self.compose.get_variants():
            if uses_new_layout:
                for conf in self.get_config_block(variant):
                    for arch in conf.get("arches", []) or variant.arches:
                        self._enqueue(variant, arch, conf)
            else:
                # Legacy code path to support original configuration.
                for arch in variant.arches:
                    for conf in self.get_config_block(variant, arch):
                        self._enqueue(variant, arch, conf)

        self.pool.start()
|
||||
|
||||
|
||||
class OSTreeContainerThread(WorkerThread):
    """Worker that builds one OSTree native container for a single
    (variant, arch, config) item queued by OSTreeContainerPhase."""

    def __init__(self, pool, repos):
        super(OSTreeContainerThread, self).__init__(pool)
        # Translated package-set repo URLs supplied by the phase; merged with
        # any repos listed in the item's config.
        self.repos = repos

    def process(self, item, num):
        """Unpack one queued item and run it under failability handling.

        :param tuple item: (compose, variant, arch, config).
        :param int num: Sequence number used for work/log directory names.
        """
        compose, variant, arch, config = item
        self.num = num
        failable_arches = config.get("failable", [])
        self.can_fail = util.can_arch_fail(failable_arches, arch)
        with util.failable(compose, self.can_fail, variant, arch, "ostree-container"):
            self.worker(compose, variant, arch, config)

    def worker(self, compose, variant, arch, config):
        """Prepare inputs (config repo clone, repo list, extra-config JSON)
        and run the container build command."""
        msg = "OSTree container phase for variant %s, arch %s" % (variant.uid, arch)
        self.pool.log_info("[BEGIN] %s" % msg)
        workdir = compose.paths.work.topdir("ostree-container-%d" % self.num)
        self.logdir = compose.paths.log.topdir(
            "%s/%s/ostree-container-%d" % (arch, variant.uid, self.num)
        )
        # Clone the git repository holding the treefile configuration.
        repodir = os.path.join(workdir, "config_repo")
        self._clone_repo(
            compose,
            repodir,
            config["config_url"],
            config.get("config_branch", "main"),
        )

        repos = shortcuts.force_list(config.get("repo", [])) + self.repos
        repos = get_repo_dicts(repos, logger=self.pool)

        # copy the original config and update before save to a json file
        new_config = copy.copy(config)

        # repos in configuration can have repo url set to variant UID,
        # update it to have the actual url that we just translated.
        new_config.update({"repo": repos})

        # remove elements unnecessary for the 'pungi-make-ostree container'
        # script from the config; it doesn't hurt to have them, but removing
        # them reduces confusion
        for k in [
            "treefile",
            "config_url",
            "config_branch",
            "failable",
            "version",
        ]:
            new_config.pop(k, None)

        # write a json file to save the configuration, so 'pungi-make-ostree tree'
        # can take use of it
        extra_config_file = os.path.join(workdir, "extra_config.json")
        with open(extra_config_file, "w") as f:
            json.dump(new_config, f, indent=4)

        self._run_ostree_container_cmd(
            compose, variant, arch, config, repodir, extra_config_file=extra_config_file
        )

        self.pool.log_info("[DONE ] %s" % (msg))

    def _run_ostree_container_cmd(
        self, compose, variant, arch, config, config_repo, extra_config_file=None
    ):
        """Generate the runroot script locally, execute it in a runroot
        environment, then register the resulting .ociarchive in the image
        manifest.

        :param str config_repo: Path to the cloned configuration repository.
        :param str extra_config_file: Path to the JSON file written by worker().
        """
        target_dir = compose.paths.compose.image_dir(variant) % {"arch": arch}
        util.makedirs(target_dir)
        version = util.version_generator(compose, config.get("version"))
        archive_name = "%s-%s-%s" % (
            compose.conf["release_short"],
            variant.uid,
            version,
        )

        # Run the pungi-make-ostree command locally to create a script to
        # execute in runroot environment.
        cmd = [
            "pungi-make-ostree",
            "container",
            "--log-dir=%s" % self.logdir,
            "--name=%s" % archive_name,
            "--path=%s" % target_dir,
            "--treefile=%s" % os.path.join(config_repo, config["treefile"]),
            "--extra-config=%s" % extra_config_file,
            "--version=%s" % version,
        ]

        _, runroot_script = shortcuts.run(cmd, universal_newlines=True)

        default_packages = ["ostree", "rpm-ostree", "selinux-policy-targeted"]
        additional_packages = config.get("runroot_packages", [])
        packages = default_packages + additional_packages
        log_file = os.path.join(self.logdir, "runroot.log")
        # TODO: Use to get previous build
        mounts = [compose.topdir]

        runroot = Runroot(compose, phase="ostree_container")
        # The generated script is one command per line; chain them with &&
        # so the runroot task fails fast on the first error.
        runroot.run(
            " && ".join(runroot_script.splitlines()),
            log_file=log_file,
            arch=arch,
            packages=packages,
            mounts=mounts,
            new_chroot=True,
            weight=compose.conf["runroot_weights"].get("ostree"),
        )

        fullpath = os.path.join(target_dir, "%s.ociarchive" % archive_name)

        # Update image manifest. Type and format are fixed: this phase only
        # ever produces an OCI archive.
        img = Image(compose.im)

        img.type = "ociarchive"
        img.format = "ociarchive"
        img.path = os.path.relpath(fullpath, compose.paths.compose.topdir())
        img.mtime = util.get_mtime(fullpath)
        img.size = util.get_file_size(fullpath)
        img.arch = arch
        img.disc_number = 1
        img.disc_count = 1
        img.bootable = False
        img.subvariant = config.get("subvariant", variant.uid)
        setattr(img, "can_fail", self.can_fail)
        setattr(img, "deliverable", "ostree-container")
        compose.im.add(variant=variant.uid, arch=arch, image=img)

    def _clone_repo(self, compose, repodir, url, branch):
        """Clone *url* at *branch* into *repodir* via the scm wrapper."""
        scm.get_dir_from_scm(
            {"scm": "git", "repo": url, "branch": branch, "dir": "."},
            repodir,
            compose=compose,
        )
|
@ -272,6 +272,7 @@ class OstreeInstallerThread(WorkerThread):
|
||||
rootfs_size=config.get("rootfs_size"),
|
||||
is_final=compose.supported,
|
||||
log_dir=self.logdir,
|
||||
skip_branding=config.get("skip_branding"),
|
||||
)
|
||||
cmd = "rm -rf %s && %s" % (
|
||||
shlex_quote(output_dir),
|
||||
|
@ -29,13 +29,10 @@ class PkgsetPhase(PhaseBase):
|
||||
self.path_prefix = None
|
||||
|
||||
def run(self):
|
||||
pkgset_source = "PkgsetSource%s" % self.compose.conf["pkgset_source"]
|
||||
from .source import PkgsetSourceContainer
|
||||
from . import sources
|
||||
|
||||
PkgsetSourceContainer.register_module(sources)
|
||||
container = PkgsetSourceContainer()
|
||||
SourceClass = container[pkgset_source]
|
||||
SourceClass = sources.ALL_SOURCES[self.compose.conf["pkgset_source"].lower()]
|
||||
|
||||
self.package_sets, self.path_prefix = SourceClass(self.compose)()
|
||||
|
||||
def validate(self):
|
||||
|
@ -28,18 +28,27 @@ from pungi.util import (
|
||||
PartialFuncWorkerThread,
|
||||
PartialFuncThreadPool,
|
||||
)
|
||||
from pungi.module_util import Modulemd, collect_module_defaults
|
||||
from pungi.module_util import (
|
||||
Modulemd,
|
||||
collect_module_defaults,
|
||||
collect_module_obsoletes,
|
||||
)
|
||||
from pungi.phases.createrepo import add_modular_metadata
|
||||
|
||||
|
||||
def populate_arch_pkgsets(compose, path_prefix, global_pkgset):
|
||||
result = {}
|
||||
exclusive_noarch = compose.conf["pkgset_exclusive_arch_considers_noarch"]
|
||||
|
||||
for arch in compose.get_arches():
|
||||
compose.log_info("Populating package set for arch: %s", arch)
|
||||
is_multilib = is_arch_multilib(compose.conf, arch)
|
||||
arches = get_valid_arches(arch, is_multilib, add_src=True)
|
||||
pkgset = global_pkgset.subset(arch, arches, exclusive_noarch=exclusive_noarch)
|
||||
pkgset = global_pkgset.subset(
|
||||
arch,
|
||||
arches,
|
||||
exclusive_noarch=compose.conf["pkgset_exclusive_arch_considers_noarch"],
|
||||
inherit_to_noarch=compose.conf["pkgset_inherit_exclusive_arch_to_noarch"],
|
||||
)
|
||||
pkgset.save_file_list(
|
||||
compose.paths.work.package_list(arch=arch, pkgset=global_pkgset),
|
||||
remove_path_prefix=path_prefix,
|
||||
@ -159,6 +168,9 @@ def _create_arch_repo(worker_thread, args, task_num):
|
||||
mod_index = collect_module_defaults(
|
||||
compose.paths.work.module_defaults_dir(), names, overrides_dir=overrides_dir
|
||||
)
|
||||
mod_index = collect_module_obsoletes(
|
||||
compose.paths.work.module_obsoletes_dir(), names, mod_index
|
||||
)
|
||||
for x in mmd:
|
||||
mod_index.add_module_stream(x)
|
||||
add_modular_metadata(
|
||||
|
@ -22,17 +22,22 @@ It automatically finds a signed copies according to *sigkey_ordering*.
|
||||
import itertools
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
import pgpy
|
||||
import rpm
|
||||
from six.moves import cPickle as pickle
|
||||
from functools import partial
|
||||
|
||||
import kobo.log
|
||||
import kobo.pkgset
|
||||
import kobo.rpmlib
|
||||
from kobo.shortcuts import compute_file_checksums
|
||||
|
||||
from kobo.threads import WorkerThread, ThreadPool
|
||||
|
||||
import pungi.wrappers.kojiwrapper
|
||||
from pungi.util import pkg_is_srpm, copy_all
|
||||
from pungi.arch import get_valid_arches, is_excluded
|
||||
from pungi.errors import UnsignedPackagesError
|
||||
|
||||
|
||||
class ExtendedRpmWrapper(kobo.pkgset.SimpleRpmWrapper):
|
||||
@ -144,14 +149,20 @@ class PackageSetBase(kobo.log.LoggingBase):
|
||||
|
||||
def raise_invalid_sigkeys_exception(self, rpminfos):
|
||||
"""
|
||||
Raises RuntimeError containing details of RPMs with invalid
|
||||
Raises UnsignedPackagesError containing details of RPMs with invalid
|
||||
sigkeys defined in `rpminfos`.
|
||||
"""
|
||||
|
||||
def nvr_formatter(package_info):
|
||||
# joins NVR parts of the package with '-' character.
|
||||
return "-".join(
|
||||
(package_info["name"], package_info["version"], package_info["release"])
|
||||
epoch_suffix = ''
|
||||
if package_info['epoch'] is not None:
|
||||
epoch_suffix = ':' + package_info['epoch']
|
||||
return (
|
||||
f"{package_info['name']}"
|
||||
f"{epoch_suffix}-"
|
||||
f"{package_info['version']}-"
|
||||
f"{package_info['release']}."
|
||||
f"{package_info['arch']}"
|
||||
)
|
||||
|
||||
def get_error(sigkeys, infos):
|
||||
@ -166,7 +177,9 @@ class PackageSetBase(kobo.log.LoggingBase):
|
||||
|
||||
if not isinstance(rpminfos, dict):
|
||||
rpminfos = {self.sigkey_ordering: rpminfos}
|
||||
raise RuntimeError("\n".join(get_error(k, v) for k, v in rpminfos.items()))
|
||||
raise UnsignedPackagesError(
|
||||
"\n".join(get_error(k, v) for k, v in rpminfos.items())
|
||||
)
|
||||
|
||||
def read_packages(self, rpms, srpms):
|
||||
srpm_pool = ReaderPool(self, self._logger)
|
||||
@ -200,16 +213,31 @@ class PackageSetBase(kobo.log.LoggingBase):
|
||||
|
||||
return self.rpms_by_arch
|
||||
|
||||
def subset(self, primary_arch, arch_list, exclusive_noarch=True):
|
||||
def subset(
|
||||
self, primary_arch, arch_list, exclusive_noarch=True, inherit_to_noarch=True
|
||||
):
|
||||
"""Create a subset of this package set that only includes
|
||||
packages compatible with"""
|
||||
pkgset = PackageSetBase(
|
||||
self.name, self.sigkey_ordering, logger=self._logger, arches=arch_list
|
||||
)
|
||||
pkgset.merge(self, primary_arch, arch_list, exclusive_noarch=exclusive_noarch)
|
||||
pkgset.merge(
|
||||
self,
|
||||
primary_arch,
|
||||
arch_list,
|
||||
exclusive_noarch=exclusive_noarch,
|
||||
inherit_to_noarch=inherit_to_noarch,
|
||||
)
|
||||
return pkgset
|
||||
|
||||
def merge(self, other, primary_arch, arch_list, exclusive_noarch=True):
|
||||
def merge(
|
||||
self,
|
||||
other,
|
||||
primary_arch,
|
||||
arch_list,
|
||||
exclusive_noarch=True,
|
||||
inherit_to_noarch=True,
|
||||
):
|
||||
"""
|
||||
Merge ``other`` package set into this instance.
|
||||
"""
|
||||
@ -248,7 +276,7 @@ class PackageSetBase(kobo.log.LoggingBase):
|
||||
if i.file_path in self.file_cache:
|
||||
# TODO: test if it really works
|
||||
continue
|
||||
if exclusivearch_list and arch == "noarch":
|
||||
if inherit_to_noarch and exclusivearch_list and arch == "noarch":
|
||||
if is_excluded(i, exclusivearch_list, logger=self._logger):
|
||||
continue
|
||||
|
||||
@ -315,6 +343,11 @@ class FilelistPackageSet(PackageSetBase):
|
||||
return result
|
||||
|
||||
|
||||
# This is a marker to indicate package set with only extra builds/tasks and no
|
||||
# tasks.
|
||||
MISSING_KOJI_TAG = object()
|
||||
|
||||
|
||||
class KojiPackageSet(PackageSetBase):
|
||||
def __init__(
|
||||
self,
|
||||
@ -329,6 +362,9 @@ class KojiPackageSet(PackageSetBase):
|
||||
cache_region=None,
|
||||
extra_builds=None,
|
||||
extra_tasks=None,
|
||||
signed_packages_retries=0,
|
||||
signed_packages_wait=30,
|
||||
downloader=None,
|
||||
):
|
||||
"""
|
||||
Creates new KojiPackageSet.
|
||||
@ -361,9 +397,12 @@ class KojiPackageSet(PackageSetBase):
|
||||
:param list extra_tasks: Extra RPMs defined as Koji task IDs to get from Koji
|
||||
and include in the package set. Useful when building testing compose
|
||||
with RPM scratch builds.
|
||||
:param int signed_packages_retries: How many times should a search for
|
||||
signed package be repeated.
|
||||
:param int signed_packages_wait: How long to wait between search attemts.
|
||||
"""
|
||||
super(KojiPackageSet, self).__init__(
|
||||
name,
|
||||
name if name != MISSING_KOJI_TAG else "no-tag",
|
||||
sigkey_ordering=sigkey_ordering,
|
||||
arches=arches,
|
||||
logger=logger,
|
||||
@ -377,10 +416,13 @@ class KojiPackageSet(PackageSetBase):
|
||||
self.extra_builds = extra_builds or []
|
||||
self.extra_tasks = extra_tasks or []
|
||||
self.reuse = None
|
||||
self.signed_packages_retries = signed_packages_retries
|
||||
self.signed_packages_wait = signed_packages_wait
|
||||
|
||||
self.downloader = downloader
|
||||
|
||||
def __getstate__(self):
|
||||
result = self.__dict__.copy()
|
||||
result["koji_profile"] = self.koji_wrapper.profile
|
||||
del result["koji_wrapper"]
|
||||
del result["_logger"]
|
||||
if "cache_region" in result:
|
||||
@ -388,8 +430,6 @@ class KojiPackageSet(PackageSetBase):
|
||||
return result
|
||||
|
||||
def __setstate__(self, data):
|
||||
koji_profile = data.pop("koji_profile")
|
||||
self.koji_wrapper = pungi.wrappers.kojiwrapper.KojiWrapper(koji_profile)
|
||||
self._logger = None
|
||||
self.__dict__.update(data)
|
||||
|
||||
@ -471,7 +511,8 @@ class KojiPackageSet(PackageSetBase):
|
||||
|
||||
response = None
|
||||
if self.cache_region:
|
||||
cache_key = "KojiPackageSet.get_latest_rpms_%s_%s_%s" % (
|
||||
cache_key = "%s.get_latest_rpms_%s_%s_%s" % (
|
||||
str(self.__class__.__name__),
|
||||
str(tag),
|
||||
str(event),
|
||||
str(inherit),
|
||||
@ -493,27 +534,84 @@ class KojiPackageSet(PackageSetBase):
|
||||
|
||||
return response
|
||||
|
||||
|
||||
|
||||
def get_package_path(self, queue_item):
|
||||
rpm_info, build_info = queue_item
|
||||
|
||||
# Check if this RPM is coming from scratch task. In this case, we already
|
||||
# know the path.
|
||||
if "path_from_task" in rpm_info:
|
||||
return rpm_info["path_from_task"]
|
||||
return self.downloader.get_file(rpm_info["path_from_task"])
|
||||
|
||||
# we replaced this part because pungi uses way
|
||||
# of guessing path of package on koji based on sigkey
|
||||
# we don't need that because all our packages will
|
||||
# be ready for release
|
||||
# signature verification is still done during deps resolution
|
||||
pathinfo = self.koji_wrapper.koji_module.pathinfo
|
||||
paths = []
|
||||
|
||||
if "getRPMChecksums" in self.koji_proxy.system.listMethods():
|
||||
|
||||
def checksum_validator(keyname, pkg_path):
|
||||
checksums = self.koji_proxy.getRPMChecksums(
|
||||
rpm_info["id"], checksum_types=("sha256",)
|
||||
)
|
||||
if "sha256" in checksums.get(keyname, {}):
|
||||
computed = compute_file_checksums(pkg_path, ("sha256",))
|
||||
if computed["sha256"] != checksums[keyname]["sha256"]:
|
||||
raise RuntimeError("Checksum mismatch for %s" % pkg_path)
|
||||
|
||||
rpm_path = os.path.join(pathinfo.topdir, pathinfo.rpm(rpm_info))
|
||||
if os.path.isfile(rpm_path):
|
||||
return rpm_path
|
||||
else:
|
||||
self.log_warning("RPM %s not found" % rpm_path)
|
||||
return None
|
||||
|
||||
def checksum_validator(keyname, pkg_path):
|
||||
# Koji doesn't support checksums yet
|
||||
pass
|
||||
|
||||
attempts_left = self.signed_packages_retries + 1
|
||||
while attempts_left > 0:
|
||||
for sigkey in self.sigkey_ordering:
|
||||
if not sigkey:
|
||||
# we're looking for *signed* copies here
|
||||
continue
|
||||
sigkey = sigkey.lower()
|
||||
rpm_path = os.path.join(
|
||||
pathinfo.build(build_info), pathinfo.signed(rpm_info, sigkey)
|
||||
)
|
||||
if rpm_path not in paths:
|
||||
paths.append(rpm_path)
|
||||
path = self.downloader.get_file(
|
||||
rpm_path, partial(checksum_validator, sigkey)
|
||||
)
|
||||
if path:
|
||||
return path
|
||||
|
||||
# No signed copy was found, wait a little and try again.
|
||||
attempts_left -= 1
|
||||
if attempts_left > 0:
|
||||
nvr = "%(name)s-%(version)s-%(release)s" % rpm_info
|
||||
self.log_debug("Waiting for signed package to appear for %s", nvr)
|
||||
time.sleep(self.signed_packages_wait)
|
||||
|
||||
if None in self.sigkey_ordering or "" in self.sigkey_ordering:
|
||||
# use an unsigned copy (if allowed)
|
||||
rpm_path = os.path.join(pathinfo.build(build_info), pathinfo.rpm(rpm_info))
|
||||
paths.append(rpm_path)
|
||||
path = self.downloader.get_file(rpm_path, partial(checksum_validator, ""))
|
||||
if path:
|
||||
return path
|
||||
|
||||
if self._allow_invalid_sigkeys and rpm_info["name"] not in self.packages:
|
||||
# use an unsigned copy (if allowed)
|
||||
rpm_path = os.path.join(pathinfo.build(build_info), pathinfo.rpm(rpm_info))
|
||||
paths.append(rpm_path)
|
||||
path = self.downloader.get_file(rpm_path)
|
||||
if path:
|
||||
self._invalid_sigkey_rpms.append(rpm_info)
|
||||
return path
|
||||
|
||||
self._invalid_sigkey_rpms.append(rpm_info)
|
||||
self.log_error(
|
||||
"RPM %s not found for sigs: %s. Paths checked: %s"
|
||||
% (rpm_info, self.sigkey_ordering, paths)
|
||||
)
|
||||
return None
|
||||
|
||||
def populate(self, tag, event=None, inherit=True, include_packages=None):
|
||||
"""Populate the package set with packages from given tag.
|
||||
@ -527,7 +625,7 @@ class KojiPackageSet(PackageSetBase):
|
||||
result_srpms = []
|
||||
include_packages = set(include_packages or [])
|
||||
|
||||
if type(event) is dict:
|
||||
if isinstance(event, dict):
|
||||
event = event["id"]
|
||||
|
||||
msg = "Getting latest RPMs (tag: %s, event: %s, inherit: %s)" % (
|
||||
@ -536,7 +634,9 @@ class KojiPackageSet(PackageSetBase):
|
||||
inherit,
|
||||
)
|
||||
self.log_info("[BEGIN] %s" % msg)
|
||||
rpms, builds = self.get_latest_rpms(tag, event, inherit=inherit)
|
||||
rpms, builds = [], []
|
||||
if tag != MISSING_KOJI_TAG:
|
||||
rpms, builds = self.get_latest_rpms(tag, event, inherit=inherit)
|
||||
extra_rpms, extra_builds = self.get_extra_rpms()
|
||||
rpms += extra_rpms
|
||||
builds += extra_builds
|
||||
@ -641,6 +741,15 @@ class KojiPackageSet(PackageSetBase):
|
||||
:param include_packages: an iterable of tuples (package name, arch) that should
|
||||
be included.
|
||||
"""
|
||||
if len(self.sigkey_ordering) > 1 and (
|
||||
None in self.sigkey_ordering or "" in self.sigkey_ordering
|
||||
):
|
||||
self.log_warning(
|
||||
"Stop writing reuse file as unsigned packages are allowed "
|
||||
"in the compose."
|
||||
)
|
||||
return
|
||||
|
||||
reuse_file = compose.paths.work.pkgset_reuse_file(self.name)
|
||||
self.log_info("Writing pkgset reuse file: %s" % reuse_file)
|
||||
try:
|
||||
@ -657,6 +766,13 @@ class KojiPackageSet(PackageSetBase):
|
||||
"srpms_by_name": self.srpms_by_name,
|
||||
"extra_builds": self.extra_builds,
|
||||
"include_packages": include_packages,
|
||||
"inherit_to_noarch": compose.conf[
|
||||
"pkgset_inherit_exclusive_arch_to_noarch"
|
||||
],
|
||||
"exclusive_noarch": compose.conf[
|
||||
"pkgset_exclusive_arch_considers_noarch"
|
||||
],
|
||||
"module_defaults_dir": compose.conf.get("module_defaults_dir"),
|
||||
},
|
||||
f,
|
||||
protocol=pickle.HIGHEST_PROTOCOL,
|
||||
@ -703,20 +819,26 @@ class KojiPackageSet(PackageSetBase):
|
||||
% (old_koji_event, koji_event)
|
||||
)
|
||||
changed = self.koji_proxy.queryHistory(
|
||||
tables=["tag_listing"], tag=tag, afterEvent=old_koji_event
|
||||
tables=["tag_listing", "tag_inheritance"],
|
||||
tag=tag,
|
||||
afterEvent=min(koji_event, old_koji_event),
|
||||
beforeEvent=max(koji_event, old_koji_event) + 1,
|
||||
)
|
||||
if changed["tag_listing"]:
|
||||
self.log_debug("Builds under tag %s changed. Can't reuse." % tag)
|
||||
return False
|
||||
if changed["tag_inheritance"]:
|
||||
self.log_debug("Tag inheritance %s changed. Can't reuse." % tag)
|
||||
return False
|
||||
|
||||
if inherit:
|
||||
inherit_tags = self.koji_proxy.getFullInheritance(tag, koji_event)
|
||||
for t in inherit_tags:
|
||||
changed = self.koji_proxy.queryHistory(
|
||||
tables=["tag_listing"],
|
||||
tables=["tag_listing", "tag_inheritance"],
|
||||
tag=t["name"],
|
||||
afterEvent=old_koji_event,
|
||||
beforeEvent=koji_event + 1,
|
||||
afterEvent=min(koji_event, old_koji_event),
|
||||
beforeEvent=max(koji_event, old_koji_event) + 1,
|
||||
)
|
||||
if changed["tag_listing"]:
|
||||
self.log_debug(
|
||||
@ -724,6 +846,9 @@ class KojiPackageSet(PackageSetBase):
|
||||
% t["name"]
|
||||
)
|
||||
return False
|
||||
if changed["tag_inheritance"]:
|
||||
self.log_debug("Tag inheritance %s changed. Can't reuse." % tag)
|
||||
return False
|
||||
|
||||
repo_dir = compose.paths.work.pkgset_repo(tag, create_dir=False)
|
||||
old_repo_dir = compose.paths.old_compose_path(repo_dir)
|
||||
@ -742,6 +867,9 @@ class KojiPackageSet(PackageSetBase):
|
||||
self.log_debug("Failed to load reuse file: %s" % str(e))
|
||||
return False
|
||||
|
||||
inherit_to_noarch = compose.conf["pkgset_inherit_exclusive_arch_to_noarch"]
|
||||
exclusive_noarch = compose.conf["pkgset_exclusive_arch_considers_noarch"]
|
||||
module_defaults_dir = compose.conf.get("module_defaults_dir")
|
||||
if (
|
||||
reuse_data["allow_invalid_sigkeys"] == self._allow_invalid_sigkeys
|
||||
and reuse_data["packages"] == self.packages
|
||||
@ -749,6 +877,11 @@ class KojiPackageSet(PackageSetBase):
|
||||
and reuse_data["extra_builds"] == self.extra_builds
|
||||
and reuse_data["sigkeys"] == self.sigkey_ordering
|
||||
and reuse_data["include_packages"] == include_packages
|
||||
# If the value is not present in reuse data, the compose was
|
||||
# generated with older version of Pungi. Best to not reuse.
|
||||
and reuse_data.get("inherit_to_noarch") == inherit_to_noarch
|
||||
and reuse_data.get("exclusive_noarch") == exclusive_noarch
|
||||
and reuse_data.get("module_defaults_dir") == module_defaults_dir
|
||||
):
|
||||
self.log_info("Copying repo data for reuse: %s" % old_repo_dir)
|
||||
copy_all(old_repo_dir, repo_dir)
|
||||
@ -763,6 +896,67 @@ class KojiPackageSet(PackageSetBase):
|
||||
return False
|
||||
|
||||
|
||||
class KojiMockPackageSet(KojiPackageSet):
|
||||
|
||||
def _is_rpm_signed(self, rpm_path) -> bool:
|
||||
ts = rpm.TransactionSet()
|
||||
ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
|
||||
sigkeys = [
|
||||
sigkey.lower() for sigkey in self.sigkey_ordering
|
||||
if sigkey is not None
|
||||
]
|
||||
if not sigkeys:
|
||||
return True
|
||||
with open(rpm_path, 'rb') as fd:
|
||||
header = ts.hdrFromFdno(fd)
|
||||
signature = header[rpm.RPMTAG_SIGGPG] or header[rpm.RPMTAG_SIGPGP]
|
||||
if signature is None:
|
||||
return False
|
||||
pgp_msg = pgpy.PGPMessage.from_blob(signature)
|
||||
return any(
|
||||
signature.signer.lower() in sigkeys
|
||||
for signature in pgp_msg.signatures
|
||||
)
|
||||
|
||||
def get_package_path(self, queue_item):
|
||||
rpm_info, build_info = queue_item
|
||||
|
||||
# Check if this RPM is coming from scratch task.
|
||||
# In this case, we already know the path.
|
||||
if "path_from_task" in rpm_info:
|
||||
return rpm_info["path_from_task"]
|
||||
|
||||
# we replaced this part because pungi uses way
|
||||
# of guessing path of package on koji based on sigkey
|
||||
# we don't need that because all our packages will
|
||||
# be ready for release
|
||||
# signature verification is still done during deps resolution
|
||||
pathinfo = self.koji_wrapper.koji_module.pathinfo
|
||||
|
||||
rpm_path = os.path.join(pathinfo.topdir, pathinfo.rpm(rpm_info))
|
||||
if os.path.isfile(rpm_path):
|
||||
if not self._is_rpm_signed(rpm_path):
|
||||
self._invalid_sigkey_rpms.append(rpm_info)
|
||||
self.log_error(
|
||||
'RPM "%s" not found for sigs: "%s". Path checked: "%s"',
|
||||
rpm_info, self.sigkey_ordering, rpm_path
|
||||
)
|
||||
return
|
||||
return rpm_path
|
||||
else:
|
||||
self.log_warning("RPM %s not found" % rpm_path)
|
||||
return None
|
||||
|
||||
def populate(self, tag, event=None, inherit=True, include_packages=None):
|
||||
result = super().populate(
|
||||
tag=tag,
|
||||
event=event,
|
||||
inherit=inherit,
|
||||
include_packages=include_packages,
|
||||
)
|
||||
return result
|
||||
|
||||
|
||||
def _is_src(rpm_info):
|
||||
"""Check if rpm info object returned by Koji refers to source packages."""
|
||||
return rpm_info["arch"] in ("src", "nosrc")
|
||||
|
@ -14,15 +14,6 @@
|
||||
# along with this program; if not, see <https://gnu.org/licenses/>.
|
||||
|
||||
|
||||
import kobo.plugins
|
||||
|
||||
|
||||
class PkgsetSourceBase(kobo.plugins.Plugin):
|
||||
class PkgsetSourceBase(object):
|
||||
def __init__(self, compose):
|
||||
self.compose = compose
|
||||
|
||||
|
||||
class PkgsetSourceContainer(kobo.plugins.PluginContainer):
|
||||
@classmethod
|
||||
def normalize_name(cls, name):
|
||||
return name.lower()
|
||||
|
@ -0,0 +1,24 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; version 2 of the License.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Library General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, see <https://gnu.org/licenses/>.
|
||||
|
||||
from .source_koji import PkgsetSourceKoji
|
||||
from .source_repos import PkgsetSourceRepos
|
||||
from .source_kojimock import PkgsetSourceKojiMock
|
||||
|
||||
ALL_SOURCES = {
|
||||
"koji": PkgsetSourceKoji,
|
||||
"repos": PkgsetSourceRepos,
|
||||
"kojimock": PkgsetSourceKojiMock,
|
||||
}
|
@ -23,19 +23,19 @@ from itertools import groupby
|
||||
|
||||
from kobo.rpmlib import parse_nvra
|
||||
from kobo.shortcuts import force_list
|
||||
from typing import (
|
||||
Dict,
|
||||
AnyStr,
|
||||
List,
|
||||
Tuple,
|
||||
Set,
|
||||
)
|
||||
|
||||
import pungi.wrappers.kojiwrapper
|
||||
from pungi.wrappers.comps import CompsWrapper
|
||||
from pungi.wrappers.mbs import MBSWrapper
|
||||
import pungi.phases.pkgset.pkgsets
|
||||
from pungi.util import retry, get_arch_variant_data, get_variant_data
|
||||
from pungi.arch import getBaseArch
|
||||
from pungi.util import (
|
||||
retry,
|
||||
get_arch_variant_data,
|
||||
get_variant_data,
|
||||
read_single_module_stream_from_file,
|
||||
read_single_module_stream_from_string,
|
||||
)
|
||||
from pungi.module_util import Modulemd
|
||||
|
||||
from pungi.phases.pkgset.common import MaterializedPackageSet, get_all_arches
|
||||
@ -190,27 +190,25 @@ def get_koji_modules(compose, koji_wrapper, event, module_info_str):
|
||||
|
||||
|
||||
class PkgsetSourceKoji(pungi.phases.pkgset.source.PkgsetSourceBase):
|
||||
enabled = True
|
||||
|
||||
def __call__(self):
|
||||
compose = self.compose
|
||||
koji_profile = compose.conf["koji_profile"]
|
||||
self.koji_wrapper = pungi.wrappers.kojiwrapper.KojiWrapper(koji_profile)
|
||||
# path prefix must contain trailing '/'
|
||||
path_prefix = self.koji_wrapper.koji_module.config.topdir.rstrip("/") + "/"
|
||||
package_sets = get_pkgset_from_koji(
|
||||
self.compose, self.koji_wrapper, path_prefix
|
||||
)
|
||||
return (package_sets, path_prefix)
|
||||
self.koji_wrapper = pungi.wrappers.kojiwrapper.KojiWrapper(compose)
|
||||
package_sets = get_pkgset_from_koji(self.compose, self.koji_wrapper)
|
||||
return (package_sets, self.compose.koji_downloader.path_prefix)
|
||||
|
||||
|
||||
def get_pkgset_from_koji(compose, koji_wrapper, path_prefix):
|
||||
def get_pkgset_from_koji(compose, koji_wrapper):
|
||||
event_info = get_koji_event_info(compose, koji_wrapper)
|
||||
return populate_global_pkgset(compose, koji_wrapper, path_prefix, event_info)
|
||||
return populate_global_pkgset(compose, koji_wrapper, event_info)
|
||||
|
||||
|
||||
def _add_module_to_variant(
|
||||
koji_wrapper, variant, build, add_to_variant_modules=False, compose=None
|
||||
koji_wrapper,
|
||||
variant,
|
||||
build,
|
||||
add_to_variant_modules=False,
|
||||
compose=None,
|
||||
exclude_module_ns=None,
|
||||
):
|
||||
"""
|
||||
Adds module defined by Koji build info to variant.
|
||||
@ -220,20 +218,29 @@ def _add_module_to_variant(
|
||||
:param bool add_to_variant_modules: Adds the modules also to
|
||||
variant.modules.
|
||||
:param compose: Compose object to get filters from
|
||||
:param list exclude_module_ns: Module name:stream which will be excluded.
|
||||
"""
|
||||
mmds = {}
|
||||
archives = koji_wrapper.koji_proxy.listArchives(build["id"])
|
||||
available_arches = set()
|
||||
for archive in archives:
|
||||
if archive["btype"] != "module":
|
||||
# Skip non module archives
|
||||
continue
|
||||
typedir = koji_wrapper.koji_module.pathinfo.typedir(build, archive["btype"])
|
||||
filename = archive["filename"]
|
||||
file_path = os.path.join(
|
||||
koji_wrapper.koji_module.pathinfo.topdir,
|
||||
'modules',
|
||||
build['arch'],
|
||||
build['extra']['typeinfo']['module']['content_koji_tag']
|
||||
)
|
||||
file_path = compose.koji_downloader.get_file(os.path.join(typedir, filename))
|
||||
try:
|
||||
# If there are two dots, the arch is in the middle. MBS uploads
|
||||
# files with actual architecture in the filename, but Pungi deals
|
||||
# in basearch. This assumes that each arch in the build maps to a
|
||||
# unique basearch.
|
||||
_, arch, _ = filename.split(".")
|
||||
basearch = getBaseArch(arch)
|
||||
filename = "modulemd.%s.txt" % basearch
|
||||
available_arches.add(basearch)
|
||||
except ValueError:
|
||||
pass
|
||||
mmds[filename] = file_path
|
||||
|
||||
if len(mmds) <= 1:
|
||||
@ -244,6 +251,10 @@ def _add_module_to_variant(
|
||||
|
||||
info = build["extra"]["typeinfo"]["module"]
|
||||
nsvc = "%(name)s:%(stream)s:%(version)s:%(context)s" % info
|
||||
ns = "%(name)s:%(stream)s" % info
|
||||
|
||||
if exclude_module_ns and ns in exclude_module_ns:
|
||||
return
|
||||
|
||||
added = False
|
||||
|
||||
@ -252,17 +263,29 @@ def _add_module_to_variant(
|
||||
compose.log_debug("Module %s is filtered from %s.%s", nsvc, variant, arch)
|
||||
continue
|
||||
|
||||
try:
|
||||
mmd = Modulemd.ModuleStream.read_file(
|
||||
mmds["modulemd.%s.txt" % arch], strict=True
|
||||
if arch not in available_arches:
|
||||
compose.log_debug(
|
||||
"Module %s is not available for arch %s.%s", nsvc, variant, arch
|
||||
)
|
||||
variant.arch_mmds.setdefault(arch, {})[nsvc] = mmd
|
||||
continue
|
||||
|
||||
filename = "modulemd.%s.txt" % arch
|
||||
if filename not in mmds:
|
||||
raise RuntimeError(
|
||||
"Module %s does not have metadata for arch %s and is not filtered "
|
||||
"out via filter_modules option." % (nsvc, arch)
|
||||
)
|
||||
try:
|
||||
mod_stream = read_single_module_stream_from_file(
|
||||
mmds[filename], compose, arch, build
|
||||
)
|
||||
except Exception as exc:
|
||||
# libmodulemd raises various GLib exceptions with not very helpful
|
||||
# messages. Let's replace it with something more useful.
|
||||
raise RuntimeError("Failed to read %s: %s", mmds[filename], str(exc))
|
||||
if mod_stream:
|
||||
added = True
|
||||
except KeyError:
|
||||
# There is no modulemd for this arch. This could mean an arch was
|
||||
# added to the compose after the module was built. We don't want to
|
||||
# process this, let's skip this module.
|
||||
pass
|
||||
variant.arch_mmds.setdefault(arch, {})[nsvc] = mod_stream
|
||||
|
||||
if not added:
|
||||
# The module is filtered on all arches of this variant.
|
||||
@ -342,9 +365,7 @@ def _add_scratch_modules_to_variant(
|
||||
tag_to_mmd.setdefault(tag, {})
|
||||
for arch in variant.arches:
|
||||
try:
|
||||
mmd = Modulemd.ModuleStream.read_string(
|
||||
final_modulemd[arch], strict=True
|
||||
)
|
||||
mmd = read_single_module_stream_from_string(final_modulemd[arch])
|
||||
variant.arch_mmds.setdefault(arch, {})[nsvc] = mmd
|
||||
except KeyError:
|
||||
continue
|
||||
@ -384,7 +405,13 @@ def _is_filtered_out(compose, variant, arch, module_name, module_stream):
|
||||
|
||||
|
||||
def _get_modules_from_koji(
|
||||
compose, koji_wrapper, event, variant, variant_tags, tag_to_mmd
|
||||
compose,
|
||||
koji_wrapper,
|
||||
event,
|
||||
variant,
|
||||
variant_tags,
|
||||
tag_to_mmd,
|
||||
exclude_module_ns,
|
||||
):
|
||||
"""
|
||||
Loads modules for given `variant` from koji `session`, adds them to
|
||||
@ -395,6 +422,7 @@ def _get_modules_from_koji(
|
||||
:param Variant variant: Variant with modules to find.
|
||||
:param dict variant_tags: Dict populated by this method. Key is `variant`
|
||||
and value is list of Koji tags to get the RPMs from.
|
||||
:param list exclude_module_ns: Module name:stream which will be excluded.
|
||||
"""
|
||||
|
||||
# Find out all modules in every variant and add their Koji tags
|
||||
@ -403,7 +431,11 @@ def _get_modules_from_koji(
|
||||
koji_modules = get_koji_modules(compose, koji_wrapper, event, module["name"])
|
||||
for koji_module in koji_modules:
|
||||
nsvc = _add_module_to_variant(
|
||||
koji_wrapper, variant, koji_module, compose=compose
|
||||
koji_wrapper,
|
||||
variant,
|
||||
koji_module,
|
||||
compose=compose,
|
||||
exclude_module_ns=exclude_module_ns,
|
||||
)
|
||||
if not nsvc:
|
||||
continue
|
||||
@ -464,7 +496,16 @@ def filter_inherited(koji_proxy, event, module_builds, top_tag):
|
||||
# And keep only builds from that topmost tag
|
||||
result.extend(build for build in builds if build["tag_name"] == tag)
|
||||
|
||||
return result
|
||||
# If the same module was inherited multiple times, it will be in result
|
||||
# multiple times. We need to deduplicate.
|
||||
deduplicated_result = []
|
||||
included_nvrs = set()
|
||||
for build in result:
|
||||
if build["nvr"] not in included_nvrs:
|
||||
deduplicated_result.append(build)
|
||||
included_nvrs.add(build["nvr"])
|
||||
|
||||
return deduplicated_result
|
||||
|
||||
|
||||
def filter_by_whitelist(compose, module_builds, input_modules, expected_modules):
|
||||
@ -491,15 +532,16 @@ def filter_by_whitelist(compose, module_builds, input_modules, expected_modules)
|
||||
info.get("context"),
|
||||
)
|
||||
nvr_patterns.add((pattern, spec["name"]))
|
||||
|
||||
modules_to_keep = []
|
||||
|
||||
for mb in sorted(module_builds, key=lambda i: i['name']):
|
||||
for mb in module_builds:
|
||||
# Split release from the build into version and context
|
||||
ver, ctx = mb["release"].split(".")
|
||||
# Values in `mb` are from Koji build. There's nvr and name, version and
|
||||
# release. The input pattern specifies modular name, stream, version
|
||||
# and context.
|
||||
for (n, s, v, c), spec in sorted(nvr_patterns):
|
||||
for (n, s, v, c), spec in nvr_patterns:
|
||||
if (
|
||||
# We always have a name and stream...
|
||||
mb["name"] == n
|
||||
@ -511,51 +553,19 @@ def filter_by_whitelist(compose, module_builds, input_modules, expected_modules)
|
||||
):
|
||||
modules_to_keep.append(mb)
|
||||
expected_modules.discard(spec)
|
||||
break
|
||||
|
||||
return modules_to_keep
|
||||
|
||||
|
||||
def _filter_expected_modules(
|
||||
variant_name: AnyStr,
|
||||
variant_arches: List[AnyStr],
|
||||
expected_modules: Set[AnyStr],
|
||||
filtered_modules: List[Tuple[AnyStr, Dict[AnyStr, List[AnyStr]]]],
|
||||
) -> set:
|
||||
"""
|
||||
Function filters out all modules which are listed in Pungi config.
|
||||
Those modules can be absent in koji env so we must remove it from
|
||||
the expected modules list otherwise Pungi will fail
|
||||
"""
|
||||
for variant_regexp, filters_dict in filtered_modules:
|
||||
for arch, modules in filters_dict.items():
|
||||
arch = '.*' if arch == '*' else arch
|
||||
variant_regexp = '.*' if variant_regexp == '*' else variant_regexp
|
||||
modules = ['.*' if module == '*' else module for module in modules]
|
||||
cond1 = re.findall(
|
||||
variant_regexp,
|
||||
variant_name,
|
||||
)
|
||||
cond2 = any(
|
||||
re.findall(
|
||||
arch,
|
||||
variant_arch,
|
||||
) for variant_arch in variant_arches
|
||||
)
|
||||
if cond1 and cond2:
|
||||
expected_modules = {
|
||||
expected_module for expected_module in expected_modules if
|
||||
not any(
|
||||
re.findall(
|
||||
filtered_module,
|
||||
expected_module,
|
||||
) for filtered_module in modules
|
||||
)
|
||||
}
|
||||
return expected_modules
|
||||
|
||||
|
||||
def _get_modules_from_koji_tags(
|
||||
compose, koji_wrapper, event_id, variant, variant_tags, tag_to_mmd
|
||||
compose,
|
||||
koji_wrapper,
|
||||
event_id,
|
||||
variant,
|
||||
variant_tags,
|
||||
tag_to_mmd,
|
||||
exclude_module_ns,
|
||||
):
|
||||
"""
|
||||
Loads modules for given `variant` from Koji, adds them to
|
||||
@ -567,6 +577,7 @@ def _get_modules_from_koji_tags(
|
||||
:param Variant variant: Variant with modules to find.
|
||||
:param dict variant_tags: Dict populated by this method. Key is `variant`
|
||||
and value is list of Koji tags to get the RPMs from.
|
||||
:param list exclude_module_ns: Module name:stream which will be excluded.
|
||||
"""
|
||||
# Compose tags from configuration
|
||||
compose_tags = [
|
||||
@ -574,13 +585,7 @@ def _get_modules_from_koji_tags(
|
||||
]
|
||||
# Get set of configured module names for this variant. If nothing is
|
||||
# configured, the set is empty.
|
||||
expected_modules = []
|
||||
for spec in variant.get_modules():
|
||||
name, stream = spec['name'].split(':')
|
||||
expected_modules.append(
|
||||
':'.join((name, stream.replace('-', '_')))
|
||||
)
|
||||
expected_modules = set(expected_modules)
|
||||
expected_modules = set(spec["name"] for spec in variant.get_modules())
|
||||
# Find out all modules in every variant and add their Koji tags
|
||||
# to variant and variant_tags list.
|
||||
koji_proxy = koji_wrapper.koji_proxy
|
||||
@ -639,21 +644,26 @@ def _get_modules_from_koji_tags(
|
||||
for build in latest_builds:
|
||||
# Get the Build from Koji to get modulemd and module_tag.
|
||||
build = koji_proxy.getBuild(build["build_id"])
|
||||
|
||||
nsvc = _add_module_to_variant(
|
||||
koji_wrapper,
|
||||
variant,
|
||||
build,
|
||||
True,
|
||||
compose=compose,
|
||||
exclude_module_ns=exclude_module_ns,
|
||||
)
|
||||
if not nsvc:
|
||||
continue
|
||||
|
||||
module_tag = (
|
||||
build.get("extra", {})
|
||||
.get("typeinfo", {})
|
||||
.get("module", {})
|
||||
.get("content_koji_tag", "")
|
||||
)
|
||||
|
||||
variant_tags[variant].append(module_tag)
|
||||
|
||||
nsvc = _add_module_to_variant(
|
||||
koji_wrapper, variant, build, True, compose=compose
|
||||
)
|
||||
if not nsvc:
|
||||
continue
|
||||
|
||||
tag_to_mmd.setdefault(module_tag, {})
|
||||
for arch in variant.arch_mmds:
|
||||
try:
|
||||
@ -675,22 +685,17 @@ def _get_modules_from_koji_tags(
|
||||
# needed in createrepo phase where metadata is exposed by
|
||||
# productmd
|
||||
variant.module_uid_to_koji_tag[nsvc] = module_tag
|
||||
expected_modules = _filter_expected_modules(
|
||||
variant_name=variant.name,
|
||||
variant_arches=variant.arches,
|
||||
expected_modules=expected_modules,
|
||||
filtered_modules=compose.conf['filter_modules'],
|
||||
)
|
||||
|
||||
if expected_modules:
|
||||
# There are some module names that were listed in configuration and not
|
||||
# found in any tag...
|
||||
raise RuntimeError(
|
||||
compose.log_warning(
|
||||
"Configuration specified patterns (%s) that don't match "
|
||||
"any modules in the configured tags." % ", ".join(expected_modules)
|
||||
)
|
||||
|
||||
|
||||
def populate_global_pkgset(compose, koji_wrapper, path_prefix, event):
|
||||
def populate_global_pkgset(compose, koji_wrapper, event):
|
||||
all_arches = get_all_arches(compose)
|
||||
|
||||
# List of compose tags from which we create this compose
|
||||
@ -744,26 +749,52 @@ def populate_global_pkgset(compose, koji_wrapper, path_prefix, event):
|
||||
"modules."
|
||||
)
|
||||
|
||||
extra_modules = get_variant_data(
|
||||
compose.conf, "pkgset_koji_module_builds", variant
|
||||
)
|
||||
|
||||
# When adding extra modules, other modules of the same name:stream available
|
||||
# in brew tag should be excluded.
|
||||
exclude_module_ns = []
|
||||
if extra_modules:
|
||||
exclude_module_ns = [
|
||||
":".join(nsvc.split(":")[:2]) for nsvc in extra_modules
|
||||
]
|
||||
|
||||
if modular_koji_tags or (
|
||||
compose.conf["pkgset_koji_module_tag"] and variant.modules
|
||||
):
|
||||
# List modules tagged in particular tags.
|
||||
_get_modules_from_koji_tags(
|
||||
compose, koji_wrapper, event, variant, variant_tags, tag_to_mmd
|
||||
compose,
|
||||
koji_wrapper,
|
||||
event,
|
||||
variant,
|
||||
variant_tags,
|
||||
tag_to_mmd,
|
||||
exclude_module_ns,
|
||||
)
|
||||
elif variant.modules:
|
||||
# Search each module in Koji separately. Tagging does not come into
|
||||
# play here.
|
||||
_get_modules_from_koji(
|
||||
compose, koji_wrapper, event, variant, variant_tags, tag_to_mmd
|
||||
compose,
|
||||
koji_wrapper,
|
||||
event,
|
||||
variant,
|
||||
variant_tags,
|
||||
tag_to_mmd,
|
||||
exclude_module_ns,
|
||||
)
|
||||
|
||||
extra_modules = get_variant_data(
|
||||
compose.conf, "pkgset_koji_module_builds", variant
|
||||
)
|
||||
if extra_modules:
|
||||
_add_extra_modules_to_variant(
|
||||
compose, koji_wrapper, variant, extra_modules, variant_tags, tag_to_mmd
|
||||
compose,
|
||||
koji_wrapper,
|
||||
variant,
|
||||
extra_modules,
|
||||
variant_tags,
|
||||
tag_to_mmd,
|
||||
)
|
||||
|
||||
variant_scratch_modules = get_variant_data(
|
||||
@ -790,17 +821,23 @@ def populate_global_pkgset(compose, koji_wrapper, path_prefix, event):
|
||||
|
||||
pkgsets = []
|
||||
|
||||
extra_builds = force_list(compose.conf.get("pkgset_koji_builds", []))
|
||||
extra_tasks = force_list(compose.conf.get("pkgset_koji_scratch_tasks", []))
|
||||
|
||||
if not pkgset_koji_tags and (extra_builds or extra_tasks):
|
||||
# We have extra packages to pull in, but no tag to merge them with.
|
||||
compose_tags.append(pungi.phases.pkgset.pkgsets.MISSING_KOJI_TAG)
|
||||
pkgset_koji_tags.append(pungi.phases.pkgset.pkgsets.MISSING_KOJI_TAG)
|
||||
|
||||
# Get package set for each compose tag and merge it to global package
|
||||
# list. Also prepare per-variant pkgset, because we do not have list
|
||||
# of binary RPMs in module definition - there is just list of SRPMs.
|
||||
for compose_tag in compose_tags:
|
||||
compose.log_info("Loading package set for tag %s", compose_tag)
|
||||
kwargs = {}
|
||||
if compose_tag in pkgset_koji_tags:
|
||||
extra_builds = force_list(compose.conf.get("pkgset_koji_builds", []))
|
||||
extra_tasks = force_list(compose.conf.get("pkgset_koji_scratch_tasks", []))
|
||||
else:
|
||||
extra_builds = []
|
||||
extra_tasks = []
|
||||
kwargs["extra_builds"] = extra_builds
|
||||
kwargs["extra_tasks"] = extra_tasks
|
||||
|
||||
pkgset = pungi.phases.pkgset.pkgsets.KojiPackageSet(
|
||||
compose_tag,
|
||||
@ -812,8 +849,10 @@ def populate_global_pkgset(compose, koji_wrapper, path_prefix, event):
|
||||
allow_invalid_sigkeys=allow_invalid_sigkeys,
|
||||
populate_only_packages=populate_only_packages_to_gather,
|
||||
cache_region=compose.cache_region,
|
||||
extra_builds=extra_builds,
|
||||
extra_tasks=extra_tasks,
|
||||
signed_packages_retries=compose.conf["signed_packages_retries"],
|
||||
signed_packages_wait=compose.conf["signed_packages_wait"],
|
||||
downloader=compose.koji_downloader,
|
||||
**kwargs
|
||||
)
|
||||
|
||||
# Check if we have cache for this tag from previous compose. If so, use
|
||||
@ -822,11 +861,16 @@ def populate_global_pkgset(compose, koji_wrapper, path_prefix, event):
|
||||
compose.paths.work.pkgset_file_cache(compose_tag)
|
||||
)
|
||||
if old_cache_path:
|
||||
pkgset.set_old_file_cache(
|
||||
pungi.phases.pkgset.pkgsets.KojiPackageSet.load_old_file_cache(
|
||||
old_cache_path
|
||||
try:
|
||||
pkgset.set_old_file_cache(
|
||||
pungi.phases.pkgset.pkgsets.KojiPackageSet.load_old_file_cache(
|
||||
old_cache_path
|
||||
)
|
||||
)
|
||||
except Exception as e:
|
||||
compose.log_debug(
|
||||
"Failed to load old cache file %s : %s" % (old_cache_path, str(e))
|
||||
)
|
||||
)
|
||||
|
||||
is_traditional = compose_tag in compose.conf.get("pkgset_koji_tag", [])
|
||||
should_inherit = inherit if is_traditional else inherit_modules
|
||||
@ -866,13 +910,18 @@ def populate_global_pkgset(compose, koji_wrapper, path_prefix, event):
|
||||
if pkgset.reuse is None:
|
||||
pkgset.populate(
|
||||
compose_tag,
|
||||
event,
|
||||
# We care about packages as they existed on the specified
|
||||
# event. However, modular content tags are not expected to
|
||||
# change, so the event doesn't matter there. If an exact NSVC
|
||||
# of a module is specified, the code above would happily find
|
||||
# its content tag, but fail here if the content tag doesn't
|
||||
# exist at the given event.
|
||||
event=event if is_traditional else None,
|
||||
inherit=should_inherit,
|
||||
include_packages=modular_packages,
|
||||
)
|
||||
for variant in compose.all_variants.values():
|
||||
if compose_tag in variant_tags[variant]:
|
||||
|
||||
# If it's a modular tag, store the package set for the module.
|
||||
for nsvc, koji_tag in variant.module_uid_to_koji_tag.items():
|
||||
if compose_tag == koji_tag:
|
||||
@ -895,7 +944,7 @@ def populate_global_pkgset(compose, koji_wrapper, path_prefix, event):
|
||||
MaterializedPackageSet.create,
|
||||
compose,
|
||||
pkgset,
|
||||
path_prefix,
|
||||
compose.koji_downloader.path_prefix,
|
||||
mmd=tag_to_mmd.get(pkgset.name),
|
||||
)
|
||||
)
|
||||
|
1024
pungi/phases/pkgset/sources/source_kojimock.py
Normal file
1024
pungi/phases/pkgset/sources/source_kojimock.py
Normal file
File diff suppressed because it is too large
Load Diff
@ -15,6 +15,7 @@
|
||||
|
||||
|
||||
import os
|
||||
import shutil
|
||||
|
||||
from kobo.shortcuts import run
|
||||
|
||||
@ -31,8 +32,6 @@ import pungi.phases.pkgset.source
|
||||
|
||||
|
||||
class PkgsetSourceRepos(pungi.phases.pkgset.source.PkgsetSourceBase):
|
||||
enabled = True
|
||||
|
||||
def __call__(self):
|
||||
package_sets, path_prefix = get_pkgset_from_repos(self.compose)
|
||||
return (package_sets, path_prefix)
|
||||
@ -112,6 +111,17 @@ def get_pkgset_from_repos(compose):
|
||||
flist.append(dst)
|
||||
pool.queue_put((src, dst))
|
||||
|
||||
# Clean up tmp dir
|
||||
# Workaround for rpm not honoring sgid bit which only appears when yum is used.
|
||||
yumroot_dir = os.path.join(pungi_dir, "work", arch, "yumroot")
|
||||
if os.path.isdir(yumroot_dir):
|
||||
try:
|
||||
shutil.rmtree(yumroot_dir)
|
||||
except Exception as e:
|
||||
compose.log_warning(
|
||||
"Failed to clean up tmp dir: %s %s" % (yumroot_dir, str(e))
|
||||
)
|
||||
|
||||
msg = "Linking downloaded pkgset packages"
|
||||
compose.log_info("[BEGIN] %s" % msg)
|
||||
pool.start()
|
||||
|
@ -18,6 +18,7 @@ import os
|
||||
|
||||
from pungi.phases.base import PhaseBase
|
||||
from pungi.util import failable, get_arch_variant_data
|
||||
import productmd.compose
|
||||
|
||||
|
||||
class TestPhase(PhaseBase):
|
||||
@ -25,6 +26,7 @@ class TestPhase(PhaseBase):
|
||||
|
||||
def run(self):
|
||||
check_image_sanity(self.compose)
|
||||
check_image_metadata(self.compose)
|
||||
|
||||
|
||||
def check_image_sanity(compose):
|
||||
@ -45,6 +47,17 @@ def check_image_sanity(compose):
|
||||
check_size_limit(compose, variant, arch, img)
|
||||
|
||||
|
||||
def check_image_metadata(compose):
|
||||
"""
|
||||
Check the images metadata for entries that cannot be serialized.
|
||||
Often caused by isos with duplicate metadata.
|
||||
Accessing the `images` attribute will raise an exception if there's a problem
|
||||
"""
|
||||
if compose.im.images:
|
||||
compose = productmd.compose.Compose(compose.paths.compose.topdir())
|
||||
return compose.images
|
||||
|
||||
|
||||
def check_sanity(compose, variant, arch, image):
|
||||
path = os.path.join(compose.paths.compose.topdir(), image.path)
|
||||
deliverable = getattr(image, "deliverable")
|
||||
|
@ -69,10 +69,13 @@ class Profiler(object):
|
||||
|
||||
@classmethod
|
||||
def print_results(cls, stream=sys.stdout):
|
||||
print("Profiling results:", file=sys.stdout)
|
||||
# Ensure all data that was printed to stdout was already flushed. If
|
||||
# the caller is redirecting stderr to stdout, and there's buffered
|
||||
# data, we may end up in a situation where the stderr output printed
|
||||
# below ends up mixed with the stdout lines.
|
||||
sys.stdout.flush()
|
||||
print("Profiling results:", file=stream)
|
||||
results = cls._data.items()
|
||||
results = sorted(results, key=lambda x: x[1]["time"], reverse=True)
|
||||
for name, data in results:
|
||||
print(
|
||||
" %6.2f %5d %s" % (data["time"], data["calls"], name), file=sys.stdout
|
||||
)
|
||||
print(" %6.2f %5d %s" % (data["time"], data["calls"], name), file=stream)
|
||||
|
105
pungi/runroot.py
105
pungi/runroot.py
@ -13,12 +13,19 @@
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, see <https://gnu.org/licenses/>.
|
||||
|
||||
import contextlib
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import tarfile
|
||||
|
||||
import requests
|
||||
import six
|
||||
from six.moves import shlex_quote
|
||||
import kobo.log
|
||||
from kobo.shortcuts import run
|
||||
|
||||
from pungi import util
|
||||
from pungi.wrappers import kojiwrapper
|
||||
|
||||
|
||||
@ -110,7 +117,7 @@ class Runroot(kobo.log.LoggingBase):
|
||||
runroot_tag = self.compose.conf["runroot_tag"]
|
||||
log_dir = kwargs.pop("log_dir", None)
|
||||
|
||||
koji_wrapper = kojiwrapper.KojiWrapper(self.compose.conf["koji_profile"])
|
||||
koji_wrapper = kojiwrapper.KojiWrapper(self.compose)
|
||||
koji_cmd = koji_wrapper.get_runroot_cmd(
|
||||
runroot_tag,
|
||||
arch,
|
||||
@ -149,7 +156,11 @@ class Runroot(kobo.log.LoggingBase):
|
||||
"""
|
||||
formatted_cmd = command.format(**fmt_dict) if fmt_dict else command
|
||||
ssh_cmd = ["ssh", "-oBatchMode=yes", "-n", "-l", user, hostname, formatted_cmd]
|
||||
return run(ssh_cmd, show_cmd=True, logfile=log_file)[1]
|
||||
output = run(ssh_cmd, show_cmd=True, logfile=log_file)[1]
|
||||
if six.PY3 and isinstance(output, bytes):
|
||||
return output.decode()
|
||||
else:
|
||||
return output
|
||||
|
||||
def _log_file(self, base, suffix):
|
||||
return base.replace(".log", "." + suffix + ".log")
|
||||
@ -174,10 +185,13 @@ class Runroot(kobo.log.LoggingBase):
|
||||
# by the runroot task, so the Pungi user can access them.
|
||||
if chown_paths:
|
||||
paths = " ".join(shlex_quote(pth) for pth in chown_paths)
|
||||
command += " ; EXIT_CODE=$?"
|
||||
# Make the files world readable
|
||||
command += " && chmod -R a+r %s" % paths
|
||||
command += " ; chmod -R a+r %s" % paths
|
||||
# and owned by the same user that is running the process
|
||||
command += " && chown -R %d %s" % (os.getuid(), paths)
|
||||
command += " ; chown -R %d %s" % (os.getuid(), paths)
|
||||
# Exit with code of main command
|
||||
command += " ; exit $EXIT_CODE"
|
||||
|
||||
hostname = runroot_ssh_hostnames[arch]
|
||||
user = self.compose.conf.get("runroot_ssh_username", "root")
|
||||
@ -222,9 +236,9 @@ class Runroot(kobo.log.LoggingBase):
|
||||
fmt_dict["runroot_key"] = runroot_key
|
||||
self._ssh_run(hostname, user, run_template, fmt_dict, log_file=log_file)
|
||||
|
||||
fmt_dict[
|
||||
"command"
|
||||
] = "rpm -qa --qf='%{name}-%{version}-%{release}.%{arch}\n'"
|
||||
fmt_dict["command"] = (
|
||||
"rpm -qa --qf='%{name}-%{version}-%{release}.%{arch}\n'"
|
||||
)
|
||||
buildroot_rpms = self._ssh_run(
|
||||
hostname,
|
||||
user,
|
||||
@ -300,13 +314,14 @@ class Runroot(kobo.log.LoggingBase):
|
||||
runroot_channel = self.compose.conf.get("runroot_channel")
|
||||
runroot_tag = self.compose.conf["runroot_tag"]
|
||||
|
||||
koji_wrapper = kojiwrapper.KojiWrapper(self.compose.conf["koji_profile"])
|
||||
koji_wrapper = kojiwrapper.KojiWrapper(self.compose)
|
||||
koji_cmd = koji_wrapper.get_pungi_buildinstall_cmd(
|
||||
runroot_tag,
|
||||
arch,
|
||||
args,
|
||||
channel=runroot_channel,
|
||||
chown_uid=os.getuid(),
|
||||
# We want to change owner only if shared NFS directory is used.
|
||||
chown_uid=os.getuid() if kwargs.get("mounts") else None,
|
||||
**kwargs
|
||||
)
|
||||
|
||||
@ -317,6 +332,7 @@ class Runroot(kobo.log.LoggingBase):
|
||||
% (output["task_id"], log_file)
|
||||
)
|
||||
self._result = output
|
||||
return output["task_id"]
|
||||
|
||||
def run_pungi_ostree(self, args, log_file=None, arch=None, **kwargs):
|
||||
"""
|
||||
@ -334,7 +350,7 @@ class Runroot(kobo.log.LoggingBase):
|
||||
runroot_channel = self.compose.conf.get("runroot_channel")
|
||||
runroot_tag = self.compose.conf["runroot_tag"]
|
||||
|
||||
koji_wrapper = kojiwrapper.KojiWrapper(self.compose.conf["koji_profile"])
|
||||
koji_wrapper = kojiwrapper.KojiWrapper(self.compose)
|
||||
koji_cmd = koji_wrapper.get_pungi_ostree_cmd(
|
||||
runroot_tag, arch, args, channel=runroot_channel, **kwargs
|
||||
)
|
||||
@ -373,3 +389,72 @@ class Runroot(kobo.log.LoggingBase):
|
||||
return self._result
|
||||
else:
|
||||
raise ValueError("Unknown runroot_method %r." % self.runroot_method)
|
||||
|
||||
|
||||
@util.retry(wait_on=requests.exceptions.RequestException)
|
||||
def _download_file(url, dest):
|
||||
# contextlib.closing is only needed in requests<2.18
|
||||
with contextlib.closing(requests.get(url, stream=True, timeout=5)) as r:
|
||||
if r.status_code == 404:
|
||||
raise RuntimeError("Archive %s not found" % url)
|
||||
r.raise_for_status()
|
||||
with open(dest, "wb") as f:
|
||||
shutil.copyfileobj(r.raw, f)
|
||||
|
||||
|
||||
def _download_archive(task_id, fname, archive_url, dest_dir):
|
||||
"""Download file from URL to a destination, with retries."""
|
||||
temp_file = os.path.join(dest_dir, fname)
|
||||
_download_file(archive_url, temp_file)
|
||||
return temp_file
|
||||
|
||||
|
||||
def _extract_archive(task_id, fname, archive_file, dest_path):
|
||||
"""Extract the archive into given destination.
|
||||
|
||||
All items of the archive must match the name of the archive, i.e. all
|
||||
paths in foo.tar.gz must start with foo/.
|
||||
"""
|
||||
basename = os.path.basename(fname).split(".")[0]
|
||||
strip_prefix = basename + "/"
|
||||
with tarfile.open(archive_file, "r") as archive:
|
||||
for member in archive.getmembers():
|
||||
# Check if each item is either the root directory or is within it.
|
||||
if member.name != basename and not member.name.startswith(strip_prefix):
|
||||
raise RuntimeError(
|
||||
"Archive %s from task %s contains file without expected prefix: %s"
|
||||
% (fname, task_id, member)
|
||||
)
|
||||
dest = os.path.join(dest_path, member.name[len(strip_prefix) :])
|
||||
if member.isdir():
|
||||
# Create directories where needed...
|
||||
util.makedirs(dest)
|
||||
elif member.isfile():
|
||||
# ... and extract files into them.
|
||||
with open(dest, "wb") as dest_obj:
|
||||
shutil.copyfileobj(archive.extractfile(member), dest_obj)
|
||||
elif member.islnk():
|
||||
# We have a hardlink. Let's also link it.
|
||||
linked_file = os.path.join(
|
||||
dest_path, member.linkname[len(strip_prefix) :]
|
||||
)
|
||||
os.link(linked_file, dest)
|
||||
else:
|
||||
# Any other file type is an error.
|
||||
raise RuntimeError(
|
||||
"Unexpected file type in %s from task %s: %s"
|
||||
% (fname, task_id, member)
|
||||
)
|
||||
|
||||
|
||||
def download_and_extract_archive(compose, task_id, fname, destination):
|
||||
"""Download a tar archive from task outputs and extract it to the destination."""
|
||||
koji = kojiwrapper.KojiWrapper(compose).koji_module
|
||||
# Koji API provides downloadTaskOutput method, but it's not usable as it
|
||||
# will attempt to load the entire file into memory.
|
||||
# So instead let's generate a patch and attempt to convert it to a URL.
|
||||
server_path = os.path.join(koji.pathinfo.task(task_id), fname)
|
||||
archive_url = server_path.replace(koji.config.topdir, koji.config.topurl)
|
||||
with util.temp_dir(prefix="buildinstall-download") as tmp_dir:
|
||||
local_path = _download_archive(task_id, fname, archive_url, tmp_dir)
|
||||
_extract_archive(task_id, fname, local_path, destination)
|
||||
|
63
pungi/scripts/cache_cleanup.py
Normal file
63
pungi/scripts/cache_cleanup.py
Normal file
@ -0,0 +1,63 @@
|
||||
import argparse
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
|
||||
from pungi.util import format_size
|
||||
|
||||
|
||||
LOCK_RE = re.compile(r".*\.lock(\|[A-Za-z0-9]+)*$")
|
||||
|
||||
|
||||
def should_be_cleaned_up(path, st, threshold):
|
||||
if st.st_nlink == 1 and st.st_mtime < threshold:
|
||||
# No other instances, older than limit
|
||||
return True
|
||||
|
||||
if LOCK_RE.match(path) and st.st_mtime < threshold:
|
||||
# Suspiciously old lock
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("CACHE_DIR")
|
||||
parser.add_argument("-n", "--dry-run", action="store_true")
|
||||
parser.add_argument("--verbose", action="store_true")
|
||||
parser.add_argument(
|
||||
"--max-age",
|
||||
help="how old files should be considered for deletion",
|
||||
default=7,
|
||||
type=int,
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
topdir = os.path.abspath(args.CACHE_DIR)
|
||||
max_age = args.max_age * 24 * 3600
|
||||
|
||||
cleaned_up = 0
|
||||
|
||||
threshold = time.time() - max_age
|
||||
for dirpath, dirnames, filenames in os.walk(topdir):
|
||||
for f in filenames:
|
||||
filepath = os.path.join(dirpath, f)
|
||||
st = os.stat(filepath)
|
||||
if should_be_cleaned_up(filepath, st, threshold):
|
||||
if args.verbose:
|
||||
print("RM %s" % filepath)
|
||||
cleaned_up += st.st_size
|
||||
if not args.dry_run:
|
||||
os.remove(filepath)
|
||||
if not dirnames and not filenames:
|
||||
if args.verbose:
|
||||
print("RMDIR %s" % dirpath)
|
||||
if not args.dry_run:
|
||||
os.rmdir(dirpath)
|
||||
|
||||
if args.dry_run:
|
||||
print("Would reclaim %s bytes." % format_size(cleaned_up))
|
||||
else:
|
||||
print("Reclaimed %s bytes." % format_size(cleaned_up))
|
@ -96,7 +96,7 @@ def main():
|
||||
f.filter_environments(opts.arch, opts.variant, opts.arch_only_environments)
|
||||
|
||||
if not opts.no_cleanup:
|
||||
f.cleanup(opts.keep_empty_group, opts.lookaside_group)
|
||||
f.cleanup(opts.arch, opts.keep_empty_group, opts.lookaside_group)
|
||||
|
||||
if opts.remove_categories:
|
||||
f.remove_categories()
|
||||
|
@ -171,32 +171,11 @@ def main():
|
||||
group.add_argument(
|
||||
"--offline", action="store_true", help="Do not resolve git references."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--multi",
|
||||
metavar="DIR",
|
||||
help=(
|
||||
"Treat source as config for pungi-orchestrate and store dump into "
|
||||
"given directory."
|
||||
),
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
defines = config_utils.extract_defines(args.define)
|
||||
|
||||
if args.multi:
|
||||
if len(args.sources) > 1:
|
||||
parser.error("Only one multi config can be specified.")
|
||||
|
||||
return dump_multi_config(
|
||||
args.sources[0],
|
||||
dest=args.multi,
|
||||
defines=defines,
|
||||
just_dump=args.just_dump,
|
||||
event=args.freeze_event,
|
||||
offline=args.offline,
|
||||
)
|
||||
|
||||
return process_file(
|
||||
args.sources,
|
||||
defines=defines,
|
||||
|
@ -127,8 +127,7 @@ def run(config, topdir, has_old, offline, defined_variables, schema_overrides):
|
||||
pungi.phases.OstreeInstallerPhase(compose, buildinstall_phase),
|
||||
pungi.phases.OSTreePhase(compose),
|
||||
pungi.phases.CreateisoPhase(compose, buildinstall_phase),
|
||||
pungi.phases.ExtraIsosPhase(compose),
|
||||
pungi.phases.LiveImagesPhase(compose),
|
||||
pungi.phases.ExtraIsosPhase(compose, buildinstall_phase),
|
||||
pungi.phases.LiveMediaPhase(compose),
|
||||
pungi.phases.ImageBuildPhase(compose),
|
||||
pungi.phases.ImageChecksumPhase(compose),
|
||||
|
@ -5,35 +5,43 @@ import os
|
||||
import subprocess
|
||||
import tempfile
|
||||
from shutil import rmtree
|
||||
from typing import AnyStr, List, Dict, Optional
|
||||
from typing import (
|
||||
AnyStr,
|
||||
List,
|
||||
Dict,
|
||||
Optional,
|
||||
)
|
||||
|
||||
import createrepo_c as cr
|
||||
import requests
|
||||
import yaml
|
||||
from dataclasses import dataclass, field
|
||||
|
||||
from .create_packages_json import PackagesGenerator, RepoInfo
|
||||
from .create_packages_json import (
|
||||
PackagesGenerator,
|
||||
RepoInfo,
|
||||
VariantInfo,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ExtraRepoInfo(RepoInfo):
|
||||
class ExtraVariantInfo(VariantInfo):
|
||||
|
||||
modules: List[AnyStr] = field(default_factory=list)
|
||||
packages: List[AnyStr] = field(default_factory=list)
|
||||
is_remote: bool = True
|
||||
|
||||
|
||||
class CreateExtraRepo(PackagesGenerator):
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
repos: List[ExtraRepoInfo],
|
||||
variants: List[ExtraVariantInfo],
|
||||
bs_auth_token: AnyStr,
|
||||
local_repository_path: AnyStr,
|
||||
clear_target_repo: bool = True,
|
||||
):
|
||||
self.repos = [] # type: List[ExtraRepoInfo]
|
||||
super().__init__(repos, [], [])
|
||||
self.variants = [] # type: List[ExtraVariantInfo]
|
||||
super().__init__(variants, [], [])
|
||||
self.auth_headers = {
|
||||
'Authorization': f'Bearer {bs_auth_token}',
|
||||
}
|
||||
@ -92,7 +100,7 @@ class CreateExtraRepo(PackagesGenerator):
|
||||
arch: AnyStr,
|
||||
packages: Optional[List[AnyStr]] = None,
|
||||
modules: Optional[List[AnyStr]] = None,
|
||||
) -> List[ExtraRepoInfo]:
|
||||
) -> List[ExtraVariantInfo]:
|
||||
"""
|
||||
Get info about a BS repo and save it to
|
||||
an object of class ExtraRepoInfo
|
||||
@ -110,7 +118,7 @@ class CreateExtraRepo(PackagesGenerator):
|
||||
api_uri = 'api/v1'
|
||||
bs_repo_suffix = 'build_repos'
|
||||
|
||||
repos_info = []
|
||||
variants_info = []
|
||||
|
||||
# get the full info about a BS repo
|
||||
repo_request = requests.get(
|
||||
@ -132,22 +140,26 @@ class CreateExtraRepo(PackagesGenerator):
|
||||
# skip repo with unsuitable architecture
|
||||
if architecture != arch:
|
||||
continue
|
||||
repo_info = ExtraRepoInfo(
|
||||
path=os.path.join(
|
||||
bs_url,
|
||||
bs_repo_suffix,
|
||||
build_id,
|
||||
platform_name,
|
||||
),
|
||||
folder=architecture,
|
||||
variant_info = ExtraVariantInfo(
|
||||
name=f'{build_id}-{platform_name}-{architecture}',
|
||||
arch=architecture,
|
||||
is_remote=True,
|
||||
packages=packages,
|
||||
modules=modules,
|
||||
repos=[
|
||||
RepoInfo(
|
||||
path=os.path.join(
|
||||
bs_url,
|
||||
bs_repo_suffix,
|
||||
build_id,
|
||||
platform_name,
|
||||
),
|
||||
folder=architecture,
|
||||
is_remote=True,
|
||||
)
|
||||
]
|
||||
)
|
||||
repos_info.append(repo_info)
|
||||
return repos_info
|
||||
variants_info.append(variant_info)
|
||||
return variants_info
|
||||
|
||||
def _create_local_extra_repo(self):
|
||||
"""
|
||||
@ -161,7 +173,7 @@ class CreateExtraRepo(PackagesGenerator):
|
||||
if os.path.exists(self.default_modules_yaml_path):
|
||||
os.remove(self.default_modules_yaml_path)
|
||||
|
||||
def _get_remote_file_content(
|
||||
def get_remote_file_content(
|
||||
self,
|
||||
file_url: AnyStr,
|
||||
) -> AnyStr:
|
||||
@ -184,7 +196,7 @@ class CreateExtraRepo(PackagesGenerator):
|
||||
def _download_rpm_to_local_repo(
|
||||
self,
|
||||
package_location: AnyStr,
|
||||
repo_info: ExtraRepoInfo,
|
||||
repo_info: RepoInfo,
|
||||
) -> None:
|
||||
"""
|
||||
Download a rpm package from a remote repo and save it to a local repo
|
||||
@ -212,46 +224,47 @@ class CreateExtraRepo(PackagesGenerator):
|
||||
def _download_packages(
|
||||
self,
|
||||
packages: Dict[AnyStr, cr.Package],
|
||||
repo_info: ExtraRepoInfo
|
||||
variant_info: ExtraVariantInfo
|
||||
):
|
||||
"""
|
||||
Download all defined packages from a remote repo
|
||||
:param packages: information about all of packages (including
|
||||
:param packages: information about all packages (including
|
||||
modularity) in a remote repo
|
||||
:param repo_info: information about a remote repo
|
||||
:param variant_info: information about a remote variant
|
||||
"""
|
||||
for package in packages.values():
|
||||
package_name = package.name
|
||||
# Skip a current package from a remote repo if we defined
|
||||
# the list packages and a current package doesn't belong to it
|
||||
if repo_info.packages and \
|
||||
package_name not in repo_info.packages:
|
||||
if variant_info.packages and \
|
||||
package_name not in variant_info.packages:
|
||||
continue
|
||||
self._download_rpm_to_local_repo(
|
||||
package_location=package.location_href,
|
||||
repo_info=repo_info,
|
||||
)
|
||||
for repo_info in variant_info.repos:
|
||||
self._download_rpm_to_local_repo(
|
||||
package_location=package.location_href,
|
||||
repo_info=repo_info,
|
||||
)
|
||||
|
||||
def _download_modules(
|
||||
self,
|
||||
modules_data: List[Dict],
|
||||
repo_info: ExtraRepoInfo,
|
||||
variant_info: ExtraVariantInfo,
|
||||
packages: Dict[AnyStr, cr.Package]
|
||||
):
|
||||
"""
|
||||
Download all defined modularity packages and their data from
|
||||
a remote repo
|
||||
:param modules_data: information about all of modules in a remote repo
|
||||
:param repo_info: information about a remote repo
|
||||
:param packages: information about all of packages (including
|
||||
:param modules_data: information about all modules in a remote repo
|
||||
:param variant_info: information about a remote variant
|
||||
:param packages: information about all packages (including
|
||||
modularity) in a remote repo
|
||||
"""
|
||||
for module in modules_data:
|
||||
module_data = module['data']
|
||||
# Skip a current module from a remote repo if we defined
|
||||
# the list modules and a current module doesn't belong to it
|
||||
if repo_info.modules and \
|
||||
module_data['name'] not in repo_info.modules:
|
||||
if variant_info.modules and \
|
||||
module_data['name'] not in variant_info.modules:
|
||||
continue
|
||||
# we should add info about a module if the local repodata
|
||||
# doesn't have it
|
||||
@ -264,17 +277,18 @@ class CreateExtraRepo(PackagesGenerator):
|
||||
continue
|
||||
for rpm in module['data']['artifacts']['rpms']:
|
||||
# Empty repo_info.packages means that we will download
|
||||
# all of packages from repo including
|
||||
# all packages from repo including
|
||||
# the modularity packages
|
||||
if not repo_info.packages:
|
||||
if not variant_info.packages:
|
||||
break
|
||||
# skip a rpm if it doesn't belong to a processed repo
|
||||
if rpm not in packages:
|
||||
continue
|
||||
self._download_rpm_to_local_repo(
|
||||
package_location=packages[rpm].location_href,
|
||||
repo_info=repo_info,
|
||||
)
|
||||
for repo_info in variant_info.repos:
|
||||
self._download_rpm_to_local_repo(
|
||||
package_location=packages[rpm].location_href,
|
||||
repo_info=repo_info,
|
||||
)
|
||||
|
||||
def create_extra_repo(self):
|
||||
"""
|
||||
@ -284,34 +298,34 @@ class CreateExtraRepo(PackagesGenerator):
|
||||
3. Call `createrepo_c` which creates a local repo
|
||||
with the right repodata
|
||||
"""
|
||||
for repo_info in self.repos:
|
||||
packages = {} # type: Dict[AnyStr, cr.Package]
|
||||
repomd_records = self._get_repomd_records(
|
||||
repo_info=repo_info,
|
||||
)
|
||||
# parse the repodata (including modules.yaml.gz)
|
||||
modules_data = self._parse_repomd_records(
|
||||
repo_info=repo_info,
|
||||
repomd_records=repomd_records,
|
||||
packages=packages,
|
||||
)
|
||||
# convert the packages dict to more usable form
|
||||
# for future checking that a rpm from the module's artifacts
|
||||
# belongs to a processed repository
|
||||
packages = {
|
||||
f'{package.name}-{package.epoch}:{package.version}-'
|
||||
f'{package.release}.{package.arch}':
|
||||
package for package in packages.values()
|
||||
}
|
||||
self._download_modules(
|
||||
modules_data=modules_data,
|
||||
repo_info=repo_info,
|
||||
packages=packages,
|
||||
)
|
||||
self._download_packages(
|
||||
packages=packages,
|
||||
repo_info=repo_info,
|
||||
)
|
||||
for variant_info in self.variants:
|
||||
for repo_info in variant_info.repos:
|
||||
repomd_records = self._get_repomd_records(
|
||||
repo_info=repo_info,
|
||||
)
|
||||
packages_iterator = self.get_packages_iterator(repo_info)
|
||||
# parse the repodata (including modules.yaml.gz)
|
||||
modules_data = self._parse_module_repomd_record(
|
||||
repo_info=repo_info,
|
||||
repomd_records=repomd_records,
|
||||
)
|
||||
# convert the packages dict to more usable form
|
||||
# for future checking that a rpm from the module's artifacts
|
||||
# belongs to a processed repository
|
||||
packages = {
|
||||
f'{package.name}-{package.epoch}:{package.version}-'
|
||||
f'{package.release}.{package.arch}':
|
||||
package for package in packages_iterator
|
||||
}
|
||||
self._download_modules(
|
||||
modules_data=modules_data,
|
||||
variant_info=variant_info,
|
||||
packages=packages,
|
||||
)
|
||||
self._download_packages(
|
||||
packages=packages,
|
||||
variant_info=variant_info,
|
||||
)
|
||||
|
||||
self._dump_local_modules_yaml()
|
||||
self._create_local_extra_repo()
|
||||
@ -322,7 +336,6 @@ def create_parser():
|
||||
parser.add_argument(
|
||||
'--bs-auth-token',
|
||||
help='Auth token for Build System',
|
||||
required=True,
|
||||
)
|
||||
parser.add_argument(
|
||||
'--local-repo-path',
|
||||
@ -391,11 +404,16 @@ def cli_main():
|
||||
packages = packages.split()
|
||||
if repo.startswith('http://'):
|
||||
repos_info.append(
|
||||
ExtraRepoInfo(
|
||||
path=repo,
|
||||
folder=repo_folder,
|
||||
ExtraVariantInfo(
|
||||
name=repo_folder,
|
||||
arch=repo_arch,
|
||||
repos=[
|
||||
RepoInfo(
|
||||
path=repo,
|
||||
folder=repo_folder,
|
||||
is_remote=True,
|
||||
)
|
||||
],
|
||||
modules=modules,
|
||||
packages=packages,
|
||||
)
|
||||
@ -411,7 +429,7 @@ def cli_main():
|
||||
)
|
||||
)
|
||||
cer = CreateExtraRepo(
|
||||
repos=repos_info,
|
||||
variants=repos_info,
|
||||
bs_auth_token=args.bs_auth_token,
|
||||
local_repository_path=args.local_repo_path,
|
||||
clear_target_repo=args.clear_local_repo,
|
||||
|
@ -9,23 +9,60 @@ https://github.com/rpm-software-management/createrepo_c/blob/master/examples/pyt
|
||||
import argparse
|
||||
import gzip
|
||||
import json
|
||||
import logging
|
||||
import lzma
|
||||
import os
|
||||
import re
|
||||
import tempfile
|
||||
from collections import defaultdict
|
||||
from typing import AnyStr, Dict, List, Optional
|
||||
from itertools import tee
|
||||
from pathlib import Path
|
||||
from typing import (
|
||||
AnyStr,
|
||||
Dict,
|
||||
List,
|
||||
Any,
|
||||
Iterator,
|
||||
Optional,
|
||||
Tuple,
|
||||
Union,
|
||||
)
|
||||
|
||||
import binascii
|
||||
from urllib.parse import urljoin
|
||||
|
||||
import createrepo_c as cr
|
||||
import dnf.subject
|
||||
import hawkey
|
||||
import requests
|
||||
import rpm
|
||||
import yaml
|
||||
from createrepo_c import Package
|
||||
from dataclasses import dataclass
|
||||
from createrepo_c import (
|
||||
Package,
|
||||
PackageIterator,
|
||||
Repomd,
|
||||
RepomdRecord,
|
||||
)
|
||||
from dataclasses import dataclass, field
|
||||
from kobo.rpmlib import parse_nvra
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
|
||||
def _is_compressed_file(first_two_bytes: bytes, initial_bytes: bytes):
|
||||
return binascii.hexlify(first_two_bytes) == initial_bytes
|
||||
|
||||
|
||||
def is_gzip_file(first_two_bytes):
|
||||
return _is_compressed_file(
|
||||
first_two_bytes=first_two_bytes,
|
||||
initial_bytes=b'1f8b',
|
||||
)
|
||||
|
||||
|
||||
def is_xz_file(first_two_bytes):
|
||||
return _is_compressed_file(
|
||||
first_two_bytes=first_two_bytes,
|
||||
initial_bytes=b'fd37',
|
||||
)
|
||||
|
||||
from .gather_modules import is_gzip_file, is_xz_file
|
||||
|
||||
@dataclass
|
||||
class RepoInfo:
|
||||
@ -33,32 +70,76 @@ class RepoInfo:
|
||||
# 'appstream', 'baseos', etc.
|
||||
# Or 'http://koji.cloudlinux.com/mirrors/rhel_mirror' if you are
|
||||
# using remote repo
|
||||
path: AnyStr
|
||||
path: str
|
||||
# name of folder with a repodata folder. E.g. 'baseos', 'appstream', etc
|
||||
folder: AnyStr
|
||||
# name of repo. E.g. 'BaseOS', 'AppStream', etc
|
||||
name: AnyStr
|
||||
# architecture of repo. E.g. 'x86_64', 'i686', etc
|
||||
arch: AnyStr
|
||||
folder: str
|
||||
# Is a repo remote or local
|
||||
is_remote: bool
|
||||
# Is an reference repository (usually it's a RHEL repo)
|
||||
# Is a reference repository (usually it's a RHEL repo)
|
||||
# Layout of packages from such repository will be taken as example
|
||||
# Only layout of specific package (which don't exist
|
||||
# in an reference repository) will be taken as example
|
||||
# Only layout of specific package (which doesn't exist
|
||||
# in a reference repository) will be taken as example
|
||||
is_reference: bool = False
|
||||
# The packages from 'present' repo will be added to a variant.
|
||||
# The packages from 'absent' repo will be removed from a variant.
|
||||
repo_type: str = 'present'
|
||||
|
||||
|
||||
@dataclass
|
||||
class VariantInfo:
|
||||
# name of variant. E.g. 'BaseOS', 'AppStream', etc
|
||||
name: AnyStr
|
||||
# architecture of variant. E.g. 'x86_64', 'i686', etc
|
||||
arch: AnyStr
|
||||
# The packages which will be not added to a variant
|
||||
excluded_packages: List[str] = field(default_factory=list)
|
||||
# Repos of a variant
|
||||
repos: List[RepoInfo] = field(default_factory=list)
|
||||
|
||||
|
||||
class PackagesGenerator:
|
||||
|
||||
repo_arches = defaultdict(lambda: list(('noarch',)))
|
||||
addon_repos = {
|
||||
'x86_64': ['i686'],
|
||||
'ppc64le': [],
|
||||
'aarch64': [],
|
||||
's390x': [],
|
||||
'i686': [],
|
||||
}
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
repos: List[RepoInfo],
|
||||
variants: List[VariantInfo],
|
||||
excluded_packages: List[AnyStr],
|
||||
included_packages: List[AnyStr],
|
||||
):
|
||||
self.repos = repos
|
||||
self.variants = variants
|
||||
self.pkgs = dict()
|
||||
self.excluded_packages = excluded_packages
|
||||
self.included_packages = included_packages
|
||||
self.tmp_files = [] # type: list[Path]
|
||||
for arch, arch_list in self.addon_repos.items():
|
||||
self.repo_arches[arch].extend(arch_list)
|
||||
self.repo_arches[arch].append(arch)
|
||||
|
||||
def __del__(self):
|
||||
for tmp_file in self.tmp_files:
|
||||
if tmp_file.exists():
|
||||
tmp_file.unlink()
|
||||
|
||||
@staticmethod
|
||||
def _get_full_repo_path(repo_info: RepoInfo):
|
||||
result = os.path.join(
|
||||
repo_info.path,
|
||||
repo_info.folder
|
||||
)
|
||||
if repo_info.is_remote:
|
||||
result = urljoin(
|
||||
repo_info.path + '/',
|
||||
repo_info.folder,
|
||||
)
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def _warning_callback(warning_type, message):
|
||||
@ -68,8 +149,7 @@ class PackagesGenerator:
|
||||
print(f'Warning message: "{message}"; warning type: "{warning_type}"')
|
||||
return True
|
||||
|
||||
@staticmethod
|
||||
def _get_remote_file_content(file_url: AnyStr) -> AnyStr:
|
||||
def get_remote_file_content(self, file_url: AnyStr) -> AnyStr:
|
||||
"""
|
||||
Get content from a remote file and write it to a temp file
|
||||
:param file_url: url of a remote file
|
||||
@ -82,89 +162,27 @@ class PackagesGenerator:
|
||||
file_request.raise_for_status()
|
||||
with tempfile.NamedTemporaryFile(delete=False) as file_stream:
|
||||
file_stream.write(file_request.content)
|
||||
self.tmp_files.append(Path(file_stream.name))
|
||||
return file_stream.name
|
||||
|
||||
@staticmethod
|
||||
def _parse_repomd(repomd_file_path: AnyStr) -> cr.Repomd:
|
||||
def _parse_repomd(repomd_file_path: AnyStr) -> Repomd:
|
||||
"""
|
||||
Parse file repomd.xml and create object Repomd
|
||||
:param repomd_file_path: path to local repomd.xml
|
||||
"""
|
||||
return cr.Repomd(repomd_file_path)
|
||||
|
||||
def _parse_primary_file(
|
||||
self,
|
||||
primary_file_path: AnyStr,
|
||||
packages: Dict[AnyStr, cr.Package],
|
||||
) -> None:
|
||||
"""
|
||||
Parse primary.xml.gz, take from it info about packages and put it to
|
||||
dict packages
|
||||
:param primary_file_path: path to local primary.xml.gz
|
||||
:param packages: dictionary which will be contain info about packages
|
||||
from repository
|
||||
"""
|
||||
cr.xml_parse_primary(
|
||||
path=primary_file_path,
|
||||
pkgcb=lambda pkg: packages.update({
|
||||
pkg.pkgId: pkg,
|
||||
}),
|
||||
do_files=False,
|
||||
warningcb=self._warning_callback,
|
||||
)
|
||||
|
||||
def _parse_filelists_file(
|
||||
self,
|
||||
filelists_file_path: AnyStr,
|
||||
packages: Dict[AnyStr, cr.Package],
|
||||
) -> None:
|
||||
"""
|
||||
Parse filelists.xml.gz, take from it info about packages and put it to
|
||||
dict packages
|
||||
:param filelists_file_path: path to local filelists.xml.gz
|
||||
:param packages: dictionary which will be contain info about packages
|
||||
from repository
|
||||
"""
|
||||
cr.xml_parse_filelists(
|
||||
path=filelists_file_path,
|
||||
newpkgcb=lambda pkg_id, name, arch: packages.get(
|
||||
pkg_id,
|
||||
None,
|
||||
),
|
||||
warningcb=self._warning_callback,
|
||||
)
|
||||
|
||||
def _parse_other_file(
|
||||
self,
|
||||
other_file_path: AnyStr,
|
||||
packages: Dict[AnyStr, cr.Package],
|
||||
) -> None:
|
||||
"""
|
||||
Parse other.xml.gz, take from it info about packages and put it to
|
||||
dict packages
|
||||
:param other_file_path: path to local other.xml.gz
|
||||
:param packages: dictionary which will be contain info about packages
|
||||
from repository
|
||||
"""
|
||||
cr.xml_parse_other(
|
||||
path=other_file_path,
|
||||
newpkgcb=lambda pkg_id, name, arch: packages.get(
|
||||
pkg_id,
|
||||
None,
|
||||
),
|
||||
warningcb=self._warning_callback,
|
||||
)
|
||||
return Repomd(repomd_file_path)
|
||||
|
||||
@classmethod
|
||||
def _parse_modules_file(
|
||||
cls,
|
||||
modules_file_path: AnyStr,
|
||||
|
||||
) -> List[Dict]:
|
||||
) -> Iterator[Any]:
|
||||
"""
|
||||
Parse modules.yaml.gz and returns parsed data
|
||||
:param modules_file_path: path to local modules.yaml.gz
|
||||
:return: List of dict for an each modules in a repo
|
||||
:return: List of dict for each module in a repo
|
||||
"""
|
||||
|
||||
with open(modules_file_path, 'rb') as modules_file:
|
||||
@ -181,7 +199,7 @@ class PackagesGenerator:
|
||||
def _get_repomd_records(
|
||||
self,
|
||||
repo_info: RepoInfo,
|
||||
) -> List[cr.RepomdRecord]:
|
||||
) -> List[RepomdRecord]:
|
||||
"""
|
||||
Get, parse file repomd.xml and extract from it repomd records
|
||||
:param repo_info: structure which contains info about a current repo
|
||||
@ -194,36 +212,37 @@ class PackagesGenerator:
|
||||
'repomd.xml',
|
||||
)
|
||||
if repo_info.is_remote:
|
||||
repomd_file_path = self._get_remote_file_content(repomd_file_path)
|
||||
else:
|
||||
repomd_file_path = repomd_file_path
|
||||
repomd_file_path = urljoin(
|
||||
urljoin(
|
||||
repo_info.path + '/',
|
||||
repo_info.folder
|
||||
) + '/',
|
||||
'repodata/repomd.xml'
|
||||
)
|
||||
repomd_file_path = self.get_remote_file_content(repomd_file_path)
|
||||
|
||||
repomd_object = self._parse_repomd(repomd_file_path)
|
||||
if repo_info.is_remote:
|
||||
os.remove(repomd_file_path)
|
||||
return repomd_object.records
|
||||
|
||||
def _parse_repomd_records(
|
||||
def _download_repomd_records(
|
||||
self,
|
||||
repo_info: RepoInfo,
|
||||
repomd_records: List[cr.RepomdRecord],
|
||||
packages: Dict[AnyStr, cr.Package],
|
||||
) -> Optional[List[Dict]]:
|
||||
repomd_records: List[RepomdRecord],
|
||||
repomd_records_dict: Dict[str, str],
|
||||
):
|
||||
"""
|
||||
Parse repomd records and extract from repodata file info about packages
|
||||
Download repomd records
|
||||
:param repo_info: structure which contains info about a current repo
|
||||
:param repomd_records: list with repomd records
|
||||
:param packages: dictionary which will be contain info about packages
|
||||
from repository
|
||||
:return: List of dict for an each modules in a repo if it contains
|
||||
modules info otherwise returns None
|
||||
:param repomd_records_dict: dict with paths to repodata files
|
||||
"""
|
||||
modules_data = []
|
||||
for repomd_record in repomd_records:
|
||||
if repomd_record.type not in (
|
||||
'primary',
|
||||
'filelists',
|
||||
'other',
|
||||
'modules',
|
||||
):
|
||||
continue
|
||||
repomd_record_file_path = os.path.join(
|
||||
@ -232,25 +251,35 @@ class PackagesGenerator:
|
||||
repomd_record.location_href,
|
||||
)
|
||||
if repo_info.is_remote:
|
||||
repomd_record_file_path = self._get_remote_file_content(
|
||||
repomd_record_file_path,
|
||||
)
|
||||
if repomd_record.type == 'modules':
|
||||
modules_data = self._parse_modules_file(
|
||||
repomd_record_file_path,
|
||||
)
|
||||
else:
|
||||
parse_file_method = getattr(
|
||||
self,
|
||||
f'_parse_{repomd_record.type}_file'
|
||||
)
|
||||
parse_file_method(
|
||||
repomd_record_file_path,
|
||||
packages,
|
||||
)
|
||||
repomd_record_file_path = self.get_remote_file_content(
|
||||
repomd_record_file_path)
|
||||
repomd_records_dict[repomd_record.type] = repomd_record_file_path
|
||||
|
||||
def _parse_module_repomd_record(
|
||||
self,
|
||||
repo_info: RepoInfo,
|
||||
repomd_records: List[RepomdRecord],
|
||||
) -> List[Dict]:
|
||||
"""
|
||||
Download repomd records
|
||||
:param repo_info: structure which contains info about a current repo
|
||||
:param repomd_records: list with repomd records
|
||||
"""
|
||||
for repomd_record in repomd_records:
|
||||
if repomd_record.type != 'modules':
|
||||
continue
|
||||
repomd_record_file_path = os.path.join(
|
||||
repo_info.path,
|
||||
repo_info.folder,
|
||||
repomd_record.location_href,
|
||||
)
|
||||
if repo_info.is_remote:
|
||||
os.remove(repomd_record_file_path)
|
||||
return list(modules_data)
|
||||
repomd_record_file_path = self.get_remote_file_content(
|
||||
repomd_record_file_path)
|
||||
return list(self._parse_modules_file(
|
||||
repomd_record_file_path,
|
||||
))
|
||||
return []
|
||||
|
||||
@staticmethod
|
||||
def compare_pkgs_version(package_1: Package, package_2: Package) -> int:
|
||||
@ -266,183 +295,162 @@ class PackagesGenerator:
|
||||
)
|
||||
return rpm.labelCompare(version_tuple_1, version_tuple_2)
|
||||
|
||||
def get_packages_iterator(
|
||||
self,
|
||||
repo_info: RepoInfo,
|
||||
) -> Union[PackageIterator, Iterator]:
|
||||
full_repo_path = self._get_full_repo_path(repo_info)
|
||||
pkgs_iterator = self.pkgs.get(full_repo_path)
|
||||
if pkgs_iterator is None:
|
||||
repomd_records = self._get_repomd_records(
|
||||
repo_info=repo_info,
|
||||
)
|
||||
repomd_records_dict = {} # type: Dict[str, str]
|
||||
self._download_repomd_records(
|
||||
repo_info=repo_info,
|
||||
repomd_records=repomd_records,
|
||||
repomd_records_dict=repomd_records_dict,
|
||||
)
|
||||
pkgs_iterator = PackageIterator(
|
||||
primary_path=repomd_records_dict['primary'],
|
||||
filelists_path=repomd_records_dict['filelists'],
|
||||
other_path=repomd_records_dict['other'],
|
||||
warningcb=self._warning_callback,
|
||||
)
|
||||
pkgs_iterator, self.pkgs[full_repo_path] = tee(pkgs_iterator)
|
||||
return pkgs_iterator
|
||||
|
||||
def get_package_arch(
|
||||
self,
|
||||
package: Package,
|
||||
variant_arch: str,
|
||||
) -> str:
|
||||
result = variant_arch
|
||||
if package.arch in self.repo_arches[variant_arch]:
|
||||
result = package.arch
|
||||
return result
|
||||
|
||||
def is_skipped_module_package(
|
||||
self,
|
||||
package: Package,
|
||||
variant_arch: str,
|
||||
) -> bool:
|
||||
package_key = self.get_package_key(package, variant_arch)
|
||||
# Even a module package will be added to packages.json if
|
||||
# it presents in the list of included packages
|
||||
return 'module' in package.release and not any(
|
||||
re.search(
|
||||
f'^{included_pkg}$',
|
||||
package_key,
|
||||
) or included_pkg in (package.name, package_key)
|
||||
for included_pkg in self.included_packages
|
||||
)
|
||||
|
||||
def is_excluded_package(
|
||||
self,
|
||||
package: Package,
|
||||
variant_arch: str,
|
||||
excluded_packages: List[str],
|
||||
) -> bool:
|
||||
package_key = self.get_package_key(package, variant_arch)
|
||||
return any(
|
||||
re.search(
|
||||
f'^{excluded_pkg}$',
|
||||
package_key,
|
||||
) or excluded_pkg in (package.name, package_key)
|
||||
for excluded_pkg in excluded_packages
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def get_source_rpm_name(package: Package) -> str:
|
||||
source_rpm_nvra = parse_nvra(package.rpm_sourcerpm)
|
||||
return source_rpm_nvra['name']
|
||||
|
||||
def get_package_key(self, package: Package, variant_arch: str) -> str:
|
||||
return (
|
||||
f'{package.name}.'
|
||||
f'{self.get_package_arch(package, variant_arch)}'
|
||||
)
|
||||
|
||||
def generate_packages_json(
|
||||
self
|
||||
) -> Dict[AnyStr, Dict[AnyStr, Dict[AnyStr, List[AnyStr]]]]:
|
||||
"""
|
||||
Generate packages.json
|
||||
"""
|
||||
packages_json = defaultdict(
|
||||
lambda: defaultdict(
|
||||
lambda: defaultdict(
|
||||
list,
|
||||
)
|
||||
)
|
||||
)
|
||||
all_packages = defaultdict(lambda: {'variants': list()})
|
||||
for repo_info in self.repos:
|
||||
repo_arches = [
|
||||
repo_info.arch,
|
||||
'noarch',
|
||||
]
|
||||
if repo_info.arch == 'x86_64':
|
||||
repo_arches.extend([
|
||||
'i686',
|
||||
'i386',
|
||||
])
|
||||
packages = {} # type: Dict[AnyStr, cr.Package]
|
||||
repomd_records = self._get_repomd_records(
|
||||
repo_info=repo_info,
|
||||
)
|
||||
self._parse_repomd_records(
|
||||
repo_info=repo_info,
|
||||
repomd_records=repomd_records,
|
||||
packages=packages,
|
||||
)
|
||||
for package in packages.values():
|
||||
if package.arch not in repo_arches:
|
||||
package_arch = repo_info.arch
|
||||
else:
|
||||
package_arch = package.arch
|
||||
package_key = f'{package.name}.{package_arch}'
|
||||
if 'module' in package.release and not any(
|
||||
re.search(included_package, package.name)
|
||||
for included_package in self.included_packages
|
||||
):
|
||||
# Even a module package will be added to packages.json if
|
||||
# it presents in the list of included packages
|
||||
continue
|
||||
if package_key not in all_packages:
|
||||
all_packages[package_key]['variants'].append(
|
||||
repo_info.name
|
||||
)
|
||||
all_packages[package_key]['arch'] = repo_info.arch
|
||||
all_packages[package_key]['package'] = package
|
||||
all_packages[package_key]['type'] = repo_info.is_reference
|
||||
# replace an older package if it's not reference or
|
||||
# a newer package is from reference repo
|
||||
elif (not all_packages[package_key]['type'] or
|
||||
all_packages[package_key]['type'] ==
|
||||
repo_info.is_reference) and \
|
||||
self.compare_pkgs_version(
|
||||
packages = defaultdict(lambda: defaultdict(lambda: {
|
||||
'variants': list(),
|
||||
}))
|
||||
for variant_info in self.variants:
|
||||
for repo_info in variant_info.repos:
|
||||
is_reference = repo_info.is_reference
|
||||
for package in self.get_packages_iterator(repo_info=repo_info):
|
||||
if self.is_skipped_module_package(
|
||||
package=package,
|
||||
variant_arch=variant_info.arch,
|
||||
):
|
||||
continue
|
||||
if self.is_excluded_package(
|
||||
package=package,
|
||||
variant_arch=variant_info.arch,
|
||||
excluded_packages=self.excluded_packages,
|
||||
):
|
||||
continue
|
||||
if self.is_excluded_package(
|
||||
package=package,
|
||||
variant_arch=variant_info.arch,
|
||||
excluded_packages=variant_info.excluded_packages,
|
||||
):
|
||||
continue
|
||||
package_key = self.get_package_key(
|
||||
package,
|
||||
all_packages[package_key]['package']
|
||||
) > 0:
|
||||
all_packages[package_key]['variants'] = [repo_info.name]
|
||||
all_packages[package_key]['arch'] = repo_info.arch
|
||||
all_packages[package_key]['package'] = package
|
||||
elif self.compare_pkgs_version(
|
||||
package,
|
||||
all_packages[package_key]['package']
|
||||
) == 0:
|
||||
all_packages[package_key]['variants'].append(
|
||||
repo_info.name
|
||||
variant_info.arch,
|
||||
)
|
||||
|
||||
for package_dict in all_packages.values():
|
||||
repo_arches = [
|
||||
package_dict['arch'],
|
||||
'noarch',
|
||||
]
|
||||
if package_dict['arch'] == 'x86_64':
|
||||
repo_arches.extend([
|
||||
'i686',
|
||||
'i386',
|
||||
])
|
||||
for variant in package_dict['variants']:
|
||||
repo_arch = package_dict['arch']
|
||||
package = package_dict['package']
|
||||
package_name = package.name
|
||||
if package.arch not in repo_arches:
|
||||
package_arch = package_dict['arch']
|
||||
else:
|
||||
package_arch = package.arch
|
||||
if any(re.search(excluded_package, package_name)
|
||||
for excluded_package in self.excluded_packages):
|
||||
continue
|
||||
src_package_name = dnf.subject.Subject(
|
||||
package.rpm_sourcerpm,
|
||||
).get_nevra_possibilities(
|
||||
forms=hawkey.FORM_NEVRA,
|
||||
)
|
||||
if len(src_package_name) > 1:
|
||||
# We should stop utility if we can't get exact name of srpm
|
||||
raise ValueError(
|
||||
'We can\'t get exact name of srpm '
|
||||
f'by its NEVRA "{package.rpm_sourcerpm}"'
|
||||
)
|
||||
else:
|
||||
src_package_name = src_package_name[0].name
|
||||
pkgs_list = packages_json[variant][
|
||||
repo_arch][src_package_name]
|
||||
added_pkg = f'{package_name}.{package_arch}'
|
||||
if added_pkg not in pkgs_list:
|
||||
pkgs_list.append(added_pkg)
|
||||
return packages_json
|
||||
source_rpm_name = self.get_source_rpm_name(package)
|
||||
package_info = packages[source_rpm_name][package_key]
|
||||
if 'is_reference' not in package_info:
|
||||
package_info['variants'].append(variant_info.name)
|
||||
package_info['is_reference'] = is_reference
|
||||
package_info['package'] = package
|
||||
elif not package_info['is_reference'] or \
|
||||
package_info['is_reference'] == is_reference and \
|
||||
self.compare_pkgs_version(
|
||||
package_1=package,
|
||||
package_2=package_info['package'],
|
||||
) > 0:
|
||||
package_info['variants'] = [variant_info.name]
|
||||
package_info['is_reference'] = is_reference
|
||||
package_info['package'] = package
|
||||
elif self.compare_pkgs_version(
|
||||
package_1=package,
|
||||
package_2=package_info['package'],
|
||||
) == 0 and repo_info.repo_type != 'absent':
|
||||
package_info['variants'].append(variant_info.name)
|
||||
result = defaultdict(lambda: defaultdict(
|
||||
lambda: defaultdict(list),
|
||||
))
|
||||
for variant_info in self.variants:
|
||||
for source_rpm_name, packages_info in packages.items():
|
||||
for package_key, package_info in packages_info.items():
|
||||
variant_pkgs = result[variant_info.name][variant_info.arch]
|
||||
if variant_info.name not in package_info['variants']:
|
||||
continue
|
||||
variant_pkgs[source_rpm_name].append(package_key)
|
||||
return result
|
||||
|
||||
|
||||
def create_parser():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
'--repo-path',
|
||||
action='append',
|
||||
help='Path to a folder with repofolders. E.g. "/var/repos" or '
|
||||
'"http://koji.cloudlinux.com/mirrors/rhel_mirror"',
|
||||
required=True,
|
||||
)
|
||||
parser.add_argument(
|
||||
'--repo-folder',
|
||||
action='append',
|
||||
help='A folder which contains folder repodata . E.g. "baseos-stream"',
|
||||
required=True,
|
||||
)
|
||||
parser.add_argument(
|
||||
'--repo-arch',
|
||||
action='append',
|
||||
help='What architecture packages a repository contains. E.g. "x86_64"',
|
||||
required=True,
|
||||
)
|
||||
parser.add_argument(
|
||||
'--repo-name',
|
||||
action='append',
|
||||
help='Name of a repository. E.g. "AppStream"',
|
||||
required=True,
|
||||
)
|
||||
parser.add_argument(
|
||||
'--is-remote',
|
||||
action='append',
|
||||
type=str,
|
||||
help='A repository is remote or local',
|
||||
choices=['yes', 'no'],
|
||||
required=True,
|
||||
)
|
||||
parser.add_argument(
|
||||
'--is-reference',
|
||||
action='append',
|
||||
type=str,
|
||||
help='A repository is used as reference for packages layout',
|
||||
choices=['yes', 'no'],
|
||||
required=True,
|
||||
)
|
||||
parser.add_argument(
|
||||
'--excluded-packages',
|
||||
nargs='+',
|
||||
type=str,
|
||||
default=[],
|
||||
help='A list of globally excluded packages from generated json.'
|
||||
'All of list elements should be separated by space',
|
||||
required=False,
|
||||
)
|
||||
parser.add_argument(
|
||||
'--included-packages',
|
||||
nargs='+',
|
||||
type=str,
|
||||
default=[],
|
||||
help='A list of globally included packages from generated json.'
|
||||
'All of list elements should be separated by space',
|
||||
'-c',
|
||||
'--config',
|
||||
type=Path,
|
||||
default=Path('config.yaml'),
|
||||
required=False,
|
||||
help='Path to a config',
|
||||
)
|
||||
parser.add_argument(
|
||||
'-o',
|
||||
'--json-output-path',
|
||||
type=str,
|
||||
help='Full path to output json file',
|
||||
@ -452,30 +460,45 @@ def create_parser():
|
||||
return parser
|
||||
|
||||
|
||||
def read_config(config_path: Path) -> Optional[Dict]:
|
||||
if not config_path.exists():
|
||||
logging.error('A config by path "%s" does not exist', config_path)
|
||||
exit(1)
|
||||
with config_path.open('r') as config_fd:
|
||||
return yaml.safe_load(config_fd)
|
||||
|
||||
|
||||
def process_config(config_data: Dict) -> Tuple[
|
||||
List[VariantInfo],
|
||||
List[str],
|
||||
List[str],
|
||||
]:
|
||||
excluded_packages = config_data.get('excluded_packages', [])
|
||||
included_packages = config_data.get('included_packages', [])
|
||||
variants = [VariantInfo(
|
||||
name=variant_name,
|
||||
arch=variant_info['arch'],
|
||||
excluded_packages=variant_info.get('excluded_packages', []),
|
||||
repos=[RepoInfo(
|
||||
path=variant_repo['path'],
|
||||
folder=variant_repo['folder'],
|
||||
is_remote=variant_repo['remote'],
|
||||
is_reference=variant_repo['reference'],
|
||||
repo_type=variant_repo.get('repo_type', 'present'),
|
||||
) for variant_repo in variant_info['repos']]
|
||||
) for variant_name, variant_info in config_data['variants'].items()]
|
||||
return variants, excluded_packages, included_packages
|
||||
|
||||
|
||||
def cli_main():
|
||||
args = create_parser().parse_args()
|
||||
repos = []
|
||||
for repo_path, repo_folder, repo_name, \
|
||||
repo_arch, is_remote, is_reference in zip(
|
||||
args.repo_path,
|
||||
args.repo_folder,
|
||||
args.repo_name,
|
||||
args.repo_arch,
|
||||
args.is_remote,
|
||||
args.is_reference,
|
||||
):
|
||||
repos.append(RepoInfo(
|
||||
path=repo_path,
|
||||
folder=repo_folder,
|
||||
name=repo_name,
|
||||
arch=repo_arch,
|
||||
is_remote=True if is_remote == 'yes' else False,
|
||||
is_reference=True if is_reference == 'yes' else False
|
||||
))
|
||||
variants, excluded_packages, included_packages = process_config(
|
||||
config_data=read_config(args.config)
|
||||
)
|
||||
pg = PackagesGenerator(
|
||||
repos=repos,
|
||||
excluded_packages=args.excluded_packages,
|
||||
included_packages=args.included_packages,
|
||||
variants=variants,
|
||||
excluded_packages=excluded_packages,
|
||||
included_packages=included_packages,
|
||||
)
|
||||
result = pg.generate_packages_json()
|
||||
with open(args.json_output_path, 'w') as packages_file:
|
||||
|
@ -16,7 +16,10 @@ def parse_args():
|
||||
parser = argparse.ArgumentParser(add_help=True)
|
||||
|
||||
parser.add_argument(
|
||||
"compose", metavar="<compose-path>", nargs=1, help="path to compose",
|
||||
"compose",
|
||||
metavar="<compose-path>",
|
||||
nargs=1,
|
||||
help="path to compose",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--arch",
|
||||
|
@ -14,6 +14,9 @@ def send(cmd, data):
|
||||
topic = "compose.%s" % cmd.replace("-", ".").lower()
|
||||
try:
|
||||
msg = fedora_messaging.api.Message(topic="pungi.{}".format(topic), body=data)
|
||||
if cmd == "ostree":
|
||||
# https://pagure.io/fedora-infrastructure/issue/10899
|
||||
msg.priority = 3
|
||||
fedora_messaging.api.publish(msg)
|
||||
except fedora_messaging.exceptions.PublishReturned as e:
|
||||
print("Fedora Messaging broker rejected message %s: %s" % (msg.id, e))
|
||||
|
@ -1,70 +1,150 @@
|
||||
import binascii
|
||||
import gzip
|
||||
import lzma
|
||||
import os
|
||||
from argparse import ArgumentParser, FileType
|
||||
from glob import iglob
|
||||
from io import BytesIO
|
||||
from pathlib import Path
|
||||
from typing import List, AnyStr
|
||||
from typing import List, AnyStr, Iterable, Union, Optional
|
||||
import logging
|
||||
from urllib.parse import urljoin
|
||||
|
||||
import yaml
|
||||
import createrepo_c as cr
|
||||
from typing.io import BinaryIO
|
||||
|
||||
from .create_packages_json import PackagesGenerator, is_gzip_file, is_xz_file
|
||||
|
||||
def _is_compressed_file(first_two_bytes: bytes, initial_bytes: bytes):
|
||||
return binascii.hexlify(first_two_bytes) == initial_bytes
|
||||
EMPTY_FILE = '.empty'
|
||||
|
||||
|
||||
def is_gzip_file(first_two_bytes):
|
||||
return _is_compressed_file(
|
||||
first_two_bytes=first_two_bytes,
|
||||
initial_bytes=b'1f8b',
|
||||
)
|
||||
def read_modules_yaml(modules_yaml_path: Union[str, Path]) -> BytesIO:
|
||||
with open(modules_yaml_path, 'rb') as fp:
|
||||
return BytesIO(fp.read())
|
||||
|
||||
|
||||
def is_xz_file(first_two_bytes):
|
||||
return _is_compressed_file(
|
||||
first_two_bytes=first_two_bytes,
|
||||
initial_bytes=b'fd37',
|
||||
)
|
||||
|
||||
|
||||
def grep_list_of_modules_yaml_gz(repo_path: AnyStr) -> List[BytesIO]:
|
||||
def grep_list_of_modules_yaml(repos_path: AnyStr) -> Iterable[BytesIO]:
|
||||
"""
|
||||
Find all of valid *modules.yaml.gz in repos
|
||||
:param repo_path: path to a directory which contains repodirs
|
||||
:return: list of content from *modules.yaml.gz
|
||||
:param repos_path: path to a directory which contains repo dirs
|
||||
:return: iterable object of content from *modules.yaml.*
|
||||
"""
|
||||
|
||||
result = []
|
||||
for path in Path(repo_path).rglob('repomd.xml'):
|
||||
repo_dir_path = Path(path.parent).parent
|
||||
repomd_obj = cr.Repomd(str(path))
|
||||
for record in repomd_obj.records:
|
||||
if record.type != 'modules':
|
||||
continue
|
||||
with open(os.path.join(
|
||||
repo_dir_path,
|
||||
return (
|
||||
read_modules_yaml_from_specific_repo(repo_path=Path(path).parent)
|
||||
for path in iglob(
|
||||
str(Path(repos_path).joinpath('**/repodata')),
|
||||
recursive=True
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def _is_remote(path: str):
|
||||
return any(str(path).startswith(protocol)
|
||||
for protocol in ('http', 'https'))
|
||||
|
||||
|
||||
def read_modules_yaml_from_specific_repo(
|
||||
repo_path: Union[str, Path]
|
||||
) -> Optional[BytesIO]:
|
||||
"""
|
||||
Read modules_yaml from a specific repo (remote or local)
|
||||
:param repo_path: path/url to a specific repo
|
||||
(final dir should contain dir `repodata`)
|
||||
:return: iterable object of content from *modules.yaml.*
|
||||
"""
|
||||
|
||||
if _is_remote(repo_path):
|
||||
repomd_url = urljoin(
|
||||
repo_path + '/',
|
||||
'repodata/repomd.xml',
|
||||
)
|
||||
packages_generator = PackagesGenerator(
|
||||
variants=[],
|
||||
excluded_packages=[],
|
||||
included_packages=[],
|
||||
)
|
||||
repomd_file_path = packages_generator.get_remote_file_content(
|
||||
file_url=repomd_url
|
||||
)
|
||||
else:
|
||||
repomd_file_path = os.path.join(
|
||||
repo_path,
|
||||
'repodata/repomd.xml',
|
||||
)
|
||||
repomd_obj = cr.Repomd(str(repomd_file_path))
|
||||
for record in repomd_obj.records:
|
||||
if record.type != 'modules':
|
||||
continue
|
||||
else:
|
||||
if _is_remote(repo_path):
|
||||
modules_yaml_url = urljoin(
|
||||
repo_path + '/',
|
||||
record.location_href,
|
||||
), 'rb') as fp:
|
||||
result.append(
|
||||
BytesIO(fp.read())
|
||||
)
|
||||
return result
|
||||
packages_generator = PackagesGenerator(
|
||||
variants=[],
|
||||
excluded_packages=[],
|
||||
included_packages=[],
|
||||
)
|
||||
modules_yaml_path = packages_generator.get_remote_file_content(
|
||||
file_url=modules_yaml_url
|
||||
)
|
||||
else:
|
||||
modules_yaml_path = os.path.join(
|
||||
repo_path,
|
||||
record.location_href,
|
||||
)
|
||||
return read_modules_yaml(modules_yaml_path=modules_yaml_path)
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
def collect_modules(modules_paths: List[BinaryIO], target_dir: str):
|
||||
def _should_grep_defaults(
|
||||
document_type: str,
|
||||
grep_only_modules_data: bool = False,
|
||||
grep_only_modules_defaults_data: bool = False,
|
||||
) -> bool:
|
||||
xor_flag = grep_only_modules_data == grep_only_modules_defaults_data
|
||||
if document_type == 'modulemd' and (xor_flag or grep_only_modules_data):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _should_grep_modules(
|
||||
document_type: str,
|
||||
grep_only_modules_data: bool = False,
|
||||
grep_only_modules_defaults_data: bool = False,
|
||||
) -> bool:
|
||||
xor_flag = grep_only_modules_data == grep_only_modules_defaults_data
|
||||
if document_type == 'modulemd-defaults' and \
|
||||
(xor_flag or grep_only_modules_defaults_data):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def collect_modules(
|
||||
modules_paths: List[BinaryIO],
|
||||
target_dir: str,
|
||||
grep_only_modules_data: bool = False,
|
||||
grep_only_modules_defaults_data: bool = False,
|
||||
):
|
||||
"""
|
||||
Read given modules.yaml.gz files and export modules
|
||||
and modulemd files from it.
|
||||
Returns:
|
||||
object:
|
||||
"""
|
||||
xor_flag = grep_only_modules_defaults_data is grep_only_modules_data
|
||||
modules_path = os.path.join(target_dir, 'modules')
|
||||
module_defaults_path = os.path.join(target_dir, 'module_defaults')
|
||||
os.makedirs(modules_path, exist_ok=True)
|
||||
os.makedirs(module_defaults_path, exist_ok=True)
|
||||
if grep_only_modules_data or xor_flag:
|
||||
os.makedirs(modules_path, exist_ok=True)
|
||||
if grep_only_modules_defaults_data or xor_flag:
|
||||
os.makedirs(module_defaults_path, exist_ok=True)
|
||||
# Defaults modules can be empty, but pungi detects
|
||||
# empty folder while copying and raises the exception in this case
|
||||
Path(os.path.join(module_defaults_path, EMPTY_FILE)).touch()
|
||||
|
||||
for module_file in modules_paths:
|
||||
data = module_file.read()
|
||||
@ -74,11 +154,20 @@ def collect_modules(modules_paths: List[BinaryIO], target_dir: str):
|
||||
data = lzma.decompress(data)
|
||||
documents = yaml.load_all(data, Loader=yaml.BaseLoader)
|
||||
for doc in documents:
|
||||
if doc['document'] == 'modulemd-defaults':
|
||||
path = None
|
||||
if _should_grep_modules(
|
||||
doc['document'],
|
||||
grep_only_modules_data,
|
||||
grep_only_modules_defaults_data,
|
||||
):
|
||||
name = f"{doc['data']['module']}.yaml"
|
||||
path = os.path.join(module_defaults_path, name)
|
||||
logging.info('Found %s module defaults', name)
|
||||
else:
|
||||
elif _should_grep_defaults(
|
||||
doc['document'],
|
||||
grep_only_modules_data,
|
||||
grep_only_modules_defaults_data,
|
||||
):
|
||||
# pungi.phases.pkgset.sources.source_koji.get_koji_modules
|
||||
stream = doc['data']['stream'].replace('-', '_')
|
||||
doc_data = doc['data']
|
||||
@ -100,13 +189,24 @@ def collect_modules(modules_paths: List[BinaryIO], target_dir: str):
|
||||
'RPM %s does not have explicit list of artifacts',
|
||||
name
|
||||
)
|
||||
|
||||
with open(path, 'w') as f:
|
||||
yaml.dump(doc, f, default_flow_style=False)
|
||||
if path is not None:
|
||||
with open(path, 'w') as f:
|
||||
yaml.dump(doc, f, default_flow_style=False)
|
||||
|
||||
|
||||
def cli_main():
|
||||
parser = ArgumentParser()
|
||||
content_type_group = parser.add_mutually_exclusive_group(required=False)
|
||||
content_type_group.add_argument(
|
||||
'--get-only-modules-data',
|
||||
action='store_true',
|
||||
help='Parse and get only modules data',
|
||||
)
|
||||
content_type_group.add_argument(
|
||||
'--get-only-modules-defaults-data',
|
||||
action='store_true',
|
||||
help='Parse and get only modules_defaults data',
|
||||
)
|
||||
path_group = parser.add_mutually_exclusive_group(required=True)
|
||||
path_group.add_argument(
|
||||
'-p', '--path',
|
||||
@ -121,16 +221,33 @@ def cli_main():
|
||||
default=None,
|
||||
help='Path to a directory which contains repodirs. E.g. /var/repos'
|
||||
)
|
||||
path_group.add_argument(
|
||||
'-rd', '--repodata-paths',
|
||||
required=False,
|
||||
type=str,
|
||||
nargs='+',
|
||||
default=[],
|
||||
help='Paths/urls to the directories with directory `repodata`',
|
||||
)
|
||||
parser.add_argument('-t', '--target', required=True)
|
||||
|
||||
namespace = parser.parse_args()
|
||||
if namespace.repo_path is None:
|
||||
if namespace.repodata_paths:
|
||||
modules = []
|
||||
for repodata_path in namespace.repodata_paths:
|
||||
modules.append(read_modules_yaml_from_specific_repo(
|
||||
repodata_path,
|
||||
))
|
||||
elif namespace.path is not None:
|
||||
modules = namespace.path
|
||||
else:
|
||||
modules = grep_list_of_modules_yaml_gz(namespace.repo_path)
|
||||
modules = grep_list_of_modules_yaml(namespace.repo_path)
|
||||
modules = list(filter(lambda i: i is not None, modules))
|
||||
collect_modules(
|
||||
modules,
|
||||
namespace.target,
|
||||
namespace.get_only_modules_data,
|
||||
namespace.get_only_modules_defaults_data,
|
||||
)
|
||||
|
||||
|
||||
|
@ -1,39 +1,53 @@
|
||||
import re
|
||||
from argparse import ArgumentParser
|
||||
|
||||
import os
|
||||
from glob import iglob
|
||||
from typing import List
|
||||
from pathlib import Path
|
||||
|
||||
from attr import dataclass
|
||||
from dataclasses import dataclass
|
||||
from productmd.common import parse_nvra
|
||||
|
||||
|
||||
@dataclass
|
||||
class Package:
|
||||
nvra: str
|
||||
path: str
|
||||
nvra: dict
|
||||
path: Path
|
||||
|
||||
|
||||
def search_rpms(top_dir) -> List[Package]:
|
||||
def search_rpms(top_dir: Path) -> List[Package]:
|
||||
"""
|
||||
Search for all *.rpm files recursively
|
||||
in given top directory
|
||||
Returns:
|
||||
list: list of paths
|
||||
"""
|
||||
rpms = []
|
||||
for root, dirs, files in os.walk(top_dir):
|
||||
path = root.split(os.sep)
|
||||
for file in files:
|
||||
if not file.endswith('.rpm'):
|
||||
continue
|
||||
nvra, _ = os.path.splitext(file)
|
||||
rpms.append(
|
||||
Package(nvra=nvra, path=os.path.join('/', *path, file))
|
||||
)
|
||||
return rpms
|
||||
return [Package(
|
||||
nvra=parse_nvra(Path(path).stem),
|
||||
path=Path(path),
|
||||
) for path in iglob(str(top_dir.joinpath('**/*.rpm')), recursive=True)]
|
||||
|
||||
|
||||
def copy_rpms(packages: List[Package], target_top_dir: str):
|
||||
def is_excluded_package(
|
||||
package: Package,
|
||||
excluded_packages: List[str],
|
||||
) -> bool:
|
||||
package_key = f'{package.nvra["name"]}.{package.nvra["arch"]}'
|
||||
return any(
|
||||
re.search(
|
||||
f'^{excluded_pkg}$',
|
||||
package_key,
|
||||
) or excluded_pkg in (package.nvra['name'], package_key)
|
||||
for excluded_pkg in excluded_packages
|
||||
)
|
||||
|
||||
|
||||
def copy_rpms(
|
||||
packages: List[Package],
|
||||
target_top_dir: Path,
|
||||
excluded_packages: List[str],
|
||||
):
|
||||
"""
|
||||
Search synced repos for rpms and prepare
|
||||
koji-like structure for pungi
|
||||
@ -45,30 +59,37 @@ def copy_rpms(packages: List[Package], target_top_dir: str):
|
||||
Nothing:
|
||||
"""
|
||||
for package in packages:
|
||||
info = parse_nvra(package.nvra)
|
||||
|
||||
target_arch_dir = os.path.join(target_top_dir, info['arch'])
|
||||
if is_excluded_package(package, excluded_packages):
|
||||
continue
|
||||
target_arch_dir = target_top_dir.joinpath(package.nvra['arch'])
|
||||
target_file = target_arch_dir.joinpath(package.path.name)
|
||||
os.makedirs(target_arch_dir, exist_ok=True)
|
||||
|
||||
target_file = os.path.join(target_arch_dir, os.path.basename(package.path))
|
||||
|
||||
if not os.path.exists(target_file):
|
||||
if not target_file.exists():
|
||||
try:
|
||||
os.link(package.path, target_file)
|
||||
except OSError:
|
||||
# hardlink failed, try symlinking
|
||||
os.symlink(package.path, target_file)
|
||||
package.path.symlink_to(target_file)
|
||||
|
||||
|
||||
def cli_main():
|
||||
parser = ArgumentParser()
|
||||
parser.add_argument('-p', '--path', required=True)
|
||||
parser.add_argument('-t', '--target', required=True)
|
||||
parser.add_argument('-p', '--path', required=True, type=Path)
|
||||
parser.add_argument('-t', '--target', required=True, type=Path)
|
||||
parser.add_argument(
|
||||
'-e',
|
||||
'--excluded-packages',
|
||||
required=False,
|
||||
nargs='+',
|
||||
type=str,
|
||||
default=[],
|
||||
)
|
||||
|
||||
namespace = parser.parse_args()
|
||||
|
||||
rpms = search_rpms(namespace.path)
|
||||
copy_rpms(rpms, namespace.target)
|
||||
copy_rpms(rpms, namespace.target, namespace.excluded_packages)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
@ -319,7 +319,6 @@ def get_arguments(config):
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
config = pungi.config.Config()
|
||||
opts = get_arguments(config)
|
||||
|
||||
@ -476,14 +475,13 @@ def main():
|
||||
else:
|
||||
mypungi.downloadSRPMs()
|
||||
|
||||
print("RPM size: %s MiB" % (mypungi.size_packages() / 1024 ** 2))
|
||||
print("RPM size: %s MiB" % (mypungi.size_packages() / 1024**2))
|
||||
if not opts.nodebuginfo:
|
||||
print(
|
||||
"DEBUGINFO size: %s MiB"
|
||||
% (mypungi.size_debuginfo() / 1024 ** 2)
|
||||
"DEBUGINFO size: %s MiB" % (mypungi.size_debuginfo() / 1024**2)
|
||||
)
|
||||
if not opts.nosource:
|
||||
print("SRPM size: %s MiB" % (mypungi.size_srpms() / 1024 ** 2))
|
||||
print("SRPM size: %s MiB" % (mypungi.size_srpms() / 1024**2))
|
||||
|
||||
# Furthermore (but without the yumlock...)
|
||||
if not opts.sourceisos:
|
||||
|
@ -18,13 +18,18 @@ from pungi.util import temp_dir
|
||||
def get_parser():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
"--profiler", action="store_true",
|
||||
"--profiler",
|
||||
action="store_true",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--arch", required=True,
|
||||
"--arch",
|
||||
required=True,
|
||||
)
|
||||
parser.add_argument(
|
||||
"--config", metavar="PATH", required=True, help="path to kickstart config file",
|
||||
"--config",
|
||||
metavar="PATH",
|
||||
required=True,
|
||||
help="path to kickstart config file",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--download-to",
|
||||
@ -42,7 +47,9 @@ def get_parser():
|
||||
|
||||
group = parser.add_argument_group("Gather options")
|
||||
group.add_argument(
|
||||
"--nodeps", action="store_true", help="disable resolving dependencies",
|
||||
"--nodeps",
|
||||
action="store_true",
|
||||
help="disable resolving dependencies",
|
||||
)
|
||||
group.add_argument(
|
||||
"--selfhosting",
|
||||
@ -61,7 +68,9 @@ def get_parser():
|
||||
choices=["none", "all", "build"],
|
||||
)
|
||||
group.add_argument(
|
||||
"--multilib", metavar="[METHOD]", action="append",
|
||||
"--multilib",
|
||||
metavar="[METHOD]",
|
||||
action="append",
|
||||
)
|
||||
group.add_argument(
|
||||
"--tempdir",
|
||||
@ -88,6 +97,7 @@ def main(ns, persistdir, cachedir):
|
||||
dnf_conf = Conf(ns.arch)
|
||||
dnf_conf.persistdir = persistdir
|
||||
dnf_conf.cachedir = cachedir
|
||||
dnf_conf.optional_metadata_types = ["filelists"]
|
||||
dnf_obj = DnfWrapper(dnf_conf)
|
||||
|
||||
gather_opts = GatherOptions()
|
||||
|
@ -5,6 +5,7 @@ from __future__ import print_function
|
||||
|
||||
import argparse
|
||||
import getpass
|
||||
import glob
|
||||
import json
|
||||
import locale
|
||||
import logging
|
||||
@ -20,6 +21,9 @@ from six.moves import shlex_quote
|
||||
|
||||
from pungi.phases import PHASES_NAMES
|
||||
from pungi import get_full_version, util
|
||||
from pungi.errors import UnsignedPackagesError
|
||||
from pungi.wrappers import kojiwrapper
|
||||
from pungi.util import rmtree
|
||||
|
||||
|
||||
# force C locales
|
||||
@ -248,9 +252,15 @@ def main():
|
||||
kobo.log.add_stderr_logger(logger)
|
||||
|
||||
conf = util.load_config(opts.config)
|
||||
|
||||
compose_type = opts.compose_type or conf.get("compose_type", "production")
|
||||
if compose_type == "production" and not opts.label and not opts.no_label:
|
||||
label = opts.label or conf.get("label")
|
||||
if label:
|
||||
try:
|
||||
productmd.composeinfo.verify_label(label)
|
||||
except ValueError as ex:
|
||||
abort(str(ex))
|
||||
|
||||
if compose_type == "production" and not label and not opts.no_label:
|
||||
abort("must specify label for a production compose")
|
||||
|
||||
if (
|
||||
@ -262,14 +272,12 @@ def main():
|
||||
# check if all requirements are met
|
||||
import pungi.checks
|
||||
|
||||
if not pungi.checks.check(conf):
|
||||
sys.exit(1)
|
||||
pungi.checks.check_umask(logger)
|
||||
if not pungi.checks.check_skip_phases(
|
||||
logger, opts.skip_phase + conf.get("skip_phases", []), opts.just_phase
|
||||
):
|
||||
sys.exit(1)
|
||||
errors, warnings = pungi.checks.validate(conf)
|
||||
errors, warnings = pungi.checks.validate(conf, offline=True)
|
||||
|
||||
if not opts.quiet:
|
||||
# TODO: workaround for config files containing skip_phase = productimg
|
||||
@ -294,9 +302,17 @@ def main():
|
||||
fail_to_start("Config validation failed", errors=errors)
|
||||
sys.exit(1)
|
||||
|
||||
if not pungi.checks.check(conf):
|
||||
sys.exit(1)
|
||||
|
||||
if opts.target_dir:
|
||||
compose_dir = Compose.get_compose_dir(
|
||||
opts.target_dir, conf, compose_type=compose_type, compose_label=opts.label
|
||||
opts.target_dir,
|
||||
conf,
|
||||
compose_type=compose_type,
|
||||
compose_label=label,
|
||||
parent_compose_ids=opts.parent_compose_id,
|
||||
respin_of=opts.respin_of,
|
||||
)
|
||||
else:
|
||||
compose_dir = opts.compose_dir
|
||||
@ -305,7 +321,7 @@ def main():
|
||||
ci = Compose.get_compose_info(
|
||||
conf,
|
||||
compose_type=compose_type,
|
||||
compose_label=opts.label,
|
||||
compose_label=label,
|
||||
parent_compose_ids=opts.parent_compose_id,
|
||||
respin_of=opts.respin_of,
|
||||
)
|
||||
@ -325,14 +341,34 @@ def main():
|
||||
logger=logger,
|
||||
notifier=notifier,
|
||||
)
|
||||
|
||||
rv = Compose.update_compose_url(compose.compose_id, compose_dir, conf)
|
||||
if rv and not rv.ok:
|
||||
logger.error("CTS compose_url update failed with the error: %s" % rv.text)
|
||||
|
||||
errors, warnings = pungi.checks.validate(conf, offline=False)
|
||||
if errors:
|
||||
for error in errors:
|
||||
logger.error("Config validation failed with the error: %s" % error)
|
||||
fail_to_start("Config validation failed", errors=errors)
|
||||
sys.exit(1)
|
||||
|
||||
notifier.compose = compose
|
||||
COMPOSE = compose
|
||||
run_compose(
|
||||
compose,
|
||||
create_latest_link=create_latest_link,
|
||||
latest_link_status=latest_link_status,
|
||||
latest_link_components=latest_link_components,
|
||||
)
|
||||
try:
|
||||
run_compose(
|
||||
compose,
|
||||
create_latest_link=create_latest_link,
|
||||
latest_link_status=latest_link_status,
|
||||
latest_link_components=latest_link_components,
|
||||
)
|
||||
except UnsignedPackagesError:
|
||||
# There was an unsigned package somewhere. It is not safe to reuse any
|
||||
# package set from this compose (since we could leak the unsigned
|
||||
# package). Let's make sure all reuse files are deleted.
|
||||
for fp in glob.glob(compose.paths.work.pkgset_reuse_file("*")):
|
||||
os.unlink(fp)
|
||||
raise
|
||||
|
||||
|
||||
def run_compose(
|
||||
@ -354,6 +390,16 @@ def run_compose(
|
||||
)
|
||||
compose.log_info("Compose top directory: %s" % compose.topdir)
|
||||
compose.log_info("Current timezone offset: %s" % pungi.util.get_tz_offset())
|
||||
compose.log_info("COMPOSE_ID=%s" % compose.compose_id)
|
||||
|
||||
installed_pkgs_log = compose.paths.log.log_file("global", "installed-pkgs")
|
||||
compose.log_info("Logging installed packages to %s" % installed_pkgs_log)
|
||||
try:
|
||||
with open(installed_pkgs_log, "w") as f:
|
||||
subprocess.Popen(["rpm", "-qa"], stdout=f)
|
||||
except Exception as e:
|
||||
compose.log_warning("Failed to log installed packages: %s" % str(e))
|
||||
|
||||
compose.read_variants()
|
||||
|
||||
# dump the config file
|
||||
@ -377,13 +423,15 @@ def run_compose(
|
||||
compose, buildinstall_phase, pkgset_phase
|
||||
)
|
||||
ostree_phase = pungi.phases.OSTreePhase(compose, pkgset_phase)
|
||||
ostree_container_phase = pungi.phases.OSTreeContainerPhase(compose, pkgset_phase)
|
||||
createiso_phase = pungi.phases.CreateisoPhase(compose, buildinstall_phase)
|
||||
extra_isos_phase = pungi.phases.ExtraIsosPhase(compose)
|
||||
liveimages_phase = pungi.phases.LiveImagesPhase(compose)
|
||||
extra_isos_phase = pungi.phases.ExtraIsosPhase(compose, buildinstall_phase)
|
||||
livemedia_phase = pungi.phases.LiveMediaPhase(compose)
|
||||
image_build_phase = pungi.phases.ImageBuildPhase(compose)
|
||||
image_build_phase = pungi.phases.ImageBuildPhase(compose, buildinstall_phase)
|
||||
kiwibuild_phase = pungi.phases.KiwiBuildPhase(compose)
|
||||
osbuild_phase = pungi.phases.OSBuildPhase(compose)
|
||||
osbs_phase = pungi.phases.OSBSPhase(compose)
|
||||
osbs_phase = pungi.phases.OSBSPhase(compose, pkgset_phase, buildinstall_phase)
|
||||
image_container_phase = pungi.phases.ImageContainerPhase(compose)
|
||||
image_checksum_phase = pungi.phases.ImageChecksumPhase(compose)
|
||||
repoclosure_phase = pungi.phases.RepoclosurePhase(compose)
|
||||
test_phase = pungi.phases.TestPhase(compose)
|
||||
@ -397,16 +445,18 @@ def run_compose(
|
||||
gather_phase,
|
||||
extrafiles_phase,
|
||||
createiso_phase,
|
||||
liveimages_phase,
|
||||
livemedia_phase,
|
||||
image_build_phase,
|
||||
image_checksum_phase,
|
||||
test_phase,
|
||||
ostree_phase,
|
||||
ostree_installer_phase,
|
||||
ostree_container_phase,
|
||||
extra_isos_phase,
|
||||
osbs_phase,
|
||||
osbuild_phase,
|
||||
image_container_phase,
|
||||
kiwibuild_phase,
|
||||
):
|
||||
if phase.skip():
|
||||
continue
|
||||
@ -421,50 +471,6 @@ def run_compose(
|
||||
print(i)
|
||||
raise RuntimeError("Configuration is not valid")
|
||||
|
||||
# PREP
|
||||
|
||||
# Note: This may be put into a new method of phase classes (e.g. .prep())
|
||||
# in same way as .validate() or .run()
|
||||
|
||||
# Prep for liveimages - Obtain a password for signing rpm wrapped images
|
||||
if (
|
||||
"signing_key_password_file" in compose.conf
|
||||
and "signing_command" in compose.conf
|
||||
and "%(signing_key_password)s" in compose.conf["signing_command"]
|
||||
and not liveimages_phase.skip()
|
||||
):
|
||||
# TODO: Don't require key if signing is turned off
|
||||
# Obtain signing key password
|
||||
signing_key_password = None
|
||||
|
||||
# Use appropriate method
|
||||
if compose.conf["signing_key_password_file"] == "-":
|
||||
# Use stdin (by getpass module)
|
||||
try:
|
||||
signing_key_password = getpass.getpass("Signing key password: ")
|
||||
except EOFError:
|
||||
compose.log_debug("Ignoring signing key password")
|
||||
pass
|
||||
else:
|
||||
# Use text file with password
|
||||
try:
|
||||
signing_key_password = (
|
||||
open(compose.conf["signing_key_password_file"], "r")
|
||||
.readline()
|
||||
.rstrip("\n")
|
||||
)
|
||||
except IOError:
|
||||
# Filename is not print intentionally in case someone puts
|
||||
# password directly into the option
|
||||
err_msg = "Cannot load password from file specified by 'signing_key_password_file' option" # noqa: E501
|
||||
compose.log_error(err_msg)
|
||||
print(err_msg)
|
||||
raise RuntimeError(err_msg)
|
||||
|
||||
if signing_key_password:
|
||||
# Store the password
|
||||
compose.conf["signing_key_password"] = signing_key_password
|
||||
|
||||
init_phase.start()
|
||||
init_phase.stop()
|
||||
|
||||
@ -477,6 +483,7 @@ def run_compose(
|
||||
(gather_phase, createrepo_phase),
|
||||
extrafiles_phase,
|
||||
(ostree_phase, ostree_installer_phase),
|
||||
ostree_container_phase,
|
||||
)
|
||||
essentials_phase = pungi.phases.WeaverPhase(compose, essentials_schema)
|
||||
essentials_phase.start()
|
||||
@ -501,14 +508,17 @@ def run_compose(
|
||||
compose_images_schema = (
|
||||
createiso_phase,
|
||||
extra_isos_phase,
|
||||
liveimages_phase,
|
||||
image_build_phase,
|
||||
livemedia_phase,
|
||||
osbuild_phase,
|
||||
kiwibuild_phase,
|
||||
)
|
||||
post_image_phase = pungi.phases.WeaverPhase(
|
||||
compose, (image_checksum_phase, image_container_phase)
|
||||
)
|
||||
compose_images_phase = pungi.phases.WeaverPhase(compose, compose_images_schema)
|
||||
extra_phase_schema = (
|
||||
(compose_images_phase, image_checksum_phase),
|
||||
(compose_images_phase, post_image_phase),
|
||||
osbs_phase,
|
||||
repoclosure_phase,
|
||||
)
|
||||
@ -522,13 +532,15 @@ def run_compose(
|
||||
buildinstall_phase.skip()
|
||||
and ostree_installer_phase.skip()
|
||||
and createiso_phase.skip()
|
||||
and liveimages_phase.skip()
|
||||
and extra_isos_phase.skip()
|
||||
and livemedia_phase.skip()
|
||||
and image_build_phase.skip()
|
||||
and kiwibuild_phase.skip()
|
||||
and osbuild_phase.skip()
|
||||
and ostree_container_phase.skip()
|
||||
):
|
||||
compose.im.dump(compose.paths.compose.metadata("images.json"))
|
||||
osbs_phase.dump_metadata()
|
||||
compose.dump_containers_metadata()
|
||||
|
||||
test_phase.start()
|
||||
test_phase.stop()
|
||||
@ -600,9 +612,25 @@ def try_kill_children(signal):
|
||||
COMPOSE.log_warning("Failed to kill all subprocesses")
|
||||
|
||||
|
||||
def try_kill_koji_tasks():
|
||||
try:
|
||||
if COMPOSE:
|
||||
koji_tasks_dir = COMPOSE.paths.log.koji_tasks_dir(create_dir=False)
|
||||
if os.path.exists(koji_tasks_dir):
|
||||
COMPOSE.log_warning("Trying to kill koji tasks")
|
||||
koji = kojiwrapper.KojiWrapper(COMPOSE)
|
||||
koji.login()
|
||||
for task_id in os.listdir(koji_tasks_dir):
|
||||
koji.koji_proxy.cancelTask(int(task_id))
|
||||
except Exception:
|
||||
if COMPOSE:
|
||||
COMPOSE.log_warning("Failed to kill koji tasks")
|
||||
|
||||
|
||||
def sigterm_handler(signum, frame):
|
||||
if COMPOSE:
|
||||
try_kill_children(signum)
|
||||
try_kill_koji_tasks()
|
||||
COMPOSE.log_error("Compose run failed: signal %s" % signum)
|
||||
COMPOSE.log_error("Traceback:\n%s" % "\n".join(traceback.format_stack(frame)))
|
||||
COMPOSE.log_critical("Compose failed: %s" % COMPOSE.topdir)
|
||||
@ -622,18 +650,18 @@ def cli_main():
|
||||
main()
|
||||
except (Exception, KeyboardInterrupt) as ex:
|
||||
if COMPOSE:
|
||||
tb_path = COMPOSE.paths.log.log_file("global", "traceback")
|
||||
COMPOSE.log_error("Compose run failed: %s" % ex)
|
||||
COMPOSE.log_error("Extended traceback in: %s" % tb_path)
|
||||
COMPOSE.traceback(show_locals=getattr(ex, "show_locals", True))
|
||||
COMPOSE.log_critical("Compose failed: %s" % COMPOSE.topdir)
|
||||
COMPOSE.write_status("DOOMED")
|
||||
import kobo.tback
|
||||
|
||||
with open(tb_path, "wb") as f:
|
||||
f.write(kobo.tback.Traceback().get_traceback())
|
||||
else:
|
||||
print("Exception: %s" % ex)
|
||||
raise
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
sys.exit(1)
|
||||
finally:
|
||||
# Remove repositories cloned during ExtraFiles phase
|
||||
process_id = os.getpid()
|
||||
directoy_to_remove = "/tmp/pungi-temp-git-repos-" + str(process_id) + "/"
|
||||
rmtree(directoy_to_remove)
|
||||
|
109
pungi/util.py
109
pungi/util.py
@ -34,6 +34,7 @@ import kobo.conf
|
||||
from kobo.shortcuts import run, force_list
|
||||
from kobo.threads import WorkerThread, ThreadPool
|
||||
from productmd.common import get_major_version
|
||||
from pungi.module_util import Modulemd
|
||||
|
||||
# Patterns that match all names of debuginfo packages
|
||||
DEBUG_PATTERNS = ["*-debuginfo", "*-debuginfo-*", "*-debugsource"]
|
||||
@ -278,7 +279,7 @@ class GitUrlResolveError(RuntimeError):
|
||||
pass
|
||||
|
||||
|
||||
def resolve_git_ref(repourl, ref):
|
||||
def resolve_git_ref(repourl, ref, credential_helper=None):
|
||||
"""Resolve a reference in a Git repo to a commit.
|
||||
|
||||
Raises RuntimeError if there was an error. Most likely cause is failure to
|
||||
@ -287,8 +288,13 @@ def resolve_git_ref(repourl, ref):
|
||||
if re.match(r"^[a-f0-9]{40}$", ref):
|
||||
# This looks like a commit ID already.
|
||||
return ref
|
||||
|
||||
_, output = git_ls_remote(repourl, ref)
|
||||
try:
|
||||
_, output = git_ls_remote(repourl, ref, credential_helper)
|
||||
except RuntimeError as e:
|
||||
raise GitUrlResolveError(
|
||||
"ref does not exist in remote repo %s with the error %s %s"
|
||||
% (repourl, e, e.output)
|
||||
)
|
||||
|
||||
lines = []
|
||||
for line in output.split("\n"):
|
||||
@ -310,7 +316,7 @@ def resolve_git_ref(repourl, ref):
|
||||
return lines[0].split()[0]
|
||||
|
||||
|
||||
def resolve_git_url(url):
|
||||
def resolve_git_url(url, credential_helper=None):
|
||||
"""Given a url to a Git repo specifying HEAD or origin/<branch> as a ref,
|
||||
replace that specifier with actual SHA1 of the commit.
|
||||
|
||||
@ -329,7 +335,7 @@ def resolve_git_url(url):
|
||||
scheme = r.scheme.replace("git+", "")
|
||||
|
||||
baseurl = urllib.parse.urlunsplit((scheme, r.netloc, r.path, "", ""))
|
||||
fragment = resolve_git_ref(baseurl, ref)
|
||||
fragment = resolve_git_ref(baseurl, ref, credential_helper)
|
||||
|
||||
result = urllib.parse.urlunsplit((r.scheme, r.netloc, r.path, r.query, fragment))
|
||||
if "?#" in url:
|
||||
@ -348,13 +354,18 @@ class GitUrlResolver(object):
|
||||
self.offline = offline
|
||||
self.cache = {}
|
||||
|
||||
def __call__(self, url, branch=None):
|
||||
def __call__(self, url, branch=None, options=None):
|
||||
credential_helper = options.get("credential_helper") if options else None
|
||||
if self.offline:
|
||||
return branch or url
|
||||
key = (url, branch)
|
||||
if key not in self.cache:
|
||||
try:
|
||||
res = resolve_git_ref(url, branch) if branch else resolve_git_url(url)
|
||||
res = (
|
||||
resolve_git_ref(url, branch, credential_helper)
|
||||
if branch
|
||||
else resolve_git_url(url, credential_helper)
|
||||
)
|
||||
self.cache[key] = res
|
||||
except GitUrlResolveError as exc:
|
||||
self.cache[key] = exc
|
||||
@ -450,6 +461,9 @@ def get_volid(compose, arch, variant=None, disc_type=False, formats=None, **kwar
|
||||
if not variant_uid and "%(variant)s" in i:
|
||||
continue
|
||||
try:
|
||||
# fmt: off
|
||||
# Black wants to add a comma after kwargs, but that's not valid in
|
||||
# Python 2.7
|
||||
args = get_format_substs(
|
||||
compose,
|
||||
variant=variant_uid,
|
||||
@ -461,6 +475,7 @@ def get_volid(compose, arch, variant=None, disc_type=False, formats=None, **kwar
|
||||
base_product_version=base_product_version,
|
||||
**kwargs
|
||||
)
|
||||
# fmt: on
|
||||
volid = (i % args).format(**args)
|
||||
except KeyError as err:
|
||||
raise RuntimeError(
|
||||
@ -472,10 +487,7 @@ def get_volid(compose, arch, variant=None, disc_type=False, formats=None, **kwar
|
||||
tried.add(volid)
|
||||
|
||||
if volid and len(volid) > 32:
|
||||
raise ValueError(
|
||||
"Could not create volume ID longer than 32 bytes, options are %r",
|
||||
sorted(tried, key=len),
|
||||
)
|
||||
volid = volid[:32]
|
||||
|
||||
if compose.conf["restricted_volid"]:
|
||||
# Replace all non-alphanumeric characters and non-underscores) with
|
||||
@ -941,7 +953,7 @@ def get_repo_dicts(repos, logger=None):
|
||||
|
||||
def version_generator(compose, gen):
|
||||
"""If ``gen`` is a known generator, create a value. Otherwise return
|
||||
the argument value unchanged.
|
||||
the argument value unchanged.
|
||||
"""
|
||||
if gen == "!OSTREE_VERSION_FROM_LABEL_DATE_TYPE_RESPIN":
|
||||
return "%s.%s" % (compose.image_version, compose.image_release)
|
||||
@ -963,8 +975,8 @@ def version_generator(compose, gen):
|
||||
|
||||
|
||||
def retry(timeout=120, interval=30, wait_on=Exception):
|
||||
""" A decorator that allows to retry a section of code until success or
|
||||
timeout.
|
||||
"""A decorator that allows to retry a section of code until success or
|
||||
timeout.
|
||||
"""
|
||||
|
||||
def wrapper(function):
|
||||
@ -985,8 +997,12 @@ def retry(timeout=120, interval=30, wait_on=Exception):
|
||||
|
||||
|
||||
@retry(wait_on=RuntimeError)
|
||||
def git_ls_remote(baseurl, ref):
|
||||
return run(["git", "ls-remote", baseurl, ref], universal_newlines=True)
|
||||
def git_ls_remote(baseurl, ref, credential_helper=None):
|
||||
cmd = ["git"]
|
||||
if credential_helper:
|
||||
cmd.extend(["-c", "credential.useHttpPath=true"])
|
||||
cmd.extend(["-c", "credential.helper=%s" % credential_helper])
|
||||
return run(cmd + ["ls-remote", baseurl, ref], universal_newlines=True)
|
||||
|
||||
|
||||
def get_tz_offset():
|
||||
@ -1034,6 +1050,46 @@ def load_config(file_path, defaults={}):
|
||||
return conf
|
||||
|
||||
|
||||
def _read_single_module_stream(
|
||||
file_or_string, compose=None, arch=None, build=None, is_file=True
|
||||
):
|
||||
try:
|
||||
mod_index = Modulemd.ModuleIndex.new()
|
||||
if is_file:
|
||||
mod_index.update_from_file(file_or_string, True)
|
||||
else:
|
||||
mod_index.update_from_string(file_or_string, True)
|
||||
mod_names = mod_index.get_module_names()
|
||||
emit_warning = False
|
||||
if len(mod_names) > 1:
|
||||
emit_warning = True
|
||||
mod_streams = mod_index.get_module(mod_names[0]).get_all_streams()
|
||||
if len(mod_streams) > 1:
|
||||
emit_warning = True
|
||||
if emit_warning and compose:
|
||||
compose.log_warning(
|
||||
"Multiple modules/streams for arch: %s. Build: %s. "
|
||||
"Processing first module/stream only.",
|
||||
arch,
|
||||
build,
|
||||
)
|
||||
return mod_streams[0]
|
||||
except (KeyError, IndexError):
|
||||
# There is no modulemd for this arch. This could mean an arch was
|
||||
# added to the compose after the module was built. We don't want to
|
||||
# process this, let's skip this module.
|
||||
if compose:
|
||||
compose.log_info("Skipping arch: %s. Build: %s", arch, build)
|
||||
|
||||
|
||||
def read_single_module_stream_from_file(*args, **kwargs):
|
||||
return _read_single_module_stream(*args, is_file=True, **kwargs)
|
||||
|
||||
|
||||
def read_single_module_stream_from_string(*args, **kwargs):
|
||||
return _read_single_module_stream(*args, is_file=False, **kwargs)
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def as_local_file(url):
|
||||
"""If URL points to a file over HTTP, the file will be downloaded locally
|
||||
@ -1046,6 +1102,8 @@ def as_local_file(url):
|
||||
yield local_filename
|
||||
finally:
|
||||
os.remove(local_filename)
|
||||
elif url.startswith("file://"):
|
||||
yield url[7:]
|
||||
else:
|
||||
# Not a remote url, return unchanged.
|
||||
yield url
|
||||
@ -1083,3 +1141,22 @@ class PartialFuncThreadPool(ThreadPool):
|
||||
@property
|
||||
def results(self):
|
||||
return self._results
|
||||
|
||||
|
||||
def read_json_file(file_path):
|
||||
"""A helper function to read a JSON file."""
|
||||
with open(file_path) as f:
|
||||
return json.load(f)
|
||||
|
||||
|
||||
UNITS = ["", "Ki", "Mi", "Gi", "Ti"]
|
||||
|
||||
|
||||
def format_size(sz):
|
||||
sz = float(sz)
|
||||
unit = 0
|
||||
while sz > 1024:
|
||||
sz /= 1024
|
||||
unit += 1
|
||||
|
||||
return "%.3g %sB" % (sz, UNITS[unit])
|
||||
|
@ -177,15 +177,22 @@ class CompsFilter(object):
|
||||
for i in self.tree.xpath("//*[@xml:lang]"):
|
||||
i.getparent().remove(i)
|
||||
|
||||
def filter_environment_groups(self, lookaside_groups=[]):
|
||||
def filter_environment_groups(self, arch, lookaside_groups=[]):
|
||||
"""
|
||||
Remove undefined groups from environments.
|
||||
Remove undefined groups or groups not matching given arch from environments.
|
||||
"""
|
||||
all_groups = self.tree.xpath("/comps/group/id/text()") + lookaside_groups
|
||||
for environment in self.tree.xpath("/comps/environment"):
|
||||
for group in environment.xpath("grouplist/groupid"):
|
||||
if group.text not in all_groups:
|
||||
group.getparent().remove(group)
|
||||
for parent_tag in ("grouplist", "optionlist"):
|
||||
for group in environment.xpath("%s/groupid" % parent_tag):
|
||||
if group.text not in all_groups:
|
||||
group.getparent().remove(group)
|
||||
|
||||
for group in environment.xpath("%s/groupid[@arch]" % parent_tag):
|
||||
value = group.attrib.get("arch")
|
||||
values = [v for v in re.split(r"[, ]+", value) if v]
|
||||
if arch not in values:
|
||||
group.getparent().remove(group)
|
||||
|
||||
def remove_empty_environments(self):
|
||||
"""
|
||||
@ -212,7 +219,7 @@ class CompsFilter(object):
|
||||
)
|
||||
file_obj.write(b"\n")
|
||||
|
||||
def cleanup(self, keep_groups=[], lookaside_groups=[]):
|
||||
def cleanup(self, arch, keep_groups=[], lookaside_groups=[]):
|
||||
"""
|
||||
Remove empty groups, categories and environment from the comps file.
|
||||
Groups given in ``keep_groups`` will be preserved even if empty.
|
||||
@ -223,7 +230,7 @@ class CompsFilter(object):
|
||||
self.remove_empty_groups(keep_groups)
|
||||
self.filter_category_groups()
|
||||
self.remove_empty_categories()
|
||||
self.filter_environment_groups(lookaside_groups)
|
||||
self.filter_environment_groups(arch, lookaside_groups)
|
||||
self.remove_empty_environments()
|
||||
|
||||
|
||||
@ -357,7 +364,10 @@ class CompsWrapper(object):
|
||||
|
||||
if environment.option_ids:
|
||||
append_grouplist(
|
||||
doc, env_node, set(environment.option_ids), "optionlist",
|
||||
doc,
|
||||
env_node,
|
||||
set(environment.option_ids),
|
||||
"optionlist",
|
||||
)
|
||||
|
||||
if self.comps.langpacks:
|
||||
|
@ -26,7 +26,12 @@ Pungi).
|
||||
|
||||
|
||||
def get_cmd(
|
||||
conf_file, arch, repos, lookasides, platform=None, filter_packages=None,
|
||||
conf_file,
|
||||
arch,
|
||||
repos,
|
||||
lookasides,
|
||||
platform=None,
|
||||
filter_packages=None,
|
||||
):
|
||||
cmd = ["fus", "--verbose", "--arch", arch]
|
||||
|
||||
|
@ -146,6 +146,7 @@ def get_mkisofs_cmd(
|
||||
input_charset="utf-8",
|
||||
graft_points=None,
|
||||
use_xorrisofs=False,
|
||||
iso_level=None,
|
||||
):
|
||||
# following options are always enabled
|
||||
untranslated_filenames = True
|
||||
@ -155,6 +156,10 @@ def get_mkisofs_cmd(
|
||||
rock = True
|
||||
|
||||
cmd = ["/usr/bin/xorrisofs" if use_xorrisofs else "/usr/bin/genisoimage"]
|
||||
|
||||
if iso_level:
|
||||
cmd.extend(["-iso-level", str(iso_level)])
|
||||
|
||||
if appid:
|
||||
cmd.extend(["-appid", appid])
|
||||
|
||||
@ -255,21 +260,41 @@ def get_isohybrid_cmd(iso_path, arch):
|
||||
return cmd
|
||||
|
||||
|
||||
def get_manifest_cmd(iso_name):
|
||||
return "isoinfo -R -f -i %s | grep -v '/TRANS.TBL$' | sort >> %s.manifest" % (
|
||||
shlex_quote(iso_name),
|
||||
shlex_quote(iso_name),
|
||||
)
|
||||
def get_manifest_cmd(iso_name, xorriso=False, output_file=None):
|
||||
if not output_file:
|
||||
output_file = "%s.manifest" % iso_name
|
||||
|
||||
if xorriso:
|
||||
return """xorriso -dev %s --find |
|
||||
tail -n+2 |
|
||||
tr -d "'" |
|
||||
cut -c2- |
|
||||
sort >> %s""" % (
|
||||
shlex_quote(iso_name),
|
||||
shlex_quote(output_file),
|
||||
)
|
||||
else:
|
||||
return "isoinfo -R -f -i %s | grep -v '/TRANS.TBL$' | sort >> %s" % (
|
||||
shlex_quote(iso_name),
|
||||
shlex_quote(output_file),
|
||||
)
|
||||
|
||||
|
||||
def get_volume_id(path):
|
||||
cmd = ["isoinfo", "-d", "-i", path]
|
||||
retcode, output = run(cmd, universal_newlines=True)
|
||||
def get_volume_id(path, xorriso=False):
|
||||
if xorriso:
|
||||
cmd = ["xorriso", "-indev", path]
|
||||
retcode, output = run(cmd, universal_newlines=True)
|
||||
for line in output.splitlines():
|
||||
if line.startswith("Volume id"):
|
||||
return line.split("'")[1]
|
||||
else:
|
||||
cmd = ["isoinfo", "-d", "-i", path]
|
||||
retcode, output = run(cmd, universal_newlines=True)
|
||||
|
||||
for line in output.splitlines():
|
||||
line = line.strip()
|
||||
if line.startswith("Volume id:"):
|
||||
return line[11:].strip()
|
||||
for line in output.splitlines():
|
||||
line = line.strip()
|
||||
if line.startswith("Volume id:"):
|
||||
return line[11:].strip()
|
||||
|
||||
raise RuntimeError("Could not read Volume ID")
|
||||
|
||||
@ -491,3 +516,21 @@ def mount(image, logger=None, use_guestmount=True):
|
||||
util.run_unmount_cmd(["fusermount", "-u", mount_dir], path=mount_dir)
|
||||
else:
|
||||
util.run_unmount_cmd(["umount", mount_dir], path=mount_dir)
|
||||
|
||||
|
||||
def xorriso_commands(arch, input, output):
|
||||
"""List of xorriso commands to modify a bootable image."""
|
||||
commands = [
|
||||
("-indev", input),
|
||||
("-outdev", output),
|
||||
# isoinfo -J uses the Joliet tree, and it's used by virt-install
|
||||
("-joliet", "on"),
|
||||
# Support long filenames in the Joliet trees. Repodata is particularly
|
||||
# likely to run into this limit.
|
||||
("-compliance", "joliet_long_names"),
|
||||
("-boot_image", "any", "replay"),
|
||||
]
|
||||
if arch == "ppc64le":
|
||||
# This is needed for the image to be bootable.
|
||||
commands.append(("-as", "mkisofs", "-U", "--"))
|
||||
return commands
|
||||
|
@ -25,7 +25,7 @@ class JigdoWrapper(kobo.log.LoggingBase):
|
||||
self, image, files, output_dir, cache=None, no_servers=False, report=None
|
||||
):
|
||||
"""
|
||||
files: [{"path", "label", "uri"}]
|
||||
files: [{"path", "label", "uri"}]
|
||||
"""
|
||||
cmd = ["jigdo-file", "make-template"]
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
import os
|
||||
import subprocess
|
||||
import time
|
||||
from pathlib import Path
|
||||
|
||||
from attr import dataclass
|
||||
from kobo.rpmlib import parse_nvra
|
||||
@ -43,32 +43,34 @@ class KojiMock:
|
||||
Class that acts like real koji (for some needed methods)
|
||||
but uses local storage as data source
|
||||
"""
|
||||
def __init__(self, packages_dir, modules_dir):
|
||||
def __init__(self, packages_dir, modules_dir, all_arches):
|
||||
self._modules = self._gather_modules(modules_dir)
|
||||
self._modules_dir = modules_dir
|
||||
self._packages_dir = packages_dir
|
||||
self._all_arches = all_arches
|
||||
|
||||
def _gather_modules(self, modules_dir):
|
||||
@staticmethod
|
||||
def _gather_modules(modules_dir):
|
||||
modules = {}
|
||||
for arch in os.listdir(modules_dir):
|
||||
arch_dir = os.path.join(
|
||||
modules_dir,
|
||||
arch,
|
||||
for index, (f, arch) in enumerate(
|
||||
(sub_path.name, sub_path.parent.name)
|
||||
for path in Path(modules_dir).glob('*')
|
||||
for sub_path in path.iterdir()
|
||||
):
|
||||
parsed = parse_nvra(f)
|
||||
modules[index] = Module(
|
||||
name=parsed['name'],
|
||||
nvr=f,
|
||||
version=parsed['release'],
|
||||
context=parsed['arch'],
|
||||
stream=parsed['version'],
|
||||
build_id=index,
|
||||
arch=arch,
|
||||
)
|
||||
for index, f in enumerate(os.listdir(arch_dir)):
|
||||
parsed = parse_nvra(f)
|
||||
modules[index] = Module(
|
||||
name=parsed['name'],
|
||||
nvr=f,
|
||||
version=parsed['release'],
|
||||
context=parsed['arch'],
|
||||
stream=parsed['version'],
|
||||
build_id=index,
|
||||
arch=arch,
|
||||
)
|
||||
return modules
|
||||
|
||||
def getLastEvent(self, *args, **kwargs):
|
||||
@staticmethod
|
||||
def getLastEvent(*args, **kwargs):
|
||||
return {'id': LAST_EVENT_ID, 'ts': LAST_EVENT_TIME}
|
||||
|
||||
def listTagged(self, tag_name, *args, **kwargs):
|
||||
@ -92,6 +94,7 @@ class KojiMock:
|
||||
'name': module.name,
|
||||
'id': module.build_id,
|
||||
'tag_name': tag_name,
|
||||
'arch': module.arch,
|
||||
# Following fields are currently not
|
||||
# used but returned by real koji
|
||||
# left them here just for reference
|
||||
@ -111,7 +114,8 @@ class KojiMock:
|
||||
|
||||
return builds
|
||||
|
||||
def getFullInheritance(self, *args, **kwargs):
|
||||
@staticmethod
|
||||
def getFullInheritance(*args, **kwargs):
|
||||
"""
|
||||
Unneeded because we use local storage.
|
||||
"""
|
||||
@ -199,31 +203,12 @@ class KojiMock:
|
||||
packages = []
|
||||
|
||||
# get all rpms in folder
|
||||
rpms = search_rpms(self._packages_dir)
|
||||
all_rpms = [package.path for package in rpms]
|
||||
rpms = search_rpms(Path(self._packages_dir))
|
||||
|
||||
# get nvras for modular packages
|
||||
nvras = set()
|
||||
for module in self._modules.values():
|
||||
path = os.path.join(
|
||||
self._modules_dir,
|
||||
module.arch,
|
||||
module.nvr,
|
||||
)
|
||||
info = Modulemd.ModuleStream.read_string(open(path).read(), strict=True)
|
||||
|
||||
for package in info.get_rpm_artifacts():
|
||||
data = parse_nvra(package)
|
||||
nvras.add((data['name'], data['version'], data['release'], data['arch']))
|
||||
|
||||
# and remove modular packages from global list
|
||||
for rpm in all_rpms[:]:
|
||||
data = parse_nvra(os.path.basename(rpm[:-4]))
|
||||
if (data['name'], data['version'], data['release'], data['arch']) in nvras:
|
||||
all_rpms.remove(rpm)
|
||||
|
||||
for rpm in all_rpms:
|
||||
info = parse_nvra(os.path.basename(rpm))
|
||||
for rpm in rpms:
|
||||
info = parse_nvra(rpm.path.stem)
|
||||
if 'module' in info['release']:
|
||||
continue
|
||||
packages.append({
|
||||
"build_id": RELEASE_BUILD_ID,
|
||||
"name": info['name'],
|
||||
@ -244,15 +229,19 @@ class KojiMock:
|
||||
"""
|
||||
Get list of builds for module and given module tag name.
|
||||
"""
|
||||
module = self._get_module_by_name(tag_name)
|
||||
path = os.path.join(
|
||||
self._modules_dir,
|
||||
module.arch,
|
||||
tag_name,
|
||||
)
|
||||
builds = []
|
||||
packages = []
|
||||
modules = self._get_modules_by_name(tag_name)
|
||||
for module in modules:
|
||||
if module is None:
|
||||
raise ValueError('Module %s is not found' % tag_name)
|
||||
path = os.path.join(
|
||||
self._modules_dir,
|
||||
module.arch,
|
||||
tag_name,
|
||||
)
|
||||
|
||||
builds = [
|
||||
{
|
||||
builds.append({
|
||||
"build_id": module.build_id,
|
||||
"package_name": module.name,
|
||||
"nvr": module.nvr,
|
||||
@ -278,35 +267,33 @@ class KojiMock:
|
||||
# "volume_id": 0,
|
||||
# "package_id": 104,
|
||||
# "owner_id": 6,
|
||||
}
|
||||
]
|
||||
if module is None:
|
||||
raise ValueError('Module %s is not found' % tag_name)
|
||||
})
|
||||
|
||||
packages = []
|
||||
if os.path.exists(path):
|
||||
info = Modulemd.ModuleStream.read_string(open(path).read(), strict=True)
|
||||
for art in info.get_rpm_artifacts():
|
||||
data = parse_nvra(art)
|
||||
packages.append({
|
||||
"build_id": module.build_id,
|
||||
"name": data['name'],
|
||||
"extra": None,
|
||||
"arch": data['arch'],
|
||||
"epoch": data['epoch'] or None,
|
||||
"version": data['version'],
|
||||
"metadata_only": False,
|
||||
"release": data['release'],
|
||||
"id": 262555,
|
||||
"size": 0
|
||||
})
|
||||
else:
|
||||
raise RuntimeError('Unable to find module %s' % path)
|
||||
if os.path.exists(path):
|
||||
info = Modulemd.ModuleStream.read_string(open(path).read(), strict=True)
|
||||
for art in info.get_rpm_artifacts():
|
||||
data = parse_nvra(art)
|
||||
packages.append({
|
||||
"build_id": module.build_id,
|
||||
"name": data['name'],
|
||||
"extra": None,
|
||||
"arch": data['arch'],
|
||||
"epoch": data['epoch'] or None,
|
||||
"version": data['version'],
|
||||
"metadata_only": False,
|
||||
"release": data['release'],
|
||||
"id": 262555,
|
||||
"size": 0
|
||||
})
|
||||
else:
|
||||
raise RuntimeError('Unable to find module %s' % path)
|
||||
return builds, packages
|
||||
|
||||
def _get_module_by_name(self, tag_name):
|
||||
for module in self._modules.values():
|
||||
if module.nvr != tag_name:
|
||||
continue
|
||||
return module
|
||||
return None
|
||||
def _get_modules_by_name(self, tag_name):
|
||||
modules = []
|
||||
for arch in self._all_arches:
|
||||
for module in self._modules.values():
|
||||
if module.nvr != tag_name or module.arch != arch:
|
||||
continue
|
||||
modules.append(module)
|
||||
return modules
|
||||
|
@ -14,17 +14,23 @@
|
||||
# along with this program; if not, see <https://gnu.org/licenses/>.
|
||||
|
||||
|
||||
import contextlib
|
||||
import os
|
||||
import re
|
||||
import socket
|
||||
import shutil
|
||||
import time
|
||||
import threading
|
||||
import contextlib
|
||||
|
||||
import requests
|
||||
|
||||
import koji
|
||||
from kobo.shortcuts import run, force_list
|
||||
import six
|
||||
from six.moves import configparser, shlex_quote
|
||||
import six.moves.xmlrpc_client as xmlrpclib
|
||||
from flufl.lock import Lock
|
||||
from datetime import timedelta
|
||||
|
||||
from .kojimock import KojiMock
|
||||
from .. import util
|
||||
@ -37,10 +43,14 @@ KOJI_BUILD_DELETED = koji.BUILD_STATES["DELETED"]
|
||||
class KojiWrapper(object):
|
||||
lock = threading.Lock()
|
||||
|
||||
def __init__(self, profile, real_koji=False):
|
||||
self.profile = profile
|
||||
def __init__(self, compose):
|
||||
self.compose = compose
|
||||
try:
|
||||
self.profile = self.compose.conf["koji_profile"]
|
||||
except KeyError:
|
||||
raise RuntimeError("Koji profile must be configured")
|
||||
with self.lock:
|
||||
self.koji_module = koji.get_profile_module(profile)
|
||||
self.koji_module = koji.get_profile_module(self.profile)
|
||||
session_opts = {}
|
||||
for key in (
|
||||
"timeout",
|
||||
@ -58,15 +68,13 @@ class KojiWrapper(object):
|
||||
value = getattr(self.koji_module.config, key, None)
|
||||
if value is not None:
|
||||
session_opts[key] = value
|
||||
if real_koji:
|
||||
self.koji_proxy = koji.ClientSession(
|
||||
self.koji_module.config.server, session_opts
|
||||
)
|
||||
else:
|
||||
self.koji_proxy = KojiMock(
|
||||
packages_dir=self.koji_module.config.topdir,
|
||||
modules_dir=os.path.join(self.koji_module.config.topdir, 'modules'))
|
||||
self.koji_proxy = koji.ClientSession(
|
||||
self.koji_module.config.server, session_opts
|
||||
)
|
||||
|
||||
# This retry should be removed once https://pagure.io/koji/issue/3170 is
|
||||
# fixed and released.
|
||||
@util.retry(wait_on=(xmlrpclib.ProtocolError, koji.GenericError))
|
||||
def login(self):
|
||||
"""Authenticate to the hub."""
|
||||
auth_type = self.koji_module.config.authtype
|
||||
@ -117,8 +125,6 @@ class KojiWrapper(object):
|
||||
|
||||
if channel:
|
||||
cmd.append("--channel-override=%s" % channel)
|
||||
else:
|
||||
cmd.append("--channel-override=runroot-local")
|
||||
|
||||
if weight:
|
||||
cmd.append("--weight=%s" % int(weight))
|
||||
@ -148,10 +154,13 @@ class KojiWrapper(object):
|
||||
|
||||
if chown_paths:
|
||||
paths = " ".join(shlex_quote(pth) for pth in chown_paths)
|
||||
command += " ; EXIT_CODE=$?"
|
||||
# Make the files world readable
|
||||
command += " && chmod -R a+r %s" % paths
|
||||
command += " ; chmod -R a+r %s" % paths
|
||||
# and owned by the same user that is running the process
|
||||
command += " && chown -R %d %s" % (os.getuid(), paths)
|
||||
command += " ; chown -R %d %s" % (os.getuid(), paths)
|
||||
# Exit with code of main command
|
||||
command += " ; exit $EXIT_CODE"
|
||||
cmd.append(command)
|
||||
|
||||
return cmd
|
||||
@ -171,8 +180,6 @@ class KojiWrapper(object):
|
||||
|
||||
if channel:
|
||||
cmd.append("--channel-override=%s" % channel)
|
||||
else:
|
||||
cmd.append("--channel-override=runroot-local")
|
||||
|
||||
if weight:
|
||||
cmd.append("--weight=%s" % int(weight))
|
||||
@ -208,14 +215,19 @@ class KojiWrapper(object):
|
||||
return cmd
|
||||
|
||||
def get_pungi_ostree_cmd(
|
||||
self, target, arch, args, channel=None, packages=None, mounts=None, weight=None,
|
||||
self,
|
||||
target,
|
||||
arch,
|
||||
args,
|
||||
channel=None,
|
||||
packages=None,
|
||||
mounts=None,
|
||||
weight=None,
|
||||
):
|
||||
cmd = self._get_cmd("pungi-ostree", "--nowait", "--task-id")
|
||||
|
||||
if channel:
|
||||
cmd.append("--channel-override=%s" % channel)
|
||||
else:
|
||||
cmd.append("--channel-override=runroot-local")
|
||||
|
||||
if weight:
|
||||
cmd.append("--weight=%s" % int(weight))
|
||||
@ -286,15 +298,22 @@ class KojiWrapper(object):
|
||||
universal_newlines=True,
|
||||
)
|
||||
|
||||
first_line = output.splitlines()[0]
|
||||
match = re.search(r"^(\d+)$", first_line)
|
||||
if not match:
|
||||
# Look for first line that contains only a number. This is the ID of
|
||||
# the new task. Usually this should be the first line, but there may be
|
||||
# warnings before it.
|
||||
for line in output.splitlines():
|
||||
match = re.search(r"^(\d+)$", line)
|
||||
if match:
|
||||
task_id = int(match.groups()[0])
|
||||
break
|
||||
|
||||
if not task_id:
|
||||
raise RuntimeError(
|
||||
"Could not find task ID in output. Command '%s' returned '%s'."
|
||||
% (" ".join(command), output)
|
||||
)
|
||||
|
||||
task_id = int(match.groups()[0])
|
||||
self.save_task_id(task_id)
|
||||
|
||||
retcode, output = self._wait_for_task(task_id, logfile=log_file)
|
||||
|
||||
@ -328,9 +347,11 @@ class KojiWrapper(object):
|
||||
"ksurl",
|
||||
"distro",
|
||||
)
|
||||
assert set(min_options).issubset(set(config_options["image-build"].keys())), (
|
||||
"image-build requires at least %s got '%s'"
|
||||
% (", ".join(min_options), config_options)
|
||||
assert set(min_options).issubset(
|
||||
set(config_options["image-build"].keys())
|
||||
), "image-build requires at least %s got '%s'" % (
|
||||
", ".join(min_options),
|
||||
config_options,
|
||||
)
|
||||
cfg_parser = configparser.ConfigParser()
|
||||
for section, opts in config_options.items():
|
||||
@ -385,94 +406,11 @@ class KojiWrapper(object):
|
||||
if "can_fail" in options:
|
||||
cmd.append("--can-fail=%s" % ",".join(options["can_fail"]))
|
||||
|
||||
if wait:
|
||||
cmd.append("--wait")
|
||||
|
||||
return cmd
|
||||
|
||||
def get_create_image_cmd(
|
||||
self,
|
||||
name,
|
||||
version,
|
||||
target,
|
||||
arch,
|
||||
ks_file,
|
||||
repos,
|
||||
image_type="live",
|
||||
image_format=None,
|
||||
release=None,
|
||||
wait=True,
|
||||
archive=False,
|
||||
specfile=None,
|
||||
ksurl=None,
|
||||
):
|
||||
# Usage: koji spin-livecd [options] <name> <version> <target> <arch> <kickstart-file> # noqa: E501
|
||||
# Usage: koji spin-appliance [options] <name> <version> <target> <arch> <kickstart-file> # noqa: E501
|
||||
# Examples:
|
||||
# * name: RHEL-7.0
|
||||
# * name: Satellite-6.0.1-RHEL-6
|
||||
# ** -<type>.<arch>
|
||||
# * version: YYYYMMDD[.n|.t].X
|
||||
# * release: 1
|
||||
|
||||
cmd = self._get_cmd()
|
||||
|
||||
if image_type == "live":
|
||||
cmd.append("spin-livecd")
|
||||
elif image_type == "appliance":
|
||||
cmd.append("spin-appliance")
|
||||
else:
|
||||
raise ValueError("Invalid image type: %s" % image_type)
|
||||
|
||||
if not archive:
|
||||
cmd.append("--scratch")
|
||||
|
||||
cmd.append("--noprogress")
|
||||
if options.get("nomacboot"):
|
||||
cmd.append("--nomacboot")
|
||||
|
||||
if wait:
|
||||
cmd.append("--wait")
|
||||
else:
|
||||
cmd.append("--nowait")
|
||||
|
||||
if specfile:
|
||||
cmd.append("--specfile=%s" % specfile)
|
||||
|
||||
if ksurl:
|
||||
cmd.append("--ksurl=%s" % ksurl)
|
||||
|
||||
if isinstance(repos, list):
|
||||
for repo in repos:
|
||||
cmd.append("--repo=%s" % repo)
|
||||
else:
|
||||
cmd.append("--repo=%s" % repos)
|
||||
|
||||
if image_format:
|
||||
if image_type != "appliance":
|
||||
raise ValueError("Format can be specified only for appliance images'")
|
||||
supported_formats = ["raw", "qcow", "qcow2", "vmx"]
|
||||
if image_format not in supported_formats:
|
||||
raise ValueError(
|
||||
"Format is not supported: %s. Supported formats: %s"
|
||||
% (image_format, " ".join(sorted(supported_formats)))
|
||||
)
|
||||
cmd.append("--format=%s" % image_format)
|
||||
|
||||
if release is not None:
|
||||
cmd.append("--release=%s" % release)
|
||||
|
||||
# IMPORTANT: all --opts have to be provided *before* args
|
||||
# Usage:
|
||||
# koji spin-livecd [options] <name> <version> <target> <arch> <kickstart-file>
|
||||
|
||||
cmd.append(name)
|
||||
cmd.append(version)
|
||||
cmd.append(target)
|
||||
|
||||
# i686 -> i386 etc.
|
||||
arch = getBaseArch(arch)
|
||||
cmd.append(arch)
|
||||
|
||||
cmd.append(ks_file)
|
||||
|
||||
return cmd
|
||||
|
||||
@ -522,6 +460,7 @@ class KojiWrapper(object):
|
||||
retcode, output = run(
|
||||
command,
|
||||
can_fail=True,
|
||||
show_cmd=True,
|
||||
logfile=log_file,
|
||||
env=env,
|
||||
buffer_size=-1,
|
||||
@ -536,6 +475,8 @@ class KojiWrapper(object):
|
||||
)
|
||||
task_id = int(match.groups()[0])
|
||||
|
||||
self.save_task_id(task_id)
|
||||
|
||||
if retcode != 0 and (
|
||||
self._has_connection_error(output) or self._has_offline_error(output)
|
||||
):
|
||||
@ -550,6 +491,19 @@ class KojiWrapper(object):
|
||||
}
|
||||
|
||||
def watch_task(self, task_id, log_file=None, max_retries=None):
|
||||
"""Watch and wait for a task to finish.
|
||||
|
||||
:param int task_id: ID of koji task.
|
||||
:param str log_file: Path to log file.
|
||||
:param int max_retries: Max times to retry when error occurs,
|
||||
no limits by default.
|
||||
"""
|
||||
if log_file:
|
||||
task_url = os.path.join(
|
||||
self.koji_module.config.weburl, "taskinfo?taskID=%d" % task_id
|
||||
)
|
||||
with open(log_file, "a") as f:
|
||||
f.write("Task URL: %s\n" % task_url)
|
||||
retcode, _ = self._wait_for_task(
|
||||
task_id, logfile=log_file, max_retries=max_retries
|
||||
)
|
||||
@ -573,6 +527,7 @@ class KojiWrapper(object):
|
||||
"createImage",
|
||||
"createLiveMedia",
|
||||
"createAppliance",
|
||||
"createKiwiImage",
|
||||
]:
|
||||
continue
|
||||
|
||||
@ -752,11 +707,10 @@ class KojiWrapper(object):
|
||||
if list_of_args is None and list_of_kwargs is None:
|
||||
raise ValueError("One of list_of_args or list_of_kwargs must be set.")
|
||||
|
||||
if type(list_of_args) not in [type(None), list] or type(list_of_kwargs) not in [
|
||||
type(None),
|
||||
list,
|
||||
]:
|
||||
raise ValueError("list_of_args and list_of_kwargs must be list or None.")
|
||||
if list_of_args is not None and not isinstance(list_of_args, list):
|
||||
raise ValueError("list_of_args must be list or None.")
|
||||
if list_of_kwargs is not None and not isinstance(list_of_kwargs, list):
|
||||
raise ValueError("list_of_kwargs must be list or None.")
|
||||
|
||||
if list_of_kwargs is None:
|
||||
list_of_kwargs = [{}] * len(list_of_args)
|
||||
@ -770,9 +724,9 @@ class KojiWrapper(object):
|
||||
|
||||
koji_session.multicall = True
|
||||
for args, kwargs in zip(list_of_args, list_of_kwargs):
|
||||
if type(args) != list:
|
||||
if not isinstance(args, list):
|
||||
args = [args]
|
||||
if type(kwargs) != dict:
|
||||
if not isinstance(kwargs, dict):
|
||||
raise ValueError("Every item in list_of_kwargs must be a dict")
|
||||
koji_session_fnc(*args, **kwargs)
|
||||
|
||||
@ -780,7 +734,7 @@ class KojiWrapper(object):
|
||||
|
||||
if not responses:
|
||||
return None
|
||||
if type(responses) != list:
|
||||
if not isinstance(responses, list):
|
||||
raise ValueError(
|
||||
"Fault element was returned for multicall of method %r: %r"
|
||||
% (koji_session_fnc, responses)
|
||||
@ -796,7 +750,7 @@ class KojiWrapper(object):
|
||||
# a one-item array containing the result value,
|
||||
# or a struct of the form found inside the standard <fault> element.
|
||||
for response, args, kwargs in zip(responses, list_of_args, list_of_kwargs):
|
||||
if type(response) == list:
|
||||
if isinstance(response, list):
|
||||
if not response:
|
||||
raise ValueError(
|
||||
"Empty list returned for multicall of method %r with args %r, %r" # noqa: E501
|
||||
@ -821,13 +775,61 @@ class KojiWrapper(object):
|
||||
"""
|
||||
return self.multicall_map(*args, **kwargs)
|
||||
|
||||
def save_task_id(self, task_id):
|
||||
"""Save task id by creating a file using task_id as file name
|
||||
|
||||
:param int task_id: ID of koji task
|
||||
"""
|
||||
log_dir = self.compose.paths.log.koji_tasks_dir()
|
||||
with open(os.path.join(log_dir, str(task_id)), "w"):
|
||||
pass
|
||||
|
||||
|
||||
class KojiMockWrapper(object):
|
||||
lock = threading.Lock()
|
||||
|
||||
def __init__(self, compose, all_arches):
|
||||
self.all_arches = all_arches
|
||||
self.compose = compose
|
||||
try:
|
||||
self.profile = self.compose.conf["koji_profile"]
|
||||
except KeyError:
|
||||
raise RuntimeError("Koji profile must be configured")
|
||||
with self.lock:
|
||||
self.koji_module = koji.get_profile_module(self.profile)
|
||||
session_opts = {}
|
||||
for key in (
|
||||
"timeout",
|
||||
"keepalive",
|
||||
"max_retries",
|
||||
"retry_interval",
|
||||
"anon_retry",
|
||||
"offline_retry",
|
||||
"offline_retry_interval",
|
||||
"debug",
|
||||
"debug_xmlrpc",
|
||||
"serverca",
|
||||
"use_fast_upload",
|
||||
):
|
||||
value = getattr(self.koji_module.config, key, None)
|
||||
if value is not None:
|
||||
session_opts[key] = value
|
||||
self.koji_proxy = KojiMock(
|
||||
packages_dir=self.koji_module.config.topdir,
|
||||
modules_dir=os.path.join(
|
||||
self.koji_module.config.topdir,
|
||||
'modules',
|
||||
),
|
||||
all_arches=self.all_arches,
|
||||
)
|
||||
|
||||
|
||||
def get_buildroot_rpms(compose, task_id):
|
||||
"""Get build root RPMs - either from runroot or local"""
|
||||
result = []
|
||||
if task_id:
|
||||
# runroot
|
||||
koji = KojiWrapper(compose.conf["koji_profile"])
|
||||
koji = KojiWrapper(compose)
|
||||
buildroot_infos = koji.koji_proxy.listBuildroots(taskID=task_id)
|
||||
if not buildroot_infos:
|
||||
children_tasks = koji.koji_proxy.getTaskChildren(task_id)
|
||||
@ -853,3 +855,177 @@ def get_buildroot_rpms(compose, task_id):
|
||||
continue
|
||||
result.append(i)
|
||||
return sorted(result)
|
||||
|
||||
|
||||
class KojiDownloadProxy:
|
||||
def __init__(self, topdir, topurl, cache_dir, logger):
|
||||
if not topdir:
|
||||
# This will only happen if there is either no koji_profile
|
||||
# configured, or the profile doesn't have a topdir. In the first
|
||||
# case there will be no koji interaction, and the second indicates
|
||||
# broken koji configuration.
|
||||
# We can pretend to have local access in both cases to avoid any
|
||||
# external requests.
|
||||
self.has_local_access = True
|
||||
return
|
||||
|
||||
self.cache_dir = cache_dir
|
||||
self.logger = logger
|
||||
|
||||
self.topdir = topdir
|
||||
self.topurl = topurl
|
||||
|
||||
# If cache directory is configured, we want to use it (even if we
|
||||
# actually have local access to the storage).
|
||||
self.has_local_access = not bool(cache_dir)
|
||||
# This is used for temporary downloaded files. The suffix is unique
|
||||
# per-process. To prevent threads in the same process from colliding, a
|
||||
# thread id is added later.
|
||||
self.unique_suffix = "%s.%s" % (socket.gethostname(), os.getpid())
|
||||
self.session = None
|
||||
if not self.has_local_access:
|
||||
self.session = requests.Session()
|
||||
|
||||
@property
|
||||
def path_prefix(self):
|
||||
dir = self.topdir if self.has_local_access else self.cache_dir
|
||||
return dir.rstrip("/") + "/"
|
||||
|
||||
@classmethod
|
||||
def from_config(klass, conf, logger):
|
||||
topdir = None
|
||||
topurl = None
|
||||
cache_dir = None
|
||||
if "koji_profile" in conf:
|
||||
koji_module = koji.get_profile_module(conf["koji_profile"])
|
||||
topdir = koji_module.config.topdir
|
||||
topurl = koji_module.config.topurl
|
||||
|
||||
cache_dir = conf.get("koji_cache")
|
||||
if cache_dir:
|
||||
cache_dir = cache_dir.rstrip("/") + "/"
|
||||
return klass(topdir, topurl, cache_dir, logger)
|
||||
|
||||
@util.retry(wait_on=requests.exceptions.RequestException)
|
||||
def _download(self, url, dest):
|
||||
"""Download file into given location
|
||||
|
||||
:param str url: URL of the file to download
|
||||
:param str dest: file path to store the result in
|
||||
:returns: path to the downloaded file (same as dest) or None if the URL
|
||||
"""
|
||||
# contextlib.closing is only needed in requests<2.18
|
||||
with contextlib.closing(self.session.get(url, stream=True)) as r:
|
||||
if r.status_code == 404:
|
||||
self.logger.warning("GET %s NOT FOUND", url)
|
||||
return None
|
||||
if r.status_code != 200:
|
||||
self.logger.error("GET %s %s", url, r.status_code)
|
||||
r.raise_for_status()
|
||||
# The exception from here will be retried by the decorator.
|
||||
|
||||
file_size = int(r.headers.get("Content-Length", 0))
|
||||
self.logger.info("GET %s OK %s", url, util.format_size(file_size))
|
||||
with open(dest, "wb") as f:
|
||||
shutil.copyfileobj(r.raw, f)
|
||||
return dest
|
||||
|
||||
def _delete(self, path):
|
||||
"""Try to delete file at given path and ignore errors."""
|
||||
try:
|
||||
os.remove(path)
|
||||
except Exception:
|
||||
self.logger.warning("Failed to delete %s", path)
|
||||
|
||||
def _atomic_download(self, url, dest, validator):
|
||||
"""Atomically download a file
|
||||
|
||||
:param str url: URL of the file to download
|
||||
:param str dest: file path to store the result in
|
||||
:returns: path to the downloaded file (same as dest) or None if the URL
|
||||
return 404.
|
||||
"""
|
||||
temp_file = "%s.%s.%s" % (dest, self.unique_suffix, threading.get_ident())
|
||||
|
||||
# First download to the temporary location.
|
||||
try:
|
||||
if self._download(url, temp_file) is None:
|
||||
# The file was not found.
|
||||
return None
|
||||
except Exception:
|
||||
# Download failed, let's make sure to clean up potentially partial
|
||||
# temporary file.
|
||||
self._delete(temp_file)
|
||||
raise
|
||||
|
||||
# Check if the temporary file is correct (assuming we were provided a
|
||||
# validator function).
|
||||
try:
|
||||
if validator:
|
||||
validator(temp_file)
|
||||
except Exception:
|
||||
# Validation failed. Let's delete the problematic file and re-raise
|
||||
# the exception.
|
||||
self._delete(temp_file)
|
||||
raise
|
||||
|
||||
# Atomically move the temporary file into final location
|
||||
os.rename(temp_file, dest)
|
||||
return dest
|
||||
|
||||
def _download_file(self, path, validator):
|
||||
"""Ensure file on Koji volume in ``path`` is present in the local
|
||||
cache.
|
||||
|
||||
:returns: path to the local file or None if file is not found
|
||||
"""
|
||||
url = path.replace(self.topdir, self.topurl)
|
||||
destination_file = path.replace(self.topdir, self.cache_dir)
|
||||
util.makedirs(os.path.dirname(destination_file))
|
||||
|
||||
lock = Lock(destination_file + ".lock")
|
||||
# Hold the lock for this file for 5 minutes. If another compose needs
|
||||
# the same file but it's not downloaded yet, the process will wait.
|
||||
#
|
||||
# If the download finishes in time, the downloaded file will be used
|
||||
# here.
|
||||
#
|
||||
# If the download takes longer, this process will steal the lock and
|
||||
# start its own download.
|
||||
#
|
||||
# That should not be a problem: the same file will be downloaded and
|
||||
# then replaced atomically on the filesystem. If the original process
|
||||
# managed to hardlink the first file already, that hardlink will be
|
||||
# broken, but that will only result in the same file stored twice.
|
||||
lock.lifetime = timedelta(minutes=5)
|
||||
|
||||
with lock:
|
||||
# Check if the file already exists. If yes, return the path.
|
||||
if os.path.exists(destination_file):
|
||||
# Update mtime of the file. This covers the case of packages in the
|
||||
# tag that are not included in the compose. Updating mtime will
|
||||
# exempt them from cleanup for extra time.
|
||||
os.utime(destination_file)
|
||||
return destination_file
|
||||
|
||||
return self._atomic_download(url, destination_file, validator)
|
||||
|
||||
def get_file(self, path, validator=None):
|
||||
"""
|
||||
If path refers to an existing file in Koji, return a valid local path
|
||||
to it. If no such file exists, return None.
|
||||
|
||||
:param validator: A callable that will be called with the path to the
|
||||
downloaded file if and only if the file was actually downloaded.
|
||||
Any exception raised from there will be abort the download and be
|
||||
propagated.
|
||||
"""
|
||||
if self.has_local_access:
|
||||
# We have koji volume mounted locally. No transformation needed for
|
||||
# the path, just check it exists.
|
||||
if os.path.exists(path):
|
||||
return path
|
||||
return None
|
||||
else:
|
||||
# We need to download the file.
|
||||
return self._download_file(path, validator)
|
||||
|
@ -109,55 +109,3 @@ class LoraxWrapper(object):
|
||||
# TODO: workdir
|
||||
|
||||
return cmd
|
||||
|
||||
def get_buildinstall_cmd(
|
||||
self,
|
||||
product,
|
||||
version,
|
||||
release,
|
||||
repo_baseurl,
|
||||
output_dir,
|
||||
variant=None,
|
||||
bugurl=None,
|
||||
nomacboot=False,
|
||||
noupgrade=False,
|
||||
is_final=False,
|
||||
buildarch=None,
|
||||
volid=None,
|
||||
brand=None,
|
||||
):
|
||||
# RHEL 6 compatibility
|
||||
# Usage: buildinstall [--debug] --version <version> --brand <brand> --product <product> --release <comment> --final [--output outputdir] [--discs <discstring>] <root> # noqa: E501
|
||||
|
||||
brand = brand or "redhat"
|
||||
# HACK: ignore provided release
|
||||
release = "%s %s" % (brand, version)
|
||||
bugurl = bugurl or "https://bugzilla.redhat.com"
|
||||
|
||||
cmd = ["/usr/lib/anaconda-runtime/buildinstall"]
|
||||
|
||||
cmd.append("--debug")
|
||||
|
||||
cmd.extend(["--version", version])
|
||||
cmd.extend(["--brand", brand])
|
||||
cmd.extend(["--product", product])
|
||||
cmd.extend(["--release", release])
|
||||
|
||||
if is_final:
|
||||
cmd.append("--final")
|
||||
|
||||
if buildarch:
|
||||
cmd.extend(["--buildarch", buildarch])
|
||||
|
||||
if bugurl:
|
||||
cmd.extend(["--bugurl", bugurl])
|
||||
|
||||
output_dir = os.path.abspath(output_dir)
|
||||
cmd.extend(["--output", output_dir])
|
||||
|
||||
for i in force_list(repo_baseurl):
|
||||
if "://" not in i:
|
||||
i = "file://%s" % os.path.abspath(i)
|
||||
cmd.append(i)
|
||||
|
||||
return cmd
|
||||
|
@ -40,9 +40,13 @@ def get_repoclosure_cmd(backend="yum", arch=None, repos=None, lookaside=None):
|
||||
# There are options that are not exposed here, because we don't need
|
||||
# them.
|
||||
|
||||
for i in force_list(arch or []):
|
||||
arches = force_list(arch or [])
|
||||
for i in arches:
|
||||
cmd.append("--arch=%s" % i)
|
||||
|
||||
if backend == "dnf" and arches:
|
||||
cmd.append("--forcearch=%s" % arches[0])
|
||||
|
||||
repos = repos or {}
|
||||
for repo_id, repo_path in repos.items():
|
||||
cmd.append("--repofrompath=%s,%s" % (repo_id, _to_url(repo_path)))
|
||||
|
@ -20,6 +20,7 @@ import os
|
||||
import shutil
|
||||
import glob
|
||||
import six
|
||||
import threading
|
||||
from six.moves import shlex_quote
|
||||
from six.moves.urllib.request import urlretrieve
|
||||
from fnmatch import fnmatch
|
||||
@ -29,12 +30,15 @@ from kobo.shortcuts import run, force_list
|
||||
from pungi.util import explode_rpm_package, makedirs, copy_all, temp_dir, retry
|
||||
from .kojiwrapper import KojiWrapper
|
||||
|
||||
lock = threading.Lock()
|
||||
|
||||
|
||||
class ScmBase(kobo.log.LoggingBase):
|
||||
def __init__(self, logger=None, command=None, compose=None):
|
||||
def __init__(self, logger=None, command=None, compose=None, options=None):
|
||||
kobo.log.LoggingBase.__init__(self, logger=logger)
|
||||
self.command = command
|
||||
self.compose = compose
|
||||
self.options = options or {}
|
||||
|
||||
@retry(interval=60, timeout=300, wait_on=RuntimeError)
|
||||
def retry_run(self, cmd, **kwargs):
|
||||
@ -156,22 +160,31 @@ class GitWrapper(ScmBase):
|
||||
if "://" not in repo:
|
||||
repo = "file://%s" % repo
|
||||
|
||||
git_cmd = ["git"]
|
||||
if "credential_helper" in self.options:
|
||||
git_cmd.extend(["-c", "credential.useHttpPath=true"])
|
||||
git_cmd.extend(
|
||||
["-c", "credential.helper=%s" % self.options["credential_helper"]]
|
||||
)
|
||||
|
||||
run(["git", "init"], workdir=destdir)
|
||||
try:
|
||||
run(["git", "fetch", "--depth=1", repo, branch], workdir=destdir)
|
||||
run(git_cmd + ["fetch", "--depth=1", repo, branch], workdir=destdir)
|
||||
run(["git", "checkout", "FETCH_HEAD"], workdir=destdir)
|
||||
except RuntimeError as e:
|
||||
# Fetch failed, to do a full clone we add a remote to our empty
|
||||
# repo, get its content and check out the reference we want.
|
||||
self.log_debug(
|
||||
"Trying to do a full clone because shallow clone failed: %s %s"
|
||||
% (e, e.output)
|
||||
% (e, getattr(e, "output", ""))
|
||||
)
|
||||
try:
|
||||
# Re-run git init in case of previous failure breaking .git dir
|
||||
run(["git", "init"], workdir=destdir)
|
||||
run(["git", "remote", "add", "origin", repo], workdir=destdir)
|
||||
self.retry_run(["git", "remote", "update", "origin"], workdir=destdir)
|
||||
self.retry_run(
|
||||
git_cmd + ["remote", "update", "origin"], workdir=destdir
|
||||
)
|
||||
run(["git", "checkout", branch], workdir=destdir)
|
||||
except RuntimeError:
|
||||
if self.compose:
|
||||
@ -185,19 +198,38 @@ class GitWrapper(ScmBase):
|
||||
copy_all(destdir, debugdir)
|
||||
raise
|
||||
|
||||
self.run_process_command(destdir)
|
||||
def get_temp_repo_path(self, scm_root, scm_branch):
|
||||
scm_repo = scm_root.split("/")[-1]
|
||||
process_id = os.getpid()
|
||||
tmp_dir = (
|
||||
"/tmp/pungi-temp-git-repos-"
|
||||
+ str(process_id)
|
||||
+ "/"
|
||||
+ scm_repo
|
||||
+ "-"
|
||||
+ scm_branch
|
||||
)
|
||||
return tmp_dir
|
||||
|
||||
def setup_repo(self, scm_root, scm_branch):
|
||||
tmp_dir = self.get_temp_repo_path(scm_root, scm_branch)
|
||||
if not os.path.isdir(tmp_dir):
|
||||
makedirs(tmp_dir)
|
||||
self._clone(scm_root, scm_branch, tmp_dir)
|
||||
self.run_process_command(tmp_dir)
|
||||
return tmp_dir
|
||||
|
||||
def export_dir(self, scm_root, scm_dir, target_dir, scm_branch=None):
|
||||
scm_dir = scm_dir.lstrip("/")
|
||||
scm_branch = scm_branch or "master"
|
||||
|
||||
with temp_dir() as tmp_dir:
|
||||
self.log_debug(
|
||||
"Exporting directory %s from git %s (branch %s)..."
|
||||
% (scm_dir, scm_root, scm_branch)
|
||||
)
|
||||
self.log_debug(
|
||||
"Exporting directory %s from git %s (branch %s)..."
|
||||
% (scm_dir, scm_root, scm_branch)
|
||||
)
|
||||
|
||||
self._clone(scm_root, scm_branch, tmp_dir)
|
||||
with lock:
|
||||
tmp_dir = self.setup_repo(scm_root, scm_branch)
|
||||
|
||||
copy_all(os.path.join(tmp_dir, scm_dir), target_dir)
|
||||
|
||||
@ -205,15 +237,15 @@ class GitWrapper(ScmBase):
|
||||
scm_file = scm_file.lstrip("/")
|
||||
scm_branch = scm_branch or "master"
|
||||
|
||||
with temp_dir() as tmp_dir:
|
||||
target_path = os.path.join(target_dir, os.path.basename(scm_file))
|
||||
target_path = os.path.join(target_dir, os.path.basename(scm_file))
|
||||
|
||||
self.log_debug(
|
||||
"Exporting file %s from git %s (branch %s)..."
|
||||
% (scm_file, scm_root, scm_branch)
|
||||
)
|
||||
self.log_debug(
|
||||
"Exporting file %s from git %s (branch %s)..."
|
||||
% (scm_file, scm_root, scm_branch)
|
||||
)
|
||||
|
||||
self._clone(scm_root, scm_branch, tmp_dir)
|
||||
with lock:
|
||||
tmp_dir = self.setup_repo(scm_root, scm_branch)
|
||||
|
||||
makedirs(target_dir)
|
||||
shutil.copy2(os.path.join(tmp_dir, scm_file), target_path)
|
||||
@ -265,11 +297,7 @@ class RpmScmWrapper(ScmBase):
|
||||
class KojiScmWrapper(ScmBase):
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(KojiScmWrapper, self).__init__(*args, **kwargs)
|
||||
try:
|
||||
profile = kwargs["compose"].conf["koji_profile"]
|
||||
except KeyError:
|
||||
raise RuntimeError("Koji profile must be configured")
|
||||
wrapper = KojiWrapper(profile)
|
||||
wrapper = KojiWrapper(kwargs["compose"])
|
||||
self.koji = wrapper.koji_module
|
||||
self.proxy = wrapper.koji_proxy
|
||||
|
||||
@ -365,15 +393,19 @@ def get_file_from_scm(scm_dict, target_path, compose=None):
|
||||
scm_file = os.path.abspath(scm_dict)
|
||||
scm_branch = None
|
||||
command = None
|
||||
options = {}
|
||||
else:
|
||||
scm_type = scm_dict["scm"]
|
||||
scm_repo = scm_dict["repo"]
|
||||
scm_file = scm_dict["file"]
|
||||
scm_branch = scm_dict.get("branch", None)
|
||||
command = scm_dict.get("command")
|
||||
options = scm_dict.get("options", {})
|
||||
|
||||
logger = compose._logger if compose else None
|
||||
scm = _get_wrapper(scm_type, logger=logger, command=command, compose=compose)
|
||||
scm = _get_wrapper(
|
||||
scm_type, logger=logger, command=command, compose=compose, options=options
|
||||
)
|
||||
|
||||
files_copied = []
|
||||
for i in force_list(scm_file):
|
||||
@ -454,15 +486,19 @@ def get_dir_from_scm(scm_dict, target_path, compose=None):
|
||||
scm_dir = os.path.abspath(scm_dict)
|
||||
scm_branch = None
|
||||
command = None
|
||||
options = {}
|
||||
else:
|
||||
scm_type = scm_dict["scm"]
|
||||
scm_repo = scm_dict.get("repo", None)
|
||||
scm_dir = scm_dict["dir"]
|
||||
scm_branch = scm_dict.get("branch", None)
|
||||
command = scm_dict.get("command")
|
||||
options = scm_dict.get("options", {})
|
||||
|
||||
logger = compose._logger if compose else None
|
||||
scm = _get_wrapper(scm_type, logger=logger, command=command, compose=compose)
|
||||
scm = _get_wrapper(
|
||||
scm_type, logger=logger, command=command, compose=compose, options=options
|
||||
)
|
||||
|
||||
with temp_dir(prefix="scm_checkout_") as tmp_dir:
|
||||
scm.export_dir(scm_repo, scm_dir, scm_branch=scm_branch, target_dir=tmp_dir)
|
||||
|
@ -276,7 +276,6 @@ class Variant(object):
|
||||
modules=None,
|
||||
modular_koji_tags=None,
|
||||
):
|
||||
|
||||
environments = environments or []
|
||||
buildinstallpackages = buildinstallpackages or []
|
||||
|
||||
|
@ -1,706 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import argparse
|
||||
import atexit
|
||||
import errno
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
import time
|
||||
import threading
|
||||
from collections import namedtuple
|
||||
|
||||
import kobo.conf
|
||||
import kobo.log
|
||||
import productmd
|
||||
from kobo import shortcuts
|
||||
from six.moves import configparser, shlex_quote
|
||||
|
||||
import pungi.util
|
||||
from pungi.compose import get_compose_dir
|
||||
from pungi.linker import linker_pool
|
||||
from pungi.phases.pkgset.sources.source_koji import get_koji_event_raw
|
||||
from pungi.util import find_old_compose, parse_koji_event, temp_dir
|
||||
from pungi.wrappers.kojiwrapper import KojiWrapper
|
||||
|
||||
|
||||
Config = namedtuple(
|
||||
"Config",
|
||||
[
|
||||
# Path to directory with the compose
|
||||
"target",
|
||||
"compose_type",
|
||||
"label",
|
||||
# Path to the selected old compose that will be reused
|
||||
"old_compose",
|
||||
# Path to directory with config file copies
|
||||
"config_dir",
|
||||
# Which koji event to use (if any)
|
||||
"event",
|
||||
# Additional arguments to pungi-koji executable
|
||||
"extra_args",
|
||||
],
|
||||
)
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Status(object):
|
||||
# Ready to start
|
||||
READY = "READY"
|
||||
# Waiting for dependencies to finish.
|
||||
WAITING = "WAITING"
|
||||
# Part is currently running
|
||||
STARTED = "STARTED"
|
||||
# A dependency failed, this one will never start.
|
||||
BLOCKED = "BLOCKED"
|
||||
|
||||
|
||||
class ComposePart(object):
    """One part of a multi-part compose.

    Tracks the part's configuration, dependency bookkeeping and the runtime
    state needed to launch and monitor its ``pungi-koji`` run.
    """

    def __init__(self, name, config, just_phase=None, skip_phase=None, dependencies=None):
        """
        :param name: name of the part (section in orchestrator config)
        :param config: path to the pungi configuration file for this part
        :param just_phase: phases to restrict the run to (default: all)
        :param skip_phase: phases to skip (default: none)
        :param dependencies: names of parts that must finish before this one
        """
        # None sentinels instead of mutable default arguments ([]), which
        # would be shared between all instances.
        dependencies = dependencies if dependencies is not None else []
        self.name = name
        self.config = config
        # A part with no dependencies can start right away.
        self.status = Status.WAITING if dependencies else Status.READY
        self.just_phase = just_phase if just_phase is not None else []
        self.skip_phase = skip_phase if skip_phase is not None else []
        # Names of unfinished dependencies; shrinks as parts complete.
        self.blocked_on = set(dependencies)
        # All declared dependencies (kept intact for repr/debugging).
        self.depends_on = set(dependencies)
        self.path = None
        self.log_file = None
        self.failable = False

    def __str__(self):
        return self.name

    def __repr__(self):
        return (
            "ComposePart({0.name!r},"
            " {0.config!r},"
            " {0.status!r},"
            " just_phase={0.just_phase!r},"
            " skip_phase={0.skip_phase!r},"
            " dependencies={0.depends_on!r})"
        ).format(self)

    def refresh_status(self):
        """Refresh status of this part with the result of the compose. This
        should only be called once the compose finished.
        """
        try:
            with open(os.path.join(self.path, "STATUS")) as fh:
                self.status = fh.read().strip()
        except IOError as exc:
            # Missing or unreadable STATUS file: assume the worst.
            log.error("Failed to update status of %s: %s", self.name, exc)
            log.error("Assuming %s is DOOMED", self.name)
            self.status = "DOOMED"

    def is_finished(self):
        # Substring check matches both FINISHED and FINISHED_INCOMPLETE.
        return "FINISHED" in self.status

    def unblock_on(self, finished_part):
        """Update set of blockers for this part. If it's empty, mark us as ready."""
        self.blocked_on.discard(finished_part)
        if self.status == Status.WAITING and not self.blocked_on:
            log.debug("%s is ready to start", self)
            self.status = Status.READY

    def setup_start(self, global_config, parts):
        """Fill config placeholders and create this part's compose directory.

        Placeholders of the form ``part-NAME`` resolve to paths of already
        finished parts; ``configdir`` resolves to the orchestrator config dir.
        """
        substitutions = dict(
            ("part-%s" % name, p.path) for name, p in parts.items() if p.is_finished()
        )
        substitutions["configdir"] = global_config.config_dir

        config = pungi.util.load_config(self.config)

        for f in config.opened_files:
            # apply substitutions
            fill_in_config_file(f, substitutions)

        self.status = Status.STARTED
        self.path = get_compose_dir(
            os.path.join(global_config.target, "parts"),
            config,
            compose_type=global_config.compose_type,
            compose_label=global_config.label,
        )
        self.log_file = os.path.join(global_config.target, "logs", "%s.log" % self.name)
        log.info("Starting %s in %s", self.name, self.path)

    def get_cmd(self, global_config):
        """Build the pungi-koji command line for this part."""
        cmd = ["pungi-koji", "--config", self.config, "--compose-dir", self.path]
        cmd.append("--%s" % global_config.compose_type)
        if global_config.label:
            cmd.extend(["--label", global_config.label])
        for phase in self.just_phase:
            cmd.extend(["--just-phase", phase])
        for phase in self.skip_phase:
            cmd.extend(["--skip-phase", phase])
        if global_config.old_compose:
            cmd.extend(
                ["--old-compose", os.path.join(global_config.old_compose, "parts")]
            )
        if global_config.event:
            cmd.extend(["--koji-event", str(global_config.event)])
        if global_config.extra_args:
            cmd.extend(global_config.extra_args)
        # The orchestrator manages the final compose location itself.
        cmd.extend(["--no-latest-link"])
        return cmd

    @classmethod
    def from_config(cls, config, section, config_dir):
        """Create a part from one section of the orchestrator config file."""
        part = cls(
            name=section,
            config=os.path.join(config_dir, config.get(section, "config")),
            just_phase=_safe_get_list(config, section, "just_phase", []),
            skip_phase=_safe_get_list(config, section, "skip_phase", []),
            dependencies=_safe_get_list(config, section, "depends_on", []),
        )
        if config.has_option(section, "failable"):
            part.failable = config.getboolean(section, "failable")
        return part
def _safe_get_list(config, section, option, default=None):
|
||||
"""Get a value from config parser. The result is split into a list on
|
||||
commas or spaces, and `default` is returned if the key does not exist.
|
||||
"""
|
||||
if config.has_option(section, option):
|
||||
value = config.get(section, option)
|
||||
return [x.strip() for x in re.split(r"[, ]+", value) if x]
|
||||
return default
|
||||
|
||||
|
||||
def fill_in_config_file(fp, substs):
    """Templating function. It works with Jinja2 style placeholders such as
    {{foo}}. Whitespace around the key name is fine. The file is modified in place.

    :param fp string: path to the file to process
    :param substs dict: a mapping for values to put into the file
    """

    def repl(match):
        try:
            return substs[match.group(1)]
        except KeyError as exc:
            raise RuntimeError(
                "Unknown placeholder %s in %s" % (exc, os.path.basename(fp))
            )

    with open(fp, "r") as f:
        original = f.read()
    updated = re.sub(r"{{ *([a-zA-Z-_]+) *}}", repl, original)
    with open(fp, "w") as f:
        f.write(updated)
||||
def start_part(global_config, parts, part):
    """Launch pungi-koji for *part* and return its ``subprocess.Popen``.

    The subprocess writes both stdout and stderr into the part's log file.
    """
    part.setup_start(global_config, parts)
    fh = open(part.log_file, "w")
    cmd = part.get_cmd(global_config)
    log.debug("Running command %r", " ".join(shlex_quote(x) for x in cmd))
    try:
        return subprocess.Popen(cmd, stdout=fh, stderr=subprocess.STDOUT)
    finally:
        # Popen duplicates the descriptor for the child, so the parent's
        # copy can be closed immediately instead of leaking one fd per part.
        fh.close()
|
||||
def handle_finished(global_config, linker, parts, proc, finished_part):
    """Process a part whose subprocess has exited.

    On success the results are linked into the final compose and dependent
    parts are unblocked; on failure everything depending on this part is
    marked as blocked.
    """
    finished_part.refresh_status()
    log.info("%s finished with status %s", finished_part, finished_part.status)
    if proc.returncode != 0:
        # Failure, other stuff may be blocked.
        log.info("See details in %s", finished_part.log_file)
        block_on(parts, finished_part.name)
        return
    # Success, unblock other parts...
    for other in parts.values():
        other.unblock_on(finished_part.name)
    # ...and link the results into final destination.
    copy_part(global_config, linker, finished_part)
    update_metadata(global_config, finished_part)
||||
def copy_part(global_config, linker, part):
    """Queue hardlinking of every variant of a finished part into the target."""
    compose = productmd.Compose(part.path)
    for variant in compose.info.variants:
        source = os.path.join(part.path, "compose", variant)
        destination = os.path.join(global_config.target, "compose", variant)
        log.info("Hardlinking content %s -> %s", source, destination)
        hardlink_dir(linker, source, destination)
||||
def hardlink_dir(linker, srcdir, dstdir):
    """Queue a hardlink job for every file under *srcdir* into *dstdir*,
    preserving the relative directory layout.
    """
    for walk_root, _dirs, filenames in os.walk(srcdir):
        rel = os.path.relpath(walk_root, srcdir)
        for filename in filenames:
            source = os.path.normpath(os.path.join(srcdir, rel, filename))
            target = os.path.normpath(os.path.join(dstdir, rel, filename))
            linker.queue_put((source, target))
||||
def update_metadata(global_config, part):
    """Propagate metadata files of a finished part into the final compose.

    Files not yet present in the final metadata directory are copied;
    existing ones are merged.
    """
    part_metadata_dir = os.path.join(part.path, "compose", "metadata")
    final_metadata_dir = os.path.join(global_config.target, "compose", "metadata")
    for filename in os.listdir(part_metadata_dir):
        # Load the metadata
        with open(os.path.join(part_metadata_dir, filename)) as fh:
            part_metadata = json.load(fh)
        final_metadata = os.path.join(final_metadata_dir, filename)
        if not os.path.exists(final_metadata):
            # A new file, just copy it.
            copy_metadata(global_config, final_metadata, part_metadata)
        else:
            # We already have this file, will need to merge.
            merge_metadata(final_metadata, part_metadata)
||||
def copy_metadata(global_config, final_metadata, source):
    """Copy file to final location, but update compose information."""
    composeinfo_path = os.path.join(
        global_config.target, "compose/metadata/composeinfo.json"
    )
    with open(composeinfo_path) as f:
        composeinfo = json.load(f)
    try:
        source["payload"]["compose"].update(composeinfo["payload"]["compose"])
    except KeyError:
        # No [payload][compose], probably OSBS metadata
        pass
    with open(final_metadata, "w") as f:
        json.dump(source, f, indent=2, sort_keys=True)
||||
def merge_metadata(final_metadata, source):
    """Merge *source* metadata into the JSON file at *final_metadata*."""
    with open(final_metadata) as f:
        merged = json.load(f)

    type_to_key = {
        "productmd.composeinfo": "variants",
        "productmd.modules": "modules",
        "productmd.images": "images",
        "productmd.rpms": "rpms",
    }
    try:
        key = type_to_key[source["header"]["type"]]
        # TODO what if multiple parts create images for the same variant
        merged["payload"][key].update(source["payload"][key])
    except KeyError:
        # OSBS metadata, merge whole file
        merged.update(source)
    with open(final_metadata, "w") as f:
        json.dump(merged, f, indent=2, sort_keys=True)
||||
def block_on(parts, name):
    """Part ``name`` failed, mark everything depending on it as blocked."""
    for dependent in parts.values():
        if name not in dependent.blocked_on:
            continue
        log.warning("%s is blocked now and will not run", dependent)
        dependent.status = Status.BLOCKED
        # Recurse: everything depending on the blocked part is blocked too.
        block_on(parts, dependent.name)
||||
def check_finished_processes(processes):
    """Walk through all active processes and check if something finished.

    Yields ``(proc, part)`` pairs for every process that has exited.
    Iterates over a snapshot of the keys so that callers may safely delete
    entries from *processes* while consuming the generator (iterating the
    live dict view would raise RuntimeError on Python 3 in that case).
    """
    for proc in list(processes):
        proc.poll()
        if proc.returncode is not None:
            yield proc, processes[proc]
||||
def run_all(global_config, parts):
    """Scheduler loop: start parts as they become ready, reap them as they
    finish, and keep the on-disk status metadata up to date.

    Returns the result of the final ``update_status`` call, i.e. False when
    the overall compose ended up DOOMED.
    """
    # Mapping subprocess.Popen -> ComposePart
    processes = dict()
    # Names of parts that have not been started yet (finished parts from a
    # restarted compose are excluded up front).
    remaining = set(p.name for p in parts.values() if not p.is_finished())

    with linker_pool("hardlink") as linker:
        while remaining or processes:
            update_status(global_config, parts)

            # NOTE(review): entries are deleted from ``processes`` while the
            # generator from check_finished_processes() may still be
            # iterating over it — verify this cannot raise "dictionary
            # changed size during iteration" on Python 3.
            for proc, part in check_finished_processes(processes):
                del processes[proc]
                handle_finished(global_config, linker, parts, proc, part)

            # Start new available processes.
            for name in list(remaining):
                part = parts[name]
                # Start all ready parts
                if part.status == Status.READY:
                    remaining.remove(name)
                    processes[start_part(global_config, parts, part)] = part
                # Remove blocked parts from todo list
                elif part.status == Status.BLOCKED:
                    remaining.remove(part.name)

            # Wait for any child process to finish if there is any.
            if processes:
                pid, reason = os.wait()
                for proc in processes.keys():
                    # Set the return code for process that we caught by os.wait().
                    # Calling poll() on it would not set the return code properly
                    # since the value was already consumed by os.wait().
                    if proc.pid == pid:
                        # Extract the exit status from the raw wait status.
                        proc.returncode = (reason >> 8) & 0xFF

    # linker_pool's context exit waits for queued hardlink jobs.
    log.info("Waiting for linking to finish...")
    return update_status(global_config, parts)
||||
def get_target_dir(config, compose_info, label, reldir=""):
    """Find directory where this compose will be.

    @param reldir: if target path in config is relative, it will be resolved
                   against this directory
    """
    base = os.path.realpath(os.path.join(reldir, config.get("general", "target")))
    return get_compose_dir(
        base,
        compose_info,
        compose_type=config.get("general", "compose_type"),
        compose_label=label,
    )
||||
def setup_logging(debug=False):
    """Attach a stderr handler to the module logger and set its level."""
    log_format = "%(asctime)s: %(levelname)s: %(message)s"
    if debug:
        level = logging.DEBUG
    else:
        level = logging.INFO
    kobo.log.add_stderr_logger(log, log_level=level, format=log_format)
    log.setLevel(level)
||||
def compute_status(statuses):
    """Summarize ``(status, failable)`` pairs into one overall status."""
    if any(status in ("STARTED", "WAITING") for status, _ in statuses):
        # If there is anything still running or waiting to start, the whole
        # is still running.
        return "STARTED"
    if any(
        status in ("DOOMED", "BLOCKED") and not failable
        for status, failable in statuses
    ):
        # If any required part is doomed or blocked, the whole is doomed
        return "DOOMED"
    if all(status == "FINISHED" for status, _ in statuses):
        # If all parts are complete, the whole is complete
        return "FINISHED"
    return "FINISHED_INCOMPLETE"
||||
def update_status(global_config, parts):
    """Write per-part status to parts.json and the overall status to STATUS.

    Returns True unless the overall compose is DOOMED.
    """
    log.debug("Updating status metadata")
    metadata = {
        part.name: {"status": part.status, "path": part.path}
        for part in parts.values()
    }
    statuses = set((part.status, part.failable) for part in parts.values())

    metadata_path = os.path.join(
        global_config.target, "compose", "metadata", "parts.json"
    )
    with open(metadata_path, "w") as fh:
        json.dump(metadata, fh, indent=2, sort_keys=True, separators=(",", ": "))

    status = compute_status(statuses)
    log.info("Overall status is %s", status)
    with open(os.path.join(global_config.target, "STATUS"), "w") as fh:
        fh.write(status)

    return status != "DOOMED"
||||
def prepare_compose_dir(config, args, main_config_file, compose_info):
    """Create (or reuse, for a restart) the target directory; return its path."""
    if hasattr(args, "compose_path"):
        # Restarting a particular compose
        return args.compose_path

    # Creating a brand new compose
    target_dir = get_target_dir(
        config, compose_info, args.label, reldir=os.path.dirname(main_config_file)
    )
    for subdir in ("logs", "parts", "compose/metadata", "work/global"):
        try:
            os.makedirs(os.path.join(target_dir, subdir))
        except OSError as exc:
            # Pre-existing directories are fine; anything else is fatal.
            if exc.errno != errno.EEXIST:
                raise
    with open(os.path.join(target_dir, "STATUS"), "w") as fh:
        fh.write("STARTED")
    # Copy initial composeinfo for new compose
    shutil.copy(
        os.path.join(target_dir, "work/global/composeinfo-base.json"),
        os.path.join(target_dir, "compose/metadata/composeinfo.json"),
    )
    return target_dir
||||
def load_parts_metadata(global_config):
    """Load the per-part status metadata written by ``update_status``."""
    metadata_file = os.path.join(
        global_config.target, "compose/metadata/parts.json"
    )
    with open(metadata_file) as f:
        return json.load(f)
||||
def setup_for_restart(global_config, parts, to_restart):
    """Prepare the parts mapping for restarting selected parts.

    Loads each part's stored status from the previous run, resets the parts
    listed in *to_restart*, and marks those whose dependencies are already
    satisfied as ready to run.

    :raises RuntimeError: if none of the restarted parts can actually run.
    """
    has_stuff_to_do = False
    metadata = load_parts_metadata(global_config)
    for key in metadata:
        # Update state to match what is on disk
        log.debug(
            "Reusing %s (%s) from %s",
            key,
            metadata[key]["status"],
            metadata[key]["path"],
        )
        parts[key].status = metadata[key]["status"]
        parts[key].path = metadata[key]["path"]
    for key in to_restart:
        # Set restarted parts to run again
        parts[key].status = Status.WAITING
        parts[key].path = None

    for key in to_restart:
        # Remove blockers that are already finished
        for blocker in list(parts[key].blocked_on):
            if parts[blocker].is_finished():
                parts[key].blocked_on.discard(blocker)
        if not parts[key].blocked_on:
            # Typo fix: original read "Part %s in not blocked".
            log.debug("Part %s is not blocked", key)
            # Nothing blocks it; let's go
            parts[key].status = Status.READY
            has_stuff_to_do = True

    if not has_stuff_to_do:
        raise RuntimeError("All restarted parts are blocked. Nothing to do.")
||||
def run_kinit(config):
    """Obtain a kerberos ticket when the config asks for one.

    The credential cache lives in a temporary file that is removed when the
    orchestrator exits.
    """
    if not config.getboolean("general", "kerberos"):
        return

    keytab = config.get("general", "kerberos_keytab")
    principal = config.get("general", "kerberos_principal")

    ccache_fd, ccache_name = tempfile.mkstemp(prefix="krb5cc_pungi-orchestrate_")
    os.close(ccache_fd)
    os.environ["KRB5CCNAME"] = ccache_name
    shortcuts.run(["kinit", "-k", "-t", keytab, principal])
    log.debug("Created a kerberos ticket for %s", principal)

    atexit.register(os.remove, ccache_name)
||||
def get_compose_data(compose_path):
    """Collect identifying information about a compose.

    Returns an empty dict when the compose metadata cannot be read — this
    is a deliberate best-effort helper.
    """
    try:
        compose = productmd.compose.Compose(compose_path)
        info = compose.info
        data = {
            "compose_id": info.compose.id,
            "compose_date": info.compose.date,
            "compose_type": info.compose.type,
            "compose_respin": str(info.compose.respin),
            "compose_label": info.compose.label,
            "release_id": info.release_id,
            "release_name": info.release.name,
            "release_short": info.release.short,
            "release_version": info.release.version,
            "release_type": info.release.type,
            "release_is_layered": info.release.is_layered,
        }
        if info.release.is_layered:
            data["base_product_name"] = info.base_product.name
            data["base_product_short"] = info.base_product.short
            data["base_product_version"] = info.base_product.version
            data["base_product_type"] = info.base_product.type
        return data
    except Exception:
        # Best effort: any failure simply yields no data.
        return {}
||||
def get_script_env(compose_path):
    """Build the environment for hook scripts with compose data exported.

    Boolean values become "YES"/"" and everything else is stringified, with
    falsy values exported as "".
    """
    env = os.environ.copy()
    env["COMPOSE_PATH"] = compose_path
    for key, value in get_compose_data(compose_path).items():
        var = key.upper()
        if isinstance(value, bool):
            env[var] = "YES" if value else ""
        elif value:
            env[var] = str(value)
        else:
            env[var] = ""
    return env
||||
def run_scripts(prefix, compose_dir, scripts):
    """Run each line of *scripts* as a command, logging into separate files."""
    env = get_script_env(compose_dir)
    for idx, line in enumerate(scripts.strip().splitlines()):
        command = line.strip()
        logfile = os.path.join(compose_dir, "logs", "%s%s.log" % (prefix, idx))
        log.debug("Running command: %r", command)
        log.debug("See output in %s", logfile)
        shortcuts.run(command, env=env, logfile=logfile)
||||
def try_translate_path(parts, path):
    """Translate *path* using ``translate_paths`` from all part configs."""
    mapping = []
    for part in parts.values():
        part_config = pungi.util.load_config(part.config)
        mapping.extend(part_config.get("translate_paths", []))
    return pungi.util.translate_path_raw(mapping, path)
||||
def send_notification(compose_dir, command, parts):
    """Invoke the notification script (if configured) with current status."""
    if not command:
        return
    # Imported lazily; only needed when notifications are configured.
    from pungi.notifier import PungiNotifier

    data = get_compose_data(compose_dir)
    data["location"] = try_translate_path(parts, compose_dir)
    with open(os.path.join(compose_dir, "STATUS")) as f:
        status = f.read().strip()
    notifier = PungiNotifier([command])
    notifier.send("status-change", workdir=compose_dir, status=status, **data)
||||
def setup_progress_monitor(global_config, parts):
    """Update configuration so that each part send notifications about its
    progress to the orchestrator.

    There is a file to which the notification is written. The orchestrator is
    reading it and mapping the entries to particular parts. The path to this
    file is stored in an environment variable.
    """
    tmp_file = tempfile.NamedTemporaryFile(prefix="pungi-progress-monitor_")
    # Child pungi-koji processes pick up the path from the environment.
    os.environ["_PUNGI_ORCHESTRATOR_PROGRESS_MONITOR"] = tmp_file.name
    # NOTE(review): NamedTemporaryFile deletes the file itself when closed;
    # confirm the atexit handler cannot race that and fail on a missing file.
    atexit.register(os.remove, tmp_file.name)

    global_config.extra_args.append(
        "--notification-script=pungi-notification-report-progress"
    )

    def reader():
        # Tail the shared file forever; runs as a daemon thread so it dies
        # together with the main process.
        while True:
            line = tmp_file.readline()
            if not line:
                # Nothing new yet; poll again shortly.
                time.sleep(0.1)
                continue
            # Each entry looks like "<notification path>:<message>"; match it
            # to a part via the compose directory.
            # NOTE(review): NamedTemporaryFile defaults to binary mode, so
            # ``line`` would be bytes on Python 3 and splitting on a str
            # separator would raise — verify the intended file mode.
            path, msg = line.split(":", 1)
            for part in parts:
                if parts[part].path == os.path.dirname(path):
                    log.debug("%s: %s", part, msg.strip())
                    break

    monitor = threading.Thread(target=reader)
    # Daemon: do not keep the process alive just for monitoring.
    monitor.daemon = True
    monitor.start()
||||
def run(work_dir, main_config_file, args):
    """Top-level orchestration: read config, prepare the compose directory,
    run all parts and the pre/post hooks.

    :param work_dir: scratch directory (the config is copied here so parts
        can fill in placeholders without touching the original)
    :param main_config_file: path to the orchestrator INI file
    :param args: parsed command line arguments
    :returns: truthy when the compose did not end up DOOMED
    """
    # Work on a copy of the config directory; parts rewrite their configs.
    config_dir = os.path.join(work_dir, "config")
    shutil.copytree(os.path.dirname(main_config_file), config_dir)

    # Read main config
    parser = configparser.RawConfigParser(
        defaults={
            "kerberos": "false",
            "pre_compose_script": "",
            "post_compose_script": "",
            "notification_script": "",
        }
    )
    parser.read(main_config_file)

    # Create kerberos ticket
    run_kinit(parser)

    compose_info = dict(parser.items("general"))
    compose_type = parser.get("general", "compose_type")

    target_dir = prepare_compose_dir(parser, args, main_config_file, compose_info)
    kobo.log.add_file_logger(log, os.path.join(target_dir, "logs", "orchestrator.log"))
    log.info("Composing %s", target_dir)

    run_scripts("pre_compose_", target_dir, parser.get("general", "pre_compose_script"))

    # Look for a previous compose next to the target to reuse data from.
    old_compose = find_old_compose(
        os.path.dirname(target_dir),
        compose_info["release_short"],
        compose_info["release_version"],
        "",
    )
    if old_compose:
        log.info("Reusing old compose %s", old_compose)

    global_config = Config(
        target=target_dir,
        compose_type=compose_type,
        label=args.label,
        old_compose=old_compose,
        config_dir=os.path.dirname(main_config_file),
        event=args.koji_event,
        extra_args=_safe_get_list(parser, "general", "extra_args"),
    )

    # No explicit koji event: pin one now so all parts share it.
    if not global_config.event and parser.has_option("general", "koji_profile"):
        koji_wrapper = KojiWrapper(parser.get("general", "koji_profile"))
        event_file = os.path.join(global_config.target, "work/global/koji-event")
        result = get_koji_event_raw(koji_wrapper, None, event_file)
        global_config = global_config._replace(event=result["id"])

    # Every non-general section describes one compose part.
    parts = {}
    for section in parser.sections():
        if section == "general":
            continue
        parts[section] = ComposePart.from_config(parser, section, config_dir)

    # ``part`` is only present on the restart subcommand.
    if hasattr(args, "part"):
        setup_for_restart(global_config, parts, args.part)

    setup_progress_monitor(global_config, parts)

    send_notification(target_dir, parser.get("general", "notification_script"), parts)

    retcode = run_all(global_config, parts)

    if retcode:
        # Only run the script if we are not doomed.
        run_scripts(
            "post_compose_", target_dir, parser.get("general", "post_compose_script")
        )

    send_notification(target_dir, parser.get("general", "notification_script"), parts)

    return retcode
||||
def parse_args(argv):
    """Parse command line arguments.

    Two subcommands: ``start`` creates a new compose from a config file,
    ``restart`` reruns selected parts of an existing compose.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", action="store_true")
    parser.add_argument("--koji-event", metavar="ID", type=parse_koji_event)
    subparsers = parser.add_subparsers()

    start = subparsers.add_parser("start")
    start.add_argument("config", metavar="CONFIG")
    start.add_argument("--label")

    restart = subparsers.add_parser("restart")
    restart.add_argument("config", metavar="CONFIG")
    restart.add_argument("compose_path", metavar="COMPOSE_PATH")
    restart.add_argument(
        "part", metavar="PART", nargs="*", help="which parts to restart"
    )
    restart.add_argument("--label")

    return parser.parse_args(argv)
||||
def main(argv=None):
    """Entry point: parse arguments, run the orchestrator, exit 1 on failure."""
    args = parse_args(argv)
    setup_logging(args.debug)

    main_config_file = os.path.abspath(args.config)

    with temp_dir() as work_dir:
        try:
            success = run(work_dir, main_config_file, args)
        except Exception:
            log.exception("Unhandled exception!")
            sys.exit(1)
        if not success:
            sys.exit(1)
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user