Compare commits
297 Commits
4.2.18-1.e...master
Author | SHA1 | Date |
---|---|---|
Stepan Oksanichenko | bc8c776872 | |
Stepan Oksanichenko | 91d282708e | |
Stepan Oksanichenko | ccaf31bc87 | |
Stepan Oksanichenko | 5fe0504265 | |
Stepan Oksanichenko | d79f163685 | |
Stepan Oksanichenko | 793fb23958 | |
Stepan Oksanichenko | 65d0c09e97 | |
Stepan Oksanichenko | 0a9e5df66c | |
Stepan Oksanichenko | ae527a2e01 | |
Aditya Bisoi | 4991144a01 | |
Lubomír Sedlář | 68d94ff488 | |
Ozan Unsal | ce45fdc39a | |
Lubomír Sedlář | b625ccea06 | |
Lubomír Sedlář | 8eccfc5a03 | |
Lubomír Sedlář | f5a0e06af5 | |
Lubomír Sedlář | f6f54b56ca | |
Aditya Bisoi | fcee346c7c | |
Lubomír Sedlář | 82ec38ad60 | |
Lubomír Sedlář | c9cbd80569 | |
Aditya Bisoi | 035fca1e6d | |
Lubomír Sedlář | 0f8cae69b7 | |
Lubomír Sedlář | f17628dd5f | |
Lubomír Sedlář | f3485410ad | |
Haibo Lin | cccfaea14e | |
Lubomír Sedlář | e2057b75c5 | |
Lubomír Sedlář | 44ea4d4419 | |
Lubomír Sedlář | d4425f7935 | |
Lubomír Sedlář | c8118527ea | |
Lubomír Sedlář | a8ea322907 | |
Lubomír Sedlář | c4995c8f4b | |
Lubomír Sedlář | 997e372f25 | |
Lubomír Sedlář | 42f1c62528 | |
Lubomír Sedlář | 3fd29d0ee0 | |
Lubomír Sedlář | c1f2fa5035 | |
Aurélien Bompard | 85c9e9e776 | |
Lubomír Sedlář | 33012ab31e | |
Lubomír Sedlář | 72ddf65e62 | |
Haibo Lin | c402ff3d60 | |
Haibo Lin | 8dd344f9ee | |
Lubomír Sedlář | d07f517a90 | |
Lubomír Sedlář | 48366177cc | |
Lubomír Sedlář | 4cb8671fe4 | |
Lubomír Sedlář | 135bbbfe7e | |
Lubomír Sedlář | 5624829564 | |
Haibo Lin | 5fb4f86312 | |
Lubomír Sedlář | e891fe7b09 | |
Haibo Lin | 4cd7d39914 | |
Lubomír Sedlář | 5de829d05b | |
Lubomír Sedlář | 2930a1cc54 | |
Lubomír Sedlář | 9c4d3d496d | |
Haibo Lin | 4637fd6697 | |
Lubomír Sedlář | 2ff8132eaf | |
Lubomír Sedlář | f9190d1fd1 | |
Lubomír Sedlář | 80ad0448ec | |
Lubomír Sedlář | 027380f969 | |
Lubomír Sedlář | 41048f60b7 | |
Ondrej Nosek | 9f8f6a7956 | |
Lubomír Sedlář | 3d3e4bafdf | |
Lubomír Sedlář | 8fe0257e93 | |
Fedora Release Engineering | d7b5fd2278 | |
Lubomír Sedlář | 8b49d4ad61 | |
Lubomír Sedlář | 57443cd0aa | |
Python Maint | 1d146bb8d5 | |
Lubomír Sedlář | 790091b7d7 | |
Lubomír Sedlář | 28aad3ea40 | |
Pierre-Yves Chibon | 7373b4dbbf | |
Lubomír Sedlář | 218b11f1b7 | |
Haibo Lin | bfbe9095d2 | |
Lubomír Sedlář | eb17182c04 | |
Stepan Oksanichenko | f91f90cf64 | |
Stepan Oksanichenko | 49931082b2 | |
Stepan Oksanichenko | 8ba8609bda | |
Stepan Oksanichenko | 6f495a8133 | |
Stepan Oksanichenko | 2b4bddbfe0 | |
Stepan Oksanichenko | 032cf725de | |
Stepan Oksanichenko | 8b11bb81af | |
soksanichenko | 114a73f100 | |
soksanichenko | 1c3e5dce5e | |
soksanichenko | e55abb17f1 | |
soksanichenko | e81d78a1d1 | |
soksanichenko | 68915d04f8 | |
soksanichenko | a25bf72fb8 | |
Stepan Oksanichenko | 68aee1fa2d | |
soksanichenko | 6592735aec | |
soksanichenko | 943fd8e77d | |
soksanichenko | 004fc4382f | |
soksanichenko | 596c5c0b7f | |
soksanichenko | 141d00e941 | |
soksanichenko | 4b64d20826 | |
soksanichenko | 0747e967b0 | |
soksanichenko | 6d58bc2ed8 | |
Stepan Oksanichenko | 60a347a4a2 | |
soksanichenko | 53ed7386f3 | |
soksanichenko | ed43f0038e | |
soksanichenko | fcc9b4f1ca | |
soksanichenko | d32c293bca | |
soksanichenko | f0bd1af999 | |
soksanichenko | 1b4747b915 | |
Lubomír Sedlář | 6aabfc9285 | |
Tomáš Hozza | 9e014fed6a | |
Tomáš Hozza | 7ccb1d4849 | |
Tomáš Hozza | abec28256d | |
Lubomír Sedlář | 46216b4f17 | |
Lubomír Sedlář | 02b3adbaeb | |
Lubomír Sedlář | d17e578645 | |
Lubomír Sedlář | 6c1c9d9efd | |
Stepan Oksanichenko | 8dd7d8326f | |
soksanichenko | d7b173cae5 | |
soksanichenko | fa4640f03e | |
Stepan Oksanichenko | d66eb0dea8 | |
soksanichenko | d56227ab4a | |
soksanichenko | 12433157dd | |
soksanichenko | 623955cb1f | |
soksanichenko | 4e0d2d14c9 | |
soksanichenko | b61e59d676 | |
soksanichenko | eb35d7baac | |
soksanichenko | 54209f3643 | |
soksanichenko | 80c4536eaa | |
soksanichenko | 9bb5550d36 | |
soksanichenko | 364ed6c3af | |
soksanichenko | 0b965096ee | |
soksanichenko | d914626d92 | |
soksanichenko | 32215d955a | |
soksanichenko | d711f8a2d6 | |
soksanichenko | bd9d800b52 | |
soksanichenko | e03648589d | |
soksanichenko | b5fe2e8129 | |
soksanichenko | b14e85324c | |
soksanichenko | 5a19ad2258 | |
soksanichenko | 9ae49dae5b | |
soksanichenko | c82cbfdc32 | |
soksanichenko | ee9c9a74e6 | |
soksanichenko | ea0f933315 | |
soksanichenko | 323d31df2b | |
soksanichenko | 9acd7f5fa4 | |
soksanichenko | a2b16eb44f | |
soksanichenko | ff946d3f7b | |
soksanichenko | ede91bcd03 | |
soksanichenko | 0fa459eb9e | |
soksanichenko | b49ffee06d | |
soksanichenko | fce5493f09 | |
Lubomír Sedlář | 479849042f | |
Haibo Lin | 8cd19605bd | |
soksanichenko | 750499eda1 | |
soksanichenko | d999960235 | |
soksanichenko | 6edece449d | |
Stepan Oksanichenko | dd22d94a9e | |
soksanichenko | b157a1825a | |
soksanichenko | fd298d4f17 | |
Lubomír Sedlář | fa967f79b5 | |
Tomas Hozza | 57739c238f | |
Tomas Hozza | 805a1083a2 | |
Haibo Lin | 57ea640916 | |
Lubomír Sedlář | c7121f9378 | |
Lubomír Sedlář | 146b88e1e9 | |
Lubomír Sedlář | 8aba2363e2 | |
Ondřej Budai | 779793386c | |
Timothée Ravier | 603c61a033 | |
Lubomír Sedlář | 11fa342507 | |
Lubomír Sedlář | 13ea8e5834 | |
Lubomír Sedlář | 0abf937b0e | |
Lubomír Sedlář | 778dcfa587 | |
Ondřej Budai | ea8020473d | |
Haibo Lin | b0b494fff0 | |
Haibo Lin | 19cb013fec | |
Haibo Lin | b27301641a | |
Haibo Lin | da336f75f8 | |
Lubomír Sedlář | 960c85efde | |
Lubomír Sedlář | d7aebfc7f9 | |
Marek Kulik | ca185aaea8 | |
Ozan Unsal | 895b3982d7 | |
Lingyan Zhuang | c4aa45beab | |
soksanichenko | f21ed6f607 | |
Stepan Oksanichenko | cfe6ec3f4e | |
soksanichenko | e6c6f74176 | |
soksanichenko | 8676941655 | |
soksanichenko | 5f74175c33 | |
soksanichenko | 1e18e8995d | |
soksanichenko | 38ea822260 | |
soksanichenko | 34eb45c7ec | |
soksanichenko | 7422d1e045 | |
soksanichenko | 97801e772e | |
soksanichenko | dff346eedb | |
soksanichenko | de53dd0bbd | |
Lubomír Sedlář | 80957f5205 | |
Lubomír Sedlář | e8d79e9269 | |
Haibo Lin | c5cdd498ac | |
Haibo Lin | e490764985 | |
Ondrej Nosek | 707a2c8d10 | |
Lubomír Sedlář | f8c7ad28e4 | |
Ondrej Nosek | bebbefe46e | |
Christopher O'Brien | d55770898c | |
Ken Dreyer | 903ab076ba | |
soksanichenko | 88121619bc | |
Ken Dreyer | b805ce3d12 | |
Ozan Unsal | 0e82663327 | |
Ozan Unsal | ecb1646042 | |
Haibo Lin | 6c280f2c46 | |
Lubomír Sedlář | aabf8faea0 | |
Petr Písař | 38810b3f13 | |
Ozan Unsal | 330ba9b9c4 | |
Haibo Lin | 52c9816755 | |
Lubomír Sedlář | 32221e8f36 | |
Filip Valder | fe986d68b9 | |
Ozan Unsal | 42f668d969 | |
Haibo Lin | 894cce6a5a | |
Lubomír Sedlář | 260b3fce8d | |
Haibo Lin | 20c2e59218 | |
Haibo Lin | 5e6248e3e0 | |
Haibo Lin | f681956cf1 | |
Haibo Lin | cfb9882269 | |
Lubomír Sedlář | b652119d54 | |
Lubomír Sedlář | 33d7290d78 | |
Ken Dreyer | 9bae86a51e | |
Lubomír Sedlář | 1d654522be | |
Lubomír Sedlář | 80bd254347 | |
Ken Dreyer | 94ffa1c5c6 | |
Lubomír Sedlář | 9d02f87c99 | |
fdiprete | 7b9e08ab28 | |
Lubomír Sedlář | e2b3002726 | |
Lubomír Sedlář | e8305f3978 | |
Lubomír Sedlář | ac66c3d7f3 | |
Lubomír Sedlář | eb61c97cdb | |
Ozan Unsal | b03490bf18 | |
Dan Čermák | ab19043773 | |
Lubomír Sedlář | 204d88a351 | |
Haibo Lin | 8133676270 | |
Haibo Lin | e42e65783d | |
Ozan Unsal | 7475d2a3a9 | |
Lubomír Sedlář | ac061b2ea8 | |
Jan Kaluza | 0530cf2712 | |
Ozan Unsal | 9612241396 | |
Lubomír Sedlář | ba6f7429ee | |
Lubomír Sedlář | 72bcee01be | |
Lubomír Sedlář | a1ebd234a4 | |
Lubomír Sedlář | 5c26aa9127 | |
Jan Kaluza | 195bfbefa4 | |
Lubomír Sedlář | 20dc4beb6b | |
Lubomír Sedlář | d8d1cc520b | |
Ozan Unsal | 904a1c3271 | |
Lubomír Sedlář | e8ddacd10e | |
Ozan Unsal | b7666ba4a4 | |
Ozan Unsal | 3d9335e90e | |
Dominik Rumian | 7c3e8d4276 | |
Dominik Rumian | 9cd42a2b5e | |
Ozan Unsal | 980c7ba8fb | |
Haibo Lin | 66dacb21e0 | |
Haibo Lin | 795bbe31e3 | |
Haibo Lin | 1bb038ca72 | |
Filip Valder | efff2c9504 | |
Filip Valder | a7c111643d | |
Dominik Rumian | 5831d4ae1e | |
JamesKunstle | 3349585d78 | |
Ken Dreyer | 5a8df7b69c | |
Ken Dreyer | 6afcfef919 | |
Ken Dreyer | 2a679dcb81 | |
Dominik Rumian | 8a2d0162d9 | |
Ken Dreyer | 01a52447bc | |
Haibo Lin | cf761633f4 | |
fdiprete | 446334fb95 | |
Haibo Lin | 56a55db966 | |
Lubomír Sedlář | a435fd58da | |
Haibo Lin | edb091b7b1 | |
Haibo Lin | 9a5e901cfe | |
Lubomír Sedlář | bf28e8d50c | |
Lubomír Sedlář | 7fe32ae758 | |
Haibo Lin | c27bfe0c59 | |
Ondrej Nosek | 76d13d0062 | |
Romain Forlot | da791ed15c | |
Lev Veyde | 00a9861367 | |
Haibo Lin | e866d22c04 | |
Lubomír Sedlář | ab1b5b48ec | |
Haibo Lin | c8091899b2 | |
Haibo Lin | 035b37c566 | |
Haibo Lin | edb4517e80 | |
Lubomír Sedlář | 535034ef91 | |
Haibo Lin | 2769232b72 | |
Haibo Lin | b217470464 | |
Lubomír Sedlář | 735bfaa0d6 | |
Lubomír Sedlář | 5b5069175d | |
Lubomír Sedlář | 477dcf37d9 | |
Ondrej Nosek | 98359654cf | |
Lubomír Sedlář | 64897d7d48 | |
Lubomír Sedlář | 40133074b3 | |
Lubomír Sedlář | 61e90fd7e0 | |
Lubomír Sedlář | 36373479db | |
Lubomír Sedlář | 44f7eff1b7 | |
Haibo Lin | daa0ca6106 | |
Haibo Lin | d4ee42ec23 | |
Lubomír Sedlář | 49a5661521 | |
Lubomír Sedlář | c87fce30ac | |
Lubomír Sedlář | 0f4b0577f7 | |
Lubomír Sedlář | 83458f26c2 | |
Ken Dreyer | 39b847094a | |
Lubomír Sedlář | 9ea1098eae | |
Haibo Lin | f518c1bb7c | |
Haibo Lin | f470599f6c |

@@ -11,5 +11,9 @@ tests/data/repo-krb5-lookaside
tests/_composes
htmlcov/
.coverage
.eggs
.idea/
.tox
.venv
.kdev4/
pungi.kdev4

@@ -2,6 +2,7 @@ include AUTHORS
include COPYING
include GPL
include pungi.spec
include setup.cfg
include tox.ini
include share/*
include share/multilib/*

@@ -34,4 +34,6 @@ also moves the artifacts to correct locations.
- Documentation: https://docs.pagure.org/pungi/
- Upstream GIT: https://pagure.io/pungi/
- Issue tracker: https://pagure.io/pungi/issues
- Questions can be asked on *#fedora-releng* IRC channel on FreeNode
- Questions can be asked in the *#fedora-releng* IRC channel on irc.libera.chat
  or in the matrix room
  [`#releng:fedoraproject.org`](https://matrix.to/#/#releng:fedoraproject.org)

@@ -0,0 +1,2 @@
# Clean up pungi cache
d /var/cache/pungi/createrepo_c/ - - - 30d

@@ -12,7 +12,7 @@
   viewBox="0 0 610.46457 301.1662"
   id="svg2"
   version="1.1"
   inkscape:version="1.0.1 (3bc2e813f5, 2020-09-07)"
   inkscape:version="1.0.2 (e86c870879, 2021-01-15)"
   sodipodi:docname="phases.svg"
   inkscape:export-filename="/home/lsedlar/repos/pungi/doc/_static/phases.png"
   inkscape:export-xdpi="90"

@@ -24,9 +24,9 @@
   borderopacity="1.0"
   inkscape:pageopacity="1"
   inkscape:pageshadow="2"
   inkscape:zoom="2.1213203"
   inkscape:cx="276.65806"
   inkscape:cy="189.24198"
   inkscape:zoom="1.5"
   inkscape:cx="9.4746397"
   inkscape:cy="58.833855"
   inkscape:document-units="px"
   inkscape:current-layer="layer1"
   showgrid="false"

@@ -70,7 +70,7 @@
        <dc:format>image/svg+xml</dc:format>
        <dc:type
           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
        <dc:title></dc:title>
        <dc:title />
      </cc:Work>
    </rdf:RDF>
  </metadata>

@@ -303,15 +303,15 @@
    </g>
    <rect
       transform="matrix(0,1,1,0,0,0)"
       style="fill:#e9b96e;fill-rule:evenodd;stroke:none;stroke-width:2.65937px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
       style="fill:#e9b96e;fill-rule:evenodd;stroke:none;stroke-width:1.85901px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
       id="rect3338-1"
       width="185.96895"
       width="90.874992"
       height="115.80065"
       x="872.67383"
       y="486.55563" />
    <text
       id="text3384-0"
       y="969.2854"
       y="921.73846"
       x="489.56451"
       style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:0%;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
       xml:space="preserve"><tspan

@@ -319,7 +319,7 @@
       id="tspan3391"
       sodipodi:role="line"
       x="489.56451"
       y="969.2854">ImageChecksum</tspan></text>
       y="921.73846">ImageChecksum</tspan></text>
    <g
       transform="translate(-42.209584,-80.817124)"
       id="g3458">

@@ -518,5 +518,24 @@
       id="tspan301-5"
       style="font-size:12px;line-height:0">OSBuild</tspan></text>
    </g>
    <rect
       transform="matrix(0,1,1,0,0,0)"
       style="fill:#729fcf;fill-rule:evenodd;stroke:none;stroke-width:1.83502px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
       id="rect3338-1-3"
       width="88.544876"
       height="115.80065"
       x="970.31763"
       y="486.55563" />
    <text
       id="text3384-0-6"
       y="1018.2172"
       x="489.56451"
       style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:0%;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
       xml:space="preserve"><tspan
       style="font-size:13.1475px;line-height:1.25"
       id="tspan3391-7"
       sodipodi:role="line"
       x="489.56451"
       y="1018.2172">ImageContainer</tspan></text>
    </g>
</svg>

Before: 21 KiB | After: 22 KiB

doc/conf.py

@@ -18,12 +18,12 @@ import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom

@@ -31,207 +31,201 @@ import os
extensions = []

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
templates_path = ["_templates"]

# The suffix of source filenames.
source_suffix = '.rst'
source_suffix = ".rst"

# The encoding of source files.
#source_encoding = 'utf-8-sig'
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'
master_doc = "index"

# General information about the project.
project = u'Pungi'
copyright = u'2016, Red Hat, Inc.'
project = "Pungi"
copyright = "2016, Red Hat, Inc."

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.2'
version = "4.5"
# The full version, including alpha/beta/rc tags.
release = '4.2.7'
release = "4.5.0"

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
exclude_patterns = ["_build"]

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
pygments_style = "sphinx"

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# keep_warnings = False


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
html_theme = "default"

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_static_path = ["_static"]

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True
# html_domain_indices = True

# If false, no index is generated.
#html_use_index = True
# html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False
# html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'Pungidoc'
htmlhelp_basename = "Pungidoc"


# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'Pungi.tex', u'Pungi Documentation',
     u'Daniel Mach', 'manual'),
    ("index", "Pungi.tex", "Pungi Documentation", "Daniel Mach", "manual"),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []
# latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True
# latex_domain_indices = True


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'pungi', u'Pungi Documentation',
     [u'Daniel Mach'], 1)
]
man_pages = [("index", "pungi", "Pungi Documentation", ["Daniel Mach"], 1)]

# If true, show URL addresses after external links.
#man_show_urls = False
# man_show_urls = False


# -- Options for Texinfo output -------------------------------------------

@@ -240,19 +234,25 @@ man_pages = [
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'Pungi', u'Pungi Documentation',
     u'Daniel Mach', 'Pungi', 'One line description of project.',
     'Miscellaneous'),
    (
        "index",
        "Pungi",
        "Pungi Documentation",
        "Daniel Mach",
        "Pungi",
        "One line description of project.",
        "Miscellaneous",
    ),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# texinfo_no_detailmenu = False

@@ -182,6 +182,8 @@ Options
    Please note that when ``dnf`` is used, the build dependencies check is
    skipped. On Python 3, only the ``dnf`` backend is available.

    See also: the ``gather_backend`` setting for Pungi's gather phase.

**cts_url**
    (*str*) -- URL to Compose Tracking Service. If defined, Pungi will add
    the compose to Compose Tracking Service and get the compose ID from it.

@@ -192,6 +194,17 @@ Options
    Tracking Service Kerberos authentication. If not defined, the default
    Kerberos principal is used.

**cts_oidc_token_url**
    (*str*) -- URL to the OIDC token endpoint.
    For example ``https://oidc.example.com/openid-connect/token``.
    This option can be overridden by the environment variable ``CTS_OIDC_TOKEN_URL``.

**cts_oidc_client_id**
    (*str*) -- OIDC client ID.
    This option can be overridden by the environment variable ``CTS_OIDC_CLIENT_ID``.
    Note that the environment variable ``CTS_OIDC_CLIENT_SECRET`` must be configured
    with the corresponding client secret to authenticate to CTS via OIDC.
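
    A minimal sketch combining these options in a config file; the URLs and
    client ID below are illustrative placeholders, not real endpoints::

        cts_url = "https://cts.example.com/"
        cts_oidc_token_url = "https://oidc.example.com/openid-connect/token"
        cts_oidc_client_id = "pungi-compose"
        # CTS_OIDC_CLIENT_SECRET carries the matching secret and is read
        # from the environment, so it never lives in the config file.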

**compose_type**
    (*str*) -- Allows setting the default compose type. A type set via a
    command-line option overrides this.

@@ -457,6 +470,12 @@ Options
    cloned files should be split into subdirectories for each architecture of
    the variant.

**createrepo_enable_cache** = True
    (*bool*) -- whether to use the ``--cachedir`` option of ``createrepo``. It
    will cache and reuse checksum values to speed up the createrepo phase.
    The cache directory is located at
    ``/var/cache/pungi/createrepo_c/$release_short-$uid``,
    e.g. ``/var/cache/pungi/createrepo_c/Fedora-1000``.

**product_id** = None
    (:ref:`scm_dict <scm_support>`) -- If specified, it should point to a
    directory with certificates ``*<variant_uid>-<arch>-*.pem``. Pungi will

@@ -573,6 +592,16 @@ Options
    with everything. Set this option to ``False`` to ignore ``noarch`` in
    ``ExclusiveArch`` and always consider only binary architectures.

**pkgset_inherit_exclusive_arch_to_noarch** = True
    (*bool*) -- When set to ``True``, the value of ``ExclusiveArch`` or
    ``ExcludeArch`` will be copied from a source RPM to all its noarch
    packages. That will then limit which architectures the noarch packages
    can be included in.

    By setting this option to ``False`` this step is skipped, and noarch
    packages will by default land in all architectures. They can still be
    excluded by listing them in a relevant section of ``filter_packages``.

**pkgset_allow_reuse** = True
    (*bool*) -- When set to ``True``, *Pungi* will try to reuse pkgset data
    from the old composes specified by ``--old-composes``. When enabled, this

@@ -581,6 +610,18 @@ Options
    (for example) between composes, then Pungi may not respect those changes
    in your new compose.

**signed_packages_retries** = 0
    (*int*) -- In automated workflows, you might start a compose before Koji
    has completely written all signed packages to disk. In this case you may
    want Pungi to wait for the package to appear in Koji's storage. This
    option controls how many times Pungi will retry looking for the signed
    copy.

**signed_packages_wait** = 30
    (*int*) -- Interval in seconds for how long to wait between attempts to
    find signed packages. This option only makes sense when
    ``signed_packages_retries`` is set higher than 0.
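
    A minimal sketch tolerating roughly a minute of signing lag before the
    compose gives up (the numbers are illustrative)::

        signed_packages_retries = 12   # try up to 12 times...
        signed_packages_wait = 5       # ...waiting 5 seconds between attempts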


Example
-------

@@ -652,6 +693,11 @@ Options
**buildinstall_allow_reuse** = False
    (*bool*) -- When set to ``True``, *Pungi* will try to reuse buildinstall
    results from an old compose specified by ``--old-composes``.

**buildinstall_packages**
    (*list*) -- Additional packages to be installed in the runroot environment
    where lorax will run to create the installer. Format:
    ``[(variant_uid_regex, {arch|*: [package_globs]})]``.


Example
-------

@@ -686,6 +732,13 @@ Example
        })
    ]

    # Additional packages to be installed in the Koji runroot environment
    # where lorax will run.
    buildinstall_packages = [
        ('^Simple$', {
            '*': ['dummy-package'],
        })
    ]

.. note::

@@ -728,7 +781,7 @@ Options
    (*bool*) -- When set to ``True``, *Pungi* will try to reuse gather results
    from an old compose specified by ``--old-composes``.

**greedy_method**
**greedy_method** = none
    (*str*) -- This option controls how package requirements are satisfied in
    case a particular ``Requires`` has multiple candidates.
|
|||
pulled in.
|
||||
* With ``greedy_method = "all"`` all three packages will be
|
||||
pulled in.
|
||||
* With ``greedy_method = "build" ``pkg-b-provider-1`` and
|
||||
* With ``greedy_method = "build"`` ``pkg-b-provider-1`` and
|
||||
``pkg-b-provider-2`` will be pulled in.
|
||||
|
||||
**gather_backend**
|
||||
|

@@ -763,6 +816,9 @@ Options
    ``python-multilib`` library. Please refer to the ``multilib`` option to
    see the differences.

    See also: the ``repoclosure_backend`` setting for Pungi's repoclosure
    phase.

**multilib**
    (*list*) -- mapping of variant regexes and arches to a list of multilib
    methods

@@ -787,8 +843,14 @@ Options
    (*list*) -- additional packages to be included in a variant and
    architecture; format: ``[(variant_uid_regex, {arch|*: [package_globs]})]``

    In contrast to the ``comps_file`` setting, the ``additional_packages``
    setting merely adds the list of packages to the compose. When a package
    is in a comps group, it is visible to users via ``dnf groupinstall`` and
    Anaconda's Groups selection, but ``additional_packages`` does not affect
    DNF groups.

    The packages specified here are matched against RPM names, not any other
    provides in the package not the name of source package. Shell globbing is
    provides in the package nor the name of the source package. Shell globbing is
    used, so wildcards are possible. The package can be specified as name only
    or ``name.arch``.
|
@ -797,6 +859,21 @@ Options
|
|||
it. If you add a debuginfo package that does not have anything else from
|
||||
the same build included in the compose, the sources will not be pulled in.
|
||||
|
||||
If you list a package in ``additional_packages`` but Pungi cannot find
|
||||
it (for example, it's not available in the Koji tag), Pungi will log a
|
||||
warning in the "work" or "logs" directories and continue without aborting.
|
||||
|
||||
*Example*: This configuration will add all packages in a Koji tag to an
|
||||
"Everything" variant::
|
||||
|
||||
additional_packages = [
|
||||
('^Everything$', {
|
||||
'*': [
|
||||
'*',
|
||||
],
|
||||
})
|
||||
]
|
||||
|
||||
**filter_packages**
|
||||
(*list*) -- packages to be excluded from a variant and architecture;
|
||||
format: ``[(variant_uid_regex, {arch|*: [package_globs]})]``
|
||||
|

@@ -864,10 +941,15 @@ Options
    comps file can not be found in the package set. When disabled (the
    default), such cases are still reported as warnings in the log.

    With the ``dnf`` gather backend, this option will abort the compose on
    any missing package, no matter if it's listed in comps,
    ``additional_packages`` or the prepopulate file.

**gather_source_mapping**
    (*str*) -- JSON mapping with initial packages for the compose. The value
    should be a path to a JSON file with the following mapping:
    ``{variant: {arch: {rpm_name: [rpm_arch|None]}}}``.
    ``{variant: {arch: {rpm_name: [rpm_arch|None]}}}``. Relative paths are
    interpreted relative to the location of the main config file.
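
    A minimal sketch of such a JSON file, with hypothetical variant and
    package names::

        {
            "Server": {
                "x86_64": {
                    "bash": ["x86_64"],
                    "bash-doc": [null]
                }
            }
        }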

**gather_profiler** = False
    (*bool*) -- When set to ``True`` the gather tool will produce additional

@@ -1201,7 +1283,7 @@ Options

    Format: ``[(variant_uid_regex, {arch|*: bool})]``

**create_jigdo** = True
**create_jigdo** = False
    (*bool*) -- controls the creation of jigdo from the ISO

**create_optional_isos** = False

@@ -1228,6 +1310,11 @@ Options
    meaning size in bytes, or it can be a string with a ``k``, ``M``, ``G``
    suffix (using multiples of 1024).

**iso_level**
    (*int|list*) [optional] -- Set the ISO9660 conformance level. This is
    either a global single value (a number from 1 to 4), or a variant/arch
    mapping.
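
    A minimal sketch of both forms, assuming the usual
    ``[(variant_uid_regex, {arch|*: value})]`` mapping format used elsewhere
    in this file (the variant name is illustrative)::

        # One level for every ISO:
        iso_level = 4

        # Or per variant and architecture:
        iso_level = [
            ("^Server$", {"x86_64": 3, "*": 4}),
        ]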

**split_iso_reserve** = 10MiB
    (*int|str*) -- how much free space should be left on each disk. The format
    is the same as for the ``iso_size`` option.

@@ -1391,6 +1478,7 @@ Live Media Settings
    * ``repo`` (*str|[str]*) -- repos specified by URL or variant UID
    * ``title`` (*str*)
    * ``install_tree_from`` (*str*) -- variant to take install tree from
    * ``nomacboot`` (*bool*)


Image Build Settings

@@ -1531,7 +1619,9 @@ OSBuild Composer for building images
    * ``name`` -- name of the Koji package
    * ``distro`` -- distribution for which the image should be built (TODO examples)
    * ``image_type`` -- a list of image types to build (e.g. ``qcow2``)
    * ``image_types`` -- a list with a single image type string or just a
      string representing the image type to build (e.g. ``qcow2``). In any
      case, only a single image type can be provided as an argument.

    Optional keys:

@@ -1542,11 +1632,70 @@ OSBuild Composer for building images
    * ``release`` -- release part of the final NVR. If neither this option nor
      the global ``osbuild_release`` is set, Koji will automatically generate
      a value.
    * ``repo`` -- a list of repository URLs from which to consume packages for
    * ``repo`` -- a list of repositories from which to consume packages for
      building the image. By default only the variant repository is used.
      The list items may use one of the following formats:

      * A string with just the repository URL.

      * A dictionary with the following keys:

        * ``baseurl`` -- URL of the repository.
        * ``package_sets`` -- a list of package set names to use for this
          repository. Package sets are an internal concept of Image Builder
          and are used in image definitions. If specified, the repository is
          used by Image Builder only for the pipeline with the same name.
          For example, specifying the ``build`` package set name will make
          the repository be used only for the build environment in which
          the image will be built. (optional)

    * ``arches`` -- list of architectures for which to build the image. By
      default, the variant arches are used. This option can only restrict it,
      not add a new one.
    * ``ostree_url`` -- URL of the repository that's used to fetch the parent
      commit from.
    * ``ostree_ref`` -- name of the ostree branch
    * ``ostree_parent`` -- commit hash or a branch-like reference to the
      parent commit.
    * ``upload_options`` -- a dictionary with upload options specific to the
      target cloud environment. If provided, the image will be uploaded to the
      cloud environment, in addition to the Koji server. One can't combine
      arbitrary image types with arbitrary upload options.
      The dictionary keys differ based on the target cloud environment. The
      following keys are supported:

      * **AWS EC2 upload options** -- upload to Amazon Web Services.

        * ``region`` -- AWS region to upload the image to
        * ``share_with_accounts`` -- list of AWS account IDs to share the
          image with
        * ``snapshot_name`` -- Snapshot name of the uploaded EC2 image
          (optional)

      * **AWS S3 upload options** -- upload to Amazon Web Services S3.

        * ``region`` -- AWS region to upload the image to

      * **Azure upload options** -- upload to Microsoft Azure.

        * ``tenant_id`` -- Azure tenant ID to upload the image to
        * ``subscription_id`` -- Azure subscription ID to upload the image to
        * ``resource_group`` -- Azure resource group to upload the image to
        * ``location`` -- Azure location of the resource group (optional)
        * ``image_name`` -- Image name of the uploaded Azure image (optional)

      * **GCP upload options** -- upload to Google Cloud Platform.

        * ``region`` -- GCP region to upload the image to
        * ``bucket`` -- GCP bucket to upload the image to (optional)
        * ``share_with_accounts`` -- list of GCP accounts to share the image
          with
        * ``image_name`` -- Image name of the uploaded GCP image (optional)

      * **Container upload options** -- upload to a container registry.

        * ``name`` -- name of the container image (optional)
        * ``tag`` -- container tag to upload the image to (optional)

.. note::
    There is initial support for having this task as failable without aborting

@@ -1555,6 +1704,56 @@ OSBuild Composer for building images
    arch.
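
A minimal sketch of an ``osbuild`` entry using both ``repo`` item forms; the
variant, package name, distro and URLs are illustrative placeholders::

    osbuild = {
        "^Server$": [{
            "name": "server-image",
            "distro": "rhel-9",
            "image_types": ["qcow2"],
            "repo": [
                "https://repos.example.com/extra/",
                {
                    "baseurl": "https://repos.example.com/build-only/",
                    "package_sets": ["build"],
                },
            ],
        }]
    }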


Image container
===============

This phase supports building containers in OSBS that embed an image created in
the same compose. This can be useful for delivering the image to users running
in containerized environments.

Pungi will start a ``buildContainer`` task in Koji with the configured source
repository. The ``Dockerfile`` can expect that a repo file will be injected
into the container that defines a repo named ``image-to-include``, and its
``baseurl`` will point to the image to include. It is possible to extract the
URL with a command like ``dnf config-manager --dump image-to-include | awk
'/baseurl =/{print $3}'``.

**image_container**
    (*dict*) -- configuration for building containers embedding an image.

    Format: ``{variant_uid_regex: [{...}]}``.

    The inner object will define a single container. These keys are required:

    * ``url``, ``target``, ``git_branch``. See the OSBS section for the
      definition of these.
    * ``image_spec`` -- (*object*) A string mapping of filters used to select
      the image to embed. All images listed in metadata for the variant will
      be processed. The keys of this filter are used to select metadata fields
      for the image, and the values are regular expressions that need to match
      the metadata value.

      The filter should match exactly one image.


Example config
--------------
::

    image_container = {
        "^Server$": [{
            "url": "git://example.com/dockerfiles.git?#HEAD",
            "target": "f24-container-candidate",
            "git_branch": "f24",
            "image_spec": {
                "format": "qcow2",
                "arch": "x86_64",
                "path": ".*/guest-image-.*$",
            }
        }]
    }


OSTree Settings
===============

@@ -1594,6 +1793,8 @@ repository with a new commit.
    * ``force_new_commit`` -- (*bool*) Do not use rpm-ostree's built-in change
      detection.
      Defaults to ``False``.
    * ``unified_core`` -- (*bool*) Use rpm-ostree in unified core mode for composes.
      Defaults to ``False``.
    * ``version`` -- (*str*) Version string to be added as versioning metadata.
      If this option is set to ``!OSTREE_VERSION_FROM_LABEL_DATE_TYPE_RESPIN``,
      a value will be generated automatically as ``$VERSION.$RELEASE``.

@@ -1603,6 +1804,8 @@ repository with a new commit.
    * ``tag_ref`` -- (*bool*, default ``True``) If set to ``False``, a git
      reference will not be created.
    * ``ostree_ref`` -- (*str*) To override the value ``ref`` from the ``treefile``.
    * ``runroot_packages`` -- (*list*) A list of additional package names to be
      installed in the runroot environment in Koji.

Example config
--------------

@@ -1675,6 +1878,8 @@ an OSTree repository. This always runs in Koji as a ``runroot`` task.
    with the optional key:

    * ``extra_runroot_pkgs`` -- (*[str]*)
    * ``skip_branding`` -- (*bool*) Stops lorax from installing packages with
      branding. Defaults to ``False``.

**ostree_installer_overwrite** = False
    (*bool*) -- by default if a variant including OSTree installer also creates

@@ -1754,24 +1959,34 @@ they are not scratch builds).
    to create the image will not abort the whole compose.

    The configuration will pass other attributes directly to the Koji task.
    This includes ``scratch`` and ``priority``.
    This includes ``scratch`` and ``priority``. See ``koji list-api
    buildContainer`` for more details about these options.

    A value for ``yum_repourls`` will be created automatically and point at a
    repository in the current compose. You can add extra repositories with the
    ``repo`` key having a list of URLs pointing to ``.repo`` files, or just a
    variant uid, Pungi will create the .repo file for that variant. ``gpgkey``
    can be specified to enable gpgcheck in repo files for variants.
    variant uid, Pungi will create the .repo file for that variant. If a
    specific URL is used in ``repo``, the ``$COMPOSE_ID`` variable in
    the ``repo`` string will be replaced with the real compose ID.
    ``gpgkey`` can be specified to enable gpgcheck in repo files for variants.

**osbs_registries**
    (*dict*) -- It is possible to configure extra information about where to
    push the image (unless it is a scratch build). For each finished build,
    Pungi will try to match the NVR against a key in this mapping (using
    shell-style globbing) and take the corresponding value and collect them
    across all built images. The data will be saved into
    ``logs/global/osbs-registries.json`` as a mapping from Koji NVR to the
    registry data. The same data is also sent to the message bus on the
    ``osbs-request-push`` topic once the compose finishes successfully.
    Handling the message and performing the actual push is outside the
    scope of Pungi.
    (*dict*) -- Use this optional setting to emit ``osbs-request-push``
    messages for each non-scratch container build. These messages can guide
    other tools on how to push the images to other registries. For example, an
    external tool might trigger on these messages and copy the images from
    OSBS's registry to a staging or production registry.

    For each completed container build, Pungi will try to match the NVR
    against a key in the ``osbs_registries`` mapping (using shell-style
    globbing) and take the corresponding value and collect them across all
    built images. Pungi will save this data into
    ``logs/global/osbs-registries.json``, mapping each Koji NVR to the
    registry data. Pungi will also send this data to the message bus on the
    ``osbs-request-push`` topic once the compose finishes successfully.

    Pungi simply logs the mapped data and emits the messages. It does not
    handle the messages or push images. A separate tool must do that.


Example config

@@ -30,9 +30,17 @@ This is a shortened configuration for Fedora Rawhide compose as of 2019-10-14.
    module_defaults_dir = {
        'scm': 'git',
        'repo': 'https://pagure.io/releng/fedora-module-defaults.git',
        'branch': 'master',
        'branch': 'main',
        'dir': '.'
    }
    # Optional module obsoletes configuration which is merged
    # into the module index and gets resolved
    module_obsoletes_dir = {
        'scm': 'git',
        'repo': 'https://pagure.io/releng/fedora-module-defaults.git',
        'branch': 'main',
        'dir': 'obsoletes'
    }

    variants_file = 'variants-fedora.xml'
    sigkeys = ['12C944D0']

@@ -83,7 +91,6 @@ This is a shortened configuration for Fedora Rawhide compose as of 2019-10-14.

    # CREATEISO
    iso_hfs_ppc64le_compatible = False
    create_jigdo = False

    # BUILDINSTALL
    buildinstall_method = 'lorax'

@@ -325,6 +332,8 @@ This is a shortened configuration for Fedora Rawhide compose as of 2019-10-14.
        "tag_ref": False,
        # Don't use change detection in ostree.
        "force_new_commit": True,
        # Use unified core mode for rpm-ostree composes.
        "unified_core": True,
        # This is the location for the repo where the new commit will be
        # created. Note that this is outside of the compose dir.
        "ostree_repo": "/mnt/koji/compose/ostree/repo/",

@@ -19,7 +19,7 @@ Contents:
    scm_support
    messaging
    gathering
    koji
    comps
    contributing
    testing
    multi_compose

@@ -0,0 +1,105 @@
======================
Getting data from Koji
======================

When Pungi is configured to get packages from a Koji tag, it somehow needs to
access the actual RPM files.

Historically, this required the storage used by Koji to be directly available
on the host where Pungi was running. This was usually achieved by using NFS
for the Koji volume, and mounting it on the compose host.

The compose could be created directly on the same volume. In such a case the
packages would be hardlinked, significantly reducing space consumption.

The compose could also be created on different storage, in which case the
packages would either need to be copied over or symlinked. Using symlinks
requires that anything that accesses the compose (e.g. a download server)
also mounts the Koji volume in the same location.

There is also a risk with symlinks that the package in Koji can change (due
to being resigned, for example), which would invalidate composes linking to
it.


Using Koji without direct mount
===============================

It is now possible to run a compose from a Koji tag without direct access to
Koji storage.

Pungi can download the packages over the HTTP protocol, store them in a local
cache, and consume them from there.

The local cache has a similar structure to what is on the Koji volume.

When Pungi needs some package, it has a path on the Koji volume. It will
replace the ``topdir`` with the cache location. If such a file exists, it
will be used. If it doesn't exist, it will be downloaded from Koji (by
replacing the ``topdir`` with ``topurl``).

::

    Koji path   /mnt/koji/packages/foo/1/1.fc38/data/signed/abcdef/noarch/foo-1-1.fc38.noarch.rpm
    Koji URL    https://kojipkgs.fedoraproject.org/packages/foo/1/1.fc38/data/signed/abcdef/noarch/foo-1-1.fc38.noarch.rpm
    Local path  /mnt/compose/cache/packages/foo/1/1.fc38/data/signed/abcdef/noarch/foo-1-1.fc38.noarch.rpm

The packages can be hardlinked from this cache directory.
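
A minimal sketch of the path translation described above; the function and
constant names are illustrative, not Pungi's actual internals::

    KOJI_TOPDIR = "/mnt/koji"
    KOJI_TOPURL = "https://kojipkgs.fedoraproject.org"
    CACHE_DIR = "/mnt/compose/cache"

    def cache_path(koji_path: str) -> str:
        # Local file to look for (and to hardlink from) first.
        return koji_path.replace(KOJI_TOPDIR, CACHE_DIR, 1)

    def download_url(koji_path: str) -> str:
        # URL to fetch the package from on a cache miss.
        return koji_path.replace(KOJI_TOPDIR, KOJI_TOPURL, 1)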

Cleanup
-------

While the approach above allows each RPM to be downloaded only once, it will
eventually result in the Koji volume being mirrored locally. Most of the
packages will, however, no longer be needed.

There is a script ``pungi-cache-cleanup`` that can help with that. It can find
and remove files from the cache that are no longer needed.

A file is no longer needed if it has a single link (meaning it is only in the
cache, not in any compose) and its mtime is older than a given threshold.

It doesn't make sense to delete files that are hardlinked in an existing
compose, as that would not save any space anyway.

The mtime check is meant to preserve files that are downloaded but not actually
used in a compose, like a subpackage that is not included in any variant. Every
time its existence in the local cache is checked, the mtime is updated.
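
A minimal sketch of the eligibility rule the cleanup applies; the names are
illustrative and the real script may differ in details::

    import os
    import time

    def is_removable(path: str, max_age_days: int = 30) -> bool:
        st = os.stat(path)
        only_in_cache = st.st_nlink == 1       # no compose hardlinks it
        age = time.time() - st.st_mtime        # mtime is refreshed on use
        return only_in_cache and age > max_age_days * 86400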
Race conditions?
----------------

It should be safe to have multiple compose hosts share the same storage volume
for generated composes and the local cache.

If a cache file is accessed and it exists, there's no risk of a race condition.

If two composes need the same file at the same time and it is not present yet,
one of them will take a lock on it and start downloading. The other will wait
until the download is finished.

The lock is only valid for a set amount of time (5 minutes) to avoid issues
where the downloading process is killed in a way that blocks it from releasing
the lock.
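
Pungi's spec file pulls in ``python3-flufl-lock``; a minimal sketch of taking
such a time-limited lock with it (the paths and the download function are
illustrative)::

    from datetime import timedelta
    from flufl.lock import Lock

    def download_package():
        ...  # fetch the RPM over HTTP (illustrative placeholder)

    lock = Lock("/mnt/compose/cache/foo.rpm.lock",
                lifetime=timedelta(minutes=5))
    with lock:
        # A second process blocks here until the lock is released, or
        # steals it once the 5 minute lifetime expires.
        download_package()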

If the file is large and the network is slow, the limit may not be enough to
finish downloading. In that case the second process will steal the lock while
the first process is still downloading. This will result in the same file
being downloaded twice.

When the first process finishes the download, it will put the file into the
local cache location. When the second process finishes, it will atomically
replace it, but since it's the same file, the content does not change.

If the first compose already managed to hardlink the file before it gets
replaced, there will be two copies of the file present locally.
Integrity checking
------------------

There is minimal integrity checking. RPM packages belonging to real builds
will be checked to match the checksum provided by the Koji hub.

There is no checking for scratch builds or any images.

@@ -12,8 +12,9 @@ happened. A JSON-encoded object will be passed to standard input to provide
more information about the event. At the very least, the object will contain a
``compose_id`` key.

The script is invoked in compose directory and can read other information
there.
The notification script inherits the working directory from the parent
process, and it can be called from the same directory ``pungi-koji`` is called
from. The working directory is listed at the start of the main log.
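
A minimal sketch of a notification script reading the JSON object from
standard input; the handling logic is illustrative::

    #!/usr/bin/env python3
    import json
    import sys

    data = json.load(sys.stdin)
    # Every event carries at least the compose ID.
    print("event for compose:", data["compose_id"], file=sys.stderr)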

Currently these messages are sent:

@@ -1,107 +0,0 @@
.. _multi_compose:

Managing compose from multiple parts
====================================

There may be cases where it makes sense to split a big compose into separate
parts, but create a compose output that links all output into one familiar
structure.

The `pungi-orchestrate` tool allows that.

It works with an INI-style configuration file. The ``[general]`` section
contains information about the identity of the main compose. Other sections
define individual parts.

The parts are scheduled to run in parallel, with the minimal amount of
serialization. The final compose directory will contain hard-links to the
files.


General settings
----------------

**target**
    Path to directory where the final compose should be created.
**compose_type**
    Type of compose to make.
**release_name**
    Name of the product for the final compose.
**release_short**
    Short name of the product for the final compose.
**release_version**
    Version of the product for the final compose.
**release_type**
    Type of the product for the final compose.
**extra_args**
    Additional arguments that will be passed to the child Pungi processes.
**koji_profile**
    If specified, a current event will be retrieved from the Koji instance and
    used for all parts.

**kerberos**
    If set to yes, a kerberos ticket will be automatically created at the
    start. Set keytab and principal as well.
**kerberos_keytab**
    Path to keytab file used to create the kerberos ticket.
**kerberos_principal**
    Kerberos principal for the ticket.

**pre_compose_script**
    Commands to execute before the first part is started. Can contain multiple
    commands on separate lines.
**post_compose_script**
    Commands to execute after the last part finishes and final status is
    updated. Can contain multiple commands on separate lines. ::

        post_compose_script =
            compose-latest-symlink $COMPOSE_PATH
            custom-post-compose-script.sh

    Multiple environment variables are defined for the scripts:

    * ``COMPOSE_PATH``
    * ``COMPOSE_ID``
    * ``COMPOSE_DATE``
    * ``COMPOSE_TYPE``
    * ``COMPOSE_RESPIN``
    * ``COMPOSE_LABEL``
    * ``RELEASE_ID``
    * ``RELEASE_NAME``
    * ``RELEASE_SHORT``
    * ``RELEASE_VERSION``
    * ``RELEASE_TYPE``
    * ``RELEASE_IS_LAYERED`` -- ``YES`` for layered products, empty otherwise
    * ``BASE_PRODUCT_NAME`` -- only set for layered products
    * ``BASE_PRODUCT_SHORT`` -- only set for layered products
    * ``BASE_PRODUCT_VERSION`` -- only set for layered products
    * ``BASE_PRODUCT_TYPE`` -- only set for layered products

**notification_script**
    Executable name (or path to a script) that will be used to send a message
    once the compose is finished. In order for a valid URL to be included in
    the message, at least one part must configure path translation that would
    apply to the location of the main compose.

    Only two messages will be sent, one for start and one for finish (either
    successful or not).


Partial compose settings
------------------------

Each part should have a separate section in the config file.

It can specify these options:

**config**
    Path to configuration file that describes this part. If relative, it is
    resolved relative to the file with parts configuration.
**just_phase**, **skip_phase**
    Customize which phases should run for this part.
**depends_on**
    A comma separated list of other parts that must be finished before this
    part starts.
**failable**
    A boolean toggle to mark a part as failable. A failure in such a part will
    mark the final compose as incomplete, but still successful.
@ -115,16 +115,30 @@ ImageBuild
|
|||
This phase wraps up ``koji image-build``. It also updates the metadata
|
||||
ultimately responsible for ``images.json`` manifest.
|
||||
|
||||
OSBuild
|
||||
-------
|
||||
|
||||
Similarly to image build, this phases creates a koji `osbuild` task. In the
|
||||
background it uses OSBuild Composer to create images.
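
As a sketch, a configuration for this phase could look roughly like the
following; the keys mirror the ``osbuild`` schema visible later in this diff,
while all values are invented for the example. ::

    osbuild = {
        "^Server$": [{
            "name": "example-guest-image",
            "distro": "example-distro-1",
            "image_types": "qcow2",
            # "upload_options" would pick one of the documented variants
            # (AWSEC2, AWSS3, Azure, GCP or container registry).
        }],
    }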

OSBS
----

This phase builds docker base images in `OSBS
This phase builds container base images in `OSBS
<http://osbs.readthedocs.io/en/latest/index.html>`_.

The finished images are available in a registry provided by OSBS, but are not
downloaded directly into the compose. There is metadata about the created
image in ``compose/metadata/osbs.json``.

ImageContainer
--------------

This phase builds a container image in OSBS, and stores the metadata in the
same file as the OSBS phase. The container produced here wraps a different
image, created in the ImageBuild or OSBuild phase. It can be useful to
deliver a VM image to containerized environments.
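
As an illustration, a configuration for this phase could look like the sketch
below; the keys mirror the ``image_container`` schema visible later in this
diff, and every value is made up for the example. ::

    image_container = {
        "^Server$": [{
            "url": "git://example.com/containers/vm-wrapper.git?#HEAD",
            "target": "example-container-candidate",
            "git_branch": "main",
            # Selects which image from the compose gets wrapped; the exact
            # attributes to match here are assumptions for this example.
            "image_spec": {"format": "qcow2", "arch": "x86_64"},
        }],
    }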

OSTreeInstaller
---------------

@@ -41,6 +41,14 @@ which can contain following keys.
* ``command`` -- defines a shell command to run after Git clone to generate
  the needed file (for example to run ``make``). Only supported in the Git
  backend.

* ``options`` -- a dictionary of additional configuration options. These are
  specific to different backends.

  Currently supported values for Git:

  * ``credential_helper`` -- path to a credential helper used to supply
    username/password for remotes that require authentication.
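
As a sketch, an SCM dictionary using this option might look like the
following; the repository URL and helper path are invented for the example. ::

    variants_file = {
        "scm": "git",
        "repo": "https://git.example.com/releng/pungi-config.git",
        "file": "variants.xml",
        "options": {
            # Hypothetical helper that supplies credentials for the remote.
            "credential_helper": "/usr/local/bin/example-credential-helper",
        },
    }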

Koji examples
-------------

388 pungi.spec
@@ -1,26 +1,24 @@
%{?python_enable_dependency_generator}

Name:           pungi
Version:        4.2.15
Release:        1%{?dist}.cloudlinux
Version:        4.5.0
Release:        3%{?dist}.alma
Summary:        Distribution compose tool

License:        GPLv2
License:        GPL-2.0-only
URL:            https://pagure.io/pungi
Source0:        %{name}-%{version}.tar.bz2

BuildRequires:  python3-nose
BuildRequires:  make
BuildRequires:  python3-pytest
BuildRequires:  python3-mock
BuildRequires:  python3-pyfakefs
BuildRequires:  python3-ddt
# replaced by unittest.mock
# BuildRequires:  python3-mock
BuildRequires:  python3-devel
BuildRequires:  python3-setuptools
BuildRequires:  python3-productmd >= 1.33
BuildRequires:  python3-kobo-rpmlib >= 0.18.0
BuildRequires:  createrepo_c
BuildRequires:  createrepo_c >= 0.20.1
BuildRequires:  python3-lxml
BuildRequires:  python3-ddt
BuildRequires:  python3-kickstart
BuildRequires:  python3-rpm
BuildRequires:  python3-dnf

@@ -33,23 +31,27 @@ BuildRequires: python3-kobo
BuildRequires:  python3-koji
BuildRequires:  lorax
BuildRequires:  python3-PyYAML
BuildRequires:  libmodulemd >= 2.8.0
BuildRequires:  python3-libmodulemd >= 2.8.0
BuildRequires:  python3-gobject
BuildRequires:  python3-createrepo_c
BuildRequires:  python3-createrepo_c >= 0.20.1
BuildRequires:  python3-dogpile-cache
BuildRequires:  python3-parameterized
BuildRequires:  python3-flufl-lock
BuildRequires:  python3-ddt
BuildRequires:  python3-distro
BuildRequires:  python3-gobject-base
BuildRequires:  python3-pgpy
BuildRequires:  python3-pyfakefs
%if %{rhel} == 8
BuildRequires:  python3-dataclasses
%endif

#deps for doc building
BuildRequires:  python3-sphinx

Requires:       python3-kobo-rpmlib >= 0.18.0
Requires:       python3-productmd >= 1.33
Requires:       python3-kickstart
Requires:       python3-requests
Requires:       python3-dataclasses
Requires:       createrepo_c
Requires:       createrepo_c >= 0.20.1
Requires:       koji >= 1.10.1-13
Requires:       python3-koji-cli-plugins
Requires:       isomd5sum

@@ -59,12 +61,21 @@ Requires: python3-dnf
Requires:       python3-multilib
Requires:       python3-libcomps
Requires:       python3-koji
Requires:       libmodulemd >= 2.8.0
Requires:       python3-libmodulemd >= 2.8.0
Requires:       python3-gobject
Requires:       python3-createrepo_c
Requires:       python3-createrepo_c >= 0.20.1
Requires:       python3-PyYAML
Requires:       python3-gobject-base
Requires:       python3-productmd >= 1.28
Requires:       python3-flufl-lock
Requires:       python3-productmd >= 1.33
Requires:       lorax
Requires:       python3-distro
Requires:       python3-gobject-base
Requires:       python3-pgpy
Requires:       python3-requests
%if %{rhel} == 8
Requires:       python3-dataclasses
%endif

# This package is not available on i686, hence we cannot require it
# See https://bugzilla.redhat.com/show_bug.cgi?id=1743421

@@ -80,7 +91,7 @@ A tool to create anaconda based installation trees/isos of a set of rpms.
%package utils
Summary:        Utilities for working with finished composes
Requires:       pungi = %{version}-%{release}
# Requires:  python3-fedmsg
Requires:       python3-fedora-messaging

%description utils
These utilities work with finished composes produced by Pungi. They can be used

@@ -89,8 +100,8 @@ notification to Fedora Message Bus.

%package -n python3-%{name}
Summary:        Python 3 libraries for pungi
Requires:       python3-attrs
Requires:       fus
Requires:       python3-attrs

%description -n python3-%{name}
Python library with code for Pungi. This is not a public library and there are

@@ -110,21 +121,14 @@ gzip _build/man/pungi.1

%install
%py3_install
%{__install} -d %{buildroot}/var/cache/pungi
%{__install} -d %{buildroot}/var/cache/pungi/createrepo_c
%{__install} -d %{buildroot}%{_mandir}/man1
%{__install} -m 0644 doc/_build/man/pungi.1.gz %{buildroot}%{_mandir}/man1

rm %{buildroot}%{_bindir}/pungi

# CLOUDLINUX: We don't need fedmsg stuff
rm %{buildroot}%{_bindir}/%{name}-fedmsg-notification

%check
python3 -m pytest
# master branch part of %check segment. Currently it doesn't work
# because of pungi-koji requirement in bash tests
#./tests/data/specs/build.sh
#cd tests && ./test_compose.sh
%pytest

%files
%license COPYING GPL

@@ -140,7 +144,9 @@ python3 -m pytest
%{_bindir}/%{name}-make-ostree
%{_mandir}/man1/pungi.1.gz
%{_datadir}/pungi
/var/cache/pungi
%{_localstatedir}/cache/pungi
%dir %attr(1777, root, root) %{_localstatedir}/cache/pungi/createrepo_c
%{_tmpfilesdir}/pungi-clean-cache.conf

%files -n python3-%{name}
%{python3_sitelib}/%{name}

@@ -151,15 +157,224 @@ python3 -m pytest
%{_bindir}/%{name}-create-unified-isos
%{_bindir}/%{name}-config-dump
%{_bindir}/%{name}-config-validate
# %{_bindir}/%{name}-fedmsg-notification
%{_bindir}/%{name}-fedmsg-notification
%{_bindir}/%{name}-notification-report-progress
%{_bindir}/%{name}-orchestrate
%{_bindir}/%{name}-patch-iso
%{_bindir}/%{name}-compare-depsolving
%{_bindir}/%{name}-wait-for-signed-ostree-handler

%{_bindir}/%{name}-cache-cleanup

%changelog
* Mon Nov 21 2023 Stepan Oksanichenko <soksanichenko@almalinux.org> - 4.5.0-3
- Method `get_remote_file_content` is object's method now

* Wed Nov 15 2023 Stepan Oksanichenko <soksanichenko@almalinux.org> - 4.5.0-2
- Return empty list if a repo doesn't contain any module

* Thu Aug 31 2023 Lubomír Sedlář <lsedlar@redhat.com> - 4.5.0-1
- kojiwrapper: Stop being smart about local access (lsedlar)
- Fix unittest errors (ounsal)
- Add integrity checking for builds (lsedlar)
- Add script for cleaning up the cache (lsedlar)
- Add ability to download images (lsedlar)
- Add support for not having koji volume mounted locally (lsedlar)
- Remove repository cloning multiple times (abisoi)
- Support require_all_comps_packages on DNF backend (lsedlar)
- Fix new warnings from flake8 (lsedlar)

* Tue Jul 25 2023 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.3.7-8
- Option `excluded-packages` for script `pungi-gather-rpms`

* Tue Jul 25 2023 Lubomír Sedlář <lsedlar@redhat.com> - 4.4.1-1
- ostree: Add configuration for custom runroot packages (lsedlar)
- pkgset: Emit better error for missing modulemd file (lsedlar)
- Add support for git-credential-helper (lsedlar)
- Support OIDC Client Credentials authentication to CTS (hlin)

* Fri Jul 21 2023 Fedora Release Engineering <releng@fedoraproject.org> - 4.4.0-4
- Rebuilt for https://fedoraproject.org/wiki/Fedora_39_Mass_Rebuild

* Wed Jul 19 2023 Lubomír Sedlář <lsedlar@redhat.com> - 4.4.0-3
- Backport ostree runroot package additions

* Wed Jul 19 2023 Lubomír Sedlář <lsedlar@redhat.com> - 4.4.0-2
- Backport ostree runroot package additions

* Mon Jun 19 2023 Python Maint <python-maint@redhat.com> - 4.4.0-2
- Rebuilt for Python 3.12

* Wed Jun 07 2023 Lubomír Sedlář <lsedlar@redhat.com> - 4.4.0-1
- gather-dnf: Run latest() later (lsedlar)
- iso: Support joliet long names (lsedlar)
- Drop pungi-orchestrator code (lsedlar)
- isos: Ensure proper file ownership and permissions (lsedlar)
- gather: Always get latest packages (lsedlar)
- Add back compatibility with jsonschema <3.0.0 (lsedlar)
- Remove useless debug message (lsedlar)
- Remove fedmsg from requirements (lsedlar)
- gather: Support dotarch in DNF backend (lsedlar)
- Fix compatibility with createrepo_c 0.21.1 (lsedlar)
- comps: Apply arch filtering to environment/optionlist (lsedlar)
- Add config file for cleaning up cache files (hlin)

* Wed May 17 2023 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.8-3
- Rebuild without fedmsg dependency

* Wed May 03 2023 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.8-1
- Set priority for Fedora messages

* Thu Apr 13 2023 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.3.7-7
- gather-module can find modules through symlinks

* Thu Apr 13 2023 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.3.7-6
- CLI option `--label` can be passed through a Pungi config file

* Fri Mar 31 2023 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.3.7-4
- ALBS-1030: Generate Devel section in packages.json
- Also the tool can combine (remove and add) packages in a variant from different sources according to a URL's type of source
- Some upstream changes to KojiMock part
- Skip verifying an RPM signature if sigkeys are empty
- ALBS-987: Generate i686 and dev repositories with pungi on building new distr. version automatically
- [Generator of packages.json] Replace using CLI by config.yaml
- [Gather RPMs] os.path is replaced by Pat

* Thu Mar 30 2023 Haibo Lin <hlin@redhat.com> - 4.3.8-1
- createiso: Update possibly changed file on DVD (lsedlar)
- pkgset: Stop reuse if configuration changed (lsedlar)
- Allow disabling inheriting ExcludeArch to noarch packages (lsedlar)
- pkgset: Support extra builds with no tags (lsedlar)
- buildinstall: Avoid pointlessly tweaking the boot images (lsedlar)
- Prevent to reuse if unsigned packages are allowed (hlin)
- Pass parent id/respin id to CTS (lsedlar)
- Exclude existing files in boot.iso (hlin)
- image-build/osbuild: Pull ISOs into the compose (lsedlar)
- Retry 401 error from CTS (lsedlar)
- gather: Better detection of debuginfo in lookaside (lsedlar)
- Log versions of all installed packages (hlin)
- Use authentication for all CTS calls (lsedlar)
- Fix black complaints (lsedlar)
- Add vhd.gz extension to compressed VHD images (lsedlar)
- Add vhd-compressed image type (lsedlar)
- Update to work with latest mock (lsedlar)
- Default bztar format for sdist command (onosek)

* Fri Mar 17 2023 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.3.7-3
- ALBS-987: Generate i686 repositories with pungi on building new distr. version automatically
- KojiMock extracts all modules which are suitable for the variant's arches
- Old code is removed or refactored

* Fri Jan 20 2023 Fedora Release Engineering <releng@fedoraproject.org> - 4.3.7-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_38_Mass_Rebuild

* Fri Dec 09 2022 Ondřej Nosek <onosek@redhat.com> - 4.3.7-1
- osbuild: test passing of rich repos from configuration (lsedlar)
- osbuild: support specifying `package_sets` for repos (thozza)
- osbuild: don't use `util.get_repo_urls()` (thozza)
- osbuild: update schema and config documentation (thozza)
- Speed up tests by 30 seconds (lsedlar)
- Stop sending compose paths to CTS (lsedlar)
- Report errors from CTS (lsedlar)
- createiso: Create Joliet tree with xorriso (lsedlar)
- init: Filter comps for modular variants with tags (lsedlar)
- Retry failed cts requests (hlin)
- Ignore existing kerberos ticket for CTS auth (lsedlar)
- osbuild: support specifying upload_options (thozza)
- osbuild: accept only a single image type in the configuration (thozza)
- Add Jenkinsfile for CI (hlin)
- profiler: Flush stdout before printing (lsedlar)

* Sat Nov 12 2022 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.3.6-3
- AlmaLinux version. Updates from upstream

* Mon Nov 07 2022 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.6-2
- Stop including comps in modular repos

* Wed Oct 19 2022 stepan_oksanichenko <soksanichenko@cloudlinux.com> - 4.2.17-1
- Replace list of cr.packages by cr.PackageIterator in package JSON generator
- Do not lose a module from koji if we have more than one arch (e.g. x86_64 + i686)

* Fri Aug 26 2022 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.6-1
- pkgset: Report better error when module is missing an arch (lsedlar)
- osbuild: add support for building ostree artifacts (ondrej)
- ostree: Add unified core mode for compose in rpm-ostree (tim)
- createiso: Make ISO level more granular (lsedlar)
- Create DVDs with xorriso (lsedlar)
- Fix compatibility with jsonschema >= 4.0.0 (lsedlar)
- Fix black complaint (lsedlar)
- doc: fix osbuild's image_types field name (ondrej)
- Convert _ssh_run output to str for python3 (hlin)
- Print more logs for git_ls_remote (hlin)
- Log time taken of each phase (hlin)
- Avoid crash when loading pickle file failed (hlin)
- extra_isos: Fix detection of changed packages (lsedlar)

* Thu Aug 11 2022 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.5-8
- Backport jsonschema compatibility patch (rhbz#2113607)

* Mon Jul 25 2022 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.5-7
- Update xorriso patch

* Fri Jul 22 2022 Fedora Release Engineering <releng@fedoraproject.org> - 4.3.5-6
- Rebuilt for https://fedoraproject.org/wiki/Fedora_37_Mass_Rebuild

* Mon Jun 20 2022 Python Maint <python-maint@redhat.com> - 4.3.5-5
- Rebuilt for Python 3.11

* Thu Jun 16 2022 Adam Williamson <awilliam@redhat.com> - 4.3.5-4
- Don't try and run isohybrid when using xorriso

* Wed Jun 15 2022 Python Maint <python-maint@redhat.com> - 4.3.5-3
- Rebuilt for Python 3.11

* Wed Jun 15 2022 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.5-2
- Backport patch for building DVDs with xorriso command again

* Wed Jun 15 2022 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.5-1
- Fix module defaults and obsoletes validation (mkulik)
- Update the cts_keytab field in order to get the hostname of the server
  (ounsal)
- Add skip_branding to ostree_installer. (lzhuang)
- kojiwrapper: Ignore warnings before task id (lsedlar)
- Restrict jsonschema version (lsedlar)
- Revert "Do not clone the same repository multiple times, re-use already
  cloned repository" (hlin)
- Involve bandit (hlin)

* Wed Jun 08 2022 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.4-2
- Backport patch for building DVDs with xorriso command

* Wed May 4 2022 stepan_oksanichenko <soksanichenko@cloudlinux.com> - 4.2.16-1
- ALBS-334: Make the ability of Pungi to give module_defaults from remote sources

* Mon Apr 04 2022 Ondřej Nosek <onosek@redhat.com> - 4.3.4-1
- kojiwrapper: Add retries to login call (lsedlar)
- Variants file in config can contain path (onosek)
- nomacboot option for livemedia koji tasks (cobrien)
- doc: improve osbs_registries explanation (kdreyer)
- osbs: only handle archives of type "image" (kdreyer)
- Update the default greedy_method value in doc (ounsal)
- Fix the wrong working directory for the progress_notification script (ounsal)
- Filter out environment groups unmatch given arch (hlin)
- profiler: Respect provided output stream (lsedlar)
- modules: Correct a typo in loading obsoletes (ppisar)
- Do not clone the same repository multiple times, re-use already cloned
  repository (ounsal)

* Fri Feb 04 2022 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.3-3
- Backport typo fix

* Fri Jan 21 2022 Fedora Release Engineering <releng@fedoraproject.org> - 4.3.3-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_36_Mass_Rebuild

* Fri Jan 14 2022 Haibo Lin <hlin@redhat.com> - 4.3.3-1
- hybrid: Explicitly pull in debugsource packages (lsedlar)
- Add module obsoletes feature (fvalder)
- buildinstall: Add ability to install extra packages in runroot (ounsal)
- Ignore osbs/osbuild config when reusing iso images (hlin)
- compose: Make sure temporary dirs are world readable (lsedlar)
- Pass compose parameter for debugging git issue (hlin)
- Generate images.json for extra_isos phase (hlin)
- Fix tests for python 2.6 (hlin)

* Thu Dec 30 2021 stepan_oksanichenko <soksanichenko@cloudlinux.com> - 4.2.15-1
- ALBS-97: The scripts `gather_modules` and `generate_packages_json` support LZMA compression

@@ -168,21 +383,116 @@ python3 -m pytest
* Mon Dec 20 2021 stepan_oksanichenko <soksanichenko@cloudlinux.com> - 4.2.14-1
- ALBS-66: The generator of packages JSON can process the same packages with different versions

* Mon Nov 15 2021 Haibo Lin <hlin@redhat.com> - 4.3.2-2
- Backport patch for generating images.json

* Thu Nov 11 2021 Haibo Lin <hlin@redhat.com> - 4.3.2-1
- gather: Load JSON mapping relative to config dir (lsedlar)
- gather: Stop requiring all variants/arches in JSON (lsedlar)
- doc: make dnf "backend" settings easier to discover (kdreyer)
- Remove with_jigdo argument (lsedlar)
- Check dependencies after config validation (lsedlar)
- default "with_jigdo" to False (kdreyer)
- Stop trying to validate non-existent metadata (lsedlar)
- test images for metadata deserialization error (fdipretre)
- repoclosure: Use --forcearch for dnf repoclosure (lsedlar)
- extra_isos: Allow reusing old images (lsedlar)
- createiso: Allow reusing old images (lsedlar)
- Remove default runroot channel (lsedlar)

* Tue Oct 26 2021 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.1-1
- Correct irc network name & add matrix room (dan.cermak)
- Add missing mock to osbs tests (lsedlar)
- osbs: Reuse images from old compose (hlin)
- image_build: Allow reusing old image_build results (hlin)
- Allow ISO-Level configuration within the config file (ounsal)
- Work around ODCS creating COMPOSE_ID later (lsedlar)
- When `cts_url` is configured, use CTS `/repo` API for buildContainer
  yum_repourls. (jkaluza)
- Add COMPOSE_ID into the pungi log file (ounsal)
- buildinstall: Add easy way to check if previous result was reused (lsedlar)

* Mon Oct 04 2021 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.0-2
- Backport patch to avoid crash on missing COMPOSE_ID

* Wed Sep 15 2021 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.0-1
- Only build CTS url when configured (lsedlar)
- Require requests_kerberos only when needed (lsedlar)
- Allow specifying $COMPOSE_ID in the `repo` value for osbs phase. (jkaluza)
- Make getting old compose config reusable (lsedlar)
- paths: Allow customizing log file extension (lsedlar)
- Add authentication for updating the compose URL in CTS. (ounsal)
- Fix type detection for osbuild images (lsedlar)
- Enable pungi to send compose_url patches to CTS (ounsal)
- Use xorriso instead of isoinfo when createiso_use_xorrisofs is enabled
  (ounsal)
- Fix tests for createrepo (drumian)
- Formatted files according to flake8 and black feedback (drumian)
- Handle the pungi failures to ensure creation of log files (ounsal)
- Add createrepo_enable_cache to configuration doc (hlin)
- Fix formatting (hlin)
- Install missing deps in ci image (hlin)
- Use pytest directly incl. support for posargs, e.g.: tox -- -s -vvv
  tests/path/to/a/single/test_something.py (fvalder)
- Supersede ModuleStream loading with ModuleIndex (fvalder)
- Better error message than 'KeyError' in pungi (drumian)
- Adding multithreading support for pungi/phases/image_checksum.py (jkunstle)
- doc: more additional_packages documentation (kdreyer)
- doc: fix typo in additional_packages description (kdreyer)
- doc: improve signed packages retry docs (kdreyer)
- Better error message than 'KeyError' in pungi (drumian)
- doc: explain buildContainer API (kdreyer)

* Wed Aug 04 2021 Haibo Lin <hlin@redhat.com> - 4.2.10-1
- Show and log command when using the run_blocking_cmd() method (fdipretre)
- Use cachedir when createrepo (hlin)
- gather: Add all srpms to variant lookaside repo (lsedlar)
- Add task URL to watch task log (hlin)
- Log warning when module defined in variants.xml not found (hlin)
- pkgset: Compare future events correctly (lsedlar)
- util: Strip file:// from local urls (lsedlar)
- Clean up temporary yumroot dir (hlin)

* Fri Jul 23 2021 Fedora Release Engineering <releng@fedoraproject.org> - 4.2.9-3
- Rebuilt for https://fedoraproject.org/wiki/Fedora_35_Mass_Rebuild

* Fri Jun 18 2021 stepan_oksanichenko <soksanichenko@cloudlinux.com> - 4.2.13-1
- LNX-326: Add the ability to include any package by mask in packages.json to the generator
- LNX-318: Modify build scripts for building CloudLinux OS 8.4

* Fri Jun 04 2021 Python Maint <python-maint@redhat.com> - 4.2.9-2
- Rebuilt for Python 3.10

* Tue May 25 2021 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.2.12-1
- LNX-108: Add multiarch support to pungi

* Thu Apr 29 2021 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.2.11-1
- LNX-311: Add ability to productmd set a main variant while dumping TreeInfo

* Thu Apr 29 2021 onosek - 4.2.9-1
- New upstream release 4.2.9
- Fix can't link XDEV using repos as pkgset_sources (romain.forlot)
- Updated the deprecated ks argument name (to the current inst.ks) (lveyde)
- gather: Adjust reusing with lookaside (hlin)
- hybrid: Optimize getting lookaside packages (lsedlar)
- gather: Copy old logs when reusing gather result (hlin)
- Cancel koji tasks when pungi terminated (hlin)
- Add Dockerfile for building testing image (hlin)
- image_container: Fix incorrect arch processing (lsedlar)
- runroot: Adjust permissions always (hlin)
- Format code (hlin)
- pkgset: Fix meaning of retries (lsedlar)
- pkgset: Store module tag only if module is used (lsedlar)
- Store extended traceback for gather errors (lsedlar)

* Wed Feb 24 2021 Danylo Kuropiatnyk <dkuropiatnyk@cloudlinux.com>, Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.2.10-1
- LU-2186 .treeinfo file in AlmaLinux public kickstart repo should contain AppStream variant
- LU-2195 Change path to sources and iso when generating repositories
- LU-2202: Start unittests during installation or build of pungi

* Fri Feb 12 2021 Ondrej Nosek <onosek@redhat.com> - 4.2.8-1
- New upstream version

* Thu Feb 11 2021 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.2.9-1
- LNX-133: Create a server for building nightly builds of AlmaLinux
- LU-2133: Prepare CI for iso builds of CLOSS 8

@@ -195,6 +505,18 @@ python3 -m pytest
- LNX-102: Add tool that collects information about modules
- LNX-103 Update .spec file for AlmaLinux

* Wed Jan 27 2021 Fedora Release Engineering <releng@fedoraproject.org> - 4.2.7-3
- Rebuilt for https://fedoraproject.org/wiki/Fedora_34_Mass_Rebuild

* Fri Jan 22 2021 Lubomír Sedlář <lsedlar@redhat.com> - 4.2.7-2
- Backport patch for preserving default attribute in comps

* Tue Dec 8 09:01:52 CET 2020 Lubomír Sedlář <lsedlar@redhat.com> - 4.2.7-1
- New upstream version

* Thu Nov 05 2020 Lubomír Sedlář <lsedlar@redhat.com> - 4.2.6-1
- New upstream release

* Fri Sep 25 2020 Lubomír Sedlář <lsedlar@redhat.com> - 4.2.5-1
- New upstream release

@@ -131,8 +131,8 @@ def getArchList(thisarch=None):  # pragma: no cover


def _try_read_cpuinfo():  # pragma: no cover
    """ Try to read /proc/cpuinfo ... if we can't ignore errors (ie. proc not
    mounted). """
    """Try to read /proc/cpuinfo ... if we can't ignore errors (ie. proc not
    mounted)."""
    try:
        with open("/proc/cpuinfo", "r") as f:
            return f.readlines()

@@ -141,8 +141,8 @@ def _try_read_cpuinfo():  # pragma: no cover


def _parse_auxv():  # pragma: no cover
    """ Read /proc/self/auxv and parse it into global dict for easier access
    later on, very similar to what rpm does. """
    """Read /proc/self/auxv and parse it into global dict for easier access
    later on, very similar to what rpm does."""
    # In case we can't open and read /proc/self/auxv, just return
    try:
        with open("/proc/self/auxv", "rb") as f:

224 pungi/checks.py
@@ -53,7 +53,7 @@ from . import util


def is_jigdo_needed(conf):
    return conf.get("create_jigdo", True)
    return conf.get("create_jigdo")


def is_isohybrid_needed(conf):

@@ -75,8 +75,7 @@ def is_isohybrid_needed(conf):


def is_genisoimage_needed(conf):
    """This is only needed locally for createiso without runroot.
    """
    """This is only needed locally for createiso without runroot."""
    runroot_tag = conf.get("runroot_tag", "")
    if runroot_tag or conf.get("createiso_use_xorrisofs"):
        return False

@@ -94,7 +93,7 @@ def is_xorrisofs_needed(conf):


def is_createrepo_c_needed(conf):
    return conf.get("createrepo_c", True)
    return conf.get("createrepo_c")


# The first element in the tuple is package name expected to have the

@@ -228,9 +227,18 @@ def validate(config, offline=False, schema=None):
    DefaultValidator = _extend_with_default_and_alias(
        jsonschema.Draft4Validator, offline=offline
    )

    if hasattr(jsonschema.Draft4Validator, "TYPE_CHECKER"):
        # jsonschema >= 3.0 has new interface for checking types
        validator = DefaultValidator(schema)
    else:
        validator = DefaultValidator(
            schema,
            {"array": (tuple, list), "regex": six.string_types, "url": six.string_types},
            {
                "array": (tuple, list),
                "regex": six.string_types,
                "url": six.string_types,
            },
        )
    errors = []
    warnings = []

@@ -379,6 +387,7 @@ def _extend_with_default_and_alias(validator_class, offline=False):
                instance[property]["branch"] = resolver(
                    instance[property]["repo"],
                    instance[property].get("branch") or "HEAD",
                    instance[property].get("options"),
                )

        for error in _hook_errors(properties, instance, schema):

@@ -446,6 +455,19 @@ def _extend_with_default_and_alias(validator_class, offline=False):
            context=all_errors,
        )

    kwargs = {}
    if hasattr(validator_class, "TYPE_CHECKER"):
        # jsonschema >= 3
        def is_array(checker, instance):
            return isinstance(instance, (tuple, list))

        def is_string_type(checker, instance):
            return isinstance(instance, six.string_types)

        kwargs["type_checker"] = validator_class.TYPE_CHECKER.redefine_many(
            {"array": is_array, "regex": is_string_type, "url": is_string_type}
        )

    return jsonschema.validators.extend(
        validator_class,
        {

@@ -456,6 +478,7 @@ def _extend_with_default_and_alias(validator_class, offline=False):
            "additionalProperties": _validate_additional_properties,
            "anyOf": _validate_any_of,
        },
        **kwargs
    )


@@ -498,6 +521,13 @@ def make_schema():
                "file": {"type": "string"},
                "dir": {"type": "string"},
                "command": {"type": "string"},
                "options": {
                    "type": "object",
                    "properties": {
                        "credential_helper": {"type": "string"},
                    },
                    "additionalProperties": False,
                },
            },
            "additionalProperties": False,
        },

@@ -579,6 +609,7 @@ def make_schema():
            "release_discinfo_description": {"type": "string"},
            "treeinfo_version": {"type": "string"},
            "compose_type": {"type": "string", "enum": COMPOSE_TYPES},
            "label": {"type": "string"},
            "base_product_name": {"type": "string"},
            "base_product_short": {"type": "string"},
            "base_product_version": {"type": "string"},

@@ -610,7 +641,7 @@ def make_schema():
            "runroot_ssh_init_template": {"type": "string"},
            "runroot_ssh_install_packages_template": {"type": "string"},
            "runroot_ssh_run_template": {"type": "string"},
            "create_jigdo": {"type": "boolean", "default": True},
            "create_jigdo": {"type": "boolean", "default": False},
            "check_deps": {"type": "boolean", "default": True},
            "require_all_comps_packages": {"type": "boolean", "default": False},
            "bootable": {

@@ -654,13 +685,20 @@ def make_schema():
            "gather_profiler": {"type": "boolean", "default": False},
            "gather_allow_reuse": {"type": "boolean", "default": False},
            "pkgset_allow_reuse": {"type": "boolean", "default": True},
            "pkgset_source": {"type": "string", "enum": ["koji", "repos"]},
            "createiso_allow_reuse": {"type": "boolean", "default": True},
            "extraiso_allow_reuse": {"type": "boolean", "default": True},
            "pkgset_source": {"type": "string", "enum": [
                "koji",
                "repos",
                "kojimock",
            ]},
            "createrepo_c": {"type": "boolean", "default": True},
            "createrepo_checksum": {
                "type": "string",
                "default": "sha256",
                "enum": ["sha1", "sha256", "sha512"],
            },
            "createrepo_enable_cache": {"type": "boolean", "default": True},
            "createrepo_use_xz": {"type": "boolean", "default": False},
            "createrepo_num_threads": {"type": "number", "default": get_num_cpus()},
            "createrepo_num_workers": {"type": "number", "default": 3},

@@ -722,6 +760,8 @@ def make_schema():
                "minItems": 1,
                "default": [None],
            },
            "signed_packages_retries": {"type": "number", "default": 0},
            "signed_packages_wait": {"type": "number", "default": 30},
            "variants_file": {"$ref": "#/definitions/str_or_scm_dict"},
            "comps_file": {"$ref": "#/definitions/str_or_scm_dict"},
            "comps_filter_environments": {"type": "boolean", "default": True},

@@ -732,6 +772,7 @@ def make_schema():
                "patternProperties": {".+": {"$ref": "#/definitions/strings"}},
                "additionalProperties": False,
            },
            "module_obsoletes_dir": {"$ref": "#/definitions/str_or_scm_dict"},
            "create_optional_isos": {"type": "boolean", "default": False},
            "symlink_isos_to": {"type": "string"},
            "dogpile_cache_backend": {"type": "string"},

@@ -744,6 +785,12 @@ def make_schema():
            ),
            "createiso_break_hardlinks": {"type": "boolean", "default": False},
            "createiso_use_xorrisofs": {"type": "boolean", "default": False},
            "iso_level": {
                "anyOf": [
                    {"type": "number", "enum": [1, 2, 3, 4]},
                    _variant_arch_mapping({"type": "number", "enum": [1, 2, 3, 4]}),
                ],
            },
            "iso_hfs_ppc64le_compatible": {"type": "boolean", "default": True},
            "multilib": _variant_arch_mapping(
                {"$ref": "#/definitions/list_of_strings"}

@@ -785,6 +832,10 @@ def make_schema():
            "buildinstall_kickstart": {"$ref": "#/definitions/str_or_scm_dict"},
            "buildinstall_use_guestmount": {"type": "boolean", "default": True},
            "buildinstall_skip": _variant_arch_mapping({"type": "boolean"}),
            "buildinstall_packages": {
                "$ref": "#/definitions/package_mapping",
                "default": [],
            },
            "global_ksurl": {"type": "url"},
            "global_version": {"type": "string"},
            "global_target": {"type": "string"},

@@ -794,8 +845,11 @@ def make_schema():
            "pdc_insecure": {"deprecated": "Koji is queried instead"},
            "cts_url": {"type": "string"},
            "cts_keytab": {"type": "string"},
            "cts_oidc_token_url": {"type": "url"},
            "cts_oidc_client_id": {"type": "string"},
            "koji_profile": {"type": "string"},
            "koji_event": {"type": "number"},
            "koji_cache": {"type": "string"},
            "pkgset_koji_tag": {"$ref": "#/definitions/strings"},
            "pkgset_koji_builds": {"$ref": "#/definitions/strings"},
            "pkgset_koji_scratch_tasks": {"$ref": "#/definitions/strings"},

@@ -813,6 +867,10 @@ def make_schema():
                "type": "boolean",
                "default": True,
            },
            "pkgset_inherit_exclusive_arch_to_noarch": {
                "type": "boolean",
                "default": True,
            },
            "pkgset_scratch_modules": {
                "type": "object",
                "patternProperties": {

@@ -976,6 +1034,7 @@ def make_schema():
                "arches": {"$ref": "#/definitions/list_of_strings"},
                "failable": {"$ref": "#/definitions/list_of_strings"},
                "release": {"$ref": "#/definitions/optional_string"},
                "nomacboot": {"type": "boolean"},
            },
            "required": ["name", "kickstart"],
            "additionalProperties": False,

@@ -1009,10 +1068,14 @@ def make_schema():
                },
                "update_summary": {"type": "boolean"},
                "force_new_commit": {"type": "boolean"},
                "unified_core": {"type": "boolean"},
                "version": {"type": "string"},
                "config_branch": {"type": "string"},
                "tag_ref": {"type": "boolean"},
                "ostree_ref": {"type": "string"},
                "runroot_packages": {
                    "$ref": "#/definitions/list_of_strings",
                },
            },
            "required": [
                "treefile",

@@ -1043,6 +1106,7 @@ def make_schema():
                "failable": {"$ref": "#/definitions/list_of_strings"},
                "update_summary": {"type": "boolean"},
                "force_new_commit": {"type": "boolean"},
                "unified_core": {"type": "boolean"},
                "version": {"type": "string"},
                "config_branch": {"type": "string"},
                "tag_ref": {"type": "boolean"},

@@ -1072,6 +1136,7 @@ def make_schema():
                "template_repo": {"type": "string"},
                "template_branch": {"type": "string"},
                "extra_runroot_pkgs": {"$ref": "#/definitions/list_of_strings"},
                "skip_branding": {"type": "boolean"},
            },
            "additionalProperties": False,
        }

@@ -1082,6 +1147,7 @@ def make_schema():
        "live_images": _variant_arch_mapping(
            _one_or_list({"$ref": "#/definitions/live_image_config"})
        ),
        "image_build_allow_reuse": {"type": "boolean", "default": False},
        "image_build": {
            "type": "object",
            "patternProperties": {

@@ -1149,12 +1215,130 @@ def make_schema():
                        "version": {"type": "string"},
                        "distro": {"type": "string"},
                        "target": {"type": "string"},
                        "image_types": {"$ref": "#/definitions/strings"},
                        # Only a single image_type can be specified
                        # https://github.com/osbuild/koji-osbuild/commit/c7252650814f82281ee57b598cb2ad970b580451
                        # https://github.com/osbuild/koji-osbuild/commit/f21a2de39b145eb94f3d49cb4d8775a33ba56752
                        "image_types": {
                            "oneOf": [
                                {
                                    "type": "array",
                                    "items": {"type": "string"},
                                    "description": "Deprecated variant",
                                    "minItems": 1,
                                    "maxItems": 1,
                                },
                                {"type": "string"},
                            ]
                        },
                        "arches": {"$ref": "#/definitions/list_of_strings"},
                        "release": {"type": "string"},
                        "repo": {"$ref": "#/definitions/list_of_strings"},
                        "repo": {
                            "type": "array",
                            "items": {
                                "oneOf": [
                                    {
                                        "type": "object",
                                        "additionalProperties": False,
                                        "required": ["baseurl"],
                                        "properties": {
                                            "baseurl": {"type": "string"},
                                            "package_sets": {
                                                "type": "array",
                                                "items": {"type": "string"},
                                            },
                                        },
                                    },
                                    {"type": "string"},
                                ]
                            },
                        },
                        "failable": {"$ref": "#/definitions/list_of_strings"},
                        "subvariant": {"type": "string"},
                        "ostree_url": {"type": "string"},
                        "ostree_ref": {"type": "string"},
                        "ostree_parent": {"type": "string"},
                        "upload_options": {
                            # this should be really 'oneOf', but the minimal
                            # required properties in AWSEC2 and GCP options
                            # overlap.
                            "anyOf": [
                                # AWSEC2UploadOptions
                                {
                                    "type": "object",
                                    "additionalProperties": False,
                                    "required": [
                                        "region",
                                        "share_with_accounts",
                                    ],
                                    "properties": {
                                        "region": {
                                            "type": "string",
                                        },
                                        "snapshot_name": {
                                            "type": "string",
                                        },
                                        "share_with_accounts": {
                                            "type": "array",
                                            "items": {"type": "string"},
                                        },
                                    },
                                },
                                # AWSS3UploadOptions
                                {
                                    "type": "object",
                                    "additionalProperties": False,
                                    "required": ["region"],
                                    "properties": {
                                        "region": {"type": "string"}
                                    },
                                },
                                # AzureUploadOptions
                                {
                                    "type": "object",
                                    "additionalProperties": False,
                                    "required": [
                                        "tenant_id",
                                        "subscription_id",
                                        "resource_group",
                                    ],
                                    "properties": {
                                        "tenant_id": {"type": "string"},
                                        "subscription_id": {"type": "string"},
                                        "resource_group": {"type": "string"},
                                        "location": {"type": "string"},
                                        "image_name": {
                                            "type": "string",
                                        },
                                    },
                                },
                                # GCPUploadOptions
                                {
                                    "type": "object",
                                    "additionalProperties": False,
                                    "required": ["region"],
                                    "properties": {
                                        "region": {"type": "string"},
                                        "bucket": {"type": "string"},
                                        "image_name": {
                                            "type": "string",
                                        },
                                        "share_with_accounts": {
                                            "type": "array",
                                            "items": {"type": "string"},
                                        },
                                    },
                                },
                                # ContainerUploadOptions
                                {
                                    "type": "object",
                                    "additionalProperties": False,
                                    "properties": {
                                        "name": {"type": "string"},
                                        "tag": {"type": "string"},
                                    },
                                },
                            ]
                        },
                    },
                    "required": ["name", "distro", "image_types"],
                    "additionalProperties": False,

@@ -1203,6 +1387,7 @@ def make_schema():
            "anyOf": [{"type": "string"}, {"type": "number"}],
            "default": 10 * 1024 * 1024,
        },
        "osbs_allow_reuse": {"type": "boolean", "default": False},
        "osbs": {
            "type": "object",
            "patternProperties": {

@@ -1221,6 +1406,26 @@ def make_schema():
            },
            "additionalProperties": False,
        },
        "image_container": {
            "type": "object",
            "patternProperties": {
                ".+": _one_or_list(
                    {
                        "type": "object",
                        "properties": {
                            "url": {"type": "url"},
                            "target": {"type": "string"},
                            "priority": {"type": "number"},
                            "failable": {"type": "boolean"},
                            "git_branch": {"type": "string"},
                            "image_spec": {"type": "object"},
                        },
                        "required": ["url", "target", "git_branch", "image_spec"],
                    }
                ),
            },
            "additionalProperties": False,
        },
        "extra_files": _variant_arch_mapping(
            {
                "type": "array",

@@ -1325,6 +1530,7 @@ CONFIG_DEPS = {
        "requires": ((lambda x: x, ["base_product_name", "base_product_short"]),),
        "conflicts": ((lambda x: not x, ["base_product_name", "base_product_short"]),),
    },
    "cts_url": {"requires": ((lambda x: x, ["translate_paths"]),)},
    "product_id": {"conflicts": [(lambda x: not x, ["product_id_allow_missing"])]},
    "pkgset_scratch_modules": {"requires": ((lambda x: x, ["mbs_api_url"]),)},
    "pkgset_source": {
224 pungi/compose.py
@@ -17,6 +17,7 @@
__all__ = ("Compose",)


import contextlib
import errno
import logging
import os

@@ -24,8 +25,12 @@ import time
import tempfile
import shutil
import json
import socket

import kobo.log
import kobo.tback
import requests
from requests.exceptions import RequestException
from productmd.composeinfo import ComposeInfo
from productmd.images import Images
from dogpile.cache import make_region

@@ -34,12 +39,15 @@ from dogpile.cache import make_region
from pungi.graph import SimpleAcyclicOrientedGraph
from pungi.wrappers.variants import VariantsXmlParser
from pungi.paths import Paths
from pungi.wrappers.kojiwrapper import KojiDownloadProxy
from pungi.wrappers.scm import get_file_from_scm
from pungi.util import (
    makedirs,
    get_arch_variant_data,
    get_format_substs,
    get_variant_data,
    retry,
    translate_path_raw,
)
from pungi.metadata import compose_to_composeinfo

@@ -51,6 +59,101 @@ except ImportError:
SUPPORTED_MILESTONES = ["RC", "Update", "SecurityFix"]


def is_status_fatal(status_code):
    """Check if status code returned from CTS reports an error that is unlikely
    to be fixed by retrying. Generally client errors (4XX) are fatal, with the
    exception of 401 Unauthorized which could be caused by a transient network
    issue between the compose host and KDC.
    """
    if status_code == 401:
        return False
    return status_code >= 400 and status_code < 500


@retry(wait_on=RequestException)
def retry_request(method, url, data=None, json_data=None, auth=None):
    """
    :param str method: Request method.
    :param str url: Target URL.
    :param dict data: form-urlencoded data to send in the body of the request.
    :param dict json_data: json data to send in the body of the request.
    """
    request_method = getattr(requests, method)
    rv = request_method(url, data=data, json=json_data, auth=auth)
    if is_status_fatal(rv.status_code):
        try:
            error = rv.json()
        except ValueError:
            error = rv.text
        raise RuntimeError("%s responded with %d: %s" % (url, rv.status_code, error))
    rv.raise_for_status()
    return rv


class BearerAuth(requests.auth.AuthBase):
    def __init__(self, token):
        self.token = token

    def __call__(self, r):
        r.headers["authorization"] = "Bearer " + self.token
        return r


@contextlib.contextmanager
def cts_auth(pungi_conf):
    """
    :param dict pungi_conf: dict obj of pungi.json config.
    """
    auth = None
    token = None
    cts_keytab = pungi_conf.get("cts_keytab")
    cts_oidc_token_url = os.environ.get("CTS_OIDC_TOKEN_URL", "") or pungi_conf.get(
        "cts_oidc_token_url"
    )

    try:
        if cts_keytab:
            # requests-kerberos cannot accept a custom keytab, we need to use
            # an environment variable for this. But we need to change the
            # environment only temporarily just for this single requests.post.
            # So at first back up the current environment and revert to it
            # after the requests call.
            from requests_kerberos import HTTPKerberosAuth

            auth = HTTPKerberosAuth()
            environ_copy = dict(os.environ)
            if "$HOSTNAME" in cts_keytab:
                cts_keytab = cts_keytab.replace("$HOSTNAME", socket.gethostname())
            os.environ["KRB5_CLIENT_KTNAME"] = cts_keytab
            os.environ["KRB5CCNAME"] = "DIR:%s" % tempfile.mkdtemp()
        elif cts_oidc_token_url:
            cts_oidc_client_id = os.environ.get(
                "CTS_OIDC_CLIENT_ID", ""
            ) or pungi_conf.get("cts_oidc_client_id", "")
            token = retry_request(
                "post",
                cts_oidc_token_url,
                data={
                    "grant_type": "client_credentials",
                    "client_id": cts_oidc_client_id,
                    "client_secret": os.environ.get("CTS_OIDC_CLIENT_SECRET", ""),
                },
            ).json()["access_token"]
            auth = BearerAuth(token)
            del token

        yield auth
    except Exception as e:
        # Avoid leaking client secret in traceback
        e.show_locals = False
        raise e
    finally:
        if cts_keytab:
            shutil.rmtree(os.environ["KRB5CCNAME"].split(":", 1)[1])
            os.environ.clear()
            os.environ.update(environ_copy)


def get_compose_info(
    conf,
    compose_type="production",

@@ -80,47 +183,25 @@ def get_compose_info(
    ci.compose.type = compose_type
    ci.compose.date = compose_date or time.strftime("%Y%m%d", time.localtime())
    ci.compose.respin = compose_respin or 0

    cts_url = conf.get("cts_url", None)
    if cts_url:
        # Import requests and requests-kerberos here so it is not needed
        # if running without Compose Tracking Service.
        import requests
        from requests_kerberos import HTTPKerberosAuth

        # Requests-kerberos cannot accept custom keytab, we need to use
        # environment variable for this. But we need to change environment
        # only temporarily just for this single requests.post.
        # So at first back up the current environment and revert to it
        # after the requests.post call.
        cts_keytab = conf.get("cts_keytab", None)
        if cts_keytab:
            environ_copy = dict(os.environ)
            os.environ["KRB5_CLIENT_KTNAME"] = cts_keytab

        try:
            # Create compose in CTS and get the reserved compose ID.
            ci.compose.id = ci.create_compose_id()

    cts_url = conf.get("cts_url")
    if cts_url:
        # Create compose in CTS and get the reserved compose ID.
        url = os.path.join(cts_url, "api/1/composes/")
        data = {
            "compose_info": json.loads(ci.dumps()),
            "parent_compose_ids": parent_compose_ids,
            "respin_of": respin_of,
        }
            rv = requests.post(url, json=data, auth=HTTPKerberosAuth())
            rv.raise_for_status()
        finally:
            if cts_keytab:
                os.environ.clear()
                os.environ.update(environ_copy)
        with cts_auth(conf) as authentication:
            rv = retry_request("post", url, json_data=data, auth=authentication)

        # Update local ComposeInfo with received ComposeInfo.
        cts_ci = ComposeInfo()
        cts_ci.loads(rv.text)
        ci.compose.respin = cts_ci.compose.respin
        ci.compose.id = cts_ci.compose.id
    else:
        ci.compose.id = ci.create_compose_id()

    return ci

@@ -137,6 +218,23 @@ def write_compose_info(compose_dir, ci):
    ci.dump(os.path.join(work_dir, "composeinfo-base.json"))


def update_compose_url(compose_id, compose_dir, conf):
    cts_url = conf.get("cts_url", None)
    if cts_url:
        url = os.path.join(cts_url, "api/1/composes", compose_id)
        tp = conf.get("translate_paths", None)
        compose_url = translate_path_raw(tp, compose_dir)
        if compose_url == compose_dir:
            # We do not have a URL, do not attempt the update.
            return
        data = {
            "action": "set_url",
            "compose_url": compose_url,
        }
        with cts_auth(conf) as authentication:
            return retry_request("patch", url, json_data=data, auth=authentication)


def get_compose_dir(
    topdir,
    conf,

@@ -145,11 +243,19 @@ def get_compose_dir(
    compose_respin=None,
    compose_label=None,
    already_exists_callbacks=None,
    parent_compose_ids=None,
    respin_of=None,
):
    already_exists_callbacks = already_exists_callbacks or []

    ci = get_compose_info(
        conf, compose_type, compose_date, compose_respin, compose_label
        conf,
        compose_type,
        compose_date,
        compose_respin,
        compose_label,
        parent_compose_ids,
        respin_of,
    )

    cts_url = conf.get("cts_url", None)

@@ -222,6 +328,8 @@ class Compose(kobo.log.LoggingBase):
        self.koji_event = koji_event or conf.get("koji_event")
        self.notifier = notifier

        self._old_config = None

        # path definitions
        self.paths = Paths(self)

@@ -284,6 +392,8 @@ class Compose(kobo.log.LoggingBase):
        self.im.compose.respin = self.compose_respin
        self.im.metadata_path = self.paths.compose.metadata()

        self.containers_metadata = {}

        # Stores list of deliverables that failed, but did not abort the
        # compose.
        # {deliverable: [(Variant.uid, arch, subvariant)]}

@@ -300,9 +410,12 @@ class Compose(kobo.log.LoggingBase):
        else:
            self.cache_region = make_region().configure("dogpile.cache.null")

        self.koji_downloader = KojiDownloadProxy.from_config(self.conf, self._logger)

    get_compose_info = staticmethod(get_compose_info)
    write_compose_info = staticmethod(write_compose_info)
    get_compose_dir = staticmethod(get_compose_dir)
    update_compose_url = staticmethod(update_compose_url)

    def __getitem__(self, name):
        return self.variants[name]

@@ -343,6 +456,10 @@ class Compose(kobo.log.LoggingBase):
    def has_module_defaults(self):
        return bool(self.conf.get("module_defaults_dir", False))

    @property
    def has_module_obsoletes(self):
        return bool(self.conf.get("module_obsoletes_dir", False))

    @property
    def config_dir(self):
        return os.path.dirname(self.conf._open_file or "")

@@ -370,7 +487,7 @@ class Compose(kobo.log.LoggingBase):
            )
        else:
            file_name = os.path.basename(scm_dict)
            scm_dict = os.path.join(self.config_dir, os.path.basename(scm_dict))
            scm_dict = os.path.join(self.config_dir, scm_dict)

        self.log_debug("Writing variants file: %s", variants_file)
        tmp_dir = self.mkdtemp(prefix="variants_file_")

@@ -573,7 +690,52 @@ class Compose(kobo.log.LoggingBase):
        <compose_topdir>/work/{global,<arch>}/tmp[-<variant>]/
        """
        path = os.path.join(self.paths.work.tmp_dir(arch=arch, variant=variant))
        return tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=path)
        tmpdir = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=path)
        os.chmod(tmpdir, 0o755)
        return tmpdir

    def dump_containers_metadata(self):
        """Create a file with container metadata if there are any containers."""
        if not self.containers_metadata:
            return
        with open(self.paths.compose.metadata("osbs.json"), "w") as f:
            json.dump(
                self.containers_metadata,
                f,
                indent=4,
                sort_keys=True,
                separators=(",", ": "),
            )

    def traceback(self, detail=None, show_locals=True):
        """Store an extended traceback. This method should only be called when
        handling an exception.

        :param str detail: Extra information appended to the filename
        """
        basename = "traceback"
        if detail:
            basename += "-" + detail
        tb_path = self.paths.log.log_file("global", basename)
        self.log_error("Extended traceback in: %s", tb_path)
        with open(tb_path, "wb") as f:
            f.write(kobo.tback.Traceback(show_locals=show_locals).get_traceback())

    def load_old_compose_config(self):
        """
        Helper method to load Pungi config dump from old compose.
        """
        if not self._old_config:
            config_dump_full = self.paths.log.log_file("global", "config-dump")
            config_dump_full = self.paths.old_compose_path(config_dump_full)
            if not config_dump_full:
                return None

            self.log_info("Loading old config file: %s", config_dump_full)
            with open(config_dump_full, "r") as f:
                self._old_config = json.load(f)

        return self._old_config


def get_ordered_variant_uids(compose):

@ -5,16 +5,20 @@ from __future__ import print_function
|
|||
import os
|
||||
import six
|
||||
from collections import namedtuple
|
||||
from kobo.shortcuts import run
|
||||
from six.moves import shlex_quote
|
||||
|
||||
from .wrappers import iso
|
||||
from .wrappers.jigdo import JigdoWrapper
|
||||
|
||||
from .phases.buildinstall import BOOT_CONFIGS, BOOT_IMAGES
|
||||
|
||||
|
||||
CreateIsoOpts = namedtuple(
|
||||
"CreateIsoOpts",
|
||||
[
|
||||
"buildinstall_method",
|
||||
"boot_iso",
|
||||
"arch",
|
||||
"output_dir",
|
||||
"jigdo_dir",
|
||||
|
@ -25,6 +29,8 @@ CreateIsoOpts = namedtuple(
|
|||
"os_tree",
|
||||
"hfs_compat",
|
||||
"use_xorrisofs",
|
||||
"iso_level",
|
||||
"script_dir",
|
||||
],
|
||||
)
|
||||
CreateIsoOpts.__new__.__defaults__ = (None,) * len(CreateIsoOpts._fields)
|
||||
|
@ -76,6 +82,8 @@ def make_image(f, opts):
|
|||
volid=opts.volid,
|
||||
exclude=["./lost+found"],
|
||||
graft_points=opts.graft_points,
|
||||
use_xorrisofs=opts.use_xorrisofs,
|
||||
iso_level=opts.iso_level,
|
||||
**mkisofs_kwargs
|
||||
)
|
||||
emit(f, cmd)
|
||||
|
@ -97,7 +105,7 @@ def run_isohybrid(f, opts):
|
|||
|
||||
|
||||
def make_manifest(f, opts):
|
||||
emit(f, iso.get_manifest_cmd(opts.iso_name))
|
||||
emit(f, iso.get_manifest_cmd(opts.iso_name, opts.use_xorrisofs))
|
||||
|
||||
|
||||
def make_jigdo(f, opts):
|
||||
|
@ -113,6 +121,77 @@ def make_jigdo(f, opts):
|
|||
emit(f, cmd)
|
||||
|
||||
|
||||
def _get_perms(fs_path):
|
||||
"""Compute proper permissions for a file.
|
||||
|
||||
This mimicks what -rational-rock option of genisoimage does. All read bits
|
||||
are set, so that files and directories are globally readable. If any
|
||||
execute bit is set for a file, set them all. No writes are allowed and
|
||||
special bits are erased too.
|
||||
"""
|
||||
statinfo = os.stat(fs_path)
|
||||
perms = 0o444
|
||||
if statinfo.st_mode & 0o111:
|
||||
perms |= 0o111
|
||||
return perms
|
||||

def write_xorriso_commands(opts):
    # Create manifest for the boot.iso listing all contents
    boot_iso_manifest = "%s.manifest" % os.path.join(
        opts.script_dir, os.path.basename(opts.boot_iso)
    )
    run(
        iso.get_manifest_cmd(
            opts.boot_iso, opts.use_xorrisofs, output_file=boot_iso_manifest
        )
    )
    # Find which files may have been updated by pungi. This only includes a few
    # files from tweaking buildinstall and .discinfo metadata. There's no good
    # way to detect whether the boot config files actually changed, so we may
    # be updating files in the ISO with the same data.
    UPDATEABLE_FILES = set(BOOT_IMAGES + BOOT_CONFIGS + [".discinfo"])
    updated_files = set()
    excluded_files = set()
    with open(boot_iso_manifest) as f:
        for line in f:
            path = line.lstrip("/").rstrip("\n")
            if path in UPDATEABLE_FILES:
                updated_files.add(path)
            else:
                excluded_files.add(path)

    script = os.path.join(opts.script_dir, "xorriso-%s.txt" % id(opts))
    with open(script, "w") as f:
        emit(f, "-indev %s" % opts.boot_iso)
        emit(f, "-outdev %s" % os.path.join(opts.output_dir, opts.iso_name))
        emit(f, "-boot_image any replay")
        emit(f, "-volid %s" % opts.volid)
        # isoinfo -J uses the Joliet tree, and it's used by virt-install
        emit(f, "-joliet on")
        # Support long filenames in the Joliet trees. Repodata is particularly
        # likely to run into this limit.
        emit(f, "-compliance joliet_long_names")

        with open(opts.graft_points) as gp:
            for line in gp:
                iso_path, fs_path = line.strip().split("=", 1)
                if iso_path in excluded_files:
                    continue
                cmd = "-update" if iso_path in updated_files else "-map"
                emit(f, "%s %s %s" % (cmd, fs_path, iso_path))
                emit(f, "-chmod 0%o %s" % (_get_perms(fs_path), iso_path))

        if opts.arch == "ppc64le":
            # This is needed for the image to be bootable.
            emit(f, "-as mkisofs -U --")

        emit(f, "-chown_r 0 /")
        emit(f, "-chgrp_r 0 /")
        emit(f, "-end")
    return script

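To make the -update/-map/skip rule above concrete, here is a minimal standalone sketch; the file names and set contents are made-up stand-ins for the manifest and UPDATEABLE_FILES data computed above:

    UPDATEABLE_FILES = {"EFI/BOOT/grub.cfg", ".discinfo"}
    boot_iso_contents = {"EFI/BOOT/grub.cfg", "images/pxeboot/vmlinuz"}  # manifest

    def xorriso_cmd(iso_path):
        if iso_path in boot_iso_contents and iso_path not in UPDATEABLE_FILES:
            return None  # already on boot.iso and never tweaked: leave it alone
        if iso_path in boot_iso_contents:
            return "-update"  # possibly tweaked by Pungi: overwrite in the ISO
        return "-map"  # not on boot.iso yet: graft it in

    assert xorriso_cmd("images/pxeboot/vmlinuz") is None
    assert xorriso_cmd("EFI/BOOT/grub.cfg") == "-update"
    assert xorriso_cmd("Packages/p/pungi.rpm") == "-map"
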
def write_script(opts, f):
    if bool(opts.jigdo_dir) != bool(opts.os_tree):
        raise RuntimeError("jigdo_dir must be used together with os_tree")

@@ -120,8 +199,14 @@ def write_script(opts, f):
    emit(f, "#!/bin/bash")
    emit(f, "set -ex")
    emit(f, "cd %s" % opts.output_dir)

    if opts.use_xorrisofs and opts.buildinstall_method:
        script = write_xorriso_commands(opts)
        emit(f, "xorriso -dialog on <%s" % script)
    else:
        make_image(f, opts)
        run_isohybrid(f, opts)

    implant_md5(f, opts)
    make_manifest(f, opts)
    if opts.jigdo_dir:

@@ -0,0 +1,20 @@
# -*- coding: utf-8 -*-

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.


class UnsignedPackagesError(RuntimeError):
    """Raised when package set fails to find a properly signed copy of an
    RPM."""

    pass

@@ -35,7 +35,7 @@ from pungi.wrappers.createrepo import CreaterepoWrapper


class ReentrantYumLock(object):
    """ A lock that can be acquired multiple times by the same process. """
    """A lock that can be acquired multiple times by the same process."""

    def __init__(self, lock, log):
        self.lock = lock

@@ -60,7 +60,7 @@ class ReentrantYumLock(object):


def yumlocked(method):
    """ A locking decorator. """
    """A locking decorator."""

    def wrapper(self, *args, **kwargs):
        with self.yumlock:

@@ -1118,7 +1118,6 @@ class Pungi(PungiBase):
        self.logger.info("Finished gathering package objects.")

    def gather(self):

        # get package objects according to the input list
        self.getPackageObjects()
        if self.is_sources:

@@ -15,17 +15,20 @@


from enum import Enum
from itertools import count
from functools import cmp_to_key
from itertools import count, groupby
import logging
import os
import re

from kobo.rpmlib import parse_nvra
import rpm

import pungi.common
import pungi.dnf_wrapper
import pungi.multilib_dnf
import pungi.util
from pungi import arch_utils
from pungi.linker import Linker
from pungi.profiler import Profiler
from pungi.util import DEBUG_PATTERNS

@@ -245,13 +248,37 @@ class Gather(GatherBase):
        # from lookaside. This can be achieved by removing any package that is
        # also in lookaside from the list.
        lookaside_pkgs = set()

        if self.opts.lookaside_repos:
            # We will call `latest()` to get the highest version packages only.
            # However, that is per name and architecture. If a package switches
            # from arched to noarch or the other way, it is possible that the
            # package_list contains different versions in main repos and in
            # lookaside repos.
            # We need to manually filter the latest version.
            def vercmp(x, y):
                return rpm.labelCompare(x[1], y[1])

            # Annotate the packages with their version.
            versioned_packages = [
                (pkg, (str(pkg.epoch) or "0", pkg.version, pkg.release))
                for pkg in package_list
            ]
            # Sort the packages newest first.
            sorted_packages = sorted(
                versioned_packages, key=cmp_to_key(vercmp), reverse=True
            )
            # Group packages by version, take the first group and discard the
            # version info from the tuple.
            package_list = list(
                x[0] for x in next(groupby(sorted_packages, key=lambda x: x[1]))[1]
            )

            # Now we can decide what is used from lookaside.
            for pkg in package_list:
                if pkg.repoid in self.opts.lookaside_repos:
                    lookaside_pkgs.add("{0.name}-{0.evr}".format(pkg))

        if self.opts.greedy_method == "all":
            return list(package_list)

        all_pkgs = []
        for pkg in package_list:
            # Remove packages that are also in lookaside

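The sort-then-group trick above generalizes; a minimal sketch with made-up (epoch, version, release) tuples, assuming the rpm Python bindings are installed for labelCompare:

    from functools import cmp_to_key
    from itertools import groupby

    import rpm

    pkgs = [("foo-1.2-3", ("0", "1.2", "3")), ("foo-1.10-1", ("0", "1.10", "1"))]
    ordered = sorted(
        pkgs, key=cmp_to_key(lambda x, y: rpm.labelCompare(x[1], y[1])), reverse=True
    )
    # groupby yields runs of equal versions; the first run holds only the newest.
    newest = [name for name, _ in next(groupby(ordered, key=lambda x: x[1]))[1]]
    # newest == ["foo-1.10-1"] (rpm version rules rank 1.10 above 1.2)
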
@@ -263,16 +290,21 @@ class Gather(GatherBase):

        if not debuginfo:
            native_pkgs = set(
                self.q_native_binary_packages.filter(pkg=all_pkgs).apply()
                self.q_native_binary_packages.filter(pkg=all_pkgs).latest().apply()
            )
            multilib_pkgs = set(
                self.q_multilib_binary_packages.filter(pkg=all_pkgs).apply()
                self.q_multilib_binary_packages.filter(pkg=all_pkgs).latest().apply()
            )
        else:
            native_pkgs = set(self.q_native_debug_packages.filter(pkg=all_pkgs).apply())
            multilib_pkgs = set(
                self.q_multilib_debug_packages.filter(pkg=all_pkgs).apply()
            native_pkgs = set(
                self.q_native_debug_packages.filter(pkg=all_pkgs).latest().apply()
            )
            multilib_pkgs = set(
                self.q_multilib_debug_packages.filter(pkg=all_pkgs).latest().apply()
            )

        if self.opts.greedy_method == "all":
            return list(native_pkgs | multilib_pkgs)

        result = set()

@@ -392,9 +424,7 @@ class Gather(GatherBase):
        """Given a name of a queue (stored as attribute in `self`), exclude
        all given packages and keep only the latest per package name and arch.
        """
        setattr(
            self, queue, getattr(self, queue).filter(pkg__neq=exclude).latest().apply()
        )
        setattr(self, queue, getattr(self, queue).filter(pkg__neq=exclude).apply())

    @Profiler("Gather._apply_excludes()")
    def _apply_excludes(self, excludes):

@@ -500,12 +530,21 @@ class Gather(GatherBase):
                    name__glob=pattern[:-2]
                ).apply()
            else:
                pkgs = self.q_binary_packages.filter(
                    name__glob=pattern
                ).apply()
                kwargs = {"name__glob": pattern}
                if "." in pattern:
                    # The pattern could be name.arch. Check if the
                    # arch is valid, and if yes, make a more
                    # specific query.
                    name, arch = pattern.split(".", 1)
                    if arch in arch_utils.arches:
                        kwargs["name__glob"] = name
                        kwargs["arch__eq"] = arch
                pkgs = self.q_binary_packages.filter(**kwargs).apply()

            if not pkgs:
                self.logger.error("No package matches pattern %s" % pattern)
                self.logger.error(
                    "Could not find a match for %s in any configured repo", pattern
                )

            # The pattern could have been a glob. In that case we want to
            # group the packages by name and get best match in those

@@ -616,7 +655,6 @@ class Gather(GatherBase):
            return added

        for pkg in self.result_debug_packages.copy():

            if pkg not in self.finished_add_debug_package_deps:
                deps = self._get_package_deps(pkg, debuginfo=True)
                for i, req in deps:

@@ -784,7 +822,6 @@ class Gather(GatherBase):
                continue

            debug_pkgs = []
            pkg_in_lookaside = pkg.repoid in self.opts.lookaside_repos
            for i in candidates:
                if pkg.arch != i.arch:
                    continue

@@ -792,7 +829,7 @@ class Gather(GatherBase):
                    # If it's not debugsource package or does not match name of
                    # the package, we don't want it in.
                    continue
                if i.repoid in self.opts.lookaside_repos or pkg_in_lookaside:
                if self.is_from_lookaside(i):
                    self._set_flag(i, PkgFlag.lookaside)
                if i not in self.result_debug_packages:
                    added.add(i)

@@ -1029,7 +1066,7 @@ class Gather(GatherBase):

        # Link downloaded package in (or link package from file repo)
        try:
            linker.hardlink(pkg.localPkg(), target)
            linker.link(pkg.localPkg(), target)
        except Exception:
            self.logger.error("Unable to link %s from the yum cache." % pkg.name)
            raise

@@ -54,8 +54,7 @@ class SimpleAcyclicOrientedGraph(object):
        return False if node in self._graph else True

    def remove_final_endpoint(self, node):
        """
        """
        """"""
        remove_start_points = []
        for start, ends in self._graph.items():
            if node in ends:

@@ -20,8 +20,8 @@ import os
SIZE_UNITS = {
    "b": 1,
    "k": 1024,
    "M": 1024 ** 2,
    "G": 1024 ** 3,
    "M": 1024**2,
    "G": 1024**3,
}

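A hedged sketch of how such a table is consumed; the module's own convert_media_size helper presumably works along these lines, and this parse_size function is illustrative only:

    import re

    def parse_size(value, units=SIZE_UNITS):
        # "4G" -> 4 * 1024**3; a bare number is taken as bytes.
        m = re.match(r"^(\d+)\s*([bkMG])?$", str(value))
        if not m:
            raise ValueError("invalid size: %r" % value)
        return int(m.group(1)) * units[m.group(2) or "b"]

    # parse_size("4G") == 4294967296
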
@@ -306,11 +306,6 @@ def write_tree_info(compose, arch, variant, timestamp=None, bi=None):
    if variant.type in ("addon",) or variant.is_empty:
        return

    compose.log_debug(
        "on arch '%s' looking at variant '%s' of type '%s'"
        % (arch, variant, variant.type)
    )

    if not timestamp:
        timestamp = int(time.time())
    else:

@@ -44,6 +44,30 @@ def iter_module_defaults(path):
        yield module_name, index.get_module(module_name).get_defaults()


def get_module_obsoletes_idx(path, mod_list):
    """Given a path to a directory with yaml files, return an Index with
    all obsoletes merged.
    """

    merger = Modulemd.ModuleIndexMerger.new()
    md_idxs = []

    # associate_index does NOT copy its argument (nor increase the
    # reference count on the object). It only stores a pointer.
    for file in glob.glob(os.path.join(path, "*.yaml")):
        index = Modulemd.ModuleIndex()
        index.update_from_file(file, strict=False)
        mod_name = index.get_module_names()[0]

        if mod_name and (mod_name in mod_list or not mod_list):
            md_idxs.append(index)
            merger.associate_index(md_idxs[-1], 0)

    merged_idx = merger.resolve()

    return merged_idx


def collect_module_defaults(
    defaults_dir, modules_to_load=None, mod_index=None, overrides_dir=None
):

@@ -69,3 +93,26 @@ def collect_module_defaults(
            mod_index.add_defaults(defaults)

    return mod_index


def collect_module_obsoletes(obsoletes_dir, modules_to_load, mod_index=None):
    """Load module obsoletes into index.

    This works in a similar fashion to collect_module_defaults, except it
    merges indexes together instead of adding them during iteration.

    Additionally, if modules_to_load is not empty, the returned Index will
    include only obsoletes for those modules.
    """

    obsoletes_index = get_module_obsoletes_idx(obsoletes_dir, modules_to_load)

    # Merge Obsoletes with Modules Index.
    if mod_index:
        merger = Modulemd.ModuleIndexMerger.new()
        merger.associate_index(mod_index, 0)
        merger.associate_index(obsoletes_index, 0)
        merged_idx = merger.resolve()
        obsoletes_index = merged_idx

    return obsoletes_index

@@ -81,9 +81,6 @@ class PungiNotifier(object):

        self._update_args(kwargs)

        if self.compose:
            workdir = self.compose.paths.compose.topdir()

        with self.lock:
            for cmd in self.cmds:
                self._run_script(cmd, msg, workdir, kwargs)

@@ -65,6 +65,11 @@ def main(args=None):
        action="store_true",
        help="do not use rpm-ostree's built-in change detection",
    )
    treep.add_argument(
        "--unified-core",
        action="store_true",
        help="use unified core mode in rpm-ostree",
    )

    installerp = subparser.add_parser(
        "installer", help="Create an OSTree installer image"

@@ -43,6 +43,9 @@ class Tree(OSTree):
            # because something went wrong.
            "--touch-if-changed=%s.stamp" % self.commitid_file,
        ]
        if self.unified_core:
            # See https://github.com/coreos/rpm-ostree/issues/729
            cmd.append("--unified-core")
        if self.version:
            # Add versioning metadata
            cmd.append("--add-metadata-string=version=%s" % self.version)

@@ -121,6 +124,7 @@ class Tree(OSTree):
        self.extra_config = self.args.extra_config
        self.ostree_ref = self.args.ostree_ref
        self.force_new_commit = self.args.force_new_commit
        self.unified_core = self.args.unified_core

        if self.extra_config or self.ostree_ref:
            if self.extra_config:

@@ -103,12 +103,23 @@ class LogPaths(object):
            makedirs(path)
        return path

    def log_file(self, arch, log_name, create_dir=True):
    def koji_tasks_dir(self, create_dir=True):
        """
        Examples:
            logs/global/koji-tasks
        """
        path = os.path.join(self.topdir(create_dir=create_dir), "koji-tasks")
        if create_dir:
            makedirs(path)
        return path

    def log_file(self, arch, log_name, create_dir=True, ext=None):
        ext = ext or "log"
        arch = arch or "global"
        if log_name.endswith(".log"):
            log_name = log_name[:-4]
        return os.path.join(
            self.topdir(arch, create_dir=create_dir), "%s.%s.log" % (log_name, arch)
            self.topdir(arch, create_dir=create_dir), "%s.%s.%s" % (log_name, arch, ext)
        )

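With the new ext parameter the naming scheme stays log_name.arch.ext; a quick illustration (paths hypothetical):

    # log_file("x86_64", "createiso-Server-dvd1")            -> logs/x86_64/createiso-Server-dvd1.x86_64.log
    # log_file("x86_64", "createiso-Server-1-1", ext="json") -> logs/x86_64/createiso-Server-1-1.x86_64.json
    # log_file(None, "config-dump")                          -> logs/global/config-dump.global.log
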
@@ -498,10 +509,23 @@ class WorkPaths(object):
            makedirs(path)
        return path

    def module_obsoletes_dir(self, create_dir=True):
        """
        Example:
            work/global/module_obsoletes
        """
        path = os.path.join(self.topdir(create_dir=create_dir), "module_obsoletes")
        if create_dir:
            makedirs(path)
        return path

    def pkgset_file_cache(self, pkgset_name):
        """
        Returns the path to file in which the cached version of
        PackageSetBase.file_cache should be stored.

        Example:
            work/global/pkgset_f33-compose_file_cache.pickle
        """
        filename = "pkgset_%s_file_cache.pickle" % pkgset_name
        return os.path.join(self.topdir(arch="global"), filename)

@@ -27,6 +27,7 @@ from .createiso import CreateisoPhase  # noqa
from .extra_isos import ExtraIsosPhase  # noqa
from .live_images import LiveImagesPhase  # noqa
from .image_build import ImageBuildPhase  # noqa
from .image_container import ImageContainerPhase  # noqa
from .osbuild import OSBuildPhase  # noqa
from .repoclosure import RepoclosurePhase  # noqa
from .test import TestPhase  # noqa

@@ -14,6 +14,8 @@
# along with this program; if not, see <https://gnu.org/licenses/>.

import logging
import math
import time

from pungi import util

@@ -58,6 +60,7 @@ class PhaseBase(object):
            self.compose.log_warning("[SKIP ] %s" % self.msg)
            self.finished = True
            return
        self._start_time = time.time()
        self.compose.log_info("[BEGIN] %s" % self.msg)
        self.compose.notifier.send("phase-start", phase_name=self.name)
        self.run()

@@ -108,6 +111,13 @@ class PhaseBase(object):
        self.pool.stop()
        self.finished = True
        self.compose.log_info("[DONE ] %s" % self.msg)

        if hasattr(self, "_start_time"):
            self.compose.log_info(
                "PHASE %s took %d seconds"
                % (self.name.upper(), math.ceil(time.time() - self._start_time))
            )

        if self.used_patterns is not None:
            # We only want to report this if the config was actually queried.
            self.report_unused_patterns()

@@ -50,6 +50,9 @@ class BuildinstallPhase(PhaseBase):
        # A set of (variant_uid, arch) pairs that completed successfully. This
        # is needed to skip copying files for failed tasks.
        self.pool.finished_tasks = set()
        # A set of (variant_uid, arch) pairs that were reused from previous
        # compose.
        self.pool.reused_tasks = set()
        self.buildinstall_method = self.compose.conf.get("buildinstall_method")
        self.lorax_use_koji_plugin = self.compose.conf.get("lorax_use_koji_plugin")
        self.used_lorax = self.buildinstall_method == "lorax"

@@ -294,7 +297,7 @@ class BuildinstallPhase(PhaseBase):
                "Unsupported buildinstall method: %s" % self.buildinstall_method
            )

        for (variant, cmd) in commands:
        for variant, cmd in commands:
            self.pool.add(BuildinstallThread(self.pool))
            self.pool.queue_put(
                (self.compose, arch, variant, cmd, self.pkgset_phase)

@@ -312,6 +315,18 @@ class BuildinstallPhase(PhaseBase):
            in self.pool.finished_tasks
        )

    def reused(self, variant, arch):
        """
        Check if buildinstall phase reused previous results for given variant
        and arch. If the phase is skipped, the results will be considered
        reused as well.
        """
        return (
            super(BuildinstallPhase, self).skip()
            or (variant.uid if self.used_lorax else None, arch)
            in self.pool.reused_tasks
        )


def get_kickstart_file(compose):
    scm_dict = compose.conf.get("buildinstall_kickstart")

@@ -349,9 +364,17 @@ BOOT_CONFIGS = [
    "EFI/BOOT/BOOTX64.conf",
    "EFI/BOOT/grub.cfg",
]
BOOT_IMAGES = [
    "images/efiboot.img",
]


def tweak_configs(path, volid, ks_file, configs=BOOT_CONFIGS, logger=None):
    """
    Put escaped volume ID and possibly kickstart file into the boot
    configuration files.
    :returns: list of paths to modified config files
    """
    volid_escaped = volid.replace(" ", r"\x20").replace("\\", "\\\\")
    volid_escaped_2 = volid_escaped.replace("\\", "\\\\")
    found_configs = []

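For illustration, the two escape levels produce the following literal strings (volume ID made up):

    volid            ->  Fedora 36 x86_64
    volid_escaped    ->  Fedora\\x2036\\x20x86_64      (space becomes \x20, then backslashes are doubled)
    volid_escaped_2  ->  Fedora\\\\x2036\\\\x20x86_64  (doubled once more, used for yaboot.conf)
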
@@ -359,7 +382,6 @@ def tweak_configs(path, volid, ks_file, configs=BOOT_CONFIGS, logger=None):
        config_path = os.path.join(path, config)
        if not os.path.exists(config_path):
            continue
        found_configs.append(config)

        with open(config_path, "r") as f:
            data = original_data = f.read()

@@ -368,7 +390,7 @@ def tweak_configs(path, volid, ks_file, configs=BOOT_CONFIGS, logger=None):
        # double-escape volid in yaboot.conf
        new_volid = volid_escaped_2 if "yaboot" in config else volid_escaped

        ks = (" ks=hd:LABEL=%s:/ks.cfg" % new_volid) if ks_file else ""
        ks = (" inst.ks=hd:LABEL=%s:/ks.cfg" % new_volid) if ks_file else ""

        # pre-f18
        data = re.sub(r":CDLABEL=[^ \n]*", r":CDLABEL=%s%s" % (new_volid, ks), data)

@@ -379,7 +401,12 @@ def tweak_configs(path, volid, ks_file, configs=BOOT_CONFIGS, logger=None):
        with open(config_path, "w") as f:
            f.write(data)

        if logger and data != original_data:
        if data != original_data:
            found_configs.append(config)
            if logger:
                # Generally lorax should create file with correct volume id
                # already. If we don't have a kickstart, this function should
                # be a no-op.
                logger.info("Boot config %s changed" % config_path)

    return found_configs

@@ -419,9 +446,8 @@ def tweak_buildinstall(
    if kickstart_file and found_configs:
        shutil.copy2(kickstart_file, os.path.join(dst, "ks.cfg"))

    images = [
        os.path.join(tmp_dir, "images", "efiboot.img"),
    ]
    images = [os.path.join(tmp_dir, img) for img in BOOT_IMAGES]
    if found_configs:
        for image in images:
            if not os.path.isfile(image):
                continue

@@ -431,7 +457,9 @@ def tweak_buildinstall(
                logger=compose._logger,
                use_guestmount=compose.conf.get("buildinstall_use_guestmount"),
            ) as mount_tmp_dir:
                for config in BOOT_CONFIGS:
                for config in found_configs:
                    # Put each modified config file into the image (overwriting the
                    # original).
                    config_path = os.path.join(tmp_dir, config)
                    config_in_image = os.path.join(mount_tmp_dir, config)

@@ -661,9 +689,16 @@ class BuildinstallThread(WorkerThread):
            return None

        compose.log_info("Loading old BUILDINSTALL phase metadata: %s", old_metadata)
        try:
            with open(old_metadata, "rb") as f:
                old_result = pickle.load(f)
                return old_result
        except Exception as e:
            compose.log_debug(
                "Failed to load old BUILDINSTALL phase metadata %s : %s"
                % (old_metadata, str(e))
            )
            return None

    def _reuse_old_buildinstall_result(self, compose, arch, variant, cmd, pkgset_phase):
        """

@@ -729,7 +764,7 @@ class BuildinstallThread(WorkerThread):
        # Ask Koji for all the RPMs in the `runroot_tag` and check that
        # those installed in the old buildinstall buildroot are still in the
        # very same versions/releases.
        koji_wrapper = kojiwrapper.KojiWrapper(compose.conf["koji_profile"])
        koji_wrapper = kojiwrapper.KojiWrapper(compose)
        rpms = koji_wrapper.koji_proxy.listTaggedRPMS(
            compose.conf.get("runroot_tag"), inherit=True, latest=True
        )[0]

@@ -801,12 +836,15 @@ class BuildinstallThread(WorkerThread):
            chown_paths.append(_get_log_dir(compose, variant, arch))
        elif buildinstall_method == "buildinstall":
            packages += ["anaconda"]

        packages += get_arch_variant_data(
            compose.conf, "buildinstall_packages", arch, variant
        )
        if self._reuse_old_buildinstall_result(
            compose, arch, variant, cmd, pkgset_phase
        ):
            self.copy_files(compose, variant, arch)
            self.pool.finished_tasks.add((variant.uid if variant else None, arch))
            self.pool.reused_tasks.add((variant.uid if variant else None, arch))
            self.pool.log_info("[DONE ] %s" % msg)
            return

@@ -18,6 +18,7 @@ import os
import random
import shutil
import stat
import json

import productmd.treeinfo
from productmd.images import Image

@@ -36,6 +37,7 @@ from pungi.util import (
    failable,
    get_file_size,
    get_mtime,
    read_json_file,
)
from pungi.media_split import MediaSplitter, convert_media_size
from pungi.compose_metadata.discinfo import read_discinfo, write_discinfo

@@ -73,6 +75,170 @@ class CreateisoPhase(PhaseLoggerMixin, PhaseBase):
            return False
        return bool(self.compose.conf.get("buildinstall_method", ""))

    def _metadata_path(self, variant, arch, disc_num, disc_count):
        return self.compose.paths.log.log_file(
            arch,
            "createiso-%s-%d-%d" % (variant.uid, disc_num, disc_count),
            ext="json",
        )

    def save_reuse_metadata(self, cmd, variant, arch, opts):
        """Save metadata for future composes to verify if the compose can be reused."""
        metadata = {
            "cmd": cmd,
            "opts": opts._asdict(),
        }

        metadata_path = self._metadata_path(
            variant, arch, cmd["disc_num"], cmd["disc_count"]
        )
        with open(metadata_path, "w") as f:
            json.dump(metadata, f, indent=2)
        return metadata

    def _load_old_metadata(self, cmd, variant, arch):
        metadata_path = self._metadata_path(
            variant, arch, cmd["disc_num"], cmd["disc_count"]
        )
        old_path = self.compose.paths.old_compose_path(metadata_path)
        self.logger.info(
            "Loading old metadata for %s.%s from: %s", variant, arch, old_path
        )
        try:
            return read_json_file(old_path)
        except Exception:
            return None

    def perform_reuse(self, cmd, variant, arch, opts, iso_path):
        """
        Copy all related files from old compose to the new one. As a last step
        add the new image to metadata.
        """
        linker = OldFileLinker(self.logger)
        old_file_name = os.path.basename(iso_path)
        current_file_name = os.path.basename(cmd["iso_path"])
        try:
            # Hardlink ISO and manifest
            for suffix in ("", ".manifest"):
                linker.link(iso_path + suffix, cmd["iso_path"] + suffix)
            # Copy log files
            # The log file name includes filename of the image, so we need to
            # find old file with the old name, and rename it to the new name.
            log_file = self.compose.paths.log.log_file(
                arch, "createiso-%s" % current_file_name
            )
            old_log_file = self.compose.paths.old_compose_path(
                self.compose.paths.log.log_file(arch, "createiso-%s" % old_file_name)
            )
            linker.link(old_log_file, log_file)
            # Copy jigdo files
            if opts.jigdo_dir:
                old_jigdo_dir = self.compose.paths.old_compose_path(opts.jigdo_dir)
                for suffix in (".template", ".jigdo"):
                    linker.link(
                        os.path.join(old_jigdo_dir, old_file_name) + suffix,
                        os.path.join(opts.jigdo_dir, current_file_name) + suffix,
                    )
        except Exception:
            # A problem happened while linking some file, let's clean up
            # everything.
            linker.abort()
            raise
        # Add image to manifest
        add_iso_to_metadata(
            self.compose,
            variant,
            arch,
            cmd["iso_path"],
            bootable=cmd["bootable"],
            disc_num=cmd["disc_num"],
            disc_count=cmd["disc_count"],
        )

    def try_reuse(self, cmd, variant, arch, opts):
        """Try to reuse image from previous compose.

        :returns bool: True if reuse was successful, False otherwise
        """
        if not self.compose.conf["createiso_allow_reuse"]:
            return

        log_msg = "Cannot reuse ISO for %s.%s" % (variant, arch)
        current_metadata = self.save_reuse_metadata(cmd, variant, arch, opts)

        if opts.buildinstall_method and not self.bi.reused(variant, arch):
            # If buildinstall phase was not reused for some reason, we cannot
            # reuse any bootable image. If a package change caused rebuild of
            # boot.iso, we would catch it here too, but there could be a
            # configuration change in lorax template which would remain
            # undetected.
            self.logger.info("%s - boot configuration changed", log_msg)
            return False

        # Check old compose configuration: extra_files and product_ids can be
        # reflected on ISO.
        old_config = self.compose.load_old_compose_config()
        if not old_config:
            self.logger.info("%s - no config for old compose", log_msg)
            return False
        # Convert current configuration to JSON and back to encode it similarly
        # to the old one
        config = json.loads(json.dumps(self.compose.conf))
        for opt in self.compose.conf:
            # Skip a selection of options: these affect what packages can be
            # included, which we explicitly check later on.
            config_whitelist = set(
                [
                    "gather_lookaside_repos",
                    "pkgset_koji_builds",
                    "pkgset_koji_scratch_tasks",
                    "pkgset_koji_module_builds",
                ]
            )
            # Skip irrelevant options
            config_whitelist.update(["osbs", "osbuild"])
            if opt in config_whitelist:
                continue

            if old_config.get(opt) != config.get(opt):
                self.logger.info("%s - option %s differs", log_msg, opt)
                return False

        old_metadata = self._load_old_metadata(cmd, variant, arch)
        if not old_metadata:
            self.logger.info("%s - no old metadata found", log_msg)
            return False

        # Test if volume ID matches - volid can be generated dynamically based on
        # other values, and could change even if nothing else is different.
        if current_metadata["opts"]["volid"] != old_metadata["opts"]["volid"]:
            self.logger.info("%s - volume ID differs", log_msg)
            return False

        # Compare packages on the ISO.
        if compare_packages(
            old_metadata["opts"]["graft_points"],
            current_metadata["opts"]["graft_points"],
        ):
            self.logger.info("%s - packages differ", log_msg)
            return False

        try:
            self.perform_reuse(
                cmd,
                variant,
                arch,
                opts,
                old_metadata["cmd"]["iso_path"],
            )
            return True
        except Exception as exc:
            self.compose.log_error(
                "Error while reusing ISO for %s.%s: %s", variant, arch, exc
            )
            self.compose.traceback("createiso-reuse-%s-%s" % (variant, arch))
            return False

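The JSON round-trip above matters because the old configuration was loaded from a JSON dump: encoding the live config the same way normalizes types before comparison. A minimal sketch with a made-up option:

    import json

    conf = {"iso_size": 4700000000, "disc_types": ("dvd", "cd")}
    normalized = json.loads(json.dumps(conf))
    # The tuple comes back as a list, matching what json.load produced
    # for the old compose: {"iso_size": 4700000000, "disc_types": ["dvd", "cd"]}
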
    def run(self):
        symlink_isos_to = self.compose.conf.get("symlink_isos_to")
        disc_type = self.compose.conf["disc_types"].get("dvd", "dvd")

@@ -172,21 +338,29 @@ class CreateisoPhase(PhaseLoggerMixin, PhaseBase):
                    supported=self.compose.supported,
                    hfs_compat=self.compose.conf["iso_hfs_ppc64le_compatible"],
                    use_xorrisofs=self.compose.conf.get("createiso_use_xorrisofs"),
                    iso_level=get_iso_level_config(self.compose, variant, arch),
                )

                if bootable:
                    opts = opts._replace(
                        buildinstall_method=self.compose.conf["buildinstall_method"]
                        buildinstall_method=self.compose.conf[
                            "buildinstall_method"
                        ],
                        boot_iso=os.path.join(os_tree, "images", "boot.iso"),
                    )

                if self.compose.conf["create_jigdo"]:
                    jigdo_dir = self.compose.paths.compose.jigdo_dir(arch, variant)
                    opts = opts._replace(jigdo_dir=jigdo_dir, os_tree=os_tree)

                script_file = os.path.join(
                    self.compose.paths.work.tmp_dir(arch, variant),
                    "createiso-%s.sh" % filename,
                )
                # Try to reuse
                if self.try_reuse(cmd, variant, arch, opts):
                    # Reuse was successful, go to next ISO
                    continue

                script_dir = self.compose.paths.work.tmp_dir(arch, variant)
                opts = opts._replace(script_dir=script_dir)
                script_file = os.path.join(script_dir, "createiso-%s.sh" % filename)
                with open(script_file, "w") as f:
                    createiso.write_script(opts, f)
                cmd["cmd"] = ["bash", script_file]

@@ -195,13 +369,43 @@ class CreateisoPhase(PhaseLoggerMixin, PhaseBase):
        if self.compose.notifier:
            self.compose.notifier.send("createiso-targets", deliverables=deliverables)

        for (cmd, variant, arch) in commands:
        for cmd, variant, arch in commands:
            self.pool.add(CreateIsoThread(self.pool))
            self.pool.queue_put((self.compose, cmd, variant, arch))

        self.pool.start()

def read_packages(graft_points):
    """Read packages that were listed in given graft points file.

    Only files under the Packages directory are considered. In particular this
    excludes .discinfo, .treeinfo and media.repo as well as repodata and
    any extra files.

    Extra files are easier to check by configuration (same name doesn't
    imply same content). Repodata depend entirely on the included packages (and
    possibly a product id certificate), but are affected by the current time,
    which can change the checksum despite the data being the same.
    """
    with open(graft_points) as f:
        return set(
            line.split("=", 1)[0]
            for line in f
            if line.startswith("Packages/") or "/Packages/" in line
        )


def compare_packages(old_graft_points, new_graft_points):
    """Read packages from the two files and compare them.

    :returns bool: True if there are differences, False otherwise
    """
    old_files = read_packages(old_graft_points)
    new_files = read_packages(new_graft_points)
    return old_files != new_files

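A short illustration of the filter with made-up graft-point lines:

    lines = [
        "Server/Packages/a/acl-2.3.1-3.el9.x86_64.rpm=/mnt/koji/acl-2.3.1-3.el9.x86_64.rpm",
        ".discinfo=/tmp/work/.discinfo",
        "Server/repodata/repomd.xml=/tmp/work/repodata/repomd.xml",
    ]
    packages = {
        line.split("=", 1)[0]
        for line in lines
        if line.startswith("Packages/") or "/Packages/" in line
    }
    # packages == {"Server/Packages/a/acl-2.3.1-3.el9.x86_64.rpm"}
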
class CreateIsoThread(WorkerThread):
    def fail(self, compose, cmd, variant, arch):
        self.pool.log_error("CreateISO failed, removing ISO: %s" % cmd["iso_path"])

@@ -324,15 +528,13 @@ def add_iso_to_metadata(
    return img


def run_createiso_command(
    num, compose, bootable, arch, cmd, mounts, log_file, with_jigdo=True
):
def run_createiso_command(num, compose, bootable, arch, cmd, mounts, log_file):
    packages = [
        "coreutils",
        "xorriso" if compose.conf.get("createiso_use_xorrisofs") else "genisoimage",
        "isomd5sum",
    ]
    if with_jigdo and compose.conf["create_jigdo"]:
    if compose.conf["create_jigdo"]:
        packages.append("jigdo")
    if bootable:
        extra_packages = {

@@ -346,7 +548,7 @@ def run_createiso_command(
    build_arch = arch
    if runroot.runroot_method == "koji" and not bootable:
        runroot_tag = compose.conf["runroot_tag"]
        koji_wrapper = kojiwrapper.KojiWrapper(compose.conf["koji_profile"])
        koji_wrapper = kojiwrapper.KojiWrapper(compose)
        koji_proxy = koji_wrapper.koji_proxy
        tag_info = koji_proxy.getTag(runroot_tag)
        if not tag_info:

@@ -598,3 +800,36 @@ def create_hardlinks(staging_dir, log_file):
    """
    cmd = ["/usr/sbin/hardlink", "-c", "-vv", staging_dir]
    run(cmd, logfile=log_file, show_cmd=True)


class OldFileLinker(object):
    """
    A wrapper around os.link that remembers which files were linked and can
    clean them up.
    """

    def __init__(self, logger):
        self.logger = logger
        self.linked_files = []

    def link(self, src, dst):
        self.logger.debug("Hardlinking %s to %s", src, dst)
        os.link(src, dst)
        self.linked_files.append(dst)

    def abort(self):
        """Clean up all files created by this instance."""
        for f in self.linked_files:
            os.unlink(f)

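A hedged usage sketch (logger name and paths made up):

    import logging

    linker = OldFileLinker(logging.getLogger("reuse"))
    try:
        linker.link("/old/compose/Server.iso", "/new/compose/Server.iso")
        linker.link("/old/compose/Server.iso.manifest", "/new/compose/Server.iso.manifest")
    except Exception:
        linker.abort()  # roll back: unlink everything linked so far
        raise
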
def get_iso_level_config(compose, variant, arch):
    """
    Get configured ISO level for this variant and architecture.
    """
    level = compose.conf.get("iso_level")
    if isinstance(level, list):
        level = None
        for c in get_arch_variant_data(compose.conf, "iso_level", arch, variant):
            level = c
    return level

@@ -16,7 +16,6 @@

__all__ = ("create_variant_repo",)


import copy
import errno
import glob

@@ -25,19 +24,22 @@ import shutil
import threading
import xml.dom.minidom

from kobo.threads import ThreadPool, WorkerThread
from kobo.shortcuts import run, relative_path

from ..wrappers.scm import get_dir_from_scm
from ..wrappers.createrepo import CreaterepoWrapper
from .base import PhaseBase
from ..util import get_arch_variant_data, temp_dir
from ..module_util import Modulemd, collect_module_defaults

import productmd.rpms
import productmd.modules
import productmd.rpms
from kobo.shortcuts import relative_path, run
from kobo.threads import ThreadPool, WorkerThread

from ..module_util import Modulemd, collect_module_defaults, collect_module_obsoletes
from ..util import (
    get_arch_variant_data,
    read_single_module_stream_from_file,
    temp_dir,
)
from ..wrappers.createrepo import CreaterepoWrapper
from ..wrappers.scm import get_dir_from_scm
from .base import PhaseBase

CACHE_TOPDIR = "/var/cache/pungi/createrepo_c/"
createrepo_lock = threading.Lock()
createrepo_dirs = set()

@@ -79,6 +81,7 @@ class CreaterepoPhase(PhaseBase):
            get_dir_from_scm(
                self.compose.conf["createrepo_extra_modulemd"][variant.uid],
                self.compose.paths.work.tmp_dir(variant=variant, create_dir=False),
                compose=self.compose,
            )

        self.pool.queue_put((self.compose, None, variant, "srpm"))

@@ -188,6 +191,23 @@ def create_variant_repo(
    comps_path = None
    if compose.has_comps and pkg_type == "rpm":
        comps_path = compose.paths.work.comps(arch=arch, variant=variant)

    if compose.conf["createrepo_enable_cache"]:
        cachedir = os.path.join(
            CACHE_TOPDIR,
            "%s-%s" % (compose.conf["release_short"], os.getuid()),
        )
        if not os.path.exists(cachedir):
            try:
                os.makedirs(cachedir)
            except Exception as e:
                compose.log_warning(
                    "Cache disabled because cannot create cache dir %s %s"
                    % (cachedir, str(e))
                )
                cachedir = None
    else:
        cachedir = None
    cmd = repo.get_createrepo_cmd(
        repo_dir,
        update=True,

@@ -203,6 +223,7 @@ def create_variant_repo(
        oldpackagedirs=old_package_dirs,
        use_xz=compose.conf["createrepo_use_xz"],
        extra_args=compose.conf["createrepo_extra_args"],
        cachedir=cachedir,
    )
    log_file = compose.paths.log.log_file(
        arch, "createrepo-%s.%s" % (variant, pkg_type)

@@ -245,12 +266,15 @@ def create_variant_repo(
            defaults_dir, module_names, mod_index, overrides_dir=overrides_dir
        )

        obsoletes_dir = compose.paths.work.module_obsoletes_dir()
        mod_index = collect_module_obsoletes(obsoletes_dir, module_names, mod_index)

        # Add extra modulemd files
        if variant.uid in compose.conf.get("createrepo_extra_modulemd", {}):
            compose.log_debug("Adding extra modulemd for %s.%s", variant.uid, arch)
            dirname = compose.paths.work.tmp_dir(variant=variant, create_dir=False)
            for filepath in glob.glob(os.path.join(dirname, arch) + "/*.yaml"):
                module_stream = Modulemd.ModuleStream.read_file(filepath, strict=True)
                module_stream = read_single_module_stream_from_file(filepath)
                if not mod_index.add_module_stream(module_stream):
                    raise RuntimeError(
                        "Failed parsing modulemd data from %s" % filepath

@@ -343,7 +367,7 @@ def get_productids_from_scm(compose):

    tmp_dir = compose.mkdtemp(prefix="pungi_")
    try:
        get_dir_from_scm(product_id, tmp_dir)
        get_dir_from_scm(product_id, tmp_dir, compose=compose)
    except OSError as e:
        if e.errno == errno.ENOENT and product_id_allow_missing:
            compose.log_warning("No product IDs in %s" % product_id)

@@ -14,6 +14,8 @@
# along with this program; if not, see <https://gnu.org/licenses/>.

import os
import hashlib
import json

from kobo.shortcuts import force_list
from kobo.threads import ThreadPool, WorkerThread

@@ -28,8 +30,17 @@ from pungi.phases.createiso import (
    copy_boot_images,
    run_createiso_command,
    load_and_tweak_treeinfo,
    compare_packages,
    OldFileLinker,
    get_iso_level_config,
)
from pungi.util import (
    failable,
    get_format_substs,
    get_variant_data,
    get_volid,
    read_json_file,
)
from pungi.util import failable, get_format_substs, get_variant_data, get_volid
from pungi.wrappers import iso
from pungi.wrappers.scm import get_dir_from_scm, get_file_from_scm

@@ -37,9 +48,10 @@ from pungi.wrappers.scm import get_dir_from_scm, get_file_from_scm
class ExtraIsosPhase(PhaseLoggerMixin, ConfigGuardedPhase, PhaseBase):
    name = "extra_isos"

    def __init__(self, compose):
    def __init__(self, compose, buildinstall_phase):
        super(ExtraIsosPhase, self).__init__(compose)
        self.pool = ThreadPool(logger=self.logger)
        self.bi = buildinstall_phase

    def validate(self):
        for variant in self.compose.get_variants(types=["variant"]):

@@ -64,14 +76,18 @@ class ExtraIsosPhase(PhaseLoggerMixin, ConfigGuardedPhase, PhaseBase):
        for arch in sorted(arches):
            commands.append((config, variant, arch))

        for (config, variant, arch) in commands:
            self.pool.add(ExtraIsosThread(self.pool))
        for config, variant, arch in commands:
            self.pool.add(ExtraIsosThread(self.pool, self.bi))
            self.pool.queue_put((self.compose, config, variant, arch))

        self.pool.start()

class ExtraIsosThread(WorkerThread):
    def __init__(self, pool, buildinstall_phase):
        super(ExtraIsosThread, self).__init__(pool)
        self.bi = buildinstall_phase

    def process(self, item, num):
        self.num = num
        compose, config, variant, arch = item

@@ -115,20 +131,28 @@ class ExtraIsosThread(WorkerThread):
            supported=compose.supported,
            hfs_compat=compose.conf["iso_hfs_ppc64le_compatible"],
            use_xorrisofs=compose.conf.get("createiso_use_xorrisofs"),
            iso_level=get_iso_level_config(compose, variant, arch),
        )
        os_tree = compose.paths.compose.os_tree(arch, variant)
        if compose.conf["create_jigdo"]:
            jigdo_dir = compose.paths.compose.jigdo_dir(arch, variant)
            os_tree = compose.paths.compose.os_tree(arch, variant)
            opts = opts._replace(jigdo_dir=jigdo_dir, os_tree=os_tree)

        if bootable:
            opts = opts._replace(
                buildinstall_method=compose.conf["buildinstall_method"]
                buildinstall_method=compose.conf["buildinstall_method"],
                boot_iso=os.path.join(os_tree, "images", "boot.iso"),
            )

        script_file = os.path.join(
            compose.paths.work.tmp_dir(arch, variant), "extraiso-%s.sh" % filename
        )
        # Check if it can be reused.
        hash = hashlib.sha256()
        hash.update(json.dumps(config, sort_keys=True).encode("utf-8"))
        config_hash = hash.hexdigest()

        if not self.try_reuse(compose, variant, arch, config_hash, opts):
            script_dir = compose.paths.work.tmp_dir(arch, variant)
            opts = opts._replace(script_dir=script_dir)
            script_file = os.path.join(script_dir, "extraiso-%s.sh" % filename)
            with open(script_file, "w") as f:
                createiso.write_script(opts, f)

@@ -142,7 +166,6 @@ class ExtraIsosThread(WorkerThread):
            log_file=compose.paths.log.log_file(
                arch, "extraiso-%s" % os.path.basename(iso_path)
            ),
            with_jigdo=compose.conf["create_jigdo"],
        )

        img = add_iso_to_metadata(

@@ -155,8 +178,155 @@ class ExtraIsosThread(WorkerThread):
        )
        img._max_size = config.get("max_size")

        save_reuse_metadata(compose, variant, arch, config_hash, opts, iso_path)

        self.pool.log_info("[DONE ] %s" % msg)

    def try_reuse(self, compose, variant, arch, config_hash, opts):
        # Check explicit config
        if not compose.conf["extraiso_allow_reuse"]:
            return

        log_msg = "Cannot reuse ISO for %s.%s" % (variant, arch)

        if opts.buildinstall_method and not self.bi.reused(variant, arch):
            # If buildinstall phase was not reused for some reason, we cannot
            # reuse any bootable image. If a package change caused rebuild of
            # boot.iso, we would catch it here too, but there could be a
            # configuration change in lorax template which would remain
            # undetected.
            self.pool.log_info("%s - boot configuration changed", log_msg)
            return False

        # Check old compose configuration: extra_files and product_ids can be
        # reflected on ISO.
        old_config = compose.load_old_compose_config()
        if not old_config:
            self.pool.log_info("%s - no config for old compose", log_msg)
            return False
        # Convert current configuration to JSON and back to encode it similarly
        # to the old one
        config = json.loads(json.dumps(compose.conf))
        for opt in compose.conf:
            # Skip a selection of options: these affect what packages can be
            # included, which we explicitly check later on.
            config_whitelist = set(
                [
                    "gather_lookaside_repos",
                    "pkgset_koji_builds",
                    "pkgset_koji_scratch_tasks",
                    "pkgset_koji_module_builds",
                ]
            )
            # Skip irrelevant options
            config_whitelist.update(["osbs", "osbuild"])
            if opt in config_whitelist:
                continue

            if old_config.get(opt) != config.get(opt):
                self.pool.log_info("%s - option %s differs", log_msg, opt)
                return False

        old_metadata = load_old_metadata(compose, variant, arch, config_hash)
        if not old_metadata:
            self.pool.log_info("%s - no old metadata found", log_msg)
            return False

        # Test if volume ID matches - volid can be generated dynamically based on
        # other values, and could change even if nothing else is different.
        if opts.volid != old_metadata["opts"]["volid"]:
            self.pool.log_info("%s - volume ID differs", log_msg)
            return False

        # Compare packages on the ISO.
        if compare_packages(
            old_metadata["opts"]["graft_points"],
            opts.graft_points,
        ):
            self.pool.log_info("%s - packages differ", log_msg)
            return False

        try:
            self.perform_reuse(
                compose,
                variant,
                arch,
                opts,
                old_metadata["opts"]["output_dir"],
                old_metadata["opts"]["iso_name"],
            )
            return True
        except Exception as exc:
            self.pool.log_error(
                "Error while reusing ISO for %s.%s: %s", variant, arch, exc
            )
            compose.traceback("extraiso-reuse-%s-%s-%s" % (variant, arch, config_hash))
            return False

    def perform_reuse(self, compose, variant, arch, opts, old_iso_dir, old_file_name):
        """
        Copy all related files from old compose to the new one. As a last step
        add the new image to metadata.
        """
        linker = OldFileLinker(self.pool._logger)
        old_iso_path = os.path.join(old_iso_dir, old_file_name)
        iso_path = os.path.join(opts.output_dir, opts.iso_name)
        try:
            # Hardlink ISO and manifest
            for suffix in ("", ".manifest"):
                linker.link(old_iso_path + suffix, iso_path + suffix)
            # Copy log files
            # The log file name includes filename of the image, so we need to
            # find old file with the old name, and rename it to the new name.
            log_file = compose.paths.log.log_file(arch, "extraiso-%s" % opts.iso_name)
            old_log_file = compose.paths.old_compose_path(
                compose.paths.log.log_file(arch, "extraiso-%s" % old_file_name)
            )
            linker.link(old_log_file, log_file)
            # Copy jigdo files
            if opts.jigdo_dir:
                old_jigdo_dir = compose.paths.old_compose_path(opts.jigdo_dir)
                for suffix in (".template", ".jigdo"):
                    linker.link(
                        os.path.join(old_jigdo_dir, old_file_name) + suffix,
                        os.path.join(opts.jigdo_dir, opts.iso_name) + suffix,
                    )
        except Exception:
            # A problem happened while linking some file, let's clean up
            # everything.
            linker.abort()
            raise

def save_reuse_metadata(compose, variant, arch, config_hash, opts, iso_path):
    """
    Save metadata for possible reuse of this image. The file name is determined
    from the hash of a configuration snippet for this image. Any change in that
    configuration in the next compose will change the hash and thus block reuse.
    """
    metadata = {"opts": opts._asdict()}
    metadata_path = compose.paths.log.log_file(
        arch,
        "extraiso-reuse-%s-%s-%s" % (variant.uid, arch, config_hash),
        ext="json",
    )
    with open(metadata_path, "w") as f:
        json.dump(metadata, f, indent=2)


def load_old_metadata(compose, variant, arch, config_hash):
    metadata_path = compose.paths.log.log_file(
        arch,
        "extraiso-reuse-%s-%s-%s" % (variant.uid, arch, config_hash),
        ext="json",
    )
    old_path = compose.paths.old_compose_path(metadata_path)
    try:
        return read_json_file(old_path)
    except Exception:
        return None


def get_extra_files(compose, variant, arch, extra_files):
    """Clone the configured files into a directory from where they can be

@@ -14,51 +14,50 @@
# along with this program; if not, see <https://gnu.org/licenses/>.


import glob
import json
import os
import shutil
import threading
import six
from six.moves import cPickle as pickle

from kobo.rpmlib import parse_nvra
from kobo.shortcuts import run
from productmd.rpms import Rpms
from pungi.phases.pkgset.common import get_all_arches
from six.moves import cPickle as pickle

try:
    from queue import Queue
except ImportError:
    from Queue import Queue

from pungi.wrappers.scm import get_file_from_scm
from .link import link_files
from ...wrappers.createrepo import CreaterepoWrapper
import pungi.wrappers.kojiwrapper

from pungi.compose import get_ordered_variant_uids
from pungi.arch import get_compatible_arches, split_name_arch
from pungi.compose import get_ordered_variant_uids
from pungi.module_util import (
    Modulemd,
    collect_module_defaults,
    collect_module_obsoletes,
)
from pungi.phases.base import PhaseBase
from pungi.util import get_arch_data, get_arch_variant_data, get_variant_data, makedirs
from pungi.module_util import Modulemd, collect_module_defaults
from pungi.phases.createrepo import add_modular_metadata
from pungi.util import get_arch_data, get_arch_variant_data, get_variant_data, makedirs
from pungi.wrappers.scm import get_file_from_scm

from ...wrappers.createrepo import CreaterepoWrapper
from .link import link_files


def get_gather_source(name):
    import pungi.phases.gather.sources
    from .source import GatherSourceContainer

    GatherSourceContainer.register_module(pungi.phases.gather.sources)
    container = GatherSourceContainer()
    return container["GatherSource%s" % name]
    return pungi.phases.gather.sources.ALL_SOURCES[name.lower()]


def get_gather_method(name):
    import pungi.phases.gather.methods
    from .method import GatherMethodContainer

    GatherMethodContainer.register_module(pungi.phases.gather.methods)
    container = GatherMethodContainer()
    return container["GatherMethod%s" % name]
    return pungi.phases.gather.methods.ALL_METHODS[name.lower()]


class GatherPhase(PhaseBase):

@@ -87,17 +86,34 @@ class GatherPhase(PhaseBase):
            if variant.modules:
                errors.append("Modular compose requires libmodulemd package.")

        # check whether variants from configuration value
        # 'variant_as_lookaside' are correct
        variant_as_lookaside = self.compose.conf.get("variant_as_lookaside", [])
        all_variants = self.compose.all_variants
        for (requiring, required) in variant_as_lookaside:

        # check whether variants from configuration value
        # 'variant_as_lookaside' are correct
        for requiring, required in variant_as_lookaside:
            if requiring in all_variants and required not in all_variants:
                errors.append(
                    "variant_as_lookaside: variant %r doesn't exist but is "
                    "required by %r" % (required, requiring)
                )

        # check whether variants from configuration value
        # 'variant_as_lookaside' have same architectures
        for requiring, required in variant_as_lookaside:
            if (
                requiring in all_variants
                and required in all_variants
                and not set(all_variants[requiring].arches).issubset(
                    set(all_variants[required].arches)
                )
            ):
                errors.append(
                    "variant_as_lookaside: architectures of variant '%s' "
                    "aren't subset of architectures of variant '%s'"
                    % (requiring, required)
                )

        if errors:
            raise ValueError("\n".join(errors))

@@ -178,27 +194,19 @@ def load_old_gather_result(compose, arch, variant):
        return None

    compose.log_info("Loading old GATHER phase results: %s", old_gather_result)
    try:
        with open(old_gather_result, "rb") as f:
            old_result = pickle.load(f)
            return old_result


def load_old_compose_config(compose):
    """
    Helper method to load Pungi config dump from old compose.
    """
    config_dump_full = compose.paths.log.log_file("global", "config-dump")
    config_dump_full = compose.paths.old_compose_path(config_dump_full)
    if not config_dump_full:
    except Exception as e:
        compose.log_debug(
            "Failed to load old GATHER phase results %s : %s"
            % (old_gather_result, str(e))
        )
        return None

    compose.log_info("Loading old config file: %s", config_dump_full)
    with open(config_dump_full, "r") as f:
        old_config = json.load(f)
    return old_config


def reuse_old_gather_packages(compose, arch, variant, package_sets):
def reuse_old_gather_packages(compose, arch, variant, package_sets, methods):
    """
    Tries to reuse `gather_packages` result from older compose.

@@ -206,6 +214,7 @@ def reuse_old_gather_packages(compose, arch, variant, package_sets):
    :param str arch: Architecture to reuse old gather data for.
    :param str variant: Variant to reuse old gather data for.
    :param list package_sets: List of package sets to gather packages from.
    :param str methods: Gather method.
    :return: Old `gather_packages` result or None if old result cannot be used.
    """
    log_msg = "Cannot reuse old GATHER phase results - %s"

@@ -218,38 +227,38 @@ def reuse_old_gather_packages(compose, arch, variant, package_sets):
        compose.log_info(log_msg % "no old gather results.")
        return

-    old_config = load_old_compose_config(compose)
+    old_config = compose.load_old_compose_config()
    if old_config is None:
        compose.log_info(log_msg % "no old compose config dump.")
        return

    # Do not reuse when required variant is not reused.
    if not hasattr(compose, "_gather_reused_variant_arch"):
        setattr(compose, "_gather_reused_variant_arch", [])
    variant_as_lookaside = compose.conf.get("variant_as_lookaside", [])
    for requiring, required in variant_as_lookaside:
        if (
            requiring == variant.uid
            and (required, arch) not in compose._gather_reused_variant_arch
        ):
            compose.log_info(
                log_msg % "variant %s as lookaside is not reused." % required
            )
            return

    # Do not reuse if there's external lookaside repo.
    with open(compose.paths.log.log_file("global", "config-dump"), "r") as f:
        config_dump = json.load(f)
    if config_dump.get("gather_lookaside_repos") or old_config.get(
        "gather_lookaside_repos"
    ):
        compose.log_info(log_msg % "there's external lookaside repo.")
        return

    # The dumps/loads is needed to convert all unicode strings to non-unicode ones.
    config = json.loads(json.dumps(compose.conf))
    for opt, value in old_config.items():
        # Gather lookaside repos are updated during the gather phase. Check that
        # the gather_lookaside_repos except the ones added are the same.
        if opt == "gather_lookaside_repos" and opt in config:
            value_to_compare = []
            # Filter out repourls which start with `compose.topdir` and also remove
            # their parent list in case it would be empty.
            for variant, per_arch_repos in config[opt]:
                per_arch_repos_to_compare = {}
                for arch, repourl in per_arch_repos.items():
                    # The gather_lookaside_repos config allows setting multiple repourls
                    # using list, but `_update_config` always uses strings. Therefore we
                    # only try to filter out string_types.
                    if not isinstance(repourl, six.string_types):
                        continue
                    if not repourl.startswith(compose.topdir):
                        per_arch_repos_to_compare[arch] = repourl
                if per_arch_repos_to_compare:
                    value_to_compare.append([variant, per_arch_repos_to_compare])
            if value != value_to_compare:
                compose.log_info(
                    log_msg
                    % ("compose configuration option gather_lookaside_repos changed.")
                )
                return
        if opt == "gather_lookaside_repos":
            continue

        # Skip checking for frequently changing configuration options which do *not*
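To make the comparison above concrete, here is a toy version of the filtering (paths and variant names invented). Repos living under the current compose tree were added by the gather phase itself, so they are dropped before the old and new configs are compared; only genuinely external repos can block reuse:

    compose_topdir = "/compose/latest"  # stand-in for compose.topdir
    lookaside_conf = [
        ["Server", {
            "x86_64": "/compose/latest/work/x86_64/repo",  # generated, ignored
            "s390x": "https://example.com/external/repo",  # external, compared
        }],
    ]

    value_to_compare = []
    for variant, per_arch in lookaside_conf:
        kept = {a: u for a, u in per_arch.items()
                if isinstance(u, str) and not u.startswith(compose_topdir)}
        if kept:
            value_to_compare.append([variant, kept])

    # value_to_compare == [['Server', {'s390x': 'https://example.com/external/repo'}]]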
@@ -378,6 +387,30 @@ def reuse_old_gather_packages(compose, arch, variant, package_sets):
        compose.log_info(log_msg % "some RPMs have been removed.")
        return

+    compose._gather_reused_variant_arch.append((variant.uid, arch))
+
+    # Copy old gather log for debugging
+    try:
+        if methods == "hybrid":
+            log_dir = compose.paths.log.topdir(arch, create_dir=False)
+            old_log_dir = compose.paths.old_compose_path(log_dir)
+            for log_file in glob.glob(
+                os.path.join(old_log_dir, "hybrid-depsolver-%s-iter-*" % variant)
+            ):
+                compose.log_info(
+                    "Copying old gather log %s to %s" % (log_file, log_dir)
+                )
+                shutil.copy2(log_file, log_dir)
+        else:
+            log_dir = os.path.dirname(
+                compose.paths.work.pungi_log(arch, variant, create_dir=False)
+            )
+            old_log_dir = compose.paths.old_compose_path(log_dir)
+            compose.log_info("Copying old gather log %s to %s" % (old_log_dir, log_dir))
+            shutil.copytree(old_log_dir, log_dir)
+    except Exception as e:
+        compose.log_warning("Copying old gather log failed: %s" % str(e))
+
    return result
@@ -404,7 +437,9 @@ def gather_packages(compose, arch, variant, package_sets, fulltree_excludes=None
    prepopulate = get_prepopulate_packages(compose, arch, variant)
    fulltree_excludes = fulltree_excludes or set()

-    reused_result = reuse_old_gather_packages(compose, arch, variant, package_sets)
+    reused_result = reuse_old_gather_packages(
+        compose, arch, variant, package_sets, methods
+    )
    if reused_result:
        result = reused_result
    elif methods == "hybrid":
@@ -434,9 +469,7 @@ def gather_packages(compose, arch, variant, package_sets, fulltree_excludes=None
        )

    else:
        for source_name in ("module", "comps", "json"):
            packages, groups, filter_packages = get_variant_packages(
                compose, arch, variant, source_name, package_sets
            )
@@ -507,7 +540,8 @@ def write_packages(compose, arch, variant, pkg_map, path_prefix):


def trim_packages(compose, arch, variant, pkg_map, parent_pkgs=None, remove_pkgs=None):
-    """Remove parent variant's packages from pkg_map <-- it gets modified in this function
+    """Remove parent variant's packages from pkg_map <-- it gets modified in
+    this function

    There are three cases where changes may happen:
@@ -540,7 +574,6 @@ def trim_packages(compose, arch, variant, pkg_map, parent_pkgs=None, remove_pkgs
    move_to_parent_pkgs = _mk_pkg_map()
    removed_pkgs = _mk_pkg_map()
    for pkg_type, pkgs in pkg_map.items():
-
        new_pkgs = []
        for pkg in pkgs:
            pkg_path = pkg["path"]
@@ -612,19 +645,39 @@ def _make_lookaside_repo(compose, variant, arch, pkg_map, package_sets=None):
            compose.paths.work.topdir(arch="global"), "download"
        )
        + "/",
-        "koji": lambda: pungi.wrappers.kojiwrapper.KojiWrapper(
-            compose.conf["koji_profile"]
-        ).koji_module.config.topdir.rstrip("/")
+        "koji": lambda: compose.conf.get(
+            "koji_cache",
+            pungi.wrappers.kojiwrapper.KojiWrapper(compose).koji_module.config.topdir,
+        ).rstrip("/")
        + "/",
+        "kojimock": lambda: pungi.wrappers.kojiwrapper.KojiMockWrapper(
+            compose,
+            get_all_arches(compose),
+        ).koji_module.config.topdir.rstrip("/")
+        + "/",
    }
    path_prefix = prefixes[compose.conf["pkgset_source"]]()
-    pkglist = compose.paths.work.lookaside_package_list(arch=arch, variant=variant)
-    with open(pkglist, "w") as f:
-        for packages in pkg_map[arch][variant.uid].values():
+
+    package_list = set()
+    for pkg_arch in pkg_map.keys():
+        try:
+            for pkg_type, packages in pkg_map[pkg_arch][variant.uid].items():
+                # We want all packages for current arch, and SRPMs for any
+                # arch. Ultimately there will only be one source repository, so
+                # we need a union of all SRPMs.
+                if pkg_type == "srpm" or pkg_arch == arch:
+                    for pkg in packages:
+                        pkg = pkg["path"]
+                        if path_prefix and pkg.startswith(path_prefix):
+                            pkg = pkg[len(path_prefix) :]
+                        package_list.add(pkg)
+        except KeyError:
+            raise RuntimeError(
+                "Variant '%s' does not have architecture " "'%s'!" % (variant, pkg_arch)
+            )
+
+    pkglist = compose.paths.work.lookaside_package_list(arch=arch, variant=variant)
+    with open(pkglist, "w") as f:
+        for pkg in sorted(package_list):
+            f.write("%s\n" % pkg)

    cr = CreaterepoWrapper(compose.conf["createrepo_c"])
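A toy run of the prefix stripping above (the koji topdir value is an assumption): paths under the prefix become relative, so the lookaside package list stays valid regardless of where the koji volume is mounted.

    path_prefix = "/mnt/koji/"
    pkg = "/mnt/koji/packages/bash/5.1.8/2.el9/x86_64/bash-5.1.8-2.el9.x86_64.rpm"
    if path_prefix and pkg.startswith(path_prefix):
        pkg = pkg[len(path_prefix):]
    # pkg == 'packages/bash/5.1.8/2.el9/x86_64/bash-5.1.8-2.el9.x86_64.rpm'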
@@ -661,6 +714,8 @@ def _make_lookaside_repo(compose, variant, arch, pkg_map, package_sets=None):
    collect_module_defaults(
        defaults_dir, module_names, mod_index, overrides_dir=overrides_dir
    )
+    obsoletes_dir = compose.paths.work.module_obsoletes_dir()
+    mod_index = collect_module_obsoletes(obsoletes_dir, module_names, mod_index)

    log_file = compose.paths.log.log_file(
        arch, "lookaside_repo_modules_%s" % (variant.uid)
@@ -736,6 +791,10 @@ def _gather_variants(
            try:
                que.put((arch, gather_packages(*args, **kwargs)))
            except Exception as exc:
+                compose.log_error(
+                    "Error in gathering for %s.%s: %s", variant, arch, exc
+                )
+                compose.traceback("gather-%s-%s" % (variant, arch))
                errors.put(exc)

    # Run gather_packages() in parallel with multi threads and store
@@ -14,15 +14,6 @@
# along with this program; if not, see <https://gnu.org/licenses/>.


-import kobo.plugins
-
-
-class GatherMethodBase(kobo.plugins.Plugin):
+class GatherMethodBase(object):
    def __init__(self, compose):
        self.compose = compose
-
-
-class GatherMethodContainer(kobo.plugins.PluginContainer):
-    @classmethod
-    def normalize_name(cls, name):
-        return name.lower()
@@ -0,0 +1,24 @@
# -*- coding: utf-8 -*-

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.

from .method_deps import GatherMethodDeps
from .method_nodeps import GatherMethodNodeps
from .method_hybrid import GatherMethodHybrid

ALL_METHODS = {
    "deps": GatherMethodDeps,
    "nodeps": GatherMethodNodeps,
    "hybrid": GatherMethodHybrid,
}
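With the kobo.plugins machinery removed, gather methods are now resolved through the plain ALL_METHODS mapping above. A minimal sketch of how a caller might do the lookup; only ALL_METHODS comes from the new module, the helper name is hypothetical:

    from pungi.phases.gather.methods import ALL_METHODS

    def get_gather_method(name):
        # Map a config value such as "hybrid" to its class, mirroring the
        # case-insensitive normalize_name() of the old plugin container.
        try:
            return ALL_METHODS[name.lower()]
        except KeyError:
            raise ValueError("Unknown gather method: %r" % name)

    # method = get_gather_method("hybrid")(compose)  # instantiated as before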
@@ -15,6 +15,7 @@

import os
+import shutil

from kobo.shortcuts import run
from kobo.pkgset import SimpleRpmWrapper, RpmWrapper

@@ -31,8 +32,6 @@ import pungi.phases.gather.method


class GatherMethodDeps(pungi.phases.gather.method.GatherMethodBase):
-    enabled = True
-
    def __call__(
        self,
        arch,
@@ -243,8 +242,19 @@ def resolve_deps(compose, arch, variant, source_name=None):
    )
    # Use temp working directory as workaround for
    # https://bugzilla.redhat.com/show_bug.cgi?id=795137
-    with temp_dir(prefix="pungi_") as tmp_dir:
-        run(cmd, logfile=pungi_log, show_cmd=True, workdir=tmp_dir, env=os.environ)
+    with temp_dir(prefix="pungi_") as work_dir:
+        run(cmd, logfile=pungi_log, show_cmd=True, workdir=work_dir, env=os.environ)
+
+    # Clean up tmp dir
+    # Workaround for rpm not honoring sgid bit which only appears when yum is used.
+    yumroot_dir = os.path.join(tmp_dir, "work", arch, "yumroot")
+    if os.path.isdir(yumroot_dir):
+        try:
+            shutil.rmtree(yumroot_dir)
+        except Exception as e:
+            compose.log_warning(
+                "Failed to clean up tmp dir: %s %s" % (yumroot_dir, str(e))
+            )

    with open(pungi_log, "r") as f:
        packages, broken_deps, missing_comps_pkgs = pungi_wrapper.parse_log(f)
@@ -47,9 +47,15 @@ class FakePackage(object):

    @property
    def files(self):
-        return [
-            os.path.join(dirname, basename) for (_, dirname, basename) in self.pkg.files
-        ]
+        paths = []
+        # createrepo_c.Package.files is a tuple, but its length differs across
+        # versions. The constants define index at which the related value is
+        # located.
+        for entry in self.pkg.files:
+            paths.append(
+                os.path.join(entry[cr.FILE_ENTRY_PATH], entry[cr.FILE_ENTRY_NAME])
+            )
+        return paths

    @property
    def provides(self):

@@ -60,8 +66,6 @@ class FakePackage(object):


class GatherMethodHybrid(pungi.phases.gather.method.GatherMethodBase):
-    enabled = True
-
    def __init__(self, *args, **kwargs):
        super(GatherMethodHybrid, self).__init__(*args, **kwargs)
        self.package_maps = {}
@@ -351,8 +355,11 @@ class GatherMethodHybrid(pungi.phases.gather.method.GatherMethodBase):

        # There are two ways how the debuginfo package can be named. We
        # want to get them all.
-        for pattern in ["%s-debuginfo", "%s-debugsource"]:
-            debuginfo_name = pattern % pkg.name
+        source_name = kobo.rpmlib.parse_nvra(pkg.rpm_sourcerpm)["name"]
+        for debuginfo_name in [
+            "%s-debuginfo" % pkg.name,
+            "%s-debugsource" % source_name,
+        ]:
            debuginfo = self._get_debuginfo(debuginfo_name, pkg_arch)
            for dbg in debuginfo:
                # For each debuginfo package that matches on name and
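The practical effect of the change above (package names invented): a binary package whose source RPM has a different name now finds its -debugsource package, since debugsource packages are named after the source RPM, not the subpackage.

    import kobo.rpmlib

    pkg_name = "openssl-libs"                   # hypothetical binary package
    sourcerpm = "openssl-3.0.7-1.el9.src.rpm"   # its source RPM
    source_name = kobo.rpmlib.parse_nvra(sourcerpm)["name"]  # "openssl"

    candidates = ["%s-debuginfo" % pkg_name, "%s-debugsource" % source_name]
    # ['openssl-libs-debuginfo', 'openssl-debugsource'] -- the old code would
    # have looked for the nonexistent 'openssl-libs-debugsource' instead.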
@@ -501,6 +508,27 @@ def _make_result(paths):
    return [{"path": path, "flags": []} for path in sorted(paths)]


+def get_repo_packages(path):
+    """Extract file names of all packages in the given repository."""
+
+    packages = set()
+
+    def callback(pkg):
+        packages.add(os.path.basename(pkg.location_href))
+
+    repomd = os.path.join(path, "repodata/repomd.xml")
+    with as_local_file(repomd) as url_:
+        repomd = cr.Repomd(url_)
+    for rec in repomd.records:
+        if rec.type != "primary":
+            continue
+        record_url = os.path.join(path, rec.location_href)
+        with as_local_file(record_url) as url_:
+            cr.xml_parse_primary(url_, pkgcb=callback, do_files=False)
+
+    return packages
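A hedged usage sketch for the new helper; the repository path is invented, must contain a repodata/ directory, and the module path assumes the file lives at pungi/phases/gather/methods/method_hybrid.py:

    from pungi.phases.gather.methods.method_hybrid import get_repo_packages

    pkgs = get_repo_packages("/mnt/compose/Server/x86_64/os")
    # e.g. {'bash-5.1.8-2.el9.x86_64.rpm', 'glibc-2.34-60.el9.x86_64.rpm', ...}
    # Only basenames are collected, which is what expand_packages now compares.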


def expand_packages(nevra_to_pkg, lookasides, nvrs, filter_packages):
    """For each package add source RPM."""
    # This will serve as the final result. We collect sets of paths to the
@@ -511,25 +539,16 @@ def expand_packages(nevra_to_pkg, lookasides, nvrs, filter_packages):

    filters = set(filter_packages)

-    # Collect list of all packages in lookaside. These will not be added to the
-    # result. Fus handles this in part: if a package is explicitly mentioned as
-    # input (which can happen with comps group expansion), it will be in the
-    # output even if it's in lookaside.
    lookaside_packages = set()
    for repo in lookasides:
-        md = cr.Metadata()
-        md.locate_and_load_xml(repo)
-        for key in md.keys():
-            pkg = md.get(key)
-            url = os.path.join(pkg.location_base or repo, pkg.location_href)
-            # Strip file:// prefix
-            lookaside_packages.add(url[7:])
+        lookaside_packages.update(get_repo_packages(repo))

    for nvr, pkg_arch, flags in nvrs:
        pkg = nevra_to_pkg["%s.%s" % (nvr, pkg_arch)]
-        if pkg.file_path in lookaside_packages:
-            # Package is in lookaside, don't add it and ignore sources and
-            # debuginfo too.
+        if os.path.basename(pkg.file_path) in lookaside_packages:
+            # Fus can return lookaside package in output if the package is
+            # explicitly listed as input. This can happen during comps
+            # expansion.
            continue
        if pkg_is_debug(pkg):
            debuginfo.add(pkg.file_path)
@@ -542,7 +561,7 @@ def expand_packages(nevra_to_pkg, lookasides, nvrs, filter_packages):
            if (srpm.name, "src") in filters:
                # Filtered package, skipping
                continue
-            if srpm.file_path not in lookaside_packages:
+            if os.path.basename(srpm.file_path) not in lookaside_packages:
                srpms.add(srpm.file_path)
        except KeyError:
            # Didn't find source RPM. This should be logged
@@ -28,8 +28,6 @@ from kobo.pkgset import SimpleRpmWrapper, RpmWrapper


class GatherMethodNodeps(pungi.phases.gather.method.GatherMethodBase):
-    enabled = True
-
    def __call__(self, arch, variant, *args, **kwargs):
        fname = "gather-nodeps-%s" % variant.uid
        if self.source_name:
@@ -14,15 +14,6 @@
# along with this program; if not, see <https://gnu.org/licenses/>.


-import kobo.plugins
-
-
-class GatherSourceBase(kobo.plugins.Plugin):
+class GatherSourceBase(object):
    def __init__(self, compose):
        self.compose = compose
-
-
-class GatherSourceContainer(kobo.plugins.PluginContainer):
-    @classmethod
-    def normalize_name(cls, name):
-        return name.lower()
@@ -0,0 +1,26 @@
# -*- coding: utf-8 -*-

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.

from .source_comps import GatherSourceComps
from .source_json import GatherSourceJson
from .source_module import GatherSourceModule
from .source_none import GatherSourceNone

ALL_SOURCES = {
    "comps": GatherSourceComps,
    "json": GatherSourceJson,
    "module": GatherSourceModule,
    "none": GatherSourceNone,
}
@@ -30,8 +30,6 @@ import pungi.phases.gather.source


class GatherSourceComps(pungi.phases.gather.source.GatherSourceBase):
-    enabled = True
-
    def __call__(self, arch, variant):
        groups = set()
        if not self.compose.conf.get("comps_file"):
@@ -32,30 +32,31 @@ set([(rpm_name, rpm_arch or None)])

import json
+import os

import pungi.phases.gather.source


class GatherSourceJson(pungi.phases.gather.source.GatherSourceBase):
-    enabled = True
-
    def __call__(self, arch, variant):
        json_path = self.compose.conf.get("gather_source_mapping")
        if not json_path:
            return set(), set()
-        with open(json_path, "r") as f:
+        with open(os.path.join(self.compose.config_dir, json_path), "r") as f:
            mapping = json.load(f)

        packages = set()
        if variant is None:
            # get all packages for all variants
            for variant_uid in mapping:
-                for pkg_name, pkg_arches in mapping[variant_uid][arch].items():
+                for pkg_name, pkg_arches in mapping[variant_uid].get(arch, {}).items():
                    for pkg_arch in pkg_arches:
                        packages.add((pkg_name, pkg_arch))
        else:
            # get packages for a particular variant
-            for pkg_name, pkg_arches in mapping[variant.uid][arch].items():
+            for pkg_name, pkg_arches in (
+                mapping.get(variant.uid, {}).get(arch, {}).items()
+            ):
                for pkg_arch in pkg_arches:
                    packages.add((pkg_name, pkg_arch))
        return packages, set()
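For reference, the gather_source_mapping file that this source now opens relative to the compose's config directory has roughly this shape (variant, arch, and package names invented), shown as the structure json.load returns:

    mapping = {
        "Server": {
            "x86_64": {
                "bash": ["x86_64"],
                "glibc": ["x86_64", "i686"],
            }
            # no "s390x" key: with the .get(arch, {}) fallbacks above this
            # now yields no packages instead of raising KeyError
        }
    }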
@@ -26,8 +26,6 @@ import pungi.phases.gather.source


class GatherSourceModule(pungi.phases.gather.source.GatherSourceBase):
-    enabled = True
-
    def __call__(self, arch, variant):
        groups = set()
        packages = set()
@@ -29,7 +29,5 @@ import pungi.phases.gather.source


class GatherSourceNone(pungi.phases.gather.source.GatherSourceBase):
-    enabled = True
-
    def __call__(self, arch, variant):
        return set(), set()
@@ -1,18 +1,22 @@
# -*- coding: utf-8 -*-

import copy
+import hashlib
+import json
import os
+import shutil
+import time
from kobo import shortcuts

from pungi.util import makedirs, get_mtime, get_file_size, failable, log_failed_task
-from pungi.util import translate_path, get_repo_urls, version_generator
+from pungi.util import as_local_file, translate_path, get_repo_urls, version_generator
from pungi.phases import base
from pungi.linker import Linker
from pungi.wrappers.kojiwrapper import KojiWrapper
from kobo.threads import ThreadPool, WorkerThread
from kobo.shortcuts import force_list
from productmd.images import Image
+from productmd.rpms import Rpms


# This is a mapping from formats to file extensions. The format is what koji

@@ -21,6 +25,7 @@ from productmd.images import Image
# results will be pulled into the compose.
EXTENSIONS = {
+    "docker": ["tar.gz", "tar.xz"],
    "iso": ["iso"],
    "liveimg-squashfs": ["liveimg.squashfs"],
    "qcow": ["qcow"],
    "qcow2": ["qcow2"],

@@ -35,6 +40,7 @@ EXTENSIONS = {
    "vdi": ["vdi"],
    "vmdk": ["vmdk"],
    "vpc": ["vhd"],
+    "vhd-compressed": ["vhd.gz", "vhd.xz"],
    "vsphere-ova": ["vsphere.ova"],
}
@@ -46,9 +52,10 @@ class ImageBuildPhase(

    name = "image_build"

-    def __init__(self, compose):
+    def __init__(self, compose, buildinstall_phase=None):
        super(ImageBuildPhase, self).__init__(compose)
        self.pool = ThreadPool(logger=self.logger)
+        self.buildinstall_phase = buildinstall_phase

    def _get_install_tree(self, image_conf, variant):
        """
@@ -117,6 +124,7 @@ class ImageBuildPhase(
            # prevent problems in next iteration where the original
            # value is needed.
            image_conf = copy.deepcopy(image_conf)
+            original_image_conf = copy.deepcopy(image_conf)

            # image_conf is passed to get_image_build_cmd as dict
@@ -167,6 +175,7 @@ class ImageBuildPhase(
            image_conf["image-build"]["can_fail"] = sorted(can_fail)

            cmd = {
+                "original_image_conf": original_image_conf,
                "image_conf": image_conf,
                "conf_file": self.compose.paths.work.image_build_conf(
                    image_conf["image-build"]["variant"],
@@ -182,7 +191,7 @@ class ImageBuildPhase(
                "scratch": image_conf["image-build"].pop("scratch", False),
            }
            self.pool.add(CreateImageBuildThread(self.pool))
-            self.pool.queue_put((self.compose, cmd))
+            self.pool.queue_put((self.compose, cmd, self.buildinstall_phase))

        self.pool.start()
@@ -192,7 +201,7 @@ class CreateImageBuildThread(WorkerThread):
        self.pool.log_error("CreateImageBuild failed.")

    def process(self, item, num):
-        compose, cmd = item
+        compose, cmd, buildinstall_phase = item
        variant = cmd["image_conf"]["image-build"]["variant"]
        subvariant = cmd["image_conf"]["image-build"].get("subvariant", variant.uid)
        self.failable_arches = cmd["image_conf"]["image-build"].get("can_fail", "")
@@ -208,22 +217,54 @@ class CreateImageBuildThread(WorkerThread):
            subvariant,
            logger=self.pool._logger,
        ):
-            self.worker(num, compose, variant, subvariant, cmd)
+            self.worker(num, compose, variant, subvariant, cmd, buildinstall_phase)

-    def worker(self, num, compose, variant, subvariant, cmd):
+    def worker(self, num, compose, variant, subvariant, cmd, buildinstall_phase):
        arches = cmd["image_conf"]["image-build"]["arches"]
        formats = "-".join(cmd["image_conf"]["image-build"]["format"])
        dash_arches = "-".join(arches)
        log_file = compose.paths.log.log_file(
            dash_arches, "imagebuild-%s-%s-%s" % (variant.uid, subvariant, formats)
        )
+        metadata_file = log_file[:-4] + ".reuse.json"
+
+        external_repo_checksum = {}
+        try:
+            for repo in cmd["original_image_conf"]["image-build"]["repo"]:
+                if repo in compose.all_variants:
+                    continue
+                with as_local_file(
+                    os.path.join(repo, "repodata/repomd.xml")
+                ) as filename:
+                    with open(filename, "rb") as f:
+                        external_repo_checksum[repo] = hashlib.sha256(
+                            f.read()
+                        ).hexdigest()
+        except Exception as e:
+            external_repo_checksum = None
+            self.pool.log_info(
+                "Can't calculate checksum of repomd.xml of external repo - %s" % str(e)
+            )
+
+        if self._try_to_reuse(
+            compose,
+            variant,
+            subvariant,
+            metadata_file,
+            log_file,
+            cmd,
+            external_repo_checksum,
+            buildinstall_phase,
+        ):
+            return
+
        msg = (
            "Creating image (formats: %s, arches: %s, variant: %s, subvariant: %s)"
            % (formats, dash_arches, variant, subvariant)
        )
        self.pool.log_info("[BEGIN] %s" % msg)

-        koji_wrapper = KojiWrapper(compose.conf["koji_profile"])
+        koji_wrapper = KojiWrapper(compose)

        # writes conf file for koji image-build
        self.pool.log_info(
@@ -275,6 +316,22 @@ class CreateImageBuildThread(WorkerThread):
                )
                break

+        self._link_images(compose, variant, subvariant, cmd, image_infos)
+        self._write_reuse_metadata(
+            compose, metadata_file, cmd, image_infos, external_repo_checksum
+        )
+
+        self.pool.log_info("[DONE ] %s (task id: %s)" % (msg, output["task_id"]))
+
+    def _link_images(self, compose, variant, subvariant, cmd, image_infos):
+        """Link images to compose and update image manifest.
+
+        :param Compose compose: Current compose.
+        :param Variant variant: Current variant.
+        :param str subvariant:
+        :param dict cmd: Dict of params for image-build.
+        :param dict image_infos: Dict contains image info.
+        """
        # The usecase here is that you can run koji image-build with multiple --format
        # It's ok to do it serialized since we're talking about max 2 images per single
        # image_build record
@@ -289,7 +346,9 @@ class CreateImageBuildThread(WorkerThread):
            # let's not change filename of koji outputs
            image_dest = os.path.join(image_dir, os.path.basename(image_info["path"]))

-            src_file = os.path.realpath(image_info["path"])
+            src_file = compose.koji_downloader.get_file(
+                os.path.realpath(image_info["path"])
+            )
            linker.link(src_file, image_dest, link_type=cmd["link_type"])

            # Update image manifest
@@ -308,4 +367,160 @@ class CreateImageBuildThread(WorkerThread):
            setattr(img, "deliverable", "image-build")
            compose.im.add(variant=variant.uid, arch=image_info["arch"], image=img)

-        self.pool.log_info("[DONE ] %s (task id: %s)" % (msg, output["task_id"]))
+    def _try_to_reuse(
+        self,
+        compose,
+        variant,
+        subvariant,
+        metadata_file,
+        log_file,
+        cmd,
+        external_repo_checksum,
+        buildinstall_phase,
+    ):
+        """Try to reuse images from old compose.
+
+        :param Compose compose: Current compose.
+        :param Variant variant: Current variant.
+        :param str subvariant:
+        :param str metadata_file: Path to reuse metadata file.
+        :param str log_file: Path to log file.
+        :param dict cmd: Dict of params for image-build.
+        :param dict external_repo_checksum: Dict contains checksum of repomd.xml
+            or None if can't get checksum.
+        :param BuildinstallPhase buildinstall_phase: buildinstall phase of
+            current compose.
+        """
+        log_msg = "Cannot reuse old image_build phase results - %s"
+        if not compose.conf["image_build_allow_reuse"]:
+            self.pool.log_info(
+                log_msg % "reuse of old image_build results is disabled."
+            )
+            return False
+
+        if external_repo_checksum is None:
+            self.pool.log_info(
+                log_msg % "Can't ensure that external repo is not changed."
+            )
+            return False
+
+        old_metadata_file = compose.paths.old_compose_path(metadata_file)
+        if not old_metadata_file:
+            self.pool.log_info(log_msg % "Can't find old reuse metadata file")
+            return False
+
+        try:
+            old_metadata = self._load_reuse_metadata(old_metadata_file)
+        except Exception as e:
+            self.pool.log_info(
+                log_msg % "Can't load old reuse metadata file: %s" % str(e)
+            )
+            return False
+
+        if old_metadata["cmd"]["original_image_conf"] != cmd["original_image_conf"]:
+            self.pool.log_info(log_msg % "image_build config changed")
+            return False
+
+        # Make sure external repo does not change
+        if (
+            old_metadata["external_repo_checksum"] is None
+            or old_metadata["external_repo_checksum"] != external_repo_checksum
+        ):
+            self.pool.log_info(log_msg % "External repo may be changed")
+            return False
+
+        # Make sure buildinstall phase is reused
+        for arch in cmd["image_conf"]["image-build"]["arches"]:
+            if buildinstall_phase and not buildinstall_phase.reused(variant, arch):
+                self.pool.log_info(log_msg % "buildinstall phase changed")
+                return False
+
+        # Make sure packages in variant did not change
+        rpm_manifest_file = compose.paths.compose.metadata("rpms.json")
+        rpm_manifest = Rpms()
+        rpm_manifest.load(rpm_manifest_file)
+
+        old_rpm_manifest_file = compose.paths.old_compose_path(rpm_manifest_file)
+        old_rpm_manifest = Rpms()
+        old_rpm_manifest.load(old_rpm_manifest_file)
+
+        for repo in cmd["original_image_conf"]["image-build"]["repo"]:
+            if repo not in compose.all_variants:
+                # External repos are checked using other logic.
+                continue
+            for arch in cmd["image_conf"]["image-build"]["arches"]:
+                if (
+                    rpm_manifest.rpms[variant.uid][arch]
+                    != old_rpm_manifest.rpms[variant.uid][arch]
+                ):
+                    self.pool.log_info(
+                        log_msg % "Packages in %s.%s changed." % (variant.uid, arch)
+                    )
+                    return False
+
+        self.pool.log_info(
+            "Reusing images from old compose for variant %s" % variant.uid
+        )
+        try:
+            self._link_images(
+                compose, variant, subvariant, cmd, old_metadata["image_infos"]
+            )
+        except Exception as e:
+            self.pool.log_info(log_msg % "Can't link images %s" % str(e))
+            return False
+
+        old_log_file = compose.paths.old_compose_path(log_file)
+        try:
+            shutil.copy2(old_log_file, log_file)
+        except Exception as e:
+            self.pool.log_info(
+                log_msg % "Can't copy old log_file: %s %s" % (old_log_file, str(e))
+            )
+            return False
+
+        self._write_reuse_metadata(
+            compose,
+            metadata_file,
+            cmd,
+            old_metadata["image_infos"],
+            external_repo_checksum,
+        )
+
+        return True
+
+    def _write_reuse_metadata(
+        self, compose, metadata_file, cmd, image_infos, external_repo_checksum
+    ):
+        """Write metadata file.
+
+        :param Compose compose: Current compose.
+        :param str metadata_file: Path to reuse metadata file.
+        :param dict cmd: Dict of params for image-build.
+        :param dict image_infos: Dict contains image info.
+        :param dict external_repo_checksum: Dict contains checksum of repomd.xml
+            or None if can't get checksum.
+        """
+        msg = "Writing reuse metadata file: %s" % metadata_file
+        self.pool.log_info(msg)
+
+        cmd_copy = copy.deepcopy(cmd)
+        del cmd_copy["image_conf"]["image-build"]["variant"]
+
+        data = {
+            "cmd": cmd_copy,
+            "image_infos": image_infos,
+            "external_repo_checksum": external_repo_checksum,
+        }
+        try:
+            with open(metadata_file, "w") as f:
+                json.dump(data, f, indent=4)
+        except Exception as e:
+            self.pool.log_info("%s Failed: %s" % (msg, str(e)))
+
+    def _load_reuse_metadata(self, metadata_file):
+        """Load metadata file.
+
+        :param str metadata_file: Path to reuse metadata file.
+        """
+        with open(metadata_file, "r") as f:
+            return json.load(f)
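Taken together, _write_reuse_metadata stores everything _try_to_reuse later checks. A sketch of the resulting <log>.reuse.json, with all concrete values invented:

    reuse_metadata = {
        # the image-build command, minus the unserializable variant object
        "cmd": {
            "original_image_conf": {"image-build": {"repo": ["Server"]}},
            "image_conf": {"image-build": {"format": ["qcow2"]}},
        },
        # koji task results needed to re-link images without rebuilding
        "image_infos": [{"path": "/mnt/koji/.../disk.qcow2", "arch": "x86_64"}],
        # sha256 of each external repo's repomd.xml at build time
        "external_repo_checksum": {"https://example.com/repo": "<sha256 hexdigest>"},
    }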
@@ -3,6 +3,7 @@
import os
from kobo import shortcuts
from collections import defaultdict
+import threading

from .base import PhaseBase
from ..util import get_format_substs, get_file_size
@@ -68,6 +69,7 @@ class ImageChecksumPhase(PhaseBase):

    def run(self):
        topdir = self.compose.paths.compose.topdir()

        make_checksums(
            topdir,
            self.compose.im,
@@ -87,6 +89,8 @@ def _compute_checksums(
    checksum_types,
    base_checksum_name_gen,
    one_file,
+    results_lock,
+    cache_lock,
):
    for image in images:
        filename = os.path.basename(image.path)
@@ -96,14 +100,21 @@ def _compute_checksums(

        filesize = image.size or get_file_size(full_path)

+        cache_lock.acquire()
        if full_path not in cache:
+            cache_lock.release()
            # Source ISO is listed under each binary architecture. There's no
            # point in checksumming it twice, so we can just remember the
            # digest from first run..
-            cache[full_path] = shortcuts.compute_file_checksums(
-                full_path, checksum_types
-            )
+            checksum_value = shortcuts.compute_file_checksums(full_path, checksum_types)
+            with cache_lock:
+                cache[full_path] = checksum_value
+        else:
+            cache_lock.release()

+        with cache_lock:
            digests = cache[full_path]

        for checksum, digest in digests.items():
            # Update metadata with the checksum
            image.add_checksum(None, checksum, digest)
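The acquire/release choreography above is a check-then-compute pattern: the lock only guards the cache dict itself, while the expensive checksum runs unlocked, so two threads may occasionally checksum the same file twice (harmless, just redundant work). Distilled into a standalone sketch:

    import threading

    cache = {}
    cache_lock = threading.Lock()

    def cached_compute(key, compute):
        with cache_lock:            # cheap: only the membership test is locked
            hit = key in cache
        if not hit:
            value = compute(key)    # expensive work happens outside the lock
            with cache_lock:
                cache[key] = value
        with cache_lock:            # re-read under the lock before returning
            return cache[key]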
@@ -112,7 +123,10 @@ def _compute_checksums(
            checksum_filename = os.path.join(
                path, "%s.%sSUM" % (filename, checksum.upper())
            )
-            results[checksum_filename].add((filename, filesize, checksum, digest))
+            with results_lock:
+                results[checksum_filename].add(
+                    (filename, filesize, checksum, digest)
+                )

        if one_file:
            dirname = os.path.basename(path)
@@ -125,14 +139,23 @@ def _compute_checksums(
            checksum_filename = "%s%sSUM" % (base_checksum_name, checksum.upper())
            checksum_path = os.path.join(path, checksum_filename)

+            with results_lock:
                results[checksum_path].add((filename, filesize, checksum, digest))


def make_checksums(topdir, im, checksum_types, one_file, base_checksum_name_gen):
    results = defaultdict(set)
    cache = {}
+    threads = []
+    results_lock = threading.Lock()  # lock to synchronize access to the results dict.
+    cache_lock = threading.Lock()  # lock to synchronize access to the cache dict.

+    # create all worker threads
    for (variant, arch, path), images in get_images(topdir, im).items():
-        _compute_checksums(
+        threads.append(
+            threading.Thread(
+                target=_compute_checksums,
+                args=[
+                    results,
+                    cache,
+                    variant,

@@ -142,7 +165,16 @@ def make_checksums(topdir, im, checksum_types, one_file, base_checksum_name_gen)
                    checksum_types,
                    base_checksum_name_gen,
                    one_file,
+                    results_lock,
+                    cache_lock,
+                ],
+            )
+        )
+        threads[-1].start()
+
+    # wait for all worker threads to finish
+    for thread in threads:
+        thread.join()

    for file in results:
        dump_checksums(file, results[file])
@@ -0,0 +1,122 @@
# -*- coding: utf-8 -*-

import os
import re
from kobo.threads import ThreadPool, WorkerThread

from .base import ConfigGuardedPhase, PhaseLoggerMixin
from .. import util
from ..wrappers import kojiwrapper
from ..phases.osbs import add_metadata


class ImageContainerPhase(PhaseLoggerMixin, ConfigGuardedPhase):
    name = "image_container"

    def __init__(self, compose):
        super(ImageContainerPhase, self).__init__(compose)
        self.pool = ThreadPool(logger=self.logger)
        self.pool.metadata = {}

    def run(self):
        for variant in self.compose.get_variants():
            for conf in self.get_config_block(variant):
                self.pool.add(ImageContainerThread(self.pool))
                self.pool.queue_put((self.compose, variant, conf))

        self.pool.start()


class ImageContainerThread(WorkerThread):
    def process(self, item, num):
        compose, variant, config = item
        self.num = num
        with util.failable(
            compose,
            bool(config.pop("failable", None)),
            variant,
            "*",
            "osbs",
            logger=self.pool._logger,
        ):
            self.worker(compose, variant, config)

    def worker(self, compose, variant, config):
        msg = "Image container task for variant %s" % variant.uid
        self.pool.log_info("[BEGIN] %s" % msg)

        source = config.pop("url")
        target = config.pop("target")
        priority = config.pop("priority", None)

        config["yum_repourls"] = [
            self._get_repo(
                compose,
                variant,
                config.get("arch_override", "").split(),
                config.pop("image_spec"),
            )
        ]

        # Start task
        koji = kojiwrapper.KojiWrapper(compose)
        koji.login()
        task_id = koji.koji_proxy.buildContainer(
            source, target, config, priority=priority
        )

        koji.save_task_id(task_id)

        # Wait for it to finish and capture the output into log file (even
        # though there is not much there).
        log_dir = os.path.join(compose.paths.log.topdir(), "image_container")
        util.makedirs(log_dir)
        log_file = os.path.join(
            log_dir, "%s-%s-watch-task.log" % (variant.uid, self.num)
        )
        if koji.watch_task(task_id, log_file) != 0:
            raise RuntimeError(
                "ImageContainer: task %s failed: see %s for details"
                % (task_id, log_file)
            )

        add_metadata(variant, task_id, compose, config.get("scratch", False))

        self.pool.log_info("[DONE ] %s" % msg)

    def _get_repo(self, compose, variant, arches, image_spec):
        """
        Return a repo file that points baseurl to the image specified by
        image_spec.
        """
        image_paths = set()

        for arch in arches or compose.im.images[variant.uid].keys():
            for image in compose.im.images[variant.uid].get(arch, []):
                for key, value in image_spec.items():
                    if not re.match(value, getattr(image, key)):
                        break
                else:
                    image_paths.add(image.path.replace(arch, "$basearch"))

        if len(image_paths) != 1:
            raise RuntimeError(
                "%d images matched specification. Only one was expected."
                % len(image_paths)
            )

        image_path = image_paths.pop()
        absolute_path = os.path.join(compose.paths.compose.topdir(), image_path)

        repo_file = os.path.join(
            compose.paths.work.tmp_dir(None, variant),
            "image-container-%s-%s.repo" % (variant, self.num),
        )
        with open(repo_file, "w") as f:
            f.write("[image-to-include]\n")
            f.write("name=Location of image to embed\n")
            f.write("baseurl=%s\n" % util.translate_path(compose, absolute_path))
            f.write("enabled=0\n")
            f.write("gpgcheck=0\n")

        return util.translate_path(compose, repo_file)
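_get_repo therefore publishes a one-off repo file whose baseurl is the matched image. A hypothetical rendering of that file, assuming translate_path maps the compose tree to an HTTP mirror (the URL and image name are invented):

    [image-to-include]
    name=Location of image to embed
    baseurl=http://example.com/compose/Server/$basearch/iso/Server-disk.qcow2
    enabled=0
    gpgcheck=0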
@@ -16,6 +16,7 @@
import collections
import os
import glob
+import shutil

from kobo.shortcuts import run

@@ -72,6 +73,10 @@ class InitPhase(PhaseBase):
            self.compose.paths.work.module_defaults_dir(create_dir=False)
        )

+        # download module obsoletes
+        if self.compose.has_module_obsoletes:
+            write_module_obsoletes(self.compose)
+
        # write prepopulate file
        write_prepopulate_file(self.compose)
|
@ -160,12 +165,18 @@ def write_variant_comps(compose, arch, variant):
|
|||
run(cmd)
|
||||
|
||||
comps = CompsWrapper(comps_file)
|
||||
if variant.groups or variant.modules is not None or variant.type != "variant":
|
||||
# Filter groups if the variant has some, or it's a modular variant, or
|
||||
# is not a base variant.
|
||||
if (
|
||||
variant.groups
|
||||
or variant.modules is not None
|
||||
or variant.modular_koji_tags is not None
|
||||
or variant.type != "variant"
|
||||
):
|
||||
unmatched = comps.filter_groups(variant.groups)
|
||||
for grp in unmatched:
|
||||
compose.log_warning(UNMATCHED_GROUP_MSG % (variant.uid, arch, grp))
|
||||
|
||||
contains_all = not variant.groups and not variant.environments
|
||||
if compose.conf["comps_filter_environments"] and not contains_all:
|
||||
# We only want to filter environments if it's enabled by configuration
|
||||
|
@@ -218,12 +229,33 @@ def write_module_defaults(compose):
    )


+def write_module_obsoletes(compose):
+    scm_dict = compose.conf["module_obsoletes_dir"]
+    if isinstance(scm_dict, dict):
+        if scm_dict["scm"] == "file":
+            scm_dict["dir"] = os.path.join(compose.config_dir, scm_dict["dir"])
+    else:
+        scm_dict = os.path.join(compose.config_dir, scm_dict)
+
+    with temp_dir(prefix="moduleobsoletes_") as tmp_dir:
+        get_dir_from_scm(scm_dict, tmp_dir, compose=compose)
+        compose.log_debug("Writing module obsoletes")
+        shutil.copytree(
+            tmp_dir,
+            compose.paths.work.module_obsoletes_dir(create_dir=False),
+            ignore=shutil.ignore_patterns(".git"),
+        )
+
+
def validate_module_defaults(path):
-    """Make sure there are no conflicting defaults. Each module name can only
-    have one default stream.
+    """Make sure there are no conflicting defaults and every default can be loaded.
+    Each module name can only have one default stream.

    :param str path: directory with cloned module defaults
    """

+    defaults_num = len(glob.glob(os.path.join(path, "*.yaml")))
+
    seen_defaults = collections.defaultdict(set)

    for module_name, defaults in iter_module_defaults(path):
@@ -242,6 +274,11 @@ def validate_module_defaults(path):
            "There are duplicated module defaults:\n%s" % "\n".join(errors)
        )

+    # Make sure all defaults are valid, otherwise update_from_defaults_directory
+    # will return an empty object
+    if defaults_num != len(seen_defaults):
+        raise RuntimeError("Defaults contain an invalid default file")


def validate_comps(path):
    """Check that there are no whitespace issues in comps."""
@@ -117,7 +117,7 @@ class LiveImagesPhase(

            commands.append((cmd, variant, arch))

-        for (cmd, variant, arch) in commands:
+        for cmd, variant, arch in commands:
            self.pool.add(CreateLiveImageThread(self.pool))
            self.pool.queue_put((self.compose, cmd, variant, arch))
@@ -186,7 +186,7 @@ class CreateLiveImageThread(WorkerThread):
        )
        self.pool.log_info("[BEGIN] %s" % msg)

-        koji_wrapper = KojiWrapper(compose.conf["koji_profile"])
+        koji_wrapper = KojiWrapper(compose)
        _, version = compose.compose_id.rsplit("-", 1)
        name = cmd["name"] or imgname
        version = cmd["version"] or version
@@ -232,7 +232,7 @@ class CreateLiveImageThread(WorkerThread):
                "Got %d images from task %d, expected 1."
                % (len(image_path), output["task_id"])
            )
-        image_path = image_path[0]
+        image_path = compose.koji_downloader.get_file(image_path[0])
        filename = cmd.get("filename") or os.path.basename(image_path)
        destination = os.path.join(cmd["dest_dir"], filename)
        shutil.copy2(image_path, destination)
@@ -71,6 +71,7 @@ class LiveMediaPhase(PhaseLoggerMixin, ImageConfigMixin, ConfigGuardedPhase):
                "ksurl": self.get_ksurl(image_conf),
                "ksversion": image_conf.get("ksversion"),
                "scratch": image_conf.get("scratch", False),
+                "nomacboot": image_conf.get("nomacboot", False),
                "release": self.get_release(image_conf),
                "skip_tag": image_conf.get("skip_tag"),
                "name": name,
@@ -140,7 +141,7 @@ class LiveMediaThread(WorkerThread):
        )
        self.pool.log_info("[BEGIN] %s" % msg)

-        koji_wrapper = KojiWrapper(compose.conf["koji_profile"])
+        koji_wrapper = KojiWrapper(compose)
        cmd = self._get_cmd(koji_wrapper, config)

        log_file = self._get_log_file(compose, variant, subvariant, config)
@@ -181,7 +182,9 @@ class LiveMediaThread(WorkerThread):
            # let's not change filename of koji outputs
            image_dest = os.path.join(image_dir, os.path.basename(image_info["path"]))

-            src_file = os.path.realpath(image_info["path"])
+            src_file = compose.koji_downloader.get_file(
+                os.path.realpath(image_info["path"])
+            )
            linker.link(src_file, image_dest, link_type=link_type)

            # Update image manifest
@@ -1,24 +1,29 @@
# -*- coding: utf-8 -*-

import copy
import fnmatch
import json
import os
from kobo.threads import ThreadPool, WorkerThread
from kobo import shortcuts
+from productmd.rpms import Rpms
+from six.moves import configparser

from .base import ConfigGuardedPhase, PhaseLoggerMixin
from .. import util
from ..wrappers import kojiwrapper
+from ..wrappers.scm import get_file_from_scm


class OSBSPhase(PhaseLoggerMixin, ConfigGuardedPhase):
    name = "osbs"

-    def __init__(self, compose):
+    def __init__(self, compose, pkgset_phase, buildinstall_phase):
        super(OSBSPhase, self).__init__(compose)
        self.pool = ThreadPool(logger=self.logger)
        self.pool.metadata = {}
        self.pool.registries = {}
+        self.pool.pkgset_phase = pkgset_phase
+        self.pool.buildinstall_phase = buildinstall_phase

    def run(self):
        for variant in self.compose.get_variants():
@@ -28,15 +33,6 @@ class OSBSPhase(PhaseLoggerMixin, ConfigGuardedPhase):

        self.pool.start()

-    def dump_metadata(self):
-        """Create a file with image metadata if the phase actually ran."""
-        if self._skipped:
-            return
-        with open(self.compose.paths.compose.metadata("osbs.json"), "w") as f:
-            json.dump(
-                self.pool.metadata, f, indent=4, sort_keys=True, separators=(",", ": ")
-            )
-
    def request_push(self):
        """Store configuration data about where to push the created images and
        then send the same data to message bus.
@@ -87,8 +83,8 @@ class OSBSThread(WorkerThread):
    def worker(self, compose, variant, config):
        msg = "OSBS task for variant %s" % variant.uid
        self.pool.log_info("[BEGIN] %s" % msg)
-        koji = kojiwrapper.KojiWrapper(compose.conf["koji_profile"])
-        koji.login()
+
+        original_config = copy.deepcopy(config)

        # Start task
        source = config.pop("url")
@@ -104,86 +100,98 @@ class OSBSThread(WorkerThread):

        config["yum_repourls"] = repos

-        task_id = koji.koji_proxy.buildContainer(
-            source, target, config, priority=priority
-        )
-
-        # Wait for it to finish and capture the output into log file (even
-        # though there is not much there).
        log_dir = os.path.join(compose.paths.log.topdir(), "osbs")
        util.makedirs(log_dir)
        log_file = os.path.join(
            log_dir, "%s-%s-watch-task.log" % (variant.uid, self.num)
        )
+        reuse_file = log_file[:-4] + ".reuse.json"
+
+        try:
+            image_conf = self._get_image_conf(compose, original_config)
+        except Exception as e:
+            image_conf = None
+            self.pool.log_info(
+                "Can't get image-build.conf for variant: %s source: %s - %s"
+                % (variant.uid, source, str(e))
+            )
+
+        koji = kojiwrapper.KojiWrapper(compose)
+        koji.login()
+
+        task_id = self._try_to_reuse(
+            compose, variant, original_config, image_conf, reuse_file
+        )
+
+        if not task_id:
+            task_id = koji.koji_proxy.buildContainer(
+                source, target, config, priority=priority
+            )
+
+        koji.save_task_id(task_id)
+
+        # Wait for it to finish and capture the output into log file (even
+        # though there is not much there).
        if koji.watch_task(task_id, log_file) != 0:
            raise RuntimeError(
                "OSBS: task %s failed: see %s for details" % (task_id, log_file)
            )

        scratch = config.get("scratch", False)
-        nvr = self._add_metadata(variant, task_id, compose, scratch)
+        nvr, archive_ids = add_metadata(variant, task_id, compose, scratch)
        if nvr:
            registry = get_registry(compose, nvr, registry)
            if registry:
                self.pool.registries[nvr] = registry

+        self._write_reuse_metadata(
+            compose,
+            variant,
+            original_config,
+            image_conf,
+            task_id,
+            archive_ids,
+            reuse_file,
+        )
+
        self.pool.log_info("[DONE ] %s" % msg)

-    def _add_metadata(self, variant, task_id, compose, is_scratch):
-        # Create new Koji session. The task could take so long to finish that
-        # our session will expire. This second session does not need to be
-        # authenticated since it will only do reading operations.
-        koji = kojiwrapper.KojiWrapper(compose.conf["koji_profile"])
-
-        # Create metadata
-        metadata = {
-            "compose_id": compose.compose_id,
-            "koji_task": task_id,
-        }
-
-        result = koji.koji_proxy.getTaskResult(task_id)
-        if is_scratch:
-            metadata.update({"repositories": result["repositories"]})
-            # add a fake arch of 'scratch', so we can construct the metadata
-            # in same data structure as real builds.
-            self.pool.metadata.setdefault(variant.uid, {}).setdefault(
-                "scratch", []
-            ).append(metadata)
-            return None
-
-        else:
-            build_id = int(result["koji_builds"][0])
-            buildinfo = koji.koji_proxy.getBuild(build_id)
-            archives = koji.koji_proxy.listArchives(build_id)
-
-            nvr = "%(name)s-%(version)s-%(release)s" % buildinfo
-
-            metadata.update(
-                {
-                    "name": buildinfo["name"],
-                    "version": buildinfo["version"],
-                    "release": buildinfo["release"],
-                    "nvr": nvr,
-                    "creation_time": buildinfo["creation_time"],
-                }
-            )
-            for archive in archives:
-                data = {
-                    "filename": archive["filename"],
-                    "size": archive["size"],
-                    "checksum": archive["checksum"],
-                }
-                data.update(archive["extra"])
-                data.update(metadata)
-                arch = archive["extra"]["image"]["arch"]
-                self.pool.log_debug(
-                    "Created Docker base image %s-%s-%s.%s"
-                    % (metadata["name"], metadata["version"], metadata["release"], arch)
-                )
-                self.pool.metadata.setdefault(variant.uid, {}).setdefault(
-                    arch, []
-                ).append(data)
-            return nvr
+    def _get_image_conf(self, compose, config):
+        """Get image-build.conf from git repo.
+
+        :param Compose compose: Current compose.
+        :param dict config: One osbs config item of compose.conf["osbs"][$variant]
+        """
+        tmp_dir = compose.mkdtemp(prefix="osbs_")
+
+        url = config["url"].split("#")
+        if len(url) == 1:
+            url.append(config["git_branch"])
+
+        filename = "image-build.conf"
+        get_file_from_scm(
+            {
+                "scm": "git",
+                "repo": url[0],
+                "branch": url[1],
+                "file": [filename],
+            },
+            tmp_dir,
+        )
+
+        c = configparser.ConfigParser()
+        c.read(os.path.join(tmp_dir, filename))
+        return c
+
+    def _get_ksurl(self, image_conf):
+        """Get ksurl from image-build.conf"""
+        ksurl = image_conf.get("image-build", "ksurl")
+
+        if ksurl:
+            resolver = util.GitUrlResolver(offline=False)
+            return resolver(ksurl)
+        else:
+            return None

    def _get_repo(self, compose, repo, gpgkey=None):
        """
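_get_image_conf returns a ConfigParser over a file fetched from git; a small self-contained sketch of reading the two keys the reuse logic consumes (file content invented, Python 3 configparser):

    import configparser

    conf = configparser.ConfigParser()
    conf.read_string(
        "[image-build]\n"
        "ksurl = git://example.com/kickstarts.git#HEAD\n"
        "arches = x86_64,aarch64\n"
    )
    print(conf.get("image-build", "ksurl"))               # consumed by _get_ksurl
    print(conf.get("image-build", "arches").split(","))   # consumed by _try_to_reuse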
@@ -192,7 +200,7 @@ class OSBSThread(WorkerThread):
        file pointing to that location and return the URL to .repo file.
        """
        if "://" in repo:
-            return repo
+            return repo.replace("$COMPOSE_ID", compose.compose_id)

        if repo.startswith("/"):
            # The repo is an absolute path on the filesystem
@@ -211,6 +219,15 @@ class OSBSThread(WorkerThread):
            raise RuntimeError(
                "There is no variant %s to get repo from to pass to OSBS." % repo
            )
+        cts_url = compose.conf.get("cts_url", None)
+        if cts_url:
+            return os.path.join(
+                cts_url,
+                "api/1/composes",
+                compose.compose_id,
+                "repo/?variant=%s" % variant,
+            )

        repo_path = compose.paths.compose.repository(
            "$basearch", variant, create_dir=False
        )
@ -231,3 +248,209 @@ class OSBSThread(WorkerThread):
|
|||
f.write("gpgkey=%s\n" % gpgkey)
|
||||
|
||||
return util.translate_path(compose, repo_file)
|
||||
|
||||
def _try_to_reuse(self, compose, variant, config, image_conf, reuse_file):
|
||||
"""Try to reuse results of old compose.
|
||||
|
||||
:param Compose compose: Current compose.
|
||||
:param Variant variant: Current variant.
|
||||
:param dict config: One osbs config item of compose.conf["osbs"][$variant]
|
||||
:param ConfigParser image_conf: ConfigParser obj of image-build.conf.
|
||||
:param str reuse_file: Path to reuse metadata file
|
||||
"""
|
||||
log_msg = "Cannot reuse old osbs phase results - %s"
|
||||
|
||||
if not compose.conf["osbs_allow_reuse"]:
|
||||
self.pool.log_info(log_msg % "reuse of old osbs results is disabled.")
|
||||
return False
|
||||
|
||||
old_reuse_file = compose.paths.old_compose_path(reuse_file)
|
||||
if not old_reuse_file:
|
||||
self.pool.log_info(log_msg % "Can't find old reuse metadata file")
|
||||
return False
|
||||
|
||||
try:
|
||||
with open(old_reuse_file) as f:
|
||||
old_reuse_metadata = json.load(f)
|
||||
except Exception as e:
|
||||
self.pool.log_info(
|
||||
log_msg % "Can't load old reuse metadata file: %s" % str(e)
|
||||
)
|
||||
return False
|
||||
|
||||
if old_reuse_metadata["config"] != config:
|
||||
self.pool.log_info(log_msg % "osbs config changed")
|
||||
return False
|
||||
|
||||
if not image_conf:
|
||||
self.pool.log_info(log_msg % "Can't get image-build.conf")
|
||||
return False
|
||||
|
||||
# Make sure ksurl not change
|
||||
try:
|
||||
ksurl = self._get_ksurl(image_conf)
|
||||
except Exception as e:
|
||||
self.pool.log_info(
|
||||
log_msg % "Can't get ksurl from image-build.conf - %s" % str(e)
|
||||
)
|
||||
return False
|
||||
|
||||
if not old_reuse_metadata["ksurl"]:
|
||||
self.pool.log_info(
|
||||
log_msg % "Can't get ksurl from old compose reuse metadata."
|
||||
)
|
||||
return False
|
||||
|
||||
if ksurl != old_reuse_metadata["ksurl"]:
|
||||
self.pool.log_info(log_msg % "ksurl changed")
|
||||
return False
|
||||
|
||||
# Make sure buildinstall phase is reused
|
||||
try:
|
||||
arches = image_conf.get("image-build", "arches").split(",")
|
||||
except Exception as e:
|
||||
self.pool.log_info(
|
||||
log_msg % "Can't get arches from image-build.conf - %s" % str(e)
|
||||
)
|
||||
for arch in arches:
|
||||
if not self.pool.buildinstall_phase.reused(variant, arch):
|
||||
self.pool.log_info(
|
||||
log_msg % "buildinstall phase changed %s.%s" % (variant, arch)
|
||||
)
|
||||
return False
|
||||
|
||||
# Make sure rpms installed in image exists in current compose
|
||||
rpm_manifest_file = compose.paths.compose.metadata("rpms.json")
|
||||
rpm_manifest = Rpms()
|
||||
rpm_manifest.load(rpm_manifest_file)
|
||||
rpms = set()
|
||||
for variant in rpm_manifest.rpms:
|
||||
for arch in rpm_manifest.rpms[variant]:
|
||||
for src in rpm_manifest.rpms[variant][arch]:
|
||||
for nevra in rpm_manifest.rpms[variant][arch][src]:
|
||||
rpms.add(nevra)
|
||||
|
||||
for nevra in old_reuse_metadata["rpmlist"]:
|
||||
if nevra not in rpms:
|
||||
self.pool.log_info(
|
||||
log_msg % "%s does not exist in current compose" % nevra
|
||||
)
|
||||
return False
|
||||
|
||||
self.pool.log_info(
|
||||
"Reusing old OSBS task %d result" % old_reuse_file["task_id"]
|
||||
)
|
||||
return old_reuse_file["task_id"]
|
||||
|
||||
def _write_reuse_metadata(
|
||||
self, compose, variant, config, image_conf, task_id, archive_ids, reuse_file
|
||||
):
|
||||
"""Write metadata to file for reusing.
|
||||
|
||||
:param Compose compose: Current compose.
|
||||
:param Variant variant: Current variant.
|
||||
:param dict config: One osbs config item of compose.conf["osbs"][$variant]
|
||||
:param ConfigParser image_conf: ConfigParser obj of image-build.conf.
|
||||
:param int task_id: Koji task id of osbs task.
|
||||
:param list archive_ids: List of koji archive id
|
||||
:param str reuse_file: Path to reuse metadata file.
|
||||
"""
|
||||
msg = "Writing reuse metadata file %s" % reuse_file
|
||||
compose.log_info(msg)
|
||||
|
||||
rpmlist = set()
|
||||
koji = kojiwrapper.KojiWrapper(compose)
|
||||
for archive_id in archive_ids:
|
||||
rpms = koji.koji_proxy.listRPMs(imageID=archive_id)
|
||||
for item in rpms:
|
||||
if item["epoch"]:
|
||||
rpmlist.add(
|
||||
"%s:%s-%s-%s.%s"
|
||||
% (
|
||||
item["name"],
|
||||
item["epoch"],
|
||||
item["version"],
|
||||
item["release"],
|
||||
item["arch"],
|
||||
)
|
||||
)
|
||||
else:
|
||||
rpmlist.add("%s.%s" % (item["nvr"], item["arch"]))
|
||||
|
||||
try:
|
||||
ksurl = self._get_ksurl(image_conf)
|
||||
except Exception:
|
||||
ksurl = None
|
||||
|
||||
data = {
|
||||
"config": config,
|
||||
"ksurl": ksurl,
|
||||
"rpmlist": sorted(rpmlist),
|
||||
"task_id": task_id,
|
||||
}
|
||||
try:
|
||||
with open(reuse_file, "w") as f:
|
||||
json.dump(data, f, indent=4)
|
||||
except Exception as e:
|
||||
compose.log_info(msg + " failed - %s" % str(e))
|
||||
|
||||
|
||||
def add_metadata(variant, task_id, compose, is_scratch):
|
||||
"""Given a task ID, find details about the container and add it to global
|
||||
metadata."""
|
||||
# Create new Koji session. The task could take so long to finish that
|
||||
# our session will expire. This second session does not need to be
|
||||
# authenticated since it will only do reading operations.
|
||||
koji = kojiwrapper.KojiWrapper(compose)
|
||||
|
||||
# Create metadata
|
||||
metadata = {
|
||||
"compose_id": compose.compose_id,
|
||||
"koji_task": task_id,
|
||||
}
|
||||
|
||||
result = koji.koji_proxy.getTaskResult(task_id)
|
||||
if is_scratch:
|
||||
metadata.update({"repositories": result["repositories"]})
|
||||
# add a fake arch of 'scratch', so we can construct the metadata
|
||||
# in same data structure as real builds.
|
||||
compose.containers_metadata.setdefault(variant.uid, {}).setdefault(
|
||||
"scratch", []
|
||||
).append(metadata)
|
||||
return None, []
|
||||
|
||||
else:
|
||||
build_id = int(result["koji_builds"][0])
|
||||
buildinfo = koji.koji_proxy.getBuild(build_id)
|
||||
archives = koji.koji_proxy.listArchives(build_id, type="image")
|
||||
|
||||
nvr = "%(name)s-%(version)s-%(release)s" % buildinfo
|
||||
|
||||
metadata.update(
|
||||
{
|
||||
"name": buildinfo["name"],
|
||||
"version": buildinfo["version"],
|
||||
"release": buildinfo["release"],
|
||||
"nvr": nvr,
|
||||
"creation_time": buildinfo["creation_time"],
|
||||
}
|
||||
)
|
||||
archive_ids = []
|
||||
for archive in archives:
|
||||
data = {
|
||||
"filename": archive["filename"],
|
||||
"size": archive["size"],
|
||||
"checksum": archive["checksum"],
|
||||
}
|
||||
data.update(archive["extra"])
|
||||
data.update(metadata)
|
||||
arch = archive["extra"]["image"]["arch"]
|
||||
compose.log_debug(
|
||||
"Created Docker base image %s-%s-%s.%s"
|
||||
% (metadata["name"], metadata["version"], metadata["release"], arch)
|
||||
)
|
||||
compose.containers_metadata.setdefault(variant.uid, {}).setdefault(
|
||||
arch, []
|
||||
).append(data)
|
||||
archive_ids.append(archive["id"])
|
||||
return nvr, archive_ids
|
||||
|
|
|
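For orientation, the reuse file written by `_write_reuse_metadata` above is plain JSON with the four keys of the `data` dict, and `_try_to_reuse` simply compares those fields against the current compose. A minimal sketch of its shape (all values here are illustrative, not taken from a real compose):

import json

# Hypothetical contents of an osbs reuse metadata file; the keys mirror the
# `data` dict assembled in _write_reuse_metadata above.
example = {
    "config": {"url": "git://example.com/container.git#HEAD", "target": "f40-container"},
    "ksurl": "git://example.com/kickstarts.git?#abcdef0",
    "rpmlist": ["bash-5.2.26-1.fc40.x86_64", "glibc-2.39-2.fc40.x86_64"],
    "task_id": 1234567,
}
print(json.dumps(example, indent=4))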
@@ -27,6 +27,35 @@ class OSBuildPhase(
         arches = set(image_conf["arches"]) & arches
         return sorted(arches)

+    @staticmethod
+    def _get_repo_urls(compose, repos, arch="$basearch"):
+        """
+        Get list of repos with resolved repo URLs. Preserve repos defined
+        as dicts.
+        """
+        resolved_repos = []
+
+        for repo in repos:
+            if isinstance(repo, dict):
+                try:
+                    url = repo["baseurl"]
+                except KeyError:
+                    raise RuntimeError(
+                        "`baseurl` is required in repo dict %s" % str(repo)
+                    )
+                url = util.get_repo_url(compose, url, arch=arch)
+                if url is None:
+                    raise RuntimeError("Failed to resolve repo URL for %s" % str(repo))
+                repo["baseurl"] = url
+                resolved_repos.append(repo)
+            else:
+                repo = util.get_repo_url(compose, repo, arch=arch)
+                if repo is None:
+                    raise RuntimeError("Failed to resolve repo URL for %s" % repo)
+                resolved_repos.append(repo)
+
+        return resolved_repos
+
     def _get_repo(self, image_conf, variant):
         """
         Get a list of repos. First included are those explicitly listed in

@@ -38,7 +67,7 @@ class OSBuildPhase(
         if not variant.is_empty and variant.uid not in repos:
             repos.append(variant.uid)

-        return util.get_repo_urls(self.compose, repos, arch="$arch")
+        return OSBuildPhase._get_repo_urls(self.compose, repos, arch="$arch")

     def run(self):
         for variant in self.compose.get_variants():

@@ -96,7 +125,12 @@ class RunOSBuildThread(WorkerThread):
         self.can_fail = can_fail
         self.num = num
         with util.failable(
-            compose, can_fail, variant, "*", "osbuild", logger=self.pool._logger,
+            compose,
+            can_fail,
+            variant,
+            "*",
+            "osbuild",
+            logger=self.pool._logger,
         ):
             self.worker(
                 compose, variant, config, arches, version, release, target, repo

@@ -105,11 +139,26 @@ class RunOSBuildThread(WorkerThread):
     def worker(self, compose, variant, config, arches, version, release, target, repo):
         msg = "OSBuild task for variant %s" % variant.uid
         self.pool.log_info("[BEGIN] %s" % msg)
-        koji = kojiwrapper.KojiWrapper(compose.conf["koji_profile"])
+        koji = kojiwrapper.KojiWrapper(compose)
         koji.login()

+        ostree = {}
+        if config.get("ostree_url"):
+            ostree["url"] = config["ostree_url"]
+        if config.get("ostree_ref"):
+            ostree["ref"] = config["ostree_ref"]
+        if config.get("ostree_parent"):
+            ostree["parent"] = config["ostree_parent"]
+
         # Start task
         opts = {"repo": repo}
+        if ostree:
+            opts["ostree"] = ostree
+
+        upload_options = config.get("upload_options")
+        if upload_options:
+            opts["upload_options"] = upload_options
+
         if release:
             opts["release"] = release
         task_id = koji.koji_proxy.osbuildImage(

@@ -122,6 +171,8 @@ class RunOSBuildThread(WorkerThread):
             opts=opts,
         )

+        koji.save_task_id(task_id)
+
         # Wait for it to finish and capture the output into log file.
         log_dir = os.path.join(compose.paths.log.topdir(), "osbuild")
         util.makedirs(log_dir)

@@ -136,7 +187,7 @@ class RunOSBuildThread(WorkerThread):
         # Refresh koji session which may have timed out while the task was
         # running. Watching is done via a subprocess, so the session is
         # inactive.
-        koji = kojiwrapper.KojiWrapper(compose.conf["koji_profile"])
+        koji = kojiwrapper.KojiWrapper(compose)

         # Get build id via the task's result json data
         result = koji.koji_proxy.getTaskResult(task_id)

@@ -148,7 +199,7 @@ class RunOSBuildThread(WorkerThread):
         # architecture, but we don't verify that.
         build_info = koji.koji_proxy.getBuild(build_id)
         for archive in koji.koji_proxy.listArchives(buildID=build_id):
-            if archive["type_name"] not in config["image_types"]:
+            if archive["type_name"] not in EXTENSIONS:
                 # Ignore values that are not of required types.
                 continue

@@ -161,22 +212,36 @@ class RunOSBuildThread(WorkerThread):
             # image_dir is absolute path to which the image should be copied.
             # We also need the same path as relative to compose directory for
             # including in the metadata.
+            if archive["type_name"] == "iso":
+                # If the produced image is actually an ISO, it should go to
+                # iso/ subdirectory.
+                image_dir = compose.paths.compose.iso_dir(arch, variant)
+                rel_image_dir = compose.paths.compose.iso_dir(
+                    arch, variant, relative=True
+                )
+            else:
                 image_dir = compose.paths.compose.image_dir(variant) % {"arch": arch}
-                rel_image_dir = compose.paths.compose.image_dir(variant, relative=True) % {
-                    "arch": arch
-                }
+                rel_image_dir = compose.paths.compose.image_dir(
+                    variant, relative=True
+                ) % {"arch": arch}
             util.makedirs(image_dir)

             image_dest = os.path.join(image_dir, archive["filename"])

-            src_file = os.path.join(
-                koji.koji_module.pathinfo.imagebuild(build_info), archive["filename"]
-            )
+            src_file = compose.koji_downloader.get_file(
+                os.path.join(
+                    koji.koji_module.pathinfo.imagebuild(build_info),
+                    archive["filename"],
+                ),
+            )

             linker.link(src_file, image_dest, link_type=compose.conf["link_type"])

-            suffix = archive["filename"].rsplit(".", 1)[-1]
-            if suffix not in EXTENSIONS[archive["type_name"]]:
+            for suffix in EXTENSIONS[archive["type_name"]]:
+                if archive["filename"].endswith(suffix):
+                    break
+            else:
+                # No suffix matched.
                 raise RuntimeError(
                     "Failed to generate metadata. Format %s doesn't match type %s"
                     % (suffix, archive["type_name"])

@@ -184,7 +249,7 @@ class RunOSBuildThread(WorkerThread):

             # Update image manifest
             img = Image(compose.im)
-            img.type = archive["type_name"]
+            img.type = archive["type_name"] if archive["type_name"] != "iso" else "dvd"
             img.format = suffix
             img.path = os.path.join(rel_image_dir, archive["filename"])
             img.mtime = util.get_mtime(image_dest)
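The suffix lookup added above relies on Python's `for ... else` construct: the `else` branch runs only when the loop finished without hitting `break`. A standalone sketch of the same idiom (the EXTENSIONS contents here are illustrative, not the real table from the osbuild phase):

# Hypothetical mapping of image type to allowed filename suffixes.
EXTENSIONS = {"qcow2": ["qcow2"], "raw-xz": ["raw.xz"]}

filename = "disk.raw.xz"
type_name = "raw-xz"

for suffix in EXTENSIONS[type_name]:
    if filename.endswith(suffix):
        break
else:
    # Only reached when no suffix matched, i.e. the loop never ran `break`.
    raise RuntimeError("Format of %s doesn't match type %s" % (filename, type_name))
print("matched suffix:", suffix)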
@@ -165,9 +165,12 @@ class OSTreeThread(WorkerThread):
                 ("update-summary", config.get("update_summary", False)),
                 ("ostree-ref", config.get("ostree_ref")),
                 ("force-new-commit", config.get("force_new_commit", False)),
+                ("unified-core", config.get("unified_core", False)),
             ]
         )
-        packages = ["pungi", "ostree", "rpm-ostree"]
+        default_packages = ["pungi", "ostree", "rpm-ostree"]
+        additional_packages = config.get("runroot_packages", [])
+        packages = default_packages + additional_packages
         log_file = os.path.join(self.logdir, "runroot.log")
         mounts = [compose.topdir, config["ostree_repo"]]
         runroot = Runroot(compose, phase="ostree")
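The change above lets a compose configuration extend the default runroot package list instead of replacing it. A short sketch of the resulting behaviour (the option name `runroot_packages` is taken from the diff; the config values are illustrative):

# Hypothetical ostree config fragment from a compose configuration.
config = {"runroot_packages": ["createrepo_c", "koji"]}

default_packages = ["pungi", "ostree", "rpm-ostree"]
packages = default_packages + config.get("runroot_packages", [])
print(packages)
# -> ['pungi', 'ostree', 'rpm-ostree', 'createrepo_c', 'koji']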
@@ -272,6 +272,7 @@ class OstreeInstallerThread(WorkerThread):
             rootfs_size=config.get("rootfs_size"),
             is_final=compose.supported,
             log_dir=self.logdir,
+            skip_branding=config.get("skip_branding"),
         )
         cmd = "rm -rf %s && %s" % (
             shlex_quote(output_dir),
@@ -29,13 +29,10 @@ class PkgsetPhase(PhaseBase):
         self.path_prefix = None

     def run(self):
-        pkgset_source = "PkgsetSource%s" % self.compose.conf["pkgset_source"]
-        from .source import PkgsetSourceContainer
         from . import sources

-        PkgsetSourceContainer.register_module(sources)
-        container = PkgsetSourceContainer()
-        SourceClass = container[pkgset_source]
+        SourceClass = sources.ALL_SOURCES[self.compose.conf["pkgset_source"].lower()]

         self.package_sets, self.path_prefix = SourceClass(self.compose)()

     def validate(self):
@@ -28,18 +28,27 @@ from pungi.util import (
     PartialFuncWorkerThread,
     PartialFuncThreadPool,
 )
-from pungi.module_util import Modulemd, collect_module_defaults
+from pungi.module_util import (
+    Modulemd,
+    collect_module_defaults,
+    collect_module_obsoletes,
+)
 from pungi.phases.createrepo import add_modular_metadata


 def populate_arch_pkgsets(compose, path_prefix, global_pkgset):
     result = {}
-    exclusive_noarch = compose.conf["pkgset_exclusive_arch_considers_noarch"]
-
     for arch in compose.get_arches():
         compose.log_info("Populating package set for arch: %s", arch)
         is_multilib = is_arch_multilib(compose.conf, arch)
         arches = get_valid_arches(arch, is_multilib, add_src=True)
-        pkgset = global_pkgset.subset(arch, arches, exclusive_noarch=exclusive_noarch)
+        pkgset = global_pkgset.subset(
+            arch,
+            arches,
+            exclusive_noarch=compose.conf["pkgset_exclusive_arch_considers_noarch"],
+            inherit_to_noarch=compose.conf["pkgset_inherit_exclusive_arch_to_noarch"],
+        )
         pkgset.save_file_list(
             compose.paths.work.package_list(arch=arch, pkgset=global_pkgset),
             remove_path_prefix=path_prefix,

@@ -159,6 +168,9 @@ def _create_arch_repo(worker_thread, args, task_num):
     mod_index = collect_module_defaults(
         compose.paths.work.module_defaults_dir(), names, overrides_dir=overrides_dir
     )
+    mod_index = collect_module_obsoletes(
+        compose.paths.work.module_obsoletes_dir(), names, mod_index
+    )
     for x in mmd:
         mod_index.add_module_stream(x)
     add_modular_metadata(
@@ -22,17 +22,22 @@ It automatically finds a signed copies according to *sigkey_ordering*.

 import itertools
 import json
 import os
+import time
+import pgpy
+import rpm
 from six.moves import cPickle as pickle
+from functools import partial

 import kobo.log
 import kobo.pkgset
 import kobo.rpmlib
+from kobo.shortcuts import compute_file_checksums

 from kobo.threads import WorkerThread, ThreadPool

 import pungi.wrappers.kojiwrapper
 from pungi.util import pkg_is_srpm, copy_all
 from pungi.arch import get_valid_arches, is_excluded
+from pungi.errors import UnsignedPackagesError


 class ExtendedRpmWrapper(kobo.pkgset.SimpleRpmWrapper):

@@ -144,14 +149,20 @@ class PackageSetBase(kobo.log.LoggingBase):

     def raise_invalid_sigkeys_exception(self, rpminfos):
         """
-        Raises RuntimeError containing details of RPMs with invalid
+        Raises UnsignedPackagesError containing details of RPMs with invalid
         sigkeys defined in `rpminfos`.
         """

         def nvr_formatter(package_info):
-            # joins NVR parts of the package with '-' character.
-            return "-".join(
-                (package_info["name"], package_info["version"], package_info["release"])
-            )
+            epoch_suffix = ''
+            if package_info['epoch'] is not None:
+                epoch_suffix = ':' + package_info['epoch']
+            return (
+                f"{package_info['name']}"
+                f"{epoch_suffix}-"
+                f"{package_info['version']}-"
+                f"{package_info['release']}."
+                f"{package_info['arch']}"
+            )

         def get_error(sigkeys, infos):
@@ -166,7 +177,9 @@ class PackageSetBase(kobo.log.LoggingBase):

         if not isinstance(rpminfos, dict):
             rpminfos = {self.sigkey_ordering: rpminfos}
-        raise RuntimeError("\n".join(get_error(k, v) for k, v in rpminfos.items()))
+        raise UnsignedPackagesError(
+            "\n".join(get_error(k, v) for k, v in rpminfos.items())
+        )

     def read_packages(self, rpms, srpms):
         srpm_pool = ReaderPool(self, self._logger)

@@ -200,16 +213,31 @@ class PackageSetBase(kobo.log.LoggingBase):

         return self.rpms_by_arch

-    def subset(self, primary_arch, arch_list, exclusive_noarch=True):
+    def subset(
+        self, primary_arch, arch_list, exclusive_noarch=True, inherit_to_noarch=True
+    ):
         """Create a subset of this package set that only includes
         packages compatible with"""
         pkgset = PackageSetBase(
             self.name, self.sigkey_ordering, logger=self._logger, arches=arch_list
         )
-        pkgset.merge(self, primary_arch, arch_list, exclusive_noarch=exclusive_noarch)
+        pkgset.merge(
+            self,
+            primary_arch,
+            arch_list,
+            exclusive_noarch=exclusive_noarch,
+            inherit_to_noarch=inherit_to_noarch,
+        )
         return pkgset

-    def merge(self, other, primary_arch, arch_list, exclusive_noarch=True):
+    def merge(
+        self,
+        other,
+        primary_arch,
+        arch_list,
+        exclusive_noarch=True,
+        inherit_to_noarch=True,
+    ):
         """
         Merge ``other`` package set into this instance.
         """

@@ -248,7 +276,7 @@ class PackageSetBase(kobo.log.LoggingBase):
             if i.file_path in self.file_cache:
                 # TODO: test if it really works
                 continue
-            if exclusivearch_list and arch == "noarch":
+            if inherit_to_noarch and exclusivearch_list and arch == "noarch":
                 if is_excluded(i, exclusivearch_list, logger=self._logger):
                     continue

@@ -315,6 +343,11 @@ class FilelistPackageSet(PackageSetBase):
     return result


+# This is a marker to indicate package set with only extra builds/tasks and no
+# tag.
+MISSING_KOJI_TAG = object()
+
+
 class KojiPackageSet(PackageSetBase):
     def __init__(
         self,

@@ -329,6 +362,9 @@ class KojiPackageSet(PackageSetBase):
         cache_region=None,
         extra_builds=None,
         extra_tasks=None,
+        signed_packages_retries=0,
+        signed_packages_wait=30,
+        downloader=None,
     ):
         """
         Creates new KojiPackageSet.

@@ -361,9 +397,12 @@ class KojiPackageSet(PackageSetBase):
         :param list extra_tasks: Extra RPMs defined as Koji task IDs to get from Koji
             and include in the package set. Useful when building testing compose
             with RPM scratch builds.
+        :param int signed_packages_retries: How many times should a search for
+            signed package be repeated.
+        :param int signed_packages_wait: How long to wait between search attempts.
         """
         super(KojiPackageSet, self).__init__(
-            name,
+            name if name != MISSING_KOJI_TAG else "no-tag",
             sigkey_ordering=sigkey_ordering,
             arches=arches,
             logger=logger,

@@ -377,10 +416,13 @@ class KojiPackageSet(PackageSetBase):
         self.extra_builds = extra_builds or []
         self.extra_tasks = extra_tasks or []
         self.reuse = None
+        self.signed_packages_retries = signed_packages_retries
+        self.signed_packages_wait = signed_packages_wait
+        self.downloader = downloader

     def __getstate__(self):
         result = self.__dict__.copy()
         result["koji_profile"] = self.koji_wrapper.profile
         del result["koji_wrapper"]
         del result["_logger"]
         if "cache_region" in result:

@@ -388,8 +430,6 @@ class KojiPackageSet(PackageSetBase):
         return result

     def __setstate__(self, data):
-        koji_profile = data.pop("koji_profile")
-        self.koji_wrapper = pungi.wrappers.kojiwrapper.KojiWrapper(koji_profile)
         self._logger = None
         self.__dict__.update(data)

@@ -471,7 +511,8 @@ class KojiPackageSet(PackageSetBase):

         response = None
         if self.cache_region:
-            cache_key = "KojiPackageSet.get_latest_rpms_%s_%s_%s" % (
+            cache_key = "%s.get_latest_rpms_%s_%s_%s" % (
+                str(self.__class__.__name__),
                 str(tag),
                 str(event),
                 str(inherit),
@@ -493,26 +534,83 @@ class KojiPackageSet(PackageSetBase):

         return response

     def get_package_path(self, queue_item):
         rpm_info, build_info = queue_item

         # Check if this RPM is coming from scratch task. In this case, we already
         # know the path.
         if "path_from_task" in rpm_info:
-            return rpm_info["path_from_task"]
+            return self.downloader.get_file(rpm_info["path_from_task"])

-        # we replaced this part because pungi uses way
-        # of guessing path of package on koji based on sigkey
-        # we don't need that because all our packages will
-        # be ready for release
-        # signature verification is still done during deps resolution
         pathinfo = self.koji_wrapper.koji_module.pathinfo
+        paths = []
+
+        if "getRPMChecksums" in self.koji_proxy.system.listMethods():
+
+            def checksum_validator(keyname, pkg_path):
+                checksums = self.koji_proxy.getRPMChecksums(
+                    rpm_info["id"], checksum_types=("sha256",)
+                )
+                if "sha256" in checksums.get(keyname, {}):
+                    computed = compute_file_checksums(pkg_path, ("sha256",))
+                    if computed["sha256"] != checksums[keyname]["sha256"]:
+                        raise RuntimeError("Checksum mismatch for %s" % pkg_path)

-        rpm_path = os.path.join(pathinfo.topdir, pathinfo.rpm(rpm_info))
-        if os.path.isfile(rpm_path):
-            return rpm_path
-        else:
-            self.log_warning("RPM %s not found" % rpm_path)
+        else:
+
+            def checksum_validator(keyname, pkg_path):
+                # Koji doesn't support checksums yet
+                pass
+
+        attempts_left = self.signed_packages_retries + 1
+        while attempts_left > 0:
+            for sigkey in self.sigkey_ordering:
+                if not sigkey:
+                    # we're looking for *signed* copies here
+                    continue
+                sigkey = sigkey.lower()
+                rpm_path = os.path.join(
+                    pathinfo.build(build_info), pathinfo.signed(rpm_info, sigkey)
+                )
+                if rpm_path not in paths:
+                    paths.append(rpm_path)
+                path = self.downloader.get_file(
+                    rpm_path, partial(checksum_validator, sigkey)
+                )
+                if path:
+                    return path
+
+            # No signed copy was found, wait a little and try again.
+            attempts_left -= 1
+            if attempts_left > 0:
+                nvr = "%(name)s-%(version)s-%(release)s" % rpm_info
+                self.log_debug("Waiting for signed package to appear for %s", nvr)
+                time.sleep(self.signed_packages_wait)
+
+        if None in self.sigkey_ordering or "" in self.sigkey_ordering:
+            # use an unsigned copy (if allowed)
+            rpm_path = os.path.join(pathinfo.build(build_info), pathinfo.rpm(rpm_info))
+            paths.append(rpm_path)
+            path = self.downloader.get_file(rpm_path, partial(checksum_validator, ""))
+            if path:
+                return path
+
+        if self._allow_invalid_sigkeys and rpm_info["name"] not in self.packages:
+            # use an unsigned copy (if allowed)
+            rpm_path = os.path.join(pathinfo.build(build_info), pathinfo.rpm(rpm_info))
+            paths.append(rpm_path)
+            path = self.downloader.get_file(rpm_path)
+            if path:
+                self._invalid_sigkey_rpms.append(rpm_info)
+                return path
+
+        self._invalid_sigkey_rpms.append(rpm_info)
+        self.log_error(
+            "RPM %s not found for sigs: %s. Paths checked: %s"
+            % (rpm_info, self.sigkey_ordering, paths)
+        )
+        return None

     def populate(self, tag, event=None, inherit=True, include_packages=None):
@@ -536,6 +634,8 @@ class KojiPackageSet(PackageSetBase):
             inherit,
         )
         self.log_info("[BEGIN] %s" % msg)
-        rpms, builds = self.get_latest_rpms(tag, event, inherit=inherit)
+        rpms, builds = [], []
+        if tag != MISSING_KOJI_TAG:
+            rpms, builds = self.get_latest_rpms(tag, event, inherit=inherit)
         extra_rpms, extra_builds = self.get_extra_rpms()
         rpms += extra_rpms

@@ -641,6 +741,15 @@ class KojiPackageSet(PackageSetBase):
         :param include_packages: an iterable of tuples (package name, arch) that should
             be included.
         """
+        if len(self.sigkey_ordering) > 1 and (
+            None in self.sigkey_ordering or "" in self.sigkey_ordering
+        ):
+            self.log_warning(
+                "Stop writing reuse file as unsigned packages are allowed "
+                "in the compose."
+            )
+            return
+
         reuse_file = compose.paths.work.pkgset_reuse_file(self.name)
         self.log_info("Writing pkgset reuse file: %s" % reuse_file)
         try:

@@ -657,6 +766,12 @@ class KojiPackageSet(PackageSetBase):
                     "srpms_by_name": self.srpms_by_name,
                     "extra_builds": self.extra_builds,
                     "include_packages": include_packages,
+                    "inherit_to_noarch": compose.conf[
+                        "pkgset_inherit_exclusive_arch_to_noarch"
+                    ],
+                    "exclusive_noarch": compose.conf[
+                        "pkgset_exclusive_arch_considers_noarch"
+                    ],
                 },
                 f,
                 protocol=pickle.HIGHEST_PROTOCOL,

@@ -703,20 +818,26 @@ class KojiPackageSet(PackageSetBase):
                 % (old_koji_event, koji_event)
             )
             changed = self.koji_proxy.queryHistory(
-                tables=["tag_listing"], tag=tag, afterEvent=old_koji_event
+                tables=["tag_listing", "tag_inheritance"],
+                tag=tag,
+                afterEvent=min(koji_event, old_koji_event),
+                beforeEvent=max(koji_event, old_koji_event) + 1,
             )
             if changed["tag_listing"]:
                 self.log_debug("Builds under tag %s changed. Can't reuse." % tag)
                 return False
+            if changed["tag_inheritance"]:
+                self.log_debug("Tag inheritance %s changed. Can't reuse." % tag)
+                return False

         if inherit:
             inherit_tags = self.koji_proxy.getFullInheritance(tag, koji_event)
             for t in inherit_tags:
                 changed = self.koji_proxy.queryHistory(
-                    tables=["tag_listing"],
+                    tables=["tag_listing", "tag_inheritance"],
                     tag=t["name"],
-                    afterEvent=old_koji_event,
-                    beforeEvent=koji_event + 1,
+                    afterEvent=min(koji_event, old_koji_event),
+                    beforeEvent=max(koji_event, old_koji_event) + 1,
                 )
                 if changed["tag_listing"]:
                     self.log_debug(

@@ -724,6 +845,9 @@ class KojiPackageSet(PackageSetBase):
                         % t["name"]
                     )
                     return False
+                if changed["tag_inheritance"]:
+                    self.log_debug("Tag inheritance %s changed. Can't reuse." % tag)
+                    return False

         repo_dir = compose.paths.work.pkgset_repo(tag, create_dir=False)
         old_repo_dir = compose.paths.old_compose_path(repo_dir)

@@ -742,6 +866,8 @@ class KojiPackageSet(PackageSetBase):
             self.log_debug("Failed to load reuse file: %s" % str(e))
             return False

+        inherit_to_noarch = compose.conf["pkgset_inherit_exclusive_arch_to_noarch"]
+        exclusive_noarch = compose.conf["pkgset_exclusive_arch_considers_noarch"]
         if (
             reuse_data["allow_invalid_sigkeys"] == self._allow_invalid_sigkeys
             and reuse_data["packages"] == self.packages

@@ -749,6 +875,10 @@ class KojiPackageSet(PackageSetBase):
             and reuse_data["extra_builds"] == self.extra_builds
             and reuse_data["sigkeys"] == self.sigkey_ordering
             and reuse_data["include_packages"] == include_packages
+            # If the value is not present in reuse data, the compose was
+            # generated with older version of Pungi. Best to not reuse.
+            and reuse_data.get("inherit_to_noarch") == inherit_to_noarch
+            and reuse_data.get("exclusive_noarch") == exclusive_noarch
         ):
             self.log_info("Copying repo data for reuse: %s" % old_repo_dir)
             copy_all(old_repo_dir, repo_dir)

@@ -763,6 +893,67 @@ class KojiPackageSet(PackageSetBase):
         return False


+class KojiMockPackageSet(KojiPackageSet):
+
+    def _is_rpm_signed(self, rpm_path) -> bool:
+        ts = rpm.TransactionSet()
+        ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
+        sigkeys = [
+            sigkey.lower() for sigkey in self.sigkey_ordering
+            if sigkey is not None
+        ]
+        if not sigkeys:
+            return True
+        with open(rpm_path, 'rb') as fd:
+            header = ts.hdrFromFdno(fd)
+        signature = header[rpm.RPMTAG_SIGGPG] or header[rpm.RPMTAG_SIGPGP]
+        if signature is None:
+            return False
+        pgp_msg = pgpy.PGPMessage.from_blob(signature)
+        return any(
+            signature.signer.lower() in sigkeys
+            for signature in pgp_msg.signatures
+        )
+
+    def get_package_path(self, queue_item):
+        rpm_info, build_info = queue_item
+
+        # Check if this RPM is coming from scratch task.
+        # In this case, we already know the path.
+        if "path_from_task" in rpm_info:
+            return rpm_info["path_from_task"]
+
+        # we replaced this part because pungi uses way
+        # of guessing path of package on koji based on sigkey
+        # we don't need that because all our packages will
+        # be ready for release
+        # signature verification is still done during deps resolution
+        pathinfo = self.koji_wrapper.koji_module.pathinfo
+
+        rpm_path = os.path.join(pathinfo.topdir, pathinfo.rpm(rpm_info))
+        if os.path.isfile(rpm_path):
+            if not self._is_rpm_signed(rpm_path):
+                self._invalid_sigkey_rpms.append(rpm_info)
+                self.log_error(
+                    'RPM "%s" not found for sigs: "%s". Path checked: "%s"',
+                    rpm_info, self.sigkey_ordering, rpm_path
+                )
+                return
+            return rpm_path
+        else:
+            self.log_warning("RPM %s not found" % rpm_path)
+            return None
+
+    def populate(self, tag, event=None, inherit=True, include_packages=None):
+        result = super().populate(
+            tag=tag,
+            event=event,
+            inherit=inherit,
+            include_packages=include_packages,
+        )
+        return result
+
+
 def _is_src(rpm_info):
     """Check if rpm info object returned by Koji refers to source packages."""
     return rpm_info["arch"] in ("src", "nosrc")
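`MISSING_KOJI_TAG` introduced above is a sentinel object: a unique value that compares equal only to itself, so it can never collide with a real tag name the way a magic string could. A minimal sketch of the pattern:

# A module-level sentinel; object() creates a value equal only to itself.
MISSING_KOJI_TAG = object()

def populate(tag):
    if tag is not MISSING_KOJI_TAG:
        print("query builds tagged %s" % tag)
    else:
        print("no tag; only extra builds/tasks are pulled in")

populate("f40-compose")
populate(MISSING_KOJI_TAG)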
@@ -14,15 +14,6 @@
 # along with this program; if not, see <https://gnu.org/licenses/>.


-import kobo.plugins
-
-
-class PkgsetSourceBase(kobo.plugins.Plugin):
+class PkgsetSourceBase(object):
     def __init__(self, compose):
         self.compose = compose
-
-
-class PkgsetSourceContainer(kobo.plugins.PluginContainer):
-    @classmethod
-    def normalize_name(cls, name):
-        return name.lower()
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Library General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <https://gnu.org/licenses/>.
+
+from .source_koji import PkgsetSourceKoji
+from .source_repos import PkgsetSourceRepos
+from .source_kojimock import PkgsetSourceKojiMock
+
+ALL_SOURCES = {
+    "koji": PkgsetSourceKoji,
+    "repos": PkgsetSourceRepos,
+    "kojimock": PkgsetSourceKojiMock,
+}
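With this new module in place, the kobo plugin container is no longer needed; the phase resolves its source class with a plain dictionary lookup, as the PkgsetPhase change earlier in this compare shows. A small sketch of that lookup (the FakeCompose stub is hypothetical; a real Compose carries the full configuration):

from pungi.phases.pkgset import sources

class FakeCompose:
    # Minimal stand-in: only the conf key the lookup needs.
    conf = {"pkgset_source": "Koji"}

compose = FakeCompose()
# Lowercasing makes the lookup case-insensitive ("Koji" -> "koji").
SourceClass = sources.ALL_SOURCES[compose.conf["pkgset_source"].lower()]
print(SourceClass.__name__)  # -> PkgsetSourceKoji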
@@ -23,19 +23,19 @@ from itertools import groupby

 from kobo.rpmlib import parse_nvra
 from kobo.shortcuts import force_list
-from typing import (
-    Dict,
-    AnyStr,
-    List,
-    Tuple,
-    Set,
-)

 import pungi.wrappers.kojiwrapper
 from pungi.wrappers.comps import CompsWrapper
 from pungi.wrappers.mbs import MBSWrapper
 import pungi.phases.pkgset.pkgsets
-from pungi.util import retry, get_arch_variant_data, get_variant_data
 from pungi.arch import getBaseArch
+from pungi.util import (
+    retry,
+    get_arch_variant_data,
+    get_variant_data,
+    read_single_module_stream_from_file,
+    read_single_module_stream_from_string,
+)
 from pungi.module_util import Modulemd

 from pungi.phases.pkgset.common import MaterializedPackageSet, get_all_arches
@@ -190,27 +190,25 @@ def get_koji_modules(compose, koji_wrapper, event, module_info_str):


 class PkgsetSourceKoji(pungi.phases.pkgset.source.PkgsetSourceBase):
-    enabled = True
-
     def __call__(self):
         compose = self.compose
-        koji_profile = compose.conf["koji_profile"]
-        self.koji_wrapper = pungi.wrappers.kojiwrapper.KojiWrapper(koji_profile)
-        # path prefix must contain trailing '/'
-        path_prefix = self.koji_wrapper.koji_module.config.topdir.rstrip("/") + "/"
-        package_sets = get_pkgset_from_koji(
-            self.compose, self.koji_wrapper, path_prefix
-        )
-        return (package_sets, path_prefix)
+        self.koji_wrapper = pungi.wrappers.kojiwrapper.KojiWrapper(compose)
+        package_sets = get_pkgset_from_koji(self.compose, self.koji_wrapper)
+        return (package_sets, self.compose.koji_downloader.path_prefix)


-def get_pkgset_from_koji(compose, koji_wrapper, path_prefix):
+def get_pkgset_from_koji(compose, koji_wrapper):
     event_info = get_koji_event_info(compose, koji_wrapper)
-    return populate_global_pkgset(compose, koji_wrapper, path_prefix, event_info)
+    return populate_global_pkgset(compose, koji_wrapper, event_info)


 def _add_module_to_variant(
-    koji_wrapper, variant, build, add_to_variant_modules=False, compose=None
+    koji_wrapper,
+    variant,
+    build,
+    add_to_variant_modules=False,
+    compose=None,
+    exclude_module_ns=None,
 ):
     """
     Adds module defined by Koji build info to variant.

@@ -220,6 +218,7 @@ def _add_module_to_variant(
     :param bool add_to_variant_modules: Adds the modules also to
         variant.modules.
     :param compose: Compose object to get filters from
+    :param list exclude_module_ns: Module name:stream which will be excluded.
     """
     mmds = {}
     archives = koji_wrapper.koji_proxy.listArchives(build["id"])

@@ -227,13 +226,18 @@
         if archive["btype"] != "module":
             # Skip non module archives
             continue
+        typedir = koji_wrapper.koji_module.pathinfo.typedir(build, archive["btype"])
         filename = archive["filename"]
-        file_path = os.path.join(
-            koji_wrapper.koji_module.pathinfo.topdir,
-            'modules',
-            build['arch'],
-            build['extra']['typeinfo']['module']['content_koji_tag']
-        )
+        file_path = compose.koji_downloader.get_file(os.path.join(typedir, filename))
         try:
             # If there are two dots, the arch is in the middle. MBS uploads
             # files with actual architecture in the filename, but Pungi deals
             # in basearch. This assumes that each arch in the build maps to a
             # unique basearch.
             _, arch, _ = filename.split(".")
             filename = "modulemd.%s.txt" % getBaseArch(arch)
         except ValueError:
             pass
         mmds[filename] = file_path

     if len(mmds) <= 1:

@@ -244,6 +248,10 @@ def _add_module_to_variant(

     info = build["extra"]["typeinfo"]["module"]
     nsvc = "%(name)s:%(stream)s:%(version)s:%(context)s" % info
+    ns = "%(name)s:%(stream)s" % info
+
+    if exclude_module_ns and ns in exclude_module_ns:
+        return

     added = False

@@ -252,17 +260,23 @@ def _add_module_to_variant(
             compose.log_debug("Module %s is filtered from %s.%s", nsvc, variant, arch)
             continue

-        try:
-            mmd = Modulemd.ModuleStream.read_file(
-                mmds["modulemd.%s.txt" % arch], strict=True
-            )
-            variant.arch_mmds.setdefault(arch, {})[nsvc] = mmd
-            added = True
-        except KeyError:
-            # There is no modulemd for this arch. This could mean an arch was
-            # added to the compose after the module was built. We don't want to
-            # process this, let's skip this module.
-            pass
+        filename = "modulemd.%s.txt" % arch
+        if filename not in mmds:
+            raise RuntimeError(
+                "Module %s does not have metadata for arch %s and is not filtered "
+                "out via filter_modules option." % (nsvc, arch)
+            )
+        try:
+            mod_stream = read_single_module_stream_from_file(
+                mmds[filename], compose, arch, build
+            )
+        except Exception as exc:
+            # libmodulemd raises various GLib exceptions with not very helpful
+            # messages. Let's replace it with something more useful.
+            raise RuntimeError("Failed to read %s: %s", mmds[filename], str(exc))
+        if mod_stream:
+            added = True
+        variant.arch_mmds.setdefault(arch, {})[nsvc] = mod_stream

     if not added:
         # The module is filtered on all arches of this variant.

@@ -342,9 +356,7 @@ def _add_scratch_modules_to_variant(
         tag_to_mmd.setdefault(tag, {})
         for arch in variant.arches:
             try:
-                mmd = Modulemd.ModuleStream.read_string(
-                    final_modulemd[arch], strict=True
-                )
+                mmd = read_single_module_stream_from_string(final_modulemd[arch])
                 variant.arch_mmds.setdefault(arch, {})[nsvc] = mmd
             except KeyError:
                 continue

@@ -384,7 +396,13 @@ def _is_filtered_out(compose, variant, arch, module_name, module_stream):


 def _get_modules_from_koji(
-    compose, koji_wrapper, event, variant, variant_tags, tag_to_mmd
+    compose,
+    koji_wrapper,
+    event,
+    variant,
+    variant_tags,
+    tag_to_mmd,
+    exclude_module_ns,
 ):
     """
     Loads modules for given `variant` from koji `session`, adds them to

@@ -395,6 +413,7 @@ def _get_modules_from_koji(
     :param Variant variant: Variant with modules to find.
     :param dict variant_tags: Dict populated by this method. Key is `variant`
         and value is list of Koji tags to get the RPMs from.
+    :param list exclude_module_ns: Module name:stream which will be excluded.
     """

     # Find out all modules in every variant and add their Koji tags

@@ -403,7 +422,11 @@ def _get_modules_from_koji(
         koji_modules = get_koji_modules(compose, koji_wrapper, event, module["name"])
         for koji_module in koji_modules:
             nsvc = _add_module_to_variant(
-                koji_wrapper, variant, koji_module, compose=compose
+                koji_wrapper,
+                variant,
+                koji_module,
+                compose=compose,
+                exclude_module_ns=exclude_module_ns,
             )
             if not nsvc:
                 continue
@@ -491,15 +514,16 @@ def filter_by_whitelist(compose, module_builds, input_modules, expected_modules)
                 info.get("context"),
             )
             nvr_patterns.add((pattern, spec["name"]))

     modules_to_keep = []

-    for mb in sorted(module_builds, key=lambda i: i['name']):
+    for mb in module_builds:
         # Split release from the build into version and context
         ver, ctx = mb["release"].split(".")
         # Values in `mb` are from Koji build. There's nvr and name, version and
         # release. The input pattern specifies modular name, stream, version
         # and context.
-        for (n, s, v, c), spec in sorted(nvr_patterns):
+        for (n, s, v, c), spec in nvr_patterns:
             if (
                 # We always have a name and stream...
                 mb["name"] == n

@@ -511,51 +535,19 @@ def filter_by_whitelist(compose, module_builds, input_modules, expected_modules)
             ):
                 modules_to_keep.append(mb)
                 expected_modules.discard(spec)
                 break

     return modules_to_keep


-def _filter_expected_modules(
-    variant_name: AnyStr,
-    variant_arches: List[AnyStr],
-    expected_modules: Set[AnyStr],
-    filtered_modules: List[Tuple[AnyStr, Dict[AnyStr, List[AnyStr]]]],
-) -> set:
-    """
-    Function filters out all modules which are listed in Pungi config.
-    Those modules can be absent in koji env so we must remove it from
-    the expected modules list otherwise Pungi will fail
-    """
-    for variant_regexp, filters_dict in filtered_modules:
-        for arch, modules in filters_dict.items():
-            arch = '.*' if arch == '*' else arch
-            variant_regexp = '.*' if variant_regexp == '*' else variant_regexp
-            modules = ['.*' if module == '*' else module for module in modules]
-            cond1 = re.findall(
-                variant_regexp,
-                variant_name,
-            )
-            cond2 = any(
-                re.findall(
-                    arch,
-                    variant_arch,
-                ) for variant_arch in variant_arches
-            )
-            if cond1 and cond2:
-                expected_modules = {
-                    expected_module for expected_module in expected_modules if
-                    not any(
-                        re.findall(
-                            filtered_module,
-                            expected_module,
-                        ) for filtered_module in modules
-                    )
-                }
-    return expected_modules
-
-
 def _get_modules_from_koji_tags(
-    compose, koji_wrapper, event_id, variant, variant_tags, tag_to_mmd
+    compose,
+    koji_wrapper,
+    event_id,
+    variant,
+    variant_tags,
+    tag_to_mmd,
+    exclude_module_ns,
 ):
     """
     Loads modules for given `variant` from Koji, adds them to

@@ -567,6 +559,7 @@ def _get_modules_from_koji_tags(
     :param Variant variant: Variant with modules to find.
     :param dict variant_tags: Dict populated by this method. Key is `variant`
         and value is list of Koji tags to get the RPMs from.
+    :param list exclude_module_ns: Module name:stream which will be excluded.
     """
     # Compose tags from configuration
     compose_tags = [

@@ -574,13 +567,7 @@ def _get_modules_from_koji_tags(
     ]
     # Get set of configured module names for this variant. If nothing is
     # configured, the set is empty.
-    expected_modules = []
-    for spec in variant.get_modules():
-        name, stream = spec['name'].split(':')
-        expected_modules.append(
-            ':'.join((name, stream.replace('-', '_')))
-        )
-    expected_modules = set(expected_modules)
+    expected_modules = set(spec["name"] for spec in variant.get_modules())
     # Find out all modules in every variant and add their Koji tags
     # to variant and variant_tags list.
     koji_proxy = koji_wrapper.koji_proxy

@@ -639,21 +626,26 @@ def _get_modules_from_koji_tags(
     for build in latest_builds:
         # Get the Build from Koji to get modulemd and module_tag.
         build = koji_proxy.getBuild(build["build_id"])

+        nsvc = _add_module_to_variant(
+            koji_wrapper,
+            variant,
+            build,
+            True,
+            compose=compose,
+            exclude_module_ns=exclude_module_ns,
+        )
+        if not nsvc:
+            continue
+
         module_tag = (
             build.get("extra", {})
             .get("typeinfo", {})
             .get("module", {})
             .get("content_koji_tag", "")
         )

         variant_tags[variant].append(module_tag)

-        nsvc = _add_module_to_variant(
-            koji_wrapper, variant, build, True, compose=compose
-        )
-        if not nsvc:
-            continue
-
         tag_to_mmd.setdefault(module_tag, {})
         for arch in variant.arch_mmds:
             try:

@@ -675,22 +667,17 @@ def _get_modules_from_koji_tags(
             # needed in createrepo phase where metadata is exposed by
             # productmd
             variant.module_uid_to_koji_tag[nsvc] = module_tag
-    expected_modules = _filter_expected_modules(
-        variant_name=variant.name,
-        variant_arches=variant.arches,
-        expected_modules=expected_modules,
-        filtered_modules=compose.conf['filter_modules'],
-    )

     if expected_modules:
         # There are some module names that were listed in configuration and not
         # found in any tag...
-        raise RuntimeError(
+        compose.log_warning(
             "Configuration specified patterns (%s) that don't match "
             "any modules in the configured tags." % ", ".join(expected_modules)
         )


-def populate_global_pkgset(compose, koji_wrapper, path_prefix, event):
+def populate_global_pkgset(compose, koji_wrapper, event):
     all_arches = get_all_arches(compose)

     # List of compose tags from which we create this compose
@@ -744,26 +731,52 @@ def populate_global_pkgset(compose, koji_wrapper, path_prefix, event):
                 "modules."
             )

+        extra_modules = get_variant_data(
+            compose.conf, "pkgset_koji_module_builds", variant
+        )
+
+        # When adding extra modules, other modules of the same name:stream available
+        # in brew tag should be excluded.
+        exclude_module_ns = []
+        if extra_modules:
+            exclude_module_ns = [
+                ":".join(nsvc.split(":")[:2]) for nsvc in extra_modules
+            ]
+
         if modular_koji_tags or (
             compose.conf["pkgset_koji_module_tag"] and variant.modules
         ):
             # List modules tagged in particular tags.
             _get_modules_from_koji_tags(
-                compose, koji_wrapper, event, variant, variant_tags, tag_to_mmd
+                compose,
+                koji_wrapper,
+                event,
+                variant,
+                variant_tags,
+                tag_to_mmd,
+                exclude_module_ns,
             )
         elif variant.modules:
             # Search each module in Koji separately. Tagging does not come into
             # play here.
             _get_modules_from_koji(
-                compose, koji_wrapper, event, variant, variant_tags, tag_to_mmd
+                compose,
+                koji_wrapper,
+                event,
+                variant,
+                variant_tags,
+                tag_to_mmd,
+                exclude_module_ns,
             )

-        extra_modules = get_variant_data(
-            compose.conf, "pkgset_koji_module_builds", variant
-        )
         if extra_modules:
             _add_extra_modules_to_variant(
-                compose, koji_wrapper, variant, extra_modules, variant_tags, tag_to_mmd
+                compose,
+                koji_wrapper,
+                variant,
+                extra_modules,
+                variant_tags,
+                tag_to_mmd,
             )

         variant_scratch_modules = get_variant_data(

@@ -790,17 +803,23 @@ def populate_global_pkgset(compose, koji_wrapper, path_prefix, event):

     pkgsets = []

-    extra_builds = force_list(compose.conf.get("pkgset_koji_builds", []))
-    extra_tasks = force_list(compose.conf.get("pkgset_koji_scratch_tasks", []))
-
     if not pkgset_koji_tags and (extra_builds or extra_tasks):
         # We have extra packages to pull in, but no tag to merge them with.
-        compose_tags.append(pungi.phases.pkgset.pkgsets.MISSING_KOJI_TAG)
+        pkgset_koji_tags.append(pungi.phases.pkgset.pkgsets.MISSING_KOJI_TAG)

     # Get package set for each compose tag and merge it to global package
     # list. Also prepare per-variant pkgset, because we do not have list
     # of binary RPMs in module definition - there is just list of SRPMs.
     for compose_tag in compose_tags:
         compose.log_info("Loading package set for tag %s", compose_tag)
+        kwargs = {}
+        if compose_tag in pkgset_koji_tags:
+            extra_builds = force_list(compose.conf.get("pkgset_koji_builds", []))
+            extra_tasks = force_list(compose.conf.get("pkgset_koji_scratch_tasks", []))
+        else:
+            extra_builds = []
+            extra_tasks = []
+        kwargs["extra_builds"] = extra_builds
+        kwargs["extra_tasks"] = extra_tasks

         pkgset = pungi.phases.pkgset.pkgsets.KojiPackageSet(
             compose_tag,

@@ -812,8 +831,10 @@ def populate_global_pkgset(compose, koji_wrapper, path_prefix, event):
             allow_invalid_sigkeys=allow_invalid_sigkeys,
             populate_only_packages=populate_only_packages_to_gather,
             cache_region=compose.cache_region,
-            extra_builds=extra_builds,
-            extra_tasks=extra_tasks,
             signed_packages_retries=compose.conf["signed_packages_retries"],
             signed_packages_wait=compose.conf["signed_packages_wait"],
+            downloader=compose.koji_downloader,
+            **kwargs
         )

         # Check if we have cache for this tag from previous compose. If so, use

@@ -822,11 +843,16 @@ def populate_global_pkgset(compose, koji_wrapper, path_prefix, event):
             compose.paths.work.pkgset_file_cache(compose_tag)
         )
         if old_cache_path:
-            pkgset.set_old_file_cache(
-                pungi.phases.pkgset.pkgsets.KojiPackageSet.load_old_file_cache(
-                    old_cache_path
-                )
-            )
+            try:
+                pkgset.set_old_file_cache(
+                    pungi.phases.pkgset.pkgsets.KojiPackageSet.load_old_file_cache(
+                        old_cache_path
+                    )
+                )
+            except Exception as e:
+                compose.log_debug(
+                    "Failed to load old cache file %s : %s" % (old_cache_path, str(e))
+                )

         is_traditional = compose_tag in compose.conf.get("pkgset_koji_tag", [])
         should_inherit = inherit if is_traditional else inherit_modules

@@ -872,7 +898,6 @@ def populate_global_pkgset(compose, koji_wrapper, path_prefix, event):
     )
     for variant in compose.all_variants.values():
         if compose_tag in variant_tags[variant]:
-
             # If it's a modular tag, store the package set for the module.
             for nsvc, koji_tag in variant.module_uid_to_koji_tag.items():
                 if compose_tag == koji_tag:

@@ -895,7 +920,7 @@ def populate_global_pkgset(compose, koji_wrapper, path_prefix, event):
                 MaterializedPackageSet.create,
                 compose,
                 pkgset,
-                path_prefix,
+                compose.koji_downloader.path_prefix,
                 mmd=tag_to_mmd.get(pkgset.name),
             )
         )
File diff suppressed because it is too large
@@ -15,6 +15,7 @@


 import os
+import shutil

 from kobo.shortcuts import run

@@ -31,8 +32,6 @@ import pungi.phases.pkgset.source


 class PkgsetSourceRepos(pungi.phases.pkgset.source.PkgsetSourceBase):
-    enabled = True
-
     def __call__(self):
         package_sets, path_prefix = get_pkgset_from_repos(self.compose)
         return (package_sets, path_prefix)

@@ -112,6 +111,17 @@ def get_pkgset_from_repos(compose):
             flist.append(dst)
             pool.queue_put((src, dst))

+    # Clean up tmp dir
+    # Workaround for rpm not honoring sgid bit which only appears when yum is used.
+    yumroot_dir = os.path.join(pungi_dir, "work", arch, "yumroot")
+    if os.path.isdir(yumroot_dir):
+        try:
+            shutil.rmtree(yumroot_dir)
+        except Exception as e:
+            compose.log_warning(
+                "Failed to clean up tmp dir: %s %s" % (yumroot_dir, str(e))
+            )
+
     msg = "Linking downloaded pkgset packages"
     compose.log_info("[BEGIN] %s" % msg)
     pool.start()
@@ -18,6 +18,7 @@ import os

 from pungi.phases.base import PhaseBase
 from pungi.util import failable, get_arch_variant_data
+import productmd.compose


 class TestPhase(PhaseBase):

@@ -25,6 +26,7 @@ class TestPhase(PhaseBase):

     def run(self):
         check_image_sanity(self.compose)
+        check_image_metadata(self.compose)


 def check_image_sanity(compose):

@@ -45,6 +47,17 @@ def check_image_sanity(compose):
                 check_size_limit(compose, variant, arch, img)


+def check_image_metadata(compose):
+    """
+    Check the images metadata for entries that cannot be serialized.
+    Often caused by isos with duplicate metadata.
+    Accessing the `images` attribute will raise an exception if there's a problem.
+    """
+    if compose.im.images:
+        compose = productmd.compose.Compose(compose.paths.compose.topdir())
+        return compose.images
+
+
 def check_sanity(compose, variant, arch, image):
     path = os.path.join(compose.paths.compose.topdir(), image.path)
     deliverable = getattr(image, "deliverable")
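A usage sketch of the metadata check added above: it round-trips the compose metadata through productmd, so any entry that cannot be serialized and parsed back surfaces as an exception (the path is hypothetical):

import productmd.compose

# Loading the metadata back from disk and touching `.images` forces
# productmd to parse, and thereby validate, the images.json file.
compose = productmd.compose.Compose("/mnt/compose/latest-compose/compose")
images = compose.images  # raises if the metadata cannot be deserialized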
@@ -69,10 +69,13 @@ class Profiler(object):

     @classmethod
     def print_results(cls, stream=sys.stdout):
-        print("Profiling results:", file=sys.stdout)
+        # Ensure all data that was printed to stdout was already flushed. If
+        # the caller is redirecting stderr to stdout, and there's buffered
+        # data, we may end up in a situation where the stderr output printed
+        # below ends up mixed with the stdout lines.
+        sys.stdout.flush()
+        print("Profiling results:", file=stream)
         results = cls._data.items()
         results = sorted(results, key=lambda x: x[1]["time"], reverse=True)
         for name, data in results:
-            print(
-                " %6.2f %5d %s" % (data["time"], data["calls"], name), file=sys.stdout
-            )
+            print(" %6.2f %5d %s" % (data["time"], data["calls"], name), file=stream)
@@ -15,6 +15,7 @@

 import os
 import re
+import six
 from six.moves import shlex_quote
 import kobo.log
 from kobo.shortcuts import run

@@ -110,7 +111,7 @@ class Runroot(kobo.log.LoggingBase):
         runroot_tag = self.compose.conf["runroot_tag"]
         log_dir = kwargs.pop("log_dir", None)

-        koji_wrapper = kojiwrapper.KojiWrapper(self.compose.conf["koji_profile"])
+        koji_wrapper = kojiwrapper.KojiWrapper(self.compose)
         koji_cmd = koji_wrapper.get_runroot_cmd(
             runroot_tag,
             arch,

@@ -149,7 +150,11 @@ class Runroot(kobo.log.LoggingBase):
         """
         formatted_cmd = command.format(**fmt_dict) if fmt_dict else command
         ssh_cmd = ["ssh", "-oBatchMode=yes", "-n", "-l", user, hostname, formatted_cmd]
-        return run(ssh_cmd, show_cmd=True, logfile=log_file)[1]
+        output = run(ssh_cmd, show_cmd=True, logfile=log_file)[1]
+        if six.PY3 and isinstance(output, bytes):
+            return output.decode()
+        else:
+            return output

     def _log_file(self, base, suffix):
         return base.replace(".log", "." + suffix + ".log")

@@ -174,10 +179,13 @@ class Runroot(kobo.log.LoggingBase):
         # by the runroot task, so the Pungi user can access them.
         if chown_paths:
             paths = " ".join(shlex_quote(pth) for pth in chown_paths)
+            command += " ; EXIT_CODE=$?"
             # Make the files world readable
-            command += " && chmod -R a+r %s" % paths
+            command += " ; chmod -R a+r %s" % paths
             # and owned by the same user that is running the process
-            command += " && chown -R %d %s" % (os.getuid(), paths)
+            command += " ; chown -R %d %s" % (os.getuid(), paths)
+            # Exit with code of main command
+            command += " ; exit $EXIT_CODE"

         hostname = runroot_ssh_hostnames[arch]
         user = self.compose.conf.get("runroot_ssh_username", "root")

@@ -300,7 +308,7 @@ class Runroot(kobo.log.LoggingBase):
         runroot_channel = self.compose.conf.get("runroot_channel")
         runroot_tag = self.compose.conf["runroot_tag"]

-        koji_wrapper = kojiwrapper.KojiWrapper(self.compose.conf["koji_profile"])
+        koji_wrapper = kojiwrapper.KojiWrapper(self.compose)
         koji_cmd = koji_wrapper.get_pungi_buildinstall_cmd(
             runroot_tag,
             arch,

@@ -334,7 +342,7 @@ class Runroot(kobo.log.LoggingBase):
         runroot_channel = self.compose.conf.get("runroot_channel")
         runroot_tag = self.compose.conf["runroot_tag"]

-        koji_wrapper = kojiwrapper.KojiWrapper(self.compose.conf["koji_profile"])
+        koji_wrapper = kojiwrapper.KojiWrapper(self.compose)
         koji_cmd = koji_wrapper.get_pungi_ostree_cmd(
             runroot_tag, arch, args, channel=runroot_channel, **kwargs
         )
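The switch from `&&` to `;` above makes the chmod/chown cleanup run even when the main command fails, while the saved `EXIT_CODE` still propagates the main command's status. A sketch of the shell string this builds (the main command and paths are illustrative; the real code uses os.getuid() instead of the hard-coded uid):

from six.moves import shlex_quote

chown_paths = ["/mnt/compose/output dir"]
command = "lorax ..."  # hypothetical main runroot command

paths = " ".join(shlex_quote(pth) for pth in chown_paths)
command += " ; EXIT_CODE=$?"
command += " ; chmod -R a+r %s" % paths
command += " ; chown -R %d %s" % (1000, paths)
command += " ; exit $EXIT_CODE"
print(command)
# lorax ... ; EXIT_CODE=$? ; chmod -R a+r '/mnt/compose/output dir' ; chown -R 1000 '/mnt/compose/output dir' ; exit $EXIT_CODE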
@@ -0,0 +1,63 @@
+import argparse
+import os
+import re
+import time
+
+from pungi.util import format_size
+
+
+LOCK_RE = re.compile(r".*\.lock(\|[A-Za-z0-9]+)*$")
+
+
+def should_be_cleaned_up(path, st, threshold):
+    if st.st_nlink == 1 and st.st_mtime < threshold:
+        # No other instances, older than limit
+        return True
+
+    if LOCK_RE.match(path) and st.st_mtime < threshold:
+        # Suspiciously old lock
+        return True
+
+    return False
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("CACHE_DIR")
+    parser.add_argument("-n", "--dry-run", action="store_true")
+    parser.add_argument("--verbose", action="store_true")
+    parser.add_argument(
+        "--max-age",
+        help="how old files should be considered for deletion",
+        default=7,
+        type=int,
+    )
+
+    args = parser.parse_args()
+
+    topdir = os.path.abspath(args.CACHE_DIR)
+    max_age = args.max_age * 24 * 3600
+
+    cleaned_up = 0
+
+    threshold = time.time() - max_age
+    for dirpath, dirnames, filenames in os.walk(topdir):
+        for f in filenames:
+            filepath = os.path.join(dirpath, f)
+            st = os.stat(filepath)
+            if should_be_cleaned_up(filepath, st, threshold):
+                if args.verbose:
+                    print("RM %s" % filepath)
+                cleaned_up += st.st_size
+                if not args.dry_run:
+                    os.remove(filepath)
+        if not dirnames and not filenames:
+            if args.verbose:
+                print("RMDIR %s" % dirpath)
+            if not args.dry_run:
+                os.rmdir(dirpath)
+
+    if args.dry_run:
+        print("Would reclaim %s bytes." % format_size(cleaned_up))
+    else:
+        print("Reclaimed %s bytes." % format_size(cleaned_up))
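A quick illustration of the cleanup policy in the new script above: a file is removed once it is older than the threshold and either has no other hard links or matches the stale-lock pattern. A minimal sketch, assuming `should_be_cleaned_up` is importable; the namedtuple stands in for os.stat_result:

import collections
import time

# Stand-in with just the fields should_be_cleaned_up reads.
FakeStat = collections.namedtuple("FakeStat", "st_nlink st_mtime st_size")

threshold = time.time() - 7 * 24 * 3600
week_old = time.time() - 8 * 24 * 3600

# Week-old file with a single hard link: cleaned up.
print(should_be_cleaned_up("/cache/pkg.rpm", FakeStat(1, week_old, 100), threshold))
# Week-old lock file with several links: still matches via LOCK_RE.
print(should_be_cleaned_up("/cache/pkg.rpm.lock", FakeStat(3, week_old, 0), threshold))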
@@ -96,7 +96,7 @@ def main():
     f.filter_environments(opts.arch, opts.variant, opts.arch_only_environments)

     if not opts.no_cleanup:
-        f.cleanup(opts.keep_empty_group, opts.lookaside_group)
+        f.cleanup(opts.arch, opts.keep_empty_group, opts.lookaside_group)

     if opts.remove_categories:
         f.remove_categories()
@@ -171,32 +171,11 @@ def main():
     group.add_argument(
         "--offline", action="store_true", help="Do not resolve git references."
     )
-    parser.add_argument(
-        "--multi",
-        metavar="DIR",
-        help=(
-            "Treat source as config for pungi-orchestrate and store dump into "
-            "given directory."
-        ),
-    )

     args = parser.parse_args()

     defines = config_utils.extract_defines(args.define)

-    if args.multi:
-        if len(args.sources) > 1:
-            parser.error("Only one multi config can be specified.")
-
-        return dump_multi_config(
-            args.sources[0],
-            dest=args.multi,
-            defines=defines,
-            just_dump=args.just_dump,
-            event=args.freeze_event,
-            offline=args.offline,
-        )
-
     return process_file(
         args.sources,
         defines=defines,
@@ -127,7 +127,7 @@ def run(config, topdir, has_old, offline, defined_variables, schema_overrides):
         pungi.phases.OstreeInstallerPhase(compose, buildinstall_phase),
         pungi.phases.OSTreePhase(compose),
         pungi.phases.CreateisoPhase(compose, buildinstall_phase),
-        pungi.phases.ExtraIsosPhase(compose),
+        pungi.phases.ExtraIsosPhase(compose, buildinstall_phase),
         pungi.phases.LiveImagesPhase(compose),
         pungi.phases.LiveMediaPhase(compose),
         pungi.phases.ImageBuildPhase(compose),

@@ -5,35 +5,43 @@ import os
 import subprocess
 import tempfile
 from shutil import rmtree
-from typing import AnyStr, List, Dict, Optional
+from typing import (
+    AnyStr,
+    List,
+    Dict,
+    Optional,
+)

 import createrepo_c as cr
 import requests
 import yaml
 from dataclasses import dataclass, field

-from .create_packages_json import PackagesGenerator, RepoInfo
+from .create_packages_json import (
+    PackagesGenerator,
+    RepoInfo,
+    VariantInfo,
+)


 @dataclass
-class ExtraRepoInfo(RepoInfo):
+class ExtraVariantInfo(VariantInfo):

     modules: List[AnyStr] = field(default_factory=list)
     packages: List[AnyStr] = field(default_factory=list)
-    is_remote: bool = True


 class CreateExtraRepo(PackagesGenerator):

     def __init__(
         self,
-        repos: List[ExtraRepoInfo],
+        variants: List[ExtraVariantInfo],
         bs_auth_token: AnyStr,
         local_repository_path: AnyStr,
         clear_target_repo: bool = True,
     ):
-        self.repos = []  # type: List[ExtraRepoInfo]
-        super().__init__(repos, [], [])
+        self.variants = []  # type: List[ExtraVariantInfo]
+        super().__init__(variants, [], [])
         self.auth_headers = {
             'Authorization': f'Bearer {bs_auth_token}',
         }
@@ -92,7 +100,7 @@ class CreateExtraRepo(PackagesGenerator):
         arch: AnyStr,
         packages: Optional[List[AnyStr]] = None,
         modules: Optional[List[AnyStr]] = None,
-    ) -> List[ExtraRepoInfo]:
+    ) -> List[ExtraVariantInfo]:
         """
         Get info about a BS repo and save it to
         an object of class ExtraRepoInfo
@@ -110,7 +118,7 @@ class CreateExtraRepo(PackagesGenerator):
         api_uri = 'api/v1'
         bs_repo_suffix = 'build_repos'

-        repos_info = []
+        variants_info = []

         # get the full info about a BS repo
         repo_request = requests.get(
@@ -132,7 +140,13 @@ class CreateExtraRepo(PackagesGenerator):
             # skip repo with unsuitable architecture
             if architecture != arch:
                 continue
-            repo_info = ExtraRepoInfo(
-                path=os.path.join(
-                    bs_url,
-                    bs_repo_suffix,
+            variant_info = ExtraVariantInfo(
+                name=f'{build_id}-{platform_name}-{architecture}',
+                arch=architecture,
+                packages=packages,
+                modules=modules,
+                repos=[
+                    RepoInfo(
+                        path=os.path.join(
+                            bs_url,
+                            bs_repo_suffix,

@@ -140,14 +154,12 @@ class CreateExtraRepo(PackagesGenerator):
-                    platform_name,
-                ),
-                folder=architecture,
-                name=f'{build_id}-{platform_name}-{architecture}',
-                arch=architecture,
-                is_remote=True,
-                packages=packages,
-                modules=modules,
-            )
-            repos_info.append(repo_info)
-        return repos_info
+                            platform_name,
+                        ),
+                        folder=architecture,
+                        is_remote=True,
+                    )
+                ],
+            )
+            variants_info.append(variant_info)
+        return variants_info

     def _create_local_extra_repo(self):
         """
@@ -161,7 +173,7 @@ class CreateExtraRepo(PackagesGenerator):
         if os.path.exists(self.default_modules_yaml_path):
             os.remove(self.default_modules_yaml_path)

-    def _get_remote_file_content(
+    def get_remote_file_content(
         self,
         file_url: AnyStr,
     ) -> AnyStr:
@@ -184,7 +196,7 @@ class CreateExtraRepo(PackagesGenerator):
     def _download_rpm_to_local_repo(
         self,
         package_location: AnyStr,
-        repo_info: ExtraRepoInfo,
+        repo_info: RepoInfo,
     ) -> None:
         """
         Download a rpm package from a remote repo and save it to a local repo
@@ -212,21 +224,22 @@ class CreateExtraRepo(PackagesGenerator):
     def _download_packages(
         self,
         packages: Dict[AnyStr, cr.Package],
-        repo_info: ExtraRepoInfo
+        variant_info: ExtraVariantInfo
     ):
         """
         Download all defined packages from a remote repo
-        :param packages: information about all of packages (including
+        :param packages: information about all packages (including
                  modularity) in a remote repo
-        :param repo_info: information about a remote repo
+        :param variant_info: information about a remote variant
         """
         for package in packages.values():
             package_name = package.name
             # Skip a current package from a remote repo if we defined
             # the list packages and a current package doesn't belong to it
-            if repo_info.packages and \
-                    package_name not in repo_info.packages:
+            if variant_info.packages and \
+                    package_name not in variant_info.packages:
                 continue
+            for repo_info in variant_info.repos:
                 self._download_rpm_to_local_repo(
                     package_location=package.location_href,
                     repo_info=repo_info,
@@ -235,23 +248,23 @@ class CreateExtraRepo(PackagesGenerator):
     def _download_modules(
         self,
         modules_data: List[Dict],
-        repo_info: ExtraRepoInfo,
+        variant_info: ExtraVariantInfo,
         packages: Dict[AnyStr, cr.Package]
     ):
         """
         Download all defined modularity packages and their data from
         a remote repo
-        :param modules_data: information about all of modules in a remote repo
-        :param repo_info: information about a remote repo
-        :param packages: information about all of packages (including
+        :param modules_data: information about all modules in a remote repo
+        :param variant_info: information about a remote variant
+        :param packages: information about all packages (including
                  modularity) in a remote repo
         """
         for module in modules_data:
             module_data = module['data']
             # Skip a current module from a remote repo if we defined
             # the list modules and a current module doesn't belong to it
-            if repo_info.modules and \
-                    module_data['name'] not in repo_info.modules:
+            if variant_info.modules and \
+                    module_data['name'] not in variant_info.modules:
                 continue
             # we should add info about a module if the local repodata
             # doesn't have it
@@ -264,13 +277,14 @@ class CreateExtraRepo(PackagesGenerator):
                 continue
             for rpm in module['data']['artifacts']['rpms']:
                 # Empty repo_info.packages means that we will download
-                # all of packages from repo including
+                # all packages from repo including
                 # the modularity packages
-                if not repo_info.packages:
+                if not variant_info.packages:
                     break
                 # skip a rpm if it doesn't belong to a processed repo
                 if rpm not in packages:
                     continue
+                for repo_info in variant_info.repos:
                     self._download_rpm_to_local_repo(
                         package_location=packages[rpm].location_href,
                         repo_info=repo_info,
@@ -284,16 +298,16 @@ class CreateExtraRepo(PackagesGenerator):
         3. Call `createrepo_c` which creates a local repo
            with the right repodata
         """
-        for repo_info in self.repos:
-            packages = {}  # type: Dict[AnyStr, cr.Package]
+        for variant_info in self.variants:
+            for repo_info in variant_info.repos:
                 repomd_records = self._get_repomd_records(
                     repo_info=repo_info,
                 )
+                packages_iterator = self.get_packages_iterator(repo_info)
                 # parse the repodata (including modules.yaml.gz)
-                modules_data = self._parse_repomd_records(
+                modules_data = self._parse_module_repomd_record(
                     repo_info=repo_info,
                     repomd_records=repomd_records,
-                    packages=packages,
                 )
                 # convert the packages dict to more usable form
                 # for future checking that a rpm from the module's artifacts
@@ -301,16 +315,16 @@ class CreateExtraRepo(PackagesGenerator):
                 packages = {
                     f'{package.name}-{package.epoch}:{package.version}-'
                     f'{package.release}.{package.arch}':
-                    package for package in packages.values()
+                    package for package in packages_iterator
                 }
                 self._download_modules(
                     modules_data=modules_data,
-                    repo_info=repo_info,
+                    variant_info=variant_info,
                     packages=packages,
                 )
                 self._download_packages(
                     packages=packages,
-                    repo_info=repo_info,
+                    variant_info=variant_info,
                 )

         self._dump_local_modules_yaml()
@@ -322,7 +336,6 @@ def create_parser():
     parser.add_argument(
         '--bs-auth-token',
         help='Auth token for Build System',
-        required=True,
     )
     parser.add_argument(
         '--local-repo-path',
@@ -391,11 +404,16 @@ def cli_main():
             packages = packages.split()
         if repo.startswith('http://'):
             repos_info.append(
-                ExtraRepoInfo(
-                    path=repo,
-                    folder=repo_folder,
+                ExtraVariantInfo(
+                    name=repo_folder,
                     arch=repo_arch,
+                    repos=[
+                        RepoInfo(
+                            path=repo,
+                            folder=repo_folder,
+                            is_remote=True,
+                        )
+                    ],
                     modules=modules,
                     packages=packages,
                 )

@@ -411,7 +429,7 @@ def cli_main():
         )
     )
     cer = CreateExtraRepo(
-        repos=repos_info,
+        variants=repos_info,
         bs_auth_token=args.bs_auth_token,
         local_repository_path=args.local_repo_path,
         clear_target_repo=args.clear_local_repo,

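The hunks above split what used to be one flat ExtraRepoInfo into variant-level and repo-level records. A hypothetical construction showing the new shape (the URL and names below are invented, not taken from the diff):

variant = ExtraVariantInfo(
    name='my-variant',          # was ExtraRepoInfo.name
    arch='x86_64',              # was ExtraRepoInfo.arch
    packages=['bash'],          # variant-level package filter
    modules=['perl'],           # variant-level module filter
    repos=[
        RepoInfo(
            path='http://example.com/repos',  # repo-level fields moved here
            folder='appstream',
            is_remote=True,
        )
    ],
)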
@@ -9,23 +9,60 @@ https://github.com/rpm-software-management/createrepo_c/blob/master/examples/python/repomd_parsing.py
 import argparse
 import gzip
+import json
 import logging
 import lzma
 import os
 import re
 import tempfile
 from collections import defaultdict
-from typing import AnyStr, Dict, List, Optional
+from itertools import tee
+from pathlib import Path
+from typing import (
+    AnyStr,
+    Dict,
+    List,
+    Any,
+    Iterator,
+    Optional,
+    Tuple,
+    Union,
+)

+import binascii
+from urllib.parse import urljoin

 import createrepo_c as cr
 import dnf.subject
 import hawkey
 import requests
 import rpm
 import yaml
-from createrepo_c import Package
-from dataclasses import dataclass
+from createrepo_c import (
+    Package,
+    PackageIterator,
+    Repomd,
+    RepomdRecord,
+)
+from dataclasses import dataclass, field
+from kobo.rpmlib import parse_nvra

 logging.basicConfig(level=logging.INFO)


-from .gather_modules import is_gzip_file, is_xz_file
+def _is_compressed_file(first_two_bytes: bytes, initial_bytes: bytes):
+    return binascii.hexlify(first_two_bytes) == initial_bytes
+
+
+def is_gzip_file(first_two_bytes):
+    return _is_compressed_file(
+        first_two_bytes=first_two_bytes,
+        initial_bytes=b'1f8b',
+    )
+
+
+def is_xz_file(first_two_bytes):
+    return _is_compressed_file(
+        first_two_bytes=first_two_bytes,
+        initial_bytes=b'fd37',
+    )


 @dataclass
 class RepoInfo:
@@ -33,32 +70,76 @@ class RepoInfo:
     # 'appstream', 'baseos', etc.
     # Or 'http://koji.cloudlinux.com/mirrors/rhel_mirror' if you are
     # using remote repo
-    path: AnyStr
+    path: str
     # name of folder with a repodata folder. E.g. 'baseos', 'appstream', etc
-    folder: AnyStr
-    # name of repo. E.g. 'BaseOS', 'AppStream', etc
-    name: AnyStr
-    # architecture of repo. E.g. 'x86_64', 'i686', etc
-    arch: AnyStr
+    folder: str
     # Is a repo remote or local
     is_remote: bool
-    # Is an reference repository (usually it's a RHEL repo)
+    # Is a reference repository (usually it's a RHEL repo)
     # Layout of packages from such repository will be taken as example
-    # Only layout of specific package (which don't exist
-    # in an reference repository) will be taken as example
+    # Only layout of specific package (which doesn't exist
+    # in a reference repository) will be taken as example
     is_reference: bool = False
+    # The packages from 'present' repo will be added to a variant.
+    # The packages from 'absent' repo will be removed from a variant.
+    repo_type: str = 'present'
+
+
+@dataclass
+class VariantInfo:
+    # name of variant. E.g. 'BaseOS', 'AppStream', etc
+    name: AnyStr
+    # architecture of variant. E.g. 'x86_64', 'i686', etc
+    arch: AnyStr
+    # The packages which will be not added to a variant
+    excluded_packages: List[str] = field(default_factory=list)
+    # Repos of a variant
+    repos: List[RepoInfo] = field(default_factory=list)


 class PackagesGenerator:

+    repo_arches = defaultdict(lambda: list(('noarch',)))
+    addon_repos = {
+        'x86_64': ['i686'],
+        'ppc64le': [],
+        'aarch64': [],
+        's390x': [],
+        'i686': [],
+    }
+
     def __init__(
         self,
-        repos: List[RepoInfo],
+        variants: List[VariantInfo],
         excluded_packages: List[AnyStr],
         included_packages: List[AnyStr],
     ):
-        self.repos = repos
+        self.variants = variants
         self.pkgs = dict()
         self.excluded_packages = excluded_packages
         self.included_packages = included_packages
+        self.tmp_files = []  # type: list[Path]
+        for arch, arch_list in self.addon_repos.items():
+            self.repo_arches[arch].extend(arch_list)
+            self.repo_arches[arch].append(arch)
+
+    def __del__(self):
+        for tmp_file in self.tmp_files:
+            if tmp_file.exists():
+                tmp_file.unlink()

     @staticmethod
     def _get_full_repo_path(repo_info: RepoInfo):
         result = os.path.join(
             repo_info.path,
             repo_info.folder
         )
+        if repo_info.is_remote:
+            result = urljoin(
+                repo_info.path + '/',
+                repo_info.folder,
+            )
         return result

     @staticmethod
     def _warning_callback(warning_type, message):
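A side note on the repo_arches/addon_repos bootstrap in __init__ above, as a hedged illustration (assuming a fresh interpreter): after one instantiation each variant arch accepts itself, noarch, and its 32-bit addon arches. Because repo_arches is a class attribute, a second instantiation appends the same entries again, which is harmless here since get_package_arch() only does membership tests:

pg = PackagesGenerator(variants=[], excluded_packages=[], included_packages=[])
assert pg.repo_arches['x86_64'] == ['noarch', 'i686', 'x86_64']
assert pg.repo_arches['aarch64'] == ['noarch', 'aarch64']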
@@ -68,8 +149,7 @@ class PackagesGenerator:
         print(f'Warning message: "{message}"; warning type: "{warning_type}"')
         return True

-    @staticmethod
-    def _get_remote_file_content(file_url: AnyStr) -> AnyStr:
+    def get_remote_file_content(self, file_url: AnyStr) -> AnyStr:
         """
         Get content from a remote file and write it to a temp file
         :param file_url: url of a remote file
@@ -82,89 +162,27 @@ class PackagesGenerator:
         file_request.raise_for_status()
         with tempfile.NamedTemporaryFile(delete=False) as file_stream:
             file_stream.write(file_request.content)
+            self.tmp_files.append(Path(file_stream.name))
             return file_stream.name

     @staticmethod
-    def _parse_repomd(repomd_file_path: AnyStr) -> cr.Repomd:
+    def _parse_repomd(repomd_file_path: AnyStr) -> Repomd:
         """
         Parse file repomd.xml and create object Repomd
         :param repomd_file_path: path to local repomd.xml
         """
-        return cr.Repomd(repomd_file_path)
-
-    def _parse_primary_file(
-        self,
-        primary_file_path: AnyStr,
-        packages: Dict[AnyStr, cr.Package],
-    ) -> None:
-        """
-        Parse primary.xml.gz, take from it info about packages and put it to
-        dict packages
-        :param primary_file_path: path to local primary.xml.gz
-        :param packages: dictionary which will be contain info about packages
-                         from repository
-        """
-        cr.xml_parse_primary(
-            path=primary_file_path,
-            pkgcb=lambda pkg: packages.update({
-                pkg.pkgId: pkg,
-            }),
-            do_files=False,
-            warningcb=self._warning_callback,
-        )
-
-    def _parse_filelists_file(
-        self,
-        filelists_file_path: AnyStr,
-        packages: Dict[AnyStr, cr.Package],
-    ) -> None:
-        """
-        Parse filelists.xml.gz, take from it info about packages and put it to
-        dict packages
-        :param filelists_file_path: path to local filelists.xml.gz
-        :param packages: dictionary which will be contain info about packages
-                         from repository
-        """
-        cr.xml_parse_filelists(
-            path=filelists_file_path,
-            newpkgcb=lambda pkg_id, name, arch: packages.get(
-                pkg_id,
-                None,
-            ),
-            warningcb=self._warning_callback,
-        )
-
-    def _parse_other_file(
-        self,
-        other_file_path: AnyStr,
-        packages: Dict[AnyStr, cr.Package],
-    ) -> None:
-        """
-        Parse other.xml.gz, take from it info about packages and put it to
-        dict packages
-        :param other_file_path: path to local other.xml.gz
-        :param packages: dictionary which will be contain info about packages
-                         from repository
-        """
-        cr.xml_parse_other(
-            path=other_file_path,
-            newpkgcb=lambda pkg_id, name, arch: packages.get(
-                pkg_id,
-                None,
-            ),
-            warningcb=self._warning_callback,
-        )
+        return Repomd(repomd_file_path)

     @classmethod
     def _parse_modules_file(
         cls,
         modules_file_path: AnyStr,
-    ) -> List[Dict]:
+    ) -> Iterator[Any]:
         """
         Parse modules.yaml.gz and returns parsed data
         :param modules_file_path: path to local modules.yaml.gz
-        :return: List of dict for an each modules in a repo
+        :return: List of dict for each module in a repo
         """

         with open(modules_file_path, 'rb') as modules_file:
@@ -181,7 +199,7 @@ class PackagesGenerator:
     def _get_repomd_records(
         self,
         repo_info: RepoInfo,
-    ) -> List[cr.RepomdRecord]:
+    ) -> List[RepomdRecord]:
         """
         Get, parse file repomd.xml and extract from it repomd records
         :param repo_info: structure which contains info about a current repo
@@ -194,36 +212,37 @@ class PackagesGenerator:
             'repomd.xml',
         )
         if repo_info.is_remote:
-            repomd_file_path = self._get_remote_file_content(repomd_file_path)
-        else:
-            repomd_file_path = repomd_file_path
+            repomd_file_path = urljoin(
+                urljoin(
+                    repo_info.path + '/',
+                    repo_info.folder
+                ) + '/',
+                'repodata/repomd.xml'
+            )
+            repomd_file_path = self.get_remote_file_content(repomd_file_path)

         repomd_object = self._parse_repomd(repomd_file_path)
-        if repo_info.is_remote:
-            os.remove(repomd_file_path)
         return repomd_object.records

-    def _parse_repomd_records(
+    def _download_repomd_records(
         self,
         repo_info: RepoInfo,
-        repomd_records: List[cr.RepomdRecord],
-        packages: Dict[AnyStr, cr.Package],
-    ) -> Optional[List[Dict]]:
+        repomd_records: List[RepomdRecord],
+        repomd_records_dict: Dict[str, str],
+    ):
         """
-        Parse repomd records and extract from repodata file info about packages
+        Download repomd records
         :param repo_info: structure which contains info about a current repo
         :param repomd_records: list with repomd records
-        :param packages: dictionary which will be contain info about packages
-                         from repository
-        :return: List of dict for an each modules in a repo if it contains
-                 modules info otherwise returns None
+        :param repomd_records_dict: dict with paths to repodata files
         """
-        modules_data = []
         for repomd_record in repomd_records:
             if repomd_record.type not in (
                 'primary',
                 'filelists',
                 'other',
-                'modules',
             ):
                 continue
             repomd_record_file_path = os.path.join(
@@ -232,25 +251,35 @@ class PackagesGenerator:
                 repomd_record.location_href,
             )
             if repo_info.is_remote:
-                repomd_record_file_path = self._get_remote_file_content(
-                    repomd_record_file_path,
-                )
-            if repomd_record.type == 'modules':
-                modules_data = self._parse_modules_file(
-                    repomd_record_file_path,
-                )
-            else:
-                parse_file_method = getattr(
-                    self,
-                    f'_parse_{repomd_record.type}_file'
-                )
-                parse_file_method(
-                    repomd_record_file_path,
-                    packages,
-                )
-        if repo_info.is_remote:
-            os.remove(repomd_record_file_path)
-        return list(modules_data)
+                repomd_record_file_path = self.get_remote_file_content(
+                    repomd_record_file_path)
+            repomd_records_dict[repomd_record.type] = repomd_record_file_path
+
+    def _parse_module_repomd_record(
+        self,
+        repo_info: RepoInfo,
+        repomd_records: List[RepomdRecord],
+    ) -> List[Dict]:
+        """
+        Download repomd records
+        :param repo_info: structure which contains info about a current repo
+        :param repomd_records: list with repomd records
+        """
+        for repomd_record in repomd_records:
+            if repomd_record.type != 'modules':
+                continue
+            repomd_record_file_path = os.path.join(
+                repo_info.path,
+                repo_info.folder,
+                repomd_record.location_href,
+            )
+            if repo_info.is_remote:
+                repomd_record_file_path = self.get_remote_file_content(
+                    repomd_record_file_path)
+            return list(self._parse_modules_file(
+                repomd_record_file_path,
+            ))
+        return []

     @staticmethod
     def compare_pkgs_version(package_1: Package, package_2: Package) -> int:
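The split above prepares for streaming parsing: instead of loading every package into a dict via the removed _parse_*_file callbacks, the repodata files are only downloaded and later fed to createrepo_c's PackageIterator (used by get_packages_iterator in the next hunk). A minimal standalone sketch with assumed local paths:

import createrepo_c as cr

# Stream packages by joining primary, filelists and other metadata.
for pkg in cr.PackageIterator(
    primary_path='repodata/primary.xml.gz',
    filelists_path='repodata/filelists.xml.gz',
    other_path='repodata/other.xml.gz',
):
    print(pkg.name, pkg.arch)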
@@ -266,183 +295,162 @@ class PackagesGenerator:
         )
         return rpm.labelCompare(version_tuple_1, version_tuple_2)

+    def get_packages_iterator(
+        self,
+        repo_info: RepoInfo,
+    ) -> Union[PackageIterator, Iterator]:
+        full_repo_path = self._get_full_repo_path(repo_info)
+        pkgs_iterator = self.pkgs.get(full_repo_path)
+        if pkgs_iterator is None:
+            repomd_records = self._get_repomd_records(
+                repo_info=repo_info,
+            )
+            repomd_records_dict = {}  # type: Dict[str, str]
+            self._download_repomd_records(
+                repo_info=repo_info,
+                repomd_records=repomd_records,
+                repomd_records_dict=repomd_records_dict,
+            )
+            pkgs_iterator = PackageIterator(
+                primary_path=repomd_records_dict['primary'],
+                filelists_path=repomd_records_dict['filelists'],
+                other_path=repomd_records_dict['other'],
+                warningcb=self._warning_callback,
+            )
+        pkgs_iterator, self.pkgs[full_repo_path] = tee(pkgs_iterator)
+        return pkgs_iterator
+
+    def get_package_arch(
+        self,
+        package: Package,
+        variant_arch: str,
+    ) -> str:
+        result = variant_arch
+        if package.arch in self.repo_arches[variant_arch]:
+            result = package.arch
+        return result
+
+    def is_skipped_module_package(
+        self,
+        package: Package,
+        variant_arch: str,
+    ) -> bool:
+        package_key = self.get_package_key(package, variant_arch)
+        # Even a module package will be added to packages.json if
+        # it presents in the list of included packages
+        return 'module' in package.release and not any(
+            re.search(
+                f'^{included_pkg}$',
+                package_key,
+            ) or included_pkg in (package.name, package_key)
+            for included_pkg in self.included_packages
+        )
+
+    def is_excluded_package(
+        self,
+        package: Package,
+        variant_arch: str,
+        excluded_packages: List[str],
+    ) -> bool:
+        package_key = self.get_package_key(package, variant_arch)
+        return any(
+            re.search(
+                f'^{excluded_pkg}$',
+                package_key,
+            ) or excluded_pkg in (package.name, package_key)
+            for excluded_pkg in excluded_packages
+        )
+
+    @staticmethod
+    def get_source_rpm_name(package: Package) -> str:
+        source_rpm_nvra = parse_nvra(package.rpm_sourcerpm)
+        return source_rpm_nvra['name']
+
+    def get_package_key(self, package: Package, variant_arch: str) -> str:
+        return (
+            f'{package.name}.'
+            f'{self.get_package_arch(package, variant_arch)}'
+        )
+
     def generate_packages_json(
         self
     ) -> Dict[AnyStr, Dict[AnyStr, Dict[AnyStr, List[AnyStr]]]]:
         """
         Generate packages.json
         """
-        packages_json = defaultdict(
-            lambda: defaultdict(
-                lambda: defaultdict(
-                    list,
-                )
-            )
-        )
-        all_packages = defaultdict(lambda: {'variants': list()})
-        for repo_info in self.repos:
-            repo_arches = [
-                repo_info.arch,
-                'noarch',
-            ]
-            if repo_info.arch == 'x86_64':
-                repo_arches.extend([
-                    'i686',
-                    'i386',
-                ])
-            packages = {}  # type: Dict[AnyStr, cr.Package]
-            repomd_records = self._get_repomd_records(
-                repo_info=repo_info,
-            )
-            self._parse_repomd_records(
-                repo_info=repo_info,
-                repomd_records=repomd_records,
-                packages=packages,
-            )
-            for package in packages.values():
-                if package.arch not in repo_arches:
-                    package_arch = repo_info.arch
-                else:
-                    package_arch = package.arch
-                package_key = f'{package.name}.{package_arch}'
-                if 'module' in package.release and not any(
-                        re.search(included_package, package.name)
-                        for included_package in self.included_packages
-                ):
-                    # Even a module package will be added to packages.json if
-                    # it presents in the list of included packages
-                    continue
-                if package_key not in all_packages:
-                    all_packages[package_key]['variants'].append(
-                        repo_info.name
-                    )
-                    all_packages[package_key]['arch'] = repo_info.arch
-                    all_packages[package_key]['package'] = package
-                    all_packages[package_key]['type'] = repo_info.is_reference
-                # replace an older package if it's not reference or
-                # a newer package is from reference repo
-                elif (not all_packages[package_key]['type'] or
-                        all_packages[package_key]['type'] ==
-                        repo_info.is_reference) and \
-                        self.compare_pkgs_version(
-                            package,
-                            all_packages[package_key]['package']
-                        ) > 0:
-                    all_packages[package_key]['variants'] = [repo_info.name]
-                    all_packages[package_key]['arch'] = repo_info.arch
-                    all_packages[package_key]['package'] = package
-                elif self.compare_pkgs_version(
-                        package,
-                        all_packages[package_key]['package']
-                ) == 0:
-                    all_packages[package_key]['variants'].append(
-                        repo_info.name
-                    )
-
-        for package_dict in all_packages.values():
-            repo_arches = [
-                package_dict['arch'],
-                'noarch',
-            ]
-            if package_dict['arch'] == 'x86_64':
-                repo_arches.extend([
-                    'i686',
-                    'i386',
-                ])
-            for variant in package_dict['variants']:
-                repo_arch = package_dict['arch']
-                package = package_dict['package']
-                package_name = package.name
-                if package.arch not in repo_arches:
-                    package_arch = package_dict['arch']
-                else:
-                    package_arch = package.arch
-                if any(re.search(excluded_package, package_name)
-                       for excluded_package in self.excluded_packages):
-                    continue
-                src_package_name = dnf.subject.Subject(
-                    package.rpm_sourcerpm,
-                ).get_nevra_possibilities(
-                    forms=hawkey.FORM_NEVRA,
-                )
-                if len(src_package_name) > 1:
-                    # We should stop utility if we can't get exact name of srpm
-                    raise ValueError(
-                        'We can\'t get exact name of srpm '
-                        f'by its NEVRA "{package.rpm_sourcerpm}"'
-                    )
-                else:
-                    src_package_name = src_package_name[0].name
-                pkgs_list = packages_json[variant][
-                    repo_arch][src_package_name]
-                added_pkg = f'{package_name}.{package_arch}'
-                if added_pkg not in pkgs_list:
-                    pkgs_list.append(added_pkg)
-        return packages_json
+        packages = defaultdict(lambda: defaultdict(lambda: {
+            'variants': list(),
+        }))
+        for variant_info in self.variants:
+            for repo_info in variant_info.repos:
+                is_reference = repo_info.is_reference
+                for package in self.get_packages_iterator(repo_info=repo_info):
+                    if self.is_skipped_module_package(
+                        package=package,
+                        variant_arch=variant_info.arch,
+                    ):
+                        # Even a module package will be added to packages.json
+                        # if it presents in the list of included packages
+                        continue
+                    if self.is_excluded_package(
+                        package=package,
+                        variant_arch=variant_info.arch,
+                        excluded_packages=self.excluded_packages,
+                    ):
+                        continue
+                    if self.is_excluded_package(
+                        package=package,
+                        variant_arch=variant_info.arch,
+                        excluded_packages=variant_info.excluded_packages,
+                    ):
+                        continue
+                    package_key = self.get_package_key(
+                        package,
+                        variant_info.arch,
+                    )
+                    source_rpm_name = self.get_source_rpm_name(package)
+                    package_info = packages[source_rpm_name][package_key]
+                    if 'is_reference' not in package_info:
+                        package_info['variants'].append(variant_info.name)
+                        package_info['is_reference'] = is_reference
+                        package_info['package'] = package
+                    elif not package_info['is_reference'] or \
+                            package_info['is_reference'] == is_reference and \
+                            self.compare_pkgs_version(
+                                package_1=package,
+                                package_2=package_info['package'],
+                            ) > 0:
+                        package_info['variants'] = [variant_info.name]
+                        package_info['is_reference'] = is_reference
+                        package_info['package'] = package
+                    elif self.compare_pkgs_version(
+                        package_1=package,
+                        package_2=package_info['package'],
+                    ) == 0 and repo_info.repo_type != 'absent':
+                        package_info['variants'].append(variant_info.name)
+        result = defaultdict(lambda: defaultdict(
+            lambda: defaultdict(list),
+        ))
+        for variant_info in self.variants:
+            for source_rpm_name, packages_info in packages.items():
+                for package_key, package_info in packages_info.items():
+                    variant_pkgs = result[variant_info.name][variant_info.arch]
+                    if variant_info.name not in package_info['variants']:
+                        continue
+                    variant_pkgs[source_rpm_name].append(package_key)
+        return result


 def create_parser():
     parser = argparse.ArgumentParser()
-    parser.add_argument(
-        '--repo-path',
-        action='append',
-        help='Path to a folder with repofolders. E.g. "/var/repos" or '
-             '"http://koji.cloudlinux.com/mirrors/rhel_mirror"',
-        required=True,
-    )
-    parser.add_argument(
-        '--repo-folder',
-        action='append',
-        help='A folder which contains folder repodata . E.g. "baseos-stream"',
-        required=True,
-    )
-    parser.add_argument(
-        '--repo-arch',
-        action='append',
-        help='What architecture packages a repository contains. E.g. "x86_64"',
-        required=True,
-    )
-    parser.add_argument(
-        '--repo-name',
-        action='append',
-        help='Name of a repository. E.g. "AppStream"',
-        required=True,
-    )
-    parser.add_argument(
-        '--is-remote',
-        action='append',
-        type=str,
-        help='A repository is remote or local',
-        choices=['yes', 'no'],
-        required=True,
-    )
-    parser.add_argument(
-        '--is-reference',
-        action='append',
-        type=str,
-        help='A repository is used as reference for packages layout',
-        choices=['yes', 'no'],
-        required=True,
-    )
-    parser.add_argument(
-        '--excluded-packages',
-        nargs='+',
-        type=str,
-        default=[],
-        help='A list of globally excluded packages from generated json.'
-             'All of list elements should be separated by space',
-        required=False,
-    )
-    parser.add_argument(
-        '--included-packages',
-        nargs='+',
-        type=str,
-        default=[],
-        help='A list of globally included packages from generated json.'
-             'All of list elements should be separated by space',
-        required=False,
-    )
+    parser.add_argument(
+        '-c',
+        '--config',
+        type=Path,
+        default=Path('config.yaml'),
+        required=False,
+        help='Path to a config',
+    )
     parser.add_argument(
         '-o',
         '--json-output-path',
         type=str,
         help='Full path to output json file',
@@ -452,30 +460,45 @@ def create_parser():
     return parser


+def read_config(config_path: Path) -> Optional[Dict]:
+    if not config_path.exists():
+        logging.error('A config by path "%s" does not exist', config_path)
+        exit(1)
+    with config_path.open('r') as config_fd:
+        return yaml.safe_load(config_fd)
+
+
+def process_config(config_data: Dict) -> Tuple[
+    List[VariantInfo],
+    List[str],
+    List[str],
+]:
+    excluded_packages = config_data.get('excluded_packages', [])
+    included_packages = config_data.get('included_packages', [])
+    variants = [VariantInfo(
+        name=variant_name,
+        arch=variant_info['arch'],
+        excluded_packages=variant_info.get('excluded_packages', []),
+        repos=[RepoInfo(
+            path=variant_repo['path'],
+            folder=variant_repo['folder'],
+            is_remote=variant_repo['remote'],
+            is_reference=variant_repo['reference'],
+            repo_type=variant_repo.get('repo_type', 'present'),
+        ) for variant_repo in variant_info['repos']]
+    ) for variant_name, variant_info in config_data['variants'].items()]
+    return variants, excluded_packages, included_packages
+
+
 def cli_main():
     args = create_parser().parse_args()
-    repos = []
-    for repo_path, repo_folder, repo_name, \
-            repo_arch, is_remote, is_reference in zip(
-                args.repo_path,
-                args.repo_folder,
-                args.repo_name,
-                args.repo_arch,
-                args.is_remote,
-                args.is_reference,
-            ):
-        repos.append(RepoInfo(
-            path=repo_path,
-            folder=repo_folder,
-            name=repo_name,
-            arch=repo_arch,
-            is_remote=True if is_remote == 'yes' else False,
-            is_reference=True if is_reference == 'yes' else False
-        ))
+    variants, excluded_packages, included_packages = process_config(
+        config_data=read_config(args.config)
+    )
     pg = PackagesGenerator(
-        repos=repos,
-        excluded_packages=args.excluded_packages,
-        included_packages=args.included_packages,
+        variants=variants,
+        excluded_packages=excluded_packages,
+        included_packages=included_packages,
     )
     result = pg.generate_packages_json()
     with open(args.json_output_path, 'w') as packages_file:

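For reference, a hypothetical config.yaml that satisfies the keys read by process_config() above (the variant name, folder and URL are assumptions, not taken from the diff):

import yaml

config_data = yaml.safe_load("""
excluded_packages:
  - badpkg*
included_packages: []
variants:
  BaseOS:
    arch: x86_64
    excluded_packages: []
    repos:
      - path: http://example.com/repos
        folder: baseos
        remote: true
        reference: false
        repo_type: present
""")

variants, excluded_packages, included_packages = process_config(config_data)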
@@ -16,7 +16,10 @@ def parse_args():
     parser = argparse.ArgumentParser(add_help=True)

     parser.add_argument(
-        "compose", metavar="<compose-path>", nargs=1, help="path to compose",
+        "compose",
+        metavar="<compose-path>",
+        nargs=1,
+        help="path to compose",
     )
     parser.add_argument(
         "--arch",

@@ -14,6 +14,9 @@ def send(cmd, data):
     topic = "compose.%s" % cmd.replace("-", ".").lower()
     try:
         msg = fedora_messaging.api.Message(topic="pungi.{}".format(topic), body=data)
+        if cmd == "ostree":
+            # https://pagure.io/fedora-infrastructure/issue/10899
+            msg.priority = 3
         fedora_messaging.api.publish(msg)
     except fedora_messaging.exceptions.PublishReturned as e:
         print("Fedora Messaging broker rejected message %s: %s" % (msg.id, e))

@@ -1,70 +1,150 @@
-import binascii
 import gzip
 import lzma
 import os
 from argparse import ArgumentParser, FileType
+from glob import iglob
 from io import BytesIO
 from pathlib import Path
-from typing import List, AnyStr
+from typing import List, AnyStr, Iterable, Union, Optional
 import logging
+from urllib.parse import urljoin

 import yaml
 import createrepo_c as cr
 from typing.io import BinaryIO

-
-def _is_compressed_file(first_two_bytes: bytes, initial_bytes: bytes):
-    return binascii.hexlify(first_two_bytes) == initial_bytes
-
-
-def is_gzip_file(first_two_bytes):
-    return _is_compressed_file(
-        first_two_bytes=first_two_bytes,
-        initial_bytes=b'1f8b',
-    )
-
-
-def is_xz_file(first_two_bytes):
-    return _is_compressed_file(
-        first_two_bytes=first_two_bytes,
-        initial_bytes=b'fd37',
-    )
+from .create_packages_json import PackagesGenerator, is_gzip_file, is_xz_file
+
+EMPTY_FILE = '.empty'
+
+
+def read_modules_yaml(modules_yaml_path: Union[str, Path]) -> BytesIO:
+    with open(modules_yaml_path, 'rb') as fp:
+        return BytesIO(fp.read())


-def grep_list_of_modules_yaml_gz(repo_path: AnyStr) -> List[BytesIO]:
+def grep_list_of_modules_yaml(repos_path: AnyStr) -> Iterable[BytesIO]:
     """
     Find all of valid *modules.yaml.gz in repos
-    :param repo_path: path to a directory which contains repodirs
-    :return: list of content from *modules.yaml.gz
+    :param repos_path: path to a directory which contains repo dirs
+    :return: iterable object of content from *modules.yaml.*
     """

-    result = []
-    for path in Path(repo_path).rglob('repomd.xml'):
-        repo_dir_path = Path(path.parent).parent
-        repomd_obj = cr.Repomd(str(path))
-        for record in repomd_obj.records:
-            if record.type != 'modules':
-                continue
-            with open(os.path.join(
-                repo_dir_path,
-                record.location_href,
-            ), 'rb') as fp:
-                result.append(
-                    BytesIO(fp.read())
-                )
-    return result
+    return (
+        read_modules_yaml_from_specific_repo(repo_path=Path(path).parent)
+        for path in iglob(
+            str(Path(repos_path).joinpath('**/repodata')),
+            recursive=True
+        )
+    )
+
+
+def _is_remote(path: str):
+    return any(str(path).startswith(protocol)
+               for protocol in ('http', 'https'))
+
+
+def read_modules_yaml_from_specific_repo(
+        repo_path: Union[str, Path]
+) -> Optional[BytesIO]:
+    """
+    Read modules_yaml from a specific repo (remote or local)
+    :param repo_path: path/url to a specific repo
+           (final dir should contain dir `repodata`)
+    :return: iterable object of content from *modules.yaml.*
+    """
+
+    if _is_remote(repo_path):
+        repomd_url = urljoin(
+            repo_path + '/',
+            'repodata/repomd.xml',
+        )
+        packages_generator = PackagesGenerator(
+            variants=[],
+            excluded_packages=[],
+            included_packages=[],
+        )
+        repomd_file_path = packages_generator.get_remote_file_content(
+            file_url=repomd_url
+        )
+    else:
+        repomd_file_path = os.path.join(
+            repo_path,
+            'repodata/repomd.xml',
+        )
+    repomd_obj = cr.Repomd(str(repomd_file_path))
+    for record in repomd_obj.records:
+        if record.type != 'modules':
+            continue
+        if _is_remote(repo_path):
+            modules_yaml_url = urljoin(
+                repo_path + '/',
+                record.location_href,
+            )
+            packages_generator = PackagesGenerator(
+                variants=[],
+                excluded_packages=[],
+                included_packages=[],
+            )
+            modules_yaml_path = packages_generator.get_remote_file_content(
+                file_url=modules_yaml_url
+            )
+        else:
+            modules_yaml_path = os.path.join(
+                repo_path,
+                record.location_href,
+            )
+        return read_modules_yaml(modules_yaml_path=modules_yaml_path)
+    else:
+        return None
+
+
+def _should_grep_defaults(
+        document_type: str,
+        grep_only_modules_data: bool = False,
+        grep_only_modules_defaults_data: bool = False,
+) -> bool:
+    xor_flag = grep_only_modules_data == grep_only_modules_defaults_data
+    if document_type == 'modulemd' and (xor_flag or grep_only_modules_data):
+        return True
+    return False
+
+
+def _should_grep_modules(
+        document_type: str,
+        grep_only_modules_data: bool = False,
+        grep_only_modules_defaults_data: bool = False,
+) -> bool:
+    xor_flag = grep_only_modules_data == grep_only_modules_defaults_data
+    if document_type == 'modulemd-defaults' and \
+            (xor_flag or grep_only_modules_defaults_data):
+        return True
+    return False


-def collect_modules(modules_paths: List[BinaryIO], target_dir: str):
+def collect_modules(
+        modules_paths: List[BinaryIO],
+        target_dir: str,
+        grep_only_modules_data: bool = False,
+        grep_only_modules_defaults_data: bool = False,
+):
     """
     Read given modules.yaml.gz files and export modules
     and modulemd files from it.
     Returns:
         object:
     """
+    xor_flag = grep_only_modules_defaults_data is grep_only_modules_data
     modules_path = os.path.join(target_dir, 'modules')
     module_defaults_path = os.path.join(target_dir, 'module_defaults')
-    os.makedirs(modules_path, exist_ok=True)
-    os.makedirs(module_defaults_path, exist_ok=True)
+    if grep_only_modules_data or xor_flag:
+        os.makedirs(modules_path, exist_ok=True)
+    if grep_only_modules_defaults_data or xor_flag:
+        os.makedirs(module_defaults_path, exist_ok=True)
+    # Defaults modules can be empty, but pungi detects
+    # empty folder while copying and raises the exception in this case
+    Path(os.path.join(module_defaults_path, EMPTY_FILE)).touch()

     for module_file in modules_paths:
         data = module_file.read()
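How the xor_flag gating in the two _should_grep helpers above plays out, as a sketch reflecting the code as written (note that _should_grep_defaults matches 'modulemd' documents and _should_grep_modules matches 'modulemd-defaults', despite what the names suggest): with neither or both flags set everything is collected, otherwise only the requested document kind.

# Neither flag set: both document kinds pass.
assert _should_grep_defaults('modulemd')
assert _should_grep_modules('modulemd-defaults')

# Only defaults requested: modulemd documents are filtered out.
assert not _should_grep_defaults(
    'modulemd', grep_only_modules_defaults_data=True)
assert _should_grep_modules(
    'modulemd-defaults', grep_only_modules_defaults_data=True)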
@@ -74,11 +154,20 @@ def collect_modules(modules_paths: List[BinaryIO], target_dir: str):
             data = lzma.decompress(data)
         documents = yaml.load_all(data, Loader=yaml.BaseLoader)
         for doc in documents:
-            if doc['document'] == 'modulemd-defaults':
+            path = None
+            if _should_grep_modules(
+                    doc['document'],
+                    grep_only_modules_data,
+                    grep_only_modules_defaults_data,
+            ):
                 name = f"{doc['data']['module']}.yaml"
                 path = os.path.join(module_defaults_path, name)
                 logging.info('Found %s module defaults', name)
-            else:
+            elif _should_grep_defaults(
+                    doc['document'],
+                    grep_only_modules_data,
+                    grep_only_modules_defaults_data,
+            ):
                 # pungi.phases.pkgset.sources.source_koji.get_koji_modules
                 stream = doc['data']['stream'].replace('-', '_')
                 doc_data = doc['data']
@@ -100,13 +189,24 @@ def collect_modules(modules_paths: List[BinaryIO], target_dir: str):
                     'RPM %s does not have explicit list of artifacts',
                     name
                 )

+            if path is not None:
                 with open(path, 'w') as f:
                     yaml.dump(doc, f, default_flow_style=False)


 def cli_main():
     parser = ArgumentParser()
+    content_type_group = parser.add_mutually_exclusive_group(required=False)
+    content_type_group.add_argument(
+        '--get-only-modules-data',
+        action='store_true',
+        help='Parse and get only modules data',
+    )
+    content_type_group.add_argument(
+        '--get-only-modules-defaults-data',
+        action='store_true',
+        help='Parse and get only modules_defaults data',
+    )
+    path_group = parser.add_mutually_exclusive_group(required=True)
-    parser.add_argument(
+    path_group.add_argument(
         '-p', '--path',
@@ -121,16 +221,33 @@ def cli_main():
         default=None,
         help='Path to a directory which contains repodirs. E.g. /var/repos'
     )
+    path_group.add_argument(
+        '-rd', '--repodata-paths',
+        required=False,
+        type=str,
+        nargs='+',
+        default=[],
+        help='Paths/urls to the directories with directory `repodata`',
+    )
     parser.add_argument('-t', '--target', required=True)

     namespace = parser.parse_args()
-    if namespace.repo_path is None:
+    if namespace.repodata_paths:
+        modules = []
+        for repodata_path in namespace.repodata_paths:
+            modules.append(read_modules_yaml_from_specific_repo(
+                repodata_path,
+            ))
+    elif namespace.path is not None:
         modules = namespace.path
     else:
-        modules = grep_list_of_modules_yaml_gz(namespace.repo_path)
+        modules = grep_list_of_modules_yaml(namespace.repo_path)
+    modules = list(filter(lambda i: i is not None, modules))
     collect_modules(
         modules,
         namespace.target,
+        namespace.get_only_modules_data,
+        namespace.get_only_modules_defaults_data,
     )

@@ -1,39 +1,53 @@
+import re
 from argparse import ArgumentParser

 import os
+from glob import iglob
 from typing import List
+from pathlib import Path

-from attr import dataclass
+from dataclasses import dataclass
 from productmd.common import parse_nvra


 @dataclass
 class Package:
-    nvra: str
-    path: str
+    nvra: dict
+    path: Path


-def search_rpms(top_dir) -> List[Package]:
+def search_rpms(top_dir: Path) -> List[Package]:
     """
     Search for all *.rpm files recursively
     in given top directory
     Returns:
         list: list of paths
     """
-    rpms = []
-    for root, dirs, files in os.walk(top_dir):
-        path = root.split(os.sep)
-        for file in files:
-            if not file.endswith('.rpm'):
-                continue
-            nvra, _ = os.path.splitext(file)
-            rpms.append(
-                Package(nvra=nvra, path=os.path.join('/', *path, file))
-            )
-    return rpms
+    return [Package(
+        nvra=parse_nvra(Path(path).stem),
+        path=Path(path),
+    ) for path in iglob(str(top_dir.joinpath('**/*.rpm')), recursive=True)]
+
+
+def is_excluded_package(
+        package: Package,
+        excluded_packages: List[str],
+) -> bool:
+    package_key = f'{package.nvra["name"]}.{package.nvra["arch"]}'
+    return any(
+        re.search(
+            f'^{excluded_pkg}$',
+            package_key,
+        ) or excluded_pkg in (package.nvra['name'], package_key)
+        for excluded_pkg in excluded_packages
+    )


-def copy_rpms(packages: List[Package], target_top_dir: str):
+def copy_rpms(
+        packages: List[Package],
+        target_top_dir: Path,
+        excluded_packages: List[str],
+):
     """
     Search synced repos for rpms and prepare
     koji-like structure for pungi
@@ -45,30 +59,37 @@ def copy_rpms(packages: List[Package], target_top_dir: Path):
         Nothing:
     """
     for package in packages:
-        info = parse_nvra(package.nvra)
-
-        target_arch_dir = os.path.join(target_top_dir, info['arch'])
+        if is_excluded_package(package, excluded_packages):
+            continue
+        target_arch_dir = target_top_dir.joinpath(package.nvra['arch'])
+        target_file = target_arch_dir.joinpath(package.path.name)
         os.makedirs(target_arch_dir, exist_ok=True)

-        target_file = os.path.join(target_arch_dir, os.path.basename(package.path))
-
-        if not os.path.exists(target_file):
+        if not target_file.exists():
             try:
                 os.link(package.path, target_file)
             except OSError:
                 # hardlink failed, try symlinking
-                os.symlink(package.path, target_file)
+                package.path.symlink_to(target_file)


 def cli_main():
     parser = ArgumentParser()
-    parser.add_argument('-p', '--path', required=True)
-    parser.add_argument('-t', '--target', required=True)
+    parser.add_argument('-p', '--path', required=True, type=Path)
+    parser.add_argument('-t', '--target', required=True, type=Path)
+    parser.add_argument(
+        '-e',
+        '--excluded-packages',
+        required=False,
+        nargs='+',
+        type=str,
+        default=[],
+    )

     namespace = parser.parse_args()

     rpms = search_rpms(namespace.path)
-    copy_rpms(rpms, namespace.target)
+    copy_rpms(rpms, namespace.target, namespace.excluded_packages)


 if __name__ == '__main__':

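One detail worth a second look in the copy_rpms() hunk above: pathlib's symlink_to() makes the path it is called on become the link, pointing at its argument, so package.path.symlink_to(target_file) inverts the direction of the old os.symlink(package.path, target_file) fallback. A sketch of the pathlib call that preserves the old behaviour (the paths are invented):

import os
from pathlib import Path

src = Path('/repos/x86_64/bash-5.1-1.x86_64.rpm')  # existing package file
dst = Path('/koji/x86_64/bash-5.1-1.x86_64.rpm')   # link to create

try:
    os.link(src, dst)        # hardlink, as in the diff
except OSError:
    dst.symlink_to(src)      # dst becomes a symlink pointing at src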
@@ -319,7 +319,6 @@ def get_arguments(config):


 def main():
-
     config = pungi.config.Config()
     opts = get_arguments(config)

@@ -476,14 +475,14 @@ def main():
     else:
         mypungi.downloadSRPMs()

-    print("RPM size: %s MiB" % (mypungi.size_packages() / 1024 ** 2))
+    print("RPM size: %s MiB" % (mypungi.size_packages() / 1024**2))
     if not opts.nodebuginfo:
         print(
             "DEBUGINFO size: %s MiB"
-            % (mypungi.size_debuginfo() / 1024 ** 2)
+            % (mypungi.size_debuginfo() / 1024**2)
         )
     if not opts.nosource:
-        print("SRPM size: %s MiB" % (mypungi.size_srpms() / 1024 ** 2))
+        print("SRPM size: %s MiB" % (mypungi.size_srpms() / 1024**2))

     # Furthermore (but without the yumlock...)
     if not opts.sourceisos:

@@ -18,13 +18,18 @@ from pungi.util import temp_dir
 def get_parser():
     parser = argparse.ArgumentParser()
     parser.add_argument(
-        "--profiler", action="store_true",
+        "--profiler",
+        action="store_true",
     )
     parser.add_argument(
-        "--arch", required=True,
+        "--arch",
+        required=True,
     )
     parser.add_argument(
-        "--config", metavar="PATH", required=True, help="path to kickstart config file",
+        "--config",
+        metavar="PATH",
+        required=True,
+        help="path to kickstart config file",
     )
     parser.add_argument(
         "--download-to",
@@ -42,7 +47,9 @@ def get_parser():

     group = parser.add_argument_group("Gather options")
     group.add_argument(
-        "--nodeps", action="store_true", help="disable resolving dependencies",
+        "--nodeps",
+        action="store_true",
+        help="disable resolving dependencies",
     )
     group.add_argument(
         "--selfhosting",
@@ -61,7 +68,9 @@ def get_parser():
         choices=["none", "all", "build"],
     )
     group.add_argument(
-        "--multilib", metavar="[METHOD]", action="append",
+        "--multilib",
+        metavar="[METHOD]",
+        action="append",
     )
     group.add_argument(
         "--tempdir",

@@ -5,6 +5,7 @@ from __future__ import print_function

 import argparse
+import getpass
 import glob
 import json
 import locale
 import logging
@@ -20,6 +21,9 @@ from six.moves import shlex_quote

 from pungi.phases import PHASES_NAMES
 from pungi import get_full_version, util
+from pungi.errors import UnsignedPackagesError
+from pungi.wrappers import kojiwrapper
+from pungi.util import rmtree


 # force C locales
@@ -248,9 +252,15 @@ def main():
     kobo.log.add_stderr_logger(logger)

     conf = util.load_config(opts.config)

     compose_type = opts.compose_type or conf.get("compose_type", "production")
-    if compose_type == "production" and not opts.label and not opts.no_label:
+    label = opts.label or conf.get("label")
+    if label:
+        try:
+            productmd.composeinfo.verify_label(label)
+        except ValueError as ex:
+            abort(str(ex))
+
+    if compose_type == "production" and not label and not opts.no_label:
         abort("must specify label for a production compose")

     if (
@@ -262,14 +272,12 @@ def main():
     # check if all requirements are met
     import pungi.checks

-    if not pungi.checks.check(conf):
-        sys.exit(1)
     pungi.checks.check_umask(logger)
     if not pungi.checks.check_skip_phases(
         logger, opts.skip_phase + conf.get("skip_phases", []), opts.just_phase
     ):
         sys.exit(1)
-    errors, warnings = pungi.checks.validate(conf)
+    errors, warnings = pungi.checks.validate(conf, offline=True)

     if not opts.quiet:
         # TODO: workaround for config files containing skip_phase = productimg
@@ -294,9 +302,17 @@ def main():
         fail_to_start("Config validation failed", errors=errors)
         sys.exit(1)

+    if not pungi.checks.check(conf):
+        sys.exit(1)
+
     if opts.target_dir:
         compose_dir = Compose.get_compose_dir(
-            opts.target_dir, conf, compose_type=compose_type, compose_label=opts.label
+            opts.target_dir,
+            conf,
+            compose_type=compose_type,
+            compose_label=label,
+            parent_compose_ids=opts.parent_compose_id,
+            respin_of=opts.respin_of,
         )
     else:
         compose_dir = opts.compose_dir
@@ -305,7 +321,7 @@ def main():
         ci = Compose.get_compose_info(
             conf,
             compose_type=compose_type,
-            compose_label=opts.label,
+            compose_label=label,
             parent_compose_ids=opts.parent_compose_id,
             respin_of=opts.respin_of,
         )
@@ -325,14 +341,34 @@ def main():
         logger=logger,
         notifier=notifier,
     )

+    rv = Compose.update_compose_url(compose.compose_id, compose_dir, conf)
+    if rv and not rv.ok:
+        logger.error("CTS compose_url update failed with the error: %s" % rv.text)
+
+    errors, warnings = pungi.checks.validate(conf, offline=False)
+    if errors:
+        for error in errors:
+            logger.error("Config validation failed with the error: %s" % error)
+        fail_to_start("Config validation failed", errors=errors)
+        sys.exit(1)
+
     notifier.compose = compose
     COMPOSE = compose
-    run_compose(
-        compose,
-        create_latest_link=create_latest_link,
-        latest_link_status=latest_link_status,
-        latest_link_components=latest_link_components,
-    )
+    try:
+        run_compose(
+            compose,
+            create_latest_link=create_latest_link,
+            latest_link_status=latest_link_status,
+            latest_link_components=latest_link_components,
+        )
+    except UnsignedPackagesError:
+        # There was an unsigned package somewhere. It is not safe to reuse any
+        # package set from this compose (since we could leak the unsigned
+        # package). Let's make sure all reuse files are deleted.
+        for fp in glob.glob(compose.paths.work.pkgset_reuse_file("*")):
+            os.unlink(fp)
+        raise


 def run_compose(
@@ -354,6 +390,16 @@ def run_compose(
     )
     compose.log_info("Compose top directory: %s" % compose.topdir)
     compose.log_info("Current timezone offset: %s" % pungi.util.get_tz_offset())
+    compose.log_info("COMPOSE_ID=%s" % compose.compose_id)
+
+    installed_pkgs_log = compose.paths.log.log_file("global", "installed-pkgs")
+    compose.log_info("Logging installed packages to %s" % installed_pkgs_log)
+    try:
+        with open(installed_pkgs_log, "w") as f:
+            subprocess.Popen(["rpm", "-qa"], stdout=f)
+    except Exception as e:
+        compose.log_warning("Failed to log installed packages: %s" % str(e))
+
     compose.read_variants()

     # dump the config file
@@ -378,12 +424,13 @@ def run_compose(
     )
     ostree_phase = pungi.phases.OSTreePhase(compose, pkgset_phase)
     createiso_phase = pungi.phases.CreateisoPhase(compose, buildinstall_phase)
-    extra_isos_phase = pungi.phases.ExtraIsosPhase(compose)
+    extra_isos_phase = pungi.phases.ExtraIsosPhase(compose, buildinstall_phase)
     liveimages_phase = pungi.phases.LiveImagesPhase(compose)
     livemedia_phase = pungi.phases.LiveMediaPhase(compose)
-    image_build_phase = pungi.phases.ImageBuildPhase(compose)
+    image_build_phase = pungi.phases.ImageBuildPhase(compose, buildinstall_phase)
     osbuild_phase = pungi.phases.OSBuildPhase(compose)
-    osbs_phase = pungi.phases.OSBSPhase(compose)
+    osbs_phase = pungi.phases.OSBSPhase(compose, pkgset_phase, buildinstall_phase)
+    image_container_phase = pungi.phases.ImageContainerPhase(compose)
     image_checksum_phase = pungi.phases.ImageChecksumPhase(compose)
     repoclosure_phase = pungi.phases.RepoclosurePhase(compose)
     test_phase = pungi.phases.TestPhase(compose)
@@ -407,6 +454,7 @@ def run_compose(
         extra_isos_phase,
         osbs_phase,
         osbuild_phase,
+        image_container_phase,
     ):
         if phase.skip():
             continue
@@ -506,9 +554,12 @@ def run_compose(
        livemedia_phase,
        osbuild_phase,
    )
    post_image_phase = pungi.phases.WeaverPhase(
        compose, (image_checksum_phase, image_container_phase)
    )
    compose_images_phase = pungi.phases.WeaverPhase(compose, compose_images_schema)
    extra_phase_schema = (
        (compose_images_phase, image_checksum_phase),
        (compose_images_phase, post_image_phase),
        osbs_phase,
        repoclosure_phase,
    )
@@ -522,13 +573,14 @@ def run_compose(
        buildinstall_phase.skip()
        and ostree_installer_phase.skip()
        and createiso_phase.skip()
        and extra_isos_phase.skip()
        and liveimages_phase.skip()
        and livemedia_phase.skip()
        and image_build_phase.skip()
        and osbuild_phase.skip()
    ):
        compose.im.dump(compose.paths.compose.metadata("images.json"))
    osbs_phase.dump_metadata()
    compose.dump_containers_metadata()

    test_phase.start()
    test_phase.stop()
@@ -600,9 +652,25 @@ def try_kill_children(signal):
        COMPOSE.log_warning("Failed to kill all subprocesses")


def try_kill_koji_tasks():
    try:
        if COMPOSE:
            koji_tasks_dir = COMPOSE.paths.log.koji_tasks_dir(create_dir=False)
            if os.path.exists(koji_tasks_dir):
                COMPOSE.log_warning("Trying to kill koji tasks")
                koji = kojiwrapper.KojiWrapper(COMPOSE)
                koji.login()
                for task_id in os.listdir(koji_tasks_dir):
                    koji.koji_proxy.cancelTask(int(task_id))
    except Exception:
        if COMPOSE:
            COMPOSE.log_warning("Failed to kill koji tasks")


def sigterm_handler(signum, frame):
    if COMPOSE:
        try_kill_children(signum)
        try_kill_koji_tasks()
        COMPOSE.log_error("Compose run failed: signal %s" % signum)
        COMPOSE.log_error("Traceback:\n%s" % "\n".join(traceback.format_stack(frame)))
        COMPOSE.log_critical("Compose failed: %s" % COMPOSE.topdir)
@@ -622,18 +690,18 @@ def cli_main():
        main()
    except (Exception, KeyboardInterrupt) as ex:
        if COMPOSE:
            tb_path = COMPOSE.paths.log.log_file("global", "traceback")
            COMPOSE.log_error("Compose run failed: %s" % ex)
            COMPOSE.log_error("Extended traceback in: %s" % tb_path)
            COMPOSE.traceback(show_locals=getattr(ex, "show_locals", True))
            COMPOSE.log_critical("Compose failed: %s" % COMPOSE.topdir)
            COMPOSE.write_status("DOOMED")
            import kobo.tback

            with open(tb_path, "wb") as f:
                f.write(kobo.tback.Traceback().get_traceback())
        else:
            print("Exception: %s" % ex)
            raise
        sys.stdout.flush()
        sys.stderr.flush()
        sys.exit(1)
    finally:
        # Remove repositories cloned during ExtraFiles phase
        process_id = os.getpid()
        directoy_to_remove = "/tmp/pungi-temp-git-repos-" + str(process_id) + "/"
        rmtree(directoy_to_remove)

pungi/util.py

@@ -34,6 +34,7 @@ import kobo.conf
from kobo.shortcuts import run, force_list
from kobo.threads import WorkerThread, ThreadPool
from productmd.common import get_major_version
from pungi.module_util import Modulemd

# Patterns that match all names of debuginfo packages
DEBUG_PATTERNS = ["*-debuginfo", "*-debuginfo-*", "*-debugsource"]
@@ -278,7 +279,7 @@ class GitUrlResolveError(RuntimeError):
    pass


def resolve_git_ref(repourl, ref):
def resolve_git_ref(repourl, ref, credential_helper=None):
    """Resolve a reference in a Git repo to a commit.

    Raises RuntimeError if there was an error. Most likely cause is failure to

@@ -287,8 +288,13 @@ def resolve_git_ref(repourl, ref):
    if re.match(r"^[a-f0-9]{40}$", ref):
        # This looks like a commit ID already.
        return ref

    _, output = git_ls_remote(repourl, ref)
    try:
        _, output = git_ls_remote(repourl, ref, credential_helper)
    except RuntimeError as e:
        raise GitUrlResolveError(
            "ref does not exist in remote repo %s with the error %s %s"
            % (repourl, e, e.output)
        )

    lines = []
    for line in output.split("\n"):
@@ -310,7 +316,7 @@ def resolve_git_ref(repourl, ref):
    return lines[0].split()[0]


def resolve_git_url(url):
def resolve_git_url(url, credential_helper=None):
    """Given a url to a Git repo specifying HEAD or origin/<branch> as a ref,
    replace that specifier with actual SHA1 of the commit.


@@ -329,7 +335,7 @@ def resolve_git_url(url):
    scheme = r.scheme.replace("git+", "")

    baseurl = urllib.parse.urlunsplit((scheme, r.netloc, r.path, "", ""))
    fragment = resolve_git_ref(baseurl, ref)
    fragment = resolve_git_ref(baseurl, ref, credential_helper)

    result = urllib.parse.urlunsplit((r.scheme, r.netloc, r.path, r.query, fragment))
    if "?#" in url:
@@ -348,13 +354,18 @@ class GitUrlResolver(object):
        self.offline = offline
        self.cache = {}

    def __call__(self, url, branch=None):
    def __call__(self, url, branch=None, options=None):
        credential_helper = options.get("credential_helper") if options else None
        if self.offline:
            return branch or url
        key = (url, branch)
        if key not in self.cache:
            try:
                res = resolve_git_ref(url, branch) if branch else resolve_git_url(url)
                res = (
                    resolve_git_ref(url, branch, credential_helper)
                    if branch
                    else resolve_git_url(url, credential_helper)
                )
                self.cache[key] = res
            except GitUrlResolveError as exc:
                self.cache[key] = exc
@@ -450,6 +461,9 @@ def get_volid(compose, arch, variant=None, disc_type=False, formats=None, **kwargs):
        if not variant_uid and "%(variant)s" in i:
            continue
        try:
            # fmt: off
            # Black wants to add a comma after kwargs, but that's not valid in
            # Python 2.7
            args = get_format_substs(
                compose,
                variant=variant_uid,

@@ -461,6 +475,7 @@ def get_volid(compose, arch, variant=None, disc_type=False, formats=None, **kwargs):
                base_product_version=base_product_version,
                **kwargs
            )
            # fmt: on
            volid = (i % args).format(**args)
        except KeyError as err:
            raise RuntimeError(
@@ -963,7 +978,7 @@ def version_generator(compose, gen):


def retry(timeout=120, interval=30, wait_on=Exception):
    """ A decorator that allows to retry a section of code until success or
    """A decorator that allows to retry a section of code until success or
    timeout.
    """
@@ -985,8 +1000,12 @@ def retry(timeout=120, interval=30, wait_on=Exception):


@retry(wait_on=RuntimeError)
def git_ls_remote(baseurl, ref):
    return run(["git", "ls-remote", baseurl, ref], universal_newlines=True)
def git_ls_remote(baseurl, ref, credential_helper=None):
    cmd = ["git"]
    if credential_helper:
        cmd.extend(["-c", "credential.useHttpPath=true"])
        cmd.extend(["-c", "credential.helper=%s" % credential_helper])
    return run(cmd + ["ls-remote", baseurl, ref], universal_newlines=True)


def get_tz_offset():
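The new `credential_helper` argument threads a git credential helper into the `ls-remote` invocation via `-c` options. A sketch of the resulting command line, assuming a hypothetical helper at /usr/local/bin/my-helper:

# git -c credential.useHttpPath=true \
#     -c "credential.helper=/usr/local/bin/my-helper" \
#     ls-remote https://example.com/repo.git master
#
# Equivalent list form, as built by git_ls_remote() above:
cmd = ["git",
       "-c", "credential.useHttpPath=true",
       "-c", "credential.helper=/usr/local/bin/my-helper",
       "ls-remote", "https://example.com/repo.git", "master"]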
@@ -1034,6 +1053,46 @@ def load_config(file_path, defaults={}):
    return conf


def _read_single_module_stream(
    file_or_string, compose=None, arch=None, build=None, is_file=True
):
    try:
        mod_index = Modulemd.ModuleIndex.new()
        if is_file:
            mod_index.update_from_file(file_or_string, True)
        else:
            mod_index.update_from_string(file_or_string, True)
        mod_names = mod_index.get_module_names()
        emit_warning = False
        if len(mod_names) > 1:
            emit_warning = True
        mod_streams = mod_index.get_module(mod_names[0]).get_all_streams()
        if len(mod_streams) > 1:
            emit_warning = True
        if emit_warning and compose:
            compose.log_warning(
                "Multiple modules/streams for arch: %s. Build: %s. "
                "Processing first module/stream only.",
                arch,
                build,
            )
        return mod_streams[0]
    except (KeyError, IndexError):
        # There is no modulemd for this arch. This could mean an arch was
        # added to the compose after the module was built. We don't want to
        # process this, let's skip this module.
        if compose:
            compose.log_info("Skipping arch: %s. Build: %s", arch, build)


def read_single_module_stream_from_file(*args, **kwargs):
    return _read_single_module_stream(*args, is_file=True, **kwargs)


def read_single_module_stream_from_string(*args, **kwargs):
    return _read_single_module_stream(*args, is_file=False, **kwargs)


@contextlib.contextmanager
def as_local_file(url):
    """If URL points to a file over HTTP, the file will be downloaded locally
@@ -1046,6 +1105,8 @@ def as_local_file(url):
            yield local_filename
        finally:
            os.remove(local_filename)
    elif url.startswith("file://"):
        yield url[7:]
    else:
        # Not a remote url, return unchanged.
        yield url
@@ -1083,3 +1144,22 @@ class PartialFuncThreadPool(ThreadPool):
    @property
    def results(self):
        return self._results


def read_json_file(file_path):
    """A helper function to read a JSON file."""
    with open(file_path) as f:
        return json.load(f)


UNITS = ["", "Ki", "Mi", "Gi", "Ti"]


def format_size(sz):
    sz = float(sz)
    unit = 0
    while sz > 1024:
        sz /= 1024
        unit += 1

    return "%.3g %sB" % (sz, UNITS[unit])
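A quick check of the new helper: 5,000,000 bytes is divided by 1024 twice (5000000 -> 4882.8 Ki -> 4.768 Mi), and "%.3g" rounds to three significant digits:

assert format_size(512) == "512 B"
assert format_size(5000000) == "4.77 MiB"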
@@ -177,16 +177,23 @@ class CompsFilter(object):
        for i in self.tree.xpath("//*[@xml:lang]"):
            i.getparent().remove(i)

    def filter_environment_groups(self, lookaside_groups=[]):
    def filter_environment_groups(self, arch, lookaside_groups=[]):
        """
        Remove undefined groups from environments.
        Remove undefined groups or groups not matching given arch from environments.
        """
        all_groups = self.tree.xpath("/comps/group/id/text()") + lookaside_groups
        for environment in self.tree.xpath("/comps/environment"):
            for group in environment.xpath("grouplist/groupid"):
            for parent_tag in ("grouplist", "optionlist"):
                for group in environment.xpath("%s/groupid" % parent_tag):
                    if group.text not in all_groups:
                        group.getparent().remove(group)

                for group in environment.xpath("%s/groupid[@arch]" % parent_tag):
                    value = group.attrib.get("arch")
                    values = [v for v in re.split(r"[, ]+", value) if v]
                    if arch not in values:
                        group.getparent().remove(group)

    def remove_empty_environments(self):
        """
        Remove all environments without groups.
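The arch filter splits a groupid's `arch` attribute on commas or spaces and drops the entry unless the current arch is listed. Given this hypothetical comps snippet and arch="x86_64":

# <environment>
#   <grouplist>
#     <groupid>core</groupid>
#     <groupid arch="ppc64le, s390x">legacy-tools</groupid>
#   </grouplist>
# </environment>
#
# the second entry is removed, because "x86_64" is not in the parsed list:
import re

value = "ppc64le, s390x"
values = [v for v in re.split(r"[, ]+", value) if v]
assert values == ["ppc64le", "s390x"]
assert "x86_64" not in values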
@@ -212,7 +219,7 @@ class CompsFilter(object):
        )
        file_obj.write(b"\n")

    def cleanup(self, keep_groups=[], lookaside_groups=[]):
    def cleanup(self, arch, keep_groups=[], lookaside_groups=[]):
        """
        Remove empty groups, categories and environment from the comps file.
        Groups given in ``keep_groups`` will be preserved even if empty.

@@ -223,7 +230,7 @@ class CompsFilter(object):
        self.remove_empty_groups(keep_groups)
        self.filter_category_groups()
        self.remove_empty_categories()
        self.filter_environment_groups(lookaside_groups)
        self.filter_environment_groups(arch, lookaside_groups)
        self.remove_empty_environments()
@@ -357,7 +364,10 @@ class CompsWrapper(object):

        if environment.option_ids:
            append_grouplist(
                doc, env_node, set(environment.option_ids), "optionlist",
                doc,
                env_node,
                set(environment.option_ids),
                "optionlist",
            )

        if self.comps.langpacks:

@@ -26,7 +26,12 @@ Pungi).


def get_cmd(
    conf_file, arch, repos, lookasides, platform=None, filter_packages=None,
    conf_file,
    arch,
    repos,
    lookasides,
    platform=None,
    filter_packages=None,
):
    cmd = ["fus", "--verbose", "--arch", arch]

@@ -146,6 +146,7 @@ def get_mkisofs_cmd(
    input_charset="utf-8",
    graft_points=None,
    use_xorrisofs=False,
    iso_level=None,
):
    # following options are always enabled
    untranslated_filenames = True

@@ -155,6 +156,10 @@ def get_mkisofs_cmd(
    rock = True

    cmd = ["/usr/bin/xorrisofs" if use_xorrisofs else "/usr/bin/genisoimage"]

    if iso_level:
        cmd.extend(["-iso-level", str(iso_level)])

    if appid:
        cmd.extend(["-appid", appid])
@@ -255,10 +260,23 @@ def get_isohybrid_cmd(iso_path, arch):
    return cmd


def get_manifest_cmd(iso_name):
    return "isoinfo -R -f -i %s | grep -v '/TRANS.TBL$' | sort >> %s.manifest" % (
def get_manifest_cmd(iso_name, xorriso=False, output_file=None):
    if not output_file:
        output_file = "%s.manifest" % iso_name

    if xorriso:
        return """xorriso -dev %s --find |
        tail -n+2 |
        tr -d "'" |
        cut -c2- |
        sort >> %s""" % (
            shlex_quote(iso_name),
            shlex_quote(output_file),
        )
    else:
        return "isoinfo -R -f -i %s | grep -v '/TRANS.TBL$' | sort >> %s" % (
            shlex_quote(iso_name),
            shlex_quote(output_file),
        )

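Usage sketch for the extended helper (the file name is made up):

# get_manifest_cmd("DVD.iso")
#   -> "isoinfo -R -f -i DVD.iso | grep -v '/TRANS.TBL$' | sort >> DVD.iso.manifest"
#
# get_manifest_cmd("DVD.iso", xorriso=True)
#   -> a shell pipeline over `xorriso -dev DVD.iso --find` whose tail/tr/cut
#      stages strip xorriso's quoting and leading "./" before sorting the
#      file list into the same DVD.iso.manifest.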
@@ -1,6 +1,6 @@
import os
import subprocess
import time
from pathlib import Path

from attr import dataclass
from kobo.rpmlib import parse_nvra
@@ -43,19 +43,20 @@ class KojiMock:
    Class that acts like real koji (for some needed methods)
    but uses local storage as data source
    """
    def __init__(self, packages_dir, modules_dir):
    def __init__(self, packages_dir, modules_dir, all_arches):
        self._modules = self._gather_modules(modules_dir)
        self._modules_dir = modules_dir
        self._packages_dir = packages_dir
        self._all_arches = all_arches

    def _gather_modules(self, modules_dir):
    @staticmethod
    def _gather_modules(modules_dir):
        modules = {}
        for arch in os.listdir(modules_dir):
            arch_dir = os.path.join(
                modules_dir,
                arch,
            )
            for index, f in enumerate(os.listdir(arch_dir)):
        for index, (f, arch) in enumerate(
            (sub_path.name, sub_path.parent.name)
            for path in Path(modules_dir).glob('*')
            for sub_path in path.iterdir()
        ):
            parsed = parse_nvra(f)
            modules[index] = Module(
                name=parsed['name'],
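The rewritten `_gather_modules` flattens the nested per-arch loops into one generator over (file name, arch) pairs. An equivalent standalone sketch for a modules/<arch>/<module-nvra> layout:

from pathlib import Path

def iter_module_files(modules_dir):
    # Each top-level entry is an arch directory; each child is a module file.
    for arch_dir in Path(modules_dir).glob("*"):
        for module_file in arch_dir.iterdir():
            yield module_file.name, arch_dir.name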
@@ -68,7 +69,8 @@ class KojiMock:
        )
        return modules

    def getLastEvent(self, *args, **kwargs):
    @staticmethod
    def getLastEvent(*args, **kwargs):
        return {'id': LAST_EVENT_ID, 'ts': LAST_EVENT_TIME}

    def listTagged(self, tag_name, *args, **kwargs):
@@ -92,6 +94,7 @@ class KojiMock:
            'name': module.name,
            'id': module.build_id,
            'tag_name': tag_name,
            'arch': module.arch,
            # Following fields are currently not
            # used but returned by real koji
            # left them here just for reference
@@ -111,7 +114,8 @@ class KojiMock:

        return builds

    def getFullInheritance(self, *args, **kwargs):
    @staticmethod
    def getFullInheritance(*args, **kwargs):
        """
        Unneeded because we use local storage.
        """
@@ -199,31 +203,12 @@ class KojiMock:
        packages = []

        # get all rpms in folder
        rpms = search_rpms(self._packages_dir)
        all_rpms = [package.path for package in rpms]
        rpms = search_rpms(Path(self._packages_dir))

        # get nvras for modular packages
        nvras = set()
        for module in self._modules.values():
            path = os.path.join(
                self._modules_dir,
                module.arch,
                module.nvr,
            )
            info = Modulemd.ModuleStream.read_string(open(path).read(), strict=True)

            for package in info.get_rpm_artifacts():
                data = parse_nvra(package)
                nvras.add((data['name'], data['version'], data['release'], data['arch']))

        # and remove modular packages from global list
        for rpm in all_rpms[:]:
            data = parse_nvra(os.path.basename(rpm[:-4]))
            if (data['name'], data['version'], data['release'], data['arch']) in nvras:
                all_rpms.remove(rpm)

        for rpm in all_rpms:
            info = parse_nvra(os.path.basename(rpm))
        for rpm in rpms:
            info = parse_nvra(rpm.path.stem)
            if 'module' in info['release']:
                continue
            packages.append({
                "build_id": RELEASE_BUILD_ID,
                "name": info['name'],
@@ -244,15 +229,19 @@ class KojiMock:
        """
        Get list of builds for module and given module tag name.
        """
        module = self._get_module_by_name(tag_name)
        builds = []
        packages = []
        modules = self._get_modules_by_name(tag_name)
        for module in modules:
            if module is None:
                raise ValueError('Module %s is not found' % tag_name)
            path = os.path.join(
                self._modules_dir,
                module.arch,
                tag_name,
            )

        builds = [
            {
            builds.append({
                "build_id": module.build_id,
                "package_name": module.name,
                "nvr": module.nvr,
@@ -278,12 +267,8 @@ class KojiMock:
                # "volume_id": 0,
                # "package_id": 104,
                # "owner_id": 6,
            }
        ]
        if module is None:
            raise ValueError('Module %s is not found' % tag_name)
            })

        packages = []
            if os.path.exists(path):
                info = Modulemd.ModuleStream.read_string(open(path).read(), strict=True)
                for art in info.get_rpm_artifacts():
@@ -304,9 +289,11 @@ class KojiMock:
            raise RuntimeError('Unable to find module %s' % path)
        return builds, packages

    def _get_module_by_name(self, tag_name):
    def _get_modules_by_name(self, tag_name):
        modules = []
        for arch in self._all_arches:
            for module in self._modules.values():
                if module.nvr != tag_name:
                if module.nvr != tag_name or module.arch != arch:
                    continue
                return module
        return None
                modules.append(module)
        return modules

@@ -14,17 +14,23 @@
# along with this program; if not, see <https://gnu.org/licenses/>.


import contextlib
import os
import re
import socket
import shutil
import time
import threading
import contextlib

import requests

import koji
from kobo.shortcuts import run, force_list
import six
from six.moves import configparser, shlex_quote
import six.moves.xmlrpc_client as xmlrpclib
from flufl.lock import Lock
from datetime import timedelta

from .kojimock import KojiMock
from .. import util
@@ -37,10 +43,14 @@ KOJI_BUILD_DELETED = koji.BUILD_STATES["DELETED"]
class KojiWrapper(object):
    lock = threading.Lock()

    def __init__(self, profile, real_koji=False):
        self.profile = profile
    def __init__(self, compose):
        self.compose = compose
        try:
            self.profile = self.compose.conf["koji_profile"]
        except KeyError:
            raise RuntimeError("Koji profile must be configured")
        with self.lock:
            self.koji_module = koji.get_profile_module(profile)
            self.koji_module = koji.get_profile_module(self.profile)
            session_opts = {}
            for key in (
                "timeout",
@@ -58,15 +68,13 @@ class KojiWrapper(object):
                value = getattr(self.koji_module.config, key, None)
                if value is not None:
                    session_opts[key] = value
            if real_koji:
                self.koji_proxy = koji.ClientSession(
                    self.koji_module.config.server, session_opts
                )
            else:
                self.koji_proxy = KojiMock(
                    packages_dir=self.koji_module.config.topdir,
                    modules_dir=os.path.join(self.koji_module.config.topdir, 'modules'))

    # This retry should be removed once https://pagure.io/koji/issue/3170 is
    # fixed and released.
    @util.retry(wait_on=(xmlrpclib.ProtocolError, koji.GenericError))
    def login(self):
        """Authenticate to the hub."""
        auth_type = self.koji_module.config.authtype
@@ -117,8 +125,6 @@ class KojiWrapper(object):

        if channel:
            cmd.append("--channel-override=%s" % channel)
        else:
            cmd.append("--channel-override=runroot-local")

        if weight:
            cmd.append("--weight=%s" % int(weight))
@@ -148,10 +154,13 @@ class KojiWrapper(object):

        if chown_paths:
            paths = " ".join(shlex_quote(pth) for pth in chown_paths)
            command += " ; EXIT_CODE=$?"
            # Make the files world readable
            command += " && chmod -R a+r %s" % paths
            command += " ; chmod -R a+r %s" % paths
            # and owned by the same user that is running the process
            command += " && chown -R %d %s" % (os.getuid(), paths)
            command += " ; chown -R %d %s" % (os.getuid(), paths)
            # Exit with code of main command
            command += " ; exit $EXIT_CODE"
        cmd.append(command)

        return cmd
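Swapping "&&" for ";" makes the chmod/chown cleanup run even when the main command fails, while "EXIT_CODE=$?" captured immediately after the main command preserves its status for the final exit. A standalone sketch of the resulting shell string (the command and paths are made up):

import os

command = "run-the-compose-step"   # hypothetical main runroot command
paths = "/mnt/koji/work/output"    # hypothetical chown target
command += " ; EXIT_CODE=$?"
command += " ; chmod -R a+r %s" % paths
command += " ; chown -R %d %s" % (os.getuid(), paths)
command += " ; exit $EXIT_CODE"
# -> "run-the-compose-step ; EXIT_CODE=$? ; chmod ... ; chown ... ; exit $EXIT_CODE"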
@@ -171,8 +180,6 @@ class KojiWrapper(object):

        if channel:
            cmd.append("--channel-override=%s" % channel)
        else:
            cmd.append("--channel-override=runroot-local")

        if weight:
            cmd.append("--weight=%s" % int(weight))
@@ -208,14 +215,19 @@ class KojiWrapper(object):
        return cmd

    def get_pungi_ostree_cmd(
        self, target, arch, args, channel=None, packages=None, mounts=None, weight=None,
        self,
        target,
        arch,
        args,
        channel=None,
        packages=None,
        mounts=None,
        weight=None,
    ):
        cmd = self._get_cmd("pungi-ostree", "--nowait", "--task-id")

        if channel:
            cmd.append("--channel-override=%s" % channel)
        else:
            cmd.append("--channel-override=runroot-local")

        if weight:
            cmd.append("--weight=%s" % int(weight))
@@ -286,15 +298,22 @@ class KojiWrapper(object):
            universal_newlines=True,
        )

        first_line = output.splitlines()[0]
        match = re.search(r"^(\d+)$", first_line)
        if not match:
        # Look for first line that contains only a number. This is the ID of
        # the new task. Usually this should be the first line, but there may be
        # warnings before it.
        for line in output.splitlines():
            match = re.search(r"^(\d+)$", line)
            if match:
                task_id = int(match.groups()[0])
                break

        if not task_id:
            raise RuntimeError(
                "Could not find task ID in output. Command '%s' returned '%s'."
                % (" ".join(command), output)
            )

        task_id = int(match.groups()[0])
        self.save_task_id(task_id)

        retcode, output = self._wait_for_task(task_id, logfile=log_file)
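Since koji may print warnings before the task ID, the new code takes the first line consisting only of digits. A standalone sketch of the scan, assuming task_id starts as None (its initialisation is outside this hunk):

import re

def find_task_id(output):
    task_id = None
    for line in output.splitlines():
        match = re.search(r"^(\d+)$", line)
        if match:
            task_id = int(match.groups()[0])
            break
    if not task_id:
        raise RuntimeError("Could not find task ID in output: %r" % output)
    return task_id

assert find_task_id("Warning: slow hub\n12345\nWatching tasks...") == 12345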
@@ -328,9 +347,11 @@ class KojiWrapper(object):
            "ksurl",
            "distro",
        )
        assert set(min_options).issubset(set(config_options["image-build"].keys())), (
            "image-build requires at least %s got '%s'"
            % (", ".join(min_options), config_options)
        assert set(min_options).issubset(
            set(config_options["image-build"].keys())
        ), "image-build requires at least %s got '%s'" % (
            ", ".join(min_options),
            config_options,
        )
        cfg_parser = configparser.ConfigParser()
        for section, opts in config_options.items():
@@ -385,6 +406,9 @@ class KojiWrapper(object):
        if "can_fail" in options:
            cmd.append("--can-fail=%s" % ",".join(options["can_fail"]))

        if options.get("nomacboot"):
            cmd.append("--nomacboot")

        if wait:
            cmd.append("--wait")
@@ -522,6 +546,7 @@ class KojiWrapper(object):
        retcode, output = run(
            command,
            can_fail=True,
            show_cmd=True,
            logfile=log_file,
            env=env,
            buffer_size=-1,
@@ -536,6 +561,8 @@ class KojiWrapper(object):
        )
        task_id = int(match.groups()[0])

        self.save_task_id(task_id)

        if retcode != 0 and (
            self._has_connection_error(output) or self._has_offline_error(output)
        ):
@@ -550,6 +577,19 @@ class KojiWrapper(object):
        }

    def watch_task(self, task_id, log_file=None, max_retries=None):
        """Watch and wait for a task to finish.

        :param int task_id: ID of koji task.
        :param str log_file: Path to log file.
        :param int max_retries: Max times to retry when error occurs,
            no limits by default.
        """
        if log_file:
            task_url = os.path.join(
                self.koji_module.config.weburl, "taskinfo?taskID=%d" % task_id
            )
            with open(log_file, "a") as f:
                f.write("Task URL: %s\n" % task_url)
        retcode, _ = self._wait_for_task(
            task_id, logfile=log_file, max_retries=max_retries
        )
@@ -752,11 +792,10 @@ class KojiWrapper(object):
        if list_of_args is None and list_of_kwargs is None:
            raise ValueError("One of list_of_args or list_of_kwargs must be set.")

        if type(list_of_args) not in [type(None), list] or type(list_of_kwargs) not in [
            type(None),
            list,
        ]:
            raise ValueError("list_of_args and list_of_kwargs must be list or None.")
        if list_of_args is not None and not isinstance(list_of_args, list):
            raise ValueError("list_of_args must be list or None.")
        if list_of_kwargs is not None and not isinstance(list_of_kwargs, list):
            raise ValueError("list_of_kwargs must be list or None.")

        if list_of_kwargs is None:
            list_of_kwargs = [{}] * len(list_of_args)
@@ -770,9 +809,9 @@ class KojiWrapper(object):

        koji_session.multicall = True
        for args, kwargs in zip(list_of_args, list_of_kwargs):
            if type(args) != list:
            if not isinstance(args, list):
                args = [args]
            if type(kwargs) != dict:
            if not isinstance(kwargs, dict):
                raise ValueError("Every item in list_of_kwargs must be a dict")
            koji_session_fnc(*args, **kwargs)
@@ -780,7 +819,7 @@ class KojiWrapper(object):

        if not responses:
            return None
        if type(responses) != list:
        if not isinstance(responses, list):
            raise ValueError(
                "Fault element was returned for multicall of method %r: %r"
                % (koji_session_fnc, responses)
@@ -796,7 +835,7 @@ class KojiWrapper(object):
        # a one-item array containing the result value,
        # or a struct of the form found inside the standard <fault> element.
        for response, args, kwargs in zip(responses, list_of_args, list_of_kwargs):
            if type(response) == list:
            if isinstance(response, list):
                if not response:
                    raise ValueError(
                        "Empty list returned for multicall of method %r with args %r, %r"  # noqa: E501
@@ -821,13 +860,61 @@ class KojiWrapper(object):
        """
        return self.multicall_map(*args, **kwargs)

    def save_task_id(self, task_id):
        """Save task id by creating a file using task_id as file name

        :param int task_id: ID of koji task
        """
        log_dir = self.compose.paths.log.koji_tasks_dir()
        with open(os.path.join(log_dir, str(task_id)), "w"):
            pass


class KojiMockWrapper(object):
    lock = threading.Lock()

    def __init__(self, compose, all_arches):
        self.all_arches = all_arches
        self.compose = compose
        try:
            self.profile = self.compose.conf["koji_profile"]
        except KeyError:
            raise RuntimeError("Koji profile must be configured")
        with self.lock:
            self.koji_module = koji.get_profile_module(self.profile)
            session_opts = {}
            for key in (
                "timeout",
                "keepalive",
                "max_retries",
                "retry_interval",
                "anon_retry",
                "offline_retry",
                "offline_retry_interval",
                "debug",
                "debug_xmlrpc",
                "serverca",
                "use_fast_upload",
            ):
                value = getattr(self.koji_module.config, key, None)
                if value is not None:
                    session_opts[key] = value
            self.koji_proxy = KojiMock(
                packages_dir=self.koji_module.config.topdir,
                modules_dir=os.path.join(
                    self.koji_module.config.topdir,
                    'modules',
                ),
                all_arches=self.all_arches,
            )


def get_buildroot_rpms(compose, task_id):
    """Get build root RPMs - either from runroot or local"""
    result = []
    if task_id:
        # runroot
        koji = KojiWrapper(compose.conf["koji_profile"])
        koji = KojiWrapper(compose)
        buildroot_infos = koji.koji_proxy.listBuildroots(taskID=task_id)
        if not buildroot_infos:
            children_tasks = koji.koji_proxy.getTaskChildren(task_id)
@@ -853,3 +940,176 @@ def get_buildroot_rpms(compose, task_id):
            continue
        result.append(i)
    return sorted(result)


class KojiDownloadProxy:
    def __init__(self, topdir, topurl, cache_dir, logger):
        if not topdir:
            # This will only happen if there is either no koji_profile
            # configured, or the profile doesn't have a topdir. In the first
            # case there will be no koji interaction, and the second indicates
            # broken koji configuration.
            # We can pretend to have local access in both cases to avoid any
            # external requests.
            self.has_local_access = True
            return

        self.cache_dir = cache_dir
        self.logger = logger

        self.topdir = topdir
        self.topurl = topurl

        # If cache directory is configured, we want to use it (even if we
        # actually have local access to the storage).
        self.has_local_access = not bool(cache_dir)
        # This is used for temporary downloaded files. The suffix is unique
        # per-process. To prevent threads in the same process from colliding, a
        # thread id is added later.
        self.unique_suffix = "%s.%s" % (socket.gethostname(), os.getpid())
        self.session = None
        if not self.has_local_access:
            self.session = requests.Session()

    @property
    def path_prefix(self):
        dir = self.topdir if self.has_local_access else self.cache_dir
        return dir.rstrip("/") + "/"

    @classmethod
    def from_config(klass, conf, logger):
        topdir = None
        topurl = None
        cache_dir = None
        if "koji_profile" in conf:
            koji_module = koji.get_profile_module(conf["koji_profile"])
            topdir = koji_module.config.topdir
            topurl = koji_module.config.topurl

            cache_dir = conf.get("koji_cache")
            if cache_dir:
                cache_dir = cache_dir.rstrip("/") + "/"
        return klass(topdir, topurl, cache_dir, logger)

    @util.retry(wait_on=requests.exceptions.RequestException)
    def _download(self, url, dest):
        """Download file into given location

        :param str url: URL of the file to download
        :param str dest: file path to store the result in
        :returns: path to the downloaded file (same as dest) or None if the URL
            return 404.
        """
        with self.session.get(url, stream=True) as r:
            if r.status_code == 404:
                self.logger.warning("GET %s NOT FOUND", url)
                return None
            if r.status_code != 200:
                self.logger.error("GET %s %s", url, r.status_code)
                r.raise_for_status()
                # The exception from here will be retried by the decorator.

            file_size = int(r.headers.get("Content-Length", 0))
            self.logger.info("GET %s OK %s", url, util.format_size(file_size))
            with open(dest, "wb") as f:
                shutil.copyfileobj(r.raw, f)
            return dest

    def _delete(self, path):
        """Try to delete file at given path and ignore errors."""
        try:
            os.remove(path)
        except Exception:
            self.logger.warning("Failed to delete %s", path)

    def _atomic_download(self, url, dest, validator):
        """Atomically download a file

        :param str url: URL of the file to download
        :param str dest: file path to store the result in
        :returns: path to the downloaded file (same as dest) or None if the URL
            return 404.
        """
        temp_file = "%s.%s.%s" % (dest, self.unique_suffix, threading.get_ident())

        # First download to the temporary location.
        try:
            if self._download(url, temp_file) is None:
                # The file was not found.
                return None
        except Exception:
            # Download failed, let's make sure to clean up potentially partial
            # temporary file.
            self._delete(temp_file)
            raise

        # Check if the temporary file is correct (assuming we were provided a
        # validator function).
        try:
            if validator:
                validator(temp_file)
        except Exception:
            # Validation failed. Let's delete the problematic file and re-raise
            # the exception.
            self._delete(temp_file)
            raise

        # Atomically move the temporary file into final location
        os.rename(temp_file, dest)
        return dest

    def _download_file(self, path, validator):
        """Ensure file on Koji volume in ``path`` is present in the local
        cache.

        :returns: path to the local file or None if file is not found
        """
        url = path.replace(self.topdir, self.topurl)
        destination_file = path.replace(self.topdir, self.cache_dir)
        util.makedirs(os.path.dirname(destination_file))

        lock = Lock(destination_file + ".lock")
        # Hold the lock for this file for 5 minutes. If another compose needs
        # the same file but it's not downloaded yet, the process will wait.
        #
        # If the download finishes in time, the downloaded file will be used
        # here.
        #
        # If the download takes longer, this process will steal the lock and
        # start its own download.
        #
        # That should not be a problem: the same file will be downloaded and
        # then replaced atomically on the filesystem. If the original process
        # managed to hardlink the first file already, that hardlink will be
        # broken, but that will only result in the same file stored twice.
        lock.lifetime = timedelta(minutes=5)

        with lock:
            # Check if the file already exists. If yes, return the path.
            if os.path.exists(destination_file):
                # Update mtime of the file. This covers the case of packages in the
                # tag that are not included in the compose. Updating mtime will
                # exempt them from cleanup for extra time.
                os.utime(destination_file)
                return destination_file

            return self._atomic_download(url, destination_file, validator)

    def get_file(self, path, validator=None):
        """
        If path refers to an existing file in Koji, return a valid local path
        to it. If no such file exists, return None.

        :param validator: A callable that will be called with the path to the
            downloaded file if and only if the file was actually downloaded.
            Any exception raised from there will abort the download and be
            propagated.
        """
        if self.has_local_access:
            # We have koji volume mounted locally. No transformation needed for
            # the path, just check it exists.
            if os.path.exists(path):
                return path
            return None
        else:
            # We need to download the file.
            return self._download_file(path, validator)

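The download-to-temp-then-rename pattern in `_atomic_download` guarantees readers never observe a partial file: os.rename() is atomic on POSIX as long as source and destination live on the same filesystem. A minimal standalone sketch of the same idea (URL and paths are placeholders):

import os
import shutil
import requests

def atomic_fetch(url, dest):
    temp_file = dest + ".part"
    try:
        with requests.get(url, stream=True) as r:
            r.raise_for_status()
            with open(temp_file, "wb") as f:
                shutil.copyfileobj(r.raw, f)
    except Exception:
        # Never leave a partial file behind.
        if os.path.exists(temp_file):
            os.remove(temp_file)
        raise
    # Readers see either no file at dest or a complete one, never a torso.
    os.rename(temp_file, dest)
    return dest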
@@ -40,9 +40,13 @@ def get_repoclosure_cmd(backend="yum", arch=None, repos=None, lookaside=None):
    # There are options that are not exposed here, because we don't need
    # them.

    for i in force_list(arch or []):
    arches = force_list(arch or [])
    for i in arches:
        cmd.append("--arch=%s" % i)

    if backend == "dnf" and arches:
        cmd.append("--forcearch=%s" % arches[0])

    repos = repos or {}
    for repo_id, repo_path in repos.items():
        cmd.append("--repofrompath=%s,%s" % (repo_id, _to_url(repo_path)))

@@ -20,6 +20,7 @@ import os
import shutil
import glob
import six
import threading
from six.moves import shlex_quote
from six.moves.urllib.request import urlretrieve
from fnmatch import fnmatch

@@ -29,12 +30,15 @@ from kobo.shortcuts import run, force_list
from pungi.util import explode_rpm_package, makedirs, copy_all, temp_dir, retry
from .kojiwrapper import KojiWrapper

lock = threading.Lock()


class ScmBase(kobo.log.LoggingBase):
    def __init__(self, logger=None, command=None, compose=None):
    def __init__(self, logger=None, command=None, compose=None, options=None):
        kobo.log.LoggingBase.__init__(self, logger=logger)
        self.command = command
        self.compose = compose
        self.options = options or {}

    @retry(interval=60, timeout=300, wait_on=RuntimeError)
    def retry_run(self, cmd, **kwargs):
@@ -156,22 +160,31 @@ class GitWrapper(ScmBase):
        if "://" not in repo:
            repo = "file://%s" % repo

        git_cmd = ["git"]
        if "credential_helper" in self.options:
            git_cmd.extend(["-c", "credential.useHttpPath=true"])
            git_cmd.extend(
                ["-c", "credential.helper=%s" % self.options["credential_helper"]]
            )

        run(["git", "init"], workdir=destdir)
        try:
            run(["git", "fetch", "--depth=1", repo, branch], workdir=destdir)
            run(git_cmd + ["fetch", "--depth=1", repo, branch], workdir=destdir)
            run(["git", "checkout", "FETCH_HEAD"], workdir=destdir)
        except RuntimeError as e:
            # Fetch failed, to do a full clone we add a remote to our empty
            # repo, get its content and check out the reference we want.
            self.log_debug(
                "Trying to do a full clone because shallow clone failed: %s %s"
                % (e, e.output)
                % (e, getattr(e, "output", ""))
            )
            try:
                # Re-run git init in case of previous failure breaking .git dir
                run(["git", "init"], workdir=destdir)
                run(["git", "remote", "add", "origin", repo], workdir=destdir)
                self.retry_run(["git", "remote", "update", "origin"], workdir=destdir)
                self.retry_run(
                    git_cmd + ["remote", "update", "origin"], workdir=destdir
                )
                run(["git", "checkout", branch], workdir=destdir)
            except RuntimeError:
                if self.compose:
@@ -185,19 +198,38 @@ class GitWrapper(ScmBase):
                copy_all(destdir, debugdir)
            raise

        self.run_process_command(destdir)

    def get_temp_repo_path(self, scm_root, scm_branch):
        scm_repo = scm_root.split("/")[-1]
        process_id = os.getpid()
        tmp_dir = (
            "/tmp/pungi-temp-git-repos-"
            + str(process_id)
            + "/"
            + scm_repo
            + "-"
            + scm_branch
        )
        return tmp_dir

    def setup_repo(self, scm_root, scm_branch):
        tmp_dir = self.get_temp_repo_path(scm_root, scm_branch)
        if not os.path.isdir(tmp_dir):
            makedirs(tmp_dir)
            self._clone(scm_root, scm_branch, tmp_dir)
            self.run_process_command(tmp_dir)
        return tmp_dir

    def export_dir(self, scm_root, scm_dir, target_dir, scm_branch=None):
        scm_dir = scm_dir.lstrip("/")
        scm_branch = scm_branch or "master"

        with temp_dir() as tmp_dir:
        self.log_debug(
            "Exporting directory %s from git %s (branch %s)..."
            % (scm_dir, scm_root, scm_branch)
        )

        self._clone(scm_root, scm_branch, tmp_dir)
        with lock:
            tmp_dir = self.setup_repo(scm_root, scm_branch)

        copy_all(os.path.join(tmp_dir, scm_dir), target_dir)
@@ -205,7 +237,6 @@ class GitWrapper(ScmBase):
        scm_file = scm_file.lstrip("/")
        scm_branch = scm_branch or "master"

        with temp_dir() as tmp_dir:
        target_path = os.path.join(target_dir, os.path.basename(scm_file))

        self.log_debug(

@@ -213,7 +244,8 @@ class GitWrapper(ScmBase):
            % (scm_file, scm_root, scm_branch)
        )

        self._clone(scm_root, scm_branch, tmp_dir)
        with lock:
            tmp_dir = self.setup_repo(scm_root, scm_branch)

        makedirs(target_dir)
        shutil.copy2(os.path.join(tmp_dir, scm_file), target_path)
@@ -265,11 +297,7 @@ class RpmScmWrapper(ScmBase):
class KojiScmWrapper(ScmBase):
    def __init__(self, *args, **kwargs):
        super(KojiScmWrapper, self).__init__(*args, **kwargs)
        try:
            profile = kwargs["compose"].conf["koji_profile"]
        except KeyError:
            raise RuntimeError("Koji profile must be configured")
        wrapper = KojiWrapper(profile)
        wrapper = KojiWrapper(kwargs["compose"])
        self.koji = wrapper.koji_module
        self.proxy = wrapper.koji_proxy
|
@ -365,15 +393,19 @@ def get_file_from_scm(scm_dict, target_path, compose=None):
|
|||
scm_file = os.path.abspath(scm_dict)
|
||||
scm_branch = None
|
||||
command = None
|
||||
options = {}
|
||||
else:
|
||||
scm_type = scm_dict["scm"]
|
||||
scm_repo = scm_dict["repo"]
|
||||
scm_file = scm_dict["file"]
|
||||
scm_branch = scm_dict.get("branch", None)
|
||||
command = scm_dict.get("command")
|
||||
options = scm_dict.get("options", {})
|
||||
|
||||
logger = compose._logger if compose else None
|
||||
scm = _get_wrapper(scm_type, logger=logger, command=command, compose=compose)
|
||||
scm = _get_wrapper(
|
||||
scm_type, logger=logger, command=command, compose=compose, options=options
|
||||
)
|
||||
|
||||
files_copied = []
|
||||
for i in force_list(scm_file):
|
||||
|
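The new "options" key in scm_dict is how per-repo settings such as a git credential helper reach the wrapper. A hypothetical example dict (repo URL and helper path are made up):

scm_dict = {
    "scm": "git",
    "repo": "https://example.com/extra-files.git",
    "file": "GPL",
    "branch": "main",
    "options": {"credential_helper": "/usr/local/bin/my-helper"},
}
# get_file_from_scm(scm_dict, "/tmp/target", compose=compose) would then pass
# options into GitWrapper, which adds the credential.helper -c flags shown above.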
@@ -454,15 +486,19 @@ def get_dir_from_scm(scm_dict, target_path, compose=None):
        scm_dir = os.path.abspath(scm_dict)
        scm_branch = None
        command = None
        options = {}
    else:
        scm_type = scm_dict["scm"]
        scm_repo = scm_dict.get("repo", None)
        scm_dir = scm_dict["dir"]
        scm_branch = scm_dict.get("branch", None)
        command = scm_dict.get("command")
        options = scm_dict.get("options", {})

    logger = compose._logger if compose else None
    scm = _get_wrapper(
        scm_type, logger=logger, command=command, compose=compose, options=options
    )

    with temp_dir(prefix="scm_checkout_") as tmp_dir:
        scm.export_dir(scm_repo, scm_dir, scm_branch=scm_branch, target_dir=tmp_dir)

@@ -276,7 +276,6 @@ class Variant(object):
        modules=None,
        modular_koji_tags=None,
    ):

        environments = environments or []
        buildinstallpackages = buildinstallpackages or []

@@ -1,706 +0,0 @@
# -*- coding: utf-8 -*-

from __future__ import print_function

import argparse
import atexit
import errno
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import threading
from collections import namedtuple

import kobo.conf
import kobo.log
import productmd
from kobo import shortcuts
from six.moves import configparser, shlex_quote

import pungi.util
from pungi.compose import get_compose_dir
from pungi.linker import linker_pool
from pungi.phases.pkgset.sources.source_koji import get_koji_event_raw
from pungi.util import find_old_compose, parse_koji_event, temp_dir
from pungi.wrappers.kojiwrapper import KojiWrapper


Config = namedtuple(
    "Config",
    [
        # Path to directory with the compose
        "target",
        "compose_type",
        "label",
        # Path to the selected old compose that will be reused
        "old_compose",
        # Path to directory with config file copies
        "config_dir",
        # Which koji event to use (if any)
        "event",
        # Additional arguments to pungi-koji executable
        "extra_args",
    ],
)

log = logging.getLogger(__name__)


class Status(object):
    # Ready to start
    READY = "READY"
    # Waiting for dependencies to finish.
    WAITING = "WAITING"
    # Part is currently running
    STARTED = "STARTED"
    # A dependency failed, this one will never start.
    BLOCKED = "BLOCKED"


class ComposePart(object):
    def __init__(self, name, config, just_phase=[], skip_phase=[], dependencies=[]):
        self.name = name
        self.config = config
        self.status = Status.WAITING if dependencies else Status.READY
        self.just_phase = just_phase
        self.skip_phase = skip_phase
        self.blocked_on = set(dependencies)
        self.depends_on = set(dependencies)
        self.path = None
        self.log_file = None
        self.failable = False

    def __str__(self):
        return self.name

    def __repr__(self):
        return (
            "ComposePart({0.name!r},"
            " {0.config!r},"
            " {0.status!r},"
            " just_phase={0.just_phase!r},"
            " skip_phase={0.skip_phase!r},"
            " dependencies={0.depends_on!r})"
        ).format(self)

    def refresh_status(self):
        """Refresh status of this part with the result of the compose. This
        should only be called once the compose finished.
        """
        try:
            with open(os.path.join(self.path, "STATUS")) as fh:
                self.status = fh.read().strip()
        except IOError as exc:
            log.error("Failed to update status of %s: %s", self.name, exc)
            log.error("Assuming %s is DOOMED", self.name)
            self.status = "DOOMED"

    def is_finished(self):
        return "FINISHED" in self.status

    def unblock_on(self, finished_part):
        """Update set of blockers for this part. If it's empty, mark us as ready."""
        self.blocked_on.discard(finished_part)
        if self.status == Status.WAITING and not self.blocked_on:
            log.debug("%s is ready to start", self)
            self.status = Status.READY

    def setup_start(self, global_config, parts):
        substitutions = dict(
            ("part-%s" % name, p.path) for name, p in parts.items() if p.is_finished()
        )
        substitutions["configdir"] = global_config.config_dir

        config = pungi.util.load_config(self.config)

        for f in config.opened_files:
            # apply substitutions
            fill_in_config_file(f, substitutions)

        self.status = Status.STARTED
        self.path = get_compose_dir(
            os.path.join(global_config.target, "parts"),
            config,
            compose_type=global_config.compose_type,
            compose_label=global_config.label,
        )
        self.log_file = os.path.join(global_config.target, "logs", "%s.log" % self.name)
        log.info("Starting %s in %s", self.name, self.path)

    def get_cmd(self, global_config):
        cmd = ["pungi-koji", "--config", self.config, "--compose-dir", self.path]
        cmd.append("--%s" % global_config.compose_type)
        if global_config.label:
            cmd.extend(["--label", global_config.label])
        for phase in self.just_phase:
            cmd.extend(["--just-phase", phase])
        for phase in self.skip_phase:
            cmd.extend(["--skip-phase", phase])
        if global_config.old_compose:
            cmd.extend(
                ["--old-compose", os.path.join(global_config.old_compose, "parts")]
            )
        if global_config.event:
            cmd.extend(["--koji-event", str(global_config.event)])
        if global_config.extra_args:
            cmd.extend(global_config.extra_args)
        cmd.extend(["--no-latest-link"])
        return cmd

    @classmethod
    def from_config(cls, config, section, config_dir):
        part = cls(
            name=section,
            config=os.path.join(config_dir, config.get(section, "config")),
            just_phase=_safe_get_list(config, section, "just_phase", []),
            skip_phase=_safe_get_list(config, section, "skip_phase", []),
            dependencies=_safe_get_list(config, section, "depends_on", []),
        )
        if config.has_option(section, "failable"):
            part.failable = config.getboolean(section, "failable")
        return part


def _safe_get_list(config, section, option, default=None):
    """Get a value from config parser. The result is split into a list on
    commas or spaces, and `default` is returned if the key does not exist.
    """
    if config.has_option(section, option):
        value = config.get(section, option)
        return [x.strip() for x in re.split(r"[, ]+", value) if x]
    return default


def fill_in_config_file(fp, substs):
    """Templating function. It works with Jinja2 style placeholders such as
    {{foo}}. Whitespace around the key name is fine. The file is modified in place.

    :param fp string: path to the file to process
    :param substs dict: a mapping for values to put into the file
    """

    def repl(match):
        try:
            return substs[match.group(1)]
        except KeyError as exc:
            raise RuntimeError(
                "Unknown placeholder %s in %s" % (exc, os.path.basename(fp))
            )

    with open(fp, "r") as f:
        contents = re.sub(r"{{ *([a-zA-Z-_]+) *}}", repl, f.read())
    with open(fp, "w") as f:
        f.write(contents)

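The templating function above substitutes Jinja2-style placeholders without pulling in Jinja2 itself. A runnable example of the same regex at work (the values are made up):

import re

substs = {"part-server": "/mnt/compose/server"}
text = "target = {{ part-server }}/output"
result = re.sub(r"{{ *([a-zA-Z-_]+) *}}", lambda m: substs[m.group(1)], text)
assert result == "target = /mnt/compose/server/output"
# An unknown placeholder would raise KeyError here, which the function
# above converts into a RuntimeError naming the offending file.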
||||
def start_part(global_config, parts, part):
|
||||
part.setup_start(global_config, parts)
|
||||
fh = open(part.log_file, "w")
|
||||
cmd = part.get_cmd(global_config)
|
||||
log.debug("Running command %r", " ".join(shlex_quote(x) for x in cmd))
|
||||
return subprocess.Popen(cmd, stdout=fh, stderr=subprocess.STDOUT)
|
||||
|
||||
|
||||
def handle_finished(global_config, linker, parts, proc, finished_part):
|
||||
finished_part.refresh_status()
|
||||
log.info("%s finished with status %s", finished_part, finished_part.status)
|
||||
if proc.returncode == 0:
|
||||
# Success, unblock other parts...
|
||||
for part in parts.values():
|
||||
part.unblock_on(finished_part.name)
|
||||
# ...and link the results into final destination.
|
||||
copy_part(global_config, linker, finished_part)
|
||||
update_metadata(global_config, finished_part)
|
||||
else:
|
||||
# Failure, other stuff may be blocked.
|
||||
log.info("See details in %s", finished_part.log_file)
|
||||
block_on(parts, finished_part.name)
|
||||
|
||||
|
||||
def copy_part(global_config, linker, part):
|
||||
c = productmd.Compose(part.path)
|
||||
for variant in c.info.variants:
|
||||
data_path = os.path.join(part.path, "compose", variant)
|
||||
link = os.path.join(global_config.target, "compose", variant)
|
||||
log.info("Hardlinking content %s -> %s", data_path, link)
|
||||
hardlink_dir(linker, data_path, link)
|
||||
|
||||
|
||||
def hardlink_dir(linker, srcdir, dstdir):
|
||||
for root, dirs, files in os.walk(srcdir):
|
||||
root = os.path.relpath(root, srcdir)
|
||||
for f in files:
|
||||
src = os.path.normpath(os.path.join(srcdir, root, f))
|
||||
dst = os.path.normpath(os.path.join(dstdir, root, f))
|
||||
linker.queue_put((src, dst))
|
||||
|
||||
|
||||
def update_metadata(global_config, part):
|
||||
part_metadata_dir = os.path.join(part.path, "compose", "metadata")
|
||||
final_metadata_dir = os.path.join(global_config.target, "compose", "metadata")
|
||||
for f in os.listdir(part_metadata_dir):
|
||||
# Load the metadata
|
||||
with open(os.path.join(part_metadata_dir, f)) as fh:
|
||||
part_metadata = json.load(fh)
|
||||
final_metadata = os.path.join(final_metadata_dir, f)
|
||||
if os.path.exists(final_metadata):
|
||||
# We already have this file, will need to merge.
|
||||
merge_metadata(final_metadata, part_metadata)
|
||||
else:
|
||||
# A new file, just copy it.
|
||||
copy_metadata(global_config, final_metadata, part_metadata)
|
||||
|
||||
|
||||
def copy_metadata(global_config, final_metadata, source):
|
||||
"""Copy file to final location, but update compose information."""
|
||||
with open(
|
||||
os.path.join(global_config.target, "compose/metadata/composeinfo.json")
|
||||
) as f:
|
||||
composeinfo = json.load(f)
|
||||
try:
|
||||
source["payload"]["compose"].update(composeinfo["payload"]["compose"])
|
||||
except KeyError:
|
||||
# No [payload][compose], probably OSBS metadata
|
||||
pass
|
||||
with open(final_metadata, "w") as f:
|
||||
json.dump(source, f, indent=2, sort_keys=True)
|
||||
|
||||
|
||||
def merge_metadata(final_metadata, source):
|
||||
with open(final_metadata) as f:
|
||||
metadata = json.load(f)
|
||||
|
||||
try:
|
||||
key = {
|
||||
"productmd.composeinfo": "variants",
|
||||
"productmd.modules": "modules",
|
||||
"productmd.images": "images",
|
||||
"productmd.rpms": "rpms",
|
||||
}[source["header"]["type"]]
|
||||
# TODO what if multiple parts create images for the same variant
|
||||
metadata["payload"][key].update(source["payload"][key])
|
||||
except KeyError:
|
||||
# OSBS metadata, merge whole file
|
||||
metadata.update(source)
|
||||
with open(final_metadata, "w") as f:
|
||||
json.dump(metadata, f, indent=2, sort_keys=True)
|
||||
|
||||
|
||||
def block_on(parts, name):
|
||||
"""Part ``name`` failed, mark everything depending on it as blocked."""
|
||||
for part in parts.values():
|
||||
if name in part.blocked_on:
|
||||
log.warning("%s is blocked now and will not run", part)
|
||||
part.status = Status.BLOCKED
|
||||
block_on(parts, part.name)
|
||||
|
||||
|
||||
def check_finished_processes(processes):
|
||||
"""Walk through all active processes and check if something finished.
|
||||
"""
|
||||
for proc in processes.keys():
|
||||
proc.poll()
|
||||
if proc.returncode is not None:
|
||||
yield proc, processes[proc]
|
||||
|
||||
|
||||
def run_all(global_config, parts):
|
||||
# Mapping subprocess.Popen -> ComposePart
|
||||
processes = dict()
|
||||
remaining = set(p.name for p in parts.values() if not p.is_finished())
|
||||
|
||||
with linker_pool("hardlink") as linker:
|
||||
while remaining or processes:
|
||||
update_status(global_config, parts)
|
||||
|
||||
for proc, part in check_finished_processes(processes):
|
||||
del processes[proc]
|
||||
handle_finished(global_config, linker, parts, proc, part)
|
||||
|
||||
# Start new available processes.
|
||||
for name in list(remaining):
|
||||
part = parts[name]
|
||||
# Start all ready parts
|
||||
if part.status == Status.READY:
|
||||
remaining.remove(name)
|
||||
processes[start_part(global_config, parts, part)] = part
|
||||
# Remove blocked parts from todo list
|
||||
elif part.status == Status.BLOCKED:
|
||||
remaining.remove(part.name)
|
||||
|
||||
# Wait for any child process to finish if there is any.
|
||||
if processes:
|
||||
pid, reason = os.wait()
|
||||
for proc in processes.keys():
|
||||
# Set the return code for process that we caught by os.wait().
|
||||
# Calling poll() on it would not set the return code properly
|
||||
# since the value was already consumed by os.wait().
|
||||
if proc.pid == pid:
|
||||
proc.returncode = (reason >> 8) & 0xFF
|
||||
|
||||
log.info("Waiting for linking to finish...")
|
||||
return update_status(global_config, parts)
|
||||
|
||||
|
||||
def get_target_dir(config, compose_info, label, reldir=""):
    """Find directory where this compose will be.

    @param reldir: if target path in config is relative, it will be resolved
                   against this directory
    """
    dir = os.path.realpath(os.path.join(reldir, config.get("general", "target")))
    target_dir = get_compose_dir(
        dir,
        compose_info,
        compose_type=config.get("general", "compose_type"),
        compose_label=label,
    )
    return target_dir

def setup_logging(debug=False):
    FORMAT = "%(asctime)s: %(levelname)s: %(message)s"
    level = logging.DEBUG if debug else logging.INFO
    kobo.log.add_stderr_logger(log, log_level=level, format=FORMAT)
    log.setLevel(level)

def compute_status(statuses):
    if any(map(lambda x: x[0] in ("STARTED", "WAITING"), statuses)):
        # If there is anything still running or waiting to start, the whole is
        # still running.
        return "STARTED"
    elif any(map(lambda x: x[0] in ("DOOMED", "BLOCKED") and not x[1], statuses)):
        # If any required part is doomed or blocked, the whole is doomed
        return "DOOMED"
    elif all(map(lambda x: x[0] == "FINISHED", statuses)):
        # If all parts are complete, the whole is complete
        return "FINISHED"
    else:
        return "FINISHED_INCOMPLETE"

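# To make the aggregation rules concrete, a quick doctest-style sketch
# (hypothetical inputs; each entry is a (status, failable) pair):
#
#     >>> compute_status({("FINISHED", False), ("STARTED", False)})
#     'STARTED'
#     >>> compute_status({("FINISHED", False), ("DOOMED", False)})
#     'DOOMED'
#     >>> compute_status({("FINISHED", False), ("DOOMED", True)})
#     'FINISHED_INCOMPLETE'
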
def update_status(global_config, parts):
    log.debug("Updating status metadata")
    metadata = {}
    statuses = set()
    for part in parts.values():
        metadata[part.name] = {"status": part.status, "path": part.path}
        statuses.add((part.status, part.failable))
    metadata_path = os.path.join(
        global_config.target, "compose", "metadata", "parts.json"
    )
    with open(metadata_path, "w") as fh:
        json.dump(metadata, fh, indent=2, sort_keys=True, separators=(",", ": "))

    status = compute_status(statuses)
    log.info("Overall status is %s", status)
    with open(os.path.join(global_config.target, "STATUS"), "w") as fh:
        fh.write(status)

    return status != "DOOMED"

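# For orientation, the parts.json written above ends up shaped like this
# (part names and paths are hypothetical):
#
#     {
#       "client": {"path": "/composes/parts/client", "status": "FINISHED"},
#       "server": {"path": null, "status": "WAITING"}
#     }
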
def prepare_compose_dir(config, args, main_config_file, compose_info):
    if not hasattr(args, "compose_path"):
        # Creating a brand new compose
        target_dir = get_target_dir(
            config, compose_info, args.label, reldir=os.path.dirname(main_config_file)
        )
        for dir in ("logs", "parts", "compose/metadata", "work/global"):
            try:
                os.makedirs(os.path.join(target_dir, dir))
            except OSError as exc:
                if exc.errno != errno.EEXIST:
                    raise
        with open(os.path.join(target_dir, "STATUS"), "w") as fh:
            fh.write("STARTED")
        # Copy initial composeinfo for new compose
        shutil.copy(
            os.path.join(target_dir, "work/global/composeinfo-base.json"),
            os.path.join(target_dir, "compose/metadata/composeinfo.json"),
        )
    else:
        # Restarting a particular compose
        target_dir = args.compose_path

    return target_dir

def load_parts_metadata(global_config):
    parts_metadata = os.path.join(global_config.target, "compose/metadata/parts.json")
    with open(parts_metadata) as f:
        return json.load(f)

def setup_for_restart(global_config, parts, to_restart):
    has_stuff_to_do = False
    metadata = load_parts_metadata(global_config)
    for key in metadata:
        # Update state to match what is on disk
        log.debug(
            "Reusing %s (%s) from %s",
            key,
            metadata[key]["status"],
            metadata[key]["path"],
        )
        parts[key].status = metadata[key]["status"]
        parts[key].path = metadata[key]["path"]
    for key in to_restart:
        # Set restarted parts to run again
        parts[key].status = Status.WAITING
        parts[key].path = None

    for key in to_restart:
        # Remove blockers that are already finished
        for blocker in list(parts[key].blocked_on):
            if parts[blocker].is_finished():
                parts[key].blocked_on.discard(blocker)
        if not parts[key].blocked_on:
            log.debug("Part %s is not blocked", key)
            # Nothing blocks it; let's go
            parts[key].status = Status.READY
            has_stuff_to_do = True

    if not has_stuff_to_do:
        raise RuntimeError("All restarted parts are blocked. Nothing to do.")

def run_kinit(config):
    if not config.getboolean("general", "kerberos"):
        return

    keytab = config.get("general", "kerberos_keytab")
    principal = config.get("general", "kerberos_principal")

    fd, fname = tempfile.mkstemp(prefix="krb5cc_pungi-orchestrate_")
    os.close(fd)
    os.environ["KRB5CCNAME"] = fname
    shortcuts.run(["kinit", "-k", "-t", keytab, principal])
    log.debug("Created a kerberos ticket for %s", principal)

    atexit.register(os.remove, fname)

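# run_kinit() reads three options from the [general] section of the main
# config; a hypothetical sketch (option names from the code above, values
# invented for illustration):
#
#     [general]
#     kerberos = true
#     kerberos_keytab = /etc/pungi.keytab
#     kerberos_principal = compose/pungi@EXAMPLE.COM
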
def get_compose_data(compose_path):
    try:
        compose = productmd.compose.Compose(compose_path)
        data = {
            "compose_id": compose.info.compose.id,
            "compose_date": compose.info.compose.date,
            "compose_type": compose.info.compose.type,
            "compose_respin": str(compose.info.compose.respin),
            "compose_label": compose.info.compose.label,
            "release_id": compose.info.release_id,
            "release_name": compose.info.release.name,
            "release_short": compose.info.release.short,
            "release_version": compose.info.release.version,
            "release_type": compose.info.release.type,
            "release_is_layered": compose.info.release.is_layered,
        }
        if compose.info.release.is_layered:
            data.update(
                {
                    "base_product_name": compose.info.base_product.name,
                    "base_product_short": compose.info.base_product.short,
                    "base_product_version": compose.info.base_product.version,
                    "base_product_type": compose.info.base_product.type,
                }
            )
        return data
    except Exception:
        return {}

def get_script_env(compose_path):
    env = os.environ.copy()
    env["COMPOSE_PATH"] = compose_path
    for key, value in get_compose_data(compose_path).items():
        if isinstance(value, bool):
            env[key.upper()] = "YES" if value else ""
        else:
            env[key.upper()] = str(value) if value else ""
    return env

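# The keys from get_compose_data() are upper-cased into the environment, so
# a script launched by run_scripts() below sees variables along these lines
# (values hypothetical; booleans become "YES" or an empty string):
#
#     COMPOSE_PATH=/mnt/compose/Fedora-Rawhide-20200517.n.0
#     COMPOSE_ID=Fedora-Rawhide-20200517.n.0
#     COMPOSE_TYPE=nightly
#     RELEASE_IS_LAYERED=
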
def run_scripts(prefix, compose_dir, scripts):
    env = get_script_env(compose_dir)
    for idx, script in enumerate(scripts.strip().splitlines()):
        command = script.strip()
        logfile = os.path.join(compose_dir, "logs", "%s%s.log" % (prefix, idx))
        log.debug("Running command: %r", command)
        log.debug("See output in %s", logfile)
        shortcuts.run(command, env=env, logfile=logfile)

def try_translate_path(parts, path):
    translation = []
    for part in parts.values():
        conf = pungi.util.load_config(part.config)
        translation.extend(conf.get("translate_paths", []))
    return pungi.util.translate_path_raw(translation, path)

def send_notification(compose_dir, command, parts):
    if not command:
        return
    from pungi.notifier import PungiNotifier

    data = get_compose_data(compose_dir)
    data["location"] = try_translate_path(parts, compose_dir)
    notifier = PungiNotifier([command])
    with open(os.path.join(compose_dir, "STATUS")) as f:
        status = f.read().strip()
    notifier.send("status-change", workdir=compose_dir, status=status, **data)

def setup_progress_monitor(global_config, parts):
    """Update configuration so that each part sends notifications about its
    progress to the orchestrator.

    There is a file to which the notifications are written. The orchestrator
    reads it and maps the entries to particular parts. The path to this file
    is stored in an environment variable.
    """
    tmp_file = tempfile.NamedTemporaryFile(prefix="pungi-progress-monitor_")
    os.environ["_PUNGI_ORCHESTRATOR_PROGRESS_MONITOR"] = tmp_file.name
    atexit.register(os.remove, tmp_file.name)

    global_config.extra_args.append(
        "--notification-script=pungi-notification-report-progress"
    )

    def reader():
        while True:
            line = tmp_file.readline()
            if not line:
                time.sleep(0.1)
                continue
            path, msg = line.split(":", 1)
            for part in parts:
                if parts[part].path == os.path.dirname(path):
                    log.debug("%s: %s", part, msg.strip())
                    break

    monitor = threading.Thread(target=reader)
    monitor.daemon = True
    monitor.start()

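# The reader() thread above expects each line of the progress file to look
# like "<path>:<message>", where the dirname of the path identifies the
# part. A doctest-style sketch with a hypothetical path and message:
#
#     >>> line = "/composes/parts/Server/work:phase INIT started"
#     >>> path, msg = line.split(":", 1)
#     >>> os.path.dirname(path)
#     '/composes/parts/Server'
#     >>> msg.strip()
#     'phase INIT started'
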
def run(work_dir, main_config_file, args):
    config_dir = os.path.join(work_dir, "config")
    shutil.copytree(os.path.dirname(main_config_file), config_dir)

    # Read main config
    parser = configparser.RawConfigParser(
        defaults={
            "kerberos": "false",
            "pre_compose_script": "",
            "post_compose_script": "",
            "notification_script": "",
        }
    )
    parser.read(main_config_file)

    # Create kerberos ticket
    run_kinit(parser)

    compose_info = dict(parser.items("general"))
    compose_type = parser.get("general", "compose_type")

    target_dir = prepare_compose_dir(parser, args, main_config_file, compose_info)
    kobo.log.add_file_logger(log, os.path.join(target_dir, "logs", "orchestrator.log"))
    log.info("Composing %s", target_dir)

    run_scripts("pre_compose_", target_dir, parser.get("general", "pre_compose_script"))

    old_compose = find_old_compose(
        os.path.dirname(target_dir),
        compose_info["release_short"],
        compose_info["release_version"],
        "",
    )
    if old_compose:
        log.info("Reusing old compose %s", old_compose)

    global_config = Config(
        target=target_dir,
        compose_type=compose_type,
        label=args.label,
        old_compose=old_compose,
        config_dir=os.path.dirname(main_config_file),
        event=args.koji_event,
        extra_args=_safe_get_list(parser, "general", "extra_args"),
    )

    if not global_config.event and parser.has_option("general", "koji_profile"):
        koji_wrapper = KojiWrapper(parser.get("general", "koji_profile"))
        event_file = os.path.join(global_config.target, "work/global/koji-event")
        result = get_koji_event_raw(koji_wrapper, None, event_file)
        global_config = global_config._replace(event=result["id"])

    parts = {}
    for section in parser.sections():
        if section == "general":
            continue
        parts[section] = ComposePart.from_config(parser, section, config_dir)

    if hasattr(args, "part"):
        setup_for_restart(global_config, parts, args.part)

    setup_progress_monitor(global_config, parts)

    send_notification(target_dir, parser.get("general", "notification_script"), parts)

    retcode = run_all(global_config, parts)

    if retcode:
        # Only run the script if we are not doomed.
        run_scripts(
            "post_compose_", target_dir, parser.get("general", "post_compose_script")
        )

    send_notification(target_dir, parser.get("general", "notification_script"), parts)

    return retcode

def parse_args(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", action="store_true")
    parser.add_argument("--koji-event", metavar="ID", type=parse_koji_event)
    subparsers = parser.add_subparsers()
    start = subparsers.add_parser("start")
    start.add_argument("config", metavar="CONFIG")
    start.add_argument("--label")

    restart = subparsers.add_parser("restart")
    restart.add_argument("config", metavar="CONFIG")
    restart.add_argument("compose_path", metavar="COMPOSE_PATH")
    restart.add_argument(
        "part", metavar="PART", nargs="*", help="which parts to restart"
    )
    restart.add_argument("--label")

    return parser.parse_args(argv)

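# Assuming the parser above, the two subcommands are invoked like so (file,
# compose and part names hypothetical):
#
#     >>> args = parse_args(["start", "orchestrator.conf", "--label", "RC-1.0"])
#     >>> (args.config, args.label)
#     ('orchestrator.conf', 'RC-1.0')
#     >>> args = parse_args(["restart", "orchestrator.conf", "/mnt/x", "server"])
#     >>> (args.compose_path, args.part)
#     ('/mnt/x', ['server'])
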
def main(argv=None):
    args = parse_args(argv)
    setup_logging(args.debug)

    main_config_file = os.path.abspath(args.config)

    with temp_dir() as work_dir:
        try:
            if not run(work_dir, main_config_file, args):
                sys.exit(1)
        except Exception:
            log.exception("Unhandled exception!")
            sys.exit(1)

@@ -1,7 +1,8 @@
# Some packages must be installed via dnf/yum first, see doc/contributing.rst
dict.sorted
dogpile.cache
fedmsg
flufl.lock ; python_version >= '3.0'
flufl.lock < 3.0 ; python_version <= '2.7'
funcsigs
jsonschema
kobo

setup.py
@@ -5,14 +5,9 @@
import os
import glob

import distutils.command.sdist
from setuptools import setup


# override default tarball format with bzip2
distutils.command.sdist.sdist.default_format = {"posix": "bztar"}


# recursively scan for python modules to be included
package_root_dirs = ["pungi", "pungi_utils"]
packages = set()

@@ -25,7 +20,7 @@ packages = sorted(packages)

setup(
    name="pungi",
-    version="4.2.15",
+    version="4.5.0",
    description="Distribution compose tool",
    url="https://pagure.io/pungi",
    author="Dennis Gilmore",
@@ -41,12 +36,12 @@ setup(
        "pungi-patch-iso = pungi.scripts.patch_iso:cli_main",
        "pungi-make-ostree = pungi.ostree:main",
        "pungi-notification-report-progress = pungi.scripts.report_progress:main",
        "pungi-orchestrate = pungi_utils.orchestrator:main",
        "pungi-wait-for-signed-ostree-handler = pungi.scripts.wait_for_signed_ostree_handler:main",  # noqa: E501
        "pungi-koji = pungi.scripts.pungi_koji:cli_main",
        "pungi-gather = pungi.scripts.pungi_gather:cli_main",
        "pungi-config-dump = pungi.scripts.config_dump:cli_main",
        "pungi-config-validate = pungi.scripts.config_validate:cli_main",
        "pungi-cache-cleanup = pungi.scripts.cache_cleanup:main",
        "pungi-gather-modules = pungi.scripts.gather_modules:cli_main",
        "pungi-gather-rpms = pungi.scripts.gather_rpms:cli_main",
        "pungi-generate-packages-json = pungi.scripts.create_packages_json:cli_main",  # noqa: E501

@@ -55,6 +50,7 @@ setup(
    },
    scripts=["contrib/yum-dnf-compare/pungi-compare-depsolving"],
    data_files=[
        ("/usr/lib/tmpfiles.d", glob.glob("contrib/tmpfiles.d/*.conf")),
        ("/usr/share/pungi", glob.glob("share/*.xsl")),
        ("/usr/share/pungi", glob.glob("share/*.ks")),
        ("/usr/share/pungi", glob.glob("share/*.dtd")),

@@ -0,0 +1,24 @@
FROM fedora:33
LABEL \
    name="Pungi test" \
    description="Run tests using tox with Python 3" \
    vendor="Pungi developers" \
    license="MIT"

RUN dnf -y update && dnf -y install \
    findutils \
    libmodulemd \
    git \
    koji \
    make \
    python3-createrepo_c \
    python3-gobject-base \
    python3-tox \
    python3-urlgrabber \
    && dnf clean all

WORKDIR /src

COPY . .

CMD ["tox", "-e", "flake8,black,py3"]

@@ -0,0 +1,27 @@
FROM centos:7
LABEL \
    name="Pungi test" \
    description="Run tests using tox with Python 2" \
    vendor="Pungi developers" \
    license="MIT"

RUN yum -y update && yum -y install epel-release && yum -y install \
    git \
    libmodulemd2 \
    make \
    python3 \
    python-createrepo_c \
    python-gobject-base \
    python-gssapi \
    python-libcomps \
    pykickstart \
    && yum clean all

# python-tox in yum repo is too old, let's install latest version
RUN pip3 install tox

WORKDIR /src

COPY . .

CMD ["tox", "-e", "py27"]

@@ -0,0 +1,59 @@
def DUFFY_SESSION_ID

pipeline {
    agent {
        label 'cico-workspace'
    }

    parameters {
        string(name: 'REPO', defaultValue: '', description: 'Git repo URL the pull request comes from')
        string(name: 'BRANCH', defaultValue: '', description: 'Git branch the pull request comes from')
    }

    stages {
        stage('CI') {
            steps {
                script {
                    if (params.REPO == "" || params.BRANCH == "") {
                        error "Please supply both params (REPO and BRANCH)"
                    }
                    try {
                        echo "Requesting duffy node ..."
                        def session_str = sh returnStdout: true, script: "set +x; duffy client --url https://duffy.ci.centos.org/api/v1 --auth-name fedora-infra --auth-key $CICO_API_KEY request-session pool=virt-ec2-t2-centos-9s-x86_64,quantity=1"
                        def session = readJSON text: session_str
                        DUFFY_SESSION_ID = session.session.id
                        def hostname = session.session.nodes[0].hostname
                        echo "duffy session id: $DUFFY_SESSION_ID hostname: $hostname"
                        def remote_dir = "/tmp/$JENKINS_AGENT_NAME"
                        echo "remote_dir: $remote_dir"
                        writeFile file: 'job.sh', text: """
set -xe
dnf install -y git podman
git config --global user.email "jenkins@localhost"
git config --global user.name "jenkins"
cd $remote_dir
git clone https://pagure.io/pungi.git -b master
cd pungi
git remote rm proposed || true
git remote add proposed "$params.REPO"
git fetch proposed
git checkout origin/master
git merge --no-ff "proposed/$params.BRANCH" -m "Merge PR"
podman run --rm -v .:/src:Z quay.io/exd-guild-compose/pungi-test tox -r -e flake8,black,py3,bandit
podman run --rm -v .:/src:Z quay.io/exd-guild-compose/pungi-test-py2 tox -r -e py27
"""
                        sh "cat job.sh"
                        sh "ssh -o StrictHostKeyChecking=no root@$hostname mkdir $remote_dir"
                        sh "scp job.sh root@$hostname:$remote_dir"
                        sh "ssh root@$hostname sh $remote_dir/job.sh"
                    } finally {
                        if (DUFFY_SESSION_ID) {
                            echo "Release duffy node ..."
                            sh "set +x; duffy client --url https://duffy.ci.centos.org/api/v1 --auth-name fedora-infra --auth-key $CICO_API_KEY retire-session $DUFFY_SESSION_ID > /dev/null"
                        }
                    }
                }
            }
        }
    }
}

@@ -108,6 +108,7 @@
        <groupid>core</groupid>
      </grouplist>
      <optionlist>
+       <groupid arch="x86_64">standard</groupid>
      </optionlist>
    </environment>

@@ -118,7 +119,7 @@
      <display_order>10</display_order>
      <grouplist>
        <groupid>core</groupid>
-       <groupid>standard</groupid>
+       <groupid arch="x86_64">standard</groupid>
        <groupid>basic-desktop</groupid>
      </grouplist>
      <optionlist>

@@ -110,4 +110,8 @@ extra_isos = {
    }]
}

create_jigdo = False
iso_level = [
    (".*", {
        "src": 3,
    }),
]