Compare commits

..

8 Commits

Author SHA1 Message Date
soksanichenko
43d4275830 - The version is bumped
- The changelog is updated
2022-03-22 14:45:20 +02:00
soksanichenko
30414cc748 Merge pull request 'ALBS-226: Patch pungi/lorax for building AL9' (#3) from ALBS-226 into aln9
Reviewed-on: #3
2022-03-21 15:52:50 +00:00
soksanichenko
a83be2fbb2 ALBS-226: Patch pungi/lorax for building AL9
- Unit tests are fixed
2022-03-19 03:32:43 +02:00
soksanichenko
acfdfcef15 ALBS-226: Patch pungi/lorax for building AL9
- Unit tests are fixed
2022-03-19 03:26:10 +02:00
soksanichenko
700ae3cbac ALBS-226: Patch pungi/lorax for building AL9
- Defaults modules can be empty, but pungi detects
  empty folder while copying and raises the exception in this case
2022-03-18 23:43:33 +02:00
soksanichenko
f2deb8b7c9 Merge pull request 'ALBS-186: Move pungi to our gitea and build it for AL9' (#1) from ALBS-186 into aln9
Reviewed-on: #1
2022-03-07 11:17:52 +00:00
soksanichenko
19cad92ff3 ALBS-186: Move pungi to our gitea and build it for AL9
- Required package `python3-dataclasses` is not needed because py3.9 has built-in module `dataclasses`
2022-02-25 16:00:32 +02:00
soksanichenko
dfa191caec ALBS-186: Move pungi to our gitea and build it for AL9
- Required package `python3-dataclasses` is not needed because py3.9 has built-in module `dataclasses`
- Version is bumped
- Changelog is updated
2022-02-25 15:25:12 +02:00
200 changed files with 7146 additions and 12131 deletions

.gitignore (vendored): 4 changed lines

@@ -11,9 +11,5 @@ tests/data/repo-krb5-lookaside
 tests/_composes
 htmlcov/
 .coverage
-.eggs
 .idea/
 .tox
-.venv
-.kdev4/
-pungi.kdev4

(deleted file, 41 lines)

@@ -1,41 +0,0 @@
From 432b0bce0401c4bbcd1a958a89305c475a794f26 Mon Sep 17 00:00:00 2001
From: Adam Williamson <awilliam@redhat.com>
Date: Jan 19 2024 07:25:09 +0000
Subject: checks: don't require "repo" in the "ostree" schema
Per @siosm in https://pagure.io/pungi-fedora/pull-request/1227
this option "is deprecated and not needed anymore", so Pungi
should not be requiring it.
Merges: https://pagure.io/pungi/pull-request/1714
Signed-off-by: Adam Williamson <awilliam@redhat.com>
---
diff --git a/pungi/checks.py b/pungi/checks.py
index a340f93..db8b297 100644
--- a/pungi/checks.py
+++ b/pungi/checks.py
@@ -1066,7 +1066,6 @@ def make_schema():
"required": [
"treefile",
"config_url",
- "repo",
"ostree_repo",
],
"additionalProperties": False,
diff --git a/pungi/phases/ostree.py b/pungi/phases/ostree.py
index 90578ae..2649cdb 100644
--- a/pungi/phases/ostree.py
+++ b/pungi/phases/ostree.py
@@ -85,7 +85,7 @@ class OSTreeThread(WorkerThread):
comps_repo = compose.paths.work.comps_repo(
"$basearch", variant=variant, create_dir=False
)
- repos = shortcuts.force_list(config["repo"]) + self.repos
+ repos = shortcuts.force_list(config.get("repo", [])) + self.repos
if compose.has_comps:
repos.append(translate_path(compose, comps_repo))
repos = get_repo_dicts(repos, logger=self.pool)
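
The pattern in this patch is the standard way to retire a required option: drop the key from the schema's ``required`` list so validation accepts configs without it, and switch the consumer to ``config.get("repo", [])`` so a missing key yields an empty list instead of a ``KeyError``. A sketch of an ``ostree`` config block that would only pass validation after this patch (all values are hypothetical):

    ostree = {
        "^Atomic$": {
            "treefile": "fedora-atomic-host.json",
            "config_url": "https://pagure.io/fedora-atomic.git",
            "ostree_repo": "/srv/ostree/repo",
            # note: no "repo" key; the variant repos are still used
        }
    }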

MANIFEST.in

@@ -2,7 +2,6 @@ include AUTHORS
 include COPYING
 include GPL
 include pungi.spec
-include setup.cfg
 include tox.ini
 include share/*
 include share/multilib/*

README.md

@@ -34,6 +34,4 @@ also moves the artifacts to correct locations.
 - Documentation: https://docs.pagure.org/pungi/
 - Upstream GIT: https://pagure.io/pungi/
 - Issue tracker: https://pagure.io/pungi/issues
-- Questions can be asked in the *#fedora-releng* IRC channel on irc.libera.chat
-  or in the matrix room
-  [`#releng:fedoraproject.org`](https://matrix.to/#/#releng:fedoraproject.org)
+- Questions can be asked on *#fedora-releng* IRC channel on FreeNode

TODO: 1 changed line

@@ -47,6 +47,7 @@ Split Pungi into smaller well-defined tools
 * create install images
 * lorax
+* buildinstall
 * create isos
 * isos

(deleted file, 2 lines)

@@ -1,2 +0,0 @@
-# Clean up pungi cache
-d /var/cache/pungi/createrepo_c/ - - - 30d

doc/_static/phases.svg (vendored): 147 changed lines

Diff of the phase-diagram SVG source (rendered image; 23 KiB before, 21 KiB after). The canvas height changes from 327 px to 301 px, the KiwiBuild and ImageContainer blocks are dropped, a Repoclosure block is added, and element positions plus Inkscape metadata (version 1.3.2 to 1.0.1) are adjusted to match.

doc/conf.py

@@ -18,12 +18,12 @@ import os
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-# sys.path.insert(0, os.path.abspath('.'))
+#sys.path.insert(0, os.path.abspath('.'))

 # -- General configuration ------------------------------------------------

 # If your documentation needs a minimal Sphinx version, state it here.
-# needs_sphinx = '1.0'
+#needs_sphinx = '1.0'

 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
@@ -31,201 +31,207 @@ import os
 extensions = []

 # Add any paths that contain templates here, relative to this directory.
-templates_path = ["_templates"]
+templates_path = ['_templates']

 # The suffix of source filenames.
-source_suffix = ".rst"
+source_suffix = '.rst'

 # The encoding of source files.
-# source_encoding = 'utf-8-sig'
+#source_encoding = 'utf-8-sig'

 # The master toctree document.
-master_doc = "index"
+master_doc = 'index'

 # General information about the project.
-project = "Pungi"
-copyright = "2016, Red Hat, Inc."
+project = u'Pungi'
+copyright = u'2016, Red Hat, Inc.'

 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 #
 # The short X.Y version.
-version = "4.7"
+version = '4.2'
 # The full version, including alpha/beta/rc tags.
-release = "4.7.0"
+release = '4.2.7'

 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
-# language = None
+#language = None

 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
-# today = ''
+#today = ''
 # Else, today_fmt is used as the format for a strftime call.
-# today_fmt = '%B %d, %Y'
+#today_fmt = '%B %d, %Y'

 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ["_build"]
+exclude_patterns = ['_build']

 # The reST default role (used for this markup: `text`) to use for all
 # documents.
-# default_role = None
+#default_role = None

 # If true, '()' will be appended to :func: etc. cross-reference text.
-# add_function_parentheses = True
+#add_function_parentheses = True

 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
-# add_module_names = True
+#add_module_names = True

 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
-# show_authors = False
+#show_authors = False

 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = "sphinx"
+pygments_style = 'sphinx'

 # A list of ignored prefixes for module index sorting.
-# modindex_common_prefix = []
+#modindex_common_prefix = []

 # If true, keep warnings as "system message" paragraphs in the built documents.
-# keep_warnings = False
+#keep_warnings = False

 # -- Options for HTML output ----------------------------------------------

 # The theme to use for HTML and HTML Help pages. See the documentation for
 # a list of builtin themes.
-html_theme = "default"
+html_theme = 'default'

 # Theme options are theme-specific and customize the look and feel of a theme
 # further. For a list of options available for each theme, see the
 # documentation.
-# html_theme_options = {}
+#html_theme_options = {}

 # Add any paths that contain custom themes here, relative to this directory.
-# html_theme_path = []
+#html_theme_path = []

 # The name for this set of Sphinx documents. If None, it defaults to
 # "<project> v<release> documentation".
-# html_title = None
+#html_title = None

 # A shorter title for the navigation bar. Default is the same as html_title.
-# html_short_title = None
+#html_short_title = None

 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-# html_logo = None
+#html_logo = None

 # The name of an image file (within the static path) to use as favicon of the
 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
 # pixels large.
-# html_favicon = None
+#html_favicon = None

 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ["_static"]
+html_static_path = ['_static']

 # Add any extra paths that contain custom files (such as robots.txt or
 # .htaccess) here, relative to this directory. These files are copied
 # directly to the root of the documentation.
-# html_extra_path = []
+#html_extra_path = []

 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-# html_last_updated_fmt = '%b %d, %Y'
+#html_last_updated_fmt = '%b %d, %Y'

 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-# html_use_smartypants = True
+#html_use_smartypants = True

 # Custom sidebar templates, maps document names to template names.
-# html_sidebars = {}
+#html_sidebars = {}

 # Additional templates that should be rendered to pages, maps page names to
 # template names.
-# html_additional_pages = {}
+#html_additional_pages = {}

 # If false, no module index is generated.
-# html_domain_indices = True
+#html_domain_indices = True

 # If false, no index is generated.
-# html_use_index = True
+#html_use_index = True

 # If true, the index is split into individual pages for each letter.
-# html_split_index = False
+#html_split_index = False

 # If true, links to the reST sources are added to the pages.
-# html_show_sourcelink = True
+#html_show_sourcelink = True

 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-# html_show_sphinx = True
+#html_show_sphinx = True

 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-# html_show_copyright = True
+#html_show_copyright = True

 # If true, an OpenSearch description file will be output, and all pages will
 # contain a <link> tag referring to it. The value of this option must be the
 # base URL from which the finished HTML is served.
-# html_use_opensearch = ''
+#html_use_opensearch = ''

 # This is the file name suffix for HTML files (e.g. ".xhtml").
-# html_file_suffix = None
+#html_file_suffix = None

 # Output file base name for HTML help builder.
-htmlhelp_basename = "Pungidoc"
+htmlhelp_basename = 'Pungidoc'

 # -- Options for LaTeX output ---------------------------------------------

 latex_elements = {
     # The paper size ('letterpaper' or 'a4paper').
     #'papersize': 'letterpaper',
-    # The font size ('10pt', '11pt' or '12pt').
-    #'pointsize': '10pt',
-    # Additional stuff for the LaTeX preamble.
-    #'preamble': '',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
 }

 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title,
 #  author, documentclass [howto, manual, or own class]).
 latex_documents = [
-    ("index", "Pungi.tex", "Pungi Documentation", "Daniel Mach", "manual"),
+    ('index', 'Pungi.tex', u'Pungi Documentation',
+     u'Daniel Mach', 'manual'),
 ]

 # The name of an image file (relative to this directory) to place at the top of
 # the title page.
-# latex_logo = None
+#latex_logo = None

 # For "manual" documents, if this is true, then toplevel headings are parts,
 # not chapters.
-# latex_use_parts = False
+#latex_use_parts = False

 # If true, show page references after internal links.
-# latex_show_pagerefs = False
+#latex_show_pagerefs = False

 # If true, show URL addresses after external links.
-# latex_show_urls = False
+#latex_show_urls = False

 # Documents to append as an appendix to all manuals.
-# latex_appendices = []
+#latex_appendices = []

 # If false, no module index is generated.
-# latex_domain_indices = True
+#latex_domain_indices = True

 # -- Options for manual page output ---------------------------------------

 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
-man_pages = [("index", "pungi", "Pungi Documentation", ["Daniel Mach"], 1)]
+man_pages = [
+    ('index', 'pungi', u'Pungi Documentation',
+     [u'Daniel Mach'], 1)
+]

 # If true, show URL addresses after external links.
-# man_show_urls = False
+#man_show_urls = False

 # -- Options for Texinfo output -------------------------------------------
@@ -234,25 +240,19 @@ man_pages = [("index", "pungi", "Pungi Documentation", ["Daniel Mach"], 1)]
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-    (
-        "index",
-        "Pungi",
-        "Pungi Documentation",
-        "Daniel Mach",
-        "Pungi",
-        "One line description of project.",
-        "Miscellaneous",
-    ),
+    ('index', 'Pungi', u'Pungi Documentation',
+     u'Daniel Mach', 'Pungi', 'One line description of project.',
+     'Miscellaneous'),
 ]

 # Documents to append as an appendix to all manuals.
-# texinfo_appendices = []
+#texinfo_appendices = []

 # If false, no module index is generated.
-# texinfo_domain_indices = True
+#texinfo_domain_indices = True

 # How to display URL addresses: 'footnote', 'no', or 'inline'.
-# texinfo_show_urls = 'footnote'
+#texinfo_show_urls = 'footnote'

 # If true, do not generate a @detailmenu in the "Top" node's menu.
-# texinfo_no_detailmenu = False
+#texinfo_no_detailmenu = False

doc/configuration.rst

@@ -182,8 +182,6 @@ Options
 Please note that when ``dnf`` is used, the build dependencies check is
 skipped. On Python 3, only ``dnf`` backend is available.
-See also: the ``gather_backend`` setting for Pungi's gather phase.
 **cts_url**
 (*str*) -- URL to Compose Tracking Service. If defined, Pungi will add
 the compose to Compose Tracking Service and ge the compose ID from it.
@@ -194,17 +192,6 @@ Options
 Tracking Service Kerberos authentication. If not defined, the default
 Kerberos principal is used.
-**cts_oidc_token_url**
-(*str*) -- URL to the OIDC token endpoint.
-For example ``https://oidc.example.com/openid-connect/token``.
-This option can be overridden by the environment variable ``CTS_OIDC_TOKEN_URL``.
-**cts_oidc_client_id**
-(*str*) -- OIDC client ID.
-This option can be overridden by the environment variable ``CTS_OIDC_CLIENT_ID``.
-Note that environment variable ``CTS_OIDC_CLIENT_SECRET`` must be configured with
-corresponding client secret to authenticate to CTS via OIDC.
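
To make the relationship between these options concrete, a hedged sketch of how they could sit in a compose config (the server URL and client ID are invented for illustration; the secret deliberately stays out of the file):

    cts_url = "https://cts.example.com/"
    cts_oidc_token_url = "https://oidc.example.com/openid-connect/token"
    cts_oidc_client_id = "pungi-compose"
    # CTS_OIDC_CLIENT_SECRET is read from the environment, never from this file.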
 **compose_type**
 (*str*) -- Allows to set default compose type. Type set via a command-line
 option overwrites this.
@@ -292,8 +279,8 @@ There a couple common format specifiers available for both the options:
 format string. The pattern should not overlap, otherwise it is undefined
 which one will be used.
-This format will be used for some phases generating images. Currently that
-means ``createiso``, ``buildinstall`` and ``ostree_installer``.
+This format will be used for all phases generating images. Currently that
+means ``createiso``, ``live_images`` and ``buildinstall``.
 Available extra keys are:
 * ``disc_num``
@@ -323,6 +310,7 @@ There a couple common format specifiers available for both the options:
 Available keys are:
 * ``boot`` -- for ``boot.iso`` images created in *buildinstall* phase
+* ``live`` -- for images created by *live_images* phase
 * ``dvd`` -- for images created by *createiso* phase
 * ``ostree`` -- for ostree installer images
@@ -350,10 +338,48 @@ Example
 disc_types = {
 'boot': 'netinst',
+'live': 'Live',
 'dvd': 'DVD',
 }
+Signing
+=======
+If you want to sign deliverables generated during pungi run like RPM wrapped
+images. You must provide few configuration options:
+**signing_command** [optional]
+(*str*) -- Command that will be run with a koji build as a single
+argument. This command must not require any user interaction.
+If you need to pass a password for a signing key to the command,
+do this via command line option of the command and use string
+formatting syntax ``%(signing_key_password)s``.
+(See **signing_key_password_file**).
+**signing_key_id** [optional]
+(*str*) -- ID of the key that will be used for the signing.
+This ID will be used when crafting koji paths to signed files
+(``kojipkgs.fedoraproject.org/packages/NAME/VER/REL/data/signed/KEYID/..``).
+**signing_key_password_file** [optional]
+(*str*) -- Path to a file with password that will be formatted
+into **signing_command** string via ``%(signing_key_password)s``
+string format syntax (if used).
+Because pungi config is usually stored in git and is part of compose
+logs we don't want password to be included directly in the config.
+Note: If ``-`` string is used instead of a filename, then you will be asked
+for the password interactivelly right after pungi starts.
+Example
+-------
+::
+    signing_command = '~/git/releng/scripts/sigulsign_unsigned.py -vv --password=%(signing_key_password)s fedora-24'
+    signing_key_id = '81b46521'
+    signing_key_password_file = '~/password_for_fedora-24_key'
 .. _git-urls:
 Git URLs
@@ -431,12 +457,6 @@ Options
 cloned files should be split into subdirectories for each architecture of
 the variant.
-**createrepo_enable_cache** = True
-(*bool*) -- whether to use ``--cachedir`` option of ``createrepo``. It will
-cache and reuse checksum vaules to speed up createrepo phase.
-The cache dir is located at ``/var/cache/pungi/createrepo_c/$release_short-$uid``
-e.g. /var/cache/pungi/createrepo_c/Fedora-1000
 **product_id** = None
 (:ref:`scm_dict <scm_support>`) -- If specified, it should point to a
 directory with certificates ``*<variant_uid>-<arch>-*.pem``. Pungi will
@@ -553,16 +573,6 @@ Options
 with everything. Set this option to ``False`` to ignore ``noarch`` in
 ``ExclusiveArch`` and always consider only binary architectures.
-**pkgset_inherit_exclusive_arch_to_noarch** = True
-(*bool*) -- When set to ``True``, the value of ``ExclusiveArch`` or
-``ExcludeArch`` will be copied from source rpm to all its noarch packages.
-That will than limit which architectures the noarch packages can be
-included in.
-By setting this option to ``False`` this step is skipped, and noarch
-packages will by default land in all architectures. They can still be
-excluded by listing them in a relevant section of ``filter_packages``.
 **pkgset_allow_reuse** = True
 (*bool*) -- When set to ``True``, *Pungi* will try to reuse pkgset data
 from the old composes specified by ``--old-composes``. When enabled, this
@@ -571,18 +581,6 @@ Options
 (for example) between composes, then Pungi may not respect those changes
 in your new compose.
-**signed_packages_retries** = 0
-(*int*) -- In automated workflows, you might start a compose before Koji
-has completely written all signed packages to disk. In this case you may
-want Pungi to wait for the package to appear in Koji's storage. This
-option controls how many times Pungi will retry looking for the signed
-copy.
-**signed_packages_wait** = 30
-(*int*) -- Interval in seconds for how long to wait between attempts to
-find signed packages. This option only makes sense when
-``signed_packages_retries`` is set higher than 0.
 Example
 -------
@@ -603,7 +601,7 @@ Options
 -------
 **buildinstall_method**
-(*str*) -- "lorax" (f16+, rhel7+)
+(*str*) -- "lorax" (f16+, rhel7+) or "buildinstall" (older releases)
 **lorax_options**
 (*list*) -- special options passed on to *lorax*.
@@ -654,11 +652,6 @@ Options
 **buildinstall_allow_reuse** = False
 (*bool*) -- When set to ``True``, *Pungi* will try to reuse buildinstall
 results from old compose specified by ``--old-composes``.
-**buildinstall_packages**
-(list) Additional packages to be installed in the runroot environment
-where lorax will run to create installer. Format: ``[(variant_uid_regex,
-{arch|*: [package_globs]})]``.
 Example
 -------
@@ -693,13 +686,6 @@ Example
 })
 ]
-# Additional packages to be installed in the Koji runroot environment where
-# lorax will run.
-buildinstall_packages = [
-    ('^Simple$', {
-        '*': ['dummy-package'],
-    })
-]
 .. note::
@@ -742,7 +728,7 @@ Options
 (*bool*) -- When set to ``True``, *Pungi* will try to reuse gather results
 from old compose specified by ``--old-composes``.
-**greedy_method** = none
+**greedy_method**
 (*str*) -- This option controls how package requirements are satisfied in
 case a particular ``Requires`` has multiple candidates.
@@ -763,7 +749,7 @@ Options
 pulled in.
 * With ``greedy_method = "all"`` all three packages will be
 pulled in.
-* With ``greedy_method = "build"`` ``pkg-b-provider-1`` and
+* With ``greedy_method = "build" ``pkg-b-provider-1`` and
 ``pkg-b-provider-2`` will be pulled in.
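
As a one-line illustration of the setting itself (a sketch, not taken from the docs above):

    # Include every provider that was built together with the chosen one.
    greedy_method = "build"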
 **gather_backend**
@@ -777,9 +763,6 @@ Options
 ``python-multilib`` library. Please refer to ``multilib`` option to see the
 differences.
-See also: the ``repoclosure_backend`` setting for Pungi's repoclosure
-phase.
 **multilib**
 (*list*) -- mapping of variant regexes and arches to list of multilib
 methods
@@ -804,14 +787,8 @@ Options
 (*list*) -- additional packages to be included in a variant and
 architecture; format: ``[(variant_uid_regex, {arch|*: [package_globs]})]``
-In contrast to the ``comps_file`` setting, the ``additional_packages``
-setting merely adds the list of packages to the compose. When a package
-is in a comps group, it is visible to users via ``dnf groupinstall`` and
-Anaconda's Groups selection, but ``additional_packages`` does not affect
-DNF groups.
 The packages specified here are matched against RPM names, not any other
-provides in the package nor the name of source package. Shell globbing is
+provides in the package not the name of source package. Shell globbing is
 used, so wildcards are possible. The package can be specified as name only
 or ``name.arch``.
@@ -820,21 +797,6 @@ Options
 it. If you add a debuginfo package that does not have anything else from
 the same build included in the compose, the sources will not be pulled in.
-If you list a package in ``additional_packages`` but Pungi cannot find
-it (for example, it's not available in the Koji tag), Pungi will log a
-warning in the "work" or "logs" directories and continue without aborting.
-*Example*: This configuration will add all packages in a Koji tag to an
-"Everything" variant::
-    additional_packages = [
-        ('^Everything$', {
-            '*': [
-                '*',
-            ],
-        })
-    ]
 **filter_packages**
 (*list*) -- packages to be excluded from a variant and architecture;
 format: ``[(variant_uid_regex, {arch|*: [package_globs]})]``
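
A hypothetical example matching that format (the variant regex and globs are invented for illustration):

    filter_packages = [
        ("^Everything$", {
            "*": ["kernel-debug*", "*-devel"],
        }),
    ]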
@@ -902,15 +864,10 @@ Options
 comps file can not be found in the package set. When disabled (the
 default), such cases are still reported as warnings in the log.
-With ``dnf`` gather backend, this option will abort the compose on any
-missing package no matter if it's listed in comps, ``additional_packages``
-or prepopulate file.
 **gather_source_mapping**
 (*str*) -- JSON mapping with initial packages for the compose. The value
 should be a path to JSON file with following mapping: ``{variant: {arch:
-{rpm_name: [rpm_arch|None]}}}``. Relative paths are interpreted relative to
-the location of main config file.
+{rpm_name: [rpm_arch|None]}}}``.
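
For orientation, a hypothetical mapping file in the documented shape (variant and package names are invented; ``null`` stands in for an unspecified arch, per the format above):

    {
        "Server": {
            "x86_64": {
                "bash": [null],
                "glibc": ["x86_64", "i686"]
            }
        }
    }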
 **gather_profiler** = False
 (*bool*) -- When set to ``True`` the gather tool will produce additional
@@ -1244,7 +1201,7 @@ Options
 Format: ``[(variant_uid_regex, {arch|*: bool})]``
-**create_jigdo** = False
+**create_jigdo** = True
 (*bool*) -- controls the creation of jigdo from ISO
 **create_optional_isos** = False
@@ -1271,11 +1228,6 @@ Options
 meaning size in bytes, or it can be a string with ``k``, ``M``, ``G``
 suffix (using multiples of 1024).
-**iso_level**
-(*int|list*) [optional] -- Set the ISO9660 conformance level. This is
-either a global single value (a number from 1 to 4), or a variant/arch
-mapping.
 **split_iso_reserve** = 10MiB
 (*int|str*) -- how much free space should be left on each disk. The format
 is the same as for ``iso_size`` option.
@@ -1329,8 +1281,8 @@ All non-``RC`` milestones from label get appended to the version. For release
 either label is used or date, type and respin.
-Common options for Live Media and Image Build
-=============================================
+Common options for Live Images, Live Media and Image Build
+==========================================================
 All images can have ``ksurl``, ``version``, ``release`` and ``target``
 specified. Since this can create a lot of duplication, there are global options
@@ -1346,12 +1298,14 @@ The kickstart URL is configured by these options.
 * ``global_ksurl`` -- global fallback setting
 * ``live_media_ksurl``
 * ``image_build_ksurl``
+* ``live_images_ksurl``
 Target is specified by these settings.
 * ``global_target`` -- global fallback setting
 * ``live_media_target``
 * ``image_build_target``
+* ``live_images_target``
 * ``osbuild_target``
 Version is specified by these options. If no version is set, a default value
@@ -1360,6 +1314,7 @@ will be provided according to :ref:`automatic versioning <auto-version>`.
 * ``global_version`` -- global fallback setting
 * ``live_media_version``
 * ``image_build_version``
+* ``live_images_version``
 * ``osbuild_version``
 Release is specified by these options. If set to a magic value to
@@ -1369,14 +1324,44 @@ to :ref:`automatic versioning <auto-version>`.
 * ``global_release`` -- global fallback setting
 * ``live_media_release``
 * ``image_build_release``
+* ``live_images_release``
 * ``osbuild_release``
-Each configuration block can also optionally specify a ``failable`` key. It
+Each configuration block can also optionally specify a ``failable`` key. For
+live images it should have a boolean value. For live media and image build it
 should be a list of strings containing architectures that are optional. If any
 deliverable fails on an optional architecture, it will not abort the whole
 compose. If the list contains only ``"*"``, all arches will be substituted.
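
For instance, a hedged live media sketch where aarch64 may fail without blocking the compose (all names and values are invented, and real blocks need more keys than shown):

    live_media = {
        "^Workstation$": [{
            "name": "Fedora-Workstation-Live",
            "kickstart": "fedora-live-workstation.ks",
            "target": "f24-candidate",
            "failable": ["aarch64"],
        }]
    }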
+Live Images Settings
+====================
+**live_images**
+(*list*) -- Configuration for the particular image. The elements of the
+list should be tuples ``(variant_uid_regex, {arch|*: config})``. The config
+should be a dict with these keys:
+* ``kickstart`` (*str*)
+* ``ksurl`` (*str*) [optional] -- where to get the kickstart from
+* ``name`` (*str*)
+* ``version`` (*str*)
+* ``target`` (*str*)
+* ``repo`` (*str|[str]*) -- repos specified by URL or variant UID
+* ``specfile`` (*str*) -- for images wrapped in RPM
+* ``scratch`` (*bool*) -- only RPM-wrapped images can use scratch builds,
+but by default this is turned off
+* ``type`` (*str*) -- what kind of task to start in Koji. Defaults to
+``live`` meaning ``koji spin-livecd`` will be used. Alternative option
+is ``appliance`` corresponding to ``koji spin-appliance``.
+* ``sign`` (*bool*) -- only RPM-wrapped images can be signed
+**live_images_no_rename**
+(*bool*) -- When set to ``True``, filenames generated by Koji will be used.
+When ``False``, filenames will be generated based on ``image_name_format``
+configuration option.
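
Pulling those keys together, a hypothetical ``live_images`` entry (every value is invented for illustration):

    live_images = [
        ("^Spins$", {
            "armhfp": {
                "kickstart": "fedora-arm-workstation.ks",
                "name": "Fedora-Workstation-armhfp",
                "version": "24",
                "target": "f24-candidate",
                "repo": "Everything",
                "type": "appliance",
            }
        })
    ]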
 Live Media Settings
 ===================
@@ -1406,7 +1391,6 @@ Live Media Settings
 * ``repo`` (*str|[str]*) -- repos specified by URL or variant UID
 * ``title`` (*str*)
 * ``install_tree_from`` (*str*) -- variant to take install tree from
-* ``nomacboot`` (*bool*)
 Image Build Settings
@@ -1532,61 +1516,6 @@ Example
 }
-KiwiBuild Settings
-==================
-**kiwibuild**
-(*dict*) -- configuration for building images using kiwi by a Koji plugin.
-Pungi will trigger a Koji task delegating to kiwi, which will build the image,
-import it to Koji via content generators.
-Format: ``{variant_uid_regex: [{...}]}``.
-Required keys in the configuration dict:
-* ``kiwi_profile`` -- (*str*) select profile from description file.
-Description scm, description path and target have to be provided too, but
-instead of specifying them for each image separately, you can use the
-``kiwibuild_*`` options or ``global_target``.
-Optional keys:
-* ``description_scm`` -- (*str*) scm URL of description kiwi description.
-* ``description_path`` -- (*str*) path to kiwi description inside the scm
-repo.
-* ``repos`` -- additional repos used to install RPMs in the image. The
-compose repository for the enclosing variant is added automatically.
-Either variant name or a URL is supported.
-* ``target`` -- (*str*) which build target to use for the task. If not
-provided, then either ``kiwibuild_target`` or ``global_target`` is
-needed.
-* ``release`` -- (*str*) release of the output image.
-* ``arches`` -- (*[str]*) List of architectures to build for. If not
-provided, all variant architectures will be built.
-* ``failable`` -- (*[str]*) List of architectures for which this
-deliverable is not release blocking.
-* ``type`` -- (*str*) override default type from the bundle with this value.
-* ``type_attr`` -- (*[str]*) override default attributes for the build type
-from description.
-* ``bundle_name_format`` -- (*str*) override default bundle format name.
-**kiwibuild_description_scm**
-(*str*) -- URL for scm containing the description files
-**kiwibuild_description_path**
-(*str*) -- path to a description file within the description scm
-**kiwibuild_type**
-(*str*) -- override default type from the bundle with this value.
-**kiwibuild_type_attr**
-(*[str]*) -- override default attributes for the build type from description.
-**kiwibuild_bundle_name_format**
-(*str*) -- override default bundle format name.
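
For orientation, a minimal hypothetical ``kiwibuild`` block relying on the shared fallback options described above (the profile, URL, path and target are all invented):

    kiwibuild = {
        "^Cloud$": [{
            "kiwi_profile": "Cloud-Base",
        }]
    }
    # Shared fallbacks so each image entry does not have to repeat them.
    kiwibuild_description_scm = "https://example.com/kiwi-descriptions.git"
    kiwibuild_description_path = "fedora.kiwi"
    global_target = "f40-candidate"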
 OSBuild Composer for building images
 ====================================
@@ -1602,9 +1531,7 @@ OSBuild Composer for building images
 * ``name`` -- name of the Koji package
 * ``distro`` -- image for which distribution should be build TODO examples
-* ``image_types`` -- a list with a single image type string or just a
-string representing the image type to build (e.g. ``qcow2``). In any
-case, only a single image type can be provided as an argument.
+* ``image_type`` -- a list of image types to build (e.g. ``qcow2``)
 Optional keys:
@@ -1615,76 +1542,11 @@ OSBuild Composer for building images
 * ``release`` -- release part of the final NVR. If neither this option nor
 the global ``osbuild_release`` is set, Koji will automatically generate a
 value.
-* ``repo`` -- a list of repositories from which to consume packages for
+* ``repo`` -- a list of repository URLs from which to consume packages for
 building the image. By default only the variant repository is used.
-The list items may use one of the following formats:
-* String with just the repository URL.
-* Dictionary with the following keys:
-* ``baseurl`` -- URL of the repository.
-* ``package_sets`` -- a list of package set names to use for this
-repository. Package sets are an internal concept of Image Builder
-and are used in image definitions. If specified, the repository is
-used by Image Builder only for the pipeline with the same name.
-For example, specifying the ``build`` package set name will make
-the repository to be used only for the build environment in which
-the image will be built. (optional)
 * ``arches`` -- list of architectures for which to build the image. By
 default, the variant arches are used. This option can only restrict it,
 not add a new one.
-* ``manifest_type`` -- the image type that is put into the manifest by
-pungi. If not supplied then it is autodetected from the Koji output.
-* ``ostree_url`` -- URL of the repository that's used to fetch the parent
-commit from.
-* ``ostree_ref`` -- name of the ostree branch
-* ``ostree_parent`` -- commit hash or a a branch-like reference to the
-parent commit.
-* ``customizations`` -- a dictionary with customizations to use for the
-image build. For the list of supported customizations, see the **hosted**
-variants in the `Image Builder documentation
-<https://osbuild.org/docs/user-guide/blueprint-reference#installation-device>`.
-* ``upload_options`` -- a dictionary with upload options specific to the
-target cloud environment. If provided, the image will be uploaded to the
-cloud environment, in addition to the Koji server. One can't combine
-arbitrary image types with arbitrary upload options.
-The dictionary keys differ based on the target cloud environment. The
-following keys are supported:
-* **AWS EC2 upload options** -- upload to Amazon Web Services.
-* ``region`` -- AWS region to upload the image to
-* ``share_with_accounts`` -- list of AWS account IDs to share the image
-with
-* ``snapshot_name`` -- Snapshot name of the uploaded EC2 image
-(optional)
-* **AWS S3 upload options** -- upload to Amazon Web Services S3.
-* ``region`` -- AWS region to upload the image to
-* **Azure upload options** -- upload to Microsoft Azure.
-* ``tenant_id`` -- Azure tenant ID to upload the image to
-* ``subscription_id`` -- Azure subscription ID to upload the image to
-* ``resource_group`` -- Azure resource group to upload the image to
-* ``location`` -- Azure location of the resource group (optional)
-* ``image_name`` -- Image name of the uploaded Azure image (optional)
-* **GCP upload options** -- upload to Google Cloud Platform.
-* ``region`` -- GCP region to upload the image to
-* ``bucket`` -- GCP bucket to upload the image to (optional)
-* ``share_with_accounts`` -- list of GCP accounts to share the image
-with
-* ``image_name`` -- Image name of the uploaded GCP image (optional)
-* **Container upload options** -- upload to a container registry.
-* ``name`` -- name of the container image (optional)
-* ``tag`` -- container tag to upload the image to (optional)
 .. note::
 There is initial support for having this task as failable without aborting
@@ -1693,56 +1555,6 @@ OSBuild Composer for building images
 arch.
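
To make the shape concrete, a hypothetical ``osbuild`` block combining the required keys (using the ``image_types`` spelling from the left-hand version above) with AWS EC2 upload options; every value is invented:

    osbuild = {
        "^Cloud$": [{
            "name": "fedora-cloud-image",
            "distro": "fedora-36",
            "image_types": ["ami"],
            "upload_options": {
                "region": "us-east-1",
                "share_with_accounts": ["123456789012"],
            },
        }]
    }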
Image container
===============
This phase supports building containers in OSBS that embed an image created in
the same compose. This can be useful for delivering the image to users running
in containerized environments.
Pungi will start a ``buildContainer`` task in Koji with configured source
repository. The ``Dockerfile`` can expect that a repo file will be injected
into the container that defines a repo named ``image-to-include``, and its
``baseurl`` will point to the image to include. It is possible to extract the
URL with a command like ``dnf config-manager --dump image-to-include | awk
'/baseurl =/{print $3}'```
**image_container**
(*dict*) -- configuration for building containers embedding an image.
Format: ``{variant_uid_regex: [{...}]}``.
The inner object will define a single container. These keys are required:
* ``url``, ``target``, ``git_branch``. See OSBS section for definition of
these.
* ``image_spec`` -- (*object*) A string mapping of filters used to select
the image to embed. All images listed in metadata for the variant will be
processed. The keys of this filter are used to select metadata fields for
the image, and values are regular expressions that need to match the
metadata value.
The filter should match exactly one image.
Example config
--------------
::
image_container = {
"^Server$": [{
"url": "git://example.com/dockerfiles.git?#HEAD",
"target": "f24-container-candidate",
"git_branch": "f24",
"image_spec": {
"format": "qcow2",
"arch": "x86_64",
"path": ".*/guest-image-.*$",
}
}]
}
OSTree Settings OSTree Settings
=============== ===============
@ -1753,16 +1565,16 @@ another directory. Any new packages in the compose will be added to the
repository with a new commit. repository with a new commit.
**ostree** **ostree**
(*dict*) -- a mapping of configuration for each variant. The format should (*dict*) -- a mapping of configuration for each variant. The format should be
be ``{variant_uid_regex: config_dict}``. It is possible to use a list of ``{variant_uid_regex: config_dict}``. It is possible to use a list of
configuration dicts as well. configuration dicts as well.
The configuration dict for each variant arch pair must have these keys: The configuration dict for each variant arch pair must have these keys:
* ``treefile`` -- (*str*) Filename of configuration for ``rpm-ostree``. * ``treefile`` -- (*str*) Filename of configuration for ``rpm-ostree``.
* ``config_url`` -- (*str*) URL for Git repository with the ``treefile``. * ``config_url`` -- (*str*) URL for Git repository with the ``treefile``.
* ``repo`` -- (*str|dict|[str|dict]*) repos specified by URL or a dict of * ``repo`` -- (*str|dict|[str|dict]*) repos specified by URL or variant UID
repo options, ``baseurl`` is required in the dict. or a dict of repo options, ``baseurl`` is required in the dict.
* ``ostree_repo`` -- (*str*) Where to put the ostree repository * ``ostree_repo`` -- (*str*) Where to put the ostree repository
These keys are optional: These keys are optional:
@ -1782,8 +1594,6 @@ repository with a new commit.
* ``force_new_commit`` -- (*bool*) Do not use rpm-ostree's built-in change * ``force_new_commit`` -- (*bool*) Do not use rpm-ostree's built-in change
detection. detection.
Defaults to ``False``. Defaults to ``False``.
* ``unified_core`` -- (*bool*) Use rpm-ostree in unified core mode for composes.
Defaults to ``False``.
* ``version`` -- (*str*) Version string to be added as versioning metadata. * ``version`` -- (*str*) Version string to be added as versioning metadata.
If this option is set to ``!OSTREE_VERSION_FROM_LABEL_DATE_TYPE_RESPIN``, If this option is set to ``!OSTREE_VERSION_FROM_LABEL_DATE_TYPE_RESPIN``,
a value will be generated automatically as ``$VERSION.$RELEASE``. a value will be generated automatically as ``$VERSION.$RELEASE``.
@ -1793,8 +1603,6 @@ repository with a new commit.
* ``tag_ref`` -- (*bool*, default ``True``) If set to ``False``, a git * ``tag_ref`` -- (*bool*, default ``True``) If set to ``False``, a git
reference will not be created. reference will not be created.
* ``ostree_ref`` -- (*str*) To override value ``ref`` from ``treefile``. * ``ostree_ref`` -- (*str*) To override value ``ref`` from ``treefile``.
* ``runroot_packages`` -- (*list*) A list of additional package names to be
installed in the runroot environment in Koji.
Example config Example config
-------------- --------------
@ -1804,11 +1612,13 @@ Example config
"^Atomic$": { "^Atomic$": {
"treefile": "fedora-atomic-docker-host.json", "treefile": "fedora-atomic-docker-host.json",
"config_url": "https://git.fedorahosted.org/git/fedora-atomic.git", "config_url": "https://git.fedorahosted.org/git/fedora-atomic.git",
"keep_original_sources": True,
"repo": [ "repo": [
"Server",
"http://example.com/repo/x86_64/os", "http://example.com/repo/x86_64/os",
{"baseurl": "Everything"},
{"baseurl": "http://example.com/linux/repo", "exclude": "systemd-container"}, {"baseurl": "http://example.com/linux/repo", "exclude": "systemd-container"},
], ],
"keep_original_sources": True,
"ostree_repo": "/mnt/koji/compose/atomic/Rawhide/", "ostree_repo": "/mnt/koji/compose/atomic/Rawhide/",
"update_summary": True, "update_summary": True,
# Automatically generate a reasonable version # Automatically generate a reasonable version
@ -1824,79 +1634,6 @@ Example config
has the pungi_ostree plugin installed. has the pungi_ostree plugin installed.
OSTree Native Container Settings
================================
The ``ostree_container`` phase of *Pungi* can create an ostree native container
image as an OCI archive. This is done by running ``rpm-ostree compose image``
in a Koji runroot environment.
While rpm-ostree can use information from previously built images to improve
the split in container layers, we cannot use that functionality until
https://github.com/containers/skopeo/pull/2114 is resolved. Each invocation
will thus create a new OCI archive image *from scratch*.
**ostree_container**
(*dict*) -- a mapping of configuration for each variant. The format should
be ``{variant_uid_regex: config_dict}``. It is possible to use a list of
configuration dicts as well.
The configuration dict for each variant arch pair must have these keys:
* ``treefile`` -- (*str*) Filename of configuration for ``rpm-ostree``.
* ``config_url`` -- (*str*) URL for Git repository with the ``treefile``.
These keys are optional:
* ``repo`` -- (*str|dict|[str|dict]*) repos specified by URL or a dict of
repo options, ``baseurl`` is required in the dict.
* ``keep_original_sources`` -- (*bool*) Keep the existing source repos in
the tree config file. If not enabled, all the original source repos will
be removed from the tree config file.
* ``config_branch`` -- (*str*) Git branch of the repo to use. Defaults to
``main``.
* ``arches`` -- (*[str]*) List of architectures for which to generate
ostree native container images. There will be one task per architecture.
By default all architectures in the variant are used.
* ``failable`` -- (*[str]*) List of architectures for which this
deliverable is not release blocking.
* ``version`` -- (*str*) Version string to be added to the OCI archive name.
If this option is set to ``!OSTREE_VERSION_FROM_LABEL_DATE_TYPE_RESPIN``,
a value will be generated automatically as ``$VERSION.$RELEASE``.
If this option is set to ``!VERSION_FROM_VERSION_DATE_RESPIN``,
a value will be generated automatically as ``$VERSION.$DATE.$RESPIN``.
:ref:`See how those values are created <auto-version>`.
* ``tag_ref`` -- (*bool*, default ``True``) If set to ``False``, a git
reference will not be created.
* ``runroot_packages`` -- (*list*) A list of additional package names to be
installed in the runroot environment in Koji.
Example config
--------------
::
ostree_container = {
"^Sagano$": {
"treefile": "fedora-tier-0-38.yaml",
"config_url": "https://gitlab.com/CentOS/cloud/sagano.git",
"config_branch": "main",
"repo": [
"http://example.com/repo/x86_64/os",
{"baseurl": "http://example.com/linux/repo", "exclude": "systemd-container"},
],
# Automatically generate a reasonable version
"version": "!OSTREE_VERSION_FROM_LABEL_DATE_TYPE_RESPIN",
# Only run this for x86_64 even if Sagano has more arches
"arches": ["x86_64"],
}
}
**ostree_container_use_koji_plugin** = False
(*bool*) -- When set to ``True``, the Koji pungi_ostree task will be
used to execute rpm-ostree instead of runroot. Use only if the Koji instance
has the pungi_ostree plugin installed.
Ostree Installer Settings Ostree Installer Settings
========================= =========================
@ -1938,8 +1675,6 @@ an OSTree repository. This always runs in Koji as a ``runroot`` task.
with the optional key: with the optional key:
* ``extra_runroot_pkgs`` -- (*[str]*) * ``extra_runroot_pkgs`` -- (*[str]*)
* ``skip_branding`` -- (*bool*) Stops lorax from installing branding packages.
Defaults to ``False``.
**ostree_installer_overwrite** = False **ostree_installer_overwrite** = False
(*bool*) -- by default if a variant including OSTree installer also creates (*bool*) -- by default if a variant including OSTree installer also creates
@ -2019,34 +1754,24 @@ they are not scratch builds).
to create the image will not abort the whole compose. to create the image will not abort the whole compose.
The configuration will pass other attributes directly to the Koji task. The configuration will pass other attributes directly to the Koji task.
This includes ``scratch`` and ``priority``. See ``koji list-api This includes ``scratch`` and ``priority``.
buildContainer`` for more details about these options.
A value for ``yum_repourls`` will be created automatically and point at a A value for ``yum_repourls`` will be created automatically and point at a
repository in the current compose. You can add extra repositories with repository in the current compose. You can add extra repositories with
``repo`` key having a list of urls pointing to ``.repo`` files or just ``repo`` key having a list of urls pointing to ``.repo`` files or just
variant uid, Pungi will create the .repo file for that variant. If variant uid, Pungi will create the .repo file for that variant. ``gpgkey``
a specific URL is used in the ``repo``, the ``$COMPOSE_ID`` variable in can be specified to enable gpgcheck in repo files for variants.
the ``repo`` string will be replaced with the real compose ID.
``gpgkey`` can be specified to enable gpgcheck in repo files for variants.
**osbs_registries** **osbs_registries**
(*dict*) -- Use this optional setting to emit ``osbs-request-push`` (*dict*) -- It is possible to configure extra information about where to
messages for each non-scratch container build. These messages can guide push the image (unless it is a scratch build). For each finished build,
other tools how to push the images to other registries. For example, an Pungi will try to match NVR against a key in this mapping (using shell-style
external tool might trigger on these messages and copy the images from globbing) and take the corresponding value and collect them across all built
OSBS's registry to a staging or production registry. images. The data will be saved into ``logs/global/osbs-registries.json`` as
a mapping from Koji NVR to the registry data. The same data is also sent to
For each completed container build, Pungi will try to match the NVR against the message bus on ``osbs-request-push`` topic once the compose finishes
a key in ``osbs_registries`` mapping (using shell-style globbing) and take successfully. Handling the message and performing the actual push is outside
the corresponding value and collect them across all built images. Pungi of scope for Pungi.
will save this data into ``logs/global/osbs-registries.json``, mapping each
Koji NVR to the registry data. Pungi will also send this data to the
message bus on the ``osbs-request-push`` topic once the compose finishes
successfully.
Pungi simply logs the mapped data and emits the messages. It does not
handle the messages or push images. A separate tool must do that.
Example config Example config
@ -2247,9 +1972,9 @@ Miscellaneous Settings
format string accepting ``%(variant_name)s`` and ``%(arch)s`` placeholders. format string accepting ``%(variant_name)s`` and ``%(arch)s`` placeholders.
**symlink_isos_to** **symlink_isos_to**
(*str*) -- If set, the ISO files from ``buildinstall`` and ``createiso`` (*str*) -- If set, the ISO files from ``buildinstall``, ``createiso`` and
phases will be put into this destination, and a symlink pointing to this ``live_images`` phases will be put into this destination, and a symlink
location will be created in actual compose directory. pointing to this location will be created in actual compose directory.
**dogpile_cache_backend** **dogpile_cache_backend**
(*str*) -- If set, Pungi will use the configured Dogpile cache backend to (*str*) -- If set, Pungi will use the configured Dogpile cache backend to
View File
@ -30,17 +30,9 @@ This is a shortened configuration for Fedora Rawhide compose as of 2019-10-14.
module_defaults_dir = { module_defaults_dir = {
'scm': 'git', 'scm': 'git',
'repo': 'https://pagure.io/releng/fedora-module-defaults.git', 'repo': 'https://pagure.io/releng/fedora-module-defaults.git',
'branch': 'main', 'branch': 'master',
'dir': '.' 'dir': '.'
} }
# Optional module obsoletes configuration which is merged
# into the module index and gets resolved
module_obsoletes_dir = {
'scm': 'git',
'repo': 'https://pagure.io/releng/fedora-module-defaults.git',
'branch': 'main',
'dir': 'obsoletes'
}
variants_file='variants-fedora.xml' variants_file='variants-fedora.xml'
sigkeys = ['12C944D0'] sigkeys = ['12C944D0']
@ -91,6 +83,7 @@ This is a shortened configuration for Fedora Rawhide compose as of 2019-10-14.
# CREATEISO # CREATEISO
iso_hfs_ppc64le_compatible = False iso_hfs_ppc64le_compatible = False
create_jigdo = False
# BUILDINSTALL # BUILDINSTALL
buildinstall_method = 'lorax' buildinstall_method = 'lorax'
@ -294,6 +287,30 @@ This is a shortened configuration for Fedora Rawhide compose as of 2019-10-14.
}) })
] ]
live_target = 'f32'
live_images_no_rename = True
live_images = [
('^Workstation$', {
'armhfp': {
'kickstart': 'fedora-arm-workstation.ks',
'name': 'Fedora-Workstation-armhfp',
# Again workstation takes packages from Everything.
'repo': 'Everything',
'type': 'appliance',
'failable': True,
}
}),
('^Server$', {
# But Server has its own repo.
'armhfp': {
'kickstart': 'fedora-arm-server.ks',
'name': 'Fedora-Server-armhfp',
'type': 'appliance',
'failable': True,
}
}),
]
ostree = { ostree = {
"^Silverblue$": { "^Silverblue$": {
"version": "!OSTREE_VERSION_FROM_LABEL_DATE_TYPE_RESPIN", "version": "!OSTREE_VERSION_FROM_LABEL_DATE_TYPE_RESPIN",
@ -308,8 +325,6 @@ This is a shortened configuration for Fedora Rawhide compose as of 2019-10-14.
"tag_ref": False, "tag_ref": False,
# Don't use change detection in ostree. # Don't use change detection in ostree.
"force_new_commit": True, "force_new_commit": True,
# Use unified core mode for rpm-ostree composes
"unified_core": True,
# This is the location for the repo where new commit will be # This is the location for the repo where new commit will be
# created. Note that this is outside of the compose dir. # created. Note that this is outside of the compose dir.
"ostree_repo": "/mnt/koji/compose/ostree/repo/", "ostree_repo": "/mnt/koji/compose/ostree/repo/",
@ -319,20 +334,6 @@ This is a shortened configuration for Fedora Rawhide compose as of 2019-10-14.
} }
} }
ostree_container = {
"^Sagano$": {
"treefile": "fedora-tier-0-38.yaml",
"config_url": "https://gitlab.com/CentOS/cloud/sagano.git",
"config_branch": "main",
# Consume packages from Everything
"repo": "Everything",
# Automatically generate a reasonable version
"version": "!OSTREE_VERSION_FROM_LABEL_DATE_TYPE_RESPIN",
# Only run this for x86_64 even if Sagano has more arches
"arches": ["x86_64"],
}
}
ostree_installer = [ ostree_installer = [
("^Silverblue$", { ("^Silverblue$", {
"x86_64": { "x86_64": {
View File
@ -19,7 +19,7 @@ Contents:
scm_support scm_support
messaging messaging
gathering gathering
koji
comps comps
contributing contributing
testing testing
multi_compose
View File
@ -1,105 +0,0 @@
======================
Getting data from koji
======================
When Pungi is configured to get packages from a Koji tag, it somehow needs to
access the actual RPM files.
Historically, this required the storage used by Koji to be directly available
on the host where Pungi was running. This was usually achieved by using NFS for
the Koji volume, and mounting it on the compose host.
The compose could be created directly on the same volume. In such a case the
packages would be hardlinked, significantly reducing space consumption.
The compose could also be created on a different storage, in which case the
packages would either need to be copied over or symlinked. Using symlinks
requires that anything accessing the compose (e.g. a download server) also
mounts the Koji volume in the same location.
There is also a risk with symlinks that the package in Koji can change (due to
being resigned for example), which would invalidate composes linking to it.
Using Koji without direct mount
===============================
It is possible now to run a compose from a Koji tag without direct access to
Koji storage.
Pungi can download the packages over HTTP protocol, store them in a local
cache, and consume them from there.
The local cache has similar structure to what is on the Koji volume.
When Pungi needs some package, it has a path on Koji volume. It will replace
the ``topdir`` with the cache location. If such file exists, it will be used.
If it doesn't exist, it will be downloaded from Koji (by replacing the
``topdir`` with ``topurl``).
::
Koji path /mnt/koji/packages/foo/1/1.fc38/data/signed/abcdef/noarch/foo-1-1.fc38.noarch.rpm
Koji URL https://kojipkgs.fedoraproject.org/packages/foo/1/1.fc38/data/signed/abcdef/noarch/foo-1-1.fc38.noarch.rpm
Local path /mnt/compose/cache/packages/foo/1/1.fc38/data/signed/abcdef/noarch/foo-1-1.fc38.noarch.rpm
The packages can be hardlinked from this cache directory.
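A minimal sketch of that lookup, with made-up paths and a hypothetical
``download()`` helper (an illustration of the rule above, not Pungi's real
API)::

    import os

    TOPDIR = "/mnt/koji"
    TOPURL = "https://kojipkgs.fedoraproject.org"
    CACHEDIR = "/mnt/compose/cache"

    def resolve(koji_path):
        # Rebase the path from the Koji volume onto the local cache.
        cached = koji_path.replace(TOPDIR, CACHEDIR, 1)
        if not os.path.exists(cached):
            # Not cached yet: derive the download URL the same way.
            url = koji_path.replace(TOPDIR, TOPURL, 1)
            download(url, cached)  # hypothetical helper; locking omitted
        return cached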
Cleanup
-------
While the approach above allows each RPM to be downloaded only once, it will
eventually result in the Koji volume being mirrored locally. Most of the
packages will however no longer be needed.
There is a script ``pungi-cache-cleanup`` that can help with that. It can find
and remove files from the cache that are no longer needed.
A file is no longer needed if it has a single link (meaning it is only in the
cache, not in any compose), and it has mtime older than a given threshold.
It doesn't make sense to delete files that are hardlinked in an existing
compose as it would not save any space anyway.
The mtime check is meant to preserve files that are downloaded but not actually
used in a compose, like a subpackage that is not included in any variant. Every
time its existence in the local cache is checked, the mtime is updated.
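As a simplified sketch (not the actual ``pungi-cache-cleanup`` code), the
selection criterion could look like::

    import os
    import time

    def stale_files(cache_root, max_age_days=30):
        cutoff = time.time() - max_age_days * 86400
        for root, _dirs, files in os.walk(cache_root):
            for name in files:
                path = os.path.join(root, name)
                st = os.stat(path)
                # A single link means no compose hardlinks this file, and
                # an old mtime means it was not checked recently.
                if st.st_nlink == 1 and st.st_mtime < cutoff:
                    yield path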
Race conditions?
----------------
It should be safe to have multiple compose hosts share the same storage volume
for generated composes and local cache.
If a cache file is accessed and it exists, there's no risk of race condition.
If two composes need the same file at the same time and it is not present yet,
one of them will take a lock on it and start downloading. The other will wait
until the download is finished.
The lock is only valid for a set amount of time (5 minutes) to avoid issues
where the downloading process is killed in a way that blocks it from releasing
the lock.
If the file is large and the network slow, the limit may not be enough to finish
downloading. In that case the second process will steal the lock while the
first process is still downloading. This will result in the same file being
downloaded twice.
When the first process finishes the download, it will put the file into the
local cache location. When the second process finishes, it will atomically
replace it, but since both downloads fetched the same file, the content is
unchanged.
If the first compose already managed to hardlink the file before it gets
replaced, there will be two copies of the file present locally.
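A simplified sketch of the locking pattern, assuming ``flufl.lock`` (which
the spec lists as a dependency) and the same hypothetical ``download()``
helper -- this is not the real implementation::

    import os
    from datetime import timedelta
    from flufl.lock import Lock

    def fetch_once(url, dest):
        # The lock expires after 5 minutes, so a killed downloader cannot
        # block other composes forever; at worst the file is fetched twice.
        with Lock(dest + ".lock", lifetime=timedelta(minutes=5)):
            if not os.path.exists(dest):
                download(url, dest)  # hypothetical helper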
Integrity checking
------------------
There is minimal integrity checking. RPM packages belonging to real builds
will be checked against the checksum provided by Koji hub.
There is no checking for scratch builds or any images.
View File
@ -12,9 +12,8 @@ happened. A JSON-encoded object will be passed to standard input to provide
more information about the event. At the very least, the object will contain a more information about the event. At the very least, the object will contain a
``compose_id`` key. ``compose_id`` key.
The notification script inherits the working directory from the parent process The script is invoked in the compose directory and can read other
and can be called from the same directory ``pungi-koji`` is called from. The information there.
working directory is listed at the start of the main log.
Currently these messages are sent: Currently these messages are sent:
107 doc/multi_compose.rst Normal file
View File
@ -0,0 +1,107 @@
.. _multi_compose:
Managing compose from multiple parts
====================================
There may be cases where it makes sense to split a big compose into separate
parts, but create a compose output that links all output into one familiar
structure.
The ``pungi-orchestrate`` tool allows that.
It works with an INI-style configuration file. The ``[general]`` section
contains information about identity of the main compose. Other sections define
individual parts.
The parts are scheduled to run in parallel, with the minimal amount of
serialization. The final compose directory will contain hard-links to the
files.
General settings
----------------
**target**
Path to directory where the final compose should be created.
**compose_type**
Type of compose to make.
**release_name**
Name of the product for the final compose.
**release_short**
Short name of the product for the final compose.
**release_version**
Version of the product for the final compose.
**release_type**
Type of the product for the final compose.
**extra_args**
Additional arguments that will be passed to the child Pungi processes.
**koji_profile**
If specified, a current event will be retrieved from the Koji instance and
used for all parts.
**kerberos**
If set to yes, a kerberos ticket will be automatically created at the start.
Set keytab and principal as well.
**kerberos_keytab**
Path to keytab file used to create the kerberos ticket.
**kerberos_principal**
Kerberos principal for the ticket.
**pre_compose_script**
Commands to execute before first part is started. Can contain multiple
commands on separate lines.
**post_compose_script**
Commands to execute after the last part finishes and final status is
updated. Can contain multiple commands on separate lines. ::
post_compose_script =
compose-latest-symlink $COMPOSE_PATH
custom-post-compose-script.sh
Multiple environment variables are defined for the scripts:
* ``COMPOSE_PATH``
* ``COMPOSE_ID``
* ``COMPOSE_DATE``
* ``COMPOSE_TYPE``
* ``COMPOSE_RESPIN``
* ``COMPOSE_LABEL``
* ``RELEASE_ID``
* ``RELEASE_NAME``
* ``RELEASE_SHORT``
* ``RELEASE_VERSION``
* ``RELEASE_TYPE``
* ``RELEASE_IS_LAYERED`` ``YES`` for layered products, empty otherwise
* ``BASE_PRODUCT_NAME`` only set for layered products
* ``BASE_PRODUCT_SHORT`` only set for layered products
* ``BASE_PRODUCT_VERSION`` only set for layered products
* ``BASE_PRODUCT_TYPE`` only set for layered products
**notification_script**
Executable name (or path to a script) that will be used to send a message
once the compose is finished. In order for a valid URL to be included in the
message, at least one part must configure path translation that applies to
the location of the main compose.
Only two messages will be sent, one for start and one for finish (either
successful or not).
Partial compose settings
------------------------
Each part should have a separate section in the config file.
It can specify these options:
**config**
Path to configuration file that describes this part. If relative, it is
resolved relative to the file with parts configuration.
**just_phase**, **skip_phase**
Customize which phases should run for this part.
**depends_on**
A comma separated list of other parts that must be finished before this part
starts.
**failable**
A boolean toggle to mark a part as failable. A failure in such part will
mark the final compose as incomplete, but still successful.
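A minimal sketch of such a configuration file (part names, paths and values
are made up for illustration)::

    [general]
    target = /mnt/compose/final
    compose_type = nightly
    release_name = Example
    release_short = Ex
    release_version = Rawhide
    release_type = ga

    [server]
    config = server.conf

    [cloud]
    config = cloud.conf
    depends_on = server
    failable = yes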
View File
@ -30,14 +30,17 @@ packages to architectures.
Buildinstall Buildinstall
------------ ------------
Spawns a bunch of threads, each of which runs the ``lorax`` command. The Spawns a bunch of threads, each of which runs either ``lorax`` or
``buildinstall`` command (the latter coming from the ``anaconda`` package). The
commands create ``boot.iso`` and other boot configuration files. The image is commands create ``boot.iso`` and other boot configuration files. The image is
finally linked into the ``compose/`` directory as netinstall media. finally linked into the ``compose/`` directory as netinstall media.
The created images are also needed for creating live media or other images in The created images are also needed for creating live media or other images in
later phases. later phases.
With ``lorax`` this phase runs one task per variant.arch combination. With ``lorax`` this phase runs one task per variant.arch combination. For
``buildinstall`` command there is only one task per architecture and
``product.img`` should be used to customize the results.
Gather Gather
------ ------
@ -112,36 +115,16 @@ ImageBuild
This phase wraps up ``koji image-build``. It also updates the metadata This phase wraps up ``koji image-build``. It also updates the metadata
ultimately responsible for ``images.json`` manifest. ultimately responsible for ``images.json`` manifest.
KiwiBuild
---------
Similarly to image build, this phase creates a koji `kiwiBuild` task. In the
background it uses Kiwi to create images.
OSBuild
-------
Similarly to image build, this phase creates a koji `osbuild` task. In the
background it uses OSBuild Composer to create images.
OSBS OSBS
---- ----
This phase builds container base images in `OSBS This phase builds docker base images in `OSBS
<http://osbs.readthedocs.io/en/latest/index.html>`_. <http://osbs.readthedocs.io/en/latest/index.html>`_.
The finished images are available in a registry provided by OSBS, but not The finished images are available in a registry provided by OSBS, but not
downloaded directly into the compose. There is metadata about the created image downloaded directly into the compose. There is metadata about the created image
in ``compose/metadata/osbs.json``. in ``compose/metadata/osbs.json``.
ImageContainer
--------------
This phase builds a container image in OSBS, and stores the metadata in the
same file as OSBS phase. The container produced here wraps a different image,
created it ImageBuild or OSBuild phase. It can be useful to deliver a VM image
to containerized environments.
OSTreeInstaller OSTreeInstaller
--------------- ---------------
View File
@ -41,14 +41,6 @@ which can contain following keys.
* ``command`` -- defines a shell command to run after Git clone to generate the * ``command`` -- defines a shell command to run after Git clone to generate the
needed file (for example to run ``make``). Only supported in Git backend. needed file (for example to run ``make``). Only supported in Git backend.
* ``options`` -- a dictionary of additional configuration options. These are
specific to different backends.
Currently supported values for Git:
* ``credential_helper`` -- path to a credential helper used to supply
username/password for remotes that require authentication.
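For illustration, an SCM dict using this option might look like this
(repository URL and helper path are made up)::

    module_defaults_dir = {
        'scm': 'git',
        'repo': 'https://git.example.com/private/defaults.git',
        'branch': 'main',
        'dir': '.',
        # Hypothetical helper that supplies credentials for the remote.
        'options': {'credential_helper': '/usr/local/bin/pungi-cred-helper'},
    }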
Koji examples Koji examples
------------- -------------
View File
@ -1,24 +1,26 @@
%{?python_enable_dependency_generator} %{?python_enable_dependency_generator}
Name: pungi Name: pungi
Version: 4.7.0 Version: 4.2.17
Release: 6%{?dist}.alma Release: 1%{?dist}.cloudlinux
Summary: Distribution compose tool Summary: Distribution compose tool
License: GPL-2.0-only License: GPLv2
URL: https://pagure.io/pungi URL: https://pagure.io/pungi
Source0: %{name}-%{version}.tar.bz2 Source0: %{name}-%{version}.tar.bz2
BuildRequires: make BuildRequires: python3-nose
BuildRequires: python3-pytest BuildRequires: python3-pytest
# replaced by unittest.mock BuildRequires: python3-mock
# BuildRequires: python3-mock BuildRequires: python3-pyfakefs
BuildRequires: python3-ddt
BuildRequires: python3-devel BuildRequires: python3-devel
BuildRequires: python3-setuptools BuildRequires: python3-setuptools
BuildRequires: python3-productmd >= 1.33 BuildRequires: python3-productmd >= 1.33
BuildRequires: python3-kobo-rpmlib >= 0.18.0 BuildRequires: python3-kobo-rpmlib >= 0.18.0
BuildRequires: createrepo_c >= 0.20.1 BuildRequires: createrepo_c
BuildRequires: python3-lxml BuildRequires: python3-lxml
BuildRequires: python3-ddt
BuildRequires: python3-kickstart BuildRequires: python3-kickstart
BuildRequires: python3-rpm BuildRequires: python3-rpm
BuildRequires: python3-dnf BuildRequires: python3-dnf
@ -31,60 +33,36 @@ BuildRequires: python3-kobo
BuildRequires: python3-koji BuildRequires: python3-koji
BuildRequires: lorax BuildRequires: lorax
BuildRequires: python3-PyYAML BuildRequires: python3-PyYAML
BuildRequires: python3-libmodulemd >= 2.8.0 BuildRequires: libmodulemd >= 2.8.0
BuildRequires: python3-gobject BuildRequires: python3-gobject
BuildRequires: python3-createrepo_c >= 0.20.1 BuildRequires: python3-createrepo_c
BuildRequires: python3-dogpile-cache BuildRequires: python3-dogpile-cache
BuildRequires: python3-parameterized BuildRequires: python3-parameterized
BuildRequires: python3-flufl-lock
BuildRequires: python3-ddt
BuildRequires: python3-distro
BuildRequires: python3-gobject-base BuildRequires: python3-gobject-base
BuildRequires: python3-pgpy
BuildRequires: python3-pyfakefs
%if %{rhel} == 8
BuildRequires: python3-dataclasses
%endif
#deps for doc building #deps for doc building
BuildRequires: python3-sphinx BuildRequires: python3-sphinx
Requires: python3-kobo-rpmlib >= 0.18.0 Requires: python3-kobo-rpmlib >= 0.18.0
Requires: python3-productmd >= 1.33
Requires: python3-kickstart Requires: python3-kickstart
Requires: createrepo_c >= 0.20.1 Requires: python3-requests
Requires: createrepo_c
Requires: koji >= 1.10.1-13 Requires: koji >= 1.10.1-13
Requires: python3-koji-cli-plugins Requires: python3-koji-cli-plugins
Requires: isomd5sum Requires: isomd5sum
%if %{rhel} == 8 || %{rhel} == 9
Requires: genisoimage Requires: genisoimage
%else
Recommends: genisoimage
%endif
Requires: git Requires: git
Requires: python3-dnf Requires: python3-dnf
Requires: python3-multilib Requires: python3-multilib
Requires: python3-libcomps Requires: python3-libcomps
Requires: python3-koji Requires: python3-koji
Requires: python3-libmodulemd >= 2.8.0 Requires: libmodulemd >= 2.8.0
Requires: python3-gobject Requires: python3-gobject
Requires: python3-createrepo_c >= 0.20.1 Requires: python3-createrepo_c
Requires: python3-PyYAML Requires: python3-PyYAML
Requires: python3-productmd >= 1.38
Requires: python3-flufl-lock
%if %{rhel} == 10
Requires: xorriso
%else
Recommends: xorriso
%endif
Requires: python3-productmd >= 1.33
Requires: lorax
Requires: python3-distro
Requires: python3-gobject-base Requires: python3-gobject-base
Requires: python3-pgpy Requires: lorax
Requires: python3-requests
%if %{rhel} == 8
Requires: python3-dataclasses
%endif
# This package is not available on i686, hence we cannot require it # This package is not available on i686, hence we cannot require it
# See https://bugzilla.redhat.com/show_bug.cgi?id=1743421 # See https://bugzilla.redhat.com/show_bug.cgi?id=1743421
@ -100,7 +78,7 @@ A tool to create anaconda based installation trees/isos of a set of rpms.
%package utils %package utils
Summary: Utilities for working with finished composes Summary: Utilities for working with finished composes
Requires: pungi = %{version}-%{release} Requires: pungi = %{version}-%{release}
Requires: python3-fedora-messaging # Requires: python3-fedmsg
%description utils %description utils
These utilities work with finished composes produced by Pungi. They can be used These utilities work with finished composes produced by Pungi. They can be used
@ -109,8 +87,8 @@ notification to Fedora Message Bus.
%package -n python3-%{name} %package -n python3-%{name}
Summary: Python 3 libraries for pungi Summary: Python 3 libraries for pungi
Requires: fus
Requires: python3-attrs Requires: python3-attrs
Requires: fus
%description -n python3-%{name} %description -n python3-%{name}
Python library with code for Pungi. This is not a public library and there are Python library with code for Pungi. This is not a public library and there are
@ -130,14 +108,21 @@ gzip _build/man/pungi.1
%install %install
%py3_install %py3_install
%{__install} -d %{buildroot}/var/cache/pungi/createrepo_c %{__install} -d %{buildroot}/var/cache/pungi
%{__install} -d %{buildroot}%{_mandir}/man1 %{__install} -d %{buildroot}%{_mandir}/man1
%{__install} -m 0644 doc/_build/man/pungi.1.gz %{buildroot}%{_mandir}/man1 %{__install} -m 0644 doc/_build/man/pungi.1.gz %{buildroot}%{_mandir}/man1
rm %{buildroot}%{_bindir}/pungi rm %{buildroot}%{_bindir}/pungi
# CLOUDLINUX: We don't need fedmsg stuff
rm %{buildroot}%{_bindir}/%{name}-fedmsg-notification
%check %check
%pytest python3 -m pytest
# master branch part of %check segment. Currently it doesn't work
# because of pungi-koji requirement in bash tests
#./tests/data/specs/build.sh
#cd tests && ./test_compose.sh
%files %files
%license COPYING GPL %license COPYING GPL
@ -153,9 +138,7 @@ rm %{buildroot}%{_bindir}/pungi
%{_bindir}/%{name}-make-ostree %{_bindir}/%{name}-make-ostree
%{_mandir}/man1/pungi.1.gz %{_mandir}/man1/pungi.1.gz
%{_datadir}/pungi %{_datadir}/pungi
%{_localstatedir}/cache/pungi /var/cache/pungi
%dir %attr(1777, root, root) %{_localstatedir}/cache/pungi/createrepo_c
%{_tmpfilesdir}/pungi-clean-cache.conf
%files -n python3-%{name} %files -n python3-%{name}
%{python3_sitelib}/%{name} %{python3_sitelib}/%{name}
@ -166,343 +149,20 @@ rm %{buildroot}%{_bindir}/pungi
%{_bindir}/%{name}-create-unified-isos %{_bindir}/%{name}-create-unified-isos
%{_bindir}/%{name}-config-dump %{_bindir}/%{name}-config-dump
%{_bindir}/%{name}-config-validate %{_bindir}/%{name}-config-validate
%{_bindir}/%{name}-fedmsg-notification # %{_bindir}/%{name}-fedmsg-notification
%{_bindir}/%{name}-notification-report-progress %{_bindir}/%{name}-notification-report-progress
%{_bindir}/%{name}-orchestrate
%{_bindir}/%{name}-patch-iso %{_bindir}/%{name}-patch-iso
%{_bindir}/%{name}-compare-depsolving %{_bindir}/%{name}-compare-depsolving
%{_bindir}/%{name}-wait-for-signed-ostree-handler %{_bindir}/%{name}-wait-for-signed-ostree-handler
%{_bindir}/%{name}-cache-cleanup
%changelog %changelog
* Fri Sep 27 2024 Stepan Oksanichenko <soksanichenko@almalinux.org> - 4.7.0-6 * Tue Mar 22 2022 stepan_oksanichenko <soksanichenko@cloudlinux.com> - 4.2.17-1
- Add x86_64_v2 to a list of exclusive arches if there is any arch with base `x86_64` - ALBS-226: Patch pungi/lorax for building AL9
* Mon Sep 16 2024 Eduard Abdullin <eabdullin@almalinux.org> - 4.7.0-5 * Thu Feb 25 2022 stepan_oksanichenio <soksanichenko@cloudlinux.com> - 4.2.16-1
- Add x86_64_v2 to arch list if x86_64 in list - ALBS-186: Move pungi to our gitea and build it for AL9
* Fri Sep 06 2024 Stepan Oksanichenko <soksanichenko@almalinux.org> - 4.7.0-4
- Truncate a volume ID to 32 bytes
- Add new architecture `x86_64_v2`
* Thu Sep 05 2024 Stepan Oksanichenko <soksanichenko@almalinux.org> - 4.7.0-2
- Use xorriso as recommended package and genisoimage as required for RHEL8/9 and vice versa for RHEL10
* Thu Aug 22 2024 Lubomír Sedlář <lsedlar@redhat.com> - 4.7.0-1
- kiwibuild: Add support for type, type attr and bundle format (lsedlar)
- createiso: Block reuse if unsigned packages are allowed (lsedlar)
- Allow live_images phase to still be skipped (lsedlar)
- createiso: Recompute .treeinfo checksums for images (lsedlar)
- Drop support for signing rpm-wrapped artifacts (lsedlar)
- Remove live_images.py (LiveImagesPhase) (awilliam)
- Clean up requirements (lsedlar)
- Update pungi.spec for py3 (hlin)
* Fri Jul 19 2024 Fedora Release Engineering <releng@fedoraproject.org> - 4.6.3-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_41_Mass_Rebuild
* Fri Jul 12 2024 Haibo Lin <hlin@redhat.com> - 4.6.3-1
- Fix formatting of long line (lsedlar)
- unified-isos: Resolve symlinks (lsedlar)
- gather: Skip lookaside packages from local lookaside repo (lsedlar)
- pkgset: Avoid adding modules to unavailable arches (hlin)
- iso: Extract volume id with xorriso if available (lsedlar)
- De-duplicate log messages for ostree and ostree_container phases (awilliam)
- Handle tracebacks as str or bytes (lsedlar)
- ostree/container: add missing --version arg (awilliam)
- Block pkgset reuse on module defaults change (lsedlar)
- Include task ID in DONE message for OSBS phase (awilliam)
- Various phases: consistent format of failure message (awilliam)
- Update tests to exercise kiwi specific metadata (lsedlar)
- Kiwi: translate virtualbox and azure productmd formats (awilliam)
- kiwibuild: Add tests for the basic functionality (lsedlar)
- kiwibuild: Remove repos as dicts (lsedlar)
- Fix additional image metadata (lsedlar)
- Drop kiwibuild_version option (lsedlar)
- Update docs with kiwibuild options (lsedlar)
- kiwibuild: allow setting description scm and path at phase level (awilliam)
- Use latest Fedora for python 3 test environment (lsedlar)
- Install unittest2 only on python 2 (lsedlar)
- Fix 'failable' handling for kiwibuild phase (awilliam)
- image_build: Accept Kiwi extension for Azure VHD images (jeremycline)
- image_build: accept Kiwi vagrant image name format (awilliam)
* Sun Jun 09 2024 Python Maint <python-maint@redhat.com> - 4.6.2-7
- Rebuilt for Python 3.13
* Fri May 31 2024 Lubomír Sedlář <lsedlar@redhat.com> - 4.6.2-6
- Rebuild to bump release over f40-infra build
* Fri May 31 2024 Lubomír Sedlář <lsedlar@redhat.com> - 4.6.2-2
- Add dependency on xorriso, fixes rhbz#2278677
* Tue Apr 30 2024 Lubomír Sedlář <lsedlar@redhat.com> - 4.6.2-1
- Phases/osbuild: support passing 'customizations' for image builds (thozza)
- dnf: Load filelists for actual solver too (lsedlar)
- kiwibuild: Tell Koji which arches are allowed to fail (lsedlar)
- kiwibuild: Update documentation with more details (lsedlar)
- kiwibuild: Add kiwibuild global options (lsedlar)
- kiwibuild: Process images same as image-build (lsedlar)
- kiwibuild: Add subvariant configuration (lsedlar)
- kiwibuild: Work around missing arch in build data (lsedlar)
- Support KiwiBuild (hlin)
- ostree/container: Set version in treefile 'automatic-version-prefix' (tim)
- dnf: Explicitly load filelists (lsedlar)
- Fix buildinstall reuse with pungi_buildinstall plugin (lsedlar)
- Fix filters for DNF query (lsedlar)
- gather-dnf: Support dotarch in filter_packages (lsedlar)
- gather: Support dotarch notation for debuginfo packages (lsedlar)
- Correctly set input and fultree_exclude flags for debuginfo (lsedlar)
* Fri Feb 09 2024 Lubomír Sedlář <lsedlar@redhat.com> - 4.6.1-1
- Make python3-mock dependency optional (lsedlar)
- Make latest black happy (lsedlar)
- Update tox configuration (lsedlar)
- Fix scm tests to not use user configuration (lsedlar)
- Add workaround for old requests in kojiwrapper (lsedlar)
- Use pungi_buildinstall without NFS (lsedlar)
- checks: don't require "repo" in the "ostree" schema (awilliam)
- ostree_container: Use unique temporary directory (lsedlar)
* Fri Jan 26 2024 Maxwell G <maxwell@gtmx.me> - 4.6.0-5
- Remove python3-mock dependency
* Fri Jan 26 2024 Fedora Release Engineering <releng@fedoraproject.org> - 4.6.0-4
- Rebuilt for https://fedoraproject.org/wiki/Fedora_40_Mass_Rebuild
* Sun Jan 21 2024 Fedora Release Engineering <releng@fedoraproject.org> - 4.6.0-3
- Rebuilt for https://fedoraproject.org/wiki/Fedora_40_Mass_Rebuild
* Fri Jan 19 2024 Lubomír Sedlář <lsedlar@redhat.com> - 4.6.0-3
- Stop requiring repo option in ostree phase
* Thu Jan 18 2024 Lubomír Sedlář <lsedlar@redhat.com> - 4.6.0-2
- ostree_container: Use unique temporary directory
* Wed Dec 13 2023 Lubomír Sedlář <lsedlar@redhat.com> - 4.6.0-1
- Add ostree container to image metadata (lsedlar)
- Updates for ostree-container phase (lsedlar)
- Add ostree native container support (tim)
- Improve autodetection of productmd image type for osbuild images (awilliam)
- pkgset: ignore events for modular content tags (lsedlar)
- pkgset: Ignore duplicated module builds (lsedlar)
- Drop buildinstall method (abisoi)
- Add step to send UMB message (lzhuang)
- Fix minor Ruff/flake8 warnings (tim)
- osbuild: manifest type in config (cmdr)
* Mon Sep 25 2023 Lubomír Sedlář <lsedlar@redhat.com> - 4.5.0-7
- Backport patch for explicit setting of osbuild image type in metadata
* Mon Nov 21 2023 Stepan Oksanichenko <soksanichenko@almalinux.org> - 4.5.0-3
- Method `get_remote_file_content` is object's method now
* Wed Nov 15 2023 Stepan Oksanichenko <soksanichenko@almalinux.org> - 4.5.0-2
- Return empty list if a repo doesn't contain any module
* Thu Aug 31 2023 Lubomír Sedlář <lsedlar@redhat.com> - 4.5.0-1
- kojiwrapper: Stop being smart about local access (lsedlar)
- Fix unittest errors (ounsal)
- Add integrity checking for builds (lsedlar)
- Add script for cleaning up the cache (lsedlar)
- Add ability to download images (lsedlar)
- Add support for not having koji volume mounted locally (lsedlar)
- Remove repository cloning multiple times (abisoi)
- Support require_all_comps_packages on DNF backend (lsedlar)
- Fix new warnings from flake8 (lsedlar)
* Tue Jul 25 2023 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.3.7-8
- Option `excluded-packages` for script `pungi-gather-rpms`
* Tue Jul 25 2023 Lubomír Sedlář <lsedlar@redhat.com> - 4.4.1-1
- ostree: Add configuration for custom runroot packages (lsedlar)
- pkgset: Emit better error for missing modulemd file (lsedlar)
- Add support for git-credential-helper (lsedlar)
- Support OIDC Client Credentials authentication to CTS (hlin)
* Fri Jul 21 2023 Fedora Release Engineering <releng@fedoraproject.org> - 4.4.0-4
- Rebuilt for https://fedoraproject.org/wiki/Fedora_39_Mass_Rebuild
* Wed Jul 19 2023 Lubomír Sedlář <lsedlar@redhat.com> - 4.4.0-3
- Backport ostree runroot package additions
* Wed Jul 19 2023 Lubomír Sedlář <lsedlar@redhat.com> - 4.4.0-2
- Backport ostree runroot package additions
* Mon Jun 19 2023 Python Maint <python-maint@redhat.com> - 4.4.0-2
- Rebuilt for Python 3.12
* Wed Jun 07 2023 Lubomír Sedlář <lsedlar@redhat.com> - 4.4.0-1
- gather-dnf: Run latest() later (lsedlar)
- iso: Support joliet long names (lsedlar)
- Drop pungi-orchestrator code (lsedlar)
- isos: Ensure proper file ownership and permissions (lsedlar)
- gather: Always get latest packages (lsedlar)
- Add back compatibility with jsonschema <3.0.0 (lsedlar)
- Remove useless debug message (lsedlar)
- Remove fedmsg from requirements (lsedlar)
- gather: Support dotarch in DNF backend (lsedlar)
- Fix compatibility with createrepo_c 0.21.1 (lsedlar)
- comps: Apply arch filtering to environment/optionlist (lsedlar)
- Add config file for cleaning up cache files (hlin)
* Wed May 17 2023 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.8-3
- Rebuild without fedmsg dependency
* Wed May 03 2023 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.8-1
- Set priority for Fedora messages
* Thu Apr 13 2023 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.3.7-7
- gather-module can find modules through symlinks
* Thu Apr 13 2023 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.3.7-6
- CLI option `--label` can be passed through a Pungi config file
* Fri Mar 31 2023 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.3.7-4
- ALBS-1030: Generate Devel section in packages.json
- Also the tool can combine (remove and add) packages in a variant from different sources according to an url's type of source
- Some upstream changes to KojiMock part
- Skip verifying an RPM signature if sigkeys are empty
- ALBS-987: Generate i686 and dev repositories with pungi on building new distr. version automatically
- [Generator of packages.json] Replace using CLI by config.yaml
- [Gather RPMs] os.path is replaced by Pat
* Thu Mar 30 2023 Haibo Lin <hlin@redhat.com> - 4.3.8-1
- createiso: Update possibly changed file on DVD (lsedlar)
- pkgset: Stop reuse if configuration changed (lsedlar)
- Allow disabling inheriting ExcludeArch to noarch packages (lsedlar)
- pkgset: Support extra builds with no tags (lsedlar)
- buildinstall: Avoid pointlessly tweaking the boot images (lsedlar)
- Prevent to reuse if unsigned packages are allowed (hlin)
- Pass parent id/respin id to CTS (lsedlar)
- Exclude existing files in boot.iso (hlin)
- image-build/osbuild: Pull ISOs into the compose (lsedlar)
- Retry 401 error from CTS (lsedlar)
- gather: Better detection of debuginfo in lookaside (lsedlar)
- Log versions of all installed packages (hlin)
- Use authentication for all CTS calls (lsedlar)
- Fix black complaints (lsedlar)
- Add vhd.gz extension to compressed VHD images (lsedlar)
- Add vhd-compressed image type (lsedlar)
- Update to work with latest mock (lsedlar)
- Default bztar format for sdist command (onosek)
* Fri Mar 17 2023 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.3.7-3
- ALBS-987: Generate i686 repositories with pungi on building new distr. version automatically
- KojiMock extracts all modules which are suitable for the variant's arches
- An old code is removed or refactored
* Fri Jan 20 2023 Fedora Release Engineering <releng@fedoraproject.org> - 4.3.7-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_38_Mass_Rebuild
* Fri Dec 09 2022 Ondřej Nosek <onosek@redhat.com> - 4.3.7-1
- osbuild: test passing of rich repos from configuration (lsedlar)
- osbuild: support specifying `package_sets` for repos (thozza)
- osbuild: don't use `util.get_repo_urls()` (thozza)
- osbuild: update schema and config documentation (thozza)
- Speed up tests by 30 seconds (lsedlar)
- Stop sending compose paths to CTS (lsedlar)
- Report errors from CTS (lsedlar)
- createiso: Create Joliet tree with xorriso (lsedlar)
- init: Filter comps for modular variants with tags (lsedlar)
- Retry failed cts requests (hlin)
- Ignore existing kerberos ticket for CTS auth (lsedlar)
- osbuild: support specifying upload_options (thozza)
- osbuild: accept only a single image type in the configuration (thozza)
- Add Jenkinsfile for CI (hlin)
- profiler: Flush stdout before printing (lsedlar)
* Sat Nov 12 2022 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.3.6-3
- AlmaLinux version. Updates from upstream
* Mon Nov 07 2022 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.6-2
- Stop including comps in modular repos
* Wed Oct 19 2022 stepan_oksanichenko <soksanichenko@cloudlinux.com> - 4.2.17-1
- Replace list of cr.packages by cr.PackageIterator in package JSON generator
- Do not lose a module from koji if we have more than one arch (e.g. x86_64 + i686)
* Fri Aug 26 2022 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.6-1
- pkgset: Report better error when module is missing an arch (lsedlar)
- osbuild: add support for building ostree artifacts (ondrej)
- ostree: Add unified core mode for compose in rpm-ostree (tim)
- createiso: Make ISO level more granular (lsedlar)
- Create DVDs with xorriso (lsedlar)
- Fix compatibility with jsonschema >= 4.0.0 (lsedlar)
- Fix black complaint (lsedlar)
- doc: fix osbuild's image_types field name (ondrej)
- Convert _ssh_run output to str for python3 (hlin)
- Print more logs for git_ls_remote (hlin)
- Log time taken of each phase (hlin)
- Avoid crash when loading pickle file failed (hlin)
- extra_isos: Fix detection of changed packages (lsedlar)
* Thu Aug 11 2022 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.5-8
- Backport jsonschema compatibility patch (rhbz#2113607)
* Mon Jul 25 2022 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.5-7
- Update xorriso patch
* Fri Jul 22 2022 Fedora Release Engineering <releng@fedoraproject.org> - 4.3.5-6
- Rebuilt for https://fedoraproject.org/wiki/Fedora_37_Mass_Rebuild
* Mon Jun 20 2022 Python Maint <python-maint@redhat.com> - 4.3.5-5
- Rebuilt for Python 3.11
* Thu Jun 16 2022 Adam Williamson <awilliam@redhat.com> - 4.3.5-4
- Don't try and run isohybrid when using xorriso
* Wed Jun 15 2022 Python Maint <python-maint@redhat.com> - 4.3.5-3
- Rebuilt for Python 3.11
* Wed Jun 15 2022 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.5-2
- Backport patch for building DVDs with xorriso command again
* Wed Jun 15 2022 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.5-1
- Fix module defaults and obsoletes validation (mkulik)
- Update the cts_keytab field in order to get the hostname of the server
(ounsal)
- Add skip_branding to ostree_installer. (lzhuang)
- kojiwrapper: Ignore warnings before task id (lsedlar)
- Restrict jsonschema version (lsedlar)
- Revert "Do not clone the same repository multiple times, re-use already
cloned repository" (hlin)
- Involve bandit (hlin)
* Wed Jun 08 2022 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.4-2
- Backport patch for building DVDs with xorriso command
* Wed May 4 2022 stepan_oksanichenko <soksanichenko@cloudlinux.com> - 4.2.16-1
- ALBS-334: Make the ability of Pungi to give module_defaults from remote sources
* Mon Apr 04 2022 Ondřej Nosek <onosek@redhat.com> - 4.3.4-1
- kojiwrapper: Add retries to login call (lsedlar)
- Variants file in config can contain path (onosek)
- nomacboot option for livemedia koji tasks (cobrien)
- doc: improve osbs_registries explanation (kdreyer)
- osbs: only handle archives of type "image" (kdreyer)
- Update the default greedy_method value in doc (ounsal)
- Fix the wrong working directory for the progress_notification script (ounsal)
- Filter out environment groups unmatch given arch (hlin)
- profiler: Respect provided output stream (lsedlar)
- modules: Correct a typo in loading obsoletes (ppisar)
- Do not clone the same repository multiple times, re-use already cloned
repository (ounsal)
* Fri Feb 04 2022 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.3-3
- Backport typo fix
* Fri Jan 21 2022 Fedora Release Engineering <releng@fedoraproject.org> - 4.3.3-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_36_Mass_Rebuild
* Fri Jan 14 2022 Haibo Lin <hlin@redhat.com> - 4.3.3-1
- hybrid: Explicitly pull in debugsource packages (lsedlar)
- Add module obsoletes feature (fvalder)
- buildinstall: Add ability to install extra packages in runroot (ounsal)
- Ignore osbs/osbuild config when reusing iso images (hlin)
- compose: Make sure temporary dirs are world readable (lsedlar)
- Pass compose parameter for debugging git issue (hlin)
- Generate images.json for extra_isos phase (hlin)
- Fix tests for python 2.6 (hlin)
* Thu Dec 30 2021 stepan_oksanichenio <soksanichenko@cloudlinux.com> - 4.2.15-1 * Thu Dec 30 2021 stepan_oksanichenio <soksanichenko@cloudlinux.com> - 4.2.15-1
- ALBS-97: The scripts `gather_modules` and `generate_packages_json` support LZMA compression - ALBS-97: The scripts `gather_modules` and `generate_packages_json` support LZMA compression
@ -511,116 +171,21 @@ rm %{buildroot}%{_bindir}/pungi
* Mon Dec 20 2021 stepan_oksanichenko <soksanichenko@cloudlinux.com> - 4.2.14-1 * Mon Dec 20 2021 stepan_oksanichenko <soksanichenko@cloudlinux.com> - 4.2.14-1
- ALBS-66: The generator of packages JSON can process the same packages with different versions - ALBS-66: The generator of packages JSON can process the same packages with different versions
* Mon Nov 15 2021 Haibo Lin <hlin@redhat.com> - 4.3.2-2
- Backport patch for generating images.json
* Thu Nov 11 2021 Haibo Lin <hlin@redhat.com> - 4.3.2-1
- gather: Load JSON mapping relative to config dir (lsedlar)
- gather: Stop requiring all variants/arches in JSON (lsedlar)
- doc: make dnf "backend" settings easier to discover (kdreyer)
- Remove with_jigdo argument (lsedlar)
- Check dependencies after config validation (lsedlar)
- default "with_jigdo" to False (kdreyer)
- Stop trying to validate non-existent metadata (lsedlar)
- test images for metadata deserialization error (fdipretre)
- repoclosure: Use --forcearch for dnf repoclosure (lsedlar)
- extra_isos: Allow reusing old images (lsedlar)
- createiso: Allow reusing old images (lsedlar)
- Remove default runroot channel (lsedlar)
* Tue Oct 26 2021 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.1-1
- Correct irc network name & add matrix room (dan.cermak)
- Add missing mock to osbs tests (lsedlar)
- osbs: Reuse images from old compose (hlin)
- image_build: Allow reusing old image_build results (hlin)
- Allow ISO-Level configuration within the config file (ounsal)
- Work around ODCS creating COMPOSE_ID later (lsedlar)
- When `cts_url` is configured, use CTS `/repo` API for buildContainer
yum_repourls. (jkaluza)
- Add COMPOSE_ID into the pungi log file (ounsal)
- buildinstall: Add easy way to check if previous result was reused (lsedlar)
* Mon Oct 04 2021 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.0-2
- Backport patch to avoid crash on missing COMPOSE_ID
* Wed Sep 15 2021 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.0-1
- Only build CTS url when configured (lsedlar)
- Require requests_kerberos only when needed (lsedlar)
- Allow specifying $COMPOSE_ID in the `repo` value for osbs phase. (jkaluza)
- Make getting old compose config reusable (lsedlar)
- paths: Allow customizing log file extension (lsedlar)
- Add authentication for updating the compose URL in CTS. (ounsal)
- Fix type detection for osbuild images (lsedlar)
- Enable pungi to send compose_url patches to CTS (ounsal)
- Use xorriso instead of isoinfo when createiso_use_xorrisofs is enabled
(ounsal)
- Fix tests for createrepo (drumian)
- Formatted files according to flake8 and black feedback (drumian)
- Handle the pungi failures to ensure creation of log files (ounsal)
- Add createrepo_enable_cache to configuration doc (hlin)
- Fix formatting (hlin)
- Install missing deps in ci image (hlin)
- Use pytest directly incl. support for posargs, e.g.: tox -- -s -vvv
tests/path/to/a/single/test_something.py (fvalder)
- Supersede ModuleStream loading with ModuleIndex (fvalder)
- Better error message than 'KeyError' in pungi (drumian)
- Adding multithreading support for pungi/phases/image_checksum.py (jkunstle)
- doc: more additional_packages documentation (kdreyer)
- doc: fix typo in additional_packages description (kdreyer)
- doc: improve signed packages retry docs (kdreyer)
- Better error message than 'KeyError' in pungi (drumian)
- doc: explain buildContainer API (kdreyer)
* Wed Aug 04 2021 Haibo Lin <hlin@redhat.com> - 4.2.10-1
- Show and log command when using the run_blocking_cmd() method (fdipretre)
- Use cachedir when createrepo (hlin)
- gather: Add all srpms to variant lookaside repo (lsedlar)
- Add task URL to watch task log (hlin)
- Log warning when module defined in variants.xml not found (hlin)
- pkgset: Compare future events correctly (lsedlar)
- util: Strip file:// from local urls (lsedlar)
- Clean up temporary yumroot dir (hlin)
* Fri Jul 23 2021 Fedora Release Engineering <releng@fedoraproject.org> - 4.2.9-3
- Rebuilt for https://fedoraproject.org/wiki/Fedora_35_Mass_Rebuild
* Fri Jun 18 2021 stepan_oksanichenko <soksanichenko@cloudlinux.com> - 4.2.13-1 * Fri Jun 18 2021 stepan_oksanichenko <soksanichenko@cloudlinux.com> - 4.2.13-1
- LNX-326: Add the ability to include any package by mask in packages.json to the generator - LNX-326: Add the ability to include any package by mask in packages.json to the generator
- LNX-318: Modify build scripts for building CloudLinux OS 8.4 - LNX-318: Modify build scripts for building CloudLinux OS 8.4
* Fri Jun 04 2021 Python Maint <python-maint@redhat.com> - 4.2.9-2
- Rebuilt for Python 3.10
* Tue May 25 2021 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.2.12-1 * Tue May 25 2021 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.2.12-1
- LNX-108: Add multiarch support to pungi - LNX-108: Add multiarch support to pungi
* Thu Apr 29 2021 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.2.11-1 * Thu Apr 29 2021 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.2.11-1
- LNX-311: Add ability to productmd set a main variant while dumping TreeInfo - LNX-311: Add ability to productmd set a main variant while dumping TreeInfo
* Thu Apr 29 2021 onosek - 4.2.9-1
- New upstream release 4.2.9
- Fix can't link XDEV using repos as pkgset_sources (romain.forlot)
- Updated the deprecated ks argument name (to the current inst.ks) (lveyde)
- gather: Adjust reusing with lookaside (hlin)
- hybrid: Optimize getting lookaside packages (lsedlar)
- gather: Copy old logs when reusing gather result (hlin)
- Cancel koji tasks when pungi terminated (hlin)
- Add Dockerfile for building testing image (hlin)
- image_container: Fix incorrect arch processing (lsedlar)
- runroot: Adjust permissions always (hlin)
- Format code (hlin)
- pkgset: Fix meaning of retries (lsedlar)
- pkgset: Store module tag only if module is used (lsedlar)
- Store extended traceback for gather errors (lsedlar)
* Wed Feb 24 2021 Danylo Kuropiatnyk <dkuropiatnyk@cloudlinux.com>, Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.2.10-1 * Wed Feb 24 2021 Danylo Kuropiatnyk <dkuropiatnyk@cloudlinux.com>, Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.2.10-1
- LU-2186 .treeinfo file in AlmaLinux public kickstart repo should contain AppStream variant - LU-2186 .treeinfo file in AlmaLinux public kickstart repo should contain AppStream variant
- LU-2195 Change path to sources and iso when generating repositories - LU-2195 Change path to sources and iso when generating repositories
- LU-2202: Start unittests during installation or build of pungi - LU-2202: Start unittests during installation or build of pungi
* Fri Feb 12 2021 Ondrej Nosek <onosek@redhat.com> - 4.2.8-1
- New upstream version
* Thu Feb 11 2021 Stepan Oksanichenko <soksanichenko@cloudlinux.com> - 4.2.9-1
- LNX-133: Create a server for building nightly builds of AlmaLinux
- LU-2133: Prepare CI for iso builds of CLOSS 8
@ -633,18 +198,6 @@ rm %{buildroot}%{_bindir}/pungi
- LNX-102: Add tool that collects information about modules
- LNX-103 Update .spec file for AlmaLinux
* Wed Jan 27 2021 Fedora Release Engineering <releng@fedoraproject.org> - 4.2.7-3
- Rebuilt for https://fedoraproject.org/wiki/Fedora_34_Mass_Rebuild
* Fri Jan 22 2021 Lubomír Sedlář <lsedlar@redhat.com> - 4.2.7-2
- Backport patch for preserving default attribute in comps
* Tue Dec 8 09:01:52 CET 2020 Lubomír Sedlář <lsedlar@redhat.com> - 4.2.7-1
- New upstream version
* Thu Nov 05 2020 Lubomír Sedlář <lsedlar@redhat.com> - 4.2.6-1
- New upstream release
* Fri Sep 25 2020 Lubomír Sedlář <lsedlar@redhat.com> - 4.2.5-1
- New upstream release
View File
@ -93,11 +93,6 @@ def split_name_arch(name_arch):
def is_excluded(package, arches, logger=None):
"""Check if package is excluded from given architectures."""
if any(
getBaseArch(exc_arch) == 'x86_64' for exc_arch in package.exclusivearch
) and 'x86_64_v2' not in package.exclusivearch:
package.exclusivearch.append('x86_64_v2')
if package.excludearch and set(package.excludearch) & set(arches):
if logger:
logger.debug(
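The hunk above reverts the fork's tweak that appended x86_64_v2 to ExclusiveArch whenever x86_64 was already exclusive. For orientation, the exclusion semantics around it boil down to this self-contained sketch (the Package stand-in is hypothetical, not pungi's real package object):

    # Hypothetical stand-in for the two RPM header fields is_excluded() reads.
    class Package(object):
        def __init__(self, excludearch=(), exclusivearch=()):
            self.excludearch = list(excludearch)      # arches the package must avoid
            self.exclusivearch = list(exclusivearch)  # if non-empty, the only allowed arches

    def is_excluded_sketch(package, arches):
        if set(package.excludearch) & set(arches):
            return True  # ExcludeArch overlap disqualifies the package
        if package.exclusivearch and not set(package.exclusivearch) & set(arches):
            return True  # ExclusiveArch is set, but no requested arch matches
        return False

    print(is_excluded_sketch(Package(exclusivearch=["x86_64"]), ["ppc64le"]))  # True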
View File
@ -34,8 +34,6 @@ arches = {
"x86_64": "athlon", "x86_64": "athlon",
"amd64": "x86_64", "amd64": "x86_64",
"ia32e": "x86_64", "ia32e": "x86_64",
# x86-64-v2
"x86_64_v2": "noarch",
# ppc64le
"ppc64le": "noarch",
# ppc
@ -133,8 +131,8 @@ def getArchList(thisarch=None): # pragma: no cover
def _try_read_cpuinfo(): # pragma: no cover
"""Try to read /proc/cpuinfo ... if we can't ignore errors (ie. proc not
mounted)."""
""" Try to read /proc/cpuinfo ... if we can't ignore errors (ie. proc not
mounted). """
try:
with open("/proc/cpuinfo", "r") as f:
return f.readlines()
@ -143,8 +141,8 @@ def _try_read_cpuinfo(): # pragma: no cover
def _parse_auxv(): # pragma: no cover
"""Read /proc/self/auxv and parse it into global dict for easier access
later on, very similar to what rpm does."""
""" Read /proc/self/auxv and parse it into global dict for easier access
later on, very similar to what rpm does. """
# In case we can't open and read /proc/self/auxv, just return
try:
with open("/proc/self/auxv", "rb") as f:
View File
@ -53,7 +53,7 @@ from . import util
def is_jigdo_needed(conf):
return conf.get("create_jigdo")
return conf.get("create_jigdo", True)
def is_isohybrid_needed(conf):
@ -75,7 +75,8 @@ def is_isohybrid_needed(conf):
def is_genisoimage_needed(conf):
"""This is only needed locally for createiso without runroot."""
"""This is only needed locally for createiso without runroot.
"""
runroot_tag = conf.get("runroot_tag", "")
if runroot_tag or conf.get("createiso_use_xorrisofs"):
return False
@ -93,7 +94,7 @@ def is_xorrisofs_needed(conf):
def is_createrepo_c_needed(conf):
return conf.get("createrepo_c")
return conf.get("createrepo_c", True)
# The first element in the tuple is package name expected to have the
@ -227,18 +228,9 @@ def validate(config, offline=False, schema=None):
DefaultValidator = _extend_with_default_and_alias(
jsonschema.Draft4Validator, offline=offline
)
if hasattr(jsonschema.Draft4Validator, "TYPE_CHECKER"):
# jsonschema >= 3.0 has new interface for checking types
validator = DefaultValidator(schema)
else:
validator = DefaultValidator(
schema,
{
{"array": (tuple, list), "regex": six.string_types, "url": six.string_types},
"array": (tuple, list),
"regex": six.string_types,
"url": six.string_types,
},
)
errors = []
warnings = []
@ -387,7 +379,6 @@ def _extend_with_default_and_alias(validator_class, offline=False):
instance[property]["branch"] = resolver( instance[property]["branch"] = resolver(
instance[property]["repo"], instance[property]["repo"],
instance[property].get("branch") or "HEAD", instance[property].get("branch") or "HEAD",
instance[property].get("options"),
)
for error in _hook_errors(properties, instance, schema):
@ -455,19 +446,6 @@ def _extend_with_default_and_alias(validator_class, offline=False):
context=all_errors,
)
kwargs = {}
if hasattr(validator_class, "TYPE_CHECKER"):
# jsonschema >= 3
def is_array(checker, instance):
return isinstance(instance, (tuple, list))
def is_string_type(checker, instance):
return isinstance(instance, six.string_types)
kwargs["type_checker"] = validator_class.TYPE_CHECKER.redefine_many(
{"array": is_array, "regex": is_string_type, "url": is_string_type}
)
return jsonschema.validators.extend(
validator_class,
{
@ -478,7 +456,6 @@ def _extend_with_default_and_alias(validator_class, offline=False):
"additionalProperties": _validate_additional_properties, "additionalProperties": _validate_additional_properties,
"anyOf": _validate_any_of, "anyOf": _validate_any_of,
}, },
**kwargs
)
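For context: the dropped kwargs branch above targeted the TYPE_CHECKER interface introduced in jsonschema 3.0, while the kept two-argument form only works with jsonschema < 3. A minimal sketch of the new-style type redefinition (assuming jsonschema >= 3 is installed):

    import jsonschema

    def is_array(checker, instance):
        # Accept tuples as JSON arrays, like the removed code did.
        return isinstance(instance, (tuple, list))

    type_checker = jsonschema.Draft4Validator.TYPE_CHECKER.redefine_many(
        {"array": is_array}
    )
    Validator = jsonschema.validators.extend(
        jsonschema.Draft4Validator, type_checker=type_checker
    )
    Validator({"type": "array"}).validate(("a", "b"))  # passes: tuple counts as array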
@ -521,13 +498,6 @@ def make_schema():
"file": {"type": "string"}, "file": {"type": "string"},
"dir": {"type": "string"}, "dir": {"type": "string"},
"command": {"type": "string"}, "command": {"type": "string"},
"options": {
"type": "object",
"properties": {
"credential_helper": {"type": "string"},
},
"additionalProperties": False,
},
},
"additionalProperties": False,
},
@ -553,6 +523,27 @@ def make_schema():
"list_of_strings": {"type": "array", "items": {"type": "string"}}, "list_of_strings": {"type": "array", "items": {"type": "string"}},
"strings": _one_or_list({"type": "string"}), "strings": _one_or_list({"type": "string"}),
"optional_string": {"anyOf": [{"type": "string"}, {"type": "null"}]}, "optional_string": {"anyOf": [{"type": "string"}, {"type": "null"}]},
"live_image_config": {
"type": "object",
"properties": {
"kickstart": {"type": "string"},
"ksurl": {"type": "url"},
"name": {"type": "string"},
"subvariant": {"type": "string"},
"target": {"type": "string"},
"version": {"type": "string"},
"repo": {"$ref": "#/definitions/repos"},
"specfile": {"type": "string"},
"scratch": {"type": "boolean"},
"type": {"type": "string"},
"sign": {"type": "boolean"},
"failable": {"type": "boolean"},
"release": {"$ref": "#/definitions/optional_string"},
},
"required": ["kickstart"],
"additionalProperties": False,
"type": "object",
},
"osbs_config": { "osbs_config": {
"type": "object", "type": "object",
"properties": { "properties": {
@ -588,7 +579,6 @@ def make_schema():
"release_discinfo_description": {"type": "string"}, "release_discinfo_description": {"type": "string"},
"treeinfo_version": {"type": "string"}, "treeinfo_version": {"type": "string"},
"compose_type": {"type": "string", "enum": COMPOSE_TYPES}, "compose_type": {"type": "string", "enum": COMPOSE_TYPES},
"label": {"type": "string"},
"base_product_name": {"type": "string"}, "base_product_name": {"type": "string"},
"base_product_short": {"type": "string"}, "base_product_short": {"type": "string"},
"base_product_version": {"type": "string"}, "base_product_version": {"type": "string"},
@ -620,7 +610,7 @@ def make_schema():
"runroot_ssh_init_template": {"type": "string"}, "runroot_ssh_init_template": {"type": "string"},
"runroot_ssh_install_packages_template": {"type": "string"}, "runroot_ssh_install_packages_template": {"type": "string"},
"runroot_ssh_run_template": {"type": "string"}, "runroot_ssh_run_template": {"type": "string"},
"create_jigdo": {"type": "boolean", "default": False}, "create_jigdo": {"type": "boolean", "default": True},
"check_deps": {"type": "boolean", "default": True}, "check_deps": {"type": "boolean", "default": True},
"require_all_comps_packages": {"type": "boolean", "default": False}, "require_all_comps_packages": {"type": "boolean", "default": False},
"bootable": { "bootable": {
@ -664,20 +654,13 @@ def make_schema():
"gather_profiler": {"type": "boolean", "default": False}, "gather_profiler": {"type": "boolean", "default": False},
"gather_allow_reuse": {"type": "boolean", "default": False}, "gather_allow_reuse": {"type": "boolean", "default": False},
"pkgset_allow_reuse": {"type": "boolean", "default": True}, "pkgset_allow_reuse": {"type": "boolean", "default": True},
"createiso_allow_reuse": {"type": "boolean", "default": True}, "pkgset_source": {"type": "string", "enum": ["koji", "repos"]},
"extraiso_allow_reuse": {"type": "boolean", "default": True},
"pkgset_source": {"type": "string", "enum": [
"koji",
"repos",
"kojimock",
]},
"createrepo_c": {"type": "boolean", "default": True}, "createrepo_c": {"type": "boolean", "default": True},
"createrepo_checksum": { "createrepo_checksum": {
"type": "string", "type": "string",
"default": "sha256", "default": "sha256",
"enum": ["sha1", "sha256", "sha512"], "enum": ["sha1", "sha256", "sha512"],
}, },
"createrepo_enable_cache": {"type": "boolean", "default": True},
"createrepo_use_xz": {"type": "boolean", "default": False}, "createrepo_use_xz": {"type": "boolean", "default": False},
"createrepo_num_threads": {"type": "number", "default": get_num_cpus()}, "createrepo_num_threads": {"type": "number", "default": get_num_cpus()},
"createrepo_num_workers": {"type": "number", "default": 3}, "createrepo_num_workers": {"type": "number", "default": 3},
@ -739,8 +722,6 @@ def make_schema():
"minItems": 1, "minItems": 1,
"default": [None], "default": [None],
}, },
"signed_packages_retries": {"type": "number", "default": 0},
"signed_packages_wait": {"type": "number", "default": 30},
"variants_file": {"$ref": "#/definitions/str_or_scm_dict"}, "variants_file": {"$ref": "#/definitions/str_or_scm_dict"},
"comps_file": {"$ref": "#/definitions/str_or_scm_dict"}, "comps_file": {"$ref": "#/definitions/str_or_scm_dict"},
"comps_filter_environments": {"type": "boolean", "default": True}, "comps_filter_environments": {"type": "boolean", "default": True},
@ -751,7 +732,6 @@ def make_schema():
"patternProperties": {".+": {"$ref": "#/definitions/strings"}}, "patternProperties": {".+": {"$ref": "#/definitions/strings"}},
"additionalProperties": False, "additionalProperties": False,
}, },
"module_obsoletes_dir": {"$ref": "#/definitions/str_or_scm_dict"},
"create_optional_isos": {"type": "boolean", "default": False}, "create_optional_isos": {"type": "boolean", "default": False},
"symlink_isos_to": {"type": "string"}, "symlink_isos_to": {"type": "string"},
"dogpile_cache_backend": {"type": "string"}, "dogpile_cache_backend": {"type": "string"},
@ -764,12 +744,6 @@ def make_schema():
),
"createiso_break_hardlinks": {"type": "boolean", "default": False},
"createiso_use_xorrisofs": {"type": "boolean", "default": False},
"iso_level": {
"anyOf": [
{"type": "number", "enum": [1, 2, 3, 4]},
_variant_arch_mapping({"type": "number", "enum": [1, 2, 3, 4]}),
],
},
"iso_hfs_ppc64le_compatible": {"type": "boolean", "default": True}, "iso_hfs_ppc64le_compatible": {"type": "boolean", "default": True},
"multilib": _variant_arch_mapping( "multilib": _variant_arch_mapping(
{"$ref": "#/definitions/list_of_strings"} {"$ref": "#/definitions/list_of_strings"}
@ -797,7 +771,7 @@ def make_schema():
"buildinstall_allow_reuse": {"type": "boolean", "default": False}, "buildinstall_allow_reuse": {"type": "boolean", "default": False},
"buildinstall_method": { "buildinstall_method": {
"type": "string", "type": "string",
"enum": ["lorax"], "enum": ["lorax", "buildinstall"],
}, },
# In phase `buildinstall` we should add to compose only the # In phase `buildinstall` we should add to compose only the
# images that will be used only as netinstall # images that will be used only as netinstall
@ -811,10 +785,6 @@ def make_schema():
"buildinstall_kickstart": {"$ref": "#/definitions/str_or_scm_dict"}, "buildinstall_kickstart": {"$ref": "#/definitions/str_or_scm_dict"},
"buildinstall_use_guestmount": {"type": "boolean", "default": True}, "buildinstall_use_guestmount": {"type": "boolean", "default": True},
"buildinstall_skip": _variant_arch_mapping({"type": "boolean"}), "buildinstall_skip": _variant_arch_mapping({"type": "boolean"}),
"buildinstall_packages": {
"$ref": "#/definitions/package_mapping",
"default": [],
},
"global_ksurl": {"type": "url"}, "global_ksurl": {"type": "url"},
"global_version": {"type": "string"}, "global_version": {"type": "string"},
"global_target": {"type": "string"}, "global_target": {"type": "string"},
@ -824,11 +794,8 @@ def make_schema():
"pdc_insecure": {"deprecated": "Koji is queried instead"}, "pdc_insecure": {"deprecated": "Koji is queried instead"},
"cts_url": {"type": "string"}, "cts_url": {"type": "string"},
"cts_keytab": {"type": "string"}, "cts_keytab": {"type": "string"},
"cts_oidc_token_url": {"type": "url"},
"cts_oidc_client_id": {"type": "string"},
"koji_profile": {"type": "string"}, "koji_profile": {"type": "string"},
"koji_event": {"type": "number"}, "koji_event": {"type": "number"},
"koji_cache": {"type": "string"},
"pkgset_koji_tag": {"$ref": "#/definitions/strings"}, "pkgset_koji_tag": {"$ref": "#/definitions/strings"},
"pkgset_koji_builds": {"$ref": "#/definitions/strings"}, "pkgset_koji_builds": {"$ref": "#/definitions/strings"},
"pkgset_koji_scratch_tasks": {"$ref": "#/definitions/strings"}, "pkgset_koji_scratch_tasks": {"$ref": "#/definitions/strings"},
@ -846,10 +813,6 @@ def make_schema():
"type": "boolean", "type": "boolean",
"default": True, "default": True,
}, },
"pkgset_inherit_exclusive_arch_to_noarch": {
"type": "boolean",
"default": True,
},
"pkgset_scratch_modules": { "pkgset_scratch_modules": {
"type": "object", "type": "object",
"patternProperties": { "patternProperties": {
@ -862,10 +825,7 @@ def make_schema():
"paths_module": {"type": "string"}, "paths_module": {"type": "string"},
"skip_phases": { "skip_phases": {
"type": "array", "type": "array",
"items": { "items": {"type": "string", "enum": PHASES_NAMES + ["productimg"]},
"type": "string",
"enum": PHASES_NAMES + ["productimg", "live_images"],
},
"default": [], "default": [],
}, },
"image_name_format": { "image_name_format": {
@ -899,6 +859,11 @@ def make_schema():
},
"restricted_volid": {"type": "boolean", "default": False},
"volume_id_substitutions": {"type": "object", "default": {}},
"live_images_no_rename": {"type": "boolean", "default": False},
"live_images_ksurl": {"type": "url"},
"live_images_target": {"type": "string"},
"live_images_release": {"$ref": "#/definitions/optional_string"},
"live_images_version": {"type": "string"},
"image_build_ksurl": {"type": "url"}, "image_build_ksurl": {"type": "url"},
"image_build_target": {"type": "string"}, "image_build_target": {"type": "string"},
"image_build_release": {"$ref": "#/definitions/optional_string"}, "image_build_release": {"$ref": "#/definitions/optional_string"},
@ -931,6 +896,8 @@ def make_schema():
"product_id": {"$ref": "#/definitions/str_or_scm_dict"}, "product_id": {"$ref": "#/definitions/str_or_scm_dict"},
"product_id_allow_missing": {"type": "boolean", "default": False}, "product_id_allow_missing": {"type": "boolean", "default": False},
"product_id_allow_name_prefix": {"type": "boolean", "default": True}, "product_id_allow_name_prefix": {"type": "boolean", "default": True},
# Deprecated in favour of regular local/phase/global setting.
"live_target": {"type": "string"},
"tree_arches": {"$ref": "#/definitions/list_of_strings", "default": []}, "tree_arches": {"$ref": "#/definitions/list_of_strings", "default": []},
"tree_variants": {"$ref": "#/definitions/list_of_strings", "default": []}, "tree_variants": {"$ref": "#/definitions/list_of_strings", "default": []},
"translate_paths": {"$ref": "#/definitions/string_pairs", "default": []}, "translate_paths": {"$ref": "#/definitions/string_pairs", "default": []},
@ -1009,7 +976,6 @@ def make_schema():
"arches": {"$ref": "#/definitions/list_of_strings"}, "arches": {"$ref": "#/definitions/list_of_strings"},
"failable": {"$ref": "#/definitions/list_of_strings"}, "failable": {"$ref": "#/definitions/list_of_strings"},
"release": {"$ref": "#/definitions/optional_string"}, "release": {"$ref": "#/definitions/optional_string"},
"nomacboot": {"type": "boolean"},
},
"required": ["name", "kickstart"],
"additionalProperties": False,
@ -1043,18 +1009,15 @@ def make_schema():
},
"update_summary": {"type": "boolean"},
"force_new_commit": {"type": "boolean"},
"unified_core": {"type": "boolean"},
"version": {"type": "string"}, "version": {"type": "string"},
"config_branch": {"type": "string"}, "config_branch": {"type": "string"},
"tag_ref": {"type": "boolean"}, "tag_ref": {"type": "boolean"},
"ostree_ref": {"type": "string"}, "ostree_ref": {"type": "string"},
"runroot_packages": {
"$ref": "#/definitions/list_of_strings",
},
},
"required": [
"treefile",
"config_url",
"repo",
"ostree_repo", "ostree_repo",
], ],
"additionalProperties": False, "additionalProperties": False,
@ -1080,7 +1043,6 @@ def make_schema():
"failable": {"$ref": "#/definitions/list_of_strings"}, "failable": {"$ref": "#/definitions/list_of_strings"},
"update_summary": {"type": "boolean"}, "update_summary": {"type": "boolean"},
"force_new_commit": {"type": "boolean"}, "force_new_commit": {"type": "boolean"},
"unified_core": {"type": "boolean"},
"version": {"type": "string"}, "version": {"type": "string"},
"config_branch": {"type": "string"}, "config_branch": {"type": "string"},
"tag_ref": {"type": "boolean"}, "tag_ref": {"type": "boolean"},
@ -1092,39 +1054,6 @@ def make_schema():
),
]
},
"ostree_container": {
"type": "object",
"patternProperties": {
# Warning: this pattern is a variant uid regex, but the
# format does not let us validate it as there is no regular
# expression to describe all regular expressions.
".+": _one_or_list(
{
"type": "object",
"properties": {
"treefile": {"type": "string"},
"config_url": {"type": "string"},
"repo": {"$ref": "#/definitions/repos"},
"keep_original_sources": {"type": "boolean"},
"config_branch": {"type": "string"},
"arches": {"$ref": "#/definitions/list_of_strings"},
"failable": {"$ref": "#/definitions/list_of_strings"},
"version": {"type": "string"},
"tag_ref": {"type": "boolean"},
"runroot_packages": {
"$ref": "#/definitions/list_of_strings",
},
},
"required": [
"treefile",
"config_url",
],
"additionalProperties": False,
}
),
},
"additionalProperties": False,
},
"ostree_installer": _variant_arch_mapping( "ostree_installer": _variant_arch_mapping(
{ {
"type": "object", "type": "object",
@ -1143,16 +1072,16 @@ def make_schema():
"template_repo": {"type": "string"}, "template_repo": {"type": "string"},
"template_branch": {"type": "string"}, "template_branch": {"type": "string"},
"extra_runroot_pkgs": {"$ref": "#/definitions/list_of_strings"}, "extra_runroot_pkgs": {"$ref": "#/definitions/list_of_strings"},
"skip_branding": {"type": "boolean"},
}, },
"additionalProperties": False, "additionalProperties": False,
} }
), ),
"ostree_use_koji_plugin": {"type": "boolean", "default": False}, "ostree_use_koji_plugin": {"type": "boolean", "default": False},
"ostree_container_use_koji_plugin": {"type": "boolean", "default": False},
"ostree_installer_use_koji_plugin": {"type": "boolean", "default": False}, "ostree_installer_use_koji_plugin": {"type": "boolean", "default": False},
"ostree_installer_overwrite": {"type": "boolean", "default": False}, "ostree_installer_overwrite": {"type": "boolean", "default": False},
"image_build_allow_reuse": {"type": "boolean", "default": False}, "live_images": _variant_arch_mapping(
_one_or_list({"$ref": "#/definitions/live_image_config"})
),
"image_build": { "image_build": {
"type": "object", "type": "object",
"patternProperties": { "patternProperties": {
@ -1202,50 +1131,6 @@ def make_schema():
},
"additionalProperties": False,
},
"kiwibuild": {
"type": "object",
"patternProperties": {
# Warning: this pattern is a variant uid regex, but the
# format does not let us validate it as there is no regular
# expression to describe all regular expressions.
".+": {
"type": "array",
"items": {
"type": "object",
"properties": {
"target": {"type": "string"},
"description_scm": {"type": "url"},
"description_path": {"type": "string"},
"kiwi_profile": {"type": "string"},
"release": {"type": "string"},
"arches": {"$ref": "#/definitions/list_of_strings"},
"repos": {"$ref": "#/definitions/list_of_strings"},
"failable": {"$ref": "#/definitions/list_of_strings"},
"subvariant": {"type": "string"},
"type": {"type": "string"},
"type_attr": {"$ref": "#/definitions/list_of_strings"},
"bundle_name_format": {"type": "string"},
},
"required": [
# description_scm and description_path
# are really required, but as they can
# be set at the phase level we cannot
# enforce that here
"kiwi_profile",
],
"additionalProperties": False,
},
}
},
"additionalProperties": False,
},
"kiwibuild_description_scm": {"type": "url"},
"kiwibuild_description_path": {"type": "string"},
"kiwibuild_target": {"type": "string"},
"kiwibuild_release": {"$ref": "#/definitions/optional_string"},
"kiwibuild_type": {"type": "string"},
"kiwibuild_type_attr": {"$ref": "#/definitions/list_of_strings"},
"kiwibuild_bundle_name_format": {"type": "string"},
"osbuild_target": {"type": "string"}, "osbuild_target": {"type": "string"},
"osbuild_release": {"$ref": "#/definitions/optional_string"}, "osbuild_release": {"$ref": "#/definitions/optional_string"},
"osbuild_version": {"type": "string"}, "osbuild_version": {"type": "string"},
@ -1264,135 +1149,12 @@ def make_schema():
"version": {"type": "string"}, "version": {"type": "string"},
"distro": {"type": "string"}, "distro": {"type": "string"},
"target": {"type": "string"}, "target": {"type": "string"},
# Only a single image_type can be specified "image_types": {"$ref": "#/definitions/strings"},
# https://github.com/osbuild/koji-osbuild/commit/c7252650814f82281ee57b598cb2ad970b580451
# https://github.com/osbuild/koji-osbuild/commit/f21a2de39b145eb94f3d49cb4d8775a33ba56752
"image_types": {
"oneOf": [
{
"type": "array",
"items": {"type": "string"},
"description": "Deprecated variant",
"minItems": 1,
"maxItems": 1,
},
{"type": "string"},
]
},
"arches": {"$ref": "#/definitions/list_of_strings"}, "arches": {"$ref": "#/definitions/list_of_strings"},
"release": {"type": "string"}, "release": {"type": "string"},
"repo": { "repo": {"$ref": "#/definitions/list_of_strings"},
"type": "array",
"items": {
"oneOf": [
{
"type": "object",
"additionalProperties": False,
"required": ["baseurl"],
"properties": {
"baseurl": {"type": "string"},
"package_sets": {
"type": "array",
"items": {"type": "string"},
},
},
},
{"type": "string"},
]
},
},
"failable": {"$ref": "#/definitions/list_of_strings"}, "failable": {"$ref": "#/definitions/list_of_strings"},
"subvariant": {"type": "string"}, "subvariant": {"type": "string"},
"ostree_url": {"type": "string"},
"ostree_ref": {"type": "string"},
"ostree_parent": {"type": "string"},
"manifest_type": {"type": "string"},
"customizations": {
"type": "object",
"additionalProperties": True,
},
"upload_options": {
# this should be really 'oneOf', but the minimal
# required properties in AWSEC2 and GCP options
# overlap.
"anyOf": [
# AWSEC2UploadOptions
{
"type": "object",
"additionalProperties": False,
"required": [
"region",
"share_with_accounts",
],
"properties": {
"region": {
"type": "string",
},
"snapshot_name": {
"type": "string",
},
"share_with_accounts": {
"type": "array",
"items": {"type": "string"},
},
},
},
# AWSS3UploadOptions
{
"type": "object",
"additionalProperties": False,
"required": ["region"],
"properties": {
"region": {"type": "string"}
},
},
# AzureUploadOptions
{
"type": "object",
"additionalProperties": False,
"required": [
"tenant_id",
"subscription_id",
"resource_group",
],
"properties": {
"tenant_id": {"type": "string"},
"subscription_id": {"type": "string"},
"resource_group": {"type": "string"},
"location": {"type": "string"},
"image_name": {
"type": "string",
},
},
},
# GCPUploadOptions
{
"type": "object",
"additionalProperties": False,
"required": ["region"],
"properties": {
"region": {"type": "string"},
"bucket": {"type": "string"},
"image_name": {
"type": "string",
},
"share_with_accounts": {
"type": "array",
"items": {"type": "string"},
},
},
},
# ContainerUploadOptions
{
"type": "object",
"additionalProperties": False,
"properties": {
"name": {"type": "string"},
"tag": {"type": "string"},
},
},
]
},
},
"required": ["name", "distro", "image_types"],
"additionalProperties": False,
@ -1427,6 +1189,9 @@ def make_schema():
{"$ref": "#/definitions/strings"} {"$ref": "#/definitions/strings"}
), ),
"lorax_use_koji_plugin": {"type": "boolean", "default": False}, "lorax_use_koji_plugin": {"type": "boolean", "default": False},
"signing_key_id": {"type": "string"},
"signing_key_password_file": {"type": "string"},
"signing_command": {"type": "string"},
"productimg": { "productimg": {
"deprecated": "remove it. Productimg phase has been removed" "deprecated": "remove it. Productimg phase has been removed"
}, },
@ -1438,7 +1203,6 @@ def make_schema():
"anyOf": [{"type": "string"}, {"type": "number"}], "anyOf": [{"type": "string"}, {"type": "number"}],
"default": 10 * 1024 * 1024, "default": 10 * 1024 * 1024,
}, },
"osbs_allow_reuse": {"type": "boolean", "default": False},
"osbs": { "osbs": {
"type": "object", "type": "object",
"patternProperties": { "patternProperties": {
@ -1457,26 +1221,6 @@ def make_schema():
},
"additionalProperties": False,
},
"image_container": {
"type": "object",
"patternProperties": {
".+": _one_or_list(
{
"type": "object",
"properties": {
"url": {"type": "url"},
"target": {"type": "string"},
"priority": {"type": "number"},
"failable": {"type": "boolean"},
"git_branch": {"type": "string"},
"image_spec": {"type": "object"},
},
"required": ["url", "target", "git_branch", "image_spec"],
}
),
},
"additionalProperties": False,
},
"extra_files": _variant_arch_mapping( "extra_files": _variant_arch_mapping(
{ {
"type": "array", "type": "array",
@ -1561,6 +1305,7 @@ def get_num_cpus():
CONFIG_DEPS = {
"buildinstall_method": {
"conflicts": (
(lambda val: val == "buildinstall", ["lorax_options"]),
(lambda val: not val, ["lorax_options", "buildinstall_kickstart"]),
),
},
@ -1580,7 +1325,6 @@ CONFIG_DEPS = {
"requires": ((lambda x: x, ["base_product_name", "base_product_short"]),), "requires": ((lambda x: x, ["base_product_name", "base_product_short"]),),
"conflicts": ((lambda x: not x, ["base_product_name", "base_product_short"]),), "conflicts": ((lambda x: not x, ["base_product_name", "base_product_short"]),),
}, },
"cts_url": {"requires": ((lambda x: x, ["translate_paths"]),)},
"product_id": {"conflicts": [(lambda x: not x, ["product_id_allow_missing"])]}, "product_id": {"conflicts": [(lambda x: not x, ["product_id_allow_missing"])]},
"pkgset_scratch_modules": {"requires": ((lambda x: x, ["mbs_api_url"]),)}, "pkgset_scratch_modules": {"requires": ((lambda x: x, ["mbs_api_url"]),)},
"pkgset_source": { "pkgset_source": {
View File
@ -17,7 +17,6 @@
__all__ = ("Compose",)
import contextlib
import errno
import logging
import os
@ -25,12 +24,8 @@ import time
import tempfile
import shutil
import json
import socket
import kobo.log
import kobo.tback
import requests
from requests.exceptions import RequestException
from productmd.composeinfo import ComposeInfo
from productmd.images import Images
from dogpile.cache import make_region
@ -39,15 +34,12 @@ from dogpile.cache import make_region
from pungi.graph import SimpleAcyclicOrientedGraph
from pungi.wrappers.variants import VariantsXmlParser
from pungi.paths import Paths
from pungi.wrappers.kojiwrapper import KojiDownloadProxy
from pungi.wrappers.scm import get_file_from_scm
from pungi.util import (
makedirs,
get_arch_variant_data,
get_format_substs,
get_variant_data,
retry,
translate_path_raw,
)
from pungi.metadata import compose_to_composeinfo
@ -59,101 +51,6 @@ except ImportError:
SUPPORTED_MILESTONES = ["RC", "Update", "SecurityFix"]
def is_status_fatal(status_code):
"""Check if status code returned from CTS reports an error that is unlikely
to be fixed by retrying. Generally client errors (4XX) are fatal, with the
exception of 401 Unauthorized which could be caused by transient network
issue between compose host and KDC.
"""
if status_code == 401:
return False
return status_code >= 400 and status_code < 500
@retry(wait_on=RequestException)
def retry_request(method, url, data=None, json_data=None, auth=None):
"""
:param str method: Reqest method.
:param str url: Target URL.
:param dict data: form-urlencoded data to send in the body of the request.
:param dict json_data: json data to send in the body of the request.
"""
request_method = getattr(requests, method)
rv = request_method(url, data=data, json=json_data, auth=auth)
if is_status_fatal(rv.status_code):
try:
error = rv.json()
except ValueError:
error = rv.text
raise RuntimeError("%s responded with %d: %s" % (url, rv.status_code, error))
rv.raise_for_status()
return rv
class BearerAuth(requests.auth.AuthBase):
def __init__(self, token):
self.token = token
def __call__(self, r):
r.headers["authorization"] = "Bearer " + self.token
return r
@contextlib.contextmanager
def cts_auth(pungi_conf):
"""
:param dict pungi_conf: dict obj of pungi.json config.
"""
auth = None
token = None
cts_keytab = pungi_conf.get("cts_keytab")
cts_oidc_token_url = os.environ.get("CTS_OIDC_TOKEN_URL", "") or pungi_conf.get(
"cts_oidc_token_url"
)
try:
if cts_keytab:
# requests-kerberos cannot accept custom keytab, we need to use
# environment variable for this. But we need to change environment
# only temporarily just for this single requests.post.
# So at first backup the current environment and revert to it
# after the requests call.
from requests_kerberos import HTTPKerberosAuth
auth = HTTPKerberosAuth()
environ_copy = dict(os.environ)
if "$HOSTNAME" in cts_keytab:
cts_keytab = cts_keytab.replace("$HOSTNAME", socket.gethostname())
os.environ["KRB5_CLIENT_KTNAME"] = cts_keytab
os.environ["KRB5CCNAME"] = "DIR:%s" % tempfile.mkdtemp()
elif cts_oidc_token_url:
cts_oidc_client_id = os.environ.get(
"CTS_OIDC_CLIENT_ID", ""
) or pungi_conf.get("cts_oidc_client_id", "")
token = retry_request(
"post",
cts_oidc_token_url,
data={
"grant_type": "client_credentials",
"client_id": cts_oidc_client_id,
"client_secret": os.environ.get("CTS_OIDC_CLIENT_SECRET", ""),
},
).json()["access_token"]
auth = BearerAuth(token)
del token
yield auth
except Exception as e:
# Avoid leaking client secret in trackback
e.show_locals = False
raise e
finally:
if cts_keytab:
shutil.rmtree(os.environ["KRB5CCNAME"].split(":", 1)[1])
os.environ.clear()
os.environ.update(environ_copy)
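The block above removes upstream's CTS auth helpers entirely. For orientation, they composed roughly like this (a sketch of the removed upstream flow, with placeholder URLs; this is not code present in this fork):

    conf = {"cts_oidc_token_url": "https://sso.example.com/token"}  # placeholder
    with cts_auth(conf) as auth:
        # retry_request() retries on RequestException and turns fatal 4XX
        # responses (other than 401) into RuntimeError.
        rv = retry_request("get", "https://cts.example.com/api/1/composes/", auth=auth)
        print(rv.status_code)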
def get_compose_info(
conf,
compose_type="production",
@ -183,25 +80,47 @@ def get_compose_info(
ci.compose.type = compose_type
ci.compose.date = compose_date or time.strftime("%Y%m%d", time.localtime())
ci.compose.respin = compose_respin or 0
ci.compose.id = ci.create_compose_id()
cts_url = conf.get("cts_url")
cts_url = conf.get("cts_url", None)
if cts_url:
# Import requests and requests-kerberos here so it is not needed
# if running without Compose Tracking Service.
import requests
from requests_kerberos import HTTPKerberosAuth
# Requests-kerberos cannot accept custom keytab, we need to use
# environment variable for this. But we need to change environment
# only temporarily just for this single requests.post.
# So at first backup the current environment and revert to it
# after the requests.post call.
cts_keytab = conf.get("cts_keytab", None)
if cts_keytab:
environ_copy = dict(os.environ)
os.environ["KRB5_CLIENT_KTNAME"] = cts_keytab
try:
# Create compose in CTS and get the reserved compose ID.
ci.compose.id = ci.create_compose_id()
url = os.path.join(cts_url, "api/1/composes/")
data = {
"compose_info": json.loads(ci.dumps()),
"parent_compose_ids": parent_compose_ids,
"respin_of": respin_of,
}
with cts_auth(conf) as authentication:
rv = retry_request("post", url, json_data=data, auth=authentication)
rv = requests.post(url, json=data, auth=HTTPKerberosAuth())
rv.raise_for_status()
finally:
if cts_keytab:
os.environ.clear()
os.environ.update(environ_copy)
# Update local ComposeInfo with received ComposeInfo.
cts_ci = ComposeInfo()
cts_ci.loads(rv.text)
ci.compose.respin = cts_ci.compose.respin
ci.compose.id = cts_ci.compose.id
else:
ci.compose.id = ci.create_compose_id()
return ci
@ -218,23 +137,6 @@ def write_compose_info(compose_dir, ci):
ci.dump(os.path.join(work_dir, "composeinfo-base.json"))
def update_compose_url(compose_id, compose_dir, conf):
cts_url = conf.get("cts_url", None)
if cts_url:
url = os.path.join(cts_url, "api/1/composes", compose_id)
tp = conf.get("translate_paths", None)
compose_url = translate_path_raw(tp, compose_dir)
if compose_url == compose_dir:
# We do not have a URL, do not attempt the update.
return
data = {
"action": "set_url",
"compose_url": compose_url,
}
with cts_auth(conf) as authentication:
return retry_request("patch", url, json_data=data, auth=authentication)
def get_compose_dir(
topdir,
conf,
@ -243,19 +145,11 @@ def get_compose_dir(
compose_respin=None,
compose_label=None,
already_exists_callbacks=None,
parent_compose_ids=None,
respin_of=None,
):
already_exists_callbacks = already_exists_callbacks or []
ci = get_compose_info(
conf,
conf, compose_type, compose_date, compose_respin, compose_label
compose_type,
compose_date,
compose_respin,
compose_label,
parent_compose_ids,
respin_of,
)
cts_url = conf.get("cts_url", None)
@ -328,8 +222,6 @@ class Compose(kobo.log.LoggingBase):
self.koji_event = koji_event or conf.get("koji_event")
self.notifier = notifier
self._old_config = None
# path definitions
self.paths = Paths(self)
@ -392,8 +284,6 @@ class Compose(kobo.log.LoggingBase):
self.im.compose.respin = self.compose_respin
self.im.metadata_path = self.paths.compose.metadata()
self.containers_metadata = {}
# Stores list of deliverables that failed, but did not abort the
# compose.
# {deliverable: [(Variant.uid, arch, subvariant)]}
@ -410,12 +300,9 @@ class Compose(kobo.log.LoggingBase):
else:
self.cache_region = make_region().configure("dogpile.cache.null")
self.koji_downloader = KojiDownloadProxy.from_config(self.conf, self._logger)
get_compose_info = staticmethod(get_compose_info)
write_compose_info = staticmethod(write_compose_info)
get_compose_dir = staticmethod(get_compose_dir)
update_compose_url = staticmethod(update_compose_url)
def __getitem__(self, name):
return self.variants[name]
@ -456,10 +343,6 @@ class Compose(kobo.log.LoggingBase):
def has_module_defaults(self):
return bool(self.conf.get("module_defaults_dir", False))
@property
def has_module_obsoletes(self):
return bool(self.conf.get("module_obsoletes_dir", False))
@property
def config_dir(self):
return os.path.dirname(self.conf._open_file or "")
@ -487,7 +370,7 @@ class Compose(kobo.log.LoggingBase):
)
else:
file_name = os.path.basename(scm_dict)
scm_dict = os.path.join(self.config_dir, scm_dict)
scm_dict = os.path.join(self.config_dir, os.path.basename(scm_dict))
self.log_debug("Writing variants file: %s", variants_file)
tmp_dir = self.mkdtemp(prefix="variants_file_")
@ -690,54 +573,7 @@ class Compose(kobo.log.LoggingBase):
<compose_topdir>/work/{global,<arch>}/tmp[-<variant>]/
"""
path = os.path.join(self.paths.work.tmp_dir(arch=arch, variant=variant))
tmpdir = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=path)
os.chmod(tmpdir, 0o755)
return tmpdir
return tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=path)
def dump_containers_metadata(self):
"""Create a file with container metadata if there are any containers."""
if not self.containers_metadata:
return
with open(self.paths.compose.metadata("osbs.json"), "w") as f:
json.dump(
self.containers_metadata,
f,
indent=4,
sort_keys=True,
separators=(",", ": "),
)
def traceback(self, detail=None, show_locals=True):
"""Store an extended traceback. This method should only be called when
handling an exception.
:param str detail: Extra information appended to the filename
"""
basename = "traceback"
if detail:
basename += "-" + detail
tb_path = self.paths.log.log_file("global", basename)
self.log_error("Extended traceback in: %s", tb_path)
tback = kobo.tback.Traceback(show_locals=show_locals).get_traceback()
# Kobo 0.36.0 returns traceback as str, older versions return bytes
with open(tb_path, "wb" if isinstance(tback, bytes) else "w") as f:
f.write(tback)
def load_old_compose_config(self):
"""
Helper method to load Pungi config dump from old compose.
"""
if not self._old_config:
config_dump_full = self.paths.log.log_file("global", "config-dump")
config_dump_full = self.paths.old_compose_path(config_dump_full)
if not config_dump_full:
return None
self.log_info("Loading old config file: %s", config_dump_full)
with open(config_dump_full, "r") as f:
self._old_config = json.load(f)
return self._old_config
def get_ordered_variant_uids(compose):
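Among the methods deleted above, traceback() was the hook other parts of pungi used to dump extended tracebacks next to the logs; callers invoked it from an exception handler, roughly like this (a sketch against the removed API; run_phase and the compose object are placeholders):

    try:
        run_phase()  # placeholder for any failing compose work
    except Exception:
        compose.traceback(detail="osbs")  # wrote a traceback-osbs log under logs/global/
        raise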
View File
@ -5,20 +5,16 @@ from __future__ import print_function
import os
import six
from collections import namedtuple
from kobo.shortcuts import run
from six.moves import shlex_quote
from .wrappers import iso
from .wrappers.jigdo import JigdoWrapper
from .phases.buildinstall import BOOT_CONFIGS, BOOT_IMAGES
CreateIsoOpts = namedtuple(
"CreateIsoOpts",
[
"buildinstall_method",
"boot_iso",
"arch", "arch",
"output_dir", "output_dir",
"jigdo_dir", "jigdo_dir",
@ -29,8 +25,6 @@ CreateIsoOpts = namedtuple(
"os_tree", "os_tree",
"hfs_compat", "hfs_compat",
"use_xorrisofs", "use_xorrisofs",
"iso_level",
"script_dir",
],
)
CreateIsoOpts.__new__.__defaults__ = (None,) * len(CreateIsoOpts._fields)
@ -67,6 +61,10 @@ def make_image(f, opts):
os.path.join("$TEMPLATE", "config_files/ppc"), os.path.join("$TEMPLATE", "config_files/ppc"),
hfs_compat=opts.hfs_compat, hfs_compat=opts.hfs_compat,
) )
elif opts.buildinstall_method == "buildinstall":
mkisofs_kwargs["boot_args"] = iso.get_boot_options(
opts.arch, "/usr/lib/anaconda-runtime/boot"
)
# ppc(64) doesn't seem to support utf-8
if opts.arch in ("ppc", "ppc64", "ppc64le"):
@ -78,8 +76,6 @@ def make_image(f, opts):
volid=opts.volid,
exclude=["./lost+found"],
graft_points=opts.graft_points,
use_xorrisofs=opts.use_xorrisofs,
iso_level=opts.iso_level,
**mkisofs_kwargs
)
emit(f, cmd)
@ -101,7 +97,7 @@ def run_isohybrid(f, opts):
def make_manifest(f, opts):
emit(f, iso.get_manifest_cmd(opts.iso_name, opts.use_xorrisofs))
emit(f, iso.get_manifest_cmd(opts.iso_name))
def make_jigdo(f, opts):
@ -117,69 +113,6 @@ def make_jigdo(f, opts):
emit(f, cmd)
def _get_perms(fs_path):
"""Compute proper permissions for a file.
This mimicks what -rational-rock option of genisoimage does. All read bits
are set, so that files and directories are globally readable. If any
execute bit is set for a file, set them all. No writes are allowed and
special bits are erased too.
"""
statinfo = os.stat(fs_path)
perms = 0o444
if statinfo.st_mode & 0o111:
perms |= 0o111
return perms
def write_xorriso_commands(opts):
# Create manifest for the boot.iso listing all contents
boot_iso_manifest = "%s.manifest" % os.path.join(
opts.script_dir, os.path.basename(opts.boot_iso)
)
run(
iso.get_manifest_cmd(
opts.boot_iso, opts.use_xorrisofs, output_file=boot_iso_manifest
)
)
# Find which files may have been updated by pungi. This only includes a few
# files from tweaking buildinstall and .discinfo metadata. There's no good
# way to detect whether the boot config files actually changed, so we may
# be updating files in the ISO with the same data.
UPDATEABLE_FILES = set(BOOT_IMAGES + BOOT_CONFIGS + [".discinfo"])
updated_files = set()
excluded_files = set()
with open(boot_iso_manifest) as f:
for line in f:
path = line.lstrip("/").rstrip("\n")
if path in UPDATEABLE_FILES:
updated_files.add(path)
else:
excluded_files.add(path)
script = os.path.join(opts.script_dir, "xorriso-%s.txt" % id(opts))
with open(script, "w") as f:
for cmd in iso.xorriso_commands(
opts.arch, opts.boot_iso, os.path.join(opts.output_dir, opts.iso_name)
):
emit(f, " ".join(cmd))
emit(f, "-volid %s" % opts.volid)
with open(opts.graft_points) as gp:
for line in gp:
iso_path, fs_path = line.strip().split("=", 1)
if iso_path in excluded_files:
continue
cmd = "-update" if iso_path in updated_files else "-map"
emit(f, "%s %s %s" % (cmd, fs_path, iso_path))
emit(f, "-chmod 0%o %s" % (_get_perms(fs_path), iso_path))
emit(f, "-chown_r 0 /")
emit(f, "-chgrp_r 0 /")
emit(f, "-end")
return script
def write_script(opts, f):
if bool(opts.jigdo_dir) != bool(opts.os_tree):
raise RuntimeError("jigdo_dir must be used together with os_tree")
@ -187,14 +120,8 @@ def write_script(opts, f):
emit(f, "#!/bin/bash") emit(f, "#!/bin/bash")
emit(f, "set -ex") emit(f, "set -ex")
emit(f, "cd %s" % opts.output_dir) emit(f, "cd %s" % opts.output_dir)
if opts.use_xorrisofs and opts.buildinstall_method:
script = write_xorriso_commands(opts)
emit(f, "xorriso -dialog on <%s" % script)
else:
make_image(f, opts)
run_isohybrid(f, opts)
implant_md5(f, opts)
make_manifest(f, opts)
if opts.jigdo_dir:
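With the xorriso branch removed, write_script() always emits the mkisofs pipeline. A rough usage sketch (paths and names are placeholders; CreateIsoOpts fields not shown in this hunk, such as iso_name and volid, are assumed from their uses above):

    import sys

    opts = CreateIsoOpts(
        arch="x86_64",
        output_dir="/tmp/iso-out",             # placeholder
        iso_name="test.iso",
        volid="Test-1.0",
        graft_points="/tmp/graft-points.txt",  # placeholder
        use_xorrisofs=False,
    )
    write_script(opts, sys.stdout)  # prints "#!/bin/bash", "set -ex", cd, mkisofs, ...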
View File
@ -1,20 +0,0 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
class UnsignedPackagesError(RuntimeError):
"""Raised when package set fails to find a properly signed copy of an
RPM."""
pass
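This deletion pairs with the signed_packages_retries/signed_packages_wait options dropped from the schema: without the wait-for-signatures loop there is nothing left to raise UnsignedPackagesError. Upstream used it roughly like this (sketch only; find_signed_copy is a hypothetical helper, not a real pungi function):

    import time

    conf = {"signed_packages_retries": 3, "signed_packages_wait": 30}  # the dropped options
    for attempt in range(conf["signed_packages_retries"] + 1):
        try:
            pkg = find_signed_copy()  # hypothetical lookup raising UnsignedPackagesError
            break
        except UnsignedPackagesError:
            time.sleep(conf["signed_packages_wait"])
    else:
        raise UnsignedPackagesError("no signed copy found")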
View File
@ -35,7 +35,7 @@ from pungi.wrappers.createrepo import CreaterepoWrapper
class ReentrantYumLock(object):
"""A lock that can be acquired multiple times by the same process."""
""" A lock that can be acquired multiple times by the same process. """
def __init__(self, lock, log):
self.lock = lock
@ -60,7 +60,7 @@ class ReentrantYumLock(object):
def yumlocked(method):
"""A locking decorator."""
""" A locking decorator. """
def wrapper(self, *args, **kwargs):
with self.yumlock:
@ -1118,6 +1118,7 @@ class Pungi(PungiBase):
self.logger.info("Finished gathering package objects.")
def gather(self):
# get package objects according to the input list
self.getPackageObjects()
if self.is_sources:
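The lock-plus-decorator pattern above serializes all yum access while still letting decorated methods call each other; a self-contained illustration (not the pungi classes):

    import threading

    def locked(method):
        # Same shape as yumlocked(): take the instance lock around every call.
        def wrapper(self, *args, **kwargs):
            with self.yumlock:
                return method(self, *args, **kwargs)
        return wrapper

    class Gatherer(object):
        def __init__(self):
            self.yumlock = threading.RLock()  # re-entrant, like ReentrantYumLock

        @locked
        def gather(self):
            return self.prepare()  # nested call re-acquires the same lock safely

        @locked
        def prepare(self):
            return "ok"

    print(Gatherer().gather())  # -> ok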
View File
@ -15,21 +15,17 @@
from enum import Enum
from functools import cmp_to_key
from itertools import count, groupby
import errno
from itertools import count
import logging
import os
import re
from kobo.rpmlib import parse_nvra
import rpm
import pungi.common
import pungi.dnf_wrapper
import pungi.multilib_dnf
import pungi.util
from pungi import arch_utils
from pungi.linker import Linker
from pungi.profiler import Profiler
from pungi.util import DEBUG_PATTERNS
@ -40,20 +36,6 @@ def get_source_name(pkg):
return pkg.sourcerpm.rsplit("-", 2)[0]
def filter_dotarch(queue, pattern, **kwargs):
"""Filter queue for packages matching the pattern. If pattern matches the
dotarch format of <name>.<arch>, it is processed as such. Otherwise it is
treated as just a name.
"""
kwargs["name__glob"] = pattern
if "." in pattern:
name, arch = pattern.split(".", 1)
if arch in arch_utils.arches or arch == "noarch":
kwargs["name__glob"] = name
kwargs["arch"] = arch
return queue.filter(**kwargs).apply()
class GatherOptions(pungi.common.OptionsBase):
def __init__(self, **kwargs):
super(GatherOptions, self).__init__()
@ -263,37 +245,13 @@ class Gather(GatherBase):
# from lookaside. This can be achieved by removing any package that is
# also in lookaside from the list.
lookaside_pkgs = set()
if self.opts.lookaside_repos:
# We will call `latest()` to get the highest version packages only.
# However, that is per name and architecture. If a package switches
# from arched to noarch or the other way, it is possible that the
# package_list contains different versions in main repos and in
# lookaside repos.
# We need to manually filter the latest version.
def vercmp(x, y):
return rpm.labelCompare(x[1], y[1])
# Annotate the packages with their version.
versioned_packages = [
(pkg, (str(pkg.epoch) or "0", pkg.version, pkg.release))
for pkg in package_list
]
# Sort the packages newest first.
sorted_packages = sorted(
versioned_packages, key=cmp_to_key(vercmp), reverse=True
)
# Group packages by version, take the first group and discard the
# version info from the tuple.
package_list = list(
x[0] for x in next(groupby(sorted_packages, key=lambda x: x[1]))[1]
)
# Now we can decide what is used from lookaside.
for pkg in package_list:
if pkg.repoid in self.opts.lookaside_repos:
lookaside_pkgs.add("{0.name}-{0.evr}".format(pkg))
if self.opts.greedy_method == "all":
return list(package_list)
all_pkgs = []
for pkg in package_list:
# Remove packages that are also in lookaside
@ -305,22 +263,17 @@ class Gather(GatherBase):
if not debuginfo:
native_pkgs = set(
self.q_native_binary_packages.filter(pkg=all_pkgs).latest().apply()
self.q_native_binary_packages.filter(pkg=all_pkgs).apply()
)
multilib_pkgs = set(
self.q_multilib_binary_packages.filter(pkg=all_pkgs).latest().apply()
self.q_multilib_binary_packages.filter(pkg=all_pkgs).apply()
)
else:
native_pkgs = set(
self.q_native_debug_packages.filter(pkg=all_pkgs).latest().apply()
)
native_pkgs = set(self.q_native_debug_packages.filter(pkg=all_pkgs).apply())
multilib_pkgs = set(
self.q_multilib_debug_packages.filter(pkg=all_pkgs).latest().apply()
self.q_multilib_debug_packages.filter(pkg=all_pkgs).apply()
)
if self.opts.greedy_method == "all":
return list(native_pkgs | multilib_pkgs)
result = set()
# try seen native packages first
@ -439,7 +392,9 @@ class Gather(GatherBase):
"""Given an name of a queue (stored as attribute in `self`), exclude """Given an name of a queue (stored as attribute in `self`), exclude
all given packages and keep only the latest per package name and arch. all given packages and keep only the latest per package name and arch.
""" """
setattr(self, queue, getattr(self, queue).filter(pkg__neq=exclude).apply()) setattr(
self, queue, getattr(self, queue).filter(pkg__neq=exclude).latest().apply()
)
@Profiler("Gather._apply_excludes()")
def _apply_excludes(self, excludes):
@ -465,16 +420,12 @@ class Gather(GatherBase):
name__glob=pattern[:-4], reponame__neq=self.opts.lookaside_repos
)
elif pungi.util.pkg_is_debug(pattern):
pkgs = filter_dotarch(
self.q_debug_packages,
pattern,
reponame__neq=self.opts.lookaside_repos,
)
pkgs = self.q_debug_packages.filter(
name__glob=pattern, reponame__neq=self.opts.lookaside_repos
)
else:
pkgs = filter_dotarch(
self.q_binary_packages,
pattern,
reponame__neq=self.opts.lookaside_repos,
)
pkgs = self.q_binary_packages.filter(
name__glob=pattern, reponame__neq=self.opts.lookaside_repos
)
exclude.update(pkgs)
@ -540,19 +491,21 @@ class Gather(GatherBase):
name__glob=pattern[:-2]
).apply()
else:
pkgs = filter_dotarch(self.q_debug_packages, pattern)
pkgs = self.q_debug_packages.filter(
name__glob=pattern
).apply()
else:
if pattern.endswith(".+"):
pkgs = self.q_multilib_binary_packages.filter(
name__glob=pattern[:-2]
).apply()
else:
pkgs = filter_dotarch(self.q_binary_packages, pattern)
pkgs = self.q_binary_packages.filter(
name__glob=pattern
).apply()
if not pkgs:
self.logger.error(
"Could not find a match for %s in any configured repo", pattern
)
self.logger.error("No package matches pattern %s" % pattern)
# The pattern could have been a glob. In that case we want to
# group the packages by name and get best match in those
@ -663,6 +616,7 @@ class Gather(GatherBase):
return added
for pkg in self.result_debug_packages.copy():
if pkg not in self.finished_add_debug_package_deps:
deps = self._get_package_deps(pkg, debuginfo=True)
for i, req in deps:
@ -830,6 +784,7 @@ class Gather(GatherBase):
continue
debug_pkgs = []
pkg_in_lookaside = pkg.repoid in self.opts.lookaside_repos
for i in candidates:
if pkg.arch != i.arch:
continue
@ -837,14 +792,8 @@ class Gather(GatherBase):
# If it's not debugsource package or does not match name of
# the package, we don't want it in.
continue
if self.is_from_lookaside(i):
if i.repoid in self.opts.lookaside_repos or pkg_in_lookaside:
self._set_flag(i, PkgFlag.lookaside)
srpm_name = i.sourcerpm.rsplit("-", 2)[0]
if srpm_name in self.opts.fulltree_excludes:
self._set_flag(i, PkgFlag.fulltree_exclude)
if PkgFlag.input in self.result_package_flags.get(srpm_name, set()):
# If src rpm is marked as input, mark debuginfo as input too
self._set_flag(i, PkgFlag.input)
if i not in self.result_debug_packages:
added.add(i)
debug_pkgs.append(i)
@ -1080,12 +1029,9 @@ class Gather(GatherBase):
# Link downloaded package in (or link package from file repo)
try:
linker.link(pkg.localPkg(), target)
except Exception as ex:
if ex.errno == errno.EEXIST:
self.logger.warning("Downloaded package exists in %s", target)
else:
self.logger.error("Unable to link %s from the yum cache.", pkg.name)
linker.hardlink(pkg.localPkg(), target)
except Exception:
self.logger.error("Unable to link %s from the yum cache." % pkg.name)
raise
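The long block removed earlier in this file used rpm.labelCompare to keep only the newest (epoch, version, release) when the same package shows up in both main and lookaside repos. The comparison primitive works like this (sketch; needs the rpm Python bindings installed):

    import rpm

    # labelCompare takes two (epoch, version, release) string tuples
    # and returns -1, 0 or 1.
    print(rpm.labelCompare(("0", "1.2", "1"), ("0", "1.10", "1")))  # -1: 1.10 is newer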
def log_count(self, msg, method, *args): def log_count(self, msg, method, *args):

View File

@@ -54,7 +54,8 @@ class SimpleAcyclicOrientedGraph(object):
         return False if node in self._graph else True

     def remove_final_endpoint(self, node):
-        """"""
+        """
+        """
         remove_start_points = []
         for start, ends in self._graph.items():
             if node in ends:

View File

@@ -20,8 +20,8 @@ import os

 SIZE_UNITS = {
     "b": 1,
     "k": 1024,
-    "M": 1024**2,
-    "G": 1024**3,
+    "M": 1024 ** 2,
+    "G": 1024 ** 3,
 }
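SIZE_UNITS maps unit suffixes to byte multipliers. A minimal sketch of how such a table is typically consumed, assuming a trailing one-letter suffix; pungi's own convert_media_size may handle more formats:

```python
SIZE_UNITS = {
    "b": 1,
    "k": 1024,
    "M": 1024 ** 2,
    "G": 1024 ** 3,
}


def convert_size(text):
    # Illustrative: "8G" -> 8 GiB in bytes; bare numbers pass through.
    if text[-1] in SIZE_UNITS:
        return int(text[:-1]) * SIZE_UNITS[text[-1]]
    return int(text)


assert convert_size("8G") == 8 * 1024 ** 3
assert convert_size("700M") == 700 * 1024 ** 2
```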

View File

@@ -306,6 +306,11 @@ def write_tree_info(compose, arch, variant, timestamp=None, bi=None):
     if variant.type in ("addon",) or variant.is_empty:
         return

+    compose.log_debug(
+        "on arch '%s' looking at variant '%s' of type '%s'"
+        % (arch, variant, variant.type)
+    )
+
     if not timestamp:
         timestamp = int(time.time())
     else:

View File

@@ -44,30 +44,6 @@ def iter_module_defaults(path):
         yield module_name, index.get_module(module_name).get_defaults()


-def get_module_obsoletes_idx(path, mod_list):
-    """Given a path to a directory with yaml files, return Index with
-    merged all obsoletes.
-    """
-    merger = Modulemd.ModuleIndexMerger.new()
-    md_idxs = []
-
-    # associate_index does NOT copy it's argument (nor increases a
-    # reference counter on the object). It only stores a pointer.
-    for file in glob.glob(os.path.join(path, "*.yaml")):
-        index = Modulemd.ModuleIndex()
-        index.update_from_file(file, strict=False)
-        mod_name = index.get_module_names()[0]
-
-        if mod_name and (mod_name in mod_list or not mod_list):
-            md_idxs.append(index)
-            merger.associate_index(md_idxs[-1], 0)
-
-    merged_idx = merger.resolve()
-
-    return merged_idx
-
-
 def collect_module_defaults(
     defaults_dir, modules_to_load=None, mod_index=None, overrides_dir=None
 ):
@@ -93,26 +69,3 @@ def collect_module_defaults(
             mod_index.add_defaults(defaults)

     return mod_index
-
-
-def collect_module_obsoletes(obsoletes_dir, modules_to_load, mod_index=None):
-    """Load module obsoletes into index.
-
-    This works in a similar fashion as collect_module_defaults except it
-    merges indexes together instead of adding them during iteration.
-
-    Additionally if modules_to_load is not empty returned Index will include
-    only obsoletes for those modules.
-    """
-    obsoletes_index = get_module_obsoletes_idx(obsoletes_dir, modules_to_load)
-
-    # Merge Obsoletes with Modules Index.
-    if mod_index:
-        merger = Modulemd.ModuleIndexMerger.new()
-        merger.associate_index(mod_index, 0)
-        merger.associate_index(obsoletes_index, 0)
-
-        merged_idx = merger.resolve()
-        obsoletes_index = merged_idx
-
-    return obsoletes_index
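The removed helpers merge per-module YAML files through libmodulemd's ModuleIndexMerger. A standalone sketch of that merge flow (requires the Modulemd GObject introspection bindings; the input file names are hypothetical):

```python
import gi

gi.require_version("Modulemd", "2.0")
from gi.repository import Modulemd  # noqa: E402

merger = Modulemd.ModuleIndexMerger.new()
indexes = []
for path in ["perl.yaml", "nodejs.yaml"]:  # hypothetical input files
    index = Modulemd.ModuleIndex.new()
    index.update_from_file(path, strict=False)
    # associate_index() only stores a pointer, so every index must be
    # kept alive until resolve() runs (hence the `indexes` list).
    indexes.append(index)
    merger.associate_index(index, 0)

merged = merger.resolve()
print(merged.get_module_names())
```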

View File

@@ -81,6 +81,9 @@ class PungiNotifier(object):

         self._update_args(kwargs)

+        if self.compose:
+            workdir = self.compose.paths.compose.topdir()
+
         with self.lock:
             for cmd in self.cmds:
                 self._run_script(cmd, msg, workdir, kwargs)

View File

@@ -19,7 +19,6 @@ import logging

 from .tree import Tree
 from .installer import Installer
-from .container import Container


 def main(args=None):
@@ -66,48 +65,6 @@ def main(args=None):
         action="store_true",
         help="do not use rpm-ostree's built-in change detection",
     )
-    treep.add_argument(
-        "--unified-core",
-        action="store_true",
-        help="use unified core mode in rpm-ostree",
-    )
-
-    container = subparser.add_parser(
-        "container", help="Compose OSTree native container"
-    )
-    container.set_defaults(_class=Container, func="run")
-    container.add_argument(
-        "--name",
-        required=True,
-        help="the name of the the OCI archive (required)",
-    )
-    container.add_argument(
-        "--path",
-        required=True,
-        help="where to output the OCI archive (required)",
-    )
-    container.add_argument(
-        "--treefile",
-        metavar="FILE",
-        required=True,
-        help="treefile for rpm-ostree (required)",
-    )
-    container.add_argument(
-        "--log-dir",
-        metavar="DIR",
-        required=True,
-        help="where to log output (required).",
-    )
-    container.add_argument(
-        "--extra-config", metavar="FILE", help="JSON file contains extra configurations"
-    )
-    container.add_argument(
-        "-v",
-        "--version",
-        metavar="VERSION",
-        required=True,
-        help="version identifier (required)",
-    )

     installerp = subparser.add_parser(
         "installer", help="Create an OSTree installer image"

View File

@@ -1,86 +0,0 @@
-# -*- coding: utf-8 -*-
-
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU Library General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, see <https://gnu.org/licenses/>.
-
-import os
-import json
-import six
-from six.moves import shlex_quote
-
-from .base import OSTree
-from .utils import tweak_treeconf
-
-
-def emit(cmd):
-    """Print line of shell code into the stream."""
-    if isinstance(cmd, six.string_types):
-        print(cmd)
-    else:
-        print(" ".join([shlex_quote(x) for x in cmd]))
-
-
-class Container(OSTree):
-    def _make_container(self):
-        """Compose OSTree Container Native image"""
-        stamp_file = os.path.join(self.logdir, "%s.stamp" % self.name)
-        cmd = [
-            "rpm-ostree",
-            "compose",
-            "image",
-            # Always initialize for now
-            "--initialize",
-            # Touch the file if a new commit was created. This can help us tell
-            # if the commitid file is missing because no commit was created or
-            # because something went wrong.
-            "--touch-if-changed=%s" % stamp_file,
-            self.treefile,
-        ]
-        fullpath = os.path.join(self.path, "%s.ociarchive" % self.name)
-        cmd.append(fullpath)
-
-        # Set the umask to be more permissive so directories get group write
-        # permissions. See https://pagure.io/releng/issue/8811#comment-629051
-        emit("umask 0002")
-        emit(cmd)
-
-    def run(self):
-        self.name = self.args.name
-        self.path = self.args.path
-        self.treefile = self.args.treefile
-        self.logdir = self.args.log_dir
-        self.extra_config = self.args.extra_config
-
-        if self.extra_config:
-            self.extra_config = json.load(open(self.extra_config, "r"))
-            repos = self.extra_config.get("repo", [])
-            keep_original_sources = self.extra_config.get(
-                "keep_original_sources", False
-            )
-        else:
-            # missing extra_config mustn't affect tweak_treeconf call
-            repos = []
-            keep_original_sources = True
-
-        update_dict = {"automatic-version-prefix": self.args.version}
-
-        self.treefile = tweak_treeconf(
-            self.treefile,
-            source_repos=repos,
-            keep_original_sources=keep_original_sources,
-            update_dict=update_dict,
-        )
-
-        self._make_container()
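The deleted module renders rpm-ostree invocations as shell lines instead of executing them directly. Its emit() helper is small enough to restate, with a usage example roughly mirroring _make_container() (paths are hypothetical):

```python
import six
from six.moves import shlex_quote


def emit(cmd):
    """Print line of shell code into the stream."""
    if isinstance(cmd, six.string_types):
        print(cmd)
    else:
        print(" ".join([shlex_quote(x) for x in cmd]))


emit("umask 0002")
emit([
    "rpm-ostree", "compose", "image", "--initialize",
    "--touch-if-changed=/logs/Fedora.stamp",  # hypothetical stamp file
    "treefile.json",
    "/compose/Fedora.ociarchive",
])
```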

View File

@@ -43,9 +43,6 @@ class Tree(OSTree):
             # because something went wrong.
             "--touch-if-changed=%s.stamp" % self.commitid_file,
         ]
-        if self.unified_core:
-            # See https://github.com/coreos/rpm-ostree/issues/729
-            cmd.append("--unified-core")
         if self.version:
             # Add versioning metadata
             cmd.append("--add-metadata-string=version=%s" % self.version)
@@ -124,7 +121,6 @@ class Tree(OSTree):
         self.extra_config = self.args.extra_config
         self.ostree_ref = self.args.ostree_ref
         self.force_new_commit = self.args.force_new_commit
-        self.unified_core = self.args.unified_core

         if self.extra_config or self.ostree_ref:
             if self.extra_config:

View File

@@ -103,23 +103,12 @@ class LogPaths(object):
             makedirs(path)
         return path

-    def koji_tasks_dir(self, create_dir=True):
-        """
-        Examples:
-            logs/global/koji-tasks
-        """
-        path = os.path.join(self.topdir(create_dir=create_dir), "koji-tasks")
-        if create_dir:
-            makedirs(path)
-        return path
-
-    def log_file(self, arch, log_name, create_dir=True, ext=None):
-        ext = ext or "log"
+    def log_file(self, arch, log_name, create_dir=True):
         arch = arch or "global"
         if log_name.endswith(".log"):
             log_name = log_name[:-4]
         return os.path.join(
-            self.topdir(arch, create_dir=create_dir), "%s.%s.%s" % (log_name, arch, ext)
+            self.topdir(arch, create_dir=create_dir), "%s.%s.log" % (log_name, arch)
         )
@@ -509,23 +498,10 @@ class WorkPaths(object):
             makedirs(path)
         return path

-    def module_obsoletes_dir(self, create_dir=True):
-        """
-        Example:
-            work/global/module_obsoletes
-        """
-        path = os.path.join(self.topdir(create_dir=create_dir), "module_obsoletes")
-        if create_dir:
-            makedirs(path)
-        return path
-
     def pkgset_file_cache(self, pkgset_name):
         """
         Returns the path to file in which the cached version of
         PackageSetBase.file_cache should be stored.
-
-        Example:
-            work/global/pkgset_f33-compose_file_cache.pickle
         """
         filename = "pkgset_%s_file_cache.pickle" % pkgset_name
         return os.path.join(self.topdir(arch="global"), filename)
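The upstream side extends log_file() with a configurable extension while keeping the `<log_name>.<arch>.<ext>` naming. A free-standing sketch of that naming rule (log_file here is a plain function, not the LogPaths method):

```python
import os


def log_file(topdir, arch, log_name, ext=None):
    # Same naming rule as the upstream method: <log_name>.<arch>.<ext>,
    # with ext defaulting to "log" and a duplicated ".log" stripped.
    ext = ext or "log"
    arch = arch or "global"
    if log_name.endswith(".log"):
        log_name = log_name[:-4]
    return os.path.join(topdir, "%s.%s.%s" % (log_name, arch, ext))


assert log_file("/logs/x86_64", "x86_64", "createiso.log") == \
    "/logs/x86_64/createiso.x86_64.log"
assert log_file("/logs/x86_64", "x86_64", "reuse", ext="json") == \
    "/logs/x86_64/reuse.x86_64.json"
```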

View File

@@ -25,9 +25,8 @@ from .buildinstall import BuildinstallPhase  # noqa
 from .extra_files import ExtraFilesPhase  # noqa
 from .createiso import CreateisoPhase  # noqa
 from .extra_isos import ExtraIsosPhase  # noqa
+from .live_images import LiveImagesPhase  # noqa
 from .image_build import ImageBuildPhase  # noqa
-from .image_container import ImageContainerPhase  # noqa
-from .kiwibuild import KiwiBuildPhase  # noqa
 from .osbuild import OSBuildPhase  # noqa
 from .repoclosure import RepoclosurePhase  # noqa
 from .test import TestPhase  # noqa
@@ -35,7 +34,6 @@ from .image_checksum import ImageChecksumPhase  # noqa
 from .livemedia_phase import LiveMediaPhase  # noqa
 from .ostree import OSTreePhase  # noqa
 from .ostree_installer import OstreeInstallerPhase  # noqa
-from .ostree_container import OSTreeContainerPhase  # noqa
 from .osbs import OSBSPhase  # noqa
 from .phases_metadata import gather_phases_metadata  # noqa

View File

@@ -14,8 +14,6 @@
 # along with this program; if not, see <https://gnu.org/licenses/>.

 import logging
-import math
-import time

 from pungi import util
@@ -60,7 +58,6 @@ class PhaseBase(object):
             self.compose.log_warning("[SKIP ] %s" % self.msg)
             self.finished = True
             return
-        self._start_time = time.time()
         self.compose.log_info("[BEGIN] %s" % self.msg)
         self.compose.notifier.send("phase-start", phase_name=self.name)
         self.run()
@@ -111,13 +108,6 @@ class PhaseBase(object):
             self.pool.stop()
         self.finished = True
         self.compose.log_info("[DONE ] %s" % self.msg)
-        if hasattr(self, "_start_time"):
-            self.compose.log_info(
-                "PHASE %s took %d seconds"
-                % (self.name.upper(), math.ceil(time.time() - self._start_time))
-            )
         if self.used_patterns is not None:
             # We only want to report this if the config was actually queried.
             self.report_unused_patterns()
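The removed lines time each phase between start() and stop() and log a rounded duration. A toy reconstruction of that pattern under the same attribute name (_start_time); the class itself is illustrative:

```python
import math
import time


class TimedPhase(object):
    name = "demo"

    def start(self):
        self._start_time = time.time()
        print("[BEGIN] %s" % self.name)

    def stop(self):
        print("[DONE ] %s" % self.name)
        if hasattr(self, "_start_time"):
            print(
                "PHASE %s took %d seconds"
                % (self.name.upper(), math.ceil(time.time() - self._start_time))
            )


phase = TimedPhase()
phase.start()
time.sleep(1)
phase.stop()
```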

View File

@@ -31,14 +31,14 @@ from six.moves import shlex_quote

 from pungi.arch import get_valid_arches
 from pungi.util import get_volid, get_arch_variant_data
 from pungi.util import get_file_size, get_mtime, failable, makedirs
-from pungi.util import copy_all, translate_path
+from pungi.util import copy_all, translate_path, move_all
 from pungi.wrappers.lorax import LoraxWrapper
 from pungi.wrappers import iso
 from pungi.wrappers.scm import get_file
 from pungi.wrappers.scm import get_file_from_scm
 from pungi.wrappers import kojiwrapper
 from pungi.phases.base import PhaseBase
-from pungi.runroot import Runroot, download_and_extract_archive
+from pungi.runroot import Runroot


 class BuildinstallPhase(PhaseBase):
@@ -50,9 +50,6 @@ class BuildinstallPhase(PhaseBase):
         # A set of (variant_uid, arch) pairs that completed successfully. This
         # is needed to skip copying files for failed tasks.
         self.pool.finished_tasks = set()
-        # A set of (variant_uid, arch) pairs that were reused from previous
-        # compose.
-        self.pool.reused_tasks = set()
         self.buildinstall_method = self.compose.conf.get("buildinstall_method")
         self.lorax_use_koji_plugin = self.compose.conf.get("lorax_use_koji_plugin")
         self.used_lorax = self.buildinstall_method == "lorax"
@@ -144,7 +141,7 @@ class BuildinstallPhase(PhaseBase):
             )
         if self.compose.has_comps:
             comps_repo = self.compose.paths.work.comps_repo(arch, variant)
-            if final_output_dir != output_dir or self.lorax_use_koji_plugin:
+            if final_output_dir != output_dir:
                 comps_repo = translate_path(self.compose, comps_repo)
             repos.append(comps_repo)
@@ -169,6 +166,7 @@ class BuildinstallPhase(PhaseBase):
                 "rootfs-size": rootfs_size,
                 "dracut-args": dracut_args,
                 "skip_branding": skip_branding,
+                "outputdir": output_dir,
                 "squashfs_only": squashfs_only,
                 "configuration_file": configuration_file,
             }
@@ -218,6 +216,10 @@ class BuildinstallPhase(PhaseBase):
         return repos

     def run(self):
+        lorax = LoraxWrapper()
+        product = self.compose.conf["release_name"]
+        version = self.compose.conf["release_version"]
+        release = self.compose.conf["release_version"]
         disc_type = self.compose.conf["disc_types"].get("dvd", "dvd")

         # Prepare kickstart file for final images.
@@ -234,7 +236,7 @@ class BuildinstallPhase(PhaseBase):
             )
             makedirs(final_output_dir)
             repo_baseurls = self.get_repos(arch)
-            if final_output_dir != output_dir or self.lorax_use_koji_plugin:
+            if final_output_dir != output_dir:
                 repo_baseurls = [translate_path(self.compose, r) for r in repo_baseurls]

             if self.buildinstall_method == "lorax":
@@ -270,12 +272,29 @@ class BuildinstallPhase(PhaseBase):
                         ),
                     )
                 )
+            elif self.buildinstall_method == "buildinstall":
+                volid = get_volid(self.compose, arch, disc_type=disc_type)
+                commands.append(
+                    (
+                        None,
+                        lorax.get_buildinstall_cmd(
+                            product,
+                            version,
+                            release,
+                            repo_baseurls,
+                            output_dir,
+                            is_final=self.compose.supported,
+                            buildarch=arch,
+                            volid=volid,
+                        ),
+                    )
+                )
             else:
                 raise ValueError(
                     "Unsupported buildinstall method: %s" % self.buildinstall_method
                 )

-        for variant, cmd in commands:
+        for (variant, cmd) in commands:
             self.pool.add(BuildinstallThread(self.pool))
             self.pool.queue_put(
                 (self.compose, arch, variant, cmd, self.pkgset_phase)
@@ -293,18 +312,6 @@ class BuildinstallPhase(PhaseBase):
             in self.pool.finished_tasks
         )

-    def reused(self, variant, arch):
-        """
-        Check if buildinstall phase reused previous results for given variant
-        and arch. If the phase is skipped, the results will be considered
-        reused as well.
-        """
-        return (
-            super(BuildinstallPhase, self).skip()
-            or (variant.uid if self.used_lorax else None, arch)
-            in self.pool.reused_tasks
-        )


 def get_kickstart_file(compose):
     scm_dict = compose.conf.get("buildinstall_kickstart")
@@ -342,17 +349,9 @@
     "EFI/BOOT/BOOTX64.conf",
     "EFI/BOOT/grub.cfg",
 ]
-BOOT_IMAGES = [
-    "images/efiboot.img",
-]


 def tweak_configs(path, volid, ks_file, configs=BOOT_CONFIGS, logger=None):
-    """
-    Put escaped volume ID and possibly kickstart file into the boot
-    configuration files.
-    :returns: list of paths to modified config files
-    """
     volid_escaped = volid.replace(" ", r"\x20").replace("\\", "\\\\")
     volid_escaped_2 = volid_escaped.replace("\\", "\\\\")
     found_configs = []
@@ -360,6 +359,7 @@ def tweak_configs(path, volid, ks_file, configs=BOOT_CONFIGS, logger=None):
         config_path = os.path.join(path, config)
         if not os.path.exists(config_path):
             continue
+        found_configs.append(config)

         with open(config_path, "r") as f:
             data = original_data = f.read()
@@ -368,7 +368,7 @@ def tweak_configs(path, volid, ks_file, configs=BOOT_CONFIGS, logger=None):
         # double-escape volid in yaboot.conf
         new_volid = volid_escaped_2 if "yaboot" in config else volid_escaped

-        ks = (" inst.ks=hd:LABEL=%s:/ks.cfg" % new_volid) if ks_file else ""
+        ks = (" ks=hd:LABEL=%s:/ks.cfg" % new_volid) if ks_file else ""

         # pre-f18
         data = re.sub(r":CDLABEL=[^ \n]*", r":CDLABEL=%s%s" % (new_volid, ks), data)
@@ -379,12 +379,7 @@ def tweak_configs(path, volid, ks_file, configs=BOOT_CONFIGS, logger=None):
         with open(config_path, "w") as f:
             f.write(data)

-        if data != original_data:
-            found_configs.append(config)
-            if logger:
-                # Generally lorax should create file with correct volume id
-                # already. If we don't have a kickstart, this function should
-                # be a no-op.
-                logger.info("Boot config %s changed" % config_path)
+        if logger and data != original_data:
+            logger.info("Boot config %s changed" % config_path)

     return found_configs
@@ -424,8 +419,9 @@ def tweak_buildinstall(
     if kickstart_file and found_configs:
         shutil.copy2(kickstart_file, os.path.join(dst, "ks.cfg"))

-    images = [os.path.join(tmp_dir, img) for img in BOOT_IMAGES]
-    if found_configs:
+    images = [
+        os.path.join(tmp_dir, "images", "efiboot.img"),
+    ]
     for image in images:
         if not os.path.isfile(image):
             continue
@@ -435,9 +431,7 @@ def tweak_buildinstall(
             logger=compose._logger,
             use_guestmount=compose.conf.get("buildinstall_use_guestmount"),
         ) as mount_tmp_dir:
-            for config in found_configs:
-                # Put each modified config file into the image (overwriting the
-                # original).
+            for config in BOOT_CONFIGS:
                 config_path = os.path.join(tmp_dir, config)
                 config_in_image = os.path.join(mount_tmp_dir, config)
@@ -521,10 +515,7 @@ def link_boot_iso(compose, arch, variant, can_fail):
     setattr(img, "can_fail", can_fail)
     setattr(img, "deliverable", "buildinstall")
     try:
-        img.volume_id = iso.get_volume_id(
-            new_boot_iso_path,
-            compose.conf.get("createiso_use_xorrisofs"),
-        )
+        img.volume_id = iso.get_volume_id(new_boot_iso_path)
     except RuntimeError:
         pass
     # In this phase we should add to compose only the images that
@@ -670,16 +661,9 @@ class BuildinstallThread(WorkerThread):
             return None

         compose.log_info("Loading old BUILDINSTALL phase metadata: %s", old_metadata)
-        try:
-            with open(old_metadata, "rb") as f:
-                old_result = pickle.load(f)
-            return old_result
-        except Exception as e:
-            compose.log_debug(
-                "Failed to load old BUILDINSTALL phase metadata %s : %s"
-                % (old_metadata, str(e))
-            )
-            return None
+        with open(old_metadata, "rb") as f:
+            old_result = pickle.load(f)
+        return old_result

     def _reuse_old_buildinstall_result(self, compose, arch, variant, cmd, pkgset_phase):
         """
@@ -719,8 +703,8 @@ class BuildinstallThread(WorkerThread):
         # input on RPM level.
         cmd_copy = copy(cmd)
         for key in ["outputdir", "sources"]:
-            cmd_copy.pop(key, None)
-            old_metadata["cmd"].pop(key, None)
+            del cmd_copy[key]
+            del old_metadata["cmd"][key]

         # Do not reuse if command line arguments are not the same.
         if old_metadata["cmd"] != cmd_copy:
@@ -745,7 +729,7 @@ class BuildinstallThread(WorkerThread):
         # Ask Koji for all the RPMs in the `runroot_tag` and check that
         # those installed in the old buildinstall buildroot are still in the
         # very same versions/releases.
-        koji_wrapper = kojiwrapper.KojiWrapper(compose)
+        koji_wrapper = kojiwrapper.KojiWrapper(compose.conf["koji_profile"])
         rpms = koji_wrapper.koji_proxy.listTaggedRPMS(
             compose.conf.get("runroot_tag"), inherit=True, latest=True
         )[0]
@@ -815,15 +799,14 @@ class BuildinstallThread(WorkerThread):
         if buildinstall_method == "lorax":
             packages += ["lorax"]
             chown_paths.append(_get_log_dir(compose, variant, arch))
-        packages += get_arch_variant_data(
-            compose.conf, "buildinstall_packages", arch, variant
-        )
+        elif buildinstall_method == "buildinstall":
+            packages += ["anaconda"]

         if self._reuse_old_buildinstall_result(
             compose, arch, variant, cmd, pkgset_phase
         ):
             self.copy_files(compose, variant, arch)
             self.pool.finished_tasks.add((variant.uid if variant else None, arch))
-            self.pool.reused_tasks.add((variant.uid if variant else None, arch))
             self.pool.log_info("[DONE ] %s" % msg)
             return
@@ -835,13 +818,13 @@ class BuildinstallThread(WorkerThread):
         # Start the runroot task.
         runroot = Runroot(compose, phase="buildinstall")

-        task_id = None
         if buildinstall_method == "lorax" and lorax_use_koji_plugin:
-            task_id = runroot.run_pungi_buildinstall(
+            runroot.run_pungi_buildinstall(
                 cmd,
                 log_file=log_file,
                 arch=arch,
                 packages=packages,
-                mounts=[compose.topdir],
                 weight=compose.conf["runroot_weights"].get("buildinstall"),
             )
         else:
@@ -874,17 +857,19 @@ class BuildinstallThread(WorkerThread):
             log_dir = os.path.join(output_dir, "logs")
             copy_all(log_dir, final_log_dir)
         elif lorax_use_koji_plugin:
-            # If Koji pungi-buildinstall is used, then the buildinstall results
-            # are attached as outputs to the Koji task. Download and unpack
-            # them to the correct location.
-            download_and_extract_archive(
-                compose, task_id, "results.tar.gz", final_output_dir
-            )
+            # If Koji pungi-buildinstall is used, then the buildinstall results are
+            # not stored directly in `output_dir` dir, but in "results" and "logs"
+            # subdirectories. We need to move them to final_output_dir.
+            results_dir = os.path.join(output_dir, "results")
+            move_all(results_dir, final_output_dir, rm_src_dir=True)

-            # Download the logs into proper location too.
+            # Get the log_dir into which we should copy the resulting log files.
             log_fname = "buildinstall-%s-logs/dummy" % variant.uid
             final_log_dir = os.path.dirname(compose.paths.log.log_file(arch, log_fname))
-            download_and_extract_archive(compose, task_id, "logs.tar.gz", final_log_dir)
+            if not os.path.exists(final_log_dir):
+                makedirs(final_log_dir)
+            log_dir = os.path.join(output_dir, "logs")
+            move_all(log_dir, final_log_dir, rm_src_dir=True)

         rpms = runroot.get_buildroot_rpms()
         self._write_buildinstall_metadata(
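The upstream side wraps the unpickling of old phase metadata so that a missing or corrupted file disables reuse instead of crashing the compose. A condensed sketch of that guard (the log_debug parameter stands in for compose.log_debug):

```python
from six.moves import cPickle as pickle


def load_old_metadata(path, log_debug=print):
    """Return unpickled metadata, or None if it cannot be read."""
    try:
        with open(path, "rb") as f:
            return pickle.load(f)
    except Exception as e:
        log_debug(
            "Failed to load old BUILDINSTALL phase metadata %s : %s"
            % (path, str(e))
        )
        return None
```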

View File

@@ -14,17 +14,15 @@
 # along with this program; if not, see <https://gnu.org/licenses/>.

-import itertools
 import os
 import random
 import shutil
 import stat
-import json

 import productmd.treeinfo
 from productmd.images import Image
 from kobo.threads import ThreadPool, WorkerThread
-from kobo.shortcuts import run, relative_path, compute_file_checksums
+from kobo.shortcuts import run, relative_path
 from six.moves import shlex_quote

 from pungi.wrappers import iso
@@ -38,7 +36,6 @@ from pungi.util import (
     failable,
     get_file_size,
     get_mtime,
-    read_json_file,
 )
 from pungi.media_split import MediaSplitter, convert_media_size
 from pungi.compose_metadata.discinfo import read_discinfo, write_discinfo
@@ -76,185 +73,6 @@ class CreateisoPhase(PhaseLoggerMixin, PhaseBase):
             return False
         return bool(self.compose.conf.get("buildinstall_method", ""))

-    def _metadata_path(self, variant, arch, disc_num, disc_count):
-        return self.compose.paths.log.log_file(
-            arch,
-            "createiso-%s-%d-%d" % (variant.uid, disc_num, disc_count),
-            ext="json",
-        )
-
-    def save_reuse_metadata(self, cmd, variant, arch, opts):
-        """Save metadata for future composes to verify if the compose can be reused."""
-        metadata = {
-            "cmd": cmd,
-            "opts": opts._asdict(),
-        }
-        metadata_path = self._metadata_path(
-            variant, arch, cmd["disc_num"], cmd["disc_count"]
-        )
-        with open(metadata_path, "w") as f:
-            json.dump(metadata, f, indent=2)
-        return metadata
-
-    def _load_old_metadata(self, cmd, variant, arch):
-        metadata_path = self._metadata_path(
-            variant, arch, cmd["disc_num"], cmd["disc_count"]
-        )
-        old_path = self.compose.paths.old_compose_path(metadata_path)
-        self.logger.info(
-            "Loading old metadata for %s.%s from: %s", variant, arch, old_path
-        )
-        try:
-            return read_json_file(old_path)
-        except Exception:
-            return None
-
-    def perform_reuse(self, cmd, variant, arch, opts, iso_path):
-        """
-        Copy all related files from old compose to the new one. As a last step
-        add the new image to metadata.
-        """
-        linker = OldFileLinker(self.logger)
-        old_file_name = os.path.basename(iso_path)
-        current_file_name = os.path.basename(cmd["iso_path"])
-        try:
-            # Hardlink ISO and manifest
-            for suffix in ("", ".manifest"):
-                linker.link(iso_path + suffix, cmd["iso_path"] + suffix)
-            # Copy log files
-            # The log file name includes filename of the image, so we need to
-            # find old file with the old name, and rename it to the new name.
-            log_file = self.compose.paths.log.log_file(
-                arch, "createiso-%s" % current_file_name
-            )
-            old_log_file = self.compose.paths.old_compose_path(
-                self.compose.paths.log.log_file(arch, "createiso-%s" % old_file_name)
-            )
-            linker.link(old_log_file, log_file)
-            # Copy jigdo files
-            if opts.jigdo_dir:
-                old_jigdo_dir = self.compose.paths.old_compose_path(opts.jigdo_dir)
-                for suffix in (".template", ".jigdo"):
-                    linker.link(
-                        os.path.join(old_jigdo_dir, old_file_name) + suffix,
-                        os.path.join(opts.jigdo_dir, current_file_name) + suffix,
-                    )
-        except Exception:
-            # A problem happened while linking some file, let's clean up
-            # everything.
-            linker.abort()
-            raise
-
-        # Add image to manifest
-        add_iso_to_metadata(
-            self.compose,
-            variant,
-            arch,
-            cmd["iso_path"],
-            bootable=cmd["bootable"],
-            disc_num=cmd["disc_num"],
-            disc_count=cmd["disc_count"],
-        )
-        if self.compose.notifier:
-            self.compose.notifier.send(
-                "createiso-imagedone",
-                file=cmd["iso_path"],
-                arch=arch,
-                variant=str(variant),
-            )
-
-    def try_reuse(self, cmd, variant, arch, opts):
-        """Try to reuse image from previous compose.
-
-        :returns bool: True if reuse was successful, False otherwise
-        """
-        if not self.compose.conf["createiso_allow_reuse"]:
-            return
-
-        log_msg = "Cannot reuse ISO for %s.%s" % (variant, arch)
-        current_metadata = self.save_reuse_metadata(cmd, variant, arch, opts)
-
-        if opts.buildinstall_method and not self.bi.reused(variant, arch):
-            # If buildinstall phase was not reused for some reason, we can not
-            # reuse any bootable image. If a package change caused rebuild of
-            # boot.iso, we would catch it here too, but there could be a
-            # configuration change in lorax template which would remain
-            # undetected.
-            self.logger.info("%s - boot configuration changed", log_msg)
-            return False
-
-        # Check old compose configuration: extra_files and product_ids can be
-        # reflected on ISO.
-        old_config = self.compose.load_old_compose_config()
-        if not old_config:
-            self.logger.info("%s - no config for old compose", log_msg)
-            return False
-
-        # Disable reuse if unsigned packages are allowed. The older compose
-        # could have unsigned packages, and those may have been signed since
-        # then. We want to regenerate the ISO to have signatures.
-        if None in self.compose.conf["sigkeys"]:
-            self.logger.info("%s - unsigned packages are allowed", log_msg)
-            return False
-
-        # Convert current configuration to JSON and back to encode it similarly
-        # to the old one
-        config = json.loads(json.dumps(self.compose.conf))
-        for opt in self.compose.conf:
-            # Skip a selection of options: these affect what packages can be
-            # included, which we explicitly check later on.
-            config_whitelist = set(
-                [
-                    "gather_lookaside_repos",
-                    "pkgset_koji_builds",
-                    "pkgset_koji_scratch_tasks",
-                    "pkgset_koji_module_builds",
-                ]
-            )
-            # Skip irrelevant options
-            config_whitelist.update(["osbs", "osbuild"])
-            if opt in config_whitelist:
-                continue
-
-            if old_config.get(opt) != config.get(opt):
-                self.logger.info("%s - option %s differs", log_msg, opt)
-                return False
-
-        old_metadata = self._load_old_metadata(cmd, variant, arch)
-        if not old_metadata:
-            self.logger.info("%s - no old metadata found", log_msg)
-            return False
-
-        # Test if volume ID matches - volid can be generated dynamically based on
-        # other values, and could change even if nothing else is different.
-        if current_metadata["opts"]["volid"] != old_metadata["opts"]["volid"]:
-            self.logger.info("%s - volume ID differs", log_msg)
-            return False
-
-        # Compare packages on the ISO.
-        if compare_packages(
-            old_metadata["opts"]["graft_points"],
-            current_metadata["opts"]["graft_points"],
-        ):
-            self.logger.info("%s - packages differ", log_msg)
-            return False
-
-        try:
-            self.perform_reuse(
-                cmd,
-                variant,
-                arch,
-                opts,
-                old_metadata["cmd"]["iso_path"],
-            )
-            return True
-        except Exception as exc:
-            self.compose.log_error(
-                "Error while reusing ISO for %s.%s: %s", variant, arch, exc
-            )
-            self.compose.traceback("createiso-reuse-%s-%s" % (variant, arch))
-            return False
-
     def run(self):
         symlink_isos_to = self.compose.conf.get("symlink_isos_to")
         disc_type = self.compose.conf["disc_types"].get("dvd", "dvd")
@@ -354,29 +172,21 @@ class CreateisoPhase(PhaseLoggerMixin, PhaseBase):
                     supported=self.compose.supported,
                     hfs_compat=self.compose.conf["iso_hfs_ppc64le_compatible"],
                     use_xorrisofs=self.compose.conf.get("createiso_use_xorrisofs"),
-                    iso_level=get_iso_level_config(self.compose, variant, arch),
                 )

                 if bootable:
                     opts = opts._replace(
-                        buildinstall_method=self.compose.conf[
-                            "buildinstall_method"
-                        ],
-                        boot_iso=os.path.join(os_tree, "images", "boot.iso"),
+                        buildinstall_method=self.compose.conf["buildinstall_method"]
                     )

                 if self.compose.conf["create_jigdo"]:
                     jigdo_dir = self.compose.paths.compose.jigdo_dir(arch, variant)
                     opts = opts._replace(jigdo_dir=jigdo_dir, os_tree=os_tree)

-                # Try to reuse
-                if self.try_reuse(cmd, variant, arch, opts):
-                    # Reuse was successful, go to next ISO
-                    continue
-
-                script_dir = self.compose.paths.work.tmp_dir(arch, variant)
-                opts = opts._replace(script_dir=script_dir)
-                script_file = os.path.join(script_dir, "createiso-%s.sh" % filename)
+                script_file = os.path.join(
+                    self.compose.paths.work.tmp_dir(arch, variant),
+                    "createiso-%s.sh" % filename,
+                )
                 with open(script_file, "w") as f:
                     createiso.write_script(opts, f)
                 cmd["cmd"] = ["bash", script_file]
@@ -385,43 +195,13 @@ class CreateisoPhase(PhaseLoggerMixin, PhaseBase):
         if self.compose.notifier:
             self.compose.notifier.send("createiso-targets", deliverables=deliverables)

-        for cmd, variant, arch in commands:
+        for (cmd, variant, arch) in commands:
             self.pool.add(CreateIsoThread(self.pool))
             self.pool.queue_put((self.compose, cmd, variant, arch))

         self.pool.start()


-def read_packages(graft_points):
-    """Read packages that were listed in given graft points file.
-
-    Only files under Packages directory are considered. Particularly this
-    excludes .discinfo, .treeinfo and media.repo as well as repodata and
-    any extra files.
-
-    Extra files are easier to check by configuration (same name doesn't
-    imply same content). Repodata depend entirely on included packages (and
-    possibly product id certificate), but are affected by current time
-    which can change checksum despite data being the same.
-    """
-    with open(graft_points) as f:
-        return set(
-            line.split("=", 1)[0]
-            for line in f
-            if line.startswith("Packages/") or "/Packages/" in line
-        )
-
-
-def compare_packages(old_graft_points, new_graft_points):
-    """Read packages from the two files and compare them.
-
-    :returns bool: True if there are differences, False otherwise
-    """
-    old_files = read_packages(old_graft_points)
-    new_files = read_packages(new_graft_points)
-    return old_files != new_files
-
-
 class CreateIsoThread(WorkerThread):
     def fail(self, compose, cmd, variant, arch):
         self.pool.log_error("CreateISO failed, removing ISO: %s" % cmd["iso_path"])
@@ -466,14 +246,7 @@ class CreateIsoThread(WorkerThread):
         try:
             run_createiso_command(
-                num,
-                compose,
-                bootable,
-                arch,
-                cmd["cmd"],
-                mounts,
-                log_file,
-                cmd["iso_path"],
+                num, compose, bootable, arch, cmd["cmd"], mounts, log_file
             )
         except Exception:
             self.fail(compose, cmd, variant, arch)
@@ -540,10 +313,7 @@ def add_iso_to_metadata(
     setattr(img, "can_fail", compose.can_fail(variant, arch, "iso"))
     setattr(img, "deliverable", "iso")
     try:
-        img.volume_id = iso.get_volume_id(
-            iso_path,
-            compose.conf.get("createiso_use_xorrisofs"),
-        )
+        img.volume_id = iso.get_volume_id(iso_path)
     except RuntimeError:
         pass
     if arch == "src":
@@ -555,18 +325,19 @@ def add_iso_to_metadata(

 def run_createiso_command(
-    num, compose, bootable, arch, cmd, mounts, log_file, iso_path
+    num, compose, bootable, arch, cmd, mounts, log_file, with_jigdo=True
 ):
     packages = [
         "coreutils",
         "xorriso" if compose.conf.get("createiso_use_xorrisofs") else "genisoimage",
         "isomd5sum",
     ]
-    if compose.conf["create_jigdo"]:
+    if with_jigdo and compose.conf["create_jigdo"]:
         packages.append("jigdo")
     if bootable:
         extra_packages = {
             "lorax": ["lorax", "which"],
+            "buildinstall": ["anaconda"],
         }
         packages.extend(extra_packages[compose.conf["buildinstall_method"]])
@@ -575,7 +346,7 @@ def run_createiso_command(
     build_arch = arch
     if runroot.runroot_method == "koji" and not bootable:
         runroot_tag = compose.conf["runroot_tag"]
-        koji_wrapper = kojiwrapper.KojiWrapper(compose)
+        koji_wrapper = kojiwrapper.KojiWrapper(compose.conf["koji_profile"])
         koji_proxy = koji_wrapper.koji_proxy
         tag_info = koji_proxy.getTag(runroot_tag)
         if not tag_info:
@@ -598,76 +369,6 @@ def run_createiso_command(
         weight=compose.conf["runroot_weights"].get("createiso"),
     )

-    if bootable and compose.conf.get("createiso_use_xorrisofs"):
-        fix_treeinfo_checksums(compose, iso_path, arch)
-
-
-def fix_treeinfo_checksums(compose, iso_path, arch):
-    """It is possible for the ISO to contain a .treefile with incorrect
-    checksums. By modifying the ISO (adding files) some of the images may
-    change.
-
-    This function fixes that after the fact by looking for incorrect checksums,
-    recalculating them and updating the .treeinfo file. Since the size of the
-    file doesn't change, this seems to not change any images.
-    """
-    modified = False
-    with iso.mount(iso_path, compose._logger) as mountpoint:
-        ti = productmd.TreeInfo()
-        ti.load(os.path.join(mountpoint, ".treeinfo"))
-
-        for image, (type_, expected) in ti.checksums.checksums.items():
-            checksums = compute_file_checksums(os.path.join(mountpoint, image), [type_])
-            actual = checksums[type_]
-            if actual == expected:
-                # Everything fine here, skip to next image.
-                continue
-
-            compose.log_debug("%s: %s: checksum mismatch", iso_path, image)
-            # Update treeinfo with correct checksum
-            ti.checksums.checksums[image] = (type_, actual)
-            modified = True
-
-    if not modified:
-        compose.log_debug("%s: All checksums match, nothing to do.", iso_path)
-        return
-
-    try:
-        tmpdir = compose.mkdtemp(arch, prefix="fix-checksum-")
-        # Write modified .treeinfo
-        ti_path = os.path.join(tmpdir, ".treeinfo")
-        compose.log_debug("Storing modified .treeinfo in %s", ti_path)
-        ti.dump(ti_path)
-        # Write a modified DVD into a temporary path, that is atomically moved
-        # over the original file.
-        fixed_path = os.path.join(tmpdir, "fixed-checksum-dvd.iso")
-        cmd = ["xorriso"]
-        cmd.extend(
-            itertools.chain.from_iterable(
-                iso.xorriso_commands(arch, iso_path, fixed_path)
-            )
-        )
-        cmd.extend(["-map", ti_path, ".treeinfo"])
-        run(
-            cmd,
-            logfile=compose.paths.log.log_file(
-                arch, "checksum-fix_generate_%s" % os.path.basename(iso_path)
-            ),
-        )
-        # The modified ISO no longer has implanted MD5, so that needs to be
-        # fixed again.
-        compose.log_debug("Implanting new MD5 to %s", fixed_path)
-        run(
-            iso.get_implantisomd5_cmd(fixed_path, compose.supported),
-            logfile=compose.paths.log.log_file(
-                arch, "checksum-fix_implantisomd5_%s" % os.path.basename(iso_path)
-            ),
-        )
-        # All done, move the updated image to the final location.
-        compose.log_debug("Updating %s", iso_path)
-        os.rename(fixed_path, iso_path)
-    finally:
-        shutil.rmtree(tmpdir)

 def split_iso(compose, arch, variant, no_split=False, logger=None):
     """
@@ -897,36 +598,3 @@ def create_hardlinks(staging_dir, log_file):
     """
     cmd = ["/usr/sbin/hardlink", "-c", "-vv", staging_dir]
     run(cmd, logfile=log_file, show_cmd=True)
-
-
-class OldFileLinker(object):
-    """
-    A wrapper around os.link that remembers which files were linked and can
-    clean them up.
-    """
-
-    def __init__(self, logger):
-        self.logger = logger
-        self.linked_files = []
-
-    def link(self, src, dst):
-        self.logger.debug("Hardlinking %s to %s", src, dst)
-        os.link(src, dst)
-        self.linked_files.append(dst)
-
-    def abort(self):
-        """Clean up all files created by this instance."""
-        for f in self.linked_files:
-            os.unlink(f)
-
-
-def get_iso_level_config(compose, variant, arch):
-    """
-    Get configured ISO level for this variant and architecture.
-    """
-    level = compose.conf.get("iso_level")
-    if isinstance(level, list):
-        level = None
-        for c in get_arch_variant_data(compose.conf, "iso_level", arch, variant):
-            level = c
-    return level
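The removed read_packages/compare_packages pair decides ISO reuse by comparing only the Packages/ entries from graft-point files, for the reasons its docstring gives (repodata checksums move with every compose even when the content is identical). The core is small enough to restate essentially verbatim:

```python
def read_packages(graft_points):
    # Each graft-point line looks like "<path-on-iso>=<path-on-disk>";
    # only Packages/ entries are compared.
    with open(graft_points) as f:
        return set(
            line.split("=", 1)[0]
            for line in f
            if line.startswith("Packages/") or "/Packages/" in line
        )


def compare_packages(old_graft_points, new_graft_points):
    """True if the two graft point files list different packages."""
    return read_packages(old_graft_points) != read_packages(new_graft_points)
```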

View File

@@ -16,6 +16,7 @@
 __all__ = ("create_variant_repo",)

 import copy
 import errno
 import glob
@@ -24,22 +25,19 @@ import shutil
 import threading
 import xml.dom.minidom

-import productmd.modules
-import productmd.rpms
-from kobo.shortcuts import relative_path, run
 from kobo.threads import ThreadPool, WorkerThread
+from kobo.shortcuts import run, relative_path

-from ..module_util import Modulemd, collect_module_defaults, collect_module_obsoletes
-from ..util import (
-    get_arch_variant_data,
-    read_single_module_stream_from_file,
-    temp_dir,
-)
-from ..wrappers.createrepo import CreaterepoWrapper
 from ..wrappers.scm import get_dir_from_scm
+from ..wrappers.createrepo import CreaterepoWrapper
 from .base import PhaseBase
+from ..util import get_arch_variant_data, temp_dir
+from ..module_util import Modulemd, collect_module_defaults
+import productmd.rpms
+import productmd.modules

-CACHE_TOPDIR = "/var/cache/pungi/createrepo_c/"

 createrepo_lock = threading.Lock()
 createrepo_dirs = set()
@@ -81,7 +79,6 @@ class CreaterepoPhase(PhaseBase):
             get_dir_from_scm(
                 self.compose.conf["createrepo_extra_modulemd"][variant.uid],
                 self.compose.paths.work.tmp_dir(variant=variant, create_dir=False),
-                compose=self.compose,
             )

         self.pool.queue_put((self.compose, None, variant, "srpm"))
@@ -191,23 +188,6 @@ def create_variant_repo(
     comps_path = None
     if compose.has_comps and pkg_type == "rpm":
         comps_path = compose.paths.work.comps(arch=arch, variant=variant)

-    if compose.conf["createrepo_enable_cache"]:
-        cachedir = os.path.join(
-            CACHE_TOPDIR,
-            "%s-%s" % (compose.conf["release_short"], os.getuid()),
-        )
-        if not os.path.exists(cachedir):
-            try:
-                os.makedirs(cachedir)
-            except Exception as e:
-                compose.log_warning(
-                    "Cache disabled because cannot create cache dir %s %s"
-                    % (cachedir, str(e))
-                )
-                cachedir = None
-    else:
-        cachedir = None
     cmd = repo.get_createrepo_cmd(
         repo_dir,
         update=True,
@@ -223,7 +203,6 @@ def create_variant_repo(
         oldpackagedirs=old_package_dirs,
         use_xz=compose.conf["createrepo_use_xz"],
         extra_args=compose.conf["createrepo_extra_args"],
-        cachedir=cachedir,
     )
     log_file = compose.paths.log.log_file(
         arch, "createrepo-%s.%s" % (variant, pkg_type)
     )
@@ -266,15 +245,12 @@ def create_variant_repo(
         defaults_dir, module_names, mod_index, overrides_dir=overrides_dir
     )

-    obsoletes_dir = compose.paths.work.module_obsoletes_dir()
-    mod_index = collect_module_obsoletes(obsoletes_dir, module_names, mod_index)
-
     # Add extra modulemd files
     if variant.uid in compose.conf.get("createrepo_extra_modulemd", {}):
         compose.log_debug("Adding extra modulemd for %s.%s", variant.uid, arch)
         dirname = compose.paths.work.tmp_dir(variant=variant, create_dir=False)
         for filepath in glob.glob(os.path.join(dirname, arch) + "/*.yaml"):
-            module_stream = read_single_module_stream_from_file(filepath)
+            module_stream = Modulemd.ModuleStream.read_file(filepath, strict=True)
             if not mod_index.add_module_stream(module_stream):
                 raise RuntimeError(
                     "Failed parsing modulemd data from %s" % filepath
                 )
@@ -367,7 +343,7 @@ def get_productids_from_scm(compose):
     tmp_dir = compose.mkdtemp(prefix="pungi_")
     try:
-        get_dir_from_scm(product_id, tmp_dir, compose=compose)
+        get_dir_from_scm(product_id, tmp_dir)
     except OSError as e:
         if e.errno == errno.ENOENT and product_id_allow_missing:
             compose.log_warning("No product IDs in %s" % product_id)
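The removed createrepo_enable_cache branch derives a per-release, per-user cache directory and silently disables caching when the directory cannot be created. Condensed into a helper (get_cachedir and the log_warning parameter are illustrative names):

```python
import os

CACHE_TOPDIR = "/var/cache/pungi/createrepo_c/"


def get_cachedir(release_short, log_warning=print):
    """Return a usable createrepo_c cache dir, or None to disable caching."""
    cachedir = os.path.join(
        CACHE_TOPDIR, "%s-%s" % (release_short, os.getuid())
    )
    if not os.path.exists(cachedir):
        try:
            os.makedirs(cachedir)
        except Exception as e:
            log_warning(
                "Cache disabled because cannot create cache dir %s %s"
                % (cachedir, str(e))
            )
            return None
    return cachedir
```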

View File

@ -14,8 +14,6 @@
# along with this program; if not, see <https://gnu.org/licenses/>. # along with this program; if not, see <https://gnu.org/licenses/>.
import os import os
import hashlib
import json
from kobo.shortcuts import force_list from kobo.shortcuts import force_list
from kobo.threads import ThreadPool, WorkerThread from kobo.threads import ThreadPool, WorkerThread
@ -30,17 +28,8 @@ from pungi.phases.createiso import (
copy_boot_images, copy_boot_images,
run_createiso_command, run_createiso_command,
load_and_tweak_treeinfo, load_and_tweak_treeinfo,
compare_packages,
OldFileLinker,
get_iso_level_config,
)
from pungi.util import (
failable,
get_format_substs,
get_variant_data,
get_volid,
read_json_file,
) )
from pungi.util import failable, get_format_substs, get_variant_data, get_volid
from pungi.wrappers import iso from pungi.wrappers import iso
from pungi.wrappers.scm import get_dir_from_scm, get_file_from_scm from pungi.wrappers.scm import get_dir_from_scm, get_file_from_scm
@ -48,10 +37,9 @@ from pungi.wrappers.scm import get_dir_from_scm, get_file_from_scm
class ExtraIsosPhase(PhaseLoggerMixin, ConfigGuardedPhase, PhaseBase): class ExtraIsosPhase(PhaseLoggerMixin, ConfigGuardedPhase, PhaseBase):
name = "extra_isos" name = "extra_isos"
def __init__(self, compose, buildinstall_phase): def __init__(self, compose):
super(ExtraIsosPhase, self).__init__(compose) super(ExtraIsosPhase, self).__init__(compose)
self.pool = ThreadPool(logger=self.logger) self.pool = ThreadPool(logger=self.logger)
self.bi = buildinstall_phase
def validate(self): def validate(self):
for variant in self.compose.get_variants(types=["variant"]): for variant in self.compose.get_variants(types=["variant"]):
@ -76,18 +64,14 @@ class ExtraIsosPhase(PhaseLoggerMixin, ConfigGuardedPhase, PhaseBase):
for arch in sorted(arches): for arch in sorted(arches):
commands.append((config, variant, arch)) commands.append((config, variant, arch))
for config, variant, arch in commands: for (config, variant, arch) in commands:
self.pool.add(ExtraIsosThread(self.pool, self.bi)) self.pool.add(ExtraIsosThread(self.pool))
self.pool.queue_put((self.compose, config, variant, arch)) self.pool.queue_put((self.compose, config, variant, arch))
self.pool.start() self.pool.start()
class ExtraIsosThread(WorkerThread): class ExtraIsosThread(WorkerThread):
def __init__(self, pool, buildinstall_phase):
super(ExtraIsosThread, self).__init__(pool)
self.bi = buildinstall_phase
def process(self, item, num): def process(self, item, num):
self.num = num self.num = num
compose, config, variant, arch = item compose, config, variant, arch = item
@ -131,28 +115,20 @@ class ExtraIsosThread(WorkerThread):
supported=compose.supported, supported=compose.supported,
hfs_compat=compose.conf["iso_hfs_ppc64le_compatible"], hfs_compat=compose.conf["iso_hfs_ppc64le_compatible"],
use_xorrisofs=compose.conf.get("createiso_use_xorrisofs"), use_xorrisofs=compose.conf.get("createiso_use_xorrisofs"),
iso_level=get_iso_level_config(compose, variant, arch),
) )
os_tree = compose.paths.compose.os_tree(arch, variant)
if compose.conf["create_jigdo"]: if compose.conf["create_jigdo"]:
jigdo_dir = compose.paths.compose.jigdo_dir(arch, variant) jigdo_dir = compose.paths.compose.jigdo_dir(arch, variant)
os_tree = compose.paths.compose.os_tree(arch, variant)
opts = opts._replace(jigdo_dir=jigdo_dir, os_tree=os_tree) opts = opts._replace(jigdo_dir=jigdo_dir, os_tree=os_tree)
if bootable: if bootable:
opts = opts._replace( opts = opts._replace(
buildinstall_method=compose.conf["buildinstall_method"], buildinstall_method=compose.conf["buildinstall_method"]
boot_iso=os.path.join(os_tree, "images", "boot.iso"),
) )
# Check if it can be reused. script_file = os.path.join(
hash = hashlib.sha256() compose.paths.work.tmp_dir(arch, variant), "extraiso-%s.sh" % filename
hash.update(json.dumps(config, sort_keys=True).encode("utf-8")) )
config_hash = hash.hexdigest()
if not self.try_reuse(compose, variant, arch, config_hash, opts):
script_dir = compose.paths.work.tmp_dir(arch, variant)
opts = opts._replace(script_dir=script_dir)
script_file = os.path.join(script_dir, "extraiso-%s.sh" % filename)
with open(script_file, "w") as f: with open(script_file, "w") as f:
createiso.write_script(opts, f) createiso.write_script(opts, f)
@ -166,7 +142,7 @@ class ExtraIsosThread(WorkerThread):
log_file=compose.paths.log.log_file( log_file=compose.paths.log.log_file(
arch, "extraiso-%s" % os.path.basename(iso_path) arch, "extraiso-%s" % os.path.basename(iso_path)
), ),
iso_path=iso_path, with_jigdo=compose.conf["create_jigdo"],
) )
img = add_iso_to_metadata( img = add_iso_to_metadata(
@ -179,163 +155,8 @@ class ExtraIsosThread(WorkerThread):
) )
img._max_size = config.get("max_size") img._max_size = config.get("max_size")
save_reuse_metadata(compose, variant, arch, config_hash, opts, iso_path)
self.pool.log_info("[DONE ] %s" % msg) self.pool.log_info("[DONE ] %s" % msg)
def try_reuse(self, compose, variant, arch, config_hash, opts):
# Check explicit config
if not compose.conf["extraiso_allow_reuse"]:
return
log_msg = "Cannot reuse ISO for %s.%s" % (variant, arch)
if opts.buildinstall_method and not self.bi.reused(variant, arch):
# If buildinstall phase was not reused for some reason, we can not
# reuse any bootable image. If a package change caused rebuild of
# boot.iso, we would catch it here too, but there could be a
# configuration change in lorax template which would remain
# undetected.
self.pool.log_info("%s - boot configuration changed", log_msg)
return False
# Check old compose configuration: extra_files and product_ids can be
# reflected on ISO.
old_config = compose.load_old_compose_config()
if not old_config:
self.pool.log_info("%s - no config for old compose", log_msg)
return False
# Disable reuse if unsigned packages are allowed. The older compose
# could have unsigned packages, and those may have been signed since
# then. We want to regenerate the ISO to have signatures.
if None in compose.conf["sigkeys"]:
self.pool.log_info("%s - unsigned packages are allowed", log_msg)
return False
# Convert current configuration to JSON and back to encode it similarly
# to the old one
config = json.loads(json.dumps(compose.conf))
for opt in compose.conf:
# Skip a selection of options: these affect what packages can be
# included, which we explicitly check later on.
config_whitelist = set(
[
"gather_lookaside_repos",
"pkgset_koji_builds",
"pkgset_koji_scratch_tasks",
"pkgset_koji_module_builds",
]
)
# Skip irrelevant options
config_whitelist.update(["osbs", "osbuild"])
if opt in config_whitelist:
continue
if old_config.get(opt) != config.get(opt):
self.pool.log_info("%s - option %s differs", log_msg, opt)
return False
old_metadata = load_old_metadata(compose, variant, arch, config_hash)
if not old_metadata:
self.pool.log_info("%s - no old metadata found", log_msg)
return False
# Test if volume ID matches - volid can be generated dynamically based on
# other values, and could change even if nothing else is different.
if opts.volid != old_metadata["opts"]["volid"]:
self.pool.log_info("%s - volume ID differs", log_msg)
return False
# Compare packages on the ISO.
if compare_packages(
old_metadata["opts"]["graft_points"],
opts.graft_points,
):
self.pool.log_info("%s - packages differ", log_msg)
return False
try:
self.perform_reuse(
compose,
variant,
arch,
opts,
old_metadata["opts"]["output_dir"],
old_metadata["opts"]["iso_name"],
)
return True
except Exception as exc:
self.pool.log_error(
"Error while reusing ISO for %s.%s: %s", variant, arch, exc
)
compose.traceback("extraiso-reuse-%s-%s-%s" % (variant, arch, config_hash))
return False
def perform_reuse(self, compose, variant, arch, opts, old_iso_dir, old_file_name):
"""
Copy all related files from old compose to the new one. As a last step
add the new image to metadata.
"""
linker = OldFileLinker(self.pool._logger)
old_iso_path = os.path.join(old_iso_dir, old_file_name)
iso_path = os.path.join(opts.output_dir, opts.iso_name)
try:
# Hardlink ISO and manifest
for suffix in ("", ".manifest"):
linker.link(old_iso_path + suffix, iso_path + suffix)
# Copy log files
# The log file name includes filename of the image, so we need to
# find old file with the old name, and rename it to the new name.
log_file = compose.paths.log.log_file(arch, "extraiso-%s" % opts.iso_name)
old_log_file = compose.paths.old_compose_path(
compose.paths.log.log_file(arch, "extraiso-%s" % old_file_name)
)
linker.link(old_log_file, log_file)
# Copy jigdo files
if opts.jigdo_dir:
old_jigdo_dir = compose.paths.old_compose_path(opts.jigdo_dir)
for suffix in (".template", ".jigdo"):
linker.link(
os.path.join(old_jigdo_dir, old_file_name) + suffix,
os.path.join(opts.jigdo_dir, opts.iso_name) + suffix,
)
except Exception:
# A problem happened while linking some file, let's clean up
# everything.
linker.abort()
raise
def save_reuse_metadata(compose, variant, arch, config_hash, opts, iso_path):
"""
Save metadata for possible reuse of this image. The file name is determined
from the hash of a configuration snippet for this image. Any change in that
configuration in the next compose will change the hash and thus block reuse.
"""
metadata = {"opts": opts._asdict()}
metadata_path = compose.paths.log.log_file(
arch,
"extraiso-reuse-%s-%s-%s" % (variant.uid, arch, config_hash),
ext="json",
)
with open(metadata_path, "w") as f:
json.dump(metadata, f, indent=2)
def load_old_metadata(compose, variant, arch, config_hash):
metadata_path = compose.paths.log.log_file(
arch,
"extraiso-reuse-%s-%s-%s" % (variant.uid, arch, config_hash),
ext="json",
)
old_path = compose.paths.old_compose_path(metadata_path)
try:
return read_json_file(old_path)
except Exception:
return None
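The reuse metadata above is keyed by config_hash, so any edit to the image's configuration snippet yields a different file name and silently blocks reuse. A minimal sketch of how such a hash can be derived, assuming it is a digest of the JSON-serialized snippet (the exact helper pungi uses may differ):

import hashlib
import json

def config_hash(image_conf):
    # Serialize deterministically so an unchanged config always produces
    # the same hash, and any edit produces a new one.
    payload = json.dumps(image_conf, sort_keys=True).encode("utf-8")
    return hashlib.sha256(payload).hexdigest()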
def get_extra_files(compose, variant, arch, extra_files):
"""Clone the configured files into a directory from where they can be
@@ -14,50 +14,51 @@
# along with this program; if not, see <https://gnu.org/licenses/>.
import glob
import json
import os
import shutil
import threading
import six
from six.moves import cPickle as pickle
from kobo.rpmlib import parse_nvra
from kobo.shortcuts import run
from productmd.rpms import Rpms
from pungi.phases.pkgset.common import get_all_arches
from six.moves import cPickle as pickle
try:
from queue import Queue
except ImportError:
from Queue import Queue
import pungi.wrappers.kojiwrapper
from pungi.arch import get_compatible_arches, split_name_arch
from pungi.compose import get_ordered_variant_uids
from pungi.module_util import (
Modulemd,
collect_module_defaults,
collect_module_obsoletes,
)
from pungi.phases.base import PhaseBase
from pungi.phases.createrepo import add_modular_metadata
from pungi.util import get_arch_data, get_arch_variant_data, get_variant_data, makedirs
from pungi.wrappers.scm import get_file_from_scm
from ...wrappers.createrepo import CreaterepoWrapper
from .link import link_files
from ...wrappers.createrepo import CreaterepoWrapper
import pungi.wrappers.kojiwrapper
from pungi.compose import get_ordered_variant_uids
from pungi.arch import get_compatible_arches, split_name_arch
from pungi.phases.base import PhaseBase
from pungi.util import get_arch_data, get_arch_variant_data, get_variant_data, makedirs
from pungi.module_util import Modulemd, collect_module_defaults
from pungi.phases.createrepo import add_modular_metadata
def get_gather_source(name):
import pungi.phases.gather.sources
from .source import GatherSourceContainer
return pungi.phases.gather.sources.ALL_SOURCES[name.lower()]
GatherSourceContainer.register_module(pungi.phases.gather.sources)
container = GatherSourceContainer()
return container["GatherSource%s" % name]
def get_gather_method(name):
import pungi.phases.gather.methods
from .method import GatherMethodContainer
return pungi.phases.gather.methods.ALL_METHODS[name.lower()]
GatherMethodContainer.register_module(pungi.phases.gather.methods)
container = GatherMethodContainer()
return container["GatherMethod%s" % name]
class GatherPhase(PhaseBase):
@@ -86,34 +87,17 @@ class GatherPhase(PhaseBase):
if variant.modules:
errors.append("Modular compose requires libmodulemd package.")
variant_as_lookaside = self.compose.conf.get("variant_as_lookaside", [])
all_variants = self.compose.all_variants
# check whether variants from configuration value
# 'variant_as_lookaside' are correct
for requiring, required in variant_as_lookaside:
variant_as_lookaside = self.compose.conf.get("variant_as_lookaside", [])
all_variants = self.compose.all_variants
for (requiring, required) in variant_as_lookaside:
if requiring in all_variants and required not in all_variants:
errors.append(
"variant_as_lookaside: variant %r doesn't exist but is "
"required by %r" % (required, requiring)
)
# check whether variants from configuration value
# 'variant_as_lookaside' have same architectures
for requiring, required in variant_as_lookaside:
if (
requiring in all_variants
and required in all_variants
and not set(all_variants[requiring].arches).issubset(
set(all_variants[required].arches)
)
):
errors.append(
"variant_as_lookaside: architectures of variant '%s' "
"aren't subset of architectures of variant '%s'"
% (requiring, required)
)
if errors:
raise ValueError("\n".join(errors))
@@ -194,19 +178,27 @@ def load_old_gather_result(compose, arch, variant):
return None
compose.log_info("Loading old GATHER phase results: %s", old_gather_result)
try:
with open(old_gather_result, "rb") as f:
old_result = pickle.load(f)
return old_result
except Exception as e:
compose.log_debug(
"Failed to load old GATHER phase results %s : %s" def load_old_compose_config(compose):
% (old_gather_result, str(e)) """
) Helper method to load Pungi config dump from old compose.
"""
config_dump_full = compose.paths.log.log_file("global", "config-dump")
config_dump_full = compose.paths.old_compose_path(config_dump_full)
if not config_dump_full:
return None
compose.log_info("Loading old config file: %s", config_dump_full)
with open(config_dump_full, "r") as f:
old_config = json.load(f)
return old_config
def reuse_old_gather_packages(compose, arch, variant, package_sets, methods):
def reuse_old_gather_packages(compose, arch, variant, package_sets):
""" """
Tries to reuse `gather_packages` result from older compose. Tries to reuse `gather_packages` result from older compose.
@ -214,7 +206,6 @@ def reuse_old_gather_packages(compose, arch, variant, package_sets, methods):
:param str arch: Architecture to reuse old gather data for. :param str arch: Architecture to reuse old gather data for.
:param str variant: Variant to reuse old gather data for. :param str variant: Variant to reuse old gather data for.
:param list package_sets: List of package sets to gather packages from. :param list package_sets: List of package sets to gather packages from.
:param str methods: Gather method.
:return: Old `gather_packages` result or None if old result cannot be used. :return: Old `gather_packages` result or None if old result cannot be used.
""" """
log_msg = "Cannot reuse old GATHER phase results - %s" log_msg = "Cannot reuse old GATHER phase results - %s"
@ -227,38 +218,38 @@ def reuse_old_gather_packages(compose, arch, variant, package_sets, methods):
compose.log_info(log_msg % "no old gather results.") compose.log_info(log_msg % "no old gather results.")
return return
old_config = compose.load_old_compose_config() old_config = load_old_compose_config(compose)
if old_config is None: if old_config is None:
compose.log_info(log_msg % "no old compose config dump.") compose.log_info(log_msg % "no old compose config dump.")
return return
# Do not reuse when required variant is not reused.
if not hasattr(compose, "_gather_reused_variant_arch"):
setattr(compose, "_gather_reused_variant_arch", [])
variant_as_lookaside = compose.conf.get("variant_as_lookaside", [])
for requiring, required in variant_as_lookaside:
if (
requiring == variant.uid
and (required, arch) not in compose._gather_reused_variant_arch
):
compose.log_info(
log_msg % "variant %s as lookaside is not reused." % required
)
return
# Do not reuse if there's external lookaside repo.
with open(compose.paths.log.log_file("global", "config-dump"), "r") as f:
config_dump = json.load(f)
if config_dump.get("gather_lookaside_repos") or old_config.get(
"gather_lookaside_repos"
):
compose.log_info(log_msg % "there's external lookaside repo.")
return
# The dumps/loads is needed to convert all unicode strings to non-unicode ones.
config = json.loads(json.dumps(compose.conf))
for opt, value in old_config.items():
if opt == "gather_lookaside_repos":
# Gather lookaside repos are updated during the gather phase. Check that
# the gather_lookaside_repos except the ones added are the same.
if opt == "gather_lookaside_repos" and opt in config:
value_to_compare = []
# Filter out repourls which start with `compose.topdir` and also remove
# their parent list in case it would be empty.
for variant, per_arch_repos in config[opt]:
per_arch_repos_to_compare = {}
for arch, repourl in per_arch_repos.items():
# The gather_lookaside_repos config allows setting multiple repourls
# using list, but `_update_config` always uses strings. Therefore we
# only try to filter out string_types.
if not isinstance(repourl, six.string_types):
continue
if not repourl.startswith(compose.topdir):
per_arch_repos_to_compare[arch] = repourl
if per_arch_repos_to_compare:
value_to_compare.append([variant, per_arch_repos_to_compare])
if value != value_to_compare:
compose.log_info(
log_msg
% ("compose configuration option gather_lookaside_repos changed.")
)
return
continue
# Skip checking for frequently changing configuration options which do *not*
@@ -387,30 +378,6 @@ def reuse_old_gather_packages(compose, arch, variant, package_sets, methods):
compose.log_info(log_msg % "some RPMs have been removed.")
return
compose._gather_reused_variant_arch.append((variant.uid, arch))
# Copy old gather log for debugging
try:
if methods == "hybrid":
log_dir = compose.paths.log.topdir(arch, create_dir=False)
old_log_dir = compose.paths.old_compose_path(log_dir)
for log_file in glob.glob(
os.path.join(old_log_dir, "hybrid-depsolver-%s-iter-*" % variant)
):
compose.log_info(
"Copying old gather log %s to %s" % (log_file, log_dir)
)
shutil.copy2(log_file, log_dir)
else:
log_dir = os.path.dirname(
compose.paths.work.pungi_log(arch, variant, create_dir=False)
)
old_log_dir = compose.paths.old_compose_path(log_dir)
compose.log_info("Copying old gather log %s to %s" % (old_log_dir, log_dir))
shutil.copytree(old_log_dir, log_dir)
except Exception as e:
compose.log_warning("Copying old gather log failed: %s" % str(e))
return result
@@ -437,9 +404,7 @@ def gather_packages(compose, arch, variant, package_sets, fulltree_excludes=None
prepopulate = get_prepopulate_packages(compose, arch, variant)
fulltree_excludes = fulltree_excludes or set()
reused_result = reuse_old_gather_packages(
compose, arch, variant, package_sets, methods
)
reused_result = reuse_old_gather_packages(compose, arch, variant, package_sets)
if reused_result:
result = reused_result
elif methods == "hybrid":
@@ -469,7 +434,9 @@
)
else:
for source_name in ("module", "comps", "json"):
packages, groups, filter_packages = get_variant_packages(
compose, arch, variant, source_name, package_sets
)
@@ -540,8 +507,7 @@ def write_packages(compose, arch, variant, pkg_map, path_prefix):
def trim_packages(compose, arch, variant, pkg_map, parent_pkgs=None, remove_pkgs=None):
"""Remove parent variant's packages from pkg_map <-- it gets modified in this function
There are three cases where changes may happen:
@@ -574,6 +540,7 @@ def trim_packages(compose, arch, variant, pkg_map, parent_pkgs=None, remove_pkgs
move_to_parent_pkgs = _mk_pkg_map()
removed_pkgs = _mk_pkg_map()
for pkg_type, pkgs in pkg_map.items():
new_pkgs = []
for pkg in pkgs:
pkg_path = pkg["path"]
@@ -645,44 +612,19 @@ def _make_lookaside_repo(compose, variant, arch, pkg_map, package_sets=None):
compose.paths.work.topdir(arch="global"), "download"
)
+ "/",
"koji": lambda: compose.conf.get(
"koji": lambda: pungi.wrappers.kojiwrapper.KojiWrapper(
"koji_cache",
compose.conf["koji_profile"]
pungi.wrappers.kojiwrapper.KojiWrapper(compose).koji_module.config.topdir,
).rstrip("/")
+ "/",
"kojimock": lambda: pungi.wrappers.kojiwrapper.KojiMockWrapper(
compose,
get_all_arches(compose),
).koji_module.config.topdir.rstrip("/")
+ "/",
}
path_prefix = prefixes[compose.conf["pkgset_source"]]()
package_list = set()
pkglist = compose.paths.work.lookaside_package_list(arch=arch, variant=variant)
for pkg_arch in pkg_map.keys():
with open(pkglist, "w") as f:
try:
for packages in pkg_map[arch][variant.uid].values():
for pkg_type, packages in pkg_map[pkg_arch][variant.uid].items():
# We want all packages for current arch, and SRPMs for any
# arch. Ultimately there will only be one source repository, so
# we need a union of all SRPMs.
if pkg_type == "srpm" or pkg_arch == arch:
for pkg in packages:
if "lookaside" in pkg.get("flags", []):
# We want to ignore lookaside packages, those will
# be visible to the depending variants from the
# lookaside repo directly.
continue
pkg = pkg["path"] pkg = pkg["path"]
if path_prefix and pkg.startswith(path_prefix): if path_prefix and pkg.startswith(path_prefix):
pkg = pkg[len(path_prefix) :] pkg = pkg[len(path_prefix) :]
package_list.add(pkg)
except KeyError:
raise RuntimeError(
"Variant '%s' does not have architecture " "'%s'!" % (variant, pkg_arch)
)
pkglist = compose.paths.work.lookaside_package_list(arch=arch, variant=variant)
with open(pkglist, "w") as f:
for pkg in sorted(package_list):
f.write("%s\n" % pkg) f.write("%s\n" % pkg)
cr = CreaterepoWrapper(compose.conf["createrepo_c"]) cr = CreaterepoWrapper(compose.conf["createrepo_c"])
@@ -719,8 +661,6 @@ def _make_lookaside_repo(compose, variant, arch, pkg_map, package_sets=None):
collect_module_defaults(
defaults_dir, module_names, mod_index, overrides_dir=overrides_dir
)
obsoletes_dir = compose.paths.work.module_obsoletes_dir()
mod_index = collect_module_obsoletes(obsoletes_dir, module_names, mod_index)
log_file = compose.paths.log.log_file(
arch, "lookaside_repo_modules_%s" % (variant.uid)
@@ -796,10 +736,6 @@ def _gather_variants(
try:
que.put((arch, gather_packages(*args, **kwargs)))
except Exception as exc:
compose.log_error(
"Error in gathering for %s.%s: %s", variant, arch, exc
)
compose.traceback("gather-%s-%s" % (variant, arch))
errors.put(exc)
# Run gather_packages() in parallel with multi threads and store
@@ -14,6 +14,15 @@
# along with this program; if not, see <https://gnu.org/licenses/>.
class GatherMethodBase(object):
import kobo.plugins
class GatherMethodBase(kobo.plugins.Plugin):
def __init__(self, compose):
self.compose = compose
class GatherMethodContainer(kobo.plugins.PluginContainer):
@classmethod
def normalize_name(cls, name):
return name.lower()
@@ -1,24 +0,0 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
from .method_deps import GatherMethodDeps
from .method_nodeps import GatherMethodNodeps
from .method_hybrid import GatherMethodHybrid
ALL_METHODS = {
"deps": GatherMethodDeps,
"nodeps": GatherMethodNodeps,
"hybrid": GatherMethodHybrid,
}
@@ -15,7 +15,6 @@
import os
import shutil
from kobo.shortcuts import run
from kobo.pkgset import SimpleRpmWrapper, RpmWrapper
@@ -32,6 +31,8 @@ import pungi.phases.gather.method
class GatherMethodDeps(pungi.phases.gather.method.GatherMethodBase):
enabled = True
def __call__(
self,
arch,
@@ -242,19 +243,8 @@ def resolve_deps(compose, arch, variant, source_name=None):
)
# Use temp working directory as workaround for
# https://bugzilla.redhat.com/show_bug.cgi?id=795137
with temp_dir(prefix="pungi_") as work_dir:
with temp_dir(prefix="pungi_") as tmp_dir:
run(cmd, logfile=pungi_log, show_cmd=True, workdir=work_dir, env=os.environ)
run(cmd, logfile=pungi_log, show_cmd=True, workdir=tmp_dir, env=os.environ)
# Clean up tmp dir
# Workaround for rpm not honoring sgid bit which only appears when yum is used.
yumroot_dir = os.path.join(tmp_dir, "work", arch, "yumroot")
if os.path.isdir(yumroot_dir):
try:
shutil.rmtree(yumroot_dir)
except Exception as e:
compose.log_warning(
"Failed to clean up tmp dir: %s %s" % (yumroot_dir, str(e))
)
with open(pungi_log, "r") as f:
packages, broken_deps, missing_comps_pkgs = pungi_wrapper.parse_log(f)
@@ -47,15 +47,9 @@ class FakePackage(object):
@property
def files(self):
paths = []
# createrepo_c.Package.files is a tuple, but its length differs across
# versions. The constants define index at which the related value is
# located.
for entry in self.pkg.files:
paths.append(
os.path.join(entry[cr.FILE_ENTRY_PATH], entry[cr.FILE_ENTRY_NAME])
)
return paths
return [
os.path.join(dirname, basename) for (_, dirname, basename) in self.pkg.files
]
@property
def provides(self):
@@ -66,6 +60,8 @@ class FakePackage(object):
class GatherMethodHybrid(pungi.phases.gather.method.GatherMethodBase):
enabled = True
def __init__(self, *args, **kwargs):
super(GatherMethodHybrid, self).__init__(*args, **kwargs)
self.package_maps = {}
@@ -355,11 +351,8 @@ class GatherMethodHybrid(pungi.phases.gather.method.GatherMethodBase):
# There are two ways how the debuginfo package can be named. We
# want to get them all.
source_name = kobo.rpmlib.parse_nvra(pkg.rpm_sourcerpm)["name"]
for debuginfo_name in [
"%s-debuginfo" % pkg.name,
"%s-debugsource" % source_name,
]:
for pattern in ["%s-debuginfo", "%s-debugsource"]:
debuginfo_name = pattern % pkg.name
debuginfo = self._get_debuginfo(debuginfo_name, pkg_arch)
for dbg in debuginfo:
# For each debuginfo package that matches on name and
@@ -508,27 +501,6 @@ def _make_result(paths):
return [{"path": path, "flags": []} for path in sorted(paths)]
def get_repo_packages(path):
"""Extract file names of all packages in the given repository."""
packages = set()
def callback(pkg):
packages.add(os.path.basename(pkg.location_href))
repomd = os.path.join(path, "repodata/repomd.xml")
with as_local_file(repomd) as url_:
repomd = cr.Repomd(url_)
for rec in repomd.records:
if rec.type != "primary":
continue
record_url = os.path.join(path, rec.location_href)
with as_local_file(record_url) as url_:
cr.xml_parse_primary(url_, pkgcb=callback, do_files=False)
return packages
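A hedged usage sketch for the get_repo_packages helper above, assuming a local createrepo_c-generated repository (path and package name are illustrative):

pkgs = get_repo_packages("/tmp/lookaside-repo")
# pkgs holds base file names, which expand_packages below compares
# against os.path.basename(pkg.file_path).
if "bash-5.1.8-1.el9.x86_64.rpm" in pkgs:
    print("package already available from lookaside")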
def expand_packages(nevra_to_pkg, lookasides, nvrs, filter_packages):
"""For each package add source RPM."""
# This will serve as the final result. We collect sets of paths to the
@@ -539,16 +511,25 @@ def expand_packages(nevra_to_pkg, lookasides, nvrs, filter_packages):
filters = set(filter_packages)
# Collect list of all packages in lookaside. These will not be added to the
# result. Fus handles this in part: if a package is explicitly mentioned as
# input (which can happen with comps group expansion), it will be in the
# output even if it's in lookaside.
lookaside_packages = set()
for repo in lookasides:
lookaside_packages.update(get_repo_packages(repo))
md = cr.Metadata()
md.locate_and_load_xml(repo)
for key in md.keys():
pkg = md.get(key)
url = os.path.join(pkg.location_base or repo, pkg.location_href)
# Strip file:// prefix
lookaside_packages.add(url[7:])
for nvr, pkg_arch, flags in nvrs:
pkg = nevra_to_pkg["%s.%s" % (nvr, pkg_arch)]
if os.path.basename(pkg.file_path) in lookaside_packages:
if pkg.file_path in lookaside_packages:
# Fus can return lookaside package in output if the package is
# explicitly listed as input. This can happen during comps
# expansion.
# Package is in lookaside, don't add it and ignore sources and
# debuginfo too.
continue
if pkg_is_debug(pkg):
debuginfo.add(pkg.file_path)
@@ -561,7 +542,7 @@ def expand_packages(nevra_to_pkg, lookasides, nvrs, filter_packages):
if (srpm.name, "src") in filters:
# Filtered package, skipping
continue
if os.path.basename(srpm.file_path) not in lookaside_packages:
if srpm.file_path not in lookaside_packages:
srpms.add(srpm.file_path)
except KeyError:
# Didn't find source RPM.. this should be logged
@@ -28,6 +28,8 @@ from kobo.pkgset import SimpleRpmWrapper, RpmWrapper
class GatherMethodNodeps(pungi.phases.gather.method.GatherMethodBase):
enabled = True
def __call__(self, arch, variant, *args, **kwargs):
fname = "gather-nodeps-%s" % variant.uid
if self.source_name:
@@ -14,6 +14,15 @@
# along with this program; if not, see <https://gnu.org/licenses/>.
class GatherSourceBase(object):
import kobo.plugins
class GatherSourceBase(kobo.plugins.Plugin):
def __init__(self, compose):
self.compose = compose
class GatherSourceContainer(kobo.plugins.PluginContainer):
@classmethod
def normalize_name(cls, name):
return name.lower()
@@ -1,26 +0,0 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
from .source_comps import GatherSourceComps
from .source_json import GatherSourceJson
from .source_module import GatherSourceModule
from .source_none import GatherSourceNone
ALL_SOURCES = {
"comps": GatherSourceComps,
"json": GatherSourceJson,
"module": GatherSourceModule,
"none": GatherSourceNone,
}
@@ -30,6 +30,8 @@ import pungi.phases.gather.source
class GatherSourceComps(pungi.phases.gather.source.GatherSourceBase):
enabled = True
def __call__(self, arch, variant):
groups = set()
if not self.compose.conf.get("comps_file"):
@@ -32,31 +32,30 @@ set([(rpm_name, rpm_arch or None)])
import json
import os
import pungi.phases.gather.source
class GatherSourceJson(pungi.phases.gather.source.GatherSourceBase):
enabled = True
def __call__(self, arch, variant):
json_path = self.compose.conf.get("gather_source_mapping")
if not json_path:
return set(), set()
with open(os.path.join(self.compose.config_dir, json_path), "r") as f:
with open(json_path, "r") as f:
mapping = json.load(f)
packages = set()
if variant is None:
# get all packages for all variants
for variant_uid in mapping:
for pkg_name, pkg_arches in mapping[variant_uid].get(arch, {}).items():
for pkg_name, pkg_arches in mapping[variant_uid][arch].items():
for pkg_arch in pkg_arches:
packages.add((pkg_name, pkg_arch))
else:
# get packages for a particular variant
for pkg_name, pkg_arches in (
mapping.get(variant.uid, {}).get(arch, {}).items()
):
for pkg_name, pkg_arches in mapping[variant.uid][arch].items():
for pkg_arch in pkg_arches:
packages.add((pkg_name, pkg_arch))
return packages, set()
@@ -26,6 +26,8 @@ import pungi.phases.gather.source
class GatherSourceModule(pungi.phases.gather.source.GatherSourceBase):
enabled = True
def __call__(self, arch, variant):
groups = set()
packages = set()
@@ -29,5 +29,7 @@ import pungi.phases.gather.source
class GatherSourceNone(pungi.phases.gather.source.GatherSourceBase):
enabled = True
def __call__(self, arch, variant):
return set(), set()
@@ -1,22 +1,18 @@
# -*- coding: utf-8 -*-
import copy
import hashlib
import json
import os
import shutil
import time
from kobo import shortcuts
from pungi.util import makedirs, get_mtime, get_file_size, failable, log_failed_task
from pungi.util import as_local_file, translate_path, get_repo_urls, version_generator
from pungi.util import translate_path, get_repo_urls, version_generator
from pungi.phases import base
from pungi.linker import Linker
from pungi.wrappers.kojiwrapper import KojiWrapper
from kobo.threads import ThreadPool, WorkerThread
from kobo.shortcuts import force_list
from productmd.images import Image
from productmd.rpms import Rpms
# This is a mapping from formats to file extensions. The format is what koji
@@ -25,7 +21,6 @@ from productmd.rpms import Rpms
# results will be pulled into the compose.
EXTENSIONS = {
"docker": ["tar.gz", "tar.xz"],
"iso": ["iso"],
"liveimg-squashfs": ["liveimg.squashfs"],
"qcow": ["qcow"],
"qcow2": ["qcow2"],
@@ -40,7 +35,6 @@ EXTENSIONS = {
"vdi": ["vdi"],
"vmdk": ["vmdk"],
"vpc": ["vhd"],
"vhd-compressed": ["vhd.gz", "vhd.xz"],
"vsphere-ova": ["vsphere.ova"],
}
@@ -52,10 +46,9 @@ class ImageBuildPhase(
name = "image_build"
def __init__(self, compose, buildinstall_phase=None):
def __init__(self, compose):
super(ImageBuildPhase, self).__init__(compose)
self.pool = ThreadPool(logger=self.logger)
self.buildinstall_phase = buildinstall_phase
def _get_install_tree(self, image_conf, variant):
"""
@@ -124,7 +117,6 @@ class ImageBuildPhase(
# prevent problems in next iteration where the original
# value is needed.
image_conf = copy.deepcopy(image_conf)
original_image_conf = copy.deepcopy(image_conf)
# image_conf is passed to get_image_build_cmd as dict
@@ -175,7 +167,6 @@ class ImageBuildPhase(
image_conf["image-build"]["can_fail"] = sorted(can_fail)
cmd = {
"original_image_conf": original_image_conf,
"image_conf": image_conf,
"conf_file": self.compose.paths.work.image_build_conf(
image_conf["image-build"]["variant"],
@@ -191,7 +182,7 @@ class ImageBuildPhase(
"scratch": image_conf["image-build"].pop("scratch", False),
}
self.pool.add(CreateImageBuildThread(self.pool))
self.pool.queue_put((self.compose, cmd, self.buildinstall_phase))
self.pool.queue_put((self.compose, cmd))
self.pool.start()
@@ -201,7 +192,7 @@ class CreateImageBuildThread(WorkerThread):
self.pool.log_error("CreateImageBuild failed.")
def process(self, item, num):
compose, cmd, buildinstall_phase = item
compose, cmd = item
variant = cmd["image_conf"]["image-build"]["variant"]
subvariant = cmd["image_conf"]["image-build"].get("subvariant", variant.uid)
self.failable_arches = cmd["image_conf"]["image-build"].get("can_fail", "")
@@ -217,54 +208,22 @@ class CreateImageBuildThread(WorkerThread):
subvariant,
logger=self.pool._logger,
):
self.worker(num, compose, variant, subvariant, cmd, buildinstall_phase)
self.worker(num, compose, variant, subvariant, cmd)
def worker(self, num, compose, variant, subvariant, cmd, buildinstall_phase):
def worker(self, num, compose, variant, subvariant, cmd):
arches = cmd["image_conf"]["image-build"]["arches"]
formats = "-".join(cmd["image_conf"]["image-build"]["format"])
dash_arches = "-".join(arches)
log_file = compose.paths.log.log_file(
dash_arches, "imagebuild-%s-%s-%s" % (variant.uid, subvariant, formats)
)
metadata_file = log_file[:-4] + ".reuse.json"
external_repo_checksum = {}
try:
for repo in cmd["original_image_conf"]["image-build"]["repo"]:
if repo in compose.all_variants:
continue
with as_local_file(
os.path.join(repo, "repodata/repomd.xml")
) as filename:
with open(filename, "rb") as f:
external_repo_checksum[repo] = hashlib.sha256(
f.read()
).hexdigest()
except Exception as e:
external_repo_checksum = None
self.pool.log_info(
"Can't calculate checksum of repomd.xml of external repo - %s" % str(e)
)
if self._try_to_reuse(
compose,
variant,
subvariant,
metadata_file,
log_file,
cmd,
external_repo_checksum,
buildinstall_phase,
):
return
msg = (
"Creating image (formats: %s, arches: %s, variant: %s, subvariant: %s)"
% (formats, dash_arches, variant, subvariant)
)
self.pool.log_info("[BEGIN] %s" % msg)
koji_wrapper = KojiWrapper(compose)
koji_wrapper = KojiWrapper(compose.conf["koji_profile"])
# writes conf file for koji image-build
self.pool.log_info(
@@ -316,22 +275,6 @@ class CreateImageBuildThread(WorkerThread):
)
break
self._link_images(compose, variant, subvariant, cmd, image_infos)
self._write_reuse_metadata(
compose, metadata_file, cmd, image_infos, external_repo_checksum
)
self.pool.log_info("[DONE ] %s (task id: %s)" % (msg, output["task_id"]))
def _link_images(self, compose, variant, subvariant, cmd, image_infos):
"""Link images to compose and update image manifest.
:param Compose compose: Current compose.
:param Variant variant: Current variant.
:param str subvariant:
:param dict cmd: Dict of params for image-build.
:param dict image_infos: Dict contains image info.
"""
# The use case here is that you can run koji image-build with multiple --format
# It's ok to do it serialized since we're talking about max 2 images per single
# image_build record
@@ -346,9 +289,7 @@ class CreateImageBuildThread(WorkerThread):
# let's not change filename of koji outputs
image_dest = os.path.join(image_dir, os.path.basename(image_info["path"]))
src_file = compose.koji_downloader.get_file(
os.path.realpath(image_info["path"])
)
src_file = os.path.realpath(image_info["path"])
linker.link(src_file, image_dest, link_type=cmd["link_type"])
# Update image manifest
@@ -367,160 +308,4 @@ class CreateImageBuildThread(WorkerThread):
setattr(img, "deliverable", "image-build")
compose.im.add(variant=variant.uid, arch=image_info["arch"], image=img)
self.pool.log_info("[DONE ] %s (task id: %s)" % (msg, output["task_id"]))
def _try_to_reuse(
self,
compose,
variant,
subvariant,
metadata_file,
log_file,
cmd,
external_repo_checksum,
buildinstall_phase,
):
"""Try to reuse images from old compose.
:param Compose compose: Current compose.
:param Variant variant: Current variant.
:param str subvariant:
:param str metadata_file: Path to reuse metadata file.
:param str log_file: Path to log file.
:param dict cmd: Dict of params for image-build.
:param dict external_repo_checksum: Dict contains checksum of repomd.xml
or None if can't get checksum.
:param BuildinstallPhase buildinstall_phase: buildinstall phase of
current compose.
"""
log_msg = "Cannot reuse old image_build phase results - %s"
if not compose.conf["image_build_allow_reuse"]:
self.pool.log_info(
log_msg % "reuse of old image_build results is disabled."
)
return False
if external_repo_checksum is None:
self.pool.log_info(
log_msg % "Can't ensure that external repo is not changed."
)
return False
old_metadata_file = compose.paths.old_compose_path(metadata_file)
if not old_metadata_file:
self.pool.log_info(log_msg % "Can't find old reuse metadata file")
return False
try:
old_metadata = self._load_reuse_metadata(old_metadata_file)
except Exception as e:
self.pool.log_info(
log_msg % "Can't load old reuse metadata file: %s" % str(e)
)
return False
if old_metadata["cmd"]["original_image_conf"] != cmd["original_image_conf"]:
self.pool.log_info(log_msg % "image_build config changed")
return False
# Make sure external repo does not change
if (
old_metadata["external_repo_checksum"] is None
or old_metadata["external_repo_checksum"] != external_repo_checksum
):
self.pool.log_info(log_msg % "External repo may be changed")
return False
# Make sure buildinstall phase is reused
for arch in cmd["image_conf"]["image-build"]["arches"]:
if buildinstall_phase and not buildinstall_phase.reused(variant, arch):
self.pool.log_info(log_msg % "buildinstall phase changed")
return False
# Make sure packages in variant not change
rpm_manifest_file = compose.paths.compose.metadata("rpms.json")
rpm_manifest = Rpms()
rpm_manifest.load(rpm_manifest_file)
old_rpm_manifest_file = compose.paths.old_compose_path(rpm_manifest_file)
old_rpm_manifest = Rpms()
old_rpm_manifest.load(old_rpm_manifest_file)
for repo in cmd["original_image_conf"]["image-build"]["repo"]:
if repo not in compose.all_variants:
# External repos are checked using other logic.
continue
for arch in cmd["image_conf"]["image-build"]["arches"]:
if (
rpm_manifest.rpms[variant.uid][arch]
!= old_rpm_manifest.rpms[variant.uid][arch]
):
self.pool.log_info(
log_msg % "Packages in %s.%s changed." % (variant.uid, arch)
)
return False
self.pool.log_info(
"Reusing images from old compose for variant %s" % variant.uid
)
try:
self._link_images(
compose, variant, subvariant, cmd, old_metadata["image_infos"]
)
except Exception as e:
self.pool.log_info(log_msg % "Can't link images %s" % str(e))
return False
old_log_file = compose.paths.old_compose_path(log_file)
try:
shutil.copy2(old_log_file, log_file)
except Exception as e:
self.pool.log_info(
log_msg % "Can't copy old log_file: %s %s" % (old_log_file, str(e))
)
return False
self._write_reuse_metadata(
compose,
metadata_file,
cmd,
old_metadata["image_infos"],
external_repo_checksum,
)
return True
def _write_reuse_metadata(
self, compose, metadata_file, cmd, image_infos, external_repo_checksum
):
"""Write metadata file.
:param Compose compose: Current compose.
:param str metadata_file: Path to reuse metadata file.
:param dict cmd: Dict of params for image-build.
:param dict image_infos: Dict contains image info.
:param dict external_repo_checksum: Dict contains checksum of repomd.xml
or None if can't get checksum.
"""
msg = "Writing reuse metadata file: %s" % metadata_file
self.pool.log_info(msg)
cmd_copy = copy.deepcopy(cmd)
del cmd_copy["image_conf"]["image-build"]["variant"]
data = {
"cmd": cmd_copy,
"image_infos": image_infos,
"external_repo_checksum": external_repo_checksum,
}
try:
with open(metadata_file, "w") as f:
json.dump(data, f, indent=4)
except Exception as e:
self.pool.log_info("%s Failed: %s" % (msg, str(e)))
def _load_reuse_metadata(self, metadata_file):
"""Load metadata file.
:param str metadata_file: Path to reuse metadata file.
"""
with open(metadata_file, "r") as f:
return json.load(f)
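For reference, the reuse metadata written and read above is plain JSON; a sketch of its shape with hypothetical values (the real dict mirrors cmd minus the variant object):

data = {
    "cmd": {},                   # deep copy of the image-build cmd, variant removed
    "image_infos": [             # one entry per image produced by the task
        {"path": "/mnt/koji/path/to/disk.qcow2", "arch": "x86_64"},
    ],
    "external_repo_checksum": {  # sha256 of each external repo's repomd.xml
        "https://example.com/repo": "<sha256 hex digest>",
    },
}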
@@ -3,7 +3,6 @@
import os
from kobo import shortcuts
from collections import defaultdict
import threading
from .base import PhaseBase
from ..util import get_format_substs, get_file_size
@@ -69,7 +68,6 @@ class ImageChecksumPhase(PhaseBase):
def run(self):
topdir = self.compose.paths.compose.topdir()
make_checksums(
topdir,
self.compose.im,
@@ -89,8 +87,6 @@
checksum_types,
base_checksum_name_gen,
one_file,
results_lock,
cache_lock,
):
for image in images:
filename = os.path.basename(image.path)
@@ -100,21 +96,14 @@
filesize = image.size or get_file_size(full_path)
cache_lock.acquire()
if full_path not in cache:
cache_lock.release()
# Source ISO is listed under each binary architecture. There's no
# point in checksumming it twice, so we can just remember the
# digest from first run..
checksum_value = shortcuts.compute_file_checksums(full_path, checksum_types)
with cache_lock:
cache[full_path] = checksum_value
cache[full_path] = shortcuts.compute_file_checksums(
full_path, checksum_types
)
else:
cache_lock.release()
with cache_lock:
digests = cache[full_path]
for checksum, digest in digests.items():
# Update metadata with the checksum
image.add_checksum(None, checksum, digest)
@@ -123,10 +112,7 @@
checksum_filename = os.path.join(
path, "%s.%sSUM" % (filename, checksum.upper())
)
with results_lock:
results[checksum_filename].add((filename, filesize, checksum, digest))
results[checksum_filename].add(
(filename, filesize, checksum, digest)
)
if one_file:
dirname = os.path.basename(path)
@@ -139,23 +125,14 @@
checksum_filename = "%s%sSUM" % (base_checksum_name, checksum.upper())
checksum_path = os.path.join(path, checksum_filename)
with results_lock:
results[checksum_path].add((filename, filesize, checksum, digest))
def make_checksums(topdir, im, checksum_types, one_file, base_checksum_name_gen):
results = defaultdict(set)
cache = {}
threads = []
results_lock = threading.Lock() # lock to synchronize access to the results dict.
cache_lock = threading.Lock() # lock to synchronize access to the cache dict.
# create all worker threads
for (variant, arch, path), images in get_images(topdir, im).items():
threads.append(
threading.Thread(
target=_compute_checksums,
args=[
_compute_checksums(
results,
cache,
variant,
@@ -165,16 +142,7 @@ def make_checksums(topdir, im, checksum_types, one_file, base_checksum_name_gen)
checksum_types,
base_checksum_name_gen,
one_file,
results_lock,
cache_lock,
],
)
)
threads[-1].start()
# wait for all worker threads to finish
for thread in threads:
thread.join()
for file in results:
dump_checksums(file, results[file])
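The left-hand version of make_checksums runs one _compute_checksums worker thread per (variant, arch, path) group and guards the shared results and cache dicts with locks. A self-contained sketch of that pattern (names and data illustrative):

import threading
from collections import defaultdict

results = defaultdict(set)
results_lock = threading.Lock()

def worker(group, items):
    for item in items:
        digest = hash(item)  # stand-in for the real checksum computation
        with results_lock:   # shared dict must be guarded across workers
            results[group].add((item, digest))

threads = [
    threading.Thread(target=worker, args=(group, items))
    for group, items in {"a": [1, 2], "b": [3]}.items()
]
for t in threads:
    t.start()
for t in threads:
    t.join()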
@@ -1,122 +0,0 @@
# -*- coding: utf-8 -*-
import os
import re
from kobo.threads import ThreadPool, WorkerThread
from .base import ConfigGuardedPhase, PhaseLoggerMixin
from .. import util
from ..wrappers import kojiwrapper
from ..phases.osbs import add_metadata
class ImageContainerPhase(PhaseLoggerMixin, ConfigGuardedPhase):
name = "image_container"
def __init__(self, compose):
super(ImageContainerPhase, self).__init__(compose)
self.pool = ThreadPool(logger=self.logger)
self.pool.metadata = {}
def run(self):
for variant in self.compose.get_variants():
for conf in self.get_config_block(variant):
self.pool.add(ImageContainerThread(self.pool))
self.pool.queue_put((self.compose, variant, conf))
self.pool.start()
class ImageContainerThread(WorkerThread):
def process(self, item, num):
compose, variant, config = item
self.num = num
with util.failable(
compose,
bool(config.pop("failable", None)),
variant,
"*",
"osbs",
logger=self.pool._logger,
):
self.worker(compose, variant, config)
def worker(self, compose, variant, config):
msg = "Image container task for variant %s" % variant.uid
self.pool.log_info("[BEGIN] %s" % msg)
source = config.pop("url")
target = config.pop("target")
priority = config.pop("priority", None)
config["yum_repourls"] = [
self._get_repo(
compose,
variant,
config.get("arch_override", "").split(),
config.pop("image_spec"),
)
]
# Start task
koji = kojiwrapper.KojiWrapper(compose)
koji.login()
task_id = koji.koji_proxy.buildContainer(
source, target, config, priority=priority
)
koji.save_task_id(task_id)
# Wait for it to finish and capture the output into log file (even
# though there is not much there).
log_dir = os.path.join(compose.paths.log.topdir(), "image_container")
util.makedirs(log_dir)
log_file = os.path.join(
log_dir, "%s-%s-watch-task.log" % (variant.uid, self.num)
)
if koji.watch_task(task_id, log_file) != 0:
raise RuntimeError(
"ImageContainer task failed: %s. See %s for details"
% (task_id, log_file)
)
add_metadata(variant, task_id, compose, config.get("scratch", False))
self.pool.log_info("[DONE ] %s" % msg)
def _get_repo(self, compose, variant, arches, image_spec):
"""
Return a repo file that points baseurl to the image specified by
image_spec.
"""
image_paths = set()
for arch in arches or compose.im.images[variant.uid].keys():
for image in compose.im.images[variant.uid].get(arch, []):
for key, value in image_spec.items():
if not re.match(value, getattr(image, key)):
break
else:
image_paths.add(image.path.replace(arch, "$basearch"))
if len(image_paths) != 1:
raise RuntimeError(
"%d images matched specification. Only one was expected."
% len(image_paths)
)
image_path = image_paths.pop()
absolute_path = os.path.join(compose.paths.compose.topdir(), image_path)
repo_file = os.path.join(
compose.paths.work.tmp_dir(None, variant),
"image-container-%s-%s.repo" % (variant, self.num),
)
with open(repo_file, "w") as f:
f.write("[image-to-include]\n")
f.write("name=Location of image to embed\n")
f.write("baseurl=%s\n" % util.translate_path(compose, absolute_path))
f.write("enabled=0\n")
f.write("gpgcheck=0\n")
return util.translate_path(compose, repo_file)
@@ -16,7 +16,6 @@
import collections
import os
import glob
import shutil
from kobo.shortcuts import run
@@ -73,10 +72,6 @@ class InitPhase(PhaseBase):
self.compose.paths.work.module_defaults_dir(create_dir=False)
)
# download module obsoletes
if self.compose.has_module_obsoletes:
write_module_obsoletes(self.compose)
# write prepopulate file
write_prepopulate_file(self.compose)
@@ -165,18 +160,12 @@ def write_variant_comps(compose, arch, variant):
run(cmd)
comps = CompsWrapper(comps_file)
if variant.groups or variant.modules is not None or variant.type != "variant":
# Filter groups if the variant has some, or it's a modular variant, or
# is not a base variant.
if (
variant.groups
or variant.modules is not None
or variant.modular_koji_tags is not None
or variant.type != "variant"
):
unmatched = comps.filter_groups(variant.groups)
for grp in unmatched:
compose.log_warning(UNMATCHED_GROUP_MSG % (variant.uid, arch, grp))
contains_all = not variant.groups and not variant.environments
if compose.conf["comps_filter_environments"] and not contains_all:
# We only want to filter environments if it's enabled by configuration
@@ -229,33 +218,12 @@ def write_module_defaults(compose):
)
def write_module_obsoletes(compose):
scm_dict = compose.conf["module_obsoletes_dir"]
if isinstance(scm_dict, dict):
if scm_dict["scm"] == "file":
scm_dict["dir"] = os.path.join(compose.config_dir, scm_dict["dir"])
else:
scm_dict = os.path.join(compose.config_dir, scm_dict)
with temp_dir(prefix="moduleobsoletes_") as tmp_dir:
get_dir_from_scm(scm_dict, tmp_dir, compose=compose)
compose.log_debug("Writing module obsoletes")
shutil.copytree(
tmp_dir,
compose.paths.work.module_obsoletes_dir(create_dir=False),
ignore=shutil.ignore_patterns(".git"),
)
def validate_module_defaults(path):
"""Make sure there are no conflicting defaults and every default can be loaded.
Each module name can only have one default stream.
"""Make sure there are no conflicting defaults. Each module name can only
have one default stream.
:param str path: directory with cloned module defaults
"""
defaults_num = len(glob.glob(os.path.join(path, "*.yaml")))
seen_defaults = collections.defaultdict(set)
for module_name, defaults in iter_module_defaults(path):
@@ -274,11 +242,6 @@ def validate_module_defaults(path):
"There are duplicated module defaults:\n%s" % "\n".join(errors)
)
# Make sure all defaults are valid otherwise update_from_defaults_directory
# will return empty object
if defaults_num != len(seen_defaults):
raise RuntimeError("Defaults contains not valid default file")
def validate_comps(path):
"""Check for whitespace issues in comps."""
@@ -1,229 +0,0 @@
# -*- coding: utf-8 -*-
import os
from kobo.threads import ThreadPool, WorkerThread
from kobo import shortcuts
from productmd.images import Image
from . import base
from .. import util
from ..linker import Linker
from ..wrappers import kojiwrapper
from .image_build import EXTENSIONS
KIWIEXTENSIONS = [
("vhd-compressed", ["vhdfixed.xz"], "vhd.xz"),
("vagrant-libvirt", ["vagrant.libvirt.box"], "vagrant-libvirt.box"),
("vagrant-virtualbox", ["vagrant.virtualbox.box"], "vagrant-virtualbox.box"),
]
class KiwiBuildPhase(
base.PhaseLoggerMixin, base.ImageConfigMixin, base.ConfigGuardedPhase
):
name = "kiwibuild"
def __init__(self, compose):
super(KiwiBuildPhase, self).__init__(compose)
self.pool = ThreadPool(logger=self.logger)
def _get_arches(self, image_conf, arches):
"""Get an intersection of arches in the config dict and the given ones."""
if "arches" in image_conf:
arches = set(image_conf["arches"]) & arches
return sorted(arches)
@staticmethod
def _get_repo_urls(compose, repos, arch="$basearch"):
"""
Get list of repos with resolved repo URLs. Preserve repos defined
as dicts.
"""
resolved_repos = []
for repo in repos:
repo = util.get_repo_url(compose, repo, arch=arch)
if repo is None:
raise RuntimeError("Failed to resolve repo URL for %s" % repo)
resolved_repos.append(repo)
return resolved_repos
def _get_repo(self, image_conf, variant):
"""
Get a list of repos. First included are those explicitly listed in
config, followed by the repo for the current variant if it's not included in
the list already.
"""
repos = shortcuts.force_list(image_conf.get("repos", []))
if not variant.is_empty and variant.uid not in repos:
repos.append(variant.uid)
return KiwiBuildPhase._get_repo_urls(self.compose, repos, arch="$arch")
def run(self):
for variant in self.compose.get_variants():
arches = set([x for x in variant.arches if x != "src"])
for image_conf in self.get_config_block(variant):
build_arches = self._get_arches(image_conf, arches)
if not build_arches:
self.log_debug("skip: no arches")
continue
# these properties can be set per-image *or* as e.g.
# kiwibuild_description_scm or global_release in the config
generics = {
"release": self.get_release(image_conf),
"target": self.get_config(image_conf, "target"),
"descscm": self.get_config(image_conf, "description_scm"),
"descpath": self.get_config(image_conf, "description_path"),
"type": self.get_config(image_conf, "type"),
"type_attr": self.get_config(image_conf, "type_attr"),
"bundle_name_format": self.get_config(
image_conf, "bundle_name_format"
),
}
repo = self._get_repo(image_conf, variant)
failable_arches = image_conf.pop("failable", [])
if failable_arches == ["*"]:
failable_arches = image_conf["arches"]
self.pool.add(RunKiwiBuildThread(self.pool))
self.pool.queue_put(
(
self.compose,
variant,
image_conf,
build_arches,
generics,
repo,
failable_arches,
)
)
self.pool.start()
class RunKiwiBuildThread(WorkerThread):
def process(self, item, num):
(compose, variant, config, arches, generics, repo, failable_arches) = item
self.failable_arches = failable_arches
# the Koji task as a whole can only fail if *all* arches are failable
can_task_fail = set(failable_arches).issuperset(set(arches))
self.num = num
with util.failable(
compose,
can_task_fail,
variant,
"*",
"kiwibuild",
logger=self.pool._logger,
):
self.worker(compose, variant, config, arches, generics, repo)
def worker(self, compose, variant, config, arches, generics, repo):
msg = "kiwibuild task for variant %s" % variant.uid
self.pool.log_info("[BEGIN] %s" % msg)
koji = kojiwrapper.KojiWrapper(compose)
koji.login()
task_id = koji.koji_proxy.kiwiBuild(
generics["target"],
arches,
generics["descscm"],
generics["descpath"],
profile=config["kiwi_profile"],
release=generics["release"],
repos=repo,
type=generics["type"],
type_attr=generics["type_attr"],
result_bundle_name_format=generics["bundle_name_format"],
# this ensures the task won't fail if only failable arches fail
optional_arches=self.failable_arches,
)
koji.save_task_id(task_id)
# Wait for it to finish and capture the output into log file.
log_dir = os.path.join(compose.paths.log.topdir(), "kiwibuild")
util.makedirs(log_dir)
log_file = os.path.join(
log_dir, "%s-%s-watch-task.log" % (variant.uid, self.num)
)
if koji.watch_task(task_id, log_file) != 0:
raise RuntimeError(
"kiwiBuild task failed: %s. See %s for details" % (task_id, log_file)
)
# Refresh koji session which may have timed out while the task was
# running. Watching is done via a subprocess, so the session is
# inactive.
koji = kojiwrapper.KojiWrapper(compose)
linker = Linker(logger=self.pool._logger)
# Process all images in the build. There should be one for each
# architecture, but we don't verify that.
paths = koji.get_image_paths(task_id)
for arch, paths in paths.items():
for path in paths:
type_, format_ = _find_type_and_format(path)
if not format_:
# Path doesn't match any known type.
continue
# image_dir is absolute path to which the image should be copied.
# We also need the same path as relative to compose directory for
# including in the metadata.
image_dir = compose.paths.compose.image_dir(variant) % {"arch": arch}
rel_image_dir = compose.paths.compose.image_dir(
variant, relative=True
) % {"arch": arch}
util.makedirs(image_dir)
filename = os.path.basename(path)
image_dest = os.path.join(image_dir, filename)
src_file = compose.koji_downloader.get_file(path)
linker.link(src_file, image_dest, link_type=compose.conf["link_type"])
# Update image manifest
img = Image(compose.im)
# Get the manifest type from the config if supplied, otherwise we
# determine the manifest type based on the koji output
img.type = type_
img.format = format_
img.path = os.path.join(rel_image_dir, filename)
img.mtime = util.get_mtime(image_dest)
img.size = util.get_file_size(image_dest)
img.arch = arch
img.disc_number = 1 # We don't expect multiple disks
img.disc_count = 1
img.bootable = False
img.subvariant = config.get("subvariant", variant.uid)
setattr(img, "can_fail", arch in self.failable_arches)
setattr(img, "deliverable", "kiwibuild")
compose.im.add(variant=variant.uid, arch=arch, image=img)
self.pool.log_info("[DONE ] %s (task id: %s)" % (msg, task_id))
def _find_type_and_format(path):
for type_, suffixes in EXTENSIONS.items():
for suffix in suffixes:
if path.endswith(suffix):
return type_, suffix
# these are our kiwi-exclusive mappings for images whose extensions
# aren't quite the same as imagefactory
for type_, suffixes, format_ in KIWIEXTENSIONS:
if any(path.endswith(suffix) for suffix in suffixes):
return type_, format_
return None, None
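A quick way to see what `_find_type_and_format` does is to run it against a few file names. The sketch below is self-contained and runnable; the two lookup tables are illustrative stand-ins, not pungi's real EXTENSIONS/KIWIEXTENSIONS values.

# Illustrative stand-ins for the real tables defined elsewhere in pungi.
EXTENSIONS = {"qcow2": ["qcow2"]}
KIWIEXTENSIONS = [("vagrant-libvirt", [".vagrant.libvirt.box"], "vagrant.libvirt.box")]

def find_type_and_format(path):
    # Same lookup order as above: shared suffixes first, kiwi-only mappings second.
    for type_, suffixes in EXTENSIONS.items():
        for suffix in suffixes:
            if path.endswith(suffix):
                return type_, suffix
    for type_, suffixes, format_ in KIWIEXTENSIONS:
        if any(path.endswith(suffix) for suffix in suffixes):
            return type_, format_
    return None, None

print(find_type_and_format("Fedora-41.x86_64.qcow2"))         # ('qcow2', 'qcow2')
print(find_type_and_format("Fedora-41.vagrant.libvirt.box"))  # ('vagrant-libvirt', 'vagrant.libvirt.box')
print(find_type_and_format("Fedora-41.x86_64.log"))           # (None, None)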

pungi/phases/live_images.py (new file, 406 lines)

@ -0,0 +1,406 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
import os
import sys
import time
import shutil
from kobo.threads import ThreadPool, WorkerThread
from kobo.shortcuts import run, save_to_file, force_list
from productmd.images import Image
from six.moves import shlex_quote
from pungi.wrappers.kojiwrapper import KojiWrapper
from pungi.wrappers import iso
from pungi.phases import base
from pungi.util import makedirs, get_mtime, get_file_size, failable
from pungi.util import get_repo_urls
# HACK: define cmp in python3
if sys.version_info[0] == 3:
def cmp(a, b):
return (a > b) - (a < b)
class LiveImagesPhase(
base.PhaseLoggerMixin, base.ImageConfigMixin, base.ConfigGuardedPhase
):
name = "live_images"
def __init__(self, compose):
super(LiveImagesPhase, self).__init__(compose)
self.pool = ThreadPool(logger=self.logger)
def _get_repos(self, arch, variant, data):
repos = []
if not variant.is_empty:
repos.append(variant.uid)
repos.extend(force_list(data.get("repo", [])))
return get_repo_urls(self.compose, repos, arch=arch)
def run(self):
symlink_isos_to = self.compose.conf.get("symlink_isos_to")
commands = []
for variant in self.compose.all_variants.values():
for arch in variant.arches + ["src"]:
for data in self.get_config_block(variant, arch):
subvariant = data.get("subvariant", variant.uid)
type = data.get("type", "live")
if type == "live":
dest_dir = self.compose.paths.compose.iso_dir(
arch, variant, symlink_to=symlink_isos_to
)
elif type == "appliance":
dest_dir = self.compose.paths.compose.image_dir(
variant, symlink_to=symlink_isos_to
)
dest_dir = dest_dir % {"arch": arch}
makedirs(dest_dir)
else:
raise RuntimeError("Unknown live image type %s" % type)
if not dest_dir:
continue
cmd = {
"name": data.get("name"),
"version": self.get_version(data),
"release": self.get_release(data),
"dest_dir": dest_dir,
"build_arch": arch,
"ks_file": data["kickstart"],
"ksurl": self.get_ksurl(data),
# Used for images wrapped in RPM
"specfile": data.get("specfile", None),
# Scratch builds (only taken into consideration if a
# specfile is specified). For images wrapped in an RPM,
# scratch is disabled by default; for other images,
# scratch is always on.
"scratch": data.get("scratch", False),
"sign": False,
"type": type,
"label": "", # currently not used
"subvariant": subvariant,
"failable_arches": data.get("failable", []),
# First see if live_target is specified, then fall back
# to regular setup of local, phase and global setting.
"target": self.compose.conf.get("live_target")
or self.get_config(data, "target"),
}
cmd["repos"] = self._get_repos(arch, variant, data)
# Signing of the rpm wrapped image
if not cmd["scratch"] and data.get("sign"):
cmd["sign"] = True
cmd["filename"] = self._get_file_name(
arch, variant, cmd["name"], cmd["version"]
)
commands.append((cmd, variant, arch))
for (cmd, variant, arch) in commands:
self.pool.add(CreateLiveImageThread(self.pool))
self.pool.queue_put((self.compose, cmd, variant, arch))
self.pool.start()
def _get_file_name(self, arch, variant, name=None, version=None):
if self.compose.conf["live_images_no_rename"]:
return None
disc_type = self.compose.conf["disc_types"].get("live", "live")
format = (
"%(compose_id)s-%(variant)s-%(arch)s-%(disc_type)s%(disc_num)s%(suffix)s"
)
# Custom name (prefix)
if name:
custom_iso_name = name
if version:
custom_iso_name += "-%s" % version
format = (
custom_iso_name
+ "-%(variant)s-%(arch)s-%(disc_type)s%(disc_num)s%(suffix)s"
)
# XXX: hardcoded disc_num
return self.compose.get_image_name(
arch, variant, disc_type=disc_type, disc_num=None, format=format
)
class CreateLiveImageThread(WorkerThread):
EXTS = (".iso", ".raw.xz")
def process(self, item, num):
compose, cmd, variant, arch = item
self.failable_arches = cmd.get("failable_arches", [])
self.can_fail = bool(self.failable_arches)
with failable(
compose,
self.can_fail,
variant,
arch,
"live",
cmd.get("subvariant"),
logger=self.pool._logger,
):
self.worker(compose, cmd, variant, arch, num)
def worker(self, compose, cmd, variant, arch, num):
self.basename = "%(name)s-%(version)s-%(release)s" % cmd
log_file = compose.paths.log.log_file(arch, "liveimage-%s" % self.basename)
subvariant = cmd.pop("subvariant")
imgname = "%s-%s-%s-%s" % (
compose.ci_base.release.short,
subvariant,
"Live" if cmd["type"] == "live" else "Disk",
arch,
)
msg = "Creating ISO (arch: %s, variant: %s): %s" % (
arch,
variant,
self.basename,
)
self.pool.log_info("[BEGIN] %s" % msg)
koji_wrapper = KojiWrapper(compose.conf["koji_profile"])
_, version = compose.compose_id.rsplit("-", 1)
name = cmd["name"] or imgname
version = cmd["version"] or version
archive = False
if cmd["specfile"] and not cmd["scratch"]:
# Non-scratch builds are allowed only for rpm-wrapped images
archive = True
koji_cmd = koji_wrapper.get_create_image_cmd(
name,
version,
cmd["target"],
cmd["build_arch"],
cmd["ks_file"],
cmd["repos"],
image_type=cmd["type"],
wait=True,
archive=archive,
specfile=cmd["specfile"],
release=cmd["release"],
ksurl=cmd["ksurl"],
)
# avoid race conditions?
# Kerberos authentication failed:
# Permission denied in replay cache code (-1765328215)
time.sleep(num * 3)
output = koji_wrapper.run_blocking_cmd(koji_cmd, log_file=log_file)
if output["retcode"] != 0:
raise RuntimeError(
"LiveImage task failed: %s. See %s for more details."
% (output["task_id"], log_file)
)
# copy finished image to isos/
image_path = [
path
for path in koji_wrapper.get_image_path(output["task_id"])
if self._is_image(path)
]
if len(image_path) != 1:
raise RuntimeError(
"Got %d images from task %d, expected 1."
% (len(image_path), output["task_id"])
)
image_path = image_path[0]
filename = cmd.get("filename") or os.path.basename(image_path)
destination = os.path.join(cmd["dest_dir"], filename)
shutil.copy2(image_path, destination)
# copy finished rpm to isos/ (if rpm wrapped ISO was built)
if cmd["specfile"]:
rpm_paths = koji_wrapper.get_wrapped_rpm_path(output["task_id"])
if cmd["sign"]:
# Sign the rpm wrapped images and get their paths
self.pool.log_info(
"Signing rpm wrapped images in task_id: %s (expected key ID: %s)"
% (output["task_id"], compose.conf.get("signing_key_id"))
)
signed_rpm_paths = self._sign_image(
koji_wrapper, compose, cmd, output["task_id"]
)
if signed_rpm_paths:
rpm_paths = signed_rpm_paths
for rpm_path in rpm_paths:
shutil.copy2(rpm_path, cmd["dest_dir"])
if cmd["type"] == "live":
# ISO manifest only makes sense for live images
self._write_manifest(destination)
self._add_to_images(
compose,
variant,
subvariant,
arch,
cmd["type"],
self._get_format(image_path),
destination,
)
self.pool.log_info("[DONE ] %s (task id: %s)" % (msg, output["task_id"]))
def _add_to_images(self, compose, variant, subvariant, arch, type, format, path):
"""Adds the image to images.json"""
img = Image(compose.im)
img.type = "raw-xz" if type == "appliance" else type
img.format = format
img.path = os.path.relpath(path, compose.paths.compose.topdir())
img.mtime = get_mtime(path)
img.size = get_file_size(path)
img.arch = arch
img.disc_number = 1 # We don't expect multiple disks
img.disc_count = 1
img.bootable = True
img.subvariant = subvariant
setattr(img, "can_fail", self.can_fail)
setattr(img, "deliverable", "live")
compose.im.add(variant=variant.uid, arch=arch, image=img)
def _is_image(self, path):
for ext in self.EXTS:
if path.endswith(ext):
return True
return False
def _get_format(self, path):
"""Get format based on extension."""
for ext in self.EXTS:
if path.endswith(ext):
return ext[1:]
raise RuntimeError("Getting format for unknown image %s" % path)
def _write_manifest(self, iso_path):
"""Generate manifest for ISO at given path.
:param iso_path: (str) absolute path to the ISO
"""
dir, filename = os.path.split(iso_path)
run("cd %s && %s" % (shlex_quote(dir), iso.get_manifest_cmd(filename)))
def _sign_image(self, koji_wrapper, compose, cmd, koji_task_id):
signing_key_id = compose.conf.get("signing_key_id")
signing_command = compose.conf.get("signing_command")
if not signing_key_id:
self.pool.log_warning(
"Signing is enabled but signing_key_id is not specified"
)
self.pool.log_warning("Signing skipped")
return None
if not signing_command:
self.pool.log_warning(
"Signing is enabled but signing_command is not specified"
)
self.pool.log_warning("Signing skipped")
return None
# Prepare signing log file
signing_log_file = compose.paths.log.log_file(
cmd["build_arch"], "live_images-signing-%s" % self.basename
)
# Sign the rpm wrapped images
try:
sign_builds_in_task(
koji_wrapper,
koji_task_id,
signing_command,
log_file=signing_log_file,
signing_key_password=compose.conf.get("signing_key_password"),
)
except RuntimeError:
self.pool.log_error(
"Error while signing rpm wrapped images. See log: %s" % signing_log_file
)
raise
# Get paths to the signed rpms
signing_key_id = signing_key_id.lower() # Koji uses lowercase in paths
rpm_paths = koji_wrapper.get_signed_wrapped_rpms_paths(
koji_task_id, signing_key_id
)
# Wait until files are available
if wait_paths(rpm_paths, 60 * 15):
# Files are ready
return rpm_paths
# Signed RPMs are not available
self.pool.log_warning("Signed files are not available: %s" % rpm_paths)
self.pool.log_warning("Unsigned files will be used")
return None
def wait_paths(paths, timeout=60):
started = time.time()
remaining = paths[:]
while True:
for path in remaining[:]:
if os.path.exists(path):
remaining.remove(path)
if not remaining:
break
time.sleep(1)
if timeout >= 0 and (time.time() - started) > timeout:
return False
return True
def sign_builds_in_task(
koji_wrapper, task_id, signing_command, log_file=None, signing_key_password=None
):
# Get list of nvrs that should be signed
nvrs = koji_wrapper.get_build_nvrs(task_id)
if not nvrs:
# No builds are available (scratch build, etc.?)
return
# Append builds to sign_cmd
for nvr in nvrs:
signing_command += " '%s'" % nvr
# Log signing command before password is filled in it
if log_file:
save_to_file(log_file, signing_command, append=True)
# Fill password into the signing command
if signing_key_password:
signing_command = signing_command % {
"signing_key_password": signing_key_password
}
# Sign the builds
run(signing_command, can_fail=False, show_cmd=False, logfile=log_file)
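`sign_builds_in_task` keeps the key password out of the log by appending the quoted NVRs first, logging the command, and only then interpolating the `%(signing_key_password)s` placeholder. A self-contained sketch of that templating (the command shape, NVR and password are made up):

signing_command = "sign-rpm --password=%(signing_key_password)s"  # hypothetical command
nvrs = ["fake-live-1.0-1.el9"]                                    # hypothetical NVRs
for nvr in nvrs:
    signing_command += " '%s'" % nvr
print(signing_command)   # safe to log: placeholder still unexpanded
print(signing_command % {"signing_key_password": "s3cret"})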

pungi/phases/livemedia.py

@ -71,7 +71,6 @@ class LiveMediaPhase(PhaseLoggerMixin, ImageConfigMixin, ConfigGuardedPhase):
             "ksurl": self.get_ksurl(image_conf),
             "ksversion": image_conf.get("ksversion"),
             "scratch": image_conf.get("scratch", False),
-            "nomacboot": image_conf.get("nomacboot", False),
             "release": self.get_release(image_conf),
             "skip_tag": image_conf.get("skip_tag"),
             "name": name,
@ -141,7 +140,7 @@ class LiveMediaThread(WorkerThread):
         )
         self.pool.log_info("[BEGIN] %s" % msg)
-        koji_wrapper = KojiWrapper(compose)
+        koji_wrapper = KojiWrapper(compose.conf["koji_profile"])
         cmd = self._get_cmd(koji_wrapper, config)
         log_file = self._get_log_file(compose, variant, subvariant, config)
@ -182,9 +181,7 @@ class LiveMediaThread(WorkerThread):
         # let's not change filename of koji outputs
         image_dest = os.path.join(image_dir, os.path.basename(image_info["path"]))
-        src_file = compose.koji_downloader.get_file(
-            os.path.realpath(image_info["path"])
-        )
+        src_file = os.path.realpath(image_info["path"])
         linker.link(src_file, image_dest, link_type=link_type)
         # Update image manifest
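These hunks show the two recurring themes of this backport: `KojiWrapper` is constructed from a profile name rather than from the compose object, and Koji outputs are linked straight from `os.path.realpath` instead of being fetched through `compose.koji_downloader`. A toy sketch of the constructor difference; both shapes are inferred from the calls in this diff, not from kojiwrapper itself:

class KojiWrapper:                       # illustrative stand-in only
    def __init__(self, profile_or_compose):
        if isinstance(profile_or_compose, str):
            self.profile = profile_or_compose                       # this branch
        else:
            self.profile = profile_or_compose.conf["koji_profile"]  # upstream

class FakeCompose:
    conf = {"koji_profile": "koji"}

assert KojiWrapper("koji").profile == KojiWrapper(FakeCompose()).profile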

pungi/phases/osbs.py

@ -1,29 +1,24 @@
 # -*- coding: utf-8 -*-
-import copy
 import fnmatch
 import json
 import os
 from kobo.threads import ThreadPool, WorkerThread
 from kobo import shortcuts
-from productmd.rpms import Rpms
-from six.moves import configparser
 from .base import ConfigGuardedPhase, PhaseLoggerMixin
 from .. import util
 from ..wrappers import kojiwrapper
-from ..wrappers.scm import get_file_from_scm
 class OSBSPhase(PhaseLoggerMixin, ConfigGuardedPhase):
     name = "osbs"
-    def __init__(self, compose, pkgset_phase, buildinstall_phase):
+    def __init__(self, compose):
         super(OSBSPhase, self).__init__(compose)
         self.pool = ThreadPool(logger=self.logger)
+        self.pool.metadata = {}
         self.pool.registries = {}
-        self.pool.pkgset_phase = pkgset_phase
-        self.pool.buildinstall_phase = buildinstall_phase
     def run(self):
         for variant in self.compose.get_variants():
@ -33,6 +28,15 @@ class OSBSPhase(PhaseLoggerMixin, ConfigGuardedPhase):
         self.pool.start()
+    def dump_metadata(self):
+        """Create a file with image metadata if the phase actually ran."""
+        if self._skipped:
+            return
+        with open(self.compose.paths.compose.metadata("osbs.json"), "w") as f:
+            json.dump(
+                self.pool.metadata, f, indent=4, sort_keys=True, separators=(",", ": ")
+            )
     def request_push(self):
         """Store configuration data about where to push the created images and
         then send the same data to message bus.
@ -83,8 +87,8 @@ class OSBSThread(WorkerThread):
     def worker(self, compose, variant, config):
         msg = "OSBS task for variant %s" % variant.uid
         self.pool.log_info("[BEGIN] %s" % msg)
-        original_config = copy.deepcopy(config)
+        koji = kojiwrapper.KojiWrapper(compose.conf["koji_profile"])
+        koji.login()
         # Start task
         source = config.pop("url")
@ -100,99 +104,87 @@ class OSBSThread(WorkerThread):
         config["yum_repourls"] = repos
+        task_id = koji.koji_proxy.buildContainer(
+            source, target, config, priority=priority
+        )
+        # Wait for it to finish and capture the output into log file (even
+        # though there is not much there).
         log_dir = os.path.join(compose.paths.log.topdir(), "osbs")
         util.makedirs(log_dir)
         log_file = os.path.join(
             log_dir, "%s-%s-watch-task.log" % (variant.uid, self.num)
         )
-        reuse_file = log_file[:-4] + ".reuse.json"
-        try:
-            image_conf = self._get_image_conf(compose, original_config)
-        except Exception as e:
-            image_conf = None
-            self.pool.log_info(
-                "Can't get image-build.conf for variant: %s source: %s - %s"
-                % (variant.uid, source, str(e))
-            )
-        koji = kojiwrapper.KojiWrapper(compose)
-        koji.login()
-        task_id = self._try_to_reuse(
-            compose, variant, original_config, image_conf, reuse_file
-        )
-        if not task_id:
-            task_id = koji.koji_proxy.buildContainer(
-                source, target, config, priority=priority
-            )
-        koji.save_task_id(task_id)
-        # Wait for it to finish and capture the output into log file (even
-        # though there is not much there).
         if koji.watch_task(task_id, log_file) != 0:
             raise RuntimeError(
-                "OSBS task failed: %s. See %s for details" % (task_id, log_file)
+                "OSBS: task %s failed: see %s for details" % (task_id, log_file)
             )
         scratch = config.get("scratch", False)
-        nvr, archive_ids = add_metadata(variant, task_id, compose, scratch)
+        nvr = self._add_metadata(variant, task_id, compose, scratch)
         if nvr:
             registry = get_registry(compose, nvr, registry)
             if registry:
                 self.pool.registries[nvr] = registry
-        self._write_reuse_metadata(
-            compose,
-            variant,
-            original_config,
-            image_conf,
-            task_id,
-            archive_ids,
-            reuse_file,
-        )
-        self.pool.log_info("[DONE ] %s (task id: %s)" % (msg, task_id))
+        self.pool.log_info("[DONE ] %s" % msg)
-    def _get_image_conf(self, compose, config):
-        """Get image-build.conf from git repo.
-        :param Compose compose: Current compose.
-        :param dict config: One osbs config item of compose.conf["osbs"][$variant]
-        """
-        tmp_dir = compose.mkdtemp(prefix="osbs_")
-        url = config["url"].split("#")
-        if len(url) == 1:
-            url.append(config["git_branch"])
-        filename = "image-build.conf"
-        get_file_from_scm(
-            {
-                "scm": "git",
-                "repo": url[0],
-                "branch": url[1],
-                "file": [filename],
-            },
-            tmp_dir,
-        )
-        c = configparser.ConfigParser()
-        c.read(os.path.join(tmp_dir, filename))
-        return c
-    def _get_ksurl(self, image_conf):
-        """Get ksurl from image-build.conf"""
-        ksurl = image_conf.get("image-build", "ksurl")
-        if ksurl:
-            resolver = util.GitUrlResolver(offline=False)
-            return resolver(ksurl)
-        else:
-            return None
+    def _add_metadata(self, variant, task_id, compose, is_scratch):
+        # Create new Koji session. The task could take so long to finish that
+        # our session will expire. This second session does not need to be
+        # authenticated since it will only do reading operations.
+        koji = kojiwrapper.KojiWrapper(compose.conf["koji_profile"])
+        # Create metadata
+        metadata = {
+            "compose_id": compose.compose_id,
+            "koji_task": task_id,
+        }
+        result = koji.koji_proxy.getTaskResult(task_id)
+        if is_scratch:
+            metadata.update({"repositories": result["repositories"]})
+            # add a fake arch of 'scratch', so we can construct the metadata
+            # in same data structure as real builds.
+            self.pool.metadata.setdefault(variant.uid, {}).setdefault(
+                "scratch", []
+            ).append(metadata)
+            return None
+        else:
+            build_id = int(result["koji_builds"][0])
+            buildinfo = koji.koji_proxy.getBuild(build_id)
+            archives = koji.koji_proxy.listArchives(build_id)
+            nvr = "%(name)s-%(version)s-%(release)s" % buildinfo
+            metadata.update(
+                {
+                    "name": buildinfo["name"],
+                    "version": buildinfo["version"],
+                    "release": buildinfo["release"],
+                    "nvr": nvr,
+                    "creation_time": buildinfo["creation_time"],
+                }
+            )
+            for archive in archives:
+                data = {
+                    "filename": archive["filename"],
+                    "size": archive["size"],
+                    "checksum": archive["checksum"],
+                }
+                data.update(archive["extra"])
+                data.update(metadata)
+                arch = archive["extra"]["image"]["arch"]
+                self.pool.log_debug(
+                    "Created Docker base image %s-%s-%s.%s"
+                    % (metadata["name"], metadata["version"], metadata["release"], arch)
+                )
+                self.pool.metadata.setdefault(variant.uid, {}).setdefault(
+                    arch, []
+                ).append(data)
+            return nvr
     def _get_repo(self, compose, repo, gpgkey=None):
         """
         Return repo file URL of repo, if repo contains "://", it's already a
@ -200,7 +192,7 @@ class OSBSThread(WorkerThread):
         file pointing to that location and return the URL to .repo file.
         """
         if "://" in repo:
-            return repo.replace("$COMPOSE_ID", compose.compose_id)
+            return repo
         if repo.startswith("/"):
             # The repo is an absolute path on the filesystem
@ -219,15 +211,6 @@ class OSBSThread(WorkerThread):
             raise RuntimeError(
                 "There is no variant %s to get repo from to pass to OSBS." % repo
             )
-        cts_url = compose.conf.get("cts_url", None)
-        if cts_url:
-            return os.path.join(
-                cts_url,
-                "api/1/composes",
-                compose.compose_id,
-                "repo/?variant=%s" % variant,
-            )
         repo_path = compose.paths.compose.repository(
             "$basearch", variant, create_dir=False
         )
@ -248,209 +231,3 @@ class OSBSThread(WorkerThread):
             f.write("gpgkey=%s\n" % gpgkey)
         return util.translate_path(compose, repo_file)
-    def _try_to_reuse(self, compose, variant, config, image_conf, reuse_file):
-        """Try to reuse results of old compose.
-        :param Compose compose: Current compose.
-        :param Variant variant: Current variant.
-        :param dict config: One osbs config item of compose.conf["osbs"][$variant]
-        :param ConfigParser image_conf: ConfigParser obj of image-build.conf.
-        :param str reuse_file: Path to reuse metadata file
-        """
-        log_msg = "Cannot reuse old osbs phase results - %s"
-        if not compose.conf["osbs_allow_reuse"]:
-            self.pool.log_info(log_msg % "reuse of old osbs results is disabled.")
-            return False
-        old_reuse_file = compose.paths.old_compose_path(reuse_file)
-        if not old_reuse_file:
-            self.pool.log_info(log_msg % "Can't find old reuse metadata file")
-            return False
-        try:
-            with open(old_reuse_file) as f:
-                old_reuse_metadata = json.load(f)
-        except Exception as e:
-            self.pool.log_info(
-                log_msg % "Can't load old reuse metadata file: %s" % str(e)
-            )
-            return False
-        if old_reuse_metadata["config"] != config:
-            self.pool.log_info(log_msg % "osbs config changed")
-            return False
-        if not image_conf:
-            self.pool.log_info(log_msg % "Can't get image-build.conf")
-            return False
-        # Make sure ksurl not change
-        try:
-            ksurl = self._get_ksurl(image_conf)
-        except Exception as e:
-            self.pool.log_info(
-                log_msg % "Can't get ksurl from image-build.conf - %s" % str(e)
-            )
-            return False
-        if not old_reuse_metadata["ksurl"]:
-            self.pool.log_info(
-                log_msg % "Can't get ksurl from old compose reuse metadata."
-            )
-            return False
-        if ksurl != old_reuse_metadata["ksurl"]:
-            self.pool.log_info(log_msg % "ksurl changed")
-            return False
-        # Make sure buildinstall phase is reused
-        try:
-            arches = image_conf.get("image-build", "arches").split(",")
-        except Exception as e:
-            self.pool.log_info(
-                log_msg % "Can't get arches from image-build.conf - %s" % str(e)
-            )
-        for arch in arches:
-            if not self.pool.buildinstall_phase.reused(variant, arch):
-                self.pool.log_info(
-                    log_msg % "buildinstall phase changed %s.%s" % (variant, arch)
-                )
-                return False
-        # Make sure rpms installed in image exists in current compose
-        rpm_manifest_file = compose.paths.compose.metadata("rpms.json")
-        rpm_manifest = Rpms()
-        rpm_manifest.load(rpm_manifest_file)
-        rpms = set()
-        for variant in rpm_manifest.rpms:
-            for arch in rpm_manifest.rpms[variant]:
-                for src in rpm_manifest.rpms[variant][arch]:
-                    for nevra in rpm_manifest.rpms[variant][arch][src]:
-                        rpms.add(nevra)
-        for nevra in old_reuse_metadata["rpmlist"]:
-            if nevra not in rpms:
-                self.pool.log_info(
-                    log_msg % "%s does not exist in current compose" % nevra
-                )
-                return False
-        self.pool.log_info(
-            "Reusing old OSBS task %d result" % old_reuse_file["task_id"]
-        )
-        return old_reuse_file["task_id"]
-    def _write_reuse_metadata(
-        self, compose, variant, config, image_conf, task_id, archive_ids, reuse_file
-    ):
-        """Write metadata to file for reusing.
-        :param Compose compose: Current compose.
-        :param Variant variant: Current variant.
-        :param dict config: One osbs config item of compose.conf["osbs"][$variant]
-        :param ConfigParser image_conf: ConfigParser obj of image-build.conf.
-        :param int task_id: Koji task id of osbs task.
-        :param list archive_ids: List of koji archive id
-        :param str reuse_file: Path to reuse metadata file.
-        """
-        msg = "Writing reuse metadata file %s" % reuse_file
-        compose.log_info(msg)
-        rpmlist = set()
-        koji = kojiwrapper.KojiWrapper(compose)
-        for archive_id in archive_ids:
-            rpms = koji.koji_proxy.listRPMs(imageID=archive_id)
-            for item in rpms:
-                if item["epoch"]:
-                    rpmlist.add(
-                        "%s:%s-%s-%s.%s"
-                        % (
-                            item["name"],
-                            item["epoch"],
-                            item["version"],
-                            item["release"],
-                            item["arch"],
-                        )
-                    )
-                else:
-                    rpmlist.add("%s.%s" % (item["nvr"], item["arch"]))
-        try:
-            ksurl = self._get_ksurl(image_conf)
-        except Exception:
-            ksurl = None
-        data = {
-            "config": config,
-            "ksurl": ksurl,
-            "rpmlist": sorted(rpmlist),
-            "task_id": task_id,
-        }
-        try:
-            with open(reuse_file, "w") as f:
-                json.dump(data, f, indent=4)
-        except Exception as e:
-            compose.log_info(msg + " failed - %s" % str(e))
-def add_metadata(variant, task_id, compose, is_scratch):
-    """Given a task ID, find details about the container and add it to global
-    metadata."""
-    # Create new Koji session. The task could take so long to finish that
-    # our session will expire. This second session does not need to be
-    # authenticated since it will only do reading operations.
-    koji = kojiwrapper.KojiWrapper(compose)
-    # Create metadata
-    metadata = {
-        "compose_id": compose.compose_id,
-        "koji_task": task_id,
-    }
-    result = koji.koji_proxy.getTaskResult(task_id)
-    if is_scratch:
-        metadata.update({"repositories": result["repositories"]})
-        # add a fake arch of 'scratch', so we can construct the metadata
-        # in same data structure as real builds.
-        compose.containers_metadata.setdefault(variant.uid, {}).setdefault(
-            "scratch", []
-        ).append(metadata)
-        return None, []
-    else:
-        build_id = int(result["koji_builds"][0])
-        buildinfo = koji.koji_proxy.getBuild(build_id)
-        archives = koji.koji_proxy.listArchives(build_id, type="image")
-        nvr = "%(name)s-%(version)s-%(release)s" % buildinfo
-        metadata.update(
-            {
-                "name": buildinfo["name"],
-                "version": buildinfo["version"],
-                "release": buildinfo["release"],
-                "nvr": nvr,
-                "creation_time": buildinfo["creation_time"],
-            }
-        )
-        archive_ids = []
-        for archive in archives:
-            data = {
-                "filename": archive["filename"],
-                "size": archive["size"],
-                "checksum": archive["checksum"],
-            }
-            data.update(archive["extra"])
-            data.update(metadata)
-            arch = archive["extra"]["image"]["arch"]
-            compose.log_debug(
-                "Created Docker base image %s-%s-%s.%s"
-                % (metadata["name"], metadata["version"], metadata["release"], arch)
-            )
-            compose.containers_metadata.setdefault(variant.uid, {}).setdefault(
-                arch, []
-            ).append(data)
-            archive_ids.append(archive["id"])
-        return nvr, archive_ids
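Both sides of the `_get_repo` hunk share the same dispatch: a string containing "://" is used as a URL directly (upstream additionally expands $COMPOSE_ID), while an absolute path or a variant UID ends up wrapped in a generated .repo file. A rough, runnable sketch of that decision, with the .repo generation stubbed out and all names illustrative:

def resolve_repo(repo, compose_id="Fake-9-20220322.0"):
    if "://" in repo:
        return repo.replace("$COMPOSE_ID", compose_id)   # upstream behaviour
    # absolute path or variant UID: pungi writes a .repo file and returns
    # its translated URL; stubbed out here
    return "http://example.com/work/repo/%s.repo" % repo.strip("/").replace("/", "-")

print(resolve_repo("https://example.com/$COMPOSE_ID/os"))
print(resolve_repo("/mnt/koji/compose/os"))
print(resolve_repo("Server"))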

pungi/phases/osbuild.py

@ -27,35 +27,6 @@ class OSBuildPhase(
         arches = set(image_conf["arches"]) & arches
         return sorted(arches)
-    @staticmethod
-    def _get_repo_urls(compose, repos, arch="$basearch"):
-        """
-        Get list of repos with resolved repo URLs. Preserve repos defined
-        as dicts.
-        """
-        resolved_repos = []
-        for repo in repos:
-            if isinstance(repo, dict):
-                try:
-                    url = repo["baseurl"]
-                except KeyError:
-                    raise RuntimeError(
-                        "`baseurl` is required in repo dict %s" % str(repo)
-                    )
-                url = util.get_repo_url(compose, url, arch=arch)
-                if url is None:
-                    raise RuntimeError("Failed to resolve repo URL for %s" % str(repo))
-                repo["baseurl"] = url
-                resolved_repos.append(repo)
-            else:
-                repo = util.get_repo_url(compose, repo, arch=arch)
-                if repo is None:
-                    raise RuntimeError("Failed to resolve repo URL for %s" % repo)
-                resolved_repos.append(repo)
-        return resolved_repos
     def _get_repo(self, image_conf, variant):
         """
         Get a list of repos. First included are those explicitly listed in
@ -67,7 +38,7 @@ class OSBuildPhase(
         if not variant.is_empty and variant.uid not in repos:
             repos.append(variant.uid)
-        return OSBuildPhase._get_repo_urls(self.compose, repos, arch="$arch")
+        return util.get_repo_urls(self.compose, repos, arch="$arch")
     def run(self):
         for variant in self.compose.get_variants():
@ -125,12 +96,7 @@ class RunOSBuildThread(WorkerThread):
         self.can_fail = can_fail
         self.num = num
         with util.failable(
-            compose,
-            can_fail,
-            variant,
-            "*",
-            "osbuild",
-            logger=self.pool._logger,
+            compose, can_fail, variant, "*", "osbuild", logger=self.pool._logger,
         ):
             self.worker(
                 compose, variant, config, arches, version, release, target, repo
@ -139,30 +105,11 @@ class RunOSBuildThread(WorkerThread):
     def worker(self, compose, variant, config, arches, version, release, target, repo):
         msg = "OSBuild task for variant %s" % variant.uid
         self.pool.log_info("[BEGIN] %s" % msg)
-        koji = kojiwrapper.KojiWrapper(compose)
+        koji = kojiwrapper.KojiWrapper(compose.conf["koji_profile"])
         koji.login()
-        ostree = {}
-        if config.get("ostree_url"):
-            ostree["url"] = config["ostree_url"]
-        if config.get("ostree_ref"):
-            ostree["ref"] = config["ostree_ref"]
-        if config.get("ostree_parent"):
-            ostree["parent"] = config["ostree_parent"]
         # Start task
         opts = {"repo": repo}
-        if ostree:
-            opts["ostree"] = ostree
-        upload_options = config.get("upload_options")
-        if upload_options:
-            opts["upload_options"] = upload_options
-        customizations = config.get("customizations")
-        if customizations:
-            opts["customizations"] = customizations
         if release:
             opts["release"] = release
         task_id = koji.koji_proxy.osbuildImage(
@ -175,8 +122,6 @@ class RunOSBuildThread(WorkerThread):
             opts=opts,
         )
-        koji.save_task_id(task_id)
         # Wait for it to finish and capture the output into log file.
         log_dir = os.path.join(compose.paths.log.topdir(), "osbuild")
         util.makedirs(log_dir)
@ -185,13 +130,13 @@ class RunOSBuildThread(WorkerThread):
         )
         if koji.watch_task(task_id, log_file) != 0:
             raise RuntimeError(
-                "OSBuild task failed: %s. See %s for details" % (task_id, log_file)
+                "OSBuild: task %s failed: see %s for details" % (task_id, log_file)
             )
         # Refresh koji session which may have timed out while the task was
         # running. Watching is done via a subprocess, so the session is
         # inactive.
-        koji = kojiwrapper.KojiWrapper(compose)
+        koji = kojiwrapper.KojiWrapper(compose.conf["koji_profile"])
         # Get build id via the task's result json data
         result = koji.koji_proxy.getTaskResult(task_id)
@ -203,7 +148,7 @@ class RunOSBuildThread(WorkerThread):
         # architecture, but we don't verify that.
         build_info = koji.koji_proxy.getBuild(build_id)
         for archive in koji.koji_proxy.listArchives(buildID=build_id):
-            if archive["type_name"] not in EXTENSIONS:
+            if archive["type_name"] not in config["image_types"]:
                 # Ignore values that are not of required types.
                 continue
@ -216,36 +161,22 @@ class RunOSBuildThread(WorkerThread):
             # image_dir is absolute path to which the image should be copied.
             # We also need the same path as relative to compose directory for
            # including in the metadata.
-            if archive["type_name"] == "iso":
-                # If the produced image is actually an ISO, it should go to
-                # iso/ subdirectory.
-                image_dir = compose.paths.compose.iso_dir(arch, variant)
-                rel_image_dir = compose.paths.compose.iso_dir(
-                    arch, variant, relative=True
-                )
-            else:
-                image_dir = compose.paths.compose.image_dir(variant) % {"arch": arch}
-                rel_image_dir = compose.paths.compose.image_dir(
-                    variant, relative=True
-                ) % {"arch": arch}
+            image_dir = compose.paths.compose.image_dir(variant) % {"arch": arch}
+            rel_image_dir = compose.paths.compose.image_dir(variant, relative=True) % {
+                "arch": arch
+            }
             util.makedirs(image_dir)
             image_dest = os.path.join(image_dir, archive["filename"])
-            src_file = compose.koji_downloader.get_file(
-                os.path.join(
-                    koji.koji_module.pathinfo.imagebuild(build_info),
-                    archive["filename"],
-                ),
-            )
+            src_file = os.path.join(
+                koji.koji_module.pathinfo.imagebuild(build_info), archive["filename"]
+            )
             linker.link(src_file, image_dest, link_type=compose.conf["link_type"])
-            for suffix in EXTENSIONS[archive["type_name"]]:
-                if archive["filename"].endswith(suffix):
-                    break
-            else:
-                # No suffix matched.
+            suffix = archive["filename"].rsplit(".", 1)[-1]
+            if suffix not in EXTENSIONS[archive["type_name"]]:
                 raise RuntimeError(
                     "Failed to generate metadata. Format %s doesn't match type %s"
                     % (suffix, archive["type_name"])
@ -253,24 +184,7 @@ class RunOSBuildThread(WorkerThread):
             # Update image manifest
             img = Image(compose.im)
-            # Get the manifest type from the config if supplied, otherwise we
-            # determine the manifest type based on the koji output
-            img.type = config.get("manifest_type")
-            if not img.type:
-                if archive["type_name"] != "iso":
-                    img.type = archive["type_name"]
-                else:
-                    fn = archive["filename"].lower()
-                    if "ostree" in fn:
-                        img.type = "dvd-ostree-osbuild"
-                    elif "live" in fn:
-                        img.type = "live-osbuild"
-                    elif "netinst" in fn or "boot" in fn:
-                        img.type = "boot"
-                    else:
-                        img.type = "dvd"
+            img.type = archive["type_name"]
             img.format = suffix
             img.path = os.path.join(rel_image_dir, archive["filename"])
             img.mtime = util.get_mtime(image_dest)
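The metadata hunk is the one behavioural trap in this file: the branch derives the format by splitting on the last dot, so a multi-part suffix such as raw.xz can never match its EXTENSIONS entry, whereas the upstream endswith() loop handles it. A runnable sketch with assumed table values:

EXTENSIONS = {"qcow2": ["qcow2"], "raw-xz": ["raw.xz"]}   # illustrative values

def format_for(archive):
    suffix = archive["filename"].rsplit(".", 1)[-1]
    if suffix not in EXTENSIONS[archive["type_name"]]:
        raise RuntimeError(
            "Failed to generate metadata. Format %s doesn't match type %s"
            % (suffix, archive["type_name"])
        )
    return suffix

print(format_for({"type_name": "qcow2", "filename": "disk.qcow2"}))   # qcow2
try:
    format_for({"type_name": "raw-xz", "filename": "disk.raw.xz"})
except RuntimeError as e:
    print(e)   # the extracted suffix is "xz", which is not in ["raw.xz"]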

pungi/phases/ostree.py

@ -85,7 +85,7 @@ class OSTreeThread(WorkerThread):
         comps_repo = compose.paths.work.comps_repo(
             "$basearch", variant=variant, create_dir=False
         )
-        repos = shortcuts.force_list(config.get("repo", [])) + self.repos
+        repos = shortcuts.force_list(config["repo"]) + self.repos
         if compose.has_comps:
             repos.append(translate_path(compose, comps_repo))
         repos = get_repo_dicts(repos, logger=self.pool)
@ -165,12 +165,9 @@ class OSTreeThread(WorkerThread):
                 ("update-summary", config.get("update_summary", False)),
                 ("ostree-ref", config.get("ostree_ref")),
                 ("force-new-commit", config.get("force_new_commit", False)),
-                ("unified-core", config.get("unified_core", False)),
             ]
         )
-        default_packages = ["pungi", "ostree", "rpm-ostree"]
-        additional_packages = config.get("runroot_packages", [])
-        packages = default_packages + additional_packages
+        packages = ["pungi", "ostree", "rpm-ostree"]
         log_file = os.path.join(self.logdir, "runroot.log")
         mounts = [compose.topdir, config["ostree_repo"]]
         runroot = Runroot(compose, phase="ostree")
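Reverting to `config["repo"]` makes the option mandatory again; `force_list` is what lets it be either a single string or a list. A minimal demonstration (the URLs are placeholders):

from kobo.shortcuts import force_list

self_repos = ["http://example.com/compose/$basearch/os"]   # stand-in for self.repos
print(force_list("http://example.com/extra") + self_repos)
print(force_list(["r1", "r2"]) + self_repos)
# config["repo"] raises KeyError when the option is missing, which is
# exactly the behavioural difference against config.get("repo", []):
try:
    {}["repo"]
except KeyError:
    print("a missing 'repo' option now fails loudly")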

pungi/phases/ostree_container.py (deleted file, 190 lines)

@ -1,190 +0,0 @@
# -*- coding: utf-8 -*-
import copy
import json
import os
from kobo import shortcuts
from kobo.threads import ThreadPool, WorkerThread
from productmd.images import Image
from pungi.runroot import Runroot
from .base import ConfigGuardedPhase
from .. import util
from ..util import get_repo_dicts, translate_path
from ..wrappers import scm
class OSTreeContainerPhase(ConfigGuardedPhase):
name = "ostree_container"
def __init__(self, compose, pkgset_phase=None):
super(OSTreeContainerPhase, self).__init__(compose)
self.pool = ThreadPool(logger=self.compose._logger)
self.pkgset_phase = pkgset_phase
def get_repos(self):
return [
translate_path(
self.compose,
self.compose.paths.work.pkgset_repo(
pkgset.name, "$basearch", create_dir=False
),
)
for pkgset in self.pkgset_phase.package_sets
]
def _enqueue(self, variant, arch, conf):
self.pool.add(OSTreeContainerThread(self.pool, self.get_repos()))
self.pool.queue_put((self.compose, variant, arch, conf))
def run(self):
if isinstance(self.compose.conf.get(self.name), dict):
for variant in self.compose.get_variants():
for conf in self.get_config_block(variant):
for arch in conf.get("arches", []) or variant.arches:
self._enqueue(variant, arch, conf)
else:
# Legacy code path to support original configuration.
for variant in self.compose.get_variants():
for arch in variant.arches:
for conf in self.get_config_block(variant, arch):
self._enqueue(variant, arch, conf)
self.pool.start()
class OSTreeContainerThread(WorkerThread):
def __init__(self, pool, repos):
super(OSTreeContainerThread, self).__init__(pool)
self.repos = repos
def process(self, item, num):
compose, variant, arch, config = item
self.num = num
failable_arches = config.get("failable", [])
self.can_fail = util.can_arch_fail(failable_arches, arch)
with util.failable(compose, self.can_fail, variant, arch, "ostree-container"):
self.worker(compose, variant, arch, config)
def worker(self, compose, variant, arch, config):
msg = "OSTree container phase for variant %s, arch %s" % (variant.uid, arch)
self.pool.log_info("[BEGIN] %s" % msg)
workdir = compose.paths.work.topdir("ostree-container-%d" % self.num)
self.logdir = compose.paths.log.topdir(
"%s/%s/ostree-container-%d" % (arch, variant.uid, self.num)
)
repodir = os.path.join(workdir, "config_repo")
self._clone_repo(
compose,
repodir,
config["config_url"],
config.get("config_branch", "main"),
)
repos = shortcuts.force_list(config.get("repo", [])) + self.repos
repos = get_repo_dicts(repos, logger=self.pool)
# copy the original config and update before save to a json file
new_config = copy.copy(config)
# repos in configuration can have repo url set to variant UID,
# update it to have the actual url that we just translated.
new_config.update({"repo": repos})
# remove elements that are unnecessary for the 'pungi-make-ostree
# container' script; it doesn't hurt to keep them, but removing them
# can reduce confusion
for k in [
"treefile",
"config_url",
"config_branch",
"failable",
"version",
]:
new_config.pop(k, None)
# write a json file to save the configuration, so 'pungi-make-ostree tree'
# can take use of it
extra_config_file = os.path.join(workdir, "extra_config.json")
with open(extra_config_file, "w") as f:
json.dump(new_config, f, indent=4)
self._run_ostree_container_cmd(
compose, variant, arch, config, repodir, extra_config_file=extra_config_file
)
self.pool.log_info("[DONE ] %s" % (msg))
def _run_ostree_container_cmd(
self, compose, variant, arch, config, config_repo, extra_config_file=None
):
target_dir = compose.paths.compose.image_dir(variant) % {"arch": arch}
util.makedirs(target_dir)
version = util.version_generator(compose, config.get("version"))
archive_name = "%s-%s-%s" % (
compose.conf["release_short"],
variant.uid,
version,
)
# Run the pungi-make-ostree command locally to create a script to
# execute in runroot environment.
cmd = [
"pungi-make-ostree",
"container",
"--log-dir=%s" % self.logdir,
"--name=%s" % archive_name,
"--path=%s" % target_dir,
"--treefile=%s" % os.path.join(config_repo, config["treefile"]),
"--extra-config=%s" % extra_config_file,
"--version=%s" % version,
]
_, runroot_script = shortcuts.run(cmd, universal_newlines=True)
default_packages = ["ostree", "rpm-ostree", "selinux-policy-targeted"]
additional_packages = config.get("runroot_packages", [])
packages = default_packages + additional_packages
log_file = os.path.join(self.logdir, "runroot.log")
# TODO: Use to get previous build
mounts = [compose.topdir]
runroot = Runroot(compose, phase="ostree_container")
runroot.run(
" && ".join(runroot_script.splitlines()),
log_file=log_file,
arch=arch,
packages=packages,
mounts=mounts,
new_chroot=True,
weight=compose.conf["runroot_weights"].get("ostree"),
)
fullpath = os.path.join(target_dir, "%s.ociarchive" % archive_name)
# Update image manifest
img = Image(compose.im)
# Get the manifest type from the config if supplied, otherwise we
# determine the manifest type based on the koji output
img.type = "ociarchive"
img.format = "ociarchive"
img.path = os.path.relpath(fullpath, compose.paths.compose.topdir())
img.mtime = util.get_mtime(fullpath)
img.size = util.get_file_size(fullpath)
img.arch = arch
img.disc_number = 1
img.disc_count = 1
img.bootable = False
img.subvariant = config.get("subvariant", variant.uid)
setattr(img, "can_fail", self.can_fail)
setattr(img, "deliverable", "ostree-container")
compose.im.add(variant=variant.uid, arch=arch, image=img)
def _clone_repo(self, compose, repodir, url, branch):
scm.get_dir_from_scm(
{"scm": "git", "repo": url, "branch": branch, "dir": "."},
repodir,
compose=compose,
)
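The artifact path produced by `_run_ostree_container_cmd` is plain string assembly. Assuming a release_short of "AlmaLinux", variant "BaseOS", version "9.1" and a made-up target directory, the runroot output would land at:

import os

release_short, variant_uid, version = "AlmaLinux", "BaseOS", "9.1"   # assumed values
target_dir = "/compose/BaseOS/x86_64/images"                         # assumed path
archive_name = "%s-%s-%s" % (release_short, variant_uid, version)
print(os.path.join(target_dir, "%s.ociarchive" % archive_name))
# -> /compose/BaseOS/x86_64/images/AlmaLinux-BaseOS-9.1.ociarchive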

pungi/phases/ostree_installer.py

@ -272,7 +272,6 @@ class OstreeInstallerThread(WorkerThread):
             rootfs_size=config.get("rootfs_size"),
             is_final=compose.supported,
             log_dir=self.logdir,
-            skip_branding=config.get("skip_branding"),
         )
         cmd = "rm -rf %s && %s" % (
             shlex_quote(output_dir),

pungi/phases/pkgset/__init__.py

@ -29,10 +29,13 @@ class PkgsetPhase(PhaseBase):
         self.path_prefix = None
     def run(self):
+        pkgset_source = "PkgsetSource%s" % self.compose.conf["pkgset_source"]
+        from .source import PkgsetSourceContainer
         from . import sources
-        SourceClass = sources.ALL_SOURCES[self.compose.conf["pkgset_source"].lower()]
+        PkgsetSourceContainer.register_module(sources)
+        container = PkgsetSourceContainer()
+        SourceClass = container[pkgset_source]
         self.package_sets, self.path_prefix = SourceClass(self.compose)()
     def validate(self):
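Both variants map the `pkgset_source` option to a source class; they only disagree on the registry. A minimal sketch of the two lookups, with a plain dict standing in for the kobo plugin container and an illustrative class name:

class PkgsetSourceKoji:            # illustrative stand-in for the real source class
    pass

# upstream: plain dict keyed by the lower-cased option value
ALL_SOURCES = {"koji": PkgsetSourceKoji}
assert ALL_SOURCES["Koji".lower()] is PkgsetSourceKoji

# this branch: container keyed by class name, "PkgsetSource" + option value
container = {"PkgsetSourceKoji": PkgsetSourceKoji}   # PkgsetSourceContainer stand-in
assert container["PkgsetSource%s" % "Koji"] is PkgsetSourceKoji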

pungi/phases/pkgset/common.py

@ -28,27 +28,18 @@ from pungi.util import (
     PartialFuncWorkerThread,
     PartialFuncThreadPool,
 )
-from pungi.module_util import (
-    Modulemd,
-    collect_module_defaults,
-    collect_module_obsoletes,
-)
+from pungi.module_util import Modulemd, collect_module_defaults
 from pungi.phases.createrepo import add_modular_metadata
 def populate_arch_pkgsets(compose, path_prefix, global_pkgset):
     result = {}
+    exclusive_noarch = compose.conf["pkgset_exclusive_arch_considers_noarch"]
     for arch in compose.get_arches():
         compose.log_info("Populating package set for arch: %s", arch)
         is_multilib = is_arch_multilib(compose.conf, arch)
         arches = get_valid_arches(arch, is_multilib, add_src=True)
-        pkgset = global_pkgset.subset(
-            arch,
-            arches,
-            exclusive_noarch=compose.conf["pkgset_exclusive_arch_considers_noarch"],
-            inherit_to_noarch=compose.conf["pkgset_inherit_exclusive_arch_to_noarch"],
-        )
+        pkgset = global_pkgset.subset(arch, arches, exclusive_noarch=exclusive_noarch)
         pkgset.save_file_list(
             compose.paths.work.package_list(arch=arch, pkgset=global_pkgset),
             remove_path_prefix=path_prefix,
@ -168,9 +159,6 @@ def _create_arch_repo(worker_thread, args, task_num):
     mod_index = collect_module_defaults(
         compose.paths.work.module_defaults_dir(), names, overrides_dir=overrides_dir
     )
-    mod_index = collect_module_obsoletes(
-        compose.paths.work.module_obsoletes_dir(), names, mod_index
-    )
     for x in mmd:
         mod_index.add_module_stream(x)
     add_modular_metadata(
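The dropped `inherit_to_noarch` keyword is the substantive change here: on this branch the ExclusiveArch filtering of noarch packages (see the `merge` hunk in pkgsets.py below) runs unconditionally. A stub-level sketch of the per-arch call as it reads after the patch, with made-up inputs:

def subset(primary_arch, arch_list, exclusive_noarch=True):
    # Stand-in for PackageSetBase.subset on this branch; the upstream-only
    # inherit_to_noarch knob is gone.
    return {"primary": primary_arch, "arches": arch_list,
            "exclusive_noarch": exclusive_noarch}

exclusive_noarch = True   # compose.conf["pkgset_exclusive_arch_considers_noarch"]
print(subset("x86_64", ["x86_64", "noarch", "src"], exclusive_noarch=exclusive_noarch))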

pungi/phases/pkgset/pkgsets.py

@ -22,22 +22,17 @@ It automatically finds a signed copies according to *sigkey_ordering*.
import itertools import itertools
import json import json
import os import os
import time
import pgpy
import rpm
from six.moves import cPickle as pickle from six.moves import cPickle as pickle
from functools import partial
import kobo.log import kobo.log
import kobo.pkgset import kobo.pkgset
import kobo.rpmlib import kobo.rpmlib
from kobo.shortcuts import compute_file_checksums
from kobo.threads import WorkerThread, ThreadPool from kobo.threads import WorkerThread, ThreadPool
import pungi.wrappers.kojiwrapper
from pungi.util import pkg_is_srpm, copy_all from pungi.util import pkg_is_srpm, copy_all
from pungi.arch import get_valid_arches, is_excluded from pungi.arch import get_valid_arches, is_excluded
from pungi.errors import UnsignedPackagesError
class ExtendedRpmWrapper(kobo.pkgset.SimpleRpmWrapper): class ExtendedRpmWrapper(kobo.pkgset.SimpleRpmWrapper):
@ -149,20 +144,14 @@ class PackageSetBase(kobo.log.LoggingBase):
def raise_invalid_sigkeys_exception(self, rpminfos): def raise_invalid_sigkeys_exception(self, rpminfos):
""" """
Raises UnsignedPackagesError containing details of RPMs with invalid Raises RuntimeError containing details of RPMs with invalid
sigkeys defined in `rpminfos`. sigkeys defined in `rpminfos`.
""" """
def nvr_formatter(package_info): def nvr_formatter(package_info):
epoch_suffix = '' # joins NVR parts of the package with '-' character.
if package_info['epoch'] is not None: return "-".join(
epoch_suffix = ':' + package_info['epoch'] (package_info["name"], package_info["version"], package_info["release"])
return (
f"{package_info['name']}"
f"{epoch_suffix}-"
f"{package_info['version']}-"
f"{package_info['release']}."
f"{package_info['arch']}"
) )
def get_error(sigkeys, infos): def get_error(sigkeys, infos):
@ -177,9 +166,7 @@ class PackageSetBase(kobo.log.LoggingBase):
if not isinstance(rpminfos, dict): if not isinstance(rpminfos, dict):
rpminfos = {self.sigkey_ordering: rpminfos} rpminfos = {self.sigkey_ordering: rpminfos}
raise UnsignedPackagesError( raise RuntimeError("\n".join(get_error(k, v) for k, v in rpminfos.items()))
"\n".join(get_error(k, v) for k, v in rpminfos.items())
)
def read_packages(self, rpms, srpms): def read_packages(self, rpms, srpms):
srpm_pool = ReaderPool(self, self._logger) srpm_pool = ReaderPool(self, self._logger)
@ -213,31 +200,16 @@ class PackageSetBase(kobo.log.LoggingBase):
return self.rpms_by_arch return self.rpms_by_arch
def subset( def subset(self, primary_arch, arch_list, exclusive_noarch=True):
self, primary_arch, arch_list, exclusive_noarch=True, inherit_to_noarch=True
):
"""Create a subset of this package set that only includes """Create a subset of this package set that only includes
packages compatible with""" packages compatible with"""
pkgset = PackageSetBase( pkgset = PackageSetBase(
self.name, self.sigkey_ordering, logger=self._logger, arches=arch_list self.name, self.sigkey_ordering, logger=self._logger, arches=arch_list
) )
pkgset.merge( pkgset.merge(self, primary_arch, arch_list, exclusive_noarch=exclusive_noarch)
self,
primary_arch,
arch_list,
exclusive_noarch=exclusive_noarch,
inherit_to_noarch=inherit_to_noarch,
)
return pkgset return pkgset
def merge( def merge(self, other, primary_arch, arch_list, exclusive_noarch=True):
self,
other,
primary_arch,
arch_list,
exclusive_noarch=True,
inherit_to_noarch=True,
):
""" """
Merge ``other`` package set into this instance. Merge ``other`` package set into this instance.
""" """
@ -276,7 +248,7 @@ class PackageSetBase(kobo.log.LoggingBase):
if i.file_path in self.file_cache: if i.file_path in self.file_cache:
# TODO: test if it really works # TODO: test if it really works
continue continue
if inherit_to_noarch and exclusivearch_list and arch == "noarch": if exclusivearch_list and arch == "noarch":
if is_excluded(i, exclusivearch_list, logger=self._logger): if is_excluded(i, exclusivearch_list, logger=self._logger):
continue continue
@ -343,11 +315,6 @@ class FilelistPackageSet(PackageSetBase):
return result return result
# This is a marker to indicate package set with only extra builds/tasks and no
# tasks.
MISSING_KOJI_TAG = object()
class KojiPackageSet(PackageSetBase): class KojiPackageSet(PackageSetBase):
def __init__( def __init__(
self, self,
@ -362,9 +329,6 @@ class KojiPackageSet(PackageSetBase):
cache_region=None, cache_region=None,
extra_builds=None, extra_builds=None,
extra_tasks=None, extra_tasks=None,
signed_packages_retries=0,
signed_packages_wait=30,
downloader=None,
): ):
""" """
Creates new KojiPackageSet. Creates new KojiPackageSet.
@ -397,12 +361,9 @@ class KojiPackageSet(PackageSetBase):
:param list extra_tasks: Extra RPMs defined as Koji task IDs to get from Koji :param list extra_tasks: Extra RPMs defined as Koji task IDs to get from Koji
and include in the package set. Useful when building testing compose and include in the package set. Useful when building testing compose
with RPM scratch builds. with RPM scratch builds.
:param int signed_packages_retries: How many times should a search for
signed package be repeated.
:param int signed_packages_wait: How long to wait between search attemts.
""" """
super(KojiPackageSet, self).__init__( super(KojiPackageSet, self).__init__(
name if name != MISSING_KOJI_TAG else "no-tag", name,
sigkey_ordering=sigkey_ordering, sigkey_ordering=sigkey_ordering,
arches=arches, arches=arches,
logger=logger, logger=logger,
@ -416,13 +377,10 @@ class KojiPackageSet(PackageSetBase):
self.extra_builds = extra_builds or [] self.extra_builds = extra_builds or []
self.extra_tasks = extra_tasks or [] self.extra_tasks = extra_tasks or []
self.reuse = None self.reuse = None
self.signed_packages_retries = signed_packages_retries
self.signed_packages_wait = signed_packages_wait
self.downloader = downloader
def __getstate__(self): def __getstate__(self):
result = self.__dict__.copy() result = self.__dict__.copy()
result["koji_profile"] = self.koji_wrapper.profile
del result["koji_wrapper"] del result["koji_wrapper"]
del result["_logger"] del result["_logger"]
if "cache_region" in result: if "cache_region" in result:
@ -430,6 +388,8 @@ class KojiPackageSet(PackageSetBase):
return result return result
def __setstate__(self, data): def __setstate__(self, data):
koji_profile = data.pop("koji_profile")
self.koji_wrapper = pungi.wrappers.kojiwrapper.KojiWrapper(koji_profile)
self._logger = None self._logger = None
self.__dict__.update(data) self.__dict__.update(data)
@ -511,8 +471,7 @@ class KojiPackageSet(PackageSetBase):
response = None response = None
if self.cache_region: if self.cache_region:
cache_key = "%s.get_latest_rpms_%s_%s_%s" % ( cache_key = "KojiPackageSet.get_latest_rpms_%s_%s_%s" % (
str(self.__class__.__name__),
str(tag), str(tag),
str(event), str(event),
str(inherit), str(inherit),
@ -534,83 +493,26 @@ class KojiPackageSet(PackageSetBase):
return response return response
def get_package_path(self, queue_item): def get_package_path(self, queue_item):
rpm_info, build_info = queue_item rpm_info, build_info = queue_item
# Check if this RPM is coming from scratch task. In this case, we already # Check if this RPM is coming from scratch task. In this case, we already
# know the path. # know the path.
if "path_from_task" in rpm_info: if "path_from_task" in rpm_info:
return self.downloader.get_file(rpm_info["path_from_task"]) return rpm_info["path_from_task"]
# we replaced this part because pungi uses way
# of guessing path of package on koji based on sigkey
# we don't need that because all our packages will
# be ready for release
# signature verification is still done during deps resolution
pathinfo = self.koji_wrapper.koji_module.pathinfo pathinfo = self.koji_wrapper.koji_module.pathinfo
paths = []
if "getRPMChecksums" in self.koji_proxy.system.listMethods():
def checksum_validator(keyname, pkg_path):
checksums = self.koji_proxy.getRPMChecksums(
rpm_info["id"], checksum_types=("sha256",)
)
if "sha256" in checksums.get(keyname, {}):
computed = compute_file_checksums(pkg_path, ("sha256",))
if computed["sha256"] != checksums[keyname]["sha256"]:
raise RuntimeError("Checksum mismatch for %s" % pkg_path)
rpm_path = os.path.join(pathinfo.topdir, pathinfo.rpm(rpm_info))
if os.path.isfile(rpm_path):
return rpm_path
else: else:
self.log_warning("RPM %s not found" % rpm_path)
def checksum_validator(keyname, pkg_path):
# Koji doesn't support checksums yet
pass
attempts_left = self.signed_packages_retries + 1
while attempts_left > 0:
for sigkey in self.sigkey_ordering:
if not sigkey:
# we're looking for *signed* copies here
continue
sigkey = sigkey.lower()
rpm_path = os.path.join(
pathinfo.build(build_info), pathinfo.signed(rpm_info, sigkey)
)
if rpm_path not in paths:
paths.append(rpm_path)
path = self.downloader.get_file(
rpm_path, partial(checksum_validator, sigkey)
)
if path:
return path
# No signed copy was found, wait a little and try again.
attempts_left -= 1
if attempts_left > 0:
nvr = "%(name)s-%(version)s-%(release)s" % rpm_info
self.log_debug("Waiting for signed package to appear for %s", nvr)
time.sleep(self.signed_packages_wait)
if None in self.sigkey_ordering or "" in self.sigkey_ordering:
# use an unsigned copy (if allowed)
rpm_path = os.path.join(pathinfo.build(build_info), pathinfo.rpm(rpm_info))
paths.append(rpm_path)
path = self.downloader.get_file(rpm_path, partial(checksum_validator, ""))
if path:
return path
if self._allow_invalid_sigkeys and rpm_info["name"] not in self.packages:
# use an unsigned copy (if allowed)
rpm_path = os.path.join(pathinfo.build(build_info), pathinfo.rpm(rpm_info))
paths.append(rpm_path)
path = self.downloader.get_file(rpm_path)
if path:
self._invalid_sigkey_rpms.append(rpm_info)
return path
self._invalid_sigkey_rpms.append(rpm_info)
self.log_error(
"RPM %s not found for sigs: %s. Paths checked: %s"
% (rpm_info, self.sigkey_ordering, paths)
)
return None return None
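The signed-package lookup above, in condensed form: try each configured sigkey (signed copies only), wait and retry a configurable number of times, and only then fall back to an unsigned copy when None/"" appears in the sigkey list. A self-contained sketch of that order, with the downloader and Koji pathinfo replaced by plain filesystem checks (an assumption made for illustration):

import os
import time

def find_rpm(topdir, rpm_name, sigkeys, retries=3, wait=5):
    """Look for signed copies first, with retries; unsigned copy last."""
    checked = []
    attempts_left = retries + 1
    while attempts_left > 0:
        for sigkey in sigkeys:
            if not sigkey:
                continue  # None/"" mean "unsigned allowed"; handled below
            path = os.path.join(topdir, "signed", sigkey.lower(), rpm_name)
            checked.append(path)
            if os.path.isfile(path):
                return path
        attempts_left -= 1
        if attempts_left > 0:
            time.sleep(wait)  # give the signing pipeline time to catch up
    if None in sigkeys or "" in sigkeys:
        path = os.path.join(topdir, rpm_name)
        if os.path.isfile(path):
            return path
    raise RuntimeError("RPM %s not found for sigs %s; checked: %s"
                       % (rpm_name, sigkeys, checked))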
def populate(self, tag, event=None, inherit=True, include_packages=None): def populate(self, tag, event=None, inherit=True, include_packages=None):
@ -625,7 +527,7 @@ class KojiPackageSet(PackageSetBase):
result_srpms = [] result_srpms = []
include_packages = set(include_packages or []) include_packages = set(include_packages or [])
if isinstance(event, dict): if type(event) is dict:
event = event["id"] event = event["id"]
msg = "Getting latest RPMs (tag: %s, event: %s, inherit: %s)" % ( msg = "Getting latest RPMs (tag: %s, event: %s, inherit: %s)" % (
@ -634,8 +536,6 @@ class KojiPackageSet(PackageSetBase):
inherit, inherit,
) )
self.log_info("[BEGIN] %s" % msg) self.log_info("[BEGIN] %s" % msg)
rpms, builds = [], []
if tag != MISSING_KOJI_TAG:
rpms, builds = self.get_latest_rpms(tag, event, inherit=inherit) rpms, builds = self.get_latest_rpms(tag, event, inherit=inherit)
extra_rpms, extra_builds = self.get_extra_rpms() extra_rpms, extra_builds = self.get_extra_rpms()
rpms += extra_rpms rpms += extra_rpms
@ -741,15 +641,6 @@ class KojiPackageSet(PackageSetBase):
:param include_packages: an iterable of tuples (package name, arch) that should :param include_packages: an iterable of tuples (package name, arch) that should
be included. be included.
""" """
if len(self.sigkey_ordering) > 1 and (
None in self.sigkey_ordering or "" in self.sigkey_ordering
):
self.log_warning(
"Stop writing reuse file as unsigned packages are allowed "
"in the compose."
)
return
reuse_file = compose.paths.work.pkgset_reuse_file(self.name) reuse_file = compose.paths.work.pkgset_reuse_file(self.name)
self.log_info("Writing pkgset reuse file: %s" % reuse_file) self.log_info("Writing pkgset reuse file: %s" % reuse_file)
try: try:
@ -766,13 +657,6 @@ class KojiPackageSet(PackageSetBase):
"srpms_by_name": self.srpms_by_name, "srpms_by_name": self.srpms_by_name,
"extra_builds": self.extra_builds, "extra_builds": self.extra_builds,
"include_packages": include_packages, "include_packages": include_packages,
"inherit_to_noarch": compose.conf[
"pkgset_inherit_exclusive_arch_to_noarch"
],
"exclusive_noarch": compose.conf[
"pkgset_exclusive_arch_considers_noarch"
],
"module_defaults_dir": compose.conf.get("module_defaults_dir"),
}, },
f, f,
protocol=pickle.HIGHEST_PROTOCOL, protocol=pickle.HIGHEST_PROTOCOL,
@ -819,26 +703,20 @@ class KojiPackageSet(PackageSetBase):
% (old_koji_event, koji_event) % (old_koji_event, koji_event)
) )
changed = self.koji_proxy.queryHistory( changed = self.koji_proxy.queryHistory(
tables=["tag_listing", "tag_inheritance"], tables=["tag_listing"], tag=tag, afterEvent=old_koji_event
tag=tag,
afterEvent=min(koji_event, old_koji_event),
beforeEvent=max(koji_event, old_koji_event) + 1,
) )
if changed["tag_listing"]: if changed["tag_listing"]:
self.log_debug("Builds under tag %s changed. Can't reuse." % tag) self.log_debug("Builds under tag %s changed. Can't reuse." % tag)
return False return False
if changed["tag_inheritance"]:
self.log_debug("Tag inheritance %s changed. Can't reuse." % tag)
return False
if inherit: if inherit:
inherit_tags = self.koji_proxy.getFullInheritance(tag, koji_event) inherit_tags = self.koji_proxy.getFullInheritance(tag, koji_event)
for t in inherit_tags: for t in inherit_tags:
changed = self.koji_proxy.queryHistory( changed = self.koji_proxy.queryHistory(
tables=["tag_listing", "tag_inheritance"], tables=["tag_listing"],
tag=t["name"], tag=t["name"],
afterEvent=min(koji_event, old_koji_event), afterEvent=old_koji_event,
beforeEvent=max(koji_event, old_koji_event) + 1, beforeEvent=koji_event + 1,
) )
if changed["tag_listing"]: if changed["tag_listing"]:
self.log_debug( self.log_debug(
@ -846,9 +724,6 @@ class KojiPackageSet(PackageSetBase):
% t["name"] % t["name"]
) )
return False return False
if changed["tag_inheritance"]:
self.log_debug("Tag inheritance %s changed. Can't reuse." % tag)
return False
repo_dir = compose.paths.work.pkgset_repo(tag, create_dir=False) repo_dir = compose.paths.work.pkgset_repo(tag, create_dir=False)
old_repo_dir = compose.paths.old_compose_path(repo_dir) old_repo_dir = compose.paths.old_compose_path(repo_dir)
@ -867,9 +742,6 @@ class KojiPackageSet(PackageSetBase):
self.log_debug("Failed to load reuse file: %s" % str(e)) self.log_debug("Failed to load reuse file: %s" % str(e))
return False return False
inherit_to_noarch = compose.conf["pkgset_inherit_exclusive_arch_to_noarch"]
exclusive_noarch = compose.conf["pkgset_exclusive_arch_considers_noarch"]
module_defaults_dir = compose.conf.get("module_defaults_dir")
if ( if (
reuse_data["allow_invalid_sigkeys"] == self._allow_invalid_sigkeys reuse_data["allow_invalid_sigkeys"] == self._allow_invalid_sigkeys
and reuse_data["packages"] == self.packages and reuse_data["packages"] == self.packages
@ -877,11 +749,6 @@ class KojiPackageSet(PackageSetBase):
and reuse_data["extra_builds"] == self.extra_builds and reuse_data["extra_builds"] == self.extra_builds
and reuse_data["sigkeys"] == self.sigkey_ordering and reuse_data["sigkeys"] == self.sigkey_ordering
and reuse_data["include_packages"] == include_packages and reuse_data["include_packages"] == include_packages
# If the value is not present in reuse data, the compose was
# generated with older version of Pungi. Best to not reuse.
and reuse_data.get("inherit_to_noarch") == inherit_to_noarch
and reuse_data.get("exclusive_noarch") == exclusive_noarch
and reuse_data.get("module_defaults_dir") == module_defaults_dir
): ):
self.log_info("Copying repo data for reuse: %s" % old_repo_dir) self.log_info("Copying repo data for reuse: %s" % old_repo_dir)
copy_all(old_repo_dir, repo_dir) copy_all(old_repo_dir, repo_dir)
@ -896,67 +763,6 @@ class KojiPackageSet(PackageSetBase):
return False return False
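Both variants of the reuse check above ask Koji's queryHistory whether anything was tagged in or out between the two composes; one of them additionally watches tag_inheritance and normalizes the event window with min/max so the order of the two events does not matter. A hedged sketch of that check (assumes a koji ClientSession; the hub URL in the usage note is only an example):

def tag_unchanged_between(session, tag, old_event, new_event):
    """True when neither tagged builds nor tag inheritance changed
    between the two events (order-insensitive, like the check above)."""
    changed = session.queryHistory(
        tables=["tag_listing", "tag_inheritance"],
        tag=tag,
        afterEvent=min(old_event, new_event),
        beforeEvent=max(old_event, new_event) + 1,
    )
    return not changed["tag_listing"] and not changed["tag_inheritance"]

# usage sketch:
# import koji
# session = koji.ClientSession("https://koji.fedoraproject.org/kojihub")
# if tag_unchanged_between(session, "dist-tag", old_event, new_event):
#     ...  # safe to reuse the cached package set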
class KojiMockPackageSet(KojiPackageSet):
def _is_rpm_signed(self, rpm_path) -> bool:
ts = rpm.TransactionSet()
ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
sigkeys = [
sigkey.lower() for sigkey in self.sigkey_ordering
if sigkey is not None
]
if not sigkeys:
return True
with open(rpm_path, 'rb') as fd:
header = ts.hdrFromFdno(fd)
signature = header[rpm.RPMTAG_SIGGPG] or header[rpm.RPMTAG_SIGPGP]
if signature is None:
return False
pgp_msg = pgpy.PGPMessage.from_blob(signature)
return any(
signature.signer.lower() in sigkeys
for signature in pgp_msg.signatures
)
def get_package_path(self, queue_item):
rpm_info, build_info = queue_item
# Check if this RPM is coming from scratch task.
# In this case, we already know the path.
if "path_from_task" in rpm_info:
return rpm_info["path_from_task"]
# we replaced this part because pungi guesses the path of a
# package on koji based on its sigkey; we don't need that
# because all our packages will be ready for release
# signature verification is still done during deps resolution
pathinfo = self.koji_wrapper.koji_module.pathinfo
rpm_path = os.path.join(pathinfo.topdir, pathinfo.rpm(rpm_info))
if os.path.isfile(rpm_path):
if not self._is_rpm_signed(rpm_path):
self._invalid_sigkey_rpms.append(rpm_info)
self.log_error(
'RPM "%s" not found for sigs: "%s". Path checked: "%s"',
rpm_info, self.sigkey_ordering, rpm_path
)
return
return rpm_path
else:
self.log_warning("RPM %s not found" % rpm_path)
return None
def populate(self, tag, event=None, inherit=True, include_packages=None):
result = super().populate(
tag=tag,
event=event,
inherit=inherit,
include_packages=include_packages,
)
return result
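KojiMockPackageSet._is_rpm_signed reads the signature header with rpm's Python bindings and matches the signer key IDs with pgpy. The same check as a standalone helper, assuming python3-rpm and pgpy are importable (the sample path is hypothetical):

import rpm
import pgpy

def rpm_signer_key_ids(rpm_path):
    """Return lowercase key IDs of all PGP signatures on an RPM."""
    ts = rpm.TransactionSet()
    ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)  # read header without verifying
    with open(rpm_path, "rb") as fd:
        header = ts.hdrFromFdno(fd)
    signature = header[rpm.RPMTAG_SIGGPG] or header[rpm.RPMTAG_SIGPGP]
    if signature is None:
        return []  # unsigned package
    msg = pgpy.PGPMessage.from_blob(signature)
    return [sig.signer.lower() for sig in msg.signatures]

# print(rpm_signer_key_ids("/tmp/foo-1.0-1.x86_64.rpm"))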
def _is_src(rpm_info): def _is_src(rpm_info):
"""Check if rpm info object returned by Koji refers to source packages.""" """Check if rpm info object returned by Koji refers to source packages."""
return rpm_info["arch"] in ("src", "nosrc") return rpm_info["arch"] in ("src", "nosrc")


@ -14,6 +14,15 @@
# along with this program; if not, see <https://gnu.org/licenses/>. # along with this program; if not, see <https://gnu.org/licenses/>.
class PkgsetSourceBase(object): import kobo.plugins
class PkgsetSourceBase(kobo.plugins.Plugin):
def __init__(self, compose): def __init__(self, compose):
self.compose = compose self.compose = compose
class PkgsetSourceContainer(kobo.plugins.PluginContainer):
@classmethod
def normalize_name(cls, name):
return name.lower()


@ -1,24 +0,0 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
from .source_koji import PkgsetSourceKoji
from .source_repos import PkgsetSourceRepos
from .source_kojimock import PkgsetSourceKojiMock
ALL_SOURCES = {
"koji": PkgsetSourceKoji,
"repos": PkgsetSourceRepos,
"kojimock": PkgsetSourceKojiMock,
}


@ -23,19 +23,19 @@ from itertools import groupby
from kobo.rpmlib import parse_nvra from kobo.rpmlib import parse_nvra
from kobo.shortcuts import force_list from kobo.shortcuts import force_list
from typing import (
Dict,
AnyStr,
List,
Tuple,
Set,
)
import pungi.wrappers.kojiwrapper import pungi.wrappers.kojiwrapper
from pungi.wrappers.comps import CompsWrapper from pungi.wrappers.comps import CompsWrapper
from pungi.wrappers.mbs import MBSWrapper from pungi.wrappers.mbs import MBSWrapper
import pungi.phases.pkgset.pkgsets import pungi.phases.pkgset.pkgsets
from pungi.arch import getBaseArch from pungi.util import retry, get_arch_variant_data, get_variant_data
from pungi.util import (
retry,
get_arch_variant_data,
get_variant_data,
read_single_module_stream_from_file,
read_single_module_stream_from_string,
)
from pungi.module_util import Modulemd from pungi.module_util import Modulemd
from pungi.phases.pkgset.common import MaterializedPackageSet, get_all_arches from pungi.phases.pkgset.common import MaterializedPackageSet, get_all_arches
@ -190,25 +190,27 @@ def get_koji_modules(compose, koji_wrapper, event, module_info_str):
class PkgsetSourceKoji(pungi.phases.pkgset.source.PkgsetSourceBase): class PkgsetSourceKoji(pungi.phases.pkgset.source.PkgsetSourceBase):
enabled = True
def __call__(self): def __call__(self):
compose = self.compose compose = self.compose
self.koji_wrapper = pungi.wrappers.kojiwrapper.KojiWrapper(compose) koji_profile = compose.conf["koji_profile"]
package_sets = get_pkgset_from_koji(self.compose, self.koji_wrapper) self.koji_wrapper = pungi.wrappers.kojiwrapper.KojiWrapper(koji_profile)
return (package_sets, self.compose.koji_downloader.path_prefix) # path prefix must contain trailing '/'
path_prefix = self.koji_wrapper.koji_module.config.topdir.rstrip("/") + "/"
package_sets = get_pkgset_from_koji(
self.compose, self.koji_wrapper, path_prefix
)
return (package_sets, path_prefix)
def get_pkgset_from_koji(compose, koji_wrapper): def get_pkgset_from_koji(compose, koji_wrapper, path_prefix):
event_info = get_koji_event_info(compose, koji_wrapper) event_info = get_koji_event_info(compose, koji_wrapper)
return populate_global_pkgset(compose, koji_wrapper, event_info) return populate_global_pkgset(compose, koji_wrapper, path_prefix, event_info)
def _add_module_to_variant( def _add_module_to_variant(
koji_wrapper, koji_wrapper, variant, build, add_to_variant_modules=False, compose=None
variant,
build,
add_to_variant_modules=False,
compose=None,
exclude_module_ns=None,
): ):
""" """
Adds module defined by Koji build info to variant. Adds module defined by Koji build info to variant.
@ -218,29 +220,20 @@ def _add_module_to_variant(
:param bool add_to_variant_modules: Adds the modules also to :param bool add_to_variant_modules: Adds the modules also to
variant.modules. variant.modules.
:param compose: Compose object to get filters from :param compose: Compose object to get filters from
:param list exclude_module_ns: Module name:stream which will be excluded.
""" """
mmds = {} mmds = {}
archives = koji_wrapper.koji_proxy.listArchives(build["id"]) archives = koji_wrapper.koji_proxy.listArchives(build["id"])
available_arches = set()
for archive in archives: for archive in archives:
if archive["btype"] != "module": if archive["btype"] != "module":
# Skip non module archives # Skip non module archives
continue continue
typedir = koji_wrapper.koji_module.pathinfo.typedir(build, archive["btype"])
filename = archive["filename"] filename = archive["filename"]
file_path = compose.koji_downloader.get_file(os.path.join(typedir, filename)) file_path = os.path.join(
try: koji_wrapper.koji_module.pathinfo.topdir,
# If there are two dots, the arch is in the middle. MBS uploads 'modules',
# files with actual architecture in the filename, but Pungi deals build['arch'],
# in basearch. This assumes that each arch in the build maps to a build['extra']['typeinfo']['module']['content_koji_tag']
# unique basearch. )
_, arch, _ = filename.split(".")
basearch = getBaseArch(arch)
filename = "modulemd.%s.txt" % basearch
available_arches.add(basearch)
except ValueError:
pass
mmds[filename] = file_path mmds[filename] = file_path
if len(mmds) <= 1: if len(mmds) <= 1:
@ -251,10 +244,6 @@ def _add_module_to_variant(
info = build["extra"]["typeinfo"]["module"] info = build["extra"]["typeinfo"]["module"]
nsvc = "%(name)s:%(stream)s:%(version)s:%(context)s" % info nsvc = "%(name)s:%(stream)s:%(version)s:%(context)s" % info
ns = "%(name)s:%(stream)s" % info
if exclude_module_ns and ns in exclude_module_ns:
return
added = False added = False
@ -263,29 +252,17 @@ def _add_module_to_variant(
compose.log_debug("Module %s is filtered from %s.%s", nsvc, variant, arch) compose.log_debug("Module %s is filtered from %s.%s", nsvc, variant, arch)
continue continue
if arch not in available_arches:
compose.log_debug(
"Module %s is not available for arch %s.%s", nsvc, variant, arch
)
continue
filename = "modulemd.%s.txt" % arch
if filename not in mmds:
raise RuntimeError(
"Module %s does not have metadata for arch %s and is not filtered "
"out via filter_modules option." % (nsvc, arch)
)
try: try:
mod_stream = read_single_module_stream_from_file( mmd = Modulemd.ModuleStream.read_file(
mmds[filename], compose, arch, build mmds["modulemd.%s.txt" % arch], strict=True
) )
except Exception as exc: variant.arch_mmds.setdefault(arch, {})[nsvc] = mmd
# libmodulemd raises various GLib exceptions with not very helpful
# messages. Let's replace it with something more useful.
raise RuntimeError("Failed to read %s: %s", mmds[filename], str(exc))
if mod_stream:
added = True added = True
variant.arch_mmds.setdefault(arch, {})[nsvc] = mod_stream except KeyError:
# There is no modulemd for this arch. This could mean an arch was
# added to the compose after the module was built. We don't want to
# process this, let's skip this module.
pass
if not added: if not added:
# The module is filtered on all arches of this variant. # The module is filtered on all arches of this variant.
@ -365,7 +342,9 @@ def _add_scratch_modules_to_variant(
tag_to_mmd.setdefault(tag, {}) tag_to_mmd.setdefault(tag, {})
for arch in variant.arches: for arch in variant.arches:
try: try:
mmd = read_single_module_stream_from_string(final_modulemd[arch]) mmd = Modulemd.ModuleStream.read_string(
final_modulemd[arch], strict=True
)
variant.arch_mmds.setdefault(arch, {})[nsvc] = mmd variant.arch_mmds.setdefault(arch, {})[nsvc] = mmd
except KeyError: except KeyError:
continue continue
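Both hunks above end up in libmodulemd: one reads module metadata from a file, the other from a string, and the read_single_module_stream_from_* wrappers mainly translate GLib errors into readable ones. A minimal parse sketch using the same read_string call (the YAML document is invented for illustration):

import gi
gi.require_version("Modulemd", "2.0")
from gi.repository import Modulemd

yaml_text = """\
---
document: modulemd
version: 2
data:
  name: demo
  stream: rawhide
  version: 1
  context: abcd1234
  summary: Demo module
  description: >-
    Example stream, invented for illustration.
  license:
    module: [MIT]
...
"""

# strict=True makes parse errors fatal instead of silently skipped
stream = Modulemd.ModuleStream.read_string(yaml_text, strict=True)
print(stream.get_nsvc())  # demo:rawhide:1:abcd1234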
@ -405,13 +384,7 @@ def _is_filtered_out(compose, variant, arch, module_name, module_stream):
def _get_modules_from_koji( def _get_modules_from_koji(
compose, compose, koji_wrapper, event, variant, variant_tags, tag_to_mmd
koji_wrapper,
event,
variant,
variant_tags,
tag_to_mmd,
exclude_module_ns,
): ):
""" """
Loads modules for given `variant` from koji `session`, adds them to Loads modules for given `variant` from koji `session`, adds them to
@ -422,7 +395,6 @@ def _get_modules_from_koji(
:param Variant variant: Variant with modules to find. :param Variant variant: Variant with modules to find.
:param dict variant_tags: Dict populated by this method. Key is `variant` :param dict variant_tags: Dict populated by this method. Key is `variant`
and value is list of Koji tags to get the RPMs from. and value is list of Koji tags to get the RPMs from.
:param list exclude_module_ns: Module name:stream which will be excluded.
""" """
# Find out all modules in every variant and add their Koji tags # Find out all modules in every variant and add their Koji tags
@ -431,11 +403,7 @@ def _get_modules_from_koji(
koji_modules = get_koji_modules(compose, koji_wrapper, event, module["name"]) koji_modules = get_koji_modules(compose, koji_wrapper, event, module["name"])
for koji_module in koji_modules: for koji_module in koji_modules:
nsvc = _add_module_to_variant( nsvc = _add_module_to_variant(
koji_wrapper, koji_wrapper, variant, koji_module, compose=compose
variant,
koji_module,
compose=compose,
exclude_module_ns=exclude_module_ns,
) )
if not nsvc: if not nsvc:
continue continue
@ -496,16 +464,7 @@ def filter_inherited(koji_proxy, event, module_builds, top_tag):
# And keep only builds from that topmost tag # And keep only builds from that topmost tag
result.extend(build for build in builds if build["tag_name"] == tag) result.extend(build for build in builds if build["tag_name"] == tag)
# If the same module was inherited multiple times, it will be in result return result
# multiple times. We need to deduplicate.
deduplicated_result = []
included_nvrs = set()
for build in result:
if build["nvr"] not in included_nvrs:
deduplicated_result.append(build)
included_nvrs.add(build["nvr"])
return deduplicated_result
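The deduplication appended to filter_inherited is the usual seen-set pattern keyed on NVR, keeping the first occurrence and preserving order. A tiny demonstration with made-up build dicts:

builds = [
    {"nvr": "foo-1.0-1", "tag_name": "tag-a"},
    {"nvr": "foo-1.0-1", "tag_name": "tag-b"},  # same build inherited twice
    {"nvr": "bar-2.0-3", "tag_name": "tag-a"},
]

seen, deduplicated = set(), []
for build in builds:
    if build["nvr"] not in seen:
        deduplicated.append(build)
        seen.add(build["nvr"])

print([b["nvr"] for b in deduplicated])  # ['foo-1.0-1', 'bar-2.0-3']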
def filter_by_whitelist(compose, module_builds, input_modules, expected_modules): def filter_by_whitelist(compose, module_builds, input_modules, expected_modules):
@ -532,16 +491,15 @@ def filter_by_whitelist(compose, module_builds, input_modules, expected_modules)
info.get("context"), info.get("context"),
) )
nvr_patterns.add((pattern, spec["name"])) nvr_patterns.add((pattern, spec["name"]))
modules_to_keep = [] modules_to_keep = []
for mb in module_builds: for mb in sorted(module_builds, key=lambda i: i['name']):
# Split release from the build into version and context # Split release from the build into version and context
ver, ctx = mb["release"].split(".") ver, ctx = mb["release"].split(".")
# Values in `mb` are from Koji build. There's nvr and name, version and # Values in `mb` are from Koji build. There's nvr and name, version and
# release. The input pattern specifies modular name, stream, version # release. The input pattern specifies modular name, stream, version
# and context. # and context.
for (n, s, v, c), spec in nvr_patterns: for (n, s, v, c), spec in sorted(nvr_patterns):
if ( if (
# We always have a name and stream... # We always have a name and stream...
mb["name"] == n mb["name"] == n
@ -553,19 +511,51 @@ def filter_by_whitelist(compose, module_builds, input_modules, expected_modules)
): ):
modules_to_keep.append(mb) modules_to_keep.append(mb)
expected_modules.discard(spec) expected_modules.discard(spec)
break
return modules_to_keep return modules_to_keep
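For module builds, Koji stores the stream in the build's version field and packs the modular version and context into release as "<version>.<context>", which is why the loop splits release on the dot. Schematically (the build dict and pattern are invented, and the parts of the condition elided from the hunk are approximated):

# a module build's release field is "<version>.<context>"
mb = {"name": "perl", "version": "5.32", "release": "3620220201.abcd1234"}
ver, ctx = mb["release"].split(".")

n, s, v, c = ("perl", "5.32", None, None)  # name, stream, version, context
matches = (
    mb["name"] == n
    and mb["version"] == s        # Koji keeps the stream in "version"
    and (v is None or ver == v)   # version/context only if the pattern
    and (c is None or ctx == c)   # actually specifies them
)
print(matches)  # True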
def _filter_expected_modules(
variant_name: AnyStr,
variant_arches: List[AnyStr],
expected_modules: Set[AnyStr],
filtered_modules: List[Tuple[AnyStr, Dict[AnyStr, List[AnyStr]]]],
) -> set:
"""
Filter out all modules which are listed in the Pungi config.
Those modules can be absent in the koji env, so we must remove them
from the expected modules list, otherwise Pungi will fail.
"""
for variant_regexp, filters_dict in filtered_modules:
for arch, modules in filters_dict.items():
arch = '.*' if arch == '*' else arch
variant_regexp = '.*' if variant_regexp == '*' else variant_regexp
modules = ['.*' if module == '*' else module for module in modules]
cond1 = re.findall(
variant_regexp,
variant_name,
)
cond2 = any(
re.findall(
arch,
variant_arch,
) for variant_arch in variant_arches
)
if cond1 and cond2:
expected_modules = {
expected_module for expected_module in expected_modules if
not any(
re.findall(
filtered_module,
expected_module,
) for filtered_module in modules
)
}
return expected_modules
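_filter_expected_modules treats every '*' in the filter config as '.*' and matches variant, arch and module names with re.findall. A hypothetical invocation showing the shape of its inputs (values invented; assumes the function above is in scope):

expected = {"perl:5.32", "ruby:3.0", "nodejs:16"}

remaining = _filter_expected_modules(
    variant_name="AppStream",
    variant_arches=["x86_64", "i686"],
    expected_modules=expected,
    # same structure as compose.conf['filter_modules']:
    # (variant regex, {arch or '*': [module regexes]})
    filtered_modules=[("AppStream", {"*": ["ruby:.*"]})],
)
print(remaining)  # {'perl:5.32', 'nodejs:16'} -- set order may vary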
def _get_modules_from_koji_tags( def _get_modules_from_koji_tags(
compose, compose, koji_wrapper, event_id, variant, variant_tags, tag_to_mmd
koji_wrapper,
event_id,
variant,
variant_tags,
tag_to_mmd,
exclude_module_ns,
): ):
""" """
Loads modules for given `variant` from Koji, adds them to Loads modules for given `variant` from Koji, adds them to
@ -577,7 +567,6 @@ def _get_modules_from_koji_tags(
:param Variant variant: Variant with modules to find. :param Variant variant: Variant with modules to find.
:param dict variant_tags: Dict populated by this method. Key is `variant` :param dict variant_tags: Dict populated by this method. Key is `variant`
and value is list of Koji tags to get the RPMs from. and value is list of Koji tags to get the RPMs from.
:param list exclude_module_ns: Module name:stream which will be excluded.
""" """
# Compose tags from configuration # Compose tags from configuration
compose_tags = [ compose_tags = [
@ -585,7 +574,13 @@ def _get_modules_from_koji_tags(
] ]
# Get set of configured module names for this variant. If nothing is # Get set of configured module names for this variant. If nothing is
# configured, the set is empty. # configured, the set is empty.
expected_modules = set(spec["name"] for spec in variant.get_modules()) expected_modules = []
for spec in variant.get_modules():
name, stream = spec['name'].split(':')
expected_modules.append(
':'.join((name, stream.replace('-', '_')))
)
expected_modules = set(expected_modules)
# Find out all modules in every variant and add their Koji tags # Find out all modules in every variant and add their Koji tags
# to variant and variant_tags list. # to variant and variant_tags list.
koji_proxy = koji_wrapper.koji_proxy koji_proxy = koji_wrapper.koji_proxy
@ -644,26 +639,21 @@ def _get_modules_from_koji_tags(
for build in latest_builds: for build in latest_builds:
# Get the Build from Koji to get modulemd and module_tag. # Get the Build from Koji to get modulemd and module_tag.
build = koji_proxy.getBuild(build["build_id"]) build = koji_proxy.getBuild(build["build_id"])
nsvc = _add_module_to_variant(
koji_wrapper,
variant,
build,
True,
compose=compose,
exclude_module_ns=exclude_module_ns,
)
if not nsvc:
continue
module_tag = ( module_tag = (
build.get("extra", {}) build.get("extra", {})
.get("typeinfo", {}) .get("typeinfo", {})
.get("module", {}) .get("module", {})
.get("content_koji_tag", "") .get("content_koji_tag", "")
) )
variant_tags[variant].append(module_tag) variant_tags[variant].append(module_tag)
nsvc = _add_module_to_variant(
koji_wrapper, variant, build, True, compose=compose
)
if not nsvc:
continue
tag_to_mmd.setdefault(module_tag, {}) tag_to_mmd.setdefault(module_tag, {})
for arch in variant.arch_mmds: for arch in variant.arch_mmds:
try: try:
@ -685,17 +675,22 @@ def _get_modules_from_koji_tags(
# needed in createrepo phase where metadata is exposed by # needed in createrepo phase where metadata is exposed by
# productmd # productmd
variant.module_uid_to_koji_tag[nsvc] = module_tag variant.module_uid_to_koji_tag[nsvc] = module_tag
expected_modules = _filter_expected_modules(
variant_name=variant.name,
variant_arches=variant.arches,
expected_modules=expected_modules,
filtered_modules=compose.conf['filter_modules'],
)
if expected_modules: if expected_modules:
# There are some module names that were listed in configuration and not # There are some module names that were listed in configuration and not
# found in any tag... # found in any tag...
compose.log_warning( raise RuntimeError(
"Configuration specified patterns (%s) that don't match " "Configuration specified patterns (%s) that don't match "
"any modules in the configured tags." % ", ".join(expected_modules) "any modules in the configured tags." % ", ".join(expected_modules)
) )
def populate_global_pkgset(compose, koji_wrapper, event): def populate_global_pkgset(compose, koji_wrapper, path_prefix, event):
all_arches = get_all_arches(compose) all_arches = get_all_arches(compose)
# List of compose tags from which we create this compose # List of compose tags from which we create this compose
@ -749,52 +744,26 @@ def populate_global_pkgset(compose, koji_wrapper, event):
"modules." "modules."
) )
extra_modules = get_variant_data(
compose.conf, "pkgset_koji_module_builds", variant
)
# When adding extra modules, other modules of the same name:stream available
# in brew tag should be excluded.
exclude_module_ns = []
if extra_modules:
exclude_module_ns = [
":".join(nsvc.split(":")[:2]) for nsvc in extra_modules
]
if modular_koji_tags or ( if modular_koji_tags or (
compose.conf["pkgset_koji_module_tag"] and variant.modules compose.conf["pkgset_koji_module_tag"] and variant.modules
): ):
# List modules tagged in particular tags. # List modules tagged in particular tags.
_get_modules_from_koji_tags( _get_modules_from_koji_tags(
compose, compose, koji_wrapper, event, variant, variant_tags, tag_to_mmd
koji_wrapper,
event,
variant,
variant_tags,
tag_to_mmd,
exclude_module_ns,
) )
elif variant.modules: elif variant.modules:
# Search each module in Koji separately. Tagging does not come into # Search each module in Koji separately. Tagging does not come into
# play here. # play here.
_get_modules_from_koji( _get_modules_from_koji(
compose, compose, koji_wrapper, event, variant, variant_tags, tag_to_mmd
koji_wrapper,
event,
variant,
variant_tags,
tag_to_mmd,
exclude_module_ns,
) )
extra_modules = get_variant_data(
compose.conf, "pkgset_koji_module_builds", variant
)
if extra_modules: if extra_modules:
_add_extra_modules_to_variant( _add_extra_modules_to_variant(
compose, compose, koji_wrapper, variant, extra_modules, variant_tags, tag_to_mmd
koji_wrapper,
variant,
extra_modules,
variant_tags,
tag_to_mmd,
) )
variant_scratch_modules = get_variant_data( variant_scratch_modules = get_variant_data(
@ -821,23 +790,17 @@ def populate_global_pkgset(compose, koji_wrapper, event):
pkgsets = [] pkgsets = []
extra_builds = force_list(compose.conf.get("pkgset_koji_builds", []))
extra_tasks = force_list(compose.conf.get("pkgset_koji_scratch_tasks", []))
if not pkgset_koji_tags and (extra_builds or extra_tasks):
# We have extra packages to pull in, but no tag to merge them with.
compose_tags.append(pungi.phases.pkgset.pkgsets.MISSING_KOJI_TAG)
pkgset_koji_tags.append(pungi.phases.pkgset.pkgsets.MISSING_KOJI_TAG)
# Get package set for each compose tag and merge it to global package # Get package set for each compose tag and merge it to global package
# list. Also prepare per-variant pkgset, because we do not have list # list. Also prepare per-variant pkgset, because we do not have list
# of binary RPMs in module definition - there is just list of SRPMs. # of binary RPMs in module definition - there is just list of SRPMs.
for compose_tag in compose_tags: for compose_tag in compose_tags:
compose.log_info("Loading package set for tag %s", compose_tag) compose.log_info("Loading package set for tag %s", compose_tag)
kwargs = {}
if compose_tag in pkgset_koji_tags: if compose_tag in pkgset_koji_tags:
kwargs["extra_builds"] = extra_builds extra_builds = force_list(compose.conf.get("pkgset_koji_builds", []))
kwargs["extra_tasks"] = extra_tasks extra_tasks = force_list(compose.conf.get("pkgset_koji_scratch_tasks", []))
else:
extra_builds = []
extra_tasks = []
pkgset = pungi.phases.pkgset.pkgsets.KojiPackageSet( pkgset = pungi.phases.pkgset.pkgsets.KojiPackageSet(
compose_tag, compose_tag,
@ -849,10 +812,8 @@ def populate_global_pkgset(compose, koji_wrapper, event):
allow_invalid_sigkeys=allow_invalid_sigkeys, allow_invalid_sigkeys=allow_invalid_sigkeys,
populate_only_packages=populate_only_packages_to_gather, populate_only_packages=populate_only_packages_to_gather,
cache_region=compose.cache_region, cache_region=compose.cache_region,
signed_packages_retries=compose.conf["signed_packages_retries"], extra_builds=extra_builds,
signed_packages_wait=compose.conf["signed_packages_wait"], extra_tasks=extra_tasks,
downloader=compose.koji_downloader,
**kwargs
) )
# Check if we have cache for this tag from previous compose. If so, use # Check if we have cache for this tag from previous compose. If so, use
@ -861,16 +822,11 @@ def populate_global_pkgset(compose, koji_wrapper, event):
compose.paths.work.pkgset_file_cache(compose_tag) compose.paths.work.pkgset_file_cache(compose_tag)
) )
if old_cache_path: if old_cache_path:
try:
pkgset.set_old_file_cache( pkgset.set_old_file_cache(
pungi.phases.pkgset.pkgsets.KojiPackageSet.load_old_file_cache( pungi.phases.pkgset.pkgsets.KojiPackageSet.load_old_file_cache(
old_cache_path old_cache_path
) )
) )
except Exception as e:
compose.log_debug(
"Failed to load old cache file %s : %s" % (old_cache_path, str(e))
)
is_traditional = compose_tag in compose.conf.get("pkgset_koji_tag", []) is_traditional = compose_tag in compose.conf.get("pkgset_koji_tag", [])
should_inherit = inherit if is_traditional else inherit_modules should_inherit = inherit if is_traditional else inherit_modules
@ -910,18 +866,13 @@ def populate_global_pkgset(compose, koji_wrapper, event):
if pkgset.reuse is None: if pkgset.reuse is None:
pkgset.populate( pkgset.populate(
compose_tag, compose_tag,
# We care about packages as they existed on the specified
# event. However, modular content tags are not expected to
# change, so the event doesn't matter there. If an exact NSVC
# of a module is specified, the code above would happily find
# its content tag, but fail here if the content tag doesn't
# exist at the given event.
event=event if is_traditional else None,
inherit=should_inherit, inherit=should_inherit,
include_packages=modular_packages, include_packages=modular_packages,
) )
for variant in compose.all_variants.values(): for variant in compose.all_variants.values():
if compose_tag in variant_tags[variant]: if compose_tag in variant_tags[variant]:
# If it's a modular tag, store the package set for the module. # If it's a modular tag, store the package set for the module.
for nsvc, koji_tag in variant.module_uid_to_koji_tag.items(): for nsvc, koji_tag in variant.module_uid_to_koji_tag.items():
if compose_tag == koji_tag: if compose_tag == koji_tag:
@ -944,7 +895,7 @@ def populate_global_pkgset(compose, koji_wrapper, event):
MaterializedPackageSet.create, MaterializedPackageSet.create,
compose, compose,
pkgset, pkgset,
compose.koji_downloader.path_prefix, path_prefix,
mmd=tag_to_mmd.get(pkgset.name), mmd=tag_to_mmd.get(pkgset.name),
) )
) )

File diff suppressed because it is too large.


@ -15,7 +15,6 @@
import os import os
import shutil
from kobo.shortcuts import run from kobo.shortcuts import run
@ -32,6 +31,8 @@ import pungi.phases.pkgset.source
class PkgsetSourceRepos(pungi.phases.pkgset.source.PkgsetSourceBase): class PkgsetSourceRepos(pungi.phases.pkgset.source.PkgsetSourceBase):
enabled = True
def __call__(self): def __call__(self):
package_sets, path_prefix = get_pkgset_from_repos(self.compose) package_sets, path_prefix = get_pkgset_from_repos(self.compose)
return (package_sets, path_prefix) return (package_sets, path_prefix)
@ -111,17 +112,6 @@ def get_pkgset_from_repos(compose):
flist.append(dst) flist.append(dst)
pool.queue_put((src, dst)) pool.queue_put((src, dst))
# Clean up tmp dir
# Workaround for rpm not honoring sgid bit which only appears when yum is used.
yumroot_dir = os.path.join(pungi_dir, "work", arch, "yumroot")
if os.path.isdir(yumroot_dir):
try:
shutil.rmtree(yumroot_dir)
except Exception as e:
compose.log_warning(
"Failed to clean up tmp dir: %s %s" % (yumroot_dir, str(e))
)
msg = "Linking downloaded pkgset packages" msg = "Linking downloaded pkgset packages"
compose.log_info("[BEGIN] %s" % msg) compose.log_info("[BEGIN] %s" % msg)
pool.start() pool.start()


@ -18,7 +18,6 @@ import os
from pungi.phases.base import PhaseBase from pungi.phases.base import PhaseBase
from pungi.util import failable, get_arch_variant_data from pungi.util import failable, get_arch_variant_data
import productmd.compose
class TestPhase(PhaseBase): class TestPhase(PhaseBase):
@ -26,7 +25,6 @@ class TestPhase(PhaseBase):
def run(self): def run(self):
check_image_sanity(self.compose) check_image_sanity(self.compose)
check_image_metadata(self.compose)
def check_image_sanity(compose): def check_image_sanity(compose):
@ -47,17 +45,6 @@ def check_image_sanity(compose):
check_size_limit(compose, variant, arch, img) check_size_limit(compose, variant, arch, img)
def check_image_metadata(compose):
"""
Check the images metadata for entries that cannot be serialized.
Often caused by isos with duplicate metadata.
Accessing the `images` attribute will raise an exception if there's a problem
"""
if compose.im.images:
compose = productmd.compose.Compose(compose.paths.compose.topdir())
return compose.images
def check_sanity(compose, variant, arch, image): def check_sanity(compose, variant, arch, image):
path = os.path.join(compose.paths.compose.topdir(), image.path) path = os.path.join(compose.paths.compose.topdir(), image.path)
deliverable = getattr(image, "deliverable") deliverable = getattr(image, "deliverable")


@ -69,13 +69,10 @@ class Profiler(object):
@classmethod @classmethod
def print_results(cls, stream=sys.stdout): def print_results(cls, stream=sys.stdout):
# Ensure all data that was printed to stdout was already flushed. If print("Profiling results:", file=sys.stdout)
# the caller is redirecting stderr to stdout, and there's buffered
# data, we may end up in a situation where the stderr output printed
# below ends up mixed with the stdout lines.
sys.stdout.flush()
print("Profiling results:", file=stream)
results = cls._data.items() results = cls._data.items()
results = sorted(results, key=lambda x: x[1]["time"], reverse=True) results = sorted(results, key=lambda x: x[1]["time"], reverse=True)
for name, data in results: for name, data in results:
print(" %6.2f %5d %s" % (data["time"], data["calls"], name), file=stream) print(
" %6.2f %5d %s" % (data["time"], data["calls"], name), file=sys.stdout
)


@ -13,19 +13,12 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>. # along with this program; if not, see <https://gnu.org/licenses/>.
import contextlib
import os import os
import re import re
import shutil
import tarfile
import requests
import six
from six.moves import shlex_quote from six.moves import shlex_quote
import kobo.log import kobo.log
from kobo.shortcuts import run from kobo.shortcuts import run
from pungi import util
from pungi.wrappers import kojiwrapper from pungi.wrappers import kojiwrapper
@ -117,7 +110,7 @@ class Runroot(kobo.log.LoggingBase):
runroot_tag = self.compose.conf["runroot_tag"] runroot_tag = self.compose.conf["runroot_tag"]
log_dir = kwargs.pop("log_dir", None) log_dir = kwargs.pop("log_dir", None)
koji_wrapper = kojiwrapper.KojiWrapper(self.compose) koji_wrapper = kojiwrapper.KojiWrapper(self.compose.conf["koji_profile"])
koji_cmd = koji_wrapper.get_runroot_cmd( koji_cmd = koji_wrapper.get_runroot_cmd(
runroot_tag, runroot_tag,
arch, arch,
@ -156,11 +149,7 @@ class Runroot(kobo.log.LoggingBase):
""" """
formatted_cmd = command.format(**fmt_dict) if fmt_dict else command formatted_cmd = command.format(**fmt_dict) if fmt_dict else command
ssh_cmd = ["ssh", "-oBatchMode=yes", "-n", "-l", user, hostname, formatted_cmd] ssh_cmd = ["ssh", "-oBatchMode=yes", "-n", "-l", user, hostname, formatted_cmd]
output = run(ssh_cmd, show_cmd=True, logfile=log_file)[1] return run(ssh_cmd, show_cmd=True, logfile=log_file)[1]
if six.PY3 and isinstance(output, bytes):
return output.decode()
else:
return output
def _log_file(self, base, suffix): def _log_file(self, base, suffix):
return base.replace(".log", "." + suffix + ".log") return base.replace(".log", "." + suffix + ".log")
@ -185,13 +174,10 @@ class Runroot(kobo.log.LoggingBase):
# by the runroot task, so the Pungi user can access them. # by the runroot task, so the Pungi user can access them.
if chown_paths: if chown_paths:
paths = " ".join(shlex_quote(pth) for pth in chown_paths) paths = " ".join(shlex_quote(pth) for pth in chown_paths)
command += " ; EXIT_CODE=$?"
# Make the files world readable # Make the files world readable
command += " ; chmod -R a+r %s" % paths command += " && chmod -R a+r %s" % paths
# and owned by the same user that is running the process # and owned by the same user that is running the process
command += " ; chown -R %d %s" % (os.getuid(), paths) command += " && chown -R %d %s" % (os.getuid(), paths)
# Exit with code of main command
command += " ; exit $EXIT_CODE"
hostname = runroot_ssh_hostnames[arch] hostname = runroot_ssh_hostnames[arch]
user = self.compose.conf.get("runroot_ssh_username", "root") user = self.compose.conf.get("runroot_ssh_username", "root")
@ -236,9 +222,9 @@ class Runroot(kobo.log.LoggingBase):
fmt_dict["runroot_key"] = runroot_key fmt_dict["runroot_key"] = runroot_key
self._ssh_run(hostname, user, run_template, fmt_dict, log_file=log_file) self._ssh_run(hostname, user, run_template, fmt_dict, log_file=log_file)
fmt_dict["command"] = ( fmt_dict[
"rpm -qa --qf='%{name}-%{version}-%{release}.%{arch}\n'" "command"
) ] = "rpm -qa --qf='%{name}-%{version}-%{release}.%{arch}\n'"
buildroot_rpms = self._ssh_run( buildroot_rpms = self._ssh_run(
hostname, hostname,
user, user,
@ -314,14 +300,13 @@ class Runroot(kobo.log.LoggingBase):
runroot_channel = self.compose.conf.get("runroot_channel") runroot_channel = self.compose.conf.get("runroot_channel")
runroot_tag = self.compose.conf["runroot_tag"] runroot_tag = self.compose.conf["runroot_tag"]
koji_wrapper = kojiwrapper.KojiWrapper(self.compose) koji_wrapper = kojiwrapper.KojiWrapper(self.compose.conf["koji_profile"])
koji_cmd = koji_wrapper.get_pungi_buildinstall_cmd( koji_cmd = koji_wrapper.get_pungi_buildinstall_cmd(
runroot_tag, runroot_tag,
arch, arch,
args, args,
channel=runroot_channel, channel=runroot_channel,
# We want to change owner only if shared NFS directory is used. chown_uid=os.getuid(),
chown_uid=os.getuid() if kwargs.get("mounts") else None,
**kwargs **kwargs
) )
@ -332,7 +317,6 @@ class Runroot(kobo.log.LoggingBase):
% (output["task_id"], log_file) % (output["task_id"], log_file)
) )
self._result = output self._result = output
return output["task_id"]
def run_pungi_ostree(self, args, log_file=None, arch=None, **kwargs): def run_pungi_ostree(self, args, log_file=None, arch=None, **kwargs):
""" """
@ -350,7 +334,7 @@ class Runroot(kobo.log.LoggingBase):
runroot_channel = self.compose.conf.get("runroot_channel") runroot_channel = self.compose.conf.get("runroot_channel")
runroot_tag = self.compose.conf["runroot_tag"] runroot_tag = self.compose.conf["runroot_tag"]
koji_wrapper = kojiwrapper.KojiWrapper(self.compose) koji_wrapper = kojiwrapper.KojiWrapper(self.compose.conf["koji_profile"])
koji_cmd = koji_wrapper.get_pungi_ostree_cmd( koji_cmd = koji_wrapper.get_pungi_ostree_cmd(
runroot_tag, arch, args, channel=runroot_channel, **kwargs runroot_tag, arch, args, channel=runroot_channel, **kwargs
) )
@ -389,72 +373,3 @@ class Runroot(kobo.log.LoggingBase):
return self._result return self._result
else: else:
raise ValueError("Unknown runroot_method %r." % self.runroot_method) raise ValueError("Unknown runroot_method %r." % self.runroot_method)
@util.retry(wait_on=requests.exceptions.RequestException)
def _download_file(url, dest):
# contextlib.closing is only needed in requests<2.18
with contextlib.closing(requests.get(url, stream=True, timeout=5)) as r:
if r.status_code == 404:
raise RuntimeError("Archive %s not found" % url)
r.raise_for_status()
with open(dest, "wb") as f:
shutil.copyfileobj(r.raw, f)
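_download_file streams the response straight to disk and relies on the retry decorator for transient failures. A minimal usage sketch of the same streaming pattern, assuming requests >= 2.18 so the response is its own context manager (older versions need contextlib.closing, as the comment above notes; the URL is an example):

import shutil
import requests

def download_file(url, dest, timeout=5):
    """Stream a URL to disk; treat 404 as a hard error, like the helper above."""
    with requests.get(url, stream=True, timeout=timeout) as r:
        if r.status_code == 404:
            raise RuntimeError("Archive %s not found" % url)
        r.raise_for_status()
        with open(dest, "wb") as f:
            shutil.copyfileobj(r.raw, f)

# download_file("https://example.com/output/foo.tar.gz", "/tmp/foo.tar.gz")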
def _download_archive(task_id, fname, archive_url, dest_dir):
"""Download file from URL to a destination, with retries."""
temp_file = os.path.join(dest_dir, fname)
_download_file(archive_url, temp_file)
return temp_file
def _extract_archive(task_id, fname, archive_file, dest_path):
"""Extract the archive into given destination.
All items of the archive must match the name of the archive, i.e. all
paths in foo.tar.gz must start with foo/.
"""
basename = os.path.basename(fname).split(".")[0]
strip_prefix = basename + "/"
with tarfile.open(archive_file, "r") as archive:
for member in archive.getmembers():
# Check if each item is either the root directory or is within it.
if member.name != basename and not member.name.startswith(strip_prefix):
raise RuntimeError(
"Archive %s from task %s contains file without expected prefix: %s"
% (fname, task_id, member)
)
dest = os.path.join(dest_path, member.name[len(strip_prefix) :])
if member.isdir():
# Create directories where needed...
util.makedirs(dest)
elif member.isfile():
# ... and extract files into them.
with open(dest, "wb") as dest_obj:
shutil.copyfileobj(archive.extractfile(member), dest_obj)
elif member.islnk():
# We have a hardlink. Let's also link it.
linked_file = os.path.join(
dest_path, member.linkname[len(strip_prefix) :]
)
os.link(linked_file, dest)
else:
# Any other file type is an error.
raise RuntimeError(
"Unexpected file type in %s from task %s: %s"
% (fname, task_id, member)
)
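_extract_archive is deliberately defensive: every member must live under a top-level directory named after the archive, so a crafted tarball cannot drop files outside the destination, and anything other than a directory, regular file, or hardlink is rejected. The prefix arithmetic in isolation (names invented):

fname = "install-tree.tar.gz"
member_name = "install-tree/images/boot.iso"

basename = fname.split(".")[0]          # "install-tree"
strip_prefix = basename + "/"
assert member_name.startswith(strip_prefix)
print(member_name[len(strip_prefix):])  # images/boot.iso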
def download_and_extract_archive(compose, task_id, fname, destination):
"""Download a tar archive from task outputs and extract it to the destination."""
koji = kojiwrapper.KojiWrapper(compose).koji_module
# Koji API provides downloadTaskOutput method, but it's not usable as it
# will attempt to load the entire file into memory.
# So instead let's generate a path and attempt to convert it to a URL.
server_path = os.path.join(koji.pathinfo.task(task_id), fname)
archive_url = server_path.replace(koji.config.topdir, koji.config.topurl)
with util.temp_dir(prefix="buildinstall-download") as tmp_dir:
local_path = _download_archive(task_id, fname, archive_url, tmp_dir)
_extract_archive(task_id, fname, local_path, destination)


@ -1,63 +0,0 @@
import argparse
import os
import re
import time
from pungi.util import format_size
LOCK_RE = re.compile(r".*\.lock(\|[A-Za-z0-9]+)*$")
def should_be_cleaned_up(path, st, threshold):
if st.st_nlink == 1 and st.st_mtime < threshold:
# No other instances, older than limit
return True
if LOCK_RE.match(path) and st.st_mtime < threshold:
# Suspiciously old lock
return True
return False
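A quick usage sketch of the cleanup predicate (the cache path is hypothetical and should_be_cleaned_up from above is assumed to be in scope):

import os
import time

threshold = time.time() - 7 * 24 * 3600   # same default: 7 days

path = "/var/cache/pungi/koji/foo.rpm"    # hypothetical cache entry
if os.path.exists(path):
    st = os.stat(path)                    # st_nlink and st_mtime are checked
    if should_be_cleaned_up(path, st, threshold):
        os.remove(path)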
def main():
parser = argparse.ArgumentParser()
parser.add_argument("CACHE_DIR")
parser.add_argument("-n", "--dry-run", action="store_true")
parser.add_argument("--verbose", action="store_true")
parser.add_argument(
"--max-age",
help="how old files should be considered for deletion",
default=7,
type=int,
)
args = parser.parse_args()
topdir = os.path.abspath(args.CACHE_DIR)
max_age = args.max_age * 24 * 3600
cleaned_up = 0
threshold = time.time() - max_age
for dirpath, dirnames, filenames in os.walk(topdir):
for f in filenames:
filepath = os.path.join(dirpath, f)
st = os.stat(filepath)
if should_be_cleaned_up(filepath, st, threshold):
if args.verbose:
print("RM %s" % filepath)
cleaned_up += st.st_size
if not args.dry_run:
os.remove(filepath)
if not dirnames and not filenames:
if args.verbose:
print("RMDIR %s" % dirpath)
if not args.dry_run:
os.rmdir(dirpath)
if args.dry_run:
print("Would reclaim %s bytes." % format_size(cleaned_up))
else:
print("Reclaimed %s bytes." % format_size(cleaned_up))


@ -96,7 +96,7 @@ def main():
f.filter_environments(opts.arch, opts.variant, opts.arch_only_environments) f.filter_environments(opts.arch, opts.variant, opts.arch_only_environments)
if not opts.no_cleanup: if not opts.no_cleanup:
f.cleanup(opts.arch, opts.keep_empty_group, opts.lookaside_group) f.cleanup(opts.keep_empty_group, opts.lookaside_group)
if opts.remove_categories: if opts.remove_categories:
f.remove_categories() f.remove_categories()


@ -171,11 +171,32 @@ def main():
group.add_argument( group.add_argument(
"--offline", action="store_true", help="Do not resolve git references." "--offline", action="store_true", help="Do not resolve git references."
) )
parser.add_argument(
"--multi",
metavar="DIR",
help=(
"Treat source as config for pungi-orchestrate and store dump into "
"given directory."
),
)
args = parser.parse_args() args = parser.parse_args()
defines = config_utils.extract_defines(args.define) defines = config_utils.extract_defines(args.define)
if args.multi:
if len(args.sources) > 1:
parser.error("Only one multi config can be specified.")
return dump_multi_config(
args.sources[0],
dest=args.multi,
defines=defines,
just_dump=args.just_dump,
event=args.freeze_event,
offline=args.offline,
)
return process_file( return process_file(
args.sources, args.sources,
defines=defines, defines=defines,


@ -127,7 +127,8 @@ def run(config, topdir, has_old, offline, defined_variables, schema_overrides):
pungi.phases.OstreeInstallerPhase(compose, buildinstall_phase), pungi.phases.OstreeInstallerPhase(compose, buildinstall_phase),
pungi.phases.OSTreePhase(compose), pungi.phases.OSTreePhase(compose),
pungi.phases.CreateisoPhase(compose, buildinstall_phase), pungi.phases.CreateisoPhase(compose, buildinstall_phase),
pungi.phases.ExtraIsosPhase(compose, buildinstall_phase), pungi.phases.ExtraIsosPhase(compose),
pungi.phases.LiveImagesPhase(compose),
pungi.phases.LiveMediaPhase(compose), pungi.phases.LiveMediaPhase(compose),
pungi.phases.ImageBuildPhase(compose), pungi.phases.ImageBuildPhase(compose),
pungi.phases.ImageChecksumPhase(compose), pungi.phases.ImageChecksumPhase(compose),


@ -5,43 +5,35 @@ import os
import subprocess import subprocess
import tempfile import tempfile
from shutil import rmtree from shutil import rmtree
from typing import ( from typing import AnyStr, List, Dict, Optional
AnyStr,
List,
Dict,
Optional,
)
import createrepo_c as cr import createrepo_c as cr
import requests import requests
import yaml import yaml
from dataclasses import dataclass, field from dataclasses import dataclass, field
from .create_packages_json import ( from .create_packages_json import PackagesGenerator, RepoInfo
PackagesGenerator,
RepoInfo,
VariantInfo,
)
@dataclass @dataclass
class ExtraVariantInfo(VariantInfo): class ExtraRepoInfo(RepoInfo):
modules: List[AnyStr] = field(default_factory=list) modules: List[AnyStr] = field(default_factory=list)
packages: List[AnyStr] = field(default_factory=list) packages: List[AnyStr] = field(default_factory=list)
is_remote: bool = True
class CreateExtraRepo(PackagesGenerator): class CreateExtraRepo(PackagesGenerator):
def __init__( def __init__(
self, self,
variants: List[ExtraVariantInfo], repos: List[ExtraRepoInfo],
bs_auth_token: AnyStr, bs_auth_token: AnyStr,
local_repository_path: AnyStr, local_repository_path: AnyStr,
clear_target_repo: bool = True, clear_target_repo: bool = True,
): ):
self.variants = [] # type: List[ExtraVariantInfo] self.repos = [] # type: List[ExtraRepoInfo]
super().__init__(variants, [], []) super().__init__(repos, [], [])
self.auth_headers = { self.auth_headers = {
'Authorization': f'Bearer {bs_auth_token}', 'Authorization': f'Bearer {bs_auth_token}',
} }
@ -100,7 +92,7 @@ class CreateExtraRepo(PackagesGenerator):
arch: AnyStr, arch: AnyStr,
packages: Optional[List[AnyStr]] = None, packages: Optional[List[AnyStr]] = None,
modules: Optional[List[AnyStr]] = None, modules: Optional[List[AnyStr]] = None,
) -> List[ExtraVariantInfo]: ) -> List[ExtraRepoInfo]:
""" """
Get info about a BS repo and save it to Get info about a BS repo and save it to
an object of class ExtraRepoInfo an object of class ExtraRepoInfo
@ -118,7 +110,7 @@ class CreateExtraRepo(PackagesGenerator):
api_uri = 'api/v1' api_uri = 'api/v1'
bs_repo_suffix = 'build_repos' bs_repo_suffix = 'build_repos'
variants_info = [] repos_info = []
# get the full info about a BS repo # get the full info about a BS repo
repo_request = requests.get( repo_request = requests.get(
@ -140,13 +132,7 @@ class CreateExtraRepo(PackagesGenerator):
# skip repo with unsuitable architecture # skip repo with unsuitable architecture
if architecture != arch: if architecture != arch:
continue continue
variant_info = ExtraVariantInfo( repo_info = ExtraRepoInfo(
name=f'{build_id}-{platform_name}-{architecture}',
arch=architecture,
packages=packages,
modules=modules,
repos=[
RepoInfo(
path=os.path.join( path=os.path.join(
bs_url, bs_url,
bs_repo_suffix, bs_repo_suffix,
@ -154,12 +140,14 @@ class CreateExtraRepo(PackagesGenerator):
platform_name, platform_name,
), ),
folder=architecture, folder=architecture,
name=f'{build_id}-{platform_name}-{architecture}',
arch=architecture,
is_remote=True, is_remote=True,
packages=packages,
modules=modules,
) )
] repos_info.append(repo_info)
) return repos_info
variants_info.append(variant_info)
return variants_info
def _create_local_extra_repo(self): def _create_local_extra_repo(self):
""" """
@ -173,7 +161,7 @@ class CreateExtraRepo(PackagesGenerator):
if os.path.exists(self.default_modules_yaml_path): if os.path.exists(self.default_modules_yaml_path):
os.remove(self.default_modules_yaml_path) os.remove(self.default_modules_yaml_path)
def get_remote_file_content( def _get_remote_file_content(
self, self,
file_url: AnyStr, file_url: AnyStr,
) -> AnyStr: ) -> AnyStr:
@ -196,7 +184,7 @@ class CreateExtraRepo(PackagesGenerator):
def _download_rpm_to_local_repo( def _download_rpm_to_local_repo(
self, self,
package_location: AnyStr, package_location: AnyStr,
repo_info: RepoInfo, repo_info: ExtraRepoInfo,
) -> None: ) -> None:
""" """
Download a rpm package from a remote repo and save it to a local repo Download a rpm package from a remote repo and save it to a local repo
@ -224,22 +212,21 @@ class CreateExtraRepo(PackagesGenerator):
def _download_packages( def _download_packages(
self, self,
packages: Dict[AnyStr, cr.Package], packages: Dict[AnyStr, cr.Package],
variant_info: ExtraVariantInfo repo_info: ExtraRepoInfo
): ):
""" """
Download all defined packages from a remote repo Download all defined packages from a remote repo
:param packages: information about all packages (including :param packages: information about all of packages (including
modularity) in a remote repo modularity) in a remote repo
:param variant_info: information about a remote variant :param repo_info: information about a remote repo
""" """
for package in packages.values(): for package in packages.values():
package_name = package.name package_name = package.name
# Skip a current package from a remote repo if we defined # Skip a current package from a remote repo if we defined
# the list packages and a current package doesn't belong to it # the list packages and a current package doesn't belong to it
if variant_info.packages and \ if repo_info.packages and \
package_name not in variant_info.packages: package_name not in repo_info.packages:
continue continue
for repo_info in variant_info.repos:
self._download_rpm_to_local_repo( self._download_rpm_to_local_repo(
package_location=package.location_href, package_location=package.location_href,
repo_info=repo_info, repo_info=repo_info,
@ -248,23 +235,23 @@ class CreateExtraRepo(PackagesGenerator):
def _download_modules( def _download_modules(
self, self,
modules_data: List[Dict], modules_data: List[Dict],
variant_info: ExtraVariantInfo, repo_info: ExtraRepoInfo,
packages: Dict[AnyStr, cr.Package] packages: Dict[AnyStr, cr.Package]
): ):
""" """
Download all defined modularity packages and their data from Download all defined modularity packages and their data from
a remote repo a remote repo
:param modules_data: information about all modules in a remote repo :param modules_data: information about all of modules in a remote repo
:param variant_info: information about a remote variant :param repo_info: information about a remote repo
:param packages: information about all packages (including :param packages: information about all of packages (including
modularity) in a remote repo modularity) in a remote repo
""" """
for module in modules_data: for module in modules_data:
module_data = module['data'] module_data = module['data']
# Skip a current module from a remote repo if we defined # Skip a current module from a remote repo if we defined
# the list modules and a current module doesn't belong to it # the list modules and a current module doesn't belong to it
if variant_info.modules and \ if repo_info.modules and \
module_data['name'] not in variant_info.modules: module_data['name'] not in repo_info.modules:
continue continue
# we should add info about a module if the local repodata # we should add info about a module if the local repodata
# doesn't have it # doesn't have it
@ -277,14 +264,13 @@ class CreateExtraRepo(PackagesGenerator):
continue continue
for rpm in module['data']['artifacts']['rpms']: for rpm in module['data']['artifacts']['rpms']:
# Empty repo_info.packages means that we will download # Empty repo_info.packages means that we will download
# all packages from repo including # all of packages from repo including
# the modularity packages # the modularity packages
if not variant_info.packages: if not repo_info.packages:
break break
# skip a rpm if it doesn't belong to a processed repo # skip a rpm if it doesn't belong to a processed repo
if rpm not in packages: if rpm not in packages:
continue continue
for repo_info in variant_info.repos:
self._download_rpm_to_local_repo( self._download_rpm_to_local_repo(
package_location=packages[rpm].location_href, package_location=packages[rpm].location_href,
repo_info=repo_info, repo_info=repo_info,
@ -298,16 +284,16 @@ class CreateExtraRepo(PackagesGenerator):
3. Call `createrepo_c` which creates a local repo 3. Call `createrepo_c` which creates a local repo
with the right repodata with the right repodata
""" """
for variant_info in self.variants: for repo_info in self.repos:
for repo_info in variant_info.repos: packages = {} # type: Dict[AnyStr, cr.Package]
repomd_records = self._get_repomd_records( repomd_records = self._get_repomd_records(
repo_info=repo_info, repo_info=repo_info,
) )
packages_iterator = self.get_packages_iterator(repo_info)
# parse the repodata (including modules.yaml.gz) # parse the repodata (including modules.yaml.gz)
modules_data = self._parse_module_repomd_record( modules_data = self._parse_repomd_records(
repo_info=repo_info, repo_info=repo_info,
repomd_records=repomd_records, repomd_records=repomd_records,
packages=packages,
) )
# convert the packages dict to more usable form # convert the packages dict to more usable form
# for future checking that a rpm from the module's artifacts # for future checking that a rpm from the module's artifacts
@ -315,16 +301,16 @@ class CreateExtraRepo(PackagesGenerator):
packages = { packages = {
f'{package.name}-{package.epoch}:{package.version}-' f'{package.name}-{package.epoch}:{package.version}-'
f'{package.release}.{package.arch}': f'{package.release}.{package.arch}':
package for package in packages_iterator package for package in packages.values()
} }
self._download_modules( self._download_modules(
modules_data=modules_data, modules_data=modules_data,
variant_info=variant_info, repo_info=repo_info,
packages=packages, packages=packages,
) )
self._download_packages( self._download_packages(
packages=packages, packages=packages,
variant_info=variant_info, repo_info=repo_info,
) )
self._dump_local_modules_yaml() self._dump_local_modules_yaml()
@ -336,6 +322,7 @@ def create_parser():
parser.add_argument( parser.add_argument(
'--bs-auth-token', '--bs-auth-token',
help='Auth token for Build System', help='Auth token for Build System',
required=True,
) )
parser.add_argument( parser.add_argument(
'--local-repo-path', '--local-repo-path',
@ -404,16 +391,11 @@ def cli_main():
packages = packages.split() packages = packages.split()
if repo.startswith('http://'): if repo.startswith('http://'):
repos_info.append( repos_info.append(
ExtraVariantInfo( ExtraRepoInfo(
name=repo_folder,
arch=repo_arch,
repos=[
RepoInfo(
path=repo, path=repo,
folder=repo_folder, folder=repo_folder,
is_remote=True, name=repo_folder,
) arch=repo_arch,
],
modules=modules, modules=modules,
packages=packages, packages=packages,
) )
@ -429,7 +411,7 @@ def cli_main():
) )
) )
cer = CreateExtraRepo( cer = CreateExtraRepo(
variants=repos_info, repos=repos_info,
bs_auth_token=args.bs_auth_token, bs_auth_token=args.bs_auth_token,
local_repository_path=args.local_repo_path, local_repository_path=args.local_repo_path,
clear_target_repo=args.clear_local_repo, clear_target_repo=args.clear_local_repo,


@ -9,60 +9,23 @@ https://github.com/rpm-software-management/createrepo_c/blob/master/examples/pyt
import argparse import argparse
import gzip import gzip
import json import json
import logging
import lzma import lzma
import os import os
import re import re
import tempfile import tempfile
from collections import defaultdict from collections import defaultdict
from itertools import tee from typing import AnyStr, Dict, List, Optional
from pathlib import Path
from typing import (
AnyStr,
Dict,
List,
Any,
Iterator,
Optional,
Tuple,
Union,
)
import binascii
from urllib.parse import urljoin
import createrepo_c as cr
import dnf.subject
import hawkey
import requests import requests
import rpm import rpm
import yaml import yaml
from createrepo_c import ( from createrepo_c import Package
Package, from dataclasses import dataclass
PackageIterator,
Repomd,
RepomdRecord,
)
from dataclasses import dataclass, field
from kobo.rpmlib import parse_nvra
logging.basicConfig(level=logging.INFO)
def _is_compressed_file(first_two_bytes: bytes, initial_bytes: bytes):
return binascii.hexlify(first_two_bytes) == initial_bytes
def is_gzip_file(first_two_bytes):
return _is_compressed_file(
first_two_bytes=first_two_bytes,
initial_bytes=b'1f8b',
)
def is_xz_file(first_two_bytes):
return _is_compressed_file(
first_two_bytes=first_two_bytes,
initial_bytes=b'fd37',
)
from .gather_modules import is_gzip_file, is_xz_file
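The two helpers being moved out detect compression from a file's leading magic bytes (1f 8b for gzip, fd 37 for xz). A small self-contained sketch of how they are typically used to sniff and decompress repodata blobs; sniff_and_decompress is a hypothetical helper, not part of the diff:

import binascii
import gzip
import lzma

def is_gzip_file(first_two_bytes: bytes) -> bool:
    # gzip streams start with the magic bytes 0x1f 0x8b
    return binascii.hexlify(first_two_bytes) == b'1f8b'

def is_xz_file(first_two_bytes: bytes) -> bool:
    # xz streams start with 0xfd 0x37 (the beginning of "\xfd7zXZ")
    return binascii.hexlify(first_two_bytes) == b'fd37'

def sniff_and_decompress(path: str) -> bytes:
    with open(path, 'rb') as fp:
        data = fp.read()
    if is_gzip_file(data[:2]):
        return gzip.decompress(data)
    if is_xz_file(data[:2]):
        return lzma.decompress(data)
    return data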
@dataclass @dataclass
class RepoInfo: class RepoInfo:
@ -70,76 +33,32 @@ class RepoInfo:
# 'appstream', 'baseos', etc. # 'appstream', 'baseos', etc.
# Or 'http://koji.cloudlinux.com/mirrors/rhel_mirror' if you are # Or 'http://koji.cloudlinux.com/mirrors/rhel_mirror' if you are
# using remote repo # using remote repo
path: str path: AnyStr
# name of folder with a repodata folder. E.g. 'baseos', 'appstream', etc # name of folder with a repodata folder. E.g. 'baseos', 'appstream', etc
folder: str folder: AnyStr
# name of repo. E.g. 'BaseOS', 'AppStream', etc
name: AnyStr
# architecture of repo. E.g. 'x86_64', 'i686', etc
arch: AnyStr
# Is a repo remote or local # Is a repo remote or local
is_remote: bool is_remote: bool
# Is a reference repository (usually it's a RHEL repo) # Is a reference repository (usually it's a RHEL repo)
# Layout of packages from such a repository will be taken as an example # Layout of packages from such a repository will be taken as an example
# Only the layout of a specific package (which doesn't exist # Only the layout of a specific package (which doesn't exist
# in a reference repository) will be taken as an example # in a reference repository) will be taken as an example
is_reference: bool = False is_reference: bool = False
# The packages from 'present' repo will be added to a variant.
# The packages from 'absent' repo will be removed from a variant.
repo_type: str = 'present'
@dataclass
class VariantInfo:
# name of variant. E.g. 'BaseOS', 'AppStream', etc
name: AnyStr
# architecture of variant. E.g. 'x86_64', 'i686', etc
arch: AnyStr
# The packages which will be not added to a variant
excluded_packages: List[str] = field(default_factory=list)
# Repos of a variant
repos: List[RepoInfo] = field(default_factory=list)
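Put together, the left-hand (pre-patch) schema nests repos under a variant. A sketch of an instantiated VariantInfo using exactly the fields defined above; the concrete values are illustrative:

variant = VariantInfo(
    name='AppStream',
    arch='x86_64',
    excluded_packages=[],          # per-variant exclusions
    repos=[
        RepoInfo(
            path='/var/repos',
            folder='appstream',
            is_remote=False,
            is_reference=True,     # its package layout wins ties
            repo_type='present',   # 'absent' repos remove packages instead
        ),
    ],
)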
class PackagesGenerator: class PackagesGenerator:
repo_arches = defaultdict(lambda: list(('noarch',)))
addon_repos = {
'x86_64': ['i686'],
'ppc64le': [],
'aarch64': [],
's390x': [],
'i686': [],
}
def __init__( def __init__(
self, self,
variants: List[VariantInfo], repos: List[RepoInfo],
excluded_packages: List[AnyStr], excluded_packages: List[AnyStr],
included_packages: List[AnyStr], included_packages: List[AnyStr],
): ):
self.variants = variants self.repos = repos
self.pkgs = dict()
self.excluded_packages = excluded_packages self.excluded_packages = excluded_packages
self.included_packages = included_packages self.included_packages = included_packages
self.tmp_files = [] # type: list[Path]
for arch, arch_list in self.addon_repos.items():
self.repo_arches[arch].extend(arch_list)
self.repo_arches[arch].append(arch)
def __del__(self):
for tmp_file in self.tmp_files:
if tmp_file.exists():
tmp_file.unlink()
@staticmethod
def _get_full_repo_path(repo_info: RepoInfo):
result = os.path.join(
repo_info.path,
repo_info.folder
)
if repo_info.is_remote:
result = urljoin(
repo_info.path + '/',
repo_info.folder,
)
return result
@staticmethod @staticmethod
def _warning_callback(warning_type, message): def _warning_callback(warning_type, message):
@ -149,7 +68,8 @@ class PackagesGenerator:
print(f'Warning message: "{message}"; warning type: "{warning_type}"') print(f'Warning message: "{message}"; warning type: "{warning_type}"')
return True return True
def get_remote_file_content(self, file_url: AnyStr) -> AnyStr: @staticmethod
def _get_remote_file_content(file_url: AnyStr) -> AnyStr:
""" """
Get content from a remote file and write it to a temp file Get content from a remote file and write it to a temp file
:param file_url: url of a remote file :param file_url: url of a remote file
@ -162,27 +82,89 @@ class PackagesGenerator:
file_request.raise_for_status() file_request.raise_for_status()
with tempfile.NamedTemporaryFile(delete=False) as file_stream: with tempfile.NamedTemporaryFile(delete=False) as file_stream:
file_stream.write(file_request.content) file_stream.write(file_request.content)
self.tmp_files.append(Path(file_stream.name))
return file_stream.name return file_stream.name
@staticmethod @staticmethod
def _parse_repomd(repomd_file_path: AnyStr) -> Repomd: def _parse_repomd(repomd_file_path: AnyStr) -> cr.Repomd:
""" """
Parse the repomd.xml file and create a Repomd object Parse the repomd.xml file and create a Repomd object
:param repomd_file_path: path to local repomd.xml :param repomd_file_path: path to local repomd.xml
""" """
return Repomd(repomd_file_path) return cr.Repomd(repomd_file_path)
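createrepo_c exposes repomd.xml as a Repomd object whose records point at the individual metadata files. A minimal sketch of reading one (the path is illustrative):

import createrepo_c as cr

repomd = cr.Repomd('/var/repos/baseos/repodata/repomd.xml')
for record in repomd.records:
    # record.type is e.g. 'primary', 'filelists', 'other' or 'modules';
    # record.location_href is a path relative to the repo root
    print(record.type, record.location_href)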
def _parse_primary_file(
self,
primary_file_path: AnyStr,
packages: Dict[AnyStr, cr.Package],
) -> None:
"""
Parse primary.xml.gz, extract package info from it and store it in
the packages dict
:param primary_file_path: path to local primary.xml.gz
:param packages: dictionary which will contain info about packages
from the repository
"""
cr.xml_parse_primary(
path=primary_file_path,
pkgcb=lambda pkg: packages.update({
pkg.pkgId: pkg,
}),
do_files=False,
warningcb=self._warning_callback,
)
def _parse_filelists_file(
self,
filelists_file_path: AnyStr,
packages: Dict[AnyStr, cr.Package],
) -> None:
"""
Parse filelists.xml.gz, extract package info from it and store it in
the packages dict
:param filelists_file_path: path to local filelists.xml.gz
:param packages: dictionary which will contain info about packages
from the repository
"""
cr.xml_parse_filelists(
path=filelists_file_path,
newpkgcb=lambda pkg_id, name, arch: packages.get(
pkg_id,
None,
),
warningcb=self._warning_callback,
)
def _parse_other_file(
self,
other_file_path: AnyStr,
packages: Dict[AnyStr, cr.Package],
) -> None:
"""
Parse other.xml.gz, extract package info from it and store it in
the packages dict
:param other_file_path: path to local other.xml.gz
:param packages: dictionary which will contain info about packages
from the repository
"""
cr.xml_parse_other(
path=other_file_path,
newpkgcb=lambda pkg_id, name, arch: packages.get(
pkg_id,
None,
),
warningcb=self._warning_callback,
)
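The three callbacks cooperate: parsing primary fills the pkgId -> Package dict, then filelists and other look entries up by pkgId and enrich them in place. A self-contained sketch of the same pattern outside the class (file paths are illustrative):

import createrepo_c as cr

packages = {}  # pkgId -> cr.Package

def warningcb(warning_type, message):
    print(f'Warning message: "{message}"; warning type: "{warning_type}"')
    return True  # keep parsing

cr.xml_parse_primary(
    path='repodata/primary.xml.gz',
    pkgcb=lambda pkg: packages.update({pkg.pkgId: pkg}),
    do_files=False,
    warningcb=warningcb,
)
cr.xml_parse_filelists(
    path='repodata/filelists.xml.gz',
    newpkgcb=lambda pkg_id, name, arch: packages.get(pkg_id),
    warningcb=warningcb,
)
cr.xml_parse_other(
    path='repodata/other.xml.gz',
    newpkgcb=lambda pkg_id, name, arch: packages.get(pkg_id),
    warningcb=warningcb,
)
print(len(packages), 'packages parsed')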
@classmethod @classmethod
def _parse_modules_file( def _parse_modules_file(
cls, cls,
modules_file_path: AnyStr, modules_file_path: AnyStr,
) -> Iterator[Any]: ) -> List[Dict]:
""" """
Parse modules.yaml.gz and return the parsed data Parse modules.yaml.gz and return the parsed data
:param modules_file_path: path to local modules.yaml.gz :param modules_file_path: path to local modules.yaml.gz
:return: List of dicts, one for each module in a repo :return: List of dicts, one for each module in a repo
""" """
with open(modules_file_path, 'rb') as modules_file: with open(modules_file_path, 'rb') as modules_file:
@ -199,7 +181,7 @@ class PackagesGenerator:
def _get_repomd_records( def _get_repomd_records(
self, self,
repo_info: RepoInfo, repo_info: RepoInfo,
) -> List[RepomdRecord]: ) -> List[cr.RepomdRecord]:
""" """
Fetch and parse repomd.xml and extract the repomd records from it Fetch and parse repomd.xml and extract the repomd records from it
:param repo_info: structure which contains info about a current repo :param repo_info: structure which contains info about a current repo
@ -212,37 +194,36 @@ class PackagesGenerator:
'repomd.xml', 'repomd.xml',
) )
if repo_info.is_remote: if repo_info.is_remote:
repomd_file_path = urljoin( repomd_file_path = self._get_remote_file_content(repomd_file_path)
urljoin( else:
repo_info.path + '/', repomd_file_path = repomd_file_path
repo_info.folder
) + '/',
'repodata/repomd.xml'
)
repomd_file_path = self.get_remote_file_content(repomd_file_path)
repomd_object = self._parse_repomd(repomd_file_path) repomd_object = self._parse_repomd(repomd_file_path)
if repo_info.is_remote: if repo_info.is_remote:
os.remove(repomd_file_path) os.remove(repomd_file_path)
return repomd_object.records return repomd_object.records
def _download_repomd_records( def _parse_repomd_records(
self, self,
repo_info: RepoInfo, repo_info: RepoInfo,
repomd_records: List[RepomdRecord], repomd_records: List[cr.RepomdRecord],
repomd_records_dict: Dict[str, str], packages: Dict[AnyStr, cr.Package],
): ) -> Optional[List[Dict]]:
""" """
Download repomd records Parse repomd records and extract package info from the repodata files
:param repo_info: structure which contains info about a current repo :param repo_info: structure which contains info about a current repo
:param repomd_records: list with repomd records :param repomd_records: list with repomd records
:param repomd_records_dict: dict with paths to repodata files :param packages: dictionary which will contain info about packages
from the repository
:return: List of dicts, one for each module in a repo, if it contains
module info; otherwise returns None
""" """
modules_data = []
for repomd_record in repomd_records: for repomd_record in repomd_records:
if repomd_record.type not in ( if repomd_record.type not in (
'primary', 'primary',
'filelists', 'filelists',
'other', 'other',
'modules',
): ):
continue continue
repomd_record_file_path = os.path.join( repomd_record_file_path = os.path.join(
@ -251,35 +232,25 @@ class PackagesGenerator:
repomd_record.location_href, repomd_record.location_href,
) )
if repo_info.is_remote: if repo_info.is_remote:
repomd_record_file_path = self.get_remote_file_content( repomd_record_file_path = self._get_remote_file_content(
repomd_record_file_path) repomd_record_file_path,
repomd_records_dict[repomd_record.type] = repomd_record_file_path )
if repomd_record.type == 'modules':
def _parse_module_repomd_record( modules_data = self._parse_modules_file(
repomd_record_file_path,
)
else:
parse_file_method = getattr(
self, self,
repo_info: RepoInfo, f'_parse_{repomd_record.type}_file'
repomd_records: List[RepomdRecord], )
) -> List[Dict]: parse_file_method(
""" repomd_record_file_path,
Download repomd records packages,
:param repo_info: structure which contains info about a current repo
:param repomd_records: list with repomd records
"""
for repomd_record in repomd_records:
if repomd_record.type != 'modules':
continue
repomd_record_file_path = os.path.join(
repo_info.path,
repo_info.folder,
repomd_record.location_href,
) )
if repo_info.is_remote: if repo_info.is_remote:
repomd_record_file_path = self.get_remote_file_content( os.remove(repomd_record_file_path)
repomd_record_file_path) return list(modules_data)
return list(self._parse_modules_file(
repomd_record_file_path,
))
return []
@staticmethod @staticmethod
def compare_pkgs_version(package_1: Package, package_2: Package) -> int: def compare_pkgs_version(package_1: Package, package_2: Package) -> int:
@ -295,162 +266,183 @@ class PackagesGenerator:
) )
return rpm.labelCompare(version_tuple_1, version_tuple_2) return rpm.labelCompare(version_tuple_1, version_tuple_2)
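compare_pkgs_version reduces to rpm.labelCompare over (epoch, version, release) string triples, which returns -1, 0 or 1:

import rpm

older = ('0', '1.0', '1.el9')   # (epoch, version, release)
newer = ('0', '1.0', '2.el9')

assert rpm.labelCompare(older, newer) == -1   # first argument is older
assert rpm.labelCompare(newer, older) == 1
assert rpm.labelCompare(older, older) == 0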
def get_packages_iterator(
self,
repo_info: RepoInfo,
) -> Union[PackageIterator, Iterator]:
full_repo_path = self._get_full_repo_path(repo_info)
pkgs_iterator = self.pkgs.get(full_repo_path)
if pkgs_iterator is None:
repomd_records = self._get_repomd_records(
repo_info=repo_info,
)
repomd_records_dict = {} # type: Dict[str, str]
self._download_repomd_records(
repo_info=repo_info,
repomd_records=repomd_records,
repomd_records_dict=repomd_records_dict,
)
pkgs_iterator = PackageIterator(
primary_path=repomd_records_dict['primary'],
filelists_path=repomd_records_dict['filelists'],
other_path=repomd_records_dict['other'],
warningcb=self._warning_callback,
)
pkgs_iterator, self.pkgs[full_repo_path] = tee(pkgs_iterator)
return pkgs_iterator
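PackageIterator streams packages lazily from the three metadata files; the itertools.tee call above keeps a replayable copy so the same repo is never parsed twice. A standalone sketch of the same idea (paths are illustrative):

from itertools import tee

import createrepo_c as cr

pkgs_iterator = cr.PackageIterator(
    primary_path='repodata/primary.xml.gz',
    filelists_path='repodata/filelists.xml.gz',
    other_path='repodata/other.xml.gz',
    warningcb=lambda warning_type, message: True,
)
# one copy is consumed now, the other is cached for a later pass
current_pass, cached = tee(pkgs_iterator)
for package in current_pass:
    print(package.name, package.arch)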
def get_package_arch(
self,
package: Package,
variant_arch: str,
) -> str:
result = variant_arch
if package.arch in self.repo_arches[variant_arch]:
result = package.arch
return result
def is_skipped_module_package(
self,
package: Package,
variant_arch: str,
) -> bool:
package_key = self.get_package_key(package, variant_arch)
# Even a module package will be added to packages.json if
# it presents in the list of included packages
return 'module' in package.release and not any(
re.search(
f'^{included_pkg}$',
package_key,
) or included_pkg in (package.name, package_key)
for included_pkg in self.included_packages
)
def is_excluded_package(
self,
package: Package,
variant_arch: str,
excluded_packages: List[str],
) -> bool:
package_key = self.get_package_key(package, variant_arch)
return any(
re.search(
f'^{excluded_pkg}$',
package_key,
) or excluded_pkg in (package.name, package_key)
for excluded_pkg in excluded_packages
)
@staticmethod
def get_source_rpm_name(package: Package) -> str:
source_rpm_nvra = parse_nvra(package.rpm_sourcerpm)
return source_rpm_nvra['name']
def get_package_key(self, package: Package, variant_arch: str) -> str:
return (
f'{package.name}.'
f'{self.get_package_arch(package, variant_arch)}'
)
def generate_packages_json( def generate_packages_json(
self self
) -> Dict[AnyStr, Dict[AnyStr, Dict[AnyStr, List[AnyStr]]]]: ) -> Dict[AnyStr, Dict[AnyStr, Dict[AnyStr, List[AnyStr]]]]:
""" """
Generate packages.json Generate packages.json
""" """
packages = defaultdict(lambda: defaultdict(lambda: { packages_json = defaultdict(
'variants': list(), lambda: defaultdict(
})) lambda: defaultdict(
for variant_info in self.variants: list,
for repo_info in variant_info.repos:
is_reference = repo_info.is_reference
for package in self.get_packages_iterator(repo_info=repo_info):
if self.is_skipped_module_package(
package=package,
variant_arch=variant_info.arch,
):
continue
if self.is_excluded_package(
package=package,
variant_arch=variant_info.arch,
excluded_packages=self.excluded_packages,
):
continue
if self.is_excluded_package(
package=package,
variant_arch=variant_info.arch,
excluded_packages=variant_info.excluded_packages,
):
continue
package_key = self.get_package_key(
package,
variant_info.arch,
) )
source_rpm_name = self.get_source_rpm_name(package) )
package_info = packages[source_rpm_name][package_key] )
if 'is_reference' not in package_info: all_packages = defaultdict(lambda: {'variants': list()})
package_info['variants'].append(variant_info.name) for repo_info in self.repos:
package_info['is_reference'] = is_reference repo_arches = [
package_info['package'] = package repo_info.arch,
elif not package_info['is_reference'] or \ 'noarch',
package_info['is_reference'] == is_reference and \ ]
self.compare_pkgs_version( if repo_info.arch == 'x86_64':
package_1=package, repo_arches.extend([
package_2=package_info['package'], 'i686',
) > 0: 'i386',
package_info['variants'] = [variant_info.name] ])
package_info['is_reference'] = is_reference packages = {} # type: Dict[AnyStr, cr.Package]
package_info['package'] = package repomd_records = self._get_repomd_records(
elif self.compare_pkgs_version( repo_info=repo_info,
package_1=package, )
package_2=package_info['package'], self._parse_repomd_records(
) == 0 and repo_info.repo_type != 'absent': repo_info=repo_info,
package_info['variants'].append(variant_info.name) repomd_records=repomd_records,
result = defaultdict(lambda: defaultdict( packages=packages,
lambda: defaultdict(list), )
)) for package in packages.values():
for variant_info in self.variants: if package.arch not in repo_arches:
for source_rpm_name, packages_info in packages.items(): package_arch = repo_info.arch
for package_key, package_info in packages_info.items(): else:
variant_pkgs = result[variant_info.name][variant_info.arch] package_arch = package.arch
if variant_info.name not in package_info['variants']: package_key = f'{package.name}.{package_arch}'
if 'module' in package.release and not any(
re.search(included_package, package.name)
for included_package in self.included_packages
):
# Even a module package will be added to packages.json if
# it presents in the list of included packages
continue continue
variant_pkgs[source_rpm_name].append(package_key) if package_key not in all_packages:
return result all_packages[package_key]['variants'].append(
repo_info.name
)
all_packages[package_key]['arch'] = repo_info.arch
all_packages[package_key]['package'] = package
all_packages[package_key]['type'] = repo_info.is_reference
# replace an older package if it's not reference or
# a newer package is from reference repo
elif (not all_packages[package_key]['type'] or
all_packages[package_key]['type'] ==
repo_info.is_reference) and \
self.compare_pkgs_version(
package,
all_packages[package_key]['package']
) > 0:
all_packages[package_key]['variants'] = [repo_info.name]
all_packages[package_key]['arch'] = repo_info.arch
all_packages[package_key]['package'] = package
elif self.compare_pkgs_version(
package,
all_packages[package_key]['package']
) == 0:
all_packages[package_key]['variants'].append(
repo_info.name
)
for package_dict in all_packages.values():
repo_arches = [
package_dict['arch'],
'noarch',
]
if package_dict['arch'] == 'x86_64':
repo_arches.extend([
'i686',
'i386',
])
for variant in package_dict['variants']:
repo_arch = package_dict['arch']
package = package_dict['package']
package_name = package.name
if package.arch not in repo_arches:
package_arch = package_dict['arch']
else:
package_arch = package.arch
if any(re.search(excluded_package, package_name)
for excluded_package in self.excluded_packages):
continue
src_package_name = dnf.subject.Subject(
package.rpm_sourcerpm,
).get_nevra_possibilities(
forms=hawkey.FORM_NEVRA,
)
if len(src_package_name) > 1:
# We should stop utility if we can't get exact name of srpm
raise ValueError(
'We can\'t get exact name of srpm '
f'by its NEVRA "{package.rpm_sourcerpm}"'
)
else:
src_package_name = src_package_name[0].name
pkgs_list = packages_json[variant][
repo_arch][src_package_name]
added_pkg = f'{package_name}.{package_arch}'
if added_pkg not in pkgs_list:
pkgs_list.append(added_pkg)
return packages_json
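The left-hand (pre-patch) merge loop boils down to one precedence rule per (source rpm, name.arch) slot. A distilled, hypothetical helper expressing the rule as read from the hunk above (cmp is the labelCompare result of the incoming package vs. the stored one):

def should_replace(stored_is_reference: bool,
                   incoming_is_reference: bool,
                   cmp: int) -> bool:
    # A stored entry that did not come from a reference repo is always
    # displaced; otherwise the incoming package must have the same
    # reference status and compare strictly newer.
    return (not stored_is_reference
            or (stored_is_reference == incoming_is_reference and cmp > 0))

Equal versions (cmp == 0) never replace the stored entry; they only append the variant name, and only when the repo is not of type 'absent'.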
def create_parser(): def create_parser():
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
parser.add_argument( parser.add_argument(
'-c', '--repo-path',
'--config', action='append',
type=Path, help='Path to a folder with repofolders. E.g. "/var/repos" or '
default=Path('config.yaml'), '"http://koji.cloudlinux.com/mirrors/rhel_mirror"',
required=False, required=True,
help='Path to a config', )
parser.add_argument(
'--repo-folder',
action='append',
help='A folder which contains folder repodata . E.g. "baseos-stream"',
required=True,
)
parser.add_argument(
'--repo-arch',
action='append',
help='What architecture packages a repository contains. E.g. "x86_64"',
required=True,
)
parser.add_argument(
'--repo-name',
action='append',
help='Name of a repository. E.g. "AppStream"',
required=True,
)
parser.add_argument(
'--is-remote',
action='append',
type=str,
help='A repository is remote or local',
choices=['yes', 'no'],
required=True,
)
parser.add_argument(
'--is-reference',
action='append',
type=str,
help='A repository is used as reference for packages layout',
choices=['yes', 'no'],
required=True,
)
parser.add_argument(
'--excluded-packages',
nargs='+',
type=str,
default=[],
help='A list of globally excluded packages from generated json.'
'All of list elements should be separated by space',
required=False,
)
parser.add_argument(
'--included-packages',
nargs='+',
type=str,
default=[],
help='A list of globally included packages from generated json.'
'All of list elements should be separated by space',
required=False,
) )
parser.add_argument( parser.add_argument(
'-o',
'--json-output-path', '--json-output-path',
type=str, type=str,
help='Full path to output json file', help='Full path to output json file',
@ -460,45 +452,30 @@ def create_parser():
return parser return parser
def read_config(config_path: Path) -> Optional[Dict]:
if not config_path.exists():
logging.error('A config by path "%s" does not exist', config_path)
exit(1)
with config_path.open('r') as config_fd:
return yaml.safe_load(config_fd)
def process_config(config_data: Dict) -> Tuple[
List[VariantInfo],
List[str],
List[str],
]:
excluded_packages = config_data.get('excluded_packages', [])
included_packages = config_data.get('included_packages', [])
variants = [VariantInfo(
name=variant_name,
arch=variant_info['arch'],
excluded_packages=variant_info.get('excluded_packages', []),
repos=[RepoInfo(
path=variant_repo['path'],
folder=variant_repo['folder'],
is_remote=variant_repo['remote'],
is_reference=variant_repo['reference'],
repo_type=variant_repo.get('repo_type', 'present'),
) for variant_repo in variant_info['repos']]
) for variant_name, variant_info in config_data['variants'].items()]
return variants, excluded_packages, included_packages
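read_config/process_config replace the long argparse list with a single YAML file. The in-memory equivalent of such a config, in the exact shape process_config expects (the values are illustrative):

config_data = {
    'excluded_packages': ['*-debuginfo'],
    'included_packages': [],
    'variants': {
        'BaseOS': {
            'arch': 'x86_64',
            'repos': [{
                'path': '/var/repos',
                'folder': 'baseos',
                'remote': False,
                'reference': True,
                # 'repo_type' is optional and defaults to 'present'
            }],
        },
    },
}

variants, excluded_packages, included_packages = process_config(config_data)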
def cli_main(): def cli_main():
args = create_parser().parse_args() args = create_parser().parse_args()
variants, excluded_packages, included_packages = process_config( repos = []
config_data=read_config(args.config) for repo_path, repo_folder, repo_name, \
) repo_arch, is_remote, is_reference in zip(
args.repo_path,
args.repo_folder,
args.repo_name,
args.repo_arch,
args.is_remote,
args.is_reference,
):
repos.append(RepoInfo(
path=repo_path,
folder=repo_folder,
name=repo_name,
arch=repo_arch,
is_remote=True if is_remote == 'yes' else False,
is_reference=True if is_reference == 'yes' else False
))
pg = PackagesGenerator( pg = PackagesGenerator(
variants=variants, repos=repos,
excluded_packages=excluded_packages, excluded_packages=args.excluded_packages,
included_packages=included_packages, included_packages=args.included_packages,
) )
result = pg.generate_packages_json() result = pg.generate_packages_json()
with open(args.json_output_path, 'w') as packages_file: with open(args.json_output_path, 'w') as packages_file:


@ -16,10 +16,7 @@ def parse_args():
parser = argparse.ArgumentParser(add_help=True) parser = argparse.ArgumentParser(add_help=True)
parser.add_argument( parser.add_argument(
"compose", "compose", metavar="<compose-path>", nargs=1, help="path to compose",
metavar="<compose-path>",
nargs=1,
help="path to compose",
) )
parser.add_argument( parser.add_argument(
"--arch", "--arch",


@ -14,9 +14,6 @@ def send(cmd, data):
topic = "compose.%s" % cmd.replace("-", ".").lower() topic = "compose.%s" % cmd.replace("-", ".").lower()
try: try:
msg = fedora_messaging.api.Message(topic="pungi.{}".format(topic), body=data) msg = fedora_messaging.api.Message(topic="pungi.{}".format(topic), body=data)
if cmd == "ostree":
# https://pagure.io/fedora-infrastructure/issue/10899
msg.priority = 3
fedora_messaging.api.publish(msg) fedora_messaging.api.publish(msg)
except fedora_messaging.exceptions.PublishReturned as e: except fedora_messaging.exceptions.PublishReturned as e:
print("Fedora Messaging broker rejected message %s: %s" % (msg.id, e)) print("Fedora Messaging broker rejected message %s: %s" % (msg.id, e))


@ -1,146 +1,72 @@
import binascii
import gzip import gzip
import lzma import lzma
import os import os
from argparse import ArgumentParser, FileType from argparse import ArgumentParser, FileType
from glob import iglob
from io import BytesIO from io import BytesIO
from pathlib import Path from pathlib import Path
from typing import List, AnyStr, Iterable, Union, Optional from typing import List, AnyStr
import logging import logging
from urllib.parse import urljoin
import yaml import yaml
import createrepo_c as cr import createrepo_c as cr
from typing.io import BinaryIO from typing.io import BinaryIO
from .create_packages_json import PackagesGenerator, is_gzip_file, is_xz_file
EMPTY_FILE = '.empty' EMPTY_FILE = '.empty'
def read_modules_yaml(modules_yaml_path: Union[str, Path]) -> BytesIO: def _is_compressed_file(first_two_bytes: bytes, initial_bytes: bytes):
with open(modules_yaml_path, 'rb') as fp: return binascii.hexlify(first_two_bytes) == initial_bytes
return BytesIO(fp.read())
def grep_list_of_modules_yaml(repos_path: AnyStr) -> Iterable[BytesIO]: def is_gzip_file(first_two_bytes):
return _is_compressed_file(
first_two_bytes=first_two_bytes,
initial_bytes=b'1f8b',
)
def is_xz_file(first_two_bytes):
return _is_compressed_file(
first_two_bytes=first_two_bytes,
initial_bytes=b'fd37',
)
def grep_list_of_modules_yaml_gz(repo_path: AnyStr) -> List[BytesIO]:
""" """
Find all valid *modules.yaml.gz in repos Find all valid *modules.yaml.gz in repos
:param repos_path: path to a directory which contains repo dirs :param repo_path: path to a directory which contains repo dirs
:return: iterable object of content from *modules.yaml.* :return: list of content from *modules.yaml.gz
""" """
return ( result = []
read_modules_yaml_from_specific_repo(repo_path=Path(path).parent) for path in Path(repo_path).rglob('repomd.xml'):
for path in iglob( repo_dir_path = Path(path.parent).parent
str(Path(repos_path).joinpath('**/repodata')), repomd_obj = cr.Repomd(str(path))
recursive=True
)
)
def _is_remote(path: str):
return any(str(path).startswith(protocol)
for protocol in ('http', 'https'))
def read_modules_yaml_from_specific_repo(
repo_path: Union[str, Path]
) -> Optional[BytesIO]:
"""
Read modules_yaml from a specific repo (remote or local)
:param repo_path: path/url to a specific repo
(final dir should contain dir `repodata`)
:return: iterable object of content from *modules.yaml.*
"""
if _is_remote(repo_path):
repomd_url = urljoin(
repo_path + '/',
'repodata/repomd.xml',
)
packages_generator = PackagesGenerator(
variants=[],
excluded_packages=[],
included_packages=[],
)
repomd_file_path = packages_generator.get_remote_file_content(
file_url=repomd_url
)
else:
repomd_file_path = os.path.join(
repo_path,
'repodata/repomd.xml',
)
repomd_obj = cr.Repomd(str(repomd_file_path))
for record in repomd_obj.records: for record in repomd_obj.records:
if record.type != 'modules': if record.type != 'modules':
continue continue
else: with open(os.path.join(
if _is_remote(repo_path): repo_dir_path,
modules_yaml_url = urljoin(
repo_path + '/',
record.location_href, record.location_href,
), 'rb') as fp:
result.append(
BytesIO(fp.read())
) )
packages_generator = PackagesGenerator( return result
variants=[],
excluded_packages=[],
included_packages=[],
)
modules_yaml_path = packages_generator.get_remote_file_content(
file_url=modules_yaml_url
)
else:
modules_yaml_path = os.path.join(
repo_path,
record.location_href,
)
return read_modules_yaml(modules_yaml_path=modules_yaml_path)
else:
return None
def _should_grep_defaults( def collect_modules(modules_paths: List[BinaryIO], target_dir: str):
document_type: str,
grep_only_modules_data: bool = False,
grep_only_modules_defaults_data: bool = False,
) -> bool:
xor_flag = grep_only_modules_data == grep_only_modules_defaults_data
if document_type == 'modulemd' and (xor_flag or grep_only_modules_data):
return True
return False
def _should_grep_modules(
document_type: str,
grep_only_modules_data: bool = False,
grep_only_modules_defaults_data: bool = False,
) -> bool:
xor_flag = grep_only_modules_data == grep_only_modules_defaults_data
if document_type == 'modulemd-defaults' and \
(xor_flag or grep_only_modules_defaults_data):
return True
return False
def collect_modules(
modules_paths: List[BinaryIO],
target_dir: str,
grep_only_modules_data: bool = False,
grep_only_modules_defaults_data: bool = False,
):
""" """
Read given modules.yaml.gz files and export modules Read given modules.yaml.gz files and export modules
and modulemd files from them. and modulemd files from them.
Returns: Returns:
object: object:
""" """
xor_flag = grep_only_modules_defaults_data is grep_only_modules_data
modules_path = os.path.join(target_dir, 'modules') modules_path = os.path.join(target_dir, 'modules')
module_defaults_path = os.path.join(target_dir, 'module_defaults') module_defaults_path = os.path.join(target_dir, 'module_defaults')
if grep_only_modules_data or xor_flag:
os.makedirs(modules_path, exist_ok=True) os.makedirs(modules_path, exist_ok=True)
if grep_only_modules_defaults_data or xor_flag:
os.makedirs(module_defaults_path, exist_ok=True) os.makedirs(module_defaults_path, exist_ok=True)
# Module defaults can be empty, but pungi detects # Module defaults can be empty, but pungi detects
# an empty folder while copying and raises an exception in that case # an empty folder while copying and raises an exception in that case
@ -154,20 +80,11 @@ def collect_modules(
data = lzma.decompress(data) data = lzma.decompress(data)
documents = yaml.load_all(data, Loader=yaml.BaseLoader) documents = yaml.load_all(data, Loader=yaml.BaseLoader)
for doc in documents: for doc in documents:
path = None if doc['document'] == 'modulemd-defaults':
if _should_grep_modules(
doc['document'],
grep_only_modules_data,
grep_only_modules_defaults_data,
):
name = f"{doc['data']['module']}.yaml" name = f"{doc['data']['module']}.yaml"
path = os.path.join(module_defaults_path, name) path = os.path.join(module_defaults_path, name)
logging.info('Found %s module defaults', name) logging.info('Found %s module defaults', name)
elif _should_grep_defaults( else:
doc['document'],
grep_only_modules_data,
grep_only_modules_defaults_data,
):
# pungi.phases.pkgset.sources.source_koji.get_koji_modules # pungi.phases.pkgset.sources.source_koji.get_koji_modules
stream = doc['data']['stream'].replace('-', '_') stream = doc['data']['stream'].replace('-', '_')
doc_data = doc['data'] doc_data = doc['data']
@ -189,24 +106,13 @@ def collect_modules(
'RPM %s does not have explicit list of artifacts', 'RPM %s does not have explicit list of artifacts',
name name
) )
if path is not None:
with open(path, 'w') as f: with open(path, 'w') as f:
yaml.dump(doc, f, default_flow_style=False) yaml.dump(doc, f, default_flow_style=False)
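modules.yaml is a multi-document YAML stream; the loop above dispatches on each document's 'document' key. A minimal standalone sketch of the same dispatch (the input path is illustrative and the stream is assumed already decompressed):

import yaml

with open('modules.yaml') as fp:
    for doc in yaml.load_all(fp, Loader=yaml.BaseLoader):
        if doc['document'] == 'modulemd-defaults':
            print('defaults for module:', doc['data']['module'])
        elif doc['document'] == 'modulemd':
            print('module:', doc['data']['name'],
                  'stream:', doc['data']['stream'])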
def cli_main(): def cli_main():
parser = ArgumentParser() parser = ArgumentParser()
content_type_group = parser.add_mutually_exclusive_group(required=False)
content_type_group.add_argument(
'--get-only-modules-data',
action='store_true',
help='Parse and get only modules data',
)
content_type_group.add_argument(
'--get-only-modules-defaults-data',
action='store_true',
help='Parse and get only modules_defaults data',
)
path_group = parser.add_mutually_exclusive_group(required=True) path_group = parser.add_mutually_exclusive_group(required=True)
path_group.add_argument( path_group.add_argument(
'-p', '--path', '-p', '--path',
@ -221,33 +127,16 @@ def cli_main():
default=None, default=None,
help='Path to a directory which contains repodirs. E.g. /var/repos' help='Path to a directory which contains repodirs. E.g. /var/repos'
) )
path_group.add_argument(
'-rd', '--repodata-paths',
required=False,
type=str,
nargs='+',
default=[],
help='Paths/urls to the directories with directory `repodata`',
)
parser.add_argument('-t', '--target', required=True) parser.add_argument('-t', '--target', required=True)
namespace = parser.parse_args() namespace = parser.parse_args()
if namespace.repodata_paths: if namespace.repo_path is None:
modules = []
for repodata_path in namespace.repodata_paths:
modules.append(read_modules_yaml_from_specific_repo(
repodata_path,
))
elif namespace.path is not None:
modules = namespace.path modules = namespace.path
else: else:
modules = grep_list_of_modules_yaml(namespace.repo_path) modules = grep_list_of_modules_yaml_gz(namespace.repo_path)
modules = list(filter(lambda i: i is not None, modules))
collect_modules( collect_modules(
modules, modules,
namespace.target, namespace.target,
namespace.get_only_modules_data,
namespace.get_only_modules_defaults_data,
) )


@ -1,53 +1,39 @@
import re
from argparse import ArgumentParser from argparse import ArgumentParser
import os import os
from glob import iglob
from typing import List from typing import List
from pathlib import Path
from dataclasses import dataclass from attr import dataclass
from productmd.common import parse_nvra from productmd.common import parse_nvra
@dataclass @dataclass
class Package: class Package:
nvra: dict nvra: str
path: Path path: str
def search_rpms(top_dir: Path) -> List[Package]: def search_rpms(top_dir) -> List[Package]:
""" """
Search for all *.rpm files recursively Search for all *.rpm files recursively
in the given top directory in the given top directory
Returns: Returns:
list: list of Package objects list: list of Package objects
""" """
return [Package( rpms = []
nvra=parse_nvra(Path(path).stem), for root, dirs, files in os.walk(top_dir):
path=Path(path), path = root.split(os.sep)
) for path in iglob(str(top_dir.joinpath('**/*.rpm')), recursive=True)] for file in files:
if not file.endswith('.rpm'):
continue
def is_excluded_package( nvra, _ = os.path.splitext(file)
package: Package, rpms.append(
excluded_packages: List[str], Package(nvra=nvra, path=os.path.join('/', *path, file))
) -> bool:
package_key = f'{package.nvra["name"]}.{package.nvra["arch"]}'
return any(
re.search(
f'^{excluded_pkg}$',
package_key,
) or excluded_pkg in (package.nvra['name'], package_key)
for excluded_pkg in excluded_packages
) )
return rpms
def copy_rpms( def copy_rpms(packages: List[Package], target_top_dir: str):
packages: List[Package],
target_top_dir: Path,
excluded_packages: List[str],
):
""" """
Search synced repos for rpms and prepare Search synced repos for rpms and prepare
a koji-like structure for pungi a koji-like structure for pungi
@ -59,37 +45,30 @@ def copy_rpms(
Nothing: Nothing:
""" """
for package in packages: for package in packages:
if is_excluded_package(package, excluded_packages): info = parse_nvra(package.nvra)
continue
target_arch_dir = target_top_dir.joinpath(package.nvra['arch']) target_arch_dir = os.path.join(target_top_dir, info['arch'])
target_file = target_arch_dir.joinpath(package.path.name)
os.makedirs(target_arch_dir, exist_ok=True) os.makedirs(target_arch_dir, exist_ok=True)
if not target_file.exists(): target_file = os.path.join(target_arch_dir, os.path.basename(package.path))
if not os.path.exists(target_file):
try: try:
os.link(package.path, target_file) os.link(package.path, target_file)
except OSError: except OSError:
# hardlink failed, try symlinking # hardlink failed, try symlinking
target_file.symlink_to(package.path) os.symlink(package.path, target_file)
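Both sides implement the same placement strategy: hardlink when source and target sit on one filesystem, otherwise fall back to a symlink created at the target and pointing back at the source (note the direction: Path.symlink_to makes the path it is called on the link). As a standalone, hypothetical helper:

import os
from pathlib import Path

def place_package(source: Path, target: Path) -> None:
    target.parent.mkdir(parents=True, exist_ok=True)
    if target.exists():
        return
    try:
        os.link(source, target)        # free when on the same filesystem
    except OSError:
        target.symlink_to(source)      # link lives at target -> source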
def cli_main(): def cli_main():
parser = ArgumentParser() parser = ArgumentParser()
parser.add_argument('-p', '--path', required=True, type=Path) parser.add_argument('-p', '--path', required=True)
parser.add_argument('-t', '--target', required=True, type=Path) parser.add_argument('-t', '--target', required=True)
parser.add_argument(
'-e',
'--excluded-packages',
required=False,
nargs='+',
type=str,
default=[],
)
namespace = parser.parse_args() namespace = parser.parse_args()
rpms = search_rpms(namespace.path) rpms = search_rpms(namespace.path)
copy_rpms(rpms, namespace.target, namespace.excluded_packages) copy_rpms(rpms, namespace.target)
if __name__ == '__main__': if __name__ == '__main__':


@ -319,6 +319,7 @@ def get_arguments(config):
def main(): def main():
config = pungi.config.Config() config = pungi.config.Config()
opts = get_arguments(config) opts = get_arguments(config)
@ -475,13 +476,14 @@ def main():
else: else:
mypungi.downloadSRPMs() mypungi.downloadSRPMs()
print("RPM size: %s MiB" % (mypungi.size_packages() / 1024**2)) print("RPM size: %s MiB" % (mypungi.size_packages() / 1024 ** 2))
if not opts.nodebuginfo: if not opts.nodebuginfo:
print( print(
"DEBUGINFO size: %s MiB" % (mypungi.size_debuginfo() / 1024**2) "DEBUGINFO size: %s MiB"
% (mypungi.size_debuginfo() / 1024 ** 2)
) )
if not opts.nosource: if not opts.nosource:
print("SRPM size: %s MiB" % (mypungi.size_srpms() / 1024**2)) print("SRPM size: %s MiB" % (mypungi.size_srpms() / 1024 ** 2))
# Furthermore (but without the yumlock...) # Furthermore (but without the yumlock...)
if not opts.sourceisos: if not opts.sourceisos:


@ -18,18 +18,13 @@ from pungi.util import temp_dir
def get_parser(): def get_parser():
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
parser.add_argument( parser.add_argument(
"--profiler", "--profiler", action="store_true",
action="store_true",
) )
parser.add_argument( parser.add_argument(
"--arch", "--arch", required=True,
required=True,
) )
parser.add_argument( parser.add_argument(
"--config", "--config", metavar="PATH", required=True, help="path to kickstart config file",
metavar="PATH",
required=True,
help="path to kickstart config file",
) )
parser.add_argument( parser.add_argument(
"--download-to", "--download-to",
@ -47,9 +42,7 @@ def get_parser():
group = parser.add_argument_group("Gather options") group = parser.add_argument_group("Gather options")
group.add_argument( group.add_argument(
"--nodeps", "--nodeps", action="store_true", help="disable resolving dependencies",
action="store_true",
help="disable resolving dependencies",
) )
group.add_argument( group.add_argument(
"--selfhosting", "--selfhosting",
@ -68,9 +61,7 @@ def get_parser():
choices=["none", "all", "build"], choices=["none", "all", "build"],
) )
group.add_argument( group.add_argument(
"--multilib", "--multilib", metavar="[METHOD]", action="append",
metavar="[METHOD]",
action="append",
) )
group.add_argument( group.add_argument(
"--tempdir", "--tempdir",
@ -97,7 +88,6 @@ def main(ns, persistdir, cachedir):
dnf_conf = Conf(ns.arch) dnf_conf = Conf(ns.arch)
dnf_conf.persistdir = persistdir dnf_conf.persistdir = persistdir
dnf_conf.cachedir = cachedir dnf_conf.cachedir = cachedir
dnf_conf.optional_metadata_types = ["filelists"]
dnf_obj = DnfWrapper(dnf_conf) dnf_obj = DnfWrapper(dnf_conf)
gather_opts = GatherOptions() gather_opts = GatherOptions()


@ -5,7 +5,6 @@ from __future__ import print_function
import argparse import argparse
import getpass import getpass
import glob
import json import json
import locale import locale
import logging import logging
@ -21,9 +20,6 @@ from six.moves import shlex_quote
from pungi.phases import PHASES_NAMES from pungi.phases import PHASES_NAMES
from pungi import get_full_version, util from pungi import get_full_version, util
from pungi.errors import UnsignedPackagesError
from pungi.wrappers import kojiwrapper
from pungi.util import rmtree
# force C locales # force C locales
@ -252,15 +248,9 @@ def main():
kobo.log.add_stderr_logger(logger) kobo.log.add_stderr_logger(logger)
conf = util.load_config(opts.config) conf = util.load_config(opts.config)
compose_type = opts.compose_type or conf.get("compose_type", "production")
label = opts.label or conf.get("label")
if label:
try:
productmd.composeinfo.verify_label(label)
except ValueError as ex:
abort(str(ex))
if compose_type == "production" and not label and not opts.no_label: compose_type = opts.compose_type or conf.get("compose_type", "production")
if compose_type == "production" and not opts.label and not opts.no_label:
abort("must specify label for a production compose") abort("must specify label for a production compose")
if ( if (
@ -272,12 +262,14 @@ def main():
# check if all requirements are met # check if all requirements are met
import pungi.checks import pungi.checks
if not pungi.checks.check(conf):
sys.exit(1)
pungi.checks.check_umask(logger) pungi.checks.check_umask(logger)
if not pungi.checks.check_skip_phases( if not pungi.checks.check_skip_phases(
logger, opts.skip_phase + conf.get("skip_phases", []), opts.just_phase logger, opts.skip_phase + conf.get("skip_phases", []), opts.just_phase
): ):
sys.exit(1) sys.exit(1)
errors, warnings = pungi.checks.validate(conf, offline=True) errors, warnings = pungi.checks.validate(conf)
if not opts.quiet: if not opts.quiet:
# TODO: workaround for config files containing skip_phase = productimg # TODO: workaround for config files containing skip_phase = productimg
@ -302,17 +294,9 @@ def main():
fail_to_start("Config validation failed", errors=errors) fail_to_start("Config validation failed", errors=errors)
sys.exit(1) sys.exit(1)
if not pungi.checks.check(conf):
sys.exit(1)
if opts.target_dir: if opts.target_dir:
compose_dir = Compose.get_compose_dir( compose_dir = Compose.get_compose_dir(
opts.target_dir, opts.target_dir, conf, compose_type=compose_type, compose_label=opts.label
conf,
compose_type=compose_type,
compose_label=label,
parent_compose_ids=opts.parent_compose_id,
respin_of=opts.respin_of,
) )
else: else:
compose_dir = opts.compose_dir compose_dir = opts.compose_dir
@ -321,7 +305,7 @@ def main():
ci = Compose.get_compose_info( ci = Compose.get_compose_info(
conf, conf,
compose_type=compose_type, compose_type=compose_type,
compose_label=label, compose_label=opts.label,
parent_compose_ids=opts.parent_compose_id, parent_compose_ids=opts.parent_compose_id,
respin_of=opts.respin_of, respin_of=opts.respin_of,
) )
@ -341,34 +325,14 @@ def main():
logger=logger, logger=logger,
notifier=notifier, notifier=notifier,
) )
rv = Compose.update_compose_url(compose.compose_id, compose_dir, conf)
if rv and not rv.ok:
logger.error("CTS compose_url update failed with the error: %s" % rv.text)
errors, warnings = pungi.checks.validate(conf, offline=False)
if errors:
for error in errors:
logger.error("Config validation failed with the error: %s" % error)
fail_to_start("Config validation failed", errors=errors)
sys.exit(1)
notifier.compose = compose notifier.compose = compose
COMPOSE = compose COMPOSE = compose
try:
run_compose( run_compose(
compose, compose,
create_latest_link=create_latest_link, create_latest_link=create_latest_link,
latest_link_status=latest_link_status, latest_link_status=latest_link_status,
latest_link_components=latest_link_components, latest_link_components=latest_link_components,
) )
except UnsignedPackagesError:
# There was an unsigned package somewhere. It is not safe to reuse any
# package set from this compose (since we could leak the unsigned
# package). Let's make sure all reuse files are deleted.
for fp in glob.glob(compose.paths.work.pkgset_reuse_file("*")):
os.unlink(fp)
raise
def run_compose( def run_compose(
@ -390,16 +354,6 @@ def run_compose(
) )
compose.log_info("Compose top directory: %s" % compose.topdir) compose.log_info("Compose top directory: %s" % compose.topdir)
compose.log_info("Current timezone offset: %s" % pungi.util.get_tz_offset()) compose.log_info("Current timezone offset: %s" % pungi.util.get_tz_offset())
compose.log_info("COMPOSE_ID=%s" % compose.compose_id)
installed_pkgs_log = compose.paths.log.log_file("global", "installed-pkgs")
compose.log_info("Logging installed packages to %s" % installed_pkgs_log)
try:
with open(installed_pkgs_log, "w") as f:
subprocess.Popen(["rpm", "-qa"], stdout=f)
except Exception as e:
compose.log_warning("Failed to log installed packages: %s" % str(e))
compose.read_variants() compose.read_variants()
# dump the config file # dump the config file
@ -423,15 +377,13 @@ def run_compose(
compose, buildinstall_phase, pkgset_phase compose, buildinstall_phase, pkgset_phase
) )
ostree_phase = pungi.phases.OSTreePhase(compose, pkgset_phase) ostree_phase = pungi.phases.OSTreePhase(compose, pkgset_phase)
ostree_container_phase = pungi.phases.OSTreeContainerPhase(compose, pkgset_phase)
createiso_phase = pungi.phases.CreateisoPhase(compose, buildinstall_phase) createiso_phase = pungi.phases.CreateisoPhase(compose, buildinstall_phase)
extra_isos_phase = pungi.phases.ExtraIsosPhase(compose, buildinstall_phase) extra_isos_phase = pungi.phases.ExtraIsosPhase(compose)
liveimages_phase = pungi.phases.LiveImagesPhase(compose)
livemedia_phase = pungi.phases.LiveMediaPhase(compose) livemedia_phase = pungi.phases.LiveMediaPhase(compose)
image_build_phase = pungi.phases.ImageBuildPhase(compose, buildinstall_phase) image_build_phase = pungi.phases.ImageBuildPhase(compose)
kiwibuild_phase = pungi.phases.KiwiBuildPhase(compose)
osbuild_phase = pungi.phases.OSBuildPhase(compose) osbuild_phase = pungi.phases.OSBuildPhase(compose)
osbs_phase = pungi.phases.OSBSPhase(compose, pkgset_phase, buildinstall_phase) osbs_phase = pungi.phases.OSBSPhase(compose)
image_container_phase = pungi.phases.ImageContainerPhase(compose)
image_checksum_phase = pungi.phases.ImageChecksumPhase(compose) image_checksum_phase = pungi.phases.ImageChecksumPhase(compose)
repoclosure_phase = pungi.phases.RepoclosurePhase(compose) repoclosure_phase = pungi.phases.RepoclosurePhase(compose)
test_phase = pungi.phases.TestPhase(compose) test_phase = pungi.phases.TestPhase(compose)
@ -445,18 +397,16 @@ def run_compose(
gather_phase, gather_phase,
extrafiles_phase, extrafiles_phase,
createiso_phase, createiso_phase,
liveimages_phase,
livemedia_phase, livemedia_phase,
image_build_phase, image_build_phase,
image_checksum_phase, image_checksum_phase,
test_phase, test_phase,
ostree_phase, ostree_phase,
ostree_installer_phase, ostree_installer_phase,
ostree_container_phase,
extra_isos_phase, extra_isos_phase,
osbs_phase, osbs_phase,
osbuild_phase, osbuild_phase,
image_container_phase,
kiwibuild_phase,
): ):
if phase.skip(): if phase.skip():
continue continue
@ -471,6 +421,50 @@ def run_compose(
print(i) print(i)
raise RuntimeError("Configuration is not valid") raise RuntimeError("Configuration is not valid")
# PREP
# Note: This may be put into a new method of phase classes (e.g. .prep())
# in same way as .validate() or .run()
# Prep for liveimages - Obtain a password for signing rpm wrapped images
if (
"signing_key_password_file" in compose.conf
and "signing_command" in compose.conf
and "%(signing_key_password)s" in compose.conf["signing_command"]
and not liveimages_phase.skip()
):
# TODO: Don't require key if signing is turned off
# Obtain signing key password
signing_key_password = None
# Use appropriate method
if compose.conf["signing_key_password_file"] == "-":
# Use stdin (by getpass module)
try:
signing_key_password = getpass.getpass("Signing key password: ")
except EOFError:
compose.log_debug("Ignoring signing key password")
pass
else:
# Use text file with password
try:
signing_key_password = (
open(compose.conf["signing_key_password_file"], "r")
.readline()
.rstrip("\n")
)
except IOError:
# The filename is intentionally not printed, in case someone puts
# the password directly into the option
err_msg = "Cannot load password from file specified by 'signing_key_password_file' option" # noqa: E501
compose.log_error(err_msg)
print(err_msg)
raise RuntimeError(err_msg)
if signing_key_password:
# Store the password
compose.conf["signing_key_password"] = signing_key_password
init_phase.start() init_phase.start()
init_phase.stop() init_phase.stop()
@ -483,7 +477,6 @@ def run_compose(
(gather_phase, createrepo_phase), (gather_phase, createrepo_phase),
extrafiles_phase, extrafiles_phase,
(ostree_phase, ostree_installer_phase), (ostree_phase, ostree_installer_phase),
ostree_container_phase,
) )
essentials_phase = pungi.phases.WeaverPhase(compose, essentials_schema) essentials_phase = pungi.phases.WeaverPhase(compose, essentials_schema)
essentials_phase.start() essentials_phase.start()
@ -508,17 +501,14 @@ def run_compose(
compose_images_schema = ( compose_images_schema = (
createiso_phase, createiso_phase,
extra_isos_phase, extra_isos_phase,
liveimages_phase,
image_build_phase, image_build_phase,
livemedia_phase, livemedia_phase,
osbuild_phase, osbuild_phase,
kiwibuild_phase,
)
post_image_phase = pungi.phases.WeaverPhase(
compose, (image_checksum_phase, image_container_phase)
) )
compose_images_phase = pungi.phases.WeaverPhase(compose, compose_images_schema) compose_images_phase = pungi.phases.WeaverPhase(compose, compose_images_schema)
extra_phase_schema = ( extra_phase_schema = (
(compose_images_phase, post_image_phase), (compose_images_phase, image_checksum_phase),
osbs_phase, osbs_phase,
repoclosure_phase, repoclosure_phase,
) )
@ -532,15 +522,13 @@ def run_compose(
buildinstall_phase.skip() buildinstall_phase.skip()
and ostree_installer_phase.skip() and ostree_installer_phase.skip()
and createiso_phase.skip() and createiso_phase.skip()
and extra_isos_phase.skip() and liveimages_phase.skip()
and livemedia_phase.skip() and livemedia_phase.skip()
and image_build_phase.skip() and image_build_phase.skip()
and kiwibuild_phase.skip()
and osbuild_phase.skip() and osbuild_phase.skip()
and ostree_container_phase.skip()
): ):
compose.im.dump(compose.paths.compose.metadata("images.json")) compose.im.dump(compose.paths.compose.metadata("images.json"))
compose.dump_containers_metadata() osbs_phase.dump_metadata()
test_phase.start() test_phase.start()
test_phase.stop() test_phase.stop()
@ -612,25 +600,9 @@ def try_kill_children(signal):
COMPOSE.log_warning("Failed to kill all subprocesses") COMPOSE.log_warning("Failed to kill all subprocesses")
def try_kill_koji_tasks():
try:
if COMPOSE:
koji_tasks_dir = COMPOSE.paths.log.koji_tasks_dir(create_dir=False)
if os.path.exists(koji_tasks_dir):
COMPOSE.log_warning("Trying to kill koji tasks")
koji = kojiwrapper.KojiWrapper(COMPOSE)
koji.login()
for task_id in os.listdir(koji_tasks_dir):
koji.koji_proxy.cancelTask(int(task_id))
except Exception:
if COMPOSE:
COMPOSE.log_warning("Failed to kill koji tasks")
def sigterm_handler(signum, frame): def sigterm_handler(signum, frame):
if COMPOSE: if COMPOSE:
try_kill_children(signum) try_kill_children(signum)
try_kill_koji_tasks()
COMPOSE.log_error("Compose run failed: signal %s" % signum) COMPOSE.log_error("Compose run failed: signal %s" % signum)
COMPOSE.log_error("Traceback:\n%s" % "\n".join(traceback.format_stack(frame))) COMPOSE.log_error("Traceback:\n%s" % "\n".join(traceback.format_stack(frame)))
COMPOSE.log_critical("Compose failed: %s" % COMPOSE.topdir) COMPOSE.log_critical("Compose failed: %s" % COMPOSE.topdir)
@ -650,18 +622,18 @@ def cli_main():
main() main()
except (Exception, KeyboardInterrupt) as ex: except (Exception, KeyboardInterrupt) as ex:
if COMPOSE: if COMPOSE:
tb_path = COMPOSE.paths.log.log_file("global", "traceback")
COMPOSE.log_error("Compose run failed: %s" % ex) COMPOSE.log_error("Compose run failed: %s" % ex)
COMPOSE.traceback(show_locals=getattr(ex, "show_locals", True)) COMPOSE.log_error("Extended traceback in: %s" % tb_path)
COMPOSE.log_critical("Compose failed: %s" % COMPOSE.topdir) COMPOSE.log_critical("Compose failed: %s" % COMPOSE.topdir)
COMPOSE.write_status("DOOMED") COMPOSE.write_status("DOOMED")
import kobo.tback
with open(tb_path, "wb") as f:
f.write(kobo.tback.Traceback().get_traceback())
else: else:
print("Exception: %s" % ex) print("Exception: %s" % ex)
raise raise
sys.stdout.flush() sys.stdout.flush()
sys.stderr.flush() sys.stderr.flush()
sys.exit(1) sys.exit(1)
finally:
# Remove repositories cloned during ExtraFiles phase
process_id = os.getpid()
directory_to_remove = "/tmp/pungi-temp-git-repos-" + str(process_id) + "/"
rmtree(directory_to_remove)


@ -34,7 +34,6 @@ import kobo.conf
from kobo.shortcuts import run, force_list from kobo.shortcuts import run, force_list
from kobo.threads import WorkerThread, ThreadPool from kobo.threads import WorkerThread, ThreadPool
from productmd.common import get_major_version from productmd.common import get_major_version
from pungi.module_util import Modulemd
# Patterns that match all names of debuginfo packages # Patterns that match all names of debuginfo packages
DEBUG_PATTERNS = ["*-debuginfo", "*-debuginfo-*", "*-debugsource"] DEBUG_PATTERNS = ["*-debuginfo", "*-debuginfo-*", "*-debugsource"]
@ -279,7 +278,7 @@ class GitUrlResolveError(RuntimeError):
pass pass
def resolve_git_ref(repourl, ref, credential_helper=None): def resolve_git_ref(repourl, ref):
"""Resolve a reference in a Git repo to a commit. """Resolve a reference in a Git repo to a commit.
Raises RuntimeError if there was an error. Most likely cause is failure to Raises RuntimeError if there was an error. Most likely cause is failure to
@ -288,13 +287,8 @@ def resolve_git_ref(repourl, ref, credential_helper=None):
if re.match(r"^[a-f0-9]{40}$", ref): if re.match(r"^[a-f0-9]{40}$", ref):
# This looks like a commit ID already. # This looks like a commit ID already.
return ref return ref
try:
_, output = git_ls_remote(repourl, ref, credential_helper) _, output = git_ls_remote(repourl, ref)
except RuntimeError as e:
raise GitUrlResolveError(
"ref does not exist in remote repo %s with the error %s %s"
% (repourl, e, e.output)
)
lines = [] lines = []
for line in output.split("\n"): for line in output.split("\n"):
@ -316,7 +310,7 @@ def resolve_git_ref(repourl, ref, credential_helper=None):
return lines[0].split()[0] return lines[0].split()[0]
def resolve_git_url(url, credential_helper=None): def resolve_git_url(url):
"""Given a url to a Git repo specifying HEAD or origin/<branch> as a ref, """Given a url to a Git repo specifying HEAD or origin/<branch> as a ref,
replace that specifier with actual SHA1 of the commit. replace that specifier with actual SHA1 of the commit.
@ -335,7 +329,7 @@ def resolve_git_url(url, credential_helper=None):
scheme = r.scheme.replace("git+", "") scheme = r.scheme.replace("git+", "")
baseurl = urllib.parse.urlunsplit((scheme, r.netloc, r.path, "", "")) baseurl = urllib.parse.urlunsplit((scheme, r.netloc, r.path, "", ""))
fragment = resolve_git_ref(baseurl, ref, credential_helper) fragment = resolve_git_ref(baseurl, ref)
result = urllib.parse.urlunsplit((r.scheme, r.netloc, r.path, r.query, fragment)) result = urllib.parse.urlunsplit((r.scheme, r.netloc, r.path, r.query, fragment))
if "?#" in url: if "?#" in url:
@ -354,18 +348,13 @@ class GitUrlResolver(object):
self.offline = offline self.offline = offline
self.cache = {} self.cache = {}
def __call__(self, url, branch=None, options=None): def __call__(self, url, branch=None):
credential_helper = options.get("credential_helper") if options else None
if self.offline: if self.offline:
return branch or url return branch or url
key = (url, branch) key = (url, branch)
if key not in self.cache: if key not in self.cache:
try: try:
res = ( res = resolve_git_ref(url, branch) if branch else resolve_git_url(url)
resolve_git_ref(url, branch, credential_helper)
if branch
else resolve_git_url(url, credential_helper)
)
self.cache[key] = res self.cache[key] = res
except GitUrlResolveError as exc: except GitUrlResolveError as exc:
self.cache[key] = exc self.cache[key] = exc
@ -461,9 +450,6 @@ def get_volid(compose, arch, variant=None, disc_type=False, formats=None, **kwar
if not variant_uid and "%(variant)s" in i: if not variant_uid and "%(variant)s" in i:
continue continue
try: try:
# fmt: off
# Black wants to add a comma after kwargs, but that's not valid in
# Python 2.7
args = get_format_substs( args = get_format_substs(
compose, compose,
variant=variant_uid, variant=variant_uid,
@ -475,7 +461,6 @@ def get_volid(compose, arch, variant=None, disc_type=False, formats=None, **kwar
base_product_version=base_product_version, base_product_version=base_product_version,
**kwargs **kwargs
) )
# fmt: on
volid = (i % args).format(**args) volid = (i % args).format(**args)
except KeyError as err: except KeyError as err:
raise RuntimeError( raise RuntimeError(
@ -487,7 +472,10 @@ def get_volid(compose, arch, variant=None, disc_type=False, formats=None, **kwar
tried.add(volid) tried.add(volid)
if volid and len(volid) > 32: if volid and len(volid) > 32:
volid = volid[:32] raise ValueError(
"Could not create volume ID longer than 32 bytes, options are %r",
sorted(tried, key=len),
)
if compose.conf["restricted_volid"]: if compose.conf["restricted_volid"]:
# Replace all non-alphanumeric characters and non-underscores) with # Replace all non-alphanumeric characters and non-underscores) with
@ -975,7 +963,7 @@ def version_generator(compose, gen):
def retry(timeout=120, interval=30, wait_on=Exception): def retry(timeout=120, interval=30, wait_on=Exception):
"""A decorator that allows to retry a section of code until success or """ A decorator that allows to retry a section of code until success or
timeout. timeout.
""" """
@ -997,12 +985,8 @@ def retry(timeout=120, interval=30, wait_on=Exception):
@retry(wait_on=RuntimeError) @retry(wait_on=RuntimeError)
def git_ls_remote(baseurl, ref, credential_helper=None): def git_ls_remote(baseurl, ref):
cmd = ["git"] return run(["git", "ls-remote", baseurl, ref], universal_newlines=True)
if credential_helper:
cmd.extend(["-c", "credential.useHttpPath=true"])
cmd.extend(["-c", "credential.helper=%s" % credential_helper])
return run(cmd + ["ls-remote", baseurl, ref], universal_newlines=True)
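The retry decorator takes timeout and interval in seconds plus the exception type to swallow; anything else propagates immediately. A usage sketch, assuming the decorator is importable from pungi.util (where this hunk appears to live):

from pungi.util import retry

attempts = {'n': 0}

@retry(timeout=8, interval=2, wait_on=RuntimeError)
def flaky_fetch():
    # fails twice, then succeeds; retry() re-invokes it every
    # `interval` seconds until success or until `timeout` elapses
    attempts['n'] += 1
    if attempts['n'] < 3:
        raise RuntimeError('transient failure')
    return 'ok'

print(flaky_fetch())   # -> 'ok' after two retried failures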
def get_tz_offset(): def get_tz_offset():
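
The credential_helper support dropped in this hunk amounts to extra -c options
on the git command line; a minimal sketch, with a hypothetical helper path:

    # Build the equivalent of the base branch's git_ls_remote() command.
    credential_helper = "/usr/local/bin/pungi-credential-helper"  # hypothetical
    cmd = [
        "git",
        "-c", "credential.useHttpPath=true",
        "-c", "credential.helper=%s" % credential_helper,
        "ls-remote", "https://git.example.com/repo.git", "HEAD",
    ]
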
@ -1050,46 +1034,6 @@ def load_config(file_path, defaults={}):
return conf return conf
def _read_single_module_stream(
file_or_string, compose=None, arch=None, build=None, is_file=True
):
try:
mod_index = Modulemd.ModuleIndex.new()
if is_file:
mod_index.update_from_file(file_or_string, True)
else:
mod_index.update_from_string(file_or_string, True)
mod_names = mod_index.get_module_names()
emit_warning = False
if len(mod_names) > 1:
emit_warning = True
mod_streams = mod_index.get_module(mod_names[0]).get_all_streams()
if len(mod_streams) > 1:
emit_warning = True
if emit_warning and compose:
compose.log_warning(
"Multiple modules/streams for arch: %s. Build: %s. "
"Processing first module/stream only.",
arch,
build,
)
return mod_streams[0]
except (KeyError, IndexError):
# There is no modulemd for this arch. This could mean an arch was
# added to the compose after the module was built. We don't want to
# process this, let's skip this module.
if compose:
compose.log_info("Skipping arch: %s. Build: %s", arch, build)
def read_single_module_stream_from_file(*args, **kwargs):
return _read_single_module_stream(*args, is_file=True, **kwargs)
def read_single_module_stream_from_string(*args, **kwargs):
return _read_single_module_stream(*args, is_file=False, **kwargs)
@contextlib.contextmanager @contextlib.contextmanager
def as_local_file(url): def as_local_file(url):
"""If URL points to a file over HTTP, the file will be downloaded locally """If URL points to a file over HTTP, the file will be downloaded locally
@ -1102,8 +1046,6 @@ def as_local_file(url):
yield local_filename yield local_filename
finally: finally:
os.remove(local_filename) os.remove(local_filename)
elif url.startswith("file://"):
yield url[7:]
else: else:
# Not a remote url, return unchanged. # Not a remote url, return unchanged.
yield url yield url
@ -1141,22 +1083,3 @@ class PartialFuncThreadPool(ThreadPool):
@property @property
def results(self): def results(self):
return self._results return self._results
def read_json_file(file_path):
"""A helper function to read a JSON file."""
with open(file_path) as f:
return json.load(f)
UNITS = ["", "Ki", "Mi", "Gi", "Ti"]
def format_size(sz):
sz = float(sz)
unit = 0
while sz > 1024:
sz /= 1024
unit += 1
return "%.3g %sB" % (sz, UNITS[unit])



@ -177,23 +177,16 @@ class CompsFilter(object):
for i in self.tree.xpath("//*[@xml:lang]"): for i in self.tree.xpath("//*[@xml:lang]"):
i.getparent().remove(i) i.getparent().remove(i)
def filter_environment_groups(self, arch, lookaside_groups=[]): def filter_environment_groups(self, lookaside_groups=[]):
""" """
Remove undefined groups or groups not matching given arch from environments. Remove undefined groups from environments.
""" """
all_groups = self.tree.xpath("/comps/group/id/text()") + lookaside_groups all_groups = self.tree.xpath("/comps/group/id/text()") + lookaside_groups
for environment in self.tree.xpath("/comps/environment"): for environment in self.tree.xpath("/comps/environment"):
for parent_tag in ("grouplist", "optionlist"): for group in environment.xpath("grouplist/groupid"):
for group in environment.xpath("%s/groupid" % parent_tag):
if group.text not in all_groups: if group.text not in all_groups:
group.getparent().remove(group) group.getparent().remove(group)
for group in environment.xpath("%s/groupid[@arch]" % parent_tag):
value = group.attrib.get("arch")
values = [v for v in re.split(r"[, ]+", value) if v]
if arch not in values:
group.getparent().remove(group)
def remove_empty_environments(self): def remove_empty_environments(self):
""" """
Remove all environments without groups. Remove all environments without groups.
@ -219,7 +212,7 @@ class CompsFilter(object):
) )
file_obj.write(b"\n") file_obj.write(b"\n")
def cleanup(self, arch, keep_groups=[], lookaside_groups=[]): def cleanup(self, keep_groups=[], lookaside_groups=[]):
""" """
Remove empty groups, categories and environment from the comps file. Remove empty groups, categories and environment from the comps file.
Groups given in ``keep_groups`` will be preserved even if empty. Groups given in ``keep_groups`` will be preserved even if empty.
@ -230,7 +223,7 @@ class CompsFilter(object):
self.remove_empty_groups(keep_groups) self.remove_empty_groups(keep_groups)
self.filter_category_groups() self.filter_category_groups()
self.remove_empty_categories() self.remove_empty_categories()
self.filter_environment_groups(arch, lookaside_groups) self.filter_environment_groups(lookaside_groups)
self.remove_empty_environments() self.remove_empty_environments()
@ -364,10 +357,7 @@ class CompsWrapper(object):
if environment.option_ids: if environment.option_ids:
append_grouplist( append_grouplist(
doc, doc, env_node, set(environment.option_ids), "optionlist",
env_node,
set(environment.option_ids),
"optionlist",
) )
if self.comps.langpacks: if self.comps.langpacks:


@ -26,12 +26,7 @@ Pungi).
def get_cmd( def get_cmd(
conf_file, conf_file, arch, repos, lookasides, platform=None, filter_packages=None,
arch,
repos,
lookasides,
platform=None,
filter_packages=None,
): ):
cmd = ["fus", "--verbose", "--arch", arch] cmd = ["fus", "--verbose", "--arch", arch]


@ -146,7 +146,6 @@ def get_mkisofs_cmd(
input_charset="utf-8", input_charset="utf-8",
graft_points=None, graft_points=None,
use_xorrisofs=False, use_xorrisofs=False,
iso_level=None,
): ):
# following options are always enabled # following options are always enabled
untranslated_filenames = True untranslated_filenames = True
@ -156,10 +155,6 @@ def get_mkisofs_cmd(
rock = True rock = True
cmd = ["/usr/bin/xorrisofs" if use_xorrisofs else "/usr/bin/genisoimage"] cmd = ["/usr/bin/xorrisofs" if use_xorrisofs else "/usr/bin/genisoimage"]
if iso_level:
cmd.extend(["-iso-level", str(iso_level)])
if appid: if appid:
cmd.extend(["-appid", appid]) cmd.extend(["-appid", appid])
@ -260,34 +255,14 @@ def get_isohybrid_cmd(iso_path, arch):
return cmd return cmd
def get_manifest_cmd(iso_name, xorriso=False, output_file=None): def get_manifest_cmd(iso_name):
if not output_file: return "isoinfo -R -f -i %s | grep -v '/TRANS.TBL$' | sort >> %s.manifest" % (
output_file = "%s.manifest" % iso_name
if xorriso:
return """xorriso -dev %s --find |
tail -n+2 |
tr -d "'" |
cut -c2- |
sort >> %s""" % (
shlex_quote(iso_name), shlex_quote(iso_name),
shlex_quote(output_file),
)
else:
return "isoinfo -R -f -i %s | grep -v '/TRANS.TBL$' | sort >> %s" % (
shlex_quote(iso_name), shlex_quote(iso_name),
shlex_quote(output_file),
) )
def get_volume_id(path, xorriso=False): def get_volume_id(path):
if xorriso:
cmd = ["xorriso", "-indev", path]
retcode, output = run(cmd, universal_newlines=True)
for line in output.splitlines():
if line.startswith("Volume id"):
return line.split("'")[1]
else:
cmd = ["isoinfo", "-d", "-i", path] cmd = ["isoinfo", "-d", "-i", path]
retcode, output = run(cmd, universal_newlines=True) retcode, output = run(cmd, universal_newlines=True)
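
For the xorriso path added on the base branch, `xorriso -indev <iso>` prints a
line of the form below and the label is taken from between the single quotes
(the label itself is an example):

    line = "Volume id    : 'AlmaLinux-9-0-x86_64-dvd'"
    volume_id = line.split("'")[1]
    assert volume_id == "AlmaLinux-9-0-x86_64-dvd"
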
@ -516,21 +491,3 @@ def mount(image, logger=None, use_guestmount=True):
util.run_unmount_cmd(["fusermount", "-u", mount_dir], path=mount_dir) util.run_unmount_cmd(["fusermount", "-u", mount_dir], path=mount_dir)
else: else:
util.run_unmount_cmd(["umount", mount_dir], path=mount_dir) util.run_unmount_cmd(["umount", mount_dir], path=mount_dir)
def xorriso_commands(arch, input, output):
"""List of xorriso commands to modify a bootable image."""
commands = [
("-indev", input),
("-outdev", output),
# isoinfo -J uses the Joliet tree, and it's used by virt-install
("-joliet", "on"),
# Support long filenames in the Joliet trees. Repodata is particularly
# likely to run into this limit.
("-compliance", "joliet_long_names"),
("-boot_image", "any", "replay"),
]
if arch == "ppc64le":
# This is needed for the image to be bootable.
commands.append(("-as", "mkisofs", "-U", "--"))
return commands
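
A caller flattens the argument tuples returned by the removed
xorriso_commands() into a single invocation, roughly (filenames are examples):

    import itertools
    from pungi.wrappers.iso import xorriso_commands  # base branch only

    cmd = ["xorriso"] + list(itertools.chain.from_iterable(
        xorriso_commands("ppc64le", "input.iso", "output.iso")
    ))
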


@ -1,6 +1,6 @@
import os import os
import subprocess
import time import time
from pathlib import Path
from attr import dataclass from attr import dataclass
from kobo.rpmlib import parse_nvra from kobo.rpmlib import parse_nvra
@ -43,20 +43,19 @@ class KojiMock:
Class that acts like real koji (for some needed methods) Class that acts like real koji (for some needed methods)
but uses local storage as data source but uses local storage as data source
""" """
def __init__(self, packages_dir, modules_dir, all_arches): def __init__(self, packages_dir, modules_dir):
self._modules = self._gather_modules(modules_dir) self._modules = self._gather_modules(modules_dir)
self._modules_dir = modules_dir self._modules_dir = modules_dir
self._packages_dir = packages_dir self._packages_dir = packages_dir
self._all_arches = all_arches
@staticmethod def _gather_modules(self, modules_dir):
def _gather_modules(modules_dir):
modules = {} modules = {}
for index, (f, arch) in enumerate( for arch in os.listdir(modules_dir):
(sub_path.name, sub_path.parent.name) arch_dir = os.path.join(
for path in Path(modules_dir).glob('*') modules_dir,
for sub_path in path.iterdir() arch,
): )
for index, f in enumerate(os.listdir(arch_dir)):
parsed = parse_nvra(f) parsed = parse_nvra(f)
modules[index] = Module( modules[index] = Module(
name=parsed['name'], name=parsed['name'],
@ -69,8 +68,7 @@ class KojiMock:
) )
return modules return modules
@staticmethod def getLastEvent(self, *args, **kwargs):
def getLastEvent(*args, **kwargs):
return {'id': LAST_EVENT_ID, 'ts': LAST_EVENT_TIME} return {'id': LAST_EVENT_ID, 'ts': LAST_EVENT_TIME}
def listTagged(self, tag_name, *args, **kwargs): def listTagged(self, tag_name, *args, **kwargs):
@ -94,7 +92,6 @@ class KojiMock:
'name': module.name, 'name': module.name,
'id': module.build_id, 'id': module.build_id,
'tag_name': tag_name, 'tag_name': tag_name,
'arch': module.arch,
# Following fields are currently not # Following fields are currently not
# used but returned by real koji # used but returned by real koji
# left them here just for reference # left them here just for reference
@ -114,8 +111,7 @@ class KojiMock:
return builds return builds
@staticmethod def getFullInheritance(self, *args, **kwargs):
def getFullInheritance(*args, **kwargs):
""" """
Unneeded because we use local storage. Unneeded because we use local storage.
""" """
@ -203,12 +199,31 @@ class KojiMock:
packages = [] packages = []
# get all rpms in folder # get all rpms in folder
rpms = search_rpms(Path(self._packages_dir)) rpms = search_rpms(self._packages_dir)
all_rpms = [package.path for package in rpms]
for rpm in rpms: # get nvras for modular packages
info = parse_nvra(rpm.path.stem) nvras = set()
if 'module' in info['release']: for module in self._modules.values():
continue path = os.path.join(
self._modules_dir,
module.arch,
module.nvr,
)
info = Modulemd.ModuleStream.read_string(open(path).read(), strict=True)
for package in info.get_rpm_artifacts():
data = parse_nvra(package)
nvras.add((data['name'], data['version'], data['release'], data['arch']))
# and remove modular packages from global list
for rpm in all_rpms[:]:
data = parse_nvra(os.path.basename(rpm[:-4]))
if (data['name'], data['version'], data['release'], data['arch']) in nvras:
all_rpms.remove(rpm)
for rpm in all_rpms:
info = parse_nvra(os.path.basename(rpm))
packages.append({ packages.append({
"build_id": RELEASE_BUILD_ID, "build_id": RELEASE_BUILD_ID,
"name": info['name'], "name": info['name'],
@ -229,19 +244,15 @@ class KojiMock:
""" """
Get list of builds for module and given module tag name. Get list of builds for module and given module tag name.
""" """
builds = [] module = self._get_module_by_name(tag_name)
packages = []
modules = self._get_modules_by_name(tag_name)
for module in modules:
if module is None:
raise ValueError('Module %s is not found' % tag_name)
path = os.path.join( path = os.path.join(
self._modules_dir, self._modules_dir,
module.arch, module.arch,
tag_name, tag_name,
) )
builds.append({ builds = [
{
"build_id": module.build_id, "build_id": module.build_id,
"package_name": module.name, "package_name": module.name,
"nvr": module.nvr, "nvr": module.nvr,
@ -267,8 +278,12 @@ class KojiMock:
# "volume_id": 0, # "volume_id": 0,
# "package_id": 104, # "package_id": 104,
# "owner_id": 6, # "owner_id": 6,
}) }
]
if module is None:
raise ValueError('Module %s is not found' % tag_name)
packages = []
if os.path.exists(path): if os.path.exists(path):
info = Modulemd.ModuleStream.read_string(open(path).read(), strict=True) info = Modulemd.ModuleStream.read_string(open(path).read(), strict=True)
for art in info.get_rpm_artifacts(): for art in info.get_rpm_artifacts():
@ -289,11 +304,9 @@ class KojiMock:
raise RuntimeError('Unable to find module %s' % path) raise RuntimeError('Unable to find module %s' % path)
return builds, packages return builds, packages
def _get_modules_by_name(self, tag_name): def _get_module_by_name(self, tag_name):
modules = []
for arch in self._all_arches:
for module in self._modules.values(): for module in self._modules.values():
if module.nvr != tag_name or module.arch != arch: if module.nvr != tag_name:
continue continue
modules.append(module) return module
return modules return None


@ -14,23 +14,17 @@
# along with this program; if not, see <https://gnu.org/licenses/>. # along with this program; if not, see <https://gnu.org/licenses/>.
import contextlib
import os import os
import re import re
import socket
import shutil
import time import time
import threading import threading
import contextlib
import requests
import koji import koji
from kobo.shortcuts import run, force_list from kobo.shortcuts import run, force_list
import six import six
from six.moves import configparser, shlex_quote from six.moves import configparser, shlex_quote
import six.moves.xmlrpc_client as xmlrpclib import six.moves.xmlrpc_client as xmlrpclib
from flufl.lock import Lock
from datetime import timedelta
from .kojimock import KojiMock from .kojimock import KojiMock
from .. import util from .. import util
@ -43,14 +37,10 @@ KOJI_BUILD_DELETED = koji.BUILD_STATES["DELETED"]
class KojiWrapper(object): class KojiWrapper(object):
lock = threading.Lock() lock = threading.Lock()
def __init__(self, compose): def __init__(self, profile, real_koji=False):
self.compose = compose self.profile = profile
try:
self.profile = self.compose.conf["koji_profile"]
except KeyError:
raise RuntimeError("Koji profile must be configured")
with self.lock: with self.lock:
self.koji_module = koji.get_profile_module(self.profile) self.koji_module = koji.get_profile_module(profile)
session_opts = {} session_opts = {}
for key in ( for key in (
"timeout", "timeout",
@ -68,13 +58,15 @@ class KojiWrapper(object):
value = getattr(self.koji_module.config, key, None) value = getattr(self.koji_module.config, key, None)
if value is not None: if value is not None:
session_opts[key] = value session_opts[key] = value
if real_koji:
self.koji_proxy = koji.ClientSession( self.koji_proxy = koji.ClientSession(
self.koji_module.config.server, session_opts self.koji_module.config.server, session_opts
) )
else:
self.koji_proxy = KojiMock(
packages_dir=self.koji_module.config.topdir,
modules_dir=os.path.join(self.koji_module.config.topdir, 'modules'))
# This retry should be removed once https://pagure.io/koji/issue/3170 is
# fixed and released.
@util.retry(wait_on=(xmlrpclib.ProtocolError, koji.GenericError))
def login(self): def login(self):
"""Authenticate to the hub.""" """Authenticate to the hub."""
auth_type = self.koji_module.config.authtype auth_type = self.koji_module.config.authtype
@ -125,6 +117,8 @@ class KojiWrapper(object):
if channel: if channel:
cmd.append("--channel-override=%s" % channel) cmd.append("--channel-override=%s" % channel)
else:
cmd.append("--channel-override=runroot-local")
if weight: if weight:
cmd.append("--weight=%s" % int(weight)) cmd.append("--weight=%s" % int(weight))
@ -154,13 +148,10 @@ class KojiWrapper(object):
if chown_paths: if chown_paths:
paths = " ".join(shlex_quote(pth) for pth in chown_paths) paths = " ".join(shlex_quote(pth) for pth in chown_paths)
command += " ; EXIT_CODE=$?"
# Make the files world readable # Make the files world readable
command += " ; chmod -R a+r %s" % paths command += " && chmod -R a+r %s" % paths
# and owned by the same user that is running the process # and owned by the same user that is running the process
command += " ; chown -R %d %s" % (os.getuid(), paths) command += " && chown -R %d %s" % (os.getuid(), paths)
# Exit with code of main command
command += " ; exit $EXIT_CODE"
cmd.append(command) cmd.append(command)
return cmd return cmd
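
The base branch preserves the main command's exit status around the permission
fix-up, while this branch chains with && and therefore reports the status of
chmod/chown instead. Shape of the base-branch shell string (command, path and
uid are examples):

    command = "make install"
    paths = "/mnt/koji/work"
    command += " ; EXIT_CODE=$?"
    command += " ; chmod -R a+r %s" % paths
    command += " ; chown -R %d %s" % (1000, paths)
    command += " ; exit $EXIT_CODE"
    # -> make install ; EXIT_CODE=$? ; chmod -R a+r /mnt/koji/work ;
    #    chown -R 1000 /mnt/koji/work ; exit $EXIT_CODE
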
@ -180,6 +171,8 @@ class KojiWrapper(object):
if channel: if channel:
cmd.append("--channel-override=%s" % channel) cmd.append("--channel-override=%s" % channel)
else:
cmd.append("--channel-override=runroot-local")
if weight: if weight:
cmd.append("--weight=%s" % int(weight)) cmd.append("--weight=%s" % int(weight))
@ -215,19 +208,14 @@ class KojiWrapper(object):
return cmd return cmd
def get_pungi_ostree_cmd( def get_pungi_ostree_cmd(
self, self, target, arch, args, channel=None, packages=None, mounts=None, weight=None,
target,
arch,
args,
channel=None,
packages=None,
mounts=None,
weight=None,
): ):
cmd = self._get_cmd("pungi-ostree", "--nowait", "--task-id") cmd = self._get_cmd("pungi-ostree", "--nowait", "--task-id")
if channel: if channel:
cmd.append("--channel-override=%s" % channel) cmd.append("--channel-override=%s" % channel)
else:
cmd.append("--channel-override=runroot-local")
if weight: if weight:
cmd.append("--weight=%s" % int(weight)) cmd.append("--weight=%s" % int(weight))
@ -298,22 +286,15 @@ class KojiWrapper(object):
universal_newlines=True, universal_newlines=True,
) )
# Look for first line that contains only a number. This is the ID of first_line = output.splitlines()[0]
# the new task. Usually this should be the first line, but there may be match = re.search(r"^(\d+)$", first_line)
# warnings before it. if not match:
for line in output.splitlines():
match = re.search(r"^(\d+)$", line)
if match:
task_id = int(match.groups()[0])
break
if not task_id:
raise RuntimeError( raise RuntimeError(
"Could not find task ID in output. Command '%s' returned '%s'." "Could not find task ID in output. Command '%s' returned '%s'."
% (" ".join(command), output) % (" ".join(command), output)
) )
self.save_task_id(task_id) task_id = int(match.groups()[0])
retcode, output = self._wait_for_task(task_id, logfile=log_file) retcode, output = self._wait_for_task(task_id, logfile=log_file)
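
Base-branch behaviour in isolation: koji may print warnings before the task
ID, so the first line consisting only of digits is taken (the output below is
fabricated):

    import re

    output = "warning: profile fallback in use\n12345\n"
    task_id = None
    for line in output.splitlines():
        match = re.search(r"^(\d+)$", line)
        if match:
            task_id = int(match.group(1))
            break
    assert task_id == 12345
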
@ -347,11 +328,9 @@ class KojiWrapper(object):
"ksurl", "ksurl",
"distro", "distro",
) )
assert set(min_options).issubset( assert set(min_options).issubset(set(config_options["image-build"].keys())), (
set(config_options["image-build"].keys()) "image-build requires at least %s got '%s'"
), "image-build requires at least %s got '%s'" % ( % (", ".join(min_options), config_options)
", ".join(min_options),
config_options,
) )
cfg_parser = configparser.ConfigParser() cfg_parser = configparser.ConfigParser()
for section, opts in config_options.items(): for section, opts in config_options.items():
@ -406,11 +385,94 @@ class KojiWrapper(object):
if "can_fail" in options: if "can_fail" in options:
cmd.append("--can-fail=%s" % ",".join(options["can_fail"])) cmd.append("--can-fail=%s" % ",".join(options["can_fail"]))
if options.get("nomacboot"): if wait:
cmd.append("--nomacboot") cmd.append("--wait")
return cmd
def get_create_image_cmd(
self,
name,
version,
target,
arch,
ks_file,
repos,
image_type="live",
image_format=None,
release=None,
wait=True,
archive=False,
specfile=None,
ksurl=None,
):
# Usage: koji spin-livecd [options] <name> <version> <target> <arch> <kickstart-file> # noqa: E501
# Usage: koji spin-appliance [options] <name> <version> <target> <arch> <kickstart-file> # noqa: E501
# Examples:
# * name: RHEL-7.0
# * name: Satellite-6.0.1-RHEL-6
# ** -<type>.<arch>
# * version: YYYYMMDD[.n|.t].X
# * release: 1
cmd = self._get_cmd()
if image_type == "live":
cmd.append("spin-livecd")
elif image_type == "appliance":
cmd.append("spin-appliance")
else:
raise ValueError("Invalid image type: %s" % image_type)
if not archive:
cmd.append("--scratch")
cmd.append("--noprogress")
if wait: if wait:
cmd.append("--wait") cmd.append("--wait")
else:
cmd.append("--nowait")
if specfile:
cmd.append("--specfile=%s" % specfile)
if ksurl:
cmd.append("--ksurl=%s" % ksurl)
if isinstance(repos, list):
for repo in repos:
cmd.append("--repo=%s" % repo)
else:
cmd.append("--repo=%s" % repos)
if image_format:
if image_type != "appliance":
raise ValueError("Format can be specified only for appliance images'")
supported_formats = ["raw", "qcow", "qcow2", "vmx"]
if image_format not in supported_formats:
raise ValueError(
"Format is not supported: %s. Supported formats: %s"
% (image_format, " ".join(sorted(supported_formats)))
)
cmd.append("--format=%s" % image_format)
if release is not None:
cmd.append("--release=%s" % release)
# IMPORTANT: all --opts have to be provided *before* args
# Usage:
# koji spin-livecd [options] <name> <version> <target> <arch> <kickstart-file>
cmd.append(name)
cmd.append(version)
cmd.append(target)
# i686 -> i386 etc.
arch = getBaseArch(arch)
cmd.append(arch)
cmd.append(ks_file)
return cmd return cmd
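
A hypothetical call to the re-added helper on this branch; every value here is
an example, not taken from a real compose:

    from pungi.wrappers.kojiwrapper import KojiWrapper

    wrapper = KojiWrapper("koji")  # profile name is hypothetical
    cmd = wrapper.get_create_image_cmd(
        "AlmaLinux-9", "20220322", "al9-image-build", "x86_64", "livecd.ks",
        repos=["https://repo.example.org/BaseOS"],
        release="1",
    )
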
@ -460,7 +522,6 @@ class KojiWrapper(object):
retcode, output = run( retcode, output = run(
command, command,
can_fail=True, can_fail=True,
show_cmd=True,
logfile=log_file, logfile=log_file,
env=env, env=env,
buffer_size=-1, buffer_size=-1,
@ -475,8 +536,6 @@ class KojiWrapper(object):
) )
task_id = int(match.groups()[0]) task_id = int(match.groups()[0])
self.save_task_id(task_id)
if retcode != 0 and ( if retcode != 0 and (
self._has_connection_error(output) or self._has_offline_error(output) self._has_connection_error(output) or self._has_offline_error(output)
): ):
@ -491,19 +550,6 @@ class KojiWrapper(object):
} }
def watch_task(self, task_id, log_file=None, max_retries=None): def watch_task(self, task_id, log_file=None, max_retries=None):
"""Watch and wait for a task to finish.
:param int task_id: ID of koji task.
:param str log_file: Path to log file.
:param int max_retries: Max times to retry when error occurs,
no limits by default.
"""
if log_file:
task_url = os.path.join(
self.koji_module.config.weburl, "taskinfo?taskID=%d" % task_id
)
with open(log_file, "a") as f:
f.write("Task URL: %s\n" % task_url)
retcode, _ = self._wait_for_task( retcode, _ = self._wait_for_task(
task_id, logfile=log_file, max_retries=max_retries task_id, logfile=log_file, max_retries=max_retries
) )
@ -527,7 +573,6 @@ class KojiWrapper(object):
"createImage", "createImage",
"createLiveMedia", "createLiveMedia",
"createAppliance", "createAppliance",
"createKiwiImage",
]: ]:
continue continue
@ -707,10 +752,11 @@ class KojiWrapper(object):
if list_of_args is None and list_of_kwargs is None: if list_of_args is None and list_of_kwargs is None:
raise ValueError("One of list_of_args or list_of_kwargs must be set.") raise ValueError("One of list_of_args or list_of_kwargs must be set.")
if list_of_args is not None and not isinstance(list_of_args, list): if type(list_of_args) not in [type(None), list] or type(list_of_kwargs) not in [
raise ValueError("list_of_args must be list or None.") type(None),
if list_of_kwargs is not None and not isinstance(list_of_kwargs, list): list,
raise ValueError("list_of_kwargs must be list or None.") ]:
raise ValueError("list_of_args and list_of_kwargs must be list or None.")
if list_of_kwargs is None: if list_of_kwargs is None:
list_of_kwargs = [{}] * len(list_of_args) list_of_kwargs = [{}] * len(list_of_args)
@ -724,9 +770,9 @@ class KojiWrapper(object):
koji_session.multicall = True koji_session.multicall = True
for args, kwargs in zip(list_of_args, list_of_kwargs): for args, kwargs in zip(list_of_args, list_of_kwargs):
if not isinstance(args, list): if type(args) != list:
args = [args] args = [args]
if not isinstance(kwargs, dict): if type(kwargs) != dict:
raise ValueError("Every item in list_of_kwargs must be a dict") raise ValueError("Every item in list_of_kwargs must be a dict")
koji_session_fnc(*args, **kwargs) koji_session_fnc(*args, **kwargs)
@ -734,7 +780,7 @@ class KojiWrapper(object):
if not responses: if not responses:
return None return None
if not isinstance(responses, list): if type(responses) != list:
raise ValueError( raise ValueError(
"Fault element was returned for multicall of method %r: %r" "Fault element was returned for multicall of method %r: %r"
% (koji_session_fnc, responses) % (koji_session_fnc, responses)
@ -750,7 +796,7 @@ class KojiWrapper(object):
# a one-item array containing the result value, # a one-item array containing the result value,
# or a struct of the form found inside the standard <fault> element. # or a struct of the form found inside the standard <fault> element.
for response, args, kwargs in zip(responses, list_of_args, list_of_kwargs): for response, args, kwargs in zip(responses, list_of_args, list_of_kwargs):
if isinstance(response, list): if type(response) == list:
if not response: if not response:
raise ValueError( raise ValueError(
"Empty list returned for multicall of method %r with args %r, %r" # noqa: E501 "Empty list returned for multicall of method %r with args %r, %r" # noqa: E501
@ -775,61 +821,13 @@ class KojiWrapper(object):
""" """
return self.multicall_map(*args, **kwargs) return self.multicall_map(*args, **kwargs)
def save_task_id(self, task_id):
"""Save task id by creating a file using task_id as file name
:param int task_id: ID of koji task
"""
log_dir = self.compose.paths.log.koji_tasks_dir()
with open(os.path.join(log_dir, str(task_id)), "w"):
pass
class KojiMockWrapper(object):
lock = threading.Lock()
def __init__(self, compose, all_arches):
self.all_arches = all_arches
self.compose = compose
try:
self.profile = self.compose.conf["koji_profile"]
except KeyError:
raise RuntimeError("Koji profile must be configured")
with self.lock:
self.koji_module = koji.get_profile_module(self.profile)
session_opts = {}
for key in (
"timeout",
"keepalive",
"max_retries",
"retry_interval",
"anon_retry",
"offline_retry",
"offline_retry_interval",
"debug",
"debug_xmlrpc",
"serverca",
"use_fast_upload",
):
value = getattr(self.koji_module.config, key, None)
if value is not None:
session_opts[key] = value
self.koji_proxy = KojiMock(
packages_dir=self.koji_module.config.topdir,
modules_dir=os.path.join(
self.koji_module.config.topdir,
'modules',
),
all_arches=self.all_arches,
)
def get_buildroot_rpms(compose, task_id): def get_buildroot_rpms(compose, task_id):
"""Get build root RPMs - either from runroot or local""" """Get build root RPMs - either from runroot or local"""
result = [] result = []
if task_id: if task_id:
# runroot # runroot
koji = KojiWrapper(compose) koji = KojiWrapper(compose.conf["koji_profile"])
buildroot_infos = koji.koji_proxy.listBuildroots(taskID=task_id) buildroot_infos = koji.koji_proxy.listBuildroots(taskID=task_id)
if not buildroot_infos: if not buildroot_infos:
children_tasks = koji.koji_proxy.getTaskChildren(task_id) children_tasks = koji.koji_proxy.getTaskChildren(task_id)
@ -855,177 +853,3 @@ def get_buildroot_rpms(compose, task_id):
continue continue
result.append(i) result.append(i)
return sorted(result) return sorted(result)
class KojiDownloadProxy:
def __init__(self, topdir, topurl, cache_dir, logger):
if not topdir:
# This will only happen if there is either no koji_profile
# configured, or the profile doesn't have a topdir. In the first
# case there will be no koji interaction, and the second indicates
# broken koji configuration.
# We can pretend to have local access in both cases to avoid any
# external requests.
self.has_local_access = True
return
self.cache_dir = cache_dir
self.logger = logger
self.topdir = topdir
self.topurl = topurl
# If cache directory is configured, we want to use it (even if we
# actually have local access to the storage).
self.has_local_access = not bool(cache_dir)
# This is used for temporary downloaded files. The suffix is unique
# per-process. To prevent threads in the same process from colliding, a
# thread id is added later.
self.unique_suffix = "%s.%s" % (socket.gethostname(), os.getpid())
self.session = None
if not self.has_local_access:
self.session = requests.Session()
@property
def path_prefix(self):
dir = self.topdir if self.has_local_access else self.cache_dir
return dir.rstrip("/") + "/"
@classmethod
def from_config(klass, conf, logger):
topdir = None
topurl = None
cache_dir = None
if "koji_profile" in conf:
koji_module = koji.get_profile_module(conf["koji_profile"])
topdir = koji_module.config.topdir
topurl = koji_module.config.topurl
cache_dir = conf.get("koji_cache")
if cache_dir:
cache_dir = cache_dir.rstrip("/") + "/"
return klass(topdir, topurl, cache_dir, logger)
@util.retry(wait_on=requests.exceptions.RequestException)
def _download(self, url, dest):
"""Download file into given location
:param str url: URL of the file to download
:param str dest: file path to store the result in
:returns: path to the downloaded file (same as dest) or None if the URL
returns 404.
"""
# contextlib.closing is only needed in requests<2.18
with contextlib.closing(self.session.get(url, stream=True)) as r:
if r.status_code == 404:
self.logger.warning("GET %s NOT FOUND", url)
return None
if r.status_code != 200:
self.logger.error("GET %s %s", url, r.status_code)
r.raise_for_status()
# The exception from here will be retried by the decorator.
file_size = int(r.headers.get("Content-Length", 0))
self.logger.info("GET %s OK %s", url, util.format_size(file_size))
with open(dest, "wb") as f:
shutil.copyfileobj(r.raw, f)
return dest
def _delete(self, path):
"""Try to delete file at given path and ignore errors."""
try:
os.remove(path)
except Exception:
self.logger.warning("Failed to delete %s", path)
def _atomic_download(self, url, dest, validator):
"""Atomically download a file
:param str url: URL of the file to download
:param str dest: file path to store the result in
:returns: path to the downloaded file (same as dest) or None if the URL
returns 404.
"""
temp_file = "%s.%s.%s" % (dest, self.unique_suffix, threading.get_ident())
# First download to the temporary location.
try:
if self._download(url, temp_file) is None:
# The file was not found.
return None
except Exception:
# Download failed, let's make sure to clean up potentially partial
# temporary file.
self._delete(temp_file)
raise
# Check if the temporary file is correct (assuming we were provided a
# validator function).
try:
if validator:
validator(temp_file)
except Exception:
# Validation failed. Let's delete the problematic file and re-raise
# the exception.
self._delete(temp_file)
raise
# Atomically move the temporary file into final location
os.rename(temp_file, dest)
return dest
def _download_file(self, path, validator):
"""Ensure file on Koji volume in ``path`` is present in the local
cache.
:returns: path to the local file or None if file is not found
"""
url = path.replace(self.topdir, self.topurl)
destination_file = path.replace(self.topdir, self.cache_dir)
util.makedirs(os.path.dirname(destination_file))
lock = Lock(destination_file + ".lock")
# Hold the lock for this file for 5 minutes. If another compose needs
# the same file but it's not downloaded yet, the process will wait.
#
# If the download finishes in time, the downloaded file will be used
# here.
#
# If the download takes longer, this process will steal the lock and
# start its own download.
#
# That should not be a problem: the same file will be downloaded and
# then replaced atomically on the filesystem. If the original process
# managed to hardlink the first file already, that hardlink will be
# broken, but that will only result in the same file stored twice.
lock.lifetime = timedelta(minutes=5)
with lock:
# Check if the file already exists. If yes, return the path.
if os.path.exists(destination_file):
# Update mtime of the file. This covers the case of packages in the
# tag that are not included in the compose. Updating mtime will
# exempt them from cleanup for extra time.
os.utime(destination_file)
return destination_file
return self._atomic_download(url, destination_file, validator)
def get_file(self, path, validator=None):
"""
If path refers to an existing file in Koji, return a valid local path
to it. If no such file exists, return None.
:param validator: A callable that will be called with the path to the
downloaded file if and only if the file was actually downloaded.
Any exception raised from there will abort the download and be
propagated.
"""
if self.has_local_access:
# We have koji volume mounted locally. No transformation needed for
# the path, just check it exists.
if os.path.exists(path):
return path
return None
else:
# We need to download the file.
return self._download_file(path, validator)
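
Usage sketch for the KojiDownloadProxy removed above (base branch only); conf
stands for a compose configuration dict, logger for a logging.Logger, and the
package path is an example:

    proxy = KojiDownloadProxy.from_config(conf, logger)
    local = proxy.get_file(
        "/mnt/koji/packages/bash/5.1.8/2.el9/x86_64/bash-5.1.8-2.el9.x86_64.rpm"
    )
    if local is None:
        logger.error("package not found in Koji")
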


@ -109,3 +109,55 @@ class LoraxWrapper(object):
# TODO: workdir # TODO: workdir
return cmd return cmd
def get_buildinstall_cmd(
self,
product,
version,
release,
repo_baseurl,
output_dir,
variant=None,
bugurl=None,
nomacboot=False,
noupgrade=False,
is_final=False,
buildarch=None,
volid=None,
brand=None,
):
# RHEL 6 compatibility
# Usage: buildinstall [--debug] --version <version> --brand <brand> --product <product> --release <comment> --final [--output outputdir] [--discs <discstring>] <root> # noqa: E501
brand = brand or "redhat"
# HACK: ignore provided release
release = "%s %s" % (brand, version)
bugurl = bugurl or "https://bugzilla.redhat.com"
cmd = ["/usr/lib/anaconda-runtime/buildinstall"]
cmd.append("--debug")
cmd.extend(["--version", version])
cmd.extend(["--brand", brand])
cmd.extend(["--product", product])
cmd.extend(["--release", release])
if is_final:
cmd.append("--final")
if buildarch:
cmd.extend(["--buildarch", buildarch])
if bugurl:
cmd.extend(["--bugurl", bugurl])
output_dir = os.path.abspath(output_dir)
cmd.extend(["--output", output_dir])
for i in force_list(repo_baseurl):
if "://" not in i:
i = "file://%s" % os.path.abspath(i)
cmd.append(i)
return cmd
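
A hypothetical invocation of the restored RHEL 6 compatibility helper (all
values are examples); note that plain repo paths are turned into file:// URLs
by the helper:

    from pungi.wrappers.lorax import LoraxWrapper

    cmd = LoraxWrapper().get_buildinstall_cmd(
        "AlmaLinux", "9.0", None, "/mnt/repo", "/tmp/buildinstall-out",
        is_final=True, buildarch="x86_64",
    )
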


@ -40,13 +40,9 @@ def get_repoclosure_cmd(backend="yum", arch=None, repos=None, lookaside=None):
# There are options that are not exposed here, because we don't need # There are options that are not exposed here, because we don't need
# them. # them.
arches = force_list(arch or []) for i in force_list(arch or []):
for i in arches:
cmd.append("--arch=%s" % i) cmd.append("--arch=%s" % i)
if backend == "dnf" and arches:
cmd.append("--forcearch=%s" % arches[0])
repos = repos or {} repos = repos or {}
for repo_id, repo_path in repos.items(): for repo_id, repo_path in repos.items():
cmd.append("--repofrompath=%s,%s" % (repo_id, _to_url(repo_path))) cmd.append("--repofrompath=%s,%s" % (repo_id, _to_url(repo_path)))


@ -20,7 +20,6 @@ import os
import shutil import shutil
import glob import glob
import six import six
import threading
from six.moves import shlex_quote from six.moves import shlex_quote
from six.moves.urllib.request import urlretrieve from six.moves.urllib.request import urlretrieve
from fnmatch import fnmatch from fnmatch import fnmatch
@ -30,15 +29,12 @@ from kobo.shortcuts import run, force_list
from pungi.util import explode_rpm_package, makedirs, copy_all, temp_dir, retry from pungi.util import explode_rpm_package, makedirs, copy_all, temp_dir, retry
from .kojiwrapper import KojiWrapper from .kojiwrapper import KojiWrapper
lock = threading.Lock()
class ScmBase(kobo.log.LoggingBase): class ScmBase(kobo.log.LoggingBase):
def __init__(self, logger=None, command=None, compose=None, options=None): def __init__(self, logger=None, command=None, compose=None):
kobo.log.LoggingBase.__init__(self, logger=logger) kobo.log.LoggingBase.__init__(self, logger=logger)
self.command = command self.command = command
self.compose = compose self.compose = compose
self.options = options or {}
@retry(interval=60, timeout=300, wait_on=RuntimeError) @retry(interval=60, timeout=300, wait_on=RuntimeError)
def retry_run(self, cmd, **kwargs): def retry_run(self, cmd, **kwargs):
@ -160,31 +156,22 @@ class GitWrapper(ScmBase):
if "://" not in repo: if "://" not in repo:
repo = "file://%s" % repo repo = "file://%s" % repo
git_cmd = ["git"]
if "credential_helper" in self.options:
git_cmd.extend(["-c", "credential.useHttpPath=true"])
git_cmd.extend(
["-c", "credential.helper=%s" % self.options["credential_helper"]]
)
run(["git", "init"], workdir=destdir) run(["git", "init"], workdir=destdir)
try: try:
run(git_cmd + ["fetch", "--depth=1", repo, branch], workdir=destdir) run(["git", "fetch", "--depth=1", repo, branch], workdir=destdir)
run(["git", "checkout", "FETCH_HEAD"], workdir=destdir) run(["git", "checkout", "FETCH_HEAD"], workdir=destdir)
except RuntimeError as e: except RuntimeError as e:
# Fetch failed, to do a full clone we add a remote to our empty # Fetch failed, to do a full clone we add a remote to our empty
# repo, get its content and check out the reference we want. # repo, get its content and check out the reference we want.
self.log_debug( self.log_debug(
"Trying to do a full clone because shallow clone failed: %s %s" "Trying to do a full clone because shallow clone failed: %s %s"
% (e, getattr(e, "output", "")) % (e, e.output)
) )
try: try:
# Re-run git init in case of previous failure breaking .git dir # Re-run git init in case of previous failure breaking .git dir
run(["git", "init"], workdir=destdir) run(["git", "init"], workdir=destdir)
run(["git", "remote", "add", "origin", repo], workdir=destdir) run(["git", "remote", "add", "origin", repo], workdir=destdir)
self.retry_run( self.retry_run(["git", "remote", "update", "origin"], workdir=destdir)
git_cmd + ["remote", "update", "origin"], workdir=destdir
)
run(["git", "checkout", branch], workdir=destdir) run(["git", "checkout", branch], workdir=destdir)
except RuntimeError: except RuntimeError:
if self.compose: if self.compose:
@ -198,38 +185,19 @@ class GitWrapper(ScmBase):
copy_all(destdir, debugdir) copy_all(destdir, debugdir)
raise raise
def get_temp_repo_path(self, scm_root, scm_branch): self.run_process_command(destdir)
scm_repo = scm_root.split("/")[-1]
process_id = os.getpid()
tmp_dir = (
"/tmp/pungi-temp-git-repos-"
+ str(process_id)
+ "/"
+ scm_repo
+ "-"
+ scm_branch
)
return tmp_dir
def setup_repo(self, scm_root, scm_branch):
tmp_dir = self.get_temp_repo_path(scm_root, scm_branch)
if not os.path.isdir(tmp_dir):
makedirs(tmp_dir)
self._clone(scm_root, scm_branch, tmp_dir)
self.run_process_command(tmp_dir)
return tmp_dir
def export_dir(self, scm_root, scm_dir, target_dir, scm_branch=None): def export_dir(self, scm_root, scm_dir, target_dir, scm_branch=None):
scm_dir = scm_dir.lstrip("/") scm_dir = scm_dir.lstrip("/")
scm_branch = scm_branch or "master" scm_branch = scm_branch or "master"
with temp_dir() as tmp_dir:
self.log_debug( self.log_debug(
"Exporting directory %s from git %s (branch %s)..." "Exporting directory %s from git %s (branch %s)..."
% (scm_dir, scm_root, scm_branch) % (scm_dir, scm_root, scm_branch)
) )
with lock: self._clone(scm_root, scm_branch, tmp_dir)
tmp_dir = self.setup_repo(scm_root, scm_branch)
copy_all(os.path.join(tmp_dir, scm_dir), target_dir) copy_all(os.path.join(tmp_dir, scm_dir), target_dir)
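
The base-branch code removed above reuses one checkout per (repository,
branch) and process, guarded by a module-level lock; the cache path is built
like this (the repository URL is an example):

    import os

    scm_root = "https://git.example.com/pungi-fedora.git"
    scm_branch = "f36"
    tmp_dir = "/tmp/pungi-temp-git-repos-%s/%s-%s" % (
        os.getpid(), scm_root.split("/")[-1], scm_branch,
    )
    # -> /tmp/pungi-temp-git-repos-<pid>/pungi-fedora.git-f36
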
@ -237,6 +205,7 @@ class GitWrapper(ScmBase):
scm_file = scm_file.lstrip("/") scm_file = scm_file.lstrip("/")
scm_branch = scm_branch or "master" scm_branch = scm_branch or "master"
with temp_dir() as tmp_dir:
target_path = os.path.join(target_dir, os.path.basename(scm_file)) target_path = os.path.join(target_dir, os.path.basename(scm_file))
self.log_debug( self.log_debug(
@ -244,8 +213,7 @@ class GitWrapper(ScmBase):
% (scm_file, scm_root, scm_branch) % (scm_file, scm_root, scm_branch)
) )
with lock: self._clone(scm_root, scm_branch, tmp_dir)
tmp_dir = self.setup_repo(scm_root, scm_branch)
makedirs(target_dir) makedirs(target_dir)
shutil.copy2(os.path.join(tmp_dir, scm_file), target_path) shutil.copy2(os.path.join(tmp_dir, scm_file), target_path)
@ -297,7 +265,11 @@ class RpmScmWrapper(ScmBase):
class KojiScmWrapper(ScmBase): class KojiScmWrapper(ScmBase):
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
super(KojiScmWrapper, self).__init__(*args, **kwargs) super(KojiScmWrapper, self).__init__(*args, **kwargs)
wrapper = KojiWrapper(kwargs["compose"]) try:
profile = kwargs["compose"].conf["koji_profile"]
except KeyError:
raise RuntimeError("Koji profile must be configured")
wrapper = KojiWrapper(profile)
self.koji = wrapper.koji_module self.koji = wrapper.koji_module
self.proxy = wrapper.koji_proxy self.proxy = wrapper.koji_proxy
@ -393,19 +365,15 @@ def get_file_from_scm(scm_dict, target_path, compose=None):
scm_file = os.path.abspath(scm_dict) scm_file = os.path.abspath(scm_dict)
scm_branch = None scm_branch = None
command = None command = None
options = {}
else: else:
scm_type = scm_dict["scm"] scm_type = scm_dict["scm"]
scm_repo = scm_dict["repo"] scm_repo = scm_dict["repo"]
scm_file = scm_dict["file"] scm_file = scm_dict["file"]
scm_branch = scm_dict.get("branch", None) scm_branch = scm_dict.get("branch", None)
command = scm_dict.get("command") command = scm_dict.get("command")
options = scm_dict.get("options", {})
logger = compose._logger if compose else None logger = compose._logger if compose else None
scm = _get_wrapper( scm = _get_wrapper(scm_type, logger=logger, command=command, compose=compose)
scm_type, logger=logger, command=command, compose=compose, options=options
)
files_copied = [] files_copied = []
for i in force_list(scm_file): for i in force_list(scm_file):
@ -486,19 +454,15 @@ def get_dir_from_scm(scm_dict, target_path, compose=None):
scm_dir = os.path.abspath(scm_dict) scm_dir = os.path.abspath(scm_dict)
scm_branch = None scm_branch = None
command = None command = None
options = {}
else: else:
scm_type = scm_dict["scm"] scm_type = scm_dict["scm"]
scm_repo = scm_dict.get("repo", None) scm_repo = scm_dict.get("repo", None)
scm_dir = scm_dict["dir"] scm_dir = scm_dict["dir"]
scm_branch = scm_dict.get("branch", None) scm_branch = scm_dict.get("branch", None)
command = scm_dict.get("command") command = scm_dict.get("command")
options = scm_dict.get("options", {})
logger = compose._logger if compose else None logger = compose._logger if compose else None
scm = _get_wrapper( scm = _get_wrapper(scm_type, logger=logger, command=command, compose=compose)
scm_type, logger=logger, command=command, compose=compose, options=options
)
with temp_dir(prefix="scm_checkout_") as tmp_dir: with temp_dir(prefix="scm_checkout_") as tmp_dir:
scm.export_dir(scm_repo, scm_dir, scm_branch=scm_branch, target_dir=tmp_dir) scm.export_dir(scm_repo, scm_dir, scm_branch=scm_branch, target_dir=tmp_dir)


@ -276,6 +276,7 @@ class Variant(object):
modules=None, modules=None,
modular_koji_tags=None, modular_koji_tags=None,
): ):
environments = environments or [] environments = environments or []
buildinstallpackages = buildinstallpackages or [] buildinstallpackages = buildinstallpackages or []

706
pungi_utils/orchestrator.py Normal file
View File

@ -0,0 +1,706 @@
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import atexit
import errno
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import threading
from collections import namedtuple
import kobo.conf
import kobo.log
import productmd
from kobo import shortcuts
from six.moves import configparser, shlex_quote
import pungi.util
from pungi.compose import get_compose_dir
from pungi.linker import linker_pool
from pungi.phases.pkgset.sources.source_koji import get_koji_event_raw
from pungi.util import find_old_compose, parse_koji_event, temp_dir
from pungi.wrappers.kojiwrapper import KojiWrapper
Config = namedtuple(
"Config",
[
# Path to directory with the compose
"target",
"compose_type",
"label",
# Path to the selected old compose that will be reused
"old_compose",
# Path to directory with config file copies
"config_dir",
# Which koji event to use (if any)
"event",
# Additional arguments to pungi-koji executable
"extra_args",
],
)
log = logging.getLogger(__name__)
class Status(object):
# Ready to start
READY = "READY"
# Waiting for dependencies to finish.
WAITING = "WAITING"
# Part is currently running
STARTED = "STARTED"
# A dependency failed, this one will never start.
BLOCKED = "BLOCKED"
class ComposePart(object):
def __init__(self, name, config, just_phase=[], skip_phase=[], dependencies=[]):
self.name = name
self.config = config
self.status = Status.WAITING if dependencies else Status.READY
self.just_phase = just_phase
self.skip_phase = skip_phase
self.blocked_on = set(dependencies)
self.depends_on = set(dependencies)
self.path = None
self.log_file = None
self.failable = False
def __str__(self):
return self.name
def __repr__(self):
return (
"ComposePart({0.name!r},"
" {0.config!r},"
" {0.status!r},"
" just_phase={0.just_phase!r},"
" skip_phase={0.skip_phase!r},"
" dependencies={0.depends_on!r})"
).format(self)
def refresh_status(self):
"""Refresh status of this part with the result of the compose. This
should only be called once the compose finished.
"""
try:
with open(os.path.join(self.path, "STATUS")) as fh:
self.status = fh.read().strip()
except IOError as exc:
log.error("Failed to update status of %s: %s", self.name, exc)
log.error("Assuming %s is DOOMED", self.name)
self.status = "DOOMED"
def is_finished(self):
return "FINISHED" in self.status
def unblock_on(self, finished_part):
"""Update set of blockers for this part. If it's empty, mark us as ready."""
self.blocked_on.discard(finished_part)
if self.status == Status.WAITING and not self.blocked_on:
log.debug("%s is ready to start", self)
self.status = Status.READY
def setup_start(self, global_config, parts):
substitutions = dict(
("part-%s" % name, p.path) for name, p in parts.items() if p.is_finished()
)
substitutions["configdir"] = global_config.config_dir
config = pungi.util.load_config(self.config)
for f in config.opened_files:
# apply substitutions
fill_in_config_file(f, substitutions)
self.status = Status.STARTED
self.path = get_compose_dir(
os.path.join(global_config.target, "parts"),
config,
compose_type=global_config.compose_type,
compose_label=global_config.label,
)
self.log_file = os.path.join(global_config.target, "logs", "%s.log" % self.name)
log.info("Starting %s in %s", self.name, self.path)
def get_cmd(self, global_config):
cmd = ["pungi-koji", "--config", self.config, "--compose-dir", self.path]
cmd.append("--%s" % global_config.compose_type)
if global_config.label:
cmd.extend(["--label", global_config.label])
for phase in self.just_phase:
cmd.extend(["--just-phase", phase])
for phase in self.skip_phase:
cmd.extend(["--skip-phase", phase])
if global_config.old_compose:
cmd.extend(
["--old-compose", os.path.join(global_config.old_compose, "parts")]
)
if global_config.event:
cmd.extend(["--koji-event", str(global_config.event)])
if global_config.extra_args:
cmd.extend(global_config.extra_args)
cmd.extend(["--no-latest-link"])
return cmd
@classmethod
def from_config(cls, config, section, config_dir):
part = cls(
name=section,
config=os.path.join(config_dir, config.get(section, "config")),
just_phase=_safe_get_list(config, section, "just_phase", []),
skip_phase=_safe_get_list(config, section, "skip_phase", []),
dependencies=_safe_get_list(config, section, "depends_on", []),
)
if config.has_option(section, "failable"):
part.failable = config.getboolean(section, "failable")
return part
def _safe_get_list(config, section, option, default=None):
"""Get a value from config parser. The result is split into a list on
commas or spaces, and `default` is returned if the key does not exist.
"""
if config.has_option(section, option):
value = config.get(section, option)
return [x.strip() for x in re.split(r"[, ]+", value) if x]
return default
def fill_in_config_file(fp, substs):
"""Templating function. It works with Jinja2 style placeholders such as
{{foo}}. Whitespace around the key name is fine. The file is modified in place.
:param fp string: path to the file to process
:param substs dict: a mapping for values to put into the file
"""
def repl(match):
try:
return substs[match.group(1)]
except KeyError as exc:
raise RuntimeError(
"Unknown placeholder %s in %s" % (exc, os.path.basename(fp))
)
with open(fp, "r") as f:
contents = re.sub(r"{{ *([a-zA-Z-_]+) *}}", repl, f.read())
with open(fp, "w") as f:
f.write(contents)
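# Example with a hypothetical config file and mapping: a copied config
# containing
#     target = "{{ configdir }}/output"
# is rewritten in place by
#     fill_in_config_file("pungi.conf", {"configdir": "/srv/configs"})
# to
#     target = "/srv/configs/output"
# while an unknown placeholder raises RuntimeError.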
def start_part(global_config, parts, part):
part.setup_start(global_config, parts)
fh = open(part.log_file, "w")
cmd = part.get_cmd(global_config)
log.debug("Running command %r", " ".join(shlex_quote(x) for x in cmd))
return subprocess.Popen(cmd, stdout=fh, stderr=subprocess.STDOUT)
def handle_finished(global_config, linker, parts, proc, finished_part):
finished_part.refresh_status()
log.info("%s finished with status %s", finished_part, finished_part.status)
if proc.returncode == 0:
# Success, unblock other parts...
for part in parts.values():
part.unblock_on(finished_part.name)
# ...and link the results into final destination.
copy_part(global_config, linker, finished_part)
update_metadata(global_config, finished_part)
else:
# Failure, other stuff may be blocked.
log.info("See details in %s", finished_part.log_file)
block_on(parts, finished_part.name)
def copy_part(global_config, linker, part):
c = productmd.Compose(part.path)
for variant in c.info.variants:
data_path = os.path.join(part.path, "compose", variant)
link = os.path.join(global_config.target, "compose", variant)
log.info("Hardlinking content %s -> %s", data_path, link)
hardlink_dir(linker, data_path, link)
def hardlink_dir(linker, srcdir, dstdir):
for root, dirs, files in os.walk(srcdir):
root = os.path.relpath(root, srcdir)
for f in files:
src = os.path.normpath(os.path.join(srcdir, root, f))
dst = os.path.normpath(os.path.join(dstdir, root, f))
linker.queue_put((src, dst))
def update_metadata(global_config, part):
part_metadata_dir = os.path.join(part.path, "compose", "metadata")
final_metadata_dir = os.path.join(global_config.target, "compose", "metadata")
for f in os.listdir(part_metadata_dir):
# Load the metadata
with open(os.path.join(part_metadata_dir, f)) as fh:
part_metadata = json.load(fh)
final_metadata = os.path.join(final_metadata_dir, f)
if os.path.exists(final_metadata):
# We already have this file, will need to merge.
merge_metadata(final_metadata, part_metadata)
else:
# A new file, just copy it.
copy_metadata(global_config, final_metadata, part_metadata)
def copy_metadata(global_config, final_metadata, source):
"""Copy file to final location, but update compose information."""
with open(
os.path.join(global_config.target, "compose/metadata/composeinfo.json")
) as f:
composeinfo = json.load(f)
try:
source["payload"]["compose"].update(composeinfo["payload"]["compose"])
except KeyError:
# No [payload][compose], probably OSBS metadata
pass
with open(final_metadata, "w") as f:
json.dump(source, f, indent=2, sort_keys=True)
def merge_metadata(final_metadata, source):
with open(final_metadata) as f:
metadata = json.load(f)
try:
key = {
"productmd.composeinfo": "variants",
"productmd.modules": "modules",
"productmd.images": "images",
"productmd.rpms": "rpms",
}[source["header"]["type"]]
# TODO what if multiple parts create images for the same variant
metadata["payload"][key].update(source["payload"][key])
except KeyError:
# OSBS metadata, merge whole file
metadata.update(source)
with open(final_metadata, "w") as f:
json.dump(metadata, f, indent=2, sort_keys=True)
def block_on(parts, name):
"""Part ``name`` failed, mark everything depending on it as blocked."""
for part in parts.values():
if name in part.blocked_on:
log.warning("%s is blocked now and will not run", part)
part.status = Status.BLOCKED
block_on(parts, part.name)
def check_finished_processes(processes):
"""Walk through all active processes and check if something finished.
"""
for proc in processes.keys():
proc.poll()
if proc.returncode is not None:
yield proc, processes[proc]
def run_all(global_config, parts):
# Mapping subprocess.Popen -> ComposePart
processes = dict()
remaining = set(p.name for p in parts.values() if not p.is_finished())
with linker_pool("hardlink") as linker:
while remaining or processes:
update_status(global_config, parts)
for proc, part in check_finished_processes(processes):
del processes[proc]
handle_finished(global_config, linker, parts, proc, part)
# Start new available processes.
for name in list(remaining):
part = parts[name]
# Start all ready parts
if part.status == Status.READY:
remaining.remove(name)
processes[start_part(global_config, parts, part)] = part
# Remove blocked parts from todo list
elif part.status == Status.BLOCKED:
remaining.remove(part.name)
# Wait for any child process to finish if there is any.
if processes:
pid, reason = os.wait()
for proc in processes.keys():
# Set the return code for process that we caught by os.wait().
# Calling poll() on it would not set the return code properly
# since the value was already consumed by os.wait().
if proc.pid == pid:
proc.returncode = (reason >> 8) & 0xFF
log.info("Waiting for linking to finish...")
return update_status(global_config, parts)
def get_target_dir(config, compose_info, label, reldir=""):
"""Find directory where this compose will be.
@param reldir: if target path in config is relative, it will be resolved
against this directory
"""
dir = os.path.realpath(os.path.join(reldir, config.get("general", "target")))
target_dir = get_compose_dir(
dir,
compose_info,
compose_type=config.get("general", "compose_type"),
compose_label=label,
)
return target_dir
def setup_logging(debug=False):
FORMAT = "%(asctime)s: %(levelname)s: %(message)s"
level = logging.DEBUG if debug else logging.INFO
kobo.log.add_stderr_logger(log, log_level=level, format=FORMAT)
log.setLevel(level)
def compute_status(statuses):
if any(map(lambda x: x[0] in ("STARTED", "WAITING"), statuses)):
# If there is anything still running or waiting to start, the whole is
# still running.
return "STARTED"
elif any(map(lambda x: x[0] in ("DOOMED", "BLOCKED") and not x[1], statuses)):
# If any required part is doomed or blocked, the whole is doomed
return "DOOMED"
elif all(map(lambda x: x[0] == "FINISHED", statuses)):
# If all parts are complete, the whole is complete
return "FINISHED"
else:
return "FINISHED_INCOMPLETE"
def update_status(global_config, parts):
log.debug("Updating status metadata")
metadata = {}
statuses = set()
for part in parts.values():
metadata[part.name] = {"status": part.status, "path": part.path}
statuses.add((part.status, part.failable))
metadata_path = os.path.join(
global_config.target, "compose", "metadata", "parts.json"
)
with open(metadata_path, "w") as fh:
json.dump(metadata, fh, indent=2, sort_keys=True, separators=(",", ": "))
status = compute_status(statuses)
log.info("Overall status is %s", status)
with open(os.path.join(global_config.target, "STATUS"), "w") as fh:
fh.write(status)
return status != "DOOMED"
def prepare_compose_dir(config, args, main_config_file, compose_info):
if not hasattr(args, "compose_path"):
# Creating a brand new compose
target_dir = get_target_dir(
config, compose_info, args.label, reldir=os.path.dirname(main_config_file)
)
for dir in ("logs", "parts", "compose/metadata", "work/global"):
try:
os.makedirs(os.path.join(target_dir, dir))
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
with open(os.path.join(target_dir, "STATUS"), "w") as fh:
fh.write("STARTED")
# Copy initial composeinfo for new compose
shutil.copy(
os.path.join(target_dir, "work/global/composeinfo-base.json"),
os.path.join(target_dir, "compose/metadata/composeinfo.json"),
)
else:
# Restarting a particular compose
target_dir = args.compose_path
return target_dir
def load_parts_metadata(global_config):
parts_metadata = os.path.join(global_config.target, "compose/metadata/parts.json")
with open(parts_metadata) as f:
return json.load(f)
def setup_for_restart(global_config, parts, to_restart):
has_stuff_to_do = False
metadata = load_parts_metadata(global_config)
for key in metadata:
# Update state to match what is on disk
log.debug(
"Reusing %s (%s) from %s",
key,
metadata[key]["status"],
metadata[key]["path"],
)
parts[key].status = metadata[key]["status"]
parts[key].path = metadata[key]["path"]
for key in to_restart:
# Set restarted parts to run again
parts[key].status = Status.WAITING
parts[key].path = None
for key in to_restart:
# Remove blockers that are already finished
for blocker in list(parts[key].blocked_on):
if parts[blocker].is_finished():
parts[key].blocked_on.discard(blocker)
if not parts[key].blocked_on:
log.debug("Part %s in not blocked", key)
# Nothing blocks it; let's go
parts[key].status = Status.READY
has_stuff_to_do = True
if not has_stuff_to_do:
raise RuntimeError("All restarted parts are blocked. Nothing to do.")
def run_kinit(config):
if not config.getboolean("general", "kerberos"):
return
keytab = config.get("general", "kerberos_keytab")
principal = config.get("general", "kerberos_principal")
fd, fname = tempfile.mkstemp(prefix="krb5cc_pungi-orchestrate_")
os.close(fd)
os.environ["KRB5CCNAME"] = fname
shortcuts.run(["kinit", "-k", "-t", keytab, principal])
log.debug("Created a kerberos ticket for %s", principal)
atexit.register(os.remove, fname)
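
A minimal `[general]` section that exercises `run_kinit`; the keytab path and principal are placeholders, and a working `kinit` binary is assumed:

```python
import configparser

conf = configparser.RawConfigParser()
conf.read_string(
    "[general]\n"
    "kerberos = true\n"
    "kerberos_keytab = /etc/pungi.keytab\n"
    "kerberos_principal = pungi/compose@EXAMPLE.COM\n"
)
run_kinit(conf)  # runs: kinit -k -t /etc/pungi.keytab pungi/compose@EXAMPLE.COM
```
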
def get_compose_data(compose_path):
try:
compose = productmd.compose.Compose(compose_path)
data = {
"compose_id": compose.info.compose.id,
"compose_date": compose.info.compose.date,
"compose_type": compose.info.compose.type,
"compose_respin": str(compose.info.compose.respin),
"compose_label": compose.info.compose.label,
"release_id": compose.info.release_id,
"release_name": compose.info.release.name,
"release_short": compose.info.release.short,
"release_version": compose.info.release.version,
"release_type": compose.info.release.type,
"release_is_layered": compose.info.release.is_layered,
}
if compose.info.release.is_layered:
data.update(
{
"base_product_name": compose.info.base_product.name,
"base_product_short": compose.info.base_product.short,
"base_product_version": compose.info.base_product.version,
"base_product_type": compose.info.base_product.type,
}
)
return data
except Exception:
return {}
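
Note the deliberately broad `except`: a path without productmd metadata simply yields an empty mapping, e.g.:

```python
# No composeinfo.json under this (made-up) path, so no metadata is extracted.
assert get_compose_data("/nonexistent/compose") == {}
```
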
def get_script_env(compose_path):
env = os.environ.copy()
env["COMPOSE_PATH"] = compose_path
for key, value in get_compose_data(compose_path).items():
if isinstance(value, bool):
env[key.upper()] = "YES" if value else ""
else:
env[key.upper()] = str(value) if value else ""
return env
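
A sketch of what a hook script sees, using an invented compose path:

```python
env = get_script_env("/mnt/compose/latest")
assert env["COMPOSE_PATH"] == "/mnt/compose/latest"
# Metadata keys are upper-cased; booleans turn into "YES" or "", so a
# layered release would have env["RELEASE_IS_LAYERED"] == "YES".
```
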
def run_scripts(prefix, compose_dir, scripts):
env = get_script_env(compose_dir)
for idx, script in enumerate(scripts.strip().splitlines()):
command = script.strip()
logfile = os.path.join(compose_dir, "logs", "%s%s.log" % (prefix, idx))
log.debug("Running command: %r", command)
log.debug("See output in %s", logfile)
shortcuts.run(command, env=env, logfile=logfile)
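
The `*_compose_script` options may hold several commands, one per line; a sketch with invented script names:

```python
# Two commands: logs go to <compose_dir>/logs/post_compose_0.log and
# post_compose_1.log respectively.
scripts = "sync-to-mirror.sh\nreport-compose-done.sh --verbose\n"
run_scripts("post_compose_", "/mnt/compose/latest", scripts)
```
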
def try_translate_path(parts, path):
translation = []
for part in parts.values():
conf = pungi.util.load_config(part.config)
translation.extend(conf.get("translate_paths", []))
return pungi.util.translate_path_raw(translation, path)
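
Assuming the usual prefix-replacement behavior of `translate_path_raw`, a made-up `translate_paths` entry works like this:

```python
translation = [("/mnt/koji/compose", "https://example.com/compose")]
# The local prefix is swapped for the public one:
pungi.util.translate_path_raw(translation, "/mnt/koji/compose/Beta/compose")
# -> "https://example.com/compose/Beta/compose"
```
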
def send_notification(compose_dir, command, parts):
if not command:
return
from pungi.notifier import PungiNotifier
data = get_compose_data(compose_dir)
data["location"] = try_translate_path(parts, compose_dir)
notifier = PungiNotifier([command])
with open(os.path.join(compose_dir, "STATUS")) as f:
status = f.read().strip()
notifier.send("status-change", workdir=compose_dir, status=status, **data)
def setup_progress_monitor(global_config, parts):
"""Update configuration so that each part send notifications about its
progress to the orchestrator.
There is a file to which the notification is written. The orchestrator is
reading it and mapping the entries to particular parts. The path to this
file is stored in an environment variable.
"""
tmp_file = tempfile.NamedTemporaryFile(prefix="pungi-progress-monitor_")
os.environ["_PUNGI_ORCHESTRATOR_PROGRESS_MONITOR"] = tmp_file.name
atexit.register(os.remove, tmp_file.name)
global_config.extra_args.append(
"--notification-script=pungi-notification-report-progress"
)
def reader():
while True:
line = tmp_file.readline()
if not line:
time.sleep(0.1)
continue
path, msg = line.split(":", 1)
for part in parts:
if parts[part].path == os.path.dirname(path):
log.debug("%s: %s", part, msg.strip())
break
monitor = threading.Thread(target=reader)
monitor.daemon = True
monitor.start()
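
Each line the parts append has the form `<path>: <message>`, and the reader matches `os.path.dirname(path)` against each part's path; the sample line is invented:

```python
line = "/mnt/compose/parts/server/compose: Phase INIT started\n"
path, msg = line.split(":", 1)
# os.path.dirname(path) == "/mnt/compose/parts/server" -> the "server" part
```
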
def run(work_dir, main_config_file, args):
config_dir = os.path.join(work_dir, "config")
shutil.copytree(os.path.dirname(main_config_file), config_dir)
# Read main config
parser = configparser.RawConfigParser(
defaults={
"kerberos": "false",
"pre_compose_script": "",
"post_compose_script": "",
"notification_script": "",
}
)
parser.read(main_config_file)
# Create kerberos ticket
run_kinit(parser)
compose_info = dict(parser.items("general"))
compose_type = parser.get("general", "compose_type")
target_dir = prepare_compose_dir(parser, args, main_config_file, compose_info)
kobo.log.add_file_logger(log, os.path.join(target_dir, "logs", "orchestrator.log"))
log.info("Composing %s", target_dir)
run_scripts("pre_compose_", target_dir, parser.get("general", "pre_compose_script"))
old_compose = find_old_compose(
os.path.dirname(target_dir),
compose_info["release_short"],
compose_info["release_version"],
"",
)
if old_compose:
log.info("Reusing old compose %s", old_compose)
global_config = Config(
target=target_dir,
compose_type=compose_type,
label=args.label,
old_compose=old_compose,
config_dir=os.path.dirname(main_config_file),
event=args.koji_event,
extra_args=_safe_get_list(parser, "general", "extra_args"),
)
if not global_config.event and parser.has_option("general", "koji_profile"):
koji_wrapper = KojiWrapper(parser.get("general", "koji_profile"))
event_file = os.path.join(global_config.target, "work/global/koji-event")
result = get_koji_event_raw(koji_wrapper, None, event_file)
global_config = global_config._replace(event=result["id"])
parts = {}
for section in parser.sections():
if section == "general":
continue
parts[section] = ComposePart.from_config(parser, section, config_dir)
if hasattr(args, "part"):
setup_for_restart(global_config, parts, args.part)
setup_progress_monitor(global_config, parts)
send_notification(target_dir, parser.get("general", "notification_script"), parts)
retcode = run_all(global_config, parts)
if retcode:
# Only run the script if we are not doomed.
run_scripts(
"post_compose_", target_dir, parser.get("general", "post_compose_script")
)
send_notification(target_dir, parser.get("general", "notification_script"), parts)
return retcode
def parse_args(argv):
parser = argparse.ArgumentParser()
parser.add_argument("--debug", action="store_true")
parser.add_argument("--koji-event", metavar="ID", type=parse_koji_event)
subparsers = parser.add_subparsers()
start = subparsers.add_parser("start")
start.add_argument("config", metavar="CONFIG")
start.add_argument("--label")
restart = subparsers.add_parser("restart")
restart.add_argument("config", metavar="CONFIG")
restart.add_argument("compose_path", metavar="COMPOSE_PATH")
restart.add_argument(
"part", metavar="PART", nargs="*", help="which parts to restart"
)
restart.add_argument("--label")
return parser.parse_args(argv)
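
Typical invocations, assuming the console script is installed as `pungi-orchestrate` (the name suggested by the temp file prefix above):

```python
#   pungi-orchestrate start multi.conf --label Beta-1.2
#   pungi-orchestrate --koji-event 123456 restart multi.conf \
#       /mnt/compose/Example-20220322.n.0 server client
args = parse_args(["start", "multi.conf", "--label", "Beta-1.2"])
assert args.config == "multi.conf" and args.label == "Beta-1.2"
```
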
def main(argv=None):
args = parse_args(argv)
setup_logging(args.debug)
main_config_file = os.path.abspath(args.config)
with temp_dir() as work_dir:
try:
if not run(work_dir, main_config_file, args):
sys.exit(1)
except Exception:
log.exception("Unhandled exception!")
sys.exit(1)

View File

@ -148,15 +148,6 @@ class UnifiedISO(object):
         new_path = os.path.join(self.temp_dir, "trees", arch, old_relpath)
         makedirs(os.path.dirname(new_path))

-        # Resolve symlinks to external files. Symlinks within the
-        # provided `dir` are kept.
-        if os.path.islink(old_path):
-            real_path = os.readlink(old_path)
-            abspath = os.path.normpath(
-                os.path.join(os.path.dirname(old_path), real_path)
-            )
-            if not abspath.startswith(dir):
-                old_path = real_path
         try:
             self.linker.link(old_path, new_path)
         except OSError as exc:

Some files were not shown because too many files have changed in this diff.