Compare commits


No commits in common. "pungi-0_3_0-1_fc7" and "master" have entirely different histories.

446 changed files with 88186 additions and 140 deletions


@@ -1 +0,0 @@
pungi-0.3.0.tar.gz

.gitignore
@@ -0,0 +1,19 @@
*.py[co]
*~
*.egg-info
MANIFEST
build/*
dist/*
doc/_build
noarch/*
tests/data/repo
tests/data/repo-krb5-lookaside
tests/_composes
htmlcov/
.coverage
.eggs
.idea/
.tox
.venv
.kdev4/
pungi.kdev4

1715.patch
@@ -0,0 +1,41 @@
From 432b0bce0401c4bbcd1a958a89305c475a794f26 Mon Sep 17 00:00:00 2001
From: Adam Williamson <awilliam@redhat.com>
Date: Jan 19 2024 07:25:09 +0000
Subject: checks: don't require "repo" in the "ostree" schema
Per @siosm in https://pagure.io/pungi-fedora/pull-request/1227
this option "is deprecated and not needed anymore", so Pungi
should not be requiring it.
Merges: https://pagure.io/pungi/pull-request/1714
Signed-off-by: Adam Williamson <awilliam@redhat.com>
---
diff --git a/pungi/checks.py b/pungi/checks.py
index a340f93..db8b297 100644
--- a/pungi/checks.py
+++ b/pungi/checks.py
@@ -1066,7 +1066,6 @@ def make_schema():
"required": [
"treefile",
"config_url",
- "repo",
"ostree_repo",
],
"additionalProperties": False,
diff --git a/pungi/phases/ostree.py b/pungi/phases/ostree.py
index 90578ae..2649cdb 100644
--- a/pungi/phases/ostree.py
+++ b/pungi/phases/ostree.py
@@ -85,7 +85,7 @@ class OSTreeThread(WorkerThread):
comps_repo = compose.paths.work.comps_repo(
"$basearch", variant=variant, create_dir=False
)
- repos = shortcuts.force_list(config["repo"]) + self.repos
+ repos = shortcuts.force_list(config.get("repo", [])) + self.repos
if compose.has_comps:
repos.append(translate_path(compose, comps_repo))
repos = get_repo_dicts(repos, logger=self.pool)
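For context: kobo's force_list() wraps a scalar value in a list and passes a
list through unchanged, so the fallback to an empty list composes cleanly with
the extra repos. A minimal sketch of the behavioral difference (the config
values below are hypothetical):

    from kobo.shortcuts import force_list

    config = {"treefile": "fedora-silverblue.yaml"}  # deprecated "repo" key omitted
    extra_repos = ["https://example.com/compose/$basearch/os/"]  # stands in for self.repos

    # Old code raised KeyError whenever "repo" was omitted:
    #     repos = force_list(config["repo"]) + extra_repos

    # New code falls back to an empty list, leaving just the extra repos:
    repos = force_list(config.get("repo", [])) + extra_repos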

AUTHORS
@@ -0,0 +1,16 @@
Authors:
Jesse Keating <jkeating at redhat dot com>
Dennis Gilmore <dennis at ausil dot us>
Daniel Mach <dmach at redhat dot com>
Contributors:
Will Woods <wwoods at redhat dot com>
Essien Ita Essien <essien at wazobialinux dot com>
James Bowes <jbowes at redhat dot com>
Tom Callaway <tcallawa at redhat dot com>
Joel Andres Granados <jgranado at redhat dot com>
<proski at fedoraproject dot org>
Mark McLoughlin <markmc at redhat dot com>
Jeremy Cline <jcline at redhat dot com>

COPYING
@@ -0,0 +1,14 @@
Pungi - Distribution compose tool
Copyright (C) 2006-2015 Red Hat, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <https://gnu.org/licenses/>.

GPL
@@ -0,0 +1,339 @@
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
<signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License.

MANIFEST.in
@@ -0,0 +1,16 @@
include AUTHORS
include COPYING
include GPL
include pungi.spec
include setup.cfg
include tox.ini
include share/*
include share/multilib/*
include doc/*
include doc/_static/*
include tests/*
include tests/data/*
include tests/data/specs/*
recursive-include tests/fixtures *
global-exclude *.py[co]
global-exclude *~ *.sw? \#*\#

Makefile
@@ -1,21 +1,113 @@
-# Makefile for source rpm: pungi
-# $Id$
-NAME := pungi
-SPECFILE = $(firstword $(wildcard *.spec))
-define find-makefile-common
-for d in common ../common ../../common ; do if [ -f $$d/Makefile.common ] ; then if [ -f $$d/CVS/Root -a -w $$d/Makefile.common ] ; then cd $$d ; cvs -Q update ; fi ; echo "$$d/Makefile.common" ; break ; fi ; done
-endef
-MAKEFILE_COMMON := $(shell $(find-makefile-common))
-ifeq ($(MAKEFILE_COMMON),)
-# attempt a checkout
-define checkout-makefile-common
-test -f CVS/Root && { cvs -Q -d $$(cat CVS/Root) checkout common && echo "common/Makefile.common" ; } || { echo "ERROR: I can't figure out how to checkout the 'common' module." ; exit -1 ; } >&2
-endef
-MAKEFILE_COMMON := $(shell $(checkout-makefile-common))
-endif
-include $(MAKEFILE_COMMON)
.PHONY: all clean doc log test
PKGNAME=pungi
VERSION=$(shell rpm -q --qf "%{VERSION}\n" --specfile ${PKGNAME}.spec | head -n1)
RELEASE=$(shell rpm -q --qf "%{RELEASE}\n" --specfile ${PKGNAME}.spec | head -n1)
GITTAG=${PKGNAME}-$(VERSION)
PKGRPMFLAGS=--define "_topdir ${PWD}" --define "_specdir ${PWD}" --define "_sourcedir ${PWD}/dist" --define "_srcrpmdir ${PWD}" --define "_rpmdir ${PWD}" --define "_builddir ${PWD}"
RPM="noarch/${PKGNAME}-$(VERSION)-$(RELEASE).noarch.rpm"
SRPM="${PKGNAME}-$(VERSION)-$(RELEASE).src.rpm"
PYTEST=pytest
all: help
help:
@echo "Usage: make <target>"
@echo
@echo "Available targets are:"
@echo " help show this text"
@echo " clean remove python bytecode and temp files"
@echo " doc build documentation"
@echo " install install program on current system"
@echo " test run tests"
@echo " test-coverage run tests and generate a coverage report"
@echo " test-compose run a small teest compose (requires test data)"
@echo " test-data build test data (requirement for running tests)"
@echo
@echo "Available rel-eng targets are:"
@echo " archive create source tarball"
@echo " log display changelog for spec file"
@echo " tag create a git tag according to version and release from spec file"
@echo " rpm build rpm"
@echo " srpm build srpm"
@echo " rpminstall build rpm and install it"
@echo " release build srpm and create git tag"
tag:
@git tag -a -m "Tag as $(GITTAG)" -f $(GITTAG)
@echo "Tagged as $(GITTAG)"
Changelog:
(GIT_DIR=.git git log > .changelog.tmp && mv .changelog.tmp Changelog; rm -f .changelog.tmp) || (touch Changelog; echo 'git directory not found: installing possibly empty changelog.' >&2)
log:
@(LC_ALL=C date +"* %a %b %e %Y `git config --get user.name` <`git config --get user.email`> - VERSION"; git log --pretty="format:- %s (%ae)" | sed -r 's/ \(([^@]+)@[^)]+\)/ (\1)/g' | cat) | less
archive:
@rm -f Changelog
@rm -f MANIFEST
@make Changelog
@rm -rf ${PKGNAME}-$(VERSION)/
@python setup.py sdist --formats=bztar > /dev/null
@echo "The archive is in dist/${PKGNAME}-$(VERSION).tar.bz2"
srpm: archive
@rm -f $(SRPM)
@rpmbuild -bs ${PKGRPMFLAGS} ${PKGNAME}.spec
@echo "The srpm is in $(SRPM)"
rpm: archive
@rpmbuild --clean -bb ${PKGRPMFLAGS} ${PKGNAME}.spec
@echo "The rpm is in $(RPM)"
rpminstall: rpm
@rpm -ivh --force $(RPM)
release: tag srpm
install:
@python setup.py install
clean:
@python setup.py clean
@rm -vf *.rpm
@rm -vrf noarch
@rm -vf *.tar.gz
@rm -vrf dist
@rm -vf MANIFEST
@rm -vf Changelog
@find . -\( -name "*.pyc" -o -name '*.pyo' -o -name "*~" -o -name "__pycache__" -\) -delete
@find . -depth -type d -a -name '*.egg-info' -exec rm -rf {} \;
test:
$(PYTEST) $(PYTEST_OPTS)
test-coverage:
$(PYTEST) --cov=pungi --cov-report term --cov-report html --cov-config tox.ini $(PYTEST_OPTS)
test-data:
./tests/data/specs/build.sh
test-compose:
cd tests && ./test_compose.sh
test-multi-compose:
PYTHONPATH=$$(pwd) PATH=$$(pwd)/bin:$$PATH pungi-orchestrate --debug start tests/data/multi-compose.conf
doc:
cd doc; make html

README.md
@@ -0,0 +1,39 @@
# Pungi
*Pungi* is a distribution compose tool.
Composes are release snapshots that contain release deliverables such as:
- installation trees
- RPMs
- repodata
- comps
- (bootable) ISOs
- kickstart trees
- anaconda images
- images for PXE boot
## Tool overview
*Pungi* consists of multiple separate executables backed by a common library.
The main entry-point is the `pungi-koji` script. It loads the compose
configuration and kicks off the process. Composing itself is done in phases.
Each phase is responsible for generating some artifacts on disk and updating
the `compose` object that is threaded through all the phases.
*Pungi* itself does not actually do that much. Most of the actual work is
delegated to separate executables. *Pungi* just makes sure that all the
commands are invoked in the appropriate order and with correct arguments. It
also moves the artifacts to correct locations.
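As a rough sketch of that phase-driven flow (the names below are illustrative,
not Pungi's actual API):

    # Illustrative sketch only -- not Pungi's real API.
    class Phase:
        def __init__(self, name, compose):
            self.name, self.compose = name, compose

        def run(self):
            # Each phase writes artifacts and updates the shared compose object.
            self.compose["artifacts"].append(self.name)

    compose = {"artifacts": []}  # stands in for the real compose object
    for name in ("init", "pkgset", "gather", "createrepo", "createiso"):
        Phase(name, compose).run()
    print(compose["artifacts"])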
## Links
- Documentation: https://docs.pagure.org/pungi/
- Upstream GIT: https://pagure.io/pungi/
- Issue tracker: https://pagure.io/pungi/issues
- Questions can be asked in the *#fedora-releng* IRC channel on irc.libera.chat
or in the matrix room
[`#releng:fedoraproject.org`](https://matrix.to/#/#releng:fedoraproject.org)

TODO
@@ -0,0 +1,74 @@
Random thoughts on what needs to be done before Pungi 4.0 is completed.
Define building blocks and their metadata
=========================================
* rpms in yum repos
* comps
* kickstart trees
* isos
* kickstart trees
* bootable images
* readme files
* license(s)
Compose structure
=================
* topdir
* work, logs, etc.
* compose
* $variant
* $arch
* $content_type (rpms, isos, kickstart trees, etc.)
* actual content
Split Pungi into smaller well-defined tools
===========================================
* process initial packages
* comps
* json mapping
* ???
* grab initial package set
* yum repos
* koji instance (basically what mash does today)
* resolve deps (gather)
* self-hosting
* fulltree
* multilib
* langpacks
* create repos
* create install images
* lorax
* create isos
* isos
* bootable
* hybrid
* implant md5sum
* jigdo
* checksums
* run tests
* just quick sanity tests
* notification
* email
* messagebus
Unsorted
========
* run any tasks in koji or local host
* support for non-rpm content? (java artifacts, etc.)
* docs!
* unit tests!
* use productmd for metadata: https://github.com/release-engineering/productmd/
* use next-gen tools: createrepo_c, mergerepo_c, dnf, hawkey, libcomps

@@ -0,0 +1,2 @@
# Clean up pungi cache
d /var/cache/pungi/createrepo_c/ - - - 30d

@@ -0,0 +1,19 @@
This directory contains scripts to compare YUM and DNF based gathering code in
Pungi.
There are two scripts that help re-run the depsolving from an existing compose.
As input they need the .conf and .log files from that compose. They collect the
correct command line options from those files and run the respective tool.
Run:
$ run-dnf.sh Server.x86_64.conf
$ run-yum.sh Server.x86_64.conf
The results are stored in files with .log.dnf or .log.yum extensions. When
--interactive is passed as the second argument, the output is printed to the
terminal instead (useful when running in a debugger).
To compare the RPM package lists, run:
$ ./pungi-compare-depsolving Server.x86_64.log.yum Server.x86_64.log.dnf

pungi-compare-depsolving
@@ -0,0 +1,63 @@
#!/usr/bin/python
from __future__ import print_function

import argparse
import os
import sys

here = sys.path[0]
if here != '/usr/bin':
    # Running from a git checkout: make the pungi package importable.
    sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")))

from kobo.rpmlib import parse_nvra, make_nvra

from pungi.wrappers.pungi import PungiWrapper


def read_rpms(fn):
    # Parse a gather log and return the set of NVRAs it lists.
    pw = PungiWrapper()
    with open(fn, "r") as f:
        data, _, _ = pw.parse_log(f)
    result = set()
    for i in data["rpm"]:
        nvra = parse_nvra(i["path"])
        result.add(make_nvra(nvra, add_rpm=True))
    return result


parser = argparse.ArgumentParser()
parser.add_argument('old', metavar='OLD', default='pungi-yum.log')
parser.add_argument('new', metavar='NEW', default='pungi-dnf.log')
args = parser.parse_args()

yum_rpms = read_rpms(args.old)
dnf_rpms = read_rpms(args.new)

removed = yum_rpms - dnf_rpms
added = dnf_rpms - yum_rpms

print("ADDED: %s" % len(added))
for i in sorted(added):
    print(" %s" % i)
print()

print("REMOVED: %s" % len(removed))
for i in sorted(removed):
    print(" %s" % i)
print()

print("ADDED: %6s" % len(added))
print("REMOVED: %6s" % len(removed))
print("YUM RPMS: %6s" % len(yum_rpms))
print("DNF RPMS: %6s" % len(dnf_rpms))
print("ALL RPMS: %6s" % len(yum_rpms | dnf_rpms))

if added or removed:
    sys.exit(1)

run-dnf.sh
@@ -0,0 +1,24 @@
#!/bin/bash
set -e
set -u
set -o pipefail
HERE=$(dirname "$0")
PATH=$HERE/../../bin:$PATH
PYTHONPATH=$HERE/../../:${PYTHONPATH:-}
export PATH PYTHONPATH
CONF=$1
LOG=${CONF%%.conf}.log
ARCH=$(head -n1 "$LOG" | tr ' ' '\n' | grep -- '--arch=')
CMD=(pungi-gather "--config=$CONF" "$ARCH" $(head -n1 "$LOG" | tr ' ' '\n' | grep '^--\(selfhosting\|fulltree\|greedy\|multilib\)'))
echo "${CMD[@]}"
if [ $# -le 1 ] || [ "$2" != "--interactive" ]; then
    exec >"$LOG.dnf"
fi
exec 2>&1
exec "${CMD[@]}"

run-yum.sh
@@ -0,0 +1,28 @@
#!/bin/sh
set -e
set -o pipefail
set -u
export LANG=C
HERE=$(dirname "$0")
PATH=$HERE/../../bin:$PATH
PYTHONPATH=$HERE/../../
export PATH PYTHONPATH
CONF="$1"
LOG=${CONF%%.conf}.log
tempdir=$(mktemp -d)
trap 'rm -rf $tempdir' EXIT
cmd=$(head -n1 "$LOG" | cut -d' ' -f2- | sed "s@--\(destdir\|cachedir\)=\(/[^/ ]*\)*@--\1=$tempdir/\1@g" | sed 's/^pungi3/pungi/' | sed "s@--config=/\([^/]*/\)*work/[^/]*/pungi/\([^ ]*\)@--config=$1@g")
echo "$cmd"
if [ $# -le 1 ] || [ "$2" != "--interactive" ]; then
    exec >"$LOG.yum"
fi
exec 2>&1
$cmd

doc/Makefile
@@ -0,0 +1,177 @@
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build
# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
@echo " text to make text files"
@echo " man to make manual pages"
@echo " texinfo to make Texinfo files"
@echo " info to make Texinfo files and run them through makeinfo"
@echo " gettext to make PO message catalogs"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " xml to make Docutils-native XML files"
@echo " pseudoxml to make pseudoxml-XML files for display purposes"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
clean:
rm -rf $(BUILDDIR)/*
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Pungi.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Pungi.qhc"
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/Pungi"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Pungi"
@echo "# devhelp"
epub:
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
latexpdf:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
latexpdfja:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through platex and dvipdfmx..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
@echo "Run \`make' in that directory to run these through makeinfo" \
"(use \`make info' here to do that automatically)."
info:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo "Running Texinfo files through makeinfo..."
make -C $(BUILDDIR)/texinfo info
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext:
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
@echo
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."
xml:
$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
@echo
@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
pseudoxml:
$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
@echo
@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."

doc/_static/.keep (empty file)

doc/_static/phases.svg
@@ -0,0 +1,557 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
width="610.46454"
height="327.16599"
viewBox="0 0 610.46457 327.16599"
id="svg2"
version="1.1"
inkscape:version="1.3.2 (091e20e, 2023-11-25)"
sodipodi:docname="phases.svg"
inkscape:export-filename="/home/lsedlar/repos/pungi/doc/_static/phases.png"
inkscape:export-xdpi="90"
inkscape:export-ydpi="90"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:dc="http://purl.org/dc/elements/1.1/">
<sodipodi:namedview
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageopacity="1"
inkscape:pageshadow="2"
inkscape:zoom="1.5"
inkscape:cx="268"
inkscape:cy="260.66667"
inkscape:document-units="px"
inkscape:current-layer="layer1"
showgrid="false"
inkscape:window-width="1920"
inkscape:window-height="1027"
inkscape:window-x="0"
inkscape:window-y="25"
inkscape:window-maximized="1"
units="px"
inkscape:document-rotation="0"
showguides="true"
inkscape:guide-bbox="true"
fit-margin-top="7.4"
fit-margin-left="7.4"
fit-margin-right="7.4"
fit-margin-bottom="7.4"
lock-margins="true"
inkscape:showpageshadow="2"
inkscape:pagecheckerboard="0"
inkscape:deskcolor="#d1d1d1" />
<defs
id="defs4">
<marker
inkscape:isstock="true"
style="overflow:visible"
id="Arrow1Lend"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="Arrow1Lend">
<path
inkscape:connector-curvature="0"
transform="matrix(-0.8,0,0,-0.8,-10,0)"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
d="M 0,0 5,-5 -12.5,0 5,5 Z"
id="path4451" />
</marker>
</defs>
<metadata
id="metadata7">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
</cc:Work>
</rdf:RDF>
</metadata>
<g
transform="matrix(1.066667,0,0,1.066667,-99.07321,-903.45239)"
id="layer1"
inkscape:groupmode="layer"
inkscape:label="Vrstva 1">
<g
transform="translate(98.243246,-80.817124)"
id="g3411">
<rect
style="fill:#8ae234;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
id="rect3340"
width="26.295755"
height="49.214859"
x="953.49097"
y="49.250374"
transform="matrix(0,1,1,0,0,0)" />
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
x="51.554729"
y="970.26605"
id="text3360"><tspan
sodipodi:role="line"
id="tspan3362"
x="51.554729"
y="970.26605"
style="font-size:13.1479px;line-height:1.25">Pkgset</tspan></text>
</g>
<g
transform="translate(56.378954,-80.817124)"
id="g3398">
<rect
y="553.98242"
x="953.49097"
height="46.01757"
width="26.295755"
id="rect3400"
style="fill:#3465a4;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
transform="matrix(0,1,1,0,0,0)" />
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
x="557.61566"
y="971.33813"
id="text3396"><tspan
sodipodi:role="line"
id="tspan3398"
x="557.61566"
y="971.33813"
style="font-size:13.1479px;line-height:1.25">Test</tspan></text>
</g>
<g
id="g3720"
transform="translate(97.49995,-0.34404039)">
<rect
style="fill:#fce94f;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
id="rect3336"
width="26.295755"
height="39.669899"
x="873.01788"
y="2.3186533"
transform="matrix(0,1,1,0,0,0)" />
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
x="6.2600794"
y="891.1604"
id="text3356"><tspan
sodipodi:role="line"
id="tspan3358"
x="6.2600794"
y="891.1604"
style="font-size:13.1479px;line-height:1.25">Init</tspan></text>
</g>
<path
inkscape:connector-curvature="0"
id="path3642"
d="M 100.90864,859.8891 H 654.22706"
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.17467px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Lend)" />
<g
transform="translate(26.249988)"
id="g262">
<g
id="g234">
<rect
transform="matrix(0,1,1,0,0,0)"
y="179.38934"
x="872.67383"
height="162.72726"
width="26.295755"
id="rect3342"
style="fill:#fcaf3e;fill-rule:evenodd;stroke:none;stroke-width:0.838448px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
<text
id="text3364"
y="890.72327"
x="181.69368"
style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
xml:space="preserve"><tspan
style="font-size:13.1479px;line-height:1.25"
y="890.72327"
x="181.69368"
id="tspan3366"
sodipodi:role="line">Buildinstall</tspan></text>
</g>
<g
id="g3639"
transform="translate(75.925692,-0.34404039)">
<rect
transform="matrix(0,1,1,0,0,0)"
y="103.28194"
x="905.2099"
height="54.197887"
width="26.295755"
id="rect3344"
style="fill:#729fcf;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
<text
id="text3368"
y="923.25934"
x="106.1384"
style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
xml:space="preserve"><tspan
style="font-size:13.1479px;line-height:1.25"
y="923.25934"
x="106.1384"
id="tspan3370"
sodipodi:role="line">Gather</tspan></text>
</g>
<g
transform="translate(15.925722,63.405928)"
id="g3647">
<g
id="g3644">
<rect
style="fill:#ad7fa8;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
id="rect3346"
width="26.295755"
height="72.729973"
x="905.2099"
y="162.92607"
transform="matrix(0,1,1,0,0,0)" />
</g>
<text
id="text3372"
y="923.25934"
x="165.23042"
style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
xml:space="preserve"><tspan
style="font-size:13.1479px;line-height:1.25"
y="923.25934"
x="165.23042"
id="tspan3374"
sodipodi:role="line">ExtraFiles</tspan></text>
</g>
<g
transform="translate(-2.824268,-0.34404039)"
id="g3658">
<rect
transform="matrix(0,1,1,0,0,0)"
y="241.10229"
x="905.2099"
height="78.636055"
width="26.295755"
id="rect3348"
style="fill:#e9b96e;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
<text
id="text3376"
y="921.86945"
x="243.95874"
style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
xml:space="preserve"><tspan
style="font-size:13.1479px;line-height:1.25"
y="921.86945"
x="243.95874"
id="tspan3378"
sodipodi:role="line">Createrepo</tspan></text>
</g>
<g
id="g3408"
transform="translate(-74.638308,113.77258)">
<rect
transform="matrix(0,1,1,0,0,0)"
y="254.60153"
x="823.54675"
height="53.653927"
width="26.295755"
id="rect3350-3"
style="fill:#729fcf;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
<text
id="text3380-2"
y="840.3219"
x="256.90588"
style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
xml:space="preserve"><tspan
style="font-size:13.1479px;line-height:1.25"
id="tspan3406"
sodipodi:role="line"
x="256.90588"
y="840.3219">OSTree</tspan></text>
</g>
<g
transform="translate(-252.46536,-85.861863)"
id="g288">
<g
transform="translate(0.56706579)"
id="g3653">
<rect
style="fill:#fcaf3e;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
id="rect3428"
width="26.295755"
height="101.85102"
x="1022.637"
y="490.33765"
transform="matrix(0,1,1,0,0,0)" />
<text
xml:space="preserve"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:0%;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
x="492.642"
y="1039.4121"
id="text3430"><tspan
id="tspan283"
sodipodi:role="line"
x="492.642"
y="1039.4121"
style="font-size:12px;line-height:0">OSTreeInstaller</tspan></text>
</g>
</g>
</g>
<g
id="g2"
transform="translate(-1.4062678e-8,9.3749966)">
<rect
transform="matrix(0,1,1,0,0,0)"
style="fill:#e9b96e;fill-rule:evenodd;stroke:none;stroke-width:1.85901px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
id="rect3338-1"
width="103.12497"
height="115.80065"
x="863.29883"
y="486.55563" />
<text
id="text3384-0"
y="921.73846"
x="489.56451"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:0%;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
xml:space="preserve"><tspan
style="font-size:13.1475px;line-height:1.25"
id="tspan3391"
sodipodi:role="line"
x="489.56451"
y="921.73846">ImageChecksum</tspan></text>
</g>
<g
transform="translate(-42.209584,-80.817124)"
id="g3458">
<rect
transform="matrix(0,1,1,0,0,0)"
style="fill:#edd400;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
id="rect3338"
width="26.295755"
height="102.36562"
x="953.49097"
y="420.13605" />
<text
id="text3384"
y="971.54041"
x="422.99252"
style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
xml:space="preserve"><tspan
y="971.54041"
x="422.99252"
id="tspan3386"
sodipodi:role="line"
style="font-size:13.1479px;line-height:1.25">Createiso</tspan></text>
</g>
<g
id="g3453"
transform="translate(-42.466031,-84.525321)">
<rect
transform="matrix(0,1,1,0,0,0)"
y="420.39337"
x="989.65247"
height="101.85102"
width="26.295755"
id="rect3352"
style="fill:#73d216;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
<text
id="text3388"
y="1006.4276"
x="422.69772"
style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
xml:space="preserve"><tspan
y="1006.4276"
x="422.69772"
id="tspan3390"
sodipodi:role="line"
style="font-size:13.1479px;line-height:1.25">LiveImages</tspan></text>
</g>
<g
id="g3448"
transform="translate(-42.466031,-88.485966)">
<rect
transform="matrix(0,1,1,0,0,0)"
y="420.39337"
x="1026.0664"
height="101.85102"
width="26.295755"
id="rect3354"
style="fill:#f57900;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
<text
id="text3392"
y="1042.8416"
x="422.69772"
style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
xml:space="preserve"><tspan
y="1042.8416"
x="422.69772"
id="tspan3394"
sodipodi:role="line"
style="font-size:13.1479px;line-height:1.25">ImageBuild</tspan></text>
</g>
<g
id="g3443"
transform="translate(-43.173123,-92.80219)">
<rect
style="fill:#edd400;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
id="rect3422"
width="26.295755"
height="101.85102"
x="1062.8359"
y="421.10046"
transform="matrix(0,1,1,0,0,0)" />
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
x="423.40482"
y="1079.6111"
id="text3424"><tspan
id="tspan3434"
sodipodi:role="line"
x="423.40482"
y="1079.6111"
style="font-size:13.1479px;line-height:1.25">LiveMedia</tspan></text>
</g>
<rect
style="fill:#c17d11;fill-rule:evenodd;stroke:none;stroke-width:1.48416px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
id="rect290"
width="26.295755"
height="224.35098"
x="1091.7223"
y="378.43698"
transform="matrix(0,1,1,0,0,0)" />
<text
xml:space="preserve"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:0%;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
x="380.74133"
y="1106.6223"
id="text294"><tspan
y="1106.6223"
x="380.74133"
sodipodi:role="line"
id="tspan301"
style="font-size:12px;line-height:0">OSBS</tspan></text>
<g
transform="translate(-70.933542,-51.043149)"
id="g3819">
<rect
style="fill:#73d216;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
id="rect3801"
width="26.295755"
height="101.85102"
x="1052.2335"
y="448.86087"
transform="matrix(0,1,1,0,0,0)" />
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
x="451.16522"
y="1069.0087"
id="text3805"><tspan
style="font-size:13.1479px;line-height:1.25"
sodipodi:role="line"
x="451.16522"
y="1069.0087"
id="tspan3812">ExtraIsos</tspan></text>
</g>
<rect
y="377.92242"
x="1122.3463"
height="224.24059"
width="26.295755"
id="rect87"
style="fill:#5ed4ec;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1.48006px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
transform="matrix(0,1,1,0,0,0)" />
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
x="380.7789"
y="1140.3958"
id="text91"><tspan
style="font-size:13.1479px;line-height:1.25"
sodipodi:role="line"
id="tspan89"
x="380.7789"
y="1140.3958">Repoclosure</tspan></text>
<g
id="g206"
transform="translate(0,-1.8749994)">
<rect
style="fill:#fcd9a4;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1.00033px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
id="rect290-6"
width="26.295755"
height="101.91849"
x="1032.3469"
y="377.92731"
transform="matrix(0,1,1,0,0,0)" />
<text
xml:space="preserve"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:0%;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
x="380.23166"
y="1049.1219"
id="text294-7"><tspan
y="1049.1219"
x="380.23166"
sodipodi:role="line"
id="tspan301-5"
style="font-size:12px;line-height:0">KiwiBuild</tspan></text>
</g>
<g
id="g3">
<g
id="g1">
<g
id="g4">
<rect
transform="matrix(0,1,1,0,0,0)"
style="fill:#729fcf;fill-rule:evenodd;stroke:none;stroke-width:1.83502px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
id="rect3338-1-3"
width="103.12497"
height="115.80065"
x="983.44263"
y="486.55563" />
<text
id="text3384-0-6"
y="1038.8422"
x="489.56451"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:0%;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
xml:space="preserve"><tspan
style="font-size:13.1475px;line-height:1.25"
id="tspan3391-7"
sodipodi:role="line"
x="489.56451"
y="1038.8422">ImageContainer</tspan></text>
</g>
</g>
</g>
<g
id="g206-1"
transform="translate(-0.04628921,28.701853)">
<rect
style="fill:#fcaf3e;fill-rule:evenodd;stroke:none;stroke-width:1.00033px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
id="rect290-6-7"
width="26.295755"
height="101.91849"
x="1032.3469"
y="377.92731"
transform="matrix(0,1,1,0,0,0)" />
<text
xml:space="preserve"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:0%;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
x="380.23166"
y="1049.1219"
id="text294-7-5"><tspan
y="1049.1219"
x="380.23166"
sodipodi:role="line"
id="tspan301-5-5"
style="font-size:12px;line-height:0">OSBuild</tspan></text>
</g>
</g>
</svg>


doc/_static/pungi_snake-sm-dark.png (binary file not shown)

doc/_templates/.keep (empty file)

doc/about.rst
@@ -0,0 +1,70 @@
=============
About Pungi
=============
.. figure:: _static/pungi_snake-sm-dark.png
:align: right
:alt: Pungi Logo
*Pungi* is a distribution compose tool.
Composes are release snapshots that contain release deliverables such as:
- installation trees
- RPMs
- repodata
- comps
- (bootable) ISOs
- kickstart trees
- anaconda images
- images for PXE boot
Tool overview
=============
*Pungi* consists of multiple separate executables backed by a common library.
The main entry-point is the ``pungi-koji`` script. It loads the compose
configuration and kicks off the process. Composing itself is done in phases.
Each phase is responsible for generating some artifacts on disk and updating
the ``compose`` object that is threaded through all the phases.
*Pungi* itself does not actually do that much. Most of the actual work is
delegated to separate executables. *Pungi* just makes sure that all the
commands are invoked in the appropriate order and with the correct arguments.
It also moves the artifacts to the correct locations.
The executable name ``pungi-koji`` comes from the fact that most of those
separate executables submit tasks to Koji, which does the actual work in an
auditable way.
However, unlike doing everything manually in Koji, Pungi will make sure you are
building all images from the same package set, and will even produce
deliverables that Koji cannot create, like YUM repos and installer ISOs.
Links
=====
- Upstream GIT: https://pagure.io/pungi/
- Issue tracker: https://pagure.io/pungi/issues
- Questions can be asked on the *#fedora-releng* IRC channel on FreeNode
Origin of name
==============
The name *Pungi* comes from the instrument used to charm snakes. *Anaconda*
being the software Pungi was manipulating, and anaconda being a snake, led to
the referential naming.
The first name, which was suggested by Seth Vidal, was *FIST*, *Fedora
Installation <Something> Tool*. That name was quickly discarded and replaced
with Pungi.
There was also a bit of an inside joke that when said aloud, it could sound
like punji, which is `a sharpened stick at the bottom of a
trap <https://en.wikipedia.org/wiki/Punji_stick>`_. Kind of like software…

27
doc/comps.rst Normal file
View File

@ -0,0 +1,27 @@
.. _comps:
Processing comps files
======================
The comps file that Pungi takes as input is not really pure comps as used by
tools like DNF. There are extensions used to customize how the file is processed.
The first step of Pungi processing is to retrieve the actual file. This can use
anything that :ref:`scm_support` supports.
Pungi extensions are ``arch`` attribute on ``packageref``, ``group`` and
``environment`` tags. The value of this attribute is a comma separated list of
architectures.
The second step Pungi performs is creating a file for each architecture. This
is done by removing all elements with an incompatible ``arch`` attribute. No
additional clean-up is performed on this file. The resulting file is only used
internally for the rest of the compose process.
The third and final step is to create a comps file for each Variant.Arch
combination. This is the actual file that will be included in the compose. The
starting point is the original input file, from which all elements with an
incompatible architecture are removed. Then clean-up is performed by removing
all empty groups, removing non-existent groups from environments and
categories, and finally removing empty environments and categories. As a last
step, groups not listed in the variants file are removed.

258
doc/conf.py Normal file
View File

@ -0,0 +1,258 @@
# -*- coding: utf-8 -*-
#
# Pungi documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 2 08:11:04 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Pungi"
copyright = "2016, Red Hat, Inc."
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "4.7"
# The full version, including alpha/beta/rc tags.
release = "4.7.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "Pungidoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
("index", "Pungi.tex", "Pungi Documentation", "Daniel Mach", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "pungi", "Pungi Documentation", ["Daniel Mach"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"Pungi",
"Pungi Documentation",
"Daniel Mach",
"Pungi",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False

2278
doc/configuration.rst Normal file

File diff suppressed because it is too large

166
doc/contributing.rst Normal file
View File

@ -0,0 +1,166 @@
=====================
Contributing to Pungi
=====================
Set up development environment
==============================
In order to work on *Pungi*, you should install a recent version of *Fedora*.
Python2
-------
Fedora 29 is recommended because some packages are not available in newer Fedora releases, e.g. python2-libcomps.
Install required packages ::
$ sudo dnf install -y krb5-devel gcc make libcurl-devel python2-devel python2-createrepo_c kobo-rpmlib yum python2-libcomps python2-libselinux
Python3
-------
Install required packages ::
$ sudo dnf install -y krb5-devel gcc make libcurl-devel python3-devel python3-createrepo_c python3-libcomps
Developing
==========
Currently the development workflow for Pungi is on master branch:
- Make your own fork at https://pagure.io/pungi
- Clone your fork locally (replacing $USERNAME with your own)::
git clone git@pagure.io:forks/$USERNAME/pungi.git
- cd into your local clone and add the remote upstream for rebasing::
cd pungi
git remote add upstream git@pagure.io:pungi.git
.. note::
This workflow assumes that you never ``git commit`` directly to the master
branch of your fork. This will make more sense when we cover rebasing
below.
- create a topic branch based on master::
git branch my_topic_branch master
git checkout my_topic_branch
- Make edits, changes, add new features, etc. and then make sure to pull
from upstream master and rebase before submitting a pull request::
# let's just say you edited setup.py for the sake of argument
git checkout my_topic_branch
# make changes to setup.py
black setup.py
tox
git add setup.py
git commit -s -m "added awesome feature to setup.py"
# now we rebase
git checkout master
git pull --rebase upstream master
git push origin master
git push origin --tags
git checkout my_topic_branch
git rebase master
# resolve merge conflicts if any as a result of your development in
# your topic branch
git push origin my_topic_branch
.. note::
In order for your commit to be merged:
- you must sign-off on it. Use ``-s`` option when running ``git commit``.
- The code must be well formatted via ``black`` and pass ``flake8`` checking. Run ``tox -e black,flake8`` to do the check.
- Create pull request in the pagure.io web UI
- For convenience, here is a bash shell function that can be placed in your
~/.bashrc and invoked like ``pullupstream pungi-4-devel``; it will
automate a large portion of the rebase steps from above::
pullupstream () {
if [[ -z "$1" ]]; then
printf "Error: must specify a branch name (e.g. - master, devel)\n"
else
pullup_startbranch=$(git describe --contains --all HEAD)
git checkout $1
git pull --rebase upstream master
git push origin $1
git push origin --tags
git checkout ${pullup_startbranch}
fi
}
Testing
=======
You must write unit tests for any new code (except for trivial changes). Any
code without sufficient test coverage may not be merged.
To run all existing tests, the suggested method is to use *tox*. ::
$ sudo dnf install python3-tox -y
$ tox -e py3
$ tox -e py27
Alternatively you could create a virtualenv, install the dependencies and run
the tests manually if you don't want to use tox. ::
$ sudo dnf install python3-virtualenvwrapper -y
$ mkvirtualenv --system-site-packages py3
$ workon py3
$ pip install -r requirements.txt -r test-requirements.txt
$ make test
# or with coverage
$ make test-coverage
If you need to run specific tests, *pytest* is recommended. ::
# Activate virtualenv first
# Run tests
$ pytest tests/test_config.py
$ pytest tests/test_config.py -k test_pkgset_mismatch_repos
In the ``tests/`` directory there is a shell script ``test_compose.sh`` that
you can use to try and create a miniature compose on dummy data. The actual
data will be created by running ``make test-data`` in the project root. ::
$ sudo dnf -y install rpm-build createrepo_c isomd5sum genisoimage syslinux
# Activate virtualenv (the one created by tox could be used)
$ source .tox/py3/bin/activate
$ python setup.py develop
$ make test-data
$ make test-compose
This testing compose does not actually use all phases that are available, and
there is no checking that the result is correct. It only tells you whether it
crashed or not.
.. note::
Even when it finishes successfully, it may print errors about
``repoclosure`` on *Server-Gluster.x86_64* in *test* phase. This is not a
bug.
Documenting
===========
You must write documentation for any new features and functional changes.
Any code without sufficient documentation may not be merged.
To generate the documentation, run ``make doc`` in the project root.

480
doc/examples.rst Normal file
View File

@ -0,0 +1,480 @@
.. _examples:
Big picture examples
====================
Actual Pungi configuration files can get very large. This page brings two
examples of (almost) full configuration for two different composes.
Fedora Rawhide compose
----------------------
This is a shortened configuration for a Fedora Rawhide compose as of 2019-10-14.
::
release_name = 'Fedora'
release_short = 'Fedora'
release_version = 'Rawhide'
release_is_layered = False
bootable = True
comps_file = {
'scm': 'git',
'repo': 'https://pagure.io/fedora-comps.git',
'branch': 'master',
'file': 'comps-rawhide.xml',
# Merge translations by running make. This command will generate the file.
'command': 'make comps-rawhide.xml'
}
module_defaults_dir = {
'scm': 'git',
'repo': 'https://pagure.io/releng/fedora-module-defaults.git',
'branch': 'main',
'dir': '.'
}
# Optional module obsoletes configuration which is merged
# into the module index and gets resolved
module_obsoletes_dir = {
'scm': 'git',
'repo': 'https://pagure.io/releng/fedora-module-defaults.git',
'branch': 'main',
'dir': 'obsoletes'
}
variants_file='variants-fedora.xml'
sigkeys = ['12C944D0']
# Put packages into subdirectories hashed by their initial letter.
hashed_directories = True
# There is a special profile for use with compose. It makes Pungi
# authenticate automatically as rel-eng user.
koji_profile = 'compose_koji'
# RUNROOT settings
runroot = True
runroot_channel = 'compose'
runroot_tag = 'f32-build'
# PKGSET
pkgset_source = 'koji'
pkgset_koji_tag = 'f32'
pkgset_koji_inherit = False
filter_system_release_packages = False
# GATHER
gather_method = {
'^.*': { # For all variants
'comps': 'deps', # resolve dependencies for packages from comps file
'module': 'nodeps', # but not for packages from modules
}
}
gather_backend = 'dnf'
gather_profiler = True
check_deps = False
greedy_method = 'build'
repoclosure_backend = 'dnf'
# CREATEREPO
createrepo_deltas = False
createrepo_database = True
createrepo_use_xz = True
createrepo_extra_args = ['--zck', '--zck-dict-dir=/usr/share/fedora-repo-zdicts/rawhide']
# CHECKSUMS
media_checksums = ['sha256']
media_checksum_one_file = True
media_checksum_base_filename = '%(release_short)s-%(variant)s-%(version)s-%(arch)s-%(date)s%(type_suffix)s.%(respin)s'
# CREATEISO
iso_hfs_ppc64le_compatible = False
# BUILDINSTALL
buildinstall_method = 'lorax'
buildinstall_skip = [
# No installer for Modular variant
('^Modular$', {'*': True}),
# No 32 bit installer for Everything.
('^Everything$', {'i386': True}),
]
# Enables macboot on x86_64 for all variants and disables upgrade image building
# everywhere.
lorax_options = [
('^.*$', {
'x86_64': {
'nomacboot': False
},
'ppc64le': {
# Use 3GB image size for ppc64le.
'rootfs_size': 3
},
'*': {
'noupgrade': True
}
})
]
additional_packages = [
('^(Server|Everything)$', {
'*': [
# Add all architectures of dracut package.
'dracut.*',
# Add all packages matching this pattern
'autocorr-*',
],
}),
('^Everything$', {
# Everything should include all packages from the tag. This only
# applies to the native arch. Multilib will still be pulled in
# according to multilib rules.
'*': ['*'],
}),
]
filter_packages = [
("^.*$", {"*": ["glibc32", "libgcc32"]}),
('(Server)$', {
'*': [
'kernel*debug*',
'kernel-kdump*',
]
}),
]
multilib = [
('^Everything$', {
'x86_64': ['devel', 'runtime'],
})
]
# These packages should never be multilib on any arch.
multilib_blacklist = {
'*': [
'kernel', 'kernel-PAE*', 'kernel*debug*', 'java-*', 'php*', 'mod_*', 'ghc-*'
],
}
# These should be multilib even if they don't match the rules defined above.
multilib_whitelist = {
'*': ['wine', '*-static'],
}
createiso_skip = [
# Keep binary ISOs for Server, but not source ones.
('^Server$', {'src': True}),
# Remove all other ISOs.
('^Everything$', {'*': True, 'src': True}),
('^Modular$', {'*': True, 'src': True}),
]
# Image name respecting Fedora's image naming policy
image_name_format = '%(release_short)s-%(variant)s-%(disc_type)s-%(arch)s-%(version)s-%(date)s%(type_suffix)s.%(respin)s.iso'
# Use the same format for volume id
image_volid_formats = [
'%(release_short)s-%(variant)s-%(disc_type)s-%(arch)s-%(version)s'
]
# Used by Pungi to replace 'Cloud' with 'C' (etc.) in ISO volume IDs.
# There is a hard 32-character limit on ISO volume IDs, so we use
# these to try and produce short enough but legible IDs. Note this is
# duplicated in Koji for live images, as livemedia-creator does not
# allow Pungi to tell it what volume ID to use. Note:
# https://fedoraproject.org/wiki/User:Adamwill/Draft_fedora_image_naming_policy
volume_id_substitutions = {
'Beta': 'B',
'Rawhide': 'rawh',
'Silverblue': 'SB',
'Cinnamon': 'Cinn',
'Cloud': 'C',
'Design_suite': 'Dsgn',
'Electronic_Lab': 'Elec',
'Everything': 'E',
'Scientific_KDE': 'SciK',
'Security': 'Sec',
'Server': 'S',
'Workstation': 'WS',
}
disc_types = {
'boot': 'netinst',
'live': 'Live',
}
translate_paths = [
('/mnt/koji/compose/', 'https://kojipkgs.fedoraproject.org/compose/'),
]
# These will be inherited by live_media, live_images and image_build
global_ksurl = 'git+https://pagure.io/fedora-kickstarts.git?#HEAD'
global_release = '!RELEASE_FROM_LABEL_DATE_TYPE_RESPIN'
global_version = 'Rawhide'
# live_images ignores this in favor of live_target
global_target = 'f32'
image_build = {
'^Container$': [
{
'image-build': {
'format': [('docker', 'tar.xz')],
'name': 'Fedora-Container-Base',
'kickstart': 'fedora-container-base.ks',
'distro': 'Fedora-22',
'disk_size': 5,
'arches': ['armhfp', 'aarch64', 'ppc64le', 's390x', 'x86_64'],
'repo': 'Everything',
'install_tree_from': 'Everything',
'subvariant': 'Container_Base',
'failable': ['*'],
},
'factory-parameters': {
'dockerversion': "1.10.1",
'docker_cmd': '[ "/bin/bash" ]',
'docker_env': '[ "DISTTAG=f32container", "FGC=f32", "container=oci" ]',
'docker_label': '{ "name": "fedora", "license": "MIT", "vendor": "Fedora Project", "version": "32"}',
},
},
],
}
live_media = {
'^Workstation$': [
{
'name': 'Fedora-Workstation-Live',
'kickstart': 'fedora-live-workstation.ks',
# Variants.xml also contains aarch64 and armhfp, but there
# should be no live media for those arches.
'arches': ['x86_64', 'ppc64le'],
'failable': ['ppc64le'],
# Take packages and install tree from Everything repo.
'repo': 'Everything',
'install_tree_from': 'Everything',
}
],
'^Spins': [
# There are multiple media for Spins variant. They use subvariant
# field so that they can be identified in the metadata.
{
'name': 'Fedora-KDE-Live',
'kickstart': 'fedora-live-kde.ks',
'arches': ['x86_64'],
'repo': 'Everything',
'install_tree_from': 'Everything',
'subvariant': 'KDE'
},
{
'name': 'Fedora-Xfce-Live',
'kickstart': 'fedora-live-xfce.ks',
'arches': ['x86_64'],
'failable': ['*'],
'repo': 'Everything',
'install_tree_from': 'Everything',
'subvariant': 'Xfce'
},
],
}
failable_deliverables = [
# Installer and ISOs for server failing do not abort the compose.
('^Server$', {
'*': ['buildinstall', 'iso'],
}),
('^.*$', {
# Buildinstall is not blocking
'src': ['buildinstall'],
# Nothing on i386, ppc64le blocks the compose
'i386': ['buildinstall', 'iso'],
'ppc64le': ['buildinstall', 'iso'],
's390x': ['buildinstall', 'iso'],
})
]
ostree = {
"^Silverblue$": {
"version": "!OSTREE_VERSION_FROM_LABEL_DATE_TYPE_RESPIN",
# To get config, clone master branch from this repo and take
# treefile from there.
"treefile": "fedora-silverblue.yaml",
"config_url": "https://pagure.io/workstation-ostree-config.git",
"config_branch": "master",
# Consume packages from Everything
"repo": "Everything",
# Don't create a reference in the ostree repo (signing automation does that).
"tag_ref": False,
# Don't use change detection in ostree.
"force_new_commit": True,
# Use unified core mode for rpm-ostree composes
"unified_core": True,
# This is the location for the repo where new commit will be
# created. Note that this is outside of the compose dir.
"ostree_repo": "/mnt/koji/compose/ostree/repo/",
"ostree_ref": "fedora/rawhide/${basearch}/silverblue",
"arches": ["x86_64", "ppc64le", "aarch64"],
"failable": ['*'],
}
}
ostree_container = {
"^Sagano$": {
"treefile": "fedora-tier-0-38.yaml",
"config_url": "https://gitlab.com/CentOS/cloud/sagano.git",
"config_branch": "main",
# Consume packages from Everything
"repo": "Everything",
# Automatically generate a reasonable version
"version": "!OSTREE_VERSION_FROM_LABEL_DATE_TYPE_RESPIN",
# Only run this for x86_64 even if Sagano has more arches
"arches": ["x86_64"],
}
}
ostree_installer = [
("^Silverblue$", {
"x86_64": {
"repo": "Everything",
"release": None,
"rootfs_size": "8",
# Take templates from this repository.
'template_repo': 'https://pagure.io/fedora-lorax-templates.git',
'template_branch': 'master',
# Use following templates.
"add_template": ["ostree-based-installer/lorax-configure-repo.tmpl",
"ostree-based-installer/lorax-embed-repo.tmpl",
"ostree-based-installer/lorax-embed-flatpaks.tmpl"],
# And add these variables for the templates.
"add_template_var": [
"ostree_install_repo=https://kojipkgs.fedoraproject.org/compose/ostree/repo/",
"ostree_update_repo=https://ostree.fedoraproject.org",
"ostree_osname=fedora",
"ostree_oskey=fedora-32-primary",
"ostree_contenturl=mirrorlist=https://ostree.fedoraproject.org/mirrorlist",
"ostree_install_ref=fedora/rawhide/x86_64/silverblue",
"ostree_update_ref=fedora/rawhide/x86_64/silverblue",
"flatpak_remote_name=fedora",
"flatpak_remote_url=oci+https://registry.fedoraproject.org",
"flatpak_remote_refs=runtime/org.fedoraproject.Platform/x86_64/f30 app/org.gnome.Baobab/x86_64/stable",
],
'failable': ['*'],
},
})
]
RCM Tools compose
-----------------
This is a small compose used to deliver packages to Red Hat internal users. The
configuration is split into two files.
::
# rcmtools-common.conf
release_name = "RCM Tools"
release_short = "RCMTOOLS"
release_version = "2.0"
release_type = "updates"
release_is_layered = True
createrepo_c = True
createrepo_checksum = "sha256"
# PKGSET
pkgset_source = "koji"
koji_profile = "brew"
pkgset_koji_inherit = True
# GENERAL SETTINGS
bootable = False
comps_file = "rcmtools-comps.xml"
variants_file = "rcmtools-variants.xml"
sigkeys = ["3A3A33A3"]
# RUNROOT settings
runroot = False
# GATHER
gather_method = "deps"
check_deps = True
additional_packages = [
('.*', {
'*': ['puddle', 'rcm-nexus'],
}
),
]
# Set repoclosure_strictness to fatal to avoid installation dependency
# issues in production composes
repoclosure_strictness = [
("^.*$", {
"*": "fatal"
})
]
Configuration specific for different base products is split into separate files.
::
# rcmtools-rhel-7.conf
from rcmtools-common import *
# BASE PRODUCT
base_product_name = "Red Hat Enterprise Linux"
base_product_short = "RHEL"
base_product_version = "7"
# PKGSET
pkgset_koji_tag = "rcmtools-rhel-7-compose"
# remove i386 arch on rhel7
tree_arches = ["aarch64", "ppc64le", "s390x", "x86_64"]
check_deps = False
# Packages in these repos are available to satisfy dependencies inside the
# compose, but will not be pulled in.
gather_lookaside_repos = [
("^Client|Client-optional$", {
"x86_64": [
"http://example.redhat.com/rhel/7/Client/x86_64/os/",
"http://example.redhat.com/rhel/7/Client/x86_64/optional/os/",
],
}),
("^Workstation|Workstation-optional$", {
"x86_64": [
"http://example.redhat.com/rhel/7/Workstation/x86_64/os/",
"http://example.redhat.com/rhel/7/Workstation/x86_64/optional/os/",
],
}),
("^Server|Server-optional$", {
"aarch64": [
"http://example.redhat.com/rhel/7/Server/aarch64/os/",
"http://example.redhat.com/rhel/7/Server/aarch64/optional/os/",
],
"ppc64": [
"http://example.redhat.com/rhel/7/Server/ppc64/os/",
"http://example.redhat.com/rhel/7/Server/ppc64/optional/os/",
],
"ppc64le": [
"http://example.redhat.com/rhel/7/Server/ppc64le/os/",
"http://example.redhat.com/rhel/7/Server/ppc64le/optional/os/",
],
"s390x": [
"http://example.redhat.com/rhel/7/Server/s390x/os/",
"http://example.redhat.com/rhel/7/Server/s390x/optional/os/",
],
"x86_64": [
"http://example.redhat.com/rhel/7/Server/x86_64/os/",
"http://example.redhat.com/rhel/7/Server/x86_64/optional/os/",
],
})
]

90
doc/format.rst Normal file
View File

@ -0,0 +1,90 @@
==================
Config file format
==================
The configuration file parser is provided by `kobo
<https://github.com/release-engineering/kobo>`_.
The file follows a Python-like format. It consists of a sequence of variables
that have a value assigned to them. ::
variable = value
The variable names must follow the same convention as Python code: start with a
letter and consist of letters, digits and underscores only.
The values can be either an integer, float, boolean (``True`` or ``False``), a
string or ``None``. Strings must be enclosed in either single or double quotes.
Complex types are supported as well.
A list is enclosed in square brackets and items are separated with commas.
There can be a comma after the last item as well. ::
a_list = [1,
2,
3,
]
A tuple works like a list, but is enclosed in parenthesis. ::
a_tuple = (1, "one")
A dictionary is wrapped in curly braces, and consists of ``key: value`` pairs
separated by commas. The keys can only be formed from basic types (int, float,
string). ::
a_dict = {
'foo': 'bar',
1: None
}
The value assigned to a variable can also be taken from another variable. ::
one = 1
another = one
Anything on a line after a ``#`` symbol is ignored and functions as a comment.
Importing other files
=====================
It is possible to include another configuration file. The files are looked up
relative to the currently processed file.
The general structure of import is: ::
from FILENAME import WHAT
The ``FILENAME`` should be just the base name of the file without extension
(which must be ``.conf``). ``WHAT`` can either be a comma separated list of
variables or ``*``. ::
# Opens constants.conf and brings PI and E into current scope.
from constants import PI, E
# Opens common.conf and brings everything defined in that file into current
# file as well.
from common import *
.. note::
Pungi will copy the configuration file given on command line into the
``logs/`` directory. Only this single file will be copied, not any included
ones. (Copying included files requires a fix in the kobo library.)
The JSON-formatted dump of configuration is correct though.
Formatting strings
==================
String interpolation is available as well. It uses a ``%``-encoded format. See
Python documentation for more details. ::
joined = "%s %s" % (var_a, var_b)
a_dict = {
"fst": 1,
"snd": 2,
}
another = "%(fst)s %(snd)s" % a_dict

102
doc/gathering.rst Normal file
View File

@ -0,0 +1,102 @@
==================
Gathering packages
==================
A compose created by Pungi consists of one or more variants. A variant contains
a subset of the content targeted at a particular use case.
There are different types of variants. The type affects how packages are
gathered into the variant.
The inputs for gathering are defined by various gather sources. Packages from
all sources are collected to create a big list of package names, comps group
names and a list of packages that should be filtered out.
.. note::
The inputs for both the explicit package list and the comps file are
interpreted as RPM names, not arbitrary Provides or source package names.
Next, ``gather_method`` defines how the list is processed. For ``nodeps``, the
results from the sources are used pretty much as-is [#]_. For the ``deps``
method, a process is launched to figure out which dependencies are needed, and
those are pulled in.
.. [#] The lists are filtered based on what packages are available in the
package set, but nothing else will be pulled in.
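For a concrete picture, this is roughly how the method selection looks in
configuration (the values here are only illustrative; see :ref:`examples` for
a full compose configuration). ::
    gather_method = {
        '^.*$': {                # for all variants
            'comps': 'deps',     # resolve dependencies for comps packages
            'module': 'nodeps',  # modules already list their packages
        }
    }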
Variant types
=============
*Variant*
is a base type that has no special behaviour.
*Addon*
is built on top of a regular variant. Any packages that should go to both
the addon and its parent will be removed from the addon. Packages that are
only in the addon but pulled in because of the ``gather_fulltree`` option
will be moved to the parent.
*Integrated Layered Product*
works similarly to *addon*. Additionally, all packages from addons on the
same parent variant are removed from integrated layered products.
The main difference between an *addon* and *integrated layered product* is
that *integrated layered product* has its own identity in the metadata
(defined with product name and version).
.. note::
There's also *Layered Product* as a term, but this is not related to
variants. It's used to describe a product that is not a standalone
operating system and is instead meant to be used on some other base
system.
*Optional*
contains packages that complete the base variants' package set. It always
has ``fulltree`` and ``selfhosting`` enabled, so it contains build
dependencies and packages which were not specifically requested for the base
variant.
Some configuration options are overridden for particular variant types.
.. table:: Depsolving configuration
+-----------+--------------+--------------+
| Variant | Fulltree | Selfhosting |
+===========+==============+==============+
| base | configurable | configurable |
+-----------+--------------+--------------+
| addon/ILP | enabled | disabled |
+-----------+--------------+--------------+
| optional | enabled | enabled |
+-----------+--------------+--------------+
Profiling
=========
Profiling data on the ``pungi-gather`` tool can be enabled by setting the
``gather_profiler`` configuration option to ``True``.
Modular compose
===============
A compose with ``gather_source`` set to ``module`` is called *modular*. The
package list is determined by a list of modules.
The list of modules that will be put into a variant is defined in the
``variants.xml`` file. The file can contain either *Name:Stream* or
*Name:Stream:Version* references. See `Module Naming Policy
<https://pagure.io/modularity/blob/master/f/source/development/building-modules/naming-policy.rst>`_
for details. When *Version* is missing from the specification, Pungi will ask
PDC for the latest one.
The module metadata in PDC contains a list of RPMs in the module as well as
the Koji tag from which the packages can be retrieved.
Restrictions
------------
* A modular compose must always use Koji as a package set source.

25
doc/index.rst Normal file
View File

@ -0,0 +1,25 @@
.. Pungi documentation master file, created by
sphinx-quickstart on Thu Jul 2 08:11:04 2015.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to Pungi's documentation!
=================================
Contents:
.. toctree::
:maxdepth: 2
about
phases
format
configuration
examples
scm_support
messaging
gathering
koji
comps
contributing
testing

105
doc/koji.rst Normal file
View File

@ -0,0 +1,105 @@
======================
Getting data from koji
======================
When Pungi is configured to get packages from a Koji tag, it somehow needs to
access the actual RPM files.
Historically, this required the storage used by Koji to be directly available
on the host where Pungi was running. This was usually achieved by using NFS for
the Koji volume, and mounting it on the compose host.
The compose could be created directly on the same volume. In such a case the
packages would be hardlinked, significantly reducing space consumption.
The compose could also be created on a different storage, in which case the
packages would either need to be copied over or symlinked. Using symlinks
requires that anything that accesses the compose (e.g. a download server) would
also need to mount the Koji volume in the same location.
There is also a risk with symlinks that the package in Koji can change (due to
being resigned for example), which would invalidate composes linking to it.
Using Koji without direct mount
===============================
It is possible now to run a compose from a Koji tag without direct access to
Koji storage.
Pungi can download the packages over HTTP protocol, store them in a local
cache, and consume them from there.
The local cache has similar structure to what is on the Koji volume.
When Pungi needs a package, it knows its path on the Koji volume. It will
replace the ``topdir`` with the cache location. If such a file exists, it will
be used. If it doesn't exist, it will be downloaded from Koji (by replacing
the ``topdir`` with ``topurl``).
::
Koji path /mnt/koji/packages/foo/1/1.fc38/data/signed/abcdef/noarch/foo-1-1.fc38.noarch.rpm
Koji URL https://kojipkgs.fedoraproject.org/packages/foo/1/1.fc38/data/signed/abcdef/noarch/foo-1-1.fc38.noarch.rpm
Local path /mnt/compose/cache/packages/foo/1/1.fc38/data/signed/abcdef/noarch/foo-1-1.fc38.noarch.rpm
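Conceptually the translation is just a prefix replacement. A minimal sketch
(not the actual Pungi code; the directory names are examples only)::
    def to_local_path(koji_path, topdir="/mnt/koji", cachedir="/mnt/compose/cache"):
        # Replace the Koji topdir prefix with the local cache location.
        if not koji_path.startswith(topdir):
            raise ValueError("path is not under the Koji topdir")
        return cachedir + koji_path[len(topdir):]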
The packages can be hardlinked from this cache directory.
Cleanup
-------
While the approach above allows each RPM to be downloaded only once, it will
eventually result in the Koji volume being mirrored locally. Most of the
packages will however no longer be needed.
There is a script ``pungi-cache-cleanup`` that can help with that. It can find
and remove files from the cache that are no longer needed.
A file is no longer needed if it has a single link (meaning it is only in the
cache, not in any compose), and it has mtime older than a given threshold.
It doesn't make sense to delete files that are hardlinked in an existing
compose as it would not save any space anyway.
The mtime check is meant to preserve files that are downloaded but not actually
used in a compose, like a subpackage that is not included in any variant. Every
time its existence in the local cache is checked, the mtime is updated.
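The rule the cleanup applies to each file can be sketched like this (a
simplified illustration, not the actual ``pungi-cache-cleanup`` code; the
threshold value is an example)::
    import os
    import time
    def is_unneeded(path, max_age_days=30):
        st = os.stat(path)
        only_in_cache = st.st_nlink == 1  # not hardlinked into any compose
        old_enough = (time.time() - st.st_mtime) > max_age_days * 86400
        return only_in_cache and old_enough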
Race conditions?
----------------
It should be safe to have multiple compose hosts share the same storage volume
for generated composes and local cache.
If a cache file is accessed and it exists, there's no risk of race condition.
If two composes need the same file at the same time and it is not present yet,
one of them will take a lock on it and start downloading. The other will wait
until the download is finished.
The lock is only valid for a set amount of time (5 minutes) to avoid issues
where the downloading process is killed in a way that blocks it from releasing
the lock.
If the file is large and the network slow, the limit may not be enough to
finish the download. In that case the second process will steal the lock while
the first process is still downloading. This will result in the same file
being downloaded twice.
When the first process finishes the download, it will put the file into the
local cache location. When the second process finishes, it will atomically
replace it, but since both processes downloaded the same file, the content
does not change.
If the first compose already managed to hardlink the file before it gets
replaced, there will be two copies of the file present locally.
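The locking scheme described above can be sketched as follows (illustrative
only; the lock file handling and the stealing logic here are assumptions, not
the actual implementation)::
    import os
    import time
    LOCK_VALIDITY = 5 * 60  # seconds
    def acquire_download_lock(lock_path):
        while True:
            try:
                # O_EXCL makes the creation atomic: only one process wins.
                fd = os.open(lock_path, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
                os.close(fd)
                return
            except FileExistsError:
                try:
                    age = time.time() - os.stat(lock_path).st_mtime
                except FileNotFoundError:
                    continue  # lock was released meanwhile, retry immediately
                if age > LOCK_VALIDITY:
                    os.unlink(lock_path)  # lock looks stale: steal it
                else:
                    time.sleep(1)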
Integrity checking
------------------
There is minimal integrity checking. RPM packages belonging to real builds
will be checked to match the checksum provided by the Koji hub.
There is no checking for scratch builds or any images.

45
doc/messaging.rst Normal file
View File

@ -0,0 +1,45 @@
.. _messaging:
Progress notification
=====================
*Pungi* has the ability to emit notification messages about progress and
general status of the compose. These can be used to e.g. send messages to
*fedmsg*. This is implemented by actually calling a separate script.
The script will be called with one argument describing action that just
happened. A JSON-encoded object will be passed to standard input to provide
more information about the event. At the very least, the object will contain a
``compose_id`` key.
The notification script inherits its working directory from the parent
process; it is the same directory ``pungi-koji`` was called from. The working
directory is listed at the start of the main log.
Currently these messages are sent:
* ``status-change`` -- when composing starts, finishes or fails; a ``status``
key is provided to indicate details
* ``phase-start`` -- on start of a phase
* ``phase-stop`` -- when phase is finished
* ``createiso-targets`` -- with a list of images to be created
* ``createiso-imagedone`` -- when any single image is finished
* ``createiso-imagefail`` -- when any single image fails to create
* ``fail-to-start`` -- when there are incorrect CLI options or errors in the
configuration file; this message does not contain ``compose_id``, nor is the
script invoked in the compose directory (which does not exist yet)
* ``ostree`` -- when a new commit is created, this message will announce its
hash and the name of ref it is meant for.
For phase-related messages, a ``phase_name`` key is provided as well.
A ``pungi-fedmsg-notification`` script is provided and understands this
interface.
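A minimal script implementing the same interface might look like this (a
sketch only; it merely logs the event, based on the description above)::
    #!/usr/bin/env python3
    import json
    import sys
    event = sys.argv[1]          # e.g. "phase-start" or "status-change"
    data = json.load(sys.stdin)  # JSON object with event details
    # "compose_id" is present for all events except "fail-to-start".
    print("%s: %s" % (event, data.get("compose_id", "n/a")))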
Setting it up
-------------
The script should be provided as a command line argument
``--notification-script``. ::
--notification-script=pungi-fedmsg-notification

175
doc/phases.rst Normal file
View File

@ -0,0 +1,175 @@
.. _phases:
Phases
======
Each invocation of ``pungi-koji`` consists of a set of phases.
.. image:: _static/phases.svg
:alt: phase diagram
Most of the phases run sequentially (left-to-right in the diagram), but there
are use cases where multiple phases run in parallel. This happens for phases
whose main point is to wait for a Koji task to finish.
Init
----
The first phase to ever run. It cannot be skipped. It prepares the comps files
for variants (by filtering out groups and packages that should not be there).
See :doc:`comps` for details about how this is done.
Pkgset
------
This phase loads a set of packages that should be composed. It has two separate
results: it prepares repos with packages in the ``work/`` directory (one per
arch) for further processing, and it returns a data structure with a mapping
of packages to architectures.
Buildinstall
------------
Spawns a bunch of threads, each of which runs the ``lorax`` command. The
commands create ``boot.iso`` and other boot configuration files. The image is
finally linked into the ``compose/`` directory as netinstall media.
The created images are also needed for creating live media or other images in
later phases.
With ``lorax`` this phase runs one task per variant.arch combination.
Gather
------
This phase uses data collected by the ``pkgset`` phase and figures out what
packages should be in each variant. The basic mapping can come from the comps
file, a JSON mapping or the ``additional_packages`` config option. These
inputs can then be enriched by adding all dependencies. See :doc:`gathering`
for details.
Once the mapping is finalized, the packages are linked to appropriate places
and the ``rpms.json`` manifest is created.
ExtraFiles
----------
This phase collects extra files from the configuration and copies them to the
compose directory. The files are described by a JSON file in the compose
subtree where the files are copied. This metadata is meant to be distributed
with the data (on ISO images).
Createrepo
----------
This phase creates RPM repositories for each variant.arch tree. It is actually
reading the ``rpms.json`` manifest to figure out which packages should be
included.
OSTree
------
Updates an ostree repository with a new commit with packages from the compose.
The repository lives outside of the compose and is updated immediately. If the
compose fails in a later stage, the commit will not be reverted.
Implementation wise, this phase runs ``rpm-ostree`` command in Koji runroot (to
allow running on different arches).
Createiso
---------
Generates ISO files and accumulates enough metadata to be able to create the
``images.json`` manifest. The file is, however, not created in this phase;
instead it is dumped in the ``pungi-koji`` script itself.
The files include a repository with all RPMs from the variant. There will be
multiple images if the packages do not fit on a single image.
The image will be bootable if ``buildinstall`` phase is enabled and the
packages fit on a single image.
There can also be images with source repositories. These are never bootable.
ExtraIsos
---------
This phase is very similar to ``createiso``, except it combines content from
multiple variants onto a single image. Packages, repodata and extra files from
each configured variant are put into a subdirectory. Additional extra files
can be put into the top level of the image. The image will be bootable if the
main variant is bootable.
LiveImages, LiveMedia
---------------------
Creates media in Koji with ``koji spin-livecd``, ``koji spin-appliance`` or
``koji spin-livemedia`` command. When the media are finished, the images are
copied into the ``compose/`` directory and metadata for images is updated.
ImageBuild
----------
This phase wraps up ``koji image-build``. It also updates the metadata
ultimately responsible for ``images.json`` manifest.
KiwiBuild
---------
Similarly to image build, this phase creates a koji `kiwiBuild` task. In the
background it uses Kiwi to create images.
OSBuild
-------
Similarly to image build, this phase creates a koji `osbuild` task. In the
background it uses OSBuild Composer to create images.
OSBS
----
This phase builds container base images in `OSBS
<http://osbs.readthedocs.io/en/latest/index.html>`_.
The finished images are available in the registry provided by OSBS, but are
not downloaded directly into the compose. There is metadata about the created
image in ``compose/metadata/osbs.json``.
ImageContainer
--------------
This phase builds a container image in OSBS, and stores the metadata in the
same file as the OSBS phase. The container produced here wraps a different
image, created in the ImageBuild or OSBuild phase. It can be useful to deliver
a VM image to containerized environments.
OSTreeInstaller
---------------
Creates bootable media that carry an ostree repository as a payload. These
images are created by running ``lorax`` with special templates. Again it runs
in Koji runroot.
Repoclosure
-----------
Run ``repoclosure`` on each repository. By default errors are only reported
in the log, and the compose will still be considered a success. The actual
error has to be looked up in the compose logs directory. Configuration allows
customizing this.
ImageChecksum
-------------
Responsible for generating checksums for the images. The checksums are stored
in the image manifest as well as in files on disk.
is obtained from the image manifest. This way all images will get the same
checksums irrespective of the phase that created them.
Test
----
This phase is supposed to run some sanity checks on the finished compose.
The only test is to check all images listed in the metadata and verify that
they look sane. For ISO files, headers are checked to verify the format is
correct, and for bootable media a check is run to verify they have properties
that allow booting.

100
doc/scm_support.rst Normal file
View File

@ -0,0 +1,100 @@
.. _scm_support:
Exporting files from SCM
========================
Multiple places in Pungi can use files from external storage. The configuration
is similar regardless of the backend that is used, although some features
may differ.
The so-called ``scm_dict`` is always put into configuration as a dictionary,
which can contain the following keys.
* ``scm`` -- indicates which SCM system is used. This is always required.
Allowed values are:
* ``file`` -- copies files from local filesystem
* ``git`` -- copies files from a Git repository
* ``cvs`` -- copies files from a CVS repository
* ``rpm`` -- copies files from a package in the compose
* ``koji`` -- downloads archives from a given build in Koji build system
* ``repo``
* for Git and CVS backends this should be URL to the repository
* for RPM backend this should be a shell style glob matching package names
(or a list of such globs)
* for file backend this should be empty
* for Koji backend this should be an NVR or package name
* ``branch``
* branch name for Git and CVS backends, with ``master`` and ``HEAD`` as defaults
* Koji tag for koji backend if only package name is given
* otherwise should not be specified
* ``file`` -- a list of files that should be exported.
* ``dir`` -- a directory that should be exported. All its contents will be
exported. This option is mutually exclusive with ``file``.
* ``command`` -- defines a shell command to run after Git clone to generate the
needed file (for example to run ``make``). Only supported in Git backend.
* ``options`` -- a dictionary of additional configuration options. These are
specific to different backends.
Currently supported values for Git:
* ``credential_helper`` -- path to a credential helper used to supply
username/password for remotes that require authentication.
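Putting these keys together, a Git ``scm_dict`` exporting a single file could
look like this (the repository URL, branch and file name are illustrative
only)::
    {
        "scm": "git",
        "repo": "https://pagure.io/my-config.git",
        "branch": "main",
        "file": ["variants.xml"],
    }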
Koji examples
-------------
There are two different ways to configure the Koji backend. ::
{
# Download all *.tar files from build my-image-1.0-1.
"scm": "koji",
"repo": "my-image-1.0-1",
"file": "*.tar",
}
{
# Find latest build of my-image in tag my-tag and take files from
# there.
"scm": "koji",
"repo": "my-image",
"branch": "my-tag",
"file": "*.tar",
}
Using both a tag name and an exact NVR will result in an error: the NVR would
be interpreted as a package name, and would not match anything.
``file`` vs. ``dir``
--------------------
Exactly one of these two options has to be specified. Documentation for each
configuration option should specify whether it expects a file or a directory.
For the ``extra_files`` phase either key is valid and should be chosen
depending on the actual use case.
Caveats
-------
The ``rpm`` backend can only be used in phases that would extract the files
after the ``pkgset`` phase finished. You can't get a comps file from a package.
Depending on the Git repository URL, Pungi may be able to export only the
requested content using ``git archive``. When a command should run, this is
not possible and a full clone is always needed.
When using the ``koji`` backend, it is required to provide configuration for
the Koji profile to be used (``koji_profile``). It is not possible to contact
multiple different Koji instances.

42
doc/testing.rst Normal file
View File

@ -0,0 +1,42 @@
===============
Testing Pungi
===============
Test Data
=========
Tests require test data and not all of it is available in git.
You must create test repositories before running the tests::
make test-data
Requirements: createrepo_c, rpmbuild
Unit Tests
==========
Unit tests cover functionality of Pungi python modules.
You can run all of them at once::
make test
which is a shortcut to::
python2 setup.py test
python3 setup.py test
You can alternatively run individual tests::
cd tests
./<test>.py [<class>[.<test>]]
Functional Tests
================
Because a compose is quite a complex process and not everything is covered
with unit tests yet, the easiest way to check that your changes did not break
anything badly is to start a compose on a relatively small and well-defined
package set::
cd tests
./test_compose.sh

40
doc/update-docs.sh Executable file
View File

@ -0,0 +1,40 @@
#!/bin/bash
# Copyright (C) 2015 Red Hat, Inc.
# SPDX-License-Identifier: GPL-2.0
trap cleanup EXIT
function cleanup() {
printf "Run cleanup\\n"
rm -rf "$dir_pungi" "$dir_pungi_doc"
}
if [ -z "$1" ]; then
printf "Usage:\\n"
printf "\\t%s release_version\\n" "$0"
exit 1
fi
set -e
dir_pungi=$(mktemp -d /tmp/pungi.XXX) || { echo "Failed to create temp directory"; exit 1; }
git clone https://pagure.io/pungi.git "$dir_pungi"
pushd "$dir_pungi"/doc
make html
popd
dir_pungi_doc=$(mktemp -d /tmp/pungi-doc.XXX) || { echo "Failed to create temp directory"; exit 1; }
git clone ssh://git@pagure.io/docs/pungi.git "$dir_pungi_doc"
pushd "$dir_pungi_doc"
git rm -fr ./*
cp -r "$dir_pungi"/doc/_build/html/* ./
pushd "$dir_pungi"/doc
git checkout 4.0.x
make html
popd
mkdir 4.0
cp -r "$dir_pungi"/doc/_build/html/* ./4.0/
git add .
git commit -s -m "update rendered pungi docs for release $1"
git push origin master
popd

105
git-changelog Executable file
View File

@ -0,0 +1,105 @@
#!/usr/bin/env python
#
# git-changelog - Output a rpm changelog
#
# Copyright (C) 2009-2010 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: David Cantrell <dcantrell@redhat.com>
# Author: Brian C. Lane <bcl@redhat.com>
from __future__ import print_function
import subprocess
import textwrap
from argparse import ArgumentParser
class ChangeLog:
def __init__(self, name, version):
self.name = name
self.version = version
def _getCommitDetail(self, commit, field):
proc = subprocess.Popen(
["git", "log", "-1", "--pretty=format:%s" % field, commit],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
).communicate()
ret = proc[0].strip('\n').split('\n')
if field == '%aE' and len(ret) == 1 and ret[0].find('@') != -1:
ret = ret[0].split('@')[0]
elif len(ret) == 1:
ret = ret[0]
else:
ret = filter(lambda x: x != '', ret)
return ret
def getLog(self):
if not self.name:
range = "%s.." % (self.version)
else:
range = "%s-%s.." % (self.name, self.version)
proc = subprocess.Popen(
["git", "log", "--pretty=oneline", "--no-merges", range],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
).communicate()
lines = filter(lambda x: x.find('l10n: ') != 41,
proc[0].strip('\n').split('\n'))
log = []
for line in lines:
fields = line.split(' ')
commit = fields[0]
summary = self._getCommitDetail(commit, "%s")
author = self._getCommitDetail(commit, "%aE")
log.append(("%s (%s)" % (summary.strip(), author)))
return log
def formatLog(self):
s = ""
for msg in self.getLog():
sublines = textwrap.wrap(msg, 77)
s = s + "- %s\n" % sublines[0]
if len(sublines) > 1:
for subline in sublines[1:]:
s = s + " %s\n" % subline
return s
def main():
parser = ArgumentParser()
parser.add_argument("-n", "--name",
help="Name of package used in tags")
parser.add_argument("-v", "--version",
help="Last version, changelog is commits after this tag")
args = parser.parse_args()
cl = ChangeLog(args.name, args.version)
print(cl.formatLog())
if __name__ == "__main__":
main()

2617
pungi.spec

File diff suppressed because it is too large

33
pungi/__init__.py Normal file
View File

@ -0,0 +1,33 @@
# -*- coding: utf-8 -*-
import os
import re
def get_full_version():
"""
Find full version of Pungi: if running from git, this will return cleaned
output of `git describe`, otherwise it will look for installed version.
"""
location = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..")
if os.path.isdir(os.path.join(location, ".git")):
import subprocess
proc = subprocess.Popen(
["git", "--git-dir=%s/.git" % location, "describe", "--tags"],
stdout=subprocess.PIPE,
universal_newlines=True,
)
output, _ = proc.communicate()
return re.sub(r"-1.fc\d\d?", "", output.strip().replace("pungi-", ""))
else:
import subprocess
proc = subprocess.Popen(
["rpm", "-q", "pungi"], stdout=subprocess.PIPE, universal_newlines=True
)
(output, err) = proc.communicate()
# err is always None here (stderr is not captured); check the exit
# code instead to detect a failed rpm query.
if proc.returncode == 0:
    return output.rstrip()
else:
    return "unknown"

115
pungi/arch.py Normal file
View File

@ -0,0 +1,115 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
from .arch_utils import arches as ALL_ARCHES
from .arch_utils import getBaseArch, getMultiArchInfo, getArchList
TREE_ARCH_YUM_ARCH_MAP = {
"i386": "i686",
"sparc": "sparc64v",
"arm": "armv7l",
"armhfp": "armv7hnl",
}
def tree_arch_to_yum_arch(tree_arch):
# this is basically an opposite to pungi.arch_utils.getBaseArch()
yum_arch = TREE_ARCH_YUM_ARCH_MAP.get(tree_arch, tree_arch)
return yum_arch
def get_multilib_arch(yum_arch):
arch_info = getMultiArchInfo(yum_arch)
if arch_info is None:
return None
return arch_info[0]
def get_valid_multilib_arches(tree_arch):
yum_arch = tree_arch_to_yum_arch(tree_arch)
multilib_arch = get_multilib_arch(yum_arch)
if not multilib_arch:
return []
return [i for i in getArchList(multilib_arch) if i not in ("noarch", "src")]
def get_valid_arches(tree_arch, multilib=True, add_noarch=True, add_src=False):
result = []
yum_arch = tree_arch_to_yum_arch(tree_arch)
for arch in getArchList(yum_arch):
if arch not in result:
result.append(arch)
if not multilib:
for i in get_valid_multilib_arches(tree_arch):
while i in result:
result.remove(i)
if add_noarch and "noarch" not in result:
result.append("noarch")
if add_src and "src" not in result:
result.append("src")
return result
def get_compatible_arches(arch, multilib=False):
tree_arch = getBaseArch(arch)
compatible_arches = get_valid_arches(tree_arch, multilib=multilib)
return compatible_arches
def is_valid_arch(arch):
if arch in ("noarch", "src", "nosrc"):
return True
if arch in ALL_ARCHES:
return True
return False
def split_name_arch(name_arch):
if "." in name_arch:
name, arch = name_arch.rsplit(".", 1)
if not is_valid_arch(arch):
name, arch = name_arch, None
else:
name, arch = name_arch, None
return name, arch
def is_excluded(package, arches, logger=None):
"""Check if package is excluded from given architectures."""
if any(
getBaseArch(exc_arch) == 'x86_64' for exc_arch in package.exclusivearch
) and 'x86_64_v2' not in package.exclusivearch:
package.exclusivearch.append('x86_64_v2')
if package.excludearch and set(package.excludearch) & set(arches):
if logger:
logger.debug(
"Excluding (EXCLUDEARCH: %s): %s"
% (sorted(set(package.excludearch)), package.file_name)
)
return True
if package.exclusivearch and not (set(package.exclusivearch) & set(arches)):
if logger:
logger.debug(
"Excluding (EXCLUSIVEARCH: %s): %s"
% (sorted(set(package.exclusivearch)), package.file_name)
)
return True
return False
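
A quick sketch of what these helpers return for x86_64, derived from the mapping tables above (the package names are made up):

from pungi.arch import get_valid_arches, get_valid_multilib_arches, split_name_arch

get_valid_arches("x86_64")
# -> ['x86_64', 'athlon', 'i686', 'i586', 'i486', 'i386', 'noarch']
get_valid_arches("x86_64", multilib=False)
# -> ['x86_64', 'noarch']
get_valid_multilib_arches("x86_64")
# -> ['athlon', 'i686', 'i586', 'i486', 'i386']
split_name_arch("bash.x86_64")  # -> ('bash', 'x86_64')
split_name_arch("bash.conf")    # -> ('bash.conf', None); 'conf' is not an arch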

368
pungi/arch_utils.py Normal file
View File

@ -0,0 +1,368 @@
# A copy of some necessary parts from yum.rpmUtils.arch, with slight changes:
# 1. _ppc64_native_is_best changed to True
# 2. code style fixes for flake8-reported errors
import os
import rpm
import ctypes
import struct
# _ppc64_native_is_best is False in yum's source code, but patched with a
# separate patch when built from source rpm, so we set it to True here.
_ppc64_native_is_best = True
# dict mapping arch -> ( multicompat, best personality, biarch personality )
multilibArches = {
"x86_64": ("athlon", "x86_64", "athlon"),
"sparc64v": ("sparcv9v", "sparcv9v", "sparc64v"),
"sparc64": ("sparcv9", "sparcv9", "sparc64"),
"ppc64": ("ppc", "ppc", "ppc64"),
"s390x": ("s390", "s390x", "s390"),
}
if _ppc64_native_is_best:
multilibArches["ppc64"] = ("ppc", "ppc64", "ppc64")
arches = {
# ia32
"athlon": "i686",
"i686": "i586",
"geode": "i586",
"i586": "i486",
"i486": "i386",
"i386": "noarch",
# amd64
"x86_64": "athlon",
"amd64": "x86_64",
"ia32e": "x86_64",
# x86-64-v2
"x86_64_v2": "noarch",
# ppc64le
"ppc64le": "noarch",
# ppc
"ppc64p7": "ppc64",
"ppc64pseries": "ppc64",
"ppc64iseries": "ppc64",
"ppc64": "ppc",
"ppc": "noarch",
# s390{,x}
"s390x": "s390",
"s390": "noarch",
# sparc
"sparc64v": "sparcv9v",
"sparc64": "sparcv9",
"sparcv9v": "sparcv9",
"sparcv9": "sparcv8",
"sparcv8": "sparc",
"sparc": "noarch",
# alpha
"alphaev7": "alphaev68",
"alphaev68": "alphaev67",
"alphaev67": "alphaev6",
"alphaev6": "alphapca56",
"alphapca56": "alphaev56",
"alphaev56": "alphaev5",
"alphaev5": "alphaev45",
"alphaev45": "alphaev4",
"alphaev4": "alpha",
"alpha": "noarch",
# arm
"armv7l": "armv6l",
"armv6l": "armv5tejl",
"armv5tejl": "armv5tel",
"armv5tel": "noarch",
# arm hardware floating point
"armv7hnl": "armv7hl",
"armv7hl": "armv6hl",
"armv6hl": "noarch",
# arm64
"arm64": "noarch",
# aarch64
"aarch64": "noarch",
# super-h
"sh4a": "sh4",
"sh4": "noarch",
"sh3": "noarch",
# itanium
"ia64": "noarch",
}
# Will contain information parsed from /proc/self/auxv via _parse_auxv().
# Should move into rpm really.
_aux_vector = {
"platform": "",
"hwcap": 0,
}
def isMultiLibArch(arch=None): # pragma: no cover
"""returns true if arch is a multilib arch, false if not"""
if arch is None:
arch = canonArch
if arch not in arches: # or we could check if it is noarch
return 0
if arch in multilibArches:
return 1
if arches[arch] in multilibArches:
return 1
return 0
def getArchList(thisarch=None): # pragma: no cover
# this returns a list of archs that are compatible with arch given
if not thisarch:
thisarch = canonArch
archlist = [thisarch]
while thisarch in arches:
thisarch = arches[thisarch]
archlist.append(thisarch)
# hack hack hack
# sparc64v is also sparc64 compat
if archlist[0] == "sparc64v":
archlist.insert(1, "sparc64")
# if we're a weirdo arch - add noarch on there.
if len(archlist) == 1 and archlist[0] == thisarch:
archlist.append("noarch")
return archlist
def _try_read_cpuinfo(): # pragma: no cover
"""Try to read /proc/cpuinfo ... if we can't ignore errors (ie. proc not
mounted)."""
try:
with open("/proc/cpuinfo", "r") as f:
return f.readlines()
except Exception:
return []
def _parse_auxv(): # pragma: no cover
"""Read /proc/self/auxv and parse it into global dict for easier access
later on, very similar to what rpm does."""
# In case we can't open and read /proc/self/auxv, just return
try:
with open("/proc/self/auxv", "rb") as f:
data = f.read()
except Exception:
return
# Define values from /usr/include/elf.h
AT_PLATFORM = 15
AT_HWCAP = 16
fmtlen = struct.calcsize("LL")
offset = 0
platform = ctypes.c_char_p()
# Parse the data and fill in _aux_vector dict
while offset <= len(data) - fmtlen:
at_type, at_val = struct.unpack_from("LL", data, offset)
if at_type == AT_PLATFORM:
platform.value = at_val
_aux_vector["platform"] = platform.value
if at_type == AT_HWCAP:
_aux_vector["hwcap"] = at_val
offset = offset + fmtlen
def getCanonX86Arch(arch): # pragma: no cover
if arch == "i586":
for line in _try_read_cpuinfo():
if line.startswith("model name"):
if line.find("Geode(TM)") != -1:
return "geode"
break
return arch
# only athlon vs i686 isn't handled with uname currently
if arch != "i686":
return arch
# if we're i686 and AuthenticAMD, then we should be an athlon
for line in _try_read_cpuinfo():
if line.startswith("vendor") and line.find("AuthenticAMD") != -1:
return "athlon"
elif line.startswith("vendor") and line.find("HygonGenuine") != -1:
return "athlon"
# i686 doesn't guarantee cmov, but we depend on it
elif line.startswith("flags"):
if line.find("cmov") == -1:
return "i586"
break
return arch
def getCanonARMArch(arch): # pragma: no cover
# the %{_target_arch} macro in rpm will let us know the abi we are using
target = rpm.expandMacro("%{_target_cpu}")
if target.startswith("armv6h"):
return target
if target.startswith("armv7h"):
return target
return arch
def getCanonPPCArch(arch): # pragma: no cover
# FIXME: should I do better handling for mac, etc?
if arch != "ppc64":
return arch
machine = None
for line in _try_read_cpuinfo():
if line.find("machine") != -1:
machine = line.split(":")[1]
break
platform = _aux_vector["platform"]
if machine is None and not platform:
return arch
try:
if platform.startswith("power") and int(platform[5:].rstrip("+")) >= 7:
return "ppc64p7"
except Exception:
pass
if machine is None:
return arch
if machine.find("CHRP IBM") != -1:
return "ppc64pseries"
if machine.find("iSeries") != -1:
return "ppc64iseries"
return arch
def getCanonSPARCArch(arch): # pragma: no cover
# Deal with sun4v, sun4u, sun4m cases
SPARCtype = None
for line in _try_read_cpuinfo():
if line.startswith("type"):
SPARCtype = line.split(":")[1]
break
if SPARCtype is None:
return arch
if SPARCtype.find("sun4v") != -1:
if arch.startswith("sparc64"):
return "sparc64v"
else:
return "sparcv9v"
if SPARCtype.find("sun4u") != -1:
if arch.startswith("sparc64"):
return "sparc64"
else:
return "sparcv9"
if SPARCtype.find("sun4m") != -1:
return "sparcv8"
return arch
def getCanonX86_64Arch(arch): # pragma: no cover
if arch != "x86_64":
return arch
vendor = None
for line in _try_read_cpuinfo():
if line.startswith("vendor_id"):
vendor = line.split(":")[1]
break
if vendor is None:
return arch
if vendor.find("Authentic AMD") != -1 or vendor.find("AuthenticAMD") != -1:
return "amd64"
if vendor.find("HygonGenuine") != -1:
return "amd64"
if vendor.find("GenuineIntel") != -1:
return "ia32e"
return arch
def getCanonArch(skipRpmPlatform=0): # pragma: no cover
if not skipRpmPlatform and os.access("/etc/rpm/platform", os.R_OK):
try:
f = open("/etc/rpm/platform", "r")
line = f.readline()
f.close()
(arch, vendor, opersys) = line.split("-", 2)
return arch
except Exception:
pass
arch = os.uname()[4]
_parse_auxv()
if len(arch) == 4 and arch[0] == "i" and arch[2:4] == "86":
return getCanonX86Arch(arch)
if arch.startswith("arm"):
return getCanonARMArch(arch)
if arch.startswith("ppc"):
return getCanonPPCArch(arch)
if arch.startswith("sparc"):
return getCanonSPARCArch(arch)
if arch == "x86_64":
return getCanonX86_64Arch(arch)
return arch
canonArch = getCanonArch()
# this gets you the "compat" arch of a biarch pair
def getMultiArchInfo(arch=canonArch): # pragma: no cover
if arch in multilibArches:
return multilibArches[arch]
if arch in arches and arches[arch] != "noarch":
return getMultiArchInfo(arch=arches[arch])
return None
def getBaseArch(myarch=None): # pragma: no cover
"""returns 'base' arch for myarch, if specified, or canonArch if not.
base arch is the arch before noarch in the arches dict if myarch is not
a key in the multilibArches."""
if not myarch:
myarch = canonArch
if myarch not in arches: # this is dumb, but <shrug>
return myarch
if myarch.startswith("sparc64"):
return "sparc"
elif myarch == "ppc64le":
return "ppc64le"
elif myarch.startswith("ppc64") and not _ppc64_native_is_best:
return "ppc"
elif myarch.startswith("arm64"):
return "arm64"
elif myarch.startswith("armv6h"):
return "armhfp"
elif myarch.startswith("armv7h"):
return "armhfp"
elif myarch.startswith("arm"):
return "arm"
if isMultiLibArch(arch=myarch):
if myarch in multilibArches:
return myarch
else:
return arches[myarch]
if myarch in arches:
basearch = myarch
value = arches[basearch]
while value != "noarch":
basearch = value
value = arches[basearch]
return basearch
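
A few concrete mappings implied by the tables above (a sketch; multilib arches such as x86_64 are their own base arch):

getBaseArch("i686")     # -> 'i386'   (walks i686 -> i586 -> i486 -> i386)
getBaseArch("armv7hl")  # -> 'armhfp'
getBaseArch("x86_64")   # -> 'x86_64' (multilib arch, returns itself)
getBaseArch("ppc64")    # -> 'ppc64'  (because _ppc64_native_is_best is True)
getBaseArch("aarch64")  # -> 'aarch64'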

1622
pungi/checks.py Normal file

File diff suppressed because it is too large

34
pungi/common.py Normal file
View File

@ -0,0 +1,34 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
class OptionsBase(object):
def __init__(self, **kwargs):
"""
inherit and initialize attributes
call self.merge_options(**kwargs) at the end
"""
pass
def merge_options(self, **kwargs):
"""
override defaults with user defined values
"""
for key, value in kwargs.items():
if not hasattr(self, key):
raise ValueError(
"Invalid option in %s: %s" % (self.__class__.__name__, key)
)
setattr(self, key, value)
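
A minimal sketch of how OptionsBase is meant to be used; the subclass and its attributes here are illustrative, not taken from Pungi:

class ExampleOptions(OptionsBase):
    def __init__(self, **kwargs):
        # define defaults first, then let user values override them
        self.fulltree = False
        self.selfhosting = False
        self.merge_options(**kwargs)

opts = ExampleOptions(fulltree=True)   # ok, overrides the default
opts = ExampleOptions(full_tree=True)  # ValueError: Invalid option in ExampleOptions: full_tree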

779
pungi/compose.py Normal file
View File

@ -0,0 +1,779 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
__all__ = ("Compose",)
import contextlib
import errno
import logging
import os
import time
import tempfile
import shutil
import json
import socket
import kobo.log
import kobo.tback
import requests
from requests.exceptions import RequestException
from productmd.composeinfo import ComposeInfo
from productmd.images import Images
from dogpile.cache import make_region
from pungi.graph import SimpleAcyclicOrientedGraph
from pungi.wrappers.variants import VariantsXmlParser
from pungi.paths import Paths
from pungi.wrappers.kojiwrapper import KojiDownloadProxy
from pungi.wrappers.scm import get_file_from_scm
from pungi.util import (
makedirs,
get_arch_variant_data,
get_format_substs,
get_variant_data,
retry,
translate_path_raw,
)
from pungi.metadata import compose_to_composeinfo
try:
# This is available since productmd >= 1.18
# TODO: remove this once the version is distributed widely enough
from productmd.composeinfo import SUPPORTED_MILESTONES
except ImportError:
SUPPORTED_MILESTONES = ["RC", "Update", "SecurityFix"]
def is_status_fatal(status_code):
"""Check if status code returned from CTS reports an error that is unlikely
to be fixed by retrying. Generally client errors (4XX) are fatal, with the
exception of 401 Unauthorized which could be caused by transient network
issue between compose host and KDC.
"""
if status_code == 401:
return False
return status_code >= 400 and status_code < 500
@retry(wait_on=RequestException)
def retry_request(method, url, data=None, json_data=None, auth=None):
"""
:param str method: Request method.
:param str url: Target URL.
:param dict data: form-urlencoded data to send in the body of the request.
:param dict json_data: json data to send in the body of the request.
"""
request_method = getattr(requests, method)
rv = request_method(url, data=data, json=json_data, auth=auth)
if is_status_fatal(rv.status_code):
try:
error = rv.json()
except ValueError:
error = rv.text
raise RuntimeError("%s responded with %d: %s" % (url, rv.status_code, error))
rv.raise_for_status()
return rv
class BearerAuth(requests.auth.AuthBase):
def __init__(self, token):
self.token = token
def __call__(self, r):
r.headers["authorization"] = "Bearer " + self.token
return r
@contextlib.contextmanager
def cts_auth(pungi_conf):
"""
:param dict pungi_conf: dict obj of pungi.json config.
"""
auth = None
token = None
cts_keytab = pungi_conf.get("cts_keytab")
cts_oidc_token_url = os.environ.get("CTS_OIDC_TOKEN_URL", "") or pungi_conf.get(
"cts_oidc_token_url"
)
try:
if cts_keytab:
# requests-kerberos cannot accept custom keytab, we need to use
# environment variable for this. But we need to change environment
# only temporarily just for this single requests.post.
# So at first backup the current environment and revert to it
# after the requests call.
from requests_kerberos import HTTPKerberosAuth
auth = HTTPKerberosAuth()
environ_copy = dict(os.environ)
if "$HOSTNAME" in cts_keytab:
cts_keytab = cts_keytab.replace("$HOSTNAME", socket.gethostname())
os.environ["KRB5_CLIENT_KTNAME"] = cts_keytab
os.environ["KRB5CCNAME"] = "DIR:%s" % tempfile.mkdtemp()
elif cts_oidc_token_url:
cts_oidc_client_id = os.environ.get(
"CTS_OIDC_CLIENT_ID", ""
) or pungi_conf.get("cts_oidc_client_id", "")
token = retry_request(
"post",
cts_oidc_token_url,
data={
"grant_type": "client_credentials",
"client_id": cts_oidc_client_id,
"client_secret": os.environ.get("CTS_OIDC_CLIENT_SECRET", ""),
},
).json()["access_token"]
auth = BearerAuth(token)
del token
yield auth
except Exception as e:
# Avoid leaking client secret in traceback
e.show_locals = False
raise e
finally:
if cts_keytab:
shutil.rmtree(os.environ["KRB5CCNAME"].split(":", 1)[1])
os.environ.clear()
os.environ.update(environ_copy)
def get_compose_info(
conf,
compose_type="production",
compose_date=None,
compose_respin=None,
compose_label=None,
parent_compose_ids=None,
respin_of=None,
):
"""
Create an incomplete ComposeInfo to generate a compose ID.
"""
ci = ComposeInfo()
ci.release.name = conf["release_name"]
ci.release.short = conf["release_short"]
ci.release.version = conf["release_version"]
ci.release.is_layered = True if conf.get("base_product_name", "") else False
ci.release.type = conf.get("release_type", "ga").lower()
ci.release.internal = bool(conf.get("release_internal", False))
if ci.release.is_layered:
ci.base_product.name = conf["base_product_name"]
ci.base_product.short = conf["base_product_short"]
ci.base_product.version = conf["base_product_version"]
ci.base_product.type = conf.get("base_product_type", "ga").lower()
ci.compose.label = compose_label
ci.compose.type = compose_type
ci.compose.date = compose_date or time.strftime("%Y%m%d", time.localtime())
ci.compose.respin = compose_respin or 0
ci.compose.id = ci.create_compose_id()
cts_url = conf.get("cts_url")
if cts_url:
# Create compose in CTS and get the reserved compose ID.
url = os.path.join(cts_url, "api/1/composes/")
data = {
"compose_info": json.loads(ci.dumps()),
"parent_compose_ids": parent_compose_ids,
"respin_of": respin_of,
}
with cts_auth(conf) as authentication:
rv = retry_request("post", url, json_data=data, auth=authentication)
# Update local ComposeInfo with received ComposeInfo.
cts_ci = ComposeInfo()
cts_ci.loads(rv.text)
ci.compose.respin = cts_ci.compose.respin
ci.compose.id = cts_ci.compose.id
return ci
def write_compose_info(compose_dir, ci):
"""
Write ComposeInfo `ci` to `compose_dir` subdirectories.
"""
makedirs(compose_dir)
with open(os.path.join(compose_dir, "COMPOSE_ID"), "w") as f:
f.write(ci.compose.id)
work_dir = os.path.join(compose_dir, "work", "global")
makedirs(work_dir)
ci.dump(os.path.join(work_dir, "composeinfo-base.json"))
def update_compose_url(compose_id, compose_dir, conf):
cts_url = conf.get("cts_url", None)
if cts_url:
url = os.path.join(cts_url, "api/1/composes", compose_id)
tp = conf.get("translate_paths", None)
compose_url = translate_path_raw(tp, compose_dir)
if compose_url == compose_dir:
# We do not have a URL, do not attempt the update.
return
data = {
"action": "set_url",
"compose_url": compose_url,
}
with cts_auth(conf) as authentication:
return retry_request("patch", url, json_data=data, auth=authentication)
def get_compose_dir(
topdir,
conf,
compose_type="production",
compose_date=None,
compose_respin=None,
compose_label=None,
already_exists_callbacks=None,
parent_compose_ids=None,
respin_of=None,
):
already_exists_callbacks = already_exists_callbacks or []
ci = get_compose_info(
conf,
compose_type,
compose_date,
compose_respin,
compose_label,
parent_compose_ids,
respin_of,
)
cts_url = conf.get("cts_url", None)
if cts_url:
# Create compose directory.
compose_dir = os.path.join(topdir, ci.compose.id)
os.makedirs(compose_dir)
else:
while 1:
ci.compose.id = ci.create_compose_id()
compose_dir = os.path.join(topdir, ci.compose.id)
exists = False
# TODO: callbacks to determine if a composeid was already used
# for callback in already_exists_callbacks:
# if callback(data):
# exists = True
# break
# already_exists_callbacks fallback: does target compose_dir exist?
try:
os.makedirs(compose_dir)
except OSError as ex:
if ex.errno == errno.EEXIST:
exists = True
else:
raise
if exists:
ci = get_compose_info(
conf,
compose_type,
compose_date,
ci.compose.respin + 1,
compose_label,
)
continue
break
write_compose_info(compose_dir, ci)
return compose_dir
class Compose(kobo.log.LoggingBase):
def __init__(
self,
conf,
topdir,
skip_phases=None,
just_phases=None,
old_composes=None,
koji_event=None,
supported=False,
logger=None,
notifier=None,
):
kobo.log.LoggingBase.__init__(self, logger)
# TODO: check if minimal conf values are set
self.conf = conf
# This is a dict mapping UID to Variant objects. It only contains top
# level variants.
self.variants = {}
# This is a similar mapping, but contains even nested variants.
self.all_variants = {}
self.topdir = os.path.abspath(topdir)
self.skip_phases = skip_phases or []
self.just_phases = just_phases or []
self.old_composes = old_composes or []
self.koji_event = koji_event or conf.get("koji_event")
self.notifier = notifier
self._old_config = None
# path definitions
self.paths = Paths(self)
# Set up logging to file
if logger:
kobo.log.add_file_logger(
logger, self.paths.log.log_file("global", "pungi.log")
)
kobo.log.add_file_logger(
logger, self.paths.log.log_file("global", "excluding-arch.log")
)
class PungiLogFilter(logging.Filter):
def filter(self, record):
return (
False
if record.funcName and record.funcName == "is_excluded"
else True
)
class ExcludingArchLogFilter(logging.Filter):
def filter(self, record):
message = record.getMessage()
if "Populating package set for arch:" in message or (
record.funcName and record.funcName == "is_excluded"
):
return True
else:
return False
for handler in logger.handlers:
if isinstance(handler, logging.FileHandler):
log_file_name = os.path.basename(handler.stream.name)
if log_file_name == "pungi.global.log":
handler.addFilter(PungiLogFilter())
elif log_file_name == "excluding-arch.global.log":
handler.addFilter(ExcludingArchLogFilter())
# to provide compose_id, compose_date and compose_respin
self.ci_base = ComposeInfo()
self.ci_base.load(
os.path.join(self.paths.work.topdir(arch="global"), "composeinfo-base.json")
)
self.supported = supported
if (
self.compose_label
and self.compose_label.split("-")[0] in SUPPORTED_MILESTONES
):
self.log_info(
"Automatically setting 'supported' flag due to label: %s."
% self.compose_label
)
self.supported = True
self.im = Images()
self.im.compose.id = self.compose_id
self.im.compose.type = self.compose_type
self.im.compose.date = self.compose_date
self.im.compose.respin = self.compose_respin
self.im.metadata_path = self.paths.compose.metadata()
self.containers_metadata = {}
# Stores list of deliverables that failed, but did not abort the
# compose.
# {deliverable: [(Variant.uid, arch, subvariant)]}
self.failed_deliverables = {}
self.attempted_deliverables = {}
self.required_deliverables = {}
if self.conf.get("dogpile_cache_backend", None):
self.cache_region = make_region().configure(
self.conf.get("dogpile_cache_backend"),
expiration_time=self.conf.get("dogpile_cache_expiration_time", 3600),
arguments=self.conf.get("dogpile_cache_arguments", {}),
)
else:
self.cache_region = make_region().configure("dogpile.cache.null")
self.koji_downloader = KojiDownloadProxy.from_config(self.conf, self._logger)
get_compose_info = staticmethod(get_compose_info)
write_compose_info = staticmethod(write_compose_info)
get_compose_dir = staticmethod(get_compose_dir)
update_compose_url = staticmethod(update_compose_url)
def __getitem__(self, name):
return self.variants[name]
@property
def compose_id(self):
return self.ci_base.compose.id
@property
def compose_date(self):
return self.ci_base.compose.date
@property
def compose_respin(self):
return self.ci_base.compose.respin
@property
def compose_type(self):
return self.ci_base.compose.type
@property
def compose_type_suffix(self):
return self.ci_base.compose.type_suffix
@property
def compose_label(self):
return self.ci_base.compose.label
@property
def compose_label_major_version(self):
return self.ci_base.compose.label_major_version
@property
def has_comps(self):
return bool(self.conf.get("comps_file", False))
@property
def has_module_defaults(self):
return bool(self.conf.get("module_defaults_dir", False))
@property
def has_module_obsoletes(self):
return bool(self.conf.get("module_obsoletes_dir", False))
@property
def config_dir(self):
return os.path.dirname(self.conf._open_file or "")
@property
def should_create_yum_database(self):
"""Explicit configuration trumps all. Otherwise check gather backend
and only create it for Yum.
"""
config = self.conf.get("createrepo_database")
if config is not None:
return config
return self.conf["gather_backend"] == "yum"
def read_variants(self):
# TODO: move to phases/init ?
variants_file = self.paths.work.variants_file(arch="global")
scm_dict = self.conf["variants_file"]
if isinstance(scm_dict, dict):
file_name = os.path.basename(scm_dict["file"])
if scm_dict["scm"] == "file":
scm_dict["file"] = os.path.join(
self.config_dir, os.path.basename(scm_dict["file"])
)
else:
file_name = os.path.basename(scm_dict)
scm_dict = os.path.join(self.config_dir, scm_dict)
self.log_debug("Writing variants file: %s", variants_file)
tmp_dir = self.mkdtemp(prefix="variants_file_")
get_file_from_scm(scm_dict, tmp_dir, compose=self)
shutil.copy2(os.path.join(tmp_dir, file_name), variants_file)
shutil.rmtree(tmp_dir)
tree_arches = self.conf.get("tree_arches", None)
tree_variants = self.conf.get("tree_variants", None)
with open(variants_file, "r") as file_obj:
parser = VariantsXmlParser(
file_obj, tree_arches, tree_variants, logger=self._logger
)
self.variants = parser.parse()
self.all_variants = {}
for variant in self.get_variants():
self.all_variants[variant.uid] = variant
# populate ci_base with variants - needed for layered-products (compose_id)
# FIXME - compose_to_composeinfo is no longer needed and has been
# removed, but I'm not entirely sure what this is needed for
# or if it is at all
self.ci_base = compose_to_composeinfo(self)
def get_variants(self, types=None, arch=None):
result = []
for i in self.variants.values():
if (not types or i.type in types) and (not arch or arch in i.arches):
result.append(i)
result.extend(i.get_variants(types=types, arch=arch))
return sorted(set(result))
def get_arches(self):
result = set()
for variant in self.get_variants():
for arch in variant.arches:
result.add(arch)
return sorted(result)
@property
def status_file(self):
"""Path to file where the compose status will be stored."""
if not hasattr(self, "_status_file"):
self._status_file = os.path.join(self.topdir, "STATUS")
return self._status_file
def _log_failed_deliverables(self):
for kind, data in self.failed_deliverables.items():
for variant, arch, subvariant in data:
self.log_info(
"Failed %s on variant <%s>, arch <%s>, subvariant <%s>."
% (kind, variant, arch, subvariant)
)
log = os.path.join(self.paths.log.topdir("global"), "deliverables.json")
with open(log, "w") as f:
json.dump(
{
"required": self.required_deliverables,
"failed": self.failed_deliverables,
"attempted": self.attempted_deliverables,
},
f,
indent=4,
)
def write_status(self, stat_msg):
if stat_msg not in ("STARTED", "FINISHED", "DOOMED", "TERMINATED"):
self.log_warning("Writing nonstandard compose status: %s" % stat_msg)
old_status = self.get_status()
if stat_msg == old_status:
return
if old_status == "FINISHED":
msg = "Could not modify a FINISHED compose: %s" % self.topdir
self.log_error(msg)
raise RuntimeError(msg)
if stat_msg == "FINISHED" and self.failed_deliverables:
stat_msg = "FINISHED_INCOMPLETE"
self._log_failed_deliverables()
with open(self.status_file, "w") as f:
f.write(stat_msg + "\n")
if self.notifier:
self.notifier.send("status-change", status=stat_msg)
def get_status(self):
if not os.path.isfile(self.status_file):
return
return open(self.status_file, "r").read().strip()
def get_image_name(
self, arch, variant, disc_type="dvd", disc_num=1, suffix=".iso", format=None
):
"""Create a filename for image with given parameters.
:raises RuntimeError: when unknown ``disc_type`` is given
"""
default_format = "{compose_id}-{variant}-{arch}-{disc_type}{disc_num}{suffix}"
format = format or self.conf.get("image_name_format", default_format)
if isinstance(format, dict):
conf = get_variant_data(self.conf, "image_name_format", variant)
format = conf[0] if conf else default_format
if arch == "src":
arch = "source"
if disc_num:
disc_num = int(disc_num)
else:
disc_num = ""
kwargs = {
"arch": arch,
"disc_type": disc_type,
"disc_num": disc_num,
"suffix": suffix,
}
if variant.type == "layered-product":
variant_uid = variant.parent.uid
kwargs["compose_id"] = self.ci_base[variant.uid].compose_id
else:
variant_uid = variant.uid
args = get_format_substs(self, variant=variant_uid, **kwargs)
try:
return (format % args).format(**args)
except KeyError as err:
raise RuntimeError(
"Failed to create image name: unknown format element: %s" % err
)
def can_fail(self, variant, arch, deliverable):
"""Figure out if deliverable can fail on variant.arch.
Variant can be None.
"""
failable = get_arch_variant_data(
self.conf, "failable_deliverables", arch, variant
)
return deliverable in failable
def attempt_deliverable(self, variant, arch, kind, subvariant=None):
"""Log information about attempted deliverable."""
variant_uid = variant.uid if variant else ""
self.attempted_deliverables.setdefault(kind, []).append(
(variant_uid, arch, subvariant)
)
def require_deliverable(self, variant, arch, kind, subvariant=None):
"""Log information about attempted deliverable."""
variant_uid = variant.uid if variant else ""
self.required_deliverables.setdefault(kind, []).append(
(variant_uid, arch, subvariant)
)
def fail_deliverable(self, variant, arch, kind, subvariant=None):
"""Log information about failed deliverable."""
variant_uid = variant.uid if variant else ""
self.failed_deliverables.setdefault(kind, []).append(
(variant_uid, arch, subvariant)
)
@property
def image_release(self):
"""Generate a value to pass to Koji as image release.
If this compose has a label, the version from it will be used,
otherwise we will create a string with date, compose type and respin.
"""
if self.compose_label:
milestone, release = self.compose_label.split("-")
return release
return "%s%s.%s" % (
self.compose_date,
self.ci_base.compose.type_suffix,
self.compose_respin,
)
@property
def image_version(self):
"""Generate a value to pass to Koji as image version.
The value is based on release version. If compose has a label, the
milestone from it is appended to the version (unless it is RC).
"""
version = self.ci_base.release.version
if self.compose_label and not self.compose_label.startswith("RC-"):
milestone, release = self.compose_label.split("-")
return "%s_%s" % (version, milestone)
return version
def mkdtemp(self, arch=None, variant=None, suffix="", prefix="tmp"):
"""
Create and return a unique temporary directory under
<compose_topdir>/work/{global,<arch>}/tmp[-<variant>]/
"""
path = os.path.join(self.paths.work.tmp_dir(arch=arch, variant=variant))
tmpdir = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=path)
os.chmod(tmpdir, 0o755)
return tmpdir
def dump_containers_metadata(self):
"""Create a file with container metadata if there are any containers."""
if not self.containers_metadata:
return
with open(self.paths.compose.metadata("osbs.json"), "w") as f:
json.dump(
self.containers_metadata,
f,
indent=4,
sort_keys=True,
separators=(",", ": "),
)
def traceback(self, detail=None, show_locals=True):
"""Store an extended traceback. This method should only be called when
handling an exception.
:param str detail: Extra information appended to the filename
"""
basename = "traceback"
if detail:
basename += "-" + detail
tb_path = self.paths.log.log_file("global", basename)
self.log_error("Extended traceback in: %s", tb_path)
tback = kobo.tback.Traceback(show_locals=show_locals).get_traceback()
# Kobo 0.36.0 returns traceback as str, older versions return bytes
with open(tb_path, "wb" if isinstance(tback, bytes) else "w") as f:
f.write(tback)
def load_old_compose_config(self):
"""
Helper method to load Pungi config dump from old compose.
"""
if not self._old_config:
config_dump_full = self.paths.log.log_file("global", "config-dump")
config_dump_full = self.paths.old_compose_path(config_dump_full)
if not config_dump_full:
return None
self.log_info("Loading old config file: %s", config_dump_full)
with open(config_dump_full, "r") as f:
self._old_config = json.load(f)
return self._old_config
def get_ordered_variant_uids(compose):
if not hasattr(compose, "_ordered_variant_uids"):
ordered_variant_uids = _prepare_variant_as_lookaside(compose)
# Some variants were not mentioned in configuration value
# 'variant_as_lookaside' and its run order is not crucial (that
# means there are no dependencies inside this group). They will be
# processed first. A-Z sorting is for reproducibility.
unordered_variant_uids = sorted(
set(compose.all_variants.keys()) - set(ordered_variant_uids)
)
setattr(
compose,
"_ordered_variant_uids",
unordered_variant_uids + ordered_variant_uids,
)
return getattr(compose, "_ordered_variant_uids")
def _prepare_variant_as_lookaside(compose):
"""
Configuration value 'variant_as_lookaside' contains variant pairs <variant,
its lookaside>. In each pair the lookaside variant has to be processed
first. The structure can be represented as an oriented graph, and its
spanning line gives the order in which to process this set of variants.
"""
variant_as_lookaside = compose.conf.get("variant_as_lookaside", [])
graph = SimpleAcyclicOrientedGraph()
for variant, lookaside_variant in variant_as_lookaside:
try:
graph.add_edge(variant, lookaside_variant)
except ValueError as e:
raise ValueError(
"There is a bad configuration in 'variant_as_lookaside': %s" % e
)
variant_processing_order = reversed(graph.prune_graph())
return list(variant_processing_order)
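
To make the image_release and image_version properties above concrete, here is a worked example with hypothetical values, for a compose labeled "Beta-1.2" with release version "40":

compose.image_release  # -> '1.2'      (the version part of the label)
compose.image_version  # -> '40_Beta'  (milestone appended unless it is RC)
# Without a label, e.g. a nightly compose from 2024-03-15:
compose.image_release  # -> '20240315.n.0'  (date + type suffix + respin)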

View File

@ -0,0 +1,93 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
"""
The .discinfo file contains metadata about media.
The following fields are part of the .discinfo file,
one record per line:
- timestamp
- release
- architecture
- disc number (optional)
"""
__all__ = (
"read_discinfo",
"write_discinfo",
"write_media_repo",
)
import os
import time
def write_discinfo(file_path, description, arch, disc_numbers=None, timestamp=None):
"""
Write a .discinfo file.
"""
disc_numbers = disc_numbers or ["ALL"]
if not isinstance(disc_numbers, list):
raise TypeError(
"Invalid type: disc_numbers type is %s; expected: <list>"
% type(disc_numbers)
)
if not timestamp:
timestamp = os.environ.get("SOURCE_DATE_EPOCH", "%f" % time.time())
with open(file_path, "w") as f:
f.write("%s\n" % timestamp)
f.write("%s\n" % description)
f.write("%s\n" % arch)
if disc_numbers:
f.write("%s\n" % ",".join([str(i) for i in disc_numbers]))
return timestamp
def read_discinfo(file_path):
result = {}
with open(file_path, "r") as f:
result["timestamp"] = f.readline().strip()
result["description"] = f.readline().strip()
result["arch"] = f.readline().strip()
disc_numbers = f.readline().strip()
if not disc_numbers:
result["disc_numbers"] = None
elif disc_numbers == "ALL":
result["disc_numbers"] = ["ALL"]
else:
result["disc_numbers"] = [int(i) for i in disc_numbers.split(",")]
return result
def write_media_repo(file_path, description, timestamp):
"""
Write media.repo file for the disc to be used on installed system.
PackageKit uses this.
"""
data = [
"[InstallMedia]",
"name=%s" % description,
"mediaid=%s" % timestamp,
"metadata_expire=-1",
"gpgcheck=0",
"cost=500",
"",
]
with open(file_path, "w") as repo_file:
repo_file.write("\n".join(data))
return timestamp
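
A round trip through the helpers above (the paths and description are made up):

ts = write_discinfo("/tmp/.discinfo", "Fedora 40", "x86_64", disc_numbers=[1, 2])
read_discinfo("/tmp/.discinfo")
# -> {'timestamp': ts, 'description': 'Fedora 40',
#     'arch': 'x86_64', 'disc_numbers': [1, 2]}
write_media_repo("/tmp/media.repo", "Fedora 40", ts)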

79
pungi/config.py Normal file
View File

@ -0,0 +1,79 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
import os
import sys
import time
from ConfigParser import SafeConfigParser
from .arch_utils import getBaseArch
# In development, `here` will point to the bin/ directory with scripts.
here = sys.path[0]
MULTILIBCONF = (
os.path.join(os.path.dirname(__file__), "..", "share", "multilib")
if here != "/usr/bin"
else "/usr/share/pungi/multilib"
)
class Config(SafeConfigParser):
def __init__(self, pungirc=None):
SafeConfigParser.__init__(self)
self.add_section("pungi")
self.add_section("lorax")
self.set("pungi", "osdir", "os")
self.set("pungi", "sourcedir", "source")
self.set("pungi", "debugdir", "debug")
self.set("pungi", "isodir", "iso")
self.set("pungi", "multilibconf", MULTILIBCONF)
self.set(
"pungi", "relnotefilere", "LICENSE README-BURNING-ISOS-en_US.txt ^RPM-GPG"
)
self.set("pungi", "relnotedirre", "")
self.set(
"pungi", "relnotepkgs", "fedora-repos fedora-release fedora-release-notes"
)
self.set("pungi", "product_path", "Packages")
self.set("pungi", "cachedir", "/var/cache/pungi")
self.set("pungi", "compress_type", "xz")
self.set("pungi", "arch", getBaseArch())
self.set("pungi", "family", "Fedora")
self.set("pungi", "iso_basename", "Fedora")
self.set("pungi", "version", time.strftime("%Y%m%d", time.localtime()))
self.set("pungi", "variant", "")
self.set("pungi", "destdir", os.getcwd())
self.set("pungi", "workdirbase", "/work")
self.set("pungi", "bugurl", "https://bugzilla.redhat.com")
self.set("pungi", "cdsize", "695.0")
self.set("pungi", "debuginfo", "True")
self.set("pungi", "alldeps", "True")
self.set("pungi", "isfinal", "False")
self.set("pungi", "nohash", "False")
self.set("pungi", "full_archlist", "False")
self.set("pungi", "multilib", "")
self.set("pungi", "lookaside_repos", "")
self.set("pungi", "resolve_deps", "True")
self.set("pungi", "no_dvd", "False")
self.set("pungi", "nomacboot", "False")
self.set("pungi", "rootfs_size", "False")
# if missing, self.read() is a noop, else change 'defaults'
if pungirc:
self.read(os.path.expanduser(pungirc))
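
Typical use is instantiating the class and overriding a few of the defaults set above (a sketch; the paths are made up):

cfg = Config()
cfg.get("pungi", "compress_type")           # -> 'xz'
cfg.set("pungi", "destdir", "/srv/compose")
cfg = Config(pungirc="~/.pungirc")          # or load overrides from a file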

201
pungi/createiso.py Normal file
View File

@ -0,0 +1,201 @@
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import six
from collections import namedtuple
from kobo.shortcuts import run
from six.moves import shlex_quote
from .wrappers import iso
from .wrappers.jigdo import JigdoWrapper
from .phases.buildinstall import BOOT_CONFIGS, BOOT_IMAGES
CreateIsoOpts = namedtuple(
"CreateIsoOpts",
[
"buildinstall_method",
"boot_iso",
"arch",
"output_dir",
"jigdo_dir",
"iso_name",
"volid",
"graft_points",
"supported",
"os_tree",
"hfs_compat",
"use_xorrisofs",
"iso_level",
"script_dir",
],
)
CreateIsoOpts.__new__.__defaults__ = (None,) * len(CreateIsoOpts._fields)
def quote(str):
"""Quote an argument for shell, but make sure $TEMPLATE variable will be
expanded.
"""
if str.startswith("$TEMPLATE"):
return "$TEMPLATE%s" % shlex_quote(str.replace("$TEMPLATE", "", 1))
return shlex_quote(str)
def emit(f, cmd):
"""Print line of shell code into the stream."""
if isinstance(cmd, six.string_types):
print(cmd, file=f)
else:
print(" ".join([quote(x) for x in cmd]), file=f)
FIND_TEMPLATE_SNIPPET = """if ! TEMPLATE="$($(head -n1 $(which lorax) | cut -c3-) -c 'import pylorax; print(pylorax.find_templates())')"; then TEMPLATE=/usr/share/lorax; fi""" # noqa: E501
def make_image(f, opts):
mkisofs_kwargs = {}
if opts.buildinstall_method:
if opts.buildinstall_method == "lorax":
emit(f, FIND_TEMPLATE_SNIPPET)
mkisofs_kwargs["boot_args"] = iso.get_boot_options(
opts.arch,
os.path.join("$TEMPLATE", "config_files/ppc"),
hfs_compat=opts.hfs_compat,
)
# ppc(64) doesn't seem to support utf-8
if opts.arch in ("ppc", "ppc64", "ppc64le"):
mkisofs_kwargs["input_charset"] = None
cmd = iso.get_mkisofs_cmd(
opts.iso_name,
None,
volid=opts.volid,
exclude=["./lost+found"],
graft_points=opts.graft_points,
use_xorrisofs=opts.use_xorrisofs,
iso_level=opts.iso_level,
**mkisofs_kwargs
)
emit(f, cmd)
def implant_md5(f, opts):
cmd = iso.get_implantisomd5_cmd(opts.iso_name, opts.supported)
emit(f, cmd)
def run_isohybrid(f, opts):
"""If the image is bootable, it should include an MBR or GPT so that it can
be booted when written to USB disk. This is done by running isohybrid on
the image.
"""
if opts.buildinstall_method and opts.arch in ["x86_64", "i386"]:
cmd = iso.get_isohybrid_cmd(opts.iso_name, opts.arch)
emit(f, cmd)
def make_manifest(f, opts):
emit(f, iso.get_manifest_cmd(opts.iso_name, opts.use_xorrisofs))
def make_jigdo(f, opts):
jigdo = JigdoWrapper()
files = [{"path": opts.os_tree, "label": None, "uri": None}]
cmd = jigdo.get_jigdo_cmd(
os.path.join(opts.output_dir, opts.iso_name),
files,
output_dir=opts.jigdo_dir,
no_servers=True,
report="noprogress",
)
emit(f, cmd)
def _get_perms(fs_path):
"""Compute proper permissions for a file.
This mimics what the -rational-rock option of genisoimage does. All read bits
are set, so that files and directories are globally readable. If any
execute bit is set for a file, set them all. No writes are allowed and
special bits are erased too.
"""
statinfo = os.stat(fs_path)
perms = 0o444
if statinfo.st_mode & 0o111:
perms |= 0o111
return perms
def write_xorriso_commands(opts):
# Create manifest for the boot.iso listing all contents
boot_iso_manifest = "%s.manifest" % os.path.join(
opts.script_dir, os.path.basename(opts.boot_iso)
)
run(
iso.get_manifest_cmd(
opts.boot_iso, opts.use_xorrisofs, output_file=boot_iso_manifest
)
)
# Find which files may have been updated by pungi. This only includes a few
# files from tweaking buildinstall and .discinfo metadata. There's no good
# way to detect whether the boot config files actually changed, so we may
# be updating files in the ISO with the same data.
UPDATEABLE_FILES = set(BOOT_IMAGES + BOOT_CONFIGS + [".discinfo"])
updated_files = set()
excluded_files = set()
with open(boot_iso_manifest) as f:
for line in f:
path = line.lstrip("/").rstrip("\n")
if path in UPDATEABLE_FILES:
updated_files.add(path)
else:
excluded_files.add(path)
script = os.path.join(opts.script_dir, "xorriso-%s.txt" % id(opts))
with open(script, "w") as f:
for cmd in iso.xorriso_commands(
opts.arch, opts.boot_iso, os.path.join(opts.output_dir, opts.iso_name)
):
emit(f, " ".join(cmd))
emit(f, "-volid %s" % opts.volid)
with open(opts.graft_points) as gp:
for line in gp:
iso_path, fs_path = line.strip().split("=", 1)
if iso_path in excluded_files:
continue
cmd = "-update" if iso_path in updated_files else "-map"
emit(f, "%s %s %s" % (cmd, fs_path, iso_path))
emit(f, "-chmod 0%o %s" % (_get_perms(fs_path), iso_path))
emit(f, "-chown_r 0 /")
emit(f, "-chgrp_r 0 /")
emit(f, "-end")
return script
def write_script(opts, f):
if bool(opts.jigdo_dir) != bool(opts.os_tree):
raise RuntimeError("jigdo_dir must be used together with os_tree")
emit(f, "#!/bin/bash")
emit(f, "set -ex")
emit(f, "cd %s" % opts.output_dir)
if opts.use_xorrisofs and opts.buildinstall_method:
script = write_xorriso_commands(opts)
emit(f, "xorriso -dialog on <%s" % script)
else:
make_image(f, opts)
run_isohybrid(f, opts)
implant_md5(f, opts)
make_manifest(f, opts)
if opts.jigdo_dir:
make_jigdo(f, opts)
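
Putting it together, generating the shell script for one ISO looks roughly like this (all names and paths are hypothetical):

opts = CreateIsoOpts(
    arch="x86_64",
    iso_name="Fedora-40-x86_64-dvd1.iso",
    volid="Fedora-40-x86_64",
    output_dir="/compose/iso",
    graft_points="iso-graft-points.txt",
    supported=True,
)
with open("createiso.sh", "w") as f:
    write_script(opts, f)  # emits the mkisofs, implantisomd5 and manifest commands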

155
pungi/dnf_wrapper.py Normal file
View File

@ -0,0 +1,155 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
# TODO: remove all DNF hacks, possibly this whole file
import dnf
import dnf.conf
import dnf.repo
import dnf.sack
import pungi.arch
try:
import dnf.rpm as dnf_arch
except ImportError:
import dnf.arch as dnf_arch
class Conf(dnf.conf.Conf):
# This is only modified to get our custom Substitutions class in.
def __init__(self, arch, *args, **kwargs):
super(Conf, self).__init__(*args, **kwargs)
self.substitutions = Substitutions(arch)
class Substitutions(dict):
# DNF version of Substitutions detects host arch. We don't want that.
def __init__(self, arch):
super(Substitutions, self).__init__()
self["arch"] = arch
self["basearch"] = dnf_arch.basearch(arch)
class DnfWrapper(dnf.Base):
def __init__(self, *args, **kwargs):
super(DnfWrapper, self).__init__(*args, **kwargs)
self.arch_wrapper = ArchWrapper(self.conf.substitutions["arch"])
self.comps_wrapper = CompsWrapper(self)
def add_repo(
self, repoid, baseurl=None, enablegroups=True, lookaside=False, **kwargs
):
self.repos.add_new_repo(
repoid,
self.conf,
baseurl=[baseurl],
enablegroups=enablegroups,
priority=10 if lookaside else 20,
module_hotfixes=True,
**kwargs
)
class CompsWrapper(object):
def __init__(self, dnf_obj):
self.dnf = dnf_obj
def __getitem__(self, name):
return self.groups[name]
@property
def comps(self):
return self.dnf.comps
@property
def groups(self):
result = {}
for i in self.comps.groups:
result[i.id] = i
return result
def get_packages_from_group(
self,
group_id,
include_default=True,
include_optional=True,
include_conditional=True,
):
packages = []
conditional = []
group = self.groups[group_id]
# add mandatory packages
packages.extend([i.name for i in group.mandatory_packages])
# add default packages
if include_default:
packages.extend([i.name for i in group.default_packages])
# add optional packages
if include_optional:
packages.extend([i.name for i in group.optional_packages])
for package in group.conditional_packages:
conditional.append({"name": package.requires, "install": package.name})
return packages, conditional
def get_comps_packages(self, groups, exclude_groups):
packages = set()
conditional = []
if isinstance(groups, list):
groups = dict([(i, 1) for i in groups])
for group_id, group_include in sorted(groups.items()):
if group_id in exclude_groups:
continue
include_default = group_include in (1, 2)
include_optional = group_include in (2,)
include_conditional = True
pkgs, cond = self.get_packages_from_group(
group_id, include_default, include_optional, include_conditional
)
packages.update(pkgs)
for i in cond:
if i not in conditional:
conditional.append(i)
return list(packages), conditional
def get_langpacks(self):
result = []
for name, install in self.comps._i.langpacks.items():
result.append({"name": name, "install": install})
return result
class ArchWrapper(object):
def __init__(self, arch):
self.base_arch = dnf_arch.basearch(arch)
self.all_arches = pungi.arch.get_valid_arches(
self.base_arch, multilib=True, add_noarch=True
)
self.native_arches = pungi.arch.get_valid_arches(
self.base_arch, multilib=False, add_noarch=True
)
self.multilib_arches = pungi.arch.get_valid_multilib_arches(self.base_arch)
self.source_arches = ["src", "nosrc"]
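
A minimal sketch of wiring the wrapper up; the repo id and URL are made up, and fill_sack/read_comps are the standard dnf.Base calls:

conf = Conf("x86_64")  # substitutions pinned to the target arch
dnf_obj = DnfWrapper(conf)
dnf_obj.add_repo("pungi-repo-0", baseurl="file:///tmp/repo")
dnf_obj.fill_sack(load_system_repo=False, load_available_repos=True)
dnf_obj.read_comps()
packages, conditional = dnf_obj.comps_wrapper.get_comps_packages({"core": 1}, [])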

20
pungi/errors.py Normal file
View File

@ -0,0 +1,20 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
class UnsignedPackagesError(RuntimeError):
"""Raised when package set fails to find a properly signed copy of an
RPM."""
pass

2297
pungi/gather.py Normal file

File diff suppressed because it is too large

1099
pungi/gather_dnf.py Normal file

File diff suppressed because it is too large

105
pungi/graph.py Executable file
View File

@ -0,0 +1,105 @@
# -*- coding: utf-8 -*-
class SimpleAcyclicOrientedGraph(object):
"""
Stores a graph data structure and allows operations on it.
Example data: {'P1': ['P2'], 'P3': ['P4', 'P5'], 'P2': ['P3']}
The graph is constructed by adding oriented edges one by one. It cannot
contain cycles. The main result is the spanning line, which determines the
ordering of the nodes.
"""
def __init__(self):
self._graph = {}
self._all_nodes = set()
def add_edge(self, start, end):
"""
Add one edge from node 'start' to node 'end'.
This operation must not create a cycle in the graph.
"""
if start == end:
raise ValueError(
"Can not add this kind of edge into graph: %s-%s" % (start, end)
)
self._graph.setdefault(start, [])
if end not in self._graph[start]:
self._graph[start].append(end)
self._all_nodes.add(start)
self._all_nodes.add(end)
# try to find opposite direction path (from end to start)
# to detect newly created cycle
path = SimpleAcyclicOrientedGraph.find_path(self._graph, end, start)
if path:
raise ValueError("There is a cycle in the graph: %s" % path)
def get_active_nodes(self):
"""
nodes connected to any edge
"""
active_nodes = set()
for start, ends in self._graph.items():
active_nodes.add(start)
active_nodes.update(ends)
return active_nodes
def is_final_endpoint(self, node):
"""
edge(s) ends in this node; no other edge starts in this node
"""
if node not in self._all_nodes:
raise ValueError("This node is not found in the graph: %s" % node)
if node not in self.get_active_nodes():
return False
return False if node in self._graph else True
def remove_final_endpoint(self, node):
""""""
remove_start_points = []
for start, ends in self._graph.items():
if node in ends:
ends.remove(node)
if not ends:
remove_start_points.append(start)
for start in remove_start_points:
del self._graph[start]
@staticmethod
def find_path(graph, start, end, path=[]):
"""
find path among nodes 'start' and 'end' recursively
"""
path = path + [start]
if start == end:
return path
if start not in graph:
return None
for node in graph[start]:
if node not in path:
newpath = SimpleAcyclicOrientedGraph.find_path(graph, node, end, path)
if newpath:
return newpath
return None
def prune_graph(self):
"""
Construct spanning_line by pruning the graph.
Looking for endpoints and remove them one by one until graph is empty.
"""
spanning_line = []
while self._graph:
for node in sorted(self._all_nodes):
if self.is_final_endpoint(node):
self.remove_final_endpoint(node)
spanning_line.insert(0, node)
# orphan node = no edge is connected with this node
orphans = self._all_nodes - self.get_active_nodes()
if orphans:
# restart the iteration so we do not change the size of
# self._all_nodes during iteration
break
for orphan in orphans:
if orphan not in spanning_line:
spanning_line.insert(0, orphan)
self._all_nodes.remove(orphan)
return spanning_line
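
For example, with two variants sharing one lookaside (the variant names are illustrative):

g = SimpleAcyclicOrientedGraph()
g.add_edge("Server", "BaseOS")       # Server -> its lookaside BaseOS
g.add_edge("Workstation", "BaseOS")
g.prune_graph()
# -> e.g. ['Server', 'Workstation', 'BaseOS']; reversed by the caller in
#    compose.py so that the lookaside variant is processed first
g.add_edge("BaseOS", "Server")       # ValueError: There is a cycle in the graph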

223
pungi/ks.py Normal file
View File

@ -0,0 +1,223 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
"""
Pungi adds several new sections to kickstarts.
FULLTREE EXCLUDES
-----------------
Fulltree excludes allow us to define SRPM names
we don't want to be part of fulltree processing.
Syntax:
%fulltree-excludes
<srpm_name>
<srpm_name>
...
%end
MULTILIB BLACKLIST
------------------
List of RPMs which are prevented from becoming multilib.
Syntax:
%multilib-blacklist
<rpm_name>
<rpm_name>
...
%end
MULTILIB WHITELIST
------------------
List of RPMs which will become multilib (but only if native package is pulled in).
Syntax:
%multilib-whitelist
<rpm_name>
<rpm_name>
...
%end
PREPOPULATE
-----------
To make sure no package is left behind between 2 composes,
we can explicitly add <name>.<arch> records to the %prepopulate section.
These will be added to the input list and marked with 'prepopulate' flag.
Syntax:
%prepopulate
<rpm_name>.<rpm_arch>
<rpm_name>.<rpm_arch>
...
%end
"""
import pykickstart.parser
import pykickstart.sections
from pykickstart.constants import GROUP_REQUIRED, GROUP_DEFAULT
class FulltreeExcludesSection(pykickstart.sections.Section):
sectionOpen = "%fulltree-excludes"
def handleLine(self, line):
if not self.handler:
return
(h, s, t) = line.partition("#")
line = h.rstrip()
self.handler.fulltree_excludes.add(line)
class MultilibBlacklistSection(pykickstart.sections.Section):
sectionOpen = "%multilib-blacklist"
def handleLine(self, line):
if not self.handler:
return
(h, s, t) = line.partition("#")
line = h.rstrip()
self.handler.multilib_blacklist.add(line)
class MultilibWhitelistSection(pykickstart.sections.Section):
sectionOpen = "%multilib-whitelist"
def handleLine(self, line):
if not self.handler:
return
(h, s, t) = line.partition("#")
line = h.rstrip()
self.handler.multilib_whitelist.add(line)
class PrepopulateSection(pykickstart.sections.Section):
sectionOpen = "%prepopulate"
def handleLine(self, line):
if not self.handler:
return
(h, s, t) = line.partition("#")
line = h.rstrip()
self.handler.prepopulate.add(line)
class KickstartParser(pykickstart.parser.KickstartParser):
def setupSections(self):
pykickstart.parser.KickstartParser.setupSections(self)
self.registerSection(FulltreeExcludesSection(self.handler))
self.registerSection(MultilibBlacklistSection(self.handler))
self.registerSection(MultilibWhitelistSection(self.handler))
self.registerSection(PrepopulateSection(self.handler))
def get_packages(self, dnf_obj):
packages = set()
conditional_packages = []
packages.update(self.handler.packages.packageList)
for ks_group in self.handler.packages.groupList:
group_id = ks_group.name
if ks_group.include == GROUP_REQUIRED:
include_default = False
include_optional = False
elif ks_group.include == GROUP_DEFAULT:
include_default = True
include_optional = False
else:
include_default = True
include_optional = True
(
group_packages,
group_conditional_packages,
) = dnf_obj.comps_wrapper.get_packages_from_group(
group_id,
include_default=include_default,
include_optional=include_optional,
include_conditional=True,
)
packages.update(group_packages)
for i in group_conditional_packages:
if i not in conditional_packages:
conditional_packages.append(i)
return packages, conditional_packages
def get_excluded_packages(self, dnf_obj):
excluded = set()
excluded.update(self.handler.packages.excludedList)
for ks_group in self.handler.packages.excludedGroupList:
group_id = ks_group.name
include_default = False
include_optional = False
if ks_group.include == 1:
include_default = True
if ks_group.include == 2:
include_default = True
include_optional = True
(
group_packages,
group_conditional_packages,
) = dnf_obj.comps_wrapper.get_packages_from_group(
group_id,
include_default=include_default,
include_optional=include_optional,
include_conditional=False,
)
excluded.update(group_packages)
return excluded
HandlerClass = pykickstart.version.returnClassForVersion()
class PungiHandler(HandlerClass):
def __init__(self, *args, **kwargs):
HandlerClass.__init__(self, *args, **kwargs)
self.fulltree_excludes = set()
self.multilib_blacklist = set()
self.multilib_whitelist = set()
self.prepopulate = set()
def get_ksparser(ks_path=None):
"""
Return a kickstart parser instance.
Read kickstart if ks_path provided.
"""
ksparser = KickstartParser(PungiHandler())
if ks_path:
ksparser.readKickstart(ks_path)
return ksparser
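
Reading a kickstart that uses the extra sections then looks like this (the file name is hypothetical; dnf_obj is a DnfWrapper):

ksparser = get_ksparser("fedora.ks")
ksparser.handler.fulltree_excludes   # set of SRPM names from %fulltree-excludes
ksparser.handler.multilib_whitelist  # set of RPM names from %multilib-whitelist
packages, conditional = ksparser.get_packages(dnf_obj)
excluded = ksparser.get_excluded_packages(dnf_obj)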

247
pungi/linker.py Normal file
View File

@ -0,0 +1,247 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
import contextlib
import errno
import os
import shutil
import kobo.log
from kobo.shortcuts import relative_path
from kobo.threads import WorkerThread, ThreadPool
from pungi.util import makedirs
class LinkerPool(ThreadPool):
def __init__(self, link_type="hardlink-or-copy", logger=None):
ThreadPool.__init__(self, logger)
self.link_type = link_type
self.linker = Linker()
@classmethod
def with_workers(cls, num_workers, *args, **kwargs):
pool = cls(*args, **kwargs)
for _ in range(num_workers):
pool.add(LinkerThread(pool))
return pool
@contextlib.contextmanager
def linker_pool(link_type="hardlink-or-copy", num_workers=10):
"""Create a linker and make sure it is stopped no matter what."""
linker = LinkerPool.with_workers(num_workers=num_workers, link_type=link_type)
linker.start()
try:
yield linker
finally:
linker.stop()
class LinkerThread(WorkerThread):
def process(self, item, num):
src, dst = item
if (num % 100 == 0) or (num == self.pool.queue_total):
self.pool.log_debug(
"Linked %s out of %s packages" % (num, self.pool.queue_total)
)
directory = os.path.dirname(dst)
makedirs(directory)
self.pool.linker.link(src, dst, link_type=self.pool.link_type)
class Linker(kobo.log.LoggingBase):
def __init__(self, always_copy=None, test=False, logger=None):
kobo.log.LoggingBase.__init__(self, logger=logger)
self.always_copy = always_copy or []
self.test = test
self._inode_map = {}
def _is_same_type(self, path1, path2):
if not os.path.islink(path1) == os.path.islink(path2):
return False
if not os.path.isdir(path1) == os.path.isdir(path2):
return False
if not os.path.isfile(path1) == os.path.isfile(path2):
return False
return True
def _is_same(self, path1, path2):
if path1 == path2:
return True
if os.path.islink(path2) and not os.path.exists(path2):
# Broken symlink
return True
if os.path.getsize(path1) != os.path.getsize(path2):
return False
if int(os.path.getmtime(path1)) != int(os.path.getmtime(path2)):
return False
return True
def symlink(self, src, dst, relative=True):
if src == dst:
return
        # Always hardlink or copy scratch builds
        if "/work/tasks/" in src:
            self._link_file(src, dst, "hardlink-or-copy")
            return
old_src = src
if relative:
src = relative_path(src, dst)
msg = "Symlinking %s -> %s" % (dst, src)
if self.test:
self.log_info("TEST: %s" % msg)
return
self.log_info(msg)
try:
os.symlink(src, dst)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if os.path.islink(dst) and self._is_same(old_src, dst):
if os.readlink(dst) != src:
raise
self.log_debug(
"The same file already exists, skipping symlink %s -> %s"
% (dst, src)
)
else:
raise
def hardlink(self, src, dst):
if src == dst:
return
msg = "Hardlinking %s to %s" % (src, dst)
if self.test:
self.log_info("TEST: %s" % msg)
return
self.log_info(msg)
try:
os.link(src, dst)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if self._is_same(src, dst):
if not self._is_same_type(src, dst):
self.log_error(
"File %s already exists but has different type than %s"
% (dst, src)
)
raise
self.log_debug(
"The same file already exists, skipping hardlink %s to %s"
% (src, dst)
)
else:
raise
def copy(self, src, dst):
if src == dst:
return True
if os.path.islink(src):
msg = "Copying symlink %s to %s" % (src, dst)
else:
msg = "Copying file %s to %s" % (src, dst)
if self.test:
self.log_info("TEST: %s" % msg)
return
self.log_info(msg)
if os.path.exists(dst):
if self._is_same(src, dst):
if not self._is_same_type(src, dst):
self.log_error(
"File %s already exists but has different type than %s"
% (dst, src)
)
raise OSError(errno.EEXIST, "File exists")
self.log_debug(
"The same file already exists, skipping copy %s to %s" % (src, dst)
)
return
else:
raise OSError(errno.EEXIST, "File exists")
if os.path.islink(src):
if not os.path.islink(dst):
os.symlink(os.readlink(src), dst)
return
return
src_stat = os.stat(src)
src_key = (src_stat.st_dev, src_stat.st_ino)
if src_key in self._inode_map:
# (st_dev, st_ino) found in the mapping
self.log_debug(
"Harlink detected, hardlinking in destination %s to %s"
% (self._inode_map[src_key], dst)
)
os.link(self._inode_map[src_key], dst)
return
        # BEWARE: shutil.copy2 automatically *overwrites* existing files
shutil.copy2(src, dst)
self._inode_map[src_key] = dst
def _link_file(self, src, dst, link_type):
if link_type == "hardlink":
self.hardlink(src, dst)
elif link_type == "copy":
self.copy(src, dst)
elif link_type in ("symlink", "abspath-symlink"):
if os.path.islink(src):
self.copy(src, dst)
else:
relative = link_type != "abspath-symlink"
self.symlink(src, dst, relative)
elif link_type == "hardlink-or-copy":
try:
self.hardlink(src, dst)
except OSError as ex:
if ex.errno == errno.EXDEV:
self.copy(src, dst)
else:
raise
else:
raise ValueError("Unknown link_type: %s" % link_type)
def link(self, src, dst, link_type="hardlink-or-copy"):
"""Link directories recursively."""
if os.path.isfile(src) or os.path.islink(src):
self._link_file(src, dst, link_type)
return
if os.path.isfile(dst):
raise OSError(errno.EEXIST, "File exists")
if not self.test:
if not os.path.exists(dst):
makedirs(dst)
shutil.copystat(src, dst)
for i in os.listdir(src):
src_path = os.path.join(src, i)
dst_path = os.path.join(dst, i)
self.link(src_path, dst_path, link_type)
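# Example usage (a minimal sketch; paths are hypothetical):
#
#     linker = Linker()
#     # recursively hardlink a tree, falling back to copy across filesystems
#     linker.link("/mnt/koji/repo", "/compose/os", link_type="hardlink-or-copy")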

127
pungi/media_split.py Normal file
View File

@ -0,0 +1,127 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
import os
SIZE_UNITS = {
"b": 1,
"k": 1024,
"M": 1024**2,
"G": 1024**3,
}
def convert_media_size(size):
if isinstance(size, str):
if size[-1] in SIZE_UNITS:
num = int(size[:-1])
units = size[-1]
else:
num = int(size)
units = "b"
result = num * SIZE_UNITS[units]
else:
result = int(size)
if result <= 0:
raise ValueError("Media size must be a positive number: %s" % size)
return result
def convert_file_size(size, block_size=2048):
"""round file size to block"""
blocks = int(size / block_size)
if size % block_size:
blocks += 1
return blocks * block_size
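# A few worked examples of the conversions above:
#
#     convert_media_size("4G")   == 4 * 1024 ** 3
#     convert_media_size("650k") == 650 * 1024
#     convert_media_size(8192)   == 8192
#     convert_file_size(1)       == 2048  # rounded up to one 2048-byte block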
class MediaSplitter(object):
"""
MediaSplitter splits files so that they fit on a media of given size.
    Each file added to the splitter has a size in bytes that will be rounded
    up to the nearest multiple of the block size. If a file is sticky, it
    will be included on every disk. Files are placed on disks in the same
    order as they are added; there is no re-ordering. The number of disks is
    therefore not necessarily the possible minimum.
"""
def __init__(self, media_size, compose=None, logger=None):
self.media_size = media_size
self.files = [] # to preserve order
self.file_sizes = {}
self.sticky_files = set()
self.compose = compose
self.logger = logger
if not self.logger and self.compose:
self.logger = self.compose._logger
def add_file(self, name, size, sticky=False):
name = os.path.normpath(name)
size = int(size)
old_size = self.file_sizes.get(name, None)
if old_size is not None and old_size != size:
raise ValueError(
"File size mismatch; file: %s; sizes: %s vs %s" % (name, old_size, size)
)
if self.media_size and size > self.media_size:
raise ValueError("File is larger than media size: %s" % name)
self.files.append(name)
self.file_sizes[name] = size
if sticky:
self.sticky_files.add(name)
@property
def total_size(self):
return sum(self.file_sizes.values())
@property
def total_size_in_blocks(self):
return sum([convert_file_size(i) for i in list(self.file_sizes.values())])
def split(self, first_disk=0, all_disks=0):
all_files = []
sticky_files = []
sticky_files_size = 0
for name in self.files:
if name in self.sticky_files:
sticky_files.append(name)
sticky_files_size += convert_file_size(self.file_sizes[name])
else:
all_files.append(name)
disks = []
disk = {}
        # total size as it would be on a single medium (sticky_files counted just once)
total_size_single = sticky_files_size
while all_files:
name = all_files.pop(0)
size = convert_file_size(self.file_sizes[name])
if not disks or (self.media_size and disk["size"] + size > self.media_size):
disk = {"size": sticky_files_size, "files": sticky_files[:]}
disks.append(disk)
disk["files"].append(name)
disk["size"] += size
total_size_single += size
return disks
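# Example usage (a minimal sketch; names and sizes are made up):
#
#     ms = MediaSplitter(convert_media_size("4700M"))
#     ms.add_file("Packages/a.rpm", 3 * 1024 ** 3)
#     ms.add_file("Packages/b.rpm", 2 * 1024 ** 3)
#     ms.add_file("repodata/repomd.xml", 4096, sticky=True)  # on every disk
#     disks = ms.split()
#     # -> two disks here, each of which starts with the sticky file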

537
pungi/metadata.py Normal file
View File

@ -0,0 +1,537 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
import copy
import os
import time
import productmd.composeinfo
import productmd.treeinfo
from productmd.common import get_major_version
from kobo.shortcuts import relative_path, compute_file_checksums
from pungi.compose_metadata.discinfo import write_discinfo as create_discinfo
from pungi.compose_metadata.discinfo import write_media_repo as create_media_repo
def get_description(compose, variant, arch):
if "release_discinfo_description" in compose.conf:
result = compose.conf["release_discinfo_description"]
elif variant.type == "layered-product":
        # we need to make sure the layered product behaves as if it were composed separately
result = "%s %s for %s %s" % (
variant.release_name,
variant.release_version,
compose.conf["release_name"],
get_major_version(compose.conf["release_version"]),
)
else:
result = "%s %s" % (
compose.conf["release_name"],
compose.conf["release_version"],
)
if compose.conf.get("base_product_name", ""):
result += " for %s %s" % (
compose.conf["base_product_name"],
compose.conf["base_product_version"],
)
result = result % {"variant_name": variant.name, "arch": arch}
return result
def write_discinfo(compose, arch, variant):
if variant.type == "addon":
return
os_tree = compose.paths.compose.os_tree(arch, variant)
path = os.path.join(os_tree, ".discinfo")
# description = get_volid(compose, arch, variant)
description = get_description(compose, variant, arch)
return create_discinfo(path, description, arch)
def write_media_repo(compose, arch, variant, timestamp=None):
if variant.type == "addon":
return
os_tree = compose.paths.compose.os_tree(arch, variant)
path = os.path.join(os_tree, "media.repo")
# description = get_volid(compose, arch, variant)
description = get_description(compose, variant, arch)
return create_media_repo(path, description, timestamp)
def compose_to_composeinfo(compose):
ci = productmd.composeinfo.ComposeInfo()
# compose
ci.compose.id = compose.compose_id
ci.compose.type = compose.compose_type
ci.compose.date = compose.compose_date
ci.compose.respin = compose.compose_respin
ci.compose.label = compose.compose_label
ci.compose.final = compose.supported
# product
ci.release.name = compose.conf["release_name"]
ci.release.version = compose.conf["release_version"]
ci.release.short = compose.conf["release_short"]
ci.release.is_layered = True if compose.conf.get("base_product_name", "") else False
ci.release.type = compose.conf["release_type"].lower()
ci.release.internal = bool(compose.conf["release_internal"])
# base product
if ci.release.is_layered:
ci.base_product.name = compose.conf["base_product_name"]
ci.base_product.version = compose.conf["base_product_version"]
ci.base_product.short = compose.conf["base_product_short"]
ci.base_product.type = compose.conf["base_product_type"].lower()
def dump_variant(variant, parent=None):
var = productmd.composeinfo.Variant(ci)
tree_arches = compose.conf.get("tree_arches")
if tree_arches and not (set(variant.arches) & set(tree_arches)):
return None
# variant details
# remove dashes from variant ID, rely on productmd verification
var.id = variant.id.replace("-", "")
var.uid = variant.uid
var.name = variant.name
var.type = variant.type
var.arches = set(variant.arches)
if var.type == "layered-product":
var.release.name = variant.release_name
var.release.short = variant.release_short
var.release.version = variant.release_version
var.release.is_layered = True
var.release.type = ci.release.type
for arch in variant.arches:
# paths: binaries
var.paths.os_tree[arch] = relative_path(
compose.paths.compose.os_tree(
arch=arch, variant=variant, create_dir=False
).rstrip("/")
+ "/",
compose.paths.compose.topdir().rstrip("/") + "/",
).rstrip("/")
var.paths.repository[arch] = relative_path(
compose.paths.compose.repository(
arch=arch, variant=variant, create_dir=False
).rstrip("/")
+ "/",
compose.paths.compose.topdir().rstrip("/") + "/",
).rstrip("/")
var.paths.packages[arch] = relative_path(
compose.paths.compose.packages(
arch=arch, variant=variant, create_dir=False
).rstrip("/")
+ "/",
compose.paths.compose.topdir().rstrip("/") + "/",
).rstrip("/")
iso_dir = (
compose.paths.compose.iso_dir(
arch=arch, variant=variant, create_dir=False
)
or ""
)
if iso_dir and os.path.isdir(
os.path.join(compose.paths.compose.topdir(), iso_dir)
):
var.paths.isos[arch] = relative_path(
iso_dir, compose.paths.compose.topdir().rstrip("/") + "/"
).rstrip("/")
image_dir = compose.paths.compose.image_dir(variant=variant) or ""
if image_dir:
image_dir = image_dir % {"arch": arch}
if os.path.isdir(image_dir):
var.paths.images[arch] = relative_path(
image_dir, compose.paths.compose.topdir().rstrip("/") + "/"
).rstrip("/")
jigdo_dir = (
compose.paths.compose.jigdo_dir(
arch=arch, variant=variant, create_dir=False
)
or ""
)
if jigdo_dir and os.path.isdir(
os.path.join(compose.paths.compose.topdir(), jigdo_dir)
):
var.paths.jigdos[arch] = relative_path(
jigdo_dir, compose.paths.compose.topdir().rstrip("/") + "/"
).rstrip("/")
# paths: sources
var.paths.source_tree[arch] = relative_path(
compose.paths.compose.os_tree(
arch="source", variant=variant, create_dir=False
).rstrip("/")
+ "/",
compose.paths.compose.topdir().rstrip("/") + "/",
).rstrip("/")
var.paths.source_repository[arch] = relative_path(
compose.paths.compose.repository(
arch="source", variant=variant, create_dir=False
).rstrip("/")
+ "/",
compose.paths.compose.topdir().rstrip("/") + "/",
).rstrip("/")
var.paths.source_packages[arch] = relative_path(
compose.paths.compose.packages(
arch="source", variant=variant, create_dir=False
).rstrip("/")
+ "/",
compose.paths.compose.topdir().rstrip("/") + "/",
).rstrip("/")
source_iso_dir = (
compose.paths.compose.iso_dir(
arch="source", variant=variant, create_dir=False
)
or ""
)
if source_iso_dir and os.path.isdir(
os.path.join(compose.paths.compose.topdir(), source_iso_dir)
):
var.paths.source_isos[arch] = relative_path(
source_iso_dir, compose.paths.compose.topdir().rstrip("/") + "/"
).rstrip("/")
source_jigdo_dir = (
compose.paths.compose.jigdo_dir(
arch="source", variant=variant, create_dir=False
)
or ""
)
if source_jigdo_dir and os.path.isdir(
os.path.join(compose.paths.compose.topdir(), source_jigdo_dir)
):
var.paths.source_jigdos[arch] = relative_path(
source_jigdo_dir, compose.paths.compose.topdir().rstrip("/") + "/"
).rstrip("/")
# paths: debug
var.paths.debug_tree[arch] = relative_path(
compose.paths.compose.debug_tree(
arch=arch, variant=variant, create_dir=False
).rstrip("/")
+ "/",
compose.paths.compose.topdir().rstrip("/") + "/",
).rstrip("/")
var.paths.debug_repository[arch] = relative_path(
compose.paths.compose.debug_repository(
arch=arch, variant=variant, create_dir=False
).rstrip("/")
+ "/",
compose.paths.compose.topdir().rstrip("/") + "/",
).rstrip("/")
var.paths.debug_packages[arch] = relative_path(
compose.paths.compose.debug_packages(
arch=arch, variant=variant, create_dir=False
).rstrip("/")
+ "/",
compose.paths.compose.topdir().rstrip("/") + "/",
).rstrip("/")
"""
# XXX: not supported (yet?)
debug_iso_dir = (
compose.paths.compose.debug_iso_dir(arch=arch, variant=variant) or ""
)
if debug_iso_dir:
var.debug_iso_dir[arch] = relative_path(
debug_iso_dir, compose.paths.compose.topdir().rstrip("/") + "/"
).rstrip("/")
debug_jigdo_dir = (
compose.paths.compose.debug_jigdo_dir(arch=arch, variant=variant) or ""
)
if debug_jigdo_dir:
var.debug_jigdo_dir[arch] = relative_path(
debug_jigdo_dir, compose.paths.compose.topdir().rstrip("/") + "/"
).rstrip("/")
"""
for v in variant.get_variants(recursive=False):
x = dump_variant(v, parent=variant)
if x is not None:
var.add(x)
return var
for variant_id in sorted(compose.variants):
variant = compose.variants[variant_id]
v = dump_variant(variant)
if v is not None:
ci.variants.add(v)
return ci
def write_compose_info(compose):
ci = compose_to_composeinfo(compose)
msg = "Writing composeinfo"
compose.log_info("[BEGIN] %s" % msg)
path = compose.paths.compose.metadata("composeinfo.json")
# make a copy of composeinfo and modify the copy
    # If any path in the variant paths doesn't exist or is just an empty
    # dir, set it to None so that it won't be dumped.
ci_copy = copy.deepcopy(ci)
for variant in ci_copy.variants.variants.values():
for field in variant.paths._fields:
field_paths = getattr(variant.paths, field)
for arch, dirpath in field_paths.items():
dirpath = os.path.join(compose.paths.compose.topdir(), dirpath)
if not os.path.isdir(dirpath):
# If the directory does not exist, do not include the path
# in metadata.
field_paths[arch] = None
ci_copy.dump(path)
compose.log_info("[DONE ] %s" % msg)
def write_tree_info(compose, arch, variant, timestamp=None, bi=None):
if variant.type in ("addon",) or variant.is_empty:
return
if not timestamp:
timestamp = int(time.time())
else:
timestamp = int(timestamp)
os_tree = (
compose.paths.compose.os_tree(arch=arch, variant=variant).rstrip("/") + "/"
)
ti = productmd.treeinfo.TreeInfo()
# load from buildinstall .treeinfo
if variant.type == "layered-product":
        # we need to make sure the layered product behaves as if it were composed separately
# release
# TODO: read from variants.xml
ti.release.name = variant.release_name
ti.release.version = variant.release_version
ti.release.short = variant.release_short
ti.release.is_layered = True
ti.release.type = compose.conf["release_type"].lower()
# base product
ti.base_product.name = compose.conf["release_name"]
if "." in compose.conf["release_version"]:
# remove minor version if present
ti.base_product.version = get_major_version(compose.conf["release_version"])
else:
ti.base_product.version = compose.conf["release_version"]
ti.base_product.short = compose.conf["release_short"]
else:
# release
ti.release.name = compose.conf["release_name"]
ti.release.version = compose.conf.get(
"treeinfo_version", compose.conf["release_version"]
)
ti.release.short = compose.conf["release_short"]
ti.release.is_layered = (
True if compose.conf.get("base_product_name", "") else False
)
ti.release.type = compose.conf["release_type"].lower()
# base product
if ti.release.is_layered:
ti.base_product.name = compose.conf["base_product_name"]
ti.base_product.version = compose.conf["base_product_version"]
ti.base_product.short = compose.conf["base_product_short"]
# tree
ti.tree.arch = arch
ti.tree.build_timestamp = timestamp
# ti.platforms
# main variant
var = productmd.treeinfo.Variant(ti)
if variant.type == "layered-product":
var.id = variant.parent.id
var.uid = variant.parent.uid
var.name = variant.parent.name
var.type = "variant"
else:
# remove dashes from variant ID, rely on productmd verification
var.id = variant.id.replace("-", "")
var.uid = variant.uid
var.name = variant.name
var.type = variant.type
var.paths.packages = (
relative_path(
compose.paths.compose.packages(
arch=arch, variant=variant, create_dir=False
).rstrip("/")
+ "/",
os_tree,
).rstrip("/")
or "."
)
var.paths.repository = (
relative_path(
compose.paths.compose.repository(
arch=arch, variant=variant, create_dir=False
).rstrip("/")
+ "/",
os_tree,
).rstrip("/")
or "."
)
ti.variants.add(var)
repomd_path = os.path.join(var.paths.repository, "repodata", "repomd.xml")
createrepo_checksum = compose.conf["createrepo_checksum"]
if os.path.isfile(repomd_path):
ti.checksums.add(repomd_path, createrepo_checksum, root_dir=os_tree)
for i in variant.get_variants(types=["addon"], arch=arch):
addon = productmd.treeinfo.Variant(ti)
addon.id = i.id
addon.uid = i.uid
addon.name = i.name
addon.type = i.type
compose.log_debug(
"variant '%s' inserting addon uid '%s' type '%s'"
% (variant, addon.uid, addon.type)
)
os_tree = compose.paths.compose.os_tree(arch=arch, variant=i).rstrip("/") + "/"
addon.paths.packages = (
relative_path(
compose.paths.compose.packages(
arch=arch, variant=i, create_dir=False
).rstrip("/")
+ "/",
os_tree,
).rstrip("/")
or "."
)
addon.paths.repository = (
relative_path(
compose.paths.compose.repository(
arch=arch, variant=i, create_dir=False
).rstrip("/")
+ "/",
os_tree,
).rstrip("/")
or "."
)
var.add(addon)
repomd_path = os.path.join(addon.paths.repository, "repodata", "repomd.xml")
if os.path.isfile(repomd_path):
ti.checksums.add(repomd_path, createrepo_checksum, root_dir=os_tree)
class LoraxProduct(productmd.treeinfo.Release):
def _validate_short(self):
# HACK: set self.short so .treeinfo produced by lorax can be read
if not self.short:
self.short = compose.conf["release_short"]
class LoraxTreeInfo(productmd.treeinfo.TreeInfo):
def __init__(self, *args, **kwargs):
super(LoraxTreeInfo, self).__init__(*args, **kwargs)
self.release = LoraxProduct(self)
# images
if variant.type == "variant" and bi.succeeded(variant, arch):
os_tree = compose.paths.compose.os_tree(arch, variant)
# clone all but 'general' sections from buildinstall .treeinfo
bi_dir = compose.paths.work.buildinstall_dir(arch)
if compose.conf.get("buildinstall_method") == "lorax":
# The .treeinfo file produced by lorax is nested in variant
# subdirectory. Legacy buildinstall runs once per arch, so there is
# only one file.
bi_dir = os.path.join(bi_dir, variant.uid)
bi_treeinfo = os.path.join(bi_dir, ".treeinfo")
if os.path.exists(bi_treeinfo):
bi_ti = LoraxTreeInfo()
bi_ti.load(bi_treeinfo)
# stage2 - mainimage
if bi_ti.stage2.mainimage:
ti.stage2.mainimage = bi_ti.stage2.mainimage
ti.checksums.add(
ti.stage2.mainimage, createrepo_checksum, root_dir=os_tree
)
# stage2 - instimage
if bi_ti.stage2.instimage:
ti.stage2.instimage = bi_ti.stage2.instimage
ti.checksums.add(
ti.stage2.instimage, createrepo_checksum, root_dir=os_tree
)
# images
for platform in bi_ti.images.images:
ti.images.images[platform] = {}
ti.tree.platforms.add(platform)
for image, path in bi_ti.images.images[platform].items():
if not path:
# The .treeinfo file contains an image without a path.
# We can't add that.
continue
ti.images.images[platform][image] = path
ti.checksums.add(path, createrepo_checksum, root_dir=os_tree)
path = os.path.join(
compose.paths.compose.os_tree(arch=arch, variant=variant), ".treeinfo"
)
compose.log_info("Writing treeinfo: %s" % path)
ti.dump(path)
def populate_extra_files_metadata(
metadata, variant, arch, topdir, files, checksum_types, relative_root=None
):
"""
:param metadata: an instance of productmd.extra_files.ExtraFiles to
populate with the current files
:param Variant variant: under which variant should the files be listed
:param str arch: under which arch should the files be listed
:param topdir: directory where files are located
:param files: list of file paths relative to topdir
:param checksum_types: list of checksums to compute
:param relative_root: ancestor directory of topdir, this will be removed
from paths written to local metadata file
"""
for copied_file in files:
full_path = os.path.join(topdir, copied_file)
size = os.path.getsize(full_path)
try:
checksums = compute_file_checksums(full_path, checksum_types)
except IOError as exc:
raise RuntimeError(
"Failed to calculate checksum for %s: %s" % (full_path, exc)
)
if relative_root:
copied_file = os.path.relpath(full_path, relative_root)
metadata.add(variant.uid, arch, copied_file, size, checksums)
strip_prefix = (
(os.path.relpath(topdir, relative_root) + "/") if relative_root else ""
)
with open(os.path.join(topdir, "extra_files.json"), "w") as f:
metadata.dump_for_tree(f, variant.uid, arch, strip_prefix)
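# Example usage (a minimal sketch; assumes productmd's ExtraFiles metadata
# class, a pungi Variant object, and hypothetical paths):
#
#     from productmd.extra_files import ExtraFiles
#     metadata = ExtraFiles()
#     populate_extra_files_metadata(
#         metadata, variant, "x86_64", "/compose/Server/x86_64/os",
#         ["GPL", "EULA"], ["md5", "sha256"],
#     )
#     # records sizes/checksums and writes extra_files.json under the topdir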

118
pungi/module_util.py Normal file
View File

@ -0,0 +1,118 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
import glob
import os
try:
import gi
gi.require_version("Modulemd", "2.0") # noqa
from gi.repository import Modulemd
except (ImportError, ValueError):
Modulemd = None
def iter_module_defaults(path):
"""Given a path to a directory with yaml files, yield each module default
in there as a pair (module_name, ModuleDefaults instance).
"""
# It is really tempting to merge all the module indexes into a single one
# and work with it. However that does not allow for detecting conflicting
# defaults. That should not happen in practice, but better safe than sorry.
    # Once libmodulemd can report the error, this code can be simplified by a
# lot. It was implemented in
# https://github.com/fedora-modularity/libmodulemd/commit/3087e4a5c38a331041fec9b6b8f1a372f9ffe64d
# and released in 2.6.0, but 2.8.0 added the need to merge overrides and
# that breaks this use case again.
for file in glob.glob(os.path.join(path, "*.yaml")):
index = Modulemd.ModuleIndex()
index.update_from_file(file, strict=False)
for module_name in index.get_module_names():
yield module_name, index.get_module(module_name).get_defaults()
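# Example usage (a minimal sketch; the directory is hypothetical and the
# Defaults API comes from libmodulemd):
#
#     for name, defaults in iter_module_defaults("/work/module_defaults"):
#         print(name, defaults.get_default_stream())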
def get_module_obsoletes_idx(path, mod_list):
"""Given a path to a directory with yaml files, return Index with
merged all obsoletes.
"""
merger = Modulemd.ModuleIndexMerger.new()
md_idxs = []
    # associate_index does NOT copy its argument (nor increase the
    # reference count on the object). It only stores a pointer.
for file in glob.glob(os.path.join(path, "*.yaml")):
index = Modulemd.ModuleIndex()
index.update_from_file(file, strict=False)
mod_name = index.get_module_names()[0]
if mod_name and (mod_name in mod_list or not mod_list):
md_idxs.append(index)
merger.associate_index(md_idxs[-1], 0)
merged_idx = merger.resolve()
return merged_idx
def collect_module_defaults(
defaults_dir, modules_to_load=None, mod_index=None, overrides_dir=None
):
"""Load module defaults into index.
If `modules_to_load` is passed in, it should be a set of module names. Only
defaults for these modules will be loaded.
If `mod_index` is passed in, it will be updated and returned. If it was
    not, a new ModuleIndex will be created and returned.
"""
mod_index = mod_index or Modulemd.ModuleIndex()
temp_index = Modulemd.ModuleIndex.new()
temp_index.update_from_defaults_directory(
defaults_dir, overrides_path=overrides_dir, strict=False
)
for module_name in temp_index.get_module_names():
defaults = temp_index.get_module(module_name).get_defaults()
if not modules_to_load or module_name in modules_to_load:
mod_index.add_defaults(defaults)
return mod_index
def collect_module_obsoletes(obsoletes_dir, modules_to_load, mod_index=None):
"""Load module obsoletes into index.
    This works in a similar fashion to collect_module_defaults, except that it
    merges indexes together instead of adding them during iteration.
    Additionally, if modules_to_load is not empty, the returned Index will
    include only obsoletes for those modules.
"""
obsoletes_index = get_module_obsoletes_idx(obsoletes_dir, modules_to_load)
# Merge Obsoletes with Modules Index.
if mod_index:
merger = Modulemd.ModuleIndexMerger.new()
merger.associate_index(mod_index, 0)
merger.associate_index(obsoletes_index, 0)
merged_idx = merger.resolve()
obsoletes_index = merged_idx
return obsoletes_index

67
pungi/multilib_dnf.py Normal file
View File

@ -0,0 +1,67 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
from multilib import multilib
class Multilib(object):
"""This class decides whether a package should be multilib.
To use it, create an instance and call the ``is_multilib`` method on it.
The blacklist and whitelist in constructor should be sets of package names.
It may be more convenient to create the instance with the ``from_globs``
    method that accepts a DNF sack and an iterable of globs that will be used
to find package names.
"""
def __init__(self, methods, blacklist, whitelist):
self.methods = {}
self.blacklist = blacklist
self.whitelist = whitelist
self.all_methods = {
"none": multilib.NoMultilibMethod(None),
"all": multilib.AllMultilibMethod(None),
"devel": multilib.DevelMultilibMethod(None),
"runtime": multilib.RuntimeMultilibMethod(None),
}
for method in methods:
self.methods[method] = self.all_methods[method]
@classmethod
def from_globs(cls, sack, methods, blacklist=None, whitelist=None):
"""Create a Multilib instance with expanded blacklist and whitelist."""
return cls(
methods,
_expand_list(sack, blacklist or []),
_expand_list(sack, whitelist or []),
)
def is_multilib(self, pkg):
if pkg.name in self.blacklist:
return False
if pkg.name in self.whitelist:
return "whitelist"
for method, cls in self.methods.items():
if cls.select(pkg):
return method
return False
def _expand_list(sack, patterns):
"""Find all package names that match any of the provided patterns."""
return set(pkg.name for pkg in sack.query().filter(name__glob=list(patterns)))
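# Example usage (a minimal sketch; assumes `base` is a dnf.Base with a
# populated sack):
#
#     mlib = Multilib.from_globs(
#         base.sack, ["devel", "runtime"], blacklist=["kernel*"]
#     )
#     for pkg in base.sack.query().filter(arch="i686"):
#         if mlib.is_multilib(pkg):
#             pass  # pull the 32-bit package into the 64-bit tree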

295
pungi/multilib_yum.py Executable file
View File

@ -0,0 +1,295 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
import re
import fnmatch
import pungi.pathmatch
import pungi.gather
import pungi.util
LINE_PATTERN_RE = re.compile(r"^\s*(?P<line>[^#]+)(:?\s+(?P<comment>#.*))?$")
RUNTIME_PATTERN_SPLIT_RE = re.compile(
r"^\s*(?P<path>[^\s]+)\s+(?P<pattern>[^\s]+)(:?\s+(?P<comment>#.*))?$"
)
SONAME_PATTERN_RE = re.compile(r"^(.+\.so\.[a-zA-Z0-9_\.]+).*$")
def read_lines(lines):
result = []
for i in lines:
i = i.strip()
if not i:
continue
# skip comments
if i.startswith("#"):
continue
match = LINE_PATTERN_RE.match(i)
if match is None:
raise ValueError("Couldn't parse line: %s" % i)
gd = match.groupdict()
result.append(gd["line"])
return result
def read_lines_from_file(path):
    with open(path, "r") as f:
        return read_lines(f.readlines())
def read_runtime_patterns(lines):
result = []
for i in read_lines(lines):
match = RUNTIME_PATTERN_SPLIT_RE.match(i)
if match is None:
raise ValueError("Couldn't parse pattern: %s" % i)
gd = match.groupdict()
result.append((gd["path"], gd["pattern"]))
return result
def read_runtime_patterns_from_file(path):
    with open(path, "r") as f:
        return read_runtime_patterns(f.readlines())
def expand_runtime_patterns(patterns):
pm = pungi.pathmatch.PathMatch()
for path, pattern in patterns:
for root in ("", "/opt/*/*/root"):
# include Software Collections: /opt/<vendor>/<scl_name>/root/...
if "$LIBDIR" in path:
for lib_dir in ("/lib", "/lib64", "/usr/lib", "/usr/lib64"):
path_pattern = path.replace("$LIBDIR", lib_dir)
path_pattern = "%s/%s" % (root, path_pattern.lstrip("/"))
pm[path_pattern] = (path_pattern, pattern)
else:
path_pattern = "%s/%s" % (root, path.lstrip("/"))
pm[path_pattern] = (path_pattern, pattern)
return pm
class MultilibMethodBase(object):
"""a base class for multilib methods"""
name = "base"
def __init__(self, config_path):
self.config_path = config_path
def select(self, po):
raise NotImplementedError
def skip(self, po):
if (
pungi.gather.is_noarch(po)
or pungi.gather.is_source(po)
or pungi.util.pkg_is_debug(po)
):
return True
return False
def is_kernel(self, po):
for p_name, p_flag, (p_e, p_v, p_r) in po.provides:
if p_name == "kernel":
return True
return False
def is_kernel_devel(self, po):
for p_name, p_flag, (p_e, p_v, p_r) in po.provides:
if p_name == "kernel-devel":
return True
return False
def is_kernel_or_kernel_devel(self, po):
for p_name, p_flag, (p_e, p_v, p_r) in po.provides:
if p_name in ("kernel", "kernel-devel"):
return True
return False
class NoneMultilibMethod(MultilibMethodBase):
"""multilib disabled"""
name = "none"
def select(self, po):
return False
class AllMultilibMethod(MultilibMethodBase):
"""all packages are multilib"""
name = "all"
def select(self, po):
if self.skip(po):
return False
return True
class RuntimeMultilibMethod(MultilibMethodBase):
"""pre-defined paths to libs"""
name = "runtime"
def __init__(self, *args, **kwargs):
super(RuntimeMultilibMethod, self).__init__(*args, **kwargs)
self.blacklist = read_lines_from_file(
self.config_path + "runtime-blacklist.conf"
)
self.whitelist = read_lines_from_file(
self.config_path + "runtime-whitelist.conf"
)
self.patterns = expand_runtime_patterns(
read_runtime_patterns_from_file(self.config_path + "runtime-patterns.conf")
)
def select(self, po):
if self.skip(po):
return False
if po.name in self.blacklist:
return False
if po.name in self.whitelist:
return True
if self.is_kernel(po):
return False
# gather all *.so.* provides from the RPM header
provides = set()
for i in po.provides:
match = SONAME_PATTERN_RE.match(i[0])
if match is not None:
provides.add(match.group(1))
for path in po.returnFileEntries() + po.returnFileEntries("ghost"):
dirname, filename = path.rsplit("/", 1)
dirname = dirname.rstrip("/")
patterns = self.patterns[dirname]
if not patterns:
continue
for dir_pattern, file_pattern in patterns:
if file_pattern == "-":
return True
if fnmatch.fnmatch(filename, file_pattern):
if ".so.*" in file_pattern:
if filename in provides:
# return only if the lib is provided in RPM header
# (some libs may be private, hence not exposed in Provides)
return True
else:
return True
return False
class KernelMultilibMethod(MultilibMethodBase):
"""kernel and kernel-devel"""
name = "kernel"
def __init__(self, *args, **kwargs):
super(KernelMultilibMethod, self).__init__(*args, **kwargs)
def select(self, po):
if self.is_kernel_or_kernel_devel(po):
return True
return False
class YabootMultilibMethod(MultilibMethodBase):
"""yaboot on ppc"""
name = "yaboot"
def __init__(self, *args, **kwargs):
super(YabootMultilibMethod, self).__init__(*args, **kwargs)
def select(self, po):
if po.arch in ["ppc"]:
if po.name.startswith("yaboot"):
return True
return False
class DevelMultilibMethod(MultilibMethodBase):
"""all -devel and -static packages"""
name = "devel"
def __init__(self, *args, **kwargs):
super(DevelMultilibMethod, self).__init__(*args, **kwargs)
self.blacklist = read_lines_from_file(self.config_path + "devel-blacklist.conf")
self.whitelist = read_lines_from_file(self.config_path + "devel-whitelist.conf")
def select(self, po):
if self.skip(po):
return False
if po.name in self.blacklist:
return False
if po.name in self.whitelist:
return True
if self.is_kernel_devel(po):
return False
# HACK: exclude ghc*
if po.name.startswith("ghc-"):
return False
if po.name.endswith("-devel"):
return True
if po.name.endswith("-static"):
return True
for p_name, p_flag, (p_e, p_v, p_r) in po.provides:
if p_name.endswith("-devel"):
return True
if p_name.endswith("-static"):
return True
return False
DEFAULT_METHODS = ["devel", "runtime"]
METHOD_MAP = {}
def init(config_path="/usr/share/pungi/multilib/"):
global METHOD_MAP
if not config_path.endswith("/"):
config_path += "/"
for cls in (
AllMultilibMethod,
DevelMultilibMethod,
KernelMultilibMethod,
NoneMultilibMethod,
RuntimeMultilibMethod,
YabootMultilibMethod,
):
method = cls(config_path)
METHOD_MAP[method.name] = method
def po_is_multilib(po, methods):
for method_name in methods:
if not method_name:
continue
method = METHOD_MAP[method_name]
if method.select(po):
return method_name
return None
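# Example usage (a minimal sketch; `po` stands for a yum package object):
#
#     init()  # loads method configs from /usr/share/pungi/multilib/
#     method = po_is_multilib(po, DEFAULT_METHODS)
#     # -> the name of the first matching method ("devel", "runtime", ...)
#     #    or None if the package is not multilib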

112
pungi/notifier.py Normal file
View File

@ -0,0 +1,112 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
from datetime import datetime
import json
import os
import threading
import pungi.util
from kobo import shortcuts
class PungiNotifier(object):
"""Wrapper around an external script for sending messages.
If no script is configured, the messages are just silently ignored. If the
script fails, a warning will be logged, but the compose process will not be
interrupted.
"""
def __init__(self, cmds):
self.cmds = cmds
self.lock = threading.Lock()
self.compose = None
def _update_args(self, data):
"""Add compose related information to the data."""
if not self.compose:
return
data.setdefault("compose_id", self.compose.compose_id)
# Publish where in the world this compose will end up living
location = pungi.util.translate_path(
self.compose, self.compose.paths.compose.topdir()
)
data.setdefault("location", location)
# Add information about the compose itself.
data.setdefault("compose_date", self.compose.compose_date)
data.setdefault("compose_type", self.compose.compose_type)
data.setdefault("compose_respin", self.compose.compose_respin)
data.setdefault("compose_label", self.compose.compose_label)
data.setdefault("compose_path", self.compose.topdir)
data.setdefault("release_short", self.compose.conf["release_short"])
data.setdefault("release_name", self.compose.conf["release_name"])
data.setdefault("release_version", self.compose.conf["release_version"])
data.setdefault("release_type", self.compose.conf["release_type"].lower())
data.setdefault("release_is_layered", False)
if self.compose.conf.get("base_product_name", ""):
data["release_is_layered"] = True
data["base_product_name"] = self.compose.conf["base_product_name"]
data["base_product_version"] = self.compose.conf["base_product_version"]
data["base_product_short"] = self.compose.conf["base_product_short"]
data["base_product_type"] = self.compose.conf["base_product_type"].lower()
def send(self, msg, workdir=None, **kwargs):
"""Send a message.
The actual meaning of ``msg`` depends on what the notification script
will be doing. The keyword arguments will be JSON-encoded and passed on
to standard input of the notification process.
Unless you specify it manually, a ``compose_id`` key with appropriate
value will be automatically added.
"""
if not self.cmds:
return
self._update_args(kwargs)
with self.lock:
for cmd in self.cmds:
self._run_script(cmd, msg, workdir, kwargs)
def _run_script(self, cmd, msg, workdir, kwargs):
"""Run a single notification script with proper logging."""
logfile = None
if self.compose:
self.compose.log_debug("Notification: %r %r, %r" % (cmd, msg, kwargs))
logfile = os.path.join(
self.compose.paths.log.topdir(),
"notifications",
"notification-%s.log" % datetime.utcnow().strftime("%Y-%m-%d_%H-%M-%S"),
)
pungi.util.makedirs(os.path.dirname(logfile))
ret, _ = shortcuts.run(
(cmd, msg),
stdin_data=json.dumps(kwargs),
can_fail=True,
workdir=workdir,
return_stdout=False,
show_cmd=True,
universal_newlines=True,
logfile=logfile,
)
if ret != 0:
if self.compose:
self.compose.log_warning("Failed to invoke notification script.")
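# Example usage (a minimal sketch; the script path is hypothetical):
#
#     notifier = PungiNotifier(["/usr/local/bin/compose-notify"])
#     notifier.compose = compose  # optional; adds compose_* keys to the payload
#     notifier.send("phase-start", phase_name="gather")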

202
pungi/ostree/__init__.py Normal file
View File

@ -0,0 +1,202 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
import argparse
import logging
from .tree import Tree
from .installer import Installer
from .container import Container
def main(args=None):
parser = argparse.ArgumentParser()
subparser = parser.add_subparsers(help="Sub commands")
treep = subparser.add_parser("tree", help="Compose OSTree repository")
treep.set_defaults(_class=Tree, func="run")
treep.add_argument(
"--repo",
metavar="PATH",
required=True,
help="where to put the OSTree repo (required)",
)
treep.add_argument(
"--treefile",
metavar="FILE",
required=True,
help="treefile for rpm-ostree (required)",
)
treep.add_argument(
"--log-dir",
metavar="DIR",
required=True,
help="where to log output and commitid (required). \
Note: commitid file will be written to this dir",
)
treep.add_argument(
"--extra-config", metavar="FILE", help="JSON file contains extra configurations"
)
treep.add_argument(
"--version",
metavar="VERSION",
help="version string to be added as versioning metadata",
)
treep.add_argument(
"--update-summary", action="store_true", help="update summary metadata"
)
treep.add_argument(
"--ostree-ref", metavar="PATH", help="override ref value from treefile"
)
treep.add_argument(
"--force-new-commit",
action="store_true",
help="do not use rpm-ostree's built-in change detection",
)
treep.add_argument(
"--unified-core",
action="store_true",
help="use unified core mode in rpm-ostree",
)
container = subparser.add_parser(
"container", help="Compose OSTree native container"
)
container.set_defaults(_class=Container, func="run")
container.add_argument(
"--name",
required=True,
help="the name of the the OCI archive (required)",
)
container.add_argument(
"--path",
required=True,
help="where to output the OCI archive (required)",
)
container.add_argument(
"--treefile",
metavar="FILE",
required=True,
help="treefile for rpm-ostree (required)",
)
container.add_argument(
"--log-dir",
metavar="DIR",
required=True,
help="where to log output (required).",
)
container.add_argument(
"--extra-config", metavar="FILE", help="JSON file contains extra configurations"
)
container.add_argument(
"-v",
"--version",
metavar="VERSION",
required=True,
help="version identifier (required)",
)
installerp = subparser.add_parser(
"installer", help="Create an OSTree installer image"
)
installerp.set_defaults(_class=Installer, func="run")
installerp.add_argument(
"-p",
"--product",
metavar="PRODUCT",
required=True,
help="product name (required)",
)
installerp.add_argument(
"-v",
"--version",
metavar="VERSION",
required=True,
help="version identifier (required)",
)
installerp.add_argument(
"-r",
"--release",
metavar="RELEASE",
required=True,
help="release information (required)",
)
installerp.add_argument(
"-s",
"--source",
metavar="REPOSITORY",
required=True,
action="append",
help="source repository (required)",
)
installerp.add_argument(
"-o",
"--output",
metavar="DIR",
required=True,
help="path to image output directory (required)",
)
installerp.add_argument("--log-dir", metavar="DIR", help="path to log directory")
installerp.add_argument("--volid", metavar="VOLID", help="volume id")
installerp.add_argument("--variant", metavar="VARIANT", help="variant name")
installerp.add_argument("--rootfs-size", metavar="SIZE")
installerp.add_argument("--nomacboot", action="store_true", default=False)
installerp.add_argument("--noupgrade", action="store_true", default=False)
installerp.add_argument("--isfinal", action="store_true", default=False)
installerp.add_argument(
"--installpkgs",
metavar="PACKAGE",
action="append",
help="package glob to install before runtime-install.tmpl",
)
installerp.add_argument(
"--add-template",
metavar="FILE",
action="append",
help="Additional template for runtime image",
)
installerp.add_argument(
"--add-template-var",
metavar="ADD_TEMPLATE_VARS",
action="append",
help="Set variable for runtime image template",
)
installerp.add_argument(
"--add-arch-template",
metavar="FILE",
action="append",
help="Additional template for architecture-specific image",
)
installerp.add_argument(
"--add-arch-template-var",
metavar="ADD_ARCH_TEMPLATE_VARS",
action="append",
help="Set variable for architecture-specific image",
)
installerp.add_argument(
"--extra-config", metavar="FILE", help="JSON file contains extra configurations"
)
args = parser.parse_args(args)
logging.basicConfig(format="%(message)s", level=logging.DEBUG)
_class = args._class()
_class.set_args(args)
func = getattr(_class, args.func)
func()
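# Example invocations (a sketch; assumes this module is exposed as the
# `pungi-make-ostree` entry point, with hypothetical paths):
#
#     pungi-make-ostree tree --repo=/srv/ostree/repo \
#         --treefile=fedora-atomic-host.json --log-dir=/tmp/logs \
#         --version=41 --update-summary
#     pungi-make-ostree container --name=base --path=/tmp/out \
#         --treefile=fedora-base.yaml --log-dir=/tmp/logs -v 41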

19
pungi/ostree/base.py Normal file
View File

@ -0,0 +1,19 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
class OSTree(object):
def set_args(self, args):
self.args = args

86
pungi/ostree/container.py Normal file
View File

@ -0,0 +1,86 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
import os
import json
import six
from six.moves import shlex_quote
from .base import OSTree
from .utils import tweak_treeconf
def emit(cmd):
"""Print line of shell code into the stream."""
if isinstance(cmd, six.string_types):
print(cmd)
else:
print(" ".join([shlex_quote(x) for x in cmd]))
class Container(OSTree):
def _make_container(self):
"""Compose OSTree Container Native image"""
stamp_file = os.path.join(self.logdir, "%s.stamp" % self.name)
cmd = [
"rpm-ostree",
"compose",
"image",
# Always initialize for now
"--initialize",
# Touch the file if a new commit was created. This can help us tell
# if the commitid file is missing because no commit was created or
# because something went wrong.
"--touch-if-changed=%s" % stamp_file,
self.treefile,
]
fullpath = os.path.join(self.path, "%s.ociarchive" % self.name)
cmd.append(fullpath)
# Set the umask to be more permissive so directories get group write
# permissions. See https://pagure.io/releng/issue/8811#comment-629051
emit("umask 0002")
emit(cmd)
def run(self):
self.name = self.args.name
self.path = self.args.path
self.treefile = self.args.treefile
self.logdir = self.args.log_dir
self.extra_config = self.args.extra_config
if self.extra_config:
            with open(self.extra_config, "r") as f:
                self.extra_config = json.load(f)
repos = self.extra_config.get("repo", [])
keep_original_sources = self.extra_config.get(
"keep_original_sources", False
)
else:
# missing extra_config mustn't affect tweak_treeconf call
repos = []
keep_original_sources = True
update_dict = {"automatic-version-prefix": self.args.version}
self.treefile = tweak_treeconf(
self.treefile,
source_repos=repos,
keep_original_sources=keep_original_sources,
update_dict=update_dict,
)
self._make_container()

77
pungi/ostree/installer.py Normal file
View File

@ -0,0 +1,77 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
import json
from kobo import shortcuts
from .base import OSTree
from ..wrappers import lorax
class Installer(OSTree):
def _merge_config(self, config):
self.installpkgs.extend(config.get("installpkgs", []))
self.add_template.extend(config.get("add_template", []))
        self.add_template_var.extend(config.get("add_template_var", []))
self.add_arch_template.extend(config.get("add_arch_template", []))
self.add_arch_template_var.extend(config.get("add_arch_template_var", []))
def run(self):
self.product = self.args.product
self.version = self.args.version
self.release = self.args.release
self.sources = self.args.source
self.output = self.args.output
self.logdir = self.args.log_dir
self.volid = self.args.volid
self.variant = self.args.variant
self.rootfs_size = self.args.rootfs_size
self.nomacboot = self.args.nomacboot
self.noupgrade = self.args.noupgrade
self.isfinal = self.args.isfinal
self.installpkgs = self.args.installpkgs or []
self.add_template = self.args.add_template or []
self.add_template_var = self.args.add_template_var or []
self.add_arch_template = self.args.add_arch_template or []
self.add_arch_template_var = self.args.add_arch_template_var or []
self.extra_config = self.args.extra_config
if self.extra_config:
            with open(self.extra_config, "r") as f:
                self.extra_config = json.load(f)
self._merge_config(self.extra_config)
lorax_wrapper = lorax.LoraxWrapper()
cmd = lorax_wrapper.get_lorax_cmd(
self.product,
self.version,
self.release,
self.sources,
self.output,
variant=self.variant,
nomacboot=self.nomacboot,
volid=self.volid,
buildinstallpackages=self.installpkgs,
add_template=self.add_template,
add_template_var=self.add_template_var,
add_arch_template=self.add_arch_template,
add_arch_template_var=self.add_arch_template_var,
rootfs_size=self.rootfs_size,
is_final=self.isfinal,
log_dir=self.logdir,
)
shortcuts.run(cmd)

158
pungi/ostree/tree.py Normal file
View File

@ -0,0 +1,158 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
import os
import json
from kobo import shortcuts
from pungi.util import makedirs
from .base import OSTree
from .utils import (
make_log_file,
tweak_treeconf,
get_ref_from_treefile,
get_commitid_from_commitid_file,
)
class Tree(OSTree):
def _make_tree(self):
"""Compose OSTree tree"""
log_file = make_log_file(self.logdir, "create-ostree-repo")
cmd = [
"rpm-ostree",
"compose",
"tree",
"--repo=%s" % self.repo,
"--write-commitid-to=%s" % self.commitid_file,
# Touch the file if a new commit was created. This can help us tell
# if the commitid file is missing because no commit was created or
# because something went wrong.
"--touch-if-changed=%s.stamp" % self.commitid_file,
]
if self.unified_core:
# See https://github.com/coreos/rpm-ostree/issues/729
cmd.append("--unified-core")
if self.version:
# Add versioning metadata
cmd.append("--add-metadata-string=version=%s" % self.version)
# Note renamed from rpm-ostree --force-nocache since it's a better
# name; more clearly describes what we're doing here.
if self.force_new_commit:
cmd.append("--force-nocache")
cmd.append(self.treefile)
# Set the umask to be more permissive so directories get group write
# permissions. See https://pagure.io/releng/issue/8811#comment-629051
oldumask = os.umask(0o0002)
try:
shortcuts.run(
cmd,
show_cmd=True,
stdout=True,
logfile=log_file,
universal_newlines=True,
)
finally:
os.umask(oldumask)
def _update_summary(self):
"""Update summary metadata"""
log_file = make_log_file(self.logdir, "ostree-summary")
shortcuts.run(
["ostree", "summary", "-u", "--repo=%s" % self.repo],
show_cmd=True,
stdout=True,
logfile=log_file,
universal_newlines=True,
)
def _update_ref(self):
"""
Update the ref.
        '--write-commitid-to' is specified when composing the tree, so we
        need to update the ref ourselves. The ref is retrieved from the
        treefile and the commit ID from the commitid file.
"""
tag_ref = True
if self.extra_config:
tag_ref = self.extra_config.get("tag_ref", True)
if not tag_ref:
print("Not updating ref as configured")
return
ref = get_ref_from_treefile(self.treefile)
commitid = get_commitid_from_commitid_file(self.commitid_file)
print("Ref: %r, Commit ID: %r" % (ref, commitid))
if ref and commitid:
print("Updating ref")
# Let's write the tag out ourselves
heads_dir = os.path.join(self.repo, "refs", "heads")
if not os.path.exists(heads_dir):
raise RuntimeError("Refs/heads did not exist in ostree repo")
ref_path = os.path.join(heads_dir, ref)
# Set the umask to be more permissive so directories get group write
# permissions. See https://pagure.io/releng/issue/8811#comment-629051
oldumask = os.umask(0o0002)
try:
makedirs(os.path.dirname(ref_path))
finally:
os.umask(oldumask)
with open(ref_path, "w") as f:
f.write(commitid + "\n")
def run(self):
self.repo = self.args.repo
self.treefile = self.args.treefile
self.version = self.args.version
self.logdir = self.args.log_dir
self.update_summary = self.args.update_summary
self.extra_config = self.args.extra_config
self.ostree_ref = self.args.ostree_ref
self.force_new_commit = self.args.force_new_commit
self.unified_core = self.args.unified_core
if self.extra_config or self.ostree_ref:
if self.extra_config:
                with open(self.extra_config, "r") as f:
                    self.extra_config = json.load(f)
repos = self.extra_config.get("repo", [])
keep_original_sources = self.extra_config.get(
"keep_original_sources", False
)
else:
# missing extra_config mustn't affect tweak_treeconf call
repos = []
keep_original_sources = True
update_dict = {}
if self.ostree_ref:
# override ref value in treefile
update_dict["ref"] = self.ostree_ref
self.treefile = tweak_treeconf(
self.treefile,
source_repos=repos,
keep_original_sources=keep_original_sources,
update_dict=update_dict,
)
self.commitid_file = make_log_file(self.logdir, "commitid")
self._make_tree()
self._update_ref()
if self.update_summary:
self._update_summary()

126
pungi/ostree/utils.py Normal file
View File

@ -0,0 +1,126 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
import json
import logging
import os
import shutil
import yaml
from pungi.arch_utils import getBaseArch
from pungi.util import makedirs
def make_log_file(log_dir, filename):
"""Return path to log file with given name, if log_dir is set."""
if not log_dir:
return None
makedirs(log_dir)
return os.path.join(log_dir, "%s.log" % filename)
def get_ref_from_treefile(treefile, arch=None, logger=None):
"""
    Return the ref name by parsing the tree config file, replacing ${basearch}
    with the base arch of the architecture we are running on, or of the arch
    passed in.
"""
logger = logger or logging.getLogger(__name__)
if os.path.isfile(treefile):
with open(treefile, "r") as f:
try:
# rpm-ostree now supports YAML
# https://github.com/projectatomic/rpm-ostree/pull/1377
if treefile.endswith(".yaml"):
parsed = yaml.safe_load(f)
else:
parsed = json.load(f)
return parsed["ref"].replace("${basearch}", getBaseArch(arch))
except Exception as e:
logger.error("Unable to get ref from treefile: %s" % e)
else:
logger.error("Unable to open treefile")
return None
def get_commitid_from_commitid_file(commitid_file):
"""Return commit id which is read from the commitid file"""
if not os.path.exists(commitid_file + ".stamp"):
# The stamp does not exist, so no new commit.
return None
with open(commitid_file, "r") as f:
return f.read().replace("\n", "")
def tweak_treeconf(
treeconf, source_repos=None, keep_original_sources=False, update_dict=None
):
"""
    Update the tree config file by adding new repos, removing the existing
    repos unless 'keep_original_sources' is enabled.
    Additionally, other values can be passed to the method via the
    'update_dict' parameter to update the treefile content.
"""
# backup the old tree config
shutil.copy2(treeconf, "{0}.bak".format(treeconf))
treeconf_dir = os.path.dirname(treeconf)
with open(treeconf, "r") as f:
# rpm-ostree now supports YAML, but we'll end up converting it to JSON.
# https://github.com/projectatomic/rpm-ostree/pull/1377
if treeconf.endswith(".yaml"):
treeconf_content = yaml.safe_load(f)
treeconf = treeconf.replace(".yaml", ".json")
else:
treeconf_content = json.load(f)
repos = []
if source_repos:
# Sort to ensure reliable ordering
source_repos = sorted(source_repos, key=lambda x: x["name"])
# Now, since pungi includes timestamps in the repo names which
# currently defeats rpm-ostree's change detection, let's just
# use repos named 'repo-<number>'.
# https://pagure.io/pungi/issue/811
with open("{0}/pungi.repo".format(treeconf_dir), "w") as f:
for i, repo in enumerate(source_repos):
name = "repo-{0}".format(i)
f.write("[%s]\n" % name)
f.write("name=%s\n" % name)
f.write("baseurl=%s\n" % repo["baseurl"])
exclude = repo.get("exclude", None)
if exclude:
f.write("exclude=%s\n" % exclude)
gpgcheck = "1" if repo.get("gpgcheck", False) else "0"
f.write("gpgcheck=%s\n" % gpgcheck)
repos.append(name)
original_repos = treeconf_content.get("repos", [])
if keep_original_sources:
treeconf_content["repos"] = original_repos + repos
else:
treeconf_content["repos"] = repos
# update content with config values from dictionary (for example 'ref')
if isinstance(update_dict, dict):
treeconf_content.update(update_dict)
# update tree config to add new repos
with open(treeconf, "w") as f:
json.dump(treeconf_content, f, indent=4)
return treeconf
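
A hedged usage sketch for tweak_treeconf (the paths, repo values and ref are hypothetical): it backs up the original treefile, writes pungi.repo next to it, and returns the path of the rewritten JSON config.

repos = [{"name": "fedora", "baseurl": "http://example.com/$basearch/os"}]
new_treeconf = tweak_treeconf(
    "/tmp/fedora-workstation.yaml",
    source_repos=repos,
    keep_original_sources=False,
    update_dict={"ref": "fedora/33/x86_64/workstation"},
)
# new_treeconf == "/tmp/fedora-workstation.json"; its "repos" key is ["repo-0"]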

73
pungi/pathmatch.py Normal file

@ -0,0 +1,73 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
import fnmatch
def head_tail_split(name):
name_split = name.strip("/").split("/", 1)
if len(name_split) == 2:
head = name_split[0]
tail = name_split[1].strip("/")
else:
head, tail = name_split[0], None
return head, tail
class PathMatch(object):
def __init__(self, parent=None, desc=None):
self._patterns = {}
self._final_patterns = {}
self._values = []
def __setitem__(self, name, value):
head, tail = head_tail_split(name)
if tail is not None:
# recursion
if head not in self._patterns:
self._patterns[head] = PathMatch(parent=self, desc=head)
self._patterns[head][tail] = value
else:
if head not in self._final_patterns:
self._final_patterns[head] = PathMatch(parent=self, desc=head)
if value not in self._final_patterns[head]._values:
self._final_patterns[head]._values.append(value)
def __getitem__(self, name):
result = []
head, tail = head_tail_split(name)
for pattern in self._patterns:
if fnmatch.fnmatch(head, pattern):
if tail is None:
values = self._patterns[pattern]._values
else:
values = self._patterns[pattern][tail]
for value in values:
if value not in result:
result.append(value)
for pattern in self._final_patterns:
if tail is None:
x = head
else:
x = "%s/%s" % (head, tail)
if fnmatch.fnmatch(x, pattern):
values = self._final_patterns[pattern]._values
for value in values:
if value not in result:
result.append(value)
return result
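
A short usage sketch: keys are fnmatch-style path patterns, and lookup returns every stored value whose pattern matches the queried path (the patterns and values below are arbitrary examples).

pm = PathMatch()
pm["/media/*/isolinux"] = "boot-config"
pm["/media/Server-*"] = "server-bits"
print(pm["/media/Server-x86_64/isolinux"])  # ['boot-config']
print(pm["/media/Server-x86_64"])  # ['server-bits']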

841
pungi/paths.py Normal file

@ -0,0 +1,841 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
__all__ = ("Paths",)
import errno
import os
from kobo.shortcuts import relative_path
from pungi.util import makedirs, find_old_compose
class Paths(object):
def __init__(self, compose):
self._compose = compose
paths_module_name = compose.conf.get("paths_module")
if paths_module_name:
# custom paths
paths_module = __import__(
paths_module_name,
globals(),
locals(),
["LogPaths", "WorkPaths", "ComposePaths"],
)
self.compose = paths_module.ComposePaths(compose)
self.log = paths_module.LogPaths(compose)
self.work = paths_module.WorkPaths(compose)
else:
# default paths
self.compose = ComposePaths(compose)
self.log = LogPaths(compose)
self.work = WorkPaths(compose)
# self.metadata ?
def get_old_compose_topdir(self, **kwargs):
"""
Finds old compose using the `find_old_compose` function and returns
the path to it. The `kwargs` are passed to `find_old_compose`.
"""
is_layered = self._compose.ci_base.release.is_layered
return find_old_compose(
self._compose.old_composes,
self._compose.ci_base.release.short,
self._compose.ci_base.release.version,
self._compose.ci_base.release.type_suffix,
self._compose.ci_base.base_product.short if is_layered else None,
self._compose.ci_base.base_product.version if is_layered else None,
**kwargs
)
def old_compose_path(self, path, **kwargs):
"""
Translates `path` to the topdir of old compose.
:param str path: Path to translate.
:param kwargs: The kwargs passed to `find_old_compose` function.
:return: None if old compose cannot be used or if `path` does not exist
in the old compose topdir. Otherwise path translated to old_compose
topdir.
Example:
old_repo_dir = compose.old_compose_path(
compose.paths.work.pkgset_repo(pkgset.name, arch="global"))
"""
old_compose_topdir = self.get_old_compose_topdir(**kwargs)
if not old_compose_topdir:
return None
rel_path = relative_path(path, self._compose.topdir.rstrip("/") + "/")
old_path = os.path.join(old_compose_topdir, rel_path)
if not os.path.exists(old_path):
return None
return old_path
class LogPaths(object):
def __init__(self, compose):
self.compose = compose
def topdir(self, arch=None, create_dir=True):
"""
Examples:
log/global
log/x86_64
"""
arch = arch or "global"
path = os.path.join(self.compose.topdir, "logs", arch)
if create_dir:
makedirs(path)
return path
def koji_tasks_dir(self, create_dir=True):
"""
Examples:
logs/global/koji-tasks
"""
path = os.path.join(self.topdir(create_dir=create_dir), "koji-tasks")
if create_dir:
makedirs(path)
return path
def log_file(self, arch, log_name, create_dir=True, ext=None):
ext = ext or "log"
arch = arch or "global"
if log_name.endswith(".log"):
log_name = log_name[:-4]
return os.path.join(
self.topdir(arch, create_dir=create_dir), "%s.%s.%s" % (log_name, arch, ext)
)
class WorkPaths(object):
def __init__(self, compose):
self.compose = compose
def topdir(self, arch=None, create_dir=True):
"""
Examples:
work/global
work/x86_64
"""
arch = arch or "global"
path = os.path.join(self.compose.topdir, "work", arch)
if create_dir:
makedirs(path)
return path
def variants_file(self, arch=None, create_dir=True):
"""
Examples:
work/global/variants.xml
"""
arch = "global"
path = os.path.join(self.topdir(arch, create_dir=create_dir), "variants.xml")
return path
def comps(self, arch=None, variant=None, create_dir=True):
"""
Examples:
work/x86_64/comps/comps-x86_64.xml
work/x86_64/comps/comps-Server.x86_64.xml
"""
arch = arch or "global"
if variant is None:
file_name = "comps-%s.xml" % arch
else:
file_name = "comps-%s.%s.xml" % (variant.uid, arch)
path = os.path.join(self.topdir(arch, create_dir=create_dir), "comps")
if create_dir:
makedirs(path)
path = os.path.join(path, file_name)
return path
def gather_result(self, arch=None, variant=None, create_dir=True):
"""
Examples:
work/x86_64/gather_result/x86_64.result
work/x86_64/gather_result/Server.x86_64.result
"""
arch = arch or "global"
file_name = ""
if variant:
file_name += variant.uid + "."
file_name += arch + "."
file_name += "result"
path = os.path.join(self.topdir(arch, create_dir=create_dir), "gather_result")
if create_dir:
makedirs(path)
path = os.path.join(path, file_name)
return path
def pungi_conf(self, arch=None, variant=None, create_dir=True, source_name=None):
"""
Examples:
work/x86_64/pungi/x86_64.conf
work/x86_64/pungi/Server.x86_64.conf
"""
arch = arch or "global"
file_name = ""
if variant:
file_name += variant.uid + "."
file_name += arch + "."
if source_name:
file_name += source_name + "."
file_name += "conf"
path = os.path.join(self.topdir(arch, create_dir=create_dir), "pungi")
if create_dir:
makedirs(path)
path = os.path.join(path, file_name)
return path
def fus_conf(self, arch, variant, iteration, create_dir=True):
"""
Examples:
work/x86_64/fus/Server-solvables-1.x86_64.conf
"""
file_name = "%s-solvables-%d.%s.conf" % (variant.uid, iteration, arch)
path = os.path.join(self.topdir(arch, create_dir=create_dir), "fus")
if create_dir:
makedirs(path)
return os.path.join(path, file_name)
def pungi_log(self, arch=None, variant=None, create_dir=True, source_name=None):
"""
Examples:
work/x86_64/pungi/x86_64.log
work/x86_64/pungi/Server.x86_64.log
"""
path = self.pungi_conf(arch, variant, create_dir=create_dir)
path = path[:-5]
if source_name:
path += "." + source_name
return path + ".log"
def pungi_cache_dir(self, arch, variant=None, create_dir=True):
"""
Examples:
work/global/pungi-cache
"""
# WARNING: Using the same cache dir with repos of the same names
# may lead to a race condition.
# We should use per arch variant cache dirs to workaround this.
path = os.path.join(self.topdir(arch, create_dir=create_dir), "pungi-cache")
if variant:
path = os.path.join(path, variant.uid)
if create_dir:
makedirs(path)
return path
def _repo(self, type, arch=None, variant=None, create_dir=True):
arch = arch or "global"
path = os.path.join(self.topdir(arch, create_dir=create_dir), "%s_repo" % type)
if variant:
path += "_" + variant.uid
if create_dir:
makedirs(path)
return path
def comps_repo(self, arch=None, variant=None, create_dir=True):
"""
Examples:
work/x86_64/comps_repo_Server
work/global/comps_repo
"""
return self._repo("comps", arch, variant, create_dir=create_dir)
def pkgset_repo(self, pkgset_name, arch=None, create_dir=True):
"""
Examples:
work/x86_64/repo/f30-compose
work/global/repo/f30-compose
"""
arch = arch or "global"
path = os.path.join(
self.topdir(arch, create_dir=create_dir), "repo", pkgset_name
)
if create_dir:
makedirs(path)
return path
def lookaside_repo(self, arch, variant, create_dir=True):
"""
Examples:
work/x86_64/Server/lookaside_repo
"""
path = os.path.join(
self.topdir(arch, create_dir=create_dir), variant.uid, "lookaside_repo"
)
if create_dir:
makedirs(path)
return path
def package_list(
self, arch=None, variant=None, pkgset=None, pkg_type=None, create_dir=True
):
"""
Examples:
work/x86_64/package_list/x86_64.conf
work/x86_64/package_list/Server.x86_64.conf
work/x86_64/package_list/Server.x86_64.rpm.conf
"""
arch = arch or "global"
if variant is not None:
file_name = "%s.%s" % (variant, arch)
else:
file_name = "%s" % arch
if pkgset:
file_name += "." + pkgset.name
if pkg_type is not None:
file_name += ".%s" % pkg_type
file_name += ".conf"
path = os.path.join(self.topdir(arch, create_dir=create_dir), "package_list")
if create_dir:
makedirs(path)
path = os.path.join(path, file_name)
return path
def lookaside_package_list(self, arch, variant, create_dir=True):
"""
Examples:
work/x86_64/package_list/Server.x86_64.lookaside.conf
"""
return self.package_list(
arch, variant, pkg_type="lookaside", create_dir=create_dir
)
def pungi_download_dir(self, arch, create_dir=True):
"""
Examples:
work/x86_64/pungi_download
"""
path = os.path.join(self.topdir(arch, create_dir=create_dir), "pungi_download")
if create_dir:
makedirs(path)
return path
def buildinstall_dir(
self, arch, create_dir=True, allow_topdir_override=False, variant=None
):
"""
:param bool allow_topdir_override: When True, the
"buildinstall_topdir" will be used (if set) instead of real
"topdir".
Examples:
work/x86_64/buildinstall
"""
if arch == "global":
raise RuntimeError("Global buildinstall dir makes no sense.")
buildinstall_topdir = self.compose.conf.get("buildinstall_topdir", "")
if allow_topdir_override and buildinstall_topdir:
topdir_basename = os.path.basename(self.compose.topdir)
path = os.path.join(
buildinstall_topdir, "buildinstall-%s" % topdir_basename, arch
)
else:
path = os.path.join(
self.topdir(arch, create_dir=create_dir), "buildinstall"
)
if variant:
path = os.path.join(path, variant.uid)
return path
def extra_files_dir(self, arch, variant, create_dir=True):
"""
Examples:
work/x86_64/Server/extra-files
"""
if arch == "global":
raise RuntimeError("Global extra files dir makes no sense.")
path = os.path.join(
self.topdir(arch, create_dir=create_dir), variant.uid, "extra-files"
)
if create_dir:
makedirs(path)
return path
def extra_iso_extra_files_dir(self, arch, variant, create_dir=True):
"""
Examples:
work/x86_64/Server/extra-iso-extra-files
"""
if arch == "global":
raise RuntimeError("Global extra files dir makes no sense.")
path = os.path.join(
self.topdir(arch, create_dir=create_dir),
variant.uid,
"extra-iso-extra-files",
)
if create_dir:
makedirs(path)
return path
def iso_staging_dir(self, arch, variant, filename, create_dir=True):
"""
Examples:
work/x86_64/Server/iso-staging-dir/file.iso/
"""
path = os.path.join(
self.topdir(arch, create_dir=create_dir),
variant.uid,
"iso-staging-dir",
filename,
)
if create_dir:
makedirs(path)
return path
def repo_package_list(self, arch, variant, pkg_type=None, create_dir=True):
"""
Examples:
work/x86_64/repo_package_list/Server.x86_64.rpm.conf
"""
file_name = "%s.%s" % (variant.uid, arch)
if pkg_type is not None:
file_name += ".%s" % pkg_type
file_name += ".conf"
path = os.path.join(
self.topdir(arch, create_dir=create_dir), "repo_package_list"
)
if create_dir:
makedirs(path)
path = os.path.join(path, file_name)
return path
def iso_dir(self, arch, filename, create_dir=True):
"""
Examples:
work/x86_64/iso/Project-1.0-20151203.0-Client-x86_64-dvd1.iso
"""
path = os.path.join(self.topdir(arch, create_dir=create_dir), "iso", filename)
if create_dir:
makedirs(path)
return path
def tmp_dir(self, arch=None, variant=None, create_dir=True):
"""
Examples:
work/global/tmp
work/x86_64/tmp
work/x86_64/tmp-Server
"""
dir_name = "tmp"
if variant:
dir_name += "-%s" % variant.uid
path = os.path.join(self.topdir(arch=arch, create_dir=create_dir), dir_name)
if create_dir:
makedirs(path)
return path
def product_id(self, arch, variant, create_dir=True):
"""
Examples:
work/x86_64/product_id/productid-Server.x86_64.pem/productid
"""
# file_name = "%s.%s.pem" % (variant, arch)
# HACK: modifyrepo doesn't handle renames -> $dir/productid
file_name = "productid"
path = os.path.join(
self.topdir(arch, create_dir=create_dir),
"product_id",
"%s.%s.pem" % (variant, arch),
)
if create_dir:
makedirs(path)
path = os.path.join(path, file_name)
return path
def image_build_dir(self, variant, create_dir=True):
"""
@param variant
@param create_dir=True
Examples:
work/image-build/Server
"""
path = os.path.join(
self.topdir("image-build", create_dir=create_dir), variant.uid
)
if create_dir:
makedirs(path)
return path
def image_build_conf(
self, variant, image_name, image_type, arches=None, create_dir=True
):
"""
@param variant
@param image-name
@param image-type (e.g docker)
@param arches
@param create_dir=True
Examples:
work/image-build/Server/docker_rhel-server-docker.cfg
work/image-build/Server/docker_rhel-server-docker_x86_64.cfg
work/image-build/Server/docker_rhel-server-docker_x86_64-ppc64le.cfg
"""
path = os.path.join(
self.image_build_dir(variant), "%s_%s" % (image_type, image_name)
)
if arches is not None:
path = "%s_%s" % (path, "-".join(list(arches)))
path = "%s.cfg" % path
return path
def module_defaults_dir(self, create_dir=True):
"""
Example:
work/global/module_defaults
"""
path = os.path.join(self.topdir(create_dir=create_dir), "module_defaults")
if create_dir:
makedirs(path)
return path
def module_obsoletes_dir(self, create_dir=True):
"""
Example:
work/global/module_obsoletes
"""
path = os.path.join(self.topdir(create_dir=create_dir), "module_obsoletes")
if create_dir:
makedirs(path)
return path
def pkgset_file_cache(self, pkgset_name):
"""
Returns the path to file in which the cached version of
PackageSetBase.file_cache should be stored.
Example:
work/global/pkgset_f33-compose_file_cache.pickle
"""
filename = "pkgset_%s_file_cache.pickle" % pkgset_name
return os.path.join(self.topdir(arch="global"), filename)
def pkgset_reuse_file(self, pkgset_name):
"""
Example:
work/global/pkgset_f30-compose_reuse.pickle
"""
filename = "pkgset_%s_reuse.pickle" % pkgset_name
return os.path.join(self.topdir(arch="global", create_dir=False), filename)
class ComposePaths(object):
def __init__(self, compose):
self.compose = compose
# TODO: TREES?
def topdir(self, arch=None, variant=None, create_dir=True, relative=False):
"""
Examples:
compose
compose/Server/x86_64
"""
if bool(arch) != bool(variant):
raise TypeError("topdir(): either none or 2 arguments are expected")
path = ""
if not relative:
path = os.path.join(self.compose.topdir, "compose")
if arch or variant:
if variant.type == "addon":
return self.topdir(
arch, variant.parent, create_dir=create_dir, relative=relative
)
path = os.path.join(path, variant.uid, arch)
if create_dir and not relative:
makedirs(path)
return path
def tree_dir(self, arch, variant, create_dir=True, relative=False):
"""
Examples:
compose/Server/x86_64/os
compose/Server-optional/x86_64/os
"""
if arch == "src":
arch = "source"
if arch == "source":
tree_dir = "tree"
else:
# use 'os' dir due to historical reasons
tree_dir = "os"
path = os.path.join(
self.topdir(arch, variant, create_dir=create_dir, relative=relative),
tree_dir,
)
if create_dir and not relative:
makedirs(path)
return path
def os_tree(self, arch, variant, create_dir=True, relative=False):
return self.tree_dir(arch, variant, create_dir=create_dir, relative=relative)
def repository(self, arch, variant, create_dir=True, relative=False):
"""
Examples:
compose/Server/x86_64/os
compose/Server/x86_64/addons/LoadBalancer
"""
if variant.type == "addon":
path = self.packages(
arch, variant, create_dir=create_dir, relative=relative
)
else:
path = self.tree_dir(
arch, variant, create_dir=create_dir, relative=relative
)
if create_dir and not relative:
makedirs(path)
return path
def packages(self, arch, variant, create_dir=True, relative=False):
"""
Examples:
compose/Server/x86_64/os/Packages
compose/Server/x86_64/os/addons/LoadBalancer
compose/Server-optional/x86_64/os/Packages
"""
if variant.type == "addon":
path = os.path.join(
self.tree_dir(arch, variant, create_dir=create_dir, relative=relative),
"addons",
variant.id,
)
else:
path = os.path.join(
self.tree_dir(arch, variant, create_dir=create_dir, relative=relative),
"Packages",
)
if create_dir and not relative:
makedirs(path)
return path
def debug_topdir(self, arch, variant, create_dir=True, relative=False):
"""
Examples:
compose/Server/x86_64/debug
compose/Server-optional/x86_64/debug
"""
path = os.path.join(
self.topdir(arch, variant, create_dir=create_dir, relative=relative),
"debug",
)
if create_dir and not relative:
makedirs(path)
return path
def debug_tree(self, arch, variant, create_dir=True, relative=False):
"""
Examples:
compose/Server/x86_64/debug/tree
compose/Server-optional/x86_64/debug/tree
"""
path = os.path.join(
self.debug_topdir(arch, variant, create_dir=create_dir, relative=relative),
"tree",
)
if create_dir and not relative:
makedirs(path)
return path
def debug_packages(self, arch, variant, create_dir=True, relative=False):
"""
Examples:
compose/Server/x86_64/debug/tree/Packages
compose/Server/x86_64/debug/tree/addons/LoadBalancer
compose/Server-optional/x86_64/debug/tree/Packages
"""
if arch in ("source", "src"):
return None
if variant.type == "addon":
path = os.path.join(
self.debug_tree(
arch, variant, create_dir=create_dir, relative=relative
),
"addons",
variant.id,
)
else:
path = os.path.join(
self.debug_tree(
arch, variant, create_dir=create_dir, relative=relative
),
"Packages",
)
if create_dir and not relative:
makedirs(path)
return path
def debug_repository(self, arch, variant, create_dir=True, relative=False):
"""
Examples:
compose/Server/x86_64/debug/tree
compose/Server/x86_64/debug/tree/addons/LoadBalancer
compose/Server-optional/x86_64/debug/tree
"""
if arch in ("source", "src"):
return None
if variant.type == "addon":
path = os.path.join(
self.debug_tree(
arch, variant, create_dir=create_dir, relative=relative
),
"addons",
variant.id,
)
else:
path = self.debug_tree(
arch, variant, create_dir=create_dir, relative=relative
)
if create_dir and not relative:
makedirs(path)
return path
def iso_dir(self, arch, variant, symlink_to=None, create_dir=True, relative=False):
"""
Examples:
compose/Server/x86_64/iso
None
"""
if variant.type == "addon":
return None
if variant.type == "optional":
if not self.compose.conf.get("create_optional_isos", False):
return None
if arch == "src":
arch = "source"
path = os.path.join(
self.topdir(arch, variant, create_dir=create_dir, relative=relative), "iso"
)
if symlink_to:
# TODO: create_dir
topdir = self.compose.topdir.rstrip("/") + "/"
relative_dir = path[len(topdir) :]
target_dir = os.path.join(symlink_to, self.compose.compose_id, relative_dir)
if create_dir and not relative:
makedirs(target_dir)
try:
os.symlink(target_dir, path)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
msg = "Symlink pointing to '%s' expected: %s" % (target_dir, path)
if not os.path.islink(path):
raise RuntimeError(msg)
if os.path.abspath(os.readlink(path)) != target_dir:
raise RuntimeError(msg)
else:
if create_dir and not relative:
makedirs(path)
return path
def iso_path(
self, arch, variant, filename, symlink_to=None, create_dir=True, relative=False
):
"""
Examples:
compose/Server/x86_64/iso/rhel-7.0-20120127.0-Server-x86_64-dvd1.iso
None
"""
path = self.iso_dir(
arch,
variant,
symlink_to=symlink_to,
create_dir=create_dir,
relative=relative,
)
if path is None:
return None
return os.path.join(path, filename)
def image_dir(self, variant, symlink_to=None, relative=False):
"""
The arch is listed as literal '%(arch)s'
Examples:
compose/Server/%(arch)s/images
None
@param variant
@param symlink_to=None
@param relative=False
"""
path = os.path.join(
self.topdir("%(arch)s", variant, create_dir=False, relative=relative),
"images",
)
if symlink_to:
topdir = self.compose.topdir.rstrip("/") + "/"
relative_dir = path[len(topdir) :]
target_dir = os.path.join(symlink_to, self.compose.compose_id, relative_dir)
try:
os.symlink(target_dir, path)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
msg = "Symlink pointing to '%s' expected: %s" % (target_dir, path)
if not os.path.islink(path):
raise RuntimeError(msg)
if os.path.abspath(os.readlink(path)) != target_dir:
raise RuntimeError(msg)
return path
def jigdo_dir(self, arch, variant, create_dir=True, relative=False):
"""
Examples:
compose/Server/x86_64/jigdo
None
"""
if variant.type == "addon":
return None
if variant.type == "optional":
if not self.compose.conf.get("create_optional_isos", False):
return None
if arch == "src":
arch = "source"
path = os.path.join(
self.topdir(arch, variant, create_dir=create_dir, relative=relative),
"jigdo",
)
if create_dir and not relative:
makedirs(path)
return path
def metadata(self, file_name=None, create_dir=True, relative=False):
"""
Examples:
compose/metadata
compose/metadata/rpms.json
"""
path = os.path.join(
self.topdir(create_dir=create_dir, relative=relative), "metadata"
)
if create_dir and not relative:
makedirs(path)
if file_name:
path = os.path.join(path, file_name)
return path
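
A hedged usage sketch of the helpers above; FakeCompose is a hypothetical minimal stand-in for the real Compose object, which carries far more state.

import tempfile

class FakeCompose(object):
    def __init__(self, topdir):
        self.topdir = topdir
        self.conf = {}

compose = FakeCompose(tempfile.mkdtemp(prefix="compose_"))
work = WorkPaths(compose)
logs = LogPaths(compose)
print(work.topdir("x86_64"))  # <topdir>/work/x86_64
print(logs.log_file("x86_64", "buildinstall"))  # <topdir>/logs/x86_64/buildinstall.x86_64.log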

44
pungi/phases/__init__.py Normal file

@ -0,0 +1,44 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
import sys
# phases in runtime order
from .init import InitPhase # noqa
from .weaver import WeaverPhase # noqa
from .pkgset import PkgsetPhase # noqa
from .gather import GatherPhase # noqa
from .createrepo import CreaterepoPhase # noqa
from .buildinstall import BuildinstallPhase # noqa
from .extra_files import ExtraFilesPhase # noqa
from .createiso import CreateisoPhase # noqa
from .extra_isos import ExtraIsosPhase # noqa
from .image_build import ImageBuildPhase # noqa
from .image_container import ImageContainerPhase # noqa
from .kiwibuild import KiwiBuildPhase # noqa
from .osbuild import OSBuildPhase # noqa
from .repoclosure import RepoclosurePhase # noqa
from .test import TestPhase # noqa
from .image_checksum import ImageChecksumPhase # noqa
from .livemedia_phase import LiveMediaPhase # noqa
from .ostree import OSTreePhase # noqa
from .ostree_installer import OstreeInstallerPhase # noqa
from .ostree_container import OSTreeContainerPhase # noqa
from .osbs import OSBSPhase # noqa
from .phases_metadata import gather_phases_metadata # noqa
this_module = sys.modules[__name__]
PHASES_NAMES = gather_phases_metadata(this_module)
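
A small sanity-check sketch, assuming gather_phases_metadata collects the `name` attribute of each phase class exported by this module:

from pungi.phases import PHASES_NAMES
assert "buildinstall" in PHASES_NAMES
assert "ostree" in PHASES_NAMES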

224
pungi/phases/base.py Normal file

@ -0,0 +1,224 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
import logging
import math
import time
from pungi import util
class PhaseBase(object):
def __init__(self, compose):
self.compose = compose
self.msg = "---------- PHASE: %s ----------" % self.name.upper()
self.finished = False
self._skipped = False
# A set of config patterns that were actually used. Starts as None, and
# when config is queried the variable turns into a set of patterns.
self.used_patterns = None
def validate(self):
pass
def conf_assert_str(self, name):
missing = []
invalid = []
if name not in self.compose.conf:
missing.append(name)
elif not isinstance(self.compose.conf[name], str):
invalid.append((name, type(self.compose.conf[name]), str))
return missing, invalid
def skip(self):
if self._skipped:
return True
if self.compose.just_phases and self.name not in self.compose.just_phases:
return True
if self.name in self.compose.skip_phases:
return True
if self.name in self.compose.conf["skip_phases"]:
return True
return False
def start(self):
self._skipped = self.skip()
if self._skipped:
self.compose.log_warning("[SKIP ] %s" % self.msg)
self.finished = True
return
self._start_time = time.time()
self.compose.log_info("[BEGIN] %s" % self.msg)
self.compose.notifier.send("phase-start", phase_name=self.name)
self.run()
def get_config_block(self, variant, arch=None):
"""In config for current phase, find a block corresponding to given
variant and arch. The arch should be given if and only if the config
uses variant/arch mapping.
"""
self.used_patterns = self.used_patterns or set()
if arch is not None:
return util.get_arch_variant_data(
self.compose.conf, self.name, arch, variant, keys=self.used_patterns
)
else:
return util.get_variant_data(
self.compose.conf, self.name, variant, keys=self.used_patterns
)
def get_all_patterns(self):
"""Get all variant patterns from config file for this phase."""
if isinstance(self.compose.conf.get(self.name), dict):
return set(self.compose.conf.get(self.name, {}).keys())
else:
return set(x[0] for x in self.compose.conf.get(self.name, []))
def report_unused_patterns(self):
"""Log warning about unused parts of the config.
This is not technically an error, but can help debug when something
expected is missing.
"""
all_patterns = self.get_all_patterns()
unused_patterns = all_patterns - self.used_patterns
if unused_patterns:
self.compose.log_warning(
"[%s] Patterns in config do not match any variant: %s"
% (self.name.upper(), ", ".join(sorted(unused_patterns)))
)
self.compose.log_info(
"Note that variants can be excluded in configuration file"
)
def stop(self):
if self.finished:
return
if hasattr(self, "pool"):
self.pool.stop()
self.finished = True
self.compose.log_info("[DONE ] %s" % self.msg)
if hasattr(self, "_start_time"):
self.compose.log_info(
"PHASE %s took %d seconds"
% (self.name.upper(), math.ceil(time.time() - self._start_time))
)
if self.used_patterns is not None:
# We only want to report this if the config was actually queried.
self.report_unused_patterns()
self.compose.notifier.send("phase-stop", phase_name=self.name)
def run(self):
raise NotImplementedError
class ConfigGuardedPhase(PhaseBase):
"""A phase that is skipped unless config option is set."""
def skip(self):
if super(ConfigGuardedPhase, self).skip():
return True
if not self.compose.conf.get(self.name):
self.compose.log_info(
"Config section '%s' was not found. Skipping." % self.name
)
return True
return False
class ImageConfigMixin(object):
"""
A mixin for phases that need to access image-related settings: ksurl,
version, target and release.
First it checks the config object given as an argument, then the
phase-level configuration, and finally falls back to the global
configuration.
"""
def __init__(self, *args, **kwargs):
super(ImageConfigMixin, self).__init__(*args, **kwargs)
def get_config(self, cfg, opt):
return cfg.get(
opt,
self.compose.conf.get(
"%s_%s" % (self.name, opt), self.compose.conf.get("global_%s" % opt)
),
)
def get_version(self, cfg):
"""
Get version from configuration hierarchy or fall back to release
version.
"""
return (
util.version_generator(self.compose, self.get_config(cfg, "version"))
or self.get_config(cfg, "version")
or self.compose.image_version
)
def get_release(self, cfg):
"""
If release is set to a magic string (or explicitly to None -
deprecated), replace it with a generated value. Uses configuration
passed as argument, phase specific settings and global settings.
"""
for key, conf in [
("release", cfg),
("%s_release" % self.name, self.compose.conf),
("global_release", self.compose.conf),
]:
if key in conf:
return (
util.version_generator(self.compose, conf[key])
or self.compose.image_release
)
return None
def get_ksurl(self, cfg):
"""
Get ksurl from `cfg`. If not present, fall back to phase defined one or
global one.
"""
return (
cfg.get("ksurl")
or self.compose.conf.get("%s_ksurl" % self.name)
or self.compose.conf.get("global_ksurl")
)
class PhaseLoggerMixin(object):
"""
A mixin that extends a phase with a separate logger that copies handlers
from the compose, but uses a different formatter that includes the phase
name.
"""
def __init__(self, *args, **kwargs):
super(PhaseLoggerMixin, self).__init__(*args, **kwargs)
self.logger = None
if self.compose._logger and self.compose._logger.handlers:
self.logger = logging.getLogger(self.name.upper())
self.logger.setLevel(logging.DEBUG)
format = "%(asctime)s [%(name)-16s] [%(levelname)-8s] %(message)s"
import copy
for handler in self.compose._logger.handlers:
hl = copy.copy(handler)
hl.setFormatter(logging.Formatter(format, datefmt="%Y-%m-%d %H:%M:%S"))
hl.setLevel(logging.DEBUG)
self.logger.addHandler(hl)
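
A minimal sketch of the three-level lookup order that ImageConfigMixin.get_config implements, using plain dicts in place of the real config objects ("live_media" is a hypothetical phase name here):

cfg = {"target": "f30-candidate"}
conf = {"live_media_target": "f30-phase", "global_target": "f30-global"}

def get_config(cfg, conf, phase_name, opt):
    # the per-image block wins, then the phase-level option, then the global one
    return cfg.get(opt, conf.get("%s_%s" % (phase_name, opt), conf.get("global_%s" % opt)))

print(get_config(cfg, conf, "live_media", "target"))  # f30-candidate
print(get_config({}, conf, "live_media", "target"))  # f30-phase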

950
pungi/phases/buildinstall.py Normal file

@ -0,0 +1,950 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
import errno
import os
import time
import shutil
import re
from six.moves import cPickle as pickle
from copy import copy
from kobo.threads import ThreadPool, WorkerThread
from kobo.shortcuts import run, force_list
import kobo.rpmlib
from productmd.images import Image
from six.moves import shlex_quote
from pungi.arch import get_valid_arches
from pungi.util import get_volid, get_arch_variant_data
from pungi.util import get_file_size, get_mtime, failable, makedirs
from pungi.util import copy_all, translate_path
from pungi.wrappers.lorax import LoraxWrapper
from pungi.wrappers import iso
from pungi.wrappers.scm import get_file
from pungi.wrappers.scm import get_file_from_scm
from pungi.wrappers import kojiwrapper
from pungi.phases.base import PhaseBase
from pungi.runroot import Runroot, download_and_extract_archive
class BuildinstallPhase(PhaseBase):
name = "buildinstall"
def __init__(self, compose, pkgset_phase=None):
PhaseBase.__init__(self, compose)
self.pool = ThreadPool(logger=self.compose._logger)
# A set of (variant_uid, arch) pairs that completed successfully. This
# is needed to skip copying files for failed tasks.
self.pool.finished_tasks = set()
# A set of (variant_uid, arch) pairs that were reused from previous
# compose.
self.pool.reused_tasks = set()
self.buildinstall_method = self.compose.conf.get("buildinstall_method")
self.lorax_use_koji_plugin = self.compose.conf.get("lorax_use_koji_plugin")
self.used_lorax = self.buildinstall_method == "lorax"
self.pkgset_phase = pkgset_phase
self.warned_skipped = False
def skip(self):
if PhaseBase.skip(self):
return True
if not self.compose.conf.get("buildinstall_method"):
if not self.warned_skipped:
msg = "Not a bootable product. Skipping buildinstall."
self.compose.log_debug(msg)
self.warned_skipped = True
return True
return False
def _get_lorax_cmd(
self,
repo_baseurl,
output_dir,
variant,
arch,
buildarch,
volid,
final_output_dir,
):
noupgrade = True
bugurl = None
nomacboot = True
add_template = []
add_arch_template = []
add_template_var = []
add_arch_template_var = []
dracut_args = []
rootfs_size = None
skip_branding = False
squashfs_only = False
configuration_file = None
configuration_file_source = None
version = self.compose.conf.get(
"treeinfo_version", self.compose.conf["release_version"]
)
for data in get_arch_variant_data(
self.compose.conf, "lorax_options", arch, variant
):
if not data.get("noupgrade", True):
noupgrade = False
if data.get("bugurl"):
bugurl = data.get("bugurl")
if not data.get("nomacboot", True):
nomacboot = False
if "rootfs_size" in data:
rootfs_size = data.get("rootfs_size")
add_template.extend(data.get("add_template", []))
add_arch_template.extend(data.get("add_arch_template", []))
add_template_var.extend(data.get("add_template_var", []))
add_arch_template_var.extend(data.get("add_arch_template_var", []))
dracut_args.extend(data.get("dracut_args", []))
skip_branding = data.get("skip_branding", False)
configuration_file_source = data.get("configuration_file")
squashfs_only = data.get("squashfs_only", False)
if "version" in data:
version = data["version"]
output_dir = os.path.join(output_dir, variant.uid)
output_topdir = output_dir
# The paths module will modify the filename (by inserting arch). But we
# only care about the directory anyway.
log_dir = _get_log_dir(self.compose, variant, arch)
# Place the lorax.conf as specified by
# the configuration_file parameter of lorax_options to the log directory.
if configuration_file_source:
configuration_file_destination = os.path.join(log_dir, "lorax.conf")
# Obtain lorax.conf for the buildInstall phase
get_file(
configuration_file_source,
configuration_file_destination,
compose=self.compose,
)
configuration_file = configuration_file_destination
repos = repo_baseurl[:]
repos.extend(
get_arch_variant_data(
self.compose.conf, "lorax_extra_sources", arch, variant
)
)
if self.compose.has_comps:
comps_repo = self.compose.paths.work.comps_repo(arch, variant)
if final_output_dir != output_dir or self.lorax_use_koji_plugin:
comps_repo = translate_path(self.compose, comps_repo)
repos.append(comps_repo)
if self.lorax_use_koji_plugin:
return {
"product": self.compose.conf["release_name"],
"version": version,
"release": version,
"sources": force_list(repos),
"variant": variant.uid,
"installpkgs": variant.buildinstallpackages,
"isfinal": self.compose.supported,
"buildarch": buildarch,
"volid": volid,
"nomacboot": nomacboot,
"bugurl": bugurl,
"add-template": add_template,
"add-arch-template": add_arch_template,
"add-template-var": add_template_var,
"add-arch-template-var": add_arch_template_var,
"noupgrade": noupgrade,
"rootfs-size": rootfs_size,
"dracut-args": dracut_args,
"skip_branding": skip_branding,
"squashfs_only": squashfs_only,
"configuration_file": configuration_file,
}
else:
# If buildinstall_topdir is set, it means Koji is used for the
# buildinstall phase and the filesystem shared with Koji is read-only.
# In that case, we have to write logs to buildinstall_topdir and
# later copy them back to our local log directory.
if self.compose.conf.get("buildinstall_topdir", None):
output_dir = os.path.join(output_dir, "results")
lorax = LoraxWrapper()
lorax_cmd = lorax.get_lorax_cmd(
self.compose.conf["release_name"],
version,
version,
repos,
output_dir,
variant=variant.uid,
buildinstallpackages=variant.buildinstallpackages,
is_final=self.compose.supported,
buildarch=buildarch,
volid=volid,
nomacboot=nomacboot,
bugurl=bugurl,
add_template=add_template,
add_arch_template=add_arch_template,
add_template_var=add_template_var,
add_arch_template_var=add_arch_template_var,
noupgrade=noupgrade,
rootfs_size=rootfs_size,
log_dir=log_dir,
dracut_args=dracut_args,
skip_branding=skip_branding,
squashfs_only=squashfs_only,
configuration_file=configuration_file,
)
return "rm -rf %s && %s" % (
shlex_quote(output_topdir),
" ".join([shlex_quote(x) for x in lorax_cmd]),
)
def get_repos(self, arch):
repos = []
for pkgset in self.pkgset_phase.package_sets:
repos.append(pkgset.paths[arch])
return repos
def run(self):
disc_type = self.compose.conf["disc_types"].get("dvd", "dvd")
# Prepare kickstart file for final images.
self.pool.kickstart_file = get_kickstart_file(self.compose)
for arch in self.compose.get_arches():
commands = []
output_dir = self.compose.paths.work.buildinstall_dir(
arch, allow_topdir_override=True
)
final_output_dir = self.compose.paths.work.buildinstall_dir(
arch, allow_topdir_override=False
)
makedirs(final_output_dir)
repo_baseurls = self.get_repos(arch)
if final_output_dir != output_dir or self.lorax_use_koji_plugin:
repo_baseurls = [translate_path(self.compose, r) for r in repo_baseurls]
if self.buildinstall_method == "lorax":
buildarch = get_valid_arches(arch)[0]
for variant in self.compose.get_variants(arch=arch, types=["variant"]):
if variant.is_empty:
continue
skip = get_arch_variant_data(
self.compose.conf, "buildinstall_skip", arch, variant
)
if skip == [True]:
self.compose.log_info(
"Skipping buildinstall for %s.%s due to config option"
% (variant, arch)
)
continue
volid = get_volid(
self.compose, arch, variant=variant, disc_type=disc_type
)
commands.append(
(
variant,
self._get_lorax_cmd(
repo_baseurls,
output_dir,
variant,
arch,
buildarch,
volid,
final_output_dir,
),
)
)
else:
raise ValueError(
"Unsupported buildinstall method: %s" % self.buildinstall_method
)
for variant, cmd in commands:
self.pool.add(BuildinstallThread(self.pool))
self.pool.queue_put(
(self.compose, arch, variant, cmd, self.pkgset_phase)
)
self.pool.start()
def succeeded(self, variant, arch):
# If the phase is skipped, we can treat it as successful. Either there
# will be no output, or it's a debug run of compose where anything can
# happen.
return (
super(BuildinstallPhase, self).skip()
or (variant.uid if self.used_lorax else None, arch)
in self.pool.finished_tasks
)
def reused(self, variant, arch):
"""
Check if buildinstall phase reused previous results for given variant
and arch. If the phase is skipped, the results will be considered
reused as well.
"""
return (
super(BuildinstallPhase, self).skip()
or (variant.uid if self.used_lorax else None, arch)
in self.pool.reused_tasks
)
def get_kickstart_file(compose):
scm_dict = compose.conf.get("buildinstall_kickstart")
if not scm_dict:
compose.log_debug("Path to ks.cfg (buildinstall_kickstart) not specified.")
return
msg = "Getting ks.cfg"
kickstart_path = os.path.join(compose.paths.work.topdir(arch="global"), "ks.cfg")
if os.path.exists(kickstart_path):
compose.log_warning("[SKIP ] %s" % msg)
return kickstart_path
compose.log_info("[BEGIN] %s" % msg)
if isinstance(scm_dict, dict):
kickstart_name = os.path.basename(scm_dict["file"])
if scm_dict["scm"] == "file":
scm_dict["file"] = os.path.join(compose.config_dir, scm_dict["file"])
else:
kickstart_name = os.path.basename(scm_dict)
scm_dict = os.path.join(compose.config_dir, scm_dict)
tmp_dir = compose.mkdtemp(prefix="buildinstall_kickstart_")
get_file_from_scm(scm_dict, tmp_dir, compose=compose)
src = os.path.join(tmp_dir, kickstart_name)
shutil.copy2(src, kickstart_path)
compose.log_info("[DONE ] %s" % msg)
return kickstart_path
BOOT_CONFIGS = [
"isolinux/isolinux.cfg",
"etc/yaboot.conf",
"ppc/ppc64/yaboot.conf",
"EFI/BOOT/BOOTX64.conf",
"EFI/BOOT/grub.cfg",
]
BOOT_IMAGES = [
"images/efiboot.img",
]
def tweak_configs(path, volid, ks_file, configs=BOOT_CONFIGS, logger=None):
"""
Put escaped volume ID and possibly kickstart file into the boot
configuration files.
:returns: list of paths to modified config files
"""
volid_escaped = volid.replace(" ", r"\x20").replace("\\", "\\\\")
volid_escaped_2 = volid_escaped.replace("\\", "\\\\")
found_configs = []
for config in configs:
config_path = os.path.join(path, config)
if not os.path.exists(config_path):
continue
with open(config_path, "r") as f:
data = original_data = f.read()
os.unlink(config_path)  # break the hardlink by removing the file and writing a new one
# double-escape volid in yaboot.conf
new_volid = volid_escaped_2 if "yaboot" in config else volid_escaped
ks = (" inst.ks=hd:LABEL=%s:/ks.cfg" % new_volid) if ks_file else ""
# pre-f18
data = re.sub(r":CDLABEL=[^ \n]*", r":CDLABEL=%s%s" % (new_volid, ks), data)
# f18+
data = re.sub(r":LABEL=[^ \n]*", r":LABEL=%s%s" % (new_volid, ks), data)
data = re.sub(r"(search .* -l) '[^'\n]*'", r"\1 '%s'" % volid, data)
with open(config_path, "w") as f:
f.write(data)
if data != original_data:
found_configs.append(config)
if logger:
# Generally lorax should already create the file with the correct
# volume id. If we don't have a kickstart, this function should
# be a no-op.
logger.info("Boot config %s changed" % config_path)
return found_configs
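# Usage sketch (hypothetical paths): given an unpacked boot tree and the
# volume id "Fedora-WS-31", the call below would rewrite LABEL=/CDLABEL=
# references in any of the BOOT_CONFIGS found under the tree and return
# the list of configs that actually changed:
#
#     tweak_configs("/tmp/unpacked-tree", "Fedora-WS-31", "/tmp/ks.cfg")
#     # -> e.g. ["isolinux/isolinux.cfg", "EFI/BOOT/grub.cfg"]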
# HACK: this is a hack!
# * it's quite trivial to replace volids
# * it's not easy to replace menu titles
# * we probably need to get this into lorax
def tweak_buildinstall(
compose, src, dst, arch, variant, label, volid, kickstart_file=None
):
tmp_dir = compose.mkdtemp(prefix="tweak_buildinstall_")
# verify src
if not os.path.isdir(src):
raise OSError(errno.ENOENT, "Directory does not exist: %s" % src)
# create dst
try:
os.makedirs(dst)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
# copy src to temp
# TODO: place temp on the same device as buildinstall dir so we can hardlink
cmd = "cp -dRv --preserve=mode,links,timestamps --remove-destination %s/* %s/" % (
shlex_quote(src),
shlex_quote(tmp_dir),
)
run(cmd)
found_configs = tweak_configs(
tmp_dir, volid, kickstart_file, logger=compose._logger
)
if kickstart_file and found_configs:
shutil.copy2(kickstart_file, os.path.join(dst, "ks.cfg"))
images = [os.path.join(tmp_dir, img) for img in BOOT_IMAGES]
if found_configs:
for image in images:
if not os.path.isfile(image):
continue
with iso.mount(
image,
logger=compose._logger,
use_guestmount=compose.conf.get("buildinstall_use_guestmount"),
) as mount_tmp_dir:
for config in found_configs:
# Put each modified config file into the image (overwriting the
# original).
config_path = os.path.join(tmp_dir, config)
config_in_image = os.path.join(mount_tmp_dir, config)
if os.path.isfile(config_in_image):
cmd = [
"cp",
"-v",
"--remove-destination",
config_path,
config_in_image,
]
run(cmd)
# HACK: make buildinstall files world readable
run("chmod -R a+rX %s" % shlex_quote(tmp_dir))
# copy temp to dst
cmd = "cp -dRv --preserve=mode,links,timestamps --remove-destination %s/* %s/" % (
shlex_quote(tmp_dir),
shlex_quote(dst),
)
run(cmd)
shutil.rmtree(tmp_dir)
def link_boot_iso(compose, arch, variant, can_fail):
if arch == "src":
return
disc_type = compose.conf["disc_types"].get("boot", "boot")
symlink_isos_to = compose.conf.get("symlink_isos_to")
os_tree = compose.paths.compose.os_tree(arch, variant)
# TODO: find in treeinfo?
boot_iso_path = os.path.join(os_tree, "images", "boot.iso")
if not os.path.isfile(boot_iso_path):
return
msg = "Linking boot.iso (arch: %s, variant: %s)" % (arch, variant)
filename = compose.get_image_name(
arch, variant, disc_type=disc_type, disc_num=None, suffix=".iso"
)
new_boot_iso_path = compose.paths.compose.iso_path(
arch, variant, filename, symlink_to=symlink_isos_to
)
new_boot_iso_relative_path = compose.paths.compose.iso_path(
arch, variant, filename, relative=True
)
if os.path.exists(new_boot_iso_path):
# TODO: log
compose.log_warning("[SKIP ] %s" % msg)
return
compose.log_info("[BEGIN] %s" % msg)
# Try to hardlink, and copy if that fails
try:
os.link(boot_iso_path, new_boot_iso_path)
except OSError:
shutil.copy2(boot_iso_path, new_boot_iso_path)
implant_md5 = iso.get_implanted_md5(new_boot_iso_path)
iso_name = os.path.basename(new_boot_iso_path)
iso_dir = os.path.dirname(new_boot_iso_path)
# create iso manifest
run(iso.get_manifest_cmd(iso_name), workdir=iso_dir)
img = Image(compose.im)
img.path = new_boot_iso_relative_path
img.mtime = get_mtime(new_boot_iso_path)
img.size = get_file_size(new_boot_iso_path)
img.arch = arch
img.type = "boot"
img.format = "iso"
img.disc_number = 1
img.disc_count = 1
img.bootable = True
img.subvariant = variant.uid
img.implant_md5 = implant_md5
setattr(img, "can_fail", can_fail)
setattr(img, "deliverable", "buildinstall")
try:
img.volume_id = iso.get_volume_id(
new_boot_iso_path,
compose.conf.get("createiso_use_xorrisofs"),
)
except RuntimeError:
pass
# In this phase we should add to the compose only the images that
# will be used solely as netinstall media.
# In this step lorax generates the environment
# for creating isos and creates them.
# In the `extra_isos` step we overwrite the unneeded `boot Minimal` iso
# with a new iso that already contains the necessary packages from the
# included variants.
if variant.uid in compose.conf['netinstall_variants']:
compose.im.add(variant.uid, arch, img)
compose.log_info("[DONE ] %s" % msg)
class BuildinstallThread(WorkerThread):
def process(self, item, num):
# The variant is None unless lorax is used as buildinstall method.
compose, arch, variant, cmd, pkgset_phase = item
can_fail = compose.can_fail(variant, arch, "buildinstall")
with failable(compose, can_fail, variant, arch, "buildinstall"):
try:
self.worker(compose, arch, variant, cmd, pkgset_phase, num)
except RuntimeError:
self._print_depsolve_error(compose, arch, variant)
raise
def _print_depsolve_error(self, compose, arch, variant):
try:
log_file = os.path.join(_get_log_dir(compose, variant, arch), "pylorax.log")
with open(log_file) as f:
matched = False
for line in f:
if re.match("Dependency check failed", line):
matched = True
if matched:
compose.log_error(line.rstrip())
except Exception:
pass
def _generate_buildinstall_metadata(
self, compose, arch, variant, cmd, buildroot_rpms, pkgset_phase
):
"""
Generate buildinstall.metadata dict.
:param Compose compose: Current compose.
:param str arch: Current architecture.
:param Variant variant: Compose variant.
:param list cmd: List of command line arguments passed to buildinstall task.
:param list buildroot_rpms: List of NVRAs of all RPMs installed in the
buildinstall task's buildroot.
:param PkgsetPhase pkgset_phase: Package set phase instance.
:return: The buildinstall.metadata dict.
"""
# Load the list of packages installed in the boot.iso.
# The list of installed packages is logged by Lorax in the "pkglists"
# directory. There is one file for each installed RPM and the name
# of the file is the name of the RPM.
# We need to resolve the name of each RPM back to its NVRA.
installed_rpms = []
log_fname = "buildinstall-%s-logs/dummy" % variant.uid
log_dir = os.path.dirname(compose.paths.log.log_file(arch, log_fname, False))
pkglists_dir = os.path.join(log_dir, "pkglists")
if os.path.exists(pkglists_dir):
for pkg_name in os.listdir(pkglists_dir):
for pkgset in pkgset_phase.package_sets:
global_pkgset = pkgset["global"]
# We actually do not care from which package_set the RPM
# came from or if there are multiple versions/release of
# the single RPM in more packages sets. We simply include
# all RPMs with this name in the metadata.
# Later when deciding if the buildinstall phase results
# can be reused, we check that all the RPMs with this name
# are still the same in old/new compose.
for rpm_path, rpm_obj in global_pkgset.file_cache.items():
if rpm_obj.name == pkg_name:
installed_rpms.append(rpm_path)
# Store the metadata in `buildinstall.metadata`.
metadata = {
"cmd": cmd,
"buildroot_rpms": sorted(buildroot_rpms),
"installed_rpms": sorted(installed_rpms),
}
return metadata
def _write_buildinstall_metadata(
self, compose, arch, variant, cmd, buildroot_rpms, pkgset_phase
):
"""
Write buildinstall.metadata file containing all the information about
buildinstall phase input and environment.
This file is later used to decide whether old buildinstall results can
be reused instead of generating them again.
:param Compose compose: Current compose.
:param str arch: Current architecture.
:param Variant variant: Compose variant.
:param list cmd: List of command line arguments passed to buildinstall task.
:param list buildroot_rpms: List of NVRAs of all RPMs installed in the
buildinstall task's buildroot.
:param PkgsetPhase pkgset_phase: Package set phase instance.
"""
# Write the list of buildroot RPMs into the `*-RPMs` log file.
log_filename = ("buildinstall-%s" % variant.uid) if variant else "buildinstall"
log_file = compose.paths.log.log_file(arch, log_filename + "-RPMs")
with open(log_file, "w") as f:
f.write("\n".join(buildroot_rpms))
# Write buildinstall.metadata only if particular variant is defined.
# The `variant` is `None` only if old "buildinstall" method is used.
if not variant:
return
metadata = self._generate_buildinstall_metadata(
compose, arch, variant, cmd, buildroot_rpms, pkgset_phase
)
log_fname = "buildinstall-%s-logs/dummy" % variant.uid
log_dir = os.path.dirname(compose.paths.log.log_file(arch, log_fname))
metadata_path = os.path.join(log_dir, "buildinstall.metadata")
with open(metadata_path, "wb") as f:
pickle.dump(metadata, f, protocol=pickle.HIGHEST_PROTOCOL)
def _load_old_buildinstall_metadata(self, compose, arch, variant):
"""
Helper method to load "buildinstall.metadata" from old compose.
:param Compose compose: Current compose.
:param str arch: Current architecture.
:param Variant variant: Compose variant.
"""
if not variant:
return None
log_fname = "buildinstall-%s-logs/dummy" % variant.uid
metadata = os.path.join(
os.path.dirname(compose.paths.log.log_file(arch, log_fname)),
"buildinstall.metadata",
)
old_metadata = compose.paths.old_compose_path(metadata)
if not old_metadata:
return None
compose.log_info("Loading old BUILDINSTALL phase metadata: %s", old_metadata)
try:
with open(old_metadata, "rb") as f:
old_result = pickle.load(f)
return old_result
except Exception as e:
compose.log_debug(
"Failed to load old BUILDINSTALL phase metadata %s : %s"
% (old_metadata, str(e))
)
return None
def _reuse_old_buildinstall_result(self, compose, arch, variant, cmd, pkgset_phase):
"""
Try to reuse old buildinstall results.
:param Compose compose: Current compose.
:param str arch: Current architecture.
:param Variant variant: Compose variant.
:param list cmd: List of command line arguments passed to buildinstall task.
:param PkgsetPhase pkgset_phase: Package set phase instance.
:return: True if old buildinstall phase results have been reused.
"""
log_msg = "Cannot reuse old BUILDINSTALL phase results - %s"
if not compose.conf["buildinstall_allow_reuse"]:
compose.log_info(log_msg % "reuse of old buildinstall results is disabled.")
return
# Load the old buildinstall.metadata.
old_metadata = self._load_old_buildinstall_metadata(compose, arch, variant)
if old_metadata is None:
compose.log_info(log_msg % "no old BUILDINSTALL metadata.")
return
# For now try to reuse only if pungi_buildinstall plugin is used.
# This is the easiest approach, because we later need to filter out
# some parts of `cmd` and for pungi_buildinstall, the `cmd` is a dict
# which makes this easy.
if not isinstance(old_metadata["cmd"], dict) or not isinstance(cmd, dict):
compose.log_info(log_msg % "pungi_buildinstall plugin is not used.")
return
# Filter out "outputdir" and "sources" because they change every time.
# The "sources" are not important, because we check the buildinstall
# input on RPM level.
cmd_copy = copy(cmd)
for key in ["outputdir", "sources"]:
cmd_copy.pop(key, None)
old_metadata["cmd"].pop(key, None)
# Do not reuse if command line arguments are not the same.
if old_metadata["cmd"] != cmd_copy:
compose.log_info(log_msg % "lorax command line arguments differ.")
return
# Check that the RPMs installed in the old boot.iso exists in the very
# same versions/releases in this compose.
for rpm_path in old_metadata["installed_rpms"]:
found = False
for pkgset in pkgset_phase.package_sets:
global_pkgset = pkgset["global"]
if rpm_path in global_pkgset.file_cache:
found = True
break
if not found:
compose.log_info(
log_msg % "RPM %s does not exist in new compose." % rpm_path
)
return
# Ask Koji for all the RPMs in the `runroot_tag` and check that
# those installed in the old buildinstall buildroot are still in the
# very same versions/releases.
koji_wrapper = kojiwrapper.KojiWrapper(compose)
rpms = koji_wrapper.koji_proxy.listTaggedRPMS(
compose.conf.get("runroot_tag"), inherit=True, latest=True
)[0]
rpm_nvras = set()
for rpm in rpms:
rpm_nvras.add(kobo.rpmlib.make_nvra(rpm, add_rpm=False, force_epoch=False))
for old_nvra in old_metadata["buildroot_rpms"]:
if old_nvra not in rpm_nvras:
compose.log_info(
log_msg % "RPM %s does not exist in new buildroot." % old_nvra
)
return
# We can reuse the old buildinstall results!
compose.log_info("Reusing old BUILDINSTALL phase output")
# Copy old buildinstall output to this compose.
final_output_dir = compose.paths.work.buildinstall_dir(arch, variant=variant)
old_final_output_dir = compose.paths.old_compose_path(final_output_dir)
copy_all(old_final_output_dir, final_output_dir)
# Copy old buildinstall logs to this compose.
log_fname = "buildinstall-%s-logs/dummy" % variant.uid
final_log_dir = os.path.dirname(compose.paths.log.log_file(arch, log_fname))
old_final_log_dir = compose.paths.old_compose_path(final_log_dir)
if not os.path.exists(final_log_dir):
makedirs(final_log_dir)
copy_all(old_final_log_dir, final_log_dir)
# Write the buildinstall metadata so next compose can reuse this compose.
self._write_buildinstall_metadata(
compose, arch, variant, cmd, old_metadata["buildroot_rpms"], pkgset_phase
)
return True
def worker(self, compose, arch, variant, cmd, pkgset_phase, num):
buildinstall_method = compose.conf["buildinstall_method"]
lorax_use_koji_plugin = compose.conf["lorax_use_koji_plugin"]
log_filename = ("buildinstall-%s" % variant.uid) if variant else "buildinstall"
log_file = compose.paths.log.log_file(arch, log_filename)
msg = "Running buildinstall for arch %s, variant %s" % (arch, variant)
output_dir = compose.paths.work.buildinstall_dir(
arch, allow_topdir_override=True, variant=variant
)
final_output_dir = compose.paths.work.buildinstall_dir(arch, variant=variant)
if (
os.path.isdir(output_dir)
and os.listdir(output_dir)
or os.path.isdir(final_output_dir)
and os.listdir(final_output_dir)
):
# output dir is *not* empty -> SKIP
self.pool.log_warning(
"[SKIP ] Buildinstall for arch %s, variant %s" % (arch, variant)
)
return
self.pool.log_info("[BEGIN] %s" % msg)
# Get list of packages which are needed in runroot.
packages = []
chown_paths = [output_dir]
if buildinstall_method == "lorax":
packages += ["lorax"]
chown_paths.append(_get_log_dir(compose, variant, arch))
packages += get_arch_variant_data(
compose.conf, "buildinstall_packages", arch, variant
)
if self._reuse_old_buildinstall_result(
compose, arch, variant, cmd, pkgset_phase
):
self.copy_files(compose, variant, arch)
self.pool.finished_tasks.add((variant.uid if variant else None, arch))
self.pool.reused_tasks.add((variant.uid if variant else None, arch))
self.pool.log_info("[DONE ] %s" % msg)
return
# This should avoid a possible race condition with multiple processes
# trying to get a kerberos ticket at the same time.
# Kerberos authentication failed:
# Permission denied in replay cache code (-1765328215)
time.sleep(num * 3)
# Start the runroot task.
runroot = Runroot(compose, phase="buildinstall")
task_id = None
if buildinstall_method == "lorax" and lorax_use_koji_plugin:
task_id = runroot.run_pungi_buildinstall(
cmd,
log_file=log_file,
arch=arch,
packages=packages,
weight=compose.conf["runroot_weights"].get("buildinstall"),
)
else:
try:
lorax_log_dir = _get_log_dir(compose, variant, arch)
except Exception:
lorax_log_dir = None
runroot.run(
cmd,
log_file=log_file,
arch=arch,
packages=packages,
mounts=[compose.topdir],
weight=compose.conf["runroot_weights"].get("buildinstall"),
chown_paths=chown_paths,
log_dir=lorax_log_dir,
)
if final_output_dir != output_dir:
if not os.path.exists(final_output_dir):
makedirs(final_output_dir)
results_dir = os.path.join(output_dir, "results")
copy_all(results_dir, final_output_dir)
# Get the log_dir into which we should copy the resulting log files.
log_fname = "buildinstall-%s-logs/dummy" % variant.uid
final_log_dir = os.path.dirname(compose.paths.log.log_file(arch, log_fname))
if not os.path.exists(final_log_dir):
makedirs(final_log_dir)
log_dir = os.path.join(output_dir, "logs")
copy_all(log_dir, final_log_dir)
elif lorax_use_koji_plugin:
# If Koji pungi-buildinstall is used, then the buildinstall results
# are attached as outputs to the Koji task. Download and unpack
# them to the correct location.
download_and_extract_archive(
compose, task_id, "results.tar.gz", final_output_dir
)
# Download the logs into proper location too.
log_fname = "buildinstall-%s-logs/dummy" % variant.uid
final_log_dir = os.path.dirname(compose.paths.log.log_file(arch, log_fname))
download_and_extract_archive(compose, task_id, "logs.tar.gz", final_log_dir)
rpms = runroot.get_buildroot_rpms()
self._write_buildinstall_metadata(
compose, arch, variant, cmd, rpms, pkgset_phase
)
self.copy_files(compose, variant, arch)
self.pool.finished_tasks.add((variant.uid if variant else None, arch))
self.pool.log_info("[DONE ] %s" % msg)
def copy_files(self, compose, variant, arch):
disc_type = compose.conf["disc_types"].get("dvd", "dvd")
buildinstall_dir = compose.paths.work.buildinstall_dir(arch)
# Lorax runs per-variant, so we need to tweak the source path
# to include variant.
if variant:
buildinstall_dir = os.path.join(buildinstall_dir, variant.uid)
# Find all relevant variants if lorax is not used.
variants = (
[variant]
if variant
else compose.get_variants(arch=arch, types=["self", "variant"])
)
for var in variants:
os_tree = compose.paths.compose.os_tree(arch, var)
# TODO: label is not used
label = ""
volid = get_volid(compose, arch, var, disc_type=disc_type)
can_fail = compose.can_fail(var, arch, "buildinstall")
tweak_buildinstall(
compose,
buildinstall_dir,
os_tree,
arch,
var.uid,
label,
volid,
self.pool.kickstart_file,
)
link_boot_iso(compose, arch, var, can_fail)
def _get_log_dir(compose, variant, arch):
"""Find directory where to store lorax logs in. If it's inside the compose,
create the directory.
"""
if compose.conf.get("buildinstall_topdir"):
log_dir = compose.paths.work.buildinstall_dir(
arch, allow_topdir_override=True, variant=variant
)
return os.path.join(log_dir, "logs")
# The paths module will modify the filename (by inserting arch). But we
# only care about the directory anyway.
log_filename = "buildinstall-%s-logs/dummy" % variant.uid
log_dir = os.path.dirname(compose.paths.log.log_file(arch, log_filename))
makedirs(log_dir)
return log_dir
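For reference, a minimal standalone sketch of the NVRA normalization that the reuse check above applies to Koji's listTaggedRPMS output. The dict is a simplified, hypothetical stand-in for one entry of that call's result, and the snippet assumes kobo is installed:

import kobo.rpmlib

# Simplified shape of one RPM entry as returned by Koji (values hypothetical).
rpm = {
    "name": "lorax",
    "version": "40.4",
    "release": "1.fc41",
    "arch": "x86_64",
    "epoch": None,
}

# The same call the reuse check uses to normalize old and new buildroot lists.
nvra = kobo.rpmlib.make_nvra(rpm, add_rpm=False, force_epoch=False)
print(nvra)  # lorax-40.4-1.fc41.x86_64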

932
pungi/phases/createiso.py Normal file
View File

@ -0,0 +1,932 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
import itertools
import os
import random
import shutil
import stat
import json
import productmd.treeinfo
from productmd.images import Image
from kobo.threads import ThreadPool, WorkerThread
from kobo.shortcuts import run, relative_path, compute_file_checksums
from six.moves import shlex_quote
from pungi.wrappers import iso
from pungi.wrappers.createrepo import CreaterepoWrapper
from pungi.wrappers import kojiwrapper
from pungi.phases.base import PhaseBase, PhaseLoggerMixin
from pungi.util import (
makedirs,
get_volid,
get_arch_variant_data,
failable,
get_file_size,
get_mtime,
read_json_file,
)
from pungi.media_split import MediaSplitter, convert_media_size
from pungi.compose_metadata.discinfo import read_discinfo, write_discinfo
from pungi.runroot import Runroot
from .. import createiso
class CreateisoPhase(PhaseLoggerMixin, PhaseBase):
name = "createiso"
def __init__(self, compose, buildinstall_phase):
super(CreateisoPhase, self).__init__(compose)
self.pool = ThreadPool(logger=self.logger)
self.bi = buildinstall_phase
def _find_rpms(self, path):
"""Check if there are some RPMs in the path."""
for _, _, files in os.walk(path):
for fn in files:
if fn.endswith(".rpm"):
return True
return False
def _is_bootable(self, variant, arch):
if arch == "src":
return False
if variant.type != "variant":
return False
skip = get_arch_variant_data(
self.compose.conf, "buildinstall_skip", arch, variant
)
if skip == [True]:
# Buildinstall is skipped for this tree. Can't create a bootable ISO.
return False
return bool(self.compose.conf.get("buildinstall_method", ""))
def _metadata_path(self, variant, arch, disc_num, disc_count):
return self.compose.paths.log.log_file(
arch,
"createiso-%s-%d-%d" % (variant.uid, disc_num, disc_count),
ext="json",
)
def save_reuse_metadata(self, cmd, variant, arch, opts):
"""Save metadata for future composes to verify if the compose can be reused."""
metadata = {
"cmd": cmd,
"opts": opts._asdict(),
}
metadata_path = self._metadata_path(
variant, arch, cmd["disc_num"], cmd["disc_count"]
)
with open(metadata_path, "w") as f:
json.dump(metadata, f, indent=2)
return metadata
def _load_old_metadata(self, cmd, variant, arch):
metadata_path = self._metadata_path(
variant, arch, cmd["disc_num"], cmd["disc_count"]
)
old_path = self.compose.paths.old_compose_path(metadata_path)
self.logger.info(
"Loading old metadata for %s.%s from: %s", variant, arch, old_path
)
try:
return read_json_file(old_path)
except Exception:
return None
def perform_reuse(self, cmd, variant, arch, opts, iso_path):
"""
Copy all related files from the old compose to the new one. As a last
step, add the new image to the metadata.
"""
linker = OldFileLinker(self.logger)
old_file_name = os.path.basename(iso_path)
current_file_name = os.path.basename(cmd["iso_path"])
try:
# Hardlink ISO and manifest
for suffix in ("", ".manifest"):
linker.link(iso_path + suffix, cmd["iso_path"] + suffix)
# Copy log files
# The log file name includes the filename of the image, so we need to
# find the old file under the old name and link it under the new name.
log_file = self.compose.paths.log.log_file(
arch, "createiso-%s" % current_file_name
)
old_log_file = self.compose.paths.old_compose_path(
self.compose.paths.log.log_file(arch, "createiso-%s" % old_file_name)
)
linker.link(old_log_file, log_file)
# Copy jigdo files
if opts.jigdo_dir:
old_jigdo_dir = self.compose.paths.old_compose_path(opts.jigdo_dir)
for suffix in (".template", ".jigdo"):
linker.link(
os.path.join(old_jigdo_dir, old_file_name) + suffix,
os.path.join(opts.jigdo_dir, current_file_name) + suffix,
)
except Exception:
# A problem happened while linking some file, let's clean up
# everything.
linker.abort()
raise
# Add image to manifest
add_iso_to_metadata(
self.compose,
variant,
arch,
cmd["iso_path"],
bootable=cmd["bootable"],
disc_num=cmd["disc_num"],
disc_count=cmd["disc_count"],
)
if self.compose.notifier:
self.compose.notifier.send(
"createiso-imagedone",
file=cmd["iso_path"],
arch=arch,
variant=str(variant),
)
def try_reuse(self, cmd, variant, arch, opts):
"""Try to reuse image from previous compose.
:returns bool: True if reuse was successful, False otherwise
"""
if not self.compose.conf["createiso_allow_reuse"]:
return False
log_msg = "Cannot reuse ISO for %s.%s" % (variant, arch)
current_metadata = self.save_reuse_metadata(cmd, variant, arch, opts)
if opts.buildinstall_method and not self.bi.reused(variant, arch):
# If buildinstall phase was not reused for some reason, we can not
# reuse any bootable image. If a package change caused rebuild of
# boot.iso, we would catch it here too, but there could be a
# configuration change in lorax template which would remain
# undetected.
self.logger.info("%s - boot configuration changed", log_msg)
return False
# Check old compose configuration: extra_files and product_ids can be
# reflected on ISO.
old_config = self.compose.load_old_compose_config()
if not old_config:
self.logger.info("%s - no config for old compose", log_msg)
return False
# Disable reuse if unsigned packages are allowed. The older compose
# could have unsigned packages, and those may have been signed since
# then. We want to regenerate the ISO to have signatures.
if None in self.compose.conf["sigkeys"]:
self.logger.info("%s - unsigned packages are allowed", log_msg)
return False
# Convert current configuration to JSON and back to encode it similarly
# to the old one
config = json.loads(json.dumps(self.compose.conf))
# Skip a selection of options: these affect what packages can be
# included (which we explicitly check later on) or are irrelevant
# to the ISO.
config_whitelist = set(
[
"gather_lookaside_repos",
"pkgset_koji_builds",
"pkgset_koji_scratch_tasks",
"pkgset_koji_module_builds",
"osbs",
"osbuild",
]
)
for opt in self.compose.conf:
if opt in config_whitelist:
continue
if old_config.get(opt) != config.get(opt):
self.logger.info("%s - option %s differs", log_msg, opt)
return False
old_metadata = self._load_old_metadata(cmd, variant, arch)
if not old_metadata:
self.logger.info("%s - no old metadata found", log_msg)
return False
# Test if volume ID matches - volid can be generated dynamically based on
# other values, and could change even if nothing else is different.
if current_metadata["opts"]["volid"] != old_metadata["opts"]["volid"]:
self.logger.info("%s - volume ID differs", log_msg)
return False
# Compare packages on the ISO.
if compare_packages(
old_metadata["opts"]["graft_points"],
current_metadata["opts"]["graft_points"],
):
self.logger.info("%s - packages differ", log_msg)
return False
try:
self.perform_reuse(
cmd,
variant,
arch,
opts,
old_metadata["cmd"]["iso_path"],
)
return True
except Exception as exc:
self.compose.log_error(
"Error while reusing ISO for %s.%s: %s", variant, arch, exc
)
self.compose.traceback("createiso-reuse-%s-%s" % (variant, arch))
return False
def run(self):
symlink_isos_to = self.compose.conf.get("symlink_isos_to")
disc_type = self.compose.conf["disc_types"].get("dvd", "dvd")
deliverables = []
commands = []
for variant in self.compose.get_variants(
types=["variant", "layered-product", "optional"]
):
if variant.is_empty:
continue
for arch in variant.arches + ["src"]:
skip_iso = get_arch_variant_data(
self.compose.conf, "createiso_skip", arch, variant
)
if skip_iso == [True]:
self.logger.info(
"Skipping createiso for %s.%s due to config option"
% (variant, arch)
)
continue
volid = get_volid(self.compose, arch, variant, disc_type=disc_type)
os_tree = self.compose.paths.compose.os_tree(arch, variant)
iso_dir = self.compose.paths.compose.iso_dir(
arch, variant, symlink_to=symlink_isos_to
)
if not iso_dir:
continue
if not self._find_rpms(os_tree):
self.logger.warning(
"No RPMs found for %s.%s, skipping ISO" % (variant.uid, arch)
)
continue
bootable = self._is_bootable(variant, arch)
if bootable and not self.bi.succeeded(variant, arch):
self.logger.warning(
"ISO should be bootable, but buildinstall failed. "
"Skipping for %s.%s" % (variant, arch)
)
continue
split_iso_data = split_iso(
self.compose, arch, variant, no_split=bootable, logger=self.logger
)
disc_count = len(split_iso_data)
for disc_num, iso_data in enumerate(split_iso_data):
disc_num += 1
filename = self.compose.get_image_name(
arch, variant, disc_type=disc_type, disc_num=disc_num
)
iso_path = self.compose.paths.compose.iso_path(
arch, variant, filename, symlink_to=symlink_isos_to
)
if os.path.isfile(iso_path):
self.logger.warning(
"Skipping mkisofs, image already exists: %s", iso_path
)
continue
deliverables.append(iso_path)
graft_points = prepare_iso(
self.compose,
arch,
variant,
disc_num=disc_num,
disc_count=disc_count,
split_iso_data=iso_data,
)
cmd = {
"iso_path": iso_path,
"bootable": bootable,
"cmd": [],
"label": "", # currently not used
"disc_num": disc_num,
"disc_count": disc_count,
}
if os.path.islink(iso_dir):
cmd["mount"] = os.path.abspath(
os.path.join(os.path.dirname(iso_dir), os.readlink(iso_dir))
)
opts = createiso.CreateIsoOpts(
output_dir=iso_dir,
iso_name=filename,
volid=volid,
graft_points=graft_points,
arch=arch,
supported=self.compose.supported,
hfs_compat=self.compose.conf["iso_hfs_ppc64le_compatible"],
use_xorrisofs=self.compose.conf.get("createiso_use_xorrisofs"),
iso_level=get_iso_level_config(self.compose, variant, arch),
)
if bootable:
opts = opts._replace(
buildinstall_method=self.compose.conf[
"buildinstall_method"
],
boot_iso=os.path.join(os_tree, "images", "boot.iso"),
)
if self.compose.conf["create_jigdo"]:
jigdo_dir = self.compose.paths.compose.jigdo_dir(arch, variant)
opts = opts._replace(jigdo_dir=jigdo_dir, os_tree=os_tree)
# Try to reuse
if self.try_reuse(cmd, variant, arch, opts):
# Reuse was successful, go to next ISO
continue
script_dir = self.compose.paths.work.tmp_dir(arch, variant)
opts = opts._replace(script_dir=script_dir)
script_file = os.path.join(script_dir, "createiso-%s.sh" % filename)
with open(script_file, "w") as f:
createiso.write_script(opts, f)
cmd["cmd"] = ["bash", script_file]
commands.append((cmd, variant, arch))
if self.compose.notifier:
self.compose.notifier.send("createiso-targets", deliverables=deliverables)
for cmd, variant, arch in commands:
self.pool.add(CreateIsoThread(self.pool))
self.pool.queue_put((self.compose, cmd, variant, arch))
self.pool.start()
def read_packages(graft_points):
"""Read packages that were listed in given graft points file.
Only files under Packages directory are considered. Particularly this
excludes .discinfo, .treeinfo and media.repo as well as repodata and
any extra files.
Extra files are easier to check by configuration (same name doesn't
imply same content). Repodata depend entirely on included packages (and
possibly product id certificate), but are affected by current time
which can change checksum despite data being the same.
"""
with open(graft_points) as f:
return set(
line.split("=", 1)[0]
for line in f
if line.startswith("Packages/") or "/Packages/" in line
)
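# A hedged illustration of the graft-points format parsed above; each line
# maps a path on the ISO to a path on disk (all paths hypothetical):
#   Packages/b/bash-5.2.26-3.fc41.x86_64.rpm=/compose/Server/x86_64/os/Packages/b/bash-5.2.26-3.fc41.x86_64.rpm
#   .treeinfo=/work/x86_64/iso/Fedora-41-x86_64/.treeinfo
# Only the left-hand side of lines under a Packages/ directory is kept.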
def compare_packages(old_graft_points, new_graft_points):
"""Read packages from the two files and compare them.
:returns bool: True if there are differences, False otherwise
"""
old_files = read_packages(old_graft_points)
new_files = read_packages(new_graft_points)
return old_files != new_files
class CreateIsoThread(WorkerThread):
def fail(self, compose, cmd, variant, arch):
self.pool.log_error("CreateISO failed, removing ISO: %s" % cmd["iso_path"])
try:
# remove incomplete ISO
os.unlink(cmd["iso_path"])
# TODO: remove jigdo & template
except OSError:
pass
if compose.notifier:
compose.notifier.send(
"createiso-imagefail",
file=cmd["iso_path"],
arch=arch,
variant=str(variant),
)
def process(self, item, num):
compose, cmd, variant, arch = item
can_fail = compose.can_fail(variant, arch, "iso")
with failable(
compose, can_fail, variant, arch, "iso", logger=self.pool._logger
):
self.worker(compose, cmd, variant, arch, num)
def worker(self, compose, cmd, variant, arch, num):
mounts = [compose.topdir]
if "mount" in cmd:
mounts.append(cmd["mount"])
bootable = cmd["bootable"]
log_file = compose.paths.log.log_file(
arch, "createiso-%s" % os.path.basename(cmd["iso_path"])
)
msg = "Creating ISO (arch: %s, variant: %s): %s" % (
arch,
variant,
os.path.basename(cmd["iso_path"]),
)
self.pool.log_info("[BEGIN] %s" % msg)
try:
run_createiso_command(
num,
compose,
bootable,
arch,
cmd["cmd"],
mounts,
log_file,
cmd["iso_path"],
)
except Exception:
self.fail(compose, cmd, variant, arch)
raise
add_iso_to_metadata(
compose,
variant,
arch,
cmd["iso_path"],
cmd["bootable"],
cmd["disc_num"],
cmd["disc_count"],
)
# Delete staging directory if present.
staging_dir = compose.paths.work.iso_staging_dir(
arch, variant, filename=os.path.basename(cmd["iso_path"]), create_dir=False
)
if os.path.exists(staging_dir):
try:
shutil.rmtree(staging_dir)
except Exception as e:
self.pool.log_warning(
"Failed to clean up staging dir: %s %s" % (staging_dir, str(e))
)
self.pool.log_info("[DONE ] %s" % msg)
if compose.notifier:
compose.notifier.send(
"createiso-imagedone",
file=cmd["iso_path"],
arch=arch,
variant=str(variant),
)
def add_iso_to_metadata(
compose,
variant,
arch,
iso_path,
bootable,
disc_num=1,
disc_count=1,
additional_variants=None,
):
img = Image(compose.im)
img.path = iso_path.replace(compose.paths.compose.topdir(), "").lstrip("/")
img.mtime = get_mtime(iso_path)
img.size = get_file_size(iso_path)
img.arch = arch
# XXX: HARDCODED
img.type = "dvd"
img.format = "iso"
img.disc_number = disc_num
img.disc_count = disc_count
img.bootable = bootable
img.subvariant = variant.uid
img.implant_md5 = iso.get_implanted_md5(iso_path, logger=compose._logger)
if additional_variants:
img.unified = True
img.additional_variants = additional_variants
setattr(img, "can_fail", compose.can_fail(variant, arch, "iso"))
setattr(img, "deliverable", "iso")
try:
img.volume_id = iso.get_volume_id(
iso_path,
compose.conf.get("createiso_use_xorrisofs"),
)
except RuntimeError:
pass
if arch == "src":
for variant_arch in variant.arches:
compose.im.add(variant.uid, variant_arch, img)
else:
compose.im.add(variant.uid, arch, img)
return img
def run_createiso_command(
num, compose, bootable, arch, cmd, mounts, log_file, iso_path
):
packages = [
"coreutils",
"xorriso" if compose.conf.get("createiso_use_xorrisofs") else "genisoimage",
"isomd5sum",
]
if compose.conf["create_jigdo"]:
packages.append("jigdo")
if bootable:
extra_packages = {
"lorax": ["lorax", "which"],
}
packages.extend(extra_packages[compose.conf["buildinstall_method"]])
runroot = Runroot(compose, phase="createiso")
build_arch = arch
if runroot.runroot_method == "koji" and not bootable:
runroot_tag = compose.conf["runroot_tag"]
koji_wrapper = kojiwrapper.KojiWrapper(compose)
koji_proxy = koji_wrapper.koji_proxy
tag_info = koji_proxy.getTag(runroot_tag)
if not tag_info:
raise RuntimeError('Tag "%s" does not exist.' % runroot_tag)
tag_arches = tag_info["arches"].split(" ")
if "x86_64" in tag_arches:
# assign non-bootable images to x86_64 if possible
build_arch = "x86_64"
elif build_arch == "src":
# pick random arch from available runroot tag arches
build_arch = random.choice(tag_arches)
runroot.run(
cmd,
log_file=log_file,
arch=build_arch,
packages=packages,
mounts=mounts,
weight=compose.conf["runroot_weights"].get("createiso"),
)
if bootable and compose.conf.get("createiso_use_xorrisofs"):
fix_treeinfo_checksums(compose, iso_path, arch)
def fix_treeinfo_checksums(compose, iso_path, arch):
"""It is possible for the ISO to contain a .treefile with incorrect
checksums. By modifying the ISO (adding files) some of the images may
change.
This function fixes that after the fact by looking for incorrect checksums,
recalculating them and updating the .treeinfo file. Since the size of the
file doesn't change, this seems to not change any images.
"""
modified = False
with iso.mount(iso_path, compose._logger) as mountpoint:
ti = productmd.TreeInfo()
ti.load(os.path.join(mountpoint, ".treeinfo"))
for image, (type_, expected) in ti.checksums.checksums.items():
checksums = compute_file_checksums(os.path.join(mountpoint, image), [type_])
actual = checksums[type_]
if actual == expected:
# Everything fine here, skip to next image.
continue
compose.log_debug("%s: %s: checksum mismatch", iso_path, image)
# Update treeinfo with correct checksum
ti.checksums.checksums[image] = (type_, actual)
modified = True
if not modified:
compose.log_debug("%s: All checksums match, nothing to do.", iso_path)
return
# Create the temporary directory before the try block so the cleanup in
# "finally" never sees an unbound name if mkdtemp itself fails.
tmpdir = compose.mkdtemp(arch, prefix="fix-checksum-")
try:
# Write modified .treeinfo
ti_path = os.path.join(tmpdir, ".treeinfo")
compose.log_debug("Storing modified .treeinfo in %s", ti_path)
ti.dump(ti_path)
# Write a modified DVD into a temporary path, that is atomically moved
# over the original file.
fixed_path = os.path.join(tmpdir, "fixed-checksum-dvd.iso")
cmd = ["xorriso"]
cmd.extend(
itertools.chain.from_iterable(
iso.xorriso_commands(arch, iso_path, fixed_path)
)
)
cmd.extend(["-map", ti_path, ".treeinfo"])
run(
cmd,
logfile=compose.paths.log.log_file(
arch, "checksum-fix_generate_%s" % os.path.basename(iso_path)
),
)
# The modified ISO no longer has implanted MD5, so that needs to be
# fixed again.
compose.log_debug("Implanting new MD5 to %s", fixed_path)
run(
iso.get_implantisomd5_cmd(fixed_path, compose.supported),
logfile=compose.paths.log.log_file(
arch, "checksum-fix_implantisomd5_%s" % os.path.basename(iso_path)
),
)
# All done, move the updated image to the final location.
compose.log_debug("Updating %s", iso_path)
os.rename(fixed_path, iso_path)
finally:
shutil.rmtree(tmpdir)
def split_iso(compose, arch, variant, no_split=False, logger=None):
"""
Split contents of the os/ directory for given tree into chunks fitting on ISO.
All files from the directory are taken except for possible boot.iso image.
Files added in extra_files phase are put on all disks.
If `no_split` is set, we will pretend that the media is practically
infinite so that everything goes on a single disc. A warning is printed if
the size is bigger than configured.
"""
if not logger:
logger = compose._logger
media_size = compose.conf["iso_size"]
media_reserve = compose.conf["split_iso_reserve"]
split_size = convert_media_size(media_size) - convert_media_size(media_reserve)
real_size = None if no_split else split_size
ms = MediaSplitter(real_size, compose, logger=logger)
os_tree = compose.paths.compose.os_tree(arch, variant)
extra_files_dir = compose.paths.work.extra_files_dir(arch, variant)
# scan extra files to mark them "sticky" -> they'll be on all media after split
extra_files = set(["media.repo"])
for root, dirs, files in os.walk(extra_files_dir):
for fn in files:
path = os.path.join(root, fn)
rel_path = relative_path(path, extra_files_dir.rstrip("/") + "/")
extra_files.add(rel_path)
packages = []
all_files = []
all_files_ignore = []
ti = productmd.treeinfo.TreeInfo()
ti.load(os.path.join(os_tree, ".treeinfo"))
boot_iso_rpath = ti.images.images.get(arch, {}).get("boot.iso", None)
if boot_iso_rpath:
all_files_ignore.append(boot_iso_rpath)
if all_files_ignore:
logger.debug("split_iso all_files_ignore = %s" % ", ".join(all_files_ignore))
for root, dirs, files in os.walk(os_tree):
for dn in dirs[:]:
repo_dir = os.path.join(root, dn)
if repo_dir == os.path.join(
compose.paths.compose.repository(arch, variant), "repodata"
):
dirs.remove(dn)
for fn in files:
path = os.path.join(root, fn)
rel_path = relative_path(path, os_tree.rstrip("/") + "/")
sticky = rel_path in extra_files
if rel_path in all_files_ignore:
logger.info("split_iso: Skipping %s" % rel_path)
continue
if root.startswith(compose.paths.compose.packages(arch, variant)):
packages.append((path, os.path.getsize(path), sticky))
else:
all_files.append((path, os.path.getsize(path), sticky))
for path, size, sticky in all_files + packages:
ms.add_file(path, size, sticky)
logger.debug("Splitting media for %s.%s:" % (variant.uid, arch))
result = ms.split()
if no_split and result[0]["size"] > split_size:
logger.warning(
"ISO for %s.%s does not fit on single media! It is %s bytes too big. "
"(Total size: %s B)"
% (variant.uid, arch, result[0]["size"] - split_size, result[0]["size"])
)
return result
def prepare_iso(
compose, arch, variant, disc_num=1, disc_count=None, split_iso_data=None
):
tree_dir = compose.paths.compose.os_tree(arch, variant)
filename = compose.get_image_name(arch, variant, disc_num=disc_num)
iso_dir = compose.paths.work.iso_dir(arch, filename)
# modify treeinfo
ti_path = os.path.join(tree_dir, ".treeinfo")
ti = load_and_tweak_treeinfo(ti_path, disc_num, disc_count)
copy_boot_images(tree_dir, iso_dir)
if disc_count > 1:
# remove repodata/repomd.xml from checksums, create a new one later
if "repodata/repomd.xml" in ti.checksums.checksums:
del ti.checksums.checksums["repodata/repomd.xml"]
# rebuild repodata
createrepo_c = compose.conf["createrepo_c"]
createrepo_checksum = compose.conf["createrepo_checksum"]
repo = CreaterepoWrapper(createrepo_c=createrepo_c)
file_list = "%s-file-list" % iso_dir
packages_dir = compose.paths.compose.packages(arch, variant)
file_list_content = []
for i in split_iso_data["files"]:
if not i.endswith(".rpm"):
continue
if not i.startswith(packages_dir):
continue
rel_path = relative_path(i, tree_dir.rstrip("/") + "/")
file_list_content.append(rel_path)
if file_list_content:
# write modified repodata only if there are packages available
run("cp -a %s/repodata %s/" % (shlex_quote(tree_dir), shlex_quote(iso_dir)))
with open(file_list, "w") as f:
f.write("\n".join(file_list_content))
cmd = repo.get_createrepo_cmd(
tree_dir,
update=True,
database=True,
skip_stat=True,
pkglist=file_list,
outputdir=iso_dir,
workers=compose.conf["createrepo_num_workers"],
checksum=createrepo_checksum,
)
run(cmd)
# add repodata/repomd.xml back to checksums
ti.checksums.add(
"repodata/repomd.xml", createrepo_checksum, root_dir=iso_dir
)
new_ti_path = os.path.join(iso_dir, ".treeinfo")
ti.dump(new_ti_path)
# modify discinfo
di_path = os.path.join(tree_dir, ".discinfo")
data = read_discinfo(di_path)
data["disc_numbers"] = [disc_num]
new_di_path = os.path.join(iso_dir, ".discinfo")
write_discinfo(new_di_path, **data)
if not disc_count or disc_count == 1:
data = iso.get_graft_points(compose.paths.compose.topdir(), [tree_dir, iso_dir])
else:
data = iso.get_graft_points(
compose.paths.compose.topdir(),
[iso._paths_from_list(tree_dir, split_iso_data["files"]), iso_dir],
)
if compose.conf["createiso_break_hardlinks"]:
compose.log_debug(
"Breaking hardlinks for ISO %s for %s.%s" % (filename, variant, arch)
)
break_hardlinks(
data, compose.paths.work.iso_staging_dir(arch, variant, filename)
)
# Create hardlinks for files with duplicate contents.
compose.log_debug(
"Creating hardlinks for ISO %s for %s.%s" % (filename, variant, arch)
)
create_hardlinks(
compose.paths.work.iso_staging_dir(arch, variant, filename),
log_file=compose.paths.log.log_file(
arch, "iso-hardlink-%s.log" % variant.uid
),
)
# TODO: /content /graft-points
gp = "%s-graft-points" % iso_dir
iso.write_graft_points(gp, data, exclude=["*/lost+found", "*/boot.iso"])
return gp
def load_and_tweak_treeinfo(ti_path, disc_num=1, disc_count=1):
"""Treeinfo on the media should not contain any reference to boot.iso and
it should also have a valid [media] section.
"""
ti = productmd.treeinfo.TreeInfo()
ti.load(ti_path)
ti.media.totaldiscs = disc_count or 1
ti.media.discnum = disc_num
# remove boot.iso from all sections
paths = set()
for platform in ti.images.images:
if "boot.iso" in ti.images.images[platform]:
paths.add(ti.images.images[platform].pop("boot.iso"))
# remove boot.iso from checksums
for i in paths:
if i in ti.checksums.checksums.keys():
del ti.checksums.checksums[i]
return ti
def copy_boot_images(src, dest):
"""When mkisofs is called it tries to modify isolinux/isolinux.bin and
images/boot.img. Therefore we need to make copies of them.
"""
for i in ("isolinux/isolinux.bin", "images/boot.img"):
src_path = os.path.join(src, i)
dst_path = os.path.join(dest, i)
if os.path.exists(src_path):
makedirs(os.path.dirname(dst_path))
shutil.copy2(src_path, dst_path)
def break_hardlinks(graft_points, staging_dir):
"""Iterate over graft points and copy any file that has more than 1
hardlink into the staging directory. Replace the entry in the dict.
"""
for f in graft_points:
info = os.stat(graft_points[f])
if stat.S_ISREG(info.st_mode) and info.st_nlink > 1:
dest_path = os.path.join(staging_dir, graft_points[f].lstrip("/"))
makedirs(os.path.dirname(dest_path))
shutil.copy2(graft_points[f], dest_path)
graft_points[f] = dest_path
def create_hardlinks(staging_dir, log_file):
"""Create hardlinks within the staging directory.
Should happen after break_hardlinks()
"""
cmd = ["/usr/sbin/hardlink", "-c", "-vv", staging_dir]
run(cmd, logfile=log_file, show_cmd=True)
class OldFileLinker(object):
"""
A wrapper around os.link that remembers which files were linked and can
clean them up.
"""
def __init__(self, logger):
self.logger = logger
self.linked_files = []
def link(self, src, dst):
self.logger.debug("Hardlinking %s to %s", src, dst)
os.link(src, dst)
self.linked_files.append(dst)
def abort(self):
"""Clean up all files created by this instance."""
for f in self.linked_files:
os.unlink(f)
def get_iso_level_config(compose, variant, arch):
"""
Get configured ISO level for this variant and architecture.
"""
level = compose.conf.get("iso_level")
if isinstance(level, list):
level = None
for c in get_arch_variant_data(compose.conf, "iso_level", arch, variant):
level = c
return level
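For context, a hedged sketch of the two configuration shapes this helper accepts. The scalar form applies everywhere; the list form follows pungi's usual (variant regex, {arch: value}) convention and, per the loop above, the last match wins (all values hypothetical):

# Scalar form: one ISO9660 level for every image.
iso_level = 3

# List form: resolved per variant/arch; later matches override earlier ones.
iso_level = [
    (".*", {"*": 3}),             # default for all variants and arches
    ("^Server$", {"x86_64": 4}),  # override for Server.x86_64
]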

496
pungi/phases/createrepo.py Normal file
View File

@ -0,0 +1,496 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
__all__ = ("create_variant_repo",)
import copy
import errno
import glob
import os
import shutil
import threading
import xml.dom.minidom
import productmd.modules
import productmd.rpms
from kobo.shortcuts import relative_path, run
from kobo.threads import ThreadPool, WorkerThread
from ..module_util import Modulemd, collect_module_defaults, collect_module_obsoletes
from ..util import (
get_arch_variant_data,
read_single_module_stream_from_file,
temp_dir,
)
from ..wrappers.createrepo import CreaterepoWrapper
from ..wrappers.scm import get_dir_from_scm
from .base import PhaseBase
CACHE_TOPDIR = "/var/cache/pungi/createrepo_c/"
createrepo_lock = threading.Lock()
createrepo_dirs = set()
class CreaterepoPhase(PhaseBase):
name = "createrepo"
def __init__(self, compose, pkgset_phase=None):
PhaseBase.__init__(self, compose)
self.pool = ThreadPool(logger=self.compose._logger)
self.modules_metadata = ModulesMetadata(compose)
self.pkgset_phase = pkgset_phase
def validate(self):
errors = []
if not self.compose.old_composes and self.compose.conf.get("createrepo_deltas"):
errors.append("Can not generate deltas without old compose")
if errors:
raise ValueError("\n".join(errors))
def run(self):
get_productids_from_scm(self.compose)
reference_pkgset = None
if self.pkgset_phase and self.pkgset_phase.package_sets:
reference_pkgset = self.pkgset_phase.package_sets[-1]
for i in range(self.compose.conf["createrepo_num_threads"]):
self.pool.add(
CreaterepoThread(self.pool, reference_pkgset, self.modules_metadata)
)
for variant in self.compose.get_variants():
if variant.is_empty:
continue
if variant.uid in self.compose.conf.get("createrepo_extra_modulemd", {}):
# Clone extra modulemd repository if it's configured.
get_dir_from_scm(
self.compose.conf["createrepo_extra_modulemd"][variant.uid],
self.compose.paths.work.tmp_dir(variant=variant, create_dir=False),
compose=self.compose,
)
self.pool.queue_put((self.compose, None, variant, "srpm"))
for arch in variant.arches:
self.pool.queue_put((self.compose, arch, variant, "rpm"))
self.pool.queue_put((self.compose, arch, variant, "debuginfo"))
self.pool.start()
def stop(self):
super(CreaterepoPhase, self).stop()
self.modules_metadata.write_modules_metadata()
def create_variant_repo(
compose, arch, variant, pkg_type, pkgset, modules_metadata=None
):
types = {
"rpm": (
"binary",
lambda **kwargs: compose.paths.compose.repository(
arch=arch, variant=variant, **kwargs
),
),
"srpm": (
"source",
lambda **kwargs: compose.paths.compose.repository(
arch="src", variant=variant, **kwargs
),
),
"debuginfo": (
"debug",
lambda **kwargs: compose.paths.compose.debug_repository(
arch=arch, variant=variant, **kwargs
),
),
}
if variant.is_empty or (arch is None and pkg_type != "srpm"):
compose.log_info(
"[SKIP ] Creating repo (arch: %s, variant: %s)" % (arch, variant)
)
return
createrepo_c = compose.conf["createrepo_c"]
createrepo_checksum = compose.conf["createrepo_checksum"]
repo = CreaterepoWrapper(createrepo_c=createrepo_c)
repo_dir_arch = None
if pkgset:
repo_dir_arch = pkgset.paths["global" if pkg_type == "srpm" else arch]
try:
repo_dir = types[pkg_type][1]()
except KeyError:
raise ValueError("Unknown package type: %s" % pkg_type)
msg = "Creating repo (arch: %s, variant: %s): %s" % (arch, variant, repo_dir)
# HACK: using global lock
# This is important when addons put packages into parent variant directory.
# There can't be multiple createrepo processes operating on the same
# directory.
with createrepo_lock:
if repo_dir in createrepo_dirs:
compose.log_warning("[SKIP ] Already in progress: %s" % msg)
return
createrepo_dirs.add(repo_dir)
compose.log_info("[BEGIN] %s" % msg)
# We only want delta RPMs for binary repos.
with_deltas = pkg_type == "rpm" and _has_deltas(compose, variant, arch)
rpms = set()
rpm_nevras = set()
# read rpms from metadata rather than guessing it by scanning filesystem
manifest_file = compose.paths.compose.metadata("rpms.json")
manifest = productmd.rpms.Rpms()
manifest.load(manifest_file)
for rpms_arch, data in manifest.rpms.get(variant.uid, {}).items():
if arch is not None and arch != rpms_arch:
continue
for srpm_data in data.values():
for rpm_nevra, rpm_data in srpm_data.items():
if types[pkg_type][0] != rpm_data["category"]:
continue
path = os.path.join(compose.topdir, "compose", rpm_data["path"])
rel_path = relative_path(path, repo_dir.rstrip("/") + "/")
rpms.add(rel_path)
rpm_nevras.add(str(rpm_nevra))
file_list = compose.paths.work.repo_package_list(arch, variant, pkg_type)
with open(file_list, "w") as f:
for rel_path in sorted(rpms):
f.write("%s\n" % rel_path)
# Only find last compose when we actually want delta RPMs.
old_package_dirs = _get_old_package_dirs(compose, repo_dir) if with_deltas else None
if old_package_dirs:
# If we are creating deltas, we can not reuse existing metadata, as
# that would stop deltas from being created.
# This seems to only affect createrepo_c though.
repo_dir_arch = None
comps_path = None
if compose.has_comps and pkg_type == "rpm":
comps_path = compose.paths.work.comps(arch=arch, variant=variant)
if compose.conf["createrepo_enable_cache"]:
cachedir = os.path.join(
CACHE_TOPDIR,
"%s-%s" % (compose.conf["release_short"], os.getuid()),
)
if not os.path.exists(cachedir):
try:
os.makedirs(cachedir)
except Exception as e:
compose.log_warning(
"Cache disabled because cannot create cache dir %s %s"
% (cachedir, str(e))
)
cachedir = None
else:
cachedir = None
cmd = repo.get_createrepo_cmd(
repo_dir,
update=True,
database=compose.should_create_yum_database,
skip_stat=True,
pkglist=file_list,
outputdir=repo_dir,
workers=compose.conf["createrepo_num_workers"],
groupfile=comps_path,
update_md_path=repo_dir_arch,
checksum=createrepo_checksum,
deltas=with_deltas,
oldpackagedirs=old_package_dirs,
use_xz=compose.conf["createrepo_use_xz"],
extra_args=compose.conf["createrepo_extra_args"],
cachedir=cachedir,
)
log_file = compose.paths.log.log_file(
arch, "createrepo-%s.%s" % (variant, pkg_type)
)
run(cmd, logfile=log_file, show_cmd=True)
# call modifyrepo to inject productid
product_id = compose.conf.get("product_id")
if product_id and pkg_type == "rpm":
# add product certificate to base (rpm) repo; skip source and debug
product_id_path = compose.paths.work.product_id(arch, variant)
if os.path.isfile(product_id_path):
cmd = repo.get_modifyrepo_cmd(
os.path.join(repo_dir, "repodata"), product_id_path, compress_type="gz"
)
log_file = compose.paths.log.log_file(arch, "modifyrepo-%s" % variant)
run(cmd, logfile=log_file, show_cmd=True)
# productinfo is not supported by modifyrepo in any way
# this is a HACK to make CDN happy (dmach: at least I think,
# need to confirm with dgregor)
shutil.copy2(
product_id_path, os.path.join(repo_dir, "repodata", "productid")
)
# call modifyrepo to inject modulemd if needed
if pkg_type == "rpm" and arch in variant.arch_mmds and Modulemd is not None:
mod_index = Modulemd.ModuleIndex()
metadata = []
for module_id, mmd in variant.arch_mmds.get(arch, {}).items():
if modules_metadata:
module_rpms = mmd.get_rpm_artifacts()
metadata.append((module_id, module_rpms))
mod_index.add_module_stream(mmd)
module_names = set(mod_index.get_module_names())
defaults_dir = compose.paths.work.module_defaults_dir()
overrides_dir = compose.conf.get("module_defaults_override_dir")
collect_module_defaults(
defaults_dir, module_names, mod_index, overrides_dir=overrides_dir
)
obsoletes_dir = compose.paths.work.module_obsoletes_dir()
mod_index = collect_module_obsoletes(obsoletes_dir, module_names, mod_index)
# Add extra modulemd files
if variant.uid in compose.conf.get("createrepo_extra_modulemd", {}):
compose.log_debug("Adding extra modulemd for %s.%s", variant.uid, arch)
dirname = compose.paths.work.tmp_dir(variant=variant, create_dir=False)
for filepath in glob.glob(os.path.join(dirname, arch) + "/*.yaml"):
module_stream = read_single_module_stream_from_file(filepath)
if not mod_index.add_module_stream(module_stream):
raise RuntimeError(
"Failed parsing modulemd data from %s" % filepath
)
# Add the module to metadata with dummy tag. We can't leave the
# value empty, but we don't know what the correct tag is.
nsvc = module_stream.get_nsvc()
variant.module_uid_to_koji_tag[nsvc] = "DUMMY"
metadata.append((nsvc, []))
log_file = compose.paths.log.log_file(arch, "modifyrepo-modules-%s" % variant)
add_modular_metadata(repo, repo_dir, mod_index, log_file)
for module_id, module_rpms in metadata:
modulemd_path = os.path.join(
types[pkg_type][1](relative=True),
find_file_in_repodata(repo_dir, "modules"),
)
modules_metadata.prepare_module_metadata(
variant,
arch,
module_id,
modulemd_path,
types[pkg_type][0],
list(module_rpms),
)
compose.log_info("[DONE ] %s" % msg)
def add_modular_metadata(repo, repo_path, mod_index, log_file):
"""Add modular metadata into a repository."""
# Dumping empty index fails, we need to check for that.
if not mod_index.get_module_names():
return
with temp_dir() as tmp_dir:
modules_path = os.path.join(tmp_dir, "modules.yaml")
with open(modules_path, "w") as f:
f.write(mod_index.dump_to_string())
cmd = repo.get_modifyrepo_cmd(
os.path.join(repo_path, "repodata"),
modules_path,
mdtype="modules",
compress_type="gz",
)
run(cmd, logfile=log_file, show_cmd=True)
def find_file_in_repodata(repo_path, type_):
dom = xml.dom.minidom.parse(os.path.join(repo_path, "repodata", "repomd.xml"))
for entry in dom.getElementsByTagName("data"):
if entry.getAttribute("type") == type_:
return entry.getElementsByTagName("location")[0].getAttribute("href")
entry.unlink()
raise RuntimeError("No such file in repodata: %s" % type_)
class CreaterepoThread(WorkerThread):
def __init__(self, pool, reference_pkgset, modules_metadata):
super(CreaterepoThread, self).__init__(pool)
self.reference_pkgset = reference_pkgset
self.modules_metadata = modules_metadata
def process(self, item, num):
compose, arch, variant, pkg_type = item
create_variant_repo(
compose,
arch,
variant,
pkg_type=pkg_type,
pkgset=self.reference_pkgset,
modules_metadata=self.modules_metadata,
)
def get_productids_from_scm(compose):
# product_id is a scm_dict: {scm, repo, branch, dir}
# expected file name format: $variant_uid-$arch-*.pem
product_id = compose.conf.get("product_id")
if not product_id:
compose.log_info("No product certificates specified")
return
product_id_allow_missing = compose.conf["product_id_allow_missing"]
msg = "Getting product certificates from SCM..."
compose.log_info("[BEGIN] %s" % msg)
tmp_dir = compose.mkdtemp(prefix="pungi_")
try:
get_dir_from_scm(product_id, tmp_dir, compose=compose)
except OSError as e:
if e.errno == errno.ENOENT and product_id_allow_missing:
compose.log_warning("No product IDs in %s" % product_id)
return
raise
if compose.conf["product_id_allow_name_prefix"]:
pattern = "%s/*%s-%s-*.pem"
else:
pattern = "%s/%s-%s-*.pem"
for arch in compose.get_arches():
for variant in compose.get_variants(arch=arch):
# some layered products may use base product name before variant
pem_files = glob.glob(pattern % (tmp_dir, variant.uid, arch))
if not pem_files:
warning = "No product certificate found (arch: %s, variant: %s)" % (
arch,
variant.uid,
)
if product_id_allow_missing:
compose.log_warning(warning)
continue
else:
shutil.rmtree(tmp_dir)
raise RuntimeError(warning)
if len(pem_files) > 1:
shutil.rmtree(tmp_dir)
raise RuntimeError(
"Multiple product certificates found (arch: %s, variant: %s): %s"
% (
arch,
variant.uid,
", ".join(sorted([os.path.basename(i) for i in pem_files])),
)
)
product_id_path = compose.paths.work.product_id(arch, variant)
shutil.copy2(pem_files[0], product_id_path)
try:
shutil.rmtree(tmp_dir)
except Exception as e:
compose.log_warning("Failed to clean up tmp dir: %s %s" % (tmp_dir, str(e)))
compose.log_info("[DONE ] %s" % msg)
def _get_old_package_dirs(compose, repo_dir):
"""Given a compose and a path to a repo in it, try to find corresponding
repo in an older compose and return a list of paths to directories with
packages in it.
"""
if not compose.conf["createrepo_deltas"]:
return None
old_package_dirs = compose.paths.old_compose_path(
repo_dir, allowed_statuses=["FINISHED", "FINISHED_INCOMPLETE"]
)
if not old_package_dirs:
compose.log_info("No suitable old compose found in: %s" % compose.old_composes)
return None
old_package_dirs = os.path.join(old_package_dirs, "Packages")
if compose.conf["hashed_directories"]:
old_package_dirs = _find_package_dirs(old_package_dirs)
return old_package_dirs
def _find_package_dirs(base):
"""Assuming the packages are in directories hashed by first letter, find
all the buckets in the given base.
"""
buckets = set()
try:
for subdir in os.listdir(base):
bucket = os.path.join(base, subdir)
if os.path.isdir(bucket):
buckets.add(bucket)
except OSError:
# The directory does not exist, so no drpms for you!
pass
return sorted(buckets)
def _has_deltas(compose, variant, arch):
"""Check if delta RPMs are enabled for given variant and architecture."""
key = "createrepo_deltas"
if isinstance(compose.conf.get(key), bool):
return compose.conf[key]
return any(get_arch_variant_data(compose.conf, key, arch, variant))
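# A hedged example of the two accepted shapes (values hypothetical):
#   createrepo_deltas = True                                   # compose-wide
#   createrepo_deltas = [("^Everything$", {"x86_64": True})]   # per variant/arch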
class ModulesMetadata(object):
def __init__(self, compose):
# Prepare empty module metadata
self.compose = compose
self.modules_metadata_file = self.compose.paths.compose.metadata("modules.json")
self.productmd_modules_metadata = productmd.modules.Modules()
self.productmd_modules_metadata.compose.id = copy.copy(self.compose.compose_id)
self.productmd_modules_metadata.compose.type = copy.copy(
self.compose.compose_type
)
self.productmd_modules_metadata.compose.date = copy.copy(
self.compose.compose_date
)
self.productmd_modules_metadata.compose.respin = copy.copy(
self.compose.compose_respin
)
def write_modules_metadata(self):
"""
Flush modules metadata into the file.
"""
self.compose.log_info(
"Writing modules metadata: %s" % self.modules_metadata_file
)
self.productmd_modules_metadata.dump(self.modules_metadata_file)
def prepare_module_metadata(
self, variant, arch, nsvc, modulemd_path, category, module_rpms
):
"""
Find koji tag which corresponds to the module and add record into
module metadata structure.
"""
koji_tag = variant.module_uid_to_koji_tag[nsvc]
self.productmd_modules_metadata.add(
variant.uid, arch, nsvc, koji_tag, modulemd_path, category, module_rpms
)
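For reference, a minimal sketch of the modulemd index-building step that create_variant_repo performs, limited to calls the phase itself makes; "extra.yaml" is a hypothetical modulemd file and the snippet assumes the libmodulemd bindings are importable:

from pungi.module_util import Modulemd
from pungi.util import read_single_module_stream_from_file

mod_index = Modulemd.ModuleIndex()
stream = read_single_module_stream_from_file("extra.yaml")  # hypothetical path
if not mod_index.add_module_stream(stream):
    raise RuntimeError("Failed parsing modulemd data from extra.yaml")

print(mod_index.get_module_names())     # e.g. ["testmodule"]
yaml_blob = mod_index.dump_to_string()  # the YAML injected via modifyrepo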

141
pungi/phases/extra_files.py Normal file
View File

@ -0,0 +1,141 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
import os
import copy
import fnmatch
from productmd.extra_files import ExtraFiles
from pungi.util import get_arch_variant_data, pkg_is_rpm, copy_all
from pungi.arch import split_name_arch
from pungi.wrappers.scm import get_file_from_scm, get_dir_from_scm
from pungi.phases.base import ConfigGuardedPhase
from pungi import metadata
class ExtraFilesPhase(ConfigGuardedPhase):
"""EXTRA_FILES"""
name = "extra_files"
def __init__(self, compose, pkgset_phase):
super(ExtraFilesPhase, self).__init__(compose)
# pkgset_phase provides package_sets
self.pkgset_phase = pkgset_phase
# Prepare metadata
self.metadata = ExtraFiles()
self.metadata.compose.id = self.compose.compose_id
self.metadata.compose.type = self.compose.compose_type
self.metadata.compose.date = self.compose.compose_date
self.metadata.compose.respin = self.compose.compose_respin
def run(self):
for variant in self.compose.get_variants():
if variant.is_empty:
continue
for arch in variant.arches + ["src"]:
cfg = get_arch_variant_data(self.compose.conf, self.name, arch, variant)
if cfg:
copy_extra_files(
self.compose,
cfg,
arch,
variant,
self.pkgset_phase.package_sets,
self.metadata,
)
else:
self.compose.log_info(
"[SKIP ] No extra files (arch: %s, variant: %s)"
% (arch, variant.uid)
)
metadata_path = self.compose.paths.compose.metadata("extra_files.json")
self.compose.log_info("Writing global extra files metadata: %s" % metadata_path)
self.metadata.dump(metadata_path)
def copy_extra_files(
compose, cfg, arch, variant, package_sets, extra_metadata, checksum_type=None
):
checksum_type = checksum_type or compose.conf["media_checksums"]
var_dict = {
"arch": arch,
"variant_id": variant.id,
"variant_id_lower": variant.id.lower(),
"variant_uid": variant.uid,
"variant_uid_lower": variant.uid.lower(),
}
msg = "Getting extra files (arch: %s, variant: %s)" % (arch, variant)
compose.log_info("[BEGIN] %s" % msg)
os_tree = compose.paths.compose.os_tree(arch, variant)
extra_files_dir = compose.paths.work.extra_files_dir(arch, variant)
for scm_dict in cfg:
scm_dict = copy.deepcopy(scm_dict)
# if scm is "rpm" and repo contains only a package name, find the
# package(s) in package set
if scm_dict["scm"] == "rpm" and not _is_external(scm_dict["repo"]):
rpms = []
pattern = scm_dict["repo"] % var_dict
pkg_name, pkg_arch = split_name_arch(pattern)
for package_set in package_sets:
for pkgset_file in package_set[arch]:
pkg_obj = package_set[arch][pkgset_file]
if pkg_is_rpm(pkg_obj) and _pkg_matches(
pkg_obj, pkg_name, pkg_arch
):
rpms.append(pkg_obj.file_path)
if not rpms:
raise RuntimeError(
"No package matching %s in the package set." % pattern
)
scm_dict["repo"] = rpms
getter = get_file_from_scm if "file" in scm_dict else get_dir_from_scm
target_path = os.path.join(
extra_files_dir, scm_dict.get("target", "").lstrip("/")
)
getter(scm_dict, target_path, compose=compose)
if os.listdir(extra_files_dir):
metadata.populate_extra_files_metadata(
extra_metadata,
variant,
arch,
os_tree,
copy_all(extra_files_dir, os_tree),
compose.conf["media_checksums"],
relative_root=compose.paths.compose.topdir(),
)
compose.log_info("[DONE ] %s" % msg)
def _pkg_matches(pkg_obj, name_glob, arch):
"""Check if `pkg_obj` matches name and arch."""
return fnmatch.fnmatch(pkg_obj.name, name_glob) and (
arch is None or arch == pkg_obj.arch
)
def _is_external(rpm):
"""Check if path to rpm points outside of the compose: i.e. it is an
absolute path or a URL."""
return rpm.startswith("/") or "://" in rpm
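A minimal usage sketch of the two helpers above, with a namedtuple standing in for a real package-set object (all names hypothetical); pasted below the helpers, it should run as-is:

from collections import namedtuple

Pkg = namedtuple("Pkg", "name arch")  # stand-in for a real pkgset entry

# "repo" values pointing outside the compose are fetched verbatim...
assert _is_external("/mnt/certs/extra-cert.rpm")      # absolute path
assert _is_external("https://example.com/extra.rpm")  # URL
# ...while anything else is treated as a package name pattern.
assert not _is_external("fedora-release-*.noarch")

# The pattern is then matched against the package set by name glob plus
# optional arch, the same way copy_extra_files filters packages.
assert _pkg_matches(Pkg("fedora-release-common", "noarch"), "fedora-release-*", "noarch")
assert not _pkg_matches(Pkg("fedora-release-common", "noarch"), "fedora-release-*", "x86_64")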

554
pungi/phases/extra_isos.py Normal file
View File

@ -0,0 +1,554 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
import os
import hashlib
import json
from kobo.shortcuts import force_list
from kobo.threads import ThreadPool, WorkerThread
import productmd.treeinfo
from productmd.extra_files import ExtraFiles
from pungi import createiso
from pungi import metadata
from pungi.phases.base import ConfigGuardedPhase, PhaseBase, PhaseLoggerMixin
from pungi.phases.createiso import (
add_iso_to_metadata,
copy_boot_images,
run_createiso_command,
load_and_tweak_treeinfo,
compare_packages,
OldFileLinker,
get_iso_level_config,
)
from pungi.util import (
failable,
get_format_substs,
get_variant_data,
get_volid,
read_json_file,
)
from pungi.wrappers import iso
from pungi.wrappers.scm import get_dir_from_scm, get_file_from_scm
class ExtraIsosPhase(PhaseLoggerMixin, ConfigGuardedPhase, PhaseBase):
name = "extra_isos"
def __init__(self, compose, buildinstall_phase):
super(ExtraIsosPhase, self).__init__(compose)
self.pool = ThreadPool(logger=self.logger)
self.bi = buildinstall_phase
def validate(self):
for variant in self.compose.get_variants(types=["variant"]):
for config in get_variant_data(self.compose.conf, self.name, variant):
extra_arches = set(config.get("arches", [])) - set(variant.arches)
if extra_arches:
self.compose.log_warning(
"Extra iso config for %s mentions non-existing arches: %s"
% (variant, ", ".join(sorted(extra_arches)))
)
def run(self):
commands = []
for variant in self.compose.get_variants(types=["variant"]):
for config in get_variant_data(self.compose.conf, self.name, variant):
arches = set(variant.arches)
if config.get("arches"):
arches &= set(config["arches"])
if not config["skip_src"]:
arches.add("src")
for arch in sorted(arches):
commands.append((config, variant, arch))
for config, variant, arch in commands:
self.pool.add(ExtraIsosThread(self.pool, self.bi))
self.pool.queue_put((self.compose, config, variant, arch))
self.pool.start()
class ExtraIsosThread(WorkerThread):
def __init__(self, pool, buildinstall_phase):
super(ExtraIsosThread, self).__init__(pool)
self.bi = buildinstall_phase
def process(self, item, num):
self.num = num
compose, config, variant, arch = item
can_fail = arch in config.get("failable_arches", [])
with failable(
compose, can_fail, variant, arch, "extra_iso", logger=self.pool._logger
):
self.worker(compose, config, variant, arch)
def worker(self, compose, config, variant, arch):
filename = get_filename(compose, variant, arch, config.get("filename"))
volid = get_volume_id(compose, variant, arch, config.get("volid", []))
iso_dir = compose.paths.compose.iso_dir(arch, variant)
iso_path = os.path.join(iso_dir, filename)
prepare_media_metadata(compose, variant, arch)
msg = "Creating ISO (arch: %s, variant: %s): %s" % (arch, variant, filename)
self.pool.log_info("[BEGIN] %s" % msg)
get_extra_files(compose, variant, arch, config.get("extra_files", []))
bootable = arch != "src" and bool(compose.conf.get("buildinstall_method"))
graft_points = get_iso_contents(
compose,
variant,
arch,
config["include_variants"],
filename,
bootable=bootable,
inherit_extra_files=config.get("inherit_extra_files", False),
)
opts = createiso.CreateIsoOpts(
output_dir=iso_dir,
iso_name=filename,
volid=volid,
graft_points=graft_points,
arch=arch,
supported=compose.supported,
hfs_compat=compose.conf["iso_hfs_ppc64le_compatible"],
use_xorrisofs=compose.conf.get("createiso_use_xorrisofs"),
iso_level=get_iso_level_config(compose, variant, arch),
)
os_tree = compose.paths.compose.os_tree(arch, variant)
if compose.conf["create_jigdo"]:
jigdo_dir = compose.paths.compose.jigdo_dir(arch, variant)
opts = opts._replace(jigdo_dir=jigdo_dir, os_tree=os_tree)
if bootable:
opts = opts._replace(
buildinstall_method=compose.conf["buildinstall_method"],
boot_iso=os.path.join(os_tree, "images", "boot.iso"),
)
# Check if it can be reused.
hasher = hashlib.sha256()
hasher.update(json.dumps(config, sort_keys=True).encode("utf-8"))
config_hash = hasher.hexdigest()
if not self.try_reuse(compose, variant, arch, config_hash, opts):
script_dir = compose.paths.work.tmp_dir(arch, variant)
opts = opts._replace(script_dir=script_dir)
script_file = os.path.join(script_dir, "extraiso-%s.sh" % filename)
with open(script_file, "w") as f:
createiso.write_script(opts, f)
run_createiso_command(
self.num,
compose,
bootable,
arch,
["bash", script_file],
[compose.topdir],
log_file=compose.paths.log.log_file(
arch, "extraiso-%s" % os.path.basename(iso_path)
),
iso_path=iso_path,
)
img = add_iso_to_metadata(
compose,
variant,
arch,
iso_path,
bootable,
additional_variants=config["include_variants"],
)
img._max_size = config.get("max_size")
save_reuse_metadata(compose, variant, arch, config_hash, opts, iso_path)
self.pool.log_info("[DONE ] %s" % msg)
def try_reuse(self, compose, variant, arch, config_hash, opts):
# Check explicit config
if not compose.conf["extraiso_allow_reuse"]:
return False
log_msg = "Cannot reuse ISO for %s.%s" % (variant, arch)
if opts.buildinstall_method and not self.bi.reused(variant, arch):
# If buildinstall phase was not reused for some reason, we can not
# reuse any bootable image. If a package change caused rebuild of
# boot.iso, we would catch it here too, but there could be a
# configuration change in lorax template which would remain
# undetected.
self.pool.log_info("%s - boot configuration changed", log_msg)
return False
# Check old compose configuration: extra_files and product_ids can be
# reflected on ISO.
old_config = compose.load_old_compose_config()
if not old_config:
self.pool.log_info("%s - no config for old compose", log_msg)
return False
# Disable reuse if unsigned packages are allowed. The older compose
# could have unsigned packages, and those may have been signed since
# then. We want to regenerate the ISO to have signatures.
if None in compose.conf["sigkeys"]:
self.pool.log_info("%s - unsigned packages are allowed", log_msg)
return False
# Convert current configuration to JSON and back to encode it similarly
# to the old one
config = json.loads(json.dumps(compose.conf))
# Skip a selection of options: these affect what packages can be
# included (which we explicitly check later on) or are irrelevant
# to the ISO.
config_whitelist = set(
[
"gather_lookaside_repos",
"pkgset_koji_builds",
"pkgset_koji_scratch_tasks",
"pkgset_koji_module_builds",
"osbs",
"osbuild",
]
)
for opt in compose.conf:
if opt in config_whitelist:
continue
if old_config.get(opt) != config.get(opt):
self.pool.log_info("%s - option %s differs", log_msg, opt)
return False
old_metadata = load_old_metadata(compose, variant, arch, config_hash)
if not old_metadata:
self.pool.log_info("%s - no old metadata found", log_msg)
return False
# Test if volume ID matches - volid can be generated dynamically based on
# other values, and could change even if nothing else is different.
if opts.volid != old_metadata["opts"]["volid"]:
self.pool.log_info("%s - volume ID differs", log_msg)
return False
# Compare packages on the ISO.
if compare_packages(
old_metadata["opts"]["graft_points"],
opts.graft_points,
):
self.pool.log_info("%s - packages differ", log_msg)
return False
try:
self.perform_reuse(
compose,
variant,
arch,
opts,
old_metadata["opts"]["output_dir"],
old_metadata["opts"]["iso_name"],
)
return True
except Exception as exc:
self.pool.log_error(
"Error while reusing ISO for %s.%s: %s" % (variant, arch, exc)
)
compose.traceback("extraiso-reuse-%s-%s-%s" % (variant, arch, config_hash))
return False
def perform_reuse(self, compose, variant, arch, opts, old_iso_dir, old_file_name):
"""
Copy all related files from the old compose to the new one. The new
image is then added to metadata by the caller.
"""
linker = OldFileLinker(self.pool._logger)
old_iso_path = os.path.join(old_iso_dir, old_file_name)
iso_path = os.path.join(opts.output_dir, opts.iso_name)
try:
# Hardlink ISO and manifest
for suffix in ("", ".manifest"):
linker.link(old_iso_path + suffix, iso_path + suffix)
# Copy log files.
# The log file name includes the image file name, so we need to find
# the old log under the old name and link it to the new name.
log_file = compose.paths.log.log_file(arch, "extraiso-%s" % opts.iso_name)
old_log_file = compose.paths.old_compose_path(
compose.paths.log.log_file(arch, "extraiso-%s" % old_file_name)
)
linker.link(old_log_file, log_file)
# Copy jigdo files
if opts.jigdo_dir:
old_jigdo_dir = compose.paths.old_compose_path(opts.jigdo_dir)
for suffix in (".template", ".jigdo"):
linker.link(
os.path.join(old_jigdo_dir, old_file_name) + suffix,
os.path.join(opts.jigdo_dir, opts.iso_name) + suffix,
)
except Exception:
# A problem happened while linking some file, let's clean up
# everything.
linker.abort()
raise
def save_reuse_metadata(compose, variant, arch, config_hash, opts, iso_path):
"""
Save metadata for possible reuse of this image. The file name is determined
from the hash of the configuration snippet for this image. Any change in
that configuration in the next compose will change the hash and thus block
reuse.
"""
metadata = {"opts": opts._asdict()}
metadata_path = compose.paths.log.log_file(
arch,
"extraiso-reuse-%s-%s-%s" % (variant.uid, arch, config_hash),
ext="json",
)
with open(metadata_path, "w") as f:
json.dump(metadata, f, indent=2)
def load_old_metadata(compose, variant, arch, config_hash):
metadata_path = compose.paths.log.log_file(
arch,
"extraiso-reuse-%s-%s-%s" % (variant.uid, arch, config_hash),
ext="json",
)
old_path = compose.paths.old_compose_path(metadata_path)
try:
return read_json_file(old_path)
except Exception:
return None
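For illustration, the metadata written by save_reuse_metadata and read back by load_old_metadata is a single JSON document holding the createiso options; roughly (all values hypothetical):
metadata = {
    "opts": {
        "volid": "Fedora-S-dvd-x86_64-rawhide",
        "output_dir": "/compose/Server/x86_64/iso",
        "iso_name": "Server-dvd-x86_64.iso",
        "graft_points": "/work/x86_64/iso/Server-dvd-x86_64.iso-graft-points",
        # ...remaining fields of the createiso options namedtuple
    }
}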
def get_extra_files(compose, variant, arch, extra_files):
"""Clone the configured files into a directory from where they can be
included in the ISO.
"""
extra_files_dir = compose.paths.work.extra_iso_extra_files_dir(arch, variant)
filelist = []
for scm_dict in extra_files:
getter = get_file_from_scm if "file" in scm_dict else get_dir_from_scm
target = scm_dict.get("target", "").lstrip("/")
target_path = os.path.join(extra_files_dir, target).rstrip("/")
filelist.extend(
os.path.join(target, f)
for f in getter(scm_dict, target_path, compose=compose)
)
if filelist:
metadata.populate_extra_files_metadata(
ExtraFiles(),
variant,
arch,
extra_files_dir,
filelist,
compose.conf["media_checksums"],
)
def get_iso_contents(
compose, variant, arch, include_variants, filename, bootable, inherit_extra_files
):
"""Find all files that should be on the ISO. For bootable image we start
with the boot configuration. Then for each variant we add packages,
repodata and extra files. Finally we add top-level extra files.
"""
iso_dir = compose.paths.work.iso_dir(arch, filename)
files = {}
if bootable:
buildinstall_dir = compose.paths.work.buildinstall_dir(arch, create_dir=False)
if compose.conf["buildinstall_method"] == "lorax":
buildinstall_dir = os.path.join(buildinstall_dir, variant.uid)
copy_boot_images(buildinstall_dir, iso_dir)
files = iso.get_graft_points(
compose.paths.compose.topdir(), [buildinstall_dir, iso_dir]
)
# We need to point efiboot.img to the compose/ tree, because it was
# modified in the buildinstall phase, so the file in work/ has a
# different checksum from the one recorded in .treeinfo.
if "images/efiboot.img" in files:
files["images/efiboot.img"] = os.path.join(
compose.paths.compose.os_tree(arch, variant), "images/efiboot.img"
)
variants = [variant.uid] + include_variants
for variant_uid in variants:
var = compose.all_variants[variant_uid]
# Get packages...
package_dir = compose.paths.compose.packages(arch, var)
for k, v in iso.get_graft_points(
compose.paths.compose.topdir(), [package_dir]
).items():
files[os.path.join(var.uid, "Packages", k)] = v
# Get repodata...
tree_dir = compose.paths.compose.repository(arch, var)
repo_dir = os.path.join(tree_dir, "repodata")
for k, v in iso.get_graft_points(
compose.paths.compose.topdir(), [repo_dir]
).items():
files[os.path.join(var.uid, "repodata", k)] = v
if inherit_extra_files:
# Get extra files...
extra_files_dir = compose.paths.work.extra_files_dir(arch, var)
for k, v in iso.get_graft_points(
compose.paths.compose.topdir(), [extra_files_dir]
).items():
files[os.path.join(var.uid, k)] = v
extra_files_dir = compose.paths.work.extra_iso_extra_files_dir(arch, variant)
original_treeinfo = os.path.join(
compose.paths.compose.os_tree(arch=arch, variant=variant), ".treeinfo"
)
tweak_treeinfo(
compose,
include_variants,
original_treeinfo,
os.path.join(extra_files_dir, ".treeinfo"),
)
tweak_repo_treeinfo(
compose,
include_variants,
original_treeinfo,
original_treeinfo,
)
# Add extra files specific for the ISO
files.update(
iso.get_graft_points(compose.paths.compose.topdir(), [extra_files_dir])
)
gp = "%s-graft-points" % iso_dir
iso.write_graft_points(gp, files, exclude=["*/lost+found", "*/boot.iso"])
return gp
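The files dict built above maps ISO-relative locations to real filesystem paths, and that mapping is what ends up in the graft-points file; schematically (paths hypothetical):
files = {
    "Server/Packages/b/bash-5.2-1.fc40.x86_64.rpm":
        "/compose/Server/x86_64/os/Packages/b/bash-5.2-1.fc40.x86_64.rpm",
    "Server/repodata/repomd.xml":
        "/compose/Server/x86_64/os/repodata/repomd.xml",
    ".treeinfo":
        "/work/x86_64/Server/extra-iso-extra-files/.treeinfo",
}
# iso.write_graft_points() then serializes this, skipping anything that
# matches the exclude globs ("*/lost+found", "*/boot.iso").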
def tweak_repo_treeinfo(compose, include_variants, source_file, dest_file):
"""
Add extra variants into a variant's .treeinfo file. The variants to add
are the ones listed in the `extra_isos` -> `include_variants` option.
"""
ti = productmd.treeinfo.TreeInfo()
ti.load(source_file)
main_variant = next(iter(ti.variants))
for variant_uid in include_variants:
variant = compose.all_variants[variant_uid]
var = productmd.treeinfo.Variant(ti)
var.id = variant.id
var.uid = variant.uid
var.name = variant.name
var.type = variant.type
ti.variants.add(var)
for variant_id in ti.variants:
var = ti.variants[variant_id]
if variant_id == main_variant:
var.paths.packages = 'Packages'
var.paths.repository = '.'
else:
var.paths.packages = os.path.join(
'../../..',
var.uid,
var.arch,
'os/Packages',
)
var.paths.repository = os.path.join(
'../../..',
var.uid,
var.arch,
'os',
)
ti.dump(dest_file, main_variant=main_variant)
def tweak_treeinfo(compose, include_variants, source_file, dest_file):
ti = load_and_tweak_treeinfo(source_file)
for variant_uid in include_variants:
variant = compose.all_variants[variant_uid]
var = productmd.treeinfo.Variant(ti)
var.id = variant.id
var.uid = variant.uid
var.name = variant.name
var.type = variant.type
ti.variants.add(var)
for variant_id in ti.variants:
var = ti.variants[variant_id]
var.paths.packages = os.path.join(var.uid, "Packages")
var.paths.repository = var.uid
ti.dump(dest_file)
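Both helpers above drive the same productmd API; a minimal standalone sketch of adding one extra variant to a .treeinfo file (variant data and paths hypothetical):
import productmd.treeinfo
ti = productmd.treeinfo.TreeInfo()
ti.load(".treeinfo")                    # source file
var = productmd.treeinfo.Variant(ti)
var.id = "Client"
var.uid = "Client"
var.name = "Client"
var.type = "variant"
ti.variants.add(var)
ti.variants["Client"].paths.packages = "Client/Packages"
ti.variants["Client"].paths.repository = "Client"
ti.dump("treeinfo-with-extras")         # destination file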
def get_filename(compose, variant, arch, format):
disc_type = compose.conf["disc_types"].get("dvd", "dvd")
base_filename = compose.get_image_name(
arch, variant, disc_type=disc_type, disc_num=1
)
if not format:
return base_filename
kwargs = {
"arch": arch,
"disc_type": disc_type,
"disc_num": 1,
"suffix": ".iso",
"filename": base_filename,
"variant": variant,
}
args = get_format_substs(compose, **kwargs)
try:
return (format % args).format(**args)
except KeyError as err:
raise RuntimeError(
"Failed to create image name: unknown format element: %s" % err
)
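The double substitution above lets a configured format string use either old %-style keys or new {}-style fields. For example (format strings hypothetical):
args = {"variant": "Server", "arch": "x86_64", "filename": "Server-dvd-x86_64.iso"}
fmt = "%(filename)s"                     # %-style resolves first
print((fmt % args).format(**args))       # -> Server-dvd-x86_64.iso
fmt = "{variant}-{arch}-extra.iso"       # {}-style resolves second
print((fmt % args).format(**args))       # -> Server-x86_64-extra.iso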
def get_volume_id(compose, variant, arch, formats):
disc_type = compose.conf["disc_types"].get("dvd", "dvd")
# Get volume ID for regular ISO so that we can substitute it in.
volid = get_volid(compose, arch, variant, disc_type=disc_type)
return get_volid(
compose,
arch,
variant,
disc_type=disc_type,
formats=force_list(formats),
volid=volid,
)
def prepare_media_metadata(compose, variant, arch):
"""Write a .discinfo and media.repo files to a directory that will be
included on the ISO. It's possible to overwrite the files by using extra
files.
"""
md_dir = compose.paths.work.extra_iso_extra_files_dir(arch, variant)
description = metadata.get_description(compose, variant, arch)
metadata.create_media_repo(
os.path.join(md_dir, "media.repo"), description, timestamp=None
)
metadata.create_discinfo(os.path.join(md_dir, ".discinfo"), description, arch)

File diff suppressed because it is too large

167
pungi/phases/gather/link.py Normal file
View File

@ -0,0 +1,167 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
import os
import kobo.rpmlib
from pungi.linker import LinkerPool
# TODO: global Linker instance - to keep hardlinks on dest?
# DONE: show overall progress, not each file
# TODO: (these should be logged separately)
def _get_src_nevra(compose, pkg_obj, srpm_map):
"""Return source N-E:V-R.A.rpm; guess if necessary."""
result = srpm_map.get(pkg_obj.sourcerpm, None)
if not result:
nvra = kobo.rpmlib.parse_nvra(pkg_obj.sourcerpm)
nvra["epoch"] = pkg_obj.epoch
result = kobo.rpmlib.make_nvra(nvra, add_rpm=True, force_epoch=True)
compose.log_warning(
"Package %s has no SRPM available, guessing epoch: %s"
% (pkg_obj.nevra, result)
)
return result
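A standalone sketch of the fallback branch above, with a hypothetical package (kobo.rpmlib handles the .rpm suffix):
import kobo.rpmlib
nvra = kobo.rpmlib.parse_nvra("bash-5.2-1.fc40.src.rpm")
nvra["epoch"] = 0                        # borrowed from the binary package
print(kobo.rpmlib.make_nvra(nvra, add_rpm=True, force_epoch=True))
# -> bash-0:5.2-1.fc40.src.rpm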
def get_package_path(filename, hashed_directory=False):
"""Get path for filename. If ``hashed_directory`` is ``True``, the path
will include a prefix based on the initial letter.
>>> get_package_path('my-package.rpm')
'my-package.rpm'
>>> get_package_path('my-package.rpm', True)
'm/my-package.rpm'
>>> get_package_path('My-Package.rpm', True)
'm/My-Package.rpm'
"""
if hashed_directory:
prefix = filename[0].lower()
return os.path.join(prefix, filename)
return filename
def link_files(compose, arch, variant, pkg_map, pkg_sets, manifest, srpm_map={}):
# srpm_map is deliberately a mutable default argument: the same dict
# instance is shared between link_files() runs.
msg = "Linking packages (arch: %s, variant: %s)" % (arch, variant)
compose.log_info("[BEGIN] %s" % msg)
link_type = compose.conf["link_type"]
pool = LinkerPool.with_workers(10, link_type, logger=compose._logger)
hashed_directories = compose.conf["hashed_directories"]
# Create temporary dict mapping package path to package object from pkgset
# so we do not have to search all pkg_sets for every package in pkg_map.
pkg_by_path = {}
for pkg_set in pkg_sets:
for path in pkg_set[arch]:
pkg_by_path[path] = pkg_set[arch][path]
packages_dir = compose.paths.compose.packages("src", variant)
packages_dir_relpath = compose.paths.compose.packages("src", variant, relative=True)
for pkg in pkg_map["srpm"]:
if "lookaside" in pkg["flags"]:
continue
package_path = get_package_path(
os.path.basename(pkg["path"]), hashed_directories
)
dst = os.path.join(packages_dir, package_path)
dst_relpath = os.path.join(packages_dir_relpath, package_path)
# link file
pool.queue_put((pkg["path"], dst))
# update rpm manifest
pkg_obj = pkg_by_path[pkg["path"]]
nevra = pkg_obj.nevra
manifest.add(
variant.uid,
arch,
nevra,
path=dst_relpath,
sigkey=pkg_obj.signature,
category="source",
)
# update srpm_map
srpm_map.setdefault(pkg_obj.file_name, nevra)
packages_dir = compose.paths.compose.packages(arch, variant)
packages_dir_relpath = compose.paths.compose.packages(arch, variant, relative=True)
for pkg in pkg_map["rpm"]:
if "lookaside" in pkg["flags"]:
continue
package_path = get_package_path(
os.path.basename(pkg["path"]), hashed_directories
)
dst = os.path.join(packages_dir, package_path)
dst_relpath = os.path.join(packages_dir_relpath, package_path)
# link file
pool.queue_put((pkg["path"], dst))
# update rpm manifest
pkg_obj = pkg_by_path[pkg["path"]]
nevra = pkg_obj.nevra
src_nevra = _get_src_nevra(compose, pkg_obj, srpm_map)
manifest.add(
variant.uid,
arch,
nevra,
path=dst_relpath,
sigkey=pkg_obj.signature,
category="binary",
srpm_nevra=src_nevra,
)
packages_dir = compose.paths.compose.debug_packages(arch, variant)
packages_dir_relpath = compose.paths.compose.debug_packages(
arch, variant, relative=True
)
for pkg in pkg_map["debuginfo"]:
if "lookaside" in pkg["flags"]:
continue
package_path = get_package_path(
os.path.basename(pkg["path"]), hashed_directories
)
dst = os.path.join(packages_dir, package_path)
dst_relpath = os.path.join(packages_dir_relpath, package_path)
# link file
pool.queue_put((pkg["path"], dst))
# update rpm manifest
pkg_obj = pkg_by_path[pkg["path"]]
nevra = pkg_obj.nevra
src_nevra = _get_src_nevra(compose, pkg_obj, srpm_map)
manifest.add(
variant.uid,
arch,
nevra,
path=dst_relpath,
sigkey=pkg_obj.signature,
category="debug",
srpm_nevra=src_nevra,
)
pool.start()
pool.stop()
compose.log_info("[DONE ] %s" % msg)

View File

@ -0,0 +1,19 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
class GatherMethodBase(object):
def __init__(self, compose):
self.compose = compose

View File

@ -0,0 +1,24 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
from .method_deps import GatherMethodDeps
from .method_nodeps import GatherMethodNodeps
from .method_hybrid import GatherMethodHybrid
ALL_METHODS = {
"deps": GatherMethodDeps,
"nodeps": GatherMethodNodeps,
"hybrid": GatherMethodHybrid,
}

View File

@ -0,0 +1,286 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
import os
import shutil
from kobo.shortcuts import run
from kobo.pkgset import SimpleRpmWrapper, RpmWrapper
from kobo.rpmlib import parse_nvra
from pungi.util import get_arch_variant_data, temp_dir
from pungi.wrappers.pungi import PungiWrapper
from pungi.arch import tree_arch_to_yum_arch
import pungi.phases.gather
from pungi.phases.pkgset.pkgsets import ExtendedRpmWrapper
import pungi.phases.gather.method
class GatherMethodDeps(pungi.phases.gather.method.GatherMethodBase):
def __call__(
self,
arch,
variant,
packages,
groups,
filter_packages,
multilib_whitelist,
multilib_blacklist,
package_sets,
path_prefix=None,
fulltree_excludes=None,
prepopulate=None,
):
# result = {
# "rpm": [],
# "srpm": [],
# "debuginfo": [],
# }
write_pungi_config(
self.compose,
arch,
variant,
packages,
groups,
filter_packages,
multilib_whitelist,
multilib_blacklist,
fulltree_excludes=fulltree_excludes,
prepopulate=prepopulate,
source_name=self.source_name,
package_sets=package_sets,
)
result, missing_deps = resolve_deps(
self.compose, arch, variant, source_name=self.source_name
)
raise_on_invalid_sigkeys(arch, variant, package_sets, result)
check_deps(self.compose, arch, variant, missing_deps)
return result
def raise_on_invalid_sigkeys(arch, variant, package_sets, result):
"""
Raises RuntimeError if some package in compose is signed with an invalid
sigkey.
"""
invalid_sigkey_rpms = {}
for package in result["rpm"]:
name = parse_nvra(package["path"])["name"]
for pkgset in package_sets:
for forbidden_package in pkgset["global"].invalid_sigkey_rpms:
if name == forbidden_package["name"]:
invalid_sigkey_rpms.setdefault(
pkgset["global"].sigkey_ordering, []
).append(forbidden_package)
if invalid_sigkey_rpms:
package_sets[0]["global"].raise_invalid_sigkeys_exception(invalid_sigkey_rpms)
def _format_packages(pkgs):
"""Sort packages and merge name with arch."""
result = set()
for pkg, pkg_arch in pkgs:
if type(pkg) in [SimpleRpmWrapper, RpmWrapper, ExtendedRpmWrapper]:
pkg_name = pkg.name
else:
pkg_name = pkg
if pkg_arch:
result.add("%s.%s" % (pkg_name, pkg_arch))
else:
result.add(pkg_name)
return sorted(result)
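For instance (hypothetical input):
pkgs = [("bash", "x86_64"), ("glibc", None)]
print(_format_packages(pkgs))   # -> ['bash.x86_64', 'glibc']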
def write_pungi_config(
compose,
arch,
variant,
packages,
groups,
filter_packages,
multilib_whitelist,
multilib_blacklist,
fulltree_excludes=None,
prepopulate=None,
source_name=None,
package_sets=None,
):
"""write pungi config (kickstart) for arch/variant"""
pungi_wrapper = PungiWrapper()
pungi_cfg = compose.paths.work.pungi_conf(
variant=variant, arch=arch, source_name=source_name
)
compose.log_info(
"Writing pungi config (arch: %s, variant: %s): %s", arch, variant, pungi_cfg
)
repos = {}
for i, pkgset in enumerate(package_sets or []):
if not variant.pkgsets or pkgset.name in variant.pkgsets:
repos["pungi-repo-%d" % i] = pkgset.paths[arch]
if compose.has_comps:
repos["comps-repo"] = compose.paths.work.comps_repo(arch=arch, variant=variant)
if variant.type == "optional":
for var in variant.parent.get_variants(
arch=arch, types=["self", "variant", "addon", "layered-product"]
):
repos["%s-comps" % var.uid] = compose.paths.work.comps_repo(
arch=arch, variant=var
)
if variant.type in ["addon", "layered-product"]:
repos["parent-comps"] = compose.paths.work.comps_repo(
arch=arch, variant=variant.parent
)
lookaside_repos = {}
for i, repo_url in enumerate(
pungi.phases.gather.get_lookaside_repos(compose, arch, variant)
):
lookaside_repos["lookaside-repo-%s" % i] = repo_url
packages_str = list(_format_packages(packages))
filter_packages_str = list(_format_packages(filter_packages))
if not groups and not packages_str and not prepopulate:
raise RuntimeError(
"No packages included in %s.%s "
"(no comps groups, no input packages, no prepopulate)" % (variant.uid, arch)
)
pungi_wrapper.write_kickstart(
ks_path=pungi_cfg,
repos=repos,
groups=groups,
packages=packages_str,
exclude_packages=filter_packages_str,
lookaside_repos=lookaside_repos,
fulltree_excludes=fulltree_excludes,
multilib_whitelist=multilib_whitelist,
multilib_blacklist=multilib_blacklist,
prepopulate=prepopulate,
)
def resolve_deps(compose, arch, variant, source_name=None):
pungi_wrapper = PungiWrapper()
pungi_log = compose.paths.work.pungi_log(arch, variant, source_name=source_name)
msg = "Running pungi (arch: %s, variant: %s)" % (arch, variant)
compose.log_info("[BEGIN] %s" % msg)
pungi_conf = compose.paths.work.pungi_conf(arch, variant, source_name=source_name)
multilib_methods = get_arch_variant_data(compose.conf, "multilib", arch, variant)
greedy_method = compose.conf["greedy_method"]
# variant
fulltree = compose.conf["gather_fulltree"]
selfhosting = compose.conf["gather_selfhosting"]
# profiling
profiler = compose.conf["gather_profiler"]
# optional
if variant.type == "optional":
fulltree = True
selfhosting = True
# addon
if variant.type in ["addon", "layered-product"]:
# packages having SRPM in parent variant are excluded from
# fulltree (via %fulltree-excludes)
fulltree = True
selfhosting = False
lookaside_repos = {}
for i, repo_url in enumerate(
pungi.phases.gather.get_lookaside_repos(compose, arch, variant)
):
lookaside_repos["lookaside-repo-%s" % i] = repo_url
yum_arch = tree_arch_to_yum_arch(arch)
tmp_dir = compose.paths.work.tmp_dir(arch, variant)
cache_dir = compose.paths.work.pungi_cache_dir(arch, variant)
# TODO: remove YUM code, fully migrate to DNF
backends = {
"yum": pungi_wrapper.get_pungi_cmd,
"dnf": pungi_wrapper.get_pungi_cmd_dnf,
}
get_cmd = backends[compose.conf["gather_backend"]]
cmd = get_cmd(
pungi_conf,
destdir=tmp_dir,
name=variant.uid,
selfhosting=selfhosting,
fulltree=fulltree,
arch=yum_arch,
full_archlist=True,
greedy=greedy_method,
cache_dir=cache_dir,
lookaside_repos=lookaside_repos,
multilib_methods=multilib_methods,
profiler=profiler,
)
# Use a temporary working directory as a workaround for
# https://bugzilla.redhat.com/show_bug.cgi?id=795137
with temp_dir(prefix="pungi_") as work_dir:
run(cmd, logfile=pungi_log, show_cmd=True, workdir=work_dir, env=os.environ)
# Clean up tmp dir
# Workaround for rpm not honoring the sgid bit, an issue which only appears when yum is used.
yumroot_dir = os.path.join(tmp_dir, "work", arch, "yumroot")
if os.path.isdir(yumroot_dir):
try:
shutil.rmtree(yumroot_dir)
except Exception as e:
compose.log_warning(
"Failed to clean up tmp dir: %s %s" % (yumroot_dir, str(e))
)
with open(pungi_log, "r") as f:
packages, broken_deps, missing_comps_pkgs = pungi_wrapper.parse_log(f)
if missing_comps_pkgs:
log_msg = "Packages mentioned in comps do not exist for %s.%s: %s" % (
variant.uid,
arch,
", ".join(sorted(missing_comps_pkgs)),
)
compose.log_warning(log_msg)
if compose.conf["require_all_comps_packages"]:
raise RuntimeError(log_msg)
compose.log_info("[DONE ] %s" % msg)
return packages, broken_deps
def check_deps(compose, arch, variant, missing_deps):
if not compose.conf["check_deps"]:
return
if missing_deps:
for pkg in sorted(missing_deps):
compose.log_error(
"Unresolved dependencies for %s.%s in package %s: %s"
% (variant, arch, pkg, sorted(missing_deps[pkg]))
)
raise RuntimeError("Unresolved dependencies detected")

View File

@ -0,0 +1,580 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
import gzip
import os
from collections import defaultdict
from fnmatch import fnmatch
import createrepo_c as cr
import kobo.rpmlib
from kobo.shortcuts import run
import pungi.phases.gather.method
from pungi import multilib_dnf
from pungi.module_util import Modulemd
from pungi.arch import get_valid_arches, tree_arch_to_yum_arch
from pungi.phases.gather import _mk_pkg_map
from pungi.util import get_arch_variant_data, pkg_is_debug, temp_dir, as_local_file
from pungi.wrappers import fus
from pungi.wrappers.comps import CompsWrapper
from .method_nodeps import expand_groups
class FakePackage(object):
"""This imitates a DNF package object and can be passed to python-multilib
library.
"""
def __init__(self, pkg):
self.pkg = pkg
def __getattr__(self, attr):
return getattr(self.pkg, attr)
@property
def files(self):
paths = []
# createrepo_c.Package.files is a tuple, but its length differs across
# versions. The constants define index at which the related value is
# located.
for entry in self.pkg.files:
paths.append(
os.path.join(entry[cr.FILE_ENTRY_PATH], entry[cr.FILE_ENTRY_NAME])
)
return paths
@property
def provides(self):
# This is supposed to match what a yum package object returns. It's a
# nested tuple (name, flag, (epoch, version, release)). This code only
# fills in the name, because that's all python-multilib is using.
return [(p[0].split()[0], None, (None, None, None)) for p in self.pkg.provides]
class GatherMethodHybrid(pungi.phases.gather.method.GatherMethodBase):
def __init__(self, *args, **kwargs):
super(GatherMethodHybrid, self).__init__(*args, **kwargs)
self.package_maps = {}
self.packages = {}
# Mapping from package name to set of langpack packages (stored as
# names).
self.langpacks = {}
# Set of packages for which we already added langpacks.
self.added_langpacks = set()
# Set of NEVRAs of modular packages
self.modular_packages = set()
# Arch -> pkg name -> set of pkg object
self.debuginfo = defaultdict(lambda: defaultdict(set))
# caches for processed packages
self.processed_multilib = set()
self.processed_debuginfo = set()
def _get_pkg_map(self, arch):
"""Create a mapping from NEVRA to actual package object. This will be
done once for each architecture, since the package set is the same for
all variants.
The keys are in NEVRA format and only include the epoch if it's not
zero. This makes it easier to look up packages from the depsolver results.
"""
if arch not in self.package_maps:
pkg_map = {}
for pkgset in self.package_sets:
for pkg_arch in pkgset.package_sets[arch].rpms_by_arch:
for pkg in pkgset.package_sets[arch].rpms_by_arch[pkg_arch]:
pkg_map[_fmt_nevra(pkg, pkg_arch)] = pkg
self.package_maps[arch] = pkg_map
return self.package_maps[arch]
def _prepare_packages(self):
for repo_path in self.get_repos():
md = cr.Metadata()
md.locate_and_load_xml(repo_path)
for key in md.keys():
pkg = md.get(key)
if pkg.arch in self.valid_arches:
self.packages[_fmt_nevra(pkg, arch=pkg.arch)] = FakePackage(pkg)
def _get_package(self, nevra):
if not self.packages:
self._prepare_packages()
return self.packages[nevra]
def _prepare_debuginfo(self):
"""Prepare cache of debuginfo packages for easy access. The cache is
indexed by package architecture and then by package name. There can be
more than one debuginfo package with the same name.
"""
for pkgset in self.package_sets:
for pkg_arch in pkgset.package_sets[self.arch].rpms_by_arch:
for pkg in pkgset.package_sets[self.arch].rpms_by_arch[pkg_arch]:
self.debuginfo[pkg.arch][pkg.name].add(pkg)
def _get_debuginfo(self, name, arch):
if not self.debuginfo:
self._prepare_debuginfo()
return self.debuginfo.get(arch, {}).get(name, set())
def expand_list(self, patterns):
"""Given a list of globs, create a list of package names matching any
of the pattern.
"""
expanded = set()
for pkgset in self.package_sets:
for pkg_arch in pkgset.package_sets[self.arch].rpms_by_arch:
for pkg in pkgset.package_sets[self.arch].rpms_by_arch[pkg_arch]:
for pattern in patterns:
if fnmatch(pkg.name, pattern):
expanded.add(pkg)
break
return expanded
def prepare_modular_packages(self):
for var in self.compose.all_variants.values():
for mmd in var.arch_mmds.get(self.arch, {}).values():
self.modular_packages.update(mmd.get_rpm_artifacts())
def prepare_langpacks(self, arch, variant):
if not self.compose.has_comps:
return
comps_file = self.compose.paths.work.comps(arch, variant, create_dir=False)
comps = CompsWrapper(comps_file)
for name, install in comps.get_langpacks().items():
# Replace %s with * for fnmatch.
install_match = install % "*"
self.langpacks[name] = set()
for pkgset in self.package_sets:
for pkg_arch in pkgset.package_sets[arch].rpms_by_arch:
for pkg in pkgset.package_sets[arch].rpms_by_arch[pkg_arch]:
if not fnmatch(pkg.name, install_match):
# Does not match the pattern, ignore...
continue
if pkg.name.endswith("-devel") or pkg.name.endswith("-static"):
continue
if pkg_is_debug(pkg):
continue
self.langpacks[name].add(pkg.name)
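The comps langpack patterns use %s as the language placeholder, which the code above turns into a glob. For example (hypothetical group data):
from fnmatch import fnmatch
install = "aspell-%s"              # as stored in comps
install_match = install % "*"      # -> "aspell-*"
print(fnmatch("aspell-en", install_match))     # True: recorded as a langpack
print(fnmatch("aspell-devel", install_match))  # True, but dropped by the -devel check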
def __call__(
self,
arch,
variant,
package_sets,
packages=[],
groups=[],
multilib_whitelist=[],
multilib_blacklist=[],
filter_packages=[],
prepopulate=[],
**kwargs
):
self.arch = arch
self.variant = variant
self.valid_arches = get_valid_arches(arch, multilib=True)
self.package_sets = package_sets
self.prepare_langpacks(arch, variant)
self.prepare_modular_packages()
self.multilib_methods = get_arch_variant_data(
self.compose.conf, "multilib", arch, variant
)
self.multilib = multilib_dnf.Multilib(
self.multilib_methods,
set(p.name for p in self.expand_list(multilib_blacklist)),
set(p.name for p in self.expand_list(multilib_whitelist)),
)
platform = get_platform(self.compose, variant, arch)
packages.update(
expand_groups(self.compose, arch, variant, groups, set_pkg_arch=False)
)
packages.update(tuple(pkg.rsplit(".", 1)) for pkg in prepopulate)
# Filters are received as tuples (name, arch); we need to convert them
# to strings.
filters = [_fmt_pkg(*p) for p in filter_packages]
cache_prefix = "fus-cache-%s-%s-%s-" % (self.compose.compose_id, variant, arch)
with temp_dir(prefix=cache_prefix) as cache_dir:
nvrs, out_modules = self.run_solver(
variant, arch, packages, platform, filters, cache_dir=cache_dir
)
filter_modules(variant, arch, out_modules)
return expand_packages(
self._get_pkg_map(arch),
pungi.phases.gather.get_lookaside_repos(self.compose, arch, variant),
nvrs,
filter_packages=filter_packages,
)
# maybe check invalid sigkeys
def get_repos(self):
repos = []
for pkgset in self.package_sets:
if self.variant.pkgsets and pkgset.name not in self.variant.pkgsets:
continue
repos.append(pkgset.paths[self.arch])
return repos
def run_solver(self, variant, arch, packages, platform, filter_packages, cache_dir):
repos = self.get_repos()
results = set()
result_modules = set()
modules = []
for mmd in variant.arch_mmds.get(arch, {}).values():
modules.append("%s:%s" % (mmd.get_module_name(), mmd.get_stream_name()))
input_packages = []
for pkg_name, pkg_arch in packages:
input_packages.extend(self._expand_wildcard(pkg_name, pkg_arch))
step = 0
while True:
step += 1
conf_file = self.compose.paths.work.fus_conf(arch, variant, step)
fus.write_config(conf_file, sorted(modules), sorted(input_packages))
cmd = fus.get_cmd(
conf_file,
tree_arch_to_yum_arch(arch),
repos,
pungi.phases.gather.get_lookaside_repos(self.compose, arch, variant),
platform=platform,
filter_packages=filter_packages,
)
logfile = self.compose.paths.log.log_file(
arch, "hybrid-depsolver-%s-iter-%d" % (variant, step)
)
# Adding this environment variable will tell GLib not to prefix
# any log messages with the PID of the fus process (which is quite
# useless for us anyway).
env = os.environ.copy()
env["G_MESSAGES_PREFIXED"] = ""
env["XDG_CACHE_HOME"] = cache_dir
self.compose.log_debug(
"[BEGIN] Running fus (arch: %s, variant: %s)" % (arch, variant)
)
run(cmd, logfile=logfile, show_cmd=True, env=env)
output, out_modules = fus.parse_output(logfile)
self.compose.log_debug(
"[DONE ] Running fus (arch: %s, variant: %s)" % (arch, variant)
)
# No need to resolve modules again. They are not going to change.
modules = []
# Reset input packages as well to only solve newly added things.
input_packages = []
# Preserve the results from this iteration.
results.update(output)
result_modules.update(out_modules)
new_multilib = self.add_multilib(variant, arch, output)
input_packages.extend(
_fmt_pkg(pkg_name, pkg_arch)
for pkg_name, pkg_arch in sorted(new_multilib)
)
new_debuginfo = self.add_debuginfo(arch, output)
input_packages.extend(
_fmt_pkg(pkg_name, pkg_arch)
for pkg_name, pkg_arch in sorted(new_debuginfo)
)
new_langpacks = self.add_langpacks(output)
input_packages.extend(new_langpacks)
if not input_packages:
# Nothing new was added, we can stop now.
break
return results, result_modules
def add_multilib(self, variant, arch, nvrs):
added = set()
if not self.multilib_methods:
return []
for nvr, pkg_arch, flags in nvrs:
if (nvr, pkg_arch) in self.processed_multilib:
continue
self.processed_multilib.add((nvr, pkg_arch))
if "modular" in flags:
continue
if pkg_arch != arch:
# Not a native package, not checking to add multilib
continue
nevr = kobo.rpmlib.parse_nvr(nvr)
for add_arch in self.valid_arches:
if add_arch == arch:
continue
try:
multilib_candidate = self._get_package("%s.%s" % (nvr, add_arch))
except KeyError:
continue
if self.multilib.is_multilib(multilib_candidate):
added.add((nevr["name"], add_arch))
return added
def add_debuginfo(self, arch, nvrs):
added = set()
for nvr, pkg_arch, flags in nvrs:
if (nvr, pkg_arch) in self.processed_debuginfo:
continue
self.processed_debuginfo.add((nvr, pkg_arch))
if "modular" in flags:
continue
pkg = self._get_package("%s.%s" % (nvr, pkg_arch))
# There are two ways the debuginfo package can be named. We want to
# get them all.
source_name = kobo.rpmlib.parse_nvra(pkg.rpm_sourcerpm)["name"]
for debuginfo_name in [
"%s-debuginfo" % pkg.name,
"%s-debugsource" % source_name,
]:
debuginfo = self._get_debuginfo(debuginfo_name, pkg_arch)
for dbg in debuginfo:
# For each debuginfo package that matches on name and
# architecture, we also need to check if it comes from the
# same build.
if dbg.sourcerpm == pkg.rpm_sourcerpm:
added.add((dbg.name, dbg.arch))
return added
def add_langpacks(self, nvrs):
if not self.langpacks:
return set()
added = set()
for nvr, pkg_arch, flags in nvrs:
if "modular" in flags:
continue
name = nvr.rsplit("-", 2)[0]
if name in self.added_langpacks:
# This package is already processed.
continue
added.update(self.langpacks.get(name, []))
self.added_langpacks.add(name)
return sorted(added)
def _expand_wildcard(self, pkg_name, pkg_arch):
if "*" not in pkg_name:
return [_fmt_pkg(pkg_name, pkg_arch)]
packages = []
for pkg in self.expand_list([pkg_name]):
if pkg_is_debug(pkg):
# No debuginfo
continue
if pkg_arch:
if pkg_arch != pkg.arch:
# Arch is specified and does not match, skip the package.
continue
else:
if pkg.arch not in ("noarch", self.arch):
# No arch specified and package does not match
continue
strict_nevra = "%s-%s:%s-%s.%s" % (
pkg.name,
pkg.epoch or "0",
pkg.version,
pkg.release,
pkg.arch,
)
if strict_nevra in self.modular_packages:
# Wildcards should not match modular packages.
continue
packages.append(_fmt_nevra(pkg, pkg.arch))
return packages
def iter_platforms_in_repo(url):
"""Find all platform streams that any module in give repo requires at runtime.
Yields lists of stream names (possible empty).
"""
repomd = os.path.join(url, "repodata/repomd.xml")
with as_local_file(repomd) as url_:
repomd = cr.Repomd(url_)
for rec in repomd.records:
if rec.type != "modules":
continue
# No with statement on Python 2.6 for GzipFile...
record_url = os.path.join(url, rec.location_href)
with as_local_file(record_url) as url_:
gzipped_file = gzip.GzipFile(url_, "rb")
mod_index = Modulemd.ModuleIndex.new()
mod_index.update_from_string(gzipped_file.read().decode("utf-8"), False)
gzipped_file.close()
for module_name in mod_index.get_module_names():
module = mod_index.get_module(module_name)
for module_stream in module.get_all_streams():
module_stream = module_stream.upgrade(2)
for dep in module_stream.get_dependencies():
yield dep.get_runtime_streams("platform")
def get_platform_from_lookasides(compose, variant, arch):
"""Find a set of all platform dependencies in all lookaside repos."""
platforms = set()
for repo in pungi.phases.gather.get_lookaside_repos(compose, arch, variant):
for ps in iter_platforms_in_repo(fus._prep_path(repo)):
platforms.update(ps)
return platforms
def get_platform(compose, variant, arch):
"""Find platform stream for modules. Raises RuntimeError if there are
conflicting requests.
"""
platforms = get_platform_from_lookasides(compose, variant, arch)
for var in compose.all_variants.values():
for mmd in var.arch_mmds.get(arch, {}).values():
for dep in mmd.get_dependencies():
streams = dep.get_runtime_streams("platform")
if streams:
platforms.update(streams)
if len(platforms) > 1:
raise RuntimeError("There are conflicting requests for platform.")
return list(platforms)[0] if platforms else None
def _fmt_pkg(pkg_name, arch):
if arch:
pkg_name += ".%s" % arch
return pkg_name
def _nevra(**kwargs):
if kwargs.get("epoch") not in (None, "", 0, "0"):
return "%(name)s-%(epoch)s:%(version)s-%(release)s.%(arch)s" % kwargs
return "%(name)s-%(version)s-%(release)s.%(arch)s" % kwargs
def _fmt_nevra(pkg, arch):
return _nevra(
name=pkg.name,
epoch=pkg.epoch,
version=pkg.version,
release=pkg.release,
arch=arch,
)
def _get_srpm_nevra(pkg):
nevra = kobo.rpmlib.parse_nvra(pkg.sourcerpm)
nevra["epoch"] = nevra["epoch"] or pkg.epoch
return _nevra(**nevra)
def _make_result(paths):
return [{"path": path, "flags": []} for path in sorted(paths)]
def get_repo_packages(path):
"""Extract file names of all packages in the given repository."""
packages = set()
def callback(pkg):
packages.add(os.path.basename(pkg.location_href))
repomd = os.path.join(path, "repodata/repomd.xml")
with as_local_file(repomd) as url_:
repomd = cr.Repomd(url_)
for rec in repomd.records:
if rec.type != "primary":
continue
record_url = os.path.join(path, rec.location_href)
with as_local_file(record_url) as url_:
cr.xml_parse_primary(url_, pkgcb=callback, do_files=False)
return packages
def expand_packages(nevra_to_pkg, lookasides, nvrs, filter_packages):
"""For each package add source RPM."""
# This will serve as the final result. We collect sets of paths to the
# packages.
rpms = set()
srpms = set()
debuginfo = set()
filters = set(filter_packages)
lookaside_packages = set()
for repo in lookasides:
lookaside_packages.update(get_repo_packages(repo))
for nvr, pkg_arch, flags in nvrs:
pkg = nevra_to_pkg["%s.%s" % (nvr, pkg_arch)]
if os.path.basename(pkg.file_path) in lookaside_packages:
# Fus can return lookaside package in output if the package is
# explicitly listed as input. This can happen during comps
# expansion.
continue
if pkg_is_debug(pkg):
debuginfo.add(pkg.file_path)
else:
rpms.add(pkg.file_path)
try:
srpm_nevra = _get_srpm_nevra(pkg)
srpm = nevra_to_pkg[srpm_nevra]
if (srpm.name, "src") in filters:
# Filtered package, skipping
continue
if os.path.basename(srpm.file_path) not in lookaside_packages:
srpms.add(srpm.file_path)
except KeyError:
# Didn't find the source RPM; this should be logged.
pass
return _mk_pkg_map(_make_result(rpms), _make_result(srpms), _make_result(debuginfo))
def filter_modules(variant, arch, nsvcs_to_keep):
"""Remove any arch-specific module metadata from the module if it's not
listed in the list to keep. This will ultimately cause the module to not be
included in the final repodata and module metadata.
"""
for nsvc in list(variant.arch_mmds.get(arch, {}).keys()):
if nsvc not in nsvcs_to_keep:
del variant.arch_mmds[arch][nsvc]

View File

@ -0,0 +1,184 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
import os
from pprint import pformat
import re
import six
import pungi.arch
from pungi.util import pkg_is_rpm, pkg_is_srpm, pkg_is_debug
from pungi.wrappers.comps import CompsWrapper
from pungi.phases.pkgset.pkgsets import ExtendedRpmWrapper
import pungi.phases.gather.method
from kobo.pkgset import SimpleRpmWrapper, RpmWrapper
class GatherMethodNodeps(pungi.phases.gather.method.GatherMethodBase):
def __call__(self, arch, variant, *args, **kwargs):
fname = "gather-nodeps-%s" % variant.uid
if self.source_name:
fname += "-" + self.source_name
log_file = self.compose.paths.log.log_file(arch, fname)
with open(log_file, "w") as log:
return self.worker(log, arch, variant, *args, **kwargs)
def worker(
self,
log,
arch,
variant,
pkgs,
groups,
filter_packages,
multilib_whitelist,
multilib_blacklist,
package_sets,
path_prefix=None,
fulltree_excludes=None,
prepopulate=None,
):
result = {
"rpm": [],
"srpm": [],
"debuginfo": [],
}
group_packages = expand_groups(self.compose, arch, variant, groups)
packages = pkgs | group_packages
log.write("Requested packages:\n%s\n" % pformat(packages))
seen_rpms = {}
seen_srpms = {}
valid_arches = pungi.arch.get_valid_arches(arch, multilib=True)
compatible_arches = {}
for i in valid_arches:
compatible_arches[i] = pungi.arch.get_compatible_arches(i)
log.write("\nGathering rpms\n")
for pkg in iterate_packages(package_sets, arch):
if not pkg_is_rpm(pkg):
continue
for gathered_pkg, pkg_arch in packages:
if isinstance(gathered_pkg, six.string_types) and not re.match(
gathered_pkg.replace(".", "\\.")
.replace("+", "\\+")
.replace("*", ".*")
+ "$",
pkg.name,
):
continue
elif (
type(gathered_pkg)
in [SimpleRpmWrapper, RpmWrapper, ExtendedRpmWrapper]
and pkg.nevra != gathered_pkg.nevra
):
continue
if (
pkg_arch is not None
and pkg.arch != pkg_arch
and pkg.arch != "noarch"
):
continue
result["rpm"].append({"path": pkg.file_path, "flags": ["input"]})
seen_rpms.setdefault(pkg.name, set()).add(pkg.arch)
seen_srpms.setdefault(pkg.sourcerpm, set()).add(pkg.arch)
log.write(
"Added %s (matched %s.%s) (sourcerpm: %s)\n"
% (pkg, gathered_pkg, pkg_arch, pkg.sourcerpm)
)
log.write("\nGathering source rpms\n")
for pkg in iterate_packages(package_sets, arch):
if not pkg_is_srpm(pkg):
continue
if pkg.file_name in seen_srpms:
result["srpm"].append({"path": pkg.file_path, "flags": ["input"]})
log.write("Adding %s\n" % pkg)
log.write("\nGathering debuginfo packages\n")
for pkg in iterate_packages(package_sets, arch):
if not pkg_is_debug(pkg):
continue
if pkg.sourcerpm not in seen_srpms:
log.write("Not considering %s: corresponding srpm not included\n" % pkg)
continue
pkg_arches = set(compatible_arches[pkg.arch]) - set(["noarch"])
seen_arches = set(seen_srpms[pkg.sourcerpm]) - set(["noarch"])
if not (pkg_arches & seen_arches):
# We only want to pull in a debuginfo if we have a binary
# package for a compatible arch. Noarch packages should not
# pull debuginfo (they would pull in all architectures).
log.write("Not including %s: no package for this arch\n" % pkg)
continue
result["debuginfo"].append({"path": pkg.file_path, "flags": ["input"]})
log.write("Adding %s\n" % pkg)
return result
def expand_groups(compose, arch, variant, groups, set_pkg_arch=True):
"""Read comps file filtered for given architecture and variant and return
all packages in given groups.
:returns: A set of tuples (pkg_name, arch)
"""
if not groups:
# No groups, nothing to do (this also covers the case when there is
# no comps file).
return set()
comps = []
comps_file = compose.paths.work.comps(arch, variant, create_dir=False)
comps.append(CompsWrapper(comps_file))
if variant and variant.parent:
parent_comps_file = compose.paths.work.comps(
arch, variant.parent, create_dir=False
)
comps.append(CompsWrapper(parent_comps_file))
if variant.type == "optional":
for v in variant.parent.variants.values():
if v.id == variant.id:
continue
comps_file = compose.paths.work.comps(arch, v, create_dir=False)
if os.path.exists(comps_file):
comps.append(CompsWrapper(comps_file))
packages = set()
pkg_arch = arch if set_pkg_arch else None
for group in groups:
found = False
ex = None
for c in comps:
try:
packages.update([(pkg, pkg_arch) for pkg in c.get_packages(group)])
found = True
break
except KeyError as e:
ex = e
if not found:
raise ex
return packages
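A hedged usage sketch of the underlying comps lookup, assuming a comps file that defines a "core" group (CompsWrapper.get_packages raises KeyError for unknown groups, which the loop above relies on):
from pungi.wrappers.comps import CompsWrapper
comps = CompsWrapper("comps-x86_64.xml")         # hypothetical filtered comps file
names = comps.get_packages("core")               # e.g. ["bash", "glibc"]
packages = set((name, "x86_64") for name in names)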
def iterate_packages(package_sets, arch):
for pkgset in package_sets:
for pkg in pkgset[arch]:
yield pkgset[arch][pkg]

View File

@ -0,0 +1,19 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
class GatherSourceBase(object):
def __init__(self, compose):
self.compose = compose

View File

@ -0,0 +1,26 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
from .source_comps import GatherSourceComps
from .source_json import GatherSourceJson
from .source_module import GatherSourceModule
from .source_none import GatherSourceNone
ALL_SOURCES = {
"comps": GatherSourceComps,
"json": GatherSourceJson,
"module": GatherSourceModule,
"none": GatherSourceNone,
}

View File

@ -0,0 +1,42 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
"""
Get a package list based on comps.xml.
Input format:
see comps.dtd
Output:
set([(rpm_name, rpm_arch or None)])
"""
from pungi.wrappers.comps import CompsWrapper
import pungi.phases.gather.source
class GatherSourceComps(pungi.phases.gather.source.GatherSourceBase):
def __call__(self, arch, variant):
groups = set()
if not self.compose.conf.get("comps_file"):
return set(), set()
comps = CompsWrapper(self.compose.paths.work.comps(arch=arch, variant=variant))
for i in comps.get_comps_groups():
groups.add(i)
return set(), groups

View File

@ -0,0 +1,62 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
"""
Get a package list based on a JSON mapping.
Input format:
{
variant: {
tree_arch: {
rpm_name: [rpm_arch, rpm_arch, ... (or None for any/best arch)],
}
}
}
Output:
set([(rpm_name, rpm_arch or None)])
"""
import json
import os
import pungi.phases.gather.source
class GatherSourceJson(pungi.phases.gather.source.GatherSourceBase):
def __call__(self, arch, variant):
json_path = self.compose.conf.get("gather_source_mapping")
if not json_path:
return set(), set()
with open(os.path.join(self.compose.config_dir, json_path), "r") as f:
mapping = json.load(f)
packages = set()
if variant is None:
# get all packages for all variants
for variant_uid in mapping:
for pkg_name, pkg_arches in mapping[variant_uid].get(arch, {}).items():
for pkg_arch in pkg_arches:
packages.add((pkg_name, pkg_arch))
else:
# get packages for a particular variant
for pkg_name, pkg_arches in (
mapping.get(variant.uid, {}).get(arch, {}).items()
):
for pkg_arch in pkg_arches:
packages.add((pkg_name, pkg_arch))
return packages, set()
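A concrete (hypothetical) mapping in the documented format, and what it yields for variant "Server" on x86_64:
mapping = {
    "Server": {
        "x86_64": {
            "bash": ["x86_64"],
            "glibc": [None],     # None means any/best arch
        }
    }
}
# -> packages == {("bash", "x86_64"), ("glibc", None)}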

View File

@ -0,0 +1,52 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
"""
Get a package list based on modulemd metadata loaded in the pkgset phase.
Each modulemd file contains a list of exact RPM NEVRAs that should be
included, so just go over all modules in a given variant and join all the
lists together.
"""
import pungi.arch
import pungi.phases.gather.source
class GatherSourceModule(pungi.phases.gather.source.GatherSourceBase):
def __call__(self, arch, variant):
groups = set()
packages = set()
# Check if there is a variant. The method only makes sense for variants.
if variant is None:
return packages, groups
compatible_arches = pungi.arch.get_compatible_arches(arch, multilib=True)
for nsvc, module_stream in variant.arch_mmds.get(arch, {}).items():
available_rpms = sum(
(
variant.nsvc_to_pkgset[nsvc].rpms_by_arch.get(a, [])
for a in compatible_arches
),
[],
)
to_include = set(module_stream.get_rpm_artifacts())
for rpm_obj in available_rpms:
if rpm_obj.nevra in to_include:
packages.add((rpm_obj, None))
return packages, groups

View File

@ -0,0 +1,33 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
"""
Get an empty package list.
Input:
none
Output:
set()
"""
import pungi.phases.gather.source
class GatherSourceNone(pungi.phases.gather.source.GatherSourceBase):
def __call__(self, arch, variant):
return set(), set()

526
pungi/phases/image_build.py Normal file
View File

@ -0,0 +1,526 @@
# -*- coding: utf-8 -*-
import copy
import hashlib
import json
import os
import shutil
import time
from kobo import shortcuts
from pungi.util import makedirs, get_mtime, get_file_size, failable, log_failed_task
from pungi.util import as_local_file, translate_path, get_repo_urls, version_generator
from pungi.phases import base
from pungi.linker import Linker
from pungi.wrappers.kojiwrapper import KojiWrapper
from kobo.threads import ThreadPool, WorkerThread
from kobo.shortcuts import force_list
from productmd.images import Image
from productmd.rpms import Rpms
# This is a mapping from formats to file extensions. The format is what the
# koji image-build command expects as an argument, and the extension is what
# the resulting file name will end with. The extensions are used to filter
# out which task results will be pulled into the compose.
EXTENSIONS = {
"docker": ["tar.gz", "tar.xz"],
"iso": ["iso"],
"liveimg-squashfs": ["liveimg.squashfs"],
"qcow": ["qcow"],
"qcow2": ["qcow2"],
"raw": ["raw"],
"raw-xz": ["raw.xz"],
"rhevm-ova": ["rhevm.ova"],
"tar-gz": ["tar.gz"],
"vagrant-hyperv": ["vagrant-hyperv.box"],
"vagrant-libvirt": ["vagrant-libvirt.box"],
"vagrant-virtualbox": ["vagrant-virtualbox.box"],
"vagrant-vmware-fusion": ["vagrant-vmware-fusion.box"],
"vdi": ["vdi"],
"vmdk": ["vmdk"],
"vpc": ["vhd"],
"vhd-compressed": ["vhd.gz", "vhd.xz"],
"vsphere-ova": ["vsphere.ova"],
}
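The map is consulted when collecting task results: only files whose name ends with one of the extensions for the requested format are pulled in. A minimal sketch (paths hypothetical):
paths = [
    "/mnt/koji/task/123/Fedora-Rawhide.x86_64.qcow2",
    "/mnt/koji/task/123/oz-x86_64.log",
]
wanted = [p for p in paths
          if any(p.endswith(suffix) for suffix in EXTENSIONS["qcow2"])]
# -> only the .qcow2 file is kept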
class ImageBuildPhase(
base.PhaseLoggerMixin, base.ImageConfigMixin, base.ConfigGuardedPhase
):
"""class for wrapping up koji image-build"""
name = "image_build"
def __init__(self, compose, buildinstall_phase=None):
super(ImageBuildPhase, self).__init__(compose)
self.pool = ThreadPool(logger=self.logger)
self.buildinstall_phase = buildinstall_phase
def _get_install_tree(self, image_conf, variant):
"""
Get a path to the os tree of the variant specified in `install_tree_from`,
or of the current variant. If the option is set, it will be removed from
the dict.
"""
if variant.type != "variant":
# Buildinstall only runs for top-level variants. Nested variants
# need to re-use install tree from parent.
variant = variant.parent
install_tree_from = image_conf.pop("install_tree_from", variant.uid)
if "://" in install_tree_from:
# It's a URL, return it unchanged
return install_tree_from
if install_tree_from.startswith("/"):
# It's a path on local filesystem.
return translate_path(self.compose, install_tree_from)
install_tree_source = self.compose.all_variants.get(install_tree_from)
if not install_tree_source:
raise RuntimeError(
"There is no variant %s to get install tree from "
"when building image for %s." % (install_tree_from, variant.uid)
)
return translate_path(
self.compose,
self.compose.paths.compose.os_tree(
"$arch", install_tree_source, create_dir=False
),
)
def _get_repo(self, image_conf, variant):
"""
Get a comma separated list of repos. First come those explicitly listed
in the config, followed by the repo for the current variant if it's not
already included in the list.
"""
repos = shortcuts.force_list(image_conf.get("repo", []))
if not variant.is_empty and variant.uid not in repos:
repos.append(variant.uid)
return ",".join(get_repo_urls(self.compose, repos, arch="$arch"))
def _get_arches(self, image_conf, arches):
if "arches" in image_conf["image-build"]:
arches = set(image_conf["image-build"].get("arches", [])) & arches
return sorted(arches)
def _set_release(self, image_conf):
"""If release is set explicitly to None, replace it with date and respin."""
if "release" in image_conf:
image_conf["release"] = (
version_generator(self.compose, image_conf["release"])
or self.compose.image_release
)
def run(self):
for variant in self.compose.get_variants():
arches = set([x for x in variant.arches if x != "src"])
for image_conf in self.get_config_block(variant):
# We will modify the data, so we need to make a copy to
# prevent problems in the next iteration, where the original
# value is needed.
image_conf = copy.deepcopy(image_conf)
original_image_conf = copy.deepcopy(image_conf)
# image_conf is passed to get_image_build_cmd as dict
image_conf["image-build"]["arches"] = self._get_arches(
image_conf, arches
)
if not image_conf["image-build"]["arches"]:
continue
# Replace possible ambiguous ref name with explicit hash.
ksurl = self.get_ksurl(image_conf["image-build"])
if ksurl:
image_conf["image-build"]["ksurl"] = ksurl
image_conf["image-build"]["variant"] = variant
image_conf["image-build"]["install_tree"] = self._get_install_tree(
image_conf["image-build"], variant
)
release = self.get_release(image_conf["image-build"])
if release:
image_conf["image-build"]["release"] = release
image_conf["image-build"]["version"] = self.get_version(
image_conf["image-build"]
)
image_conf["image-build"]["target"] = self.get_config(
image_conf["image-build"], "target"
)
# Pungi config can either contain old [(format, suffix)], or
# just a list of formats, or a single format.
formats = []
for format in force_list(image_conf["image-build"]["format"]):
formats.append(
format[0] if isinstance(format, (tuple, list)) else format
)
image_conf["image-build"]["format"] = formats
image_conf["image-build"]["repo"] = self._get_repo(
image_conf["image-build"], variant
)
can_fail = image_conf["image-build"].pop("failable", [])
if can_fail == ["*"]:
can_fail = image_conf["image-build"]["arches"]
if can_fail:
image_conf["image-build"]["can_fail"] = sorted(can_fail)
cmd = {
"original_image_conf": original_image_conf,
"image_conf": image_conf,
"conf_file": self.compose.paths.work.image_build_conf(
image_conf["image-build"]["variant"],
image_name=image_conf["image-build"]["name"],
image_type="-".join(formats),
arches=image_conf["image-build"]["arches"],
),
"image_dir": self.compose.paths.compose.image_dir(variant),
"relative_image_dir": self.compose.paths.compose.image_dir(
variant, relative=True
),
"link_type": self.compose.conf["link_type"],
"scratch": image_conf["image-build"].pop("scratch", False),
}
self.pool.add(CreateImageBuildThread(self.pool))
self.pool.queue_put((self.compose, cmd, self.buildinstall_phase))
self.pool.start()
class CreateImageBuildThread(WorkerThread):
def fail(self, compose, cmd):
self.pool.log_error("CreateImageBuild failed.")
def process(self, item, num):
compose, cmd, buildinstall_phase = item
variant = cmd["image_conf"]["image-build"]["variant"]
subvariant = cmd["image_conf"]["image-build"].get("subvariant", variant.uid)
self.failable_arches = cmd["image_conf"]["image-build"].get("can_fail", "")
self.can_fail = (
self.failable_arches == cmd["image_conf"]["image-build"]["arches"]
)
with failable(
compose,
self.can_fail,
variant,
"*",
"image-build",
subvariant,
logger=self.pool._logger,
):
self.worker(num, compose, variant, subvariant, cmd, buildinstall_phase)
def worker(self, num, compose, variant, subvariant, cmd, buildinstall_phase):
arches = cmd["image_conf"]["image-build"]["arches"]
formats = "-".join(cmd["image_conf"]["image-build"]["format"])
dash_arches = "-".join(arches)
log_file = compose.paths.log.log_file(
dash_arches, "imagebuild-%s-%s-%s" % (variant.uid, subvariant, formats)
)
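# The reuse metadata lives next to the log file: strip the ".log"
# suffix and append ".reuse.json".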
metadata_file = log_file[:-4] + ".reuse.json"
external_repo_checksum = {}
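# Remember a sha256 of repomd.xml for every external (non-variant)
# repo, so the reuse check below can tell whether external repo
# content changed since the previous compose.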
try:
for repo in cmd["original_image_conf"]["image-build"]["repo"]:
if repo in compose.all_variants:
continue
with as_local_file(
os.path.join(repo, "repodata/repomd.xml")
) as filename:
with open(filename, "rb") as f:
external_repo_checksum[repo] = hashlib.sha256(
f.read()
).hexdigest()
except Exception as e:
external_repo_checksum = None
self.pool.log_info(
"Can't calculate checksum of repomd.xml of external repo - %s" % str(e)
)
if self._try_to_reuse(
compose,
variant,
subvariant,
metadata_file,
log_file,
cmd,
external_repo_checksum,
buildinstall_phase,
):
return
msg = (
"Creating image (formats: %s, arches: %s, variant: %s, subvariant: %s)"
% (formats, dash_arches, variant, subvariant)
)
self.pool.log_info("[BEGIN] %s" % msg)
koji_wrapper = KojiWrapper(compose)
# writes conf file for koji image-build
self.pool.log_info(
"Writing image-build config for %s.%s into %s"
% (variant, dash_arches, cmd["conf_file"])
)
koji_cmd = koji_wrapper.get_image_build_cmd(
cmd["image_conf"], conf_file_dest=cmd["conf_file"], scratch=cmd["scratch"]
)
# avoid race conditions?
# Kerberos authentication failed:
# Permission denied in replay cache code (-1765328215)
# [workaround] Increased time delay from 3 to 10 sec until the issue in
# koji gets fixed https://pagure.io/koji/issue/2138
time.sleep(num * 10)
output = koji_wrapper.run_blocking_cmd(koji_cmd, log_file=log_file)
self.pool.log_debug("build-image outputs: %s" % (output))
if output["retcode"] != 0:
self.fail(compose, cmd)
raise RuntimeError(
"ImageBuild task failed: %s. See %s for more details."
% (output["task_id"], log_file)
)
# copy image to images/
image_infos = []
paths = koji_wrapper.get_image_paths(
output["task_id"],
callback=lambda arch: log_failed_task(
compose, variant, arch, "image-build", subvariant
),
)
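# Koji returns image files grouped by arch; keep only those whose
# suffix matches one of the requested formats.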
for arch, paths in paths.items():
for path in paths:
for format in cmd["image_conf"]["image-build"]["format"]:
for suffix in EXTENSIONS[format]:
if path.endswith(suffix):
image_infos.append(
{
"path": path,
"suffix": suffix,
"type": format,
"arch": arch,
}
)
break
self._link_images(compose, variant, subvariant, cmd, image_infos)
self._write_reuse_metadata(
compose, metadata_file, cmd, image_infos, external_repo_checksum
)
self.pool.log_info("[DONE ] %s (task id: %s)" % (msg, output["task_id"]))
def _link_images(self, compose, variant, subvariant, cmd, image_infos):
"""Link images to compose and update image manifest.
:param Compose compose: Current compose.
:param Variant variant: Current variant.
:param str subvariant: Current subvariant.
:param dict cmd: Dict of params for image-build.
:param list image_infos: List of dicts with image info.
"""
# The use case here is that koji image-build can be run with multiple
# --format options. It's OK to link serially, since there are at most a
# couple of images per single image_build record.
linker = Linker(logger=self.pool._logger)
for image_info in image_infos:
image_dir = cmd["image_dir"] % {"arch": image_info["arch"]}
makedirs(image_dir)
relative_image_dir = cmd["relative_image_dir"] % {
"arch": image_info["arch"]
}
# let's not change filename of koji outputs
image_dest = os.path.join(image_dir, os.path.basename(image_info["path"]))
src_file = compose.koji_downloader.get_file(
os.path.realpath(image_info["path"])
)
linker.link(src_file, image_dest, link_type=cmd["link_type"])
# Update image manifest
img = Image(compose.im)
img.type = image_info["type"]
img.format = image_info["suffix"]
img.path = os.path.join(relative_image_dir, os.path.basename(image_dest))
img.mtime = get_mtime(image_dest)
img.size = get_file_size(image_dest)
img.arch = image_info["arch"]
img.disc_number = 1 # We don't expect multiple disks
img.disc_count = 1
img.bootable = False
img.subvariant = subvariant
setattr(img, "can_fail", self.can_fail)
setattr(img, "deliverable", "image-build")
compose.im.add(variant=variant.uid, arch=image_info["arch"], image=img)
def _try_to_reuse(
self,
compose,
variant,
subvariant,
metadata_file,
log_file,
cmd,
external_repo_checksum,
buildinstall_phase,
):
"""Try to reuse images from old compose.
:param Compose compose: Current compose.
:param Variant variant: Current variant.
:param str subvariant: Current subvariant.
:param str metadata_file: Path to reuse metadata file.
:param str log_file: Path to log file.
:param dict cmd: Dict of params for image-build.
:param dict external_repo_checksum: Dict of repomd.xml checksums per
external repo, or None if a checksum could not be computed.
:param BuildinstallPhase buildinstall_phase: buildinstall phase of
current compose.
"""
log_msg = "Cannot reuse old image_build phase results - %s"
if not compose.conf["image_build_allow_reuse"]:
self.pool.log_info(
log_msg % "reuse of old image_build results is disabled."
)
return False
if external_repo_checksum is None:
self.pool.log_info(
log_msg % "Can't ensure that external repo is not changed."
)
return False
old_metadata_file = compose.paths.old_compose_path(metadata_file)
if not old_metadata_file:
self.pool.log_info(log_msg % "Can't find old reuse metadata file")
return False
try:
old_metadata = self._load_reuse_metadata(old_metadata_file)
except Exception as e:
self.pool.log_info(
log_msg % "Can't load old reuse metadata file: %s" % str(e)
)
return False
if old_metadata["cmd"]["original_image_conf"] != cmd["original_image_conf"]:
self.pool.log_info(log_msg % "image_build config changed")
return False
# Make sure external repo does not change
if (
old_metadata["external_repo_checksum"] is None
or old_metadata["external_repo_checksum"] != external_repo_checksum
):
self.pool.log_info(log_msg % "External repo may be changed")
return False
# Make sure buildinstall phase is reused
for arch in cmd["image_conf"]["image-build"]["arches"]:
if buildinstall_phase and not buildinstall_phase.reused(variant, arch):
self.pool.log_info(log_msg % "buildinstall phase changed")
return False
# Make sure packages in the variant have not changed
rpm_manifest_file = compose.paths.compose.metadata("rpms.json")
rpm_manifest = Rpms()
rpm_manifest.load(rpm_manifest_file)
old_rpm_manifest_file = compose.paths.old_compose_path(rpm_manifest_file)
old_rpm_manifest = Rpms()
old_rpm_manifest.load(old_rpm_manifest_file)
for repo in cmd["original_image_conf"]["image-build"]["repo"]:
if repo not in compose.all_variants:
# External repos are checked using other logic.
continue
for arch in cmd["image_conf"]["image-build"]["arches"]:
if (
rpm_manifest.rpms[variant.uid][arch]
!= old_rpm_manifest.rpms[variant.uid][arch]
):
self.pool.log_info(
log_msg % "Packages in %s.%s changed." % (variant.uid, arch)
)
return False
self.pool.log_info(
"Reusing images from old compose for variant %s" % variant.uid
)
try:
self._link_images(
compose, variant, subvariant, cmd, old_metadata["image_infos"]
)
except Exception as e:
self.pool.log_info(log_msg % "Can't link images %s" % str(e))
return False
old_log_file = compose.paths.old_compose_path(log_file)
try:
shutil.copy2(old_log_file, log_file)
except Exception as e:
self.pool.log_info(
log_msg % "Can't copy old log_file: %s %s" % (old_log_file, str(e))
)
return False
self._write_reuse_metadata(
compose,
metadata_file,
cmd,
old_metadata["image_infos"],
external_repo_checksum,
)
return True
def _write_reuse_metadata(
self, compose, metadata_file, cmd, image_infos, external_repo_checksum
):
"""Write metadata file.
:param Compose compose: Current compose.
:param str metadata_file: Path to reuse metadata file.
:param dict cmd: Dict of params for image-build.
:param list image_infos: List of dicts with image info.
:param dict external_repo_checksum: Dict of repomd.xml checksums per
external repo, or None if a checksum could not be computed.
"""
msg = "Writing reuse metadata file: %s" % metadata_file
self.pool.log_info(msg)
cmd_copy = copy.deepcopy(cmd)
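# The "variant" value is a Variant object which json.dump cannot
# serialize, so drop it from the stored copy.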
del cmd_copy["image_conf"]["image-build"]["variant"]
data = {
"cmd": cmd_copy,
"image_infos": image_infos,
"external_repo_checksum": external_repo_checksum,
}
try:
with open(metadata_file, "w") as f:
json.dump(data, f, indent=4)
except Exception as e:
self.pool.log_info("%s Failed: %s" % (msg, str(e)))
def _load_reuse_metadata(self, metadata_file):
"""Load metadata file.
:param str metadata_file: Path to reuse metadata file.
"""
with open(metadata_file, "r") as f:
return json.load(f)

206
pungi/phases/image_checksum.py Normal file
View File

@ -0,0 +1,206 @@
# -*- coding: utf-8 -*-
import os
from kobo import shortcuts
from collections import defaultdict
import threading
from .base import PhaseBase
from ..util import get_format_substs, get_file_size
MULTIPLE_CHECKSUMS_ERROR = (
'Config option "media_checksum_one_file" requires only one checksum'
' to be configured in "media_checksums".'
)
class ImageChecksumPhase(PhaseBase):
"""Go through images specified in image manifest and generate their
checksums. The manifest will be updated with the checksums.
"""
name = "image_checksum"
def __init__(self, compose):
super(ImageChecksumPhase, self).__init__(compose)
self.checksums = self.compose.conf["media_checksums"]
self.one_file = self.compose.conf["media_checksum_one_file"]
def skip(self):
# Skipping this phase does not make sense:
# * if there are no images, it doesn't do anything and is quick
# * if there are images, they must have checksums computed or else
# writing metadata will fail
return False
def validate(self):
errors = []
if self.one_file and len(self.checksums) != 1:
errors.append(MULTIPLE_CHECKSUMS_ERROR)
if errors:
raise ValueError("\n".join(errors))
def _get_images(self):
"""Returns a mapping from directories to sets of ``Image``s.
The paths to dirs are absolute.
"""
top_dir = self.compose.paths.compose.topdir()
images = {}
for variant in self.compose.im.images:
for arch in self.compose.im.images[variant]:
for image in self.compose.im.images[variant][arch]:
path = os.path.dirname(os.path.join(top_dir, image.path))
images.setdefault((variant, arch, path), set()).add(image)
return images
def _get_base_filename(self, variant, arch, **kwargs):
base_checksum_name = self.compose.conf["media_checksum_base_filename"]
if base_checksum_name:
substs = get_format_substs(
self.compose, variant=variant, arch=arch, **kwargs
)
base_checksum_name = (base_checksum_name % substs).format(**substs)
base_checksum_name += "-"
return base_checksum_name
def run(self):
topdir = self.compose.paths.compose.topdir()
make_checksums(
topdir,
self.compose.im,
self.checksums,
self.one_file,
self._get_base_filename,
)
def _compute_checksums(
results,
cache,
variant,
arch,
path,
images,
checksum_types,
base_checksum_name_gen,
one_file,
results_lock,
cache_lock,
):
for image in images:
filename = os.path.basename(image.path)
full_path = os.path.join(path, filename)
if not os.path.exists(full_path):
continue
filesize = image.size or get_file_size(full_path)
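# Compute the digest outside the lock so other threads are not
# blocked while hashing; if two threads race on the same file they
# merely compute identical results twice.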
cache_lock.acquire()
if full_path not in cache:
cache_lock.release()
# Source ISO is listed under each binary architecture. There's no
# point in checksumming it twice, so we can just remember the
# digest from the first run.
checksum_value = shortcuts.compute_file_checksums(full_path, checksum_types)
with cache_lock:
cache[full_path] = checksum_value
else:
cache_lock.release()
with cache_lock:
digests = cache[full_path]
for checksum, digest in digests.items():
# Update metadata with the checksum
image.add_checksum(None, checksum, digest)
# If not turned off, create the file-specific checksum file
if not one_file:
checksum_filename = os.path.join(
path, "%s.%sSUM" % (filename, checksum.upper())
)
with results_lock:
results[checksum_filename].add(
(filename, filesize, checksum, digest)
)
if one_file:
dirname = os.path.basename(path)
base_checksum_name = base_checksum_name_gen(
variant, arch, dirname=dirname
)
checksum_filename = base_checksum_name + "CHECKSUM"
else:
base_checksum_name = base_checksum_name_gen(variant, arch)
checksum_filename = "%s%sSUM" % (base_checksum_name, checksum.upper())
checksum_path = os.path.join(path, checksum_filename)
with results_lock:
results[checksum_path].add((filename, filesize, checksum, digest))
def make_checksums(topdir, im, checksum_types, one_file, base_checksum_name_gen):
results = defaultdict(set)
cache = {}
threads = []
results_lock = threading.Lock() # lock to synchronize access to the results dict.
cache_lock = threading.Lock() # lock to synchronize access to the cache dict.
# create all worker threads
for (variant, arch, path), images in get_images(topdir, im).items():
threads.append(
threading.Thread(
target=_compute_checksums,
args=[
results,
cache,
variant,
arch,
path,
images,
checksum_types,
base_checksum_name_gen,
one_file,
results_lock,
cache_lock,
],
)
)
threads[-1].start()
# wait for all worker threads to finish
for thread in threads:
thread.join()
for file in results:
dump_checksums(file, results[file])
def dump_checksums(checksum_file, data):
"""Write checksums to file.
:param checksum_file: where to write the checksums
:param data: an iterable of tuples (filename, filesize, checksum_type, hash)
"""
with open(checksum_file, "w") as f:
for filename, filesize, alg, checksum in sorted(data):
f.write("# %s: %s bytes\n" % (filename, filesize))
f.write("%s (%s) = %s\n" % (alg.upper(), filename, checksum))
def get_images(top_dir, manifest):
"""Returns a mapping from directories to sets of ``Image``s.
The paths to dirs are absolute.
"""
images = {}
for variant in manifest.images:
for arch in manifest.images[variant]:
for image in manifest.images[variant][arch]:
path = os.path.dirname(os.path.join(top_dir, image.path))
images.setdefault((variant, arch, path), []).append(image)
return images

122
pungi/phases/image_container.py Normal file
View File

@ -0,0 +1,122 @@
# -*- coding: utf-8 -*-
import os
import re
from kobo.threads import ThreadPool, WorkerThread
from .base import ConfigGuardedPhase, PhaseLoggerMixin
from .. import util
from ..wrappers import kojiwrapper
from ..phases.osbs import add_metadata
class ImageContainerPhase(PhaseLoggerMixin, ConfigGuardedPhase):
name = "image_container"
def __init__(self, compose):
super(ImageContainerPhase, self).__init__(compose)
self.pool = ThreadPool(logger=self.logger)
self.pool.metadata = {}
def run(self):
for variant in self.compose.get_variants():
for conf in self.get_config_block(variant):
self.pool.add(ImageContainerThread(self.pool))
self.pool.queue_put((self.compose, variant, conf))
self.pool.start()
class ImageContainerThread(WorkerThread):
def process(self, item, num):
compose, variant, config = item
self.num = num
with util.failable(
compose,
bool(config.pop("failable", None)),
variant,
"*",
"osbs",
logger=self.pool._logger,
):
self.worker(compose, variant, config)
def worker(self, compose, variant, config):
msg = "Image container task for variant %s" % variant.uid
self.pool.log_info("[BEGIN] %s" % msg)
source = config.pop("url")
target = config.pop("target")
priority = config.pop("priority", None)
config["yum_repourls"] = [
self._get_repo(
compose,
variant,
config.get("arch_override", "").split(),
config.pop("image_spec"),
)
]
# Start task
koji = kojiwrapper.KojiWrapper(compose)
koji.login()
task_id = koji.koji_proxy.buildContainer(
source, target, config, priority=priority
)
koji.save_task_id(task_id)
# Wait for it to finish and capture the output into log file (even
# though there is not much there).
log_dir = os.path.join(compose.paths.log.topdir(), "image_container")
util.makedirs(log_dir)
log_file = os.path.join(
log_dir, "%s-%s-watch-task.log" % (variant.uid, self.num)
)
if koji.watch_task(task_id, log_file) != 0:
raise RuntimeError(
"ImageContainer task failed: %s. See %s for details"
% (task_id, log_file)
)
add_metadata(variant, task_id, compose, config.get("scratch", False))
self.pool.log_info("[DONE ] %s" % msg)
def _get_repo(self, compose, variant, arches, image_spec):
"""
Return a repo file that points baseurl to the image specified by
image_spec.
"""
image_paths = set()
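# The inner for/else adds an image only when every attribute in
# image_spec matched, i.e. the loop finished without a break.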
for arch in arches or compose.im.images[variant.uid].keys():
for image in compose.im.images[variant.uid].get(arch, []):
for key, value in image_spec.items():
if not re.match(value, getattr(image, key)):
break
else:
image_paths.add(image.path.replace(arch, "$basearch"))
if len(image_paths) != 1:
raise RuntimeError(
"%d images matched specification. Only one was expected."
% len(image_paths)
)
image_path = image_paths.pop()
absolute_path = os.path.join(compose.paths.compose.topdir(), image_path)
repo_file = os.path.join(
compose.paths.work.tmp_dir(None, variant),
"image-container-%s-%s.repo" % (variant, self.num),
)
with open(repo_file, "w") as f:
f.write("[image-to-include]\n")
f.write("name=Location of image to embed\n")
f.write("baseurl=%s\n" % util.translate_path(compose, absolute_path))
f.write("enabled=0\n")
f.write("gpgcheck=0\n")
return util.translate_path(compose, repo_file)

286
pungi/phases/init.py Normal file
View File

@ -0,0 +1,286 @@
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://gnu.org/licenses/>.
import collections
import os
import glob
import shutil
from kobo.shortcuts import run
from kobo.threads import run_in_threads
from pungi.phases.base import PhaseBase
from pungi.phases.gather import write_prepopulate_file
from pungi.util import temp_dir
from pungi.module_util import iter_module_defaults
from pungi.wrappers.comps import CompsWrapper
from pungi.wrappers.createrepo import CreaterepoWrapper
from pungi.wrappers.scm import get_dir_from_scm, get_file_from_scm
class InitPhase(PhaseBase):
"""INIT is a mandatory phase"""
name = "init"
def skip(self):
# INIT must never be skipped,
# because it generates data for LIVEIMAGES
return False
def run(self):
if self.compose.has_comps:
# write global comps and arch comps, create comps repos
global_comps = write_global_comps(self.compose)
validate_comps(global_comps)
num_workers = self.compose.conf["createrepo_num_threads"]
run_in_threads(
_arch_worker,
[(self.compose, arch) for arch in self.compose.get_arches()],
threads=num_workers,
)
# write variant comps
run_in_threads(
_variant_worker,
[
(self.compose, arch, variant)
for variant in self.compose.get_variants()
for arch in variant.arches
],
threads=num_workers,
)
# download variants.xml / product.xml?
# download module defaults
if self.compose.has_module_defaults:
write_module_defaults(self.compose)
validate_module_defaults(
self.compose.paths.work.module_defaults_dir(create_dir=False)
)
# download module obsoletes
if self.compose.has_module_obsoletes:
write_module_obsoletes(self.compose)
# write prepopulate file
write_prepopulate_file(self.compose)
def _arch_worker(_, args, num):
compose, arch = args
write_arch_comps(compose, arch)
create_comps_repo(compose, arch, None)
def _variant_worker(_, args, num):
compose, arch, variant = args
write_variant_comps(compose, arch, variant)
create_comps_repo(compose, arch, variant)
def write_global_comps(compose):
comps_file_global = compose.paths.work.comps(arch="global")
scm_dict = compose.conf["comps_file"]
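# comps_file is either an scm dict or a plain filename; plain names
# (and "file" scm sources) are resolved relative to the config dir.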
if isinstance(scm_dict, dict):
comps_name = os.path.basename(scm_dict["file"])
if scm_dict["scm"] == "file":
scm_dict["file"] = os.path.join(compose.config_dir, scm_dict["file"])
else:
comps_name = os.path.basename(scm_dict)
scm_dict = os.path.join(compose.config_dir, scm_dict)
compose.log_debug("Writing global comps file: %s", comps_file_global)
tmp_dir = compose.mkdtemp(prefix="comps_")
get_file_from_scm(scm_dict, tmp_dir, compose=compose)
shutil.copy2(os.path.join(tmp_dir, comps_name), comps_file_global)
shutil.rmtree(tmp_dir)
return comps_file_global
def write_arch_comps(compose, arch):
comps_file_arch = compose.paths.work.comps(arch=arch)
compose.log_debug("Writing comps file for arch '%s': %s", arch, comps_file_arch)
run(
[
"comps_filter",
"--arch=%s" % arch,
"--no-cleanup",
"--output=%s" % comps_file_arch,
compose.paths.work.comps(arch="global"),
]
)
UNMATCHED_GROUP_MSG = "Variant %s.%s requires comps group %s which does not match anything in input comps file" # noqa: E501
def get_lookaside_groups(compose, variant):
"""Find all groups listed in parent variant."""
groups = set()
if variant.parent:
groups.update(g["name"] for g in variant.parent.groups)
for var, lookaside in compose.conf.get("variant_as_lookaside", []):
if var == variant.uid:
lookaside_variant = compose.all_variants[lookaside]
groups.update(g["name"] for g in lookaside_variant.groups)
return groups
def write_variant_comps(compose, arch, variant):
comps_file = compose.paths.work.comps(arch=arch, variant=variant)
compose.log_debug(
"Writing comps file (arch: %s, variant: %s): %s", arch, variant, comps_file
)
cmd = [
"comps_filter",
"--arch=%s" % arch,
"--keep-empty-group=conflicts",
"--keep-empty-group=conflicts-%s" % variant.uid.lower(),
"--variant=%s" % variant.uid,
"--output=%s" % comps_file,
compose.paths.work.comps(arch="global"),
]
for group in get_lookaside_groups(compose, variant):
cmd.append("--lookaside-group=%s" % group)
run(cmd)
comps = CompsWrapper(comps_file)
# Filter groups if the variant has some, or it's a modular variant, or
# is not a base variant.
if (
variant.groups
or variant.modules is not None
or variant.modular_koji_tags is not None
or variant.type != "variant"
):
unmatched = comps.filter_groups(variant.groups)
for grp in unmatched:
compose.log_warning(UNMATCHED_GROUP_MSG % (variant.uid, arch, grp))
contains_all = not variant.groups and not variant.environments
if compose.conf["comps_filter_environments"] and not contains_all:
# We only want to filter environments if it's enabled by configuration
# and it's a variant with some groups and environments defined. If
# there are none, all packages should go in there and also all
# environments should be preserved.
comps.filter_environments(variant.environments)
comps.write_comps()
def create_comps_repo(compose, arch, variant):
createrepo_c = compose.conf["createrepo_c"]
createrepo_checksum = compose.conf["createrepo_checksum"]
repo = CreaterepoWrapper(createrepo_c=createrepo_c)
comps_repo = compose.paths.work.comps_repo(arch=arch, variant=variant)
comps_path = compose.paths.work.comps(arch=arch, variant=variant)
msg = "Creating comps repo for arch '%s' variant '%s'" % (
arch,
variant.uid if variant else None,
)
compose.log_info("[BEGIN] %s" % msg)
cmd = repo.get_createrepo_cmd(
comps_repo,
database=False,
outputdir=comps_repo,
groupfile=comps_path,
checksum=createrepo_checksum,
)
logfile = "comps_repo-%s" % variant if variant else "comps_repo"
run(cmd, logfile=compose.paths.log.log_file(arch, logfile), show_cmd=True)
compose.log_info("[DONE ] %s" % msg)
def write_module_defaults(compose):
scm_dict = compose.conf["module_defaults_dir"]
if isinstance(scm_dict, dict):
if scm_dict["scm"] == "file":
scm_dict["dir"] = os.path.join(compose.config_dir, scm_dict["dir"])
else:
scm_dict = os.path.join(compose.config_dir, scm_dict)
with temp_dir(prefix="moduledefaults_") as tmp_dir:
get_dir_from_scm(scm_dict, tmp_dir, compose=compose)
compose.log_debug("Writing module defaults")
shutil.copytree(
tmp_dir,
compose.paths.work.module_defaults_dir(create_dir=False),
ignore=shutil.ignore_patterns(".git"),
)
def write_module_obsoletes(compose):
scm_dict = compose.conf["module_obsoletes_dir"]
if isinstance(scm_dict, dict):
if scm_dict["scm"] == "file":
scm_dict["dir"] = os.path.join(compose.config_dir, scm_dict["dir"])
else:
scm_dict = os.path.join(compose.config_dir, scm_dict)
with temp_dir(prefix="moduleobsoletes_") as tmp_dir:
get_dir_from_scm(scm_dict, tmp_dir, compose=compose)
compose.log_debug("Writing module obsoletes")
shutil.copytree(
tmp_dir,
compose.paths.work.module_obsoletes_dir(create_dir=False),
ignore=shutil.ignore_patterns(".git"),
)
def validate_module_defaults(path):
"""Make sure there are no conflicting defaults and every default can be loaded.
Each module name can only have one default stream.
:param str path: directory with cloned module defaults
"""
defaults_num = len(glob.glob(os.path.join(path, "*.yaml")))
seen_defaults = collections.defaultdict(set)
for module_name, defaults in iter_module_defaults(path):
seen_defaults[module_name].add(defaults.get_default_stream())
errors = []
for module_name, defaults in seen_defaults.items():
if len(defaults) > 1:
errors.append(
"Module %s has multiple defaults: %s"
% (module_name, ", ".join(sorted(defaults)))
)
if errors:
raise RuntimeError(
"There are duplicated module defaults:\n%s" % "\n".join(errors)
)
# Make sure all defaults are valid, otherwise update_from_defaults_directory
# will return an empty object.
if defaults_num != len(seen_defaults):
raise RuntimeError("Defaults directory contains an invalid defaults file")
def validate_comps(path):
"""Check that there are whitespace issues in comps."""
wrapper = CompsWrapper(path)
wrapper.validate()

229
pungi/phases/kiwibuild.py Normal file
View File

@ -0,0 +1,229 @@
# -*- coding: utf-8 -*-
import os
from kobo.threads import ThreadPool, WorkerThread
from kobo import shortcuts
from productmd.images import Image
from . import base
from .. import util
from ..linker import Linker
from ..wrappers import kojiwrapper
from .image_build import EXTENSIONS
KIWIEXTENSIONS = [
("vhd-compressed", ["vhdfixed.xz"], "vhd.xz"),
("vagrant-libvirt", ["vagrant.libvirt.box"], "vagrant-libvirt.box"),
("vagrant-virtualbox", ["vagrant.virtualbox.box"], "vagrant-virtualbox.box"),
]
class KiwiBuildPhase(
base.PhaseLoggerMixin, base.ImageConfigMixin, base.ConfigGuardedPhase
):
name = "kiwibuild"
def __init__(self, compose):
super(KiwiBuildPhase, self).__init__(compose)
self.pool = ThreadPool(logger=self.logger)
def _get_arches(self, image_conf, arches):
"""Get an intersection of arches in the config dict and the given ones."""
if "arches" in image_conf:
arches = set(image_conf["arches"]) & arches
return sorted(arches)
@staticmethod
def _get_repo_urls(compose, repos, arch="$basearch"):
"""
Get list of repos with resolved repo URLs. Preserve repos defined
as dicts.
"""
resolved_repos = []
for repo in repos:
repo = util.get_repo_url(compose, repo, arch=arch)
if repo is None:
raise RuntimeError("Failed to resolve repo URL for %s" % repo)
resolved_repos.append(repo)
return resolved_repos
def _get_repo(self, image_conf, variant):
"""
Get a list of repos. First included are those explicitly listed in
config, followed by the repo for the current variant if it's not in
the list already.
"""
repos = shortcuts.force_list(image_conf.get("repos", []))
if not variant.is_empty and variant.uid not in repos:
repos.append(variant.uid)
return KiwiBuildPhase._get_repo_urls(self.compose, repos, arch="$arch")
def run(self):
for variant in self.compose.get_variants():
arches = set([x for x in variant.arches if x != "src"])
for image_conf in self.get_config_block(variant):
build_arches = self._get_arches(image_conf, arches)
if not build_arches:
self.log_debug("skip: no arches")
continue
# these properties can be set per-image *or* as e.g.
# kiwibuild_description_scm or global_release in the config
generics = {
"release": self.get_release(image_conf),
"target": self.get_config(image_conf, "target"),
"descscm": self.get_config(image_conf, "description_scm"),
"descpath": self.get_config(image_conf, "description_path"),
"type": self.get_config(image_conf, "type"),
"type_attr": self.get_config(image_conf, "type_attr"),
"bundle_name_format": self.get_config(
image_conf, "bundle_name_format"
),
}
repo = self._get_repo(image_conf, variant)
failable_arches = image_conf.pop("failable", [])
if failable_arches == ["*"]:
failable_arches = image_conf["arches"]
self.pool.add(RunKiwiBuildThread(self.pool))
self.pool.queue_put(
(
self.compose,
variant,
image_conf,
build_arches,
generics,
repo,
failable_arches,
)
)
self.pool.start()
class RunKiwiBuildThread(WorkerThread):
def process(self, item, num):
(compose, variant, config, arches, generics, repo, failable_arches) = item
self.failable_arches = failable_arches
# the Koji task as a whole can only fail if *all* arches are failable
can_task_fail = set(failable_arches).issuperset(set(arches))
self.num = num
with util.failable(
compose,
can_task_fail,
variant,
"*",
"kiwibuild",
logger=self.pool._logger,
):
self.worker(compose, variant, config, arches, generics, repo)
def worker(self, compose, variant, config, arches, generics, repo):
msg = "kiwibuild task for variant %s" % variant.uid
self.pool.log_info("[BEGIN] %s" % msg)
koji = kojiwrapper.KojiWrapper(compose)
koji.login()
task_id = koji.koji_proxy.kiwiBuild(
generics["target"],
arches,
generics["descscm"],
generics["descpath"],
profile=config["kiwi_profile"],
release=generics["release"],
repos=repo,
type=generics["type"],
type_attr=generics["type_attr"],
result_bundle_name_format=generics["bundle_name_format"],
# this ensures the task won't fail if only failable arches fail
optional_arches=self.failable_arches,
)
koji.save_task_id(task_id)
# Wait for it to finish and capture the output into log file.
log_dir = os.path.join(compose.paths.log.topdir(), "kiwibuild")
util.makedirs(log_dir)
log_file = os.path.join(
log_dir, "%s-%s-watch-task.log" % (variant.uid, self.num)
)
if koji.watch_task(task_id, log_file) != 0:
raise RuntimeError(
"kiwiBuild task failed: %s. See %s for details" % (task_id, log_file)
)
# Refresh koji session which may have timed out while the task was
# running. Watching is done via a subprocess, so the session is
# inactive.
koji = kojiwrapper.KojiWrapper(compose)
linker = Linker(logger=self.pool._logger)
# Process all images in the build. There should be one for each
# architecture, but we don't verify that.
paths = koji.get_image_paths(task_id)
for arch, paths in paths.items():
for path in paths:
type_, format_ = _find_type_and_format(path)
if not format_:
# Path doesn't match any known type.
continue
# image_dir is absolute path to which the image should be copied.
# We also need the same path as relative to compose directory for
# including in the metadata.
image_dir = compose.paths.compose.image_dir(variant) % {"arch": arch}
rel_image_dir = compose.paths.compose.image_dir(
variant, relative=True
) % {"arch": arch}
util.makedirs(image_dir)
filename = os.path.basename(path)
image_dest = os.path.join(image_dir, filename)
src_file = compose.koji_downloader.get_file(path)
linker.link(src_file, image_dest, link_type=compose.conf["link_type"])
# Update image manifest
img = Image(compose.im)
# The manifest type and format are determined from the koji output
# (see _find_type_and_format below).
img.type = type_
img.format = format_
img.path = os.path.join(rel_image_dir, filename)
img.mtime = util.get_mtime(image_dest)
img.size = util.get_file_size(image_dest)
img.arch = arch
img.disc_number = 1 # We don't expect multiple disks
img.disc_count = 1
img.bootable = False
img.subvariant = config.get("subvariant", variant.uid)
setattr(img, "can_fail", arch in self.failable_arches)
setattr(img, "deliverable", "kiwibuild")
compose.im.add(variant=variant.uid, arch=arch, image=img)
self.pool.log_info("[DONE ] %s (task id: %s)" % (msg, task_id))
def _find_type_and_format(path):
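# First check the suffix map shared with image_build (imagefactory
# formats); kiwi-specific suffixes are handled below.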
for type_, suffixes in EXTENSIONS.items():
for suffix in suffixes:
if path.endswith(suffix):
return type_, suffix
# these are our kiwi-exclusive mappings for images whose extensions
# aren't quite the same as imagefactory
for type_, suffixes, format_ in KIWIEXTENSIONS:
if any(path.endswith(suffix) for suffix in suffixes):
return type_, format_
return None, None

206
pungi/phases/livemedia_phase.py Normal file
View File

@ -0,0 +1,206 @@
# -*- coding: utf-8 -*-
import os
import time
from kobo import shortcuts
from pungi.util import makedirs, get_mtime, get_file_size, failable, log_failed_task
from pungi.util import translate_path, get_repo_urls
from pungi.phases.base import ConfigGuardedPhase, ImageConfigMixin, PhaseLoggerMixin
from pungi.linker import Linker
from pungi.wrappers.kojiwrapper import KojiWrapper
from kobo.threads import ThreadPool, WorkerThread
from productmd.images import Image
class LiveMediaPhase(PhaseLoggerMixin, ImageConfigMixin, ConfigGuardedPhase):
"""class for wrapping up koji spin-livemedia"""
name = "live_media"
def __init__(self, compose):
super(LiveMediaPhase, self).__init__(compose)
self.pool = ThreadPool(logger=self.logger)
def _get_repos(self, image_conf, variant):
"""
Get a list of repo URLs. First included are those explicitly listed in
config, followed by the repo for the current variant if it's not present
in the list.
"""
repos = shortcuts.force_list(image_conf.get("repo", []))
if not variant.is_empty:
if variant.uid not in repos:
repos.append(variant.uid)
return get_repo_urls(self.compose, repos)
def _get_arches(self, image_conf, arches):
if "arches" in image_conf:
arches = set(image_conf.get("arches", [])) & arches
return sorted(arches)
def _get_install_tree(self, image_conf, variant):
if "install_tree_from" in image_conf:
variant_uid = image_conf["install_tree_from"]
try:
variant = self.compose.all_variants[variant_uid]
except KeyError:
raise RuntimeError(
"There is no variant %s to get repo from when building "
"live media for %s." % (variant_uid, variant.uid)
)
return translate_path(
self.compose,
self.compose.paths.compose.os_tree("$basearch", variant, create_dir=False),
)
def run(self):
for variant in self.compose.get_variants():
arches = set([x for x in variant.arches if x != "src"])
for image_conf in self.get_config_block(variant):
subvariant = image_conf.get("subvariant", variant.uid)
name = image_conf.get(
"name",
"%s-%s-Live" % (self.compose.ci_base.release.short, subvariant),
)
config = {
"target": self.get_config(image_conf, "target"),
"arches": self._get_arches(image_conf, arches),
"ksfile": image_conf["kickstart"],
"ksurl": self.get_ksurl(image_conf),
"ksversion": image_conf.get("ksversion"),
"scratch": image_conf.get("scratch", False),
"nomacboot": image_conf.get("nomacboot", False),
"release": self.get_release(image_conf),
"skip_tag": image_conf.get("skip_tag"),
"name": name,
"subvariant": subvariant,
"repo": self._get_repos(image_conf, variant),
"install_tree": self._get_install_tree(image_conf, variant),
"version": self.get_version(image_conf),
"failable_arches": image_conf.get("failable", []),
}
if config["failable_arches"] == ["*"]:
config["failable_arches"] = config["arches"]
self.pool.add(LiveMediaThread(self.pool))
self.pool.queue_put((self.compose, variant, config))
self.pool.start()
class LiveMediaThread(WorkerThread):
def process(self, item, num):
compose, variant, config = item
subvariant = config.pop("subvariant")
self.failable_arches = config.pop("failable_arches")
self.num = num
can_fail = set(self.failable_arches) == set(config["arches"])
with failable(
compose,
can_fail,
variant,
"*",
"live-media",
subvariant,
logger=self.pool._logger,
):
self.worker(compose, variant, subvariant, config)
def _get_log_file(self, compose, variant, subvariant, config):
arches = "-".join(config["arches"])
return compose.paths.log.log_file(
arches, "livemedia-%s-%s" % (variant.uid, subvariant)
)
def _run_command(self, koji_wrapper, cmd, compose, log_file):
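# Stagger Koji submissions a few seconds apart, presumably the same
# replay-cache race workaround documented in the image_build phase.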
time.sleep(self.num * 3)
output = koji_wrapper.run_blocking_cmd(cmd, log_file=log_file)
self.pool.log_debug("live media outputs: %s" % (output))
if output["retcode"] != 0:
self.pool.log_error("Live media task failed.")
raise RuntimeError(
"Live media task failed: %s. See %s for more details."
% (output["task_id"], log_file)
)
return output
def _get_cmd(self, koji_wrapper, config):
"""Replace `arches` (as list) with `arch` as a comma-separated string."""
copy = dict(config)
copy["arch"] = ",".join(copy.pop("arches", []))
copy["can_fail"] = self.failable_arches
return koji_wrapper.get_live_media_cmd(copy)
def worker(self, compose, variant, subvariant, config):
msg = "Live media: %s (arches: %s, variant: %s, subvariant: %s)" % (
config["name"],
" ".join(config["arches"]),
variant.uid,
subvariant,
)
self.pool.log_info("[BEGIN] %s" % msg)
koji_wrapper = KojiWrapper(compose)
cmd = self._get_cmd(koji_wrapper, config)
log_file = self._get_log_file(compose, variant, subvariant, config)
output = self._run_command(koji_wrapper, cmd, compose, log_file)
# collect results and update manifest
image_infos = []
paths = koji_wrapper.get_image_paths(
output["task_id"],
callback=lambda arch: log_failed_task(
compose, variant, arch, "live-media", subvariant
),
)
for arch, paths in paths.items():
for path in paths:
if path.endswith(".iso"):
image_infos.append({"path": path, "arch": arch})
if len(image_infos) < len(config["arches"]) - len(self.failable_arches):
self.pool.log_error(
"Error in koji task %s. Expected to find at least one image "
"for each required arch (%s). Got %s."
% (output["task_id"], len(config["arches"]), len(image_infos))
)
raise RuntimeError("Image count mismatch in task %s." % output["task_id"])
linker = Linker(logger=self.pool._logger)
link_type = compose.conf["link_type"]
for image_info in image_infos:
image_dir = compose.paths.compose.iso_dir(image_info["arch"], variant)
makedirs(image_dir)
relative_image_dir = compose.paths.compose.iso_dir(
image_info["arch"], variant, relative=True
)
# let's not change filename of koji outputs
image_dest = os.path.join(image_dir, os.path.basename(image_info["path"]))
src_file = compose.koji_downloader.get_file(
os.path.realpath(image_info["path"])
)
linker.link(src_file, image_dest, link_type=link_type)
# Update image manifest
img = Image(compose.im)
img.type = "live"
img.format = "iso"
img.path = os.path.join(relative_image_dir, os.path.basename(image_dest))
img.mtime = get_mtime(image_dest)
img.size = get_file_size(image_dest)
img.arch = image_info["arch"]
img.disc_number = 1 # We don't expect multiple disks
img.disc_count = 1
img.bootable = True
img.subvariant = subvariant
setattr(img, "can_fail", bool(self.failable_arches))
setattr(img, "deliverable", "live-media")
compose.im.add(variant=variant.uid, arch=image_info["arch"], image=img)
self.pool.log_info("[DONE ] %s (task id: %s)" % (msg, output["task_id"]))

456
pungi/phases/osbs.py Normal file
View File

@ -0,0 +1,456 @@
# -*- coding: utf-8 -*-
import copy
import fnmatch
import json
import os
from kobo.threads import ThreadPool, WorkerThread
from kobo import shortcuts
from productmd.rpms import Rpms
from six.moves import configparser
from .base import ConfigGuardedPhase, PhaseLoggerMixin
from .. import util
from ..wrappers import kojiwrapper
from ..wrappers.scm import get_file_from_scm
class OSBSPhase(PhaseLoggerMixin, ConfigGuardedPhase):
name = "osbs"
def __init__(self, compose, pkgset_phase, buildinstall_phase):
super(OSBSPhase, self).__init__(compose)
self.pool = ThreadPool(logger=self.logger)
self.pool.registries = {}
self.pool.pkgset_phase = pkgset_phase
self.pool.buildinstall_phase = buildinstall_phase
def run(self):
for variant in self.compose.get_variants():
for conf in self.get_config_block(variant):
self.pool.add(OSBSThread(self.pool))
self.pool.queue_put((self.compose, variant, conf))
self.pool.start()
def request_push(self):
"""Store configuration data about where to push the created images and
then send the same data to message bus.
"""
if not self.pool.registries:
return
# Write the data into a file.
registry_file = os.path.join(
self.compose.paths.log.topdir(), "osbs-registries.json"
)
with open(registry_file, "w") as fh:
json.dump(self.pool.registries, fh)
# Send a message with the data
if self.compose.notifier:
self.compose.notifier.send(
"osbs-request-push",
config_location=util.translate_path(self.compose, registry_file),
config=self.pool.registries,
)
def get_registry(compose, nvr, fallback=None):
"""Get a configured registry for the image from config matching given NVR.
If not present, return fallback value.
"""
for pattern, registry in compose.conf.get("osbs_registries", {}).items():
if fnmatch.fnmatch(nvr, pattern):
return registry
return fallback
class OSBSThread(WorkerThread):
def process(self, item, num):
compose, variant, config = item
self.num = num
with util.failable(
compose,
bool(config.pop("failable", None)),
variant,
"*",
"osbs",
logger=self.pool._logger,
):
self.worker(compose, variant, config)
def worker(self, compose, variant, config):
msg = "OSBS task for variant %s" % variant.uid
self.pool.log_info("[BEGIN] %s" % msg)
original_config = copy.deepcopy(config)
# Start task
source = config.pop("url")
target = config.pop("target")
priority = config.pop("priority", None)
gpgkey = config.pop("gpgkey", None)
repos = [
self._get_repo(compose, v, gpgkey=gpgkey)
for v in [variant.uid] + shortcuts.force_list(config.pop("repo", []))
]
# Deprecated in 4.1.36
registry = config.pop("registry", None)
config["yum_repourls"] = repos
log_dir = os.path.join(compose.paths.log.topdir(), "osbs")
util.makedirs(log_dir)
log_file = os.path.join(
log_dir, "%s-%s-watch-task.log" % (variant.uid, self.num)
)
reuse_file = log_file[:-4] + ".reuse.json"
try:
image_conf = self._get_image_conf(compose, original_config)
except Exception as e:
image_conf = None
self.pool.log_info(
"Can't get image-build.conf for variant: %s source: %s - %s"
% (variant.uid, source, str(e))
)
koji = kojiwrapper.KojiWrapper(compose)
koji.login()
task_id = self._try_to_reuse(
compose, variant, original_config, image_conf, reuse_file
)
if not task_id:
task_id = koji.koji_proxy.buildContainer(
source, target, config, priority=priority
)
koji.save_task_id(task_id)
# Wait for it to finish and capture the output into log file (even
# though there is not much there).
if koji.watch_task(task_id, log_file) != 0:
raise RuntimeError(
"OSBS task failed: %s. See %s for details" % (task_id, log_file)
)
scratch = config.get("scratch", False)
nvr, archive_ids = add_metadata(variant, task_id, compose, scratch)
if nvr:
registry = get_registry(compose, nvr, registry)
if registry:
self.pool.registries[nvr] = registry
self._write_reuse_metadata(
compose,
variant,
original_config,
image_conf,
task_id,
archive_ids,
reuse_file,
)
self.pool.log_info("[DONE ] %s (task id: %s)" % (msg, task_id))
def _get_image_conf(self, compose, config):
"""Get image-build.conf from git repo.
:param Compose compose: Current compose.
:param dict config: One osbs config item of compose.conf["osbs"][$variant]
"""
tmp_dir = compose.mkdtemp(prefix="osbs_")
url = config["url"].split("#")
if len(url) == 1:
url.append(config["git_branch"])
filename = "image-build.conf"
get_file_from_scm(
{
"scm": "git",
"repo": url[0],
"branch": url[1],
"file": [filename],
},
tmp_dir,
)
c = configparser.ConfigParser()
c.read(os.path.join(tmp_dir, filename))
return c
def _get_ksurl(self, image_conf):
"""Get ksurl from image-build.conf"""
ksurl = image_conf.get("image-build", "ksurl")
if ksurl:
resolver = util.GitUrlResolver(offline=False)
return resolver(ksurl)
else:
return None
def _get_repo(self, compose, repo, gpgkey=None):
"""
Return the URL of a repo file for the given repo. If repo contains
"://", it is already a repo file URL. Otherwise it is a variant UID or a
local path; write a .repo file pointing to that location and return its URL.
"""
if "://" in repo:
return repo.replace("$COMPOSE_ID", compose.compose_id)
if repo.startswith("/"):
# The repo is an absolute path on the filesystem
repo_path = repo
variant = "local"
repo_file = os.path.join(
compose.paths.work.tmp_dir(None, None),
"compose-rpms-%s-%s.repo" % (variant, self.num),
)
else:
# We got a variant name and have to find the repository for that variant.
try:
variant = compose.all_variants[repo]
except KeyError:
raise RuntimeError(
"There is no variant %s to get repo from to pass to OSBS." % repo
)
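# When Compose Tracking Service is configured, return its repo
# endpoint for this variant instead of writing a local .repo file.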
cts_url = compose.conf.get("cts_url", None)
if cts_url:
return os.path.join(
cts_url,
"api/1/composes",
compose.compose_id,
"repo/?variant=%s" % variant,
)
repo_path = compose.paths.compose.repository(
"$basearch", variant, create_dir=False
)
repo_file = os.path.join(
compose.paths.work.tmp_dir(None, variant),
"compose-rpms-%s-%s.repo" % (variant, self.num),
)
gpgcheck = 1 if gpgkey else 0
with open(repo_file, "w") as f:
f.write("[%s-%s-%s]\n" % (compose.compose_id, variant, self.num))
f.write("name=Compose %s (RPMs) - %s\n" % (compose.compose_id, variant))
f.write("baseurl=%s\n" % util.translate_path(compose, repo_path))
f.write("enabled=1\n")
f.write("gpgcheck=%s\n" % gpgcheck)
if gpgcheck:
f.write("gpgkey=%s\n" % gpgkey)
return util.translate_path(compose, repo_file)
def _try_to_reuse(self, compose, variant, config, image_conf, reuse_file):
"""Try to reuse results of old compose.
:param Compose compose: Current compose.
:param Variant variant: Current variant.
:param dict config: One osbs config item of compose.conf["osbs"][$variant]
:param ConfigParser image_conf: ConfigParser obj of image-build.conf.
:param str reuse_file: Path to reuse metadata file
"""
log_msg = "Cannot reuse old osbs phase results - %s"
if not compose.conf["osbs_allow_reuse"]:
self.pool.log_info(log_msg % "reuse of old osbs results is disabled.")
return False
old_reuse_file = compose.paths.old_compose_path(reuse_file)
if not old_reuse_file:
self.pool.log_info(log_msg % "Can't find old reuse metadata file")
return False
try:
with open(old_reuse_file) as f:
old_reuse_metadata = json.load(f)
except Exception as e:
self.pool.log_info(
log_msg % "Can't load old reuse metadata file: %s" % str(e)
)
return False
if old_reuse_metadata["config"] != config:
self.pool.log_info(log_msg % "osbs config changed")
return False
if not image_conf:
self.pool.log_info(log_msg % "Can't get image-build.conf")
return False
# Make sure ksurl has not changed
try:
ksurl = self._get_ksurl(image_conf)
except Exception as e:
self.pool.log_info(
log_msg % "Can't get ksurl from image-build.conf - %s" % str(e)
)
return False
if not old_reuse_metadata["ksurl"]:
self.pool.log_info(
log_msg % "Can't get ksurl from old compose reuse metadata."
)
return False
if ksurl != old_reuse_metadata["ksurl"]:
self.pool.log_info(log_msg % "ksurl changed")
return False
# Make sure buildinstall phase is reused
try:
arches = image_conf.get("image-build", "arches").split(",")
except Exception as e:
self.pool.log_info(
log_msg % "Can't get arches from image-build.conf - %s" % str(e)
)
return False
for arch in arches:
if not self.pool.buildinstall_phase.reused(variant, arch):
self.pool.log_info(
log_msg % "buildinstall phase changed %s.%s" % (variant, arch)
)
return False
# Make sure the rpms installed in the image exist in the current compose
rpm_manifest_file = compose.paths.compose.metadata("rpms.json")
rpm_manifest = Rpms()
rpm_manifest.load(rpm_manifest_file)
rpms = set()
for variant in rpm_manifest.rpms:
for arch in rpm_manifest.rpms[variant]:
for src in rpm_manifest.rpms[variant][arch]:
for nevra in rpm_manifest.rpms[variant][arch][src]:
rpms.add(nevra)
for nevra in old_reuse_metadata["rpmlist"]:
if nevra not in rpms:
self.pool.log_info(
log_msg % "%s does not exist in current compose" % nevra
)
return False
self.pool.log_info(
"Reusing old OSBS task %d result" % old_reuse_metadata["task_id"]
)
return old_reuse_metadata["task_id"]
def _write_reuse_metadata(
self, compose, variant, config, image_conf, task_id, archive_ids, reuse_file
):
"""Write metadata to file for reusing.
:param Compose compose: Current compose.
:param Variant variant: Current variant.
:param dict config: One osbs config item of compose.conf["osbs"][$variant]
:param ConfigParser image_conf: ConfigParser obj of image-build.conf.
:param int task_id: Koji task id of osbs task.
:param list archive_ids: List of koji archive IDs.
:param str reuse_file: Path to reuse metadata file.
"""
msg = "Writing reuse metadata file %s" % reuse_file
compose.log_info(msg)
rpmlist = set()
koji = kojiwrapper.KojiWrapper(compose)
for archive_id in archive_ids:
rpms = koji.koji_proxy.listRPMs(imageID=archive_id)
for item in rpms:
if item["epoch"]:
rpmlist.add(
"%s:%s-%s-%s.%s"
% (
item["name"],
item["epoch"],
item["version"],
item["release"],
item["arch"],
)
)
else:
rpmlist.add("%s.%s" % (item["nvr"], item["arch"]))
try:
ksurl = self._get_ksurl(image_conf)
except Exception:
ksurl = None
data = {
"config": config,
"ksurl": ksurl,
"rpmlist": sorted(rpmlist),
"task_id": task_id,
}
try:
with open(reuse_file, "w") as f:
json.dump(data, f, indent=4)
except Exception as e:
compose.log_info(msg + " failed - %s" % str(e))
def add_metadata(variant, task_id, compose, is_scratch):
"""Given a task ID, find details about the container and add it to global
metadata."""
# Create new Koji session. The task could take so long to finish that
# our session will expire. This second session does not need to be
# authenticated since it will only do reading operations.
koji = kojiwrapper.KojiWrapper(compose)
# Create metadata
metadata = {
"compose_id": compose.compose_id,
"koji_task": task_id,
}
result = koji.koji_proxy.getTaskResult(task_id)
if is_scratch:
metadata.update({"repositories": result["repositories"]})
# add a fake arch of 'scratch', so we can construct the metadata
# in the same data structure as real builds.
compose.containers_metadata.setdefault(variant.uid, {}).setdefault(
"scratch", []
).append(metadata)
return None, []
else:
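# Non-scratch tasks produce a real build: record per-arch metadata
# for every image archive and return the NVR with the archive ids.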
build_id = int(result["koji_builds"][0])
buildinfo = koji.koji_proxy.getBuild(build_id)
archives = koji.koji_proxy.listArchives(build_id, type="image")
nvr = "%(name)s-%(version)s-%(release)s" % buildinfo
metadata.update(
{
"name": buildinfo["name"],
"version": buildinfo["version"],
"release": buildinfo["release"],
"nvr": nvr,
"creation_time": buildinfo["creation_time"],
}
)
archive_ids = []
for archive in archives:
data = {
"filename": archive["filename"],
"size": archive["size"],
"checksum": archive["checksum"],
}
data.update(archive["extra"])
data.update(metadata)
arch = archive["extra"]["image"]["arch"]
compose.log_debug(
"Created Docker base image %s-%s-%s.%s"
% (metadata["name"], metadata["version"], metadata["release"], arch)
)
compose.containers_metadata.setdefault(variant.uid, {}).setdefault(
arch, []
).append(data)
archive_ids.append(archive["id"])
return nvr, archive_ids

287
pungi/phases/osbuild.py Normal file
View File

@ -0,0 +1,287 @@
# -*- coding: utf-8 -*-
import os
from kobo.threads import ThreadPool, WorkerThread
from kobo import shortcuts
from productmd.images import Image
from . import base
from .. import util
from ..linker import Linker
from ..wrappers import kojiwrapper
from .image_build import EXTENSIONS
class OSBuildPhase(
base.PhaseLoggerMixin, base.ImageConfigMixin, base.ConfigGuardedPhase
):
name = "osbuild"
def __init__(self, compose):
super(OSBuildPhase, self).__init__(compose)
self.pool = ThreadPool(logger=self.logger)
def _get_arches(self, image_conf, arches):
"""Get an intersection of arches in the config dict and the given ones."""
if "arches" in image_conf:
arches = set(image_conf["arches"]) & arches
return sorted(arches)
@staticmethod
def _get_repo_urls(compose, repos, arch="$basearch"):
"""
Get list of repos with resolved repo URLs. Preserve repos defined
as dicts.
"""
resolved_repos = []
for repo in repos:
if isinstance(repo, dict):
try:
url = repo["baseurl"]
except KeyError:
raise RuntimeError(
"`baseurl` is required in repo dict %s" % str(repo)
)
url = util.get_repo_url(compose, url, arch=arch)
if url is None:
raise RuntimeError("Failed to resolve repo URL for %s" % str(repo))
repo["baseurl"] = url
resolved_repos.append(repo)
else:
repo = util.get_repo_url(compose, repo, arch=arch)
if repo is None:
raise RuntimeError("Failed to resolve repo URL for %s" % repo)
resolved_repos.append(repo)
return resolved_repos
def _get_repo(self, image_conf, variant):
"""
Get a list of repos. First included are those explicitly listed in
config, followed by by repo for current variant if it's not included in
the list already.
"""
repos = shortcuts.force_list(image_conf.get("repo", []))
if not variant.is_empty and variant.uid not in repos:
repos.append(variant.uid)
return OSBuildPhase._get_repo_urls(self.compose, repos, arch="$arch")
def run(self):
for variant in self.compose.get_variants():
arches = set([x for x in variant.arches if x != "src"])
for image_conf in self.get_config_block(variant):
build_arches = self._get_arches(image_conf, arches)
if not build_arches:
self.log_debug("skip: no arches")
continue
release = self.get_release(image_conf)
version = self.get_version(image_conf)
target = self.get_config(image_conf, "target")
repo = self._get_repo(image_conf, variant)
can_fail = image_conf.pop("failable", [])
if can_fail == ["*"]:
can_fail = image_conf["arches"]
if can_fail:
can_fail = sorted(can_fail)
self.pool.add(RunOSBuildThread(self.pool))
self.pool.queue_put(
(
self.compose,
variant,
image_conf,
build_arches,
version,
release,
target,
repo,
can_fail,
)
)
self.pool.start()
class RunOSBuildThread(WorkerThread):
def process(self, item, num):
(
compose,
variant,
config,
arches,
version,
release,
target,
repo,
can_fail,
) = item
self.can_fail = can_fail
self.num = num
with util.failable(
compose,
can_fail,
variant,
"*",
"osbuild",
logger=self.pool._logger,
):
self.worker(
compose, variant, config, arches, version, release, target, repo
)
def worker(self, compose, variant, config, arches, version, release, target, repo):
msg = "OSBuild task for variant %s" % variant.uid
self.pool.log_info("[BEGIN] %s" % msg)
koji = kojiwrapper.KojiWrapper(compose)
koji.login()
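# Collect optional ostree settings; the "ostree" key is only added
# to opts when at least one of them is configured.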
ostree = {}
if config.get("ostree_url"):
ostree["url"] = config["ostree_url"]
if config.get("ostree_ref"):
ostree["ref"] = config["ostree_ref"]
if config.get("ostree_parent"):
ostree["parent"] = config["ostree_parent"]
# Start task
opts = {"repo": repo}
if ostree:
opts["ostree"] = ostree
upload_options = config.get("upload_options")
if upload_options:
opts["upload_options"] = upload_options
customizations = config.get("customizations")
if customizations:
opts["customizations"] = customizations
if release:
opts["release"] = release
task_id = koji.koji_proxy.osbuildImage(
config["name"],
version,
config["distro"],
config["image_types"],
target,
arches,
opts=opts,
)
koji.save_task_id(task_id)
# Wait for it to finish and capture the output into log file.
log_dir = os.path.join(compose.paths.log.topdir(), "osbuild")
util.makedirs(log_dir)
log_file = os.path.join(
log_dir, "%s-%s-watch-task.log" % (variant.uid, self.num)
)
if koji.watch_task(task_id, log_file) != 0:
raise RuntimeError(
"OSBuild task failed: %s. See %s for details" % (task_id, log_file)
)
# Refresh koji session which may have timed out while the task was
# running. Watching is done via a subprocess, so the session is
# inactive.
koji = kojiwrapper.KojiWrapper(compose)
# Get build id via the task's result json data
result = koji.koji_proxy.getTaskResult(task_id)
build_id = result["koji"]["build"]
linker = Linker(logger=self.pool._logger)
# Process all images in the build. There should be one for each
# architecture, but we don't verify that.
build_info = koji.koji_proxy.getBuild(build_id)
for archive in koji.koji_proxy.listArchives(buildID=build_id):
if archive["type_name"] not in EXTENSIONS:
# Ignore values that are not of required types.
continue
# Get architecture of the image from extra data.
try:
arch = archive["extra"]["image"]["arch"]
except KeyError:
raise RuntimeError("Image doesn't have any architecture!")
# image_dir is absolute path to which the image should be copied.
# We also need the same path as relative to compose directory for
# including in the metadata.
if archive["type_name"] == "iso":
# If the produced image is actually an ISO, it should go to
# iso/ subdirectory.
image_dir = compose.paths.compose.iso_dir(arch, variant)
rel_image_dir = compose.paths.compose.iso_dir(
arch, variant, relative=True
)
else:
image_dir = compose.paths.compose.image_dir(variant) % {"arch": arch}
rel_image_dir = compose.paths.compose.image_dir(
variant, relative=True
) % {"arch": arch}
util.makedirs(image_dir)
image_dest = os.path.join(image_dir, archive["filename"])
src_file = compose.koji_downloader.get_file(
os.path.join(
koji.koji_module.pathinfo.imagebuild(build_info),
archive["filename"],
),
)
linker.link(src_file, image_dest, link_type=compose.conf["link_type"])
for suffix in EXTENSIONS[archive["type_name"]]:
if archive["filename"].endswith(suffix):
break
else:
# No suffix matched.
raise RuntimeError(
"Failed to generate metadata. Format %s doesn't match type %s"
% (suffix, archive["type_name"])
)
# Update image manifest
img = Image(compose.im)
# Get the manifest type from the config if supplied, otherwise we
# determine the manifest type based on the koji output
img.type = config.get("manifest_type")
if not img.type:
if archive["type_name"] != "iso":
img.type = archive["type_name"]
else:
fn = archive["filename"].lower()
if "ostree" in fn:
img.type = "dvd-ostree-osbuild"
elif "live" in fn:
img.type = "live-osbuild"
elif "netinst" in fn or "boot" in fn:
img.type = "boot"
else:
img.type = "dvd"
img.format = suffix
img.path = os.path.join(rel_image_dir, archive["filename"])
img.mtime = util.get_mtime(image_dest)
img.size = util.get_file_size(image_dest)
img.arch = arch
img.disc_number = 1 # We don't expect multiple disks
img.disc_count = 1
img.bootable = False
img.subvariant = config.get("subvariant", variant.uid)
setattr(img, "can_fail", self.can_fail)
setattr(img, "deliverable", "image-build")
compose.im.add(variant=variant.uid, arch=arch, image=img)
self.pool.log_info("[DONE ] %s (task id: %s)" % (msg, task_id))

210
pungi/phases/ostree.py Normal file

@ -0,0 +1,210 @@
# -*- coding: utf-8 -*-
import copy
import json
import os
from kobo import shortcuts
from kobo.threads import ThreadPool, WorkerThread
from collections import OrderedDict
from pungi.arch_utils import getBaseArch
from pungi.runroot import Runroot
from .base import ConfigGuardedPhase
from .. import util
from ..ostree.utils import get_ref_from_treefile, get_commitid_from_commitid_file
from ..util import get_repo_dicts, translate_path
from ..wrappers import scm
class OSTreePhase(ConfigGuardedPhase):
name = "ostree"
def __init__(self, compose, pkgset_phase=None):
super(OSTreePhase, self).__init__(compose)
self.pool = ThreadPool(logger=self.compose._logger)
self.pkgset_phase = pkgset_phase
def get_repos(self):
return [
translate_path(
self.compose,
self.compose.paths.work.pkgset_repo(pkgset.name, "$basearch"),
)
for pkgset in self.pkgset_phase.package_sets
]
def _enqueue(self, variant, arch, conf):
self.pool.add(OSTreeThread(self.pool, self.get_repos()))
self.pool.queue_put((self.compose, variant, arch, conf))
def run(self):
if isinstance(self.compose.conf.get(self.name), dict):
for variant in self.compose.get_variants():
for conf in self.get_config_block(variant):
for arch in conf.get("arches", []) or variant.arches:
self._enqueue(variant, arch, conf)
else:
# Legacy code path to support original configuration.
for variant in self.compose.get_variants():
for arch in variant.arches:
for conf in self.get_config_block(variant, arch):
self._enqueue(variant, arch, conf)
self.pool.start()
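
run() accepts two configuration shapes: a dict keyed by variant pattern (the current form) and the legacy per-arch form. Sketched as data below (not part of this diff; the variant pattern and option keys are illustrative only, not a complete schema):

ostree_new_style = {
    "^Silverblue$": [
        {"treefile": "fedora-silverblue.yaml", "arches": ["x86_64"]},
    ],
}
ostree_legacy_style = [
    ("^Silverblue$", {"x86_64": [{"treefile": "fedora-silverblue.yaml"}]}),
]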
class OSTreeThread(WorkerThread):
def __init__(self, pool, repos):
super(OSTreeThread, self).__init__(pool)
self.repos = repos
def process(self, item, num):
compose, variant, arch, config = item
self.num = num
failable_arches = config.get("failable", [])
with util.failable(
compose, util.can_arch_fail(failable_arches, arch), variant, arch, "ostree"
):
self.worker(compose, variant, arch, config)
def worker(self, compose, variant, arch, config):
msg = "OSTree phase for variant %s, arch %s" % (variant.uid, arch)
self.pool.log_info("[BEGIN] %s" % msg)
workdir = compose.paths.work.topdir("ostree-%d" % self.num)
self.logdir = compose.paths.log.topdir(
"%s/%s/ostree-%d" % (arch, variant.uid, self.num)
)
repodir = os.path.join(workdir, "config_repo")
self._clone_repo(
compose,
repodir,
config["config_url"],
config.get("config_branch", "master"),
)
comps_repo = compose.paths.work.comps_repo(
"$basearch", variant=variant, create_dir=False
)
repos = shortcuts.force_list(config.get("repo", [])) + self.repos
if compose.has_comps:
repos.append(translate_path(compose, comps_repo))
repos = get_repo_dicts(repos, logger=self.pool)
        # copy the original config and update it before saving to a json file
new_config = copy.copy(config)
# repos in configuration can have repo url set to variant UID,
# update it to have the actual url that we just translated.
new_config.update({"repo": repos})
        # remove elements that are unnecessary for the 'pungi-make-ostree tree'
        # script; keeping them wouldn't hurt, but dropping them reduces
        # confusion
for k in [
"ostree_repo",
"treefile",
"config_url",
"config_branch",
"failable",
"version",
"update_summary",
]:
new_config.pop(k, None)
        # write a json file to save the configuration, so 'pungi-make-ostree tree'
        # can make use of it
extra_config_file = os.path.join(workdir, "extra_config.json")
with open(extra_config_file, "w") as f:
json.dump(new_config, f, indent=4)
        # Ensure the target directory exists, otherwise the Koji task will
        # fail to mount it.
util.makedirs(config["ostree_repo"])
self._run_ostree_cmd(
compose, variant, arch, config, repodir, extra_config_file=extra_config_file
)
if compose.notifier:
original_ref = get_ref_from_treefile(
os.path.join(repodir, config["treefile"]),
arch,
logger=self.pool._logger,
)
ref = config.get("ostree_ref") or original_ref
ref = ref.replace("${basearch}", getBaseArch(arch))
            # 'pungi-make-ostree tree' writes the commit id to commitid.log in
            # logdir; if there was no new commit we get None instead. If the
            # commit id could not be read, an exception is raised.
commitid = get_commitid_from_commitid_file(
os.path.join(self.logdir, "commitid.log")
)
compose.notifier.send(
"ostree",
variant=variant.uid,
arch=arch,
ref=ref,
commitid=commitid,
repo_path=translate_path(compose, config["ostree_repo"]),
local_repo_path=config["ostree_repo"],
)
self.pool.log_info("[DONE ] %s" % (msg))
def _run_ostree_cmd(
self, compose, variant, arch, config, config_repo, extra_config_file=None
):
args = OrderedDict(
[
("repo", config["ostree_repo"]),
("log-dir", self.logdir),
("treefile", os.path.join(config_repo, config["treefile"])),
("version", util.version_generator(compose, config.get("version"))),
("extra-config", extra_config_file),
("update-summary", config.get("update_summary", False)),
("ostree-ref", config.get("ostree_ref")),
("force-new-commit", config.get("force_new_commit", False)),
("unified-core", config.get("unified_core", False)),
]
)
default_packages = ["pungi", "ostree", "rpm-ostree"]
additional_packages = config.get("runroot_packages", [])
packages = default_packages + additional_packages
log_file = os.path.join(self.logdir, "runroot.log")
mounts = [compose.topdir, config["ostree_repo"]]
runroot = Runroot(compose, phase="ostree")
if compose.conf["ostree_use_koji_plugin"]:
runroot.run_pungi_ostree(
dict(args),
log_file=log_file,
arch=arch,
packages=packages,
mounts=mounts,
weight=compose.conf["runroot_weights"].get("ostree"),
)
else:
cmd = ["pungi-make-ostree", "tree"]
for key, value in args.items():
if value is True:
cmd.append("--%s" % key)
elif value:
cmd.append("--%s=%s" % (key, value))
runroot.run(
cmd,
log_file=log_file,
arch=arch,
packages=packages,
mounts=mounts,
new_chroot=True,
weight=compose.conf["runroot_weights"].get("ostree"),
)
def _clone_repo(self, compose, repodir, url, branch):
scm.get_dir_from_scm(
{"scm": "git", "repo": url, "branch": branch, "dir": "."},
repodir,
compose=compose,
)
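
In the non-plugin branch of _run_ostree_cmd, the args mapping is rendered into CLI flags: True becomes a bare switch, other truthy values become --key=value, and falsy entries are dropped. A standalone sketch of the same rendering rule (not part of this diff; the values are hypothetical):

from collections import OrderedDict

def render_flags(args):
    cmd = ["pungi-make-ostree", "tree"]
    for key, value in args.items():
        if value is True:
            cmd.append("--%s" % key)
        elif value:
            cmd.append("--%s=%s" % (key, value))
    return cmd

args = OrderedDict([
    ("repo", "/srv/ostree/repo"),   # hypothetical path
    ("update-summary", True),       # boolean -> bare switch
    ("ostree-ref", None),           # falsy -> omitted
])
print(render_flags(args))
# ['pungi-make-ostree', 'tree', '--repo=/srv/ostree/repo', '--update-summary']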

190
pungi/phases/ostree_container.py Normal file

@ -0,0 +1,190 @@
# -*- coding: utf-8 -*-
import copy
import json
import os
from kobo import shortcuts
from kobo.threads import ThreadPool, WorkerThread
from productmd.images import Image
from pungi.runroot import Runroot
from .base import ConfigGuardedPhase
from .. import util
from ..util import get_repo_dicts, translate_path
from ..wrappers import scm
class OSTreeContainerPhase(ConfigGuardedPhase):
name = "ostree_container"
def __init__(self, compose, pkgset_phase=None):
super(OSTreeContainerPhase, self).__init__(compose)
self.pool = ThreadPool(logger=self.compose._logger)
self.pkgset_phase = pkgset_phase
def get_repos(self):
return [
translate_path(
self.compose,
self.compose.paths.work.pkgset_repo(
pkgset.name, "$basearch", create_dir=False
),
)
for pkgset in self.pkgset_phase.package_sets
]
def _enqueue(self, variant, arch, conf):
self.pool.add(OSTreeContainerThread(self.pool, self.get_repos()))
self.pool.queue_put((self.compose, variant, arch, conf))
def run(self):
if isinstance(self.compose.conf.get(self.name), dict):
for variant in self.compose.get_variants():
for conf in self.get_config_block(variant):
for arch in conf.get("arches", []) or variant.arches:
self._enqueue(variant, arch, conf)
else:
# Legacy code path to support original configuration.
for variant in self.compose.get_variants():
for arch in variant.arches:
for conf in self.get_config_block(variant, arch):
self._enqueue(variant, arch, conf)
self.pool.start()
class OSTreeContainerThread(WorkerThread):
def __init__(self, pool, repos):
super(OSTreeContainerThread, self).__init__(pool)
self.repos = repos
def process(self, item, num):
compose, variant, arch, config = item
self.num = num
failable_arches = config.get("failable", [])
self.can_fail = util.can_arch_fail(failable_arches, arch)
with util.failable(compose, self.can_fail, variant, arch, "ostree-container"):
self.worker(compose, variant, arch, config)
def worker(self, compose, variant, arch, config):
msg = "OSTree container phase for variant %s, arch %s" % (variant.uid, arch)
self.pool.log_info("[BEGIN] %s" % msg)
workdir = compose.paths.work.topdir("ostree-container-%d" % self.num)
self.logdir = compose.paths.log.topdir(
"%s/%s/ostree-container-%d" % (arch, variant.uid, self.num)
)
repodir = os.path.join(workdir, "config_repo")
self._clone_repo(
compose,
repodir,
config["config_url"],
config.get("config_branch", "main"),
)
repos = shortcuts.force_list(config.get("repo", [])) + self.repos
repos = get_repo_dicts(repos, logger=self.pool)
        # copy the original config and update it before saving to a json file
new_config = copy.copy(config)
# repos in configuration can have repo url set to variant UID,
# update it to have the actual url that we just translated.
new_config.update({"repo": repos})
        # remove elements that are unnecessary for the 'pungi-make-ostree
        # container' script; keeping them wouldn't hurt, but dropping them
        # reduces confusion
for k in [
"treefile",
"config_url",
"config_branch",
"failable",
"version",
]:
new_config.pop(k, None)
        # write a json file to save the configuration, so 'pungi-make-ostree
        # container' can make use of it
extra_config_file = os.path.join(workdir, "extra_config.json")
with open(extra_config_file, "w") as f:
json.dump(new_config, f, indent=4)
self._run_ostree_container_cmd(
compose, variant, arch, config, repodir, extra_config_file=extra_config_file
)
self.pool.log_info("[DONE ] %s" % (msg))
def _run_ostree_container_cmd(
self, compose, variant, arch, config, config_repo, extra_config_file=None
):
target_dir = compose.paths.compose.image_dir(variant) % {"arch": arch}
util.makedirs(target_dir)
version = util.version_generator(compose, config.get("version"))
archive_name = "%s-%s-%s" % (
compose.conf["release_short"],
variant.uid,
version,
)
        # Run the pungi-make-ostree command locally to create a script to
        # execute in the runroot environment.
cmd = [
"pungi-make-ostree",
"container",
"--log-dir=%s" % self.logdir,
"--name=%s" % archive_name,
"--path=%s" % target_dir,
"--treefile=%s" % os.path.join(config_repo, config["treefile"]),
"--extra-config=%s" % extra_config_file,
"--version=%s" % version,
]
_, runroot_script = shortcuts.run(cmd, universal_newlines=True)
default_packages = ["ostree", "rpm-ostree", "selinux-policy-targeted"]
additional_packages = config.get("runroot_packages", [])
packages = default_packages + additional_packages
log_file = os.path.join(self.logdir, "runroot.log")
# TODO: Use to get previous build
mounts = [compose.topdir]
runroot = Runroot(compose, phase="ostree_container")
runroot.run(
" && ".join(runroot_script.splitlines()),
log_file=log_file,
arch=arch,
packages=packages,
mounts=mounts,
new_chroot=True,
weight=compose.conf["runroot_weights"].get("ostree"),
)
fullpath = os.path.join(target_dir, "%s.ociarchive" % archive_name)
# Update image manifest
img = Image(compose.im)
        # The produced artifact is always an OCI archive, so the manifest
        # type and format are fixed.
img.type = "ociarchive"
img.format = "ociarchive"
img.path = os.path.relpath(fullpath, compose.paths.compose.topdir())
img.mtime = util.get_mtime(fullpath)
img.size = util.get_file_size(fullpath)
img.arch = arch
img.disc_number = 1
img.disc_count = 1
img.bootable = False
img.subvariant = config.get("subvariant", variant.uid)
setattr(img, "can_fail", self.can_fail)
setattr(img, "deliverable", "ostree-container")
compose.im.add(variant=variant.uid, arch=arch, image=img)
def _clone_repo(self, compose, repodir, url, branch):
scm.get_dir_from_scm(
{"scm": "git", "repo": url, "branch": branch, "dir": "."},
repodir,
compose=compose,
)
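
_run_ostree_container_cmd generates the runroot script locally and then collapses it into a single shell command, so each line must succeed before the next one runs. A sketch of that step (not part of this diff; the script content is hypothetical):

runroot_script = "mkdir -p /work/ostree\nrpm-ostree compose ...\n"
one_liner = " && ".join(runroot_script.splitlines())
print(one_liner)
# mkdir -p /work/ostree && rpm-ostree compose ...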

291
pungi/phases/ostree_installer.py Normal file

@ -0,0 +1,291 @@
# -*- coding: utf-8 -*-
import os
from kobo.threads import ThreadPool, WorkerThread
import shutil
from productmd import images
from six.moves import shlex_quote
from kobo import shortcuts
from .base import ConfigGuardedPhase, PhaseLoggerMixin
from .. import util
from ..arch import get_valid_arches
from ..util import (
get_volid,
get_repo_urls,
version_generator,
translate_path,
move_all,
makedirs,
)
from ..wrappers import iso, lorax, scm
from ..runroot import Runroot
class OstreeInstallerPhase(PhaseLoggerMixin, ConfigGuardedPhase):
name = "ostree_installer"
def __init__(self, compose, buildinstall_phase, pkgset_phase=None):
super(OstreeInstallerPhase, self).__init__(compose)
self.pool = ThreadPool(logger=self.logger)
self.bi = buildinstall_phase
self.pkgset_phase = pkgset_phase
def validate(self):
errors = []
if not self.compose.conf["ostree_installer_overwrite"] and not self.bi.skip():
for variant in self.compose.get_variants():
for arch in variant.arches:
conf = util.get_arch_variant_data(
self.compose.conf, self.name, arch, variant
)
if conf and not variant.is_empty:
errors.append(
"Can not generate ostree installer for %s.%s: "
"it has buildinstall running already and the "
"files would clash." % (variant.uid, arch)
)
if errors:
raise ValueError("\n".join(errors))
def get_repos(self):
return [
translate_path(
self.compose,
self.compose.paths.work.pkgset_repo(pkgset.name, "$basearch"),
)
for pkgset in self.pkgset_phase.package_sets
]
def run(self):
for variant in self.compose.get_variants():
for arch in variant.arches:
for conf in self.get_config_block(variant, arch):
self.pool.add(OstreeInstallerThread(self.pool, self.get_repos()))
self.pool.queue_put((self.compose, variant, arch, conf))
self.pool.start()
class OstreeInstallerThread(WorkerThread):
def __init__(self, pool, baseurls):
super(OstreeInstallerThread, self).__init__(pool)
self.baseurls = baseurls
def process(self, item, num):
compose, variant, arch, config = item
self.num = num
failable_arches = config.get("failable", [])
self.can_fail = util.can_arch_fail(failable_arches, arch)
with util.failable(
compose,
self.can_fail,
variant,
arch,
"ostree-installer",
logger=self.pool._logger,
):
self.worker(compose, variant, arch, config)
def worker(self, compose, variant, arch, config):
msg = "Ostree phase for variant %s, arch %s" % (variant.uid, arch)
self.pool.log_info("[BEGIN] %s" % msg)
self.logdir = compose.paths.log.topdir(
"%s/%s/ostree_installer-%s" % (arch, variant, self.num)
)
repos = get_repo_urls(
            None,  # compose=None is a special value telling the method to ignore the deprecated variant-type repo  # noqa: E501
shortcuts.force_list(config["repo"]) + self.baseurls,
arch=arch,
logger=self.pool,
)
if compose.has_comps:
repos.append(
translate_path(
compose,
compose.paths.work.comps_repo(
"$basearch", variant=variant, create_dir=False
),
)
)
repos = [url.replace("$arch", arch) for url in repos]
output_dir = os.path.join(
compose.paths.work.topdir(arch), variant.uid, "ostree_installer"
)
util.makedirs(os.path.dirname(output_dir))
self.template_dir = os.path.join(
compose.paths.work.topdir(arch), variant.uid, "lorax_templates"
)
self._clone_templates(
compose, config.get("template_repo"), config.get("template_branch")
)
disc_type = compose.conf["disc_types"].get("ostree", "ostree")
volid = get_volid(compose, arch, variant, disc_type=disc_type)
self._run_ostree_cmd(compose, variant, arch, config, repos, output_dir, volid)
filename = compose.get_image_name(arch, variant, disc_type=disc_type)
self._copy_image(compose, variant, arch, filename, output_dir)
self._add_to_manifest(compose, variant, arch, filename)
self.pool.log_info("[DONE ] %s" % (msg))
def _clone_templates(self, compose, url, branch="master"):
if not url:
self.template_dir = None
return
scm.get_dir_from_scm(
{"scm": "git", "repo": url, "branch": branch, "dir": "."},
self.template_dir,
compose=compose,
)
def _get_release(self, compose, config):
if "release" in config:
return (
version_generator(compose, config["release"]) or compose.image_release
)
return config.get("release", None)
def _copy_image(self, compose, variant, arch, filename, output_dir):
iso_path = compose.paths.compose.iso_path(arch, variant, filename)
os_path = compose.paths.compose.os_tree(arch, variant)
boot_iso = os.path.join(output_dir, "images", "boot.iso")
util.copy_all(output_dir, os_path)
try:
os.link(boot_iso, iso_path)
except OSError:
shutil.copy2(boot_iso, iso_path)
def _add_to_manifest(self, compose, variant, arch, filename):
full_iso_path = compose.paths.compose.iso_path(arch, variant, filename)
iso_path = compose.paths.compose.iso_path(
arch, variant, filename, relative=True
)
implant_md5 = iso.get_implanted_md5(full_iso_path)
img = images.Image(compose.im)
img.path = iso_path
img.mtime = util.get_mtime(full_iso_path)
img.size = util.get_file_size(full_iso_path)
img.arch = arch
img.type = "dvd-ostree"
img.format = "iso"
img.disc_number = 1
img.disc_count = 1
img.bootable = True
img.subvariant = variant.uid
img.implant_md5 = implant_md5
setattr(img, "can_fail", self.can_fail)
setattr(img, "deliverable", "ostree-installer")
try:
img.volume_id = iso.get_volume_id(full_iso_path)
except RuntimeError:
pass
compose.im.add(variant.uid, arch, img)
def _get_templates(self, config, key):
"""Retrieve all templates from configuration and make sure the paths
are absolute. Raises RuntimeError if template repo is needed but not
configured.
"""
templates = []
for template in config.get(key, []):
if template[0] != "/":
if not self.template_dir:
raise RuntimeError(
"Relative path to template without setting template_repo."
)
template = os.path.join(self.template_dir, template)
templates.append(template)
return templates
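
For example, with a cloned template repo the method resolves relative names against it and passes absolute ones through; without template_repo, a relative name is an error. A standalone sketch of the same rule (not part of this diff; paths are hypothetical):

import os

def resolve_templates(templates, template_dir):
    out = []
    for t in templates:
        if not t.startswith("/"):
            if not template_dir:
                raise RuntimeError(
                    "Relative path to template without setting template_repo."
                )
            t = os.path.join(template_dir, t)
        out.append(t)
    return out

print(resolve_templates(["ostree.tmpl", "/abs/custom.tmpl"], "/work/templates"))
# ['/work/templates/ostree.tmpl', '/abs/custom.tmpl']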
def _run_ostree_cmd(
self, compose, variant, arch, config, source_repo, output_dir, volid
):
packages = ["pungi", "lorax", "ostree"]
packages += config.get("extra_runroot_pkgs", [])
log_file = os.path.join(self.logdir, "runroot.log")
runroot = Runroot(compose, phase="ostree_installer")
if compose.conf["ostree_installer_use_koji_plugin"]:
args = {
"product": compose.conf["release_name"],
"version": compose.conf["release_version"],
"release": self._get_release(compose, config),
"sources": shortcuts.force_list(source_repo),
"variant": variant.uid,
"nomacboot": True,
"volid": volid,
"buildarch": get_valid_arches(arch)[0],
"installpkgs": config.get("installpkgs"),
"add-template": self._get_templates(config, "add_template"),
"add-arch-template": self._get_templates(config, "add_arch_template"),
"add-template-var": config.get("add_template_var"),
"add-arch-template-var": config.get("add_arch_template_var"),
"rootfs-size": config.get("rootfs_size"),
"isfinal": compose.supported,
"outputdir": output_dir,
}
runroot.run_pungi_buildinstall(
args,
log_file=log_file,
arch=arch,
packages=packages,
mounts=[compose.topdir],
weight=compose.conf["runroot_weights"].get("ostree_installer"),
)
            # If Koji pungi-buildinstall is used, then the buildinstall results
            # are not stored directly in `output_dir`, but in "results" and
            # "logs" subdirectories. Move them to `output_dir` and `self.logdir`.
results_dir = os.path.join(output_dir, "results")
move_all(results_dir, output_dir, rm_src_dir=True)
# Get the log_dir into which we should copy the resulting log files.
if not os.path.exists(self.logdir):
makedirs(self.logdir)
log_dir = os.path.join(output_dir, "logs")
move_all(log_dir, self.logdir, rm_src_dir=True)
else:
lorax_wrapper = lorax.LoraxWrapper()
lorax_cmd = lorax_wrapper.get_lorax_cmd(
compose.conf["release_name"],
compose.conf["release_version"],
self._get_release(compose, config),
repo_baseurl=source_repo,
output_dir=output_dir,
variant=variant.uid,
nomacboot=True,
volid=volid,
buildarch=get_valid_arches(arch)[0],
buildinstallpackages=config.get("installpkgs"),
add_template=self._get_templates(config, "add_template"),
add_arch_template=self._get_templates(config, "add_arch_template"),
add_template_var=config.get("add_template_var"),
add_arch_template_var=config.get("add_arch_template_var"),
rootfs_size=config.get("rootfs_size"),
is_final=compose.supported,
log_dir=self.logdir,
skip_branding=config.get("skip_branding"),
)
cmd = "rm -rf %s && %s" % (
shlex_quote(output_dir),
" ".join([shlex_quote(x) for x in lorax_cmd]),
)
runroot.run(
cmd,
log_file=log_file,
arch=arch,
packages=packages,
mounts=[compose.topdir],
chown_paths=[output_dir],
weight=compose.conf["runroot_weights"].get("ostree_installer"),
log_dir=self.logdir,
)
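
The non-plugin branch wipes the output directory and runs lorax as one quoted shell string. A sketch of the quoting (not part of this diff; the directory and lorax arguments are hypothetical):

from six.moves import shlex_quote  # shlex.quote on Python 3

output_dir = "/work/x86_64/Silverblue/ostree_installer"
lorax_cmd = ["lorax", "--volid", "F-41 x86_64", output_dir]
cmd = "rm -rf %s && %s" % (
    shlex_quote(output_dir),
    " ".join(shlex_quote(x) for x in lorax_cmd),
)
print(cmd)
# rm -rf /work/x86_64/Silverblue/ostree_installer && lorax --volid 'F-41 x86_64' ...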


@ -0,0 +1,35 @@
# -*- coding: utf-8 -*-
from inspect import isclass
from pungi.phases.base import PhaseBase
def gather_phases_metadata(source_object):
"""
    Gather metadata from Phase classes.
    The metadata are the 'name' attributes of the corresponding classes,
    collected without creating instances of the Phase classes.
"""
if not source_object:
raise ValueError(
"PhasesMetadata can not load any data - it got empty parameter"
)
phases = []
for item in dir(source_object):
cls = getattr(source_object, item) # get all objects references
if not isclass(cls): # filter out non-classes
continue
if issubclass(cls, PhaseBase):
try:
name_attr = getattr(cls, "name")
phases.append(name_attr)
except AttributeError:
raise AttributeError(
"Bad phase-class format: '%s' is missing attribute 'name'" % item
)
return phases
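
A usage sketch (not part of this diff): the real callers pass the pungi.phases module itself; a stand-in namespace object works the same way, assuming pungi is importable so PhaseBase can be subclassed.

import types

class DemoPhase(PhaseBase):
    name = "demo"

fake_module = types.SimpleNamespace(DemoPhase=DemoPhase, not_a_class=42)
print(gather_phases_metadata(fake_module))  # ['demo']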

Some files were not shown because too many files have changed in this diff.