Compare commits

42 commits, master...pungi-4.4
Author | SHA1
---|---
Lubomír Sedlář | 091d228219
Lubomír Sedlář | bcc440491e
Lubomír Sedlář | fa50eedfad
Lubomír Sedlář | b7adbf8a91
Lubomír Sedlář | 82ae9e86d5
Lubomír Sedlář | 2ad341a01c
Lubomír Sedlář | e888e76992
Lubomír Sedlář | 6e72de7efe
Lubomír Sedlář | c8263fcd39
Lubomír Sedlář | 82ca4f4e65
Aurélien Bompard | b8b6b46ce7
Lubomír Sedlář | e9d836c115
Lubomír Sedlář | d3f0701e01
Haibo Lin | 8f6f0f463f
Haibo Lin | 467c7a7f6a
Lubomír Sedlář | e1d7544c2b
Lubomír Sedlář | a71c8e23be
Lubomír Sedlář | ab508c1511
Lubomír Sedlář | f960b4d155
Lubomír Sedlář | 602b698080
Haibo Lin | b30f7e0d83
Lubomír Sedlář | 0c3b6e22f9
Haibo Lin | 3175ede38a
Lubomír Sedlář | 8920eef339
Lubomír Sedlář | 58036eab84
Lubomír Sedlář | a4476f2570
Haibo Lin | 8c06b7a3f1
Lubomír Sedlář | 64ae81b416
Lubomír Sedlář | 826169af7c
Lubomír Sedlář | d97b8bdd33
Lubomír Sedlář | 8768b23cbe
Lubomír Sedlář | 51628a974d
Ondrej Nosek | 88327d5784
Ondrej Nosek | 6e0a9385f2
Lubomír Sedlář | 8be0d84f8a
Tomáš Hozza | 8f0906be53
Tomáš Hozza | e3072c3d5f
Tomáš Hozza | ef6d40dce4
Lubomír Sedlář | df6664098d
Lubomír Sedlář | 147df93f75
Lubomír Sedlář | dd8c1002d4
Lubomír Sedlář | 12e3a46390
@@ -2,6 +2,7 @@ include AUTHORS
 include COPYING
 include GPL
 include pungi.spec
 include setup.cfg
+include tox.ini
 include share/*
 include share/multilib/*
@@ -0,0 +1,2 @@
+# Clean up pungi cache
+d /var/cache/pungi/createrepo_c/ - - - 30d
@@ -51,9 +51,9 @@ copyright = u'2016, Red Hat, Inc.'
 # built documents.
 #
 # The short X.Y version.
-version = '4.3'
+version = '4.4'
 # The full version, including alpha/beta/rc tags.
-release = '4.3.6'
+release = '4.4.0'

 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
@@ -581,6 +581,16 @@ Options
     with everything. Set this option to ``False`` to ignore ``noarch`` in
     ``ExclusiveArch`` and always consider only binary architectures.

+**pkgset_inherit_exclusive_arch_to_noarch** = True
+    (*bool*) -- When set to ``True``, the value of ``ExclusiveArch`` or
+    ``ExcludeArch`` will be copied from source rpm to all its noarch packages.
+    That will then limit which architectures the noarch packages can be
+    included in.
+
+    By setting this option to ``False`` this step is skipped, and noarch
+    packages will by default land in all architectures. They can still be
+    excluded by listing them in a relevant section of ``filter_packages``.
+
 **pkgset_allow_reuse** = True
     (*bool*) -- When set to ``True``, *Pungi* will try to reuse pkgset data
     from the old composes specified by ``--old-composes``. When enabled, this
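Note: the new option pairs with the existing ``pkgset_exclusive_arch_considers_noarch``. A minimal sketch of how these documented booleans could appear in a compose configuration (Pungi configs are Python; values are illustrative, not from this patch set):

```python
# Keep copying ExclusiveArch/ExcludeArch to noarch packages (documented default):
pkgset_inherit_exclusive_arch_to_noarch = True

# Or skip the inheritance and filter noarch packages explicitly instead:
# pkgset_inherit_exclusive_arch_to_noarch = False
# filter_packages = [("^Server$", {"s390x": ["some-noarch-package"]})]

# Reuse pkgset data from an older compose where possible (documented default):
pkgset_allow_reuse = True
```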
@@ -1607,8 +1617,23 @@ OSBuild Composer for building images
     * ``release`` -- release part of the final NVR. If neither this option nor
       the global ``osbuild_release`` is set, Koji will automatically generate a
       value.
-    * ``repo`` -- a list of repository URLs from which to consume packages for
+    * ``repo`` -- a list of repositories from which to consume packages for
       building the image. By default only the variant repository is used.
+      The list items may use one of the following formats:
+
+      * String with just the repository URL.
+
+      * Dictionary with the following keys:
+
+        * ``baseurl`` -- URL of the repository.
+        * ``package_sets`` -- a list of package set names to use for this
+          repository. Package sets are an internal concept of Image Builder
+          and are used in image definitions. If specified, the repository is
+          used by Image Builder only for the pipeline with the same name.
+          For example, specifying the ``build`` package set name will make
+          the repository to be used only for the build environment in which
+          the image will be built. (optional)
+
     * ``arches`` -- list of architectures for which to build the image. By
       default, the variant arches are used. This option can only restrict it,
       not add a new one.
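Given the documented formats, a ``repo`` list mixing both item kinds might look like this sketch (URLs and pipeline name are placeholders, not taken from the diff):

```python
# Illustrative osbuild image-config fragment.
"repo": [
    # Plain string: just the repository URL.
    "https://example.com/repo/$arch/os/",
    # Dictionary form: restrict the repository to one Image Builder pipeline.
    {
        "baseurl": "https://example.com/buildroot/$arch/os/",
        "package_sets": ["build"],
    },
],
```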
@@ -1641,13 +1666,13 @@ OSBuild Composer for building images
         * ``tenant_id`` -- Azure tenant ID to upload the image to
         * ``subscription_id`` -- Azure subscription ID to upload the image to
         * ``resource_group`` -- Azure resource group to upload the image to
-        * ``location`` -- Azure location to upload the image to
+        * ``location`` -- Azure location of the resource group (optional)
         * ``image_name`` -- Image name of the uploaded Azure image (optional)

     * **GCP upload options** -- upload to Google Cloud Platform.

         * ``region`` -- GCP region to upload the image to
-        * ``bucket`` -- GCP bucket to upload the image to
+        * ``bucket`` -- GCP bucket to upload the image to (optional)
         * ``share_with_accounts`` -- list of GCP accounts to share the image
           with
         * ``image_name`` -- Image name of the uploaded GCP image (optional)
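With ``bucket`` now optional, the smallest valid GCP upload options shrink accordingly; a hypothetical example:

```python
# Illustrative GCP upload_options; after this change only "region" is required.
"upload_options": {
    "region": "us-east1",
    "share_with_accounts": ["user:someone@example.com"],
    # "bucket" and "image_name" may be omitted.
},
```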
@@ -22,4 +22,3 @@ Contents:
    comps
    contributing
    testing
-   multi_compose
@@ -1,107 +0,0 @@
-.. _multi_compose:
-
-Managing compose from multiple parts
-====================================
-
-There may be cases where it makes sense to split a big compose into separate
-parts, but create a compose output that links all output into one familiar
-structure.
-
-The `pungi-orchestrate` tools allows that.
-
-It works with an INI-style configuration file. The ``[general]`` section
-contains information about identity of the main compose. Other sections define
-individual parts.
-
-The parts are scheduled to run in parallel, with the minimal amount of
-serialization. The final compose directory will contain hard-links to the
-files.
-
-
-General settings
-----------------
-
-**target**
-   Path to directory where the final compose should be created.
-**compose_type**
-   Type of compose to make.
-**release_name**
-   Name of the product for the final compose.
-**release_short**
-   Short name of the product for the final compose.
-**release_version**
-   Version of the product for the final compose.
-**release_type**
-   Type of the product for the final compose.
-**extra_args**
-   Additional arguments that will be passed to the child Pungi processes.
-**koji_profile**
-   If specified, a current event will be retrieved from the Koji instance and
-   used for all parts.
-
-**kerberos**
-   If set to yes, a kerberos ticket will be automatically created at the start.
-   Set keytab and principal as well.
-**kerberos_keytab**
-   Path to keytab file used to create the kerberos ticket.
-**kerberos_principal**
-   Kerberos principal for the ticket
-
-**pre_compose_script**
-   Commands to execute before first part is started. Can contain multiple
-   commands on separate lines.
-**post_compose_script**
-   Commands to execute after the last part finishes and final status is
-   updated. Can contain multiple commands on separate lines. ::
-
-       post_compose_script =
-           compose-latest-symlink $COMPOSE_PATH
-           custom-post-compose-script.sh
-
-   Multiple environment variables are defined for the scripts:
-
-   * ``COMPOSE_PATH``
-   * ``COMPOSE_ID``
-   * ``COMPOSE_DATE``
-   * ``COMPOSE_TYPE``
-   * ``COMPOSE_RESPIN``
-   * ``COMPOSE_LABEL``
-   * ``RELEASE_ID``
-   * ``RELEASE_NAME``
-   * ``RELEASE_SHORT``
-   * ``RELEASE_VERSION``
-   * ``RELEASE_TYPE``
-   * ``RELEASE_IS_LAYERED`` – ``YES`` for layered products, empty otherwise
-   * ``BASE_PRODUCT_NAME`` – only set for layered products
-   * ``BASE_PRODUCT_SHORT`` – only set for layered products
-   * ``BASE_PRODUCT_VERSION`` – only set for layered products
-   * ``BASE_PRODUCT_TYPE`` – only set for layered products
-
-**notification_script**
-   Executable name (or path to a script) that will be used to send a message
-   once the compose is finished. In order for a valid URL to be included in the
-   message, at least one part must configure path translation that would apply
-   to location of main compose.
-
-   Only two messages will be sent, one for start and one for finish (either
-   successful or not).
-
-
-Partial compose settings
-------------------------
-
-Each part should have a separate section in the config file.
-
-It can specify these options:
-
-**config**
-   Path to configuration file that describes this part. If relative, it is
-   resolved relative to the file with parts configuration.
-**just_phase**, **skip_phase**
-   Customize which phases should run for this part.
-**depends_on**
-   A comma separated list of other parts that must be finished before this part
-   starts.
-**failable**
-   A boolean toggle to mark a part as failable. A failure in such part will
-   mark the final compose as incomplete, but still successful.
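For readers tracking the removal, the deleted page described an INI file of roughly this shape. This is a hypothetical reconstruction from the option list above, not an example taken from the docs:

```ini
[general]
target = /mnt/compose/final
compose_type = nightly
release_name = Example Product
release_short = Example
release_version = 1.0
koji_profile = koji

[base]
config = base.conf

[addon]
config = addon.conf
depends_on = base
failable = yes
```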
pungi.spec (55 changed lines)
@@ -1,5 +1,5 @@
 Name:           pungi
-Version:        4.3.6
+Version:        4.4.0
 Release:        1%{?dist}
 Summary:        Distribution compose tool
@@ -100,7 +100,6 @@ rm -rf %{buildroot}
 %{_bindir}/%{name}-config-validate
 %{_bindir}/%{name}-fedmsg-notification
 %{_bindir}/%{name}-notification-report-progress
-%{_bindir}/%{name}-orchestrate
 %{_bindir}/%{name}-patch-iso
 %{_bindir}/%{name}-compare-depsolving
 %{_bindir}/%{name}-wait-for-signed-ostree-handler
@@ -111,6 +110,58 @@ pytest
 cd tests && ./test_compose.sh

 %changelog
+* Tue Jun 06 2023 Lubomír Sedlář <lsedlar@redhat.com> - 4.4.0-1
+- gather-dnf: Run latest() later (lsedlar)
+- iso: Support joliet long names (lsedlar)
+- Drop pungi-orchestrator code (lsedlar)
+- isos: Ensure proper file ownership and permissions (lsedlar)
+- gather: Always get latest packages (lsedlar)
+- Add back compatibility with jsonschema <3.0.0 (lsedlar)
+- Remove useless debug message (lsedlar)
+- Remove fedmsg from requirements (lsedlar)
+- gather: Support dotarch in DNF backend (lsedlar)
+- Set the priority in the fedora-messaging notifier (aurelien)
+- Fix compatibility with createrepo_c 0.21.1 (lsedlar)
+- comps: Apply arch filtering to environment/optionlist (lsedlar)
+- Add config file for cleaning up cache files (hlin)
+
+* Tue Mar 28 2023 Haibo Lin <hlin@redhat.com> - 4.3.8-1
+- createiso: Update possibly changed file on DVD (lsedlar)
+- pkgset: Stop reuse if configuration changed (lsedlar)
+- Allow disabling inheriting ExcludeArch to noarch packages (lsedlar)
+- pkgset: Support extra builds with no tags (lsedlar)
+- buildinstall: Avoid pointlessly tweaking the boot images (lsedlar)
+- Prevent to reuse if unsigned packages are allowed (hlin)
+- Pass parent id/respin id to CTS (lsedlar)
+- Exclude existing files in boot.iso (hlin)
+- image-build/osbuild: Pull ISOs into the compose (lsedlar)
+- Retry 401 error from CTS (lsedlar)
+- gather: Better detection of debuginfo in lookaside (lsedlar)
+- Log versions of all installed packages (hlin)
+- Use authentication for all CTS calls (lsedlar)
+- Fix black complaints (lsedlar)
+- Add vhd.gz extension to compressed VHD images (lsedlar)
+- Add vhd-compressed image type (lsedlar)
+- Update to work with latest mock (lsedlar)
+- Default bztar format for sdist command (onosek)
+
+* Fri Dec 09 2022 Ondřej Nosek <onosek@redhat.com>
+- osbuild: test passing of rich repos from configuration (lsedlar)
+- osbuild: support specifying `package_sets` for repos (thozza)
+- osbuild: don't use `util.get_repo_urls()` (thozza)
+- osbuild: update schema and config documentation (thozza)
+- Speed up tests by 30 seconds (lsedlar)
+- Stop sending compose paths to CTS (lsedlar)
+- Report errors from CTS (lsedlar)
+- createiso: Create Joliet tree with xorriso (lsedlar)
+- init: Filter comps for modular variants with tags (lsedlar)
+- Retry failed cts requests (hlin)
+- Ignore existing kerberos ticket for CTS auth (lsedlar)
+- osbuild: support specifying upload_options (thozza)
+- osbuild: accept only a single image type in the configuration (thozza)
+- Add Jenkinsfile for CI (hlin)
+- profiler: Flush stdout before printing (lsedlar)
+
 * Fri Aug 26 2022 Lubomír Sedlář <lsedlar@redhat.com> - 4.3.6-1
 - pkgset: Report better error when module is missing an arch (lsedlar)
 - osbuild: add support for building ostree artifacts (ondrej)
@@ -227,9 +227,19 @@ def validate(config, offline=False, schema=None):
    DefaultValidator = _extend_with_default_and_alias(
        jsonschema.Draft4Validator, offline=offline
    )
-    validator = DefaultValidator(
-        schema,
-    )
+    if hasattr(jsonschema.Draft4Validator, "TYPE_CHECKER"):
+        # jsonschema >= 3.0 has new interface for checking types
+        validator = DefaultValidator(schema)
+    else:
+        validator = DefaultValidator(
+            schema,
+            {
+                "array": (tuple, list),
+                "regex": six.string_types,
+                "url": six.string_types,
+            },
+        )
    errors = []
    warnings = []
    for error in validator.iter_errors(config):
@@ -444,15 +454,18 @@ def _extend_with_default_and_alias(validator_class, offline=False):
                context=all_errors,
            )

-    def is_array(checker, instance):
-        return isinstance(instance, (tuple, list))
+    kwargs = {}
+    if hasattr(validator_class, "TYPE_CHECKER"):
+        # jsonschema >= 3
+        def is_array(checker, instance):
+            return isinstance(instance, (tuple, list))

-    def is_string_type(checker, instance):
-        return isinstance(instance, six.string_types)
+        def is_string_type(checker, instance):
+            return isinstance(instance, six.string_types)

-    type_checker = validator_class.TYPE_CHECKER.redefine_many(
-        {"array": is_array, "regex": is_string_type, "url": is_string_type}
-    )
+        kwargs["type_checker"] = validator_class.TYPE_CHECKER.redefine_many(
+            {"array": is_array, "regex": is_string_type, "url": is_string_type}
+        )

    return jsonschema.validators.extend(
        validator_class,
@@ -464,7 +477,7 @@ def _extend_with_default_and_alias(validator_class, offline=False):
            "additionalProperties": _validate_additional_properties,
            "anyOf": _validate_any_of,
        },
-        type_checker=type_checker,
+        **kwargs
    )
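The three hunks above all hinge on the same feature-detection idiom for supporting jsonschema on both sides of the 3.0 API break. A standalone sketch of the idea, independent of Pungi's validators (names are illustrative):

```python
import jsonschema


def make_validator(schema):
    """Build a Draft4 validator that accepts tuples as arrays on any jsonschema."""
    if hasattr(jsonschema.Draft4Validator, "TYPE_CHECKER"):
        # jsonschema >= 3.0: types are customized via a TYPE_CHECKER object.
        type_checker = jsonschema.Draft4Validator.TYPE_CHECKER.redefine(
            "array", lambda checker, instance: isinstance(instance, (tuple, list))
        )
        cls = jsonschema.validators.extend(
            jsonschema.Draft4Validator, type_checker=type_checker
        )
        return cls(schema)
    # jsonschema < 3.0: custom types were passed as a plain mapping.
    return jsonschema.Draft4Validator(schema, types={"array": (tuple, list)})
```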
@@ -830,6 +843,10 @@ def make_schema():
                "type": "boolean",
                "default": True,
            },
+            "pkgset_inherit_exclusive_arch_to_noarch": {
+                "type": "boolean",
+                "default": True,
+            },
            "pkgset_scratch_modules": {
                "type": "object",
                "patternProperties": {
@@ -1188,14 +1205,36 @@ def make_schema():
                    },
                    "arches": {"$ref": "#/definitions/list_of_strings"},
                    "release": {"type": "string"},
-                    "repo": {"$ref": "#/definitions/list_of_strings"},
+                    "repo": {
+                        "type": "array",
+                        "items": {
+                            "oneOf": [
+                                {
+                                    "type": "object",
+                                    "additionalProperties": False,
+                                    "required": ["baseurl"],
+                                    "properties": {
+                                        "baseurl": {"type": "string"},
+                                        "package_sets": {
+                                            "type": "array",
+                                            "items": {"type": "string"},
+                                        },
+                                    },
+                                },
+                                {"type": "string"},
+                            ]
+                        },
+                    },
                    "failable": {"$ref": "#/definitions/list_of_strings"},
                    "subvariant": {"type": "string"},
                    "ostree_url": {"type": "string"},
                    "ostree_ref": {"type": "string"},
                    "ostree_parent": {"type": "string"},
                    "upload_options": {
-                        "oneOf": [
+                        # this should be really 'oneOf', but the minimal
+                        # required properties in AWSEC2 and GCP options
+                        # overlap.
+                        "anyOf": [
                            # AWSEC2UploadOptions
                            {
                                "type": "object",
@@ -1234,7 +1273,6 @@ def make_schema():
                                    "tenant_id",
                                    "subscription_id",
                                    "resource_group",
-                                    "location",
                                ],
                                "properties": {
                                    "tenant_id": {"type": "string"},
@@ -1250,7 +1288,7 @@ def make_schema():
                            {
                                "type": "object",
                                "additionalProperties": False,
-                                "required": ["region", "bucket"],
+                                "required": ["region"],
                                "properties": {
                                    "region": {"type": "string"},
                                    "bucket": {"type": "string"},
pungi/compose.py (114 changed lines)
@@ -17,6 +17,7 @@
 __all__ = ("Compose",)


+import contextlib
 import errno
 import logging
 import os
@@ -57,14 +58,58 @@ except ImportError:
 SUPPORTED_MILESTONES = ["RC", "Update", "SecurityFix"]


+def is_status_fatal(status_code):
+    """Check if status code returned from CTS reports an error that is unlikely
+    to be fixed by retrying. Generally client errors (4XX) are fatal, with the
+    exception of 401 Unauthorized which could be caused by transient network
+    issue between compose host and KDC.
+    """
+    if status_code == 401:
+        return False
+    return status_code >= 400 and status_code < 500
+
+
+@retry(wait_on=RequestException)
+def retry_request(method, url, data=None, auth=None):
+    request_method = getattr(requests, method)
+    rv = request_method(url, json=data, auth=auth)
+    if is_status_fatal(rv.status_code):
+        try:
+            error = rv.json()["message"]
+        except ValueError:
+            error = rv.text
+        raise RuntimeError("CTS responded with %d: %s" % (rv.status_code, error))
+    rv.raise_for_status()
+    return rv
+
+
+@contextlib.contextmanager
+def cts_auth(cts_keytab):
+    auth = None
+    if cts_keytab:
+        # requests-kerberos cannot accept custom keytab, we need to use
+        # environment variable for this. But we need to change environment
+        # only temporarily just for this single requests.post.
+        # So at first backup the current environment and revert to it
+        # after the requests call.
+        from requests_kerberos import HTTPKerberosAuth
+
+        auth = HTTPKerberosAuth()
+        environ_copy = dict(os.environ)
+        if "$HOSTNAME" in cts_keytab:
+            cts_keytab = cts_keytab.replace("$HOSTNAME", socket.gethostname())
+        os.environ["KRB5_CLIENT_KTNAME"] = cts_keytab
+        os.environ["KRB5CCNAME"] = "DIR:%s" % tempfile.mkdtemp()
+
+    try:
+        yield auth
+    finally:
+        if cts_keytab:
+            shutil.rmtree(os.environ["KRB5CCNAME"].split(":", 1)[1])
+            os.environ.clear()
+            os.environ.update(environ_copy)
+
+
 def get_compose_info(
     conf,
     compose_type="production",
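Together the helpers reduce every CTS call to one pattern: open an (optional) Kerberos context, then issue a request that retries on transient failures but fails fast on most 4XX responses. A hedged usage sketch; the URL, payload and keytab path are placeholders:

```python
with cts_auth("/etc/pungi/cts.keytab") as auth:
    rv = retry_request(
        "post",
        "https://cts.example.com/api/1/composes/",
        data={"compose_info": {}},
        auth=auth,
    )
print(rv.status_code)
```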
@@ -94,38 +139,19 @@ def get_compose_info(
     ci.compose.type = compose_type
     ci.compose.date = compose_date or time.strftime("%Y%m%d", time.localtime())
     ci.compose.respin = compose_respin or 0
+    ci.compose.id = ci.create_compose_id()

-    cts_url = conf.get("cts_url", None)
+    cts_url = conf.get("cts_url")
     if cts_url:
-        # Requests-kerberos cannot accept custom keytab, we need to use
-        # environment variable for this. But we need to change environment
-        # only temporarily just for this single requests.post.
-        # So at first backup the current environment and revert to it
-        # after the requests.post call.
-        cts_keytab = conf.get("cts_keytab", None)
-        authentication = get_authentication(conf)
-        if cts_keytab:
-            environ_copy = dict(os.environ)
-            if "$HOSTNAME" in cts_keytab:
-                cts_keytab = cts_keytab.replace("$HOSTNAME", socket.gethostname())
-            os.environ["KRB5_CLIENT_KTNAME"] = cts_keytab
-            os.environ["KRB5CCNAME"] = "DIR:%s" % tempfile.mkdtemp()
-
-        try:
-            # Create compose in CTS and get the reserved compose ID.
-            ci.compose.id = ci.create_compose_id()
-            url = os.path.join(cts_url, "api/1/composes/")
-            data = {
-                "compose_info": json.loads(ci.dumps()),
-                "parent_compose_ids": parent_compose_ids,
-                "respin_of": respin_of,
-            }
+        # Create compose in CTS and get the reserved compose ID.
+        url = os.path.join(cts_url, "api/1/composes/")
+        data = {
+            "compose_info": json.loads(ci.dumps()),
+            "parent_compose_ids": parent_compose_ids,
+            "respin_of": respin_of,
+        }
+        with cts_auth(conf.get("cts_keytab")) as authentication:
             rv = retry_request("post", url, data=data, auth=authentication)
-        finally:
-            if cts_keytab:
-                shutil.rmtree(os.environ["KRB5CCNAME"].split(":", 1)[1])
-                os.environ.clear()
-                os.environ.update(environ_copy)

         # Update local ComposeInfo with received ComposeInfo.
         cts_ci = ComposeInfo()
@@ -133,22 +159,9 @@ def get_compose_info(
         ci.compose.respin = cts_ci.compose.respin
         ci.compose.id = cts_ci.compose.id

-    else:
-        ci.compose.id = ci.create_compose_id()
-
     return ci


-def get_authentication(conf):
-    authentication = None
-    cts_keytab = conf.get("cts_keytab", None)
-    if cts_keytab:
-        from requests_kerberos import HTTPKerberosAuth
-
-        authentication = HTTPKerberosAuth()
-    return authentication
-
-
 def write_compose_info(compose_dir, ci):
     """
     Write ComposeInfo `ci` to `compose_dir` subdirectories.
@@ -162,17 +175,20 @@ def write_compose_info(compose_dir, ci):


 def update_compose_url(compose_id, compose_dir, conf):
-    authentication = get_authentication(conf)
     cts_url = conf.get("cts_url", None)
     if cts_url:
         url = os.path.join(cts_url, "api/1/composes", compose_id)
         tp = conf.get("translate_paths", None)
         compose_url = translate_path_raw(tp, compose_dir)
+        if compose_url == compose_dir:
+            # We do not have a URL, do not attempt the update.
+            return
         data = {
             "action": "set_url",
             "compose_url": compose_url,
         }
-        return retry_request("patch", url, data=data, auth=authentication)
+        with cts_auth(conf.get("cts_keytab")) as authentication:
+            return retry_request("patch", url, data=data, auth=authentication)


 def get_compose_dir(
@@ -183,11 +199,19 @@ def get_compose_dir(
     compose_respin=None,
     compose_label=None,
     already_exists_callbacks=None,
+    parent_compose_ids=None,
+    respin_of=None,
 ):
     already_exists_callbacks = already_exists_callbacks or []

     ci = get_compose_info(
-        conf, compose_type, compose_date, compose_respin, compose_label
+        conf,
+        compose_type,
+        compose_date,
+        compose_respin,
+        compose_label,
+        parent_compose_ids,
+        respin_of,
     )

     cts_url = conf.get("cts_url", None)
@@ -5,11 +5,14 @@ from __future__ import print_function
 import os
 import six
 from collections import namedtuple
+from kobo.shortcuts import run
 from six.moves import shlex_quote

+from .wrappers import iso
 from .wrappers.jigdo import JigdoWrapper

+from .phases.buildinstall import BOOT_CONFIGS, BOOT_IMAGES


 CreateIsoOpts = namedtuple(
     "CreateIsoOpts",
@@ -118,23 +121,73 @@ def make_jigdo(f, opts):
     emit(f, cmd)


+def _get_perms(fs_path):
+    """Compute proper permissions for a file.
+
+    This mimicks what -rational-rock option of genisoimage does. All read bits
+    are set, so that files and directories are globally readable. If any
+    execute bit is set for a file, set them all. No writes are allowed and
+    special bits are erased too.
+    """
+    statinfo = os.stat(fs_path)
+    perms = 0o444
+    if statinfo.st_mode & 0o111:
+        perms |= 0o111
+    return perms
+
+
 def write_xorriso_commands(opts):
+    # Create manifest for the boot.iso listing all contents
+    boot_iso_manifest = "%s.manifest" % os.path.join(
+        opts.script_dir, os.path.basename(opts.boot_iso)
+    )
+    run(
+        iso.get_manifest_cmd(
+            opts.boot_iso, opts.use_xorrisofs, output_file=boot_iso_manifest
+        )
+    )
+    # Find which files may have been updated by pungi. This only includes a few
+    # files from tweaking buildinstall and .discinfo metadata. There's no good
+    # way to detect whether the boot config files actually changed, so we may
+    # be updating files in the ISO with the same data.
+    UPDATEABLE_FILES = set(BOOT_IMAGES + BOOT_CONFIGS + [".discinfo"])
+    updated_files = set()
+    excluded_files = set()
+    with open(boot_iso_manifest) as f:
+        for line in f:
+            path = line.lstrip("/").rstrip("\n")
+            if path in UPDATEABLE_FILES:
+                updated_files.add(path)
+            else:
+                excluded_files.add(path)
+
     script = os.path.join(opts.script_dir, "xorriso-%s.txt" % id(opts))
     with open(script, "w") as f:
         emit(f, "-indev %s" % opts.boot_iso)
         emit(f, "-outdev %s" % os.path.join(opts.output_dir, opts.iso_name))
         emit(f, "-boot_image any replay")
         emit(f, "-volid %s" % opts.volid)
+        # isoinfo -J uses the Joliet tree, and it's used by virt-install
+        emit(f, "-joliet on")
+        # Support long filenames in the Joliet trees. Repodata is particularly
+        # likely to run into this limit.
+        emit(f, "-compliance joliet_long_names")

         with open(opts.graft_points) as gp:
             for line in gp:
                 iso_path, fs_path = line.strip().split("=", 1)
-                emit(f, "-map %s %s" % (fs_path, iso_path))
+                if iso_path in excluded_files:
+                    continue
+                cmd = "-update" if iso_path in updated_files else "-map"
+                emit(f, "%s %s %s" % (cmd, fs_path, iso_path))
+                emit(f, "-chmod 0%o %s" % (_get_perms(fs_path), iso_path))

         if opts.arch == "ppc64le":
             # This is needed for the image to be bootable.
             emit(f, "-as mkisofs -U --")

+        emit(f, "-chown_r 0 /")
+        emit(f, "-chgrp_r 0 /")
         emit(f, "-end")
         return script
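The permission normalization in `_get_perms` is easy to sanity-check in isolation; a small sketch (paths are illustrative):

```python
import os


def rock_ridge_perms(fs_path):
    # Same rule as above: all read bits, execute all-or-nothing,
    # no write or special bits -- like genisoimage's -rational-rock.
    perms = 0o444
    if os.stat(fs_path).st_mode & 0o111:
        perms |= 0o111
    return perms


print(oct(rock_ridge_perms("/bin/sh")))     # typically 0o555
print(oct(rock_ridge_perms("/etc/hosts")))  # typically 0o444
```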
@@ -1118,7 +1118,6 @@ class Pungi(PungiBase):
         self.logger.info("Finished gathering package objects.")

     def gather(self):
-
         # get package objects according to the input list
         self.getPackageObjects()
         if self.is_sources:
@@ -15,17 +15,20 @@


 from enum import Enum
-from itertools import count
+from functools import cmp_to_key
+from itertools import count, groupby
 import logging
 import os
 import re

 from kobo.rpmlib import parse_nvra
+import rpm

 import pungi.common
 import pungi.dnf_wrapper
 import pungi.multilib_dnf
 import pungi.util
+from pungi import arch_utils
 from pungi.linker import Linker
 from pungi.profiler import Profiler
 from pungi.util import DEBUG_PATTERNS
@@ -245,12 +248,36 @@ class Gather(GatherBase):
         # from lookaside. This can be achieved by removing any package that is
         # also in lookaside from the list.
         lookaside_pkgs = set()
-        for pkg in package_list:
-            if pkg.repoid in self.opts.lookaside_repos:
-                lookaside_pkgs.add("{0.name}-{0.evr}".format(pkg))

         if self.opts.greedy_method == "all":
             return list(package_list)
+        if self.opts.lookaside_repos:
+            # We will call `latest()` to get the highest version packages only.
+            # However, that is per name and architecture. If a package switches
+            # from arched to noarch or the other way, it is possible that the
+            # package_list contains different versions in main repos and in
+            # lookaside repos.
+            # We need to manually filter the latest version.
+            def vercmp(x, y):
+                return rpm.labelCompare(x[1], y[1])
+
+            # Annotate the packages with their version.
+            versioned_packages = [
+                (pkg, (str(pkg.epoch) or "0", pkg.version, pkg.release))
+                for pkg in package_list
+            ]
+            # Sort the packages newest first.
+            sorted_packages = sorted(
+                versioned_packages, key=cmp_to_key(vercmp), reverse=True
+            )
+            # Group packages by version, take the first group and discard the
+            # version info from the tuple.
+            package_list = list(
+                x[0] for x in next(groupby(sorted_packages, key=lambda x: x[1]))[1]
+            )
+
+            # Now we can decide what is used from lookaside.
+            for pkg in package_list:
+                if pkg.repoid in self.opts.lookaside_repos:
+                    lookaside_pkgs.add("{0.name}-{0.evr}".format(pkg))

         all_pkgs = []
         for pkg in package_list:
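The sort-then-`groupby` step is a compact way to keep every package tied for the newest EVR, not just a single winner. A self-contained sketch with plain tuples standing in for DNF package objects (requires the `rpm` Python bindings):

```python
from functools import cmp_to_key
from itertools import groupby

import rpm

# (label, (epoch, version, release)) stand-ins for package objects.
pkgs = [
    ("foo.x86_64", ("0", "1.2", "3")),
    ("foo.noarch", ("0", "1.3", "1")),
    ("foo.i686", ("0", "1.3", "1")),
]


def vercmp(x, y):
    return rpm.labelCompare(x[1], y[1])


sorted_pkgs = sorted(pkgs, key=cmp_to_key(vercmp), reverse=True)
# The first group holds everything sharing the highest EVR.
latest = [p[0] for p in next(groupby(sorted_pkgs, key=lambda x: x[1]))[1]]
print(latest)  # ['foo.noarch', 'foo.i686']
```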
@@ -263,16 +290,21 @@ class Gather(GatherBase):

         if not debuginfo:
             native_pkgs = set(
-                self.q_native_binary_packages.filter(pkg=all_pkgs).apply()
+                self.q_native_binary_packages.filter(pkg=all_pkgs).latest().apply()
             )
             multilib_pkgs = set(
-                self.q_multilib_binary_packages.filter(pkg=all_pkgs).apply()
+                self.q_multilib_binary_packages.filter(pkg=all_pkgs).latest().apply()
             )
         else:
-            native_pkgs = set(self.q_native_debug_packages.filter(pkg=all_pkgs).apply())
-            multilib_pkgs = set(
-                self.q_multilib_debug_packages.filter(pkg=all_pkgs).apply()
+            native_pkgs = set(
+                self.q_native_debug_packages.filter(pkg=all_pkgs).latest().apply()
+            )
+            multilib_pkgs = set(
+                self.q_multilib_debug_packages.filter(pkg=all_pkgs).latest().apply()
             )

         if self.opts.greedy_method == "all":
             return list(native_pkgs | multilib_pkgs)

         result = set()
@@ -392,9 +424,7 @@ class Gather(GatherBase):
         """Given an name of a queue (stored as attribute in `self`), exclude
         all given packages and keep only the latest per package name and arch.
         """
-        setattr(
-            self, queue, getattr(self, queue).filter(pkg__neq=exclude).latest().apply()
-        )
+        setattr(self, queue, getattr(self, queue).filter(pkg__neq=exclude).apply())

     @Profiler("Gather._apply_excludes()")
     def _apply_excludes(self, excludes):
@@ -500,9 +530,16 @@ class Gather(GatherBase):
                         name__glob=pattern[:-2]
                     ).apply()
                 else:
-                    pkgs = self.q_binary_packages.filter(
-                        name__glob=pattern
-                    ).apply()
+                    kwargs = {"name__glob": pattern}
+                    if "." in pattern:
+                        # The pattern could be name.arch. Check if the
+                        # arch is valid, and if yes, make a more
+                        # specific query.
+                        name, arch = pattern.split(".", 1)
+                        if arch in arch_utils.arches:
+                            kwargs["name__glob"] = name
+                            kwargs["arch__eq"] = arch
+                    pkgs = self.q_binary_packages.filter(**kwargs).apply()

             if not pkgs:
                 self.logger.error("No package matches pattern %s" % pattern)
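The dotarch support means a pattern like `gcc.x86_64` now restricts the query to one architecture, while a dot not followed by a valid arch is left alone. A minimal sketch of the parsing rule (with a toy arch list):

```python
ARCHES = {"x86_64", "i686", "noarch"}  # illustrative subset of valid arches


def parse_pattern(pattern):
    kwargs = {"name__glob": pattern}
    if "." in pattern:
        name, arch = pattern.split(".", 1)
        if arch in ARCHES:
            kwargs["name__glob"] = name
            kwargs["arch__eq"] = arch
    return kwargs


print(parse_pattern("gcc.x86_64"))  # {'name__glob': 'gcc', 'arch__eq': 'x86_64'}
print(parse_pattern("python3.11"))  # {'name__glob': 'python3.11'} -- "11" is no arch
```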
@@ -616,7 +653,6 @@ class Gather(GatherBase):
             return added

         for pkg in self.result_debug_packages.copy():
-
             if pkg not in self.finished_add_debug_package_deps:
                 deps = self._get_package_deps(pkg, debuginfo=True)
                 for i, req in deps:
@@ -784,7 +820,6 @@ class Gather(GatherBase):
                 continue

             debug_pkgs = []
-            pkg_in_lookaside = pkg.repoid in self.opts.lookaside_repos
             for i in candidates:
                 if pkg.arch != i.arch:
                     continue
@@ -792,7 +827,7 @@ class Gather(GatherBase):
                     # If it's not debugsource package or does not match name of
                     # the package, we don't want it in.
                     continue
-                if i.repoid in self.opts.lookaside_repos or pkg_in_lookaside:
+                if self.is_from_lookaside(i):
                     self._set_flag(i, PkgFlag.lookaside)
                 if i not in self.result_debug_packages:
                     added.add(i)
@@ -306,11 +306,6 @@ def write_tree_info(compose, arch, variant, timestamp=None, bi=None):
     if variant.type in ("addon",) or variant.is_empty:
         return

-    compose.log_debug(
-        "on arch '%s' looking at variant '%s' of type '%s'"
-        % (arch, variant, variant.type)
-    )
-
     if not timestamp:
         timestamp = int(time.time())
     else:
@@ -297,7 +297,7 @@ class BuildinstallPhase(PhaseBase):
                 "Unsupported buildinstall method: %s" % self.buildinstall_method
             )

-        for (variant, cmd) in commands:
+        for variant, cmd in commands:
             self.pool.add(BuildinstallThread(self.pool))
             self.pool.queue_put(
                 (self.compose, arch, variant, cmd, self.pkgset_phase)
@@ -364,9 +364,17 @@ BOOT_CONFIGS = [
     "EFI/BOOT/BOOTX64.conf",
     "EFI/BOOT/grub.cfg",
 ]
+BOOT_IMAGES = [
+    "images/efiboot.img",
+]


 def tweak_configs(path, volid, ks_file, configs=BOOT_CONFIGS, logger=None):
     """
     Put escaped volume ID and possibly kickstart file into the boot
     configuration files.
+    :returns: list of paths to modified config files
     """
     volid_escaped = volid.replace(" ", r"\x20").replace("\\", "\\\\")
     volid_escaped_2 = volid_escaped.replace("\\", "\\\\")
     found_configs = []
@@ -374,7 +382,6 @@ def tweak_configs(path, volid, ks_file, configs=BOOT_CONFIGS, logger=None):
         config_path = os.path.join(path, config)
         if not os.path.exists(config_path):
             continue
-        found_configs.append(config)

         with open(config_path, "r") as f:
             data = original_data = f.read()
@@ -394,8 +401,13 @@ def tweak_configs(path, volid, ks_file, configs=BOOT_CONFIGS, logger=None):
         with open(config_path, "w") as f:
             f.write(data)

-        if logger and data != original_data:
-            logger.info("Boot config %s changed" % config_path)
+        if data != original_data:
+            found_configs.append(config)
+            if logger:
+                # Generally lorax should create file with correct volume id
+                # already. If we don't have a kickstart, this function should
+                # be a no-op.
+                logger.info("Boot config %s changed" % config_path)

     return found_configs
@@ -434,31 +446,32 @@ def tweak_buildinstall(
     if kickstart_file and found_configs:
         shutil.copy2(kickstart_file, os.path.join(dst, "ks.cfg"))

-    images = [
-        os.path.join(tmp_dir, "images", "efiboot.img"),
-    ]
-    for image in images:
-        if not os.path.isfile(image):
-            continue
-
-        with iso.mount(
-            image,
-            logger=compose._logger,
-            use_guestmount=compose.conf.get("buildinstall_use_guestmount"),
-        ) as mount_tmp_dir:
-            for config in BOOT_CONFIGS:
-                config_path = os.path.join(tmp_dir, config)
-                config_in_image = os.path.join(mount_tmp_dir, config)
-
-                if os.path.isfile(config_in_image):
-                    cmd = [
-                        "cp",
-                        "-v",
-                        "--remove-destination",
-                        config_path,
-                        config_in_image,
-                    ]
-                    run(cmd)
+    images = [os.path.join(tmp_dir, img) for img in BOOT_IMAGES]
+    if found_configs:
+        for image in images:
+            if not os.path.isfile(image):
+                continue
+
+            with iso.mount(
+                image,
+                logger=compose._logger,
+                use_guestmount=compose.conf.get("buildinstall_use_guestmount"),
+            ) as mount_tmp_dir:
+                for config in found_configs:
+                    # Put each modified config file into the image (overwriting the
+                    # original).
+                    config_path = os.path.join(tmp_dir, config)
+                    config_in_image = os.path.join(mount_tmp_dir, config)
+
+                    if os.path.isfile(config_in_image):
+                        cmd = [
+                            "cp",
+                            "-v",
+                            "--remove-destination",
+                            config_path,
+                            config_in_image,
+                        ]
+                        run(cmd)

     # HACK: make buildinstall files world readable
     run("chmod -R a+rX %s" % shlex_quote(tmp_dir))
@@ -369,7 +369,7 @@ class CreateisoPhase(PhaseLoggerMixin, PhaseBase):
         if self.compose.notifier:
             self.compose.notifier.send("createiso-targets", deliverables=deliverables)

-        for (cmd, variant, arch) in commands:
+        for cmd, variant, arch in commands:
             self.pool.add(CreateIsoThread(self.pool))
             self.pool.queue_put((self.compose, cmd, variant, arch))
@@ -76,7 +76,7 @@ class ExtraIsosPhase(PhaseLoggerMixin, ConfigGuardedPhase, PhaseBase):
             for arch in sorted(arches):
                 commands.append((config, variant, arch))

-        for (config, variant, arch) in commands:
+        for config, variant, arch in commands:
             self.pool.add(ExtraIsosThread(self.pool, self.bi))
             self.pool.queue_put((self.compose, config, variant, arch))
@@ -90,7 +90,7 @@ class GatherPhase(PhaseBase):

         # check whether variants from configuration value
         # 'variant_as_lookaside' are correct
-        for (requiring, required) in variant_as_lookaside:
+        for requiring, required in variant_as_lookaside:
             if requiring in all_variants and required not in all_variants:
                 errors.append(
                     "variant_as_lookaside: variant %r doesn't exist but is "
@@ -99,7 +99,7 @@ class GatherPhase(PhaseBase):

         # check whether variants from configuration value
         # 'variant_as_lookaside' have same architectures
-        for (requiring, required) in variant_as_lookaside:
+        for requiring, required in variant_as_lookaside:
             if (
                 requiring in all_variants
                 and required in all_variants
@@ -235,7 +235,7 @@ def reuse_old_gather_packages(compose, arch, variant, package_sets, methods):
     if not hasattr(compose, "_gather_reused_variant_arch"):
         setattr(compose, "_gather_reused_variant_arch", [])
     variant_as_lookaside = compose.conf.get("variant_as_lookaside", [])
-    for (requiring, required) in variant_as_lookaside:
+    for requiring, required in variant_as_lookaside:
         if (
             requiring == variant.uid
             and (required, arch) not in compose._gather_reused_variant_arch
@@ -468,9 +468,7 @@ def gather_packages(compose, arch, variant, package_sets, fulltree_excludes=None
         )

     else:
-
         for source_name in ("module", "comps", "json"):
-
             packages, groups, filter_packages = get_variant_packages(
                 compose, arch, variant, source_name, package_sets
             )
@@ -575,7 +573,6 @@ def trim_packages(compose, arch, variant, pkg_map, parent_pkgs=None, remove_pkgs
     move_to_parent_pkgs = _mk_pkg_map()
     removed_pkgs = _mk_pkg_map()
     for pkg_type, pkgs in pkg_map.items():
-
         new_pkgs = []
         for pkg in pkgs:
             pkg_path = pkg["path"]
@@ -47,9 +47,15 @@ class FakePackage(object):

     @property
     def files(self):
-        return [
-            os.path.join(dirname, basename) for (_, dirname, basename) in self.pkg.files
-        ]
+        paths = []
+        # createrepo_c.Package.files is a tuple, but its length differs across
+        # versions. The constants define index at which the related value is
+        # located.
+        for entry in self.pkg.files:
+            paths.append(
+                os.path.join(entry[cr.FILE_ENTRY_PATH], entry[cr.FILE_ENTRY_NAME])
+            )
+        return paths

     @property
     def provides(self):
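The index constants keep the lookup stable even though the tuple layout changed between createrepo_c releases; a hedged sketch of the same access pattern outside Pungi (the RPM path is a placeholder):

```python
import os

import createrepo_c as cr

pkg = cr.package_from_rpm("example.rpm")  # placeholder path
for entry in pkg.files:
    # entry may carry extra fields depending on the createrepo_c version;
    # indexing via the constants stays correct either way.
    print(os.path.join(entry[cr.FILE_ENTRY_PATH], entry[cr.FILE_ENTRY_NAME]))
```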
@@ -25,6 +25,7 @@ from productmd.rpms import Rpms
 # results will be pulled into the compose.
 EXTENSIONS = {
     "docker": ["tar.gz", "tar.xz"],
+    "iso": ["iso"],
     "liveimg-squashfs": ["liveimg.squashfs"],
     "qcow": ["qcow"],
     "qcow2": ["qcow2"],
@@ -39,6 +40,7 @@ EXTENSIONS = {
     "vdi": ["vdi"],
     "vmdk": ["vmdk"],
     "vpc": ["vhd"],
+    "vhd-compressed": ["vhd.gz", "vhd.xz"],
     "vsphere-ova": ["vsphere.ova"],
 }
@@ -117,7 +117,7 @@ class LiveImagesPhase(

             commands.append((cmd, variant, arch))

-        for (cmd, variant, arch) in commands:
+        for cmd, variant, arch in commands:
             self.pool.add(CreateLiveImageThread(self.pool))
             self.pool.queue_put((self.compose, cmd, variant, arch))
@@ -27,6 +27,35 @@ class OSBuildPhase(
             arches = set(image_conf["arches"]) & arches
         return sorted(arches)

+    @staticmethod
+    def _get_repo_urls(compose, repos, arch="$basearch"):
+        """
+        Get list of repos with resolved repo URLs. Preserve repos defined
+        as dicts.
+        """
+        resolved_repos = []
+
+        for repo in repos:
+            if isinstance(repo, dict):
+                try:
+                    url = repo["baseurl"]
+                except KeyError:
+                    raise RuntimeError(
+                        "`baseurl` is required in repo dict %s" % str(repo)
+                    )
+                url = util.get_repo_url(compose, url, arch=arch)
+                if url is None:
+                    raise RuntimeError("Failed to resolve repo URL for %s" % str(repo))
+                repo["baseurl"] = url
+                resolved_repos.append(repo)
+            else:
+                repo = util.get_repo_url(compose, repo, arch=arch)
+                if repo is None:
+                    raise RuntimeError("Failed to resolve repo URL for %s" % repo)
+                resolved_repos.append(repo)
+
+        return resolved_repos
+
     def _get_repo(self, image_conf, variant):
         """
         Get a list of repos. First included are those explicitly listed in
@@ -38,7 +67,7 @@ class OSBuildPhase(
         if not variant.is_empty and variant.uid not in repos:
             repos.append(variant.uid)

-        return util.get_repo_urls(self.compose, repos, arch="$arch")
+        return OSBuildPhase._get_repo_urls(self.compose, repos, arch="$arch")

     def run(self):
         for variant in self.compose.get_variants():
@@ -183,10 +212,18 @@ class RunOSBuildThread(WorkerThread):
                 # image_dir is absolute path to which the image should be copied.
                 # We also need the same path as relative to compose directory for
                 # including in the metadata.
-                image_dir = compose.paths.compose.image_dir(variant) % {"arch": arch}
-                rel_image_dir = compose.paths.compose.image_dir(variant, relative=True) % {
-                    "arch": arch
-                }
+                if archive["type_name"] == "iso":
+                    # If the produced image is actually an ISO, it should go to
+                    # iso/ subdirectory.
+                    image_dir = compose.paths.compose.iso_dir(arch, variant)
+                    rel_image_dir = compose.paths.compose.iso_dir(
+                        arch, variant, relative=True
+                    )
+                else:
+                    image_dir = compose.paths.compose.image_dir(variant) % {"arch": arch}
+                    rel_image_dir = compose.paths.compose.image_dir(
+                        variant, relative=True
+                    ) % {"arch": arch}
                 util.makedirs(image_dir)

                 image_dest = os.path.join(image_dir, archive["filename"])
@@ -209,7 +246,7 @@ class RunOSBuildThread(WorkerThread):

                 # Update image manifest
                 img = Image(compose.im)
-                img.type = archive["type_name"]
+                img.type = archive["type_name"] if archive["type_name"] != "iso" else "dvd"
                 img.format = suffix
                 img.path = os.path.join(rel_image_dir, archive["filename"])
                 img.mtime = util.get_mtime(image_dest)
@@ -38,12 +38,17 @@ from pungi.phases.createrepo import add_modular_metadata

 def populate_arch_pkgsets(compose, path_prefix, global_pkgset):
     result = {}
-    exclusive_noarch = compose.conf["pkgset_exclusive_arch_considers_noarch"]
     for arch in compose.get_arches():
         compose.log_info("Populating package set for arch: %s", arch)
         is_multilib = is_arch_multilib(compose.conf, arch)
         arches = get_valid_arches(arch, is_multilib, add_src=True)
-        pkgset = global_pkgset.subset(arch, arches, exclusive_noarch=exclusive_noarch)
+        pkgset = global_pkgset.subset(
+            arch,
+            arches,
+            exclusive_noarch=compose.conf["pkgset_exclusive_arch_considers_noarch"],
+            inherit_to_noarch=compose.conf["pkgset_inherit_exclusive_arch_to_noarch"],
+        )
         pkgset.save_file_list(
             compose.paths.work.package_list(arch=arch, pkgset=global_pkgset),
             remove_path_prefix=path_prefix,
@@ -203,16 +203,31 @@ class PackageSetBase(kobo.log.LoggingBase):

         return self.rpms_by_arch

-    def subset(self, primary_arch, arch_list, exclusive_noarch=True):
+    def subset(
+        self, primary_arch, arch_list, exclusive_noarch=True, inherit_to_noarch=True
+    ):
         """Create a subset of this package set that only includes
         packages compatible with"""
         pkgset = PackageSetBase(
             self.name, self.sigkey_ordering, logger=self._logger, arches=arch_list
         )
-        pkgset.merge(self, primary_arch, arch_list, exclusive_noarch=exclusive_noarch)
+        pkgset.merge(
+            self,
+            primary_arch,
+            arch_list,
+            exclusive_noarch=exclusive_noarch,
+            inherit_to_noarch=inherit_to_noarch,
+        )
         return pkgset

-    def merge(self, other, primary_arch, arch_list, exclusive_noarch=True):
+    def merge(
+        self,
+        other,
+        primary_arch,
+        arch_list,
+        exclusive_noarch=True,
+        inherit_to_noarch=True,
+    ):
         """
         Merge ``other`` package set into this instance.
         """
@@ -251,7 +266,7 @@ class PackageSetBase(kobo.log.LoggingBase):
             if i.file_path in self.file_cache:
                 # TODO: test if it really works
                 continue
-            if exclusivearch_list and arch == "noarch":
+            if inherit_to_noarch and exclusivearch_list and arch == "noarch":
                 if is_excluded(i, exclusivearch_list, logger=self._logger):
                     continue
@@ -318,6 +333,11 @@ class FilelistPackageSet(PackageSetBase):
         return result


+# This is a marker to indicate package set with only extra builds/tasks and no
+# koji tag.
+MISSING_KOJI_TAG = object()
+
+
 class KojiPackageSet(PackageSetBase):
     def __init__(
         self,
@@ -371,7 +391,7 @@ class KojiPackageSet(PackageSetBase):
         :param int signed_packages_wait: How long to wait between search attemts.
         """
         super(KojiPackageSet, self).__init__(
-            name,
+            name if name != MISSING_KOJI_TAG else "no-tag",
             sigkey_ordering=sigkey_ordering,
             arches=arches,
             logger=logger,
@@ -576,7 +596,9 @@ class KojiPackageSet(PackageSetBase):
             inherit,
         )
         self.log_info("[BEGIN] %s" % msg)
-        rpms, builds = self.get_latest_rpms(tag, event, inherit=inherit)
+        rpms, builds = [], []
+        if tag != MISSING_KOJI_TAG:
+            rpms, builds = self.get_latest_rpms(tag, event, inherit=inherit)
         extra_rpms, extra_builds = self.get_extra_rpms()
         rpms += extra_rpms
         builds += extra_builds
@@ -681,6 +703,15 @@ class KojiPackageSet(PackageSetBase):
         :param include_packages: an iterable of tuples (package name, arch) that should
             be included.
         """
+        if len(self.sigkey_ordering) > 1 and (
+            None in self.sigkey_ordering or "" in self.sigkey_ordering
+        ):
+            self.log_warning(
+                "Stop writing reuse file as unsigned packages are allowed "
+                "in the compose."
+            )
+            return
+
         reuse_file = compose.paths.work.pkgset_reuse_file(self.name)
         self.log_info("Writing pkgset reuse file: %s" % reuse_file)
         try:
@@ -697,6 +728,12 @@ class KojiPackageSet(PackageSetBase):
                     "srpms_by_name": self.srpms_by_name,
                     "extra_builds": self.extra_builds,
                     "include_packages": include_packages,
+                    "inherit_to_noarch": compose.conf[
+                        "pkgset_inherit_exclusive_arch_to_noarch"
+                    ],
+                    "exclusive_noarch": compose.conf[
+                        "pkgset_exclusive_arch_considers_noarch"
+                    ],
                 },
                 f,
                 protocol=pickle.HIGHEST_PROTOCOL,
@@ -791,6 +828,8 @@ class KojiPackageSet(PackageSetBase):
             self.log_debug("Failed to load reuse file: %s" % str(e))
             return False

+        inherit_to_noarch = compose.conf["pkgset_inherit_exclusive_arch_to_noarch"]
+        exclusive_noarch = compose.conf["pkgset_exclusive_arch_considers_noarch"]
         if (
             reuse_data["allow_invalid_sigkeys"] == self._allow_invalid_sigkeys
             and reuse_data["packages"] == self.packages
@@ -798,6 +837,10 @@ class KojiPackageSet(PackageSetBase):
             and reuse_data["extra_builds"] == self.extra_builds
             and reuse_data["sigkeys"] == self.sigkey_ordering
             and reuse_data["include_packages"] == include_packages
+            # If the value is not present in reuse data, the compose was
+            # generated with older version of Pungi. Best to not reuse.
+            and reuse_data.get("inherit_to_noarch") == inherit_to_noarch
+            and reuse_data.get("exclusive_noarch") == exclusive_noarch
         ):
             self.log_info("Copying repo data for reuse: %s" % old_repo_dir)
             copy_all(old_repo_dir, repo_dir)
@@ -791,17 +791,23 @@ def populate_global_pkgset(compose, koji_wrapper, path_prefix, event):

     pkgsets = []

     extra_builds = force_list(compose.conf.get("pkgset_koji_builds", []))
     extra_tasks = force_list(compose.conf.get("pkgset_koji_scratch_tasks", []))

+    if not pkgset_koji_tags and (extra_builds or extra_tasks):
+        # We have extra packages to pull in, but no tag to merge them with.
+        compose_tags.append(pungi.phases.pkgset.pkgsets.MISSING_KOJI_TAG)
+        pkgset_koji_tags.append(pungi.phases.pkgset.pkgsets.MISSING_KOJI_TAG)
+
     # Get package set for each compose tag and merge it to global package
     # list. Also prepare per-variant pkgset, because we do not have list
     # of binary RPMs in module definition - there is just list of SRPMs.
     for compose_tag in compose_tags:
         compose.log_info("Loading package set for tag %s", compose_tag)
+        kwargs = {}
+        if compose_tag in pkgset_koji_tags:
+            extra_builds = force_list(compose.conf.get("pkgset_koji_builds", []))
+            extra_tasks = force_list(compose.conf.get("pkgset_koji_scratch_tasks", []))
+        else:
+            extra_builds = []
+            extra_tasks = []
+        kwargs["extra_builds"] = extra_builds
+        kwargs["extra_tasks"] = extra_tasks

         pkgset = pungi.phases.pkgset.pkgsets.KojiPackageSet(
             compose_tag,
@@ -813,10 +819,9 @@ def populate_global_pkgset(compose, koji_wrapper, path_prefix, event):
             allow_invalid_sigkeys=allow_invalid_sigkeys,
             populate_only_packages=populate_only_packages_to_gather,
             cache_region=compose.cache_region,
-            extra_builds=extra_builds,
-            extra_tasks=extra_tasks,
             signed_packages_retries=compose.conf["signed_packages_retries"],
             signed_packages_wait=compose.conf["signed_packages_wait"],
+            **kwargs
         )

         # Check if we have cache for this tag from previous compose. If so, use
@@ -880,7 +885,6 @@ def populate_global_pkgset(compose, koji_wrapper, path_prefix, event):
         )
         for variant in compose.all_variants.values():
             if compose_tag in variant_tags[variant]:
-
                 # If it's a modular tag, store the package set for the module.
                 for nsvc, koji_tag in variant.module_uid_to_koji_tag.items():
                     if compose_tag == koji_tag:
@@ -171,32 +171,11 @@ def main():
     group.add_argument(
         "--offline", action="store_true", help="Do not resolve git references."
     )
-    parser.add_argument(
-        "--multi",
-        metavar="DIR",
-        help=(
-            "Treat source as config for pungi-orchestrate and store dump into "
-            "given directory."
-        ),
-    )

     args = parser.parse_args()

     defines = config_utils.extract_defines(args.define)

-    if args.multi:
-        if len(args.sources) > 1:
-            parser.error("Only one multi config can be specified.")
-
-        return dump_multi_config(
-            args.sources[0],
-            dest=args.multi,
-            defines=defines,
-            just_dump=args.just_dump,
-            event=args.freeze_event,
-            offline=args.offline,
-        )
-
     return process_file(
         args.sources,
         defines=defines,
@@ -14,6 +14,9 @@ def send(cmd, data):
     topic = "compose.%s" % cmd.replace("-", ".").lower()
     try:
         msg = fedora_messaging.api.Message(topic="pungi.{}".format(topic), body=data)
+        if cmd == "ostree":
+            # https://pagure.io/fedora-infrastructure/issue/10899
+            msg.priority = 3
         fedora_messaging.api.publish(msg)
     except fedora_messaging.exceptions.PublishReturned as e:
         print("Fedora Messaging broker rejected message %s: %s" % (msg.id, e))
@@ -319,7 +319,6 @@ def get_arguments(config):


 def main():
-
     config = pungi.config.Config()
     opts = get_arguments(config)
@@ -300,7 +300,12 @@ def main():

     if opts.target_dir:
         compose_dir = Compose.get_compose_dir(
-            opts.target_dir, conf, compose_type=compose_type, compose_label=opts.label
+            opts.target_dir,
+            conf,
+            compose_type=compose_type,
+            compose_label=opts.label,
+            parent_compose_ids=opts.parent_compose_id,
+            respin_of=opts.respin_of,
         )
     else:
         compose_dir = opts.compose_dir
@@ -380,6 +385,14 @@ def run_compose(
     compose.log_info("Current timezone offset: %s" % pungi.util.get_tz_offset())
     compose.log_info("COMPOSE_ID=%s" % compose.compose_id)

+    installed_pkgs_log = compose.paths.log.log_file("global", "installed-pkgs")
+    compose.log_info("Logging installed packages to %s" % installed_pkgs_log)
+    try:
+        with open(installed_pkgs_log, "w") as f:
+            subprocess.Popen(["rpm", "-qa"], stdout=f)
+    except Exception as e:
+        compose.log_warning("Failed to log installed packages: %s" % str(e))
+
     compose.read_variants()

     # dump the config file
@@ -183,15 +183,16 @@ class CompsFilter(object):
         """
         all_groups = self.tree.xpath("/comps/group/id/text()") + lookaside_groups
         for environment in self.tree.xpath("/comps/environment"):
-            for group in environment.xpath("grouplist/groupid"):
-                if group.text not in all_groups:
-                    group.getparent().remove(group)
+            for parent_tag in ("grouplist", "optionlist"):
+                for group in environment.xpath("%s/groupid" % parent_tag):
+                    if group.text not in all_groups:
+                        group.getparent().remove(group)

-            for group in environment.xpath("grouplist/groupid[@arch]"):
-                value = group.attrib.get("arch")
-                values = [v for v in re.split(r"[, ]+", value) if v]
-                if arch not in values:
-                    group.getparent().remove(group)
+                for group in environment.xpath("%s/groupid[@arch]" % parent_tag):
+                    value = group.attrib.get("arch")
+                    values = [v for v in re.split(r"[, ]+", value) if v]
+                    if arch not in values:
+                        group.getparent().remove(group)

     def remove_empty_environments(self):
         """
@ -260,20 +260,23 @@ def get_isohybrid_cmd(iso_path, arch):
     return cmd
 
 
-def get_manifest_cmd(iso_name, xorriso=False):
+def get_manifest_cmd(iso_name, xorriso=False, output_file=None):
+    if not output_file:
+        output_file = "%s.manifest" % iso_name
+
     if xorriso:
         return """xorriso -dev %s --find |
         tail -n+2 |
         tr -d "'" |
         cut -c2- |
-        sort >> %s.manifest""" % (
-            shlex_quote(iso_name),
+        sort >> %s""" % (
             shlex_quote(iso_name),
+            shlex_quote(output_file),
         )
     else:
-        return "isoinfo -R -f -i %s | grep -v '/TRANS.TBL$' | sort >> %s.manifest" % (
-            shlex_quote(iso_name),
+        return "isoinfo -R -f -i %s | grep -v '/TRANS.TBL$' | sort >> %s" % (
             shlex_quote(iso_name),
+            shlex_quote(output_file),
         )
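Usage sketch for the new `output_file` parameter; this assumes `get_manifest_cmd` is importable from `pungi.wrappers.iso` and that the returned shell command is executed elsewhere:

    from pungi.wrappers.iso import get_manifest_cmd  # assumed import path

    # Default behaviour is unchanged: the manifest lands next to the ISO.
    print(get_manifest_cmd("boot.iso"))
    # New: redirect the manifest to an explicit path instead.
    print(get_manifest_cmd("boot.iso", xorriso=True, output_file="/tmp/boot.manifest"))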
@ -276,7 +276,6 @@ class Variant(object):
         modules=None,
         modular_koji_tags=None,
     ):
 
         environments = environments or []
         buildinstallpackages = buildinstallpackages or []
@ -1,705 +0,0 @@
(whole file removed: pungi_utils/orchestrator.py, the implementation of the pungi-orchestrate tool)

# -*- coding: utf-8 -*-

from __future__ import print_function

import argparse
import atexit
import errno
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import threading
from collections import namedtuple

import kobo.conf
import kobo.log
import productmd
from kobo import shortcuts
from six.moves import configparser, shlex_quote

import pungi.util
from pungi.compose import get_compose_dir
from pungi.linker import linker_pool
from pungi.phases.pkgset.sources.source_koji import get_koji_event_raw
from pungi.util import find_old_compose, parse_koji_event, temp_dir
from pungi.wrappers.kojiwrapper import KojiWrapper


Config = namedtuple(
    "Config",
    [
        # Path to directory with the compose
        "target",
        "compose_type",
        "label",
        # Path to the selected old compose that will be reused
        "old_compose",
        # Path to directory with config file copies
        "config_dir",
        # Which koji event to use (if any)
        "event",
        # Additional arguments to pungi-koji executable
        "extra_args",
    ],
)

log = logging.getLogger(__name__)


class Status(object):
    # Ready to start
    READY = "READY"
    # Waiting for dependencies to finish.
    WAITING = "WAITING"
    # Part is currently running
    STARTED = "STARTED"
    # A dependency failed, this one will never start.
    BLOCKED = "BLOCKED"


class ComposePart(object):
    def __init__(self, name, config, just_phase=[], skip_phase=[], dependencies=[]):
        self.name = name
        self.config = config
        self.status = Status.WAITING if dependencies else Status.READY
        self.just_phase = just_phase
        self.skip_phase = skip_phase
        self.blocked_on = set(dependencies)
        self.depends_on = set(dependencies)
        self.path = None
        self.log_file = None
        self.failable = False

    def __str__(self):
        return self.name

    def __repr__(self):
        return (
            "ComposePart({0.name!r},"
            " {0.config!r},"
            " {0.status!r},"
            " just_phase={0.just_phase!r},"
            " skip_phase={0.skip_phase!r},"
            " dependencies={0.depends_on!r})"
        ).format(self)

    def refresh_status(self):
        """Refresh status of this part with the result of the compose. This
        should only be called once the compose finished.
        """
        try:
            with open(os.path.join(self.path, "STATUS")) as fh:
                self.status = fh.read().strip()
        except IOError as exc:
            log.error("Failed to update status of %s: %s", self.name, exc)
            log.error("Assuming %s is DOOMED", self.name)
            self.status = "DOOMED"

    def is_finished(self):
        return "FINISHED" in self.status

    def unblock_on(self, finished_part):
        """Update set of blockers for this part. If it's empty, mark us as ready."""
        self.blocked_on.discard(finished_part)
        if self.status == Status.WAITING and not self.blocked_on:
            log.debug("%s is ready to start", self)
            self.status = Status.READY

    def setup_start(self, global_config, parts):
        substitutions = dict(
            ("part-%s" % name, p.path) for name, p in parts.items() if p.is_finished()
        )
        substitutions["configdir"] = global_config.config_dir

        config = pungi.util.load_config(self.config)

        for f in config.opened_files:
            # apply substitutions
            fill_in_config_file(f, substitutions)

        self.status = Status.STARTED
        self.path = get_compose_dir(
            os.path.join(global_config.target, "parts"),
            config,
            compose_type=global_config.compose_type,
            compose_label=global_config.label,
        )
        self.log_file = os.path.join(global_config.target, "logs", "%s.log" % self.name)
        log.info("Starting %s in %s", self.name, self.path)

    def get_cmd(self, global_config):
        cmd = ["pungi-koji", "--config", self.config, "--compose-dir", self.path]
        cmd.append("--%s" % global_config.compose_type)
        if global_config.label:
            cmd.extend(["--label", global_config.label])
        for phase in self.just_phase:
            cmd.extend(["--just-phase", phase])
        for phase in self.skip_phase:
            cmd.extend(["--skip-phase", phase])
        if global_config.old_compose:
            cmd.extend(
                ["--old-compose", os.path.join(global_config.old_compose, "parts")]
            )
        if global_config.event:
            cmd.extend(["--koji-event", str(global_config.event)])
        if global_config.extra_args:
            cmd.extend(global_config.extra_args)
        cmd.extend(["--no-latest-link"])
        return cmd

    @classmethod
    def from_config(cls, config, section, config_dir):
        part = cls(
            name=section,
            config=os.path.join(config_dir, config.get(section, "config")),
            just_phase=_safe_get_list(config, section, "just_phase", []),
            skip_phase=_safe_get_list(config, section, "skip_phase", []),
            dependencies=_safe_get_list(config, section, "depends_on", []),
        )
        if config.has_option(section, "failable"):
            part.failable = config.getboolean(section, "failable")
        return part


def _safe_get_list(config, section, option, default=None):
    """Get a value from config parser. The result is split into a list on
    commas or spaces, and `default` is returned if the key does not exist.
    """
    if config.has_option(section, option):
        value = config.get(section, option)
        return [x.strip() for x in re.split(r"[, ]+", value) if x]
    return default


def fill_in_config_file(fp, substs):
    """Templating function. It works with Jinja2 style placeholders such as
    {{foo}}. Whitespace around the key name is fine. The file is modified in place.

    :param fp string: path to the file to process
    :param substs dict: a mapping for values to put into the file
    """

    def repl(match):
        try:
            return substs[match.group(1)]
        except KeyError as exc:
            raise RuntimeError(
                "Unknown placeholder %s in %s" % (exc, os.path.basename(fp))
            )

    with open(fp, "r") as f:
        contents = re.sub(r"{{ *([a-zA-Z-_]+) *}}", repl, f.read())
    with open(fp, "w") as f:
        f.write(contents)
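Aside: the removed `fill_in_config_file` helper implemented a tiny Jinja2-like substitution. A standalone re-sketch of the idea for reference (not the original code):

    import re

    def fill_in(text, substs):
        # Replace {{ key }} placeholders, tolerating whitespace around the key.
        return re.sub(r"{{ *([a-zA-Z-_]+) *}}", lambda m: substs[m.group(1)], text)

    print(fill_in("target = '{{ configdir }}/out'", {"configdir": "/srv/compose"}))
    # -> target = '/srv/compose/out'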
def start_part(global_config, parts, part):
    part.setup_start(global_config, parts)
    fh = open(part.log_file, "w")
    cmd = part.get_cmd(global_config)
    log.debug("Running command %r", " ".join(shlex_quote(x) for x in cmd))
    return subprocess.Popen(cmd, stdout=fh, stderr=subprocess.STDOUT)


def handle_finished(global_config, linker, parts, proc, finished_part):
    finished_part.refresh_status()
    log.info("%s finished with status %s", finished_part, finished_part.status)
    if proc.returncode == 0:
        # Success, unblock other parts...
        for part in parts.values():
            part.unblock_on(finished_part.name)
        # ...and link the results into final destination.
        copy_part(global_config, linker, finished_part)
        update_metadata(global_config, finished_part)
    else:
        # Failure, other stuff may be blocked.
        log.info("See details in %s", finished_part.log_file)
        block_on(parts, finished_part.name)


def copy_part(global_config, linker, part):
    c = productmd.Compose(part.path)
    for variant in c.info.variants:
        data_path = os.path.join(part.path, "compose", variant)
        link = os.path.join(global_config.target, "compose", variant)
        log.info("Hardlinking content %s -> %s", data_path, link)
        hardlink_dir(linker, data_path, link)


def hardlink_dir(linker, srcdir, dstdir):
    for root, dirs, files in os.walk(srcdir):
        root = os.path.relpath(root, srcdir)
        for f in files:
            src = os.path.normpath(os.path.join(srcdir, root, f))
            dst = os.path.normpath(os.path.join(dstdir, root, f))
            linker.queue_put((src, dst))
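Aside: `hardlink_dir` mirrors a tree by rebasing each file path from the source root onto the destination root. The core walk/relpath pattern in isolation:

    import os

    def collect_pairs(srcdir, dstdir):
        """Yield (src, dst) file pairs that mirror srcdir under dstdir."""
        for root, _dirs, files in os.walk(srcdir):
            rel = os.path.relpath(root, srcdir)
            for name in files:
                yield (
                    os.path.normpath(os.path.join(srcdir, rel, name)),
                    os.path.normpath(os.path.join(dstdir, rel, name)),
                )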
def update_metadata(global_config, part):
    part_metadata_dir = os.path.join(part.path, "compose", "metadata")
    final_metadata_dir = os.path.join(global_config.target, "compose", "metadata")
    for f in os.listdir(part_metadata_dir):
        # Load the metadata
        with open(os.path.join(part_metadata_dir, f)) as fh:
            part_metadata = json.load(fh)
        final_metadata = os.path.join(final_metadata_dir, f)
        if os.path.exists(final_metadata):
            # We already have this file, will need to merge.
            merge_metadata(final_metadata, part_metadata)
        else:
            # A new file, just copy it.
            copy_metadata(global_config, final_metadata, part_metadata)


def copy_metadata(global_config, final_metadata, source):
    """Copy file to final location, but update compose information."""
    with open(
        os.path.join(global_config.target, "compose/metadata/composeinfo.json")
    ) as f:
        composeinfo = json.load(f)
    try:
        source["payload"]["compose"].update(composeinfo["payload"]["compose"])
    except KeyError:
        # No [payload][compose], probably OSBS metadata
        pass
    with open(final_metadata, "w") as f:
        json.dump(source, f, indent=2, sort_keys=True)


def merge_metadata(final_metadata, source):
    with open(final_metadata) as f:
        metadata = json.load(f)

    try:
        key = {
            "productmd.composeinfo": "variants",
            "productmd.modules": "modules",
            "productmd.images": "images",
            "productmd.rpms": "rpms",
        }[source["header"]["type"]]
        # TODO what if multiple parts create images for the same variant
        metadata["payload"][key].update(source["payload"][key])
    except KeyError:
        # OSBS metadata, merge whole file
        metadata.update(source)
    with open(final_metadata, "w") as f:
        json.dump(metadata, f, indent=2, sort_keys=True)


def block_on(parts, name):
    """Part ``name`` failed, mark everything depending on it as blocked."""
    for part in parts.values():
        if name in part.blocked_on:
            log.warning("%s is blocked now and will not run", part)
            part.status = Status.BLOCKED
            block_on(parts, part.name)


def check_finished_processes(processes):
    """Walk through all active processes and check if something finished."""
    for proc in processes.keys():
        proc.poll()
        if proc.returncode is not None:
            yield proc, processes[proc]


def run_all(global_config, parts):
    # Mapping subprocess.Popen -> ComposePart
    processes = dict()
    remaining = set(p.name for p in parts.values() if not p.is_finished())

    with linker_pool("hardlink") as linker:
        while remaining or processes:
            update_status(global_config, parts)

            for proc, part in check_finished_processes(processes):
                del processes[proc]
                handle_finished(global_config, linker, parts, proc, part)

            # Start new available processes.
            for name in list(remaining):
                part = parts[name]
                # Start all ready parts
                if part.status == Status.READY:
                    remaining.remove(name)
                    processes[start_part(global_config, parts, part)] = part
                # Remove blocked parts from todo list
                elif part.status == Status.BLOCKED:
                    remaining.remove(part.name)

            # Wait for any child process to finish if there is any.
            if processes:
                pid, reason = os.wait()
                for proc in processes.keys():
                    # Set the return code for process that we caught by os.wait().
                    # Calling poll() on it would not set the return code properly
                    # since the value was already consumed by os.wait().
                    if proc.pid == pid:
                        proc.returncode = (reason >> 8) & 0xFF

    log.info("Waiting for linking to finish...")
    return update_status(global_config, parts)
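Aside: `run_all` decodes the raw status word from `os.wait()` by hand, since `Popen.poll()` cannot recover a status that `os.wait()` already consumed. A runnable illustration of that decoding (POSIX only, single child; `os.WEXITSTATUS` is the readable equivalent):

    import os
    import subprocess

    proc = subprocess.Popen(["true"])
    pid, status = os.wait()                 # reaps the child before poll() can
    assert pid == proc.pid
    proc.returncode = (status >> 8) & 0xFF  # exit code lives in the high byte
    print(proc.returncode, os.WEXITSTATUS(status))  # both print 0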
def get_target_dir(config, compose_info, label, reldir=""):
    """Find directory where this compose will be.

    @param reldir: if target path in config is relative, it will be resolved
                   against this directory
    """
    dir = os.path.realpath(os.path.join(reldir, config.get("general", "target")))
    target_dir = get_compose_dir(
        dir,
        compose_info,
        compose_type=config.get("general", "compose_type"),
        compose_label=label,
    )
    return target_dir


def setup_logging(debug=False):
    FORMAT = "%(asctime)s: %(levelname)s: %(message)s"
    level = logging.DEBUG if debug else logging.INFO
    kobo.log.add_stderr_logger(log, log_level=level, format=FORMAT)
    log.setLevel(level)


def compute_status(statuses):
    if any(map(lambda x: x[0] in ("STARTED", "WAITING"), statuses)):
        # If there is anything still running or waiting to start, the whole is
        # still running.
        return "STARTED"
    elif any(map(lambda x: x[0] in ("DOOMED", "BLOCKED") and not x[1], statuses)):
        # If any required part is doomed or blocked, the whole is doomed
        return "DOOMED"
    elif all(map(lambda x: x[0] == "FINISHED", statuses)):
        # If all parts are complete, the whole is complete
        return "FINISHED"
    else:
        return "FINISHED_INCOMPLETE"
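Aside: `compute_status` reduces a set of `(status, failable)` pairs to one overall state. The same reduction written out on a hypothetical input:

    statuses = {("FINISHED", False), ("DOOMED", True)}  # one failable part doomed

    if any(s in ("STARTED", "WAITING") for s, _ in statuses):
        overall = "STARTED"
    elif any(s in ("DOOMED", "BLOCKED") and not failable for s, failable in statuses):
        overall = "DOOMED"
    elif all(s == "FINISHED" for s, _ in statuses):
        overall = "FINISHED"
    else:
        overall = "FINISHED_INCOMPLETE"

    print(overall)  # FINISHED_INCOMPLETE: the failed part was failable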
def update_status(global_config, parts):
    log.debug("Updating status metadata")
    metadata = {}
    statuses = set()
    for part in parts.values():
        metadata[part.name] = {"status": part.status, "path": part.path}
        statuses.add((part.status, part.failable))
    metadata_path = os.path.join(
        global_config.target, "compose", "metadata", "parts.json"
    )
    with open(metadata_path, "w") as fh:
        json.dump(metadata, fh, indent=2, sort_keys=True, separators=(",", ": "))

    status = compute_status(statuses)
    log.info("Overall status is %s", status)
    with open(os.path.join(global_config.target, "STATUS"), "w") as fh:
        fh.write(status)

    return status != "DOOMED"


def prepare_compose_dir(config, args, main_config_file, compose_info):
    if not hasattr(args, "compose_path"):
        # Creating a brand new compose
        target_dir = get_target_dir(
            config, compose_info, args.label, reldir=os.path.dirname(main_config_file)
        )
        for dir in ("logs", "parts", "compose/metadata", "work/global"):
            try:
                os.makedirs(os.path.join(target_dir, dir))
            except OSError as exc:
                if exc.errno != errno.EEXIST:
                    raise
        with open(os.path.join(target_dir, "STATUS"), "w") as fh:
            fh.write("STARTED")
        # Copy initial composeinfo for new compose
        shutil.copy(
            os.path.join(target_dir, "work/global/composeinfo-base.json"),
            os.path.join(target_dir, "compose/metadata/composeinfo.json"),
        )
    else:
        # Restarting a particular compose
        target_dir = args.compose_path

    return target_dir


def load_parts_metadata(global_config):
    parts_metadata = os.path.join(global_config.target, "compose/metadata/parts.json")
    with open(parts_metadata) as f:
        return json.load(f)


def setup_for_restart(global_config, parts, to_restart):
    has_stuff_to_do = False
    metadata = load_parts_metadata(global_config)
    for key in metadata:
        # Update state to match what is on disk
        log.debug(
            "Reusing %s (%s) from %s",
            key,
            metadata[key]["status"],
            metadata[key]["path"],
        )
        parts[key].status = metadata[key]["status"]
        parts[key].path = metadata[key]["path"]
    for key in to_restart:
        # Set restarted parts to run again
        parts[key].status = Status.WAITING
        parts[key].path = None

    for key in to_restart:
        # Remove blockers that are already finished
        for blocker in list(parts[key].blocked_on):
            if parts[blocker].is_finished():
                parts[key].blocked_on.discard(blocker)
        if not parts[key].blocked_on:
            log.debug("Part %s in not blocked", key)
            # Nothing blocks it; let's go
            parts[key].status = Status.READY
            has_stuff_to_do = True

    if not has_stuff_to_do:
        raise RuntimeError("All restarted parts are blocked. Nothing to do.")


def run_kinit(config):
    if not config.getboolean("general", "kerberos"):
        return

    keytab = config.get("general", "kerberos_keytab")
    principal = config.get("general", "kerberos_principal")

    fd, fname = tempfile.mkstemp(prefix="krb5cc_pungi-orchestrate_")
    os.close(fd)
    os.environ["KRB5CCNAME"] = fname
    shortcuts.run(["kinit", "-k", "-t", keytab, principal])
    log.debug("Created a kerberos ticket for %s", principal)

    atexit.register(os.remove, fname)


def get_compose_data(compose_path):
    try:
        compose = productmd.compose.Compose(compose_path)
        data = {
            "compose_id": compose.info.compose.id,
            "compose_date": compose.info.compose.date,
            "compose_type": compose.info.compose.type,
            "compose_respin": str(compose.info.compose.respin),
            "compose_label": compose.info.compose.label,
            "release_id": compose.info.release_id,
            "release_name": compose.info.release.name,
            "release_short": compose.info.release.short,
            "release_version": compose.info.release.version,
            "release_type": compose.info.release.type,
            "release_is_layered": compose.info.release.is_layered,
        }
        if compose.info.release.is_layered:
            data.update(
                {
                    "base_product_name": compose.info.base_product.name,
                    "base_product_short": compose.info.base_product.short,
                    "base_product_version": compose.info.base_product.version,
                    "base_product_type": compose.info.base_product.type,
                }
            )
        return data
    except Exception:
        return {}


def get_script_env(compose_path):
    env = os.environ.copy()
    env["COMPOSE_PATH"] = compose_path
    for key, value in get_compose_data(compose_path).items():
        if isinstance(value, bool):
            env[key.upper()] = "YES" if value else ""
        else:
            env[key.upper()] = str(value) if value else ""
    return env


def run_scripts(prefix, compose_dir, scripts):
    env = get_script_env(compose_dir)
    for idx, script in enumerate(scripts.strip().splitlines()):
        command = script.strip()
        logfile = os.path.join(compose_dir, "logs", "%s%s.log" % (prefix, idx))
        log.debug("Running command: %r", command)
        log.debug("See output in %s", logfile)
        shortcuts.run(command, env=env, logfile=logfile)


def try_translate_path(parts, path):
    translation = []
    for part in parts.values():
        conf = pungi.util.load_config(part.config)
        translation.extend(conf.get("translate_paths", []))
    return pungi.util.translate_path_raw(translation, path)


def send_notification(compose_dir, command, parts):
    if not command:
        return
    from pungi.notifier import PungiNotifier

    data = get_compose_data(compose_dir)
    data["location"] = try_translate_path(parts, compose_dir)
    notifier = PungiNotifier([command])
    with open(os.path.join(compose_dir, "STATUS")) as f:
        status = f.read().strip()
    notifier.send("status-change", workdir=compose_dir, status=status, **data)


def setup_progress_monitor(global_config, parts):
    """Update configuration so that each part send notifications about its
    progress to the orchestrator.

    There is a file to which the notification is written. The orchestrator is
    reading it and mapping the entries to particular parts. The path to this
    file is stored in an environment variable.
    """
    tmp_file = tempfile.NamedTemporaryFile(prefix="pungi-progress-monitor_")
    os.environ["_PUNGI_ORCHESTRATOR_PROGRESS_MONITOR"] = tmp_file.name
    atexit.register(os.remove, tmp_file.name)

    global_config.extra_args.append(
        "--notification-script=pungi-notification-report-progress"
    )

    def reader():
        while True:
            line = tmp_file.readline()
            if not line:
                time.sleep(0.1)
                continue
            path, msg = line.split(":", 1)
            for part in parts:
                if parts[part].path == os.path.dirname(path):
                    log.debug("%s: %s", part, msg.strip())
                    break

    monitor = threading.Thread(target=reader)
    monitor.daemon = True
    monitor.start()


def run(work_dir, main_config_file, args):
    config_dir = os.path.join(work_dir, "config")
    shutil.copytree(os.path.dirname(main_config_file), config_dir)

    # Read main config
    parser = configparser.RawConfigParser(
        defaults={
            "kerberos": "false",
            "pre_compose_script": "",
            "post_compose_script": "",
            "notification_script": "",
        }
    )
    parser.read(main_config_file)

    # Create kerberos ticket
    run_kinit(parser)

    compose_info = dict(parser.items("general"))
    compose_type = parser.get("general", "compose_type")

    target_dir = prepare_compose_dir(parser, args, main_config_file, compose_info)
    kobo.log.add_file_logger(log, os.path.join(target_dir, "logs", "orchestrator.log"))
    log.info("Composing %s", target_dir)

    run_scripts("pre_compose_", target_dir, parser.get("general", "pre_compose_script"))

    old_compose = find_old_compose(
        os.path.dirname(target_dir),
        compose_info["release_short"],
        compose_info["release_version"],
        "",
    )
    if old_compose:
        log.info("Reusing old compose %s", old_compose)

    global_config = Config(
        target=target_dir,
        compose_type=compose_type,
        label=args.label,
        old_compose=old_compose,
        config_dir=os.path.dirname(main_config_file),
        event=args.koji_event,
        extra_args=_safe_get_list(parser, "general", "extra_args"),
    )

    if not global_config.event and parser.has_option("general", "koji_profile"):
        koji_wrapper = KojiWrapper(parser.get("general", "koji_profile"))
        event_file = os.path.join(global_config.target, "work/global/koji-event")
        result = get_koji_event_raw(koji_wrapper, None, event_file)
        global_config = global_config._replace(event=result["id"])

    parts = {}
    for section in parser.sections():
        if section == "general":
            continue
        parts[section] = ComposePart.from_config(parser, section, config_dir)

    if hasattr(args, "part"):
        setup_for_restart(global_config, parts, args.part)

    setup_progress_monitor(global_config, parts)

    send_notification(target_dir, parser.get("general", "notification_script"), parts)

    retcode = run_all(global_config, parts)

    if retcode:
        # Only run the script if we are not doomed.
        run_scripts(
            "post_compose_", target_dir, parser.get("general", "post_compose_script")
        )

    send_notification(target_dir, parser.get("general", "notification_script"), parts)

    return retcode


def parse_args(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", action="store_true")
    parser.add_argument("--koji-event", metavar="ID", type=parse_koji_event)
    subparsers = parser.add_subparsers()
    start = subparsers.add_parser("start")
    start.add_argument("config", metavar="CONFIG")
    start.add_argument("--label")

    restart = subparsers.add_parser("restart")
    restart.add_argument("config", metavar="CONFIG")
    restart.add_argument("compose_path", metavar="COMPOSE_PATH")
    restart.add_argument(
        "part", metavar="PART", nargs="*", help="which parts to restart"
    )
    restart.add_argument("--label")

    return parser.parse_args(argv)


def main(argv=None):
    args = parse_args(argv)
    setup_logging(args.debug)

    main_config_file = os.path.abspath(args.config)

    with temp_dir() as work_dir:
        try:
            if not run(work_dir, main_config_file, args):
                sys.exit(1)
        except Exception:
            log.exception("Unhandled exception!")
            sys.exit(1)
@ -1,7 +1,6 @@
 # Some packages must be installed via dnf/yum first, see doc/contributing.rst
 dict.sorted
 dogpile.cache
 fedmsg
 funcsigs
 jsonschema
 kobo
setup.py
@ -5,14 +5,9 @@
 import os
 import glob
 
-import distutils.command.sdist
 from setuptools import setup
 
 
-# override default tarball format with bzip2
-distutils.command.sdist.sdist.default_format = {"posix": "bztar"}
-
-
 # recursively scan for python modules to be included
 package_root_dirs = ["pungi", "pungi_utils"]
 packages = set()
@ -25,7 +20,7 @@ packages = sorted(packages)
 
 setup(
     name="pungi",
-    version="4.3.6",
+    version="4.4.0",
     description="Distribution compose tool",
     url="https://pagure.io/pungi",
     author="Dennis Gilmore",
@ -41,7 +36,6 @@ setup(
             "pungi-patch-iso = pungi.scripts.patch_iso:cli_main",
             "pungi-make-ostree = pungi.ostree:main",
             "pungi-notification-report-progress = pungi.scripts.report_progress:main",
-            "pungi-orchestrate = pungi_utils.orchestrator:main",
             "pungi-wait-for-signed-ostree-handler = pungi.scripts.wait_for_signed_ostree_handler:main",  # noqa: E501
             "pungi-koji = pungi.scripts.pungi_koji:cli_main",
             "pungi-gather = pungi.scripts.pungi_gather:cli_main",
@ -51,6 +45,7 @@ setup(
     },
     scripts=["contrib/yum-dnf-compare/pungi-compare-depsolving"],
     data_files=[
+        ("/usr/lib/tmpfiles.d", glob.glob("contrib/tmpfiles.d/*.conf")),
         ("/usr/share/pungi", glob.glob("share/*.xsl")),
         ("/usr/share/pungi", glob.glob("share/*.ks")),
         ("/usr/share/pungi", glob.glob("share/*.dtd")),
@ -108,6 +108,7 @@
       <groupid>core</groupid>
     </grouplist>
     <optionlist>
+      <groupid arch="x86_64">standard</groupid>
     </optionlist>
   </environment>
@ -272,7 +272,7 @@ class DummyCompose(object):
         return tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=self.topdir)
 
 
-def touch(path, content=None):
+def touch(path, content=None, mode=None):
     """Helper utility that creates an dummy file in given location. Directories
     will be created."""
     content = content or (path + "\n")
@ -284,6 +284,8 @@ def touch(path, content=None):
         content = content.encode()
     with open(path, "wb") as f:
         f.write(content)
+    if mode:
+        os.chmod(path, mode)
     return path
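Usage sketch for the extended test helper; this assumes it is run from within pungi's test suite, where `tests.helpers` is importable:

    from tests import helpers

    # Create a dummy executable script; mode is applied after the write.
    path = helpers.touch("/tmp/pungi-tests/script.sh", "#!/bin/sh\n", mode=0o755)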
@ -628,6 +628,7 @@ class ComposeTestCase(unittest.TestCase):
         ci_copy = dict(self.ci_json)
         ci_copy["header"]["version"] = "1.2"
         mocked_response = mock.MagicMock()
+        mocked_response.status_code = 200
         mocked_response.text = json.dumps(self.ci_json)
         mocked_requests.post.return_value = mocked_response
@ -811,6 +812,7 @@ class TracebackTest(unittest.TestCase):
 
 
 class RetryRequestTest(unittest.TestCase):
+    @mock.patch("time.sleep", new=lambda x: x)
     @mock.patch("pungi.compose.requests")
     def test_retry_timeout(self, mocked_requests):
         mocked_requests.post.side_effect = [
@ -827,3 +829,17 @@ class RetryRequestTest(unittest.TestCase):
             ],
         )
         self.assertEqual(rv.status_code, 200)
+
+    @mock.patch("pungi.compose.requests")
+    def test_no_retry_on_client_error(self, mocked_requests):
+        mocked_requests.post.side_effect = [
+            mock.Mock(status_code=400, json=lambda: {"message": "You made a mistake"}),
+        ]
+        url = "http://locahost/api/1/composes/"
+        with self.assertRaises(RuntimeError):
+            retry_request("post", url)
+
+        self.assertEqual(
+            mocked_requests.mock_calls,
+            [mock.call.post(url, json=None, auth=None)],
+        )
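The new test pins down the retry policy: a 4xx response raises immediately instead of being retried. A hedged standalone sketch of such a policy (hypothetical helper, not pungi's actual `retry_request`):

    import time
    import requests

    def retry_request(method, url, tries=3):
        for attempt in range(tries):
            response = requests.request(method, url)
            if response.status_code < 500:
                break                   # success or client error: stop retrying
            time.sleep(2 ** attempt)    # back off on server errors only
        if response.status_code >= 400:
            raise RuntimeError("%s failed with %s" % (url, response.status_code))
        return response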
@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 
 import mock
+from parameterized import parameterized
 
 import os
 from six.moves import StringIO
@ -391,3 +392,27 @@ class CreateIsoScriptTest(helpers.PungiTestCase):
             ),
         ]
     )
+
+    @parameterized.expand(
+        [("644", 0o644), ("664", 0o664), ("666", 0o666), ("2644", 0o2644)]
+    )
+    def test_get_perms_non_executable(self, test_name, mode):
+        path = helpers.touch(os.path.join(self.topdir, "f"), mode=mode)
+        self.assertEqual(createiso._get_perms(path), 0o444)
+
+    @parameterized.expand(
+        [
+            ("544", 0o544),
+            ("554", 0o554),
+            ("555", 0o555),
+            ("744", 0o744),
+            ("755", 0o755),
+            ("774", 0o774),
+            ("775", 0o775),
+            ("777", 0o777),
+            ("2775", 0o2775),
+        ]
+    )
+    def test_get_perms_executable(self, test_name, mode):
+        path = helpers.touch(os.path.join(self.topdir, "f"), mode=mode)
+        self.assertEqual(createiso._get_perms(path), 0o555)
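The parameterized cases above pin the normalization rule: any executable bit yields 0o555, everything else 0o444. A matching standalone sketch (pungi's actual `_get_perms` may differ in detail):

    import os
    import stat

    def _get_perms(path):
        mode = os.stat(path).st_mode
        executable = mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
        return 0o555 if executable else 0o444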
@ -45,7 +45,7 @@ class TestImageBuildPhase(PungiTestCase):
         phase.run()
 
         # assert at least one thread was started
-        self.assertTrue(phase.pool.add.called)
+        phase.pool.add.assert_called()
         client_args = {
             "original_image_conf": original_image_conf,
             "image_conf": {
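Why these assertions changed throughout the test suite: accessing a nonexistent attribute on a `Mock` silently creates a new, truthy child mock, so a check like `assertTrue(m.called_once)` passes even when nothing was called. The `assert_called*()` methods verify for real:

    import mock  # or: from unittest import mock

    m = mock.Mock()
    print(bool(m.called_once))  # True! "called_once" is just an auto-created attribute
    try:
        m.assert_called_once()  # correctly raises: m was never called
    except AssertionError as exc:
        print("caught:", exc)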
@ -137,7 +137,7 @@ class TestImageBuildPhase(PungiTestCase):
         phase.run()
 
         # assert at least one thread was started
-        self.assertTrue(phase.pool.add.called)
+        phase.pool.add.assert_called()
         server_args = {
             "original_image_conf": original_image_conf,
             "image_conf": {

@ -196,7 +196,7 @@ class TestImageBuildPhase(PungiTestCase):
         phase.run()
 
         # assert at least one thread was started
-        self.assertTrue(phase.pool.add.called)
+        phase.pool.add.assert_called()
         server_args = {
             "original_image_conf": original_image_conf,
             "image_conf": {

@ -261,8 +261,8 @@ class TestImageBuildPhase(PungiTestCase):
         phase.run()
 
         # assert at least one thread was started
-        self.assertFalse(phase.pool.add.called)
-        self.assertFalse(phase.pool.queue_put.called)
+        phase.pool.add.assert_not_called()
+        phase.pool.queue_put.assert_not_called()
 
     @mock.patch("pungi.phases.image_build.ThreadPool")
     def test_image_build_set_install_tree(self, ThreadPool):

@ -297,9 +297,9 @@ class TestImageBuildPhase(PungiTestCase):
         phase.run()
 
         # assert at least one thread was started
-        self.assertTrue(phase.pool.add.called)
+        phase.pool.add.assert_called()
 
-        self.assertTrue(phase.pool.queue_put.called_once)
+        phase.pool.queue_put.assert_called_once()
         args, kwargs = phase.pool.queue_put.call_args
         self.assertEqual(args[0][0], compose)
         self.assertDictEqual(

@ -364,9 +364,9 @@ class TestImageBuildPhase(PungiTestCase):
         phase.run()
 
         # assert at least one thread was started
-        self.assertTrue(phase.pool.add.called)
+        phase.pool.add.assert_called()
 
-        self.assertTrue(phase.pool.queue_put.called_once)
+        phase.pool.queue_put.assert_called_once()
         args, kwargs = phase.pool.queue_put.call_args
         self.assertEqual(args[0][0], compose)
         self.assertDictEqual(

@ -430,9 +430,9 @@ class TestImageBuildPhase(PungiTestCase):
         phase.run()
 
         # assert at least one thread was started
-        self.assertTrue(phase.pool.add.called)
+        phase.pool.add.assert_called()
 
-        self.assertTrue(phase.pool.queue_put.called_once)
+        phase.pool.queue_put.assert_called_once()
         args, kwargs = phase.pool.queue_put.call_args
         self.assertEqual(args[0][0], compose)
         self.assertDictEqual(

@ -501,9 +501,9 @@ class TestImageBuildPhase(PungiTestCase):
         phase.run()
 
         # assert at least one thread was started
-        self.assertTrue(phase.pool.add.called)
+        phase.pool.add.assert_called()
 
-        self.assertTrue(phase.pool.queue_put.called_once)
+        phase.pool.queue_put.assert_called_once()
         args, kwargs = phase.pool.queue_put.call_args
         self.assertEqual(args[0][0], compose)
         self.assertDictEqual(

@ -569,9 +569,9 @@ class TestImageBuildPhase(PungiTestCase):
         phase.run()
 
         # assert at least one thread was started
-        self.assertTrue(phase.pool.add.called)
+        phase.pool.add.assert_called()
 
-        self.assertTrue(phase.pool.queue_put.called_once)
+        phase.pool.queue_put.assert_called_once()
         args, kwargs = phase.pool.queue_put.call_args
         self.assertEqual(
             args[0][1].get("image_conf", {}).get("image-build", {}).get("release"),

@ -612,9 +612,9 @@ class TestImageBuildPhase(PungiTestCase):
         phase.run()
 
         # assert at least one thread was started
-        self.assertTrue(phase.pool.add.called)
+        phase.pool.add.assert_called()
 
-        self.assertTrue(phase.pool.queue_put.called_once)
+        phase.pool.queue_put.assert_called_once()
         args, kwargs = phase.pool.queue_put.call_args
         self.assertEqual(
             args[0][1].get("image_conf", {}).get("image-build", {}).get("release"),

@ -655,9 +655,9 @@ class TestImageBuildPhase(PungiTestCase):
         phase.run()
 
         # assert at least one thread was started
-        self.assertTrue(phase.pool.add.called)
+        phase.pool.add.assert_called()
 
-        self.assertTrue(phase.pool.queue_put.called_once)
+        phase.pool.queue_put.assert_called_once()
         args, kwargs = phase.pool.queue_put.call_args
         self.assertTrue(args[0][1].get("scratch"))
 

@ -692,7 +692,7 @@ class TestImageBuildPhase(PungiTestCase):
         phase.run()
 
         # assert at least one thread was started
-        self.assertTrue(phase.pool.add.called)
+        phase.pool.add.assert_called()
         server_args = {
             "original_image_conf": original_image_conf,
             "image_conf": {

@ -755,7 +755,7 @@ class TestImageBuildPhase(PungiTestCase):
         phase.run()
 
         # assert at least one thread was started
-        self.assertTrue(phase.pool.add.called)
+        phase.pool.add.assert_called()
         server_args = {
             "original_image_conf": original_image_conf,
             "image_conf": {
@ -28,6 +28,7 @@ def fake_listdir(pattern, result=None, exc=None):
     """Create a function that mocks os.listdir. If the path contains pattern,
     result will be returned or exc raised. Otherwise it's normal os.listdir
     """
+
     # The point of this is to avoid issues on Python 2, where apparently
     # isdir() is using listdir(), so the mocking is breaking it.
     def worker(path):
@ -121,7 +121,6 @@ class KojiWrapperTest(KojiWrapperBaseTestCase):
         )
 
     def test_get_image_paths(self):
-
         # The data for this tests is obtained from the actual Koji build. It
         # includes lots of fields that are not used, but for the sake of
         # completeness is fully preserved.

@ -321,7 +320,6 @@ class KojiWrapperTest(KojiWrapperBaseTestCase):
         )
 
     def test_get_image_paths_failed_subtask(self):
-
         failed = set()
 
         def failed_callback(arch):
@ -43,7 +43,7 @@ class TestLiveImagesPhase(PungiTestCase):
         phase.run()
 
         # assert at least one thread was started
-        self.assertTrue(phase.pool.add.called)
+        phase.pool.add.assert_called()
         self.maxDiff = None
         six.assertCountEqual(
             self,

@ -124,7 +124,7 @@ class TestLiveImagesPhase(PungiTestCase):
         phase.run()
 
         # assert at least one thread was started
-        self.assertTrue(phase.pool.add.called)
+        phase.pool.add.assert_called()
         self.maxDiff = None
         six.assertCountEqual(
             self,

@ -192,7 +192,7 @@ class TestLiveImagesPhase(PungiTestCase):
         phase.run()
 
         # assert at least one thread was started
-        self.assertTrue(phase.pool.add.called)
+        phase.pool.add.assert_called()
         self.maxDiff = None
         six.assertCountEqual(
             self,

@ -265,7 +265,7 @@ class TestLiveImagesPhase(PungiTestCase):
         phase.run()
 
         # assert at least one thread was started
-        self.assertTrue(phase.pool.add.called)
+        phase.pool.add.assert_called()
         self.maxDiff = None
         six.assertCountEqual(
             self,

@ -363,7 +363,7 @@ class TestLiveImagesPhase(PungiTestCase):
         phase.run()
 
         # assert at least one thread was started
-        self.assertTrue(phase.pool.add.called)
+        phase.pool.add.assert_called()
         self.maxDiff = None
         six.assertCountEqual(
             self,

@ -433,7 +433,7 @@ class TestLiveImagesPhase(PungiTestCase):
         phase.run()
 
         # assert at least one thread was started
-        self.assertTrue(phase.pool.add.called)
+        phase.pool.add.assert_called()
         self.maxDiff = None
         six.assertCountEqual(
             self,

@ -503,7 +503,7 @@ class TestLiveImagesPhase(PungiTestCase):
         phase.run()
 
         # assert at least one thread was started
-        self.assertTrue(phase.pool.add.called)
+        phase.pool.add.assert_called()
         self.maxDiff = None
         six.assertCountEqual(
             self,

@ -571,7 +571,7 @@ class TestLiveImagesPhase(PungiTestCase):
         phase.run()
 
         # assert at least one thread was started
-        self.assertTrue(phase.pool.add.called)
+        phase.pool.add.assert_called()
         self.maxDiff = None
         six.assertCountEqual(
             self,
@ -36,7 +36,7 @@ class TestLiveMediaPhase(PungiTestCase):
         phase = LiveMediaPhase(compose)
 
         phase.run()
-        self.assertTrue(phase.pool.add.called)
+        phase.pool.add.assert_called()
         self.assertEqual(
             phase.pool.queue_put.call_args_list,
             [

@ -93,7 +93,7 @@ class TestLiveMediaPhase(PungiTestCase):
         phase = LiveMediaPhase(compose)
 
         phase.run()
-        self.assertTrue(phase.pool.add.called)
+        phase.pool.add.assert_called()
         self.assertEqual(
             phase.pool.queue_put.call_args_list,
             [

@ -156,7 +156,7 @@ class TestLiveMediaPhase(PungiTestCase):
         phase = LiveMediaPhase(compose)
 
         phase.run()
-        self.assertTrue(phase.pool.add.called)
+        phase.pool.add.assert_called()
         self.assertEqual(
             phase.pool.queue_put.call_args_list,
             [

@ -267,7 +267,7 @@ class TestLiveMediaPhase(PungiTestCase):
         phase = LiveMediaPhase(compose)
 
         phase.run()
-        self.assertTrue(phase.pool.add.called)
+        phase.pool.add.assert_called()
         self.assertEqual(
             phase.pool.queue_put.call_args_list,
             [

@ -444,7 +444,7 @@ class TestLiveMediaPhase(PungiTestCase):
         phase = LiveMediaPhase(compose)
 
         phase.run()
-        self.assertTrue(phase.pool.add.called)
+        phase.pool.add.assert_called()
 
         self.assertEqual(
             phase.pool.queue_put.call_args_list,
@ -133,7 +133,7 @@ class TestNotifier(unittest.TestCase):
     def test_does_not_run_without_config(self, run, makedirs):
         n = PungiNotifier(None)
         n.send("cmd", foo="bar", baz="quux")
-        self.assertFalse(run.called)
+        run.assert_not_called()
 
     @mock.patch("pungi.util.translate_path")
     @mock.patch("kobo.shortcuts.run")

@ -146,4 +146,4 @@ class TestNotifier(unittest.TestCase):
         n.send("cmd", **self.data)
 
         self.assertEqual(run.call_args_list, [self._call("run-notify", "cmd")])
-        self.assertTrue(self.compose.log_warning.called)
+        self.compose.log_warning.assert_called()
@ -1,934 +0,0 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
import itertools
|
||||
import json
|
||||
from functools import wraps
|
||||
import operator
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
from textwrap import dedent
|
||||
|
||||
import mock
|
||||
import six
|
||||
from six.moves import configparser
|
||||
|
||||
from parameterized import parameterized
|
||||
|
||||
from tests.helpers import BaseTestCase, PungiTestCase, touch, FIXTURE_DIR
|
||||
from pungi_utils import orchestrator as o
|
||||
|
||||
|
||||
class TestConfigSubstitute(PungiTestCase):
|
||||
def setUp(self):
|
||||
super(TestConfigSubstitute, self).setUp()
|
||||
self.fp = os.path.join(self.topdir, "config.conf")
|
||||
|
||||
@parameterized.expand(
|
||||
[
|
||||
("hello = 'world'", "hello = 'world'"),
|
||||
("hello = '{{foo}}'", "hello = 'bar'"),
|
||||
("hello = '{{ foo}}'", "hello = 'bar'"),
|
||||
("hello = '{{foo }}'", "hello = 'bar'"),
|
||||
]
|
||||
)
|
||||
def test_substitutions(self, initial, expected):
|
||||
touch(self.fp, initial)
|
||||
o.fill_in_config_file(self.fp, {"foo": "bar"})
|
||||
with open(self.fp) as f:
|
||||
self.assertEqual(expected, f.read())
|
||||
|
||||
def test_missing_key(self):
|
||||
touch(self.fp, "hello = '{{unknown}}'")
|
||||
with self.assertRaises(RuntimeError) as ctx:
|
||||
o.fill_in_config_file(self.fp, {})
|
||||
self.assertEqual(
|
||||
"Unknown placeholder 'unknown' in config.conf", str(ctx.exception)
|
||||
)
|
||||
|
||||
|
||||
class TestSafeGetList(BaseTestCase):
|
||||
@parameterized.expand(
|
||||
[
|
||||
("", []),
|
||||
("foo", ["foo"]),
|
||||
("foo,bar", ["foo", "bar"]),
|
||||
("foo bar", ["foo", "bar"]),
|
||||
]
|
||||
)
|
||||
def test_success(self, value, expected):
|
||||
cf = configparser.RawConfigParser()
|
||||
cf.add_section("general")
|
||||
cf.set("general", "key", value)
|
||||
self.assertEqual(o._safe_get_list(cf, "general", "key"), expected)
|
||||
|
||||
def test_default(self):
|
||||
cf = configparser.RawConfigParser()
|
||||
cf.add_section("general")
|
||||
self.assertEqual(o._safe_get_list(cf, "general", "missing", "hello"), "hello")
|
||||
|
||||
|
||||
class TestComposePart(PungiTestCase):
|
||||
def test_from_minimal_config(self):
|
||||
cf = configparser.RawConfigParser()
|
||||
cf.add_section("test")
|
||||
cf.set("test", "config", "my.conf")
|
||||
|
||||
part = o.ComposePart.from_config(cf, "test", "/tmp/config")
|
||||
deps = "set()" if six.PY3 else "set([])"
|
||||
self.assertEqual(str(part), "test")
|
||||
self.assertEqual(
|
||||
repr(part),
|
||||
"ComposePart('test', '/tmp/config/my.conf', 'READY', "
|
||||
"just_phase=[], skip_phase=[], dependencies=%s)" % deps,
|
||||
)
|
||||
self.assertFalse(part.failable)
|
||||
|
||||
def test_from_full_config(self):
|
||||
cf = configparser.RawConfigParser()
|
||||
cf.add_section("test")
|
||||
cf.set("test", "config", "my.conf")
|
||||
cf.set("test", "depends_on", "base")
|
||||
cf.set("test", "skip_phase", "skip")
|
||||
cf.set("test", "just_phase", "just")
|
||||
cf.set("test", "failable", "yes")
|
||||
|
||||
part = o.ComposePart.from_config(cf, "test", "/tmp/config")
|
||||
deps = "{'base'}" if six.PY3 else "set(['base'])"
|
||||
self.assertEqual(
|
||||
repr(part),
|
||||
"ComposePart('test', '/tmp/config/my.conf', 'WAITING', "
|
||||
"just_phase=['just'], skip_phase=['skip'], dependencies=%s)" % deps,
|
||||
)
|
||||
self.assertTrue(part.failable)
|
||||
|
||||
def test_get_cmd(self):
|
||||
conf = o.Config(
|
||||
"/tgt/", "production", "RC-1.0", "/old", "/cfg", 1234, ["--quiet"]
|
||||
)
|
||||
part = o.ComposePart(
|
||||
"test", "/tmp/my.conf", just_phase=["just"], skip_phase=["skip"]
|
||||
)
|
||||
part.path = "/compose"
|
||||
|
||||
self.assertEqual(
|
||||
part.get_cmd(conf),
|
||||
[
|
||||
"pungi-koji",
|
||||
"--config",
|
||||
"/tmp/my.conf",
|
||||
"--compose-dir",
|
||||
"/compose",
|
||||
"--production",
|
||||
"--label",
|
||||
"RC-1.0",
|
||||
"--just-phase",
|
||||
"just",
|
||||
"--skip-phase",
|
||||
"skip",
|
||||
"--old-compose",
|
||||
"/old/parts",
|
||||
"--koji-event",
|
||||
"1234",
|
||||
"--quiet",
|
||||
"--no-latest-link",
|
||||
],
|
||||
)
|
||||
|
||||
def test_refresh_status(self):
|
||||
part = o.ComposePart("test", "/tmp/my.conf")
|
||||
part.path = os.path.join(self.topdir)
|
||||
touch(os.path.join(self.topdir, "STATUS"), "FINISHED")
|
||||
part.refresh_status()
|
||||
self.assertEqual(part.status, "FINISHED")
|
||||
|
||||
def test_refresh_status_missing_file(self):
|
||||
part = o.ComposePart("test", "/tmp/my.conf")
|
||||
part.path = os.path.join(self.topdir)
|
||||
part.refresh_status()
|
||||
self.assertEqual(part.status, "DOOMED")
|
||||
|
||||
@parameterized.expand(["FINISHED", "FINISHED_INCOMPLETE"])
|
||||
def test_is_finished(self, status):
|
||||
part = o.ComposePart("test", "/tmp/my.conf")
|
||||
part.status = status
|
||||
self.assertTrue(part.is_finished())
|
||||
|
||||
@parameterized.expand(["STARTED", "WAITING"])
|
||||
def test_is_not_finished(self, status):
|
||||
part = o.ComposePart("test", "/tmp/my.conf")
|
||||
part.status = status
|
||||
self.assertFalse(part.is_finished())
|
||||
|
||||
@mock.patch("pungi_utils.orchestrator.fill_in_config_file")
|
||||
@mock.patch("pungi_utils.orchestrator.get_compose_dir")
|
||||
@mock.patch("kobo.conf.PyConfigParser")
|
||||
def test_setup_start(self, Conf, gcd, ficf):
|
||||
def pth(*path):
|
||||
return os.path.join(self.topdir, *path)
|
||||
|
||||
conf = o.Config(
|
||||
pth("tgt"), "production", "RC-1.0", "/old", pth("cfg"), None, None
|
||||
)
|
||||
part = o.ComposePart("test", "/tmp/my.conf")
|
||||
parts = {"base": mock.Mock(path="/base", is_finished=lambda: True)}
|
||||
Conf.return_value.opened_files = ["foo.conf"]
|
||||
|
||||
part.setup_start(conf, parts)
|
||||
|
||||
self.assertEqual(part.status, "STARTED")
|
||||
self.assertEqual(part.path, gcd.return_value)
|
||||
self.assertEqual(part.log_file, pth("tgt", "logs", "test.log"))
|
||||
self.assertEqual(
|
||||
ficf.call_args_list,
|
||||
[mock.call("foo.conf", {"part-base": "/base", "configdir": pth("cfg")})],
|
||||
)
|
||||
self.assertEqual(
|
||||
gcd.call_args_list,
|
||||
[
|
||||
mock.call(
|
||||
pth("tgt/parts"),
|
||||
Conf.return_value,
|
||||
compose_type="production",
|
||||
compose_label="RC-1.0",
|
||||
)
|
||||
],
|
||||
)
|
||||
|
||||
@parameterized.expand(
|
||||
[
|
||||
# Nothing blocking, no change
|
||||
([], [], o.Status.READY),
|
||||
# Remove last blocker and switch to READY
|
||||
(["finished"], [], o.Status.READY),
|
||||
# Blocker remaining, stay in WAITING
|
||||
(["finished", "block"], ["block"], o.Status.WAITING),
|
||||
]
|
||||
)
|
||||
def test_unblock_on(self, deps, blockers, status):
|
||||
part = o.ComposePart("test", "/tmp/my.conf", dependencies=deps)
|
||||
part.unblock_on("finished")
|
||||
six.assertCountEqual(self, part.blocked_on, blockers)
|
||||
self.assertEqual(part.status, status)
|
||||
|
||||
|
||||
class TestStartPart(PungiTestCase):
|
||||
@mock.patch("subprocess.Popen")
|
||||
def test_start(self, Popen):
|
||||
part = mock.Mock(log_file=os.path.join(self.topdir, "log"))
|
||||
config = mock.Mock()
|
||||
parts = mock.Mock()
|
||||
cmd = ["pungi-koji", "..."]
|
||||
|
||||
part.get_cmd.return_value = cmd
|
||||
|
||||
proc = o.start_part(config, parts, part)
|
||||
|
||||
self.assertEqual(
|
||||
part.mock_calls,
|
||||
[mock.call.setup_start(config, parts), mock.call.get_cmd(config)],
|
||||
)
|
||||
self.assertEqual(proc, Popen.return_value)
|
||||
self.assertEqual(
|
||||
Popen.call_args_list,
|
||||
[mock.call(cmd, stdout=mock.ANY, stderr=subprocess.STDOUT)],
|
||||
)
|
||||
|
||||
|
||||
class TestHandleFinished(BaseTestCase):
|
||||
def setUp(self):
|
||||
self.config = mock.Mock()
|
||||
self.linker = mock.Mock()
|
||||
self.parts = {"a": mock.Mock(), "b": mock.Mock()}
|
||||
|
||||
@mock.patch("pungi_utils.orchestrator.update_metadata")
|
||||
@mock.patch("pungi_utils.orchestrator.copy_part")
|
||||
def test_handle_success(self, cp, um):
|
||||
proc = mock.Mock(returncode=0)
|
||||
o.handle_finished(self.config, self.linker, self.parts, proc, self.parts["a"])
|
||||
|
||||
self.assertEqual(
|
||||
self.parts["a"].mock_calls,
|
||||
[mock.call.refresh_status(), mock.call.unblock_on(self.parts["a"].name)],
|
||||
)
|
||||
self.assertEqual(
|
||||
self.parts["b"].mock_calls, [mock.call.unblock_on(self.parts["a"].name)]
|
||||
)
|
||||
self.assertEqual(
|
||||
cp.call_args_list, [mock.call(self.config, self.linker, self.parts["a"])]
|
||||
)
|
||||
self.assertEqual(um.call_args_list, [mock.call(self.config, self.parts["a"])])
|
||||
|
||||
@mock.patch("pungi_utils.orchestrator.block_on")
|
||||
def test_handle_failure(self, bo):
|
||||
proc = mock.Mock(returncode=1)
|
||||
o.handle_finished(self.config, self.linker, self.parts, proc, self.parts["a"])
|
||||
|
||||
self.assertEqual(self.parts["a"].mock_calls, [mock.call.refresh_status()])
|
||||
|
||||
self.assertEqual(
|
||||
bo.call_args_list, [mock.call(self.parts, self.parts["a"].name)]
|
||||
)
|
||||
|
||||
|
||||
class TestBlockOn(BaseTestCase):
|
||||
def test_single(self):
|
||||
parts = {"b": o.ComposePart("b", "b.conf", dependencies=["a"])}
|
||||
|
||||
o.block_on(parts, "a")
|
||||
|
||||
self.assertEqual(parts["b"].status, o.Status.BLOCKED)
|
||||
|
||||
def test_chain(self):
|
||||
parts = {
|
||||
"b": o.ComposePart("b", "b.conf", dependencies=["a"]),
|
||||
"c": o.ComposePart("c", "c.conf", dependencies=["b"]),
|
||||
"d": o.ComposePart("d", "d.conf", dependencies=["c"]),
|
||||
}
|
||||
|
||||
o.block_on(parts, "a")
|
||||
|
||||
self.assertEqual(parts["b"].status, o.Status.BLOCKED)
|
||||
self.assertEqual(parts["c"].status, o.Status.BLOCKED)
|
||||
self.assertEqual(parts["d"].status, o.Status.BLOCKED)
|
||||
|
||||
|
||||
class TestUpdateMetadata(PungiTestCase):
|
||||
def assertEqualJSON(self, f1, f2):
|
||||
with open(f1) as f:
|
||||
actual = json.load(f)
|
||||
with open(f2) as f:
|
||||
expected = json.load(f)
|
||||
self.assertEqual(actual, expected)
|
||||
|
||||
def assertEqualMetadata(self, expected):
|
||||
expected_dir = os.path.join(FIXTURE_DIR, expected, "compose/metadata")
|
||||
for f in os.listdir(expected_dir):
|
||||
self.assertEqualJSON(
|
||||
os.path.join(self.tgt, "compose/metadata", f),
|
||||
os.path.join(expected_dir, f),
|
||||
)
|
||||
|
||||
@parameterized.expand(["empty-metadata", "basic-metadata"])
|
||||
def test_merge_into_empty(self, fixture):
|
||||
self.tgt = os.path.join(self.topdir, "target")
|
||||
|
||||
conf = o.Config(self.tgt, "production", None, None, None, None, [])
|
||||
part = o.ComposePart("test", "/tmp/my.conf")
|
||||
part.path = os.path.join(FIXTURE_DIR, "DP-1.0-20181001.n.0")
|
||||
|
||||
shutil.copytree(os.path.join(FIXTURE_DIR, fixture), self.tgt)
|
||||
|
||||
o.update_metadata(conf, part)
|
||||
|
||||
self.assertEqualMetadata(fixture + "-merged")
|
||||
|
||||
|
||||
class TestCopyPart(PungiTestCase):
|
||||
@mock.patch("pungi_utils.orchestrator.hardlink_dir")
|
||||
def test_copy(self, hd):
|
||||
self.tgt = os.path.join(self.topdir, "target")
|
||||
conf = o.Config(self.tgt, "production", None, None, None, None, [])
|
||||
linker = mock.Mock()
|
||||
part = o.ComposePart("test", "/tmp/my.conf")
|
||||
part.path = os.path.join(FIXTURE_DIR, "DP-1.0-20161013.t.4")
|
||||
|
||||
o.copy_part(conf, linker, part)
|
||||
|
||||
six.assertCountEqual(
|
||||
self,
|
||||
hd.call_args_list,
|
||||
[
|
||||
mock.call(
|
||||
linker,
|
||||
os.path.join(part.path, "compose", variant),
|
||||
os.path.join(self.tgt, "compose", variant),
|
||||
)
|
||||
for variant in ["Client", "Server"]
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
class TestHardlinkDir(PungiTestCase):
|
||||
def test_hardlinking(self):
|
||||
linker = mock.Mock()
|
||||
src = os.path.join(self.topdir, "src")
|
||||
dst = os.path.join(self.topdir, "dst")
|
||||
files = ["file.txt", "nested/deep/another.txt"]
|
||||
|
||||
for f in files:
|
||||
touch(os.path.join(src, f))
|
||||
|
||||
o.hardlink_dir(linker, src, dst)
|
||||
|
||||
six.assertCountEqual(
|
||||
self,
|
||||
linker.queue_put.call_args_list,
|
||||
[mock.call((os.path.join(src, f), os.path.join(dst, f))) for f in files],
|
||||
)
|
||||
|
||||
|
||||
class TestCheckFinishedProcesses(BaseTestCase):
|
||||
def test_nothing_finished(self):
|
||||
k1 = mock.Mock(returncode=None)
|
||||
v1 = mock.Mock()
|
||||
processes = {k1: v1}
|
||||
|
||||
six.assertCountEqual(self, o.check_finished_processes(processes), [])
|
||||
|
||||
def test_yields_finished(self):
|
||||
k1 = mock.Mock(returncode=None)
|
||||
v1 = mock.Mock()
|
||||
k2 = mock.Mock(returncode=0)
|
||||
v2 = mock.Mock()
|
||||
processes = {k1: v1, k2: v2}
|
||||
|
||||
six.assertCountEqual(self, o.check_finished_processes(processes), [(k2, v2)])
|
||||
|
||||
def test_yields_failed(self):
|
||||
k1 = mock.Mock(returncode=1)
|
||||
v1 = mock.Mock()
|
||||
processes = {k1: v1}
|
||||
|
||||
six.assertCountEqual(self, o.check_finished_processes(processes), [(k1, v1)])
|
||||
|
||||
|
||||
class _Part(object):
|
||||
def __init__(self, name, parent=None, fails=False, status=None):
|
||||
self.name = name
|
||||
self.finished = False
|
||||
self.status = o.Status.WAITING if parent else o.Status.READY
|
||||
if status:
|
||||
self.status = status
|
||||
self.proc = mock.Mock(name="proc_%s" % name, pid=hash(self))
|
||||
self.parent = parent
|
||||
self.fails = fails
|
||||
self.failable = False
|
||||
self.path = "/path/to/%s" % name
|
||||
self.blocked_on = set([parent]) if parent else set()
|
||||
|
||||
def is_finished(self):
|
||||
return self.finished or self.status == "FINISHED"
|
||||
|
||||
def __repr__(self):
|
||||
return "<_Part(%r, parent=%r)>" % (self.name, self.parent)
|
||||
|
||||
|
||||
def with_mocks(parts, finish_order, wait_results):
|
||||
"""Setup all mocks and create dict with the parts.
|
||||
:param finish_order: nested list: first element contains parts that finish
|
||||
in first iteration, etc.
|
||||
:param wait_results: list of names of processes that are returned by wait in each
|
||||
iteration
|
||||
"""
|
||||
|
||||
def decorator(func):
|
||||
@wraps(func)
|
||||
def worker(self, lp, update_status, cfp, hf, sp, wait):
|
||||
self.parts = dict((p.name, p) for p in parts)
|
||||
self.linker = lp.return_value.__enter__.return_value
|
||||
|
||||
update_status.side_effect = self.mock_update
|
||||
hf.side_effect = self.mock_finish
|
||||
sp.side_effect = self.mock_start
|
||||
|
||||
finish = [[]]
|
||||
for grp in finish_order:
|
||||
finish.append([(self.parts[p].proc, self.parts[p]) for p in grp])
|
||||
|
||||
cfp.side_effect = finish
|
||||
wait.side_effect = [(self.parts[p].proc.pid, 0) for p in wait_results]
|
||||
|
||||
func(self)
|
||||
|
||||
self.assertEqual(lp.call_args_list, [mock.call("hardlink")])
|
||||
|
||||
return worker
|
||||
|
||||
return decorator
|
||||
|
||||
|
||||
@mock.patch("os.wait")
|
||||
@mock.patch("pungi_utils.orchestrator.start_part")
|
||||
@mock.patch("pungi_utils.orchestrator.handle_finished")
|
||||
@mock.patch("pungi_utils.orchestrator.check_finished_processes")
|
||||
@mock.patch("pungi_utils.orchestrator.update_status")
|
||||
@mock.patch("pungi_utils.orchestrator.linker_pool")
|
||||
class TestRunAll(BaseTestCase):
|
||||
def setUp(self):
|
||||
self.maxDiff = None
|
||||
self.conf = mock.Mock(name="global_config")
|
||||
self.calls = []
|
||||
|
||||
def mock_update(self, global_config, parts):
|
||||
self.assertEqual(global_config, self.conf)
|
||||
self.assertEqual(parts, self.parts)
|
||||
self.calls.append("update_status")
|
||||
|
||||
def mock_start(self, global_config, parts, part):
|
||||
self.assertEqual(global_config, self.conf)
|
||||
self.assertEqual(parts, self.parts)
|
||||
self.calls.append(("start_part", part.name))
|
||||
part.status = o.Status.STARTED
|
||||
return part.proc
|
||||
|
||||
@property
|
||||
def sorted_calls(self):
|
||||
"""Sort the consecutive calls of the same function based on the argument."""
|
||||
|
||||
def key(val):
|
||||
return val[0] if isinstance(val, tuple) else val
|
||||
|
||||
return list(
|
||||
itertools.chain.from_iterable(
|
||||
sorted(grp, key=operator.itemgetter(1))
|
||||
for _, grp in itertools.groupby(self.calls, key)
|
||||
)
|
||||
)
|
||||
|
||||
def mock_finish(self, global_config, linker, parts, proc, part):
|
||||
self.assertEqual(global_config, self.conf)
|
||||
self.assertEqual(linker, self.linker)
|
||||
self.assertEqual(parts, self.parts)
|
||||
self.calls.append(("handle_finished", part.name))
|
||||
for child in parts.values():
|
||||
if child.parent == part.name:
|
||||
child.status = o.Status.BLOCKED if part.fails else o.Status.READY
|
||||
part.status = "DOOMED" if part.fails else "FINISHED"
|
||||
|
||||
@with_mocks(
|
||||
[_Part("fst"), _Part("snd", parent="fst")], [["fst"], ["snd"]], ["fst", "snd"]
|
||||
)
|
||||
def test_sequential(self):
|
||||
o.run_all(self.conf, self.parts)
|
||||
|
||||
self.assertEqual(
|
||||
self.sorted_calls,
|
||||
[
|
||||
# First iteration starts fst
|
||||
"update_status",
|
||||
("start_part", "fst"),
|
||||
# Second iteration handles finish of fst and starts snd
|
||||
"update_status",
|
||||
("handle_finished", "fst"),
|
||||
("start_part", "snd"),
|
||||
# Third iteration handles finish of snd
|
||||
"update_status",
|
||||
("handle_finished", "snd"),
|
||||
# Final update of status
|
||||
"update_status",
|
||||
],
|
||||
)
|
||||
|
||||
@with_mocks([_Part("fst"), _Part("snd")], [["fst", "snd"]], ["fst"])
|
||||
def test_parallel(self):
|
||||
o.run_all(self.conf, self.parts)
|
||||
|
||||
self.assertEqual(
|
||||
self.sorted_calls,
|
||||
[
|
||||
# First iteration starts both fst and snd
|
||||
"update_status",
|
||||
("start_part", "fst"),
|
||||
("start_part", "snd"),
|
||||
# Second iteration handles finish of both of them
|
||||
"update_status",
|
||||
("handle_finished", "fst"),
|
||||
("handle_finished", "snd"),
|
||||
# Final update of status
|
||||
"update_status",
|
||||
],
|
||||
)
|
||||
|
||||
@with_mocks(
|
||||
[_Part("1"), _Part("2", parent="1"), _Part("3", parent="1")],
|
||||
[["1"], ["2", "3"]],
|
||||
["1", "2"],
|
||||
)
|
||||
def test_waits_for_dep_then_parallel_with_simultaneous_end(self):
|
||||
o.run_all(self.conf, self.parts)
|
||||
|
||||
self.assertEqual(
|
||||
self.sorted_calls,
|
||||
[
|
||||
# First iteration starts first part
|
||||
"update_status",
|
||||
("start_part", "1"),
|
||||
# Second iteration starts 2 and 3
|
||||
"update_status",
|
||||
("handle_finished", "1"),
|
||||
("start_part", "2"),
|
||||
("start_part", "3"),
|
||||
# Both 2 and 3 end in third iteration
|
||||
"update_status",
|
||||
("handle_finished", "2"),
|
||||
("handle_finished", "3"),
|
||||
# Final update of status
|
||||
"update_status",
|
||||
],
|
||||
)
|
||||
|
||||
@with_mocks(
|
||||
[_Part("1"), _Part("2", parent="1"), _Part("3", parent="1")],
|
||||
[["1"], ["3"], ["2"]],
|
||||
["1", "3", "2"],
|
||||
)
|
||||
def test_waits_for_dep_then_parallel_with_different_end_times(self):
|
||||
o.run_all(self.conf, self.parts)
|
||||
|
||||
self.assertEqual(
|
||||
self.sorted_calls,
|
||||
[
|
||||
# First iteration starts first part
|
||||
"update_status",
|
||||
("start_part", "1"),
|
||||
# Second iteration starts 2 and 3
|
||||
"update_status",
|
||||
("handle_finished", "1"),
|
||||
("start_part", "2"),
|
||||
("start_part", "3"),
|
||||
# Third iteration sees 3 finish
|
||||
"update_status",
|
||||
("handle_finished", "3"),
|
||||
# Fourth iteration, 2 finishes
|
||||
"update_status",
|
||||
("handle_finished", "2"),
|
||||
# Final update of status
|
||||
"update_status",
|
||||
],
|
||||
)
|
||||
|
||||
@with_mocks(
|
||||
[_Part("fst", fails=True), _Part("snd", parent="fst")], [["fst"]], ["fst"]
|
||||
)
|
||||
def test_blocked(self):
|
||||
o.run_all(self.conf, self.parts)
|
||||
|
||||
self.assertEqual(
|
||||
self.sorted_calls,
|
||||
[
|
||||
# First iteration starts first part
|
||||
"update_status",
|
||||
("start_part", "fst"),
|
||||
# Second iteration handles fail of first part
|
||||
"update_status",
|
||||
("handle_finished", "fst"),
|
||||
# Final update of status
|
||||
"update_status",
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
@mock.patch("pungi_utils.orchestrator.get_compose_dir")
|
||||
class TestGetTargetDir(BaseTestCase):
|
||||
def test_with_absolute_path(self, gcd):
|
||||
config = {"target": "/tgt", "compose_type": "nightly"}
|
||||
cfg = mock.Mock()
|
||||
cfg.get.side_effect = lambda _, k: config[k]
|
||||
ci = mock.Mock()
|
||||
res = o.get_target_dir(cfg, ci, None, reldir="/checkout")
|
||||
self.assertEqual(res, gcd.return_value)
|
||||
self.assertEqual(
|
||||
gcd.call_args_list,
|
||||
[mock.call("/tgt", ci, compose_type="nightly", compose_label=None)],
|
||||
)
|
||||
|
||||
def test_with_relative_path(self, gcd):
|
||||
config = {"target": "tgt", "compose_type": "nightly"}
|
||||
cfg = mock.Mock()
|
||||
cfg.get.side_effect = lambda _, k: config[k]
|
||||
ci = mock.Mock()
|
||||
res = o.get_target_dir(cfg, ci, None, reldir="/checkout")
|
||||
self.assertEqual(res, gcd.return_value)
|
||||
self.assertEqual(
|
||||
gcd.call_args_list,
|
||||
[
|
||||
mock.call(
|
||||
"/checkout/tgt", ci, compose_type="nightly", compose_label=None
|
||||
)
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
class TestComputeStatus(BaseTestCase):
    @parameterized.expand(
        [
            ([("FINISHED", False)], "FINISHED"),
            ([("FINISHED", False), ("STARTED", False)], "STARTED"),
            ([("FINISHED", False), ("STARTED", False), ("WAITING", False)], "STARTED"),
            ([("FINISHED", False), ("DOOMED", False)], "DOOMED"),
            (
                [("FINISHED", False), ("BLOCKED", True), ("DOOMED", True)],
                "FINISHED_INCOMPLETE",
            ),
            ([("FINISHED", False), ("BLOCKED", False), ("DOOMED", True)], "DOOMED"),
            ([("FINISHED", False), ("DOOMED", True)], "FINISHED_INCOMPLETE"),
            ([("FINISHED", False), ("STARTED", False), ("DOOMED", False)], "STARTED"),
        ]
    )
    def test_cases(self, statuses, expected):
        self.assertEqual(o.compute_status(statuses), expected)


class TestUpdateStatus(PungiTestCase):
    def test_updating(self):
        os.makedirs(os.path.join(self.topdir, "compose/metadata"))
        conf = o.Config(
            self.topdir, "production", "RC-1.0", "/old", "/cfg", 1234, ["--quiet"]
        )
        o.update_status(
            conf,
            {"1": _Part("1", status="FINISHED"), "2": _Part("2", status="STARTED")},
        )
        self.assertFileContent(os.path.join(self.topdir, "STATUS"), "STARTED")
        self.assertFileContent(
            os.path.join(self.topdir, "compose/metadata/parts.json"),
            dedent(
                """\
                {
                  "1": {
                    "path": "/path/to/1",
                    "status": "FINISHED"
                  },
                  "2": {
                    "path": "/path/to/2",
                    "status": "STARTED"
                  }
                }
                """
            ),
        )


@mock.patch("pungi_utils.orchestrator.get_target_dir")
|
||||
class TestPrepareComposeDir(PungiTestCase):
|
||||
def setUp(self):
|
||||
super(TestPrepareComposeDir, self).setUp()
|
||||
self.conf = mock.Mock(name="config")
|
||||
self.main_config = "/some/config"
|
||||
self.compose_info = mock.Mock(name="compose_info")
|
||||
|
||||
def test_new_compose(self, gtd):
|
||||
def mock_get_target(conf, compose_info, label, reldir):
|
||||
self.assertEqual(conf, self.conf)
|
||||
self.assertEqual(compose_info, self.compose_info)
|
||||
self.assertEqual(label, args.label)
|
||||
self.assertEqual(reldir, "/some")
|
||||
touch(os.path.join(self.topdir, "work/global/composeinfo-base.json"), "WOO")
|
||||
return self.topdir
|
||||
|
||||
gtd.side_effect = mock_get_target
|
||||
args = mock.Mock(name="args", spec=["label"])
|
||||
retval = o.prepare_compose_dir(
|
||||
self.conf, args, self.main_config, self.compose_info
|
||||
)
|
||||
self.assertEqual(retval, self.topdir)
|
||||
self.assertFileContent(
|
||||
os.path.join(self.topdir, "compose/metadata/composeinfo.json"), "WOO"
|
||||
)
|
||||
self.assertTrue(os.path.isdir(os.path.join(self.topdir, "logs")))
|
||||
self.assertTrue(os.path.isdir(os.path.join(self.topdir, "parts")))
|
||||
self.assertTrue(os.path.isdir(os.path.join(self.topdir, "work/global")))
|
||||
self.assertFileContent(os.path.join(self.topdir, "STATUS"), "STARTED")
|
||||
|
||||
def test_restarting_compose(self, gtd):
|
||||
args = mock.Mock(name="args", spec=["label", "compose_path"])
|
||||
retval = o.prepare_compose_dir(
|
||||
self.conf, args, self.main_config, self.compose_info
|
||||
)
|
||||
self.assertEqual(gtd.call_args_list, [])
|
||||
self.assertEqual(retval, args.compose_path)
|
||||
|
||||
|
||||
class TestLoadPartsMetadata(PungiTestCase):
    def test_loading(self):
        touch(
            os.path.join(self.topdir, "compose/metadata/parts.json"), '{"foo": "bar"}'
        )
        conf = mock.Mock(target=self.topdir)

        self.assertEqual(o.load_parts_metadata(conf), {"foo": "bar"})


@mock.patch("pungi_utils.orchestrator.load_parts_metadata")
|
||||
class TestSetupForRestart(BaseTestCase):
|
||||
def setUp(self):
|
||||
self.conf = mock.Mock(name="global_config")
|
||||
|
||||
def test_restart_ok(self, lpm):
|
||||
lpm.return_value = {
|
||||
"p1": {"status": "FINISHED", "path": "/p1"},
|
||||
"p2": {"status": "DOOMED", "path": "/p2"},
|
||||
}
|
||||
parts = {"p1": _Part("p1"), "p2": _Part("p2", parent="p1")}
|
||||
|
||||
o.setup_for_restart(self.conf, parts, ["p2"])
|
||||
|
||||
self.assertEqual(parts["p1"].status, "FINISHED")
|
||||
self.assertEqual(parts["p1"].path, "/p1")
|
||||
self.assertEqual(parts["p2"].status, "READY")
|
||||
self.assertEqual(parts["p2"].path, None)
|
||||
|
||||
def test_restart_one_blocked_one_ok(self, lpm):
|
||||
lpm.return_value = {
|
||||
"p1": {"status": "DOOMED", "path": "/p1"},
|
||||
"p2": {"status": "DOOMED", "path": "/p2"},
|
||||
"p3": {"status": "WAITING", "path": None},
|
||||
}
|
||||
parts = {
|
||||
"p1": _Part("p1"),
|
||||
"p2": _Part("p2", parent="p1"),
|
||||
"p3": _Part("p3", parent="p2"),
|
||||
}
|
||||
|
||||
o.setup_for_restart(self.conf, parts, ["p1", "p3"])
|
||||
|
||||
self.assertEqual(parts["p1"].status, "READY")
|
||||
self.assertEqual(parts["p1"].path, None)
|
||||
self.assertEqual(parts["p2"].status, "DOOMED")
|
||||
self.assertEqual(parts["p2"].path, "/p2")
|
||||
self.assertEqual(parts["p3"].status, "WAITING")
|
||||
self.assertEqual(parts["p3"].path, None)
|
||||
|
||||
def test_restart_all_blocked(self, lpm):
|
||||
lpm.return_value = {
|
||||
"p1": {"status": "DOOMED", "path": "/p1"},
|
||||
"p2": {"status": "STARTED", "path": "/p2"},
|
||||
}
|
||||
parts = {"p1": _Part("p1"), "p2": _Part("p2", parent="p1")}
|
||||
|
||||
with self.assertRaises(RuntimeError):
|
||||
o.setup_for_restart(self.conf, parts, ["p2"])
|
||||
|
||||
self.assertEqual(parts["p1"].status, "DOOMED")
|
||||
self.assertEqual(parts["p1"].path, "/p1")
|
||||
self.assertEqual(parts["p2"].status, "WAITING")
|
||||
self.assertEqual(parts["p2"].path, None)
|
||||
|
||||
|
||||
@mock.patch("atexit.register")
|
||||
@mock.patch("kobo.shortcuts.run")
|
||||
class TestRunKinit(BaseTestCase):
|
||||
def test_without_config(self, run, register):
|
||||
conf = mock.Mock()
|
||||
conf.getboolean.return_value = False
|
||||
|
||||
o.run_kinit(conf)
|
||||
|
||||
self.assertEqual(run.call_args_list, [])
|
||||
self.assertEqual(register.call_args_list, [])
|
||||
|
||||
@mock.patch.dict("os.environ")
|
||||
def test_with_config(self, run, register):
|
||||
conf = mock.Mock()
|
||||
conf.getboolean.return_value = True
|
||||
conf.get.side_effect = lambda section, option: option
|
||||
|
||||
o.run_kinit(conf)
|
||||
|
||||
self.assertEqual(
|
||||
run.call_args_list,
|
||||
[mock.call(["kinit", "-k", "-t", "kerberos_keytab", "kerberos_principal"])],
|
||||
)
|
||||
self.assertEqual(
|
||||
register.call_args_list, [mock.call(os.remove, os.environ["KRB5CCNAME"])]
|
||||
)
|
||||
|
||||
|
||||
@mock.patch.dict("os.environ", {}, clear=True)
|
||||
class TestGetScriptEnv(BaseTestCase):
|
||||
def test_without_metadata(self):
|
||||
env = o.get_script_env("/foobar")
|
||||
self.assertEqual(env, {"COMPOSE_PATH": "/foobar"})
|
||||
|
||||
def test_with_metadata(self):
|
||||
compose_dir = os.path.join(FIXTURE_DIR, "DP-1.0-20161013.t.4")
|
||||
env = o.get_script_env(compose_dir)
|
||||
self.maxDiff = None
|
||||
self.assertEqual(
|
||||
env,
|
||||
{
|
||||
"COMPOSE_PATH": compose_dir,
|
||||
"COMPOSE_ID": "DP-1.0-20161013.t.4",
|
||||
"COMPOSE_DATE": "20161013",
|
||||
"COMPOSE_TYPE": "test",
|
||||
"COMPOSE_RESPIN": "4",
|
||||
"COMPOSE_LABEL": "",
|
||||
"RELEASE_ID": "DP-1.0",
|
||||
"RELEASE_NAME": "Dummy Product",
|
||||
"RELEASE_SHORT": "DP",
|
||||
"RELEASE_VERSION": "1.0",
|
||||
"RELEASE_TYPE": "ga",
|
||||
"RELEASE_IS_LAYERED": "",
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
class TestRunScripts(BaseTestCase):
    @mock.patch("pungi_utils.orchestrator.get_script_env")
    @mock.patch("kobo.shortcuts.run")
    def test_run_scripts(self, run, get_env):
        commands = """
        date
        env
        """

        o.run_scripts("pref_", "/tmp/compose", commands)

        self.assertEqual(
            run.call_args_list,
            [
                mock.call(
                    "date",
                    logfile="/tmp/compose/logs/pref_0.log",
                    env=get_env.return_value,
                ),
                mock.call(
                    "env",
                    logfile="/tmp/compose/logs/pref_1.log",
                    env=get_env.return_value,
                ),
            ],
        )


@mock.patch("pungi.notifier.PungiNotifier")
|
||||
class TestSendNotification(BaseTestCase):
|
||||
def test_no_command(self, notif):
|
||||
o.send_notification("/foobar", None, None)
|
||||
self.assertEqual(notif.mock_calls, [])
|
||||
|
||||
@mock.patch("pungi.util.load_config")
|
||||
def test_with_command_and_translate(self, load_config, notif):
|
||||
compose_dir = os.path.join(FIXTURE_DIR, "DP-1.0-20161013.t.4")
|
||||
load_config.return_value = {
|
||||
"translate_paths": [(os.path.dirname(compose_dir), "http://example.com")],
|
||||
}
|
||||
parts = {"foo": mock.Mock()}
|
||||
|
||||
o.send_notification(compose_dir, "handler", parts)
|
||||
|
||||
self.assertEqual(len(notif.mock_calls), 2)
|
||||
self.assertEqual(notif.mock_calls[0], mock.call(["handler"]))
|
||||
_, args, kwargs = notif.mock_calls[1]
|
||||
self.assertEqual(args, ("status-change",))
|
||||
self.assertEqual(
|
||||
kwargs,
|
||||
{
|
||||
"status": "FINISHED",
|
||||
"workdir": compose_dir,
|
||||
"location": "http://example.com/DP-1.0-20161013.t.4",
|
||||
"compose_id": "DP-1.0-20161013.t.4",
|
||||
"compose_date": "20161013",
|
||||
"compose_type": "test",
|
||||
"compose_respin": "4",
|
||||
"compose_label": None,
|
||||
"release_id": "DP-1.0",
|
||||
"release_name": "Dummy Product",
|
||||
"release_short": "DP",
|
||||
"release_version": "1.0",
|
||||
"release_type": "ga",
|
||||
"release_is_layered": False,
|
||||
},
|
||||
)
|
||||
self.assertEqual(load_config.call_args_list, [mock.call(parts["foo"].config)])
|
|
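
The orchestrator tests above pin down a small polling contract: a subprocess counts as finished once its returncode is set, whether it exited with 0 or not. As a reading aid, here is a hedged sketch of the helper they mock; the real implementation lives in pungi_utils.orchestrator and may differ.

# Sketch only, assuming the semantics encoded in TestCheckFinishedProcesses.
def check_finished_processes(processes):
    """Yield (proc, part) pairs for subprocesses that have exited."""
    for proc, part in processes.items():
        if proc.returncode is not None:
            yield proc, part

TestRunAll then fixes the ordering around this helper: every iteration refreshes the status file, hands each finished part to handle_finished, and starts any part whose dependencies are satisfied, ending with a final status update.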
@@ -3,14 +3,76 @@
import mock

import os
import shutil
import tempfile
import unittest

import koji as orig_koji

from tests import helpers
from pungi import compose
from pungi.phases import osbuild
from pungi.checks import validate


class OSBuildPhaseHelperFuncsTest(unittest.TestCase):
    @mock.patch("pungi.compose.ComposeInfo")
    def setUp(self, ci):
        self.tmp_dir = tempfile.mkdtemp()
        conf = {"translate_paths": [(self.tmp_dir, "http://example.com")]}
        ci.return_value.compose.respin = 0
        ci.return_value.compose.id = "RHEL-8.0-20180101.n.0"
        ci.return_value.compose.date = "20160101"
        ci.return_value.compose.type = "nightly"
        ci.return_value.compose.type_suffix = ".n"
        ci.return_value.compose.label = "RC-1.0"
        ci.return_value.compose.label_major_version = "1"

        compose_dir = os.path.join(self.tmp_dir, ci.return_value.compose.id)
        self.compose = compose.Compose(conf, compose_dir)
        server_variant = mock.Mock(uid="Server", type="variant")
        client_variant = mock.Mock(uid="Client", type="variant")
        self.compose.all_variants = {
            "Server": server_variant,
            "Client": client_variant,
        }

    def tearDown(self):
        shutil.rmtree(self.tmp_dir)

    def test__get_repo_urls(self):
        repos = [
            "http://example.com/repo",
            "Server",
            {
                "baseurl": "Client",
                "package_sets": ["build"],
            },
            {
                "baseurl": "ftp://example.com/linux/repo",
                "package_sets": ["build"],
            },
        ]

        expect = [
            "http://example.com/repo",
            "http://example.com/RHEL-8.0-20180101.n.0/compose/Server/$basearch/os",
            {
                "baseurl": "http://example.com/RHEL-8.0-20180101.n.0/compose/Client/"
                + "$basearch/os",
                "package_sets": ["build"],
            },
            {
                "baseurl": "ftp://example.com/linux/repo",
                "package_sets": ["build"],
            },
        ]

        self.assertEqual(
            osbuild.OSBuildPhase._get_repo_urls(self.compose, repos), expect
        )


class OSBuildPhaseTest(helpers.PungiTestCase):
    @mock.patch("pungi.phases.osbuild.ThreadPool")
    def test_run(self, ThreadPool):
@@ -124,6 +186,49 @@ class OSBuildPhaseTest(helpers.PungiTestCase):
        )
        self.assertNotEqual(validate(compose.conf), ([], []))

    @mock.patch("pungi.phases.osbuild.ThreadPool")
    def test_rich_repos(self, ThreadPool):
        repo = {"baseurl": "http://example.com/repo", "package_sets": ["build"]}
        cfg = {
            "name": "test-image",
            "distro": "rhel-8",
            "version": "1",
            "target": "image-target",
            "arches": ["x86_64"],
            "image_types": ["qcow2"],
            "repo": [repo],
        }
        compose = helpers.DummyCompose(
            self.topdir, {"osbuild": {"^Everything$": [cfg]}}
        )

        self.assertValidConfig(compose.conf)

        pool = ThreadPool.return_value

        phase = osbuild.OSBuildPhase(compose)
        phase.run()

        self.assertEqual(len(pool.add.call_args_list), 1)
        self.assertEqual(
            pool.queue_put.call_args_list,
            [
                mock.call(
                    (
                        compose,
                        compose.variants["Everything"],
                        cfg,
                        ["x86_64"],
                        "1",
                        None,
                        "image-target",
                        [repo, self.topdir + "/compose/Everything/$arch/os"],
                        [],
                    ),
                ),
            ],
        )


class RunOSBuildThreadTest(helpers.PungiTestCase):
    def setUp(self):
@@ -189,7 +294,13 @@ class RunOSBuildThreadTest(helpers.PungiTestCase):
                "1",  # version
                "15",  # release
                "image-target",
                [self.topdir + "/compose/Everything/$arch/os"],
                [
                    self.topdir + "/compose/Everything/$arch/os",
                    {
                        "baseurl": self.topdir + "/compose/Everything/$arch/os",
                        "package_sets": ["build"],
                    },
                ],
                ["x86_64"],
            ),
            1,
@@ -211,7 +322,13 @@ class RunOSBuildThreadTest(helpers.PungiTestCase):
                ["aarch64", "x86_64"],
                opts={
                    "release": "15",
                    "repo": [self.topdir + "/compose/Everything/$arch/os"],
                    "repo": [
                        self.topdir + "/compose/Everything/$arch/os",
                        {
                            "baseurl": self.topdir + "/compose/Everything/$arch/os",
                            "package_sets": ["build"],
                        },
                    ],
                },
            ),
            mock.call.save_task_id(1234),
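
The hunks above extend the osbuild tests so that a repo entry may be either a plain URL string or a dict carrying a baseurl plus extra keys such as package_sets. A hedged sketch of the normalization they exercise follows; the real logic is OSBuildPhase._get_repo_urls, and translate_url here is a hypothetical stand-in for Pungi's variant and path translation.

# Illustrative only.  translate_url() is assumed to map a variant UID such as
# "Server" to its compose repo URL and to pass real URLs through unchanged.
def get_repo_urls(repos, translate_url):
    urls = []
    for repo in repos:
        if isinstance(repo, dict):
            # Rich repo: rewrite only the baseurl, keep keys like
            # "package_sets" intact for the Koji osbuild task.
            urls.append(dict(repo, baseurl=translate_url(repo["baseurl"])))
        else:
            urls.append(translate_url(repo))
    return urls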
@@ -315,7 +315,6 @@ class OstreeTreeScriptTest(helpers.PungiTestCase):

    @mock.patch("kobo.shortcuts.run")
    def test_extra_config_with_keep_original_sources(self, run):

        configdir = os.path.join(self.topdir, "config")
        self._make_dummy_config_dir(configdir)
        treefile = os.path.join(configdir, "fedora-atomic-docker-host.json")
@@ -47,7 +47,7 @@ class TestMaterializedPkgsetCreate(helpers.PungiTestCase):
        pkgset.name = name
        pkgset.reuse = None

        def mock_subset(primary, arch_list, exclusive_noarch):
        def mock_subset(primary, arch_list, **kwargs):
            self.subsets[primary] = mock.Mock()
            return self.subsets[primary]
@@ -73,10 +73,16 @@ class TestMaterializedPkgsetCreate(helpers.PungiTestCase):
        self.assertEqual(result["amd64"], self.subsets["amd64"])

        self.pkgset.subset.assert_any_call(
            "x86_64", ["x86_64", "noarch", "src"], exclusive_noarch=True
            "x86_64",
            ["x86_64", "noarch", "src"],
            exclusive_noarch=True,
            inherit_to_noarch=True,
        )
        self.pkgset.subset.assert_any_call(
            "amd64", ["amd64", "x86_64", "noarch", "src"], exclusive_noarch=True
            "amd64",
            ["amd64", "x86_64", "noarch", "src"],
            exclusive_noarch=True,
            inherit_to_noarch=True,
        )

        for arch, pkgset in result.package_sets.items():
@@ -853,6 +853,8 @@ class TestReuseKojiPkgset(helpers.PungiTestCase):
                "include_packages": None,
                "rpms_by_arch": mock.Mock(),
                "srpms_by_name": mock.Mock(),
                "exclusive_noarch": True,
                "inherit_to_noarch": True,
            }
        )
        self.pkgset.old_file_cache = mock.Mock()
@@ -934,6 +936,28 @@ class TestMergePackageSets(PkgsetCompareMixin, unittest.TestCase):
            first.rpms_by_arch, {"i686": ["rpms/bash@4.3.42@4.fc24@i686"], "noarch": []}
        )

    def test_merge_doesnt_exclude_noarch_exclude_arch_when_configured(self):
        first = pkgsets.PackageSetBase("first", [None])
        second = pkgsets.PackageSetBase("second", [None])

        pkg = first.file_cache.add("rpms/bash@4.3.42@4.fc24@i686")
        first.rpms_by_arch.setdefault(pkg.arch, []).append(pkg)

        pkg = second.file_cache.add("rpms/pungi@4.1.3@3.fc25@noarch")
        pkg.excludearch = ["i686"]
        second.rpms_by_arch.setdefault(pkg.arch, []).append(pkg)

        first.merge(second, "i386", ["i686", "noarch"], inherit_to_noarch=False)

        print(first.rpms_by_arch)
        self.assertPkgsetEqual(
            first.rpms_by_arch,
            {
                "i686": ["rpms/bash@4.3.42@4.fc24@i686"],
                "noarch": ["rpms/pungi@4.1.3@3.fc25@noarch"],
            },
        )

    def test_merge_excludes_noarch_exclusive_arch(self):
        first = pkgsets.PackageSetBase("first", [None])
        second = pkgsets.PackageSetBase("second", [None])
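
These merge tests define the inherit_to_noarch switch: with it on, ExcludeArch or ExclusiveArch inherited from the source RPM keeps a noarch package out of trees built for the excluded architectures; with it off, the noarch package is merged regardless. A minimal sketch of that decision, assuming attributes named excludearch and exclusivearch as in the tests (the real check sits in PackageSetBase.merge):

# Hedged sketch, not the actual PackageSetBase.merge implementation.
def keep_noarch_package(pkg, arches, inherit_to_noarch=True):
    """Return True if a noarch package may land in a tree built for ``arches``."""
    if not inherit_to_noarch:
        return True  # inheritance step skipped entirely
    excludearch = getattr(pkg, "excludearch", None) or []
    exclusivearch = getattr(pkg, "exclusivearch", None) or []
    if any(arch in excludearch for arch in arches):
        return False
    if exclusivearch and not any(arch in exclusivearch for arch in arches):
        return False
    return True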