Format code base with black

https://black.readthedocs.io/en/stable/

JIRA: COMPOSE-4086
Signed-off-by: Haibo Lin <hlin@redhat.com>

parent 38142d30ba
commit 41a629969c
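The change below is purely mechanical: black normalizes string literals to double quotes and re-wraps long calls to fit its default 88-column line length. A minimal sketch of reproducing the same transformation through black's Python API (assuming the black package is installed; black.format_str and black.Mode are its documented entry points, and the exact wrapping may differ between black versions):

import black

# One of the long Popen calls from this diff, as a source-code string.
SRC = (
    "proc = subprocess.Popen(['git', '--git-dir=%s/.git' % location, 'describe', "
    "'--tags'], stdout=subprocess.PIPE, universal_newlines=True)\n"
)

# format_str() only parses and reprints the code, it does not execute it,
# so undefined names like `location` are fine. The output has double quotes
# and one argument per line, matching the hunks below.
print(black.format_str(SRC, mode=black.Mode()))

Running the whole tree through `black .` produces the diff that follows.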
@@ -9,15 +9,20 @@ def get_full_version():
Find full version of Pungi: if running from git, this will return cleaned
output of `git describe`, otherwise it will look for installed version.
"""
location = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
if os.path.isdir(os.path.join(location, '.git')):
location = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..")
if os.path.isdir(os.path.join(location, ".git")):
import subprocess
proc = subprocess.Popen(['git', '--git-dir=%s/.git' % location, 'describe', '--tags'],
stdout=subprocess.PIPE, universal_newlines=True)

proc = subprocess.Popen(
["git", "--git-dir=%s/.git" % location, "describe", "--tags"],
stdout=subprocess.PIPE,
universal_newlines=True,
)
output, _ = proc.communicate()
return re.sub(r'-1.fc\d\d?', '', output.strip().replace('pungi-', ''))
return re.sub(r"-1.fc\d\d?", "", output.strip().replace("pungi-", ""))
else:
import subprocess

proc = subprocess.Popen(
["rpm", "-q", "pungi"], stdout=subprocess.PIPE, universal_newlines=True
)
@@ -25,4 +30,4 @@ def get_full_version():
if not err:
return output.rstrip()
else:
return 'unknown'
return "unknown"

@@ -93,14 +93,18 @@ def split_name_arch(name_arch):

def is_excluded(package, arches, logger=None):
"""Check if package is excluded from given architectures."""
if (package.excludearch and set(package.excludearch) & set(arches)):
if package.excludearch and set(package.excludearch) & set(arches):
if logger:
logger.debug("Excluding (EXCLUDEARCH: %s): %s"
% (sorted(set(package.excludearch)), package.file_name))
logger.debug(
"Excluding (EXCLUDEARCH: %s): %s"
% (sorted(set(package.excludearch)), package.file_name)
)
return True
if (package.exclusivearch and not (set(package.exclusivearch) & set(arches))):
if package.exclusivearch and not (set(package.exclusivearch) & set(arches)):
if logger:
logger.debug("Excluding (EXCLUSIVEARCH: %s): %s"
% (sorted(set(package.exclusivearch)), package.file_name))
logger.debug(
"Excluding (EXCLUSIVEARCH: %s): %s"
% (sorted(set(package.exclusivearch)), package.file_name)
)
return True
return False
@@ -12,12 +12,13 @@ import struct
_ppc64_native_is_best = True

# dict mapping arch -> ( multicompat, best personality, biarch personality )
multilibArches = {"x86_64": ("athlon", "x86_64", "athlon"),
"sparc64v": ("sparcv9v", "sparcv9v", "sparc64v"),
"sparc64": ("sparcv9", "sparcv9", "sparc64"),
"ppc64": ("ppc", "ppc", "ppc64"),
"s390x": ("s390", "s390x", "s390"),
}
multilibArches = {
"x86_64": ("athlon", "x86_64", "athlon"),
"sparc64v": ("sparcv9v", "sparcv9v", "sparc64v"),
"sparc64": ("sparcv9", "sparcv9", "sparc64"),
"ppc64": ("ppc", "ppc", "ppc64"),
"s390x": ("s390", "s390x", "s390"),
}
if _ppc64_native_is_best:
multilibArches["ppc64"] = ("ppc", "ppc64", "ppc64")

@@ -29,26 +30,21 @@ arches = {
"i586": "i486",
"i486": "i386",
"i386": "noarch",

# amd64
"x86_64": "athlon",
"amd64": "x86_64",
"ia32e": "x86_64",

# ppc64le
"ppc64le": "noarch",

# ppc
"ppc64p7": "ppc64",
"ppc64pseries": "ppc64",
"ppc64iseries": "ppc64",
"ppc64": "ppc",
"ppc": "noarch",

# s390{,x}
"s390x": "s390",
"s390": "noarch",

# sparc
"sparc64v": "sparcv9v",
"sparc64": "sparcv9",
@@ -56,7 +52,6 @@ arches = {
"sparcv9": "sparcv8",
"sparcv8": "sparc",
"sparc": "noarch",

# alpha
"alphaev7": "alphaev68",
"alphaev68": "alphaev67",
@@ -68,29 +63,23 @@ arches = {
"alphaev45": "alphaev4",
"alphaev4": "alpha",
"alpha": "noarch",

# arm
"armv7l": "armv6l",
"armv6l": "armv5tejl",
"armv5tejl": "armv5tel",
"armv5tel": "noarch",

# arm hardware floating point
"armv7hnl": "armv7hl",
"armv7hl": "armv6hl",
"armv6hl": "noarch",

# arm64
"arm64": "noarch",

# aarch64
"aarch64": "noarch",

# super-h
"sh4a": "sh4",
"sh4": "noarch",
"sh3": "noarch",

# itanium
"ia64": "noarch",
}
@@ -137,7 +126,7 @@ def getArchList(thisarch=None):  # pragma: no cover

# if we're a weirdo arch - add noarch on there.
if len(archlist) == 1 and archlist[0] == thisarch:
archlist.append('noarch')
archlist.append("noarch")
return archlist


@@ -208,10 +197,10 @@ def getCanonX86Arch(arch):  # pragma: no cover

def getCanonARMArch(arch):  # pragma: no cover
# the %{_target_arch} macro in rpm will let us know the abi we are using
target = rpm.expandMacro('%{_target_cpu}')
if target.startswith('armv6h'):
target = rpm.expandMacro("%{_target_cpu}")
if target.startswith("armv6h"):
return target
if target.startswith('armv7h'):
if target.startswith("armv7h"):
return target
return arch

@@ -224,7 +213,7 @@ def getCanonPPCArch(arch):  # pragma: no cover
machine = None
for line in _try_read_cpuinfo():
if line.find("machine") != -1:
machine = line.split(':')[1]
machine = line.split(":")[1]
break

platform = _aux_vector["platform"]
@@ -232,7 +221,7 @@ def getCanonPPCArch(arch):  # pragma: no cover
return arch

try:
if platform.startswith("power") and int(platform[5:].rstrip('+')) >= 7:
if platform.startswith("power") and int(platform[5:].rstrip("+")) >= 7:
return "ppc64p7"
except:
pass
@@ -252,7 +241,7 @@ def getCanonSPARCArch(arch):  # pragma: no cover
SPARCtype = None
for line in _try_read_cpuinfo():
if line.startswith("type"):
SPARCtype = line.split(':')[1]
SPARCtype = line.split(":")[1]
break
if SPARCtype is None:
return arch
@@ -279,7 +268,7 @@ def getCanonX86_64Arch(arch):  # pragma: no cover
vendor = None
for line in _try_read_cpuinfo():
if line.startswith("vendor_id"):
vendor = line.split(':')[1]
vendor = line.split(":")[1]
break
if vendor is None:
return arch
@@ -308,7 +297,7 @@ def getCanonArch(skipRpmPlatform=0):  # pragma: no cover

_parse_auxv()

if (len(arch) == 4 and arch[0] == "i" and arch[2:4] == "86"):
if len(arch) == 4 and arch[0] == "i" and arch[2:4] == "86":
return getCanonX86Arch(arch)

if arch.startswith("arm"):
@@ -370,7 +359,7 @@ def getBaseArch(myarch=None):  # pragma: no cover
if myarch in arches:
basearch = myarch
value = arches[basearch]
while value != 'noarch':
while value != "noarch":
basearch = value
value = arches[basearch]
pungi/checks.py | 890 (file diff suppressed because it is too large)
@@ -15,7 +15,6 @@

class OptionsBase(object):

def __init__(self, **kwargs):
"""
inherit and initialize attributes
@@ -29,5 +28,7 @@ class OptionsBase(object):
"""
for key, value in kwargs.items():
if not hasattr(self, key):
raise ValueError("Invalid option in %s: %s" % (self.__class__.__name__, key))
raise ValueError(
"Invalid option in %s: %s" % (self.__class__.__name__, key)
)
setattr(self, key, value)
pungi/compose.py | 175
@ -14,9 +14,7 @@
|
||||
# along with this program; if not, see <https://gnu.org/licenses/>.
|
||||
|
||||
|
||||
__all__ = (
|
||||
"Compose",
|
||||
)
|
||||
__all__ = ("Compose",)
|
||||
|
||||
|
||||
import errno
|
||||
@ -38,7 +36,10 @@ from pungi.wrappers.variants import VariantsXmlParser
|
||||
from pungi.paths import Paths
|
||||
from pungi.wrappers.scm import get_file_from_scm
|
||||
from pungi.util import (
|
||||
makedirs, get_arch_variant_data, get_format_substs, get_variant_data
|
||||
makedirs,
|
||||
get_arch_variant_data,
|
||||
get_format_substs,
|
||||
get_variant_data,
|
||||
)
|
||||
from pungi.metadata import compose_to_composeinfo
|
||||
|
||||
@ -50,7 +51,15 @@ except ImportError:
|
||||
SUPPORTED_MILESTONES = ["RC", "Update", "SecurityFix"]
|
||||
|
||||
|
||||
def get_compose_dir(topdir, conf, compose_type="production", compose_date=None, compose_respin=None, compose_label=None, already_exists_callbacks=None):
|
||||
def get_compose_dir(
|
||||
topdir,
|
||||
conf,
|
||||
compose_type="production",
|
||||
compose_date=None,
|
||||
compose_respin=None,
|
||||
compose_label=None,
|
||||
already_exists_callbacks=None,
|
||||
):
|
||||
already_exists_callbacks = already_exists_callbacks or []
|
||||
|
||||
# create an incomplete composeinfo to generate compose ID
|
||||
@ -107,7 +116,18 @@ def get_compose_dir(topdir, conf, compose_type="production", compose_date=None,
|
||||
|
||||
|
||||
class Compose(kobo.log.LoggingBase):
|
||||
def __init__(self, conf, topdir, skip_phases=None, just_phases=None, old_composes=None, koji_event=None, supported=False, logger=None, notifier=None):
|
||||
def __init__(
|
||||
self,
|
||||
conf,
|
||||
topdir,
|
||||
skip_phases=None,
|
||||
just_phases=None,
|
||||
old_composes=None,
|
||||
koji_event=None,
|
||||
supported=False,
|
||||
logger=None,
|
||||
notifier=None,
|
||||
):
|
||||
kobo.log.LoggingBase.__init__(self, logger)
|
||||
# TODO: check if minimal conf values are set
|
||||
self.conf = conf
|
||||
@ -128,18 +148,27 @@ class Compose(kobo.log.LoggingBase):
|
||||
|
||||
# Set up logging to file
|
||||
if logger:
|
||||
kobo.log.add_file_logger(logger, self.paths.log.log_file("global", "pungi.log"))
|
||||
kobo.log.add_file_logger(logger, self.paths.log.log_file("global", "excluding-arch.log"))
|
||||
kobo.log.add_file_logger(
|
||||
logger, self.paths.log.log_file("global", "pungi.log")
|
||||
)
|
||||
kobo.log.add_file_logger(
|
||||
logger, self.paths.log.log_file("global", "excluding-arch.log")
|
||||
)
|
||||
|
||||
class PungiLogFilter(logging.Filter):
|
||||
def filter(self, record):
|
||||
return False if record.funcName and record.funcName == 'is_excluded' else True
|
||||
return (
|
||||
False
|
||||
if record.funcName and record.funcName == "is_excluded"
|
||||
else True
|
||||
)
|
||||
|
||||
class ExcludingArchLogFilter(logging.Filter):
|
||||
def filter(self, record):
|
||||
message = record.getMessage()
|
||||
if 'Populating package set for arch:' in message or \
|
||||
(record.funcName and record.funcName == 'is_excluded'):
|
||||
if "Populating package set for arch:" in message or (
|
||||
record.funcName and record.funcName == "is_excluded"
|
||||
):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
@ -147,18 +176,26 @@ class Compose(kobo.log.LoggingBase):
|
||||
for handler in logger.handlers:
|
||||
if isinstance(handler, logging.FileHandler):
|
||||
log_file_name = os.path.basename(handler.stream.name)
|
||||
if log_file_name == 'pungi.global.log':
|
||||
if log_file_name == "pungi.global.log":
|
||||
handler.addFilter(PungiLogFilter())
|
||||
elif log_file_name == 'excluding-arch.global.log':
|
||||
elif log_file_name == "excluding-arch.global.log":
|
||||
handler.addFilter(ExcludingArchLogFilter())
|
||||
|
||||
# to provide compose_id, compose_date and compose_respin
|
||||
self.ci_base = ComposeInfo()
|
||||
self.ci_base.load(os.path.join(self.paths.work.topdir(arch="global"), "composeinfo-base.json"))
|
||||
self.ci_base.load(
|
||||
os.path.join(self.paths.work.topdir(arch="global"), "composeinfo-base.json")
|
||||
)
|
||||
|
||||
self.supported = supported
|
||||
if self.compose_label and self.compose_label.split("-")[0] in SUPPORTED_MILESTONES:
|
||||
self.log_info("Automatically setting 'supported' flag due to label: %s." % self.compose_label)
|
||||
if (
|
||||
self.compose_label
|
||||
and self.compose_label.split("-")[0] in SUPPORTED_MILESTONES
|
||||
):
|
||||
self.log_info(
|
||||
"Automatically setting 'supported' flag due to label: %s."
|
||||
% self.compose_label
|
||||
)
|
||||
self.supported = True
|
||||
|
||||
self.im = Images()
|
||||
@ -179,10 +216,10 @@ class Compose(kobo.log.LoggingBase):
|
||||
self.cache_region = make_region().configure(
|
||||
self.conf.get("dogpile_cache_backend"),
|
||||
expiration_time=self.conf.get("dogpile_cache_expiration_time", 3600),
|
||||
arguments=self.conf.get("dogpile_cache_arguments", {})
|
||||
arguments=self.conf.get("dogpile_cache_arguments", {}),
|
||||
)
|
||||
else:
|
||||
self.cache_region = make_region().configure('dogpile.cache.null')
|
||||
self.cache_region = make_region().configure("dogpile.cache.null")
|
||||
|
||||
get_compose_dir = staticmethod(get_compose_dir)
|
||||
|
||||
@ -234,10 +271,10 @@ class Compose(kobo.log.LoggingBase):
|
||||
"""Explicit configuration trumps all. Otherwise check gather backend
|
||||
and only create it for Yum.
|
||||
"""
|
||||
config = self.conf.get('createrepo_database')
|
||||
config = self.conf.get("createrepo_database")
|
||||
if config is not None:
|
||||
return config
|
||||
return self.conf['gather_backend'] == 'yum'
|
||||
return self.conf["gather_backend"] == "yum"
|
||||
|
||||
def read_variants(self):
|
||||
# TODO: move to phases/init ?
|
||||
@ -263,7 +300,9 @@ class Compose(kobo.log.LoggingBase):
|
||||
tree_arches = self.conf.get("tree_arches", None)
|
||||
tree_variants = self.conf.get("tree_variants", None)
|
||||
with open(variants_file, "r") as file_obj:
|
||||
parser = VariantsXmlParser(file_obj, tree_arches, tree_variants, logger=self._logger)
|
||||
parser = VariantsXmlParser(
|
||||
file_obj, tree_arches, tree_variants, logger=self._logger
|
||||
)
|
||||
self.variants = parser.parse()
|
||||
|
||||
self.all_variants = {}
|
||||
@ -294,21 +333,28 @@ class Compose(kobo.log.LoggingBase):
|
||||
@property
|
||||
def status_file(self):
|
||||
"""Path to file where the compose status will be stored."""
|
||||
if not hasattr(self, '_status_file'):
|
||||
self._status_file = os.path.join(self.topdir, 'STATUS')
|
||||
if not hasattr(self, "_status_file"):
|
||||
self._status_file = os.path.join(self.topdir, "STATUS")
|
||||
return self._status_file
|
||||
|
||||
def _log_failed_deliverables(self):
|
||||
for kind, data in self.failed_deliverables.items():
|
||||
for variant, arch, subvariant in data:
|
||||
self.log_info('Failed %s on variant <%s>, arch <%s>, subvariant <%s>.'
|
||||
% (kind, variant, arch, subvariant))
|
||||
log = os.path.join(self.paths.log.topdir('global'), 'deliverables.json')
|
||||
with open(log, 'w') as f:
|
||||
json.dump({'required': self.required_deliverables,
|
||||
'failed': self.failed_deliverables,
|
||||
'attempted': self.attempted_deliverables},
|
||||
f, indent=4)
|
||||
self.log_info(
|
||||
"Failed %s on variant <%s>, arch <%s>, subvariant <%s>."
|
||||
% (kind, variant, arch, subvariant)
|
||||
)
|
||||
log = os.path.join(self.paths.log.topdir("global"), "deliverables.json")
|
||||
with open(log, "w") as f:
|
||||
json.dump(
|
||||
{
|
||||
"required": self.required_deliverables,
|
||||
"failed": self.failed_deliverables,
|
||||
"attempted": self.attempted_deliverables,
|
||||
},
|
||||
f,
|
||||
indent=4,
|
||||
)
|
||||
|
||||
def write_status(self, stat_msg):
|
||||
if stat_msg not in ("STARTED", "FINISHED", "DOOMED", "TERMINATED"):
|
||||
@ -321,8 +367,8 @@ class Compose(kobo.log.LoggingBase):
|
||||
self.log_error(msg)
|
||||
raise RuntimeError(msg)
|
||||
|
||||
if stat_msg == 'FINISHED' and self.failed_deliverables:
|
||||
stat_msg = 'FINISHED_INCOMPLETE'
|
||||
if stat_msg == "FINISHED" and self.failed_deliverables:
|
||||
stat_msg = "FINISHED_INCOMPLETE"
|
||||
|
||||
self._log_failed_deliverables()
|
||||
|
||||
@ -330,21 +376,22 @@ class Compose(kobo.log.LoggingBase):
|
||||
f.write(stat_msg + "\n")
|
||||
|
||||
if self.notifier:
|
||||
self.notifier.send('status-change', status=stat_msg)
|
||||
self.notifier.send("status-change", status=stat_msg)
|
||||
|
||||
def get_status(self):
|
||||
if not os.path.isfile(self.status_file):
|
||||
return
|
||||
return open(self.status_file, "r").read().strip()
|
||||
|
||||
def get_image_name(self, arch, variant, disc_type='dvd',
|
||||
disc_num=1, suffix='.iso', format=None):
|
||||
def get_image_name(
|
||||
self, arch, variant, disc_type="dvd", disc_num=1, suffix=".iso", format=None
|
||||
):
|
||||
"""Create a filename for image with given parameters.
|
||||
|
||||
:raises RuntimeError: when unknown ``disc_type`` is given
|
||||
"""
|
||||
default_format = "{compose_id}-{variant}-{arch}-{disc_type}{disc_num}{suffix}"
|
||||
format = format or self.conf.get('image_name_format', default_format)
|
||||
format = format or self.conf.get("image_name_format", default_format)
|
||||
|
||||
if isinstance(format, dict):
|
||||
conf = get_variant_data(self.conf, "image_name_format", variant)
|
||||
@ -359,47 +406,54 @@ class Compose(kobo.log.LoggingBase):
|
||||
disc_num = ""
|
||||
|
||||
kwargs = {
|
||||
'arch': arch,
|
||||
'disc_type': disc_type,
|
||||
'disc_num': disc_num,
|
||||
'suffix': suffix
|
||||
"arch": arch,
|
||||
"disc_type": disc_type,
|
||||
"disc_num": disc_num,
|
||||
"suffix": suffix,
|
||||
}
|
||||
if variant.type == "layered-product":
|
||||
variant_uid = variant.parent.uid
|
||||
kwargs['compose_id'] = self.ci_base[variant.uid].compose_id
|
||||
kwargs["compose_id"] = self.ci_base[variant.uid].compose_id
|
||||
else:
|
||||
variant_uid = variant.uid
|
||||
args = get_format_substs(self, variant=variant_uid, **kwargs)
|
||||
try:
|
||||
return (format % args).format(**args)
|
||||
except KeyError as err:
|
||||
raise RuntimeError('Failed to create image name: unknown format element: %s' % err)
|
||||
raise RuntimeError(
|
||||
"Failed to create image name: unknown format element: %s" % err
|
||||
)
|
||||
|
||||
def can_fail(self, variant, arch, deliverable):
|
||||
"""Figure out if deliverable can fail on variant.arch.
|
||||
|
||||
Variant can be None.
|
||||
"""
|
||||
failable = get_arch_variant_data(self.conf, 'failable_deliverables', arch, variant)
|
||||
failable = get_arch_variant_data(
|
||||
self.conf, "failable_deliverables", arch, variant
|
||||
)
|
||||
return deliverable in failable
|
||||
|
||||
def attempt_deliverable(self, variant, arch, kind, subvariant=None):
|
||||
"""Log information about attempted deliverable."""
|
||||
variant_uid = variant.uid if variant else ''
|
||||
variant_uid = variant.uid if variant else ""
|
||||
self.attempted_deliverables.setdefault(kind, []).append(
|
||||
(variant_uid, arch, subvariant))
|
||||
(variant_uid, arch, subvariant)
|
||||
)
|
||||
|
||||
def require_deliverable(self, variant, arch, kind, subvariant=None):
|
||||
"""Log information about attempted deliverable."""
|
||||
variant_uid = variant.uid if variant else ''
|
||||
variant_uid = variant.uid if variant else ""
|
||||
self.required_deliverables.setdefault(kind, []).append(
|
||||
(variant_uid, arch, subvariant))
|
||||
(variant_uid, arch, subvariant)
|
||||
)
|
||||
|
||||
def fail_deliverable(self, variant, arch, kind, subvariant=None):
|
||||
"""Log information about failed deliverable."""
|
||||
variant_uid = variant.uid if variant else ''
|
||||
variant_uid = variant.uid if variant else ""
|
||||
self.failed_deliverables.setdefault(kind, []).append(
|
||||
(variant_uid, arch, subvariant))
|
||||
(variant_uid, arch, subvariant)
|
||||
)
|
||||
|
||||
@property
|
||||
def image_release(self):
|
||||
@ -409,11 +463,14 @@ class Compose(kobo.log.LoggingBase):
|
||||
otherwise we will create a string with date, compose type and respin.
|
||||
"""
|
||||
if self.compose_label:
|
||||
milestone, release = self.compose_label.split('-')
|
||||
milestone, release = self.compose_label.split("-")
|
||||
return release
|
||||
|
||||
return '%s%s.%s' % (self.compose_date, self.ci_base.compose.type_suffix,
|
||||
self.compose_respin)
|
||||
return "%s%s.%s" % (
|
||||
self.compose_date,
|
||||
self.ci_base.compose.type_suffix,
|
||||
self.compose_respin,
|
||||
)
|
||||
|
||||
@property
|
||||
def image_version(self):
|
||||
@ -423,9 +480,9 @@ class Compose(kobo.log.LoggingBase):
|
||||
milestone from it is appended to the version (unless it is RC).
|
||||
"""
|
||||
version = self.ci_base.release.version
|
||||
if self.compose_label and not self.compose_label.startswith('RC-'):
|
||||
milestone, release = self.compose_label.split('-')
|
||||
return '%s_%s' % (version, milestone)
|
||||
if self.compose_label and not self.compose_label.startswith("RC-"):
|
||||
milestone, release = self.compose_label.split("-")
|
||||
return "%s_%s" % (version, milestone)
|
||||
|
||||
return version
|
||||
|
||||
@ -451,7 +508,7 @@ def get_ordered_variant_uids(compose):
|
||||
setattr(
|
||||
compose,
|
||||
"_ordered_variant_uids",
|
||||
unordered_variant_uids + ordered_variant_uids
|
||||
unordered_variant_uids + ordered_variant_uids,
|
||||
)
|
||||
return getattr(compose, "_ordered_variant_uids")
|
||||
|
||||
@ -469,7 +526,9 @@ def _prepare_variant_as_lookaside(compose):
|
||||
try:
|
||||
graph.add_edge(variant, lookaside_variant)
|
||||
except ValueError as e:
|
||||
raise ValueError("There is a bad configuration in 'variant_as_lookaside': %s" % e)
|
||||
raise ValueError(
|
||||
"There is a bad configuration in 'variant_as_lookaside': %s" % e
|
||||
)
|
||||
|
||||
variant_processing_order = reversed(graph.prune_graph())
|
||||
return list(variant_processing_order)
|
||||
|
@@ -42,9 +42,12 @@ def write_discinfo(file_path, description, arch, disc_numbers=None, timestamp=No
"""
disc_numbers = disc_numbers or ["ALL"]
if not isinstance(disc_numbers, list):
raise TypeError("Invalid type: disc_numbers type is %s; expected: <list>" % type(disc_numbers))
raise TypeError(
"Invalid type: disc_numbers type is %s; expected: <list>"
% type(disc_numbers)
)
if not timestamp:
timestamp = os.environ.get('SOURCE_DATE_EPOCH', "%f" % time.time())
timestamp = os.environ.get("SOURCE_DATE_EPOCH", "%f" % time.time())
with open(file_path, "w") as f:
f.write("%s\n" % timestamp)
f.write("%s\n" % description)
@@ -21,51 +21,58 @@ import time
from ConfigParser import SafeConfigParser

from .arch_utils import getBaseArch

# In development, `here` will point to the bin/ directory with scripts.
here = sys.path[0]
MULTILIBCONF = (os.path.join(os.path.dirname(__file__), '..', 'share', 'multilib')
if here != '/usr/bin'
else '/usr/share/pungi/multilib')
MULTILIBCONF = (
os.path.join(os.path.dirname(__file__), "..", "share", "multilib")
if here != "/usr/bin"
else "/usr/share/pungi/multilib"
)


class Config(SafeConfigParser):
def __init__(self, pungirc=None):
SafeConfigParser.__init__(self)

self.add_section('pungi')
self.add_section('lorax')
self.add_section("pungi")
self.add_section("lorax")

self.set('pungi', 'osdir', 'os')
self.set('pungi', 'sourcedir', 'source')
self.set('pungi', 'debugdir', 'debug')
self.set('pungi', 'isodir', 'iso')
self.set('pungi', 'multilibconf', MULTILIBCONF)
self.set('pungi', 'relnotefilere', 'LICENSE README-BURNING-ISOS-en_US.txt ^RPM-GPG')
self.set('pungi', 'relnotedirre', '')
self.set('pungi', 'relnotepkgs', 'fedora-repos fedora-release fedora-release-notes')
self.set('pungi', 'product_path', 'Packages')
self.set('pungi', 'cachedir', '/var/cache/pungi')
self.set('pungi', 'compress_type', 'xz')
self.set('pungi', 'arch', getBaseArch())
self.set('pungi', 'family', 'Fedora')
self.set('pungi', 'iso_basename', 'Fedora')
self.set('pungi', 'version', time.strftime('%Y%m%d', time.localtime()))
self.set('pungi', 'variant', '')
self.set('pungi', 'destdir', os.getcwd())
self.set('pungi', 'workdirbase', "/work")
self.set('pungi', 'bugurl', 'https://bugzilla.redhat.com')
self.set('pungi', 'cdsize', '695.0')
self.set('pungi', 'debuginfo', "True")
self.set('pungi', 'alldeps', "True")
self.set('pungi', 'isfinal', "False")
self.set('pungi', 'nohash', "False")
self.set('pungi', 'full_archlist', "False")
self.set('pungi', 'multilib', '')
self.set('pungi', 'lookaside_repos', '')
self.set('pungi', 'resolve_deps', "True")
self.set('pungi', 'no_dvd', "False")
self.set('pungi', 'nomacboot', "False")
self.set('pungi', 'rootfs_size', "False")
self.set("pungi", "osdir", "os")
self.set("pungi", "sourcedir", "source")
self.set("pungi", "debugdir", "debug")
self.set("pungi", "isodir", "iso")
self.set("pungi", "multilibconf", MULTILIBCONF)
self.set(
"pungi", "relnotefilere", "LICENSE README-BURNING-ISOS-en_US.txt ^RPM-GPG"
)
self.set("pungi", "relnotedirre", "")
self.set(
"pungi", "relnotepkgs", "fedora-repos fedora-release fedora-release-notes"
)
self.set("pungi", "product_path", "Packages")
self.set("pungi", "cachedir", "/var/cache/pungi")
self.set("pungi", "compress_type", "xz")
self.set("pungi", "arch", getBaseArch())
self.set("pungi", "family", "Fedora")
self.set("pungi", "iso_basename", "Fedora")
self.set("pungi", "version", time.strftime("%Y%m%d", time.localtime()))
self.set("pungi", "variant", "")
self.set("pungi", "destdir", os.getcwd())
self.set("pungi", "workdirbase", "/work")
self.set("pungi", "bugurl", "https://bugzilla.redhat.com")
self.set("pungi", "cdsize", "695.0")
self.set("pungi", "debuginfo", "True")
self.set("pungi", "alldeps", "True")
self.set("pungi", "isfinal", "False")
self.set("pungi", "nohash", "False")
self.set("pungi", "full_archlist", "False")
self.set("pungi", "multilib", "")
self.set("pungi", "lookaside_repos", "")
self.set("pungi", "resolve_deps", "True")
self.set("pungi", "no_dvd", "False")
self.set("pungi", "nomacboot", "False")
self.set("pungi", "rootfs_size", "False")

# if missing, self.read() is a noop, else change 'defaults'
if pungirc:
@@ -11,10 +11,21 @@ from .wrappers import iso
from .wrappers.jigdo import JigdoWrapper


CreateIsoOpts = namedtuple('CreateIsoOpts',
['buildinstall_method', 'arch', 'output_dir', 'jigdo_dir',
'iso_name', 'volid', 'graft_points', 'supported', 'os_tree',
"hfs_compat"])
CreateIsoOpts = namedtuple(
"CreateIsoOpts",
[
"buildinstall_method",
"arch",
"output_dir",
"jigdo_dir",
"iso_name",
"volid",
"graft_points",
"supported",
"os_tree",
"hfs_compat",
],
)
CreateIsoOpts.__new__.__defaults__ = (None,) * len(CreateIsoOpts._fields)


@@ -22,8 +33,8 @@ def quote(str):
"""Quote an argument for shell, but make sure $TEMPLATE variable will be
expanded.
"""
if str.startswith('$TEMPLATE'):
return '$TEMPLATE%s' % shlex_quote(str.replace('$TEMPLATE', '', 1))
if str.startswith("$TEMPLATE"):
return "$TEMPLATE%s" % shlex_quote(str.replace("$TEMPLATE", "", 1))
return shlex_quote(str)


@@ -32,38 +43,46 @@ def emit(f, cmd):
if isinstance(cmd, six.string_types):
print(cmd, file=f)
else:
print(' '.join([quote(x) for x in cmd]), file=f)
print(" ".join([quote(x) for x in cmd]), file=f)


FIND_TEMPLATE_SNIPPET = """
if ! TEMPLATE="$($(head -n1 $(which lorax) | cut -c3-) -c 'import pylorax; print(pylorax.find_templates())')"; then
TEMPLATE=/usr/share/lorax;
fi
""".replace('\n', '')
""".replace(
"\n", ""
)


def make_image(f, opts):
mkisofs_kwargs = {}

if opts.buildinstall_method:
if opts.buildinstall_method == 'lorax':
if opts.buildinstall_method == "lorax":
emit(f, FIND_TEMPLATE_SNIPPET)
mkisofs_kwargs["boot_args"] = iso.get_boot_options(
opts.arch,
os.path.join("$TEMPLATE", "config_files/ppc"),
hfs_compat=opts.hfs_compat,
)
elif opts.buildinstall_method == 'buildinstall':
elif opts.buildinstall_method == "buildinstall":
mkisofs_kwargs["boot_args"] = iso.get_boot_options(
opts.arch, "/usr/lib/anaconda-runtime/boot")
opts.arch, "/usr/lib/anaconda-runtime/boot"
)

# ppc(64) doesn't seem to support utf-8
if opts.arch in ("ppc", "ppc64", "ppc64le"):
mkisofs_kwargs["input_charset"] = None

cmd = iso.get_mkisofs_cmd(opts.iso_name, None, volid=opts.volid,
exclude=["./lost+found"],
graft_points=opts.graft_points, **mkisofs_kwargs)
cmd = iso.get_mkisofs_cmd(
opts.iso_name,
None,
volid=opts.volid,
exclude=["./lost+found"],
graft_points=opts.graft_points,
**mkisofs_kwargs
)
emit(f, cmd)


@@ -88,22 +107,20 @@ def make_manifest(f, opts):

def make_jigdo(f, opts):
jigdo = JigdoWrapper()
files = [
{
"path": opts.os_tree,
"label": None,
"uri": None,
}
]
cmd = jigdo.get_jigdo_cmd(os.path.join(opts.output_dir, opts.iso_name),
files, output_dir=opts.jigdo_dir,
no_servers=True, report="noprogress")
files = [{"path": opts.os_tree, "label": None, "uri": None}]
cmd = jigdo.get_jigdo_cmd(
os.path.join(opts.output_dir, opts.iso_name),
files,
output_dir=opts.jigdo_dir,
no_servers=True,
report="noprogress",
)
emit(f, cmd)


def write_script(opts, f):
if bool(opts.jigdo_dir) != bool(opts.os_tree):
raise RuntimeError('jigdo_dir must be used together with os_tree')
raise RuntimeError("jigdo_dir must be used together with os_tree")

emit(f, "#!/bin/bash")
emit(f, "set -ex")
@@ -42,8 +42,8 @@ class Substitutions(dict):
# DNF version of Substitutions detects host arch. We don't want that.
def __init__(self, arch):
super(Substitutions, self).__init__()
self['arch'] = arch
self['basearch'] = dnf_arch.basearch(arch)
self["arch"] = arch
self["basearch"] = dnf_arch.basearch(arch)


class DnfWrapper(dnf.Base):
@@ -52,8 +52,9 @@ class DnfWrapper(dnf.Base):
self.arch_wrapper = ArchWrapper(self.conf.substitutions["arch"])
self.comps_wrapper = CompsWrapper(self)

def add_repo(self, repoid, baseurl=None, enablegroups=True, lookaside=False,
**kwargs):
def add_repo(
self, repoid, baseurl=None, enablegroups=True, lookaside=False, **kwargs
):
self.repos.add_new_repo(
repoid,
self.conf,
@@ -83,7 +84,13 @@ class CompsWrapper(object):
result[i.id] = i
return result

def get_packages_from_group(self, group_id, include_default=True, include_optional=True, include_conditional=True):
def get_packages_from_group(
self,
group_id,
include_default=True,
include_optional=True,
include_conditional=True,
):
packages = []
conditional = []

@@ -117,9 +124,11 @@ class CompsWrapper(object):
continue

include_default = group_include in (1, 2)
include_optional = group_include in (2, )
include_optional = group_include in (2,)
include_conditional = True
pkgs, cond = self.get_packages_from_group(group_id, include_default, include_optional, include_conditional)
pkgs, cond = self.get_packages_from_group(
group_id, include_default, include_optional, include_conditional
)
packages.update(pkgs)
for i in cond:
if i not in conditional:
@@ -136,7 +145,11 @@ class CompsWrapper(object):
class ArchWrapper(object):
def __init__(self, arch):
self.base_arch = dnf_arch.basearch(arch)
self.all_arches = pungi.arch.get_valid_arches(self.base_arch, multilib=True, add_noarch=True)
self.native_arches = pungi.arch.get_valid_arches(self.base_arch, multilib=False, add_noarch=True)
self.all_arches = pungi.arch.get_valid_arches(
self.base_arch, multilib=True, add_noarch=True
)
self.native_arches = pungi.arch.get_valid_arches(
self.base_arch, multilib=False, add_noarch=True
)
self.multilib_arches = pungi.arch.get_valid_multilib_arches(self.base_arch)
self.source_arches = ["src", "nosrc"]
pungi/gather.py | 1169 (file diff suppressed because it is too large)
@ -32,7 +32,7 @@ from pungi.util import DEBUG_PATTERNS
|
||||
|
||||
def get_source_name(pkg):
|
||||
# Workaround for rhbz#1418298
|
||||
return pkg.sourcerpm.rsplit('-', 2)[0]
|
||||
return pkg.sourcerpm.rsplit("-", 2)[0]
|
||||
|
||||
|
||||
class GatherOptions(pungi.common.OptionsBase):
|
||||
@ -79,21 +79,21 @@ class GatherOptions(pungi.common.OptionsBase):
|
||||
|
||||
def __str__(self):
|
||||
lines = [
|
||||
'fulltree=%s' % self.fulltree,
|
||||
'fulltree_excludes=%d items' % len(self.fulltree_excludes),
|
||||
'resolve_deps=%s' % self.resolve_deps,
|
||||
'selfhosting=%s' % self.selfhosting,
|
||||
'greedy_method=%s' % self.greedy_method,
|
||||
'langpacks=%s' % self.langpacks,
|
||||
'multilib_methods=%s' % self.multilib_methods,
|
||||
'multilib_blacklist=%d items' % len(self.multilib_blacklist),
|
||||
'multilib_whitelist=%d items' % len(self.multilib_whitelist),
|
||||
'lookaside_repos=%s' % self.lookaside_repos,
|
||||
'prepopulate=%d items' % len(self.prepopulate),
|
||||
'exclude_source=%s' % self.exclude_source,
|
||||
'exclude_debug=%s' % self.exclude_debug
|
||||
"fulltree=%s" % self.fulltree,
|
||||
"fulltree_excludes=%d items" % len(self.fulltree_excludes),
|
||||
"resolve_deps=%s" % self.resolve_deps,
|
||||
"selfhosting=%s" % self.selfhosting,
|
||||
"greedy_method=%s" % self.greedy_method,
|
||||
"langpacks=%s" % self.langpacks,
|
||||
"multilib_methods=%s" % self.multilib_methods,
|
||||
"multilib_blacklist=%d items" % len(self.multilib_blacklist),
|
||||
"multilib_whitelist=%d items" % len(self.multilib_whitelist),
|
||||
"lookaside_repos=%s" % self.lookaside_repos,
|
||||
"prepopulate=%d items" % len(self.prepopulate),
|
||||
"exclude_source=%s" % self.exclude_source,
|
||||
"exclude_debug=%s" % self.exclude_debug,
|
||||
]
|
||||
return '[\n%s\n]' % '\n'.join(' ' + l for l in lines)
|
||||
return "[\n%s\n]" % "\n".join(" " + l for l in lines)
|
||||
|
||||
|
||||
class QueryCache(object):
|
||||
@ -142,7 +142,9 @@ class GatherBase(object):
|
||||
# lookaside.
|
||||
|
||||
# source packages
|
||||
self.q_source_packages = q.filter(arch=self.dnf.arch_wrapper.source_arches).apply()
|
||||
self.q_source_packages = q.filter(
|
||||
arch=self.dnf.arch_wrapper.source_arches
|
||||
).apply()
|
||||
q = q.difference(self.q_source_packages)
|
||||
|
||||
# filter arches
|
||||
@ -191,8 +193,12 @@ class Gather(GatherBase):
|
||||
if not self.logger.handlers:
|
||||
# default logging handler
|
||||
handler = logging.StreamHandler()
|
||||
handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)-8s] %(message)s",
|
||||
datefmt="%Y-%m-%d %H:%M:%S"))
|
||||
handler.setFormatter(
|
||||
logging.Formatter(
|
||||
"%(asctime)s [%(levelname)-8s] %(message)s",
|
||||
datefmt="%Y-%m-%d %H:%M:%S",
|
||||
)
|
||||
)
|
||||
handler.setLevel(logging.DEBUG)
|
||||
self.logger.addHandler(handler)
|
||||
|
||||
@ -202,22 +208,23 @@ class Gather(GatherBase):
|
||||
self.dnf._sack,
|
||||
gather_options.multilib_methods,
|
||||
blacklist=self.opts.multilib_blacklist,
|
||||
whitelist=self.opts.multilib_whitelist)
|
||||
whitelist=self.opts.multilib_whitelist,
|
||||
)
|
||||
|
||||
# already processed packages
|
||||
self.finished_add_binary_package_deps = {} # {pkg: [deps]}
|
||||
self.finished_add_debug_package_deps = {} # {pkg: [deps]}
|
||||
self.finished_add_source_package_deps = {} # {pkg: [deps]}
|
||||
self.finished_add_binary_package_deps = {} # {pkg: [deps]}
|
||||
self.finished_add_debug_package_deps = {} # {pkg: [deps]}
|
||||
self.finished_add_source_package_deps = {} # {pkg: [deps]}
|
||||
|
||||
self.finished_get_package_deps_reqs = {}
|
||||
|
||||
self.finished_add_conditional_packages = {} # {pkg: [pkgs]}
|
||||
self.finished_add_source_packages = {} # {pkg: src-pkg|None}
|
||||
self.sourcerpm_cache = {} # {src_nvra: src-pkg|None}
|
||||
self.finished_add_debug_packages = {} # {pkg: [debug-pkgs]}
|
||||
self.finished_add_fulltree_packages = {} # {pkg: [pkgs]}
|
||||
self.finished_add_langpack_packages = {} # {pkg: [pkgs]}
|
||||
self.finished_add_multilib_packages = {} # {pkg: pkg|None}
|
||||
self.finished_add_conditional_packages = {} # {pkg: [pkgs]}
|
||||
self.finished_add_source_packages = {} # {pkg: src-pkg|None}
|
||||
self.sourcerpm_cache = {} # {src_nvra: src-pkg|None}
|
||||
self.finished_add_debug_packages = {} # {pkg: [debug-pkgs]}
|
||||
self.finished_add_fulltree_packages = {} # {pkg: [pkgs]}
|
||||
self.finished_add_langpack_packages = {} # {pkg: [pkgs]}
|
||||
self.finished_add_multilib_packages = {} # {pkg: pkg|None}
|
||||
|
||||
# result
|
||||
self.result_binary_packages = set()
|
||||
@ -254,11 +261,17 @@ class Gather(GatherBase):
|
||||
all_pkgs.append(pkg)
|
||||
|
||||
if not debuginfo:
|
||||
native_pkgs = set(self.q_native_binary_packages.filter(pkg=all_pkgs).apply())
|
||||
multilib_pkgs = set(self.q_multilib_binary_packages.filter(pkg=all_pkgs).apply())
|
||||
native_pkgs = set(
|
||||
self.q_native_binary_packages.filter(pkg=all_pkgs).apply()
|
||||
)
|
||||
multilib_pkgs = set(
|
||||
self.q_multilib_binary_packages.filter(pkg=all_pkgs).apply()
|
||||
)
|
||||
else:
|
||||
native_pkgs = set(self.q_native_debug_packages.filter(pkg=all_pkgs).apply())
|
||||
multilib_pkgs = set(self.q_multilib_debug_packages.filter(pkg=all_pkgs).apply())
|
||||
multilib_pkgs = set(
|
||||
self.q_multilib_debug_packages.filter(pkg=all_pkgs).apply()
|
||||
)
|
||||
|
||||
result = set()
|
||||
|
||||
@ -307,7 +320,7 @@ class Gather(GatherBase):
|
||||
version=pkg.version,
|
||||
release=pkg.release,
|
||||
arch=pkg.arch,
|
||||
reponame=self.opts.lookaside_repos
|
||||
reponame=self.opts.lookaside_repos,
|
||||
)
|
||||
return pkg in pkgs
|
||||
|
||||
@ -328,7 +341,7 @@ class Gather(GatherBase):
|
||||
# lookaside
|
||||
if self.is_from_lookaside(i):
|
||||
self._set_flag(i, PkgFlag.lookaside)
|
||||
if i.sourcerpm.rsplit('-', 2)[0] in self.opts.fulltree_excludes:
|
||||
if i.sourcerpm.rsplit("-", 2)[0] in self.opts.fulltree_excludes:
|
||||
self._set_flag(i, PkgFlag.fulltree_exclude)
|
||||
|
||||
def _get_package_deps(self, pkg, debuginfo=False):
|
||||
@ -350,8 +363,8 @@ class Gather(GatherBase):
|
||||
# empty.
|
||||
requires = (
|
||||
pkg.requires
|
||||
+ getattr(pkg, 'requires_pre', [])
|
||||
+ getattr(pkg, 'requires_post', [])
|
||||
+ getattr(pkg, "requires_pre", [])
|
||||
+ getattr(pkg, "requires_post", [])
|
||||
)
|
||||
|
||||
q = queue.filter(provides=requires).apply()
|
||||
@ -378,7 +391,9 @@ class Gather(GatherBase):
|
||||
"""Given an name of a queue (stored as attribute in `self`), exclude
|
||||
all given packages and keep only the latest per package name and arch.
|
||||
"""
|
||||
setattr(self, queue, getattr(self, queue).filter(pkg__neq=exclude).latest().apply())
|
||||
setattr(
|
||||
self, queue, getattr(self, queue).filter(pkg__neq=exclude).latest().apply()
|
||||
)
|
||||
|
||||
@Profiler("Gather._apply_excludes()")
|
||||
def _apply_excludes(self, excludes):
|
||||
@ -395,20 +410,22 @@ class Gather(GatherBase):
|
||||
with Profiler("Gather._apply_excludes():exclude"):
|
||||
if pattern.endswith(".+"):
|
||||
pkgs = self.q_multilib_binary_packages.filter(
|
||||
name__glob=pattern[:-2], arch__neq='noarch',
|
||||
reponame__neq=self.opts.lookaside_repos)
|
||||
name__glob=pattern[:-2],
|
||||
arch__neq="noarch",
|
||||
reponame__neq=self.opts.lookaside_repos,
|
||||
)
|
||||
elif pattern.endswith(".src"):
|
||||
pkgs = self.q_source_packages.filter(
|
||||
name__glob=pattern[:-4],
|
||||
reponame__neq=self.opts.lookaside_repos)
|
||||
name__glob=pattern[:-4], reponame__neq=self.opts.lookaside_repos
|
||||
)
|
||||
elif pungi.util.pkg_is_debug(pattern):
|
||||
pkgs = self.q_debug_packages.filter(
|
||||
name__glob=pattern,
|
||||
reponame__neq=self.opts.lookaside_repos)
|
||||
name__glob=pattern, reponame__neq=self.opts.lookaside_repos
|
||||
)
|
||||
else:
|
||||
pkgs = self.q_binary_packages.filter(
|
||||
name__glob=pattern,
|
||||
reponame__neq=self.opts.lookaside_repos)
|
||||
name__glob=pattern, reponame__neq=self.opts.lookaside_repos
|
||||
)
|
||||
|
||||
exclude.update(pkgs)
|
||||
self.logger.debug("EXCLUDED by %s: %s", pattern, [str(p) for p in pkgs])
|
||||
@ -417,15 +434,22 @@ class Gather(GatherBase):
|
||||
for pattern in self.opts.multilib_blacklist:
|
||||
with Profiler("Gather._apply_excludes():exclude-multilib-blacklist"):
|
||||
# TODO: does whitelist affect this in any way?
|
||||
pkgs = self.q_multilib_binary_packages.filter(name__glob=pattern, arch__neq='noarch')
|
||||
pkgs = self.q_multilib_binary_packages.filter(
|
||||
name__glob=pattern, arch__neq="noarch"
|
||||
)
|
||||
exclude.update(pkgs)
|
||||
self.logger.debug("EXCLUDED by %s: %s", pattern, [str(p) for p in pkgs])
|
||||
self.dnf._sack.add_excludes(pkgs)
|
||||
|
||||
all_queues = ['q_binary_packages', 'q_native_binary_packages',
|
||||
'q_multilib_binary_packages', 'q_noarch_binary_packages',
|
||||
'q_source_packages', 'q_native_debug_packages',
|
||||
'q_multilib_debug_packages']
|
||||
all_queues = [
|
||||
"q_binary_packages",
|
||||
"q_native_binary_packages",
|
||||
"q_multilib_binary_packages",
|
||||
"q_noarch_binary_packages",
|
||||
"q_source_packages",
|
||||
"q_native_debug_packages",
|
||||
"q_multilib_debug_packages",
|
||||
]
|
||||
|
||||
with Profiler("Gather._apply_excludes():exclude-queries"):
|
||||
for queue in all_queues:
|
||||
@ -449,10 +473,14 @@ class Gather(GatherBase):
|
||||
for pattern in includes:
|
||||
with Profiler("Gather.add_initial_packages():include"):
|
||||
if pattern == "system-release" and self.opts.greedy_method == "all":
|
||||
pkgs = self.q_binary_packages.filter(provides="system-release").apply()
|
||||
pkgs = self.q_binary_packages.filter(
|
||||
provides="system-release"
|
||||
).apply()
|
||||
else:
|
||||
if pattern.endswith(".+"):
|
||||
pkgs = self.q_multilib_binary_packages.filter(name__glob=pattern[:-2]).apply()
|
||||
pkgs = self.q_multilib_binary_packages.filter(
|
||||
name__glob=pattern[:-2]
|
||||
).apply()
|
||||
else:
|
||||
pkgs = self.q_binary_packages.filter(name__glob=pattern).apply()
|
||||
|
||||
@ -482,19 +510,37 @@ class Gather(GatherBase):
|
||||
# Must be executed *after* add_initial_packages() to exclude packages properly.
|
||||
|
||||
# source
|
||||
self.source_pkgs_cache = QueryCache(self.q_source_packages, "name", "version", "release")
|
||||
self.source_pkgs_cache = QueryCache(
|
||||
self.q_source_packages, "name", "version", "release"
|
||||
)
|
||||
|
||||
# debug
|
||||
self.native_debug_packages_cache = QueryCache(self.q_native_debug_packages, "sourcerpm")
|
||||
self.multilib_debug_packages_cache = QueryCache(self.q_multilib_debug_packages, "sourcerpm")
|
||||
self.native_debug_packages_cache = QueryCache(
|
||||
self.q_native_debug_packages, "sourcerpm"
|
||||
)
|
||||
self.multilib_debug_packages_cache = QueryCache(
|
||||
self.q_multilib_debug_packages, "sourcerpm"
|
||||
)
|
||||
|
||||
# packages by sourcerpm
|
||||
self.q_native_pkgs_by_sourcerpm_cache = QueryCache(self.q_native_binary_packages, "sourcerpm", arch__neq="noarch")
|
||||
self.q_multilib_pkgs_by_sourcerpm_cache = QueryCache(self.q_multilib_binary_packages, "sourcerpm", arch__neq="noarch")
|
||||
self.q_noarch_pkgs_by_sourcerpm_cache = QueryCache(self.q_native_binary_packages, "sourcerpm", arch="noarch")
|
||||
self.q_native_pkgs_by_sourcerpm_cache = QueryCache(
|
||||
self.q_native_binary_packages, "sourcerpm", arch__neq="noarch"
|
||||
)
|
||||
self.q_multilib_pkgs_by_sourcerpm_cache = QueryCache(
|
||||
self.q_multilib_binary_packages, "sourcerpm", arch__neq="noarch"
|
||||
)
|
||||
self.q_noarch_pkgs_by_sourcerpm_cache = QueryCache(
|
||||
self.q_native_binary_packages, "sourcerpm", arch="noarch"
|
||||
)
|
||||
|
||||
# multilib
|
||||
self.q_multilib_binary_packages_cache = QueryCache(self.q_multilib_binary_packages, "name", "version", "release", arch__neq="noarch")
|
||||
self.q_multilib_binary_packages_cache = QueryCache(
|
||||
self.q_multilib_binary_packages,
|
||||
"name",
|
||||
"version",
|
||||
"release",
|
||||
arch__neq="noarch",
|
||||
)
|
||||
|
||||
# prepopulate
|
||||
self.prepopulate_cache = QueryCache(self.q_binary_packages, "name", "arch")
|
||||
@ -531,7 +577,9 @@ class Gather(GatherBase):
|
||||
deps = self._get_package_deps(pkg)
|
||||
for i, req in deps:
|
||||
if i not in self.result_binary_packages:
|
||||
self._add_packages([i], pulled_by=pkg, req=req, reason='binary-dep')
|
||||
self._add_packages(
|
||||
[i], pulled_by=pkg, req=req, reason="binary-dep"
|
||||
)
|
||||
added.add(i)
|
||||
self.finished_add_binary_package_deps[pkg] = deps
|
||||
|
||||
@ -593,7 +641,7 @@ class Gather(GatherBase):
|
||||
|
||||
for i in deps:
|
||||
if i not in self.result_binary_packages:
|
||||
self._add_packages([i], pulled_by=pkg, reason='cond-dep')
|
||||
self._add_packages([i], pulled_by=pkg, reason="cond-dep")
|
||||
self._set_flag(pkg, PkgFlag.conditional)
|
||||
added.add(i)
|
||||
|
||||
@ -617,10 +665,14 @@ class Gather(GatherBase):
|
||||
deps = self.finished_add_source_package_deps[pkg]
|
||||
except KeyError:
|
||||
deps = self._get_package_deps(pkg)
|
||||
self.finished_add_source_package_deps[pkg] = set(dep for (dep, req) in deps)
|
||||
self.finished_add_source_package_deps[pkg] = set(
|
||||
dep for (dep, req) in deps
|
||||
)
|
||||
for i, req in deps:
|
||||
if i not in self.result_binary_packages:
|
||||
self._add_packages([i], pulled_by=pkg, req=req, reason='source-dep')
|
||||
self._add_packages(
|
||||
[i], pulled_by=pkg, req=req, reason="source-dep"
|
||||
)
|
||||
added.add(i)
|
||||
self._set_flag(pkg, PkgFlag.self_hosting)
|
||||
|
||||
@ -658,7 +710,9 @@ class Gather(GatherBase):
|
||||
source_pkg = self.sourcerpm_cache.get(pkg.sourcerpm, None)
|
||||
if source_pkg is None:
|
||||
nvra = parse_nvra(pkg.sourcerpm)
|
||||
source_pkgs = self.source_pkgs_cache.get(nvra["name"], nvra["version"], nvra["release"])
|
||||
source_pkgs = self.source_pkgs_cache.get(
|
||||
nvra["name"], nvra["version"], nvra["release"]
|
||||
)
|
||||
if source_pkgs:
|
||||
source_pkg = self._get_matching_srpm(pkg, source_pkgs)
|
||||
self.sourcerpm_cache[pkg.sourcerpm] = source_pkg
|
||||
@ -667,8 +721,10 @@ class Gather(GatherBase):
|
||||
if not source_pkg:
|
||||
continue
|
||||
|
||||
if (source_pkg.repoid in self.opts.lookaside_repos
|
||||
or pkg.repoid in self.opts.lookaside_repos):
|
||||
if (
|
||||
source_pkg.repoid in self.opts.lookaside_repos
|
||||
or pkg.repoid in self.opts.lookaside_repos
|
||||
):
|
||||
self._set_flag(source_pkg, PkgFlag.lookaside)
|
||||
if source_pkg not in self.result_source_packages:
|
||||
added.add(source_pkg)
|
||||
@ -741,15 +797,21 @@ class Gather(GatherBase):
|
||||
assert pkg is not None
|
||||
|
||||
if get_source_name(pkg) in self.opts.fulltree_excludes:
|
||||
self.logger.debug('No fulltree for %s due to exclude list', pkg)
|
||||
self.logger.debug("No fulltree for %s due to exclude list", pkg)
|
||||
continue
|
||||
|
||||
try:
|
||||
fulltree_pkgs = self.finished_add_fulltree_packages[pkg]
|
||||
except KeyError:
|
||||
native_fulltree_pkgs = self.q_native_pkgs_by_sourcerpm_cache.get(pkg.sourcerpm) or []
|
||||
multilib_fulltree_pkgs = self.q_multilib_pkgs_by_sourcerpm_cache.get(pkg.sourcerpm) or []
|
||||
noarch_fulltree_pkgs = self.q_noarch_pkgs_by_sourcerpm_cache.get(pkg.sourcerpm) or []
|
||||
native_fulltree_pkgs = (
|
||||
self.q_native_pkgs_by_sourcerpm_cache.get(pkg.sourcerpm) or []
|
||||
)
|
||||
multilib_fulltree_pkgs = (
|
||||
self.q_multilib_pkgs_by_sourcerpm_cache.get(pkg.sourcerpm) or []
|
||||
)
|
||||
noarch_fulltree_pkgs = (
|
||||
self.q_noarch_pkgs_by_sourcerpm_cache.get(pkg.sourcerpm) or []
|
||||
)
|
||||
|
||||
if not native_fulltree_pkgs:
|
||||
# no existing native pkgs -> pull multilib
|
||||
@ -767,9 +829,9 @@ class Gather(GatherBase):
|
||||
# We pull packages determined by `pull_native`, or everything
|
||||
# if we're greedy
|
||||
fulltree_pkgs = []
|
||||
if pull_native or self.opts.greedy_method == 'all':
|
||||
if pull_native or self.opts.greedy_method == "all":
|
||||
fulltree_pkgs.extend(native_fulltree_pkgs)
|
||||
if not pull_native or self.opts.greedy_method == 'all':
|
||||
if not pull_native or self.opts.greedy_method == "all":
|
||||
fulltree_pkgs.extend(multilib_fulltree_pkgs)
|
||||
|
||||
# always pull all noarch subpackages
|
||||
@ -777,7 +839,7 @@ class Gather(GatherBase):
|
||||
|
||||
for i in fulltree_pkgs:
|
||||
if i not in self.result_binary_packages:
|
||||
self._add_packages([i], reason='fulltree')
|
||||
self._add_packages([i], reason="fulltree")
|
||||
self._set_flag(i, PkgFlag.fulltree)
|
||||
added.add(i)
|
||||
|
||||
@ -809,15 +871,21 @@ class Gather(GatherBase):
|
||||
try:
|
||||
langpack_pkgs = self.finished_add_langpack_packages[pkg]
|
||||
except KeyError:
|
||||
patterns = [i["install"] for i in langpack_patterns if i["name"] == pkg.name]
|
||||
patterns = [
|
||||
i["install"] for i in langpack_patterns if i["name"] == pkg.name
|
||||
]
|
||||
patterns = [i.replace("%s", "*") for i in patterns]
|
||||
|
||||
if not patterns:
|
||||
self.finished_add_langpack_packages[pkg] = []
|
||||
continue
|
||||
|
||||
langpack_pkgs = self.q_binary_packages.filter(name__glob=patterns).apply()
|
||||
langpack_pkgs = langpack_pkgs.filter(name__glob__not=["*-devel", "*-static"])
|
||||
langpack_pkgs = self.q_binary_packages.filter(
|
||||
name__glob=patterns
|
||||
).apply()
|
||||
langpack_pkgs = langpack_pkgs.filter(
|
||||
name__glob__not=["*-devel", "*-static"]
|
||||
)
|
||||
langpack_pkgs = langpack_pkgs.filter(name__neq=exceptions)
|
||||
|
||||
pkgs_by_name = {}
|
||||
@ -834,7 +902,7 @@ class Gather(GatherBase):
|
||||
langpack_pkgs.add(i)
|
||||
self._set_flag(i, PkgFlag.langpack)
|
||||
if i not in self.result_binary_packages:
|
||||
self._add_packages([i], pulled_by=pkg, reason='langpack')
|
||||
self._add_packages([i], pulled_by=pkg, reason="langpack")
|
||||
added.add(pkg)
|
||||
self.finished_add_langpack_packages[pkg] = langpack_pkgs
|
||||
|
||||
@ -856,7 +924,9 @@ class Gather(GatherBase):
|
||||
self.finished_add_multilib_packages[pkg] = None
|
||||
continue
|
||||
|
||||
pkgs = self.q_multilib_binary_packages_cache.get(pkg.name, pkg.version, pkg.release)
|
||||
pkgs = self.q_multilib_binary_packages_cache.get(
|
||||
pkg.name, pkg.version, pkg.release
|
||||
)
|
||||
pkgs = self._get_best_package(pkgs)
|
||||
multilib_pkgs = []
|
||||
for i in pkgs:
|
||||
@ -865,7 +935,7 @@ class Gather(GatherBase):
|
||||
multilib_pkgs.append(i)
|
||||
added.add(i)
|
||||
self._set_flag(i, PkgFlag.multilib)
|
||||
self._add_packages([i], reason='multilib:%s' % is_multilib)
|
||||
self._add_packages([i], reason="multilib:%s" % is_multilib)
|
||||
self.finished_add_multilib_packages[pkg] = i
|
||||
# TODO: ^^^ may get multiple results; i686, i586, etc.
|
||||
|
||||
@ -879,45 +949,51 @@ class Gather(GatherBase):
|
||||
added = self.add_initial_packages(pattern_list)
|
||||
self._add_packages(added)
|
||||
|
||||
added = self.log_count('PREPOPULATE', self.add_prepopulate_packages)
|
||||
self._add_packages(added, reason='prepopulate')
|
||||
added = self.log_count("PREPOPULATE", self.add_prepopulate_packages)
|
||||
self._add_packages(added, reason="prepopulate")
|
||||
|
||||
for pass_num in count(1):
|
||||
self.logger.debug("PASS %s" % pass_num)
|
||||
|
||||
if self.log_count('CONDITIONAL DEPS', self.add_conditional_packages):
|
||||
if self.log_count("CONDITIONAL DEPS", self.add_conditional_packages):
|
||||
continue
|
||||
|
||||
# resolve deps
|
||||
if self.log_count('BINARY DEPS', self.add_binary_package_deps):
|
||||
if self.log_count("BINARY DEPS", self.add_binary_package_deps):
|
||||
continue
|
||||
|
||||
if self.log_count('SOURCE DEPS', self.add_source_package_deps):
|
||||
if self.log_count("SOURCE DEPS", self.add_source_package_deps):
|
||||
continue
|
||||
|
||||
if self.log_count('SOURCE PACKAGES', self.add_source_packages):
|
||||
if self.log_count("SOURCE PACKAGES", self.add_source_packages):
|
||||
continue
|
||||
|
||||
if self.log_count('DEBUG PACKAGES', self.add_debug_packages):
|
||||
if self.log_count("DEBUG PACKAGES", self.add_debug_packages):
|
||||
continue
|
||||
|
||||
if self.log_count("DEBUG DEPS", self.add_debug_package_deps):
|
||||
continue
|
||||
|
||||
if self.log_count('FULLTREE', self.add_fulltree_packages):
|
||||
if self.log_count("FULLTREE", self.add_fulltree_packages):
|
||||
continue
|
||||
|
||||
if self.log_count('LANGPACKS', self.add_langpack_packages, self.opts.langpacks):
|
||||
if self.log_count(
|
||||
"LANGPACKS", self.add_langpack_packages, self.opts.langpacks
|
||||
):
|
||||
continue
|
||||
|
||||
if self.log_count('MULTILIB', self.add_multilib_packages):
|
||||
if self.log_count("MULTILIB", self.add_multilib_packages):
|
||||
continue
|
||||
|
||||
# nothing added -> break depsolving cycle
|
||||
break
|
||||
|
||||
def download(self, destdir):
|
||||
pkglist = (self.result_binary_packages | self.result_debug_packages | self.result_source_packages)
|
||||
pkglist = (
|
||||
self.result_binary_packages
|
||||
| self.result_debug_packages
|
||||
| self.result_source_packages
|
||||
)
|
||||
self.dnf.download_packages(pkglist)
|
||||
linker = Linker(logger=self.logger)
|
||||
|
||||
@ -937,7 +1013,7 @@ class Gather(GatherBase):
|
||||
Print a message, run the function with given arguments and log length
|
||||
of result.
|
||||
"""
|
||||
self.logger.debug('%s', msg)
|
||||
self.logger.debug("%s", msg)
|
||||
added = method(*args)
|
||||
self.logger.debug('ADDED: %s', len(added))
|
||||
self.logger.debug("ADDED: %s", len(added))
|
||||
return added
|
||||
|
@@ -8,6 +8,7 @@ class SimpleAcyclicOrientedGraph(object):
Graph is constructed by adding oriented edges one by one. It can not contain cycles.
Main result is spanning line, it determines ordering of the nodes.
"""

def __init__(self):
self._graph = {}
self._all_nodes = set()
@@ -18,7 +19,9 @@ class SimpleAcyclicOrientedGraph(object):
This operation must not create a cycle in the graph.
"""
if start == end:
raise ValueError("Can not add this kind of edge into graph: %s-%s" % (start, end))
raise ValueError(
"Can not add this kind of edge into graph: %s-%s" % (start, end)
)
self._graph.setdefault(start, [])
if end not in self._graph[start]:
self._graph[start].append(end)
pungi/ks.py | 28

@@ -82,7 +82,7 @@ class FulltreeExcludesSection(pykickstart.sections.Section):
if not self.handler:
return

(h, s, t) = line.partition('#')
(h, s, t) = line.partition("#")
line = h.rstrip()

self.handler.fulltree_excludes.add(line)
@@ -95,7 +95,7 @@ class MultilibBlacklistSection(pykickstart.sections.Section):
if not self.handler:
return

(h, s, t) = line.partition('#')
(h, s, t) = line.partition("#")
line = h.rstrip()

self.handler.multilib_blacklist.add(line)
@@ -108,7 +108,7 @@ class MultilibWhitelistSection(pykickstart.sections.Section):
if not self.handler:
return

(h, s, t) = line.partition('#')
(h, s, t) = line.partition("#")
line = h.rstrip()

self.handler.multilib_whitelist.add(line)
@@ -121,7 +121,7 @@ class PrepopulateSection(pykickstart.sections.Section):
if not self.handler:
return

(h, s, t) = line.partition('#')
(h, s, t) = line.partition("#")
line = h.rstrip()

self.handler.prepopulate.add(line)
@@ -154,7 +154,15 @@ class KickstartParser(pykickstart.parser.KickstartParser):
include_default = True
include_optional = True

group_packages, group_conditional_packages = dnf_obj.comps_wrapper.get_packages_from_group(group_id, include_default=include_default, include_optional=include_optional, include_conditional=True)
(
group_packages,
group_conditional_packages,
) = dnf_obj.comps_wrapper.get_packages_from_group(
group_id,
include_default=include_default,
include_optional=include_optional,
include_conditional=True,
)
packages.update(group_packages)
for i in group_conditional_packages:
if i not in conditional_packages:
@@ -178,7 +186,15 @@ class KickstartParser(pykickstart.parser.KickstartParser):
include_default = True
include_optional = True

group_packages, group_conditional_packages = dnf_obj.comps_wrapper.get_packages_from_group(group_id, include_default=include_default, include_optional=include_optional, include_conditional=False)
(
group_packages,
group_conditional_packages,
) = dnf_obj.comps_wrapper.get_packages_from_group(
group_id,
include_default=include_default,
include_optional=include_optional,
include_conditional=False,
)
excluded.update(group_packages)

return excluded
@ -56,7 +56,9 @@ class LinkerThread(WorkerThread):
src, dst = item

if (num % 100 == 0) or (num == self.pool.queue_total):
self.pool.log_debug("Linked %s out of %s packages" % (num, self.pool.queue_total))
self.pool.log_debug(
"Linked %s out of %s packages" % (num, self.pool.queue_total)
)

directory = os.path.dirname(dst)
makedirs(directory)
@ -113,7 +115,10 @@ class Linker(kobo.log.LoggingBase):
if os.path.islink(dst) and self._is_same(old_src, dst):
if os.readlink(dst) != src:
raise
self.log_debug("The same file already exists, skipping symlink %s -> %s" % (dst, src))
self.log_debug(
"The same file already exists, skipping symlink %s -> %s"
% (dst, src)
)
else:
raise

@ -134,9 +139,15 @@ class Linker(kobo.log.LoggingBase):
raise
if self._is_same(src, dst):
if not self._is_same_type(src, dst):
self.log_error("File %s already exists but has different type than %s" % (dst, src))
self.log_error(
"File %s already exists but has different type than %s"
% (dst, src)
)
raise
self.log_debug("The same file already exists, skipping hardlink %s to %s" % (src, dst))
self.log_debug(
"The same file already exists, skipping hardlink %s to %s"
% (src, dst)
)
else:
raise

@ -157,9 +168,14 @@ class Linker(kobo.log.LoggingBase):
if os.path.exists(dst):
if self._is_same(src, dst):
if not self._is_same_type(src, dst):
self.log_error("File %s already exists but has different type than %s" % (dst, src))
self.log_error(
"File %s already exists but has different type than %s"
% (dst, src)
)
raise OSError(errno.EEXIST, "File exists")
self.log_debug("The same file already exists, skipping copy %s to %s" % (src, dst))
self.log_debug(
"The same file already exists, skipping copy %s to %s" % (src, dst)
)
return
else:
raise OSError(errno.EEXIST, "File exists")
@ -174,7 +190,10 @@ class Linker(kobo.log.LoggingBase):
src_key = (src_stat.st_dev, src_stat.st_ino)
if src_key in self._inode_map:
# (st_dev, st_ino) found in the mapping
self.log_debug("Harlink detected, hardlinking in destination %s to %s" % (self._inode_map[src_key], dst))
self.log_debug(
"Harlink detected, hardlinking in destination %s to %s"
% (self._inode_map[src_key], dst)
)
os.link(self._inode_map[src_key], dst)
return

@ -61,6 +61,7 @@ class MediaSplitter(object):
are added; there is no re-ordering. The number of disk is thus not the
possible minimum.
"""

def __init__(self, media_size, compose=None, logger=None):
self.media_size = media_size
self.files = [] # to preserve order
@ -77,7 +78,9 @@ class MediaSplitter(object):
old_size = self.file_sizes.get(name, None)

if old_size is not None and old_size != size:
raise ValueError("File size mismatch; file: %s; sizes: %s vs %s" % (name, old_size, size))
raise ValueError(
"File size mismatch; file: %s; sizes: %s vs %s" % (name, old_size, size)
)
if self.media_size and size > self.media_size:
raise ValueError("File is larger than media size: %s" % name)

@ -32,11 +32,22 @@ def get_description(compose, variant, arch):
|
||||
result = compose.conf["release_discinfo_description"]
|
||||
elif variant.type == "layered-product":
|
||||
# we need to make sure the layered product behaves as it was composed separately
|
||||
result = "%s %s for %s %s" % (variant.release_name, variant.release_version, compose.conf["release_name"], get_major_version(compose.conf["release_version"]))
|
||||
result = "%s %s for %s %s" % (
|
||||
variant.release_name,
|
||||
variant.release_version,
|
||||
compose.conf["release_name"],
|
||||
get_major_version(compose.conf["release_version"]),
|
||||
)
|
||||
else:
|
||||
result = "%s %s" % (compose.conf["release_name"], compose.conf["release_version"])
|
||||
result = "%s %s" % (
|
||||
compose.conf["release_name"],
|
||||
compose.conf["release_version"],
|
||||
)
|
||||
if compose.conf.get("base_product_name", ""):
|
||||
result += " for %s %s" % (compose.conf["base_product_name"], compose.conf["base_product_version"])
|
||||
result += " for %s %s" % (
|
||||
compose.conf["base_product_name"],
|
||||
compose.conf["base_product_version"],
|
||||
)
|
||||
|
||||
result = result % {"variant_name": variant.name, "arch": arch}
|
||||
return result
|
||||
@ -112,32 +123,122 @@ def compose_to_composeinfo(compose):
|
||||
|
||||
for arch in variant.arches:
|
||||
# paths: binaries
|
||||
var.paths.os_tree[arch] = relative_path(compose.paths.compose.os_tree(arch=arch, variant=variant, create_dir=False).rstrip("/") + "/", compose.paths.compose.topdir().rstrip("/") + "/").rstrip("/")
|
||||
var.paths.repository[arch] = relative_path(compose.paths.compose.repository(arch=arch, variant=variant, create_dir=False).rstrip("/") + "/", compose.paths.compose.topdir().rstrip("/") + "/").rstrip("/")
|
||||
var.paths.packages[arch] = relative_path(compose.paths.compose.packages(arch=arch, variant=variant, create_dir=False).rstrip("/") + "/", compose.paths.compose.topdir().rstrip("/") + "/").rstrip("/")
|
||||
iso_dir = compose.paths.compose.iso_dir(arch=arch, variant=variant, create_dir=False) or ""
|
||||
if iso_dir and os.path.isdir(os.path.join(compose.paths.compose.topdir(), iso_dir)):
|
||||
var.paths.isos[arch] = relative_path(iso_dir, compose.paths.compose.topdir().rstrip("/") + "/").rstrip("/")
|
||||
jigdo_dir = compose.paths.compose.jigdo_dir(arch=arch, variant=variant, create_dir=False) or ""
|
||||
if jigdo_dir and os.path.isdir(os.path.join(compose.paths.compose.topdir(), jigdo_dir)):
|
||||
var.paths.jigdos[arch] = relative_path(jigdo_dir, compose.paths.compose.topdir().rstrip("/") + "/").rstrip("/")
|
||||
var.paths.os_tree[arch] = relative_path(
|
||||
compose.paths.compose.os_tree(
|
||||
arch=arch, variant=variant, create_dir=False
|
||||
).rstrip("/")
|
||||
+ "/",
|
||||
compose.paths.compose.topdir().rstrip("/") + "/",
|
||||
).rstrip("/")
|
||||
var.paths.repository[arch] = relative_path(
|
||||
compose.paths.compose.repository(
|
||||
arch=arch, variant=variant, create_dir=False
|
||||
).rstrip("/")
|
||||
+ "/",
|
||||
compose.paths.compose.topdir().rstrip("/") + "/",
|
||||
).rstrip("/")
|
||||
var.paths.packages[arch] = relative_path(
|
||||
compose.paths.compose.packages(
|
||||
arch=arch, variant=variant, create_dir=False
|
||||
).rstrip("/")
|
||||
+ "/",
|
||||
compose.paths.compose.topdir().rstrip("/") + "/",
|
||||
).rstrip("/")
|
||||
iso_dir = (
|
||||
compose.paths.compose.iso_dir(
|
||||
arch=arch, variant=variant, create_dir=False
|
||||
)
|
||||
or ""
|
||||
)
|
||||
if iso_dir and os.path.isdir(
|
||||
os.path.join(compose.paths.compose.topdir(), iso_dir)
|
||||
):
|
||||
var.paths.isos[arch] = relative_path(
|
||||
iso_dir, compose.paths.compose.topdir().rstrip("/") + "/"
|
||||
).rstrip("/")
|
||||
jigdo_dir = (
|
||||
compose.paths.compose.jigdo_dir(
|
||||
arch=arch, variant=variant, create_dir=False
|
||||
)
|
||||
or ""
|
||||
)
|
||||
if jigdo_dir and os.path.isdir(
|
||||
os.path.join(compose.paths.compose.topdir(), jigdo_dir)
|
||||
):
|
||||
var.paths.jigdos[arch] = relative_path(
|
||||
jigdo_dir, compose.paths.compose.topdir().rstrip("/") + "/"
|
||||
).rstrip("/")
|
||||
|
||||
# paths: sources
|
||||
var.paths.source_tree[arch] = relative_path(compose.paths.compose.os_tree(arch="source", variant=variant, create_dir=False).rstrip("/") + "/", compose.paths.compose.topdir().rstrip("/") + "/").rstrip("/")
|
||||
var.paths.source_repository[arch] = relative_path(compose.paths.compose.repository(arch="source", variant=variant, create_dir=False).rstrip("/") + "/", compose.paths.compose.topdir().rstrip("/") + "/").rstrip("/")
|
||||
var.paths.source_packages[arch] = relative_path(compose.paths.compose.packages(arch="source", variant=variant, create_dir=False).rstrip("/") + "/", compose.paths.compose.topdir().rstrip("/") + "/").rstrip("/")
|
||||
source_iso_dir = compose.paths.compose.iso_dir(arch="source", variant=variant, create_dir=False) or ""
|
||||
if source_iso_dir and os.path.isdir(os.path.join(compose.paths.compose.topdir(), source_iso_dir)):
|
||||
var.paths.source_isos[arch] = relative_path(source_iso_dir, compose.paths.compose.topdir().rstrip("/") + "/").rstrip("/")
|
||||
source_jigdo_dir = compose.paths.compose.jigdo_dir(arch="source", variant=variant, create_dir=False) or ""
|
||||
if source_jigdo_dir and os.path.isdir(os.path.join(compose.paths.compose.topdir(), source_jigdo_dir)):
|
||||
var.paths.source_jigdos[arch] = relative_path(source_jigdo_dir, compose.paths.compose.topdir().rstrip("/") + "/").rstrip("/")
|
||||
var.paths.source_tree[arch] = relative_path(
|
||||
compose.paths.compose.os_tree(
|
||||
arch="source", variant=variant, create_dir=False
|
||||
).rstrip("/")
|
||||
+ "/",
|
||||
compose.paths.compose.topdir().rstrip("/") + "/",
|
||||
).rstrip("/")
|
||||
var.paths.source_repository[arch] = relative_path(
|
||||
compose.paths.compose.repository(
|
||||
arch="source", variant=variant, create_dir=False
|
||||
).rstrip("/")
|
||||
+ "/",
|
||||
compose.paths.compose.topdir().rstrip("/") + "/",
|
||||
).rstrip("/")
|
||||
var.paths.source_packages[arch] = relative_path(
|
||||
compose.paths.compose.packages(
|
||||
arch="source", variant=variant, create_dir=False
|
||||
).rstrip("/")
|
||||
+ "/",
|
||||
compose.paths.compose.topdir().rstrip("/") + "/",
|
||||
).rstrip("/")
|
||||
source_iso_dir = (
|
||||
compose.paths.compose.iso_dir(
|
||||
arch="source", variant=variant, create_dir=False
|
||||
)
|
||||
or ""
|
||||
)
|
||||
if source_iso_dir and os.path.isdir(
|
||||
os.path.join(compose.paths.compose.topdir(), source_iso_dir)
|
||||
):
|
||||
var.paths.source_isos[arch] = relative_path(
|
||||
source_iso_dir, compose.paths.compose.topdir().rstrip("/") + "/"
|
||||
).rstrip("/")
|
||||
source_jigdo_dir = (
|
||||
compose.paths.compose.jigdo_dir(
|
||||
arch="source", variant=variant, create_dir=False
|
||||
)
|
||||
or ""
|
||||
)
|
||||
if source_jigdo_dir and os.path.isdir(
|
||||
os.path.join(compose.paths.compose.topdir(), source_jigdo_dir)
|
||||
):
|
||||
var.paths.source_jigdos[arch] = relative_path(
|
||||
source_jigdo_dir, compose.paths.compose.topdir().rstrip("/") + "/"
|
||||
).rstrip("/")
|
||||
|
||||
# paths: debug
|
||||
var.paths.debug_tree[arch] = relative_path(compose.paths.compose.debug_tree(arch=arch, variant=variant, create_dir=False).rstrip("/") + "/", compose.paths.compose.topdir().rstrip("/") + "/").rstrip("/")
|
||||
var.paths.debug_repository[arch] = relative_path(compose.paths.compose.debug_repository(arch=arch, variant=variant, create_dir=False).rstrip("/") + "/", compose.paths.compose.topdir().rstrip("/") + "/").rstrip("/")
|
||||
var.paths.debug_packages[arch] = relative_path(compose.paths.compose.debug_packages(arch=arch, variant=variant, create_dir=False).rstrip("/") + "/", compose.paths.compose.topdir().rstrip("/") + "/").rstrip("/")
|
||||
'''
|
||||
var.paths.debug_tree[arch] = relative_path(
|
||||
compose.paths.compose.debug_tree(
|
||||
arch=arch, variant=variant, create_dir=False
|
||||
).rstrip("/")
|
||||
+ "/",
|
||||
compose.paths.compose.topdir().rstrip("/") + "/",
|
||||
).rstrip("/")
|
||||
var.paths.debug_repository[arch] = relative_path(
|
||||
compose.paths.compose.debug_repository(
|
||||
arch=arch, variant=variant, create_dir=False
|
||||
).rstrip("/")
|
||||
+ "/",
|
||||
compose.paths.compose.topdir().rstrip("/") + "/",
|
||||
).rstrip("/")
|
||||
var.paths.debug_packages[arch] = relative_path(
|
||||
compose.paths.compose.debug_packages(
|
||||
arch=arch, variant=variant, create_dir=False
|
||||
).rstrip("/")
|
||||
+ "/",
|
||||
compose.paths.compose.topdir().rstrip("/") + "/",
|
||||
).rstrip("/")
|
||||
"""
|
||||
# XXX: not suported (yet?)
|
||||
debug_iso_dir = compose.paths.compose.debug_iso_dir(arch=arch, variant=variant) or ""
|
||||
if debug_iso_dir:
|
||||
@ -145,7 +246,7 @@ def compose_to_composeinfo(compose):
|
||||
debug_jigdo_dir = compose.paths.compose.debug_jigdo_dir(arch=arch, variant=variant) or ""
|
||||
if debug_jigdo_dir:
|
||||
var.debug_jigdo_dir[arch] = relative_path(debug_jigdo_dir, compose.paths.compose.topdir().rstrip("/") + "/").rstrip("/")
|
||||
'''
|
||||
"""
|
||||
|
||||
for v in variant.get_variants(recursive=False):
|
||||
x = dump_variant(v, parent=variant)
|
||||
@ -187,17 +288,22 @@ def write_compose_info(compose):
|
||||
|
||||
|
||||
def write_tree_info(compose, arch, variant, timestamp=None, bi=None):
|
||||
if variant.type in ("addon", ) or variant.is_empty:
|
||||
if variant.type in ("addon",) or variant.is_empty:
|
||||
return
|
||||
|
||||
compose.log_debug("on arch '%s' looking at variant '%s' of type '%s'" % (arch, variant, variant.type))
|
||||
compose.log_debug(
|
||||
"on arch '%s' looking at variant '%s' of type '%s'"
|
||||
% (arch, variant, variant.type)
|
||||
)
|
||||
|
||||
if not timestamp:
|
||||
timestamp = int(time.time())
|
||||
else:
|
||||
timestamp = int(timestamp)
|
||||
|
||||
os_tree = compose.paths.compose.os_tree(arch=arch, variant=variant).rstrip("/") + "/"
|
||||
os_tree = (
|
||||
compose.paths.compose.os_tree(arch=arch, variant=variant).rstrip("/") + "/"
|
||||
)
|
||||
|
||||
ti = productmd.treeinfo.TreeInfo()
|
||||
# load from buildinstall .treeinfo
|
||||
@ -224,9 +330,13 @@ def write_tree_info(compose, arch, variant, timestamp=None, bi=None):
|
||||
else:
|
||||
# release
|
||||
ti.release.name = compose.conf["release_name"]
|
||||
ti.release.version = compose.conf.get("treeinfo_version", compose.conf["release_version"])
|
||||
ti.release.version = compose.conf.get(
|
||||
"treeinfo_version", compose.conf["release_version"]
|
||||
)
|
||||
ti.release.short = compose.conf["release_short"]
|
||||
ti.release.is_layered = True if compose.conf.get("base_product_name", "") else False
|
||||
ti.release.is_layered = (
|
||||
True if compose.conf.get("base_product_name", "") else False
|
||||
)
|
||||
ti.release.type = compose.conf["release_type"].lower()
|
||||
|
||||
# base product
|
||||
@ -254,8 +364,26 @@ def write_tree_info(compose, arch, variant, timestamp=None, bi=None):
|
||||
var.name = variant.name
|
||||
var.type = variant.type
|
||||
|
||||
var.paths.packages = relative_path(compose.paths.compose.packages(arch=arch, variant=variant, create_dir=False).rstrip("/") + "/", os_tree).rstrip("/") or "."
|
||||
var.paths.repository = relative_path(compose.paths.compose.repository(arch=arch, variant=variant, create_dir=False).rstrip("/") + "/", os_tree).rstrip("/") or "."
|
||||
var.paths.packages = (
|
||||
relative_path(
|
||||
compose.paths.compose.packages(
|
||||
arch=arch, variant=variant, create_dir=False
|
||||
).rstrip("/")
|
||||
+ "/",
|
||||
os_tree,
|
||||
).rstrip("/")
|
||||
or "."
|
||||
)
|
||||
var.paths.repository = (
|
||||
relative_path(
|
||||
compose.paths.compose.repository(
|
||||
arch=arch, variant=variant, create_dir=False
|
||||
).rstrip("/")
|
||||
+ "/",
|
||||
os_tree,
|
||||
).rstrip("/")
|
||||
or "."
|
||||
)
|
||||
|
||||
ti.variants.add(var)
|
||||
|
||||
@ -270,11 +398,32 @@ def write_tree_info(compose, arch, variant, timestamp=None, bi=None):
|
||||
addon.uid = i.uid
|
||||
addon.name = i.name
|
||||
addon.type = i.type
|
||||
compose.log_debug("variant '%s' inserting addon uid '%s' type '%s'" % (variant, addon.uid, addon.type))
|
||||
compose.log_debug(
|
||||
"variant '%s' inserting addon uid '%s' type '%s'"
|
||||
% (variant, addon.uid, addon.type)
|
||||
)
|
||||
|
||||
os_tree = compose.paths.compose.os_tree(arch=arch, variant=i).rstrip("/") + "/"
|
||||
addon.paths.packages = relative_path(compose.paths.compose.packages(arch=arch, variant=i, create_dir=False).rstrip("/") + "/", os_tree).rstrip("/") or "."
|
||||
addon.paths.repository = relative_path(compose.paths.compose.repository(arch=arch, variant=i, create_dir=False).rstrip("/") + "/", os_tree).rstrip("/") or "."
|
||||
addon.paths.packages = (
|
||||
relative_path(
|
||||
compose.paths.compose.packages(
|
||||
arch=arch, variant=i, create_dir=False
|
||||
).rstrip("/")
|
||||
+ "/",
|
||||
os_tree,
|
||||
).rstrip("/")
|
||||
or "."
|
||||
)
|
||||
addon.paths.repository = (
|
||||
relative_path(
|
||||
compose.paths.compose.repository(
|
||||
arch=arch, variant=i, create_dir=False
|
||||
).rstrip("/")
|
||||
+ "/",
|
||||
os_tree,
|
||||
).rstrip("/")
|
||||
or "."
|
||||
)
|
||||
var.add(addon)
|
||||
|
||||
repomd_path = os.path.join(addon.paths.repository, "repodata", "repomd.xml")
|
||||
@ -299,7 +448,7 @@ def write_tree_info(compose, arch, variant, timestamp=None, bi=None):
|
||||
# clone all but 'general' sections from buildinstall .treeinfo
|
||||
|
||||
bi_dir = compose.paths.work.buildinstall_dir(arch)
|
||||
if compose.conf.get('buildinstall_method') == 'lorax':
|
||||
if compose.conf.get("buildinstall_method") == "lorax":
|
||||
# The .treeinfo file produced by lorax is nested in variant
|
||||
# subdirectory. Legacy buildinstall runs once per arch, so there is
|
||||
# only one file.
|
||||
@ -313,12 +462,16 @@ def write_tree_info(compose, arch, variant, timestamp=None, bi=None):
|
||||
# stage2 - mainimage
|
||||
if bi_ti.stage2.mainimage:
|
||||
ti.stage2.mainimage = bi_ti.stage2.mainimage
|
||||
ti.checksums.add(ti.stage2.mainimage, createrepo_checksum, root_dir=os_tree)
|
||||
ti.checksums.add(
|
||||
ti.stage2.mainimage, createrepo_checksum, root_dir=os_tree
|
||||
)
|
||||
|
||||
# stage2 - instimage
|
||||
if bi_ti.stage2.instimage:
|
||||
ti.stage2.instimage = bi_ti.stage2.instimage
|
||||
ti.checksums.add(ti.stage2.instimage, createrepo_checksum, root_dir=os_tree)
|
||||
ti.checksums.add(
|
||||
ti.stage2.instimage, createrepo_checksum, root_dir=os_tree
|
||||
)
|
||||
|
||||
# images
|
||||
for platform in bi_ti.images.images:
|
||||
@ -332,7 +485,9 @@ def write_tree_info(compose, arch, variant, timestamp=None, bi=None):
|
||||
ti.images.images[platform][image] = path
|
||||
ti.checksums.add(path, createrepo_checksum, root_dir=os_tree)
|
||||
|
||||
path = os.path.join(compose.paths.compose.os_tree(arch=arch, variant=variant), ".treeinfo")
|
||||
path = os.path.join(
|
||||
compose.paths.compose.os_tree(arch=arch, variant=variant), ".treeinfo"
|
||||
)
|
||||
compose.log_info("Writing treeinfo: %s" % path)
|
||||
ti.dump(path)
|
||||
|
||||
@ -365,6 +520,8 @@ def populate_extra_files_metadata(
|
||||
copied_file = os.path.relpath(full_path, relative_root)
|
||||
metadata.add(variant.uid, arch, copied_file, size, checksums)
|
||||
|
||||
strip_prefix = (os.path.relpath(topdir, relative_root) + "/") if relative_root else ""
|
||||
strip_prefix = (
|
||||
(os.path.relpath(topdir, relative_root) + "/") if relative_root else ""
|
||||
)
|
||||
with open(os.path.join(topdir, "extra_files.json"), "w") as f:
|
||||
metadata.dump_for_tree(f, variant.uid, arch, strip_prefix)
|
||||
|
@ -26,16 +26,17 @@ class Multilib(object):
|
||||
method that accepts a DNF sach and an iterable of globs that will be used
|
||||
to find package names.
|
||||
"""
|
||||
|
||||
def __init__(self, methods, blacklist, whitelist):
|
||||
self.methods = {}
|
||||
self.blacklist = blacklist
|
||||
self.whitelist = whitelist
|
||||
|
||||
self.all_methods = {
|
||||
'none': multilib.NoMultilibMethod(None),
|
||||
'all': multilib.AllMultilibMethod(None),
|
||||
'devel': multilib.DevelMultilibMethod(None),
|
||||
'runtime': multilib.RuntimeMultilibMethod(None),
|
||||
"none": multilib.NoMultilibMethod(None),
|
||||
"all": multilib.AllMultilibMethod(None),
|
||||
"devel": multilib.DevelMultilibMethod(None),
|
||||
"runtime": multilib.RuntimeMultilibMethod(None),
|
||||
}
|
||||
|
||||
for method in methods:
|
||||
@ -44,15 +45,17 @@ class Multilib(object):
|
||||
@classmethod
|
||||
def from_globs(cls, sack, methods, blacklist=None, whitelist=None):
|
||||
"""Create a Multilib instance with expanded blacklist and whitelist."""
|
||||
return cls(methods,
|
||||
_expand_list(sack, blacklist or []),
|
||||
_expand_list(sack, whitelist or []))
|
||||
return cls(
|
||||
methods,
|
||||
_expand_list(sack, blacklist or []),
|
||||
_expand_list(sack, whitelist or []),
|
||||
)
|
||||
|
||||
def is_multilib(self, pkg):
|
||||
if pkg.name in self.blacklist:
|
||||
return False
|
||||
if pkg.name in self.whitelist:
|
||||
return 'whitelist'
|
||||
return "whitelist"
|
||||
for method, cls in self.methods.items():
|
||||
if cls.select(pkg):
|
||||
return method
|
||||
|
@ -22,7 +22,9 @@ import pungi.util
|
||||
|
||||
|
||||
LINE_PATTERN_RE = re.compile(r"^\s*(?P<line>[^#]+)(:?\s+(?P<comment>#.*))?$")
|
||||
RUNTIME_PATTERN_SPLIT_RE = re.compile(r"^\s*(?P<path>[^\s]+)\s+(?P<pattern>[^\s]+)(:?\s+(?P<comment>#.*))?$")
|
||||
RUNTIME_PATTERN_SPLIT_RE = re.compile(
|
||||
r"^\s*(?P<path>[^\s]+)\s+(?P<pattern>[^\s]+)(:?\s+(?P<comment>#.*))?$"
|
||||
)
|
||||
SONAME_PATTERN_RE = re.compile(r"^(.+\.so\.[a-zA-Z0-9_\.]+).*$")
|
||||
|
||||
|
||||
@ -86,6 +88,7 @@ def expand_runtime_patterns(patterns):
|
||||
|
||||
class MultilibMethodBase(object):
|
||||
"""a base class for multilib methods"""
|
||||
|
||||
name = "base"
|
||||
|
||||
def __init__(self, config_path):
|
||||
@ -95,7 +98,11 @@ class MultilibMethodBase(object):
|
||||
raise NotImplementedError
|
||||
|
||||
def skip(self, po):
|
||||
if pungi.gather.is_noarch(po) or pungi.gather.is_source(po) or pungi.util.pkg_is_debug(po):
|
||||
if (
|
||||
pungi.gather.is_noarch(po)
|
||||
or pungi.gather.is_source(po)
|
||||
or pungi.util.pkg_is_debug(po)
|
||||
):
|
||||
return True
|
||||
return False
|
||||
|
||||
@ -120,6 +127,7 @@ class MultilibMethodBase(object):
|
||||
|
||||
class NoneMultilibMethod(MultilibMethodBase):
|
||||
"""multilib disabled"""
|
||||
|
||||
name = "none"
|
||||
|
||||
def select(self, po):
|
||||
@ -128,6 +136,7 @@ class NoneMultilibMethod(MultilibMethodBase):
|
||||
|
||||
class AllMultilibMethod(MultilibMethodBase):
|
||||
"""all packages are multilib"""
|
||||
|
||||
name = "all"
|
||||
|
||||
def select(self, po):
|
||||
@ -138,13 +147,20 @@ class AllMultilibMethod(MultilibMethodBase):
|
||||
|
||||
class RuntimeMultilibMethod(MultilibMethodBase):
|
||||
"""pre-defined paths to libs"""
|
||||
|
||||
name = "runtime"
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(RuntimeMultilibMethod, self).__init__(*args, **kwargs)
|
||||
self.blacklist = read_lines_from_file(self.config_path+"runtime-blacklist.conf")
|
||||
self.whitelist = read_lines_from_file(self.config_path+"runtime-whitelist.conf")
|
||||
self.patterns = expand_runtime_patterns(read_runtime_patterns_from_file(self.config_path+"runtime-patterns.conf"))
|
||||
self.blacklist = read_lines_from_file(
|
||||
self.config_path + "runtime-blacklist.conf"
|
||||
)
|
||||
self.whitelist = read_lines_from_file(
|
||||
self.config_path + "runtime-whitelist.conf"
|
||||
)
|
||||
self.patterns = expand_runtime_patterns(
|
||||
read_runtime_patterns_from_file(self.config_path + "runtime-patterns.conf")
|
||||
)
|
||||
|
||||
def select(self, po):
|
||||
if self.skip(po):
|
||||
@ -186,6 +202,7 @@ class RuntimeMultilibMethod(MultilibMethodBase):
|
||||
|
||||
class KernelMultilibMethod(MultilibMethodBase):
|
||||
"""kernel and kernel-devel"""
|
||||
|
||||
name = "kernel"
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
@ -199,6 +216,7 @@ class KernelMultilibMethod(MultilibMethodBase):
|
||||
|
||||
class YabootMultilibMethod(MultilibMethodBase):
|
||||
"""yaboot on ppc"""
|
||||
|
||||
name = "yaboot"
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
@ -213,12 +231,13 @@ class YabootMultilibMethod(MultilibMethodBase):
|
||||
|
||||
class DevelMultilibMethod(MultilibMethodBase):
|
||||
"""all -devel and -static packages"""
|
||||
|
||||
name = "devel"
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(DevelMultilibMethod, self).__init__(*args, **kwargs)
|
||||
self.blacklist = read_lines_from_file(self.config_path+"devel-blacklist.conf")
|
||||
self.whitelist = read_lines_from_file(self.config_path+"devel-whitelist.conf")
|
||||
self.blacklist = read_lines_from_file(self.config_path + "devel-blacklist.conf")
|
||||
self.whitelist = read_lines_from_file(self.config_path + "devel-whitelist.conf")
|
||||
|
||||
def select(self, po):
|
||||
if self.skip(po):
|
||||
@ -254,8 +273,14 @@ def init(config_path="/usr/share/pungi/multilib/"):
|
||||
if not config_path.endswith("/"):
|
||||
config_path += "/"
|
||||
|
||||
for cls in (AllMultilibMethod, DevelMultilibMethod, KernelMultilibMethod,
|
||||
NoneMultilibMethod, RuntimeMultilibMethod, YabootMultilibMethod):
|
||||
for cls in (
|
||||
AllMultilibMethod,
|
||||
DevelMultilibMethod,
|
||||
KernelMultilibMethod,
|
||||
NoneMultilibMethod,
|
||||
RuntimeMultilibMethod,
|
||||
YabootMultilibMethod,
|
||||
):
|
||||
method = cls(config_path)
|
||||
METHOD_MAP[method.name] = method
|
||||
|
||||
|
@ -29,6 +29,7 @@ class PungiNotifier(object):
|
||||
script fails, a warning will be logged, but the compose process will not be
|
||||
interrupted.
|
||||
"""
|
||||
|
||||
def __init__(self, cmds):
|
||||
self.cmds = cmds
|
||||
self.lock = threading.Lock()
|
||||
@ -38,30 +39,31 @@ class PungiNotifier(object):
|
||||
"""Add compose related information to the data."""
|
||||
if not self.compose:
|
||||
return
|
||||
data.setdefault('compose_id', self.compose.compose_id)
|
||||
data.setdefault("compose_id", self.compose.compose_id)
|
||||
|
||||
# Publish where in the world this compose will end up living
|
||||
location = pungi.util.translate_path(
|
||||
self.compose, self.compose.paths.compose.topdir())
|
||||
data.setdefault('location', location)
|
||||
self.compose, self.compose.paths.compose.topdir()
|
||||
)
|
||||
data.setdefault("location", location)
|
||||
|
||||
# Add information about the compose itself.
|
||||
data.setdefault('compose_date', self.compose.compose_date)
|
||||
data.setdefault('compose_type', self.compose.compose_type)
|
||||
data.setdefault('compose_respin', self.compose.compose_respin)
|
||||
data.setdefault('compose_label', self.compose.compose_label)
|
||||
data.setdefault('release_short', self.compose.conf['release_short'])
|
||||
data.setdefault('release_name', self.compose.conf['release_name'])
|
||||
data.setdefault('release_version', self.compose.conf['release_version'])
|
||||
data.setdefault('release_type', self.compose.conf['release_type'].lower())
|
||||
data.setdefault('release_is_layered', False)
|
||||
data.setdefault("compose_date", self.compose.compose_date)
|
||||
data.setdefault("compose_type", self.compose.compose_type)
|
||||
data.setdefault("compose_respin", self.compose.compose_respin)
|
||||
data.setdefault("compose_label", self.compose.compose_label)
|
||||
data.setdefault("release_short", self.compose.conf["release_short"])
|
||||
data.setdefault("release_name", self.compose.conf["release_name"])
|
||||
data.setdefault("release_version", self.compose.conf["release_version"])
|
||||
data.setdefault("release_type", self.compose.conf["release_type"].lower())
|
||||
data.setdefault("release_is_layered", False)
|
||||
|
||||
if self.compose.conf.get('base_product_name', ''):
|
||||
data['release_is_layered'] = True
|
||||
data['base_product_name'] = self.compose.conf["base_product_name"]
|
||||
data['base_product_version'] = self.compose.conf["base_product_version"]
|
||||
data['base_product_short'] = self.compose.conf["base_product_short"]
|
||||
data['base_product_type'] = self.compose.conf["base_product_type"].lower()
|
||||
if self.compose.conf.get("base_product_name", ""):
|
||||
data["release_is_layered"] = True
|
||||
data["base_product_name"] = self.compose.conf["base_product_name"]
|
||||
data["base_product_version"] = self.compose.conf["base_product_version"]
|
||||
data["base_product_short"] = self.compose.conf["base_product_short"]
|
||||
data["base_product_type"] = self.compose.conf["base_product_type"].lower()
|
||||
|
||||
def send(self, msg, workdir=None, **kwargs):
|
||||
"""Send a message.
|
||||
@ -89,23 +91,24 @@ class PungiNotifier(object):
|
||||
"""Run a single notification script with proper logging."""
|
||||
logfile = None
|
||||
if self.compose:
|
||||
self.compose.log_debug("Notification: %r %r, %r" % (
|
||||
cmd, msg, kwargs))
|
||||
self.compose.log_debug("Notification: %r %r, %r" % (cmd, msg, kwargs))
|
||||
logfile = os.path.join(
|
||||
self.compose.paths.log.topdir(),
|
||||
'notifications',
|
||||
'notification-%s.log' % datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S')
|
||||
"notifications",
|
||||
"notification-%s.log" % datetime.utcnow().strftime("%Y-%m-%d_%H-%M-%S"),
|
||||
)
|
||||
pungi.util.makedirs(os.path.dirname(logfile))
|
||||
|
||||
ret, _ = shortcuts.run((cmd, msg),
|
||||
stdin_data=json.dumps(kwargs),
|
||||
can_fail=True,
|
||||
workdir=workdir,
|
||||
return_stdout=False,
|
||||
show_cmd=True,
|
||||
universal_newlines=True,
|
||||
logfile=logfile)
|
||||
ret, _ = shortcuts.run(
|
||||
(cmd, msg),
|
||||
stdin_data=json.dumps(kwargs),
|
||||
can_fail=True,
|
||||
workdir=workdir,
|
||||
return_stdout=False,
|
||||
show_cmd=True,
|
||||
universal_newlines=True,
|
||||
logfile=logfile,
|
||||
)
|
||||
if ret != 0:
|
||||
if self.compose:
|
||||
self.compose.log_warning('Failed to invoke notification script.')
|
||||
self.compose.log_warning("Failed to invoke notification script.")
|
||||
|
@ -26,62 +26,128 @@ def main(args=None):
|
||||
subparser = parser.add_subparsers(help="Sub commands")
|
||||
|
||||
treep = subparser.add_parser("tree", help="Compose OSTree repository")
|
||||
treep.set_defaults(_class=Tree, func='run')
|
||||
treep.add_argument('--repo', metavar='PATH', required=True,
|
||||
help='where to put the OSTree repo (required)')
|
||||
treep.add_argument('--treefile', metavar="FILE", required=True,
|
||||
help='treefile for rpm-ostree (required)')
|
||||
treep.add_argument('--log-dir', metavar="DIR", required=True,
|
||||
help='where to log output and commitid (required). \
|
||||
Note: commitid file will be written to this dir')
|
||||
treep.add_argument('--extra-config', metavar="FILE",
|
||||
help='JSON file contains extra configurations')
|
||||
treep.add_argument('--version', metavar="VERSION",
|
||||
help='version string to be added as versioning metadata')
|
||||
treep.add_argument('--update-summary', action='store_true',
|
||||
help='update summary metadata')
|
||||
treep.add_argument('--ostree-ref', metavar='PATH',
|
||||
help='override ref value from treefile')
|
||||
treep.add_argument('--force-new-commit', action='store_true',
|
||||
help='do not use rpm-ostree\'s built-in change detection')
|
||||
treep.set_defaults(_class=Tree, func="run")
|
||||
treep.add_argument(
|
||||
"--repo",
|
||||
metavar="PATH",
|
||||
required=True,
|
||||
help="where to put the OSTree repo (required)",
|
||||
)
|
||||
treep.add_argument(
|
||||
"--treefile",
|
||||
metavar="FILE",
|
||||
required=True,
|
||||
help="treefile for rpm-ostree (required)",
|
||||
)
|
||||
treep.add_argument(
|
||||
"--log-dir",
|
||||
metavar="DIR",
|
||||
required=True,
|
||||
help="where to log output and commitid (required). \
|
||||
Note: commitid file will be written to this dir",
|
||||
)
|
||||
treep.add_argument(
|
||||
"--extra-config", metavar="FILE", help="JSON file contains extra configurations"
|
||||
)
|
||||
treep.add_argument(
|
||||
"--version",
|
||||
metavar="VERSION",
|
||||
help="version string to be added as versioning metadata",
|
||||
)
|
||||
treep.add_argument(
|
||||
"--update-summary", action="store_true", help="update summary metadata"
|
||||
)
|
||||
treep.add_argument(
|
||||
"--ostree-ref", metavar="PATH", help="override ref value from treefile"
|
||||
)
|
||||
treep.add_argument(
|
||||
"--force-new-commit",
|
||||
action="store_true",
|
||||
help="do not use rpm-ostree's built-in change detection",
|
||||
)
|
||||
|
||||
installerp = subparser.add_parser("installer", help="Create an OSTree installer image")
|
||||
installerp.set_defaults(_class=Installer, func='run')
|
||||
installerp.add_argument('-p', '--product', metavar='PRODUCT', required=True,
|
||||
help='product name (required)')
|
||||
installerp.add_argument('-v', '--version', metavar='VERSION', required=True,
|
||||
help='version identifier (required)')
|
||||
installerp.add_argument('-r', '--release', metavar='RELEASE', required=True,
|
||||
help='release information (required)')
|
||||
installerp.add_argument('-s', '--source', metavar='REPOSITORY', required=True,
|
||||
action='append',
|
||||
help='source repository (required)')
|
||||
installerp.add_argument('-o', '--output', metavar='DIR', required=True,
|
||||
help='path to image output directory (required)')
|
||||
installerp.add_argument('--log-dir', metavar='DIR',
|
||||
help='path to log directory')
|
||||
installerp.add_argument('--volid', metavar='VOLID',
|
||||
help='volume id')
|
||||
installerp.add_argument('--variant', metavar='VARIANT',
|
||||
help='variant name')
|
||||
installerp.add_argument('--rootfs-size', metavar='SIZE')
|
||||
installerp.add_argument('--nomacboot', action='store_true', default=False)
|
||||
installerp.add_argument('--noupgrade', action='store_true', default=False)
|
||||
installerp.add_argument('--isfinal', action='store_true', default=False)
|
||||
installerp = subparser.add_parser(
|
||||
"installer", help="Create an OSTree installer image"
|
||||
)
|
||||
installerp.set_defaults(_class=Installer, func="run")
|
||||
installerp.add_argument(
|
||||
"-p",
|
||||
"--product",
|
||||
metavar="PRODUCT",
|
||||
required=True,
|
||||
help="product name (required)",
|
||||
)
|
||||
installerp.add_argument(
|
||||
"-v",
|
||||
"--version",
|
||||
metavar="VERSION",
|
||||
required=True,
|
||||
help="version identifier (required)",
|
||||
)
|
||||
installerp.add_argument(
|
||||
"-r",
|
||||
"--release",
|
||||
metavar="RELEASE",
|
||||
required=True,
|
||||
help="release information (required)",
|
||||
)
|
||||
installerp.add_argument(
|
||||
"-s",
|
||||
"--source",
|
||||
metavar="REPOSITORY",
|
||||
required=True,
|
||||
action="append",
|
||||
help="source repository (required)",
|
||||
)
|
||||
installerp.add_argument(
|
||||
"-o",
|
||||
"--output",
|
||||
metavar="DIR",
|
||||
required=True,
|
||||
help="path to image output directory (required)",
|
||||
)
|
||||
installerp.add_argument("--log-dir", metavar="DIR", help="path to log directory")
|
||||
installerp.add_argument("--volid", metavar="VOLID", help="volume id")
|
||||
installerp.add_argument("--variant", metavar="VARIANT", help="variant name")
|
||||
installerp.add_argument("--rootfs-size", metavar="SIZE")
|
||||
installerp.add_argument("--nomacboot", action="store_true", default=False)
|
||||
installerp.add_argument("--noupgrade", action="store_true", default=False)
|
||||
installerp.add_argument("--isfinal", action="store_true", default=False)
|
||||
|
||||
installerp.add_argument('--installpkgs', metavar='PACKAGE', action='append',
|
||||
help='package glob to install before runtime-install.tmpl')
|
||||
installerp.add_argument('--add-template', metavar='FILE', action='append',
|
||||
help='Additional template for runtime image')
|
||||
installerp.add_argument('--add-template-var', metavar='ADD_TEMPLATE_VARS', action='append',
|
||||
help='Set variable for runtime image template')
|
||||
installerp.add_argument('--add-arch-template', metavar='FILE', action='append',
|
||||
help='Additional template for architecture-specific image')
|
||||
installerp.add_argument('--add-arch-template-var', metavar='ADD_ARCH_TEMPLATE_VARS', action='append',
|
||||
help='Set variable for architecture-specific image')
|
||||
installerp.add_argument(
|
||||
"--installpkgs",
|
||||
metavar="PACKAGE",
|
||||
action="append",
|
||||
help="package glob to install before runtime-install.tmpl",
|
||||
)
|
||||
installerp.add_argument(
|
||||
"--add-template",
|
||||
metavar="FILE",
|
||||
action="append",
|
||||
help="Additional template for runtime image",
|
||||
)
|
||||
installerp.add_argument(
|
||||
"--add-template-var",
|
||||
metavar="ADD_TEMPLATE_VARS",
|
||||
action="append",
|
||||
help="Set variable for runtime image template",
|
||||
)
|
||||
installerp.add_argument(
|
||||
"--add-arch-template",
|
||||
metavar="FILE",
|
||||
action="append",
|
||||
help="Additional template for architecture-specific image",
|
||||
)
|
||||
installerp.add_argument(
|
||||
"--add-arch-template-var",
|
||||
metavar="ADD_ARCH_TEMPLATE_VARS",
|
||||
action="append",
|
||||
help="Set variable for architecture-specific image",
|
||||
)
|
||||
|
||||
installerp.add_argument('--extra-config', metavar='FILE',
|
||||
help='JSON file contains extra configurations')
|
||||
installerp.add_argument(
|
||||
"--extra-config", metavar="FILE", help="JSON file contains extra configurations"
|
||||
)
|
||||
|
||||
args = parser.parse_args(args)
|
||||
|
||||
|
@ -23,7 +23,7 @@ from ..wrappers import lorax
|
||||
|
||||
class Installer(OSTree):
|
||||
def _merge_config(self, config):
|
||||
self.installpkgs.extend(config.get('installpkgs', []))
|
||||
self.installpkgs.extend(config.get("installpkgs", []))
|
||||
self.add_template.extend(config.get("add_template", []))
|
||||
self.add_template_var.extend(config.get("add_template_var"))
|
||||
self.add_arch_template.extend(config.get("add_arch_template", []))
|
||||
@ -52,7 +52,7 @@ class Installer(OSTree):
|
||||
|
||||
self.extra_config = self.args.extra_config
|
||||
if self.extra_config:
|
||||
self.extra_config = json.load(open(self.extra_config, 'r'))
|
||||
self.extra_config = json.load(open(self.extra_config, "r"))
|
||||
self._merge_config(self.extra_config)
|
||||
|
||||
lorax_wrapper = lorax.LoraxWrapper()
|
||||
@ -72,6 +72,6 @@ class Installer(OSTree):
|
||||
add_arch_template_var=self.add_arch_template_var,
|
||||
rootfs_size=self.rootfs_size,
|
||||
is_final=self.isfinal,
|
||||
log_dir=self.logdir
|
||||
log_dir=self.logdir,
|
||||
)
|
||||
shortcuts.run(cmd)
|
||||
|
@ -20,14 +20,18 @@ from kobo import shortcuts
|
||||
|
||||
from pungi.util import makedirs
|
||||
from .base import OSTree
|
||||
from .utils import (make_log_file, tweak_treeconf,
|
||||
get_ref_from_treefile, get_commitid_from_commitid_file)
|
||||
from .utils import (
|
||||
make_log_file,
|
||||
tweak_treeconf,
|
||||
get_ref_from_treefile,
|
||||
get_commitid_from_commitid_file,
|
||||
)
|
||||
|
||||
|
||||
class Tree(OSTree):
|
||||
def _make_tree(self):
|
||||
"""Compose OSTree tree"""
|
||||
log_file = make_log_file(self.logdir, 'create-ostree-repo')
|
||||
log_file = make_log_file(self.logdir, "create-ostree-repo")
|
||||
cmd = [
|
||||
"rpm-ostree",
|
||||
"compose",
|
||||
@ -41,11 +45,11 @@ class Tree(OSTree):
|
||||
]
|
||||
if self.version:
|
||||
# Add versioning metadata
|
||||
cmd.append('--add-metadata-string=version=%s' % self.version)
|
||||
cmd.append("--add-metadata-string=version=%s" % self.version)
|
||||
# Note renamed from rpm-ostree --force-nocache since it's a better
|
||||
# name; more clearly describes what we're doing here.
|
||||
if self.force_new_commit:
|
||||
cmd.append('--force-nocache')
|
||||
cmd.append("--force-nocache")
|
||||
cmd.append(self.treefile)
|
||||
|
||||
shortcuts.run(
|
||||
@ -54,9 +58,9 @@ class Tree(OSTree):
|
||||
|
||||
def _update_summary(self):
|
||||
"""Update summary metadata"""
|
||||
log_file = make_log_file(self.logdir, 'ostree-summary')
|
||||
log_file = make_log_file(self.logdir, "ostree-summary")
|
||||
shortcuts.run(
|
||||
['ostree', 'summary', '-u', '--repo=%s' % self.repo],
|
||||
["ostree", "summary", "-u", "--repo=%s" % self.repo],
|
||||
show_cmd=True,
|
||||
stdout=True,
|
||||
logfile=log_file,
|
||||
@ -73,24 +77,24 @@ class Tree(OSTree):
|
||||
"""
|
||||
tag_ref = True
|
||||
if self.extra_config:
|
||||
tag_ref = self.extra_config.get('tag_ref', True)
|
||||
tag_ref = self.extra_config.get("tag_ref", True)
|
||||
if not tag_ref:
|
||||
print('Not updating ref as configured')
|
||||
print("Not updating ref as configured")
|
||||
return
|
||||
ref = get_ref_from_treefile(self.treefile)
|
||||
commitid = get_commitid_from_commitid_file(self.commitid_file)
|
||||
print('Ref: %r, Commit ID: %r' % (ref, commitid))
|
||||
print("Ref: %r, Commit ID: %r" % (ref, commitid))
|
||||
if ref and commitid:
|
||||
print('Updating ref')
|
||||
print("Updating ref")
|
||||
# Let's write the tag out ourselves
|
||||
heads_dir = os.path.join(self.repo, 'refs', 'heads')
|
||||
heads_dir = os.path.join(self.repo, "refs", "heads")
|
||||
if not os.path.exists(heads_dir):
|
||||
raise RuntimeError('Refs/heads did not exist in ostree repo')
|
||||
raise RuntimeError("Refs/heads did not exist in ostree repo")
|
||||
|
||||
ref_path = os.path.join(heads_dir, ref)
|
||||
makedirs(os.path.dirname(ref_path))
|
||||
with open(ref_path, 'w') as f:
|
||||
f.write(commitid + '\n')
|
||||
with open(ref_path, "w") as f:
|
||||
f.write(commitid + "\n")
|
||||
|
||||
def run(self):
|
||||
self.repo = self.args.repo
|
||||
@ -104,9 +108,11 @@ class Tree(OSTree):
|
||||
|
||||
if self.extra_config or self.ostree_ref:
|
||||
if self.extra_config:
|
||||
self.extra_config = json.load(open(self.extra_config, 'r'))
|
||||
repos = self.extra_config.get('repo', [])
|
||||
keep_original_sources = self.extra_config.get('keep_original_sources', False)
|
||||
self.extra_config = json.load(open(self.extra_config, "r"))
|
||||
repos = self.extra_config.get("repo", [])
|
||||
keep_original_sources = self.extra_config.get(
|
||||
"keep_original_sources", False
|
||||
)
|
||||
else:
|
||||
# missing extra_config mustn't affect tweak_treeconf call
|
||||
repos = []
|
||||
@ -115,16 +121,16 @@ class Tree(OSTree):
|
||||
update_dict = {}
|
||||
if self.ostree_ref:
|
||||
# override ref value in treefile
|
||||
update_dict['ref'] = self.ostree_ref
|
||||
update_dict["ref"] = self.ostree_ref
|
||||
|
||||
self.treefile = tweak_treeconf(
|
||||
self.treefile,
|
||||
source_repos=repos,
|
||||
keep_original_sources=keep_original_sources,
|
||||
update_dict=update_dict
|
||||
update_dict=update_dict,
|
||||
)
|
||||
|
||||
self.commitid_file = make_log_file(self.logdir, 'commitid')
|
||||
self.commitid_file = make_log_file(self.logdir, "commitid")
|
||||
|
||||
self._make_tree()
|
||||
self._update_ref()
|
||||
|
@ -30,7 +30,7 @@ def make_log_file(log_dir, filename):
|
||||
if not log_dir:
|
||||
return None
|
||||
makedirs(log_dir)
|
||||
return os.path.join(log_dir, '%s.log' % filename)
|
||||
return os.path.join(log_dir, "%s.log" % filename)
|
||||
|
||||
|
||||
def get_ref_from_treefile(treefile, arch=None, logger=None):
|
||||
@ -40,7 +40,7 @@ def get_ref_from_treefile(treefile, arch=None, logger=None):
|
||||
"""
|
||||
logger = logger or logging.getLogger(__name__)
|
||||
if os.path.isfile(treefile):
|
||||
with open(treefile, 'r') as f:
|
||||
with open(treefile, "r") as f:
|
||||
try:
|
||||
# rpm-ostree now supports YAML
|
||||
# https://github.com/projectatomic/rpm-ostree/pull/1377
|
||||
@ -50,9 +50,9 @@ def get_ref_from_treefile(treefile, arch=None, logger=None):
|
||||
parsed = json.load(f)
|
||||
return parsed["ref"].replace("${basearch}", getBaseArch(arch))
|
||||
except Exception as e:
|
||||
logger.error('Unable to get ref from treefile: %s' % e)
|
||||
logger.error("Unable to get ref from treefile: %s" % e)
|
||||
else:
|
||||
logger.error('Unable to open treefile')
|
||||
logger.error("Unable to open treefile")
|
||||
return None
|
||||
|
||||
|
||||
@ -61,11 +61,13 @@ def get_commitid_from_commitid_file(commitid_file):
|
||||
if not os.path.exists(commitid_file + ".stamp"):
|
||||
# The stamp does not exist, so no new commit.
|
||||
return None
|
||||
with open(commitid_file, 'r') as f:
|
||||
return f.read().replace('\n', '')
|
||||
with open(commitid_file, "r") as f:
|
||||
return f.read().replace("\n", "")
|
||||
|
||||
|
||||
def tweak_treeconf(treeconf, source_repos=None, keep_original_sources=False, update_dict=None):
|
||||
def tweak_treeconf(
|
||||
treeconf, source_repos=None, keep_original_sources=False, update_dict=None
|
||||
):
|
||||
"""
|
||||
Update tree config file by adding new repos, and remove existing repos
|
||||
from the tree config file if 'keep_original_sources' is not enabled.
|
||||
@ -74,51 +76,51 @@ def tweak_treeconf(treeconf, source_repos=None, keep_original_sources=False, upd
|
||||
"""
|
||||
|
||||
# backup the old tree config
|
||||
shutil.copy2(treeconf, '{0}.bak'.format(treeconf))
|
||||
shutil.copy2(treeconf, "{0}.bak".format(treeconf))
|
||||
|
||||
treeconf_dir = os.path.dirname(treeconf)
|
||||
with open(treeconf, 'r') as f:
|
||||
with open(treeconf, "r") as f:
|
||||
# rpm-ostree now supports YAML, but we'll end up converting it to JSON.
|
||||
# https://github.com/projectatomic/rpm-ostree/pull/1377
|
||||
if treeconf.endswith('.yaml'):
|
||||
if treeconf.endswith(".yaml"):
|
||||
treeconf_content = yaml.safe_load(f)
|
||||
treeconf = treeconf.replace('.yaml', '.json')
|
||||
treeconf = treeconf.replace(".yaml", ".json")
|
||||
else:
|
||||
treeconf_content = json.load(f)
|
||||
|
||||
repos = []
|
||||
if source_repos:
|
||||
# Sort to ensure reliable ordering
|
||||
source_repos = sorted(source_repos, key=lambda x: x['name'])
|
||||
source_repos = sorted(source_repos, key=lambda x: x["name"])
|
||||
# Now, since pungi includes timestamps in the repo names which
|
||||
# currently defeats rpm-ostree's change detection, let's just
|
||||
# use repos named 'repo-<number>'.
|
||||
# https://pagure.io/pungi/issue/811
|
||||
with open("{0}/pungi.repo".format(treeconf_dir), 'w') as f:
|
||||
with open("{0}/pungi.repo".format(treeconf_dir), "w") as f:
|
||||
for i, repo in enumerate(source_repos):
|
||||
name = 'repo-{0}'.format(i)
|
||||
name = "repo-{0}".format(i)
|
||||
f.write("[%s]\n" % name)
|
||||
f.write("name=%s\n" % name)
|
||||
f.write("baseurl=%s\n" % repo['baseurl'])
|
||||
exclude = repo.get('exclude', None)
|
||||
f.write("baseurl=%s\n" % repo["baseurl"])
|
||||
exclude = repo.get("exclude", None)
|
||||
if exclude:
|
||||
f.write("exclude=%s\n" % exclude)
|
||||
gpgcheck = '1' if repo.get('gpgcheck', False) else '0'
|
||||
gpgcheck = "1" if repo.get("gpgcheck", False) else "0"
|
||||
f.write("gpgcheck=%s\n" % gpgcheck)
|
||||
|
||||
repos.append(name)
|
||||
|
||||
original_repos = treeconf_content.get('repos', [])
|
||||
original_repos = treeconf_content.get("repos", [])
|
||||
if keep_original_sources:
|
||||
treeconf_content['repos'] = original_repos + repos
|
||||
treeconf_content["repos"] = original_repos + repos
|
||||
else:
|
||||
treeconf_content['repos'] = repos
|
||||
treeconf_content["repos"] = repos
|
||||
|
||||
# update content with config values from dictionary (for example 'ref')
|
||||
if isinstance(update_dict, dict):
|
||||
treeconf_content.update(update_dict)
|
||||
|
||||
# update tree config to add new repos
|
||||
with open(treeconf, 'w') as f:
|
||||
with open(treeconf, "w") as f:
|
||||
json.dump(treeconf_content, f, indent=4)
|
||||
return treeconf
|
||||
|
190 pungi/paths.py
@ -14,9 +14,7 @@
|
||||
# along with this program; if not, see <https://gnu.org/licenses/>.
|
||||
|
||||
|
||||
__all__ = (
|
||||
"Paths",
|
||||
)
|
||||
__all__ = ("Paths",)
|
||||
|
||||
|
||||
import errno
|
||||
@ -30,7 +28,12 @@ class Paths(object):
|
||||
paths_module_name = compose.conf.get("paths_module")
|
||||
if paths_module_name:
|
||||
# custom paths
|
||||
paths_module = __import__(paths_module_name, globals(), locals(), ["LogPaths", "WorkPaths", "ComposePaths"])
|
||||
paths_module = __import__(
|
||||
paths_module_name,
|
||||
globals(),
|
||||
locals(),
|
||||
["LogPaths", "WorkPaths", "ComposePaths"],
|
||||
)
|
||||
self.compose = paths_module.ComposePaths(compose)
|
||||
self.log = paths_module.LogPaths(compose)
|
||||
self.work = paths_module.WorkPaths(compose)
|
||||
@ -62,7 +65,9 @@ class LogPaths(object):
|
||||
arch = arch or "global"
|
||||
if log_name.endswith(".log"):
|
||||
log_name = log_name[:-4]
|
||||
return os.path.join(self.topdir(arch, create_dir=create_dir), "%s.%s.log" % (log_name, arch))
|
||||
return os.path.join(
|
||||
self.topdir(arch, create_dir=create_dir), "%s.%s.log" % (log_name, arch)
|
||||
)
|
||||
|
||||
|
||||
class WorkPaths(object):
|
||||
@ -114,13 +119,13 @@ class WorkPaths(object):
|
||||
work/x86_64/pungi/Server.x86_64.conf
|
||||
"""
|
||||
arch = arch or "global"
|
||||
file_name = ''
|
||||
file_name = ""
|
||||
if variant:
|
||||
file_name += variant.uid + '.'
|
||||
file_name += arch + '.'
|
||||
file_name += variant.uid + "."
|
||||
file_name += arch + "."
|
||||
if source_name:
|
||||
file_name += source_name + '.'
|
||||
file_name += 'conf'
|
||||
file_name += source_name + "."
|
||||
file_name += "conf"
|
||||
path = os.path.join(self.topdir(arch, create_dir=create_dir), "pungi")
|
||||
if create_dir:
|
||||
makedirs(path)
|
||||
@ -147,7 +152,7 @@ class WorkPaths(object):
|
||||
path = self.pungi_conf(arch, variant, create_dir=create_dir)
|
||||
path = path[:-5]
|
||||
if source_name:
|
||||
path += '.' + source_name
|
||||
path += "." + source_name
|
||||
return path + ".log"
|
||||
|
||||
def pungi_cache_dir(self, arch, variant=None, create_dir=True):
|
||||
@ -200,13 +205,16 @@ class WorkPaths(object):
|
||||
Examples:
|
||||
work/x86_64/Server/lookaside_repo
|
||||
"""
|
||||
path = os.path.join(self.topdir(arch, create_dir=create_dir),
|
||||
variant.uid, "lookaside_repo")
|
||||
path = os.path.join(
|
||||
self.topdir(arch, create_dir=create_dir), variant.uid, "lookaside_repo"
|
||||
)
|
||||
if create_dir:
|
||||
makedirs(path)
|
||||
return path
|
||||
|
||||
def package_list(self, arch=None, variant=None, pkgset=None, pkg_type=None, create_dir=True):
|
||||
def package_list(
|
||||
self, arch=None, variant=None, pkgset=None, pkg_type=None, create_dir=True
|
||||
):
|
||||
"""
|
||||
Examples:
|
||||
work/x86_64/package_list/x86_64.conf
|
||||
@ -234,7 +242,9 @@ class WorkPaths(object):
|
||||
Examples:
|
||||
work/x86_64/package_list/Server.x86_64.lookaside.conf
|
||||
"""
|
||||
return self.package_list(arch, variant, pkg_type='lookaside', create_dir=create_dir)
|
||||
return self.package_list(
|
||||
arch, variant, pkg_type="lookaside", create_dir=create_dir
|
||||
)
|
||||
|
||||
def pungi_download_dir(self, arch, create_dir=True):
|
||||
"""
|
||||
@ -246,8 +256,9 @@ class WorkPaths(object):
|
||||
makedirs(path)
|
||||
return path
|
||||
|
||||
def buildinstall_dir(self, arch, create_dir=True,
|
||||
allow_topdir_override=False, variant=None):
|
||||
def buildinstall_dir(
|
||||
self, arch, create_dir=True, allow_topdir_override=False, variant=None
|
||||
):
|
||||
"""
|
||||
:param bool allow_topdir_override: When True, the
|
||||
"buildinstall_topdir" will be used (if set) instead of real
|
||||
@ -262,9 +273,12 @@ class WorkPaths(object):
|
||||
if allow_topdir_override and buildinstall_topdir:
|
||||
topdir_basename = os.path.basename(self.compose.topdir)
|
||||
path = os.path.join(
|
||||
buildinstall_topdir, "buildinstall-%s" % topdir_basename, arch)
|
||||
buildinstall_topdir, "buildinstall-%s" % topdir_basename, arch
|
||||
)
|
||||
else:
|
||||
path = os.path.join(self.topdir(arch, create_dir=create_dir), "buildinstall")
|
||||
path = os.path.join(
|
||||
self.topdir(arch, create_dir=create_dir), "buildinstall"
|
||||
)
|
||||
|
||||
if variant:
|
||||
path = os.path.join(path, variant.uid)
|
||||
@ -277,7 +291,9 @@ class WorkPaths(object):
|
||||
"""
|
||||
if arch == "global":
|
||||
raise RuntimeError("Global extra files dir makes no sense.")
|
||||
path = os.path.join(self.topdir(arch, create_dir=create_dir), variant.uid, "extra-files")
|
||||
path = os.path.join(
|
||||
self.topdir(arch, create_dir=create_dir), variant.uid, "extra-files"
|
||||
)
|
||||
if create_dir:
|
||||
makedirs(path)
|
||||
return path
|
||||
@ -289,7 +305,11 @@ class WorkPaths(object):
|
||||
"""
|
||||
if arch == "global":
|
||||
raise RuntimeError("Global extra files dir makes no sense.")
|
||||
path = os.path.join(self.topdir(arch, create_dir=create_dir), variant.uid, "extra-iso-extra-files")
|
||||
path = os.path.join(
|
||||
self.topdir(arch, create_dir=create_dir),
|
||||
variant.uid,
|
||||
"extra-iso-extra-files",
|
||||
)
|
||||
if create_dir:
|
||||
makedirs(path)
|
||||
return path
|
||||
@ -303,7 +323,7 @@ class WorkPaths(object):
|
||||
self.topdir(arch, create_dir=create_dir),
|
||||
variant.uid,
|
||||
"iso-staging-dir",
|
||||
filename
|
||||
filename,
|
||||
)
|
||||
if create_dir:
|
||||
makedirs(path)
|
||||
@ -318,7 +338,9 @@ class WorkPaths(object):
|
||||
if pkg_type is not None:
|
||||
file_name += ".%s" % pkg_type
|
||||
file_name += ".conf"
|
||||
path = os.path.join(self.topdir(arch, create_dir=create_dir), "repo_package_list")
|
||||
path = os.path.join(
|
||||
self.topdir(arch, create_dir=create_dir), "repo_package_list"
|
||||
)
|
||||
if create_dir:
|
||||
makedirs(path)
|
||||
path = os.path.join(path, file_name)
|
||||
@ -357,7 +379,11 @@ class WorkPaths(object):
|
||||
# file_name = "%s.%s.pem" % (variant, arch)
|
||||
# HACK: modifyrepo doesn't handle renames -> $dir/productid
|
||||
file_name = "productid"
|
||||
path = os.path.join(self.topdir(arch, create_dir=create_dir), "product_id", "%s.%s.pem" % (variant, arch))
|
||||
path = os.path.join(
|
||||
self.topdir(arch, create_dir=create_dir),
|
||||
"product_id",
|
||||
"%s.%s.pem" % (variant, arch),
|
||||
)
|
||||
if create_dir:
|
||||
makedirs(path)
|
||||
path = os.path.join(path, file_name)
|
||||
@ -371,12 +397,16 @@ class WorkPaths(object):
|
||||
Examples:
|
||||
work/image-build/Server
|
||||
"""
|
||||
path = os.path.join(self.topdir('image-build', create_dir=create_dir), variant.uid)
|
||||
path = os.path.join(
|
||||
self.topdir("image-build", create_dir=create_dir), variant.uid
|
||||
)
|
||||
if create_dir:
|
||||
makedirs(path)
|
||||
return path
|
||||
|
||||
def image_build_conf(self, variant, image_name, image_type, arches=None, create_dir=True):
|
||||
def image_build_conf(
|
||||
self, variant, image_name, image_type, arches=None, create_dir=True
|
||||
):
|
||||
"""
|
||||
@param variant
|
||||
@param image-name
|
||||
@ -389,16 +419,18 @@ class WorkPaths(object):
|
||||
work/image-build/Server/docker_rhel-server-docker_x86_64.cfg
|
||||
work/image-build/Server/docker_rhel-server-docker_x86_64-ppc64le.cfg
|
||||
"""
|
||||
path = os.path.join(self.image_build_dir(variant), "%s_%s" % (image_type, image_name))
|
||||
path = os.path.join(
|
||||
self.image_build_dir(variant), "%s_%s" % (image_type, image_name)
|
||||
)
|
||||
if arches is not None:
|
||||
path = "%s_%s" % (path, '-'.join(list(arches)))
|
||||
path = "%s_%s" % (path, "-".join(list(arches)))
|
||||
path = "%s.cfg" % path
|
||||
return path
|
||||
|
||||
def module_defaults_dir(self, create_dir=True):
|
||||
"""
|
||||
"""
|
||||
path = os.path.join(self.topdir(create_dir=create_dir), 'module_defaults')
|
||||
path = os.path.join(self.topdir(create_dir=create_dir), "module_defaults")
|
||||
if create_dir:
|
||||
makedirs(path)
|
||||
return path
|
||||
@ -432,7 +464,9 @@ class ComposePaths(object):
|
||||
|
||||
if arch or variant:
|
||||
if variant.type == "addon":
|
||||
return self.topdir(arch, variant.parent, create_dir=create_dir, relative=relative)
|
||||
return self.topdir(
|
||||
arch, variant.parent, create_dir=create_dir, relative=relative
|
||||
)
|
||||
path = os.path.join(path, variant.uid, arch)
|
||||
if create_dir and not relative:
|
||||
makedirs(path)
|
||||
@ -453,7 +487,10 @@ class ComposePaths(object):
|
||||
# use 'os' dir due to historical reasons
|
||||
tree_dir = "os"
|
||||
|
||||
path = os.path.join(self.topdir(arch, variant, create_dir=create_dir, relative=relative), tree_dir)
|
||||
path = os.path.join(
|
||||
self.topdir(arch, variant, create_dir=create_dir, relative=relative),
|
||||
tree_dir,
|
||||
)
|
||||
if create_dir and not relative:
|
||||
makedirs(path)
|
||||
return path
|
||||
@ -468,9 +505,13 @@ class ComposePaths(object):
|
||||
compose/Server/x86_64/addons/LoadBalancer
|
||||
"""
|
||||
if variant.type == "addon":
|
||||
path = self.packages(arch, variant, create_dir=create_dir, relative=relative)
|
||||
path = self.packages(
|
||||
arch, variant, create_dir=create_dir, relative=relative
|
||||
)
|
||||
else:
|
||||
path = self.tree_dir(arch, variant, create_dir=create_dir, relative=relative)
|
||||
path = self.tree_dir(
|
||||
arch, variant, create_dir=create_dir, relative=relative
|
||||
)
|
||||
if create_dir and not relative:
|
||||
makedirs(path)
|
||||
return path
|
||||
@ -483,9 +524,16 @@ class ComposePaths(object):
|
||||
compose/Server-optional/x86_64/os/Packages
|
||||
"""
|
||||
if variant.type == "addon":
|
||||
path = os.path.join(self.tree_dir(arch, variant, create_dir=create_dir, relative=relative), "addons", variant.id)
|
||||
path = os.path.join(
|
||||
self.tree_dir(arch, variant, create_dir=create_dir, relative=relative),
|
||||
"addons",
|
||||
variant.id,
|
||||
)
|
||||
else:
|
||||
path = os.path.join(self.tree_dir(arch, variant, create_dir=create_dir, relative=relative), "Packages")
|
||||
path = os.path.join(
|
||||
self.tree_dir(arch, variant, create_dir=create_dir, relative=relative),
|
||||
"Packages",
|
||||
)
|
||||
if create_dir and not relative:
|
||||
makedirs(path)
|
||||
return path
|
||||
@ -496,7 +544,10 @@ class ComposePaths(object):
|
||||
compose/Server/x86_64/debug
|
||||
compose/Server-optional/x86_64/debug
|
||||
"""
|
||||
path = os.path.join(self.topdir(arch, variant, create_dir=create_dir, relative=relative), "debug")
|
||||
path = os.path.join(
|
||||
self.topdir(arch, variant, create_dir=create_dir, relative=relative),
|
||||
"debug",
|
||||
)
|
||||
if create_dir and not relative:
|
||||
makedirs(path)
|
||||
return path
|
||||
@ -507,7 +558,10 @@ class ComposePaths(object):
|
||||
compose/Server/x86_64/debug/tree
|
||||
compose/Server-optional/x86_64/debug/tree
|
||||
"""
|
||||
path = os.path.join(self.debug_topdir(arch, variant, create_dir=create_dir, relative=relative), "tree")
|
||||
path = os.path.join(
|
||||
self.debug_topdir(arch, variant, create_dir=create_dir, relative=relative),
|
||||
"tree",
|
||||
)
|
||||
if create_dir and not relative:
|
||||
makedirs(path)
|
||||
return path
|
||||
@ -522,9 +576,20 @@ class ComposePaths(object):
|
||||
if arch in ("source", "src"):
|
||||
return None
|
||||
if variant.type == "addon":
|
||||
path = os.path.join(self.debug_tree(arch, variant, create_dir=create_dir, relative=relative), "addons", variant.id)
|
||||
path = os.path.join(
|
||||
self.debug_tree(
|
||||
arch, variant, create_dir=create_dir, relative=relative
|
||||
),
|
||||
"addons",
|
||||
variant.id,
|
||||
)
|
||||
else:
|
||||
path = os.path.join(self.debug_tree(arch, variant, create_dir=create_dir, relative=relative), "Packages")
|
||||
path = os.path.join(
|
||||
self.debug_tree(
|
||||
arch, variant, create_dir=create_dir, relative=relative
|
||||
),
|
||||
"Packages",
|
||||
)
|
||||
if create_dir and not relative:
|
||||
makedirs(path)
|
||||
return path
|
||||
@ -539,9 +604,17 @@ class ComposePaths(object):
|
||||
if arch in ("source", "src"):
|
||||
return None
|
||||
if variant.type == "addon":
|
||||
path = os.path.join(self.debug_tree(arch, variant, create_dir=create_dir, relative=relative), "addons", variant.id)
|
||||
path = os.path.join(
|
||||
self.debug_tree(
|
||||
arch, variant, create_dir=create_dir, relative=relative
|
||||
),
|
||||
"addons",
|
||||
variant.id,
|
||||
)
|
||||
else:
|
||||
path = self.debug_tree(arch, variant, create_dir=create_dir, relative=relative)
|
||||
path = self.debug_tree(
|
||||
arch, variant, create_dir=create_dir, relative=relative
|
||||
)
|
||||
if create_dir and not relative:
|
||||
makedirs(path)
|
||||
return path
|
||||
@ -559,12 +632,14 @@ class ComposePaths(object):
|
||||
return None
|
||||
if arch == "src":
|
||||
arch = "source"
|
||||
path = os.path.join(self.topdir(arch, variant, create_dir=create_dir, relative=relative), "iso")
|
||||
path = os.path.join(
|
||||
self.topdir(arch, variant, create_dir=create_dir, relative=relative), "iso"
|
||||
)
|
||||
|
||||
if symlink_to:
|
||||
# TODO: create_dir
|
||||
topdir = self.compose.topdir.rstrip("/") + "/"
|
||||
relative_dir = path[len(topdir):]
|
||||
relative_dir = path[len(topdir) :]
|
||||
target_dir = os.path.join(symlink_to, self.compose.compose_id, relative_dir)
|
||||
if create_dir and not relative:
|
||||
makedirs(target_dir)
|
||||
@ -583,13 +658,21 @@ class ComposePaths(object):
|
||||
makedirs(path)
|
||||
return path
|
||||
|
||||
def iso_path(self, arch, variant, filename, symlink_to=None, create_dir=True, relative=False):
|
||||
def iso_path(
|
||||
self, arch, variant, filename, symlink_to=None, create_dir=True, relative=False
|
||||
):
|
||||
"""
|
||||
Examples:
|
||||
compose/Server/x86_64/iso/rhel-7.0-20120127.0-Server-x86_64-dvd1.iso
|
||||
None
|
||||
"""
|
||||
path = self.iso_dir(arch, variant, symlink_to=symlink_to, create_dir=create_dir, relative=relative)
|
||||
path = self.iso_dir(
|
||||
arch,
|
||||
variant,
|
||||
symlink_to=symlink_to,
|
||||
create_dir=create_dir,
|
||||
relative=relative,
|
||||
)
|
||||
if path is None:
|
||||
return None
|
||||
|
||||
@ -605,11 +688,13 @@ class ComposePaths(object):
|
||||
@param symlink_to=None
|
||||
@param relative=False
|
||||
"""
|
||||
path = os.path.join(self.topdir('%(arch)s', variant, create_dir=False, relative=relative),
|
||||
"images")
|
||||
path = os.path.join(
|
||||
self.topdir("%(arch)s", variant, create_dir=False, relative=relative),
|
||||
"images",
|
||||
)
|
||||
if symlink_to:
|
||||
topdir = self.compose.topdir.rstrip("/") + "/"
|
||||
relative_dir = path[len(topdir):]
|
||||
relative_dir = path[len(topdir) :]
|
||||
target_dir = os.path.join(symlink_to, self.compose.compose_id, relative_dir)
|
||||
try:
|
||||
os.symlink(target_dir, path)
|
||||
@ -636,7 +721,10 @@ class ComposePaths(object):
|
||||
return None
|
||||
if arch == "src":
|
||||
arch = "source"
|
||||
path = os.path.join(self.topdir(arch, variant, create_dir=create_dir, relative=relative), "jigdo")
|
||||
path = os.path.join(
|
||||
self.topdir(arch, variant, create_dir=create_dir, relative=relative),
|
||||
"jigdo",
|
||||
)
|
||||
|
||||
if create_dir and not relative:
|
||||
makedirs(path)
|
||||
@ -648,7 +736,9 @@ class ComposePaths(object):
|
||||
compose/metadata
|
||||
compose/metadata/rpms.json
|
||||
"""
|
||||
path = os.path.join(self.topdir(create_dir=create_dir, relative=relative), "metadata")
|
||||
path = os.path.join(
|
||||
self.topdir(create_dir=create_dir, relative=relative), "metadata"
|
||||
)
|
||||
if create_dir and not relative:
|
||||
makedirs(path)
|
||||
if file_name:
|
||||
|
@ -28,7 +28,7 @@ from .extra_isos import ExtraIsosPhase # noqa
|
||||
from .live_images import LiveImagesPhase # noqa
|
||||
from .image_build import ImageBuildPhase # noqa
|
||||
from .test import TestPhase # noqa
|
||||
from .image_checksum import ImageChecksumPhase # noqa
|
||||
from .image_checksum import ImageChecksumPhase # noqa
|
||||
from .livemedia_phase import LiveMediaPhase # noqa
|
||||
from .ostree import OSTreePhase # noqa
|
||||
from .ostree_installer import OstreeInstallerPhase # noqa
|
||||
|
@ -19,7 +19,6 @@ from pungi import util
|
||||
|
||||
|
||||
class PhaseBase(object):
|
||||
|
||||
def __init__(self, compose):
|
||||
self.compose = compose
|
||||
self.msg = "---------- PHASE: %s ----------" % self.name.upper()
|
||||
@ -60,7 +59,7 @@ class PhaseBase(object):
|
||||
self.finished = True
|
||||
return
|
||||
self.compose.log_info("[BEGIN] %s" % self.msg)
|
||||
self.compose.notifier.send('phase-start', phase_name=self.name)
|
||||
self.compose.notifier.send("phase-start", phase_name=self.name)
|
||||
self.run()
|
||||
|
||||
def get_config_block(self, variant, arch=None):
|
||||
@ -70,11 +69,13 @@ class PhaseBase(object):
|
||||
"""
|
||||
self.used_patterns = self.used_patterns or set()
|
||||
if arch is not None:
|
||||
return util.get_arch_variant_data(self.compose.conf, self.name,
|
||||
arch, variant, keys=self.used_patterns)
|
||||
return util.get_arch_variant_data(
|
||||
self.compose.conf, self.name, arch, variant, keys=self.used_patterns
|
||||
)
|
||||
else:
|
||||
return util.get_variant_data(self.compose.conf, self.name,
|
||||
variant, keys=self.used_patterns)
|
||||
return util.get_variant_data(
|
||||
self.compose.conf, self.name, variant, keys=self.used_patterns
|
||||
)
|
||||
|
||||
def get_all_patterns(self):
|
||||
"""Get all variant patterns from config file for this phase."""
|
||||
@ -93,10 +94,12 @@ class PhaseBase(object):
|
||||
unused_patterns = all_patterns - self.used_patterns
|
||||
if unused_patterns:
|
||||
self.compose.log_warning(
|
||||
'[%s] Patterns in config do not match any variant: %s'
|
||||
% (self.name.upper(), ', '.join(sorted(unused_patterns))))
|
||||
"[%s] Patterns in config do not match any variant: %s"
|
||||
% (self.name.upper(), ", ".join(sorted(unused_patterns)))
|
||||
)
|
||||
self.compose.log_info(
|
||||
'Note that variants can be excluded in configuration file')
|
||||
"Note that variants can be excluded in configuration file"
|
||||
)
|
||||
|
||||
def stop(self):
|
||||
if self.finished:
|
||||
@ -108,7 +111,7 @@ class PhaseBase(object):
|
||||
if self.used_patterns is not None:
|
||||
# We only want to report this if the config was actually queried.
|
||||
self.report_unused_patterns()
|
||||
self.compose.notifier.send('phase-stop', phase_name=self.name)
|
||||
self.compose.notifier.send("phase-stop", phase_name=self.name)
|
||||
|
||||
def run(self):
|
||||
raise NotImplementedError
|
||||
@ -121,7 +124,9 @@ class ConfigGuardedPhase(PhaseBase):
|
||||
if super(ConfigGuardedPhase, self).skip():
|
||||
return True
|
||||
if not self.compose.conf.get(self.name):
|
||||
self.compose.log_info("Config section '%s' was not found. Skipping." % self.name)
|
||||
self.compose.log_info(
|
||||
"Config section '%s' was not found. Skipping." % self.name
|
||||
)
|
||||
return True
|
||||
return False
|
||||
|
||||
@ -140,9 +145,11 @@ class ImageConfigMixin(object):
|
||||
|
||||
def get_config(self, cfg, opt):
|
||||
return cfg.get(
|
||||
opt, self.compose.conf.get(
|
||||
'%s_%s' % (self.name, opt), self.compose.conf.get(
|
||||
'global_%s' % opt)))
|
||||
opt,
|
||||
self.compose.conf.get(
|
||||
"%s_%s" % (self.name, opt), self.compose.conf.get("global_%s" % opt)
|
||||
),
|
||||
)
|
||||
|
||||
def get_version(self, cfg):
|
||||
"""
|
||||
@ -161,11 +168,16 @@ class ImageConfigMixin(object):
|
||||
deprecated), replace it with a generated value. Uses configuration
|
||||
passed as argument, phase specific settings and global settings.
|
||||
"""
|
||||
for key, conf in [('release', cfg),
|
||||
('%s_release' % self.name, self.compose.conf),
|
||||
('global_release', self.compose.conf)]:
|
||||
for key, conf in [
|
||||
("release", cfg),
|
||||
("%s_release" % self.name, self.compose.conf),
|
||||
("global_release", self.compose.conf),
|
||||
]:
|
||||
if key in conf:
|
||||
return util.version_generator(self.compose, conf[key]) or self.compose.image_release
|
||||
return (
|
||||
util.version_generator(self.compose, conf[key])
|
||||
or self.compose.image_release
|
||||
)
|
||||
return None
|
||||
|
||||
def get_ksurl(self, cfg):
|
||||
@ -185,6 +197,7 @@ class PhaseLoggerMixin(object):
|
||||
A mixin that can extend a phase with a new logging logger that copy
|
||||
handlers from compose, but with different formatter that includes phase name.
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(PhaseLoggerMixin, self).__init__(*args, **kwargs)
|
||||
self.logger = None
|
||||
@ -193,6 +206,7 @@ class PhaseLoggerMixin(object):
|
||||
self.logger.setLevel(logging.DEBUG)
|
||||
format = "%(asctime)s [%(name)-16s] [%(levelname)-8s] %(message)s"
|
||||
import copy
|
||||
|
||||
for handler in self.compose._logger.handlers:
|
||||
hl = copy.copy(handler)
|
||||
hl.setFormatter(logging.Formatter(format, datefmt="%Y-%m-%d %H:%M:%S"))
|
||||
|
@ -47,7 +47,7 @@ class BuildinstallPhase(PhaseBase):
|
||||
self.pool.finished_tasks = set()
|
||||
self.buildinstall_method = self.compose.conf.get("buildinstall_method")
|
||||
self.lorax_use_koji_plugin = self.compose.conf.get("lorax_use_koji_plugin")
|
||||
self.used_lorax = self.buildinstall_method == 'lorax'
|
||||
self.used_lorax = self.buildinstall_method == "lorax"
|
||||
self.pkgset_phase = pkgset_phase
|
||||
|
||||
self.warned_skipped = False
|
||||
@ -63,7 +63,16 @@ class BuildinstallPhase(PhaseBase):
|
||||
return True
|
||||
return False
|
||||
|
||||
def _get_lorax_cmd(self, repo_baseurl, output_dir, variant, arch, buildarch, volid, final_output_dir):
|
||||
def _get_lorax_cmd(
|
||||
self,
|
||||
repo_baseurl,
|
||||
output_dir,
|
||||
variant,
|
||||
arch,
|
||||
buildarch,
|
||||
volid,
|
||||
final_output_dir,
|
||||
):
|
||||
noupgrade = True
|
||||
bugurl = None
|
||||
nomacboot = True
|
||||
@ -76,19 +85,21 @@ class BuildinstallPhase(PhaseBase):
|
||||
version = self.compose.conf.get(
|
||||
"treeinfo_version", self.compose.conf["release_version"]
|
||||
)
|
||||
for data in get_arch_variant_data(self.compose.conf, 'lorax_options', arch, variant):
|
||||
if not data.get('noupgrade', True):
|
||||
for data in get_arch_variant_data(
|
||||
self.compose.conf, "lorax_options", arch, variant
|
||||
):
|
||||
if not data.get("noupgrade", True):
|
||||
noupgrade = False
|
||||
if data.get('bugurl'):
|
||||
bugurl = data.get('bugurl')
|
||||
if not data.get('nomacboot', True):
|
||||
if data.get("bugurl"):
|
||||
bugurl = data.get("bugurl")
|
||||
if not data.get("nomacboot", True):
|
||||
nomacboot = False
|
||||
if "rootfs_size" in data:
|
||||
rootfs_size = data.get("rootfs_size")
|
||||
add_template.extend(data.get('add_template', []))
|
||||
add_arch_template.extend(data.get('add_arch_template', []))
|
||||
add_template_var.extend(data.get('add_template_var', []))
|
||||
add_arch_template_var.extend(data.get('add_arch_template_var', []))
|
||||
add_template.extend(data.get("add_template", []))
|
||||
add_arch_template.extend(data.get("add_arch_template", []))
|
||||
add_template_var.extend(data.get("add_template_var", []))
|
||||
add_arch_template_var.extend(data.get("add_arch_template_var", []))
|
||||
dracut_args.extend(data.get("dracut_args", []))
|
||||
if "version" in data:
|
||||
version = data["version"]
|
||||
@ -101,7 +112,9 @@ class BuildinstallPhase(PhaseBase):
|
||||
|
||||
repos = repo_baseurl[:]
|
||||
repos.extend(
|
||||
get_arch_variant_data(self.compose.conf, "lorax_extra_sources", arch, variant)
|
||||
get_arch_variant_data(
|
||||
self.compose.conf, "lorax_extra_sources", arch, variant
|
||||
)
|
||||
)
|
||||
if self.compose.has_comps:
|
||||
comps_repo = self.compose.paths.work.comps_repo(arch, variant)
|
||||
@ -162,8 +175,10 @@ class BuildinstallPhase(PhaseBase):
|
||||
log_dir=log_dir,
|
||||
dracut_args=dracut_args,
|
||||
)
|
||||
return 'rm -rf %s && %s' % (shlex_quote(output_topdir),
|
||||
' '.join([shlex_quote(x) for x in lorax_cmd]))
|
||||
return "rm -rf %s && %s" % (
|
||||
shlex_quote(output_topdir),
|
||||
" ".join([shlex_quote(x) for x in lorax_cmd]),
|
||||
)
|
||||
|
||||
def get_repos(self, arch):
|
||||
repos = []
|
||||
@ -176,7 +191,7 @@ class BuildinstallPhase(PhaseBase):
|
||||
product = self.compose.conf["release_name"]
|
||||
version = self.compose.conf["release_version"]
|
||||
release = self.compose.conf["release_version"]
|
||||
disc_type = self.compose.conf['disc_types'].get('dvd', 'dvd')
|
||||
disc_type = self.compose.conf["disc_types"].get("dvd", "dvd")
|
||||
|
||||
# Prepare kickstart file for final images.
|
||||
self.pool.kickstart_file = get_kickstart_file(self.compose)
|
||||
@ -184,8 +199,12 @@ class BuildinstallPhase(PhaseBase):
|
||||
for arch in self.compose.get_arches():
|
||||
commands = []
|
||||
|
||||
output_dir = self.compose.paths.work.buildinstall_dir(arch, allow_topdir_override=True)
|
||||
final_output_dir = self.compose.paths.work.buildinstall_dir(arch, allow_topdir_override=False)
|
||||
output_dir = self.compose.paths.work.buildinstall_dir(
|
||||
arch, allow_topdir_override=True
|
||||
)
|
||||
final_output_dir = self.compose.paths.work.buildinstall_dir(
|
||||
arch, allow_topdir_override=False
|
||||
)
|
||||
makedirs(final_output_dir)
|
||||
repo_baseurls = self.get_repos(arch)
|
||||
if final_output_dir != output_dir:
|
||||
@ -194,40 +213,58 @@ class BuildinstallPhase(PhaseBase):
|
||||
if self.buildinstall_method == "lorax":
|
||||
|
||||
buildarch = get_valid_arches(arch)[0]
|
||||
for variant in self.compose.get_variants(arch=arch, types=['variant']):
|
||||
for variant in self.compose.get_variants(arch=arch, types=["variant"]):
|
||||
if variant.is_empty:
|
||||
continue
|
||||
|
||||
skip = get_arch_variant_data(self.compose.conf, "buildinstall_skip", arch, variant)
|
||||
skip = get_arch_variant_data(
|
||||
self.compose.conf, "buildinstall_skip", arch, variant
|
||||
)
|
||||
if skip == [True]:
|
||||
self.compose.log_info(
|
||||
'Skipping buildinstall for %s.%s due to config option' % (variant, arch))
|
||||
"Skipping buildinstall for %s.%s due to config option"
|
||||
% (variant, arch)
|
||||
)
|
||||
continue
|
||||
|
||||
volid = get_volid(self.compose, arch, variant=variant, disc_type=disc_type)
|
||||
volid = get_volid(
|
||||
self.compose, arch, variant=variant, disc_type=disc_type
|
||||
)
|
||||
commands.append(
|
||||
(
|
||||
variant,
|
||||
self._get_lorax_cmd(
|
||||
repo_baseurls, output_dir, variant, arch, buildarch, volid, final_output_dir
|
||||
repo_baseurls,
|
||||
output_dir,
|
||||
variant,
|
||||
arch,
|
||||
buildarch,
|
||||
volid,
|
||||
final_output_dir,
|
||||
),
|
||||
)
|
||||
)
|
||||
elif self.buildinstall_method == "buildinstall":
|
||||
volid = get_volid(self.compose, arch, disc_type=disc_type)
|
||||
commands.append(
|
||||
(None,
|
||||
lorax.get_buildinstall_cmd(product,
|
||||
version,
|
||||
release,
|
||||
repo_baseurls,
|
||||
output_dir,
|
||||
is_final=self.compose.supported,
|
||||
buildarch=arch,
|
||||
volid=volid))
|
||||
(
|
||||
None,
|
||||
lorax.get_buildinstall_cmd(
|
||||
product,
|
||||
version,
|
||||
release,
|
||||
repo_baseurls,
|
||||
output_dir,
|
||||
is_final=self.compose.supported,
|
||||
buildarch=arch,
|
||||
volid=volid,
|
||||
),
|
||||
)
|
||||
)
|
||||
else:
|
||||
raise ValueError("Unsupported buildinstall method: %s" % self.buildinstall_method)
|
||||
raise ValueError(
|
||||
"Unsupported buildinstall method: %s" % self.buildinstall_method
|
||||
)
|
||||
|
||||
for (variant, cmd) in commands:
|
||||
self.pool.add(BuildinstallThread(self.pool))
|
||||
@ -239,8 +276,11 @@ class BuildinstallPhase(PhaseBase):
|
||||
# If the phase is skipped, we can treat it as successful. Either there
|
||||
# will be no output, or it's a debug run of compose where anything can
|
||||
# happen.
|
||||
return (super(BuildinstallPhase, self).skip()
|
||||
or (variant.uid if self.used_lorax else None, arch) in self.pool.finished_tasks)
|
||||
return (
|
||||
super(BuildinstallPhase, self).skip()
|
||||
or (variant.uid if self.used_lorax else None, arch)
|
||||
in self.pool.finished_tasks
|
||||
)
|
||||
|
||||
|
||||
def get_kickstart_file(compose):
|
||||
@ -296,7 +336,7 @@ def tweak_configs(path, volid, ks_file, configs=BOOT_CONFIGS, logger=None):
|
||||
os.unlink(config_path) # break hadlink by removing file writing a new one
|
||||
|
||||
# double-escape volid in yaboot.conf
|
||||
new_volid = volid_escaped_2 if 'yaboot' in config else volid_escaped
|
||||
new_volid = volid_escaped_2 if "yaboot" in config else volid_escaped
|
||||
|
||||
ks = (" ks=hd:LABEL=%s:/ks.cfg" % new_volid) if ks_file else ""
|
||||
|
||||
@ -310,7 +350,7 @@ def tweak_configs(path, volid, ks_file, configs=BOOT_CONFIGS, logger=None):
|
||||
f.write(data)
|
||||
|
||||
if logger and data != original_data:
|
||||
logger.info('Boot config %s changed' % config_path)
|
||||
logger.info("Boot config %s changed" % config_path)
|
||||
|
||||
return found_configs
|
||||
|
||||
@ -319,7 +359,9 @@ def tweak_configs(path, volid, ks_file, configs=BOOT_CONFIGS, logger=None):
|
||||
# * it's quite trivial to replace volids
|
||||
# * it's not easy to replace menu titles
|
||||
# * we probably need to get this into lorax
|
||||
def tweak_buildinstall(compose, src, dst, arch, variant, label, volid, kickstart_file=None):
|
||||
def tweak_buildinstall(
|
||||
compose, src, dst, arch, variant, label, volid, kickstart_file=None
|
||||
):
|
||||
tmp_dir = compose.mkdtemp(prefix="tweak_buildinstall_")
|
||||
|
||||
# verify src
|
||||
@ -336,11 +378,14 @@ def tweak_buildinstall(compose, src, dst, arch, variant, label, volid, kickstart
|
||||
# copy src to temp
|
||||
# TODO: place temp on the same device as buildinstall dir so we can hardlink
|
||||
cmd = "cp -dRv --preserve=mode,links,timestamps --remove-destination %s/* %s/" % (
|
||||
shlex_quote(src), shlex_quote(tmp_dir)
|
||||
shlex_quote(src),
|
||||
shlex_quote(tmp_dir),
|
||||
)
|
||||
run(cmd)
|
||||
|
||||
found_configs = tweak_configs(tmp_dir, volid, kickstart_file, logger=compose._logger)
|
||||
found_configs = tweak_configs(
|
||||
tmp_dir, volid, kickstart_file, logger=compose._logger
|
||||
)
|
||||
if kickstart_file and found_configs:
|
||||
shutil.copy2(kickstart_file, os.path.join(dst, "ks.cfg"))
|
||||
|
||||
@ -351,15 +396,23 @@ def tweak_buildinstall(compose, src, dst, arch, variant, label, volid, kickstart
|
||||
if not os.path.isfile(image):
|
||||
continue
|
||||
|
||||
with iso.mount(image, logger=compose._logger,
|
||||
use_guestmount=compose.conf.get("buildinstall_use_guestmount")
|
||||
) as mount_tmp_dir:
|
||||
with iso.mount(
|
||||
image,
|
||||
logger=compose._logger,
|
||||
use_guestmount=compose.conf.get("buildinstall_use_guestmount"),
|
||||
) as mount_tmp_dir:
|
||||
for config in BOOT_CONFIGS:
|
||||
config_path = os.path.join(tmp_dir, config)
|
||||
config_in_image = os.path.join(mount_tmp_dir, config)
|
||||
|
||||
if os.path.isfile(config_in_image):
|
||||
cmd = ["cp", "-v", "--remove-destination", config_path, config_in_image]
|
||||
cmd = [
|
||||
"cp",
|
||||
"-v",
|
||||
"--remove-destination",
|
||||
config_path,
|
||||
config_in_image,
|
||||
]
|
||||
run(cmd)
|
||||
|
||||
# HACK: make buildinstall files world readable
|
||||
@ -367,7 +420,8 @@ def tweak_buildinstall(compose, src, dst, arch, variant, label, volid, kickstart
|
||||
|
||||
# copy temp to dst
|
||||
cmd = "cp -dRv --preserve=mode,links,timestamps --remove-destination %s/* %s/" % (
|
||||
shlex_quote(tmp_dir), shlex_quote(dst)
|
||||
shlex_quote(tmp_dir),
|
||||
shlex_quote(dst),
|
||||
)
|
||||
run(cmd)
|
||||
|
||||
@ -378,7 +432,7 @@ def link_boot_iso(compose, arch, variant, can_fail):
|
||||
if arch == "src":
|
||||
return
|
||||
|
||||
disc_type = compose.conf['disc_types'].get('boot', 'boot')
|
||||
disc_type = compose.conf["disc_types"].get("boot", "boot")
|
||||
|
||||
symlink_isos_to = compose.conf.get("symlink_isos_to")
|
||||
os_tree = compose.paths.compose.os_tree(arch, variant)
|
||||
@ -388,14 +442,15 @@ def link_boot_iso(compose, arch, variant, can_fail):
|
||||
return
|
||||
|
||||
msg = "Linking boot.iso (arch: %s, variant: %s)" % (arch, variant)
|
||||
filename = compose.get_image_name(arch, variant, disc_type=disc_type,
|
||||
disc_num=None, suffix=".iso")
|
||||
new_boot_iso_path = compose.paths.compose.iso_path(arch, variant, filename,
|
||||
symlink_to=symlink_isos_to)
|
||||
new_boot_iso_relative_path = compose.paths.compose.iso_path(arch,
|
||||
variant,
|
||||
filename,
|
||||
relative=True)
|
||||
filename = compose.get_image_name(
|
||||
arch, variant, disc_type=disc_type, disc_num=None, suffix=".iso"
|
||||
)
|
||||
new_boot_iso_path = compose.paths.compose.iso_path(
|
||||
arch, variant, filename, symlink_to=symlink_isos_to
|
||||
)
|
||||
new_boot_iso_relative_path = compose.paths.compose.iso_path(
|
||||
arch, variant, filename, relative=True
|
||||
)
|
||||
if os.path.exists(new_boot_iso_path):
|
||||
# TODO: log
|
||||
compose.log_warning("[SKIP ] %s" % msg)
|
||||
@ -427,8 +482,8 @@ def link_boot_iso(compose, arch, variant, can_fail):
|
||||
img.bootable = True
|
||||
img.subvariant = variant.uid
|
||||
img.implant_md5 = implant_md5
|
||||
setattr(img, 'can_fail', can_fail)
|
||||
setattr(img, 'deliverable', 'buildinstall')
|
||||
setattr(img, "can_fail", can_fail)
|
||||
setattr(img, "deliverable", "buildinstall")
|
||||
try:
|
||||
img.volume_id = iso.get_volume_id(new_boot_iso_path)
|
||||
except RuntimeError:
|
||||
@ -441,28 +496,33 @@ class BuildinstallThread(WorkerThread):
|
||||
def process(self, item, num):
|
||||
# The variant is None unless lorax is used as buildinstall method.
|
||||
compose, arch, variant, cmd = item
|
||||
can_fail = compose.can_fail(variant, arch, 'buildinstall')
|
||||
with failable(compose, can_fail, variant, arch, 'buildinstall'):
|
||||
can_fail = compose.can_fail(variant, arch, "buildinstall")
|
||||
with failable(compose, can_fail, variant, arch, "buildinstall"):
|
||||
self.worker(compose, arch, variant, cmd, num)
|
||||
|
||||
def worker(self, compose, arch, variant, cmd, num):
|
||||
buildinstall_method = compose.conf["buildinstall_method"]
|
||||
lorax_use_koji_plugin = compose.conf["lorax_use_koji_plugin"]
|
||||
log_filename = ('buildinstall-%s' % variant.uid) if variant else 'buildinstall'
|
||||
log_filename = ("buildinstall-%s" % variant.uid) if variant else "buildinstall"
|
||||
log_file = compose.paths.log.log_file(arch, log_filename)
|
||||
|
||||
msg = "Running buildinstall for arch %s, variant %s" % (arch, variant)
|
||||
|
||||
output_dir = compose.paths.work.buildinstall_dir(
|
||||
arch, allow_topdir_override=True, variant=variant)
|
||||
final_output_dir = compose.paths.work.buildinstall_dir(
|
||||
arch, variant=variant)
|
||||
arch, allow_topdir_override=True, variant=variant
|
||||
)
|
||||
final_output_dir = compose.paths.work.buildinstall_dir(arch, variant=variant)
|
||||
|
||||
if (os.path.isdir(output_dir) and os.listdir(output_dir) or
|
||||
os.path.isdir(final_output_dir) and os.listdir(final_output_dir)):
|
||||
if (
|
||||
os.path.isdir(output_dir)
|
||||
and os.listdir(output_dir)
|
||||
or os.path.isdir(final_output_dir)
|
||||
and os.listdir(final_output_dir)
|
||||
):
|
||||
# output dir is *not* empty -> SKIP
|
||||
self.pool.log_warning(
|
||||
'[SKIP ] Buildinstall for arch %s, variant %s' % (arch, variant))
|
||||
"[SKIP ] Buildinstall for arch %s, variant %s" % (arch, variant)
|
||||
)
|
||||
return
|
||||
|
||||
self.pool.log_info("[BEGIN] %s" % msg)
|
||||
@ -485,15 +545,21 @@ class BuildinstallThread(WorkerThread):
|
||||
runroot = Runroot(compose, phase="buildinstall")
|
||||
if buildinstall_method == "lorax" and lorax_use_koji_plugin:
|
||||
runroot.run_pungi_buildinstall(
|
||||
cmd, log_file=log_file, arch=arch, packages=packages,
|
||||
cmd,
|
||||
log_file=log_file,
|
||||
arch=arch,
|
||||
packages=packages,
|
||||
mounts=[compose.topdir],
|
||||
weight=compose.conf['runroot_weights'].get('buildinstall'),
|
||||
weight=compose.conf["runroot_weights"].get("buildinstall"),
|
||||
)
|
||||
else:
|
||||
runroot.run(
|
||||
cmd, log_file=log_file, arch=arch, packages=packages,
|
||||
cmd,
|
||||
log_file=log_file,
|
||||
arch=arch,
|
||||
packages=packages,
|
||||
mounts=[compose.topdir],
|
||||
weight=compose.conf['runroot_weights'].get('buildinstall'),
|
||||
weight=compose.conf["runroot_weights"].get("buildinstall"),
|
||||
chown_paths=chown_paths,
|
||||
)
|
||||
|
||||
@ -504,14 +570,14 @@ class BuildinstallThread(WorkerThread):
|
||||
copy_all(results_dir, final_output_dir)
|
||||
|
||||
# Get the log_dir into which we should copy the resulting log files.
|
||||
log_fname = 'buildinstall-%s-logs/dummy' % variant.uid
|
||||
log_fname = "buildinstall-%s-logs/dummy" % variant.uid
|
||||
final_log_dir = os.path.dirname(compose.paths.log.log_file(arch, log_fname))
|
||||
if not os.path.exists(final_log_dir):
|
||||
makedirs(final_log_dir)
|
||||
log_dir = os.path.join(output_dir, "logs")
|
||||
copy_all(log_dir, final_log_dir)
|
||||
|
||||
log_file = compose.paths.log.log_file(arch, log_filename + '-RPMs')
|
||||
log_file = compose.paths.log.log_file(arch, log_filename + "-RPMs")
|
||||
rpms = runroot.get_buildroot_rpms()
|
||||
with open(log_file, "w") as f:
|
||||
f.write("\n".join(rpms))
|
||||
@ -523,7 +589,7 @@ class BuildinstallThread(WorkerThread):
|
||||
self.pool.log_info("[DONE ] %s" % msg)
|
||||
|
||||
def copy_files(self, compose, variant, arch):
|
||||
disc_type = compose.conf['disc_types'].get('dvd', 'dvd')
|
||||
disc_type = compose.conf["disc_types"].get("dvd", "dvd")
|
||||
|
||||
buildinstall_dir = compose.paths.work.buildinstall_dir(arch)
|
||||
|
||||
@ -533,13 +599,17 @@ class BuildinstallThread(WorkerThread):
|
||||
buildinstall_dir = os.path.join(buildinstall_dir, variant.uid)
|
||||
|
||||
# Find all relevant variants if lorax is not used.
|
||||
variants = [variant] if variant else compose.get_variants(arch=arch, types=["self", "variant"])
|
||||
variants = (
|
||||
[variant]
|
||||
if variant
|
||||
else compose.get_variants(arch=arch, types=["self", "variant"])
|
||||
)
|
||||
for var in variants:
|
||||
os_tree = compose.paths.compose.os_tree(arch, var)
|
||||
# TODO: label is not used
|
||||
label = ""
|
||||
volid = get_volid(compose, arch, var, disc_type=disc_type)
|
||||
can_fail = compose.can_fail(var, arch, 'buildinstall')
|
||||
can_fail = compose.can_fail(var, arch, "buildinstall")
|
||||
tweak_buildinstall(
|
||||
compose,
|
||||
buildinstall_dir,
|
||||
@ -565,7 +635,7 @@ def _get_log_dir(compose, variant, arch):
|
||||
|
||||
# The paths module will modify the filename (by inserting arch). But we
|
||||
# only care about the directory anyway.
|
||||
log_filename = 'buildinstall-%s-logs/dummy' % variant.uid
|
||||
log_filename = "buildinstall-%s-logs/dummy" % variant.uid
|
||||
log_dir = os.path.dirname(compose.paths.log.log_file(arch, log_filename))
|
||||
makedirs(log_dir)
|
||||
return log_dir
|
||||
|
@ -29,8 +29,14 @@ from pungi.wrappers import iso
|
||||
from pungi.wrappers.createrepo import CreaterepoWrapper
|
||||
from pungi.wrappers import kojiwrapper
|
||||
from pungi.phases.base import PhaseBase, PhaseLoggerMixin
|
||||
from pungi.util import (makedirs, get_volid, get_arch_variant_data, failable,
|
||||
get_file_size, get_mtime)
|
||||
from pungi.util import (
|
||||
makedirs,
|
||||
get_volid,
|
||||
get_arch_variant_data,
|
||||
failable,
|
||||
get_file_size,
|
||||
get_mtime,
|
||||
)
|
||||
from pungi.media_split import MediaSplitter, convert_media_size
|
||||
from pungi.compose_metadata.discinfo import read_discinfo, write_discinfo
|
||||
from pungi.runroot import Runroot
|
||||
@ -59,31 +65,42 @@ class CreateisoPhase(PhaseLoggerMixin, PhaseBase):
|
||||
return False
|
||||
if variant.type != "variant":
|
||||
return False
|
||||
skip = get_arch_variant_data(self.compose.conf, "buildinstall_skip", arch, variant)
|
||||
skip = get_arch_variant_data(
|
||||
self.compose.conf, "buildinstall_skip", arch, variant
|
||||
)
|
||||
if skip == [True]:
|
||||
# Buildinstall is skipped for this tree. Can't create a bootable ISO.
|
||||
return False
|
||||
return bool(self.compose.conf.get('buildinstall_method', ''))
|
||||
return bool(self.compose.conf.get("buildinstall_method", ""))
|
||||
|
||||
def run(self):
|
||||
symlink_isos_to = self.compose.conf.get("symlink_isos_to")
|
||||
disc_type = self.compose.conf['disc_types'].get('dvd', 'dvd')
|
||||
disc_type = self.compose.conf["disc_types"].get("dvd", "dvd")
|
||||
deliverables = []
|
||||
|
||||
commands = []
|
||||
for variant in self.compose.get_variants(types=["variant", "layered-product", "optional"]):
|
||||
for variant in self.compose.get_variants(
|
||||
types=["variant", "layered-product", "optional"]
|
||||
):
|
||||
if variant.is_empty:
|
||||
continue
|
||||
for arch in variant.arches + ["src"]:
|
||||
skip_iso = get_arch_variant_data(self.compose.conf, "createiso_skip", arch, variant)
|
||||
skip_iso = get_arch_variant_data(
|
||||
self.compose.conf, "createiso_skip", arch, variant
|
||||
)
|
||||
if skip_iso == [True]:
|
||||
self.logger.info("Skipping createiso for %s.%s due to config option" % (variant, arch))
|
||||
self.logger.info(
|
||||
"Skipping createiso for %s.%s due to config option"
|
||||
% (variant, arch)
|
||||
)
|
||||
continue
|
||||
|
||||
volid = get_volid(self.compose, arch, variant, disc_type=disc_type)
|
||||
os_tree = self.compose.paths.compose.os_tree(arch, variant)
|
||||
|
||||
iso_dir = self.compose.paths.compose.iso_dir(arch, variant, symlink_to=symlink_isos_to)
|
||||
iso_dir = self.compose.paths.compose.iso_dir(
|
||||
arch, variant, symlink_to=symlink_isos_to
|
||||
)
|
||||
if not iso_dir:
|
||||
continue
|
||||
|
||||
@ -97,21 +114,25 @@ class CreateisoPhase(PhaseLoggerMixin, PhaseBase):
|
||||
|
||||
if bootable and not self.bi.succeeded(variant, arch):
|
||||
self.logger.warning(
|
||||
'ISO should be bootable, but buildinstall failed. Skipping for %s.%s'
|
||||
% (variant, arch))
|
||||
"ISO should be bootable, but buildinstall failed. Skipping for %s.%s"
|
||||
% (variant, arch)
|
||||
)
|
||||
continue
|
||||
|
||||
split_iso_data = split_iso(self.compose, arch, variant, no_split=bootable,
|
||||
logger=self.logger)
|
||||
split_iso_data = split_iso(
|
||||
self.compose, arch, variant, no_split=bootable, logger=self.logger
|
||||
)
|
||||
disc_count = len(split_iso_data)
|
||||
|
||||
for disc_num, iso_data in enumerate(split_iso_data):
|
||||
disc_num += 1
|
||||
|
||||
filename = self.compose.get_image_name(
|
||||
arch, variant, disc_type=disc_type, disc_num=disc_num)
|
||||
arch, variant, disc_type=disc_type, disc_num=disc_num
|
||||
)
|
||||
iso_path = self.compose.paths.compose.iso_path(
|
||||
arch, variant, filename, symlink_to=symlink_isos_to)
|
||||
arch, variant, filename, symlink_to=symlink_isos_to
|
||||
)
|
||||
if os.path.isfile(iso_path):
|
||||
self.logger.warning(
|
||||
"Skipping mkisofs, image already exists: %s", iso_path
|
||||
@ -119,9 +140,14 @@ class CreateisoPhase(PhaseLoggerMixin, PhaseBase):
|
||||
continue
|
||||
deliverables.append(iso_path)
|
||||
|
||||
graft_points = prepare_iso(self.compose, arch, variant,
|
||||
disc_num=disc_num, disc_count=disc_count,
|
||||
split_iso_data=iso_data)
|
||||
graft_points = prepare_iso(
|
||||
self.compose,
|
||||
arch,
|
||||
variant,
|
||||
disc_num=disc_num,
|
||||
disc_count=disc_count,
|
||||
split_iso_data=iso_data,
|
||||
)
|
||||
|
||||
cmd = {
|
||||
"iso_path": iso_path,
|
||||
@ -133,8 +159,9 @@ class CreateisoPhase(PhaseLoggerMixin, PhaseBase):
|
||||
}
|
||||
|
||||
if os.path.islink(iso_dir):
|
||||
cmd["mount"] = os.path.abspath(os.path.join(os.path.dirname(iso_dir),
|
||||
os.readlink(iso_dir)))
|
||||
cmd["mount"] = os.path.abspath(
|
||||
os.path.join(os.path.dirname(iso_dir), os.readlink(iso_dir))
|
||||
)
|
||||
|
||||
opts = createiso.CreateIsoOpts(
|
||||
output_dir=iso_dir,
|
||||
@ -147,21 +174,25 @@ class CreateisoPhase(PhaseLoggerMixin, PhaseBase):
|
||||
)
|
||||
|
||||
if bootable:
|
||||
opts = opts._replace(buildinstall_method=self.compose.conf['buildinstall_method'])
|
||||
opts = opts._replace(
|
||||
buildinstall_method=self.compose.conf["buildinstall_method"]
|
||||
)
|
||||
|
||||
if self.compose.conf['create_jigdo']:
|
||||
if self.compose.conf["create_jigdo"]:
|
||||
jigdo_dir = self.compose.paths.compose.jigdo_dir(arch, variant)
|
||||
opts = opts._replace(jigdo_dir=jigdo_dir, os_tree=os_tree)
|
||||
|
||||
script_file = os.path.join(self.compose.paths.work.tmp_dir(arch, variant),
|
||||
'createiso-%s.sh' % filename)
|
||||
with open(script_file, 'w') as f:
|
||||
script_file = os.path.join(
|
||||
self.compose.paths.work.tmp_dir(arch, variant),
|
||||
"createiso-%s.sh" % filename,
|
||||
)
|
||||
with open(script_file, "w") as f:
|
||||
createiso.write_script(opts, f)
|
||||
cmd['cmd'] = ['bash', script_file]
|
||||
cmd["cmd"] = ["bash", script_file]
|
||||
commands.append((cmd, variant, arch))
|
||||
|
||||
if self.compose.notifier:
|
||||
self.compose.notifier.send('createiso-targets', deliverables=deliverables)
|
||||
self.compose.notifier.send("createiso-targets", deliverables=deliverables)
|
||||
|
||||
for (cmd, variant, arch) in commands:
|
||||
self.pool.add(CreateIsoThread(self.pool))
|
||||
@ -180,15 +211,19 @@ class CreateIsoThread(WorkerThread):
|
||||
except OSError:
|
||||
pass
|
||||
if compose.notifier:
|
||||
compose.notifier.send('createiso-imagefail',
|
||||
file=cmd['iso_path'],
|
||||
arch=arch,
|
||||
variant=str(variant))
|
||||
compose.notifier.send(
|
||||
"createiso-imagefail",
|
||||
file=cmd["iso_path"],
|
||||
arch=arch,
|
||||
variant=str(variant),
|
||||
)
|
||||
|
||||
def process(self, item, num):
|
||||
compose, cmd, variant, arch = item
|
||||
can_fail = compose.can_fail(variant, arch, 'iso')
|
||||
with failable(compose, can_fail, variant, arch, 'iso', logger=self.pool._logger):
|
||||
can_fail = compose.can_fail(variant, arch, "iso")
|
||||
with failable(
|
||||
compose, can_fail, variant, arch, "iso", logger=self.pool._logger
|
||||
):
|
||||
self.worker(compose, cmd, variant, arch, num)
|
||||
|
||||
def worker(self, compose, cmd, variant, arch, num):
|
||||
@ -196,23 +231,35 @@ class CreateIsoThread(WorkerThread):
|
||||
if "mount" in cmd:
|
||||
mounts.append(cmd["mount"])
|
||||
|
||||
bootable = cmd['bootable']
|
||||
bootable = cmd["bootable"]
|
||||
log_file = compose.paths.log.log_file(
|
||||
arch, "createiso-%s" % os.path.basename(cmd["iso_path"]))
|
||||
arch, "createiso-%s" % os.path.basename(cmd["iso_path"])
|
||||
)
|
||||
|
||||
msg = "Creating ISO (arch: %s, variant: %s): %s" % (
|
||||
arch, variant, os.path.basename(cmd["iso_path"]))
|
||||
arch,
|
||||
variant,
|
||||
os.path.basename(cmd["iso_path"]),
|
||||
)
|
||||
self.pool.log_info("[BEGIN] %s" % msg)
|
||||
|
||||
try:
|
||||
run_createiso_command(num, compose, bootable, arch,
|
||||
cmd['cmd'], mounts, log_file)
|
||||
run_createiso_command(
|
||||
num, compose, bootable, arch, cmd["cmd"], mounts, log_file
|
||||
)
|
||||
except Exception:
|
||||
self.fail(compose, cmd, variant, arch)
|
||||
raise
|
||||
|
||||
add_iso_to_metadata(compose, variant, arch, cmd["iso_path"],
|
||||
cmd["bootable"], cmd["disc_num"], cmd["disc_count"])
|
||||
add_iso_to_metadata(
|
||||
compose,
|
||||
variant,
|
||||
arch,
|
||||
cmd["iso_path"],
|
||||
cmd["bootable"],
|
||||
cmd["disc_num"],
|
||||
cmd["disc_count"],
|
||||
)
|
||||
|
||||
# Delete staging directory if present.
|
||||
staging_dir = compose.paths.work.iso_staging_dir(
|
||||
@ -223,10 +270,12 @@ class CreateIsoThread(WorkerThread):
|
||||
|
||||
self.pool.log_info("[DONE ] %s" % msg)
|
||||
if compose.notifier:
|
||||
compose.notifier.send('createiso-imagedone',
|
||||
file=cmd['iso_path'],
|
||||
arch=arch,
|
||||
variant=str(variant))
|
||||
compose.notifier.send(
|
||||
"createiso-imagedone",
|
||||
file=cmd["iso_path"],
|
||||
arch=arch,
|
||||
variant=str(variant),
|
||||
)
|
||||
|
||||
|
||||
def add_iso_to_metadata(
|
||||
@ -240,7 +289,7 @@ def add_iso_to_metadata(
|
||||
additional_variants=None,
|
||||
):
|
||||
img = Image(compose.im)
|
||||
img.path = iso_path.replace(compose.paths.compose.topdir(), '').lstrip('/')
|
||||
img.path = iso_path.replace(compose.paths.compose.topdir(), "").lstrip("/")
|
||||
img.mtime = get_mtime(iso_path)
|
||||
img.size = get_file_size(iso_path)
|
||||
img.arch = arch
|
||||
@ -255,8 +304,8 @@ def add_iso_to_metadata(
|
||||
if additional_variants:
|
||||
img.unified = True
|
||||
img.additional_variants = additional_variants
|
||||
setattr(img, 'can_fail', compose.can_fail(variant, arch, 'iso'))
|
||||
setattr(img, 'deliverable', 'iso')
|
||||
setattr(img, "can_fail", compose.can_fail(variant, arch, "iso"))
|
||||
setattr(img, "deliverable", "iso")
|
||||
try:
|
||||
img.volume_id = iso.get_volume_id(iso_path)
|
||||
except RuntimeError:
|
||||
@ -269,15 +318,16 @@ def add_iso_to_metadata(
|
||||
return img
|
||||
|
||||
|
||||
def run_createiso_command(num, compose, bootable, arch, cmd, mounts,
|
||||
log_file, with_jigdo=True):
|
||||
def run_createiso_command(
|
||||
num, compose, bootable, arch, cmd, mounts, log_file, with_jigdo=True
|
||||
):
|
||||
packages = ["coreutils", "genisoimage", "isomd5sum"]
|
||||
if with_jigdo and compose.conf['create_jigdo']:
|
||||
packages.append('jigdo')
|
||||
if with_jigdo and compose.conf["create_jigdo"]:
|
||||
packages.append("jigdo")
|
||||
if bootable:
|
||||
extra_packages = {
|
||||
'lorax': ['lorax', 'which'],
|
||||
'buildinstall': ['anaconda'],
|
||||
"lorax": ["lorax", "which"],
|
||||
"buildinstall": ["anaconda"],
|
||||
}
|
||||
packages.extend(extra_packages[compose.conf["buildinstall_method"]])
|
||||
|
||||
@ -301,8 +351,13 @@ def run_createiso_command(num, compose, bootable, arch, cmd, mounts,
|
||||
build_arch = random.choice(tag_arches)
|
||||
|
||||
runroot.run(
|
||||
cmd, log_file=log_file, arch=build_arch, packages=packages, mounts=mounts,
|
||||
weight=compose.conf['runroot_weights'].get('createiso'))
|
||||
cmd,
|
||||
log_file=log_file,
|
||||
arch=build_arch,
|
||||
packages=packages,
|
||||
mounts=mounts,
|
||||
weight=compose.conf["runroot_weights"].get("createiso"),
|
||||
)
|
||||
|
||||
|
||||
def split_iso(compose, arch, variant, no_split=False, logger=None):
|
||||
@ -318,8 +373,8 @@ def split_iso(compose, arch, variant, no_split=False, logger=None):
|
||||
"""
|
||||
if not logger:
|
||||
logger = compose._logger
|
||||
media_size = compose.conf['iso_size']
|
||||
media_reserve = compose.conf['split_iso_reserve']
|
||||
media_size = compose.conf["iso_size"]
|
||||
media_reserve = compose.conf["split_iso_reserve"]
|
||||
split_size = convert_media_size(media_size) - convert_media_size(media_reserve)
|
||||
real_size = None if no_split else split_size
|
||||
|
||||
@ -351,7 +406,9 @@ def split_iso(compose, arch, variant, no_split=False, logger=None):
|
||||
for root, dirs, files in os.walk(os_tree):
|
||||
for dn in dirs[:]:
|
||||
repo_dir = os.path.join(root, dn)
|
||||
if repo_dir == os.path.join(compose.paths.compose.repository(arch, variant), "repodata"):
|
||||
if repo_dir == os.path.join(
|
||||
compose.paths.compose.repository(arch, variant), "repodata"
|
||||
):
|
||||
dirs.remove(dn)
|
||||
|
||||
for fn in files:
|
||||
@ -369,17 +426,19 @@ def split_iso(compose, arch, variant, no_split=False, logger=None):
|
||||
for path, size, sticky in all_files + packages:
|
||||
ms.add_file(path, size, sticky)
|
||||
|
||||
logger.debug('Splitting media for %s.%s:' % (variant.uid, arch))
|
||||
logger.debug("Splitting media for %s.%s:" % (variant.uid, arch))
|
||||
result = ms.split()
|
||||
if no_split and result[0]['size'] > split_size:
|
||||
if no_split and result[0]["size"] > split_size:
|
||||
logger.warning(
|
||||
"ISO for %s.%s does not fit on single media! It is %s bytes too big. (Total size: %s B)"
|
||||
% (variant.uid, arch, result[0]['size'] - split_size, result[0]['size'])
|
||||
% (variant.uid, arch, result[0]["size"] - split_size, result[0]["size"])
|
||||
)
|
||||
return result
|
||||
|
||||
|
||||
def prepare_iso(compose, arch, variant, disc_num=1, disc_count=None, split_iso_data=None):
|
||||
def prepare_iso(
|
||||
compose, arch, variant, disc_num=1, disc_count=None, split_iso_data=None
|
||||
):
|
||||
tree_dir = compose.paths.compose.os_tree(arch, variant)
|
||||
filename = compose.get_image_name(arch, variant, disc_num=disc_num)
|
||||
iso_dir = compose.paths.work.iso_dir(arch, filename)
|
||||
@ -428,7 +487,9 @@ def prepare_iso(compose, arch, variant, disc_num=1, disc_count=None, split_iso_d
|
||||
)
|
||||
run(cmd)
|
||||
# add repodata/repomd.xml back to checksums
|
||||
ti.checksums.add("repodata/repomd.xml", createrepo_checksum, root_dir=iso_dir)
|
||||
ti.checksums.add(
|
||||
"repodata/repomd.xml", createrepo_checksum, root_dir=iso_dir
|
||||
)
|
||||
|
||||
new_ti_path = os.path.join(iso_dir, ".treeinfo")
|
||||
ti.dump(new_ti_path)
|
||||
@ -443,7 +504,9 @@ def prepare_iso(compose, arch, variant, disc_num=1, disc_count=None, split_iso_d
|
||||
if not disc_count or disc_count == 1:
|
||||
data = iso.get_graft_points(compose, [tree_dir, iso_dir])
|
||||
else:
|
||||
data = iso.get_graft_points(compose, [iso._paths_from_list(tree_dir, split_iso_data["files"]), iso_dir])
|
||||
data = iso.get_graft_points(
|
||||
compose, [iso._paths_from_list(tree_dir, split_iso_data["files"]), iso_dir]
|
||||
)
|
||||
|
||||
if compose.conf["createiso_break_hardlinks"]:
|
||||
compose.log_debug(
|
||||
@ -458,7 +521,9 @@ def prepare_iso(compose, arch, variant, disc_num=1, disc_count=None, split_iso_d
|
||||
)
|
||||
create_hardlinks(
|
||||
compose.paths.work.iso_staging_dir(arch, variant, filename),
|
||||
log_file=compose.paths.log.log_file(arch, "iso-hardlink-%s.log" % variant.uid),
|
||||
log_file=compose.paths.log.log_file(
|
||||
arch, "iso-hardlink-%s.log" % variant.uid
|
||||
),
|
||||
)
|
||||
|
||||
# TODO: /content /graft-points
|
||||
|
@ -14,9 +14,7 @@
|
||||
# along with this program; if not, see <https://gnu.org/licenses/>.
|
||||
|
||||
|
||||
__all__ = (
|
||||
"create_variant_repo",
|
||||
)
|
||||
__all__ = ("create_variant_repo",)
|
||||
|
||||
|
||||
import copy
|
||||
@ -56,18 +54,18 @@ class CreaterepoPhase(PhaseBase):
|
||||
def validate(self):
|
||||
errors = []
|
||||
|
||||
if not self.compose.old_composes and self.compose.conf.get('createrepo_deltas'):
|
||||
errors.append('Can not generate deltas without old compose')
|
||||
if not self.compose.old_composes and self.compose.conf.get("createrepo_deltas"):
|
||||
errors.append("Can not generate deltas without old compose")
|
||||
|
||||
if errors:
|
||||
raise ValueError('\n'.join(errors))
|
||||
raise ValueError("\n".join(errors))
|
||||
|
||||
def run(self):
|
||||
get_productids_from_scm(self.compose)
|
||||
reference_pkgset = None
|
||||
if self.pkgset_phase and self.pkgset_phase.package_sets:
|
||||
reference_pkgset = self.pkgset_phase.package_sets[-1]
|
||||
for i in range(self.compose.conf['createrepo_num_threads']):
|
||||
for i in range(self.compose.conf["createrepo_num_threads"]):
|
||||
self.pool.add(
|
||||
CreaterepoThread(self.pool, reference_pkgset, self.modules_metadata)
|
||||
)
|
||||
@ -87,18 +85,34 @@ class CreaterepoPhase(PhaseBase):
|
||||
self.modules_metadata.write_modules_metadata()
|
||||
|
||||
|
||||
def create_variant_repo(compose, arch, variant, pkg_type, pkgset, modules_metadata=None):
|
||||
def create_variant_repo(
|
||||
compose, arch, variant, pkg_type, pkgset, modules_metadata=None
|
||||
):
|
||||
types = {
|
||||
'rpm': ('binary',
|
||||
lambda **kwargs: compose.paths.compose.repository(arch=arch, variant=variant, **kwargs)),
|
||||
'srpm': ('source',
|
||||
lambda **kwargs: compose.paths.compose.repository(arch='src', variant=variant, **kwargs)),
|
||||
'debuginfo': ('debug',
|
||||
lambda **kwargs: compose.paths.compose.debug_repository(arch=arch, variant=variant, **kwargs)),
|
||||
"rpm": (
|
||||
"binary",
|
||||
lambda **kwargs: compose.paths.compose.repository(
|
||||
arch=arch, variant=variant, **kwargs
|
||||
),
|
||||
),
|
||||
"srpm": (
|
||||
"source",
|
||||
lambda **kwargs: compose.paths.compose.repository(
|
||||
arch="src", variant=variant, **kwargs
|
||||
),
|
||||
),
|
||||
"debuginfo": (
|
||||
"debug",
|
||||
lambda **kwargs: compose.paths.compose.debug_repository(
|
||||
arch=arch, variant=variant, **kwargs
|
||||
),
|
||||
),
|
||||
}
|
||||
|
||||
if variant.is_empty or (arch is None and pkg_type != 'srpm'):
|
||||
compose.log_info("[SKIP ] Creating repo (arch: %s, variant: %s): %s" % (arch, variant))
|
||||
if variant.is_empty or (arch is None and pkg_type != "srpm"):
|
||||
compose.log_info(
|
||||
"[SKIP ] Creating repo (arch: %s, variant: %s): %s" % (arch, variant)
|
||||
)
|
||||
return
|
||||
|
||||
createrepo_c = compose.conf["createrepo_c"]
|
||||
@ -128,7 +142,7 @@ def create_variant_repo(compose, arch, variant, pkg_type, pkgset, modules_metada
|
||||
compose.log_info("[BEGIN] %s" % msg)
|
||||
|
||||
# We only want delta RPMs for binary repos.
|
||||
with_deltas = pkg_type == 'rpm' and _has_deltas(compose, variant, arch)
|
||||
with_deltas = pkg_type == "rpm" and _has_deltas(compose, variant, arch)
|
||||
|
||||
rpms = set()
|
||||
rpm_nevras = set()
|
||||
@ -143,7 +157,7 @@ def create_variant_repo(compose, arch, variant, pkg_type, pkgset, modules_metada
|
||||
continue
|
||||
for srpm_data in data.values():
|
||||
for rpm_nevra, rpm_data in srpm_data.items():
|
||||
if types[pkg_type][0] != rpm_data['category']:
|
||||
if types[pkg_type][0] != rpm_data["category"]:
|
||||
continue
|
||||
path = os.path.join(compose.topdir, "compose", rpm_data["path"])
|
||||
rel_path = relative_path(path, repo_dir.rstrip("/") + "/")
|
||||
@ -151,7 +165,7 @@ def create_variant_repo(compose, arch, variant, pkg_type, pkgset, modules_metada
|
||||
rpm_nevras.add(str(rpm_nevra))
|
||||
|
||||
file_list = compose.paths.work.repo_package_list(arch, variant, pkg_type)
|
||||
with open(file_list, 'w') as f:
|
||||
with open(file_list, "w") as f:
|
||||
for rel_path in sorted(rpms):
|
||||
f.write("%s\n" % rel_path)
|
||||
|
||||
@ -166,18 +180,25 @@ def create_variant_repo(compose, arch, variant, pkg_type, pkgset, modules_metada
|
||||
comps_path = None
|
||||
if compose.has_comps and pkg_type == "rpm":
|
||||
comps_path = compose.paths.work.comps(arch=arch, variant=variant)
|
||||
cmd = repo.get_createrepo_cmd(repo_dir, update=True,
|
||||
database=compose.should_create_yum_database,
|
||||
skip_stat=True,
|
||||
pkglist=file_list, outputdir=repo_dir,
|
||||
workers=compose.conf["createrepo_num_workers"],
|
||||
groupfile=comps_path, update_md_path=repo_dir_arch,
|
||||
checksum=createrepo_checksum,
|
||||
deltas=with_deltas,
|
||||
oldpackagedirs=old_package_dirs,
|
||||
use_xz=compose.conf['createrepo_use_xz'],
|
||||
extra_args=compose.conf["createrepo_extra_args"])
|
||||
log_file = compose.paths.log.log_file(arch, "createrepo-%s.%s" % (variant, pkg_type))
|
||||
cmd = repo.get_createrepo_cmd(
|
||||
repo_dir,
|
||||
update=True,
|
||||
database=compose.should_create_yum_database,
|
||||
skip_stat=True,
|
||||
pkglist=file_list,
|
||||
outputdir=repo_dir,
|
||||
workers=compose.conf["createrepo_num_workers"],
|
||||
groupfile=comps_path,
|
||||
update_md_path=repo_dir_arch,
|
||||
checksum=createrepo_checksum,
|
||||
deltas=with_deltas,
|
||||
oldpackagedirs=old_package_dirs,
|
||||
use_xz=compose.conf["createrepo_use_xz"],
|
||||
extra_args=compose.conf["createrepo_extra_args"],
|
||||
)
|
||||
log_file = compose.paths.log.log_file(
|
||||
arch, "createrepo-%s.%s" % (variant, pkg_type)
|
||||
)
|
||||
run(cmd, logfile=log_file, show_cmd=True)
|
||||
|
||||
# call modifyrepo to inject productid
|
||||
@ -186,12 +207,16 @@ def create_variant_repo(compose, arch, variant, pkg_type, pkgset, modules_metada
|
||||
# add product certificate to base (rpm) repo; skip source and debug
|
||||
product_id_path = compose.paths.work.product_id(arch, variant)
|
||||
if os.path.isfile(product_id_path):
|
||||
cmd = repo.get_modifyrepo_cmd(os.path.join(repo_dir, "repodata"), product_id_path, compress_type="gz")
|
||||
cmd = repo.get_modifyrepo_cmd(
|
||||
os.path.join(repo_dir, "repodata"), product_id_path, compress_type="gz"
|
||||
)
|
||||
log_file = compose.paths.log.log_file(arch, "modifyrepo-%s" % variant)
|
||||
run(cmd, logfile=log_file, show_cmd=True)
|
||||
# productinfo is not supported by modifyrepo in any way
|
||||
# this is a HACK to make CDN happy (dmach: at least I think, need to confirm with dgregor)
|
||||
shutil.copy2(product_id_path, os.path.join(repo_dir, "repodata", "productid"))
|
||||
shutil.copy2(
|
||||
product_id_path, os.path.join(repo_dir, "repodata", "productid")
|
||||
)
|
||||
|
||||
# call modifyrepo to inject modulemd if needed
|
||||
if pkg_type == "rpm" and arch in variant.arch_mmds and Modulemd is not None:
|
||||
@ -217,7 +242,7 @@ def create_variant_repo(compose, arch, variant, pkg_type, pkgset, modules_metada
|
||||
for module_id, module_rpms in metadata:
|
||||
modulemd_path = os.path.join(
|
||||
types[pkg_type][1](relative=True),
|
||||
find_file_in_repodata(repo_dir, 'modules'),
|
||||
find_file_in_repodata(repo_dir, "modules"),
|
||||
)
|
||||
modules_metadata.prepare_module_metadata(
|
||||
variant,
|
||||
@ -246,18 +271,18 @@ def add_modular_metadata(repo, repo_path, mod_index, log_file):
|
||||
os.path.join(repo_path, "repodata"),
|
||||
modules_path,
|
||||
mdtype="modules",
|
||||
compress_type="gz"
|
||||
compress_type="gz",
|
||||
)
|
||||
run(cmd, logfile=log_file, show_cmd=True)
|
||||
|
||||
|
||||
def find_file_in_repodata(repo_path, type_):
|
||||
dom = xml.dom.minidom.parse(os.path.join(repo_path, 'repodata', 'repomd.xml'))
|
||||
for entry in dom.getElementsByTagName('data'):
|
||||
if entry.getAttribute('type') == type_:
|
||||
return entry.getElementsByTagName('location')[0].getAttribute('href')
|
||||
dom = xml.dom.minidom.parse(os.path.join(repo_path, "repodata", "repomd.xml"))
|
||||
for entry in dom.getElementsByTagName("data"):
|
||||
if entry.getAttribute("type") == type_:
|
||||
return entry.getElementsByTagName("location")[0].getAttribute("href")
|
||||
entry.unlink()
|
||||
raise RuntimeError('No such file in repodata: %s' % type_)
|
||||
raise RuntimeError("No such file in repodata: %s" % type_)
|
||||
|
||||
|
||||
class CreaterepoThread(WorkerThread):
|
||||
@ -274,7 +299,7 @@ class CreaterepoThread(WorkerThread):
|
||||
variant,
|
||||
pkg_type=pkg_type,
|
||||
pkgset=self.reference_pkgset,
|
||||
modules_metadata=self.modules_metadata
|
||||
modules_metadata=self.modules_metadata,
|
||||
)
|
||||
|
||||
|
||||
@ -308,7 +333,8 @@ def get_productids_from_scm(compose):
# pem_files = glob.glob("%s/*.pem" % tmp_dir)[-1:]
if not pem_files:
warning = "No product certificate found (arch: %s, variant: %s)" % (
arch, variant.uid
arch,
variant.uid,
)
if product_id_allow_missing:
compose.log_warning(warning)
@ -318,7 +344,14 @@ def get_productids_from_scm(compose):
raise RuntimeError(warning)
if len(pem_files) > 1:
shutil.rmtree(tmp_dir)
raise RuntimeError("Multiple product certificates found (arch: %s, variant: %s): %s" % (arch, variant.uid, ", ".join(sorted([os.path.basename(i) for i in pem_files]))))
raise RuntimeError(
"Multiple product certificates found (arch: %s, variant: %s): %s"
% (
arch,
variant.uid,
", ".join(sorted([os.path.basename(i) for i in pem_files])),
)
)
product_id_path = compose.paths.work.product_id(arch, variant)
shutil.copy2(pem_files[0], product_id_path)

@ -331,23 +364,27 @@ def _get_old_package_dirs(compose, repo_dir):
repo in an older compose and return a list of paths to directories with
packages in it.
"""
if not compose.conf['createrepo_deltas']:
if not compose.conf["createrepo_deltas"]:
return None
old_compose_path = find_old_compose(
compose.old_composes,
compose.ci_base.release.short,
compose.ci_base.release.version,
compose.ci_base.release.type_suffix,
compose.ci_base.base_product.short if compose.ci_base.release.is_layered else None,
compose.ci_base.base_product.version if compose.ci_base.release.is_layered else None,
allowed_statuses=['FINISHED', 'FINISHED_INCOMPLETE'],
compose.ci_base.base_product.short
if compose.ci_base.release.is_layered
else None,
compose.ci_base.base_product.version
if compose.ci_base.release.is_layered
else None,
allowed_statuses=["FINISHED", "FINISHED_INCOMPLETE"],
)
if not old_compose_path:
compose.log_info("No suitable old compose found in: %s" % compose.old_composes)
return None
rel_dir = relative_path(repo_dir, compose.topdir.rstrip('/') + '/')
old_package_dirs = os.path.join(old_compose_path, rel_dir, 'Packages')
if compose.conf['hashed_directories']:
rel_dir = relative_path(repo_dir, compose.topdir.rstrip("/") + "/")
old_package_dirs = os.path.join(old_compose_path, rel_dir, "Packages")
if compose.conf["hashed_directories"]:
old_package_dirs = _find_package_dirs(old_package_dirs)
return old_package_dirs

@ -370,7 +407,7 @@ def _find_package_dirs(base):

def _has_deltas(compose, variant, arch):
"""Check if delta RPMs are enabled for given variant and architecture."""
key = 'createrepo_deltas'
key = "createrepo_deltas"
if isinstance(compose.conf.get(key), bool):
return compose.conf[key]
return any(get_arch_variant_data(compose.conf, key, arch, variant))
@ -383,18 +420,28 @@ class ModulesMetadata(object):
self.modules_metadata_file = self.compose.paths.compose.metadata("modules.json")
self.productmd_modules_metadata = productmd.modules.Modules()
self.productmd_modules_metadata.compose.id = copy.copy(self.compose.compose_id)
self.productmd_modules_metadata.compose.type = copy.copy(self.compose.compose_type)
self.productmd_modules_metadata.compose.date = copy.copy(self.compose.compose_date)
self.productmd_modules_metadata.compose.respin = copy.copy(self.compose.compose_respin)
self.productmd_modules_metadata.compose.type = copy.copy(
self.compose.compose_type
)
self.productmd_modules_metadata.compose.date = copy.copy(
self.compose.compose_date
)
self.productmd_modules_metadata.compose.respin = copy.copy(
self.compose.compose_respin
)

def write_modules_metadata(self):
"""
flush modules metadata into file
"""
self.compose.log_info("Writing modules metadata: %s" % self.modules_metadata_file)
self.compose.log_info(
"Writing modules metadata: %s" % self.modules_metadata_file
)
self.productmd_modules_metadata.dump(self.modules_metadata_file)

def prepare_module_metadata(self, variant, arch, nsvc, modulemd_path, category, module_rpms):
def prepare_module_metadata(
self, variant, arch, nsvc, modulemd_path, category, module_rpms
):
"""
Find koji tag which corresponds to the module and add record into
module metadata structure.

@ -29,6 +29,7 @@ from pungi import metadata

class ExtraFilesPhase(ConfigGuardedPhase):
"""EXTRA_FILES"""

name = "extra_files"

def __init__(self, compose, pkgset_phase):
@ -58,8 +59,10 @@ class ExtraFilesPhase(ConfigGuardedPhase):
self.metadata,
)
else:
self.compose.log_info('[SKIP ] No extra files (arch: %s, variant: %s)'
% (arch, variant.uid))
self.compose.log_info(
"[SKIP ] No extra files (arch: %s, variant: %s)"
% (arch, variant.uid)
)

metadata_path = self.compose.paths.compose.metadata("extra_files.json")
self.compose.log_info("Writing global extra files metadata: %s" % metadata_path)
@ -69,7 +72,7 @@ class ExtraFilesPhase(ConfigGuardedPhase):
def copy_extra_files(
compose, cfg, arch, variant, package_sets, extra_metadata, checksum_type=None
):
checksum_type = checksum_type or compose.conf['media_checksums']
checksum_type = checksum_type or compose.conf["media_checksums"]
var_dict = {
"arch": arch,
"variant_id": variant.id,
@ -95,14 +98,20 @@ def copy_extra_files(
for package_set in package_sets:
for pkgset_file in package_set[arch]:
pkg_obj = package_set[arch][pkgset_file]
if pkg_is_rpm(pkg_obj) and _pkg_matches(pkg_obj, pkg_name, pkg_arch):
if pkg_is_rpm(pkg_obj) and _pkg_matches(
pkg_obj, pkg_name, pkg_arch
):
rpms.append(pkg_obj.file_path)
if not rpms:
raise RuntimeError('No package matching %s in the package set.' % pattern)
raise RuntimeError(
"No package matching %s in the package set." % pattern
)
scm_dict["repo"] = rpms

getter = get_file_from_scm if 'file' in scm_dict else get_dir_from_scm
target_path = os.path.join(extra_files_dir, scm_dict.get('target', '').lstrip('/'))
getter = get_file_from_scm if "file" in scm_dict else get_dir_from_scm
target_path = os.path.join(
extra_files_dir, scm_dict.get("target", "").lstrip("/")
)
getter(scm_dict, target_path, compose=compose)

if os.listdir(extra_files_dir):
@ -121,11 +130,12 @@ def copy_extra_files(

def _pkg_matches(pkg_obj, name_glob, arch):
"""Check if `pkg_obj` matches name and arch."""
return (fnmatch.fnmatch(pkg_obj.name, name_glob) and
(arch is None or arch == pkg_obj.arch))
return fnmatch.fnmatch(pkg_obj.name, name_glob) and (
arch is None or arch == pkg_obj.arch
)


def _is_external(rpm):
"""Check if path to rpm points outside of the compose: i.e. it is an
absolute path or a URL."""
return rpm.startswith('/') or '://' in rpm
return rpm.startswith("/") or "://" in rpm

@ -23,8 +23,12 @@ from productmd.extra_files import ExtraFiles
from pungi import createiso
from pungi import metadata
from pungi.phases.base import ConfigGuardedPhase, PhaseBase, PhaseLoggerMixin
from pungi.phases.createiso import (add_iso_to_metadata, copy_boot_images,
run_createiso_command, load_and_tweak_treeinfo)
from pungi.phases.createiso import (
add_iso_to_metadata,
copy_boot_images,
run_createiso_command,
load_and_tweak_treeinfo,
)
from pungi.util import failable, get_format_substs, get_variant_data, get_volid
from pungi.wrappers import iso
from pungi.wrappers.scm import get_dir_from_scm, get_file_from_scm
@ -38,24 +42,25 @@ class ExtraIsosPhase(PhaseLoggerMixin, ConfigGuardedPhase, PhaseBase):
self.pool = ThreadPool(logger=self.logger)

def validate(self):
for variant in self.compose.get_variants(types=['variant']):
for variant in self.compose.get_variants(types=["variant"]):
for config in get_variant_data(self.compose.conf, self.name, variant):
extra_arches = set(config.get('arches', [])) - set(variant.arches)
extra_arches = set(config.get("arches", [])) - set(variant.arches)
if extra_arches:
self.compose.log_warning(
'Extra iso config for %s mentions non-existing arches: %s'
% (variant, ', '.join(sorted(extra_arches))))
"Extra iso config for %s mentions non-existing arches: %s"
% (variant, ", ".join(sorted(extra_arches)))
)

def run(self):
commands = []

for variant in self.compose.get_variants(types=['variant']):
for variant in self.compose.get_variants(types=["variant"]):
for config in get_variant_data(self.compose.conf, self.name, variant):
arches = set(variant.arches)
if config.get('arches'):
arches &= set(config['arches'])
if not config['skip_src']:
arches.add('src')
if config.get("arches"):
arches &= set(config["arches"])
if not config["skip_src"]:
arches.add("src")
for arch in sorted(arches):
commands.append((config, variant, arch))

@ -70,13 +75,15 @@ class ExtraIsosThread(WorkerThread):
def process(self, item, num):
self.num = num
compose, config, variant, arch = item
can_fail = arch in config.get('failable_arches', [])
with failable(compose, can_fail, variant, arch, 'extra_iso', logger=self.pool._logger):
can_fail = arch in config.get("failable_arches", [])
with failable(
compose, can_fail, variant, arch, "extra_iso", logger=self.pool._logger
):
self.worker(compose, config, variant, arch)

def worker(self, compose, config, variant, arch):
filename = get_filename(compose, variant, arch, config.get('filename'))
volid = get_volume_id(compose, variant, arch, config.get('volid', []))
filename = get_filename(compose, variant, arch, config.get("filename"))
volid = get_volume_id(compose, variant, arch, config.get("volid", []))
iso_dir = compose.paths.compose.iso_dir(arch, variant)
iso_path = os.path.join(iso_dir, filename)

@ -85,15 +92,15 @@ class ExtraIsosThread(WorkerThread):
msg = "Creating ISO (arch: %s, variant: %s): %s" % (arch, variant, filename)
self.pool.log_info("[BEGIN] %s" % msg)

get_extra_files(compose, variant, arch, config.get('extra_files', []))
get_extra_files(compose, variant, arch, config.get("extra_files", []))

bootable = arch != "src" and bool(compose.conf.get('buildinstall_method'))
bootable = arch != "src" and bool(compose.conf.get("buildinstall_method"))

graft_points = get_iso_contents(
compose,
variant,
arch,
config['include_variants'],
config["include_variants"],
filename,
bootable=bootable,
inherit_extra_files=config.get("inherit_extra_files", False),
@ -108,24 +115,34 @@ class ExtraIsosThread(WorkerThread):
supported=compose.supported,
hfs_compat=compose.conf["iso_hfs_ppc64le_compatible"],
)
if compose.conf['create_jigdo']:
if compose.conf["create_jigdo"]:
jigdo_dir = compose.paths.compose.jigdo_dir(arch, variant)
os_tree = compose.paths.compose.os_tree(arch, variant)
opts = opts._replace(jigdo_dir=jigdo_dir, os_tree=os_tree)

if bootable:
opts = opts._replace(buildinstall_method=compose.conf['buildinstall_method'])
opts = opts._replace(
buildinstall_method=compose.conf["buildinstall_method"]
)

script_file = os.path.join(compose.paths.work.tmp_dir(arch, variant),
'extraiso-%s.sh' % filename)
with open(script_file, 'w') as f:
script_file = os.path.join(
compose.paths.work.tmp_dir(arch, variant), "extraiso-%s.sh" % filename
)
with open(script_file, "w") as f:
createiso.write_script(opts, f)

run_createiso_command(self.num, compose, bootable, arch,
['bash', script_file], [compose.topdir],
log_file=compose.paths.log.log_file(
arch, "extraiso-%s" % os.path.basename(iso_path)),
with_jigdo=compose.conf['create_jigdo'])
run_createiso_command(
self.num,
compose,
bootable,
arch,
["bash", script_file],
[compose.topdir],
log_file=compose.paths.log.log_file(
arch, "extraiso-%s" % os.path.basename(iso_path)
),
with_jigdo=compose.conf["create_jigdo"],
)

img = add_iso_to_metadata(
compose,
@ -147,7 +164,7 @@ def get_extra_files(compose, variant, arch, extra_files):
extra_files_dir = compose.paths.work.extra_iso_extra_files_dir(arch, variant)
filelist = []
for scm_dict in extra_files:
getter = get_file_from_scm if 'file' in scm_dict else get_dir_from_scm
getter = get_file_from_scm if "file" in scm_dict else get_dir_from_scm
target = scm_dict.get("target", "").lstrip("/")
target_path = os.path.join(extra_files_dir, target).rstrip("/")
filelist.extend(
@ -178,7 +195,7 @@ def get_iso_contents(
files = {}
if bootable:
buildinstall_dir = compose.paths.work.buildinstall_dir(arch, create_dir=False)
if compose.conf['buildinstall_method'] == 'lorax':
if compose.conf["buildinstall_method"] == "lorax":
buildinstall_dir = os.path.join(buildinstall_dir, variant.uid)

copy_boot_images(buildinstall_dir, iso_dir)
@ -199,13 +216,13 @@ def get_iso_contents(
# Get packages...
package_dir = compose.paths.compose.packages(arch, var)
for k, v in iso.get_graft_points(compose, [package_dir]).items():
files[os.path.join(var.uid, 'Packages', k)] = v
files[os.path.join(var.uid, "Packages", k)] = v

# Get repodata...
tree_dir = compose.paths.compose.repository(arch, var)
repo_dir = os.path.join(tree_dir, 'repodata')
repo_dir = os.path.join(tree_dir, "repodata")
for k, v in iso.get_graft_points(compose, [repo_dir]).items():
files[os.path.join(var.uid, 'repodata', k)] = v
files[os.path.join(var.uid, "repodata", k)] = v

if inherit_extra_files:
# Get extra files...
@ -253,32 +270,41 @@ def tweak_treeinfo(compose, include_variants, source_file, dest_file):


def get_filename(compose, variant, arch, format):
disc_type = compose.conf['disc_types'].get('dvd', 'dvd')
disc_type = compose.conf["disc_types"].get("dvd", "dvd")
base_filename = compose.get_image_name(
arch, variant, disc_type=disc_type, disc_num=1)
arch, variant, disc_type=disc_type, disc_num=1
)
if not format:
return base_filename
kwargs = {
'arch': arch,
'disc_type': disc_type,
'disc_num': 1,
'suffix': '.iso',
'filename': base_filename,
'variant': variant,
"arch": arch,
"disc_type": disc_type,
"disc_num": 1,
"suffix": ".iso",
"filename": base_filename,
"variant": variant,
}
args = get_format_substs(compose, **kwargs)
try:
return (format % args).format(**args)
except KeyError as err:
raise RuntimeError('Failed to create image name: unknown format element: %s' % err)
raise RuntimeError(
"Failed to create image name: unknown format element: %s" % err
)


def get_volume_id(compose, variant, arch, formats):
disc_type = compose.conf['disc_types'].get('dvd', 'dvd')
disc_type = compose.conf["disc_types"].get("dvd", "dvd")
# Get volume ID for regular ISO so that we can substitute it in.
volid = get_volid(compose, arch, variant, disc_type=disc_type)
return get_volid(compose, arch, variant, disc_type=disc_type,
formats=force_list(formats), volid=volid)
return get_volid(
compose,
arch,
variant,
disc_type=disc_type,
formats=force_list(formats),
volid=volid,
)


def prepare_media_metadata(compose, variant, arch):

@ -22,6 +22,7 @@ import threading
from kobo.rpmlib import parse_nvra
from kobo.shortcuts import run
from productmd.rpms import Rpms

try:
from queue import Queue
except ImportError:
@ -35,8 +36,7 @@ import pungi.wrappers.kojiwrapper
from pungi.compose import get_ordered_variant_uids
from pungi.arch import get_compatible_arches, split_name_arch
from pungi.phases.base import PhaseBase
from pungi.util import (get_arch_data, get_arch_variant_data, get_variant_data,
makedirs)
from pungi.util import get_arch_data, get_arch_variant_data, get_variant_data, makedirs
from pungi.module_util import Modulemd, collect_module_defaults
from pungi.phases.createrepo import add_modular_metadata

@ -44,6 +44,7 @@ from pungi.phases.createrepo import add_modular_metadata
def get_gather_source(name):
import pungi.phases.gather.sources
from .source import GatherSourceContainer

GatherSourceContainer.register_module(pungi.phases.gather.sources)
container = GatherSourceContainer()
return container["GatherSource%s" % name]
@ -52,6 +53,7 @@ def get_gather_source(name):
def get_gather_method(name):
import pungi.phases.gather.methods
from .method import GatherMethodContainer

GatherMethodContainer.register_module(pungi.phases.gather.methods)
container = GatherMethodContainer()
return container["GatherMethod%s" % name]
@ -59,6 +61,7 @@ def get_gather_method(name):
|
||||
|
||||
class GatherPhase(PhaseBase):
|
||||
"""GATHER"""
|
||||
|
||||
name = "gather"
|
||||
|
||||
def __init__(self, compose, pkgset_phase):
|
||||
@ -80,7 +83,7 @@ class GatherPhase(PhaseBase):
|
||||
# Modules are not supported, check if we need them
|
||||
for variant in self.compose.variants.values():
|
||||
if variant.modules:
|
||||
errors.append('Modular compose requires libmodulemd package.')
|
||||
errors.append("Modular compose requires libmodulemd package.")
|
||||
|
||||
# check whether variants from configuration value 'variant_as_lookaside' are correct
|
||||
variant_as_lookaside = self.compose.conf.get("variant_as_lookaside", [])
|
||||
@ -93,25 +96,30 @@ class GatherPhase(PhaseBase):
|
||||
)
|
||||
|
||||
if errors:
|
||||
raise ValueError('\n'.join(errors))
|
||||
raise ValueError("\n".join(errors))
|
||||
|
||||
def _write_manifest(self):
|
||||
self.compose.log_info("Writing RPM manifest: %s" % self.manifest_file)
|
||||
self.manifest.dump(self.manifest_file)
|
||||
|
||||
def run(self):
|
||||
pkg_map = gather_wrapper(self.compose, self.pkgset_phase.package_sets,
|
||||
self.pkgset_phase.path_prefix)
|
||||
pkg_map = gather_wrapper(
|
||||
self.compose, self.pkgset_phase.package_sets, self.pkgset_phase.path_prefix
|
||||
)
|
||||
|
||||
for variant_uid in get_ordered_variant_uids(self.compose):
|
||||
variant = self.compose.all_variants[variant_uid]
|
||||
if variant.is_empty:
|
||||
continue
|
||||
for arch in variant.arches:
|
||||
link_files(self.compose, arch, variant,
|
||||
pkg_map[arch][variant.uid],
|
||||
self.pkgset_phase.package_sets,
|
||||
manifest=self.manifest)
|
||||
link_files(
|
||||
self.compose,
|
||||
arch,
|
||||
variant,
|
||||
pkg_map[arch][variant.uid],
|
||||
self.pkgset_phase.package_sets,
|
||||
manifest=self.manifest,
|
||||
)
|
||||
|
||||
self._write_manifest()
|
||||
|
||||
@ -148,10 +156,12 @@ def get_gather_methods(compose, variant):
|
||||
global_method_name = methods
|
||||
if isinstance(methods, dict):
|
||||
try:
|
||||
methods = get_variant_data(compose.conf, 'gather_method', variant)[-1]
|
||||
methods = get_variant_data(compose.conf, "gather_method", variant)[-1]
|
||||
global_method_name = None
|
||||
except IndexError:
|
||||
raise RuntimeError("Variant %s has no configured gather_method" % variant.uid)
|
||||
raise RuntimeError(
|
||||
"Variant %s has no configured gather_method" % variant.uid
|
||||
)
|
||||
return global_method_name, methods
|
||||
|
||||
|
||||
@ -208,8 +218,9 @@ def gather_packages(compose, arch, variant, package_sets, fulltree_excludes=None
|
||||
|
||||
for source_name in ("module", "comps", "json"):
|
||||
|
||||
packages, groups, filter_packages = get_variant_packages(compose, arch, variant,
|
||||
source_name, package_sets)
|
||||
packages, groups, filter_packages = get_variant_packages(
|
||||
compose, arch, variant, source_name, package_sets
|
||||
)
|
||||
if not packages and not groups:
|
||||
# No inputs, nothing to do really.
|
||||
continue
|
||||
@ -217,20 +228,32 @@ def gather_packages(compose, arch, variant, package_sets, fulltree_excludes=None
|
||||
try:
|
||||
method_name = global_method_name or methods[source_name]
|
||||
except KeyError:
|
||||
raise RuntimeError("Variant %s has no configured gather_method for source %s"
|
||||
% (variant.uid, source_name))
|
||||
raise RuntimeError(
|
||||
"Variant %s has no configured gather_method for source %s"
|
||||
% (variant.uid, source_name)
|
||||
)
|
||||
|
||||
GatherMethod = get_gather_method(method_name)
|
||||
method = GatherMethod(compose)
|
||||
method.source_name = source_name
|
||||
compose.log_debug(
|
||||
"Gathering source %s, method %s (arch: %s, variant: %s)" % (source_name, method_name, arch, variant))
|
||||
pkg_map = method(arch, variant, packages, groups, filter_packages,
|
||||
multilib_whitelist, multilib_blacklist, package_sets,
|
||||
fulltree_excludes=fulltree_excludes,
|
||||
prepopulate=prepopulate if source_name == 'comps' else set())
|
||||
"Gathering source %s, method %s (arch: %s, variant: %s)"
|
||||
% (source_name, method_name, arch, variant)
|
||||
)
|
||||
pkg_map = method(
|
||||
arch,
|
||||
variant,
|
||||
packages,
|
||||
groups,
|
||||
filter_packages,
|
||||
multilib_whitelist,
|
||||
multilib_blacklist,
|
||||
package_sets,
|
||||
fulltree_excludes=fulltree_excludes,
|
||||
prepopulate=prepopulate if source_name == "comps" else set(),
|
||||
)
|
||||
|
||||
for t in ('rpm', 'srpm', 'debuginfo'):
|
||||
for t in ("rpm", "srpm", "debuginfo"):
|
||||
result[t].extend(pkg_map.get(t, []))
|
||||
|
||||
compose.log_info("[DONE ] %s" % msg)
|
||||
@ -246,13 +269,15 @@ def write_packages(compose, arch, variant, pkg_map, path_prefix):
|
||||
compose.log_info("[BEGIN] %s" % msg)
|
||||
|
||||
for pkg_type, pkgs in pkg_map.items():
|
||||
file_name = compose.paths.work.package_list(arch=arch, variant=variant, pkg_type=pkg_type)
|
||||
file_name = compose.paths.work.package_list(
|
||||
arch=arch, variant=variant, pkg_type=pkg_type
|
||||
)
|
||||
with open(file_name, "w") as pkg_list:
|
||||
for pkg in pkgs:
|
||||
# TODO: flags?
|
||||
pkg_path = pkg["path"]
|
||||
if pkg_path.startswith(path_prefix):
|
||||
pkg_path = pkg_path[len(path_prefix):]
|
||||
pkg_path = pkg_path[len(path_prefix) :]
|
||||
pkg_list.write("%s\n" % pkg_path)
|
||||
|
||||
compose.log_info("[DONE ] %s" % msg)
|
||||
@ -299,18 +324,23 @@ def trim_packages(compose, arch, variant, pkg_map, parent_pkgs=None, remove_pkgs
|
||||
if not pkg_path:
|
||||
continue
|
||||
nvra = parse_nvra(pkg_path)
|
||||
key = ((nvra["name"], nvra["arch"]))
|
||||
key = (nvra["name"], nvra["arch"])
|
||||
|
||||
if nvra["name"] in remove_pkgs.get(pkg_type, set()):
|
||||
# TODO: make an option to turn this off
|
||||
if variant.type == "layered-product" and pkg_type in ("srpm", "debuginfo"):
|
||||
if variant.type == "layered-product" and pkg_type in (
|
||||
"srpm",
|
||||
"debuginfo",
|
||||
):
|
||||
new_pkgs.append(pkg)
|
||||
# User may not have addons available, therefore we need to
|
||||
# keep addon SRPMs in layered products in order not to violate GPL.
|
||||
# The same applies on debuginfo availability.
|
||||
continue
|
||||
compose.log_warning("Removed addon package (arch: %s, variant: %s): %s: %s" % (
|
||||
arch, variant, pkg_type, pkg_path))
|
||||
compose.log_warning(
|
||||
"Removed addon package (arch: %s, variant: %s): %s: %s"
|
||||
% (arch, variant, pkg_type, pkg_path)
|
||||
)
|
||||
removed_pkgs[pkg_type].append(pkg)
|
||||
elif key not in parent_pkgs.get(pkg_type, set()):
|
||||
if "fulltree-exclude" in pkg["flags"] and "input" not in pkg["flags"]:
|
||||
@ -326,10 +356,14 @@ def trim_packages(compose, arch, variant, pkg_map, parent_pkgs=None, remove_pkgs
|
||||
removed_pkgs[pkg_type].append(pkg)
|
||||
|
||||
pkg_map[pkg_type] = new_pkgs
|
||||
compose.log_info("Removed packages (arch: %s, variant: %s): %s: %s" % (
|
||||
arch, variant, pkg_type, len(removed_pkgs[pkg_type])))
|
||||
compose.log_info("Moved to parent (arch: %s, variant: %s): %s: %s" % (
|
||||
arch, variant, pkg_type, len(move_to_parent_pkgs[pkg_type])))
|
||||
compose.log_info(
|
||||
"Removed packages (arch: %s, variant: %s): %s: %s"
|
||||
% (arch, variant, pkg_type, len(removed_pkgs[pkg_type]))
|
||||
)
|
||||
compose.log_info(
|
||||
"Moved to parent (arch: %s, variant: %s): %s: %s"
|
||||
% (arch, variant, pkg_type, len(move_to_parent_pkgs[pkg_type]))
|
||||
)
|
||||
|
||||
compose.log_info("[DONE ] %s" % msg)
|
||||
return addon_pkgs, move_to_parent_pkgs, removed_pkgs
|
||||
@ -347,39 +381,50 @@ def _make_lookaside_repo(compose, variant, arch, pkg_map, package_sets=None):
|
||||
return repo
|
||||
|
||||
makedirs(repo)
|
||||
msg = 'Generating lookaside repo from %s.%s' % (variant.uid, arch)
|
||||
compose.log_info('[BEGIN] %s', msg)
|
||||
msg = "Generating lookaside repo from %s.%s" % (variant.uid, arch)
|
||||
compose.log_info("[BEGIN] %s", msg)
|
||||
|
||||
prefixes = {
|
||||
'repos': lambda: os.path.join(compose.paths.work.topdir(
|
||||
arch="global"), "download") + "/",
|
||||
'koji': lambda: pungi.wrappers.kojiwrapper.KojiWrapper(
|
||||
compose.conf['koji_profile']).koji_module.config.topdir.rstrip("/") + "/"
|
||||
"repos": lambda: os.path.join(
|
||||
compose.paths.work.topdir(arch="global"), "download"
|
||||
)
|
||||
+ "/",
|
||||
"koji": lambda: pungi.wrappers.kojiwrapper.KojiWrapper(
|
||||
compose.conf["koji_profile"]
|
||||
).koji_module.config.topdir.rstrip("/")
|
||||
+ "/",
|
||||
}
|
||||
path_prefix = prefixes[compose.conf['pkgset_source']]()
|
||||
path_prefix = prefixes[compose.conf["pkgset_source"]]()
|
||||
pkglist = compose.paths.work.lookaside_package_list(arch=arch, variant=variant)
|
||||
with open(pkglist, 'w') as f:
|
||||
with open(pkglist, "w") as f:
|
||||
for packages in pkg_map[arch][variant.uid].values():
|
||||
for pkg in packages:
|
||||
pkg = pkg['path']
|
||||
pkg = pkg["path"]
|
||||
if path_prefix and pkg.startswith(path_prefix):
|
||||
pkg = pkg[len(path_prefix):]
|
||||
f.write('%s\n' % pkg)
|
||||
pkg = pkg[len(path_prefix) :]
|
||||
f.write("%s\n" % pkg)
|
||||
|
||||
cr = CreaterepoWrapper(compose.conf['createrepo_c'])
|
||||
cr = CreaterepoWrapper(compose.conf["createrepo_c"])
|
||||
update_metadata = None
|
||||
if package_sets:
|
||||
pkgset = package_sets[-1]
|
||||
update_metadata = compose.paths.work.pkgset_repo(pkgset.name, arch)
|
||||
cmd = cr.get_createrepo_cmd(path_prefix, update=True, database=True, skip_stat=True,
|
||||
pkglist=pkglist,
|
||||
outputdir=repo,
|
||||
baseurl="file://%s" % path_prefix,
|
||||
workers=compose.conf["createrepo_num_workers"],
|
||||
update_md_path=update_metadata)
|
||||
run(cmd,
|
||||
cmd = cr.get_createrepo_cmd(
|
||||
path_prefix,
|
||||
update=True,
|
||||
database=True,
|
||||
skip_stat=True,
|
||||
pkglist=pkglist,
|
||||
outputdir=repo,
|
||||
baseurl="file://%s" % path_prefix,
|
||||
workers=compose.conf["createrepo_num_workers"],
|
||||
update_md_path=update_metadata,
|
||||
)
|
||||
run(
|
||||
cmd,
|
||||
logfile=compose.paths.log.log_file(arch, "lookaside_repo_%s" % (variant.uid)),
|
||||
show_cmd=True)
|
||||
show_cmd=True,
|
||||
)
|
||||
|
||||
# Add modular metadata into the repo
|
||||
if variant.arch_mmds:
|
||||
@ -399,7 +444,7 @@ def _make_lookaside_repo(compose, variant, arch, pkg_map, package_sets=None):
|
||||
)
|
||||
add_modular_metadata(cr, repo, mod_index, log_file)
|
||||
|
||||
compose.log_info('[DONE ] %s', msg)
|
||||
compose.log_info("[DONE ] %s", msg)
|
||||
|
||||
return repo
|
||||
|
||||
@ -408,8 +453,8 @@ def _update_config(compose, variant_uid, arch, repo):
|
||||
"""
|
||||
Add the variant lookaside repository into the configuration.
|
||||
"""
|
||||
lookasides = compose.conf.setdefault('gather_lookaside_repos', [])
|
||||
lookasides.append(('^%s$' % variant_uid, {arch: repo}))
|
||||
lookasides = compose.conf.setdefault("gather_lookaside_repos", [])
|
||||
lookasides.append(("^%s$" % variant_uid, {arch: repo}))
|
||||
|
||||
|
||||
def _update_lookaside_config(compose, variant, arch, pkg_map, package_sets=None):
|
||||
@ -417,13 +462,17 @@ def _update_lookaside_config(compose, variant, arch, pkg_map, package_sets=None)
|
||||
Make sure lookaside repo for all variants that the given one depends on
|
||||
exist, and that configuration is updated to use those repos.
|
||||
"""
|
||||
for dest, lookaside_variant_uid in compose.conf.get('variant_as_lookaside', []):
|
||||
for dest, lookaside_variant_uid in compose.conf.get("variant_as_lookaside", []):
|
||||
lookaside_variant = compose.all_variants[lookaside_variant_uid]
|
||||
if dest != variant.uid:
|
||||
continue
|
||||
if arch not in lookaside_variant.arches:
|
||||
compose.log_warning('[SKIP] Skipping lookaside from %s for %s.%s due to arch mismatch',
|
||||
lookaside_variant.uid, variant.uid, arch)
|
||||
compose.log_warning(
|
||||
"[SKIP] Skipping lookaside from %s for %s.%s due to arch mismatch",
|
||||
lookaside_variant.uid,
|
||||
variant.uid,
|
||||
arch,
|
||||
)
|
||||
continue
|
||||
repo = _make_lookaside_repo(
|
||||
compose, lookaside_variant, arch, pkg_map, package_sets
|
||||
@ -431,7 +480,9 @@ def _update_lookaside_config(compose, variant, arch, pkg_map, package_sets=None)
|
||||
_update_config(compose, variant.uid, arch, repo)
|
||||
|
||||
|
||||
def _gather_variants(result, compose, variant_type, package_sets, exclude_fulltree=False):
|
||||
def _gather_variants(
|
||||
result, compose, variant_type, package_sets, exclude_fulltree=False
|
||||
):
|
||||
"""Run gathering on all arches of all variants of given type.
|
||||
|
||||
If ``exclude_fulltree`` is set, all source packages from parent variants
|
||||
@ -448,7 +499,9 @@ def _gather_variants(result, compose, variant_type, package_sets, exclude_fulltr
|
||||
for arch in variant.arches:
|
||||
fulltree_excludes = set()
|
||||
if exclude_fulltree:
|
||||
for pkg_name, pkg_arch in get_parent_pkgs(arch, variant, result)["srpm"]:
|
||||
for pkg_name, pkg_arch in get_parent_pkgs(arch, variant, result)[
|
||||
"srpm"
|
||||
]:
|
||||
fulltree_excludes.add(pkg_name)
|
||||
|
||||
# Get lookaside repos for this variant from other variants. Based
|
||||
@ -467,7 +520,7 @@ def _gather_variants(result, compose, variant_type, package_sets, exclude_fulltr
|
||||
t = threading.Thread(
|
||||
target=worker,
|
||||
args=(que, errors, arch, compose, arch, variant, package_sets),
|
||||
kwargs={'fulltree_excludes': fulltree_excludes},
|
||||
kwargs={"fulltree_excludes": fulltree_excludes},
|
||||
)
|
||||
threads_list.append(t)
|
||||
t.start()
|
||||
@ -487,7 +540,9 @@ def _gather_variants(result, compose, variant_type, package_sets, exclude_fulltr
|
||||
variant.nsvc_to_pkgset = None
|
||||
|
||||
|
||||
def _trim_variants(result, compose, variant_type, remove_pkgs=None, move_to_parent=True):
|
||||
def _trim_variants(
|
||||
result, compose, variant_type, remove_pkgs=None, move_to_parent=True
|
||||
):
|
||||
"""Trim all varians of given type.
|
||||
|
||||
Returns a map of all packages included in these variants.
|
||||
@ -498,7 +553,8 @@ def _trim_variants(result, compose, variant_type, remove_pkgs=None, move_to_pare
|
||||
pkg_map = result[arch][variant.uid]
|
||||
parent_pkgs = get_parent_pkgs(arch, variant, result)
|
||||
included_packages, move_to_parent_pkgs, removed_pkgs = trim_packages(
|
||||
compose, arch, variant, pkg_map, parent_pkgs, remove_pkgs=remove_pkgs)
|
||||
compose, arch, variant, pkg_map, parent_pkgs, remove_pkgs=remove_pkgs
|
||||
)
|
||||
|
||||
# update all_addon_pkgs
|
||||
for pkg_type, pkgs in included_packages.items():
|
||||
@ -509,8 +565,15 @@ def _trim_variants(result, compose, variant_type, remove_pkgs=None, move_to_pare
|
||||
parent_pkg_map = result[arch][variant.parent.uid]
|
||||
for pkg_type, pkgs in move_to_parent_pkgs.items():
|
||||
for pkg in pkgs:
|
||||
compose.log_debug("Moving package to parent (arch: %s, variant: %s, pkg_type: %s): %s"
|
||||
% (arch, variant.uid, pkg_type, os.path.basename(pkg["path"])))
|
||||
compose.log_debug(
|
||||
"Moving package to parent (arch: %s, variant: %s, pkg_type: %s): %s"
|
||||
% (
|
||||
arch,
|
||||
variant.uid,
|
||||
pkg_type,
|
||||
os.path.basename(pkg["path"]),
|
||||
)
|
||||
)
|
||||
if pkg not in parent_pkg_map[pkg_type]:
|
||||
parent_pkg_map[pkg_type].append(pkg)
|
||||
return all_included_packages
|
||||
@ -519,20 +582,28 @@ def _trim_variants(result, compose, variant_type, remove_pkgs=None, move_to_pare
|
||||
def gather_wrapper(compose, package_sets, path_prefix):
|
||||
result = {}
|
||||
|
||||
_gather_variants(result, compose, 'variant', package_sets)
|
||||
_gather_variants(result, compose, 'addon', package_sets, exclude_fulltree=True)
|
||||
_gather_variants(result, compose, 'layered-product', package_sets, exclude_fulltree=True)
|
||||
_gather_variants(result, compose, 'optional', package_sets)
|
||||
_gather_variants(result, compose, "variant", package_sets)
|
||||
_gather_variants(result, compose, "addon", package_sets, exclude_fulltree=True)
|
||||
_gather_variants(
|
||||
result, compose, "layered-product", package_sets, exclude_fulltree=True
|
||||
)
|
||||
_gather_variants(result, compose, "optional", package_sets)
|
||||
|
||||
all_addon_pkgs = _trim_variants(result, compose, 'addon')
|
||||
all_addon_pkgs = _trim_variants(result, compose, "addon")
|
||||
# TODO do we really want to move packages to parent here?
|
||||
all_lp_pkgs = _trim_variants(result, compose, 'layered-product', remove_pkgs=all_addon_pkgs)
|
||||
all_lp_pkgs = _trim_variants(
|
||||
result, compose, "layered-product", remove_pkgs=all_addon_pkgs
|
||||
)
|
||||
|
||||
# merge all_addon_pkgs with all_lp_pkgs
|
||||
for pkg_type in set(all_addon_pkgs.keys()) | set(all_lp_pkgs.keys()):
|
||||
all_addon_pkgs.setdefault(pkg_type, set()).update(all_lp_pkgs.get(pkg_type, set()))
|
||||
all_addon_pkgs.setdefault(pkg_type, set()).update(
|
||||
all_lp_pkgs.get(pkg_type, set())
|
||||
)
|
||||
|
||||
_trim_variants(result, compose, 'optional', remove_pkgs=all_addon_pkgs, move_to_parent=False)
|
||||
_trim_variants(
|
||||
result, compose, "optional", remove_pkgs=all_addon_pkgs, move_to_parent=False
|
||||
)
|
||||
|
||||
# write packages (package lists) for all variants
|
||||
for arch in compose.get_arches():
|
||||
@ -549,17 +620,21 @@ def write_prepopulate_file(compose):
|
||||
It is stored in a location where ``get_prepopulate_packages`` function
|
||||
expects.
|
||||
"""
|
||||
if 'gather_prepopulate' not in compose.conf:
|
||||
if "gather_prepopulate" not in compose.conf:
|
||||
return
|
||||
|
||||
prepopulate_file = os.path.join(compose.paths.work.topdir(arch="global"), "prepopulate.json")
|
||||
prepopulate_file = os.path.join(
|
||||
compose.paths.work.topdir(arch="global"), "prepopulate.json"
|
||||
)
|
||||
msg = "Writing prepopulate file: %s" % prepopulate_file
|
||||
|
||||
scm_dict = compose.conf["gather_prepopulate"]
|
||||
if isinstance(scm_dict, dict):
|
||||
file_name = os.path.basename(scm_dict["file"])
|
||||
if scm_dict["scm"] == "file":
|
||||
scm_dict["file"] = os.path.join(compose.config_dir, os.path.basename(scm_dict["file"]))
|
||||
scm_dict["file"] = os.path.join(
|
||||
compose.config_dir, os.path.basename(scm_dict["file"])
|
||||
)
|
||||
else:
|
||||
file_name = os.path.basename(scm_dict)
|
||||
scm_dict = os.path.join(compose.config_dir, os.path.basename(scm_dict))
|
||||
@ -581,7 +656,9 @@ def get_prepopulate_packages(compose, arch, variant, include_arch=True):
|
||||
"""
|
||||
result = set()
|
||||
|
||||
prepopulate_file = os.path.join(compose.paths.work.topdir(arch="global"), "prepopulate.json")
|
||||
prepopulate_file = os.path.join(
|
||||
compose.paths.work.topdir(arch="global"), "prepopulate.json"
|
||||
)
|
||||
if not os.path.isfile(prepopulate_file):
|
||||
return result
|
||||
|
||||
@ -597,7 +674,8 @@ def get_prepopulate_packages(compose, arch, variant, include_arch=True):
|
||||
if pkg_arch not in get_compatible_arches(arch, multilib=True):
|
||||
raise ValueError(
|
||||
"Incompatible package arch '%s' for tree arch '%s' in prepopulate package '%s'"
|
||||
% (pkg_arch, arch, pkg_name))
|
||||
% (pkg_arch, arch, pkg_name)
|
||||
)
|
||||
if include_arch:
|
||||
result.add(i)
|
||||
else:
|
||||
@ -609,10 +687,13 @@ def get_additional_packages(compose, arch, variant):
|
||||
result = set()
|
||||
for i in get_arch_variant_data(compose.conf, "additional_packages", arch, variant):
|
||||
pkg_name, pkg_arch = split_name_arch(i)
|
||||
if pkg_arch is not None and pkg_arch not in get_compatible_arches(arch, multilib=True):
|
||||
if pkg_arch is not None and pkg_arch not in get_compatible_arches(
|
||||
arch, multilib=True
|
||||
):
|
||||
raise ValueError(
|
||||
"Incompatible package arch '%s' for tree arch '%s' in additional package '%s'"
|
||||
% (pkg_arch, arch, pkg_name))
|
||||
% (pkg_arch, arch, pkg_name)
|
||||
)
|
||||
result.add((pkg_name, pkg_arch))
|
||||
return result
|
||||
|
||||
@ -669,23 +750,28 @@ def get_variant_packages(compose, arch, variant, source_name, package_sets=None)
|
||||
packages |= get_additional_packages(compose, arch, variant)
|
||||
filter_packages |= get_filter_packages(compose, arch, variant)
|
||||
|
||||
if compose.conf['filter_system_release_packages']:
|
||||
system_release_packages, system_release_filter_packages = get_system_release_packages(
|
||||
compose, arch, variant, package_sets)
|
||||
if compose.conf["filter_system_release_packages"]:
|
||||
(
|
||||
system_release_packages,
|
||||
system_release_filter_packages,
|
||||
) = get_system_release_packages(compose, arch, variant, package_sets)
|
||||
packages |= system_release_packages
|
||||
filter_packages |= system_release_filter_packages
|
||||
|
||||
if variant.type == "optional":
|
||||
for var in variant.parent.get_variants(
|
||||
arch=arch, types=["self", "variant", "addon", "layered-product"]):
|
||||
arch=arch, types=["self", "variant", "addon", "layered-product"]
|
||||
):
|
||||
var_packages, var_groups, _ = get_variant_packages(
|
||||
compose, arch, var, source_name, package_sets=package_sets)
|
||||
compose, arch, var, source_name, package_sets=package_sets
|
||||
)
|
||||
packages |= var_packages
|
||||
groups |= var_groups
|
||||
|
||||
if variant.type in ["addon", "layered-product"]:
|
||||
var_packages, var_groups, _ = get_variant_packages(
|
||||
compose, arch, variant.parent, source_name, package_sets=package_sets)
|
||||
compose, arch, variant.parent, source_name, package_sets=package_sets
|
||||
)
|
||||
packages |= var_packages
|
||||
groups |= var_groups
|
||||
|
||||
@ -714,12 +800,16 @@ def get_system_release_packages(compose, arch, variant, package_sets):
|
||||
# search for best match
|
||||
best_match = None
|
||||
for pkg in system_release_packages:
|
||||
if pkg.name.endswith("release-%s" % variant.uid.lower()) or pkg.name.startswith("%s-release" % variant.uid.lower()):
|
||||
if pkg.name.endswith(
|
||||
"release-%s" % variant.uid.lower()
|
||||
) or pkg.name.startswith("%s-release" % variant.uid.lower()):
|
||||
best_match = pkg
|
||||
break
|
||||
else:
|
||||
# addons: return release packages from parent variant
|
||||
return get_system_release_packages(compose, arch, variant.parent, package_sets)
|
||||
return get_system_release_packages(
|
||||
compose, arch, variant.parent, package_sets
|
||||
)
|
||||
|
||||
if not best_match:
|
||||
# no package matches variant name -> pick the first one
|
||||
@ -734,8 +824,9 @@ def get_system_release_packages(compose, arch, variant, package_sets):
|
||||
return packages, filter_packages
|
||||
|
||||
|
||||
def get_packages_to_gather(compose, arch=None, variant=None, include_arch=True,
|
||||
include_prepopulated=False):
|
||||
def get_packages_to_gather(
|
||||
compose, arch=None, variant=None, include_arch=True, include_prepopulated=False
|
||||
):
|
||||
"""
|
||||
Returns the list of names of packages and list of names of groups which
|
||||
would be included in a compose as GATHER phase result.
|
||||
@ -771,7 +862,8 @@ def get_packages_to_gather(compose, arch=None, variant=None, include_arch=True,
|
||||
|
||||
if include_prepopulated:
|
||||
prepopulated = get_prepopulate_packages(
|
||||
compose, arch, variant, include_arch)
|
||||
compose, arch, variant, include_arch
|
||||
)
|
||||
packages = packages.union(prepopulated)
|
||||
|
||||
return list(packages), list(groups)
|
||||
|
@ -25,6 +25,7 @@ from pungi.linker import LinkerPool
|
||||
# DONE: show overall progress, not each file
|
||||
# TODO: (these should be logged separately)
|
||||
|
||||
|
||||
def _get_src_nevra(compose, pkg_obj, srpm_map):
|
||||
"""Return source N-E:V-R.A.rpm; guess if necessary."""
|
||||
result = srpm_map.get(pkg_obj.sourcerpm, None)
|
||||
@ -32,7 +33,10 @@ def _get_src_nevra(compose, pkg_obj, srpm_map):
|
||||
nvra = kobo.rpmlib.parse_nvra(pkg_obj.sourcerpm)
|
||||
nvra["epoch"] = pkg_obj.epoch
|
||||
result = kobo.rpmlib.make_nvra(nvra, add_rpm=True, force_epoch=True)
|
||||
compose.log_warning("Package %s has no SRPM available, guessing epoch: %s" % (pkg_obj.nevra, result))
|
||||
compose.log_warning(
|
||||
"Package %s has no SRPM available, guessing epoch: %s"
|
||||
% (pkg_obj.nevra, result)
|
||||
)
|
||||
return result
|
||||
|
||||
|
||||
@ -77,8 +81,14 @@ def link_files(compose, arch, variant, pkg_map, pkg_sets, manifest, srpm_map={})
|
||||
for pkg in pkg_map["srpm"]:
|
||||
if "lookaside" in pkg["flags"]:
|
||||
continue
|
||||
dst = os.path.join(packages_dir, get_package_path(os.path.basename(pkg["path"]), hashed_directories))
|
||||
dst_relpath = os.path.join(packages_dir_relpath, get_package_path(os.path.basename(pkg["path"]), hashed_directories))
|
||||
dst = os.path.join(
|
||||
packages_dir,
|
||||
get_package_path(os.path.basename(pkg["path"]), hashed_directories),
|
||||
)
|
||||
dst_relpath = os.path.join(
|
||||
packages_dir_relpath,
|
||||
get_package_path(os.path.basename(pkg["path"]), hashed_directories),
|
||||
)
|
||||
|
||||
# link file
|
||||
pool.queue_put((pkg["path"], dst))
|
||||
@ -86,7 +96,14 @@ def link_files(compose, arch, variant, pkg_map, pkg_sets, manifest, srpm_map={})
|
||||
# update rpm manifest
|
||||
pkg_obj = _find_by_path(pkg_sets, arch, pkg["path"])
|
||||
nevra = pkg_obj.nevra
|
||||
manifest.add(variant.uid, arch, nevra, path=dst_relpath, sigkey=pkg_obj.signature, category="source")
|
||||
manifest.add(
|
||||
variant.uid,
|
||||
arch,
|
||||
nevra,
|
||||
path=dst_relpath,
|
||||
sigkey=pkg_obj.signature,
|
||||
category="source",
|
||||
)
|
||||
|
||||
# update srpm_map
|
||||
srpm_map.setdefault(pkg_obj.file_name, nevra)
|
||||
@ -96,8 +113,14 @@ def link_files(compose, arch, variant, pkg_map, pkg_sets, manifest, srpm_map={})
|
||||
for pkg in pkg_map["rpm"]:
|
||||
if "lookaside" in pkg["flags"]:
|
||||
continue
|
||||
dst = os.path.join(packages_dir, get_package_path(os.path.basename(pkg["path"]), hashed_directories))
|
||||
dst_relpath = os.path.join(packages_dir_relpath, get_package_path(os.path.basename(pkg["path"]), hashed_directories))
|
||||
dst = os.path.join(
|
||||
packages_dir,
|
||||
get_package_path(os.path.basename(pkg["path"]), hashed_directories),
|
||||
)
|
||||
dst_relpath = os.path.join(
|
||||
packages_dir_relpath,
|
||||
get_package_path(os.path.basename(pkg["path"]), hashed_directories),
|
||||
)
|
||||
|
||||
# link file
|
||||
pool.queue_put((pkg["path"], dst))
|
||||
@ -106,15 +129,31 @@ def link_files(compose, arch, variant, pkg_map, pkg_sets, manifest, srpm_map={})
|
||||
pkg_obj = _find_by_path(pkg_sets, arch, pkg["path"])
|
||||
nevra = pkg_obj.nevra
|
||||
src_nevra = _get_src_nevra(compose, pkg_obj, srpm_map)
|
||||
manifest.add(variant.uid, arch, nevra, path=dst_relpath, sigkey=pkg_obj.signature, category="binary", srpm_nevra=src_nevra)
|
||||
manifest.add(
|
||||
variant.uid,
|
||||
arch,
|
||||
nevra,
|
||||
path=dst_relpath,
|
||||
sigkey=pkg_obj.signature,
|
||||
category="binary",
|
||||
srpm_nevra=src_nevra,
|
||||
)
|
||||
|
||||
packages_dir = compose.paths.compose.debug_packages(arch, variant)
|
||||
packages_dir_relpath = compose.paths.compose.debug_packages(arch, variant, relative=True)
|
||||
packages_dir_relpath = compose.paths.compose.debug_packages(
|
||||
arch, variant, relative=True
|
||||
)
|
||||
for pkg in pkg_map["debuginfo"]:
|
||||
if "lookaside" in pkg["flags"]:
|
||||
continue
|
||||
dst = os.path.join(packages_dir, get_package_path(os.path.basename(pkg["path"]), hashed_directories))
|
||||
dst_relpath = os.path.join(packages_dir_relpath, get_package_path(os.path.basename(pkg["path"]), hashed_directories))
|
||||
dst = os.path.join(
|
||||
packages_dir,
|
||||
get_package_path(os.path.basename(pkg["path"]), hashed_directories),
|
||||
)
|
||||
dst_relpath = os.path.join(
|
||||
packages_dir_relpath,
|
||||
get_package_path(os.path.basename(pkg["path"]), hashed_directories),
|
||||
)
|
||||
|
||||
# link file
|
||||
pool.queue_put((pkg["path"], dst))
|
||||
@ -123,7 +162,15 @@ def link_files(compose, arch, variant, pkg_map, pkg_sets, manifest, srpm_map={})
|
||||
pkg_obj = _find_by_path(pkg_sets, arch, pkg["path"])
|
||||
nevra = pkg_obj.nevra
|
||||
src_nevra = _get_src_nevra(compose, pkg_obj, srpm_map)
|
||||
manifest.add(variant.uid, arch, nevra, path=dst_relpath, sigkey=pkg_obj.signature, category="debug", srpm_nevra=src_nevra)
|
||||
manifest.add(
|
||||
variant.uid,
|
||||
arch,
|
||||
nevra,
|
||||
path=dst_relpath,
|
||||
sigkey=pkg_obj.signature,
|
||||
category="debug",
|
||||
srpm_nevra=src_nevra,
|
||||
)
|
||||
|
||||
pool.start()
|
||||
pool.stop()
|
||||
|
@ -18,7 +18,6 @@ import kobo.plugins


class GatherMethodBase(kobo.plugins.Plugin):

def __init__(self, compose):
self.compose = compose

@ -32,18 +32,43 @@ import pungi.phases.gather.method
|
||||
class GatherMethodDeps(pungi.phases.gather.method.GatherMethodBase):
|
||||
enabled = True
|
||||
|
||||
def __call__(self, arch, variant, packages, groups, filter_packages, multilib_whitelist, multilib_blacklist, package_sets, path_prefix=None, fulltree_excludes=None, prepopulate=None):
|
||||
def __call__(
|
||||
self,
|
||||
arch,
|
||||
variant,
|
||||
packages,
|
||||
groups,
|
||||
filter_packages,
|
||||
multilib_whitelist,
|
||||
multilib_blacklist,
|
||||
package_sets,
|
||||
path_prefix=None,
|
||||
fulltree_excludes=None,
|
||||
prepopulate=None,
|
||||
):
|
||||
# result = {
|
||||
# "rpm": [],
|
||||
# "srpm": [],
|
||||
# "debuginfo": [],
|
||||
# }
|
||||
|
||||
write_pungi_config(self.compose, arch, variant, packages, groups, filter_packages,
|
||||
multilib_whitelist, multilib_blacklist,
|
||||
fulltree_excludes=fulltree_excludes, prepopulate=prepopulate,
|
||||
source_name=self.source_name, package_sets=package_sets)
|
||||
result, missing_deps = resolve_deps(self.compose, arch, variant, source_name=self.source_name)
|
||||
write_pungi_config(
|
||||
self.compose,
|
||||
arch,
|
||||
variant,
|
||||
packages,
|
||||
groups,
|
||||
filter_packages,
|
||||
multilib_whitelist,
|
||||
multilib_blacklist,
|
||||
fulltree_excludes=fulltree_excludes,
|
||||
prepopulate=prepopulate,
|
||||
source_name=self.source_name,
|
||||
package_sets=package_sets,
|
||||
)
|
||||
result, missing_deps = resolve_deps(
|
||||
self.compose, arch, variant, source_name=self.source_name
|
||||
)
|
||||
raise_on_invalid_sigkeys(arch, variant, package_sets, result)
|
||||
check_deps(self.compose, arch, variant, missing_deps)
|
||||
return result
|
||||
@ -83,12 +108,25 @@ def _format_packages(pkgs):
|
||||
return sorted(result)
|
||||
|
||||
|
||||
def write_pungi_config(compose, arch, variant, packages, groups, filter_packages,
|
||||
multilib_whitelist, multilib_blacklist, fulltree_excludes=None,
|
||||
prepopulate=None, source_name=None, package_sets=None):
|
||||
def write_pungi_config(
|
||||
compose,
|
||||
arch,
|
||||
variant,
|
||||
packages,
|
||||
groups,
|
||||
filter_packages,
|
||||
multilib_whitelist,
|
||||
multilib_blacklist,
|
||||
fulltree_excludes=None,
|
||||
prepopulate=None,
|
||||
source_name=None,
|
||||
package_sets=None,
|
||||
):
|
||||
"""write pungi config (kickstart) for arch/variant"""
|
||||
pungi_wrapper = PungiWrapper()
|
||||
pungi_cfg = compose.paths.work.pungi_conf(variant=variant, arch=arch, source_name=source_name)
|
||||
pungi_cfg = compose.paths.work.pungi_conf(
|
||||
variant=variant, arch=arch, source_name=source_name
|
||||
)
|
||||
|
||||
compose.log_info(
|
||||
"Writing pungi config (arch: %s, variant: %s): %s", arch, variant, pungi_cfg
|
||||
@ -102,13 +140,20 @@ def write_pungi_config(compose, arch, variant, packages, groups, filter_packages
|
||||
repos["comps-repo"] = compose.paths.work.comps_repo(arch=arch, variant=variant)
|
||||
if variant.type == "optional":
|
||||
for var in variant.parent.get_variants(
|
||||
arch=arch, types=["self", "variant", "addon", "layered-product"]):
|
||||
repos['%s-comps' % var.uid] = compose.paths.work.comps_repo(arch=arch, variant=var)
|
||||
arch=arch, types=["self", "variant", "addon", "layered-product"]
|
||||
):
|
||||
repos["%s-comps" % var.uid] = compose.paths.work.comps_repo(
|
||||
arch=arch, variant=var
|
||||
)
|
||||
if variant.type in ["addon", "layered-product"]:
|
||||
repos['parent-comps'] = compose.paths.work.comps_repo(arch=arch, variant=variant.parent)
|
||||
repos["parent-comps"] = compose.paths.work.comps_repo(
|
||||
arch=arch, variant=variant.parent
|
||||
)
|
||||
|
||||
lookaside_repos = {}
|
||||
for i, repo_url in enumerate(pungi.phases.gather.get_lookaside_repos(compose, arch, variant)):
|
||||
for i, repo_url in enumerate(
|
||||
pungi.phases.gather.get_lookaside_repos(compose, arch, variant)
|
||||
):
|
||||
lookaside_repos["lookaside-repo-%s" % i] = repo_url
|
||||
|
||||
packages_str = list(_format_packages(packages))
|
||||
@ -116,15 +161,22 @@ def write_pungi_config(compose, arch, variant, packages, groups, filter_packages
|
||||
|
||||
if not groups and not packages_str and not prepopulate:
|
||||
raise RuntimeError(
|
||||
'No packages included in %s.%s (no comps groups, no input packages, no prepopulate)'
|
||||
% (variant.uid, arch))
|
||||
"No packages included in %s.%s (no comps groups, no input packages, no prepopulate)"
|
||||
% (variant.uid, arch)
|
||||
)
|
||||
|
||||
pungi_wrapper.write_kickstart(
|
||||
ks_path=pungi_cfg, repos=repos, groups=groups, packages=packages_str,
|
||||
ks_path=pungi_cfg,
|
||||
repos=repos,
|
||||
groups=groups,
|
||||
packages=packages_str,
|
||||
exclude_packages=filter_packages_str,
|
||||
lookaside_repos=lookaside_repos, fulltree_excludes=fulltree_excludes,
|
||||
multilib_whitelist=multilib_whitelist, multilib_blacklist=multilib_blacklist,
|
||||
prepopulate=prepopulate)
|
||||
lookaside_repos=lookaside_repos,
|
||||
fulltree_excludes=fulltree_excludes,
|
||||
multilib_whitelist=multilib_whitelist,
|
||||
multilib_blacklist=multilib_blacklist,
|
||||
prepopulate=prepopulate,
|
||||
)
|
||||
|
||||
|
||||
def resolve_deps(compose, arch, variant, source_name=None):
|
||||
@ -136,7 +188,7 @@ def resolve_deps(compose, arch, variant, source_name=None):
|
||||
compose.log_info("[BEGIN] %s" % msg)
|
||||
pungi_conf = compose.paths.work.pungi_conf(arch, variant, source_name=source_name)
|
||||
|
||||
multilib_methods = get_arch_variant_data(compose.conf, 'multilib', arch, variant)
|
||||
multilib_methods = get_arch_variant_data(compose.conf, "multilib", arch, variant)
|
||||
|
||||
greedy_method = compose.conf["greedy_method"]
|
||||
|
||||
@ -159,7 +211,9 @@ def resolve_deps(compose, arch, variant, source_name=None):
|
||||
selfhosting = False
|
||||
|
||||
lookaside_repos = {}
|
||||
for i, repo_url in enumerate(pungi.phases.gather.get_lookaside_repos(compose, arch, variant)):
|
||||
for i, repo_url in enumerate(
|
||||
pungi.phases.gather.get_lookaside_repos(compose, arch, variant)
|
||||
):
|
||||
lookaside_repos["lookaside-repo-%s" % i] = repo_url
|
||||
|
||||
yum_arch = tree_arch_to_yum_arch(arch)
|
||||
@ -167,28 +221,40 @@ def resolve_deps(compose, arch, variant, source_name=None):
|
||||
cache_dir = compose.paths.work.pungi_cache_dir(arch, variant)
|
||||
# TODO: remove YUM code, fully migrate to DNF
|
||||
backends = {
|
||||
'yum': pungi_wrapper.get_pungi_cmd,
|
||||
'dnf': pungi_wrapper.get_pungi_cmd_dnf,
|
||||
"yum": pungi_wrapper.get_pungi_cmd,
|
||||
"dnf": pungi_wrapper.get_pungi_cmd_dnf,
|
||||
}
|
||||
get_cmd = backends[compose.conf['gather_backend']]
|
||||
cmd = get_cmd(pungi_conf, destdir=tmp_dir, name=variant.uid,
|
||||
selfhosting=selfhosting, fulltree=fulltree, arch=yum_arch,
|
||||
full_archlist=True, greedy=greedy_method, cache_dir=cache_dir,
|
||||
lookaside_repos=lookaside_repos, multilib_methods=multilib_methods,
|
||||
profiler=profiler)
|
||||
get_cmd = backends[compose.conf["gather_backend"]]
|
||||
cmd = get_cmd(
|
||||
pungi_conf,
|
||||
destdir=tmp_dir,
|
||||
name=variant.uid,
|
||||
selfhosting=selfhosting,
|
||||
fulltree=fulltree,
|
||||
arch=yum_arch,
|
||||
full_archlist=True,
|
||||
greedy=greedy_method,
|
||||
cache_dir=cache_dir,
|
||||
lookaside_repos=lookaside_repos,
|
||||
multilib_methods=multilib_methods,
|
||||
profiler=profiler,
|
||||
)
|
||||
# Use temp working directory directory as workaround for
|
||||
# https://bugzilla.redhat.com/show_bug.cgi?id=795137
|
||||
with temp_dir(prefix='pungi_') as tmp_dir:
|
||||
with temp_dir(prefix="pungi_") as tmp_dir:
|
||||
run(cmd, logfile=pungi_log, show_cmd=True, workdir=tmp_dir, env=os.environ)
|
||||
|
||||
with open(pungi_log, "r") as f:
|
||||
packages, broken_deps, missing_comps_pkgs = pungi_wrapper.parse_log(f)
|
||||
|
||||
if missing_comps_pkgs:
|
||||
log_msg = ("Packages mentioned in comps do not exist for %s.%s: %s"
|
||||
% (variant.uid, arch, ", ".join(sorted(missing_comps_pkgs))))
|
||||
log_msg = "Packages mentioned in comps do not exist for %s.%s: %s" % (
|
||||
variant.uid,
|
||||
arch,
|
||||
", ".join(sorted(missing_comps_pkgs)),
|
||||
)
|
||||
compose.log_warning(log_msg)
|
||||
if compose.conf['require_all_comps_packages']:
|
||||
if compose.conf["require_all_comps_packages"]:
|
||||
raise RuntimeError(log_msg)
|
||||
|
||||
compose.log_info("[DONE ] %s" % msg)
|
||||
@ -202,5 +268,7 @@ def check_deps(compose, arch, variant, missing_deps):
|
||||
if missing_deps:
|
||||
for pkg in sorted(missing_deps):
|
||||
compose.log_error(
|
||||
"Unresolved dependencies for %s.%s in package %s: %s" % (variant, arch, pkg, sorted(missing_deps[pkg])))
|
||||
"Unresolved dependencies for %s.%s in package %s: %s"
|
||||
% (variant, arch, pkg, sorted(missing_deps[pkg]))
|
||||
)
|
||||
raise RuntimeError("Unresolved dependencies detected")
|
||||
|
@ -268,10 +268,14 @@ class GatherMethodHybrid(pungi.phases.gather.method.GatherMethodBase):
env = os.environ.copy()
env["G_MESSAGES_PREFIXED"] = ""
env["XDG_CACHE_HOME"] = cache_dir
self.compose.log_debug("[BEGIN] Running fus (arch: %s, variant: %s)" % (arch, variant))
self.compose.log_debug(
"[BEGIN] Running fus (arch: %s, variant: %s)" % (arch, variant)
)
run(cmd, logfile=logfile, show_cmd=True, env=env)
output, out_modules = fus.parse_output(logfile)
self.compose.log_debug("[DONE ] Running fus (arch: %s, variant: %s)" % (arch, variant))
self.compose.log_debug(
"[DONE ] Running fus (arch: %s, variant: %s)" % (arch, variant)
)
# No need to resolve modules again. They are not going to change.
modules = []
# Reset input packages as well to only solve newly added things.
@ -397,7 +401,11 @@ class GatherMethodHybrid(pungi.phases.gather.method.GatherMethodBase):
continue

strict_nevra = "%s-%s:%s-%s.%s" % (
pkg.name, pkg.epoch or "0", pkg.version, pkg.release, pkg.arch
pkg.name,
pkg.epoch or "0",
pkg.version,
pkg.release,
pkg.arch,
)
if strict_nevra in self.modular_packages:
# Wildcards should not match modular packages.

@ -30,16 +30,28 @@ class GatherMethodNodeps(pungi.phases.gather.method.GatherMethodBase):
enabled = True

def __call__(self, arch, variant, *args, **kwargs):
fname = 'gather-nodeps-%s' % variant.uid
fname = "gather-nodeps-%s" % variant.uid
if self.source_name:
fname += '-' + self.source_name
fname += "-" + self.source_name
log_file = self.compose.paths.log.log_file(arch, fname)
with open(log_file, 'w') as log:
with open(log_file, "w") as log:
return self.worker(log, arch, variant, *args, **kwargs)

def worker(self, log, arch, variant, pkgs, groups, filter_packages,
multilib_whitelist, multilib_blacklist, package_sets,
path_prefix=None, fulltree_excludes=None, prepopulate=None):
def worker(
self,
log,
arch,
variant,
pkgs,
groups,
filter_packages,
multilib_whitelist,
multilib_blacklist,
package_sets,
path_prefix=None,
fulltree_excludes=None,
prepopulate=None,
):
result = {
"rpm": [],
"srpm": [],

@ -48,7 +60,7 @@ class GatherMethodNodeps(pungi.phases.gather.method.GatherMethodBase):

group_packages = expand_groups(self.compose, arch, variant, groups)
packages = pkgs | group_packages
log.write('Requested packages:\n%s\n' % pformat(packages))
log.write("Requested packages:\n%s\n" % pformat(packages))

seen_rpms = {}
seen_srpms = {}

@ -58,59 +70,65 @@ class GatherMethodNodeps(pungi.phases.gather.method.GatherMethodBase):
for i in valid_arches:
compatible_arches[i] = pungi.arch.get_compatible_arches(i)

log.write('\nGathering rpms\n')
log.write("\nGathering rpms\n")
for pkg in iterate_packages(package_sets, arch):
if not pkg_is_rpm(pkg):
continue
for gathered_pkg, pkg_arch in packages:
if isinstance(gathered_pkg, six.string_types) and not fnmatch(pkg.name, gathered_pkg):
if isinstance(gathered_pkg, six.string_types) and not fnmatch(
pkg.name, gathered_pkg
):
continue
elif (type(gathered_pkg) in [SimpleRpmWrapper, RpmWrapper]
and pkg.nevra != gathered_pkg.nevra):
elif (
type(gathered_pkg) in [SimpleRpmWrapper, RpmWrapper]
and pkg.nevra != gathered_pkg.nevra
):
continue
if pkg_arch is not None and pkg.arch != pkg_arch and pkg.arch != 'noarch':
if (
pkg_arch is not None
and pkg.arch != pkg_arch
and pkg.arch != "noarch"
):
continue
result["rpm"].append({
"path": pkg.file_path,
"flags": ["input"],
})
result["rpm"].append(
{"path": pkg.file_path, "flags": ["input"]}
)
seen_rpms.setdefault(pkg.name, set()).add(pkg.arch)
seen_srpms.setdefault(pkg.sourcerpm, set()).add(pkg.arch)
log.write('Added %s (matched %s.%s) (sourcerpm: %s)\n'
% (pkg, gathered_pkg, pkg_arch, pkg.sourcerpm))
log.write(
"Added %s (matched %s.%s) (sourcerpm: %s)\n"
% (pkg, gathered_pkg, pkg_arch, pkg.sourcerpm)
)

log.write('\nGathering source rpms\n')
log.write("\nGathering source rpms\n")
for pkg in iterate_packages(package_sets, arch):
if not pkg_is_srpm(pkg):
continue
if pkg.file_name in seen_srpms:
result["srpm"].append({
"path": pkg.file_path,
"flags": ["input"],
})
log.write('Adding %s\n' % pkg)
result["srpm"].append(
{"path": pkg.file_path, "flags": ["input"]}
)
log.write("Adding %s\n" % pkg)

log.write('\nGathering debuginfo packages\n')
log.write("\nGathering debuginfo packages\n")
for pkg in iterate_packages(package_sets, arch):
if not pkg_is_debug(pkg):
continue
if pkg.sourcerpm not in seen_srpms:
log.write('Not considering %s: corresponding srpm not included\n' % pkg)
log.write("Not considering %s: corresponding srpm not included\n" % pkg)
continue
pkg_arches = set(compatible_arches[pkg.arch]) - set(['noarch'])
seen_arches = set(seen_srpms[pkg.sourcerpm]) - set(['noarch'])
pkg_arches = set(compatible_arches[pkg.arch]) - set(["noarch"])
seen_arches = set(seen_srpms[pkg.sourcerpm]) - set(["noarch"])
if not (pkg_arches & seen_arches):
# We only want to pull in a debuginfo if we have a binary
# package for a compatible arch. Noarch packages should not
# pull debuginfo (they would pull in all architectures).
log.write('Not including %s: no package for this arch\n'
% pkg)
log.write("Not including %s: no package for this arch\n" % pkg)
continue
result["debuginfo"].append({
"path": pkg.file_path,
"flags": ["input"],
})
log.write('Adding %s\n' % pkg)
result["debuginfo"].append(
{"path": pkg.file_path, "flags": ["input"]}
)
log.write("Adding %s\n" % pkg)

return result

@ -130,10 +148,12 @@ def expand_groups(compose, arch, variant, groups, set_pkg_arch=True):
comps.append(CompsWrapper(comps_file))

if variant and variant.parent:
parent_comps_file = compose.paths.work.comps(arch, variant.parent, create_dir=False)
parent_comps_file = compose.paths.work.comps(
arch, variant.parent, create_dir=False
)
comps.append(CompsWrapper(parent_comps_file))

if variant.type == 'optional':
if variant.type == "optional":
for v in variant.parent.variants.values():
if v.id == variant.id:
continue

@ -18,7 +18,6 @@ import kobo.plugins


class GatherSourceBase(kobo.plugins.Plugin):

def __init__(self, compose):
self.compose = compose

@ -34,7 +34,7 @@ class GatherSourceComps(pungi.phases.gather.source.GatherSourceBase):

def __call__(self, arch, variant):
groups = set()
if not self.compose.conf.get('comps_file'):
if not self.compose.conf.get("comps_file"):
return set(), set()

comps = CompsWrapper(self.compose.paths.work.comps(arch=arch, variant=variant))
@ -20,27 +20,30 @@ from productmd.images import Image
# name will be ending with. The extensions are used to filter out which task
# results will be pulled into the compose.
EXTENSIONS = {
'docker': ['tar.gz', 'tar.xz'],
'liveimg-squashfs': ['liveimg.squashfs'],
'qcow': ['qcow'],
'qcow2': ['qcow2'],
'raw': ['raw'],
'raw-xz': ['raw.xz'],
'rhevm-ova': ['rhevm.ova'],
'tar-gz': ['tar.gz'],
'vagrant-hyperv': ['vagrant-hyperv.box'],
'vagrant-libvirt': ['vagrant-libvirt.box'],
'vagrant-virtualbox': ['vagrant-virtualbox.box'],
'vagrant-vmware-fusion': ['vagrant-vmware-fusion.box'],
'vdi': ['vdi'],
'vmdk': ['vmdk'],
'vpc': ['vhd'],
'vsphere-ova': ['vsphere.ova'],
"docker": ["tar.gz", "tar.xz"],
"liveimg-squashfs": ["liveimg.squashfs"],
"qcow": ["qcow"],
"qcow2": ["qcow2"],
"raw": ["raw"],
"raw-xz": ["raw.xz"],
"rhevm-ova": ["rhevm.ova"],
"tar-gz": ["tar.gz"],
"vagrant-hyperv": ["vagrant-hyperv.box"],
"vagrant-libvirt": ["vagrant-libvirt.box"],
"vagrant-virtualbox": ["vagrant-virtualbox.box"],
"vagrant-vmware-fusion": ["vagrant-vmware-fusion.box"],
"vdi": ["vdi"],
"vmdk": ["vmdk"],
"vpc": ["vhd"],
"vsphere-ova": ["vsphere.ova"],
}

class ImageBuildPhase(base.PhaseLoggerMixin, base.ImageConfigMixin, base.ConfigGuardedPhase):
|
||||
class ImageBuildPhase(
|
||||
base.PhaseLoggerMixin, base.ImageConfigMixin, base.ConfigGuardedPhase
|
||||
):
|
||||
"""class for wrapping up koji image-build"""
|
||||
|
||||
name = "image_build"
|
||||
|
||||
def __init__(self, compose):
|
||||
@ -53,13 +56,13 @@ class ImageBuildPhase(base.PhaseLoggerMixin, base.ImageConfigMixin, base.ConfigG
|
||||
current variant. If the config is set, it will be removed from the
|
||||
dict.
|
||||
"""
|
||||
if variant.type != 'variant':
|
||||
if variant.type != "variant":
|
||||
# Buildinstall only runs for top-level variants. Nested variants
|
||||
# need to re-use install tree from parent.
|
||||
variant = variant.parent
|
||||
|
||||
install_tree_from = image_conf.pop('install_tree_from', variant.uid)
|
||||
if '://' in install_tree_from:
|
||||
install_tree_from = image_conf.pop("install_tree_from", variant.uid)
|
||||
if "://" in install_tree_from:
|
||||
# It's a URL, return it unchanged
|
||||
return install_tree_from
|
||||
if install_tree_from.startswith("/"):
|
||||
@ -69,11 +72,14 @@ class ImageBuildPhase(base.PhaseLoggerMixin, base.ImageConfigMixin, base.ConfigG
|
||||
install_tree_source = self.compose.all_variants.get(install_tree_from)
|
||||
if not install_tree_source:
|
||||
raise RuntimeError(
|
||||
'There is no variant %s to get install tree from when building image for %s.'
|
||||
% (install_tree_from, variant.uid))
|
||||
"There is no variant %s to get install tree from when building image for %s."
|
||||
% (install_tree_from, variant.uid)
|
||||
)
|
||||
return translate_path(
|
||||
self.compose,
|
||||
self.compose.paths.compose.os_tree('$arch', install_tree_source, create_dir=False)
|
||||
self.compose.paths.compose.os_tree(
|
||||
"$arch", install_tree_source, create_dir=False
|
||||
),
|
||||
)
|
||||
|
||||
def _get_repo(self, image_conf, variant):
|
||||
@ -82,27 +88,29 @@ class ImageBuildPhase(base.PhaseLoggerMixin, base.ImageConfigMixin, base.ConfigG
|
||||
explicitly listed in config, followed by repo for current variant
|
||||
if it's not included in the list already.
|
||||
"""
|
||||
repos = shortcuts.force_list(image_conf.get('repo', []))
|
||||
repos = shortcuts.force_list(image_conf.get("repo", []))
|
||||
|
||||
if not variant.is_empty and variant.uid not in repos:
|
||||
repos.append(variant.uid)
|
||||
|
||||
return ",".join(get_repo_urls(self.compose, repos, arch='$arch'))
|
||||
return ",".join(get_repo_urls(self.compose, repos, arch="$arch"))
|
||||
|
||||
def _get_arches(self, image_conf, arches):
|
||||
if 'arches' in image_conf['image-build']:
|
||||
arches = set(image_conf['image-build'].get('arches', [])) & arches
|
||||
if "arches" in image_conf["image-build"]:
|
||||
arches = set(image_conf["image-build"].get("arches", [])) & arches
|
||||
return sorted(arches)
|
||||
|
||||
def _set_release(self, image_conf):
|
||||
"""If release is set explicitly to None, replace it with date and respin."""
|
||||
if 'release' in image_conf:
|
||||
image_conf['release'] = (version_generator(self.compose, image_conf['release']) or
|
||||
self.compose.image_release)
|
||||
if "release" in image_conf:
|
||||
image_conf["release"] = (
|
||||
version_generator(self.compose, image_conf["release"])
|
||||
or self.compose.image_release
|
||||
)
|
||||
|
||||
def run(self):
|
||||
for variant in self.compose.get_variants():
|
||||
arches = set([x for x in variant.arches if x != 'src'])
|
||||
arches = set([x for x in variant.arches if x != "src"])
|
||||
|
||||
for image_conf in self.get_config_block(variant):
|
||||
# We will modify the data, so we need to make a copy to
|
||||
@ -112,54 +120,66 @@ class ImageBuildPhase(base.PhaseLoggerMixin, base.ImageConfigMixin, base.ConfigG
|
||||
|
||||
# image_conf is passed to get_image_build_cmd as dict
|
||||
|
||||
image_conf["image-build"]['arches'] = self._get_arches(image_conf, arches)
|
||||
if not image_conf["image-build"]['arches']:
|
||||
image_conf["image-build"]["arches"] = self._get_arches(
|
||||
image_conf, arches
|
||||
)
|
||||
if not image_conf["image-build"]["arches"]:
|
||||
continue
|
||||
|
||||
# Replace possible ambiguous ref name with explicit hash.
|
||||
ksurl = self.get_ksurl(image_conf['image-build'])
|
||||
ksurl = self.get_ksurl(image_conf["image-build"])
|
||||
if ksurl:
|
||||
image_conf["image-build"]['ksurl'] = ksurl
|
||||
image_conf["image-build"]["ksurl"] = ksurl
|
||||
|
||||
image_conf["image-build"]["variant"] = variant
|
||||
|
||||
image_conf["image-build"]["install_tree"] = self._get_install_tree(image_conf['image-build'], variant)
|
||||
image_conf["image-build"]["install_tree"] = self._get_install_tree(
|
||||
image_conf["image-build"], variant
|
||||
)
|
||||
|
||||
release = self.get_release(image_conf['image-build'])
|
||||
release = self.get_release(image_conf["image-build"])
|
||||
if release:
|
||||
image_conf['image-build']['release'] = release
|
||||
image_conf["image-build"]["release"] = release
|
||||
|
||||
image_conf['image-build']['version'] = self.get_version(image_conf['image-build'])
|
||||
image_conf['image-build']['target'] = self.get_config(image_conf['image-build'], 'target')
|
||||
image_conf["image-build"]["version"] = self.get_version(
|
||||
image_conf["image-build"]
|
||||
)
|
||||
image_conf["image-build"]["target"] = self.get_config(
|
||||
image_conf["image-build"], "target"
|
||||
)
|
||||
|
||||
# Pungi config can either contain old [(format, suffix)], or
|
||||
# just list of formats, or a single format.
|
||||
formats = []
|
||||
for format in force_list(image_conf["image-build"]["format"]):
|
||||
formats.append(format[0] if isinstance(format, (tuple, list)) else format)
|
||||
formats.append(
|
||||
format[0] if isinstance(format, (tuple, list)) else format
|
||||
)
|
||||
image_conf["image-build"]["format"] = formats
|
||||
image_conf["image-build"]['repo'] = self._get_repo(image_conf['image-build'], variant)
|
||||
image_conf["image-build"]["repo"] = self._get_repo(
|
||||
image_conf["image-build"], variant
|
||||
)
|
||||
|
||||
can_fail = image_conf['image-build'].pop('failable', [])
|
||||
if can_fail == ['*']:
|
||||
can_fail = image_conf['image-build']['arches']
|
||||
can_fail = image_conf["image-build"].pop("failable", [])
|
||||
if can_fail == ["*"]:
|
||||
can_fail = image_conf["image-build"]["arches"]
|
||||
if can_fail:
|
||||
image_conf['image-build']['can_fail'] = sorted(can_fail)
|
||||
image_conf["image-build"]["can_fail"] = sorted(can_fail)
|
||||
|
||||
cmd = {
|
||||
"image_conf": image_conf,
|
||||
"conf_file": self.compose.paths.work.image_build_conf(
|
||||
image_conf["image-build"]['variant'],
|
||||
image_name=image_conf["image-build"]['name'],
|
||||
image_type='-'.join(formats),
|
||||
arches=image_conf["image-build"]['arches'],
|
||||
image_conf["image-build"]["variant"],
|
||||
image_name=image_conf["image-build"]["name"],
|
||||
image_type="-".join(formats),
|
||||
arches=image_conf["image-build"]["arches"],
|
||||
),
|
||||
"image_dir": self.compose.paths.compose.image_dir(variant),
|
||||
"relative_image_dir": self.compose.paths.compose.image_dir(
|
||||
variant, relative=True
|
||||
),
|
||||
"link_type": self.compose.conf["link_type"],
|
||||
"scratch": image_conf['image-build'].pop('scratch', False),
|
||||
"scratch": image_conf["image-build"].pop("scratch", False),
|
||||
}
|
||||
self.pool.add(CreateImageBuildThread(self.pool))
|
||||
self.pool.queue_put((self.compose, cmd))
|
||||
@ -175,33 +195,45 @@ class CreateImageBuildThread(WorkerThread):
|
||||
compose, cmd = item
|
||||
variant = cmd["image_conf"]["image-build"]["variant"]
|
||||
subvariant = cmd["image_conf"]["image-build"].get("subvariant", variant.uid)
|
||||
self.failable_arches = cmd["image_conf"]['image-build'].get('can_fail', '')
|
||||
self.can_fail = self.failable_arches == cmd['image_conf']['image-build']['arches']
|
||||
with failable(compose, self.can_fail, variant, '*', 'image-build', subvariant,
|
||||
logger=self.pool._logger):
|
||||
self.failable_arches = cmd["image_conf"]["image-build"].get("can_fail", "")
|
||||
self.can_fail = (
|
||||
self.failable_arches == cmd["image_conf"]["image-build"]["arches"]
|
||||
)
|
||||
with failable(
|
||||
compose,
|
||||
self.can_fail,
|
||||
variant,
|
||||
"*",
|
||||
"image-build",
|
||||
subvariant,
|
||||
logger=self.pool._logger,
|
||||
):
|
||||
self.worker(num, compose, variant, subvariant, cmd)
|
||||
|
||||
def worker(self, num, compose, variant, subvariant, cmd):
|
||||
arches = cmd["image_conf"]["image-build"]['arches']
|
||||
formats = '-'.join(cmd['image_conf']['image-build']['format'])
|
||||
dash_arches = '-'.join(arches)
|
||||
arches = cmd["image_conf"]["image-build"]["arches"]
|
||||
formats = "-".join(cmd["image_conf"]["image-build"]["format"])
|
||||
dash_arches = "-".join(arches)
|
||||
log_file = compose.paths.log.log_file(
|
||||
dash_arches,
|
||||
"imagebuild-%s-%s-%s" % (variant.uid, subvariant, formats)
|
||||
dash_arches, "imagebuild-%s-%s-%s" % (variant.uid, subvariant, formats)
|
||||
)
|
||||
msg = (
|
||||
"Creating image (formats: %s, arches: %s, variant: %s, subvariant: %s)"
|
||||
% (formats, dash_arches, variant, subvariant)
|
||||
)
|
||||
msg = ("Creating image (formats: %s, arches: %s, variant: %s, subvariant: %s)"
|
||||
% (formats, dash_arches, variant, subvariant))
|
||||
self.pool.log_info("[BEGIN] %s" % msg)
|
||||
|
||||
koji_wrapper = KojiWrapper(compose.conf["koji_profile"])
|
||||
|
||||
# writes conf file for koji image-build
|
||||
self.pool.log_info("Writing image-build config for %s.%s into %s" % (
|
||||
variant, dash_arches, cmd["conf_file"]))
|
||||
self.pool.log_info(
|
||||
"Writing image-build config for %s.%s into %s"
|
||||
% (variant, dash_arches, cmd["conf_file"])
|
||||
)
|
||||
|
||||
koji_cmd = koji_wrapper.get_image_build_cmd(cmd["image_conf"],
|
||||
conf_file_dest=cmd["conf_file"],
|
||||
scratch=cmd['scratch'])
|
||||
koji_cmd = koji_wrapper.get_image_build_cmd(
|
||||
cmd["image_conf"], conf_file_dest=cmd["conf_file"], scratch=cmd["scratch"]
|
||||
)
|
||||
|
||||
# avoid race conditions?
|
||||
# Kerberos authentication failed: Permission denied in replay cache code (-1765328215)
|
||||
@ -210,26 +242,34 @@ class CreateImageBuildThread(WorkerThread):
|
||||
self.pool.log_debug("build-image outputs: %s" % (output))
|
||||
if output["retcode"] != 0:
|
||||
self.fail(compose, cmd)
|
||||
raise RuntimeError("ImageBuild task failed: %s. See %s for more details."
|
||||
% (output["task_id"], log_file))
|
||||
raise RuntimeError(
|
||||
"ImageBuild task failed: %s. See %s for more details."
|
||||
% (output["task_id"], log_file)
|
||||
)
|
||||
|
||||
# copy image to images/
|
||||
image_infos = []
|
||||
|
||||
paths = koji_wrapper.get_image_paths(
|
||||
output["task_id"],
|
||||
callback=lambda arch: log_failed_task(compose, variant, arch, 'image-build', subvariant)
|
||||
callback=lambda arch: log_failed_task(
|
||||
compose, variant, arch, "image-build", subvariant
|
||||
),
|
||||
)
|
||||
|
||||
for arch, paths in paths.items():
|
||||
for path in paths:
|
||||
for format in cmd['image_conf']['image-build']['format']:
|
||||
for format in cmd["image_conf"]["image-build"]["format"]:
|
||||
for suffix in EXTENSIONS[format]:
|
||||
if path.endswith(suffix):
|
||||
image_infos.append({'path': path,
|
||||
'suffix': suffix,
|
||||
'type': format,
|
||||
'arch': arch})
|
||||
image_infos.append(
|
||||
{
|
||||
"path": path,
|
||||
"suffix": suffix,
|
||||
"type": format,
|
||||
"arch": arch,
|
||||
}
|
||||
)
|
||||
break
|
||||
|
||||
# The usecase here is that you can run koji image-build with multiple --format
|
||||
@ -237,30 +277,32 @@ class CreateImageBuildThread(WorkerThread):
|
||||
# image_build record
|
||||
linker = Linker(logger=self.pool._logger)
|
||||
for image_info in image_infos:
|
||||
image_dir = cmd["image_dir"] % {"arch": image_info['arch']}
|
||||
image_dir = cmd["image_dir"] % {"arch": image_info["arch"]}
|
||||
makedirs(image_dir)
|
||||
relative_image_dir = cmd["relative_image_dir"] % {"arch": image_info['arch']}
|
||||
relative_image_dir = cmd["relative_image_dir"] % {
|
||||
"arch": image_info["arch"]
|
||||
}
|
||||
|
||||
# let's not change filename of koji outputs
|
||||
image_dest = os.path.join(image_dir, os.path.basename(image_info['path']))
|
||||
image_dest = os.path.join(image_dir, os.path.basename(image_info["path"]))
|
||||
|
||||
src_file = os.path.realpath(image_info["path"])
|
||||
linker.link(src_file, image_dest, link_type=cmd["link_type"])
|
||||
|
||||
# Update image manifest
|
||||
img = Image(compose.im)
|
||||
img.type = image_info['type']
|
||||
img.format = image_info['suffix']
|
||||
img.type = image_info["type"]
|
||||
img.format = image_info["suffix"]
|
||||
img.path = os.path.join(relative_image_dir, os.path.basename(image_dest))
|
||||
img.mtime = get_mtime(image_dest)
|
||||
img.size = get_file_size(image_dest)
|
||||
img.arch = image_info['arch']
|
||||
img.disc_number = 1 # We don't expect multiple disks
|
||||
img.arch = image_info["arch"]
|
||||
img.disc_number = 1 # We don't expect multiple disks
|
||||
img.disc_count = 1
|
||||
img.bootable = False
|
||||
img.subvariant = subvariant
|
||||
setattr(img, 'can_fail', self.can_fail)
|
||||
setattr(img, 'deliverable', 'image-build')
|
||||
compose.im.add(variant=variant.uid, arch=image_info['arch'], image=img)
|
||||
setattr(img, "can_fail", self.can_fail)
|
||||
setattr(img, "deliverable", "image-build")
|
||||
compose.im.add(variant=variant.uid, arch=image_info["arch"], image=img)
|
||||
|
||||
self.pool.log_info("[DONE ] %s (task id: %s)" % (msg, output['task_id']))
|
||||
self.pool.log_info("[DONE ] %s (task id: %s)" % (msg, output["task_id"]))
|
||||
|
@ -19,12 +19,12 @@ class ImageChecksumPhase(PhaseBase):
|
||||
checksums. The manifest will be updated with the checksums.
|
||||
"""
|
||||
|
||||
name = 'image_checksum'
|
||||
name = "image_checksum"
|
||||
|
||||
def __init__(self, compose):
|
||||
super(ImageChecksumPhase, self).__init__(compose)
|
||||
self.checksums = self.compose.conf['media_checksums']
|
||||
self.one_file = self.compose.conf['media_checksum_one_file']
|
||||
self.checksums = self.compose.conf["media_checksums"]
|
||||
self.one_file = self.compose.conf["media_checksum_one_file"]
|
||||
|
||||
def skip(self):
|
||||
# Skipping this phase does not make sense:
|
||||
@ -40,7 +40,7 @@ class ImageChecksumPhase(PhaseBase):
|
||||
errors.append(MULTIPLE_CHECKSUMS_ERROR)
|
||||
|
||||
if errors:
|
||||
raise ValueError('\n'.join(errors))
|
||||
raise ValueError("\n".join(errors))
|
||||
|
||||
def _get_images(self):
|
||||
"""Returns a mapping from directories to sets of ``Image``s.
|
||||
@ -57,20 +57,37 @@ class ImageChecksumPhase(PhaseBase):
|
||||
return images
|
||||
|
||||
def _get_base_filename(self, variant, arch, **kwargs):
|
||||
base_checksum_name = self.compose.conf['media_checksum_base_filename']
|
||||
base_checksum_name = self.compose.conf["media_checksum_base_filename"]
|
||||
if base_checksum_name:
|
||||
substs = get_format_substs(self.compose, variant=variant, arch=arch, **kwargs)
|
||||
substs = get_format_substs(
|
||||
self.compose, variant=variant, arch=arch, **kwargs
|
||||
)
|
||||
base_checksum_name = (base_checksum_name % substs).format(**substs)
|
||||
base_checksum_name += '-'
|
||||
base_checksum_name += "-"
|
||||
return base_checksum_name
|
||||
|
||||
def run(self):
|
||||
topdir = self.compose.paths.compose.topdir()
|
||||
make_checksums(topdir, self.compose.im, self.checksums, self.one_file, self._get_base_filename)
|
||||
make_checksums(
|
||||
topdir,
|
||||
self.compose.im,
|
||||
self.checksums,
|
||||
self.one_file,
|
||||
self._get_base_filename,
|
||||
)
|
||||
|
||||
|
||||
def _compute_checksums(results, cache, variant, arch, path, images,
|
||||
checksum_types, base_checksum_name_gen, one_file):
|
||||
def _compute_checksums(
|
||||
results,
|
||||
cache,
|
||||
variant,
|
||||
arch,
|
||||
path,
|
||||
images,
|
||||
checksum_types,
|
||||
base_checksum_name_gen,
|
||||
one_file,
|
||||
):
|
||||
for image in images:
|
||||
filename = os.path.basename(image.path)
|
||||
full_path = os.path.join(path, filename)
|
||||
@ -83,23 +100,29 @@ def _compute_checksums(results, cache, variant, arch, path, images,
|
||||
# Source ISO is listed under each binary architecture. There's no
|
||||
# point in checksumming it twice, so we can just remember the
|
||||
# digest from first run..
|
||||
cache[full_path] = shortcuts.compute_file_checksums(full_path, checksum_types)
|
||||
cache[full_path] = shortcuts.compute_file_checksums(
|
||||
full_path, checksum_types
|
||||
)
|
||||
digests = cache[full_path]
|
||||
for checksum, digest in digests.items():
|
||||
# Update metadata with the checksum
|
||||
image.add_checksum(None, checksum, digest)
|
||||
# If not turned off, create the file-specific checksum file
|
||||
if not one_file:
|
||||
checksum_filename = os.path.join(path, '%s.%sSUM' % (filename, checksum.upper()))
|
||||
checksum_filename = os.path.join(
|
||||
path, "%s.%sSUM" % (filename, checksum.upper())
|
||||
)
|
||||
results[checksum_filename].add((filename, filesize, checksum, digest))
|
||||
|
||||
if one_file:
|
||||
dirname = os.path.basename(path)
|
||||
base_checksum_name = base_checksum_name_gen(variant, arch, dirname=dirname)
|
||||
checksum_filename = base_checksum_name + 'CHECKSUM'
|
||||
base_checksum_name = base_checksum_name_gen(
|
||||
variant, arch, dirname=dirname
|
||||
)
|
||||
checksum_filename = base_checksum_name + "CHECKSUM"
|
||||
else:
|
||||
base_checksum_name = base_checksum_name_gen(variant, arch)
|
||||
checksum_filename = '%s%sSUM' % (base_checksum_name, checksum.upper())
|
||||
checksum_filename = "%s%sSUM" % (base_checksum_name, checksum.upper())
|
||||
checksum_path = os.path.join(path, checksum_filename)
|
||||
|
||||
results[checksum_path].add((filename, filesize, checksum, digest))
|
||||
@ -109,8 +132,17 @@ def make_checksums(topdir, im, checksum_types, one_file, base_checksum_name_gen)
|
||||
results = defaultdict(set)
|
||||
cache = {}
|
||||
for (variant, arch, path), images in get_images(topdir, im).items():
|
||||
_compute_checksums(results, cache, variant, arch, path, images,
|
||||
checksum_types, base_checksum_name_gen, one_file)
|
||||
_compute_checksums(
|
||||
results,
|
||||
cache,
|
||||
variant,
|
||||
arch,
|
||||
path,
|
||||
images,
|
||||
checksum_types,
|
||||
base_checksum_name_gen,
|
||||
one_file,
|
||||
)
|
||||
|
||||
for file in results:
|
||||
dump_checksums(file, results[file])
|
||||
@ -122,10 +154,10 @@ def dump_checksums(checksum_file, data):
|
||||
:param checksum_file: where to write the checksums
|
||||
:param data: an iterable of tuples (filename, filesize, checksum_type, hash)
|
||||
"""
|
||||
with open(checksum_file, 'w') as f:
|
||||
with open(checksum_file, "w") as f:
|
||||
for filename, filesize, alg, checksum in sorted(data):
|
||||
f.write('# %s: %s bytes\n' % (filename, filesize))
|
||||
f.write('%s (%s) = %s\n' % (alg.upper(), filename, checksum))
|
||||
f.write("# %s: %s bytes\n" % (filename, filesize))
|
||||
f.write("%s (%s) = %s\n" % (alg.upper(), filename, checksum))
|
||||
|
||||
|
||||
def get_images(top_dir, manifest):
|
||||
|
@ -32,6 +32,7 @@ from pungi.wrappers.scm import get_dir_from_scm, get_file_from_scm
|
||||
|
||||
class InitPhase(PhaseBase):
|
||||
"""INIT is a mandatory phase"""
|
||||
|
||||
name = "init"
|
||||
|
||||
def skip(self):
|
||||
@ -44,7 +45,7 @@ class InitPhase(PhaseBase):
|
||||
# write global comps and arch comps, create comps repos
|
||||
global_comps = write_global_comps(self.compose)
|
||||
validate_comps(global_comps)
|
||||
num_workers = self.compose.conf['createrepo_num_threads']
|
||||
num_workers = self.compose.conf["createrepo_num_threads"]
|
||||
run_in_threads(
|
||||
_arch_worker,
|
||||
[(self.compose, arch) for arch in self.compose.get_arches()],
|
||||
@ -112,12 +113,18 @@ def write_arch_comps(compose, arch):
|
||||
comps_file_arch = compose.paths.work.comps(arch=arch)
|
||||
|
||||
compose.log_debug("Writing comps file for arch '%s': %s", arch, comps_file_arch)
|
||||
run(["comps_filter", "--arch=%s" % arch, "--no-cleanup",
|
||||
"--output=%s" % comps_file_arch,
|
||||
compose.paths.work.comps(arch="global")])
|
||||
run(
|
||||
[
|
||||
"comps_filter",
|
||||
"--arch=%s" % arch,
|
||||
"--no-cleanup",
|
||||
"--output=%s" % comps_file_arch,
|
||||
compose.paths.work.comps(arch="global"),
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
UNMATCHED_GROUP_MSG = 'Variant %s.%s requires comps group %s which does not match anything in input comps file'
|
||||
UNMATCHED_GROUP_MSG = "Variant %s.%s requires comps group %s which does not match anything in input comps file"
|
||||
|
||||
|
||||
def get_lookaside_groups(compose, variant):
|
||||
@ -146,14 +153,14 @@ def write_variant_comps(compose, arch, variant):
|
||||
"--keep-empty-group=conflicts-%s" % variant.uid.lower(),
|
||||
"--variant=%s" % variant.uid,
|
||||
"--output=%s" % comps_file,
|
||||
compose.paths.work.comps(arch="global")
|
||||
compose.paths.work.comps(arch="global"),
|
||||
]
|
||||
for group in get_lookaside_groups(compose, variant):
|
||||
cmd.append("--lookaside-group=%s" % group)
|
||||
run(cmd)
|
||||
|
||||
comps = CompsWrapper(comps_file)
|
||||
if variant.groups or variant.modules is not None or variant.type != 'variant':
|
||||
if variant.groups or variant.modules is not None or variant.type != "variant":
|
||||
# Filter groups if the variant has some, or it's a modular variant, or
|
||||
# is not a base variant.
|
||||
unmatched = comps.filter_groups(variant.groups)
|
||||
@ -175,11 +182,15 @@ def create_comps_repo(compose, arch, variant):
|
||||
repo = CreaterepoWrapper(createrepo_c=createrepo_c)
|
||||
comps_repo = compose.paths.work.comps_repo(arch=arch, variant=variant)
|
||||
comps_path = compose.paths.work.comps(arch=arch, variant=variant)
|
||||
msg = "Creating comps repo for arch '%s' variant '%s'" % (arch, variant.uid if variant else None)
|
||||
msg = "Creating comps repo for arch '%s' variant '%s'" % (
|
||||
arch,
|
||||
variant.uid if variant else None,
|
||||
)
|
||||
|
||||
compose.log_info("[BEGIN] %s" % msg)
|
||||
cmd = repo.get_createrepo_cmd(
|
||||
comps_repo, database=False,
|
||||
comps_repo,
|
||||
database=False,
|
||||
outputdir=comps_repo,
|
||||
groupfile=comps_path,
|
||||
checksum=createrepo_checksum,
|
||||
@ -200,7 +211,9 @@ def write_module_defaults(compose):
|
||||
with temp_dir(prefix="moduledefaults_") as tmp_dir:
|
||||
get_dir_from_scm(scm_dict, tmp_dir, compose=compose)
|
||||
compose.log_debug("Writing module defaults")
|
||||
shutil.copytree(tmp_dir, compose.paths.work.module_defaults_dir(create_dir=False))
|
||||
shutil.copytree(
|
||||
tmp_dir, compose.paths.work.module_defaults_dir(create_dir=False)
|
||||
)
|
||||
|
||||
|
||||
def validate_module_defaults(path):
|
||||
|
@ -33,11 +33,14 @@ from pungi.util import get_repo_urls
|
||||
|
||||
# HACK: define cmp in python3
|
||||
if sys.version_info[0] == 3:
|
||||
|
||||
def cmp(a, b):
|
||||
return (a > b) - (a < b)
|
||||
|
||||
|
||||
class LiveImagesPhase(base.PhaseLoggerMixin, base.ImageConfigMixin, base.ConfigGuardedPhase):
|
||||
class LiveImagesPhase(
|
||||
base.PhaseLoggerMixin, base.ImageConfigMixin, base.ConfigGuardedPhase
|
||||
):
|
||||
name = "live_images"
|
||||
|
||||
def __init__(self, compose):
|
||||
@ -48,7 +51,7 @@ class LiveImagesPhase(base.PhaseLoggerMixin, base.ImageConfigMixin, base.ConfigG
|
||||
repos = []
|
||||
if not variant.is_empty:
|
||||
repos.append(variant.uid)
|
||||
repos.extend(force_list(data.get('repo', [])))
|
||||
repos.extend(force_list(data.get("repo", [])))
|
||||
return get_repo_urls(self.compose, repos, arch=arch)
|
||||
|
||||
def run(self):
|
||||
@ -58,27 +61,31 @@ class LiveImagesPhase(base.PhaseLoggerMixin, base.ImageConfigMixin, base.ConfigG
|
||||
for variant in self.compose.all_variants.values():
|
||||
for arch in variant.arches + ["src"]:
|
||||
for data in self.get_config_block(variant, arch):
|
||||
subvariant = data.get('subvariant', variant.uid)
|
||||
type = data.get('type', 'live')
|
||||
subvariant = data.get("subvariant", variant.uid)
|
||||
type = data.get("type", "live")
|
||||
|
||||
if type == 'live':
|
||||
dest_dir = self.compose.paths.compose.iso_dir(arch, variant, symlink_to=symlink_isos_to)
|
||||
elif type == 'appliance':
|
||||
dest_dir = self.compose.paths.compose.image_dir(variant, symlink_to=symlink_isos_to)
|
||||
dest_dir = dest_dir % {'arch': arch}
|
||||
if type == "live":
|
||||
dest_dir = self.compose.paths.compose.iso_dir(
|
||||
arch, variant, symlink_to=symlink_isos_to
|
||||
)
|
||||
elif type == "appliance":
|
||||
dest_dir = self.compose.paths.compose.image_dir(
|
||||
variant, symlink_to=symlink_isos_to
|
||||
)
|
||||
dest_dir = dest_dir % {"arch": arch}
|
||||
makedirs(dest_dir)
|
||||
else:
|
||||
raise RuntimeError('Unknown live image type %s' % type)
|
||||
raise RuntimeError("Unknown live image type %s" % type)
|
||||
if not dest_dir:
|
||||
continue
|
||||
|
||||
cmd = {
|
||||
"name": data.get('name'),
|
||||
"name": data.get("name"),
|
||||
"version": self.get_version(data),
|
||||
"release": self.get_release(data),
|
||||
"dest_dir": dest_dir,
|
||||
"build_arch": arch,
|
||||
"ks_file": data['kickstart'],
|
||||
"ks_file": data["kickstart"],
|
||||
"ksurl": self.get_ksurl(data),
|
||||
# Used for images wrapped in RPM
|
||||
"specfile": data.get("specfile", None),
|
||||
@ -91,10 +98,11 @@ class LiveImagesPhase(base.PhaseLoggerMixin, base.ImageConfigMixin, base.ConfigG
|
||||
"type": type,
|
||||
"label": "", # currently not used
|
||||
"subvariant": subvariant,
|
||||
"failable_arches": data.get('failable', []),
|
||||
"failable_arches": data.get("failable", []),
|
||||
# First see if live_target is specified, then fall back
|
||||
# to regular setup of local, phase and global setting.
|
||||
"target": self.compose.conf.get('live_target') or self.get_config(data, 'target'),
|
||||
"target": self.compose.conf.get("live_target")
|
||||
or self.get_config(data, "target"),
|
||||
}
|
||||
|
||||
cmd["repos"] = self._get_repos(arch, variant, data)
|
||||
@ -103,7 +111,9 @@ class LiveImagesPhase(base.PhaseLoggerMixin, base.ImageConfigMixin, base.ConfigG
|
||||
if not cmd["scratch"] and data.get("sign"):
|
||||
cmd["sign"] = True
|
||||
|
||||
cmd['filename'] = self._get_file_name(arch, variant, cmd['name'], cmd['version'])
|
||||
cmd["filename"] = self._get_file_name(
|
||||
arch, variant, cmd["name"], cmd["version"]
|
||||
)
|
||||
|
||||
commands.append((cmd, variant, arch))
|
||||
|
||||
@ -114,46 +124,66 @@ class LiveImagesPhase(base.PhaseLoggerMixin, base.ImageConfigMixin, base.ConfigG
|
||||
self.pool.start()
|
||||
|
||||
def _get_file_name(self, arch, variant, name=None, version=None):
|
||||
if self.compose.conf['live_images_no_rename']:
|
||||
if self.compose.conf["live_images_no_rename"]:
|
||||
return None
|
||||
|
||||
disc_type = self.compose.conf['disc_types'].get('live', 'live')
|
||||
disc_type = self.compose.conf["disc_types"].get("live", "live")
|
||||
|
||||
format = "%(compose_id)s-%(variant)s-%(arch)s-%(disc_type)s%(disc_num)s%(suffix)s"
|
||||
format = (
|
||||
"%(compose_id)s-%(variant)s-%(arch)s-%(disc_type)s%(disc_num)s%(suffix)s"
|
||||
)
|
||||
# Custom name (prefix)
|
||||
if name:
|
||||
custom_iso_name = name
|
||||
if version:
|
||||
custom_iso_name += "-%s" % version
|
||||
format = custom_iso_name + "-%(variant)s-%(arch)s-%(disc_type)s%(disc_num)s%(suffix)s"
|
||||
format = (
|
||||
custom_iso_name
|
||||
+ "-%(variant)s-%(arch)s-%(disc_type)s%(disc_num)s%(suffix)s"
|
||||
)
|
||||
|
||||
# XXX: hardcoded disc_num
|
||||
return self.compose.get_image_name(arch, variant, disc_type=disc_type,
|
||||
disc_num=None, format=format)
|
||||
return self.compose.get_image_name(
|
||||
arch, variant, disc_type=disc_type, disc_num=None, format=format
|
||||
)
|
||||
|
||||
|
||||
class CreateLiveImageThread(WorkerThread):
|
||||
EXTS = ('.iso', '.raw.xz')
|
||||
EXTS = (".iso", ".raw.xz")
|
||||
|
||||
def process(self, item, num):
|
||||
compose, cmd, variant, arch = item
|
||||
self.failable_arches = cmd.get('failable_arches', [])
|
||||
self.failable_arches = cmd.get("failable_arches", [])
|
||||
self.can_fail = bool(self.failable_arches)
|
||||
with failable(compose, self.can_fail, variant, arch, 'live', cmd.get('subvariant'),
|
||||
logger=self.pool._logger):
|
||||
with failable(
|
||||
compose,
|
||||
self.can_fail,
|
||||
variant,
|
||||
arch,
|
||||
"live",
|
||||
cmd.get("subvariant"),
|
||||
logger=self.pool._logger,
|
||||
):
|
||||
self.worker(compose, cmd, variant, arch, num)
|
||||
|
||||
def worker(self, compose, cmd, variant, arch, num):
|
||||
self.basename = '%(name)s-%(version)s-%(release)s' % cmd
|
||||
self.basename = "%(name)s-%(version)s-%(release)s" % cmd
|
||||
log_file = compose.paths.log.log_file(arch, "liveimage-%s" % self.basename)
|
||||
|
||||
subvariant = cmd.pop('subvariant')
|
||||
subvariant = cmd.pop("subvariant")
|
||||
|
||||
imgname = "%s-%s-%s-%s" % (compose.ci_base.release.short, subvariant,
|
||||
'Live' if cmd['type'] == 'live' else 'Disk',
|
||||
arch)
|
||||
imgname = "%s-%s-%s-%s" % (
|
||||
compose.ci_base.release.short,
|
||||
subvariant,
|
||||
"Live" if cmd["type"] == "live" else "Disk",
|
||||
arch,
|
||||
)
|
||||
|
||||
msg = "Creating ISO (arch: %s, variant: %s): %s" % (arch, variant, self.basename)
|
||||
msg = "Creating ISO (arch: %s, variant: %s): %s" % (
|
||||
arch,
|
||||
variant,
|
||||
self.basename,
|
||||
)
|
||||
self.pool.log_info("[BEGIN] %s" % msg)
|
||||
|
||||
koji_wrapper = KojiWrapper(compose.conf["koji_profile"])
|
||||
@ -164,17 +194,20 @@ class CreateLiveImageThread(WorkerThread):
|
||||
if cmd["specfile"] and not cmd["scratch"]:
|
||||
# Non scratch build are allowed only for rpm wrapped images
|
||||
archive = True
|
||||
koji_cmd = koji_wrapper.get_create_image_cmd(name, version,
|
||||
cmd["target"],
|
||||
cmd["build_arch"],
|
||||
cmd["ks_file"],
|
||||
cmd["repos"],
|
||||
image_type=cmd['type'],
|
||||
wait=True,
|
||||
archive=archive,
|
||||
specfile=cmd["specfile"],
|
||||
release=cmd['release'],
|
||||
ksurl=cmd['ksurl'])
|
||||
koji_cmd = koji_wrapper.get_create_image_cmd(
|
||||
name,
|
||||
version,
|
||||
cmd["target"],
|
||||
cmd["build_arch"],
|
||||
cmd["ks_file"],
|
||||
cmd["repos"],
|
||||
image_type=cmd["type"],
|
||||
wait=True,
|
||||
archive=archive,
|
||||
specfile=cmd["specfile"],
|
||||
release=cmd["release"],
|
||||
ksurl=cmd["ksurl"],
|
||||
)
|
||||
|
||||
# avoid race conditions?
|
||||
# Kerberos authentication failed: Permission denied in replay cache code (-1765328215)
|
||||
@ -182,17 +215,25 @@ class CreateLiveImageThread(WorkerThread):
|
||||
|
||||
output = koji_wrapper.run_blocking_cmd(koji_cmd, log_file=log_file)
|
||||
if output["retcode"] != 0:
|
||||
raise RuntimeError("LiveImage task failed: %s. See %s for more details." % (output["task_id"], log_file))
|
||||
raise RuntimeError(
|
||||
"LiveImage task failed: %s. See %s for more details."
|
||||
% (output["task_id"], log_file)
|
||||
)
|
||||
|
||||
# copy finished image to isos/
|
||||
image_path = [path for path in koji_wrapper.get_image_path(output["task_id"])
|
||||
if self._is_image(path)]
|
||||
image_path = [
|
||||
path
|
||||
for path in koji_wrapper.get_image_path(output["task_id"])
|
||||
if self._is_image(path)
|
||||
]
|
||||
if len(image_path) != 1:
|
||||
raise RuntimeError('Got %d images from task %d, expected 1.'
|
||||
% (len(image_path), output['task_id']))
|
||||
raise RuntimeError(
|
||||
"Got %d images from task %d, expected 1."
|
||||
% (len(image_path), output["task_id"])
|
||||
)
|
||||
image_path = image_path[0]
|
||||
filename = cmd.get('filename') or os.path.basename(image_path)
|
||||
destination = os.path.join(cmd['dest_dir'], filename)
|
||||
filename = cmd.get("filename") or os.path.basename(image_path)
|
||||
destination = os.path.join(cmd["dest_dir"], filename)
|
||||
shutil.copy2(image_path, destination)
|
||||
|
||||
# copy finished rpm to isos/ (if rpm wrapped ISO was built)
|
||||
@ -201,38 +242,50 @@ class CreateLiveImageThread(WorkerThread):
|
||||
|
||||
if cmd["sign"]:
|
||||
# Sign the rpm wrapped images and get their paths
|
||||
self.pool.log_info("Signing rpm wrapped images in task_id: %s (expected key ID: %s)"
|
||||
% (output["task_id"], compose.conf.get("signing_key_id")))
|
||||
signed_rpm_paths = self._sign_image(koji_wrapper, compose, cmd, output["task_id"])
|
||||
self.pool.log_info(
|
||||
"Signing rpm wrapped images in task_id: %s (expected key ID: %s)"
|
||||
% (output["task_id"], compose.conf.get("signing_key_id"))
|
||||
)
|
||||
signed_rpm_paths = self._sign_image(
|
||||
koji_wrapper, compose, cmd, output["task_id"]
|
||||
)
|
||||
if signed_rpm_paths:
|
||||
rpm_paths = signed_rpm_paths
|
||||
|
||||
for rpm_path in rpm_paths:
|
||||
shutil.copy2(rpm_path, cmd["dest_dir"])
|
||||
|
||||
if cmd['type'] == 'live':
|
||||
if cmd["type"] == "live":
|
||||
# ISO manifest only makes sense for live images
|
||||
self._write_manifest(destination)
|
||||
|
||||
self._add_to_images(compose, variant, subvariant, arch, cmd['type'], self._get_format(image_path), destination)
|
||||
self._add_to_images(
|
||||
compose,
|
||||
variant,
|
||||
subvariant,
|
||||
arch,
|
||||
cmd["type"],
|
||||
self._get_format(image_path),
|
||||
destination,
|
||||
)
|
||||
|
||||
self.pool.log_info("[DONE ] %s (task id: %s)" % (msg, output['task_id']))
|
||||
self.pool.log_info("[DONE ] %s (task id: %s)" % (msg, output["task_id"]))
|
||||
|
||||
def _add_to_images(self, compose, variant, subvariant, arch, type, format, path):
|
||||
"""Adds the image to images.json"""
|
||||
img = Image(compose.im)
|
||||
img.type = 'raw-xz' if type == 'appliance' else type
|
||||
img.type = "raw-xz" if type == "appliance" else type
|
||||
img.format = format
|
||||
img.path = os.path.relpath(path, compose.paths.compose.topdir())
|
||||
img.mtime = get_mtime(path)
|
||||
img.size = get_file_size(path)
|
||||
img.arch = arch
|
||||
img.disc_number = 1 # We don't expect multiple disks
|
||||
img.disc_number = 1 # We don't expect multiple disks
|
||||
img.disc_count = 1
|
||||
img.bootable = True
|
||||
img.subvariant = subvariant
|
||||
setattr(img, 'can_fail', self.can_fail)
|
||||
setattr(img, 'deliverable', 'live')
|
||||
setattr(img, "can_fail", self.can_fail)
|
||||
setattr(img, "deliverable", "live")
|
||||
compose.im.add(variant=variant.uid, arch=arch, image=img)
|
||||
|
||||
def _is_image(self, path):
|
||||
@ -246,7 +299,7 @@ class CreateLiveImageThread(WorkerThread):
|
||||
for ext in self.EXTS:
|
||||
if path.endswith(ext):
|
||||
return ext[1:]
|
||||
raise RuntimeError('Getting format for unknown image %s' % path)
|
||||
raise RuntimeError("Getting format for unknown image %s" % path)
|
||||
|
||||
def _write_manifest(self, iso_path):
|
||||
"""Generate manifest for ISO at given path.
|
||||
@ -261,30 +314,43 @@ class CreateLiveImageThread(WorkerThread):
|
||||
signing_command = compose.conf.get("signing_command")
|
||||
|
||||
if not signing_key_id:
|
||||
self.pool.log_warning("Signing is enabled but signing_key_id is not specified")
|
||||
self.pool.log_warning(
|
||||
"Signing is enabled but signing_key_id is not specified"
|
||||
)
|
||||
self.pool.log_warning("Signing skipped")
|
||||
return None
|
||||
if not signing_command:
|
||||
self.pool.log_warning("Signing is enabled but signing_command is not specified")
|
||||
self.pool.log_warning(
|
||||
"Signing is enabled but signing_command is not specified"
|
||||
)
|
||||
self.pool.log_warning("Signing skipped")
|
||||
return None
|
||||
|
||||
# Prepare signing log file
|
||||
signing_log_file = compose.paths.log.log_file(cmd["build_arch"],
|
||||
"live_images-signing-%s" % self.basename)
|
||||
signing_log_file = compose.paths.log.log_file(
|
||||
cmd["build_arch"], "live_images-signing-%s" % self.basename
|
||||
)
|
||||
|
||||
# Sign the rpm wrapped images
|
||||
try:
|
||||
sign_builds_in_task(koji_wrapper, koji_task_id, signing_command,
|
||||
log_file=signing_log_file,
|
||||
signing_key_password=compose.conf.get("signing_key_password"))
|
||||
sign_builds_in_task(
|
||||
koji_wrapper,
|
||||
koji_task_id,
|
||||
signing_command,
|
||||
log_file=signing_log_file,
|
||||
signing_key_password=compose.conf.get("signing_key_password"),
|
||||
)
|
||||
except RuntimeError:
|
||||
self.pool.log_error("Error while signing rpm wrapped images. See log: %s" % signing_log_file)
|
||||
self.pool.log_error(
|
||||
"Error while signing rpm wrapped images. See log: %s" % signing_log_file
|
||||
)
|
||||
raise
|
||||
|
||||
# Get paths to the signed rpms
|
||||
signing_key_id = signing_key_id.lower() # Koji uses lowercase in paths
|
||||
rpm_paths = koji_wrapper.get_signed_wrapped_rpms_paths(koji_task_id, signing_key_id)
|
||||
rpm_paths = koji_wrapper.get_signed_wrapped_rpms_paths(
|
||||
koji_task_id, signing_key_id
|
||||
)
|
||||
|
||||
# Wait until files are available
|
||||
if wait_paths(rpm_paths, 60 * 15):
|
||||
@ -312,7 +378,9 @@ def wait_paths(paths, timeout=60):
|
||||
return True
|
||||
|
||||
|
||||
def sign_builds_in_task(koji_wrapper, task_id, signing_command, log_file=None, signing_key_password=None):
|
||||
def sign_builds_in_task(
|
||||
koji_wrapper, task_id, signing_command, log_file=None, signing_key_password=None
|
||||
):
|
||||
# Get list of nvrs that should be signed
|
||||
nvrs = koji_wrapper.get_build_nvrs(task_id)
|
||||
if not nvrs:
|
||||
@ -329,7 +397,9 @@ def sign_builds_in_task(koji_wrapper, task_id, signing_command, log_file=None, s
|
||||
|
||||
# Fill password into the signing command
|
||||
if signing_key_password:
|
||||
signing_command = signing_command % {"signing_key_password": signing_key_password}
|
||||
signing_command = signing_command % {
|
||||
"signing_key_password": signing_key_password
|
||||
}
|
||||
|
||||
# Sign the builds
|
||||
run(signing_command, can_fail=False, show_cmd=False, logfile=log_file)
|
||||
|
@ -15,7 +15,8 @@ from productmd.images import Image
|
||||
|
||||
class LiveMediaPhase(PhaseLoggerMixin, ImageConfigMixin, ConfigGuardedPhase):
|
||||
"""class for wrapping up koji spin-livemedia"""
|
||||
name = 'live_media'
|
||||
|
||||
name = "live_media"
|
||||
|
||||
def __init__(self, compose):
|
||||
super(LiveMediaPhase, self).__init__(compose)
|
||||
@ -26,7 +27,7 @@ class LiveMediaPhase(PhaseLoggerMixin, ImageConfigMixin, ConfigGuardedPhase):
|
||||
Get a list of repo urls. First included are those explicitly listed in config,
|
||||
followed by repo for current variant if it's not present in the list.
|
||||
"""
|
||||
repos = shortcuts.force_list(image_conf.get('repo', []))
|
||||
repos = shortcuts.force_list(image_conf.get("repo", []))
|
||||
|
||||
if not variant.is_empty:
|
||||
if variant.uid not in repos:
|
||||
@ -35,49 +36,52 @@ class LiveMediaPhase(PhaseLoggerMixin, ImageConfigMixin, ConfigGuardedPhase):
|
||||
return get_repo_urls(self.compose, repos)
|
||||
|
||||
def _get_arches(self, image_conf, arches):
|
||||
if 'arches' in image_conf:
|
||||
arches = set(image_conf.get('arches', [])) & arches
|
||||
if "arches" in image_conf:
|
||||
arches = set(image_conf.get("arches", [])) & arches
|
||||
return sorted(arches)
|
||||
|
||||
def _get_install_tree(self, image_conf, variant):
|
||||
if 'install_tree_from' in image_conf:
|
||||
variant_uid = image_conf['install_tree_from']
|
||||
if "install_tree_from" in image_conf:
|
||||
variant_uid = image_conf["install_tree_from"]
|
||||
try:
|
||||
variant = self.compose.all_variants[variant_uid]
|
||||
except KeyError:
|
||||
raise RuntimeError(
|
||||
'There is no variant %s to get repo from when building live media for %s.'
|
||||
% (variant_uid, variant.uid))
|
||||
"There is no variant %s to get repo from when building live media for %s."
|
||||
% (variant_uid, variant.uid)
|
||||
)
|
||||
return translate_path(
|
||||
self.compose,
|
||||
self.compose.paths.compose.os_tree('$basearch', variant, create_dir=False)
|
||||
self.compose.paths.compose.os_tree("$basearch", variant, create_dir=False),
|
||||
)
|
||||
|
||||
def run(self):
|
||||
for variant in self.compose.get_variants():
|
||||
arches = set([x for x in variant.arches if x != 'src'])
|
||||
arches = set([x for x in variant.arches if x != "src"])
|
||||
for image_conf in self.get_config_block(variant):
|
||||
subvariant = image_conf.get('subvariant', variant.uid)
|
||||
subvariant = image_conf.get("subvariant", variant.uid)
|
||||
name = image_conf.get(
|
||||
'name', "%s-%s-Live" % (self.compose.ci_base.release.short, subvariant))
|
||||
"name",
|
||||
"%s-%s-Live" % (self.compose.ci_base.release.short, subvariant),
|
||||
)
|
||||
config = {
|
||||
'target': self.get_config(image_conf, 'target'),
|
||||
'arches': self._get_arches(image_conf, arches),
|
||||
'ksfile': image_conf['kickstart'],
|
||||
'ksurl': self.get_ksurl(image_conf),
|
||||
'ksversion': image_conf.get('ksversion'),
|
||||
'scratch': image_conf.get('scratch', False),
|
||||
'release': self.get_release(image_conf),
|
||||
'skip_tag': image_conf.get('skip_tag'),
|
||||
'name': name,
|
||||
'subvariant': subvariant,
|
||||
'repo': self._get_repos(image_conf, variant),
|
||||
'install_tree': self._get_install_tree(image_conf, variant),
|
||||
'version': self.get_version(image_conf),
|
||||
'failable_arches': image_conf.get('failable', []),
|
||||
"target": self.get_config(image_conf, "target"),
|
||||
"arches": self._get_arches(image_conf, arches),
|
||||
"ksfile": image_conf["kickstart"],
|
||||
"ksurl": self.get_ksurl(image_conf),
|
||||
"ksversion": image_conf.get("ksversion"),
|
||||
"scratch": image_conf.get("scratch", False),
|
||||
"release": self.get_release(image_conf),
|
||||
"skip_tag": image_conf.get("skip_tag"),
|
||||
"name": name,
|
||||
"subvariant": subvariant,
|
||||
"repo": self._get_repos(image_conf, variant),
|
||||
"install_tree": self._get_install_tree(image_conf, variant),
|
||||
"version": self.get_version(image_conf),
|
||||
"failable_arches": image_conf.get("failable", []),
|
||||
}
|
||||
if config['failable_arches'] == ['*']:
|
||||
config['failable_arches'] = config['arches']
|
||||
if config["failable_arches"] == ["*"]:
|
||||
config["failable_arches"] = config["arches"]
|
||||
self.pool.add(LiveMediaThread(self.pool))
|
||||
self.pool.queue_put((self.compose, variant, config))
|
||||
|
||||
@ -87,42 +91,56 @@ class LiveMediaPhase(PhaseLoggerMixin, ImageConfigMixin, ConfigGuardedPhase):
|
||||
class LiveMediaThread(WorkerThread):
|
||||
def process(self, item, num):
|
||||
compose, variant, config = item
|
||||
subvariant = config.pop('subvariant')
|
||||
self.failable_arches = config.pop('failable_arches')
|
||||
subvariant = config.pop("subvariant")
|
||||
self.failable_arches = config.pop("failable_arches")
|
||||
self.num = num
|
||||
can_fail = set(self.failable_arches) == set(config['arches'])
|
||||
with failable(compose, can_fail, variant, '*', 'live-media', subvariant,
|
||||
logger=self.pool._logger):
|
||||
can_fail = set(self.failable_arches) == set(config["arches"])
|
||||
with failable(
|
||||
compose,
|
||||
can_fail,
|
||||
variant,
|
||||
"*",
|
||||
"live-media",
|
||||
subvariant,
|
||||
logger=self.pool._logger,
|
||||
):
|
||||
self.worker(compose, variant, subvariant, config)
|
||||
|
||||
def _get_log_file(self, compose, variant, subvariant, config):
|
||||
arches = '-'.join(config['arches'])
|
||||
return compose.paths.log.log_file(arches, 'livemedia-%s-%s'
|
||||
% (variant.uid, subvariant))
|
||||
arches = "-".join(config["arches"])
|
||||
return compose.paths.log.log_file(
|
||||
arches, "livemedia-%s-%s" % (variant.uid, subvariant)
|
||||
)
|
||||
|
||||
def _run_command(self, koji_wrapper, cmd, compose, log_file):
|
||||
time.sleep(self.num * 3)
|
||||
output = koji_wrapper.run_blocking_cmd(cmd, log_file=log_file)
|
||||
self.pool.log_debug('live media outputs: %s' % (output))
|
||||
if output['retcode'] != 0:
|
||||
self.pool.log_error('Live media task failed.')
|
||||
raise RuntimeError('Live media task failed: %s. See %s for more details.'
|
||||
% (output['task_id'], log_file))
|
||||
self.pool.log_debug("live media outputs: %s" % (output))
|
||||
if output["retcode"] != 0:
|
||||
self.pool.log_error("Live media task failed.")
|
||||
raise RuntimeError(
|
||||
"Live media task failed: %s. See %s for more details."
|
||||
% (output["task_id"], log_file)
|
||||
)
|
||||
return output
|
||||
|
||||
def _get_cmd(self, koji_wrapper, config):
|
||||
"""Replace `arches` (as list) with `arch` as a comma-separated string."""
|
||||
copy = dict(config)
|
||||
copy['arch'] = ','.join(copy.pop('arches', []))
|
||||
copy['can_fail'] = self.failable_arches
|
||||
copy["arch"] = ",".join(copy.pop("arches", []))
|
||||
copy["can_fail"] = self.failable_arches
|
||||
return koji_wrapper.get_live_media_cmd(copy)
|
||||
|
||||
def worker(self, compose, variant, subvariant, config):
|
||||
msg = ('Live media: %s (arches: %s, variant: %s, subvariant: %s)'
|
||||
% (config['name'], ' '.join(config['arches']), variant.uid, subvariant))
|
||||
self.pool.log_info('[BEGIN] %s' % msg)
|
||||
msg = "Live media: %s (arches: %s, variant: %s, subvariant: %s)" % (
|
||||
config["name"],
|
||||
" ".join(config["arches"]),
|
||||
variant.uid,
|
||||
subvariant,
|
||||
)
|
||||
self.pool.log_info("[BEGIN] %s" % msg)
|
||||
|
||||
koji_wrapper = KojiWrapper(compose.conf['koji_profile'])
|
||||
koji_wrapper = KojiWrapper(compose.conf["koji_profile"])
|
||||
cmd = self._get_cmd(koji_wrapper, config)
|
||||
|
||||
log_file = self._get_log_file(compose, variant, subvariant, config)
|
||||
@ -132,51 +150,54 @@ class LiveMediaThread(WorkerThread):
|
||||
image_infos = []
|
||||
|
||||
paths = koji_wrapper.get_image_paths(
|
||||
output['task_id'],
|
||||
callback=lambda arch: log_failed_task(compose, variant, arch, 'live-media', subvariant)
|
||||
output["task_id"],
|
||||
callback=lambda arch: log_failed_task(
|
||||
compose, variant, arch, "live-media", subvariant
|
||||
),
|
||||
)
|
||||
|
||||
for arch, paths in paths.items():
|
||||
for path in paths:
|
||||
if path.endswith('.iso'):
|
||||
image_infos.append({'path': path, 'arch': arch})
|
||||
if path.endswith(".iso"):
|
||||
image_infos.append({"path": path, "arch": arch})
|
||||
|
||||
if len(image_infos) < len(config['arches']) - len(self.failable_arches):
|
||||
if len(image_infos) < len(config["arches"]) - len(self.failable_arches):
|
||||
self.pool.log_error(
|
||||
'Error in koji task %s. Expected to find at least one image '
|
||||
'for each required arch (%s). Got %s.'
|
||||
% (output['task_id'], len(config['arches']), len(image_infos)))
|
||||
raise RuntimeError('Image count mismatch in task %s.' % output['task_id'])
|
||||
"Error in koji task %s. Expected to find at least one image "
|
||||
"for each required arch (%s). Got %s."
|
||||
% (output["task_id"], len(config["arches"]), len(image_infos))
|
||||
)
|
||||
raise RuntimeError("Image count mismatch in task %s." % output["task_id"])
|
||||
|
||||
linker = Linker(logger=self.pool._logger)
|
||||
link_type = compose.conf["link_type"]
|
||||
for image_info in image_infos:
|
||||
image_dir = compose.paths.compose.iso_dir(image_info['arch'], variant)
|
||||
image_dir = compose.paths.compose.iso_dir(image_info["arch"], variant)
|
||||
makedirs(image_dir)
|
||||
relative_image_dir = (
|
||||
compose.paths.compose.iso_dir(image_info['arch'], variant, relative=True)
|
||||
relative_image_dir = compose.paths.compose.iso_dir(
|
||||
image_info["arch"], variant, relative=True
|
||||
)
|
||||
|
||||
# let's not change filename of koji outputs
|
||||
image_dest = os.path.join(image_dir, os.path.basename(image_info['path']))
|
||||
image_dest = os.path.join(image_dir, os.path.basename(image_info["path"]))
|
||||
|
||||
src_file = os.path.realpath(image_info["path"])
|
||||
linker.link(src_file, image_dest, link_type=link_type)
|
||||
|
||||
# Update image manifest
|
||||
img = Image(compose.im)
|
||||
img.type = 'live'
|
||||
img.format = 'iso'
|
||||
img.type = "live"
|
||||
img.format = "iso"
|
||||
img.path = os.path.join(relative_image_dir, os.path.basename(image_dest))
|
||||
img.mtime = get_mtime(image_dest)
|
||||
img.size = get_file_size(image_dest)
|
||||
img.arch = image_info['arch']
|
||||
img.disc_number = 1 # We don't expect multiple disks
|
||||
img.arch = image_info["arch"]
|
||||
img.disc_number = 1 # We don't expect multiple disks
|
||||
img.disc_count = 1
|
||||
img.bootable = True
|
||||
img.subvariant = subvariant
|
||||
setattr(img, 'can_fail', bool(self.failable_arches))
|
||||
setattr(img, 'deliverable', 'live-media')
|
||||
compose.im.add(variant=variant.uid, arch=image_info['arch'], image=img)
|
||||
setattr(img, "can_fail", bool(self.failable_arches))
|
||||
setattr(img, "deliverable", "live-media")
|
||||
compose.im.add(variant=variant.uid, arch=image_info["arch"], image=img)
|
||||
|
||||
self.pool.log_info('[DONE ] %s (task id: %s)' % (msg, output['task_id']))
|
||||
self.pool.log_info("[DONE ] %s (task id: %s)" % (msg, output["task_id"]))
|
||||
|
@ -12,7 +12,7 @@ from ..wrappers import kojiwrapper
|
||||
|
||||
|
||||
class OSBSPhase(PhaseLoggerMixin, ConfigGuardedPhase):
|
||||
name = 'osbs'
|
||||
name = "osbs"
|
||||
|
||||
def __init__(self, compose):
|
||||
super(OSBSPhase, self).__init__(compose)
|
||||
@ -32,9 +32,10 @@ class OSBSPhase(PhaseLoggerMixin, ConfigGuardedPhase):
|
||||
"""Create a file with image metadata if the phase actually ran."""
|
||||
if self._skipped:
|
||||
return
|
||||
with open(self.compose.paths.compose.metadata('osbs.json'), 'w') as f:
|
||||
json.dump(self.pool.metadata, f, indent=4, sort_keys=True,
|
||||
separators=(',', ': '))
|
||||
with open(self.compose.paths.compose.metadata("osbs.json"), "w") as f:
|
||||
json.dump(
|
||||
self.pool.metadata, f, indent=4, sort_keys=True, separators=(",", ": ")
|
||||
)
|
||||
|
||||
def request_push(self):
|
||||
"""Store configuration data about where to push the created images and
|
||||
@ -73,100 +74,117 @@ class OSBSThread(WorkerThread):
|
||||
def process(self, item, num):
|
||||
compose, variant, config = item
|
||||
self.num = num
|
||||
with util.failable(compose, bool(config.pop('failable', None)), variant, '*', 'osbs',
|
||||
logger=self.pool._logger):
|
||||
with util.failable(
|
||||
compose,
|
||||
bool(config.pop("failable", None)),
|
||||
variant,
|
||||
"*",
|
||||
"osbs",
|
||||
logger=self.pool._logger,
|
||||
):
|
||||
self.worker(compose, variant, config)
|
||||
|
||||
def worker(self, compose, variant, config):
|
||||
msg = 'OSBS task for variant %s' % variant.uid
|
||||
self.pool.log_info('[BEGIN] %s' % msg)
|
||||
koji = kojiwrapper.KojiWrapper(compose.conf['koji_profile'])
|
||||
msg = "OSBS task for variant %s" % variant.uid
|
||||
self.pool.log_info("[BEGIN] %s" % msg)
|
||||
koji = kojiwrapper.KojiWrapper(compose.conf["koji_profile"])
|
||||
koji.login()
|
||||
|
||||
# Start task
|
||||
source = config.pop('url')
|
||||
target = config.pop('target')
|
||||
priority = config.pop('priority', None)
|
||||
gpgkey = config.pop('gpgkey', None)
|
||||
repos = [self._get_repo(compose, v, gpgkey=gpgkey)
|
||||
for v in [variant.uid] + shortcuts.force_list(config.pop('repo', []))]
|
||||
source = config.pop("url")
|
||||
target = config.pop("target")
|
||||
priority = config.pop("priority", None)
|
||||
gpgkey = config.pop("gpgkey", None)
|
||||
repos = [
|
||||
self._get_repo(compose, v, gpgkey=gpgkey)
|
||||
for v in [variant.uid] + shortcuts.force_list(config.pop("repo", []))
|
||||
]
|
||||
# Deprecated in 4.1.36
|
||||
registry = config.pop("registry", None)
|
||||
|
||||
config['yum_repourls'] = repos
|
||||
config["yum_repourls"] = repos
|
||||
|
||||
task_id = koji.koji_proxy.buildContainer(source, target, config,
|
||||
priority=priority)
|
||||
task_id = koji.koji_proxy.buildContainer(
|
||||
source, target, config, priority=priority
|
||||
)
|
||||
|
||||
# Wait for it to finish and capture the output into log file (even
|
||||
# though there is not much there).
|
||||
log_dir = os.path.join(compose.paths.log.topdir(), 'osbs')
|
||||
log_dir = os.path.join(compose.paths.log.topdir(), "osbs")
|
||||
util.makedirs(log_dir)
|
||||
log_file = os.path.join(log_dir, '%s-%s-watch-task.log'
|
||||
% (variant.uid, self.num))
|
||||
log_file = os.path.join(
|
||||
log_dir, "%s-%s-watch-task.log" % (variant.uid, self.num)
|
||||
)
|
||||
if koji.watch_task(task_id, log_file) != 0:
|
||||
raise RuntimeError('OSBS: task %s failed: see %s for details'
|
||||
% (task_id, log_file))
|
||||
raise RuntimeError(
|
||||
"OSBS: task %s failed: see %s for details" % (task_id, log_file)
|
||||
)
|
||||
|
||||
scratch = config.get('scratch', False)
|
||||
scratch = config.get("scratch", False)
|
||||
nvr = self._add_metadata(variant, task_id, compose, scratch)
|
||||
if nvr:
|
||||
registry = get_registry(compose, nvr, registry)
|
||||
if registry:
|
||||
self.pool.registries[nvr] = registry
|
||||
|
||||
self.pool.log_info('[DONE ] %s' % msg)
|
||||
self.pool.log_info("[DONE ] %s" % msg)
|
||||
|
||||
def _add_metadata(self, variant, task_id, compose, is_scratch):
|
||||
# Create new Koji session. The task could take so long to finish that
|
||||
# our session will expire. This second session does not need to be
|
||||
# authenticated since it will only do reading operations.
|
||||
koji = kojiwrapper.KojiWrapper(compose.conf['koji_profile'])
|
||||
koji = kojiwrapper.KojiWrapper(compose.conf["koji_profile"])
|
||||
|
||||
# Create metadata
|
||||
metadata = {
|
||||
'compose_id': compose.compose_id,
|
||||
'koji_task': task_id,
|
||||
"compose_id": compose.compose_id,
|
||||
"koji_task": task_id,
|
||||
}
|
||||
|
||||
result = koji.koji_proxy.getTaskResult(task_id)
|
||||
if is_scratch:
|
||||
metadata.update({
|
||||
'repositories': result['repositories'],
|
||||
})
|
||||
metadata.update(
|
||||
{"repositories": result["repositories"]}
|
||||
)
|
||||
# add a fake arch of 'scratch', so we can construct the metadata
# in the same data structure as real builds.
self.pool.metadata.setdefault(
|
||||
variant.uid, {}).setdefault('scratch', []).append(metadata)
|
||||
self.pool.metadata.setdefault(variant.uid, {}).setdefault(
|
||||
"scratch", []
|
||||
).append(metadata)
|
||||
return None
|
||||
|
||||
else:
|
||||
build_id = int(result['koji_builds'][0])
|
||||
build_id = int(result["koji_builds"][0])
|
||||
buildinfo = koji.koji_proxy.getBuild(build_id)
|
||||
archives = koji.koji_proxy.listArchives(build_id)
|
||||
|
||||
nvr = "%(name)s-%(version)s-%(release)s" % buildinfo
|
||||
|
||||
metadata.update({
|
||||
'name': buildinfo['name'],
|
||||
'version': buildinfo['version'],
|
||||
'release': buildinfo['release'],
|
||||
'nvr': nvr,
|
||||
'creation_time': buildinfo['creation_time'],
|
||||
})
|
||||
metadata.update(
|
||||
{
|
||||
"name": buildinfo["name"],
|
||||
"version": buildinfo["version"],
|
||||
"release": buildinfo["release"],
|
||||
"nvr": nvr,
|
||||
"creation_time": buildinfo["creation_time"],
|
||||
}
|
||||
)
|
||||
for archive in archives:
|
||||
data = {
|
||||
'filename': archive['filename'],
|
||||
'size': archive['size'],
|
||||
'checksum': archive['checksum'],
|
||||
"filename": archive["filename"],
|
||||
"size": archive["size"],
|
||||
"checksum": archive["checksum"],
|
||||
}
|
||||
data.update(archive['extra'])
|
||||
data.update(archive["extra"])
|
||||
data.update(metadata)
|
||||
arch = archive['extra']['image']['arch']
|
||||
self.pool.log_debug('Created Docker base image %s-%s-%s.%s' % (
|
||||
metadata['name'], metadata['version'], metadata['release'], arch))
|
||||
self.pool.metadata.setdefault(
|
||||
variant.uid, {}).setdefault(arch, []).append(data)
|
||||
arch = archive["extra"]["image"]["arch"]
|
||||
self.pool.log_debug(
|
||||
"Created Docker base image %s-%s-%s.%s"
|
||||
% (metadata["name"], metadata["version"], metadata["release"], arch)
|
||||
)
|
||||
self.pool.metadata.setdefault(variant.uid, {}).setdefault(
|
||||
arch, []
|
||||
).append(data)
|
||||
return nvr
|
||||
|
||||
def _get_repo(self, compose, repo, gpgkey=None):
|
||||
@ -201,17 +219,17 @@ class OSBSThread(WorkerThread):
|
||||
|
||||
repo_file = os.path.join(
|
||||
compose.paths.work.tmp_dir(None, variant),
|
||||
'compose-rpms-%s-%s.repo' % (variant, self.num),
|
||||
"compose-rpms-%s-%s.repo" % (variant, self.num),
|
||||
)
|
||||
|
||||
gpgcheck = 1 if gpgkey else 0
|
||||
with open(repo_file, 'w') as f:
|
||||
f.write('[%s-%s-%s]\n' % (compose.compose_id, variant, self.num))
|
||||
f.write('name=Compose %s (RPMs) - %s\n' % (compose.compose_id, variant))
|
||||
f.write('baseurl=%s\n' % util.translate_path(compose, repo_path))
|
||||
f.write('enabled=1\n')
|
||||
f.write('gpgcheck=%s\n' % gpgcheck)
|
||||
with open(repo_file, "w") as f:
|
||||
f.write("[%s-%s-%s]\n" % (compose.compose_id, variant, self.num))
|
||||
f.write("name=Compose %s (RPMs) - %s\n" % (compose.compose_id, variant))
|
||||
f.write("baseurl=%s\n" % util.translate_path(compose, repo_path))
|
||||
f.write("enabled=1\n")
|
||||
f.write("gpgcheck=%s\n" % gpgcheck)
if gpgcheck:
f.write('gpgkey=%s\n' % gpgkey)
f.write("gpgkey=%s\n" % gpgkey)
return util.translate_path(compose, repo_file)
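# For illustration only (compose ID, variant and URL are made up), the repo
# file written above would look roughly like:
#
#   [Fedora-Rawhide-20200101.n.0-Server-1]
#   name=Compose Fedora-Rawhide-20200101.n.0 (RPMs) - Server
#   baseurl=http://example.com/compose/Server/x86_64/os/
#   enabled=1
#   gpgcheck=0
#
# and the translated URL returned here is what ends up in yum_repourls for
# the buildContainer task.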
@ -16,7 +16,7 @@ from ..wrappers import scm
|
||||
|
||||
|
||||
class OSTreePhase(ConfigGuardedPhase):
|
||||
name = 'ostree'
|
||||
name = "ostree"
|
||||
|
||||
def __init__(self, compose, pkgset_phase=None):
|
||||
super(OSTreePhase, self).__init__(compose)
|
||||
@ -40,7 +40,7 @@ class OSTreePhase(ConfigGuardedPhase):
|
||||
if isinstance(self.compose.conf.get(self.name), dict):
|
||||
for variant in self.compose.get_variants():
|
||||
for conf in self.get_config_block(variant):
|
||||
for arch in conf.get('arches', []) or variant.arches:
|
||||
for arch in conf.get("arches", []) or variant.arches:
|
||||
self._enqueue(variant, arch, conf)
|
||||
else:
|
||||
# Legacy code path to support original configuration.
|
||||
@ -60,22 +60,31 @@ class OSTreeThread(WorkerThread):
|
||||
def process(self, item, num):
|
||||
compose, variant, arch, config = item
|
||||
self.num = num
|
||||
failable_arches = config.get('failable', [])
|
||||
with util.failable(compose, util.can_arch_fail(failable_arches, arch),
|
||||
variant, arch, 'ostree'):
|
||||
failable_arches = config.get("failable", [])
|
||||
with util.failable(
|
||||
compose, util.can_arch_fail(failable_arches, arch), variant, arch, "ostree"
|
||||
):
|
||||
self.worker(compose, variant, arch, config)
|
||||
|
||||
def worker(self, compose, variant, arch, config):
|
||||
msg = 'OSTree phase for variant %s, arch %s' % (variant.uid, arch)
|
||||
self.pool.log_info('[BEGIN] %s' % msg)
|
||||
workdir = compose.paths.work.topdir('ostree-%d' % self.num)
|
||||
self.logdir = compose.paths.log.topdir('%s/%s/ostree-%d' %
|
||||
(arch, variant.uid, self.num))
|
||||
repodir = os.path.join(workdir, 'config_repo')
|
||||
self._clone_repo(compose, repodir, config['config_url'], config.get('config_branch', 'master'))
|
||||
msg = "OSTree phase for variant %s, arch %s" % (variant.uid, arch)
|
||||
self.pool.log_info("[BEGIN] %s" % msg)
|
||||
workdir = compose.paths.work.topdir("ostree-%d" % self.num)
|
||||
self.logdir = compose.paths.log.topdir(
|
||||
"%s/%s/ostree-%d" % (arch, variant.uid, self.num)
|
||||
)
|
||||
repodir = os.path.join(workdir, "config_repo")
|
||||
self._clone_repo(
|
||||
compose,
|
||||
repodir,
|
||||
config["config_url"],
|
||||
config.get("config_branch", "master"),
|
||||
)
|
||||
|
||||
comps_repo = compose.paths.work.comps_repo('$basearch', variant=variant, create_dir=False)
|
||||
repos = shortcuts.force_list(config['repo']) + self.repos
|
||||
comps_repo = compose.paths.work.comps_repo(
|
||||
"$basearch", variant=variant, create_dir=False
|
||||
)
|
||||
repos = shortcuts.force_list(config["repo"]) + self.repos
|
||||
if compose.has_comps:
|
||||
repos.append(translate_path(compose, comps_repo))
|
||||
repos = get_repo_dicts(repos, logger=self.pool)
|
||||
@ -85,27 +94,35 @@ class OSTreeThread(WorkerThread):
|
||||
|
||||
# repos in configuration can have repo url set to variant UID,
|
||||
# update it to have the actual url that we just translated.
|
||||
new_config.update({'repo': repos})
|
||||
new_config.update({"repo": repos})
# remove unnecessary (for the 'pungi-make-ostree tree' script) elements
# from config; it doesn't hurt to have them, however removing them can
# reduce confusion
|
||||
for k in ['ostree_repo', 'treefile', 'config_url', 'config_branch',
|
||||
'failable', 'version', 'update_summary']:
|
||||
for k in [
|
||||
"ostree_repo",
|
||||
"treefile",
|
||||
"config_url",
|
||||
"config_branch",
|
||||
"failable",
|
||||
"version",
|
||||
"update_summary",
|
||||
]:
|
||||
new_config.pop(k, None)
# write a json file to save the configuration, so 'pungi-make-ostree tree'
# can make use of it
extra_config_file = os.path.join(workdir, 'extra_config.json')
with open(extra_config_file, 'w') as f:
extra_config_file = os.path.join(workdir, "extra_config.json")
with open(extra_config_file, "w") as f:
json.dump(new_config, f, indent=4)
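# A hedged illustration of the resulting extra_config.json: only keys that
# were not popped above survive, so with a hypothetical configuration it
# could contain something like
#
#   {
#       "repo": [
#           {"baseurl": "http://example.com/compose/Everything/x86_64/os/"}
#       ]
#   }
#
# which is the file the later --extra-config option points at.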
# Ensure target directory exists, otherwise Koji task will fail to
|
||||
# mount it.
|
||||
util.makedirs(config['ostree_repo'])
|
||||
util.makedirs(config["ostree_repo"])
|
||||
|
||||
self._run_ostree_cmd(compose, variant, arch, config, repodir,
|
||||
extra_config_file=extra_config_file)
|
||||
self._run_ostree_cmd(
|
||||
compose, variant, arch, config, repodir, extra_config_file=extra_config_file
|
||||
)
|
||||
|
||||
if compose.notifier:
|
||||
original_ref = get_ref_from_treefile(
|
||||
@ -120,54 +137,66 @@ class OSTreeThread(WorkerThread):
|
||||
# instead. If the commit id could not be read, an exception will be
|
||||
# raised.
|
||||
commitid = get_commitid_from_commitid_file(
|
||||
os.path.join(self.logdir, 'commitid.log')
|
||||
os.path.join(self.logdir, "commitid.log")
|
||||
)
|
||||
compose.notifier.send(
|
||||
"ostree",
|
||||
variant=variant.uid,
|
||||
arch=arch,
|
||||
ref=ref,
|
||||
commitid=commitid,
|
||||
repo_path=translate_path(compose, config["ostree_repo"]),
|
||||
local_repo_path=config["ostree_repo"],
|
||||
)
|
||||
compose.notifier.send('ostree',
|
||||
variant=variant.uid,
|
||||
arch=arch,
|
||||
ref=ref,
|
||||
commitid=commitid,
|
||||
repo_path=translate_path(compose, config['ostree_repo']),
|
||||
local_repo_path=config['ostree_repo'])
|
||||
|
||||
self.pool.log_info('[DONE ] %s' % (msg))
|
||||
self.pool.log_info("[DONE ] %s" % (msg))
|
||||
|
||||
def _run_ostree_cmd(self, compose, variant, arch, config, config_repo, extra_config_file=None):
|
||||
def _run_ostree_cmd(
|
||||
self, compose, variant, arch, config, config_repo, extra_config_file=None
|
||||
):
|
||||
cmd = [
|
||||
'pungi-make-ostree',
|
||||
'tree',
|
||||
'--repo=%s' % config['ostree_repo'],
|
||||
'--log-dir=%s' % self.logdir,
|
||||
'--treefile=%s' % os.path.join(config_repo, config['treefile']),
|
||||
"pungi-make-ostree",
|
||||
"tree",
|
||||
"--repo=%s" % config["ostree_repo"],
|
||||
"--log-dir=%s" % self.logdir,
|
||||
"--treefile=%s" % os.path.join(config_repo, config["treefile"]),
|
||||
]
|
||||
|
||||
version = util.version_generator(compose, config.get('version'))
|
||||
version = util.version_generator(compose, config.get("version"))
|
||||
if version:
|
||||
cmd.append('--version=%s' % version)
|
||||
cmd.append("--version=%s" % version)
|
||||
|
||||
if extra_config_file:
|
||||
cmd.append('--extra-config=%s' % extra_config_file)
|
||||
cmd.append("--extra-config=%s" % extra_config_file)
|
||||
|
||||
if config.get('update_summary', False):
|
||||
cmd.append('--update-summary')
|
||||
if config.get("update_summary", False):
|
||||
cmd.append("--update-summary")
|
||||
|
||||
ostree_ref = config.get('ostree_ref')
|
||||
ostree_ref = config.get("ostree_ref")
|
||||
if ostree_ref:
|
||||
cmd.append('--ostree-ref=%s' % ostree_ref)
|
||||
cmd.append("--ostree-ref=%s" % ostree_ref)
|
||||
|
||||
if config.get('force_new_commit', False):
|
||||
cmd.append('--force-new-commit')
|
||||
if config.get("force_new_commit", False):
|
||||
cmd.append("--force-new-commit")
|
||||
|
||||
packages = ['pungi', 'ostree', 'rpm-ostree']
|
||||
log_file = os.path.join(self.logdir, 'runroot.log')
|
||||
mounts = [compose.topdir, config['ostree_repo']]
|
||||
packages = ["pungi", "ostree", "rpm-ostree"]
|
||||
log_file = os.path.join(self.logdir, "runroot.log")
|
||||
mounts = [compose.topdir, config["ostree_repo"]]
|
||||
|
||||
runroot = Runroot(compose, phase="ostree")
|
||||
runroot.run(
|
||||
cmd, log_file=log_file, arch=arch, packages=packages,
|
||||
mounts=mounts, new_chroot=True,
|
||||
weight=compose.conf['runroot_weights'].get('ostree'))
|
||||
cmd,
|
||||
log_file=log_file,
|
||||
arch=arch,
|
||||
packages=packages,
|
||||
mounts=mounts,
|
||||
new_chroot=True,
|
||||
weight=compose.conf["runroot_weights"].get("ostree"),
|
||||
)
|
||||
|
||||
def _clone_repo(self, compose, repodir, url, branch):
|
||||
scm.get_dir_from_scm({'scm': 'git', 'repo': url, 'branch': branch, 'dir': '.'},
|
||||
repodir, compose=compose)
|
||||
scm.get_dir_from_scm(
|
||||
{"scm": "git", "repo": url, "branch": branch, "dir": "."},
|
||||
repodir,
|
||||
compose=compose,
|
||||
)
|
||||
|
@ -16,7 +16,7 @@ from ..runroot import Runroot
|
||||
|
||||
|
||||
class OstreeInstallerPhase(PhaseLoggerMixin, ConfigGuardedPhase):
|
||||
name = 'ostree_installer'
|
||||
name = "ostree_installer"
|
||||
|
||||
def __init__(self, compose, buildinstall_phase, pkgset_phase=None):
|
||||
super(OstreeInstallerPhase, self).__init__(compose)
|
||||
@ -27,18 +27,21 @@ class OstreeInstallerPhase(PhaseLoggerMixin, ConfigGuardedPhase):
|
||||
def validate(self):
|
||||
errors = []
|
||||
|
||||
if not self.compose.conf['ostree_installer_overwrite'] and not self.bi.skip():
|
||||
if not self.compose.conf["ostree_installer_overwrite"] and not self.bi.skip():
|
||||
for variant in self.compose.get_variants():
|
||||
for arch in variant.arches:
|
||||
conf = util.get_arch_variant_data(self.compose.conf, self.name,
|
||||
arch, variant)
|
||||
conf = util.get_arch_variant_data(
|
||||
self.compose.conf, self.name, arch, variant
|
||||
)
|
||||
if conf and not variant.is_empty:
|
||||
errors.append('Can not generate ostree installer for %s.%s: '
|
||||
'it has buildinstall running already and the '
|
||||
'files would clash.' % (variant.uid, arch))
|
||||
errors.append(
|
||||
"Can not generate ostree installer for %s.%s: "
|
||||
"it has buildinstall running already and the "
|
||||
"files would clash." % (variant.uid, arch)
|
||||
)
|
||||
|
||||
if errors:
|
||||
raise ValueError('\n'.join(errors))
|
||||
raise ValueError("\n".join(errors))
|
||||
|
||||
def get_repos(self):
|
||||
return [
|
||||
@ -67,38 +70,53 @@ class OstreeInstallerThread(WorkerThread):
|
||||
def process(self, item, num):
|
||||
compose, variant, arch, config = item
|
||||
self.num = num
|
||||
failable_arches = config.get('failable', [])
|
||||
failable_arches = config.get("failable", [])
|
||||
self.can_fail = util.can_arch_fail(failable_arches, arch)
|
||||
with util.failable(compose, self.can_fail, variant, arch, 'ostree-installer',
|
||||
logger=self.pool._logger):
|
||||
with util.failable(
|
||||
compose,
|
||||
self.can_fail,
|
||||
variant,
|
||||
arch,
|
||||
"ostree-installer",
|
||||
logger=self.pool._logger,
|
||||
):
|
||||
self.worker(compose, variant, arch, config)
|
||||
|
||||
def worker(self, compose, variant, arch, config):
|
||||
msg = 'Ostree phase for variant %s, arch %s' % (variant.uid, arch)
|
||||
self.pool.log_info('[BEGIN] %s' % msg)
|
||||
self.logdir = compose.paths.log.topdir('%s/%s/ostree_installer-%s' % (arch, variant, self.num))
|
||||
msg = "Ostree phase for variant %s, arch %s" % (variant.uid, arch)
|
||||
self.pool.log_info("[BEGIN] %s" % msg)
|
||||
self.logdir = compose.paths.log.topdir(
|
||||
"%s/%s/ostree_installer-%s" % (arch, variant, self.num)
|
||||
)
|
||||
|
||||
repos = get_repo_urls(None, # compose==None. Special value says that method should ignore deprecated variant-type repo
|
||||
shortcuts.force_list(config['repo'])
|
||||
+ self.baseurls,
|
||||
arch=arch,
|
||||
logger=self.pool)
|
||||
repos = get_repo_urls(
|
||||
None, # compose==None. Special value says that method should ignore deprecated variant-type repo
|
||||
shortcuts.force_list(config["repo"]) + self.baseurls,
|
||||
arch=arch,
|
||||
logger=self.pool,
|
||||
)
|
||||
if compose.has_comps:
|
||||
repos.append(
|
||||
translate_path(
|
||||
compose,
|
||||
compose.paths.work.comps_repo(
|
||||
'$basearch', variant=variant, create_dir=False
|
||||
"$basearch", variant=variant, create_dir=False
|
||||
),
|
||||
)
|
||||
)
|
||||
repos = [url.replace('$arch', arch) for url in repos]
|
||||
output_dir = os.path.join(compose.paths.work.topdir(arch), variant.uid, 'ostree_installer')
|
||||
repos = [url.replace("$arch", arch) for url in repos]
|
||||
output_dir = os.path.join(
|
||||
compose.paths.work.topdir(arch), variant.uid, "ostree_installer"
|
||||
)
|
||||
util.makedirs(os.path.dirname(output_dir))
|
||||
|
||||
self.template_dir = os.path.join(compose.paths.work.topdir(arch), variant.uid, 'lorax_templates')
|
||||
self._clone_templates(compose, config.get('template_repo'), config.get('template_branch'))
|
||||
disc_type = compose.conf['disc_types'].get('ostree', 'ostree')
|
||||
self.template_dir = os.path.join(
|
||||
compose.paths.work.topdir(arch), variant.uid, "lorax_templates"
|
||||
)
|
||||
self._clone_templates(
|
||||
compose, config.get("template_repo"), config.get("template_branch")
|
||||
)
|
||||
disc_type = compose.conf["disc_types"].get("ostree", "ostree")
|
||||
|
||||
volid = get_volid(compose, arch, variant, disc_type=disc_type)
|
||||
self._run_ostree_cmd(compose, variant, arch, config, repos, output_dir, volid)
|
||||
@ -106,24 +124,29 @@ class OstreeInstallerThread(WorkerThread):
|
||||
filename = compose.get_image_name(arch, variant, disc_type=disc_type)
|
||||
self._copy_image(compose, variant, arch, filename, output_dir)
|
||||
self._add_to_manifest(compose, variant, arch, filename)
|
||||
self.pool.log_info('[DONE ] %s' % (msg))
|
||||
self.pool.log_info("[DONE ] %s" % (msg))
|
||||
|
||||
def _clone_templates(self, compose, url, branch='master'):
|
||||
def _clone_templates(self, compose, url, branch="master"):
|
||||
if not url:
|
||||
self.template_dir = None
|
||||
return
|
||||
scm.get_dir_from_scm({'scm': 'git', 'repo': url, 'branch': branch, 'dir': '.'},
|
||||
self.template_dir, compose=compose)
|
||||
scm.get_dir_from_scm(
|
||||
{"scm": "git", "repo": url, "branch": branch, "dir": "."},
|
||||
self.template_dir,
|
||||
compose=compose,
|
||||
)
|
||||
|
||||
def _get_release(self, compose, config):
|
||||
if 'release' in config:
|
||||
return version_generator(compose, config['release']) or compose.image_release
|
||||
return config.get('release', None)
|
||||
if "release" in config:
|
||||
return (
|
||||
version_generator(compose, config["release"]) or compose.image_release
|
||||
)
|
||||
return config.get("release", None)
|
||||
|
||||
def _copy_image(self, compose, variant, arch, filename, output_dir):
|
||||
iso_path = compose.paths.compose.iso_path(arch, variant, filename)
|
||||
os_path = compose.paths.compose.os_tree(arch, variant)
|
||||
boot_iso = os.path.join(output_dir, 'images', 'boot.iso')
|
||||
boot_iso = os.path.join(output_dir, "images", "boot.iso")
|
||||
|
||||
util.copy_all(output_dir, os_path)
|
||||
try:
|
||||
@ -133,7 +156,9 @@ class OstreeInstallerThread(WorkerThread):
|
||||
|
||||
def _add_to_manifest(self, compose, variant, arch, filename):
|
||||
full_iso_path = compose.paths.compose.iso_path(arch, variant, filename)
|
||||
iso_path = compose.paths.compose.iso_path(arch, variant, filename, relative=True)
|
||||
iso_path = compose.paths.compose.iso_path(
|
||||
arch, variant, filename, relative=True
|
||||
)
|
||||
implant_md5 = iso.get_implanted_md5(full_iso_path)
|
||||
|
||||
img = images.Image(compose.im)
|
||||
@ -148,8 +173,8 @@ class OstreeInstallerThread(WorkerThread):
|
||||
img.bootable = True
|
||||
img.subvariant = variant.uid
|
||||
img.implant_md5 = implant_md5
|
||||
setattr(img, 'can_fail', self.can_fail)
|
||||
setattr(img, 'deliverable', 'ostree-installer')
|
||||
setattr(img, "can_fail", self.can_fail)
|
||||
setattr(img, "deliverable", "ostree-installer")
|
||||
try:
|
||||
img.volume_id = iso.get_volume_id(full_iso_path)
|
||||
except RuntimeError:
|
||||
@ -163,17 +188,21 @@ class OstreeInstallerThread(WorkerThread):
|
||||
"""
|
||||
templates = []
|
||||
for template in config.get(key, []):
|
||||
if template[0] != '/':
|
||||
if template[0] != "/":
|
||||
if not self.template_dir:
|
||||
raise RuntimeError('Relative path to template without setting template_repo.')
|
||||
raise RuntimeError(
|
||||
"Relative path to template without setting template_repo."
|
||||
)
|
||||
template = os.path.join(self.template_dir, template)
|
||||
templates.append(template)
|
||||
return templates
|
||||
|
||||
def _run_ostree_cmd(self, compose, variant, arch, config, source_repo, output_dir, volid):
|
||||
def _run_ostree_cmd(
|
||||
self, compose, variant, arch, config, source_repo, output_dir, volid
|
||||
):
|
||||
lorax_wrapper = lorax.LoraxWrapper()
|
||||
lorax_cmd = lorax_wrapper.get_lorax_cmd(
|
||||
compose.conf['release_name'],
|
||||
compose.conf["release_name"],
|
||||
compose.conf["release_version"],
|
||||
self._get_release(compose, config),
|
||||
repo_baseurl=source_repo,
|
||||
@ -182,25 +211,32 @@ class OstreeInstallerThread(WorkerThread):
|
||||
nomacboot=True,
|
||||
volid=volid,
|
||||
buildarch=get_valid_arches(arch)[0],
|
||||
buildinstallpackages=config.get('installpkgs'),
|
||||
add_template=self._get_templates(config, 'add_template'),
|
||||
add_arch_template=self._get_templates(config, 'add_arch_template'),
|
||||
add_template_var=config.get('add_template_var'),
|
||||
add_arch_template_var=config.get('add_arch_template_var'),
|
||||
rootfs_size=config.get('rootfs_size'),
|
||||
buildinstallpackages=config.get("installpkgs"),
|
||||
add_template=self._get_templates(config, "add_template"),
|
||||
add_arch_template=self._get_templates(config, "add_arch_template"),
|
||||
add_template_var=config.get("add_template_var"),
|
||||
add_arch_template_var=config.get("add_arch_template_var"),
|
||||
rootfs_size=config.get("rootfs_size"),
|
||||
is_final=compose.supported,
|
||||
log_dir=self.logdir,
|
||||
)
|
||||
cmd = 'rm -rf %s && %s' % (shlex_quote(output_dir),
|
||||
' '.join([shlex_quote(x) for x in lorax_cmd]))
|
||||
cmd = "rm -rf %s && %s" % (
|
||||
shlex_quote(output_dir),
|
||||
" ".join([shlex_quote(x) for x in lorax_cmd]),
|
||||
)
|
||||
|
||||
packages = ['pungi', 'lorax', 'ostree']
|
||||
packages += config.get('extra_runroot_pkgs', [])
|
||||
packages = ["pungi", "lorax", "ostree"]
|
||||
packages += config.get("extra_runroot_pkgs", [])
|
||||
|
||||
log_file = os.path.join(self.logdir, 'runroot.log')
|
||||
log_file = os.path.join(self.logdir, "runroot.log")
|
||||
|
||||
runroot = Runroot(compose, phase="ostree_installer")
|
||||
runroot.run(
|
||||
cmd, log_file=log_file, arch=arch, packages=packages,
|
||||
mounts=[compose.topdir], chown_paths=[output_dir],
|
||||
weight=compose.conf['runroot_weights'].get('ostree_installer'))
|
||||
cmd,
|
||||
log_file=log_file,
|
||||
arch=arch,
|
||||
packages=packages,
|
||||
mounts=[compose.topdir],
|
||||
chown_paths=[output_dir],
|
||||
weight=compose.conf["runroot_weights"].get("ostree_installer"),
|
||||
)
|
||||
|
@ -14,7 +14,9 @@ def gather_phases_metadata(source_object):
|
||||
"""
|
||||
|
||||
if not source_object:
|
||||
raise ValueError("PhasesMetadata can not load any data - it got empty parameter")
|
||||
raise ValueError(
|
||||
"PhasesMetadata can not load any data - it got empty parameter"
|
||||
)
|
||||
|
||||
phases = []
|
||||
for item in dir(source_object):
|
||||
@ -23,9 +25,11 @@ def gather_phases_metadata(source_object):
|
||||
continue
|
||||
if issubclass(cls, PhaseBase):
|
||||
try:
|
||||
name_attr = getattr(cls, 'name')
|
||||
name_attr = getattr(cls, "name")
|
||||
phases.append(name_attr)
|
||||
except AttributeError:
|
||||
raise AttributeError("Bad phase-class format: '%s' is missing attribute 'name'" % item)
|
||||
raise AttributeError(
|
||||
"Bad phase-class format: '%s' is missing attribute 'name'" % item
|
||||
)
|
||||
|
||||
return phases
|
||||
|
@ -19,6 +19,7 @@ from pungi.phases.base import PhaseBase
|
||||
|
||||
class PkgsetPhase(PhaseBase):
|
||||
"""PKGSET"""
|
||||
|
||||
name = "pkgset"
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
@ -30,6 +31,7 @@ class PkgsetPhase(PhaseBase):
|
||||
pkgset_source = "PkgsetSource%s" % self.compose.conf["pkgset_source"]
|
||||
from .source import PkgsetSourceContainer
|
||||
from . import sources
|
||||
|
||||
PkgsetSourceContainer.register_module(sources)
|
||||
container = PkgsetSourceContainer()
|
||||
SourceClass = container[pkgset_source]
|
||||
|
@ -45,8 +45,9 @@ class ReaderThread(WorkerThread):
|
||||
# rpm_info, build_info = item
|
||||
|
||||
if (num % 100 == 0) or (num == self.pool.queue_total):
|
||||
self.pool.package_set.log_debug("Processed %s out of %s packages"
|
||||
% (num, self.pool.queue_total))
|
||||
self.pool.package_set.log_debug(
|
||||
"Processed %s out of %s packages" % (num, self.pool.queue_total)
|
||||
)
|
||||
|
||||
rpm_path = self.pool.package_set.get_package_path(item)
|
||||
if rpm_path is None:
|
||||
@ -79,9 +80,14 @@ class ReaderThread(WorkerThread):
|
||||
|
||||
|
||||
class PackageSetBase(kobo.log.LoggingBase):
|
||||
|
||||
def __init__(self, name, sigkey_ordering, arches=None, logger=None,
|
||||
allow_invalid_sigkeys=False):
|
||||
def __init__(
|
||||
self,
|
||||
name,
|
||||
sigkey_ordering,
|
||||
arches=None,
|
||||
logger=None,
|
||||
allow_invalid_sigkeys=False,
|
||||
):
|
||||
super(PackageSetBase, self).__init__(logger=logger)
|
||||
self.name = name
|
||||
self.file_cache = kobo.pkgset.FileCache(kobo.pkgset.SimpleRpmWrapper)
|
||||
@ -122,14 +128,20 @@ class PackageSetBase(kobo.log.LoggingBase):
|
||||
Raises RuntimeError containing details of RPMs with invalid
|
||||
sigkeys defined in `rpminfos`.
|
||||
"""
|
||||
|
||||
def nvr_formatter(package_info):
# joins NVR parts of the package with '-' character.
return '-'.join((package_info['name'], package_info['version'], package_info['release']))
return "-".join(
(package_info["name"], package_info["version"], package_info["release"])
)
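# e.g. nvr_formatter({"name": "bash", "version": "5.0", "release": "1.fc33"})
# would return "bash-5.0-1.fc33" (values are illustrative).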
def get_error(sigkeys, infos):
|
||||
return "RPM(s) not found for sigs: %s. Check log for details. Unsigned packages:\n%s" % (
|
||||
sigkeys,
|
||||
'\n'.join(sorted(set(nvr_formatter(rpminfo) for rpminfo in infos))),
|
||||
return (
|
||||
"RPM(s) not found for sigs: %s. Check log for details. Unsigned packages:\n%s"
|
||||
% (
|
||||
sigkeys,
|
||||
"\n".join(sorted(set(nvr_formatter(rpminfo) for rpminfo in infos))),
|
||||
)
|
||||
)
|
||||
|
||||
if not isinstance(rpminfos, dict):
|
||||
@ -198,14 +210,15 @@ class PackageSetBase(kobo.log.LoggingBase):
|
||||
# arches (excluding multilib arches)
|
||||
if primary_arch:
|
||||
exclusivearch_list = get_valid_arches(
|
||||
primary_arch, multilib=False, add_noarch=False, add_src=False)
|
||||
primary_arch, multilib=False, add_noarch=False, add_src=False
|
||||
)
|
||||
# We don't want to consider noarch: if a package is a true noarch
# build (not just a subpackage), it has to have noarch in
# ExclusiveArch otherwise rpm will refuse to build it.
|
||||
# This should eventually become a default, but it could have a big
|
||||
# impact and thus it's hidden behind an option.
|
||||
if not exclusive_noarch and 'noarch' in exclusivearch_list:
|
||||
exclusivearch_list.remove('noarch')
|
||||
if not exclusive_noarch and "noarch" in exclusivearch_list:
|
||||
exclusivearch_list.remove("noarch")
|
||||
else:
|
||||
exclusivearch_list = None
|
||||
for arch in arch_list:
|
||||
@ -237,7 +250,7 @@ class PackageSetBase(kobo.log.LoggingBase):
|
||||
for i in self.rpms_by_arch[arch]:
|
||||
rpm_path = i.file_path
|
||||
if remove_path_prefix and rpm_path.startswith(remove_path_prefix):
|
||||
rpm_path = rpm_path[len(remove_path_prefix):]
|
||||
rpm_path = rpm_path[len(remove_path_prefix) :]
|
||||
f.write("%s\n" % rpm_path)
|
||||
|
||||
@staticmethod
|
||||
@ -256,7 +269,7 @@ class PackageSetBase(kobo.log.LoggingBase):
|
||||
"""
|
||||
Saves the current FileCache using the pickle module to `file_path`.
|
||||
"""
|
||||
with open(file_path, 'wb') as f:
|
||||
with open(file_path, "wb") as f:
|
||||
pickle.dump(self.file_cache, f, protocol=pickle.HIGHEST_PROTOCOL)
|
||||
|
||||
|
||||
@ -282,10 +295,19 @@ class FilelistPackageSet(PackageSetBase):
|
||||
|
||||
|
||||
class KojiPackageSet(PackageSetBase):
|
||||
def __init__(self, name, koji_wrapper, sigkey_ordering, arches=None, logger=None,
|
||||
packages=None, allow_invalid_sigkeys=False,
|
||||
populate_only_packages=False, cache_region=None,
|
||||
extra_builds=None):
|
||||
def __init__(
|
||||
self,
|
||||
name,
|
||||
koji_wrapper,
|
||||
sigkey_ordering,
|
||||
arches=None,
|
||||
logger=None,
|
||||
packages=None,
|
||||
allow_invalid_sigkeys=False,
|
||||
populate_only_packages=False,
|
||||
cache_region=None,
|
||||
extra_builds=None,
|
||||
):
|
||||
"""
|
||||
Creates new KojiPackageSet.
|
||||
|
||||
@ -320,7 +342,7 @@ class KojiPackageSet(PackageSetBase):
|
||||
sigkey_ordering=sigkey_ordering,
|
||||
arches=arches,
|
||||
logger=logger,
|
||||
allow_invalid_sigkeys=allow_invalid_sigkeys
|
||||
allow_invalid_sigkeys=allow_invalid_sigkeys,
|
||||
)
|
||||
self.koji_wrapper = koji_wrapper
|
||||
# Names of packages to look for in the Koji tag.
|
||||
@ -356,9 +378,13 @@ class KojiPackageSet(PackageSetBase):
|
||||
builds = []
|
||||
|
||||
builds = self.koji_wrapper.retrying_multicall_map(
|
||||
self.koji_proxy, self.koji_proxy.getBuild, list_of_args=self.extra_builds)
|
||||
self.koji_proxy, self.koji_proxy.getBuild, list_of_args=self.extra_builds
|
||||
)
|
||||
rpms_in_builds = self.koji_wrapper.retrying_multicall_map(
|
||||
self.koji_proxy, self.koji_proxy.listBuildRPMs, list_of_args=self.extra_builds)
|
||||
self.koji_proxy,
|
||||
self.koji_proxy.listBuildRPMs,
|
||||
list_of_args=self.extra_builds,
|
||||
)
|
||||
|
||||
rpms = []
|
||||
for rpms_in_build in rpms_in_builds:
|
||||
@ -371,18 +397,23 @@ class KojiPackageSet(PackageSetBase):
|
||||
|
||||
if self.cache_region:
|
||||
cache_key = "KojiPackageSet.get_latest_rpms_%s_%s_%s" % (
|
||||
tag, str(event), str(inherit))
|
||||
tag,
|
||||
str(event),
|
||||
str(inherit),
|
||||
)
|
||||
cached_response = self.cache_region.get(cache_key)
|
||||
if cached_response:
|
||||
return cached_response
|
||||
else:
|
||||
response = self.koji_proxy.listTaggedRPMS(
|
||||
tag, event=event, inherit=inherit, latest=True)
|
||||
tag, event=event, inherit=inherit, latest=True
|
||||
)
|
||||
self.cache_region.set(cache_key, response)
|
||||
return response
|
||||
else:
|
||||
return self.koji_proxy.listTaggedRPMS(
|
||||
tag, event=event, inherit=inherit, latest=True)
|
||||
tag, event=event, inherit=inherit, latest=True
|
||||
)
|
||||
|
||||
def get_package_path(self, queue_item):
|
||||
rpm_info, build_info = queue_item
|
||||
@ -393,12 +424,14 @@ class KojiPackageSet(PackageSetBase):
|
||||
# we're looking for *signed* copies here
|
||||
continue
|
||||
sigkey = sigkey.lower()
|
||||
rpm_path = os.path.join(pathinfo.build(build_info), pathinfo.signed(rpm_info, sigkey))
|
||||
rpm_path = os.path.join(
|
||||
pathinfo.build(build_info), pathinfo.signed(rpm_info, sigkey)
|
||||
)
|
||||
paths.append(rpm_path)
|
||||
if os.path.isfile(rpm_path):
|
||||
return rpm_path
|
||||
|
||||
if None in self.sigkey_ordering or '' in self.sigkey_ordering:
|
||||
if None in self.sigkey_ordering or "" in self.sigkey_ordering:
|
||||
# use an unsigned copy (if allowed)
|
||||
rpm_path = os.path.join(pathinfo.build(build_info), pathinfo.rpm(rpm_info))
|
||||
paths.append(rpm_path)
|
||||
@ -414,8 +447,10 @@ class KojiPackageSet(PackageSetBase):
|
||||
return rpm_path
|
||||
|
||||
self._invalid_sigkey_rpms.append(rpm_info)
|
||||
self.log_error("RPM %s not found for sigs: %s. Paths checked: %s"
|
||||
% (rpm_info, self.sigkey_ordering, paths))
|
||||
self.log_error(
|
||||
"RPM %s not found for sigs: %s. Paths checked: %s"
|
||||
% (rpm_info, self.sigkey_ordering, paths)
|
||||
)
return None
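# Rough illustration of the lookup order above, assuming
# sigkey_ordering = ["ABCD1234", None] and the usual Koji path layout:
#
#   1. <topdir>/packages/N/V/R/data/signed/abcd1234/<arch>/<nvra>.rpm
#   2. <topdir>/packages/N/V/R/<arch>/<nvra>.rpm   (unsigned fallback, only
#      tried because None is in the ordering)
#
# If no candidate exists on disk, the RPM is recorded in
# _invalid_sigkey_rpms and None is returned, as shown above.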
def populate(self, tag, event=None, inherit=True, include_packages=None):
|
||||
@ -433,7 +468,11 @@ class KojiPackageSet(PackageSetBase):
|
||||
if type(event) is dict:
|
||||
event = event["id"]
|
||||
|
||||
msg = "Getting latest RPMs (tag: %s, event: %s, inherit: %s)" % (tag, event, inherit)
|
||||
msg = "Getting latest RPMs (tag: %s, event: %s, inherit: %s)" % (
|
||||
tag,
|
||||
event,
|
||||
inherit,
|
||||
)
|
||||
self.log_info("[BEGIN] %s" % msg)
|
||||
rpms, builds = self.get_latest_rpms(tag, event, inherit=inherit)
|
||||
extra_rpms, extra_builds = self.get_extra_rpms()
|
||||
@ -442,13 +481,16 @@ class KojiPackageSet(PackageSetBase):
|
||||
|
||||
extra_builds_by_name = {}
|
||||
for build_info in extra_builds:
|
||||
extra_builds_by_name[build_info['name']] = build_info['build_id']
|
||||
extra_builds_by_name[build_info["name"]] = build_info["build_id"]
|
||||
|
||||
builds_by_id = {}
|
||||
exclude_build_id = []
|
||||
for build_info in builds:
|
||||
build_id, build_name = build_info['build_id'], build_info['name']
|
||||
if build_name in extra_builds_by_name and build_id != extra_builds_by_name[build_name]:
|
||||
build_id, build_name = build_info["build_id"], build_info["name"]
|
||||
if (
|
||||
build_name in extra_builds_by_name
|
||||
and build_id != extra_builds_by_name[build_name]
|
||||
):
|
||||
exclude_build_id.append(build_id)
|
||||
else:
|
||||
builds_by_id.setdefault(build_id, build_info)
|
||||
@ -461,9 +503,11 @@ class KojiPackageSet(PackageSetBase):
|
||||
# it would be missing from the package set. Even if it ultimately does
|
||||
# not end in the compose, we need it to extract ExcludeArch and
|
||||
# ExclusiveArch for noarch packages.
|
||||
for rpm_info in itertools.chain((rpm for rpm in rpms if not _is_src(rpm)),
|
||||
(rpm for rpm in rpms if _is_src(rpm))):
|
||||
if rpm_info['build_id'] in exclude_build_id:
|
||||
for rpm_info in itertools.chain(
|
||||
(rpm for rpm in rpms if not _is_src(rpm)),
|
||||
(rpm for rpm in rpms if _is_src(rpm)),
|
||||
):
|
||||
if rpm_info["build_id"] in exclude_build_id:
|
||||
continue
|
||||
|
||||
if self.arches and rpm_info["arch"] not in self.arches:
|
||||
@ -482,8 +526,11 @@ class KojiPackageSet(PackageSetBase):
|
||||
)
|
||||
continue
|
||||
|
||||
if (self.populate_only_packages and self.packages and
|
||||
rpm_info['name'] not in self.packages):
|
||||
if (
|
||||
self.populate_only_packages
|
||||
and self.packages
|
||||
and rpm_info["name"] not in self.packages
|
||||
):
|
||||
skipped_packages_count += 1
|
||||
continue
|
||||
|
||||
@ -494,19 +541,22 @@ class KojiPackageSet(PackageSetBase):
|
||||
result_rpms.append((rpm_info, build_info))
|
||||
if self.populate_only_packages and self.packages:
|
||||
# Only add the package if we already have some whitelist.
|
||||
self.packages.add(build_info['name'])
|
||||
self.packages.add(build_info["name"])
|
||||
|
||||
if skipped_packages_count:
|
||||
self.log_debug("Skipped %d packages, not marked as to be "
|
||||
"included in a compose." % skipped_packages_count)
|
||||
self.log_debug(
|
||||
"Skipped %d packages, not marked as to be "
|
||||
"included in a compose." % skipped_packages_count
|
||||
)
|
||||
|
||||
result = self.read_packages(result_rpms, result_srpms)
|
||||
|
||||
# Check that after reading the packages, every package that is
|
||||
# included in a compose has the right sigkey.
|
||||
if self._invalid_sigkey_rpms:
|
||||
invalid_sigkey_rpms = [rpm for rpm in self._invalid_sigkey_rpms
|
||||
if rpm["name"] in self.packages]
|
||||
invalid_sigkey_rpms = [
|
||||
rpm for rpm in self._invalid_sigkey_rpms if rpm["name"] in self.packages
|
||||
]
|
||||
if invalid_sigkey_rpms:
|
||||
self.raise_invalid_sigkeys_exception(invalid_sigkey_rpms)
|
||||
|
||||
@ -516,4 +566,4 @@ class KojiPackageSet(PackageSetBase):
|
||||
|
||||
def _is_src(rpm_info):
|
||||
"""Check if rpm info object returned by Koji refers to source packages."""
|
||||
return rpm_info['arch'] in ('src', 'nosrc')
|
||||
return rpm_info["arch"] in ("src", "nosrc")
|
||||
|
@ -62,8 +62,9 @@ def variant_dict_from_str(compose, module_str):
|
||||
nsv = module_str.split(":")
|
||||
if len(nsv) > 4:
|
||||
raise ValueError(
|
||||
"Module string \"%s\" is not recognized. "
|
||||
"Only NAME:STREAM[:VERSION[:CONTEXT]] is allowed.")
|
||||
'Module string "%s" is not recognized. '
|
||||
"Only NAME:STREAM[:VERSION[:CONTEXT]] is allowed."
|
||||
)
|
||||
if len(nsv) > 3:
|
||||
module_info["context"] = nsv[3]
|
||||
if len(nsv) > 2:
|
||||
@ -77,23 +78,24 @@ def variant_dict_from_str(compose, module_str):
|
||||
compose.log_warning(
|
||||
"Variant file uses old format of module definition with '-'"
|
||||
"delimiter, please switch to official format defined by "
|
||||
"Modules Naming Policy.")
|
||||
"Modules Naming Policy."
|
||||
)
|
||||
|
||||
module_info = {}
|
||||
# The regex is matching a string which should represent the release number
|
||||
# of a module. The release number is in the format: "%Y%m%d%H%M%S"
|
||||
release_regex = re.compile(r"^(\d){14}$")
|
||||
|
||||
section_start = module_str.rfind('-')
|
||||
module_str_first_part = module_str[section_start+1:]
|
||||
section_start = module_str.rfind("-")
|
||||
module_str_first_part = module_str[section_start + 1 :]
|
||||
if release_regex.match(module_str_first_part):
|
||||
module_info['version'] = module_str_first_part
|
||||
module_info["version"] = module_str_first_part
|
||||
module_str = module_str[:section_start]
|
||||
section_start = module_str.rfind('-')
|
||||
module_info['stream'] = module_str[section_start+1:]
|
||||
section_start = module_str.rfind("-")
|
||||
module_info["stream"] = module_str[section_start + 1 :]
|
||||
else:
|
||||
module_info['stream'] = module_str_first_part
|
||||
module_info['name'] = module_str[:section_start]
|
||||
module_info["stream"] = module_str_first_part
|
||||
module_info["name"] = module_str[:section_start]
return module_info
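# Illustrative behaviour of the legacy '-' separated branch above (values
# are made up):
#
#   variant_dict_from_str(compose, "nodejs-12-20180816123422")
#   -> {"name": "nodejs", "stream": "12", "version": "20180816123422"}
#
#   variant_dict_from_str(compose, "nodejs-12")
#   -> {"name": "nodejs", "stream": "12"}
#
# The 14-digit regex decides whether the last field is treated as a version
# or as the stream.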
@ -120,7 +122,7 @@ def get_koji_modules(compose, koji_wrapper, event, module_info_str):
|
||||
module_info.get("version", "*"),
|
||||
module_info.get("context", "*"),
|
||||
)
|
||||
query_str = query_str.replace('*.*', '*')
|
||||
query_str = query_str.replace("*.*", "*")
|
||||
|
||||
koji_builds = koji_proxy.search(query_str, "build", "glob")
|
||||
|
||||
@ -149,7 +151,7 @@ def get_koji_modules(compose, koji_wrapper, event, module_info_str):
|
||||
except KeyError:
|
||||
continue
|
||||
|
||||
if md['state'] == pungi.wrappers.kojiwrapper.KOJI_BUILD_DELETED:
|
||||
if md["state"] == pungi.wrappers.kojiwrapper.KOJI_BUILD_DELETED:
|
||||
compose.log_debug(
|
||||
"Module build %s has been deleted, ignoring it." % build["name"]
|
||||
)
|
||||
@ -166,7 +168,7 @@ def get_koji_modules(compose, koji_wrapper, event, module_info_str):
|
||||
# If there is version provided, then all modules with that version will go
|
||||
# in. In case version is missing, we will find the latest version and
|
||||
# include all modules with that version.
|
||||
if not module_info.get('version'):
|
||||
if not module_info.get("version"):
|
||||
# select all found modules with latest version
|
||||
sorted_modules = sorted(
|
||||
modules, key=lambda item: item["module_version"], reverse=True
|
||||
@ -188,7 +190,9 @@ class PkgsetSourceKoji(pungi.phases.pkgset.source.PkgsetSourceBase):
|
||||
self.koji_wrapper = pungi.wrappers.kojiwrapper.KojiWrapper(koji_profile)
|
||||
# path prefix must contain trailing '/'
|
||||
path_prefix = self.koji_wrapper.koji_module.config.topdir.rstrip("/") + "/"
|
||||
package_sets = get_pkgset_from_koji(self.compose, self.koji_wrapper, path_prefix)
|
||||
package_sets = get_pkgset_from_koji(
|
||||
self.compose, self.koji_wrapper, path_prefix
|
||||
)
|
||||
return (package_sets, path_prefix)
|
||||
|
||||
|
||||
@ -327,7 +331,10 @@ def _get_modules_from_koji(
|
||||
compose.log_info(
|
||||
"Module '%s' in variant '%s' will use Koji tag '%s' "
|
||||
"(as a result of querying module '%s')",
|
||||
nsvc, variant, tag, module["name"]
|
||||
nsvc,
|
||||
variant,
|
||||
tag,
|
||||
module["name"],
|
||||
)
|
||||
|
||||
# Store mapping NSVC --> koji_tag into variant. This is needed
|
||||
@ -450,7 +457,8 @@ def _get_modules_from_koji_tags(
|
||||
# "release" in Koji build and with latest=True, Koji would return
|
||||
# only builds with highest release.
|
||||
module_builds = koji_proxy.listTagged(
|
||||
tag, event=event_id["id"], inherit=True, type="module")
|
||||
tag, event=event_id["id"], inherit=True, type="module"
|
||||
)
|
||||
|
||||
# Filter out builds inherited from non-top tag
|
||||
module_builds = filter_inherited(koji_proxy, event_id, module_builds, tag)
|
||||
@ -482,9 +490,11 @@ def _get_modules_from_koji_tags(
|
||||
latest_builds = []
|
||||
module_builds = sorted(module_builds, key=_key, reverse=True)
|
||||
for ns, ns_builds in groupby(
|
||||
module_builds, key=lambda x: ":".join([x["name"], x["version"]])):
|
||||
module_builds, key=lambda x: ":".join([x["name"], x["version"]])
|
||||
):
|
||||
for nsv, nsv_builds in groupby(
|
||||
ns_builds, key=lambda x: x["release"].split(".")[0]):
|
||||
ns_builds, key=lambda x: x["release"].split(".")[0]
|
||||
):
|
||||
latest_builds += list(nsv_builds)
|
||||
break
|
||||
|
||||
@ -493,8 +503,12 @@ def _get_modules_from_koji_tags(
|
||||
for build in latest_builds:
|
||||
# Get the Build from Koji to get modulemd and module_tag.
|
||||
build = koji_proxy.getBuild(build["build_id"])
|
||||
module_tag = build.get("extra", {}).get("typeinfo", {}).get(
|
||||
"module", {}).get("content_koji_tag", "")
|
||||
module_tag = (
|
||||
build.get("extra", {})
|
||||
.get("typeinfo", {})
|
||||
.get("module", {})
|
||||
.get("content_koji_tag", "")
|
||||
)
|
||||
|
||||
variant_tags[variant].append(module_tag)
|
||||
|
||||
@ -516,7 +530,9 @@ def _get_modules_from_koji_tags(
|
||||
if tag_to_mmd[module_tag]:
|
||||
compose.log_info(
|
||||
"Module %s in variant %s will use Koji tag %s.",
|
||||
nsvc, variant, module_tag
|
||||
nsvc,
|
||||
variant,
|
||||
module_tag,
|
||||
)
|
||||
|
||||
# Store mapping module-uid --> koji_tag into variant. This is
|
||||
@ -543,14 +559,18 @@ def _find_old_file_cache_path(compose, tag_name):
|
||||
compose.ci_base.release.short,
|
||||
compose.ci_base.release.version,
|
||||
compose.ci_base.release.type_suffix,
|
||||
compose.ci_base.base_product.short if compose.ci_base.release.is_layered else None,
|
||||
compose.ci_base.base_product.version if compose.ci_base.release.is_layered else None,
|
||||
compose.ci_base.base_product.short
|
||||
if compose.ci_base.release.is_layered
|
||||
else None,
|
||||
compose.ci_base.base_product.version
|
||||
if compose.ci_base.release.is_layered
|
||||
else None,
|
||||
)
|
||||
if not old_compose_path:
|
||||
return None
|
||||
|
||||
old_file_cache_dir = compose.paths.work.pkgset_file_cache(tag_name)
|
||||
rel_dir = relative_path(old_file_cache_dir, compose.topdir.rstrip('/') + '/')
|
||||
rel_dir = relative_path(old_file_cache_dir, compose.topdir.rstrip("/") + "/")
|
||||
old_file_cache_path = os.path.join(old_compose_path, rel_dir)
|
||||
if not os.path.exists(old_file_cache_path):
|
||||
return None
|
||||
@ -573,12 +593,15 @@ def populate_global_pkgset(compose, koji_wrapper, path_prefix, event):
|
||||
# here. This only works if we are not creating bootable images. Those could
|
||||
# include packages that are not in the compose.
|
||||
packages_to_gather, groups = get_packages_to_gather(
|
||||
compose, include_arch=False, include_prepopulated=True)
|
||||
compose, include_arch=False, include_prepopulated=True
|
||||
)
|
||||
if groups:
|
||||
comps = CompsWrapper(compose.paths.work.comps())
|
||||
for group in groups:
|
||||
packages_to_gather += comps.get_packages(group)
|
||||
if compose.conf["gather_method"] == "nodeps" and not compose.conf.get('buildinstall_method'):
|
||||
if compose.conf["gather_method"] == "nodeps" and not compose.conf.get(
|
||||
"buildinstall_method"
|
||||
):
|
||||
populate_only_packages_to_gather = True
|
||||
else:
|
||||
populate_only_packages_to_gather = False
|
||||
@ -605,9 +628,12 @@ def populate_global_pkgset(compose, koji_wrapper, path_prefix, event):
|
||||
raise ValueError(
|
||||
"pygobject module or libmodulemd library is not installed, "
|
||||
"support for modules is disabled, but compose contains "
|
||||
"modules.")
|
||||
"modules."
|
||||
)
|
||||
|
||||
if modular_koji_tags or (compose.conf["pkgset_koji_module_tag"] and variant.modules):
|
||||
if modular_koji_tags or (
|
||||
compose.conf["pkgset_koji_module_tag"] and variant.modules
|
||||
):
|
||||
# List modules tagged in particular tags.
|
||||
_get_modules_from_koji_tags(
|
||||
compose, koji_wrapper, event, variant, variant_tags, tag_to_mmd
|
||||
@ -647,12 +673,16 @@ def populate_global_pkgset(compose, koji_wrapper, path_prefix, event):
|
||||
|
||||
pkgset = pungi.phases.pkgset.pkgsets.KojiPackageSet(
|
||||
compose_tag,
|
||||
koji_wrapper, compose.conf["sigkeys"], logger=compose._logger,
|
||||
arches=all_arches, packages=packages_to_gather,
|
||||
koji_wrapper,
|
||||
compose.conf["sigkeys"],
|
||||
logger=compose._logger,
|
||||
arches=all_arches,
|
||||
packages=packages_to_gather,
|
||||
allow_invalid_sigkeys=allow_invalid_sigkeys,
|
||||
populate_only_packages=populate_only_packages_to_gather,
|
||||
cache_region=compose.cache_region,
|
||||
extra_builds=extra_builds)
|
||||
extra_builds=extra_builds,
|
||||
)
|
||||
|
||||
# Check if we have cache for this tag from previous compose. If so, use
|
||||
# it.
|
||||
@ -726,7 +756,9 @@ def get_koji_event_info(compose, koji_wrapper):
|
||||
compose.log_info("Getting koji event")
|
||||
result = get_koji_event_raw(koji_wrapper, compose.koji_event, event_file)
|
||||
if compose.koji_event:
|
||||
compose.log_info("Setting koji event to a custom value: %s" % compose.koji_event)
|
||||
compose.log_info(
|
||||
"Setting koji event to a custom value: %s" % compose.koji_event
|
||||
)
|
||||
else:
|
||||
compose.log_info("Koji event: %s" % result["id"])
|
||||
|
||||
|
@ -47,7 +47,9 @@ def get_pkgset_from_repos(compose):
|
||||
|
||||
pool = LinkerPool.with_workers(10, "hardlink-or-copy", logger=compose._logger)
|
||||
|
||||
path_prefix = os.path.join(compose.paths.work.topdir(arch="global"), "download") + "/"
|
||||
path_prefix = (
|
||||
os.path.join(compose.paths.work.topdir(arch="global"), "download") + "/"
|
||||
)
|
||||
makedirs(path_prefix)
|
||||
|
||||
seen_packages = set()
|
||||
@ -55,7 +57,8 @@ def get_pkgset_from_repos(compose):
|
||||
# write a pungi config for remote repos and a local comps repo
|
||||
repos = {}
|
||||
for num, repo in enumerate(
|
||||
compose.conf["pkgset_repos"].get(arch, []) + compose.conf["pkgset_repos"].get("*", [])
|
||||
compose.conf["pkgset_repos"].get(arch, [])
|
||||
+ compose.conf["pkgset_repos"].get("*", [])
|
||||
):
|
||||
repo_path = repo
|
||||
if "://" not in repo_path:
|
||||
@ -74,16 +77,24 @@ def get_pkgset_from_repos(compose):
|
||||
pungi_dir = compose.paths.work.pungi_download_dir(arch)
|
||||
|
||||
backends = {
|
||||
'yum': pungi.get_pungi_cmd,
|
||||
'dnf': pungi.get_pungi_cmd_dnf,
|
||||
"yum": pungi.get_pungi_cmd,
|
||||
"dnf": pungi.get_pungi_cmd_dnf,
|
||||
}
|
||||
get_cmd = backends[compose.conf['gather_backend']]
|
||||
cmd = get_cmd(pungi_conf, destdir=pungi_dir, name="FOO",
|
||||
selfhosting=True, fulltree=True, multilib_methods=["all"],
|
||||
nodownload=False, full_archlist=True, arch=arch,
|
||||
cache_dir=compose.paths.work.pungi_cache_dir(arch=arch),
|
||||
profiler=profiler)
|
||||
if compose.conf['gather_backend'] == 'yum':
|
||||
get_cmd = backends[compose.conf["gather_backend"]]
|
||||
cmd = get_cmd(
|
||||
pungi_conf,
|
||||
destdir=pungi_dir,
|
||||
name="FOO",
|
||||
selfhosting=True,
|
||||
fulltree=True,
|
||||
multilib_methods=["all"],
|
||||
nodownload=False,
|
||||
full_archlist=True,
|
||||
arch=arch,
|
||||
cache_dir=compose.paths.work.pungi_cache_dir(arch=arch),
|
||||
profiler=profiler,
|
||||
)
|
||||
if compose.conf["gather_backend"] == "yum":
|
||||
cmd.append("--force")
|
||||
|
||||
# TODO: runroot
|
||||
@ -127,7 +138,9 @@ def populate_global_pkgset(compose, file_list, path_prefix):
|
||||
return pkgset
|
||||
|
||||
|
||||
def write_pungi_config(compose, arch, variant, repos=None, comps_repo=None, package_set=None):
|
||||
def write_pungi_config(
|
||||
compose, arch, variant, repos=None, comps_repo=None, package_set=None
|
||||
):
|
||||
"""write pungi config (kickstart) for arch/variant"""
|
||||
pungi_wrapper = PungiWrapper()
|
||||
pungi_cfg = compose.paths.work.pungi_conf(variant=variant, arch=arch)
|
||||
@ -142,4 +155,12 @@ def write_pungi_config(compose, arch, variant, repos=None, comps_repo=None, pack
|
||||
packages.append("system-release")
|
||||
|
||||
prepopulate = get_prepopulate_packages(compose, arch, None)
|
||||
pungi_wrapper.write_kickstart(ks_path=pungi_cfg, repos=repos, groups=grps, packages=packages, exclude_packages=[], comps_repo=None, prepopulate=prepopulate)
|
||||
pungi_wrapper.write_kickstart(
|
||||
ks_path=pungi_cfg,
|
||||
repos=repos,
|
||||
groups=grps,
|
||||
packages=packages,
|
||||
exclude_packages=[],
|
||||
comps_repo=None,
|
||||
prepopulate=prepopulate,
|
||||
)
|
||||
|
@ -47,15 +47,19 @@ def run_repoclosure(compose):
|
||||
if variant.is_empty:
|
||||
continue
|
||||
|
||||
conf = get_arch_variant_data(compose.conf, 'repoclosure_strictness', arch, variant)
|
||||
if conf and conf[-1] == 'off':
|
||||
conf = get_arch_variant_data(
|
||||
compose.conf, "repoclosure_strictness", arch, variant
|
||||
)
|
||||
if conf and conf[-1] == "off":
|
||||
continue
|
||||
|
||||
prefix = "%s-repoclosure" % compose.compose_id
|
||||
lookaside = {}
|
||||
if variant.parent:
|
||||
repo_id = "%s-%s.%s" % (prefix, variant.parent.uid, arch)
|
||||
repo_dir = compose.paths.compose.repository(arch=arch, variant=variant.parent)
|
||||
repo_dir = compose.paths.compose.repository(
|
||||
arch=arch, variant=variant.parent
|
||||
)
|
||||
lookaside[repo_id] = repo_dir
|
||||
|
||||
repos = {}
|
||||
@ -63,8 +67,12 @@ def run_repoclosure(compose):
|
||||
repo_dir = compose.paths.compose.repository(arch=arch, variant=variant)
|
||||
repos[repo_id] = repo_dir
|
||||
|
||||
for i, lookaside_url in enumerate(get_lookaside_repos(compose, arch, variant)):
|
||||
lookaside["%s-lookaside-%s.%s-%s" % (compose.compose_id, variant.uid, arch, i)] = lookaside_url
|
||||
for i, lookaside_url in enumerate(
|
||||
get_lookaside_repos(compose, arch, variant)
|
||||
):
|
||||
lookaside[
|
||||
"%s-lookaside-%s.%s-%s" % (compose.compose_id, variant.uid, arch, i)
|
||||
] = lookaside_url
|
||||
|
||||
logfile = compose.paths.log.log_file(arch, "repoclosure-%s" % variant)
|
||||
|
||||
@ -80,11 +88,12 @@ def run_repoclosure(compose):
|
||||
else:
|
||||
_run_repoclosure_cmd(compose, repos, lookaside, arches, logfile)
|
||||
except RuntimeError as exc:
|
||||
if conf and conf[-1] == 'fatal':
|
||||
if conf and conf[-1] == "fatal":
|
||||
raise
|
||||
else:
|
||||
compose.log_warning('Repoclosure failed for %s.%s\n%s'
|
||||
% (variant.uid, arch, exc))
|
||||
compose.log_warning(
|
||||
"Repoclosure failed for %s.%s\n%s" % (variant.uid, arch, exc)
|
||||
)
|
||||
finally:
|
||||
if methods != "hybrid":
|
||||
_delete_repoclosure_cache_dirs(compose)
|
||||
@ -93,16 +102,18 @@ def run_repoclosure(compose):
|
||||
|
||||
|
||||
def _delete_repoclosure_cache_dirs(compose):
|
||||
if 'dnf' == compose.conf["repoclosure_backend"]:
|
||||
if "dnf" == compose.conf["repoclosure_backend"]:
|
||||
from dnf.const import SYSTEM_CACHEDIR
|
||||
from dnf.util import am_i_root
|
||||
from dnf.yum.misc import getCacheDir
|
||||
|
||||
if am_i_root():
|
||||
top_cache_dir = SYSTEM_CACHEDIR
|
||||
else:
|
||||
top_cache_dir = getCacheDir()
|
||||
else:
|
||||
from yum.misc import getCacheDir
|
||||
|
||||
top_cache_dir = getCacheDir()
|
||||
|
||||
for name in os.listdir(top_cache_dir):
|
||||
@ -115,8 +126,12 @@ def _delete_repoclosure_cache_dirs(compose):
|
||||
|
||||
|
||||
def _run_repoclosure_cmd(compose, repos, lookaside, arches, logfile):
|
||||
cmd = repoclosure.get_repoclosure_cmd(backend=compose.conf["repoclosure_backend"],
|
||||
repos=repos, lookaside=lookaside, arch=arches)
|
||||
cmd = repoclosure.get_repoclosure_cmd(
|
||||
backend=compose.conf["repoclosure_backend"],
|
||||
repos=repos,
|
||||
lookaside=lookaside,
|
||||
arch=arches,
|
||||
)
# Use temp working directory as workaround for
# https://bugzilla.redhat.com/show_bug.cgi?id=795137
with temp_dir(prefix="repoclosure_") as tmp_dir:
|
||||
@ -147,22 +162,26 @@ def check_image_sanity(compose):

def check_sanity(compose, variant, arch, image):
path = os.path.join(compose.paths.compose.topdir(), image.path)
deliverable = getattr(image, 'deliverable')
can_fail = getattr(image, 'can_fail', False)
with failable(compose, can_fail, variant, arch, deliverable,
subvariant=image.subvariant):
with open(path, 'rb') as f:
deliverable = getattr(image, "deliverable")
can_fail = getattr(image, "can_fail", False)
with failable(
compose, can_fail, variant, arch, deliverable, subvariant=image.subvariant
):
with open(path, "rb") as f:
iso = is_iso(f)
if image.format == 'iso' and not iso:
raise RuntimeError('%s does not look like an ISO file' % path)
if (image.arch in ('x86_64', 'i386') and
image.bootable and
not has_mbr(f) and
not has_gpt(f) and
not (iso and has_eltorito(f))):
if image.format == "iso" and not iso:
raise RuntimeError("%s does not look like an ISO file" % path)
if (
image.arch in ("x86_64", "i386")
and image.bootable
and not has_mbr(f)
and not has_gpt(f)
and not (iso and has_eltorito(f))
):
raise RuntimeError(
'%s is supposed to be bootable, but does not have MBR nor '
'GPT nor is it a bootable ISO' % path)
"%s is supposed to be bootable, but does not have MBR nor "
"GPT nor is it a bootable ISO" % path
)
# If exception is raised above, failable may catch it, in which case
# nothing else will happen.

@ -174,19 +193,19 @@ def _check_magic(f, offset, bytes):


def is_iso(f):
return _check_magic(f, 0x8001, b'CD001')
return _check_magic(f, 0x8001, b"CD001")


def has_mbr(f):
return _check_magic(f, 0x1fe, b'\x55\xAA')
return _check_magic(f, 0x1FE, b"\x55\xAA")


def has_gpt(f):
return _check_magic(f, 0x200, b'EFI PART')
return _check_magic(f, 0x200, b"EFI PART")


def has_eltorito(f):
return _check_magic(f, 0x8801, b'CD001\1EL TORITO SPECIFICATION')
return _check_magic(f, 0x8801, b"CD001\1EL TORITO SPECIFICATION")

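All four helpers above delegate to _check_magic, whose body sits outside this hunk. A plausible implementation (an assumption, not copied from Pungi) is a simple seek-and-compare on the already-open binary file:

def _check_magic(f, offset, bytes):
    # Read as many bytes as the expected magic value occupies at the given
    # offset and compare them; the file position is left where the read ends.
    f.seek(offset)
    return f.read(len(bytes)) == bytes
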
def check_size_limit(compose, variant, arch, img):
@ -207,7 +226,9 @@ def check_size_limit(compose, variant, arch, img):
compose.conf, "createiso_max_size_is_strict", arch, variant
)
msg = "ISO %s is too big. Expected max %dB, got %dB" % (
img.path, limit, img.size
img.path,
limit,
img.size,
)
if any(is_strict):
raise RuntimeError(msg)

@ -17,6 +17,7 @@ class WeaverPhase(object):
:param phases_schema: two-dimensional array of phases. Top dimension
denotes particular pipelines. Second dimension contains phases.
"""

name = "weaver"

def __init__(self, compose, phases_schema):
@ -32,7 +33,10 @@ class WeaverPhase(object):

def start(self):
if self.finished:
msg = "Phase '%s' has already finished and can not be started twice" % self.name
msg = (
"Phase '%s' has already finished and can not be started twice"
% self.name
)
self.pool.log_error(msg)
raise RuntimeError(msg)

@ -59,10 +63,15 @@ class PipelineThread(WorkerThread):
"""
Launches phases in pipeline sequentially
"""

def process(self, item, num):
pipeline = shortcuts.force_list(item)
phases_names = ", ".join(phase.name for phase in pipeline)
msg = "Running pipeline (%d/%d). Phases: %s" % (num, self.pool.queue_total, phases_names)
msg = "Running pipeline (%d/%d). Phases: %s" % (
num,
self.pool.queue_total,
phases_names,
)
self.pool.log_info("[BEGIN] %s" % (msg,))

for phase in pipeline:
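
To make the docstring above concrete: the schema is a list of pipelines, each pipeline a list of phases executed in order, with pipelines dispatched to worker threads. The phase objects below are placeholders, not actual Pungi instances.

# Illustration only; assumes compose and the phase objects already exist.
phases_schema = [
    [init_phase, pkgset_phase, gather_phase],  # pipeline 1, phases run sequentially
    [buildinstall_phase],                      # pipeline 2, runs alongside pipeline 1
]
weaver = WeaverPhase(compose, phases_schema)
weaver.start()
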
@ -64,6 +64,7 @@ class Profiler(object):
def decorated(*args, **kwargs):
with self:
return func(*args, **kwargs)

return decorated

@classmethod
@ -72,5 +73,6 @@ class Profiler(object):
results = cls._data.items()
results = sorted(results, key=lambda x: x[1]["time"], reverse=True)
for name, data in results:
print(" %6.2f %5d %s" % (data["time"], data["calls"], name),
file=sys.stdout)
print(
" %6.2f %5d %s" % (data["time"], data["calls"], name), file=sys.stdout
)
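
The Profiler above can be used both as a context manager and, through the wrapper shown in this hunk, as a decorator. A short usage sketch follows; the import path and the name of the reporting classmethod are assumptions.

from pungi.profiler import Profiler  # assumed module path


@Profiler("gather-packages")  # decorator form
def gather_packages():
    pass


gather_packages()

with Profiler("create-repo"):  # context-manager form
    pass

Profiler.print_results()  # assumed name of the classmethod shown in the hunk above
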
@ -83,9 +83,13 @@ class Runroot(kobo.log.LoggingBase):

koji_wrapper = kojiwrapper.KojiWrapper(self.compose.conf["koji_profile"])
koji_cmd = koji_wrapper.get_runroot_cmd(
runroot_tag, arch, command,
channel=runroot_channel, use_shell=True,
packages=packages, **kwargs
runroot_tag,
arch,
command,
channel=runroot_channel,
use_shell=True,
packages=packages,
**kwargs
)

output = koji_wrapper.run_runroot_cmd(koji_cmd, log_file=log_file)
@ -115,8 +119,15 @@ class Runroot(kobo.log.LoggingBase):
def _log_file(self, base, suffix):
return base.replace(".log", "." + suffix + ".log")

def _run_openssh(self, command, log_file=None, arch=None, packages=None,
chown_paths=None, **kwargs):
def _run_openssh(
self,
command,
log_file=None,
arch=None,
packages=None,
chown_paths=None,
**kwargs
):
"""
Runs the runroot command on remote machine using ssh.
"""
@ -176,7 +187,9 @@ class Runroot(kobo.log.LoggingBase):
fmt_dict["runroot_key"] = runroot_key
self._ssh_run(hostname, user, run_template, fmt_dict, log_file=log_file)

fmt_dict["command"] = "rpm -qa --qf='%{name}-%{version}-%{release}.%{arch}\n'"
fmt_dict[
"command"
] = "rpm -qa --qf='%{name}-%{version}-%{release}.%{arch}\n'"
buildroot_rpms = self._ssh_run(
hostname,
user,
@ -254,8 +267,13 @@ class Runroot(kobo.log.LoggingBase):

koji_wrapper = kojiwrapper.KojiWrapper(self.compose.conf["koji_profile"])
koji_cmd = koji_wrapper.get_pungi_buildinstall_cmd(
runroot_tag, arch, args, channel=runroot_channel,
chown_uid=os.getuid(), **kwargs)
runroot_tag,
arch,
args,
channel=runroot_channel,
chown_uid=os.getuid(),
**kwargs
)

output = koji_wrapper.run_runroot_cmd(koji_cmd, log_file=log_file)
if output["retcode"] != 0:
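
The openssh runroot path above records the buildroot contents with a plain rpm query pushed over ssh. Stripped of Pungi's templating, it boils down to something like the following sketch; the hostname and user are placeholders, not values from the diff.

import subprocess

cmd = [
    "ssh", "root@builder.example.com",
    "rpm -qa --qf='%{name}-%{version}-%{release}.%{arch}\\n'",
]
# One package NVR.A per line, matching the query format above.
buildroot_rpms = subprocess.check_output(cmd, universal_newlines=True).splitlines()
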
@ -11,24 +11,58 @@ from pungi.wrappers.comps import CompsFilter
|
||||
def main():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--output", help="redirect output to a file")
|
||||
parser.add_argument("--arch", required=True,
|
||||
help="filter groups and packages according to an arch")
|
||||
parser.add_argument("--arch-only-groups", default=False, action="store_true",
|
||||
help="keep only arch groups, remove the rest")
|
||||
parser.add_argument("--arch-only-packages", default=False, action="store_true",
|
||||
help="keep only arch packages, remove the rest")
|
||||
parser.add_argument("--arch-only-environments", default=False, action="store_true",
|
||||
help="keep only arch environments, remove the rest")
|
||||
parser.add_argument("--remove-categories", default=False, action="store_true",
|
||||
help="remove all categories")
|
||||
parser.add_argument("--remove-langpacks", default=False, action="store_true",
|
||||
help="remove the langpacks section")
|
||||
parser.add_argument("--remove-translations", default=False, action="store_true",
|
||||
help="remove all translations")
|
||||
parser.add_argument("--remove-environments", default=False, action="store_true",
|
||||
help="remove all environment sections")
|
||||
parser.add_argument("--keep-empty-group", default=[], action="append", metavar="GROUPID",
|
||||
help="keep groups even if they are empty")
|
||||
parser.add_argument(
|
||||
"--arch", required=True, help="filter groups and packages according to an arch"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--arch-only-groups",
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="keep only arch groups, remove the rest",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--arch-only-packages",
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="keep only arch packages, remove the rest",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--arch-only-environments",
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="keep only arch environments, remove the rest",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--remove-categories",
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="remove all categories",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--remove-langpacks",
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="remove the langpacks section",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--remove-translations",
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="remove all translations",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--remove-environments",
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="remove all environment sections",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--keep-empty-group",
|
||||
default=[],
|
||||
action="append",
|
||||
metavar="GROUPID",
|
||||
help="keep groups even if they are empty",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--lookaside-group",
|
||||
default=[],
|
||||
@ -36,13 +70,22 @@ def main():
|
||||
metavar="GROUPID",
|
||||
help="keep this group in environments even if they are not defined in the comps",
|
||||
)
|
||||
parser.add_argument("--no-cleanup", default=False, action="store_true",
|
||||
help="don't remove empty groups and categories")
|
||||
parser.add_argument("--no-reindent", default=False, action="store_true",
|
||||
help="don't re-indent the output")
|
||||
parser.add_argument("comps_file", metavar='COMPS_FILE')
|
||||
parser.add_argument('--variant',
|
||||
help='filter groups and packages according to variant name')
|
||||
parser.add_argument(
|
||||
"--no-cleanup",
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="don't remove empty groups and categories",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--no-reindent",
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="don't re-indent the output",
|
||||
)
|
||||
parser.add_argument("comps_file", metavar="COMPS_FILE")
|
||||
parser.add_argument(
|
||||
"--variant", help="filter groups and packages according to variant name"
|
||||
)
|
||||
|
||||
opts = parser.parse_args()
|
||||
|
||||
@ -67,4 +110,4 @@ def main():
|
||||
if opts.remove_environments:
|
||||
f.remove_environments()
|
||||
|
||||
f.write(open(opts.output, 'wb') if opts.output else sys.stdout)
|
||||
f.write(open(opts.output, "wb") if opts.output else sys.stdout)
|
||||
|
@ -34,29 +34,29 @@ class ValidationCompose(pungi.compose.Compose):
|
||||
|
||||
@property
|
||||
def old_composes(self):
|
||||
return '/dummy' if self.has_old_composes else None
|
||||
return "/dummy" if self.has_old_composes else None
|
||||
|
||||
@property
|
||||
def compose_id(self):
|
||||
return 'Dummy-1.0-20160811.t.0'
|
||||
return "Dummy-1.0-20160811.t.0"
|
||||
|
||||
@property
|
||||
def compose_type(self):
|
||||
return 'test'
|
||||
return "test"
|
||||
|
||||
@property
|
||||
def compose_date(self):
|
||||
return '20160811'
|
||||
return "20160811"
|
||||
|
||||
@property
|
||||
def compose_respin(self):
|
||||
return '0'
|
||||
return "0"
|
||||
|
||||
|
||||
def read_variants(compose, config):
|
||||
with pungi.util.temp_dir() as tmp_dir:
|
||||
scm_dict = compose.conf["variants_file"]
|
||||
if isinstance(scm_dict, six.string_types) and scm_dict[0] != '/':
|
||||
if isinstance(scm_dict, six.string_types) and scm_dict[0] != "/":
|
||||
config_dir = os.path.dirname(config)
|
||||
scm_dict = os.path.join(config_dir, scm_dict)
|
||||
files = pungi.wrappers.scm.get_file_from_scm(scm_dict, tmp_dir)
|
||||
@ -144,24 +144,29 @@ def run(config, topdir, has_old, offline, defined_variables, schema_overrides):
|
||||
|
||||
class DumpSchemaAction(argparse.Action):
|
||||
def __call__(self, parser, ns, values, option_string=None):
|
||||
json.dump(pungi.checks.make_schema(), sys.stdout,
|
||||
sort_keys=True, indent=4)
|
||||
print('')
|
||||
json.dump(pungi.checks.make_schema(), sys.stdout, sort_keys=True, indent=4)
|
||||
print("")
|
||||
sys.exit(0)
|
||||
|
||||
|
||||
def main(args=None):
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('--dump-schema', nargs=0, action=DumpSchemaAction,
|
||||
help='print JSON Schema of configuration and exit')
|
||||
parser.add_argument('config', metavar='CONFIG',
|
||||
help='configuration file to validate')
|
||||
parser.add_argument('--old-composes', action='store_true',
|
||||
help='indicate if pungi-koji will be run with --old-composes option')
|
||||
parser.add_argument(
|
||||
"--offline",
|
||||
"--dump-schema",
|
||||
nargs=0,
|
||||
action=DumpSchemaAction,
|
||||
help="print JSON Schema of configuration and exit",
|
||||
)
|
||||
parser.add_argument(
|
||||
"config", metavar="CONFIG", help="configuration file to validate"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--old-composes",
|
||||
action="store_true",
|
||||
help="Do not validate git references in URLs",
|
||||
help="indicate if pungi-koji will be run with --old-composes option",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--offline", action="store_true", help="Do not validate git references in URLs",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-e",
|
||||
|
@ -18,10 +18,7 @@ def parse_args():
|
||||
parser = argparse.ArgumentParser(add_help=True)
|
||||
|
||||
parser.add_argument(
|
||||
'compose',
|
||||
metavar='<compose-path>',
|
||||
nargs=1,
|
||||
help='path to compose',
|
||||
"compose", metavar="<compose-path>", nargs=1, help="path to compose",
|
||||
)
|
||||
|
||||
return parser.parse_args()
|
||||
|
@ -8,18 +8,18 @@ import sys
|
||||
|
||||
|
||||
def send(cmd, data):
|
||||
topic = 'compose.%s' % cmd.replace('-', '.').lower()
|
||||
fedmsg.publish(topic=topic, modname='pungi', msg=data)
|
||||
topic = "compose.%s" % cmd.replace("-", ".").lower()
|
||||
fedmsg.publish(topic=topic, modname="pungi", msg=data)
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('cmd')
|
||||
parser.add_argument("cmd")
|
||||
opts = parser.parse_args()
|
||||
|
||||
config = fedmsg.config.load_config()
|
||||
config['active'] = True # Connect out to a fedmsg-relay instance
|
||||
config['cert_prefix'] = 'releng' # Use this cert.
|
||||
config["active"] = True # Connect out to a fedmsg-relay instance
|
||||
config["cert_prefix"] = "releng" # Use this cert.
|
||||
fedmsg.init(**config)
|
||||
|
||||
data = json.load(sys.stdin)
|
||||
|
@ -22,24 +22,32 @@ from pungi_utils import patch_iso
|
||||
|
||||
def main(args=None):
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('-v', '--verbose', action='store_true',
|
||||
help='Print debugging information')
|
||||
parser.add_argument('--supported', choices=('true', 'false'),
|
||||
help='Override supported bit on the ISO')
|
||||
parser.add_argument('--volume-id',
|
||||
help='Override volume ID on the ISO')
|
||||
parser.add_argument('--force-arch',
|
||||
help='Treat the ISO as bootable on given architecture')
|
||||
parser.add_argument('target', metavar='TARGET_ISO',
|
||||
help='which file to write the result to')
|
||||
parser.add_argument('source', metavar='SOURCE_ISO',
|
||||
help='source ISO to work with')
|
||||
parser.add_argument('dirs', nargs="+", metavar='GRAFT_DIR',
|
||||
help='extra directories to graft on the ISO')
|
||||
parser.add_argument(
|
||||
"-v", "--verbose", action="store_true", help="Print debugging information"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--supported",
|
||||
choices=("true", "false"),
|
||||
help="Override supported bit on the ISO",
|
||||
)
|
||||
parser.add_argument("--volume-id", help="Override volume ID on the ISO")
|
||||
parser.add_argument(
|
||||
"--force-arch", help="Treat the ISO as bootable on given architecture"
|
||||
)
|
||||
parser.add_argument(
|
||||
"target", metavar="TARGET_ISO", help="which file to write the result to"
|
||||
)
|
||||
parser.add_argument("source", metavar="SOURCE_ISO", help="source ISO to work with")
|
||||
parser.add_argument(
|
||||
"dirs",
|
||||
nargs="+",
|
||||
metavar="GRAFT_DIR",
|
||||
help="extra directories to graft on the ISO",
|
||||
)
|
||||
opts = parser.parse_args(args)
|
||||
|
||||
level = logging.DEBUG if opts.verbose else logging.INFO
|
||||
format = '%(levelname)s: %(message)s'
|
||||
format = "%(levelname)s: %(message)s"
|
||||
logging.basicConfig(level=level, format=format)
|
||||
log = logging.getLogger()
|
||||
|
||||
|
@ -30,142 +30,290 @@ def get_arguments(config):
|
||||
|
||||
class SetConfig(Action):
|
||||
def __call__(self, parser, namespace, value, option_string=None):
|
||||
config.set('pungi', self.dest, value)
|
||||
config.set("pungi", self.dest, value)
|
||||
|
||||
parser.add_argument('--version', action='version', version=get_full_version())
|
||||
parser.add_argument("--version", action="version", version=get_full_version())
|
||||
|
||||
# Pulled in from config file to be cli options as part of pykickstart conversion
|
||||
parser.add_argument(
|
||||
"--name", dest="family", type=str, action=SetConfig,
|
||||
help='the name for your distribution (defaults to "Fedora"), DEPRECATED')
|
||||
"--name",
|
||||
dest="family",
|
||||
type=str,
|
||||
action=SetConfig,
|
||||
help='the name for your distribution (defaults to "Fedora"), DEPRECATED',
|
||||
)
|
||||
parser.add_argument(
|
||||
"--family", dest="family", action=SetConfig,
|
||||
help='the family name for your distribution (defaults to "Fedora")')
|
||||
"--family",
|
||||
dest="family",
|
||||
action=SetConfig,
|
||||
help='the family name for your distribution (defaults to "Fedora")',
|
||||
)
|
||||
parser.add_argument(
|
||||
"--ver", dest="version", action=SetConfig,
|
||||
help='the version of your distribution (defaults to datestamp)')
|
||||
"--ver",
|
||||
dest="version",
|
||||
action=SetConfig,
|
||||
help="the version of your distribution (defaults to datestamp)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--flavor", dest="variant", action=SetConfig,
|
||||
help='the flavor of your distribution spin (optional), DEPRECATED')
|
||||
"--flavor",
|
||||
dest="variant",
|
||||
action=SetConfig,
|
||||
help="the flavor of your distribution spin (optional), DEPRECATED",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--variant", dest="variant", action=SetConfig,
|
||||
help='the variant of your distribution spin (optional)')
|
||||
"--variant",
|
||||
dest="variant",
|
||||
action=SetConfig,
|
||||
help="the variant of your distribution spin (optional)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--destdir", dest="destdir", action=SetConfig,
|
||||
help='destination directory (defaults to current directory)')
|
||||
"--destdir",
|
||||
dest="destdir",
|
||||
action=SetConfig,
|
||||
help="destination directory (defaults to current directory)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--cachedir", dest="cachedir", action=SetConfig,
|
||||
help='package cache directory (defaults to /var/cache/pungi)')
|
||||
"--cachedir",
|
||||
dest="cachedir",
|
||||
action=SetConfig,
|
||||
help="package cache directory (defaults to /var/cache/pungi)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--bugurl", dest="bugurl", action=SetConfig,
|
||||
help='the url for your bug system (defaults to http://bugzilla.redhat.com)')
|
||||
"--bugurl",
|
||||
dest="bugurl",
|
||||
action=SetConfig,
|
||||
help="the url for your bug system (defaults to http://bugzilla.redhat.com)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--selfhosting", action="store_true", dest="selfhosting",
|
||||
help='build a self-hosting tree by following build dependencies (optional)')
|
||||
"--selfhosting",
|
||||
action="store_true",
|
||||
dest="selfhosting",
|
||||
help="build a self-hosting tree by following build dependencies (optional)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--fulltree", action="store_true", dest="fulltree",
|
||||
help='build a tree that includes all packages built from corresponding source rpms (optional)')
|
||||
"--fulltree",
|
||||
action="store_true",
|
||||
dest="fulltree",
|
||||
help="build a tree that includes all packages built from corresponding source rpms (optional)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--nosource", action="store_true", dest="nosource",
|
||||
help='disable gathering of source packages (optional)')
|
||||
"--nosource",
|
||||
action="store_true",
|
||||
dest="nosource",
|
||||
help="disable gathering of source packages (optional)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--nodebuginfo", action="store_true", dest="nodebuginfo",
|
||||
help='disable gathering of debuginfo packages (optional)')
|
||||
"--nodebuginfo",
|
||||
action="store_true",
|
||||
dest="nodebuginfo",
|
||||
help="disable gathering of debuginfo packages (optional)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--nodownload", action="store_true", dest="nodownload",
|
||||
help='disable downloading of packages. instead, print the package URLs (optional)')
|
||||
"--nodownload",
|
||||
action="store_true",
|
||||
dest="nodownload",
|
||||
help="disable downloading of packages. instead, print the package URLs (optional)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--norelnotes", action="store_true", dest="norelnotes",
|
||||
help='disable gathering of release notes (optional); DEPRECATED')
|
||||
"--norelnotes",
|
||||
action="store_true",
|
||||
dest="norelnotes",
|
||||
help="disable gathering of release notes (optional); DEPRECATED",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--nogreedy", action="store_true", dest="nogreedy",
|
||||
help='disable pulling of all providers of package dependencies (optional)')
|
||||
"--nogreedy",
|
||||
action="store_true",
|
||||
dest="nogreedy",
|
||||
help="disable pulling of all providers of package dependencies (optional)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--nodeps", action="store_false", dest="resolve_deps", default=True,
|
||||
help='disable resolving dependencies')
|
||||
"--nodeps",
|
||||
action="store_false",
|
||||
dest="resolve_deps",
|
||||
default=True,
|
||||
help="disable resolving dependencies",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--sourceisos", default=False, action="store_true", dest="sourceisos",
|
||||
help='Create the source isos (other arch runs must be done)')
|
||||
"--sourceisos",
|
||||
default=False,
|
||||
action="store_true",
|
||||
dest="sourceisos",
|
||||
help="Create the source isos (other arch runs must be done)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--force", default=False, action="store_true",
|
||||
help='Force reuse of an existing destination directory (will overwrite files)')
|
||||
"--force",
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="Force reuse of an existing destination directory (will overwrite files)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--isfinal", default=False, action="store_true",
|
||||
help='Specify this is a GA tree, which causes betanag to be turned off during install')
|
||||
"--isfinal",
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="Specify this is a GA tree, which causes betanag to be turned off during install",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--nohash", default=False, action="store_true",
|
||||
help='disable hashing the Packages trees')
|
||||
"--nohash",
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="disable hashing the Packages trees",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--full-archlist", action="store_true",
|
||||
help='Use the full arch list for x86_64 (include i686, i386, etc.)')
|
||||
parser.add_argument("--arch", help='Override default (uname based) arch')
|
||||
"--full-archlist",
|
||||
action="store_true",
|
||||
help="Use the full arch list for x86_64 (include i686, i386, etc.)",
|
||||
)
|
||||
parser.add_argument("--arch", help="Override default (uname based) arch")
|
||||
parser.add_argument(
|
||||
"--greedy", metavar="METHOD",
|
||||
help='Greedy method; none, all, build')
|
||||
"--greedy", metavar="METHOD", help="Greedy method; none, all, build"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--multilib", action="append", metavar="METHOD",
|
||||
help='Multilib method; can be specified multiple times; recommended: devel, runtime')
|
||||
"--multilib",
|
||||
action="append",
|
||||
metavar="METHOD",
|
||||
help="Multilib method; can be specified multiple times; recommended: devel, runtime",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--lookaside-repo", action="append", dest="lookaside_repos", metavar="NAME",
|
||||
help='Specify lookaside repo name(s) (packages will used for depsolving but not be included in the output)')
|
||||
"--lookaside-repo",
|
||||
action="append",
|
||||
dest="lookaside_repos",
|
||||
metavar="NAME",
|
||||
help="Specify lookaside repo name(s) (packages will used for depsolving but not be included in the output)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--workdirbase", dest="workdirbase", action=SetConfig,
|
||||
help='base working directory (defaults to destdir + /work)')
|
||||
parser.add_argument("--no-dvd", default=False, action="store_true", dest="no_dvd",
|
||||
help='Do not make a install DVD/CD only the netinstall image and the tree')
|
||||
parser.add_argument("--lorax-conf",
|
||||
help='Path to lorax.conf file (optional)')
|
||||
"--workdirbase",
|
||||
dest="workdirbase",
|
||||
action=SetConfig,
|
||||
help="base working directory (defaults to destdir + /work)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-i", "--installpkgs", default=[], action="append", metavar="STRING",
|
||||
help="Package glob for lorax to install before runtime-install.tmpl runs. (may be listed multiple times)")
|
||||
"--no-dvd",
|
||||
default=False,
|
||||
action="store_true",
|
||||
dest="no_dvd",
|
||||
help="Do not make a install DVD/CD only the netinstall image and the tree",
|
||||
)
|
||||
parser.add_argument("--lorax-conf", help="Path to lorax.conf file (optional)")
|
||||
parser.add_argument(
|
||||
"--multilibconf", default=None, action=SetConfig,
|
||||
help="Path to multilib conf files. Default is /usr/share/pungi/multilib/")
|
||||
|
||||
parser.add_argument("-c", "--config", dest="config", required=True,
|
||||
help='Path to kickstart config file')
|
||||
parser.add_argument("--all-stages", action="store_true", default=True, dest="do_all",
|
||||
help="Enable ALL stages")
|
||||
parser.add_argument("-G", action="store_true", default=False, dest="do_gather",
|
||||
help="Flag to enable processing the Gather stage")
|
||||
parser.add_argument("-C", action="store_true", default=False, dest="do_createrepo",
|
||||
help="Flag to enable processing the Createrepo stage")
|
||||
parser.add_argument("-B", action="store_true", default=False, dest="do_buildinstall",
|
||||
help="Flag to enable processing the BuildInstall stage")
|
||||
parser.add_argument("-I", action="store_true", default=False, dest="do_createiso",
|
||||
help="Flag to enable processing the CreateISO stage")
|
||||
parser.add_argument("--relnotepkgs", dest="relnotepkgs", action=SetConfig,
|
||||
help='Rpms which contain the release notes')
|
||||
"-i",
|
||||
"--installpkgs",
|
||||
default=[],
|
||||
action="append",
|
||||
metavar="STRING",
|
||||
help="Package glob for lorax to install before runtime-install.tmpl runs. (may be listed multiple times)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--relnotefilere", dest="relnotefilere", action=SetConfig,
|
||||
help='Which files are the release notes -- GPL EULA')
|
||||
parser.add_argument("--nomacboot", action="store_true", dest="nomacboot",
|
||||
help='disable setting up macboot as no hfs support ')
|
||||
"--multilibconf",
|
||||
default=None,
|
||||
action=SetConfig,
|
||||
help="Path to multilib conf files. Default is /usr/share/pungi/multilib/",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--rootfs-size", dest="rootfs_size", action=SetConfig, default=False,
|
||||
help='Size of root filesystem in GiB. If not specified, use lorax default value')
|
||||
"-c",
|
||||
"--config",
|
||||
dest="config",
|
||||
required=True,
|
||||
help="Path to kickstart config file",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--all-stages",
|
||||
action="store_true",
|
||||
default=True,
|
||||
dest="do_all",
|
||||
help="Enable ALL stages",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-G",
|
||||
action="store_true",
|
||||
default=False,
|
||||
dest="do_gather",
|
||||
help="Flag to enable processing the Gather stage",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-C",
|
||||
action="store_true",
|
||||
default=False,
|
||||
dest="do_createrepo",
|
||||
help="Flag to enable processing the Createrepo stage",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-B",
|
||||
action="store_true",
|
||||
default=False,
|
||||
dest="do_buildinstall",
|
||||
help="Flag to enable processing the BuildInstall stage",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-I",
|
||||
action="store_true",
|
||||
default=False,
|
||||
dest="do_createiso",
|
||||
help="Flag to enable processing the CreateISO stage",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--relnotepkgs",
|
||||
dest="relnotepkgs",
|
||||
action=SetConfig,
|
||||
help="Rpms which contain the release notes",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--relnotefilere",
|
||||
dest="relnotefilere",
|
||||
action=SetConfig,
|
||||
help="Which files are the release notes -- GPL EULA",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--nomacboot",
|
||||
action="store_true",
|
||||
dest="nomacboot",
|
||||
help="disable setting up macboot as no hfs support ",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--pungirc", dest="pungirc", default='~/.pungirc', action=SetConfig,
|
||||
help='Read pungi options from config file ')
|
||||
"--rootfs-size",
|
||||
dest="rootfs_size",
|
||||
action=SetConfig,
|
||||
default=False,
|
||||
help="Size of root filesystem in GiB. If not specified, use lorax default value",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--pungirc",
|
||||
dest="pungirc",
|
||||
default="~/.pungirc",
|
||||
action=SetConfig,
|
||||
help="Read pungi options from config file ",
|
||||
)
|
||||
|
||||
opts = parser.parse_args()
|
||||
|
||||
if not config.get('pungi', 'variant').isalnum() and not config.get('pungi', 'variant') == '':
|
||||
if (
|
||||
not config.get("pungi", "variant").isalnum()
|
||||
and not config.get("pungi", "variant") == ""
|
||||
):
|
||||
parser.error("Variant must be alphanumeric")
|
||||
|
||||
if opts.do_gather or opts.do_createrepo or opts.do_buildinstall or opts.do_createiso:
|
||||
if (
|
||||
opts.do_gather
|
||||
or opts.do_createrepo
|
||||
or opts.do_buildinstall
|
||||
or opts.do_createiso
|
||||
):
|
||||
opts.do_all = False
|
||||
|
||||
if opts.arch and (opts.do_all or opts.do_buildinstall):
|
||||
parser.error("Cannot override arch while the BuildInstall stage is enabled")
|
||||
|
||||
# set the iso_basename.
|
||||
if not config.get('pungi', 'variant') == '':
|
||||
config.set('pungi', 'iso_basename', '%s-%s' % (config.get('pungi', 'family'), config.get('pungi', 'variant')))
|
||||
if not config.get("pungi", "variant") == "":
|
||||
config.set(
|
||||
"pungi",
|
||||
"iso_basename",
|
||||
"%s-%s" % (config.get("pungi", "family"), config.get("pungi", "variant")),
|
||||
)
|
||||
else:
|
||||
config.set('pungi', 'iso_basename', config.get('pungi', 'family'))
|
||||
config.set("pungi", "iso_basename", config.get("pungi", "family"))
|
||||
|
||||
return opts
|
||||
|
||||
@ -192,45 +340,53 @@ def main():
|
||||
print("INFO: selinux disabled")
|
||||
enforcing = False
|
||||
if enforcing:
|
||||
print("WARNING: SELinux is enforcing. This may lead to a compose with selinux disabled.")
|
||||
print(
|
||||
"WARNING: SELinux is enforcing. This may lead to a compose with selinux disabled."
|
||||
)
|
||||
print("Consider running with setenforce 0.")
|
||||
|
||||
# Set up the kickstart parser and pass in the kickstart file we were handed
|
||||
ksparser = pungi.ks.get_ksparser(ks_path=opts.config)
|
||||
|
||||
if opts.sourceisos:
|
||||
config.set('pungi', 'arch', 'source')
|
||||
config.set("pungi", "arch", "source")
|
||||
|
||||
for part in ksparser.handler.partition.partitions:
|
||||
if part.mountpoint == 'iso':
|
||||
config.set('pungi', 'cdsize', str(part.size))
|
||||
if part.mountpoint == "iso":
|
||||
config.set("pungi", "cdsize", str(part.size))
|
||||
|
||||
config.set('pungi', 'force', str(opts.force))
|
||||
config.set("pungi", "force", str(opts.force))
|
||||
|
||||
if config.get('pungi', 'workdirbase') == '/work':
|
||||
config.set('pungi', 'workdirbase', "%s/work" % config.get('pungi', 'destdir'))
|
||||
if config.get("pungi", "workdirbase") == "/work":
|
||||
config.set("pungi", "workdirbase", "%s/work" % config.get("pungi", "destdir"))
|
||||
# Set up our directories
|
||||
if not os.path.exists(config.get('pungi', 'destdir')):
|
||||
if not os.path.exists(config.get("pungi", "destdir")):
|
||||
try:
|
||||
os.makedirs(config.get('pungi', 'destdir'))
|
||||
os.makedirs(config.get("pungi", "destdir"))
|
||||
except OSError:
|
||||
print("Error: Cannot create destination dir %s" % config.get('pungi', 'destdir'),
|
||||
file=sys.stderr)
|
||||
print(
|
||||
"Error: Cannot create destination dir %s"
|
||||
% config.get("pungi", "destdir"),
|
||||
file=sys.stderr,
|
||||
)
|
||||
sys.exit(1)
|
||||
else:
|
||||
print("Warning: Reusing existing destination directory.")
|
||||
|
||||
if not os.path.exists(config.get('pungi', 'workdirbase')):
|
||||
if not os.path.exists(config.get("pungi", "workdirbase")):
|
||||
try:
|
||||
os.makedirs(config.get('pungi', 'workdirbase'))
|
||||
os.makedirs(config.get("pungi", "workdirbase"))
|
||||
except OSError:
|
||||
print("Error: Cannot create working base dir %s" % config.get('pungi', 'workdirbase'),
|
||||
file=sys.stderr)
|
||||
print(
|
||||
"Error: Cannot create working base dir %s"
|
||||
% config.get("pungi", "workdirbase"),
|
||||
file=sys.stderr,
|
||||
)
|
||||
sys.exit(1)
|
||||
else:
|
||||
print("Warning: Reusing existing working base directory.")
|
||||
|
||||
cachedir = config.get('pungi', 'cachedir')
|
||||
cachedir = config.get("pungi", "cachedir")
|
||||
|
||||
if not os.path.exists(cachedir):
|
||||
try:
|
||||
@ -241,32 +397,32 @@ def main():
|
||||
|
||||
# Set debuginfo flag
|
||||
if opts.nodebuginfo:
|
||||
config.set('pungi', 'debuginfo', "False")
|
||||
config.set("pungi", "debuginfo", "False")
|
||||
if opts.greedy:
|
||||
config.set('pungi', 'greedy', opts.greedy)
|
||||
config.set("pungi", "greedy", opts.greedy)
|
||||
else:
|
||||
# XXX: compatibility
|
||||
if opts.nogreedy:
|
||||
config.set('pungi', 'greedy', "none")
|
||||
config.set("pungi", "greedy", "none")
|
||||
else:
|
||||
config.set('pungi', 'greedy', "all")
|
||||
config.set('pungi', 'resolve_deps', str(bool(opts.resolve_deps)))
|
||||
config.set("pungi", "greedy", "all")
|
||||
config.set("pungi", "resolve_deps", str(bool(opts.resolve_deps)))
|
||||
if opts.isfinal:
|
||||
config.set('pungi', 'isfinal', "True")
|
||||
config.set("pungi", "isfinal", "True")
|
||||
if opts.nohash:
|
||||
config.set('pungi', 'nohash', "True")
|
||||
config.set("pungi", "nohash", "True")
|
||||
if opts.full_archlist:
|
||||
config.set('pungi', 'full_archlist', "True")
|
||||
config.set("pungi", "full_archlist", "True")
|
||||
if opts.arch:
|
||||
config.set('pungi', 'arch', opts.arch)
|
||||
config.set("pungi", "arch", opts.arch)
|
||||
if opts.multilib:
|
||||
config.set('pungi', 'multilib', " ".join(opts.multilib))
|
||||
config.set("pungi", "multilib", " ".join(opts.multilib))
|
||||
if opts.lookaside_repos:
|
||||
config.set('pungi', 'lookaside_repos', " ".join(opts.lookaside_repos))
|
||||
config.set("pungi", "lookaside_repos", " ".join(opts.lookaside_repos))
|
||||
if opts.no_dvd:
|
||||
config.set('pungi', 'no_dvd', "True")
|
||||
config.set("pungi", "no_dvd", "True")
|
||||
if opts.nomacboot:
|
||||
config.set('pungi', 'nomacboot', "True")
|
||||
config.set("pungi", "nomacboot", "True")
|
||||
config.set("pungi", "fulltree", str(bool(opts.fulltree)))
|
||||
config.set("pungi", "selfhosting", str(bool(opts.selfhosting)))
|
||||
config.set("pungi", "nosource", str(bool(opts.nosource)))
|
||||
@ -303,7 +459,9 @@ def main():
|
||||
flags_str = ",".join(line["flags"])
|
||||
if flags_str:
|
||||
flags_str = "(%s)" % flags_str
|
||||
sys.stdout.write("DEBUGINFO%s: %s\n" % (flags_str, line["path"]))
|
||||
sys.stdout.write(
|
||||
"DEBUGINFO%s: %s\n" % (flags_str, line["path"])
|
||||
)
|
||||
sys.stdout.flush()
|
||||
else:
|
||||
mypungi.downloadDebuginfo()
|
||||
@ -320,7 +478,10 @@ def main():
|
||||
|
||||
print("RPM size: %s MiB" % (mypungi.size_packages() / 1024 ** 2))
|
||||
if not opts.nodebuginfo:
|
||||
print("DEBUGINFO size: %s MiB" % (mypungi.size_debuginfo() / 1024 ** 2))
|
||||
print(
|
||||
"DEBUGINFO size: %s MiB"
|
||||
% (mypungi.size_debuginfo() / 1024 ** 2)
|
||||
)
|
||||
if not opts.nosource:
|
||||
print("SRPM size: %s MiB" % (mypungi.size_srpms() / 1024 ** 2))
|
||||
|
||||
@ -340,10 +501,13 @@ def main():
|
||||
# Do things slightly different for src.
|
||||
if opts.sourceisos:
|
||||
# we already have all the content gathered
|
||||
mypungi.topdir = os.path.join(config.get('pungi', 'destdir'),
|
||||
config.get('pungi', 'version'),
|
||||
config.get('pungi', 'variant'),
|
||||
'source', 'SRPMS')
|
||||
mypungi.topdir = os.path.join(
|
||||
config.get("pungi", "destdir"),
|
||||
config.get("pungi", "version"),
|
||||
config.get("pungi", "variant"),
|
||||
"source",
|
||||
"SRPMS",
|
||||
)
|
||||
mypungi.doCreaterepo(comps=False)
|
||||
if opts.do_all or opts.do_createiso:
|
||||
mypungi.doCreateIsos()
|
||||
|
@ -18,22 +18,17 @@ from pungi.util import temp_dir
|
||||
def get_parser():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
"--profiler",
|
||||
action="store_true",
|
||||
"--profiler", action="store_true",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--arch",
|
||||
required=True,
|
||||
"--arch", required=True,
|
||||
)
|
||||
parser.add_argument(
|
||||
"--config",
|
||||
metavar="PATH",
|
||||
required=True,
|
||||
help="path to kickstart config file",
|
||||
"--config", metavar="PATH", required=True, help="path to kickstart config file",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--download-to",
|
||||
metavar='PATH',
|
||||
metavar="PATH",
|
||||
help="download packages to given directory instead of just printing paths",
|
||||
)
|
||||
|
||||
@ -47,9 +42,7 @@ def get_parser():
|
||||
|
||||
group = parser.add_argument_group("Gather options")
|
||||
group.add_argument(
|
||||
"--nodeps",
|
||||
action="store_true",
|
||||
help="disable resolving dependencies",
|
||||
"--nodeps", action="store_true", help="disable resolving dependencies",
|
||||
)
|
||||
group.add_argument(
|
||||
"--selfhosting",
|
||||
@ -68,9 +61,7 @@ def get_parser():
|
||||
choices=["none", "all", "build"],
|
||||
)
|
||||
group.add_argument(
|
||||
"--multilib",
|
||||
metavar="[METHOD]",
|
||||
action="append",
|
||||
"--multilib", metavar="[METHOD]", action="append",
|
||||
)
|
||||
group.add_argument(
|
||||
"--tempdir",
|
||||
@ -135,13 +126,13 @@ def main(ns, persistdir, cachedir):
|
||||
continue
|
||||
|
||||
if not getattr(ks_repo, "metalink", False):
|
||||
dnf_obj.add_repo(
|
||||
ks_repo.name, ks_repo.baseurl, enablegroups=False
|
||||
)
|
||||
dnf_obj.add_repo(ks_repo.name, ks_repo.baseurl, enablegroups=False)
|
||||
else:
|
||||
dnf_obj.add_repo(
|
||||
ks_repo.name, ks_repo.baseurl, enablegroups=False,
|
||||
metalink=ks_repo.metalink
|
||||
ks_repo.name,
|
||||
ks_repo.baseurl,
|
||||
enablegroups=False,
|
||||
metalink=ks_repo.metalink,
|
||||
)
|
||||
|
||||
for ks_repo in ksparser.handler.repo.repoList:
|
||||
@ -150,8 +141,7 @@ def main(ns, persistdir, cachedir):
|
||||
if not getattr(ks_repo, "metalink", False):
|
||||
dnf_obj.add_repo(ks_repo.name, ks_repo.baseurl)
|
||||
else:
|
||||
dnf_obj.add_repo(ks_repo.name, ks_repo.baseurl,
|
||||
metalink=ks_repo.metalink)
|
||||
dnf_obj.add_repo(ks_repo.name, ks_repo.baseurl, metalink=ks_repo.metalink)
|
||||
|
||||
with Profiler("DnfWrapper.fill_sack()"):
|
||||
dnf_obj.fill_sack(load_system_repo=False, load_available_repos=True)
|
||||
@ -190,7 +180,7 @@ def _get_url(pkg):
|
||||
|
||||
|
||||
def _fmt_flags(flags):
|
||||
return "(%s)" % ",".join(sorted(f.name.replace('_', '-') for f in flags))
|
||||
return "(%s)" % ",".join(sorted(f.name.replace("_", "-") for f in flags))
|
||||
|
||||
|
||||
def deduplicate(gather_obj, items):
|
||||
|
@ -35,7 +35,7 @@ COMPOSE = None
|
||||
def main():
|
||||
global COMPOSE
|
||||
|
||||
PHASES_NAMES_MODIFIED = PHASES_NAMES + ['productimg']
|
||||
PHASES_NAMES_MODIFIED = PHASES_NAMES + ["productimg"]
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
group = parser.add_mutually_exclusive_group(required=True)
|
||||
@ -51,19 +51,19 @@ def main():
|
||||
)
|
||||
parser.add_argument(
|
||||
"--label",
|
||||
help="specify compose label (example: Snapshot-1.0); required for production composes"
|
||||
help="specify compose label (example: Snapshot-1.0); required for production composes",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--no-label",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="make a production compose without label"
|
||||
help="make a production compose without label",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--supported",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="set supported flag on media (automatically on for 'RC-x.y' labels)"
|
||||
help="set supported flag on media (automatically on for 'RC-x.y' labels)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--old-composes",
|
||||
@ -73,11 +73,7 @@ def main():
|
||||
action="append",
|
||||
help="Path to directory with old composes. Reuse an existing repodata from the most recent compose.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--config",
|
||||
help="Config file",
|
||||
required=True
|
||||
)
|
||||
parser.add_argument("--config", help="Config file", required=True)
|
||||
parser.add_argument(
|
||||
"--skip-phase",
|
||||
metavar="PHASE",
|
||||
@ -127,7 +123,7 @@ def main():
|
||||
metavar="ID",
|
||||
type=util.parse_koji_event,
|
||||
help="specify a koji event for populating package set, either as event ID "
|
||||
"or a path to a compose from which to reuse the event",
|
||||
"or a path to a compose from which to reuse the event",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--version",
|
||||
@ -139,14 +135,14 @@ def main():
|
||||
"--notification-script",
|
||||
action="append",
|
||||
default=[],
|
||||
help="script for sending progress notification messages"
|
||||
help="script for sending progress notification messages",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--no-latest-link",
|
||||
action="store_true",
|
||||
default=False,
|
||||
dest="no_latest_link",
|
||||
help="don't create latest symbol link to this compose"
|
||||
help="don't create latest symbol link to this compose",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--latest-link-status",
|
||||
@ -159,23 +155,30 @@ def main():
|
||||
"--print-output-dir",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="print the compose directory"
|
||||
help="print the compose directory",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--quiet",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="quiet mode, don't print log on screen"
|
||||
help="quiet mode, don't print log on screen",
|
||||
)
|
||||
|
||||
opts = parser.parse_args()
|
||||
import pungi.notifier
|
||||
|
||||
notifier = pungi.notifier.PungiNotifier(opts.notification_script)
|
||||
|
||||
def fail_to_start(msg, **kwargs):
|
||||
notifier.send('fail-to-start', workdir=opts.target_dir,
|
||||
command=sys.argv, target_dir=opts.target_dir,
|
||||
config=opts.config, detail=msg, **kwargs)
|
||||
notifier.send(
|
||||
"fail-to-start",
|
||||
workdir=opts.target_dir,
|
||||
command=sys.argv,
|
||||
target_dir=opts.target_dir,
|
||||
config=opts.config,
|
||||
detail=msg,
|
||||
**kwargs
|
||||
)
|
||||
|
||||
def abort(msg):
|
||||
fail_to_start(msg)
|
||||
@ -184,11 +187,17 @@ def main():
|
||||
if opts.target_dir and not opts.compose_dir:
|
||||
opts.target_dir = os.path.abspath(opts.target_dir)
|
||||
if not os.path.isdir(opts.target_dir):
|
||||
abort("The target directory does not exist or is not a directory: %s" % opts.target_dir)
|
||||
abort(
|
||||
"The target directory does not exist or is not a directory: %s"
|
||||
% opts.target_dir
|
||||
)
|
||||
else:
|
||||
opts.compose_dir = os.path.abspath(opts.compose_dir)
|
||||
if not os.path.isdir(opts.compose_dir):
|
||||
abort("The compose directory does not exist or is not a directory: %s" % opts.compose_dir)
|
||||
abort(
|
||||
"The compose directory does not exist or is not a directory: %s"
|
||||
% opts.compose_dir
|
||||
)
|
||||
|
||||
opts.config = os.path.abspath(opts.config)
|
||||
|
||||
@ -214,12 +223,13 @@ def main():
|
||||
|
||||
conf = util.load_config(opts.config)
|
||||
|
||||
compose_type = opts.compose_type or conf.get('compose_type', 'production')
|
||||
compose_type = opts.compose_type or conf.get("compose_type", "production")
|
||||
if compose_type == "production" and not opts.label and not opts.no_label:
|
||||
abort("must specify label for a production compose")
|
||||
|
||||
# check if all requirements are met
|
||||
import pungi.checks
|
||||
|
||||
if not pungi.checks.check(conf):
|
||||
sys.exit(1)
|
||||
pungi.checks.check_umask(logger)
|
||||
@ -229,8 +239,11 @@ def main():
|
||||
|
||||
# TODO: workaround for config files containing skip_phase = productimg
|
||||
# Remove when all config files are up to date
|
||||
if 'productimg' in opts.skip_phase or 'productimg' in opts.just_phase:
|
||||
print('WARNING: productimg phase has been removed, please remove it from --skip-phase or --just-phase option', file=sys.stderr)
|
||||
if "productimg" in opts.skip_phase or "productimg" in opts.just_phase:
|
||||
print(
|
||||
"WARNING: productimg phase has been removed, please remove it from --skip-phase or --just-phase option",
|
||||
file=sys.stderr,
|
||||
)
|
||||
for err in errors[:]:
|
||||
if "'productimg' is not one of" in err:
|
||||
errors.remove(err)
|
||||
@ -242,29 +255,37 @@ def main():
|
||||
if errors:
|
||||
for error in errors:
|
||||
print(error, file=sys.stderr)
|
||||
fail_to_start('Config validation failed', errors=errors)
|
||||
fail_to_start("Config validation failed", errors=errors)
|
||||
sys.exit(1)
|
||||
|
||||
if opts.target_dir:
|
||||
compose_dir = Compose.get_compose_dir(opts.target_dir, conf, compose_type=compose_type, compose_label=opts.label)
|
||||
compose_dir = Compose.get_compose_dir(
|
||||
opts.target_dir, conf, compose_type=compose_type, compose_label=opts.label
|
||||
)
|
||||
else:
|
||||
compose_dir = opts.compose_dir
|
||||
|
||||
if opts.print_output_dir:
|
||||
print('Compose dir: %s' % compose_dir)
|
||||
print("Compose dir: %s" % compose_dir)
|
||||
|
||||
compose = Compose(conf,
|
||||
topdir=compose_dir,
|
||||
skip_phases=opts.skip_phase,
|
||||
just_phases=opts.just_phase,
|
||||
old_composes=opts.old_composes,
|
||||
koji_event=opts.koji_event,
|
||||
supported=opts.supported,
|
||||
logger=logger,
|
||||
notifier=notifier)
|
||||
compose = Compose(
|
||||
conf,
|
||||
topdir=compose_dir,
|
||||
skip_phases=opts.skip_phase,
|
||||
just_phases=opts.just_phase,
|
||||
old_composes=opts.old_composes,
|
||||
koji_event=opts.koji_event,
|
||||
supported=opts.supported,
|
||||
logger=logger,
|
||||
notifier=notifier,
|
||||
)
|
||||
notifier.compose = compose
|
||||
COMPOSE = compose
|
||||
run_compose(compose, create_latest_link=create_latest_link, latest_link_status=latest_link_status)
|
||||
run_compose(
|
||||
compose,
|
||||
create_latest_link=create_latest_link,
|
||||
latest_link_status=latest_link_status,
|
||||
)
|
||||
|
||||
|
||||
def run_compose(compose, create_latest_link=True, latest_link_status=None):
|
||||
@ -279,7 +300,9 @@ def run_compose(compose, create_latest_link=True, latest_link_status=None):
|
||||
compose.log_info("Pungi version: %s" % get_full_version())
|
||||
compose.log_info("User name: %s" % getpass.getuser())
|
||||
compose.log_info("Working directory: %s" % os.getcwd())
|
||||
compose.log_info("Command line: %s" % " ".join([shlex_quote(arg) for arg in sys.argv]))
|
||||
compose.log_info(
|
||||
"Command line: %s" % " ".join([shlex_quote(arg) for arg in sys.argv])
|
||||
)
|
||||
compose.log_info("Compose top directory: %s" % compose.topdir)
|
||||
compose.log_info("Current timezone offset: %s" % pungi.util.get_tz_offset())
|
||||
compose.read_variants()
|
||||
@ -301,7 +324,9 @@ def run_compose(compose, create_latest_link=True, latest_link_status=None):
|
||||
gather_phase = pungi.phases.GatherPhase(compose, pkgset_phase)
|
||||
extrafiles_phase = pungi.phases.ExtraFilesPhase(compose, pkgset_phase)
|
||||
createrepo_phase = pungi.phases.CreaterepoPhase(compose, pkgset_phase)
|
||||
ostree_installer_phase = pungi.phases.OstreeInstallerPhase(compose, buildinstall_phase, pkgset_phase)
|
||||
ostree_installer_phase = pungi.phases.OstreeInstallerPhase(
|
||||
compose, buildinstall_phase, pkgset_phase
|
||||
)
|
||||
ostree_phase = pungi.phases.OSTreePhase(compose, pkgset_phase)
|
||||
createiso_phase = pungi.phases.CreateisoPhase(compose, buildinstall_phase)
|
||||
extra_isos_phase = pungi.phases.ExtraIsosPhase(compose)
|
||||
@ -313,12 +338,24 @@ def run_compose(compose, create_latest_link=True, latest_link_status=None):
|
||||
test_phase = pungi.phases.TestPhase(compose)
|
||||
|
||||
# check if all config options are set
|
||||
for phase in (init_phase, pkgset_phase, createrepo_phase,
|
||||
buildinstall_phase, gather_phase,
|
||||
extrafiles_phase, createiso_phase, liveimages_phase,
|
||||
livemedia_phase, image_build_phase, image_checksum_phase,
|
||||
test_phase, ostree_phase, ostree_installer_phase,
|
||||
extra_isos_phase, osbs_phase):
|
||||
for phase in (
|
||||
init_phase,
|
||||
pkgset_phase,
|
||||
createrepo_phase,
|
||||
buildinstall_phase,
|
||||
gather_phase,
|
||||
extrafiles_phase,
|
||||
createiso_phase,
|
||||
liveimages_phase,
|
||||
livemedia_phase,
|
||||
image_build_phase,
|
||||
image_checksum_phase,
|
||||
test_phase,
|
||||
ostree_phase,
|
||||
ostree_installer_phase,
|
||||
extra_isos_phase,
|
||||
osbs_phase,
|
||||
):
|
||||
if phase.skip():
|
||||
continue
|
||||
try:
|
||||
@ -330,7 +367,7 @@ def run_compose(compose, create_latest_link=True, latest_link_status=None):
|
||||
for i in errors:
|
||||
compose.log_error(i)
|
||||
print(i)
|
||||
raise RuntimeError('Configuration is not valid')
|
||||
raise RuntimeError("Configuration is not valid")
|
||||
|
||||
# PREP
|
||||
|
||||
@ -338,10 +375,12 @@ def run_compose(compose, create_latest_link=True, latest_link_status=None):
|
||||
# in same way as .validate() or .run()
|
||||
|
||||
# Prep for liveimages - Obtain a password for signing rpm wrapped images
|
||||
if ("signing_key_password_file" in compose.conf
|
||||
and "signing_command" in compose.conf
|
||||
and "%(signing_key_password)s" in compose.conf["signing_command"]
|
||||
and not liveimages_phase.skip()):
|
||||
if (
|
||||
"signing_key_password_file" in compose.conf
|
||||
and "signing_command" in compose.conf
|
||||
and "%(signing_key_password)s" in compose.conf["signing_command"]
|
||||
and not liveimages_phase.skip()
|
||||
):
|
||||
# TODO: Don't require key if signing is turned off
|
||||
# Obtain signing key password
|
||||
signing_key_password = None
|
||||
@ -357,7 +396,11 @@ def run_compose(compose, create_latest_link=True, latest_link_status=None):
|
||||
else:
|
||||
# Use text file with password
|
||||
try:
|
||||
signing_key_password = open(compose.conf["signing_key_password_file"], "r").readline().rstrip('\n')
|
||||
signing_key_password = (
|
||||
open(compose.conf["signing_key_password_file"], "r")
|
||||
.readline()
|
||||
.rstrip("\n")
|
||||
)
|
||||
except IOError:
|
||||
# Filename is not print intentionally in case someone puts password directly into the option
|
||||
err_msg = "Cannot load password from file specified by 'signing_key_password_file' option"
|
||||
@ -388,7 +431,9 @@ def run_compose(compose, create_latest_link=True, latest_link_status=None):
|
||||
# write treeinfo before ISOs are created
|
||||
for variant in compose.get_variants():
|
||||
for arch in variant.arches + ["src"]:
|
||||
pungi.metadata.write_tree_info(compose, arch, variant, bi=buildinstall_phase)
|
||||
pungi.metadata.write_tree_info(
|
||||
compose, arch, variant, bi=buildinstall_phase
|
||||
)
|
||||
|
||||
# write .discinfo and media.repo before ISOs are created
|
||||
for variant in compose.get_variants():
|
||||
@ -441,17 +486,28 @@ def run_compose(compose, create_latest_link=True, latest_link_status=None):
|
||||
if compose.get_status() in [s.upper() for s in latest_link_status]:
|
||||
latest_link = True
|
||||
else:
|
||||
compose.log_warning("Compose status (%s) doesn't match with specified latest-link-status (%s), not create latest link."
|
||||
% (compose.get_status(), str(latest_link_status)))
|
||||
compose.log_warning(
|
||||
"Compose status (%s) doesn't match with specified latest-link-status (%s), not create latest link."
|
||||
% (compose.get_status(), str(latest_link_status))
|
||||
)
|
||||
|
||||
if latest_link:
|
||||
compose_dir = os.path.basename(compose.topdir)
|
||||
if len(compose.conf["release_version"].split(".")) == 1:
|
||||
symlink_name = "latest-%s-%s" % (compose.conf["release_short"], compose.conf["release_version"])
|
||||
symlink_name = "latest-%s-%s" % (
|
||||
compose.conf["release_short"],
|
||||
compose.conf["release_version"],
|
||||
)
|
||||
else:
|
||||
symlink_name = "latest-%s-%s" % (compose.conf["release_short"], ".".join(compose.conf["release_version"].split(".")[:-1]))
|
||||
symlink_name = "latest-%s-%s" % (
|
||||
compose.conf["release_short"],
|
||||
".".join(compose.conf["release_version"].split(".")[:-1]),
|
||||
)
|
||||
if compose.conf.get("base_product_name", ""):
|
||||
symlink_name += "-%s-%s" % (compose.conf["base_product_short"], compose.conf["base_product_version"])
|
||||
symlink_name += "-%s-%s" % (
|
||||
compose.conf["base_product_short"],
|
||||
compose.conf["base_product_version"],
|
||||
)
|
||||
symlink = os.path.join(compose.topdir, "..", symlink_name)
|
||||
|
||||
try:
|
||||
@ -471,8 +527,7 @@ def run_compose(compose, create_latest_link=True, latest_link_status=None):
|
||||
def sigterm_handler(signum, frame):
|
||||
if COMPOSE:
|
||||
COMPOSE.log_error("Compose run failed: signal %s" % signum)
|
||||
COMPOSE.log_error("Traceback:\n%s"
|
||||
% '\n'.join(traceback.format_stack(frame)))
|
||||
COMPOSE.log_error("Traceback:\n%s" % "\n".join(traceback.format_stack(frame)))
|
||||
COMPOSE.log_critical("Compose failed: %s" % COMPOSE.topdir)
|
||||
COMPOSE.write_status("TERMINATED")
|
||||
else:
|
||||
@ -495,6 +550,7 @@ def cli_main():
|
||||
COMPOSE.log_critical("Compose failed: %s" % COMPOSE.topdir)
|
||||
COMPOSE.write_status("DOOMED")
|
||||
import kobo.tback
|
||||
|
||||
with open(tb_path, "wb") as f:
|
||||
f.write(kobo.tback.Traceback().get_traceback())
|
||||
else:
|
||||
|
@ -8,7 +8,7 @@ import sys
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('cmd')
|
||||
parser.add_argument("cmd")
|
||||
opts = parser.parse_args()
|
||||
|
||||
data = json.load(sys.stdin)
|
||||
|
@ -39,40 +39,40 @@ def ts_log(msg):
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('cmd')
|
||||
parser.add_argument("cmd")
|
||||
opts = parser.parse_args()
|
||||
|
||||
if opts.cmd != 'ostree':
|
||||
if opts.cmd != "ostree":
|
||||
# Not an announcement of new ostree commit, nothing to do.
|
||||
sys.exit()
|
||||
|
||||
try:
|
||||
data = json.load(sys.stdin)
|
||||
except ValueError:
|
||||
print('Failed to decode data', file=sys.stderr)
|
||||
print("Failed to decode data", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
repo = data['local_repo_path']
|
||||
commit = data['commitid']
|
||||
repo = data["local_repo_path"]
|
||||
commit = data["commitid"]
|
||||
if not commit:
|
||||
print("No new commit was created, nothing will get signed.")
|
||||
sys.exit(0)
|
||||
|
||||
path = '%s/objects/%s/%s.commitmeta' % (repo, commit[:2], commit[2:])
|
||||
path = "%s/objects/%s/%s.commitmeta" % (repo, commit[:2], commit[2:])
|
||||
|
||||
config = fedmsg.config.load_config()
|
||||
config['active'] = True # Connect out to a fedmsg-relay instance
|
||||
config['cert_prefix'] = 'releng' # Use this cert.
|
||||
config["active"] = True # Connect out to a fedmsg-relay instance
|
||||
config["cert_prefix"] = "releng" # Use this cert.
|
||||
fedmsg.init(**config)
|
||||
topic = 'compose.%s' % opts.cmd.replace('-', '.').lower()
|
||||
topic = "compose.%s" % opts.cmd.replace("-", ".").lower()
|
||||
|
||||
count = 0
|
||||
while not os.path.exists(path):
|
||||
ts_log("Commit not signed yet, waiting...")
|
||||
count += 1
|
||||
if count >= 60: # Repeat every 5 minutes
|
||||
print('Repeating notification')
|
||||
fedmsg.publish(topic=topic, modname='pungi', msg=data)
|
||||
print("Repeating notification")
|
||||
fedmsg.publish(topic=topic, modname="pungi", msg=data)
|
||||
count = 0
|
||||
time.sleep(5)
|
||||
|
||||
|
294 pungi/util.py
@ -39,13 +39,27 @@ from productmd.common import get_major_version
DEBUG_PATTERNS = ["*-debuginfo", "*-debuginfo-*", "*-debugsource"]


def _doRunCommand(command, logger, rundir='/tmp', output=subprocess.PIPE, error=subprocess.PIPE, env=None):
def _doRunCommand(
command,
logger,
rundir="/tmp",
output=subprocess.PIPE,
error=subprocess.PIPE,
env=None,
):
"""Run a command and log the output. Error out if we get something on stderr"""

logger.info("Running %s" % subprocess.list2cmdline(command))

p1 = subprocess.Popen(command, cwd=rundir, stdout=output, stderr=error, universal_newlines=True, env=env,
close_fds=True)
p1 = subprocess.Popen(
command,
cwd=rundir,
stdout=output,
stderr=error,
universal_newlines=True,
env=env,
close_fds=True,
)
(out, err) = p1.communicate()

if out:
@ -54,7 +68,9 @@ def _doRunCommand(command, logger, rundir='/tmp', output=subprocess.PIPE, error=
if p1.returncode != 0:
logger.error("Got an error from %s" % command[0])
logger.error(err)
raise OSError("Got an error (%d) from %s: %s" % (p1.returncode, command[0], err))
raise OSError(
"Got an error (%d) from %s: %s" % (p1.returncode, command[0], err)
)


def _link(local, target, logger, force=False):
@ -72,7 +88,7 @@ def _link(local, target, logger, force=False):
os.link(local, target)
except OSError as e:
if e.errno != 18: # EXDEV
logger.error('Got an error linking from cache: %s' % e)
logger.error("Got an error linking from cache: %s" % e)
raise OSError(e)

# Can't hardlink cross file systems
@ -86,7 +102,7 @@ def _ensuredir(target, logger, force=False, clean=False):
# We have to check existance of a logger, as setting the logger could
# itself cause an issue.
def whoops(func, path, exc_info):
message = 'Could not remove %s' % path
message = "Could not remove %s" % path
if logger:
logger.error(message)
else:
@ -94,7 +110,7 @@ def _ensuredir(target, logger, force=False, clean=False):
sys.exit(1)

if os.path.exists(target) and not os.path.isdir(target):
message = '%s exists but is not a directory.' % target
message = "%s exists but is not a directory." % target
if logger:
logger.error(message)
else:
@ -109,7 +125,7 @@ def _ensuredir(target, logger, force=False, clean=False):
elif force:
return
else:
message = 'Directory %s already exists. Use --force to overwrite.' % target
message = "Directory %s already exists. Use --force to overwrite." % target
if logger:
logger.error(message)
else:
@ -130,7 +146,7 @@ def _doCheckSum(path, hash, logger):

# Try to open the file, using binary flag.
try:
myfile = open(path, 'rb')
myfile = open(path, "rb")
except IOError as e:
logger.error("Could not open file %s: %s" % (path, e))
return False
@ -138,13 +154,15 @@ def _doCheckSum(path, hash, logger):
|
||||
# Loop through the file reading chunks at a time as to not
|
||||
# put the entire file in memory. That would suck for DVDs
|
||||
while True:
|
||||
chunk = myfile.read(8192) # magic number! Taking suggestions for better blocksize
|
||||
chunk = myfile.read(
|
||||
8192
|
||||
) # magic number! Taking suggestions for better blocksize
|
||||
if not chunk:
|
||||
break # we're done with the file
|
||||
sum.update(chunk)
|
||||
myfile.close()
|
||||
|
||||
return '%s:%s' % (hash, sum.hexdigest())
|
||||
return "%s:%s" % (hash, sum.hexdigest())
|
||||
|
||||
|
||||
def makedirs(path, mode=0o775):
|
||||
@ -168,7 +186,10 @@ def explode_rpm_package(pkg_path, target_dir):
|
||||
"""Explode a rpm package into target_dir."""
|
||||
pkg_path = os.path.abspath(pkg_path)
|
||||
makedirs(target_dir)
|
||||
run("rpm2cpio %s | cpio -iuvmd && chmod -R a+rX ." % shlex_quote(pkg_path), workdir=target_dir)
|
||||
run(
|
||||
"rpm2cpio %s | cpio -iuvmd && chmod -R a+rX ." % shlex_quote(pkg_path),
|
||||
workdir=target_dir,
|
||||
)
|
||||
|
||||
|
||||
def pkg_is_rpm(pkg_obj):
|
||||
@ -232,15 +253,15 @@ def get_arch_variant_data(conf, var_name, arch, variant, keys=None):
|
||||
|
||||
def is_arch_multilib(conf, arch):
|
||||
"""Check if at least one variant has multilib enabled on this variant."""
|
||||
return bool(get_arch_variant_data(conf, 'multilib', arch, None))
|
||||
return bool(get_arch_variant_data(conf, "multilib", arch, None))
|
||||
|
||||
|
||||
def _get_git_ref(fragment):
|
||||
if fragment == 'HEAD':
|
||||
if fragment == "HEAD":
|
||||
return fragment
|
||||
if fragment.startswith('origin/'):
|
||||
branch = fragment.split('/', 1)[1]
|
||||
return 'refs/heads/' + branch
|
||||
if fragment.startswith("origin/"):
|
||||
branch = fragment.split("/", 1)[1]
|
||||
return "refs/heads/" + branch
|
||||
return None
|
||||
|
||||
|
||||
@ -296,15 +317,15 @@ def resolve_git_url(url):
|
||||
|
||||
# Remove git+ prefix from scheme if present. This is for resolving only,
|
||||
# the final result must use original scheme.
|
||||
scheme = r.scheme.replace('git+', '')
|
||||
scheme = r.scheme.replace("git+", "")
|
||||
|
||||
baseurl = urllib.parse.urlunsplit((scheme, r.netloc, r.path, '', ''))
|
||||
baseurl = urllib.parse.urlunsplit((scheme, r.netloc, r.path, "", ""))
|
||||
fragment = resolve_git_ref(baseurl, ref)
|
||||
|
||||
result = urllib.parse.urlunsplit((r.scheme, r.netloc, r.path, r.query, fragment))
|
||||
if '?#' in url:
|
||||
if "?#" in url:
|
||||
# The urllib library drops empty query string. This hack puts it back in.
|
||||
result = result.replace('#', '?#')
|
||||
result = result.replace("#", "?#")
|
||||
return result
|
||||
|
||||
|
||||
@ -313,6 +334,7 @@ class GitUrlResolver(object):
|
||||
URL with fragment describing reference, or url and refname. It will return
|
||||
either url with changed fragment or just resolved ref.
|
||||
"""
|
||||
|
||||
def __init__(self, offline=False):
|
||||
self.offline = offline
|
||||
self.cache = {}
|
||||
@ -373,7 +395,7 @@ def get_variant_data(conf, var_name, variant, keys=None):
|
||||
|
||||
|
||||
def _apply_substitutions(compose, volid):
|
||||
substitutions = compose.conf['volume_id_substitutions'].items()
|
||||
substitutions = compose.conf["volume_id_substitutions"].items()
|
||||
# processing should start with the longest pattern, otherwise, we could
|
||||
# unexpectedly replace a substring of that longest pattern
|
||||
for k, v in sorted(substitutions, key=lambda x: len(x[0]), reverse=True):
|
||||
@ -381,8 +403,7 @@ def _apply_substitutions(compose, volid):
|
||||
return volid
|
||||
|
||||
|
||||
def get_volid(compose, arch, variant=None, disc_type=False,
|
||||
formats=None, **kwargs):
|
||||
def get_volid(compose, arch, variant=None, disc_type=False, formats=None, **kwargs):
|
||||
"""Get ISO volume ID for arch and variant"""
|
||||
if variant and variant.type == "addon":
|
||||
# addons are part of parent variant media
|
||||
@ -398,13 +419,15 @@ def get_volid(compose, arch, variant=None, disc_type=False,
|
||||
else:
|
||||
release_short = compose.conf["release_short"]
|
||||
release_version = compose.conf["release_version"]
|
||||
release_is_layered = True if compose.conf.get("base_product_name", "") else False
|
||||
release_is_layered = (
|
||||
True if compose.conf.get("base_product_name", "") else False
|
||||
)
|
||||
base_product_short = compose.conf.get("base_product_short", "")
|
||||
base_product_version = compose.conf.get("base_product_version", "")
|
||||
variant_uid = variant and variant.uid or None
|
||||
|
||||
products = compose.conf['image_volid_formats']
|
||||
layered_products = compose.conf['image_volid_layered_product_formats']
|
||||
products = compose.conf["image_volid_formats"]
|
||||
layered_products = compose.conf["image_volid_layered_product_formats"]
|
||||
|
||||
volid = None
|
||||
if release_is_layered:
|
||||
@ -418,26 +441,32 @@ def get_volid(compose, arch, variant=None, disc_type=False,
|
||||
if not variant_uid and "%(variant)s" in i:
|
||||
continue
|
||||
try:
|
||||
args = get_format_substs(compose,
|
||||
variant=variant_uid,
|
||||
release_short=release_short,
|
||||
version=release_version,
|
||||
arch=arch,
|
||||
disc_type=disc_type or '',
|
||||
base_product_short=base_product_short,
|
||||
base_product_version=base_product_version,
|
||||
**kwargs)
|
||||
args = get_format_substs(
|
||||
compose,
|
||||
variant=variant_uid,
|
||||
release_short=release_short,
|
||||
version=release_version,
|
||||
arch=arch,
|
||||
disc_type=disc_type or "",
|
||||
base_product_short=base_product_short,
|
||||
base_product_version=base_product_version,
|
||||
**kwargs
|
||||
)
|
||||
volid = (i % args).format(**args)
|
||||
except KeyError as err:
|
||||
raise RuntimeError('Failed to create volume id: unknown format element: %s' % err)
|
||||
raise RuntimeError(
|
||||
"Failed to create volume id: unknown format element: %s" % err
|
||||
)
|
||||
volid = _apply_substitutions(compose, volid)
|
||||
if len(volid) <= 32:
|
||||
break
|
||||
tried.add(volid)
|
||||
|
||||
if volid and len(volid) > 32:
|
||||
raise ValueError("Could not create volume ID longer than 32 bytes, options are %r",
|
||||
sorted(tried, key=len))
|
||||
raise ValueError(
|
||||
"Could not create volume ID longer than 32 bytes, options are %r",
|
||||
sorted(tried, key=len),
|
||||
)
|
||||
|
||||
if compose.conf["restricted_volid"]:
|
||||
# Replace all non-alphanumeric characters and non-underscores) with
|
||||
@ -455,16 +484,22 @@ def get_file_size(path):
|
||||
return os.path.getsize(path)
|
||||
|
||||
|
||||
def find_old_compose(old_compose_dirs, release_short, release_version,
|
||||
release_type_suffix, base_product_short=None,
|
||||
base_product_version=None, allowed_statuses=None):
|
||||
def find_old_compose(
|
||||
old_compose_dirs,
|
||||
release_short,
|
||||
release_version,
|
||||
release_type_suffix,
|
||||
base_product_short=None,
|
||||
base_product_version=None,
|
||||
allowed_statuses=None,
|
||||
):
|
||||
allowed_statuses = allowed_statuses or ("FINISHED", "FINISHED_INCOMPLETE", "DOOMED")
|
||||
composes = []
|
||||
|
||||
def _sortable(compose_id):
|
||||
"""Convert ID to tuple where respin is an integer for proper sorting."""
|
||||
try:
|
||||
prefix, respin = compose_id.rsplit('.', 1)
|
||||
prefix, respin = compose_id.rsplit(".", 1)
|
||||
return (prefix, int(respin))
|
||||
except Exception:
|
||||
return compose_id
|
||||
@ -486,7 +521,7 @@ def find_old_compose(old_compose_dirs, release_short, release_version,
|
||||
if not i.startswith(pattern):
|
||||
continue
|
||||
|
||||
suffix = i[len(pattern):]
|
||||
suffix = i[len(pattern) :]
|
||||
if len(suffix) < 2 or not suffix[1].isdigit():
|
||||
# This covers the case where we are looking for -updates, but there
|
||||
# is an updates-testing as well.
|
||||
@ -504,7 +539,7 @@ def find_old_compose(old_compose_dirs, release_short, release_version,
|
||||
continue
|
||||
|
||||
try:
|
||||
with open(status_path, 'r') as f:
|
||||
with open(status_path, "r") as f:
|
||||
if f.read().strip() in allowed_statuses:
|
||||
composes.append((_sortable(i), os.path.abspath(path)))
|
||||
except:
|
||||
@ -526,7 +561,9 @@ def process_args(fmt, args):
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def failable(compose, can_fail, variant, arch, deliverable, subvariant=None, logger=None):
|
||||
def failable(
|
||||
compose, can_fail, variant, arch, deliverable, subvariant=None, logger=None
|
||||
):
|
||||
"""If a deliverable can fail, log a message and go on as if it succeeded."""
|
||||
if not logger:
|
||||
logger = compose._logger
|
||||
@ -540,17 +577,21 @@ def failable(compose, can_fail, variant, arch, deliverable, subvariant=None, log
|
||||
if not can_fail:
|
||||
raise
|
||||
else:
|
||||
log_failed_task(compose, variant, arch, deliverable, subvariant, logger=logger, exc=exc)
|
||||
log_failed_task(
|
||||
compose, variant, arch, deliverable, subvariant, logger=logger, exc=exc
|
||||
)
|
||||
|
||||
|
||||
def log_failed_task(compose, variant, arch, deliverable, subvariant, logger=None, exc=None):
|
||||
def log_failed_task(
|
||||
compose, variant, arch, deliverable, subvariant, logger=None, exc=None
|
||||
):
|
||||
logger = logger or compose._logger
|
||||
msg = deliverable.replace('-', ' ').capitalize()
|
||||
msg = deliverable.replace("-", " ").capitalize()
|
||||
compose.fail_deliverable(variant, arch, deliverable, subvariant)
|
||||
ident = 'variant %s, arch %s' % (variant.uid if variant else 'None', arch)
|
||||
ident = "variant %s, arch %s" % (variant.uid if variant else "None", arch)
|
||||
if subvariant:
|
||||
ident += ', subvariant %s' % subvariant
|
||||
logger.error('[FAIL] %s (%s) failed, but going on anyway.' % (msg, ident))
|
||||
ident += ", subvariant %s" % subvariant
|
||||
logger.error("[FAIL] %s (%s) failed, but going on anyway." % (msg, ident))
|
||||
if exc:
|
||||
logger.error(str(exc))
|
||||
tb = traceback.format_exc()
|
||||
@ -559,7 +600,7 @@ def log_failed_task(compose, variant, arch, deliverable, subvariant, logger=None
|
||||
|
||||
def can_arch_fail(failable_arches, arch):
|
||||
"""Check if `arch` is in `failable_arches` or `*` can fail."""
|
||||
return '*' in failable_arches or arch in failable_arches
|
||||
return "*" in failable_arches or arch in failable_arches
|
||||
|
||||
|
||||
def get_format_substs(compose, **kwargs):
|
||||
@ -568,15 +609,15 @@ def get_format_substs(compose, **kwargs):
|
||||
Any kwargs will be added as well.
|
||||
"""
|
||||
substs = {
|
||||
'compose_id': compose.compose_id,
|
||||
'release_short': compose.ci_base.release.short,
|
||||
'version': compose.ci_base.release.version,
|
||||
'date': compose.compose_date,
|
||||
'respin': compose.compose_respin,
|
||||
'type': compose.compose_type,
|
||||
'type_suffix': compose.compose_type_suffix,
|
||||
'label': compose.compose_label,
|
||||
'label_major_version': compose.compose_label_major_version,
|
||||
"compose_id": compose.compose_id,
|
||||
"release_short": compose.ci_base.release.short,
|
||||
"version": compose.ci_base.release.version,
|
||||
"date": compose.compose_date,
|
||||
"respin": compose.compose_respin,
|
||||
"type": compose.compose_type,
|
||||
"type_suffix": compose.compose_type_suffix,
|
||||
"label": compose.compose_label,
|
||||
"label_major_version": compose.compose_label_major_version,
|
||||
}
|
||||
substs.update(kwargs)
|
||||
return substs
|
||||
@ -603,7 +644,7 @@ def copy_all(src, dest):
|
||||
"""
|
||||
contents = os.listdir(src)
|
||||
if not contents:
|
||||
raise RuntimeError('Source directory %s is empty.' % src)
|
||||
raise RuntimeError("Source directory %s is empty." % src)
|
||||
makedirs(dest)
|
||||
for item in contents:
|
||||
source = os.path.join(src, item)
|
||||
@ -651,9 +692,9 @@ def levenshtein(a, b):
|
||||
for j in range(1, len(b) + 1):
|
||||
for i in range(1, len(a) + 1):
|
||||
cost = 0 if a[i - 1] == b[j - 1] else 1
|
||||
mat[j][i] = min(mat[j - 1][i] + 1,
|
||||
mat[j][i - 1] + 1,
|
||||
mat[j - 1][i - 1] + cost)
|
||||
mat[j][i] = min(
|
||||
mat[j - 1][i] + 1, mat[j][i - 1] + 1, mat[j - 1][i - 1] + cost
|
||||
)
|
||||
|
||||
return mat[len(b)][len(a)]
|
||||
|
||||
@ -661,10 +702,10 @@ def levenshtein(a, b):
|
||||
@contextlib.contextmanager
|
||||
def temp_dir(log=None, *args, **kwargs):
|
||||
"""Create a temporary directory and ensure it's deleted."""
|
||||
if kwargs.get('dir'):
|
||||
if kwargs.get("dir"):
|
||||
# If we are supposed to create the temp dir in a particular location,
|
||||
# ensure the location already exists.
|
||||
makedirs(kwargs['dir'])
|
||||
makedirs(kwargs["dir"])
|
||||
dir = tempfile.mkdtemp(*args, **kwargs)
|
||||
try:
|
||||
yield dir
|
||||
@ -674,7 +715,7 @@ def temp_dir(log=None, *args, **kwargs):
|
||||
except OSError as exc:
|
||||
# Okay, we failed to delete temporary dir.
|
||||
if log:
|
||||
log.warning('Error removing %s: %s', dir, exc.strerror)
|
||||
log.warning("Error removing %s: %s", dir, exc.strerror)
|
||||
|
||||
|
||||
def run_unmount_cmd(cmd, max_retries=10, path=None, logger=None):
|
||||
@ -687,33 +728,41 @@ def run_unmount_cmd(cmd, max_retries=10, path=None, logger=None):
|
||||
printed in case of failure.
|
||||
"""
|
||||
for i in range(max_retries):
|
||||
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
|
||||
universal_newlines=True)
|
||||
proc = subprocess.Popen(
|
||||
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
|
||||
)
|
||||
out, err = proc.communicate()
|
||||
if proc.returncode == 0:
|
||||
# We were successful
|
||||
return
|
||||
if 'Device or resource busy' not in err:
|
||||
raise RuntimeError('Unhandled error when running %r: %r' % (cmd, err))
|
||||
if "Device or resource busy" not in err:
|
||||
raise RuntimeError("Unhandled error when running %r: %r" % (cmd, err))
|
||||
time.sleep(i)
|
||||
# Still busy, there's something wrong.
|
||||
if path and logger:
|
||||
commands = [
|
||||
['ls', '-lA', path],
|
||||
['fuser', '-vm', path],
|
||||
['lsof', '+D', path],
|
||||
["ls", "-lA", path],
|
||||
["fuser", "-vm", path],
|
||||
["lsof", "+D", path],
|
||||
]
|
||||
for c in commands:
|
||||
try:
|
||||
proc = subprocess.Popen(c, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
|
||||
universal_newlines=True)
|
||||
proc = subprocess.Popen(
|
||||
c,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT,
|
||||
universal_newlines=True,
|
||||
)
|
||||
out, _ = proc.communicate()
|
||||
logger.debug('`%s` exited with %s and following output:\n%s',
|
||||
' '.join(c), proc.returncode, out)
|
||||
logger.debug(
|
||||
"`%s` exited with %s and following output:\n%s",
|
||||
" ".join(c),
|
||||
proc.returncode,
|
||||
out,
|
||||
)
|
||||
except OSError:
|
||||
logger.debug('`%s` command not available for debugging',
|
||||
' '.join(c))
|
||||
raise RuntimeError('Failed to run %r: Device or resource busy.' % cmd)
|
||||
logger.debug("`%s` command not available for debugging", " ".join(c))
|
||||
raise RuntimeError("Failed to run %r: Device or resource busy." % cmd)
|
||||
|
||||
|
||||
def translate_path_raw(mapping, path):
|
||||
@ -721,7 +770,7 @@ def translate_path_raw(mapping, path):
|
||||
for prefix, newvalue in mapping:
|
||||
prefix = os.path.normpath(prefix)
|
||||
# Strip trailing slashes: the prefix has them stripped by `normpath`.
|
||||
newvalue = newvalue.rstrip('/')
|
||||
newvalue = newvalue.rstrip("/")
|
||||
if normpath.startswith(prefix):
|
||||
# We can't call os.path.normpath on result since it is not actually
|
||||
# a path - http:// would get changed to http:/ and so on.
|
||||
@ -739,7 +788,7 @@ def translate_path(compose, path):
|
||||
return translate_path_raw(mapping, path)
|
||||
|
||||
|
||||
def get_repo_url(compose, repo, arch='$basearch'):
|
||||
def get_repo_url(compose, repo, arch="$basearch"):
|
||||
"""
|
||||
Convert repo to repo URL.
|
||||
|
||||
@ -751,25 +800,27 @@ def get_repo_url(compose, repo, arch='$basearch'):
|
||||
"""
|
||||
if isinstance(repo, dict):
|
||||
try:
|
||||
repo = repo['baseurl']
|
||||
repo = repo["baseurl"]
|
||||
except KeyError:
|
||||
raise RuntimeError('Baseurl is required in repo dict %s' % str(repo))
|
||||
raise RuntimeError("Baseurl is required in repo dict %s" % str(repo))
|
||||
if repo.startswith("/"):
|
||||
# It's an absolute path, translate it and return it
|
||||
return translate_path(compose, repo)
|
||||
if '://' not in repo:
|
||||
if "://" not in repo:
|
||||
# this is a variant name
|
||||
if compose is not None:
|
||||
v = compose.all_variants.get(repo)
|
||||
if not v:
|
||||
raise RuntimeError('There is no variant %s to get repo from.' % repo)
|
||||
raise RuntimeError("There is no variant %s to get repo from." % repo)
|
||||
else:
|
||||
return None
|
||||
repo = translate_path(compose, compose.paths.compose.repository(arch, v, create_dir=False))
|
||||
repo = translate_path(
|
||||
compose, compose.paths.compose.repository(arch, v, create_dir=False)
|
||||
)
|
||||
return repo
|
||||
|
||||
|
||||
def get_repo_urls(compose, repos, arch='$basearch', logger=None):
|
||||
def get_repo_urls(compose, repos, arch="$basearch", logger=None):
|
||||
"""
|
||||
Convert repos to a list of repo URLs.
|
||||
|
||||
@ -782,7 +833,10 @@ def get_repo_urls(compose, repos, arch='$basearch', logger=None):
|
||||
repo = get_repo_url(compose, repo, arch=arch)
|
||||
if repo is None:
|
||||
if logger:
|
||||
logger.log_warning("Variant-type source repository is deprecated and will be ignored during 'OSTreeInstaller' phase: %s" % (repo))
|
||||
logger.log_warning(
|
||||
"Variant-type source repository is deprecated and will be ignored during 'OSTreeInstaller' phase: %s"
|
||||
% (repo)
|
||||
)
|
||||
else:
|
||||
urls.append(repo)
|
||||
return urls
|
||||
@ -792,8 +846,8 @@ def _translate_url_to_repo_id(url):
|
||||
"""
|
||||
Translate url to valid repo id by replacing any invalid char to '_'.
|
||||
"""
|
||||
_REPOID_CHARS = string.ascii_letters + string.digits + '-_.:'
|
||||
return ''.join([s if s in list(_REPOID_CHARS) else '_' for s in url])
|
||||
_REPOID_CHARS = string.ascii_letters + string.digits + "-_.:"
|
||||
return "".join([s if s in list(_REPOID_CHARS) else "_" for s in url])
|
||||
|
||||
|
||||
def get_repo_dict(repo):
|
||||
@ -809,23 +863,23 @@ def get_repo_dict(repo):
|
||||
"""
|
||||
repo_dict = {}
|
||||
if isinstance(repo, dict):
|
||||
url = repo['baseurl']
|
||||
name = repo.get('name', None)
|
||||
if '://' in url:
|
||||
url = repo["baseurl"]
|
||||
name = repo.get("name", None)
|
||||
if "://" in url:
|
||||
if name is None:
|
||||
name = _translate_url_to_repo_id(url)
|
||||
else:
|
||||
# url is variant uid - this possibility is now discontinued
|
||||
return {}
|
||||
repo['name'] = name
|
||||
repo['baseurl'] = url
|
||||
repo["name"] = name
|
||||
repo["baseurl"] = url
|
||||
return repo
|
||||
else:
|
||||
# repo is normal url or variant uid
|
||||
repo_dict = {}
|
||||
if '://' in repo:
|
||||
repo_dict['name'] = _translate_url_to_repo_id(repo)
|
||||
repo_dict['baseurl'] = repo
|
||||
if "://" in repo:
|
||||
repo_dict["name"] = _translate_url_to_repo_id(repo)
|
||||
repo_dict["baseurl"] = repo
|
||||
else:
|
||||
return {}
|
||||
return repo_dict
|
||||
@ -842,7 +896,10 @@ def get_repo_dicts(repos, logger=None):
|
||||
repo_dict = get_repo_dict(repo)
|
||||
if repo_dict == {}:
|
||||
if logger:
|
||||
logger.log_warning("Variant-type source repository is deprecated and will be ignored during 'OSTree' phase: %s" % (repo))
|
||||
logger.log_warning(
|
||||
"Variant-type source repository is deprecated and will be ignored during 'OSTree' phase: %s"
|
||||
% (repo)
|
||||
)
|
||||
else:
|
||||
repo_dicts.append(repo_dict)
|
||||
return repo_dicts
|
||||
@ -852,19 +909,21 @@ def version_generator(compose, gen):
"""If ``gen`` is a known generator, create a value. Otherwise return
the argument value unchanged.
"""
if gen == '!OSTREE_VERSION_FROM_LABEL_DATE_TYPE_RESPIN':
return '%s.%s' % (compose.image_version, compose.image_release)
elif gen == '!RELEASE_FROM_LABEL_DATE_TYPE_RESPIN':
if gen == "!OSTREE_VERSION_FROM_LABEL_DATE_TYPE_RESPIN":
return "%s.%s" % (compose.image_version, compose.image_release)
elif gen == "!RELEASE_FROM_LABEL_DATE_TYPE_RESPIN":
return compose.image_release
elif gen == '!RELEASE_FROM_DATE_RESPIN':
return '%s.%s' % (compose.compose_date, compose.compose_respin)
elif gen == '!VERSION_FROM_VERSION_DATE_RESPIN':
return '%s.%s.%s' % (compose.ci_base.release.version,
compose.compose_date,
compose.compose_respin)
elif gen == '!VERSION_FROM_VERSION':
return '%s' % (compose.ci_base.release.version)
elif gen and gen[0] == '!':
elif gen == "!RELEASE_FROM_DATE_RESPIN":
return "%s.%s" % (compose.compose_date, compose.compose_respin)
elif gen == "!VERSION_FROM_VERSION_DATE_RESPIN":
return "%s.%s.%s" % (
compose.ci_base.release.version,
compose.compose_date,
compose.compose_respin,
)
elif gen == "!VERSION_FROM_VERSION":
return "%s" % (compose.ci_base.release.version)
elif gen and gen[0] == "!":
raise RuntimeError("Unknown version generator '%s'" % gen)
return gen
|
||||
@ -873,6 +932,7 @@ def retry(timeout=120, interval=30, wait_on=Exception):
|
||||
""" A decorator that allows to retry a section of code until success or
|
||||
timeout.
|
||||
"""
|
||||
|
||||
def wrapper(function):
|
||||
@functools.wraps(function)
|
||||
def inner(*args, **kwargs):
|
||||
@ -884,13 +944,15 @@ def retry(timeout=120, interval=30, wait_on=Exception):
|
||||
return function(*args, **kwargs)
|
||||
except wait_on:
|
||||
time.sleep(interval)
|
||||
|
||||
return inner
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
@retry(wait_on=RuntimeError)
|
||||
def git_ls_remote(baseurl, ref):
|
||||
return run(['git', 'ls-remote', baseurl, ref], universal_newlines=True)
|
||||
return run(["git", "ls-remote", baseurl, ref], universal_newlines=True)
|
||||
|
||||
|
||||
def get_tz_offset():
|
||||
|
@ -41,12 +41,14 @@ if sys.version_info[:2] < (2, 7):
xml.dom.minidom.Element = Element


TYPE_MAPPING = collections.OrderedDict([
(libcomps.PACKAGE_TYPE_MANDATORY, 'mandatory'),
(libcomps.PACKAGE_TYPE_DEFAULT, 'default'),
(libcomps.PACKAGE_TYPE_OPTIONAL, 'optional'),
(libcomps.PACKAGE_TYPE_CONDITIONAL, 'conditional'),
])
TYPE_MAPPING = collections.OrderedDict(
[
(libcomps.PACKAGE_TYPE_MANDATORY, "mandatory"),
(libcomps.PACKAGE_TYPE_DEFAULT, "default"),
(libcomps.PACKAGE_TYPE_OPTIONAL, "optional"),
(libcomps.PACKAGE_TYPE_CONDITIONAL, "conditional"),
]
)


class CompsValidationError(ValueError):

@ -89,10 +91,13 @@ class CompsFilter(object):
|
||||
If only_arch is set, then only packages for the specified arch are preserved.
|
||||
Multiple arches separated by comma can be specified in the XML.
|
||||
"""
|
||||
self._filter_elements_by_attr("/comps/group/packagelist/packagereq", 'arch', arch, only_arch)
|
||||
self._filter_elements_by_attr(
|
||||
"/comps/group/packagelist/packagereq", "arch", arch, only_arch
|
||||
)
|
||||
if variant:
|
||||
self._filter_elements_by_attr("/comps/group/packagelist/packagereq",
|
||||
'variant', variant, only_arch)
|
||||
self._filter_elements_by_attr(
|
||||
"/comps/group/packagelist/packagereq", "variant", variant, only_arch
|
||||
)
|
||||
|
||||
def filter_groups(self, arch, variant, only_arch=False):
|
||||
"""
|
||||
@ -100,9 +105,9 @@ class CompsFilter(object):
|
||||
If only_arch is set, then only groups for the specified arch are preserved.
|
||||
Multiple arches separated by comma can be specified in the XML.
|
||||
"""
|
||||
self._filter_elements_by_attr("/comps/group", 'arch', arch, only_arch)
|
||||
self._filter_elements_by_attr("/comps/group", "arch", arch, only_arch)
|
||||
if variant:
|
||||
self._filter_elements_by_attr("/comps/group", 'variant', variant, only_arch)
|
||||
self._filter_elements_by_attr("/comps/group", "variant", variant, only_arch)
|
||||
|
||||
def filter_environments(self, arch, variant, only_arch=False):
|
||||
"""
|
||||
@ -110,9 +115,11 @@ class CompsFilter(object):
|
||||
If only_arch is set, then only environments for the specified arch are preserved.
|
||||
Multiple arches separated by comma can be specified in the XML.
|
||||
"""
|
||||
self._filter_elements_by_attr("/comps/environment", 'arch', arch, only_arch)
|
||||
self._filter_elements_by_attr("/comps/environment", "arch", arch, only_arch)
|
||||
if variant:
|
||||
self._filter_elements_by_attr("/comps/environment", 'variant', variant, only_arch)
|
||||
self._filter_elements_by_attr(
|
||||
"/comps/environment", "variant", variant, only_arch
|
||||
)
|
||||
|
||||
def filter_category_groups(self):
|
||||
"""
|
||||
@ -196,7 +203,12 @@ class CompsFilter(object):
|
||||
i.getparent().remove(i)
|
||||
|
||||
def write(self, file_obj):
|
||||
self.tree.write(file_obj, pretty_print=self.reindent, xml_declaration=True, encoding=self.encoding)
|
||||
self.tree.write(
|
||||
file_obj,
|
||||
pretty_print=self.reindent,
|
||||
xml_declaration=True,
|
||||
encoding=self.encoding,
|
||||
)
|
||||
file_obj.write(b"\n")
|
||||
|
||||
def cleanup(self, keep_groups=[], lookaside_groups=[]):
|
||||
@ -235,7 +247,7 @@ class CompsWrapper(object):
|
||||
for grp in self.comps.groups:
|
||||
if grp.id == group:
|
||||
return [pkg.name for pkg in grp.packages]
|
||||
raise KeyError('No such group %r' % group)
|
||||
raise KeyError("No such group %r" % group)
|
||||
|
||||
def get_langpacks(self):
|
||||
langpacks = {}
|
||||
@ -273,11 +285,13 @@ class CompsWrapper(object):
|
||||
|
||||
def generate_comps(self):
|
||||
impl = xml.dom.minidom.getDOMImplementation()
|
||||
doctype = impl.createDocumentType("comps", "-//Red Hat, Inc.//DTD Comps info//EN", "comps.dtd")
|
||||
doctype = impl.createDocumentType(
|
||||
"comps", "-//Red Hat, Inc.//DTD Comps info//EN", "comps.dtd"
|
||||
)
|
||||
doc = impl.createDocument(None, "comps", doctype)
|
||||
msg_elem = doc.documentElement
|
||||
|
||||
for group in sorted(self.comps.groups, key=attrgetter('id')):
|
||||
for group in sorted(self.comps.groups, key=attrgetter("id")):
|
||||
group_node = doc.createElement("group")
|
||||
msg_elem.appendChild(group_node)
|
||||
|
||||
@ -294,13 +308,14 @@ class CompsWrapper(object):
|
||||
for pkg in group.packages:
|
||||
if pkg.type == libcomps.PACKAGE_TYPE_UNKNOWN:
|
||||
raise RuntimeError(
|
||||
'Failed to process comps file. Package %s in group %s has unknown type'
|
||||
% (pkg.name, group.id))
|
||||
"Failed to process comps file. Package %s in group %s has unknown type"
|
||||
% (pkg.name, group.id)
|
||||
)
|
||||
|
||||
packages_by_type[TYPE_MAPPING[pkg.type]].append(pkg)
|
||||
|
||||
for type_name in TYPE_MAPPING.values():
|
||||
for pkg in sorted(packages_by_type[type_name], key=attrgetter('name')):
|
||||
for pkg in sorted(packages_by_type[type_name], key=attrgetter("name")):
|
||||
kwargs = {"type": type_name}
|
||||
if type_name == "conditional":
|
||||
kwargs["requires"] = pkg.requires
|
||||
@ -309,7 +324,9 @@ class CompsWrapper(object):
|
||||
group_node.appendChild(packagelist)
|
||||
|
||||
for category in self.comps.categories:
|
||||
groups = set(x.name for x in category.group_ids) & set(self.get_comps_groups())
|
||||
groups = set(x.name for x in category.group_ids) & set(
|
||||
self.get_comps_groups()
|
||||
)
|
||||
if not groups:
|
||||
continue
|
||||
cat_node = doc.createElement("category")
|
||||
@ -322,7 +339,7 @@ class CompsWrapper(object):
|
||||
|
||||
append_grouplist(doc, cat_node, groups)
|
||||
|
||||
for environment in sorted(self.comps.environments, key=attrgetter('id')):
|
||||
for environment in sorted(self.comps.environments, key=attrgetter("id")):
|
||||
groups = set(x.name for x in environment.group_ids)
|
||||
if not groups:
|
||||
continue
|
||||
@ -337,14 +354,25 @@ class CompsWrapper(object):
|
||||
append_grouplist(doc, env_node, groups)
|
||||
|
||||
if environment.option_ids:
|
||||
append_grouplist(doc, env_node, (x.name for x in environment.option_ids), "optionlist")
|
||||
append_grouplist(
|
||||
doc,
|
||||
env_node,
|
||||
(x.name for x in environment.option_ids),
|
||||
"optionlist",
|
||||
)
|
||||
|
||||
if self.comps.langpacks:
|
||||
lang_node = doc.createElement("langpacks")
|
||||
msg_elem.appendChild(lang_node)
|
||||
|
||||
for name in sorted(self.comps.langpacks):
|
||||
append(doc, lang_node, "match", name=name, install=self.comps.langpacks[name])
|
||||
append(
|
||||
doc,
|
||||
lang_node,
|
||||
"match",
|
||||
name=name,
|
||||
install=self.comps.langpacks[name],
|
||||
)
|
||||
|
||||
return doc
|
||||
|
||||
@ -446,7 +474,7 @@ def append_common_info(doc, parent, obj, force_description=False):
|
||||
append(doc, parent, "name", text, lang=lang)
|
||||
|
||||
if obj.desc or force_description:
|
||||
append(doc, parent, "description", obj.desc or '')
|
||||
append(doc, parent, "description", obj.desc or "")
|
||||
|
||||
for lang in sorted(obj.desc_by_lang):
|
||||
text = obj.desc_by_lang[lang]
|
||||
|
@ -28,13 +28,37 @@ class CreaterepoWrapper(object):
|
||||
self.mergerepo = "mergerepo"
|
||||
self.modifyrepo = "modifyrepo"
|
||||
|
||||
def get_createrepo_cmd(self, directory, baseurl=None, outputdir=None, basedir=None, excludes=None,
|
||||
pkglist=None, groupfile=None, cachedir=None, update=True,
|
||||
update_md_path=None, skip_stat=False, checkts=False, split=False,
|
||||
pretty=True, database=True, checksum=None, unique_md_filenames=True,
|
||||
distro=None, content=None, repo=None, revision=None, deltas=False,
|
||||
oldpackagedirs=None, num_deltas=None, workers=None, use_xz=False,
|
||||
compress_type=None, extra_args=None):
|
||||
def get_createrepo_cmd(
|
||||
self,
|
||||
directory,
|
||||
baseurl=None,
|
||||
outputdir=None,
|
||||
basedir=None,
|
||||
excludes=None,
|
||||
pkglist=None,
|
||||
groupfile=None,
|
||||
cachedir=None,
|
||||
update=True,
|
||||
update_md_path=None,
|
||||
skip_stat=False,
|
||||
checkts=False,
|
||||
split=False,
|
||||
pretty=True,
|
||||
database=True,
|
||||
checksum=None,
|
||||
unique_md_filenames=True,
|
||||
distro=None,
|
||||
content=None,
|
||||
repo=None,
|
||||
revision=None,
|
||||
deltas=False,
|
||||
oldpackagedirs=None,
|
||||
num_deltas=None,
|
||||
workers=None,
|
||||
use_xz=False,
|
||||
compress_type=None,
|
||||
extra_args=None,
|
||||
):
|
||||
# groupfile = /path/to/comps.xml
|
||||
|
||||
cmd = [self.createrepo, directory]
|
||||
@ -129,7 +153,15 @@ class CreaterepoWrapper(object):
|
||||
|
||||
return cmd
|
||||
|
||||
def get_mergerepo_cmd(self, outputdir, repos, database=True, pkglist=None, nogroups=False, noupdateinfo=None):
|
||||
def get_mergerepo_cmd(
|
||||
self,
|
||||
outputdir,
|
||||
repos,
|
||||
database=True,
|
||||
pkglist=None,
|
||||
nogroups=False,
|
||||
noupdateinfo=None,
|
||||
):
|
||||
cmd = [self.mergerepo]
|
||||
|
||||
cmd.append("--outputdir=%s" % outputdir)
|
||||
@ -156,7 +188,9 @@ class CreaterepoWrapper(object):
|
||||
|
||||
return cmd
|
||||
|
||||
def get_modifyrepo_cmd(self, repo_path, file_path, mdtype=None, compress_type=None, remove=False):
|
||||
def get_modifyrepo_cmd(
|
||||
self, repo_path, file_path, mdtype=None, compress_type=None, remove=False
|
||||
):
|
||||
cmd = [self.modifyrepo]
|
||||
|
||||
cmd.append(file_path)
|
||||
|
@ -26,12 +26,7 @@ Pungi).
|
||||
|
||||
|
||||
def get_cmd(
|
||||
conf_file,
|
||||
arch,
|
||||
repos,
|
||||
lookasides,
|
||||
platform=None,
|
||||
filter_packages=None,
|
||||
conf_file, arch, repos, lookasides, platform=None, filter_packages=None,
|
||||
):
|
||||
cmd = ["fus", "--verbose", "--arch", arch]
|
||||
|
||||
@ -64,7 +59,7 @@ def write_config(conf_file, modules, packages):
|
||||
def _prep_path(path):
|
||||
"""Strip file:// from the path if present."""
|
||||
if path.startswith("file://"):
|
||||
return path[len("file://"):]
|
||||
return path[len("file://") :]
|
||||
return path
|
||||
|
||||
|
||||
|
@ -30,76 +30,88 @@ def get_boot_options(arch, createfrom, efi=True, hfs_compat=True):
|
||||
result = []
|
||||
return result
|
||||
|
||||
if arch in ("aarch64", ):
|
||||
if arch in ("aarch64",):
|
||||
result = [
|
||||
'-eltorito-alt-boot',
|
||||
'-e', 'images/efiboot.img',
|
||||
'-no-emul-boot',
|
||||
"-eltorito-alt-boot",
|
||||
"-e",
|
||||
"images/efiboot.img",
|
||||
"-no-emul-boot",
|
||||
]
|
||||
return result
|
||||
|
||||
if arch in ("i386", "i686", "x86_64"):
|
||||
result = [
|
||||
'-b', 'isolinux/isolinux.bin',
|
||||
'-c', 'isolinux/boot.cat',
|
||||
'-no-emul-boot',
|
||||
'-boot-load-size', '4',
|
||||
'-boot-info-table',
|
||||
"-b",
|
||||
"isolinux/isolinux.bin",
|
||||
"-c",
|
||||
"isolinux/boot.cat",
|
||||
"-no-emul-boot",
|
||||
"-boot-load-size",
|
||||
"4",
|
||||
"-boot-info-table",
|
||||
]
|
||||
|
||||
# EFI args
|
||||
if arch == "x86_64":
|
||||
result.extend([
|
||||
'-eltorito-alt-boot',
|
||||
'-e', 'images/efiboot.img',
|
||||
'-no-emul-boot',
|
||||
])
|
||||
result.extend(
|
||||
["-eltorito-alt-boot", "-e", "images/efiboot.img", "-no-emul-boot"]
|
||||
)
|
||||
return result
|
||||
|
||||
if arch == "ia64":
|
||||
result = [
|
||||
'-b', 'images/boot.img',
|
||||
'-no-emul-boot',
|
||||
"-b",
|
||||
"images/boot.img",
|
||||
"-no-emul-boot",
|
||||
]
|
||||
return result
|
||||
|
||||
if arch in ("ppc", "ppc64") or (arch == "ppc64le" and hfs_compat):
|
||||
result = [
|
||||
'-part',
|
||||
'-hfs',
|
||||
'-r',
|
||||
'-l',
|
||||
'-sysid', 'PPC',
|
||||
'-no-desktop',
|
||||
'-allow-multidot',
|
||||
'-chrp-boot',
|
||||
"-map", os.path.join(createfrom, 'mapping'), # -map %s/ppc/mapping
|
||||
'-hfs-bless', "/ppc/mac", # must be the last
|
||||
"-part",
|
||||
"-hfs",
|
||||
"-r",
|
||||
"-l",
|
||||
"-sysid",
|
||||
"PPC",
|
||||
"-no-desktop",
|
||||
"-allow-multidot",
|
||||
"-chrp-boot",
|
||||
"-map",
|
||||
os.path.join(createfrom, "mapping"), # -map %s/ppc/mapping
|
||||
"-hfs-bless",
|
||||
"/ppc/mac", # must be the last
|
||||
]
|
||||
return result
|
||||
|
||||
if arch == "ppc64le" and not hfs_compat:
|
||||
result = [
|
||||
'-r',
|
||||
'-l',
|
||||
'-sysid', 'PPC',
|
||||
'-chrp-boot',
|
||||
"-r",
|
||||
"-l",
|
||||
"-sysid",
|
||||
"PPC",
|
||||
"-chrp-boot",
|
||||
]
|
||||
return result
|
||||
|
||||
if arch == "sparc":
|
||||
result = [
|
||||
'-G', '/boot/isofs.b',
|
||||
'-B', '...',
|
||||
'-s', '/boot/silo.conf',
|
||||
'-sparc-label', '"sparc"',
|
||||
"-G",
|
||||
"/boot/isofs.b",
|
||||
"-B",
|
||||
"...",
|
||||
"-s",
|
||||
"/boot/silo.conf",
|
||||
"-sparc-label",
|
||||
'"sparc"',
|
||||
]
|
||||
return result
|
||||
|
||||
if arch in ("s390", "s390x"):
|
||||
result = [
|
||||
'-eltorito-boot', 'images/cdboot.img',
|
||||
'-no-emul-boot',
|
||||
"-eltorito-boot",
|
||||
"images/cdboot.img",
|
||||
"-no-emul-boot",
|
||||
]
|
||||
return result
|
||||
|
||||
@ -122,7 +134,18 @@ def _truncate_volid(volid):
|
||||
return volid
|
||||
|
||||
|
||||
def get_mkisofs_cmd(iso, paths, appid=None, volid=None, volset=None, exclude=None, verbose=False, boot_args=None, input_charset="utf-8", graft_points=None):
|
||||
def get_mkisofs_cmd(
|
||||
iso,
|
||||
paths,
|
||||
appid=None,
|
||||
volid=None,
|
||||
volset=None,
|
||||
exclude=None,
|
||||
verbose=False,
|
||||
boot_args=None,
|
||||
input_charset="utf-8",
|
||||
graft_points=None,
|
||||
):
|
||||
# following options are always enabled
|
||||
untranslated_filenames = True
|
||||
translation_table = True
|
||||
@ -201,7 +224,7 @@ def get_checkisomd5_data(iso_path, logger=None):
|
||||
retcode, output = run(cmd, universal_newlines=True)
|
||||
items = [line.strip().rsplit(":", 1) for line in output.splitlines()]
|
||||
items = dict([(k, v.strip()) for k, v in items])
|
||||
md5 = items.get(iso_path, '')
|
||||
md5 = items.get(iso_path, "")
|
||||
if len(md5) != 32:
|
||||
# We have seen cases where the command finished successfully, but
|
||||
# returned garbage value. We need to handle it, otherwise there would
|
||||
@ -209,8 +232,10 @@ def get_checkisomd5_data(iso_path, logger=None):
|
||||
# This only logs information about the problem and leaves the hash
|
||||
# empty, which is valid from productmd point of view.
|
||||
if logger:
|
||||
logger.critical('Implanted MD5 in %s is not valid: %r', iso_path, md5)
|
||||
logger.critical('Ran command %r; exit code %r; output %r', cmd, retcode, output)
|
||||
logger.critical("Implanted MD5 in %s is not valid: %r", iso_path, md5)
|
||||
logger.critical(
|
||||
"Ran command %r; exit code %r; output %r", cmd, retcode, output
|
||||
)
|
||||
return None
|
||||
return items
|
||||
|
||||
@ -231,7 +256,9 @@ def get_isohybrid_cmd(iso_path, arch):
|
||||
|
||||
def get_manifest_cmd(iso_name):
|
||||
return "isoinfo -R -f -i %s | grep -v '/TRANS.TBL$' | sort >> %s.manifest" % (
|
||||
shlex_quote(iso_name), shlex_quote(iso_name))
|
||||
shlex_quote(iso_name),
|
||||
shlex_quote(iso_name),
|
||||
)
|
||||
|
||||
|
||||
def get_volume_id(path):
|
||||
@ -289,7 +316,7 @@ def _paths_from_list(root, paths):
|
||||
result = {}
|
||||
for i in paths:
|
||||
i = os.path.normpath(os.path.join(root, i))
|
||||
key = i[len(root):]
|
||||
key = i[len(root) :]
|
||||
result[key] = i
|
||||
return result
|
||||
|
||||
@ -315,7 +342,9 @@ def _scan_tree(path):
|
||||
def _merge_trees(tree1, tree2, exclusive=False):
|
||||
# tree2 has higher priority
|
||||
result = tree2.copy()
|
||||
all_dirs = set([os.path.dirname(i).rstrip("/") for i in result if os.path.dirname(i) != ""])
|
||||
all_dirs = set(
|
||||
[os.path.dirname(i).rstrip("/") for i in result if os.path.dirname(i) != ""]
|
||||
)
|
||||
|
||||
for i in tree1:
|
||||
dn = os.path.dirname(i)
|
||||
@ -408,14 +437,18 @@ def mount(image, logger=None, use_guestmount=True):
|
||||
The yielded path will only be valid in the with block and is removed once
|
||||
the image is unmounted.
|
||||
"""
|
||||
with util.temp_dir(prefix='iso-mount-') as mount_dir:
|
||||
with util.temp_dir(prefix="iso-mount-") as mount_dir:
|
||||
ret, __ = run(["which", "guestmount"], can_fail=True)
|
||||
# return code 0 means that guestmount is available
|
||||
guestmount_available = use_guestmount and not bool(ret)
|
||||
if guestmount_available:
|
||||
# use guestmount to mount the image, which doesn't require root privileges
|
||||
# LIBGUESTFS_BACKEND=direct: running qemu directly without libvirt
|
||||
env = {'LIBGUESTFS_BACKEND': 'direct', 'LIBGUESTFS_DEBUG': '1', 'LIBGUESTFS_TRACE': '1'}
|
||||
env = {
|
||||
"LIBGUESTFS_BACKEND": "direct",
|
||||
"LIBGUESTFS_DEBUG": "1",
|
||||
"LIBGUESTFS_TRACE": "1",
|
||||
}
|
||||
cmd = ["guestmount", "-a", image, "-m", "/dev/sda", mount_dir]
|
||||
# guestmount caches files for faster mounting. However,
|
||||
# systemd-tmpfiles is cleaning it up if the files have not been
|
||||
@ -446,13 +479,14 @@ def mount(image, logger=None, use_guestmount=True):
|
||||
if ret != 0:
|
||||
# The mount command failed, something is wrong. Log the output and raise an exception.
|
||||
if logger:
|
||||
logger.error('Command %s exited with %s and output:\n%s'
|
||||
% (cmd, ret, out))
|
||||
raise RuntimeError('Failed to mount %s' % image)
|
||||
logger.error(
|
||||
"Command %s exited with %s and output:\n%s" % (cmd, ret, out)
|
||||
)
|
||||
raise RuntimeError("Failed to mount %s" % image)
|
||||
try:
|
||||
yield mount_dir
|
||||
finally:
|
||||
if guestmount_available:
|
||||
util.run_unmount_cmd(['fusermount', '-u', mount_dir], path=mount_dir)
|
||||
util.run_unmount_cmd(["fusermount", "-u", mount_dir], path=mount_dir)
|
||||
else:
|
||||
util.run_unmount_cmd(['umount', mount_dir], path=mount_dir)
|
||||
util.run_unmount_cmd(["umount", mount_dir], path=mount_dir)
|
||||
|
@ -21,7 +21,9 @@ from kobo.shortcuts import force_list


class JigdoWrapper(kobo.log.LoggingBase):
def get_jigdo_cmd(self, image, files, output_dir, cache=None, no_servers=False, report=None):
def get_jigdo_cmd(
self, image, files, output_dir, cache=None, no_servers=False, report=None
):
"""
files: [{"path", "label", "uri"}]
"""

@ -30,7 +30,7 @@ from .. import util
|
||||
from ..arch_utils import getBaseArch
|
||||
|
||||
|
||||
KOJI_BUILD_DELETED = koji.BUILD_STATES['DELETED']
|
||||
KOJI_BUILD_DELETED = koji.BUILD_STATES["DELETED"]
|
||||
|
||||
|
||||
class KojiWrapper(object):
|
||||
@ -41,38 +41,65 @@ class KojiWrapper(object):
|
||||
with self.lock:
|
||||
self.koji_module = koji.get_profile_module(profile)
|
||||
session_opts = {}
|
||||
for key in ('krbservice', 'timeout', 'keepalive',
|
||||
'max_retries', 'retry_interval', 'anon_retry',
|
||||
'offline_retry', 'offline_retry_interval',
|
||||
'debug', 'debug_xmlrpc', 'krb_rdns',
|
||||
'serverca',
|
||||
'use_fast_upload'):
|
||||
for key in (
|
||||
"krbservice",
|
||||
"timeout",
|
||||
"keepalive",
|
||||
"max_retries",
|
||||
"retry_interval",
|
||||
"anon_retry",
|
||||
"offline_retry",
|
||||
"offline_retry_interval",
|
||||
"debug",
|
||||
"debug_xmlrpc",
|
||||
"krb_rdns",
|
||||
"serverca",
|
||||
"use_fast_upload",
|
||||
):
|
||||
value = getattr(self.koji_module.config, key, None)
|
||||
if value is not None:
|
||||
session_opts[key] = value
|
||||
self.koji_proxy = koji.ClientSession(self.koji_module.config.server, session_opts)
|
||||
self.koji_proxy = koji.ClientSession(
|
||||
self.koji_module.config.server, session_opts
|
||||
)
|
||||
|
||||
def login(self):
|
||||
"""Authenticate to the hub."""
|
||||
auth_type = self.koji_module.config.authtype
|
||||
if auth_type == 'ssl' or (os.path.isfile(os.path.expanduser(self.koji_module.config.cert))
|
||||
and auth_type is None):
|
||||
self.koji_proxy.ssl_login(os.path.expanduser(self.koji_module.config.cert),
|
||||
os.path.expanduser(self.koji_module.config.ca),
|
||||
os.path.expanduser(self.koji_module.config.serverca))
|
||||
elif auth_type == 'kerberos':
|
||||
if auth_type == "ssl" or (
|
||||
os.path.isfile(os.path.expanduser(self.koji_module.config.cert))
|
||||
and auth_type is None
|
||||
):
|
||||
self.koji_proxy.ssl_login(
|
||||
os.path.expanduser(self.koji_module.config.cert),
|
||||
os.path.expanduser(self.koji_module.config.ca),
|
||||
os.path.expanduser(self.koji_module.config.serverca),
|
||||
)
|
||||
elif auth_type == "kerberos":
|
||||
self.koji_proxy.krb_login(
|
||||
getattr(self.koji_module.config, 'principal', None),
|
||||
getattr(self.koji_module.config, 'keytab', None))
|
||||
getattr(self.koji_module.config, "principal", None),
|
||||
getattr(self.koji_module.config, "keytab", None),
|
||||
)
|
||||
else:
|
||||
raise RuntimeError('Unsupported authentication type in Koji')
|
||||
raise RuntimeError("Unsupported authentication type in Koji")
|
||||
|
||||
def _get_cmd(self, *args):
|
||||
return ["koji", "--profile=%s" % self.profile] + list(args)
|
||||
|
||||
def get_runroot_cmd(self, target, arch, command, quiet=False, use_shell=True,
|
||||
channel=None, packages=None, mounts=None, weight=None,
|
||||
new_chroot=False, chown_paths=None):
|
||||
def get_runroot_cmd(
|
||||
self,
|
||||
target,
|
||||
arch,
|
||||
command,
|
||||
quiet=False,
|
||||
use_shell=True,
|
||||
channel=None,
|
||||
packages=None,
|
||||
mounts=None,
|
||||
weight=None,
|
||||
new_chroot=False,
|
||||
chown_paths=None,
|
||||
):
|
||||
cmd = self._get_cmd("runroot", "--nowait", "--task-id")
|
||||
|
||||
if quiet:
|
||||
@ -111,7 +138,9 @@ class KojiWrapper(object):
|
||||
command = " ".join([shlex_quote(i) for i in command])
|
||||
|
||||
# HACK: remove rpmdb and yum cache
|
||||
command = "rm -f /var/lib/rpm/__db*; rm -rf /var/cache/yum/*; set -x; " + command
|
||||
command = (
|
||||
"rm -f /var/lib/rpm/__db*; rm -rf /var/cache/yum/*; set -x; " + command
|
||||
)
|
||||
|
||||
if chown_paths:
|
||||
paths = " ".join(shlex_quote(pth) for pth in chown_paths)
|
||||
@ -124,8 +153,16 @@ class KojiWrapper(object):
|
||||
return cmd
|
||||
|
||||
def get_pungi_buildinstall_cmd(
|
||||
self, target, arch, args, channel=None, packages=None,
|
||||
mounts=None, weight=None, chown_uid=None):
|
||||
self,
|
||||
target,
|
||||
arch,
|
||||
args,
|
||||
channel=None,
|
||||
packages=None,
|
||||
mounts=None,
|
||||
weight=None,
|
||||
chown_uid=None,
|
||||
):
|
||||
cmd = self._get_cmd("pungi-buildinstall", "--nowait", "--task-id")
|
||||
|
||||
if channel:
|
||||
@ -171,10 +208,10 @@ class KojiWrapper(object):
|
||||
If we are authenticated with a keytab, we need a fresh credentials
|
||||
cache to avoid possible race condition.
|
||||
"""
|
||||
if getattr(self.koji_module.config, 'keytab', None):
|
||||
with util.temp_dir(prefix='krb_ccache') as tempdir:
|
||||
if getattr(self.koji_module.config, "keytab", None):
|
||||
with util.temp_dir(prefix="krb_ccache") as tempdir:
|
||||
env = os.environ.copy()
|
||||
env['KRB5CCNAME'] = 'DIR:%s' % tempdir
|
||||
env["KRB5CCNAME"] = "DIR:%s" % tempdir
|
||||
yield env
|
||||
else:
|
||||
yield None
|
||||
@ -188,11 +225,17 @@ class KojiWrapper(object):
|
||||
"""
|
||||
task_id = None
|
||||
with self.get_koji_cmd_env() as env:
|
||||
retcode, output = run(command, can_fail=True, logfile=log_file,
|
||||
show_cmd=True, env=env, universal_newlines=True)
|
||||
retcode, output = run(
|
||||
command,
|
||||
can_fail=True,
|
||||
logfile=log_file,
|
||||
show_cmd=True,
|
||||
env=env,
|
||||
universal_newlines=True,
|
||||
)
|
||||
|
||||
first_line = output.splitlines()[0]
|
||||
match = re.search(r'^(\d+)$', first_line)
|
||||
match = re.search(r"^(\d+)$", first_line)
|
||||
if not match:
|
||||
raise RuntimeError(
|
||||
"Could not find task ID in output. Command '%s' returned '%s'."
|
||||
@ -209,7 +252,9 @@ class KojiWrapper(object):
|
||||
"task_id": task_id,
|
||||
}
|
||||
|
||||
def get_image_build_cmd(self, config_options, conf_file_dest, wait=True, scratch=False):
|
||||
def get_image_build_cmd(
|
||||
self, config_options, conf_file_dest, wait=True, scratch=False
|
||||
):
|
||||
"""
|
||||
@param config_options
|
||||
@param conf_file_dest - a destination in compose workdir for the conf file to be written
|
||||
@ -219,14 +264,27 @@ class KojiWrapper(object):
|
||||
# Usage: koji image-build [options] <name> <version> <target> <install-tree-url> <arch> [<arch>...]
|
||||
sub_command = "image-build"
|
||||
# The minimum set of options
|
||||
min_options = ("name", "version", "target", "install_tree", "arches", "format", "kickstart", "ksurl", "distro")
|
||||
assert set(min_options).issubset(set(config_options['image-build'].keys())), "image-build requires at least %s got '%s'" % (", ".join(min_options), config_options)
|
||||
min_options = (
|
||||
"name",
|
||||
"version",
|
||||
"target",
|
||||
"install_tree",
|
||||
"arches",
|
||||
"format",
|
||||
"kickstart",
|
||||
"ksurl",
|
||||
"distro",
|
||||
)
|
||||
assert set(min_options).issubset(set(config_options["image-build"].keys())), (
|
||||
"image-build requires at least %s got '%s'"
|
||||
% (", ".join(min_options), config_options)
|
||||
)
|
||||
cfg_parser = configparser.ConfigParser()
|
||||
for section, opts in config_options.items():
|
||||
cfg_parser.add_section(section)
|
||||
for option, value in opts.items():
|
||||
if isinstance(value, list):
|
||||
value = ','.join(value)
|
||||
value = ",".join(value)
|
||||
if not isinstance(value, six.string_types):
|
||||
# Python 3 configparser will reject non-string values.
|
||||
value = str(value)
|
||||
@ -246,42 +304,55 @@ class KojiWrapper(object):
|
||||
|
||||
def get_live_media_cmd(self, options, wait=True):
|
||||
# Usage: koji spin-livemedia [options] <name> <version> <target> <arch> <kickstart-file>
|
||||
cmd = self._get_cmd('spin-livemedia')
|
||||
cmd = self._get_cmd("spin-livemedia")
|
||||
|
||||
for key in ('name', 'version', 'target', 'arch', 'ksfile'):
|
||||
for key in ("name", "version", "target", "arch", "ksfile"):
|
||||
if key not in options:
|
||||
raise ValueError('Expected options to have key "%s"' % key)
|
||||
cmd.append(options[key])
|
||||
if 'install_tree' not in options:
|
||||
if "install_tree" not in options:
|
||||
raise ValueError('Expected options to have key "install_tree"')
|
||||
cmd.append('--install-tree=%s' % options['install_tree'])
|
||||
cmd.append("--install-tree=%s" % options["install_tree"])
|
||||
|
||||
for repo in options.get('repo', []):
|
||||
cmd.append('--repo=%s' % repo)
|
||||
for repo in options.get("repo", []):
|
||||
cmd.append("--repo=%s" % repo)
|
||||
|
||||
if options.get('scratch'):
|
||||
cmd.append('--scratch')
|
||||
if options.get("scratch"):
|
||||
cmd.append("--scratch")
|
||||
|
||||
if options.get('skip_tag'):
|
||||
cmd.append('--skip-tag')
|
||||
if options.get("skip_tag"):
|
||||
cmd.append("--skip-tag")
|
||||
|
||||
if 'ksurl' in options:
|
||||
cmd.append('--ksurl=%s' % options['ksurl'])
|
||||
if "ksurl" in options:
|
||||
cmd.append("--ksurl=%s" % options["ksurl"])
|
||||
|
||||
if 'release' in options:
|
||||
cmd.append('--release=%s' % options['release'])
|
||||
if "release" in options:
|
||||
cmd.append("--release=%s" % options["release"])
|
||||
|
||||
if 'can_fail' in options:
|
||||
cmd.append('--can-fail=%s' % ','.join(options['can_fail']))
|
||||
if "can_fail" in options:
|
||||
cmd.append("--can-fail=%s" % ",".join(options["can_fail"]))
|
||||
|
||||
if wait:
|
||||
cmd.append('--wait')
|
||||
cmd.append("--wait")
|
||||
|
||||
return cmd
|
||||
|
||||
def get_create_image_cmd(self, name, version, target, arch, ks_file, repos,
|
||||
image_type="live", image_format=None, release=None,
|
||||
wait=True, archive=False, specfile=None, ksurl=None):
|
||||
def get_create_image_cmd(
|
||||
self,
|
||||
name,
|
||||
version,
|
||||
target,
|
||||
arch,
|
||||
ks_file,
|
||||
repos,
|
||||
image_type="live",
|
||||
image_format=None,
|
||||
release=None,
|
||||
wait=True,
|
||||
archive=False,
|
||||
specfile=None,
|
||||
ksurl=None,
|
||||
):
|
||||
# Usage: koji spin-livecd [options] <name> <version> <target> <arch> <kickstart-file>
|
# Usage: koji spin-appliance [options] <name> <version> <target> <arch> <kickstart-file>
# Examples:
@ -327,7 +398,10 @@ class KojiWrapper(object):
raise ValueError("Format can be specified only for appliance images'")
supported_formats = ["raw", "qcow", "qcow2", "vmx"]
if image_format not in supported_formats:
raise ValueError("Format is not supported: %s. Supported formats: %s" % (image_format, " ".join(sorted(supported_formats))))
raise ValueError(
"Format is not supported: %s. Supported formats: %s"
% (image_format, " ".join(sorted(supported_formats)))
)
cmd.append("--format=%s" % image_format)

if release is not None:
@ -350,23 +424,27 @@ class KojiWrapper(object):

def _has_connection_error(self, output):
"""Checks if output indicates connection error."""
return re.search('error: failed to connect\n$', output)
return re.search("error: failed to connect\n$", output)

def _has_offline_error(self, output):
"""Check if output indicates server offline."""
return re.search('koji: ServerOffline:', output)
return re.search("koji: ServerOffline:", output)

def _wait_for_task(self, task_id, logfile=None, max_retries=None):
"""Tries to wait for a task to finish. On connection error it will
retry with `watch-task` command.
"""
cmd = self._get_cmd('watch-task', str(task_id))
cmd = self._get_cmd("watch-task", str(task_id))
attempt = 0

while True:
retcode, output = run(cmd, can_fail=True, logfile=logfile, universal_newlines=True)
retcode, output = run(
cmd, can_fail=True, logfile=logfile, universal_newlines=True
)

if retcode == 0 or not (self._has_connection_error(output) or self._has_offline_error(output)):
if retcode == 0 or not (
self._has_connection_error(output) or self._has_offline_error(output)
):
# Task finished for reason other than connection error or server offline error.
return retcode, output

@ -375,7 +453,9 @@ class KojiWrapper(object):
break
time.sleep(attempt * 10)

raise RuntimeError('Failed to wait for task %s. Too many connection errors.' % task_id)
raise RuntimeError(
"Failed to wait for task %s. Too many connection errors." % task_id
)

def run_blocking_cmd(self, command, log_file=None, max_retries=None):
"""
@ -384,17 +464,28 @@ class KojiWrapper(object):
command finishes.
"""
with self.get_koji_cmd_env() as env:
retcode, output = run(command, can_fail=True, logfile=log_file,
env=env, universal_newlines=True)
retcode, output = run(
command,
can_fail=True,
logfile=log_file,
env=env,
universal_newlines=True,
)

match = re.search(r"Created task: (\d+)", output)
if not match:
raise RuntimeError("Could not find task ID in output. Command '%s' returned '%s'."
% (" ".join(command), output))
raise RuntimeError(
"Could not find task ID in output. Command '%s' returned '%s'."
% (" ".join(command), output)
)
task_id = int(match.groups()[0])

if retcode != 0 and (self._has_connection_error(output) or self._has_offline_error(output)):
retcode, output = self._wait_for_task(task_id, logfile=log_file, max_retries=max_retries)
if retcode != 0 and (
self._has_connection_error(output) or self._has_offline_error(output)
):
retcode, output = self._wait_for_task(
task_id, logfile=log_file, max_retries=max_retries
)

return {
"retcode": retcode,
@ -403,7 +494,9 @@ class KojiWrapper(object):
}

def watch_task(self, task_id, log_file=None, max_retries=None):
retcode, _ = self._wait_for_task(task_id, logfile=log_file, max_retries=max_retries)
retcode, _ = self._wait_for_task(
task_id, logfile=log_file, max_retries=max_retries
)
return retcode

def get_image_paths(self, task_id, callback=None):
@ -420,26 +513,32 @@ class KojiWrapper(object):
children_tasks = self.koji_proxy.getTaskChildren(task_id, request=True)

for child_task in children_tasks:
if child_task['method'] not in ['createImage', 'createLiveMedia', 'createAppliance']:
if child_task["method"] not in [
"createImage",
"createLiveMedia",
"createAppliance",
]:
continue

if child_task['state'] != koji.TASK_STATES['CLOSED']:
if child_task["state"] != koji.TASK_STATES["CLOSED"]:
# The subtask is failed, which can happen with the can_fail
# option. If given, call the callback, and go to next child.
if callback:
callback(child_task['arch'])
callback(child_task["arch"])
continue

is_scratch = child_task['request'][-1].get('scratch', False)
task_result = self.koji_proxy.getTaskResult(child_task['id'])
is_scratch = child_task["request"][-1].get("scratch", False)
task_result = self.koji_proxy.getTaskResult(child_task["id"])

if is_scratch:
topdir = os.path.join(
self.koji_module.pathinfo.work(),
self.koji_module.pathinfo.taskrelpath(child_task['id'])
self.koji_module.pathinfo.taskrelpath(child_task["id"]),
)
else:
build = self.koji_proxy.getImageBuild("%(name)s-%(version)s-%(release)s" % task_result)
build = self.koji_proxy.getImageBuild(
"%(name)s-%(version)s-%(release)s" % task_result
)
build["name"] = task_result["name"]
build["version"] = task_result["version"]
build["release"] = task_result["release"]
@ -447,7 +546,9 @@ class KojiWrapper(object):
topdir = self.koji_module.pathinfo.imagebuild(build)

for i in task_result["files"]:
result.setdefault(task_result['arch'], []).append(os.path.join(topdir, i))
result.setdefault(task_result["arch"], []).append(
os.path.join(topdir, i)
)

return result

@ -460,7 +561,7 @@ class KojiWrapper(object):
# scan parent and child tasks for certain methods
task_info = None
for i in task_info_list:
if i["method"] in ("createAppliance", "createLiveCD", 'createImage'):
if i["method"] in ("createAppliance", "createLiveCD", "createImage"):
task_info = i
break

@ -469,9 +570,14 @@ class KojiWrapper(object):
task_result.pop("rpmlist", None)

if scratch:
topdir = os.path.join(self.koji_module.pathinfo.work(), self.koji_module.pathinfo.taskrelpath(task_info["id"]))
topdir = os.path.join(
self.koji_module.pathinfo.work(),
self.koji_module.pathinfo.taskrelpath(task_info["id"]),
)
else:
build = self.koji_proxy.getImageBuild("%(name)s-%(version)s-%(release)s" % task_result)
build = self.koji_proxy.getImageBuild(
"%(name)s-%(version)s-%(release)s" % task_result
)
build["name"] = task_result["name"]
build["version"] = task_result["version"]
build["release"] = task_result["release"]
@ -501,7 +607,10 @@ class KojiWrapper(object):
task_result = self.koji_proxy.getTaskResult(task_info["id"])

# Get koji dir with results (rpms, srpms, logs, ...)
topdir = os.path.join(self.koji_module.pathinfo.work(), self.koji_module.pathinfo.taskrelpath(task_info["id"]))
topdir = os.path.join(
self.koji_module.pathinfo.work(),
self.koji_module.pathinfo.taskrelpath(task_info["id"]),
)

# TODO: Maybe use different approach for non-scratch builds - see get_image_path()

@ -550,7 +659,10 @@ class KojiWrapper(object):
for i in result_files:
rpminfo = self.koji_proxy.getRPM(i)
build = self.koji_proxy.getBuild(rpminfo["build_id"])
path = os.path.join(self.koji_module.pathinfo.build(build), self.koji_module.pathinfo.signed(rpminfo, sigkey))
path = os.path.join(
self.koji_module.pathinfo.build(build),
self.koji_module.pathinfo.signed(rpminfo, sigkey),
)
result.append(path)

return result
@ -559,7 +671,9 @@ class KojiWrapper(object):
builds = self.koji_proxy.listBuilds(taskID=task_id)
return [build.get("nvr") for build in builds if build.get("nvr")]
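As a usage illustration only (the Koji profile name, command and log path below are invented assumptions, not taken from this commit), the blocking helpers reformatted above are typically driven like this:

wrapper = KojiWrapper("koji")  # profile name is hypothetical
result = wrapper.run_blocking_cmd(
    ["koji", "image-build", "--config", "image.conf"],
    log_file="/tmp/image-build.log",
)
# run_blocking_cmd submits the command, then waits on the created task,
# retrying through _wait_for_task when the hub connection drops.
if result["retcode"] != 0:
    raise RuntimeError("koji command failed, see the log file")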
def multicall_map(self, koji_session, koji_session_fnc, list_of_args=None, list_of_kwargs=None):
def multicall_map(
self, koji_session, koji_session_fnc, list_of_args=None, list_of_kwargs=None
):
"""
Calls the `koji_session_fnc` using Koji multicall feature N times based on the list of
arguments passed in `list_of_args` and `list_of_kwargs`.
@ -578,8 +692,10 @@ class KojiWrapper(object):
if list_of_args is None and list_of_kwargs is None:
raise ValueError("One of list_of_args or list_of_kwargs must be set.")

if (type(list_of_args) not in [type(None), list] or
type(list_of_kwargs) not in [type(None), list]):
if type(list_of_args) not in [type(None), list] or type(list_of_kwargs) not in [
type(None),
list,
]:
raise ValueError("list_of_args and list_of_kwargs must be list or None.")

if list_of_kwargs is None:
@ -588,7 +704,9 @@ class KojiWrapper(object):
list_of_args = [[]] * len(list_of_kwargs)

if len(list_of_args) != len(list_of_kwargs):
raise ValueError("Length of list_of_args and list_of_kwargs must be the same.")
raise ValueError(
"Length of list_of_args and list_of_kwargs must be the same."
)

koji_session.multicall = True
for args, kwargs in zip(list_of_args, list_of_kwargs):
@ -604,8 +722,9 @@ class KojiWrapper(object):
return None
if type(responses) != list:
raise ValueError(
"Fault element was returned for multicall of method %r: %r" % (
koji_session_fnc, responses))
"Fault element was returned for multicall of method %r: %r"
% (koji_session_fnc, responses)
)

results = []

@ -619,13 +738,15 @@ class KojiWrapper(object):
if type(response) == list:
if not response:
raise ValueError(
"Empty list returned for multicall of method %r with args %r, %r" % (
koji_session_fnc, args, kwargs))
"Empty list returned for multicall of method %r with args %r, %r"
% (koji_session_fnc, args, kwargs)
)
results.append(response[0])
else:
raise ValueError(
"Unexpected data returned for multicall of method %r with args %r, %r: %r" % (
koji_session_fnc, args, kwargs, response))
"Unexpected data returned for multicall of method %r with args %r, %r: %r"
% (koji_session_fnc, args, kwargs, response)
)

return results

@ -645,12 +766,14 @@ def get_buildroot_rpms(compose, task_id):
result = []
if task_id:
# runroot
koji = KojiWrapper(compose.conf['koji_profile'])
koji = KojiWrapper(compose.conf["koji_profile"])
buildroot_infos = koji.koji_proxy.listBuildroots(taskID=task_id)
if not buildroot_infos:
children_tasks = koji.koji_proxy.getTaskChildren(task_id)
for child_task in children_tasks:
buildroot_infos = koji.koji_proxy.listBuildroots(taskID=child_task["id"])
buildroot_infos = koji.koji_proxy.listBuildroots(
taskID=child_task["id"]
)
if buildroot_infos:
break
buildroot_info = buildroot_infos[-1]
@ -660,8 +783,10 @@ def get_buildroot_rpms(compose, task_id):
result.append(fmt % rpm_info)
else:
# local
retcode, output = run("rpm -qa --qf='%{name}-%{version}-%{release}.%{arch}\n'",
universal_newlines=True)
retcode, output = run(
"rpm -qa --qf='%{name}-%{version}-%{release}.%{arch}\n'",
universal_newlines=True,
)
for i in output.splitlines():
if not i:
continue
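A hypothetical call of the multicall helper above (the session object and tag names are illustrative assumptions, not part of this commit):

# wrapper is a KojiWrapper instance; koji_session is assumed to be an
# authenticated koji.ClientSession.
results = wrapper.multicall_map(
    koji_session,
    koji_session.listTagged,
    list_of_kwargs=[{"tag": "f33-compose"}, {"tag": "f34-compose"}],
)
# One result per call, returned in the same order as the supplied arguments.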
@ -21,14 +21,29 @@ from ..util import process_args

class LoraxWrapper(object):
def get_lorax_cmd(self, product, version, release, repo_baseurl, output_dir,
variant=None, bugurl=None, nomacboot=False, noupgrade=False,
is_final=False, buildarch=None, volid=None, buildinstallpackages=None,
add_template=None, add_arch_template=None,
add_template_var=None, add_arch_template_var=None,
rootfs_size=None,
log_dir=None,
dracut_args=None):
def get_lorax_cmd(
self,
product,
version,
release,
repo_baseurl,
output_dir,
variant=None,
bugurl=None,
nomacboot=False,
noupgrade=False,
is_final=False,
buildarch=None,
volid=None,
buildinstallpackages=None,
add_template=None,
add_arch_template=None,
add_template_var=None,
add_arch_template_var=None,
rootfs_size=None,
log_dir=None,
dracut_args=None,
):
cmd = ["lorax"]
cmd.append("--product=%s" % product)
cmd.append("--version=%s" % version)
@ -60,17 +75,17 @@ class LoraxWrapper(object):
if volid:
cmd.append("--volid=%s" % volid)

cmd.extend(process_args('--installpkgs=%s', buildinstallpackages))
cmd.extend(process_args('--add-template=%s', add_template))
cmd.extend(process_args('--add-arch-template=%s', add_arch_template))
cmd.extend(process_args('--add-template-var=%s', add_template_var))
cmd.extend(process_args('--add-arch-template-var=%s', add_arch_template_var))
cmd.extend(process_args("--installpkgs=%s", buildinstallpackages))
cmd.extend(process_args("--add-template=%s", add_template))
cmd.extend(process_args("--add-arch-template=%s", add_arch_template))
cmd.extend(process_args("--add-template-var=%s", add_template_var))
cmd.extend(process_args("--add-arch-template-var=%s", add_arch_template_var))

if log_dir:
cmd.append('--logfile=%s' % os.path.join(log_dir, 'lorax.log'))
cmd.append("--logfile=%s" % os.path.join(log_dir, "lorax.log"))

if rootfs_size is not None:
cmd.append('--rootfs-size=%s' % (rootfs_size))
cmd.append("--rootfs-size=%s" % (rootfs_size))

for i in force_list(dracut_args or []):
cmd.append("--dracut-arg=%s" % i)
@ -82,7 +97,22 @@ class LoraxWrapper(object):

return cmd

def get_buildinstall_cmd(self, product, version, release, repo_baseurl, output_dir, variant=None, bugurl=None, nomacboot=False, noupgrade=False, is_final=False, buildarch=None, volid=None, brand=None):
def get_buildinstall_cmd(
self,
product,
version,
release,
repo_baseurl,
output_dir,
variant=None,
bugurl=None,
nomacboot=False,
noupgrade=False,
is_final=False,
buildarch=None,
volid=None,
brand=None,
):
# RHEL 6 compatibility
# Usage: buildinstall [--debug] --version <version> --brand <brand> --product <product> --release <comment> --final [--output outputdir] [--discs <discstring>] <root>

@ -29,7 +29,9 @@ PACKAGES_RE = {

UNRESOLVED_DEPENDENCY_RE = re.compile(r"^.*Unresolvable dependency (.+) in ([^ ]+).*$")

MISSING_COMPS_PACKAGE_RE = re.compile(r"^.*Could not find a match for (.+) in any configured repo")
MISSING_COMPS_PACKAGE_RE = re.compile(
    r"^.*Could not find a match for (.+) in any configured repo"
)

def _write_ks_section(f, section, lines):
@ -42,12 +44,20 @@ def _write_ks_section(f, section, lines):
class PungiWrapper(object):

def write_kickstart(self, ks_path, repos, groups, packages,
exclude_packages=None, comps_repo=None,
lookaside_repos=None, fulltree_excludes=None,
multilib_blacklist=None, multilib_whitelist=None,
prepopulate=None):
def write_kickstart(
self,
ks_path,
repos,
groups,
packages,
exclude_packages=None,
comps_repo=None,
lookaside_repos=None,
fulltree_excludes=None,
multilib_blacklist=None,
multilib_whitelist=None,
prepopulate=None,
):
groups = groups or []
exclude_packages = exclude_packages or {}
lookaside_repos = lookaside_repos or {}
@ -95,7 +105,25 @@ class PungiWrapper(object):

kickstart.close()

def get_pungi_cmd(self, config, destdir, name, version=None, flavor=None, selfhosting=False, fulltree=False, greedy=None, nodeps=False, nodownload=True, full_archlist=False, arch=None, cache_dir=None, lookaside_repos=None, multilib_methods=None, profiler=False):
def get_pungi_cmd(
self,
config,
destdir,
name,
version=None,
flavor=None,
selfhosting=False,
fulltree=False,
greedy=None,
nodeps=False,
nodownload=True,
full_archlist=False,
arch=None,
cache_dir=None,
lookaside_repos=None,
multilib_methods=None,
profiler=False,
):
cmd = ["pungi"]

# Gather stage
@ -155,7 +183,25 @@ class PungiWrapper(object):

return cmd

def get_pungi_cmd_dnf(self, config, destdir, name, version=None, flavor=None, selfhosting=False, fulltree=False, greedy=None, nodeps=False, nodownload=True, full_archlist=False, arch=None, cache_dir=None, lookaside_repos=None, multilib_methods=None, profiler=False):
def get_pungi_cmd_dnf(
self,
config,
destdir,
name,
version=None,
flavor=None,
selfhosting=False,
fulltree=False,
greedy=None,
nodeps=False,
nodownload=True,
full_archlist=False,
arch=None,
cache_dir=None,
lookaside_repos=None,
multilib_methods=None,
profiler=False,
):
cmd = ["pungi-gather"]

# path to a kickstart file
@ -223,39 +269,51 @@ class PungiWrapper(object):

return packages, broken_deps, missing_comps

def run_pungi(self, ks_file, destdir, name, selfhosting=False, fulltree=False,
greedy='', cache_dir=None, arch='', multilib_methods=[],
nodeps=False, lookaside_repos=[]):
def run_pungi(
self,
ks_file,
destdir,
name,
selfhosting=False,
fulltree=False,
greedy="",
cache_dir=None,
arch="",
multilib_methods=[],
nodeps=False,
lookaside_repos=[],
):
"""
This is a replacement for get_pungi_cmd that runs it in-process. Not
all arguments are supported.
"""
from .. import ks, gather, config

ksparser = ks.get_ksparser(ks_path=ks_file)
cfg = config.Config()
cfg.set('pungi', 'destdir', destdir)
cfg.set('pungi', 'family', name)
cfg.set('pungi', 'iso_basename', name)
cfg.set('pungi', 'fulltree', str(fulltree))
cfg.set('pungi', 'selfhosting', str(selfhosting))
cfg.set('pungi', 'cachedir', cache_dir)
cfg.set('pungi', 'full_archlist', "True")
cfg.set('pungi', 'workdirbase', "%s/work" % destdir)
cfg.set('pungi', 'greedy', greedy)
cfg.set('pungi', 'nosource', 'False')
cfg.set('pungi', 'nodebuginfo', 'False')
cfg.set('pungi', 'force', 'False')
cfg.set('pungi', 'resolve_deps', str(not nodeps))
cfg.set("pungi", "destdir", destdir)
cfg.set("pungi", "family", name)
cfg.set("pungi", "iso_basename", name)
cfg.set("pungi", "fulltree", str(fulltree))
cfg.set("pungi", "selfhosting", str(selfhosting))
cfg.set("pungi", "cachedir", cache_dir)
cfg.set("pungi", "full_archlist", "True")
cfg.set("pungi", "workdirbase", "%s/work" % destdir)
cfg.set("pungi", "greedy", greedy)
cfg.set("pungi", "nosource", "False")
cfg.set("pungi", "nodebuginfo", "False")
cfg.set("pungi", "force", "False")
cfg.set("pungi", "resolve_deps", str(not nodeps))
if arch:
cfg.set('pungi', 'arch', arch)
cfg.set("pungi", "arch", arch)
if multilib_methods:
cfg.set('pungi', 'multilib', " ".join(multilib_methods))
cfg.set("pungi", "multilib", " ".join(multilib_methods))
if lookaside_repos:
cfg.set('pungi', 'lookaside_repos', " ".join(lookaside_repos))
cfg.set("pungi", "lookaside_repos", " ".join(lookaside_repos))

mypungi = gather.Pungi(cfg, ksparser)

with open(os.path.join(destdir, 'out'), 'w') as f:
with open(os.path.join(destdir, "out"), "w") as f:
with mypungi.yumlock:
mypungi._inityum()
mypungi.gather()

@ -19,15 +19,23 @@ import os
from kobo.shortcuts import force_list
def get_repoclosure_cmd(backend='yum', arch=None, repos=None, lookaside=None):
def get_repoclosure_cmd(backend="yum", arch=None, repos=None, lookaside=None):
cmds = {
'yum': {'cmd': ['/usr/bin/repoclosure', '--tempcache'], 'repoarg': '--repoid=%s', 'lookaside': '--lookaside=%s'},
'dnf': {'cmd': ['dnf', 'repoclosure'], 'repoarg': '--repo=%s', 'lookaside': '--repo=%s'},
"yum": {
"cmd": ["/usr/bin/repoclosure", "--tempcache"],
"repoarg": "--repoid=%s",
"lookaside": "--lookaside=%s",
},
"dnf": {
"cmd": ["dnf", "repoclosure"],
"repoarg": "--repo=%s",
"lookaside": "--repo=%s",
},
}
try:
cmd = cmds[backend]['cmd']
cmd = cmds[backend]["cmd"]
except KeyError:
raise RuntimeError('Unknown repoclosure backend: %s' % backend)
raise RuntimeError("Unknown repoclosure backend: %s" % backend)

# There are options that are not exposed here, because we don't need
# them.
@ -38,17 +46,17 @@ def get_repoclosure_cmd(backend='yum', arch=None, repos=None, lookaside=None):
repos = repos or {}
for repo_id, repo_path in repos.items():
cmd.append("--repofrompath=%s,%s" % (repo_id, _to_url(repo_path)))
cmd.append(cmds[backend]['repoarg'] % repo_id)
if backend == 'dnf':
cmd.append(cmds[backend]["repoarg"] % repo_id)
if backend == "dnf":
# For dnf we want to add all repos with the --repo option (which
# enables only those and not any system repo), and the repos to
# check are also listed with the --check option.
cmd.append('--check=%s' % repo_id)
cmd.append("--check=%s" % repo_id)

lookaside = lookaside or {}
for repo_id, repo_path in lookaside.items():
cmd.append("--repofrompath=%s,%s" % (repo_id, _to_url(repo_path)))
cmd.append(cmds[backend]['lookaside'] % repo_id)
cmd.append(cmds[backend]["lookaside"] % repo_id)

return cmd
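A sketch of how the helper above might be invoked for the dnf backend (repo IDs and URLs are made up for illustration):

cmd = get_repoclosure_cmd(
    backend="dnf",
    repos={"Server": "http://example.com/compose/Server/x86_64/os"},
    lookaside={"base": "http://example.com/base-repo"},
)
# Each repo is registered via --repofrompath=<id>,<url>; for dnf the repos are
# enabled with --repo=<id> and the checked ones additionally get --check=<id>.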
@ -1,4 +1,5 @@
from __future__ import absolute_import

# -*- coding: utf-8 -*-

@ -25,8 +26,7 @@ from fnmatch import fnmatch

import kobo.log
from kobo.shortcuts import run, force_list
from pungi.util import (explode_rpm_package, makedirs, copy_all, temp_dir,
retry)
from pungi.util import explode_rpm_package, makedirs, copy_all, temp_dir, retry
from .kojiwrapper import KojiWrapper
@ -55,31 +55,34 @@ class ScmBase(kobo.log.LoggingBase):
universal_newlines=True,
)
if retcode != 0:
self.log_error('Output was: %r' % output)
raise RuntimeError('%r failed with exit code %s'
% (self.command, retcode))
self.log_error("Output was: %r" % output)
raise RuntimeError(
"%r failed with exit code %s" % (self.command, retcode)
)

class FileWrapper(ScmBase):
def export_dir(self, scm_root, scm_dir, target_dir, scm_branch=None):
self.log_debug("Exporting directory %s from current working directory..."
% (scm_dir))
self.log_debug(
"Exporting directory %s from current working directory..." % (scm_dir)
)
if scm_root:
raise ValueError("FileWrapper: 'scm_root' should be empty.")
dirs = glob.glob(scm_dir)
if not dirs:
raise RuntimeError('No directories matched, can not export.')
raise RuntimeError("No directories matched, can not export.")
for i in dirs:
copy_all(i, target_dir)

def export_file(self, scm_root, scm_file, target_dir, scm_branch=None):
if scm_root:
raise ValueError("FileWrapper: 'scm_root' should be empty.")
self.log_debug("Exporting file %s from current working directory..."
% (scm_file))
self.log_debug(
"Exporting file %s from current working directory..." % (scm_file)
)
files = glob.glob(scm_file)
if not files:
raise RuntimeError('No files matched, can not export.')
raise RuntimeError("No files matched, can not export.")
for i in files:
target_path = os.path.join(target_dir, os.path.basename(i))
shutil.copy2(i, target_path)
@ -90,10 +93,24 @ class CvsWrapper(ScmBase):
scm_dir = scm_dir.lstrip("/")
scm_branch = scm_branch or "HEAD"
with temp_dir() as tmp_dir:
self.log_debug("Exporting directory %s from CVS %s (branch %s)..."
% (scm_dir, scm_root, scm_branch))
self.retry_run(["/usr/bin/cvs", "-q", "-d", scm_root, "export", "-r", scm_branch, scm_dir],
workdir=tmp_dir, show_cmd=True)
self.log_debug(
"Exporting directory %s from CVS %s (branch %s)..."
% (scm_dir, scm_root, scm_branch)
)
self.retry_run(
[
"/usr/bin/cvs",
"-q",
"-d",
scm_root,
"export",
"-r",
scm_branch,
scm_dir,
],
workdir=tmp_dir,
show_cmd=True,
)
copy_all(os.path.join(tmp_dir, scm_dir), target_dir)

def export_file(self, scm_root, scm_file, target_dir, scm_branch=None):
@ -101,16 +118,30 @@ class CvsWrapper(ScmBase):
scm_branch = scm_branch or "HEAD"
with temp_dir() as tmp_dir:
target_path = os.path.join(target_dir, os.path.basename(scm_file))
self.log_debug("Exporting file %s from CVS %s (branch %s)..." % (scm_file, scm_root, scm_branch))
self.retry_run(["/usr/bin/cvs", "-q", "-d", scm_root, "export", "-r", scm_branch, scm_file],
workdir=tmp_dir, show_cmd=True)
self.log_debug(
"Exporting file %s from CVS %s (branch %s)..."
% (scm_file, scm_root, scm_branch)
)
self.retry_run(
[
"/usr/bin/cvs",
"-q",
"-d",
scm_root,
"export",
"-r",
scm_branch,
scm_file,
],
workdir=tmp_dir,
show_cmd=True,
)

makedirs(target_dir)
shutil.copy2(os.path.join(tmp_dir, scm_file), target_path)
class GitWrapper(ScmBase):

def _clone(self, repo, branch, destdir):
"""Get a single commit from a repository.

@ -142,8 +173,10 @@ class GitWrapper(ScmBase):
scm_branch = scm_branch or "master"

with temp_dir() as tmp_dir:
self.log_debug("Exporting directory %s from git %s (branch %s)..."
% (scm_dir, scm_root, scm_branch))
self.log_debug(
"Exporting directory %s from git %s (branch %s)..."
% (scm_dir, scm_root, scm_branch)
)

self._clone(scm_root, scm_branch, tmp_dir)

@ -156,8 +189,10 @@ class GitWrapper(ScmBase):
with temp_dir() as tmp_dir:
target_path = os.path.join(target_dir, os.path.basename(scm_file))

self.log_debug("Exporting file %s from git %s (branch %s)..."
% (scm_file, scm_root, scm_branch))
self.log_debug(
"Exporting file %s from git %s (branch %s)..."
% (scm_file, scm_root, scm_branch)
)

self._clone(scm_root, scm_branch, tmp_dir)

@ -175,7 +210,9 @@ class RpmScmWrapper(ScmBase):
for rpm in self._list_rpms(scm_root):
scm_dir = scm_dir.lstrip("/")
with temp_dir() as tmp_dir:
self.log_debug("Extracting directory %s from RPM package %s..." % (scm_dir, rpm))
self.log_debug(
"Extracting directory %s from RPM package %s..." % (scm_dir, rpm)
)
explode_rpm_package(rpm, tmp_dir)

makedirs(target_dir)
@ -183,14 +220,21 @@ class RpmScmWrapper(ScmBase):
if scm_dir.endswith("/"):
copy_all(os.path.join(tmp_dir, scm_dir), target_dir)
else:
run("cp -a %s %s/" % (shlex_quote(os.path.join(tmp_dir, scm_dir)),
shlex_quote(target_dir)))
run(
"cp -a %s %s/"
% (
shlex_quote(os.path.join(tmp_dir, scm_dir)),
shlex_quote(target_dir),
)
)

def export_file(self, scm_root, scm_file, target_dir, scm_branch=None):
for rpm in self._list_rpms(scm_root):
scm_file = scm_file.lstrip("/")
with temp_dir() as tmp_dir:
self.log_debug("Exporting file %s from RPM file %s..." % (scm_file, rpm))
self.log_debug(
"Exporting file %s from RPM file %s..." % (scm_file, rpm)
)
explode_rpm_package(rpm, tmp_dir)

makedirs(target_dir)
@ -232,9 +276,7 @@ class KojiScmWrapper(ScmBase):
self._download_build(builds[0], file_pattern, target_dir)

def _get_from_build(self, build_id, file_pattern, target_dir):
self.log_debug(
"Exporting file %s from Koji build %s", file_pattern, build_id
)
self.log_debug("Exporting file %s from Koji build %s", file_pattern, build_id)
build = self.proxy.getBuild(build_id)
self._download_build(build, file_pattern, target_dir)

@ -307,7 +349,7 @@ def get_file_from_scm(scm_dict, target_path, compose=None):
scm_repo = scm_dict["repo"]
scm_file = scm_dict["file"]
scm_branch = scm_dict.get("branch", None)
command = scm_dict.get('command')
command = scm_dict.get("command")

logger = compose._logger if compose else None
scm = _get_wrapper(scm_type, logger=logger, command=command, compose=compose)
@ -26,8 +26,13 @@ def get_variants_dtd(logger=None):
"""
variants_dtd = "/usr/share/pungi/variants.dtd"
if not os.path.isfile(variants_dtd):
devel_variants_dtd = os.path.normpath(os.path.realpath(
os.path.join(os.path.dirname(__file__), "..", "..", "share", "variants.dtd")))
devel_variants_dtd = os.path.normpath(
os.path.realpath(
os.path.join(
os.path.dirname(__file__), "..", "..", "share", "variants.dtd"
)
)
)
msg = "Variants DTD not found: %s" % variants_dtd
if os.path.isfile(devel_variants_dtd):
if logger:
@ -57,7 +62,7 @@ NO_WHITESPACE_ELEMENTS = [
class VariantsXmlParser(object):
def __init__(self, file_obj, tree_arches=None, tree_variants=None, logger=None):
self.tree = lxml.etree.parse(file_obj)
with open(get_variants_dtd(logger), 'r') as f:
with open(get_variants_dtd(logger), "r") as f:
self.dtd = lxml.etree.DTD(f)
self.addons = {}
self.variants = {}
@ -111,10 +116,15 @@ class VariantsXmlParser(object):
"parent": parent,
}
if self.tree_arches:
variant_dict["arches"] = [i for i in variant_dict["arches"] if i in self.tree_arches]
variant_dict["arches"] = [
i for i in variant_dict["arches"] if i in self.tree_arches
]
if not variant_dict["arches"]:
if self.logger:
self.logger.info('Excluding variant %s: all its arches are filtered.' % variant_dict['id'])
self.logger.info(
"Excluding variant %s: all its arches are filtered."
% variant_dict["id"]
)
return None

for grouplist_node in variant_node.xpath("groups"):
@ -141,7 +151,7 @@ class VariantsXmlParser(object):
for module_node in modulelist_node.xpath("module"):
module = {
"name": str(module_node.text),
"glob": self._is_true(module_node.attrib.get("glob", "false"))
"glob": self._is_true(module_node.attrib.get("glob", "false")),
}

variant_dict["modules"].append(module)
@ -151,7 +161,9 @@ class VariantsXmlParser(object):
"name": str(kojitag_node.text),
}

variant_dict["modular_koji_tags"] = variant_dict["modular_koji_tags"] or []
variant_dict["modular_koji_tags"] = (
variant_dict["modular_koji_tags"] or []
)
variant_dict["modular_koji_tags"].append(kojitag)

for environments_node in variant_node.xpath("environments"):
@ -188,28 +200,37 @@ class VariantsXmlParser(object):

has_optional = self._is_true(variant_node.attrib.get("has_optional", "false"))
if has_optional and not contains_optional:
optional = Variant(id="optional", name="optional", type="optional",
arches=variant.arches, groups=[], parent=variant)
optional = Variant(
id="optional",
name="optional",
type="optional",
arches=variant.arches,
groups=[],
parent=variant,
)
self.add_child(optional, variant)

for ref in variant_node.xpath("variants/ref/@id"):
try:
child_variant = self.parse_variant_node(self.addons[ref], variant)
except KeyError:
raise RuntimeError("Variant %s references non-existing variant %s"
% (variant.uid, ref))
raise RuntimeError(
"Variant %s references non-existing variant %s" % (variant.uid, ref)
)
self.add_child(child_variant, variant)

# XXX: top-level optional
# for ref in variant_node.xpath("variants/ref/@id"):
# variant["variants"].append(copy.deepcopy(addons[ref]))
# XXX: top-level optional
# for ref in variant_node.xpath("variants/ref/@id"):
# variant["variants"].append(copy.deepcopy(addons[ref]))

return variant

def _is_excluded(self, variant):
if self.tree_variants and variant.uid not in self.tree_variants:
if self.logger:
self.logger.info('Excluding variant %s: filtered by configuration.' % variant)
self.logger.info(
"Excluding variant %s: filtered by configuration." % variant
)
return True
return False

@ -225,7 +246,9 @@ class VariantsXmlParser(object):
variant_id = str(variant_node.attrib["id"])
self.addons[variant_id] = variant_node

for variant_node in self.tree.xpath("/variants/variant[@type='layered-product']"):
for variant_node in self.tree.xpath(
"/variants/variant[@type='layered-product']"
):
variant_id = str(variant_node.attrib["id"])
self.addons[variant_id] = variant_node

@ -239,9 +262,20 @@ class VariantsXmlParser(object):
class Variant(object):
def __init__(self, id, name, type, arches, groups, environments=None,
buildinstallpackages=None, is_empty=False, parent=None,
modules=None, modular_koji_tags=None):
def __init__(
self,
id,
name,
type,
arches,
groups,
environments=None,
buildinstallpackages=None,
is_empty=False,
parent=None,
modules=None,
modular_koji_tags=None,
):

environments = environments or []
buildinstallpackages = buildinstallpackages or []
@ -257,7 +291,9 @@ class Variant(object):
self.modules = sorted(self.modules, key=lambda x: x["name"])
self.modular_koji_tags = copy.deepcopy(modular_koji_tags)
if self.modular_koji_tags:
self.modular_koji_tags = sorted(self.modular_koji_tags, key=lambda x: x["name"])
self.modular_koji_tags = sorted(
self.modular_koji_tags, key=lambda x: x["name"]
)
self.buildinstallpackages = sorted(buildinstallpackages)
self.variants = {}
self.parent = parent
@ -275,7 +311,9 @@ class Variant(object):
return self.uid

def __repr__(self):
return 'Variant(id="{0.id}", name="{0.name}", type="{0.type}", parent={0.parent})'.format(self)
return 'Variant(id="{0.id}", name="{0.name}", type="{0.type}", parent={0.parent})'.format(
self
)

def __eq__(self, other):
return self.type == other.type and self.uid == other.uid
@ -284,7 +322,7 @@ class Variant(object):
return not (self == other)

def __lt__(self, other):
ORDERING = {'variant': 0, 'addon': 1, 'layered-product': 1, 'optional': 2}
ORDERING = {"variant": 0, "addon": 1, "layered-product": 1, "optional": 2}
return (ORDERING[self.type], self.uid) < (ORDERING[other.type], other.uid)

def __le__(self, other):
@ -313,11 +351,17 @@ class Variant(object):
raise RuntimeError("Only 'variant' can contain another variants.")
if variant.id == self.id:
# due to os/<variant.id> path -- addon id would conflict with parent variant id
raise RuntimeError("Child variant id must be different than parent variant id: %s" % variant.id)
raise RuntimeError(
"Child variant id must be different than parent variant id: %s"
% variant.id
)
# sometimes an addon or layered product can be part of multiple variants with different set of arches
arches = sorted(set(self.arches).intersection(set(variant.arches)))
if self.arches and not arches:
raise RuntimeError("%s: arch list %s does not intersect with parent arch list: %s" % (variant, variant.arches, self.arches))
raise RuntimeError(
"%s: arch list %s does not intersect with parent arch list: %s"
% (variant, variant.arches, self.arches)
)
variant.arches = arches
self.variants[variant.id] = variant
variant.parent = self
@ -327,11 +371,12 @@ class Variant(object):

types = types or ["self"]
result = copy.deepcopy(self.groups)
for variant in self.get_variants(arch=arch, types=types,
recursive=recursive):
for variant in self.get_variants(arch=arch, types=types, recursive=recursive):
if variant == self:
continue
for group in variant.get_groups(arch=arch, types=types, recursive=recursive):
for group in variant.get_groups(
arch=arch, types=types, recursive=recursive
):
if group not in result:
result.append(group)
return result
@ -344,12 +389,12 @@ class Variant(object):

types = types or ["self"]
result = copy.deepcopy(self.modules)
for variant in self.get_variants(arch=arch, types=types,
recursive=recursive):
for variant in self.get_variants(arch=arch, types=types, recursive=recursive):
if variant == self:
continue
for module in variant.get_modules(arch=arch, types=types,
recursive=recursive):
for module in variant.get_modules(
arch=arch, types=types, recursive=recursive
):
if module not in result:
result.append(module)
return result
@ -362,12 +407,12 @@ class Variant(object):

types = types or ["self"]
result = copy.deepcopy(self.modular_koji_tags)
for variant in self.get_variants(arch=arch, types=types,
recursive=recursive):
for variant in self.get_variants(arch=arch, types=types, recursive=recursive):
if variant == self:
continue
for koji_tag in variant.get_modular_koji_tags(
arch=arch, types=types, recursive=recursive):
arch=arch, types=types, recursive=recursive
):
if koji_tag not in result:
result.append(koji_tag)
return result
@ -398,7 +443,11 @@ class Variant(object):
continue
result.append(variant)
if recursive:
result.extend(variant.get_variants(types=[i for i in types if i != "self"], recursive=True))
result.extend(
variant.get_variants(
types=[i for i in types if i != "self"], recursive=True
)
)

return result
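The ORDERING table in __lt__ above means that sorting a mixed list puts full variants first, then addons and layered products, then optional trees. A minimal sketch of that comparison key (the uid values in the comment are hypothetical):

ORDERING = {"variant": 0, "addon": 1, "layered-product": 1, "optional": 2}

def variant_sort_key(variant):
    # Rank by type first, then alphabetically by uid within the same rank.
    return (ORDERING[variant.type], variant.uid)

# e.g. "Server" (variant) sorts before "Server-HA" (addon),
# which sorts before "Server-optional" (optional).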
@ -505,12 +505,14 @@ def get_compose_data(compose_path):
"release_is_layered": compose.info.release.is_layered,
}
if compose.info.release.is_layered:
data.update({
"base_product_name": compose.info.base_product.name,
"base_product_short": compose.info.base_product.short,
"base_product_version": compose.info.base_product.version,
"base_product_type": compose.info.base_product.type,
})
data.update(
{
"base_product_name": compose.info.base_product.name,
"base_product_short": compose.info.base_product.short,
"base_product_version": compose.info.base_product.version,
"base_product_type": compose.info.base_product.type,
}
)
return data
except Exception as exc:
return {}
@ -549,6 +551,7 @@ def send_notification(compose_dir, command, parts):
if not command:
return
from pungi.notifier import PungiNotifier

data = get_compose_data(compose_dir)
data["location"] = try_translate_path(parts, compose_dir)
notifier = PungiNotifier([command])
@ -24,60 +24,62 @@ from pungi.wrappers import iso

def sh(log, cmd, *args, **kwargs):
log.info('Running: %s', ' '.join(shlex_quote(x) for x in cmd))
log.info("Running: %s", " ".join(shlex_quote(x) for x in cmd))
ret, out = shortcuts.run(cmd, *args, universal_newlines=True, **kwargs)
if out:
log.debug('%s', out)
log.debug("%s", out)
return ret, out

def get_lorax_dir(default='/usr/share/lorax'):
def get_lorax_dir(default="/usr/share/lorax"):
try:
_, out = shortcuts.run(['python3', '-c' 'import pylorax; print(pylorax.find_templates())'],
universal_newlines=True)
_, out = shortcuts.run(
["python3", "-c" "import pylorax; print(pylorax.find_templates())"],
universal_newlines=True,
)
return out.strip()
except Exception:
return default

def as_bool(arg):
if arg == 'true':
if arg == "true":
return True
elif arg == 'false':
elif arg == "false":
return False
else:
return arg

def get_arch(log, iso_dir):
di_path = os.path.join(iso_dir, '.discinfo')
di_path = os.path.join(iso_dir, ".discinfo")
if os.path.exists(di_path):
di = productmd.discinfo.DiscInfo()
di.load(di_path)
log.info('Detected bootable ISO for %s (based on .discinfo)', di.arch)
log.info("Detected bootable ISO for %s (based on .discinfo)", di.arch)
return di.arch

ti_path = os.path.join(iso_dir, '.treeinfo')
ti_path = os.path.join(iso_dir, ".treeinfo")
if os.path.exists(ti_path):
ti = productmd.treeinfo.TreeInfo()
ti.load(ti_path)
log.info('Detected bootable ISO for %s (based on .treeinfo)', ti.tree.arch)
log.info("Detected bootable ISO for %s (based on .treeinfo)", ti.tree.arch)
return ti.tree.arch

# There is no way to tell the architecture of an ISO file without guessing.
# Let's print a warning and continue with assuming unbootable ISO.

log.warning('Failed to detect arch for ISO, assuming unbootable one.')
log.warning('If this is incorrect, use the --force-arch option.')
log.warning("Failed to detect arch for ISO, assuming unbootable one.")
log.warning("If this is incorrect, use the --force-arch option.")
return None

def run(log, opts):
# mount source iso
log.info('Mounting %s', opts.source)
log.info("Mounting %s", opts.source)
target = os.path.abspath(opts.target)

with util.temp_dir(prefix='patch-iso-') as work_dir:
with util.temp_dir(prefix="patch-iso-") as work_dir:
with iso.mount(opts.source) as source_iso_dir:
util.copy_all(source_iso_dir, work_dir)

@ -94,29 +96,34 @@ def run(log, opts):
# create graft points from mounted source iso + overlay dir
graft_points = iso.get_graft_points([work_dir] + opts.dirs)
# if ks.cfg is detected, patch syslinux + grub to use it
if 'ks.cfg' in graft_points:
log.info('Adding ks.cfg to boot configs')
tweak_configs(work_dir, volume_id, graft_points['ks.cfg'], logger=log)
if "ks.cfg" in graft_points:
log.info("Adding ks.cfg to boot configs")
tweak_configs(work_dir, volume_id, graft_points["ks.cfg"], logger=log)

arch = opts.force_arch or get_arch(log, work_dir)

with tempfile.NamedTemporaryFile(prefix='graft-points-') as graft_file:
iso.write_graft_points(graft_file.name, graft_points,
exclude=["*/TRANS.TBL", "*/boot.cat"])
with tempfile.NamedTemporaryFile(prefix="graft-points-") as graft_file:
iso.write_graft_points(
graft_file.name, graft_points, exclude=["*/TRANS.TBL", "*/boot.cat"]
)

# make the target iso bootable if source iso is bootable
boot_args = input_charset = None
if arch:
boot_args = iso.get_boot_options(
arch, os.path.join(get_lorax_dir(), 'config_files/ppc'))
input_charset = 'utf-8' if 'ppc' not in arch else None
arch, os.path.join(get_lorax_dir(), "config_files/ppc")
)
input_charset = "utf-8" if "ppc" not in arch else None
# Create the target ISO
mkisofs_cmd = iso.get_mkisofs_cmd(target, None,
volid=volume_id,
exclude=["./lost+found"],
graft_points=graft_file.name,
input_charset=input_charset,
boot_args=boot_args)
mkisofs_cmd = iso.get_mkisofs_cmd(
target,
None,
volid=volume_id,
exclude=["./lost+found"],
graft_points=graft_file.name,
input_charset=input_charset,
boot_args=boot_args,
)
sh(log, mkisofs_cmd, workdir=work_dir)

# isohybrid support
@ -124,7 +131,9 @@ def run(log, opts):
isohybrid_cmd = iso.get_isohybrid_cmd(target, arch)
sh(log, isohybrid_cmd)

supported = as_bool(opts.supported or iso.get_checkisomd5_data(opts.source)['Supported ISO'])
supported = as_bool(
opts.supported or iso.get_checkisomd5_data(opts.source)["Supported ISO"]
)
# implantmd5 + supported bit (use the same as on source iso, unless
# overriden by --supported option)
isomd5sum_cmd = iso.get_implantisomd5_cmd(target, supported)
@ -47,13 +47,19 @@ def ti_merge(one, two):
var.uid = variant.uid
var.name = variant.name
var.type = variant.type
for i in ("debug_packages", "debug_repository", "packages", "repository",
"source_packages", "source_repository"):
for i in (
"debug_packages",
"debug_repository",
"packages",
"repository",
"source_packages",
"source_repository",
):
setattr(var, i, getattr(variant, i, None))
one.variants.add(var)

DEFAULT_CHECKSUMS = ['md5', 'sha1', 'sha256']
DEFAULT_CHECKSUMS = ["md5", "sha1", "sha256"]

class UnifiedISO(object):
@ -72,12 +78,12 @@ class UnifiedISO(object):
makedirs(temp_topdir)
self.temp_dir = tempfile.mkdtemp(prefix="unified_isos_", dir=temp_topdir)

self.treeinfo = {} # {arch/src: TreeInfo}
self.repos = {} # {arch/src: {variant: new_path}
self.comps = {} # {arch/src: {variant: old_path}
self.productid = {} # {arch/stc: {variant: old_path}
self.treeinfo = {} # {arch/src: TreeInfo}
self.repos = {} # {arch/src: {variant: new_path}
self.comps = {} # {arch/src: {variant: old_path}
self.productid = {} # {arch/stc: {variant: old_path}
self.conf = self.read_config()
self.images = None # productmd.images.Images instance
self.images = None # productmd.images.Images instance

def create(self, delete_temp=True):
print("Creating unified ISOs for: {0}".format(self.compose_path))
@ -93,8 +99,8 @@ class UnifiedISO(object):
shutil.rmtree(self.temp_dir)

def dump_manifest(self):
dest = os.path.join(self.compose_path, 'metadata', 'images.json')
tmp_file = dest + '.tmp'
dest = os.path.join(self.compose_path, "metadata", "images.json")
tmp_file = dest + ".tmp"
try:
self.get_image_manifest().dump(tmp_file)
except Exception:
@ -106,7 +112,13 @@ class UnifiedISO(object):
os.rename(tmp_file, dest)

def _link_tree(self, dir, variant, arch):
blacklist_files = [".treeinfo", ".discinfo", "boot.iso", "media.repo", "extra_files.json"]
blacklist_files = [
".treeinfo",
".discinfo",
"boot.iso",
"media.repo",
"extra_files.json",
]
blacklist_dirs = ["repodata"]

for root, dirs, files in os.walk(dir):
@ -120,8 +132,12 @@ class UnifiedISO(object):

old_path = os.path.join(root, fn)
if fn.endswith(".rpm"):
new_path = os.path.join(self.temp_dir, "trees", arch, variant.uid, fn)
self.repos.setdefault(arch, {})[variant.uid] = os.path.dirname(new_path)
new_path = os.path.join(
self.temp_dir, "trees", arch, variant.uid, fn
)
self.repos.setdefault(arch, {})[variant.uid] = os.path.dirname(
new_path
)
else:
old_relpath = os.path.relpath(old_path, dir)
new_path = os.path.join(self.temp_dir, "trees", arch, old_relpath)
@ -130,8 +146,11 @@ class UnifiedISO(object):
try:
self.linker.link(old_path, new_path)
except OSError as exc:
print("Failed to link %s to %s: %s" % (old_path, new_path, exc.strerror),
file=sys.stderr)
print(
"Failed to link %s to %s: %s"
% (old_path, new_path, exc.strerror),
file=sys.stderr,
)
raise
def link_to_temp(self):
@ -140,7 +159,9 @@ class UnifiedISO(object):
for arch in variant.arches:
print("Processing: {0}.{1}".format(variant.uid, arch))
try:
tree_dir = os.path.join(self.compose_path, variant.paths.os_tree[arch])
tree_dir = os.path.join(
self.compose_path, variant.paths.os_tree[arch]
)
except KeyError:
# The path in metadata is missing: no content there
continue
@ -151,9 +172,11 @@ class UnifiedISO(object):
except IOError as exc:
if exc.errno != errno.ENOENT:
raise
print('Tree %s.%s has no .treeinfo, skipping...'
% (variant.uid, arch),
file=sys.stderr)
print(
"Tree %s.%s has no .treeinfo, skipping..."
% (variant.uid, arch),
file=sys.stderr,
)
continue

arch_ti = self.treeinfo.get(arch)
@ -164,27 +187,38 @@ class UnifiedISO(object):
ti_merge(arch_ti, ti)

if arch_ti.tree.arch != arch:
raise RuntimeError('Treeinfo arch mismatch')
raise RuntimeError("Treeinfo arch mismatch")

# override paths
arch_ti[variant.uid].repository = variant.uid
arch_ti[variant.uid].packages = variant.uid

comps_path = glob.glob(os.path.join(self.compose_path,
variant.paths.repository[arch],
"repodata", "*comps*.xml"))
comps_path = glob.glob(
os.path.join(
self.compose_path,
variant.paths.repository[arch],
"repodata",
"*comps*.xml",
)
)
if comps_path:
self.comps.setdefault(arch, {})[variant.uid] = comps_path[0]

productid_path = os.path.join(self.compose_path, variant.paths.repository[arch],
"repodata", "productid")
productid_path = os.path.join(
self.compose_path,
variant.paths.repository[arch],
"repodata",
"productid",
)
self.productid.setdefault(arch, {})[variant.uid] = productid_path

self._link_tree(tree_dir, variant, arch)

# sources
print("Processing: {0}.{1}".format(variant.uid, "src"))
tree_dir = os.path.join(self.compose_path, variant.paths.source_tree[arch])
tree_dir = os.path.join(
self.compose_path, variant.paths.source_tree[arch]
)
ti = productmd.treeinfo.TreeInfo()
ti.load(os.path.join(tree_dir, ".treeinfo"))

@ -196,7 +230,7 @@ class UnifiedISO(object):
ti_merge(arch_ti, ti)

if arch_ti.tree.arch != "src":
raise RuntimeError('Treeinfo arch mismatch')
raise RuntimeError("Treeinfo arch mismatch")

# override paths
arch_ti[variant.uid].repository = variant.uid
@ -205,13 +239,15 @@ class UnifiedISO(object):
# arch_ti[variant.uid].source_repository = variant.uid
# arch_ti[variant.uid].source_packages = variant.uid

self._link_tree(tree_dir, variant, 'src')
self._link_tree(tree_dir, variant, "src")

# Debuginfo
print("Processing: {0}.{1} debuginfo".format(variant.uid, arch))
tree_dir = os.path.join(self.compose_path, variant.paths.debug_tree[arch])
tree_dir = os.path.join(
self.compose_path, variant.paths.debug_tree[arch]
)

debug_arch = 'debug-%s' % arch
debug_arch = "debug-%s" % arch

# We don't have a .treeinfo for debuginfo trees. Let's just
# copy the one from binary tree.
@ -236,7 +272,9 @@ class UnifiedISO(object):
tree_dir = os.path.join(self.temp_dir, "trees", arch)
repo_path = self.repos[arch][variant]
comps_path = self.comps.get(arch, {}).get(variant, None)
cmd = cr.get_createrepo_cmd(repo_path, groupfile=comps_path, update=True)
cmd = cr.get_createrepo_cmd(
repo_path, groupfile=comps_path, update=True
)
run(cmd, show_cmd=True)

productid_path = self.productid.get(arch, {}).get(variant, None)
@ -247,15 +285,27 @@ class UnifiedISO(object):

if os.path.exists(productid_path):
shutil.copy2(productid_path, new_path)
cmd = cr.get_modifyrepo_cmd(repo_dir, new_path, compress_type="gz")
cmd = cr.get_modifyrepo_cmd(
repo_dir, new_path, compress_type="gz"
)
run(cmd)
else:
print("WARNING: productid not found in {0}.{1}".format(variant, arch))
print(
"WARNING: productid not found in {0}.{1}".format(
variant, arch
)
)

print("Inserting new repomd.xml checksum to treeinfo: {0}.{1}".format(variant, arch))
print(
"Inserting new repomd.xml checksum to treeinfo: {0}.{1}".format(
variant, arch
)
)
# insert new repomd.xml checksum to treeinfo
repomd_path = os.path.join(repo_path, "repodata", "repomd.xml")
ti.checksums.add(os.path.relpath(repomd_path, tree_dir), 'sha256', root_dir=tree_dir)
ti.checksums.add(
os.path.relpath(repomd_path, tree_dir), "sha256", root_dir=tree_dir
)

# write treeinfo
for arch, ti in self.treeinfo.items():
@ -270,17 +320,25 @@ class UnifiedISO(object):
di_path = os.path.join(self.temp_dir, "trees", arch, ".discinfo")
description = "%s %s" % (ti.release.name, ti.release.version)
if ti.release.is_layered:
description += " for %s %s" % (ti.base_product.name, ti.base_product.version)
create_discinfo(di_path, description, arch.split('-', 1)[-1])
description += " for %s %s" % (
ti.base_product.name,
ti.base_product.version,
)
create_discinfo(di_path, description, arch.split("-", 1)[-1])
def read_config(self):
try:
conf_dump = glob.glob(os.path.join(self.compose_path,
'../logs/global/config-dump*.global.log'))[0]
conf_dump = glob.glob(
os.path.join(
self.compose_path, "../logs/global/config-dump*.global.log"
)
)[0]
except IndexError:
print('Config dump not found, can not adhere to previous settings. '
'Expect weird naming and checksums.',
file=sys.stderr)
print(
"Config dump not found, can not adhere to previous settings. "
"Expect weird naming and checksums.",
file=sys.stderr,
)
return {}
with open(conf_dump) as f:
return json.load(f)
@ -291,8 +349,8 @@ class UnifiedISO(object):

for typed_arch, ti in self.treeinfo.items():
source_dir = os.path.join(self.temp_dir, "trees", typed_arch)
arch = typed_arch.split('-', 1)[-1]
debuginfo = typed_arch.startswith('debug-')
arch = typed_arch.split("-", 1)[-1]
debuginfo = typed_arch.startswith("debug-")

# XXX: HARDCODED
disc_type = "dvd"
@ -301,7 +359,7 @@ class UnifiedISO(object):
if arch == "src":
iso_arch = "source"
elif debuginfo:
iso_arch = arch + '-debuginfo'
iso_arch = arch + "-debuginfo"

iso_name = "%s-%s-%s.iso" % (self.ci.compose.id, iso_arch, disc_type)
iso_dir = os.path.join(self.temp_dir, "iso", iso_arch)
@ -315,7 +373,11 @@ class UnifiedISO(object):
volid += " debuginfo"

# create ISO
run(iso.get_mkisofs_cmd(iso_path, [source_dir], volid=volid, exclude=["./lost+found"]))
run(
iso.get_mkisofs_cmd(
iso_path, [source_dir], volid=volid, exclude=["./lost+found"]
)
)

# implant MD5
supported = True
@ -332,7 +394,7 @@ class UnifiedISO(object):
img.arch = arch

# XXX: HARDCODED
img.type = "dvd" if not debuginfo else 'dvd-debuginfo'
img.type = "dvd" if not debuginfo else "dvd-debuginfo"
img.format = "iso"
img.disc_number = 1
img.disc_count = 1
@ -351,7 +413,7 @@ class UnifiedISO(object):
all_arches = [arch]

for tree_arch in all_arches:
if tree_arch.startswith('debug-'):
if tree_arch.startswith("debug-"):
continue
ti = self.treeinfo[tree_arch]
for variant_uid in ti.variants:
@ -366,49 +428,51 @@ class UnifiedISO(object):
for var in self.ci.get_variants(recursive=False)
if var.uid != variant_uid
]
paths_attr = 'isos' if arch != 'src' else 'source_isos'
paths_attr = "isos" if arch != "src" else "source_isos"
paths = getattr(self.ci.variants[variant.uid].paths, paths_attr)
path = paths.get(tree_arch, os.path.join(variant.uid, tree_arch, "iso"))
if variant_img.type == 'dvd-debuginfo':
prefix, isodir = path.rsplit('/', 1)
path = os.path.join(prefix, 'debug', isodir)
variant_img.path = os.path.join(
path,
os.path.basename(img.path)
path = paths.get(
tree_arch, os.path.join(variant.uid, tree_arch, "iso")
)
if variant_img.type == "dvd-debuginfo":
prefix, isodir = path.rsplit("/", 1)
path = os.path.join(prefix, "debug", isodir)
variant_img.path = os.path.join(path, os.path.basename(img.path))
im.add(variant.uid, tree_arch, variant_img)

dst = os.path.join(self.compose_path, variant_img.path)
print("Linking {0} -> {1}".format(iso_path, dst))
makedirs(os.path.dirname(dst))
self.linker.link(iso_path, dst)
self.linker.link(iso_path + '.manifest', dst + '.manifest')
self.linker.link(iso_path + ".manifest", dst + ".manifest")
def _get_base_filename(self, variant, arch):
|
||||
substs = {
|
||||
'compose_id': self.compose.info.compose.id,
|
||||
'release_short': self.compose.info.release.short,
|
||||
'version': self.compose.info.release.version,
|
||||
'date': self.compose.info.compose.date,
|
||||
'respin': self.compose.info.compose.respin,
|
||||
'type': self.compose.info.compose.type,
|
||||
'type_suffix': self.compose.info.compose.type_suffix,
|
||||
'label': self.compose.info.compose.label,
|
||||
'label_major_version': self.compose.info.compose.label_major_version,
|
||||
'variant': variant,
|
||||
'arch': arch,
|
||||
"compose_id": self.compose.info.compose.id,
|
||||
"release_short": self.compose.info.release.short,
|
||||
"version": self.compose.info.release.version,
|
||||
"date": self.compose.info.compose.date,
|
||||
"respin": self.compose.info.compose.respin,
|
||||
"type": self.compose.info.compose.type,
|
||||
"type_suffix": self.compose.info.compose.type_suffix,
|
||||
"label": self.compose.info.compose.label,
|
||||
"label_major_version": self.compose.info.compose.label_major_version,
|
||||
"variant": variant,
|
||||
"arch": arch,
|
||||
}
|
||||
base_name = self.conf.get('media_checksum_base_filename', '')
|
||||
base_name = self.conf.get("media_checksum_base_filename", "")
|
||||
if base_name:
|
||||
base_name = (base_name % substs).format(**substs)
|
||||
base_name += '-'
|
||||
base_name += "-"
|
||||
return base_name
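Editor's note (not part of the diff): _get_base_filename expands the configured
template twice, first with old-style "%" interpolation and then with str.format,
so a media_checksum_base_filename value may use either placeholder style. A
minimal sketch with hypothetical values, mirroring the substs dict above:

    # Hypothetical values for illustration only.
    substs = {"release_short": "Fedora", "version": "32", "variant": "Server", "arch": "x86_64"}
    template = "%(release_short)s-{version}-{variant}-{arch}"
    base_name = (template % substs).format(**substs)
    # base_name == "Fedora-32-Server-x86_64"; the method then appends "-".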

def update_checksums(self):
make_checksums(self.compose_path, self.get_image_manifest(),
self.conf.get('media_checksums', DEFAULT_CHECKSUMS),
self.conf.get('media_checksum_one_file', False),
self._get_base_filename)
make_checksums(
self.compose_path,
self.get_image_manifest(),
self.conf.get("media_checksums", DEFAULT_CHECKSUMS),
self.conf.get("media_checksum_one_file", False),
self._get_base_filename,
)

def get_image_manifest(self):
if not self.images:
84
setup.py
@ -24,61 +24,47 @@ packages = sorted(packages)


setup(
name = "pungi",
version = "4.2.0",
description = "Distribution compose tool",
url = "https://pagure.io/pungi",
author = "Dennis Gilmore",
author_email = "dgilmore@fedoraproject.org",
license = "GPLv2",

packages = packages,
entry_points = {
'console_scripts': [
'comps_filter = pungi.scripts.comps_filter:main',
'pungi = pungi.scripts.pungi:main',
'pungi-create-unified-isos = pungi.scripts.create_unified_isos:main',
'pungi-fedmsg-notification = pungi.scripts.fedmsg_notification:main',
'pungi-patch-iso = pungi.scripts.patch_iso:cli_main',
'pungi-make-ostree = pungi.ostree:main',
'pungi-notification-report-progress = pungi.scripts.report_progress:main',
'pungi-orchestrate = pungi_utils.orchestrator:main',
'pungi-wait-for-signed-ostree-handler = pungi.scripts.wait_for_signed_ostree_handler:main',
'pungi-koji = pungi.scripts.pungi_koji:cli_main',
'pungi-gather = pungi.scripts.pungi_gather:cli_main',
'pungi-config-dump = pungi.scripts.config_dump:cli_main',
'pungi-config-validate = pungi.scripts.config_validate:cli_main',
name="pungi",
version="4.2.0",
description="Distribution compose tool",
url="https://pagure.io/pungi",
author="Dennis Gilmore",
author_email="dgilmore@fedoraproject.org",
license="GPLv2",
packages=packages,
entry_points={
"console_scripts": [
"comps_filter = pungi.scripts.comps_filter:main",
"pungi = pungi.scripts.pungi:main",
"pungi-create-unified-isos = pungi.scripts.create_unified_isos:main",
"pungi-fedmsg-notification = pungi.scripts.fedmsg_notification:main",
"pungi-patch-iso = pungi.scripts.patch_iso:cli_main",
"pungi-make-ostree = pungi.ostree:main",
"pungi-notification-report-progress = pungi.scripts.report_progress:main",
"pungi-orchestrate = pungi_utils.orchestrator:main",
"pungi-wait-for-signed-ostree-handler = pungi.scripts.wait_for_signed_ostree_handler:main",
"pungi-koji = pungi.scripts.pungi_koji:cli_main",
"pungi-gather = pungi.scripts.pungi_gather:cli_main",
"pungi-config-dump = pungi.scripts.config_dump:cli_main",
"pungi-config-validate = pungi.scripts.config_validate:cli_main",
]
},
scripts = [
'contrib/yum-dnf-compare/pungi-compare-depsolving',
scripts=["contrib/yum-dnf-compare/pungi-compare-depsolving",],
data_files=[
("/usr/share/pungi", glob.glob("share/*.xsl")),
("/usr/share/pungi", glob.glob("share/*.ks")),
("/usr/share/pungi", glob.glob("share/*.dtd")),
("/usr/share/pungi/multilib", glob.glob("share/multilib/*")),
],
data_files = [
('/usr/share/pungi', glob.glob('share/*.xsl')),
('/usr/share/pungi', glob.glob('share/*.ks')),
('/usr/share/pungi', glob.glob('share/*.dtd')),
('/usr/share/pungi/multilib', glob.glob('share/multilib/*')),
],
test_suite = "tests",
install_requires = [
test_suite="tests",
install_requires=[
"jsonschema",
"kobo",
"lxml",
"productmd>=1.23",
"six",
'dogpile.cache',
],
extras_require={
':python_version=="2.7"': [
'enum34',
"lockfile",
'dict.sorted',
]
},
tests_require = [
"black",
"mock",
"nose",
"nose-cov",
],
"dogpile.cache",
],
extras_require={':python_version=="2.7"': ["enum34", "lockfile", "dict.sorted",]},
tests_require=["mock", "nose", "nose-cov",],
)
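Editor's note (not part of the commit): the reformatted setup() call above is the
output of running black over setup.py. A minimal sketch of how the result could
be re-verified, assuming black is installed:

    # Raises CalledProcessError if black would still reformat the file;
    # equivalent to running "black --check setup.py" from the shell.
    import subprocess
    subprocess.run(["black", "--check", "setup.py"], check=True)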
4
tox.ini
@ -1,15 +1,15 @@
[flake8]
exclude = doc/*,*.pyc,*.py~,*.in,*.spec,*.sh,*.rst,setup.py
filename = *.py
max-line-length = 88

# E402: module level import not at top of file
# E501: line too long
# H301: one import per line
# H306: imports not in alphabetical order
# E226: missing whitespace around arithmetic operator
# W503: line break occured before a binary operator
# E203: whitespace before ':'
ignore = E501,E402,H301,H306,E226,W503,E203
ignore = E402,H301,H306,E226,W503,E203

[run]
omit = tests/*
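Editor's note (not part of the diff): E203 and W503 stay ignored because black's
output intentionally violates those two flake8 checks, while E501 can be dropped
from the ignore list now that max-line-length matches black's 88-character limit.
A minimal example of code black produces that flake8 would otherwise flag as E203:

    # black puts a space before ":" in slices with complex bounds,
    # which flake8 reports as "E203 whitespace before ':'".
    data = list(range(10))
    offset = 3
    chunk = data[offset + 1 :]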