Include phase name in log for some phases
The createiso, live_images, image_build, ostree_installer and osbs phases run in parallel, so log messages from these phases are interleaved and it is not obvious which message belongs to which phase. This change adds the phase name to the log messages of these phases. A new mixin, PhaseLoggerMixin, extends a Pungi phase with a logging.Logger that copies the handlers from the compose logger but replaces their formatter with one that includes the phase name.

Fixes: #58
Signed-off-by: Qixiang Wan <qwan@redhat.com>
parent 9e52c68c82
commit 80fa723b1d
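As a rough illustration of the mechanism described in the commit message, the sketch below shows a per-phase logging.Logger, named after the phase, that clones the compose logger's handlers and swaps in a formatter which prefixes each record with the phase name. The DemoPhase and PhaseLoggerDemo classes, the compose_logger variable and PHASE_FORMAT are made up for this example only; the real implementation is the PhaseLoggerMixin added to pungi.phases.base in the diff below.

import copy
import logging

# Stand-in for the compose-wide logger (compose._logger in Pungi).
compose_logger = logging.getLogger("Pungi")
compose_logger.setLevel(logging.DEBUG)
compose_logger.addHandler(logging.StreamHandler())

PHASE_FORMAT = "%(asctime)s [%(name)-16s] [%(levelname)-8s] %(message)s"


class DemoPhase(object):
    """Hypothetical minimal phase: only provides the phase name."""
    name = "createiso"


class PhaseLoggerDemo(DemoPhase):
    """Clone the compose handlers, but format records with the phase name."""

    def __init__(self):
        self.logger = logging.getLogger(self.name.upper())
        self.logger.setLevel(logging.DEBUG)
        for handler in compose_logger.handlers:
            clone = copy.copy(handler)
            clone.setFormatter(logging.Formatter(PHASE_FORMAT, datefmt="%Y-%m-%d %H:%M:%S"))
            clone.setLevel(logging.DEBUG)
            self.logger.addHandler(clone)


phase = PhaseLoggerDemo()
phase.logger.info("Skipping createiso for Server.x86_64 due to config option")
# Prints something along the lines of:
# 2017-02-23 10:15:42 [CREATEISO       ] [INFO    ] Skipping createiso for Server.x86_64 due to config option

Copying the handlers instead of sharing them lets each phase attach its own formatter without changing how the compose-wide logger formats messages for the rest of the compose.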
@@ -181,7 +181,8 @@ def main():

     from pungi.compose import Compose

-    logger = logging.Logger("Pungi")
+    logger = logging.getLogger("Pungi")
+    logger.setLevel(logging.DEBUG)
     kobo.log.add_stderr_logger(logger)

     conf = kobo.conf.PyConfigParser()
@@ -61,12 +61,15 @@ class MediaSplitter(object):
     are added; there is no re-ordering. The number of disk is thus not the
     possible minimum.
     """
-    def __init__(self, media_size, compose=None):
+    def __init__(self, media_size, compose=None, logger=None):
         self.media_size = media_size
         self.files = [] # to preserve order
         self.file_sizes = {}
         self.sticky_files = set()
         self.compose = compose
+        self.logger = logger
+        if not self.logger and self.compose:
+            self.logger = self.compose._logger

     def add_file(self, name, size, sticky=False):
         name = os.path.normpath(name)
@@ -120,9 +123,9 @@ class MediaSplitter(object):
             total_size_single += size
         if self.compose:
             if self.media_size:
-                self.compose.log_debug("MediaSplitter: free space on single media would be %s. "
+                self.logger.debug("MediaSplitter: free space on single media would be %s. "
                                   "Total size of single medium: %s."
                                   % (self.media_size - total_size_single, total_size_single))
             else:
-                self.compose.log_debug("MediaSplitter: Total size of single medium: %s." % total_size_single)
+                self.logger.debug("MediaSplitter: Total size of single medium: %s." % total_size_single)
         return disks
@@ -13,6 +13,8 @@
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, see <https://gnu.org/licenses/>.

+import logging
+
 from pungi import util


@@ -151,3 +153,21 @@ class ImageConfigMixin(object):
             ksurl = self.compose.conf.get('global_ksurl')
             self.compose.conf['private_global_ksurl'] = util.resolve_git_url(ksurl)
         return self.compose.conf['private_global_ksurl']
+
+
+class PhaseLoggerMixin(object):
+    """
+    A mixin that can extend a phase with a new logging logger that copy
+    handlers from compose, but with different formatter that includes phase name.
+    """
+    def __init__(self, *args, **kwargs):
+        super(PhaseLoggerMixin, self).__init__(*args, **kwargs)
+        self.logger = logging.getLogger(self.name.upper())
+        self.logger.setLevel(logging.DEBUG)
+        format = "%(asctime)s [%(name)-16s] [%(levelname)-8s] %(message)s"
+        import copy
+        for handler in self.compose._logger.handlers:
+            hl = copy.copy(handler)
+            hl.setFormatter(logging.Formatter(format, datefmt="%Y-%m-%d %H:%M:%S"))
+            hl.setLevel(logging.DEBUG)
+            self.logger.addHandler(hl)
@@ -175,7 +175,7 @@ def get_kickstart_file(compose):
     msg = "Getting ks.cfg"
     kickstart_path = os.path.join(compose.paths.work.topdir(arch="global"), "ks.cfg")
     if os.path.exists(kickstart_path):
-        compose.log_warn("[SKIP ] %s" % msg)
+        compose.log_warning("[SKIP ] %s" % msg)
         return kickstart_path

     compose.log_info("[BEGIN] %s" % msg)
@@ -28,7 +28,7 @@ from kobo.shortcuts import run, relative_path
 from pungi.wrappers import iso
 from pungi.wrappers.createrepo import CreaterepoWrapper
 from pungi.wrappers.kojiwrapper import KojiWrapper
-from pungi.phases.base import PhaseBase
+from pungi.phases.base import PhaseBase, PhaseLoggerMixin
 from pungi.util import (makedirs, get_volid, get_arch_variant_data, failable,
                         get_file_size, get_mtime)
 from pungi.media_split import MediaSplitter, convert_media_size
@@ -37,12 +37,12 @@ from pungi.compose_metadata.discinfo import read_discinfo, write_discinfo
 from .. import createiso


-class CreateisoPhase(PhaseBase):
+class CreateisoPhase(PhaseLoggerMixin, PhaseBase):
     name = "createiso"

     def __init__(self, compose):
-        PhaseBase.__init__(self, compose)
-        self.pool = ThreadPool(logger=self.compose._logger)
+        super(CreateisoPhase, self).__init__(compose)
+        self.pool = ThreadPool(logger=self.logger)

     def _find_rpms(self, path):
         """Check if there are some RPMs in the path."""
@@ -69,7 +69,7 @@ class CreateisoPhase(PhaseBase):
             for arch in variant.arches + ["src"]:
                 skip_iso = get_arch_variant_data(self.compose.conf, "createiso_skip", arch, variant)
                 if skip_iso == [True]:
-                    self.compose.log_info("Skipping createiso for %s.%s due to config option" % (variant, arch))
+                    self.logger.info("Skipping createiso for %s.%s due to config option" % (variant, arch))
                     continue

                 volid = get_volid(self.compose, arch, variant, disc_type=disc_type)
@@ -80,13 +80,14 @@ class CreateisoPhase(PhaseBase):
                     continue

                 if not self._find_rpms(os_tree):
-                    self.compose.log_warning("No RPMs found for %s.%s, skipping ISO"
+                    self.logger.warn("No RPMs found for %s.%s, skipping ISO"
                                      % (variant.uid, arch))
                     continue

                 bootable = self._is_bootable(variant, arch)

-                split_iso_data = split_iso(self.compose, arch, variant, no_split=bootable)
+                split_iso_data = split_iso(self.compose, arch, variant, no_split=bootable,
+                                           logger=self.logger)
                 disc_count = len(split_iso_data)

                 for disc_num, iso_data in enumerate(split_iso_data):
@@ -97,7 +98,7 @@ class CreateisoPhase(PhaseBase):
                     iso_path = self.compose.paths.compose.iso_path(
                         arch, variant, filename, symlink_to=symlink_isos_to)
                     if os.path.isfile(iso_path):
-                        self.compose.log_warning("Skipping mkisofs, image already exists: %s" % iso_path)
+                        self.logger.warn("Skipping mkisofs, image already exists: %s" % iso_path)
                         continue
                     deliverables.append(iso_path)

@@ -158,7 +159,7 @@ class CreateisoPhase(PhaseBase):

 class CreateIsoThread(WorkerThread):
     def fail(self, compose, cmd, variant, arch):
-        compose.log_error("CreateISO failed, removing ISO: %s" % cmd["iso_path"])
+        self.pool.log_error("CreateISO failed, removing ISO: %s" % cmd["iso_path"])
         try:
             # remove incomplete ISO
             os.unlink(cmd["iso_path"])
@@ -174,7 +175,7 @@ class CreateIsoThread(WorkerThread):
     def process(self, item, num):
         compose, cmd, variant, arch = item
         can_fail = compose.can_fail(variant, arch, 'iso')
-        with failable(compose, can_fail, variant, arch, 'iso'):
+        with failable(compose, can_fail, variant, arch, 'iso', logger=self.pool._logger):
             self.worker(compose, cmd, variant, arch, num)

     def worker(self, compose, cmd, variant, arch, num):
@@ -281,7 +282,7 @@ class CreateIsoThread(WorkerThread):
                 variant=str(variant))


-def split_iso(compose, arch, variant, no_split=False):
+def split_iso(compose, arch, variant, no_split=False, logger=None):
     """
     Split contents of the os/ directory for given tree into chunks fitting on ISO.

@@ -292,12 +293,14 @@ def split_iso(compose, arch, variant, no_split=False):
     infinite so that everything goes on single disc. A warning is printed if
     the size is bigger than configured.
     """
+    if not logger:
+        logger = compose._logger
     media_size = compose.conf['iso_size']
     media_reserve = compose.conf['split_iso_reserve']
     split_size = convert_media_size(media_size) - convert_media_size(media_reserve)
     real_size = 10**20 if no_split else split_size

-    ms = MediaSplitter(real_size, compose)
+    ms = MediaSplitter(real_size, compose, logger=logger)

     os_tree = compose.paths.compose.os_tree(arch, variant)
     extra_files_dir = compose.paths.work.extra_files_dir(arch, variant)
@@ -319,7 +322,7 @@ def split_iso(compose, arch, variant, no_split=False):
         boot_iso_rpath = ti.images.images.get(arch, {}).get("boot.iso", None)
         if boot_iso_rpath:
             all_files_ignore.append(boot_iso_rpath)
-    compose.log_debug("split_iso all_files_ignore = %s" % ", ".join(all_files_ignore))
+    logger.debug("split_iso all_files_ignore = %s" % ", ".join(all_files_ignore))

     for root, dirs, files in os.walk(os_tree):
         for dn in dirs[:]:
@@ -332,7 +335,7 @@ def split_iso(compose, arch, variant, no_split=False):
             rel_path = relative_path(path, os_tree.rstrip("/") + "/")
             sticky = rel_path in extra_files
             if rel_path in all_files_ignore:
-                compose.log_info("split_iso: Skipping %s" % rel_path)
+                logger.info("split_iso: Skipping %s" % rel_path)
                 continue
             if root.startswith(compose.paths.compose.packages(arch, variant)):
                 packages.append((path, os.path.getsize(path), sticky))
@@ -344,11 +347,11 @@ def split_iso(compose, arch, variant, no_split=False):

     result = ms.split()
     if no_split and result[0]['size'] > split_size:
-        compose.log_warning('ISO for %s.%s does not fit on single media! '
+        logger.warn('ISO for %s.%s does not fit on single media! '
                     'It is %s bytes too big. (Total size: %s B)'
                     % (variant.uid, arch,
                        result[0]['size'] - split_size,
                        result[0]['size']))
     return result


@@ -14,13 +14,13 @@ from kobo.threads import ThreadPool, WorkerThread
 from productmd.images import Image


-class ImageBuildPhase(base.ImageConfigMixin, base.ConfigGuardedPhase):
+class ImageBuildPhase(base.PhaseLoggerMixin, base.ImageConfigMixin, base.ConfigGuardedPhase):
     """class for wrapping up koji image-build"""
     name = "image_build"

     def __init__(self, compose):
         super(ImageBuildPhase, self).__init__(compose)
-        self.pool = ThreadPool(logger=self.compose._logger)
+        self.pool = ThreadPool(logger=self.logger)

     def _get_install_tree(self, image_conf, variant):
         """
@@ -139,7 +139,7 @@ class ImageBuildPhase(base.ImageConfigMixin, base.ConfigGuardedPhase):

 class CreateImageBuildThread(WorkerThread):
     def fail(self, compose, cmd):
-        compose.log_error("CreateImageBuild failed.")
+        self.pool.log_error("CreateImageBuild failed.")

     def process(self, item, num):
         compose, cmd = item
@@ -148,7 +148,8 @@ class CreateImageBuildThread(WorkerThread):
         failable_arches = cmd.get('failable_arches', [])
         self.can_fail = bool(failable_arches)
         # TODO handle failure per architecture; currently not possible in single task
-        with failable(compose, self.can_fail, variant, '*', 'image-build', subvariant):
+        with failable(compose, self.can_fail, variant, '*', 'image-build', subvariant,
+                      logger=self.pool._logger):
             self.worker(num, compose, variant, subvariant, cmd)

     def worker(self, num, compose, variant, subvariant, cmd):
@@ -207,7 +208,7 @@ class CreateImageBuildThread(WorkerThread):
         # The usecase here is that you can run koji image-build with multiple --format
         # It's ok to do it serialized since we're talking about max 2 images per single
         # image_build record
-        linker = Linker(logger=compose._logger)
+        linker = Linker(logger=self.pool._logger)
         for image_info in image_infos:
             image_dir = cmd["image_dir"] % {"arch": image_info['arch']}
             makedirs(image_dir)
@@ -37,12 +37,12 @@ if sys.version_info[0] == 3:
         return (a > b) - (a < b)


-class LiveImagesPhase(base.ImageConfigMixin, base.ConfigGuardedPhase):
+class LiveImagesPhase(base.PhaseLoggerMixin, base.ImageConfigMixin, base.ConfigGuardedPhase):
     name = "live_images"

     def __init__(self, compose):
         super(LiveImagesPhase, self).__init__(compose)
-        self.pool = ThreadPool(logger=self.compose._logger)
+        self.pool = ThreadPool(logger=self.logger)

     def _get_extra_repos(self, arch, variant, extras):
         repo = []
@@ -154,7 +154,8 @@ class CreateLiveImageThread(WorkerThread):
         self.failable_arches = cmd.get('failable_arches', [])
         # TODO handle failure per architecture; currently not possible in single task
         self.can_fail = bool(self.failable_arches)
-        with failable(compose, self.can_fail, variant, arch, 'live', cmd.get('subvariant')):
+        with failable(compose, self.can_fail, variant, arch, 'live', cmd.get('subvariant'),
+                      logger=self.pool._logger):
             self.worker(compose, cmd, variant, arch, num)

     def worker(self, compose, cmd, variant, arch, num):
@@ -215,8 +216,8 @@ class CreateLiveImageThread(WorkerThread):

         if cmd["sign"]:
             # Sign the rpm wrapped images and get their paths
-            compose.log_info("Signing rpm wrapped images in task_id: %s (expected key ID: %s)"
+            self.pool.log_info("Signing rpm wrapped images in task_id: %s (expected key ID: %s)"
                              % (output["task_id"], compose.conf.get("signing_key_id")))
             signed_rpm_paths = self._sign_image(koji_wrapper, compose, cmd, output["task_id"])
             if signed_rpm_paths:
                 rpm_paths = signed_rpm_paths
@@ -275,12 +276,12 @@ class CreateLiveImageThread(WorkerThread):
         signing_command = compose.conf.get("signing_command")

         if not signing_key_id:
-            compose.log_warning("Signing is enabled but signing_key_id is not specified")
-            compose.log_warning("Signing skipped")
+            self.pool.log_warning("Signing is enabled but signing_key_id is not specified")
+            self.pool.log_warning("Signing skipped")
             return None
         if not signing_command:
-            compose.log_warning("Signing is enabled but signing_command is not specified")
-            compose.log_warning("Signing skipped")
+            self.pool.log_warning("Signing is enabled but signing_command is not specified")
+            self.pool.log_warning("Signing skipped")
             return None

         # Prepare signing log file
@@ -293,7 +294,7 @@ class CreateLiveImageThread(WorkerThread):
                 log_file=signing_log_file,
                 signing_key_password=compose.conf.get("signing_key_password"))
         except RuntimeError:
-            compose.log_error("Error while signing rpm wrapped images. See log: %s" % signing_log_file)
+            self.pool.log_error("Error while signing rpm wrapped images. See log: %s" % signing_log_file)
             raise

         # Get pats to the signed rpms
@@ -306,8 +307,8 @@ class CreateLiveImageThread(WorkerThread):
             return rpm_paths

         # Signed RPMs are not available
-        compose.log_warning("Signed files are not available: %s" % rpm_paths)
-        compose.log_warning("Unsigned files will be used")
+        self.pool.log_warning("Signed files are not available: %s" % rpm_paths)
+        self.pool.log_warning("Unsigned files will be used")
         return None


@@ -5,7 +5,7 @@ import time
 from kobo import shortcuts

 from pungi.util import get_variant_data, makedirs, get_mtime, get_file_size, failable
-from pungi.phases.base import ConfigGuardedPhase, ImageConfigMixin
+from pungi.phases.base import ConfigGuardedPhase, ImageConfigMixin, PhaseLoggerMixin
 from pungi.linker import Linker
 from pungi.paths import translate_path
 from pungi.wrappers.kojiwrapper import KojiWrapper
@@ -13,13 +13,13 @@ from kobo.threads import ThreadPool, WorkerThread
 from productmd.images import Image


-class LiveMediaPhase(ImageConfigMixin, ConfigGuardedPhase):
+class LiveMediaPhase(PhaseLoggerMixin, ImageConfigMixin, ConfigGuardedPhase):
     """class for wrapping up koji spin-livemedia"""
     name = 'live_media'

     def __init__(self, compose):
         super(LiveMediaPhase, self).__init__(compose)
-        self.pool = ThreadPool(logger=self.compose._logger)
+        self.pool = ThreadPool(logger=self.logger)

     def _get_repos(self, image_conf, variant):
         """
@@ -103,7 +103,8 @@ class LiveMediaThread(WorkerThread):
         self.failable_arches = config.pop('failable_arches')
         self.num = num
         # TODO handle failure per architecture; currently not possible in single task
-        with failable(compose, bool(self.failable_arches), variant, '*', 'live-media', subvariant):
+        with failable(compose, bool(self.failable_arches), variant, '*', 'live-media', subvariant,
+                      logger=self.pool._logger):
             self.worker(compose, variant, subvariant, config)

     def _get_log_file(self, compose, variant, subvariant, config):
@@ -116,7 +117,7 @@ class LiveMediaThread(WorkerThread):
         output = koji_wrapper.run_blocking_cmd(cmd, log_file=log_file)
         self.pool.log_debug('live media outputs: %s' % (output))
         if output['retcode'] != 0:
-            compose.log_error('Live media task failed.')
+            self.pool.log_error('Live media task failed.')
             raise RuntimeError('Live media task failed: %s. See %s for more details.'
                                % (output['task_id'], log_file))
         return output
@@ -154,7 +155,7 @@ class LiveMediaThread(WorkerThread):
                                 % (output['task_id'], len(config['arches']), len(image_infos)))
             raise RuntimeError('Image count mismatch in task %s.' % output['task_id'])

-        linker = Linker(logger=compose._logger)
+        linker = Linker(logger=self.pool._logger)
         link_type = compose.conf["link_type"]
         for image_info in image_infos:
             image_dir = compose.paths.compose.iso_dir(image_info['arch'], variant)
@@ -4,18 +4,18 @@ import json
 import os
 from kobo.threads import ThreadPool, WorkerThread

-from .base import ConfigGuardedPhase
+from .base import ConfigGuardedPhase, PhaseLoggerMixin
 from .. import util
 from ..wrappers import kojiwrapper
 from ..paths import translate_path


-class OSBSPhase(ConfigGuardedPhase):
+class OSBSPhase(PhaseLoggerMixin, ConfigGuardedPhase):
     name = 'osbs'

     def __init__(self, compose):
         super(OSBSPhase, self).__init__(compose)
-        self.pool = ThreadPool(logger=self.compose._logger)
+        self.pool = ThreadPool(logger=self.logger)
         self.pool.metadata = {}

     def run(self):
@@ -39,7 +39,8 @@ class OSBSThread(WorkerThread):
     def process(self, item, num):
         compose, variant, config = item
         self.num = num
-        with util.failable(compose, bool(config.pop('failable', None)), variant, '*', 'osbs'):
+        with util.failable(compose, bool(config.pop('failable', None)), variant, '*', 'osbs',
+                           logger=self.pool._logger):
             self.worker(compose, variant, config)

     def worker(self, compose, variant, config):
@@ -7,19 +7,19 @@ from productmd import images
 import pipes
 from kobo import shortcuts

-from .base import ConfigGuardedPhase
+from .base import ConfigGuardedPhase, PhaseLoggerMixin
 from .. import util
 from ..paths import translate_path
 from ..util import get_volid
 from ..wrappers import kojiwrapper, iso, lorax, scm


-class OstreeInstallerPhase(ConfigGuardedPhase):
+class OstreeInstallerPhase(PhaseLoggerMixin, ConfigGuardedPhase):
     name = 'ostree_installer'

     def __init__(self, compose):
         super(OstreeInstallerPhase, self).__init__(compose)
-        self.pool = ThreadPool(logger=self.compose._logger)
+        self.pool = ThreadPool(logger=self.logger)

     def run(self):
         for variant in self.compose.get_variants():
@@ -37,7 +37,8 @@ class OstreeInstallerThread(WorkerThread):
         self.num = num
         failable_arches = config.get('failable', [])
         self.can_fail = util.can_arch_fail(failable_arches, arch)
-        with util.failable(compose, self.can_fail, variant, arch, 'ostree-installer'):
+        with util.failable(compose, self.can_fail, variant, arch, 'ostree-installer',
+                           logger=self.pool._logger):
             self.worker(compose, variant, arch, config)

     def worker(self, compose, variant, arch, config):
@@ -451,8 +451,10 @@ def process_args(fmt, args):


 @contextlib.contextmanager
-def failable(compose, can_fail, variant, arch, deliverable, subvariant=None):
+def failable(compose, can_fail, variant, arch, deliverable, subvariant=None, logger=None):
     """If a deliverable can fail, log a message and go on as if it succeeded."""
+    if not logger:
+        logger = compose._logger
     msg = deliverable.replace('-', ' ').capitalize()
     if can_fail:
         compose.attempt_deliverable(variant, arch, deliverable, subvariant)
@@ -468,11 +470,11 @@ def failable(compose, can_fail, variant, arch, deliverable, subvariant=None):
         ident = 'variant %s, arch %s' % (variant.uid if variant else 'None', arch)
         if subvariant:
             ident += ', subvariant %s' % subvariant
-        compose.log_info('[FAIL] %s (%s) failed, but going on anyway.'
+        logger.info('[FAIL] %s (%s) failed, but going on anyway.'
                     % (msg, ident))
-        compose.log_info(str(exc))
+        logger.info(str(exc))
         tb = traceback.format_exc()
-        compose.log_debug(tb)
+        logger.debug(tb)


 def can_arch_fail(failable_arches, arch):
@@ -60,6 +60,11 @@ class DummyCompose(object):
                                        type='variant', is_empty=False),
         }
         self.all_variants = self.variants.copy()
+
+        # for PhaseLoggerMixin
+        self._logger = mock.Mock()
+        self._logger.handlers = [mock.Mock()]
+
         self.log_info = mock.Mock()
         self.log_error = mock.Mock()
         self.log_debug = mock.Mock()
@@ -563,7 +563,7 @@ class BuildinstallThreadTestCase(PungiTestCase):
         with mock.patch('time.sleep'):
             t.process((compose, 'x86_64', None, cmd), 0)

-        compose.log_info.assert_has_calls([
+        compose._logger.info.assert_has_calls([
             mock.call('[FAIL] Buildinstall (variant None, arch x86_64) failed, but going on anyway.'),
             mock.call('Runroot task failed: 1234. See %s/logs/x86_64/buildinstall.x86_64.log for more details.'
                       % self.topdir)
@@ -599,7 +599,7 @@ class BuildinstallThreadTestCase(PungiTestCase):
         with mock.patch('time.sleep'):
             t.process((compose, 'x86_64', compose.variants['Server'], cmd), 0)

-        compose.log_info.assert_has_calls([
+        compose._logger.info.assert_has_calls([
             mock.call('[FAIL] Buildinstall (variant Server, arch x86_64) failed, but going on anyway.'),
             mock.call('Runroot task failed: 1234. See %s/logs/x86_64/buildinstall-Server.x86_64.log for more details.' % self.topdir)
         ])
@@ -49,12 +49,13 @@ class CreateisoPhaseTest(helpers.PungiTestCase):
         pool = ThreadPool.return_value

         phase = createiso.CreateisoPhase(compose)
+        phase.logger = mock.Mock()
         phase.run()

         self.assertEqual(len(pool.add.call_args_list), 0)
         self.assertEqual(pool.queue_put.call_args_list, [])
         self.assertItemsEqual(
-            compose.log_warning.call_args_list,
+            phase.logger.warn.call_args_list,
             [mock.call('No RPMs found for Everything.x86_64, skipping ISO'),
              mock.call('No RPMs found for Everything.amd64, skipping ISO'),
              mock.call('No RPMs found for Everything.src, skipping ISO'),
@@ -93,7 +94,7 @@ class CreateisoPhaseTest(helpers.PungiTestCase):
                         [mock.call(compose, 'x86_64', compose.variants['Server'],
                                    disc_count=1, disc_num=1, split_iso_data=disc_data)])
         self.assertEqual(split_iso.call_args_list,
-                         [mock.call(compose, 'x86_64', compose.variants['Server'], no_split=False)])
+                         [mock.call(compose, 'x86_64', compose.variants['Server'], no_split=False, logger=phase.logger)])
         self.assertEqual(len(pool.add.call_args_list), 1)
         self.maxDiff = None
         self.assertItemsEqual(
@@ -160,8 +161,8 @@ class CreateisoPhaseTest(helpers.PungiTestCase):
                                    disc_count=1, disc_num=1, split_iso_data=disc_data)])
         self.assertItemsEqual(
             split_iso.call_args_list,
-            [mock.call(compose, 'x86_64', compose.variants['Server'], no_split=True),
-             mock.call(compose, 'src', compose.variants['Server'], no_split=False)])
+            [mock.call(compose, 'x86_64', compose.variants['Server'], no_split=True, logger=phase.logger),
+             mock.call(compose, 'src', compose.variants['Server'], no_split=False, logger=phase.logger)])
         self.assertEqual(len(pool.add.call_args_list), 2)
         self.maxDiff = None
         self.assertItemsEqual(
@@ -463,11 +464,12 @@ class CreateisoThreadTest(helpers.PungiTestCase):
         run_runroot = KojiWrapper.return_value.run_runroot_cmd
         run_runroot.side_effect = helpers.boom

-        t = createiso.CreateIsoThread(mock.Mock())
+        pool = mock.Mock()
+        t = createiso.CreateIsoThread(pool)
         with mock.patch('time.sleep'):
             t.process((compose, cmd, compose.variants['Server'], 'x86_64'), 1)

-        compose.log_info.assert_has_calls([
+        pool._logger.info.assert_has_calls([
             mock.call('[FAIL] Iso (variant Server, arch x86_64) failed, but going on anyway.'),
             mock.call('BOOM')
         ])
@@ -505,11 +507,12 @@ class CreateisoThreadTest(helpers.PungiTestCase):
             'task_id': '1234',
         }

-        t = createiso.CreateIsoThread(mock.Mock())
+        pool = mock.Mock()
+        t = createiso.CreateIsoThread(pool)
         with mock.patch('time.sleep'):
             t.process((compose, cmd, compose.variants['Server'], 'x86_64'), 1)

-        compose.log_info.assert_has_calls([
+        pool._logger.info.assert_has_calls([
             mock.call('[FAIL] Iso (variant Server, arch x86_64) failed, but going on anyway.'),
             mock.call('Runroot task failed: 1234. See %s for more details.'
                       % (self.topdir + '/logs/x86_64/createiso-image-name.x86_64.log'))
@@ -585,11 +588,12 @@ class CreateisoThreadTest(helpers.PungiTestCase):
         }
         run.side_effect = helpers.boom

-        t = createiso.CreateIsoThread(mock.Mock())
+        pool = mock.Mock()
+        t = createiso.CreateIsoThread(pool)
         with mock.patch('time.sleep'):
             t.process((compose, cmd, compose.variants['Server'], 'x86_64'), 1)

-        compose.log_info.assert_has_calls([
+        pool._logger.info.assert_has_calls([
             mock.call('[FAIL] Iso (variant Server, arch x86_64) failed, but going on anyway.'),
             mock.call('BOOM')
         ])
@@ -718,7 +722,7 @@ class SplitIsoTest(helpers.PungiTestCase):
                        os.path.join(base_path, 'Packages/b/bash.rpm')],
              'size': 5400166400}])
         self.assertEqual(
-            compose.log_warning.call_args_list,
+            compose._logger.warn.call_args_list,
             [mock.call('ISO for Server.x86_64 does not fit on single media! '
                        'It is 710652160 bytes too big. (Total size: 5400166400 B)')]
         )
@@ -747,7 +747,7 @@ class TestCreateImageBuildThread(PungiTestCase):
         with mock.patch('time.sleep'):
             t.process((compose, cmd), 1)

-        compose.log_info.assert_has_calls([
+        pool._logger.info.assert_has_calls([
             mock.call('[FAIL] Image build (variant Client, arch *, subvariant Client) failed, but going on anyway.'),
             mock.call('ImageBuild task failed: 1234. See %s for more details.'
                       % (os.path.join(self.topdir,
@@ -792,7 +792,7 @@ class TestCreateImageBuildThread(PungiTestCase):
         with mock.patch('time.sleep'):
             t.process((compose, cmd), 1)

-        compose.log_info.assert_has_calls([
+        pool._logger.info.assert_has_calls([
             mock.call('[FAIL] Image build (variant Client, arch *, subvariant Client) failed, but going on anyway.'),
             mock.call('BOOM'),
         ])
@@ -727,7 +727,7 @@ class TestCreateLiveImageThread(PungiTestCase):
         with mock.patch('time.sleep'):
             t.process((compose, cmd, compose.variants['Client'], 'amd64'), 1)

-        compose.log_info.assert_has_calls([
+        pool._logger.info.assert_has_calls([
             mock.call('[FAIL] Live (variant Client, arch amd64, subvariant Client) failed, but going on anyway.'),
             mock.call('LiveImage task failed: 123. See %s/logs/amd64/liveimage-None-None-xyz.amd64.log for more details.'
                       % self.topdir)
@@ -766,7 +766,7 @@ class TestCreateLiveImageThread(PungiTestCase):
         with mock.patch('time.sleep'):
             t.process((compose, cmd, compose.variants['Client'], 'amd64'), 1)

-        compose.log_info.assert_has_calls([
+        pool._logger.info.assert_has_calls([
             mock.call('[FAIL] Live (variant Client, arch amd64, subvariant Client) failed, but going on anyway.'),
             mock.call('BOOM')
         ])
@@ -516,7 +516,7 @@ class TestLiveMediaThread(PungiTestCase):
         with mock.patch('time.sleep'):
             t.process((compose, compose.variants['Server'], config), 1)

-        compose.log_info.assert_has_calls([
+        pool._logger.info.assert_has_calls([
             mock.call('[FAIL] Live media (variant Server, arch *, subvariant KDE) failed, but going on anyway.'),
             mock.call('Live media task failed: 1234. See %s for more details.'
                       % (os.path.join(self.topdir, 'logs/amd64-x86_64/livemedia-Server-KDE.amd64-x86_64.log')))
@@ -559,7 +559,7 @@ class TestLiveMediaThread(PungiTestCase):
         with mock.patch('time.sleep'):
             t.process((compose, compose.variants['Server'], config), 1)

-        compose.log_info.assert_has_calls([
+        pool._logger.info.assert_has_calls([
             mock.call('[FAIL] Live media (variant Server, arch *, subvariant KDE) failed, but going on anyway.'),
             mock.call('BOOM')
         ])
@@ -65,14 +65,14 @@ class MediaSplitterTestCase(unittest.TestCase):

     def assertFreeSpace(self, free, total):
         self.assertEqual(
-            self.compose.mock_calls,
-            [mock.call.log_debug('MediaSplitter: free space on single media would be %s. '
+            self.compose._logger.debug.mock_calls,
+            [mock.call('MediaSplitter: free space on single media would be %s. '
                        'Total size of single medium: %s.' % (free, total))])

     def assertUnlimited(self, total):
         self.assertEqual(
-            self.compose.mock_calls,
-            [mock.call.log_debug('MediaSplitter: Total size of single medium: %s.' % total)])
+            self.compose._logger.debug.mock_calls,
+            [mock.call('MediaSplitter: Total size of single medium: %s.' % total)])

     def test_sum_size(self):
         ms = media_split.MediaSplitter(bl(100))
@@ -340,7 +340,7 @@ class OstreeThreadTest(helpers.PungiTestCase):
         t = ostree.OstreeInstallerThread(pool)

         t.process((self.compose, self.compose.variants['Everything'], 'x86_64', cfg), 1)
-        self.compose.log_info.assert_has_calls([
+        pool._logger.info.assert_has_calls([
             mock.call('[FAIL] Ostree installer (variant Everything, arch x86_64) failed, but going on anyway.'),
             mock.call('BOOM')
         ])
@@ -370,7 +370,7 @@ class OstreeThreadTest(helpers.PungiTestCase):
         t = ostree.OstreeInstallerThread(pool)

         t.process((self.compose, self.compose.variants['Everything'], 'x86_64', cfg), 1)
-        self.compose.log_info.assert_has_calls([
+        pool._logger.info.assert_has_calls([
             mock.call('[FAIL] Ostree installer (variant Everything, arch x86_64) failed, but going on anyway.'),
             mock.call('Runroot task failed: 1234. See %s/logs/x86_64/ostree_installer/runroot.log for more details.'
                       % self.topdir)
@@ -147,7 +147,7 @@ class OSTreeThreadTest(helpers.PungiTestCase):

         t.process((self.compose, self.compose.variants['Everything'], 'x86_64', self.cfg), 1)

-        self.compose.log_info.assert_has_calls([
+        self.compose._logger.info.assert_has_calls([
             mock.call('[FAIL] Ostree (variant Everything, arch x86_64) failed, but going on anyway.'),
             mock.call('Runroot task failed: 1234. See %s for more details.'
                       % (self.topdir + '/logs/x86_64/Everything/ostree-1/runroot.log'))
@@ -166,7 +166,7 @@ class OSTreeThreadTest(helpers.PungiTestCase):

         t.process((self.compose, self.compose.variants['Everything'], 'x86_64', self.cfg), 1)

-        self.compose.log_info.assert_has_calls([
+        self.compose._logger.info.assert_has_calls([
             mock.call('[FAIL] Ostree (variant Everything, arch x86_64) failed, but going on anyway.'),
             mock.call('BOOM')
         ])