diff --git a/lorax-composer/.buildinfo b/lorax-composer/.buildinfo new file mode 100644 index 00000000..ee6d8d3b --- /dev/null +++ b/lorax-composer/.buildinfo @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. +config: 972a8f4b79671d03d004b6b084c1be20 +tags: fbb0d17656682115ca4d033fb2f83ba1 diff --git a/lorax-composer/.doctrees/environment.pickle b/lorax-composer/.doctrees/environment.pickle new file mode 100644 index 00000000..1d5ac8e1 Binary files /dev/null and b/lorax-composer/.doctrees/environment.pickle differ diff --git a/lorax-composer/.doctrees/index.doctree b/lorax-composer/.doctrees/index.doctree new file mode 100644 index 00000000..e632e201 Binary files /dev/null and b/lorax-composer/.doctrees/index.doctree differ diff --git a/lorax-composer/.doctrees/intro.doctree b/lorax-composer/.doctrees/intro.doctree new file mode 100644 index 00000000..004bc8f6 Binary files /dev/null and b/lorax-composer/.doctrees/intro.doctree differ diff --git a/lorax-composer/.doctrees/lorax-composer.doctree b/lorax-composer/.doctrees/lorax-composer.doctree new file mode 100644 index 00000000..996b9f0e Binary files /dev/null and b/lorax-composer/.doctrees/lorax-composer.doctree differ diff --git a/lorax-composer/.doctrees/modules.doctree b/lorax-composer/.doctrees/modules.doctree new file mode 100644 index 00000000..dede9510 Binary files /dev/null and b/lorax-composer/.doctrees/modules.doctree differ diff --git a/lorax-composer/.doctrees/pylorax.api.doctree b/lorax-composer/.doctrees/pylorax.api.doctree new file mode 100644 index 00000000..a212f8b7 Binary files /dev/null and b/lorax-composer/.doctrees/pylorax.api.doctree differ diff --git a/lorax-composer/.doctrees/pylorax.doctree b/lorax-composer/.doctrees/pylorax.doctree new file mode 100644 index 00000000..4a6f5f7f Binary files /dev/null and b/lorax-composer/.doctrees/pylorax.doctree differ diff --git 
a/lorax-composer/_modules/index.html b/lorax-composer/_modules/index.html new file mode 100644 index 00000000..81e79b79 --- /dev/null +++ b/lorax-composer/_modules/index.html @@ -0,0 +1,117 @@ + + + + + + +
+ + +
+#
+# __init__.py
+#
+# Copyright (C) 2010 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s): Martin Gracik <mgracik@redhat.com>
+# David Cantrell <dcantrell@redhat.com>
+# Will Woods <wwoods@redhat.com>
+
+# set up logging
+import logging
+logger = logging.getLogger("pylorax")
+logger.addHandler(logging.NullHandler())
+
+import sys
+import os
+import ConfigParser
+import tempfile
+import locale
+from subprocess import CalledProcessError
+import selinux
+
+from pylorax.base import BaseLoraxClass, DataHolder
+import pylorax.output as output
+
+import yum
+import pylorax.ltmpl as ltmpl
+
+import pylorax.imgutils as imgutils
+from pylorax.sysutils import joinpaths, linktree, remove
+from rpmUtils.arch import getBaseArch
+
+from pylorax.treebuilder import RuntimeBuilder, TreeBuilder
+from pylorax.buildstamp import BuildStamp
+from pylorax.treeinfo import TreeInfo
+from pylorax.discinfo import DiscInfo
+from pylorax.executils import runcmd, runcmd_output
+
+# get lorax version
+try:
+ import pylorax.version
+except ImportError:
+ vernum = "devel"
+else:
+ vernum = pylorax.version.num
+
+# List of drivers to remove on ppc64 arch to keep initrd < 32MiB
+REMOVE_PPC64_DRIVERS = "floppy scsi_debug nouveau radeon cirrus mgag200"
+REMOVE_PPC64_MODULES = "drm plymouth"
+
class ArchData(DataHolder):
    """Architecture details derived from the build arch: basearch, libdir and BCJ filter."""

    # Architectures whose libraries are installed under lib64 instead of lib
    lib64_arches = ("x86_64", "ppc64", "ppc64le", "s390x", "ia64", "aarch64")

    # Base architecture -> xz BCJ filter name for that CPU family
    bcj_arch = {"i386": "x86", "x86_64": "x86",
                "ppc": "powerpc", "ppc64": "powerpc", "ppc64le": "powerpc",
                "arm": "arm", "armhfp": "arm"}

    def __init__(self, buildarch):
        DataHolder.__init__(self, buildarch=buildarch)
        base = getBaseArch(buildarch)
        self.basearch = base
        if base in self.lib64_arches:
            self.libdir = "lib64"
        else:
            self.libdir = "lib"
        # None when there is no BCJ filter for this architecture
        self.bcj = self.bcj_arch.get(base)
+
class Lorax(BaseLoraxClass):
    """Driver that installs the runtime packages and builds the output tree.

    Usage: construct, call configure() to load /etc/lorax/lorax.conf, then
    call run() with a yum.YumBase object and the product information.
    """

    def __init__(self):
        BaseLoraxClass.__init__(self)
        self._configured = False   # run() asserts this; set True by configure()
        self.conf = None           # SafeConfigParser with the lorax settings
        self.outputdir = None      # where the finished tree and images land
        self.workdir = None        # scratch directory for the build
        self.inroot = None         # yum installroot (the runtime chroot), set in run()
        self.arch = None           # ArchData for the build architecture
        self.product = None        # DataHolder with name/version/release/...
        self.debug = False

        # set locale to C
        locale.setlocale(locale.LC_ALL, 'C')

    def configure(self, conf_file="/etc/lorax/lorax.conf"):
        """Build the in-memory configuration and set up output/environment.

        Defaults are installed first; conf_file (if it exists) overrides them.

        :param conf_file: path to the lorax configuration file
        """
        self.conf = ConfigParser.SafeConfigParser()

        # set defaults
        self.conf.add_section("lorax")
        self.conf.set("lorax", "debug", "1")
        self.conf.set("lorax", "sharedir", "/usr/share/lorax")

        self.conf.add_section("output")
        self.conf.set("output", "colors", "1")
        self.conf.set("output", "encoding", "utf-8")
        self.conf.set("output", "ignorelist", "/usr/share/lorax/ignorelist")

        self.conf.add_section("templates")
        self.conf.set("templates", "ramdisk", "ramdisk.ltmpl")

        self.conf.add_section("yum")
        self.conf.set("yum", "skipbroken", "0")

        self.conf.add_section("compression")
        self.conf.set("compression", "type", "xz")
        self.conf.set("compression", "args", "")
        self.conf.set("compression", "bcj", "on")

        # read the config file
        if os.path.isfile(conf_file):
            self.conf.read(conf_file)

        # set up the output
        self.debug = self.conf.getboolean("lorax", "debug")
        output_level = output.DEBUG if self.debug else output.INFO

        colors = self.conf.getboolean("output", "colors")
        encoding = self.conf.get("output", "encoding")

        self.output.basic_config(output_level=output_level,
                                 colors=colors, encoding=encoding)

        # non-empty, non-comment lines of the ignorelist file are suppressed
        # from the output
        ignorelist = self.conf.get("output", "ignorelist")
        if os.path.isfile(ignorelist):
            with open(ignorelist, "r") as fobj:
                for line in fobj:
                    line = line.strip()
                    if line and not line.startswith("#"):
                        self.output.ignore(line)

        # cron does not have sbin in PATH,
        # so we have to add it ourselves
        os.environ["PATH"] = "{0}:/sbin:/usr/sbin".format(os.environ["PATH"])

        # remove some environmental variables that can cause problems with package scripts
        env_remove = ('DISPLAY', 'DBUS_SESSION_BUS_ADDRESS')
        _ = [os.environ.pop(k) for k in env_remove if k in os.environ]

        self._configured = True

    def init_stream_logging(self):
        """Attach a StreamHandler so INFO-and-above records reach the console."""
        sh = logging.StreamHandler()
        sh.setLevel(logging.INFO)
        logger.addHandler(sh)

    def init_file_logging(self, logdir, logname="pylorax.log"):
        """Attach a FileHandler writing DEBUG-and-above records to logdir/logname.

        The file is opened with mode "w", so a previous log is truncated.
        """
        fh = logging.FileHandler(filename=joinpaths(logdir, logname), mode="w")
        fh.setLevel(logging.DEBUG)
        logger.addHandler(fh)

    def run(self, ybo, product, version, release, variant="", bugurl="",
            isfinal=False, workdir=None, outputdir=None, buildarch=None, volid=None,
            domacboot=False, doupgrade=True, remove_temp=False,
            installpkgs=None,
            size=2,
            add_templates=None,
            add_template_vars=None,
            add_arch_templates=None,
            add_arch_template_vars=None,
            template_tempdir=None):
        """Install the runtime and build the output tree and boot images.

        Must be called after configure(). Requires root, SELinux disabled or
        Permissive, and a yum.YumBase whose installroot is the runtime chroot.
        Exits the process (sys.exit(1)) on any of these precondition failures.

        :param ybo: configured yum.YumBase; its conf.installroot becomes inroot
        :param product: product name used for .buildstamp/.treeinfo/volume id
        :param version: product version string
        :param release: product release string
        :param variant: optional product variant
        :param bugurl: bug reporting URL written into .buildstamp
        :param isfinal: whether this is a final (GA) build
        :param workdir: scratch dir; a temp dir is created when None
        :param outputdir: result dir; a temp dir is created when None
        :param buildarch: build architecture; autodetected from the anaconda
                          package when None
        :param volid: ISO volume id; defaults to "product version basearch",
                      must be at most 32 characters
        :param domacboot: also build mac boot bits (needs hfsplus-tools)
        :param doupgrade: also build the upgrade.img initramfs
        :param remove_temp: remove the work directory when finished
        :param installpkgs: extra packages to install into the runtime
        :param add_templates: extra runtime templates
        :param add_template_vars: variables for add_templates
        :param add_arch_templates: extra arch (tree) templates
        :param add_arch_template_vars: variables for add_arch_templates
        :param size: NOTE(review): accepted but not used anywhere in this
                     body — presumably kept for interface compatibility
        :param template_tempdir: NOTE(review): also accepted but unused here
        """
        assert self._configured

        installpkgs = installpkgs or []

        # mac boot images require hfsplus-tools; fail early if it is missing
        if domacboot:
            try:
                runcmd(["rpm", "-q", "hfsplus-tools"])
            except CalledProcessError:
                logger.critical("you need to install hfsplus-tools to create mac images")
                sys.exit(1)

        # set up work directory
        self.workdir = workdir or tempfile.mkdtemp(prefix="pylorax.work.")
        if not os.path.isdir(self.workdir):
            os.makedirs(self.workdir)

        # set up log directory
        logdir = '/var/log/lorax'
        if not os.path.isdir(logdir):
            os.makedirs(logdir)

        self.init_stream_logging()
        self.init_file_logging(logdir)

        logger.debug("version is %s", vernum)
        logger.debug("using work directory %s", self.workdir)
        logger.debug("using log directory %s", logdir)

        # set up output directory
        self.outputdir = outputdir or tempfile.mkdtemp(prefix="pylorax.out.")
        if not os.path.isdir(self.outputdir):
            os.makedirs(self.outputdir)
        logger.debug("using output directory %s", self.outputdir)

        # do we have root privileges?
        logger.info("checking for root privileges")
        if not os.geteuid() == 0:
            logger.critical("no root privileges")
            sys.exit(1)

        # is selinux disabled?
        # With selinux in enforcing mode the rpcbind package required for
        # dracut nfs module, which is in turn required by anaconda module,
        # will not get installed, because it's preinstall scriptlet fails,
        # resulting in an incomplete initial ramdisk image.
        # The reason is that the scriptlet runs tools from the shadow-utils
        # package in chroot, particularly groupadd and useradd to add the
        # required rpc group and rpc user. This operation fails, because
        # the selinux context on files in the chroot, that the shadow-utils
        # tools need to access (/etc/group, /etc/passwd, /etc/shadow etc.),
        # is wrong and selinux therefore disallows access to these files.
        logger.info("checking the selinux mode")
        if selinux.is_selinux_enabled() and selinux.security_getenforce():
            logger.critical("selinux must be disabled or in Permissive mode")
            sys.exit(1)

        # do we have a proper yum base object?
        logger.info("checking yum base object")
        if not isinstance(ybo, yum.YumBase):
            logger.critical("no yum base object")
            sys.exit(1)
        self.inroot = ybo.conf.installroot
        logger.debug("using install root: %s", self.inroot)

        if not buildarch:
            buildarch = get_buildarch(ybo)

        logger.info("setting up build architecture")
        self.arch = ArchData(buildarch)
        for attr in ('buildarch', 'basearch', 'libdir'):
            logger.debug("self.arch.%s = %s", attr, getattr(self.arch,attr))

        logger.info("setting up build parameters")
        product = DataHolder(name=product, version=version, release=release,
                             variant=variant, bugurl=bugurl, isfinal=isfinal)
        self.product = product
        logger.debug("product data: %s", product)

        # NOTE: if you change isolabel, you need to change pungi to match, or
        # the pungi images won't boot.
        isolabel = volid or "%s %s %s" % (self.product.name, self.product.version,
                                          self.arch.basearch)

        # ISO9660 volume ids are limited to 32 characters
        if len(isolabel) > 32:
            logger.fatal("the volume id cannot be longer than 32 characters")
            sys.exit(1)

        templatedir = self.conf.get("lorax", "sharedir")
        # NOTE: rb.root = ybo.conf.installroot (== self.inroot)
        rb = RuntimeBuilder(product=self.product, arch=self.arch,
                            yum=ybo, templatedir=templatedir,
                            installpkgs=installpkgs,
                            add_templates=add_templates,
                            add_template_vars=add_template_vars)

        logger.info("installing runtime packages")
        rb.yum.conf.skip_broken = self.conf.getboolean("yum", "skipbroken")
        rb.install()

        # write .buildstamp
        buildstamp = BuildStamp(self.product.name, self.product.version,
                                self.product.bugurl, self.product.isfinal, self.arch.buildarch)

        buildstamp.write(joinpaths(self.inroot, ".buildstamp"))

        if self.debug:
            rb.writepkglists(joinpaths(logdir, "pkglists"))
            rb.writepkgsizes(joinpaths(logdir, "original-pkgsizes.txt"))

        logger.info("doing post-install configuration")
        rb.postinstall()

        # write .discinfo
        discinfo = DiscInfo(self.product.release, self.arch.basearch)
        discinfo.write(joinpaths(self.outputdir, ".discinfo"))

        # hardlink-copy the installroot before cleanup so the tree builder
        # works on its own copy
        logger.info("backing up installroot")
        installroot = joinpaths(self.workdir, "installroot")
        linktree(self.inroot, installroot)

        logger.info("generating kernel module metadata")
        rb.generate_module_data()

        logger.info("cleaning unneeded files")
        rb.cleanup()

        if self.debug:
            rb.writepkgsizes(joinpaths(logdir, "final-pkgsizes.txt"))

        logger.info("creating the runtime image")
        runtime = "images/install.img"
        compression = self.conf.get("compression", "type")
        compressargs = self.conf.get("compression", "args").split()
        if self.conf.getboolean("compression", "bcj"):
            if self.arch.bcj:
                compressargs += ["-Xbcj", self.arch.bcj]
            else:
                logger.info("no BCJ filter for arch %s", self.arch.basearch)
        rb.create_runtime(joinpaths(installroot,runtime),
                          compression=compression, compressargs=compressargs)

        logger.info("preparing to build output tree and boot images")
        treebuilder = TreeBuilder(product=self.product, arch=self.arch,
                                  inroot=installroot, outroot=self.outputdir,
                                  runtime=runtime, isolabel=isolabel,
                                  domacboot=domacboot, doupgrade=doupgrade,
                                  templatedir=templatedir,
                                  add_templates=add_arch_templates,
                                  add_template_vars=add_arch_template_vars,
                                  workdir=self.workdir)

        logger.info("rebuilding initramfs images")
        dracut_args = ["--xz", "--install", "/.buildstamp", "--no-early-microcode", "--add", "fips"]
        anaconda_args = dracut_args + ["--add", "anaconda pollcdrom"]

        # ppc64 cannot boot an initrd > 32MiB so remove some drivers
        if self.arch.basearch in ("ppc64", "ppc64le"):
            dracut_args.extend(["--omit-drivers", REMOVE_PPC64_DRIVERS])

            # Only omit dracut modules from the initrd so that they're kept for
            # upgrade.img
            anaconda_args.extend(["--omit", REMOVE_PPC64_MODULES])

        treebuilder.rebuild_initrds(add_args=anaconda_args)

        if doupgrade:
            # Build upgrade.img. It'd be nice if these could coexist in the same
            # image, but that would increase the size of the anaconda initramfs,
            # which worries some people (esp. PPC tftpboot). So they're separate.
            try:
                # If possible, use the 'redhat-upgrade-tool' plymouth theme
                themes = runcmd_output(['plymouth-set-default-theme', '--list'],
                                       root=installroot)
                if 'redhat-upgrade-tool' in themes.splitlines():
                    os.environ['PLYMOUTH_THEME_NAME'] = 'redhat-upgrade-tool'
            except RuntimeError:
                pass
            upgrade_args = dracut_args + ["--add", "system-upgrade convertfs"]
            treebuilder.rebuild_initrds(add_args=upgrade_args, prefix="upgrade")

        logger.info("populating output tree and building boot images")
        treebuilder.build()

        # write .treeinfo file and we're done
        treeinfo = TreeInfo(self.product.name, self.product.version,
                            self.product.variant, self.arch.basearch)
        for section, data in treebuilder.treeinfo_data.items():
            treeinfo.add_section(section, data)
        treeinfo.write(joinpaths(self.outputdir, ".treeinfo"))

        # cleanup
        if remove_temp:
            remove(self.workdir)
+
+
def get_buildarch(ybo):
    """Return the arch of the available (non-source) anaconda package.

    Exits the process when no binary anaconda package is in the repositories,
    since nothing useful can be built without it.
    """
    pkglist = ybo.doPackageLists(patterns=["anaconda"]).available
    for pkg in pkglist:
        if pkg.arch == "src":
            continue
        return pkg.arch

    logger.critical("no anaconda package in the repository")
    sys.exit(1)
+
+#
+# lorax-composer API server
+#
+# Copyright (C) 2017 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+from pylorax.api.crossdomain import crossdomain
+
+__all__ = ["crossdomain"]
+
+# Copyright (C) 2018 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+""" Setup for composing an image
+
+Adding New Output Types
+-----------------------
+
+The new output type must add a kickstart template to ./share/composer/ where the
+name of the kickstart (without the trailing .ks) matches the entry in compose_args.
+
+The kickstart should not have any url or repo entries, these will be added at build
+time. The %packages section should be the last thing, and while it can contain mandatory
+packages required by the output type, it should not have the trailing %end because the
+package NEVRAs will be appended to it at build time.
+
+compose_args should have a name matching the kickstart, and it should set the novirt_install
+parameters needed to generate the desired output. Other types should be set to False.
+
+"""
+import logging
+log = logging.getLogger("lorax-composer")
+
+import os
+from glob import glob
+import pytoml as toml
+import shutil
+from uuid import uuid4
+
+from pyanaconda.simpleconfig import SimpleConfigFile
+
+from pylorax.api.projects import projects_depsolve, dep_nevra
+from pylorax.api.projects import ProjectsError
+from pylorax.api.recipes import read_recipe_and_id
+from pylorax.imgutils import default_image_name
+from pylorax.sysutils import joinpaths
+
+
def repo_to_ks(r, url="url"):
    """ Return a kickstart line with the correct args.

    Set url to "baseurl" if it is a repo, leave it as "url" for the installation url.
    """
    parts = []
    if url == "url":
        if not r.urls:
            raise RuntimeError("Cannot find a base url for %s" % r.name)

        # url is passed to Anaconda on the cmdline with --repo, so it cannot support a mirror
        # If a mirror is setup yum will return the list of mirrors in .urls
        # So just use the first one.
        parts.append('--%s="%s" ' % (url, r.urls[0]))
    elif r.metalink:
        # XXX Total Hack
        # RHEL7 kickstart doesn't support metalink. If the url has 'metalink' in it, rewrite it as 'mirrorlist'
        if "metalink" in r.metalink:
            log.info("RHEL7 does not support metalink, translating to mirrorlist")
            parts.append('--mirrorlist="%s" ' % r.metalink.replace("metalink", "mirrorlist"))
        else:
            log.error("Could not convert metalink to mirrorlist. %s", r.metalink)
            raise RuntimeError("Cannot convert metalink to mirrorlist: %s" % r.metalink)
    elif r.mirrorlist:
        parts.append('--mirrorlist="%s" ' % r.mirrorlist)
    elif r.baseurl:
        parts.append('--%s="%s" ' % (url, r.baseurl[0]))
    else:
        raise RuntimeError("Repo has no baseurl or mirror")

    if r.proxy:
        parts.append('--proxy="%s" ' % r.proxy)

    if not r.sslverify:
        parts.append('--noverifyssl')

    return "".join(parts)
+
def start_build(cfg, yumlock, gitlock, branch, recipe_name, compose_type, test_mode=0):
    """ Start the build

    :param cfg: Configuration object
    :type cfg: ComposerConfig
    :param yumlock: Lock and YumBase for depsolving
    :type yumlock: YumLock
    :param gitlock: Lock and Git repository holding the recipes
    :type gitlock: GitLock
    :param branch: Branch of the recipe repository to read the recipe from
    :type branch: str
    :param recipe_name: The recipe to build
    :type recipe_name: str
    :param compose_type: The type of output to create from the recipe
    :type compose_type: str
    :param test_mode: When > 0, write a TEST marker so the queue fakes the compose
    :type test_mode: int
    :returns: Unique ID for the build that can be used to track its status
    :rtype: str
    :raises RuntimeError: on an invalid compose_type, depsolve failure, or
        when no yum repos are enabled
    """
    share_dir = cfg.get("composer", "share_dir")
    lib_dir = cfg.get("composer", "lib_dir")

    # Make sure compose_type is valid
    if compose_type not in compose_types(share_dir):
        raise RuntimeError("Invalid compose type (%s), must be one of %s" % (compose_type, compose_types(share_dir)))

    with gitlock.lock:
        (commit_id, recipe) = read_recipe_and_id(gitlock.repo, branch, recipe_name)

    # Combine modules and packages and depsolve the list
    # TODO include the version/glob in the depsolving
    module_names = map(lambda m: m["name"], recipe["modules"] or [])
    package_names = map(lambda p: p["name"], recipe["packages"] or [])
    projects = sorted(set(module_names+package_names), key=lambda n: n.lower())
    deps = []
    try:
        with yumlock.lock:
            deps = projects_depsolve(yumlock.yb, projects)
    except ProjectsError as e:
        log.error("start_build depsolve: %s", str(e))
        raise RuntimeError("Problem depsolving %s: %s" % (recipe["name"], str(e)))

    # Create the results directory
    build_id = str(uuid4())
    results_dir = joinpaths(lib_dir, "results", build_id)
    os.makedirs(results_dir)

    # Write the recipe commit hash
    commit_path = joinpaths(results_dir, "COMMIT")
    with open(commit_path, "w") as f:
        f.write(commit_id)

    # Write the original recipe
    recipe_path = joinpaths(results_dir, "recipe.toml")
    with open(recipe_path, "w") as f:
        f.write(recipe.toml())

    # Write the frozen recipe (with the exact depsolved NEVRAs)
    frozen_recipe = recipe.freeze(deps)
    recipe_path = joinpaths(results_dir, "frozen.toml")
    with open(recipe_path, "w") as f:
        f.write(frozen_recipe.toml())

    # Read the kickstart template for this type and copy it into the results
    ks_template_path = joinpaths(share_dir, "composer", compose_type) + ".ks"
    shutil.copy(ks_template_path, results_dir)
    ks_template = open(ks_template_path, "r").read()

    # Write out the dependencies to the results dir
    deps_path = joinpaths(results_dir, "deps.toml")
    with open(deps_path, "w") as f:
        f.write(toml.dumps({"packages":deps}).encode("UTF-8"))

    # Create the final kickstart with repos and package list
    ks_path = joinpaths(results_dir, "final-kickstart.ks")
    with open(ks_path, "w") as f:
        with yumlock.lock:
            repos = yumlock.yb.repos.listEnabled()
        if not repos:
            raise RuntimeError("No enabled repos, canceling build.")

        # First enabled repo becomes the installation url, the rest are
        # added as additional repo lines
        ks_url = repo_to_ks(repos[0], "url")
        log.debug("url = %s", ks_url)
        f.write('url %s\n' % ks_url)
        for idx, r in enumerate(repos[1:]):
            ks_repo = repo_to_ks(r, "baseurl")
            log.debug("repo composer-%s = %s", idx, ks_repo)
            f.write('repo --name="composer-%s" %s\n' % (idx, ks_repo))

        f.write(ks_template)

        # The template's %packages section is left open so the depsolved
        # NEVRAs can be appended here, then closed with %end
        for d in deps:
            f.write(dep_nevra(d)+"\n")

        f.write("%end\n")

    # Setup the config to pass to novirt_install
    log_dir = joinpaths(results_dir, "logs/")
    cfg_args = compose_args(compose_type)

    # Get the title, project, and release version from the host
    if not os.path.exists("/etc/os-release"):
        log.error("/etc/os-release is missing, cannot determine product or release version")
    # NOTE(review): execution continues even when /etc/os-release is missing;
    # presumably SimpleConfigFile just yields empty values then — confirm
    os_release = SimpleConfigFile("/etc/os-release")
    os_release.read()

    log.debug("os_release = %s", os_release)

    cfg_args["title"] = os_release.get("PRETTY_NAME")
    cfg_args["project"] = os_release.get("NAME")
    cfg_args["releasever"] = os_release.get("VERSION_ID")
    cfg_args["volid"] = ""

    # NOTE(review): this update() overwrites the project/releasever values
    # just read from os-release with hard-coded RHEL7 values — confirm intended
    cfg_args.update({
        "compression": "xz",
        "compress_args": [],
        "ks": [ks_path],
        "project": "Red Hat Enterprise Linux",
        "releasever": "7",
        "logfile": log_dir
    })
    with open(joinpaths(results_dir, "config.toml"), "w") as f:
        f.write(toml.dumps(cfg_args).encode("UTF-8"))

    # Set the initial status
    open(joinpaths(results_dir, "STATUS"), "w").write("WAITING")

    # Set the test mode, if requested
    if test_mode > 0:
        open(joinpaths(results_dir, "TEST"), "w").write("%s" % test_mode)

    # Symlinking into queue/new/ is what hands the build to the compose queue
    log.info("Adding %s with recipe %s output type %s to compose queue", build_id, recipe["name"], compose_type)
    os.symlink(results_dir, joinpaths(lib_dir, "queue/new/", build_id))

    return build_id
+
# Supported output types
def compose_types(share_dir):
    """ Returns a list of the supported output types

    The output types come from the kickstart names in /usr/share/lorax/composer/*ks
    """
    types = []
    for ks in glob(joinpaths(share_dir, "composer/*.ks")):
        # Strip the directory and the trailing ".ks"
        types.append(os.path.basename(ks)[:-3])
    return types
+
def compose_args(compose_type):
    """ Returns the settings to pass to novirt_install for the compose type

    :param compose_type: The type of compose to create, from `compose_types()`
    :type compose_type: str

    This will return a dict of options that match the ArgumentParser options for livemedia-creator.
    These are the ones the define the type of output, it's filename, etc.
    Other options will be filled in by `make_compose()`
    """
    # Options common to every output type; each type below overrides the
    # handful of entries that differ.
    common = {"make_iso": False,
              "make_disk": False,
              "make_fsimage": False,
              "make_appliance": False,
              "make_ami": False,
              "make_tar": False,
              "make_pxe_live": False,
              "make_ostree_live": False,
              "ostree": False,
              "live_rootfs_keep_size": False,
              "live_rootfs_size": 0,
              "qcow2": False,
              "qcow2_arg": [],
              "image_only": True,
              "app_name": None,
              "app_template": None,
              "app_file": None}

    tar_args = dict(common)
    tar_args.update({"make_tar": True,
                     "image_name": default_image_name("xz", "root.tar")})

    live_iso_args = dict(common)
    live_iso_args.update({"make_iso": True,
                          "image_name": "live.iso",
                          "fs_label": "Anaconda",  # Live booting may expect this to be 'Anaconda'
                          "image_only": False})

    disk_args = dict(common)
    disk_args.update({"make_disk": True,
                      "image_name": "disk.img",
                      "fs_label": ""})

    return {"tar": tar_args,
            "live-iso": live_iso_args,
            "partitioned-disk": disk_args}[compose_type]
+
def move_compose_results(cfg, results_dir):
    """Move the final image to the results_dir and cleanup the unneeded compose files

    :param cfg: Build configuration
    :type cfg: DataHolder
    :param results_dir: Directory to put the results into
    :type results_dir: str
    """
    result_dir = cfg["result_dir"]
    image_name = cfg["image_name"]

    if cfg["make_tar"]:
        shutil.move(joinpaths(result_dir, image_name), results_dir)
    elif cfg["make_iso"]:
        # Output from live iso is always a boot.iso under images/, move and rename it
        shutil.move(joinpaths(result_dir, "images/boot.iso"), joinpaths(results_dir, image_name))
    elif cfg["make_disk"]:
        shutil.move(joinpaths(result_dir, image_name), joinpaths(results_dir, image_name))

    # Cleanup the compose directory, but only if it looks like a compose directory
    if os.path.basename(result_dir) == "compose":
        shutil.rmtree(result_dir)
    else:
        log.error("Incorrect compose directory, not cleaning up")
+
+#
+# Copyright (C) 2017 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+import ConfigParser
+import grp
+import os
+
+from pylorax.sysutils import joinpaths
+
class ComposerConfig(ConfigParser.SafeConfigParser):
    """SafeConfigParser with a get() variant that falls back to a default."""

    def get_default(self, section, option, default):
        """Return the value of section/option, or default when it is missing."""
        try:
            value = self.get(section, option)
        except ConfigParser.Error:
            value = default
        return value
+
+
def configure(conf_file="/etc/lorax/composer.conf", root_dir="/", test_config=False):
    """lorax-composer configuration

    :param conf_file: Path to the config file overriding the default settings
    :type conf_file: str
    :param root_dir: Directory to prepend to paths, defaults to /
    :type root_dir: str
    :param test_config: Set to True to skip reading conf_file
    :type test_config: bool
    """
    conf = ComposerConfig()

    # Default paths, all anchored under root_dir
    conf.add_section("composer")
    for option, path in (("share_dir", "/usr/share/lorax/"),
                         ("lib_dir", "/var/lib/lorax/composer/"),
                         ("yum_conf", "/var/tmp/composer/yum.conf"),
                         ("repo_dir", "/var/tmp/composer/repos.d/"),
                         ("cache_dir", "/var/tmp/composer/cache/")):
        conf.set("composer", option, os.path.realpath(joinpaths(root_dir, path)))

    # Only root may use the API by default
    conf.add_section("users")
    conf.set("users", "root", "1")

    # Enable all available repo files by default
    conf.add_section("repos")
    conf.set("repos", "use_system_repos", "1")
    conf.set("repos", "enabled", "*")

    # Let an existing config file override the defaults
    if not test_config and os.path.isfile(conf_file):
        conf.read(conf_file)

    return conf
+
def make_yum_dirs(conf):
    """Make any missing yum directories

    :param conf: The configuration to use
    :type conf: ComposerConfig
    :returns: None
    """
    # Each option names a file or directory; make sure its parent dir exists
    for option in ("yum_conf", "repo_dir", "cache_dir"):
        parent = os.path.dirname(conf.get("composer", option))
        if not os.path.exists(parent):
            os.makedirs(parent)
+
def make_queue_dirs(conf, gid):
    """Make any missing queue directories

    :param conf: The configuration to use
    :type conf: ComposerConfig
    :param gid: Group ID that has access to the queue directories
    :type gid: int
    :returns: list of errors
    :rtype: list of str
    """
    errors = []
    lib_dir = conf.get("composer", "lib_dir")
    for subdir in ("queue/run", "queue/new", "results"):
        path = joinpaths(lib_dir, subdir)
        if os.path.exists(path):
            # Existing directory: verify its permissions and ownership
            info = os.stat(path)
            if info.st_mode & 0o006 != 0:
                errors.append("Incorrect permissions on %s, no o+rw permissions are allowed." % path)

            if info.st_gid != gid or info.st_uid != 0:
                gr_name = grp.getgrgid(gid).gr_name
                errors.append("%s should be owned by root:%s" % (path, gr_name))
        else:
            # Create a root:gid 0771 directory; clear umask so the mode sticks
            orig_umask = os.umask(0)
            os.makedirs(path, 0o771)
            os.chown(path, 0, gid)
            os.umask(orig_umask)

    return errors
+
+#
+# Copyright (C) 2017 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# crossdomain decorator from - http://flask.pocoo.org/snippets/56/
+from datetime import timedelta
+from flask import make_response, request, current_app
+from functools import update_wrapper
+
+
def crossdomain(origin, methods=None, headers=None,
                max_age=21600, attach_to_all=True,
                automatic_options=True):
    """Decorator factory that adds CORS (Access-Control-*) headers to a Flask view.

    :param origin: Allowed origin(s); a single value or a list of values
    :param methods: Allowed HTTP methods; when None, whatever Flask's default
                    OPTIONS response advertises is used
    :param headers: Allowed request headers; a string or an iterable of strings
    :param max_age: Preflight cache lifetime in seconds (a datetime.timedelta
                    is also accepted)
    :param attach_to_all: When False, only OPTIONS responses get the headers
    :param automatic_options: When True, answer OPTIONS with Flask's default
                              options response instead of calling the view
    """
    # Normalize the arguments once, at decoration time
    if methods is not None:
        methods = ', '.join(sorted(x.upper() for x in methods))
    if headers is not None and not isinstance(headers, basestring):
        # NOTE: basestring is Python 2 only, matching the rest of this codebase
        headers = ', '.join(x.upper() for x in headers)
    if not isinstance(origin, list):
        origin = [origin]
    if isinstance(max_age, timedelta):
        max_age = int(max_age.total_seconds())

    def get_methods():
        # Explicit method list wins; otherwise mirror the Allow header of
        # Flask's default OPTIONS response
        if methods is not None:
            return methods

        options_resp = current_app.make_default_options_response()
        return options_resp.headers['allow']

    def decorator(f):
        def wrapped_function(*args, **kwargs):
            if automatic_options and request.method == 'OPTIONS':
                resp = current_app.make_default_options_response()
            else:
                resp = make_response(f(*args, **kwargs))
            if not attach_to_all and request.method != 'OPTIONS':
                return resp

            h = resp.headers

            # One Access-Control-Allow-Origin header per configured origin
            h.extend([("Access-Control-Allow-Origin", orig) for orig in origin])
            h['Access-Control-Allow-Methods'] = get_methods()
            h['Access-Control-Max-Age'] = str(max_age)
            if headers is not None:
                h['Access-Control-Allow-Headers'] = headers
            return resp

        # Disable Flask's automatic OPTIONS handling so wrapped_function sees it
        f.provide_automatic_options = False
        f.required_methods = ['OPTIONS']
        return update_wrapper(wrapped_function, f)
    return decorator
+
+#
+# Copyright (C) 2017 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+import logging
+log = logging.getLogger("lorax-composer")
+
+import time
+
+from yum.Errors import YumBaseError
+
+TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
+
+
+
def api_time(t):
    """Convert seconds since the epoch to a local time string.

    :param t: Seconds since epoch
    :type t: int
    :returns: Local time formatted as TIME_FORMAT
    :rtype: str
    """
    local = time.localtime(t)
    return time.strftime(TIME_FORMAT, local)
+
+
def api_changelog(changelog):
    """Return the text of the most recent changelog entry.

    :param changelog: A list of (time, author, text) tuples, newest first
    :type changelog: list of tuples
    :returns: The most recent changelog text or ""
    :rtype: str
    """
    # An empty changelog yields "" instead of raising
    try:
        return changelog[0][2]
    except IndexError:
        return ""
+
+
def yaps_to_project(yaps):
    """Extract basic project details from a YumAvailablePackageSqlite object

    :param yaps: Yum object with package details
    :type yaps: YumAvailablePackageSqlite
    :returns: A dict with the name, summary, description, and url.
    :rtype: dict

    upstream_vcs is hard-coded to UPSTREAM_VCS
    """
    project = dict(name=yaps.name,
                   summary=yaps.summary,
                   description=yaps.description,
                   homepage=yaps.url,
                   upstream_vcs="UPSTREAM_VCS")
    return project
+
+
def yaps_to_project_info(yaps):
    """Extract full project details, including build info, from a yum package object

    :param yaps: Yum object with package details
    :type yaps: YumAvailablePackageSqlite
    :returns: A dict with the project details, as well as epoch, release, arch, build_time, changelog, ...
    :rtype: dict

    metadata entries are hard-coded to {}
    """
    # Only a single build entry is constructed from the yum metadata
    build = {"epoch": yaps.epoch,
             "release": yaps.release,
             "arch": yaps.arch,
             "build_time": api_time(yaps.buildtime),
             "changelog": api_changelog(yaps.returnChangelog()),
             "build_config_ref": "BUILD_CONFIG_REF",
             "build_env_ref": "BUILD_ENV_REF",
             "metadata": {},
             "source": {"license": yaps.license,
                        "version": yaps.version,
                        "source_ref": "SOURCE_REF",
                        "metadata": {}}}

    info = dict(name=yaps.name,
                summary=yaps.summary,
                description=yaps.description,
                homepage=yaps.url,
                upstream_vcs="UPSTREAM_VCS",
                builds=[build])
    return info
+
+
def tm_to_dep(tm):
    """Extract the NEVRA fields from a yum TransactionMember object

    :param tm: A Yum transaction object
    :type tm: TransactionMember
    :returns: A dict with name, epoch, version, release, arch
    :rtype: dict
    """
    dep = dict(name=tm.name,
               epoch=tm.epoch,
               version=tm.version,
               release=tm.release,
               arch=tm.arch)
    return dep
+
+
def yaps_to_module(yaps):
    """Return the module entry for a yum package object

    :param yaps: Yum object with package details
    :type yaps: YumAvailablePackageSqlite
    :returns: A dict with name, and group_type
    :rtype: dict

    group_type is hard-coded to "rpm"
    """
    return dict(name=yaps.name, group_type="rpm")
+
+
def dep_evra(dep):
    """Return the epoch:version-release.arch string for a dependency

    :param dep: dependency dict
    :type dep: dict
    :returns: epoch:version-release.arch
    :rtype: str

    The epoch prefix is omitted when the epoch is "0".
    """
    vra = "%s-%s.%s" % (dep["version"], dep["release"], dep["arch"])
    if dep["epoch"] == "0":
        return vra
    return "%s:%s" % (dep["epoch"], vra)
+
def dep_nevra(dep):
    """Return the name-epoch:version-release.arch string for a dependency dict"""
    return "%s-%s" % (dep["name"], dep_evra(dep))
+
def projects_list(yb):
    """Return a list of all available projects

    :param yb: yum base object
    :type yb: YumBase
    :returns: List of project info dicts with name, summary, description, homepage, upstream_vcs
    :rtype: list of dicts
    :raises: ProjectsError if the yum query fails
    """
    try:
        ybl = yb.doPackageLists(pkgnarrow="available", showdups=False)
    except YumBaseError as e:
        raise ProjectsError("There was a problem listing projects: %s" % str(e))
    finally:
        # Always release the rpmdb, even on failure
        yb.closeRpmDB()
    projects = [yaps_to_project(p) for p in ybl.available]
    return sorted(projects, key=lambda p: p["name"].lower())
+
+
def projects_info(yb, project_names):
    """Return details about specific projects

    :param yb: yum base object
    :type yb: YumBase
    :param project_names: List of names of projects to get info about
    :type project_names: str
    :returns: List of project info dicts with yaps_to_project as well as epoch, version, release, etc.
    :rtype: list of dicts
    :raises: ProjectsError if the yum query fails
    """
    try:
        ybl = yb.doPackageLists(pkgnarrow="available", patterns=project_names, showdups=False)
    except YumBaseError as e:
        raise ProjectsError("There was a problem with info for %s: %s" % (project_names, str(e)))
    finally:
        # Always release the rpmdb, even on failure
        yb.closeRpmDB()
    details = [yaps_to_project_info(p) for p in ybl.available]
    return sorted(details, key=lambda p: p["name"].lower())
+
+
def projects_depsolve(yb, project_names):
    """Return the dependencies for a list of projects

    :param yb: yum base object
    :type yb: YumBase
    :param project_names: The projects to find the dependencies for
    :type project_names: List of Strings
    :returns: NEVRA's of the project and its dependencies
    :rtype: list of dicts
    :raises: ProjectsError if the transaction fails or cannot be built
    """
    try:
        # This resets the transaction
        yb.closeRpmDB()
        for name in project_names:
            yb.install(pattern=name)
        (rc, msg) = yb.buildTransaction()
        # return codes 0, 1, and 2 are not errors
        if rc not in [0, 1, 2]:
            raise ProjectsError("There was a problem depsolving %s: %s" % (project_names, msg))
        yb.tsInfo.makelists()
        members = yb.tsInfo.installed + yb.tsInfo.depinstalled
        deps = sorted([tm_to_dep(tm) for tm in members], key=lambda p: p["name"].lower())
    except YumBaseError as e:
        raise ProjectsError("There was a problem depsolving %s: %s" % (project_names, str(e)))
    finally:
        yb.closeRpmDB()
    return deps
+
+
def modules_list(yb, module_names):
    """Return a list of modules

    :param yb: yum base object
    :type yb: YumBase
    :param module_names: Names (or patterns) of the modules to list, None for all
    :type module_names: list of str or None
    :returns: List of module information dicts with name and group_type
    :rtype: list of dicts
    :raises: ProjectsError if the yum query fails

    Modules don't exist in RHEL7 so this only returns projects
    and sets the type to "rpm"
    """
    # NOTE: The previous docstring documented nonexistent offset/limit
    # parameters and a (list, int) tuple return; the actual signature takes
    # module_names and the return value is a plain sorted list.
    try:
        ybl = yb.doPackageLists(pkgnarrow="available", patterns=module_names, showdups=False)
    except YumBaseError as e:
        raise ProjectsError("There was a problem listing modules: %s" % str(e))
    finally:
        # Always release the rpmdb, even on failure
        yb.closeRpmDB()
    return sorted(map(yaps_to_module, ybl.available), key=lambda p: p["name"].lower())
+
+
def modules_info(yb, module_names):
    """Return details about a module, including dependencies

    :param yb: yum base object
    :type yb: YumBase
    :param module_names: Names of the modules to get info about
    :type module_names: str
    :returns: List of dicts with module details and dependencies.
    :rtype: list of dicts
    :raises: ProjectsError if the yum query fails
    """
    try:
        # Get the info about each module
        ybl = yb.doPackageLists(pkgnarrow="available", patterns=module_names, showdups=False)
    except YumBaseError as e:
        raise ProjectsError("There was a problem with info for %s: %s" % (module_names, str(e)))
    finally:
        yb.closeRpmDB()

    modules = sorted([yaps_to_project(p) for p in ybl.available],
                     key=lambda m: m["name"].lower())
    # Depsolve each module individually and attach the result
    for module in modules:
        module["dependencies"] = projects_depsolve(yb, [module["name"]])

    return modules
+
+# Copyright (C) 2018 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+""" Functions to monitor compose queue and run anaconda"""
+import logging
+log = logging.getLogger("pylorax")
+
+import os
+import grp
+from glob import glob
+import multiprocessing as mp
+import pytoml as toml
+import pwd
+import shutil
+import subprocess
+from subprocess import Popen, PIPE
+import time
+
+from pylorax.api.compose import move_compose_results
+from pylorax.api.recipes import recipe_from_file
+from pylorax.base import DataHolder
+from pylorax.creator import run_creator
+from pylorax.sysutils import joinpaths
+
def start_queue_monitor(cfg, uid, gid):
    """Start the queue monitor as a daemonized multiprocessing Process

    :param cfg: Configuration settings
    :type cfg: ComposerConfig
    :param uid: User ID that owns the queue
    :type uid: int
    :param gid: Group ID that owns the queue
    :type gid: int
    :returns: None
    """
    # Pass only the settings the monitor needs, as a DataHolder
    monitor_cfg = DataHolder(composer_dir=cfg.get("composer", "lib_dir"),
                             share_dir=cfg.get("composer", "share_dir"),
                             uid=uid, gid=gid)
    proc = mp.Process(target=monitor, args=(monitor_cfg,))
    proc.daemon = True
    proc.start()
+
def monitor(cfg):
    """Monitor the queue for new compose requests

    :param cfg: Configuration settings
    :type cfg: DataHolder
    :returns: Does not return

    The queue has 2 subdirectories, new and run. When a compose is ready to be run
    a symlink to the uniquely named results directory should be placed in ./queue/new/

    When it is ready to be run (checked every 30 seconds, or immediately after a
    previous compose finishes) the symlink is moved into ./queue/run/ and a STATUS
    file is created in the results directory.

    STATUS can contain one of: RUNNING, FINISHED, FAILED

    If the system is restarted while a compose is running, any old symlinks are
    moved from ./queue/run/ back to ./queue/new/ and rerun.
    """
    def queue_sort(uuid):
        """Sort the queue entries by their mtime, not their names"""
        return os.stat(joinpaths(cfg.composer_dir, "queue/new", uuid)).st_mtime

    # Requeue anything left in the run queue by a previous instance
    for link in os.listdir(joinpaths(cfg.composer_dir, "queue/run")):
        src = joinpaths(cfg.composer_dir, "queue/run", link)
        dst = joinpaths(cfg.composer_dir, "queue/new", link)
        os.rename(src, dst)
        log.debug("Moved unfinished compose %s back to new state", src)

    while True:
        uuids = sorted(os.listdir(joinpaths(cfg.composer_dir, "queue/new")), key=queue_sort)

        if not uuids:
            # No composes left to process, sleep for a bit
            time.sleep(30)
            continue

        # Pick the oldest and move it into ./run/
        src = joinpaths(cfg.composer_dir, "queue/new", uuids[0])
        dst = joinpaths(cfg.composer_dir, "queue/run", uuids[0])
        try:
            os.rename(src, dst)
        except OSError:
            # The symlink may vanish if uuid_cancel() has been called
            continue

        log.info("Starting new compose: %s", dst)
        open(joinpaths(dst, "STATUS"), "w").write("RUNNING\n")

        try:
            make_compose(cfg, os.path.realpath(dst))
            log.info("Finished building %s, results are in %s", dst, os.path.realpath(dst))
            open(joinpaths(dst, "STATUS"), "w").write("FINISHED\n")
        except Exception:
            import traceback
            log.error("traceback: %s", traceback.format_exc())

            # TODO - Write the error message to an ERROR-LOG file to include with the status
            open(joinpaths(dst, "STATUS"), "w").write("FAILED\n")

        # Remove the run-queue symlink; the results directory is kept
        os.unlink(dst)
+
def make_compose(cfg, results_dir):
    """Run anaconda with the final-kickstart.ks from results_dir

    :param cfg: Configuration settings
    :type cfg: DataHolder
    :param results_dir: The directory containing the metadata and results for the build
    :type results_dir: str
    :returns: Nothing
    :raises: May raise various exceptions

    This takes the final-kickstart.ks, and the settings in config.toml and runs Anaconda
    in no-virt mode (directly on the host operating system). Exceptions should be caught
    at the higer level.

    If there is a failure, the build artifacts will be cleaned up, and any logs will be
    moved into logs/anaconda/ and their ownership will be set to the user from the cfg
    object.
    """

    # Check on the ks's presence
    ks_path = joinpaths(results_dir, "final-kickstart.ks")
    if not os.path.exists(ks_path):
        raise RuntimeError("Missing kickstart file at %s" % ks_path)

    # The anaconda logs are copied into ./anaconda/ in this directory
    log_dir = joinpaths(results_dir, "logs/")
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    # Load the compose configuration
    cfg_path = joinpaths(results_dir, "config.toml")
    if not os.path.exists(cfg_path):
        raise RuntimeError("Missing config.toml for %s" % results_dir)
    cfg_dict = toml.loads(open(cfg_path, "r").read())

    # The keys in cfg_dict correspond to the arguments setup in livemedia-creator
    # keys that define what to build should be setup in compose_args, and keys with
    # defaults should be setup here.

    # Make sure that image_name contains no path components
    cfg_dict["image_name"] = os.path.basename(cfg_dict["image_name"])

    # Only support novirt installation, set some other defaults
    cfg_dict["no_virt"] = True
    cfg_dict["disk_image"] = None
    cfg_dict["fs_image"] = None
    cfg_dict["keep_image"] = False
    cfg_dict["domacboot"] = False
    cfg_dict["anaconda_args"] = ""
    cfg_dict["proxy"] = ""
    cfg_dict["armplatform"] = ""
    cfg_dict["squashfs_args"] = None

    cfg_dict["lorax_templates"] = cfg.share_dir
    cfg_dict["tmp"] = "/var/tmp/"
    cfg_dict["dracut_args"] = None       # Use default args for dracut

    # Compose things in a temporary directory inside the results directory
    cfg_dict["result_dir"] = joinpaths(results_dir, "compose")
    os.makedirs(cfg_dict["result_dir"])

    install_cfg = DataHolder(**cfg_dict)

    # Some kludges for the 99-copy-logs %post, failure in it will crash the build
    for f in ["/tmp/NOSAVE_INPUT_KS", "/tmp/NOSAVE_LOGS"]:
        open(f, "w")

    # Placing a CANCEL file in the results directory will make execWithRedirect send anaconda a SIGTERM
    def cancel_build():
        return os.path.exists(joinpaths(results_dir, "CANCEL"))

    log.debug("cfg  = %s", install_cfg)
    try:
        test_path = joinpaths(results_dir, "TEST")
        if os.path.exists(test_path):
            # Pretend to run the compose
            time.sleep(10)
            # TEST file contents select the outcome: 1 (or unreadable) fails,
            # anything else fakes a successful image
            try:
                test_mode = int(open(test_path, "r").read())
            except Exception:
                test_mode = 1
            if test_mode == 1:
                raise RuntimeError("TESTING FAILED compose")
            else:
                open(joinpaths(results_dir, install_cfg.image_name), "w").write("TEST IMAGE")
        else:
            # The real build, runs anaconda on the host via livemedia-creator
            run_creator(install_cfg, callback_func=cancel_build)

            # Extract the results of the compose into results_dir and cleanup the compose directory
            move_compose_results(install_cfg, results_dir)
    finally:
        # Make sure that everything under the results directory is owned by the user
        user = pwd.getpwuid(cfg.uid).pw_name
        group = grp.getgrgid(cfg.gid).gr_name
        log.debug("Install finished, chowning results to %s:%s", user, group)
        subprocess.call(["chown", "-R", "%s:%s" % (user, group), results_dir])
+
def get_compose_type(results_dir):
    """Return the type of composition.

    :param results_dir: The directory containing the metadata and results for the build
    :type results_dir: str
    :returns: The type of compose (eg. 'tar')
    :rtype: str
    :raises: RuntimeError if no kickstart template can be found.
    """
    # There should only be 2 kickstarts: final-kickstart.ks and the
    # <type>.ks template it was generated from. Strip the ".ks" suffix.
    candidates = [os.path.basename(ks)[:-3]
                  for ks in glob(joinpaths(results_dir, "*.ks"))
                  if "final-kickstart" not in ks]
    if len(candidates) != 1:
        raise RuntimeError("Cannot find ks template for build %s" % os.path.basename(results_dir))
    return candidates[0]
+
def compose_detail(results_dir):
    """Return details about the build.

    :param results_dir: The directory containing the metadata and results for the build
    :type results_dir: str
    :returns: A dictionary with details about the compose, or {} if it no longer exists
    :rtype: dict

    The following details are included in the dict:

    * id - The uuid of the composition
    * queue_status - The final status of the composition (FINISHED or FAILED)
    * timestamp - The time of the last status change
    * compose_type - The type of output generated (tar, iso, etc.)
    * recipe - Recipe name
    * version - Recipe version
    """
    # The directory may have been deleted since it was queued
    if not os.path.exists(results_dir):
        return {}

    status_path = joinpaths(results_dir, "STATUS")
    status = open(status_path).read().strip()
    mtime = os.stat(status_path).st_mtime
    recipe = recipe_from_file(joinpaths(results_dir, "recipe.toml"))

    return {"id": os.path.basename(os.path.abspath(results_dir)),
            "queue_status": status,
            "timestamp": mtime,
            "compose_type": get_compose_type(results_dir),
            "recipe": recipe["name"],
            "version": recipe["version"]
            }
+
def queue_status(cfg):
    """Return details about what is in the queue.

    :param cfg: Configuration settings
    :type cfg: ComposerConfig
    :returns: A dict with "new" (waiting to be built) and "run" (currently
              building, limited to 1 at a time) lists of compose details
    :rtype: dict
    """
    queue_dir = joinpaths(cfg.get("composer", "lib_dir"), "queue")
    # The queue entries are symlinks to the results directories
    new_links = glob(joinpaths(queue_dir, "new/*"))
    run_links = glob(joinpaths(queue_dir, "run/*"))

    return {
        "new": [compose_detail(os.path.realpath(p)) for p in new_links],
        "run": [compose_detail(os.path.realpath(p)) for p in run_links]
    }
+
def uuid_status(cfg, uuid):
    """Return the details of a specific UUID compose

    :param cfg: Configuration settings
    :type cfg: ComposerConfig
    :param uuid: The UUID of the build
    :type uuid: str
    :returns: Details about the build, or None if it does not exist
    :rtype: dict or None

    Returns the same dict as `compose_detail()`
    """
    uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid)
    if not os.path.exists(uuid_dir):
        return None
    return compose_detail(uuid_dir)
+
def build_status(cfg, status_filter=None):
    """Return the details of finished or failed builds

    :param cfg: Configuration settings
    :type cfg: ComposerConfig
    :param status_filter: What builds to return. None == all, "FINISHED", or "FAILED"
    :type status_filter: str
    :returns: A list of the build details (from compose_detail)
    :rtype: list of dicts

    This returns a list of build details for each of the matching builds on the
    system. It does not return the status of builds that have not been finished.
    Use queue_status() for those.
    """
    # No filter means both terminal states
    wanted = [status_filter] if status_filter else ["FINISHED", "FAILED"]

    results = []
    result_dir = joinpaths(cfg.get("composer", "lib_dir"), "results")
    for build in glob(result_dir + "/*"):
        log.debug("Checking status of build %s", build)

        status = open(joinpaths(build, "STATUS"), "r").read().strip()
        if status in wanted:
            results.append(compose_detail(build))
    return results
+
def uuid_cancel(cfg, uuid):
    """Cancel a build and delete its results

    :param cfg: Configuration settings
    :type cfg: ComposerConfig
    :param uuid: The UUID of the build
    :type uuid: str
    :returns: True if it was canceled and deleted
    :rtype: bool

    Only call this if the build status is WAITING or RUNNING
    """
    # This status can change (and probably will) while it is in the middle of doing this:
    # It can move from WAITING -> RUNNING or it can move from RUNNING -> FINISHED|FAILED

    # If it is in WAITING remove the symlink and then check to make sure it didn't show up
    # in RUNNING
    queue_dir = joinpaths(cfg.get("composer", "lib_dir"), "queue")
    uuid_new = joinpaths(queue_dir, "new", uuid)
    if os.path.exists(uuid_new):
        try:
            os.unlink(uuid_new)
        except OSError:
            # The symlink may vanish if the queue monitor started the build
            pass
    uuid_run = joinpaths(queue_dir, "run", uuid)
    if not os.path.exists(uuid_run):
        # Successfully removed it before the build started
        return uuid_delete(cfg, uuid)

    # Tell the build to stop running
    cancel_path = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid, "CANCEL")
    open(cancel_path, "w").write("\n")

    # Wait for status to move to FAILED
    started = time.time()
    while True:
        status = uuid_status(cfg, uuid)
        # BUGFIX: uuid_status() returns None if the results directory vanished
        # mid-cancel; previously that crashed with a TypeError on subscription.
        if status is None or status["queue_status"] == "FAILED":
            break

        # Is this taking too long? Exit anyway and try to cleanup.
        if time.time() > started + (10 * 60):
            log.error("Failed to cancel the build of %s", uuid)
            break

        time.sleep(5)

    # Remove the partial results
    # BUGFIX: this previously fell through without a return, so callers always
    # got None even on success despite the documented True return value.
    return uuid_delete(cfg, uuid)
+
def uuid_delete(cfg, uuid):
    """Delete all of the results from a compose

    :param cfg: Configuration settings
    :type cfg: ComposerConfig
    :param uuid: The UUID of the build
    :type uuid: str
    :returns: True if it was deleted
    :rtype: bool
    :raises: This will raise an error if the delete failed
    """
    uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid)
    # Guard against removing a dangerously short path (eg. "/")
    if not uuid_dir or len(uuid_dir) < 10:
        raise RuntimeError("Directory length is too short: %s" % uuid_dir)
    shutil.rmtree(uuid_dir)
    return True
+
def uuid_info(cfg, uuid):
    """Return information about the composition

    :param cfg: Configuration settings
    :type cfg: ComposerConfig
    :param uuid: The UUID of the build
    :type uuid: str
    :returns: dictionary of information about the composition
    :rtype: dict
    :raises: RuntimeError if there was a problem

    This will return a dict with the following fields populated:

    * id - The uuid of the composition
    * config - containing the configuration settings used to run Anaconda
    * recipe - The depsolved recipe used to generate the kickstart
    * commit - The (local) git commit hash for the recipe used
    * deps - The NEVRA of all of the dependencies used in the composition
    * compose_type - The type of output generated (tar, iso, etc.)
    * queue_status - The final status of the composition (FINISHED or FAILED)
    """
    uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid)
    if not os.path.exists(uuid_dir):
        raise RuntimeError("%s is not a valid build_id" % uuid)

    def _load_toml(fname):
        # Each metadata file is required; a missing one is a hard error
        path = joinpaths(uuid_dir, fname)
        if not os.path.exists(path):
            raise RuntimeError("Missing %s for %s" % (fname, uuid))
        return toml.loads(open(path, "r").read())

    cfg_dict = _load_toml("config.toml")
    frozen_dict = _load_toml("frozen.toml")
    deps_dict = _load_toml("deps.toml")

    compose_type = get_compose_type(uuid_dir)
    status = open(joinpaths(uuid_dir, "STATUS")).read().strip()
    commit_id = open(joinpaths(uuid_dir, "COMMIT"), "r").read().strip()

    return {"id": uuid,
            "config": cfg_dict,
            "recipe": frozen_dict,
            "commit": commit_id,
            "deps": deps_dict,
            "compose_type": compose_type,
            "queue_status": status
            }
+
def uuid_tar(cfg, uuid, metadata=False, image=False, logs=False):
    """Return a tar of the build data

    :param cfg: Configuration settings
    :type cfg: ComposerConfig
    :param uuid: The UUID of the build
    :type uuid: str
    :param metadata: Set to true to include all the metadata needed to reproduce the build
    :type metadata: bool
    :param image: Set to true to include the output image
    :type image: bool
    :param logs: Set to true to include the logs from the build
    :type logs: bool
    :returns: A stream of bytes from tar
    :rtype: A generator
    :raises: RuntimeError if there was a problem (eg. missing config file)

    This yields an uncompressed tar's data to the caller. It includes
    the selected data to the caller by returning the Popen stdout from the tar process.
    """
    uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid)
    if not os.path.exists(uuid_dir):
        raise RuntimeError("%s is not a valid build_id" % uuid)

    # The image filename comes from the compose configuration
    cfg_path = joinpaths(uuid_dir, "config.toml")
    if not os.path.exists(cfg_path):
        raise RuntimeError("Missing config.toml for %s" % uuid)
    image_name = toml.loads(open(cfg_path, "r").read())["image_name"]

    def include_file(path):
        # logs/ is governed by the logs flag, the image by the image flag,
        # and everything else by the metadata flag
        if path.endswith("/logs"):
            return logs
        if path.endswith(image_name):
            return image
        return metadata

    filenames = [os.path.basename(p)
                 for p in glob(joinpaths(uuid_dir, "*")) if include_file(p)]

    # Stream the uncompressed tar from the tar process's stdout
    tar = Popen(["tar", "-C", uuid_dir, "-cf-"] + filenames, stdout=PIPE)
    return tar.stdout
+
def uuid_image(cfg, uuid):
    """Return the filename and full path of the build's image file

    :param cfg: Configuration settings
    :type cfg: ComposerConfig
    :param uuid: The UUID of the build
    :type uuid: str
    :returns: The image filename and full path
    :rtype: tuple of strings
    :raises: RuntimeError if there was a problem (eg. invalid uuid, missing config file)
    """
    uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid)
    if not os.path.exists(uuid_dir):
        raise RuntimeError("%s is not a valid build_id" % uuid)

    # The image filename is stored in the compose configuration
    cfg_path = joinpaths(uuid_dir, "config.toml")
    if not os.path.exists(cfg_path):
        raise RuntimeError("Missing config.toml for %s" % uuid)
    image_name = toml.loads(open(cfg_path, "r").read())["image_name"]

    return (image_name, joinpaths(uuid_dir, image_name))
+
def uuid_log(cfg, uuid, size=1024):
    """Return `size` kbytes from the end of the anaconda.log

    :param cfg: Configuration settings
    :type cfg: ComposerConfig
    :param uuid: The UUID of the build
    :type uuid: str
    :param size: Number of kbytes to read. Default is 1024
    :type size: int
    :returns: Up to `size` kbytes from the end of the log
    :rtype: str
    :raises: RuntimeError if there was a problem (eg. no log file available)

    This function tries to return lines from the end of the log, it will
    attempt to start on a line boundary, and may return less than `size` kbytes.
    """
    uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid)
    if not os.path.exists(uuid_dir):
        raise RuntimeError("%s is not a valid build_id" % uuid)

    # While a build is running the logs will be in /tmp/anaconda.log and when it
    # has finished they will be in the results directory
    status = uuid_status(cfg, uuid)
    if status["queue_status"] == "RUNNING":
        log_path = "/tmp/anaconda.log"
    else:
        log_path = joinpaths(uuid_dir, "logs", "anaconda", "anaconda.log")
    if not os.path.exists(log_path):
        raise RuntimeError("No anaconda.log available.")

    with open(log_path, "r") as f:
        # Seek to the end to find the file size
        f.seek(0, 2)
        end = f.tell()
        if end < 1024 * size:
            # Log is smaller than the request, return all of it
            f.seek(0, 0)
        else:
            f.seek(end - (1024 * size))
            # Find the start of the next line and return the rest
            f.readline()
        return f.read()
+
+#
+# Copyright (C) 2017 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import gi
+gi.require_version("Ggit", "1.0")
+from gi.repository import Ggit as Git
+from gi.repository import Gio
+from gi.repository import GLib
+
+import os
+import pytoml as toml
+import semantic_version as semver
+
+from pylorax.api.projects import dep_evra
+from pylorax.base import DataHolder
+from pylorax.sysutils import joinpaths
+
+
+
+
+
class Recipe(dict):
    """A Recipe of packages and modules

    A dict subclass that validates its constructor arguments, keeps the
    module and package lists sorted, and adds a .filename property to return
    the recipe's filename and a .toml() function to return the recipe as a
    TOML string.
    """
    def __init__(self, name, description, version, modules, packages):
        # An empty version is allowed, anything else must parse as semver
        if version:
            semver.Version(version)

        # Store modules and packages sorted by their case-insensitive names
        if modules is not None:
            modules = sorted(modules, key=lambda m: m["name"].lower())
        if packages is not None:
            packages = sorted(packages, key=lambda p: p["name"].lower())
        super(Recipe, self).__init__(name=name,
                                     description=description,
                                     version=version,
                                     modules=modules,
                                     packages=packages)

    @property
    def package_names(self):
        """Return the names of the packages"""
        return [p["name"] for p in self["packages"] or []]

    @property
    def module_names(self):
        """Return the names of the modules"""
        return [m["name"] for m in self["modules"] or []]

    @property
    def filename(self):
        """Return the Recipe's filename

        Replaces spaces in the name with '-' and appends .toml
        """
        return recipe_filename(self.get("name"))

    def toml(self):
        """Return the Recipe serialized in TOML format"""
        return toml.dumps(self).encode("UTF-8")

    def bump_version(self, old_version=None):
        """semver recipe version number bump

        :param old_version: An optional old version number
        :type old_version: str
        :returns: The new version number or None
        :rtype: str
        :raises: ValueError

        If neither have a version, 0.0.1 is returned
        If there is no old version the new version is checked and returned
        If there is no new version, but there is a old one, bump its patch level
        If the old and new versions are the same, bump the patch level
        If they are different, check and return the new version
        """
        new_version = self.get("version")
        if not new_version and not old_version:
            # Neither has a version, start from scratch
            self["version"] = "0.0.1"

        elif new_version and not old_version:
            # Only a new version, validate and keep it
            semver.Version(new_version)
            self["version"] = new_version

        elif not new_version or new_version == old_version:
            # Missing or unchanged new version, bump the old patch level
            self["version"] = str(semver.Version(old_version).next_patch())

        else:
            # Different versions, validate and keep the new one
            semver.Version(new_version)
            self["version"] = new_version

        # Return the new version
        return str(semver.Version(self["version"]))

    def freeze(self, deps):
        """ Return a new Recipe with full module and package NEVRA

        :param deps: A list of dependency NEVRA to use to fill in the modules and packages
        :type deps: list(
        :returns: A new Recipe object
        :rtype: Recipe
        """
        module_names = self.module_names
        package_names = self.package_names

        frozen_modules = []
        frozen_packages = []
        # Match each dep to a package first, then a module
        for dep in deps:
            if dep["name"] in package_names:
                frozen_packages.append(RecipePackage(dep["name"], dep_evra(dep)))
            elif dep["name"] in module_names:
                frozen_modules.append(RecipeModule(dep["name"], dep_evra(dep)))

        return Recipe(self["name"], self["description"], self["version"],
                      frozen_modules, frozen_packages)
+
class RecipeModule(dict):
    """A single module entry of a Recipe: a dict with 'name' and 'version' keys."""
    def __init__(self, name, version):
        super(RecipeModule, self).__init__(name=name, version=version)
+
+
def recipe_from_file(recipe_path):
    """Load a Recipe from a TOML file on disk

    :param recipe_path: Path to the recipe file
    :type recipe_path: str
    :returns: A Recipe object
    :rtype: Recipe
    """
    with open(recipe_path, 'rb') as recipe_file:
        return recipe_from_toml(recipe_file.read())
+
def recipe_from_toml(recipe_str):
    """Parse a TOML string into a Recipe object

    :param recipe_str: The Recipe TOML string
    :type recipe_str: str
    :returns: A Recipe object
    :rtype: Recipe
    :raises: TomlError on invalid TOML
    """
    return recipe_from_dict(toml.loads(recipe_str))
+
def recipe_from_dict(recipe_dict):
    """Build a Recipe object from a plain dict

    :param recipe_dict: A plain dict of the recipe
    :type recipe_dict: dict
    :returns: A Recipe object
    :rtype: Recipe
    :raises: RecipeError if a required field is missing
    """
    # "modules" and "packages" may be missing (or present but empty/None)
    # in the TOML; either way they become empty lists.
    modules = [RecipeModule(m.get("name"), m.get("version"))
               for m in recipe_dict.get("modules") or []]
    packages = [RecipePackage(p.get("name"), p.get("version"))
                for p in recipe_dict.get("packages") or []]

    # name and description are required; version is optional
    try:
        name = recipe_dict["name"]
        description = recipe_dict["description"]
    except KeyError as e:
        raise RecipeError("There was a problem parsing the recipe: %s" % str(e))
    version = recipe_dict.get("version", None)

    return Recipe(name, description, version, modules, packages)
+
def gfile(path):
    """Wrap a filesystem path in a Gio.GFile for use with the Ggit API

    :param path: A filesystem path
    :type path: str
    :returns: The path as a GFile object
    """
    return Gio.file_new_for_path(path)
+
def recipe_filename(name):
    """Return the toml filename for a recipe name

    Spaces are replaced with '-' and a '.toml' extension is appended.
    """
    # XXX Raise an error if name is empty?
    return "%s.toml" % name.replace(" ", "-")
+
def head_commit(repo, branch):
    """Get the branch's HEAD Commit object

    :param repo: Open repository
    :type repo: Git.Repository
    :param branch: Branch name
    :type branch: str
    :returns: Branch's head commit
    :rtype: Git.Commit
    :raises: Can raise errors from Ggit
    """
    local_branch = repo.lookup_branch(branch, Git.BranchType.LOCAL)
    head_id = local_branch.get_target()
    return repo.lookup(head_id, Git.Commit)
+
def prepare_commit(repo, branch, builder):
    """Write the builder's tree and assemble the pieces needed for a commit

    :param repo: Open repository
    :type repo: Git.Repository
    :param branch: Branch name
    :type branch: str
    :param builder: TreeBuilder holding the changes to commit
    :type builder: TreeBuilder
    :returns: (Tree, Sig, Ref)
    :rtype: tuple
    :raises: Can raise errors from Ggit
    """
    # Write the pending tree and look it up as a Tree object
    tree = repo.lookup(builder.write(), Git.Tree)
    sig = Git.Signature.new_now("bdcs-api-server", "user-email")
    return (tree, sig, "refs/heads/%s" % branch)
+
def open_or_create_repo(path):
    """Open an existing repo, or create a new one

    :param path: path to recipe directory
    :type path: string
    :returns: A repository object
    :rtype: Git.Repository
    :raises: Can raise errors from Ggit

    A bare git repo will be created in the git directory of the specified path.
    If a repo already exists it will be opened and returned instead of
    creating a new one.
    """
    # Initialize the Ggit library before any other Git.* call
    Git.init()
    git_path = joinpaths(path, "git")
    # A HEAD file is the marker that the bare repo was already created
    if os.path.exists(joinpaths(git_path, "HEAD")):
        return Git.Repository.open(gfile(git_path))

    # Second argument True => create a *bare* repository
    repo = Git.Repository.init_repository(gfile(git_path), True)

    # Make an initial empty commit so later commits have a parent to build on
    sig = Git.Signature.new_now("bdcs-api-server", "user-email")
    tree_id = repo.get_index().write_tree()
    tree = repo.lookup(tree_id, Git.Tree)
    repo.create_commit("HEAD", sig, sig, "UTF-8", "Initial Recipe repository commit", tree, [])
    return repo
+
def write_commit(repo, branch, filename, message, content):
    """Make a new commit to a repository's branch

    :param repo: Open repository
    :type repo: Git.Repository
    :param branch: Branch name
    :type branch: str
    :param filename: full path of the file to add
    :type filename: str
    :param message: The commit message
    :type message: str
    :param content: The data to write
    :type content: str
    :returns: OId of the new commit
    :rtype: Git.OId
    :raises: Can raise errors from Ggit
    """
    try:
        parent_commit = head_commit(repo, branch)
    except GLib.GError:
        # Branch doesn't exist, make a new one based on master
        master_head = head_commit(repo, "master")
        repo.create_branch(branch, master_head, 0)
        parent_commit = head_commit(repo, branch)
    # NOTE: the original code looked up head_commit() a second time here,
    # unconditionally — a redundant repeat of the lookups above. Removed.

    blob_id = repo.create_blob_from_buffer(content)

    # Use treebuilder to make a new entry for this filename and blob
    parent_tree = parent_commit.get_tree()
    builder = repo.create_tree_builder_from_tree(parent_tree)
    builder.insert(filename, blob_id, Git.FileMode.BLOB)
    (tree, sig, ref) = prepare_commit(repo, branch, builder)
    return repo.create_commit(ref, sig, sig, "UTF-8", message, tree, [parent_commit])
+
def read_commit_spec(repo, spec):
    """Return the raw content of the blob addressed by a revparse spec

    :param repo: Open repository
    :type repo: Git.Repository
    :param spec: Git revparse spec, eg. "master:README" for README on master
    :type spec: str
    :returns: Contents of the blob
    :rtype: str
    :raises: Can raise errors from Ggit
    """
    blob_id = repo.revparse(spec).get_id()
    blob = repo.lookup(blob_id, Git.Blob)
    return blob.get_raw_content()
+
def read_commit(repo, branch, filename, commit=None):
    """Return the contents of a file on a specific branch or commit.

    :param repo: Open repository
    :type repo: Git.Repository
    :param branch: Branch name
    :type branch: str
    :param filename: filename to read
    :type filename: str
    :param commit: Optional commit hash; defaults to the file's latest commit
    :type commit: str
    :returns: The commit id, and the contents of the commit
    :rtype: tuple(str, str)
    :raises: Can raise errors from Ggit, or RecipeError when the file has
             no commits on the branch
    """
    if not commit:
        # No commit given: find the most recent commit for filename on the branch
        latest = list_commits(repo, branch, filename, 1)
        if not latest:
            raise RecipeError("No commits for %s on the %s branch." % (filename, branch))
        commit = latest[0].commit

    return (commit, read_commit_spec(repo, "%s:%s" % (commit, filename)))
+
def read_recipe_commit(repo, branch, recipe_name, commit=None):
    """Read a recipe commit from git and return a Recipe object

    :param repo: Open repository
    :type repo: Git.Repository
    :param branch: Branch name
    :type branch: str
    :param recipe_name: Recipe name to read
    :type recipe_name: str
    :param commit: Optional commit hash; defaults to the recipe's latest commit
    :type commit: str
    :returns: A Recipe object
    :rtype: Recipe
    :raises: Can raise errors from Ggit
    """
    filename = recipe_filename(recipe_name)
    (_, recipe_toml) = read_commit(repo, branch, filename, commit)
    return recipe_from_toml(recipe_toml)
+
def read_recipe_and_id(repo, branch, recipe_name, commit=None):
    """Read a recipe commit and its commit id from git

    :param repo: Open repository
    :type repo: Git.Repository
    :param branch: Branch name
    :type branch: str
    :param recipe_name: Recipe name to read
    :type recipe_name: str
    :param commit: Optional commit hash; defaults to the recipe's latest commit
    :type commit: str
    :returns: The commit id, and a Recipe object
    :rtype: tuple(str, Recipe)
    :raises: Can raise errors from Ggit
    """
    filename = recipe_filename(recipe_name)
    (commit_id, recipe_toml) = read_commit(repo, branch, filename, commit)
    return (commit_id, recipe_from_toml(recipe_toml))
+
def list_branch_files(repo, branch):
    """Return a sorted list of the files on the branch HEAD

    :param repo: Open repository
    :type repo: Git.Repository
    :param branch: Branch name
    :type branch: str
    :returns: A sorted list of the filenames
    :rtype: list(str)
    :raises: Can raise errors from Ggit
    """
    head_hash = head_commit(repo, branch).get_id().to_string()
    return list_commit_files(repo, head_hash)
+
def list_commit_files(repo, commit):
    """Return a sorted list of the files on a commit

    :param repo: Open repository
    :type repo: Git.Repository
    :param commit: The commit hash to list
    :type commit: str
    :returns: A sorted list of the filenames
    :rtype: list(str)
    :raises: Can raise errors from Ggit
    """
    commit_obj = repo.lookup(Git.OId.new_from_string(commit), Git.Commit)
    tree = commit_obj.get_tree()
    return sorted(tree.get(i).get_name() for i in range(tree.size()))
+
def delete_recipe(repo, branch, recipe_name):
    """Delete a recipe from a branch.

    :param repo: Open repository
    :type repo: Git.Repository
    :param branch: Branch name
    :type branch: str
    :param recipe_name: Recipe name to delete
    :type recipe_name: str
    :returns: OId of the new commit
    :rtype: Git.OId
    :raises: Can raise errors from Ggit

    Thin wrapper around delete_file() using the recipe's toml filename.
    """
    filename = recipe_filename(recipe_name)
    return delete_file(repo, branch, filename)
+
def delete_file(repo, branch, filename):
    """Delete a file from a branch.

    :param repo: Open repository
    :type repo: Git.Repository
    :param branch: Branch name
    :type branch: str
    :param filename: filename to delete
    :type filename: str
    :returns: OId of the new commit
    :rtype: Git.OId
    :raises: Can raise errors from Ggit
    """
    parent_commit = head_commit(repo, branch)
    # Build a new tree from HEAD's with the file's entry removed
    builder = repo.create_tree_builder_from_tree(parent_commit.get_tree())
    builder.remove(filename)
    (tree, sig, ref) = prepare_commit(repo, branch, builder)
    return repo.create_commit(ref, sig, sig, "UTF-8",
                              "Recipe %s deleted" % filename, tree, [parent_commit])
+
def revert_recipe(repo, branch, recipe_name, commit):
    """Revert the contents of a recipe to that of a previous commit

    :param repo: Open repository
    :type repo: Git.Repository
    :param branch: Branch name
    :type branch: str
    :param recipe_name: Recipe name to revert
    :type recipe_name: str
    :param commit: Commit hash
    :type commit: str
    :returns: OId of the new commit
    :rtype: Git.OId
    :raises: Can raise errors from Ggit

    Thin wrapper around revert_file() using the recipe's toml filename.
    """
    filename = recipe_filename(recipe_name)
    return revert_file(repo, branch, filename, commit)
+
def revert_file(repo, branch, filename, commit):
    """Revert the contents of a file to that of a previous commit

    :param repo: Open repository
    :type repo: Git.Repository
    :param branch: Branch name
    :type branch: str
    :param filename: filename to revert
    :type filename: str
    :param commit: Commit hash
    :type commit: str
    :returns: OId of the new commit
    :rtype: Git.OId
    :raises: Can raise errors from Ggit
    """
    # Find the blob for filename as it was at the old commit
    commit_id = Git.OId.new_from_string(commit)
    commit_obj = repo.lookup(commit_id, Git.Commit)
    revert_tree = commit_obj.get_tree()
    entry = revert_tree.get_by_name(filename)
    blob_id = entry.get_id()
    parent_commit = head_commit(repo, branch)

    # Use treebuilder to modify the tree
    # (inserting an existing name replaces its entry with the old blob)
    parent_tree = parent_commit.get_tree()
    builder = repo.create_tree_builder_from_tree(parent_tree)
    builder.insert(filename, blob_id, Git.FileMode.BLOB)
    (tree, sig, ref) = prepare_commit(repo, branch, builder)
    commit_hash = commit_id.to_string()
    message = "Recipe %s reverted to commit %s" % (filename, commit_hash)
    return repo.create_commit(ref, sig, sig, "UTF-8", message, tree, [parent_commit])
+
def commit_recipe(repo, branch, recipe):
    """Commit a recipe to a branch, bumping its version first

    :param repo: Open repository
    :type repo: Git.Repository
    :param branch: Branch name
    :type branch: str
    :param recipe: Recipe to commit
    :type recipe: Recipe
    :returns: OId of the new commit
    :rtype: Git.OId
    :raises: Can raise errors from Ggit
    """
    # Read the version of the previous commit of this recipe, if any.
    # A broad except is deliberate: any failure means "no usable old version".
    try:
        old_version = read_recipe_commit(repo, branch, recipe["name"])["version"]
    except Exception:
        old_version = None

    recipe.bump_version(old_version)
    message = "Recipe %s, version %s saved." % (recipe["name"], recipe["version"])
    return write_commit(repo, branch, recipe.filename, message, recipe.toml())
+
def commit_recipe_file(repo, branch, filename):
    """Commit a recipe file from disk to a branch

    :param repo: Open repository
    :type repo: Git.Repository
    :param branch: Branch name
    :type branch: str
    :param filename: Path to the recipe file to commit
    :type filename: str
    :returns: OId of the new commit
    :rtype: Git.OId
    :raises: Can raise errors from Ggit, or RecipeFileError when the file
             cannot be read
    """
    try:
        recipe = recipe_from_file(filename)
    except IOError:
        # Unreadable/missing file becomes a RecipeFileError for the caller
        raise RecipeFileError
    return commit_recipe(repo, branch, recipe)
+
def commit_recipe_directory(repo, branch, directory):
    """Commit all *.toml files from a directory, if they aren't already in git.

    :param repo: Open repository
    :type repo: Git.Repository
    :param branch: Branch name
    :type branch: str
    :param directory: The directory of *.toml recipes to commit
    :type directory: str
    :returns: None
    :raises: Can raise errors from Ggit or RecipeFileError

    Files with Toml or RecipeFileErrors will be skipped, and the remainder will
    be tried.
    """
    toml_files = set(entry for entry in os.listdir(directory) if entry.endswith(".toml"))
    already_committed = set(list_branch_files(repo, branch))

    # Only commit files that are not already on the branch
    for toml_file in toml_files - already_committed:
        try:
            commit_recipe_file(repo, branch, joinpaths(directory, toml_file))
        except (RecipeFileError, toml.TomlError):
            # Skip files with errors, but try the others
            pass
+
def tag_recipe_commit(repo, branch, recipe_name):
    """Tag a recipe's most recent commit

    :param repo: Open repository
    :type repo: Git.Repository
    :param branch: Branch name
    :type branch: str
    :param recipe_name: Recipe name to tag
    :type recipe_name: str
    :returns: Tag id or None if it failed.
    :rtype: Git.OId
    :raises: Can raise errors from Ggit

    Thin wrapper around tag_file_commit() using the recipe's toml filename.
    """
    filename = recipe_filename(recipe_name)
    return tag_file_commit(repo, branch, filename)
+
def tag_file_commit(repo, branch, filename):
    """Tag a file's most recent commit

    :param repo: Open repository
    :type repo: Git.Repository
    :param branch: Branch name
    :type branch: str
    :param filename: Filename to tag
    :type filename: str
    :returns: Tag id or None if it failed.
    :rtype: Git.OId
    :raises: Can raise errors from Ggit

    This uses git tags, of the form `refs/tags/<branch>/<filename>/r<revision>`
    Only the most recent recipe commit can be tagged to prevent out of order tagging.
    Revisions start at 1 and increment for each new commit that is tagged.
    If the commit has already been tagged it will return false.
    """
    # list_commits returns newest-first (reverse time order)
    file_commits = list_commits(repo, branch, filename)
    if not file_commits:
        return None

    # Find the most recently tagged version (may not be one) and add 1 to it.
    # for/else: when no commit carries a revision, start at 1.
    for details in file_commits:
        if details.revision is not None:
            new_revision = details.revision + 1
            break
    else:
        new_revision = 1

    name = "%s/%s/r%d" % (branch, filename, new_revision)
    sig = Git.Signature.new_now("bdcs-api-server", "user-email")
    # Tag the newest commit for the file (file_commits[0])
    commit_id = Git.OId.new_from_string(file_commits[0].commit)
    commit = repo.lookup(commit_id, Git.Commit)
    return repo.create_tag(name, commit, sig, name, Git.CreateFlags.NONE)
+
def find_commit_tag(repo, branch, filename, commit_id):
    """Find the tag that matches the commit_id

    :param repo: Open repository
    :type repo: Git.Repository
    :param branch: Branch name
    :type branch: str
    :param filename: filename the tags belong to
    :type filename: str
    :param commit_id: The commit id to check
    :type commit_id: Git.OId
    :returns: The tag or None if there isn't one
    :rtype: str or None

    There should be only 1 tag pointing to a commit, but there may not
    be a tag at all.

    The tag will look like: 'refs/tags/<branch>/<filename>/r<revision>'
    """
    pattern = "%s/%s/r*" % (branch, filename)
    matches = [t for t in repo.list_tags_match(pattern)
               if is_commit_tag(repo, commit_id, t)]
    # Anything other than exactly one match counts as "no tag"
    if len(matches) == 1:
        return matches[0]
    return None
+
def is_commit_tag(repo, commit_id, tag):
    """Check to see if a tag points to a specific commit.

    :param repo: Open repository
    :type repo: Git.Repository
    :param commit_id: The commit id to check
    :type commit_id: Git.OId
    :param tag: The tag to check
    :type tag: str
    :returns: True if the tag points to the commit, False otherwise
    :rtype: bool
    """
    tag_ref = repo.lookup_reference("refs/tags/" + tag)
    # Resolve the tag object and compare its target commit to commit_id
    tag_obj = repo.lookup(tag_ref.get_target(), Git.Tag)
    return commit_id.compare(tag_obj.get_target_id()) == 0
+
def get_revision_from_tag(tag):
    """Return the revision number from a tag

    :param tag: The tag to extract the revision from, eg. 'branch/filename/rXXX'
    :type tag: str
    :returns: The integer revision or None
    :rtype: int or None

    The revision is everything after the final 'r' in the tag name.
    """
    if tag is None:
        return None
    try:
        # rpartition keeps only the text after the last 'r'
        return int(tag.rpartition('r')[2])
    except (ValueError, IndexError):
        return None
+
class CommitDetails(DataHolder):
    """Details of a single commit: hash, timestamp, message and optional tag revision."""
    def __init__(self, commit, timestamp, message, revision=None):
        DataHolder.__init__(self, commit=commit, timestamp=timestamp,
                            message=message, revision=revision)
+
def list_commits(repo, branch, filename, limit=0):
    """List the commit history of a file on a branch.

    :param repo: Open repository
    :type repo: Git.Repository
    :param branch: Branch name
    :type branch: str
    :param filename: filename to list the commits of
    :type filename: str
    :param limit: Number of commits to return (0=all)
    :type limit: int
    :returns: A list of commit details, newest first
    :rtype: list(CommitDetails)
    :raises: Can raise errors from Ggit
    """
    revwalk = Git.RevisionWalker.new(repo)
    revwalk.set_sort_mode(Git.SortMode.TIME)
    branch_ref = "refs/heads/%s" % branch
    revwalk.push_ref(branch_ref)

    commits = []
    while True:
        commit_id = revwalk.next()
        if not commit_id:
            break
        commit = repo.lookup(commit_id, Git.Commit)

        parents = commit.get_parents()
        # No parents? Must be the first commit.
        if parents.get_size() == 0:
            continue

        tree = commit.get_tree()
        # Is the filename in this tree? If not, move on.
        if not tree.get_by_name(filename):
            continue

        # Is filename different in all of the parent commits?
        parent_commits = map(parents.get, xrange(0, parents.get_size()))
        is_diff = all(map(lambda pc: is_parent_diff(repo, filename, tree, pc), parent_commits))
        # No changes from parents, skip it.
        if not is_diff:
            continue

        tag = find_commit_tag(repo, branch, filename, commit.get_id())
        try:
            commits.append(get_commit_details(commit, get_revision_from_tag(tag)))
            # BUGFIX: stop at >= limit. The old '> limit' check collected
            # limit+1 entries, contradicting the documented behavior.
            if limit and len(commits) >= limit:
                break
        except CommitTimeValError:
            # Skip any commits that have trouble converting the time
            # TODO - log details about this failure
            pass

    # These will be in reverse time sort order thanks to revwalk
    return commits
+
def get_commit_details(commit, revision=None):
    """Return the details about a specific commit.

    :param commit: The commit to get details from
    :type commit: Git.Commit
    :param revision: Optional commit (tag) revision number
    :type revision: int
    :returns: Details about the commit
    :rtype: CommitDetails
    :raises: CommitTimeValError or Ggit exceptions

    """
    message = commit.get_message()
    commit_str = commit.get_id().to_string()
    sig = commit.get_committer()

    datetime = sig.get_time()
    # XXX What do we do with timezone?
    _timezone = sig.get_time_zone()
    # Convert the committer time to an ISO 8601 string through GLib.TimeVal;
    # to_timeval() signals failure with a False return, not an exception.
    timeval = GLib.TimeVal()
    ok = datetime.to_timeval(timeval)
    if not ok:
        raise CommitTimeValError
    time_str = timeval.to_iso8601()

    return CommitDetails(commit_str, time_str, message, revision)
+
def is_parent_diff(repo, filename, tree, parent):
    """Check whether filename differs between a commit's tree and one parent

    :param repo: Open repository
    :type repo: Git.Repository
    :param filename: filename to check
    :type filename: str
    :param tree: The commit's tree
    :type tree: Git.Tree
    :param parent: The commit's parent commit
    :type parent: Git.Commit
    :returns: True if filename in the commit is different from its parent
    :rtype: bool
    """
    # Restrict the diff to just this one file
    diff_opts = Git.DiffOptions.new()
    diff_opts.set_pathspec([filename])
    diff = Git.Diff.new_tree_to_tree(repo, parent.get_tree(), tree, diff_opts)
    # Any delta at all means the file changed
    return diff.get_num_deltas() > 0
+
def find_name(name, lst):
    """Find the dict matching the name in a list and return it.

    :param name: Name to search for
    :type name: str
    :param lst: List of dicts with a "name" field
    :returns: First dict with matching name, or None
    :rtype: dict or None
    """
    return next((entry for entry in lst if entry["name"] == name), None)
+
def diff_items(title, old_items, new_items):
    """Return the differences between two lists of dicts.

    :param title: Title of the entry
    :type title: str
    :param old_items: List of item dicts with "name" field
    :type old_items: list(dict)
    :param new_items: List of item dicts with "name" field
    :type new_items: list(dict)
    :returns: List of diff dicts with old/new entries
    :rtype: list(dict)
    """
    def _ci_sorted(names):
        # Case-insensitive sort of the name sets
        return sorted(names, key=lambda n: n.lower())

    diffs = []
    old_names = set(item["name"] for item in old_items)
    new_names = set(item["name"] for item in new_items)

    # Names only in new_items were added
    for name in _ci_sorted(new_names - old_names):
        diffs.append({"old": None,
                      "new": {title: find_name(name, new_items)}})

    # Names only in old_items were removed
    for name in _ci_sorted(old_names - new_names):
        diffs.append({"old": {title: find_name(name, old_items)},
                      "new": None})

    # Names in both lists may have changed contents
    for name in _ci_sorted(old_names & new_names):
        old_item = find_name(name, old_items)
        new_item = find_name(name, new_items)
        if old_item != new_item:
            diffs.append({"old": {title: old_item},
                          "new": {title: new_item}})

    return diffs
+
+
def recipe_diff(old_recipe, new_recipe):
    """Diff two versions of a recipe

    :param old_recipe: The old version of the recipe
    :type old_recipe: Recipe
    :param new_recipe: The new version of the recipe
    :type new_recipe: Recipe
    :returns: A list of diff dict entries with old/new
    :rtype: list(dict)
    """
    # Name, description and version are always present;
    # they cannot be added or removed, just different.
    diffs = [{"old": {field.title(): old_recipe[field]},
              "new": {field.title(): new_recipe[field]}}
             for field in ["name", "description", "version"]
             if old_recipe[field] != new_recipe[field]]

    # Modules and packages can be added, removed, or changed
    diffs.extend(diff_items("Module", old_recipe["modules"], new_recipe["modules"]))
    diffs.extend(diff_items("Package", old_recipe["packages"], new_recipe["packages"]))

    return diffs
+
+#
+# Copyright (C) 2017 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+import logging
+log = logging.getLogger("lorax-composer")
+
+from collections import namedtuple
+from flask import Flask, send_from_directory
+from glob import glob
+import os
+
+from pylorax.api.crossdomain import crossdomain
+from pylorax.api.v0 import v0_api
+from pylorax.sysutils import joinpaths
+
# Bundles an open git repository with its lock and the recipe directory path
GitLock = namedtuple("GitLock", ["repo", "lock", "dir"])
# Bundles a yum base object ("yb") with its lock
YumLock = namedtuple("YumLock", ["yb", "lock"])

# The Flask application; the routes below and v0_api() attach to it
server = Flask(__name__)

# NOTE(review): YumLock is defined but not exported here — confirm intentional
__all__ = ["server", "GitLock"]
+
@server.route('/')
@crossdomain(origin="*")
def hello_world():
    # Simple root route returning a fixed string; CORS is open to any origin.
    return 'Hello, World!'
+
@server.route("/api/docs/")
@server.route("/api/docs/<path:path>")
def api_docs(path=None):
    """Serve the HTML documentation

    :param path: Filename relative to the html docs directory (default index.html)
    :type path: str

    Tries the in-tree docs first (when running from a source checkout), then
    falls back to the installed location under /usr/share/doc.
    """
    # BUGFIX: the original wrapped the abspath() call in try/except IndexError,
    # but os.path.abspath never raises IndexError, so the installed-docs
    # fallback was dead code. Check for the directory's existence instead.
    docs_path = os.path.abspath(joinpaths(os.path.dirname(__file__), "../../../docs/html"))
    if not os.path.isdir(docs_path):
        # Installed location; glob()[0] raises IndexError if nothing matches
        docs_path = glob("/usr/share/doc/lorax-*/html/")[0]

    return send_from_directory(docs_path, path or "index.html")
+
+v0_api(server)
+
+#
+# Copyright (C) 2017-2018 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+""" Setup v0 of the API server
+
+v0_api() must be called to setup the API routes for Flask
+
+Status Responses
+----------------
+
+Some requests only return a status/error response.
+
+ The response will be a status response with `status` set to true, or an
+ error response with it set to false and an error message included.
+
+ Example response::
+
+ {
+ "status": true
+ }
+
+ Error response::
+
+ {
+ "error": {
+ "msg": "ggit-error: Failed to remove entry. File isn't in the tree - jboss.toml (-1)"
+ },
+ "status": false
+ }
+
+API Routes
+----------
+
+All of the recipes routes support the optional `branch` argument. If it is not
+used then the API will use the `master` branch for recipes. If you want to create
+a new branch use the `new` or `workspace` routes with ?branch=<branch-name> to
+store the new recipe on the new branch.
+
+`/api/v0/test`
+^^^^^^^^^^^^^^
+
+ Return a test string. It is not JSON encoded.
+
+`/api/v0/status`
+^^^^^^^^^^^^^^^^
+ Return the status of the API Server::
+
+ { "api": "0",
+ "build": "devel",
+ "db_supported": false,
+ "db_version": "0",
+ "schema_version": "0" }
+
+`/api/v0/recipes/list`
+^^^^^^^^^^^^^^^^^^^^^^
+
+ List the available recipes::
+
+ { "limit": 20,
+ "offset": 0,
+ "recipes": [
+ "atlas",
+ "development",
+ "glusterfs",
+ "http-server",
+ "jboss",
+ "kubernetes" ],
+ "total": 6 }
+
+`/api/v0/recipes/info/<recipe_names>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Return the JSON representation of the recipe. This includes 3 top level
+ objects. `changes` which lists whether or not the workspace is different from
+ the most recent commit. `recipes` which lists the JSON representation of the
+ recipe, and `errors` which will list any errors, like non-existent recipes.
+
+ Example::
+
+ {
+ "changes": [
+ {
+ "changed": false,
+ "name": "glusterfs"
+ }
+ ],
+ "errors": [],
+ "recipes": [
+ {
+ "description": "An example GlusterFS server with samba",
+ "modules": [
+ {
+ "name": "glusterfs",
+ "version": "3.7.*"
+ },
+ {
+ "name": "glusterfs-cli",
+ "version": "3.7.*"
+ }
+ ],
+ "name": "glusterfs",
+ "packages": [
+ {
+ "name": "2ping",
+ "version": "3.2.1"
+ },
+ {
+ "name": "samba",
+ "version": "4.2.*"
+ }
+ ],
+ "version": "0.0.6"
+ }
+ ]
+ }
+
+ Error example::
+
+ {
+ "changes": [],
+ "errors": [
+ {
+ "msg": "ggit-error: the path 'missing.toml' does not exist in the given tree (-3)",
+ "recipe": "missing"
+ }
+ ],
+ "recipes": []
+ }
+
+`/api/v0/recipes/changes/<recipe_names>[?offset=0&limit=20]`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Return the commits to a recipe. By default it returns the first 20 commits, this
+ can be changed by passing `offset` and/or `limit`. The response will include the
+ commit hash, summary, timestamp, and optionally the revision number. The commit
+ hash can be passed to `/api/v0/recipes/diff/` to retrieve the exact changes.
+
+ Example::
+
+ {
+ "errors": [],
+ "limit": 20,
+ "offset": 0,
+ "recipes": [
+ {
+ "changes": [
+ {
+ "commit": "e083921a7ed1cf2eec91ad12b9ad1e70ef3470be",
+ "message": "Recipe glusterfs, version 0.0.6 saved.",
+ "revision": null,
+ "timestamp": "2017-11-23T00:18:13Z"
+ },
+ {
+ "commit": "cee5f4c20fc33ea4d54bfecf56f4ad41ad15f4f3",
+ "message": "Recipe glusterfs, version 0.0.5 saved.",
+ "revision": null,
+ "timestamp": "2017-11-11T01:00:28Z"
+ },
+ {
+ "commit": "29b492f26ed35d80800b536623bafc51e2f0eff2",
+ "message": "Recipe glusterfs, version 0.0.4 saved.",
+ "revision": null,
+ "timestamp": "2017-11-11T00:28:30Z"
+ },
+ {
+ "commit": "03374adbf080fe34f5c6c29f2e49cc2b86958bf2",
+ "message": "Recipe glusterfs, version 0.0.3 saved.",
+ "revision": null,
+ "timestamp": "2017-11-10T23:15:52Z"
+ },
+ {
+ "commit": "0e08ecbb708675bfabc82952599a1712a843779d",
+ "message": "Recipe glusterfs, version 0.0.2 saved.",
+ "revision": null,
+ "timestamp": "2017-11-10T23:14:56Z"
+ },
+ {
+ "commit": "3e11eb87a63d289662cba4b1804a0947a6843379",
+ "message": "Recipe glusterfs, version 0.0.1 saved.",
+ "revision": null,
+ "timestamp": "2017-11-08T00:02:47Z"
+ }
+ ],
+ "name": "glusterfs",
+ "total": 6
+ }
+ ]
+ }
+
+POST `/api/v0/recipes/new`
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Create a new recipe, or update an existing recipe. This supports both JSON and TOML
+ for the recipe format. The recipe should be in the body of the request with the
+ `Content-Type` header set to either `application/json` or `text/x-toml`.
+
+ The response will be a status response with `status` set to true, or an
+ error response with it set to false and an error message included.
+
+DELETE `/api/v0/recipes/delete/<recipe_name>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Delete a recipe. The recipe is deleted from the branch, and will no longer
+ be listed by the `list` route. A recipe can be undeleted using the `undo` route
+ to revert to a previous commit.
+
+ The response will be a status response with `status` set to true, or an
+ error response with it set to false and an error message included.
+
+POST `/api/v0/recipes/workspace`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Write a recipe to the temporary workspace. This works exactly the same as `new` except
+ that it does not create a commit. JSON and TOML bodies are supported.
+
+ The workspace is meant to be used as a temporary recipe storage for clients.
+ It will be read by the `info` and `diff` routes if it is different from the
+ most recent commit.
+
+ The response will be a status response with `status` set to true, or an
+ error response with it set to false and an error message included.
+
+DELETE `/api/v0/recipes/workspace/<recipe_name>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Remove the temporary workspace copy of a recipe. The `info` route will now
+ return the most recent commit of the recipe. Any changes that were in the
+ workspace will be lost.
+
+ The response will be a status response with `status` set to true, or an
+ error response with it set to false and an error message included.
+
+POST `/api/v0/recipes/undo/<recipe_name>/<commit>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ This will revert the recipe to a previous commit. The commit hash from the `changes`
+ route can be used in this request.
+
+ The response will be a status response with `status` set to true, or an
+ error response with it set to false and an error message included.
+
+POST `/api/v0/recipes/tag/<recipe_name>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Tag a recipe as a new release. This uses git tags with a special format.
+ `refs/tags/<branch>/<filename>/r<revision>`. Only the most recent recipe commit
+ can be tagged. Revisions start at 1 and increment for each new tag
+ (per-recipe). If the commit has already been tagged it will return false.
+
+ The response will be a status response with `status` set to true, or an
+ error response with it set to false and an error message included.
+
+`/api/v0/recipes/diff/<recipe_name>/<from_commit>/<to_commit>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Return the differences between two commits, or the workspace. The commit hash
+ from the `changes` response can be used here, or several special strings:
+
+ - NEWEST will select the newest git commit. This works for `from_commit` or `to_commit`
+ - WORKSPACE will select the workspace copy. This can only be used in `to_commit`
+
+ eg. `/api/v0/recipes/diff/glusterfs/NEWEST/WORKSPACE` will return the differences
+ between the most recent git commit and the contents of the workspace.
+
+ Each entry in the response's diff object contains the old recipe value and the new one.
+ If old is null and new is set, then it was added.
+ If new is null and old is set, then it was removed.
+ If both are set, then it was changed.
+
+ The old/new entries will have the name of the recipe field that was changed. This
+ can be one of: Name, Description, Version, Module, or Package.
+ The contents for these will be the old/new values for them.
+
+ In the example below the version was changed and the ping package was added.
+
+ Example::
+
+ {
+ "diff": [
+ {
+ "new": {
+ "Version": "0.0.6"
+ },
+ "old": {
+ "Version": "0.0.5"
+ }
+ },
+ {
+ "new": {
+ "Package": {
+ "name": "ping",
+ "version": "3.2.1"
+ }
+ },
+ "old": null
+ }
+ ]
+ }
+
+`/api/v0/recipes/freeze/<recipe_names>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Return a JSON representation of the recipe with the package and module versions set
+ to the exact versions chosen by depsolving the recipe.
+
+ Example::
+
+ {
+ "errors": [],
+ "recipes": [
+ {
+ "recipe": {
+ "description": "An example GlusterFS server with samba",
+ "modules": [
+ {
+ "name": "glusterfs",
+ "version": "3.8.4-18.4.el7.x86_64"
+ },
+ {
+ "name": "glusterfs-cli",
+ "version": "3.8.4-18.4.el7.x86_64"
+ }
+ ],
+ "name": "glusterfs",
+ "packages": [
+ {
+ "name": "ping",
+ "version": "2:3.2.1-2.el7.noarch"
+ },
+ {
+ "name": "samba",
+ "version": "4.6.2-8.el7.x86_64"
+ }
+ ],
+ "version": "0.0.6"
+ }
+ }
+ ]
+ }
+
+`/api/v0/recipes/depsolve/<recipe_names>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Depsolve the recipe using yum, return the recipe used, and the NEVRAs of the packages
+ chosen to satisfy the recipe's requirements. The response will include a list of results,
+ with the full dependency list in `dependencies`, the NEVRAs for the recipe's direct modules
+ and packages in `modules`, and any error will be in `errors`.
+
+ Example::
+
+ {
+ "errors": [],
+ "recipes": [
+ {
+ "dependencies": [
+ {
+ "arch": "noarch",
+ "epoch": "0",
+ "name": "2ping",
+ "release": "2.el7",
+ "version": "3.2.1"
+ },
+ {
+ "arch": "x86_64",
+ "epoch": "0",
+ "name": "acl",
+ "release": "12.el7",
+ "version": "2.2.51"
+ },
+ {
+ "arch": "x86_64",
+ "epoch": "0",
+ "name": "audit-libs",
+ "release": "3.el7",
+ "version": "2.7.6"
+ },
+ {
+ "arch": "x86_64",
+ "epoch": "0",
+ "name": "avahi-libs",
+ "release": "17.el7",
+ "version": "0.6.31"
+ },
+ ...
+ ],
+ "modules": [
+ {
+ "arch": "noarch",
+ "epoch": "0",
+ "name": "2ping",
+ "release": "2.el7",
+ "version": "3.2.1"
+ },
+ {
+ "arch": "x86_64",
+ "epoch": "0",
+ "name": "glusterfs",
+ "release": "18.4.el7",
+ "version": "3.8.4"
+ },
+ ...
+ ],
+ "recipe": {
+ "description": "An example GlusterFS server with samba",
+ "modules": [
+ {
+ "name": "glusterfs",
+ "version": "3.7.*"
+ },
+ ...
+ }
+ }
+ ]
+ }
+
+`/api/v0/projects/list[?offset=0&limit=20]`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ List all of the available projects. By default this returns the first 20 items,
+ but this can be changed by setting the `offset` and `limit` arguments.
+
+ Example::
+
+ {
+ "limit": 20,
+ "offset": 0,
+ "projects": [
+ {
+ "description": "0 A.D. (pronounced \"zero ey-dee\") is a ...",
+ "homepage": "http://play0ad.com",
+ "name": "0ad",
+ "summary": "Cross-Platform RTS Game of Ancient Warfare",
+ "upstream_vcs": "UPSTREAM_VCS"
+ },
+ ...
+ ],
+ "total": 21770
+ }
+
+`/api/v0/projects/info/<project_names>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Return information about the comma-separated list of projects. It includes the description
+ of the package along with the list of available builds.
+
+ Example::
+
+ {
+ "projects": [
+ {
+ "builds": [
+ {
+ "arch": "x86_64",
+ "build_config_ref": "BUILD_CONFIG_REF",
+ "build_env_ref": "BUILD_ENV_REF",
+ "build_time": "2017-03-01T08:39:23",
+ "changelog": "- restore incremental backups correctly, files ...",
+ "epoch": "2",
+ "metadata": {},
+ "release": "32.el7",
+ "source": {
+ "license": "GPLv3+",
+ "metadata": {},
+ "source_ref": "SOURCE_REF",
+ "version": "1.26"
+ }
+ }
+ ],
+ "description": "The GNU tar program saves many ...",
+ "homepage": "http://www.gnu.org/software/tar/",
+ "name": "tar",
+ "summary": "A GNU file archiving program",
+ "upstream_vcs": "UPSTREAM_VCS"
+ }
+ ]
+ }
+
+`/api/v0/projects/depsolve/<project_names>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Depsolve the comma-separated list of projects and return the list of NEVRAs needed
+ to satisfy the request.
+
+ Example::
+
+ {
+ "projects": [
+ {
+ "arch": "noarch",
+ "epoch": "0",
+ "name": "basesystem",
+ "release": "7.el7",
+ "version": "10.0"
+ },
+ {
+ "arch": "x86_64",
+ "epoch": "0",
+ "name": "bash",
+ "release": "28.el7",
+ "version": "4.2.46"
+ },
+ {
+ "arch": "x86_64",
+ "epoch": "0",
+ "name": "filesystem",
+ "release": "21.el7",
+ "version": "3.2"
+ },
+ ...
+ ]
+ }
+
+`/api/v0/modules/list[?offset=0&limit=20]`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Return a list of all of the available modules. This includes the name and the
+ group_type, which is always "rpm" for lorax-composer. By default this returns
+ the first 20 items. This can be changed by setting the `offset` and `limit`
+ arguments.
+
+ Example::
+
+ {
+ "limit": 20,
+ "modules": [
+ {
+ "group_type": "rpm",
+ "name": "0ad"
+ },
+ {
+ "group_type": "rpm",
+ "name": "0ad-data"
+ },
+ {
+ "group_type": "rpm",
+ "name": "0install"
+ },
+ {
+ "group_type": "rpm",
+ "name": "2048-cli"
+ },
+ ...
+            ],
+ "total": 21770
+ }
+
+`/api/v0/modules/list/<module_names>[?offset=0&limit=20]`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Return the list of comma-separated modules. Output is the same as `/modules/list`
+
+ Example::
+
+ {
+ "limit": 20,
+ "modules": [
+ {
+ "group_type": "rpm",
+ "name": "tar"
+ }
+ ],
+ "offset": 0,
+ "total": 1
+ }
+
+`/api/v0/modules/info/<module_names>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Return the module's dependencies, and the information about the module.
+
+ Example::
+
+ {
+ "modules": [
+ {
+ "dependencies": [
+ {
+ "arch": "noarch",
+ "epoch": "0",
+ "name": "basesystem",
+ "release": "7.el7",
+ "version": "10.0"
+ },
+ {
+ "arch": "x86_64",
+ "epoch": "0",
+ "name": "bash",
+ "release": "28.el7",
+ "version": "4.2.46"
+ },
+ ...
+ ],
+ "description": "The GNU tar program saves ...",
+ "homepage": "http://www.gnu.org/software/tar/",
+ "name": "tar",
+ "summary": "A GNU file archiving program",
+ "upstream_vcs": "UPSTREAM_VCS"
+ }
+ ]
+ }
+
+POST `/api/v0/compose`
+^^^^^^^^^^^^^^^^^^^^^^
+
+ Start a compose. The content type should be 'application/json' and the body of the POST
+ should look like this::
+
+ {
+ "recipe_name": "http-server",
+ "compose_type": "tar",
+ "branch": "master"
+ }
+
+ Pass it the name of the recipe, the type of output (from '/api/v0/compose/types'), and the
+ recipe branch to use. 'branch' is optional and will default to master. It will create a new
+ build and add it to the queue. It returns the build uuid and a status if it succeeds::
+
+ {
+ "build_id": "e6fa6db4-9c81-4b70-870f-a697ca405cdf",
+ "status": true
+ }
+
+`/api/v0/compose/types`
+^^^^^^^^^^^^^^^^^^^^^^^
+
+ Returns the list of supported output types that are valid for use with 'POST /api/v0/compose'
+
+ {
+ "types": [
+ {
+ "enabled": true,
+ "name": "tar"
+ }
+ ]
+ }
+
+`/api/v0/compose/queue`
+^^^^^^^^^^^^^^^^^^^^^^^
+
+ Return the status of the build queue. It includes information about the builds waiting,
+ and the build that is running.
+
+ Example::
+
+ {
+ "new": [
+ {
+ "id": "45502a6d-06e8-48a5-a215-2b4174b3614b",
+ "recipe": "glusterfs",
+ "queue_status": "WAITING",
+ "timestamp": 1517362647.4570868,
+ "version": "0.0.6"
+ },
+ {
+ "id": "6d292bd0-bec7-4825-8d7d-41ef9c3e4b73",
+ "recipe": "kubernetes",
+ "queue_status": "WAITING",
+ "timestamp": 1517362659.0034983,
+ "version": "0.0.1"
+ }
+ ],
+ "run": [
+ {
+ "id": "745712b2-96db-44c0-8014-fe925c35e795",
+ "recipe": "glusterfs",
+ "queue_status": "RUNNING",
+ "timestamp": 1517362633.7965999,
+ "version": "0.0.6"
+ }
+ ]
+ }
+
+`/api/v0/compose/finished`
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Return the details on all of the finished composes on the system.
+
+ Example::
+
+ {
+ "finished": [
+ {
+ "id": "70b84195-9817-4b8a-af92-45e380f39894",
+ "recipe": "glusterfs",
+ "queue_status": "FINISHED",
+ "timestamp": 1517351003.8210032,
+ "version": "0.0.6"
+ },
+ {
+ "id": "e695affd-397f-4af9-9022-add2636e7459",
+ "recipe": "glusterfs",
+ "queue_status": "FINISHED",
+ "timestamp": 1517362289.7193348,
+ "version": "0.0.6"
+ }
+ ]
+ }
+
+`/api/v0/compose/failed`
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Return the details on all of the failed composes on the system.
+
+ Example::
+
+ {
+ "failed": [
+ {
+ "id": "8c8435ef-d6bd-4c68-9bf1-a2ef832e6b1a",
+ "recipe": "http-server",
+ "queue_status": "FAILED",
+ "timestamp": 1517523249.9301329,
+ "version": "0.0.2"
+ }
+ ]
+ }
+
+`/api/v0/compose/status/<uuids>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Return the details for each of the comma-separated list of uuids.
+
+ Example::
+
+ {
+ "uuids": [
+ {
+ "id": "8c8435ef-d6bd-4c68-9bf1-a2ef832e6b1a",
+ "recipe": "http-server",
+ "queue_status": "FINISHED",
+ "timestamp": 1517523644.2384307,
+ "version": "0.0.2"
+ },
+ {
+ "id": "45502a6d-06e8-48a5-a215-2b4174b3614b",
+ "recipe": "glusterfs",
+ "queue_status": "FINISHED",
+ "timestamp": 1517363442.188399,
+ "version": "0.0.6"
+ }
+ ]
+ }
+
+DELETE `/api/v0/compose/cancel/<uuid>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Cancel the build, if it is not finished, and delete the results. It will return a
+ status of True if it is successful.
+
+ Example::
+
+ {
+ "status": true,
+ "uuid": "03397f8d-acff-4cdb-bd31-f629b7a948f5"
+ }
+
+DELETE `/api/v0/compose/delete/<uuids>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Delete the list of comma-separated uuids from the compose results.
+
+ Example::
+
+ {
+ "errors": [],
+ "uuids": [
+ {
+ "status": true,
+ "uuid": "ae1bf7e3-7f16-4c9f-b36e-3726a1093fd0"
+ }
+ ]
+ }
+
+`/api/v0/compose/info/<uuid>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Get detailed information about the compose. The returned JSON string will
+ contain the following information:
+
+    * id - The uuid of the composition
+ * config - containing the configuration settings used to run Anaconda
+ * recipe - The depsolved recipe used to generate the kickstart
+ * commit - The (local) git commit hash for the recipe used
+ * deps - The NEVRA of all of the dependencies used in the composition
+ * compose_type - The type of output generated (tar, iso, etc.)
+ * queue_status - The final status of the composition (FINISHED or FAILED)
+
+ Example::
+
+ {
+ "commit": "7078e521a54b12eae31c3fd028680da7a0815a4d",
+ "compose_type": "tar",
+ "config": {
+ "anaconda_args": "",
+ "armplatform": "",
+ "compress_args": [],
+ "compression": "xz",
+ "image_name": "root.tar.xz",
+ ...
+ },
+ "deps": {
+ "packages": [
+ {
+ "arch": "x86_64",
+ "epoch": "0",
+ "name": "acl",
+ "release": "14.el7",
+ "version": "2.2.51"
+ }
+ ]
+ },
+ "id": "c30b7d80-523b-4a23-ad52-61b799739ce8",
+ "queue_status": "FINISHED",
+ "recipe": {
+ "description": "An example kubernetes master",
+ ...
+ }
+ }
+
+`/api/v0/compose/metadata/<uuid>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Returns a .tar of the metadata used for the build. This includes all the
+ information needed to reproduce the build, including the final kickstart
+ populated with repository and package NEVRA.
+
+ The mime type is set to 'application/x-tar' and the filename is set to
+ UUID-metadata.tar
+
+ The .tar is uncompressed, but is not large.
+
+`/api/v0/compose/results/<uuid>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Returns a .tar of the metadata, logs, and output image of the build. This
+ includes all the information needed to reproduce the build, including the
+ final kickstart populated with repository and package NEVRA. The output image
+ is already in compressed form so the returned tar is not compressed.
+
+ The mime type is set to 'application/x-tar' and the filename is set to
+ UUID.tar
+
+`/api/v0/compose/logs/<uuid>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Returns a .tar of the anaconda build logs. The tar is not compressed, but is
+ not large.
+
+ The mime type is set to 'application/x-tar' and the filename is set to
+ UUID-logs.tar
+
+`/api/v0/compose/image/<uuid>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Returns the output image from the build. The filename is set to the filename
+ from the build. eg. root.tar.xz or boot.iso.
+
+`/api/v0/compose/log/<uuid>[?size=kbytes]`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Returns the end of the anaconda.log. The size parameter is optional and defaults to 1Mbytes
+ if it is not included. The returned data is raw text from the end of the logfile, starting on
+    a line boundary.
+
+ Example::
+
+ 12:59:24,222 INFO anaconda: Running Thread: AnaConfigurationThread (140629395244800)
+ 12:59:24,223 INFO anaconda: Configuring installed system
+ 12:59:24,912 INFO anaconda: Configuring installed system
+ 12:59:24,912 INFO anaconda: Creating users
+ 12:59:24,913 INFO anaconda: Clearing libuser.conf at /tmp/libuser.Dyy8Gj
+ 12:59:25,154 INFO anaconda: Creating users
+ 12:59:25,155 INFO anaconda: Configuring addons
+ 12:59:25,155 INFO anaconda: Configuring addons
+ 12:59:25,155 INFO anaconda: Generating initramfs
+ 12:59:49,467 INFO anaconda: Generating initramfs
+ 12:59:49,467 INFO anaconda: Running post-installation scripts
+ 12:59:49,467 INFO anaconda: Running kickstart %%post script(s)
+ 12:59:50,782 INFO anaconda: All kickstart %%post script(s) have been run
+ 12:59:50,782 INFO anaconda: Running post-installation scripts
+ 12:59:50,784 INFO anaconda: Thread Done: AnaConfigurationThread (140629395244800)
+
+"""
+
+import logging
+log = logging.getLogger("lorax-composer")
+
+from flask import jsonify, request, Response, send_file
+
+from pylorax.api.compose import start_build, compose_types
+from pylorax.api.crossdomain import crossdomain
+from pylorax.api.projects import projects_list, projects_info, projects_depsolve
+from pylorax.api.projects import modules_list, modules_info, ProjectsError
+from pylorax.api.queue import queue_status, build_status, uuid_delete, uuid_status, uuid_info
+from pylorax.api.queue import uuid_tar, uuid_image, uuid_cancel, uuid_log
+from pylorax.api.recipes import list_branch_files, read_recipe_commit, recipe_filename, list_commits
+from pylorax.api.recipes import recipe_from_dict, recipe_from_toml, commit_recipe, delete_recipe, revert_recipe
+from pylorax.api.recipes import tag_recipe_commit, recipe_diff
+from pylorax.api.workspace import workspace_read, workspace_write, workspace_delete
+
+# The API functions don't actually get called by any code here
+# pylint: disable=unused-variable
+
def take_limits(iterable, offset, limit):
    """Return the page of *iterable* selected by offset and limit.

    :param iterable: The object to limit
    :type iterable: iter
    :param offset: The number of items to skip
    :type offset: int
    :param limit: The total number of items to return
    :type limit: int
    :returns: A subset of the iterable
    """
    # Skip the first `offset` entries, then keep at most `limit` of the rest.
    skipped = iterable[offset:]
    return skipped[:limit]
+
+[docs]def v0_api(api):
+ # Note that Sphinx will not generate documentations for any of these.
+ @api.route("/api/v0/test")
+ @crossdomain(origin="*")
+ def v0_test():
+ return "API v0 test"
+
+ @api.route("/api/v0/status")
+ @crossdomain(origin="*")
+ def v0_status():
+ return jsonify(build="devel", api="0", db_version="0", schema_version="0", db_supported=False)
+
+ @api.route("/api/v0/recipes/list")
+ @crossdomain(origin="*")
+ def v0_recipes_list():
+ """List the available recipes on a branch."""
+ branch = request.args.get("branch", "master")
+ try:
+ limit = int(request.args.get("limit", "20"))
+ offset = int(request.args.get("offset", "0"))
+ except ValueError as e:
+ return jsonify(error={"msg":str(e)}), 400
+
+ with api.config["GITLOCK"].lock:
+ recipes = take_limits(map(lambda f: f[:-5], list_branch_files(api.config["GITLOCK"].repo, branch)), offset, limit)
+ return jsonify(recipes=recipes, limit=limit, offset=offset, total=len(recipes))
+
+ @api.route("/api/v0/recipes/info/<recipe_names>")
+ @crossdomain(origin="*")
+ def v0_recipes_info(recipe_names):
+ """Return the contents of the recipe, or a list of recipes"""
+ branch = request.args.get("branch", "master")
+ recipes = []
+ changes = []
+ errors = []
+ for recipe_name in [n.strip() for n in recipe_names.split(",")]:
+ exceptions = []
+ # Get the workspace version (if it exists)
+ try:
+ with api.config["GITLOCK"].lock:
+ ws_recipe = workspace_read(api.config["GITLOCK"].repo, branch, recipe_name)
+ except Exception as e:
+ ws_recipe = None
+ exceptions.append(str(e))
+ log.error("(v0_recipes_info) %s", str(e))
+
+ # Get the git version (if it exists)
+ try:
+ with api.config["GITLOCK"].lock:
+ git_recipe = read_recipe_commit(api.config["GITLOCK"].repo, branch, recipe_name)
+ except Exception as e:
+ git_recipe = None
+ exceptions.append(str(e))
+ log.error("(v0_recipes_info) %s", str(e))
+
+ if not ws_recipe and not git_recipe:
+ # Neither recipe, return an error
+ errors.append({"recipe":recipe_name, "msg":", ".join(exceptions)})
+ elif ws_recipe and not git_recipe:
+ # No git recipe, return the workspace recipe
+ changes.append({"name":recipe_name, "changed":True})
+ recipes.append(ws_recipe)
+ elif not ws_recipe and git_recipe:
+ # No workspace recipe, no change, return the git recipe
+ changes.append({"name":recipe_name, "changed":False})
+ recipes.append(git_recipe)
+ else:
+ # Both exist, maybe changed, return the workspace recipe
+ changes.append({"name":recipe_name, "changed":ws_recipe != git_recipe})
+ recipes.append(ws_recipe)
+
+ # Sort all the results by case-insensitive recipe name
+ changes = sorted(changes, key=lambda c: c["name"].lower())
+ recipes = sorted(recipes, key=lambda r: r["name"].lower())
+ errors = sorted(errors, key=lambda e: e["recipe"].lower())
+
+ return jsonify(changes=changes, recipes=recipes, errors=errors)
+
+ @api.route("/api/v0/recipes/changes/<recipe_names>")
+ @crossdomain(origin="*")
+ def v0_recipes_changes(recipe_names):
+ """Return the changes to a recipe or list of recipes"""
+ branch = request.args.get("branch", "master")
+ try:
+ limit = int(request.args.get("limit", "20"))
+ offset = int(request.args.get("offset", "0"))
+ except ValueError as e:
+ return jsonify(error={"msg":str(e)}), 400
+
+ recipes = []
+ errors = []
+ for recipe_name in [n.strip() for n in recipe_names.split(",")]:
+ filename = recipe_filename(recipe_name)
+ try:
+ with api.config["GITLOCK"].lock:
+ commits = take_limits(list_commits(api.config["GITLOCK"].repo, branch, filename), offset, limit)
+ except Exception as e:
+ errors.append({"recipe":recipe_name, "msg":e})
+ log.error("(v0_recipes_changes) %s", str(e))
+ else:
+ recipes.append({"name":recipe_name, "changes":commits, "total":len(commits)})
+
+ recipes = sorted(recipes, key=lambda r: r["name"].lower())
+ errors = sorted(errors, key=lambda e: e["recipe"].lower())
+
+ return jsonify(recipes=recipes, errors=errors, offset=offset, limit=limit)
+
+ @api.route("/api/v0/recipes/new", methods=["POST"])
+ @crossdomain(origin="*")
+ def v0_recipes_new():
+ """Commit a new recipe"""
+ branch = request.args.get("branch", "master")
+ try:
+ if request.headers['Content-Type'] == "text/x-toml":
+ recipe = recipe_from_toml(request.data)
+ else:
+ recipe = recipe_from_dict(request.get_json(cache=False))
+
+ with api.config["GITLOCK"].lock:
+ commit_recipe(api.config["GITLOCK"].repo, branch, recipe)
+
+ # Read the recipe with new version and write it to the workspace
+ recipe = read_recipe_commit(api.config["GITLOCK"].repo, branch, recipe["name"])
+ workspace_write(api.config["GITLOCK"].repo, branch, recipe)
+ except Exception as e:
+ log.error("(v0_recipes_new) %s", str(e))
+ return jsonify(status=False, error={"msg":str(e)}), 400
+ else:
+ return jsonify(status=True)
+
+ @api.route("/api/v0/recipes/delete/<recipe_name>", methods=["DELETE"])
+ @crossdomain(origin="*")
+ def v0_recipes_delete(recipe_name):
+ """Delete a recipe from git"""
+ branch = request.args.get("branch", "master")
+ try:
+ with api.config["GITLOCK"].lock:
+ delete_recipe(api.config["GITLOCK"].repo, branch, recipe_name)
+ except Exception as e:
+ log.error("(v0_recipes_delete) %s", str(e))
+ return jsonify(status=False, error={"msg":str(e)}), 400
+ else:
+ return jsonify(status=True)
+
+ @api.route("/api/v0/recipes/workspace", methods=["POST"])
+ @crossdomain(origin="*")
+ def v0_recipes_workspace():
+ """Write a recipe to the workspace"""
+ branch = request.args.get("branch", "master")
+ try:
+ if request.headers['Content-Type'] == "text/x-toml":
+ recipe = recipe_from_toml(request.data)
+ else:
+ recipe = recipe_from_dict(request.get_json(cache=False))
+
+ with api.config["GITLOCK"].lock:
+ workspace_write(api.config["GITLOCK"].repo, branch, recipe)
+ except Exception as e:
+ log.error("(v0_recipes_workspace) %s", str(e))
+ return jsonify(status=False, error={"msg":str(e)}), 400
+ else:
+ return jsonify(status=True)
+
+ @api.route("/api/v0/recipes/workspace/<recipe_name>", methods=["DELETE"])
+ @crossdomain(origin="*")
+ def v0_recipes_delete_workspace(recipe_name):
+ """Delete a recipe from the workspace"""
+ branch = request.args.get("branch", "master")
+ try:
+ with api.config["GITLOCK"].lock:
+ workspace_delete(api.config["GITLOCK"].repo, branch, recipe_name)
+ except Exception as e:
+ log.error("(v0_recipes_delete_workspace) %s", str(e))
+ return jsonify(status=False, error={"msg":str(e)}), 400
+ else:
+ return jsonify(status=True)
+
+ @api.route("/api/v0/recipes/undo/<recipe_name>/<commit>", methods=["POST"])
+ @crossdomain(origin="*")
+ def v0_recipes_undo(recipe_name, commit):
+ """Undo changes to a recipe by reverting to a previous commit."""
+ branch = request.args.get("branch", "master")
+ try:
+ with api.config["GITLOCK"].lock:
+ revert_recipe(api.config["GITLOCK"].repo, branch, recipe_name, commit)
+
+ # Read the new recipe and write it to the workspace
+ recipe = read_recipe_commit(api.config["GITLOCK"].repo, branch, recipe_name)
+ workspace_write(api.config["GITLOCK"].repo, branch, recipe)
+ except Exception as e:
+ log.error("(v0_recipes_undo) %s", str(e))
+ return jsonify(status=False, error={"msg":str(e)}), 400
+ else:
+ return jsonify(status=True)
+
+ @api.route("/api/v0/recipes/tag/<recipe_name>", methods=["POST"])
+ @crossdomain(origin="*")
+ def v0_recipes_tag(recipe_name):
+ """Tag a recipe's latest recipe commit as a 'revision'"""
+ branch = request.args.get("branch", "master")
+ try:
+ with api.config["GITLOCK"].lock:
+ tag_recipe_commit(api.config["GITLOCK"].repo, branch, recipe_name)
+ except Exception as e:
+ log.error("(v0_recipes_tag) %s", str(e))
+ return jsonify(status=False, error={"msg":str(e)}), 400
+ else:
+ return jsonify(status=True)
+
+ @api.route("/api/v0/recipes/diff/<recipe_name>/<from_commit>/<to_commit>")
+ @crossdomain(origin="*")
+ def v0_recipes_diff(recipe_name, from_commit, to_commit):
+ """Return the differences between two commits of a recipe"""
+ branch = request.args.get("branch", "master")
+ try:
+ if from_commit == "NEWEST":
+ with api.config["GITLOCK"].lock:
+ old_recipe = read_recipe_commit(api.config["GITLOCK"].repo, branch, recipe_name)
+ else:
+ with api.config["GITLOCK"].lock:
+ old_recipe = read_recipe_commit(api.config["GITLOCK"].repo, branch, recipe_name, from_commit)
+ except Exception as e:
+ log.error("(v0_recipes_diff) %s", str(e))
+ return jsonify(error={"msg":str(e)}), 400
+
+ try:
+ if to_commit == "WORKSPACE":
+ with api.config["GITLOCK"].lock:
+ new_recipe = workspace_read(api.config["GITLOCK"].repo, branch, recipe_name)
+ # If there is no workspace, use the newest commit instead
+ if not new_recipe:
+ with api.config["GITLOCK"].lock:
+ new_recipe = read_recipe_commit(api.config["GITLOCK"].repo, branch, recipe_name)
+ elif to_commit == "NEWEST":
+ with api.config["GITLOCK"].lock:
+ new_recipe = read_recipe_commit(api.config["GITLOCK"].repo, branch, recipe_name)
+ else:
+ with api.config["GITLOCK"].lock:
+ new_recipe = read_recipe_commit(api.config["GITLOCK"].repo, branch, recipe_name, to_commit)
+ except Exception as e:
+ log.error("(v0_recipes_diff) %s", str(e))
+ return jsonify(error={"msg":str(e)}), 400
+
+ diff = recipe_diff(old_recipe, new_recipe)
+ return jsonify(diff=diff)
+
+ @api.route("/api/v0/recipes/freeze/<recipe_names>")
+ @crossdomain(origin="*")
+ def v0_recipes_freeze(recipe_names):
+ """Return the recipe with the exact modules and packages selected by depsolve"""
+ branch = request.args.get("branch", "master")
+ recipes = []
+ errors = []
+ for recipe_name in [n.strip() for n in sorted(recipe_names.split(","), key=lambda n: n.lower())]:
+ # get the recipe
+ # Get the workspace version (if it exists)
+ recipe = None
+ try:
+ with api.config["GITLOCK"].lock:
+ recipe = workspace_read(api.config["GITLOCK"].repo, branch, recipe_name)
+ except Exception:
+ pass
+
+ if not recipe:
+ # No workspace version, get the git version (if it exists)
+ try:
+ with api.config["GITLOCK"].lock:
+ recipe = read_recipe_commit(api.config["GITLOCK"].repo, branch, recipe_name)
+ except Exception as e:
+ errors.append({"recipe":recipe_name, "msg":str(e)})
+ log.error("(v0_recipes_freeze) %s", str(e))
+
+ # No recipe found, skip it.
+ if not recipe:
+ errors.append({"recipe":recipe_name, "msg":"Recipe not found"})
+ continue
+
+ # Combine modules and packages and depsolve the list
+ # TODO include the version/glob in the depsolving
+ module_names = recipe.module_names
+ package_names = recipe.package_names
+ projects = sorted(set(module_names+package_names), key=lambda n: n.lower())
+ deps = []
+ try:
+ with api.config["YUMLOCK"].lock:
+ deps = projects_depsolve(api.config["YUMLOCK"].yb, projects)
+ except ProjectsError as e:
+ errors.append({"recipe":recipe_name, "msg":str(e)})
+ log.error("(v0_recipes_freeze) %s", str(e))
+
+ recipes.append({"recipe": recipe.freeze(deps)})
+
+ return jsonify(recipes=recipes, errors=errors)
+
+ @api.route("/api/v0/recipes/depsolve/<recipe_names>")
+ @crossdomain(origin="*")
+ def v0_recipes_depsolve(recipe_names):
+ """Return the dependencies for a recipe"""
+ branch = request.args.get("branch", "master")
+ recipes = []
+ errors = []
+ for recipe_name in [n.strip() for n in sorted(recipe_names.split(","), key=lambda n: n.lower())]:
+ # get the recipe
+ # Get the workspace version (if it exists)
+ recipe = None
+ try:
+ with api.config["GITLOCK"].lock:
+ recipe = workspace_read(api.config["GITLOCK"].repo, branch, recipe_name)
+ except Exception:
+ pass
+
+ if not recipe:
+ # No workspace version, get the git version (if it exists)
+ try:
+ with api.config["GITLOCK"].lock:
+ recipe = read_recipe_commit(api.config["GITLOCK"].repo, branch, recipe_name)
+ except Exception as e:
+ errors.append({"recipe":recipe_name, "msg":str(e)})
+ log.error("(v0_recipes_depsolve) %s", str(e))
+
+ # No recipe found, skip it.
+ if not recipe:
+ errors.append({"recipe":recipe_name, "msg":"Recipe not found"})
+ continue
+
+ # Combine modules and packages and depsolve the list
+ # TODO include the version/glob in the depsolving
+ module_names = map(lambda m: m["name"], recipe["modules"] or [])
+ package_names = map(lambda p: p["name"], recipe["packages"] or [])
+ projects = sorted(set(module_names+package_names), key=lambda n: n.lower())
+ deps = []
+ try:
+ with api.config["YUMLOCK"].lock:
+ deps = projects_depsolve(api.config["YUMLOCK"].yb, projects)
+ except ProjectsError as e:
+ errors.append({"recipe":recipe_name, "msg":str(e)})
+ log.error("(v0_recipes_depsolve) %s", str(e))
+
+ # Get the NEVRA's of the modules and projects, add as "modules"
+ modules = []
+ for dep in deps:
+ if dep["name"] in projects:
+ modules.append(dep)
+ modules = sorted(modules, key=lambda m: m["name"].lower())
+
+ recipes.append({"recipe":recipe, "dependencies":deps, "modules":modules})
+
+ return jsonify(recipes=recipes, errors=errors)
+
+ @api.route("/api/v0/projects/list")
+ @crossdomain(origin="*")
+ def v0_projects_list():
+ """List all of the available projects/packages"""
+ try:
+ limit = int(request.args.get("limit", "20"))
+ offset = int(request.args.get("offset", "0"))
+ except ValueError as e:
+ return jsonify(error={"msg":str(e)}), 400
+
+ try:
+ with api.config["YUMLOCK"].lock:
+ available = projects_list(api.config["YUMLOCK"].yb)
+ except ProjectsError as e:
+ log.error("(v0_projects_list) %s", str(e))
+ return jsonify(error={"msg":str(e)}), 400
+
+ projects = take_limits(available, offset, limit)
+ return jsonify(projects=projects, offset=offset, limit=limit, total=len(available))
+
+ @api.route("/api/v0/projects/info/<project_names>")
+ @crossdomain(origin="*")
+ def v0_projects_info(project_names):
+ """Return detailed information about the listed projects"""
+ try:
+ with api.config["YUMLOCK"].lock:
+ projects = projects_info(api.config["YUMLOCK"].yb, project_names.split(","))
+ except ProjectsError as e:
+ log.error("(v0_projects_info) %s", str(e))
+ return jsonify(error={"msg":str(e)}), 400
+
+ return jsonify(projects=projects)
+
+ @api.route("/api/v0/projects/depsolve/<project_names>")
+ @crossdomain(origin="*")
+ def v0_projects_depsolve(project_names):
+ """Return detailed information about the listed projects"""
+ try:
+ with api.config["YUMLOCK"].lock:
+ deps = projects_depsolve(api.config["YUMLOCK"].yb, project_names.split(","))
+ except ProjectsError as e:
+ log.error("(v0_projects_depsolve) %s", str(e))
+ return jsonify(error={"msg":str(e)}), 400
+
+ return jsonify(projects=deps)
+
+ @api.route("/api/v0/modules/list")
+ @api.route("/api/v0/modules/list/<module_names>")
+ @crossdomain(origin="*")
+ def v0_modules_list(module_names=None):
+ """List available modules, filtering by module_names"""
+ try:
+ limit = int(request.args.get("limit", "20"))
+ offset = int(request.args.get("offset", "0"))
+ except ValueError as e:
+ return jsonify(error={"msg":str(e)}), 400
+
+ if module_names:
+ module_names = module_names.split(",")
+
+ try:
+ with api.config["YUMLOCK"].lock:
+ available = modules_list(api.config["YUMLOCK"].yb, module_names)
+ except ProjectsError as e:
+ log.error("(v0_modules_list) %s", str(e))
+ return jsonify(error={"msg":str(e)}), 400
+
+ modules = take_limits(available, offset, limit)
+ return jsonify(modules=modules, offset=offset, limit=limit, total=len(available))
+
+ @api.route("/api/v0/modules/info/<module_names>")
+ @crossdomain(origin="*")
+ def v0_modules_info(module_names):
+ """Return detailed information about the listed modules"""
+ try:
+ with api.config["YUMLOCK"].lock:
+ modules = modules_info(api.config["YUMLOCK"].yb, module_names.split(","))
+ except ProjectsError as e:
+ log.error("(v0_modules_info) %s", str(e))
+ return jsonify(error={"msg":str(e)}), 400
+
+ return jsonify(modules=modules)
+
    @api.route("/api/v0/compose", methods=["POST"])
    @crossdomain(origin="*")
    def v0_compose_start():
        """Start a compose

        The body of the post should have these fields:
          recipe_name - The recipe name from /recipes/list/
          compose_type - The type of output to create, from /compose/types
          branch - Optional, defaults to master, selects the git branch to use for the recipe.

        :returns: JSON with status=True and the new build_id, or a 400 with
                  status=False and an "error" object describing the problem
        """
        # Passing ?test=1 will generate a fake FAILED compose.
        # Passing ?test=2 will generate a fake FINISHED compose.
        try:
            test_mode = int(request.args.get("test", "0"))
        except ValueError:
            # A malformed ?test= value just disables test mode
            test_mode = 0

        compose = request.get_json(cache=False)

        errors = []
        if not compose:
            return jsonify(status=False, error={"msg":"Missing POST body"}), 400

        if "recipe_name" not in compose:
            errors.append("No 'recipe_name' in the JSON request")
        else:
            recipe_name = compose["recipe_name"]

        # branch is optional and falls back to master when missing or empty
        if "branch" not in compose or not compose["branch"]:
            branch = "master"
        else:
            branch = compose["branch"]

        if "compose_type" not in compose:
            errors.append("No 'compose_type' in the JSON request")
        else:
            compose_type = compose["compose_type"]

        # Report all missing-field problems in a single response
        if errors:
            return jsonify(status=False, error={"msg":"\n".join(errors)}), 400

        try:
            build_id = start_build(api.config["COMPOSER_CFG"], api.config["YUMLOCK"], api.config["GITLOCK"],
                                   branch, recipe_name, compose_type, test_mode)
        except Exception as e:
            # start_build failures (bad recipe, bad type, queue errors) become a 400
            return jsonify(status=False, error={"msg":str(e)}), 400

        return jsonify(status=True, build_id=build_id)
+
+ @api.route("/api/v0/compose/types")
+ @crossdomain(origin="*")
+ def v0_compose_types():
+ """Return the list of enabled output types
+
+ (only enabled types are returned)
+ """
+ share_dir = api.config["COMPOSER_CFG"].get("composer", "share_dir")
+ return jsonify(types=[{"name": k, "enabled": True} for k in compose_types(share_dir)])
+
    @api.route("/api/v0/compose/queue")
    @crossdomain(origin="*")
    def v0_compose_queue():
        """Return the status of the new and running queues

        :returns: JSON produced directly from queue_status()
        """
        return jsonify(queue_status(api.config["COMPOSER_CFG"]))
+
    @api.route("/api/v0/compose/finished")
    @crossdomain(origin="*")
    def v0_compose_finished():
        """Return the list of finished composes

        :returns: JSON with a "finished" list of builds in the FINISHED state
        """
        return jsonify(finished=build_status(api.config["COMPOSER_CFG"], "FINISHED"))
+
    @api.route("/api/v0/compose/failed")
    @crossdomain(origin="*")
    def v0_compose_failed():
        """Return the list of failed composes

        :returns: JSON with a "failed" list of builds in the FAILED state
        """
        return jsonify(failed=build_status(api.config["COMPOSER_CFG"], "FAILED"))
+
+ @api.route("/api/v0/compose/status/<uuids>")
+ @crossdomain(origin="*")
+ def v0_compose_status(uuids):
+ """Return the status of the listed uuids"""
+ results = []
+ for uuid in [n.strip().lower() for n in uuids.split(",")]:
+ details = uuid_status(api.config["COMPOSER_CFG"], uuid)
+ if details is not None:
+ results.append(details)
+
+ return jsonify(uuids=results)
+
    @api.route("/api/v0/compose/cancel/<uuid>", methods=["DELETE"])
    @crossdomain(origin="*")
    def v0_compose_cancel(uuid):
        """Cancel a running compose and delete its results directory

        :param uuid: The build uuid from the URL
        :type uuid: str
        :returns: JSON with status=True on success; status=False with a "msg"
                  (400 only for an unknown uuid) otherwise
        """
        status = uuid_status(api.config["COMPOSER_CFG"], uuid)
        if status is None:
            return jsonify(status=False, msg="%s is not a valid build uuid" % uuid), 400

        # Only builds that have not finished can be cancelled
        if status["queue_status"] not in ["WAITING", "RUNNING"]:
            return jsonify(status=False, uuid=uuid, msg="Cannot cancel a build that is in the %s state" % status["queue_status"])

        try:
            uuid_cancel(api.config["COMPOSER_CFG"], uuid)
        except Exception as e:
            return jsonify(status=False, uuid=uuid, msg=str(e))
        else:
            return jsonify(status=True, uuid=uuid)
+
    @api.route("/api/v0/compose/delete/<uuids>", methods=["DELETE"])
    @crossdomain(origin="*")
    def v0_compose_delete(uuids):
        """Delete the compose results for the listed uuids

        :param uuids: Comma-separated build uuids from the URL
        :type uuids: str
        :returns: JSON with a "uuids" list of successful deletes and an
                  "errors" list of per-uuid failures (always a 200)
        """
        results = []
        errors = []
        for uuid in [n.strip().lower() for n in uuids.split(",")]:
            status = uuid_status(api.config["COMPOSER_CFG"], uuid)
            if status is None:
                errors.append({"uuid": uuid, "msg": "Not a valid build uuid"})
            elif status["queue_status"] not in ["FINISHED", "FAILED"]:
                # Running/waiting builds must be cancelled, not deleted
                errors.append({"uuid":uuid, "msg":"Build not in FINISHED or FAILED."})
            else:
                try:
                    uuid_delete(api.config["COMPOSER_CFG"], uuid)
                except Exception as e:
                    errors.append({"uuid":uuid, "msg":str(e)})
                else:
                    results.append({"uuid":uuid, "status":True})
        return jsonify(uuids=results, errors=errors)
+
    @api.route("/api/v0/compose/info/<uuid>")
    @crossdomain(origin="*")
    def v0_compose_info(uuid):
        """Return detailed info about a compose

        :param uuid: The build uuid from the URL
        :type uuid: str
        :returns: JSON with the fields of uuid_info(), or status=False with a
                  "msg" on error
        """
        try:
            info = uuid_info(api.config["COMPOSER_CFG"], uuid)
        except Exception as e:
            return jsonify(status=False, msg=str(e))

        return jsonify(**info)
+
    @api.route("/api/v0/compose/metadata/<uuid>")
    @crossdomain(origin="*")
    def v0_compose_metadata(uuid):
        """Return a tar of the metadata for the build

        :param uuid: The build uuid from the URL
        :type uuid: str
        :returns: A streamed application/x-tar attachment, or a JSON error
        """
        status = uuid_status(api.config["COMPOSER_CFG"], uuid)
        if status is None:
            return jsonify(status=False, msg="%s is not a valid build uuid" % uuid), 400
        if status["queue_status"] not in ["FINISHED", "FAILED"]:
            return jsonify(status=False, uuid=uuid, msg="Build not in FINISHED or FAILED.")
        else:
            # Stream the tar directly; direct_passthrough avoids buffering it in Flask
            return Response(uuid_tar(api.config["COMPOSER_CFG"], uuid, metadata=True, image=False, logs=False),
                            mimetype="application/x-tar",
                            headers=[("Content-Disposition", "attachment; filename=%s-metadata.tar;" % uuid)],
                            direct_passthrough=True)
+
    @api.route("/api/v0/compose/results/<uuid>")
    @crossdomain(origin="*")
    def v0_compose_results(uuid):
        """Return a tar of the metadata and the results for the build

        :param uuid: The build uuid from the URL
        :type uuid: str
        :returns: A streamed application/x-tar attachment, or a JSON error
        """
        status = uuid_status(api.config["COMPOSER_CFG"], uuid)
        if status is None:
            return jsonify(status=False, msg="%s is not a valid build uuid" % uuid), 400
        elif status["queue_status"] not in ["FINISHED", "FAILED"]:
            return jsonify(status=False, uuid=uuid, msg="Build not in FINISHED or FAILED.")
        else:
            # Stream the tar directly; direct_passthrough avoids buffering it in Flask
            return Response(uuid_tar(api.config["COMPOSER_CFG"], uuid, metadata=True, image=True, logs=True),
                            mimetype="application/x-tar",
                            headers=[("Content-Disposition", "attachment; filename=%s.tar;" % uuid)],
                            direct_passthrough=True)
+
+ @api.route("/api/v0/compose/logs/<uuid>")
+ @crossdomain(origin="*")
+ def v0_compose_logs(uuid):
+ """Return a tar of the metadata for the build"""
+ status = uuid_status(api.config["COMPOSER_CFG"], uuid)
+ if status is None:
+ return jsonify(status=False, msg="%s is not a valid build uuid"), 400
+ elif status["queue_status"] not in ["FINISHED", "FAILED"]:
+ return jsonify(status=False, uuid=uuid, msg="Build not in FINISHED or FAILED.")
+ else:
+ return Response(uuid_tar(api.config["COMPOSER_CFG"], uuid, metadata=False, image=False, logs=True),
+ mimetype="application/x-tar",
+ headers=[("Content-Disposition", "attachment; filename=%s-logs.tar;" % uuid)],
+ direct_passthrough=True)
+
    @api.route("/api/v0/compose/image/<uuid>")
    @crossdomain(origin="*")
    def v0_compose_image(uuid):
        """Return the output image for the build

        :param uuid: The build uuid from the URL
        :type uuid: str
        :returns: The image file as an attachment, or a JSON error
        """
        status = uuid_status(api.config["COMPOSER_CFG"], uuid)
        if status is None:
            return jsonify(status=False, msg="%s is not a valid build uuid" % uuid), 400
        elif status["queue_status"] not in ["FINISHED", "FAILED"]:
            return jsonify(status=False, uuid=uuid, msg="Build not in FINISHED or FAILED.")
        else:
            image_name, image_path = uuid_image(api.config["COMPOSER_CFG"], uuid)

            # XXX - Will mime type guessing work for all our output?
            return send_file(image_path, as_attachment=True, attachment_filename=image_name, add_etags=False)
+
    @api.route("/api/v0/compose/log/<uuid>")
    @crossdomain(origin="*")
    def v0_compose_log_tail(uuid):
        """Return the end of the main anaconda.log, defaults to 1Mbytes

        :param uuid: The build uuid from the URL
        :type uuid: str
        :returns: The tail of the log as the response body, or a JSON error

        The ?size= query argument selects how much to return; its default of
        1024 together with the "1Mbytes" above suggests the unit is KiB --
        TODO confirm against uuid_log().
        """
        try:
            size = int(request.args.get("size", "1024"))
        except ValueError as e:
            return jsonify(error={"msg":str(e)}), 400

        status = uuid_status(api.config["COMPOSER_CFG"], uuid)
        # WAITING builds have no log file yet
        if status is None or status["queue_status"] == "WAITING":
            return jsonify(status=False, uuid=uuid, msg="Build has not started yet. No logs to view")
        try:
            return Response(uuid_log(api.config["COMPOSER_CFG"], uuid, size), direct_passthrough=True)
        except RuntimeError as e:
            return jsonify(status=False, uuid=uuid, msg=str(e))
+
+#
+# Copyright (C) 2017 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+import os
+
+from pylorax.api.recipes import recipe_filename, recipe_from_toml, RecipeFileError
+from pylorax.sysutils import joinpaths
+
+
def workspace_dir(repo, branch):
    """Create the workspace's path from a Repository and branch

    :param repo: Open repository
    :type repo: Git.Repository
    :param branch: Branch name
    :type branch: str
    :returns: The path to the branch's workspace directory
    :rtype: str

    """
    # Workspaces live under <repo location>/workspace/<branch>
    return joinpaths(repo.get_location().get_path(), "workspace", branch)
+
+
def workspace_read(repo, branch, recipe_name):
    """Read a Recipe from the branch's workspace

    :param repo: Open repository
    :type repo: Git.Repository
    :param branch: Branch name
    :type branch: str
    :param recipe_name: The name of the recipe
    :type recipe_name: str
    :returns: The workspace copy of the recipe, or None if it doesn't exist
    :rtype: Recipe or None
    :raises: RecipeFileError
    """
    ws_dir = workspace_dir(repo, branch)
    if not os.path.isdir(ws_dir):
        os.makedirs(ws_dir)
    filename = joinpaths(ws_dir, recipe_filename(recipe_name))
    if not os.path.exists(filename):
        return None
    try:
        # BUGFIX: the original leaked the file handle (open without close);
        # the context manager closes it even when recipe_from_toml raises
        with open(filename, 'rb') as f:
            recipe = recipe_from_toml(f.read())
    except IOError:
        raise RecipeFileError
    return recipe
+
+
def workspace_write(repo, branch, recipe):
    """Write a recipe to the workspace

    :param repo: Open repository
    :type repo: Git.Repository
    :param branch: Branch name
    :type branch: str
    :param recipe: The recipe to write to the workspace
    :type recipe: Recipe
    :returns: None
    :raises: IO related errors
    """
    ws_dir = workspace_dir(repo, branch)
    if not os.path.isdir(ws_dir):
        os.makedirs(ws_dir)
    filename = joinpaths(ws_dir, recipe.filename)
    # BUGFIX: the original never closed the file object, leaving the flush
    # and close to the garbage collector; write deterministically instead
    with open(filename, 'wb') as f:
        f.write(recipe.toml())
+
+
def workspace_delete(repo, branch, recipe_name):
    """Delete the recipe from the workspace

    :param repo: Open repository
    :type repo: Git.Repository
    :param branch: Branch name
    :type branch: str
    :param recipe_name: The name of the recipe
    :type recipe_name: str
    :returns: None
    :raises: IO related errors
    """
    # Deleting a recipe that is not in the workspace is not an error
    filename = joinpaths(workspace_dir(repo, branch), recipe_filename(recipe_name))
    if os.path.exists(filename):
        os.unlink(filename)
+
+#
+# Copyright (C) 2017 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# pylint: disable=bad-preconf-access
+
+import logging
+log = logging.getLogger("lorax-composer")
+
+import ConfigParser
+from fnmatch import fnmatchcase
+from glob import glob
+from distutils.util import strtobool
+import os
+import yum
+# This is a hack to short circuit yum's internal logging
+yum.logginglevels._added_handlers = True
+
+from pylorax.sysutils import joinpaths
+
def get_base_object(conf):
    """Get the Yum object with settings from the config file

    :param conf: configuration object
    :type conf: ComposerParser
    :returns: A Yum base object
    :rtype: YumBase
    """
    cachedir = os.path.abspath(conf.get("composer", "cache_dir"))
    yumconf = os.path.abspath(conf.get("composer", "yum_conf"))
    repodir = os.path.abspath(conf.get("composer", "repo_dir"))

    c = ConfigParser.ConfigParser()

    # add the main section
    section = "main"
    data = {"cachedir": cachedir,
            "keepcache": 0,
            "gpgcheck": 0,
            "plugins": 0,
            "assumeyes": 1,
            "reposdir": "",
            "tsflags": "nodocs"}

    # Optional proxy and sslverify settings come from the [yum] section
    if conf.get_default("yum", "proxy", None):
        data["proxy"] = conf.get("yum", "proxy")

    if conf.has_option("yum", "sslverify") and not conf.getboolean("yum", "sslverify"):
        data["sslverify"] = "0"

    c.add_section(section)
    # NOTE: Python 2 only -- tuple-unpacking lambda; copies data into the parser
    map(lambda (key, value): c.set(section, key, value), data.items())

    # write the yum configuration file
    with open(yumconf, "w") as f:
        c.write(f)

    # create the yum base object
    yb = yum.YumBase()

    yb.preconf.fn = yumconf

    # TODO How to handle this?
    yb.preconf.root = "/var/tmp/composer/yum/root"
    if not os.path.isdir(yb.preconf.root):
        os.makedirs(yb.preconf.root)

    # releasever can be set in the config; otherwise guess it from the host
    _releasever = conf.get_default("composer", "releasever", None)
    if not _releasever:
        distroverpkg = ['system-release(releasever)', 'redhat-release']
        # Use yum private function to guess the releasever
        _releasever = yum.config._getsysver("/", distroverpkg)
    log.info("releasever = %s", _releasever)
    yb.preconf.releasever = _releasever

    # Turn on as much yum logging as we can
    yb.preconf.debuglevel = 6
    yb.preconf.errorlevel = 6
    yb.logger.setLevel(logging.DEBUG)
    yb.verbose_logger.setLevel(logging.DEBUG)

    # Gather up all the available repo files, add the ones matching "repos":"enabled" patterns
    enabled_repos = conf.get("repos", "enabled").split(",")
    repo_files = glob(joinpaths(repodir, "*.repo"))
    if not conf.has_option("repos", "use_system_repos") or conf.getboolean("repos", "use_system_repos"):
        repo_files.extend(glob("/etc/yum.repos.d/*.repo"))

    for repo_file in repo_files:
        # fnmatch each repo file's basename (without ".repo") against the enabled patterns
        name = os.path.basename(repo_file)[:-5]
        if any(map(lambda pattern: fnmatchcase(name, pattern), enabled_repos)): # pylint: disable=cell-var-from-loop
            yb.getReposFromConfigFile(repo_file)

    return yb
+
+#
+# base.py
+#
+# Copyright (C) 2009 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s): Martin Gracik <mgracik@redhat.com>
+#
+
+from abc import ABCMeta, abstractmethod
+import sys
+
+import pylorax.output as output
+
+
class BaseLoraxClass(object):
    """Abstract base class for pylorax classes; provides the shared output helper."""

    __metaclass__ = ABCMeta

    @abstractmethod
    def __init__(self):
        # Subclasses must call this to get the console output helper as self.output
        self.output = output.LoraxOutput()
+
+
+
+
+
+
class DataHolder(dict):
    """A dict whose entries can also be read and written as attributes.

    Missing attributes raise KeyError (dict lookup semantics), not
    AttributeError.
    """

    def __init__(self, **kwargs):
        dict.__init__(self)

        # Store each keyword argument as a dict entry
        for key in kwargs:
            self[key] = kwargs[key]

    def __getattr__(self, attr):
        # Only called for names not found normally; delegate to dict lookup
        return self[attr]

    def __setattr__(self, attr, value):
        # Attribute assignment always lands in the dict, never in __dict__
        self[attr] = value
+
+
+
+#
+# buildstamp.py
+#
+# Copyright (C) 2010 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s): Martin Gracik <mgracik@redhat.com>
+#
+
+import logging
+logger = logging.getLogger("pylorax.buildstamp")
+
+import datetime
+
+
class BuildStamp(object):
    """Holds the metadata written to the .buildstamp file of an install tree."""

    def __init__(self, product, version, bugurl, isfinal, buildarch):
        # product/version/bugurl/isfinal are written verbatim to the [Main] section
        self.product = product
        self.version = version
        self.bugurl = bugurl
        self.isfinal = isfinal

        # uuid is <YYYYmmddHHMM>.<buildarch>, derived from the current time
        now = datetime.datetime.now()
        now = now.strftime("%Y%m%d%H%M")
        self.uuid = "{0}.{1}".format(now, buildarch)

    def write(self, outfile):
        """Write the .buildstamp data to outfile in INI format.

        :param outfile: Path of the file to write
        """
        # get lorax version; falls back to "devel" when not installed
        try:
            import pylorax.version
        except ImportError:
            vernum = "devel"
        else:
            vernum = pylorax.version.num

        logger.info("writing .buildstamp file")
        with open(outfile, "w") as fobj:
            fobj.write("[Main]\n")
            fobj.write("Product={0.product}\n".format(self))
            fobj.write("Version={0.version}\n".format(self))
            fobj.write("BugURL={0.bugurl}\n".format(self))
            fobj.write("IsFinal={0.isfinal}\n".format(self))
            fobj.write("UUID={0.uuid}\n".format(self))
            fobj.write("[Compose]\n")
            fobj.write("Lorax={0}\n".format(vernum))
+
+#
+# Copyright (C) 2011-2017 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+import logging
+log = logging.getLogger("pylorax")
+
+import os
+import tempfile
+import subprocess
+import shutil
+import hashlib
+import glob
+
+# Use Mako templates for appliance builder descriptions
+from mako.template import Template
+from mako.exceptions import text_error_template
+
+# Use pykickstart to calculate disk image size
+from pykickstart.parser import KickstartParser
+from pykickstart.version import makeVersion, RHEL7
+
+# Use the Lorax treebuilder branch for iso creation
+from pylorax import ArchData
+from pylorax.base import DataHolder
+from pylorax.treebuilder import TreeBuilder, RuntimeBuilder
+from pylorax.treebuilder import findkernels
+from pylorax.sysutils import joinpaths, remove
+from pylorax.imgutils import Mount, PartitionMount, copytree, mount, umount
+from pylorax.imgutils import mksquashfs, mkrootfsimg
+from pylorax.executils import execWithRedirect, runcmd
+from pylorax.installer import InstallError, novirt_install, virt_install
+
+RUNTIME = "images/install.img"
+
+# Default parameters for rebuilding initramfs, override with --dracut-args
+DRACUT_DEFAULT = ["--xz", "--add", "livenet dmsquash-live convertfs pollcdrom",
+ "--omit", "plymouth", "--no-hostonly", "--no-early-microcode"]
+
+
def get_ks_disk_size(ks):
    """Return the size of the kickstart's disk partitions

    :param ks: The kickstart
    :type ks: Kickstart object
    :returns: The size of the disk, in GiB
    """
    # Partition sizes are in MiB; add 1 GiB of headroom.
    # NOTE(review): under Python 2 this is integer (floor) division when the
    # sizes are ints -- presumably intended; confirm before porting to py3.
    disk_size = 1 + (sum([p.size for p in ks.handler.partition.partitions]) / 1024)
    log.info("disk_size = %sGiB", disk_size)
    return disk_size
+
def is_image_mounted(disk_img):
    """
    Return True if the disk_img is mounted

    Scans /proc/mounts for an entry whose mount point equals disk_img.
    """
    with open("/proc/mounts") as mounts:
        return any(len(fields) > 2 and fields[1] == disk_img
                   for fields in (line.split() for line in mounts))
+
def find_ostree_root(phys_root):
    """
    Find root of ostree deployment

    :param str phys_root: Path to physical root
    :returns: Relative path of ostree deployment root
    :rtype: str
    :raise Exception: More than one deployment roots were found
    """
    candidates = glob.glob(joinpaths(phys_root, "ostree/boot.0/*/*/0"))
    # No deployment at all is not an error -- return an empty path
    if not candidates:
        return ""
    if len(candidates) > 1:
        raise Exception("Too many deployment roots found: %s" % candidates)
    return os.path.relpath(candidates[0], phys_root)
+
class KernelInfo(object):
    """
    Info about the kernels in boot_dir
    """
    def __init__(self, boot_dir):
        self.boot_dir = boot_dir
        self.list = self.get_kernels()
        self.arch = self.get_kernel_arch()
        log.debug("kernel_list for %s = %s", self.boot_dir, self.list)
        log.debug("kernel_arch is %s", self.arch)

    def get_kernels(self):
        """
        Get a list of the kernels in the boot_dir

        Examine the vmlinuz-* versions and return a list of them

        Ignore any with -rescue- in them, these are dracut rescue images.
        The user shoud add
            -dracut-config-rescue
        to the kickstart to remove them, but catch it here as well.
        """
        versions = []
        for entry in os.listdir(self.boot_dir):
            if not entry.startswith("vmlinuz-"):
                continue
            if "-rescue-" in entry:
                # dracut rescue image, not a real kernel
                continue
            versions.append(entry[len("vmlinuz-"):])
        return versions

    def get_kernel_arch(self):
        """
        Get the arch of the first kernel in boot_dir

        Defaults to i386
        """
        # The arch is the last dotted component of the kernel version string
        return self.list[0].split(".")[-1] if self.list else "i386"
+
+
def make_appliance(disk_img, name, template, outfile, networks=None, ram=1024,
                   vcpus=1, arch=None, title="Linux", project="Linux",
                   releasever="7"):
    """
    Generate an appliance description file

    disk_img    Full path of the disk image
    name        Name of the appliance, passed to the template
    template    Full path of Mako template
    outfile     Full path of file to write, using template
    networks    List of networks from the kickstart
    ram         Ram, in MB, passed to template. Default is 1024
    vcpus       CPUs, passed to template. Default is 1
    arch        CPU architecture. Default is 'x86_64'
    title       Title, passed to template. Default is 'Linux'
    project     Project, passed to template. Default is 'Linux'
    releasever  Release version, passed to template. Default is '7'

    Returns None (and does nothing) if disk_img, template or outfile is missing.
    """
    if not (disk_img and template and outfile):
        return None

    log.info("Creating appliance definition using %s", template)

    if not arch:
        arch = "x86_64"

    # Checksum the image in 1MiB chunks so large images don't consume memory
    log.info("Calculating SHA256 checksum of %s", disk_img)
    sha256 = hashlib.sha256()
    with open(disk_img) as f:
        while True:
            data = f.read(1024*1024)
            if not data:
                break
            sha256.update(data)
    log.info("SHA256 of %s is %s", disk_img, sha256.hexdigest())
    disk_info = DataHolder(name=os.path.basename(disk_img), format="raw",
                           checksum_type="sha256", checksum=sha256.hexdigest())
    try:
        result = Template(filename=template).render(disks=[disk_info], name=name,
                          arch=arch, memory=ram, vcpus=vcpus, networks=networks,
                          title=title, project=project, releasever=releasever)
    except Exception:
        # Mako renders its own traceback; log it before re-raising
        log.error(text_error_template().render())
        raise

    with open(outfile, "w") as f:
        f.write(result)
+
+
def make_runtime(opts, mount_dir, work_dir):
    """
    Make the squashfs image from a directory

    :param opts: options passed to livemedia-creator
    :type opts: argparse options
    :param str mount_dir: Directory tree to compress
    :param str work_dir: Output compressed image to work_dir+RUNTIME

    Result is in work_dir+RUNTIME
    """
    kernels = KernelInfo(joinpaths(mount_dir, "boot" ))

    # Fake yum object
    fake_yum = DataHolder(conf=DataHolder(installroot=mount_dir))
    # Fake arch with only basearch set
    arch = ArchData(kernels.arch)
    # TODO: Need to get release info from someplace...
    product = DataHolder(name=opts.project, version=opts.releasever, release="",
                         variant="", bugurl="", isfinal=False)

    # This is a mounted image partition, cannot hardlink to it, so just use it
    # NOTE(review): the comment mentions symlinking mount_dir/images but the
    # code only creates work_dir/images -- confirm the symlink happens elsewhere
    os.makedirs(joinpaths(work_dir, "images"))

    rb = RuntimeBuilder(product, arch, fake_yum)
    log.info("Creating runtime")
    rb.create_runtime(joinpaths(work_dir, RUNTIME), size=None)
+
def rebuild_initrds_for_live(opts, sys_root_dir, results_dir):
    """
    Rebuild intrds for pxe live image (root=live:http://)

    :param opts: options passed to livemedia-creator
    :type opts: argparse options
    :param str sys_root_dir: Path to root of the system
    :param str results_dir: Path of directory for storing results
    """
    # Use the default dracut arguments unless --dracut-args was given
    if not opts.dracut_args:
        dracut_args = DRACUT_DEFAULT
    else:
        dracut_args = []
        for arg in opts.dracut_args:
            dracut_args += arg.split(" ", 1)
    log.info("dracut args = %s", dracut_args)

    dracut = ["dracut", "--nomdadmconf", "--nolvmconf"] + dracut_args

    # OSTree keeps its kernels under boot/ostree/<deployment>
    kdir = "boot"
    if opts.ostree:
        kernels_dir = glob.glob(joinpaths(sys_root_dir, "boot/ostree/*"))[0]
        kdir = os.path.relpath(kernels_dir, sys_root_dir)

    kernels = [kernel for kernel in findkernels(sys_root_dir, kdir)
               if hasattr(kernel, "initrd")]
    if not kernels:
        raise Exception("No initrds found, cannot rebuild_initrds")

    # Hush some dracut warnings. TODO: bind-mount proc in place?
    open(joinpaths(sys_root_dir,"/proc/modules"),"w")

    if opts.ostree:
        # Dracut assumes to have some dirs in disk image
        # /var/tmp for temp files
        vartmp_dir = joinpaths(sys_root_dir, "var/tmp")
        if not os.path.isdir(vartmp_dir):
            os.mkdir(vartmp_dir)
        # /root (maybe not fatal)
        root_dir = joinpaths(sys_root_dir, "var/roothome")
        if not os.path.isdir(root_dir):
            os.mkdir(root_dir)
        # /tmp (maybe not fatal)
        tmp_dir = joinpaths(sys_root_dir, "sysroot/tmp")
        if not os.path.isdir(tmp_dir):
            os.mkdir(tmp_dir)

    for kernel in kernels:
        # Build <initrd>.live inside the chroot, then move it into results_dir
        outfile = kernel.initrd.path + ".live"
        log.info("rebuilding %s", outfile)

        kver = kernel.version

        cmd = dracut + [outfile, kver]
        runcmd(cmd, root=sys_root_dir)

        new_initrd_path = joinpaths(results_dir, os.path.basename(kernel.initrd.path))
        shutil.move(joinpaths(sys_root_dir, outfile), new_initrd_path)
        os.chmod(new_initrd_path, 0644)
        # Copy the matching kernel alongside the rebuilt initrd
        shutil.copy2(joinpaths(sys_root_dir, kernel.path), results_dir)

    # Clean up the placeholder created above
    os.unlink(joinpaths(sys_root_dir,"/proc/modules"))
+
def create_pxe_config(template, images_dir, live_image_name, add_args = None):
    """
    Create template for pxe to live configuration

    :param str template: Full path of the Mako template to render
    :param str images_dir: Path of directory with images to be used
    :param str live_image_name: Name of live rootfs image file
    :param list add_args: Arguments to be added to initrd= pxe config

    Writes the rendered output to images_dir/PXE_CONFIG. Returns silently
    when no kernels with initrds are found in images_dir.
    """

    add_args = add_args or []

    kernels = [kernel for kernel in findkernels(images_dir, kdir="")
               if hasattr(kernel, "initrd")]
    if not kernels:
        return

    # Only the first kernel found is used in the config
    kernel = kernels[0]

    add_args_str = " ".join(add_args)


    try:
        result = Template(filename=template).render(kernel=kernel.path,
                          initrd=kernel.initrd.path, liveimg=live_image_name,
                          addargs=add_args_str)
    except Exception:
        # Mako renders its own traceback; log it before re-raising
        log.error(text_error_template().render())
        raise

    with open (joinpaths(images_dir, "PXE_CONFIG"), "w") as f:
        f.write(result)
+
def make_livecd(opts, mount_dir, work_dir):
    """
    Take the content from the disk image and make a livecd out of it

    :param opts: options passed to livemedia-creator
    :type opts: argparse options
    :param str mount_dir: Root of the mounted disk image
    :param str work_dir: Directory for the resulting tree and iso
    :returns: work_dir
    :rtype: str

    This uses wwood's squashfs live initramfs method:
     * put the real / into LiveOS/rootfs.img
     * make a squashfs of the LiveOS/rootfs.img tree
     * make a simple initramfs with the squashfs.img and /etc/cmdline in it
     * make a cpio of that tree
     * append the squashfs.cpio to a dracut initramfs for each kernel installed

    Then on boot dracut reads /etc/cmdline which points to the squashfs.img
    mounts that and then mounts LiveOS/rootfs.img as /

    """
    kernels = KernelInfo(joinpaths(mount_dir, "boot" ))

    arch = ArchData(kernels.arch)
    # TODO: Need to get release info from someplace...
    product = DataHolder(name=opts.project, version=opts.releasever, release="",
                         variant="", bugurl="", isfinal=False)

    # Link /images to work_dir/images to make the templates happy
    if os.path.islink(joinpaths(mount_dir, "images")):
        os.unlink(joinpaths(mount_dir, "images"))
    execWithRedirect("/bin/ln", ["-s", joinpaths(work_dir, "images"),
                     joinpaths(mount_dir, "images")])

    # The templates expect the config files to be in /tmp/config_files
    # I think these should be release specific, not from lorax, but for now
    configdir = joinpaths(opts.lorax_templates,"live/config_files/")
    configdir_path = "tmp/config_files"
    fullpath = joinpaths(mount_dir, configdir_path)
    if os.path.exists(fullpath):
        remove(fullpath)
    shutil.copytree(configdir, fullpath)

    # ISO labels are limited to 32 characters
    isolabel = opts.volid or "{0.name} {0.version} {1.basearch}".format(product, arch)
    if len(isolabel) > 32:
        isolabel = isolabel[:32]
        log.warn("Truncating isolabel to 32 chars: %s", isolabel)

    tb = TreeBuilder(product=product, arch=arch, domacboot=opts.domacboot,
                     inroot=mount_dir, outroot=work_dir,
                     runtime=RUNTIME, isolabel=isolabel,
                     templatedir=joinpaths(opts.lorax_templates,"live/"))
    log.info( "Rebuilding initrds" )
    # Same dracut-args handling as rebuild_initrds_for_live()
    if not opts.dracut_args:
        dracut_args = DRACUT_DEFAULT
    else:
        dracut_args = []
        for arg in opts.dracut_args:
            dracut_args += arg.split(" ", 1)
    log.info("dracut args = %s", dracut_args)
    tb.rebuild_initrds(add_args=dracut_args)
    log.info("Building boot.iso")
    tb.build()

    return work_dir
+
def mount_boot_part_over_root(img_mount):
    """
    Mount boot partition to /boot of root fs mounted in img_mount

    Used for OSTree so it finds deployment configurations on live rootfs

    :param img_mount: object with mounted disk image root partition
    :type img_mount: imgutils.PartitionMount
    :returns: Path to the boot directory the partition was mounted on, or
              None when no boot partition was found
    """
    root_dir = img_mount.mount_dir
    # A partition is the boot partition if it contains ostree's loader.0
    is_boot_part = lambda dir: os.path.exists(dir+"/loader.0")
    tmp_mount_dir = tempfile.mkdtemp()
    sys_root = find_ostree_root(root_dir)
    sysroot_boot_dir = None
    for dev, _size in img_mount.loop_devices:
        # NOTE(review): identity comparison ("is") with mount_dev -- presumably
        # the same string object is stored in loop_devices; confirm "==" was
        # not intended here.
        if dev is img_mount.mount_dev:
            continue
        try:
            # Probe each partition by mounting it on the temp dir
            mount("/dev/mapper/"+dev, mnt=tmp_mount_dir)
            if is_boot_part(tmp_mount_dir):
                umount(tmp_mount_dir)
                # Remount it over the deployment root's boot directory
                sysroot_boot_dir = joinpaths(joinpaths(root_dir, sys_root), "boot")
                mount("/dev/mapper/"+dev, mnt=sysroot_boot_dir)
                break
            else:
                umount(tmp_mount_dir)
        except subprocess.CalledProcessError as e:
            log.debug("Looking for boot partition error: %s", e)
    remove(tmp_mount_dir)
    return sysroot_boot_dir
+
def make_squashfs(disk_img, work_dir, compression="xz"):
    """
    Take disk_img and put it into LiveOS/rootfs.img and squashfs this
    tree into work_dir+RUNTIME

    :param str disk_img: Path of the disk image
    :param str work_dir: Directory to write the result into
    :param str compression: squashfs compression type, default "xz"
    """
    liveos_dir = joinpaths(work_dir, "runtime/LiveOS")
    os.makedirs(liveos_dir)
    os.makedirs(os.path.dirname(joinpaths(work_dir, RUNTIME)))

    # Hardlink if possible (same filesystem); fall back to a copy
    rc = execWithRedirect("/bin/ln", [disk_img, joinpaths(liveos_dir, "rootfs.img")])
    if rc != 0:
        shutil.copy2(disk_img, joinpaths(liveos_dir, "rootfs.img"))

    mksquashfs(joinpaths(work_dir, "runtime"),
               joinpaths(work_dir, RUNTIME), compression)
    # The uncompressed tree is no longer needed once the squashfs exists
    remove(joinpaths(work_dir, "runtime"))
+
+
def make_image(opts, ks, callback_func=None):
    """
    Install to an image

    Use virt or anaconda to install to an image.

    :param opts: options passed to livemedia-creator
    :type opts: argparse options
    :param ks: Parsed kickstart
    :type ks: pykickstart parser object
    :param callback_func: Optional progress callback, passed to novirt_install
    :returns: The full path of the image created
    :rtype: str
    :raises InstallError: when the install fails; the image is removed first
                          unless opts.keep_image is set
    """
    disk_size = get_ks_disk_size(ks)

    if opts.image_name:
        disk_img = joinpaths(opts.result_dir, opts.image_name)
    else:
        disk_img = tempfile.mktemp(prefix="disk", suffix=".img", dir=opts.result_dir)
    log.info("disk_img = %s", disk_img)

    try:
        if opts.no_virt:
            novirt_install(opts, disk_img, disk_size, ks.handler.method.url, callback_func=callback_func)
        else:
            # virt-install logs next to the main logfile
            install_log = os.path.abspath(os.path.dirname(opts.logfile))+"/virt-install.log"
            log.info("install_log = %s", install_log)

            virt_install(opts, install_log, disk_img, disk_size)
    except InstallError as e:
        log.error("Install failed: %s", e)
        if not opts.keep_image:
            log.info("Removing bad disk image")
            os.unlink(disk_img)
        raise

    log.info("Disk Image install successful")
    return disk_img
+
+
def make_live_images(opts, work_dir, root_dir, rootfs_image=None, size=None):
    """
    Create live images from directory or rootfs image

    :param opts: options passed to livemedia-creator
    :type opts: argparse options
    :param str work_dir: Directory for storing results
    :param str root_dir: Root directory of live filesystem tree
    :param str rootfs_image: Path to live rootfs image to be used
    :param size: Size passed to mkrootfsimg when building the rootfs from root_dir
    :returns: Path of directory with created images
    :rtype: str
    """
    sys_root = ""
    if opts.ostree:
        sys_root = find_ostree_root(root_dir)

    squashfs_root_dir = joinpaths(work_dir, "squashfs_root")
    liveos_dir = joinpaths(squashfs_root_dir, "LiveOS")
    os.makedirs(liveos_dir)

    if rootfs_image:
        # Hardlink if possible (same filesystem); fall back to a copy
        rc = execWithRedirect("/bin/ln", [rootfs_image, joinpaths(liveos_dir, "rootfs.img")])
        if rc != 0:
            shutil.copy2(rootfs_image, joinpaths(liveos_dir, "rootfs.img"))
    else:
        log.info("Creating live rootfs image")
        mkrootfsimg(root_dir, joinpaths(liveos_dir, "rootfs.img"), "LiveOS", size=size, sysroot=sys_root)

    log.info("Packing live rootfs image")
    add_pxe_args = []
    live_image_name = "live-rootfs.squashfs.img"
    mksquashfs(squashfs_root_dir,
               joinpaths(work_dir, live_image_name),
               opts.compression,
               opts.compress_args)

    # The uncompressed tree is no longer needed once the squashfs exists
    remove(squashfs_root_dir)

    log.info("Rebuilding initramfs for live")
    rebuild_initrds_for_live(opts, joinpaths(root_dir, sys_root), work_dir)

    if opts.ostree:
        add_pxe_args.append("ostree=/%s" % sys_root)
    template = joinpaths(opts.lorax_templates, "pxe-live/pxe-config.tmpl")
    create_pxe_config(template, work_dir, live_image_name, add_pxe_args)

    return work_dir
+
def run_creator(opts, callback_func=None):
    """Run the image creator process

    :param opts: Commandline options to control the process
    :type opts: Either a DataHolder or ArgumentParser
    :param callback_func: optional poll function passed down to the install
                          step for early termination
    :returns: The result directory and the disk image path.
    :rtype: Tuple of str

    This function takes the opts arguments and creates the selected output image.
    See the cmdline --help for livemedia-creator for the possible options

    (Yes, this is not ideal, but we can fix that later)
    """
    result_dir = None

    # Parse the kickstart (RHEL7 syntax); parse errors are non-fatal here,
    # validation below raises RuntimeError for the cases we care about.
    if opts.ks:
        ks_version = makeVersion(RHEL7)
        ks = KickstartParser( ks_version, errorsAreFatal=False, missingIncludeIsFatal=False )
        ks.readKickstart( opts.ks[0] )

    # Make the disk or filesystem image, unless one was passed in
    if not opts.disk_image and not opts.fs_image:
        if not opts.ks:
            raise RuntimeError("Image creation requires a kickstart file")

        # Collect all kickstart problems before raising, so the user sees
        # everything at once.
        errors = []
        if ks.handler.method.method != "url" and opts.no_virt:
            errors.append("Only url install method is currently supported. Please "
                          "fix your kickstart file." )

        if ks.handler.displaymode.displayMode is not None:
            errors.append("The kickstart must not set a display mode (text, cmdline, "
                          "graphical), this will interfere with livemedia-creator.")

        if opts.make_fsimage:
            # Make sure the kickstart isn't using autopart and only has a / mountpoint
            part_ok = not any(p for p in ks.handler.partition.partitions
                              if p.mountpoint not in ["/", "swap"])
            if not part_ok or ks.handler.autopart.seen:
                errors.append("Filesystem images must use a single / part, not autopart or "
                              "multiple partitions. swap is allowed but not used.")

        if errors:
            raise RuntimeError("\n".join(errors))

        # Make the image. Output of this is either a partitioned disk image or a fsimage
        # Can also fail with InstallError
        disk_img = make_image(opts, ks, callback_func=callback_func)

    # Only create the disk image, return that now
    if opts.image_only:
        return (result_dir, disk_img)

    if opts.make_iso:
        work_dir = tempfile.mkdtemp()
        log.info("working dir is %s", work_dir)

        if (opts.fs_image or opts.no_virt) and not opts.disk_image:
            # Create iso from a filesystem image
            disk_img = opts.fs_image or disk_img

            make_squashfs(disk_img, work_dir)
            with Mount(disk_img, opts="loop") as mount_dir:
                result_dir = make_livecd(opts, mount_dir, work_dir)
        else:
            # Create iso from a partitioned disk image
            disk_img = opts.disk_image or disk_img
            with PartitionMount(disk_img) as img_mount:
                if img_mount and img_mount.mount_dir:
                    make_runtime(opts, img_mount.mount_dir, work_dir)
                    result_dir = make_livecd(opts, img_mount.mount_dir, work_dir)

        # cleanup the mess
        # cleanup work_dir?
        if disk_img and not (opts.keep_image or opts.disk_image or opts.fs_image):
            os.unlink(disk_img)
            log.info("Disk image erased")
            disk_img = None
    elif opts.make_appliance:
        if not opts.ks:
            networks = []
        else:
            networks = ks.handler.network.network
        make_appliance(opts.disk_image or disk_img, opts.app_name,
                       opts.app_template, opts.app_file, networks, opts.ram,
                       opts.vcpus, opts.arch, opts.title, opts.project, opts.releasever)
    elif opts.make_pxe_live:
        work_dir = tempfile.mkdtemp()
        log.info("working dir is %s", work_dir)

        if (opts.fs_image or opts.no_virt) and not opts.disk_image:
            # Create pxe live images from a filesystem image
            disk_img = opts.fs_image or disk_img
            with Mount(disk_img, opts="loop") as mnt_dir:
                result_dir = make_live_images(opts, work_dir, mnt_dir, rootfs_image=disk_img)
        else:
            # Create pxe live images from a partitioned disk image
            disk_img = opts.disk_image or disk_img
            is_root_part = None
            if opts.ostree:
                # ostree deployments: the "/" partition is the one holding
                # the deploy tree, not the one with /etc/passwd
                is_root_part = lambda dir: os.path.exists(dir+"/ostree/deploy")
            with PartitionMount(disk_img, mount_ok=is_root_part) as img_mount:
                if img_mount and img_mount.mount_dir:
                    try:
                        mounted_sysroot_boot_dir = None
                        if opts.ostree:
                            mounted_sysroot_boot_dir = mount_boot_part_over_root(img_mount)
                        if opts.live_rootfs_keep_size:
                            # convert bytes to GiB for make_live_images
                            size = img_mount.mount_size / 1024**3
                        else:
                            size = opts.live_rootfs_size or None
                        result_dir = make_live_images(opts, work_dir, img_mount.mount_dir, size=size)
                    finally:
                        # always undo the extra boot mount before PartitionMount exits
                        if mounted_sysroot_boot_dir:
                            umount(mounted_sysroot_boot_dir)

    # Move the results over to the requested result directory
    if opts.result_dir != opts.tmp and result_dir:
        copytree(result_dir, opts.result_dir, preserve=False)
        shutil.rmtree( result_dir )
        result_dir = None

    return (result_dir, disk_img)
+
+#
+# decorators.py
+#
+# Copyright (C) 2009 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s): Martin Gracik <mgracik@redhat.com>
+#
+
def singleton(cls):
    """Decorator turning *cls* into a lazily-created singleton.

    The decorated name becomes a zero-argument factory: the class is
    instantiated on the first call and the same instance is returned on
    every call after that.
    """
    _cache = {}

    def _instance():
        try:
            return _cache[cls]
        except KeyError:
            _cache[cls] = cls()
            return _cache[cls]

    return _instance
+
+#
+# discinfo.py
+#
+# Copyright (C) 2010 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s): Martin Gracik <mgracik@redhat.com>
+#
+
+import logging
+logger = logging.getLogger("pylorax.discinfo")
+
+import time
+
+
class DiscInfo(object):
    """Holds the data written to a .discinfo file."""

    def __init__(self, release, basearch):
        # release name, e.g. "Red Hat Enterprise Linux 7"
        self.release = release
        # base architecture, e.g. "x86_64"
        self.basearch = basearch

    def write(self, outfile):
        """Write the .discinfo file: timestamp, release, basearch lines."""
        logger.info("writing .discinfo file")
        lines = ["{0:f}".format(time.time()), self.release, self.basearch]
        with open(outfile, "w") as fobj:
            fobj.write("\n".join(lines) + "\n")
+
+#
+# executil.py - subprocess execution utility functions
+#
+# Copyright (C) 1999-2011
+# Red Hat, Inc. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author(s): Erik Troan <ewt@redhat.com>
+#
+
+import os, sys
+import subprocess
+import time
+import threading
+
+import logging
+log = logging.getLogger("pylorax")
+program_log = logging.getLogger("program")
+
class ExecProduct(object):
    """Simple record of a finished command: return code, stdout and stderr."""

    def __init__(self, rc, stdout, stderr):
        # store the three results exactly as captured
        self.rc = rc
        self.stdout = stdout
        self.stderr = stderr
+
class tee(threading.Thread):
    """ Python reimplementation of the shell tee process, so we can
    feed the pipe output into two places at the same time

    Each line read from *inputdesc* is sent both to the logger (via
    *logmethod*) and to the raw *outputdesc* file descriptor.
    """
    def __init__(self, inputdesc, outputdesc, logmethod, command):
        """
        :param int inputdesc: file descriptor to read from (wrapped with fdopen)
        :param int outputdesc: raw fd the data is copied to with os.write
        :param logmethod: logging function, e.g. program_log.info
        :param command: command name, used only in the error message
        """
        threading.Thread.__init__(self)
        self.inputdesc = os.fdopen(inputdesc, "r")
        self.outputdesc = outputdesc
        self.logmethod = logmethod
        self.running = True
        self.command = command

    def run(self):
        """Copy lines until EOF ("" from readline) or a read error."""
        while self.running:
            try:
                data = self.inputdesc.readline()
            except IOError:
                self.logmethod("Can't read from pipe during a call to %s. "
                               "(program terminated suddenly?)" % self.command)
                break
            if data == "":
                # EOF - the write end of the pipe was closed
                self.running = False
            else:
                # log without the trailing newline, write the raw data through
                self.logmethod(data.rstrip('\n'))
                os.write(self.outputdesc, data)
+
+
def execWithRedirect(command, argv, stdin = None, stdout = None,
                     stderr = None, root = None, preexec_fn=None, cwd=None,
                     raise_err=False, callback_func=None, callback_args=None):
    """ Run an external program and redirect the output to a file.
    @param command The command to run.
    @param argv A list of arguments.
    @param stdin The file descriptor to read stdin from. May also be a
                 readable path (opened O_RDONLY) or a file object.
    @param stdout The file descriptor to redirect stdout to. May also be a
                  path (opened O_RDWR|O_CREAT) or a file object.
    @param stderr The file descriptor to redirect stderr to. Same forms as stdout.
    @param root The directory to chroot to before running command.
    @param preexec_fn function to pass to Popen
    @param cwd working directory to pass to Popen
    @param raise_err raise CalledProcessError when the returncode is not 0
    @param callback_func polled every 5 seconds; a truthy return terminates
           the process early
    @param callback_args unused here, kept for API compatibility
    @return The return code of command.
    """
    def chroot ():
        os.chroot(root)

    # no-op closers; replaced below whenever this function opens the fd itself
    stdinclose = stdoutclose = stderrclose = lambda : None

    argv = list(argv)
    # Accept a path, an fd, or a file object for each stream; anything else
    # falls back to this process' own stream. ('file' is the Python 2 builtin.)
    if isinstance(stdin, str):
        if os.access(stdin, os.R_OK):
            stdin = os.open(stdin, os.O_RDONLY)
            stdinclose = lambda : os.close(stdin)
        else:
            stdin = sys.stdin.fileno()
    elif isinstance(stdin, int):
        pass
    elif stdin is None or not isinstance(stdin, file):
        stdin = sys.stdin.fileno()

    if isinstance(stdout, str):
        stdout = os.open(stdout, os.O_RDWR|os.O_CREAT)
        stdoutclose = lambda : os.close(stdout)
    elif isinstance(stdout, int):
        pass
    elif stdout is None or not isinstance(stdout, file):
        stdout = sys.stdout.fileno()

    if isinstance(stderr, str):
        stderr = os.open(stderr, os.O_RDWR|os.O_CREAT)
        stderrclose = lambda : os.close(stderr)
    elif isinstance(stderr, int):
        pass
    elif stderr is None or not isinstance(stderr, file):
        stderr = sys.stderr.fileno()

    program_log.info("Running... %s", " ".join([command] + argv))

    # prepare os pipes for feeding the tee processes
    pstdout, pstdin = os.pipe()
    perrout, perrin = os.pipe()

    env = os.environ.copy()
    env.update({"LC_ALL": "C"})    # stable, parseable tool output

    if root:
        preexec_fn = chroot
        cwd = root
        program_log.info("chrooting into %s", cwd)
    elif cwd:
        program_log.info("chdiring into %s", cwd)

    try:
        # prepare tee processes: log stdout as info, stderr as error
        proc_std = tee(pstdout, stdout, program_log.info, command)
        proc_err = tee(perrout, stderr, program_log.error, command)

        # start monitoring the outputs
        proc_std.start()
        proc_err.start()

        proc = subprocess.Popen([command] + argv, stdin=stdin,
                                stdout=pstdin,
                                stderr=perrin,
                                preexec_fn=preexec_fn, cwd=cwd,
                                env=env)

        # Wait for the process to finish, calling callback_func to test for early termination
        while proc.returncode is None:
            time.sleep(5)
            if callback_func and callback_func():
                proc.terminate()
                callback_func = None    # only terminate once
            proc.poll()

        ret = proc.returncode

        # close the input ends of pipes so we get EOF in the tee processes
        os.close(pstdin)
        os.close(perrin)

        # wait for the output to be written and destroy them
        proc_std.join()
        del proc_std

        proc_err.join()
        del proc_err

        stdinclose()
        stdoutclose()
        stderrclose()
    except OSError as e:
        errstr = "Error running %s: %s" % (command, e.strerror)
        log.error(errstr)
        program_log.error(errstr)
        # close the input ends of pipes so we get EOF in the tee processes
        os.close(pstdin)
        os.close(perrin)
        proc_std.join()
        proc_err.join()

        stdinclose()
        stdoutclose()
        stderrclose()
        raise RuntimeError, errstr    # Python 2 raise-statement syntax

    if ret and raise_err:
        raise subprocess.CalledProcessError(ret, [command]+argv)

    return ret
+
def execWithCapture(command, argv, stdin = None, stderr = None, root=None,
                    preexec_fn=None, cwd=None, raise_err=False):
    """ Run an external program and capture standard out.
    @param command The command to run.
    @param argv A list of arguments.
    @param stdin The file descriptor to read stdin from. May also be a
                 readable path or a file object.
    @param stderr The file descriptor to redirect stderr to. May also be a
                  path (opened O_RDWR|O_CREAT) or a file object.
    @param root The directory to chroot to before running command.
    @param preexec_fn function to pass to Popen
    @param cwd working directory to pass to Popen
    @param raise_err raise CalledProcessError when the returncode is not 0
    @return The output of command from stdout.

    stdout/stderr lines are also mirrored into the program log.
    """
    def chroot():
        os.chroot(root)

    def closefds ():
        stdinclose()
        stderrclose()

    # no-op closers; replaced below when this function opens the fd itself
    stdinclose = stderrclose = lambda : None
    rc = ""
    argv = list(argv)

    # Accept a path, an fd, or a file object; fall back to our own stream.
    if isinstance(stdin, str):
        if os.access(stdin, os.R_OK):
            stdin = os.open(stdin, os.O_RDONLY)
            stdinclose = lambda : os.close(stdin)
        else:
            stdin = sys.stdin.fileno()
    elif isinstance(stdin, int):
        pass
    elif stdin is None or not isinstance(stdin, file):
        stdin = sys.stdin.fileno()

    if isinstance(stderr, str):
        stderr = os.open(stderr, os.O_RDWR|os.O_CREAT)
        stderrclose = lambda : os.close(stderr)
    elif isinstance(stderr, int):
        pass
    elif stderr is None or not isinstance(stderr, file):
        stderr = sys.stderr.fileno()

    program_log.info("Running... %s", " ".join([command] + argv))

    env = os.environ.copy()
    env.update({"LC_ALL": "C"})    # stable, parseable tool output

    if root:
        preexec_fn = chroot
        cwd = root
        program_log.info("chrooting into %s", cwd)
    elif cwd:
        program_log.info("chdiring into %s", cwd)

    try:
        proc = subprocess.Popen([command] + argv, stdin=stdin,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                preexec_fn=preexec_fn, cwd=cwd,
                                env=env)

        while True:
            # communicate() waits for process exit; the loop guards against
            # a None returncode (exits on the first iteration normally)
            (outStr, errStr) = proc.communicate()
            if outStr:
                map(program_log.info, outStr.splitlines())
                rc += outStr
            if errStr:
                map(program_log.error, errStr.splitlines())
                os.write(stderr, errStr)

            if proc.returncode is not None:
                break
    except OSError as e:
        log.error ("Error running " + command + ": " + e.strerror)
        closefds()
        raise RuntimeError, "Error running " + command + ": " + e.strerror

    closefds()
    if proc.returncode and raise_err:
        raise subprocess.CalledProcessError(proc.returncode, [command]+argv)

    return rc
+
def execWithCallback(command, argv, stdin = None, stdout = None,
                     stderr = None, echo = True, callback = None,
                     callback_data = None, root = '/'):
    """Fork/exec *command*, invoking *callback* for every byte of stdout.

    stdin/stdout/stderr may each be a path, an fd, or a file object
    (falls back to this process' own streams). When echo is True the
    child's stdout is also copied to *stdout*. Returns an ExecProduct
    with the return code and the captured stdout/stderr text.
    """
    def closefds ():
        stdinclose()
        stdoutclose()
        stderrclose()

    # no-op closers; replaced below when this function opens the fd itself
    stdinclose = stdoutclose = stderrclose = lambda : None

    argv = list(argv)
    if isinstance(stdin, str):
        if os.access(stdin, os.R_OK):
            stdin = os.open(stdin, os.O_RDONLY)
            stdinclose = lambda : os.close(stdin)
        else:
            stdin = sys.stdin.fileno()
    elif isinstance(stdin, int):
        pass
    elif stdin is None or not isinstance(stdin, file):
        stdin = sys.stdin.fileno()

    if isinstance(stdout, str):
        stdout = os.open(stdout, os.O_RDWR|os.O_CREAT)
        stdoutclose = lambda : os.close(stdout)
    elif isinstance(stdout, int):
        pass
    elif stdout is None or not isinstance(stdout, file):
        stdout = sys.stdout.fileno()

    if isinstance(stderr, str):
        stderr = os.open(stderr, os.O_RDWR|os.O_CREAT)
        stderrclose = lambda : os.close(stderr)
    elif isinstance(stderr, int):
        pass
    elif stderr is None or not isinstance(stderr, file):
        stderr = sys.stderr.fileno()

    program_log.info("Running... %s", " ".join([command] + argv))

    # manual fork/exec with two pipes: p for stdout, p_stderr for stderr
    p = os.pipe()
    p_stderr = os.pipe()
    childpid = os.fork()
    if not childpid:
        # child: wire pipes to fd 1/2, stdin to fd 0, then exec
        os.close(p[0])
        os.close(p_stderr[0])
        os.dup2(p[1], 1)
        os.dup2(p_stderr[1], 2)
        os.dup2(stdin, 0)
        os.close(stdin)
        os.close(p[1])
        os.close(p_stderr[1])

        os.execvp(command, [command] + argv)
        os._exit(1)    # only reached if execvp itself failed

    # parent: close the write ends so reads see EOF when the child exits
    os.close(p[1])
    os.close(p_stderr[1])

    log_output = ''
    while 1:
        try:
            # one byte at a time so the callback fires per output byte
            s = os.read(p[0], 1)
        except OSError as e:
            # errno 4 == EINTR: interrupted read, retry silently
            if e.errno != 4:
                map(program_log.info, log_output.splitlines())
                raise IOError, e.args

        if echo:
            os.write(stdout, s)
        log_output += s

        if callback:
            callback(s, callback_data=callback_data)

        # break out early if the sub-process changes status.
        # no need to flush the stream if the process has exited
        try:
            (pid, status) = os.waitpid(childpid,os.WNOHANG)
            if pid != 0:
                break
        except OSError as e:
            log.critical("exception from waitpid: %s %s", e.errno, e.strerror)

        if len(s) < 1:
            break

    map(program_log.info, log_output.splitlines())

    # drain whatever stderr the child produced
    log_errors = ''
    while 1:
        try:
            err = os.read(p_stderr[0], 128)
        except OSError as e:
            if e.errno != 4:
                map(program_log.error, log_errors.splitlines())
                raise IOError, e.args
            break
        log_errors += err
        if len(err) < 1:
            break

    os.write(stderr, log_errors)
    map(program_log.error, log_errors.splitlines())
    os.close(p[0])
    os.close(p_stderr[0])

    try:
        #if we didn't already get our child's exit status above, do so now.
        if not pid:
            (pid, status) = os.waitpid(childpid, 0)
    except OSError as e:
        log.critical("exception from waitpid: %s %s", e.errno, e.strerror)

    closefds()

    # default to failure if the child didn't exit normally
    rc = 1
    if os.WIFEXITED(status):
        rc = os.WEXITSTATUS(status)
    return ExecProduct(rc, log_output , log_errors)
+
+def _pulseProgressCallback(data, callback_data=None):
+ if callback_data:
+ callback_data.pulse()
+
def execWithPulseProgress(command, argv, stdin = None, stdout = None,
                          stderr = None, echo = True, progress = None,
                          root = '/'):
    """Run a command, pulsing *progress* for every chunk of output.

    Thin wrapper over execWithCallback using _pulseProgressCallback.
    Returns an ExecProduct.
    """
    kwargs = dict(stdin=stdin, stdout=stdout, stderr=stderr, echo=echo,
                  callback=_pulseProgressCallback, callback_data=progress,
                  root=root)
    return execWithCallback(command, argv, **kwargs)
+
+## Run a shell.
def execConsole():
    """Run an interactive shell (/bin/sh) and wait for it to exit.

    :raises RuntimeError: if the shell could not be started

    Uses the raise-call form (valid on both Python 2 and 3) instead of
    the old Python-2-only ``raise RuntimeError, msg`` statement.
    """
    try:
        proc = subprocess.Popen(["/bin/sh"])
        proc.wait()
    except OSError as e:
        raise RuntimeError("Error running /bin/sh: " + e.strerror)
+
def runcmd(cmd, **kwargs):
    """Run execWithRedirect with raise_err forced on.

    :param cmd: command and arguments as one list, e.g. ["ls", "-l"]
    :returns: the command's return code (CalledProcessError on failure)
    """
    kwargs["raise_err"] = True
    program, args = cmd[0], cmd[1:]
    return execWithRedirect(program, args, **kwargs)
+
def runcmd_output(cmd, **kwargs):
    """Run execWithCapture with raise_err forced on.

    :param cmd: command and arguments as one list
    :returns: the command's stdout (CalledProcessError on failure)
    """
    kwargs["raise_err"] = True
    program, args = cmd[0], cmd[1:]
    return execWithCapture(program, args, **kwargs)
+
+# imgutils.py - utility functions/classes for building disk images
+#
+# Copyright (C) 2011 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author(s): Will Woods <wwoods@redhat.com>
+
+import logging
+logger = logging.getLogger("pylorax.imgutils")
+
+import os, tempfile
+from os.path import join, dirname
+from subprocess import Popen, PIPE, CalledProcessError
+import sys
+import time
+import traceback
+import multiprocessing
+from time import sleep
+
+from pylorax.sysutils import cpfile
+from pylorax.executils import execWithRedirect, execWithCapture
+from pylorax.executils import runcmd, runcmd_output
+
+######## Functions for making container images (cpio, tar, squashfs) ##########
+
def compress(command, rootdir, outfile, compression="xz", compressargs=None):
    '''Make a compressed archive of the given rootdir.

    :param command: list making up the archiver command (e.g. cpio or tar args)
    :param str rootdir: directory to archive
    :param str outfile: path of the compressed output
    :param compression: "xz", "gzip", "lzma", "bzip2", or None (no compression)
    :param compressargs: extra args for the compression command (default ["-9"])
    :returns: the compressor's return code, or 1 when a process failed to start
    :raises ValueError: on an unknown compression type

    Fixes vs. the original: the output file handle is closed when done
    (it was left to the garbage collector), and the py2-only raise
    statement syntax was replaced with the call form.
    '''
    if compressargs is None:
        compressargs = ["-9"]
    if compression not in (None, "xz", "gzip", "lzma", "bzip2"):
        raise ValueError("Unknown compression type %s" % compression)
    if compression == "xz":
        compressargs.insert(0, "--check=crc32")
    if compression is None:
        compression = "cat" # this is a little silly
        compressargs = []

    # make compression run with multiple threads if possible
    if compression in ("xz", "lzma"):
        compressargs.insert(0, "-T%d" % multiprocessing.cpu_count())

    logger.debug("find %s -print0 |%s | %s %s > %s", rootdir, " ".join(command),
                 compression, " ".join(compressargs), outfile)
    find, archive, comp = None, None, None
    try:
        # pipeline: find -print0 | archiver | compressor > outfile
        with open(outfile, "wb") as fout:
            find = Popen(["find", ".", "-print0"], stdout=PIPE, cwd=rootdir)
            archive = Popen(command, stdin=find.stdout, stdout=PIPE, cwd=rootdir)
            comp = Popen([compression] + compressargs,
                         stdin=archive.stdout, stdout=fout)
            (_stdout, _stderr) = comp.communicate()
        return comp.returncode
    except OSError as e:
        logger.error(e)
        # Kill off any hanging processes
        _ = [p.kill() for p in (find, archive, comp) if p]
        return 1
+
def mkcpio(rootdir, outfile, compression="xz", compressargs=None):
    """Archive rootdir into a compressed newc-format cpio at outfile."""
    cpio_cmd = ["cpio", "--null", "--quiet", "-H", "newc", "-o"]
    return compress(cpio_cmd, rootdir, outfile, compression, compressargs)
+
def mktar(rootdir, outfile, compression="xz", compressargs=None):
    """Archive rootdir into a compressed tar (selinux/acls/xattrs preserved)."""
    tar_cmd = ["tar", "--no-recursion", "--selinux", "--acls", "--xattrs",
               "-cf-", "--null", "-T-"]
    return compress(tar_cmd, rootdir, outfile, compression, compressargs)
+
def mksquashfs(rootdir, outfile, compression="default", compressargs=None):
    '''Make a squashfs image containing the given rootdir.

    "default" leaves the compressor choice to mksquashfs itself;
    anything else is passed via -comp.
    '''
    if compressargs is None:
        compressargs = []
    extra = list(compressargs)
    if compression != "default":
        extra = ["-comp", compression] + extra
    return execWithRedirect("mksquashfs", [rootdir, outfile] + extra)
+
def mkrootfsimg(rootdir, outfile, label, size=2, sysroot=""):
    """
    Make rootfs image from a directory

    :param str rootdir: Root directory
    :param str outfile: Path of output image file
    :param str label: Filesystem label
    :param int size: Size of the image in GiB, if None computed automatically
    :param str sysroot: path to system (deployment) root relative to physical root
    """
    if size:
        fssize = size * (1024*1024*1024) # 2GB sparse file compresses down to nothin'
    else:
        fssize = None # Let mkext4img figure out the needed size

    mkext4img(rootdir, outfile, label=label, size=fssize)
    # Reset selinux context on new rootfs
    with LoopDev(outfile) as loopdev:
        with Mount(loopdev) as mnt:
            # setfiles runs chrooted into the (deployment) root, so the
            # relabel path "/" is relative to it
            cmd = [ "setfiles", "-e", "/proc", "-e", "/sys", "-e", "/dev", "-e", "/install",
                "/etc/selinux/targeted/contexts/files/file_contexts", "/"]
            root = join(mnt, sysroot.lstrip("/"))
            runcmd(cmd, root=root)
+
def mkdiskfsimage(diskimage, fsimage, label="Anaconda"):
    """
    Copy the / partition of a partitioned disk image to an un-partitioned
    disk image.

    diskimage is the full path to partitioned disk image with a /
    fsimage is the full path of the output fs image file
    label is the label to apply to the image. Defaults to "Anaconda"

    Returns None without writing fsimage when no mountable / partition
    was found.
    """
    with PartitionMount(diskimage) as img_mount:
        # PartitionMount leaves mount_dir unset when nothing matched
        if not img_mount or not img_mount.mount_dir:
            return None

        logger.info("Creating fsimage %s", fsimage)
        mkext4img(img_mount.mount_dir, fsimage, label=label)
+
+######## Utility functions ###############################################
+
def mksparse(outfile, size):
    '''use os.ftruncate to create a sparse file of the given size.

    :param str outfile: path of the file to create (truncated if it exists)
    :param int size: size in bytes

    The file object is now closed via a context manager; the original
    leaked the handle until garbage collection.
    '''
    with open(outfile, "w") as fobj:
        os.ftruncate(fobj.fileno(), size)
+
def mkqcow2(outfile, size, options=None):
    '''use qemu-img to create a file of the given size.
    options is a list of options passed to qemu-img

    Default format is qcow2, override by passing "-f", fmt
    in options.

    Note: like the original, a caller-supplied options list is extended
    in place when it lacks "-f".
    '''
    options = options or []
    if "-f" not in options:
        options.extend(["-f", "qcow2"])
    cmd = ["qemu-img", "create"] + options + [outfile, str(size)]
    runcmd(cmd)
+
def loop_waitfor(loop_dev, outfile):
    """Make sure the loop device is attached to the outfile.

    It seems that on rare occasions losetup can return before the /dev/loopX is
    ready for use, causing problems with mkfs. This tries to make sure that the
    loop device really is associated with the backing file before continuing.

    Raise RuntimeError if it isn't setup after 5 tries.
    """
    expected = os.path.basename(loop_dev)
    for _attempt in range(5):
        runcmd(["udevadm", "settle", "--timeout", "300"])
        ## XXX Note that losetup --list output can be truncated to 64 bytes in some
        ## situations. Don't use it to lookup backing file, go the other way
        ## and lookup the loop for the backing file. See util-linux lib/loopdev.c
        ## loopcxt_get_backing_file()
        if get_loop_name(outfile) == expected:
            return
        # If this really is a race, give it some time to settle down
        time.sleep(1)

    raise RuntimeError("Unable to setup %s on %s" % (loop_dev, outfile))
+
def loop_attach(outfile):
    '''Attach a loop device to the given file. Return the loop device name.
    Raises CalledProcessError if losetup fails.'''
    loop_dev = runcmd_output(["losetup", "--find", "--show", outfile]).strip()

    # Sometimes the loop device isn't ready yet, make extra sure before returning
    loop_waitfor(loop_dev, outfile)
    return loop_dev
+
def loop_detach(loopdev):
    '''Detach the given loop device. Return False on failure.'''
    rc = execWithRedirect("losetup", ["--detach", loopdev])
    return rc == 0
+
def get_loop_name(path):
    '''Return the loop device associated with the path.
    Raises RuntimeError if more than one loop is associated'''
    out = runcmd_output(["losetup", "-j", path])
    if len(out.splitlines()) > 1:
        # there should never be more than one loop device listed
        raise RuntimeError("multiple loops associated with %s" % path)
    # losetup -j output looks like "/dev/loop0: ...": take the device name
    return os.path.basename(out.split(":")[0])
+
def dm_attach(dev, size, name=None):
    '''Attach a devicemapper device to the given device, with the given size.
    If name is None, a random name will be chosen. Returns the device name.
    raises CalledProcessError if dmsetup fails.'''
    if name is None:
        name = tempfile.mktemp(prefix="lorax.imgutils.", dir="")
    # size is in bytes; the dm table wants 512-byte sectors
    table = "0 %i linear %s 0" % (size/512, dev)
    runcmd(["dmsetup", "create", name, "--table", table])
    return name
+
def dm_detach(dev):
    '''Detach the named devicemapper device. Returns False if dmsetup fails.'''
    dev = dev.replace("/dev/mapper/", "") # strip prefix, if it's there
    # Compare against 0 so the result matches the docstring (and the
    # sibling loop_detach): the original returned the raw return code,
    # which is truthy on FAILURE and falsy on success.
    return execWithRedirect("dmsetup", ["remove", dev]) == 0
+
def mount(dev, opts="", mnt=None):
    '''Mount the given device at the given mountpoint, using the given opts.
    opts should be a comma-separated string of mount options.
    if mnt is none, a temporary directory will be created and its path will be
    returned.
    raises CalledProcessError if mount fails.'''
    if mnt is None:
        # temp mountpoints carry the "lorax.imgutils." prefix so umount()
        # knows it may remove them afterwards
        mnt = tempfile.mkdtemp(prefix="lorax.imgutils.")
        logger.debug("make tmp mountdir %s", mnt)
    mount_cmd = ["mount"]
    if opts:
        mount_cmd.extend(["-o", opts])
    mount_cmd.extend([dev, mnt])
    runcmd(mount_cmd)
    return mnt
+
def umount(mnt, lazy=False, maxretry=3, retrysleep=1.0):
    '''Unmount the given mountpoint. If lazy is True, do a lazy umount (-l).
    If the mount was a temporary dir created by mount, it will be deleted.
    raises CalledProcessError if umount fails.

    :param str mnt: mountpoint to unmount
    :param bool lazy: pass -l (lazy) to umount
    :param int maxretry: number of attempts before giving up
    :param float retrysleep: seconds to sleep between attempts
    :returns: True when umount returned 0
    '''
    cmd = ["umount"]
    if lazy: cmd += ["-l"]
    cmd += [mnt]
    count = 0
    # Fix: initialize rv so a call with maxretry < 1 returns False instead
    # of raising UnboundLocalError at the final comparison.
    rv = None
    while maxretry > 0:
        try:
            rv = runcmd(cmd)
        except CalledProcessError:
            count += 1
            if count == maxretry:
                raise
            logger.warn("failed to unmount %s. retrying (%d/%d)...",
                        mnt, count, maxretry)
            if logger.getEffectiveLevel() <= logging.DEBUG:
                # show what's keeping the mountpoint busy
                fuser = execWithCapture("fuser", ["-vm", mnt])
                logger.debug("fuser -vm:\n%s\n", fuser)
            sleep(retrysleep)
        else:
            break
    # remove temp mountpoints created by mount()
    if 'lorax.imgutils' in mnt:
        os.rmdir(mnt)
        logger.debug("remove tmp mountdir %s", mnt)
    return (rv == 0)
+
def copytree(src, dest, preserve=True):
    '''Copy a tree of files using cp -a, thus preserving modes, timestamps,
    links, acls, sparse files, xattrs, selinux contexts, etc.
    If preserve is False, uses cp -R (useful for modeless filesystems)
    raises CalledProcessError if copy fails.'''
    logger.debug("copytree %s %s", src, dest)
    if preserve:
        cp_cmd = ["cp", "-a"]
    else:
        cp_cmd = ["cp", "-R", "-L"]
    cp_cmd += [".", os.path.abspath(dest)]
    # run with cwd=src so "." copies the tree's contents
    runcmd(cp_cmd, cwd=src)
+
def do_grafts(grafts, dest, preserve=True):
    '''Copy each of the items listed in grafts into dest.
    If the key ends with '/' it's assumed to be a directory which should be
    created, otherwise just the leading directories will be created.'''
    for imgpath, src in grafts.items():
        if imgpath[-1] == '/':
            # trailing slash: the graft target is the directory itself
            targetdir = join(dest, imgpath)
            imgpath = imgpath[:-1]
        else:
            targetdir = join(dest, dirname(imgpath))
        if not os.path.isdir(targetdir):
            os.makedirs(targetdir)
        target = join(dest, imgpath)
        if os.path.isdir(src):
            copytree(src, target, preserve)
        else:
            cpfile(src, target)
+
def round_to_blocks(size, blocksize):
    '''If size isn't a multiple of blocksize, round up to the next multiple.

    Note: a size of 0 rounds up to one full block, matching the original.
    '''
    remainder = size % blocksize
    if remainder or not size:
        return size + (blocksize - remainder)
    return size
+
+# TODO: move filesystem data outside this function
+[docs]def estimate_size(rootdir, graft=None, fstype=None, blocksize=4096, overhead=128):
+ if graft is None:
+ graft = {}
+ getsize = lambda f: os.lstat(f).st_size
+ if fstype == "btrfs":
+ overhead = 64*1024 # don't worry, it's all sparse
+ if fstype == "hfsplus":
+ overhead = 200 # hack to deal with two bootloader copies
+ if fstype in ("vfat", "msdos"):
+ blocksize = 2048
+ getsize = lambda f: os.stat(f).st_size # no symlinks, count as copies
+ total = overhead*blocksize
+ dirlist = graft.values()
+ if rootdir:
+ dirlist.append(rootdir)
+ for root in dirlist:
+ for top, dirs, files in os.walk(root):
+ for f in files + dirs:
+ total += round_to_blocks(getsize(join(top,f)), blocksize)
+ if fstype == "btrfs":
+ total = max(256*1024*1024, total) # btrfs minimum size: 256MB
+ return total
+
def default_image_name(compression, basename):
    """ Return a default image name with the correct suffix for the compression type.

    :param str compression: Compression type
    :param str basename: Base filename
    :returns: basename with compression suffix

    If the compression is unknown it defaults to xz
    """
    suffix_map = {"xz": ".xz", "gzip": ".gz", "bzip2": ".bz2", "lzma": ".lzma"}
    return basename + suffix_map.get(compression, ".xz")
+
+######## Execution contexts - use with the 'with' statement ##############
+
class LoopDev(object):
    """Context manager: attach *filename* to a loop device on entry and
    detach it on exit, yielding the loop device path.

    If *size* is given, a sparse file of that size is created first.
    """
    def __init__(self, filename, size=None):
        self.loopdev = None
        self.filename = filename
        if size:
            mksparse(self.filename, size)

    def __enter__(self):
        self.loopdev = loop_attach(self.filename)
        return self.loopdev

    def __exit__(self, exc_type, exc_value, exc_tb):
        loop_detach(self.loopdev)
+
+[docs]class DMDev(object):
+ def __init__(self, dev, size, name=None):
+ (self.dev, self.size, self.name) = (dev, size, name)
+ self.mapperdev = None
+ def __enter__(self):
+ self.mapperdev = dm_attach(self.dev, self.size, self.name)
+ return self.mapperdev
+ def __exit__(self, exc_type, exc_value, exc_tb):
+ dm_detach(self.mapperdev)
+
+[docs]class Mount(object):
+ def __init__(self, dev, opts="", mnt=None):
+ (self.dev, self.opts, self.mnt) = (dev, opts, mnt)
+ def __enter__(self):
+ self.mnt = mount(self.dev, self.opts, self.mnt)
+ return self.mnt
+ def __exit__(self, exc_type, exc_value, exc_tb):
+ umount(self.mnt)
+
+[docs]class PartitionMount(object):
+ """ Mount a partitioned image file using kpartx """
+ def __init__(self, disk_img, mount_ok=None):
+ """
+ disk_img is the full path to a partitioned disk image
+ mount_ok is a function that is passed the mount point and
+ returns True if it should be mounted.
+ """
+ self.mount_dir = None
+ self.mount_dev = None
+ self.mount_size = None
+ self.disk_img = disk_img
+ self.mount_ok = mount_ok
+
+ # Default is to mount partition with /etc/passwd
+ if not self.mount_ok:
+ self.mount_ok = lambda mount_dir: os.path.isfile(mount_dir+"/etc/passwd")
+
+ # Example kpartx output
+ # kpartx -p p -v -a /tmp/diskV2DiCW.im
+ # add map loop2p1 (253:2): 0 3481600 linear /dev/loop2 2048
+ # add map loop2p2 (253:3): 0 614400 linear /dev/loop2 3483648
+ kpartx_output = runcmd_output(["kpartx", "-v", "-a", "-s", self.disk_img])
+ logger.debug(kpartx_output)
+
+ # list of (deviceName, sizeInBytes)
+ self.loop_devices = []
+ for line in kpartx_output.splitlines():
+ # add map loop2p3 (253:4): 0 7139328 linear /dev/loop2 528384
+ # 3rd element is size in 512 byte blocks
+ if line.startswith("add map "):
+ fields = line[8:].split()
+ self.loop_devices.append( (fields[0], int(fields[3])*512) )
+
+ def __enter__(self):
+ # Mount the device selected by mount_ok, if possible
+ mount_dir = tempfile.mkdtemp()
+ for dev, size in self.loop_devices:
+ try:
+ mount( "/dev/mapper/"+dev, mnt=mount_dir )
+ if self.mount_ok(mount_dir):
+ self.mount_dir = mount_dir
+ self.mount_dev = dev
+ self.mount_size = size
+ break
+ umount( mount_dir )
+ except CalledProcessError:
+ logger.debug(traceback.format_exc())
+ if self.mount_dir:
+ logger.info("Partition mounted on %s size=%d", self.mount_dir, self.mount_size)
+ else:
+ logger.debug("Unable to mount anything from %s", self.disk_img)
+ os.rmdir(mount_dir)
+ return self
+
+ def __exit__(self, exc_type, exc_value, exc_tb):
+ if self.mount_dir:
+ umount( self.mount_dir )
+ os.rmdir(self.mount_dir)
+ self.mount_dir = None
+ execWithRedirect("kpartx", ["-d", "-s", self.disk_img])
+
+
+######## Functions for making filesystem images ##########################
+
+[docs]def mkfsimage(fstype, rootdir, outfile, size=None, mkfsargs=None, mountargs="", graft=None):
+ '''Generic filesystem image creation function.
+ fstype should be a filesystem type - "mkfs.${fstype}" must exist.
+ graft should be a dict: {"some/path/in/image": "local/file/or/dir"};
+ if the path ends with a '/' it's assumed to be a directory.
+ Will raise CalledProcessError if something goes wrong.'''
+ if mkfsargs is None:
+ mkfsargs = []
+ if graft is None:
+ graft = {}
+ preserve = (fstype not in ("msdos", "vfat"))
+ if not size:
+ size = estimate_size(rootdir, graft, fstype)
+ with LoopDev(outfile, size) as loopdev:
+ try:
+ runcmd(["mkfs.%s" % fstype] + mkfsargs + [loopdev])
+ except CalledProcessError as e:
+ logger.error("mkfs exited with a non-zero return code: %d", e.returncode)
+ logger.error(e.output)
+ sys.exit(e.returncode)
+
+ with Mount(loopdev, mountargs) as mnt:
+ if rootdir:
+ copytree(rootdir, mnt, preserve)
+ do_grafts(graft, mnt, preserve)
+
+ # Make absolutely sure that the data has been written
+ runcmd(["sync"])
+
+# convenience functions with useful defaults
+[docs]def mkdosimg(rootdir, outfile, size=None, label="", mountargs="shortname=winnt,umask=0077", graft=None):
+ mkfsimage("msdos", rootdir, outfile, size, mountargs=mountargs,
+ mkfsargs=["-n", label], graft=graft)
+
+[docs]def mkext4img(rootdir, outfile, size=None, label="", mountargs="", graft=None):
+ mkfsimage("ext4", rootdir, outfile, size, mountargs=mountargs,
+ mkfsargs=["-L", label, "-b", "1024", "-m", "0"], graft=graft)
+
+[docs]def mkbtrfsimg(rootdir, outfile, size=None, label="", mountargs="", graft=None):
+ mkfsimage("btrfs", rootdir, outfile, size, mountargs=mountargs,
+ mkfsargs=["-L", label], graft=graft)
+
+[docs]def mkhfsimg(rootdir, outfile, size=None, label="", mountargs="", graft=None):
+ mkfsimage("hfsplus", rootdir, outfile, size, mountargs=mountargs,
+ mkfsargs=["-v", label], graft=graft)
+
+#
+# Copyright (C) 2011-2017 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+import logging
+log = logging.getLogger("pylorax")
+
+import os
+import shutil
+import sys
+import subprocess
+import tempfile
+from time import sleep
+import uuid
+
+from pylorax.executils import execWithRedirect, execWithCapture
+from pylorax.imgutils import get_loop_name, dm_detach, mount, umount
+from pylorax.imgutils import PartitionMount, mksparse, mkext4img, loop_detach
+from pylorax.imgutils import mktar, mkdiskfsimage, mkqcow2
+from pylorax.logmonitor import LogMonitor
+from pylorax.sysutils import joinpaths
+from pylorax.treebuilder import udev_escape
+
+ROOT_PATH = "/mnt/sysimage/"
+
+# no-virt mode doesn't need libvirt, so make it optional
+try:
+ import libvirt
+except ImportError:
+ libvirt = None
+
+
+
+[docs]class IsoMountpoint(object):
+ """
+ Mount the iso on a temporary directory and check to make sure the
+ vmlinuz and initrd.img files exist
+ Check the iso for a LiveOS directory and set a flag.
+ Extract the iso's label.
+
+ initrd_path can be used to point to a boot.iso tree with a newer
+ initrd.img than the iso has. The iso is still used for stage2.
+ """
+ def __init__( self, iso_path, initrd_path=None ):
+ """ iso_path is the path to a boot.iso
+ initrd_path overrides mounting the iso for access to
+ initrd and vmlinuz.
+ """
+ self.label = None
+ self.iso_path = iso_path
+ self.initrd_path = initrd_path
+
+ if not self.initrd_path:
+ self.mount_dir = mount(self.iso_path, opts="loop")
+ else:
+ self.mount_dir = self.initrd_path
+
+ kernel_list = [("/isolinux/vmlinuz", "/isolinux/initrd.img"),
+ ("/ppc/ppc64/vmlinuz", "/ppc/ppc64/initrd.img")]
+ if os.path.isdir( self.mount_dir+"/repodata" ):
+ self.repo = self.mount_dir
+ else:
+ self.repo = None
+ self.liveos = os.path.isdir( self.mount_dir+"/LiveOS" )
+
+ try:
+ for kernel, initrd in kernel_list:
+ if (os.path.isfile(self.mount_dir+kernel) and
+ os.path.isfile(self.mount_dir+initrd)):
+ self.kernel = self.mount_dir+kernel
+ self.initrd = self.mount_dir+initrd
+ break
+ else:
+ raise Exception("Missing kernel and initrd file in iso, failed"
+ " to search under: {0}".format(kernel_list))
+ except:
+ self.umount()
+ raise
+
+ self.get_iso_label()
+
+
+[docs] def get_iso_label( self ):
+ """
+ Get the iso's label using isoinfo
+ """
+ isoinfo_output = execWithCapture("isoinfo", ["-d", "-i", self.iso_path])
+ log.debug( isoinfo_output )
+ for line in isoinfo_output.splitlines():
+ if line.startswith("Volume id: "):
+ self.label = line[11:]
+ return
+
+
+[docs]class VirtualInstall( object ):
+ """
+ Run virt-install using an iso and kickstart(s)
+ """
+ def __init__( self, iso, ks_paths, disk_img, img_size=2,
+ kernel_args=None, memory=1024, vnc=None, arch=None,
+ log_check=None, virtio_host="127.0.0.1", virtio_port=6080,
+ qcow2=False):
+ """
+
+ iso is an instance of IsoMountpoint
+ ks_paths is a list of paths to a kickstart files. All are injected, the
+ first one is the one executed.
+ disk_img is the path to a disk image (doesn't need to exist)
+ img_size is the size, in GiB, of the image if it doesn't exist
+ kernel_args are extra arguments to pass on the kernel cmdline
+ memory is the amount of ram to assign to the virt
+ vnc is passed to the --graphics command verbatim
+ arch is the optional architecture to use in the virt
+ log_check is a method that returns True of the log indicates an error
+ virtio_host and virtio_port are used to communicate with the log monitor
+ """
+ self.virt_name = "LiveOS-"+str(uuid.uuid4())
+ # add --graphics none later
+ # add whatever serial cmds are needed later
+ args = ["-n", self.virt_name,
+ "-r", str(memory),
+ "--noreboot",
+ "--noautoconsole"]
+
+ args.append("--graphics")
+ if vnc:
+ args.append(vnc)
+ else:
+ args.append("none")
+
+ for ks in ks_paths:
+ args.append("--initrd-inject")
+ args.append(ks)
+
+ disk_opts = "path={0}".format(disk_img)
+ if qcow2:
+ disk_opts += ",format=qcow2"
+ else:
+ disk_opts += ",format=raw"
+ if not os.path.isfile(disk_img):
+ disk_opts += ",size={0}".format(img_size)
+ args.append("--disk")
+ args.append(disk_opts)
+
+ if iso.liveos:
+ disk_opts = "path={0},device=cdrom".format(iso.iso_path)
+ args.append("--disk")
+ args.append(disk_opts)
+
+ extra_args = "ks=file:/{0}".format(os.path.basename(ks_paths[0]))
+ if not vnc:
+ extra_args += " inst.cmdline console=ttyS0"
+ if kernel_args:
+ extra_args += " "+kernel_args
+ if iso.liveos:
+ extra_args += " stage2=hd:LABEL={0}".format(udev_escape(iso.label))
+ args.append("--extra-args")
+ args.append(extra_args)
+
+ args.append("--location")
+ args.append(iso.mount_dir)
+
+ channel_args = "tcp,host={0}:{1},mode=connect,target_type=virtio" \
+ ",name=org.fedoraproject.anaconda.log.0".format(
+ virtio_host, virtio_port)
+ args.append("--channel")
+ args.append(channel_args)
+
+ if arch:
+ args.append("--arch")
+ args.append(arch)
+
+ rc = execWithRedirect("virt-install", args)
+ if rc:
+ raise Exception("Problem starting virtual install")
+
+ conn = libvirt.openReadOnly(None)
+ dom = conn.lookupByName(self.virt_name)
+
+ # TODO: If vnc has been passed, we should look up the port and print that
+ # for the user at this point
+
+ while dom.isActive() and not log_check():
+ sys.stdout.write(".")
+ sys.stdout.flush()
+ sleep(10)
+ print
+
+ if log_check():
+ log.info( "Installation error detected. See logfile." )
+ else:
+ log.info( "Install finished. Or at least virt shut down." )
+
+[docs] def destroy( self ):
+ """
+ Make sure the virt has been shut down and destroyed
+
+ Could use libvirt for this instead.
+ """
+ log.info( "Shutting down %s", self.virt_name)
+ subprocess.call(["virsh", "destroy", self.virt_name])
+ subprocess.call(["virsh", "undefine", self.virt_name])
+
+
+[docs]def novirt_install(opts, disk_img, disk_size, repo_url, callback_func=None):
+ """
+ Use Anaconda to install to a disk image
+ """
+ import selinux
+
+ # Set selinux to Permissive if it is Enforcing
+ selinux_enforcing = False
+ if selinux.is_selinux_enabled() and selinux.security_getenforce():
+ selinux_enforcing = True
+ selinux.security_setenforce(0)
+
+ # Clean up /tmp/ from previous runs to prevent stale info from being used
+ for path in ["/tmp/yum.repos.d/", "/tmp/yum.cache/"]:
+ if os.path.isdir(path):
+ shutil.rmtree(path)
+
+ args = ["--kickstart", opts.ks[0], "--cmdline", "--repo", repo_url]
+ if opts.anaconda_args:
+ for arg in opts.anaconda_args:
+ args += arg.split(" ", 1)
+ if opts.proxy:
+ args += ["--proxy", opts.proxy]
+ if opts.armplatform:
+ args += ["--armplatform", opts.armplatform]
+
+ if opts.make_iso or opts.make_fsimage:
+ # Make a blank fs image
+ args += ["--dirinstall"]
+
+ mkext4img(None, disk_img, label=opts.fs_label, size=disk_size * 1024**3)
+ if not os.path.isdir(ROOT_PATH):
+ os.mkdir(ROOT_PATH)
+ mount(disk_img, opts="loop", mnt=ROOT_PATH)
+ elif opts.make_tar:
+ args += ["--dirinstall"]
+
+ # Install directly into ROOT_PATH, make sure it starts clean
+ if os.path.exists(ROOT_PATH):
+ shutil.rmtree(ROOT_PATH)
+ if not os.path.isdir(ROOT_PATH):
+ os.mkdir(ROOT_PATH)
+ else:
+ args += ["--image", disk_img]
+
+ # Create the sparse image
+ mksparse(disk_img, disk_size * 1024**3)
+
+ # Make sure anaconda has the right product and release
+ os.environ["ANACONDA_PRODUCTNAME"] = opts.project
+ os.environ["ANACONDA_PRODUCTVERSION"] = opts.releasever
+ rc = execWithRedirect("anaconda", args, callback_func=callback_func)
+
+ # Move the anaconda logs over to a log directory
+ log_dir = os.path.abspath(os.path.dirname(opts.logfile))
+ log_anaconda = joinpaths(log_dir, "anaconda")
+ if not os.path.isdir(log_anaconda):
+ os.mkdir(log_anaconda)
+ for l in ["anaconda.log", "ifcfg.log", "program.log", "storage.log",
+ "packaging.log", "yum.log"]:
+ if os.path.exists("/tmp/"+l):
+ shutil.copy2("/tmp/"+l, log_anaconda)
+ os.unlink("/tmp/"+l)
+
+ if opts.make_iso or opts.make_fsimage:
+ umount(ROOT_PATH)
+ else:
+ # If anaconda failed the disk image may still be in use by dm
+ execWithRedirect("anaconda-cleanup", [])
+
+ if disk_img:
+ dm_name = os.path.splitext(os.path.basename(disk_img))[0]
+ dm_path = "/dev/mapper/"+dm_name
+ if os.path.exists(dm_path):
+ dm_detach(dm_path)
+ loop_detach(get_loop_name(disk_img))
+
+ if selinux_enforcing:
+ selinux.security_setenforce(1)
+
+ if rc:
+ raise InstallError("novirt_install failed")
+
+ if opts.make_tar:
+ compress_args = []
+ for arg in opts.compress_args:
+ compress_args += arg.split(" ", 1)
+
+ rc = mktar(ROOT_PATH, disk_img, opts.compression, compress_args)
+ shutil.rmtree(ROOT_PATH)
+ log.info("tar finished with rc=%d", rc)
+
+ if rc:
+ raise InstallError("novirt_install failed")
+ elif opts.qcow2:
+ log.info("Converting %s to qcow2", disk_img)
+ qcow2_args = []
+ for arg in opts.qcow2_args:
+ qcow2_args += arg.split(" ", 1)
+
+ # convert the image to qcow2 format
+ if "-O" not in qcow2_args:
+ qcow2_args.extend(["-O", "qcow2"])
+ qcow2_img = tempfile.mktemp(prefix="disk", suffix=".img")
+ execWithRedirect("qemu-img", ["convert"] + qcow2_args + [disk_img, qcow2_img], raise_err=True)
+ execWithRedirect("mv", ["-f", qcow2_img, disk_img], raise_err=True)
+
+
+[docs]def virt_install(opts, install_log, disk_img, disk_size):
+ """
+ Use virt-install to install to a disk image
+
+ install_log is the path to write the log from virt-install
+ disk_img is the full path to the final disk or filesystem image
+ disk_size is the size of the disk to create in GiB
+ """
+ iso_mount = IsoMountpoint(opts.iso, opts.location)
+ log_monitor = LogMonitor(install_log)
+
+ kernel_args = ""
+ if opts.kernel_args:
+ kernel_args += opts.kernel_args
+ if opts.proxy:
+ kernel_args += " proxy="+opts.proxy
+
+ if opts.qcow2 and not opts.make_fsimage:
+ # virt-install can't take all the qcow2 options so create the image first
+ qcow2_args = []
+ for arg in opts.qcow2_args:
+ qcow2_args += arg.split(" ", 1)
+
+ mkqcow2(disk_img, disk_size*1024**3, qcow2_args)
+
+ if opts.make_fsimage or opts.make_tar:
+ diskimg_path = tempfile.mktemp(prefix="disk", suffix=".img")
+ else:
+ diskimg_path = disk_img
+
+ virt = VirtualInstall(iso_mount, opts.ks, diskimg_path, disk_size,
+ kernel_args, opts.ram, opts.vnc, opts.arch,
+ log_check = log_monitor.server.log_check,
+ virtio_host = log_monitor.host,
+ virtio_port = log_monitor.port,
+ qcow2=opts.qcow2)
+
+ virt.destroy()
+ log_monitor.shutdown()
+ iso_mount.umount()
+
+ if log_monitor.server.log_check():
+ raise InstallError("virt_install failed")
+
+ if opts.make_fsimage:
+ mkdiskfsimage(diskimg_path, disk_img, label=opts.fs_label)
+ os.unlink(diskimg_path)
+ elif opts.make_tar:
+ compress_args = []
+ for arg in opts.compress_args:
+ compress_args += arg.split(" ", 1)
+
+ with PartitionMount(diskimg_path) as img_mount:
+ if img_mount and img_mount.mount_dir:
+ rc = mktar(img_mount.mount_dir, disk_img, opts.compression, compress_args)
+ os.unlink(diskimg_path)
+
+ if rc:
+ raise InstallError("virt_install failed")
+
+#
+# Copyright (C) 2011-2017 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+import re
+import socket
+import SocketServer
+import threading
+
+[docs]class LogRequestHandler(SocketServer.BaseRequestHandler):
+ """
+ Handle monitoring and saving the logfiles from the virtual install
+ """
+[docs] def setup(self):
+ if self.server.log_path:
+ self.fp = open(self.server.log_path, "w")
+ else:
+ print "no log_path specified"
+ self.request.settimeout(10)
+
+[docs] def handle(self):
+ """
+ Handle writing incoming data to a logfile and
+ checking the logs for any Tracebacks or other errors that indicate
+ that the install failed.
+ """
+ line = ""
+ while True:
+ if self.server.kill:
+ break
+
+ try:
+ data = self.request.recv(4096)
+ self.fp.write(data)
+ self.fp.flush()
+
+ # check the data for errors and set error flag
+ # need to assemble it into lines so we can test for the error
+ # string.
+ while data:
+ more = data.split("\n", 1)
+ line += more[0]
+ if len(more) > 1:
+ self.iserror(line)
+ line = ""
+ data = more[1]
+ else:
+ data = None
+
+ except socket.timeout:
+ pass
+ except Exception:
+ break
+
+
+[docs] def iserror(self, line):
+ """
+ Check a line to see if it contains an error indicating install failure
+ """
+ simple_tests = ["Traceback (",
+ "Out of memory:",
+ "Call Trace:",
+ "insufficient disk space:"]
+ re_tests = [r"packaging: base repo .* not valid"]
+ for t in simple_tests:
+ if line.find(t) > -1:
+ self.server.log_error = True
+ return
+ for t in re_tests:
+ if re.search(t, line):
+ self.server.log_error = True
+ return
+
+
+[docs]class LogServer(SocketServer.TCPServer):
+ """
+ Add path to logfile
+ Add log error flag
+ Add a kill switch
+ """
+ def __init__(self, log_path, *args, **kwargs):
+ self.kill = False
+ self.log_error = False
+ self.log_path = log_path
+ SocketServer.TCPServer.__init__(self, *args, **kwargs)
+
+
+[docs]class LogMonitor(object):
+ """
+ Contains all the stuff needed to setup a thread to listen to the logs
+ from the virtual install
+ """
+ def __init__(self, log_path, host="localhost", port=0):
+ """
+ Fire up the thread listening for logs
+ """
+ self.server = LogServer(log_path, (host, port), LogRequestHandler)
+ self.host, self.port = self.server.server_address
+ self.log_path = log_path
+ self.server_thread = threading.Thread(target=self.server.handle_request)
+ self.server_thread.daemon = True
+ self.server_thread.start()
+
+
+
+#
+# ltmpl.py
+#
+# Copyright (C) 2009 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s): Martin Gracik <mgracik@redhat.com>
+# Will Woods <wwoods@redhat.com>
+#
+
+import logging
+logger = logging.getLogger("pylorax.ltmpl")
+
+import os, re, glob, shlex, fnmatch
+from os.path import basename, isdir
+from subprocess import CalledProcessError
+import shutil
+
+from pylorax.sysutils import joinpaths, cpfile, mvfile, replace, remove
+from pylorax import yumhelper
+from pylorax.base import DataHolder
+from pylorax.executils import runcmd, runcmd_output
+from pylorax.imgutils import mkcpio
+
+from mako.lookup import TemplateLookup
+from mako.exceptions import text_error_template
+import sys, traceback
+import struct
+
+[docs]class LoraxTemplate(object):
+ def __init__(self, directories=None):
+ if directories is None:
+ directories = ["/usr/share/lorax"]
+ # we have to add ["/"] to the template lookup directories or the
+ # file includes won't work properly for absolute paths
+ self.directories = ["/"] + directories
+ self.lines = []
+
+[docs] def parse(self, template_file, variables):
+ lookup = TemplateLookup(directories=self.directories)
+ template = lookup.get_template(template_file)
+
+ try:
+ textbuf = template.render(**variables)
+ except:
+ logger.error(text_error_template().render())
+ raise
+
+ # split, strip and remove empty lines
+ lines = textbuf.splitlines()
+ lines = map(lambda line: line.strip(), lines)
+ lines = filter(lambda line: line, lines)
+
+ # remove comments
+ lines = filter(lambda line: not line.startswith("#"), lines)
+
+ # mako template now returns unicode strings
+ lines = map(lambda line: line.encode("utf8"), lines)
+
+ # split with shlex and perform brace expansion
+ lines = map(split_and_expand, lines)
+
+ self.lines = lines
+ return lines
+
+[docs]def split_and_expand(line):
+ return [exp for word in shlex.split(line) for exp in brace_expand(word)]
+
+[docs]def brace_expand(s):
+ if not ('{' in s and ',' in s and '}' in s):
+ yield s
+ else:
+ right = s.find('}')
+ left = s[:right].rfind('{')
+ (prefix, choices, suffix) = (s[:left], s[left+1:right], s[right+1:])
+ for choice in choices.split(','):
+ for alt in brace_expand(prefix+choice+suffix):
+ yield alt
+
+[docs]def rglob(pathname, root="/", fatal=False):
+ seen = set()
+ rootlen = len(root)+1
+ for f in glob.iglob(joinpaths(root, pathname)):
+ if f not in seen:
+ seen.add(f)
+ yield f[rootlen:] # remove the root to produce relative path
+ if fatal and not seen:
+ raise IOError, "nothing matching %s in %s" % (pathname, root)
+
+[docs]def rexists(pathname, root=""):
+ # Generator is always True, even with no values;
+ # bool(rglob(...)) won't work here.
+ for _path in rglob(pathname, root):
+ return True
+ return False
+
+# TODO: operate inside an actual chroot for safety? Not that RPM bothers..
+[docs]class LoraxTemplateRunner(object):
+ '''
+ This class parses and executes Lorax templates. Sample usage:
+
+ # install a bunch of packages
+ runner = LoraxTemplateRunner(inroot=rundir, outroot=rundir, yum=yum_obj)
+ runner.run("install-packages.ltmpl")
+
+ # modify a runtime dir
+ runner = LoraxTemplateRunner(inroot=rundir, outroot=newrun)
+ runner.run("runtime-transmogrify.ltmpl")
+
+ NOTES:
+
+ * Parsing procedure is roughly:
+ 1. Mako template expansion (on the whole file)
+ 2. For each line of the result,
+ a. Whitespace splitting (using shlex.split())
+ b. Brace expansion (using brace_expand())
+ c. If the first token is the name of a function, call that function
+ with the rest of the line as arguments
+
+ * Parsing and execution are *separate* passes - so you can't use the result
+ of a command in an %if statement (or any other control statements)!
+
+ * Commands that run external programs (systemctl, gconfset) currently use
+ the *host*'s copy of that program, which may cause problems if there's a
+ big enough difference between the host and the image you're modifying.
+
+ * The commands are not executed under a real chroot, so absolute symlinks
+ will point *outside* the inroot/outroot. Be careful with symlinks!
+
+ ADDING NEW COMMANDS:
+
+ * Each template command is just a method of the LoraxTemplateRunner
+ object - so adding a new command is as easy as adding a new function.
+
+ * Each function gets arguments that correspond to the rest of the tokens
+ on that line (after word splitting and brace expansion)
+
+ * Commands should raise exceptions for errors - don't use sys.exit()
+ '''
+ def __init__(self, inroot, outroot, yum_obj=None, fatalerrors=True,
+ templatedir=None, defaults=None):
+ if defaults is None:
+ defaults = {}
+ self.inroot = inroot
+ self.outroot = outroot
+ self.yum = yum_obj
+ self.fatalerrors = fatalerrors
+ self.templatedir = templatedir or "/usr/share/lorax"
+ self.templatefile = None
+ # some builtin methods
+ self.builtins = DataHolder(exists=lambda p: rexists(p, root=inroot),
+ glob=lambda g: list(rglob(g, root=inroot)))
+ self.defaults = defaults
+ self.results = DataHolder(treeinfo=dict()) # just treeinfo for now
+ # TODO: set up custom logger with a filter to add line info
+
+ def _out(self, path):
+ return joinpaths(self.outroot, path)
+ def _in(self, path):
+ return joinpaths(self.inroot, path)
+
+ def _filelist(self, *pkgs):
+ pkglist = self.yum.doPackageLists(pkgnarrow="installed", patterns=pkgs)
+ return set([f for pkg in pkglist.installed for f in pkg.filelist+pkg.ghostlist])
+
+ def _getsize(self, *files):
+ return sum(os.path.getsize(self._out(f)) for f in files if os.path.isfile(self._out(f)))
+
+[docs] def run(self, templatefile, **variables):
+ for k,v in self.defaults.items() + self.builtins.items():
+ variables.setdefault(k,v)
+ logger.debug("executing %s with variables=%s", templatefile, variables)
+ self.templatefile = templatefile
+ t = LoraxTemplate(directories=[self.templatedir])
+ commands = t.parse(templatefile, variables)
+ self._run(commands)
+
+
+ def _run(self, parsed_template):
+ logger.info("running %s", self.templatefile)
+ for (num, line) in enumerate(parsed_template,1):
+ logger.debug("template line %i: %s", num, " ".join(line))
+ skiperror = False
+ (cmd, args) = (line[0], line[1:])
+ # Following Makefile convention, if the command is prefixed with
+ # a dash ('-'), we'll ignore any errors on that line.
+ if cmd.startswith('-'):
+ cmd = cmd[1:]
+ skiperror = True
+ try:
+ # grab the method named in cmd and pass it the given arguments
+ f = getattr(self, cmd, None)
+ if cmd[0] == '_' or cmd == 'run' or not callable(f):
+ raise ValueError, "unknown command %s" % cmd
+ f(*args)
+ except Exception:
+ if skiperror:
+ logger.debug("ignoring error")
+ continue
+ logger.error("template command error in %s:", self.templatefile)
+ logger.error(" %s", " ".join(line))
+ # format the exception traceback
+ exclines = traceback.format_exception(*sys.exc_info())
+ # skip the bit about "ltmpl.py, in _run()" - we know that
+ exclines.pop(1)
+ # log the "ErrorType: this is what happened" line
+ logger.error(" " + exclines[-1].strip())
+ # and log the entire traceback to the debug log
+ for line in ''.join(exclines).splitlines():
+ logger.debug(" " + line)
+ if self.fatalerrors:
+ raise
+
+[docs] def install(self, srcglob, dest):
+ '''
+ install SRC DEST
+ Copy the given file (or files, if a glob is used) from the input
+ tree to the given destination in the output tree.
+ The path to DEST must exist in the output tree.
+ If DEST is a directory, SRC will be copied into that directory.
+ If DEST doesn't exist, SRC will be copied to a file with that name,
+ assuming the rest of the path exists.
+ This is pretty much like how the 'cp' command works.
+ Examples:
+ install usr/share/myconfig/grub.conf /boot
+ install /usr/share/myconfig/grub.conf.in /boot/grub.conf
+ '''
+ for src in rglob(self._in(srcglob), fatal=True):
+ try:
+ cpfile(src, self._out(dest))
+ except shutil.Error as e:
+ logger.error(e)
+
+[docs] def installimg(self, srcdir, destfile):
+ '''
+ installimg SRCDIR DESTFILE
+ Create a compressed cpio archive of the contents of SRCDIR and place
+ it in DESTFILE.
+
+ If SRCDIR doesn't exist or is empty nothing is created.
+
+ Examples:
+ installimg ${LORAXDIR}/product/ images/product.img
+ installimg ${LORAXDIR}/updates/ images/updates.img
+ '''
+ if not os.path.isdir(self._in(srcdir)) or not os.listdir(self._in(srcdir)):
+ return
+ logger.info("Creating image file %s from contents of %s", self._out(destfile), self._in(srcdir))
+ mkcpio(self._in(srcdir), self._out(destfile))
+
+[docs] def mkdir(self, *dirs):
+ '''
+ mkdir DIR [DIR ...]
+ Create the named DIR(s). Will create leading directories as needed.
+ Example:
+ mkdir /images
+ '''
+ for d in dirs:
+ d = self._out(d)
+ if not isdir(d):
+ os.makedirs(d)
+
+[docs] def replace(self, pat, repl, *fileglobs):
+ '''
+ replace PATTERN REPLACEMENT FILEGLOB [FILEGLOB ...]
+ Find-and-replace the given PATTERN (Python-style regex) with the given
+ REPLACEMENT string for each of the files listed.
+ Example:
+ replace @VERSION@ ${product.version} /boot/grub.conf /boot/isolinux.cfg
+ '''
+ match = False
+ for g in fileglobs:
+ for f in rglob(self._out(g)):
+ match = True
+ replace(f, pat, repl)
+ if not match:
+ raise IOError, "no files matched %s" % " ".join(fileglobs)
+
+[docs] def append(self, filename, data):
+ '''
+ append FILE STRING
+ Append STRING (followed by a newline character) to FILE.
+ Python character escape sequences ('\\n', '\\t', etc.) will be
+ converted to the appropriate characters.
+ Examples:
+ append /etc/depmod.d/dd.conf "search updates built-in"
+ append /etc/resolv.conf ""
+ '''
+ with open(self._out(filename), "a") as fobj:
+ fobj.write(data.decode('string_escape')+"\n")
+
+[docs] def treeinfo(self, section, key, *valuetoks):
+ '''
+ treeinfo SECTION KEY ARG [ARG ...]
+ Add an item to the treeinfo data store.
+ The given SECTION will have a new item added where
+ KEY = ARG ARG ...
+ Example:
+ treeinfo images-${kernel.arch} boot.iso images/boot.iso
+ '''
+ if section not in self.results.treeinfo:
+ self.results.treeinfo[section] = dict()
+ self.results.treeinfo[section][key] = " ".join(valuetoks)
+
+[docs] def installkernel(self, section, src, dest):
+ '''
+ installkernel SECTION SRC DEST
+ Install the kernel from SRC in the input tree to DEST in the output
+ tree, and then add an item to the treeinfo data store, in the named
+ SECTION, where "kernel" = DEST.
+
+ Equivalent to:
+ install SRC DEST
+ treeinfo SECTION kernel DEST
+ '''
+ self.install(src, dest)
+ self.treeinfo(section, "kernel", dest)
+
+[docs] def installinitrd(self, section, src, dest):
+ '''
+ installinitrd SECTION SRC DEST
+ Same as installkernel, but for "initrd".
+ '''
+ self.install(src, dest)
+ self.chmod(dest, '644')
+ self.treeinfo(section, "initrd", dest)
+
+[docs] def installupgradeinitrd(self, section, src, dest):
+ '''
+ installupgradeinitrd SECTION SRC DEST
+ Same as installkernel, but for "upgrade".
+ '''
+ self.install(src, dest)
+ self.chmod(dest, '644')
+ self.treeinfo(section, "upgrade", dest)
+
+[docs] def hardlink(self, src, dest):
+ '''
+ hardlink SRC DEST
+ Create a hardlink at DEST which is linked to SRC.
+ '''
+ if isdir(self._out(dest)):
+ dest = joinpaths(dest, basename(src))
+ os.link(self._out(src), self._out(dest))
+
+[docs] def symlink(self, target, dest):
+ '''
+ symlink SRC DEST
+ Create a symlink at DEST which points to SRC.
+ '''
+ if rexists(self._out(dest)):
+ self.remove(dest)
+ os.symlink(target, self._out(dest))
+
+[docs] def copy(self, src, dest):
+ '''
+ copy SRC DEST
+ Copy SRC to DEST.
+ If DEST is a directory, SRC will be copied inside it.
+ If DEST doesn't exist, SRC will be copied to a file with
+ that name, if the path leading to it exists.
+ '''
+ try:
+ cpfile(self._out(src), self._out(dest))
+ except shutil.Error as e:
+ logger.error(e)
+
+[docs] def move(self, src, dest):
+ '''
+ move SRC DEST
+ Move SRC to DEST.
+ '''
+ mvfile(self._out(src), self._out(dest))
+
+[docs] def remove(self, *fileglobs):
+ '''
+ remove FILEGLOB [FILEGLOB ...]
+ Remove all the named files or directories.
+ Will *not* raise exceptions if the file(s) are not found.
+ '''
+ for g in fileglobs:
+ for f in rglob(self._out(g)):
+ remove(f)
+ logger.debug("removed %s", f)
+
+[docs] def chmod(self, fileglob, mode):
+ '''
+ chmod FILEGLOB OCTALMODE
+ Change the mode of all the files matching FILEGLOB to OCTALMODE.
+ '''
+ for f in rglob(self._out(fileglob), fatal=True):
+ os.chmod(f, int(mode,8))
+
+ # TODO: do we need a new command for gsettings?
+[docs] def gconfset(self, path, keytype, value, outfile=None):
+ '''
+ gconfset PATH KEYTYPE VALUE [OUTFILE]
+ Set the given gconf PATH, with type KEYTYPE, to the given value.
+ OUTFILE defaults to /etc/gconf/gconf.xml.defaults if not given.
+ Example:
+ gconfset /apps/metacity/general/num_workspaces int 1
+ '''
+ if outfile is None:
+ outfile = self._out("etc/gconf/gconf.xml.defaults")
+ cmd = ["gconftool-2", "--direct",
+ "--config-source=xml:readwrite:%s" % outfile,
+ "--set", "--type", keytype, path, value]
+ runcmd(cmd)
+
+[docs] def log(self, msg):
+ '''
+ log MESSAGE
+ Emit the given log message. Be sure to put it in quotes!
+ Example:
+ log "Reticulating splines, please wait..."
+ '''
+ logger.info(msg)
+
+ # TODO: add ssh-keygen, mkisofs(?), find, and other useful commands
+[docs] def runcmd(self, *cmdlist):
+ '''
+ runcmd CMD [--chdir=DIR] [ARG ...]
+ Run the given command with the given arguments.
+ If "--chdir=DIR" is given, change to the named directory
+ before executing the command.
+
+ NOTE: All paths given MUST be COMPLETE, ABSOLUTE PATHS to the file
+ or files mentioned. ${root}/${inroot}/${outroot} are good for
+ constructing these paths.
+
+ FURTHER NOTE: Please use this command only as a last resort!
+ Whenever possible, you should use the existing template commands.
+ If the existing commands don't do what you need, fix them!
+
+ Examples:
+ (this should be replaced with a "find" function)
+ runcmd find ${root} -name "*.pyo" -type f -delete
+ %for f in find(root, name="*.pyo"):
+ remove ${f}
+ %endfor
+ '''
+ cwd = None
+ cmd = cmdlist
+ logger.debug('running command: %s', cmd)
+ if cmd[0].startswith("--chdir="):
+ cwd = cmd[0].split('=',1)[1]
+ cmd = cmd[1:]
+
+ try:
+ _output = runcmd_output(cmd, cwd=cwd)
+ if _output:
+ logger.debug('command output:\n%s', _output)
+ logger.debug("command finished successfully")
+ except CalledProcessError as e:
+ if e.output:
+ logger.debug('command output:\n%s', e.output)
+ logger.debug('command returned failure (%d)', e.returncode)
+ raise
+
+[docs] def installpkg(self, *pkgs):
+ '''
+ installpkg [--required] PKGGLOB [PKGGLOB ...]
+ Request installation of all packages matching the given globs.
+ Note that this is just a *request* - nothing is *actually* installed
+ until the 'run_pkg_transaction' command is given.
+ '''
+ required = False
+ if pkgs[0] == '--required':
+ pkgs = pkgs[1:]
+ required = True
+
+ for p in pkgs:
+ try:
+ self.yum.install(pattern=p)
+ except Exception as e:
+ # FIXME: save exception and re-raise after the loop finishes
+ logger.error("installpkg %s failed: %s",p,str(e))
+ if required:
+ raise
+
+[docs] def removepkg(self, *pkgs):
+ '''
+ removepkg PKGGLOB [PKGGLOB...]
+ Delete the named package(s).
+ IMPLEMENTATION NOTES:
+ RPM scriptlets (%preun/%postun) are *not* run.
+ Files are deleted, but directories are left behind.
+ '''
+ for p in pkgs:
+ filepaths = [f.lstrip('/') for f in self._filelist(p)]
+ # TODO: also remove directories that aren't owned by anything else
+ if filepaths:
+ logger.debug("removepkg %s: %ikb", p, self._getsize(*filepaths)/1024)
+ self.remove(*filepaths)
+ else:
+ logger.debug("removepkg %s: no files to remove!", p)
+
    def run_pkg_transaction(self):
        '''
        run_pkg_transaction
        Actually install all the packages requested by previous 'installpkg'
        commands.
        '''
        # Resolve dependencies, then run the transaction with lorax's
        # download/transaction/rpm progress callbacks attached.
        self.yum.buildTransaction()
        self.yum.repos.setProgressBar(yumhelper.LoraxDownloadCallback())
        self.yum.processTransaction(callback=yumhelper.LoraxTransactionCallback(),
                                    rpmDisplay=yumhelper.LoraxRpmCallback())

        # verify if all packages that were supposed to be installed,
        # are really installed
        errs = [t.po for t in self.yum.tsInfo if not self.yum.rpmdb.contains(po=t.po)]
        for po in errs:
            logger.error("package '%s' was not installed", po)

        # Write the manifest of installed files to /root/lorax-packages.log
        with open(self._out("root/lorax-packages.log"), "w") as f:
            for t in sorted(self.yum.tsInfo):
                f.write("%s\n" % t.po)

        # Release the rpm database so later template commands can modify
        # the installroot freely.
        self.yum.closeRpmDB()
+[docs] def removefrom(self, pkg, *globs):
+ '''
+ removefrom PKGGLOB [--allbut] FILEGLOB [FILEGLOB...]
+ Remove all files matching the given file globs from the package
+ (or packages) named.
+ If '--allbut' is used, all the files from the given package(s) will
+ be removed *except* the ones which match the file globs.
+ Examples:
+ removefrom usbutils /usr/bin/*
+ removefrom xfsprogs --allbut /sbin/*
+ '''
+ cmd = "%s %s" % (pkg, " ".join(globs)) # save for later logging
+ keepmatches = False
+ if globs[0] == '--allbut':
+ keepmatches = True
+ globs = globs[1:]
+ # get pkg filelist and find files that match the globs
+ filelist = self._filelist(pkg)
+ matches = set()
+ for g in globs:
+ globs_re = re.compile(fnmatch.translate(g))
+ m = filter(globs_re.match, filelist)
+ if m:
+ matches.update(m)
+ else:
+ logger.debug("removefrom %s %s: no files matched!", pkg, g)
+ # are we removing the matches, or keeping only the matches?
+ if keepmatches:
+ files_to_remove = filelist.difference(matches)
+ else:
+ files_to_remove = matches
+ # remove the files
+ if files_to_remove:
+ logger.debug("%s: removed %i/%i files, %ikb/%ikb", cmd,
+ len(files_to_remove), len(filelist),
+ self._getsize(*files_to_remove)/1024, self._getsize(*filelist)/1024)
+ self.remove(*files_to_remove)
+ else:
+ logger.debug("removefrom %s: no files to remove!", cmd)
+
+[docs] def removekmod(self, *globs):
+ '''
+ removekmod GLOB [GLOB...] [--allbut] KEEPGLOB [KEEPGLOB...]
+ Remove all files and directories matching the given file globs from the kernel
+ modules directory.
+
+ If '--allbut' is used, all the files from the modules will be removed *except*
+ the ones which match the file globs. There must be at least one initial GLOB
+ to search and one KEEPGLOB to keep. The KEEPGLOB is expanded to be *KEEPGLOB*
+ so that it will match anywhere in the path.
+
+ This only removes files from under /lib/modules/*/kernel/
+
+ Examples:
+ removekmod sound drivers/media drivers/hwmon drivers/video
+ removekmod drivers/char --allbut virtio_console hw_random
+ '''
+ cmd = " ".join(globs)
+ if "--allbut" in globs:
+ idx = globs.index("--allbut")
+ if idx == 0:
+ raise ValueError("removekmod needs at least one GLOB before --allbut")
+
+ # Apply keepglobs anywhere they appear in the path
+ keepglobs = globs[idx+1:]
+ if len(keepglobs) == 0:
+ raise ValueError("removekmod needs at least one GLOB after --allbut")
+
+ globs = globs[:idx]
+ else:
+ # Nothing to keep
+ keepglobs = []
+
+ filelist = set()
+ for g in globs:
+ for top_dir in rglob(self._out("/lib/modules/*/kernel/"+g)):
+ for root, _dirs, files in os.walk(top_dir):
+ filelist.update(root+"/"+f for f in files)
+
+ # Remove anything matching keepglobs from the list
+ matches = set()
+ for g in keepglobs:
+ globs_re = re.compile(fnmatch.translate("*"+g+"*"))
+ m = filter(globs_re.match, filelist)
+ if m:
+ matches.update(m)
+ else:
+ logger.debug("removekmod %s: no files matched!", g)
+ remove_files = filelist.difference(matches)
+
+ if remove_files:
+ logger.debug("removekmod: removing %d files", len(remove_files))
+ map(remove, remove_files)
+ else:
+ logger.debug("removekmod %s: no files to remove!", cmd)
+
+[docs] def createaddrsize(self, addr, src, dest):
+ '''
+ createaddrsize INITRD_ADDRESS INITRD ADDRSIZE
+ Create the initrd.addrsize file required in LPAR boot process.
+ Examples:
+ createaddrsize ${INITRD_ADDRESS} ${outroot}/${BOOTDIR}/initrd.img ${outroot}/${BOOTDIR}/initrd.addrsize
+ '''
+ addrsize = open(dest, "wb")
+ addrsize_data = struct.pack(">iiii", 0, int(addr, 16), 0, os.stat(src).st_size)
+ addrsize.write(addrsize_data)
+ addrsize.close()
+
+[docs] def systemctl(self, cmd, *units):
+ '''
+ systemctl [enable|disable|mask] UNIT [UNIT...]
+ Enable, disable, or mask the given systemd units.
+ Examples:
+ systemctl disable lvm2-monitor.service
+ systemctl mask fedora-storage-init.service fedora-configure.service
+ '''
+ if cmd not in ('enable', 'disable', 'mask'):
+ raise ValueError('unsupported systemctl cmd: %s' % cmd)
+ if not units:
+ logger.debug("systemctl: no units given for %s, ignoring", cmd)
+ return
+ self.mkdir("/run/systemd/system") # XXX workaround for systemctl bug
+ systemctl = ['systemctl', '--root', self.outroot, '--no-reload',
+ cmd]
+ # When a unit doesn't exist systemd aborts the command. Run them one at a time.
+ # XXX for some reason 'systemctl enable/disable' always returns 1
+ for unit in units:
+ try:
+ cmd = systemctl + [unit]
+ runcmd(cmd)
+ except CalledProcessError:
+ pass
+
+#
+# sysutils.py
+#
+# Copyright (C) 2009 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s): Martin Gracik <mgracik@redhat.com>
+#
+
+__all__ = ["joinpaths", "touch", "replace", "chown_", "chmod_", "remove",
+ "linktree"]
+
+import sys
+import os
+import re
+import fileinput
+import pwd
+import grp
+import glob
+import shutil
+
+from pylorax.executils import runcmd
+
def joinpaths(*args, **kwargs):
    """Join the given path components with os.path.sep.

    With follow_symlinks=True the joined path is resolved to its
    canonical form via os.path.realpath.
    """
    path = os.path.sep.join(args)
    if not kwargs.get("follow_symlinks"):
        return path
    return os.path.realpath(path)
+
+
def replace(fname, find, substitute):
    """Edit *fname* in place, replacing every regex match of *find*
    with *substitute* on each line.
    """
    pattern = re.compile(find)
    fin = fileinput.input(fname, inplace=1)
    for line in fin:
        # fileinput's inplace mode redirects stdout into the file.
        sys.stdout.write(pattern.sub(substitute, line))
    fin.close()
+
+
def chown_(path, user=None, group=None, recursive=False):
    """Change ownership of every file matching the glob *path*.

    user/group are names, not numeric ids; a None value leaves that id
    unchanged (-1). With recursive=True, directories are descended into.
    """
    uid = pwd.getpwnam(user)[2] if user is not None else -1
    gid = grp.getgrnam(group)[2] if group is not None else -1

    for fname in glob.iglob(path):
        os.chown(fname, uid, gid)
        if recursive and os.path.isdir(fname):
            for nested in os.listdir(fname):
                chown_(joinpaths(fname, nested), user, group, recursive)
+
+
def chmod_(path, mode, recursive=False):
    """Change the mode of every file matching the glob *path* to *mode*.

    With recursive=True, directories are descended into.
    """
    for fname in glob.iglob(path):
        os.chmod(fname, mode)
        if recursive and os.path.isdir(fname):
            for nested in os.listdir(fname):
                chmod_(joinpaths(fname, nested), mode, recursive)
+
+
def cpfile(src, dst):
    """Copy src to dst (metadata included) and return the copy's path.

    When dst is a directory the copy keeps src's basename inside it.
    """
    shutil.copy2(src, dst)
    if not os.path.isdir(dst):
        return dst
    return joinpaths(dst, os.path.basename(src))
+
def mvfile(src, dst):
    """Move src to dst and return the final path.

    When dst is a directory the file keeps src's basename inside it.
    """
    target = joinpaths(dst, os.path.basename(src)) if os.path.isdir(dst) else dst
    os.rename(src, target)
    return target
+
def remove(target):
    """Delete *target*: rmtree for real directories, unlink for files.

    A symlink to a directory is unlinked, not descended into.
    """
    if os.path.islink(target) or not os.path.isdir(target):
        os.unlink(target)
    else:
        shutil.rmtree(target)
+
+
+
+# treebuilder.py - handle arch-specific tree building stuff using templates
+#
+# Copyright (C) 2011 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author(s): Will Woods <wwoods@redhat.com>
+
+import logging
+logger = logging.getLogger("pylorax.treebuilder")
+
+import os, re
+from os.path import basename
+
+from pylorax.sysutils import joinpaths, remove
+from shutil import copytree, copy2
+from pylorax.base import DataHolder
+from pylorax.ltmpl import LoraxTemplateRunner
+import pylorax.imgutils as imgutils
+from pylorax.executils import runcmd, runcmd_output
+
# Map each supported basearch to the arch-specific boot-tree template
# that TreeBuilder.build() runs.
templatemap = {
    'i386': 'x86.tmpl',
    'x86_64': 'x86.tmpl',
    'ppc': 'ppc.tmpl',
    'ppc64': 'ppc.tmpl',
    'ppc64le': 'ppc.tmpl',
    's390': 's390.tmpl',
    's390x': 's390.tmpl',
    'aarch64': 'aarch64.tmpl',
    'arm': 'arm.tmpl',
    'armhfp': 'arm.tmpl',
}
+
def generate_module_info(moddir, outfile=None):
    """Write a module-info file describing the block/network kernel modules
    under *moddir* (descriptions come from `modinfo`).

    outfile defaults to <moddir>/module-info.
    """
    def module_desc(mod):
        # One-line description from the module's metadata.
        return runcmd_output(["modinfo", "-F", "description", mod]).strip()

    def read_module_set(name):
        # Fix: close the listing file instead of leaking the handle.
        with open(joinpaths(moddir, name)) as listing:
            return set(l.strip() for l in listing if ".ko" in l)

    modsets = {'scsi': read_module_set("modules.block"),
               'eth': read_module_set("modules.networking")}

    modinfo = list()
    for root, _dirs, files in os.walk(moddir):
        for modtype, modset in modsets.items():
            for mod in modset.intersection(files):  # modules in this dir
                (name, _ext) = os.path.splitext(mod)  # foo.ko -> (foo, .ko)
                desc = module_desc(joinpaths(root, mod)) or "%s driver" % name
                modinfo.append(dict(name=name, type=modtype, desc=desc))

    # Fix: the output file handle was previously never closed/flushed;
    # write it through a context manager.
    with open(outfile or joinpaths(moddir, "module-info"), "w") as out:
        out.write("Version 0\n")
        for mod in sorted(modinfo, key=lambda m: m.get('name')):
            out.write('{name}\n\t{type}\n\t"{desc:.65}"\n'.format(**mod))
+
class RuntimeBuilder(object):
    '''Builds the anaconda runtime image.'''
    def __init__(self, product, arch, yum, templatedir=None,
                 installpkgs=None,
                 add_templates=None,
                 add_template_vars=None):
        root = yum.conf.installroot
        # use a copy of product so we can modify it locally
        product = product.copy()
        product.name = product.name.lower()
        # Variables handed to every template run by this builder.
        self.vars = DataHolder(arch=arch, product=product, yum=yum, root=root,
                               basearch=arch.basearch, libdir=arch.libdir)
        self.yum = yum
        self._runner = LoraxTemplateRunner(inroot=root, outroot=root,
                                           yum_obj=yum, templatedir=templatedir)
        self.add_templates = add_templates or []
        self.add_template_vars = add_template_vars or {}
        self._installpkgs = installpkgs or []
        self._runner.defaults = self.vars

    def _install_branding(self):
        '''Install the release package providing /etc/system-release
        (skipping the generic placeholder) and its matching -logos pkg.'''
        release = None
        for pkg in self.yum.whatProvides('/etc/system-release', None, None):
            if pkg.name.startswith('generic'):
                continue
            else:
                release = pkg.name
                break

        if not release:
            logger.error('could not get the release')
            return

        # release
        logger.info('got release: %s', release)
        self._runner.installpkg(release)

        # logos (e.g. "fedora-release" -> "fedora-logos")
        release, _suffix = release.split('-', 1)
        self._runner.installpkg('%s-logos' % release)

    def install(self):
        '''Install packages and do initial setup with runtime-install.tmpl'''
        self._install_branding()
        if len(self._installpkgs) > 0:
            self._runner.installpkg(*self._installpkgs)
        self._runner.run("runtime-install.tmpl")
        for tmpl in self.add_templates:
            self._runner.run(tmpl, **self.add_template_vars)

    def writepkglists(self, pkglistdir):
        '''debugging data: write out lists of package contents'''
        if not os.path.isdir(pkglistdir):
            os.makedirs(pkglistdir)
        for pkgobj in self.yum.doPackageLists(pkgnarrow='installed').installed:
            with open(joinpaths(pkglistdir, pkgobj.name), "w") as fobj:
                for fname in pkgobj.filelist + pkgobj.dirlist:
                    fobj.write("{0}\n".format(fname))

    def postinstall(self):
        '''Do some post-install setup work with runtime-postinstall.tmpl'''
        # copy configdir into runtime root beforehand
        configdir = joinpaths(self._runner.templatedir, "config_files")
        configdir_path = "tmp/config_files"
        fullpath = joinpaths(self.vars.root, configdir_path)
        if os.path.exists(fullpath):
            remove(fullpath)
        copytree(configdir, fullpath)
        self._runner.run("runtime-postinstall.tmpl", configdir=configdir_path)

    def cleanup(self):
        '''Remove unneeded packages and files with runtime-cleanup.tmpl'''
        self._runner.run("runtime-cleanup.tmpl")

    def writepkgsizes(self, pkgsizefile):
        '''debugging data: write a big list of pkg sizes'''
        getsize = lambda f: os.lstat(f).st_size if os.path.exists(f) else 0
        # Fix: the output file was previously opened without ever being
        # closed; a context manager guarantees flush/close.
        with open(pkgsizefile, "w") as fobj:
            for p in sorted(self.yum.doPackageLists(pkgnarrow='installed').installed):
                pkgsize = sum(getsize(joinpaths(self.vars.root, f)) for f in p.filelist)
                fobj.write("{0.name}.{0.arch}: {1}\n".format(p, pkgsize))

    def generate_module_data(self):
        '''Run depmod and write module-info for every installed kernel.'''
        root = self.vars.root
        moddir = joinpaths(root, "lib/modules/")
        for kver in os.listdir(moddir):
            ksyms = joinpaths(root, "boot/System.map-%s" % kver)
            logger.info("doing depmod and module-info for %s", kver)
            runcmd(["depmod", "-a", "-F", ksyms, "-b", root, kver])
            generate_module_info(moddir+kver, outfile=moddir+"module-info")

    def create_runtime(self, outfile="/var/tmp/squashfs.img", compression="xz", compressargs=None, size=2):
        '''Build the runtime squashfs image containing LiveOS/rootfs.img.'''
        if compressargs is None:
            compressargs = []
        # make live rootfs image - must be named "LiveOS/rootfs.img" for dracut
        workdir = joinpaths(os.path.dirname(outfile), "runtime-workdir")
        os.makedirs(joinpaths(workdir, "LiveOS"))

        imgutils.mkrootfsimg(self.vars.root, joinpaths(workdir, "LiveOS/rootfs.img"),
                             "Anaconda", size=size)

        # squash the live rootfs and clean up workdir
        imgutils.mksquashfs(workdir, outfile, compression, compressargs)
        remove(workdir)
+
+[docs]class TreeBuilder(object):
+ '''Builds the arch-specific boot images.
+ inroot should be the installtree root (the newly-built runtime dir)'''
    def __init__(self, product, arch, inroot, outroot, runtime, isolabel, domacboot=False, doupgrade=True, templatedir=None, add_templates=None, add_template_vars=None, workdir=None):

        # NOTE: if you pass an arg named "runtime" to a mako template it'll
        # clobber some mako internal variables - hence "runtime_img".
        self.vars = DataHolder(arch=arch, product=product, runtime_img=runtime,
                               runtime_base=basename(runtime),
                               inroot=inroot, outroot=outroot,
                               basearch=arch.basearch, libdir=arch.libdir,
                               isolabel=isolabel, udev=udev_escape, domacboot=domacboot, doupgrade=doupgrade,
                               workdir=workdir)
        # Template runner used by build(); defaults expose self.vars to
        # every template it executes.
        self._runner = LoraxTemplateRunner(inroot, outroot, templatedir=templatedir)
        self._runner.defaults = self.vars
        self.add_templates = add_templates or []
        self.add_template_vars = add_template_vars or {}
        self.templatedir = templatedir
        # Populated by build() with the treeinfo data gathered by the runner.
        self.treeinfo_data = None
+
+ @property
+
+[docs] def rebuild_initrds(self, add_args=None, backup="", prefix=""):
+ '''Rebuild all the initrds in the tree. If backup is specified, each
+ initrd will be renamed with backup as a suffix before rebuilding.
+ If backup is empty, the existing initrd files will be overwritten.
+ If suffix is specified, the existing initrd is untouched and a new
+ image is built with the filename "${prefix}-${kernel.version}.img"
+ '''
+ if add_args is None:
+ add_args = []
+ dracut = ["dracut", "--nomdadmconf", "--nolvmconf"] + add_args
+ if not backup:
+ dracut.append("--force")
+
+ kernels = [kernel for kernel in self.kernels if hasattr(kernel, "initrd")]
+ if not kernels:
+ raise Exception("No initrds found, cannot rebuild_initrds")
+
+ # Hush some dracut warnings. TODO: bind-mount proc in place?
+ open(joinpaths(self.vars.inroot,"/proc/modules"),"w")
+ for kernel in kernels:
+ if prefix:
+ idir = os.path.dirname(kernel.initrd.path)
+ outfile = joinpaths(idir, prefix+'-'+kernel.version+'.img')
+ else:
+ outfile = kernel.initrd.path
+ logger.info("rebuilding %s", outfile)
+ if backup:
+ initrd = joinpaths(self.vars.inroot, outfile)
+ os.rename(initrd, initrd + backup)
+ cmd = dracut + [outfile, kernel.version]
+ runcmd(cmd, root=self.vars.inroot)
+
+ # ppc64 cannot boot images > 32MiB, check size and warn
+ if self.vars.arch.basearch in ("ppc64", "ppc64le") and os.path.exists(outfile):
+ st = os.stat(outfile)
+ if st.st_size > 32 * 1024 * 1024:
+ logging.warning("ppc64 initrd %s is > 32MiB", outfile)
+
+ os.unlink(joinpaths(self.vars.inroot,"/proc/modules"))
+
+[docs] def build(self):
+ templatefile = templatemap[self.vars.arch.basearch]
+ for tmpl in self.add_templates:
+ self._runner.run(tmpl, **self.add_template_vars)
+ self._runner.run(templatefile, kernels=self.kernels)
+ self.treeinfo_data = self._runner.results.treeinfo
+ self.implantisomd5()
+
+[docs] def implantisomd5(self):
+ for _section, data in self.treeinfo_data.items():
+ if 'boot.iso' in data:
+ iso = joinpaths(self.vars.outroot, data['boot.iso'])
+ runcmd(["implantisomd5", iso])
+
+ @property
+[docs] def dracut_hooks_path(self):
+ """ Return the path to the lorax dracut hooks scripts
+
+ Use the configured share dir if it is setup,
+ otherwise default to /usr/share/lorax/dracut_hooks
+ """
+ if self.templatedir:
+ return joinpaths(self.templatedir, "dracut_hooks")
+ else:
+ return "/usr/share/lorax/dracut_hooks"
+
+[docs] def copy_dracut_hooks(self, hooks):
+ """ Copy the hook scripts in hooks into the installroot's /tmp/
+ and return a list of commands to pass to dracut when creating the
+ initramfs
+
+ hooks is a list of tuples with the name of the hook script and the
+ target dracut hook directory
+ (eg. [("99anaconda-copy-ks.sh", "/lib/dracut/hooks/pre-pivot")])
+ """
+ dracut_commands = []
+ for hook_script, dracut_path in hooks:
+ src = joinpaths(self.dracut_hooks_path, hook_script)
+ if not os.path.exists(src):
+ logger.error("Missing lorax dracut hook script %s", src)
+ continue
+ dst = joinpaths(self.vars.inroot, "/tmp/", hook_script)
+ copy2(src, dst)
+ dracut_commands += ["--include", joinpaths("/tmp/", hook_script),
+ dracut_path]
+ return dracut_commands
+
+#### TreeBuilder helper functions
+
def findkernels(root="/", kdir="boot"):
    """Return a DataHolder for each vmlinuz-* kernel found under root/kdir,
    with any matching initrd/initramfs/etc. images attached by type."""
    # To find possible flavors, awk '/BuildKernel/ { print $4 }' kernel.spec
    flavors = ('debug', 'PAE', 'PAEdebug', 'smp', 'xen', 'lpae', 'tegra')
    kre = re.compile(r"vmlinuz-(?P<version>.+?\.(?P<arch>[a-z0-9_]+)"
                     r"(\.(?P<flavor>{0}))?)$".format("|".join(flavors)))
    bootfiles = os.listdir(joinpaths(root, kdir))

    kernels = []
    for fname in bootfiles:
        m = kre.match(fname)
        if not m:
            continue
        kernel = DataHolder(path=joinpaths(kdir, fname))
        kernel.update(m.groupdict())  # sets version, arch, flavor
        kernels.append(kernel)

    # look for associated initrd/initramfs/etc.
    for kernel in kernels:
        suffix = '-' + kernel.version + '.img'
        for fname in bootfiles:
            if not fname.endswith(suffix):
                continue
            imgtype = fname.split('-', 1)[0]
            # special backwards-compat case
            if imgtype == 'initramfs':
                imgtype = 'initrd'
            kernel[imgtype] = DataHolder(path=joinpaths(kdir, fname))

    logger.debug("kernels=%s", kernels)
    return kernels
+
# udev whitelist: 'a-zA-Z0-9#+.:=@_-' (see is_whitelisted in libudev-util.c)
udev_blacklist=' !"$%&\'()*,/;<>?[\\]^`{|}~' # ASCII printable, minus whitelist
udev_blacklist += ''.join(chr(i) for i in range(32)) # ASCII non-printable
def udev_escape(label):
    # Escape a filesystem label so udev will accept it: every character
    # outside udev's whitelist becomes its \xNN hex escape.
    # NOTE: uses str.decode('utf8')/encode('utf8') on the argument, so this
    # is Python 2 only code (label is a byte string).
    out = u''
    for ch in label.decode('utf8'):
        out += ch if ch not in udev_blacklist else u'\\x%02x' % ord(ch)
    return out.encode('utf8')
+
+#
+# treeinfo.py
+#
+# Copyright (C) 2010 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s): Martin Gracik <mgracik@redhat.com>
+#
+
+import logging
+logger = logging.getLogger("pylorax.treeinfo")
+
+import ConfigParser
+import time
+
+
class TreeInfo(object):
    """Builds and writes the .treeinfo metadata file for an install tree."""

    def __init__(self, product, version, variant, basearch,
                 packagedir=""):

        self.c = ConfigParser.ConfigParser()

        section = "general"
        data = {"timestamp": time.time(),
                "family": product,
                "version": version,
                "name": "%s-%s" % (product, version),
                "variant": variant or "",
                "arch": basearch,
                "packagedir": packagedir}

        self.c.add_section(section)
        # Fix: replaced map() over a side-effecting lambda with
        # tuple-parameter unpacking - that lambda syntax is a SyntaxError
        # on Python 3, and map() there is lazy so nothing would run.
        for key, value in data.items():
            self.c.set(section, key, value)

    def add_section(self, section, data):
        """Set every key/value pair from *data* in *section*, creating
        the section first if needed."""
        if not self.c.has_section(section):
            self.c.add_section(section)

        # Fix: plain loop instead of map()+tuple-unpacking lambda (see
        # __init__).
        for key, value in data.items():
            self.c.set(section, key, value)

    def write(self, outfile):
        """Write the accumulated treeinfo data to *outfile*."""
        logger.info("writing .treeinfo file")
        with open(outfile, "w") as fobj:
            self.c.write(fobj)
+
+#
+# yumhelper.py
+#
+# Copyright (C) 2010 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s): Martin Gracik <mgracik@redhat.com>
+#
+
+import logging
+logger = logging.getLogger("pylorax.yumhelper")
+import re
+import yum, yum.callbacks, yum.rpmtrans
+import pylorax.output as output
+
+__all__ = ['LoraxDownloadCallback', 'LoraxTransactionCallback',
+ 'LoraxRpmCallback']
+
class LoraxDownloadCallback(yum.callbacks.DownloadBaseCallback):
    """Renders a one-line download progress display on the lorax output.

    Parses yum's "(pkgno/total): name" download labels to extract the
    package counter and name.
    """

    def __init__(self):
        yum.callbacks.DownloadBaseCallback.__init__(self)
        self.output = output.LoraxOutput()
        self.pattern = re.compile(r"\((?P<pkgno>\d+)/(?P<total>\d+)\):\s+(?P<pkgname>.*)")

    def updateProgress(self, name, frac, fread, ftime):
        """
        Update the progress bar
        @param name: filename
        @param frac: progress fraction (0 -> 1)
        @param fread: formated string containing BytesRead
        @param ftime: formated string containing remaining or elapsed time
        """
        match = self.pattern.match(name)
        if match:
            pkgno = int(match.group("pkgno"))
            total = int(match.group("total"))
            pkgname = match.group("pkgname")
        else:
            pkgno, total, pkgname = 0, 0, name

        info = "({0:3d}/{1:3d}) [{2:3.0f}%] downloading ".format(pkgno, total, frac * 100)

        # Truncate the package name so the whole line fits the terminal.
        room = self.output.width - len(info)
        if len(pkgname) > room:
            pkgname = "{0}...".format(pkgname[:room - 3])

        self.output.write("{0}<b>{1}</b>\r".format(info, pkgname))
        if frac == 1:
            self.output.write("\n")
+
+
class LoraxTransactionCallback(object):
    """Writes a short status line for each yum transaction phase."""

    def __init__(self):
        self.output = output.LoraxOutput()

    def event(self, state, data=None):
        # Phases with no message (e.g. PT_DOWNLOAD_PKGS) print nothing.
        messages = {
            yum.callbacks.PT_DOWNLOAD: "downloading packages\n",
            yum.callbacks.PT_GPGCHECK: "checking package signatures\n",
            yum.callbacks.PT_TEST_TRANS: "running test transaction\n",
            yum.callbacks.PT_TRANSACTION: "running transaction\n",
        }
        msg = messages.get(state)
        if msg is not None:
            self.output.write(msg)
+
+
class LoraxRpmCallback(yum.rpmtrans.RPMBaseCallback):
    """Per-package progress display for the rpm transaction."""

    def __init__(self):
        yum.rpmtrans.RPMBaseCallback.__init__(self)
        self.output = output.LoraxOutput()

    def event(self, package, action, te_current, te_total,
              ts_current, ts_total):
        # self.action (from RPMBaseCallback) maps action codes to strings.
        action_str = self.action[action].encode("utf-8")
        percent = float(te_current) / float(te_total) * 100
        info = "({0:3d}/{1:3d}) [{2:3.0f}%] {3} ".format(
            ts_current, ts_total, percent, action_str.lower())

        pkg = "{0}".format(package)

        # Truncate the package name so the whole line fits the terminal.
        room = self.output.width - len(info)
        if len(pkg) > room:
            pkg = "{0}...".format(pkg[:room - 3])

        self.output.write("{0}<b>{1}</b>\r".format(info, pkg))
        if te_current == te_total:
            self.output.write("\n")

    def filelog(self, package, action):
        # Only log the final installed state; other file actions are noise.
        if self.fileaction.get(action) == "Installed":
            logger.debug("%s installed successfully", package)

    def scriptout(self, package, msgs):
        # Forward any scriptlet (%post etc.) output to the log.
        if msgs:
            logger.info("%s scriptlet output:\n%s", package, msgs)
+