diff --git a/src/pylorax/api/__init__.py b/src/pylorax/api/__init__.py
new file mode 100644
index 00000000..5ee1ec8a
--- /dev/null
+++ b/src/pylorax/api/__init__.py
@@ -0,0 +1,21 @@
+#
+# lorax-composer API server
+#
+# Copyright (C) 2017 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+from pylorax.api.crossdomain import crossdomain
+
+__all__ = ["crossdomain"]
+
diff --git a/src/pylorax/api/compose.py b/src/pylorax/api/compose.py
new file mode 100644
index 00000000..bacde108
--- /dev/null
+++ b/src/pylorax/api/compose.py
@@ -0,0 +1,388 @@
+# Copyright (C) 2018 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+""" Setup for composing an image
+
+Adding New Output Types
+-----------------------
+
+The new output type must add a kickstart template to ./share/composer/ where the
+name of the kickstart (without the trailing .ks) matches the entry in compose_args.
+
+The kickstart should not have any url or repo entries; these will be added at build
+time. The %packages section should come last, and while it can contain mandatory
+packages required by the output type, it should not have the trailing %end because the
+package NEVRAs will be appended to it at build time.
+
+compose_args should have an entry whose name matches the kickstart, and it should set
+the novirt_install parameters needed to generate the desired output. The flags for the
+other output types should be set to False.
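+
+For example, a hypothetical "my-tar" output type would ship share/composer/my-tar.ks
+and add a matching entry to the _MAP in compose_args(). A sketch, showing only the
+keys that differ from the existing "tar" entry::
+
+    "my-tar": {"make_tar": True,
+               "image_name": default_image_name("xz", "root.tar"),
+               ...}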
+
+"""
+import logging
+log = logging.getLogger("lorax-composer")
+
+import os
+from glob import glob
+from math import ceil
+import pytoml as toml
+import shutil
+from uuid import uuid4
+
+from pyanaconda.simpleconfig import SimpleConfigFile
+
+# Use pykickstart to calculate disk image size
+from pykickstart.parser import KickstartParser
+from pykickstart.version import makeVersion, RHEL7
+
+from pylorax.api.projects import projects_depsolve_with_size, dep_nevra
+from pylorax.api.projects import ProjectsError
+from pylorax.api.recipes import read_recipe_and_id
+from pylorax.imgutils import default_image_name
+from pylorax.sysutils import joinpaths
+
+
+def repo_to_ks(r, url="url"):
+ """ Return a kickstart line with the correct args.
+
+ Set url to "baseurl" if it is a repo, leave it as "url" for the installation url.
+ """
+ cmd = ""
+ if url == "url":
+ if not r.urls:
+ raise RuntimeError("Cannot find a base url for %s" % r.name)
+
+        # url is passed to Anaconda on the cmdline with --repo, so it cannot support a mirror.
+        # If a mirror is set up, yum will return the list of mirrors in .urls,
+        # so just use the first one.
+ cmd += '--%s="%s" ' % (url, r.urls[0])
+ elif r.metalink:
+ # XXX Total Hack
+ # RHEL7 kickstart doesn't support metalink. If the url has 'metalink' in it, rewrite it as 'mirrorlist'
+ if "metalink" in r.metalink:
+ log.info("RHEL7 does not support metalink, translating to mirrorlist")
+ cmd += '--mirrorlist="%s" ' % r.metalink.replace("metalink", "mirrorlist")
+ else:
+ log.error("Could not convert metalink to mirrorlist. %s", r.metalink)
+ raise RuntimeError("Cannot convert metalink to mirrorlist: %s" % r.metalink)
+ elif r.mirrorlist:
+ cmd += '--mirrorlist="%s" ' % r.mirrorlist
+ elif r.baseurl:
+ cmd += '--%s="%s" ' % (url, r.baseurl[0])
+ else:
+ raise RuntimeError("Repo has no baseurl or mirror")
+
+ if r.proxy:
+ cmd += '--proxy="%s" ' % r.proxy
+
+ if not r.sslverify:
+ cmd += '--noverifyssl'
+
+ return cmd
+
+def start_build(cfg, yumlock, gitlock, branch, recipe_name, compose_type, test_mode=0):
+ """ Start the build
+
+ :param cfg: Configuration object
+ :type cfg: ComposerConfig
+ :param yumlock: Lock and YumBase for depsolving
+ :type yumlock: YumLock
+ :param recipe: The recipe to build
+ :type recipe: str
+ :param compose_type: The type of output to create from the recipe
+ :type compose_type: str
+ :returns: Unique ID for the build that can be used to track its status
+ :rtype: str
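+
+    A sketch of a typical call, assuming cfg, yumlock, and gitlock have already been
+    set up by the server (the names here are illustrative)::
+
+        build_id = start_build(cfg, yumlock, gitlock, "master", "http-server", "tar")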
+ """
+ share_dir = cfg.get("composer", "share_dir")
+ lib_dir = cfg.get("composer", "lib_dir")
+
+ # Make sure compose_type is valid
+ if compose_type not in compose_types(share_dir):
+ raise RuntimeError("Invalid compose type (%s), must be one of %s" % (compose_type, compose_types(share_dir)))
+
+ with gitlock.lock:
+ (commit_id, recipe) = read_recipe_and_id(gitlock.repo, branch, recipe_name)
+
+ # Combine modules and packages and depsolve the list
+ # TODO include the version/glob in the depsolving
+ module_names = [m["name"] for m in recipe["modules"] or []]
+ package_names = [p["name"] for p in recipe["packages"] or []]
+ projects = sorted(set(module_names+package_names), key=lambda n: n.lower())
+ deps = []
+ try:
+ with yumlock.lock:
+ (installed_size, deps) = projects_depsolve_with_size(yumlock.yb, projects, with_core=False)
+ except ProjectsError as e:
+ log.error("start_build depsolve: %s", str(e))
+ raise RuntimeError("Problem depsolving %s: %s" % (recipe["name"], str(e)))
+
+ # Read the kickstart template for this type
+ ks_template_path = joinpaths(share_dir, "composer", compose_type) + ".ks"
+ ks_template = open(ks_template_path, "r").read()
+
+ # How much space will the packages in the default template take?
+ ks_version = makeVersion(RHEL7)
+ ks = KickstartParser(ks_version, errorsAreFatal=False, missingIncludeIsFatal=False)
+ ks.readKickstartFromString(ks_template+"\n%end\n")
+ try:
+ with yumlock.lock:
+ (template_size, _) = projects_depsolve_with_size(yumlock.yb, ks.handler.packages.packageList,
+ with_core=not ks.handler.packages.nocore)
+ except ProjectsError as e:
+ log.error("start_build depsolve: %s", str(e))
+ raise RuntimeError("Problem depsolving %s: %s" % (recipe["name"], str(e)))
+ log.debug("installed_size = %d, template_size=%d", installed_size, template_size)
+
+    # Minimum LMC disk size is 1GiB, and anaconda bumps the estimated size up by 35%
+    # (which doesn't always work), so pad the estimate by 40% here.
+    installed_size = max(1024**3, int((installed_size+template_size) * 1.4))
+ log.debug("/ partition size = %d", installed_size)
+
+ # Create the results directory
+ build_id = str(uuid4())
+ results_dir = joinpaths(lib_dir, "results", build_id)
+ os.makedirs(results_dir)
+
+ # Write the recipe commit hash
+ commit_path = joinpaths(results_dir, "COMMIT")
+ with open(commit_path, "w") as f:
+ f.write(commit_id)
+
+ # Write the original recipe
+ recipe_path = joinpaths(results_dir, "blueprint.toml")
+ with open(recipe_path, "w") as f:
+ f.write(recipe.toml())
+
+ # Write the frozen recipe
+ frozen_recipe = recipe.freeze(deps)
+ recipe_path = joinpaths(results_dir, "frozen.toml")
+ with open(recipe_path, "w") as f:
+ f.write(frozen_recipe.toml())
+
+ # Write out the dependencies to the results dir
+ deps_path = joinpaths(results_dir, "deps.toml")
+ with open(deps_path, "w") as f:
+ f.write(toml.dumps({"packages":deps}).encode("UTF-8"))
+
+ # Save a copy of the original kickstart
+ shutil.copy(ks_template_path, results_dir)
+
+ # Create the final kickstart with repos and package list
+ ks_path = joinpaths(results_dir, "final-kickstart.ks")
+ with open(ks_path, "w") as f:
+ with yumlock.lock:
+ repos = yumlock.yb.repos.listEnabled()
+ if not repos:
+ raise RuntimeError("No enabled repos, canceling build.")
+
+ ks_url = repo_to_ks(repos[0], "url")
+ log.debug("url = %s", ks_url)
+ f.write('url %s\n' % ks_url)
+ for idx, r in enumerate(repos[1:]):
+ ks_repo = repo_to_ks(r, "baseurl")
+ log.debug("repo composer-%s = %s", idx, ks_repo)
+ f.write('repo --name="composer-%s" %s\n' % (idx, ks_repo))
+
+            # Write the root partition and its size in MiB (rounded up)
+ f.write('part / --fstype="ext4" --size=%d\n' % ceil(installed_size / 1024**2))
+
+ f.write(ks_template)
+
+ for d in deps:
+ f.write(dep_nevra(d)+"\n")
+
+ f.write("%end\n")
+
+ # Setup the config to pass to novirt_install
+ log_dir = joinpaths(results_dir, "logs/")
+ cfg_args = compose_args(compose_type)
+
+ # Get the title, project, and release version from the host
+ if not os.path.exists("/etc/os-release"):
+ log.error("/etc/os-release is missing, cannot determine product or release version")
+ os_release = SimpleConfigFile("/etc/os-release")
+ os_release.read()
+
+ log.debug("os_release = %s", os_release)
+
+ cfg_args["title"] = os_release.get("PRETTY_NAME")
+ cfg_args["project"] = os_release.get("NAME")
+ cfg_args["releasever"] = os_release.get("VERSION_ID")
+ cfg_args["volid"] = ""
+
+    cfg_args.update({
+        "compression": "xz",
+        "compress_args": [],
+        "ks": [ks_path],
+        "logfile": log_dir
+    })
+ with open(joinpaths(results_dir, "config.toml"), "w") as f:
+        f.write(toml.dumps(cfg_args))
+
+ # Set the initial status
+ open(joinpaths(results_dir, "STATUS"), "w").write("WAITING")
+
+ # Set the test mode, if requested
+ if test_mode > 0:
+ open(joinpaths(results_dir, "TEST"), "w").write("%s" % test_mode)
+
+ log.info("Adding %s (%s %s) to compose queue", build_id, recipe["name"], compose_type)
+ os.symlink(results_dir, joinpaths(lib_dir, "queue/new/", build_id))
+
+ return build_id
+
+# Supported output types
+def compose_types(share_dir):
+ r""" Returns a list of the supported output types
+
+ The output types come from the kickstart names in /usr/share/lorax/composer/\*ks
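+
+    eg. with the shipped templates this returns something like::
+
+        ["ext4-filesystem", "live-iso", "partitioned-disk", "qcow2", "tar"]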
+ """
+ return sorted([os.path.basename(ks)[:-3] for ks in glob(joinpaths(share_dir, "composer/*.ks"))])
+
+def compose_args(compose_type):
+ """ Returns the settings to pass to novirt_install for the compose type
+
+ :param compose_type: The type of compose to create, from `compose_types()`
+ :type compose_type: str
+
+ This will return a dict of options that match the ArgumentParser options for livemedia-creator.
+    These are the ones that define the type of output, its filename, etc.
+ Other options will be filled in by `make_compose()`
+ """
+ _MAP = {"tar": {"make_iso": False,
+ "make_disk": False,
+ "make_fsimage": False,
+ "make_appliance": False,
+ "make_ami": False,
+ "make_tar": True,
+ "make_pxe_live": False,
+ "make_ostree_live": False,
+ "ostree": False,
+ "live_rootfs_keep_size": False,
+ "live_rootfs_size": 0,
+ "qcow2": False,
+ "qcow2_args": [],
+ "image_name": default_image_name("xz", "root.tar"),
+ "image_only": True,
+ "app_name": None,
+ "app_template": None,
+ "app_file": None
+ },
+ "live-iso": {"make_iso": True,
+ "make_disk": False,
+ "make_fsimage": False,
+ "make_appliance": False,
+ "make_ami": False,
+ "make_tar": False,
+ "make_pxe_live": False,
+ "make_ostree_live": False,
+ "ostree": False,
+ "live_rootfs_keep_size": False,
+ "live_rootfs_size": 0,
+ "qcow2": False,
+ "qcow2_args": [],
+ "image_name": "live.iso",
+ "fs_label": "Anaconda", # Live booting may expect this to be 'Anaconda'
+ "image_only": False,
+ "app_name": None,
+ "app_template": None,
+ "app_file": None
+ },
+ "partitioned-disk": {"make_iso": False,
+ "make_disk": True,
+ "make_fsimage": False,
+ "make_appliance": False,
+ "make_ami": False,
+ "make_tar": False,
+ "make_pxe_live": False,
+ "make_ostree_live": False,
+ "ostree": False,
+ "live_rootfs_keep_size": False,
+ "live_rootfs_size": 0,
+ "qcow2": False,
+ "qcow2_args": [],
+ "image_name": "disk.img",
+ "fs_label": "",
+ "image_only": True,
+ "app_name": None,
+ "app_template": None,
+ "app_file": None
+ },
+ "qcow2": {"make_iso": False,
+ "make_disk": True,
+ "make_fsimage": False,
+ "make_appliance": False,
+ "make_ami": False,
+ "make_tar": False,
+ "make_pxe_live": False,
+ "make_ostree_live": False,
+ "ostree": False,
+ "live_rootfs_keep_size": False,
+ "live_rootfs_size": 0,
+ "qcow2": True,
+ "qcow2_args": [],
+ "image_name": "disk.qcow2",
+ "fs_label": "",
+ "image_only": True,
+ "app_name": None,
+ "app_template": None,
+ "app_file": None
+ },
+ "ext4-filesystem": {"make_iso": False,
+ "make_disk": False,
+ "make_fsimage": True,
+ "make_appliance": False,
+ "make_ami": False,
+ "make_tar": False,
+ "make_pxe_live": False,
+ "make_ostree_live": False,
+ "ostree": False,
+ "live_rootfs_keep_size": False,
+ "live_rootfs_size": 0,
+ "qcow2": False,
+ "qcow2_args": [],
+ "image_name": "filesystem.img",
+ "fs_label": "",
+ "image_only": True,
+ "app_name": None,
+ "app_template": None,
+ "app_file": None
+ },
+ }
+ return _MAP[compose_type]
+
+def move_compose_results(cfg, results_dir):
+ """Move the final image to the results_dir and cleanup the unneeded compose files
+
+ :param cfg: Build configuration
+ :type cfg: DataHolder
+ :param results_dir: Directory to put the results into
+ :type results_dir: str
+ """
+ if cfg["make_tar"]:
+ shutil.move(joinpaths(cfg["result_dir"], cfg["image_name"]), results_dir)
+ elif cfg["make_iso"]:
+ # Output from live iso is always a boot.iso under images/, move and rename it
+ shutil.move(joinpaths(cfg["result_dir"], "images/boot.iso"), joinpaths(results_dir, cfg["image_name"]))
+ elif cfg["make_disk"] or cfg["make_fsimage"]:
+ shutil.move(joinpaths(cfg["result_dir"], cfg["image_name"]), joinpaths(results_dir, cfg["image_name"]))
+
+
+ # Cleanup the compose directory, but only if it looks like a compose directory
+ if os.path.basename(cfg["result_dir"]) == "compose":
+ shutil.rmtree(cfg["result_dir"])
+ else:
+ log.error("Incorrect compose directory, not cleaning up")
diff --git a/src/pylorax/api/config.py b/src/pylorax/api/config.py
new file mode 100644
index 00000000..b7ab36db
--- /dev/null
+++ b/src/pylorax/api/config.py
@@ -0,0 +1,107 @@
+#
+# Copyright (C) 2017 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+import configparser
+import grp
+import os
+
+from pylorax.sysutils import joinpaths
+
+class ComposerConfig(configparser.ConfigParser):
+ def get_default(self, section, option, default):
+ try:
+ return self.get(section, option)
+ except configparser.Error:
+ return default
+
+
+def configure(conf_file="/etc/lorax/composer.conf", root_dir="/", test_config=False):
+ """lorax-composer configuration
+
+ :param conf_file: Path to the config file overriding the default settings
+ :type conf_file: str
+ :param root_dir: Directory to prepend to paths, defaults to /
+ :type root_dir: str
+ :param test_config: Set to True to skip reading conf_file
+ :type test_config: bool
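+
+    An example composer.conf overriding some of the defaults (all settings
+    are optional)::
+
+        [composer]
+        lib_dir = /srv/composer/lib
+
+        [repos]
+        use_system_repos = 1
+        enabled = *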
+ """
+ conf = ComposerConfig()
+
+ # set defaults
+ conf.add_section("composer")
+ conf.set("composer", "share_dir", os.path.realpath(joinpaths(root_dir, "/usr/share/lorax/")))
+ conf.set("composer", "lib_dir", os.path.realpath(joinpaths(root_dir, "/var/lib/lorax/composer/")))
+ conf.set("composer", "yum_conf", os.path.realpath(joinpaths(root_dir, "/var/tmp/composer/yum.conf")))
+ conf.set("composer", "yum_root", os.path.realpath(joinpaths(root_dir, "/var/tmp/composer/yum/root/")))
+ conf.set("composer", "repo_dir", os.path.realpath(joinpaths(root_dir, "/var/tmp/composer/repos.d/")))
+ conf.set("composer", "cache_dir", os.path.realpath(joinpaths(root_dir, "/var/tmp/composer/cache/")))
+
+ conf.add_section("users")
+ conf.set("users", "root", "1")
+
+ # Enable all available repo files by default
+ conf.add_section("repos")
+ conf.set("repos", "use_system_repos", "1")
+ conf.set("repos", "enabled", "*")
+
+ if not test_config:
+ # read the config file
+ if os.path.isfile(conf_file):
+ conf.read(conf_file)
+
+ return conf
+
+def make_yum_dirs(conf):
+ """Make any missing yum directories
+
+ :param conf: The configuration to use
+ :type conf: ComposerConfig
+ :returns: None
+ """
+ for p in ["yum_conf", "repo_dir", "cache_dir", "yum_root"]:
+ p_dir = os.path.dirname(conf.get("composer", p))
+ if not os.path.exists(p_dir):
+ os.makedirs(p_dir)
+
+def make_queue_dirs(conf, gid):
+ """Make any missing queue directories
+
+ :param conf: The configuration to use
+ :type conf: ComposerConfig
+ :param gid: Group ID that has access to the queue directories
+ :type gid: int
+ :returns: list of errors
+ :rtype: list of str
+ """
+ errors = []
+ lib_dir = conf.get("composer", "lib_dir")
+ for p in ["queue/run", "queue/new", "results"]:
+ p_dir = joinpaths(lib_dir, p)
+ if not os.path.exists(p_dir):
+ orig_umask = os.umask(0)
+ os.makedirs(p_dir, 0o771)
+ os.chown(p_dir, 0, gid)
+ os.umask(orig_umask)
+ else:
+ p_stat = os.stat(p_dir)
+ if p_stat.st_mode & 0o006 != 0:
+ errors.append("Incorrect permissions on %s, no o+rw permissions are allowed." % p_dir)
+
+ if p_stat.st_gid != gid or p_stat.st_uid != 0:
+ gr_name = grp.getgrgid(gid).gr_name
+ errors.append("%s should be owned by root:%s" % (p_dir, gr_name))
+
+ return errors
diff --git a/src/pylorax/api/crossdomain.py b/src/pylorax/api/crossdomain.py
new file mode 100644
index 00000000..74bfcaaa
--- /dev/null
+++ b/src/pylorax/api/crossdomain.py
@@ -0,0 +1,64 @@
+#
+# Copyright (C) 2017 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# crossdomain decorator from - http://flask.pocoo.org/snippets/56/
+from datetime import timedelta
+from flask import make_response, request, current_app
+from functools import update_wrapper
+
+
+def crossdomain(origin, methods=None, headers=None,
+ max_age=21600, attach_to_all=True,
+ automatic_options=True):
+ if methods is not None:
+ methods = ', '.join(sorted(x.upper() for x in methods))
+ if headers is not None and not isinstance(headers, str):
+ headers = ', '.join(x.upper() for x in headers)
+ if not isinstance(origin, list):
+ origin = [origin]
+ if isinstance(max_age, timedelta):
+ max_age = int(max_age.total_seconds())
+
+ def get_methods():
+ if methods is not None:
+ return methods
+
+ options_resp = current_app.make_default_options_response()
+ return options_resp.headers['allow']
+
+ def decorator(f):
+ def wrapped_function(*args, **kwargs):
+ if automatic_options and request.method == 'OPTIONS':
+ resp = current_app.make_default_options_response()
+ else:
+ resp = make_response(f(*args, **kwargs))
+ if not attach_to_all and request.method != 'OPTIONS':
+ return resp
+
+ h = resp.headers
+
+ h.extend([("Access-Control-Allow-Origin", orig) for orig in origin])
+ h['Access-Control-Allow-Methods'] = get_methods()
+ h['Access-Control-Max-Age'] = str(max_age)
+ if headers is not None:
+ h['Access-Control-Allow-Headers'] = headers
+ return resp
+
+ f.provide_automatic_options = False
+ f.required_methods = ['OPTIONS']
+ return update_wrapper(wrapped_function, f)
+ return decorator
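+
+# A minimal usage sketch (assuming a Flask application object named `app`):
+#
+#     @app.route("/api/status")
+#     @crossdomain(origin="*")
+#     def api_status():
+#         return "ok"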
diff --git a/src/pylorax/api/projects.py b/src/pylorax/api/projects.py
new file mode 100644
index 00000000..35957cc5
--- /dev/null
+++ b/src/pylorax/api/projects.py
@@ -0,0 +1,311 @@
+#
+# Copyright (C) 2017 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+import logging
+log = logging.getLogger("lorax-composer")
+
+import time
+
+from yum.Errors import YumBaseError
+
+TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
+
+
+class ProjectsError(Exception):
+ pass
+
+
+def api_time(t):
+ """Convert time since epoch to a string
+
+ :param t: Seconds since epoch
+ :type t: int
+ :returns: Time string
+ :rtype: str
+ """
+ return time.strftime(TIME_FORMAT, time.localtime(t))
+
+
+def api_changelog(changelog):
+ """Convert the changelog to a string
+
+    :param changelog: A list of (time, author, string) tuples.
+    :type changelog: list
+ :returns: The most recent changelog text or ""
+ :rtype: str
+
+ This returns only the most recent changelog entry.
+ """
+ try:
+ entry = changelog[0][2]
+ except IndexError:
+ entry = ""
+ return entry
+
+
+def yaps_to_project(yaps):
+ """Extract the details from a YumAvailablePackageSqlite object
+
+ :param yaps: Yum object with package details
+ :type yaps: YumAvailablePackageSqlite
+ :returns: A dict with the name, summary, description, and url.
+ :rtype: dict
+
+ upstream_vcs is hard-coded to UPSTREAM_VCS
+ """
+ return {"name": yaps.name,
+ "summary": yaps.summary,
+ "description": yaps.description,
+ "homepage": yaps.url,
+ "upstream_vcs": "UPSTREAM_VCS"}
+
+
+def yaps_to_project_info(yaps):
+ """Extract the details from a YumAvailablePackageSqlite object
+
+ :param yaps: Yum object with package details
+ :type yaps: YumAvailablePackageSqlite
+ :returns: A dict with the project details, as well as epoch, release, arch, build_time, changelog, ...
+ :rtype: dict
+
+ metadata entries are hard-coded to {}
+ """
+ build = {"epoch": int(yaps.epoch),
+ "release": yaps.release,
+ "arch": yaps.arch,
+ "build_time": api_time(yaps.buildtime),
+ "changelog": api_changelog(yaps.returnChangelog()),
+ "build_config_ref": "BUILD_CONFIG_REF",
+ "build_env_ref": "BUILD_ENV_REF",
+ "metadata": {},
+ "source": {"license": yaps.license,
+ "version": yaps.version,
+ "source_ref": "SOURCE_REF",
+ "metadata": {}}}
+
+ return {"name": yaps.name,
+ "summary": yaps.summary,
+ "description": yaps.description,
+ "homepage": yaps.url,
+ "upstream_vcs": "UPSTREAM_VCS",
+ "builds": [build]}
+
+
+def tm_to_dep(tm):
+ """Extract the info from a TransactionMember object
+
+ :param tm: A Yum transaction object
+ :type tm: TransactionMember
+ :returns: A dict with name, epoch, version, release, arch
+ :rtype: dict
+ """
+ return {"name": tm.name,
+ "epoch": int(tm.epoch),
+ "version": tm.version,
+ "release": tm.release,
+ "arch": tm.arch}
+
+
+def yaps_to_module(yaps):
+ """Extract the name from a YumAvailablePackageSqlite object
+
+ :param yaps: Yum object with package details
+ :type yaps: YumAvailablePackageSqlite
+ :returns: A dict with name, and group_type
+ :rtype: dict
+
+ group_type is hard-coded to "rpm"
+ """
+ return {"name": yaps.name,
+ "group_type": "rpm"}
+
+
+def dep_evra(dep):
+ """Return the epoch:version-release.arch for the dep
+
+ :param dep: dependency dict
+ :type dep: dict
+ :returns: epoch:version-release.arch
+ :rtype: str
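+
+    eg. {"epoch": 0, "version": "1.0", "release": "1", "arch": "noarch"} returns
+    "1.0-1.noarch", and with an epoch of 2 it returns "2:1.0-1.noarch"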
+ """
+ if dep["epoch"] == 0:
+ return dep["version"]+"-"+dep["release"]+"."+dep["arch"]
+ else:
+ return str(dep["epoch"])+":"+dep["version"]+"-"+dep["release"]+"."+dep["arch"]
+
+def dep_nevra(dep):
+ """Return the name-epoch:version-release.arch"""
+ return dep["name"]+"-"+dep_evra(dep)
+
+def projects_list(yb):
+ """Return a list of projects
+
+ :param yb: yum base object
+ :type yb: YumBase
+ :returns: List of project info dicts with name, summary, description, homepage, upstream_vcs
+ :rtype: list of dicts
+ """
+ try:
+ ybl = yb.doPackageLists(pkgnarrow="available", showdups=False)
+ except YumBaseError as e:
+ raise ProjectsError("There was a problem listing projects: %s" % str(e))
+ finally:
+ yb.closeRpmDB()
+ return sorted(map(yaps_to_project, ybl.available), key=lambda p: p["name"].lower())
+
+
+def projects_info(yb, project_names):
+ """Return details about specific projects
+
+ :param yb: yum base object
+ :type yb: YumBase
+ :param project_names: List of names of projects to get info about
+    :type project_names: list of str
+ :returns: List of project info dicts with yaps_to_project as well as epoch, version, release, etc.
+ :rtype: list of dicts
+ """
+ try:
+ ybl = yb.doPackageLists(pkgnarrow="available", patterns=project_names, showdups=False)
+ except YumBaseError as e:
+ raise ProjectsError("There was a problem with info for %s: %s" % (project_names, str(e)))
+ finally:
+ yb.closeRpmDB()
+ return sorted(map(yaps_to_project_info, ybl.available), key=lambda p: p["name"].lower())
+
+
+def projects_depsolve(yb, project_names):
+ """Return the dependencies for a list of projects
+
+ :param yb: yum base object
+ :type yb: YumBase
+ :param project_names: The projects to find the dependencies for
+ :type project_names: List of Strings
+ :returns: NEVRA's of the project and its dependencies
+ :rtype: list of dicts
+ """
+ try:
+ # This resets the transaction
+ yb.closeRpmDB()
+ for p in project_names:
+ yb.install(pattern=p)
+ (rc, msg) = yb.buildTransaction()
+ if rc not in [0, 1, 2]:
+ raise ProjectsError("There was a problem depsolving %s: %s" % (project_names, msg))
+ yb.tsInfo.makelists()
+ deps = sorted(map(tm_to_dep, yb.tsInfo.installed + yb.tsInfo.depinstalled), key=lambda p: p["name"].lower())
+ except YumBaseError as e:
+ raise ProjectsError("There was a problem depsolving %s: %s" % (project_names, str(e)))
+ finally:
+ yb.closeRpmDB()
+ return deps
+
+def estimate_size(packages, block_size=4096):
+ """Estimate the installed size of a package list
+
+ :param packages: The packages to be installed
+ :type packages: list of TransactionMember objects
+ :param block_size: The block size to use for rounding up file sizes.
+ :type block_size: int
+ :returns: The estimated size of installed packages
+ :rtype: int
+
+ Estimating actual requirements is difficult without the actual file sizes, which
+ yum doesn't provide access to. So use the file count and block size to estimate
+ a minimum size for each package.
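+
+    For example, a package with 10 files and an installedsize of 50000 bytes is
+    estimated at 10 * 4096 + 50000 = 90960 bytes.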
+ """
+ installed_size = 0
+ for p in packages:
+ installed_size += len(p.po.filelist) * block_size
+ installed_size += p.po.installedsize
+ return installed_size
+
+def projects_depsolve_with_size(yb, project_names, with_core=True):
+ """Return the dependencies and installed size for a list of projects
+
+ :param yb: yum base object
+ :type yb: YumBase
+    :param project_names: The projects to find the dependencies for
+    :type project_names: List of Strings
+    :param with_core: Include the "core" group in the depsolve. Defaults to True
+    :type with_core: bool
+ :returns: installed size and a list of NEVRA's of the project and its dependencies
+ :rtype: tuple of (int, list of dicts)
+ """
+ try:
+ # This resets the transaction
+ yb.closeRpmDB()
+ for p in project_names:
+ yb.install(pattern=p)
+ if with_core:
+ yb.selectGroup("core", group_package_types=['mandatory', 'default', 'optional'])
+ (rc, msg) = yb.buildTransaction()
+ if rc not in [0, 1, 2]:
+ raise ProjectsError("There was a problem depsolving %s: %s" % (project_names, msg))
+ yb.tsInfo.makelists()
+ installed_size = estimate_size(yb.tsInfo.installed + yb.tsInfo.depinstalled)
+ deps = sorted(map(tm_to_dep, yb.tsInfo.installed + yb.tsInfo.depinstalled), key=lambda p: p["name"].lower())
+ except YumBaseError as e:
+ raise ProjectsError("There was a problem depsolving %s: %s" % (project_names, str(e)))
+ finally:
+ yb.closeRpmDB()
+ return (installed_size, deps)
+
+def modules_list(yb, module_names):
+ """Return a list of modules
+
+ :param yb: yum base object
+ :type yb: YumBase
+    :param module_names: Names of the modules to list
+    :type module_names: list of str
+    :returns: List of module information
+    :rtype: list of dicts
+
+ Modules don't exist in RHEL7 so this only returns projects
+ and sets the type to "rpm"
+ """
+ try:
+ ybl = yb.doPackageLists(pkgnarrow="available", patterns=module_names, showdups=False)
+ except YumBaseError as e:
+ raise ProjectsError("There was a problem listing modules: %s" % str(e))
+ finally:
+ yb.closeRpmDB()
+ return sorted(map(yaps_to_module, ybl.available), key=lambda p: p["name"].lower())
+
+
+def modules_info(yb, module_names):
+ """Return details about a module, including dependencies
+
+ :param yb: yum base object
+ :type yb: YumBase
+ :param module_names: Names of the modules to get info about
+    :type module_names: list of str
+ :returns: List of dicts with module details and dependencies.
+ :rtype: list of dicts
+ """
+ try:
+ # Get the info about each module
+ ybl = yb.doPackageLists(pkgnarrow="available", patterns=module_names, showdups=False)
+ except YumBaseError as e:
+ raise ProjectsError("There was a problem with info for %s: %s" % (module_names, str(e)))
+ finally:
+ yb.closeRpmDB()
+
+ modules = sorted(map(yaps_to_project, ybl.available), key=lambda p: p["name"].lower())
+ # Add the dependency info to each one
+ for module in modules:
+ module["dependencies"] = projects_depsolve(yb, [module["name"]])
+
+ return modules
diff --git a/src/pylorax/api/queue.py b/src/pylorax/api/queue.py
new file mode 100644
index 00000000..1cdf8202
--- /dev/null
+++ b/src/pylorax/api/queue.py
@@ -0,0 +1,604 @@
+# Copyright (C) 2018 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+""" Functions to monitor compose queue and run anaconda"""
+import logging
+log = logging.getLogger("pylorax")
+
+import os
+import grp
+from glob import glob
+import multiprocessing as mp
+import pytoml as toml
+import pwd
+import shutil
+import subprocess
+from subprocess import Popen, PIPE
+import time
+
+from pylorax.api.compose import move_compose_results
+from pylorax.api.recipes import recipe_from_file
+from pylorax.base import DataHolder
+from pylorax.creator import run_creator
+from pylorax.sysutils import joinpaths
+
+def start_queue_monitor(cfg, uid, gid):
+ """Start the queue monitor as a mp process
+
+ :param cfg: Configuration settings
+ :type cfg: ComposerConfig
+ :param uid: User ID that owns the queue
+ :type uid: int
+ :param gid: Group ID that owns the queue
+ :type gid: int
+ :returns: None
+ """
+ lib_dir = cfg.get("composer", "lib_dir")
+ share_dir = cfg.get("composer", "share_dir")
+ monitor_cfg = DataHolder(composer_dir=lib_dir, share_dir=share_dir, uid=uid, gid=gid)
+ p = mp.Process(target=monitor, args=(monitor_cfg,))
+ p.daemon = True
+ p.start()
+
+def monitor(cfg):
+ """Monitor the queue for new compose requests
+
+ :param cfg: Configuration settings
+ :type cfg: DataHolder
+ :returns: Does not return
+
+ The queue has 2 subdirectories, new and run. When a compose is ready to be run
+ a symlink to the uniquely named results directory should be placed in ./queue/new/
+
+    When it is ready to be run (the queue is checked every 30 seconds, or after a previous
+ compose is finished) the symlink will be moved into ./queue/run/ and a STATUS file
+ will be created in the results directory.
+
+ STATUS can contain one of: RUNNING, FINISHED, FAILED
+
+ If the system is restarted while a compose is running it will move any old symlinks
+ from ./queue/run/ to ./queue/new/ and rerun them.
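+
+    With the default lib_dir this looks something like::
+
+        /var/lib/lorax/composer/queue/new/<uuid> -> /var/lib/lorax/composer/results/<uuid>
+        /var/lib/lorax/composer/results/<uuid>/STATUS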
+ """
+ def queue_sort(uuid):
+ """Sort the queue entries by their mtime, not their names"""
+ return os.stat(joinpaths(cfg.composer_dir, "queue/new", uuid)).st_mtime
+
+ # Move any symlinks in the run queue back to the new queue
+ for link in os.listdir(joinpaths(cfg.composer_dir, "queue/run")):
+ src = joinpaths(cfg.composer_dir, "queue/run", link)
+ dst = joinpaths(cfg.composer_dir, "queue/new", link)
+ os.rename(src, dst)
+ log.debug("Moved unfinished compose %s back to new state", src)
+
+ while True:
+ uuids = sorted(os.listdir(joinpaths(cfg.composer_dir, "queue/new")), key=queue_sort)
+
+ # Pick the oldest and move it into ./run/
+ if not uuids:
+ # No composes left to process, sleep for a bit
+ time.sleep(30)
+ else:
+ src = joinpaths(cfg.composer_dir, "queue/new", uuids[0])
+ dst = joinpaths(cfg.composer_dir, "queue/run", uuids[0])
+ try:
+ os.rename(src, dst)
+ except OSError:
+ # The symlink may vanish if uuid_cancel() has been called
+ continue
+
+ log.info("Starting new compose: %s", dst)
+ open(joinpaths(dst, "STATUS"), "w").write("RUNNING\n")
+
+ try:
+ make_compose(cfg, os.path.realpath(dst))
+ log.info("Finished building %s, results are in %s", dst, os.path.realpath(dst))
+ open(joinpaths(dst, "STATUS"), "w").write("FINISHED\n")
+ except Exception:
+ import traceback
+ log.error("traceback: %s", traceback.format_exc())
+
+# TODO - Write the error message to an ERROR-LOG file to include with the status
+# log.error("Error running compose: %s", e)
+ open(joinpaths(dst, "STATUS"), "w").write("FAILED\n")
+
+ os.unlink(dst)
+
+def make_compose(cfg, results_dir):
+ """Run anaconda with the final-kickstart.ks from results_dir
+
+ :param cfg: Configuration settings
+ :type cfg: DataHolder
+ :param results_dir: The directory containing the metadata and results for the build
+ :type results_dir: str
+ :returns: Nothing
+ :raises: May raise various exceptions
+
+ This takes the final-kickstart.ks, and the settings in config.toml and runs Anaconda
+ in no-virt mode (directly on the host operating system). Exceptions should be caught
+    at the higher level.
+
+ If there is a failure, the build artifacts will be cleaned up, and any logs will be
+ moved into logs/anaconda/ and their ownership will be set to the user from the cfg
+ object.
+ """
+
+ # Check on the ks's presence
+ ks_path = joinpaths(results_dir, "final-kickstart.ks")
+ if not os.path.exists(ks_path):
+ raise RuntimeError("Missing kickstart file at %s" % ks_path)
+
+ # The anaconda logs are copied into ./anaconda/ in this directory
+ log_dir = joinpaths(results_dir, "logs/")
+ if not os.path.exists(log_dir):
+ os.makedirs(log_dir)
+
+ # Load the compose configuration
+ cfg_path = joinpaths(results_dir, "config.toml")
+ if not os.path.exists(cfg_path):
+ raise RuntimeError("Missing config.toml for %s" % results_dir)
+ cfg_dict = toml.loads(open(cfg_path, "r").read())
+
+    # The keys in cfg_dict correspond to the arguments set up in livemedia-creator.
+    # Keys that define what to build should be set in compose_args, and keys with
+    # defaults should be set here.
+
+ # Make sure that image_name contains no path components
+ cfg_dict["image_name"] = os.path.basename(cfg_dict["image_name"])
+
+ # Only support novirt installation, set some other defaults
+ cfg_dict["no_virt"] = True
+ cfg_dict["disk_image"] = None
+ cfg_dict["fs_image"] = None
+ cfg_dict["keep_image"] = False
+ cfg_dict["domacboot"] = False
+ cfg_dict["anaconda_args"] = ""
+ cfg_dict["proxy"] = ""
+ cfg_dict["armplatform"] = ""
+ cfg_dict["squashfs_args"] = None
+
+ cfg_dict["lorax_templates"] = cfg.share_dir
+ cfg_dict["tmp"] = "/var/tmp/"
+ cfg_dict["dracut_args"] = None # Use default args for dracut
+
+ # Compose things in a temporary directory inside the results directory
+ cfg_dict["result_dir"] = joinpaths(results_dir, "compose")
+ os.makedirs(cfg_dict["result_dir"])
+
+ install_cfg = DataHolder(**cfg_dict)
+
+    # Some kludges for the 99-copy-logs %post; a failure in it will crash the build
+ for f in ["/tmp/NOSAVE_INPUT_KS", "/tmp/NOSAVE_LOGS"]:
+ open(f, "w")
+
+ # Placing a CANCEL file in the results directory will make execWithRedirect send anaconda a SIGTERM
+ def cancel_build():
+ return os.path.exists(joinpaths(results_dir, "CANCEL"))
+
+ log.debug("cfg = %s", install_cfg)
+ try:
+ test_path = joinpaths(results_dir, "TEST")
+ if os.path.exists(test_path):
+ # Pretend to run the compose
+ time.sleep(10)
+ try:
+ test_mode = int(open(test_path, "r").read())
+ except Exception:
+ test_mode = 1
+ if test_mode == 1:
+ raise RuntimeError("TESTING FAILED compose")
+ else:
+ open(joinpaths(results_dir, install_cfg.image_name), "w").write("TEST IMAGE")
+ else:
+ run_creator(install_cfg, callback_func=cancel_build)
+
+ # Extract the results of the compose into results_dir and cleanup the compose directory
+ move_compose_results(install_cfg, results_dir)
+ finally:
+ # Make sure that everything under the results directory is owned by the user
+ user = pwd.getpwuid(cfg.uid).pw_name
+ group = grp.getgrgid(cfg.gid).gr_name
+ log.debug("Install finished, chowning results to %s:%s", user, group)
+ subprocess.call(["chown", "-R", "%s:%s" % (user, group), results_dir])
+
+def get_compose_type(results_dir):
+ """Return the type of composition.
+
+ :param results_dir: The directory containing the metadata and results for the build
+ :type results_dir: str
+ :returns: The type of compose (eg. 'tar')
+ :rtype: str
+ :raises: RuntimeError if no kickstart template can be found.
+ """
+ # Should only be 2 kickstarts, the final-kickstart.ks and the template
+ t = [os.path.basename(ks)[:-3] for ks in glob(joinpaths(results_dir, "*.ks"))
+ if "final-kickstart" not in ks]
+ if len(t) != 1:
+ raise RuntimeError("Cannot find ks template for build %s" % os.path.basename(results_dir))
+ return t[0]
+
+def compose_detail(results_dir):
+ """Return details about the build.
+
+ :param results_dir: The directory containing the metadata and results for the build
+ :type results_dir: str
+ :returns: A dictionary with details about the compose
+ :rtype: dict
+ :raises: IOError if it cannot read the directory, STATUS, or blueprint file.
+
+ The following details are included in the dict:
+
+    * id - The uuid of the composition
+ * queue_status - The final status of the composition (FINISHED or FAILED)
+ * timestamp - The time of the last status change
+ * compose_type - The type of output generated (tar, iso, etc.)
+ * blueprint - Blueprint name
+ * version - Blueprint version
+ * image_size - Size of the image, if finished. 0 otherwise.
+ """
+ build_id = os.path.basename(os.path.abspath(results_dir))
+ status = open(joinpaths(results_dir, "STATUS")).read().strip()
+ mtime = os.stat(joinpaths(results_dir, "STATUS")).st_mtime
+ blueprint = recipe_from_file(joinpaths(results_dir, "blueprint.toml"))
+
+ compose_type = get_compose_type(results_dir)
+
+ image_path = get_image_name(results_dir)[1]
+ if status == "FINISHED" and os.path.exists(image_path):
+ image_size = os.stat(image_path).st_size
+ else:
+ image_size = 0
+
+ return {"id": build_id,
+ "queue_status": status,
+ "timestamp": mtime,
+ "compose_type": compose_type,
+ "blueprint": blueprint["name"],
+ "version": blueprint["version"],
+ "image_size": image_size
+ }
+
+def queue_status(cfg):
+ """Return details about what is in the queue.
+
+ :param cfg: Configuration settings
+ :type cfg: ComposerConfig
+ :returns: A list of the new composes, and a list of the running composes
+ :rtype: dict
+
+ This returns a dict with 2 lists. "new" is the list of uuids that are waiting to be built,
+ and "run" has the uuids that are being built (currently limited to 1 at a time).
+ """
+ queue_dir = joinpaths(cfg.get("composer", "lib_dir"), "queue")
+ new_queue = [os.path.realpath(p) for p in glob(joinpaths(queue_dir, "new/*"))]
+ run_queue = [os.path.realpath(p) for p in glob(joinpaths(queue_dir, "run/*"))]
+
+ new_details = []
+ for n in new_queue:
+ try:
+ d = compose_detail(n)
+ except IOError:
+ continue
+ new_details.append(d)
+
+ run_details = []
+ for r in run_queue:
+ try:
+ d = compose_detail(r)
+ except IOError:
+ continue
+ run_details.append(d)
+
+ return {
+ "new": new_details,
+ "run": run_details
+ }
+
+def uuid_status(cfg, uuid):
+ """Return the details of a specific UUID compose
+
+ :param cfg: Configuration settings
+ :type cfg: ComposerConfig
+ :param uuid: The UUID of the build
+ :type uuid: str
+ :returns: Details about the build
+ :rtype: dict or None
+
+    Returns the same dict as `compose_detail()`
+ """
+ uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid)
+ try:
+ return compose_detail(uuid_dir)
+ except IOError:
+ return None
+
+def build_status(cfg, status_filter=None):
+ """Return the details of finished or failed builds
+
+ :param cfg: Configuration settings
+ :type cfg: ComposerConfig
+ :param status_filter: What builds to return. None == all, "FINISHED", or "FAILED"
+ :type status_filter: str
+    :returns: A list of the build details (from compose_detail)
+ :rtype: list of dicts
+
+ This returns a list of build details for each of the matching builds on the
+ system. It does not return the status of builds that have not been finished.
+ Use queue_status() for those.
+ """
+ if status_filter:
+ status_filter = [status_filter]
+ else:
+ status_filter = ["FINISHED", "FAILED"]
+
+ results = []
+ result_dir = joinpaths(cfg.get("composer", "lib_dir"), "results")
+ for build in glob(result_dir + "/*"):
+ log.debug("Checking status of build %s", build)
+
+ try:
+ status = open(joinpaths(build, "STATUS"), "r").read().strip()
+ if status in status_filter:
+ results.append(compose_detail(build))
+ except IOError:
+ pass
+ return results
+
+def uuid_cancel(cfg, uuid):
+ """Cancel a build and delete its results
+
+ :param cfg: Configuration settings
+ :type cfg: ComposerConfig
+ :param uuid: The UUID of the build
+ :type uuid: str
+ :returns: True if it was canceled and deleted
+ :rtype: bool
+
+ Only call this if the build status is WAITING or RUNNING
+ """
+ # This status can change (and probably will) while it is in the middle of doing this:
+ # It can move from WAITING -> RUNNING or it can move from RUNNING -> FINISHED|FAILED
+
+ # If it is in WAITING remove the symlink and then check to make sure it didn't show up
+ # in RUNNING
+ queue_dir = joinpaths(cfg.get("composer", "lib_dir"), "queue")
+ uuid_new = joinpaths(queue_dir, "new", uuid)
+ if os.path.exists(uuid_new):
+ try:
+ os.unlink(uuid_new)
+ except OSError:
+ # The symlink may vanish if the queue monitor started the build
+ pass
+ uuid_run = joinpaths(queue_dir, "run", uuid)
+ if not os.path.exists(uuid_run):
+ # Successfully removed it before the build started
+ return uuid_delete(cfg, uuid)
+
+ # Tell the build to stop running
+ cancel_path = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid, "CANCEL")
+ open(cancel_path, "w").write("\n")
+
+ # Wait for status to move to FAILED
+ started = time.time()
+ while True:
+ status = uuid_status(cfg, uuid)
+ if status is None or status["queue_status"] == "FAILED":
+ break
+
+ # Is this taking too long? Exit anyway and try to cleanup.
+ if time.time() > started + (10 * 60):
+ log.error("Failed to cancel the build of %s", uuid)
+ break
+
+ time.sleep(5)
+
+    # Remove the partial results
+    return uuid_delete(cfg, uuid)
+
+def uuid_delete(cfg, uuid):
+ """Delete all of the results from a compose
+
+ :param cfg: Configuration settings
+ :type cfg: ComposerConfig
+ :param uuid: The UUID of the build
+ :type uuid: str
+ :returns: True if it was deleted
+ :rtype: bool
+ :raises: This will raise an error if the delete failed
+ """
+ uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid)
+ if not uuid_dir or len(uuid_dir) < 10:
+ raise RuntimeError("Directory length is too short: %s" % uuid_dir)
+ shutil.rmtree(uuid_dir)
+ return True
+
+def uuid_info(cfg, uuid):
+ """Return information about the composition
+
+ :param cfg: Configuration settings
+ :type cfg: ComposerConfig
+ :param uuid: The UUID of the build
+ :type uuid: str
+ :returns: dictionary of information about the composition
+ :rtype: dict
+ :raises: RuntimeError if there was a problem
+
+ This will return a dict with the following fields populated:
+
+    * id - The uuid of the composition
+ * config - containing the configuration settings used to run Anaconda
+ * blueprint - The depsolved blueprint used to generate the kickstart
+ * commit - The (local) git commit hash for the blueprint used
+ * deps - The NEVRA of all of the dependencies used in the composition
+ * compose_type - The type of output generated (tar, iso, etc.)
+ * queue_status - The final status of the composition (FINISHED or FAILED)
+ """
+ uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid)
+ if not os.path.exists(uuid_dir):
+ raise RuntimeError("%s is not a valid build_id" % uuid)
+
+ # Load the compose configuration
+ cfg_path = joinpaths(uuid_dir, "config.toml")
+ if not os.path.exists(cfg_path):
+ raise RuntimeError("Missing config.toml for %s" % uuid)
+ cfg_dict = toml.loads(open(cfg_path, "r").read())
+
+ frozen_path = joinpaths(uuid_dir, "frozen.toml")
+ if not os.path.exists(frozen_path):
+ raise RuntimeError("Missing frozen.toml for %s" % uuid)
+ frozen_dict = toml.loads(open(frozen_path, "r").read())
+
+ deps_path = joinpaths(uuid_dir, "deps.toml")
+ if not os.path.exists(deps_path):
+ raise RuntimeError("Missing deps.toml for %s" % uuid)
+ deps_dict = toml.loads(open(deps_path, "r").read())
+
+ details = compose_detail(uuid_dir)
+
+ commit_path = joinpaths(uuid_dir, "COMMIT")
+ if not os.path.exists(commit_path):
+ raise RuntimeError("Missing commit hash for %s" % uuid)
+ commit_id = open(commit_path, "r").read().strip()
+
+ return {"id": uuid,
+ "config": cfg_dict,
+ "blueprint": frozen_dict,
+ "commit": commit_id,
+ "deps": deps_dict,
+ "compose_type": details["compose_type"],
+ "queue_status": details["queue_status"],
+ "image_size": details["image_size"]
+ }
+
+def uuid_tar(cfg, uuid, metadata=False, image=False, logs=False):
+ """Return a tar of the build data
+
+ :param cfg: Configuration settings
+ :type cfg: ComposerConfig
+ :param uuid: The UUID of the build
+ :type uuid: str
+ :param metadata: Set to true to include all the metadata needed to reproduce the build
+ :type metadata: bool
+ :param image: Set to true to include the output image
+ :type image: bool
+ :param logs: Set to true to include the logs from the build
+ :type logs: bool
+ :returns: A stream of bytes from tar
+    :rtype: file object
+ :raises: RuntimeError if there was a problem (eg. missing config file)
+
+    This returns the Popen stdout of an uncompressed tar process so that the
+    selected data can be streamed to the caller.
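+
+    For example, a caller could stream the tar to a file like this (a sketch)::
+
+        import shutil
+        with open("build.tar", "wb") as f:
+            shutil.copyfileobj(uuid_tar(cfg, uuid, metadata=True, image=True), f)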
+ """
+ uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid)
+ if not os.path.exists(uuid_dir):
+ raise RuntimeError("%s is not a valid build_id" % uuid)
+
+ # Load the compose configuration
+ cfg_path = joinpaths(uuid_dir, "config.toml")
+ if not os.path.exists(cfg_path):
+ raise RuntimeError("Missing config.toml for %s" % uuid)
+ cfg_dict = toml.loads(open(cfg_path, "r").read())
+ image_name = cfg_dict["image_name"]
+
+ def include_file(f):
+ if f.endswith("/logs"):
+ return logs
+ if f.endswith(image_name):
+ return image
+ return metadata
+ filenames = [os.path.basename(f) for f in glob(joinpaths(uuid_dir, "*")) if include_file(f)]
+
+ tar = Popen(["tar", "-C", uuid_dir, "-cf-"] + filenames, stdout=PIPE)
+ return tar.stdout
+
+def uuid_image(cfg, uuid):
+ """Return the filename and full path of the build's image file
+
+ :param cfg: Configuration settings
+ :type cfg: ComposerConfig
+ :param uuid: The UUID of the build
+ :type uuid: str
+ :returns: The image filename and full path
+ :rtype: tuple of strings
+ :raises: RuntimeError if there was a problem (eg. invalid uuid, missing config file)
+ """
+ uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid)
+ return get_image_name(uuid_dir)
+
+def get_image_name(uuid_dir):
+ """Return the filename and full path of the build's image file
+
+    :param uuid_dir: The directory containing the metadata and results for the build
+    :type uuid_dir: str
+ :returns: The image filename and full path
+ :rtype: tuple of strings
+ :raises: RuntimeError if there was a problem (eg. invalid uuid, missing config file)
+ """
+ uuid = os.path.basename(os.path.abspath(uuid_dir))
+ if not os.path.exists(uuid_dir):
+ raise RuntimeError("%s is not a valid build_id" % uuid)
+
+ # Load the compose configuration
+ cfg_path = joinpaths(uuid_dir, "config.toml")
+ if not os.path.exists(cfg_path):
+ raise RuntimeError("Missing config.toml for %s" % uuid)
+ cfg_dict = toml.loads(open(cfg_path, "r").read())
+ image_name = cfg_dict["image_name"]
+
+ return (image_name, joinpaths(uuid_dir, image_name))
+
+def uuid_log(cfg, uuid, size=1024):
+ """Return `size` kbytes from the end of the anaconda.log
+
+ :param cfg: Configuration settings
+ :type cfg: ComposerConfig
+ :param uuid: The UUID of the build
+ :type uuid: str
+ :param size: Number of kbytes to read. Default is 1024
+ :type size: int
+ :returns: Up to `size` kbytes from the end of the log
+ :rtype: str
+ :raises: RuntimeError if there was a problem (eg. no log file available)
+
+    This function tries to return lines from the end of the log. It will
+    attempt to start on a line boundary, and may return less than `size` kbytes.
+ """
+ uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid)
+ if not os.path.exists(uuid_dir):
+ raise RuntimeError("%s is not a valid build_id" % uuid)
+
+ # While a build is running the logs will be in /tmp/anaconda.log and when it
+ # has finished they will be in the results directory
+ status = uuid_status(cfg, uuid)
+ if status is None:
+ raise RuntimeError("Status is missing for %s" % uuid)
+
+ if status["queue_status"] == "RUNNING":
+ log_path = "/tmp/anaconda.log"
+ else:
+ log_path = joinpaths(uuid_dir, "logs", "anaconda", "anaconda.log")
+ if not os.path.exists(log_path):
+ raise RuntimeError("No anaconda.log available.")
+
+ with open(log_path, "r") as f:
+ f.seek(0, 2)
+ end = f.tell()
+ if end < 1024 * size:
+ f.seek(0, 0)
+ else:
+ f.seek(end - (1024 * size))
+ # Find the start of the next line and return the rest
+ f.readline()
+ return f.read()
diff --git a/src/pylorax/api/recipes.py b/src/pylorax/api/recipes.py
new file mode 100644
index 00000000..153b3808
--- /dev/null
+++ b/src/pylorax/api/recipes.py
@@ -0,0 +1,882 @@
+#
+# Copyright (C) 2017 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import gi
+gi.require_version("Ggit", "1.0")
+from gi.repository import Ggit as Git
+from gi.repository import Gio
+from gi.repository import GLib
+
+import os
+import pytoml as toml
+import semantic_version as semver
+
+from pylorax.api.projects import dep_evra
+from pylorax.base import DataHolder
+from pylorax.sysutils import joinpaths
+
+
+class CommitTimeValError(Exception):
+ pass
+
+class RecipeFileError(Exception):
+ pass
+
+class RecipeError(Exception):
+ pass
+
+
+class Recipe(dict):
+ """A Recipe of package and modules
+
+ This is a subclass of dict that enforces the constructor arguments
+ and adds a .filename property to return the recipe's filename,
+ and a .toml() function to return the recipe as a TOML string.
+ """
+ def __init__(self, name, description, version, modules, packages):
+ # Check that version is empty or semver compatible
+ if version:
+ semver.Version(version)
+
+ # Make sure modules and packages are listed by their case-insensitive names
+ if modules is not None:
+ modules = sorted(modules, key=lambda m: m["name"].lower())
+ if packages is not None:
+ packages = sorted(packages, key=lambda p: p["name"].lower())
+ dict.__init__(self, name=name,
+ description=description,
+ version=version,
+ modules=modules,
+ packages=packages)
+
+ @property
+ def package_names(self):
+ """Return the names of the packages"""
+ return [p["name"] for p in self["packages"] or []]
+
+ @property
+ def module_names(self):
+ """Return the names of the modules"""
+ return [m["name"] for m in self["modules"] or []]
+
+ @property
+ def filename(self):
+ """Return the Recipe's filename
+
+ Replaces spaces in the name with '-' and appends .toml
+ """
+ return recipe_filename(self.get("name"))
+
+ def toml(self):
+ """Return the Recipe in TOML format"""
+        return toml.dumps(self)
+
+ def bump_version(self, old_version=None):
+ """semver recipe version number bump
+
+ :param old_version: An optional old version number
+ :type old_version: str
+ :returns: The new version number or None
+ :rtype: str
+ :raises: ValueError
+
+ If neither have a version, 0.0.1 is returned
+ If there is no old version the new version is checked and returned
+        If there is no new version, but there is an old one, bump its patch level
+ If the old and new versions are the same, bump the patch level
+ If they are different, check and return the new version
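+
+        For example::
+
+            recipe.bump_version(None)     # no versions at all, sets "0.0.1"
+            recipe.bump_version("0.0.1")  # old == new, bumps it to "0.0.2"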
+ """
+ new_version = self.get("version")
+ if not new_version and not old_version:
+ self["version"] = "0.0.1"
+
+ elif new_version and not old_version:
+ semver.Version(new_version)
+ self["version"] = new_version
+
+ elif not new_version or new_version == old_version:
+ new_version = str(semver.Version(old_version).next_patch())
+ self["version"] = new_version
+
+ else:
+ semver.Version(new_version)
+ self["version"] = new_version
+
+ # Return the new version
+ return str(semver.Version(self["version"]))
+
+ def freeze(self, deps):
+ """ Return a new Recipe with full module and package NEVRA
+
+ :param deps: A list of dependency NEVRA to use to fill in the modules and packages
+        :type deps: list of dict
+ :returns: A new Recipe object
+ :rtype: Recipe
+ """
+ module_names = self.module_names
+ package_names = self.package_names
+
+ new_modules = []
+ new_packages = []
+ for dep in deps:
+ if dep["name"] in package_names:
+ new_packages.append(RecipePackage(dep["name"], dep_evra(dep)))
+ elif dep["name"] in module_names:
+ new_modules.append(RecipeModule(dep["name"], dep_evra(dep)))
+
+ return Recipe(self["name"], self["description"], self["version"],
+ new_modules, new_packages)
+
+class RecipeModule(dict):
+ def __init__(self, name, version):
+ dict.__init__(self, name=name, version=version)
+
+class RecipePackage(RecipeModule):
+ pass
+
+def recipe_from_file(recipe_path):
+ """Return a recipe file as a Recipe object
+
+    :param recipe_path: Path to the recipe file
+ :type recipe_path: str
+ :returns: A Recipe object
+ :rtype: Recipe
+ """
+ with open(recipe_path, 'rb') as f:
+ return recipe_from_toml(f.read())
+
+def recipe_from_toml(recipe_str):
+ """Create a Recipe object from a toml string.
+
+ :param recipe_str: The Recipe TOML string
+ :type recipe_str: str
+ :returns: A Recipe object
+ :rtype: Recipe
+ :raises: TomlError
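+
+    A minimal recipe TOML string looks something like this::
+
+        name = "http-server"
+        description = "An example http server"
+        version = "0.0.1"
+
+        [[modules]]
+        name = "httpd"
+        version = "2.4.*"
+
+        [[packages]]
+        name = "tmux"
+        version = "*"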
+ """
+ recipe_dict = toml.loads(recipe_str)
+ return recipe_from_dict(recipe_dict)
+
+def recipe_from_dict(recipe_dict):
+ """Create a Recipe object from a plain dict.
+
+ :param recipe_dict: A plain dict of the recipe
+ :type recipe_dict: dict
+ :returns: A Recipe object
+ :rtype: Recipe
+ :raises: RecipeError
+ """
+ # Make RecipeModule objects from the toml
+    # The TOML may not have modules or packages in it. Set them to empty lists in this case
+ try:
+ if recipe_dict.get("modules"):
+ modules = [RecipeModule(m.get("name"), m.get("version")) for m in recipe_dict["modules"]]
+ else:
+ modules = []
+ if recipe_dict.get("packages"):
+ packages = [RecipePackage(p.get("name"), p.get("version")) for p in recipe_dict["packages"]]
+ else:
+ packages = []
+ name = recipe_dict["name"]
+ description = recipe_dict["description"]
+ version = recipe_dict.get("version", None)
+ except KeyError as e:
+ raise RecipeError("There was a problem parsing the recipe: %s" % str(e))
+
+ return Recipe(name, description, version, modules, packages)
+
+def gfile(path):
+ """Convert a string path to GFile for use with Git"""
+ return Gio.file_new_for_path(path)
+
+def recipe_filename(name):
+ """Return the toml filename for a recipe
+
+ Replaces spaces with '-' and appends '.toml'
+ """
+ # XXX Raise an error if this is empty?
+ return name.replace(" ", "-") + ".toml"
+
+def head_commit(repo, branch):
+ """Get the branch's HEAD Commit Object
+
+ :param repo: Open repository
+ :type repo: Git.Repository
+ :param branch: Branch name
+ :type branch: str
+ :returns: Branch's head commit
+ :rtype: Git.Commit
+ :raises: Can raise errors from Ggit
+ """
+ branch_obj = repo.lookup_branch(branch, Git.BranchType.LOCAL)
+ commit_id = branch_obj.get_target()
+ return repo.lookup(commit_id, Git.Commit)
+
+def prepare_commit(repo, branch, builder):
+ """Prepare for a commit
+
+ :param repo: Open repository
+ :type repo: Git.Repository
+ :param branch: Branch name
+ :type branch: str
+ :param builder: instance of TreeBuilder
+ :type builder: TreeBuilder
+ :returns: (Tree, Sig, Ref)
+ :rtype: tuple
+ :raises: Can raise errors from Ggit
+ """
+ tree_id = builder.write()
+ tree = repo.lookup(tree_id, Git.Tree)
+ sig = Git.Signature.new_now("bdcs-api-server", "user-email")
+ ref = "refs/heads/%s" % branch
+ return (tree, sig, ref)
+
+def open_or_create_repo(path):
+ """Open an existing repo, or create a new one
+
+ :param path: path to recipe directory
+ :type path: string
+ :returns: A repository object
+ :rtype: Git.Repository
+ :raises: Can raise errors from Ggit
+
+ A bare git repo will be created in the git directory of the specified path.
+ If a repo already exists it will be opened and returned instead of
+ creating a new one.
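+
+ Example (the path shown is illustrative)::
+
+ repo = open_or_create_repo("/var/lib/lorax/composer/blueprints/")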
+ """
+ Git.init()
+ git_path = joinpaths(path, "git")
+ if os.path.exists(joinpaths(git_path, "HEAD")):
+ return Git.Repository.open(gfile(git_path))
+
+ repo = Git.Repository.init_repository(gfile(git_path), True)
+
+ # Make an initial empty commit
+ sig = Git.Signature.new_now("bdcs-api-server", "user-email")
+ tree_id = repo.get_index().write_tree()
+ tree = repo.lookup(tree_id, Git.Tree)
+ repo.create_commit("HEAD", sig, sig, "UTF-8", "Initial Recipe repository commit", tree, [])
+ return repo
+
+def write_commit(repo, branch, filename, message, content):
+ """Make a new commit to a repository's branch
+
+ :param repo: Open repository
+ :type repo: Git.Repository
+ :param branch: Branch name
+ :type branch: str
+ :param filename: full path of the file to add
+ :type filename: str
+ :param message: The commit message
+ :type message: str
+ :param content: The data to write
+ :type content: str
+ :returns: OId of the new commit
+ :rtype: Git.OId
+ :raises: Can raise errors from Ggit
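+
+ Example (illustrative; assumes a repo from open_or_create_repo() and an
+ existing Recipe named recipe)::
+
+ oid = write_commit(repo, "master", recipe.filename,
+ "Recipe %s saved" % recipe["name"], recipe.toml())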
+ """
+ try:
+ parent_commit = head_commit(repo, branch)
+ except GLib.GError:
+ # Branch doesn't exist, make a new one based on master
+ master_head = head_commit(repo, "master")
+ repo.create_branch(branch, master_head, 0)
+ parent_commit = head_commit(repo, branch)
+
+ blob_id = repo.create_blob_from_buffer(content)
+
+ # Use treebuilder to make a new entry for this filename and blob
+ parent_tree = parent_commit.get_tree()
+ builder = repo.create_tree_builder_from_tree(parent_tree)
+ builder.insert(filename, blob_id, Git.FileMode.BLOB)
+ (tree, sig, ref) = prepare_commit(repo, branch, builder)
+ return repo.create_commit(ref, sig, sig, "UTF-8", message, tree, [parent_commit])
+
+def read_commit_spec(repo, spec):
+ """Return the raw content of the blob specified by the spec
+
+ :param repo: Open repository
+ :type repo: Git.Repository
+ :param spec: Git revparse spec
+ :type spec: str
+ :returns: Contents of the commit
+ :rtype: str
+ :raises: Can raise errors from Ggit
+
+ eg. To read the README file from master the spec is "master:README"
+ """
+ commit_id = repo.revparse(spec).get_id()
+ blob = repo.lookup(commit_id, Git.Blob)
+ return blob.get_raw_content()
+
+def read_commit(repo, branch, filename, commit=None):
+ """Return the contents of a file on a specific branch or commit.
+
+ :param repo: Open repository
+ :type repo: Git.Repository
+ :param branch: Branch name
+ :type branch: str
+ :param filename: filename to read
+ :type filename: str
+ :param commit: Optional commit hash
+ :type commit: str
+ :returns: The commit id, and the contents of the commit
+ :rtype: tuple(str, str)
+ :raises: Can raise errors from Ggit
+
+ If no commit is passed the master:filename is returned, otherwise it will be
+ commit:filename
+ """
+ if not commit:
+ # Find the most recent commit for filename on the selected branch
+ commits = list_commits(repo, branch, filename, 1)
+ if not commits:
+ raise RecipeError("No commits for %s on the %s branch." % (filename, branch))
+ commit = commits[0].commit
+ return (commit, read_commit_spec(repo, "%s:%s" % (commit, filename)))
+
+def read_recipe_commit(repo, branch, recipe_name, commit=None):
+ """Read a recipe commit from git and return a Recipe object
+
+ :param repo: Open repository
+ :type repo: Git.Repository
+ :param branch: Branch name
+ :type branch: str
+ :param recipe_name: Recipe name to read
+ :type recipe_name: str
+ :param commit: Optional commit hash
+ :type commit: str
+ :returns: A Recipe object
+ :rtype: Recipe
+ :raises: Can raise errors from Ggit
+
+ If no commit is passed the master:filename is returned, otherwise it will be
+ commit:filename
+ """
+ (_, recipe_toml) = read_commit(repo, branch, recipe_filename(recipe_name), commit)
+ return recipe_from_toml(recipe_toml)
+
+def read_recipe_and_id(repo, branch, recipe_name, commit=None):
+ """Read a recipe commit and its id from git
+
+ :param repo: Open repository
+ :type repo: Git.Repository
+ :param branch: Branch name
+ :type branch: str
+ :param recipe_name: Recipe name to read
+ :type recipe_name: str
+ :param commit: Optional commit hash
+ :type commit: str
+ :returns: The commit id, and a Recipe object
+ :rtype: tuple(str, Recipe)
+ :raises: Can raise errors from Ggit
+
+ If no commit is passed the master:filename is returned, otherwise it will be
+ commit:filename
+ """
+ (commit_id, recipe_toml) = read_commit(repo, branch, recipe_filename(recipe_name), commit)
+ return (commit_id, recipe_from_toml(recipe_toml))
+
+def list_branch_files(repo, branch):
+ """Return a sorted list of the files on the branch HEAD
+
+ :param repo: Open repository
+ :type repo: Git.Repository
+ :param branch: Branch name
+ :type branch: str
+ :returns: A sorted list of the filenames
+ :rtype: list(str)
+ :raises: Can raise errors from Ggit
+ """
+ commit = head_commit(repo, branch).get_id().to_string()
+ return list_commit_files(repo, commit)
+
+def list_commit_files(repo, commit):
+ """Return a sorted list of the files on a commit
+
+ :param repo: Open repository
+ :type repo: Git.Repository
+ :param commit: The commit hash to list
+ :type commit: str
+ :returns: A sorted list of the filenames
+ :rtype: list(str)
+ :raises: Can raise errors from Ggit
+ """
+ commit_id = Git.OId.new_from_string(commit)
+ commit_obj = repo.lookup(commit_id, Git.Commit)
+ tree = commit_obj.get_tree()
+ return sorted([tree.get(i).get_name() for i in range(0, tree.size())])
+
+def delete_recipe(repo, branch, recipe_name):
+ """Delete a recipe from a branch.
+
+ :param repo: Open repository
+ :type repo: Git.Repository
+ :param branch: Branch name
+ :type branch: str
+ :param recipe_name: Recipe name to delete
+ :type recipe_name: str
+ :returns: OId of the new commit
+ :rtype: Git.OId
+ :raises: Can raise errors from Ggit
+ """
+ return delete_file(repo, branch, recipe_filename(recipe_name))
+
+def delete_file(repo, branch, filename):
+ """Delete a file from a branch.
+
+ :param repo: Open repository
+ :type repo: Git.Repository
+ :param branch: Branch name
+ :type branch: str
+ :param filename: filename to delete
+ :type filename: str
+ :returns: OId of the new commit
+ :rtype: Git.OId
+ :raises: Can raise errors from Ggit
+ """
+ parent_commit = head_commit(repo, branch)
+ parent_tree = parent_commit.get_tree()
+ builder = repo.create_tree_builder_from_tree(parent_tree)
+ builder.remove(filename)
+ (tree, sig, ref) = prepare_commit(repo, branch, builder)
+ message = "Recipe %s deleted" % filename
+ return repo.create_commit(ref, sig, sig, "UTF-8", message, tree, [parent_commit])
+
+def revert_recipe(repo, branch, recipe_name, commit):
+ """Revert the contents of a recipe to that of a previous commit
+
+ :param repo: Open repository
+ :type repo: Git.Repository
+ :param branch: Branch name
+ :type branch: str
+ :param recipe_name: Recipe name to revert
+ :type recipe_name: str
+ :param commit: Commit hash
+ :type commit: str
+ :returns: OId of the new commit
+ :rtype: Git.OId
+ :raises: Can raise errors from Ggit
+ """
+ return revert_file(repo, branch, recipe_filename(recipe_name), commit)
+
+def revert_file(repo, branch, filename, commit):
+ """Revert the contents of a file to that of a previous commit
+
+ :param repo: Open repository
+ :type repo: Git.Repository
+ :param branch: Branch name
+ :type branch: str
+ :param filename: filename to revert
+ :type filename: str
+ :param commit: Commit hash
+ :type commit: str
+ :returns: OId of the new commit
+ :rtype: Git.OId
+ :raises: Can raise errors from Ggit
+ """
+ commit_id = Git.OId.new_from_string(commit)
+ commit_obj = repo.lookup(commit_id, Git.Commit)
+ revert_tree = commit_obj.get_tree()
+ entry = revert_tree.get_by_name(filename)
+ blob_id = entry.get_id()
+ parent_commit = head_commit(repo, branch)
+
+ # Use treebuilder to modify the tree
+ parent_tree = parent_commit.get_tree()
+ builder = repo.create_tree_builder_from_tree(parent_tree)
+ builder.insert(filename, blob_id, Git.FileMode.BLOB)
+ (tree, sig, ref) = prepare_commit(repo, branch, builder)
+ commit_hash = commit_id.to_string()
+ message = "%s reverted to commit %s" % (filename, commit_hash)
+ return repo.create_commit(ref, sig, sig, "UTF-8", message, tree, [parent_commit])
+
+def commit_recipe(repo, branch, recipe):
+ """Commit a recipe to a branch
+
+ :param repo: Open repository
+ :type repo: Git.Repository
+ :param branch: Branch name
+ :type branch: str
+ :param recipe: Recipe to commit
+ :type recipe: Recipe
+ :returns: OId of the new commit
+ :rtype: Git.OId
+ :raises: Can raise errors from Ggit
+ """
+ try:
+ old_recipe = read_recipe_commit(repo, branch, recipe["name"])
+ old_version = old_recipe["version"]
+ except Exception:
+ old_version = None
+
+ recipe.bump_version(old_version)
+ recipe_toml = recipe.toml()
+ message = "Recipe %s, version %s saved." % (recipe["name"], recipe["version"])
+ return write_commit(repo, branch, recipe.filename, message, recipe_toml)
+
+def commit_recipe_file(repo, branch, filename):
+ """Commit a recipe file to a branch
+
+ :param repo: Open repository
+ :type repo: Git.Repository
+ :param branch: Branch name
+ :type branch: str
+ :param filename: Path to the recipe file to commit
+ :type filename: str
+ :returns: OId of the new commit
+ :rtype: Git.OId
+ :raises: Can raise errors from Ggit or RecipeFileError
+ """
+ try:
+ recipe = recipe_from_file(filename)
+ except IOError:
+ raise RecipeFileError
+
+ return commit_recipe(repo, branch, recipe)
+
+def commit_recipe_directory(repo, branch, directory):
+ r"""Commit all \*.toml files from a directory, if they aren't already in git.
+
+ :param repo: Open repository
+ :type repo: Git.Repository
+ :param branch: Branch name
+ :type branch: str
+ :param directory: The directory of \*.toml recipes to commit
+ :type directory: str
+ :returns: None
+ :raises: Can raise errors from Ggit or RecipeFileError
+
+ Files with TomlError or RecipeFileError problems will be skipped, and the remainder will
+ be tried.
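+
+ Example (the directory shown is illustrative)::
+
+ commit_recipe_directory(repo, "master", "/usr/share/lorax/composer/")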
+ """
+ dir_files = set([e for e in os.listdir(directory) if e.endswith(".toml")])
+ branch_files = set(list_branch_files(repo, branch))
+ new_files = dir_files.difference(branch_files)
+
+ for f in new_files:
+ # Skip files with errors, but try the others
+ try:
+ commit_recipe_file(repo, branch, joinpaths(directory, f))
+ except (RecipeFileError, toml.TomlError):
+ pass
+
+def tag_recipe_commit(repo, branch, recipe_name):
+ """Tag a file's most recent commit
+
+ :param repo: Open repository
+ :type repo: Git.Repository
+ :param branch: Branch name
+ :type branch: str
+ :param recipe_name: Recipe name to tag
+ :type recipe_name: str
+ :returns: Tag id or None if it failed.
+ :rtype: Git.OId
+ :raises: Can raise errors from Ggit
+
+ Uses tag_file_commit()
+ """
+ return tag_file_commit(repo, branch, recipe_filename(recipe_name))
+
+def tag_file_commit(repo, branch, filename):
+ """Tag a file's most recent commit
+
+ :param repo: Open repository
+ :type repo: Git.Repository
+ :param branch: Branch name
+ :type branch: str
+ :param filename: Filename to tag
+ :type filename: str
+ :returns: Tag id or None if it failed.
+ :rtype: Git.OId
+ :raises: Can raise errors from Ggit
+
+ This uses git tags, of the form `refs/tags/<branch>/<filename>/r<revision>`
+ Only the most recent recipe commit can be tagged to prevent out of order tagging.
+ Revisions start at 1 and increment for each new commit that is tagged.
+ If the commit has already been tagged it will return None.
+ """
+ file_commits = list_commits(repo, branch, filename)
+ if not file_commits:
+ return None
+
+ # The most recent commit has already been tagged, do not tag it again
+ if file_commits[0].revision is not None:
+ return None
+
+ # Find the most recently tagged version (there may not be one) and add 1 to it.
+ for details in file_commits:
+ if details.revision is not None:
+ new_revision = details.revision + 1
+ break
+ else:
+ new_revision = 1
+
+ name = "%s/%s/r%d" % (branch, filename, new_revision)
+ sig = Git.Signature.new_now("bdcs-api-server", "user-email")
+ commit_id = Git.OId.new_from_string(file_commits[0].commit)
+ commit = repo.lookup(commit_id, Git.Commit)
+ return repo.create_tag(name, commit, sig, name, Git.CreateFlags.NONE)
+
+def find_commit_tag(repo, branch, filename, commit_id):
+ """Find the tag that matches the commit_id
+
+ :param repo: Open repository
+ :type repo: Git.Repository
+ :param branch: Branch name
+ :type branch: str
+ :param filename: filename to revert
+ :type filename: str
+ :param commit_id: The commit id to check
+ :type commit_id: Git.OId
+ :returns: The tag or None if there isn't one
+ :rtype: str or None
+
+ There should be only 1 tag pointing to a commit, but there may not
+ be a tag at all.
+
+ The tag will look like: 'refs/tags/<branch>/<filename>/r<revision>'
+ """
+ pattern = "%s/%s/r*" % (branch, filename)
+ tags = [t for t in repo.list_tags_match(pattern) if is_commit_tag(repo, commit_id, t)]
+ if len(tags) != 1:
+ return None
+ else:
+ return tags[0]
+
+def is_commit_tag(repo, commit_id, tag):
+ """Check to see if a tag points to a specific commit.
+
+ :param repo: Open repository
+ :type repo: Git.Repository
+ :param commit_id: The commit id to check
+ :type commit_id: Git.OId
+ :param tag: The tag to check
+ :type tag: str
+ :returns: True if the tag points to the commit, False otherwise
+ :rtype: bool
+ """
+ ref = repo.lookup_reference("refs/tags/" + tag)
+ tag_id = ref.get_target()
+ tag = repo.lookup(tag_id, Git.Tag)
+ target_id = tag.get_target_id()
+ return commit_id.compare(target_id) == 0
+
+def get_revision_from_tag(tag):
+ """Return the revision number from a tag
+
+ :param tag: The tag to extract the revision from
+ :type tag: str
+ :returns: The integer revision or None
+ :rtype: int or None
+
+ The revision is the part after the r in 'branch/filename/rXXX'
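+
+ Example::
+
+ >>> get_revision_from_tag("master/http-server.toml/r4")
+ 4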
+ """
+ if tag is None:
+ return None
+ try:
+ return int(tag.rsplit('r', 2)[-1])
+ except (ValueError, IndexError):
+ return None
+
+class CommitDetails(DataHolder):
+ def __init__(self, commit, timestamp, message, revision=None):
+ DataHolder.__init__(self,
+ commit = commit,
+ timestamp = timestamp,
+ message = message,
+ revision = revision)
+
+def list_commits(repo, branch, filename, limit=0):
+ """List the commit history of a file on a branch.
+
+ :param repo: Open repository
+ :type repo: Git.Repository
+ :param branch: Branch name
+ :type branch: str
+ :param filename: filename to revert
+ :type filename: str
+ :param limit: Number of commits to return (0=all)
+ :type limit: int
+ :returns: A list of commit details
+ :rtype: list(CommitDetails)
+ :raises: Can raise errors from Ggit
+ """
+ revwalk = Git.RevisionWalker.new(repo)
+ revwalk.set_sort_mode(Git.SortMode.TIME)
+ branch_ref = "refs/heads/%s" % branch
+ revwalk.push_ref(branch_ref)
+
+ commits = []
+ while True:
+ commit_id = revwalk.next()
+ if not commit_id:
+ break
+ commit = repo.lookup(commit_id, Git.Commit)
+
+ parents = commit.get_parents()
+ # No parents? Must be the first commit.
+ if parents.get_size() == 0:
+ continue
+
+ tree = commit.get_tree()
+ # Is the filename in this tree? If not, move on.
+ if not tree.get_by_name(filename):
+ continue
+
+ # Is filename different in all of the parent commits?
+ parent_commits = list(map(parents.get, range(0, parents.get_size())))
+ is_diff = all([is_parent_diff(repo, filename, tree, pc) for pc in parent_commits])
+ # No changes from parents, skip it.
+ if not is_diff:
+ continue
+
+ tag = find_commit_tag(repo, branch, filename, commit.get_id())
+ try:
+ commits.append(get_commit_details(commit, get_revision_from_tag(tag)))
+ if limit and len(commits) >= limit:
+ break
+ except CommitTimeValError:
+ # Skip any commits that have trouble converting the time
+ # TODO - log details about this failure
+ pass
+
+ # These will be in reverse time sort order thanks to revwalk
+ return commits
+
+def get_commit_details(commit, revision=None):
+ """Return the details about a specific commit.
+
+ :param commit: The commit to get details from
+ :type commit: Git.Commit
+ :param revision: Optional commit revision
+ :type revision: int
+ :returns: Details about the commit
+ :rtype: CommitDetails
+ :raises: CommitTimeValError or Ggit exceptions
+
+ """
+ message = commit.get_message()
+ commit_str = commit.get_id().to_string()
+ sig = commit.get_committer()
+
+ datetime = sig.get_time()
+ # XXX What do we do with timezone?
+ _timezone = sig.get_time_zone()
+ timeval = GLib.TimeVal()
+ ok = datetime.to_timeval(timeval)
+ if not ok:
+ raise CommitTimeValError
+ time_str = timeval.to_iso8601()
+
+ return CommitDetails(commit_str, time_str, message, revision)
+
+def is_parent_diff(repo, filename, tree, parent):
+ """Check to see if the commit is different from its parents
+
+ :param repo: Open repository
+ :type repo: Git.Repository
+ :param filename: filename to revert
+ :type filename: str
+ :param tree: The commit's tree
+ :type tree: Git.Tree
+ :param parent: The commit's parent commit
+ :type parent: Git.Commit
+ :returns: True if filename in the commit is different from its parent
+ :rtype: bool
+ """
+ diff_opts = Git.DiffOptions.new()
+ diff_opts.set_pathspec([filename])
+ diff = Git.Diff.new_tree_to_tree(repo, parent.get_tree(), tree, diff_opts)
+ return diff.get_num_deltas() > 0
+
+def find_name(name, lst):
+ """Find the dict matching the name in a list and return it.
+
+ :param name: Name to search for
+ :type name: str
+ :param lst: List of dict's with "name" field
+ :returns: First dict with matching name, or None
+ :rtype: dict or None
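+
+ Example::
+
+ >>> find_name("tar", [{"name": "tar"}, {"name": "samba"}])
+ {'name': 'tar'}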
+ """
+ for e in lst:
+ if e["name"] == name:
+ return e
+ return None
+
+def diff_items(title, old_items, new_items):
+ """Return the differences between two lists of dicts.
+
+ :param title: Title of the entry
+ :type title: str
+ :param old_items: List of item dicts with "name" field
+ :type old_items: list(dict)
+ :param new_items: List of item dicts with "name" field
+ :type new_items: list(dict)
+ :returns: List of diff dicts with old/new entries
+ :rtype: list(dict)
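+
+ An example entry for a module whose version changed (values are illustrative)::
+
+ {"old": {"Module": {"name": "glusterfs", "version": "3.7.*"}},
+ "new": {"Module": {"name": "glusterfs", "version": "3.8.*"}}}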
+ """
+ diffs = []
+ old_names = set(m["name"] for m in old_items)
+ new_names = set(m["name"] for m in new_items)
+
+ added_items = new_names.difference(old_names)
+ added_items = sorted(added_items, key=lambda n: n.lower())
+
+ removed_items = old_names.difference(new_names)
+ removed_items = sorted(removed_items, key=lambda n: n.lower())
+
+ same_items = old_names.intersection(new_names)
+ same_items = sorted(same_items, key=lambda n: n.lower())
+
+ for name in added_items:
+ diffs.append({"old":None,
+ "new":{title:find_name(name, new_items)}})
+
+ for name in removed_items:
+ diffs.append({"old":{title:find_name(name, old_items)},
+ "new":None})
+
+ for name in same_items:
+ old_item = find_name(name, old_items)
+ new_item = find_name(name, new_items)
+ if old_item != new_item:
+ diffs.append({"old":{title:old_item},
+ "new":{title:new_item}})
+
+ return diffs
+
+
+def recipe_diff(old_recipe, new_recipe):
+ """Diff two versions of a recipe
+
+ :param old_recipe: The old version of the recipe
+ :type old_recipe: Recipe
+ :param new_recipe: The new version of the recipe
+ :type new_recipe: Recipe
+ :returns: A list of diff dict entries with old/new
+ :rtype: list(dict)
+ """
+
+ diffs = []
+ # These cannot be added or removed, just different
+ for element in ["name", "description", "version"]:
+ if old_recipe[element] != new_recipe[element]:
+ diffs.append({"old":{element.title():old_recipe[element]},
+ "new":{element.title():new_recipe[element]}})
+
+ diffs.extend(diff_items("Module", old_recipe["modules"], new_recipe["modules"]))
+ diffs.extend(diff_items("Package", old_recipe["packages"], new_recipe["packages"]))
+
+ return diffs
diff --git a/src/pylorax/api/server.py b/src/pylorax/api/server.py
new file mode 100644
index 00000000..492c0c0d
--- /dev/null
+++ b/src/pylorax/api/server.py
@@ -0,0 +1,77 @@
+#
+# Copyright (C) 2017 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+#
+import logging
+log = logging.getLogger("lorax-composer")
+
+from collections import namedtuple
+from flask import Flask, jsonify, redirect, send_from_directory
+from glob import glob
+import os
+
+from pylorax import vernum
+from pylorax.api.crossdomain import crossdomain
+from pylorax.api.v0 import v0_api
+from pylorax.sysutils import joinpaths
+
+GitLock = namedtuple("GitLock", ["repo", "lock", "dir"])
+YumLock = namedtuple("YumLock", ["yb", "lock"])
+
+server = Flask(__name__)
+
+__all__ = ["server", "GitLock"]
+
+@server.route('/')
+def server_root():
+ redirect("/api/docs/")
+
+@server.route("/api/docs/")
+@server.route("/api/docs/")
+def api_docs(path=None):
+ # Find the html docs
+ try:
+ # This assumes it is running from the source tree
+ docs_path = os.path.abspath(joinpaths(os.path.dirname(__file__), "../../../docs/html"))
+ except IndexError:
+ docs_path = glob("/usr/share/doc/lorax-*/html/")[0]
+
+ if not path:
+ path="index.html"
+ return send_from_directory(docs_path, path)
+
+@server.route("/api/status")
+@crossdomain(origin="*")
+def v0_status():
+ """
+ `/api/status`
+ ^^^^^^^^^^^^^
+ Return the status of the API Server::
+
+ { "api": "0",
+ "build": "devel",
+ "db_supported": true,
+ "db_version": "0",
+ "schema_version": "0",
+ "backend": "lorax-composer"}
+ """
+ return jsonify(backend="lorax-composer",
+ build=vernum,
+ api="0",
+ db_version="0",
+ schema_version="0",
+ db_supported=True)
+
+v0_api(server)
diff --git a/src/pylorax/api/v0.py b/src/pylorax/api/v0.py
new file mode 100644
index 00000000..3bf7a2e6
--- /dev/null
+++ b/src/pylorax/api/v0.py
@@ -0,0 +1,1563 @@
+#
+# Copyright (C) 2017-2018 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+#
+""" Setup v0 of the API server
+
+v0_api() must be called to setup the API routes for Flask
+
+Status Responses
+----------------
+
+Some requests only return a status/error response.
+
+ The response will be a status response with `status` set to true, or an
+ error response with it set to false and an error message included.
+
+ Example response::
+
+ {
+ "status": true
+ }
+
+ Error response::
+
+ {
+ "errors": ["ggit-error: Failed to remove entry. File isn't in the tree - jboss.toml (-1)"]
+ "status": false
+ }
+
+API Routes
+----------
+
+All of the blueprint routes support the optional `branch` argument. If it is not
+used then the API will use the `master` branch for blueprints. If you want to create
+a new branch use the `new` or `workspace` routes with `?branch=<branch-name>` to
+store the new blueprint on the new branch.
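+
+For example (illustrative), fetching `/api/v0/blueprints/info/http-server?branch=test`
+returns the `http-server` blueprint from the `test` branch instead of `master`.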
+
+`/api/v0/blueprints/list`
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ List the available blueprints::
+
+ { "limit": 20,
+ "offset": 0,
+ "blueprints": [
+ "atlas",
+ "development",
+ "glusterfs",
+ "http-server",
+ "jboss",
+ "kubernetes" ],
+ "total": 6 }
+
+`/api/v0/blueprints/info/<blueprint_names>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Return the JSON representation of the blueprint. This includes 3 top level
+ objects: `changes`, which lists whether or not the workspace is different from
+ the most recent commit; `blueprints`, which contains the JSON representation of the
+ blueprint; and `errors`, which lists any errors, like non-existent blueprints.
+
+ Example::
+
+ {
+ "changes": [
+ {
+ "changed": false,
+ "name": "glusterfs"
+ }
+ ],
+ "errors": [],
+ "blueprints": [
+ {
+ "description": "An example GlusterFS server with samba",
+ "modules": [
+ {
+ "name": "glusterfs",
+ "version": "3.7.*"
+ },
+ {
+ "name": "glusterfs-cli",
+ "version": "3.7.*"
+ }
+ ],
+ "name": "glusterfs",
+ "packages": [
+ {
+ "name": "2ping",
+ "version": "3.2.1"
+ },
+ {
+ "name": "samba",
+ "version": "4.2.*"
+ }
+ ],
+ "version": "0.0.6"
+ }
+ ]
+ }
+
+ Error example::
+
+ {
+ "changes": [],
+ "errors": ["ggit-error: the path 'missing.toml' does not exist in the given tree (-3)"]
+ "blueprints": []
+ }
+
+`/api/v0/blueprints/changes/<blueprint_names>[?offset=0&limit=20]`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Return the commit history of a blueprint. By default it returns the first 20 commits; this
+ can be changed by passing `offset` and/or `limit`. The response will include the
+ commit hash, summary, timestamp, and optionally the revision number. The commit
+ hash can be passed to the `diff` route to retrieve the exact changes.
+
+ Example::
+
+ {
+ "errors": [],
+ "limit": 20,
+ "offset": 0,
+ "blueprints": [
+ {
+ "changes": [
+ {
+ "commit": "e083921a7ed1cf2eec91ad12b9ad1e70ef3470be",
+ "message": "blueprint glusterfs, version 0.0.6 saved.",
+ "revision": null,
+ "timestamp": "2017-11-23T00:18:13Z"
+ },
+ {
+ "commit": "cee5f4c20fc33ea4d54bfecf56f4ad41ad15f4f3",
+ "message": "blueprint glusterfs, version 0.0.5 saved.",
+ "revision": null,
+ "timestamp": "2017-11-11T01:00:28Z"
+ },
+ {
+ "commit": "29b492f26ed35d80800b536623bafc51e2f0eff2",
+ "message": "blueprint glusterfs, version 0.0.4 saved.",
+ "revision": null,
+ "timestamp": "2017-11-11T00:28:30Z"
+ },
+ {
+ "commit": "03374adbf080fe34f5c6c29f2e49cc2b86958bf2",
+ "message": "blueprint glusterfs, version 0.0.3 saved.",
+ "revision": null,
+ "timestamp": "2017-11-10T23:15:52Z"
+ },
+ {
+ "commit": "0e08ecbb708675bfabc82952599a1712a843779d",
+ "message": "blueprint glusterfs, version 0.0.2 saved.",
+ "revision": null,
+ "timestamp": "2017-11-10T23:14:56Z"
+ },
+ {
+ "commit": "3e11eb87a63d289662cba4b1804a0947a6843379",
+ "message": "blueprint glusterfs, version 0.0.1 saved.",
+ "revision": null,
+ "timestamp": "2017-11-08T00:02:47Z"
+ }
+ ],
+ "name": "glusterfs",
+ "total": 6
+ }
+ ]
+ }
+
+POST `/api/v0/blueprints/new`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Create a new blueprint, or update an existing blueprint. This supports both JSON and TOML
+ for the blueprint format. The blueprint should be in the body of the request with the
+ `Content-Type` header set to either `application/json` or `text/x-toml`.
+
+ The response will be a status response with `status` set to true, or an
+ error response with it set to false and an error message included.
+
+DELETE `/api/v0/blueprints/delete/<blueprint_name>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Delete a blueprint. The blueprint is deleted from the branch, and will no longer
+ be listed by the `list` route. A blueprint can be undeleted using the `undo` route
+ to revert to a previous commit.
+
+ The response will be a status response with `status` set to true, or an
+ error response with it set to false and an error message included.
+
+POST `/api/v0/blueprints/workspace`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Write a blueprint to the temporary workspace. This works exactly the same as `new` except
+ that it does not create a commit. JSON and TOML bodies are supported.
+
+ The workspace is meant to be used as a temporary blueprint storage for clients.
+ It will be read by the `info` and `diff` routes if it is different from the
+ most recent commit.
+
+ The response will be a status response with `status` set to true, or an
+ error response with it set to false and an error message included.
+
+DELETE `/api/v0/blueprints/workspace/<blueprint_name>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Remove the temporary workspace copy of a blueprint. The `info` route will now
+ return the most recent commit of the blueprint. Any changes that were in the
+ workspace will be lost.
+
+ The response will be a status response with `status` set to true, or an
+ error response with it set to false and an error message included.
+
+POST `/api/v0/blueprints/undo/<blueprint_name>/<commit>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ This will revert the blueprint to a previous commit. The commit hash from the `changes`
+ route can be used in this request.
+
+ The response will be a status response with `status` set to true, or an
+ error response with it set to false and an error message included.
+
+POST `/api/v0/blueprints/tag/<blueprint_name>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Tag a blueprint as a new release. This uses git tags with a special format.
+ `refs/tags/<branch>/<filename>/r<revision>`. Only the most recent blueprint commit
+ can be tagged. Revisions start at 1 and increment for each new tag
+ (per-blueprint). If the commit has already been tagged it will return false.
+
+ The response will be a status response with `status` set to true, or an
+ error response with it set to false and an error message included.
+
+`/api/v0/blueprints/diff/<blueprint_name>/<from_commit>/<to_commit>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Return the differences between two commits, or the workspace. The commit hash
+ from the `changes` response can be used here, or several special strings:
+
+ - NEWEST will select the newest git commit. This works for `from_commit` or `to_commit`
+ - WORKSPACE will select the workspace copy. This can only be used in `to_commit`
+
+ eg. `/api/v0/blueprints/diff/glusterfs/NEWEST/WORKSPACE` will return the differences
+ between the most recent git commit and the contents of the workspace.
+
+ Each entry in the response's diff object contains the old blueprint value and the new one.
+ If old is null and new is set, then it was added.
+ If new is null and old is set, then it was removed.
+ If both are set, then it was changed.
+
+ The old/new entries will have the name of the blueprint field that was changed. This
+ can be one of: Name, Description, Version, Module, or Package.
+ The contents for these will be the old/new values for them.
+
+ In the example below the version was changed and the ping package was added.
+
+ Example::
+
+ {
+ "diff": [
+ {
+ "new": {
+ "Version": "0.0.6"
+ },
+ "old": {
+ "Version": "0.0.5"
+ }
+ },
+ {
+ "new": {
+ "Package": {
+ "name": "ping",
+ "version": "3.2.1"
+ }
+ },
+ "old": null
+ }
+ ]
+ }
+
+`/api/v0/blueprints/freeze/<blueprint_names>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Return a JSON representation of the blueprint with the package and module versions set
+ to the exact versions chosen by depsolving the blueprint.
+
+ Example::
+
+ {
+ "errors": [],
+ "blueprints": [
+ {
+ "blueprint": {
+ "description": "An example GlusterFS server with samba",
+ "modules": [
+ {
+ "name": "glusterfs",
+ "version": "3.8.4-18.4.el7.x86_64"
+ },
+ {
+ "name": "glusterfs-cli",
+ "version": "3.8.4-18.4.el7.x86_64"
+ }
+ ],
+ "name": "glusterfs",
+ "packages": [
+ {
+ "name": "ping",
+ "version": "2:3.2.1-2.el7.noarch"
+ },
+ {
+ "name": "samba",
+ "version": "4.6.2-8.el7.x86_64"
+ }
+ ],
+ "version": "0.0.6"
+ }
+ }
+ ]
+ }
+
+`/api/v0/blueprints/depsolve/<blueprint_names>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Depsolve the blueprint using yum, return the blueprint used, and the NEVRAs of the packages
+ chosen to satisfy the blueprint's requirements. The response will include a list of results,
+ with the full dependency list in `dependencies`, the NEVRAs for the blueprint's direct modules
+ and packages in `modules`, and any error will be in `errors`.
+
+ Example::
+
+ {
+ "errors": [],
+ "blueprints": [
+ {
+ "dependencies": [
+ {
+ "arch": "noarch",
+ "epoch": "0",
+ "name": "2ping",
+ "release": "2.el7",
+ "version": "3.2.1"
+ },
+ {
+ "arch": "x86_64",
+ "epoch": "0",
+ "name": "acl",
+ "release": "12.el7",
+ "version": "2.2.51"
+ },
+ {
+ "arch": "x86_64",
+ "epoch": "0",
+ "name": "audit-libs",
+ "release": "3.el7",
+ "version": "2.7.6"
+ },
+ {
+ "arch": "x86_64",
+ "epoch": "0",
+ "name": "avahi-libs",
+ "release": "17.el7",
+ "version": "0.6.31"
+ },
+ ...
+ ],
+ "modules": [
+ {
+ "arch": "noarch",
+ "epoch": "0",
+ "name": "2ping",
+ "release": "2.el7",
+ "version": "3.2.1"
+ },
+ {
+ "arch": "x86_64",
+ "epoch": "0",
+ "name": "glusterfs",
+ "release": "18.4.el7",
+ "version": "3.8.4"
+ },
+ ...
+ ],
+ "blueprint": {
+ "description": "An example GlusterFS server with samba",
+ "modules": [
+ {
+ "name": "glusterfs",
+ "version": "3.7.*"
+ },
+ ...
+ }
+ }
+ ]
+ }
+
+`/api/v0/projects/list[?offset=0&limit=20]`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ List all of the available projects. By default this returns the first 20 items,
+ but this can be changed by setting the `offset` and `limit` arguments.
+
+ Example::
+
+ {
+ "limit": 20,
+ "offset": 0,
+ "projects": [
+ {
+ "description": "0 A.D. (pronounced \"zero ey-dee\") is a ...",
+ "homepage": "http://play0ad.com",
+ "name": "0ad",
+ "summary": "Cross-Platform RTS Game of Ancient Warfare",
+ "upstream_vcs": "UPSTREAM_VCS"
+ },
+ ...
+ ],
+ "total": 21770
+ }
+
+`/api/v0/projects/info/<project_names>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Return information about the comma-separated list of projects. It includes the description
+ of the package along with the list of available builds.
+
+ Example::
+
+ {
+ "projects": [
+ {
+ "builds": [
+ {
+ "arch": "x86_64",
+ "build_config_ref": "BUILD_CONFIG_REF",
+ "build_env_ref": "BUILD_ENV_REF",
+ "build_time": "2017-03-01T08:39:23",
+ "changelog": "- restore incremental backups correctly, files ...",
+ "epoch": "2",
+ "metadata": {},
+ "release": "32.el7",
+ "source": {
+ "license": "GPLv3+",
+ "metadata": {},
+ "source_ref": "SOURCE_REF",
+ "version": "1.26"
+ }
+ }
+ ],
+ "description": "The GNU tar program saves many ...",
+ "homepage": "http://www.gnu.org/software/tar/",
+ "name": "tar",
+ "summary": "A GNU file archiving program",
+ "upstream_vcs": "UPSTREAM_VCS"
+ }
+ ]
+ }
+
+`/api/v0/projects/depsolve/<project_names>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Depsolve the comma-separated list of projects and return the list of NEVRAs needed
+ to satisfy the request.
+
+ Example::
+
+ {
+ "projects": [
+ {
+ "arch": "noarch",
+ "epoch": "0",
+ "name": "basesystem",
+ "release": "7.el7",
+ "version": "10.0"
+ },
+ {
+ "arch": "x86_64",
+ "epoch": "0",
+ "name": "bash",
+ "release": "28.el7",
+ "version": "4.2.46"
+ },
+ {
+ "arch": "x86_64",
+ "epoch": "0",
+ "name": "filesystem",
+ "release": "21.el7",
+ "version": "3.2"
+ },
+ ...
+ ]
+ }
+
+`/api/v0/modules/list[?offset=0&limit=20]`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Return a list of all of the available modules. This includes the name and the
+ group_type, which is always "rpm" for lorax-composer. By default this returns
+ the first 20 items. This can be changed by setting the `offset` and `limit`
+ arguments.
+
+ Example::
+
+ {
+ "limit": 20,
+ "modules": [
+ {
+ "group_type": "rpm",
+ "name": "0ad"
+ },
+ {
+ "group_type": "rpm",
+ "name": "0ad-data"
+ },
+ {
+ "group_type": "rpm",
+ "name": "0install"
+ },
+ {
+ "group_type": "rpm",
+ "name": "2048-cli"
+ },
+ ...
+ ],
+ "total": 21770
+ }
+
+`/api/v0/modules/list/<module_names>[?offset=0&limit=20]`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Return the list of comma-separated modules. Output is the same as `/modules/list`
+
+ Example::
+
+ {
+ "limit": 20,
+ "modules": [
+ {
+ "group_type": "rpm",
+ "name": "tar"
+ }
+ ],
+ "offset": 0,
+ "total": 1
+ }
+
+`/api/v0/modules/info/<module_names>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Return the module's dependencies, and the information about the module.
+
+ Example::
+
+ {
+ "modules": [
+ {
+ "dependencies": [
+ {
+ "arch": "noarch",
+ "epoch": "0",
+ "name": "basesystem",
+ "release": "7.el7",
+ "version": "10.0"
+ },
+ {
+ "arch": "x86_64",
+ "epoch": "0",
+ "name": "bash",
+ "release": "28.el7",
+ "version": "4.2.46"
+ },
+ ...
+ ],
+ "description": "The GNU tar program saves ...",
+ "homepage": "http://www.gnu.org/software/tar/",
+ "name": "tar",
+ "summary": "A GNU file archiving program",
+ "upstream_vcs": "UPSTREAM_VCS"
+ }
+ ]
+ }
+
+POST `/api/v0/compose`
+^^^^^^^^^^^^^^^^^^^^^^
+
+ Start a compose. The content type should be 'application/json' and the body of the POST
+ should look like this::
+
+ {
+ "blueprint_name": "http-server",
+ "compose_type": "tar",
+ "branch": "master"
+ }
+
+ Pass it the name of the blueprint, the type of output (from '/api/v0/compose/types'), and the
+ blueprint branch to use. 'branch' is optional and will default to master. It will create a new
+ build and add it to the queue. It returns the build uuid and a status if it succeeds::
+
+ {
+ "build_id": "e6fa6db4-9c81-4b70-870f-a697ca405cdf",
+ "status": true
+ }
+
+`/api/v0/compose/types`
+^^^^^^^^^^^^^^^^^^^^^^^
+
+ Returns the list of supported output types that are valid for use with 'POST /api/v0/compose'.
+
+ Example::
+
+ {
+ "types": [
+ {
+ "enabled": true,
+ "name": "tar"
+ }
+ ]
+ }
+
+`/api/v0/compose/queue`
+^^^^^^^^^^^^^^^^^^^^^^^
+
+ Return the status of the build queue. It includes information about the builds waiting,
+ and the build that is running.
+
+ Example::
+
+ {
+ "new": [
+ {
+ "id": "45502a6d-06e8-48a5-a215-2b4174b3614b",
+ "blueprint": "glusterfs",
+ "queue_status": "WAITING",
+ "timestamp": 1517362647.4570868,
+ "version": "0.0.6"
+ },
+ {
+ "id": "6d292bd0-bec7-4825-8d7d-41ef9c3e4b73",
+ "blueprint": "kubernetes",
+ "queue_status": "WAITING",
+ "timestamp": 1517362659.0034983,
+ "version": "0.0.1"
+ }
+ ],
+ "run": [
+ {
+ "id": "745712b2-96db-44c0-8014-fe925c35e795",
+ "blueprint": "glusterfs",
+ "queue_status": "RUNNING",
+ "timestamp": 1517362633.7965999,
+ "version": "0.0.6"
+ }
+ ]
+ }
+
+`/api/v0/compose/finished`
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Return the details on all of the finished composes on the system.
+
+ Example::
+
+ {
+ "finished": [
+ {
+ "id": "70b84195-9817-4b8a-af92-45e380f39894",
+ "blueprint": "glusterfs",
+ "queue_status": "FINISHED",
+ "timestamp": 1517351003.8210032,
+ "version": "0.0.6"
+ },
+ {
+ "id": "e695affd-397f-4af9-9022-add2636e7459",
+ "blueprint": "glusterfs",
+ "queue_status": "FINISHED",
+ "timestamp": 1517362289.7193348,
+ "version": "0.0.6"
+ }
+ ]
+ }
+
+`/api/v0/compose/failed`
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Return the details on all of the failed composes on the system.
+
+ Example::
+
+ {
+ "failed": [
+ {
+ "id": "8c8435ef-d6bd-4c68-9bf1-a2ef832e6b1a",
+ "blueprint": "http-server",
+ "queue_status": "FAILED",
+ "timestamp": 1517523249.9301329,
+ "version": "0.0.2"
+ }
+ ]
+ }
+
+`/api/v0/compose/status/<uuids>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Return the details for each of the comma-separated list of uuids.
+
+ Example::
+
+ {
+ "uuids": [
+ {
+ "id": "8c8435ef-d6bd-4c68-9bf1-a2ef832e6b1a",
+ "blueprint": "http-server",
+ "queue_status": "FINISHED",
+ "timestamp": 1517523644.2384307,
+ "version": "0.0.2"
+ },
+ {
+ "id": "45502a6d-06e8-48a5-a215-2b4174b3614b",
+ "blueprint": "glusterfs",
+ "queue_status": "FINISHED",
+ "timestamp": 1517363442.188399,
+ "version": "0.0.6"
+ }
+ ]
+ }
+
+DELETE `/api/v0/compose/cancel/<uuid>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Cancel the build, if it is not finished, and delete the results. It will return a
+ status of True if it is successful.
+
+ Example::
+
+ {
+ "status": true,
+ "uuid": "03397f8d-acff-4cdb-bd31-f629b7a948f5"
+ }
+
+DELETE `/api/v0/compose/delete/<uuids>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Delete the list of comma-separated uuids from the compose results.
+
+ Example::
+
+ {
+ "errors": [],
+ "uuids": [
+ {
+ "status": true,
+ "uuid": "ae1bf7e3-7f16-4c9f-b36e-3726a1093fd0"
+ }
+ ]
+ }
+
+`/api/v0/compose/info/<uuid>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Get detailed information about the compose. The returned JSON string will
+ contain the following information:
+
+ * id - The uuid of the composition
+ * config - The configuration settings used to run Anaconda
+ * blueprint - The depsolved blueprint used to generate the kickstart
+ * commit - The (local) git commit hash for the blueprint used
+ * deps - The NEVRA of all of the dependencies used in the composition
+ * compose_type - The type of output generated (tar, iso, etc.)
+ * queue_status - The final status of the composition (FINISHED or FAILED)
+
+ Example::
+
+ {
+ "commit": "7078e521a54b12eae31c3fd028680da7a0815a4d",
+ "compose_type": "tar",
+ "config": {
+ "anaconda_args": "",
+ "armplatform": "",
+ "compress_args": [],
+ "compression": "xz",
+ "image_name": "root.tar.xz",
+ ...
+ },
+ "deps": {
+ "packages": [
+ {
+ "arch": "x86_64",
+ "epoch": "0",
+ "name": "acl",
+ "release": "14.el7",
+ "version": "2.2.51"
+ }
+ ]
+ },
+ "id": "c30b7d80-523b-4a23-ad52-61b799739ce8",
+ "queue_status": "FINISHED",
+ "blueprint": {
+ "description": "An example kubernetes master",
+ ...
+ }
+ }
+
+`/api/v0/compose/metadata/<uuid>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Returns a .tar of the metadata used for the build. This includes all the
+ information needed to reproduce the build, including the final kickstart
+ populated with repository and package NEVRA.
+
+ The mime type is set to 'application/x-tar' and the filename is set to
+ UUID-metadata.tar
+
+ The .tar is uncompressed, but is not large.
+
+`/api/v0/compose/results/<uuid>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Returns a .tar of the metadata, logs, and output image of the build. This
+ includes all the information needed to reproduce the build, including the
+ final kickstart populated with repository and package NEVRA. The output image
+ is already in compressed form so the returned tar is not compressed.
+
+ The mime type is set to 'application/x-tar' and the filename is set to
+ UUID.tar
+
+`/api/v0/compose/logs/<uuid>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Returns a .tar of the anaconda build logs. The tar is not compressed, but is
+ not large.
+
+ The mime type is set to 'application/x-tar' and the filename is set to
+ UUID-logs.tar
+
+`/api/v0/compose/image/<uuid>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Returns the output image from the build. The filename is set to the filename
+ from the build with the UUID as a prefix. eg. UUID-root.tar.xz or UUID-boot.iso.
+
+`/api/v0/compose/log/<uuid>[?size=kbytes]`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Returns the end of the anaconda.log. The size parameter is optional and defaults to 1 Mbyte
+ if it is not included. The returned data is raw text from the end of the logfile, starting on
+ a line boundary.
+
+ Example::
+
+ 12:59:24,222 INFO anaconda: Running Thread: AnaConfigurationThread (140629395244800)
+ 12:59:24,223 INFO anaconda: Configuring installed system
+ 12:59:24,912 INFO anaconda: Configuring installed system
+ 12:59:24,912 INFO anaconda: Creating users
+ 12:59:24,913 INFO anaconda: Clearing libuser.conf at /tmp/libuser.Dyy8Gj
+ 12:59:25,154 INFO anaconda: Creating users
+ 12:59:25,155 INFO anaconda: Configuring addons
+ 12:59:25,155 INFO anaconda: Configuring addons
+ 12:59:25,155 INFO anaconda: Generating initramfs
+ 12:59:49,467 INFO anaconda: Generating initramfs
+ 12:59:49,467 INFO anaconda: Running post-installation scripts
+ 12:59:49,467 INFO anaconda: Running kickstart %%post script(s)
+ 12:59:50,782 INFO anaconda: All kickstart %%post script(s) have been run
+ 12:59:50,782 INFO anaconda: Running post-installation scripts
+ 12:59:50,784 INFO anaconda: Thread Done: AnaConfigurationThread (140629395244800)
+
+"""
+
+import logging
+log = logging.getLogger("lorax-composer")
+
+import os
+from flask import jsonify, request, Response, send_file
+
+from pylorax.api.compose import start_build, compose_types
+from pylorax.api.crossdomain import crossdomain
+from pylorax.api.projects import projects_list, projects_info, projects_depsolve
+from pylorax.api.projects import modules_list, modules_info, ProjectsError
+from pylorax.api.queue import queue_status, build_status, uuid_delete, uuid_status, uuid_info
+from pylorax.api.queue import uuid_tar, uuid_image, uuid_cancel, uuid_log
+from pylorax.api.recipes import list_branch_files, read_recipe_commit, recipe_filename, list_commits
+from pylorax.api.recipes import recipe_from_dict, recipe_from_toml, commit_recipe, delete_recipe, revert_recipe
+from pylorax.api.recipes import tag_recipe_commit, recipe_diff
+from pylorax.api.workspace import workspace_read, workspace_write, workspace_delete
+
+# The API functions don't actually get called by any code here
+# pylint: disable=unused-variable
+
+def take_limits(iterable, offset, limit):
+ """ Apply offset and limit to an iterable object
+
+ :param iterable: The object to limit
+ :type iterable: iter
+ :param offset: The number of items to skip
+ :type offset: int
+ :param limit: The total number of items to return
+ :type limit: int
+ :returns: A subset of the iterable
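+
+ Example (Python 2, where range() returns a list)::
+
+ >>> take_limits(range(10), 2, 3)
+ [2, 3, 4]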
+ """
+ return iterable[offset:][:limit]
+
+def v0_api(api):
+ # Note that Sphinx will not generate documentation for any of these.
+ @api.route("/api/v0/blueprints/list")
+ @crossdomain(origin="*")
+ def v0_blueprints_list():
+ """List the available blueprints on a branch."""
+ branch = request.args.get("branch", "master")
+ try:
+ limit = int(request.args.get("limit", "20"))
+ offset = int(request.args.get("offset", "0"))
+ except ValueError as e:
+ return jsonify(status=False, errors=[str(e)]), 400
+
+ with api.config["GITLOCK"].lock:
+ blueprints = take_limits([f[:-5] for f in list_branch_files(api.config["GITLOCK"].repo, branch)], offset, limit)
+ return jsonify(blueprints=blueprints, limit=limit, offset=offset, total=len(blueprints))
+
+ @api.route("/api/v0/blueprints/info/")
+ @crossdomain(origin="*")
+ def v0_blueprints_info(blueprint_names):
+ """Return the contents of the blueprint, or a list of blueprints"""
+ branch = request.args.get("branch", "master")
+ out_fmt = request.args.get("format", "json")
+ blueprints = []
+ changes = []
+ errors = []
+ for blueprint_name in [n.strip() for n in blueprint_names.split(",")]:
+ exceptions = []
+ # Get the workspace version (if it exists)
+ try:
+ with api.config["GITLOCK"].lock:
+ ws_blueprint = workspace_read(api.config["GITLOCK"].repo, branch, blueprint_name)
+ except Exception as e:
+ ws_blueprint = None
+ exceptions.append(str(e))
+ log.error("(v0_blueprints_info) %s", str(e))
+
+ # Get the git version (if it exists)
+ try:
+ with api.config["GITLOCK"].lock:
+ git_blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name)
+ except Exception as e:
+ git_blueprint = None
+ exceptions.append(str(e))
+ log.error("(v0_blueprints_info) %s", str(e))
+
+ if not ws_blueprint and not git_blueprint:
+ # Neither blueprint, return an error
+ errors.append("%s: %s" % (blueprint_name, ", ".join(exceptions)))
+ elif ws_blueprint and not git_blueprint:
+ # No git blueprint, return the workspace blueprint
+ changes.append({"name":blueprint_name, "changed":True})
+ blueprints.append(ws_blueprint)
+ elif not ws_blueprint and git_blueprint:
+ # No workspace blueprint, no change, return the git blueprint
+ changes.append({"name":blueprint_name, "changed":False})
+ blueprints.append(git_blueprint)
+ else:
+ # Both exist, maybe changed, return the workspace blueprint
+ changes.append({"name":blueprint_name, "changed":ws_blueprint != git_blueprint})
+ blueprints.append(ws_blueprint)
+
+ # Sort all the results by case-insensitive blueprint name
+ changes = sorted(changes, key=lambda c: c["name"].lower())
+ blueprints = sorted(blueprints, key=lambda r: r["name"].lower())
+ errors = sorted(errors, key=lambda e: e.lower())
+
+ if out_fmt == "toml":
+ # With TOML output we just want to dump the raw blueprint, skipping the rest.
+ return "\n\n".join([r.toml() for r in blueprints])
+ else:
+ return jsonify(changes=changes, blueprints=blueprints, errors=errors)
+
+ @api.route("/api/v0/blueprints/changes/")
+ @crossdomain(origin="*")
+ def v0_blueprints_changes(blueprint_names):
+ """Return the changes to a blueprint or list of blueprints"""
+ branch = request.args.get("branch", "master")
+ try:
+ limit = int(request.args.get("limit", "20"))
+ offset = int(request.args.get("offset", "0"))
+ except ValueError as e:
+ return jsonify(status=False, errors=[str(e)]), 400
+
+ blueprints = []
+ errors = []
+ for blueprint_name in [n.strip() for n in blueprint_names.split(",")]:
+ filename = recipe_filename(blueprint_name)
+ try:
+ with api.config["GITLOCK"].lock:
+ commits = take_limits(list_commits(api.config["GITLOCK"].repo, branch, filename), offset, limit)
+ except Exception as e:
+ errors.append("%s: %s" % (blueprint_name, str(e)))
+ log.error("(v0_blueprints_changes) %s", str(e))
+ else:
+ blueprints.append({"name":blueprint_name, "changes":commits, "total":len(commits)})
+
+ blueprints = sorted(blueprints, key=lambda r: r["name"].lower())
+ errors = sorted(errors, key=lambda e: e.lower())
+
+ return jsonify(blueprints=blueprints, errors=errors, offset=offset, limit=limit)
+
+ @api.route("/api/v0/blueprints/new", methods=["POST"])
+ @crossdomain(origin="*")
+ def v0_blueprints_new():
+ """Commit a new blueprint"""
+ branch = request.args.get("branch", "master")
+ try:
+ if request.headers['Content-Type'] == "text/x-toml":
+ blueprint = recipe_from_toml(request.data)
+ else:
+ blueprint = recipe_from_dict(request.get_json(cache=False))
+
+ with api.config["GITLOCK"].lock:
+ commit_recipe(api.config["GITLOCK"].repo, branch, blueprint)
+
+ # Read the blueprint with new version and write it to the workspace
+ blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint["name"])
+ workspace_write(api.config["GITLOCK"].repo, branch, blueprint)
+ except Exception as e:
+ log.error("(v0_blueprints_new) %s", str(e))
+ return jsonify(status=False, errors=[str(e)]), 400
+ else:
+ return jsonify(status=True)
+
+ @api.route("/api/v0/blueprints/delete/", methods=["DELETE"])
+ @crossdomain(origin="*")
+ def v0_blueprints_delete(blueprint_name):
+ """Delete a blueprint from git"""
+ branch = request.args.get("branch", "master")
+ try:
+ with api.config["GITLOCK"].lock:
+ delete_recipe(api.config["GITLOCK"].repo, branch, blueprint_name)
+ except Exception as e:
+ log.error("(v0_blueprints_delete) %s", str(e))
+ return jsonify(status=False, errors=[str(e)]), 400
+ else:
+ return jsonify(status=True)
+
+ @api.route("/api/v0/blueprints/workspace", methods=["POST"])
+ @crossdomain(origin="*")
+ def v0_blueprints_workspace():
+ """Write a blueprint to the workspace"""
+ branch = request.args.get("branch", "master")
+ try:
+ if request.headers['Content-Type'] == "text/x-toml":
+ blueprint = recipe_from_toml(request.data)
+ else:
+ blueprint = recipe_from_dict(request.get_json(cache=False))
+
+ with api.config["GITLOCK"].lock:
+ workspace_write(api.config["GITLOCK"].repo, branch, blueprint)
+ except Exception as e:
+ log.error("(v0_blueprints_workspace) %s", str(e))
+ return jsonify(status=False, errors=[str(e)]), 400
+ else:
+ return jsonify(status=True)
+
+ @api.route("/api/v0/blueprints/workspace/", methods=["DELETE"])
+ @crossdomain(origin="*")
+ def v0_blueprints_delete_workspace(blueprint_name):
+ """Delete a blueprint from the workspace"""
+ branch = request.args.get("branch", "master")
+ try:
+ with api.config["GITLOCK"].lock:
+ workspace_delete(api.config["GITLOCK"].repo, branch, blueprint_name)
+ except Exception as e:
+ log.error("(v0_blueprints_delete_workspace) %s", str(e))
+ return jsonify(status=False, errors=[str(e)]), 400
+ else:
+ return jsonify(status=True)
+
+ @api.route("/api/v0/blueprints/undo//", methods=["POST"])
+ @crossdomain(origin="*")
+ def v0_blueprints_undo(blueprint_name, commit):
+ """Undo changes to a blueprint by reverting to a previous commit."""
+ branch = request.args.get("branch", "master")
+ try:
+ with api.config["GITLOCK"].lock:
+ revert_recipe(api.config["GITLOCK"].repo, branch, blueprint_name, commit)
+
+ # Read the new recipe and write it to the workspace
+ blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name)
+ workspace_write(api.config["GITLOCK"].repo, branch, blueprint)
+ except Exception as e:
+ log.error("(v0_blueprints_undo) %s", str(e))
+ return jsonify(status=False, errors=[str(e)]), 400
+ else:
+ return jsonify(status=True)
+
+ @api.route("/api/v0/blueprints/tag/", methods=["POST"])
+ @crossdomain(origin="*")
+ def v0_blueprints_tag(blueprint_name):
+ """Tag a blueprint's latest blueprint commit as a 'revision'"""
+ branch = request.args.get("branch", "master")
+ try:
+ with api.config["GITLOCK"].lock:
+ tag_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name)
+ except Exception as e:
+ log.error("(v0_blueprints_tag) %s", str(e))
+ return jsonify(status=False, errors=[str(e)]), 400
+ else:
+ return jsonify(status=True)
+
+ @api.route("/api/v0/blueprints/diff///")
+ @crossdomain(origin="*")
+ def v0_blueprints_diff(blueprint_name, from_commit, to_commit):
+ """Return the differences between two commits of a blueprint"""
+ branch = request.args.get("branch", "master")
+ try:
+ if from_commit == "NEWEST":
+ with api.config["GITLOCK"].lock:
+ old_blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name)
+ else:
+ with api.config["GITLOCK"].lock:
+ old_blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name, from_commit)
+ except Exception as e:
+ log.error("(v0_blueprints_diff) %s", str(e))
+ return jsonify(status=False, errors=[str(e)]), 400
+
+ try:
+ if to_commit == "WORKSPACE":
+ with api.config["GITLOCK"].lock:
+ new_blueprint = workspace_read(api.config["GITLOCK"].repo, branch, blueprint_name)
+ # If there is no workspace, use the newest commit instead
+ if not new_blueprint:
+ with api.config["GITLOCK"].lock:
+ new_blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name)
+ elif to_commit == "NEWEST":
+ with api.config["GITLOCK"].lock:
+ new_blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name)
+ else:
+ with api.config["GITLOCK"].lock:
+ new_blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name, to_commit)
+ except Exception as e:
+ log.error("(v0_blueprints_diff) %s", str(e))
+ return jsonify(status=False, errors=[str(e)]), 400
+
+ diff = recipe_diff(old_blueprint, new_blueprint)
+ return jsonify(diff=diff)
+
+ @api.route("/api/v0/blueprints/freeze/")
+ @crossdomain(origin="*")
+ def v0_blueprints_freeze(blueprint_names):
+ """Return the blueprint with the exact modules and packages selected by depsolve"""
+ branch = request.args.get("branch", "master")
+ out_fmt = request.args.get("format", "json")
+ blueprints = []
+ errors = []
+ for blueprint_name in [n.strip() for n in sorted(blueprint_names.split(","), key=lambda n: n.lower())]:
+ # get the blueprint
+ # Get the workspace version (if it exists)
+ blueprint = None
+ try:
+ with api.config["GITLOCK"].lock:
+ blueprint = workspace_read(api.config["GITLOCK"].repo, branch, blueprint_name)
+ except Exception:
+ pass
+
+ if not blueprint:
+ # No workspace version, get the git version (if it exists)
+ try:
+ with api.config["GITLOCK"].lock:
+ blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name)
+ except Exception as e:
+ errors.append("%s: %s" % (blueprint_name, str(e)))
+ log.error("(v0_blueprints_freeze) %s", str(e))
+
+ # No blueprint found, skip it.
+ if not blueprint:
+ errors.append("%s: blueprint_not_found" % (blueprint_name))
+ continue
+
+ # Combine modules and packages and depsolve the list
+ # TODO include the version/glob in the depsolving
+ module_names = blueprint.module_names
+ package_names = blueprint.package_names
+ projects = sorted(set(module_names+package_names), key=lambda n: n.lower())
+ deps = []
+ try:
+ with api.config["YUMLOCK"].lock:
+ deps = projects_depsolve(api.config["YUMLOCK"].yb, projects)
+ except ProjectsError as e:
+ errors.append("%s: %s" % (blueprint_name, str(e)))
+ log.error("(v0_blueprints_freeze) %s", str(e))
+
+ blueprints.append({"blueprint": blueprint.freeze(deps)})
+
+ if out_fmt == "toml":
+ # With TOML output we just want to dump the raw blueprint, skipping the rest.
+ return "\n\n".join([e["blueprint"].toml() for e in blueprints])
+ else:
+ return jsonify(blueprints=blueprints, errors=errors)
+
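+ # Example request for the freeze route above (assumed blueprint name).
+ # The default JSON form wraps each frozen blueprint as
+ # {"blueprints": [{"blueprint": {...}}], "errors": [...]}, while
+ # ?format=toml dumps the raw TOML instead:
+ #
+ # GET /api/v0/blueprints/freeze/http-server?format=toml
+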
+ @api.route("/api/v0/blueprints/depsolve/")
+ @crossdomain(origin="*")
+ def v0_blueprints_depsolve(blueprint_names):
+ """Return the dependencies for a blueprint"""
+ branch = request.args.get("branch", "master")
+ blueprints = []
+ errors = []
+ for blueprint_name in [n.strip() for n in sorted(blueprint_names.split(","), key=lambda n: n.lower())]:
+ # Get the workspace version of the blueprint (if it exists)
+ blueprint = None
+ try:
+ with api.config["GITLOCK"].lock:
+ blueprint = workspace_read(api.config["GITLOCK"].repo, branch, blueprint_name)
+ except Exception:
+ pass
+
+ if not blueprint:
+ # No workspace version, get the git version (if it exists)
+ try:
+ with api.config["GITLOCK"].lock:
+ blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name)
+ except Exception as e:
+ errors.append("%s: %s" % (blueprint_name, str(e)))
+ log.error("(v0_blueprints_depsolve) %s", str(e))
+
+ # No blueprint found, skip it.
+ if not blueprint:
+ errors.append("%s: blueprint not found" % blueprint_name)
+ continue
+
+ # Combine modules and packages and depsolve the list
+ # TODO include the version/glob in the depsolving
+ module_names = [m["name"] for m in blueprint["modules"] or []]
+ package_names = [p["name"] for p in blueprint["packages"] or []]
+ projects = sorted(set(module_names+package_names), key=lambda n: n.lower())
+ deps = []
+ try:
+ with api.config["YUMLOCK"].lock:
+ deps = projects_depsolve(api.config["YUMLOCK"].yb, projects)
+ except ProjectsError as e:
+ errors.append("%s: %s" % (blueprint_name, str(e)))
+ log.error("(v0_blueprints_depsolve) %s", str(e))
+
+ # Get the NEVRAs of the modules and projects and include them as "modules"
+ modules = []
+ for dep in deps:
+ if dep["name"] in projects:
+ modules.append(dep)
+ modules = sorted(modules, key=lambda m: m["name"].lower())
+
+ blueprints.append({"blueprint":blueprint, "dependencies":deps, "modules":modules})
+
+ return jsonify(blueprints=blueprints, errors=errors)
+
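+ # Sketch of the depsolve response shape (hypothetical blueprint name).
+ # "dependencies" is the full NEVRA list from the solver; "modules" is the
+ # subset that was explicitly listed in the blueprint:
+ #
+ # GET /api/v0/blueprints/depsolve/http-server
+ # -> {"blueprints": [{"blueprint": {...}, "dependencies": [...], "modules": [...]}],
+ # "errors": []}
+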
+ @api.route("/api/v0/projects/list")
+ @crossdomain(origin="*")
+ def v0_projects_list():
+ """List all of the available projects/packages"""
+ try:
+ limit = int(request.args.get("limit", "20"))
+ offset = int(request.args.get("offset", "0"))
+ except ValueError as e:
+ return jsonify(status=False, errors=[str(e)]), 400
+
+ try:
+ with api.config["YUMLOCK"].lock:
+ available = projects_list(api.config["YUMLOCK"].yb)
+ except ProjectsError as e:
+ log.error("(v0_projects_list) %s", str(e))
+ return jsonify(status=False, errors=[str(e)]), 400
+
+ projects = take_limits(available, offset, limit)
+ return jsonify(projects=projects, offset=offset, limit=limit, total=len(available))
+
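+ # Pagination sketch for the route above (the counts are illustrative):
+ #
+ # GET /api/v0/projects/list?limit=1&offset=5
+ # -> {"limit": 1, "offset": 5, "projects": [{...}], "total": 2117}
+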
+ @api.route("/api/v0/projects/info/")
+ @crossdomain(origin="*")
+ def v0_projects_info(project_names):
+ """Return detailed information about the listed projects"""
+ try:
+ with api.config["YUMLOCK"].lock:
+ projects = projects_info(api.config["YUMLOCK"].yb, project_names.split(","))
+ except ProjectsError as e:
+ log.error("(v0_projects_info) %s", str(e))
+ return jsonify(status=False, errors=[str(e)]), 400
+
+ return jsonify(projects=projects)
+
+ @api.route("/api/v0/projects/depsolve/")
+ @crossdomain(origin="*")
+ def v0_projects_depsolve(project_names):
+ """Return detailed information about the listed projects"""
+ try:
+ with api.config["YUMLOCK"].lock:
+ deps = projects_depsolve(api.config["YUMLOCK"].yb, project_names.split(","))
+ except ProjectsError as e:
+ log.error("(v0_projects_depsolve) %s", str(e))
+ return jsonify(status=False, errors=[str(e)]), 400
+
+ return jsonify(projects=deps)
+
+ @api.route("/api/v0/modules/list")
+ @api.route("/api/v0/modules/list/")
+ @crossdomain(origin="*")
+ def v0_modules_list(module_names=None):
+ """List available modules, filtering by module_names"""
+ try:
+ limit = int(request.args.get("limit", "20"))
+ offset = int(request.args.get("offset", "0"))
+ except ValueError as e:
+ return jsonify(status=False, errors=[str(e)]), 400
+
+ if module_names:
+ module_names = module_names.split(",")
+
+ try:
+ with api.config["YUMLOCK"].lock:
+ available = modules_list(api.config["YUMLOCK"].yb, module_names)
+ except ProjectsError as e:
+ log.error("(v0_modules_list) %s", str(e))
+ return jsonify(status=False, errors=[str(e)]), 400
+
+ modules = take_limits(available, offset, limit)
+ return jsonify(modules=modules, offset=offset, limit=limit, total=len(available))
+
+ @api.route("/api/v0/modules/info/")
+ @crossdomain(origin="*")
+ def v0_modules_info(module_names):
+ """Return detailed information about the listed modules"""
+ try:
+ with api.config["YUMLOCK"].lock:
+ modules = modules_info(api.config["YUMLOCK"].yb, module_names.split(","))
+ except ProjectsError as e:
+ log.error("(v0_modules_info) %s", str(e))
+ return jsonify(status=False, errors=[str(e)]), 400
+
+ return jsonify(modules=modules)
+
+ @api.route("/api/v0/compose", methods=["POST"])
+ @crossdomain(origin="*")
+ def v0_compose_start():
+ """Start a compose
+
+ The body of the post should have these fields:
+ blueprint_name - The blueprint name from /blueprints/list/
+ compose_type - The type of output to create, from /compose/types
+ branch - Optional, defaults to master, selects the git branch to use for the blueprint.
+ """
+ # Passing ?test=1 will generate a fake FAILED compose.
+ # Passing ?test=2 will generate a fake FINISHED compose.
+ try:
+ test_mode = int(request.args.get("test", "0"))
+ except ValueError:
+ test_mode = 0
+
+ compose = request.get_json(cache=False)
+
+ errors = []
+ if not compose:
+ return jsonify(status=False, errors=["Missing POST body"]), 400
+
+ if "blueprint_name" not in compose:
+ errors.append("No 'blueprint_name' in the JSON request")
+ else:
+ blueprint_name = compose["blueprint_name"]
+
+ if "branch" not in compose or not compose["branch"]:
+ branch = "master"
+ else:
+ branch = compose["branch"]
+
+ if "compose_type" not in compose:
+ errors.append("No 'compose_type' in the JSON request")
+ else:
+ compose_type = compose["compose_type"]
+
+ if errors:
+ return jsonify(status=False, errors=errors), 400
+
+ try:
+ build_id = start_build(api.config["COMPOSER_CFG"], api.config["YUMLOCK"], api.config["GITLOCK"],
+ branch, blueprint_name, compose_type, test_mode)
+ except Exception as e:
+ return jsonify(status=False, errors=[str(e)]), 400
+
+ return jsonify(status=True, build_id=build_id)
+
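+ # An illustrative POST body for /api/v0/compose (the blueprint name and
+ # compose type are assumed examples, not guaranteed to exist on a server):
+ #
+ # {"blueprint_name": "http-server",
+ # "compose_type": "tar",
+ # "branch": "master"}
+ #
+ # A successful response looks like {"status": true, "build_id": "<uuid4>"}.
+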
+ @api.route("/api/v0/compose/types")
+ @crossdomain(origin="*")
+ def v0_compose_types():
+ """Return the list of enabled output types
+
+ (only enabled types are returned)
+ """
+ share_dir = api.config["COMPOSER_CFG"].get("composer", "share_dir")
+ return jsonify(types=[{"name": k, "enabled": True} for k in compose_types(share_dir)])
+
+ @api.route("/api/v0/compose/queue")
+ @crossdomain(origin="*")
+ def v0_compose_queue():
+ """Return the status of the new and running queues"""
+ return jsonify(queue_status(api.config["COMPOSER_CFG"]))
+
+ @api.route("/api/v0/compose/finished")
+ @crossdomain(origin="*")
+ def v0_compose_finished():
+ """Return the list of finished composes"""
+ return jsonify(finished=build_status(api.config["COMPOSER_CFG"], "FINISHED"))
+
+ @api.route("/api/v0/compose/failed")
+ @crossdomain(origin="*")
+ def v0_compose_failed():
+ """Return the list of failed composes"""
+ return jsonify(failed=build_status(api.config["COMPOSER_CFG"], "FAILED"))
+
+ @api.route("/api/v0/compose/status/")
+ @crossdomain(origin="*")
+ def v0_compose_status(uuids):
+ """Return the status of the listed uuids"""
+ results = []
+ for uuid in [n.strip().lower() for n in uuids.split(",")]:
+ details = uuid_status(api.config["COMPOSER_CFG"], uuid)
+ if details is not None:
+ results.append(details)
+
+ return jsonify(uuids=results)
+
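+ # The status route above takes a comma-separated list of uuids; unknown
+ # uuids are silently skipped rather than reported as errors. Sketch
+ # (hypothetical uuids):
+ #
+ # GET /api/v0/compose/status/<uuid1>,<uuid2>
+ # -> {"uuids": [{...status of uuid1...}, {...status of uuid2...}]}
+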
+ @api.route("/api/v0/compose/cancel/", methods=["DELETE"])
+ @crossdomain(origin="*")
+ def v0_compose_cancel(uuid):
+ """Cancel a running compose and delete its results directory"""
+ status = uuid_status(api.config["COMPOSER_CFG"], uuid)
+ if status is None:
+ return jsonify(status=False, errors=["%s is not a valid build uuid" % uuid]), 400
+
+ if status["queue_status"] not in ["WAITING", "RUNNING"]:
+ return jsonify(status=False, errors=["Build %s is not in WAITING or RUNNING." % uuid])
+
+ try:
+ uuid_cancel(api.config["COMPOSER_CFG"], uuid)
+ except Exception as e:
+ return jsonify(status=False, errors=["%s: %s" % (uuid, str(e))]), 400
+ else:
+ return jsonify(status=True, uuid=uuid)
+
+ @api.route("/api/v0/compose/delete/", methods=["DELETE"])
+ @crossdomain(origin="*")
+ def v0_compose_delete(uuids):
+ """Delete the compose results for the listed uuids"""
+ results = []
+ errors = []
+ for uuid in [n.strip().lower() for n in uuids.split(",")]:
+ status = uuid_status(api.config["COMPOSER_CFG"], uuid)
+ if status is None:
+ errors.append("%s is not a valid build uuid" % uuid)
+ elif status["queue_status"] not in ["FINISHED", "FAILED"]:
+ errors.append("Build %s is not in FINISHED or FAILED." % uuid)
+ else:
+ try:
+ uuid_delete(api.config["COMPOSER_CFG"], uuid)
+ except Exception as e:
+ errors.append("%s: %s" % (uuid, str(e)))
+ else:
+ results.append({"uuid":uuid, "status":True})
+ return jsonify(uuids=results, errors=errors)
+
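+ # Sketch of a delete exchange (hypothetical uuids): each successfully
+ # deleted build is listed under "uuids", and per-uuid failures are
+ # collected in "errors" instead of failing the whole request:
+ #
+ # DELETE /api/v0/compose/delete/<uuid1>,<uuid2>
+ # -> {"uuids": [{"uuid": "<uuid1>", "status": true}],
+ # "errors": ["<uuid2> is not a valid build uuid"]}
+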
+ @api.route("/api/v0/compose/info/")
+ @crossdomain(origin="*")
+ def v0_compose_info(uuid):
+ """Return detailed info about a compose"""
+ try:
+ info = uuid_info(api.config["COMPOSER_CFG"], uuid)
+ except Exception as e:
+ return jsonify(status=False, errors=[str(e)]), 400
+
+ return jsonify(**info)
+
+ @api.route("/api/v0/compose/metadata/")
+ @crossdomain(origin="*")
+ def v0_compose_metadata(uuid):
+ """Return a tar of the metadata for the build"""
+ status = uuid_status(api.config["COMPOSER_CFG"], uuid)
+ if status is None:
+ return jsonify(status=False, errors=["%s is not a valid build uuid" % uuid]), 400
+ if status["queue_status"] not in ["FINISHED", "FAILED"]:
+ return jsonify(status=False, errors=["Build %s not in FINISHED or FAILED state." % uuid]), 400
+ else:
+ return Response(uuid_tar(api.config["COMPOSER_CFG"], uuid, metadata=True, image=False, logs=False),
+ mimetype="application/x-tar",
+ headers=[("Content-Disposition", "attachment; filename=%s-metadata.tar;" % uuid)],
+ direct_passthrough=True)
+
+ @api.route("/api/v0/compose/results/")
+ @crossdomain(origin="*")
+ def v0_compose_results(uuid):
+ """Return a tar of the metadata and the results for the build"""
+ status = uuid_status(api.config["COMPOSER_CFG"], uuid)
+ if status is None:
+ return jsonify(status=False, errors=["%s is not a valid build uuid" % uuid]), 400
+ elif status["queue_status"] not in ["FINISHED", "FAILED"]:
+ return jsonify(status=False, errors=["Build %s not in FINISHED or FAILED state." % uuid]), 400
+ else:
+ return Response(uuid_tar(api.config["COMPOSER_CFG"], uuid, metadata=True, image=True, logs=True),
+ mimetype="application/x-tar",
+ headers=[("Content-Disposition", "attachment; filename=%s.tar;" % uuid)],
+ direct_passthrough=True)
+
+ @api.route("/api/v0/compose/logs/")
+ @crossdomain(origin="*")
+ def v0_compose_logs(uuid):
+ """Return a tar of the metadata for the build"""
+ status = uuid_status(api.config["COMPOSER_CFG"], uuid)
+ if status is None:
+ return jsonify(status=False, errors=["%s is not a valid build uuid" % uuid]), 400
+ elif status["queue_status"] not in ["FINISHED", "FAILED"]:
+ return jsonify(status=False, errors=["Build %s not in FINISHED or FAILED state." % uuid]), 400
+ else:
+ return Response(uuid_tar(api.config["COMPOSER_CFG"], uuid, metadata=False, image=False, logs=True),
+ mimetype="application/x-tar",
+ headers=[("Content-Disposition", "attachment; filename=%s-logs.tar;" % uuid)],
+ direct_passthrough=True)
+
+ @api.route("/api/v0/compose/image/")
+ @crossdomain(origin="*")
+ def v0_compose_image(uuid):
+ """Return the output image for the build"""
+ status = uuid_status(api.config["COMPOSER_CFG"], uuid)
+ if status is None:
+ return jsonify(status=False, errors=["%s is not a valid build uuid" % uuid]), 400
+ elif status["queue_status"] not in ["FINISHED", "FAILED"]:
+ return jsonify(status=False, errors=["Build %s not in FINISHED or FAILED state." % uuid]), 400
+ else:
+ image_name, image_path = uuid_image(api.config["COMPOSER_CFG"], uuid)
+
+ # Make sure it really exists
+ if not os.path.exists(image_path):
+ return jsonify(status=False, errors=["Build %s is missing image file %s" % (uuid, image_name)]), 400
+
+ # Make the image name unique
+ image_name = uuid + "-" + image_name
+ # XXX - Will mime type guessing work for all our output?
+ return send_file(image_path, as_attachment=True, attachment_filename=image_name, add_etags=False)
+
+ @api.route("/api/v0/compose/log/")
+ @crossdomain(origin="*")
+ def v0_compose_log_tail(uuid):
+ """Return the end of the main anaconda.log, defaults to 1Mbytes"""
+ try:
+ size = int(request.args.get("size", "1024"))
+ except ValueError as e:
+ return jsonify(status=False, errors=[str(e)]), 400
+
+ status = uuid_status(api.config["COMPOSER_CFG"], uuid)
+ if status is None:
+ return jsonify(status=False, errors=["%s is not a valid build uuid" % uuid]), 400
+ elif status["queue_status"] == "WAITING":
+ return jsonify(status=False, errors=["Build %s has not started yet. No logs to view" % uuid])
+ try:
+ return Response(uuid_log(api.config["COMPOSER_CFG"], uuid, size), direct_passthrough=True)
+ except RuntimeError as e:
+ return jsonify(status=False, errors=[str(e)]), 400
diff --git a/src/pylorax/api/workspace.py b/src/pylorax/api/workspace.py
new file mode 100644
index 00000000..6d9a9c8f
--- /dev/null
+++ b/src/pylorax/api/workspace.py
@@ -0,0 +1,99 @@
+#
+# Copyright (C) 2017 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+import os
+
+from pylorax.api.recipes import recipe_filename, recipe_from_toml, RecipeFileError
+from pylorax.sysutils import joinpaths
+
+
+def workspace_dir(repo, branch):
+ """Create the workspace's path from a Repository and branch
+
+ :param repo: Open repository
+ :type repo: Git.Repository
+ :param branch: Branch name
+ :type branch: str
+ :returns: The path to the branch's workspace directory
+ :rtype: str
+
+ """
+ repo_path = repo.get_location().get_path()
+ return joinpaths(repo_path, "workspace", branch)
+
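+# For example, with a repository stored at a hypothetical
+# /var/lib/lorax/composer/blueprints.git, workspace_dir(repo, "master")
+# would return /var/lib/lorax/composer/blueprints.git/workspace/master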
+
+def workspace_read(repo, branch, recipe_name):
+ """Read a Recipe from the branch's workspace
+
+ :param repo: Open repository
+ :type repo: Git.Repository
+ :param branch: Branch name
+ :type branch: str
+ :param recipe_name: The name of the recipe
+ :type recipe_name: str
+ :returns: The workspace copy of the recipe, or None if it doesn't exist
+ :rtype: Recipe or None
+ :raises: RecipeFileError
+ """
+ ws_dir = workspace_dir(repo, branch)
+ if not os.path.isdir(ws_dir):
+ os.makedirs(ws_dir)
+ filename = joinpaths(ws_dir, recipe_filename(recipe_name))
+ if not os.path.exists(filename):
+ return None
+ try:
+ with open(filename, 'rb') as f:
+ recipe = recipe_from_toml(f.read())
+ except IOError:
+ raise RecipeFileError
+ return recipe
+
+
+def workspace_write(repo, branch, recipe):
+ """Write a recipe to the workspace
+
+ :param repo: Open repository
+ :type repo: Git.Repository
+ :param branch: Branch name
+ :type branch: str
+ :param recipe: The recipe to write to the workspace
+ :type recipe: Recipe
+ :returns: None
+ :raises: IO related errors
+ """
+ ws_dir = workspace_dir(repo, branch)
+ if not os.path.isdir(ws_dir):
+ os.makedirs(ws_dir)
+ filename = joinpaths(ws_dir, recipe.filename)
+ with open(filename, 'wb') as f:
+ f.write(recipe.toml())
+
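+# Round-trip sketch, assuming an open repo and a Recipe instance named
+# 'recipe' (the names are illustrative):
+#
+# workspace_write(repo, "master", recipe)
+# copy = workspace_read(repo, "master", recipe.name)
+# assert copy == recipe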
+
+def workspace_delete(repo, branch, recipe_name):
+ """Delete the recipe from the workspace
+
+ :param repo: Open repository
+ :type repo: Git.Repository
+ :param branch: Branch name
+ :type branch: str
+ :param recipe_name: The name of the recipe
+ :type recipe_name: str
+ :returns: None
+ :raises: IO related errors
+ """
+ ws_dir = workspace_dir(repo, branch)
+ filename = joinpaths(ws_dir, recipe_filename(recipe_name))
+ if os.path.exists(filename):
+ os.unlink(filename)
diff --git a/src/pylorax/api/yumbase.py b/src/pylorax/api/yumbase.py
new file mode 100644
index 00000000..9cc29aa8
--- /dev/null
+++ b/src/pylorax/api/yumbase.py
@@ -0,0 +1,113 @@
+#
+# Copyright (C) 2017 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# pylint: disable=bad-preconf-access
+
+import logging
+log = logging.getLogger("lorax-composer")
+
+import configparser
+from fnmatch import fnmatchcase
+from glob import glob
+import os
+import yum
+# This is a hack to short circuit yum's internal logging
+yum.logginglevels._added_handlers = True
+
+from pylorax.sysutils import joinpaths
+
+def get_base_object(conf):
+ """Get the Yum object with settings from the config file
+
+ :param conf: configuration object
+ :type conf: ComposerParser
+ :returns: A Yum base object
+ :rtype: YumBase
+ """
+ cachedir = os.path.abspath(conf.get("composer", "cache_dir"))
+ yumconf = os.path.abspath(conf.get("composer", "yum_conf"))
+ yumroot = os.path.abspath(conf.get("composer", "yum_root"))
+ repodir = os.path.abspath(conf.get("composer", "repo_dir"))
+
+ c = configparser.ConfigParser()
+
+ # add the main section
+ section = "main"
+ data = {"cachedir": cachedir,
+ "keepcache": 0,
+ "gpgcheck": 0,
+ "plugins": 0,
+ "assumeyes": 1,
+ "reposdir": "",
+ "tsflags": "nodocs"}
+
+ if conf.get_default("yum", "proxy", None):
+ data["proxy"] = conf.get("yum", "proxy")
+
+ if conf.has_option("yum", "sslverify") and not conf.getboolean("yum", "sslverify"):
+ data["sslverify"] = "0"
+
+ c.add_section(section)
+ for key, value in data.items():
+ c.set(section, key, str(value)) # configparser requires string values
+
+ # write the yum configuration file
+ with open(yumconf, "w") as f:
+ c.write(f)
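+
+ # The generated configuration would look roughly like this (the cachedir
+ # value is illustrative; it comes from the composer config):
+ #
+ # [main]
+ # cachedir = /var/lib/lorax/composer/cache
+ # keepcache = 0
+ # gpgcheck = 0
+ # plugins = 0
+ # assumeyes = 1
+ # reposdir =
+ # tsflags = nodocs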
+
+ # create the yum base object
+ yb = yum.YumBase()
+
+ yb.preconf.fn = yumconf
+
+ yb.preconf.root = yumroot
+ if not os.path.isdir(yb.preconf.root):
+ os.makedirs(yb.preconf.root)
+
+ _releasever = conf.get_default("composer", "releasever", None)
+ if not _releasever:
+ distroverpkg = ['system-release(releasever)', 'redhat-release']
+ # Use yum private function to guess the releasever
+ _releasever = yum.config._getsysver("/", distroverpkg)
+ log.info("releasever = %s", _releasever)
+ yb.preconf.releasever = _releasever
+
+ # Turn on as much yum logging as we can
+ yb.preconf.debuglevel = 6
+ yb.preconf.errorlevel = 6
+ yb.logger.setLevel(logging.DEBUG)
+ yb.verbose_logger.setLevel(logging.DEBUG)
+
+ # Gather up all the available repo files, add the ones matching "repos":"enabled" patterns
+ enabled_repos = conf.get("repos", "enabled").split(",")
+ repo_files = glob(joinpaths(repodir, "*.repo"))
+ if not conf.has_option("repos", "use_system_repos") or conf.getboolean("repos", "use_system_repos"):
+ repo_files.extend(glob("/etc/yum.repos.d/*.repo"))
+
+ for repo_file in repo_files:
+ name = os.path.basename(repo_file)[:-5]
+ if any([fnmatchcase(name, pattern) for pattern in enabled_repos]): # pylint: disable=cell-var-from-loop
+ yb.getReposFromConfigFile(repo_file)
+
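+ # For instance, if the composer config had enabled = fedora*,rawhide
+ # (a hypothetical setting), fedora-updates.repo would match and be
+ # loaded while epel.repo would be skipped.
+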
+ # Update the metadata from the enabled repos to speed up later operations
+ log.info("Updating yum repository metadata")
+ for r in yb.repos.sort():
+ r.metadata_expire = 0
+ r.mdpolicy = "group:all"
+ yb.doRepoSetup()
+ yb.repos.doSetup()
+ yb.repos.populateSack(mdtype='all', cacheonly=1)
+
+ return yb
diff --git a/tests/pylorax/test_crossdomain.py b/tests/pylorax/test_crossdomain.py
index d3a585b1..74ed5201 100644
--- a/tests/pylorax/test_crossdomain.py
+++ b/tests/pylorax/test_crossdomain.py
@@ -76,14 +76,14 @@ class CrossdomainTest(unittest.TestCase):
def test_02_with_headers_specified(self):
response = self.server.get("/02")
self.assertEqual(200, response.status_code)
- self.assertEqual('Hello, World!', response.data)
+ self.assertEqual(b'Hello, World!', response.data)
self.assertEqual('TESTING', response.headers['Access-Control-Allow-Headers'])
def test_03_with_max_age_as_timedelta(self):
response = self.server.get("/03")
self.assertEqual(200, response.status_code)
- self.assertEqual('Hello, World!', response.data)
+ self.assertEqual(b'Hello, World!', response.data)
expected_max_age = int(timedelta(days=7).total_seconds())
actual_max_age = int(response.headers['Access-Control-Max-Age'])
@@ -92,7 +92,7 @@ class CrossdomainTest(unittest.TestCase):
def test_04_attach_to_all_false(self):
response = self.server.get("/04")
self.assertEqual(200, response.status_code)
- self.assertEqual('Hello, World!', response.data)
+ self.assertEqual(b'Hello, World!', response.data)
# when attach_to_all is False the decorator will not assign
# the Access-Control-* headers to the response
@@ -103,15 +103,17 @@ class CrossdomainTest(unittest.TestCase):
def test_05_options_request(self):
response = self.server.options("/05")
self.assertEqual(200, response.status_code)
- self.assertEqual('Hello, World!', response.data)
+ self.assertEqual(b'Hello, World!', response.data)
- self.assertEqual(response.headers['Access-Control-Allow-Methods'], 'HEAD, OPTIONS, GET')
+ # Not always in the same order, so test individually
+ for m in ["HEAD", "OPTIONS", "GET"]:
+ self.assertIn(m, response.headers['Access-Control-Allow-Methods'])
def test_06_with_origin_as_list(self):
response = self.server.get("/06")
self.assertEqual(200, response.status_code)
- self.assertEqual('Hello, World!', response.data)
+ self.assertEqual(b'Hello, World!', response.data)
for header, value in response.headers:
if header == 'Access-Control-Allow-Origin':
diff --git a/tests/pylorax/test_projects.py b/tests/pylorax/test_projects.py
index 67cc207a..9184a276 100644
--- a/tests/pylorax/test_projects.py
+++ b/tests/pylorax/test_projects.py
@@ -44,7 +44,7 @@ class Yaps(object):
version = "version"
def returnChangelog(self):
- return [[0,1,"Heavy!"]]
+ return [[0, 1, "Heavy!"]]
class TM(object):
@@ -74,7 +74,7 @@ class ProjectsTest(unittest.TestCase):
self.assertEqual(api_time(499222800), "1985-10-27T01:00:00")
def test_api_changelog(self):
- self.assertEqual(api_changelog([[0,1,"Heavy!"], [0, 1, "Light!"]]), "Heavy!")
+ self.assertEqual(api_changelog([[0, 1, "Heavy!"], [0, 1, "Light!"]]), "Heavy!")
def test_api_changelog_empty_list(self):
self.assertEqual(api_changelog([]), '')
diff --git a/tests/pylorax/test_server.py b/tests/pylorax/test_server.py
index b463c39f..c0ea94b6 100644
--- a/tests/pylorax/test_server.py
+++ b/tests/pylorax/test_server.py
@@ -514,8 +514,8 @@ class ServerTestCase(unittest.TestCase):
self.assertEqual(200, response.status_code)
self.assertTrue(len(response.data) > 1024)
# look for some well known strings inside the documentation
- self.assertRegexpMatches(response.data, r"Lorax [\d.]+ documentation")
- self.assertRegexpMatches(response.data, r"Copyright \d+, Red Hat, Inc.")
+ self.assertRegex(response.data, r"Lorax [\d.]+ documentation")
+ self.assertRegex(response.data, r"Copyright \d+, Red Hat, Inc.")
def test_api_docs(self):
"""Test the /api/docs/"""
diff --git a/tests/pylorax/test_yumbase.py b/tests/pylorax/test_yumbase.py
index b0569028..aaa7fb74 100644
--- a/tests/pylorax/test_yumbase.py
+++ b/tests/pylorax/test_yumbase.py
@@ -19,7 +19,7 @@ import shutil
import tempfile
import unittest
-import ConfigParser
+import configparser
from pylorax.api.config import configure, make_yum_dirs
from pylorax.api.yumbase import get_base_object
@@ -48,7 +48,7 @@ use_system_repos = False
self.yb = get_base_object(config)
# will read the stored yum config file
- self.yumconf = ConfigParser.ConfigParser()
+ self.yumconf = configparser.ConfigParser()
self.yumconf.read([config.get("composer", "yum_conf")])
@classmethod